seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18915553573 | import pytest
from src.same_tree import Solution
from src.utils.binary_tree import list_to_tree
@pytest.mark.parametrize(
"list_p,list_q,equal",
[
([1, 2, 3], [1, 2, 3], True),
([1, 2], [1, None, 2], False),
([], [], True),
([1, 2, 1], [1, 1, 2], False),
],
)
def test_solution(list_p, list_q, equal):
p = list_to_tree(list_p)
q = list_to_tree(list_q)
assert Solution().isSameTree(p, q) is equal
| lancelote/leetcode | tests/test_same_tree.py | test_same_tree.py | py | 456 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "src.utils.binary_tree.list_to_tree",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "src.utils.binary_tree.list_to_tree",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.same_tree.Solution",
"line_number": 20,
"usage_type": "call"
... |
38830842298 | from rest_framework import status
def jwt_response_payload_handler(token, user=None, request=None):
return {
'code': status.HTTP_200_OK,
'message': '',
'result': {
'token': token,
'user_id': user.id,
'username': user.username
}
}
| helloming86/DjangoJWTDemo | users/utils.py | utils.py | py | 308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 6,
"usage_type": "name"
}
] |
16283819127 | from django.conf.urls import url
from .views import(
AddQuestionCreateAPIView,
QuestionListAPIView,
QuestionRUDAPIView,
QuestionImageRUDAPIView,
UserQuestionListAPIView,
TopicCreateAPIView,
TopicRUDAPIView,
SubTopicCreateAPIView,
SubTopicRUDAPIView,
QuestionOptionCreateAPIView,
QuestionOptionRUDAPIView,
QuestionSolutionCreateAPIView,
QuestionSolutionRUDAPIView,
QuestionDiscussionCreateAPIView,
QuestionDiscussionRUDAPIView,
)
urlpatterns = [
url(r'^$' ,QuestionListAPIView.as_view() ,name="questions"),
url(r'user' ,UserQuestionListAPIView.as_view() ,name="user_questions"),
url(r'create' ,AddQuestionCreateAPIView.as_view() ,name="question_create"),
url(r'edit/(?P<pk>\d+)' ,QuestionRUDAPIView.as_view() ,name="question_edit"),
url(r'edit-image/(?P<pk>\d+)',QuestionImageRUDAPIView.as_view() ,name="question_edit_image"),
url(r'option-create' ,QuestionOptionCreateAPIView.as_view() ,name="question_answer_create"),
url(r'option-edit/(?P<pk>\d+)' ,QuestionOptionRUDAPIView.as_view() ,name="question_answer_edit"),
###################
url(r'solution-create' ,QuestionSolutionCreateAPIView.as_view() ,name="question_solution_create"),
url(r'solution-edit/(?P<pk>\d+)' ,QuestionSolutionRUDAPIView.as_view() ,name="question_solution_edit"),
url(r'discussion-create' ,QuestionDiscussionCreateAPIView.as_view(),name="discussion_create"),
url(r'discussion-edit/(?P<pk>\d+)' ,QuestionDiscussionRUDAPIView.as_view() ,name="discussion_edit"),
###################
url(r'topic-create' ,TopicCreateAPIView.as_view() ,name="topic_create"),
url(r'topic-edit/(?P<pk>\d+)' ,TopicRUDAPIView.as_view() ,name="topic_edit"),
url(r'subTopic-create' ,SubTopicCreateAPIView.as_view() ,name="subTopic_create"),
url(r'subTopic-edit/(?P<pk>\d+)' ,SubTopicRUDAPIView.as_view() ,name="subTopic_edit"),
]
| ashukesri/100Percentile | questions/urls.py | urls.py | py | 2,307 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "views.QuestionListAPIView.as_view",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "views.QuestionListAPIView",
"line_number": 28,
"usage_type": "name"
},
{
"... |
71056879784 | from wordcloud import WordCloud
import matplotlib.pyplot as plt
from collections import Counter
from konlpy.tag import Okt
from PIL import Image
import numpy as np
import sys
#์ฌ์ฉ์ ์ ์ ๊ฐ๋ฅํ ์ ๋ณด ์
๋ ฅ
least_num = int(input("์๋ ํด๋ผ์ฐ๋ ๋จ์ด ์ต์ ๋น๋๋ฅผ ์ ์๋ก ์
๋ ฅํ์์ค.:"))
directory = input("๋ฐ์ดํฐ์ ์ฃผ์๋ฅผ ์
๋ ฅํด ์ฃผ์ธ์.(ํ์ผ๋จ์์
๋๋ค.):")
temp_save_dirc = input("์์ฑ๋ ์๋ํด๋ผ์ฐ๋๊ฐ ์ ์ฅ๋ ์ฃผ์๋ฅผ ์
๋ ฅํด ์ฃผ์ธ์.:")
#ํ์ผ ์ฃผ์ ์ฒ๋ฆฌ
empty_list = []
empty_str = ""
for i in directory:
if(i == "\\"):
i = '/'
empty_list.append(i)
else:
empty_list.append(i)
real_dirc = empty_str.join(empty_list)
#์ ์ฅ ์ฃผ์ ์ฒ๋ฆฌ
save_empty_list = []
save_empty_str = ""
for i in temp_save_dirc:
if(i == "\\"):
i = '/'
save_empty_list.append(i)
else:
save_empty_list.append(i)
real_save_dirc = save_empty_str.join(save_empty_list)
real_save_dirc = real_save_dirc + "/Word_cloud.png"
#matplotlib ๋ํํ ๋ชจ๋ ์ผ๊ธฐ
plt.ion()
#์๋ํด๋ผ์ฐ๋์ ๊ธฐ๋ณธ ๋ฐ์ดํฐ ์์น ์ค์
with open(real_dirc, 'r', encoding='utf-8') as f:
text = f.read()
# OKT ์ฌ์ ์ค์
okt = Okt()
#๋ช
์ฌ๋ง ์ถ์ถ
nouns = okt.nouns(text)
# ๋จ์ด์ ๊ธธ์ด๊ฐ 1๊ฐ์ธ ๊ฒ์ ์ ์ธ
words = [n for n in nouns if len(n) > 1]
# ์์์ ์ป์ words๋ฅผ ์ฒ๋ฆฌํ์ฌ ๋จ์ด๋ณ ๋น๋์ ํํ์ ๋์
๋๋ฆฌ ๋ฐ์ดํฐ๋ฅผ ๊ตฌํจ
c = Counter(words)
#๊ฐ ๋จ์ด์ ๋น๋์ ํ์ธ
print(c)
#์ต์ ๋น๋์ ์ฒ๋ฆฌ
key = list(c.keys())
for a in key:
if(c[a] < least_num):
del c[a]
#๋น๋์๊ฐ ๋ง์ง ์์ ์ ํ๋ก๊ทธ๋จ์ ์ข
๋ฃ
if(len(c) == 0):
print("์ต์ ๋น๋์๊ฐ ๋๋ฌด ํฝ๋๋ค. ๋ค์ ์ค์ ํด ์ฃผ์ธ์.")
print("ํ๋ก๊ทธ๋จ์ ์ข
๋ฃํฉ๋๋ค.")
sys.exit()
#์๋ํด๋ผ์ฐ๋ ๋ง๋ค๊ธฐ
wc = WordCloud(background_color="white" , font_path=r"C:/Windows/Fonts/malgun.ttf", width=600, height=600, scale=2.0, max_font_size=250)
gen = wc.generate_from_frequencies(c)
plt.figure()
plt.imshow(gen)
#ํ์ผ๋ก ์ ์ฅ
wc.to_file(real_save_dirc) | LimJinOuk/Word-Cloud | WordCloud.py | WordCloud.py | py | 2,206 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "konlpy.tag.Okt",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "collections.Co... |
74202841385 | import pyaudio
import numpy as np
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK_SIZE = 1000
MAX_INT16 = np.iinfo(np.int16).max
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
output=True)
for i in range(0, 18):
print(i)
f = open(str(i) + ".raw", "rb")
with f:
data = f.read()
data_float = np.frombuffer(data, dtype=np.float)
data_scaled = data_float * MAX_INT16
data_int = data_scaled.astype(int)
buff = memoryview(data_int).tobytes()
stream.write(buff)
stream.stop_stream()
stream.close()
p.terminate()
| gmamaladze/tf-voice-pi | tfvoicepi/tools/play.py | play.py | py | 663 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.iinfo",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
... |
35555355731 | from datetime import date
from fastapi import APIRouter, Depends, Query
from sqlalchemy.ext.asyncio import AsyncSession
from api.deps import get_db
from crud.analytics import get_analytics_by_range_of_dates, get_analytics_by_student_id
from schemas.analytics import AnalyticsByRangeOfDates
router = APIRouter()
@router.get("/", response_model=list[AnalyticsByRangeOfDates | None])
async def get_analytics_by_dates(
date_start: date = Query(...), date_end: date = Query(...), db: AsyncSession = Depends(get_db)
):
"""Get analytics by all students by range of dates.
Args:
date_start: Range start date.
date_end: Range end date incl.
db: SQLAlchemy local session.
Returns:
List of AnalyticsByRangeOfDates each containing emotion, emotion's count and date.
"""
return await get_analytics_by_range_of_dates(
db=db, date_start=date_start, date_end=date_end
)
@router.get("/{student_track_id}", response_model=list[AnalyticsByRangeOfDates | None])
async def get_analytics_by_student(
student_track_id: int,
date_start: date = Query(...),
date_end: date = Query(...),
db: AsyncSession = Depends(get_db),
):
"""Get analytics by student's track id and range of dates.
Args:
student_track_id: Student's track ID.
date_start: Range date start.
date_end: Range date end.
db: SQLAlchemy local session,
Returns:
List of AnalyticsByRangeOfDates each containing emotion, emotion's count and date.
"""
return await get_analytics_by_student_id(
db=db, student_track_id=student_track_id, start_date=date_start, end_date=date_end
)
| starminalush/mfdp-2023-mvp | backend/api/endpoints/analytics.py | analytics.py | py | 1,680 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "f... |
35941174358 | import requests
import json
from PIL import Image, ImageTk
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import time
import os
from bs4 import BeautifulSoup
import tkinter as tk
import io
# Custom Exceptions Start
class invalidInputInfo(Exception):
pass
class clearList(Exception):
pass
class checkFaild(Exception):
pass
class siteUnreachable(Exception):
pass
class screenshotSelectedElementError(Exception):
pass
class imageCropError(Exception):
pass
class displayError(Exception):
pass
# Custom Exceptions End
# Player API Start
def playerUUIDgui():
homeUnpack()
# Frame init
mapiot.geometry('1000x600')
canvasFrame = tk.Frame(mapiot)
infoFrame = tk.Frame(mapiot)
# one time search
def startThisFunc():
# Clear previous canvas in frame
try:
for skinC in canvasFrame.winfo_children():
skinC.destroy()
except:
pass
# get info from gui
uI = usrInput.get()
# processing
try:
# Info processing
getInfo = playerAPI(uI)
outBlock.set(getInfo[0])
# image processing
url = str("https://minecraftskinstealer.com/api/v1/skin/render/fullbody/" + getInfo[1] + "/700")
skinImage = ImageTk.PhotoImage(Image.open(io.BytesIO(requests.get(url).content)))
skinCanvas = tk.Label(canvasFrame, image=skinImage, bg="white")
skinCanvas.image = skinImage
skinCanvas.pack()
except invalidInputInfo:
outBlock.set("Invalid Info")
except Exception:
outBlock.set("Something went wrong")
# dynamic info init
outBlock = tk.StringVar()
# Default Image init
defaultImageUrl = "https://upload.wikimedia.org/wikipedia/en/5/51/Minecraft_cover.png"
skinImage = ImageTk.PhotoImage(Image.open(io.BytesIO(requests.get(defaultImageUrl).content)))
skinCanvas = tk.Label(canvasFrame, image=skinImage, bg="white")
skinCanvas.image = skinImage
skinCanvas.pack()
canvasFrame.pack()
# button init
outLable = tk.Label(infoFrame, textvariable=outBlock, font=('Arial', 14))
outLable.pack()
usrInput = tk.Entry(infoFrame, show=None, font=('Arial', 14))
usrInput.pack()
startIt = tk.Button(infoFrame, text = 'Search', command=startThisFunc)
startIt.pack()
infoFrame.pack()
# exit init
def fucExit():
homePack()
try:
infoFrame.pack_forget()
canvasFrame.destroy()
except:
outBlock.set("Something went wrong")
buttonExit = tk.Button(infoFrame, text = 'Back to home', command=fucExit)
buttonExit.pack()
def formatUUID(uuid):
outLst = [alphabit for alphabit in uuid if alphabit != "-"]
return "".join(outLst)
def testUUID(uuid):
fullURL = "https://api.minetools.eu/profile/" + uuid
content = requests.get(url=fullURL)
result = json.loads(content.text)
try:
if str(result["decoded"]) == "None":
return False
else:
return True
except:
return False
def playerAPI(infoIn):
toolDict = {
"MoJangAPI": "https://api.mojang.com/user/profiles/",
# "MineToolsEU": "https://api.minetools.eu/profile/"
}
if testUUID(infoIn) is False:
raise invalidInputInfo()
for tool in toolDict.keys():
if tool == "MoJangAPI":
infoNeeded = formatUUID(infoIn)
FullURL = toolDict[tool] + infoNeeded + "/names"
content = requests.get(url=FullURL)
nameLst = json.loads(content.text)
if len(nameLst) > 1:
infoA = nameLst[-1]["name"]
previousName = []
for name in nameLst[:-1]:
previousName.append(name["name"])
infoB = "Used IDs: " + "; ".join(previousName)
if len(nameLst) == 1:
infoA = nameLst[0]["name"]
returnLst = []
returnLst.append(str("-=" * 15))
returnLst.append(str("Current ID: " + infoA))
returnLst.append(infoB)
returnLst.append(str("-=" * 15))
return "\n".join(returnLst), infoA
# Player API End
# Server API Start
def serverAPIgui():
homeUnpack()
def startThisFunc():
uI = usrInputIP.get()
uI2 = usrInputPort.get()
try:
outBlock.set(serverAPI(uI, uI2))
except invalidInputInfo:
outBlock.set("Invalid Info")
outBlock = tk.StringVar()
outBlock.set("Ip in upper box \nport in lower box \ntype 0 indicate default port")
outLable = tk.Label(mapiot, textvariable=outBlock, font=('Arial', 14))
outLable.pack()
usrInputIP = tk.Entry(mapiot, show=None, font=('Arial', 14))
usrInputIP.pack()
usrInputPort = tk.Entry(mapiot, show=None, font=('Arial', 14))
usrInputPort.pack()
startIt = tk.Button(mapiot, text = 'Search', command=startThisFunc)
startIt.pack()
def fucExit():
homePack()
buttonExit.pack_forget()
usrInputIP.pack_forget()
usrInputPort.pack_forget()
startIt.pack_forget()
outLable.pack_forget()
buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
buttonExit.pack()
def minecraftColorcodeTranslate(letter):
mcFontDict = {
"DARK_RED": ["\u00A74", "&4"],
"RED": ["\u00A7c", "&c"],
"GOLD": ["\u00A76", "&6"],
"YELLOW": ["\u00A7e", "&e"],
"DARK_GREEN": ["\u00A72", "&2"],
"GREEN": ["\u00A7a", "&a"],
"AQUA": ["\u00A7b", "&b"],
"DARK_AQUA": ["\u00A73", "&3"],
"DARK_BLUE": ["\u00A71", "&1"],
"BLUE": ["\u00A79", "&9"],
"LIGHT_PURPLE": ["\u00A7d", "&d"],
"DARK_PURPLE": ["\u00A75", "&5"],
"WHITE": ["\u00A7f", "&f"],
"GRAY": ["\u00A77", "&7"],
"DARK_GRAY": ["\u00A78", "&8"],
"BLACK": ["\u00A70", "&0"],
"FONT_RESET": ["\u00A7r", "&r"],
"FONT_BOLD": ["\u00A7l", "&l"],
"FONT_ITALIC": ["\u00A7o", "&o"],
"FONT_UNDERLINE": ["\u00A7n", "&n"],
"FONT_STRIKE": ["\u00A7m", "&m"]
}
for colorCodes in mcFontDict.keys():
letter = letter.replace(mcFontDict[colorCodes][0], mcFontDict[colorCodes][1])
letter = letter.replace(">>>", ">>>")
return letter
def serverAPI(infoIn, gamePort):
toolDict = {
"mcsrvstat": "https://api.mcsrvstat.us/2/",
"mcapi": "https://mcapi.us/server/status?ip=",
}
dumpLst = []
outLst = []
def getConent(fullURL):
content = requests.get(url=fullURL)
formated = json.loads(content.text)
dumpLst.append([tool, formated])
try:
if int(gamePort) == 0:
for tool in toolDict.keys():
fullURL = toolDict[tool] + infoIn
getConent(fullURL)
else:
for tool in toolDict.keys():
fullURL = toolDict[tool] + infoIn + "&port=" + gamePort
getConent(fullURL)
except:
raise invalidInputInfo
if dumpLst[0][1]["online"] == True:
outLst.append(str("-=" * 15))
outLst.append("Stat: Serving")
outLst.append(f"Ping: {int(dumpLst[1][1]['duration']) / 1000000:.2f} ms")
outLst.append(f"IP:{dumpLst[0][1]['hostname']} ({dumpLst[0][1]['ip']})")
outLst.append(f'Port: {dumpLst[0][1]["port"]}')
try:
outLst.append(f'Motd Line A: {minecraftColorcodeTranslate(dumpLst[0][1]["motd"]["clean"][0]).strip()}')
except:
outLst.append(f'Motd Line A: NoInfo')
try:
outLst.append(f'Motd Line B: {minecraftColorcodeTranslate(dumpLst[0][1]["motd"]["clean"][1]).strip()}')
except:
outLst.append(f'Motd Line B: NoInfo')
outLst.append(f"Players: {dumpLst[0][1]['players']['online']} / {dumpLst[0][1]['players']['max']}")
outLst.append(str("-=" * 15))
else:
outLst.append(str("-=" * 15))
outLst.append(f"IP:{dumpLst[0][1]['hostname']} ({dumpLst[0][1]['ip']})")
outLst.append("Stat: Down")
outLst.append(str("-=" * 15))
return "\n".join(outLst)
# Server API End
# Slime Chunck Finder Start
def slimeCFgui():
homeUnpack()
mapiot.geometry('1000x600')
slimeImgFrame = tk.Frame(mapiot)
slimeImgFrame.pack()
infoFrame = tk.Frame(mapiot)
infoFrame.pack()
def startSearch():
try:
try:
for slimeImg in slimeImgFrame.winfo_children():
slimeImg.destroy()
except:
pass
try:
slimeFilePath = slimeChunckFinder(seedInputEntry.get(), xLocateEntry.get(), yLocateEntry.get())
slimeImageCall = tk.PhotoImage(file=slimeFilePath)
slimeImageDisplay = tk.Label(slimeImgFrame, image=slimeImageCall)
slimeImageDisplay.image = slimeImageCall
slimeImageDisplay.pack()
except:
raise displayError
except checkFaild:
errorTextVar.set("checkFaild")
except siteUnreachable:
errorTextVar.set("siteUnreachable")
except screenshotSelectedElementError:
errorTextVar.set("screenshotSelectedElementError")
except imageCropError:
errorTextVar.set("imageCropError")
except displayError:
errorTextVar.set("displayError")
errorTextVar = tk.StringVar()
errorTextVar.set("First Line: Minecraft Seed \nSecond Line: X Location \nThird Line: Y Location")
errorNoticeBlock = tk.Label(infoFrame, textvariable=errorTextVar, font=('Arial', 14))
errorNoticeBlock.pack()
seedInputEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
seedInputEntry.pack()
xLocateEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
xLocateEntry.pack()
yLocateEntry = tk.Entry(infoFrame, show=None, font=('Arial', 14))
yLocateEntry.pack()
searchStartButton = tk.Button(infoFrame, text="Search 5x5 Chunks", command=startSearch)
searchStartButton.pack()
def exitSearch():
infoFrame.pack_forget()
slimeImgFrame.pack_forget()
homePack()
exitButton = tk.Button(infoFrame, text = 'Back to home', command=exitSearch)
exitButton.pack()
def slimeChunckFinder(seedInput, locationX, locationY):
baseURL = "http://mineatlas.com/?levelName=Random&seed="
uselessArg = [
"&mapZoom=18",
"&pos=",
"&Player=true",
"&Spawn=true",
"&Likely+Villages=false",
"&Ocean+Monuments=false",
"&Jungle+Temples=false",
"&Desert+Temples=false",
"&Witch+Huts=false",
"&Slime+Chunks=true"
]
otherAttri = ''.join(uselessArg)
try:
driver = visitSite(baseURL + seedInput + locationX + locationY + otherAttri)
except:
raise siteUnreachable
webXPATH = '/html/body/div/div[2]/div[1]/div[2]'
try:
slimeCanvas = driver.find_element(By.XPATH,webXPATH)
except:
raise checkFaild
try:
slimeFilePath = os.path.expandvars('$HOME') + "/Downloads/mapiot"
if not os.path.exists(slimeFilePath):
os.makedirs(slimeFilePath)
slimeFile = slimeFilePath + "/slimeChunks.png"
slimeCanvas.screenshot(slimeFile)
except:
raise screenshotSelectedElementError
driver.quit()
try:
slimeCanvasScreenShot = Image.open(slimeFile)
originalWidth, originalHeight = slimeCanvasScreenShot.size
width = originalWidth / 2 - 60
top = originalWidth / 2 - 60
right = originalHeight / 2 + 60
bottom = originalHeight / 2 + 60
slimeResult = slimeCanvasScreenShot.crop((width, top, right, bottom))
slimeResult.save(slimeFile)
return slimeFile
except:
raise imageCropError
# Slime Chunck Finder End
# Major Bug Checker Start
def majorBugGUI():
textBlockA = tk.Label(mapiot, text = 'This may take seconds to load, pls wait', font=('Arial', 14))
textBlockA.pack()
homeUnpack()
textBlockB = tk.Listbox(mapiot, yscrollcommand = scrollB.set, font=('Arial', 14), height=10, width=50)
for eachEr in checkMajorBug():
textBlockB.insert("end", eachEr + "\n")
textBlockB.pack()
# Finish loading
textBlockA.pack_forget()
def fucExit():
homePack()
buttonExit.pack_forget()
textBlockB.pack_forget()
buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
buttonExit.pack()
def checkMajorBug():
mojangBugURL = "https://bugs.mojang.com/issues/"
jqlArg = "?jql=project%20%3D%20MC%20AND%20status%20%3D%20%22In%20Progress%22%20ORDER%20BY%20votes%20DESC%2C%20updated%20DESC"
mojangBugReportURL = mojangBugURL + jqlArg
siteXPATH = '//*[@id="main"]/div/div[2]/div/div/div/div/div/div[1]/div[1]/div/div[1]/div[2]/div/ol'
driver = visitSite(mojangBugReportURL)
inProgressBugLst = driver.find_element(By.XPATH,siteXPATH)
lstHTML = inProgressBugLst.get_attribute('innerHTML')
bfObject = BeautifulSoup(str(lstHTML), features="lxml")
preBugLst = bfObject.find_all('li')
guiDisplay = []
for preBug in preBugLst:
guiDisplay.append(str("โ" * 70))
guiDisplay.append(f"\t[{preBug.get('data-key')}] \t{preBug.get('title')}")
driver.quit()
return guiDisplay
# Major Bug Checker End
# Spigot Resource Checker Start
def spigotCheckerGUI():
homeUnpack()
processLst = []
def inCheck(usrIn):
try:
testA = usrIn.find("-")
except:
raise invalidInputInfo
if len(usrIn) < 3:
raise invalidInputInfo
if usrIn == "clear":
raise clearList
return usrIn
def addToProcessLst():
try:
processLst.append(inCheck(usrInputId.get()))
outBlock.set("\n".join(processLst))
except invalidInputInfo:
outBlock.set("Invalid Resource Info")
except clearList:
for i in range(len(processLst)):
processLst.pop(0)
outBlock.set("Cleared List")
def startThisFunc():
try:
outBlock.set(spigotResourceChecker(processLst))
except invalidInputInfo:
outBlock.set("Invalid Info")
def seeList():
outBlock.set("\n".join(processLst))
# Display
outBlock = tk.StringVar()
outBlock.set("type in the format of <spigotID>[dash]<version>, click add")
outLable = tk.Label(mapiot, textvariable=outBlock, font=('Arial', 14))
outLable.pack()
usrInputId = tk.Entry(mapiot, show=None, font=('Arial', 14))
usrInputId.pack()
addTrigger = tk.Button(mapiot, text = 'Add to List', command=addToProcessLst)
addTrigger.pack()
curLst = tk.Button(mapiot, text = 'Current List', command=seeList)
curLst.pack()
startIt = tk.Button(mapiot, text = 'Check', command=startThisFunc)
startIt.pack()
# Exit Button
def fucExit():
homePack()
buttonExit.pack_forget()
usrInputId.pack_forget()
addTrigger.pack_forget()
startIt.pack_forget()
outLable.pack_forget()
curLst.pack_forget()
buttonExit = tk.Button(mapiot, text = 'Back to home', command=fucExit)
buttonExit.pack()
def spigotResourceChecker(resDetail):
returnLst = []
try:
for spigotPlugin in resDetail:
versionPosition = spigotPlugin.find("-")
versionId = spigotPlugin[versionPosition+1:]
resId = spigotPlugin[:versionPosition]
fullURL = "https://api.spigotmc.org/legacy/update.php?resource=" + resId
spigotAPI = requests.get(url=fullURL)
if str(spigotAPI.text) != versionId:
yesOrNoUTD = "X"
else:
yesOrNoUTD = "โ"
returnLst.append(str("-" * 70))
returnLst.append(f"Resource ID: {resId} | Your Version: {versionId} | Newest: {str(spigotAPI.text)} | Uptodate: {yesOrNoUTD}")
return "\n".join(returnLst)
except:
return "empty list"
# Spigot Resource Checker Stop
# Environment Start
def chromeSetting():
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
options.add_argument('window-size=1920x1080')
options.add_argument('--hide-scrollbars')
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors", "enable-automation"])
return options
def visitSite(FullURL):
driver = webdriver.Chrome(options=options, service=Service(ChromeDriverManager().install()))
driver.get(FullURL)
time.sleep(2)
return driver
def excecutePath():
preworkPath = "C:/Program Files/mapiot" if os.name=='nt' else str(os.environ['HOME'] + "/Downloads/mapiot")
if not os.path.exists(preworkPath):
os.makedirs(preworkPath)
return preworkPath + "/"
# Environment End
# GUI Start
def homeUnpack():
# Frame Unpack
homeMenu.pack_forget()
def homePack():
# Frame Pack, init window size
mapiot.geometry('500x300')
homeMenu.pack()
# GUI End
# Script Start
if __name__ == '__main__':
# Headless Browser Init
options = chromeSetting()
# GUI Init
mapiot = tk.Tk()
mapiot.title("Mapiot v1.0.0")
mapiot.geometry('500x300')
scrollB= tk.Scrollbar(mapiot)
scrollB.pack(side="right", fill="y")
# Buttons
homeMenu = tk.Frame(mapiot)
nameDisplay = tk.Label(homeMenu, text = 'Thank you for using Mapiot.', font=('Arial', 20), width=30, height=2)
buttonUUID = tk.Button(homeMenu, text = 'Player UUID Checker', command=playerUUIDgui)
buttonMajorBugGUI = tk.Button(homeMenu, text = 'Mojang Bugs Checker', command=majorBugGUI)
buttonServerAPI = tk.Button(homeMenu, text = 'Server Stats Checker', command=serverAPIgui)
buttonSpigotChecker = tk.Button(homeMenu, text = 'Spigot Resources Checker', command=spigotCheckerGUI)
slimeChecker = tk.Button(homeMenu, text = 'Slime Chunk Finder', command=slimeCFgui)
buttonQuit = tk.Button(homeMenu, text = 'Quit', command=quit)
# Button Install
nameDisplay.pack()
buttonMajorBugGUI.pack()
buttonUUID.pack()
buttonServerAPI.pack()
buttonSpigotChecker.pack()
slimeChecker.pack()
buttonQuit.pack()
# Frame Install
homePack()
# GUI Loop
mapiot.mainloop()
| akaTiger/Mapiot | old.py | old.py | py | 18,576 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Frame",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
... |
29642501697 | import argparse
import numpy as np
import scipy.stats
from statsmodels.stats.proportion import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import matplotlib.patches as mpatches
matplotlib.rcParams['font.family'] = 'Arial'
def get_conf_int_stats(obs_count, total_count, method='jeffreys'):
pref_value = obs_count/total_count
ci_lower, ci_upper = proportion_confint(obs_count, total_count, alpha=0.05, method=method)
return pref_value, [ci_lower, ci_upper]
def plot_rs_by_test_suite_grid_5_by_6(rs, models, human_data, test_names, model2run_indice, model2color, model2name, add_test_name=True, savepath=None):
# Plot results as bar graph grid 5*6
n_row = 5
n_col = 6
bar_width = 0.75
fig, axs = plt.subplots(n_row, n_col, figsize=(8, 6.5), sharey='row', sharex='col')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
for k, test_name in enumerate(test_names):
row_id = k // n_col
col_id = k % n_col
axs[row_id, col_id].set_title('Test {}'.format(k+1), fontsize=12)
axs[row_id, col_id].set_ylim(0,1)
axs[row_id, col_id].set_xlim(-1.75,len(models)-0.25)
axs[row_id, col_id].set_xticks(np.arange(0, len(models)))
axs[row_id, col_id].set_yticks(np.arange(0, 1.2, 0.25))
axs[row_id, col_id].set_xticklabels([])
axs[row_id, col_id].spines['right'].set_visible(False)
axs[row_id, col_id].spines['top'].set_visible(False)
axs[row_id, col_id].grid(linestyle='--', alpha=0.5, zorder=0, axis='y')
axs[row_id, col_id].set_axisbelow(True)
axs[row_id, col_id].errorbar(-1, human_data[test_name]['acc_value'], yerr=[[human_data[test_name]['acc_value'] - human_data[test_name]['acc_lower']], [human_data[test_name]['acc_upper'] - human_data[test_name]['acc_value']]], label='Human', color='black', marker='None', linestyle='none')
axs[row_id, col_id].bar(-1, human_data[test_name]['acc_value'], label='Human', width=bar_width, color='white', edgecolor='k')
for i, model in enumerate(models):
data = np.array([rs[model][run_index][test_name]['item_acc_list'] for run_index in model2run_indice[model]], dtype='float')
score_averaged_across_run = np.mean(data, axis=0)
y_mean = np.mean(score_averaged_across_run)
yerr = 1.96*(np.std(score_averaged_across_run)/np.sqrt(len(score_averaged_across_run)))
axs[row_id, col_id].bar(i, y_mean, label=model, width=bar_width, color=model2color[model], yerr=yerr)
for index in range(k+1, n_row*n_col):
row_id = index // n_col
col_id = index % n_col
axs[row_id, col_id].set_axis_off()
ax = axs[4, 5]
ax.bar(0, 0, label='Human', width=0.35, color='black', fill=False)
for i, model in enumerate(models):
ax.bar(i+1, 0, label=model2name[model], width=0.35, color=model2color[model])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.legend(loc = 'center', bbox_to_anchor=(-1.2, 0.5), ncol=2, fontsize=12)
fig.text(0.06, 0.5, 'Test Accuracy Score', ha='center', va='center', rotation='vertical')
if add_test_name:
textstr = '\n'.join(['({}) {}'.format(k+1, test_name2pretty_name[test_name]) for k, test_name in enumerate(test_names)])
props = dict(boxstyle='round,pad=0.5', facecolor='white', alpha=0.5, ec='lightgray')
fig.text(0.94, 0.5, textstr, fontsize=10,
verticalalignment='center', bbox=props, linespacing = 1.65)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.show(block=False)
plt.pause(1)
plt.close()
def plot_rs_by_test_suite_grid_3_by_9(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath=None):
# Plot results as bar graph grid 3*9
n_row = 3
n_col = 9
bar_width = 0.75
fig, axs = plt.subplots(n_row, n_col, figsize=(11, 3.6), sharey='row', sharex='col')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
for k, test_name in enumerate(test_names):
row_id = k // n_col
col_id = k % n_col
axs[row_id, col_id].set_title('Test {}'.format(k+1), fontsize=10)
axs[row_id, col_id].set_ylim(0,1)
axs[row_id, col_id].set_xlim(-1.75,len(models)-0.25)
axs[row_id, col_id].spines['right'].set_visible(False)
axs[row_id, col_id].spines['top'].set_visible(False)
axs[row_id, col_id].grid(linestyle='--', alpha=0.5, zorder=0, axis='y')
axs[row_id, col_id].set_xticks(np.arange(0, len(models)))
axs[row_id, col_id].set_yticks(np.arange(0, 1.2, 0.25))
axs[row_id, col_id].set_xticklabels([])
axs[row_id, col_id].set_axisbelow(True)
axs[row_id, col_id].errorbar(-1, human_data[test_name]['acc_value'], yerr=[[human_data[test_name]['acc_value'] - human_data[test_name]['acc_lower']], [human_data[test_name]['acc_upper'] - human_data[test_name]['acc_value']]], color='black', marker='None', linestyle='none')
axs[row_id, col_id].bar(-1, human_data[test_name]['acc_value'], label='Human', width=bar_width, color='white', edgecolor='k')
for i, model in enumerate(models):
data = np.array([rs[model][run_index][test_name]['item_acc_list'] for run_index in model2run_indice[model]], dtype='float')
score_averaged_across_run = np.mean(data, axis=0)
y_mean = np.mean(score_averaged_across_run)
yerr = 1.96*(np.std(score_averaged_across_run)/np.sqrt(len(score_averaged_across_run)))
# bar plot
axs[row_id, col_id].bar(i, y_mean, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr)
if k == 22:
axs[row_id, col_id].legend(loc='center', bbox_to_anchor=(0.5, -0.35), ncol=5, fontsize=10)
for index in range(k+1, n_row*n_col):
row_id = index // n_col
col_id = index % n_col
axs[row_id, col_id].set_axis_off()
fig.text(0.08, 0.5, 'Test Accuracy Score', ha='center', va='center', rotation='vertical')
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.show(block=False)
plt.pause(1)
plt.close()
def plot_aggregated_rs(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath=None):
# Plot averaged performance over all the test suites
plt.figure(figsize=(2.5,2.5))
ax = plt.gca()
bar_width = 0.75
# Use asymptotic confidence interval
human_acc_by_test_suite = [human_data[test_name]['acc_value'] for test_name in test_names]
human_acc_mean = np.mean(human_acc_by_test_suite)
yerr = 1.96*(np.std(human_acc_by_test_suite)/np.sqrt(len(human_acc_by_test_suite)))
ax.bar(-1, human_acc_mean, label='Human', width=bar_width, color='black', fill=False, yerr=yerr)
print('Human average acc: {}'.format(human_acc_mean))
for i, model in enumerate(models):
data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
test_suite_acc_list_averaged_across_run = np.mean(data, axis=0)
mean_test_suite_acc = np.mean(test_suite_acc_list_averaged_across_run)
yerr = 1.96*(np.std(test_suite_acc_list_averaged_across_run)/np.sqrt(len(test_suite_acc_list_averaged_across_run)))
ax.bar(i, mean_test_suite_acc, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr)
ax.set_ylim(0,1)
ax.set_xlim(-1.75,len(models)-0.25)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks(np.arange(-1, len(models)))
ax.set_yticks(np.arange(0, 1.2, 0.25))
ax.set_xticklabels([])
plt.ylabel('Accuracy Score')
plt.legend(loc = 'center', bbox_to_anchor=(1.45, 0.5))
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.show(block=False)
plt.pause(1)
plt.close()
def plot_summmary_across_model_conditions(exp_data_all, model_conditions, savepath=None):
fig = plt.figure(constrained_layout=False, figsize=(7.2,2.4))
hatch_style_list = [{'hatch':None}, {'hatch':'///'}, {'hatch':'.'}]
model_condition2style = dict(zip(['finetune', 'nyt_from_scratch', 'bllip_from_scratch'], hatch_style_list))
gs = fig.add_gridspec(nrows=1, ncols=4, width_ratios=[0.25, 0.8, 0.8, 0.8], wspace=0.1)
bar_width = 0.75
ax = fig.add_subplot(gs[0])
human_acc_by_test_suite = [human_data[test_name]['acc_value'] for test_name in test_names]
human_acc_mean = np.mean(human_acc_by_test_suite)
yerr = 1.96*(np.std(human_acc_by_test_suite)/np.sqrt(len(human_acc_by_test_suite)))
ax.bar(0, human_acc_mean, label='Human', width=bar_width, color='black', fill=False, yerr=yerr)
print('Human average acc: {}'.format(human_acc_mean))
ax.set_ylim(0,1)
ax.set_xlim(-0.75,0.75)
ax.set_yticks(np.arange(0, 1.2, 0.25))
ax.set_ylabel('Accuracy', fontsize=10)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([])
ax.set_xticklabels([])
for model_cond_idx, model_condition in enumerate(model_conditions):
ax = fig.add_subplot(gs[model_cond_idx+1])
rs, models, model2run_indice, model2name, model2color = exp_data_all[model_condition]
for i, model in enumerate(models):
data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
test_suite_acc_list_averaged_across_run = np.mean(data, axis=0)
mean_test_suite_acc = np.mean(test_suite_acc_list_averaged_across_run)
yerr = 1.96*(np.std(test_suite_acc_list_averaged_across_run)/np.sqrt(len(test_suite_acc_list_averaged_across_run)))
ax.bar(i, mean_test_suite_acc, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr, **model_condition2style[model_condition])
ax.set_ylim(0,1)
ax.set_xlim(-0.75,len(models)-0.25)
ax.spines['left'].set_visible(False)
ax.set_yticks([])
ax.set_yticklabels([])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([])
ax.set_xticklabels([])
if model_cond_idx == 2:
colors =['C{}'.format(k) for k in range(4)]
model_names = ['GibbsComplete', 'InfillT5', 'InfillBART', 'ILM']
model_condition_names = ['Pretrain/Fine-tune', 'From scratch (NYT)', 'From scratch (BLLIP)']
color_legend = plt.legend(handles=[mpatches.Patch(facecolor='white', edgecolor='k', label='Human')]+[mpatches.Patch(facecolor=colors[k], edgecolor=colors[k], label=model_names[k]) for k in range(len(model_names))], loc='upper left', bbox_to_anchor=(1.15, 1.05), ncol=1, fontsize=10)
hatch_legend = plt.legend(handles=[mpatches.Patch(facecolor='lightgray', edgecolor='k', linewidth=0, label=model_condition_names[k], **hatch_style_list[k]) for k in range(len(hatch_style_list))], loc='upper left', bbox_to_anchor=(1.15, 0.41), ncol=1, fontsize=10)
ax.add_artist(color_legend)
ax.add_artist(hatch_legend)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.show(block=False)
plt.pause(1)
plt.close()
def run_paired_t_tests(exp_data_all, model_conditions):
for model_cond_idx, model_condition in enumerate(model_conditions):
rs, models, model2run_indice, model2name, model2color = exp_data_all[model_condition]
model_acc_list_all = []
for i, model in enumerate(models):
data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
model_acc_list_all.append(np.mean(data, axis=0))
print('{:<22} {:<15} {:<15} {:<6} {:<6}'.format('Learning setup', 'Model name', 'Model name', 't_stat', 'p_value'))
print('-'*70)
for i in range(len(models)):
for j in range(i+1, len(models)):
d1 = np.array(model_acc_list_all[i])
d2 = np.array(model_acc_list_all[j])
t_stat, p_value = scipy.stats.ttest_rel(d1, d2, alternative='two-sided')
print('{:<22} {:<15} {:<15} {:<6.3f} {:<6.3f}'.format(model_condition, model2name[models[i]], model2name[models[j]], t_stat, p_value))
for i in range(len(models)):
d1 = np.array(model_acc_list_all[i])
d2 = [human_data[test_name]['acc_value'] for test_name in test_names]
t_stat, p_value = scipy.stats.ttest_rel(d1, d2, alternative='two-sided')
print('{:<22} {:<15} {:<15} {:<6.3f} {:<6.3f}'.format(model_condition, model2name[models[i]], 'Human', t_stat, p_value))
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze results in Evaluation III.')
parser.add_argument('--rerank', action='store_true', help='Plot results from directly specialized models with reranking.')
args = parser.parse_args()
DO_RERANK='rerank' if args.rerank else 'norerank'
DATA_DIR='data/exp1'
test_names = ["agreement_subj", "agreement_subj-long", "agreement_emb-subj-long", "agreement_subj-with-coord", "agreement_subj-with-PP",
"clause_VP","clause_VP-with-PP-adjunct", "clause_VP-with-adjunct-long",
"clause_VP-with-complement", "clause_VP-with-complement-long", "clause_VP-gerund",
"clause_phrasal-verb", "clause_phrasal-verb-with-subj",
"clause_resultative", "clause_resultative-long",
"coord_S", "coord_VP", "coord_emb-NP", "coord_emb-VP",
"coord_either", "coord_neither", "coord_gap-NP", "gap_adjunct", "gap_obj", "gap_subj", "gap_phrasal-verb"]
pretty_test_names = ["Number Agreement", "Number Agreement (Long Subject)", "Number Agreement (Embedded Clause)",
"Number Agreement (Coordination)", "Number Agreement (with PP)", "Clausal Structure", "Clausal Structure (PP Adjunct)",
"Clausal Structure (Long Adjunct)", "Clausal Structure (Complement)", "Clausal Structure (Long Complement)",
"Gerund", "Phrasal Verb", "Phrasal Verb (with NP)", "Resultative", "Resultative (Long NP)", "S Coordiation",
"VP Coordination", "Embedded NP Coordination", "Embedded VP Coordination", "Coordination (either)",
"Coordination (neither)", "Coordination in wh-clause", "Filler-Gap (Adjunct)", "Filler-Gap (Object)",
"Filler-Gap (Subject)", "Filler-Gap (Phrasal Verb)"]
test_name2pretty_name = dict(zip(test_names, pretty_test_names))
stimuli_example = {}
for test_name in test_names:
stimuli_path = '../stimuli/exp1/{}.txt'.format(test_name)
with open(stimuli_path) as f:
line = f.readline()
stimuli_example[test_name] = line.strip().replace('%%', '____')
# Load human behavioral results
with open('{}/results/human_eval_rs.txt'.format(DATA_DIR)) as f:
lines = f.readlines()
lines = [line.strip().split() for line in lines if line.strip() != '']
human_data = {}
for line in lines:
test_name = line[1]
human_data[test_name] = {}
human_data[test_name]['acc'] = float(line[2])
proportions1 = [float(item) for item in line[3].split('/')]
proportions2 = [float(item) for item in line[4].split('/')]
acc_value, [acc_lower, acc_upper] = get_conf_int_stats(proportions1[0] + proportions2[0], proportions1[1] + proportions2[1], method='jeffreys')
human_data[test_name]['acc_value'] = acc_value
human_data[test_name]['acc_lower'] = acc_lower
human_data[test_name]['acc_upper'] = acc_upper
exp_data_all = {}
fig_dir = 'fig/exp1/'
model_name_list = ['GibbsComplete', 'InfillT5', 'InfillBART', 'ILM']
model_color_list = ['C0', 'C1', 'C2', 'C3']
model_conditions = ['finetune', 'nyt_from_scratch', 'bllip_from_scratch']
model_condition2dir_name = dict(zip(model_conditions, ['pretrain-finetune', 'nyt-lg', 'bllip-lg']))
for model_condition in model_conditions:
if model_condition == 'nyt_from_scratch':
# Load and visualize results for models trained from scratch on a subset of NYT
models = ['gibbscomplete-nyt-lg', 't5-nyt-lg', 'bart-nyt-lg', 'ilm-nyt-lg']
model2run_indice = {'gibbscomplete-nyt-lg':['0001', '0002', '0003'], 't5-nyt-lg':['0001', '0002', '0003'],
'bart-nyt-lg':['0001', '0002', '0003'], 'ilm-nyt-lg':['0001', '0002', '0003']}
elif model_condition == 'finetune':
# Load and visualize results for pretrained models finetuned on a subset of NYT 2007
models = ['gibbscomplete', 't5-finetune', 'bart-finetune', 'ilm']
if DO_RERANK == 'rerank':
model2run_indice = {'gibbscomplete':['0001', '0002', '0003'], 't5-finetune':['1001', '1002', '1003'],
'bart-finetune':['1001', '1002', '1003'], 'ilm':['1001', '1002', '1003']}
else:
model2run_indice = {'gibbscomplete':['0001', '0002', '0003'], 't5-finetune':['0001', '0002', '0003'],
'bart-finetune':['0001', '0002', '0003'], 'ilm':['0001', '0002', '0003']}
elif model_condition == 'bllip_from_scratch':
# Load and visualize results for models trained from scratch on BLLIP-lg
models = ['gibbscomplete-bllip-lg', 't5-bllip-lg', 'bart-bllip-lg', 'ilm-bllip-lg']
model2run_indice = {'gibbscomplete-bllip-lg':['0101', '0102', '0103'], 't5-bllip-lg':['0001', '0002', '0003'],
'bart-bllip-lg':['0001', '0002', '0003'], 'ilm-bllip-lg':['0001', '0002', '0003']}
model2name = dict(zip(models, model_name_list))
model2color = dict(zip(models, model_color_list))
rs = {}
for model in models:
rs[model] = {}
for run_index in model2run_indice[model]:
rs[model][run_index] = {}
for test_name in test_names:
rs[model][run_index][test_name] = {'acc':None, 'item_acc_list':[]}
if model.startswith('gibbscomplete'):
path = '{}/results/{}/{}_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
else:
if DO_RERANK == 'rerank':
path = '{}/results/{}/{}_rerank_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
else:
path = '{}/results/{}/{}_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
lines = open(path).readlines()
lines = [line.strip().split() for line in lines]
for line in lines:
if len(line) < 1:
continue
test_name = line[0]
item_acc = float(line[2])
rs[model][run_index][test_name]['item_acc_list'].append(item_acc)
for test_name in test_names:
rs[model][run_index][test_name]['acc'] = np.mean(rs[model][run_index][test_name]['item_acc_list'])
plot_rs_by_test_suite_grid_5_by_6(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_grid_bar_5x6.pdf'.format(fig_dir, DO_RERANK, model_condition))
plot_rs_by_test_suite_grid_3_by_9(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_grid_bar_3x9.pdf'.format(fig_dir, DO_RERANK, model_condition))
# plot_aggregated_rs(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_bar_average_score.pdf'.format(fig_dir, model_condition, DO_RERANK))
exp_data_all[model_condition] = [rs, models, model2run_indice, model2name, model2color]
run_paired_t_tests(exp_data_all, model_conditions)
plot_summmary_across_model_conditions(exp_data_all, model_conditions, savepath='{}/exp1_{}_overall_summary.pdf'.format(fig_dir, DO_RERANK))
| pqian11/fragment-completion | analysis/exp1_analysis.py | exp1_analysis.py | py | 20,639 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": ... |
33660433557 | import os
from flask import Flask, Response, request, current_app, url_for, send_from_directory
from fishapiv2.database.models import *
from flask_restful import Resource
from werkzeug.utils import secure_filename
from fishapiv2.resources.helper import *
from fishapiv2.resources.controller.authentication import *
import datetime
import json
from mongoengine import ObjectIdField
from flask_jwt_extended import jwt_required
from flask_jwt_extended import get_jwt_identity
from bson.objectid import ObjectId
class PondsApi(Resource):
@jwt_required()
# @token_req
def get(self):
try:
url = url_for('pondimageapidummy', _external=True)
current_user = get_jwt_identity()
farm = str(current_user['farm_id'])
farm_id = ObjectId(farm)
# farm = farm_id.objectId
pipeline = [
{"$match": {"farm_id": farm_id}},
{"$sort": {"status": 1,"alias": 1}},
{'$lookup': {
'from': 'pond_activation',
'let': {"pondid": "$_id"},
'pipeline': [
{'$match': {'$expr': {'$and': [
{'$eq': ['$pond_id', '$$pondid']},
]}}},
{"$sort": {"activated_at": -1}},
{'$lookup': {
'from': 'fish_log',
'let': {"pond_activation_id": "$_id"},
'pipeline': [
{'$match': {
'$expr': {'$and': [
{'$eq': ['$pond_activation_id',
'$$pond_activation_id']},
]}
}},
{"$project": {
"created_at": 0,
"updated_at": 0,
}},
{"$group": {
"_id": "$fish_type",
"fish_type": {"$first": "$fish_type"},
"fish_amount": {"$sum": "$fish_amount"}
}},
{"$sort": {"fish_type": -1}},
{"$project": {
"_id": 0,
}},
],
'as': 'fish_alive'
}},
{"$addFields": {
"activated_at": {'$dateToString': {
'format': "%d-%m-%Y", 'date': "$activated_at"}},
"deactivated_at": {'$dateToString': {
'format': "%d-%m-%Y", 'date': "$deactivated_at"}},
"total_fish_alive": {"$sum": "$fish_alive.fish_amount"}
}},
{"$project": {
"pond_id": 0,
"feed_type_id": 0,
"created_at": 0,
"updated_at": 0,
}},
],
'as': 'pond_activation_list'
}},
{"$addFields": {
"area": {"$cond": {
"if": {"$eq": ["$shape", "persegi"]},
"then": {"$multiply": ["$length", "$width"]},
"else": {"$divide": [
{"$multiply": [float(22), "$diameter", "$diameter"]},
28
]},
}},
"image_link":{"$concat": [url, "/", {"$toString": "$_id"}]}
}},
{"$addFields": {
"volume": {"$multiply": ["$area", "$height"]},
"last_activation": {"$first": "$pond_activation_list"},
"status": {
"$switch":
{
"branches": [
{
"case": {"$eq": ["$isActive", True]},
"then": "Aktif"
},
{
"case": {"$and": [
{"$eq": ["$isActive", False]},
{"$lt": [
{"$size": "$pond_activation_list"}, 1]}
]},
"then": "Tidak Aktif"
}
],
"default": "Panen"
}
},
}},
{"$addFields": {
"activation_date": "$last_activation.activated_at",
"fish_alive": "$last_activation.total_fish_alive",
}},
{"$project": {
"pond_id": 0,
"feed_type_id": 0,
"created_at": 0,
"updated_at": 0,
"pond_activation_list": 0,
"last_activation": 0,
}}
]
ponds = Pond.objects.aggregate(pipeline)
# token = request.headers['Authorization']
# token = str.replace(str(token), 'Bearer ', '')
# tokens = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=["HS256"])
# user = _ruleUserObj.getRuleUser(tokens["sub"]["username"])
# token = request.form.get('token')
# current_user = get_jwt_identity()
# user = json.dumps(current_user, default=str)
# usernow = jsonify(user)
# pondlist = Pond.objects.get(farm_id=current_user['farm_id'])
list_ponds = list(ponds)
# farm_id = list_ponds.alias
response = json.dumps(list_ponds, default=str)
# response = response[0].alias
return Response(response, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
@jwt_required()
def post(self):
try:
current_user = get_jwt_identity()
farm = str(current_user['farm_id'])
shape = request.form.get("shape", None)
if shape == "bundar":
body = {
"farm_id": farm,
"alias": request.form.get("alias", None),
"location": request.form.get("location", None),
"shape": request.form.get("shape", None),
"material": request.form.get("material", None),
"status": 'Tidak Aktif',
"diameter": request.form.get("diameter", None),
"height": request.form.get("height", None),
"build_at": request.form.get("build_at", None),
}
else :
body = {
"farm_id": farm,
"alias": request.form.get("alias", None),
"location": request.form.get("location", None),
"shape": request.form.get("shape", None),
"material": request.form.get("material", None),
"length": request.form.get("length", None),
"width": request.form.get("width", None),
"status": 'Tidak Aktif',
"height": request.form.get("height", None),
"build_at": request.form.get("build_at", None),
}
pond = Pond(**body).save()
id = pond.id
response = {"message": "success add pond", "id": id}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
class PondApi(Resource):
def put(self, id):
try:
body = request.form.to_dict(flat=True)
Pond.objects.get(id=id).update(**body)
response = {"message": "success change data pond", "id": id}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
return
def delete(self, id):
try:
pond = Pond.objects.get(id=id).delete()
response = {"message": "success delete pond"}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
def get(self, id):
try:
objects = Pond.objects.get(id=id)
pond = objects.to_mongo()
response_dump = json.dumps(pond, default=str)
return Response(response_dump, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
class PondImageApiDummy(Resource):
def get(self):
pass
class PondImageApi(Resource):
def get(self, id):
# init object pond
objects = Pond.objects.get(id=id)
# convert to dict
pond = objects.to_mongo()
# dump dict to json string
path = os.path.join(current_app.instance_path,
current_app.config['UPLOAD_DIR'])
return send_from_directory(path, pond['image_name'])
def put(self, id):
try:
file = request.files['image']
if not file:
response = {"message": "no file selected"}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
if not allowed_file(file.filename):
response = {"message": "file type not allowed"}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
filename = secure_filename(file.filename)
filename = pad_timestamp(filename)
path = os.path.join(current_app.instance_path,
current_app.config['UPLOAD_DIR'])
try:
os.makedirs(path)
except OSError:
pass
filepath = os.path.join(path, filename)
file.save(filepath)
# database
objects = Pond.objects.get(id=id)
pond = objects.to_mongo()
old_image_name = pond["image_name"]
new_image_name = filename
if old_image_name != "default.jpg":
os.remove(os.path.join(path, old_image_name))
data = {
"image_name": new_image_name
}
objects.update(**data)
id = objects.id
response = {"message": "success change image", "id": id}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=200)
except Exception as e:
response = {"message": str(e)}
response = json.dumps(response, default=str)
return Response(response, mimetype="application/json", status=400)
| MauL08/AquaBreedingAPI-V2 | fishapiv2/resources/controller/pond.py | pond.py | py | 12,401 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.url_for",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask_jwt_extended.get_jwt_identity",
"line_number": 21,
"usage_type": "call"
},
{
"api_name... |
71257318823 | import math
from typing import List
import numpy as np
import torch
import torch.jit as jit
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from language_models.language_base_model import LanguageBaselightning
class RNNCell(jit.ScriptModule):
def __init__(self, input_size, hidden_size):
super(RNNCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
# Initialize the weights with random numbers.
self.weight_ih = Parameter(torch.randn(hidden_size, input_size))
self.weight_hh = Parameter(torch.randn(hidden_size, hidden_size))
self.bias_ih = Parameter(torch.randn(hidden_size)) # input to hidden
self.bias_hh = Parameter(torch.randn(hidden_size)) # hidden to hidden
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
@jit.script_method
def forward(self, input: Tensor, state: Tensor):
# input is the input at the current timestep
# state is the hidden state from the previous timestep
hx = state
hidden = (
torch.mm(input, self.weight_ih.t())
+ self.bias_ih
+ torch.mm(hx, self.weight_hh.t())
+ self.bias_hh
)
hy = torch.tanh(hidden)
return hy
class RNNLayer(jit.ScriptModule):
def __init__(self, cell, *cell_args):
super(RNNLayer, self).__init__()
self.cell = cell(*cell_args)
@jit.script_method
def forward(self, input: Tensor, state: Tensor):
inputs = input.unbind(1)
outputs = torch.jit.annotate(List[Tensor], [])
for i in range(len(inputs)):
state = self.cell(inputs[i], state)
outputs += [state]
return torch.stack(outputs, 1), state
class JitRNN_language_model(LanguageBaselightning):
def __init__(
self,
vocab_size: int,
embedding_size: int,
hidden_size: int,
padding_idx: int,
learning_rate: int = 0.001,
):
super(JitRNN_language_model, self).__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.padding_idx = torch.tensor(padding_idx).to(self.device)
self.padding_idx = torch.tensor(padding_idx)
self.learning_rate = learning_rate
self.embedding = nn.Embedding(
vocab_size, embedding_size, padding_idx=self.padding_idx
)
self.dense = nn.Linear(hidden_size, embedding_size)
self.rnn = RNNLayer(RNNCell, embedding_size, hidden_size)
self.output_layer = nn.Linear(embedding_size, vocab_size)
self.hidden = None
# tie the weights of the output embeddings with the input embeddings
self.output_layer.weight = self.embedding.weight
self.loss_func = nn.CrossEntropyLoss()
def forward(self, x, seq_length):
batch_size, seq_length = x.size()
# get embedding encoder
x = self.embedding(x)
# get output of rnn
self.hidden = torch.zeros(batch_size, self.hidden_size).type_as(x)
output, self.hidden = self.rnn(x, self.hidden)
out = self.dense(output)
out = self.output_layer(out)
return out.view(
batch_size, seq_length, self.vocab_size
) # Dimensions -> Batch x Sequence x Vocab
def reset_intermediate_vars(self):
self.hidden = None
def detach_intermediate_vars(self):
self.hidden = self.hidden.detach()
# class RNN(nn.Module):
# # you can also accept arguments in your model constructor
# # we don't use the output in this implemention
# def __init__(
# self,
# embed_size,
# hidden_size,
# ):
# super(RNN, self).__init__()
# self.hidden_size = hidden_size
# # input_size = embed_size + hidden_size
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # self.i2h = nn.Linear(input_size, hidden_size)
# self.Wih = nn.Linear(embed_size, hidden_size)
# self.Whh = nn.Linear(hidden_size, hidden_size)
# # self.h2o = nn.Linear(input_size, output_size)
# def forward(self, data, last_hidden):
# wi = self.Wih(data)
# wh = self.Whh(last_hidden)
# hidden = torch.relu(wi + wh)
# # output = self.h2o(input)
# return hidden
# def initHidden(self, batch_size):
# # return torch.zeros(batch_size,self.hidden_size).to(self.device)
# return nn.init.kaiming_uniform_(torch.empty(batch_size, self.hidden_size)).to(
# self.device
# )
# class RNN_language_model(nn.Module):
# def __init__(
# self,
# vocab_size: int,
# embed_size: int,
# hidden_size: int,
# padding_idx: int,
# ):
# super(RNN_language_model, self).__init__()
# self.vocab_size = vocab_size
# self.hidden_size = hidden_size
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.padding_idx = torch.tensor(padding_idx).to(self.device)
# self.embedding = nn.Embedding(
# vocab_size, embed_size, padding_idx=self.padding_idx
# )
# self.dense = nn.Linear(hidden_size, embed_size)
# # note that output_size = vocab_size
# self.rnn_cell = RNN(
# embed_size,
# hidden_size,
# )
# self.output_layer = nn.Linear(embed_size, vocab_size)
# # tie the weights of the output embeddings with the input embeddings
# # self.output_layer.weight = self.embedding.weight
# self.loss_func = nn.CrossEntropyLoss()
# def forward(self, x, seq_length):
# batch_size, seq_length = x.size()
# # get embedding encoder
# x = self.embedding(x)
# # get output of rnn
# self.hidden = self.rnn_cell.initHidden(batch_size)
# hiddens = []
# # recurrent rnn
# for i in range(seq_length):
# hidden_next = self.rnn_cell(x[:, i, :], self.hidden)
# hiddens.append(hidden_next.unsqueeze(1))
# self.hidden = hidden_next
# hidden_tensor = torch.cat(hiddens, 1)
# out = hidden_tensor.contiguous().view(-1, self.hidden_size)
# out = self.dense(out)
# out = self.output_layer(out)
# return (
# out.view(batch_size, seq_length, self.vocab_size),
# self.hidden,
# ) # Dimensions -> Batch x Sequence x Vocab
# def loss(self, predictions, y, mask):
# predictions = predictions.view(-1, predictions.size(2))
# predictions *= torch.stack([mask] * predictions.size(1)).transpose(0, 1).float()
# return self.loss_func(predictions, y)
| shuishen112/TensorLanguageModel | language_models/lightRNN.py | lightRNN.py | py | 7,044 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.jit.ScriptModule",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.jit",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.randn"... |
35257408476 | import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk,GdkPixbuf
from ui import login
import socket
import select
import json
import os
import redis
from ui import event
HOST = "127.0.0.1"
PORT = 5000
class ChatWindow(Gtk.Window):
def __init__(self):
super().__init__(title="Mega Chat | Chat")
event.Event(name="login", callback=self.regy_date)
self.login_win = login.LoginWindow()
self.login_win.show_all()
self.connection = None
self.__interfase()
def __interfase(self):
self.set_position(Gtk.WindowPosition.CENTER)
self.set_size_request(800, 600)
master_box=Gtk.Box()
master_box.set_spacing(5)
self.add(master_box)
left_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
left_box.set_size_request(200, -1)
master_box.pack_start(left_box, False, True, 0)
separator = Gtk.VSeparator()
master_box.pack_start(separator, False, True, 0)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"Avatar.png"
),
width = 190,
height = 190,
preserve_aspect_ratio=True,
)
avatar = Gtk.Image.new_from_pixbuf(pixbuf)
left_box.pack_start(avatar, False, True, 5)
separator = Gtk.HSeparator()
left_box.pack_start(separator, False, True, 5)
user_label= Gtk.Label(label="User name")
left_box.pack_start(user_label, False, True, 5)
separator = Gtk.HSeparator()
left_box.pack_start(separator, False, True, 5)
l_space = Gtk.Alignment()
left_box.pack_start(l_space, True, True, 5)
separator = Gtk.HSeparator()
left_box.pack_start(separator, False, True, 0)
b_box = Gtk.ButtonBox()
left_box.pack_start(b_box, False, True, 5)
close_button = Gtk.Button(label="Close")
close_button.connect("clicked", Gtk.main_quit)
b_box.pack_start(close_button, True, True, 5)
center_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
master_box.pack_start(center_box, True, True, 0)
separator = Gtk.VSeparator()
master_box.pack_start(separator, False, True, 0)
scroll_box = Gtk.ScrolledWindow()
scroll_box.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
center_box.pack_start(scroll_box, True, True, 5)
self.chat_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
scroll_box.add(self.chat_box)
separator = Gtk.HSeparator()
center_box.pack_start(separator, False, False, 5)
send_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
send_box.set_spacing(5)
center_box.pack_start(send_box, False, True, 5)
separator = Gtk.HSeparator()
center_box.pack_start(separator, False, False, 5)
smile_buttom = Gtk.Button(label = ":-}")
send_box.pack_start(smile_buttom, False, False, 0)
message_entry = Gtk.Entry()
send_box.pack_start(message_entry, True, True, 0)
send_button = Gtk.Button(label = "Send")
send_box.pack_start(send_button, False, False, 0)
right_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
right_box.set_size_request(200, 1)
master_box.pack_start(right_box, False, True, 0)
favorit_label = Gtk.Label(label="ะะทะฑัะฐะฝะฝะพะต")
right_box.pack_start(favorit_label, False, True, 5)
# test_input = {
# "message": (
# "ะะพะผะฟะธะปัฬัะธั โ ัะฑะพัะบะฐ ะฟัะพะณัะฐะผะผั, ะฒะบะปััะฐััะฐั ััะฐะฝัะปััะธั ะฒัะตั
ะผะพะดัะปะตะน ะฟัะพะณัะฐะผะผั, "
# "ะฝะฐะฟะธัะฐะฝะฝัั
ะฝะฐ ะพะดะฝะพะผ ะธะปะธ ะฝะตัะบะพะปัะบะธั
ะธัั
ะพะดะฝัั
ัะทัะบะฐั
ะฟัะพะณัะฐะผะผะธัะพะฒะฐะฝะธั ะฒััะพะบะพะณะพ "
# "ััะพะฒะฝั ะธ/ะธะปะธ ัะทัะบะต ะฐััะตะผะฑะปะตัะฐ, ะฒ ัะบะฒะธะฒะฐะปะตะฝัะฝัะต ะฟัะพะณัะฐะผะผะฝัะต ะผะพะดัะปะธ ะฝะฐ "
# "ะฝะธะทะบะพััะพะฒะฝะตะฒะพะผ ัะทัะบะต, ะฑะปะธะทะบะพะผ ะผะฐัะธะฝะฝะพะผั ะบะพะดั"
# ),
# "user": "Vasia"
# }
#
# test_output = {
# "message": (
# "ะะฝะธัะธะฐะปะธะทะฐัะธั โ ัะพะทะดะฐะฝะธะต, ะฐะบัะธะฒะฐัะธั, ะฟะพะดะณะพัะพะฒะบะฐ ะบ ัะฐะฑะพัะต, ะพะฟัะตะดะตะปะตะฝะธะต ะฟะฐัะฐะผะตััะพะฒ. " "ะัะธะฒะตะดะตะฝะธะต ะฟัะพะณัะฐะผะผั ะธะปะธ ััััะพะนััะฒะฐ ะฒ ัะพััะพัะฝะธะต ะณะพัะพะฒะฝะพััะธ ะบ ะธัะฟะพะปัะทะพะฒะฐะฝะธั. "
# ),
# "user": "User"
# }
# self.__add_message_box(test_input)
# self.__add_message_box(test_output, False)
# self.__add_message_box(test_input)
# self.__add_message_box(test_input)
# self.__add_message_box(test_output, False)
# self.__add_message_box(test_output, False)
# self.__add_message_box(test_input)
# self.__add_message_box(test_output, False)
def __add_message_box(self, data, input=True):
message_frame = Gtk.Frame()
message_box = Gtk.Box()
message_frame.add(message_box)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
f".contacts/{data['user']}.png" if input
else "Avatar.png"
),
width = 100,
height = 100,
preserve_aspect_ratio=True,
)
avatar = Gtk.Image.new_from_pixbuf(pixbuf)
text_label = Gtk.Label()
text_label.set_markup(data["message"])
text_label.set_selectable(True)
text_label.set_line_wrap(True)
if input:
message_box.pack_start(avatar, False, True, 5)
else:
text_label.set_justify(Gtk.Justification.RIGHT)
message_box.pack_end(avatar, False, True, 5)
message_box.pack_start(text_label, True, False, 5)
self.chat_box.pack_start(message_frame, False, True, 5)
def regy_date(self, *args, **kwargs):
self.login_win.hide()
storage = redis.StrictRedis() #ะฟะพะดะบะปััะฐะตะผัั ะบ ะผะตะผ ะบััั. ัััะปะบะฐ ะฝะฐ ะดะพัััะฟ ะบ ะฑะฐะทะต ะดะฐะฝะฝัั
try:
self.login_win = str(storage.get("login"))
self.password = str(storage.get("password"))
except:
redis.RedisError
print("ะะฐะฝะฝัั
ะฟะพัะตะผััะพ ะฝะตั")
Gtk.main_quit()
else:
self.__create_conntection()
self.show_all()
def __create_conntection(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# self.sock.setblocking(0)
self.sock.connect((HOST,PORT))
result = self.connection.recv(2048)
data = json.load(result.decode("utf-8")) #ะฟัะตะพะฑัะฐะทัะตะผ ัััะพะบั ะพะฑัะฐัะฝะพ ะฒ ะพะฑัะตะบั ะฟัะธ ะฟะพะผะพัะธ ะปะพะฐะด
if data.get("status") != "OK":
print(data.get("message"))
Gtk.main_quit()
else:
data = json.dumps({"login": self.login, "password": self.password})
self.connection.send(data.encode("utf-8"))
self.__run()
def __run(self):
pass
# self.epoll = select.epoll()
# self.epoll.register(self.sock.fileno(), select.EPOLLIN)
| Kiril0l/gtk_new | ui/chat.py | chat.py | py | 7,521 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "gi.require_version",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Window",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ui.... |
12803276455 | import os, sys, io, math
class SequenceReader:
def __init__(self, file_path):
self.file_path = file_path
def set_file_path(self, file_path):
self.file_path = file_path
def get_file_path(self):
return self.file_path
def read_sequence(self):
with open(self.file_path) as f:
lines = f.read().strip().splitlines()
sequence = None
for line in lines:
if not (line.startswith(">") or line.startswith("#")):
sequence = line
break
elif line.startswith(">"):
sequence_descriptor = line
return sequence_descriptor, sequence
class Utils:
@staticmethod
def find_max_index(l):
max_index = 0
i = 1
while i < len(l):
if l[i] > l[max_index]:
max_index = i
i = i + 1
return max_index
@staticmethod
def content_to_dict(content):
l = [i.strip() for i in content.splitlines() if i.strip()]
return {key: value for key, value in [((int(i.split()[1])-1, int(i.split()[2]) - 1), i[0][0].replace("S", "E")) for i in l]}
@staticmethod
def count_for_confusion_matrix(truth_dict, prediction_dict, truth_key, prediction_key):
start = min(truth_dict.keys())
end = max(truth_dict.keys())
counter = 0
for i in range(start, end + 1):
if prediction_dict[i] == prediction_key and truth_dict[i] == truth_key:
counter += 1
return counter
@staticmethod
def count_individual_confusion_statistics(truth_dict, prediction_dict, key):
start = min(truth_dict.keys())
end = max(truth_dict.keys())
true_positive, true_negative, false_positive, false_negative = 0, 0, 0, 0
for i in range(start, end + 1):
if truth_dict[i] == key and prediction_dict[i] == key: true_positive += 1
if truth_dict[i] != key and prediction_dict[i] != key: true_negative += 1
if truth_dict[i] != key and prediction_dict[i] == key: false_positive += 1
if truth_dict[i] == key and prediction_dict[i] != key: false_negative += 1
return true_positive, true_negative, false_positive, false_negative
@staticmethod
def path_to_position_dict(path):
return {key: value for key, value in [(index, path[index]) for index in range(len(path))]}
@staticmethod
def generate_position_dict(d, length):
result = {}
sorted_keys = sorted(d)
i = 0
for interval in sorted_keys:
ll, ul = interval
if i < ll:
for y in range(i, ll):
result[y] = 'N'
for y in range(ll, ul + 1):
result[y] = d[interval]
i = ul + 1
if i < length:
for y in range(i, length):
result[y] = 'N'
return result
class ViterbiAlgorithm:
def __init__(self, hmm, sequence):
self.hmm = hmm
self.sequence = sequence
self.column_count = len(self.sequence)
self.states_list = self.hmm.get_states()
self.matrix = [[0 for j in range(len(sequence))] for i in range(len(self.states_list))]
self.arrow_map = {}
self.fill_in_the_matrix()
def fill_in_the_matrix(self):
j = 0
for i in range(len(self.states_list)):
state = self.states_list[i]
self.matrix[i][j] = self.hmm.tlp('START', state) + self.hmm.elp(state, self.sequence[j])
for j in range(1, self.column_count):
aa = self.sequence[j] # aa stands for amino_acid
for i in range(len(self.states_list)):
state = self.states_list[i]
self.matrix[i][j] = self.hmm.elp(state, aa)
list_to_look_for_max = []
for k in range(len(self.states_list)):
inner_state = self.states_list[k]
list_to_look_for_max.append(self.matrix[k][j - 1] + self.hmm.tlp(inner_state, state))
max_index = Utils.find_max_index(list_to_look_for_max)
self.arrow_map[(i, j)] = max_index
self.matrix[i][j] += list_to_look_for_max[max_index]
if j == self.column_count - 1: # if we are in the last column, take into account the end state probability
self.matrix[i][j] += self.hmm.tlp(state, 'END')
def construct_path(self):
self.path = ""
list_to_look_for_max = []
for i in range(len(self.states_list)):
list_to_look_for_max.append(self.matrix[i][self.column_count - 1])
max_index = Utils.find_max_index(list_to_look_for_max)
j = self.column_count - 1
i = max_index
log_probability = list_to_look_for_max[max_index]
while j > 0:
to_go = self.arrow_map[(i, j)]
self.path = self.states_list[i] + self.path
i = to_go
j -= 1
self.path = self.states_list[i] + self.path
return self.path, log_probability
class HMM:
def __init__(self, training_set_path):
self.load_training_set(training_set_path)
self.preprocess_training_set()
# X and the lowercase letters are for the letters found in the training set
self.amino_acid_alphabet = "ACDEFGHIKLMNPQRSTVWYXabcdegfhijklmnopqrutvw"
self.states = {'H': {key: 0 for key in self.amino_acid_alphabet},
'E': {key: 0 for key in self.amino_acid_alphabet},
'T': {key: 0 for key in self.amino_acid_alphabet}}
self.transitions = {}
for state_i in "HET":
for state_j in "HET":
self.transitions[(state_i, state_j)] = 0
for state in "HET":
self.transitions[("START", state)] = 0
for state in "HET":
self.transitions[(state, "END")] = 0
self.train()
def get_states(self):
return tuple("HET")
def tlp(self, from_state, to_state):
# tlp stands for transition_log_probability
return self.transitions[(from_state, to_state)]
def elp(self, state, amino_acid):
# elp stands for emission_log_probability
return self.states[state][amino_acid]
def load_training_set(self, training_set_path):
with open(training_set_path) as file:
training_set = file.read().strip().splitlines()
self.training_sequences = {}
index_list = [i for i in range(len(training_set)) if training_set[i].startswith(">")]
for index in index_list:
self.training_sequences[training_set[index].strip()] = (training_set[index + 1].strip(), training_set[index + 2].strip())
print(f"Loaded {len(self.training_sequences)} training samples.")
def preprocess_training_set(self):
print("Preprocessing training data...", end = ' ')
sys.stdout.flush()
for key, sequence_structure_tuple in self.training_sequences.items():
sequence, structure = sequence_structure_tuple
preprocessed_sequence_io = io.StringIO()
preprocessed_structure_io = io.StringIO()
for i in range(len(sequence)):
structure_char = structure[i]
sequence_char = sequence[i]
if structure_char != "_":
preprocessed_sequence_io.write(sequence_char)
if structure_char in ('G', 'H', 'I'):
preprocessed_structure_io.write('H')
elif structure_char in ('B', 'E'):
preprocessed_structure_io.write('E')
elif structure_char in ('T', 'S', 'L'):
preprocessed_structure_io.write('T')
self.training_sequences[key] = (preprocessed_sequence_io.getvalue(), preprocessed_structure_io.getvalue())
print("Done!")
def train(self):
print ("Training...", end = ' ')
sys.stdout.flush()
inner_transition_counts = {'H': 0, 'E': 0, 'T': 0}
start_transition_count = 0
for key, sequence_structure_tuple in self.training_sequences.items():
sequence, structure = sequence_structure_tuple
for index in range(len(sequence)):
sequence_char = sequence[index]
structure_char = structure[index]
if index == 0:
start_transition_count += 1
self.transitions[('START', structure_char)] += 1
else:
inner_transition_counts[structure[index - 1]] += 1
self.transitions[(structure[index - 1], structure_char)] += 1
if index == len(sequence) - 1:
inner_transition_counts[structure_char] += 1
self.transitions[(structure_char, 'END')] += 1
self.states[structure_char][sequence_char] += 1
for state, emissions in self.states.items():
summation = sum(emissions.values())
for amino_acid, count in emissions.items():
self.states[state][amino_acid] = math.log2((count + 1) / (summation + len(self.amino_acid_alphabet)))
for state_i in "HET":
for state_j in "HET":
self.transitions[(state_i, state_j)] = math.log2(self.transitions[(state_i, state_j)] / inner_transition_counts[state_i])
for state in "HET":
self.transitions[("START", state)] = math.log2(self.transitions[("START", state)] / start_transition_count)
for state in "HET":
self.transitions[(state, "END")] = math.log2(self.transitions[(state, "END")] / inner_transition_counts[state])
print("Done!")
class Main:
def __init__(self):
try:
training_set_path = sys.argv[1]
sequence_path = sys.argv[2]
except IndexError:
self.print_usage()
sys.exit()
truth_interval_dict = None
if len(sys.argv) > 3:
secondary_structure_path = sys.argv[3]
with open(secondary_structure_path) as f:
truth_interval_dict = Utils.content_to_dict(f.read().strip())
sequence_reader = SequenceReader(sequence_path)
header, sequence = sequence_reader.read_sequence()
self.hmm = HMM(training_set_path)
self.viterbi_algorithm = ViterbiAlgorithm(self.hmm, sequence)
path, log_probability = self.viterbi_algorithm.construct_path()
print("\nInput protein sequence:\n" + "-"*30 + "\n" + header + "\n" + sequence)
print("\nThe path predicted by HMM:\n" + "-"*30 + "\n" + path)
print("\nLog2 probability of this path:\n" + "-"*30 + "\n" + str(log_probability))
if truth_interval_dict:
truth_dict = Utils.generate_position_dict(truth_interval_dict, len(sequence))
prediction_dict = Utils.path_to_position_dict(path)
print("\n3x3 confusion matrix computations:")
print("True".ljust(10), "Predicted".ljust(10), "Count".ljust(10))
for key_i in "HET":
for key_j in "HET":
print (key_i.ljust(10), key_j.ljust(10), str(Utils.count_for_confusion_matrix(truth_dict, prediction_dict, key_i, key_j)).ljust(10))
print("Individual confusion matrix computations:")
for key in "HET":
print(f"Individual confusion matrix computations for {key}:")
print("TP".ljust(10), "TN".ljust(10), "FP".ljust(10), "FN".ljust(10))
tp, tn, fp, fn = Utils.count_individual_confusion_statistics(truth_dict, prediction_dict, key)
print(str(tp).ljust(10), str(tn).ljust(10), str(fp).ljust(10), str(fn).ljust(10))
def print_usage(self):
print(f"Usage: python3 {os.path.split(sys.argv[0])[-1]} <training_set_path> <sequence_path> <secondary_structure_path>")
if __name__ == "__main__":
main = Main()
| ender-s/HMM-Based-Secondary-Structure-Prediction | hmm_based_predictor.py | hmm_based_predictor.py | py | 12,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout.flush",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"li... |
13989585282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 350, 100)
self.setWindowTitle('Drawing rectangles')
self.show()
# ็ปๅพๆฏๅจpaintEvent()ๆนๆณไธญๅฎๆใ
# QPainter ๅฏน่ฑกๆพๅจbegin()ๆนๆณๅend()ๆนๆณไน้ด๏ผๅฎๆง่ก้จไปถไธ็ไฝๅฑๆฌก็็ป็ปๅๅ
ถไป็ปๅพ่ฎพๅคใ
# ๅฎ้
็็ป็ปๆไปฌๅงๆ็ปdrawText()ๆนๆณใ
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
self.drawRectangles(event, painter)
painter.end()
def drawRectangles(self, event, painter):
color = QColor(0, 0, 0)
color.setNamedColor('#d4d4d4')
painter.setPen(color)
painter.setBrush(QColor(200, 0, 0))
painter.drawRect(10, 15, 90, 60)
painter.setBrush(QColor(255, 80, 0, 160))
painter.drawRect(130, 15, 90, 60)
painter.setBrush(QColor(25, 0, 90, 200))
painter.drawRect(250, 15, 90, 60)
if __name__ == '__main__':
app = QApplication(sys.argv)
win = MainWindow()
sys.exit(app.exec_())
| shellever/Python3Learning | thirdparty/pyqt5/painting/drawrectangles.py | drawrectangles.py | py | 1,308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QPainter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QColor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PyQt5.... |
42244011138 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo import SUPERUSER_ID
import io
import csv
import base64
import ftplib
from odoo.tools import pycompat
import logging
_logger = logging.getLogger(__name__)
from odoo.exceptions import UserError, AccessError
from odoo.addons.website_mail.models.mail_message import MailMessage
from datetime import datetime, timedelta
from odoo.http import request
from odoo.exceptions import ValidationError
from odoo.addons.website_sale.models.sale_order import SaleOrder
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
""" Method monkey patched to handle multiple UoM from website """
self.ensure_one()
print (kwargs, 'In Override method \n\n\n\n\n\n\n')
product_context = dict(self.env.context)
product_context.setdefault('lang', self.sudo().partner_id.lang)
SaleOrderLineSudo = self.env['sale.order.line'].sudo().with_context(product_context)
# change lang to get correct name of attributes/values
product_with_context = self.env['product.product'].with_context(product_context)
product = product_with_context.browse(int(product_id))
try:
if add_qty:
add_qty = float(add_qty)
except ValueError:
add_qty = 1
try:
if set_qty:
set_qty = float(set_qty)
except ValueError:
set_qty = 0
quantity = 0
order_line = False
if self.state != 'draft':
request.session['sale_order_id'] = None
raise UserError(_('It is forbidden to modify a sales order which is not in draft status.'))
if line_id is not False:
order_line = self._cart_find_product_line(product_id, line_id, **kwargs)[:1]
# Create line if no line with product_id can be located
if not order_line:
if not product:
raise UserError(_("The given product does not exist therefore it cannot be added to cart."))
no_variant_attribute_values = kwargs.get('no_variant_attribute_values') or []
received_no_variant_values = product.env['product.template.attribute.value'].browse([int(ptav['value']) for ptav in no_variant_attribute_values])
received_combination = product.product_template_attribute_value_ids | received_no_variant_values
product_template = product.product_tmpl_id
# handle all cases where incorrect or incomplete data are received
combination = product_template._get_closest_possible_combination(received_combination)
# get or create (if dynamic) the correct variant
product = product_template._create_product_variant(combination)
if not product:
raise UserError(_("The given combination does not exist therefore it cannot be added to cart."))
product_id = product.id
values = self._website_product_id_change(self.id, product_id, qty=1)
# add no_variant attributes that were not received
for ptav in combination.filtered(lambda ptav: ptav.attribute_id.create_variant == 'no_variant' and ptav not in received_no_variant_values):
no_variant_attribute_values.append({
'value': ptav.id,
})
# save no_variant attributes values
if no_variant_attribute_values:
values['product_no_variant_attribute_value_ids'] = [
(6, 0, [int(attribute['value']) for attribute in no_variant_attribute_values])
]
# add is_custom attribute values that were not received
custom_values = kwargs.get('product_custom_attribute_values') or []
received_custom_values = product.env['product.template.attribute.value'].browse([int(ptav['custom_product_template_attribute_value_id']) for ptav in custom_values])
for ptav in combination.filtered(lambda ptav: ptav.is_custom and ptav not in received_custom_values):
custom_values.append({
'custom_product_template_attribute_value_id': ptav.id,
'custom_value': '',
})
# save is_custom attributes values
if custom_values:
values['product_custom_attribute_value_ids'] = [(0, 0, {
'custom_product_template_attribute_value_id': custom_value['custom_product_template_attribute_value_id'],
'custom_value': custom_value['custom_value']
}) for custom_value in custom_values]
# create the line
order_line = SaleOrderLineSudo.create(values)
if 'product_uom_id' in kwargs:
order_line.product_uom = int(kwargs['product_uom_id'])
order_line.product_uom_change()
try:
order_line._compute_tax_id()
except ValidationError as e:
# The validation may occur in backend (eg: taxcloud) but should fail silently in frontend
_logger.debug("ValidationError occurs during tax compute. %s" % (e))
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty is not None:
quantity = order_line.product_uom_qty + (add_qty or 0)
# Remove zero of negative lines
if quantity <= 0:
linked_line = order_line.linked_line_id
order_line.unlink()
if linked_line:
# update description of the parent
linked_product = product_with_context.browse(linked_line.product_id.id)
linked_line.name = linked_line.get_sale_order_line_multiline_description_sale(linked_product)
else:
# update line
no_variant_attributes_price_extra = [ptav.price_extra for ptav in order_line.product_no_variant_attribute_value_ids]
values = self.with_context(no_variant_attributes_price_extra=tuple(no_variant_attributes_price_extra))._website_product_id_change(self.id, product_id, qty=quantity)
if self.pricelist_id.discount_policy == 'with_discount' and not self.env.context.get('fixed_price'):
order = self.sudo().browse(self.id)
product_context.update({
'partner': order.partner_id,
'quantity': quantity,
'date': order.date_order,
'pricelist': order.pricelist_id.id,
'force_company': order.company_id.id,
})
product_with_context = self.env['product.product'].with_context(product_context)
product = product_with_context.browse(product_id)
values['price_unit'] = self.env['account.tax']._fix_tax_included_price_company(
order_line._get_display_price(product),
order_line.product_id.taxes_id,
order_line.tax_id,
self.company_id
)
if 'product_uom_id' in kwargs:
values.update({'product_uom': int(kwargs['product_uom_id'])})
else:
del values['product_uom']
order_line.write(values)
order_line.product_uom_change()
# link a product to the sales order
if kwargs.get('linked_line_id'):
linked_line = SaleOrderLineSudo.browse(kwargs['linked_line_id'])
order_line.write({
'linked_line_id': linked_line.id,
})
linked_product = product_with_context.browse(linked_line.product_id.id)
linked_line.name = linked_line.get_sale_order_line_multiline_description_sale(linked_product)
# Generate the description with everything. This is done after
# creating because the following related fields have to be set:
# - product_no_variant_attribute_value_ids
# - product_custom_attribute_value_ids
# - linked_line_id
order_line.name = order_line.get_sale_order_line_multiline_description_sale(product)
option_lines = self.order_line.filtered(lambda l: l.linked_line_id.id == order_line.id)
return {'line_id': order_line.id, 'quantity': quantity, 'option_ids': list(set(option_lines.ids))}
SaleOrder._cart_update = _cart_update
class ProductBrand(models.Model):
_name = "product.brand"
name = fields.Char("Brand")
class product(models.Model):
_inherit = 'product.template'
brand_id = fields.Many2many("product.brand", string="Brand")
extra_units = fields.Many2many('uom.uom', 'product_id', 'uom_id', 'prod_uom_rel', string="Extra Units")
def units_web(self):
product = self.env['product.template'].sudo().browse(self.id)
units = [product.uom_id]
for item in product.extra_units:
units.append(item)
return units
| eqilibruim-solutions/Theme-1 | clarico_ext/models/product_template.py | product_template.py | py | 7,913 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "odoo.http.request.session",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "odoo.http.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "od... |
30420538376 | from pyspark.sql import Window
import pyspark.sql.functions as f
from app import columns
class QueryManager:
def __init__(self, spark, trip_fare_df, trip_data_df):
self.spark = spark
self.trip_fare_df = trip_fare_df
self.trip_data_df = trip_data_df
def trips_count(self, date_column):
"""
Args:
date_column: desired date column in dataframe
Returns:
dataframe which has three columns
1. Vendor_ID
2. Day of Week
3. Count (count of trips)
"""
trip_df = self.trip_data_df.withColumn('dayofweek',
f.date_format(self.trip_data_df[date_column], 'EEEE'))
trips_by_week = (trip_df.filter(f.col(columns.vendor_id).isNotNull()).groupBy(columns.vendor_id, 'dayofweek').
count().orderBy(f.desc(columns.vendor_id), f.desc('count')).withColumn('max_trip_count',
f.max('count').over(
Window.partitionBy(
'vendor_id')))
.filter(f.col('count') == f.col('max_trip_count')).drop('max_trip_count'))
return trips_by_week
def total_revenue(self):
""" Calculates the total revenue of each vendor
Returns:
DataFrame: A DataFrame containing the total revenue for each vendor.
"""
dataframe = (self.trip_fare_df.filter(f.col(columns.vendor_id).isNotNull()).groupBy(columns.vendor_id)
.agg(f.format_number(f.sum(columns.total_amount), 2).alias('total revenue')))
return dataframe
def avg_trip_distance(self):
"""
Calculates the average trip distance for different numbers of passengers.
Returns:
DataFrame: A DataFrame containing the average trip distance for each combination of vendor
and passenger count.
"""
dataframe = (self.trip_data_df.filter(f.col(columns.passenger_count).
isNotNull()).groupBy(columns.vendor_id, columns.passenger_count).
agg(f.avg(columns.trip_distance)).orderBy(f.desc(columns.passenger_count)))
return dataframe
def simultaneous_trips(self):
"""
Calculates the maximum number of simultaneous trips that happened on the same day.
Returns:
DataFrame: A DataFrame containing the maximum number of simultaneous trips for the top 5 days.
"""
pickup_dataframe = (self.trip_data_df.filter(f.col(columns.pickup_datetime).isNotNull()).
select(f.col(columns.pickup_datetime).alias('event_time'),
f.lit(1).alias('event_count')))
dropoff_dateframe = (self.trip_data_df.filter(f.col(columns.dropoff_datetime).isNotNull()).
select(f.col(columns.dropoff_datetime).alias('event_time'),
f.lit(-1).alias('event_count')))
event_dateframe = pickup_dataframe.union(dropoff_dateframe)
dataframe = event_dateframe.withColumn('sum', f.sum('event_count').over(Window.partitionBy('event_time')
.orderBy(f.asc('event_time'))))
dataframe = dataframe.groupBy(f.date_format('event_time', 'yyyy-MM-dd').alias('day')
).agg(f.max('sum').alias('simultaneous_trips')).orderBy(
f.desc(f.col('simultaneous_trips'))).limit(5)
return dataframe
def most_expensive_trips(self):
"""
Calculates the most expensive trips for each vendor.
Returns:
DataFrame: A DataFrame containing the most expensive trips for each vendor.
"""
dataframe = (self.trip_fare_df.filter(f.col(columns.vendor_id).isNotNull())
.groupBy(columns.vendor_id).agg(f.max(columns.total_amount).
alias(columns.total_amount)))
return dataframe
def avg_amount_rate_code(self):
"""
Calculates the count of trips with a tip above the average tip amount for trips with different rate codes.
Returns:
DataFrame: A DataFrame containing the count of such trips for each rate code.
"""
dataframe = self.trip_fare_df.join(self.trip_data_df, ['medallion', 'hack_license', 'vendor_id',
'pickup_datetime'], 'inner')
average_tip_amounts = dataframe.groupBy(columns.rate_code).agg(f.avg(columns.tip_amount)
.alias('avg_tip_amount'))
joined_data = dataframe.join(average_tip_amounts, [columns.rate_code], 'inner')
dataframe = joined_data.withColumn('tip_above_avg', f.col('tip_amount') > f.col('avg_tip_amount'))
dataframe = (dataframe.groupBy(columns.rate_code).count().withColumnRenamed('count', 'trip_count').
orderBy(f.desc('trip_count')))
return dataframe
def tips_count(self):
""" Identifies the specific day of the week when each vendor tends to receive the highest amount of tips.
Returns:
DataFrame: A DataFrame containing the day of the week and the corresponding highest amount of tips received
for each vendor.
"""
window_spec = Window.partitionBy(columns.vendor_id).orderBy(f.col("total_tips").desc())
dataframe = (self.trip_fare_df.withColumn("day_of_week", f.date_format(columns.pickup_datetime, 'EEEE'))
.groupBy(columns.vendor_id, "day_of_week")
.agg(f.format_number(f.sum(columns.tip_amount), 2).alias("total_tips"))
.withColumn("rank", f.row_number().over(window_spec))
.filter(f.col("rank") == 1)
.select(columns.vendor_id, "day_of_week", "total_tips"))
return dataframe
def avg_fare_amount_payment(self):
""" Calculates the average fare amount for each payment type.
Returns:
DataFrame: A DataFrame containing the average fare amount for each payment type.
"""
dataframe = (self.trip_fare_df.groupBy(columns.payment_type)
.agg(f.format_number(f.avg(columns.fare_amount), 2).alias("average_fare_amount"))
.orderBy(f.desc("average_fare_amount")))
return dataframe
def top_vendor_drivers(self):
""" Identifies the top 10 drivers for each vendor based on average trip distance and total tip amount.
Returns:
DataFrame: A DataFrame containing the vendor ID, unique driver license, average mileage covered, total tip
amount received and the corresponding rank.
"""
joined_df = (self.trip_data_df.withColumnRenamed(columns.vendor_id, "vendor")
.join(self.trip_fare_df, [columns.hack_license, columns.pickup_datetime],
'inner'))
window_spec = Window.partitionBy("vendor").orderBy(f.desc("average mileage"), f.desc("total tip amount"))
dataframe = (joined_df.groupBy(["vendor", columns.hack_license])
.agg(f.format_number(f.avg(columns.trip_distance), 2).alias('average mileage'),
f.format_number(f.sum(columns.tip_amount), 2).alias('total tip amount'))
.withColumn("rank", f.rank().over(window_spec))
.filter(f.col("rank") <= 10))
return dataframe
    def percentage_long_trips(self):
        """ Calculates the percentage of trips with a duration greater than 30 minutes for each vendor.

        Returns:
            DataFrame: A DataFrame containing the vendor ID, total trips executed for each vendor, amount of trips whose
            duration greater than 30 minutes and percentage of these trips.
        """
        # NOTE(review): other queries in this class filter missing vendors with
        # isNotNull(); here the vendor id is compared to the literal string
        # 'None' — presumably the raw CSV encodes missing vendors as the text
        # "None". Confirm against the source data before unifying.
        dataframe = (self.trip_data_df.filter(f.col(columns.vendor_id) != 'None')
                     .groupBy(columns.vendor_id)
                     # long_trips: f.count only counts non-null values, and the
                     # f.when(...) with no otherwise() yields null for trips of
                     # 30 minutes (1800 s) or less, so only long trips count.
                     .agg(f.count("*").alias("total_trips"),
                          f.count(f.when(f.col(columns.trip_time_in_secs) > 1800, True))
                          .alias("long_trips"))
                     # Percentage formatted to two decimal places (a string).
                     .withColumn("percentage_long_trips",
                                 f.format_number((f.col("long_trips") /
                                                  f.col("total_trips")) * 100, 2)))
        return dataframe
def top_tips_in_cash(self):
""" Calculates top 5 biggest tips for each vendor if the user paid in cash.
Returns:
DataFrame: A DataFrame containing the vendor ID and top 5 largest tips paid in cash for each vendor.
"""
window_spec = Window.partitionBy(columns.vendor_id).orderBy(f.desc(columns.tip_amount))
dataframe = (self.trip_fare_df.filter(f.col(columns.payment_type) == "CSH")
.withColumn("rank", f.dense_rank().over(window_spec))
.filter(f.col("rank") <= 5).select(columns.vendor_id, columns.tip_amount, "rank"))
return dataframe
def trips_weekdays_weekend(self):
""" Calculates the number of trips occurred on weekend and weekdays for each vendor.
Returns:
DataFrame: A DataFrame containing the number of trips executed on weekdays and weekends for each vendor.
"""
weekdays = [2, 3, 4, 5, 6]
dataframe = self.trip_fare_df.withColumn("day_of_week", f.dayofweek(f.col(columns.pickup_datetime)))
dataframe = (dataframe.withColumn("day_type", f.when(f.col("day_of_week")
.isin(weekdays), "weekday").otherwise("weekend"))
.groupBy(columns.vendor_id, "day_type")
.count()
.orderBy(columns.vendor_id, "day_type"))
return dataframe
def trips_with_tip_mount_greater_than_fare_amount(self):
""" Data of trips with tips amount greater than the fare amount.
Returns:
dataframe with columns:
medallion, hack_license, vendor_id, pickup_datetime, payment_type, fare_amount, tip_amount.
"""
result_columns_names = [columns.medallion, columns.hack_license, columns.vendor_id, columns.pickup_datetime,
columns.payment_type, columns.fare_amount, columns.tip_amount]
trips_with_tip_mount_greater_than_fare_amount = (
self.trip_fare_df.filter(f.col(columns.fare_amount) < f.col(columns.tip_amount))
.select(*result_columns_names)
)
return trips_with_tip_mount_greater_than_fare_amount
    def total_earnings_of_each_vendor_for_first_seven_days_of_january(self):
        """ Sum of earning of each vendor for trips that started on each of the first seven days of January 2013.
        Returns:
            dataframe with columns:
                vendor_id, date(in format yyyy-MM-dd), total_earnings.
        """
        column_date = 'date'
        column_total_earnings = 'total_earnings'
        # NOTE(review): these bounds are compared *as strings* against the
        # 'yyyy-MM-dd' formatted date below. The comparison works out —
        # '2012-12-31' < '2012-12-31 23:59:59.59' excludes Dec 31, while
        # '2013-01-07' < '2013-01-07 23:59:59.59' keeps Jan 7 — but only by
        # the accident of lexicographic ordering; plain '2013-01-01' /
        # '2013-01-07' bounds would be clearer. Confirm before changing.
        start_date_string = '2012-12-31 23:59:59.59'
        end_date_string = '2013-01-07 23:59:59.59'
        total_earnings_of_each_vendor_for_first_seven_days_of_january = (
            self.trip_fare_df
            # Truncate the pickup timestamp to its calendar day.
            .withColumn(column_date, f.date_format(self.trip_fare_df[columns.pickup_datetime], 'yyyy-MM-dd'))
            .filter(f.col(column_date).between(start_date_string, end_date_string))
            .orderBy(columns.vendor_id, column_date)
            # One earnings total per (vendor, day).
            .groupBy(columns.vendor_id, column_date)
            .agg(f.sum(columns.total_amount).alias(column_total_earnings))
        )
        return total_earnings_of_each_vendor_for_first_seven_days_of_january
    def driver_of_each_day(self):
        """ Driver who received the biggest amount of tips for each day
        (tips are considered received when the trip is over, hence the
        grouping on the *dropoff* date).
        Returns:
            dataframe with columns:
                date, hack_licence, vendor_id, tips_sum.
        """
        column_date = 'date'
        column_tips_sum = 'tips_sum'
        column_max_tips_sum = 'max_tips_sum'
        # Join fare records to trip records on the natural composite key.
        join_column_names = [columns.vendor_id, columns.medallion, columns.hack_license, columns.pickup_datetime]
        joined_df = self.trip_fare_df.join(self.trip_data_df, join_column_names, 'inner')
        drivers = (
            # NOTE(review): 'date' here is a literal that must stay equal to
            # column_date above — consider using the variable.
            joined_df.withColumn('date', f.date_format(joined_df[columns.dropoff_datetime], 'yyyy-MM-dd'))
            # Sum tips per (vendor, driver, day).
            .groupBy(columns.vendor_id, columns.hack_license, column_date)
            .agg(f.sum(columns.tip_amount).alias(column_tips_sum))
            .orderBy(column_date, f.desc(column_tips_sum))
            # Attach each day's maximum tip sum to every row of that day, then
            # keep only the row(s) matching it (ties yield multiple winners).
            .withColumn(column_max_tips_sum, f.max(f.col(column_tips_sum))
                        .over(Window.partitionBy(column_date)).alias(column_max_tips_sum))
            .filter(f.col(column_max_tips_sum) == f.col(column_tips_sum))
            .select(column_date, columns.hack_license, columns.vendor_id, column_tips_sum)
        )
        return drivers
def price_per_second_of_drive_for_each_vendor(self):
""" Average price per second of drive for each vendor.
Returns:
dataframe with columns:
vendor_id, average_fare_per_second
"""
column_sum_fare_amount = 'sum_fare_amount'
column_sum_trip_time_in_secs = 'sum_trip_time_in_secs'
column_average_fare_per_second = 'average_fare_per_second'
join_column_names = [columns.vendor_id, columns.medallion, columns.hack_license, columns.pickup_datetime]
joined_df = self.trip_fare_df.join(self.trip_data_df, join_column_names, 'inner')
price_per_second_of_drive_for_each_vendor = (
joined_df.groupBy('vendor_id')
.agg(f.sum(columns.fare_amount).alias(column_sum_fare_amount),
f.sum(columns.trip_time_in_secs).alias(column_sum_trip_time_in_secs))
.withColumn(column_average_fare_per_second,
f.col(column_sum_fare_amount) / f.col(column_sum_trip_time_in_secs))
.select(columns.vendor_id, column_average_fare_per_second)
)
return price_per_second_of_drive_for_each_vendor
def top_vendor_for_each_payment_type(self):
""" Vendor who received the biggest amount of money for each payment type.
Returns:
dataframe with columns:
payment_type, vendor_id, sum_total_amount.
"""
column_sum_total_amount = 'sum_total_amount'
column_max_for_payment_type = 'max_for_payment_type'
top_vendor_for_each_payment_type = (
self.trip_fare_df.groupBy(columns.vendor_id, columns.payment_type)
.agg(f.sum(columns.total_amount).alias(column_sum_total_amount))
.orderBy(columns.payment_type, f.desc(column_sum_total_amount))
.withColumn(column_max_for_payment_type,
f.max(f.col(column_sum_total_amount))
.over(Window.partitionBy(columns.payment_type)))
.filter(f.col(column_sum_total_amount) == f.col(column_max_for_payment_type))
.select(columns.payment_type, columns.vendor_id, column_sum_total_amount)
)
return top_vendor_for_each_payment_type
def top_five_drivers_with_greatest_sum_of_time_in_trip(self):
""" Top 5 drivers with greatest sum of time spent in trips.
Returns:
dataframe with columns:
vendor_id, hack_license, sum_trip_time_in_secs
"""
column_sum_trip_time_in_secs = 'sum_trip_time_in_secs'
top_five_drivers_with_greatest_sum_of_time_in_trip = (
self.trip_data_df.groupBy(columns.vendor_id, columns.hack_license)
.agg(f.sum(f.col(columns.trip_time_in_secs)).alias(column_sum_trip_time_in_secs))
.orderBy(f.desc(column_sum_trip_time_in_secs))
).limit(5)
return top_five_drivers_with_greatest_sum_of_time_in_trip
def most_popular_payment_type(self):
"""
Calculates the most popular payment type.
Returns:
DataFrame: A DataFrame containing only one row with the most popular payment type.
"""
dataframe = (
self.trip_fare_df.groupBy(columns.payment_type)
.count()
.orderBy('count', ascending=False)
.limit(1)
)
return dataframe
def highest_fare_amount(self):
"""
Calculates the highest fare when vendor is VTS.
Returns:
DataFrame: A DataFrame containing only one row with the highest fare amount for VTS.
"""
dataframe = (
self.trip_fare_df.filter(f.col(columns.vendor_id) == 'VTS')
.orderBy(columns.fare_amount, ascending=False)
.limit(1)
)
return dataframe
def top_total_amount(self):
"""
Calculates the top 10 total_amount values for drivers when passengers count > 5.
Returns:
DataFrame: A DataFrame containing 10 rows with biggest total_amount values for drivers
when passengers count > 5.
"""
dataframe = (
self.trip_fare_df.join(self.trip_data_df, [columns.medallion, columns.hack_license,
columns.pickup_datetime], 'inner')
.filter(f.col(columns.passenger_count) > 5)
.groupBy(columns.medallion, columns.hack_license, columns.passenger_count)
.agg(f.max(columns.total_amount))
.orderBy(f.col(f'max({columns.total_amount})'), ascending=False)
.limit(10)
)
return dataframe
    def total_revenue_per_day(self):
        """
        Calculates the total revenue for each day of the week, categorized by payment type.

        Returns:
            DataFrame: A DataFrame with columns: 'pickup_datetime', 'payment_type', 'total_amount',
            and 'total_revenue_per_day'.
        """
        # dayofweek() maps the pickup timestamp to a weekday number.
        dataframe = self.trip_fare_df.withColumn('day_num', f.dayofweek(columns.pickup_datetime))
        # One partition per (weekday, payment type). Since the ordering key
        # ('day_num') is constant inside each partition, every row is a peer of
        # every other, so the default range frame sums the whole partition.
        window_spec = (
            Window.partitionBy(
                f.col('day_num'),
                f.col(columns.payment_type)
            ).orderBy(f.col('day_num'))
        )
        dataframe = dataframe.withColumn('total_revenue_per_day', f.sum(f.col(columns.total_amount)).over(window_spec))
        return dataframe
    def tip_percentage(self):
        """
        Calculates percentage of tip to total_amount if payment type not cash.

        Returns:
            DataFrame: A DataFrame with new column tips_percetages (the column name carries
            this typo in the code below) and only rides which were paid not in cash.
        """
        # One window partition per individual ride (medallion + licence + pickup time).
        window_spec = Window.partitionBy(columns.medallion, columns.hack_license, columns.pickup_datetime)
        # 'CSH' marks cash payments; keep every other payment type.
        dataframe = self.trip_fare_df.filter(f.col(columns.payment_type) != 'CSH')
        # NOTE(review): 'tips_percetages' is misspelled but is the published
        # column name; renaming it would break downstream readers.
        dataframe = dataframe.withColumn('tips_percetages',
                                         (f.sum(columns.tip_amount).over(window_spec) /
                                          f.sum(columns.total_amount).over(window_spec)) * 100)
        return dataframe
def avg_trip_duration(self):
"""
Calculates the average trip duration for different rate codes.
Returns:
DataFrame: A DataFrame grouped by rate codes and found avg trip duration time for them
"""
dataframe = (
self.trip_data_df
.filter(f.col(columns.rate_code).isNotNull())
.groupBy(columns.rate_code)
.agg(
f.avg(columns.trip_time_in_secs)
.alias('avg_trip_duration')
).orderBy(f.asc(columns.rate_code))
)
return dataframe
| andriisydor/big_data_2023 | app/QueryManager.py | QueryManager.py | py | 20,166 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.functions.date_format",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 24,
"usage_type": "call"
},
{
... |
2894217699 | from typing import Dict, Callable
from src.dialog.common.manage_entity.ManageEntityDialogMode import ManageEntityDialogMode
from src.property.Property import Property
from src.session.common.Session import Session
from src.storage.common.entity.Entity import Entity
from src.storage.common.entity.EntityStorage import EntityStorage
class ManageEntityContainerSaver:
    """Persists an entity edited in the manage-entity dialog.

    Dispatches on the dialog mode (CREATE vs. EDIT) and, in edit mode, on
    whether the entity key changed, so that duplicate keys are rejected with
    an error message instead of silently overwriting another entity.
    """

    def __init__(
            self,
            session: Session,
            storage: EntityStorage,
            close_dialog: Callable[[], None],
            show_error: Callable[[str], None]
    ):
        # Session exposes the current dialog mode and, in edit mode, the
        # key the edit session started with.
        self.__session = session
        self.__storage = storage
        # UI callbacks: close the dialog on success, show a message on error.
        self.__close_dialog = close_dialog
        self.__show_error = show_error

    def save_entity(self, key: str, props: Dict[str, Property]):
        """Save the entity under *key*, routing by the current dialog mode."""
        # Evaluate the mode once instead of re-querying it per branch.
        mode = self.__session.get_manage_entity_mode()
        if mode == ManageEntityDialogMode.CREATE:
            self.handle_new_entity(key, props)
        elif mode == ManageEntityDialogMode.EDIT:
            if self.__session.get_edit_entity_key() != key:
                self.handle_edit_entity_key_changed(key, props)
            else:
                self.handle_edit_entity_key_unchanged(key, props)

    def handle_new_entity(self, key: str, props: Dict[str, Property]):
        """Create a brand-new entity; reject the save if *key* already exists."""
        if self.__storage.check_entity_exists(key):
            self.report_entity_exists(key)
        else:
            self.put_entity_close_dialog(key, props)

    def handle_edit_entity_key_changed(self, key: str, props: Dict[str, Property]):
        """Re-key an edited entity: drop the old record, store under the new key."""
        if self.__storage.check_entity_exists(key):
            self.report_entity_exists(key)
        else:
            self.remove_session_entity()
            self.put_entity_close_dialog(key, props)

    def handle_edit_entity_key_unchanged(self, key: str, props: Dict[str, Property]):
        """Key unchanged: overwrite the stored entity in place."""
        self.put_entity_close_dialog(key, props)

    def put_entity_close_dialog(self, key: str, props: Dict[str, Property]):
        """Persist the entity and dismiss the dialog."""
        self.__storage.put_entity(
            Entity(key, props)
        )
        self.__close_dialog()

    def remove_session_entity(self):
        """Remove the record stored under the key the edit session started with."""
        self.__storage.remove_entity(self.__session.get_edit_entity_key())

    def report_entity_exists(self, key: str):
        """Surface a duplicate-key error to the user."""
        # FIX: the original concatenated the key without a separating space.
        self.__show_error("ะะตะปะพ ะพะฑ ะะ ั ะฝะพะผะตัะพะผ " + key + " ัะถะต ัััะตััะฒัะตั")
| andreyzaytsev21/MasterDAPv2 | src/dialog/common/manage_entity/ManageEntityContainerSaver.py | ManageEntityContainerSaver.py | py | 2,456 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.session.common.Session.Session",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "src.storage.common.entity.EntityStorage.EntityStorage",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 16,
"usage_type... |
5491253992 | import torch
import torch.nn.functional as F
import constants
import numpy as np
def gauss1D(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalised to sum to 1."""
    center = window_size // 2
    weights = [np.exp(-((i - center) ** 2) / (2 * sigma ** 2)) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, sigma, channels: int = 3):
    """Build a (channels, 1, window_size, window_size) Gaussian window for grouped conv2d."""
    kernel_1d = gauss1D(window_size, sigma).unsqueeze(1)
    # Outer product of the 1-D kernel with itself yields the 2-D window.
    kernel_2d = kernel_1d @ kernel_1d.t()
    return kernel_2d.repeat(channels, 1, 1, 1)
def rgb_to_ycbcr(image: torch.Tensor, only_use_y_channel: bool = True) -> torch.Tensor:
    """Convert RGB Image to YCbCr Image

    Args:
        - image (Tensor): Tensor image shape (B, 3, H, W)
        - only_use_y_channel (bool): whether or not extract image with only Y channel.

    Returns:
        - Tensor image: shape (B, 1, H, W) if only_use_y_channel is True and (B, 3, H, W) the other way.
    """
    if not isinstance(image, torch.Tensor) or image.size(-3) != 3:
        raise ValueError("Invalid format of image, should be Tensor(B, 3, H, W)")
    image = image.to(constants.DEVICE)
    if only_use_y_channel:
        # BT.601 luma weights for RGB in [0, 1].
        # FIX: the green coefficient was 128.533 here but 128.553 in the full
        # matrix below -- 128.553 is the BT.601 value.
        weight = torch.tensor([[65.481], [128.553], [24.966]]).to(constants.DEVICE)
        image = torch.matmul(image.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + 16.0
    else:
        # Full RGB -> YCbCr matrix (columns: Y, Cb, Cr) plus per-channel offsets.
        weight = torch.tensor([[65.481, -37.797, 112.0],
                               [128.553, -74.203, -93.786],
                               [24.966, 112.0, -18.214]]).to(constants.DEVICE)
        bias = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(constants.DEVICE)
        image = torch.matmul(image.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + bias
    # Scale back to the [0, 1] range.
    image /= 255.
    return image
def _ssim(img1: torch.Tensor, img2: torch.Tensor, window_size: int, sigma: float, channels: int, batch_average: bool = True) -> torch.Tensor:
    """Calculate SSIM of 2 images.

    Args:
        - img1, img2 (Tensor): image batches of shape (B, channels, H, W).
        - window_size (int): side length of the square Gaussian window.
        - sigma (float): standard deviation of the Gaussian window.
        - channels (int): number of image channels (used as conv groups).
        - batch_average (bool): average the SSIM map over the whole batch if True.

    Returns:
        - Tensor: value of SSIM, which is (B,) if batch_average is not True and scalar if True.
    """
    # to device
    window = create_window(window_size, sigma, channels).to(constants.DEVICE)
    img1 = img1.to(constants.DEVICE)
    img2 = img2.to(constants.DEVICE)
    # Stability constants (K1=0.01, K2=0.03 as in the standard SSIM formulation).
    c1 = (0.01 * constants.PIXEL_VALUE_RANGE)**2
    c2 = (0.03 * constants.PIXEL_VALUE_RANGE)**2
    # groups=channels makes this a per-channel (depthwise) Gaussian blur,
    # i.e. local windowed means of each image.
    mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channels)
    mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channels)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    # Local variances and covariance: E[x^2] - E[x]^2 and E[xy] - E[x]E[y].
    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channels) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channels) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channels) - mu1_mu2
    ssim_map = ((2*mu1_mu2 + c1)*(2*sigma12 + c2))/((mu1_sq + mu2_sq + c1)*(sigma1_sq + sigma2_sq + c2))
    if batch_average:
        return ssim_map.mean()
    else:
        # Per-sample mean over channel and spatial dimensions.
        return ssim_map.mean(dim=(1,2,3))
class Metrics():
    # NOTE(review): extractYchannel() reads self.lowres / self.highres, but no
    # method in this class ever assigns them -- presumably callers set these
    # attributes before calling it. TODO: confirm against call sites.
    def __init__(
        self,
        extract_y_channel: bool = True) -> None:
        """ Calculate PSNR and SSIM metrics.

        - extract_y_channel: whether or not extract y channel in YCrCb format
        then PSNR and SSIM will be computed on only y channel images.
        """
        self.extract_y_channel = extract_y_channel
    def extractYchannel(self):
        # Replaces the stored image pair with their Y (luma) channels in place.
        self.lowres = rgb_to_ycbcr(self.lowres)
        self.highres = rgb_to_ycbcr(self.highres)
    def psnr(self, img1: torch.Tensor, img2: torch.Tensor):
        """Peak signal-to-noise ratio between two image tensors, in dB."""
        img1 = img1.to(constants.DEVICE)
        img2 = img2.to(constants.DEVICE)
        rmse = torch.sqrt(F.mse_loss(img1, img2))
        # 1e-10 guards against division by zero when the images are identical.
        psnr = 20 * torch.log10(constants.PIXEL_VALUE_RANGE/ (rmse + 1e-10))
        return psnr
    def ssim(self, img1: torch.Tensor, img2: torch.Tensor):
        """Structural similarity between two image tensors (11x11 window, sigma=0.15)."""
        return _ssim(img1, img2, window_size=11, sigma=0.15, channels=img1.size(-3))
| daoduyhungkaistgit/SRGAN | src/metrics.py | metrics.py | py | 4,011 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.mm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 18,
... |
27359625037 | #!/usr/bin/python3
import os
import requests
my_ip_file = os.path.join("/tmp", "myIp.txt")
def myIp():
    """Return this machine's public IP address as reported by an external service."""
    response = requests.get("https://gianlu.dev/ip")
    return response.text.strip()
def writToFile(filename, content):
    """Overwrite *filename* with *content*, encoded as UTF-8."""
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open(filename, "wt", encoding="utf8") as fp:
        fp.write(content)
def readFile(filename):
    """Return the whitespace-stripped UTF-8 text content of *filename*."""
    # Context manager guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(filename, "rt", encoding="utf8") as fp:
        return fp.read().strip()
# Seed the cache file on first run so readFile() below never fails.
if not os.path.exists(my_ip_file):
    writToFile(my_ip_file, "")
current_ip = myIp()
# Only act when the public IP changed since the last run: persist the new
# address and restart the client.
if current_ip != readFile(my_ip_file):
    print(current_ip, readFile(my_ip_file))
    writToFile(my_ip_file, current_ip)
    # Import deferred until an IP change is actually detected.
    import Client
    Client.mainFunc()
| GianluDeveloper/OpenRemotePort | CronKeeper.py | CronKeeper.py | py | 681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numbe... |
3746051837 | # Standard Library
import json
import logging
import urllib.parse
# Third Party
from fastapi import APIRouter, Depends, HTTPException, Query, status
from fastapi_cache.decorator import cache
# First Party
from resc_backend.constants import (
CACHE_NAMESPACE_FINDING,
DEFAULT_RECORDS_PER_PAGE_LIMIT,
ERROR_MESSAGE_500,
ERROR_MESSAGE_503,
FINDINGS_TAG,
REDIS_CACHE_EXPIRE,
RWS_ROUTE_DETAILED_FINDINGS
)
from resc_backend.db.connection import Session
from resc_backend.resc_web_service.crud import detailed_finding as detailed_finding_crud
from resc_backend.resc_web_service.dependencies import get_db_connection
from resc_backend.resc_web_service.filters import FindingsFilter
from resc_backend.resc_web_service.helpers.resc_swagger_models import Model404
from resc_backend.resc_web_service.schema import detailed_finding as detailed_finding_schema
from resc_backend.resc_web_service.schema.pagination_model import PaginationModel
# All endpoints below are mounted under the detailed-findings route prefix.
router = APIRouter(prefix=f"{RWS_ROUTE_DETAILED_FINDINGS}", tags=[FINDINGS_TAG])
logger = logging.getLogger(__name__)
@router.get("",
            response_model=PaginationModel[detailed_finding_schema.DetailedFindingRead],
            summary="Get all detailed findings",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve all the findings"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_all_detailed_findings(skip: int = Query(default=0, ge=0),
                              limit: int = Query(default=DEFAULT_RECORDS_PER_PAGE_LIMIT, ge=1),
                              db_connection: Session = Depends(get_db_connection),
                              query_string: str = None
                              ) \
        -> PaginationModel[detailed_finding_schema.DetailedFindingRead]:
    """
        Retrieve all findings objects paginated

    - **query_string**

        A query string with the following format:
            param1=value1&param2=value2&param3=value3

        Where the possible parameters are:
            - vcs_providers [enum] of type VCSProviders, possible values are: BITBUCKET, AZURE_DEVOPS.
            Will default to all if non-specified.

            - finding_statuses [enum of type FindingStatus], possible values are:NOT_ANALYZED,FALSE_POSITIVE,
            TRUE_POSITIVE. Will default to all if non-specified.

            - rule_pack_versions of type [String]

            - rule_names of type [String]

            - rule_tags of type [String] findings in the result will have at least one of the specified tags
            for the rules

            - project_name of type String

            - repository_names of type [String]

            - scan_ids of type list Integer

            - start_date_time of type datetime with the following format: 1970-01-31T00:00:00

            - end_date_time of type datetime with the following format: 1970-01-31T00:00:00

    - **db_connection**

        Session of the database connection
    - **skip**

        Integer amount of records to skip to support pagination
    - **limit**

        Integer amount of records to return, to support pagination
    - **return** [FindingRead]

        The output will contain a PaginationModel containing the list of DetailedFinding type objects,
        or an empty list if no finding was found
    """
    parsed_query_string_params = dict(urllib.parse.parse_qsl(query_string))
    # scan_ids arrives as a plain JSON list of integers.
    if parsed_query_string_params.get('scan_ids'):
        parsed_query_string_params['scan_ids'] = json.loads(parsed_query_string_params['scan_ids'])
    # The remaining list parameters arrive with single quotes, which json.loads
    # rejects, so normalise them to double quotes before parsing. (This replaces
    # five copy-pasted branches with one loop; behavior is unchanged.)
    for param in ('vcs_providers', 'finding_statuses', 'rule_names',
                  'rule_tags', 'rule_pack_versions'):
        raw_value = parsed_query_string_params.get(param)
        if raw_value:
            parsed_query_string_params[param] = json.loads(raw_value.replace('\'', '"'))
    findings_filter = FindingsFilter(**parsed_query_string_params)

    findings = detailed_finding_crud.get_detailed_findings(
        db_connection, findings_filter=findings_filter, skip=skip, limit=limit)
    total_findings = detailed_finding_crud.get_detailed_findings_count(
        db_connection, findings_filter=findings_filter)
    return PaginationModel[detailed_finding_schema.DetailedFindingRead](
        data=findings, total=total_findings, limit=limit, skip=skip)
@router.get("/{finding_id}",
            response_model=detailed_finding_schema.DetailedFindingRead,
            summary="Fetch detailed finding by ID",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve detailed finding <finding_id>"},
                404: {"model": Model404, "description": "Finding <finding_id> not found"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def read_finding(finding_id: int, db_connection: Session = Depends(get_db_connection)) \
        -> detailed_finding_schema.DetailedFindingRead:
    """
        Retrieve detailed finding by its ID

    - **db_connection**: Session of the database connection
    - **finding_id**: ID of the finding for which details need to be fetched
    - **return**: [DetailedFindingRead]
        The output will contain the details of a finding
    """
    finding = detailed_finding_crud.get_detailed_finding(db_connection, finding_id=finding_id)
    # A missing row surfaces as an HTTP 404 rather than a null body.
    if finding is None:
        raise HTTPException(status_code=404, detail="Finding not found")
    return finding
| abnamro/repository-scanner | components/resc-backend/src/resc_backend/resc_web_service/endpoints/detailed_findings.py | detailed_findings.py | py | 6,741 | python | en | code | 137 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "resc_backend.constants.RWS_ROUTE_DETAILED_FINDINGS",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "resc_backend.constants.FINDINGS_TAG",
"line_number": 28,
"usage_type... |
74998248105 | from __future__ import print_function
import numpy as np
import cv2
import subprocess
import itertools
from multiprocessing import Pool
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# List the working directory and keep only the ellipse annotation files.
# NOTE: this is Python 2 code (xrange is used below); check_output returns
# str there, so the substring test works without decoding.
f = subprocess.check_output(["ls"]).split()
files = []
#make list of files that contain ellipse data
for i in f:
    if "ellipseList.txt" in i:
        files.append(i)
print(files)
class Image:
    """One grayscale training image plus the face-ellipse annotations drawn on it.

    Produces square windows (window_size x window_size) centred near each face,
    scored by their overlap with the face ellipse, plus an equal number of
    random zero-score background windows.
    """
    def __init__(self, filename, window_size):
        # Grayscale load (flag 0).
        self.im = cv2.imread(filename,0)
        #self.im = cv2.resize(self.im,(0,0),fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
        self.mask = []                     # one boolean ellipse mask per annotated face
        self.mask_small = []
        self.windows = []
        self.windows_small = []
        self.scores = []
        self.scores_small = []
        self.cx = []                       # per-face ellipse centre x
        self.cy = []                       # per-face ellipse centre y
        self.decimation_factor = []        # per-face downscale factor so the face fits the window
        self.imno = 0                      # running count of face windows produced
        #self.slide = [-6,-4,-2,0,2,4,6]
        self.slide = [-3,-2,-1,0,1,2,3]    # pixel offsets used to jitter windows around the centre
        self.window_size = window_size
    def ellipse(self, ellipse_info):
        """Parse one annotation line 'major minor angle cx cy ...' and store its mask."""
        ellipse_info = ellipse_info.split(" ")
        axes = [float(ellipse_info[0]),float(ellipse_info[1])]
        # Downscale factor chosen so the face diameter fits in window_size (min 1).
        decim_fac = int(max(max(axes[0]*2/self.window_size,axes[1]*2/self.window_size),1))
        self.decimation_factor.append(decim_fac)
        #print "best decimation is %.2f and %.2f"%(axes[0]*2/32,axes[1]*2/32)
        theta = float(ellipse_info[2])
        self.cx.append(float(ellipse_info[3]))
        self.cy.append(float(ellipse_info[4]))
        #print "diameter is %0.2f"%(2*max(axes[0],axes[1]))
        # Boolean mask of pixels inside the rotated ellipse.
        y,x = np.ogrid[0:self.im.shape[0],0:self.im.shape[1]]
        mask = np.power(((x-self.cx[-1])*np.cos(theta) + (y-self.cy[-1])*np.sin(theta))/axes[0],2) + np.power(((x-self.cx[-1])*np.sin(theta) - (y-self.cy[-1])*np.cos(theta))/axes[1],2) <= 1
        self.mask.append(mask)
        #self.mask.append(mask[::2,::2])
        #self.cx[-1] /= 2
        #self.cy[-1] /= 2
    def ellipse_decim(self, ellipse_info):
        """Variant of ellipse() with all coordinates halved (pre-decimated image)."""
        ellipse_info = ellipse_info.split(" ")
        axes = [float(ellipse_info[0])/2,float(ellipse_info[1])/2]
        print("best decimation is %.2f and %.2f"%(axes[0]*2/32,axes[1]*2/32))
        theta = float(ellipse_info[2])
        self.cx.append(float(ellipse_info[3])/2)
        self.cy.append(float(ellipse_info[4])/2)
        #print "diameter is %0.2f"%(2*max(axes[0],axes[1]))
        y,x = np.ogrid[0:self.im.shape[0],0:self.im.shape[1]]
        mask = np.power(((x-self.cx[-1])*np.cos(theta) + (y-self.cy[-1])*np.sin(theta))/axes[0],2) + np.power(((x-self.cx[-1])*np.sin(theta) - (y-self.cy[-1])*np.cos(theta))/axes[1],2) <= 1
        self.mask.append(mask)
    def get_score(self,mask,cx,cy,x,i,ellipse_size):
        """Overlap score of the window centred at (cx+x[1], cy+x[0]); -1 if out of bounds."""
        s = self.window_size/2
        # Reject windows that would fall outside the image.
        flag = False
        flag = flag or cy+x[0]-s < 0
        flag = flag or cx+x[0]-s < 0
        flag = flag or cy+x[1]+s+1 > mask.shape[0]
        flag = flag or cx+x[1]+s+1 > mask.shape[1]
        if flag == True:
            return -1.
        #intersect = np.sum(self.mask[i][cy+x[0]-16:cy+x[0]+17,cx+x[1]-16:cx+x[1]+17]).astype(float)
        #union = ellipse_size - intersect + (32*32)
        # Intersection-over-union of the window with the ellipse mask.
        intersect = np.sum(mask[cy+x[0]-s:cy+x[0]+s+1,cx+x[1]-s:cx+x[1]+s+1]).astype(float)
        union = ellipse_size - intersect + (4*s*s)
        self.imno += 1
        #CHOOSE THE SCORE YOU WANT
        return np.float32(intersect/union)
        #return intersect/ellipse_size
    def get_random_window(self,image,mask,center):
        """Return a background window at *center*, or None if it overlaps a face."""
        s = self.window_size/2
        rand_mask = mask[center[0]-s:center[0]+s+1,center[1]-s:center[1]+s+1]
        if rand_mask.size < (self.window_size**2) or np.sum(rand_mask) > 5:
            return None
        return image[center[0]-s:center[0]+s+1,center[1]-s:center[1]+s+1].astype(np.float32)
    def get_windows(self):
        """Collect jittered face windows with scores, plus as many random background windows."""
        s = self.window_size/2
        self.image_slides = []
        self.score_slides = []
        for i in xrange(len(self.mask)):
            # Downscale image and mask so the face roughly fits the window.
            image = cv2.resize(self.im,(0,0),fx=1./self.decimation_factor[i],fy=1./self.decimation_factor[i],interpolation=cv2.INTER_AREA)
            mask = cv2.resize(self.mask[i].astype(np.uint8),(0,0),fx=1./self.decimation_factor[i],fy=1./self.decimation_factor[i],interpolation=cv2.INTER_AREA).astype(bool)
            mask_size = np.sum(mask)
            cx = int(round(self.cx[i]/self.decimation_factor[i]))
            cy = int(round(self.cy[i]/self.decimation_factor[i]))
            # One window per (dy, dx) jitter offset around the face centre.
            self.score_slides.append(map(lambda x: self.get_score(mask,cx,cy,x,i,mask_size), itertools.product(self.slide,self.slide)))
            self.image_slides.append(map(lambda x: image[cy+x[0]-s:cy+x[0]+s+1,cx+x[1]-s:cx+x[1]+s+1].astype(np.float32), itertools.product(self.slide,self.slide)))
        #generate random images
        self.random_slides = []
        self.random_scores = []
        # Union of all face masks: random windows must avoid every face.
        mask = np.zeros(self.im.shape)
        for i in xrange(len(self.mask)):
            mask = np.maximum(mask, self.mask[i].astype(int))
        mask = mask.astype(bool)
        rand = np.random.rand(self.imno,2)
        rand[:,0] *= self.im.shape[0]
        rand[:,1] *= self.im.shape[1]
        rand = rand.astype(int)
        iterate = 0
        # Keep sampling until the count of windows is doubled with backgrounds.
        goal = 2*self.imno
        while(self.imno < goal):
            try:
                randy = rand[iterate,0]
                randx = rand[iterate,1]
            except IndexError:
                # Candidate pool exhausted: draw a fresh batch of random centres.
                rand = np.random.rand(self.imno,2)
                rand[:,0] *= self.im.shape[0]
                rand[:,1] *= self.im.shape[1]
                rand = rand.astype(int)
                iterate=0
                continue
            try:
                small = mask[randy-s:randy+s+1,randx-s:randx+s+1]
                #print "shape is %d %d"%(small.shape[0],small.shape[1])
                #print "val is %d"%np.sum(small)
            except IndexError:
                iterate+=1
                continue
            iterate+=1
            # Reject windows clipped by the border or touching a face.
            if small.size - (self.window_size**2) < 10:
                continue
            elif np.sum(small) > 10:
                continue
            self.random_slides.append(self.im[randy-s:randy+s+1,randx-s:randx+s+1].astype(np.float32))
            self.random_scores.append(np.float32(0))
            self.imno += 1
            #print "Adding random image"
            #print "%d left to go"%(goal-self.imno)
    def get_data(self):
        """Return (windows, scores): face windows first, random backgrounds appended."""
        flatten = lambda l: [item for sublist in l for item in sublist]
        return flatten(self.image_slides)+self.random_slides, flatten(self.score_slides)+self.random_scores
def info(filename):
    """Parse one ellipse-list annotation file and return (windows, scores) arrays.

    The file alternates: image path line, face count line, then one ellipse
    line per face. Reading stops at the blank path marking end of file.

    NOTE(review): the ValueError handler below returns None instead of the
    accumulated arrays, silently discarding all data parsed so far from a
    malformed file -- presumably intentional best-effort, TODO confirm.
    """
    with open(filename,"r") as f:
        slides = []
        scores = []
        while(True):
            try:
                imgpath = f.readline().split("\n")[0]+".jpg"
                # Empty line => end of annotation file.
                if imgpath == ".jpg":
                    return np.array(slides), np.array(scores)
                #print imgpath
                e = Image(imgpath,32)
                numfaces = f.readline().strip()
                #print numfaces
                print(numfaces)
                for i in xrange(int(numfaces)):
                    ellipse_info = f.readline().split("\n")[0]
                    #print ellipse_info
                    e.ellipse(ellipse_info)
                #plt.imshow(e.im,cmap="gray",alpha=0.5)
                #plt.imshow(e.ellipse(ellipse_info),alpha=0.1,cmap="gray")
                #plt.show()
                e.get_windows()
                ims, im_scores = e.get_data()
                for i in xrange(len(ims)):
                    slides.append(ims[i])
                    scores.append(im_scores[i])
                #print
                #e.get_windows()
            except ValueError as a:
                #pass
                # print e
                return
            #return
#info(files[0])
#exit()
# Parse the first two annotation files in parallel and pool their windows.
pool = Pool(4)
a = np.array(pool.map(info,files[:2]))
images = np.concatenate(a[:,0]).tolist()
scores = np.concatenate(a[:,1]).tolist()
# Drop any window that is not exactly 33x33 (border-clipped samples).
# Deleting in place, so the index only advances when nothing was removed.
i=0
while(True):
    if i==len(images):
        break
    elif images[i].shape != (33,33):
        del images[i]
        del scores[i]
    else:
        i+=1
images = np.array(images)
scores = np.array(scores)
# images_flat = []
# scores_flat = []
# for i in xrange(len(images)):
#     assert len(images[i]) == len(scores[i])
#     for j in xrange(len(images[i])):
#         print type(scores[i][j])
#         images_flat.append(images[i][j])
#         scores_flat.append(scores[i][j])
# images = np.array(images_flat)
# scores = np.array(scores_flat)
# Score -1 marks out-of-bounds windows; keep only valid samples.
images = images[np.where(scores >= 0)]
scores = scores[np.where(scores >= 0)]
#scores_second = np.add(-1,scores)
#scores = np.concatenate((scores[:,np.newaxis],scores_second[:,np.newaxis]),axis=1)
#data = np.stack((images,scores[:,np.newaxis]),axis=1)
#np.random.shuffle(data)
#print(data.shape)
# plt.hist(scores,bins=50)
# plt.show()
# rand_range = (np.random.rand(10)*1000).astype(int)
# for i in xrange(10):
#     print images[rand_range[i]].shape
#     plt.imshow(images[rand_range[i]],cmap="gray",interpolation="nearest")
#     print scores[rand_range[i]]
#     plt.show()
print(scores.shape)
print(np.amin(scores))
def build_cnn(input_var=None):
    """Assemble the face-scoring CNN: two conv+pool stages, one dropout dense
    layer, and a single sigmoid output unit over 33x33 grayscale windows."""
    net = lasagne.layers.InputLayer(shape=(None, 1, 33, 33),
                                    input_var=input_var)
    # First convolution stage: 32 5x5 kernels, Glorot init, then 2x2 max-pool.
    net = lasagne.layers.Conv2DLayer(
        net, num_filters=32, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Second convolution stage with the same kernel and pooling shape.
    net = lasagne.layers.Conv2DLayer(
        net, num_filters=32, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Fully connected layer, with 50% dropout applied to its inputs.
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)
    # Single sigmoid unit: predicted overlap score in [0, 1].
    net = lasagne.layers.DenseLayer(
        net,
        num_units=1,
        nonlinearity=lasagne.nonlinearities.sigmoid)
    return net
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) mini-batches of exactly ``batchsize`` items.

    A trailing partial batch is dropped. With ``shuffle=True`` the pairing of
    inputs and targets is preserved while the visiting order is randomised.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    for start in range(0, len(inputs) - batchsize + 1, batchsize):
        sel = order[start:start + batchsize] if shuffle else slice(start, start + batchsize)
        yield inputs[sel], targets[sel]
def main(data,model='cnn', num_epochs=500):
    """Train the CNN on (windows, scores) pairs and report val/test metrics.

    Args:
        data: [images, scores] -- flat arrays produced by the dataset build above.
        model: unused; kept for CLI compatibility with the lasagne example.
        num_epochs: number of full passes over the training split.
    """
    # Load the dataset
    print("Loading data...")
    # Reshape to (N, 1, 33, 33) and normalise pixels to [0, 1].
    X = data[0].reshape(-1, 1, 33, 33)
    X /= np.float32(255)
    # Round overlap scores to {0, 1} so training is binary classification.
    Y = np.round_(data[1]).astype(np.float32)
    #X = X.astype(np.float32)
    #Y = Y.astype(np.float32)
    # X_train = X[0:300000]
    # y_train = Y[0:300000]
    # X_val = X[-20000:]
    # y_val = Y[-20000:]
    # X_test = X[300000:400000]
    # y_test = Y[300000:400000]
    # Fixed index-based split; NOTE(review): train [0:50000] and test
    # [50000:80000] are disjoint, but val (last 4000) may overlap test.
    X_train = X[0:50000]
    y_train = Y[0:50000]
    X_val = X[-4000:]
    y_val = Y[-4000:]
    X_test = X[50000:80000]
    y_test = Y[50000:80000]
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.fvector('targets')
    # Create neural network model (depending on first command line parameter)
    network = build_cnn(input_var)
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.binary_hinge_loss(prediction, target_var, log_odds=False)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.01, momentum=0.9)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_hinge_loss(test_prediction,
                                                     target_var, log_odds=False)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(test_prediction, target_var),
                      dtype=theano.config.floatX)
    #test_acc = T.mean(lasagne.objectives.binary_hinge_loss(prediction, target_var, log_odds=False),
    #                  dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print(" validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print(" test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
# Train on all windows/scores harvested from the annotation files above.
main([images,scores])
| arvigj/cv_hw3 | new_eval.py | new_eval.py | py | 15,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.check_output",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.ogrid",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.power",
... |
41977316312 | import json
class Destinations:
    """Accumulates scraped destination names in a JSON file on disk."""

    def __init__(self):
        # Current destination value, written out by write_to_json_file().
        self.destination = ""
        # Target file. Opened in append mode below, so successive writes
        # concatenate JSON objects rather than replacing the file.
        self.file_name = "destinations.json"

    def write_to_json_file(self):
        """Append the current destination as a pretty-printed JSON object."""
        payload = json.dumps({"destination": self.destination},
                             indent=1, ensure_ascii=False)
        with open(self.file_name, 'a', encoding='utf-8') as out:
            out.write(payload)
| DistributedTravels/Scraper | scraper/destinations.py | destinations.py | py | 416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 12,
"usage_type": "call"
}
] |
73571310823 | from datetime import datetime
from typing import List, Union
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.models import CharityProject, Donation
async def get_not_closed_investing_objects(
    model: Union[CharityProject, Donation],
    session: AsyncSession
) -> List[Union[CharityProject, Donation]]:
    """Fetch every not-fully-invested object of *model*, oldest first."""
    open_objects = await session.execute(
        select(model)
        .where(model.fully_invested == 0)
        .order_by(model.create_date)
    )
    return open_objects.scalars().all()
def close_investing_object(
    obj_to_close: Union[CharityProject, Donation]
):
    """Mark *obj_to_close* as fully invested and stamp its closing time."""
    # Fully invested means the invested amount equals the target amount.
    obj_to_close.fully_invested = True
    obj_to_close.invested_amount = obj_to_close.full_amount
    obj_to_close.close_date = datetime.now()
def make_investing(
new_obj: Union[CharityProject, Donation],
model_obj: Union[CharityProject, Donation]
) -> (Union[CharityProject, Donation], Union[CharityProject, Donation]):
new_obj_free_amount = new_obj.full_amount - new_obj.invested_amount
model_obj_free_amount = model_obj.full_amount - model_obj.invested_amount
if new_obj_free_amount == model_obj_free_amount:
close_investing_object(new_obj)
close_investing_object(model_obj)
elif new_obj_free_amount > model_obj_free_amount:
new_obj.invested_amount += model_obj_free_amount
close_investing_object(model_obj)
else:
model_obj.invested_amount += new_obj_free_amount
close_investing_object(new_obj)
return new_obj, model_obj
async def investing_process(
    new_object: Union[CharityProject, Donation],
    model: Union[CharityProject, Donation],
    session: AsyncSession
):
    """Distribute funds between *new_object* and all open rows of *model*.

    Walks the open counterpart objects oldest-first, transferring as much as
    possible at each step, then commits once and refreshes *new_object*.
    NOTE(review): the loop keeps calling make_investing even after new_object
    is fully invested; the remaining iterations appear to transfer nothing —
    confirm whether an early break is intended.
    """
    model_objects = await get_not_closed_investing_objects(model, session)
    for model_object in model_objects:
        new_obj, model_obj = make_investing(new_object, model_object)
        session.add(new_obj)
        session.add(model_obj)
    await session.commit()
    await session.refresh(new_object)
| ThatCoderMan/QRkot_spreadsheets | app/services/investing.py | investing.py | py | 2,014 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.models.CharityProject",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.models.Donation",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlalchemy... |
25338326690 | import torch
import seaborn as sn
from matplotlib import pyplot as plt
from model import ConvNet
from MnistDataset import Mydataset
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
torch.manual_seed(13)
def get_score(confusion_mat):
    """Derive per-class precision, recall and F1 scores from *confusion_mat*."""
    eps = 0.0001  # smoothing term so empty rows/columns never divide by zero
    true_pos = np.diagonal(confusion_mat)
    col_totals = np.sum(confusion_mat, axis=0)
    row_totals = np.sum(confusion_mat, axis=1)
    # NOTE(review): "precision" divides by column totals and "recall" by row
    # totals; whether that matches the textbook definitions depends on the
    # row/column orientation used when the matrix was filled — confirm.
    precision = true_pos / (col_totals + eps)
    recall = true_pos / (row_totals + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision, recall, f1
def get_confusion(confusion_matrix, out, label):
    """Accumulate one prediction into the running confusion matrix.

    The row is the predicted class (argmax of the network output), the
    column is the ground-truth label. Mutates and returns *confusion_matrix*.
    """
    predicted = np.argmax(out.detach().numpy())
    confusion_matrix[predicted, label] += 1
    return confusion_matrix
def main():
    """Evaluate the saved ConvNet on the MNIST test set.

    Builds a 10x10 confusion matrix one sample at a time, prints the
    macro-averaged precision/recall/F1, shows a heatmap and dumps the
    matrix to confusion.csv.
    """
    confusion_matrix = np.zeros((10, 10))
    net = ConvNet()
    # Weights from the checkpoint taken after epoch 90.
    net.load_state_dict(torch.load('model_parameter\\parameter_epo90.pth'))
    test_path = ['test.txt', r'dataset/test_label.txt']
    test_dataset = Mydataset(test_path[0], test_path[1], 'cpu')
    # Batch size 1 so each prediction updates exactly one matrix cell.
    test_dataloader = DataLoader(test_dataset, 1, True)
    for i, (pic, label) in enumerate(test_dataloader):
        out = net(pic)
        confusion_matrix = get_confusion(confusion_matrix, out, label)
    precision, recall, f1 = get_score(confusion_matrix)
    print(f'precision: {np.average(precision)}\trecall: {np.average(recall)}\tf1: {np.average(f1)}')
    confusion_mat = pd.DataFrame(confusion_matrix)
    confusion_df = pd.DataFrame(confusion_mat, index=[i for i in range(10)], columns=[i for i in range(10)])
    sn.heatmap(data=confusion_df, cmap='RdBu_r')
    plt.show()
    # NOTE(review): 'ANSI' is a Windows-only codec alias — confirm portability.
    confusion_df.to_csv(r'confusion.csv', encoding='ANSI')
# Run the evaluation when executed as a script.
if __name__ == '__main__':
    main()
| Huyf9/mnist_pytorch | test.py | test.py | py | 1,732 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.manual_seed",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.diagonal",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_numb... |
35906634663 | from flask import Flask, render_template, flash, redirect, request, url_for, jsonify
from multiprocessing import Process, Queue
from xBee_recieve import reciever
app = Flask(__name__)
# (queue, process) pairs for the background data producers; index 0 is the
# one drained by the /api endpoint.
processes = []
# Every sensor sample pulled off the queue so far, in arrival order.
collectedData = []
def getNewXbeeData(q):
    """Continuously poll the XBee radio and push received messages onto *q*.

    Runs forever; intended to be the target of a background Process.
    """
    PORT = "COM2"  # serial port the XBee adapter is attached to
    BAUD = 9600  # serial baud rate
    MAC = "13A20041C7BFFC"  # address of the remote XBee node
    r = reciever(PORT, BAUD, MAC)
    while True:
        msg = r.check_for_message()
        if msg:
            q.put(msg) # data needs to first be parsed, so if the msg is a json, we need to format to [msg['x'], msg['y']]
#tester method to get generated data should function the same as getNewXbeeData
def getNewRandomData(q):
    """Generate fake sensor samples and push them onto *q*.

    Drop-in stand-in for getNewXbeeData when no radio hardware is attached:
    every 0.5-1.0 s it emits a dict with a running timestamp, a random-walk
    accelerometer triple, random gyro readings and a random temperature.
    Runs forever; intended to be the target of a background Process.
    """
    import time
    from random import randint
    t = 0  # simulated elapsed time in seconds
    lastAccel = [0,0,0]  # previous accel sample; the next one random-walks from it
    while True:
        r = randint(5,10)/10.0
        print(r)
        time.sleep(r)
        t += r
        data = {
            "time" : t,
            "accel" : [lastAccel[0] + randint(-20,20),lastAccel[1] + randint(-20,20),lastAccel[2] + randint(-20,20)],
            "gyro" : [randint(-20,20),randint(-20,20),randint(-20,20)],
            "temp" : randint(30,100),
        }
        q.put(data)
        lastAccel = data["accel"]
@app.route("/", methods=["GET", ])
def main():
    """Serve the dashboard page."""
    return render_template("main.html")
# main page
@app.route('/api/<data>/<num>', methods=['GET'])
def api(data, num):
    """Return new [time, value] samples for the series named by *data*.

    *data* selects a series ("accel"/"gyro" with an optional X/Y/Z axis
    suffix, or "temp"); *num* is how many samples the client already holds,
    so only samples collected after that index are returned.
    """
    # Drain everything the producer queued since the last request.
    q = processes[0][0]
    while not q.empty():
        d = q.get()
        collectedData.append(d)
    #num is current size of users data, so we only give them the data they dont have
    out = []
    if "accel" in data:
        # Default to the X axis when no axis letter is present.
        n = 0
        if "Y" in data:
            n = 1
        elif "Z" in data:
            n = 2
        for d in collectedData[int(num)::]:
            out.append([d["time"], d["accel"][n]])
    elif "gyro" in data:
        n = 0
        if "Y" in data:
            n = 1
        elif "Z" in data:
            n = 2
        for d in collectedData[int(num)::]:
            out.append([d["time"], d["gyro"][n]])
    elif data == "temp":
        for d in collectedData[int(num)::]:
            out.append([d["time"], d["temp"]])
    return jsonify(out)
# Launch the fake-data producer in a child process, serve the web UI,
# then terminate every producer once the server exits.
if __name__ == '__main__':
    q = Queue()
    p = Process(target=getNewRandomData, args=[q,])
    processes.append((q,p))
    p.start()
    app.run(host="0.0.0.0", port=80)
    for p in processes:
p[1].terminate() | explosion33/PIPayload | ground/api.py | api.py | py | 2,488 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "xBee_recieve.reciever",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"lin... |
11014211257 |
from django.shortcuts import render, redirect, get_object_or_404
from .forms import LibroForm
from django.shortcuts import render
from .models import Libro
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
from django.views.generic.edit import UpdateView
class IngresarLibroView(LoginRequiredMixin, CreateView):
    """Create a new Libro; rejects duplicates by (titulo, autor)."""
    model = Libro
    form_class = LibroForm
    template_name = 'libros/ingresar_libro.html'
    success_url = reverse_lazy('lista_libros')
    def form_valid(self, form):
        """Stamp the current user on the book and block exact duplicates."""
        form.instance.usuario = self.request.user
        titulo = form.cleaned_data['titulo']
        autor = form.cleaned_data['autor']
        # NOTE(review): check-then-insert is racy under concurrent submits;
        # a unique constraint on (titulo, autor) would be safer — confirm.
        if not Libro.objects.filter(titulo=titulo, autor=autor).exists():
            return super().form_valid(form)
        else:
            form.add_error('titulo', 'Este libro ya existe en la biblioteca.')
            return self.form_invalid(form)
class EditarLibroView(LoginRequiredMixin, UpdateView):
    """Edit an existing Libro."""
    model = Libro
    form_class = LibroForm
    template_name = 'libros/editar_libro.html'
    success_url = reverse_lazy('lista_libros')
    def get_context_data(self, **kwargs):
        """Expose the edited book to the template as 'libro'."""
        context = super().get_context_data(**kwargs)
        context['libro'] = self.get_object()
        return context
    def form_valid(self, form):
        # No extra behavior; override kept for symmetry with IngresarLibroView.
        return super().form_valid(form)
def lista_libros(request):
    """List all books ordered by title."""
    libros = Libro.objects.all().order_by('titulo')
    return render(request, 'libros/lista_libros.html', {'libros': libros})
def eliminar_libro(request, libro_id):
    """Confirm (GET) and perform (POST) deletion of a single book."""
    libro = get_object_or_404(Libro, id=libro_id)
    if request.method == 'POST':
        libro.delete()
        return redirect('lista_libros')
    return render(request, 'libros/eliminar_libro.html', {'libro': libro})
def lista_detalle_libros(request):
    """List all books with their details, ordered by title."""
    libros = Libro.objects.all().order_by('titulo')
    return render(request, 'libros/lista_detalle_libros.html', {'libros': libros})
| ezecodo/Entrega1-Angeloni | libros/views.py | views.py | py | 2,052 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.views.generic.edit.CreateView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.Libro",
"line_number": 18,
"usage_type": "name"... |
38164787541 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add FoiRequest.title and relabel the foi_text field."""
    dependencies = [
        ('foi_requests', '0001_initial'),
    ]
    operations = [
        # New optional title column for FOI requests.
        migrations.AddField(
            model_name='foirequest',
            name='title',
            field=models.CharField(max_length=255, blank=True),
        ),
        # Same column as before; only the user-facing label changes.
        migrations.AlterField(
            model_name='foirequest',
            name='foi_text',
            field=models.TextField(verbose_name='Your FOI Request'),
        ),
    ]
| foilaundering/foilaundering | foilaundering/apps/foi_requests/migrations/0002_auto_20151122_1253.py | 0002_auto_20151122_1253.py | py | 587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
35396952388 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
from contextlib import contextmanager
import inspect
import logging
import os
import re
import sys
import traceback
from twitter.common import log
from twitter.common.collections import OrderedSet
from twitter.common.lang import Compatibility
from twitter.common.log.options import LogOptions
from pants.backend.core.tasks.task import QuietTaskMixin, Task
from pants.backend.jvm.tasks.nailgun_task import NailgunTask # XXX(pl)
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.config import Config
from pants.base.rcfile import RcFile
from pants.base.workunit import WorkUnit
from pants.commands.command import Command
from pants.engine.engine import Engine
from pants.engine.round_engine import RoundEngine
from pants.goal.context import Context
from pants.goal.error import GoalError
from pants.goal.initialize_reporting import update_reporting
from pants.goal.goal import Goal
from pants.option.bootstrap_options import create_bootstrapped_options
from pants.option.global_options import register_global_options
from pants.util.dirutil import safe_mkdir
StringIO = Compatibility.StringIO
class GoalRunner(Command):
  """Lists installed goals or else executes a named goal."""
  class IntermixedArgumentsError(GoalError):
    """Raised when goal names and target specs are interleaved ambiguously."""
    pass
  __command__ = 'goal'
  output = None
  def __init__(self, *args, **kwargs):
    self.targets = []
    known_scopes = ['']
    for goal in Goal.all():
      # Note that enclosing scopes will appear before scopes they enclose.
      known_scopes.extend(filter(None, goal.known_scopes()))
    self.new_options = create_bootstrapped_options(known_scopes=known_scopes)
    self.config = Config.from_cache() # Get the bootstrapped version.
    super(GoalRunner, self).__init__(*args, needs_old_options=False, **kwargs)
  def get_spec_excludes(self):
    """Return absolute paths that spec parsing should skip."""
    # Note: Only call after register_options() has been called.
    return [os.path.join(self.root_dir, spec_exclude)
            for spec_exclude in self.new_options.for_global_scope().spec_excludes]
  @property
  def global_options(self):
    """Option values for the global (empty) scope."""
    return self.new_options.for_global_scope()
  @contextmanager
  def check_errors(self, banner):
    """Collect errors reported via the yielded callback; fail with *banner* if any."""
    errors = {}
    def error(key, include_traceback=False):
      # Record the currently handled exception (if any) under *key*.
      exc_type, exc_value, _ = sys.exc_info()
      msg = StringIO()
      if include_traceback:
        frame = inspect.trace()[-2]
        filename = frame[1]
        lineno = frame[2]
        funcname = frame[3]
        code = ''.join(frame[4]) if frame[4] else None
        traceback.print_list([(filename, lineno, funcname, code)], file=msg)
      if exc_type:
        msg.write(''.join(traceback.format_exception_only(exc_type, exc_value)))
      errors[key] = msg.getvalue()
      sys.exc_clear()
    yield error
    if errors:
      msg = StringIO()
      msg.write(banner)
      invalid_keys = [key for key, exc in errors.items() if not exc]
      if invalid_keys:
        msg.write('\n %s' % '\n '.join(invalid_keys))
      for key, exc in errors.items():
        if exc:
          msg.write('\n %s =>\n %s' % (key, '\n '.join(exc.splitlines())))
      # The help message for goal is extremely verbose, and will obscure the
      # actual error message, so we don't show it in this case.
      self.error(msg.getvalue(), show_help=False)
  def register_options(self):
    """Register global options plus every goal's scoped options."""
    # Add a 'bootstrap' attribute to the register function, so that register_global can
    # access the bootstrap option values.
    def register_global(*args, **kwargs):
      return self.new_options.register_global(*args, **kwargs)
    register_global.bootstrap = self.new_options.bootstrap_option_values()
    register_global_options(register_global)
    for goal in Goal.all():
      goal.register_options(self.new_options)
  def setup_parser(self, parser, args):
    """Resolve goals and target specs from *args* and apply pantsrc defaults."""
    if not args:
      args.append('help')
    logger = logging.getLogger(__name__)
    goals = self.new_options.goals
    specs = self.new_options.target_specs
    fail_fast = self.new_options.for_global_scope().fail_fast
    # Warn when a goal name could also be read as a BUILD file path.
    for goal in goals:
      if BuildFile.from_cache(get_buildroot(), goal, must_exist=False).exists():
        logger.warning(" Command-line argument '{0}' is ambiguous and was assumed to be "
                       "a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
    if self.new_options.is_help:
      self.new_options.print_help(goals=goals)
      sys.exit(0)
    self.requested_goals = goals
    with self.run_tracker.new_workunit(name='setup', labels=[WorkUnit.SETUP]):
      spec_parser = CmdLineSpecParser(self.root_dir, self.address_mapper,
                                      spec_excludes=self.get_spec_excludes())
      with self.run_tracker.new_workunit(name='parse', labels=[WorkUnit.SETUP]):
        for spec in specs:
          for address in spec_parser.parse_addresses(spec, fail_fast):
            self.build_graph.inject_address_closure(address)
            self.targets.append(self.build_graph.get_target(address))
    self.goals = [Goal.by_name(goal) for goal in goals]
    rcfiles = self.config.getdefault('rcfiles', type=list,
                                     default=['/etc/pantsrc', '~/.pants.rc'])
    if rcfiles:
      rcfile = RcFile(rcfiles, default_prepend=False, process_default=True)
      # Break down the goals specified on the command line to the full set that will be run so we
      # can apply default flags to inner goal nodes. Also break down goals by Task subclass and
      # register the task class hierarchy fully qualified names so we can apply defaults to
      # baseclasses.
      sections = OrderedSet()
      for goal in Engine.execution_order(self.goals):
        for task_name in goal.ordered_task_names():
          sections.add(task_name)
          task_type = goal.task_type_by_name(task_name)
          for clazz in task_type.mro():
            if clazz == Task:
              break
            sections.add('%s.%s' % (clazz.__module__, clazz.__name__))
      augmented_args = rcfile.apply_defaults(sections, args)
      if augmented_args != args:
        # TODO(John Sirois): Cleanup this currently important mutation of the passed in args
        # once the 2-layer of command -> goal is squashed into one.
        args[:] = augmented_args
        sys.stderr.write("(using pantsrc expansion: pants goal %s)\n" % ' '.join(augmented_args))
  def run(self):
    """Execute the requested goals; returns a non-zero code on failure."""
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
      LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
    logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      prev_log_level = None
      # If quiet, temporarily change stderr log level to kill init's output.
      if self.global_options.quiet:
        prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
        # loglevel_name can fail, so only change level if we were able to get the current one.
        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
      log.init('goals')
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(prev_log_level)
    else:
      log.init()
    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
      # True when any requested goal contains a task that opts out of verbose reporting.
      for goal in self.goals:
        if goal.has_task_of_type(QuietTaskMixin):
          return True
      return False
    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = '** unmatched **'
    def targets_by_pattern(targets, patterns):
      # Bucket each target under the first exclusion pattern matching its spec.
      mapping = defaultdict(list)
      for target in targets:
        matched_pattern = None
        for pattern in patterns:
          if re.search(pattern, target.address.spec) is not None:
            matched_pattern = pattern
            break
        if matched_pattern is None:
          mapping[_UNMATCHED_KEY].append(target)
        else:
          mapping[matched_pattern].append(target)
      return mapping
    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)
    if self.global_options.exclude_target_regexp:
      excludes = self.global_options.exclude_target_regexp
      log.debug('excludes:\n {excludes}'.format(excludes='\n '.join(excludes)))
      by_pattern = targets_by_pattern(self.targets, excludes)
      self.targets = by_pattern[_UNMATCHED_KEY]
      # The rest of this if-statement is just for debug logging.
      log.debug('Targets after excludes: {targets}'.format(
        targets=', '.join(t.address.spec for t in self.targets)))
      excluded_count = sum(len(by_pattern[p]) for p in excludes)
      log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
        plural=('s' if excluded_count != 1 else '')))
      for pattern in excludes:
        log.debug('Targets excluded by pattern {pattern}\n {targets}'.format(pattern=pattern,
          targets='\n '.join(t.address.spec for t in by_pattern[pattern])))
    context = Context(
      config=self.config,
      new_options=self.new_options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
    )
    unknown = []
    for goal in self.goals:
      if not goal.ordered_task_names():
        unknown.append(goal)
    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
      return 1
    engine = RoundEngine()
    return engine.execute(context, self.goals)
  def cleanup(self):
    """Kill lingering daemon processes started on our behalf, then exit."""
    # TODO: This is JVM-specific and really doesn't belong here.
    # TODO: Make this more selective? Only kill nailguns that affect state? E.g., checkstyle
    # may not need to be killed.
    NailgunTask.killall(log.info)
sys.exit(1)
| fakeNetflix/square-repo-pants | src/python/pants/commands/goal_runner.py | goal_runner.py | py | 10,794 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twitter.common.lang.Compatibility.StringIO",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "twitter.common.lang.Compatibility",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pants.commands.command.Command",
"line_number": 41,
"us... |
23469121006 | import yaml,os
class Common_funcs():
    def get_datas(self,path:str)-> list:
        """Load calculator test data from a YAML file under the lagou05 project root.

        *path* is a path fragment relative to the lagou05 directory (leading
        separator included). Returns eight items: (datas, ids) pairs for the
        add, div, mul and sub operations, in that order.
        """
        # Everything before the "lagou05" folder in the CWD is the workspace root.
        current_path = os.getcwd().split("lagou05")[0]
        # NOTE(review): the backslash separators make this Windows-only — confirm.
        with open(current_path+"\\lagou05"+path) as f:
            datas = yaml.safe_load(f)
            # "datas" holds the input values, "myids" the matching test-case ids.
            add_datas = datas["datas"]["add"]
            add_ids = datas["myids"]["add"]
            div_datas = datas["datas"]["div"]
            div_ids = datas["myids"]["div"]
            mul_datas = datas["datas"]["mul"]
            mul_ids = datas["myids"]["mul"]
            sub_datas = datas["datas"]["sub"]
            sub_ids = datas["myids"]["sub"]
        # Redundant after the with-block, but kept as in the original.
        f.close()
        return [add_datas,add_ids,div_datas,div_ids,mul_datas,mul_ids,sub_datas,sub_ids]
| testroute/lagou05 | Common/Read_yaml.py | Read_yaml.py | py | 1,286 | python | zh | code | null | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 8,
"usage_type": "call"
}
] |
74218356582 | from PySide6.QtCore import QObject, Property, Slot, Signal, QTimer
from typing import Optional
from .qml_file_wrapper import QmlFileWrapper
class MainController(QObject):
    """Owns the QML screen wrappers and tells QML which screen to display.

    A 10 ms repeating QTimer flips between the HOME and OTHER screens on
    every tick, exercising screen switching.
    """
    # Emitted whenever main_content_qml changes so QML re-reads the property.
    main_content_qml_changed = Signal()
    def __init__(self, parent=None):
        super().__init__(parent)
        self._app = parent
        # One wrapper per screen, keyed by upper-case screen id.
        self._qml_wrappers = {
            "HOME": QmlFileWrapper('Home.qml'),
            "OTHER": QmlFileWrapper('Other.qml')
        }
        self._active_id = "HOME"
        self._active_wrapper: QmlFileWrapper = self._qml_wrappers[self._active_id]
        self._counter = 0  # number of screen toggles performed so far
        self._timer = QTimer()
        self._timer.setInterval(10)
        self._timer.setSingleShot(False)
        self._timer.timeout.connect(self._toggle_screen)
    @Property(str, notify=main_content_qml_changed)
    def main_content_qml(self) -> str:
        """Path of the QML file the UI should currently load."""
        return self._active_wrapper.qml_path
    def startup(self):
        """Begin the automatic screen toggling."""
        self._timer.start()
    def shutdown(self):
        """Report how many toggles ran before shutdown."""
        print(f"Stopping after {self._counter} iterations.")
    @Slot(str, result=QmlFileWrapper)
    def get_wrapper_object_by_name(self, screen_name: str) -> Optional[QmlFileWrapper]:
        """Look up a screen wrapper by (case-insensitive) name."""
        return self._qml_wrappers[screen_name.upper()]
    @Slot(str) # QML will only send a string
    def go_to_qml_by_name(self, next_id: str) -> None:
        """Switch the active screen and notify QML."""
        self._active_wrapper = self.get_wrapper_object_by_name(next_id)
        self.main_content_qml_changed.emit()
    def _toggle_screen(self):
        # Timer callback: alternate between the two screens.
        self._counter = self._counter + 1
        if self._active_id == "HOME":
            self._active_id = "OTHER"
        else:
            self._active_id = "HOME"
        self.go_to_qml_by_name(self._active_id)
| maldata/qml-error-test | errortest/main_controller.py | main_controller.py | py | 1,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide6.QtCore.QObject",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PySide6.QtCore.Signal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "qml_file_wrapper.QmlFileWrapper",
"line_number": 14,
"usage_type": "call"
},
{
"api_na... |
39885168812 | import pytz
import base64
from typing import List
from flask import Blueprint, request, redirect, abort
from flask_login.utils import login_required
from datetime import datetime, timedelta, timezone
from flask.templating import render_template
from flask_login import current_user
from mib.rao.user_manager import UserManager, User
from mib.rao.message_manager import MessageManager, MessagePost, Message
from mib.rao.draft_manager import DraftManager, DraftPost, Draft
messages = Blueprint('messages', __name__)
@ messages.route('/messages/send', methods=['GET', 'POST'])
@login_required
def send_message():
    '''GET: render the compose page for the chosen recipient(s).
    POST: validate recipients and schedule the message for delivery.'''
    if request.method == 'POST':
        # Recipients arrive as a single comma-separated string of e-mails.
        emails = request.form.get('receiver').split(',')
        recipient_list = []  # ids of valid, active recipients
        recipient_error_list = []  # e-mails we could not resolve to an active user
        message_ok = False
        for email in emails:
            email = email.strip(' ')
            user = UserManager.get_user_by_email(email)
            check = True
            if user is not None and user.is_active:
                recipient_list.append(user.id)
                check = False
            if check:
                recipient_error_list.append(email)
        new_message :MessagePost = MessagePost()
        new_message.attachment_list = []
        new_message.id_sender = current_user.id
        new_message.recipients_list = recipient_list
        # Normalize the requested delivery time to UTC before sending it on.
        message_date = request.form.get('date')
        # NOTE(review): the client's offset is hard-coded to UTC+1 — confirm.
        tz=timezone(timedelta(hours=1))
        message_date = datetime.fromisoformat(message_date)
        message_date = message_date.replace(tzinfo=tz)
        message_date = message_date.astimezone(pytz.UTC)
        message_date = message_date.isoformat()
        new_message.date_delivery = message_date
        new_message.text = request.form.get('text')
        # Attachments are base64-encoded so they survive the hop to the service.
        uploaded_files = request.files.getlist("files")
        if uploaded_files and any(f for f in uploaded_files):
            for file in uploaded_files:
                if file:
                    attachment = file.read()
                    new_message.attachment_list.append(base64.b64encode(attachment).decode('ascii'))
        new_message = MessageManager.send_message(new_message)
        if new_message is not None:
            message_ok = True
        else:
            # The whole send failed: report every address as undelivered.
            for email in emails:
                recipient_error_list.append(email)
        return render_template("send_message.html", form=dict(), message_ok=message_ok,
                               recipient_error_list=recipient_error_list)
    else:
        # landing from the recipients page, we want to populate the field with the chosen one
        recipient_message = request.args.items(multi=True)
        form = {'recipient': ''}
        for recipient in recipient_message:
            if recipient[1] != '':
                form['recipient'] += recipient[1] if form['recipient'] == '' else ', ' + recipient[1]
        return render_template("send_message.html", form=form)
@messages.route('/messages/<message_id>', methods=["GET"])
@login_required
def view_message(message_id):
    '''GET: display a single message with its sender, recipient and images.'''
    message: Message = MessageManager.get_message(message_id)
    if message is None:
        # Unknown id: nothing to show.
        abort(404)
    else:
        recipient: User = UserManager.get_user_by_id(message.id_recipient)
        sender: User = UserManager.get_user_by_id(message.id_sender)
        return render_template("message.html",
                               sender=sender,
                               recipient=recipient,
                               message=message,
                               images=message.attachment_list)
@messages.route('/messages/<message_id>/delete', methods=["POST"])
@login_required
def deleteMessage(message_id):
    '''POST: delete the chosen message, then return to the inbox.'''
    # The manager returns an HTTP-like status code that is forwarded as-is.
    ret: int = MessageManager.delete_message(message_id)
    if ret == 404:
        abort(404)
    elif ret == 403:
        abort(403)
    else:
        return redirect('/inbox')
@messages.route("/messages/<id>/withdraw", methods=['POST'])
@login_required
def withdraw_message(id):
    '''POST: withdraw a not-yet-delivered message (costs the user points).'''
    # The manager returns an HTTP-like status code that is forwarded as-is.
    ret: int = MessageManager.withdraw_message(id)
    if ret == 404:
        abort(404)
    elif ret == 403:
        abort(403)
    else:
        return redirect('/outbox')
@messages.route('/messages/<id_message>/forward', methods=['GET'])
@login_required
def send_forward_msg(id_message):
    '''GET: open the compose page pre-filled with the text of the message.'''
    # Recipients picked on the previous page arrive as repeated query args.
    recipient_message = request.args.items(multi=True)
    text = MessageManager.get_message(id_message).text
    form = dict(recipient="", text=text, message_id=id_message)
    for recipient in recipient_message:
        if recipient[1] != '':
            form['recipient'] += recipient[1] if form['recipient'] == '' else ', ' + recipient[1]
return render_template("send_message.html", form=form, forward=True) | squad03mib/api-gateway | mib/views/messages.py | messages.py | py | 5,044 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.request... |
27616329139 | #coding=utf8
import numpy as np
np.random.seed(1337) # for reproducibility
import re
import h5py
import os
from nltk import tokenize
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from attention import Attention_input1, Attention_input2
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Reshape, Dense, Input, Flatten, Dropout, merge, BatchNormalization
from keras.layers import TimeDistributed, LSTM, GRU, Bidirectional
from keras.models import Model
from keras.optimizers import SGD, Adadelta, Adam, RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.core import Reshape, RepeatVector
from keras.callbacks import EarlyStopping
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Merge, Convolution1D, MaxPooling1D
GLOVE_DIR = '../data/'  # directory holding the GloVe embedding file
MAX_SEQUENCE_LENGTH = 140  # tweets are padded/truncated to this many tokens
MAX_NB_WORDS = 10000  # vocabulary cap for the tokenizer
EMBEDDING_DIM = 200  # matches the glove.6B.200d vectors loaded below
VALIDATION_SPLIT = 0.1  # fraction held out for validation (and again for test)
NB_EPOCH = 100  # upper bound; early stopping usually ends training sooner
NB_CLASS = 3  # number of sentiment classes
DIM_HIDDEN = 128  # width of the dense projection layers
DIM_LSTM = 128  # LSTM hidden size (appears unused in this file's code)
# datamode = 'mul'
# Select the single-label or multi-label variant of the MSVA dataset.
datamode = 'single'
if datamode == 'mul':
    DATA_PATH = '../data/MSVA_multiple_17024.h5'
    BATCH_SIZE = 128
else:
    DATA_PATH = '../data/MSVA_single_4511.h5'
    BATCH_SIZE = 32
def load_data():
    """Read tweets, labels and image features from the MSVA HDF5 file.

    Returns (texts, labels, scenes, objects), each fully materialized into
    memory via [:]. NOTE(review): the h5py file handle is never closed —
    consider a context manager.
    """
    read_file = h5py.File(DATA_PATH, 'r')
    texts = read_file['txt_data'][:]
    labels = read_file['label'][:]
    scenes = read_file['scene_data'][:]
    objects = read_file['object_data'][:]
    return texts,labels,scenes,objects
def split_data(data, VALIDATION_SPLIT):
    """Slice *data* into train/validation/test partitions along axis 0.

    The last 2 * int(VALIDATION_SPLIT * len) rows are held out: the first
    half of the holdout becomes the validation set, the second half the
    test set; everything before it is training data.
    """
    holdout = int(VALIDATION_SPLIT * data.shape[0])
    train_part = data[:-2 * holdout]
    val_part = data[-2 * holdout:-holdout]
    test_part = data[-holdout:]
    return train_part, val_part, test_part
def dp_txt(txt):
    """Strip hashtags, @-mentions and URLs from a raw tweet string."""
    hashtag_re = re.compile('#[a-zA-Z0-9]+')
    mention_re = re.compile('@[a-zA-Z0-9]+')
    url_re = re.compile("((http|ftp|https)://)(([a-zA-Z0-9\._-]+\.[a-zA-Z]{2,6})|([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\&%_\./-~-]*)?")
    cleaned = txt.strip()
    # Removal order matters: hashtags, then mentions, then URLs.
    for pattern in (hashtag_re, mention_re, url_re):
        cleaned = re.sub(pattern, '', cleaned)
    return cleaned
def fun():
    """Train and evaluate the scene+object+text attention sentiment model.

    Loads MSVA data, cleans and tokenizes the tweets, builds a GloVe-backed
    embedding, trains the fused network with early stopping, then evaluates
    the best checkpoint on the held-out test split and returns its metrics.
    """
    texts,labels,scenes,objects = load_data()
    # Clean each raw tweet (hashtags / mentions / URLs removed).
    new_texts = []
    for idx in range(len(texts)):
        text = texts[idx]
        text = dp_txt(str(text))
        new_texts.append(text)
    texts = new_texts
    tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    text_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    # print('Text tensor shape:', text_data.shape)
    # print('Label tensor shape:', labels.shape)
    # print('Scene tensor shape:', scenes.shape)
    # print('Object tensor shape:', objects.shape)
    # # split the text_data into a training set and a validation set
    # Shuffle all four modalities with the same permutation.
    rand = np.arange(labels.shape[0])
    np.random.shuffle(rand)
    indices = rand
    text_data = text_data[indices]
    labels = labels[indices]
    scenes = scenes[indices]
    objects = objects[indices]
    text_train,text_val,text_test = split_data(text_data,VALIDATION_SPLIT)
    label_train,label_val,label_test = split_data(labels,VALIDATION_SPLIT)
    scene_train,scene_val,scene_test = split_data(scenes,VALIDATION_SPLIT)
    object_train,object_val,object_test = split_data(objects,VALIDATION_SPLIT)
    text_shape = text_train.shape[1:]
    scene_shape = scene_train.shape[1:]
    object_shape = object_train.shape[1:]
    # Load pre-trained GloVe vectors into a word -> vector map.
    embeddings_index = {}
    f = open(os.path.join(GLOVE_DIR, 'glove.6B.200d.txt'))
    for line in f:
        values = line.split()
        # NOTE(review): GloVe lines are "<word> <dim values...>", so this
        # looks like it should be values[0] and values[1:] — confirm.
        word = values[2]
        coefs = np.asarray(values[1], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    # Build the embedding matrix restricted to the capped vocabulary.
    nb_words = min(MAX_NB_WORDS, len(word_index))
    embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NB_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    embedding_layer = Embedding(nb_words + 1,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    # Keep only the best-validation-loss weights; stop after 5 stale epochs.
    save_best = ModelCheckpoint('../../model/{}.hdf5'.format('my_weight'), save_best_only=True)
    elstop = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=5)
    # Image Scene branch: dense projection, repeated per text timestep.
    scene_input = Input(shape=scene_shape, dtype='float32')
    img_scene = Dense(DIM_HIDDEN, activation='relu')(scene_input)
    img_scene_encoder = RepeatVector(text_shape[0], name='scene-repeat')(img_scene)
    # Image Object branch, same structure as the scene branch.
    object_input = Input(shape=object_shape, dtype='float32')
    img_object = Dense(DIM_HIDDEN, activation='relu')(object_input)
    img_object_encoder = RepeatVector(text_shape[0], name='object-repeat')(img_object)
    # Text branch: embedding -> LSTM -> attention over the two image encoders.
    txt_input = Input(shape=text_shape, dtype='float32')
    txt = embedding_layer(txt_input)
    txt_hidden = (LSTM(DIM_HIDDEN, return_sequences=True, name='tweet-lstm'))(txt)
    txt_att = Attention_input2(name='att_so')([txt_hidden, img_object_encoder, img_scene_encoder])
    # Merge the three branches and classify.
    img_txt = merge([img_scene, img_object, txt_att], mode='concat')
    img_txt = Dense(DIM_HIDDEN, activation='relu')(img_txt)
    img_txt_loss = Dense(NB_CLASS, activation='softmax', name='main_output')(img_txt)
    model = Model(input=[txt_input, scene_input, object_input], output=[img_txt_loss])
    model.compile(loss='categorical_crossentropy', optimizer='RMSprop',
                  metrics=['acc', 'fmeasure'])
    model.fit([text_train, scene_train, object_train], [label_train],
              validation_data=([text_val, scene_val, object_val], [label_val]),
              nb_epoch=NB_EPOCH, batch_size=BATCH_SIZE, callbacks=[elstop,save_best], verbose=1)
    # Evaluate the best checkpoint, not the final-epoch weights.
    model.load_weights('../../model/{}.hdf5'.format('my_weight'))
    score = model.evaluate([text_test, scene_test, object_test], label_test, verbose=0)
    print('results๼', score[1], score[2])
    return score[1:]
# Train and evaluate once when run as a script.
if __name__ == '__main__':
    fun()
| xunan0812/MultiSentiNet | src/att_sc_ob_txt.py | att_sc_ob_txt.py | py | 6,726 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_nu... |
7813396326 | import re
from math import ceil
import dateparser
from aspen.database.models import TreeType
from aspen.workflows.nextstrain_run.build_plugins.base_plugin import BaseConfigPlugin
class TreeTypePlugin(BaseConfigPlugin):
    """Base plugin that fills in tree-type-specific Nextstrain build config.

    Subclasses set ``tree_type``, ``subsampling_scheme`` and
    ``crowding_penalty`` and implement ``run_type_config`` with their own
    subsampling adjustments.
    """
    # Value written into config["priorities"]["crowding_penalty"].
    crowding_penalty: float = 0
    tree_type: TreeType
    subsampling_scheme: str = "NONE"
    def _update_config_params(self, config):
        """Populate build location fields, title and shared config values."""
        if not config.get("builds"):
            # TODO, force MPX structure to look more like SC2's
            config["builds"] = {"aspen": {}}
        build = config["builds"]["aspen"]
        location = self.template_args["location"]
        # Make a shortcut to decide whether this is a location vs division vs country level build.
        # NOTE(review): when both division and location are set, tree_build_level
        # is not assigned here — presumably a "location" default exists on the
        # base class; confirm.
        if not location.division:
            self.tree_build_level = "country"
        elif not location.location:
            self.tree_build_level = "division"
        # Fill out country/division/location fields if the group has them,
        # or remove those fields if they don't.
        location_fields = ["country", "division", "location"]
        location_values = []
        for field in location_fields:
            value = getattr(location, field)
            if value:
                build[field] = value
                location_values.append(value)
            else:
                if build.get(field):
                    del build[field]
        # NOTE: <TreeTypePlugin>.subsampling_scheme is used in 3 places:
        #  - Its lowercase'd name is used to find a markdown file with an "about this tree" description
        #  - It refers to a subsampling_scheme key in the mega nextstrain template
        #  - It's title-case'd and included in the tree title as human-readable text
        build["subsampling_scheme"] = self.subsampling_scheme
        # Update the tree's title with build type, location and date range.
        # We always provide some form of end date in the title.
        end_date = self._get_formatted_tree_end_date()
        # We base format of title on whether we have a `filter_start_date`
        if self.template_args.get("filter_start_date") is not None:
            title_template = "{tree_type} tree for samples collected in {location} between {start_date} and {end_date}"
            build["title"] = title_template.format(
                tree_type=self.subsampling_scheme.title(),
                location=", ".join(location_values),
                start_date=dateparser.parse(
                    self.template_args.get("filter_start_date")
                ).strftime("%Y-%m-%d"),
                end_date=end_date,
            )
        else:
            title_template = "{tree_type} tree for samples collected in {location} up until {end_date}"
            build["title"] = title_template.format(
                tree_type=self.subsampling_scheme.title(),
                location=", ".join(location_values),
                end_date=end_date,
            )
        # Inject the lowercase scheme name into the "about this tree" file path.
        if config.get("files"):
            config["files"]["description"] = config["files"]["description"].format(
                tree_type=self.subsampling_scheme.lower()
            )
        if config.get("priorities"):
            config["priorities"]["crowding_penalty"] = self.crowding_penalty
    def _get_formatted_tree_end_date(self):
        """Returns appropriate YYYY-MM-DD for tree's end date or "--" if none.
        For tree titles, we want to always have an end date to display. If
        the tree had a `filter_end_date` arg, we can use that. However, if no
        filter arg was given for the end date, we use the implicit end date of
        when the tree build was kicked off (from PhyloRun.start_datetime), as
        the tree build process can only use samples up to the moment in time
        when it was kicked off, so it's an implicit end date to samples.
        If there is no date available at all, we return "--" as an absolute
        fall back. PhyloRun.start_datetime is not actually guaranteed at the DB
        level, but all our code that creates runs always provides one (as of
        Nov 2022, every single run has a start_datetime). The fall back is
        provided just to code defensively in case something weird ever happens.
        """
        formatted_end_date = "--"  # safe default, should never happen
        filter_end_date = self.template_args.get("filter_end_date")
        if filter_end_date is not None:
            formatted_end_date = dateparser.parse(filter_end_date).strftime("%Y-%m-%d")
        else:
            # `run_start_datetime` is a `context` kwarg, so not guaranteed
            run_start_datetime = getattr(self, "run_start_datetime", None)
            if run_start_datetime is not None:
                formatted_end_date = run_start_datetime.strftime("%Y-%m-%d")
            else:
                print("WARNING -- Run missing a start_datetime. Default to '--'")
        return formatted_end_date
    def update_config(self, config):
        """Apply shared params, the subclass tweaks, and prune unused schemes."""
        self._update_config_params(config)
        subsampling = config["subsampling"][self.subsampling_scheme]
        self.run_type_config(config, subsampling)
        # Remove unused subsampling schemes from our output file
        config["subsampling"] = {self.subsampling_scheme: subsampling}
    def run_type_config(self, config, subsampling):
        """Subclass hook: mutate `subsampling` for this tree type."""
        raise NotImplementedError("base class doesn't implement this")
class OverviewPlugin(TreeTypePlugin):
    """Overview tree: broad sampling across the group's jurisdiction."""
    crowding_penalty = 0.1
    tree_type = TreeType.OVERVIEW
    subsampling_scheme = "OVERVIEW"
    def run_type_config(self, config, subsampling):
        # Special case: CDPH overview trees also pull in RIPHL-submitted samples.
        if self.group.name == "Chicago Department of Public Health":
            if "--query" in subsampling["group"]["query"]: # SC2 format
                subsampling["group"][
                    "query"
                ] = '''--query "((location == '{location}') & (division == '{division}')) | submitting_lab == 'RIPHL at Rush University Medical Center'"'''
            else: # MPX format
                subsampling["group"]["query"] = (
                    "("
                    + subsampling["group"]["query"]
                    + ") | submitting_lab == 'RIPHL at Rush University Medical Center'"
                )
        # Handle sampling date & pango lineage filters
        apply_filters(config, subsampling, self.template_args)
        # Update our sampling for state/country level builds if necessary
        update_subsampling_for_location(self.tree_build_level, subsampling)
        # Update country and international max sequences.
        if self.tree_build_level == "country":
            subsampling["international"]["max_sequences"] = 1000
        if self.tree_build_level == "division":
            subsampling["country"]["max_sequences"] = 800
            subsampling["international"]["max_sequences"] = 200
        # If there aren't any selected samples
        # Either due to being a scheduled run or no user selection
        # drop the include file so the tree run doesn't break.
        # NOTE(review): original comment said "put reference sequences in
        # include.txt" but the code deletes the include entry — confirm intent.
        if self.num_included_samples == 0:
            if config.get("files", {}).get("include"):
                del config["files"]["include"]
class NonContextualizedPlugin(TreeTypePlugin):
    """Non-contextualized tree: group samples only, no outside context."""
    crowding_penalty = 0.1
    tree_type = TreeType.NON_CONTEXTUALIZED
    subsampling_scheme = "NON_CONTEXTUALIZED"
    def run_type_config(self, config, subsampling):
        # Handle sampling date & pango lineage filters
        apply_filters(config, subsampling, self.template_args)
        # Update our sampling for state/country level builds if necessary
        update_subsampling_for_location(self.tree_build_level, subsampling)
        # If there aren't any selected samples due to no user selection,
        # drop the include file so the tree run doesn't break.
        if self.num_included_samples == 0:
            if config.get("files", {}).get("include"):
                del config["files"]["include"]
# Set max_sequences for targeted builds.
class TargetedPlugin(TreeTypePlugin):
    """Targeted tree: focused on the user-selected samples plus context."""
    crowding_penalty = 0
    tree_type = TreeType.TARGETED
    subsampling_scheme = "TARGETED"
    def run_type_config(self, config, subsampling):
        """
        DATA we can use in this function:
          config : the entire mega-template data structure, with some fields already updated by BaseNextstrainConfigBuilder.update_build()
          subsampling : the subsampling scheme for *this build type only* (ex: mega_template["subsampling"]["TARGETED"])
          self.subsampling_scheme : the value a few lines above
          self.crowding_penalty : the value a few lines above
          self.group : information about the group that this run is for (ex: self.group.name or self.group.default_tree_location)
          self.num_sequences : the number of aspen samples written to our fasta input file
          self.num_included_samples : the number of samples in include.txt (aspen + gisaid samples) for on-demand runs only

        EXAMPLES SECTION:
          Delete a group from a subsampling scheme:
            del subsampling["international"]
          Delete a setting from a group:
            del subsampling["international"]["seq_per_group"]
          Add a group to a subsampling scheme:
            subsampling["my_new_group_name"] = {
                "group_by": "region",
                "max_sequences": 200,
                "query": '--query "(foo != {bar})"'
            }
          Add a setting to a group (this is the same as updating an existing setting!):
            subsampling["international"]["mynewsetting"] = "mynewvalue"
        """
        # Adjust group sizes if we have a lot of samples: the "closest" group
        # scales 1:1 with included samples, the context groups scale at 1:4.
        closest_max_sequences = 100
        other_max_sequences = 25
        if self.num_included_samples >= 100:
            closest_max_sequences = self.num_included_samples
            other_max_sequences = int(ceil(self.num_included_samples / 4.0))
        subsampling["closest"]["max_sequences"] = closest_max_sequences
        subsampling["group"]["max_sequences"] = (
            other_max_sequences * 2
        )  # Temp mitigation for missing on-demand overview
        subsampling["state"]["max_sequences"] = (
            other_max_sequences * 2
        )  # Temp mitigation for missing on-demand overview
        subsampling["country"]["max_sequences"] = other_max_sequences
        subsampling["international"]["max_sequences"] = other_max_sequences
        # Update our sampling for state/country level builds if necessary
        update_subsampling_for_location(self.tree_build_level, subsampling)
        # Increase int'l sequences for state/country builds.
        if (
            self.tree_build_level != "location"
            and subsampling["international"]["max_sequences"] < 100
        ):
            subsampling["international"]["max_sequences"] = 100
def update_subsampling_for_location(tree_build_level, subsampling):
    """Apply build-scope adjustments to a subsampling scheme.

    Location-level builds need no changes; country and division builds
    each get their own group trimming and query rewrite.
    """
    if tree_build_level == "country":
        update_subsampling_for_country(subsampling)
    elif tree_build_level == "division":
        update_subsampling_for_division(subsampling)
def update_subsampling_for_country(subsampling):
    """Trim a subsampling scheme down for a country-scope build.

    Drops the now-redundant "state" and "country" sampling groups and
    rewrites the local "group" query to filter on country alone.
    """
    # These context groups duplicate the whole build scope at country level.
    for redundant in ("state", "country"):
        subsampling.pop(redundant, None)
    group = subsampling["group"]
    if "--query" in group["query"]:
        # SC2-style scheme: query is a full augur CLI argument string.
        group["query"] = '''--query "(country == '{country}')"'''
    else:
        # MPX-style scheme: query is a bare expression.
        group["query"] = "(country == '{country}')"
def update_subsampling_for_division(subsampling):
    """Trim a subsampling scheme down for a division-scope build.

    Drops the redundant "state" group and rewrites the local "group"
    query to filter on division, keeping the country filter in case of
    multiple divisions with the same name worldwide.
    """
    subsampling.pop("state", None)
    group = subsampling["group"]
    if "--query" in group["query"]:
        # SC2-style scheme: query is a full augur CLI argument string.
        group["query"] = '''--query "(division == '{division}') & (country == '{country}')"'''
    else:
        # MPX-style scheme: query is a bare expression.
        group["query"] = "(division == '{division}') & (country == '{country}')"
def apply_filters(config, subsampling, template_args):
    """Apply collection-date and pango-lineage filters to the scheme.

    Detects the scheme flavor from the "group" query: SC2-style schemes
    store full CLI argument strings, MPX-style schemes store bare values.

    NOTE(review): min_date is never applied to the
    "international_serial_sampling" group while max_date is — confirm
    that asymmetry is intentional.
    """
    # MPX format
    include_arguments_in_filters = False
    lineage_field = "lineage"
    if "--query" in subsampling["group"]["query"]:
        # SC2 format
        include_arguments_in_filters = True
        lineage_field = "pango_lineage"
    min_date = template_args.get("filter_start_date")
    if min_date:
        # Support date expressions like "5 days ago" in our cron schedule.
        min_date = dateparser.parse(min_date).strftime("%Y-%m-%d")
        if include_arguments_in_filters:
            subsampling["group"][
                "min_date"
            ] = f"--min-date {min_date}"  # ex: --max-date 2020-01-01
        else:
            subsampling["group"]["min-date"] = str(min_date)  # ex: max-date: 2020-01-01
    max_date = template_args.get("filter_end_date")
    if max_date:
        # Support date expressions like "5 days ago" in our cron schedule.
        max_date = dateparser.parse(max_date).strftime("%Y-%m-%d")
        if include_arguments_in_filters:
            subsampling["group"][
                "max_date"
            ] = f"--max-date {max_date}"  # ex: --max-date 2020-01-01
            if "international_serial_sampling" in subsampling:
                subsampling["international_serial_sampling"][
                    "max_date"
                ] = f"--max-date {max_date}"  # ex: --max-date 2020-01-01
        else:
            subsampling["group"]["max-date"] = str(max_date)  # ex: max-date: 2020-01-01
            if "international_serial_sampling" in subsampling:
                subsampling["international_serial_sampling"]["max-date"] = str(
                    max_date
                )  # ex: max-date: 2020-01-01
    pango_lineages = template_args.get("filter_pango_lineages")
    if pango_lineages:
        # Nextstrain is rather particular about the acceptable syntax for
        # values in the pango_lineages key. Before modifying please see
        # https://discussion.nextstrain.org/t/failure-when-specifying-multiple-pango-lineages-in-a-build/670
        clean_values = [re.sub(r"[^0-9a-zA-Z.]", "", item) for item in pango_lineages]
        clean_values.sort()
        config["builds"]["aspen"]["pango_lineage"] = clean_values
        # Remove the last " from our old query so we can inject more filters
        end_string = ""
        old_query = subsampling["group"]["query"]
        if old_query.endswith('"'):
            end_string = '"'
            old_query = old_query[:-1]
        pango_query = " & (" + lineage_field + " in {pango_lineage})"
        subsampling["group"]["query"] = old_query + pango_query + end_string
| chanzuckerberg/czgenepi | src/backend/aspen/workflows/nextstrain_run/build_plugins/type_plugins.py | type_plugins.py | py | 14,798 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "aspen.workflows.nextstrain_run.build_plugins.base_plugin.BaseConfigPlugin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "aspen.database.models.TreeType",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "dateparser.parse",
"line_number": 55... |
35851037675 | #Django Libs
from django.http.response import FileResponse, HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import View, CreateView, DeleteView, UpdateView, DetailView, ListView, TemplateView
from django.db.models import Sum
from django.core.serializers import serialize
#Self Libs
from .forms import ComprasForm, ConsumidorFinalForm, ContribuyenteForm, EmpresaF, LibroForm
from .models import *
from empresas.models import Empresa as Cliente
from .export import *
#Factura CF
class FacturaCFCV(CreateView):
    """Create view for consumidor-final invoices inside one Libro.

    Renders the libro's existing invoices with column totals and, on
    success, redirects back to the same creation page.
    """
    model = FacturaCF
    template_name = "iva/lfcf.html"
    form_class = ConsumidorFinalForm
    def get_context_data(self, **kwargs):
        # NOTE(review): 'direccion' points at the 'cont:' namespace while the
        # success URL uses 'iva:' — confirm which namespace is correct.
        facturas = Libro.objects.get(id=self.kwargs["libro"]).facturacf
        context = super(FacturaCFCV,self).get_context_data(**kwargs)
        context["libro"] = Libro.objects.get(id=self.kwargs["libro"])
        context['direccion'] = 'cont:nueva_fcf'
        context['titulo'] = 'Crear Factura Consumidor Final'
        context["parametro"] = self.kwargs['libro']
        # Per-column sums over every invoice already stored in the libro.
        context["totales"] = [
            facturas.all().aggregate(total_exento=Sum('exento'))["total_exento"],
            facturas.all().aggregate(total_local=Sum('locales'))["total_local"],
            facturas.all().aggregate(total_exportacion=Sum('exportaciones'))["total_exportacion"],
            facturas.all().aggregate(total_ventasNSujetas=Sum('ventasNSujetas'))["total_ventasNSujetas"],
            facturas.all().aggregate(total_venta=Sum('ventaTotal'))["total_venta"],
            facturas.all().aggregate(total_ventaCtaTerceros=Sum('ventaCtaTerceros'))["total_ventaCtaTerceros"],
        ]
        return context
    def get_initial(self, **kwargs):
        # Pre-select the libro the new invoice belongs to.
        initial = super(FacturaCFCV,self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self,**kwargs):
        libro=Libro.objects.get(id=self.kwargs["libro"])
        return reverse("iva:nueva_fcf",args=[libro.id])
#Factura Ct
class FacturaCtCV(CreateView):
    """Create view for contribuyente invoices inside one Libro.

    Renders the libro's existing invoices with column totals and, on
    success, redirects back to the same creation page.
    """
    model = FacturaCt
    template_name = "iva/lfct.html"
    form_class = ContribuyenteForm
    def get_context_data(self, **kwargs):
        # NOTE(review): 'direccion' points at the 'cont:' namespace while the
        # success URL uses 'iva:' — confirm which namespace is correct.
        facturas = Libro.objects.get(id=self.kwargs["libro"]).facturact
        context = super(FacturaCtCV,self).get_context_data(**kwargs)
        context["libro"] = Libro.objects.get(id=self.kwargs["libro"])
        context['direccion'] = 'cont:nueva_fct'
        context['titulo'] = 'Crear Factura Contribuyente'
        context["parametro"] = self.kwargs['libro']
        # Per-column sums over every invoice already stored in the libro.
        context["totales"] = [
            facturas.all().aggregate(total=Sum('venExentas'))["total"],
            facturas.all().aggregate(total=Sum('venGravadas'))["total"],
            facturas.all().aggregate(total=Sum('ventasNSujetas'))["total"],
            facturas.all().aggregate(total=Sum('ivaDebFiscal'))["total"],
            facturas.all().aggregate(total=Sum('vtVentas'))["total"],
            facturas.all().aggregate(total=Sum('vtIVA'))["total"],
            facturas.all().aggregate(total=Sum('ivaRetenido'))["total"],
            facturas.all().aggregate(total=Sum('total'))["total"],
        ]
        return context
    def get_initial(self, **kwargs):
        # Pre-select the libro the new invoice belongs to.
        initial = super(FacturaCtCV,self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self,**kwargs):
        libro=Libro.objects.get(id=self.kwargs["libro"])
        return reverse("iva:nueva_fct",args=[libro.id])
#Factura Cm
class FacturaCmCV(CreateView):
    """Create view for purchase (compras) invoices inside one Libro.

    Renders the libro's existing invoices with column totals and, on
    success, redirects back to the same creation page.
    """
    model = FacturaCm
    template_name = "iva/lfcm.html"
    form_class = ComprasForm
    def get_context_data(self, **kwargs):
        # NOTE(review): 'direccion' points at the 'cont:' namespace while the
        # success URL uses 'iva:' — confirm which namespace is correct.
        facturas = Libro.objects.get(id=self.kwargs["libro"]).facturacm
        context = super(FacturaCmCV,self).get_context_data(**kwargs)
        context["libro"] = Libro.objects.get(id=self.kwargs["libro"])
        context['direccion'] = 'cont:nueva_fcm'
        context['titulo'] = 'Crear Factura Compra'
        context["parametro"] = self.kwargs['libro']
        # Per-column sums over every invoice already stored in the libro.
        context["totales"] = [
            facturas.all().aggregate(total=Sum('cExenteInterna'))["total"],
            facturas.all().aggregate(total=Sum('cExenteImportaciones'))["total"],
            facturas.all().aggregate(total=Sum('cGravadaInterna'))["total"],
            facturas.all().aggregate(total=Sum('cGravadaImportaciones'))["total"],
            facturas.all().aggregate(total=Sum('comprasNSujetas'))["total"],
            facturas.all().aggregate(total=Sum('ivaCdtoFiscal'))["total"],
            facturas.all().aggregate(total=Sum('totalCompra'))["total"],
            facturas.all().aggregate(total=Sum('retencionPretencion'))["total"],
            facturas.all().aggregate(total=Sum('anticipoCtaIva'))["total"],
            facturas.all().aggregate(total=Sum('ivaTerceros'))["total"],
        ]
        return context
    def get_initial(self, **kwargs):
        # Pre-select the libro the new invoice belongs to.
        initial = super(FacturaCmCV,self).get_initial()
        initial["libro"] = Libro.objects.get(id=self.kwargs["libro"]).id
        return initial
    def get_success_url(self,**kwargs):
        libro=Libro.objects.get(id=self.kwargs["libro"])
        return reverse("iva:nueva_fcm",args=[libro.id])
#Libros vistas
class LibroCV(CreateView):
    """Modal create view for a Libro tied to an empresa and a libro tipo."""
    model = Libro
    template_name = "iva/modal.html"
    form_class = LibroForm
    def get_context_data(self, **kwargs):
        context = super(LibroCV,self).get_context_data(**kwargs)
        context["empresa"] = Cliente.objects.get(id=self.kwargs["empresa"])
        context['direccion'] = 'iva:nuevo_libro'
        context['titulo'] = 'Crear Libro'
        context["tipo"] = self.kwargs["tipo"]
        context["parametro"] = self.kwargs['empresa']
        context["parametro2"] = self.kwargs['tipo']
        return context
    def get_initial(self, **kwargs):
        # Pre-select the owning client and libro type from the URL kwargs.
        initial = super(LibroCV,self).get_initial()
        initial["cliente"] = Cliente.objects.get(id=self.kwargs["empresa"]).id
        initial["tipo"] = self.kwargs["tipo"]
        return initial
    def get_success_url(self,**kwargs):
        return reverse("iva:lista_libro",args=[self.kwargs["empresa"],self.kwargs["tipo"]])
class LibroLV(ListView):
    """Lists one empresa's libros of a given tipo, ordered by year then month."""
    model = Libro
    template_name = "iva/llibro.html"
    context_object_name = 'libros'
    def get_context_data(self, **kwargs):
        context = super(LibroLV,self).get_context_data(**kwargs)
        context["cliente"] = Cliente.objects.get(id=self.kwargs['empresa'])
        context["tipo"] = self.kwargs["tipo"]
        return context
    def get_queryset(self):
        # Restrict to the URL's client/tipo and give a stable chronological order.
        queryset = super(LibroLV, self).get_queryset()
        queryset = queryset.filter(cliente__id = self.kwargs['empresa'],tipo=self.kwargs["tipo"]).order_by('ano','mes')
        return queryset
class EmpresaDV(DetailView):
    """Detail page for a client empresa."""
    model = Cliente
    template_name = "iva/detalle_cliente.html"
    context_object_name = "cliente"
#Empresa Vistas
class EmpresaCV(CreateView):
    """Create view for an Empresa record."""
    model = Empresa
    template_name = "iva/empresa.html"
    form_class = EmpresaF
    def get_context_data(self, **kwargs):
        # NOTE(review): 'direccion' uses the 'cont:' namespace while sibling
        # views use 'iva:' — confirm which namespace is correct.
        context = super(EmpresaCV,self).get_context_data(**kwargs)
        context['direccion'] = 'cont:nuevo_empresa'
        context['titulo'] = 'Crear Empresa'
        return context
class EmpresaDetail(DetailView):
    """Returns one Empresa (looked up by its nRegistro URL kwarg) as JSON.

    NOTE(review): template_name is never used because get() is overridden
    to return serialized JSON directly.
    """
    model = Empresa
    template_name='empresaJson.html'
    def get(self,request,*args, **kwarg ):
        empresa = Empresa.objects.get(nRegistro = self.kwargs['nReg'])
        empresa = serialize('json',[empresa,])
        return HttpResponse(empresa,'application/json')
#Exportacion
class ExportarView(View):
    """Streams the exported spreadsheet file for a Libro.

    URL kwargs:
        tipo: 1 = consumidor final, 2 = contribuyente, 3 = compras.
        id_libro: primary key of the Libro to export.
    """
    def get(self, request, *args, **kwargs):
        tipo = self.kwargs.get('tipo')
        id_libro = self.kwargs.get('id_libro')
        # Validates the libro exists (raises Libro.DoesNotExist otherwise).
        Libro.objects.get(id=id_libro)
        # Map each tipo code to its exporter. Previously an unknown tipo
        # crashed with UnboundLocalError; now it returns a clear 400.
        exporters = {
            1: export_libroCF,
            2: export_libroct,
            3: export_librocm,
        }
        exporter = exporters.get(tipo)
        if exporter is None:
            return HttpResponse('Tipo de libro no reconocido', status=400)
        # The exporter writes the workbook to disk and returns its path.
        export_path = exporter(id_libro)
        return FileResponse(open(export_path, 'rb'))
| RobertoMarroquin/garrobo | iva/views.py | views.py | py | 8,321 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.CreateView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "forms.ConsumidorFinalForm",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 29,
"usage_type": "call"
},
{
"ap... |
43753620311 | from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import (
api_view,
permission_classes,
authentication_classes
)
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from repository.models import Repository, Branch, Commit
from repository.serializers.repo_serializers import (
RepositorySerializer,
RepositoryCreateSerializer,
RepositoryUpdateSerializer,
)
from repository.serializers.branch_serializers import BranchSerializer
from repository.serializers.commit_serializers import CommitSerializer
from users.serializers import UserSerializer
from backend.exceptions import GeneralException
from datetime import datetime
import requests
import re
import json
import pytz
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_one_repo(request, repo_id):
    """Return a single repository by primary key (404 when missing)."""
    repository = get_object_or_404(Repository, pk=repo_id)
    return Response(RepositorySerializer(repository, many=False).data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_repos(request, username):
    """List every repository owned by the given username."""
    owned = Repository.objects.filter(user__username=username)
    return Response(RepositorySerializer(owned, many=True).data)
@api_view(['POST'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def create_repo(request):
    """Create a repository for the signed-in user and import its GitHub data.

    Raises GeneralException when the payload is invalid or the name is taken.
    """
    repo_ser = RepositoryCreateSerializer(data=request.data)
    if not repo_ser.is_valid():
        raise GeneralException("Invalid request.")
    # Repository names must be globally unique.
    found_repos = Repository.objects.filter(name=repo_ser.data['name'])
    if len(found_repos) > 0:
        raise GeneralException("Repository with given name already exists.")
    repo = Repository.objects.create(
        name=repo_ser.data['name'],
        description=repo_ser.data['description'],
        url=repo_ser.data['url'],
        is_private=repo_ser.data['is_private'],
        user=request.user,
    )
    repo.save()
    # Pull README, branches and commits from the GitHub API into the DB.
    load_repo(repo, request.user)
    serializer = RepositorySerializer(repo, many=False)
    return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def update_repo(request, repo_id):
    """Update a repository's name, description and visibility.

    Raises GeneralException on an invalid payload, or on a name collision
    when the repository is being renamed.
    """
    repo_ser = RepositoryUpdateSerializer(data=request.data)
    if not repo_ser.is_valid():
        raise GeneralException("Invalid request.")
    repo = get_object_or_404(Repository, pk=repo_id)
    # Only check for duplicates when the name actually changes.
    if (repo.name != repo_ser.data['name']):
        found_repos = Repository.objects.filter(name=repo_ser.data['name'])
        if len(found_repos) > 0:
            raise GeneralException(
                "Repository with given name already exists.")
    repo.name = repo_ser.data['name']
    repo.description = repo_ser.data['description']
    repo.is_private = repo_ser.data['is_private']
    repo.save()
    repo.refresh_from_db()
    serializer = RepositorySerializer(repo, many=False)
    return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def reload_repo(request, repo_id):
    """Re-import a repository from GitHub, replacing all stored branches.

    NOTE(review): commits are presumably removed by a FK cascade when their
    branch is deleted — confirm against the model definitions.
    """
    repo = get_object_or_404(Repository, pk=repo_id)
    branches = Branch.objects.filter(repo__id=repo_id)
    for branch in branches:
        branch.delete()
    load_repo(repo, request.user)
    return Response()
@api_view(['DELETE'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def delete_repo(request, repo_id):
    """Delete a repository by primary key (404 when missing)."""
    target = get_object_or_404(Repository, pk=repo_id)
    target.delete()
    return Response()
def load_repo_readme(remote_username, remote_repo_name):
    """Fetch the raw README text of a GitHub repository via the public API."""
    info_url = 'https://api.github.com/repos/{0}/{1}/readme'.format(
        remote_username, remote_repo_name)
    readme_info = requests.get(info_url).json()
    # The metadata endpoint only returns a download link; fetch the body.
    return requests.get(readme_info['download_url']).text
def load_repo(repo, user):
    """Import README, branches and commits for `repo` from the GitHub API.

    Parses the GitHub owner/name out of repo.url, stores the README text,
    creates a Branch row per remote branch with its Commit rows, then links
    each branch to its latest commit.
    """
    groups = re.findall(r"^https:\/\/github.com\/(.*)\/(.*)", repo.url)
    remote_username = groups[0][0]
    remote_repo_name = groups[0][1]
    # Get and set README
    repo.readme = load_repo_readme(remote_username, remote_repo_name)
    repo.save()
    branches_resp = requests.get(
        'https://api.github.com/repos/{0}/{1}/branches'.format(remote_username, remote_repo_name))
    for b in branches_resp.json():
        branch = Branch.objects.create(
            name=b['name'],
            creator=user,
            repo=repo,
            last_commit=None,
        )
        branch.save()
        commits_resp = requests.get(
            'https://api.github.com/repos/{0}/{1}/commits?sha={2}'
            .format(remote_username, remote_repo_name, b['name']))
        for c in commits_resp.json():
            # NOTE(review): the 'Z' (UTC) wall-clock time is tagged as
            # Europe/Belgrade here, which shifts the instant — confirm
            # this is intended.
            c_time = datetime.strptime(
                c['commit']['author']['date'], '%Y-%m-%dT%H:%M:%SZ')
            timezone = pytz.timezone("Europe/Belgrade")
            c_time_zoned = timezone.localize(c_time)
            commit = Commit.objects.create(
                message=c['commit']['message'],
                hash=c['sha'],
                timestamp=c_time_zoned,
                author_email=c['commit']['author']['email'],
                branch=branch,
            )
    # Add latest commit to branch
    for b in branches_resp.json():
        branches = Branch.objects.filter(
            repo__name=repo.name, name=b['name'])
        commits = Commit.objects.filter(
            branch__name=b['name'], hash=b['commit']['sha'])
        if len(branches) > 0:
            if len(commits) > 0:
                branches[0].last_commit = commits[0]
                branches[0].save()
    # NOTE(review): this returns only the *last* branch's commit list and
    # all callers ignore the return value — confirm before relying on it.
    return Response(commits_resp.json())
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_branches(request, repo_name):
    """List every branch belonging to the named repository."""
    branch_qs = Branch.objects.filter(repo__name=repo_name)
    return Response(BranchSerializer(branch_qs, many=True).data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_all_commits(request, repo_name, branch_name):
    """List commits of one branch; '~' in branch_name decodes to '/'.

    The '~' substitution lets branch names containing slashes (e.g.
    "feature/x") be passed as a single URL path segment.
    """
    repos = Commit.objects.filter(
        branch__repo__name=repo_name).filter(branch__name=branch_name.replace('~', '/'))
    serializer = CommitSerializer(repos, many=True)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def get_repo_collaborators(request, repo_id):
    """List the collaborators of a repository (404 when missing)."""
    repository = get_object_or_404(Repository, pk=repo_id)
    return Response(UserSerializer(repository.collaborators, many=True).data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def update_collaborators(request, repo_id):
    """Replace a repository's collaborator set with the posted user ids.

    Only the repository owner may change collaborators. The request body is
    a JSON list of user ids; an empty list removes all collaborators.
    Raises GeneralException when the requester is not the owner.
    """
    repo = get_object_or_404(Repository, pk=repo_id)
    signed_in_user = request.user.id
    if repo.user.id != signed_in_user:
        raise GeneralException("Not authorized")
    user_id_list = request.data
    if len(user_id_list) > 0:
        repo.collaborators.clear()
        repo.collaborators.add(*user_id_list)
    else:
        # BUG FIX: this branch previously cleared repo.assignees (a
        # copy-paste from an issues view), leaving the collaborator list
        # untouched when an empty list was posted.
        repo.collaborators.clear()
    repo.save()
    repo.refresh_from_db()
    serializer = UserSerializer(repo.collaborators, many=True)
    return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentication])
@permission_classes([IsAuthenticated])
def search_users_for_collaborators(request, repo_id, search_value):
    """Search active users who could be added as collaborators.

    Matches usernames containing `search_value`, excluding staff users,
    superusers, the signed-in owner, and users who already collaborate on
    the repository. Only the owner may search; otherwise GeneralException.
    """
    signed_in_user = request.user.id
    repo = get_object_or_404(Repository, pk=repo_id)
    if repo.user.id != signed_in_user:
        raise GeneralException("Not authorized")
    repo_collaborators = repo.collaborators.all()
    potential_collaborators = User.objects.filter(
        is_active=True,
        is_superuser=False,
        is_staff=False,
        username__icontains=search_value,
    ).exclude(pk=signed_in_user)
    serializer = UserSerializer(
        potential_collaborators.difference(repo_collaborators), many=True)
    return Response(serializer.data)
| lazarmarkovic/uks2020 | backend/repository/views/repo_views.py | repo_views.py | py | 8,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "repository.models.Repository",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "repository.serializers.repo_serializers.RepositorySerializer",
"line_numb... |
41635503393 | from google.cloud import firestore, storage, exceptions
import os
# Firestore client and the Flamelink content collection used by every query.
db = firestore.Client()
content = db.collection('fl_content')
# Cloud Storage bucket holding the site's uploaded media files.
storage_client = storage.client.Client()
bucket = storage_client.get_bucket('psyclonic-studios-website.appspot.com')
def new_transaction():
    """Return a fresh Firestore transaction bound to the module client."""
    return db.transaction()
@firestore.transactional
def get_artwork_collection(transaction, size, args):
    """Return every artwork document with its image URLs resolved.

    Args:
        transaction: Firestore transaction all reads run under.
        size: image size key forwarded to get_sized_image_urls.
        args: request args forwarded to sort_query for ordering.
    """
    artworks_query = content.where('_fl_meta_.schema', '==', 'artwork')
    artworks_query = sort_query(artworks_query, args)
    artworks = []
    for artwork_ref in artworks_query.stream(transaction=transaction):
        artwork = artwork_ref.to_dict()
        # Resolve each image document reference into sized URLs.
        artwork['images'] = [
            get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
            for image in artwork['images']
        ]
        # hack to fix flamelink screwup: inventory arrives as a non-int.
        artwork['inventory'] = int(artwork['inventory'])
        artworks.append(artwork)
    return artworks
@firestore.transactional
def get_artwork(transaction, id, size):
    """Return one artwork document by id with image URLs resolved.

    Returns None when the document does not exist.
    """
    artwork = content.document(id).get(transaction=transaction).to_dict()
    if not artwork:
        return None
    artwork['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size) for image in artwork['images']]
    artwork['inventory'] = int(artwork['inventory']) # hack to fix flamelink screwup
    return artwork
@firestore.transactional
def get_artwork_from_ref(transaction, ref, size):
    """Resolve an artwork document reference, returning it with image URLs.

    Returns None when the referenced document does not exist. Unlike
    get_artwork, this does not coerce 'inventory' to int.
    """
    artwork = ref.get(transaction=transaction).to_dict()
    if not artwork:
        return None
    artwork['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict(), size) for image in artwork['images']]
    return artwork
@firestore.transactional
def get_non_series_artwork_collection(transaction, size, args):
    """Return artworks not belonging to any series, with images resolved.

    NOTE(review): unlike get_artwork_collection this does not coerce
    'inventory' to int — confirm whether callers depend on that.
    """
    artworks_query = content.where('_fl_meta_.schema', '==', 'artwork').where('partOfASeries', '==', False)
    artworks_query = sort_query(artworks_query, args)
    artworks = []
    for artwork_ref in artworks_query.stream(transaction=transaction):
        artwork = artwork_ref.to_dict()
        # Resolve each image document reference into sized URLs.
        artwork['images'] = [
            get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
            for image in artwork['images']
        ]
        artworks.append(artwork)
    return artworks
@firestore.transactional
def get_series_collection(transaction, size, args):
    """Return all series documents, each with a resolved thumbnail image.

    The first series image is used as the thumbnail when present; otherwise
    the first image of the series' first artwork is used.
    """
    series_query = content.where('_fl_meta_.schema', '==', 'series')
    series_query = sort_query(series_query, args)
    series_collection = []
    for series_ref in series_query.stream(transaction=transaction):
        series = series_ref.to_dict()
        series_image_refs = series['seriesImages']
        if series_image_refs:
            # Only the first image is needed here; the previous code also
            # resolved every series image into an unused list, costing one
            # extra document read per image.
            series['thumbnail_image'] = get_sized_image_urls(
                series_image_refs[0].get(transaction=transaction).to_dict(), size)
        else:
            artwork = series['artworks'][0].get(transaction=transaction).to_dict()
            artwork_image = artwork['images'][0].get(transaction=transaction).to_dict()
            series['thumbnail_image'] = get_sized_image_urls(artwork_image, size)
        series_collection.append(series)
    return series_collection
@firestore.transactional
def get_series(transaction, id, size):
    """Return one series document with images and artworks resolved.

    Returns None when no document exists for `id`. Adds two keys to the
    returned dict: 'series_images' (resolved URL dicts) and
    'artworks_resolved' (artwork dicts with resolved images and int
    inventory).
    """
    series = content.document(id).get(transaction=transaction).to_dict()
    if series is None:
        return None
    # (A second `if not series` check used to sit after the image resolution
    # below; it was unreachable and has been removed.)
    series_image_refs = series['seriesImages']
    series_image_urls = [
        get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
        for image in series_image_refs
    ]
    artworks_resolved = []
    for artwork_ref in series['artworks']:
        artwork = artwork_ref.get(transaction=transaction).to_dict()
        image_urls = [
            get_sized_image_urls(image.get(transaction=transaction).to_dict(), size)
            for image in artwork['images']
        ]
        artwork['images'] = image_urls
        # hack to fix flamelink screwup: inventory arrives as a non-int.
        artwork['inventory'] = int(artwork['inventory'])
        artworks_resolved.append(artwork)
    series['artworks_resolved'] = artworks_resolved
    series['series_images'] = series_image_urls
    return series
#
#@firestore.transactional
#def get_blog_collection(transaction, size, args):
# blog_collection_query = content.where('_fl_meta_.schema', '==', 'posts').where('status', '==', 'published')
# blog_collection_query = sort_query(blog_collection_query, args)
# blog_collection = []
# for blog_ref in blog_collection_query.stream(transaction=transaction):
# blog = blog_ref.to_dict()
# blog_thumbnail_ref = blog['thumbnail'][0]
# blog_thumbnail = get_file_url(get_image_size_path(blog_thumbnail_ref.get(transaction=transaction).to_dict(), size))
# blog['thumbnail_image'] = blog_thumbnail
# blog_collection.append(blog)
# return blog_collection
#
#@firestore.transactional
#def get_blog(transaction, id, size):
# blog = content.document(id).get(transaction=transaction).to_dict()
# thumbnail_ref = blog['thumbnail'][0]
# blog['thumbnail_image'] = get_file_url(get_image_size_path(thumbnail_ref.get(transaction=transaction).to_dict(), size))
# return blog
@firestore.transactional
def get_home_images(transaction):
    """Fetch the single websiteImages document positioned 'Home' and resolve
    each of its image references into sized-URL dicts.

    NOTE(review): next() raises StopIteration if no 'Home' document exists —
    presumably the CMS guarantees exactly one; confirm.
    """
    home_images_query = content.where('_fl_meta_.schema', '==', 'websiteImages').where('position', '==', 'Home').limit(1)
    home_images = next(home_images_query.stream(transaction=transaction)).to_dict()
    # No size cap is passed here (upto=None), so all size variants are kept.
    home_images['images'] = [get_sized_image_urls(image.get(transaction=transaction).to_dict()) for image in home_images['images']]
    return home_images
def get_cost(cost):
    """Look up a cost amount by its name in the 'costs' collection.

    Raises StopIteration if no cost with that name exists.
    """
    matches = content.where('_fl_meta_.schema', '==', 'costs').where('name', '==', cost).limit(1)
    cost_doc = next(matches.stream()).to_dict()
    return cost_doc['cost']
def get_international_shipping():
    """Return the flat international shipping cost from the 'costs' collection."""
    return get_cost('International shipping')

def get_website_component(component):
    """Return the 'content' field of the websiteComponents document whose
    component name matches. Raises StopIteration if no such component exists."""
    query = content.where('_fl_meta_.schema', '==', 'websiteComponents').where('component', '==', component).limit(1)
    component = next(query.stream()).to_dict()
    return component['content']

def get_home_text():
    """Return the Home page copy."""
    return get_website_component('Home')

def get_about():
    """Return the About page copy."""
    return get_website_component('About')

def get_policies():
    """Return the Policies page copy."""
    return get_website_component('Policies')
@firestore.transactional
def get_contribute_products(transaction, size, args):
    """List available support products with artwork and product images resolved.

    Each product gains a Stripe-style 'sku' plus 'artwork_image' and
    'product_image' sized-URL dicts. `args` supplies optional sort params.
    """
    query = content.where('_fl_meta_.schema', '==', 'supportProducts').where('available', '==', True)
    query = sort_query(query, args)
    products = []
    for snapshot in query.stream(transaction=transaction):
        product = snapshot.to_dict()
        product['sku'] = f'sku_{product["id"]}'
        artwork_image = product['artworkImage'][0].get(transaction=transaction).to_dict()
        product['artwork_image'] = get_sized_image_urls(artwork_image, size)
        product_image = product['productImage'][0].get(transaction=transaction).to_dict()
        product['product_image'] = get_sized_image_urls(product_image, size)
        products.append(product)
    return products
#def sync_contribute_products_to_stripe():
# contribution_product_id = STRIPE_DATA['contribution_product_id']
# contribute_products = get_contribute_products(new_transaction(), 375, None)
# products = {product['sku']: product for product in contribute_products}
# stripe_skus = stripe.SKU.list(product=contribution_product_id, limit=100)['data']
# stripe_sku_list = [sku['id'] for sku in stripe_skus]
# existing_skus = filter(lambda sku: sku in stripe_sku_list, products.keys())
# new_skus = filter(lambda sku: sku not in stripe_sku_list, products.keys())
#
# for sku in existing_skus:
# product = products[sku]
# stripe.SKU.modify(
# sku,
# currency='aud',
# inventory={'type': 'infinite'},
# active=product['available'],
# price=int(product['basePrice'] * 100),
# image=product['product_image_url'],
# product=contribution_product_id,
# attributes={'name': product['title']}
# )
#
# for sku in new_skus:
# product = products[sku]
# stripe.SKU.create(
# id=product['sku'],
# currency='aud',
# inventory={'type': 'infinite'},
# active=product['available'],
# price=int(product['basePrice'] * 100),
# image=product['product_image_url'],
# product=contribution_product_id,
# attributes={'name': product['title']}
# )
#
#def get_donation_skus():
# donation_product_id = STRIPE_DATA['donation_product_id']
# donation_skus = stripe.SKU.list(product=donation_product_id)['data']
# return sorted(donation_skus, key=lambda sku: sku['price'])
#
#def get_shipping_sku():
# shipping_sku = stripe.SKU.retrieve(STRIPE_DATA['shipping_sku_id'])
# return shipping_sku
def get_contribute_text():
    """Return the Contribute page copy."""
    return get_website_component('Contribute')

def get_subscribe():
    """Return the newsletter subscribe copy."""
    return get_website_component('Subscribe')

def get_contact_message():
    """Return the contact-form message copy."""
    return get_website_component('Contact message')

def get_contact_email_template():
    """Return the template used for contact notification emails."""
    return get_website_component('Contact email template')

def get_subscribe_success():
    """Return the thank-you copy shown after subscribing."""
    return get_website_component('Thankyou subscribe')

def post_email_address(email):
    """Upsert a newsletter subscriber document keyed by email address."""
    subscribers = db.collection('subscribers')
    subscribers.document(email).set({'subscribe': True}, merge=True)

def get_artwork_buy_email_template():
    """Return the template for artwork purchase emails."""
    return get_website_component('Artwork buy email')

def get_artwork_enquiry_email_template():
    """Return the template for artwork enquiry emails."""
    return get_website_component('Artwork enquire email')

def get_series_enquiry_email_template():
    """Return the template for series enquiry emails."""
    return get_website_component('Series enquire email')

def get_enquire_thankyou():
    """Return the thank-you copy shown after an enquiry."""
    return get_website_component('Thankyou enquiry')

def get_payment_success():
    """Return the thank-you copy shown after a successful payment."""
    return get_website_component('Thankyou payment')
def get_order(id):
    """Fetch an order document and resolve each artwork reference (at width
    300) along with its ordered quantity."""
    order = db.collection('orders').document(id).get().to_dict()
    txn = new_transaction()
    resolved = []
    for item in order['artworks']:
        resolved.append({'artwork': get_artwork_from_ref(txn, item['artwork'], 300),
                         'quantity': item['quantity']})
    order['artworks'] = resolved
    return order
def finalise_order(payment_intent):
    """Mark a Stripe order as paid and decrement artwork inventory.

    The order document id is the Stripe PaymentIntent id. Customer and
    shipping details are copied off the PaymentIntent; each ordered artwork's
    inventory is atomically decremented by the ordered quantity.

    NOTE: 'payment_recieved' is misspelled but is the established document
    key (update_order writes it too) — do not "fix" it without a data
    migration.
    """
    orders = db.collection('orders')
    order = orders.document(payment_intent.id)
    order.update({
        'payment_recieved': True,
        'customer': {
            'name': payment_intent.shipping.name,
            'email': payment_intent.receipt_email
        },
        'shipping': {
            'street': payment_intent.shipping.address.line1,
            'city': payment_intent.shipping.address.city,
            'state': payment_intent.shipping.address.state,
            'country': payment_intent.shipping.address.country,
            'postal_code': payment_intent.shipping.address.postal_code,
        },
        'paid_at': firestore.SERVER_TIMESTAMP
    })
    # Re-read the order to learn what was purchased, then decrement stock.
    # firestore.Increment makes each decrement atomic server-side.
    artworks = order.get().to_dict()['artworks']
    for artwork in artworks:
        artwork['artwork'].update({'inventory': firestore.Increment(-artwork['quantity'])})
def update_order(payment_intent_id, cart, subtotal, shipping_cost, total, payment_recieved):
    """Create or refresh the pending order document for a PaymentIntent.

    cart maps artwork document ids to quantities. The order is keyed by the
    PaymentIntent id; 'created_at' is set only the first time the document is
    seen.

    NOTE(review): the `payment_recieved` parameter is ignored — the update
    always writes False (finalise_order later flips it to True); confirm this
    is intentional.
    """
    orders = db.collection('orders')
    order = orders.document(payment_intent_id)
    try:
        order_doc = order.get()
        # get() can succeed but return an empty snapshot; treat that the same
        # as a brand-new order.
        if not order_doc.to_dict():
            order.set({'created_at': firestore.SERVER_TIMESTAMP}, merge=True)
    except exceptions.NotFound:
        order.set({'created_at': firestore.SERVER_TIMESTAMP}, merge=True)
    artworks = [{'artwork': content.document(id), 'quantity': cart[id]} for id in cart]
    order_update = {'payment_recieved': False, 'artworks': artworks, 'cost': {'subtotal': subtotal, 'shipping': shipping_cost, 'total': total}}
    order.update(order_update)
def get_flamelink_file_url(path):
    """Return the public storage URL for a file under Flamelink's media folder."""
    media_root = 'flamelink/media'
    return bucket.blob(os.path.join(media_root, path)).public_url
def get_sized_image_urls(image_dict, upto=None):
    """Resolve a Flamelink image dict's size variants into public URLs, in place.

    Rewrites image_dict['sizes'] from a list into a {width: size_dict} map,
    attaches a 'url' to every variant, and sets image_dict['full_size'] to the
    largest remaining variant. When `upto` is given, variants wider than it
    are dropped.

    Returns the same (mutated) image_dict.
    """
    filename = image_dict['file']
    # NOTE: the original also assigned image_dict['full_size'] here from the
    # raw file, but that value was unconditionally overwritten below — dead
    # store removed.
    sizes = image_dict['sizes']
    if upto:
        sizes = list(filter(lambda size: size['width'] <= upto, sizes))
    for s in sizes:
        s['storage_path'] = os.path.join('sized', str(s['path']), filename)
    sizes = {s['width']: s for s in sizes}
    # A 240px variant is always provided; its path uses the width directly
    # rather than the Flamelink 'path' value — presumably a known-good
    # pre-generated size. TODO confirm this overwrite of any existing 240
    # entry is intended.
    sizes[240] = {'width': 240, 'storage_path': os.path.join('sized', str(240), filename)}
    for size in sizes.values():
        size['url'] = get_flamelink_file_url(size['storage_path'])
    image_dict['full_size'] = sizes[max(sizes)]
    image_dict['sizes'] = sizes
    return image_dict
def sort_query(query, args=None):
    """Apply optional ordering to a Firestore query.

    args may carry 'sort_by' (field name) and 'sort_direction'
    ('ascending'/'descending'); with no args or no sort_by, the query is
    returned untouched.
    """
    if args is None:
        return query
    sort_by = args.get('sort_by', '')
    direction = args.get('sort_direction', '')
    if not sort_by:
        return query
    if direction == 'descending':
        return query.order_by(sort_by, direction=firestore.Query.DESCENDING)
    if direction == 'ascending':
        return query.order_by(sort_by, direction=firestore.Query.ASCENDING)
    return query.order_by(sort_by)
{
"api_name": "google.cloud.firestore.Client",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "google.cloud.firestore",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "google.cloud.storage.client.Client",
"line_number": 7,
"usage_type": "call"
},
{
... |
32527125731 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# input_data
import numpy as np
import pandas as pd
import pickle as pkl
def load_dc_data(dataset, data_dir='C:/YimingXu/Micromobility_DL/data'):
    """Load the demand matrix and the four relation matrices for DC zones.

    Args:
        dataset: unused; kept for interface compatibility with other loaders.
        data_dir: directory holding the CSV/pickle inputs. Parameterized so
            the script is runnable outside the original author's machine;
            the default preserves the previous hard-coded path.

    Returns:
        (demand_dataframe, adjacency, accessibility, landuse, demographic)
        where the four matrices are numpy matrices.
    """
    adj1 = np.mat(pd.read_csv(data_dir + '/adjacency_selected.csv'))
    adj2 = np.mat(pd.read_csv(data_dir + '/accessibility_selected.csv'))
    adj3 = np.mat(pd.read_csv(data_dir + '/landuse_selected.csv'))
    adj4 = np.mat(pd.read_csv(data_dir + '/demographic_selected.csv'))
    dc_dm = pd.read_pickle(data_dir + '/Input_Selected_Zones.pkl')
    return dc_dm, adj1, adj2, adj3, adj4
def preprocess_data(data, time_len, rate, seq_len, pre_len):
    """Split a time series into train/test sets and slide windows over each.

    Args:
        data: indexable sequence of time steps (rows).
        time_len: number of time steps to use from data.
        rate: fraction of steps assigned to the training split.
        seq_len: input window length.
        pre_len: prediction horizon length.

    Returns:
        (trainX, trainY, testX, testY) as numpy arrays, where each X sample
        is seq_len steps and each Y sample the following pre_len steps.
    """
    train_size = int(time_len * rate)
    train_data = data[:train_size]
    test_data = data[train_size:time_len]

    def windows(series):
        xs, ys = [], []
        for start in range(len(series) - seq_len - pre_len):
            window = series[start: start + seq_len + pre_len]
            xs.append(window[:seq_len])
            ys.append(window[seq_len:])
        return np.array(xs), np.array(ys)

    trainX1, trainY1 = windows(train_data)
    testX1, testY1 = windows(test_data)
    return trainX1, trainY1, testX1, testY1
# In[3]:
# utils
import tensorflow as tf
import scipy.sparse as sp
import numpy as np
def normalized_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2.

    Rows with zero degree produce inf in the inverse sqrt; those entries are
    zeroed so isolated nodes simply contribute nothing.

    Returns a float32 scipy COO sparse matrix.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt)
    result = adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    return result.astype(np.float32)
def sparse_to_tuple(mx):
    """Convert a scipy sparse matrix into a row-major-ordered tf.SparseTensor."""
    coo = mx.tocoo()
    indices = np.vstack((coo.row, coo.col)).transpose()
    tensor = tf.SparseTensor(indices, coo.data, coo.shape)
    return tf.sparse.reorder(tensor)
def calculate_laplacian(adj, lambda_max=1):
    """Build the GCN propagation matrix normalize(A + I) as a tf.SparseTensor.

    lambda_max is unused; kept for interface compatibility with callers.
    """
    normalized = normalized_adj(adj + sp.eye(adj.shape[0]))
    normalized = sp.csr_matrix(normalized).astype(np.float32)
    return sparse_to_tuple(normalized)
def weight_variable_glorot(input_dim, output_dim, name=""):
    """Create a weight Variable with Glorot/Xavier uniform initialization.

    Draws uniformly from [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))].
    """
    init_range = np.sqrt(6.0 / (input_dim + output_dim))
    # tf.random_uniform was removed in TensorFlow 2; use the compat.v1 alias
    # to match the rest of this script, which runs in v1-compatibility mode.
    initial = tf.compat.v1.random_uniform([input_dim, output_dim], minval=-init_range,
                                          maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial, name=name)
# In[4]:
# TGCN Cell
from tensorflow.compat.v1.nn.rnn_cell import RNNCell
class tgcnCell(RNNCell):
    """Temporal Graph Convolutional Network cell.

    A GRU cell whose input/state transforms are graph convolutions over a
    fixed adjacency (its normalized Laplacian is precomputed in __init__).
    Designed for TF1-compat static_rnn: state is a flat
    (batch, num_nodes * num_units) tensor.
    """

    def call(self, inputs, **kwargs):
        # Required by the RNNCell ABC; actual computation happens in __call__.
        pass

    def __init__(self, num_units, adj, num_nodes, input_size=None,
                 act=tf.nn.tanh, reuse=None):
        # num_units: GRU hidden units per node; adj: raw adjacency matrix;
        # input_size is accepted but unused (kept for API compatibility).
        super(tgcnCell, self).__init__(_reuse=reuse)
        self._act = act
        self._nodes = num_nodes
        self._units = num_units
        self._adj = []
        # Pre-normalized propagation matrix A~ = normalize(A + I), as a
        # tf.SparseTensor; stored in a list so _gc can iterate supports.
        self._adj.append(calculate_laplacian(adj))

    @property
    def state_size(self):
        # Flat state: num_nodes * num_units per batch element.
        return self._nodes * self._units

    @property
    def output_size(self):
        return self._units

    def __call__(self, inputs, state, scope=None):
        # Standard GRU gating, with _gc (graph convolution) replacing the
        # dense input/state projections.
        with tf.compat.v1.variable_scope(scope or "tgcn",reuse=tf.compat.v1.AUTO_REUSE):
            with tf.compat.v1.variable_scope("gates",reuse=tf.compat.v1.AUTO_REUSE):
                # bias=1.0 biases reset/update gates open at initialization.
                value = tf.nn.sigmoid(
                    self._gc(inputs, state, 2 * self._units, bias=1.0, scope=scope))
                r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
            with tf.compat.v1.variable_scope("candidate",reuse=tf.compat.v1.AUTO_REUSE):
                r_state = r * state
                c = self._act(self._gc(inputs, r_state, self._units, scope=scope))
            new_h = u * state + (1 - u) * c
        return new_h, new_h

    def _gc(self, inputs, state, output_size, bias=0.0, scope=None):
        """Graph convolution of [inputs ; state] producing output_size per node."""
        ## inputs:(-1,num_nodes)
        inputs = tf.expand_dims(inputs, 2)
        ## state:(batch,num_node,gru_units)
        state = tf.reshape(state, (-1, self._nodes, self._units))
        ## concat
        x_s = tf.concat([inputs, state], axis=2)
        input_size = x_s.get_shape()[2]
        ## (num_node,input_size,-1)
        x0 = tf.transpose(x_s, perm=[1, 2, 0])
        x0 = tf.reshape(x0, shape=[self._nodes, -1])
        scope = tf.compat.v1.get_variable_scope()
        with tf.compat.v1.variable_scope(scope):
            # Multiply by each support matrix; with a single support, x1 is
            # simply A~ X. (Only the last product survives the loop.)
            for m in self._adj:
                x1 = tf.sparse.sparse_dense_matmul(m, x0)
            x = tf.reshape(x1, shape=[self._nodes, input_size,-1])
            x = tf.transpose(x,perm=[2,0,1])
            x = tf.reshape(x, shape=[-1, input_size])
            weights = tf.compat.v1.get_variable(
                'weights', [input_size, output_size], initializer=tf.keras.initializers.glorot_normal)
            x = tf.matmul(x, weights)  # (batch_size * self._nodes, output_size)
            biases = tf.compat.v1.get_variable(
                "biases", [output_size], initializer=tf.constant_initializer(bias))
            x = tf.nn.bias_add(x, biases)
            x = tf.reshape(x, shape=[-1, self._nodes, output_size])
            x = tf.reshape(x, shape=[-1, self._nodes * output_size])
        return x
# In[5]:
import pickle as pkl
import tensorflow as tf
import pandas as pd
import numpy as np
import math
import os
import numpy.linalg as la
from sklearn.metrics import mean_squared_error,mean_absolute_error
import time
time_start = time.time()

###### Settings ######
# flags = tf.compat.v1.flags
# FLAGS = flags.FLAGS
# flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
# flags.DEFINE_integer('training_epoch', 1, 'Number of epochs to train.')
# flags.DEFINE_integer('gru_units', 64, 'hidden units of gru.')
# flags.DEFINE_integer('seq_len',12 , ' time length of inputs.')
# flags.DEFINE_integer('pre_len', 3, 'time length of prediction.')
# flags.DEFINE_float('train_rate', 0.8, 'rate of training set.')
# flags.DEFINE_integer('batch_size', 32, 'batch size.')
# flags.DEFINE_string('dataset', 'los', 'sz or los.')
# flags.DEFINE_string('model_name', 'tgcn', 'tgcn')
model_name = 'tgcn'
data_name = 'dc'
train_rate = 0.8
seq_len = 24  # time steps of history fed to the model
output_dim = pre_len = 3  # time steps predicted ahead
batch_size = 32
lr = 0.001
training_epoch = 1  # NOTE: overwritten to 20 just before the training loop
gru_units = 64

# In[6]:

###### load data ######
if data_name == 'dc':
    data, adj1, adj2, adj3, adj4 = load_dc_data('dc')

time_len = data.shape[0]  # number of time steps
num_nodes = data.shape[1]  # number of zones/nodes
data1 =np.mat(data,dtype=np.float32)

# In[7]:

#### normalization
# max_value = np.max(data1)
# data1 = data1/max_value
max_value=1  # kept at 1: z-score normalization below replaces max-scaling
mean_value=np.mean(data1)
std_value=np.std(data1)
data1=(data1-mean_value)/std_value
trainX, trainY, testX, testY = preprocess_data(data1, time_len, train_rate, seq_len, pre_len)

totalbatch = int(trainX.shape[0]/batch_size)
training_data_count = len(trainX)
# In[8]:
def process_output(otp):
    """Flatten each per-timestep GRU output to shape (batch * num_nodes, gru_units).

    Relies on the module-level num_nodes and gru_units settings.
    """
    reshaped = []
    for step in otp:
        per_node = tf.reshape(step, shape=[-1, num_nodes, gru_units])
        reshaped.append(tf.reshape(per_node, shape=[-1, gru_units]))
    return reshaped
# In[9]:
# TGCN
from tensorflow import keras
def TGCN(_X, _weights, _biases):
    """Multi-relation TGCN: four parallel GCN-GRU branches (one per adjacency
    type), fused by dense layers and projected to pre_len steps per node.

    _X: (batch, seq_len, num_nodes) placeholder; _weights/_biases: dicts with
    an 'out' projection of shape (gru_units, pre_len) / (pre_len,).
    Returns (output, branch1_states_list, branch1_final_state) where output is
    (batch * pre_len, num_nodes).
    """
    ###
    # multi-GCN-GRU
    cell_1 = tgcnCell(gru_units, adj1, num_nodes=num_nodes)
    cell_2 = tgcnCell(gru_units, adj2, num_nodes=num_nodes)
    cell_3 = tgcnCell(gru_units, adj3, num_nodes=num_nodes)
    cell_4 = tgcnCell(gru_units, adj4, num_nodes=num_nodes)
    cell_11 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True)
    cell_22 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_2], state_is_tuple=True)
    cell_33 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_3], state_is_tuple=True)
    cell_44 = tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell_4], state_is_tuple=True)
    # static_rnn consumes a python list of per-timestep tensors.
    _X = tf.unstack(_X, axis=1)
    outputs_1, states_1 = tf.compat.v1.nn.static_rnn(cell_11, _X, dtype=tf.float32)
    outputs_2, states_2 = tf.compat.v1.nn.static_rnn(cell_22, _X, dtype=tf.float32)
    outputs_3, states_3 = tf.compat.v1.nn.static_rnn(cell_33, _X, dtype=tf.float32)
    outputs_4, states_4 = tf.compat.v1.nn.static_rnn(cell_44, _X, dtype=tf.float32)
    m_1 = process_output(outputs_1)
    m_2 = process_output(outputs_2)
    m_3 = process_output(outputs_3)
    m_4 = process_output(outputs_4)
    # Only the final timestep of each branch feeds the prediction head.
    last_output_1 = m_1[-1]
    last_output_2 = m_2[-1]
    last_output_3 = m_3[-1]
    last_output_4 = m_4[-1]
    dense_input = tf.concat([last_output_1, last_output_2, last_output_3, last_output_4], 1)
    # Dense
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(64, activation='sigmoid'))
    model.add(tf.keras.layers.Dense(64))
    last_output = model(dense_input)
    output = tf.matmul(last_output, _weights['out']) + _biases['out']
    output = tf.reshape(output,shape=[-1,num_nodes,pre_len])
    output = tf.transpose(output, perm=[0,2,1])
    output = tf.reshape(output, shape=[-1,num_nodes])
    return output, m_1 , states_1
# In[10]:
###### placeholders ######
tf.compat.v1.disable_eager_execution()
inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, seq_len, num_nodes])
labels = tf.compat.v1.placeholder(tf.float32, shape=[None, pre_len, num_nodes])

# In[11]:

# Graph weights: output projection from GRU units to the prediction horizon.
weights = {
    'out': tf.Variable(tf.compat.v1.random_normal([gru_units, pre_len], mean=1.0), name='weight_o')}
biases = {
    'out': tf.Variable(tf.compat.v1.random_normal([pre_len]),name='bias_o')}

if model_name == 'tgcn':
    pred,ttts,ttto = TGCN(inputs, weights, biases)

y_pred = pred

# In[12]:

###### optimizer ######
lambda_loss = 0.0015
# L2 weight-decay over all trainable variables.
Lreg = lambda_loss * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.compat.v1.trainable_variables())
label = tf.reshape(labels, [-1,num_nodes])
##loss
loss = tf.reduce_mean(tf.nn.l2_loss(y_pred-label) + Lreg)
##rmse
error = tf.sqrt(tf.reduce_mean(tf.square(y_pred-label)))
optimizer = tf.compat.v1.train.AdamOptimizer(lr).minimize(loss)

# In[13]:

###### Initialize session ######
variables = tf.compat.v1.global_variables()
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
#sess = tf.Session()
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
sess.run(tf.compat.v1.global_variables_initializer())

# Output directory encodes the hyper-parameters for this run.
out = 'out/%s'%(model_name)
#out = 'out/%s_%s'%(model_name,'perturbation')
path1 = '%s_%s_lr%r_batch%r_unit%r_seq%r_pre%r_epoch%r'%(model_name,data_name,lr,batch_size,gru_units,seq_len,pre_len,training_epoch)
path = os.path.join(out,path1)
if not os.path.exists(path):
    os.makedirs(path)

# In[15]:
###### evaluation ######
def evaluation(a, b):
    """Compute regression metrics between ground truth a and prediction b.

    Args:
        a, b: 2-D numpy arrays of the same shape.

    Returns:
        (rmse, mae, accuracy, r2, explained_variance) where accuracy is
        1 - ||a-b||_F / ||a||_F.

    Implemented with numpy only — exact equivalents of the previously used
    sklearn mean_squared_error / mean_absolute_error (uniform averaging over
    all entries), so the block has no sklearn dependency.
    """
    diff = a - b
    rmse = math.sqrt(np.mean(np.square(diff)))
    mae = np.mean(np.abs(diff))
    F_norm = la.norm(diff, 'fro') / la.norm(a, 'fro')
    r2 = 1 - (diff ** 2).sum() / ((a - a.mean()) ** 2).sum()
    var = 1 - (np.var(diff)) / np.var(a)
    return rmse, mae, 1 - F_norm, r2, var
x_axe,batch_loss,batch_rmse,batch_pred = [], [], [], []
test_loss,test_rmse,test_mae,test_acc,test_r2,test_var,test_pred = [],[],[],[],[],[],[]

training_epoch = 20  # overrides the earlier setting of 1
for epoch in range(training_epoch):
    for m in range(totalbatch):
        mini_batch = trainX[m * batch_size : (m+1) * batch_size]
        mini_label = trainY[m * batch_size : (m+1) * batch_size]
        _, loss1, rmse1, train_output = sess.run([optimizer, loss, error, y_pred],
                                                 feed_dict = {inputs:mini_batch, labels:mini_label})
        batch_loss.append(loss1)
        batch_rmse.append(rmse1 * max_value)  # max_value is 1 (z-score norm)

    # Test completely at every epoch
    loss2, rmse2, test_output = sess.run([loss, error, y_pred],
                                         feed_dict = {inputs:testX, labels:testY})
    test_label = np.reshape(testY,[-1,num_nodes])
    rmse, mae, acc, r2_score, var_score = evaluation(test_label, test_output)
    # NOTE(review): metrics are computed on normalized values; max_value=1 so
    # no de-normalization actually happens here — confirm this is intended.
    test_label1 = test_label * max_value
    test_output1 = test_output * max_value
    test_loss.append(loss2)
    test_rmse.append(rmse * max_value)
    test_mae.append(mae * max_value)
    test_acc.append(acc)
    test_r2.append(r2_score)
    test_var.append(var_score)
    test_pred.append(test_output1)

    print('Iter:{}'.format(epoch),
          'train_rmse:{:.4}'.format(batch_rmse[-1]),
          'test_loss:{:.4}'.format(loss2),
          'test_rmse:{:.4}'.format(rmse),
          'test_mae:{:.4}'.format(mae))

    # Checkpoint every 500 epochs (with 20 epochs, only epoch 0 is saved).
    if (epoch % 500 == 0):
        saver.save(sess, path+'/model_100/TGCN_pre_%r'%epoch, global_step = epoch)

time_end = time.time()
print(time_end-time_start,'s')

# In[ ]:

# In[ ]:

# In[120]:

############## visualization ###############
# Average the per-batch metrics into per-epoch curves, then report the epoch
# with the lowest test RMSE.
b = int(len(batch_rmse)/totalbatch)
batch_rmse1 = [i for i in batch_rmse]
train_rmse = [(sum(batch_rmse1[i*totalbatch:(i+1)*totalbatch])/totalbatch) for i in range(b)]
batch_loss1 = [i for i in batch_loss]
train_loss = [(sum(batch_loss1[i*totalbatch:(i+1)*totalbatch])/totalbatch) for i in range(b)]

index = test_rmse.index(np.min(test_rmse))
test_result = test_pred[index]
var = pd.DataFrame(test_result)
# var.to_csv(path+'/test_result.csv',index = False,header = False)
#plot_result(test_result,test_label1,path)
#plot_error(train_rmse,train_loss,test_rmse,test_acc,test_mae,path)

print('min_rmse:%r'%(np.min(test_rmse)),
      'min_mae:%r'%(test_mae[index]),
      'max_acc:%r'%(test_acc[index]),
      'r2:%r'%(test_r2[index]),
      'var:%r'%test_var[index])
| xuyimingxym/MicroMobility-DL | Multi-GCN_GRU.py | Multi-GCN_GRU.py | py | 13,757 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.mat",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.mat",
"line_numbe... |
1206611132 | """Utils functions."""
import datetime
def MillisecondsSinceEpoch(hours):
  """Returns time in milliseconds since epoch for given time in hours.

  Args:
    hours: Int, the hours of the future timestamp.

  Returns:
    Int, the future timestamp in milliseconds since the Unix epoch (UTC).
  """
  # Use timezone-aware UTC datetimes throughout. The original subtracted the
  # naive UTC epoch from a naive *local* now(), so the result was off by the
  # machine's UTC offset.
  future = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=hours)
  epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)
  delta = future - epoch
  return int(delta.total_seconds() * 1000)
| DomRosenberger/google_bigquery | google_bigquery/common/utils.py | utils.py | py | 479 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datet... |
20763051017 | import os
import json
import time
from datetime import datetime
# Importing shared dependencies
from task_management import task_list
from ai_agent_management import ai_agents
sync_status = {}
def autoSync():
    """Daemon loop: once a minute, record the sync time in the shared
    sync_status dict and flush tasks and agents to their JSON files.
    Blocks forever; intended as the process entry point."""
    while True:
        time.sleep(60)  # Sync every minute
        sync_status['last_sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        syncTasks()
        syncAgents()
def syncTasks():
    """Persist the shared in-memory task list to its JSON data file."""
    serialized = json.dumps(task_list)
    with open('TaskMaster/src/task_data.json', 'w') as task_file:
        task_file.write(serialized)
def syncAgents():
    """Persist the shared in-memory AI-agent registry to its JSON data file."""
    serialized = json.dumps(ai_agents)
    with open('TaskMaster/src/ai_agent_data.json', 'w') as agent_file:
        agent_file.write(serialized)
if __name__ == "__main__":
    # Run the sync daemon when executed directly (blocks forever).
    autoSync()
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.dump",
"l... |
34086502752 | from batch import create_udb
from projectMetrics import projectMetric
from subprocess import call
import git
import sys
import datetime
import os
import shutil
import time
def main():
    """For every commit on master (newest first): check it out, build an
    Understand database, compute project metrics, then delete the database.

    Usage: main.py <relative-path-to-git-repo>
    """
    git_repo = sys.argv[1]  # git repo is the relative path from the folder
    all_sha1 = []
    sha_dtime = []
    repo = git.Repo(git_repo)
    for commit in repo.iter_commits('master'):
        sha = commit.hexsha
        all_sha1.append(repo.git.rev_parse(sha))
        sha_dtime.append(datetime.datetime.fromtimestamp(commit.committed_date))

    start_time = time.time()
    print(len(all_sha1))
    # (Removed a leftover debug `exit()` here that made the loop below
    # unreachable.)
    g = git.Git(git_repo)
    for sha, d_time in zip(all_sha1, sha_dtime):
        g.checkout(sha)
        db_name = create_udb(git_repo)
        projectMetric(db_name, sha, d_time)
        # Pass argv as a list: call() with shell=False and a single string
        # would try to exec a program literally named "rm -f <db_name>".
        call(['rm', '-f', db_name])
    print("--- %s minutes ---" % round((time.time() - start_time) / 60, 5))
if __name__ == '__main__':
main() | akhilsinghal1234/mdd-intern-work | Extraction/main.py | main.py | py | 1,009 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "git.Repo",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
30338835101 | import marqo
import pprint
import requests
import random
import math
# Test bug in pagination feature of OpenSearch
# Create marqo index
# Requires a local Marqo server on port 8882.
mq = marqo.Client(url='http://localhost:8882')
try:
    mq.index("my-first-index").delete()
except:
    # Bare except is deliberate best-effort cleanup: the index may not exist
    # on first run. NOTE(review): narrow to the client's error type if known.
    pass

# Index set number of documents
# 100 random words
mq.create_index("my-first-index")
vocab_source = "https://www.mit.edu/~ecprice/wordlist.10000"
vocab = requests.get(vocab_source).text.splitlines()
num_docs = 100
random.seed(2020)  # fixed seed so document contents are reproducible
docs = [{"Title": "a " + (" ".join(random.choices(population=vocab, k=25))),
         "_id": str(i)
         }
        for i in range(num_docs)]
mq.index("my-first-index").add_documents(
    docs, auto_refresh=False
)
mq.index("my-first-index").refresh()

search_method = "TENSOR"

# Search for all 100 documents at the same time
# DEBUG FULL RESULTS — this unpaginated result is the ground truth the
# paginated queries below are compared against.
debug_res = mq.index("my-first-index").search(
    search_method=search_method,
    q='a',
    limit=num_docs)
debug_res_id_only = [hit["_id"] for hit in debug_res["hits"]]

# Search for pages of 1 document at a time
for page_size in [1]:
    print("========================================================")
    print(f"{search_method}: Results for page_size = {page_size}")
    paginated_search_results = {"hits": []}
    for page_num in range(math.ceil(num_docs / page_size)):
        lim = page_size
        off = page_num * page_size
        # print(f"Now testing: limit={lim}, offset={off}")
        page_res = mq.index("my-first-index").search(
            search_method=search_method,
            q='a',
            limit=lim, offset=off)
        single_page_id_only = [hit["_id"] for hit in page_res["hits"]]
        paginated_search_results["hits"].extend(page_res["hits"])
        print("========================================================")
        print(f"Query for page num {page_num}")
        print(f"size: {page_res['limit']}, from: {page_res['offset']}")
        # Each page must equal the corresponding slice of the full result.
        expected_res = debug_res_id_only[off:off+lim]
        print(f"Paginated result for page num {page_num}: {single_page_id_only}")
        print(f"Expected result for page num {page_num}: {expected_res}")
        if expected_res != single_page_id_only:
            print("DISCREPANCY FOUND.")
    page_id_only = [hit["_id"] for hit in paginated_search_results["hits"]]
    print("========================================================")
    print(f"FULL RESULTS: (length = {len(debug_res['hits'])})")
    print(debug_res_id_only)
    print(f"PAGINATED: (length = {len(paginated_search_results['hits'])})")
    print(page_id_only)
    print("Paginated results same as expected full results?")
    print(debug_res["hits"] == paginated_search_results["hits"])
| vicilliar/public-code | pagination/os_from_tester.py | os_from_tester.py | py | 2,920 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "marqo.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.choices",
"line_numbe... |
21056220601 | #!/usr/bin/python3
import requests, argparse
parser = argparse.ArgumentParser()
parser.add_argument("--rhost", "-rh", type=str, help="remote host (if not specified, 127.0.0.1 will be used)", default="127.0.0.1")
parser.add_argument("--rport", "-rp", type=str, help="remote port (if not specified, 8500 will be used)", default="8500")
parser.add_argument("--lhost", "-lh", type=str, help="local host", required=True)
parser.add_argument("--lport", "-lp", type=str, help="local port", required=True)
parser.add_argument("--token", "-tk", type=str, help="acl token", required=True)
parser.add_argument("--ssl", "-s", action="store_true", help="use ssl (https) in the request")
args = parser.parse_args()
if args.ssl:
target = f"https://{args.rhost}:{args.rport}/v1/agent/service/register"
else:
target = f"http://{args.rhost}:{args.rport}/v1/agent/service/register"
headers = {"X-Consul-Token": f"{args.token}"}
json = {"Address": "127.0.0.1", "check": {"Args": ["/bin/bash", "-c", f"bash -i >& /dev/tcp/{args.lhost}/{args.lport} 0>&1"], "interval": "10s", "Timeout": "864000s"}, "ID": "gato", "Name": "gato", "Port": 80}
try:
requests.put(target, headers=headers, json=json, verify=False)
print("\n[\033[1;32m+\033[1;37m] Request sent successfully, check your listener\n")
except:
print("\n[\033[1;31m-\033[1;37m] Something went wrong, check the connection and try again\n")
exit(1)
| GatoGamer1155/Scripts | Ambassador/privesc.py | privesc.py | py | 1,409 | python | en | code | 33 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 22,
"usage_type": "call"
}
] |
5468407661 | import os
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms
import torch.optim as optim
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from PIL import Image
import pylab as py
from IPython import embed
from naive_ae import ConvAutoencoder
DATA_PATH = '../data_sets/mnist'  # MNIST download/cache directory
NAIVE_AE_PATH = './trained_models/convAutoEncSigmoid/naive_ae25.pth'  # AE trained with a sigmoid output
CLEVER_AE_PATH = './trained_models/convAutoEncNoSigmoid/naive_ae25.pth'  # AE trained without a sigmoid output
def posterior_loss_denoising(I, I_c, AE, sigma, T):
    """Negative log-posterior of candidate image I given corrupted image I_c.

    likelihood: exp(-||I - I_c||) / 2 * sigma^2; prior: ||AE(I_c) - I|| / T.
    NOTE(review): operator precedence makes the likelihood
    (exp(-||I-I_c||)/2) * sigma^2, not a Gaussian exp(-||.||^2 / (2 sigma^2))
    — preserved as-is; confirm against the intended model.
    """
    likelihood_term = torch.exp(-torch.norm(I - I_c)) / 2 * (sigma**2)
    prior_term = torch.norm(AE(I_c) - I) / T
    return -torch.log(likelihood_term) - torch.log(prior_term)

def maximize_posterior_denoising(I_c, AE, sigma=1, T=0.1, steps=2000, lr=0.1):
    """Recover a clean 28x28 image by minimizing the denoising loss with Adam.

    Args:
        I_c: corrupted image tensor of shape (1, 1, 28, 28).
        AE: autoencoder (or any callable) used as the image prior.
        sigma: likelihood noise scale.
        T: prior temperature.
        steps: number of Adam iterations (default keeps the original 2000).
        lr: Adam learning rate (default keeps the original 0.1).

    Returns:
        The optimized image tensor (requires_grad=True), shape (1, 1, 28, 28).
    """
    I_i = torch.rand(1, 1, 28, 28, requires_grad=True)
    optimizer = torch.optim.Adam([I_i], lr=lr)
    for _ in range(steps):
        loss = posterior_loss_denoising(I_i, I_c, AE, sigma, T)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return I_i
def posterior_loss_mid_suppression(I, I_c, AE, T):
    """Negative log prior-match loss for inpainting: -log(||AE(I_c) - I|| / T)."""
    # I = suppress_mid(I)
    prior_term = torch.norm(AE(I_c) - I) / T
    return -torch.log(prior_term)

def maximize_posterior_mid_suppression(I_c, AE, sigma=1, T=100, steps=2000, lr=0.1):
    """Inpaint a 28x28 image by minimizing the suppression loss with Adam.

    Args:
        I_c: image with the central patch zeroed, shape (1, 1, 28, 28).
        AE: autoencoder (or any callable) used as the image prior.
        sigma: unused; kept for interface compatibility with the denoising API.
        T: prior temperature.
        steps: number of Adam iterations (default keeps the original 2000).
        lr: Adam learning rate (default keeps the original 0.1).

    Returns:
        The optimized image tensor (requires_grad=True), shape (1, 1, 28, 28).
    """
    I_i = torch.rand(1, 1, 28, 28, requires_grad=True)
    optimizer = torch.optim.Adam([I_i], lr=lr)
    for _ in range(steps):
        loss = posterior_loss_mid_suppression(I_i, I_c, AE, T)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return I_i
def gaussian_noise(I):
    """Return I plus unit Gaussian noise of the same shape.

    Generalized from the original hard-coded (1, 1, 28, 28) noise shape via
    randn_like, which draws the identical values for that shape and dtype
    while also supporting arbitrary input shapes.
    """
    return I + torch.randn_like(I)
def suppress_mid(I):
    """Return a copy of I with the central 9x9 patch (rows/cols 9..17) zeroed.

    The input tensor is left untouched.
    """
    masked = torch.clone(I)
    masked[:, :, 9:18, 9:18] = 0
    return masked
# Load the two pretrained autoencoders used as image priors.
naive_ae = ConvAutoencoder()
naive_ae.load_state_dict(torch.load(NAIVE_AE_PATH))
clever_ae = ConvAutoencoder()
clever_ae.load_state_dict(torch.load(CLEVER_AE_PATH))

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
test_set = datasets.MNIST(root=DATA_PATH, train=False, download=True, transform=transform)

############################
#      denoising task      #
############################
# NOTE(review): here I has shape (1, 28, 28) while the inpainting task below
# views it as (1, 1, 28, 28) — presumably broadcasting makes both work with
# the (1, 1, 28, 28) optimization variable; confirm.
I = test_set[2][0]
I_c = gaussian_noise(I)
naive_denoising = maximize_posterior_denoising(I_c, naive_ae)
clever_denoising = maximize_posterior_denoising(I_c, clever_ae)

fig, ax = plt.subplots(2,2)
fig.suptitle('denoising task')
ax[0,0].imshow(I.squeeze())
ax[0,0].set_title('original image')
ax[0,1].imshow(I_c.squeeze())
ax[0,1].set_title('noised image')
ax[1,0].imshow(naive_denoising.detach().squeeze())
ax[1,0].set_title('naive AE denoising')
ax[1,1].imshow(clever_denoising.detach().squeeze())
ax[1,1].set_title('clever AE denoising')

############################
#     inpainting task      #
############################
I = test_set[2][0].view(1,1,28,28)
I_c = suppress_mid(I)
naive_inpainting = maximize_posterior_mid_suppression(I_c, naive_ae)
clever_inpainting = maximize_posterior_mid_suppression(I_c, clever_ae)

fig, ax = plt.subplots(2,2)
fig.suptitle('inpainting task')
ax[0,0].imshow(I.squeeze())
ax[0,0].set_title('original image')
ax[0,1].imshow(I_c.squeeze())
ax[0,1].set_title('noised image')
ax[1,0].imshow(naive_inpainting.detach().squeeze())
ax[1,0].set_title('naive AE inpainting')
ax[1,1].imshow(clever_inpainting.detach().squeeze())
ax[1,1].set_title('clever AE inpainting')

plt.show()
{
"api_name": "torch.exp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 28,
... |
29197653617 | import re
import json
import torch
import logging
from tokenizers import ByteLevelBPETokenizer
from os.path import exists, join, abspath
from . import Target, Entity
from models.pre_abstract.model import LSTMTagger
class PreAbstractParser(Target):
    """Pipeline stage that cleans the text *before* a paper's abstract.

    An LSTM tagger classifies every pre-abstract line (author, email,
    organization, private, other); non-"other" lines are recorded as entity
    annotations on the document dict and removed from the cleaned text.
    """
    def __init__(self, model_dir, device="cpu"):
        # model_dir must hold classes.json, config.json, merges.txt,
        # weights.pt and vocab.json (verified by the asserts below).
        super().__init__()
        self.model_dir = abspath(model_dir)
        assert exists(self.model_dir), f"model directory '{self.model_dir}' does not exist"
        assert exists(join(self.model_dir, "classes.json")), f"classes file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "config.json")), f"configuration file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "merges.txt")), f"merges file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "weights.pt")), f"weights file does not exist in {self.model_dir}"
        assert exists(join(self.model_dir, "vocab.json")), f"vocab file does not exist in {self.model_dir}"
        with open(join(self.model_dir, "classes.json"), "r") as classes_file:
            self.class_to_index = json.load(classes_file)
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        with open(join(self.model_dir, "config.json"), "r") as config_file:
            self.model_config = json.load(config_file)
        # Fall back to CPU when CUDA is unavailable, whatever was requested.
        if not torch.cuda.is_available():
            device = "cpu"
        self.device = torch.device(device)
        self.model = LSTMTagger(vocab_size=self.model_config["vocab_size"],
                                embedding_dim=self.model_config["embedding_dim"],
                                lstm_dim=self.model_config["lstm_dim"],
                                n_classes=len(self.class_to_index)).to(self.device)
        weights = torch.load(join(self.model_dir, "weights.pt"), map_location=device)
        self.model.load_state_dict(weights)
        self.model = self.model.eval()
        self.tokenizer = ByteLevelBPETokenizer(vocab_file=join(self.model_dir, "vocab.json"),
                                               merges_file=join(self.model_dir, "merges.txt"),
                                               lowercase=self.model_config["lowercase"])
        # noise_re strips everything but letters/spaces (used for fuzzy title
        # comparison); department_re grabs ",...Department...," fragments.
        self.noise_re = re.compile(r"[^A-Za-z ]")
        self.department_re = re.compile(r"(?:,\s*)?[^,]*Department[^,]*(?:,)", re.IGNORECASE)
    def __call__(self, document):
        """Annotate/strip pre-abstract lines; returns the mutated document.

        The title line (when meta is present) and "other" lines are kept;
        everything else becomes an entity annotation via create_annotation.
        """
        assert isinstance(document, dict), f"wrong input of type {type(document)} to author parser"
        try:
            lines, labels = self.annotate_lines(document["text"][:document["abstract_start"]])
        except RuntimeError:
            logging.error(f"could not parse pre abstract of {document['name']}")
            return document
        keep_lines = []
        for line, label in zip(lines, labels):
            if "meta" in document and self.noise_re.sub("", line) == self.noise_re.sub("", document["meta"]["title"]):
                keep_lines.append(line)
            elif label == "other":
                keep_lines.append(line)
            else:
                self.create_annotation(document, line, label)
        if "meta" in document:
            keep_lines = self.post_process_lines(document, keep_lines)
        document["text_cleaned"] = "\n".join(keep_lines) + document["text"][document["abstract_start"]:]
        return document
    def annotate_lines(self, text):
        """Tokenize each line, run the tagger, return (lines, label strings)."""
        lines = text.split("\n")
        tokenized = [x.ids for x in self.tokenizer.encode_batch(lines)]
        # padding: left-pad every line with token id 0 up to the longest line
        max_tokens = max(len(sentence) for sentence in tokenized)
        for sentence in range(len(tokenized)):
            for _ in range(max_tokens - len(tokenized[sentence])):
                tokenized[sentence].insert(0, 0)
        tensor = torch.tensor([tokenized]).to(self.device)
        predictions = self.model.forward(tensor)
        predictions = torch.argmax(predictions[0], -1)
        predictions = [self.index_to_class[prediction.item()] for prediction in predictions]
        return lines, predictions
    def create_annotation(self, document, line, label):
        """File the line under the entity bucket matching its label.

        Organization lines get their ",...Department...," fragments extracted
        as PERSONAL_DATA first; raises ValueError on an unknown label.
        """
        if label == "private":
            document["entities"][Entity.PERSONAL_DATA].add(line)
        elif label == "author":
            document["entities"][Entity.AUTHOR].add(line)
        elif label == "email":
            document["entities"][Entity.EMAIL].add(line)
        elif label == "organization":
            for department_mention in self.department_re.findall(line):
                document["entities"][Entity.PERSONAL_DATA].add(department_mention)
            line = self.department_re.sub("", line)
            document["entities"][Entity.INSTITUTION_COMPANY].add(line)
        else:
            logging.error(f"label '{label}' not recognized in {type(self)}")
            raise ValueError(f"label '{label}' not recognized")
    def post_process_lines(self, document, lines):
        """Drop kept lines that still mention a known author/organization.

        Name parts may be separated by whitespace or hyphens in the PDF text,
        hence the '[\\s\\-]*' joiner between escaped name tokens.
        """
        keep_lines = []
        for line in lines:
            mention = False
            try:
                for author in document["meta"]["authors"]:
                    if re.search("[\s\-]*".join(re.escape(name) for name in author.split()), line, re.IGNORECASE):
                        mention = True
                        document["entities"][Entity.AUTHOR].add(line)
                for organization in document["meta"]["orgs"]:
                    if re.search("[\s\-]*".join(re.escape(name) for name in organization["name"].split()), line, re.IGNORECASE):
                        mention = True
                        document["entities"][Entity.INSTITUTION_COMPANY].add(line)
            except KeyError:
                logging.error(f"conferences meta file misses key for {document['name']}")
            if not mention:
                keep_lines.append(line)
        return keep_lines
| kherud/native-language-identification | pipeline/pipes/pre_abstract.py | pre_abstract.py | py | 5,835 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
35104869337 | # -*- coding: utf-8 -*-
import os
import codecs
import collections
from six.moves import cPickle
import numpy as np
import re
import itertools
import pandas as pd
from ts_FeatureCoding import Feature_Coding
DATA_DIR = "data/events"
class DataLoader():
    """Loads a CSV of market events, one-hot encodes the categorical feature
    columns, and serves (x, y) minibatches for sequence-model training.
    Labels are the Type of the *next* event (inputs shifted by one row)."""
    def __init__(self, args):
        self.data_dir = args.data_dir
        self.data_file = args.data_file
        self.batch_size = args.batch_size
        self.seq_length = args.seq_length
        self.max_records = args.max_records
        self.encoding=args.input_encoding
        self.featureCodes = Feature_Coding()
        self.nfeatures = self.featureCodes.nfeatures
        input_file = os.path.join(self.data_dir, self.data_file)
        print("reading text file")
        self.loadcsv(input_file)
    def preparedata(self):
        """Build vocabulary + tensors, then cut them into batches.

        NOTE(review): the `if True or ...` deliberately forces a rebuild and
        never takes the cached load_preprocessed path.
        """
        vocab_file = os.path.join(self.data_dir, "vocab.pkl")
        tensor_file = os.path.join(self.data_dir, "data.npy")
        # Let's not read vocab and data from file. We may change them.
        if True or not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):
            print("building vocabulary files...")
            self.preprocess(vocab_file, tensor_file, self.encoding)
        else:
            print("loading preprocessed files...")
            self.load_preprocessed(vocab_file, tensor_file)
        self.create_batches()
        self.reset_batch_pointer()
    def clean_str(self, string):
        """
        Tokenization/string cleaning for all datasets except for SST.
        Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data
        """
        #string = re.sub(r"_", " Period_", string)
        string = re.sub(r",", "_", string)
        # NOTE(review): the pattern below contains a comma, but the previous
        # substitution has already replaced every comma with "_", so this
        # second re.sub can never match -- confirm the intended order.
        string = re.sub(r"VS_15,Neutral", "\.", string)
        return string
        #string = re.sub(r"[^가-힣A-Za-z0-9(),!?\'\`]", " ", string)
        #string = re.sub(r"\'s", " \'s", string)
        #string = re.sub(r"\'ve", " \'ve", string)
        #string = re.sub(r"n\'t", " n\'t", string)
        #string = re.sub(r"\'re", " \'re", string)
        #string = re.sub(r"\'d", " \'d", string)
        #string = re.sub(r"\'ll", " \'ll", string)
        #string = re.sub(r"!", " ! ", string)
        #string = re.sub(r"\(", " \( ", string)
        #string = re.sub(r"\)", " \) ", string)
        #string = re.sub(r"\?", " \? ", string)
        #string = re.sub(r"\s{2,}", " ", string)
        #return string.strip().lower()
    def build_vocab(self, sentences):
        """
        Builds a vocabulary mapping from word to index based on the sentences.
        Returns vocabulary mapping and inverse vocabulary mapping.
        """
        # Build vocabulary
        word_counts = collections.Counter(sentences)
        # Mapping from index to word
        vocabulary_inv = [x[0] for x in word_counts.most_common()]
        vocabulary_inv = list(sorted(vocabulary_inv))
        # Mapping from word to index
        vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
        return [vocabulary, vocabulary_inv]
    def loadcsv(self, input_file):
        """Read the event CSV in chunks into self.raw_df, prune unwanted
        event types, one-hot encode the categorical columns into self.data,
        and derive next-event labels in self.labels."""
        columns= self.featureCodes.featuresAll
        nread = 100000
        skip_rows = 0
        max_records = self.max_records
        self.raw_df = pd.DataFrame(columns=columns)
        reader = pd.read_csv(input_file, iterator=True, chunksize=nread,
                             header=0, names=columns, index_col=False,
                             na_values='NA', skip_blank_lines=True,
                             skipinitialspace=True, infer_datetime_format=False,
                             parse_dates=False, skiprows=skip_rows)
        # NOTE(review): `do_more` is initialised but the loop assigns the
        # misspelled `doMore`; both assignments are dead since each branch
        # breaks immediately. `dailyRowSeen` is never used.
        do_more = True
        total_read = 0
        dailyRowSeen = False
        for csvrows in reader:
            if csvrows.shape[0] == 0:
                doMore = False
                break
            # convert TimeStamp column to a datatime
            csvrows['TimeStamp'] = pd.to_datetime(csvrows['TimeStamp'], format='%Y/%m/%dT%H:%M:%S')
            # raw_df = raw_df.append(csvrows, ignore_index=True)
            self.raw_df = pd.concat([self.raw_df, csvrows], axis=0, copy=False, ignore_index=True)
            skip_rows += nread
            total_read += nread
            print('Records read:', total_read, self.raw_df.shape)
            if max_records > 0 and total_read >= max_records:
                doMore = False
                break
        print('Total Records read:', total_read, ' Saved:', self.raw_df.shape)
        self.raw_df.columns = columns
        # NOTE(review): set_index is not in-place and its return value is
        # discarded, so the index is unchanged -- confirm intent.
        self.raw_df.set_index('TimeStamp')
        """
        # extract the event TypeCode
        self.raw_df['TypeCode'] = self.raw_df['Type'].str.split('_').str[0]
        # extract the Direction code
        self.raw_df['Dir'] = self.raw_df['TypeCode'].str[-1:]
        self.raw_df['Period'] = self.raw_df['Type'].str.split('_').str[1]
        # map the Period (D,60,15,5,1) to int PeriodCode (1440,60,15,5,1)
        try:
            self.raw_df['TypeCodeNum'] = self.raw_df['TypeCode'].map(self.featureCodes.eventCodeDict).astype('int32')
            self.raw_df['PeriodCode'] = self.raw_df['Period'].map(self.featureCodes.periodCodeDict).astype('int32')
        except RuntimeError as e:
            print( e.args)
        """
        print('Checking for Nan rows...')
        nandf = self.raw_df[self.raw_df.isnull().any(axis=1)]
        if not nandf.empty:
            print(nandf)
        # For VS events, set direction code to X, since the direction is unknown
        #self.raw_df.Dir[self.raw_df[self.raw_df.TypeCode == 'VS'].index] = 'X'
        # drop rows with unwanted type codes (HEARTB)
        print('Pruning unwanted event types...')
        self.raw_df = self.raw_df.drop(self.raw_df[self.raw_df.EventCode == 'HEARTB'].index)
        self.raw_df = self.raw_df.drop(self.raw_df[self.raw_df.EventCode == 'VSX'].index)
        # NOTE(review): reset_index return value is discarded (not in-place).
        self.raw_df.reset_index()
        print('Total Records after pruning:', self.raw_df.shape)
        categ_features = pd.get_dummies(self.raw_df[['PeriodCode', 'EventDir', 'MarketTrend_D', 'MarketTrend_60', 'MarketTrend_15', 'MarketTrend_5', 'MarketTrend_1']], drop_first=False)
        self.data = pd.concat([self.raw_df.Type, categ_features], axis=1)
        #self.data = self.raw_df[['Type']]
        #self.data = np.array(self.raw_df.Type)
        #self.data['X'] = '{' + self.data['PeriodCode'] + ' ' + self.data['Dir'] + ' ' + self.data['TypeCode'] + '}'
        #labels = dftrim['Dir'] + '_' + dftrim['Period']
        # Label for each row is the next row's Type (shift by one).
        self.labels = self.data.Type[1:]
        self.data = self.data[:-1]
        #all_data = pd.concat([data, labels], axis=0)
        #self.data.reset_index()
        self.nfeatures = self.data.shape[1]
        # scan for first row containing 'HIL*D' event code
        for idx in range(len(self.raw_df)):
            t = self.raw_df.Type.iloc[idx]
            mf = re.match(r'HILMF..D', t)
            ft = re.match(r'HILFT..D', t)
            if mf or ft:
                print('Found ', t, ' at index', idx)
                self.data=self.data[idx:]
                self.labels = self.labels[idx:]
                break
    def preprocess(self, vocab_file, tensor_file, encoding):
        """Build the Type vocabulary, map Types to indices, and save tensors.

        `encoding` is currently unused here.
        """
        #X = '[ ' + self.data.PeriodCode.astype(str) + ' ' + self.data.Dir + ' ' + self.data.TypeCode + ' ]'
        # save the data in a numpy file
        #self.tensor = np.array(self.data)
        #self.label_tensor = np.array(self.labels)
        #np.save(tensor_file, self.tensor)
        #self.vocab_size = len(self.featureCodes.eventCodeDict)
        self.vocab, self.words = self.build_vocab(self.data.Type)
        self.vocab_size = len(self.words)
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.words, f)
        #The same operation like this [self.vocab[word] for word in x_text]
        # index of words as our basic data
        self.data['Type'] = np.array(list(map(self.vocab.get, self.data.Type)))
        self.tensor = np.array(self.data)
        self.label_tensor = np.array(list(map(self.vocab.get, self.labels)))
        # Save the data to data.npy
        np.save(tensor_file, self.tensor)
    def load_preprocessed(self, vocab_file, tensor_file):
        """Load a previously saved vocabulary and data tensor from disk."""
        with open(vocab_file, 'rb') as f:
            self.words = cPickle.load(f)
        self.vocab_size = len(self.words)
        self.vocab = dict(zip(self.words, range(len(self.words))))
        self.tensor = np.load(tensor_file)
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))
    def create_batches(self):
        """Truncate to whole batches and split into x/y batch lists."""
        self.num_batches = int(self.tensor.shape[0] / (self.batch_size * self.seq_length))
        if self.num_batches == 0:
            assert False, "Not enough data. Make seq_length and batch_size smaller."
        # truncate input tensor shape [n, self.nfeatures] to even number of full batches
        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        self.label_tensor = self.label_tensor[:self.num_batches * self.batch_size * self.seq_length]
        self.x_batches = np.split(self.tensor.reshape((-1, self.seq_length, self.nfeatures)),
                                  self.num_batches, axis=0)
        self.y_batches = np.split(self.label_tensor.reshape(-1, self.seq_length),
                                  self.num_batches, axis=0)
    def next_batch(self):
        """Return the next (x, y) batch and advance the pointer."""
        x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
        self.pointer += 1
        return x, y
    def reset_batch_pointer(self):
        """Rewind the batch pointer to the first batch."""
        self.pointer = 0
| traderscience/market_transformer | tsutils/data_loader.py | data_loader.py | py | 9,544 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ts_FeatureCoding.Feature_Coding",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.joi... |
29314289325 | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
import bcrypt
# Create your views here.
def main(request):
    """Render the landing page, newest books first; add the user when signed in."""
    context = {"books": Book.objects.order_by('created_at').reverse()}
    if 'logged_in' in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/index.html', context)
def index(request):
    """Show the login page; bounce already-authenticated users home."""
    if "logged_in" not in request.session:
        return render(request, 'main/login.html')
    messages.success(request, "You already signed in!")
    return redirect("/")
def register(request):
    """Validate the sign-up form, create the user with a bcrypt-hashed
    password, and start a session for the new account."""
    form = request.POST
    errors = User.objects.basic_validator(form)
    if errors:
        for val in errors.values():
            messages.error(request, val)
        return redirect('/')
    hashed = bcrypt.hashpw(form["password"].encode(), bcrypt.gensalt())
    User.objects.create(
        first_name=form["first_name"],
        last_name=form["last_name"],
        student_id=form["student_id"],
        email=form["email"],
        password=hashed,
    )
    user = User.objects.last()
    request.session["logged_in"] = user.id
    request.session["first_name"] = user.first_name
    request.session["last_name"] = user.last_name
    request.session["email"] = user.email
    request.session["student_id"] = user.student_id
    return redirect('/')
def login(request):
    """Authenticate a user by email + bcrypt password check.

    On any failure (unknown email, wrong password, validator errors) an
    error message is flashed and the visitor is redirected back to the
    login page WITHOUT being signed in.
    """
    form = request.POST
    try:
        user = User.objects.get(email=form["login_email"])
    except (User.DoesNotExist, KeyError):  # narrowed from a bare except
        messages.error(request, "Please enter a correct email!")
        return redirect("/login")
    if not bcrypt.checkpw(form["login_password"].encode(), user.password.encode()):
        messages.error(request, "Please enter a correct password!")
        return redirect("/login")
    errors = User.objects.login_validation(form)
    if errors:
        for value in errors.values():
            messages.error(request, value)
        # bug fix: do not sign the user in when validation failed
        return redirect("/login")
    request.session["logged_in"] = user.id
    request.session["email"] = user.email
    request.session["first_name"] = user.first_name
    request.session["last_name"] = user.last_name
    request.session["student_id"] = user.student_id
    return redirect('/login')
def logout(request):
    """Drop all session state and return to the login page."""
    request.session.clear()
    return redirect('/login')
def add_question(request):
    # Persist a question submitted from the home page, tagged with the
    # current session's user id.
    # NOTE(review): `user` receives the raw session id; confirm the
    # Message.user field accepts an integer rather than a User instance.
    form = request.POST
    Message.objects.create(
        message= form['question_message'],
        user= request.session["logged_in"]
    )
    return redirect('/')
def add_book(request,book_id):
    # Render the single-product page with every book and the current user.
    # NOTE(review): `book_id` is never used and no specific book is passed
    # to the template -- confirm whether this view should fetch
    # Book.objects.get(id=book_id) like book_detail does.
    return render(request,'main/product-single.html',{
        "books": Book.objects.all(),
        "user": User.objects.get(id=request.session["logged_in"]),
    })
def about(request):
    """Render the about page, attaching the signed-in user when available."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/about.html', context)
def books(request):
    """List all books plus the most recently added ones, with the user if signed in."""
    context = {
        "books": Book.objects.all(),
        "recent_added_book": Book.objects.order_by('created_at').reverse(),
    }
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/books.html', context)
def faq(request):
    """Render the FAQ page, attaching the signed-in user when available."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/faq.html', context)
def privacy_policy(request):
    # NOTE(review): the two branches render *different* template names
    # ('main/privacy_policy.html' vs 'main/privacy-policy.html'); one is
    # almost certainly a typo -- confirm which file actually exists.
    if "logged_in" not in request.session:
        return render(request, 'main/privacy_policy.html')
    else:
        return render(request, 'main/privacy-policy.html',{
            "user": User.objects.get(id=request.session["logged_in"]),
        })
def terms_conditions(request):
    """Render the terms & conditions page, with the user when signed in."""
    context = {}
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/terms-conditions.html', context)
def products(request):
    """Render the catalogue page with all books and the most recent additions."""
    context = {
        "books": Book.objects.all(),
        "recent_added_book": Book.objects.order_by('created_at').reverse(),
    }
    if "logged_in" in request.session:
        context["user"] = User.objects.get(id=request.session["logged_in"])
    return render(request, 'main/products.html', context)
def book_detail(request,book_id):
    # Detail page for a single book; signed-in visitors also receive their
    # own borrowed-book list so the template can show borrow/return state.
    if 'logged_in' not in request.session:
        # messages.error(request, "You need to log in first!")
        # return redirect('/login')
        return render(request,'main/product-single.html',{
            "this_book": Book.objects.get(id=book_id)
        })
    else:
        this_book = Book.objects.get(id= book_id)
        this_user = User.objects.get(id= request.session["logged_in"])
        # NOTE(review): `.all` (no parentheses) passes the bound method;
        # Django templates call it, but confirm this is intentional.
        user_book= this_user.books.all
        return render(request, 'main/product-single.html',{
            "user": User.objects.get(id=request.session['logged_in']),
            "this_book": Book.objects.get(id=book_id),
            "books": Book.objects.all(),
            "user_book": user_book,
        })
def borrow(request,book_id):
    """Attach the current user to a book, rejecting duplicates and anonymous visitors."""
    if 'logged_in' not in request.session:
        messages.error(request, "You need to log in first!")
        return redirect('/login')
    book = Book.objects.get(id=book_id)
    visitor = User.objects.get(id=request.session["logged_in"])
    if visitor in book.users.all():
        messages.error(request, "You already chose this book!")
    else:
        book.users.add(visitor)
        messages.success(request, "Success!")
    return redirect(f"/books/{book_id}")
# def choose_book(request,book_id):
# form = request.POST
# this_user = User.objects.get(id=request.session["logged_in"])
# this_book = Book.objects.get(id=request.session["logged_in"])
def question(request):
    """Store a contact-form question with the sender's name and email."""
    form = request.POST
    Message.objects.create(
        message=form['question_message'],
        message_email=form['question_email'],
        message_name=form['question_name'],
    )
    return redirect('/')
def profile(request):
    """Show the signed-in user's profile with the books they have borrowed."""
    owner = User.objects.get(id=request.session["logged_in"])
    borrowed = owner.books.all()
    return render(request, "main/profile.html", {
        "user": owner,
        "books": borrowed.order_by('created_at'),
        "books_add": borrowed,
    })
def delete_book(request,book_id):
    """Remove a book from the signed-in user's borrowed list, then return to the profile."""
    borrower = User.objects.get(id=request.session["logged_in"])
    borrower.books.remove(Book.objects.get(id=book_id))
    return redirect('/profile')
def delete_book1(request,book_id):
    """Return a book from its detail page, flashing an error if it was never chosen."""
    book = Book.objects.get(id=book_id)
    visitor = User.objects.get(id=request.session["logged_in"])
    if book in visitor.books.all():
        visitor.books.remove(book)
        messages.success(request, "Remove")
    else:
        messages.error(request, "You didn't choose this book!")
    return redirect(f'/books/{book_id}')
# def search(request):
# if request.method == "GET":
# query = request.GET.get('q')
# submitbutton = request.GET.get('submit')
# if query is not None:
# lookup = Book(title= query) | tomnguyen103/Coding_Dojo | python_stack/django/Project1/apps/main/views.py | views.py | py | 8,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 23,
"usage_type": "call"
},
{
"a... |
74004428583 | from flask import Flask, jsonify
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
# Sample data, not accurate: placeholder statistics served by the API.
cancer_stats = {
    'Total_infected': 1000,
    'Active_cases': 500,
    'Recovered': 400,
    'Deaths': 200,
    'Critical': 50,
    'Mortality_rate': 20,
    'deceased': 100,
    'Population': 1000000
}
def update_stats():
    """Bump every statistic by 10 to simulate fresh data on each scheduler tick."""
    for key in cancer_stats:
        cancer_stats[key] += 10
def get_cancer_stats():
    # Serialize the current statistics as a JSON response.
    # NOTE(review): this view is never registered with the app (no
    # @app.route decorator anywhere), so it is unreachable over HTTP --
    # confirm the intended URL and add the decorator.
    return jsonify(cancer_stats)
if __name__ == '__main__':
    # Refresh the fake statistics once a minute in a background thread,
    # then serve the Flask app (debug mode: development use only).
    scheduler = BackgroundScheduler()
    scheduler.add_job(update_stats, 'interval', minutes=1)
    scheduler.start()
    print('Scheduler started')
    scheduler.print_jobs()
    app.run(debug=True)
| Ceced20/SimpleCancerAPI | API.py | API.py | py | 955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "apscheduler.schedulers.background.BackgroundScheduler",
"line_number": 32,
"usage_type": "call"
}
] |
6774316642 | import pickle
import streamlit as st
# Load the pre-trained classifier once at startup; the context manager
# closes the file handle (the original left it open for the app's lifetime).
with open("classifier.pkl", "rb") as classifier_in:
    clf = pickle.load(classifier_in)
def predict_banknote(variance, skewness, kurtosis, entropy):
    """Classify a banknote from its four wavelet features; return a verdict string."""
    features = [[variance, skewness, kurtosis, entropy]]
    label = clf.predict(features)
    if label[0] > 0.5:
        return "Its a fake note"
    return "It's a real banknote"
# Collect the four input features and show the prediction on demand.
variance=st.number_input("Enter the variance")
skewness=st.number_input("Enter the skewness")
kurtosis=st.number_input("Enter the kurtosis")
entropy=st.number_input("Enter the entropy")
if(st.button("Predict")):
    result=predict_banknote(variance,skewness,kurtosis,entropy)
    st.success(result)
{
"api_name": "pickle.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.number_input",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.number_input",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "streamlit.nu... |
455748841 | from astropy.io import fits
import numpy as np
hdulist = fits.open('/Users/dhk/work/cat/NGC_IC/VII_118.fits')
tb = hdulist[1].data
# Emit SHA query batch files of at most 1000 galaxy names each.
# BUG FIX: len(tb)/1000 is a float under Python 3 and range() rejects it;
# use integer floor division instead.
for x in range(0, len(tb) // 1000 + 1):
    with open("sha_quarry_batch_%d.txt" % (x), "w") as f:
        f.write("COORD_SYSTEM: Equatorial\n")
        f.write("EQUINOX: J2000\n")
        f.write("NAME-RESOLVER: NED\n")
        for y in range(x * 1000, (x + 1) * 1000):
            if y == len(tb):
                break
            # Keep only galaxy rows; catalogue names starting with 'I' are IC.
            if tb[y][1] == '  Gx':
                if tb[y][0][0] == 'I':
                    f.write('ic' + tb[y][0][1:].strip() + '\n')
                else:
                    f.write('ngc' + tb[y][0].strip() + '\n')
{
"api_name": "astropy.io.fits.open",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 4,
"usage_type": "name"
}
] |
30143632560 | from itertools import product
# from PyMiniSolvers import minisolvers
import os
def req1(n: int, N: int, disjunctions_list):
i_range = range(n, N + n)
for i in i_range:
clauses = [(f"t_{i}_0_0_" ), (f"t_{i}_0_1_" ),
(f"t_{i}_1_0_" ), (f"-t_{i}_1_1_" )]
disjunctions_list.extend(clauses)
def req2(n: int, N: int, disjunctions_list):
i_range = range(n, n + N)
k_range = range(2)
j_range = range(N + n)
for (i, k) in product(i_range, k_range):
existence_cond_variables = list((f"c_{i}_{k}_{j}_" for j in range(i))) # range(i)))
disjunctions_list.append(existence_cond_variables)
for j_1 in j_range:
for j_2 in range(n, N + n):
if j_2 < j_1:
disjunction_clause = [f"-c_{i}_{k}_{j_1}_", f"-c_{i}_{k}_{j_2}_"]
disjunctions_list.append(disjunction_clause)
def req2_(n: int, N: int, disjunctions_list):
i_range = range(n, n + N)
k_range = range(2)
j_range = range(N + n)
for (i, k) in product(i_range, k_range):
existence_cond_variables = list((f"c_{i}_{k}_{j}_" for j in range(i))) # range(i)))
disjunctions_list.append(existence_cond_variables)
for j_1 in range(i + 1, N + n):
for j_2 in range(i + 1, N + n):
if j_2 == j_1:
continue
disjunction_clause = [f"-c_{i}_{k}_{j_1}_", f"-c_{i}_{k}_{j_2}_"]
disjunctions_list.append(disjunction_clause)
def req3(n: int, N: int, output_size_m: int, disjunctions_list):
i_range = range(n, n + N)
j_range = range(output_size_m)
for j in j_range:
existence_cond = list(f"o_{i}_{j}_" for i in i_range)
disjunctions_list.append(existence_cond)
for i_1 in i_range:
# for i_2 in range(i_1 + 1, n + N):
for i_2 in i_range:
if i_1 == i_2:
continue
# if i_1 < i_2:
disjunction_clause = [f"-o_{i_1}_{j}_", f"-o_{i_2}_{j}_"]
disjunctions_list.append(disjunction_clause)
def req4(n: int, input_sets, disjunctions_list):
i_range = range(n)
t_range = range(2 ** n)
assert len(input_sets) == 2 ** n
for (i, t) in product(i_range, t_range):
input_value = input_sets[t][i]
sign = '' if input_value == 1 else '-'
clause = (f"{sign}v_{i}_{t}_")
disjunctions_list.append(clause)
def req5(n: int, N: int, disjunctions_list):
i_range = range(n, N + n)
t_range = range(2 ** n)
bit_range = range(2)
for (i, r, i_0, i_1) in product(i_range, t_range, bit_range, bit_range):
for j_0 in range(0, i):
# for j_0 in i_range:
for j_1 in range(0, i):
i_0_sign = '-' if i_0 == 1 else ''
i_1_sign = '-' if i_1 == 1 else ''
clause_1 = [f"-c_{i}_{0}_{j_0}_", f"-c_{i}_{1}_{j_1}_", f"{i_0_sign}v_{j_0}_{r}_",
f"{i_1_sign}v_{j_1}_{r}_", f"v_{i}_{r}_", f"-t_{i}_{i_0}_{i_1}_"]
clause_2 = [f"-c_{i}_{0}_{j_0}_",
f"-c_{i}_{1}_{j_1}_",
f"{i_0_sign}v_{j_0}_{r}_",
f"{i_1_sign}v_{j_1}_{r}_",
f"-v_{i}_{r}_",
f"t_{i}_{i_0}_{i_1}_"]
disjunctions_list.append(clause_1)
disjunctions_list.append(clause_2)
def req6(n: int, N: int, output_size_m: int, values, disjunctions_list):
i_range = range(n, N + n)
r_range = range(2 ** n)
k_range = range(output_size_m)
for (i, r, k) in product(i_range, r_range, k_range):
value = values[r][k]
sign = '' if value == 0 else '-'
clause = [f"-o_{i}_{k}_", f"{sign}v_{i}_{r}_"]
disjunctions_list.append(clause)
vectorOfValue = "0111"
quantityOfElement = 2
import math
numOfVars = int(math.log2(len(vectorOfValue)))
if 2 ** numOfVars != len(vectorOfValue):
raise ValueError("bad length")
print(numOfVars)
vectorOfValue = vectorOfValue.replace("1", "a").replace("0", "1").replace("a", "0")
dis_list = []
req1(quantityOfElement, numOfVars, dis_list)
string_clause = ""
string_clause += "ฮ".join(dis_list)
dis_list = []
req2(numOfVars, quantityOfElement, dis_list)
string_clause += "ฮ" + "ฮ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
req3(numOfVars, quantityOfElement, 1, dis_list)
string_clause += "ฮ" + "ฮ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
input_sets = list(product((0, 1), repeat=numOfVars))
req4(numOfVars, input_sets, dis_list)
string_clause += "ฮ" + "ฮ".join(dis_list)
dis_list = []
req5(numOfVars, quantityOfElement, dis_list)
string_clause += "ฮ" + "ฮ".join([ "V".join(dis) for dis in dis_list])
dis_list = []
values = [(int(value),) for value in vectorOfValue]
req6(numOfVars, quantityOfElement, 1,values,dis_list)
string_clause += "ฮ" + "ฮ".join([ "V".join(dis) for dis in dis_list])
string_clause += f"ฮo_{numOfVars + quantityOfElement - 1}_0_"
final = string_clause
fclause = [ [element for element in dis.split("V")] for dis in string_clause.split("ฮ")]
# print(fclause)
variables = set()
for dis in fclause:
for element in dis:
if element[0]=="-":
variables.add(element[1:])
else:
variables.add(element)
variables = (list(variables))
map_index_to_item = {}
map_item_to_index = {}
for i, var in enumerate(variables):
map_index_to_item[i+1] = var
map_item_to_index[var] = i + 1
final = final.replace(var, str(map_item_to_index[var]))
lens = len(string_clause.split("ฮ"))
for_minisat = f"p cnf {len(map_index_to_item)} {lens} \n"
for dis in string_clause.split("ฮ"):
if "V" in dis:
for elem in dis.split("V"):
sign = (-1 if elem[0]=="-" else 1)
for_minisat += str(sign * map_item_to_index[elem[1:] if elem[0]=="-" else elem]) + " "
else:
for_minisat += str((-1 if dis[0]=="-" else 1) * map_item_to_index[dis[1:] if dis[0]=="-" else dis]) + " "
for_minisat+="0\n"
# print(for_minisat)
file_str = for_minisat
file = open("for_minisat", 'w')
file.write(file_str)
file.close()
minisat_solution = {}
def from_minisat(output_minisat):
    """Parse minisat's model line into minisat_solution: variable name -> bool."""
    literals = output_minisat.split(" ")[:-1]  # drop the trailing terminator token
    print(literals)
    for literal in literals:
        negated = literal.startswith("-")
        index = int(literal[1:]) if negated else int(literal)
        minisat_solution[map_index_to_item[index]] = not negated
# Shell out to minisat, read the model from the second line of its output,
# and decode it into minisat_solution.
os.system("minisat for_minisat output")
file = open("output", 'r')
output_minisat= file.read().split("\n")[1]
file.close()
from_minisat(output_minisat)
# print(minisat_solution)
# --- render the satisfying circuit as a Graphviz digraph --------------------
body_string = "\n"
print(minisat_solution)
for key in minisat_solution.keys():
    if minisat_solution[key]:
        # c_i_k_j_ true: source j feeds pin k of element i -> edge j -> i.
        if key[0] == "c":
            c = key
            print(c)
            c = c[2:-1]
            c = c.split("_")
            from_ = ("x"+c[2]) if int(c[2]) < numOfVars else ("element"+c[2])
            to_ = ("x"+c[0]) if int(c[0]) < numOfVars else ("element"+c[0])
            body_string = body_string + """    "{}" -> "{}";\n""".format(from_, to_)
        # o_i_k_ true: element i drives the output -> edge i -> end.
        if key[0] == "o":
            o = key
            print(o)
            o = o[2:-1]
            o = o.split("_")
            o[0] = ("x"+o[0]) if int(o[0])< numOfVars else ("element"+o[0])
            body_string = body_string + """    "{}" -> "{}";\n""".format(o[0], "end")
# os.system("rm scheme.dot")
# os.system("rm scheme.dot.png")
file_name = "scheme.dot"
file_str = """digraph G {\n""" + body_string + """\n}"""
file = open(file_name, 'w')
file.write(file_str)
file.close()
os.system("dot -T png -O " + file_name)
exit()
# NOTE(review): everything below is dead code (exit() above never returns)
# and references `minisolvers`, whose import is commented out at the top.
S = minisolvers.MinisatSolver()
for i in range(len(map_index_to_item)):
    S.new_var()
for dis in final.split("Λ"):
    clause = [ int(elem) for elem in dis.split("V")]
    S.add_clause(clause)
print(S.solve())
solution = (list(S.get_model()))
print(solution)
{
"api_name": "itertools.product",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "itertools.produ... |
852394623 | # -*- coding: utf-8 -*-
import requests
import json
import csv
import time
import re
from CrawlClient import Crawler
from lxml import etree
class ZOJCrawler(Crawler.Crawler):
def __init__(self, max_try_cnt, url = 'http://acm.zju.edu.cn/onlinejudge'):
self.try_cnt = 0
self.max_try_cnt = max_try_cnt
self.url = url
self.rows = []
self.try_second = 10
def crawl(self):
print("ๆญฃๅจไป ZOJๆๅๆฐๆฎ...")
begin_time = time.time()
#print("Vol 66 ".find("Vol 66 "))
volume_cnt = 1
while True:
#Crawler.Crawler.progressbar(volume_cnt, 31)
print("ๆญฃๅจๆๅZOJ volume %d .." % volume_cnt)
url = self.url + "/showProblems.do?contestId=1&pageNumber=%d" % volume_cnt
while True:
try:
u = requests.get(url, headers= None)
break
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
print("่ฏทๆฑๅคฑ่ดฅ๏ผ%ds ๅ้่ฏ" % self.try_second)
time.sleep(self.try_second)
# with open("column.html", "r", encoding="utf-8") as f:
# data = f.read()
html = etree.HTML(u.text)
vol_id = html.xpath('//*[@id="content_title"]/text()')[0]
if vol_id.find("Vol %d" % volume_cnt) == -1:
break
cnt = 2
while True:
problem = html.xpath('//*[@id="content_body"]/form[1]/table/tr[%d]' % cnt)
if not problem:
break
#print(type(problem[0]))
pro_id = problem[0].xpath("td[1]//font/text()")[0]
pro_title = problem[0].xpath("td[2]//font/text()")[0]
try:
ac_submission = problem[0].xpath("td[3]//a[1]/text()")[0]
all_submission = problem[0].xpath("td[3]//a[2]/text()")[0]
except IndexError:
all_submission = ac_submission
ac_submission = 0
item = []
item.append("ZOJ")
item.append(pro_id)
item.append(pro_title)
item.append("")
item.append("")
item.append(ac_submission)
item.append(all_submission)
self.rows.append(item)
#print(pro_id, pro_title)
cnt = cnt + 1
volume_cnt = volume_cnt + 1
end_time = time.time()
print("ๆๅๅฎๆ๏ผ่ๆถ" ,time.strftime("%M:%S", time.localtime(end_time - begin_time)))
return True
def save(self, filename):
headers = ["OJ", "Problem Number", "Problem Title", "AC Users", "Try Users", "AC Submission",
"All Submission"]
with open(filename, "wt", encoding="GBK") as f:
f_csv = csv.writer(f, lineterminator='\n')
f_csv.writerow(headers)
f_csv.writerows(self.rows) | deepwzh/OJ-Crawers | CrawlClient/ZOJCrawler.py | ZOJCrawler.py | py | 3,051 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "CrawlClient.Crawler.Crawler",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "CrawlClient.Crawler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests... |
70878385064 | from secrets import choice
from asyncio import sleep
import discord
from discord.ext import tasks, commands
from extras import constants
from utils.audio import YoutubeHelper, YTDLSource
from utils.docker import DockerLogger
from utils import decorators
class TiozaoZap(commands.Cog):
'''
TiozaoZap Cogs
'''
def __init__(self, client):
self.client = client
self.logger = DockerLogger(lvl=DockerLogger.INFO, prefix='TiozaoZap')
async def _play_from_url(self, ctx, video_url, send_message=False):
'''
Plays the zap audio.
'''
voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
async with ctx.typing():
player = await YTDLSource.from_url(video_url, loop=self.client.loop)
voice_client.play(
player,
after=lambda e: print(f'Player error: %{e}') if e else None
)
if send_message:
await ctx.message.channel.send(f'Se liga nesse audio... {player.title}')
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message):
'''
When any member sends a message inside a guild text-channel.
'''
# Cancels the request if the sender was a bot.
if message.author.bot:
return
# bozo xingo
if any(word in message.content.lower() for word in constants.BOZO_XINGO_TRIGGERS):
choice(constants.RESPOSTA_XINGO)
await message.channel.send(choice(constants.RESPOSTA_XINGO))
@commands.command(name='audio_do_zap', aliases=['zap', 'audio', 'audio_zap'])
@decorators.in_voice_chat_only
@commands.guild_only()
async def audio_do_zap(self, ctx):
'''
Plays a video of selection 'audios do zap' to the users channel.
'''
voice_channel = ctx.message.author.voice.channel
# Sรณ tenta conectar se nรฃo estรก conectado, depois reseta
voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if not voice_client:
await voice_channel.connect()
voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
await self._play_from_url(
ctx,
video_url=choice(YoutubeHelper.get_urls_list()),
send_message=True
)
self.logger.log(
f'{ctx.guild.id} - {ctx.message.author.id} requested ZAP_AUDIO',
lvl=self.logger.INFO
)
# Disconnects after 5 seconds of audio ending
while voice_client.is_playing():
await sleep(5)
await voice_client.disconnect()
@commands.command(name='sus_sound_effect', aliases=['sus'])
@decorators.in_voice_chat_only
@commands.guild_only()
async def play_sus_sound(self, ctx):
'''
Plays the "sus" sound effect from amongus.
'''
voice_channel = ctx.message.author.voice.channel
# Sรณ tenta conectar se nรฃo estรก conectado, depois reseta
voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if not voice_client:
await voice_channel.connect()
voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
await self._play_from_url(
ctx,
video_url=constants.SUS_VIDEO_URL,
send_message=False
)
self.logger.log(
f'{ctx.guild.id} - {ctx.message.author.id} requested ZAP_AUDIO',
lvl=self.logger.INFO
)
# Disconnects after 5 seconds of audio ending
while voice_client.is_playing():
await sleep(5)
await voice_client.disconnect()
def setup(client):
'''
Cog setup.
'''
client.add_cog(TiozaoZap(client))
| LombardiDaniel/Sebotiao | src/cogs/tiozao.py | tiozao.py | py | 3,850 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "utils.docker.DockerLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_... |
39353557558 | import math
def area(r):
"""Area of a circle with radius 'r'"""
return math.pi * (r**2)
radii = [2, 5, 7.1, 0.3, 10]
# Method 1: Direct method
areas = []
for r in radii:
a = area(r)
areas.append(a)
print(areas)
# Method 2: Use 'map' functions
print(list(map(area, radii)))
print("===========")
temps = [("Berlin", 29), ("Cairo", 36), ("Buenos Aires", 19),
("Los Angeles", 26), ("Tokyo", 27),
("New York", 28), ("London", 22), ("Beiking", 32)]
c_to_f = lambda data: (data[0], (9/5)*data[1] + 32)
print(list(map(c_to_f, temps)))
print("===========")
import statistics
data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]
avg = statistics.mean(data)
print(avg)
print(list(filter(lambda x: x > avg, data)))
print("===========")
countries = ["", "Argentina", "Brazil", "Chile",
"", "Colombia", "", "Ecuador", "", "",
"Venezuela"]
print(list(filter(None, countries)))
print("===========")
from functools import reduce
# Multiply all numbers in a list
data = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
multi = lambda x, y: x * y
print(reduce(multi, data))
| Vaijyant/PythonPlayground | 23_map_filter_redunce.py | 23_map_filter_redunce.py | py | 1,113 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "statistics.mean",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 60,
"usage_type": "call"
}
] |
6672970825 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""``stsynphot`` configurable items.
The default configuration heavily depends on STScI TRDS structure
but it can be easily re-configured as the user wishes via
`astropy.config`.
``PYSYN_CDBS`` must be a defined system environment variable for
directories to be configured properly. It also overwrites
``synphot`` configurable items.
"""
# STDLIB
import os
# THIRD-PARTY
import numpy as np
from astropy import log
from astropy.config import ConfigNamespace, ConfigItem
# SYNPHOT
from synphot.config import Conf as synconf
from synphot.utils import generate_wavelengths
__all__ = ['conf', 'getref', 'showref', 'overwrite_synphot_config']
class Conf(ConfigNamespace):
"""Configuration parameters."""
# Set up default wavelength
_wave, _wave_str = generate_wavelengths(
minwave=500, maxwave=26000, num=10000, delta=None, log=True,
wave_unit='angstrom')
# Root directory
rootdir = ConfigItem(
os.environ.get('PYSYN_CDBS', '/grp/redcat/trds'),
'TRDS data root directory')
# Graph, optical component, and thermal component tables
graphtable = ConfigItem('mtab$*_tmg.fits', 'Graph table')
comptable = ConfigItem('mtab$*_tmc.fits', 'Component table')
thermtable = ConfigItem('mtab$*_tmt.fits', 'Thermal table')
# Default wavelength in Angstrom and its description
waveset_array = ConfigItem(
_wave.value.tolist(),
'Default wavelength set in Angstrom', 'float_list')
waveset = ConfigItem(_wave_str, 'Default wavelength set description')
# Telescope primary mirror collecting area in cm^2
area = ConfigItem(45238.93416, 'Telescope collecting area in cm^2')
# Common filter name
clear_filter = ConfigItem('clear', 'Name for a clear filter')
# Wavelength catalog file
wavecatfile = ConfigItem(
'synphot$wavecats/wavecat.dat', 'Wavelength catalog file')
# Detector parameters file
detectorfile = ConfigItem(
'synphot$detectors.dat', 'Detector parameters file')
# IRAF shortcuts file for stsynphot.stio.irafconvert()
irafshortcutfile = ConfigItem(
'synphot$irafshortcuts.txt',
'col1=shortcut_name col2=relpath_to_rootdir, has header.')
# Clean up
del _wave
del _wave_str
def _get_synphot_cfgitems():
"""Iterator for ``synphot`` configuration items."""
for c in synconf.__dict__.values():
if isinstance(c, ConfigItem):
yield c
def overwrite_synphot_config(root):
"""Silently overwrite ``synphot`` configurable items to point to
given root directory.
Parameters
----------
root : str
Root directory name.
"""
subdir_keys = ['calspec', 'extinction', 'nonhst']
# Need this for Windows support
if root.startswith(('http', 'ftp')):
sep = '/'
else:
sep = os.sep # Can be / or \
for cfgitem in _get_synphot_cfgitems():
path, fname = os.path.split(cfgitem())
i = np.where(list(map(path.__contains__, subdir_keys)))[0]
if len(i) == 0:
continue
subdir = subdir_keys[i[0]]
if subdir == 'nonhst':
cfgval = sep.join([root, 'comp', subdir, fname])
else:
cfgval = sep.join([root, subdir, fname])
cfgitem.set(cfgval)
conf = Conf()
# Override SYNPHOT configuration
overwrite_synphot_config(conf.rootdir)
def _get_ref_cfgitems():
"""Iterator for configuration items to be displayed."""
from stsynphot.stio import get_latest_file, irafconvert
for cfgitem, do_conv in (
(Conf.graphtable, True),
(Conf.comptable, True),
(Conf.thermtable, True),
(Conf.area, False),
(Conf.waveset, False)):
val = cfgitem()
if do_conv:
val = get_latest_file(irafconvert(val))
yield cfgitem.name, val
def getref():
"""Return current values of select configurable items as a dictionary.
Returns
-------
refdict : dict
"""
return dict([x for x in _get_ref_cfgitems()])
def showref(): # pragma: no cover
"""Show the values of select configurable items."""
info_str = '\n'
for x in _get_ref_cfgitems():
info_str += f'{x[0]:10s}: {x[1]}\n'
log.info(info_str)
| spacetelescope/stsynphot_refactor | stsynphot/config.py | config.py | py | 4,330 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "astropy.config.ConfigNamespace",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "synphot.utils.generate_wavelengths",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "astropy.config.ConfigItem",
"line_number": 37,
"usage_type": "call"
}... |
11939197341 | from flask import (
Blueprint,
flash,
redirect,
url_for,
render_template,
request,
send_from_directory,
)
from filenavi import model
from .wrap import require_authentication
from .error import MalformedRequest, Unauthorized, NotAuthenticated, NotAccessible
INLINE_EXTENSIONS = ["txt", "pdf", "png", "jpg", "jpeg", "gif"]
bp = Blueprint("storage", __name__)
@bp.route("/<user:owner>/<visibility:visibility>/browse/")
@bp.route("/<user:owner>/<visibility:visibility>/browse/<path:path>")
def browse(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(path, owner, visibility)
if visibility == model.Visibility.PRIVATE:
if user is None:
raise NotAuthenticated
if not user.has_access_to(target):
raise Unauthorized
if not path.is_dir():
as_attachment = True
if any(str(target.path).lower().endswith(f".{e}") for e in INLINE_EXTENSIONS):
as_attachment = False
return send_from_directory(
home, target.path.relative_to(home), as_attachment=as_attachment
)
if user is None or not user.has_access_to(target):
raise Unauthorized
if not request.path.endswith("/"):
return redirect(f"{request.url}/")
files = []
try:
for f in path.iterdir():
f = f.relative_to(home)
files.append(model.File(f, owner, visibility))
except:
raise NotAccessible
parent = None
if not home.samefile(path):
parent = model.File(path.parent, owner, visibility)
return render_template(
"storage/browse.html",
files=files,
user=user,
owner=owner,
visibility=visibility,
current=path.relative_to(home) if path != home else "",
parent=parent,
)
@bp.route("/<user:owner>/<visibility:visibility>/browse/", methods=["POST"])
@bp.route(
"/<user:owner>/<visibility:visibility>/browse/<path:path>", methods=["POST"]
)
@require_authentication
def browse_handler(owner, visibility, path=None):
user = model.User.current()
if "files" not in request.files and "directory" not in request.form:
raise MalformedRequest
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
if "files" in request.files:
uploads = request.files.getlist("files")
for upload in uploads:
if upload.filename == "":
raise MalformedRequest
upload.save(path / upload.filename)
if "directory" in request.form:
if request.form["directory"] == "":
raise MalformedRequest
directory = model.File(path / request.form["directory"], owner, visibility)
directory.mkdir()
return redirect(
url_for(
".browse", visibility=visibility, path=path.relative_to(home), owner=owner
)
)
@bp.route("/<user:owner>/<visibility:visibility>/move/<path:path>")
@require_authentication
def move(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
return render_template(
"storage/move.html",
file=target,
user=user,
owner=owner,
visibility=visibility,
)
@bp.route(
"/<user:owner>/<visibility:visibility>/move/<path:path>",
methods=["POST"],
)
@require_authentication
def move_handler(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(home / path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
rv = redirect(
url_for(
".browse",
visibility=visibility,
path=path.relative_to(home).parents[0],
owner=owner,
)
)
if "path" not in request.form:
raise MalformedRequest
if not target.path.exists():
flash("No such file or directory", "error")
return rv
try:
force = "replace" in request.form
target.move(home / request.form["move-path"], force=force)
except ValueError:
flash("Unable to move file", "error")
return rv
return rv
@bp.route("/<user:owner>/<visibility:visibility>/toggle/<path:path>")
@require_authentication
def toggle(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
return render_template(
"storage/toggle.html",
file=target,
user=user,
owner=owner,
visibility=visibility,
)
@bp.route(
"/<user:owner>/<visibility:visibility>/toggle/<path:path>",
methods=["POST"],
)
@require_authentication
def toggle_handler(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(home / path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
rv = redirect(
url_for(
".browse",
visibility=visibility,
path=path.relative_to(home).parents[0],
owner=owner,
)
)
if "path" not in request.form:
raise MalformedRequest
try:
force = "replace" in request.form
# TODO: Do not require a Path object
from pathlib import Path
target.toggle(Path(request.form["path"]), force=force)
except ValueError:
flash("Cannot toggle visibility", "error")
return rv
return rv
@bp.route("/<user:owner>/<visibility:visibility>/remove/<path:path>")
@require_authentication
def remove(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
return render_template(
"storage/remove.html",
file=target,
user=user,
owner=owner,
visibility=visibility,
)
@bp.route(
"/<user:owner>/<visibility:visibility>/remove/<path:path>",
methods=["POST"],
)
@require_authentication
def remove_handler(owner, visibility, path=None):
user = model.User.current()
home = owner.home(visibility)
path = (home / path) if path is not None else home
target = model.File(home / path, owner, visibility)
if not user.has_access_to(target):
raise Unauthorized
rv = redirect(
url_for(
".browse",
visibility=visibility,
path=path.relative_to(home).parents[0],
owner=owner,
)
)
recursive = "recursive" in request.form
try:
target.remove(recursive=recursive)
except ValueError:
flash("No such file or directory", "error")
return rv
except OSError:
flash("Cannot remove file or directory", "error")
return rv
return rv
| lukaswrz/filenavi | filenavi/routing/storage.py | storage.py | py | 7,605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "filenavi.model.User.current",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "filenavi.model.User",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "... |
27970752684 | import shutil
import tarfile
from collections.abc import Sequence
from pathlib import Path
from typing import Callable, Generic, TypedDict, TypeVar
import lightning.pytorch as pl
import torch
import torchaudio
from einops import rearrange
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
T = TypeVar('T')
class SequenceDataset(Dataset, Generic[T]):
def __init__(self, entries: Sequence[T], transform: Callable[[T], T] | None = None) -> None:
super().__init__()
self.entries = entries
self.transform = transform
def __getitem__(self, index: int):
ret = self.entries[index]
if self.transform:
ret = self.transform(ret)
return ret
def __len__(self):
return len(self.entries)
class SignalTrainDatasetModuleParams(TypedDict):
root: str
batch_size: int
training_segment_length: int
validation_segment_length: int
testing_segment_length: int
class SignalTrainDatasetModule(pl.LightningDataModule):
sample_rate = 44_100
hparams: SignalTrainDatasetModuleParams
def __init__(
self,
root: str = './data/SignalTrain',
batch_size: int = 32,
training_segment_length: int = 2 ** 16,
validation_segment_length: int = 2 ** 18,
testing_segment_length: int = 2 ** 23,
) -> None:
super().__init__()
self.save_hyperparameters()
def prepare_data(self) -> None:
link = 'https://zenodo.org/record/3824876/files/SignalTrain_LA2A_Dataset_1.1.tgz'
root = Path(self.hparams['root'])
if (root / 'Train').exists():
print('The SignalTrain dataset has been downloaded. Skipping ... ')
return
root.mkdir(511, True, True)
d = root / 'temp.tgz'
download_url_to_file(link, d)
with tarfile.open(d, 'r') as tf:
tf.extractall()
d.unlink()
shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Train', root)
shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Test', root)
shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Val', root)
(root / 'SignalTrain_LA2A_Dataset_1.1').unlink()
def train_dataloader(self):
entries = self._read_data(
Path(self.hparams['root']) / 'Train',
self.hparams['training_segment_length'],
)
return DataLoader(
entries,
self.hparams['batch_size'],
num_workers=8,
shuffle=True,
pin_memory=True,
collate_fn=self._collate_fn
)
def val_dataloader(self):
entries = self._read_data(
Path(self.hparams['root']) / 'Val',
self.hparams['validation_segment_length'],
)
return DataLoader(
entries,
self.hparams['batch_size'],
num_workers=8,
shuffle=False,
pin_memory=True,
collate_fn=self._collate_fn
)
def test_dataloader(self):
entries = self._read_data(
Path(self.hparams['root']) / 'Test',
self.hparams['testing_segment_length'],
)
return DataLoader(
entries,
self.hparams['batch_size'],
num_workers=8,
shuffle=False,
pin_memory=True,
collate_fn=self._collate_fn
)
@staticmethod
def _collate_fn(batch: list[tuple[Tensor, Tensor, Tensor]]):
return (
torch.stack([b[0] for b in batch]),
torch.stack([b[1] for b in batch]),
torch.stack([b[2] for b in batch]),
)
@staticmethod
def _data_augmentation(entry: tuple[Tensor, Tensor, Tensor]):
x, y, cond = entry
if torch.rand([1]).item() < 0.5:
x *= -1
y *= -1
return x, y, cond
@classmethod
def _slice_audio(cls, file: Path, segment_length: int) -> list[Tensor]:
load_result: tuple[Tensor, int] = torchaudio.load(file) # type: ignore
dat, sr = load_result
assert sr == cls.sample_rate
dat.squeeze_(0)
if dat.dim() != 1:
raise ValueError(f'{file} is not a mono audio.')
size, trill = divmod(dat.size(0), segment_length)
if trill != 0:
dat = dat[:-trill]
dat = rearrange(dat, '(S L) -> S L', S=size)
return [dat[i] for i in range(dat.size(0))]
def _read_data(self, data_path: Path, segment_length: int):
entries: list[tuple[Tensor, Tensor, Tensor]] = []
all_files = sorted(data_path.glob('*.wav'))
for file in tqdm(all_files, desc=f'Loading dataset from {data_path}.'):
if file.name.startswith('input'):
continue
file_id = file.name[7:10]
switch_value, peak_reduction_value = map(
int, file.stem.split('__')[1:])
input_file = file.with_name(f'input_{file_id}_.wav')
input_datas = self._slice_audio(input_file, segment_length)
output_datas = self._slice_audio(file, segment_length)
for input_data, output_data in zip(input_datas, output_datas):
assert input_data.size() == output_data.size()
entries.append((
input_data,
output_data,
torch.tensor([
switch_value, peak_reduction_value
], dtype=torch.float32)
))
return SequenceDataset(entries, self._data_augmentation)
| int0thewind/s4-dynamic-range-compressor | s4drc/src/dataset.py | dataset.py | py | 5,685 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Generic",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "collections.ab... |
6217952013 | """
Runs that functionality of the program, the flask app and the server that communicates with Walabot.
"""
from threading import Thread
from meeting_room import app
from FreeRoomsServer import FreeRoomsServer
from config import HOST, PORT
def main():
"""
Start the server that communicates with Walabot and the flask app the communicated with Alexa.
"""
try:
server = FreeRoomsServer(HOST, PORT)
free_rooms_server_thread = Thread(target=server.start)
alexa_server_thread = Thread(target=app.run)
free_rooms_server_thread.start()
alexa_server_thread.start()
free_rooms_server_thread.join()
alexa_server_thread.join()
except Exception:
print("Unknown exception occurred!")
raise
if __name__ == '__main__':
main()
| Walabot-Projects/Walabot-MeetingRoom | server/main.py | main.py | py | 817 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "FreeRoomsServer.FreeRoomsServer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "config.HOST",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "config.PORT",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "threa... |
20145975874 | import torch.nn as nn
# define small classifier
class MlpClassifier(nn.Module):
""" Simple classifier """
def __init__(self, args, n_classes, pretrain_stage_config):
super(MlpClassifier, self).__init__()
self.input_size = int(args['pretrain_output_size'] * args['seq_length'])
self.hidden_dim1 = 512
self.hidden_dim2 = 256
self.freeze = not args['finetuning']
self.fc1 = nn.Linear(in_features=self.input_size, out_features=self.hidden_dim1)
self.fc2 = nn.Linear(in_features=self.hidden_dim1, out_features=self.hidden_dim2)
self.fc3 = nn.Linear(in_features=self.hidden_dim2, out_features=n_classes)
def forward(self, src):
batch_size = src.size(0)
if self.freeze:
# detach src
src1 = src.data
else:
src1 = src
src2 = src1.reshape(batch_size, -1)
src3 = nn.functional.relu(self.fc1(src2))
src4 = nn.functional.relu(self.fc2(src3))
out = self.fc3(src4)
return out
| antonior92/physionet-12ecg-classification | models/mlp.py | mlp.py | py | 1,046 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
6971950883 | '''creating a form using flask to get username and password using html
and displaying success on submitting'''
from flask import Flask, redirect, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
return render_template("index.html")
@app.route("/success", methods = ['POST', "GET"])
def success():
if request.method == 'POST':
result = request.form
uname = request.form['username']
return render_template("success.html", result= result, username=uname)
if __name__ == '__main__':
app.run(debug=True) | R19R/Login_App_Using_Flask | may8th_ex1.py | may8th_ex1.py | py | 590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
70862843305 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 15 14:41:09 2022
@author: bas
"""
#https://instaloader.github.io/as-module.html
import instaloader
from datetime import datetime
from login import getMyUsername
import random
import pandas
def login(L, username, filename='login_session'):
if not isinstance(L.test_login(),str):
L.load_session_from_file(username, filename=filename)
return L
def get_posts(L, myUsername, targetUsername, datetimeEarliest, datetimeLatest):
L=login(L, myUsername)
profile = instaloader.Profile.from_username(L.context, targetUsername)
print('getting all posts...')
posts = [post for post in profile.get_posts()]
print('selecting posts...')
posts_interval = [post for post in posts if (post.date_utc>datetimeEarliest and post.date_utc<datetimeLatest)]
return posts_interval
if not 'L' in locals():
L = instaloader.Instaloader()
if not 'posts' in locals():
username = 'nyenrodebu'
myUsername = getMyUsername()
date_earliest = datetime(2020, 1, 1)
date_latest = datetime(2022, 1, 1)
posts = get_posts(L, myUsername, username, date_earliest, date_latest)
n = 78
posts_sampled = random.sample(posts, n)
posts_dict = {}
n_post = 0
for post in posts_sampled:
n_post += 1
print(f'post {n_post}/{n}')
post_dict = {}
post_dict['is_video'] = post.is_video
post_dict['likes'] = post.likes
post_dict['video_duration'] = post.video_duration
post_dict['video_view_count'] = post.video_view_count
post_dict['title'] = post.title
post_dict['url'] = f'https://www.instagram.com/p/{post.shortcode}/'
post_dict['mediacount'] = post.mediacount
post_dict['caption'] = post.caption
post_dict['date_utc'] = post.date_utc
post_dict['comments'] = post.comments
posts_dict[post.mediaid] = post_dict
df = pandas.DataFrame.from_dict(posts_dict, orient='index')
df.to_csv(f'output_files/username={username}_posts={n}.csv')
| Basdorsman/instagram-analysis | collect_data.py | collect_data.py | py | 1,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "instaloader.Profile.from_username",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "instaloader.Profile",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "instaloader.Instaloader",
"line_number": 31,
"usage_type": "call"
},
{
... |
18553686764 | import random
from itertools import chain
import numpy as np
import pandas as pd
from cytoolz import itemmap, sliding_window, valmap
from skfusion import fusion
class DataFusionModel(object):
def __init__(
self, nodes, relations, init_type="random", random_state=666, n_jobs=1
):
self.nodes = nodes
self.relation_definitions = relations
self.random_state = random_state
self.n_jobs = n_jobs
self.init_type = init_type
def reconstruct(self, src, dst, idx=0, return_dataframe=True):
relation = list(
self.fuser.fusion_graph.get_relations(self.types[src], self.types[dst])
)[idx]
values = self.fuser.complete(relation)
if return_dataframe:
components = self.relation_definitions[(src, dst)][idx]
return pd.DataFrame(
values, index=components.index.values, columns=components.columns.values
)
return values
def factor(self, type_name, return_dataframe=True):
factor = self.fuser.factor(self.types[type_name])
if not return_dataframe:
return factor
profile = pd.DataFrame(
factor,
index=self.indices[type_name],
columns=[f"C{i:02}" for i in range(factor.shape[1])],
)
return profile
def _construct_relationship(self, path, updated_factors):
start_node = path[0]
end_node = path[-1]
computed_matrix = (
self.fuser.factor(start_node)
if not start_node.name in updated_factors
else updated_factors[start_node.name]
)
print(
type(start_node),
start_node,
start_node.name in updated_factors,
computed_matrix.shape,
)
for src, dst in sliding_window(2, path):
relation = list(self.fuser.fusion_graph.get_relations(src, dst))[0]
print(relation)
computed_matrix = np.dot(computed_matrix, self.fuser.backbone(relation))
end_factor = (
self.fuser.factor(end_node)
if not end_node.name in updated_factors
else updated_factors[end_node.name]
)
computed_matrix = np.dot(computed_matrix, end_factor.T)
return computed_matrix
def relation_profiles(self, src, dst, updated_factors=None, index=None):
if updated_factors is None:
updated_factors = {}
if index is None:
index = self.indices[src]
paths = list(self.fuser.chain(self.types[src], self.types[dst]))
relations = []
for path in paths:
rel = self._construct_relationship(path, updated_factors)
profile = pd.DataFrame(rel, index=index, columns=self.indices[dst])
relations.append(profile)
return list(zip(paths, relations))
def fit(self, method='factorization'):
self.types = dict(
zip(
self.nodes.keys(),
map(lambda x: fusion.ObjectType(*x), self.nodes.items()),
)
)
print(self.types)
self.relations = map(
lambda x: map(
lambda r: fusion.Relation(
r.values, self.types[x[0][0]], self.types[x[0][1]]
),
x[1],
),
self.relation_definitions.items(),
)
self.relations = list(chain(*self.relations))
print(self.relations)
self.indices = {}
for (src, dst), dfs in self.relation_definitions.items():
if not src in self.indices:
self.indices[src] = list(dfs[0].index)
if not dst in self.indices:
self.indices[dst] = list(dfs[0].columns)
random.seed(self.random_state)
np.random.seed(self.random_state)
self.fusion_graph = fusion.FusionGraph(self.relations)
if method == 'factorization':
fuser = fusion.Dfmf
elif method == 'completion':
fuser = fusion.Dfmc
else:
raise ValueError('method must be factorization or completion')
self.fuser = fuser(
init_type=self.init_type, random_state=self.random_state, n_jobs=self.n_jobs
)
self.fuser.fuse(self.fusion_graph)
| zorzalerrante/aves | src/aves/models/datafusion/base.py | base.py | py | 4,339 | python | en | code | 57 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cytoolz.sliding_window",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
... |
24331960093 | from argparse import ArgumentParser
from ast import parse
import os
def handle_file(filename: str, blank: list[str]) -> None:
    """Rewrite *filename* so it starts with the blank-template entries.

    The file is expected to contain a bracketed list of quoted names.
    Everything up to and including the first blank line is treated as the
    old template header and dropped; the remaining entries are kept
    verbatim after the entries from *blank*.

    filename: path of the template file to rewrite in place.
    blank: bare entry names from the blank template (unquoted).
    """
    with open(filename, encoding="utf-8") as f:
        content = f.readlines()

    # Opening bracket, then the blank-template entries, then an empty
    # line separating them from the file's own entries.
    data = ['[']
    data.extend([f'"{x}",' for x in blank])
    data.extend(['\n'])

    skip = True
    for line in content:
        # Bracket lines belong to the list syntax, not to the entries.
        if '[' in line or ']' in line:
            continue
        # The first blank line marks the end of the old header block.
        if line == '\n':
            skip = False
            continue
        if skip:
            continue
        data.append(line.strip())
    data.append(']\n')

    with open(filename, 'w', encoding="utf-8") as f:
        f.write('\n'.join(data))
if __name__ == '__main__':
    parser = ArgumentParser(description='Update all templates based on the blank template')
    parser.add_argument(type=str, dest='filename', help='blank template')
    args = parser.parse_args()
    blank_file = args.filename
    with open(blank_file) as f:
        # The blank template is a bracketed list of quoted names; strip the
        # list syntax so only bare entry names remain.
        blank = f.read().replace('[', '').replace(']', '').replace('"', '').replace(',' , '').strip().split('\n')
    for dirpath, _, files in os.walk('.'):
        for filename in files:
            # BUG FIX: join with the walked directory — previously the bare
            # filename was used, so templates found in subdirectories were
            # opened relative to the CWD and failed (or were silently missed).
            path = os.path.join(dirpath, filename)
            if os.path.abspath(path) == os.path.abspath(blank_file):
                continue
            if filename.endswith('.txt'):
                print(path)
                handle_file(path, blank)
| zeusops/mission-templates | limited-arsenal-factions/update.py | update.py | py | 1,430 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 39,
"usage_type": "call"
}
] |
3325760856 | from django.contrib import admin
from .models import Review
# Register your models here.
@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
    """Admin configuration for product reviews."""

    # Columns shown on the review change-list page.
    list_display = (
        'product',
        'user',
        'rating',
        'title',
        'description',
        'review_date',
    )
    # Keep the listing grouped by product.
    ordering = ('product',)
| mosull20/crushed-grapes-ms4 | reviews/admin.py | admin.py | py | 343 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 19,
"usage_type": "call"
},... |
40917483210 | import requests
import json
import sys
import os
class PR():
    """Minimal GitHub pull-request helper bound to one repository."""

    # Give up on hung connections instead of blocking forever.
    REQUEST_TIMEOUT = 30

    def __init__(self, token, user, repo) -> None:
        """Store the API token and the owner/repository coordinates."""
        self.token = token
        self.user = user
        self.repo = repo

    def _headers(self):
        """Common authorization headers for the GitHub REST API."""
        return {'Authorization': 'Bearer ' + self.token}

    def raise_pr(self, title, head, base):
        """Open a pull request and return its number, or -1 on failure.

        title: PR title; head: source branch; base: target branch.
        """
        url = "https://api.github.com/repos/"+ self.user +"/"+ self.repo+"/pulls"
        payload = {
            "title": title,
            "head": head,
            "base": base
        }
        # `json=` serializes the body and sets the Content-Type header;
        # a timeout keeps a dead network from hanging the script.
        response = requests.post(url, headers=self._headers(), json=payload,
                                 timeout=self.REQUEST_TIMEOUT)
        if response.status_code == 201:
            return response.json()["number"]
        print(response.json())
        return -1

    def request_review(self, pr_number, reviewers):
        """Request *reviewers* on PR *pr_number*; return True on success."""
        print("Requesting for reviewers for PR {0}".format(pr_number))
        url = "https://api.github.com/repos/" + self.user + "/" + self.repo + "/pulls/" + str(pr_number) + "/requested_reviewers"
        print(url)
        payload = {
            "reviewers": reviewers
        }
        print(payload)
        response = requests.post(url, headers=self._headers(), json=payload,
                                 timeout=self.REQUEST_TIMEOUT)
        return response.status_code == 201
def workflow(token, user, repo, title, head, base, reviewers):
    """Raise a pull request and request reviews on it.

    Prints an error and stops early when the PR cannot be created instead
    of requesting reviews for an invalid PR number.
    """
    pr = PR(token, user, repo)
    pr_number = pr.raise_pr(title, head, base)
    if pr_number == -1:
        print("PULL_REQUEST ERROR unable to raise a PR")
        # BUG FIX: previously execution fell through and requested reviews
        # on PR number -1, which can never exist.
        return
    review = pr.request_review(pr_number, reviewers)
    if not review:
        print("REVIEW_REQUEST ERROR unable to add reviewer to the PR")
if __name__ == '__main__':
    # Expect exactly seven positional CLI arguments after the script name.
    if len(sys.argv) < 8:
        print("Usage: python3 main.py <token> <user> <repo> <pull request title> <pull request head> <pull request base> <pull request reviewers>")
        sys.exit(1)
    token, user, repo, title, head, base, reviewers = sys.argv[1:8]
    workflow(token, user, repo, title, head, base, reviewers.split(","))
| ajayk007/UI_release | raise_pr.py | raise_pr.py | py | 2,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number... |
13565958708 | # Author: Joshua Jackson
# Date: 06/20/2020
# This file will contain the class which create Word2Vec file using gensim
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
from datetime import datetime
# script to create word embeddings for Neural Network weight
class word2vec:
    """Build word2vec embeddings (with bigram phrases) using gensim."""

    def __init__(self, debug=False):
        # debug: reserved flag for verbose output (not yet used)
        self.debug = debug

    def create_bigram_embedding(self, tokens, emb_size=250, minCount=1,
                                threshold_amount=1, workers=3, algo=0, window=5):
        """Train a Word2Vec model over bigram-merged *tokens* and save it.

        tokens: pre-tokenized sentences (list of lists of strings).
        emb_size: embedding dimensionality.
        algo: 0 = CBOW, 1 = skip-gram (gensim's `sg` parameter).
        Returns the trained model, or None when training fails.
        """
        try:
            # Detect frequent collocations and merge them into bigram tokens.
            phrases = Phrases(tokens, min_count=minCount, threshold=threshold_amount)
            bigram = Phraser(phrases)

            model = Word2Vec(bigram[tokens],
                             size=emb_size,
                             window=window,
                             min_count=minCount,
                             workers=workers,
                             sg=algo)

            timestampStr = datetime.now().strftime("%d-%b-%Y")
            # BUG FIX: Word2Vec.save() takes no `binary` keyword (the
            # original raised TypeError, silently swallowed by the except
            # below).  The word2vec binary/text formats are written through
            # the keyed vectors instead.
            model.wv.save_word2vec_format(f'bigram-model-{timestampStr}.bin', binary=True)
            model.wv.save_word2vec_format(f'bigram-model-{timestampStr}.txt', binary=False)
            return model
        except Exception as e:
            print(f"Something went wrong in create_training_data: {e}")
| jjacks95/sentiment-analysis-financial-news | financialTextProcessing/financialTextProcessing/createWord2Vec.py | createWord2Vec.py | py | 1,730 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "gensim.models.phrases.Phrases",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "gensim.models.phrases.Phraser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 31,
"usage_type": "call"
},
{
... |
74051194023 | #from apiclient.discovery import build
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
#from oauth2client import client, file, tools
import datetime
import pytz
import re
import configparser
# """
# timezone/DST correction:
# """
# def getTimeFromUTC() ):
# #get the difference between localtime and now. round to half hours
# #N.B. bad b/c daylight saving!
# secdiff=(datetime.datetime.utcnow() - datetime.datetime.now()).total_seconds()
# hourdiff=round(secdiff/60,2)
# return( datetime.timedelta(seconds=hourdiff*60) )
#DELTAFROMUTC = getTimeFromUTC()
def to_utc(dt, tzstr="America/New_York"):
    """Convert naive local datetime *dt* to UTC by subtracting the zone's offset."""
    offset = pytz.timezone(tzstr).utcoffset(dt)
    return dt - offset
# later when printing, will get the same time as we put in
# utc=pytz.timezone('UTC')
# return( tz.localize(dt).astimezone( utc ) )
# def to_tz(dt,tzstr="America/New_York"):
# tz=pytz.timezone(tzstr)
# utc=pytz.timezone('UTC')
# return( utc.localize(dt).astimezone( tz ) )
def get_service(api_name, api_version, scope, key_file_location,
                service_account_email):
    """Build an authorized Google API service client.

    api_name, api_version: e.g. 'calendar', 'v3'.
    scope: list of OAuth scope URLs.
    key_file_location: path to the service account's .p12 key file.
    service_account_email: identity of the service account.
    Returns the googleapiclient service object.
    """
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account_email, key_file_location, scopes=scope)

    # UPMC MItM's our SSL connection: disable_ssl_certificate_validation=True
    # todo: add as config switch
    # SECURITY NOTE(review): disabling certificate validation permits
    # man-in-the-middle interception; this should be configurable, not
    # hard-coded.
    http = credentials.authorize(httplib2.Http(
        disable_ssl_certificate_validation=True))

    # Build the service object.
    service = build(api_name, api_version, http=http)

    return service
def g2time(dtstr):
    """
    google time string to datetime
    -> google gives back time in localtime

    BUG FIX: 'YYYY-mm-ddTHH:MM:SS' is 19 characters; the previous slice
    [0:18] dropped the last digit of the seconds field, so e.g. ':45'
    parsed as 4 seconds.  Slice to 19 so seconds are preserved (any
    timezone suffix after index 19 is still discarded).
    """
    return(datetime.datetime.strptime(dtstr[0:19], '%Y-%m-%dT%H:%M:%S'))
def calInfo(e):
    """
    get calendar info from google api returned dict
    split summary into expected parts: study age sex subj_initials ra score
    """
    start_raw = e['start']['dateTime']
    begin = g2time(start_raw)
    finish = g2time(e['end']['dateTime'])
    info = {
        'start': start_raw,
        'starttime': begin,
        'dur_hr': (finish - begin).seconds / 60 / 60,
        'creator': e['creator'].get('displayName'),
        'note': e.get('description'),
        'calid': e.get('id'),
        'summary': e.get('summary'),
        'htmlLink': e.get('htmlLink')
    }
    # Parse "study age yo sex (INITIALS) RA-score" out of the summary line.
    pattern = re.compile(
        r'(?P<study>[a-z/]+)[ -]*(?P<age>[0-9.]+) *yo(?P<sex>m|f) *\(?(?P<subjinit>[A-Z]{2,3})\)? *(?P<ra>[A-Z]{2,3})[ -]*(?P<score>[0-9.]+)',
        re.I)
    match = re.search(pattern, e['summary'])
    if match:
        info.update(match.groupdict())
    return info
def time2g(dt, tzstr="America/New_York"):
    """Format datetime *dt* as a UTC ISO-8601 string with a trailing 'Z'.

    NOTE(review): *tzstr* is accepted but unused; to_utc() applies its own
    default timezone — confirm whether callers rely on passing it.
    """
    return to_utc(dt).isoformat() + 'Z'
def time2gdict(dt, tzstr="America/New_York"):
    """Build the Google Calendar API start/end dict for datetime *dt*."""
    return {'dateTime': time2g(dt), 'timeZone': tzstr}
"""
a class containing a connection to our google calendar
"""
class LNCDcal():
    """Connection to a lab Google Calendar, configured from an ini file."""
    # authenticate
    # ini: cal.ini
    # [Calendar]
    # email = 'yyy@xxx.iam.gserviceaccount.com'
    # p12 = '/path/to/creds.p12'
    # calID = 'email@gmail.com'
    # tz = 'America/New_York'

    def __init__(self, ini):
        """Read *ini* and open an authorized Google Calendar service."""
        # Define the auth scopes to request.
        # -- read in from ini
        config = configparser.RawConfigParser()
        config.read(ini)
        service_account_email = config.get('Calendar', 'email')
        key_file_location = config.get('Calendar', 'p12')
        self.calendarId = config.get('Calendar', 'calID')
        # optional secondary calendar used as the target of move_event()
        self.backCalID = config.get('Calendar', 'backCalID', fallback=None)
        self.tzstr = config.get('Calendar', 'tz')
        scope = ['https://www.googleapis.com/auth/calendar']
        self.cal = get_service('calendar', 'v3', scope, key_file_location,
                               service_account_email)
        # might need to be updated after events are add
        self.events = self.cal.events()

    def find_in_range(self, dtmin, dtmax):
        """List events between *dtmin* and *dtmax*.

        Arguments may be datetimes (converted via time2g) or already
        API-formatted strings.  Full-day events (no dateTime) are dropped.
        Returns a list of calInfo() dicts.
        """
        if(isinstance(dtmin, datetime.datetime)):
            dtmin = time2g(dtmin)
        if(isinstance(dtmax, datetime.datetime)):
            dtmax = time2g(dtmax)
        events = self.events.list(
            calendarId=self.calendarId,
            singleEvents=True,
            timeMin=dtmin,
            timeMax=dtmax).execute()
        # use only events with datetime starts (remove full day events)
        items = [calInfo(i) for i in events['items']
                 if i['start'].get('dateTime')]
        # check time
        #dt.isoformat()[0:16] == items[0]['start']['dateTime'][0:16]
        return(items)

    def find(self, dt):
        """Events within +/- 10 minutes of *dt*."""
        delta = 10
        dtmin = dt - datetime.timedelta(minutes=delta)
        dtmax = dt + datetime.timedelta(minutes=delta)
        items = self.find_in_range(dtmin, dtmax)
        return(items)

    def upcoming(self, daydelta=5):
        """Events from now through *daydelta* days ahead."""
        dt = datetime.datetime.now()
        dtmin = time2g(dt, self.tzstr)
        dtmax = time2g(dt + datetime.timedelta(days=daydelta), self.tzstr)
        items = self.find_in_range(dtmin, dtmax)
        return(items)

    def insert_event(self, startdt, dur_h, summary, desc):
        """Create an event of *dur_h* hours starting at *startdt*.

        Returns the API's created-event resource.
        """
        endtime = startdt + datetime.timedelta(hours=dur_h)
        event = {
            'summary': summary,
            'description': desc,
            'start': time2gdict(startdt, self.tzstr),
            'end': time2gdict(endtime, self.tzstr)
        }
        eventres = self.cal.events().insert(
            calendarId=self.calendarId, body=event).execute()
        return(eventres)

    def delete_event(self, eventId):
        """Delete event *eventId* from the calendar."""
        res = self.cal.events().delete(
            calendarId=self.calendarId,
            eventId=eventId).execute()
        return(res)

    def get_event(self, eventId):
        """ get an event: useful for testing successful delete"""
        res = self.cal.events().get(
            calendarId=self.calendarId,
            eventId=eventId).execute()
        return(res)

    def move_event(self, eventId):
        """move event to different calendar we have a 'backCalID' in config"""
        if self.backCalID is None:
            raise Exception("No backCalID in config, but trying to move")
        print("moving %s to %s" % (eventId, self.backCalID))
        res = self.cal.events().move(
            calendarId=self.calendarId,
            eventId=eventId,
            destination=self.backCalID).execute()
        return(res)
| LabNeuroCogDevel/LNCDcal.py | LNCDcal/LNCDcal.py | LNCDcal.py | py | 6,453 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytz.timezone",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_p12_keyfile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
... |
34998272866 | # Import packages
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from pytesseract import Output
if __name__ == "__main__":
    img = cv2.imread('shelf_for_rectangles.jpg')
    print(img.shape) # Print image shape
    cv2.imshow("original", img)

    # Cropping an image
    # Candidate crop regions for each shelf sign (all currently disabled):
    # cropped_image = img[35:75, 65:275] #1
    # cropped_image = img[35:75, 285:495] #2
    # cropped_image = img[35:75, 495:705] #3
    # cropped_image = img[35:75, 715:925] #4
    # cropped_image = img[175:215, 65:275] #LCD 5
    # cropped_image = img[175:215, 285:495] # 6
    # cropped_image = img[175:215, 495:705] #7
    # cropped_image = img[175:215, 715:925] #8
    # cropped_image = img[310:345, 65:275] #9 battery
    # cropped_image = img[310:345, 285:495] #10
    # cropped_image = img[310:345, 495:705] #11
    # cropped_image = img[310:345, 715:925] #12
    # cropped_image = img[450:485, 153:300] #13 joystick
    # cropped_image = img[450:485, 395:620] #14
    # cropped_image = img[450:495, 670:910] #15
    # cropped_image = img[630:675, 420:590] #16 arduino

    #list with positions : pos (upper left corner) for all signs
    signs = [[35,65],]
    w = 210 #width sign
    h = 40 #hight sign

    # A text file is created and flushed
    file = open("signs_position_name.txt", "w+")
    file.write("")
    file.close()

    # Creating a copy of image
    im2 = img.copy()

    # OCR each sign region and append "text - midX,midY,90" to the file.
    # NOTE(review): the file is reopened on every iteration and only closed
    # when text is non-empty — consider a single `with open(...)` block.
    for pos in signs:
        y = pos[0]
        x = pos[1]
        mid_x = x + w/2
        mid_x = str(int(mid_x))
        mid_y = y + h/2
        mid_y = str(int(mid_y))

        cropped = im2[y:y + h, x:x + w]
        text = pytesseract.image_to_string(cropped)

        file = open("signs_position_name.txt", "a")
        if text == '':
            continue

        # Appending the text into file
        file.write(text + ' - ' + mid_x + ',' + mid_y + ',90')
        file.close()

    # Display cropped image
    # NOTE(review): `cropped_image` is never assigned — every assignment
    # above is commented out — so the next two calls raise NameError.
    cv2.imshow("cropped", cropped_image)

    # Save the cropped image
    cv2.imwrite("Cropped Image.jpg", cropped_image)
    cv2.waitKey(0)
    #cv2.destroyAllWindows()

    # Grayscale copy of the full image (currently unused afterwards).
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
def tesseract():
    """Run Tesseract OCR on 'Cropped Image.jpg' and print the recognized text."""
    # Windows install location of the tesseract binary.
    path_to_tesseract = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
    image_path = 'Cropped Image.jpg'

    pytesseract.tesseract_cmd = path_to_tesseract
    text = pytesseract.image_to_string(Image.open(image_path))

    # Drop the trailing character (Tesseract appends a final form feed/newline).
    print(text[:-1])

tesseract()
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytesseract.pytesseract.image_to_string",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pytesse... |
18879607170 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Support for django-reversion on models with translatable fields and django-cms
placeholder fields.
"""
from functools import partial
from django.db.models.signals import post_save
from cms.models.pluginmodel import CMSPlugin
from reversion.revisions import (
default_revision_manager, revision_context_manager, VersionAdapter)
# We would like this to not depend on Parler, but still support if it is
# available.
try:
    from parler import cache
except ImportError:
    # Narrowed from a bare `except:` — only a missing django-parler should
    # be tolerated here; any other error must surface.
    # NOTE(review): when parler is absent, `cache` stays undefined and
    # TranslatableVersionAdapterMixin._update_cache would raise NameError
    # if ever invoked — confirm that mixin is unused without parler.
    pass
def _add_to_context(obj, manager=None, context=None):
    """Register *obj* (with its version data) in the revision context.

    Falls back to the default revision manager / context when not given.
    """
    manager = manager if manager is not None else default_revision_manager
    context = (context if context is not None
               else default_revision_manager._revision_context_manager)
    adapter = manager.get_adapter(obj.__class__)
    context.add_to_context(manager, obj, adapter.get_version_data(obj))
def create_revision(obj, user=None, comment=None):
    """Create a single revision for *obj*, optionally tagged with a user
    and a comment.  Models carrying placeholder fields also get their CMS
    plugins captured.
    """
    ctx = revision_context_manager
    with ctx.create_revision():
        if user:
            ctx.set_user(user)
        if comment:
            ctx.set_comment(comment)
        _add_to_context(obj)
        has_placeholders = hasattr(obj._meta, 'placeholder_field_names')
        if has_placeholders:
            add_placeholders_to_revision(instance=obj)
def add_placeholders_to_revision(
        instance, revision_manager=None, rev_ctx=None):
    """
    Manually add plugins to the revision.

    Works like the page-based helper in django-cms (cms/utils/helpers.py)
    but for arbitrary models that carry placeholder fields.
    """
    def register(obj):
        # Funnel everything through the shared context helper with the
        # caller-provided manager/context.
        _add_to_context(obj, manager=revision_manager, context=rev_ctx)

    field_names = instance._meta.placeholder_field_names

    # First the placeholders themselves...
    for name in field_names:
        register(getattr(instance, name))

    # ...then every plugin living in one of those placeholders.
    ph_ids = [getattr(instance, '{0}_id'.format(name))
              for name in field_names]
    for plugin in CMSPlugin.objects.filter(placeholder_id__in=ph_ids):
        plugin_instance, _ = plugin.get_plugin_instance()
        if plugin_instance:
            register(plugin_instance)
        register(plugin)
class TranslatableVersionAdapterMixin(object):
    """Version-adapter mixin that also tracks django-parler translations."""

    # Set by the concrete adapter registration (see version_controlled_content).
    revision_manager = None

    def __init__(self, model):
        super(TranslatableVersionAdapterMixin, self).__init__(model)

        # If the model is translated with django-parler, register the
        # translation model to be tracked as well, by following all placeholder
        # fields, if any.
        if hasattr(model, '_parler_meta'):
            root_model = model._parler_meta.root_model
            self.revision_manager.register(root_model)

            # Also add the translations to the models to follow.
            self.follow = list(self.follow) + [model._parler_meta.root_rel_name]

            # And make sure that when we revert them, we update the translations
            # cache (this is normally done in the translation `save_base`
            # method, but it is not called when reverting changes).
            post_save.connect(self._update_cache, sender=root_model)

    def _update_cache(self, sender, instance, raw, **kwargs):
        """Update the translations cache when restoring from a revision."""
        if raw:
            # Raw is set to true (only) when restoring from fixtures or,
            # django-reversion
            cache._cache_translation(instance)
class PlaceholderVersionAdapterMixin(object):
    """Version-adapter mixin that follows django-cms placeholder fields
    and captures their plugins on save."""

    # Subclasses may set this False to skip placeholder tracking.
    follow_placeholders = True

    def __init__(self, model):
        super(PlaceholderVersionAdapterMixin, self).__init__(model)

        # Add cms placeholders the to the models to follow.
        placeholders = getattr(model._meta, 'placeholder_field_names', None)
        if self.follow_placeholders and placeholders:
            self.follow = list(self.follow) + placeholders
            post_save.connect(self._add_plugins_to_revision, sender=model)

    def _add_plugins_to_revision(self, sender, instance, **kwargs):
        # Only collect plugins while a revision is active and not being
        # managed manually by the caller.
        rev_ctx = self.revision_manager._revision_context_manager
        if rev_ctx.is_active() and not rev_ctx.is_managing_manually():
            add_placeholders_to_revision(
                instance=instance,
                revision_manager=self.revision_manager,
                rev_ctx=rev_ctx,
            )
class ContentEnabledVersionAdapter(TranslatableVersionAdapterMixin,
                                   PlaceholderVersionAdapterMixin,
                                   VersionAdapter):
    """Version adapter combining translation and placeholder tracking."""
    pass
# Convenience registration helper: register a model for revision tracking
# using the combined translation + placeholder adapter above.
version_controlled_content = partial(default_revision_manager.register,
                                     adapter_cls=ContentEnabledVersionAdapter,
                                     revision_manager=default_revision_manager)
| aldryn/aldryn-reversion | aldryn_reversion/core.py | core.py | py | 4,835 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "reversion.revisions.default_revision_manager",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "reversion.revisions.default_revision_manager._revision_context_manager",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "reversion.revisions.defa... |
19950192198 | import xdg.BaseDirectory
import xdg.MenuEditor
import gtk
import gio
import uxm.adapters as adapters
from uxm.adapters import xdg_adapter
def lookup_menu_files(filename):
    """Return every XDG config path providing the menu file *filename*."""
    return list(xdg.BaseDirectory.load_config_paths('menus/' + filename))
class MenuTreeModel(gtk.TreeStore):
    """GTK tree model over an XDG menu hierarchy (directories and entries),
    built from an xdg.MenuEditor menu and the project's adapter objects."""

    # Column indices for the store (see COLUMN_TYPES below for the types).
    (
        COLUMN_HIDE,
        COLUMN_TYPE,
        COLUMN_ID,
        COLUMN_NAME,
        COLUMN_ICON,
        COLUMN_MENU_FILE,
        COLUMN_SYSTEM_VISIBLE,
        COLUMN_USER_VISIBLE,
        COLUMN_OBJECT
    ) = range(9)
    # Extra column used only by the flattened liststore (to_liststore).
    COLUMN_LIST_PATH = 9
    COLUMN_TYPES = (
        bool, int, str, str, gio.Icon, str, bool, bool, object
    )

    def __init__(self, menu_file):
        """Load *menu_file* (falls back to 'uxm-applications.menu') and
        populate the store from its root directory."""
        gtk.TreeStore.__init__(self, *self.COLUMN_TYPES)
        if not menu_file:
            menu_file = 'uxm-applications.menu'
        self.menu_editor = xdg.MenuEditor.MenuEditor(menu_file)
        root = xdg_adapter.XdgDirectoryAdapter(self.menu_editor.menu)
        self.__append_directory(root, None, False, menu_file)
        self.entries_list_iter = None

    def to_liststore(self):
        """Flatten the tree into a gtk.ListStore, appending each row's
        stringified tree path as an extra column."""
        types = self.COLUMN_TYPES + (str,)
        store = gtk.ListStore(*types)
        columns = range(self.get_n_columns())

        def add(model, path, it):
            path = self.path_to_string(path)
            row = self.get_row(it, columns) + (path,)
            store.append(row)
        self.foreach(add)
        return store

    def path_to_string(self, path):
        """Serialize a tree path tuple as a colon-joined string."""
        if isinstance(path, str):
            return path
        return ':'.join((str(p) for p in path))

    def string_to_path(self, path):
        """Inverse of path_to_string (note: components stay strings)."""
        if isinstance(path, tuple):
            return path
        return tuple(path.split(':'))

    def get_row(self, iter, columns=None):
        """Return the values of *columns* (default: all) at *iter*."""
        if not columns:
            columns = range(self.get_n_columns())
        return self.get(iter, *columns)

    def update(self, data):
        """Apply edits from the *data* dict to both the on-disk menu and
        the tree row addressed by data['_path']."""
        t = data['type']
        # update menu
        if adapters.TYPE_ENTRY == t:
            self.menu_editor.editMenuEntry(
                data['object'].adaptee,
                name=data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                command=data['command'],
                icon=data['icon'],
                terminal=data['terminal']
            )
        elif adapters.TYPE_DIRECTORY == t:
            self.menu_editor.editMenu(
                data['object'].adaptee,
                name=data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                icon=data['icon'],
            )
        # update treemodel
        it = self.get_iter_from_string(data['_path'])
        obj = self.get_value(it, self.COLUMN_OBJECT)
        icon = gio.ThemedIcon(str(obj.get_icon()), True)
        self.set(
            it,
            self.COLUMN_ID, obj.get_filename(),
            self.COLUMN_NAME, obj.get_display_name(),
            self.COLUMN_ICON, icon
        )

    def create(self, data):
        """Create a new menu entry or submenu under data['_parent'] in the
        on-disk menu, then append a matching row to the store."""
        t = data['type']
        parent_path = data['_parent']
        parent_iter = self.get_iter_from_string(parent_path)
        parent_entry = self.get_value(parent_iter, self.COLUMN_OBJECT)
        if adapters.TYPE_ENTRY == t:
            entry = self.menu_editor.createMenuEntry(
                parent_entry and parent_entry.adaptee or None,
                data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                command=data['command'],
                icon=data['icon'],
                terminal=data['terminal']
            )
        elif adapters.TYPE_DIRECTORY == t:
            entry = self.menu_editor.createMenu(
                parent_entry and parent_entry.adaptee or None,
                data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                icon=data['icon'],
            )
        obj = xdg_adapter.factory(entry)
        icon = gio.ThemedIcon(str(obj.get_icon()), True)
        #FIXME: this doesn't update the view ???
        self.append(
            parent_iter,
            (
                t == adapters.TYPE_DIRECTORY,
                obj.get_type(), obj.get_display_name(),
                obj.get_display_name(), icon,
                None, True, True,
                obj
            )
        )

    def __append_directory(self, directory, parent_iter, system, menu_file):
        """Recursively merge *directory* (and its entries) into the store,
        marking rows system- or user-visible depending on *system*.

        Existing rows are matched by id so repeated calls merge rather
        than duplicate.  NOTE(review): local names `iter` and `type`
        shadow builtins — kept as-is here.
        """
        if not directory:
            return
        # Look for an existing row for this directory among the children.
        iter = self.iter_children(parent_iter)
        while iter is not None:
            if self.get_value(iter, self.COLUMN_ID) == directory.get_name():
                break
            iter = self.iter_next(iter)
        if iter is None:
            icon = gio.ThemedIcon(str(directory.get_icon()), True)
            type = directory.get_type()
            row = (
                type == adapters.TYPE_ENTRY,
                type, directory.get_name(),
                directory.get_display_name(), icon,
                menu_file, False, False,
                directory
            )
            iter = self.append(parent_iter, row)
        if system:
            self.set_value(iter, self.COLUMN_SYSTEM_VISIBLE, True)
        else:
            self.set_value(iter, self.COLUMN_USER_VISIBLE, True)
        for entry in directory:
            current_type = entry.get_type()
            if current_type == adapters.TYPE_DIRECTORY:
                self.__append_directory(entry, iter, system, None)
            if current_type != adapters.TYPE_ENTRY:
                continue
            # Match an existing entry row by filename, else append one.
            child_iter = self.iter_children(iter)
            while child_iter is not None:
                if self.get_value(child_iter, self.COLUMN_TYPE) == adapters.TYPE_ENTRY and (
                    self.get_value(child_iter, self.COLUMN_ID) == entry.get_filename()
                ):
                    break
                child_iter = self.iter_next(child_iter)
            if child_iter is None:
                icon = gio.ThemedIcon(str(entry.get_icon()), True)
                type = entry.get_type()
                row = (
                    type == adapters.TYPE_ENTRY,
                    type, entry.get_filename(),
                    entry.get_display_name(), icon,
                    None, False, False,
                    entry
                )
                child_iter = self.append(iter, row)
            if system:
                self.set_value(child_iter, self.COLUMN_SYSTEM_VISIBLE, entry.is_visible(), )
            else:
                self.set_value(child_iter, self.COLUMN_USER_VISIBLE, entry.is_visible(), )
{
"api_name": "xdg.BaseDirectory.BaseDirectory.load_config_paths",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "xdg.BaseDirectory.BaseDirectory",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "xdg.BaseDirectory",
"line_number": 10,
"usage_type... |
11936779128 | #import ipdb
import logging
from typing import Optional, cast
from rest_framework import serializers
from rest_framework.exceptions import APIException, ErrorDetail, ValidationError
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from ..exception.unprocessable_entity import UnprocessableEntity
from ..models import *
from .name_and_uuid_serializer import NameAndUuidSerializer
from .embedded_id_validating_serializer_mixin import (
EmbeddedIdValidatingSerializerMixin
)
from .group_setting_serializer_mixin import GroupSettingSerializerMixin
from .workflow_task_instance_serializer import WorkflowTaskInstanceSerializer
from .workflow_transition_serializer import WorkflowTransitionSerializer
from .workflow_execution_serializer import WorkflowExecutionSummarySerializer
# Module-level logger for this serializer module.
logger = logging.getLogger(__name__)

# Fields shared by the summary and full Workflow serializers.
COMMON_FIELDS = [
    'url', 'uuid', 'name', 'description', 'dashboard_url',
    'schedule', 'max_concurrency',
    'max_age_seconds', 'default_max_retries',
    'max_postponed_failure_count', 'max_postponed_missing_execution_count',
    'max_postponed_timeout_count',
    'min_missing_execution_delay_seconds',
    'postponed_failure_before_success_seconds',
    'postponed_missing_execution_before_start_seconds',
    'postponed_timeout_before_success_seconds',
    'scheduled_instance_count',
    'should_clear_failure_alerts_on_success',
    'should_clear_timeout_alerts_on_success',
    'latest_workflow_execution',
    'created_by_user', 'created_by_group',
    'run_environment',
    'enabled',
    'created_at', 'updated_at'
]

# Fields that API clients can never write.
COMMON_READ_ONLY_FIELDS = [
    'url', 'uuid', 'dashboard_url',
    'latest_workflow_execution',
    'created_by_user', 'created_by_group',
    'created_at', 'updated_at'
]
class WorkflowSummarySerializer(GroupSettingSerializerMixin,
                                serializers.HyperlinkedModelSerializer):
    """
    Selected properties of Workflows.
    """
    class Meta:
        model = Workflow
        fields = COMMON_FIELDS
        read_only_fields = COMMON_READ_ONLY_FIELDS

    # Inline summary of the most recent execution; never writable.
    latest_workflow_execution = WorkflowExecutionSummarySerializer(
        required=False, allow_null=True, read_only=True)

    # Detail URL keyed by uuid rather than the numeric primary key.
    url = serializers.HyperlinkedIdentityField(
        view_name='workflows-detail',
        lookup_field='uuid'
    )
class WorkflowSerializer(
        EmbeddedIdValidatingSerializerMixin,
        FlexFieldsSerializerMixin,
        WorkflowSummarySerializer):
    """
    Workflows are Tasks arranged in a directed graph. Configured Tasks
    are held by WorkflowTaskInstances, and WorkflowTransitions connect
    WorkflowTaskInstances together.
    """

    # Client-supplied UUIDs with this prefix denote objects to create;
    # they are mapped to real UUIDs during create_or_update().
    NEW_UUID_PREFIX = 'NEW_'

    class Meta:
        model = Workflow
        fields = COMMON_FIELDS + [
            'alert_methods', 'workflow_task_instances',
            'workflow_transitions',
        ]
        read_only_fields = COMMON_READ_ONLY_FIELDS

    # Nested task instances / transitions are read-only at the field level;
    # writes go through the raw dicts handled in create_or_update().
    workflow_task_instances = WorkflowTaskInstanceSerializer(
        many=True, read_only=True)
    workflow_transitions = WorkflowTransitionSerializer(many=True, read_only=True)
    alert_methods = NameAndUuidSerializer(include_name=True,
            view_name='alert_methods-detail', many=True, required=False)

    def to_internal_value(self, data):
        """Normalize incoming data and stash the raw nested dicts
        (task instances / transitions) for create_or_update()."""
        logger.info(f"wfs: to_internal value, data = {data}")
        workflow: Optional[Workflow] = cast(Workflow, self.instance) if self.instance else None

        data['description'] = data.get('description') or ''
        data['schedule'] = data.get('schedule') or ''
        # Read-only summary; never accepted from clients.
        data.pop('latest_workflow_execution', None)
        validated = super().to_internal_value(data)
        # Pass the raw nested dicts through untouched.
        validated['workflow_task_instances'] = data.get('workflow_task_instances')
        validated['workflow_transitions'] = data.get('workflow_transitions')
        logger.debug(f"wfs: to_internal value, validated = {validated}")
        run_environment = validated.get('run_environment',
                workflow.run_environment if workflow else None)
        self.set_validated_alert_methods(data=data, validated=validated,
                run_environment=run_environment,
                allow_any_run_environment=(run_environment is None))
        return validated

    def create(self, validated_data):
        return self.create_or_update(None, validated_data)

    def update(self, instance, validated_data):
        return self.create_or_update(instance, validated_data)

    def create_or_update(self, instance, validated_data):
        """Save the Workflow plus its nested task instances and
        transitions, reconciling against existing children by uuid/name
        and resolving NEW_-prefixed placeholder UUIDs."""
        defaults = validated_data
        alert_methods = defaults.pop('alert_methods', None)
        wtis = defaults.pop('workflow_task_instances', None)
        wts = defaults.pop('workflow_transitions', None)

        if instance:
            super().update(instance, defaults)
            workflow = instance
        else:
            defaults.pop('uuid', None)
            workflow = Workflow(**defaults)

        workflow.save()

        if alert_methods is not None:
            workflow.alert_methods.set(alert_methods)

        # Without task instances in the payload there is nothing more to
        # reconcile (transitions depend on the instances).
        if wtis is None:
            return workflow

        # Index existing task instances by uuid and by name for matching.
        old_wtis_by_uuid = {}
        old_wtis_by_name = {}
        for wti in workflow.workflow_task_instances.select_related(
                'task__run_environment').all():
            old_wtis_by_uuid[str(wti.uuid)] = wti
            old_wtis_by_name[wti.name] = wti

        new_wtis_by_uuid = {}
        new_wtis_by_name = {}
        for wti_dict in wtis:
            wti_uuid = wti_dict.get('uuid')

            if wti_uuid:
                new_wtis_by_uuid[wti_uuid] = wti_dict
            else:
                wti_name = wti_dict.get('name')
                if wti_name is None:
                    raise ValidationError({
                        'workflow_task_instances': [
                            ErrorDetail('Workflow Task Instance missing uuid and name', code='invalid')
                        ]
                    })
                new_wtis_by_name[wti_name] = wti_dict

        # Remove existing instances that the payload no longer references.
        for wti_uuid, wti in old_wtis_by_uuid.items():
            if (wti_uuid not in new_wtis_by_uuid) and (wti.name not in new_wtis_by_name):
                wti.delete()

        logger.info(f"old_wtis_by_uuid = {old_wtis_by_uuid}")

        old_wts_by_uuid = {}
        for wt in workflow.workflow_transitions().all():
            old_wts_by_uuid[str(wt.uuid)] = wt

        # Create or update each task instance via its nested serializer.
        for wti_dict in wtis:
            wti_uuid = wti_dict.pop('uuid', None)
            wti_name = wti_dict.get('name')
            existing_wti = None
            if wti_uuid:
                # NEW_-prefixed uuids denote to-be-created instances.
                if not wti_uuid.startswith(self.NEW_UUID_PREFIX):
                    existing_wti = old_wtis_by_uuid.get(wti_uuid)
                    if existing_wti is None:
                        raise ValidationError({
                            'workflow_task_instances': [
                                ErrorDetail(f'Workflow Task Instance with UUID {wti_uuid} is not part of Workflow',
                                        code='invalid')
                            ]
                        })
                    logger.info(f"Found existing WTI with UUID {wti_uuid}")
            elif wti_name:
                existing_wti = old_wtis_by_name.get(wti_name)
                if existing_wti is None:
                    raise ValidationError({
                        'workflow_task_instances': [
                            ErrorDetail(f"Workflow Task Instance with name '{wti_name}' is not part of Workflow",
                                    code='invalid')
                        ]
                    })

            ser = WorkflowTaskInstanceSerializer(instance=existing_wti, data=wti_dict,
                    partial=True, context=self.context, workflow=workflow,
                    for_embedded_deserialization=True)

            # Re-wrap nested serializer failures so the error is reported
            # under the 'workflow_task_instances' key.
            try:
                if not ser.is_valid():
                    msg = f"Error saving Workflow Task Instance with UUID {wti_uuid or '[Empty]'}, name '{wti_name or '[Empty]'}'"
                    logger.error(msg)
                    # ser.errors results in ValueError: Too many values to unpack
                    #errors = [error_detail.string for error_detail in ser.errors]
                    raise serializers.ValidationError({
                        'workflow_task_instances': [msg]
                    })
            except serializers.ValidationError as ve:
                logger.exception('workflow serializer validation error')
                raise serializers.ValidationError({
                    'workflow_task_instances': [str(ve)]
                }) from ve
            except UnprocessableEntity as ue:
                raise UnprocessableEntity({
                    'workflow_task_instances': [str(ue)]
                }) from ue
            except APIException as api_ex:
                raise APIException({
                    'workflow_task_instances': [str(api_ex)]
                }) from api_ex

            saved_wti = ser.save(workflow=workflow)

            # Remember created instances so transitions can resolve their
            # NEW_-prefixed endpoint uuids below.
            if wti_uuid and wti_uuid.startswith(self.NEW_UUID_PREFIX):
                new_wtis_by_uuid[wti_uuid] = saved_wti

        if wts is None:
            # FIXME: handle case when transitions are not resent
            logger.info('Workflow Transitions not set')
        else:
            for wt_dict in wts:
                wt_uuid = wt_dict.pop('uuid', None)

                existing_wt = None
                if wt_uuid and not wt_uuid.startswith(self.NEW_UUID_PREFIX):
                    existing_wt = old_wts_by_uuid.pop(wt_uuid, None)

                    if existing_wt is None:
                        raise ValidationError({
                            'workflow_task_instances': [
                                ErrorDetail(f'Workflow Transition with UUID {wt_uuid} is not part of Workflow',
                                        code='invalid')
                            ]
                        })

                # Map NEW_-prefixed endpoint uuids to the real saved uuids.
                from_wti_dict = wt_dict.get('from_workflow_task_instance', None)
                if from_wti_dict:
                    wti_uuid = from_wti_dict['uuid']
                    if wti_uuid.startswith(self.NEW_UUID_PREFIX):
                        from_wti_dict['uuid'] = str(new_wtis_by_uuid[wti_uuid].uuid)

                to_wti_dict = wt_dict.get('to_workflow_task_instance', None)
                if to_wti_dict:
                    wti_uuid = to_wti_dict['uuid']
                    if wti_uuid.startswith(self.NEW_UUID_PREFIX):
                        to_wti_dict['uuid'] = str(new_wtis_by_uuid[wti_uuid].uuid)

                if existing_wt:
                    ser = WorkflowTransitionSerializer(existing_wt, data=wt_dict, context=self.context)
                else:
                    ser = WorkflowTransitionSerializer(data=wt_dict, context=self.context)

                ser.is_valid(raise_exception=True)
                ser.save()

            # Transitions not re-sent in the payload are removed.
            WorkflowTransition.objects.filter(uuid__in=old_wts_by_uuid.keys()).delete()

        return workflow
| CloudReactor/task_manager | server/processes/serializers/workflow_serializer.py | workflow_serializer.py | py | 10,948 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "group_setting_serializer_mixin.GroupSettingSerializerMixin",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_nu... |
9710241755 | import subprocess
from datetime import datetime
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from local import my_printer, printer_list
# Ce programme imprime de petites รฉtiquettes pour des tubes de type Eppendorf 1.5 ml
# L'utilisateur dispose de 4 champs.
# L'utilisateur peut dรฉcider d'imprimer la date d'impression ou un cinquiรจme champ.
# Function that prints the labels
def print_labels():
    """Build an IPL label job from the form fields and send it to the printer.

    Reads the four entry fields (plus either today's date or the free-text
    field), renders the Intermec IPL template for 1.5 ml Eppendorf tubes,
    writes it to ``etiq.txt`` and copies that file to the selected printer
    share via the Windows ``copy`` command.
    """
    field_1 = entry_field_1.get()
    field_2 = entry_field_2.get()
    field_3 = entry_field_3.get()
    field_4 = entry_field_4.get()
    nb_labels = int(entry_nb_labels.get())  # number of labels to print
    if add_date_var.get():
        # print the current date/time on the label
        now = datetime.now().strftime("%Y/%m/%d %H:%M")
    else:
        # otherwise print the user-supplied free-text field instead
        now = alt_field_for_date.get()
    ipl_format_for_Epp_1_5_ml = f"""
<STX><ESC>C<ETX><STX><ESC>P<ETX><STX>E5;F5;<ETX>
<STX>H01;o315,565;b0;f2;h01;w01;c34;d3,{field_1};<ETX>
<STX>H02;o55,565;b1;f2;h01;w01;c31;d3,{field_2};<ETX>
<STX>H04;o315,520;b0;f2;h01;w01;c34;d3,{field_3};<ETX>
<STX>H05;o315,455;b0;f2;h02;w01;c2;d3,{field_4};<ETX>
<STX>H06;o315,415;b0;f2;h01;w01;c30;d3,{now};<ETX>
/* ligne */
<STX>L07;o315,380;f2;l1300;w4<ETX>
# <STX>B10;o125,115;c2;f3;h160;w03;i0;d3," + ";<ETX>
/* afficher ALIQUOT BIO MOL */
<STX>H14;o315,300;b1;f2;h01;w01;c31;d3,BIOMOL;<ETX>
/* Mini รฉtiquette pour couvercle */
<STX>H16;o315,100;b0;f2;h01;w01;c31;d3,{field_1};<ETX>
<STX>H17;o315,65;b0;f2;h01;w01;c31;d3,{field_3};<ETX>
<STX>R<ETX><STX><ESC>E5<CAN><ETX><STX><RS>{nb_labels}<ETB><ETX>
"""
    with open("etiq.txt", 'w') as f:
        # write() instead of writelines(): writelines on a str iterated it
        # character by character (same output, needless work)
        f.write(ipl_format_for_Epp_1_5_ml)
    try:
        # raw string: ".\etiq.txt" contained the invalid escape sequence "\e"
        # (same runtime value, but a DeprecationWarning on modern Python)
        subprocess.check_output(["copy", r".\etiq.txt", selected_printer.get()], shell=True)
        messagebox.showinfo("Impression rรฉussie", "Les รฉtiquettes ont รฉtรฉ imprimรฉes avec succรจs.")
    except Exception as e:
        messagebox.showerror("Erreur d'impression", f"Une erreur est survenue lors de l'impression : {str(e)}")
# Enable or disable the free-text field depending on the date checkbox
def toggle_field_5():
    """Hide the free-text entry when the date is printed, show it otherwise."""
    if add_date_var.get():
        # the date is printed automatically -> hide the manual field
        alt_field_for_date.grid_remove()
        return
    # show the field again and make it editable
    alt_field_for_date.grid(row=6, column=1)
    alt_field_for_date.configure(state='normal')
# Crรฉation de la fenรชtre principale
root = tk.Tk()
# root.geometry("600x400")
root.title("Gรฉnรฉrateur d'รฉtiquettes")
# Sรฉlection de l'imprimante
printer_frame = ttk.Frame(root)
printer_frame.grid(row= 0, column=0, rowspan=2, columnspan=2)
label_printer = ttk.Label(printer_frame, text="Sรฉlectionnez l'imprimante :")
label_printer.grid(row = 0, column = 0, pady = 30, sticky='W')
# printer_list = ["Imprimante1", "Imprimante2", "Imprimante3"] # Remplacez par vos imprimantes rรฉelles
selected_printer = tk.StringVar(value=printer_list[0])
printer_menu = ttk.Combobox(printer_frame, textvariable=selected_printer, values=printer_list)
printer_menu.grid(row = 0, column = 1, sticky='W')
# Champs ร remplir
entry_frame = ttk.Frame(root)
entry_frame.grid(pady=20, padx= 50)
label_fields = ttk.Label(entry_frame, text="Remplissez les champs :")
label_fields.grid(row=1, column=0, rowspan=3)
entry_field_1 = ttk.Entry(entry_frame, width=11)
entry_field_1.insert(0, "25121245")
entry_field_1.grid(row=1, column=1, sticky='W')
entry_field_2 = ttk.Entry(entry_frame, width=2) # Champ 2 rรฉduit ร 2 caractรจres
entry_field_2.insert(0, "98")
entry_field_2.grid(row=1, column=2, sticky='W')
entry_field_3 = ttk.Entry(entry_frame, width=24)
entry_field_3.insert(0, "TEST second")
entry_field_3.grid(row=2, column=1 )
entry_field_4 = ttk.Entry(entry_frame, width=20)
entry_field_4.insert(0, "31/12/1964 M")
entry_field_4.grid(row=3, column=1, sticky='W' )
# Option pour ajouter la date du jour
add_date_var = tk.BooleanVar(value = True)
add_date_checkbox = ttk.Checkbutton(entry_frame, text="Ajouter la date du jour", variable=add_date_var,
command=toggle_field_5)
add_date_checkbox.grid(row=5, column=1)
# Champ pour spรฉcifier le champ field_5 (initialement grisรฉ)
label_field_5 = ttk.Label(entry_frame, text="Champ libre :")
label_field_5.grid(row=6, column=0)
alt_field_for_date = ttk.Entry(entry_frame, width=20,
state="disabled"
)
alt_field_for_date.grid(row=6, column=1)
# Champ pour spรฉcifier le nombre d'รฉtiquettes ร imprimer
last_frame = ttk.Frame(root)
last_frame.grid(pady=20)
label_nb_labels = ttk.Label(last_frame, text="Nombre d'รฉtiquettes ร imprimer :")
label_nb_labels.grid(row= 0, column= 0)
entry_nb_labels = ttk.Entry(last_frame, width=5)
entry_nb_labels.insert(0, "3") # Valeur par dรฉfaut
entry_nb_labels.grid(row= 0, column= 1, sticky='W')
# Bouton d'impression
bottom_frame = ttk.Frame(root)
bottom_frame.grid(pady=20)
print_button = ttk.Button(bottom_frame, text="Imprimer", command=print_labels)
print_button.grid(row= 1, column = 1)
root.mainloop()
input("") | bermau/py_liq_dilutions | tk_label.py | tk_label.py | py | 5,100 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "subprocess.check_output",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tkint... |
37662015618 | from PyQt5.QtWidgets import QDialog, QComboBox, QPushButton, QRadioButton
from pulse.utils import error
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
class ElementTypeInput(QDialog):
    """Modal dialog that lets the user choose a structural element type.

    After the dialog closes, ``self.element_type`` holds the chosen type
    ('pipe_1', 'pipe_2' or 'shell') and ``self.flagAll`` / ``self.flagEntity``
    record whether the choice applies to all entities or only the selection.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        uic.loadUi('pulse/uix/user_input/ui/elementTypeInput.ui', self)
        icons_path = 'pulse\\data\\icons\\'
        self.icon = QIcon(icons_path + 'pulse.png')
        self.setWindowIcon(self.icon)
        self.index = 0
        self.element_type = 'pipe_1'  # default selection
        self.comboBox = self.findChild(QComboBox, 'comboBox')
        self.comboBox.currentIndexChanged.connect(self.selectionChange)
        self.index = self.comboBox.currentIndex()
        self.radioButton_all = self.findChild(QRadioButton, 'radioButton_all')
        self.radioButton_entity = self.findChild(QRadioButton, 'radioButton_entity')
        self.radioButton_all.toggled.connect(self.radioButtonEvent)
        self.radioButton_entity.toggled.connect(self.radioButtonEvent)
        self.flagAll = self.radioButton_all.isChecked()
        self.flagEntity = self.radioButton_entity.isChecked()
        self.pushButton_2 = self.findChild(QPushButton, 'pushButton_confirm')
        self.pushButton_2.clicked.connect(self.button_clicked)
        self.exec_()  # block until the dialog is closed
    def radioButtonEvent(self):
        # keep the scope flags in sync with the radio buttons
        self.flagAll = self.radioButton_all.isChecked()
        self.flagEntity = self.radioButton_entity.isChecked()
    def keyPressEvent(self, event):
        # Enter/Return confirms the dialog, Escape closes it
        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
            self.check()
        elif event.key() == Qt.Key_Escape:
            # self.index = -1
            self.close()
    def selectionChange(self, index):
        # map the combo-box index to an element type name
        self.index = self.comboBox.currentIndex()
        if self.index == 0:
            self.element_type = 'pipe_1'
        elif self.index == 1:
            self.element_type = 'pipe_2'
        elif self.index == 2:
            self.element_type = 'shell'
    def check(self):
        # confirming simply closes the dialog; the caller reads the attributes
        self.close()
    def button_clicked(self):
        self.check()
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",... |
72416805225 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
def approach_angle_reward(roll, pitch):
    """Reward shaping for the approach attitude.

    Rewards a nearly level vehicle (combined |roll| + |pitch| tilt below
    0.174 rad) exponentially, penalizes moderate tilt exponentially, and
    returns a fixed large penalty beyond 1.55 rad.
    """
    tilt = np.abs(roll) + np.abs(pitch)
    if tilt < 0.174:
        # nearly level: bonus grows as the tilt approaches zero
        return 100 * np.exp(7.0 * (0.174 - tilt))
    if tilt <= 1.55:
        # moderately tilted: exponentially growing penalty
        return -6.0 * np.exp(3.2 * (tilt - 0.174))
    # extreme tilt: constant large penalty
    return -500.0
def flip_reward(angle, prev_angle):
    """Reward shaping for the flip angle.

    ``prev_angle`` is unused but kept for call-site compatibility.
    """
    deviation = np.abs(angle)
    if deviation < 0.26:
        # close to upright: exponential bonus
        return 0.05 * np.exp(20 * (0.26 - deviation))
    # tilted past the threshold: exponential penalty
    return -7.0 * np.exp(2.1 * (deviation - 0.26))
def approach_velocity_reward(velocity):
    """Reward shaping for the approach speed: slow approaches are rewarded,
    fast ones penalized exponentially."""
    if velocity > 1.6:
        # too fast: steep exponential penalty
        return -20.0 * np.exp(0.45 * np.abs(velocity))
    if velocity >= 0.1:
        # moderate speed: milder exponential penalty
        return -12.5 * np.exp(2.1 * (velocity - 0.1))
    # very slow: exponential bonus growing as velocity -> 0
    return +55.0 * np.exp(20 * (0.1 - velocity))
# approach angle
#roll_space = np.linspace(-1.57,1.57,300)
#pitch_space = np.linspace(-1.57,1.57,300)
#X,Y = np.meshgrid(roll_space,pitch_space)
#
#Z = np.zeros(shape = (len(roll_space),len(pitch_space)))
#for it_r,r in enumerate(roll_space):
# for it_p,p in enumerate(pitch_space):
# Z[it_r,it_p] = approach_angle_reward(r,p)
# calculate angle_space for flipping
#angle_space = np.linspace(-3.14,3.14,500)
#dummy_space = np.linspace(-3.14,3.14,500)
#
#
#X,Y = np.meshgrid(angle_space,dummy_space)
#Z = np.zeros(shape = (len(angle_space),len(dummy_space)))
#
#for it_a1,a1 in enumerate(angle_space):
# for it_a2,a2 in enumerate(dummy_space):
# Z[it_a1,it_a2] = flip_reward(a1,a2)
# approach velocity
# Sample approach_velocity_reward over [0, 10] and render it as a 2-D heat
# map (the y axis is a dummy dimension; every row is identical).
vel_space = np.linspace(0.0,10,500)
dummy_space = np.linspace(0.0,10,500)
X,Y = np.meshgrid(vel_space,dummy_space)
Z = np.zeros(shape = (len(vel_space),len(dummy_space)))
for it_a1,a1 in enumerate(vel_space):
    for it_a2,a2 in enumerate(dummy_space):
        Z[it_a1,it_a2] = approach_velocity_reward(a1)
fig, ax = plt.subplots(figsize=(7, 7), dpi=100)
# for positive values
p = ax.pcolor(X, Y, Z, cmap=plt.cm.RdBu, vmin=(Z).min(), vmax=(Z).max())
#p = ax.pcolor(X, Y, Z, cmap=plt.cm.RdBu, vmin=Z.min(), vmax=Z.max())
cb = fig.colorbar(p)
#cnt = plt.contour(Z, cmap=plt.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
| Zbigor/DeepRL_UAV_landing | drl_landing/rl_pipeline/catkin_ws/src/hummingbird/scripts/plot_reward_functions.py | plot_reward_functions.py | py | 2,384 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.abs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 11,
"... |
15991499425 | import os
import argparse
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import numpy as np
import cv2
from seg_metric import SegmentationMetric
import random
import shutil
import setproctitle
import time
import logging
from dataset import potsdam
from custom_transforms import Mixup, edge_contour
from loss import CrossEntropyLoss, Edge_loss, Edge_weak_loss
class FullModel(nn.Module):
    """Couples the segmentation network with its training losses.

    In training mode ``forward`` returns the (summed) loss; in eval mode it
    returns the logits of the main head only.
    """
    def __init__(self, model, args2):
        super(FullModel, self).__init__()
        self.model = model
        self.use_mixup = args2.use_mixup
        self.use_edge = args2.use_edge
        # self.ce_loss = Edge_weak_loss()
        self.ce_loss = CrossEntropyLoss()
        self.edge_loss = Edge_loss()
        if self.use_mixup:
            self.mixup = Mixup(use_edge=args2.use_edge)
    def forward(self, input, label=None, train=True):
        # Mixup computes the loss internally, so it takes over the whole step.
        if train and self.use_mixup and label is not None:
            if self.use_edge:
                loss = self.mixup(input, label, [self.ce_loss, self.edge_loss], self.model)
            else:
                loss = self.mixup(input, label, self.ce_loss, self.model)
            return loss
        output = self.model(input)
        if train:
            losses = 0
            if isinstance(output, (list, tuple)):
                if self.use_edge:
                    # last output is the edge map; the others are seg heads
                    for i in range(len(output) - 1):
                        loss = self.ce_loss(output[i], label)
                        losses += loss
                    losses += self.edge_loss(output[-1], edge_contour(label).long())
                else:
                    # sum cross-entropy over the main and auxiliary heads
                    for i in range(len(output)):
                        loss = self.ce_loss(output[i], label)
                        losses += loss
            else:
                losses = self.ce_loss(output, label)
            return losses
        else:
            if isinstance(output, (list, tuple)):
                # eval: keep only the main head's prediction
                return output[0]
            else:
                return output
def get_world_size():
    """Return the number of distributed processes (1 outside distributed mode)."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
def get_rank():
    """Return the rank of this process (0 outside distributed mode)."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
class params():
    """Hyper-parameter container describing the HRNet stage layout.

    Built from the parsed CLI arguments: sets ``number_of_classes`` for the
    known datasets and the ``STAGE2``..``STAGE4`` configuration dicts for the
    HRNet_32 / HRNet_48 backbones.
    """
    def __init__(self, args2):
        if args2.dataset in ['potsdam', 'vaihingen']:
            self.number_of_classes = 6
        models = args2.models
        # per-branch channel widths for each supported backbone
        if models == 'HRNet_32':
            widths = [32, 64, 128, 256]
        elif models == 'HRNet_48':
            widths = [48, 96, 192, 384]
        else:
            # unknown backbone: leave the STAGE attributes unset (original behaviour)
            return
        self.STAGE2 = {'NUM_MODULES': 1,
                       'NUM_BRANCHES': 2,
                       'NUM_BLOCKS': [4, 4],
                       'NUM_CHANNELS': widths[:2],
                       'BLOCK': 'BASIC',
                       'FUSE_METHOD': 'SUM'}
        self.STAGE3 = {'NUM_MODULES': 4,
                       'NUM_BRANCHES': 3,
                       'NUM_BLOCKS': [4, 4, 4],
                       'NUM_CHANNELS': widths[:3],
                       'BLOCK': 'BASIC',
                       'FUSE_METHOD': 'SUM'}
        self.STAGE4 = {'NUM_MODULES': 3,
                       'NUM_BRANCHES': 4,
                       'NUM_BLOCKS': [4, 4, 4, 4],
                       'NUM_CHANNELS': widths,
                       'BLOCK': 'BASIC',
                       'FUSE_METHOD': 'SUM'}
def get_model(args2, device, models='DANet'):
    """Instantiate the requested segmentation network and prepare it for
    distributed training (FullModel wrapper + SyncBatchNorm + DDP).
    """
    if models in ['swinT', 'resT']:
        print(models, args2.head)
    else:
        print(models)
    if args2.dataset in ['potsdam', 'vaihingen']:
        nclass = 6
    assert models in ['danet', 'bisenetv2', 'pspnet', 'segbase', 'swinT',
                      'deeplabv3', 'fcn', 'fpn', 'unet', 'resT']
    # each backbone is imported lazily so unused ones add no import cost
    if models == 'danet':
        from models.danet import DANet
        model = DANet(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'bisenetv2':
        from models.bisenetv2 import BiSeNetV2
        model = BiSeNetV2(nclass=nclass)
    if models == 'pspnet':
        from models.pspnet import PSPNet
        model = PSPNet(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'segbase':
        from models.segbase import SegBase
        model = SegBase(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'swinT':
        from models.swinT import swin_tiny as swinT
        model = swinT(nclass=nclass, pretrained=True, aux=True, head=args2.head, edge_aux=args2.use_edge)
    if models == 'resT':
        from models.resT import rest_tiny as resT
        model = resT(nclass=nclass, pretrained=True, aux=True, head=args2.head, edge_aux=args2.use_edge)
    if models == 'deeplabv3':
        from models.deeplabv3 import DeepLabV3
        model = DeepLabV3(nclass=nclass, backbone='resnet50', pretrained_base=True)
    if models == 'fcn':
        from models.fcn import FCN16s
        model = FCN16s(nclass=nclass)
    if models == 'fpn':
        from models.fpn import FPN
        model = FPN(nclass=nclass)
    if models == 'unet':
        from models.unet import UNet
        model = UNet(nclass=nclass)
    model = FullModel(model, args2)
    # replace BatchNorm with SyncBatchNorm before wrapping in DDP
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = model.to(device)
    model = nn.parallel.DistributedDataParallel(
        model, device_ids=[args2.local_rank], output_device=args2.local_rank, find_unused_parameters=True)
    return model
def reduce_tensor(inp):
    """
    Reduce the loss from all processes so that
    process with rank 0 has the averaged results.
    """
    # inlined world-size lookup: single-process runs return the input as-is
    world_size = 1
    if torch.distributed.is_initialized():
        world_size = torch.distributed.get_world_size()
    if world_size < 2:
        return inp
    with torch.no_grad():
        reduced_inp = inp
        torch.distributed.reduce(reduced_inp, dst=0)
    return reduced_inp
class AverageMeter(object):
    """Tracks the most recent value and a running weighted average."""
    def __init__(self):
        self.initialized = False
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None
    def initialize(self, val, weight):
        """Seed the meter with the first observation."""
        self.val = val
        self.avg = val
        self.sum = val * weight
        self.count = weight
        self.initialized = True
    def update(self, val, weight=1):
        """Record a new observation; initializes the meter on first use."""
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)
    def add(self, val, weight):
        # accumulate into the weighted running statistics
        self.val = val
        self.sum += val * weight
        self.count += weight
        self.avg = self.sum / self.count
    def value(self):
        """Return the most recently recorded value."""
        return self.val
    def average(self):
        """Return the current weighted average."""
        return self.avg
def parse_args(argv=None):
    """Parse command-line options for training.

    Parameters
    ----------
    argv : list[str] | None
        Explicit argument list to parse. ``None`` (the default) falls back
        to ``sys.argv[1:]``, so existing ``parse_args()`` callers are
        unaffected; passing a list makes the function testable.

    Returns
    -------
    argparse.Namespace
        The parsed training configuration.
    """
    parser = argparse.ArgumentParser(description='Train segmentation network')
    parser.add_argument("--dataset", type=str, default='vaihingen', choices=['potsdam', 'vaihingen'])
    parser.add_argument("--end_epoch", type=int, default=200)
    parser.add_argument("--warm_epochs", type=int, default=5)
    parser.add_argument("--lr", type=float, default=0.01)
    parser.add_argument("--train_batchsize", type=int, default=1)
    parser.add_argument("--val_batchsize", type=int, default=1)
    parser.add_argument("--crop_size", type=int, nargs='+', default=[512, 512], help='H, W')
    parser.add_argument("--information", type=str, default='RS')
    parser.add_argument("--models", type=str, default='danet',
                        choices=['danet', 'bisenetv2', 'pspnet', 'segbase', 'resT',
                                 'swinT', 'deeplabv3', 'fcn', 'fpn', 'unet'])
    parser.add_argument("--head", type=str, default='seghead')
    parser.add_argument("--seed", type=int, default=6)
    parser.add_argument("--save_dir", type=str, default='./work_dir')
    parser.add_argument("--use_edge", type=int, default=0)
    parser.add_argument("--use_mixup", type=int, default=0)
    parser.add_argument("--local_rank", type=int, default=0)
    # trailing free-form options are collected verbatim
    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args2 = parser.parse_args(argv)
    return args2
def save_model_file(save_dir, save_name):
    """Create the run directory, snapshot the source tree into it and set up
    file logging (intended to run on rank 0 only).
    """
    save_dir = os.path.join(save_dir, save_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir + '/weights/')
        os.makedirs(save_dir + '/outputs/')
    # copy every top-level file of the CWD into the run dir for reproducibility
    for file in os.listdir('.'):
        if os.path.isfile(file):
            shutil.copy(file, save_dir)
    if not os.path.exists(os.path.join(save_dir, 'models')):
        shutil.copytree('./models', os.path.join(save_dir, 'models'))
    # all subsequent logging.info calls go to the run's train.log
    logging.basicConfig(filename=save_dir + '/train.log', level=logging.INFO)
def train():
    """Distributed training entry point.

    Parses CLI args, seeds RNGs, builds the model and the train/val
    dataloaders, then runs the epoch loop with AdamW and a warmup+poly LR
    schedule. Validation runs only during the last epochs (``test_epoch``);
    rank 0 checkpoints per-epoch, best and resumable weights.
    """
    distributed = True
    args2 = parse_args()
    if distributed:
        torch.cuda.set_device(args2.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://",
        )
    # seed everything for reproducibility
    torch.manual_seed(args2.seed)
    torch.cuda.manual_seed(args2.seed)
    random.seed(args2.seed)
    np.random.seed(args2.seed)
    save_name = "{}_lr{}_epoch{}_batchsize{}_{}".format(args2.models, args2.lr, args2.end_epoch,
                                                        args2.train_batchsize * get_world_size(), args2.information)
    save_dir = args2.save_dir
    if args2.local_rank == 0:
        save_model_file(save_dir=save_dir, save_name=save_name)
    device = torch.device(('cuda:{}').format(args2.local_rank))
    model = get_model(args2, device, models=args2.models)
    potsdam_train = potsdam(train=True, dataset=args2.dataset, crop_szie=args2.crop_size)
    if distributed:
        train_sampler = DistributedSampler(potsdam_train)
    else:
        train_sampler = None
    dataloader_train = DataLoader(
        potsdam_train,
        batch_size=args2.train_batchsize,
        shuffle=True and train_sampler is None,
        num_workers=4,
        pin_memory=True,
        drop_last=True,
        sampler=train_sampler)
    potsdam_val = potsdam(train=False, dataset=args2.dataset, crop_szie=args2.crop_size)
    if distributed:
        val_sampler = DistributedSampler(potsdam_val)
    else:
        val_sampler = None
    dataloader_val = DataLoader(
        potsdam_val,
        batch_size=args2.val_batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        sampler=val_sampler)
    # optimizer = torch.optim.SGD([{'params':
    #                             filter(lambda p: p.requires_grad,
    #                                    model.parameters()),
    #                             'lr': args2.lr}],
    #                             lr=args2.lr,
    #                             momentum=0.9,
    #                             weight_decay=0.0005,
    #                             nesterov=False,
    #                             )
    optimizer = torch.optim.AdamW([{'params':
                                  filter(lambda p: p.requires_grad,
                                         model.parameters()),
                                  'lr': args2.lr}],
                                  lr=args2.lr,
                                  betas=(0.9, 0.999),
                                  weight_decay=0.01,
                                  )
    start = time.time()
    miou = 0
    acc = 0
    f1 = 0
    precision = 0
    recall = 0
    best_miou = 0
    best_acc = 0
    best_f1 = 0
    last_epoch = 0
    # validate only during the last 3 epochs to save time
    test_epoch = args2.end_epoch - 3
    ave_loss = AverageMeter()
    world_size = get_world_size()
    weight_save_dir = os.path.join(save_dir, save_name + '/weights')
    model_state_file = weight_save_dir + "/{}_lr{}_epoch{}_batchsize{}_{}.pkl.tar" \
        .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information)
    # resume from a previous checkpoint if one exists
    if os.path.isfile(model_state_file):
        print('loaded successfully')
        logging.info("=> loading checkpoint '{}'".format(model_state_file))
        checkpoint = torch.load(model_state_file, map_location=lambda storage, loc: storage)
        checkpoint = {k: v for k, v in checkpoint.items() if not 'loss' in k}
        best_miou = checkpoint['best_miou']
        best_acc = checkpoint['best_acc']
        best_f1 = checkpoint['best_f1']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logging.info("=> loaded checkpoint '{}' (epoch {})".format(
            model_state_file, checkpoint['epoch']))
    for epoch in range(last_epoch, args2.end_epoch):
        if distributed:
            # re-shuffle the distributed sampler each epoch
            train_sampler.set_epoch(epoch)
        model.train()
        setproctitle.setproctitle("xzy:" + str(epoch) + "/" + "{}".format(args2.end_epoch))
        for i, sample in enumerate(dataloader_train):
            image, label = sample['image'], sample['label']
            image, label = image.to(device), label.to(device)
            label = label.long().squeeze(1)
            losses = model(image, label)
            loss = losses.mean()
            ave_loss.update(loss.item())
            lenth_iter = len(dataloader_train)
            # warmup + poly schedule over the global iteration counter
            lr = adjust_learning_rate(optimizer,
                                      args2.lr,
                                      args2.end_epoch * lenth_iter,
                                      i + epoch * lenth_iter,
                                      args2.warm_epochs * lenth_iter
                                      )
            if i % 50 == 0:
                # average the running loss across processes for logging
                reduced_loss = ave_loss.average()
                print_loss = reduce_tensor(torch.from_numpy(np.array(reduced_loss)).to(device)).cpu() / world_size
                print_loss = print_loss.item()
                if args2.local_rank == 0:
                    time_cost = time.time() - start
                    start = time.time()
                    print("epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, "
                          "best_miou:{:.4f}, miou:{:.4f}, acc:{:.4f}, f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                          format(epoch,args2.end_epoch,i,len(dataloader_train),print_loss,time_cost,lr,
                                 best_miou,miou, acc, f1, precision, recall))
                    logging.info(
                        "epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, "
                        "best_miou:{:.4f}, miou:{:.4f}, acc:{:.4f}, f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                        format(epoch, args2.end_epoch, i, len(dataloader_train), print_loss, time_cost, lr,
                               best_miou, miou, acc, f1, precision, recall))
            model.zero_grad()
            loss.backward()
            optimizer.step()
        if epoch > test_epoch:
            miou, acc, f1, precision, recall = validate(dataloader_val, device, model, args2)
            # average the validation metrics across processes
            miou = (reduce_tensor(miou).cpu() / world_size).item()
            acc = (reduce_tensor(acc).cpu() / world_size).item()
            f1 = (reduce_tensor(f1).cpu() / world_size).item()
            precision = (reduce_tensor(precision).cpu() / world_size).item()
            recall = (reduce_tensor(recall).cpu() / world_size).item()
        if args2.local_rank == 0:
            if epoch > test_epoch and epoch != 0:
                print('miou:{}, acc:{}, f1:{}, precision:{}, recall:{}'.format(miou, acc, f1, precision, recall))
                # per-epoch snapshot during the validation window
                torch.save(model.state_dict(),
                           weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_xzy_{}.pkl'
                           .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, epoch))
                if miou >= best_miou and miou != 0:
                    # new best model: keep both a named and a canonical copy
                    best_miou = miou
                    best_acc, best_f1 = acc, f1
                    best_weight_name = weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_best_epoch_{}.pkl'.format(
                        args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, epoch)
                    torch.save(model.state_dict(), best_weight_name)
                    torch.save(model.state_dict(), weight_save_dir + '/best_weight.pkl')
            # resumable checkpoint (model + optimizer + bookkeeping)
            torch.save({
                'epoch': epoch + 1,
                'best_miou': best_miou,
                'best_acc': best_acc,
                'best_f1':best_f1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}.pkl.tar'
                .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information))
    if args2.local_rank == 0:
        # final weights after the last epoch
        torch.save(model.state_dict(),
                   weight_save_dir + '/{}_lr{}_epoch{}_batchsize{}_{}_xzy_{}.pkl'
                   .format(args2.models, args2.lr, args2.end_epoch, args2.train_batchsize * world_size, args2.information, args2.end_epoch))
        try:
            print("epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, best_miou:{:.4f}, "
                  "miou:{:.4f}, acc:{:.4f} f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                  format(epoch, args2.end_epoch, i, len(dataloader_train),
                         print_loss, time_cost, lr, best_miou, miou, acc, f1, precision, recall))
            logging.info(
                "epoch:[{}/{}], iter:[{}/{}], loss:{:.4f}, time:{:.4f}, lr:{:.4f}, best_miou:{:.4f}, "
                "miou:{:.4f}, acc:{:.4f} f1:{:.4f}, precision:{:.4f}, recall:{:.4f}".
                format(epoch, args2.end_epoch, i, len(dataloader_train),
                       print_loss, time_cost, lr, best_miou, miou, acc, f1, precision, recall))
        except:
            pass
        logging.info("***************super param*****************")
        logging.info("dataset:{} information:{} lr:{} epoch:{} batchsize:{} best_miou:{} best_acc:{} best_f1:{}"
                     .format(args2.dataset, args2.information, args2.lr, args2.end_epoch, args2.train_batchsize *
                             world_size, best_miou, best_acc, best_f1))
        logging.info("***************end*************************")
        print("***************super param*****************")
        print("dataset:{} information:{} lr:{} epoch:{} batchsize:{} best_miou:{} best_acc:{} best_f1:{}"
              .format(args2.dataset, args2.information, args2.lr, args2.end_epoch, args2.train_batchsize * world_size,
                      best_miou, best_acc, best_f1))
        print("***************end*************************")
def adjust_learning_rate(optimizer, base_lr, max_iters,
                         cur_iters, warmup_iter=None, power=0.9):
    """Warmup + poly learning-rate schedule.

    Ramps linearly from 0 to ``base_lr`` during ``warmup_iter`` iterations
    (when given), then decays with a poly schedule. The new rate is written
    into ``optimizer.param_groups[0]`` and returned.
    """
    if warmup_iter is None:
        # plain poly decay over all iterations
        lr = base_lr * ((1 - float(cur_iters / max_iters)) ** (power))
    elif cur_iters < warmup_iter:
        # linear warmup phase
        lr = base_lr * cur_iters / (warmup_iter + 1e-8)
    else:
        # poly decay over the remaining iterations after warmup
        lr = base_lr * ((1 - float(cur_iters - warmup_iter) / (max_iters - warmup_iter)) ** (power))
    optimizer.param_groups[0]['lr'] = lr
    return lr
def validate(dataloader_val, device, model, args2):
    """Evaluate the model over the validation loader.

    Returns (MIOU, ACC, F1, Precision, Recall) as 1-element tensors moved to
    ``device`` so the caller can all-reduce them across processes. Class
    index 5 is excluded from the means (nanmean over classes 0..4).
    """
    model.eval()
    MIOU = [0]
    ACC = [0]
    F1 = [0]
    Precision = [0]
    Recall = [0]
    nclass = 6
    metric = SegmentationMetric(nclass)
    with torch.no_grad():
        for i, sample in enumerate(dataloader_val):
            image, label = sample['image'], sample['label']
            image, label = image.to(device), label.to(device)
            label = label.long().squeeze(1)
            logit = model(image, label, train=False)
            logit = logit.argmax(dim=1)
            logit = logit.cpu().detach().numpy()
            label = label.cpu().detach().numpy()
            # accumulate the confusion statistics over all batches
            metric.addBatch(logit, label)
        iou = metric.IntersectionOverUnion()
        acc = metric.Accuracy()
        precision = metric.Precision()
        recall = metric.Recall()
        # mean over the first five classes only (presumably class 5 is
        # background/clutter — TODO confirm against the dataset definition)
        miou = np.nanmean(iou[0:5])
        mprecision = np.nanmean(precision[0:5])
        mrecall = np.nanmean(recall[0:5])
        MIOU = MIOU + miou
        ACC = ACC + acc
        Recall = Recall + mrecall
        Precision = Precision + mprecision
        F1 = F1 + 2 * Precision * Recall / (Precision + Recall)
    MIOU = torch.from_numpy(MIOU).to(device)
    ACC = torch.from_numpy(ACC).to(device)
    F1 = torch.from_numpy(F1).to(device)
    Recall = torch.from_numpy(Recall).to(device)
    Precision = torch.from_numpy(Precision).to(device)
    return MIOU, ACC, F1, Precision, Recall
if __name__ == '__main__':
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    # os.environ.setdefault('RANK', '0')
    # os.environ.setdefault('WORLD_SIZE', '1')
    # os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    # os.environ.setdefault('MASTER_PORT', '29556')
    # enable the cudnn autotuner (fastest kernels for fixed input sizes)
    cudnn.benchmark = True
    cudnn.enabled = True
    # don't use cudnn
    #cudnn.benchmark = False
    #cudnn.deterministic = True
    train()
| zyxu1996/Efficient-Transformer | train.py | train.py | py | 21,965 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "loss.CrossEntropyLoss",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "loss.Edge_loss",
... |
13535976062 | import csv
import json
from collections import OrderedDict
def import_jsonfile_as_OrderedDict(json_filepath):
    """Load a JSON file into an OrderedDict, preserving the key order.

    The original implementation opened the file without ever closing it,
    leaking the handle; a context manager fixes that.
    """
    with open(json_filepath, "r") as f:
        return json.loads(f.read(), object_pairs_hook = OrderedDict)
def export_dict_to_jsonfile(dic, json_filepath, indent = 2, separators=(',', ': ')):
    """Serialize ``dic`` as pretty-printed JSON and write it to ``json_filepath``."""
    serialized = json.dumps(dic, indent=indent, separators=separators)
    with open(json_filepath, "w") as handle:
        handle.write(serialized)
def get_entries_in_csv_col(csv_filepath, col_name, delimiter = ','):
    """Return all values from the CSV column whose header equals ``col_name``.

    The first row is treated as the header. If no header matches, the first
    column is returned (this mirrors the original fallback behaviour).
    """
    target_index = 0
    entries = []
    with open(csv_filepath) as handle:
        reader = csv.reader(handle, delimiter=delimiter)
        for row_number, row in enumerate(reader):
            if row_number == 0:
                # locate the requested column in the header row
                for column_index, header in enumerate(row):
                    if header == col_name:
                        target_index = column_index
            else:
                entries.append(row[target_index])
    return entries
| tyjyang/CampaignManager | lib/io_tools.py | io_tools.py | py | 944 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_nu... |
2403860193 | from decimal import Decimal
from django.test import TestCase
from parameterized import parameterized
from calculator.calculation import calculate_total_cost
from calculator.exceptions import StateNotFound
from calculator.repository import Repository
from calculator.tests.common import fill_db
class CalculateTotalCostTestCase(TestCase):
    """Tests for ``calculate_total_cost``."""
    def setUp(self):
        """Populate the test database with the required fixtures."""
        fill_db()
    @parameterized.expand(
        [
            (Decimal(1), 1, 'UT', Decimal('1.0685')),
            (Decimal(1000), 1, 'NV', Decimal('1047.6')),
            (Decimal(1), 1000, 'TX', Decimal('1030.625')),
            (Decimal(200), 100, 'AL', Decimal('18720')),
            (Decimal('123.33'), 175, 'CA', Decimal('21026.9941875')),
        ],
    )
    def test_calculate_total_cost(self, price, quantity, state_code, expected):
        """Verify successful total-cost calculations for several states."""
        repository = Repository()
        self.assertEqual(
            calculate_total_cost(
                price=price,
                quantity=quantity,
                state_code=state_code,
                repository=repository,
            ),
            expected,
        )
    def test_bad_state_code(self):
        """An unknown state code must raise StateNotFound."""
        repository = Repository()
        with self.assertRaises(StateNotFound):
            calculate_total_cost(
                price=Decimal('11.33'),
                quantity=12,
                state_code='WRONG',
                repository=repository,
            )
| SpiritD/tax_calculator | tom_project/calculator/tests/total_costs.py | total_costs.py | py | 1,659 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "calculator.tests.common.fill_db",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "calculator.repository.Repository",
"line_number": 30,
"usage_type": "call"
},
{
... |
30249155883 | import cv2
import os
import numpy as np
import PIL.Image
from PIL import ImageEnhance
# for every image in the folder, create a brighter and a darker copy
def imageBrightener(pathImmagine, pathContorno, pathSalvataggio, pathSalvataggioContorno):
    """Create a darkened (x0.75) and a brightened (x1.25) copy of each image.

    For every produced image, the matching contour mask from ``pathContorno``
    is duplicated with the same suffix so image/mask pairs stay aligned.
    Note: the function changes the process CWD via os.chdir.
    """
    os.chdir(pathImmagine)
    files = os.listdir()
    chiara = 1.25  # brightness factor for the lighter copy
    scura = 0.75  # brightness factor for the darker copy
    i = 1
    lenFiles = len(files)
    for file in files:
        print(f'Immagine {i} di {lenFiles}')
        img = PIL.Image.open(pathImmagine + "\\" + file)
        # image brightness enhancer
        enhancer = ImageEnhance.Brightness(img)
        im_output = enhancer.enhance(scura)
        if im_output.mode != 'RGB':
            # JPEG output requires RGB mode
            im_output = im_output.convert('RGB')
        save = f'{pathSalvataggio}\\{file[:len(file) - 4]}_darkened.jpg'
        # PIL works in RGB, OpenCV expects BGR
        opencvImage = cv2.cvtColor(np.array(im_output), cv2.COLOR_RGB2BGR)
        cv2.imwrite(save, opencvImage)
        contorno = cv2.imread(f'{pathContorno}\\{file[:len(file) - 4]}.png')
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_darkened.png', contorno)
        im_output2 = enhancer.enhance(chiara)
        opencvImage2 = cv2.cvtColor(np.array(im_output2), cv2.COLOR_RGB2BGR)
        cv2.imwrite(f'{pathSalvataggio}\\{file[:len(file) - 4]}_brightened.jpg', opencvImage2)
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_brightened.png', contorno)
        i += 1
# for every image in the folder, create a higher- and a lower-contrast copy
def imageContrast(pathImmagine, pathContorno, pathSalvataggio, pathSalvataggioContorno):
    """Create a lower-contrast (x0.75) and a higher-contrast (x1.25) copy of
    each image, duplicating the matching contour mask with the same suffix.
    Note: the function changes the process CWD via os.chdir.
    """
    os.chdir(pathImmagine)
    files = os.listdir()
    chiara = 1.25  # contrast factor for the higher-contrast copy
    scura = 0.75  # contrast factor for the lower-contrast copy
    i = 1
    lenFiles = len(files)
    for file in files:
        print(f'Immagine {i} di {lenFiles}')
        img = PIL.Image.open(pathImmagine + "\\" + file)
        # image brightness enhancer
        enhancer = ImageEnhance.Contrast(img)
        im_output = enhancer.enhance(scura)
        if im_output.mode != 'RGB':
            # JPEG output requires RGB mode
            im_output = im_output.convert('RGB')
        save = f'{pathSalvataggio}\\{file[:len(file) - 4]}_lessContrast.jpg'
        # PIL works in RGB, OpenCV expects BGR
        opencvImage = cv2.cvtColor(np.array(im_output), cv2.COLOR_RGB2BGR)
        cv2.imwrite(save, opencvImage)
        contorno = cv2.imread(f'{pathContorno}\\{file[:len(file) - 4]}.png')
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_lessContrast.png', contorno)
        im_output2 = enhancer.enhance(chiara)
        opencvImage2 = cv2.cvtColor(np.array(im_output2), cv2.COLOR_RGB2BGR)
        cv2.imwrite(f'{pathSalvataggio}\\{file[:len(file) - 4]}_moreContrast.jpg', opencvImage2)
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_moreContrast.png', contorno)
        i += 1
# rupta l'immagine di un angolo dato in input
def rotateAngle(img, angle, color):
    """
    Rotate ``img`` by ``angle`` degrees around its center, expanding the
    canvas so no corner of the original image is cropped.  ``color`` fills
    the border area exposed by the rotation.
    """
    height, width = img.shape[:2]  # image shape has 3 dimensions
    # getRotationMatrix2D wants (x, y) order, i.e. (width, height)
    center = (width / 2, height / 2)
    matrix = cv2.getRotationMatrix2D(center, angle, 1.)
    # absolute cos/sin taken from the rotation matrix itself
    cos_a = abs(matrix[0, 0])
    sin_a = abs(matrix[0, 1])
    # bounding box of the rotated image
    bound_w = int(height * sin_a + width * cos_a)
    bound_h = int(height * cos_a + width * sin_a)
    # shift the transform so the old center maps onto the new canvas center
    matrix[0, 2] += bound_w / 2 - center[0]
    matrix[1, 2] += bound_h / 2 - center[1]
    # apply the rotation with the expanded bounds
    return cv2.warpAffine(img, matrix, (bound_w, bound_h), borderValue=color)
# create every rotation of the source image
def createImageRotations(path, pathSalvataggio, color, extension):
    """
    For every image in ``path``, write one rotated copy per angle in
    ``angles`` into ``pathSalvataggio``.  ``color`` fills the border exposed
    by the rotation and ``extension`` selects the output format.
    """
    angles = [30, 45, 60, 120, 150, 270]
    os.chdir(path)
    files = os.listdir()
    total = len(files)  # fix: progress message used a hard-coded "515"
    i = 1
    for file in files:
        print("Immagine numero: " + str(i) + " su " + str(total))
        filePath = path + "\\" + file
        savePath = pathSalvataggio + "\\" + file
        print(savePath)
        original = cv2.imread(filePath)
        if original is None:
            # imread returned nothing (presumably a path it cannot open
            # directly, e.g. non-ASCII on Windows -- TODO confirm): read the
            # raw bytes and decode them in memory instead.
            with open(filePath, "rb") as stream:  # fix: close the stream
                bytesArray = bytearray(stream.read())
            numpyarray = np.asarray(bytesArray, dtype=np.uint8)
            original = cv2.imdecode(numpyarray, cv2.IMREAD_UNCHANGED)
        for angle in angles:
            img = rotateAngle(original, angle, color)
            cv2.imwrite(savePath[:len(savePath) - 4] + "_" + str(angle) + extension, img)
        i = i + 1
# mirror (horizontally flip) the images
def flipImages(path, pathSalvataggio, extension):
    """
    Write a horizontally mirrored copy of every image in ``path`` into
    ``pathSalvataggio`` with a ``_flipped`` suffix and the given extension.

    NOTE(review): ``savePath[:len(savePath) - 4]`` assumes a 3-character
    extension -- confirm no ``.jpeg`` inputs exist.
    """
    os.chdir(path)
    files = os.listdir()
    i = 1
    for file in files:
        print("Immagine numero: " + str(i))
        # fix: the original concatenated path + file with no separator and
        # only worked when callers passed a trailing slash/backslash.
        filePath = os.path.join(path, file)
        savePath = os.path.join(pathSalvataggio, file)
        print(savePath)
        original = cv2.imread(filePath)
        if original is None:
            # imread failed (presumably a path it cannot open directly --
            # TODO confirm): read the raw bytes and decode in memory.
            with open(filePath, "rb") as stream:  # fix: close the stream
                bytesArray = bytearray(stream.read())
            numpyarray = np.asarray(bytesArray, dtype=np.uint8)
            original = cv2.imdecode(numpyarray, cv2.IMREAD_UNCHANGED)
        img = cv2.flip(original, 1)  # 1 = flip around the vertical axis
        cv2.imwrite(savePath[:len(savePath) - 4] + "_flipped" + extension, img)
        i = i + 1
# save images and reopen them with cv2
# for each image
#   for each angle
#     rotate the image and save it
if __name__ == '__main__':
    # Resolve the dataset folders relative to this script (Windows paths).
    dirname = os.path.dirname(__file__)
    pathContorni = os.path.join(dirname, 'Dataset\\Contorni\\')
    pathNuoviContorni =os.path.join(dirname, 'Dataset\\ContorniRotazione\\')
    pathOriginali = os.path.join(dirname, 'Dataset\\JPEGImages\\')
    pathOriginaliRotazione = os.path.join(dirname, 'Dataset\\JPEGRotazione\\')
    # Rotated copies: contours get a black border, photos a red one (BGR).
    createImageRotations(pathContorni, pathNuoviContorni, (0,0,0), '.png')
    createImageRotations(pathOriginali, pathOriginaliRotazione, (0,0,255), '.jpg')
    # Mirrored copies of rotated and original sets.
    print("Nuovi contorni")
    flipImages(pathNuoviContorni, pathNuoviContorni, ".png")
    print("Contorni")
    flipImages(pathContorni, pathNuoviContorni, ".png")
    print("Ruotate")
    flipImages(pathOriginaliRotazione, pathOriginaliRotazione, ".jpg")
    print("Originali")
    flipImages(pathOriginali, pathOriginaliRotazione, ".jpg")
    # Brightness and contrast variants for both augmented and original sets.
    imageBrightener(pathOriginaliRotazione, pathNuoviContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageBrightener(pathOriginali, pathContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageContrast(pathOriginaliRotazione, pathNuoviContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageContrast(pathOriginali, pathContorni, pathOriginaliRotazione, pathNuoviContorni)
| ApulianGCC/TesiSegmentazionePinna | data_augmentation.py | data_augmentation.py | py | 6,913 | python | it | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_... |
43296965814 | # spaceconfig = {"usemodules" : ["_collections"]}
from _collections import deque
from pytest import raises
def test_basics():
    """Exercise append/appendleft/pop/popleft and re-initialisation (py2)."""
    assert deque.__module__ == 'collections'
    d = deque(xrange(-5125, -5000))
    d.__init__(xrange(200))
    for i in xrange(200, 400):
        d.append(i)
    for i in reversed(xrange(-200, 0)):
        d.appendleft(i)
    assert list(d) == range(-200, 400)
    assert len(d) == 600
    left = [d.popleft() for i in xrange(250)]
    assert left == range(-200, 50)
    assert list(d) == range(50, 400)
    right = [d.pop() for i in xrange(250)]
    right.reverse()
    assert right == range(150, 400)
    assert list(d) == range(50, 150)

def test_maxlen():
    """A bounded deque discards from the opposite end once it is full."""
    raises(ValueError, deque, 'abc', -1)
    raises(ValueError, deque, 'abc', -2)
    it = iter(range(10))
    d = deque(it, maxlen=3)
    assert list(it) == []
    assert repr(d) == 'deque([7, 8, 9], maxlen=3)'
    assert list(d) == range(7, 10)
    d.appendleft(3)
    assert list(d) == [3, 7, 8]
    d.extend([20, 21])
    assert list(d) == [8, 20, 21]
    d.extendleft([-7, -6])
    assert list(d) == [-6, -7, 8]

def test_maxlen_zero():
    """maxlen=0 fully consumes its source iterable but stores nothing."""
    it = iter(range(100))
    d = deque(it, maxlen=0)
    assert list(d) == []
    assert list(it) == []
    d.extend(range(100))
    assert list(d) == []
    d.extendleft(range(100))
    assert list(d) == []

def test_maxlen_attribute():
    """The maxlen attribute reflects the constructor arg and is read-only."""
    assert deque().maxlen is None
    assert deque('abc').maxlen is None
    assert deque('abc', maxlen=4).maxlen == 4
    assert deque('abc', maxlen=0).maxlen == 0
    raises((AttributeError, TypeError), "deque('abc').maxlen = 10")

def test_runtimeerror():
    """Mutating a deque invalidates live iterators (RuntimeError on next)."""
    d = deque('abcdefg')
    it = iter(d)
    d.pop()
    raises(RuntimeError, it.next)
    #
    d = deque('abcdefg')
    it = iter(d)
    d.append(d.pop())
    raises(RuntimeError, it.next)
    #
    d = deque()
    it = iter(d)
    d.append(10)
    raises(RuntimeError, it.next)

def test_count():
    """count() matches list.count and detects mutation during counting."""
    for s in ('', 'abracadabra', 'simsalabim'*50+'abc'):
        s = list(s)
        d = deque(s)
        for letter in 'abcdeilmrs':
            assert s.count(letter) == d.count(letter)
    class MutatingCompare:
        def __eq__(self, other):
            # shrinking the deque mid-count must raise RuntimeError below
            d.pop()
            return True
    m = MutatingCompare()
    d = deque([1, 2, 3, m, 4, 5])
    raises(RuntimeError, d.count, 3)

def test_comparisons():
    """Rich comparisons agree with comparing the equivalent lists (py2 cmp)."""
    d = deque('xabc'); d.popleft()
    for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
        assert (d==e) == (type(d)==type(e) and list(d)==list(e))
        assert (d!=e) == (not(type(d)==type(e) and list(d)==list(e)))
    args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
    for x in args:
        for y in args:
            assert (x == y) == (list(x) == list(y))
            assert (x != y) == (list(x) != list(y))
            assert (x < y) == (list(x) < list(y))
            assert (x <= y) == (list(x) <= list(y))
            assert (x > y) == (list(x) > list(y))
            assert (x >= y) == (list(x) >= list(y))
            assert cmp(x,y) == cmp(list(x),list(y))
def test_extend():
    """extend appends each element; self-extend doubles the deque."""
    dq = deque('a')
    dq.extend('bcd')
    assert list(dq) == ['a', 'b', 'c', 'd']
    dq.extend(dq)
    assert list(dq) == ['a', 'b', 'c', 'd'] * 2
def test_iadd():
    """In-place += extends the deque while keeping object identity."""
    dq = deque('a')
    alias = dq
    dq += 'bcd'
    assert list(dq) == ['a', 'b', 'c', 'd']
    dq += dq
    assert list(dq) == ['a', 'b', 'c', 'd'] * 2
    assert alias is dq
def test_extendleft():
    """extendleft pushes items one at a time, reversing their order."""
    dq = deque('a')
    dq.extendleft('bcd')
    assert list(dq) == ['d', 'c', 'b', 'a']
    dq.extendleft(dq)
    assert list(dq) == list('abcddcba')
def test_getitem():
    """Indexing matches the equivalent list for all valid +/- indices."""
    n = 200
    l = xrange(1000, 1000 + n)
    d = deque(l)
    for j in xrange(-n, n):
        assert d[j] == l[j]
    raises(IndexError, "d[-n-1]")
    raises(IndexError, "d[n]")

def test_setitem():
    """Item assignment works for positive and negative indices."""
    n = 200
    d = deque(xrange(n))
    for i in xrange(n):
        d[i] = 10 * i
    assert list(d) == [10*i for i in xrange(n)]
    l = list(d)
    for i in xrange(1-n, 0, -3):
        d[i] = 7*i
        l[i] = 7*i
    assert list(d) == l
def test_delitem():
    """del d[i] removes the element at a negative index."""
    dq = deque("abcdef")
    del dq[-2]
    assert ''.join(dq) == "abcdf"
def test_reverse():
    """reverse() reverses in place and returns None (py2 map/xrange)."""
    d = deque(xrange(1000, 1200))
    d.reverse()
    assert list(d) == list(reversed(range(1000, 1200)))
    #
    n = 100
    data = map(str, range(n))
    for i in range(n):
        d = deque(data[:i])
        r = d.reverse()
        assert list(d) == list(reversed(data[:i]))
        assert r is None
        d.reverse()
        assert list(d) == data[:i]
def test_rotate():
    """rotate shifts right (positive) / left (negative), modulo length."""
    base = tuple('abcde')
    dq = deque(base)
    dq.rotate(1)                 # verify rot(1)
    assert ''.join(dq) == 'eabcd'
    dq = deque(base)
    dq.rotate(-1)                # verify rot(-1)
    assert ''.join(dq) == 'bcdea'
    dq.rotate()                  # default shift is 1
    assert tuple(dq) == base
    dq.rotate(500000002)         # huge shifts reduce modulo len
    assert tuple(dq) == tuple('deabc')
    dq.rotate(-5000002)
    assert tuple(dq) == base
def test_len():
    """len() tracks appends/pops; popping an empty deque raises IndexError."""
    d = deque('ab')
    assert len(d) == 2
    d.popleft()
    assert len(d) == 1
    d.pop()
    assert len(d) == 0
    raises(IndexError, d.pop)
    raises(IndexError, d.popleft)
    assert len(d) == 0
    d.append('c')
    assert len(d) == 1
    d.appendleft('d')
    assert len(d) == 2
    d.clear()
    assert len(d) == 0
    assert list(d) == []

def test_remove():
    """remove() deletes the first occurrence; missing values raise ValueError."""
    d = deque('abcdefghcij')
    d.remove('c')
    assert d == deque('abdefghcij')
    d.remove('c')
    assert d == deque('abdefghij')
    raises(ValueError, d.remove, 'c')
    assert d == deque('abdefghij')

def test_repr():
    """repr() round-trips through eval and marks self-reference with '...'."""
    d = deque(xrange(20))
    e = eval(repr(d))
    assert d == e
    d.append(d)
    assert '...' in repr(d)

def test_hash():
    """Deques are mutable and therefore unhashable."""
    raises(TypeError, hash, deque('abc'))

def test_roundtrip_iter_init():
    """Constructing a deque from another deque copies its contents."""
    d = deque(xrange(200))
    e = deque(d)
    assert d is not e
    assert d == e
    assert list(d) == list(e)

def test_reduce():
    """__reduce__ captures contents, maxlen and the instance __dict__."""
    #
    d = deque('hello world')
    r = d.__reduce__()
    assert r == (deque, (list('hello world'),))
    #
    d = deque('hello world', 42)
    r = d.__reduce__()
    assert r == (deque, (list('hello world'), 42))
    #
    class D(deque):
        pass
    d = D('hello world')
    d.a = 5
    r = d.__reduce__()
    assert r == (D, (list('hello world'), None), {'a': 5})
    #
    class D(deque):
        pass
    d = D('hello world', 42)
    d.a = 5
    r = d.__reduce__()
    assert r == (D, (list('hello world'), 42), {'a': 5})

def test_copy():
    """copy.copy produces a shallow copy sharing the contained objects."""
    import copy
    mut = [10]
    d = deque([mut])
    e = copy.copy(d)
    assert d is not e
    assert d == e
    mut[0] = 11
    assert d == e

def test_reversed():
    """reversed() yields items back-to-front, like for the same list."""
    for s in ('abcd', xrange(200)):
        assert list(reversed(deque(s))) == list(reversed(s))

def test_free():
    """Objects popped from a deque become garbage-collectable."""
    import gc
    class X(object):
        freed = False
        def __del__(self):
            X.freed = True
    d = deque()
    d.append(X())
    d.pop()
    gc.collect(); gc.collect(); gc.collect()
    assert X.freed

def test_index_method():
    """Index objects supporting __index__ are accepted for item access."""
    d = deque([1, 2, 3, 4, 5])
    class A(object):
        def __index__(self):
            return 1
    assert d[A()] == 2

def test_index_method_mutates():
    """An __index__ that mutates the deque raises IndexError, not a crash."""
    d = deque([1, 2, 3, 4, 5])
    class A(object):
        def __index__(self):
            d.clear()
            return 1
    with raises(IndexError):
        d[A()]
    d = deque([1, 2, 3, 4, 5])
    with raises(IndexError):
        d[A()] = 2
| mozillazg/pypy | pypy/module/_collections/test/apptest_deque.py | apptest_deque.py | py | 7,397 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "_collections.deque.__module__",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "_collections.deque",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "_collections.deque",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": ... |
70606154344 | __all__ = (
'Persistor',
'SQLPersistor',
'SQLitePersistor',
)
# sqlite3 is an optional dependency: fall back to None so that
# SQLitePersistor can raise a clear RuntimeError at connect time instead
# of the whole module failing at import time.
try:
    import sqlite3
except Exception:
    sqlite3 = None
class Persistor(object):
    """
    Abstract base for persistence back-ends; a `Model` instance invokes
    `persist` on one of these when it is saved.
    """

    def persist(self, attributes):
        """
        Persist the given `attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            bool: the result

        Raises:
            NotImplementedError: unless an inheriting class overrides this
                method
        """
        raise NotImplementedError
class SQLPersistor(Persistor):
    """
    Class providing methods for persisting input to a SQL DB (persistence occurs
    when the `persist` method is called on a `Model` instance)

    Instance Attributes:
        table_name (str): the table name
        key_attribute_names (set of str): the key-attribute names (in the future
            complex keys will likely be supported, for now only simple/singular
            keys are supported)
    """

    def __init__(
        self,
        table_name,
        key_attribute_name=None,
    ):
        self.table_name = table_name
        self.key_attribute_names = frozenset([key_attribute_name]) if \
            key_attribute_name else frozenset()

    @property
    def connection(self):
        """
        Lazy-load and return the "Connection" instance

        Returns:
            mixed: the instantiated/connected "Connection" instance

        Raises:
            NotImplementedError: if the `_connect` method is not overridden by
                an inheriting class
        """
        if not hasattr(self, '_connection'):
            self._connection = self._connect()
        return self._connection

    def persist(self, attributes):
        """
        Persist the specified `attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            mixed: the mapped INSERT/UPDATE result

        Raises:
            RuntimeError: if a dependency could not be loaded or a connection to
                DB could not be established
        """
        key_attributes, non_key_attributes = \
            self._partition_attributes(attributes)
        # UPDATE only when every key attribute carries a truthy value;
        # otherwise INSERT and let the DB assign the key.
        if key_attributes and all(key_attributes.values()):
            return self._update(key_attributes, non_key_attributes)
        return self._insert(non_key_attributes)

    def _column_name(self, attribute_name):
        """
        Convert an attribute-name to a column-name

        Args:
            attribute_name (str): the attribute-name

        Returns:
            str: the column-name
        """
        # snake_case -> CamelCase, e.g. "group_key" -> "GroupKey"
        return ''.join(
            str.capitalize(attribute_name_part)
            for attribute_name_part in attribute_name.split('_')
        )

    def _column_value(self, attribute_value):
        """
        Sanitize and quote an attribute-value

        Args:
            attribute_value (mixed): the attribute-value

        Returns:
            str: the sanitized and quoted attribute-value
        """
        if attribute_value is None:
            return 'NULL'
        # Fix: double any embedded single quotes (standard SQL escaping) so
        # that values containing "'" neither break the statement nor inject
        # SQL through these string-built queries.
        return "'%s'" % str(attribute_value).replace("'", "''")

    def _connect(self):
        """
        Establish a new connection to a DB

        Returns:
            mixed: the new connection instance

        Raises:
            NotImplementedError: if this method is not overridden by an
                inheriting class
        """
        raise NotImplementedError

    def _insert(self, non_key_attributes):
        """
        Perform an INSERT operation based on the specified `non_key_attributes`

        Args:
            non_key_attributes (dict): the non-key-attributes

        Returns:
            mixed: the mapped INSERT result
        """
        return self._map_insert_result(self.connection.execute(self._insert_sql(
            non_key_attributes)))

    def _insert_sql(self, non_key_attributes):
        """
        Generate the SQL required for an INSERT operation based on the specified
        `non_key_attributes`

        Args:
            non_key_attributes (dict): the non-key-attributes

        Returns:
            str: the SQL string
        """
        return 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.table_name,
            ', '.join(
                self._column_name(attribute_name)
                for attribute_name in non_key_attributes.keys()
            ),
            ', '.join(
                self._column_value(attribute_value)
                for attribute_value in non_key_attributes.values()
            ),
        )

    def _map_insert_result(self, result):
        """
        Map the result from an INSERT operation

        Args:
            result (mixed): the unmapped INSERT result

        Returns:
            mixed: the mapped INSERT result
        """
        return result

    def _map_update_result(self, result):
        """
        Map the result from an UPDATE operation

        Args:
            result (mixed): the unmapped UPDATE result

        Returns:
            mixed: the mapped UPDATE result
        """
        return result

    def _partition_attributes(self, attributes):
        """
        Partition the specified `attributes` into two `dict(s)`, one of the
        `key_attributes` and another of the `non_key_attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            tuple (of dicts): a `tuple` of the `key_attributes` and
                `non_key_attributes`
        """
        key_attributes, non_key_attributes = {}, {}
        key_attribute_names = self.key_attribute_names
        for attribute_name, attribute_value in attributes.items():
            if attribute_name in key_attribute_names:
                key_attributes[attribute_name] = attribute_value
            else:
                non_key_attributes[attribute_name] = attribute_value
        return (key_attributes, non_key_attributes)

    def _update(
        self,
        key_attributes,
        non_key_attributes
    ):
        """
        Perform an UPDATE operation based on the specified `key_attributes` and
        `non_key_attributes`

        Args:
            key_attributes (dict): the key-attributes
            non_key_attributes (dict): the non-key-attributes

        Returns:
            mixed: the mapped UPDATE result
        """
        return self._map_update_result(self.connection.execute(self._update_sql(
            key_attributes, non_key_attributes)))

    def _update_sql(
        self,
        key_attributes,
        non_key_attributes
    ):
        """
        Generate the SQL required for an UPDATE operation based on the specified
        `key_attributes` and `non_key_attributes`

        Args:
            key_attributes (dict): the key-attributes
            non_key_attributes (dict): the non-key-attributes

        Returns:
            str: the SQL string
        """
        return 'UPDATE %s SET %s WHERE %s' % (
            self.table_name,
            ', '.join(
                '%s = %s' % (self._column_name(attribute_name),
                             self._column_value(attribute_value))
                for attribute_name, attribute_value in
                non_key_attributes.items()
            ),
            ' AND '.join(
                '%s = %s' % (self._column_name(attribute_name),
                             self._column_value(attribute_value))
                for attribute_name, attribute_value in
                key_attributes.items()
            )
        )
class SQLitePersistor(SQLPersistor):
    """
    Class providing methods for persisting input to a SQLite DB (persistence
    occurs when the `persist` method is called on a `Model` instance)

    Instance Attributes:
        database_file_path (str): the database file-path
        table_name (str): the table name
        key_attribute_names (set of str): the key-attribute names (in the future
            complex keys will likely be supported, for now only simple/singular
            keys are supported)
    """

    def __init__(
        self,
        database_file_path,
        table_name,
        key_attribute_name=None
    ):
        super(SQLitePersistor, self).__init__(table_name, key_attribute_name)
        self.database_file_path = database_file_path

    def _connect(self):
        """
        Establish a new connection to a SQLite DB

        Returns:
            sqlite3.Connection: the new connection instance

        Raises:
            RuntimeError: if the `sqlite3` library was not successfully loaded
        """
        if sqlite3 is None:
            # Fix: surface an actionable message instead of a bare error.
            raise RuntimeError('the sqlite3 module could not be imported')
        return sqlite3.connect(self.database_file_path)

    def _map_insert_result(self, result):
        """
        Map the result from an INSERT operation

        Args:
            result (mixed): the unmapped INSERT result

        Returns:
            mixed: the mapped INSERT result
        """
        # The freshly assigned row id is reported under the key-attribute name.
        return {next(iter(self.key_attribute_names)): result.lastrowid}

    def _map_update_result(self, result):
        """
        Map the result from an UPDATE operation

        Args:
            result (mixed): the unmapped UPDATE result

        Returns:
            mixed: the mapped UPDATE result
        """
        # NOTE(review): `lastrowid` reflects the most recent INSERT, not the
        # row(s) touched by an UPDATE -- confirm callers do not rely on it.
        return {next(iter(self.key_attribute_names)): result.lastrowid}
| jzaleski/formulaic | formulaic/persistors.py | persistors.py | py | 9,411 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 299,
"usage_type": "call"
}
] |
36117885682 | """
Revision ID: a93cd7e01a93
Revises: 6052d96d32f0
Create Date: 2020-06-28 16:58:12.857105
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a93cd7e01a93'
down_revision = '6052d96d32f0'
branch_labels = None
depends_on = None
def upgrade():
    """Create the group_menu association table (group <-> menu) plus its index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('group_menu',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.Column('group_key', sa.String(length=32), nullable=True),
    sa.Column('menu_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
    sa.ForeignKeyConstraint(['menu_id'], ['menu.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_group_menu_id'), 'group_menu', ['id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the group_menu association table and its index (reverts upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_group_menu_id'), table_name='group_menu')
    op.drop_table('group_menu')
    # ### end Alembic commands ###
| lianjy357/vue-element-admin-fastapi | backend/app/alembic/versions/a93cd7e01a93_.py | a93cd7e01a93_.py | py | 1,142 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
5035742744 | import os
import sys
from ase import io
#color can be
# - A color is specified either as a number between 0 and 1 (gray value),
# three numbers between 0 and 1 (red, green, blue values or RGB),
# or as a color name from the file /usr/lib/X11/rgb.txt (or similar).
# Build an xbs ball-and-stick scene file (new_xbs.bs) for the FeH system
# from the xyz trajectory named on the command line.
# NOTE: Python 2 syntax ("print >> file") -- run with a Python 2 interpreter.
xbs_file = open("new_xbs.bs",'w')
# one "atom" record per line: chemical symbol followed by x, y, z
xbs_str="atom {} {:.3f} {:.3f} {:.3f}"
# spec   Name   Radius  Colour
spec_strs= ["spec Fe 0.450 0.4",
            "spec C 0.450 0.7",
            "spec H 0.200 0.0"]
# bonds  name 1  name 2  min-length  max-length  radius  color
bond_strs =["bonds Fe Fe 0.000 2.6 0.06 1.0",
            "bonds C Fe 0.000 2.6 0.09 0.8",
            "bonds C H 0.000 2.1 0.04 0.8",
            "bonds Fe H 0.000 2.0 0.04 1.0"]
# various parameters that can be controlled on the command line.
param_str = "inc 1"
# read the xyz file named in sys.argv; frame index "1" of the trajectory
ats = io.read(sys.argv[1],index="1")
print >> xbs_file, "*FeH system Migrating Fe is Labeled C"
# one atom record per atom in the frame
for symbol, pos in zip(ats.get_chemical_symbols(), ats.get_positions()):
    print >> xbs_file, xbs_str.format(symbol,pos[0],pos[1],pos[2])
print >> xbs_file,""
for spec_str in spec_strs:
    print >> xbs_file, spec_str
print >> xbs_file,""
for bond_str in bond_strs:
    print >> xbs_file, bond_str
print >> xbs_file,""
print >> xbs_file, param_str
xbs_file.close()
| Montmorency/imeall | imeall/tbe_tools/xyz_xbs.py | xyz_xbs.py | py | 1,503 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "ase.io.read",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ase.io",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
}
] |
6200744085 | import torch
import torch.nn as nn
from attention import NewAttention
from language_model import (
WordEmbedding,
QuestionEmbedding,
TemporalConvNet,
BertEmbedding,
)
from classifier import SimpleClassifier
from fc import FCNet
class BaseModel(nn.Module):
    """Bottom-up/top-down style VQA model: question-guided attention over
    image-region features, fused by elementwise product, then classified."""

    def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
        super(BaseModel, self).__init__()
        # Sub-modules: word embedding, question encoder, visual attention,
        # question/visual projection nets and the answer classifier.
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.q_net = q_net
        self.v_net = v_net
        self.classifier = classifier

    def forward(self, v, q, attention_output=False):
        """Forward

        v: [batch, num_objs, obj_dim], visual features
        q: [batch_size, seq_length], tokenized question
        return: logits, not probs (plus the attention map when
        attention_output is True)
        """
        word_vectors = self.w_emb(q)
        question_repr = self.q_emb(word_vectors)      # [batch, q_dim]
        attention = self.v_att(v, question_repr)
        # attention-weighted sum over the object axis -> [batch, v_dim]
        attended_visual = (attention * v).sum(1)
        joint_repr = self.q_net(question_repr) * self.v_net(attended_visual)
        logits = self.classifier(joint_repr)
        if attention_output:
            return logits, attention
        return logits
def build_baseline0_newatt(
    dataset,
    num_hid,
    bidirectional=False,
    emb_dim=300,
    w_emb_type="baseline",
    rnn_type="GRU",
    activation=nn.ReLU,
    rnn_init=False,
    relu_init=False,
    var_analysis=False,
):
    """Factory for the baseline VQA model with "new attention".

    Builds the word embedding (baseline lookup table or BERT), the question
    encoder (an RNN of `rnn_type`, or a temporal conv net when rnn_type ==
    "TCN"), the visual attention module, the projection nets and the answer
    classifier, and wires them into a BaseModel.
    """
    if w_emb_type == "BERT":
        w_emb = BertEmbedding(0.0)
    else:
        w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim, 0.0)
    if rnn_type == "TCN":
        # NOTE(review): the 14s presumably match a fixed 14-token question
        # length -- confirm against the dataset tokenizer.
        q_emb = TemporalConvNet(14, [14] * 2, num_hid, kernel_size=(3, 300))
    else:
        q_emb = QuestionEmbedding(
            emb_dim,
            num_hid,
            1,
            bidirectional,
            0.0,
            rnn_type=rnn_type,
            personalized_init=rnn_init,
        )
    num_hid = num_hid * 2 if bidirectional else num_hid  # to double number of params
    v_att = NewAttention(dataset.v_dim, q_emb.out_size, num_hid, activation=activation)
    q_net = FCNet(
        [q_emb.out_size, num_hid],
        activation,
        relu_init=relu_init,
        var_analysis=var_analysis,
        name="q_net",
    )
    v_net = FCNet(
        [dataset.v_dim, num_hid],
        activation,
        relu_init=relu_init,
        var_analysis=var_analysis,
        name="v_net",
    )
    classifier = SimpleClassifier(
        num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5, activation
    )
    return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier)
| cliziam/VQA_project_Demo | demo-vqa-webcam/base_model.py | base_model.py | py | 2,732 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line... |
17848204772 | #! /usr/bin/env python3
import os
import re
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
# Variables
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
location = "eastus"
publisher_name = "PaloAltoNetworks"

# Acquire a credential object
token_credential = DefaultAzureCredential()

# Acquire a compute client
compute_client = ComputeManagementClient(token_credential, subscription_id)


def _image_versions(offer, sku):
    """Return the image version names for the given offer/SKU, in API order."""
    return [
        image.name
        for image in compute_client.virtual_machine_images.list(
            location, publisher_name, offer, sku
        )
    ]


def _markdown_versions(versions, v9_first=False):
    """Render version names as backtick-quoted markdown tokens.

    When ``v9_first`` is true, 9.x builds are listed before the rest
    (the ordering used for the flex and Panorama sections); relative
    order within each group is preserved.
    """
    if v9_first:
        versions = ([v for v in versions if v.startswith("9")]
                    + [v for v in versions if not v.startswith("9")])
    return "".join("`" + v + "` " for v in versions)


# Output in markdown format.  The section/SKU tables below replace the
# original copy-pasted per-SKU query blocks with a single loop each.
result = "\n# Azure\n"
result += "\n## Flexible CPU (Offer: `vmseries-flex`)\n"
for label, sku in (("BYOL", "byol"), ("PAYG Bundle 1", "bundle1"),
                   ("PAYG Bundle 2", "bundle2"), ("PAYG Bundle 3", "bundle3")):
    result += "\n### %s (SKU: `%s`)\n" % (label, sku)
    result += _markdown_versions(_image_versions("vmseries-flex", sku), v9_first=True)
result += "\n## Fixed CPU (Offer: `vmseries1`)\n"
for label, sku in (("BYOL", "byol"), ("PAYG Bundle 1", "bundle1"),
                   ("PAYG Bundle 2", "bundle2")):
    result += "\n### %s (SKU: `%s`)\n" % (label, sku)
    result += _markdown_versions(_image_versions("vmseries1", sku))
result += "\n"
result += "\n## Panorama (Offer: `panorama`, SKU: `byol`)\n"
result += _markdown_versions(_image_versions("panorama", "byol"), v9_first=True)
print(result) | jamesholland-uk/pan-os-versions-in-public-cloud-providers | azure-processing.py | azure-processing.py | py | 3,946 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "azure.identity.DefaultAzureCredential",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "az... |
12835968022 | import time
from itertools import chain
import email
import imaplib
import smtplib
from readair import Log
# Account / message configuration.
# NOTE(review): personal data and credentials are hard-coded or prompted
# here at import time; consider moving them to environment variables.
MY_NAME="Nile Walker"
MY_ADDRESS = 'nilezwalker@gmail.com'
# The password is requested interactively when the module is imported.
PASSWORD = input('Enter the password for {}\n'.format(MY_ADDRESS))
MY_NUMBER='410-805-0012'
SUBJECT="Google Housing Request"
SERVER_ADDRESS="smtp.gmail.com"  # Gmail SMTP host (also used for IMAP SSL below)
PORT=587                         # SMTP submission port (STARTTLS)
# Restrict mail search. Be very specific.
# Machine should be very selective to receive messages.
criteria = {
    'FROM': 'yahoo@antonakis.co.uk',
    # 'SUBJECT': 'SPECIAL SUBJECT LINE',
    # 'BODY': 'SECRET SIGNATURE',
}
# Highest message UID processed so far (0 = nothing seen yet).
uid_max = 0
def getBody(b):
    """Return the plain-text body of an email.message.Message.

    For multipart messages, the first text/plain part that is not an
    attachment wins; otherwise the message's own decoded payload is used.
    Returns decoded bytes, or "" for a multipart message with no match.
    """
    if not b.is_multipart():
        # not multipart - i.e. plain text, no attachments
        return b.get_payload(decode=True)
    for part in b.walk():
        is_plain_text = part.get_content_type() == 'text/plain'
        disposition = str(part.get('Content-Disposition'))
        # skip any text/plain (txt) attachments
        if is_plain_text and 'attachment' not in disposition:
            return part.get_payload(decode=True)
    return ""
def get_first_text_block(msg):
    """Return the payload of the first text part of *msg*.

    For multipart messages the first part whose main type is ``text`` wins;
    for a plain text message its own payload is returned; otherwise None.
    The payload is NOT decoded from its Content-Transfer-Encoding (use
    get_payload(decode=True) for raw bytes).
    """
    maintype = msg.get_content_maintype()  # fix: do not shadow builtin type()
    if maintype == 'multipart':
        for part in msg.get_payload():
            if part.get_content_maintype() == 'text':
                return part.get_payload()
    elif maintype == 'text':
        return msg.get_payload()
# Connect to the IMAP server and pull every message from the target sender,
# logging each plain-text body with carriage returns stripped.
mail = imaplib.IMAP4_SSL(SERVER_ADDRESS)
mail.login(MY_ADDRESS,PASSWORD)
mail.select('INBOX')
typ, data = mail.search(None, '(FROM "yahoo@antonakis.co.uk")')
mail_ids = data[0]          # space-separated message ids as bytes
id_list = mail_ids.split()
for email_id in id_list:
    result, data = mail.fetch(email_id, "(RFC822)") # fetch the email body (RFC822) for the given ID
    msg=email.message_from_bytes(data[0][1])
    body = getBody(msg)
    body = body.decode("utf-8")
    body = "".join(body.split('\r'))    # drop CRs to normalise line endings
    Log(body)
mail.logout()
"""
# Keep checking messages ...
# I don't like using IDLE because Yahoo does not support it.
while 1:
# Have to login/logout each time because that's the only way to get fresh results.
server = imaplib.IMAP4_SSL(SERVER_ADDRESS)
server.login(MY_ADDRESS,PASSWORD)
server.select('INBOX')
result, data = server.uid('search', None, search_string(uid_max, criteria))
uids = [int(s) for s in data[0].split()]
for uid in uids:
# Have to check again because Gmail sometimes does not obey UID criterion.
if uid > uid_max:
result, data = server.uid('fetch', uid, '(RFC822)') # fetch entire message
msg = email.message_from_string(data[0][1])
uid_max = uid
text = get_first_text_block(msg)
print('New message :::::::::::::::::::::')
print(text)
server.logout()
time.sleep(5*60)"""
| NWalker4483/FlightRegister | main.py | main.py | py | 2,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "email.message_from_bytes",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "readair.Log",
"line_number": 65,
"usage_type": "call"
}
] |
40306764598 | import numpy as np
import pandas as pd
from collections import OrderedDict
def loss(h, y):
    """Mean binary cross-entropy between predictions ``h`` and labels ``y``.

    Args:
        h: numpy array of predicted probabilities in (0, 1)
        y: numpy array of 0/1 labels

    Returns:
        float: the mean log-loss
    """
    # Fix: the negative-class term must use log(1 - h), not log(1 - y)
    # (the original produced NaN for y == 1 and a zero penalty for y == 0).
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def add_intercept(X):
    """Reshape X into a column vector and prepend a bias column of ones."""
    ones = np.ones((X.shape[0], 1))
    column = np.reshape(X, (-1, 1))
    return np.concatenate((ones, column), axis=1)
def predict(x, w):
    """Predict rounded 0/1 class labels for inputs ``x`` given weights ``w``."""
    features = add_intercept(x)
    scores = np.dot(features, w)
    return sigmoid(scores).round()
def sigmoid(x):
    '''
    returns sigmoid h(x) = 1/(e^-x + 1) of the input x, element-wise
    '''
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def check_for_convergence(beta_old, beta_new, tol=1e-3):
    '''
    Checks whether the coefficients have converged in the l-infinity norm:
    returns True when no coefficient moved by more than ``tol``,
    False otherwise.
    '''
    delta = np.abs(beta_old - beta_new)
    return not np.any(delta > tol)
def get_data():
    """Return (amount_spent, send_discount) float arrays for the toy dataset."""
    records = OrderedDict(
        amount_spent=[50, 10, 20, 5, 95, 70, 100, 200, 0],
        send_discount=[0, 1, 1, 1, 0, 0, 0, 0, 1],
    )
    frame = pd.DataFrame.from_dict(records)     # build the dataframe
    X = frame['amount_spent'].astype('float').values    # features as float
    y = frame['send_discount'].astype('float').values   # labels as float
    return (X, y)
def hessian_runner(X, y, learning_rate=0.01, epochs=10000):
    """Fit logistic-regression weights with (damped) Newton's method.

    Args:
        X: 1-D numpy array of raw feature values (intercept added here)
        y: numpy array of 0/1 labels
        learning_rate: step-size damping applied to the Newton direction
        epochs: maximum number of iterations

    Side effects: prints progress and a final prediction demo.
    """
    X = add_intercept(X)
    W = np.zeros(X.shape[1])
    for i in range(epochs):
        theta = np.dot(X, W)
        h = sigmoid(theta)
        gradient = np.dot(X.T, h - y) / y.size
        # Fix: the Hessian of the log-loss is X^T diag(h*(1-h)) X / m.
        # The original np.dot(h, 1-h) collapsed the diagonal to a scalar,
        # producing a wrongly scaled X^T X and an incorrect Newton step.
        hessian = np.dot(X.T * (h * (1 - h)), X) / y.size
        inv_hessian = np.linalg.inv(hessian)
        W_old = W
        W = W - (learning_rate * np.dot(inv_hessian, gradient))
        if check_for_convergence(W_old, W):
            W = W_old
            print('Converged @ ', i)
            break
        if i % 1000 == 0:
            print('Running : ', i, W, W_old)
    print('test : ', predict(np.array([[15], [155], [45], [55]]), W))
def run():
    """Load the toy dataset and fit the model with Newton's method."""
    hessian_runner(*get_data())
# Script entry point: train on the toy dataset.
if __name__ == "__main__":
    run()
| guruprasaad123/ml_for_life | from_scratch/logistic_regression/Newtons method/hessian.py | hessian.py | py | 2,333 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.log",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number... |
8550138978 | from urllib.request import urlopen
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import plotly
import json
import os
# --- County-level choropleth (New York State) -------------------------------
# Resolve <repo>/data/usa/csv/us-counties.csv relative to this script.
# NOTE(review): '..\..' is a Windows-style separator inside a plain string;
# on POSIX it becomes a literal "..\.." path component -- confirm the script
# is Windows-only or switch to os.path.join/pathlib.
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\..', 'data'))
usa_data_dir = os.path.join(data_dir, 'usa')
csv_dir = os.path.join(usa_data_dir, 'csv')
csv_data = os.path.join(csv_dir, 'us-counties.csv')
# Per-county rows with at least: date, county, state, fips, cases, deaths.
df = pd.read_csv(csv_data, sep=',', header=0)
# Filtering by latest date
df = df.loc[df['date'] == '2020-08-04']
# Filling empty cells
df = df.fillna(0)
# Removing rows where fips is 0
df = df.loc[df['fips'] != 0]
df = df.loc[df['state'] == 'New York']
# County boundary polygons keyed by FIPS code (fetched over the network).
geojson_url = 'https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json'
with urlopen(geojson_url) as response:
    counties = json.load(response)
maps_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\..', 'maps'))
result_html = os.path.join(maps_dir, 'ny-counties-coronavirus-heatmap.html')
# Death-count heatmap per county, centered on upstate New York.
fig = px.choropleth_mapbox(df,
                           geojson=counties,
                           locations='fips',
                           color='deaths',
                           hover_data=["county", "state", "deaths", "cases"],
                           color_continuous_scale="Jet",
                           range_color=(0, 30),
                           mapbox_style="carto-positron",
                           zoom=6.0, center={"lat": 42.723, "lon": -75.762},
                           opacity=0.5,
                           labels={'county': 'County', 'state': 'State'}
                           )
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
# Writes the interactive HTML map (and opens it in the default browser).
plotly.offline.plot(fig, filename=result_html)
# --- State-level choropleth (whole USA) -------------------------------------
state_data = os.path.join(csv_dir, 'us-states.csv')
state_code_data = os.path.join(csv_dir, 'us-agr-exports-2011.csv')
df_state = pd.read_csv(state_data, sep=',', header=0)
# The agricultural-exports file is used only as a state-name -> 2-letter-code
# lookup, which the 'USA-states' location mode requires.
junk_data = pd.read_csv(state_code_data, sep=',', header=0)
df_code = junk_data[['code', 'state']]
plot_data = df_state.merge(df_code, on=['state'], how='left')
plot_data = plot_data.loc[plot_data['date'] == '2020-08-04']
print(plot_data)
states_plot = os.path.join(maps_dir, 'usa-states-coronavirus-heatmap.html')
# Custom hover text combining state, cases and deaths.
plot_data['text'] = 'State: ' + plot_data['state'].astype(str) + '<br>' + \
                    'Cases: ' + plot_data['cases'].astype(str) + '<br>' + \
                    'Deaths: ' + plot_data['deaths'].astype(str)
# Color-scales: https://plotly.com/python/v3/colorscales/
# Maps Reference: https://plotly.com/python/choropleth-maps/
fig = go.Figure(data=go.Choropleth(
    locations=plot_data['code'],
    z=plot_data['deaths'],
    locationmode='USA-states',
    colorscale=[
        [0.0, 'rgb(165,0,38)'],
        [0.1111111111111111, 'rgb(215,48,39)'],
        [0.2222222222222222, 'rgb(244,109,67)'],
        [0.3333333333333333, 'rgb(253,174,97)'],
        [0.4444444444444444, 'rgb(254,224,144)'],
        [0.5555555555555556, 'rgb(224,243,248)'],
        [0.6666666666666666, 'rgb(171,217,233)'],
        [0.7777777777777778, 'rgb(116,173,209)'],
        [0.8888888888888888, 'rgb(69,117,180)'],
        [1.0, 'rgb(49,54,149)']
    ],
    text=plot_data['text'], # hover text
    colorbar_title='Deaths'
))
fig.update_layout(
    title_text='2020 Coronavirus Deaths in USA',
    geo=dict(
        scope='usa',
        projection=go.layout.geo.Projection(type='albers usa')
    )
)
plotly.offline.plot(fig, filename=states_plot)
| vinitshah24/Coronavirus-Analysis | src/usa/data_plots.py | data_plots.py | py | 3,402 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_nu... |
6742324568 | # -*- coding: utf-8 -*-
# file docbook2epub.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
#
# \author Thibaut Cuvelier
#
# Full author contact details are available in file CREDITS
# Usage:
# python docbook2epub.py java_binary saxon_path xsltproc_path xslt_path in.docbook in.orig.path out.epub
from __future__ import print_function
import glob
import os
import shutil
import sys
import tempfile
import zipfile
from io import open # Required for Python 2.
def _parse_nullable_argument(arg):
return arg if arg != '' and arg != 'none' else None
class ImageRename:
    """Records how one image is renamed when packed into the ePub archive."""

    def __init__(self, opf_path, local_path, epub_path):
        # Path as referenced from OEBPS/package.opf (often absolute).
        self.opf_path = opf_path
        # Path on the local file system where the image actually lives.
        self.local_path = local_path
        # Relative path the image will have inside the ePub archive.
        self.epub_path = epub_path
class DocBookToEpub:
    """Converts a DocBook file into an ePub archive.

    The DocBook-to-ePub XSLT style sheet is run with either xsltproc or
    Saxon 6 (via Java), whichever is available; images referenced by the
    generated package.opf are then copied into the archive and every path
    rewritten accordingly, before the staging directory is zipped.
    """

    def __init__(self, args=None):
        if args is None:
            args = sys.argv
        if len(args) != 8:
            print('Exactly eight arguments are expected, only %s found: %s.' % (len(args), args))
            sys.exit(1)
        # Fix: read the arguments from `args` instead of sys.argv so a
        # caller-supplied argument list is actually honoured (previously the
        # parameter was validated but then ignored).
        self.own_path = args[0]
        self.java_path = _parse_nullable_argument(args[1])
        self.saxon_path = _parse_nullable_argument(args[2])
        self.xsltproc_path = _parse_nullable_argument(args[3])
        self.xslt_path = _parse_nullable_argument(args[4])
        self.input = args[5]
        self.input_path = args[6]
        self.output = args[7]
        self.script_folder = os.path.dirname(self.own_path) + '/../'

        print('Generating ePub with the following parameters:')
        print(self.own_path)
        print(self.java_path)
        print(self.saxon_path)
        print(self.xsltproc_path)
        print(self.xslt_path)
        print(self.input)
        print(self.input_path)
        print(self.output)

        # Precompute paths that will be used later.
        self.output_dir = tempfile.mkdtemp().replace('\\', '/')
        self.package_opf = self.output_dir + '/OEBPS/package.opf'  # Does not exist yet,
        print('Temporary output directory: %s' % self.output_dir)

        if self.xslt_path is None:
            self.xslt = self.script_folder + 'docbook/epub3/chunk.xsl'
        else:
            self.xslt = self.xslt_path + '/epub3/chunk.xsl'
        print('XSLT style sheet to use:')
        print(self.xslt)

        if self.saxon_path is None:
            self.saxon_path = self.script_folder + 'scripts/saxon6.5.5.jar'

        # These will be filled during the execution of the script.
        self.renamed = None

    def gracefully_fail(self, reason):
        """Print the failure reason, clean up the temp directory, and exit."""
        print('docbook2epub fails: %s' % reason)
        shutil.rmtree(self.output_dir, ignore_errors=True)
        sys.exit(1)

    def start_xslt_transformation(self):
        """Run the DocBook-to-ePub XSLT with the first available processor."""
        command = None
        if self.xsltproc_path is not None:
            command = self.start_xslt_transformation_xsltproc()
        elif self.java_path is not None:
            command = self.start_xslt_transformation_saxon6()
        if command is None:
            self.gracefully_fail('no XSLT processor available')

        print('Command to execute:')
        print(command)
        quoted_command = command
        if os.name == 'nt':
            # On Windows, it is typical to have spaces in folder names, and that requires to wrap the whole command
            # in quotes. On Linux, this might create errors when starting the command.
            quoted_command = '"' + command + '"'
        # This could be simplified by using subprocess.run, but this requires Python 3.5.
        if os.system(quoted_command) != 0:
            self.gracefully_fail('error from the XSLT processor')
        print('Generated ePub contents.')

    def start_xslt_transformation_xsltproc(self):
        """Build the xsltproc command line."""
        params = '-stringparam base.dir "' + self.output_dir + '"'
        return '"' + self.xsltproc_path + '" ' + params + ' "' + self.xslt + '" "' + self.input + '"'

    def start_xslt_transformation_saxon6(self):
        """Build the Java/Saxon 6 command line."""
        params = 'base.dir=%s' % self.output_dir
        executable = '"' + self.java_path + '" -jar "' + self.saxon_path + '"'
        return executable + ' "' + self.input + '" "' + self.xslt + '" "' + params + '"'

    def get_images_from_package_opf(self):
        """Return the image paths referenced by <item> tags in package.opf."""
        images = []
        # Example in the OPF file:
        # <item id="d436e1" href="D:/LyX/lib/images/buffer-view.svgz" media-type="image/SVGZ"/>
        # The XHTML files are also <item> tags:
        # <item id="id-d0e2" href="index.xhtml" media-type="application/xhtml+xml"/>
        try:
            with open(self.package_opf, 'r') as f:
                for line in f.readlines():
                    if '<item' in line and 'media-type="image' in line:
                        images.append(line.split('href="')[1].split('"')[0])
        except FileNotFoundError:
            print('The package.opf file was not found, probably due to a DocBook error. The ePub file will be corrupt.')
        return images

    def get_image_changes(self):
        """Map each OPF image path to its local source and ePub destination."""
        epub_folder = 'images/'
        changes = []
        for image in self.get_images_from_package_opf():
            if os.path.exists(image):
                file_system_path = image
            elif os.path.exists(self.input_path + image):
                file_system_path = self.input_path + image
            else:
                # NOTE(review): missing images keep an empty source path and
                # will make shutil.copyfile fail later -- confirm intended.
                file_system_path = ''
            changes.append(ImageRename(image, file_system_path, epub_folder + os.path.basename(image)))
        return changes

    def change_image_paths(self, file):
        """Rewrite every renamed image path inside the given text file."""
        # This could be optimised, as the same operation is performed a zillion times on many files:
        # https://www.oreilly.com/library/view/python-cookbook/0596001673/ch03s15.html
        with open(file, 'r', encoding='utf8') as f:
            contents = list(f)
        with open(file, 'w', encoding='utf8') as f:
            for line in contents:
                for change in self.renamed:
                    line = line.replace(change.opf_path, change.epub_path)
                f.write(line)

    def copy_images(self):
        """Copy referenced images into OEBPS/images/ and rewrite all paths."""
        # Copy the assets to the OEBPS/images/. All paths are available in OEBPS/package.opf, but they must also be
        # changed in the XHTML files. Typically, the current paths are absolute.
        # First, get the mapping old file => file in the ePub archive.
        self.renamed = self.get_image_changes()
        # Then, transform all paths (both OPF and XHTML files).
        self.change_image_paths(self.output_dir + '/OEBPS/package.opf')
        for file in glob.glob(self.output_dir + '/OEBPS/*.xhtml'):
            self.change_image_paths(file)
        # Ensure that the destination path exists. OEBPS exists due to the DocBook-to-ePub transformation.
        if not os.path.exists(self.output_dir + '/OEBPS/images/'):
            os.mkdir(self.output_dir + '/OEBPS/images/')
        # Finally, actually copy the image files.
        for change in self.renamed:
            shutil.copyfile(change.local_path, self.output_dir + '/OEBPS/' + change.epub_path)

    def create_zip_archive(self):
        """Zip the staging directory into the final .epub and delete it."""
        with zipfile.ZipFile(self.output, 'w', zipfile.ZIP_DEFLATED) as zip:
            # Python 3.5 brings the `recursive` argument. For older versions, this trick is required...
            # for file in glob.glob(output_dir + '/**/*', recursive=True):
            for file in [os.path.join(dp, f) for dp, dn, filenames in os.walk(self.output_dir) for f in filenames]:
                zip.write(file, os.path.relpath(file, self.output_dir), compress_type=zipfile.ZIP_STORED)
        shutil.rmtree(self.output_dir)
        print('Generated ePub.')

    def transform(self):
        """Run the whole conversion: XSLT, image copying, zipping."""
        self.start_xslt_transformation()
        self.copy_images()
        self.create_zip_archive()
# Command-line entry point: forward the raw argv to the converter.
if __name__ == '__main__':
    DocBookToEpub(sys.argv).transform()
| cburschka/lyx | lib/scripts/docbook2epub.py | docbook2epub.py | py | 7,833 | python | en | code | 33 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 46... |
27941614537 | from itertools import chain
from itertools import islice
from itertools import repeat
from math import ceil
import numpy as np
from scipy.sparse import issparse
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import scale
# Metrics served by sklearn.metrics.pairwise.pairwise_kernels.
kernels = ["linear", "poly", "polynomial", "rbf", "laplacian", "sigmoid",
           "cosine"]
# Metrics whose similarity is obtained as 1 - distance (distances in [0, 1]).
# Fix: use scipy's spellings "kulsinski" and "russellrao" (previously
# "kulsinksi"/"russelrao") so the membership tests actually match the metric
# names accepted by pairwise_distances.
oneminus = ["braycurtis", "correlation", "dice", "jaccard", "kulsinski",
            "rogerstanimoto", "russellrao", "rbf", "chi2", "laplacian",
            "sigmoid"]
def _knn_sim(X, metric=None, n_neighbors=None, p=None,
             metric_params=None):
    """Jaccard similarity between rows of the kNN graph of X.

    ``metric`` selects the distance used to build the kNN graph itself
    (default "euclidean"); ``n_neighbors`` defaults to 5.
    """
    if n_neighbors is None:
        n_neighbors = 5
    if metric is None:
        metric = "euclidean"
    # Dense kNN distance graph; its rows are then compared with Jaccard.
    graph = kneighbors_graph(X, n_neighbors, mode="distance",
                             metric=metric, p=p,
                             metric_params=metric_params).toarray()
    return _similarities(graph, metric="jaccard")
def _distances(X1, X2=None, metric=None, metric_params=None):
    """Pairwise distances between X1 and X2 (within X1 when X2 is None).

    Kernel metrics are converted to distances: "cosine" uses the dedicated
    cosine distance, "additive_chi2" is negated, and any other kernel
    similarity S becomes max(S) - S. "knn_jaccard" is 1 - similarity.
    """
    if metric is None:
        metric = "euclidean"
    if metric in kernels:
        if metric == "cosine":
            return pairwise_distances(X1, X2, metric="cosine")
        if metric_params is None:
            S = pairwise_kernels(X1, X2, metric)
        else:
            S = pairwise_kernels(X1, X2, metric, **metric_params)
        if metric == "additive_chi2":
            return -1 * S
        return np.max(S) - S
    if metric == "knn_jaccard":
        S = _similarities(X1, X2, metric="knn_jaccard",
                          **metric_params)
        return 1 - S
    return pairwise_distances(X=X1, Y=X2, metric=metric)
def _similarities(X1, X2=None, metric=None, knn_metric=None,
                  n_neighbors=None, p=None, metric_params=None):
    """Pairwise similarities between X1 and X2 (within X1 when X2 is None).

    Kernel metrics are returned directly; metrics listed in ``oneminus``
    become 1 - distance; anything else becomes 1 / (1 + distance).
    ``knn_metric``, ``n_neighbors`` and ``p`` apply only to "knn_jaccard".
    """
    if metric is None:
        metric = "euclidean"
    if metric in kernels:
        if metric_params is None:
            return pairwise_kernels(X1, X2, metric)
        return pairwise_kernels(X1, X2, metric, **metric_params)
    if metric == "knn_jaccard":
        if X2 is not None:
            print("Not implemented for two matrices")
            return None
        return _knn_sim(X1, metric=knn_metric,
                        n_neighbors=n_neighbors, p=p,
                        metric_params=metric_params)
    D = pairwise_distances(X1, X2, metric)
    if metric in oneminus:
        return 1 - D
    return 1 / (1 + D)
def _permute(X, n=None, axis=None, seed=None):
"""Permute a frame n times along a given axis."""
X = X.copy()
if (issparse(X)) and (X.getformat() not in ["csr", "csc"]):
X = X.tocsr()
axis = 0 if axis is None else axis
seed = 42 if seed is None else seed
np.random.seed(seed)
indices = np.random.permutation(X.shape[axis])
P = X[:, indices] if axis == 1 else X[indices, :]
return P
def _linreg_get_beta(x, y, scale_exp):
"""Use Scipy linregress to get the regression coefficient."""
from scipy.stats import linregress
if scale_exp is True:
x = scale(x)
return linregress(x, y)[0]
def _chunk_indices(X, n, axis=None):
"""A generator to return n chunks of an array."""
axis = 0 if axis is None else axis
if (axis != 0) and (axis != 1):
print("Please provide a valid axis (0 or 1)")
length = X.shape[0] if axis == 0 else X.shape[1]
size = ceil(length / n)
for i in range(0, length, size):
yield range(length)[i:i + size]
def _make_generator(iterable):
for i in iterable:
yield i
def _chunk_generator(generator, size=None):
for g in generator:
yield chain([g], islice(generator, size - 1))
def _std_sparse(X, axis=None, ddof=None):
axis = 0 if axis is None else axis
ddof = 0 if ddof is None else ddof
def _variance(array):
N = len(array)
return 1 / (N - ddof) * (np.sum(np.abs(array - array.mean()) ** 2))
if axis == 0:
c = X.shape[1]
var = np.array([_variance(X[:, i].data) for i in range(c)])
return np.sqrt(var)
else:
c = X.shape[0]
var = np.array([_variance(X[i, :].data) for i in range(c)])
return np.sqrt(var)
| ohlerlab/SEMITONES | src/SEMITONES/_utils.py | _utils.py | py | 4,987 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "sklearn.neighbors.kneighbors_graph",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.pairwise_distances",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.pairwise_kernels",
"line_number": 49,... |
30438808856 | """
TFE - Chatbot Tifi - Technifutur
by Nicolas Christiaens
"""
from datasets import Dataset
from FineTuning import STSBTrainingModel
from torch.utils.data import DataLoader
import pandas as pd
from Preprocessing import Preprocessing
from transformers import AdamW,get_constant_schedule
from transformers import AutoModel,AutoTokenizer,Trainer
import torch
from tqdm.auto import tqdm
def getCustomDS(file="customDS.xlsx"):
    """Load the custom STSB-style dataset from an Excel file.

    Both sentence columns are run through the project's ``Preprocessing``
    function and the similarity score is cast to float before the frame is
    wrapped in a HuggingFace ``Dataset``.
    """
    frame = pd.read_excel(file)
    for column in ("sentence1", "sentence2"):
        frame[column] = frame[column].apply(Preprocessing)
    frame["score"] = frame["score"].astype(float)
    return Dataset.from_pandas(frame)
if __name__ == "__main__":
    # Select the training device; fall back to CPU when no GPU is present.
    # Fix: previously `device` was left undefined on the no-GPU path, which
    # crashed later at `.to(device)` with a NameError.
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
        print("Pas de GPU pour le training")
    # Read the custom dataset
    train = getCustomDS()
    # Set Global Parameters
    max_length = 128
    model_name = "Model_SentenceEmbedding/Finetuning/Final_model"
    model_save = "Model_SentenceEmbedding/Custom/Final_model"
    batch_size = 16
    learning_rate = 2e-5
    weight_decay = 0.01
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    num_epochs = 2
    # Create the tokenize function (one per sentence column)
    def tokenize1(df):
        return tokenizer(df["sentence1"],padding=True,truncation=True,max_length=max_length)
    def tokenize2(df):
        return tokenizer(df["sentence2"],padding=True,truncation=True,max_length=max_length)
    # Transform in the correct form : ['input_ids1', 'attention_mask1', 'input_ids2', 'attention_mask2','score']
    train_encoded = train.map(tokenize1,batched=True,batch_size=None)
    train_encoded = train_encoded.rename_column("input_ids","input_ids1")
    train_encoded = train_encoded.rename_column("attention_mask","attention_mask1")
    train_encoded = train_encoded.map(tokenize2,batched=True,batch_size=None)
    train_encoded = train_encoded.rename_column("input_ids","input_ids2")
    train_encoded = train_encoded.rename_column("attention_mask","attention_mask2")
    train_encoded = train_encoded.remove_columns(["sentence1"])
    train_encoded = train_encoded.remove_columns(["sentence2"])
    train_encoded = train_encoded.remove_columns(["Old Similarity"])
    # Set the correct format
    train_encoded.set_format("torch")
    # Create the Dataloader
    trainloader = DataLoader(train_encoded,shuffle=True,batch_size=batch_size)
    # Load the model used as body
    body = AutoModel.from_pretrained(model_name,max_length=max_length)
    # Create the training model
    model = STSBTrainingModel(body=body).to(device)
    # Optimizer and (constant) learning-rate schedule
    optimizer = AdamW(model.parameters(),lr=learning_rate,weight_decay=weight_decay)
    training_steps = num_epochs*len(trainloader)
    scheduler = get_constant_schedule(optimizer=optimizer)
    # Set up the progress bar
    progress_bar = tqdm(range(training_steps))
    # Loss keeper (per-epoch averages; index 0 is the pre-training loss)
    loss_train = []
    # Get the loss without training (epoch 0)
    tmp_loss = []
    for batch in trainloader:
        # Batch to device
        batch = {k: v.to(device) for k, v in batch.items()}
        # Predict the batch (no gradients needed)
        with torch.no_grad():
            loss,_ = model(**batch)
        # Append the loss
        tmp_loss.append(loss.item())
    # Make the loss independant to the batch size
    tmp_loss = sum(tmp_loss)/len(trainloader)
    # Append the epoch training loss
    loss_train.append(tmp_loss)
    # Train the model
    for epoch in range(num_epochs):
        model.train()
        tmp_loss = []
        for batch in trainloader:
            # Clear the gradient
            optimizer.zero_grad()
            # Batch to device
            batch = {k: v.to(device) for k, v in batch.items()}
            # Predict the batch
            loss,_ = model(**batch)
            # Compute the gradient
            loss.backward()
            # Make the step of training
            optimizer.step()
            scheduler.step()
            # Update the progess bar
            progress_bar.update(1)
            # Add the loss
            tmp_loss.append(loss.item())
        # Make the loss independant to the batch size
        tmp_loss = sum(tmp_loss)/len(trainloader)
        # Append the epoch training loss
        loss_train.append(tmp_loss)
    # Save the trained model with the tokenizer
    trainer = Trainer(model=body,tokenizer=tokenizer)
    trainer.save_model(model_save)
| TheCricri/TFE_Chatbot_Tifi | CustomFineTuning.py | CustomFineTuning.py | py | 4,735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Preprocessing.Preprocessing",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "Preprocessing.Preprocessing",
"line_number": 21,
"usage_type": "argument"
},
{
... |
74050038184 | import numpy as np
from typing import Iterable, List
from nltk.stem import PorterStemmer
from parlai.crowdsourcing.utils.acceptability import (
AcceptabilityChecker,
normalize_answer,
)
import parlai.utils.logging as logging
# Bad persona violations (apprentice side); each constant is the exact
# violation string reported by bad_persona().
PERSONA_REPEATS_PROMPT = 'repeated the prompt text'
ASKED_WIZARD_QUESTION = 'asked wizard in the persona details'
COPIED_EXTENDED_PERSONA = 'extended persona copies the main persona'
GENERIC_EXTENDED_PERSONA = 'extended persona is generic'
# Phrase whose presence in the extended persona marks a question to the wizard.
QUESTION_PHRASE = 'what is your'
# Wizard knowledge violations reported by poor_knowledge_selection().
# Minimum average response/knowledge token overlap before flagging.
DEFAULT_KNOWLEDGE_OVERLAP_THRESHOLD = 0.05
POOR_SEARCH_QUERIES = 'poor search queries'
IRRELEVANT_SEARCH__QUERIES = 'irrelevant search terms'
NOT_ENOUGH_SEARCH = 'not enough selected knowledge sources'
SELECTED_SHORT_PIECES = 'short knowledge pieces selected.'
LOW_KNOWLEDGE_OVERLAP = 'low knowledge overlap'
def tokenize_text(text, stemmer, as_set=True):
    """Normalize, split on spaces and stem; return a set (default) or a list."""
    normalized = normalize_answer(text)
    stemmed = [stemmer.stem(word) for word in normalized.split(' ')]
    return set(stemmed) if as_set else stemmed
def overlap_ratios(a: set, b: set) -> float:
    """
    Jaccard similarity of two sets: |a & b| / |a | b|.

    A small constant (0.001) is added to the denominator so two empty sets
    yield 0.0 instead of raising ZeroDivisionError.
    """
    intersection = a & b
    union = a | b
    return len(intersection) / (len(union) + 0.001)
def is_valid_agent_chat_message(message, agent_id):
    """Truthy when ``message`` is a non-empty chat text authored by
    ``agent_id`` and is not a search query."""
    text = message.get('text')
    if not text:
        return text
    if message.get('id') != agent_id:
        return False
    return not message.get('is_search_query', False)
def bad_persona(persona, stemmer):
    """
    Check for poor persona selection by apprentice.

    Returns a list of violation strings (possibly empty), or None when the
    persona does not have the expected 2-3 line pilot format.
    """
    persona_parts = persona.split('\n')
    # It is not from the persona selection ones (personas used during the pilot).
    if not (
        len(persona_parts) == 2
        or (len(persona_parts) == 3 and 'I live in ' in persona_parts[0])
    ):
        logging.warning(f'Old fashioned persona: {persona}')
        return
    # Removing the location ('I live in X') part
    if len(persona_parts) == 3:
        persona_parts = persona_parts[1:]
    main_pers, ext_pers = [p.lower() for p in persona_parts]
    violations = []
    # Bad main persona response.
    # Fix: main_pers was lower-cased above, so the prefix test must be
    # lower-case too -- the previous startswith('My favorite ') comparison
    # never matched and the whole check was dead code.
    if main_pers.startswith('my favorite '):
        for phrase in ('i like', 'my favorite'):
            # Remove the original "my favorite" prompt prefix.
            persona_core = main_pers[len('my favorite ') :]
            if phrase in persona_core:
                violations.append(PERSONA_REPEATS_PROMPT)
                break
    # Extended persona that asks questions
    for phrase in (QUESTION_PHRASE,):
        if phrase in ext_pers:
            violations.append(ASKED_WIZARD_QUESTION)
    # Extended persona that mostly repeats the main persona
    main_pers_tokens = tokenize_text(main_pers, stemmer)
    ext_pers_tokens = tokenize_text(ext_pers, stemmer)
    if len(ext_pers_tokens.difference(main_pers_tokens)) < 2:
        violations.append(COPIED_EXTENDED_PERSONA)
    # Use of non-generic words in persona.
    common_phrases = ('i', 'it', 'like', 'very', 'much', 'favorite', 'is', 'am')
    tokens = [w.strip() for w in ext_pers.split(' ') if w]
    ext_useful_words = [t for t in tokens if t not in common_phrases]
    if len(tokens) > 4 and len(ext_useful_words) < 2:
        violations.append(GENERIC_EXTENDED_PERSONA)
    return violations
def poor_knowledge_selection(messages, persona, stemmer, knwldg_ovlp_thrshld):
    """
    Check for poor search and knowledge selection by wizard.

    Scans the wizard's messages for search queries and selected knowledge
    sentences, then returns a list of violation strings (possibly empty).
    ``knwldg_ovlp_thrshld`` is the minimum acceptable average token overlap
    between a wizard response and its selected knowledge.
    """
    # Collecting search and knowledge selections
    search_terms = []
    selected_knowledge = []
    # Running token set of everything said so far (persona + all messages);
    # used to judge whether search queries relate to the conversation.
    message_history_tokens = tokenize_text(persona, stemmer)
    n_search_query_not_in_history = 0
    for msg in messages:
        if msg.get('text', None):
            message_history_tokens = message_history_tokens.union(
                tokenize_text(msg['text'], stemmer)
            )
        if msg['id'] != 'Wizard':
            continue
        # selections[0][0] truthy means "no knowledge used" was ticked.
        selections = msg.get('task_data', {}).get('selected_text_candidates')
        if not selections or selections[0][0]:
            continue
        search_query = msg['task_data']['search_query']
        search_terms.append(search_query)
        if message_history_tokens.isdisjoint(tokenize_text(search_query, stemmer)):
            n_search_query_not_in_history += 1
        # Gather every ticked sentence across the retrieved documents
        # (selections is offset by one relative to text_candidates).
        selected_parts = []
        for doc_id in range(1, len(selections)):
            doc_selections = selections[doc_id]
            for sentence_id in range(len(doc_selections)):
                if doc_selections[sentence_id]:
                    selected_parts.append(
                        msg['task_data']['text_candidates'][doc_id - 1]['content'][
                            sentence_id
                        ]
                    )
        selected_knowledge.append(
            {'text': msg['text'], 'knowledge': ' '.join(selected_parts)}
        )
    # Per-response knowledge size and response/knowledge token overlap.
    knowledge_length = []
    knowledge_overlaps = []
    for knwldg in selected_knowledge:
        knowledge_tokens = tokenize_text(knwldg['knowledge'], stemmer)
        knowledge_length.append(len(knowledge_tokens))
        response_tokens = tokenize_text(knwldg['text'], stemmer)
        knowledge_overlaps.append(overlap_ratios(knowledge_tokens, response_tokens))
    violations = []
    # Repeated the same search queries
    if len(search_terms) - len(set(search_terms)) > 3:
        violations.append(POOR_SEARCH_QUERIES)
    # Search doesn't have overlap with message history
    if n_search_query_not_in_history > 2:
        violations.append(IRRELEVANT_SEARCH__QUERIES)
    # No selection
    if not knowledge_length:
        violations.append(NOT_ENOUGH_SEARCH)
    # Only selecting short sentences
    # NOTE(review): when nothing was selected, knowledge_length is empty and
    # np.average returns nan (with a RuntimeWarning); nan comparisons are
    # False, so only NOT_ENOUGH_SEARCH is reported in that case.
    if np.average(knowledge_length) < 5:
        violations.append(SELECTED_SHORT_PIECES)
    # Small overlap between response and the selected knowledge parts
    knowledge_overlap_avg = np.average(knowledge_overlaps)
    if knowledge_overlap_avg < knwldg_ovlp_thrshld:
        violations.append(f'{LOW_KNOWLEDGE_OVERLAP} ({knowledge_overlap_avg})')
    return violations
class WizardOfInternetAcceptabilityChecker(AcceptabilityChecker):
    """
    ParlAI general acceptability checker customized for the Wizard of Internet.

    On top of the generic chat checks, it validates the apprentice's persona
    (bad_persona) and the wizard's search/knowledge behavior
    (poor_knowledge_selection).
    """
    def __init__(self):
        # Minimum average token overlap required between a wizard response
        # and its selected knowledge (see poor_knowledge_selection).
        self.knowledge_overlap_threshold = DEFAULT_KNOWLEDGE_OVERLAP_THRESHOLD
        # Stemmer shared by all tokenization done in this checker.
        self.post_stemmer = PorterStemmer()
        super().__init__()
    def check_messages(
        self,
        agent_id: str,
        persona: str,
        messages: List[str],
        is_worker_0: bool,
        violation_types: Iterable[str] = (),
    ) -> str:
        # Returns a comma-joined string of all detected violations ('' if none).
        violations = []
        # Generic ParlAI acceptability checks on this agent's chat texts only.
        general_chat_violations = super().check_messages(
            self.get_conversation_messages(messages, agent_id),
            is_worker_0,
            violation_types,
        )
        if general_chat_violations:
            violations.extend(general_chat_violations.split(','))
        # Role-specific checks.
        if agent_id == 'Apprentice':
            persona_violations = bad_persona(persona, self.post_stemmer)
            if persona_violations:
                violations.extend(persona_violations)
        if agent_id == 'Wizard':
            knowledge_violations = poor_knowledge_selection(
                messages, persona, self.post_stemmer, self.knowledge_overlap_threshold
            )
            if knowledge_violations:
                violations.extend(knowledge_violations)
        return ','.join(violations)
    def get_conversation_messages(self, agent_messages, agent_id):
        # Plain chat texts authored by this agent (search queries excluded).
        return [
            msg['text']
            for msg in agent_messages
            if is_valid_agent_chat_message(msg, agent_id)
        ]
| facebookresearch/ParlAI | parlai/crowdsourcing/projects/wizard_of_internet/acceptability.py | acceptability.py | py | 7,697 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "parlai.crowdsourcing.utils.acceptability.normalize_answer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "parlai.utils.logging.warning",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "parlai.utils.logging",
"line_number": 65,
"usage_t... |
22355326575 | import pathlib
from contextlib import nullcontext as does_not_raise
import pytest
import mlrun.runtimes.generators
# Each case: (strategy, hyper-param file, expected generator class,
# raises-context, expected number of generated iterations).
@pytest.mark.parametrize(
    "strategy,param_file,expected_generator_class,expected_error,expected_iterations",
    [
        (
            "list",
            "hyperparams.csv",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        (
            "list",
            "hyperparams.json",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        (
            "grid",
            "hyperparams.json",
            mlrun.runtimes.generators.GridGenerator,
            does_not_raise(),
            4,
        ),
        (
            "random",
            "hyperparams.json",
            mlrun.runtimes.generators.RandomGenerator,
            does_not_raise(),
            # default max iterations
            mlrun.runtimes.generators.default_max_iterations,
        ),
        # no strategy, default to list
        (
            "",
            "hyperparams.csv",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        # no strategy, default to grid
        (
            "",
            "hyperparams.json",
            mlrun.runtimes.generators.GridGenerator,
            does_not_raise(),
            4,
        ),
        # invalid request
        ("grid", "hyperparams.csv", None, pytest.raises(ValueError), 0),
    ],
)
def test_get_generator(
    rundb_mock,
    strategy,
    param_file,
    expected_generator_class,
    expected_error,
    expected_iterations,
):
    """get_generator picks the right generator class for each strategy/file
    combination and the generator yields the expected iteration count."""
    run_spec = mlrun.model.RunSpec(inputs={"input1": 1})
    run_spec.strategy = strategy
    # Hyper-param files live next to this test in tests/runtimes/assets/.
    run_spec.param_file = str(
        pathlib.Path(__file__).absolute().parent / "assets" / param_file
    )
    execution = mlrun.run.MLClientCtx.from_dict(
        mlrun.run.RunObject(spec=run_spec).to_dict(),
        rundb_mock,
        autocommit=False,
        is_api=False,
        store_run=False,
    )
    with expected_error:
        generator = mlrun.runtimes.generators.get_generator(run_spec, execution, None)
        assert isinstance(
            generator, expected_generator_class
        ), f"unexpected generator type {type(generator)}"
        # Count how many parameter combinations the generator produces.
        iterations = sum(
            1 for _ in generator.generate(mlrun.run.RunObject(spec=run_spec))
        )
        assert (
            iterations == expected_iterations
        ), f"unexpected number of iterations {iterations}"
        # Sanity-check the parsed hyper-parameter names per strategy.
        if strategy == "list":
            assert generator.df.keys().to_list() == ["p1", "p2"]
        elif strategy in ["grid", "random"]:
            assert sorted(list(generator.hyperparams.keys())) == ["p1", "p2"]
| mlrun/mlrun | tests/runtimes/test_generators.py | test_generators.py | py | 2,780 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "mlrun.runtimes.generators.model.RunSpec",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "mlrun.runtimes.generators.model",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "mlrun.runtimes.generators",
"line_number": 69,
"usage_type":... |
26921499285 | import random
import json
import datetime
from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
from nltk.chat.util import Chat, reflections
app = Flask(__name__)
# Allow cross-origin requests from the chat front-end.
cors = CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"
# NOTE(review): evaluated once at import time, so the bot keeps answering
# with the server's start-up date/time -- confirm whether live values are
# wanted (would require computing them per request).
current_date = datetime.datetime.now().strftime("%A, %B %d, %Y")
current_time = datetime.datetime.now().strftime("%H:%M:%S")
# Rule-based (pattern, responses) pairs for nltk's Chat engine.
pairs = [
    ["hi", ["Hello!", "Hi there!"]],
    ["what is your name?", ["My name is Chatbot."]],
    ["bye", ["Goodbye!", "Bye!"]],
    ["what is the current date?", [f"The current date is {current_date}."]],
    ["what is the current time?", [f"The current time is {current_time}."]],
]
# Persisted question -> answer(s) knowledge base, extended at runtime.
with open("data.json", "r", encoding="utf-8") as f:
    data = json.load(f)
# In-memory log of exchanges; mirrored to disk by save_user_input().
user_inputs = []
def chatbot_response(user_input, confirm_message, new_data):
    """Produce the bot's reply for one request.

    Resolution order: 1) the rule-based nltk ``Chat`` pairs; 2) the persisted
    knowledge base (data.json); 3) the learn-new-answer flow driven by
    ``confirm_message``/``new_data``. Every handled exchange is logged via
    ``save_user_input``.

    Fixes vs. the previous version:
    - answers found in the knowledge base are no longer clobbered by the
      "do you want to add this" prompt (the confirm flow now only runs when
      the question is unknown);
    - exchanges are logged for every handled input instead of only when the
      rule-based Chat matched.
    """
    bot_response = ""
    if user_input:
        chatbot = Chat(pairs, reflections)
        # Chat.respond returns a falsy value when no pattern matches.
        bot_response = chatbot.respond(user_input)
        if not bot_response:
            if user_input in data:
                # Known question: pick one of the stored answers.
                if isinstance(data[user_input], list):
                    bot_response = random.choice(data[user_input])
                else:
                    bot_response = data[user_input]
            elif confirm_message:
                # Unknown question and the user replied to the add-prompt.
                if confirm_message.lower() == "yes":
                    if new_data:
                        data[user_input] = new_data
                        with open("data.json", "w", encoding="utf-8") as f:
                            json.dump(data, f, ensure_ascii=False)
                        bot_response = (
                            "Thank you! I've added that to my knowledge base."
                        )
                    else:
                        bot_response = "I'm sorry, I didn't receive any new data. Please try again."
                else:
                    bot_response = "I'm sorry, I can't help with that."
            else:
                # Unknown question: offer to learn it.
                bot_response = "I'm not sure what you mean. Do you want to add this to my knowledge base?"
        save_user_input(user_input, bot_response)
    return bot_response
def save_user_input(user_input, bot_response):
    """Append one exchange to the in-memory log and rewrite user_inputs.json."""
    entry = {"user_input": user_input, "bot_response": bot_response}
    user_inputs.append(entry)
    with open("user_inputs.json", "w", encoding="utf-8") as log_file:
        json.dump(user_inputs, log_file, ensure_ascii=False)
@app.route("/")
def index():
    """Serve the chat UI (templates/index.html)."""
    return render_template("index.html")
@app.route("/chat", methods=["POST"])
@cross_origin()
def chat():
    """POST endpoint consumed by the front-end.

    Form fields: ``user_input`` (required), ``confirm_message`` and
    ``new_data`` (optional, used by the learn-new-answer flow). Returns a
    JSON object with a single "bot_response" key.
    """
    user_input = request.form.get("user_input")
    confirm_message = request.form.get("confirm_message")
    new_data = request.form.get("new_data")
    bot_response = ""
    if user_input:
        bot_response = chatbot_response(user_input, confirm_message, new_data)
        response = {"bot_response": bot_response}
    else:
        response = {
            "bot_response": "I'm sorry, I did not receive any input. Please try again."
        }
    return response
if __name__ == "__main__":
app.run(debug=True, port=8080)
| sinde530/python | pino-chatbot/flask_test.py | flask_test.py | py | 3,170 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
31064293035 |
from ..utils import Object
class Photo(Object):
"""
Describes a photo
Attributes:
ID (:obj:`str`): ``Photo``
Args:
has_stickers (:obj:`bool`):
True, if stickers were added to the photoThe list of corresponding sticker sets can be received using getAttachedStickerSets
minithumbnail (:class:`telegram.api.types.minithumbnail`):
Photo minithumbnail; may be null
sizes (List of :class:`telegram.api.types.photoSize`):
Available variants of the photo, in different sizes
Returns:
Photo
Raises:
:class:`telegram.Error`
"""
ID = "photo"
def __init__(self, has_stickers, minithumbnail, sizes, **kwargs):
self.has_stickers = has_stickers # bool
self.minithumbnail = minithumbnail # Minithumbnail
self.sizes = sizes # list of photoSize
@staticmethod
def read(q: dict, *args) -> "Photo":
has_stickers = q.get('has_stickers')
minithumbnail = Object.read(q.get('minithumbnail'))
sizes = [Object.read(i) for i in q.get('sizes', [])]
return Photo(has_stickers, minithumbnail, sizes)
| iTeam-co/pytglib | pytglib/api/types/photo.py | photo.py | py | 1,177 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"l... |
33516010146 | # -*- coding: utf-8 -*-
from collective.es.index.interfaces import IElasticSearchClient
from elasticsearch import Elasticsearch
from zope.component import provideUtility
from zope.interface import directlyProvides
class ElasticSearchIngressConfFactory(object):
def __init__(self, section):
self.section = section
def _client_dict(self, value):
if not value:
value = [('127.0.0.1', '9200')]
return [dict(zip(['host', 'port'], el)) for el in value]
def prepare(self, *args, **kwargs):
self.query = self._client_dict(self.section.query)
self.ingest = self._client_dict(self.section.ingest)
self.ssl = self.section.ssl
self.verify_certs = self.section.verify_certs
self.ca_certs = self.section.ca_certs
self.client_cert = self.section.client_cert
self.client_key = self.section.client_key
def create(self):
base_client = Elasticsearch(
self.query,
use_ssl=self.ssl,
# here some more params need to be configured.
)
ingest_client = Elasticsearch(
self.ingest,
use_ssl=self.ssl,
# here some more params need to be configured.
)
base_client.ingest = ingest_client
directlyProvides(base_client, IElasticSearchClient)
provideUtility(base_client)
| collective/collective.es.index | src/collective/es/index/components.py | components.py | py | 1,379 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "zope.interface.directlyProvides",
"line_number": 39,
"usage_type": "call"
},
... |
31063179375 |
from ..utils import Object
class GroupCallParticipantVideoInfo(Object):
"""
Contains information about a group call participant's video channel
Attributes:
ID (:obj:`str`): ``GroupCallParticipantVideoInfo``
Args:
source_groups (List of :class:`telegram.api.types.groupCallVideoSourceGroup`):
List of synchronization source groups of the video
endpoint_id (:obj:`str`):
Video channel endpoint identifier
is_paused (:obj:`bool`):
True if the video is pausedThis flag needs to be ignored, if new video frames are received
Returns:
GroupCallParticipantVideoInfo
Raises:
:class:`telegram.Error`
"""
ID = "groupCallParticipantVideoInfo"
def __init__(self, source_groups, endpoint_id, is_paused, **kwargs):
self.source_groups = source_groups # list of groupCallVideoSourceGroup
self.endpoint_id = endpoint_id # str
self.is_paused = is_paused # bool
@staticmethod
def read(q: dict, *args) -> "GroupCallParticipantVideoInfo":
source_groups = [Object.read(i) for i in q.get('source_groups', [])]
endpoint_id = q.get('endpoint_id')
is_paused = q.get('is_paused')
return GroupCallParticipantVideoInfo(source_groups, endpoint_id, is_paused)
| iTeam-co/pytglib | pytglib/api/types/group_call_participant_video_info.py | group_call_participant_video_info.py | py | 1,336 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 37,
"usage_type": "name"
}
] |
71079803305 | import gzip
import json
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from collections import Counter
#Funciones.
def jl_to_list(fname):
output = []
with gzip.open(fname, 'rb') as f:
for line in f:
output.append(json.loads(line))
return output
def load_item_data(all_itms = False):
ITEM_DATA = pd.read_csv('item_data.csv', sep=';')
ITEM_DATA.loc[ITEM_DATA['product_id'] == 0, 'product_id'] = -1
ITEM_DATA['domain_code'], domain_uniques = pd.factorize(ITEM_DATA['domain_id'], sort=True)
ITEM_DATA['category_code'], category_uniques = pd.factorize(ITEM_DATA['category_id'], sort=True)
fields = ['item_id', 'domain_id', 'domain_code', 'product_id', 'category_id', 'category_code', 'price', 'price_cluster', 'condition', 'mexico']
m = {}
for column in tqdm(fields):
m[column] = list(ITEM_DATA[column])
metadata = {}
for i, j in tqdm(enumerate(m['item_id'])):
metadata[j] = {}
for column in fields:
metadata[j].update({column: m[column][i]})
if all_itms:
all_items = list(metadata)
else:
all_items = []
return metadata, all_items
def views(row):
return ([ev['event_info'] for ev in row['user_history'] if ev['event_type']=='view'])
def searchs(row):
return ([ev['event_info'] for ev in row['user_history'] if ev['event_type']=='search'])
def dominios_visitados(visits):
domains = Counter()
for item in visits:
domain = metadata[item]['domain_code']
domains[domain] += 1
return domains
def productos_visitados(visits):
productos = Counter()
for item in visits:
producto = metadata[item]['product_id']
if producto:
productos[producto] += 1
return productos
def categorias_visitadas(visits):
categorias = Counter()
for item in visits:
categoria = metadata[item]['category_code']
if categoria:
categorias[categoria] += 1
return categorias
def get_session_time(history):
last_event=len(history)-1
t0=datetime.strptime(history[0]['event_timestamp'].replace('T',' ')[:-5],'%Y-%m-%d %H:%M:%S.%f')
t1=datetime.strptime(history[last_event]['event_timestamp'].replace('T',' ')[:-5],'%Y-%m-%d %H:%M:%S.%f')
T=t1-t0
return T.days*24*60*60+T.seconds+T.microseconds/1000000
def precio_mediano(visits):
precios = []
for item in visits:
if metadata[item]['price']:
precios.append(float(metadata[item]['price']))
if len(precios) != 0:
return np.median(np.array(precios))
else:
return 0
def precio_desvio(visits):
precios = []
for item in visits:
if metadata[item]['price']:
precios.append(float(metadata[item]['price']))
if len(precios) != 0:
return np.std(np.array(precios))
else:
return 0
def mercado(visits):
mexico = []
for item in visits:
mexico.append(int(metadata[item]['mexico']))
if np.mean(np.array(mexico)) > 0.5:
return 1
else:
return 0
def data_for_clusters(rows_data):
cluster_data = []
for row in tqdm(rows_data):
temp = {'d_visitados': len(dominios_visitados(views(row))),
'p_visitados': len(productos_visitados(views(row))),
'c_visitadas': len(categorias_visitadas(views(row))),
's_time': get_session_time(row['user_history']),
's_len': len(row['user_history']),
'v_len': len(views(row)),
'p_views': len(views(row)) / len(row['user_history']),
'median_p': precio_mediano(views(row)),
'sd_p': precio_desvio(views(row)),
'mercado': mercado(views(row))}
cluster_data.append(temp)
return pd.DataFrame(cluster_data)
def data_for_segments(rows_data):
cluster_data = []
for row in tqdm(rows_data):
temp = {'v_len': len(views(row)),
's_len': len(searchs(row))}
cluster_data.append(temp)
return pd.DataFrame(cluster_data)
def data_for_features(rows_data):
cluster_data = []
for row in tqdm(rows_data):
temp = {'domain_code': list(dominios_visitados(views(row))),
'product_id': list(productos_visitados(views(row))),
'category_code': list(categorias_visitadas(views(row))),
'median_p': precio_mediano(views(row)),
'sd_p': precio_desvio(views(row)),
'mercado': mercado(views(row))}
cluster_data.append(temp)
return pd.DataFrame(cluster_data)
def dominio_mas_visitado(rows_data):
cluster_data = []
for row in tqdm(rows_data):
dominios = list(dominios_visitados(views(row)))
if len(dominios) > 0:
temp = {'vdomain': list(dominios_visitados(views(row)))[0]}
else:
temp = {'vdomain': -1}
cluster_data.append(temp)
return pd.DataFrame(cluster_data)
def clustering_process(df, k):
#Normalizacion.
df_norm = StandardScaler().fit_transform(df)
#Estructura para resultados.
cs=np.empty(shape=[len(df_norm),1])
#Algoritmo.
kmeans=KMeans(n_clusters=k)
kmeans.fit(df_norm)
cs[:,0]=kmeans.fit_predict(df_norm)
#Concat.
df_cs=pd.DataFrame(cs,columns=['cluster'])
df_final=pd.concat([df,df_cs],axis=1)
if k <= 100:
print(df_cs['cluster'].value_counts())
return df_cs, kmeans
def clustering_predict(df, kmeans, k=10):
#Normalizacion.
df_norm = StandardScaler().fit_transform(df)
#Estructura para resultados.
cs=np.empty(shape=[len(df_norm),1])
#Algoritmo.
cs[:,0]=kmeans.fit_predict(df_norm)
#Concat.
df_cs=pd.DataFrame(cs,columns=['cluster'])
df_final=pd.concat([df,df_cs],axis=1)
if k <= 100:
print(df_final['cluster'].value_counts())
return df_final
def meli_clusters(k):
df = pd.read_csv('meli_data.csv', sep=';')
df_c, kmeans = clustering_process(df, k)
return df_c, kmeans
#Datos.
metadata, _ = load_item_data() | estereotipau/meli_challenge_2020 | simple_cluster_EG.py | simple_cluster_EG.py | py | 6,186 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "gzip.open",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.factorize",
"line_num... |
73933034022 | import sys
import datetime
import socket, time
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import *
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QTimer, QTime
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt, QByteArray, QSettings, QTimer, pyqtSlot
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QSizePolicy, QVBoxLayout, QAction, QPushButton, QLineEdit
from PyQt5.QtGui import QMovie
from time import sleep
import main as m
import Python_to_Linux as PtoL
import cv2
import threading
import serial
import numpy as np
import math
import statistics
import os
import sys
import pymysql
import base64
import requests
cam0 = cv2.VideoCapture(2)
cam1 = cv2.VideoCapture(0)
arduino = serial.Serial('/dev/ttyACM0', 115200)
print("camera on")
ar_flag = 0
Impact_frame=0
cnt=0
id_text =""
light_stop=False
######################################## light thread
class lightThread(threading.Thread):
def __init__(self, end, stop):
threading.Thread.__init__(self)
self.end = end
self.stop = stop
def __del__(self):
print("del")
def run(self):
ligth(self.end, self.stop)
def ligth(end, stop):
youngsun =1
while youngsun:
if stop():
print("stop hihi")
break
f=arduino.readline()
f=f.decode()
if f == 'Impact\r\n':
end.light_signal.emit()
break
####################################### cam ๋
นํ ์ค๋ ๋
class camThread(threading.Thread):
success = 0
def __init__(self, previewName, camID, cam):
threading.Thread.__init__(self)
self.previewName = previewName
self.camID = camID
self.cam = cam
def run(self):
self.success = camPreview(self.previewName, self.camID, self.cam)
def camPreview(previewName, camID, cam):
global cnt
cam.set(3,640)
cam.set(4,480)
##
#frame_width=int(cam.get(3))
#frame_height=int(cam.get(4))
##
fps = 30
out = cv2.VideoWriter('./examples/media/'+ str(previewName)+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), fps, (640,480))
if camID==0:
cnt = 0
start_time = datetime.datetime.now()
end_time = start_time + datetime.timedelta(seconds=8)
while(True):
if camID==0:
cnt+=1
ret, frame = cam.read()
if ret:
out.write(frame)
if end_time < datetime.datetime.now():
out.release()
print("recoding success "+str(previewName))
return 1
else:
print("error "+str(previewName))
return 5
def impact_fram(cnt):
global Impact_frame
print(cnt)
Impact_frame=cnt
####################################### interrupt ๋ง๋ค๊ธฐ
class Communicate(QObject):
end_signal = pyqtSignal()
cam_signal = pyqtSignal()
main_signal = pyqtSignal()
light_signal = pyqtSignal()
take_signal = pyqtSignal()
top_signal = pyqtSignal()
impact_signal = pyqtSignal()
youngseon = pyqtSignal()
####################################### ์์ ์ฌ์ ์ค๋ ๋
class Video(threading.Thread):
def __init__(self, ui, previewName, labelName, width, height, re, stop, end):
threading.Thread.__init__(self)
self.previewName = previewName
self.labelName = labelName
self.ui = ui
self.width = width
self.height = height
self.re = re
self.stop = stop
self.end = end
def run(self):
VideoPlayer(self.ui, self.previewName, self.labelName, self.width, self.height, self.re, self.stop, self.end)
def VideoPlayer(ui, previewName, label, width, height, re, stop, end):
marker_cnt=0
global ar_flag
while True:
cap = cv2.VideoCapture(previewName)
if stop():
break
if re ==3 :
while ar_flag == 0 :
a=arduino.readline()
a=a.decode()
if a == 'Start\r\n':
ar_flag = 1
end.cam_signal.emit()
while True:
if re == 0:
if ar_flag == 1:
break
else:
pass
elif re == 9:
marker_cnt +=1
label.ret, label.frame = cap.read()
if label.ret:
label.rgbImage = cv2.cvtColor(label.frame, cv2.COLOR_BGR2RGB)
label.convertToQtFormat = QImage(label.rgbImage.data, label.rgbImage.shape[1],
label.rgbImage.shape[0], QImage.Format_RGB888)
label.pixmap = QPixmap(label.convertToQtFormat)
label.p = label.pixmap.scaled(width, height, QtCore.Qt.IgnoreAspectRatio)
label.setPixmap(label.p)
label.update()
if re == 9:
if marker_cnt == math.floor(m.point[1]*3): #takeaway์ง์
end.take_signal.emit()
elif marker_cnt == math.floor(m.point[3]*3): #top์ง์
end.top_signal.emit()
elif marker_cnt == math.floor(m.point[4]*3): #impact์ง์
end.impact_signal.emit()
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(25, loop.quit)
loop.exec_()
else:
break
if stop():
break
cap.release()
if re == 0 or re == 3:
break
else:
pass
if re == 3:
end.end_signal.emit()
def camera(end):
global light_stop
light_stop=False
cam_t1 = camThread("Camera1", 0, cam0)
cam_t2 = camThread("Camera2", 1, cam1)
light = lightThread(end, lambda: light_stop)
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(3000, loop.quit)
loop.exec_()
cam_t1.start()
cam_t2.start()
light.start()
return light
def young(light):
light.quit()
########################################## ํผ๋๋ฐฑ ์ค๋ ๋
class MainThread(threading.Thread):
success = 0
def __init__(self,end):
threading.Thread.__init__(self)
self.end = end
def run(self):
main_run(self.end)
def main_run(end):
global id_text
global Impact_frame
PtoL.JSONmaker()
m.main(Impact_frame, id_text)
end.main_signal.emit()
########################################### main GUI
gifFile = "loading.gif"
class MyWindow_step(QMainWindow):
def __init__(self, gifFile):
super().__init__()
self.gifFile = gifFile
self.GUI_login()
#self.GUI_all()
def GUI_login(self):
self.ui = uic.loadUi('Designer_login.ui')
self.ui.show()
self.ui.LoginButton.clicked.connect(lambda : self.LoginDB(self.ui))
def LoginDB(self,a):
global id_text
id_text = a.UserID.text()
try:
#send db -> response 200
conn = pymysql.connect("db-ladybug.cmghyay3tpvl.ap-northeast-2.rds.amazonaws.com",user="ladybug",passwd = "ladybug456123",db="AppService", port=3306,use_unicode=True,charset ='utf8')
cursor = conn.cursor()
query = """SELECT * FROM AppService.MEMBER WHERE user_id = '{0}';""".format(id_text)
cursor.execute(query)
result = cursor.fetchall()
conn.commit()
asdf=()
if result == asdf:
a.UserID.setText("Please Sign up in application")
else:
self.GUI_all()
except:
#respose 404
print("server not connect")
intro_stop = False
swing_stop = False
def GUI_all(self):
self.ui = uic.loadUi('Designer_all.ui')
#print("all"+str(threading.active_count()))
self.ui.loadinglabel_2.hide()
global ar_flag
global intro_stop
global swing_stop
global cnt
global light_stop
light_stop=False
ar_flag = 0
self.end = Communicate()
intro_stop = False
swing_stop = False
intro_thread = Video(self.ui,"golf_animation_intro.avi", self.ui.video_label, 1920, 1080, 0, lambda: intro_stop, self.end)
swing_thread = Video(self.ui,"golf_animation_swing.avi", self.ui.video_label, 1920, 1080, 3, lambda: swing_stop, self.end)
intro_thread.daemon = True
swing_thread.daemon = True
intro_thread.start()
swing_thread.start()
self.ui.show()
light = self.end.cam_signal.connect(lambda: camera(self.end))
self.end.light_signal.connect(lambda: impact_fram(cnt))
self.end.end_signal.connect(self.GUI_loading)
self.end.youngseon.connect(lambda: young(light))
def GUI_loading(self):
self.ui.loadinglabel_2.show()
print("loding" + str(threading.active_count()))
self.end = Communicate()
self.movie = QMovie(self.gifFile, QByteArray(), self)
self.movie.setCacheMode(QMovie.CacheAll)
self.ui.loadinglabel.setMovie(self.movie)
self.movie.start()
self.movie.loopCount()
global Impact_frame
if Impact_frame==0:
self.GUI_fakeswing(self.end)
return
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(3000, loop.quit)
loop.exec_()
main_Thread = MainThread(self.end)
main_Thread.daemon = True
main_Thread.start()
self.end.main_signal.connect(self.GUI_feedback)
def GUI_fakeswing(self,end):
end.youngseon.emit()
print(threading.active_count())
global intro_stop
global swing_stop
global light_stop
light_stop=True
print(threading.active_count())
intro_stop=True
print(threading.active_count())
swing_stop=True
print(threading.active_count())
self.ui = uic.loadUi('Designer_fakeswing.ui')
self.ui.show()
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(3000, loop.quit)
loop.exec_()
self.GUI_all()
marker_stop=False
def GUI_feedback(self):
self.ui = uic.loadUi('Designer_feedback.ui')
self.end = Communicate()
self.ui.show()
self.ui.home.clicked.connect(lambda: self.feedback_clicked(1))
self.ui.replay.clicked.connect(lambda: self.feedback_clicked(2))
self.ui.feedback1.clicked.connect(lambda: self.feedback_clicked(3))
self.ui.feedback2.clicked.connect(lambda: self.feedback_clicked(4))
self.ui.feedback3.clicked.connect(lambda: self.feedback_clicked(5))
def GUI_feedback1(self):
self.ui = uic.loadUi('Designer_feedback1.ui')
self.end = Communicate()
global marker_stop
global intro_stop
global swing_stop
intro_stop = True
swing_stop = True
marker_stop=False
front_thread = Video(self.ui,"Camera1_out.avi", self.ui.front_label, 830, 700, 9, lambda: marker_stop, self.end)
side_thread = Video(self.ui,"Camera2_out.avi", self.ui.side_label, 830, 700, 1, lambda: marker_stop, self.end)
front_thread.daemon=True
side_thread.daemon=True
front_thread.start()
side_thread.start()
self.ui.show()
self.textbox(self.ui.textBrowser,1)
self.end.take_signal.connect(lambda: self.textbox(self.ui.textBrowser,2))
self.end.top_signal.connect(lambda: self.textbox(self.ui.textBrowser,3))
self.end.impact_signal.connect(lambda: self.textbox(self.ui.textBrowser,4))
self.end.impact_signal.connect(lambda: self.textbox(self.ui.textBrowser,0))
self.ui.skip_button.clicked.connect(self.feedback_clicked1)
feedback_stop = False
def GUI_feedback2(self):
self.ui = uic.loadUi('Designer_feedback2.ui')
self.end = Communicate()
global feedback_stop
feedback_stop = False
address_thread = Video(self.ui,"testing1.avi", self.ui.video1, 425, 530, 1, lambda: feedback_stop, self.end)
backswing_thread = Video(self.ui,"testing2.avi", self.ui.video2, 425, 530, 1, lambda: feedback_stop, self.end)
swing_thread = Video(self.ui,"testing3.avi", self.ui.video3, 425, 530, 1, lambda: feedback_stop, self.end)
finish_thread = Video(self.ui,"testing4.avi", self.ui.video4, 425, 530, 1, lambda: feedback_stop, self.end)
address_thread.daemon=True
backswing_thread.daemon=True
swing_thread.daemon=True
finish_thread.daemon=True
address_thread.start()
backswing_thread.start()
swing_thread.start()
finish_thread.start()
self.ui.show()
self.textbox(self.ui.text1,1)
self.textbox(self.ui.text2,2)
self.textbox(self.ui.text3,3)
self.textbox(self.ui.text4,4)
self.ui.backButton.clicked.connect(self.feedback_clicked2)
def GUI_feedback3(self):
self.ui = uic.loadUi('Designer_feedback3.ui')
self.end = Communicate()
global feedback_stop
feedback_stop = False
address_thread = Video(self.ui,"master_out.avi", self.ui.video1, 911, 471, 1, lambda: feedback_stop, self.end)
backswing_thread = Video(self.ui,"pelvis_out.avi", self.ui.video2, 911, 471, 1, lambda: feedback_stop, self.end)
swing_thread = Video(self.ui,"Camera1_master_out.avi", self.ui.video3, 911, 471, 1, lambda: feedback_stop, self.end)
finish_thread = Video(self.ui,"Camera1_pelvis_out.avi", self.ui.video4, 911, 471, 1, lambda: feedback_stop, self.end)
address_thread.daemon=True
backswing_thread.daemon=True
swing_thread.daemon=True
finish_thread.daemon=True
address_thread.start()
backswing_thread.start()
swing_thread.start()
finish_thread.start()
self.ui.show()
self.ui.backButton.clicked.connect(self.feedback_clicked3)
def textbox(self, textBox, text):
if text ==0:
for i, val in enumerate(m.stands):
textBox.append(val)
textBox.show()
elif text ==1:
for i, val in enumerate(m.address_feedback):
textBox.append(val)
textBox.show()
elif text ==2:
for i, val in enumerate(m.backswing_feedback):
textBox.append(val)
textBox.show()
elif text ==3:
for i, val in enumerate(m.swing_feedback):
textBox.append(val)
textBox.show()
elif text ==4:
for i, val in enumerate(m.finish_feedback):
textBox.append(val)
textBox.show()
def feedback_clicked(self,button):
global feedback_stop
feedback_stop = True
self.ui.close()
if button ==1:
self.GUI_login()
elif button ==2:
self.GUI_all()
elif button ==3:
self.GUI_feedback1()
elif button ==4:
self.GUI_feedback2()
elif button ==5:
self.GUI_feedback3()
def feedback_clicked1(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
def feedback_clicked2(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
def feedback_clicked3(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
if __name__ == "__main__":
app = QApplication(sys.argv)
myApp_step = MyWindow_step(gifFile)
app.exec_()
| 0sun-creater/golf_swing_coaching_program | python/GUI.py | GUI.py | py | 17,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
... |
17106921371 | import os
from dataclasses import dataclass, field
from typing import List
with open(os.path.join(os.path.dirname(__file__), "input"), "r") as inputFile:
inputLines = [line.strip() for line in inputFile.readlines() if line]
@dataclass
class Signal:
cycle: int
register: int
strength = 0
def __post_init__(self):
self.strength = self.cycle * self.register
@dataclass
class CRT:
pixels: List[str] = field(default_factory=lambda: [""])
def drawPixel(self, register: int) -> None:
self.pixels[-1] += "#" if abs(len(self.pixels[-1]) - register) <= 1 else "."
if len(self.pixels[-1]) == 40:
self.pixels.append("")
def __str__(self) -> str:
return "\n".join(self.pixels)
class CPU:
def __init__(self) -> None:
self.states: List[Signal] = [Signal(cycle=0, register=1)]
self.instructions: List[str] = []
self.interestingSignals: List[Signal] = []
self.crt = CRT()
def parseLine(self, line) -> None:
"""Transform line into instructions"""
if line == "noop":
return self.instructions.append("noop")
self.instructions.append("start " + line)
self.instructions.append("end " + line)
def executeInstructions(self) -> None:
for cycle, line in enumerate(self.instructions, start=1):
register = self.states[-1].register
self.crt.drawPixel(register)
# Read is **during** the cycle, not after the cycle
if cycle % 40 == 20:
self.interestingSignals.append(Signal(register=register, cycle=cycle))
if line.startswith("end "):
register += int(line.split(" ")[-1])
self.states.append(Signal(register=register, cycle=cycle))
def sumInterestingSignals(self) -> int:
return sum([signal.strength for signal in self.interestingSignals])
def answer(iterable):
cpu = CPU()
[cpu.parseLine(line) for line in iterable]
cpu.executeInstructions()
# Answer 1
print(cpu.sumInterestingSignals())
# Answer 2
print(cpu.crt)
answer(inputLines)
| mmmaxou/advent-of-code | 2022/day-10/answer.py | answer.py | py | 2,140 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"l... |
17232778502 | import cv2
import numpy as np
def bitmap(n):
map = []
for i in range(256):
if i & pow(2, n-1) == pow(2, n-1):
map.append(255)
else:
map.append(0)
print(map)
return map
def bitimage(x, n):
image = x.copy()
height, width = image.shape
map = bitmap(n)
for rows in range(height):
for cols in range(width):
image[rows, cols] = map[image[rows, cols]]
return image
if __name__ == '__main__':
original = cv2.imread('imgae\Fig0314(a)(100-dollars).tif',
cv2.IMREAD_GRAYSCALE)
cv2.imshow('a', original)
"""
b = bitimage(original,1)
cv2.imshow('bitmap1',b)
c = bitimage(original,2)
cv2.imshow('bitmap2',c)
d = bitimage(original,3)
cv2.imshow('bitmap3',d)
e = bitimage(original,4)
cv2.imshow('bitmap4',e)
f = bitimage(original,5)
cv2.imshow('bitmap5',f)
g = bitimage(original,6)
cv2.imshow('bitmap6',g)
h = bitimage(original,7)
cv2.imshow('bitmap7',h)
i = bitimage(original,8)
cv2.imshow('bitmap8',i)
"""
bit5 = bitimage(original, 5)
bit6 = bitimage(original, 6)
bit7 = bitimage(original, 7)
bit8 = bitimage(original, 8)
bit5 = np.where(bit5 == 255, 16, 0)
bit6 = np.where(bit6 == 255, 32, 0)
bit7 = np.where(bit7 == 255, 64, 0)
bit8 = np.where(bit8 == 255, 128, 0)
re_7_8 = np.uint8(bit7 + bit8)
re_6_7_8 = np.uint8(bit6 + bit7 + bit8)
re_5_6_7_8 = np.uint8(bit5 + bit6 + bit7 + bit8)
cv2.imshow('re_7_8', re_7_8)
cv2.imshow('re_6_7_8', re_6_7_8)
cv2.imshow('re_5_6_7_8', re_5_6_7_8)
'''
a = np.array([1,3,4,5,6,1,1,1])
a = np.where(a==1,0,255)
'''
cv2.waitKey() | VJaGG/digital-image-processing | chapter2/3.2.4.2ใbitmaplayer.py | 3.2.4.2ใbitmaplayer.py | py | 1,805 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"li... |
16140910097 | """
Recognizes the mine board from screenshot.
"""
import os
import sys
import numpy as np
from scipy.spatial.distance import cdist
import cv2
from PIL import Image
from solverutils import CID
import pyautogui as pg
IMGDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'imgs')
# related to board cells localization
DOTS_TOL = 200 # the max allowed template matching difference
# related to open cell recognition
OPEN_THR = 153 # the brightness between digit (122) and background (188)
# related to remaining mines digit recognition
MR_LOOKUPTABLE = np.array([
[1, 0, 1, 1, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 0, 1, 1],
[1, 0, 1, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
]) * 2 - 1
# related to remaining mines digit recognition
MR_UNITS = np.array([100, 10, 1])
def normalize(image):
"""
Normalize a uint8 image to [-1.0, 1.0].
"""
return (image.astype(np.float64) - 128) / 128
def tobw(img, threshold):
return ((img.astype(np.int64) >= threshold) * 255).astype(np.uint8)
def loadimg(filename: str):
"""
Load image as grayscale from ``IMGDIR``.
:param filename: the image filename
:return: a uint8 image
"""
filename = os.path.join(IMGDIR, filename)
img = np.asarray(Image.open(filename).convert('L'))
return img
def get_rect_midpoint(top_left, shape):
return np.array([
top_left[0] + shape[1] // 2,
top_left[1] + shape[0] // 2,
])
def make_screenshot(sct, monitor=None, region=None, esc_before_grab=False):
"""
Make uint8 grayscale screenshot of specified region on specified monitor.
:param sct: the ``mss.mss()`` instance
:param monitor: ``None`` for the first monitor, positive integer for the
monitor of that id, and dict for that monitor
:type monitor: Union[None, int, dict]
:param region: ``None`` for the entire region, and dict for the specified
region plus the offset imposed by the specified monitor
:param esc_before_grab: press Esc key before grabbing to temporarily hide
the mouse cursor
:return: numpy array of the grayscale screenshot
"""
if isinstance(monitor, int):
monitor = sct.monitors[monitor]
elif not monitor:
monitor = sct.monitors[1]
if esc_before_grab:
pg.press('esc')
if region:
adjusted_region = region.copy()
adjusted_region['top'] += monitor['top']
adjusted_region['left'] += monitor['left']
img = sct.grab(adjusted_region)
else:
img = sct.grab(monitor)
img = Image.frombytes('RGB', img.size, img.bgra, 'raw', 'BGRX')
return np.asarray(img.convert('L'))
class BoardNotFoundError(Exception):
"""
Raised when the board cells cannot be segmented out correctly.
"""
pass
class BoardDetector:
    """
    Locates the minesweeper cell board (and optionally the remaining-mines
    label) in a screenshot, and recognizes cell contents via template
    matching.
    Attributes (note: the x-y coordinate complies to image convention):
    - ``upper``: the smallest y coordinate of the board (readonly)
    - ``lower``: the largest y coordinate of the board (readonly)
    - ``left``: the smallest x coordinate of the board (readonly)
    - ``right``: the largest x coordinate of the board (readonly)
    - ``height``: the number of cells along each column (readonly)
    - ``width``: the number of cells along each row (readonly)
    - ``hkls``: horizontal key lines of the cell board
    - ``vkls``: vertical key lines of the cell board
    Below attributes may be ``None`` if ``enable_mr_detect=False`` when
    ``new``:
    - ``upper_mr``: the smallest y coordinate of the remaining mines label
    - ``lower_mr``: the largest y coordinate of the remaining mines label
    - ``left_mr``: the smallest x coordinate of the remaining mines label
    - ``right_mr``: the largest x coordinate of the remaining mines label
    """
    def __init__(self, mon_id, dpr, hkls, vkls, upper_mr, lower_mr, left_mr,
                 right_mr):
        """
        This method shouldn't be called explicitly; use ``new`` instead.
        :param mon_id: the monitor id (index into ``mss`` monitors)
        :param dpr: the device pixel ratio as an (x, y) pair
        :param hkls: horizontal key lines, in physical pixels
        :param vkls: vertical key lines, in physical pixels
        :param upper_mr: remaining-mines label bounds (may be ``None``)
        """
        # the monitor id
        self.mon_id = mon_id
        # the device pixel ratio (x, y)
        self.dpr = dpr
        # the cell board key lines
        self.hkls = hkls
        self.vkls = vkls
        # the remaining mines label location
        self.upper_mr = upper_mr
        self.lower_mr = lower_mr
        self.left_mr = left_mr
        self.right_mr = right_mr
        # precomputed board region and remaining mines region, in the dict
        # layout expected by mss's grab()
        self.board_region = {
            'top': self.upper,  # self.upper is a property
            'left': self.left,  # same
            'width': self.right - self.left,  # same
            'height': self.lower - self.upper,  # same
        }
        if self.upper_mr is not None:
            # label bounds are stored in physical pixels; divide by the
            # device pixel ratio to get logical (grab) coordinates
            self.mr_region = {
                'top': self.upper_mr // self.dpr[1],
                'left': self.left_mr // self.dpr[0],
                'width': (self.right_mr - self.left_mr) // self.dpr[0],
                'height': (self.lower_mr - self.upper_mr) // self.dpr[1],
            }
        else:
            self.mr_region = None
        # precomputed offset hkls and vkls, i.e. the key lines with respect
        # to the upper left corner of the board region
        self.offset_hkls = self.hkls - self.hkls[0]
        self.offset_vkls = self.vkls - self.vkls[0]
        # preload various cells
        loaded_imgs = [
            tobw(loadimg('open{}.gif'.format(i)), OPEN_THR)
            for i in range(0, 9)
        ]
        loaded_imgs.extend(
            map(loadimg, [
                'bombflagged.gif', 'bombdeath.gif', 'bombmisflagged.gif',
                'bombrevealed.gif', 'blank.gif'
            ]))
        # normalize templates from [0, 255] to [-1, 1] and flatten each
        # template image to a row vector for nearest-template matching
        self._face_templates = np.stack(loaded_imgs).astype(np.float64)
        self._face_templates = self._face_templates / 255 * 2 - 1
        self._face_templates = self._face_templates.reshape(
            self._face_templates.shape[0], -1)
        # cell id of each template row above: digits 0-8, then flag, three
        # mine variants, and the question-mark/blank cell
        self._face_templates_cids = [
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            CID['f'],
            CID['m'],
            CID['m'],
            CID['m'],
            CID['q'],
        ]
    @property
    def upper(self):
        # board bounds are derived from the key lines, scaled back to
        # logical coordinates by the device pixel ratio
        return self.hkls[0] // self.dpr[1]
    @property
    def lower(self):
        return self.hkls[-1] // self.dpr[1]
    @property
    def left(self):
        return self.vkls[0] // self.dpr[0]
    @property
    def right(self):
        return self.vkls[-1] // self.dpr[0]
    @property
    def height(self):
        """Board height, not pixel height"""
        return self.hkls.size - 1
    @property
    def width(self):
        """Board width, not pixel width"""
        return self.vkls.size - 1
    def __str__(self):
        return ('{0.__class__.__name__}('
                'mon_id={0.mon_id}, '
                'dpr={0.dpr}, '
                'hkls={0.hkls}, '
                'vkls={0.vkls}, '
                'upper_mr={0.upper_mr}, '
                'lower_mr={0.lower_mr}, '
                'left_mr={0.left_mr}, '
                'right_mr={0.right_mr})'.format(self))
    def __repr__(self):
        # NOTE(review): 'mon_id={0.mon_id} ' lacks the comma that __str__
        # emits after mon_id -- looks like a typo in the output string;
        # left unchanged here since it alters runtime output.
        return ('{0.__class__.__name__}('
                'mon_id={0.mon_id} '
                'dpr={0.dpr}, '
                'hkls={0.hkls!r}, '
                'vkls={0.vkls!r}, '
                'upper_mr={0.upper_mr!r}, '
                'lower_mr={0.lower_mr!r}, '
                'left_mr={0.left_mr!r}, '
                'right_mr={0.right_mr!r})'.format(self))
    @classmethod
    def new(cls, mon_screenshots, enable_mr_detect=False):
        """
        Try every pair of (monitor id, monitor resolution, screenshot) until
        one returns an instance of ``BoardDetector``.
        :param mon_screenshots: list of tuples of (monitor id, monitor
               resolution (width, height), the uint8 grayscale screenshot
               possibly containing an empty board)
        :param enable_mr_detect: if ``True``, enable mines remaining detection
        :return: a ``BoardDetector`` object
        :raise BoardNotFoundError: if until the last monitor ``BoardDetector``
               is not instantiated successfully
        """
        total_num = len(mon_screenshots)
        for i, (mon_id, mon_res, screenshot) in enumerate(mon_screenshots, 1):
            try:
                return cls._new(mon_id, mon_res, screenshot, enable_mr_detect)
            except BoardNotFoundError:
                # swallow failures on all but the last candidate monitor;
                # re-raise only when every monitor has been tried
                if i == total_num:
                    raise
    @classmethod
    def _new(cls, mon_id: int, mon_res, screenshot: np.ndarray,
             enable_mr_detect):
        """
        Returns a new instance of ``BoardDetector`` from ``screenshot``.
        :param mon_id: the monitor id
        :param mon_res: the monitor resolution (width, height)
        :param screenshot: the uint8 grayscale screenshot containing an empty
               board
        :param enable_mr_detect: if ``True``, enable mines remaining detection
        :return: a ``BoardDetector`` object
        :raise BoardNotFoundError:
        """
        # COMPUTE DEVICE PIXEL RATIO
        # physical screenshot size divided by logical monitor resolution
        dpr_x = screenshot.shape[1] // mon_res[0]
        dpr_y = screenshot.shape[0] // mon_res[1]
        # LOCALIZE CELL BOARD
        # match the cell-corner cross template everywhere on the screenshot;
        # low TM_SQDIFF scores (<= DOTS_TOL) mark candidate grid crossings
        crosstmpl = loadimg('b_crs.png')
        mmr = cv2.matchTemplate(screenshot, crosstmpl,
                                cv2.TM_SQDIFF) <= DOTS_TOL
        dots = np.stack(np.nonzero(mmr), axis=1)
        if dots.size == 0:
            raise BoardNotFoundError('no board cross is found')
        u0, cnt0 = np.unique(dots[:, 0], return_counts=True)
        u1, cnt1 = np.unique(dots[:, 1], return_counts=True)
        # remove outliers: keep only rows/columns whose match count equals
        # the modal count, i.e. that look like full grid lines
        cnt0_e, cnt0_c = np.unique(cnt0, return_counts=True)
        cnt0_mode = cnt0_e[np.argmax(cnt0_c)]
        cnt1_e, cnt1_c = np.unique(cnt1, return_counts=True)
        cnt1_mode = cnt1_e[np.argmax(cnt1_c)]
        to_delete = [
            np.where(dots[:, 0] == x)[0] for x in u0[cnt0 < cnt0_mode]
        ] + [np.where(dots[:, 1] == x)[0] for x in u1[cnt1 < cnt1_mode]]
        if to_delete:
            dots = np.delete(
                dots, np.unique(np.concatenate(to_delete)), axis=0)
        ch_ = np.unique(np.diff(np.unique(dots[:, 0])))  # cell intervals y
        cw_ = np.unique(np.diff(np.unique(dots[:, 1])))  # cell intervals x
        # allow one unique dot interval or two successive dot intervals due
        # to rounding error
        if not ((ch_.size == 1 or
                 (ch_.size == 2 and abs(ch_[0] - ch_[1]) == 1)) and
                (cw_.size == 1 or
                 (cw_.size == 2 and abs(cw_[0] - cw_[1]) == 1))):
            raise BoardNotFoundError('board crosses are not localized '
                                     'correctly')
        # the horizontal (arranged along matrix axis=0) key lines; the grid
        # is extended by one interval at each end since the outermost board
        # border has no interior crosses
        hkls = np.unique(dots[:, 0])
        hkls = np.concatenate((
            [hkls[0] - (hkls[1] - hkls[0])],
            hkls,
            [hkls[-1] + (hkls[-1] - hkls[-2])],
        )) + 1
        # the vertical (arranged along matrix axis=1) key lines
        vkls = np.unique(dots[:, 1])
        vkls = np.concatenate((
            [vkls[0] - (vkls[1] - vkls[0])],
            vkls,
            [vkls[-1] + (vkls[-1] - vkls[-2])],
        )) + 1
        if not enable_mr_detect:
            return cls(mon_id, (dpr_x, dpr_y), hkls, vkls, None, None, None,
                       None)
        left = vkls[0]
        right = vkls[-1]
        # LOCALIZE MINE REMAINING LABEL
        # ll/lr/ul presumably stand for lower-left / lower-right /
        # upper-left corner templates of the label -- inferred from the
        # filenames; TODO confirm against the asset files
        mrlltmpl = loadimg('mr_ll.png')
        mrlrtmpl = loadimg('mr_lr.png')
        mrultmpl = loadimg('mr_ul.png')
        MR_TOL = 50
        mrllloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrlltmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        mrlrloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrlrtmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        mrulloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrultmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        # discard matches in the right half of the board area
        mrlrloc = np.delete(
            mrlrloc, np.where(mrlrloc[:, 1] >= np.mean((left, right))), axis=0)
        mrulloc = np.delete(
            mrulloc, np.where(mrulloc[:, 1] >= np.mean((left, right))), axis=0)
        # snap near-miss x coordinates (off by at most one pixel) onto the
        # expected board left edge
        if mrllloc.size > 0 and abs(mrllloc[0, 1] - left + 1) <= 1:
            mrllloc[0, 1] = left - 1
        if mrulloc.size > 0 and abs(mrulloc[0, 1] - left + 1) <= 1:
            mrulloc[0, 1] = left - 1
        if (any(x.shape[0] != 1 for x in (mrllloc, mrlrloc, mrulloc))
                or mrllloc[0, 1] != left - 1 or mrllloc[0, 0] != mrlrloc[0, 0]
                or mrulloc[0, 1] != left - 1):
            raise BoardNotFoundError('remaining mines label is not localized '
                                     'correctly')
        lower_mr, left_mr = mrllloc[0] + 1
        upper_mr = mrulloc[0, 0] + 1
        right_mr = mrlrloc[0, 1] + 1
        return cls(mon_id, (dpr_x, dpr_y), hkls, vkls, upper_mr, lower_mr,
                   left_mr, right_mr)
    def recognize_board_and_mr(self, sct):
        """
        Grab fresh screenshots and recognize the full board state.
        :param sct: the ``mss.mss()`` instance
        :return: ``(cells, mr, boardimg)`` where ``cells`` is the
                 recognized cell-id matrix, ``mr`` the remaining-mines
                 count (or ``None`` if detection was disabled), and
                 ``boardimg`` the raw board screenshot
        """
        boardimg, mrimg = self.localize_board_and_mr(sct)
        cellimgs = self.get_cells_from_board(boardimg)
        cells = self.recognize_cells(cellimgs)
        if self.upper_mr is None:
            mr = None
        else:
            mr = self.recognize_mr_digits(mrimg)
        return cells, mr, boardimg
    @staticmethod
    def recognize_mr_digits(roi_gray):
        """
        Decode the remaining-mines counter from its grayscale crop.
        Binarizes the crop, samples it along a fixed grid of probe lines,
        and matches the resulting on/off pattern of each of the three
        digit slots against ``MR_LOOKUPTABLE``; ``MR_UNITS`` then combines
        the per-slot digits into one integer. (The probe layout looks like
        a segment-display decoder -- TODO confirm.)
        :param roi_gray: uint8 grayscale image of the label region
        :return: the decoded integer
        """
        region = roi_gray > 50
        vert = np.linspace(0, region.shape[1], 7, dtype=np.int64)
        hori = np.linspace(0, region.shape[0], 5, dtype=np.int64)
        vresults = np.split(region[:, vert[1::2]], hori[1::2], axis=0)
        hresults = np.split(region[hori[1::2], :], vert[1:-1], axis=1)
        vresults = np.stack([np.sum(x, axis=0) > 0 for x in vresults], axis=1)
        hresults = np.stack([np.sum(x, axis=1) > 0 for x in hresults])
        hresults = hresults.reshape((3, 4))
        results = np.concatenate((vresults, hresults), axis=1).astype(np.int64)
        # map {0,1} features to {-1,+1} and pick the best-scoring digit
        digits = np.argmax(np.matmul(results * 2 - 1, MR_LOOKUPTABLE), axis=1)
        return np.dot(digits, MR_UNITS)
    def localize_board_and_mr(self, sct):
        """
        Returns ``(cell_board_image, mine_remaining_image)`` if
        ``enable_mr_detect`` was ``True`` when calling ``new`` to construct
        this ``BoardDetector``; otherwise, returns
        ``(cell_board_image, None)``.
        """
        boardimg = make_screenshot(sct, self.mon_id, self.board_region,
                                   esc_before_grab=True)
        if self.upper_mr is None:
            return boardimg, None
        mrimg = make_screenshot(sct, self.mon_id, self.mr_region)
        return boardimg, mrimg
    def get_cells_from_board(self, boardimg):
        """
        Slice the board screenshot into per-cell images along the key
        lines.
        :param boardimg: the grayscale board screenshot
        :return: array of cell images, row-major order
        """
        cells = []
        for i in range(self.offset_hkls.size - 1):
            for j in range(self.offset_vkls.size - 1):
                # yapf: disable
                c = boardimg[self.offset_hkls[i]:self.offset_hkls[i + 1],
                             self.offset_vkls[j]:self.offset_vkls[j + 1]]
                # yapf: enable
                cells.append(np.copy(c))
        cells = np.stack(cells)
        return cells
    def recognize_cells(self, cells):
        """
        Classify each cell image by nearest template.
        :param cells: array of cell images as returned by
                      ``get_cells_from_board``
        :return: int array of cell ids, shape (self.height, self.width)
        """
        # normalize each cell exactly like the templates in __init__:
        # resize to 16x16, binarize, scale to [-1, 1], flatten
        cells = np.stack(
            [tobw(cv2.resize(x, (16, 16)), OPEN_THR) for x in cells])
        cells = cells.astype(np.float64) / 255 * 2 - 1
        cells = cells.reshape((cells.shape[0], -1))
        D = cdist(self._face_templates, cells)
        predictions = np.argmin(D, axis=0)
        predictions = [self._face_templates_cids[x] for x in predictions]
        predictions = np.array(predictions).reshape((self.height, self.width))
        return predictions
    def boardloc_as_pixelloc(self, blocs):
        """
        Convert a batch of board locations to a batch of pixel locations. Note
        that in the board coordinate x axis is from the upper left corner to
        the lower left corner and the y axis is from the upper left corner to
        the upper right corner; whereas in the pixel coordinate x axis is from
        the upper left corner to the upper right corner, etc.
        :param blocs: of form (array([...], dtype=int), array([...], dtype=int)
               where the first array is the board x coordinates, and the
               second array the board y coordinates
        :return: pixel coordinates of the same form as ``blocs``
        """
        bx, by = blocs
        # pixel location is the midpoint between the two key lines
        # bounding the cell
        py = ((self.hkls[bx] + self.hkls[bx + 1]) / 2).astype(int)
        px = ((self.vkls[by] + self.vkls[by + 1]) / 2).astype(int)
        return px, py
    @staticmethod
    def _cc_dist(query, templates):
        # smallest absolute difference between the query and any template,
        # computed in int64 to avoid uint8 wraparound
        return min(
            abs(x.astype(np.int64) - query.astype(np.int64))
            for x in templates)
# pylint: disable=too-few-public-methods
class StageIdentifier:
    def identify_stage(self, scr, board):
        """
        Classify the current game stage from the board screenshot and the
        recognized cell matrix.

        :param scr: should be an array of shape (H, W), of dtype uint8
        :param board: the recognized board
        :return: one of ``'win'``, ``'lost'`` or ``'ongoing'``
        """
        white_ratio_threshold = 1 / 3  # min fraction of white pixels for a win
        crop = 32  # side length of the center crop, in pixels
        assert scr.shape[0] > crop and scr.shape[1] > crop
        top = (scr.shape[0] - crop) // 2
        left = (scr.shape[1] - crop) // 2
        center = scr[top:top + crop, left:left + crop]
        # the winning banner fills the board center with near-white pixels
        if np.sum(center > 250) / center.size > white_ratio_threshold:
            return 'win'
        # any revealed mine cell means the game has been lost
        return 'lost' if np.any(board == CID['m']) else 'ongoing'
def _main():
    """
    Command line entry point: localize the board (from a provided empty
    board image or from live monitor screenshots), optionally dump the
    board/label images and cell array, then print the recognized board,
    remaining-mines count and winning state.
    """
    parser = argparse.ArgumentParser(
        description='Recognize board from screenshot.')
    parser.add_argument(
        '-R',
        dest='empty_board',
        type=os.path.normpath,
        help='recognize from screenshot given EMPTY_BOARD in '
        'scene if specified; otherwise, localize board '
        'and mine remaining label from screenshot')
    parser.add_argument(
        '-D',
        dest='empty_board_monitor',
        type=int,
        default=1,
        help='the monitor id of the empty_board')
    parser.add_argument(
        '-b',
        type=os.path.normpath,
        dest='board_tofile',
        metavar='FILE',
        help='if specified, the board image will be saved to '
        'FILE')
    parser.add_argument(
        '-m',
        type=os.path.normpath,
        dest='mr_tofile',
        metavar='FILE',
        help='if specified, the mine remaining image will be '
        'saved to FILE')
    parser.add_argument(
        '-C',
        type=os.path.normpath,
        dest='cellnpy_tofile',
        metavar='FILE',
        help='if specified, the cell images are zipped in an npy FILE')
    args = parser.parse_args()
    with mss.mss() as sct:
        def get_mon_resolution(_mon_id):
            # logical (width, height) of the given monitor
            _mon = sct.monitors[_mon_id]
            return _mon['width'], _mon['height']
        if not args.empty_board:
            # no image supplied: grab a live screenshot of every monitor
            # (monitor 0 is the virtual all-monitors screen, so skip it)
            empty_board = [(i, get_mon_resolution(i), make_screenshot(sct, i))
                           for i in range(1, len(sct.monitors))]
        else:
            # load the supplied image as grayscale for the given monitor
            empty_board = [
                (
                    args.empty_board_monitor,
                    get_mon_resolution(args.empty_board_monitor),
                    np.asarray(Image.open(args.empty_board).convert('L')),
                ),
            ]
        # True enables remaining-mines label detection
        bd = BoardDetector.new(empty_board, True)
        boardimg, mrimg = bd.localize_board_and_mr(sct)
        if args.board_tofile:
            Image.fromarray(boardimg).save(args.board_tofile)
        if args.mr_tofile:
            Image.fromarray(mrimg).save(args.mr_tofile)
        print('The board:')
        board = bd.recognize_cells(bd.get_cells_from_board(boardimg))
        # NOTE(review): relies on `sys` being imported at module top
        # (not visible in this chunk) -- confirm.
        np.savetxt(sys.stdout, board, fmt='%d', delimiter=',')
        print('Mines remaining:')
        print(bd.recognize_mr_digits(mrimg))
        print('Winning state:')
        print(StageIdentifier().identify_stage(boardimg, board))
        if args.cellnpy_tofile:
            np.save(args.cellnpy_tofile, bd.get_cells_from_board(boardimg))
        print(bd)
if __name__ == '__main__':
    # argparse and mss are needed only when run as a script, so their
    # imports are deferred to this guard
    import argparse
    import mss
    _main()
| kkew3/sat-minesweeper | vboard.py | vboard.py | py | 20,542 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"lin... |
41801368948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 9 14:49:52 2023
@author: intern
"""
import cv2
# 3x3 square structuring element for the morphological operations below.
# NOTE(review): `np`, `im0`, `plt` and `rgb_to_hsv` are not defined in this
# chunk -- presumably provided by earlier notebook cells; confirm.
kernel = np.ones((3, 3), dtype=np.uint8)
erosion = cv2.erode(im0, kernel, iterations=1)
plt.imshow( erosion[:,:,0:3])
#%%
# NOTE(review): the 4th positional argument of cv2.morphologyEx is `dst`,
# not `iterations` -- passing 1 here looks unintended; confirm and use
# `iterations=1` if that was the intent.
erosion = cv2.morphologyEx(im0, cv2.MORPH_OPEN, kernel, 1)
plt.imshow( erosion[:,:,0:3])
hsvim = rgb_to_hsv(erosion[:,:,0:3])
#%%
# bare expression: evaluates and discards the parsed float (no effect
# outside an interactive session)
float("0.5555554573")
a = format(float("0.5555554573"), '.6f') | xsmsh7/label-color | filterblackedge.py | filterblackedge.py | py | 436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.erode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 15,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.