seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70153811305 | # -*- coding: utf-8 -*-
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from employee.models import Employee
from employee.serializers import employee_serializer
# Create your views here.
@csrf_exempt
def employee_list(request):
    """
    List every employee as JSON (GET) or create a new employee (POST).

    GET  -> 200 with a JSON array of all employees.
    POST -> 201 with the stored record, or 400 with validation errors.
    """
    if request.method == 'GET':
        # Serialize the whole queryset; safe=False allows a top-level list.
        all_employees = Employee.objects.all()
        return JsonResponse(
            employee_serializer(all_employees, many=True).data, safe=False
        )

    if request.method == 'POST':
        # Deserialize the request body and persist it when it validates.
        payload = JSONParser().parse(request)
        serializer = employee_serializer(data=payload)
        if not serializer.is_valid():
            # Invalid payload: report the field errors as a bad request.
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data, status=201)
@csrf_exempt
def employee_detail(request, pk):
    """
    Retrieve (GET), update (PUT) or delete (DELETE) the employee with id *pk*.

    Returns 404 when no employee matches, 400 on invalid PUT data and
    204 after a successful delete.
    """
    try:
        # Look the employee up by primary key.
        employee = Employee.objects.get(pk=pk)
    except Employee.DoesNotExist:
        # Unknown id: nothing to act on.
        return HttpResponse(status=404)
    if request.method == 'GET':
        # Return the single employee rendered as JSON.
        serializer = employee_serializer(employee)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        # Parse the request body and apply it to the existing record.
        data = JSONParser().parse(request)
        serializer = employee_serializer(employee, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        # Validation failed: answer with the field errors.
        return JsonResponse(serializer.errors, status=400)
    elif request.method == 'DELETE':
        # Remove the record and answer with "no content".
        employee.delete()
        return HttpResponse(status=204) | borgessouza/DjangoLabs | employee/views.py | views.py | py | 2,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "employee.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "employee.models.Employee.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "employee.models.Employee.objects",
"line_number": 18,
"usage_type": "attribute"
},... |
27971059344 | from bs4 import BeautifulSoup
from urllib.request import urlopen,Request,urlretrieve,build_opener,install_opener
import os
import random
from main.models import Post,Images
from django.core.files import File
from django.contrib.auth.models import User
from main.categories import user_agent_list
class ScrapeFunction:
    """Shared scraping helper: fetches a URL and parses it with BeautifulSoup."""

    # Pick one random user agent per process.  The original chained
    # "user_agent=user_agent = ..." assignment was redundant; a single
    # assignment is equivalent.  The empty __init__ was dropped too.
    user_agent = random.choice(user_agent_list)
    headers = {'User-Agent': user_agent}

    def getHTML(self, link):
        """Download *link* and return the parsed BeautifulSoup document.

        A plain request is tried first; on any failure it is retried once
        with the class-level User-Agent header (some hosts reject the
        default urllib agent).
        """
        try:
            html = urlopen(link).read()
        except Exception:
            req = Request(link, headers=self.headers)
            html = urlopen(req).read()
        return BeautifulSoup(html, "html.parser")
class PamakeoPress:
    """Scraper for http://pamakiopress.rw: walks the category widgets on the
    front page, downloads each article (thumbnail, body, category, title)
    and stores new ones as Post records with their Images.

    NOTE(review): getThumbnail() must be called before getContents(),
    getCategory() and getTitle() — it initialises self.contents as a side
    effect and the other getters read it.
    """
    def __init__(self):
        super().__init__()
    def news_items(self,link='http://pamakiopress.rw/'):
        """Scrape *link* and insert every not-yet-stored article; returns the
        number of articles saved (the counter i)."""
        try:
            get_html=ScrapeFunction().getHTML(link)
            #get_html=ScrapeFunction().getHTML('file:///home/hacker/Desktop/Video%20Toutorial/Amakuru%20yizewe%20yuzuye%20kandi%20kugihe%20-%20Pamakio%20Press.html')
            #articles=get_html.find_all('article',class_='mh-loop-item')
        except Exception as e:
            pass
        result=list()
        # Widget ids of the category boxes on the front page.
        from_={
            'Amakuru':'mh_magazine_lite_posts_focus-3',
            'Imikino':'mh_magazine_lite_posts_focus-4',
            'Politiki':'mh_custom_posts-12',
            'Imyidagaduro':'mh_custom_posts-9',
            'Ubuzima':'mh_custom_posts-8'
            #'utunu n\'utundi'
        }
        i=0
        try:
            for category in from_:
                #print(category)
                #print('--'*15)
                articles=get_html.find(id=from_[category])
                # All scraped posts are attributed to the first user account.
                user=User.objects.all().first()
                for li in articles.find_all('figure'):
                    im=Images()
                    news_link=li.find('a')['href']
                    thumbnail=self.getThumbnail(news_link)
                    title=self.getTitle()
                    # NOTE(review): this overwrites the 'result' list created
                    # above with urlretrieve's (filename, headers) tuple —
                    # looks unintended; verify.
                    result = urlretrieve(thumbnail)
                    im.image_from=title
                    im.image.save(os.path.basename(im.image_from+str(i) +'.png'), File(open(result[0], 'rb')))
                    # Skip articles whose title is already stored.
                    post=Post.objects.filter(title=title).exists()
                    if post:
                        continue
                    post_item=Post()
                    category=self.getCategory()
                    post_from='PamakeoPress'
                    body=self.getContents()
                    #insert section
                    post_item.title=title
                    post_item.thumbnail=im.image
                    post_item.post_from=post_from
                    post_item.body=body
                    post_item.post_category=category
                    post_item.status='published'
                    post_item.author=user
                    post_item.save()
                    #print(user.username)
                    i=i+1
                    #continue
        except Exception as e:
            # Any scraping/DB failure aborts the run silently; i keeps the
            # count of items saved so far.
            #print("error "+str(e))
            pass
        return i
    def getArticleLink(self,inThis):
        """Return the stripped href of the first anchor inside *inThis*."""
        arlink=inThis.find('a')['href'].strip()
        return arlink
    def getThumbnail(self,l):
        """Fetch article page *l*, remember its content node in self.contents
        and return the (absolutised) thumbnail image URL."""
        other=ScrapeFunction().getHTML(l)
        self.contents=other.find(class_='mh-content')
        entry_thumb=self.contents.find(class_='entry-thumbnail')
        thumbnail=entry_thumb.find_all('img')[0]['src']
        if "https:" not in thumbnail:
            # Relative path: prefix the site root.
            thumbnail="http://pamakiopress.rw/{}".format(thumbnail)
            #img_urls="file:///home/hacker/Downloads/{}".format(img_urls)
        return thumbnail
    def getContents(self):
        """Return the article body HTML (as str) with every embedded image
        downloaded, saved as an Images record and rewritten to /media/...;
        also strips some ad markup.  Requires getThumbnail() to have run."""
        contents=self.contents.find(class_='entry-content mh-clearfix')
        htmlContent=contents
        i=0
        for img in contents.find_all('img'):
            i=i+1
            img_urls = img['src']
            if "https:" not in img_urls:
                img_urls="http://pamakiopress.rw/{}".format(img_urls)
                #img_urls="file:///home/hacker/Downloads/{}".format(img_urls)
            if i==1:
                #thumb=im.image
                htmlContent=contents
            im=Images()
            # Download with a browser-like User-Agent to avoid 403s.
            opener = build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            install_opener(opener)
            title=self.getTitle()
            im.image_from=title
            result=urlretrieve(img_urls)
            im.image.save(os.path.basename(im.image_from+str(i)+'.png'),File(open(result[0], 'rb')))
            im.save()
            htmlContent=str(htmlContent).replace(img['src'],'/media/'+str(im.image))
        # NOTE(review): "\{\}" inside a plain string is a literal backslash +
        # brace, so this replace probably never matches the adsense snippet —
        # confirm the intended target text.
        htmlContent=str(htmlContent).replace("(adsbygoogle = window.adsbygoogle || []).push(\{\});",'')
        htmlContent=str(htmlContent).replace('<!-- 720x90 adsense -->','')
        htmlContent=str(htmlContent).replace('<!--CusAds1118-->','')
        return htmlContent
    def getCategory(self):
        """Return the article's category text, capitalised.  Requires
        getThumbnail() to have run."""
        category=self.contents.find(class_='entry-meta-categories')
        category=category.text.strip()
        #print(category)
        return category.capitalize()
    def getTitle(self):
        """Return the stripped article title.  Requires getThumbnail() to
        have run."""
        title=self.contents.find(class_='entry-title')
        title=title.text.strip()
        #print(title)
        return title
#v=PamakeoPress()
#v.getThumbnail()
#v.getContents()
#v.getCategory()
#v.getTitle()
#v.news_items()
'''
for article in articles:
articlelink=self.getArticleLink(article.find('h3',class_='entry-title mh-loop-title'))
get_thumbnail=self.getThumbnail(l=articlelink)
title=self.getTitle()
body=self.getContents()
category=self.getCategory()
result.append({
'title':title,
'body':body,
'category':category,
'thumbnail':get_thumbnail,
'post_from':'PamakeoPress',
})
#print(get_thumbnail)
return result
#return get_html.getHTML(link)
result.append({
'thumbnail':thumbnail,
'title':title,
'category':self.getCategory(),
'post_from':'PamakeoPress',
'body':self.getContents(),
})
''' | itfidele/Bhano-Blog | operations/scrape.py | scrape.py | py | 6,352 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "main.categories.user_agent_list",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name... |
30503083715 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import shutil
import argparse
"""
for i in `find . -maxdepth 1 | awk -F '/' '{ print $2 }' | grep -v "\ "`; do
echo "-----------------------------------------$i---------------------------------";
python gen_dao.py --dir=$i ; done
可自动生成dao文件,并移动到public目录下[cdb_mtnc/public/db_access/xxx]:
1.通过sql文件生成model及mappers,并生成dao文件 【较少用】
python gen_dao.py --dir=test --sql=yes
2.根据models及mappers生成dao文件 【常用】
python gen_dao.py --dir=test
"""
# Running counter of the numbered progress messages printed so far.
step = 0
def red_print(msg):
    """Print *msg* as the next numbered progress line, in yellow ANSI colour."""
    global step
    step = step + 1
    text = "\033[0;33;40m\t" + str(step) + msg + "\033[0m"
    print(text)
def yellow_print(msg):
    """Print *msg* prefixed with "Error: " in red ANSI colour."""
    text = "".join(["\033[0;31;40m\tError: ", msg, "\033[0m"])
    print(text)
def copy_files(src_dir, dest_dir):
    """Recursively copy *src_dir* into *dest_dir*, skipping "daos.h".

    A file is copied only when the destination is missing or differs in
    size, so repeated runs behave incrementally.  Destination directories
    are created on demand.
    """
    for file in os.listdir(src_dir):
        if file == "daos.h":
            # Aggregate header is generated elsewhere; never copy it.
            continue
        src_file = os.path.join(src_dir, file)
        dest_file = os.path.join(dest_dir, file)
        if os.path.isfile(src_file):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            if not os.path.exists(dest_file) or os.path.getsize(dest_file) != os.path.getsize(src_file):
                # Fix: use context managers so the handles are closed.
                with open(src_file, "rb") as src, open(dest_file, "wb") as dst:
                    dst.write(src.read())
        if os.path.isdir(src_file):
            # BUG FIX: the original recursed via the undefined name
            # "copyFiles", raising NameError on the first subdirectory.
            copy_files(src_file, dest_file)
def gen_dao():
    """Run the cowboy generator inside args.dir and move the produced DAO
    files into the public tree; aborts the process when generation fails."""
    work_dir = os.getcwd() + "/" + args.dir
    os.chdir(work_dir)
    # The generator is invoked through a relative symlink; create it once.
    if not os.path.exists('cowboy'):
        red_print(".create cowboy link.")
        os.symlink("../../cowboy", "cowboy")
    if args.sql:
        red_print(".update models and mappers by sql ")
        status = os.system("./cowboy -u -o -x -m")
    else:
        red_print(".gen dao.cpp from mappers and models ")
        status = os.system("./cowboy -o -x -m")
    if status:
        # Non-zero exit status from the generator: report and bail out.
        yellow_print("gen dao files err ")
        exit(-1)
    red_print(".gen dao files done")
    move_dao_files()
def move_dao_files():
    """Recreate public/db_access/<dir> and copy the generated dao/model/mapper
    files into it.  Publishes the target path via the global *dest_dir*."""
    global dest_dir
    #yellow_print(path)
    # The repo root is everything before the "tools" path component.
    repo_root = os.getcwd().split("tools")[0]
    dest_dir = repo_root + "public/db_access/" + args.dir
    red_print(".move dao files to: " + dest_dir )
    # Wipe and recreate the destination before copying.
    os.popen("rm -rf " + dest_dir)
    os.system("mkdir " + dest_dir)
    for sub_dir in ("dao", "model", "mapper"):
        copy_files(sub_dir, dest_dir)
def parse_options():
    """Parse the command line into the global *args*; exits when the
    requested directory does not exist."""
    global args
    info = """1.python gen_dao.py --dir=xxxx --sql=yes
2.python gen_dao.py --dir=xxxx """
    parser = argparse.ArgumentParser(usage=info, description="")
    parser.add_argument("--dir", dest="dir", required=True,
                        help="\tgen dao.cpp from which dir.")
    parser.add_argument("-sql", "--sql", dest="sql", default=False,
                        help="\tupdate models and mappers by sql.")
    args = parser.parse_args()
    if not os.path.exists(args.dir):
        # Fail fast on a bad --dir argument.
        yellow_print("dir " + args.dir + " not exits!")
        exit()
# Script entry point: parse the CLI arguments, generate the DAO files and
# list the freshly copied output.
if __name__ == '__main__':
    parse_options()
    gen_dao()
    red_print(".all done ")
    print("new files:__________________________")
    os.system("ls -l --full-time " + dest_dir)
| feng1o/python_1 | tx_add/gen_dao.py | gen_dao.py | py | 3,263 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number"... |
42242964730 | import numpy as np
import bead_util as bu
import matplotlib.pyplot as plt
import os
import scipy.signal as sig
import scipy
import glob
from scipy.optimize import curve_fit
import cant_util as cu
data_dir1 = "/data/20170831/image_calibration2/align_profs"
data_dir2 = "/data/20170831/image_calibration2/align_profs"
out_dir = "/calibrations/image_alignments"
date = '20170831'
def get_stage_column(attribs, stage_cols = [17, 18, 19], attrib_inds = [3, 6, 9], ortho_columns = [18, 17, 19]):
    '''Return (driven_column, orthogonal_column) for the first driven stage
    axis recorded in *attribs*.

    The stage_settings entries at attrib_inds act as drive flags; the column
    numbers in stage_cols / ortho_columns are selected by those flags.
    '''
    # Assumes attribs['stage_settings'] supports list (fancy) indexing,
    # i.e. is a numpy array.
    flags = attribs['stage_settings'][attrib_inds]
    driven = np.array([bool(f) for f in flags])
    drive_col = np.array(stage_cols)[driven][0]
    ortho_col = np.array(ortho_columns)[driven][0]
    return drive_col, ortho_col
def gauss_beam(r, mu, w, A):
    '''Gaussian beam intensity profile: A * exp(-2 * (r - mu)^2 / w^2),
    used as a fit model.'''
    dr = r - mu
    return A * np.exp(-2. * dr**2 / w**2)
def line(x, m, b):
    '''Straight line y = m*x + b, used as a fit model.'''
    return b + m * x
def line_intersection(popt0, popt1):
    '''Intersection point (x, y) of two lines y = m*x + b, each given as
    popt = [m, b].'''
    m0, b0 = popt0[0], popt0[1]
    m1, b1 = popt1[0], popt1[1]
    x_int = (b1 - b0) / (m0 - m1)
    # Evaluate the first line at the crossing abscissa.
    return x_int, m0 * x_int + b0
def profile(fname, ends = 100, stage_cal = 8., data_column = 5, make_plot = False, p0 = [30, 30, .001], ortho_column = [18, 17, 19]):
    '''Build a beam profile from one sweep file and fit it to a Gaussian.

    Loads *fname*, trims *ends* samples from both sides, scales the driven
    and orthogonal stage columns by *stage_cal*, bins the data separately
    for positive and negative sweep velocity, differentiates the binned
    signal to obtain the profile and fits each direction with gauss_beam.

    Returns (mean fitted beam center, mean orthogonal stage position).

    NOTE(review): the ortho_column parameter is immediately overwritten by
    get_stage_column()'s return value, so passing it has no effect.
    '''
    dat, attribs, f = bu.getdata(fname)
    dat = dat[ends:-ends, :]
    stage_column, ortho_column = get_stage_column(attribs)
    # Scale stage readings by the calibration factor.
    dat[:,stage_column]*=stage_cal
    dat[:, ortho_column]*=stage_cal
    f.close()
    # Bin by stage position, separately for the two sweep directions.
    bp, yp, ep = cu.sbin_pn(dat[:, stage_column], dat[:, data_column], bin_size = .1, vel_mult = 1.)
    bn, yn, en = cu.sbin_pn(dat[:, stage_column], dat[:, data_column], bin_size = .1, vel_mult = -1.)
    # The profile is the magnitude of the derivative of the binned signal.
    profp = np.abs(np.gradient(yp, bp))
    profn = np.abs(np.gradient(yn, bn))
    poptp, pcovp = curve_fit(gauss_beam, bp[10:-10], profp[10:-10], p0 = p0)
    poptn, pcovn = curve_fit(gauss_beam, bn[10:-10], profn[10:-10], p0 = p0)
    if make_plot:
        plt.semilogy(bp, profp, 'o')
        plt.semilogy(bp, gauss_beam(bp, *poptp), 'r')
        plt.semilogy(bn, profn, 'o')
        plt.semilogy(bn, gauss_beam(bn, *poptn), 'k')
        plt.show()
    return np.mean([poptn[0], poptp[0]]), np.mean(dat[:, ortho_column])
def find_edge(xsweep_dir, ysweep_dir, over_plot = 10.):
    """Profile every .h5 sweep file in the two directories, fit a line to
    each sweep's (beam center, orthogonal position) points and return the
    intersection of the two fitted lines as np.array([x, y]).  The data,
    the fits and the intersection are also plotted.

    Fix: the initial slope guesses p0x/p0y were missing parentheses, so
    they computed a - b/c instead of the intended difference quotient
    (a - b)/c.  The guess only seeds curve_fit, but is now as intended.
    """
    xfs = glob.glob(xsweep_dir + '/*.h5')
    yfs = glob.glob(ysweep_dir + '/*.h5')
    xdata = np.array(list(map(profile, xfs)))
    ydata = np.array(list(map(profile, yfs)))
    plt.plot(xdata[:, 0], xdata[:, 1], 'x')
    plt.plot(ydata[:, 1], ydata[:, 0], 'x')
    # Crude slope estimate from the end points as the fit's starting guess.
    p0x = [(xdata[-1, 0] - xdata[0, 0]) / (xdata[-1, 1] - xdata[0, 1]), 0]
    p0y = [(ydata[-1, 0] - ydata[0, 0]) / (ydata[-1, 1] - ydata[0, 1]), 0]
    poptx, pcovx = curve_fit(line, xdata[:, 0], xdata[:, 1], p0 = p0x)
    popty, pcovy = curve_fit(line, ydata[:, 1], ydata[:, 0], p0 = p0y)
    xplt = np.linspace(np.min(xdata[:, 0]) - over_plot, np.max(xdata[:, 0]) + over_plot, 1000)
    yplt = np.linspace(np.min(ydata[:, 1]) - over_plot, np.max(ydata[:, 1]) + over_plot, 1000)
    plt.plot(xplt, line(xplt, *poptx))
    plt.plot(yplt, line(yplt, *popty))
    xint, yint = line_intersection(poptx, popty)
    plt.plot([xint], [yint], 'o')
    plt.show()
    return np.array([xint, yint])
def save_cal(p_arr, path, date):
    '''Save the stage-position array to <path>/stage_position_<date>.npy,
    creating <path> first when necessary.'''
    destination = os.path.join(path, 'stage_position_' + date)
    if not os.path.exists(path):
        os.makedirs(path)
    np.save(destination, p_arr)
# Script execution: locate the beam edge from the two sweep directories and
# save the resulting stage-position calibration for this date.
p_arr = find_edge(data_dir1, data_dir2)
save_cal(p_arr, out_dir, date)
| charlesblakemore/opt_lev_analysis | scripts/camera_analysis/align_image.py | align_image.py | py | 3,689 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bead_util.getdata",
"line_numbe... |
14983002282 | #!/usr/bin/python
# encoding=utf8
from flask import Flask, render_template, request, flash, redirect, session, abort
from tinydb import TinyDB,Query
import os
import json
import cPickle as cp
import sys
reload(sys)
sys.setdefaultencoding('utf8')
app = Flask(__name__)
app.secret_key = os.urandom(12)
usersAnnot={"admin":"pwd"}
userAdmin={}
db=""
init = 0
tI = 0
userN = ""
def loadPickle(nameF):
    """Load and return the pickled object stored in file *nameF*.

    Fixes a resource leak: the original opened the file without ever
    closing the handle.
    """
    with open(nameF, "rb") as handle:
        return cp.load(handle)
# Preload the annotation sample ids and the quote texts shown to annotators.
rand150 = loadPickle("rand5.cp")
quotes = loadPickle("quotes.cp")
@app.route('/')
def home(user = ""):
    """Render the annotation page.

    With a logged-in *user*: open (once) the user's TinyDB file, collect the
    ids already annotated, pick the first remaining id from rand150 and show
    its quote; sets complete=1 when nothing is left to annotate.
    """
    global db, init, tI, userN
    complete = 0
    msg = []
    if user:
        if not init:
            # One TinyDB file per annotator, opened once per session.
            db = TinyDB(user+".json")
            init = 1
        done = []
        for i in db:
            done.append(i["id"])
        # Remaining sample ids = all assigned ids minus the finished ones.
        toDo = list(set(rand150).difference(done))
        if not toDo:
            complete = 1
        else:
            tI = toDo[0]
            msg = quotes[tI]
    return render_template("index.html", user = user, complete = complete, msg = msg, lenMsg = len(msg))
@app.route('/', methods=['POST'])
def doStuff():
    """Store the submitted annotation for the current quote (tI) and render
    the next one.

    Form fields prefixed with "other" hold the free-text fallback used when
    the matching select field was left empty.
    """
    global db,tI
    tP = {}
    ky = request.form.keys()
    #print ky
    for i in ky:
        if not "other" in i:
            if not request.form[i]:
                # Empty choice: fall back to the matching "other<name>" text.
                tP[i] = request.form['other'+str(i)]
            else:
                tP[i] = request.form[i]
    db.insert({"id":tI,"em":tP})
    return home(user = userN)
@app.route('/login', methods=['POST'])
def do_admin_login():
    """Check the posted credentials against the static usersAnnot table and
    mark the session logged-in on success; flashes an error otherwise."""
    global userN
    userN = request.form['username']
    password = request.form['password']
    if userN in usersAnnot:
        if usersAnnot[userN] == password:
            session['logged_in'] = True
        else:
            # Known user, wrong password: drop back to anonymous.
            userN = ""
            flash("Wrong Password Entered.!")
    else:
        flash('User Not Authorized.!')
        userN = ""
    return home(user = userN)
@app.route('/login')
def something():
    """GET on /login just renders the (anonymous) home page."""
    return home()
@app.route("/logout")
def logout():
    """Close the annotator's database and reset all per-session globals
    before rendering the anonymous home page."""
    global userN, db, init, tI
    session['logged_in'] = False
    if db:
        db.close()
    db = ""
    init = 0
    tI = 0
    userN = ""
    return home(user = "")
@app.errorhandler(404)
def page_not_found(e):
    """Unknown URLs fall back to the home page instead of a 404 view."""
    return home()
# Script entry point: run the Flask development server.
if __name__ == "__main__":
    app.secret_key = os.urandom(12)
    app.run() | ankitvad/AnnotationSite | hello.py | hello.py | py | 2,037 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cPickle.load",
"lin... |
72663792744 | # mypy: ignore-errors
import streamlit as st
from bokeh.models import CustomJS
from bokeh.models.widgets import Button
from streamlit_bokeh_events import streamlit_bokeh_events
REC_GIF = "ai_talks/assets/icons/rec_on.gif"
def get_js_code(lang: str) -> str:
    """Return the in-browser JavaScript that drives webkitSpeechRecognition
    for *lang* and reports progress back through custom DOM events
    (GET_ONREC for recorder state, GET_TEXT for final text, GET_INTRM for
    interim hypotheses)."""
    return """
var value = "";
var rand = 0;
var recognition = new webkitSpeechRecognition();
recognition.continuous = false;
recognition.interimResults = true;
""" + f"recognition.lang = '{lang}';" + """
document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'start'}));
recognition.onspeechstart = function () {
document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'running'}));
}
recognition.onsoundend = function () {
document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'stop'}));
}
recognition.onresult = function (e) {
var value2 = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
rand = Math.random();
} else {
value2 += e.results[i][0].transcript;
}
}
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: {t:value, s:rand}}));
document.dispatchEvent(new CustomEvent("GET_INTRM", {detail: value2}));
}
recognition.onerror = function(e) {
document.dispatchEvent(new CustomEvent("GET_ONREC", {detail: 'stop'}));
}
recognition.start();
"""
def show_speak_btn() -> Button:
    """Build the localized "speak" Bokeh button wired to the speech
    recognition JS snippet for the session's language."""
    stt_button = Button(label=st.session_state.locale.speak_btn, button_type="success", width=100)
    stt_button.js_on_event("button_click", CustomJS(code=get_js_code(st.session_state.locale.lang_code)))
    return stt_button
def get_bokeh_result() -> dict:
    """Render the speak button and return the DOM events captured from the
    browser (GET_TEXT / GET_ONREC / GET_INTRM) via streamlit_bokeh_events."""
    stt_button = show_speak_btn()
    return streamlit_bokeh_events(
        bokeh_plot=stt_button,
        events="GET_TEXT,GET_ONREC,GET_INTRM",
        key="listen",
        refresh_on_update=False,
        override_height=75,
        debounce_time=0,
    )
def show_voice_input() -> None:
    """Drive the voice-input widget: accumulate recognised text across
    events, echo interim hypotheses, show the recording indicator and
    publish the final transcript into st.session_state.user_text."""
    if "input" not in st.session_state:
        # "text" holds the accumulated transcript; "session" stores the
        # random tag of the last processed GET_TEXT event (de-duplication).
        st.session_state.input = {"text": "", "session": 0}
    result = get_bokeh_result()
    tr = st.empty()
    tr.code(st.session_state.input["text"])
    if result:
        if "GET_TEXT" in result and (
                result.get("GET_TEXT")["t"] != "" and result.get("GET_TEXT")["s"] != st.session_state.input["session"]):
            # New final text for an unseen session tag: accept and remember it.
            st.session_state.input["text"] = result.get("GET_TEXT")["t"]  # type: ignore
            tr.code(st.session_state.input["text"])
            st.session_state.input["session"] = result.get("GET_TEXT")["s"]
        if "GET_INTRM" in result and result.get("GET_INTRM") != "":
            # Show the in-progress hypothesis appended to the accepted text.
            tr.code(st.session_state.input["text"] + " " + result.get("GET_INTRM"))
        if "GET_ONREC" in result:
            if result.get("GET_ONREC") == "start":
                st.image(REC_GIF)
                st.session_state.input["text"] = ""
            elif result.get("GET_ONREC") == "running":
                st.image(REC_GIF)
            elif result.get("GET_ONREC") == "stop" and st.session_state.input["text"] != "":
                # Recording finished: hand the transcript to the chat input.
                st.session_state.user_text = st.session_state.input["text"]
                st.session_state.input["text"] = ""
| dKosarevsky/AI-Talks | ai_talks/src/utils/stt.py | stt.py | py | 3,457 | python | en | code | 243 | github-code | 36 | [
{
"api_name": "bokeh.models.widgets.Button",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 51,
"usage_type": "call"
},
{
"ap... |
41237480224 | from fastapi import FastAPI, APIRouter, HTTPException, status
from pydantic import BaseModel,json
from api.settings import base_url
import pandas as pd
import requests
import json
from typing import List, Optional
from routers.users import user_login
import datetime
# Router collecting every vehicle-related endpoint under the "Vehicle" tag.
vehicle_router = APIRouter(tags=["Vehicle"])
@vehicle_router.get("/labels/{labelId}")
def get_color(labelId):
    """Proxy a label lookup to the upstream Joomla API and return its JSON."""
    access_token = user_login()
    auth_headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json"}
    endpoint = f'{base_url}/dev/index.php/v1/labels/{labelId}'
    return requests.request("GET", endpoint, headers=auth_headers).json()
@vehicle_router.get("/filter/hu")
def filter():
    """Return the cached vehicles whose "hu" (inspection date) field is set,
    persisting the filtered list to filter_hu.json.

    Fixes: compare against None with "is not" instead of "!=", and write
    the output file with an explicit utf-8 encoding for consistency with
    the read side.
    """
    with open("response.json", "r", encoding="utf-8") as file:
        data = json.load(file)
    filtered_data = [x for x in data if x["hu"] is not None]
    with open("filter_hu.json", "w", encoding="utf-8") as write_file:
        json.dump(filtered_data, write_file, indent=4)
    return filtered_data
@vehicle_router.get("/hu")
def check_hu(datas):
    """Annotate each record that has both "hu" and "colored" set with a
    colorCode based on how many months old the HU date is: green (< 3),
    orange (< 12), red (>= 12).  Returns the mutated list."""
    today = datetime.datetime.strptime(
        datetime.datetime.today().strftime('%Y-%m-%d'), '%Y-%m-%d')
    for entry in datas:
        # Only colour entries that opted in and carry a date.
        if not (entry["hu"] and entry["colored"]):
            continue
        hu_date = datetime.datetime.strptime(entry["hu"], "%Y-%m-%d")
        months = (today.year - hu_date.year) * 12 + (today.month - hu_date.month)
        if months < 3:
            entry["colorCode"] = "#007500"
        elif months < 12:
            entry["colorCode"] = "#FFA500"
        else:
            entry["colorCode"] = "#b30000"
    return datas
@vehicle_router.post("/upload")
def upload(kys:List[str],url, colored:Optional[bool] = True):
    """Import a semicolon-separated CSV of vehicles (columns named by *kys*),
    annotate HU colour codes, export the data to a timestamped Excel file
    and a vehicle.json cache, and return the parsed rows.

    NOTE(review): df.to_json() returns a JSON *string*, yet it is passed to
    check_hu() (which iterates and indexes records), later .to_excel() is
    called on it and it is indexed with ["data"] — this pipeline looks
    broken as written; verify the intended types before relying on it.
    """
    csvDataFrame = pd.read_csv(url, encoding='utf8',sep=';',header=None,names=kys)
    df = pd.DataFrame(csvDataFrame)
    # Flag whether these rows should receive HU colour coding.
    df["colored"] = colored
    datas = df.to_json(orient="table",indent=4)
    check_hu(datas)
    current_date = datetime.datetime.now().isoformat('-',"hours")
    with pd.ExcelWriter(f"vehicles_{current_date}.xlsx") as writer:
        datas.to_excel(writer)
    dataframe = pd.read_excel(f"vehicles_{current_date}.xlsx")
    dataframe.to_json('vehicle.json', index=False, orient="table", indent=4)
    new_data = datas["data"]
    return new_data
@vehicle_router.post("/vehicle")
def post_data():
    """Fetch the active vehicles from the upstream API, cache the response in
    response.json, write a gruppe-sorted copy of the local vehicle.json to
    sorted_vehicle.json and return the upstream JSON.

    Fixes: vehicle.json was opened without ever being closed, the response
    body was parsed twice, and sorting raised TypeError whenever two or
    more records had gruppe=None (tuple comparison fell through to
    None < None).
    """
    url = f"{base_url}/dev/index.php/v1/vehicles/select/active"
    access_token = user_login()
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json"}
    response = requests.request("GET", url, headers=headers)
    with open('vehicle.json', encoding="utf-8") as source:
        data = json.load(source)
    new_data = data["data"]
    with open("sorted_vehicle.json", "w", encoding="utf-8") as file1:
        # Sort None groups last; map None to "" so ties compare safely.
        json.dump(sorted(new_data, key=lambda x: (x["gruppe"] is None, x["gruppe"] or "")),
                  file1, indent=4)
    payload = response.json()
    with open("response.json", "w", encoding="utf-8") as file:
        json.dump(payload, file, indent=4)
    return payload
@vehicle_router.get("/search")
def search_field(key):
    """Return [{key, kurzname, info}, ...] for every cached vehicle that has
    a value for *key*; returns a 404 HTTPException as soon as a record
    lacks it.

    Fixes: the result dict was created once outside the loop, so the list
    ended up holding N references to the same (last) record; the cache is
    also read from "response.json" — the file post_data actually writes —
    instead of the misspelled "reesponse.json".
    """
    search_data = []
    with open("response.json", "r", encoding="utf-8") as file:
        data = json.load(file)
    for src in data:
        if src[key]:
            # Build a fresh dict per record so entries stay independent.
            s_data = {
                key: src[key],
                "kurzname": src["kurzname"],
                "info": src["info"],
            }
            search_data.append(s_data)
        else:
            return HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Data does not exist.")
    return search_data
@vehicle_router.post("/merge")
def merge_data(url1, url2):
    """Read two semicolon-separated CSV files, concatenate them without
    duplicate rows, persist the result to merged_data.json and return the
    merged DataFrame.

    Fixes: the "-> dict" annotation was wrong (a DataFrame is returned),
    the pandas option error_bad_lines=False — removed in pandas 2.0 — is
    replaced by its modern equivalent on_bad_lines="skip", and the local
    result no longer shadows the function's own name.
    """
    frame1 = pd.read_csv(url1, encoding='utf8', sep=';', on_bad_lines='skip')
    frame2 = pd.read_csv(url2, encoding='utf8', sep=';', on_bad_lines='skip')
    merged = pd.concat([frame1, frame2]).drop_duplicates().reset_index(drop=True)
    merged.to_json("merged_data.json", indent=4)
    return merged
| deryacortuk/FastAPI-Pandas | routers/vehicles.py | vehicles.py | py | 4,489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "api.settings.base_url",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "routers.users.user_login",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requ... |
1098849667 | import logging
import traceback
import psycopg2
from django.db import IntegrityError
from apps.fyle_expense.models import Expense, ExpenseGroup
from apps.task_log.exceptions import MissingMappingsError
from apps.task_log.models import TaskLog
from apps.xero_workspace.models import EmployeeMapping, CategoryMapping, ProjectMapping, Invoice, InvoiceLineItem, \
FyleCredential, XeroCredential
from apps.xero_workspace.utils import connect_to_fyle, connect_to_xero
from fyle_jobs import FyleJobsSDK
from fyle_xero_integration_web_app import settings
LOGGER = logging.getLogger(__name__)
def schedule_expense_group_creation(workspace_id, user):
    """
    Schedule Expense Group creation via the Fyle jobs service.

    Creates an IN_PROGRESS task log, then registers a trigger-now job whose
    callback hits the workspace's expense-group trigger endpoint with the
    task log id.  Missing Fyle credentials are recorded on the task log
    instead of propagating.

    :param workspace_id: workspace to fetch expenses for
    :param user: user email, used only in the job description
    :return: None
    """
    task_log = TaskLog.objects.create(
        workspace_id=workspace_id,
        type="FETCHING EXPENSES",
        status="IN_PROGRESS"
    )
    try:
        fyle_sdk_connection = connect_to_fyle(workspace_id)
        jobs = FyleJobsSDK(settings.FYLE_JOBS_URL, fyle_sdk_connection)
        created_job = jobs.trigger_now(
            callback_url='{0}{1}'.format(
                settings.API_BASE_URL,
                '/workspace_jobs/{0}/expense_group/trigger/'.format(
                    workspace_id
                )
            ),
            callback_method='POST',
            object_id=task_log.id,
            payload={
                'task_log_id': task_log.id
            },
            job_description=f'Fetch expenses: Workspace id - {workspace_id}, user - {user}'
        )
        # Remember the job id so the task can be tracked later.
        task_log.task_id = created_job['id']
        task_log.save()
    except FyleCredential.DoesNotExist:
        # No source credentials: record the problem on the task log.
        LOGGER.error('Error: Fyle Credentials not found for this workspace.')
        task_log.detail = {
            'error': 'Please connect your Source (Fyle) Account'
        }
        task_log.status = 'FYLE CONNECTION ERROR'
        task_log.save()
def schedule_invoice_creation(workspace_id, expense_group_ids, user):
    """
    Schedule invoice creation for the given expense groups.

    For every selected expense group an IN_PROGRESS task log is created and
    a trigger-now job is registered whose callback hits that group's
    invoice trigger endpoint.

    :param workspace_id: workspace owning the expense groups
    :param expense_group_ids: ids of the groups to invoice
    :param user: user email, used only in the job description
    :return: None
    """
    expense_groups = ExpenseGroup.objects.filter(
        workspace_id=workspace_id, id__in=expense_group_ids).all()
    fyle_sdk_connection = connect_to_fyle(workspace_id)
    jobs = FyleJobsSDK(settings.FYLE_JOBS_URL, fyle_sdk_connection)
    for expense_group in expense_groups:
        task_log = TaskLog.objects.create(
            workspace_id=expense_group.workspace.id,
            expense_group=expense_group,
            type='CREATING INVOICE',
            status='IN_PROGRESS'
        )
        created_job = jobs.trigger_now(
            callback_url='{0}{1}'.format(
                settings.API_BASE_URL,
                '/workspace_jobs/{0}/expense_group/{1}/invoice/trigger/'.format(
                    workspace_id,
                    expense_group.id
                )
            ),
            callback_method='POST',
            object_id=task_log.id,
            payload={
                'task_log_id': task_log.id
            },
            job_description=f'Create invoice: Workspace id - {workspace_id}, \
            user - {user}, expense group id - {expense_group.id}'
        )
        # Track the registered job on the task log.
        task_log.task_id = created_job['id']
        task_log.save()
def fetch_expenses_and_create_groups(workspace_id, task_log, user):
    """
    Fetch newly paid expenses, group them, and schedule invoice creation.

    Only expenses updated after the last COMPLETE fetch are pulled.  On
    success the task log is completed and invoice creation is scheduled for
    the new groups; credential and unexpected failures are recorded on the
    task log instead of propagating.

    :param workspace_id: workspace to process
    :param task_log: the FETCHING EXPENSES task log to update
    :param user: user email forwarded to the invoice scheduler
    :return: list of created expense group ids (empty on failure)
    """
    expense_group_ids = []
    try:
        updated_at = None
        # Use the latest successful fetch as the incremental watermark.
        task_logs = TaskLog.objects.filter(workspace__id=workspace_id, type='FETCHING EXPENSES',
                                           status='COMPLETE')
        if task_logs:
            updated_at = task_logs.latest().created_at
        expenses = Expense.fetch_paid_expenses(workspace_id, updated_at)
        expense_objects = Expense.create_expense_objects(expenses)
        connection = connect_to_fyle(workspace_id)
        expense_groups = ExpenseGroup.group_expense_by_report_id(expense_objects, workspace_id, connection)
        expense_group_objects = ExpenseGroup.create_expense_groups(expense_groups)
        for expense_group in expense_group_objects:
            expense_group_ids.append(expense_group.id)
        task_log.status = 'COMPLETE'
        task_log.detail = 'Expense groups created successfully!'
        task_log.save()
        schedule_invoice_creation(workspace_id, expense_group_ids, user)
    except FyleCredential.DoesNotExist:
        LOGGER.error('Error: Fyle Credentials not found for this workspace.')
        task_log.detail = {
            'error': 'Please connect your Source (Fyle) Account'
        }
        task_log.status = 'FYLE CONNECTION ERROR'
        task_log.save()
    except Exception:
        # Unexpected failure: capture the traceback and mark the task FATAL.
        error = traceback.format_exc()
        LOGGER.exception(f'Error: Workspace id - {workspace_id}\n{error}')
        task_log.detail = {
            'error': 'Please contact system administrator.'
        }
        task_log.status = 'FATAL'
        task_log.save()
    return expense_group_ids
def check_mappings(expense_group):
    """Verify that every mapping needed to invoice *expense_group* exists.

    Checks the employee mapping plus a category mapping per expense and,
    when an expense carries a project, the project mapping.  Each missing
    mapping is recorded as an invalid placeholder row (duplicate-row races
    are ignored) and collected into one error message.

    :raises MissingMappingsError: listing every missing mapping
    """
    mappings_error = ""
    employee_email = expense_group.description.get("employee_email")
    if not EmployeeMapping.objects.filter(workspace=expense_group.workspace,
                                          employee_email=employee_email).exists():
        mappings_error += f"Employee mapping missing for employee_email: {employee_email} \n"
        try:
            EmployeeMapping.objects.create(workspace=expense_group.workspace,
                                           employee_email=employee_email, invalid=True)
        except (psycopg2.errors.UniqueViolation, IntegrityError):
            # Placeholder already created concurrently; nothing to do.
            pass
    for expense in expense_group.expenses.all():
        if not CategoryMapping.objects.filter(workspace=expense_group.workspace,
                                              category=expense.category).exists():
            mappings_error += f"Category mapping missing for category name: {expense.category} \n"
            try:
                CategoryMapping.objects.create(workspace=expense_group.workspace, category=expense.category,
                                               sub_category=expense.sub_category,
                                               invalid=True)
            except (psycopg2.errors.UniqueViolation, IntegrityError):
                pass
        if expense.project is not None:
            if not ProjectMapping.objects.filter(workspace=expense_group.workspace,
                                                 project_name=expense.project).exists():
                mappings_error += f"Project mapping missing for project_name: {expense.project}"
                try:
                    ProjectMapping.objects.create(workspace=expense_group.workspace,
                                                  project_name=expense.project, invalid=True)
                except (psycopg2.errors.UniqueViolation, IntegrityError):
                    pass
    if mappings_error:
        raise MissingMappingsError(message=mappings_error)
def create_invoice_and_post_to_xero(expense_group, task_log):
    """
    Create an invoice for *expense_group* and post it to Xero.

    Validates mappings, builds the Invoice and its line items, posts the
    payload to Xero and stores the returned InvoiceID.  Every failure mode
    (missing Xero credentials, missing mappings, unexpected errors) marks
    the expense group Failed and records the reason on the task log.

    :param expense_group: group of expenses to invoice
    :param task_log: CREATING INVOICE task log to update
    :return: None
    """
    try:
        check_mappings(expense_group)
        invoice_id = Invoice.create_invoice(expense_group)
        InvoiceLineItem.create_invoice_line_item(invoice_id, expense_group)
        xero_sdk_connection = connect_to_xero(expense_group.workspace.id)
        invoice_obj = Invoice.objects.get(id=invoice_id)
        invoice_data = generate_invoice_request_data(invoice_obj)
        response = post_invoice(invoice_data, xero_sdk_connection)
        # Persist the Xero-side id of the created invoice.
        for invoice in response["Invoices"]:
            invoice_obj.invoice_id = invoice["InvoiceID"]
            invoice_obj.save()
        expense_group.status = 'Complete'
        expense_group.save()
        task_log.invoice = invoice_obj
        task_log.detail = 'Invoice created successfully!'
        task_log.status = 'COMPLETE'
        task_log.save()
    except XeroCredential.DoesNotExist:
        # No destination credentials configured for this workspace.
        LOGGER.error('Error: Xero Credentials not found for this workspace.')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': 'Please connect your Destination (Xero) Account'
        }
        task_log.status = 'XERO CONNECTION ERROR'
        task_log.save()
    except MissingMappingsError as error:
        # Mapping validation failed; surface the collected message.
        LOGGER.error(f'Error: {error.message}')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': error.message
        }
        task_log.status = 'MISSING MAPPINGS'
        task_log.save()
    except Exception:
        # Unexpected failure: capture the traceback and mark the task FATAL.
        error = traceback.format_exc()
        LOGGER.exception(f'Error: Workspace id - {task_log.workspace.id}\n{error}')
        expense_group.status = 'Failed'
        expense_group.save()
        task_log.detail = {
            'error': 'Please contact system administrator.'
        }
        task_log.status = 'FATAL'
        task_log.save()
def generate_invoice_request_data(invoice):
    """Build the Xero ACCPAY invoice payload for *invoice*.

    :param invoice: Invoice instance with related invoice_line_items
    :return: dict shaped as required by the Xero invoices endpoint
    """
    line_items = []
    for item in invoice.invoice_line_items.all():
        line_items.append({
            "Description": item.description,
            "Quantity": "1",
            "UnitAmount": str(item.amount),
            "AccountCode": item.account_code,
            "Tracking": [{
                "Name": item.tracking_category_name,
                "Option": item.tracking_category_option,
            }]
        })
    return {
        "Type": "ACCPAY",
        "Contact": {
            "Name": invoice.contact_name,
        },
        "DateString": str(invoice.date),
        "InvoiceNumber": invoice.invoice_number,
        "LineAmountTypes": "Exclusive",
        "LineItems": line_items,
    }
def post_invoice(data, xero):
    """Create an invoice in Xero.

    :param data: request payload built by generate_invoice_request_data
    :param xero: authenticated Xero connection object
    :return: response data from the Xero Invoices API
    """
    return xero.invoices.post(data)
| akshay-codemonk/fyle-xero | apps/task_log/tasks.py | tasks.py | py | 10,343 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "apps.task_log.models.TaskLog.objects.create",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "apps.task_log.models.TaskLog.objects",
"line_number": 27,
"usage_type": "at... |
35366813632 | # Image Credits
# Bullet and Spaceship sprite: https://q.utoronto.ca/courses/288975/files/24417060?module_item_id=4444455
# Dinosaur sprite: https://arks.itch.io/dino-characters
# Block sprite: https://replit.com/talk/ask/Pygame-Sprite-Graphics/38044
# Gem, Box, Half platform: https://opengameart.org/content/platformer-art-deluxe
# imports
import pygame
import numpy
import spritesheet
import random
from pygame.locals import *
pygame.init()
#width and height for screen
width = 1500
height = 400
screen = pygame.display.set_mode((width, height))
bullets = pygame.sprite.Group()
# colour constants
BLACK = (0, 0, 0)
clear = (0, 0, 0, 0)
class Sprite(pygame.sprite.Sprite):
    """Minimal image-backed sprite positioned by its centre point."""

    def __init__(self, image, startx, starty):
        super().__init__()
        self.image = pygame.image.load(image)
        self.rect = self.image.get_rect()
        self.rect.center = [startx, starty]

    def update(self):
        # Base sprites are static; subclasses override with real behaviour.
        pass

    def draw(self, screen):
        # Blit this sprite at its current rect position.
        screen.blit(self.image, self.rect)
class Player(Sprite):
    """Player-controlled dinosaur: walk animation, gravity, jumping and
    horizontal collision handling against platform groups."""

    # Vertical velocity in pixels per frame; positive moves downward.
    change_y = 0

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Load and slice the dinosaur sprite sheet.
        sprite_sheet_image = pygame.image.load('dino.png').convert_alpha()
        sprite_sheet = spritesheet.SpriteSheet(sprite_sheet_image)
        self.dinos = []                    # per-action lists of animation frames
        self.dinosteps = [4, 6, 3, 4]      # frame count of each action row
        self.action = 0                    # current action (0 = idle, 1 = walk)
        self.t = pygame.time.get_ticks()   # timestamp of the last frame change
        self.cooldown = 100                # ms between animation frames
        self.frame = 1
        self.count = 0
        self.direction = True              # True = facing right
        self.bg = True                     # which of the two backgrounds is shown
        self.bullets = 0                   # extra enemy bullets earned per crossing
        # Set up the background image.
        self.background = pygame.image.load('background.png')
        self.background = pygame.transform.scale(self.background, (width, height))
        # Slice the sheet: one list of frames per action row.
        for steps in self.dinosteps:
            frames = []
            for _ in range(steps):
                frames.append(sprite_sheet.get_image(self.count, 24, 24, 3, BLACK))
                self.count += 1
            self.dinos.append(frames)
        # Initial displayed sprite.
        self.image = self.dinos[0][0]
        self.rect = self.image.get_rect()
        self.rect.y = 330

    def walk_animation(self):
        """Advance the animation frame once the cooldown has elapsed and
        pick the correctly-flipped image for the facing direction."""
        curr = pygame.time.get_ticks()
        if curr - self.t >= self.cooldown:
            self.frame += 1
            self.t = curr
        # BUG FIX: the original wrapped on len(self.dinos) — the number of
        # action rows (4) — not the frame count of the *current* action, so
        # the 6-frame walk row never displayed frames 4-5 (and a 3-frame row
        # could index out of range).
        if self.frame >= len(self.dinos[self.action]):
            self.frame = 0
        # Switch images based on facing direction.
        if self.direction:
            self.image = self.dinos[self.action][self.frame]
        else:
            self.image = pygame.transform.flip(self.dinos[self.action][self.frame], True, False)

    def jump(self):
        # Give the player an upward impulse; gravity pulls it back down.
        self.change_y = -10

    # citation: https://q.utoronto.ca/courses/288975/files/24582167?module_item_id=4467158
    def calc_grav(self):
        """Apply gravity and clamp the player to the bottom of the screen."""
        if self.change_y == 0:
            self.change_y = 1
        else:
            self.change_y += .35
        # See if we are on the ground.
        if self.rect.y >= height - self.rect.height and self.change_y >= 0:
            self.change_y = 0
            self.rect.y = height - self.rect.height

    def check_collision(self, boxes):
        """Resolve horizontal overlaps with platforms based on move direction."""
        block_hit_list = pygame.sprite.spritecollide(self, boxes, False)
        for block in block_hit_list:
            if self.direction:
                self.rect.right = block.rect.left
            elif not self.direction:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right

    def check_under(self, boxes):
        """Resolve vertical overlaps: land on top of or bounce under platforms."""
        block_hit_list = pygame.sprite.spritecollide(self, boxes, False)
        for block in block_hit_list:
            # Reset our position based on the top/bottom of the object.
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            elif self.change_y < 0:
                self.rect.top = block.rect.bottom
            self.change_y = 0

    def update(self, boxes):
        """Per-frame update: gravity, input-driven movement and screen wrap."""
        self.calc_grav()
        if self.change_y > 0:
            self.check_under(boxes)
        # Move the player in the direction they press.
        key = pygame.key.get_pressed()
        if key[pygame.K_LEFT]:
            self.rect.x -= 5
            self.action = 1
            self.direction = False
            self.walk_animation()
            self.check_collision(boxes)
        elif key[pygame.K_RIGHT]:
            self.rect.x += 5
            self.action = 1
            self.direction = True
            self.walk_animation()
            self.check_collision(boxes)
        else:
            self.action = 0
            self.walk_animation()
        self.rect.y += self.change_y
        # Swap the background and grant extra bullets when the player
        # crosses the right edge of the screen.
        if self.rect.x > 1400:
            if self.bg:
                self.bg = False
                self.background = pygame.image.load('background_01.png')
                self.background = pygame.transform.scale(self.background, (width, height))
            else:
                self.bg = True
                self.background = pygame.image.load('background.png')
                self.background = pygame.transform.scale(self.background, (width, height))
            self.rect.x = 0
            self.bullets += 2
class Enemy(Sprite):
    """Enemy that patrols vertically on the right edge and fires lasers."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Load and scale the enemy sprite.
        raw_image = pygame.image.load("enemy.png").convert_alpha()
        self.image = pygame.transform.scale(raw_image, (100, 100))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.radius = 20
        self.rect.x = 1400
        self.rect.y = 100
        self.speedy = 3

    def update(self, player):
        # Sweep between the top and bottom of the screen, bouncing at the bounds.
        self.rect.y += self.speedy
        if not 50 <= self.rect.y < 350:
            self.speedy = -self.speedy
        self.shoot(player)
        bullets.update()

    def shoot(self, player):
        # Top up the bullet group in proportion to how many screens the
        # player has crossed (player.bullets grows over time).
        while player.bullets >= len(bullets):
            shot = Bullet(self.rect.x, random.randint(self.rect.top, self.rect.bottom))
            bullets.add(shot)
class Bullet(Sprite):
    """Laser projectile that travels left until it leaves the screen."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        # Load the laser image and set the spawn position.
        self.image = pygame.image.load("laser.png").convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x

    def update(self):
        # Drift toward the player; despawn once fully off screen.
        self.rect.x -= 3
        if self.rect.x < 0:
            self.kill()


class Gem(Sprite):
    """Collectible gem tile."""

    def __init__(self, startx, starty):
        super().__init__("gemBlue.png", startx, starty)


class Ledge(Sprite):
    """Half-height grass platform tile."""

    def __init__(self, startx, starty):
        super().__init__("grassHalf.png", startx, starty)


class Lava(Sprite):
    """Hazard tile: touching it ends the game."""

    def __init__(self, startx, starty):
        super().__init__("liquidLavaTop_mid.png", startx, starty)


class Platform(Sprite):
    """Solid box tile the player can stand on and collide with."""

    def __init__(self, startx, starty):
        super().__init__("boxAlt.png", startx, starty)
class MovablePlatform(Platform):
    """A platform that patrols horizontally between two x positions."""

    def __init__(self, startx, starty, start, end, speed):
        super().__init__(startx, starty)
        self.start = start
        self.end = end
        self.speed = speed
        # +1 when moving toward `end`, -1 when moving toward `start`.
        self.direction = numpy.sign(end - start)

    def update(self):
        # Advance, then reverse direction upon reaching either boundary.
        self.rect.x += self.speed * self.direction
        if self.rect.x <= self.start:
            self.direction = numpy.sign(self.end - self.start)
        elif self.rect.x >= self.end:
            self.direction = numpy.sign(self.start - self.end)
def main():
    """Game entry point: build the level, then run the event/draw loop."""
    pygame.init()
    screen = pygame.display.set_mode((width, height))
    clock = pygame.time.Clock()
    # All sprites live in these groups.
    player = Player()
    players = pygame.sprite.Group()
    players.add(player)
    enemies = pygame.sprite.Group()
    enemy = Enemy()
    enemies.add(enemy)
    platforms = pygame.sprite.Group()
    dangerZone = pygame.sprite.Group()
    gems = pygame.sprite.Group()
    # Platform coordinates.
    platforms.add(Platform(225, 365))
    platforms.add(Platform(295, 365))
    platforms.add(Platform(365, 365))
    platforms.add(Platform(365, 295))
    platforms.add(Ledge(580, 170))
    platforms.add(Platform(755, 295))
    # Left wall border.
    platforms.add(Platform(-50, 365))
    platforms.add(Platform(-50, 295))
    platforms.add(Platform(-50, 225))
    platforms.add(Platform(-50, 155))
    platforms.add(Platform(-50, 85))
    platforms.add(Platform(-50, 15))
    # Right wall border.
    platforms.add(Platform(1535, 0))
    platforms.add(Platform(1535, 70))
    platforms.add(Platform(1535, 140))
    platforms.add(Platform(1535, 210))
    platforms.add(Platform(1535, 280))
    platforms.add(Platform(1535, 350))
    platforms.add(Platform(1535, 420))
    platforms.add(Platform(755, 365))
    platforms.add(MovablePlatform(485, 295, 400, 650, 1))
    # Danger zones.
    dangerZone.add(Lava(435, 365))
    dangerZone.add(Lava(505, 365))
    dangerZone.add(Lava(575, 365))
    dangerZone.add(Lava(645, 365))
    dangerZone.add(Lava(715, 365))
    # Gem placement.
    gems.add(Gem(585, 115))

    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    player.jump()
        pygame.event.pump()
        # Draw loop.
        screen.fill((0, 0, 0))
        screen.blit(player.background, (0, -1))
        # BUG FIX: the original ran a separate `range(len(group))` loop after
        # each draw loop, so it repeatedly collision-tested only the *last*
        # sprite of each group (the leaked loop variable). Test every sprite.
        for gem in gems:
            gem.draw(screen)
            if player.rect.colliderect(gem.rect):
                gem.image.fill(clear)
        dangerZone.draw(screen)  # draw once, not once per lava tile
        for lava in dangerZone:
            if player.rect.colliderect(lava.rect):
                running = False
        for foe in enemies:
            foe.draw(screen)
            if player.rect.colliderect(foe.rect):
                running = False
        for b in bullets:
            b.draw(screen)
            if player.rect.colliderect(b.rect):
                running = False
        platforms.draw(screen)
        player.draw(screen)
        player.update(platforms)
        pygame.display.flip()
        platforms.update()
        dangerZone.update()
        gems.update()
        enemies.update(player)
        clock.tick(60)
    pygame.quit()


if __name__ == "__main__":
    main()
| mashalll/cct211 | main.py | main.py | py | 10,968 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite... |
16182812319 | import boto3
class S3:
    """Thin convenience wrapper around boto3 for basic S3 file operations."""

    def __init__(self, aws_access_key_id, aws_secret_access_key):
        # Keep both the low-level client and the resource API handy.
        self.s3_client = boto3.client(
            's3',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)
        self.s3 = boto3.resource(
            's3',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)

    def upload_file(self, file_name, bucket):
        """Upload `file_name` to `bucket`, keyed by its base name."""
        object_name = file_name.split('/')[-1]
        return self.s3_client.upload_file(file_name, bucket, object_name)

    def download_file(self, file_name, bucket):
        """Download `file_name` from `bucket` into downloads/ and return the local path."""
        output = f"downloads/{file_name}"
        self.s3.Bucket(bucket).download_file(file_name, output)
        return output

    def delete_file(self, file_name, bucket):
        """Delete `file_name` from `bucket`."""
        self.s3.Object(bucket, file_name).delete()

    def list_files(self, bucket):
        """List the objects in `bucket`.

        Returns a single placeholder message entry when the bucket is empty
        (boto3 omits the 'Contents' key in that case).
        """
        contents = []
        try:
            contents.extend(self.s3_client.list_objects(Bucket=bucket)['Contents'])
        except KeyError:
            print("No contents available")
            contents.append("No items in the bucket... Add some...")
        return contents
| satishvis/s3test | s3_demo.py | s3_demo.py | py | 1,544 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 10,
"usage_type": "call"
}
] |
25717509151 | from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import (
DataSource,
PipelineContext,
Query,
NotFoundError,
validate_query,
)
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_default_locale(
    query: MutableMapping[str, Any], context: PipelineContext
) -> str:
    """Return the default locale of the platform named in the query."""
    platform = query["platform"]
    return platform.default_locale
class ThirdPartyCodeAPI(KernelSource):
    """Kernel-backed data source for Riot's third-party verification strings."""

    @DataSource.dispatch
    def get(
        self,
        type: Type[T],
        query: MutableMapping[str, Any],
        context: PipelineContext = None,
    ) -> T:
        pass

    @DataSource.dispatch
    def get_many(
        self,
        type: Type[T],
        query: MutableMapping[str, Any],
        context: PipelineContext = None,
    ) -> Iterable[T]:
        pass

    #######################
    # Verification String #
    #######################

    _validate_get_verification_string_query = (
        Query.has("platform").as_(Platform).also.has("summoner.id").as_(str)
    )

    @get.register(VerificationStringDto)
    @validate_query(_validate_get_verification_string_query, convert_region_to_platform)
    def get_verification_string(
        self, query: MutableMapping[str, Any], context: PipelineContext = None
    ) -> VerificationStringDto:
        """Fetch the third-party verification string for a single summoner."""
        parameters = {"platform": query["platform"].value}
        endpoint = "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
            summonerId=query["summoner.id"]
        )
        try:
            data = self._get(endpoint=endpoint, parameters=parameters)
        except (ValueError, APINotFoundError) as error:
            raise NotFoundError(str(error)) from error
        data = {"string": data}
        data["region"] = query["platform"].region.value
        data["summonerId"] = query["summoner.id"]
        return VerificationStringDto(data)

    _validate_get_many_verification_string_query = (
        Query.has("platforms").as_(Iterable).also.has("summoner.ids").as_(Iterable)
    )

    @get_many.register(VerificationStringDto)
    @validate_query(
        _validate_get_many_verification_string_query, convert_region_to_platform
    )
    def get_many_verification_string(
        self, query: MutableMapping[str, Any], context: PipelineContext = None
    ) -> Generator[VerificationStringDto, None, None]:
        """Lazily fetch verification strings for many (platform, summoner) pairs."""

        def generator():
            for platform, summoner_id in zip(query["platforms"], query["summoner.ids"]):
                platform = Platform(platform.upper())
                # BUG FIX: the original read query["platform"] outside this
                # loop, but the many-item query is validated to carry
                # "platforms" (plural) only, so that lookup raised KeyError.
                # Derive the request parameters from the per-item platform.
                parameters = {"platform": platform.value}
                endpoint = (
                    "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(
                        summonerId=summoner_id
                    )
                )
                try:
                    data = self._get(endpoint=endpoint, parameters=parameters)
                except APINotFoundError as error:
                    raise NotFoundError(str(error)) from error
                data = {"string": data}
                data["region"] = platform.region.value
                data["summonerId"] = summoner_id
                yield VerificationStringDto(data)

        return generator()
| meraki-analytics/cassiopeia | cassiopeia/datastores/kernel/thirdpartycode.py | thirdpartycode.py | py | 3,374 | python | en | code | 522 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.MutableMapping",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datapipelines.Pipelin... |
6911964039 | from os.path import dirname, realpath, join
import time
from datetime import datetime
from rich import box
from rich.table import Table
from rich.console import Console
SCRIPT_PATH = dirname(realpath(__file__))
class Results:
    """Accumulates per-dataset benchmark rows and renders them as a rich table
    saved to a markdown file under performance_results/."""

    RESULTS_PATH = "results"

    def __init__(self, results_name: str):
        self.method = ""
        self.start_time = None  # epoch seconds, set by set_start_time()
        self.results_name = results_name
        self.results_path = join(SCRIPT_PATH, "..", "..", "performance_results", f"{results_name}.md")
        self.table = self.initiate_table()
        self.accuracies = list()

    @staticmethod
    def initiate_table():
        """Create the empty results table with its fixed column layout."""
        table = Table(title="", box=box.MINIMAL_DOUBLE_HEAD)
        table.add_column("Dataset")
        table.add_column("Method")
        # Numeric columns are right-justified.
        for heading in ("Train \n size", "Test \n size", "Time", "Acc."):
            table.add_column(heading, justify="right")
        return table

    def set_start_time(self):
        """Record the wall-clock start of a benchmark run."""
        print("start time")
        self.start_time = time.time()

    def get_total_time(self):
        """Return the elapsed seconds since set_start_time() as e.g. '42s'."""
        elapsed = round(time.time() - self.start_time)
        print(f"{datetime.now():%Y_%m_%d_%H_%M}", f"finished in {elapsed} seconds")
        return str(elapsed) + "s"

    @staticmethod
    def format_dataset_name(name):
        """Turn a file name like 'a_b.tsv' into the display label 'a b'."""
        return name.replace(".tsv", "").replace("_", " ")

    def save_result(self, dataset: str, method: str, accuracy: float, train_length: int, test_length: int):
        """Append one benchmark row and remember its accuracy for averaging."""
        self.accuracies.append(accuracy)
        self.table.add_row(
            self.format_dataset_name(dataset),
            method,
            str(train_length),
            str(test_length),
            self.get_total_time(),
            str(round(accuracy)) + "%",
        )

    def write_results(self):
        """Append the average-accuracy row and persist the table to disk."""
        accuracies_average = round(sum(self.accuracies) / len(self.accuracies))
        self.table.add_row("", "", "", "", "", "")
        self.table.add_row("average", "", "", "", "", str(accuracies_average) + "%")
        console = Console(record=True)
        console.print(self.table)
        console.save_text(self.results_path)
| huridocs/pdf_metadata_extraction | src/performance/Results.py | Results.py | py | 2,170 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "rich.table.Table",
"l... |
1963001410 | from django.conf.urls import include, url
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext
from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks
from . import urls
from .richtext import (
ContentstateSnippetLinkConversionRule, ContentstateSnippetEmbedConversionRule,
SnippetLinkHandler, SnippetEmbedHandler,
)
@hooks.register("register_rich_text_features")
def register_snippet_link_feature(features):
    """Register the Draftail 'snippet-link' rich-text feature (SNIPPET entity)."""
    feature_name = "snippet-link"
    type_ = "SNIPPET"
    features.register_link_type(SnippetLinkHandler)
    # The chooser-modal scripts are shared with the embed feature below.
    plugin_js = [
        "wagtailsnippets/js/snippet-chooser-modal.js",
        "wagtail_draftail_snippet/js/snippet-model-chooser-modal.js",
        "wagtail_draftail_snippet/js/wagtail-draftail-snippet.js",
    ]
    features.register_editor_plugin(
        "draftail",
        feature_name,
        draftail_features.EntityFeature(
            {"type": type_, "icon": "snippet", "description": ugettext("Snippet Link")},
            js=plugin_js,
        ),
    )
    features.register_converter_rule(
        "contentstate", feature_name, ContentstateSnippetLinkConversionRule
    )


@hooks.register("register_rich_text_features")
def register_snippet_embed_feature(features):
    """Register the Draftail 'snippet-embed' rich-text feature (SNIPPET-EMBED entity)."""
    feature_name = "snippet-embed"
    type_ = "SNIPPET-EMBED"
    features.register_embed_type(SnippetEmbedHandler)
    plugin_js = [
        "wagtailsnippets/js/snippet-chooser-modal.js",
        "wagtail_draftail_snippet/js/snippet-model-chooser-modal.js",
        "wagtail_draftail_snippet/js/wagtail-draftail-snippet.js",
    ]
    features.register_editor_plugin(
        "draftail",
        feature_name,
        draftail_features.EntityFeature(
            {"type": type_, "icon": "code", "description": ugettext("Snippet Embed")},
            js=plugin_js,
        ),
    )
    features.register_converter_rule(
        "contentstate", feature_name, ContentstateSnippetEmbedConversionRule
    )
@hooks.register("insert_editor_js")
def editor_js():
    """Inject the chooser-modal URLs that the Draftail plugins read from
    window.chooserUrls."""
    return format_html(
        """
    <script>
        window.chooserUrls.snippetLinkModelChooser = '{0}';
        window.chooserUrls.snippetEmbedModelChooser = '{1}';
    </script>
    """,
        reverse("wagtaildraftailsnippet:choose-snippet-link-model"),
        reverse("wagtaildraftailsnippet:choose-snippet-embed-model"),
    )


@hooks.register("register_admin_urls")
def register_admin_urls():
    """Mount this app's chooser URLconf under snippets/ in the Wagtail admin."""
    return [url(r"^snippets/", include(urls, namespace="wagtaildraftailsnippet"))]
| cividi/wagtail-draftail-snippet | wagtail_draftail_snippet/wagtail_hooks.py | wagtail_hooks.py | py | 2,730 | python | en | code | null | github-code | 36 | [
{
"api_name": "richtext.SnippetLinkHandler",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "wagtail.admin.rich_text.editors.draftail.features.EntityFeature",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.rich_text.editors.draftail.features... |
1806212132 | from __future__ import annotations
import asyncio
import concurrent.futures
import dataclasses
import functools
import logging
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import jinja2
from .. import scraper
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class MailConfig(object):
    """SMTP connection settings for outgoing mail.

    `user`/`passwd` default to None, meaning no authentication.
    NOTE(review): annotations say `str` but the defaults are None —
    Optional[str] would be more accurate; kept as-is for compatibility.
    """
    from_addr: str
    host: str
    port: str
    tls: bool
    user: str = None
    passwd: str = None
def _do_send_email(cfg: MailConfig, to_addr: str, subject: str, text: str):
    """Build and deliver a plain-text email via SMTP (blocking).

    Runs inside the Mailer's process-pool executor so the event loop is
    never blocked by SMTP round-trips.
    """
    message = MIMEMultipart()
    message['Subject'] = subject
    message['From'] = cfg.from_addr
    message['To'] = to_addr
    message['Date'] = formatdate(localtime=True)
    message['Message-ID'] = make_msgid('punkow')
    message.attach(MIMEText(text))

    connection = smtplib.SMTP(host=cfg.host, port=cfg.port)
    if cfg.tls:
        connection.starttls()
    if cfg.user is not None:
        connection.login(cfg.user, cfg.passwd)
    try:
        connection.sendmail(cfg.from_addr, [to_addr], message.as_string())
    finally:
        connection.quit()
    logger.info("Sent an email")
class Mailer(object):
    """Renders jinja2 email templates and sends them off the event loop."""

    def __init__(self, loop: asyncio.AbstractEventLoop, config: MailConfig, base_url: str):
        self._loop = loop
        self._config = config
        self._base_url = base_url
        template_dir = os.path.join(os.path.dirname(__file__), 'email_templates')
        self._tpl = jinja2.Environment(
            loader=jinja2.FileSystemLoader(template_dir),
            autoescape=jinja2.select_autoescape(['html', 'xml'])
        )
        # SMTP delivery is blocking, so it runs in worker processes.
        self._executor = concurrent.futures.ProcessPoolExecutor(max_workers=2)

    def _render(self, template_name, **context):
        # Helper: load a template and render it with the given context.
        return self._tpl.get_template(template_name).render(**context)

    async def _send_email(self, to_addr, subject, text):
        await self._loop.run_in_executor(
            self._executor, _do_send_email, self._config, to_addr, subject, text)

    async def send_success_email(self, email, booking: scraper.BookingResult):
        text = self._render(
            "success.txt",
            meta=booking.metadata,
            change_url=scraper.BASE_URL + scraper.MANAGE_URL,
            process_id=booking.process_id,
            auth_code=booking.auth_key,
        )
        await self._send_email(email, "Your appointment was booked", text)

    async def send_confirmation_email(self, email, req_key):
        text = self._render("confirmation.txt", base_url=self._base_url, req_key=req_key)
        await self._send_email(email, "Your booking request was registered", text)

    async def send_cancel_email(self, email, req_key):
        text = self._render("cancel.txt", base_url=self._base_url, req_key=req_key)
        await self._send_email(email, "Your booking request was canceled", text)

    def start_queue(self) -> AsyncMailQueue:
        """Create a queue that batches sends scheduled on this mailer."""
        return AsyncMailQueue(self._loop, self)
class AsyncMailQueue(object):
    """Collects scheduled mail sends and waits for them on context exit.

    Used as an async context manager: emails queued inside the
    ``async with`` block are awaited (best-effort) when the block exits.
    """

    def __init__(self, loop: asyncio.AbstractEventLoop, mailer: Mailer):
        self._loop = loop
        self._mailer = mailer
        self._queue = []  # concurrent.futures.Future objects for queued sends

    async def __aenter__(self) -> AsyncMailQueue:
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Wait for every queued send to finish before leaving the context.
        if self._queue:
            try:
                await self._loop.run_in_executor(None, functools.partial(
                    concurrent.futures.wait, self._queue))
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort semantics
            # but only absorb ordinary exceptions.
            except Exception:
                logger.exception("Exception in waiting for mail sending")

    def _append_task(self, coro):
        # Schedule the coroutine on the owning loop; safe from other threads.
        self._queue.append(asyncio.run_coroutine_threadsafe(coro, self._loop))

    def send_success_email(self, email, booking: scraper.BookingResult):
        self._append_task(self._mailer.send_success_email(email, booking))

    def send_confirmation_email(self, email, req_key):
        self._append_task(self._mailer.send_confirmation_email(email, req_key))

    async def send_cancel_email(self, email, req_key):
        # NOTE(review): unlike its siblings this method is declared async even
        # though it only schedules work; kept async so existing `await`
        # callers keep working — confirm and unify with the other senders.
        self._append_task(self._mailer.send_cancel_email(email, req_key))
| janLo/punkow | punkow/service/mailer.py | mailer.py | py | 4,021 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 33,
"usage_type": "call"
},
{
"a... |
70173864105 | import os
import re
from typing import Any, Iterable, List
from flask import Flask, request
from werkzeug.exceptions import HTTPException
app = Flask(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
class CustomBadRequest(HTTPException):
    """HTTP 400 raised when a query parameter is missing or the file is absent.

    BUG FIX: werkzeug's HTTPException derives the response status from the
    class attribute ``code``; the original set ``status_code``, which
    werkzeug ignores, so the intended 400 was never sent.
    """
    code = 400
    description = 'Bad request'
def parse_query(file: Iterable[str], query: str) -> List[str]:
    """Apply a pipe-separated chain of commands to the lines of `file`.

    Supported commands (each step is ``cmd`` or ``cmd:arg``, joined by ``|``):
      filter:<text>  keep lines containing <text>
      map:<idx>      take the <idx>-th space-separated token of each line
      unique         drop duplicates (order not preserved)
      sort:<dir>     sort ascending, descending when <dir> == "desc"
      limit:<n>      keep at most <n> items
      regex:<pat>    keep items matching the regular expression <pat>

    Returns a (possibly lazy) iterable of result strings.
    """
    result = map(str.strip, file)
    for step in query.split("|"):
        parts = step.split(":")
        command = parts[0]
        # Default arguments on the lambdas bind the step's argument now,
        # so later steps can't clobber it before the lazy pipeline runs.
        if command == "filter":
            result = filter(lambda item, needle=parts[1]: needle in item, result)
        elif command == "map":
            result = map(lambda item, idx=int(parts[1]): item.split(" ")[idx], result)
        elif command == "unique":
            result = set(result)
        elif command == "sort":
            result = sorted(result, reverse=parts[1] == "desc")
        elif command == "limit":
            result = list(result)[:int(parts[1])]
        elif command == "regex":
            result = filter(lambda item, pattern=parts[1]: re.search(pattern, item), result)
    return result
@app.route("/perform_query")
def perform_query() -> Any:
    """Run the `query` pipeline over `file_name` in DATA_DIR, return plain text.

    Raises CustomBadRequest (400) when a required parameter is missing or
    the requested file does not exist.
    """
    try:
        query = request.args['query']
        file_name = request.args['file_name']
    except KeyError:
        # BUG FIX: message had typos ("Neded uery") and a pointless f-prefix.
        raise CustomBadRequest(description="Needed query was not found")
    file_path = os.path.join(DATA_DIR, file_name)
    if not os.path.exists(file_path):
        # BUG FIX: the exception object was *returned* instead of raised,
        # so Flask tried to serialize it as a response body rather than
        # producing the intended 400 error.
        raise CustomBadRequest(description=f"{file_name} was not found")
    with open(file_path) as file:
        res = parse_query(file, query)
        data = '\n'.join(res)
    return app.response_class(data, content_type="text/plain")
| IgorVolokho99/LESSON_24_HomeWork | app.py | app.py | py | 1,798 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_n... |
36515561991 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 10:11:41 2019
@author: Mohammed
"""
from sklearn import datasets
from sklearn import metrics
from sklearn import linear_model
from sklearn import svm
from sklearn import model_selection
import matplotlib.pyplot as plt
def main():
    """Compare three classifiers on the sklearn digits dataset with 2-fold CV."""
    digits = datasets.load_digits()
    print(digits.DESCR)
    print()
    plt.imshow(digits.data[0, :].reshape(8, 8))

    kf = model_selection.KFold(n_splits=2, shuffle=True)
    for train_index, test_index in kf.split(digits.data):
        # Fresh classifiers per fold; the original duplicated this
        # fit/predict/score sequence three times verbatim.
        classifiers = [
            ("Perceptron accuracy score: ", linear_model.Perceptron()),
            ("SVM with RBF kernel accuracy score: ", svm.SVC(kernel="rbf", gamma=1e-3)),
            ("SVM with Sigmoid kernel accuracy score: ", svm.SVC(kernel="sigmoid", gamma=1e-4)),
        ]
        for label, clf in classifiers:
            clf.fit(digits.data[train_index], digits.target[train_index])
            prediction = clf.predict(digits.data[test_index])
            score = metrics.accuracy_score(digits.target[test_index], prediction)
            print(label, score)
        print()


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers a full run.
    main()
| mjachowdhury/MachineLearning-4thYear-CIT | Lab6/lab6.py | lab6.py | py | 1,564 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name":... |
17991705417 | import pandas as pd
import numpy as np
from prediction2 import create_model, evaluate_model
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.grid_search import GridSearchCV
def get_data():
    """Load the cleaned bank-marketing data and derive modelling columns.

    Adds `young_cust` (age < 26), `mature_cust` (age > 60) and a boolean
    `target` column (subscribed == "yes"), dropping the raw `y` label.
    """
    df = pd.read_csv("data/clean_data.csv")
    df["young_cust"] = df.age < 26
    df["mature_cust"] = df.age > 60
    df["target"] = df.y == "yes"
    df.drop("y", axis=1, inplace=True)
    return df
def transform_model(df):
    """One-hot encode every categorical column of `df` via pd.get_dummies."""
    return pd.get_dummies(df)
def test_model(df):
    """Baseline-evaluate and grid-search a GradientBoostingClassifier on `df`.

    :param df: frame with a boolean `target` column plus feature columns
    :return: the best estimator found by the grid search
    """
    gbc = GradientBoostingClassifier()
    # Baseline: fit/evaluate with default hyper-parameters.
    test_data(df, gbc)
    # BUG FIX: X_train/X_test/y_train/y_test were referenced below without
    # ever being defined in this scope (NameError at runtime); build the
    # split locally before grid searching.
    target = df.target
    features = df.drop("target", axis=1)
    X_train, X_test, y_train, y_test = train_test_split(features, target)
    gbc_params = {
        'min_samples_leaf': [1, 5, 10, 20],
        'n_estimators': [100, 200, 300, 400],
        'learning_rate': [0.01, 0.025, 0.05, 0.1],
    }
    search = GridSearchCV(gbc, gbc_params, n_jobs=-1, scoring="f1")
    search.fit(X_train, y_train)
    preds_b = search.best_estimator_.predict(X_test)
    evaluate_model(preds_b, y_test)
    return search.best_estimator_
def test_data(df, model, fit=False):
    """Split `df` into train/test, run `model`, and print evaluation metrics.

    :param fit: when True the model is treated as *already fitted* and only
        used to predict — NOTE(review): the flag name is misleading; confirm
        callers' intent before renaming.
    """
    target = df.target
    features = df.drop("target", axis=1)
    X_train, X_test, y_train, y_test = train_test_split(features, target)
    if fit:
        preds = model.predict(X_test)
    else:
        preds, _ = create_model(model, X_train, X_test, y_train)
    evaluate_model(preds, y_test)
# Load the cleaned marketing data and split it by cluster ("persona").
df = get_data()
persona0 = df[df.cluster == 0]
persona1 = df[df.cluster == 1]
persona2 = df[df.cluster == 2]
persona3 = df[df.cluster == 3]

# Build one-hot encoded feature subsets, one column list per persona.
persona0_columns = [
    "poutcome_failure", "poutcome_nonexistent", "poutcome_success", "month_oct", "month_sep",
    "month_mar", "month_may", "month_dec", "month_apr", "education_university.degree", "education_basic.4y",
    "job_blue-collar", "job_admin.", "duration", "campaign", "pdays", "emp.var.rate", "cons.price.idx",
    "euribor3m", "nr.employed", "cell_phone", "target",
]
P0 = transform_model(persona0)[persona0_columns]

persona1_columns = [
    "age", "duration", "campaign", "pdays", "emp.var.rate", "cons.price.idx", "cons.conf.idx",
    "euribor3m", "nr.employed", "cell_phone", "clust_dist", "young_cust", "job_student", "marital_divorced",
    "marital_single", "marital_married", "education_basic.9y", "education_unknown", "month_apr", "month_dec",
    "month_jul", "month_may", "month_mar", "month_oct", "month_sep", "poutcome_success", "poutcome_nonexistent", "target",
]
P1 = transform_model(persona1)[persona1_columns]

persona2_columns = [
    "duration", "campaign", "pdays", "emp.var.rate", "cons.price.idx", "cons.conf.idx",
    "euribor3m", "nr.employed", "cell_phone", "job_admin.", "job_blue-collar",
    "education_university.degree", "month_dec",
    "month_may", "month_mar", "month_oct", "month_sep", "poutcome_success", "poutcome_nonexistent", "target",
]
P2 = transform_model(persona2)[persona2_columns]

persona3_columns = [
    "age", "duration", "campaign", "pdays", "emp.var.rate", "cons.price.idx", "cons.conf.idx",
    "euribor3m", "nr.employed", "cell_phone", "clust_dist", "mature_cust", "job_blue-collar",
    "education_basic.4y", "month_dec", "job_retired", "job_self-employed", "job_services",
    "job_technician", "education_basic.6y", "month_apr", "month_jul", "month_jun", "poutcome_failure",
    "month_may", "month_mar", "month_oct", "month_sep", "poutcome_success", "poutcome_nonexistent", "target",
]
P3 = transform_model(persona3)[persona3_columns]
| dbluiett/more_than_gut | project/customer_preds.py | customer_preds.py | py | 3,594 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.GradientBoostingClassifier",
"line_number": 22,
"usage_type": "call"
},
{
"ap... |
70606645225 | import os
import sys
# 在linux会识别不了包 所以要加临时搜索目录
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import execjs
import time
from datetime import datetime
import pandas as pd
import requests
import json
import akshare as ak
def get_var(js_path='/opt/code/pythonstudy_space/05_quantitative_trading_hive/util/ths.js'):
    """Evaluate the ths.js cookie script and return the anti-crawl `v` token.

    :param js_path: path to the JS source file; previously hard-coded, now a
        parameter with the old value as its default for backward compatibility.
    :return: the computed `v` cookie value
    """
    with open(js_path) as f:
        source = f.read()
    ctx = execjs.compile(source)
    return ctx.call('v')
def get_headers(cookie='Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1=1672230413; historystock=688255%7C*%7C003816%7C*%7C002933%7C*%7C600706%7C*%7C688687; Hm_lvt_da7579fd91e2c6fa5aeb9d1620a9b333=1673161546; log=; user=MDq080MxOjpOb25lOjUwMDo1MTMxNDQ1NjI6NywxMTExMTExMTExMSw0MDs0NCwxMSw0MDs2LDEsNDA7NSwxLDQwOzEsMTAxLDQwOzIsMSw0MDszLDEsNDA7NSwxLDQwOzgsMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDEsNDA7MTAyLDEsNDA6MjQ6Ojo1MDMxNDQ1NjI6MTY3MzY1OTkxNTo6OjE1NzQ1NTQ5ODA6MjY3ODQwMDowOjFkZjgzN2I5YThiZTRiNzBhZTIyZTE2MzViYWFiYjlhODpkZWZhdWx0XzQ6MQ%3D%3D; userid=503144562; u_name=%B4%F3C1; escapename=%25u5927C1; ticket=90f706428300af2c9ad5b9bc8faf3498; user_status=0; utk=bd0610c31e8fad6a9f67c1c47f83cb90; Hm_lpvt_da7579fd91e2c6fa5aeb9d1620a9b333=1673661956; Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1=1673661957; v=A5hM9kejT7kEnWM9jZ0-eQlTac0vgfoIXu7QjdKD5lmEWjbzepHMm671oDEh'):
    """Build request headers with a freshly-computed `v` token spliced into
    the cookie.

    Supply your own account cookie through the `cookie` parameter; the
    trailing `v=` segment is always replaced by a new token from get_var().
    """
    token = get_var()
    prefix = cookie.split('v=')[0]
    return {
        'Cookie': prefix + 'v=' + token,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
    }
def get_all_stock():
    """Fetch the complete self-selected stock list and return it as a DataFrame."""
    url = 'https://t.10jqka.com.cn/newcircle/group/getSelfStockWithMarket/?'
    query = {
        'callback': 'selfStock',
        '_': '1673617915586'
    }
    response = requests.get(url=url, params=query, headers=get_headers())
    # Strip the JSONP wrapper ``selfStock(...);`` before decoding the payload.
    payload = json.loads(response.text[10:-2])
    return pd.DataFrame(payload['result'])
def add_stock_to_account(stock='600111'):
    """Add a single stock code to the 10jqka self-selected list and print the result."""
    response = requests.get(
        url='https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?',
        params={
            'callback': 'modifyStock',
            'op': 'add',
            'stockcode': stock,
            '_': '1673620068115',
        },
        headers=get_headers(),
    )
    # Strip the JSONP wrapper ``modifyStock(...);`` before decoding.
    reply = json.loads(response.text[12:-2])
    message = reply['errorMsg']
    if message == '修改成功':
        print('{}加入自选股成功'.format(stock))
    else:
        print('{}{}'.format(stock, message))
def del_stock_from_account(stock='600111'):
    """Remove a stock code from the 10jqka self-selected list.

    :param stock: 6-digit stock code, e.g. ``'600111'``.
    """
    url = 'https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?'
    headers = get_headers()
    df = get_all_stock()
    # Explicit membership check: the old code relied on an IndexError caught by
    # a bare ``except``, which also silently mislabelled network/JSON failures
    # as "stock not in the list". Real errors now propagate.
    if stock not in df['code'].tolist():
        print('{}没有在自选股'.format(stock))
        return
    params = {
        'op': 'del',
        'stockcode': stock
    }
    res = requests.get(url=url, params=params, headers=headers)
    # The 'del' endpoint returns plain JSON (no JSONP wrapper).
    reply = json.loads(res.text)
    err = reply['errorMsg']
    if err == '修改成功':
        print('{}删除自选股成功'.format(stock))
    else:
        print('{}{}'.format(stock, err))
def all_zt_stock_add_account(date='20230113'):
    """Add every limit-up ("涨停") stock of the given trading day to the watch list."""
    zt_pool = ak.stock_zt_pool_em(date=date)
    for code in zt_pool['代码'].tolist():
        add_stock_to_account(stock=code)
def all_del_add_stocks(codes):
    """Replace the whole self-selected list: delete every current entry, then add ``codes``."""
    current = get_all_stock()
    deleted = 0
    added = 0
    url = 'https://t.10jqka.com.cn/newcircle/group/modifySelfStock/?'
    headers = get_headers()
    # Phase 1: delete everything currently on the list.
    for code in current['code'].tolist():
        try:
            market = current[current['code'] == code]['marketid'].tolist()[0]
            # NOTE(review): computed but the API call below sends the bare code.
            stockcode = '{}_{}'.format(code, market)
            reply = json.loads(
                requests.get(url=url, params={'op': 'del', 'stockcode': code}, headers=headers).text)
            message = reply['errorMsg']
            if message == '修改成功':
                deleted += 1
            else:
                print('{}{}'.format(code, message))
        except:  # NOTE(review): bare except kept to preserve original behavior
            print('{}没有在自选股'.format(code))
    # Phase 2: add the requested codes.
    for code in codes:
        payload = {
            'callback': 'modifyStock',
            'op': 'add',
            'stockcode': code,
            '_': '1673620068115',
        }
        text = requests.get(url=url, params=payload, headers=headers).text
        # Strip the JSONP wrapper ``modifyStock(...);``.
        reply = json.loads(text[12:-2])
        message = reply['errorMsg']
        if message == '修改成功':
            added += 1
        else:
            print('{}{}'.format(code, message))
    print('删除自选股成功,删除了{}个;加入自选股成功,加入了{}个'.format(deleted, added))
# python /opt/code/pythonstudy_space/05_quantitative_trading_hive/util/同花顺自选股.py
if __name__ == '__main__':
    # Replace the watch list with this hard-coded selection and report the runtime.
    t0 = time.time()
    watch_list = ['002689', '002094', '002651', '002264', '002808', '002888', '003040', '002762', '002238', '002766', '003028']
    all_del_add_stocks(watch_list)
    elapsed = time.time() - t0
    print('{}:程序运行时间:{}s,{}分钟'.format(os.path.basename(__file__), elapsed, elapsed / 60))
| cgyPension/pythonstudy_space | 05_quantitative_trading_hive/util/同花顺自选股.py | 同花顺自选股.py | py | 5,988 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_n... |
11230963388 | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# Load CIFAR-10, resized/center-cropped to 224x224 and normalized with the
# ImageNet statistics expected by the pretrained ResNet below.
transform = transforms.Compose([transforms.Resize(256),
                                transforms.CenterCrop(224),
                                transforms.ToTensor(),
                                transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
# Transfer learning: take an ImageNet-pretrained ResNet-18 and replace its
# final fully-connected layer with a fresh 10-class head for CIFAR-10.
model = models.resnet18(pretrained=True)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, 10)
# Cross-entropy loss and SGD with momentum over ALL parameters — the
# pretrained backbone is fine-tuned as well, not frozen.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# 训练模型
def train(model, train_loader, criterion, optimizer):
    """Run one optimization epoch over ``train_loader`` (mutates ``model`` in place)."""
    model.train()
    for batch_inputs, batch_labels in train_loader:
        optimizer.zero_grad()
        batch_loss = criterion(model(batch_inputs), batch_labels)
        batch_loss.backward()
        optimizer.step()
# 测试模型
def test(model, test_loader, criterion):
    """Evaluate ``model`` on ``test_loader``; return ``(mean_loss, accuracy)``."""
    model.eval()
    running_loss = 0.0
    running_correct = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            outputs = model(inputs)
            batch_loss = criterion(outputs, labels)
            predictions = torch.max(outputs, 1)[1]
            # Weight the batch loss by batch size so the mean is per-sample.
            running_loss += batch_loss.item() * inputs.size(0)
            running_correct += torch.sum(predictions == labels.data)
    n_samples = len(test_loader.dataset)
    return running_loss / n_samples, float(running_correct) / n_samples
# Fine-tune for a fixed number of epochs, reporting held-out metrics each time.
num_epochs = 10
for epoch_idx in range(1, num_epochs + 1):
    print('Epoch {}/{}'.format(epoch_idx, num_epochs))
    train(model, train_loader, criterion, optimizer)
    test_loss, test_acc = test(model, test_loader, criterion)
    print('Test loss: {:.4f}, Test accuracy: {:.4f}'.format(test_loss, test_acc))
# Persist the fine-tuned weights.
torch.save(model.state_dict(), 'cifar10_resnet18.pth') | rainy2k/deep-learning | transfer_learning.py | transfer_learning.py | py | 2,427 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 9,
"usage_type": "call"
},
{
"... |
2410926756 | from django.contrib import admin
from db_file_storage.form_widgets import DBAdminClearableFileInput
from django import forms
from .models import Kid, Photo, PhotoFile
# Rebrand the Django admin: the page header and the browser <title> share
# the same Spanish label ("Site administration").
admin.site.site_header = "Administración del sitio"
admin.site.site_title = admin.site.site_header
class PhotoForm(forms.ModelForm):
    """Admin form for Photo that swaps the default file widget for the
    db_file_storage clearable input (files stored in the database)."""
    class Meta:
        model = Photo
        exclude = []
        widgets = {
            'image': DBAdminClearableFileInput
        }
class PhotoAdmin(admin.ModelAdmin):
    """ModelAdmin that edits Photo through PhotoForm (database file-storage widget)."""
    form = PhotoForm
admin.site.register(Kid)
# Register Photo together with PhotoAdmin; previously PhotoAdmin (and hence
# PhotoForm's database-storage widget) was defined but never wired up.
admin.site.register(Photo, PhotoAdmin)
| kiddybigmoments/kiddybigmoments-server | webapp/admin.py | admin.py | py | 553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 7,
"usage_type": "attribute"
},
{
"a... |
73349078504 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema: Car, Driver (one-to-one with a user account and a car)
    and Ride (a trip made with a given car)."""

    dependencies = [
        # Driver references the project's swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('brand', models.CharField(max_length=64, null=True)),
                ('model', models.CharField(max_length=64, null=True)),
                ('color', models.CharField(max_length=64)),
                ('reg_number', models.CharField(unique=True, max_length=16)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Driver',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # unique=True on a ForeignKey makes these effectively one-to-one
                # (legacy Django style; kept as originally generated).
                ('account', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),
                ('car', models.ForeignKey(to='TaxiService.Car', unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Ride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('fromAddress', models.CharField(max_length=256)),
                ('toAddress', models.CharField(max_length=256)),
                ('date', models.DateTimeField()),
                ('car', models.ForeignKey(to='TaxiService.Car')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| IlyaSergeev/taxi_service | TaxiService/migrations/0001_initial.py | 0001_initial.py | py | 1,872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 11,
"usage_type": "call... |
3310605398 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Sequential):
    """Three stacked Conv->ReLU->BatchNorm(->Dropout) stages with dense
    residual inputs: stage i consumes the sum of the block input and all
    previous stage outputs.

    Note: inherits from nn.Sequential but overrides forward, so it behaves
    like a plain Module; ``in_planes``/``out_planes`` are accepted for
    signature compatibility but channels are fixed at 64.
    """

    def __init__(self, in_planes, out_planes, args):
        super(Block, self).__init__()
        # Stage creation order is kept identical to the original so that
        # random weight initialization consumes the RNG in the same sequence.
        self.x5_block = self._stage(args.dropout, with_dropout=True)
        self.x6_block = self._stage(args.dropout, with_dropout=True)
        self.x7_block = self._stage(args.dropout, with_dropout=False)

    @staticmethod
    def _stage(p, with_dropout):
        """Build one Conv->ReLU->BN(->Dropout(p)) sub-network."""
        layers = [
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
        ]
        if with_dropout:
            layers.append(nn.Dropout(p))
        return nn.Sequential(*layers)

    def forward(self, x4):
        x5 = self.x5_block(x4)
        x6 = self.x6_block(x4 + x5)
        x7 = self.x7_block(x4 + x5 + x6)
        return x5, x6, x7
class DNN(nn.Module):
    """Dense-residual encoder with two decoder heads producing single-channel maps.

    Input: (N, 6, H, W); H and W must be divisible by 4 so the two 2x2
    max-pools are exactly undone by the two bilinear x2 upsamples and the
    skip-connection additions line up.
    Returns: tuple ``(outm, outd)`` of (N, 1, H, W) maps from the "m" and "d"
    heads (presumably mask and depth — TODO confirm with the training code).
    """

    def __init__(self, args):
        super(DNN, self).__init__()
        # --- shared encoder: three dense conv stages, pool, Block, pool, Block ---
        self.x1_block = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(args.dropout),
        )
        self.x2_block = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(args.dropout),
        )
        self.x3_block = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
        )
        self.x4_pool = nn.MaxPool2d(2, 2)
        self.x5_6_7_block = Block(64, 64, args)
        self.x8_pool = nn.MaxPool2d(2, 2)
        self.x9_10_11_block = Block(64, 64, args)
        # --- decoder head "m": 1x1 conv, x2 upsample + skip, dense block (x2), 3x3 head ---
        self.idm1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xm2_block = Block(64, 64, args)
        self.idm2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xm4_block = Block(64, 64, args)
        self.idm4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.finm = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False)
        # --- decoder head "d": mirrors head "m" with its own weights ---
        self.idd1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xd2_block = Block(64, 64, args)
        self.idd2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.xd4_block = Block(64, 64, args)
        self.idd4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
        self.find = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x):
        # --- encoder ---
        x1 = self.x1_block(x)
        x2 = self.x2_block(x1)
        x3 = self.x3_block(x1 + x2)
        x4 = self.x4_pool(x1 + x2 + x3)
        x5, x6, x7 = self.x5_6_7_block(x4)
        x8 = self.x8_pool(x5 + x6 + x7)
        x9, x10, x11 = self.x9_10_11_block(x8)
        # --- head "m": upsample, add encoder skip, refine, repeat ---
        outm = self.idm1(x11)
        outm = F.interpolate(outm, scale_factor=2, mode='bilinear')
        _, _, outm = self.xm2_block(outm + x7)
        outm = self.idm2(outm)
        outm = F.interpolate(outm, scale_factor=2, mode='bilinear')
        _, _, outm = self.xm4_block(outm + x3)
        outm = self.idm4(outm)
        outm = self.finm(outm)
        # --- head "d" ---
        outd = self.idd1(x11)
        outd = F.interpolate(outd, scale_factor=2, mode='bilinear')
        _, _, outd = self.xd2_block(outd + x7)
        outd = self.idd2(outd)
        outd = F.interpolate(outd, scale_factor=2, mode='bilinear')
        _, _, outd = self.xd4_block(outd + x3)
        outd = self.idd4(outd)
        # BUG FIX: this head previously ended with self.finm (head "m"'s final
        # conv), leaving self.find unused; route through the d-head's own conv.
        outd = self.find(outd)
        return outm, outd
if __name__ == '__main__':
    # Smoke test. Fixes two crashes in the original: the network's first conv
    # expects 6 input channels (not 3), and forward returns a (outm, outd)
    # tuple, which has no .size() — unpack before printing shapes.
    class DNNArg:
        dropout = 0.0
    net = DNN(DNNArg())
    outm, outd = net(torch.randn(1, 6, 224, 224))
    print(outm.size(), outd.size())
| uday96/EVA4-TSAI | S15/models/quiz_dense.py | quiz_dense.py | py | 4,240 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Sequential",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"li... |
7989155548 | import numpy as np
import cv2
import time
import math
from visual import *
import visual as vs # for 3D panel
import wx # for widgets
# Open the second camera device (index 1); frames are read in the main loop below.
capture = cv2.VideoCapture(1)
def nothing(x):
    """No-op callback required by cv2.createTrackbar (value is ignored)."""
    return None
####### TRACKBAR #########
#cv2.namedWindow('bar')
#cv2.createTrackbar('R','bar',0,255,nothing)
#cv2.createTrackbar('G','bar',0,255,nothing)
#cv2.createTrackbar('B','bar',0,255,nothing)
#cv2.createTrackbar('R1','bar',0,255,nothing)
#cv2.createTrackbar('G1','bar',0,255,nothing)
#cv2.createTrackbar('B1','bar',0,255,nothing)
def rescale_frame(capturing, wpercent=50, hpercent=50):
    """Resize a frame to the given percentage of its original width/height."""
    new_size = (int(capturing.shape[1] * wpercent / 100),
                int(capturing.shape[0] * hpercent / 100))
    return cv2.resize(capturing, new_size, interpolation=cv2.INTER_AREA)
def roi_seg(img, hsv):
    """Segment the target colour in ``hsv`` and return the masked, denoised image."""
    # Fixed HSV window found experimentally (the interactive trackbar tuning
    # code was removed once these values were settled).
    lower = np.array([99, 145, 247])
    upper = np.array([131, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    masked = cv2.bitwise_and(img, img, mask=mask)
    # Morphological opening with a large kernel destroys the remaining speckle noise.
    kernel = np.ones((20, 20), np.uint8)
    return cv2.morphologyEx(masked, cv2.MORPH_OPEN, kernel)
def filtragem(frame):
    """Blur and erode the frame, convert to HSV and run the colour segmentation."""
    smoothed = cv2.GaussianBlur(frame, (11, 11), 0)
    eroded = cv2.erode(smoothed, (11, 11), 1)
    hsv = cv2.cvtColor(eroded, cv2.COLOR_BGR2HSV)
    # Note: segmentation is applied to the original frame with the filtered HSV.
    return roi_seg(frame, hsv)
def contorno(white_img, frame):
    # Find the largest contour in the segmented image, draw its centroid on
    # ``frame`` and return (cx, cy, area); returns (0, 0, 0) when nothing is found.
    ret1, thr = cv2.threshold(white_img, 127, 255, cv2.THRESH_BINARY)  # thresholded copy (unused below)
    canny = cv2.Canny(white_img, 50, 255)
    # findContours returns 2 or 3 values depending on the OpenCV version;
    # normalize to (contours, hierarchy).
    result = cv2.findContours(canny, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    cont, hierarchy = result if len(result) == 2 else result[1:3]
    if len(cont) > 0:
        # Pick the contour with the largest area.
        areas = [cv2.contourArea(c) for c in cont]
        max_index = np.argmax(areas)
        cont_max = cont[max_index]
        M = cv2.moments(cont[max_index])
        area = cv2.contourArea(cont[max_index])
        if (M['m00'] != 0):  # guard against division by zero on degenerate contours
            # Centroid from image moments.
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.circle(frame, (cx, cy), 8, (0, 255, 105), 3)
            return (cx, cy, area)
    return (0, 0, 0)
# 3D scene: a green reference sphere fixed at the origin, and the blue
# ``ball_1`` whose position/radius mirror the tracked ball each frame.
ball = sphere(color=color.green, radius=0.4)
ball.mass = 1.0
ball.pos = (0, 0, 0)
ball_1 = sphere(color=color.blue, radius=0.4)
# Virtual-world clock, advanced by ``dt`` per processed frame.
dt = 0.5
t = 0.0
def axes(frame, colour, sz, posn):
    """Draw labelled X/Y/Z axes of length ``sz`` at ``posn``.

    Pass ``frame=None`` to draw in world coordinates.
    """
    origin = vs.vector(posn)
    basis = (vs.vector(sz, 0, 0), vs.vector(0, sz, 0), vs.vector(0, 0, sz))
    for axis_name, direction in zip(("X", "Y", "Z"), basis):
        tip = origin + direction
        vs.curve(frame=frame, color=colour, pos=[origin, tip])
        vs.label(frame=frame, color=colour, text=axis_name, pos=tip,
                 opacity=0, box=False)
# Draw world axes, then track the ball until the 'z' key is pressed.
axes(None, color.white, 3, (-11, 6, 0))
while True:
    rate(100)  # cap the visual loop at 100 iterations per second
    _, img = capture.read()
    pressed_key = cv2.waitKey(1) & 0xFF
    frame = rescale_frame(img)
    height, width = frame.shape[:2]
    roi = filtragem(frame)
    # Show the colour segmentation and locate the centroid of the ball.
    cv2.imshow('Segmentando_cor', roi)
    (x1, y1, area) = contorno(roi, frame)
    # Radius of the circle whose area equals the detected contour area.
    r = math.sqrt(area / math.pi)
    # Map image pixel coordinates into the virtual world (1/100 scale).
    t = t + dt
    print(x1, y1)
    ball_1.pos = (x1 / 100, y1 / 100, 0)
    ball_1.radius = r / 100
    if pressed_key == ord("z"):
        break
cv2.destroyAllWindows()
capture.release()
| samuelamico/PingPongOpenCV | ball_detect.py | ball_detect.py | py | 4,564 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"li... |
27193939959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import gzip
import logging
import argparse
from collections import OrderedDict
LOG = logging.getLogger(__name__)
__version__ = "1.0.1"
__author__ = ("Xingguo Zhang",)
__email__ = "invicoun@foxmail.com"
__all__ = []
def read_tsv(file, sep="\t"):
    """Yield the fields of each non-empty, non-comment line of a delimited
    text file; files ending in ``.gz`` are opened through gzip."""
    opener = gzip.open if file.endswith(".gz") else open
    fh = opener(file)
    for raw in fh:
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        raw = raw.strip()
        if raw and not raw.startswith("#"):
            yield raw.split(sep)
    fh.close()
def split_tax(tax):
    """Parse a pipe-separated taxonomy string (``k__...|p__...|...``) into an
    ordered mapping of rank letter -> name; if the kingdom rank appears more
    than once, the first occurrence wins."""
    parsed = OrderedDict()
    for token in tax.split("|"):
        rank, name = token.split("__", 1)
        if rank == "k" and rank in parsed:
            continue  # keep the first kingdom entry
        parsed[rank] = name
    return parsed
def stat_mpa_tax(file):
    """Print read counts per taxonomic rank from a ``kreport2mpa`` report.

    For each line, reads are attributed to the deepest rank present; species
    whose name contains ``'.'`` are additionally counted as sub-species.
    Prints a tab-separated header plus one row of comma-formatted counts.
    :param file: path to the kreport2mpa.report file (plain or gzipped).
    """
    data = {}
    for taxs, reads in read_tsv(file, sep="\t"):
        taxs = split_tax(taxs)
        # Deepest rank on this line gets the read count.
        level = list(taxs)[-1]
        data[level] = data.get(level, 0) + int(reads)
        # Species names like 'X.Y' are sub-species entries.
        if level == "s" and "." in taxs[level]:
            data["sub"] = data.get("sub", 0) + int(reads)
    # Fixed a typo in the printed header: 'Famil' -> 'Family'.
    print("#Kingdom\tPhylum\tClass\tOrder\tFamily\tGenus\tSpecies\tSub Species")
    row = [format(data.get(rank, 0), ",") for rank in ["k", "p", "c", "o", "f", "g", "s", "sub"]]
    print("\t".join(row))
    return 0
def add_hlep_args(parser):
    """Attach the positional input-file argument to ``parser`` and return it.

    (The misspelled name 'hlep' is kept because ``main`` calls it by this name.)
    """
    parser.add_argument(
        "input", metavar="FILE", type=str,
        help="Input the abundance statistics result file of each sample, kreport2mpa.report")
    return parser
def main():
    """Command-line entry point: configure logging, parse args, print the table."""
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO,
        format="[%(levelname)s] %(message)s"
    )
    usage_note = '''
For exmple:
stat_mpa_tax.py kreport2mpa.report >stat_tax.tsv
version: %s
contact: %s <%s>\
''' % (__version__, " ".join(__author__), __email__)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=usage_note)
    opts = add_hlep_args(parser).parse_args()
    stat_mpa_tax(opts.input)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| zxgsy520/metavirus | scripts/stat_mpa_tax.py | stat_mpa_tax.py | py | 2,401 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.basicConf... |
14919631267 | #!/usr/bin/env python
# This file is part of fdsgeogen.
#
# fdsgeogen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fdsgeogen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fdsgeogen. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess as sp
import os.path
import argparse
# check for command line options
# Command-line options: --force resubmits even if already submitted,
# --number_of_jobs caps the queue size, --status only reports job states.
parser = argparse.ArgumentParser()
parser.add_argument("--force",
                    help="submit FDS job even if job was already submitted", action="store_true")
parser.add_argument("--number_of_jobs",
                    help="maximal number of jobs in queue (default: 5)", default=5, type=int)
parser.add_argument("--status",
                    help="shows status of jobs (no action / submitted / running / finished)", action="store_true")
cmdl_args = parser.parse_args()
# Each non-comment line of fgg.subdirlist is "subdir;inputfile;CHID".
fn_subdirlist = 'fgg.subdirlist'
submit_cmd = 'sbatch'

subdirs = []
inputs = []
chids = []

# Read in all sub directories, FDS input files, and CHIDs.
if not os.path.isfile(fn_subdirlist):
    print(" -- file %s could not be opened -> EXIT"%fn_subdirlist)
    print()
    sys.exit(1)

subdirs_file = open(fn_subdirlist, 'r')
for line in subdirs_file:
    if line[0] == '#': continue
    lc = line.rstrip().split(';')
    subdirs.append(lc[0])
    inputs.append(lc[1])
    chids.append(lc[2])
subdirs_file.close()

print("processing subdirectories: ")
if cmdl_args.status:
    # Report-only mode: classify each job directory by its marker files
    # (fgg.jureca.finished / .running / .submitted) and print a summary.
    cnt_finished = 0
    cnt_running = 0
    cnt_queued = 0
    cnt_noaction = 0
    for cd_ind in range(len(subdirs)):
        subdir = subdirs[cd_ind]
        chid = chids[cd_ind]
        inputfile = inputs[cd_ind]  # NOTE(review): read but not used in this branch
        if os.path.isfile(os.path.join(subdir, "fgg.jureca.finished")):
            print(subdir + ": simulation finished")
            cnt_finished += 1
            continue
        if os.path.isfile(os.path.join(subdir, "fgg.jureca.running")):
            print(subdir + ": simulation running")
            cnt_running += 1
            continue
        if os.path.isfile(os.path.join(subdir, "fgg.jureca.submitted")):
            print(subdir + ": simulation queued")
            cnt_queued += 1
            continue
        print(subdir + ": no action so far")
        cnt_noaction += 1
    print("SUMMARY")
    print("finished: ", cnt_finished)
    print("running : ", cnt_running)
    print("queued  : ", cnt_queued)
    print("noaction: ", cnt_noaction)
else:
    # Submission mode: submit up to --number_of_jobs unsubmitted jobs via
    # sbatch, creating an fgg.jureca.submitted marker for each one.
    submitted_number = 0
    for cd_ind in range(len(subdirs)):
        subdir = subdirs[cd_ind]
        chid = chids[cd_ind]
        inputfile = inputs[cd_ind]  # NOTE(review): read but not used in this branch
        print(" -", subdir)
        if os.path.isfile(os.path.join(subdir, "fgg.jureca.finished")):
            print(" ... skipping, is already finished")
            continue
        if os.path.isfile(os.path.join(subdir, "fgg.jureca.submitted")) and not cmdl_args.force:
            print(" ... was already submitted")
        else:
            # Submit from within the job directory, capturing sbatch output.
            stdoutf = open(os.path.join(subdir, 'fgg.jureca.stdout'), 'w')
            sp.Popen([submit_cmd, 'fgg.jureca.job'], stdout=stdoutf, stderr=sp.STDOUT, cwd=subdir).communicate()
            stdoutf.close()
            # Empty marker file records that this job was submitted.
            sf = open(os.path.join(subdir, 'fgg.jureca.submitted'), 'w')
            sf.close()
            print(" ... submitted to job queue")
            submitted_number += 1
            if submitted_number >= cmdl_args.number_of_jobs:
                print(" maximal number of submitted jobs reached, stopping ")
                break
| FireDynamics/fdsgeogen | scripts/fgg_run_jureca.py | fgg_run_jureca.py | py | 3,900 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path... |
27863085436 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import os
# Drive a Chrome session to the typing test and auto-type the word list.
driver = webdriver.Chrome()
driver.get("https://10fastfingers.com/typing-test/french")
# NOTE(review): 'pause' is a Windows-only shell command — confirm target OS.
os.system('pause')

# Wait for the word board to be present in the DOM.
WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.ID, 'words')))
WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.ID, 'row1')))

# Poll until the first row actually contains word spans.
spans = driver.find_element_by_id("row1").find_elements_by_xpath("./span")
while not spans:
    sleep(0.5)
    spans = driver.find_element_by_id("row1").find_elements_by_xpath("./span")

words = [span.get_attribute('innerText') for span in spans]

# Type each word followed by a space, pacing keystrokes to look human.
input_field = driver.find_element_by_id('inputfield')
for word in words:
    input_field.send_keys(str(word))
    input_field.send_keys(Keys.SPACE)
    sleep(0.27)
| mmangon/10fastfingers | main.py | main.py | py | 1,196 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriv... |
71054390185 | import data_pipeline as dp
import glob
import numpy as np
import pandas as pd
import os
import shutil
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
## Global parameters: every image is loaded/resized to IMG_DIM (width, height).
IMG_WIDTH = 300
IMG_HEIGHT = 300
IMG_DIM = (IMG_WIDTH, IMG_HEIGHT)
def process_img(img):
    """Load an image file at IMG_DIM and scale its values into [0, 1]."""
    return scale_img(conv_img(img))
def conv_img(img):
    """Load an image file resized to IMG_DIM and return it as an ndarray."""
    loaded = load_img(img, target_size=IMG_DIM)
    return img_to_array(loaded)
def scale_img(img):
    """Return a float32 copy of ``img`` with values rescaled from [0, 255] to [0, 1]."""
    return img.astype("float32") / 255
def load_files(classes):
    """Load every train/val image for the given classes.

    Returns ``(X_train, y_train, X_val, y_val)``: images stacked into numpy
    arrays, labels as plain lists of class names.
    """
    files = dp.build_dataset('_'.join(classes))
    X_train, y_train = [], []
    X_val, y_val = [], []
    for label in classes:
        train_imgs = [conv_img(f) for f in files[label]["train"]]
        val_imgs = [conv_img(f) for f in files[label]["val"]]
        X_train.extend(train_imgs)
        y_train.extend([label] * len(train_imgs))
        X_val.extend(val_imgs)
        y_val.extend([label] * len(val_imgs))
    X_train = np.array(X_train)
    X_val = np.array(X_val)
    # Visualize a sample image from the last class (result intentionally unused).
    array_to_img(train_imgs[0])
    return X_train, y_train, X_val, y_val
def scale_imgs(X):
    """Return a float32 copy of the image batch rescaled from [0, 255] to [0, 1]."""
    return X.astype("float32") / 255
def encode_labels(y_train, y_val):
    """Fit a LabelEncoder on the training labels and transform both splits.

    Returns ``(y_train_encoded, y_val_encoded, fitted_encoder)``.
    """
    from sklearn.preprocessing import LabelEncoder
    encoder = LabelEncoder()
    encoder.fit(y_train)
    train_enc = encoder.transform(y_train)
    val_enc = encoder.transform(y_val)
    # Debug output kept from the original implementation.
    print(y_train[0:5], train_enc[0:5])
    print(train_enc.shape)
    return train_enc, val_enc, encoder
def gen_augmented_data(X_train, y_train, X_val, y_val):
    """Build Keras generators: heavy augmentation for training, rescale-only
    for validation; both use batch_size=30.

    NOTE(review): callers that already ran scale_img/scale_imgs will rescale
    twice (here again by 1/255) — confirm intended.
    """
    train_datagen = ImageDataGenerator(
        rescale=1. / 255, zoom_range=0.3, rotation_range=50,
        width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
        horizontal_flip=True, fill_mode="nearest")
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    return (train_datagen.flow(X_train, y_train, batch_size=30),
            val_datagen.flow(X_val, y_val, batch_size=30))
def test_datagen(datagen, X, y):
    """Preview five augmented samples from ``datagen`` in one matplotlib row."""
    flow = datagen.flow(X, y, batch_size=1)
    samples = [next(flow) for _ in range(0, 5)]
    fig, ax = plt.subplots(1, 5, figsize=(16, 6))
    print("Labels:", [sample[1][0] for sample in samples])
    l = [ax[i].imshow(samples[i][0][0]) for i in range(0, 5)]
def load_generators(classes):
    """End-to-end pipeline: load files, scale images, encode labels and wrap
    everything in augmentation generators; returns (train_gen, val_gen)."""
    X_train, y_train, X_val, y_val = load_files(classes)
    X_train = scale_imgs(X_train)
    X_val = scale_imgs(X_val)
    y_train_enc, y_val_enc, _ = encode_labels(y_train, y_val)
    return gen_augmented_data(X_train, y_train_enc, X_val, y_val_enc)
def get_test(classes):
    """Load the held-out test images for the given classes.

    Returns ``(X_test, y_test, label_encoder)`` where the encoder is fitted
    on the train/val labels (loaded here only to fit the encoder).
    """
    files = dp.build_dataset('_'.join(classes))
    _, y_train, _, y_val = load_files(classes)
    X_test, y_test = [], []
    for label in classes:
        test_imgs = [conv_img(f) for f in files[label]["test"]]
        X_test.extend(test_imgs)
        y_test.extend([label] * len(test_imgs))
    _, _, le = encode_labels(y_train, y_val)
    X_test = np.array(X_test)
    # Visualize a sample image from the last class (result intentionally unused).
    array_to_img(test_imgs[0])
    return X_test, y_test, le
| luke-truitt/learn-together-model | data_preprocessing.py | data_preprocessing.py | py | 3,735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.load_img",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "data_pipeline.build_dataset",
"line_number": 33,
"usage_type": ... |
34788181082 | """First migration
Revision ID: 1e99703f8998
Revises:
Create Date: 2022-03-30 17:34:52.872031
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1e99703f8998'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``resources`` table and the ``entries`` table (FK to resources)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('resources',
        sa.Column('resource_uuid', sa.String(), nullable=False),
        sa.Column('name', sa.String(), nullable=True),
        sa.Column('description', sa.String(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('updated', sa.DateTime(), nullable=True),
        sa.Column('state', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('resource_uuid')
    )
    op.create_index(op.f('ix_resources_resource_uuid'), 'resources', ['resource_uuid'], unique=False)
    # Entries belong to a resource and carry a private and a public body.
    op.create_table('entries',
        sa.Column('resource_uuid', sa.String(), nullable=True),
        sa.Column('entry_uuid', sa.String(), nullable=False),
        sa.Column('private_body', sa.String(), nullable=True),
        sa.Column('public_body', sa.String(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('updated', sa.DateTime(), nullable=True),
        sa.Column('state', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['resource_uuid'], ['resources.resource_uuid'], ),
        sa.PrimaryKeyConstraint('entry_uuid')
    )
    op.create_index(op.f('ix_entries_entry_uuid'), 'entries', ['entry_uuid'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables/indexes created by :func:`upgrade` (entries first, FK order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_entries_entry_uuid'), table_name='entries')
    op.drop_table('entries')
    op.drop_index(op.f('ix_resources_resource_uuid'), table_name='resources')
    op.drop_table('resources')
    # ### end Alembic commands ###
| gilde-der-nacht/website | olymp/app/storage/migrations/versions/1e99703f8998_first_migration.py | 1e99703f8998_first_migration.py | py | 1,893 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Strin... |
28587679911 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
import pika
import json
import time
class rabbitmq(models.Model):
    """Extends ``res.partner`` with RabbitMQ-backed batch processing of contacts:
    publish every contact to a queue, then consume and process them one by one."""
    _name = 'res.partner'
    _description = 'Processing of Contact Records'
    _inherit = "res.partner"

    def sequential_contacts(self):
        """Process every contact record synchronously (no queue)."""
        records = self.env['res.partner'].search([])
        for record in records:
            self.process_contact(record)

    def process_contacts(self):
        """Publish every contact as a JSON message on the 'process_contact' queue."""
        connection, channel = self.get_connection()
        records = self.env['res.partner'].search([])
        for record in records:
            rec = {'id':record.id, 'name': record.name}
            # Publish it to RabbitMQ (default exchange routes by queue name).
            channel.basic_publish(exchange='',
                                  routing_key='process_contact',
                                  body=json.dumps(rec))
            # TODO(review): move this logging into a publish-confirm callback.
            print(" [x] Sent '"+ record.name+ " '")
        connection.close()

    def process_contact(self,rec):
        """Placeholder for the slow per-contact work (simulated with a 1s sleep)."""
        print(rec)
        time.sleep(1)

    def process_queue(self):
        """Blocking consumer: process queued contacts until interrupted (CTRL+C)."""
        connection, channel = self.get_connection()

        # Per-message handler: decode, process, then ack so the message is
        # only removed from the queue after successful processing.
        def callback(ch, method, properties, body):
            if body:
                try:
                    rec = json.loads(body)
                    self.process_contact(rec)
                    print(" [x] Received %r" % rec)
                    ch.basic_ack(delivery_tag=method.delivery_tag)
                except:
                    # NOTE(review): bare except — failed messages are never
                    # acked and will be redelivered; confirm this is intended.
                    print("error loading json")

        # prefetch_count=1: deliver at most one unacked message at a time.
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume('process_contact', callback)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()

    def get_connection(self):
        """Open a blocking AMQP connection and declare the durable work queue.

        NOTE(review): broker host and credentials are hard-coded — move them
        to configuration and out of source control.
        """
        credentials = pika.PlainCredentials(username='mojo', password='mojo')
        connection = pika.BlockingConnection(pika.ConnectionParameters(host="168.235.109.177",port=5672,credentials=credentials))
        channel = connection.channel()
        channel.queue_declare(queue='process_contact', durable=True)
        return connection,channel
#name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
| FirstClassComputerConsulting/odoo_insurance_app | rabbitmq/models/models.py | models.py | py | 2,625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "odoo.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_nu... |
73947669542 | '''
Desarrollado por: Ferney Vanegas Hernández
Misión TIC 2022
Versión : 1.0.2
Título: Reto 4
'''
import modules.rows as r
import modules.columns as c
import modules.widhts as w
import modules.longs as l
import modules.walls as wall
def main():
    """Build and print a random ASCII wall map on a dim x dim board.

    Prompts (in Spanish) for the board dimension and the number of walls,
    generates random wall coordinates via the helper modules and renders
    the resulting map.
    """
    dim = int(input('Ingresa un número para dimensionar el tablero (cuadrado Dim x Dim). Ej:2 (Para crear un tablero de 2x2\n'))
    pos = int(input('Ingresa por favor las posiciones (ó cantidad de muros) que deseas implementar(Ej: 4)\n'))
    # BASE LIST GENERATION
    # ============================
    # dim - 1 is passed because of how random values are generated inside the helpers
    rows = r.get_rows(dim - 1, pos)
    columns = c.get_columns(dim - 1, pos)
    widths = w.get_widths(dim - 1, pos)
    longs = l.get_longs(dim - 1, pos)
    # ============================
    # WALL COORDINATES
    coord_walls = wall.get_coord_walls(rows, columns, widths, longs, dim - 1)
    # MAP CONSTRUCTION
    print(
        f'--------------------------------------------\n'
        f'+++ COORDENADAS DE CONSTRUCCIÓN +++\n'
        f'--------------------------------------------\n'
        f'{coord_walls}\n'
        f'--------------------------------------------\n'
        f'+++ MAPA +++\n'
        f'--------------------------------------------\n'
    )
    wall.construct_walls(coord_walls, dim)


main()
| ferneyvanegas/WorldCraft-ASCII-Listas | main.py | main.py | py | 1,468 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "modules.rows.get_rows",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "modules.rows",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "modules.columns.get_columns",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "module... |
18913786240 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 2018/6/12 20:38
@File : run.py
@contact : mmmaaaggg@163.com
@desc :
"""
import time
import logging
from ibats_bitmex_feeder.backend.orm import init
from ibats_bitmex_feeder.feeder import start_feeder
logger = logging.getLogger()
if __name__ == "__main__":
    # Initialise the backend ORM; True presumably forces schema
    # creation -- TODO confirm against ibats_bitmex_feeder.backend.orm.
    init(True)
    # while True:
    # Start the market-data feeder thread, loading symbols and
    # back-filling history before streaming.
    supplier = start_feeder(init_symbols=True, do_fill_history=True)
    try:
        # Keep the main thread alive while the feeder thread is working.
        while supplier.is_working:
            time.sleep(5)
    except KeyboardInterrupt:
        logger.warning('Feeder 终止...')
    finally:
        # Signal the worker thread to stop and wait for it to finish.
        supplier.is_working = False
        supplier.stop()
        supplier.join()
        logger.warning('子线程已经结束')
| IBATS/IBATS_BitMexFeeder | run.py | run.py | py | 731 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ibats_bitmex_feeder.backend.orm.init",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ibats_bitmex_feeder.feeder.start_feeder",
"line_number": 21,
"usage_type": "call"
... |
35386786784 | #!/usr/bin/python3
#import TAlight dove collocare le funzioni scritte una volta per tutte a bagaglio comune dei problemi.
import sys
import yaml
import argparse
from colorama import init
init()
#from termcolor import cprint
parser = argparse.ArgumentParser(description="evaluate one single submission file (the stream received from stdin as default) for one specific (goal,subtask) pair", epilog="Enjoy the program! :)", fromfile_prefix_chars='@')
parser.add_argument("goal", metavar='goal', type=int, choices=[1, 2],
help="goal=1,2 per specificare il goal per il quale la submission è intesa")
parser.add_argument("subtask", metavar='subtask', type=int, choices=[1, 2, 3],
help="subtask=1,2,3 per specificare il subtask del goal per il quale la submission è intesa")
group = parser.add_mutually_exclusive_group()
group.add_argument("-q", "--quiet", action="store_true",
help="impone che non venga scritto nè su stdout nè su stderr")
group.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2], default=2, metavar='LEVEL',
help="to set a partial feedback-level")
parser.add_argument("-l", "--local", action="store_true",
help="local versus on server")
parser.add_argument("-t", "--test", type=str, default=None, metavar='outcome_code',
help="when testing, to assert the outcome code that the submitted instance should deliver")
args = parser.parse_args()
def internal_error(error_nickname, message):
    """Report an internal checker error and abort with exit status 2.

    Internal errors are never the problem solver's fault; they indicate a
    bug in the checker itself, detectable with local testing. (Candidate
    for a problem-independent library.)
    """
    header = (
        f"({error_nickname}) Internal error "
        "(never fault of the problem solver, detectable with local testing):"
    )
    print(header)
    print(message)
    sys.exit(2)
def format_error(feedback_nickname, goal, subtask, message = None):
    """Report a submission-format error and exit with status 0.

    Format errors are the solver's responsibility, but usually reflect
    carelessness with the output format rather than a wrong solution.
    Relies on the module-level global ``codeproblem``. (Candidate for a
    problem-independent library.)
    """
    print(f"({feedback_nickname}) Format error.", end=" ")
    print(f"You should review the format of the file you have submitted for [problem={codeproblem}, goal={goal}, subtask={subtask}]. (You can do it in local, also taking profit of the format checking script made available to you.\n")
    if message is not None:
        print("More precisely, pay attention to this:")
        print(message)
    sys.exit(0)
def solution_error(feedback_nickname, goal, subtask, message = None):
    """Report that the submitted solution is wrong and exit with status 0.

    True feedback on the problem: the submitted solution contains errors.
    Relies on the module-level global ``codeproblem``.
    """
    banner = (f"({feedback_nickname}) Error found in the solution you "
              f"submitted for [problem={codeproblem}, goal={goal}, "
              f"subtask={subtask}].\n")
    print(banner)
    if message is not None:
        print("More precisely, pay attention to this:")
        print(message)
    sys.exit(0)
def solution_OK(feedback_nickname, goal, subtask, message = None):
    """Report a feasible (but not necessarily perfect) solution; exit 0.

    Relies on the module-level global ``codeproblem``.
    """
    banner = (f"({feedback_nickname}) OK. Your solution to "
              f"[problem={codeproblem}, goal={goal}, subtask={subtask}] "
              f"is a feasible one.")
    print(banner)
    if message is not None:
        print(message)
    sys.exit(0)
def solution_perfect(feedback_nickname, goal, subtask, lesson = None, next_challenge = None):
    """Report a perfect solution, print optional lesson/next steps; exit 0.

    Relies on the module-level global ``codeproblem``.
    """
    banner = (f"({feedback_nickname}) OK. Your solution to "
              f"[problem={codeproblem}, goal={goal}, subtask={subtask}] "
              f"is perfect!")
    print(banner)
    # Print each optional follow-up section under its own heading.
    for heading, content in (("What have we learned:", lesson),
                             ("What next:", next_challenge)):
        if content is not None:
            print(heading)
            print(content)
    sys.exit(0)
# PROBLEM SPECIFIC PART:
codeproblem = "tiling_mxn-boards_by_1x2-boards"
M=20
N=20
with open("eval_submission.it.yaml", 'r') as stream:
try:
api = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
def is_tilable(m, n):
    """Return 1 if an m x n board can be tiled by 1x2 dominoes, else 0.

    A board is tilable exactly when its area m*n is even, i.e. unless
    both side lengths are odd.
    """
    both_sides_odd = (m % 2 == 1) and (n % 2 == 1)
    return 0 if both_sides_odd else 1
def check_decision(goal, subtask):
    """Check a goal-1 submission: an M x N table of 0/1 tilability answers.

    If subtask=1, only the first row of answers is checked (boards with m=1).
    If subtask=2, only the first two rows are checked (m=1 and m=2).
    If subtask=3, all MxN answers are checked.
    Reads the submission from stdin; aborts through format_error()/
    solution_error() (which call sys.exit) on the first problem found.
    """
    def fstr(template):
        # Evaluate a YAML message template as an f-string.
        # NOTE(review): defined but unused -- the code below inlines eval().
        return eval(f"f'{template}'")
    global M
    if subtask <= 2:
        # Subtasks 1 and 2 restrict the check to the first 1 or 2 rows.
        M = subtask
    for i in range(1,M+1):
        try:
            risp_line_full = sys.stdin.readline()
            risp_line = risp_line_full.rstrip()
        except EOFError:
            tmpstr1=api["too-few-lines"]
            format_error("too-few-lines", goal, subtask, eval(f"f'{tmpstr1}'"))
        if len(risp_line) != N:
            # Distinguish a single trailing newline from extra junk chars.
            if len(risp_line_full)-len(risp_line) == 1:
                tmpstr1=api["wrong-line-length-single-newline-char"]
                format_error("wrong-line-length-single-newline-char", goal, subtask, eval(f"f'{tmpstr1}'"))
            else:
                tmpstr1=api["wrong-line-length-more-newline-chars"]
                format_error("wrong-line-length-more-newline-chars", goal, subtask, eval(f"f'{tmpstr1}'"))
        for j in range(1,N+1):
            if risp_line[j-1] not in {"0","1"}:
                tmpstr1=api["wrong-char-bool"]
                format_error("wrong-char-bool", goal, subtask, eval(f"f'{tmpstr1}'"))
            if int(risp_line[j-1]) != is_tilable(i, j):
                # Wrong answer: report which direction it is wrong in.
                if is_tilable(i, j):
                    tmpstr1=api["wrong0-answ"]
                    solution_error("wrong0-answ", goal, subtask, eval(f"f'{tmpstr1}'"))
                else:
                    tmpstr1=api["wrong1-answ"]
                    solution_error("wrong1-answ", goal, subtask, eval(f"f'{tmpstr1}'"))
    # All answers correct: emit the subtask-specific lesson and next challenge.
    if M==1:
        solution_perfect("perfect1-1-challenge", goal, subtask, api["perfect1-1-lesson"], api["perfect1-1-challenge"])
    elif M==2:
        solution_perfect("perfect1-2-challenge", goal, subtask, api["perfect1-2-lesson"], api["perfect1-2-challenge"])
    else:
        solution_perfect("perfect1-3-challenge", goal, subtask, api["perfect1-3-lesson"], api["perfect1-3-challenge"])
def check_tiling(goal, subtask):
    """Check a goal-2 submission: an explicit domino tiling of an m x n board.

    Each cell carries the direction of its domino partner: "W"/"E" for the
    left/right half of a horizontal domino, "N"/"S" for the upper/lower
    half of a vertical one. Reads the grid from stdin and aborts through
    format_error() on the first inconsistency.

    The tiling is evaluated even if it is for a larger grid than the
    chosen subtask requires.
    """
    def fstr(template):
        """Evaluate a YAML message template as an f-string."""
        return eval(f"f'{template}'")
    m, n = map(int, sys.stdin.readline().split())
    if not ( (0 <= m <= 20) and (0 <= n <= 20)):
        tmpstr1=api["out-of-range-m-n"]
        format_error("out-of-range-m-n", goal, subtask, eval(f"f'{tmpstr1}'"))
    # booked[j-1] is True when column j's cell in the previous row was the
    # "N" half of a vertical domino, so this row's cell must be its "S" half.
    booked = [False] * 20
    for i in range(1,m+1):
        try:
            risp_line_full = sys.stdin.readline()
            risp_line = risp_line_full.rstrip()
        except EOFError:
            tmpstr1=api["too-few-lines"]
            format_error("too-few-lines", goal, subtask, eval(f"f'{tmpstr1}'"))
        if len(risp_line) != n:
            if len(risp_line_full)-len(risp_line) == 1:
                tmpstr1=api["wrong-line-length-single-newline-char"]
                format_error("wrong-line-length-single-newline-char", goal, subtask, eval(f"f'{tmpstr1}'"))
            else:
                tmpstr1=api["wrong-line-length-more-newline-chars"]
                format_error("wrong-line-length-more-newline-chars", goal, subtask, eval(f"f'{tmpstr1}'"))
        for j in range(1,n+1):
            cell = risp_line[j-1]
            if booked[j-1] and cell != "S":
                i = i-1  # report the row holding the dangling "N" half
                tmpstr1=api["wrong-N"]
                format_error("wrong-N", goal, subtask, eval(f"f'{tmpstr1}'"))
            if cell not in {"N","S","W","E"}:
                tmpstr1=api["wrong-char-card-point"]
                format_error("wrong-char-card-point", goal, subtask, eval(f"f'{tmpstr1}'"))
            if cell == "S":
                if not booked[j-1]:
                    tmpstr1=api["wrong-S"]
                    format_error("wrong-S", goal, subtask, eval(f"f'{tmpstr1}'"))
                # Bug fix: consume the pending vertical domino so the column
                # is free again for the rows below (the original never
                # updated booked at all).
                booked[j-1] = False
            if cell == "E" and (j == 1 or risp_line[j-2] != "W"):
                # Bug fix: an "E" must be preceded by a "W" on its left; the
                # original compared the cell against itself (risp_line[j-1]),
                # which made every "E" an error.
                tmpstr1=api["wrong-E"]
                format_error("wrong-E", goal, subtask, eval(f"f'{tmpstr1}'"))
            if cell == "W" and (j == n or risp_line[j] != "E"):
                # Bug fix: a "W" must be followed by an "E" on its right; the
                # original read one cell too far (risp_line[j+1]).
                tmpstr1=api["wrong-W"]
                format_error("wrong-W", goal, subtask, eval(f"f'{tmpstr1}'"))
            if cell == "N":
                if i == m:
                    tmpstr1=api["wrong-N"]
                    format_error("wrong-N", goal, subtask, eval(f"f'{tmpstr1}'"))
                # Bug fix: the original used "==", a no-op comparison instead
                # of an assignment, so the booking was never recorded.
                booked[j-1] = True
    solution_perfect("perfect2-challenge", goal, subtask)
#tmpstr1=api["goal2-task3"]
#if args.goal==2 and args.subtask > 2:
#    internal_error("goal2-task3", eval(f"f'{tmpstr1}'"))
# Dispatch on the submitted goal: goal 1 is the yes/no decision table,
# goal 2 is an explicit tiling.
if args.goal==1:
    check_decision(args.goal, args.subtask)
else:
    check_tiling(args.goal, args.subtask)
| romeorizzi/TALight | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/eval_submission.py | eval_submission.py | py | 10,376 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "colorama.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_nu... |
12772839204 | import os
import base64
import json
import logging
from aws_kinesis_agg.deaggregator import deaggregate_records
from src.consumers.mysql_consumer import MySQLConsumer
from src.utils import get_secret
logger = logging.getLogger()
logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
def handle_event(event, context):
    """Lambda entry point: replay Maxwell CDC rows from Kinesis into MySQL.

    Each deaggregated Kinesis record carries a base64-encoded JSON payload
    produced by Maxwell. Credentials are fetched once per invocation from
    Secrets Manager, and a single MySQL consumer is reused for the whole
    batch; if the secret is missing the batch is skipped.
    """
    raw_records = event["Records"]
    records = deaggregate_records(raw_records)
    mysql = None
    secret_string = None
    processed = 0
    for record in records:
        payload = json.loads(base64.b64decode(record["kinesis"]["data"]).decode())  # noqa
        if secret_string is None:
            try:
                secret_string = get_secret(f"/maxwell/{os.environ.get('CLUSTER_NAME')}")  # noqa
            except Exception:
                # Bug fix: logger.warn() is a deprecated alias of warning().
                logger.warning(f"No secret found for table, ignoring. Cluster: /maxwell/{os.environ.get('CLUSTER_NAME')}")
                return
        if mysql is None:
            mysql = MySQLConsumer(payload["database"], secret_string)
            logger.info("Processing records for: {}".format(payload["database"]))  # noqa
        mysql.process_row(payload)
        processed += 1
    logger.info("Number of records processed: {} ".format(str(processed)))
    # Bug fix: an empty batch used to crash here with AttributeError
    # because mysql was still None.
    if mysql is not None:
        mysql.close()
| troybESM/maxwell-kinesis-consumer | src/handlers/maxwell_kinesis_mysql.py | maxwell_kinesis_mysql.py | py | 1,216 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "aws_kinesis_agg.deagg... |
32752270722 | import tempfile
import unittest
import pytest
from os import environ
from os.path import join, isdir, getmtime
from time import time
from selenium.webdriver.common.timeouts import Timeouts
from selenium.common.exceptions import TimeoutException
from tbselenium import common as cm
from tbselenium.test import TBB_PATH
from tbselenium.test.fixtures import TBDriverFixture
from selenium.webdriver.common.utils import free_port
from tbselenium.utils import is_busy
class TBDriverTest(unittest.TestCase):
    """End-to-end checks for the Tor Browser driver against live services."""

    def setUp(self):
        self.tb_driver = TBDriverFixture(TBB_PATH)

    def tearDown(self):
        self.tb_driver.quit()

    def test_should_load_check_tpo(self):
        # check.torproject.org confirms the browser exits through Tor.
        congrats = "Congratulations. This browser is configured to use Tor."
        self.tb_driver.load_url_ensure(cm.CHECK_TPO_URL)
        status = self.tb_driver.find_element_by("h1.on")
        self.assertEqual(status.text, congrats)

    def test_should_load_hidden_service(self):
        # https://support.torproject.org/onionservices/v2-deprecation/index.html
        TPO_V3_ONION_URL = "http://2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion/"  # noqa
        self.tb_driver.load_url_ensure(TPO_V3_ONION_URL, wait_for_page_body=True)
        self.assertEqual(
            'Tor Project | Anonymity Online',
            self.tb_driver.title)

    def test_should_check_environ_in_prepend(self):
        # Restarting the driver must not duplicate the TBB dir in $PATH.
        self.tb_driver.quit()
        self.tb_driver = TBDriverFixture(TBB_PATH)
        paths = environ["PATH"].split(':')
        tbbpath_count = paths.count(self.tb_driver.tbb_browser_dir)
        self.assertEqual(tbbpath_count, 1)

    def test_should_set_timeouts(self):
        # A very low page-load timeout should abort the load quickly
        # with a TimeoutException.
        LOW_PAGE_LOAD_LIMIT = 0.05
        self.tb_driver.timeouts = Timeouts(page_load=LOW_PAGE_LOAD_LIMIT)
        timed_out = False
        t_before_load = time()
        try:
            self.tb_driver.load_url(cm.CHECK_TPO_URL)
        except TimeoutException:
            timed_out = True
        finally:
            t_spent = time() - t_before_load
        # delta=1: generous slack, the timeout is enforced by the browser.
        self.assertAlmostEqual(t_spent, LOW_PAGE_LOAD_LIMIT, delta=1)
        assert timed_out
class TBDriverCleanUp(unittest.TestCase):
    """Verify that quitting the driver releases all of its resources."""

    def setUp(self):
        self.tb_driver = TBDriverFixture(TBB_PATH)

    def test_should_terminate_geckodriver_process_on_quit(self):
        # poll() is None while the geckodriver child process is running.
        driver = self.tb_driver
        geckodriver_process = driver.service.process
        self.assertEqual(geckodriver_process.poll(), None)
        driver.quit()
        self.assertNotEqual(geckodriver_process.poll(), None)

    def test_should_remove_profile_dirs_on_quit(self):
        # The temporary browser profile directory must be deleted on quit().
        temp_profile_dir = self.tb_driver.temp_profile_dir
        self.assertTrue(isdir(temp_profile_dir))
        self.tb_driver.quit()
        self.assertFalse(isdir(temp_profile_dir))
class TBDriverTorDataDir(unittest.TestCase):
    """Checks around using a custom Tor data directory."""
    # Path of the Tor data directory bundled with the TBB.
    TOR_DATA_PATH = join(TBB_PATH, cm.DEFAULT_TOR_DATA_PATH)

    @pytest.mark.skipif(cm.TRAVIS, reason="Requires Tor bootstrap,"
                        "unreliable on Travis")
    def test_temp_tor_data_dir(self):
        """Tor data directory in TBB should not be modified if
        we use a separate tor_data_dir.
        """
        tmp_dir = tempfile.mkdtemp()
        # Compare mtimes before/after to detect writes to the bundled dir.
        mod_time_before = getmtime(self.TOR_DATA_PATH)
        with TBDriverFixture(TBB_PATH, tor_data_dir=tmp_dir) as driver:
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TOR_DATA_PATH)
        self.assertEqual(mod_time_before, mod_time_after)
class TBDriverProfile(unittest.TestCase):
    """Checks that a caller-supplied profile directory is honored and the
    bundled TBB profile is left untouched."""
    # Path of the browser profile bundled with the TBB.
    TBB_PROFILE_PATH = join(TBB_PATH, cm.DEFAULT_TBB_PROFILE_PATH)

    def test_custom_profile_and_tbb_path(self):
        """Make sure we use the right profile directory when the TBB
        path and profile path is provided.
        """
        tmp_dir = tempfile.mkdtemp()
        # mtime comparison detects any write to the bundled profile.
        mod_time_before = getmtime(self.TBB_PROFILE_PATH)
        with TBDriverFixture(
                TBB_PATH, tbb_profile_path=tmp_dir,
                use_custom_profile=True) as driver:
            assert isdir(tmp_dir)
            assert driver.temp_profile_dir == tmp_dir
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TBB_PROFILE_PATH)
        self.assertEqual(mod_time_before, mod_time_after)

    def test_custom_profile_and_binary(self):
        """Make sure we use the right directory when a binary
        and profile is provided.
        """
        tmp_dir = tempfile.mkdtemp()
        fx_binary = join(TBB_PATH, cm.DEFAULT_TBB_FX_BINARY_PATH)
        mod_time_before = getmtime(self.TBB_PROFILE_PATH)
        with TBDriverFixture(
                tbb_fx_binary_path=fx_binary, tbb_profile_path=tmp_dir,
                use_custom_profile=True) as driver:
            assert isdir(tmp_dir)
            assert driver.temp_profile_dir == tmp_dir
            driver.load_url_ensure(cm.CHECK_TPO_URL)
        mod_time_after = getmtime(self.TBB_PROFILE_PATH)
        self.assertEqual(mod_time_before, mod_time_after)
class TBDriverCustomGeckoDriverPort(unittest.TestCase):
    """Checks for running geckodriver on a caller-chosen port."""

    def test_should_accept_custom_geckodriver_port(self):
        """Make sure we accept a custom port number to run geckodriver on."""
        random_port = free_port()
        with TBDriverFixture(TBB_PATH, geckodriver_port=random_port) as driver:
            driver.load_url_ensure(cm.ABOUT_TOR_URL)
            self.assertTrue(is_busy(random_port))  # check if the port is used
        # check if the port is closed after we quit
        self.assertFalse(is_busy(random_port))


if __name__ == "__main__":
    unittest.main()
| webfp/tor-browser-selenium | tbselenium/test/test_tbdriver.py | test_tbdriver.py | py | 5,565 | python | en | code | 483 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tbselenium.test.fixtures.TBDriverFixture",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tbselenium.test.TBB_PATH",
"line_number": 19,
"usage_type": "argument"
... |
7628041943 | from django.test import TestCase
from api import models
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from exercise.serializers import TagSerializer
TAGS_URL = reverse('exercise:tag-list')
#help funcs
# def create_user(email='user@example.com', password='testpass123'):
# """Create and return a user."""
# return get_user_model().objects.create_user(email=email, password=password)
def tag_url(tag_id):
    """Return the detail URL for the tag with the given primary key."""
    return reverse('exercise:tag-detail', args=[tag_id])
class TagModelAPITest(TestCase):
    """API tests for the Tag endpoints: list, retrieve, update, delete."""

    def setUp(self):
        # self.user = create_user()
        self.client = APIClient()

    def test_retrive_tags(self):
        # The list endpoint returns all tags, serialized, ordered by -name.
        models.Tag.objects.create(name='chest')
        models.Tag.objects.create(name='back')
        result = self.client.get(TAGS_URL)
        tags = models.Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        self.assertEqual(result.data, serializer.data)

    def test_tags_limited_to_user(self):
        # NOTE(review): despite the name, this only checks the first
        # returned tag's name -- no per-user filtering is exercised here.
        models.Tag.objects.create(name='shoulders')
        tag = models.Tag.objects.create(name='shoulders')
        result = self.client.get(TAGS_URL)
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        self.assertEqual(result.data[0]['name'], tag.name)

    def test_updating_tag(self):
        # PATCH to the detail URL renames the tag in the database.
        tag = models.Tag.objects.create(name='chest')
        credentails = {'name': 'chest updated'}
        url = tag_url(tag.id)
        result = self.client.patch(url, data=credentails)
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        tag.refresh_from_db()
        self.assertEqual(tag.name, credentails['name'])

    def test_deleting_tag(self):
        # DELETE removes the tag entirely.
        tag = models.Tag.objects.create(name='chest')
        url = tag_url(tag.id)
        result = self.client.delete(url)
        self.assertEqual(result.status_code, status.HTTP_204_NO_CONTENT)
        self.assertFalse(models.Tag.objects.filter().exists())
| Mgalazyn/gym_api-drf | app/tests/test_tag_api.py | test_tag_api.py | py | 2,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.reverse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "rest_fra... |
27756843887 | import socket
import json
def main():
    """Run a minimal TCP server that prints JSON messages from clients.

    Listens on localhost:8000; for every connection, reads the whole
    stream until the client closes, decodes it as UTF-8 JSON, and prints
    the "message" field. Malformed JSON is silently skipped.
    """
    family = socket.AF_INET
    protocol = socket.SOCK_STREAM  # TCP
    with socket.socket(family, protocol) as server:
        # Allow quick restarts of the server on the same port.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(("localhost", 8000))
        server.listen()
        # A short timeout keeps accept()/recv() from blocking forever.
        server.settimeout(1)
        while True:
            try:
                client, address = server.accept()
            except socket.timeout:
                continue
            print(f"Connected from: {address[0]}")
            with client:
                chunks = []
                while True:
                    try:
                        chunk = client.recv(4096)
                    except socket.timeout:
                        continue
                    if not chunk:
                        break
                    chunks.append(chunk)
                # Decode the collected byte chunks as UTF-8 and parse JSON.
                text = b"".join(chunks).decode("utf-8")
                try:
                    parsed = json.loads(text)
                except json.JSONDecodeError:
                    continue
                print(f"The message: {parsed['message']}")


if __name__ == "__main__":
    main()
| ibrahimhalilbayat/data_engineering_diary | Sockets/tcp_server.py | tcp_server.py | py | 1,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket.AF_INET",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "socke... |
43906506457 | """
Methods for pyspectrumscale.Api that deal with filesets
"""
from typing import Union
import json
def get_fileset(
self,
filesystem: Union[str, None],
fileset: Union[str, None]=None,
allfields: Union[bool, None]=None
):
"""
@brief List all filesets or return a specific fileset from a filesystem
@param self The object
@param filesystem The filesystem name, default None, which returns all filesystems
@return The request response as a Response.requests object
"""
params = {}
if allfields is not None:
params['fields'] = ':all:'
if fileset is not None:
commandurl = "%s/filesystems/%s/filesets/%s" % (
self._baseurl,
filesystem,
fileset
)
else:
commandurl = "%s/filesystems/%s/filesets" % (
self._baseurl,
filesystem
)
return self._get(
commandurl,
params=params
)
def fileset(
        self,
        filesystem: str,
        fileset: Union[str, None]=None,
        allfields: Union[bool, None]=None,
        acl: bool=False,
        quota: bool=False,
        owner: bool=False,
        everything: bool=False
):
    """
    @brief Return a specific fileset (or all filesets) of a filesystem
           as JSON with the HTTP response stripped away.
    @param self The object
    @param filesystem The filesystem
    @param fileset The fileset; None (default) returns all filesets
    @param allfields When not None, request every field
    @param acl When True, attach each fileset's ACL under config['acl']
    @param quota Accepted for symmetry; NOTE(review): not implemented below
    @param owner Accepted for symmetry; NOTE(review): not implemented below
    @param everything Shortcut that switches on acl, quota, owner, allfields
    @return A fileset dict, a list of fileset dicts, or None on HTTP failure
    """
    # everything overrides all the other arguments
    if everything:
        acl = True
        quota = True
        owner = True
        allfields = True
    response = None
    fsresponse = self.get_fileset(
        filesystem=filesystem,
        fileset=fileset,
        allfields=allfields
    )
    if fsresponse.ok:
        response = fsresponse.json()['filesets']
        if acl or quota or owner:
            updatedfs = []
            for fs in response:
                if acl:
                    # Fetch and embed the ACL of the fileset's junction path.
                    fsacl = self.acl(
                        filesystem=fs['filesystemName'],
                        path=fs['config']['path'],
                        allfields=allfields
                    )
                    if fsacl:
                        fs['config']['acl'] = fsacl
                updatedfs.append(fs)
            response = updatedfs
        # If it's a single element list, just return the element
        if len(response) == 1:
            response = response[0]
    return response
def filesets(
        self,
        filesystems: Union[str, list, None]=None,
        filesets: Union[str, list, None]=None,
        allfields: Union[bool, None]=None,
        acl: bool=False,
        quota: bool=False,
        owner: bool=False,
        everything: bool=False
):
    """
    @brief Return the matching filesets as JSON with the response stripped
           away, recursing over lists of filesystems and filesets.
    @param self The object
    @param filesystems A filesystem name, a list of names, or None for all
    @param filesets A fileset name, a list of names, or None for all
    @param allfields When not None, request every field
    @param acl/quota/owner/everything Passed through to fileset()
    @return A fileset dict, a list of fileset dicts, or None for no match
    """
    response = []
    if filesystems is None:
        # No filesystem given: expand to every filesystem on the cluster.
        # NOTE(review): acl/quota/owner/everything are not forwarded on
        # this branch -- confirm whether that is intentional.
        response = self.filesets(
            filesystems=self.list_filesystems(),
            filesets=filesets,
            allfields=allfields
        )
    elif isinstance(filesystems, list):
        # A list of filesystems: recurse once per filesystem and merge.
        for fs in filesystems:
            fsresponse = self.filesets(
                filesystems=fs,
                filesets=filesets,
                allfields=allfields,
                acl=acl,
                owner=owner,
                quota=quota,
                everything=everything
            )
            if isinstance(fsresponse, list):
                response += fsresponse
            else:
                if fsresponse is not None:
                    response.append(fsresponse)
    else:
        if isinstance(filesets, list):
            # Single filesystem, list of filesets: one call per fileset.
            for fs in filesets:
                fsresponse = self.fileset(
                    filesystem=filesystems,
                    fileset=fs,
                    allfields=allfields,
                    acl=acl,
                    owner=owner,
                    quota=quota,
                    everything=everything
                )
                if isinstance(fsresponse, list):
                    response += fsresponse
                else:
                    if fsresponse is not None:
                        response.append(fsresponse)
        else:
            # Single filesystem, single (or no) fileset.
            fsresponse = self.fileset(
                filesystem=filesystems,
                fileset=filesets,
                allfields=allfields,
                acl=acl,
                owner=owner,
                quota=quota,
                everything=everything
            )
            if isinstance(fsresponse, list):
                response += fsresponse
            else:
                if fsresponse is not None:
                    response.append(fsresponse)
    # Unwrap single-element lists; collapse empty results to None.
    if isinstance(response, list):
        if len(response) == 1:
            response = response[0]
        if not response:
            response = None
    return response
def list_filesets(
self,
filesystem: Union[str, None],
fileset: Union[str, None]=None
):
"""
@brief This methdo returns the list of matching filesets as JSON with the response stripped away.
@param self The object
@param filesystem The filesystem
@param fileset The fileset
@return { description_of_the_return_value }
"""
response = self.fileset(
filesystem,
fileset
)
filesets = []
if isinstance(response, list):
for fileset in response:
filesets.append(fileset['filesetName'])
else:
filesets.append(response['filesetName'])
return filesets
## WRITABLE METHODS
## The following methods can create changes in the Spectrum Scale Filesystem
## Make sure that in all cases that they respect the dry-run flag
# Create a prepared request to create a fileset
def preppost_fileset(
self,
filesystem: type=str,
fileset: type=str,
path: type=str,
owner: type=str,
group: type=str,
permissions: type=str,
permissionchangemode: str='chmodAndUpdateAcl',
parent: type=str,
comment: type=str
):
"""
@brief Creates a pepared request to POST creation of a fileset. While this
is not a writable method in itself, if it is sent to the self._session
it will be executed. Using the self.send() method is recommended
@param self The object
@param filesystem The filesystem
@param fileset The fileset
@param path The path
@param owner The owner
@param group The group
@param permissions The permissions
@param permissionchangemode The permissionchangemode
@param parent The parent
@param comment The comment
@return a requests.PreparedRequest object
"""
prepresponse = None
commandurl = (
"%s/filesystems/%s/filesets" % (
self._baseurl,
filesystem
)
)
data = {
'filesetName': fileset,
'path': path,
'owner': ("%s:%s" % (owner, group)),
'permissions': permissions,
'permissionChangeMode': permissionchangemode,
'inodeSpace': parent,
'comment': comment
}
prepresponse = self._preppost(
commandurl=commandurl,
data=data
)
return prepresponse
| Aethylred/pyspectrumscale | pyspectrumscale/Api/_fileset.py | _fileset.py | py | 7,753 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_numbe... |
6347327566 | import numpy as np
import torch
from homan.utils.nmr_renderer import OrthographicRenderer, PerspectiveRenderer
import neural_renderer as nr
def visualize_perspective(image, predictions, K=None):
    """Overlay the predicted mesh on *image* using a perspective camera.

    predictions must provide "verts" and "faces"; K is camera intrinsics
    (assumed 3x3 -- TODO confirm). Returns the composite as uint8.
    Requires a CUDA device.
    """
    perspect_renderer = PerspectiveRenderer(image_size=max(image.shape))
    new_image = image.copy()
    # 2 * factor to be investigated !
    verts = 2 * torch.Tensor(predictions["verts"]).cuda().unsqueeze(0)
    faces = torch.Tensor(predictions["faces"]).cuda().unsqueeze(0)
    K = torch.Tensor(K).cuda().unsqueeze(0)
    # No additional translation is applied to the vertices.
    trans = torch.Tensor([0, 0, 0]).cuda().unsqueeze(0)
    for i in range(len(verts)):
        v = verts[i:i + 1]
        new_image = perspect_renderer(vertices=v,
                                      faces=faces,
                                      color_name="blue",
                                      image=new_image,
                                      translation=trans,
                                      K=K)
    # Renderer output is in [0, 1]; convert back to 8-bit.
    return (new_image * 255).astype(np.uint8)
def visualize_orthographic(image, predictions):
    """Overlay the predicted mesh on *image* using an orthographic camera.

    predictions must provide "verts", "faces" and per-mesh "cams".
    Returns the composite as uint8. Requires a CUDA device.
    """
    ortho_renderer = OrthographicRenderer(image_size=max(image.shape))
    new_image = image.copy()
    verts = torch.Tensor(predictions["verts"]).cuda().unsqueeze(0)
    faces = torch.Tensor(predictions["faces"]).cuda().unsqueeze(0)
    cams = torch.Tensor(predictions["cams"]).cuda().unsqueeze(0)
    for i in range(len(verts)):
        v = verts[i:i + 1]
        cam = cams[i:i + 1]
        new_image = ortho_renderer(vertices=v,
                                   faces=faces,
                                   cam=cam,
                                   color_name="blue",
                                   image=new_image)
    # Renderer output is in [0, 1]; convert back to 8-bit.
    return (new_image * 255).astype(np.uint8)
def visualize_hand_object(model,
                          images,
                          verts_hand_gt=None,
                          verts_object_gt=None,
                          dist=3,
                          viz_len=7,
                          init=False,
                          gt_only=False,
                          image_size=640,
                          max_in_batch=2):
    """Render frontal and rotated composites of the model's hand/object meshes.

    Chooses between model.render() (predictions only), model.render_gt()
    (ground truth only, when gt_only) and model.render_with_gt() (both),
    composites the frontal renders over *images*, then renders the same
    scene again with rotate=True for a second view.

    Returns (stacked frontal composites as uint8, rotated renders as uint8).
    Requires a CUDA device.
    """
    if gt_only:
        rends, masks = model.render_gt(
            model.renderer,
            verts_hand_gt=verts_hand_gt,
            verts_object_gt=verts_object_gt,
            viz_len=viz_len,
            max_in_batch=max_in_batch,
        )
    elif verts_hand_gt is None:
        rends, masks = model.render(model.renderer,
                                    viz_len=viz_len,
                                    max_in_batch=max_in_batch)
    else:
        rends, masks = model.render_with_gt(model.renderer,
                                            verts_hand_gt=verts_hand_gt,
                                            verts_object_gt=verts_object_gt,
                                            viz_len=viz_len,
                                            init=init,
                                            max_in_batch=max_in_batch)
    # NOTE(review): bs is computed but never used.
    bs = rends.shape[0]
    # Rendered frontal image
    new_images = []
    for image, rend, mask in zip(images, rends, masks):
        # Normalize to [0, 1] if the image is in 0-255 range.
        if image.max() > 1:
            image = image / 255.0
        h, w, c = image.shape
        L = max(h, w)
        # Pad to a square canvas, paste the render where mask is set,
        # then crop back to the original size.
        new_image = np.pad(image.copy(), ((0, L - h), (0, L - w), (0, 0)))
        new_image[mask] = rend[mask]
        new_image = (new_image[:h, :w] * 255).astype(np.uint8)
        new_images.append(new_image)
    # Rendered top-down image
    theta = 1.3
    x, y = np.cos(theta), np.sin(theta)
    obj_verts, _ = model.get_verts_object()
    mx, my, mz = obj_verts.mean(dim=(0, 1)).detach().cpu().numpy()
    K = model.renderer.K
    # Rotation about the x-axis by theta, camera offset by *dist* in y.
    R2 = torch.cuda.FloatTensor([[[1, 0, 0], [0, x, -y], [0, y, x]]])
    t2 = torch.cuda.FloatTensor([mx, my + dist, mz])
    # NOTE(review): top_renderer is configured here but the render calls
    # below still use model.renderer -- confirm whether this is intended.
    top_renderer = nr.renderer.Renderer(image_size=image_size,
                                        K=K,
                                        R=R2,
                                        t=t2,
                                        orig_size=1)
    top_renderer.background_color = [1, 1, 1]
    top_renderer.light_direction = [1, 0.5, 1]
    top_renderer.light_intensity_direction = 0.3
    top_renderer.light_intensity_ambient = 0.5
    top_renderer.background_color = [1, 1, 1]
    if verts_hand_gt is None:
        top_down, _ = model.render(model.renderer,
                                   rotate=True,
                                   viz_len=viz_len,
                                   max_in_batch=max_in_batch)
    elif gt_only:
        top_down, _ = model.render_gt(
            model.renderer,
            verts_hand_gt=verts_hand_gt,
            verts_object_gt=verts_object_gt,
            viz_len=viz_len,
            rotate=True,
            max_in_batch=max_in_batch,
        )
    else:
        top_down, _ = model.render_with_gt(model.renderer,
                                           verts_hand_gt=verts_hand_gt,
                                           verts_object_gt=verts_object_gt,
                                           rotate=True,
                                           viz_len=viz_len,
                                           init=init,
                                           max_in_batch=max_in_batch)
    top_down = (top_down * 255).astype(np.uint8)
    return np.stack(new_images), top_down
| hassony2/homan | homan/visualize.py | visualize.py | py | 5,329 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "homan.utils.nmr_renderer.PerspectiveRenderer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": ... |
26771615241 | #!/usr/bin/python3
""" Display the id of a Github user using Github's API """
import requests
import sys
def get_hub():
    """Print the authenticated GitHub user's numeric id.

    Credentials come from the command line: sys.argv[1] is the username
    and sys.argv[2] the personal access token (used as the Basic-auth
    password against https://api.github.com/user).
    """
    # NOTE(review): no timeout is passed, so this request can block
    # indefinitely if api.github.com does not respond.
    req = requests.get("https://api.github.com/user",
                       auth=(sys.argv[1], sys.argv[2]))
    # .get('id') prints "None" when authentication failed and the JSON
    # response therefore has no 'id' key.
    print(req.json().get('id'))
if __name__ == '__main__':
    get_hub()
| Alouie412/holbertonschool-higher_level_programming | 0x11-python-network_1/10-my_github.py | 10-my_github.py | py | 385 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
}
] |
18832494510 | from collections import defaultdict
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Return True iff t is an anagram of s (same multiset of chars)."""
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in t:
            counts[ch] = counts.get(ch, 0) - 1
        # Every character balances out exactly when the two strings are
        # permutations of each other.
        return all(n == 0 for n in counts.values())
s = Solution()
print(s.isAnagram("anagram", "nagaram"))
print(s.isAnagram("rat", "car")) | parkjuida/leetcode | python/valid_anagram.py | valid_anagram.py | py | 434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
}
] |
12982777466 | import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d
from scipy.ndimage.morphology import distance_transform_edt
from scipy.stats import linregress
from skimage.future import graph
from skimage.measure import regionprops
from sklearn.linear_model import LinearRegression
def get_myo_offset(idx, tf, n=70):
    """Myosin-weighted mean distance from the boundary of cell `idx`.

    Over a band of width `n` pixels around the cell at time frame `tf`,
    returns sum(myosin * distance) / sum(myosin) -- i.e. how far from the
    cell edge the surrounding myosin sits, on average.  Reads the
    module-level `segmentation` and `myosin` stacks.
    """
    no_cell_mask = segmentation[tf] != idx
    # Distance (px) of every non-cell pixel from the cell boundary.
    dist_tr = distance_transform_edt(no_cell_mask)
    # Keep only the band of pixels within n px of the cell, outside it.
    dist_tr_around = dist_tr * (dist_tr <= n) * no_cell_mask
    mask_around = dist_tr_around > 0
    myo_around = myosin[tf] * mask_around
    weighed_myo = myosin[tf] * dist_tr_around
    return np.sum(weighed_myo) / np.sum(myo_around)
def get_myo_around(idx, tf, n=10, exclude=None, cut=None):
    """Mean myosin concentration in an n-pixel band around cell `idx`.

    `tf` is the time frame.  `exclude`/`cut` optionally remove part of the
    band via `cut_doughnut` (`cut` is the line axis 'h'/'v', `exclude`
    selects 'in'/'out' relative to the cell's bounding box).  The 0.0148
    factor converts the pixel count to physical area, so the result is
    intensity per unit area.
    """
    no_cell_mask = segmentation[tf] != idx
    dist_tr = distance_transform_edt(no_cell_mask)
    mask_around = (dist_tr <= n) * no_cell_mask
    if exclude is not None:
        assert cut is not None
        # FIX: the original bound cut_doughnut's return value to a dead
        # name (`myo_around`) that was immediately overwritten, relying
        # purely on cut_doughnut mutating mask_around in place.  Rebinding
        # mask_around makes the data flow explicit; behaviour is unchanged
        # because cut_doughnut mutates and returns its first argument.
        mask_around = cut_doughnut(mask_around, np.invert(no_cell_mask), cut, exclude)
    myo_around = myosin[tf] * mask_around
    return np.sum(myo_around) / (np.sum(mask_around) * 0.0148)
def show_myo(idx, tf, n=70):
    """Open a napari viewer showing cell `idx` at frame `tf`.

    Displays the cell contour plus the contour of the n-pixel band, and
    the myosin signal inside the cell and in the band.  Visualisation
    only; returns nothing.
    """
    # BUG FIX: napari was never imported at module level, so calling this
    # function raised NameError.  Import locally to keep the heavy GUI
    # dependency off the module import path.
    import napari

    no_cell_mask = segmentation[tf] != idx
    cell_mask = segmentation[tf] == idx
    dist_tr = distance_transform_edt(no_cell_mask)
    cell_countour = (dist_tr <= 2) * no_cell_mask
    myo_countour = (dist_tr < n + 1) * (dist_tr > n - 1)
    mask_around = (dist_tr <= n) * no_cell_mask
    myo_around = myosin[tf] * mask_around
    myo_in = myosin[tf] * cell_mask
    viewer = napari.Viewer()
    viewer.add_image(cell_countour + myo_countour, blending='additive')
    viewer.add_image(myo_around + myo_in, blending='additive')
def cut_doughnut(myo_mask, cell_mask, line='h', excl='in'):
    """Zero out part of a 'doughnut' mask relative to the cell bounding box.

    `myo_mask` is MODIFIED IN PLACE (and also returned).  `line` selects
    the cut axis ('h' = rows, 'v' = columns); `excl` selects whether the
    strip spanning the cell ('in') or everything outside it ('out') is
    removed.  Relies on skimage.measure.regionprops (module-level import).
    """
    # Bounding box of the cell: (min_row, min_col, max_row, max_col).
    x_min, y_min, x_max, y_max = regionprops(cell_mask.astype(int))[0]['bbox']
    if line == 'h' and excl == 'in':
        myo_mask[x_min:x_max] = 0
    if line == 'h' and excl == 'out':
        myo_mask[:x_min] = 0
        myo_mask[x_max:] = 0
    if line == 'v' and excl == 'in':
        myo_mask[:, y_min:y_max] = 0
    if line == 'v' and excl == 'out':
        myo_mask[:, :y_min] = 0
        myo_mask[:, y_max:] = 0
    return myo_mask
def get_myo_in(idx, tf):
    """Mean myosin concentration inside cell `idx` at frame `tf`
    (0.0148 converts the pixel count to physical area)."""
    cell_mask = segmentation[tf] == idx
    myo_in = myosin[tf] * cell_mask
    return np.sum(myo_in) / (np.sum(cell_mask) * 0.0148)
def get_area(idx, tf):
    """Area (pixel count) of cell `idx` at time frame `tf`."""
    return np.sum(segmentation[tf] == idx)
def smooth(values, sigma=3, tolerance=0.1):
    """De-spike and Gaussian-smooth a 1-D series.

    Interior samples whose ratio to the mean of their two neighbours falls
    outside 1 +/- tolerance (likely segmentation-merge artefacts) are
    replaced by that mean; the series is then filtered with
    gaussian_filter1d and the first and last samples are dropped.
    """
    series = np.array(values)
    # Forward pass: each replacement may use an already-corrected left
    # neighbour, matching the sequential behaviour of the original code.
    for idx in range(1, len(series) - 1):
        neighbour_mean = (series[idx - 1] + series[idx + 1]) / 2
        ratio = series[idx] / neighbour_mean
        if not ((1 - tolerance) < ratio < (1 + tolerance)):
            series[idx] = neighbour_mean
    series = gaussian_filter1d(series, sigma=sigma)
    return series[1:-1]
def get_size_and_myo_dict(myo_s=3, area_s=3):
    """Build per-cell time series of myosin concentration, area and offset.

    Returns three dicts keyed by cell id, each mapping time point ->
    smoothed value.  `myo_s`/`area_s` are Gaussian sigmas for the myosin
    and area/offset series.  Cells visible in fewer than 5 frames are
    skipped; `smooth` trims one frame at each end, hence tps[1:-1].
    Reads the module-level `segmentation` stack.
    """
    all_myo_conc = {}
    all_sizes = {}
    all_offsets = {}
    idx2row = {}  # (unused)
    for idx in np.unique(segmentation):
        if idx == 0: continue  # 0 is background
        # Time points at which this cell id appears.
        tps = [tp for tp, segm_tp in enumerate(segmentation) if (idx in segm_tp)]
        if len(tps) < 5: continue
        myo = [get_myo_in(idx, tp) for tp in tps]
        myo = smooth(myo, sigma=myo_s, tolerance=1)
        offset = [get_myo_offset(idx, tp) for tp in tps]
        offset = smooth(offset, sigma=area_s, tolerance=1)
        area = [get_area(idx, tp) for tp in tps]
        area = smooth(area, sigma=area_s, tolerance=0.1)
        all_myo_conc[idx] = {t: m for t, m in zip(tps[1:-1], myo)}
        all_sizes[idx] = {t: s for t, s in zip(tps[1:-1], area)}
        all_offsets[idx] = {t: o for t, o in zip(tps[1:-1], offset)}
    return all_myo_conc, all_sizes, all_offsets
def get_myo_time_points(myo_conc, sizes, offs, ex=None, plane=None):
    """Flatten the per-cell dicts into an array of per-transition samples.

    Each row is [relative size change tp->tp+1, cellular myosin,
    surrounding myosin (70-px band), myosin offset, cell id, time point].
    `ex`/`plane` are forwarded to get_myo_around as exclude/cut.
    """
    points_list = []
    for idx in myo_conc.keys():
        tps = myo_conc[idx].keys()
        for tp in range(min(tps), max(tps) - 1):
            # Skip gaps: both tp and tp+1 must exist to form a transition.
            if tp not in tps or tp+1 not in tps: continue
            size_change = sizes[idx][tp + 1] / sizes[idx][tp]
            cell_myo = myo_conc[idx][tp]
            nbr_myo = get_myo_around(idx, tp, 70, ex, plane)
            offset = offs[idx][tp]
            points_list.append([size_change, cell_myo, nbr_myo, offset, idx, tp])
    return np.array(points_list)
def train_regr(data):
    """Fit a linear model on a random half of `data`; return the R^2 score
    on the other half.

    Columns 1:3 (cellular and surrounding myosin) are the features and
    column 0 (size change) the target.  NOTE: `data` is shuffled IN PLACE.
    """
    np.random.shuffle(data)
    half = int(len(data) / 2)
    data, labels = data[:, 1:3], data[:, 0]
    # NOTE(review): LinearRegression(normalize=True) was deprecated in
    # scikit-learn 1.0 and removed in 1.2 -- this call fails on current
    # versions; confirm the pinned sklearn version.
    linear_regr = LinearRegression(normalize=True)
    linear_regr.fit(data[:half], labels[:half])
    score = linear_regr.score(data[half:], labels[half:])
    return score
def get_best_regr(data, n=100):
    """Run train_regr `n` times and print the max and mean R^2 scores.

    Returns nothing (despite the name, no model is kept).  Each call
    reshuffles `data` in place via train_regr.
    """
    accuracies = [train_regr(data) for i in range(n)]
    print("Max accuracy is", np.max(accuracies))
    print("Mean accuracy is", np.mean(accuracies))
data_h5 = '/home/zinchenk/work/drosophila_emryo_cells/data/img5_new.h5'
with h5py.File(data_h5, 'r') as f:
myosin = f['myosin'][3:-3]
segmentation = f['segmentation'][3:-3]
myo, area, offsets = get_size_and_myo_dict(myo_s=3, area_s=3)
to_plot = get_myo_time_points(myo, area, offsets)
get_best_regr(to_plot, 400)
fp = '/home/zinchenk/work/drosophila_emryo_cells/imgs/revision_svg/'
## the loglog plot
fig, ax = plt.subplots()
plt.scatter(to_plot[:, 1], to_plot[:, 2], c=to_plot[:, 0], cmap='RdYlBu', vmin=0.9, vmax=1.1, s=20)
ax.vlines([80000, 100000], 24000, 220000, linestyles='dotted')
ax.hlines([24000, 220000], 80000, 100000, linestyles='dotted')
plt.xlabel("[cellular myosin]", size=35)
plt.ylabel("[surrounding myosin]", size=35)
#plt.title('Embryo 5', size=35)
ax.tick_params(length=15, width=3)
ax.tick_params(length=8, width=1, which='minor')
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
plt.loglog()
cb = plt.colorbar()
for t in cb.ax.get_yticklabels():
t.set_fontsize(35)
figure = plt.gcf()
figure.set_size_inches(16, 12)
plt.savefig(fp + 'fig3j.svg', format='svg')
# the zoom in plot colored by size
plot_cutout = to_plot[(80000 < to_plot[:, 1]) & (to_plot[:, 1] < 100000)]
slope, intercept, rvalue, _, _ = linregress(plot_cutout[:, 0], plot_cutout[:, 2])
y = intercept + slope * plot_cutout[:, 0]
fig, ax = plt.subplots()
ax.plot(plot_cutout[:, 0], y, 'red', label='linear fit')
ax.scatter(plot_cutout[:, 0], plot_cutout[:, 2], s=200, c='tab:grey')
plt.xlabel("Relative size change", size=35)
plt.ylabel("Surrounding myosin", size=35)
plt.text(1.03, 40000, "Correlation={:.4f}".format(rvalue), size=35)
plt.legend(loc='upper left', fontsize=35)
ax.tick_params(length=15, width=3)
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
figure = plt.gcf()
figure.set_size_inches(16, 16)
plt.savefig(fp + 'fig3k.svg', format='svg')
# the ratio vs size change plot
exp = to_plot[np.where(to_plot[:, 0] > 1.015)]
constr = to_plot[np.where(to_plot[:, 0] < 0.985)]
middle = to_plot[np.where((to_plot[:, 0] >= 0.985) & (to_plot[:, 0] <= 1.015))]
fig, ax = plt.subplots()
ax.scatter(exp[:, 1] / exp[:, 2], exp[:, 0], c='tab:blue')
ax.scatter(constr[:, 1] / constr[:, 2], constr[:, 0], c='tab:red')
ax.scatter(middle[:, 1] / middle[:, 2], middle[:, 0], c='y')
ax.hlines(1, 0.4, 4.9, color='black')
ax.vlines(1, 0.83, 1.10, color='black')
[tick.label.set_fontsize(25) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(25) for tick in ax.yaxis.get_major_ticks()]
plt.xlabel("cellular/neighbourhood myosin ratio", size=35)
plt.ylabel("relative size change", size=35)
#plt.title('Embryo 5', size=35)
#plt.legend(loc='lower right', fontsize=25)
plt.show()
sm_range = np.arange(0.25, 5.25, 0.125)
fig, ax = plt.subplots()
plt.hist(exp[:, 1] / exp[:, 2], bins=sm_range, density=True, histtype='bar', label='Expanding', color='tab:blue', alpha=0.6)
plt.hist(constr[:, 1] / constr[:, 2], bins=sm_range, density=True, histtype='bar', label='Constricting', color='tab:red', alpha=0.6)
plt.ylabel('probability density', size=35)
plt.xlabel('cellular/neighbourhood myosin ratio', size=35)
plt.legend(loc='upper right', fontsize=25)
[tick.label.set_fontsize(25) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(25) for tick in ax.yaxis.get_major_ticks()]
#plt.title('Embryo 5', size=35)
plt.show()
# the offset vs myo in
fig, ax = plt.subplots()
plt.scatter(to_plot[:, 1], to_plot[:, 3] * 0.1217, c=to_plot[:, 0], cmap='RdYlBu', vmin=0.9, vmax=1.1, s=20)
plt.xscale('log')
plt.xlabel("[cellular myosin]", size=35)
plt.ylabel("Myosin offset in the neighbourhood", size=35)
cb = plt.colorbar()
ax.tick_params(length=15, width=3)
ax.tick_params(length=8, width=1, which='minor')
plt.xticks(fontsize=35)
plt.yticks(fontsize=35)
for t in cb.ax.get_yticklabels():
t.set_fontsize(35)
figure = plt.gcf()
figure.set_size_inches(16, 12)
plt.savefig(fp + 'fig3i.svg', format='svg')
plt.show()
| kreshuklab/drosophila_embryo_cells | scripts/predict_fate.py | predict_fate.py | py | 8,815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.ndimage.morphology.distance_transform_edt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.morphology.distance_transform_edt",
"line_number": 25,
"usage_typ... |
35603667971 | import pika
import ssl
import json
class Dev:
    """Publisher that sends one chat message to an Amazon MQ (RabbitMQ)
    broker over AMQPS."""
    def __init__(self):
        # TLS 1.2 context; ECDSA cipher suites are explicitly excluded.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        ssl_context.set_ciphers('ECDHE+AESGCM:!ECDSA')
        # SECURITY: broker username/password are hard-coded in this URL and
        # committed to source control -- the credentials should be revoked
        # and loaded from environment variables / a secrets manager.
        url = f"amqps://ryanl:842684265santos@b-b86d75fd-5111-4c3c-b62c-b999e666760a.mq.us-east-1.amazonaws.com:5671"
        parameters = pika.URLParameters(url)
        parameters.ssl_options = pika.SSLOptions(context=ssl_context)
        conexão = pika.BlockingConnection(parameters)
        self.canal = conexão.channel()
    def send(self, nome: str, logo: str, message: str, hora: str):
        # Publish one persistent (delivery_mode=2) JSON message to the
        # 'chat' exchange with routing key 'tag_mensagem', then close the
        # channel -- the instance is single-use after this call.
        mensagem = json.dumps(
            {"nome": nome, "logo": logo, "hora": hora, "mensagem": message})
        self.canal.basic_publish(exchange='chat', body=mensagem, routing_key='tag_mensagem',
                                 properties=pika.BasicProperties(delivery_mode=2))
        self.canal.close()
# cliente = Dev()
# cliente.send("fredekel", "java", "Boa tio, ficou show de bola!", "18:43")
| ryanbsdeveloper/opensource-chat | modules/chat/dev.py | dev.py | py | 993 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "ssl.SSLContext",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ssl.PROTOCOL_TLSv1_2",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pika.URLParameters",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pika.SSLOpt... |
6298326622 |
import pyttsx3
import speech_recognition as sr
import PyPDF2
from gtts import gTTS
from googletrans import Translator
from playsound import playsound
import os
assistant=pyttsx3.init("sapi5") #creation object for speak
voices=assistant.getProperty('voices') #check voices
assistant.setProperty('voice', voices[0].id) # 1 for female 0 for male
assistant.setProperty('rate',170)
def speaking(audio):
    """Speak `audio` aloud through the module-level pyttsx3 engine."""
    assistant.say(audio)  # queue the utterance
    print("")
    assistant.runAndWait()  # block until everything queued by say() has been spoken
    print("")
def command():
    """Capture one phrase from the microphone and return it lower-cased.

    Uses Google's free speech-recognition API (language en-in).  Returns
    the literal string "None" on any recognition/network failure, so
    callers must compare against that STRING, not the None singleton.
    """
    #recognizer
    command=sr.Recognizer()
    #recognize from microphone
    with sr.Microphone() as source:
        print("Listening.....!")
        command.pause_threshold=1 #Represents the minimum length of silence (in seconds) that will register as the end of a phrase. Can be changed
        audio=command.listen(source)
    try:
        print("Recognizing....!")
        ## recognize speech using Google
        query=command.recognize_google(audio,language='en-in')
        # print("You said =",query,type(query))
    except Exception as Error:
        return "None"
    return query.lower()
def reader(query):
    """Read a page of a PDF book aloud, optionally translated.

    Prompts (by voice and keyboard) for a book name under the hard-coded
    Book directory, a start page, and a target language; uses gTTS +
    playsound for translated speech, or the local engine otherwise.
    NOTE(review): the `query` parameter is never used.
    """
    speaking("Tell me name of the book")
    name=input("enter book name :")
    # NOTE(review): this path mixes '\\' and single '\' -- it only works
    # because '\D' is not a recognised escape; prefer a raw string.
    Book_dir='C:\\Users\\KABIR\Desktop\\Book' #get directory
    Books=os.listdir(Book_dir) #list all of the books
    name=name+'.pdf'
    if name in Books:
        path='C:\\Users\\KABIR\Desktop\\Book\\'+name
        os.startfile(path)
        book=open(path,'rb')
        # NOTE(review): PdfFileReader/getNumPages/getPage/extractText are
        # the legacy PyPDF2 1.x API, removed in PyPDF2 3.x -- confirm the
        # pinned version.
        pdfreader=PyPDF2.PdfFileReader(book)
        pages=pdfreader.getNumPages()
        speaking(f"Number of pages in this book are {pages}")
        speaking("Enter from which page i have to start reading")
        numpage=int(input("Enter start page : "))
        page=pdfreader.getPage(numpage)
        text=page.extractText()
        speaking("Tell me which language, i have to read")
        lang=command()
        dict_lang={'marathi':'mr','bihari':'bh','italian':'it','korean':'ko','swedish':'sw','malayalam':'ml','latin':'la','urdu':'ur','armenian':'hy','english':'en','hindi':'hi','bengali':'bn','arabic':'ar','tamli':'ta','spanish':'es','french':'fr','chinese':'zh-cn'}
        if lang in dict_lang:
            transl=Translator()
            language=dict_lang[lang]
            textlang=transl.translate(text,dest=language).text
            textm=textlang
            speech=gTTS(text=textm)  # needs network access
            try:
                speech.save("book.mp3")
                playsound("book.mp3")
            except:
                # NOTE(review): bare except simply replays the (possibly
                # stale) book.mp3 regardless of what failed.
                playsound("book.mp3")
        else:
            # Unknown language: fall back to the local TTS engine.
            speaking(text)
    else:
        speaking("No book found in your directory")
| Kabir2099/Desktop-Assistant | Book_Reader.py | Book_Reader.py | py | 2,948 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 27,
"usage_type": "call"
},
{
"api_name... |
39151761897 | import io
import json
import logging
from fdk import response
def handler(ctx, data: io.BytesIO = None):
    """Oracle Functions (fdk) entry point: return a JSON greeting.

    Expects an optional JSON body {"name": ...}; falls back to "World"
    when the body is missing or unparseable.
    """
    name = "World"
    try:
        body = json.loads(data.getvalue())
        # NOTE(review): a body without "name" sets name to None, producing
        # "Hello None" -- confirm that is acceptable.
        name = body.get("name")
    except (Exception, ValueError) as ex:
        # (Exception already covers ValueError; the tuple is redundant.)
        logging.getLogger().info('error parsing json payload: ' + str(ex))
    logging.getLogger().info("Inside Python Hello World function")
    return response.Response(
        ctx, response_data=json.dumps(
            {"message": "Hello {0}".format(name)}),
        headers={"Content-Type": "application/json"}
    )
| wlloyduw/SAAF | jupyter_workspace/platforms/oracle/hello_world/func.py | func.py | py | 576 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "io.BytesIO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"... |
30148735325 | from modules.symboltable import Symbol_table
import socket
import struct
import datetime
try:
import yara
except:
pass
symbol_table = Symbol_table()
class Node:
    """Base class for AST nodes: a payload value plus child nodes.

    Subclasses override evaluate() to return a (value, type) pair, where
    type is a string such as "int" or "string".
    """

    def __init__(self, value, children: list) -> None:
        self.value = value
        self.children: list = children

    def evaluate(self):
        """Default evaluation: no value, typed as "int"."""
        return (None, "int")
class Bin_op(Node):
    """Binary operator node.

    Evaluates both children to (value, type) pairs and combines them:
    arithmetic/logic on int operands, ==/>/< comparison on string
    operands, and "." for concatenation.  Booleans are encoded as 0/1
    ints; results are (value, "int") or (value, "string").
    """

    def __init__(self, value, children: list):
        super().__init__(value, children)

    def evaluate(self) -> Node:
        ch0 = self.children[0].evaluate()
        ch1 = self.children[1].evaluate()
        if type(self.value) == str:
            if ch0[1] == "string" or ch1[1] == "string":
                if self.value == ".":
                    # BUG FIX: was str(ch0[0][0]) + str(ch1[0]), which
                    # concatenated only the FIRST CHARACTER of the left
                    # operand whenever either side was a string.
                    return str(ch0[0]) + str(ch1[0]), "string"
                if ch0[1] == "string" and ch1[1] == "string":
                    if self.value == "==":
                        return int(ch0[0] == ch1[0]), "int"
                    if self.value == ">":
                        return int(ch0[0] > ch1[0]), "int"
                    if self.value == "<":
                        return int(ch0[0] < ch1[0]), "int"
                    else:
                        raise Exception("Error")
            if ch0[1] == "int" and ch1[1] == "int":
                if self.value == "+":
                    return ch0[0] + ch1[0], "int"
                if self.value == "-":
                    return ch0[0] - ch1[0], "int"
                if self.value == "/":
                    # Integer (floor) division by design.
                    return ch0[0] // ch1[0], "int"
                if self.value == "*":
                    return ch0[0] * ch1[0], "int"
                if self.value == "&&":
                    return int(ch0[0] and ch1[0]), "int"
                if self.value == "||":
                    return int(ch0[0] or ch1[0]), "int"
                if self.value == "==":
                    return int(ch0[0] == ch1[0]), "int"
                if self.value == ">":
                    return int(ch0[0] > ch1[0]), "int"
                if self.value == "<":
                    return int(ch0[0] < ch1[0]), "int"
                if self.value == ".":
                    return str(ch0[0]) + str(ch1[0]), "string"
                else:
                    raise Exception("Error")
            else:
                raise Exception("Error")
        # NOTE: a non-string operator value falls through and implicitly
        # returns None (behaviour preserved from the original).
class Un_op(Node):
    """Unary operator node: "+", "-" or "!" applied to one child.

    Results are (value, "int") pairs; "!" yields 0/1.
    """
    def __init__(self, value, children:list):
        super().__init__(value, children)
    def evaluate(self)->Node:
        if type(self.value) == str:
            if self.value == "+":
                return +self.children[0].evaluate()[0],"int"
            if self.value == "-":
                return -self.children[0].evaluate()[0],"int"
            if self.value == "!":
                return int(not self.children[0].evaluate()[0]),"int"
            else:
                # Unknown string operator.
                raise Exception("Error")
        # NOTE(review): a non-string operator value falls through and
        # implicitly returns None rather than raising.
class Int_val(Node):
def __init__(self, value, children:list=None): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
return self.value, "int"
class Str_val(Node):
def __init__(self, value, children:list=None): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
return self.value, "string"
class Rule_val(Node):
def __init__(self, value, children:list=None): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
return self.value, "rule"
class No_op(Node):
def __init__(self, value=None,typedef = "int", children:list=None): #Aqui
super().__init__(value, children)
self.typedef = typedef
def evaluate(self)->Node:
return None,self.typedef
class Block(Node):
def __init__(self, value=None, children:list=None): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
for c in self.children:
c.evaluate()
class Print(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
print(self.children[0].evaluate()[0])
class Scanhost(Node):
    """TCP port probe node: children are (ip, port) expression nodes.

    evaluate() returns ("open", "string") if a TCP connect succeeds
    within 1 second, otherwise ("closed", "string").
    """
    def __init__(self, value = None, children:list = []):
        # NOTE(review): the mutable default `children=[]` is shared across
        # calls -- safe only if callers never mutate it; same pattern is
        # used by the sibling node classes.
        super().__init__(value, children)
    def evaluate(self)->Node:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)  # 1-second connect timeout
        ip, port = self.children
        result = "open"
        try:
            sock.connect((ip.evaluate()[0], port.evaluate()[0]))
        except socket.error:
            result = "closed"
        finally:
            sock.close()
        return result, "string"
class Traffic(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
def parse_ethernet_frame(packet_data):
ethernet_header = struct.unpack('!6s6sH', packet_data[:14])
destination_address = ':'.join(f'{byte:02x}' for byte in ethernet_header[0])
source_address = ':'.join(f'{byte:02x}' for byte in ethernet_header[1])
ether_type = ethernet_header[2]
return destination_address, source_address, ether_type
def parse_ip_packet(packet_data):
ip_header = struct.unpack('!BBHHHBBH4s4s', packet_data[14:34])
version_ihl = ip_header[0]
version = version_ihl >> 4
ihl = (version_ihl & 0x0F) * 4
ttl = ip_header[5]
protocol = ip_header[6]
source_ip = socket.inet_ntoa(ip_header[8])
dest_ip = socket.inet_ntoa(ip_header[9])
return version, ihl, ttl, protocol, source_ip, dest_ip
raw_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
packet_data, _ = raw_socket.recvfrom(65536)
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
dest_address, src_address, ether_type = parse_ethernet_frame(packet_data)
print(f"\nTimestamp: {timestamp}")
print(f"Packet Length: {len(packet_data)} bytes")
print(f"Source MAC: {src_address}")
print(f"Destination MAC: {dest_address}")
print(f"Ethernet Type: {hex(ether_type)}")
if ether_type == 0x0800: # IPv4 EtherType
version, ihl, ttl, protocol, source_ip, dest_ip = parse_ip_packet(packet_data)
print("IPv4 Header:")
print(f"Version: {version}")
print(f"IHL: {ihl} bytes")
print(f"TTL: {ttl}")
print(f"Protocol: {protocol}")
print(f"Source IP: {source_ip}")
print(f"Destination IP: {dest_ip}")
class Match(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
yara_rule = f"rule {self.children[1].value}" + "{\n\tstrings:"
strings = self.children[1].evaluate()
for string in strings:
yara_rule+= f'\n\t\t${string} = "{strings[string].evaluate()[0]}"'
yara_rule+="\n\tcondition:\n\t\tall of them\n}"
compiled_rule = yara.compile(source=yara_rule)
file_path = self.children[0].evaluate()[0]
matches = compiled_rule.match(file_path)
if matches:
return int(True),"int"
else:
return int(False),"int"
class Input(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
return input(),"string"
class Identifier(Node):
def __init__(self, value = None, children:list=[]): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
return symbol_table.getter(self.value)
class Assignment(Node):
def __init__(self, value, children:list): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
symbol_table.setter(self.children[0].value,
self.children[1].evaluate())
class If(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
#if
if (self.children[0].evaluate()[0]):
self.children[1].evaluate()
#else
elif len(self.children) > 2:
return self.children[2].evaluate()
class While(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
condition, block = self.children
while True:
if not condition.evaluate()[0]: #CONDITION
break
block.evaluate() #BLOCK
class Foreach(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
def evaluate(self)->Node:
init, final, block = self.children
init.evaluate()
final.evaluate()
start = init.children[0].evaluate()[0]
end = final.children[0].evaluate()[0]
for i in range(start,end):
block.evaluate() #BLOCK
class VarDec(Node):
def __init__(self, value = None, children:list = []): #Aqui
super().__init__(value, children)
#self.typedef = typedef
def evaluate(self)->Node:
if type(self.value) == dict:
symbol_table.create(self.children[0].value,
self.value)
else:
symbol_table.create(self.children[0].value,
self.children[1].evaluate())
| matheus-1618/GuardScript | Interpreted/modules/node.py | node.py | py | 9,480 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "modules.symboltable.Symbol_table",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "... |
11467704901 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn.functional import pad
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class ConvBNLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> (optional) ReLU.

    When ``stride == (1, 1)`` (the tuple, not int 1) the conv switches to
    kernel_size=2 with dilation=2, while padding is still derived from
    ``kernel``.  Pass act='None' to skip the activation.
    """

    def __init__(self, in_channels, out_channels, kernel, stride=1, act='ReLU'):
        super(ConvBNLayer, self).__init__()
        self.act_flag = act
        dilated = stride == (1, 1)  # special dilated variant
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=2 if dilated else kernel,
            stride=stride,
            padding=(kernel - 1) // 2,
            dilation=2 if dilated else 1,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU(True)

    def forward(self, x):
        out = self.bn(self.conv(x))
        return self.act(out) if self.act_flag != 'None' else out
class Shortcut(nn.Module):
    """Identity or 1x1 projection for the residual path.

    A projection (ConvBNLayer, kernel 1) is inserted when the channel
    count changes, the stride differs from 1, or ``is_first`` is set;
    otherwise the input passes through untouched.
    """

    def __init__(self, in_channels, out_channels, stride, is_first=False):
        super(Shortcut, self).__init__()
        needs_proj = bool(in_channels != out_channels or stride != 1 or is_first)
        self.use_conv = needs_proj
        if needs_proj:
            # The dilated (1, 1) stride variant projects with stride 1.
            proj_stride = 1 if stride == (1, 1) else stride
            self.conv = ConvBNLayer(in_channels, out_channels, 1, proj_stride)

    def forward(self, x):
        return self.conv(x) if self.use_conv else x
class BottleneckBlock(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + shortcut.

    The final 1x1 conv has no activation; ReLU is applied after the
    residual addition.  ``out_channels`` records the expanded width.
    """

    def __init__(self, in_channels, out_channels, stride):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(in_channels, out_channels, kernel=1)
        self.conv1 = ConvBNLayer(out_channels, out_channels, kernel=3, stride=stride)
        self.conv2 = ConvBNLayer(out_channels, out_channels * 4, kernel=1, act='None')
        self.short = Shortcut(in_channels, out_channels * 4, stride=stride)
        self.out_channels = out_channels * 4
        self.relu = nn.ReLU(True)

    def forward(self, x):
        residual = self.short(x)
        y = self.conv2(self.conv1(self.conv0(x)))
        y = y + residual
        return self.relu(y)
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 conv-BN layers plus a shortcut.

    NOTE(review): the attribute is spelled ``out_chanels`` (sic); it is
    read by ResNet_FPN, so the typo must not be fixed here in isolation.
    """
    def __init__(self,in_channels,out_channels,stride,is_first):
        super(BasicBlock,self).__init__()
        self.conv0 = ConvBNLayer(in_channels,out_channels,kernel=3,stride=stride)
        # No activation here; ReLU is applied after the residual addition.
        self.conv1 = ConvBNLayer(out_channels,out_channels,kernel=3,act='None')
        self.short = Shortcut(in_channels,out_channels,stride,is_first)
        self.out_chanels = out_channels
        self.relu = nn.ReLU(True)
    def forward(self,x):
        y = self.conv0(x)
        y = self.conv1(y)
        y = y + self.short(x)
        y = self.relu(y)
        return y
class ResNet_FPN(nn.Module):
    """ResNet backbone with an FPN-style top-down merge producing one
    512-channel feature map.

    Args:
        in_channels: input image channels (default 1).
        layers: ResNet depth -- one of 18/34/50/101/152.
    """
    def __init__(self,in_channels=1,layers=50,**kwargs):
        super(ResNet_FPN,self).__init__()
        # Per-variant stage depths.  NOTE(review): 'block_class' is stored
        # but never used -- the `layers >= 50` branch below hard-codes the
        # block type instead.
        supported_layers = {
            18:{
                'depth':[2,2,2,2],
                'block_class': BasicBlock
            },
            34:{
                'depth':[3,4,6,3],
                'block_class': BasicBlock
            },
            50:{
                'depth':[3,4,6,3],
                'block_class': BottleneckBlock
            },
            101:{
                'depth':[3,4,23,3],
                'block_class': BottleneckBlock
            },
            152:{
                'depth':[3,8,36,3],
                'block_class': BottleneckBlock
            }
        }
        # Per-stage strides; stages 3 and 4 keep resolution via the
        # dilated (1, 1) ConvBNLayer variant.
        stride_list = [(2,2),(2,2,),(1,1),(1,1)]
        num_filters = [64,128,256,512]
        self.depth = supported_layers[layers]['depth']
        self.F = []  # (unused; forward() builds a local F instead)
        self.conv = ConvBNLayer(in_channels=in_channels,out_channels=64,kernel=7,stride=2) #64*256 ->32*128
        self.block_list = nn.ModuleList()
        in_ch = 64
        if layers>=50:
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    self.block_list.append(BottleneckBlock(in_channels=in_ch,out_channels=num_filters[block],stride = stride_list[block] if i==0 else 1))
                    in_ch = num_filters[block]*4
        else:
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    # NOTE(review): `stride` computed here is never used --
                    # the BasicBlock call below uses stride_list instead.
                    if i==0 and block!=0:
                        stride = (2,1)
                    else:
                        stride = (1,1)
                    basic_block = BasicBlock(in_channels=in_ch,out_channels=num_filters[block],stride=stride_list[block] if i==0 else 1, is_first= block==i==0)
                    in_ch = basic_block.out_chanels
                    self.block_list.append(basic_block)
        # Channel widths of the last three stages, shallow -> deep.
        out_ch_list = [in_ch // 4 ,in_ch // 2, in_ch]
        self.base_block = nn.ModuleList()
        # NOTE(review): conv_trans/bn_block stay empty but are indexed in
        # forward() whenever feature-map sizes differ -- that path would
        # raise IndexError; it only works because the (1,1) strides keep
        # the merged stages at the same spatial size.
        self.conv_trans = []
        self.bn_block = []
        for i in [-2,-3]:
            in_channels = out_ch_list[i+1] + out_ch_list[i]
            self.base_block.append(nn.Conv2d(in_channels,out_ch_list[i],kernel_size=1))  # adjust channel count
            self.base_block.append(nn.Conv2d(out_ch_list[i],out_ch_list[i],kernel_size=3,padding=1))  # merge features
            self.base_block.append(nn.Sequential(nn.BatchNorm2d(out_ch_list[i]),nn.ReLU(True)))
        self.base_block.append(nn.Conv2d(out_ch_list[i],512,kernel_size=1))
        self.out_channels = 512
    def forward(self,x):
        x = self.conv(x)
        fpn_list = []
        F = [ ]
        # Cumulative block indices marking the end of each stage.
        for i in range(len(self.depth)):
            fpn_list.append(np.sum(self.depth[:i+1]))
        for i,block in enumerate(self.block_list):
            x = block(x)
            # Collect each stage's output for the top-down merge.
            for number in fpn_list:
                if i+1==number:
                    F.append(x)
        base = F[-1]
        j = 0
        for i,block in enumerate(self.base_block):
            if i%3 ==0 and i<6:
                j = j+1
                b,c,w,h = F[-j-1].size()
                # If spatial sizes match, concatenate directly; otherwise
                # fall into the broken conv_trans/bn_block path (see NOTE
                # in __init__).
                if [w,h] == list(base.size()[2:]):
                    base = base
                else:
                    base = self.conv_trans[j-1](base)
                    base = self.bn_block[j-1](base)
                base = torch.cat([base,F[-j-1]],dim=1)
            base = block(base)
        return base
if __name__=='__main__':
res_fpn = ResNet_FPN(3,50)
res_fpn = res_fpn.to(device)
print(res_fpn)
x = torch.randn([140,3,64,256]).to(device)
output = res_fpn(x)
| milely/SRN.Pytorch | backbone/resnet_fpn.py | resnet_fpn.py | py | 6,634 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
... |
12369223367 | """Add viewed column to batch_job
Revision ID: b23863a37642
Revises: 72a8672de06b
Create Date: 2018-12-31 17:13:54.564192
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b23863a37642'
down_revision = '72a8672de06b'
branch_labels = None
depends_on = None
def upgrade():
    """Add a non-nullable boolean `viewed` column (server default '1')
    to batch_job."""
    # ### commands auto generated by Alembic - please adjust! ###
    # batch_alter_table is used so the change also works on SQLite.
    with op.batch_alter_table("batch_job") as batch_op:
        batch_op.add_column(sa.Column('viewed', sa.Boolean(), nullable=False, server_default='1'))
    # ### end Alembic commands ###
def downgrade():
    """Drop the `viewed` column from batch_job (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("batch_job") as batch_op:
        batch_op.drop_column('viewed')
    # ### end Alembic commands ###
| golharam/NGS360-FlaskApp | migrations/versions/b23863a37642_add_viewed_column_to_batch_job.py | b23863a37642_add_viewed_column_to_batch_job.py | py | 802 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.... |
9411931518 | # Primary game file
import sys, pygame
from pygame.locals import *
display_surf = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Hello Pygame World!')
def run():
    """Run the game's main event loop (callable from outside the package).

    Exits the process when a pygame QUIT event arrives.
    """
    print("Started trying to run")
    # main game loop
    # NOTE(review): the loop never draws, calls pygame.display.update(),
    # or ticks a clock, so it busy-spins at 100% CPU -- confirm intended.
    while True:
        for event in pygame.event.get():
            # QUIT comes from the star-import of pygame.locals.
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
sys.exit() | mlansari/ShellShockClone | ShellShockClone/game.py | game.py | py | 460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "p... |
3146953868 | from setuptools import setup, find_packages
from os import path
DIR = path.abspath(path.dirname(__file__))
description = """SharePy will handle authentication for your SharePoint Online/O365 site, allowing
you to make straightforward HTTP requests from Python. It extends the commonly used Requests module,
meaning that returned objects are familliar, easy to work with and well documented."""
with open(path.join(DIR, './README.md')) as f:
long_description = f.read()
setup(
name='sharepy',
version='2.0.0',
description='Simple SharePoint Online authentication for Python',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='sharepoint online authentication',
author='Jonathan Holvey',
author_email='jonathan.holvey@outlook.com',
url='https://github.com/JonathanHolvey/sharepy',
project_urls={
'Issues': 'https://github.com/JonathanHolvey/sharepy/issues',
},
license='GPLv3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Internet',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3',
],
packages=find_packages('./src'),
package_dir={'': './src'},
package_data={'sharepy.auth.templates': ['*']},
python_requires='>=3.6, <4',
install_requires=['requests>=2,<3']
)
| JonathanHolvey/sharepy | setup.py | setup.py | py | 1,452 | python | en | code | 165 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
32560269380 | # coding: utf-8
import sys
import os
from Public.Decorator import *
import uiautomator2 as u2
from Public.common import common
from tenacity import *
cm = common()
#获取resourceID
condition = os.path.exists(cm.mapping_gp_path)
mapping_path = (cm.mapping_vid_path,cm.mapping_gp_path)[condition]
res = cm.parse_mapping_file(mapping_path, condition)
class appBase(common):
    """Base app handling: device connection plus shared UI maintenance flows."""

    def __init__(self):
        # Connect to the device over uiautomator2 and bring it to the launcher.
        self.d = u2.connect()
        self.apk_path = cm.apk_rel_path
        self.pkg_name = cm.pkg_name
        self.d.screen_on()    # turn the screen on
        self.d.unlock()       # unlock the screen
        self.d.press('home')  # go back to the home screen

    @exception_decoration
    def rate_skip(self):
        """Dismiss the rate-us dialog if it appears."""
        action.exist_click(text='Exit')

    @exception_decoration
    def case_restart_check(self, text=None, resourceId=None, restart=False):
        """Check whether the test case needs a restart.

        Clicks the element identified by ``text`` or ``resourceId``;
        raises when ``restart`` is True so the caller can restart the case.
        """
        if text is not None:
            self.d(text=text).click(timeout=2)
        elif resourceId is not None:
            # Bug fix: this branch previously clicked with resourceId=text,
            # which is always None when this branch is reached.
            self.d(resourceId=resourceId).click(timeout=2)
        elif restart is True:
            raise Exception('restart')

    @exception_decoration
    def vip_check(self):
        """VIP check: run the trial-subscription flow when no VIP badge is shown."""
        self.d(text="ME").click(timeout=2)
        if not self.d(text='You are already a VIP').exists(timeout=5):  # VIP badge
            self.d(text='3-Day Free Trial').click(timeout=2)
            self.d(text="CONTINUE").click(timeout=5)
            self.d(text="订阅").click(timeout=5)
            action.exist_click(text='以后再说')
            action.exist_click(text='不用了')
            action.exist_click(text='确定')
            action.exist_click(text='Done')
        self.d(text="xxx").click(timeout=5)
        print('vip检查通过')

    @exception_pass
    def startpage_handle(self):
        """Dismiss the permission/splash screens shown on first launch."""
        self.d(resourceId="com.android.permissioncontroller:id/permission_allow_button").click(timeout=5)
        self.d(text="Skip").click(timeout=2)
        self.d(text="Got It").click(timeout=2)
        print('启动开屏页检查通过')

    @exception_pass
    def clear_home_xxx(self):
        """Delete the xxx entries on the home page until only seed files remain."""
        self.d(text="xxx").click(timeout=2)
        test_xxx_name = self.d(resourceId=res['com.app.xxxxxx:id/tvxxxName']).get_text(timeout=10)
        while test_xxx_name not in ['xxx_xxx.mp4', 'app_xxx.mp4', 'xxx_xxx.mp4']:
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click()
            self.d(scrollable=True).scroll.toEnd()  # scroll to the very bottom
            self.d(text="Delete").click(timeout=2)
            self.d(text="OK").click(timeout=2)
            test_xxx_name = self.d(resourceId=res['com.app.xxxxxx:id/tvxxxName']).get_text(timeout=10)
        print('清理测试视频检查通过')

    @exception_pass
    def clear_downloaded_xxx(self):
        """Delete every record in the download manager."""
        while not self.d(text='Go to download').exists(timeout=2):
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click(timeout=2)
            self.d(text="Delete").click(timeout=2)
            self.d(text="Confirm").click(timeout=2)
        print('清理下载管理器记录通过')

    @exception_pass
    def clear_music(self):
        """Delete downloaded music records until the seed track is on top."""
        self.d(text="MUSIC").click(timeout=2)
        music_title = self.d(resourceId=res['com.app.xxxxxx:id/tvSongName']).get_text(timeout=2)
        while 'test_music1' not in music_title:
            self.d(resourceId=res['com.app.xxxxxx:id/ivMore']).click(timeout=2)
            self.d(scrollable=True).scroll.toEnd()  # scroll to the very bottom
            self.d(text="Delete").click(timeout=2)
            self.d(text="OK").click(timeout=2)
            music_title = self.d(resourceId=res['com.app.xxxxxx:id/tvSongName']).get_text(timeout=2)
        self.d(text="xxx").click(timeout=2)
        print('清除音乐文件通过')

    @exception_pass
    def music_permission(self):
        """Grant the permissions requested when playing music."""
        self.d(text="Ok").click(timeout=3)
        self.d(text="允许").click(timeout=3)

    @retry(stop=stop_after_attempt(2))
    def download_xxx(self):
        """Download a video and verify it appears in the download manager."""
        # Start a fresh download.
        self.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=2)
        # Clear previous download records first.
        self.d(resourceId=res['com.app.xxxxxx:id/ivDownload']).click()
        self.clear_downloaded_xxx()
        self.d.press('back')
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=2)
        self.d.send_keys("https://www.ted.com/talks/armand_d_angour_the_ancient_origins_of_the_olympics/up-next")
        self.d.press('enter')
        self.d(resourceId=res['com.app.xxxxxx:id/button_analytics']).click(timeout=10)
        time.sleep(10)
        # Wait longer if the size analysis has not resolved to a "... MB" text yet.
        if 'MB' not in str(self.d(resourceId=res['com.app.xxxxxx:id/text_size']).get_text(timeout=10)):
            time.sleep(10)
        self.d(text="Download").click(timeout=5)
        self.d(text="view >").click(timeout=5)
        # Bug fix: raising a bare string is a TypeError in Python 3; wrap in Exception.
        if not self.d(resourceId=res['com.app.xxxxxx:id/flCover']).exists(timeout=2):
            raise Exception('下载管理器没有视频')
        check_text = time.strftime("%Y-%m-%d", time.localtime())
        suc_text = self.d(resourceId=res['com.app.xxxxxx:id/tvDownloaded']).get_text(timeout=240)
        if check_text not in suc_text:
            raise Exception('测试视频下载超时未完成')
        self.d(resourceId=res['com.app.xxxxxx:id/ivLeft']).click(timeout=2)
        self.d.press('back')
        self.d.press('back')
        self.d(text="xxx").click(timeout=1)
        print('下载视频通过')

    @exception_pass
    def clear_notification(self):
        """Clear all notification-bar messages."""
        self.d.open_notification()
        self.d(text='全部清除').click(timeout=2)

    def home_start(self, text=None, resourceId=None):
        """Press home, relaunch the app, then optionally click one element."""
        self.d.press('home')
        self.d.app_start(self.pkg_name)
        if text is not None:
            self.d(text=text).click(timeout=5)
        elif resourceId is not None:
            self.d(resourceId=resourceId).click(timeout=5)

    def xxx_xxx_check(self, xxxlink):
        """Open a hot-drama link and verify the playback time advances."""
        self.case_restart_check(text='DOWNLOAD')
        self.d(text="DOWNLOAD").click(timeout=2)
        self.d(resourceId=res['com.app.xxxxxx:id/clSearch']).click(timeout=2)
        self.d.send_keys(xxxlink)
        self.d.xpath('//*[@resource-id="app"]/android.view.View[1]/android.view.View[1]/android.view.View[1]/android.view.View[4]/android.view.View[1]').click(timeout=20)
        self.d(scrollable=True).scroll.toEnd()
        self.d.click(0.596, 0.808)
        self.d(resourceId=res['com.app.xxxxxx:id/iv_close']).click(timeout=20)
        self.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_pre = self.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        time.sleep(10)
        self.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_next = self.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        if playtime_pre == playtime_next:
            raise Exception('播放时间没有跑动')
        self.d.screenshot()
class VdieoPlay(appBase):
    """Video-playback helpers shared by play-related cases.

    NOTE(review): the class name looks like a typo of "VideoPlay"; it is kept
    as-is because external callers may reference it.
    """

    # Class-level device handle shared by the classmethods below.
    d = u2.connect()

    @classmethod
    @exception_pass
    def play_xxx_skip(cls):
        """Dismiss the playback guide overlays."""
        cls.d(resourceId=res['com.app.xxxxxx:id/svgOrientation']).click(timeout=5)
        cls.d(text="skip").click(timeout=2)  # skip the playback guide
        cls.d(text="Skip").click(timeout=2)  # skip the playback guide (capitalised variant)

    @classmethod
    def xxx_error_feedback(cls):
        """Submit feedback and fail the case when a playback error dialog shows."""
        if cls.d(text='Error!').exists(timeout=10):
            cls.d.screenshot()
            cls.d(text='Feedback').click(timeout=2)
            cls.d(text='Submit').click(timeout=2)
            raise Exception('视频异常')

    @classmethod
    def xxx_play_time_check(cls):
        """Sample the played-time label twice, 10s apart, and return both values."""
        ###### verify that the playback time is advancing ######
        cls.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_pre = cls.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        time.sleep(10)
        cls.d.click(0.431, 0.232)  # tap to reveal the player controls
        playtime_next = cls.d(resourceId=res['com.app.xxxxxx:id/has_played']).get_text(timeout=2)
        cls.d.screenshot()
        return playtime_pre, playtime_next
class action:
    """Stateless UI helpers usable without an appBase instance."""

    # Class-level device handle shared by the classmethods below.
    d = u2.connect()

    @classmethod
    def exist_click(cls, text=None, resourceId=None):
        """Click the element identified by text or resourceId, only if it exists."""
        if text is not None and cls.d(text=text).exists(timeout=3):
            cls.d(text=text).click()
        elif resourceId is not None and cls.d(resourceId=resourceId).exists(timeout=3):
            cls.d(resourceId=resourceId).click()

    @classmethod
    def screenshot_name(cls, name):
        """Save a screenshot named `<name>-<timestamp>.PNG` in the report dir.

        Returns the generated file name (not the full path).
        """
        date_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
        screenshot = name + '-' + date_time + '.PNG'
        # path = ReportPath().get_path() + '/' + screenshot
        # NOTE(review): ReportPath is presumably re-exported by the star import
        # from Public.Decorator — it is not imported explicitly here; confirm.
        path = os.path.join(ReportPath().get_path(), screenshot)
        cls.d.screenshot(path)
        return screenshot
if __name__ == '__main__':
print(res['com.app.xxxxxx:id/tvGotIt'])
| taylortaurus/android-ui-runner | Public/appBase.py | appBase.py | py | 9,249 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Public.common.common",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "Public.common.common... |
18699363715 | #encoding=utf-8
from __future__ import unicode_literals
import sys
sys.path.append("../")
import Terry_toolkit as tkit
# data=tkit.Json(file_path="/mnt/data/dev/tdata/知识提取/chinese/test.json").auto_load()
# for it in data:
# print(it)
import json
# json.load()函数的使用,将读取json信息
file = open('/mnt/data/dev/tdata/知识提取/chinese/train.json','r',encoding='utf-8')
info = json.load(file)
# print(info)
relation={}
relation_full={}
n=0
for it in info:
if it['relation']!="NA":
print(it['head']['word'])
print(it['relation'].split('/')[-1])
relation[it['relation'].split('/')[-1]]=0
relation_full[it['relation']]=0
print(it['tail']['word'])
# print(it)
n=n+1
print("*"*40)
print(relation)
print(relation_full)
print(n)
print(len(relation)) | napoler/Terry-toolkit | test/ttjson.py | ttjson.py | py | 851 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
}
] |
27768921922 | import pandas as pd
from bs4 import BeautifulSoup
import requests
import random
import time
url='https://www.tianyancha.com/search?base=bj'
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
response = requests.get(url,headers=headers)
response.encoding='utf8'
soup = BeautifulSoup(response.text,'lxml')
html = soup.prettify()
# print(html)
columns=['公司','状态','主要标签','法人','注册资本','成立日期','邮箱','地址']
item = soup.find(name = 'div',class_='result-list')
result_list = item.find_all('div','search-item sv-search-company')
# print(result_list[5])
name = result_list[5].find('a','name select-none').string
status = result_list[5].find('div','tag-common -normal-bg').string
label = result_list[5].find('div','tag-list').text if result_list[5].find('div','tag-list')!=None else None
legal = result_list[5].find('a','legalPersonName link-click').text
capital = result_list[5].find('div','title -narrow text-ellipsis').find('span').text
build = result_list[5].find('div','title text-ellipsis').find('span').text
email = result_list[5].find_all('div','contact row')[0].find_all('div','col')[1].find_all('span')[1].text
address= result_list[5].find_all('div','contact row')[1].find('div','col').find_all('span')[1].text
#contact row 有0,1、2 三种情况
#0,email和address 都为None
#1,一定是address
#2,第一个是电话和邮箱,取邮箱;第二个是地址
# 复杂情况,用函数
def getContract(html):
    """Extract (email, address) from one Tianyancha search-result card.

    `html` is a bs4 element for a single result. Cards carry 0, 1 or 2
    'contact row' divs:
      0 rows -> neither email nor address is available
      1 row  -> the row holds the address only
      2 rows -> the first row holds phone/email (the email is taken),
                the second row holds the address
    Returns an (email, address) tuple; either item may be None.
    """
    print('----------')
    email = None
    address = None
    contract_list = html.find_all('div', 'contact row')
    num = len(contract_list)
    print(contract_list)
    if num == 0:
        return (email, address)
    if num == 1:
        # Last span of the single column is the address text.
        address = contract_list[0].find('div', 'col').find_all('span')[-1].text
        print(email, address)
        return (email, address)
    elif num == 2:
        # Guard against an empty column list before indexing into it.
        email = contract_list[0].find_all('div', 'col')[-1].find_all('span')[1].text if len(contract_list[0].find_all('div', 'col')) != 0 else None
        address = contract_list[1].find('div', 'col').find_all('span')[-1].text
        print(email, address)
        return (email, address)
print('===========')
print(name,status,label,legal,capital,build,email,address)
data = []
name= []
status= []
label= []
legal= []
capital= []
build= []
email= []
address= []
for i in result_list:
# print(i)
i_name=i.find('a','name select-none').string
i_status=i.find('div','tag-common -normal-bg').string if i.find('div','tag-common -normal-bg')!=None else None
i_label=i.find('div','tag-list').text if i.find('div','tag-list')!=None else None
i_legal =i.find('a','legalPersonName link-click').text if i.find('a','legalPersonName link-click') !=None else None
i_capital=i.find('div','title -narrow text-ellipsis').find('span').text
i_build=i.find('div','title text-ellipsis').find('span').text
i_email,i_address = getContract(i)
print(i_name,i_status,i_label,i_legal,i_capital,i_build,i_email,i_address)
name.append(i_name)
status.append(i_status)
label.append(i_label)
legal.append(i_legal)
capital.append(i_capital)
build.append(i_build)
email.append(i_email)
address.append(i_address)
for i in range(len(name)):
data.append([name[i],status[i],label[i],legal[i],capital[i],build[i],email[i],address[i]])
df = pd.DataFrame(data = data ,columns=columns)
print(df)
import pymysql

# Connect to MySQL. Bug fix: pymysql expects a MySQL charset name such as
# 'utf8' (or 'utf8mb4'); 'utf-8' is not a valid MySQL charset and makes
# connect() fail before any query runs.
conn = pymysql.connect(host='192.168.10.108',
                       user='root',
                       password='123456',
                       db='dangdang',
                       charset='utf8',
                       cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
# Drop the table first so it can be recreated cleanly below.
print('============#先删除表,后创建表================')
cursor.execute('drop table emp')
| kshsky/PycharmProjects | case/crawler/TianYanCha.py | TianYanCha.py | py | 3,711 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
... |
21414967137 | from colour import Color
import cv2 as cv2
import numpy as np

# Ask for a colour name and convert it to an HSL triple scaled to 0-255.
a = input('enter a color=')
b = Color(a)
c = b.hsl
d = tuple(255 * x for x in c)
print(d)
print(list(d))

# Convert both the image and the requested colour into HSV space.
img = cv2.imread('color.png')
hsl1 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
green = np.uint8([[list(d)]])
hsv_green = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)
print(hsv_green)

# Build a hue band around the target hue (H in OpenCV is 0-179).
f = hsv_green[0][0][0]
if f > 10:
    lower = np.array([f - 10, 50, 50], np.uint8)
else:
    # Bug fix: the dtype was placed inside the value list, producing a
    # 4-element array; it must be the second argument of np.array.
    lower = np.array([f, 100, 100], np.uint8)
upper = np.array([f + 10, 255, 255], np.uint8)

# Mask the image to the selected hue range and display the result.
colors = cv2.inRange(hsl1, lower, upper)
res = cv2.bitwise_and(img, img, mask=colors)
cv2.imshow('original', img)
cv2.imshow(a, res)
cv2.waitKey(0)
| DESK-webdev/team_webdev | img_pros/star_4.py | star_4.py | py | 657 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "colour.Color",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_num... |
6817402374 | from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
# models
from keras.applications.vgg16 import VGG16
from keras.models import Model
# clustering and dimension reduction
# from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# for everything else
import numpy as np
# import pandas as pd
import pickle
datagen = ImageDataGenerator()
"""
# path to DataGen folder
# DataGen folder must contain two folders inside with name test and train
with each folder containing folders having different image types
# DataGen/train -->airplanes,bikes,cars,faces folders
# DataGen/test -->airplanes,bikes,cars,faces folders
"""
home_path = r'D:\sem1_2021\DIP\assinments\Assignment05\Images\DataGen'
print("getting data using ImageDataGenerator")
train_data = datagen.flow_from_directory(
directory=home_path + r'/train/',
target_size=(224,224), # resize to this size to the size required fo VGG16
color_mode="rgb", # for coloured images
batch_size=1, # number of images to extract from folder for every batch
class_mode="binary", # classes to predict (single class classifier)
)
test_data = datagen.flow_from_directory(
directory=home_path + r'/test/',
target_size=(224,224), # resize to this size to the size required fo VGG16
color_mode="rgb", # for coloured images
batch_size=1, # number of images to extract from folder for every batch
class_mode="binary",
)
model = VGG16()
model = Model(inputs = model.inputs, outputs = model.layers[-2].output) #taking features from the secondlast layer of VGG16
def extract_features(file, model):
    """Run one image batch through the truncated VGG16 and return its features.

    `file` is a batch produced by the ImageDataGenerator above (batch_size=1,
    224x224 RGB); `model` is VGG16 truncated at its second-to-last layer, so
    the result is a 4096-dimensional feature vector per image.
    """
    imgx = preprocess_input(file)  # VGG16-specific pixel preprocessing
    # get the feature vector
    features = model.predict(imgx, use_multiprocessing=True)
    return features
data = {}
p = r'D:\sem1_2021\DIP\assinments\Assignment05\Images\except'
print("exracting features of train/test image using VGG")
features_train = [] #array containg features of each image
labels_train = [] #array containg label(class of img)
i=0
for i in range(120): # 120 is number of traing images
print("train" ,i)
# extract the features and update the dictionary
batchX, batchY = train_data.next() # batchx contains the image aray of particular index
try: # batchy contains the label number present in train_data from DataGen operation
feat = extract_features(batchX,model) #getting features of particular image from VGG model
labels_train.append(batchY)
features_train.append(feat)
# error handling / can ignore
except:
with open(p,'wb') as file:
pickle.dump(data,file)
# similar as train_data operation
features_test = []
labels_test = []
i=0
for i in range(80):
print("test",i)
# try to extract the features and update the dictionary
batchX, batchY = test_data.next()
try:
feat = extract_features(batchX,model)
labels_test.append(batchY)
features_test.append(feat)
# if something fails, save the extracted features as a pickle file (optional)
except:
with open(p,'wb') as file:
pickle.dump(data,file)
features_train = np.array(features_train)
labels_train = np.array(labels_train)
features_test = np.array(features_test)
labels_test = np.array(labels_test)
# reshape so that there are 120 and 80 respective samples of 4096 vectors
features_train = features_train.reshape(-1,4096)
# print(features_train.shape)
features_test = features_test.reshape(-1,4096)
# reduce the amount of dimensions in the feature vector by extracting most dependent featues only using PCA
print("PCA_TRAIN")
pca = PCA(n_components=40, random_state=78) #4096 to 40 features for easy computation by our KNN
pca.fit(features_train)
x_train = pca.transform(features_train)
print("PCA_TEST")
pca = PCA(n_components=40, random_state=78)
pca.fit(features_test)
x_test = pca.transform(features_test)
print("KNN_MODEL")
training_data = np.column_stack((x_train,labels_train)) #merging the two arrays to one to pass to KNN function
testing_data = np.column_stack((x_test,labels_test))
def EUC_DIST(v1, v2):
    """Return the Euclidean distance between two vectors.

    Bug fix: the original iterated over ``range(len(v1) - 1)`` and silently
    skipped the last coordinate. ``zip`` now pairs every coordinate, and when
    the vectors differ in length (Predict passes a label-bearing longer test
    vector) the excess trailing entries are ignored instead of the last
    genuine feature.
    """
    v1, v2 = np.array(v1), np.array(v2)
    distance = 0
    for a, b in zip(v1, v2):
        distance += (a - b) ** 2
    return np.sqrt(distance)
def Predict(k, train_data, test_instance):
    """Classify `test_instance` with a k-nearest-neighbour majority vote.

    k: number of neighbours to consider.
    train_data: rows of [feature..., label]; the trailing label column is
        stripped before the distance computation.
    test_instance: one row of the test set ([feature..., label]).
    Returns the predicted label: the most common label among the k nearest
    training rows (ties go to the label encountered first).
    """
    distances = []  # (train_row, distance-to-test_instance) pairs
    for i in range(len(train_data)):
        dist = EUC_DIST(train_data[i][:-1], test_instance)
        distances.append((train_data[i], dist))
    distances.sort(key=lambda x: x[1])  # nearest rows first
    neighbors = []
    for i in range(k):
        neighbors.append(distances[i][0])  # the k closest rows, labels included
    classes = {}  # label -> number of votes among the neighbours
    for i in range(len(neighbors)):
        response = neighbors[i][-1]
        if response in classes:
            classes[response] += 1
        else:
            classes[response] = 1
    sorted_classes = sorted(classes.items(), key=lambda x: x[1], reverse=True)
    return sorted_classes[0][0]  # the winning label
def Eval_Acc(y_data, y_pred):
    """Return prediction accuracy as a percentage.

    Each row of y_data carries the true label in its last position; a
    prediction counts as correct when it equals that trailing label.
    """
    hits = sum(1 for i in range(len(y_pred)) if y_data[i][-1] == y_pred[i])
    return (hits / len(y_pred)) * 100
y_pred = [] #array containg KNN predicted labels/class of each image in test_data
for i in range(len(testing_data)):
y_pred.append(Predict(2,training_data, testing_data[i]))
print(Eval_Acc(testing_data, y_pred))
| AnmolGarg98/KNN_image-classification | KNN_VGG16_pretrained_features.py | KNN_VGG16_pretrained_features.py | py | 6,453 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 46,
"usage_type": "call"
... |
9360337388 | import fire
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_restful import Resource, Api
from graph_knn import load_entity_knn, load_multi_knn
class Knn(Resource):
    """POST endpoint returning the k nearest neighbours of an entity for a relation."""

    def __init__(self, **kwargs):
        # Injected via add_resource(..., resource_class_kwargs={'knn': ...}).
        self.knn = kwargs['knn']

    def post(self):
        """Body: {entity, relation, k, direction}; returns [{uri, dist, name}, ...]."""
        json_data = request.get_json(force=True)
        entity_uri = json_data["entity"]
        relation_uri = json_data["relation"]
        k = int(json_data["k"])
        direction = json_data["direction"]
        uris, dists, names = self.knn.find_entity_knn(entity_uri, relation_uri, k, direction)
        # dist is cast to float so it is JSON-serialisable.
        response = [{'uri': uri, 'dist': float(dist), 'name': name} for [uri, dist, name] in
                    zip(uris, dists, names)]
        return jsonify(response)
class EntitySearch(Resource):
    """POST endpoint doing a case-insensitive substring search over entity names."""

    def __init__(self, **kwargs):
        # Injected via add_resource(..., resource_class_kwargs={'ent_dict_name': ...}).
        self.ent_dict_name = kwargs['ent_dict_name']

    def post(self):
        """Body: {query, limit, offset}; returns {result: [page], size: total matches}."""
        payload = request.get_json(force=True)
        needle = payload["query"].lower()
        limit = int(payload["limit"])
        offset = int(payload["offset"])
        matches = []
        for uri, name in self.ent_dict_name.items():
            if needle in name.lower():
                matches.append({'value': uri, 'label': name})
        page = matches[offset:(offset + limit)]
        return jsonify({'result': page, 'size': len(matches)})
class RelationSearch(Resource):
    """POST endpoint doing a case-insensitive substring search over relation URIs."""

    def __init__(self, **kwargs):
        # Injected via add_resource(..., resource_class_kwargs={'rel_dict_uri': ...}).
        self.rel_dict_uri = kwargs['rel_dict_uri']

    def post(self):
        """Body: {query, limit, offset}; returns {result: [page], size: total matches}.

        Both 'value' and 'label' are the relation URI (the dict keys); the
        dict values are unused here.
        """
        json_data = request.get_json(force=True)
        query = json_data["query"]
        limit = int(json_data["limit"])
        offset = int(json_data["offset"])
        result = [{'value': k, 'label': k} for k, v in self.rel_dict_uri.items() if
                  query.lower() in k.lower()]
        filtered = result[offset:(offset + limit)]
        response = {'result': filtered, 'size': len(result)}
        return jsonify(response)
class IndexedEntitySearch(Resource):
    """POST endpoint searching entities through a pre-built text index."""

    def __init__(self, **kwargs):
        # Injected via add_resource(..., resource_class_kwargs={'entity_index': ...}).
        self.entity_index = kwargs['entity_index']

    def post(self):
        """Body: {query, limit, offset}; returns {result: [page], size: total matches}.

        Labels carry "<name> <count> <uri>" so the UI can disambiguate entities.
        """
        json_data = request.get_json(force=True)
        query = json_data["query"]
        limit = int(json_data["limit"])
        offset = int(json_data["offset"])
        result = [{'value': entity.uri, 'label': f'{entity.name} {entity.count} {entity.uri}'}
                  for entity in self.entity_index.find_entity(query)]
        filtered = result[offset:(offset + limit)]
        response = {'result': filtered, 'size': len(result)}
        return jsonify(response)
class IndexedRelationSearch(Resource):
    """POST endpoint intended to search relations through an index.

    NOTE(review): this is an exact copy of IndexedEntitySearch — it takes an
    'entity_index' kwarg and searches entities, and launch_api_multi registers
    RelationSearch instead of this class. It looks like dead/unfinished
    copy-paste; confirm before relying on it.
    """

    def __init__(self, **kwargs):
        self.entity_index = kwargs['entity_index']

    def post(self):
        """Body: {query, limit, offset}; returns {result: [page], size: total matches}."""
        json_data = request.get_json(force=True)
        query = json_data["query"]
        limit = int(json_data["limit"])
        offset = int(json_data["offset"])
        result = [{'value': entity.uri, 'label': f'{entity.name} {entity.count} {entity.uri}'}
                  for entity in self.entity_index.find_entity(query)]
        filtered = result[offset:(offset + limit)]
        response = {'result': filtered, 'size': len(result)}
        return jsonify(response)
def launch_api(ent_path, rel_path, dict_path, name_dict_path):
    """Start the single-embedding KNN REST API on port 5006 (all interfaces)."""
    app = Flask(__name__)
    api = Api(app)
    knn = load_entity_knn(ent_path, rel_path, dict_path, name_dict_path)
    # /knn performs the lookup; the *-search routes feed the UI autocomplete.
    api.add_resource(Knn, "/knn", resource_class_kwargs={'knn': knn})
    api.add_resource(EntitySearch, "/knn-entity-search",
                     resource_class_kwargs={'ent_dict_name': knn.ent_dict_name})
    api.add_resource(RelationSearch, "/knn-relation-search",
                     resource_class_kwargs={'rel_dict_uri': knn.rel_dict_uri})
    CORS(app)  # allow the front-end to call from another origin
    app.run(host="0.0.0.0", port="5006")
def launch_api_multi(ent_paths, rel_path, entity_name_file, relation_name_file, port):
    """Start the multi-embedding KNN REST API on the given port (all interfaces)."""
    app = Flask(__name__)
    api = Api(app)
    knn = load_multi_knn(ent_paths, rel_path, entity_name_file, relation_name_file)
    api.add_resource(Knn, "/knn", resource_class_kwargs={'knn': knn})
    # Entity search goes through the text index; relation search reuses the
    # plain dict-based RelationSearch (IndexedRelationSearch is unused).
    api.add_resource(IndexedEntitySearch, "/knn-entity-search",
                     resource_class_kwargs={'entity_index': knn.entity_index})
    api.add_resource(RelationSearch, "/knn-relation-search",
                     resource_class_kwargs={'rel_dict_uri': knn.relation_index.uri_to_entity})
    CORS(app)  # allow the front-end to call from another origin
    app.run(host="0.0.0.0", port=port)
if __name__ == "__main__":
fire.Fire(launch_api_multi)
| graph-embeddings/pbg-helper | knn-graph-viewer/back/api.py | api.py | py | 4,457 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.json... |
43362574911 | from collections import deque
def bfs_shortest_path(adj_matrix, src, dest):
    """Return the number of edges on a shortest src->dest path, or -1.

    adj_matrix is the adjacency matrix of an unweighted graph (truthy entry
    means an edge). Bug fix: the vertex count now comes from the matrix
    itself instead of the module-level global ``n``, so the function works
    standalone.
    """
    n = len(adj_matrix)
    dist = [float('inf')] * n
    dist[src] = 0
    q = deque()
    q.append(src)
    while q:
        curr = q.popleft()
        if curr == dest:
            return dist[dest]
        for neighbor in range(n):
            # dist doubles as the visited marker: inf means "not reached yet".
            if adj_matrix[curr][neighbor] and dist[neighbor] == float('inf'):
                dist[neighbor] = dist[curr] + 1
                q.append(neighbor)
    return -1
# Read the vertex count, the adjacency matrix (one row per line), then the
# 1-based source/destination pair (converted to 0-based for the search).
n = int(input())
matrix = [tuple(map(int, input().split())) for _ in range(n)]
src, dest = map(int, input().split())
print(bfs_shortest_path(matrix, src-1, dest-1)) | slayzerg01/yandex-training-3.0 | 36/task36.py | task36.py | py | 635 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
22356028668 | import pickle
import json
import sys
from sklearn.feature_extraction.text import CountVectorizer
# Loading the saved model
loaded_model = pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/logreg_model.pkl', 'rb'))
# Loading the CountVectorizer vocabulary
loaded_vec = CountVectorizer(vocabulary=pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/count_vector.pkl', 'rb')))
loaded_tfidf = pickle.load(open('C:/Users/abhis/OneDrive/Desktop/UsingSpawn/tfidf.pkl', 'rb'))
# Defining the target names
target_names = ["Bank Account services", "Credit card or prepaid card", "Others", "Theft/Dispute Reporting", "Mortgage/Loan"]
def make_prediction(input_data):
    """Classify a complaint text into one of the five ticket categories.

    input_data: dict with a 'text' key holding the raw complaint string.
    Returns {'prediction': <category name from target_names>}.
    """
    text = input_data['text']
    # Vectorize with the saved CountVectorizer vocabulary, re-weight with the
    # saved TF-IDF transformer, then classify with the logistic regression model.
    X_new_counts = loaded_vec.transform([text])
    X_new_tfidf = loaded_tfidf.transform(X_new_counts)
    prediction_index = loaded_model.predict(X_new_tfidf)[0]
    # Map the numeric class index back to its human-readable name.
    prediction_target_names = target_names[prediction_index]
    return {'prediction': prediction_target_names}
if __name__ == '__main__':
# Receive input data from the command line
input_data = json.loads(sys.argv[1])
# Make a prediction
prediction = make_prediction(input_data)
# Output the prediction as a JSON string
print(json.dumps(prediction))
| abtyagi15/Automatic-Ticket-Classification | classify.py | classify.py | py | 1,499 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name":... |
37012746527 | from collections import deque
from typing import List
class Solution:
    """Sliding-window maximum via a monotonically decreasing deque of indices."""

    @staticmethod
    def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k window of nums.

        Raises ValueError when nums is empty or shorter than k.
        Runs in O(n): each index enters and leaves the deque at most once.
        """
        if not nums or len(nums) < k:
            raise ValueError()

        candidates = deque()  # indices into nums; their values strictly decrease
        maxima = []
        for idx, value in enumerate(nums):
            # Evict indices that slid out of the window on the left.
            while candidates and candidates[0] <= idx - k:
                candidates.popleft()
            # Evict smaller values: they can never become a window maximum.
            while candidates and value >= nums[candidates[-1]]:
                candidates.pop()
            candidates.append(idx)
            if idx >= k - 1:
                maxima.append(nums[candidates[0]])
        return maxima
# Quick manual check from the console.
if __name__ == '__main__':
    Instant = Solution()
    # maxSlidingWindow is a @staticmethod, so the instance is not strictly needed.
    Solve = Instant.maxSlidingWindow(nums = [1,3,-1,-3,5,3,6,7], k = 3 )
    # nums = [1,3,-1,-3,5,3,6,7], k = 3 -> [3,3,5,5,6,7]
    # nums = [1], k = 1 -> [1]
    print(Solve)
# # Alternative method:
# class Solution:
# def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
# if not nums or len(nums) < k:
# raise ValueError()
#
# n = len(nums)
# left, right = [0] * (n + 1), [0] * (n + 1)
# left[-1], right[-1] = float('-inf'), float('-inf')
#
# for i,j in zip(range(0, n), reversed(range(0, n))):
# left[i] = nums[i] if i % k == 0 else max(left[i-1], nums[i])
# right[j] = nums[j] if (j + 1) % k == 0 else max(right[j+1], nums[j])
#
# res = []
# for i in range(n - k + 1):
# res.append(max(left[i + k - 1], right[i]))
#
# return res
| Manu87DS/Solutions-To-Problems | LeetCode/Python Solutions/Sliding Window Maximum/sliding.py | sliding.py | py | 1,559 | python | en | code | null | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
41068970621 | import matplotlib.pyplot as plt
import random
import matplotlib
from matplotlib import font_manager
import numpy as np

# Set the figure size and resolution.
plt.figure(figsize=(20, 8), dpi=80)
# Chinese-capable font (macOS system font path).
my_font = font_manager.FontProperties(
    fname='/System/Library/Fonts/Hiragino Sans GB.ttc')
# Generate the data: one temperature sample per minute for two hours.
x = range(0, 120)
random.seed(10)  # fixed seed so every run produces the same random series
y = [random.randint(20, 35) for i in range(120)]
# Plot the line.
plt.plot(x, y)
# Build x tick labels: "10点M分" for the first hour, "11点M分" afterwards.
_xticks_lables = ['10点{}分'.format(i) for i in x if i<60 ]
_xticks_lables += ['11点{}分'.format(i-60) for i in x if i>=60]
# Show every third tick so the labels stay readable; rotation=45 tilts them.
plt.xticks(x[::3], _xticks_lables[::3],
           rotation=45, fontproperties=my_font)
# Axis labels and title, rendered with the Chinese font.
plt.xlabel('时间', fontproperties=my_font)
plt.ylabel('温度 单位(℃)', fontproperties=my_font)
plt.title('10点到11点每分钟的气温变化情况', fontproperties=my_font)
# Display the figure.
plt.show() | XiongZhouR/python-of-learning | matplotlib/plot_1.py | plot_1.py | py | 1,047 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 10,
"usage_type": "call"
},
{
"a... |
39112326593 | import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from transformers import AutoTokenizer
from sklearn.model_selection import train_test_split
'''
Read the data from a pre-processed CADEC dataset and process them into a format compatible with BERT
'''
class DataProcessor():
"""
Loads the data from a pre-processed CADEC named-entity dataset and creates a BERT dataset
"""
def __init__(self, filename, model, seed, batch_size = 32, max_length = 512):
# Set the device
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# Initialize attribute variables
self.max_length = max_length
self.filename = filename
self.seed = seed # For test and train split
self.model = model
self.batch_size = batch_size
# Initialize tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model)
print('Parsing the data file...')
# Obtain sentences and labels
self.tokens, self.labels = self.sentence_parser()
# Split sentences if their associated wordpiece encoding is longer than max_length
self.split_tokens, self.split_labels = [], []
for tok, lab in zip(self.tokens, self.labels):
split_tok, split_lab = self.split_sentences(tok, lab)
self.split_tokens.extend(split_tok)
self.split_labels.extend(split_lab)
# Create ids for labels and split into training and test set
self.label2id, self.id2label = self.get_label_encoding_dict() # Initialize mapping of labels to ids
# Split the dataset into 0.8 training and 0.2 test
self.tokens_train, self.tokens_test, self.labels_train, self.labels_test = train_test_split(self.split_tokens, self.split_labels, test_size=0.20, random_state=self.seed)
# Split the training set into 0.875 training and 0.125 validation (0.7 and 0.1 of total dataset, respectively)
self.tokens_train, self.tokens_val, self.labels_train, self.labels_val = train_test_split(self.tokens_train, self.labels_train, test_size=0.125, random_state=self.seed)
print('Tokenize sentences...')
# Tokenize for BERT
# Training set
self.tokenized_input_train = self.tokenizer(self.tokens_train, truncation=True, is_split_into_words=True,
add_special_tokens=True, padding=True)
self.tokenized_input_train = self.add_word_ids(self.tokenized_input_train)
self.train_tags = self.get_bert_labels(self.tokenized_input_train, self.labels_train)
self.train_max_length = len(self.tokenized_input_train['input_ids']) # The length of the longest training message
# Validation set
self.tokenized_input_val = self.tokenizer(self.tokens_val, truncation=True, is_split_into_words=True,
add_special_tokens=True, padding=True, max_length = self.train_max_length)
self.tokenized_input_val = self.add_word_ids(self.tokenized_input_val)
self.val_tags = self.get_bert_labels(self.tokenized_input_val, self.labels_val)
# Test set
self.tokenized_input_test = self.tokenizer(self.tokens_test, truncation=True, is_split_into_words=True,
add_special_tokens=True, padding=True, max_length = self.train_max_length)
self.tokenized_input_test = self.add_word_ids(self.tokenized_input_test)
self.test_tags = self.get_bert_labels(self.tokenized_input_test, self.labels_test)
print('Preparing the dataset...')
# Prepare the data so it is compatible with torch
self.y_train = torch.tensor(self.train_tags).to(self.device)
self.y_val = torch.tensor(self.val_tags).to(self.device)
self.y_test = torch.tensor(self.test_tags).to(self.device)
self.train_dataloader = self.create_data_loaders(self.tokenized_input_train, self.y_train)
self.val_dataloader = self.create_data_loaders(self.tokenized_input_val, self.y_val)
self.test_dataloader = self.create_data_loaders(self.tokenized_input_test, self.y_test)
def sentence_parser(self):
    '''
    Parse self.filename into parallel token and label sequences.

    The file holds one tab-separated "token<TAB>label" pair per line,
    with blank lines delimiting sentences. Finding-type BIO tags are
    collapsed to the outside label 'O'.
    :return: (tokens, labels) — two lists of per-sentence lists
    '''
    with open(self.filename, 'r') as handle:
        raw = handle.read()
    # Sentences are blank-line separated; the final split entry is empty.
    sentences = [chunk.split('\n') for chunk in raw.split('\n\n')[:-1]]
    tokens, labels = [], []
    for sent in sentences:
        pairs = [line.split('\t') for line in sent]
        tokens.append([pair[0] for pair in pairs])
        # Drop the Finding class by re-labelling it as outside ('O').
        labels.append(['O' if pair[1] in ('I-Finding', 'B-Finding') else pair[1]
                       for pair in pairs])
    return tokens, labels
def split_sentences(self, sentence, labels):
    '''
    Recursively split a tokenized sentence (and its labels) until each
    piece encodes to at most self.max_length tokens. Splits prefer the
    period closest to the middle of the sentence, falling back to the
    exact midpoint when no period is present.
    :param sentence: list of word tokens
    :param labels: labels aligned with `sentence`
    :return: (token_chunks, label_chunks) — lists of sub-sentence lists
    '''
    period_tok = '.'
    # Base case: the encoded sentence already fits the length budget.
    if len(self.tokenizer.encode(sentence, is_split_into_words=True)) <= self.max_length:
        return [sentence], [labels]
    midpoint = len(sentence) // 2
    # Distance of every period token from the sentence midpoint.
    period_offsets = {pos: abs(midpoint - pos)
                      for pos, tok in enumerate(sentence) if tok == period_tok}
    if period_offsets:
        # Split at the period nearest the centre (ties -> earliest position,
        # matching a stable sort on distance).
        cut = min(period_offsets, key=period_offsets.get)
    else:
        cut = midpoint
    left_toks, right_toks = sentence[:cut + 1], sentence[cut + 1:]
    left_labs, right_labs = labels[:cut + 1], labels[cut + 1:]
    # Recurse on both halves and splice the resulting chunk lists together.
    toks_a, labs_a = self.split_sentences(left_toks, left_labs)
    toks_b, labs_b = self.split_sentences(right_toks, right_labs)
    return toks_a + toks_b, labs_a + labs_b
def train_test_split(self, test_size):
    '''
    Split the pooled dataset into training and test partitions.
    :param test_size: fraction of observations assigned to the test split
    :return: (X_train, X_test, y_train, y_test)
    '''
    # Delegates to sklearn's module-level train_test_split; the fixed seed
    # makes repeated calls produce the same partition.
    return train_test_split(self.split_tokens, self.split_labels,
                            test_size=test_size, random_state=self.seed)
def get_label_encoding_dict(self):
    '''
    Build label<->id mappings from the dataset labels.

    'O' is always id 0; the remaining BIO labels are ordered by the tag
    name that follows the 'B-'/'I-' prefix.
    :return: (lab2id, id2lab) — dict label->id and the inverse list
    '''
    seen = []  # unique non-'O' labels, in first-appearance order
    for sentence_labels in self.labels:
        for lab in sentence_labels:
            if lab != 'O' and lab not in seen:
                seen.append(lab)
    # Sort by the first letter after the B-/I- prefix of the BIO tag.
    ordered = ['O'] + sorted(seen, key=lambda tag: tag[2:])
    lab2id = {lab: idx for idx, lab in enumerate(ordered)}
    return lab2id, ordered
def add_word_ids(self, tokenized_data):
    """
    Attach per-sentence word ids to a tokenized batch so wordpieces can
    later be mapped back to their source words.

    None entries (special tokens) become 0 and real word ids are shifted
    up by 1 so the result fits in an integer tensor.
    :param tokenized_data: batch-encoded tokenizer output (dict-like with
        a .word_ids(batch_index=...) accessor)
    :return: the same object with a 'word_ids' entry added
    """
    shifted = []
    for batch_index in range(len(tokenized_data['input_ids'])):
        raw_ids = tokenized_data.word_ids(batch_index=batch_index)
        shifted.append([0 if wid is None else wid + 1 for wid in raw_ids])
    tokenized_data['word_ids'] = shifted
    return tokenized_data
def get_bert_labels(self, tokenized_words, labels):
    '''
    Align per-word labels with the wordpiece token sequences.

    Special tokens ([CLS], [SEP], [PAD]) receive the 'O' label id; a word
    broken into several wordpieces repeats its label once per piece.
    :param tokenized_words: batch encoding exposing word_ids(batch_index=...)
    :param labels: per-sentence lists of string labels
    :return: list of label-id sequences, one per sentence
    '''
    aligned = []
    for sent_idx, sent_labels in enumerate(labels):
        word_ids = tokenized_words.word_ids(batch_index=sent_idx)
        aligned.append([
            self.label2id['O'] if wid is None else self.label2id[sent_labels[wid]]
            for wid in word_ids
        ])
    return aligned
def create_data_loaders(self, bert_ds, labels):
    '''
    Wrap tokenized inputs and labels in a shuffling torch DataLoader.

    :param bert_ds: tokenized batch with 'input_ids', 'attention_mask'
        and 'word_ids' entries
    :param labels: tensor of aligned label ids
    :return: DataLoader yielding (input_ids, attention_mask, labels, word_ids)
    '''
    input_ids = torch.tensor(bert_ds['input_ids'])
    attention_mask = torch.tensor(bert_ds['attention_mask'])
    word_ids = torch.tensor(bert_ds['word_ids'])
    dataset = TensorDataset(input_ids, attention_mask, labels, word_ids)
    # Random sampling reshuffles the examples on every epoch.
    return DataLoader(dataset=dataset, sampler=RandomSampler(dataset),
                      batch_size=self.batch_size)
| allepalma/Text-mining-project | bert_data_creation.py | bert_data_creation.py | py | 10,521 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.device",
... |
19423963567 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 19:44:34 2018
@author: whockei1
"""
import numpy as np, matplotlib.pyplot as plt, random, json, pickle, datetime, copy, socket, math
from scipy.stats import sem
import matplotlib.colors as colors
from scipy.ndimage import gaussian_filter as gauss # for smoothing ratemaps
import sys, os, csv
import utility_fx as util
import ratterdam_ParseBehavior as Parse
import ratterdam_Defaults as Def
import ratterdam_CoreDataStructures as Core
import ratterdam_DataFiltering as Filt
def poolTrials(unit, alley, labels, txt):
    """
    Collect the 1d ratemaps of all visits to `alley` whose label matches
    texture `txt`.

    `labels` may be the real visit labels or a shuffled copy, so the same
    pooling routine serves both the observed and the null statistics.
    No subsampling to balance group sizes is done here.
    :return: (idx, rms) — kept visit indices and a stacked ratemap array
    """
    kept_idx, kept_rms = [], []
    for i, visit in enumerate(unit.alleys[alley]):
        if labels[i] != txt:
            continue
        ratemap = visit['ratemap1d']
        # Only genuine arrays are NaN-cleaned; other stored values pass through.
        if type(ratemap) is np.ndarray:
            ratemap = np.nan_to_num(ratemap)
        kept_rms.append(ratemap)
        kept_idx.append(i)
    return kept_idx, np.asarray(kept_rms)
def computeTestStatistic_Diffs(groupX, groupY):
    """
    Bin-wise difference between the mean traces of two trial stacks.

    NaN/inf bins are masked out before averaging, so the means ignore
    invalid values instead of propagating them.
    :return: masked array, mean(groupX) - mean(groupY) per bin
    """
    meanX = np.ma.masked_invalid(groupX).mean(axis=0)
    meanY = np.ma.masked_invalid(groupY).mean(axis=0)
    return meanX - meanY
def computeTestStatistic_AUCDiffs(groupX, groupY):
    """
    Absolute difference between the areas under the two groups' mean traces.

    Each group is a stack of single-trial ratemaps; invalid (NaN/inf) bins
    are masked out of the averages, then each mean trace is integrated with
    Simpson's rule and the unsigned difference of the areas is returned.
    """
    # Fix: the module only does `from scipy.stats import sem`, so the bare
    # `scipy` name used previously raised NameError at call time. Import
    # the integrator locally instead.
    try:
        from scipy.integrate import simpson  # SciPy >= 1.6
    except ImportError:
        from scipy.integrate import simps as simpson  # older SciPy
    maskX = np.ma.masked_invalid(groupX)
    avgX = maskX.mean(axis=0)  # ignores inf and nan
    maskY = np.ma.masked_invalid(groupY)
    avgY = maskY.mean(axis=0)  # ignores inf and nan
    return np.abs(simpson(avgX) - simpson(avgY))
def getLabels(unit, alley):
    """
    Return the actual stimulus (texture) label of every visit to `alley`,
    in visit order.
    """
    return [visit['metadata']['stimulus'] for visit in unit.alleys[alley]]
def genSingleNullStat(unit, alley, txtX, txtY, labels):
    """
    DEPRECATED - making them all array-style in genNNulls
    Generate a single null test statistic (diff x-y here)
    Shuffle labels, recompute means and take diff. 1x
    """
    # Break the label/trial pairing, then recompute the two group means as
    # if the shuffled labels were real; their difference is one null sample.
    shuffLabels = np.random.permutation(labels)
    idxX, rmsX = poolTrials(unit, alley, shuffLabels, txtX)
    idxY, rmsY = poolTrials(unit, alley, shuffLabels, txtY)
    null = computeTestStatistic_Diffs(rmsX, rmsY)
    return null
def genRealStat(unit, alley, txtX, txtY):
    # Observed (unshuffled) test statistic: bin-wise difference between the
    # mean ratemaps of textures txtX and txtY on this alley.
    labels = getLabels(unit, alley)
    idxX, rmsX = poolTrials(unit, alley, labels, txtX)
    idxY, rmsY = poolTrials(unit, alley, labels, txtY)
    stat = computeTestStatistic_Diffs(rmsX, rmsY)
    return stat
def computeBandThresh(nulls, alpha, side):
    '''Given a stack of null traces (n_nulls x n_bins), find the ordinate at
    each bin that admits a proportion of nulls equal to the cutoff.

    :param nulls: 2d array of null test statistics
    :param alpha: two-sided significance level; alpha/2 mass per tail
    :param side: 'upper' or 'lower' — which tail's band to compute
    :return: 1d array, the per-bin threshold
    :raises ValueError: if side is neither 'upper' nor 'lower'
    '''
    if side == 'upper':
        isReversed = True
    elif side == 'lower':
        isReversed = False
    else:
        # Fix: an invalid side previously fell through and raised an opaque
        # NameError on isReversed; fail with a clear message instead.
        raise ValueError("side must be 'upper' or 'lower', got %r" % (side,))
    # Number of null ordinates allowed beyond the band in this tail.
    propNull = int(((alpha / 2) * len(nulls)) + 1)
    significanceBand = []
    for point in range(len(nulls[0])):
        nullOrdinates = nulls[:, point]
        sortedVals = sorted(nullOrdinates, reverse=isReversed)
        # explicitly +1 to cutoff and -1 here to keep clear where the
        # threshold sits and how 0-indexing works
        significanceBand.append(sortedVals[propNull - 1])
    return np.asarray(significanceBand)
def computeGlobalCrossings(nulls, lowerBand, upperBand):
    """
    Proportion of null traces that escape the significance bands *anywhere*
    along the trace — the observed global p-value for the supplied bands.
    """
    n_crossing = sum(
        bool(np.any(np.logical_or(trace > upperBand, trace < lowerBand)))
        for trace in nulls
    )
    return n_crossing / len(nulls)
def global_FWER_alpha(nulls, unit, alpha=0.05): # fwerModifier should be 3 (txts) x n alleys. 9 in beltway task. But below adjust by how many alleys actually have activity so 9 may become smaller
    """
    Calculates the global, FWER corrected p-value at each bin of the data trace
    Returns the actual global P and the bands of test statistic ordinates that
    are the thresholds.

    Walks a grid of pointwise alphas from loose to strict and keeps the
    first one whose bands admit fewer whole-trace null crossings than the
    target proportion. Returns (None, None, None) when no grid value
    satisfies the target.
    """
    # NOTE(review): the `alpha` parameter is never used; the FWER target is
    # read from unit.acorr (set by the caller) — confirm that is intended.
    FWERalphaSelected = None
    globalLower, globalUpper = None, None
    FWERalpha = unit.acorr # nb this is a proportion (decimal) not a list cutoff (integer)
    alphaIncrements = np.linspace(0.017, 1e-4, 50) # start at 0.017 because thats the largest the adj p value could be: 0.05/(3*1)
    fwerSatisfied = False
    for adjustedAlpha in alphaIncrements:
        if not fwerSatisfied:
            # Candidate bands at this pointwise alpha, then the fraction of
            # null traces that cross them anywhere.
            lowerBand, upperBand = computeBandThresh(nulls, adjustedAlpha, 'lower'), computeBandThresh(nulls, adjustedAlpha, 'upper')
            propCrossings = computeGlobalCrossings(nulls, lowerBand, upperBand)
            if propCrossings < FWERalpha:
                fwerSatisfied = True
                FWERalphaSelected = adjustedAlpha
                globalLower, globalUpper = lowerBand, upperBand
    return FWERalphaSelected, globalLower, globalUpper
def shuffleArray(array, field_idx):
    """
    In place: randomly permute, within each row of `array`, the values at
    the columns listed in `field_idx` (scrambles in-field bins only).
    """
    for row in array:
        # `row` is a view, so fancy-index assignment mutates `array` itself.
        row[field_idx] = np.random.permutation(row[field_idx])
def consecutive(data, stepsize=1):
    """Split a 1d array into runs of consecutive values (step == stepsize)."""
    breakpoints = np.where(np.diff(data) != stepsize)[0] + 1
    return np.split(data, breakpoints)
def findField(rms, sthresh=3, rthresh=0.2):
    """
    Identify a field as a set of sthresh or more contiguous bins whose
    mean rate is at least rthresh of the trace maximum.

    :param rms: stack of 1d ratemaps (trials x bins)
    :param sthresh: minimum run length, in bins, to count as a field
    :param rthresh: fraction of the peak mean rate a bin must reach
    :return: (field, field_idx) — bool flag and the qualifying bin indices,
             or (False, None) when no run is long enough
    """
    mean = np.nanmean(rms, axis=0)
    above = np.where(mean >= (rthresh * np.nanmax(mean)))[0]
    try:
        # np.concatenate raises ValueError when no run reaches sthresh
        # (empty sequence). Fix: the old bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit — catch only the expected case.
        field_idx = np.concatenate(([run for run in consecutive(above) if len(run) >= sthresh]))
        return True, field_idx
    except ValueError:
        return False, None
def genNNulls(n, rms, labels, txtX, txtY):
    """
    Generates n null test statistics, hard coded
    now to be the binwise diff of avg(txtA) - avg(txtB)
    Returns np array nXl where l is length of 1d RM in bins

    :param n: number of label shuffles (null samples) to draw
    :param rms: (visits x bins) ratemap array for one alley
    :param labels: per-visit texture labels aligned with rms rows
    """
    shuffpos = False # toggle to shuffle bins within field
    nulls = np.empty((0,Def.singleAlleyBins[0]-1)) # by convention long dim is first
    if shuffpos:
        result, field_idx = findField(rms)
        if result == False: # no good field
            shuffpos = False
    rmsshuffle = copy.deepcopy(rms)
    for i in range(n):
        # Break the label/trial pairing for this null draw.
        shufflabels = np.random.permutation(labels)
        if shuffpos:
            # NOTE(review): the same rmsshuffle is re-shuffled every draw,
            # so positional shuffles accumulate across iterations — confirm
            # that is intended rather than shuffling a fresh copy each time.
            shuffleArray(rmsshuffle, field_idx) # shuffle in place within rows
        srmsX, srmsY = rmsshuffle[np.where(shufflabels==txtX)[0],:], rmsshuffle[np.where(shufflabels==txtY)[0],:]
        null = computeTestStatistic_Diffs(srmsX, srmsY)
        # NOTE: vstack inside the loop is O(n^2); acceptable at n ~ 1000.
        nulls = np.vstack((nulls, null))
    return nulls
def makeRMS(unit, alley):
    """
    Stack the 1d ratemaps of every visit to `alley` into a (visits x bins)
    array, alongside the texture label shown on each visit.

    :return: (rms, labels) — float array of ratemaps and an array of the
             texture string present on each visit
    """
    visits = unit.alleys[alley]
    if not visits:
        # Preserve the original empty shapes when the alley was never visited.
        return np.empty((0, Def.singleAlleyBins[0]-1)), np.empty((0))
    # Collect rows first and stack once: np.vstack/hstack inside the loop
    # re-copies the growing arrays each iteration (quadratic).
    rms = np.vstack([visit['ratemap1d'] for visit in visits])
    labels = np.asarray([visit['metadata']['stimulus'] for visit in visits])
    return rms, labels
def unitPermutationTest_SinglePair(unit, alley, txtX, txtY, nnulls, plot=False, returnInfo=True):
    """
    Wrapper function for global_FWER_alpha() that plots results

    Runs the full permutation test for one texture pair on one alley:
    builds nnulls shuffled test statistics, derives the FWER-corrected
    global bands, then reports where the observed statistic crosses them.

    :return: (globalCrossings, pointwiseCrossings, bounds, stat) when
        returnInfo is True; bounds is (globalLower, globalUpper,
        pointwiseLower, pointwiseUpper). All four are None-filled when no
        FWER alpha could be selected or no global crossing occurred.
    """
    rms, labels = makeRMS(unit, alley)
    nulls = genNNulls(nnulls,rms,labels,txtX,txtY)
    FWERalphaSelected, glowerBand, gupperBand = global_FWER_alpha(nulls, unit)
    # NOTE(review): `is None` is the idiomatic comparison here.
    if FWERalphaSelected == None:
        glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper = None, None, None, None
        globalCrossings, pointwiseCrossings, bounds, stat = None, None, None, None
    else:
        stat = genRealStat(unit, alley, txtX, txtY)
        #Below, calculate the pw alpha bc significantly modulated regions are defined
        # as those that pass the global band somewhere but then their extent is defined
        # as the whole region where they pass the pointwise band. See Buzsaki paper.
        pwAlphaUpper, pwAlphaLower = computeBandThresh(nulls, 0.05, 'upper'), computeBandThresh(nulls, 0.05, 'lower')
        globalCrossings = np.where(np.logical_or(stat > gupperBand, stat < glowerBand))[0]
        if globalCrossings.shape[0] > 0:
            pointwiseCrossings = np.where(np.logical_or(stat > pwAlphaUpper, stat < pwAlphaLower))[0]
        else:
            globalCrossings, pointwiseCrossings = None, None
    # NOTE(review): if plot=True while the test could not be run (stat is
    # None), plt.plot(stat, 'g') below will fail — confirm callers only plot
    # after a successful test.
    if plot:
        plt.plot(nulls.T, 'k', alpha=0.4)
        plt.plot(stat,'g')
        plt.xlabel("Linearized Position, Long Axis of Alley")
        plt.ylabel("Difference in Firing Rate")
        plt.title(f"Permutation Test Results for Texture {txtX} vs {txtY} on Alley {alley}")
        for band, style in zip([glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper], ['r', 'r', 'r--', 'r--']):
            plt.plot(band, style)
    if returnInfo:
        bounds = glowerBand, gupperBand, pwAlphaLower, pwAlphaUpper
        return globalCrossings, pointwiseCrossings, bounds, stat
def permutationResultsLogger(d, fname):
    """
    Write the per-alley / per-texture-pair crossing results to a CSV.

    If any cell holds real crossings (anything other than the 'XXX'
    placeholder), '_PASS' is appended to the file name so significant
    units are easy to spot in a directory listing.

    :param d: nested dict d[alley][pair]['global'|'pointwise']
    :param fname: output path without the .csv extension
    """
    alleys = [1, 3, 5, 7, 8, 10, 11, 16, 17]
    pairs = ["AB", "BC", "CA"]
    crossTypes = ["global", "pointwise"]
    # Any non-placeholder entry means at least one significant result.
    doesPass = any(
        d[alley][pair][crossType] != 'XXX'
        for alley in alleys for pair in pairs for crossType in crossTypes
    )
    savename = fname + "_PASS" if doesPass else fname
    # Fix: the old code also called f.close() inside the with block, which
    # is redundant — the context manager closes the file.
    with open(savename + '.csv', "w") as f:
        w = csv.writer(f, delimiter=' ')
        for alley in alleys:
            w.writerow([alley])
            for pair in pairs:
                w.writerow([pair])
                for crossType in crossTypes:
                    w.writerow([crossType, d[alley][pair][crossType]])
def unitPermutationTest_AllPairsAllAlleys(unit, nnulls,fpath, logger=True, plot='sepFile'):
    """
    Wrapper function to complete permutation tests for a unit
    across all alleys and all pairwise stim (A,B,C) combinations
    Pointwise p-value is set to 0.05
    Global p-value is set to 0.00098 (0.05/(3*17))
    All perm tests can be saved to a file for later use, depending on option:
    Plots will be in a 17x3 grid where each row is an alley 1-17
    and each column is a test stat in order AB, BC, CA
    plot = False -> Do not plot
    plot = sepFile -> Plot all test results to it's own file in the fpath dir
    plot = addFile -> Do not save as this plot is an addon to another file's
                      plotting routines (which will save the file itself)
    """
    if plot != False:
        fig, axes = plt.subplots(9, 3, figsize=(12,12), dpi=200) #bigger plot, bigger dpi
    pairs = ["AB", "BC", "CA"]
    # NOTE(review): `stamp` is a module-level name assigned only inside the
    # __main__ block — calling this function from an import would raise
    # NameError; confirm intended usage.
    fname = fpath + f"{stamp}_{unit.name}_{Def.singleAlleyBins[0]-1}bins_{Def.smoothing_1d_sigma}smooth_{Def.includeRewards}R_{Def.velocity_filter_thresh}vfilt_permutationResults"
    # 'XXX' is the placeholder meaning "no significant crossings found".
    crossings = {i:{pair:{'global':"XXX", 'pointwise':"XXX"} for pair in pairs} for i in [1,3,5,7,8,10,11,16,17]}
    axCounter = 0
    for alley in unit.validAlleys:
        print(f"Running Permutation test in alley {alley}")
        for pair in pairs:
            txtX, txtY = pair[0], pair[1]
            globalCrossings, pointwiseCrossings, bounds, stat = unitPermutationTest_SinglePair(unit, alley, txtX, txtY, nnulls,
                                                                    plot=False, returnInfo=True)
            if globalCrossings is not None:
                crossings[alley][pair]['global'] = globalCrossings
                crossings[alley][pair]['pointwise'] = pointwiseCrossings
            conditionName = unit.name + "_" + str(alley) + "_" + pair
            if plot != False and bounds[0] is not None:
                # the plot keyword will tell plotting fx whether to save sep or leave live for sep file to save
                plotPermutationResults(unit, bounds, stat, conditionName, globalCrossings, pointwiseCrossings, fig.axes[axCounter])
            axCounter += 1 # increment to get the next subplot next iteration.
    plt.suptitle(f"Permutation Test Results for {unit.name}")
    if logger == True:
        permutationResultsLogger(crossings, fname)
    if plot == 'sepFile':
        # just in case this is buggy in future: when sep script is saving the fpath var is ''
        plt.savefig(fname + ".svg")
        plt.close()
    elif plot == 'addFile':
        pass # just to be explicit that if another script is saving this plot
             # to its own set of plots (e.g the ratemap routine) then leave open
def plotPermutationResults(unit, bounds, stat, conditionName, globalCrossings, pointwiseCrossings, ax):
    """
    Plot the permutation-test bands, the observed test statistic and the
    two mean texture traces on the given axes.
    Does not save; saving is done by the wrapper over all pairs/alleys
    (or in a separate calling script).

    :param bounds: (globalLower, globalUpper, pointwiseLower,
        pointwiseUpper) as produced by unitPermutationTest_SinglePair
    """
    colorLookup = {'A':'r', 'B':'b', 'C': 'g'} # keep color coordination
    # Recover alley and texture pair from "<unit>_<alley>_<pair>".
    # NOTE(review): this parsing breaks if unit.name contains underscores.
    txtX, txtY = conditionName.split("_")[2]
    alley = int(conditionName.split("_")[1])
    # Get the real traces. Should refactor so this isn't recomputed here
    # and in the test itself.
    labels = getLabels(unit, alley)
    _, rmsX = poolTrials(unit, alley, labels, txtX)
    _, rmsY = poolTrials(unit, alley, labels, txtY)
    traceX, traceY = np.mean(rmsX, axis=0), np.mean(rmsY, axis=0)
    # Fix: bounds is (lower, upper, pwLower, pwUpper); the old unpack named
    # the variables in the opposite order. fill_between is symmetric so the
    # shaded regions were drawn correctly, but the names were misleading.
    g_lower, g_upper, pw_lower, pw_upper = bounds
    ax.fill_between(range(len(g_upper)), g_upper, g_lower, color='cornflowerblue')
    ax.fill_between(range(len(pw_upper)), pw_upper, pw_lower, color='darkblue')
    ax.plot(stat, 'k')
    ax.plot(traceX, colorLookup[txtX])
    ax.plot(traceY, colorLookup[txtY])
    # We're plotting all test results, so a failed test has no crossings
    # to highlight; its title is drawn in black instead of red.
    if globalCrossings is not None:
        ax.scatter(globalCrossings, stat[globalCrossings], c='cornflowerblue', marker='^')
        ax.scatter(pointwiseCrossings, stat[pointwiseCrossings], c='darkblue', marker='^')
        ax.set_title(f"{conditionName.split('_')[1:]}", color='r')
    else:
        ax.set_title(f"{conditionName.split('_')[1:]}", color='k')
if __name__ == '__main__':
    # Batch driver: run the permutation-test suite over every cluster file
    # found in one recording day's data directory.
    rat = "R886"
    expCode = "BRD1"
    datafile = f"E:\\Ratterdam\\{rat}\\{rat}{expCode}\\"
    fpath = f"E:\\Ratterdam\{rat}\\permutation_tests\\{expCode}\\"
    stamp = util.genTimestamp()
    alleyTracking, alleyVisits, txtVisits, p_sess, ts_sess = Parse.getDaysBehavioralData(datafile, expCode)
    if not os.path.isdir(fpath):
        os.mkdir(fpath)
    print(expCode)
    # Walk the day directory looking for tetrode cluster files (cl-maze1*).
    for subdir, dirs, fs in os.walk(datafile):
        for f in fs:
            if 'cl-maze1' in f and 'OLD' not in f and 'Undefined' not in f:
                clustname = subdir[subdir.index("TT"):] + "\\" + f
                unit = Core.UnitData(clustname, datafile, expCode, Def.alleyBounds, alleyVisits, txtVisits, p_sess, ts_sess)
                unit.loadData_raw()
                validalleys = []
                # NOTE(review): this loop runs the identical inclusion check
                # (and potentially the full test suite) once per alley id
                # without using `a` — confirm whether one call was intended.
                for a in [16, 17, 3, 1, 5, 7, 8, 10, 11]:
                    valid, acorr, alleys = util.checkInclusion(unit, 3)
                    if valid:
                        print(clustname)
                        unit.acorr = acorr
                        unit.validAlleys = alleys
                        unitPermutationTest_AllPairsAllAlleys(unit, 1000, fpath)
                    else:
                        print(f"{clustname} not run")
| whock3/ratterdam | Beltway_Project/ratterdam_PermutationTests.py | ratterdam_PermutationTests.py | py | 16,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_inv... |
37099800243 | import pandas as pd
import pickle
from data import DATA_FILENAME, to_days_since_1998, datetime
def parse_date(string_value: str) -> "datetime.date | None":
    """Parse a DD/MM/YYYY string into a date, or None when invalid.

    Fix: the annotation previously claimed ``int`` although the function
    returns a ``datetime.date`` (or ``None`` on a parse failure).
    Surrounding whitespace is tolerated.
    """
    try:
        return datetime.datetime.strptime(string_value.strip(), '%d/%m/%Y').date()
    except ValueError:
        # Malformed input is signalled with None rather than an exception.
        return None
# Columns read from the dataset; only used to compute per-column maxima.
COLUMNS = ['ibovespa']
df = pd.read_csv(DATA_FILENAME, usecols=COLUMNS)
# Maxima are kept to de-normalise the model's 0-1 output back to points.
max_values = {
    col: df[col].max()
    for col in COLUMNS
}
# Release the DataFrame; only max_values is needed from here on.
df = None
# Per-input-kind configuration: how to parse, normalise and which model
# file to load, plus the CLI prompt/error strings (Portuguese, user-facing).
MODELS = {
    'date': {
        'transform': parse_date,
        'normalize': to_days_since_1998,
        'file': 'svr_model.bin',
        'input_label': 'Entre com a data (DD/MM/YYYY): ',
        'error_label': 'A data informada não é valida! Por favor tente novamente...'
    },
}
print('''Esse programa não garante o seus resultados e não se responsabiliza pelo mesmos.
O modelo utilizado é fruto de um projeto de pesquisa com fins acadêmicos. Todo o projeto está disponível em: https://github.com/fernando7jr/py-ibov-regression
Funcionamento:
* Informe a data no formato DD/MM/YYYY.
* O programa calcula com base no modelo de aprendizado de máquina qual a pontuação possível de acordo com os paramêtros informados.
Pressione ^Z (CTRL+Z) ou ^C (CTRL+C) para sair a qualquer momento.
''')
model_config = MODELS['date']
# load the model
# NOTE(review): pickle.load executes arbitrary code from the file — only
# load model files from a trusted source.
f = open(model_config['file'], 'rb')
model = pickle.load(f)
f.close()
# REPL loop: prompt, parse, normalise, predict, de-normalise, print.
# Exits via EOF/KeyboardInterrupt as announced in the banner above.
while True:
    value = input(model_config['input_label'])
    value = model_config['transform'](value)
    if value is None:
        print(model_config['error_label'])
        continue
    value_norm = model_config['normalize'](value)
    X = [[value_norm]]
    y = model.predict(X)
    # Model output is normalised to [0, 1]; scale back to index points.
    ibov = max_values['ibovespa'] * y[0]
    # NOTE(review): .replace(".", ".") is a no-op — a decimal-comma
    # substitution may have been intended; confirm.
    print(f'De acordo com o modelo, o valor esperado paro IBOV é de {str(ibov).replace(".", ".")} pontos\n')
| fernando7jr/py-ibov-regression | ibov.py | ibov.py | py | 1,795 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "data.datetime.datetime.strptime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "data.datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "data.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name":... |
70521717223 | import datetime
import json
import os
import time
import random
import requests
from Crypto.Cipher import AES
from django.db.models import Q
from django.http import JsonResponse,HttpResponseRedirect
from django.views.decorators.cache import cache_page
from activety.models import Usercoupon
from news.views import to_dict
from shopnew.models import Topimg
from shopping.fengqiao import *
from shopping.models import *
from shopping.pay import *
# WeChat Pay unified-order endpoint
url = "https://api.mch.weixin.qq.com/pay/unifiedorder"
appid = 'wx16360426dc864b7d'
mch_id = '1537642871'
trade_type = 'JSAPI'
# NOTE(review): merchant API key and courier credentials are hardcoded in
# source — these are secrets and should live in settings/environment.
key = '1234567890QWERTYUIOPASDFGHJKLZXC'
clientCode = 'LLYLKJSZ'
checkWord = 'STGuVhBlDznxZbvyFFSxP5fdsyH8geFq'
"""
可变参数
body = 'test' #类目
out_trade_no = '20191210' #商户订单号
total_fee = 88 #支付金额,单位分
spbill_create_ip = '14.23.150.211' #终端ip
notify_url = 'https://www.jianshu.com/p/40c7bd9388a6' #通知回调url
"""
def get_params(body, out_trade_no, total_fee, spbill_create_ip, openid, notify_url):
    """
    Assemble the (unsigned) parameter dict for WeChat Pay's unified-order API.

    :param body: order description/category
    :param out_trade_no: merchant order number
    :param total_fee: amount in fen (cents)
    :param spbill_create_ip: client terminal IP
    :param openid: payer's openid (required for the JSAPI trade type)
    :param notify_url: asynchronous payment-result callback URL
    :return: dict of request parameters, without the 'sign' field
    """
    return dict(
        appid=appid,
        mch_id=mch_id,
        body=body,
        out_trade_no=out_trade_no,
        total_fee=total_fee,
        spbill_create_ip=spbill_create_ip,
        trade_type=trade_type,
        notify_url=notify_url,
        nonce_str=randnum(),
        openid=openid,
    )
# Compute the sign for a parameter dict and serialize it to request XML
def get_xml_params(data_params, key):
    """
    Sign the parameter dict and serialize it to the XML body expected by
    the WeChat Pay API. Note: mutates data_params by adding 'sign'.
    """
    data_params['sign'] = get_sign(data_params, key)
    return trans_dict_to_xml(data_params)
# Send the request to the WeChat Pay unified-order endpoint
def pay_wx(xml_params):
    # POST the signed XML body and return WeChat's response parsed to a dict
    # (e.g. return_code, result_code, prepay_id, nonce_str, sign, ...).
    response = requests.post(url,data=xml_params)
    get_dict = trans_xml_to_dict(response.text)
    return get_dict
# Query payment status; on success create the logistics order and persist it
def query_pay(obj):
    """
    Poll WeChat for the payment status of obj['order_num']. When the trade
    succeeded, create the Fengqiao logistics waybill and save the order
    (type=2, i.e. paid) to the database.

    :param obj: dict of order fields assembled by post_zhoubianorder
    :return: {'status': 1, 'data': 'SUCCESS'} when paid, otherwise
             {'status': 0, 'data': <trade_state>}
    """
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': obj['order_num'],
        'nonce_str': randnum(),
    }
    sign = get_sign(params, key)
    params['sign'] = sign
    xml_params = trans_dict_to_xml(params)
    # Query the order status
    url = 'https://api.mch.weixin.qq.com/pay/orderquery'
    res = requests.post(url,data=xml_params)
    get_dict = trans_xml_to_dict(res.text)
    state = get_dict['trade_state']
    if state == 'SUCCESS':
        # Create the logistics (shipping) order
        xml = compose_addorderxml(obj)
        response = addorder(xml)
        try:
            mailno = response['mailno']
        except:
            # Query the logistics status
            # NOTE(review): this retry re-reads response['mailno'], which is
            # the same lookup that just failed — `res['mailno']` (the fresh
            # getstatu result) looks intended. Also a bare except: swallows
            # every error; confirm and narrow to KeyError.
            res = getstatu(obj['order_num'])
            mailno = response['mailno']
        # Persist the paid order
        order = ZhouBianorders()
        order.order_location = obj['order_location']
        order.order_phone = obj['order_phone']
        order.getman = obj['getman']
        order.order_num = obj['order_num']
        order.order_user = obj['order_user']
        order.order_start_time = obj['order_start_time']
        order.order_true_pay =obj['order_true_pay']
        order.goodnum =obj['goodnum']
        order.type = 2
        order.couponid = 0 if obj['couponid'] == '' else obj['couponid']
        order.zhoubianid = obj['zhoubianid']
        order.goodname = obj['goodname']
        order.goodimg = obj['goodimg']
        order.goodprice = obj['goodprice']
        order.waybill_id = response['mailno']
        order.save()
        return {'status':1,'data':'SUCCESS'}
    else:
        return {'status':0,'data':state}
# Close an unpaid order
def close_pay(order_num):
    """
    Close the WeChat payment order identified by order_num and cancel the
    matching Fengqiao logistics order.

    :return: parsed dict of WeChat's closeorder response
    """
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': order_num,
        'nonce_str': randnum(),
    }
    sign = get_sign(params, key)
    params['sign'] = sign
    xml_params = trans_dict_to_xml(params)
    # Close the order on WeChat's side (shadows the module-level url on purpose)
    url = 'https://api.mch.weixin.qq.com/pay/closeorder'
    res = requests.post(url,data=xml_params)
    get_dict = trans_xml_to_dict(res.text)
    # Cancel the logistics waybill
    xml = compose_delorderxml(order_num)
    fengqiaodelorder(xml)
    return get_dict
def closeorder(request):
    """
    View: close the unpaid WeChat order named by POST['order_num'].
    Responds {'status': 1} when WeChat confirms the close, else {'status': 0}.
    """
    order_num = request.POST.get('order_num')
    result = close_pay(order_num)
    if result['result_code'] == 'SUCCESS':
        return JsonResponse({'status': 1, 'code': 'SUCCESS'})
    return JsonResponse({'status': 0, 'code': 'FAIL'})
@cache_page(60*60, cache='longtime')
def goods_type(request):
    """
    View: list every goods category (id, name, icon, default colour).
    Response is cached for one hour in the 'longtime' cache.
    """
    data = [
        {
            'type_id': category.id,
            'type_name': category.type_name,
            'type_icon': category.type_icon,
            'color': '#6e6d6d',
        }
        for category in Good_types.objects.all()
    ]
    return JsonResponse({'status': 1, 'data': data})
# def goods(request):
# goods = Goods.objects.all()
# types = Good_types.objects.all()
# typecontent = []
# for type in types:
# obj = {}
# obj['type_id'] = type.id
# obj['type_name'] = type.type_name
# obj['type_icon'] = type.type_icon
# obj['color'] = '#6e6d6d'
# typecontent.append(obj)
# data = {}
# for good in goods:
# type = good.type
# if type in data:
# goodlist = data[type]
# obj_in = {}
# obj_in['good_id'] = good.id
# obj_in['good_name'] = good.goods_name
# obj_in['goods_price'] = float(good.goods_price)
# obj_in['store_num'] = good.store_num
# obj_in['description'] = good.description
# obj_in['picture'] = good.picture
# obj_in['num'] = 0
# obj_in['type'] = type
# goodlist.append(obj_in)
# else:
# goodlist = []
# obj_in = {}
# obj_in['good_id'] = good.id
# obj_in['good_name'] = good.goods_name
# obj_in['goods_price'] = float(good.goods_price)
# obj_in['store_num'] = good.store_num
# obj_in['description'] = good.description
# obj_in['picture'] = good.picture
# obj_in['num'] = 0
# obj_in['type'] = type
# goodlist.append(obj_in)
# data[type] = goodlist
# datalist = [{'type':k,'data':v} for k,v in data.items()]
# return JsonResponse({'status': 1, 'data': datalist,'typecontent':typecontent})
# Peripheral (merchandise) goods listing
@cache_page(60*60, cache='longtime')
def showzhoubian(request):
    """
    View: list all peripheral goods. Response cached for one hour.
    """
    data = [
        {
            'img': item.img,
            'name': item.name,
            'price': item.price,
            'log': item.log,
            'id': item.id,
        }
        for item in Zhoubian.objects.all()
    ]
    return JsonResponse({'status': 1, 'data': data})
# Peripheral goods detail
@cache_page(60*60, cache='longtime')
def thezhoubian(request):
    """
    View: detail for one peripheral good (GET['zhoubianid']).
    Detail images are gathered from detail1..detail4, skipping blanks.
    Response cached for one hour.
    """
    item = Zhoubian.objects.get(id=int(request.GET.get('zhoubianid')))
    obj = {
        'img': item.img,
        'name': item.name,
        'price': item.price,
        'log': item.log,
        'id': item.id,
    }
    details = [item.detail1, item.detail2, item.detail3, item.detail4]
    obj['detail'] = [d for d in details if d != '']
    return JsonResponse({'status': 1, 'data': obj})
# If the order is still unpaid ~15 minutes later, delete it
def delorder(order_num):
    # NOTE(review): sleeps ~15 minutes (910 s) in the calling thread — this
    # would block a request worker if invoked synchronously; confirm it is
    # only ever run from a background thread/task.
    time.sleep(910)
    order = ZhouBianorders.objects.filter(order_num=order_num)
    # type == 1 marks an order still awaiting payment
    if order.exists() and order[0].type == 1:
        order.delete()
# Submit a peripheral-goods order
def post_zhoubianorder(request):
    """
    View: create a WeChat prepay order for a peripheral good.

    Consumes the user's coupon (if any), calls the unified-order API and
    returns both the signed parameters the mini-program needs to invoke
    WeChat Pay and the order fields to persist after payment.
    """
    order_user = request.POST.get('userid')
    order_location = request.POST.get('location')
    order_phone = request.POST.get('phone')
    order_couponid = request.POST.get('couponid')
    order_true_pay = request.POST.get('true_pay')
    getman = request.POST.get('getman')
    goodnum = request.POST.get('goodnum')
    zhoubianid = request.POST.get('zhoubianid')
    # Resolve the client IP (spbill_create_ip is required by WeChat Pay)
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0] # real client IP when behind a load balancer / proxy
    else:
        ip = request.META.get('REMOTE_ADDR') # direct (or proxy) address
    zhoubian = Zhoubian.objects.get(id=int(zhoubianid))
    order_num = randnum()
    # Consume (delete) the coupon, if one was applied
    if order_couponid != '':
        usercou = Usercoupon.objects.filter(Q(userid=order_user)&Q(coupon_id =int(order_couponid)))
        if usercou.exists():
            usercou[0].delete()
    # WeChat unified-order request
    body = 'test' # order category/description
    out_trade_no = order_num # merchant order number
    total_fee = int(float(order_true_pay)*100) # amount in fen (cents)
    spbill_create_ip = ip # client terminal IP
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30' # payment-result callback URL
    # NOTE(review): order_user[:-3] strips the last 3 chars to recover the
    # openid — presumably the stored user id carries a 3-char suffix; verify.
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_user[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    # Example response:
    # {'return_code': 'SUCCESS', 'trade_type': 'JSAPI', 'prepay_id': 'wx18102325542417b42cdbe9ef1001807600',
    #  'mch_id': '1537642871', 'sign': '36DEB26F5187D2DB8ABE839373EC09F1', 'return_msg': 'OK',
    #  'appid': 'wx16360426dc864b7d', 'result_code': 'SUCCESS', 'nonce_str': 'vVqn4SuQts0v18iE'}
    # Re-sign the prepay parameters for the mini-program's wx.requestPayment
    timestamp = str(int(time.time()))
    send_data = {}
    send_data['timeStamp']= timestamp
    send_data['appId']= response_dict['appid']
    send_data['signType']= 'MD5'
    send_data['nonceStr']= response_dict['nonce_str'].upper()
    send_data['package']= 'prepay_id='+ response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = order_num
    # Order payload echoed back to the client; persisted after payment
    obj = {}
    obj['order_location'] = order_location
    obj['order_phone'] = order_phone
    obj['getman'] = getman
    obj['order_num'] = order_num
    obj['order_user'] = order_user
    now = datetime.datetime.now()
    end = now + datetime.timedelta(minutes=10)
    # [:-7] trims microseconds from the ISO-ish str() representation
    obj['order_start_time'] = str(now)[:-7]
    obj['order_end_time'] = str(end)[:-7]
    obj['order_true_pay'] = order_true_pay
    obj['goodnum'] = goodnum
    obj['type'] = 1
    obj['couponid'] = order_couponid
    obj['zhoubianid'] = zhoubian.id
    obj['goodname'] = zhoubian.name
    obj['goodimg'] = zhoubian.img
    obj['goodprice'] = zhoubian.price
    return JsonResponse({'status': 1,'wx_data':send_data,'order_data':obj})
# Receive WeChat's payment-result notification
def get_wxnotice_pay(request):
    # NOTE(review): the actual handling (parsing the XML body and flipping
    # the order to paid) is commented out below; the endpoint currently
    # only acknowledges the callback — confirm this is intentional.
    # data = request.body.decode()
    # data_dict = trans_xml_to_dict(data)
    # if data_dict['return_code'] == 'SUCCESS':
    #     order_num = data_dict['out_trade_no']
    #     order_true_pay = data_dict['total_fee']
    #     order = ZhouBianorders.objects.filter(Q(order_num=order_num)&Q(order_true_pay=order_true_pay))[0]
    #     order.type = 2
    #     order.save()
    return JsonResponse({'status': 1})
# Re-initiate payment for a pending (unpaid) order
def ready_pay(request):
    """
    View: close the old WeChat order and open a fresh unified-order with
    the same order number, returning new signed prepay parameters.
    """
    # NOTE(review): request.POST.get returns a string, yet `order` is
    # indexed like a dict below — the client presumably posts JSON that
    # should be json.loads()-ed here; confirm.
    order = request.POST.get('order_data')
    # order = ZhouBianorders.objects.get(order_num=order_num)
    # now = time.time()
    # distance = now - int(order.timestamp)
    # if distance > (60*60*1.9):
    # Close the previous order first
    old_order_num = order['order_num']
    data = close_pay(old_order_num)
    if data['result_code'] == 'FAIL':
        return JsonResponse({'status':0,'wx_data':'关闭订单失败'})
    # Re-initiate payment; resolve the client IP
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0] # real client IP when behind a load balancer / proxy
    else:
        ip = request.META.get('REMOTE_ADDR') # direct (or proxy) address
    # new_order_num = randnum()
    order_true_pay = order['order_true_pay']
    order_user = order['order_user']
    # WeChat unified-order request
    body = 'test' # order category/description
    out_trade_no = old_order_num # merchant order number (reused)
    total_fee = int(float(order_true_pay) * 100) # amount in fen (cents)
    spbill_create_ip = ip # client terminal IP
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30' # payment-result callback URL
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_user[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    # Example response:
    # {'return_code': 'SUCCESS', 'trade_type': 'JSAPI', 'prepay_id': 'wx18102325542417b42cdbe9ef1001807600',
    #  'mch_id': '1537642871', 'sign': '36DEB26F5187D2DB8ABE839373EC09F1', 'return_msg': 'OK',
    #  'appid': 'wx16360426dc864b7d', 'result_code': 'SUCCESS', 'nonce_str': 'vVqn4SuQts0v18iE'}
    # Re-sign the prepay parameters for the mini-program payment call
    timestamp = str(int(time.time()))
    send_data = {}
    send_data['timeStamp'] = timestamp
    send_data['appId'] = response_dict['appid']
    send_data['signType'] = 'MD5'
    send_data['nonceStr'] = response_dict['nonce_str'].upper()
    send_data['package'] = 'prepay_id=' + response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = old_order_num
    # Re-persisting the order with a new number is currently disabled:
    # order.order_num = new_order_num
    # order.timestamp = timestamp
    # order.save()
    # delorder(order_num)
    return JsonResponse({'status': 1, 'wx_data': send_data, 'order_data': order})
# Refund
def refundment(request):
    """
    Admin action: refund the full amount of order GET['order_num'] via
    WeChat's refund API (requires the merchant client certificate), then
    record a Refund row and bounce back to the admin order list.
    Only orders in state 41 (refund requested) are processed.
    """
    order_num = request.GET.get('order_num')
    order = ZhouBianorders.objects.get(order_num=order_num)
    if order.type == 41:
        order_true_pay = int(order.order_true_pay*100)  # amount in fen
        notify_url = 'http://101.132.47.14/shop/get_wxnotice_refund/'
        url = 'https://api.mch.weixin.qq.com/secapi/pay/refund'
        params = {
            'appid': appid,
            'mch_id': mch_id,
            'nonce_str': randnum(),
            'out_trade_no': order_num,
            'out_refund_no':order_num,
            'total_fee': order_true_pay,
            'refund_fee': order_true_pay,
            'notify_url': notify_url,
        }
        xml_params = get_xml_params(params, key)
        headers = {'Content-Type': 'application/xml'}
        # The refund API is mutual-TLS: present the merchant client cert.
        ssh_keys_path = '/home/zhou/project/xiaochengxu/cert'
        weixinapiclient_cert = os.path.join(ssh_keys_path, "apiclient_cert.pem")
        weixinapiclient_key = os.path.join(ssh_keys_path, "apiclient_key.pem")
        res = requests.post(url, data=xml_params, headers=headers,
                            cert=(weixinapiclient_cert, weixinapiclient_key), verify=True)
        get_dict = trans_xml_to_dict(res.text)
        if get_dict['result_code'] == 'SUCCESS':
            # Refund request accepted; record it (status flips when the
            # asynchronous notification in get_wxnotice_refund arrives)
            refund = Refund()
            refund.order_num = order_num
            refund.refund_num = order.order_true_pay
            refund.save()
            # NOTE(review): blocking sleep in a request handler — confirm
            # the 3 s pause before redirecting is really needed.
            time.sleep(3)
            return HttpResponseRedirect('/admin/shopping/zhoubianorders/')
        else:
            return JsonResponse({'status': 1,'code':'FAIL'})
# Callback: WeChat's asynchronous refund-result notification.
def get_wxnotice_refund(request):
    """Handle WeChat's refund notification.

    Decrypts the ``req_info`` field (AES-ECB keyed by md5(api key)) and, on
    the first notification only, marks the Refund done and the order as
    refunded (type 43).  Always answers status 1 so WeChat stops retrying.
    NOTE(review): ``AES.new`` receives the hex-digest *string* as the key —
    pycryptodome requires bytes here; verify which crypto package is used.
    """
    data = request.body.decode()
    data_dict = trans_xml_to_dict(data)
    if data_dict['return_code'] == 'SUCCESS':
        req_info = data_dict['req_info']
        md5 = hashlib.md5()  # derive the AES key from the API key
        md5.update(key.encode('utf-8'))
        tokey = md5.hexdigest().lower()
        code = base64.b64decode(req_info)
        cipher = AES.new(tokey, AES.MODE_ECB).decrypt(code).decode()
        res_data = trans_xml_to_dict(cipher,'root')
        order_true_pay = float(res_data['total_fee'])/100
        order_num = res_data['out_trade_no']
        refund = Refund.objects.get(order_num=order_num)
        if refund.refund_status ==0:
            # First notification only: flip the local flags.
            refund.refund_status = 1
            refund.save()
            order = ZhouBianorders.objects.filter(order_num=order_num)[0]
            order.type = 43
            order.save()
        return JsonResponse({'status': 1})
    else:
        return JsonResponse({'status': 1})
# Query the payment status of an order.
def query_pay_state(request):
    """Decode the posted order payload and return its WeChat pay state."""
    payload = json.loads(request.POST.get('order_data'))
    return JsonResponse(query_pay(payload))
# Fetch the current user's orders for one order tab.
def myorder(request):
    """Return the user's orders filtered by tab ``type``.

    type 2  -> shipped orders (also refreshes logistics state via SF query),
    type 3  -> delivered orders (auto-finalises after 7 days),
    other   -> refund-related orders (types 41/42/43).
    NOTE(review): ``type`` shadows the builtin; kept unchanged here.
    """
    userid = request.POST.get('userid')
    type = int(request.POST.get('type'))
    if type == 2:
        orders = ZhouBianorders.objects.filter(Q(order_user=userid) & Q(type=2)).order_by('-id')
    elif type == 3:
        orders = ZhouBianorders.objects.filter( (Q(order_user=userid)&Q(type=31))|(Q(order_user=userid)&Q(type=32)) ).order_by('-id')
    else:
        orders = ZhouBianorders.objects.filter( (Q(order_user=userid)&Q(type=41))|(Q(order_user=userid)&Q(type=42))|(Q(order_user=userid)&Q(type=43)) ).order_by('-id')
    order_data = []
    if orders.exists():
        for order in orders:
            obj = {}
            if type == 2:
                # Logistics lookup for shipped orders.
                xml = query_xml(order.order_num)
                route_list = queryorder(xml)
                if 'remark' in route_list[-1]:
                    obj['trans']= route_list[-1]['remark']
                    opcode = route_list[-1]['opcode']
                    # opcode '80' means the parcel was signed for.
                    if opcode == '80':
                        receivetime = route_list[-1]['accept_time']
                        now = time.time()
                        receive = datetime.datetime.strptime(receivetime, '%Y-%m-%d %H:%M:%S').timestamp()
                        # 604800 s = 7 days: auto-complete old deliveries (31),
                        # otherwise mark as recently delivered (32).
                        if now - receive > 604800:
                            order.type = 31
                            order.receivetime = receivetime
                            order.save()
                        else:
                            order.type = 32
                            order.receivetime = receivetime
                            order.save()
                else:
                    obj['trans'] = '待揽件'
            elif type == 3:
                # Promote "recently delivered" to "completed" after 7 days.
                if order.type == 32:
                    now = time.time()
                    receive = datetime.datetime.strptime(order.receivetime, '%Y-%m-%d %H:%M:%S').timestamp()
                    if now - receive > 604800:
                        order.type = 31
                        order.save()
            obj['order_num'] = order.order_num
            obj['order_id'] = order.id
            obj['order_start_time'] = order.order_start_time
            obj['order_true_pay'] = order.order_true_pay
            obj['goodnum'] = order.goodnum
            obj['type'] = order.type
            obj['goodname'] = order.goodname
            obj['goodimg'] = order.goodimg
            obj['goodprice'] = order.goodprice
            order_data.append(obj)
    return JsonResponse({'status': 1,'order_list':order_data,'type':type})
# Request a product return for an order.
def return_goods(request):
    """Mark an order as refund-requested (type 41) if it is returnable (32)."""
    num = request.POST.get('order_num')
    target = ZhouBianorders.objects.filter(order_num=num)[0]
    if target.type != 32:
        return JsonResponse({'status': 0, 'code': '该状态不支持退款'})
    target.type = 41
    target.save()
    return JsonResponse({'status': 1, 'code': 'succese'})
# Cancel a pending refund request.
def cancel_return(request):
    """Restore an order from refund-requested (41) back to 31 or 32.

    Orders older than seven days become completed (31); newer ones go back
    to recently-delivered (32).
    """
    num = request.POST.get('order_num')
    target = ZhouBianorders.objects.filter(order_num=num)[0]
    started_at = target.order_start_time
    if target.type != 41:
        return JsonResponse({'status': 0, 'code': '该状态不支持取消退款'})
    started = datetime.datetime.strptime(started_at, '%Y-%m-%d %H:%M:%S').timestamp()
    seven_days = 604800
    target.type = 31 if time.time() - started > seven_days else 32
    target.save()
    return JsonResponse({'status': 1})
# Admin action: refuse a refund request.
def refuse_return(request):
    """Move an order from refund-requested (41) to refund-refused (42)."""
    num = request.GET.get('order_num')
    print('order_num', num)  # debug trace kept from the original
    target = ZhouBianorders.objects.filter(order_num=num)[0]
    if target.type != 41:
        return JsonResponse({'status': 0, 'code': '该状态不支持拒绝退款'})
    target.type = 42
    target.save()
    return HttpResponseRedirect('/admin/shopping/zhoubianorders/')
#现场服务
#@cache_page(60*60,cache='longtime')
def newgoods(request):
    """Return all on-site goods grouped by type, plus the type metadata.

    FIX: the two branches of the original loop built the per-good dict with
    duplicated code that had diverged — only the new-type branch filtered
    out empty picture slots.  Both paths now share one helper that filters.
    """
    goods = Goods.objects.all()
    types = Good_types.objects.all()
    topimg = Topimg.objects.get(type='xianchang').img
    typecontent = [
        {
            'type_id': t.id,
            'type_name': t.type_name,
            'type_icon': t.type_icon,
            'color': '#6e6d6d',
        }
        for t in types
    ]
    data = {}
    for good in goods:
        # setdefault replaces the duplicated if/else branches of the original.
        data.setdefault(good.type, {})[good.id] = _good_to_dict(good)
    return JsonResponse({'status': 1, 'data': data,'typecontent':typecontent,'topimg':topimg})


def _good_to_dict(good):
    """Serialize one Goods row for the client; empty picture slots are dropped."""
    pictures = [good.picture1, good.picture2, good.picture3, good.picture4, good.picture5]
    return {
        'good_id': good.id,
        'good_name': good.goods_name,
        'goods_price': float(good.goods_price),
        'store_num': good.store_num,
        'description': good.description,
        'picture': good.picture1,
        'picture_list': [p for p in pictures if p != ''],
        'num': 0,
        'type': good.type,
    }
def post_xianchangorder(request):
    """Create an on-site service order, consume the coupon, start WeChat pay.

    Persists the order header plus one detail row per goods entry, deletes
    the user's coupon if one was used, then calls the WeChat unified-order
    API and returns the signed client pay parameters.
    """
    order_userid = request.POST.get('userid')
    location_site = request.POST.get('location_site')
    location_seat = request.POST.get('location_seat')
    phone = request.POST.get('phone')
    couponid = request.POST.get('couponid')
    order_true_pay = request.POST.get('true_pay')
    order_getman = request.POST.get('getman')
    goodbag = request.POST.get('goodbag')
    goodbag = json.loads(goodbag)
    # Determine the client ip (honour proxies / load balancers).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]  # real client ip when behind a proxy
    else:
        ip = request.META.get('REMOTE_ADDR')  # direct connection ip
    # Order header.
    order = Xianchangorder()
    now = datetime.datetime.now()
    ordernum = randnum()
    order.order_num = ordernum
    order.order_start_time = str(now)[:-7]  # strip microseconds
    order.order_userid = order_userid
    order.order_getman = order_getman
    order.location_site = location_site
    order.location_seat = location_seat
    order.phone = phone
    order.couponid = 0 if couponid == '' else int(couponid)
    order.order_true_pay = order_true_pay
    order.save()
    # One detail row per goods entry in the bag.
    for goodid in goodbag:
        detail = Xianchangorder_detail()
        detail.goodid = goodbag[goodid]['good_id']
        detail.goodnum = goodbag[goodid]['num']
        detail.goodname = goodbag[goodid]['good_name']
        detail.goodprice = goodbag[goodid]['goods_price']
        detail.ordernum = ordernum
        detail.orderForeignKey = order
        detail.save()
    # Consume the coupon, if any was used.
    if couponid != '':
        usercou = Usercoupon.objects.filter(Q(userid=order_userid)&Q(coupon_id =int(couponid)))
        if usercou.exists():
            usercou[0].delete()
    # WeChat unified-order API.
    body = 'test'  # category
    out_trade_no = ordernum  # merchant order number
    total_fee = int(float(order_true_pay)*100)  # amount in cents
    spbill_create_ip = ip  # terminal ip
    notify_url = 'https://www.jianshu.com/u/44cde87b5c30'  # payment notify callback
    data_params = get_params(body, out_trade_no, total_fee, spbill_create_ip,order_userid[:-3],notify_url)
    xml_params = get_xml_params(data_params, key)
    response_dict = pay_wx(xml_params)
    # Signed parameters the mini-program needs to invoke payment.
    timestamp = str(int(time.time()))
    send_data = {}
    send_data['timeStamp']= timestamp
    send_data['appId']= response_dict['appid']
    send_data['signType']= 'MD5'
    send_data['nonceStr']= response_dict['nonce_str'].upper()
    send_data['package']= 'prepay_id='+ response_dict['prepay_id']
    send_sign = get_sign(send_data, key)
    send_data['sign'] = send_sign
    send_data['order_num'] = ordernum
    return JsonResponse({'status': 1, 'wx_data': send_data})
def qureypay_forxianchang(request):
    """Ask WeChat for the pay state of an on-site order; mark it paid on success."""
    order_num = request.POST.get('order_num')
    params = {
        'appid': appid,
        'mch_id': mch_id,
        'out_trade_no': order_num,
        'nonce_str': randnum(),
    }
    params['sign'] = get_sign(params, key)
    xml_params = trans_dict_to_xml(params)
    # WeChat order-query endpoint.
    res = requests.post('https://api.mch.weixin.qq.com/pay/orderquery', data=xml_params)
    reply = trans_xml_to_dict(res.text)
    # Short-circuit keeps trade_state untouched unless result_code succeeded.
    if reply['result_code'] == 'SUCCESS' and reply['trade_state'] == 'SUCCESS':
        theorder = Xianchangorder.objects.get(order_num=order_num)
        theorder.ispay = 1
        theorder.save()
        return JsonResponse({'status': 1, 'code': 'SUCCESS'})
    return JsonResponse({'status': 0, 'code': 'FAIL'})
def showorder_forxianchang(request):
    """List a user's paid on-site orders, split into waiting vs collected.

    FIX: the original tested ``orders.exists`` (the bound method object,
    always truthy) instead of calling ``orders.exists()``.  Also renamed
    the accumulator that shadowed the builtin ``sum``.
    """
    userid = request.POST.get('userid')
    orders = Xianchangorder.objects.filter(order_userid=userid, ispay=1).order_by('-id')
    wait = []
    get = []
    if orders.exists():
        for order in orders:
            obj = to_dict(order)
            ordernum = obj['order_num']
            details = Xianchangorder_detail.objects.filter(ordernum=ordernum)
            detail_list = []
            for detail in details:
                detail_list.append({
                    'goodname': detail.goodname,
                    'goodnum': detail.goodnum,
                    'goodprice': detail.goodprice,
                })
            obj['detail_list'] = detail_list
            # Pre-coupon total of the order's items.
            total = 0
            for item in detail_list:
                total += item['goodnum'] * item['goodprice']
            obj['coupon'] = total - obj['order_true_pay']
            obj['sum'] = total
            if obj['isget'] == 0:
                wait.append(obj)
            else:
                get.append(obj)
    return JsonResponse({'waitorder': wait, 'got': get})
def loactionforxianchang(request):
    """Return every configured on-site service location."""
    sites = [entry.location for entry in Locationforxianchang.objects.all()]
    return JsonResponse({'status': 1, 'location': sites})
| zhoujialefanjiayuan/liu-lian | xiaochengxu/shopping/views.py | views.py | py | 27,704 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",... |
72835791463 | import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use."""
    if 'db' not in g:
        conn = sqlite3.connect(
            current_app.config['DATABASE'],
            detect_types=sqlite3.PARSE_DECLTYPES,
        )
        conn.row_factory = sqlite3.Row  # rows support dict-style access
        g.db = conn
    return g.db
def close_db(e=None):
    """Close the request's DB connection, if one was opened."""
    conn = g.pop('db', None)
    if conn is not None:
        conn.close()
def init_db():
    """(Re)create the schema by executing schema.sql against the app database."""
    with current_app.open_resource('schema.sql') as schema_file:
        get_db().executescript(schema_file.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
    """CLI entry point: recreate the database tables via ``flask init-db``."""
    init_db()
    click.echo('Initialized the database.')
def init_app(app):
    """Register DB teardown and the init-db CLI command on the Flask app."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
| yukoga/flask_sample_001 | flaskr/db.py | db.py | py | 794 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.g",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.g.db",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 1... |
70562902825 | import configparser
import os
class AWSAnmeldung():
    """Read AWS credentials for a given user/account from ~/.aws/credentials."""

    def __init__(self, benutzer, account):
        """Load the credentials file of *benutzer* and cache the *account* keys."""
        self.benutzer = benutzer
        self.account = account
        configName = "credentials"
        configPfad = os.path.join("/", "home", self.benutzer, ".aws", configName)
        self.config = configparser.ConfigParser()
        self.config.read(configPfad)
        self.aws_access_key_id = self.leseEintrag(account, "aws_access_key_id")
        self.aws_secret_access_key = self.leseEintrag(account, "aws_secret_access_key")
        self.region_name = self.leseEintrag(account, "region_name")

    def leseEintrag(self, auswahl, zeile):
        """Return option *zeile* from section *auswahl*.

        FIX: the original called ``config.get`` twice and discarded the
        first result; one lookup suffices.
        """
        return self.config.get(auswahl, zeile)
# Manual smoke test: requires an existing ~/.aws/credentials for user "studium".
if __name__ == '__main__':
    test = AWSAnmeldung("studium","default")
    print(test.aws_secret_access_key,test.aws_access_key_id)
    print(test.leseEintrag("default","aws_access_key_id"))
print(test.leseEintrag("default","aws_access_key_id")) | charlenebertz/fhb-ws1516-sysint | target/dist/fhb-ws1516-sysint-1.0.dev0/build/lib/config.py | config.py | py | 911 | python | de | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 11,
"usage_type": "call"
}
] |
25715720301 | from functools import partial
from typing import Dict, Callable
from squirrel.driver.msgpack import MessagepackDriver
from squirrel.serialization import MessagepackSerializer
from squirrel.store import SquirrelStore
from squirrel.iterstream import IterableSource, Composable
import numpy as np
# Dataset sizing constants and split boundaries (fractions of N_SAMPLES).
N_SAMPLES = 2500
MAX_VALUE = 10.0  # outlier threshold applied after normalization
SPLIT_25 = int(N_SAMPLES * 0.25)
SPLIT_50 = int(N_SAMPLES * 0.5)
SPLIT_80 = int(N_SAMPLES * 0.8)
SPLIT_90 = int(N_SAMPLES * 0.9)
N_SHARD = 100  # samples per stored shard
def update_range_dict(range_dict: Dict, name: str, value: np.array, op: Callable = np.maximum) -> None:
    """Fold *value* into ``range_dict[name]`` with *op* (element-wise max by default)."""
    if name not in range_dict:
        range_dict[name] = value
    else:
        range_dict[name] = op(value, range_dict[name])
def unify_range_dicts(range_dict1: Dict, range_dict2: Dict, op: Callable = np.maximum) -> Dict:
    """Combine two range dicts key-wise with *op*; keys are taken from the first."""
    return {name: op(range_dict1[name], range_dict2[name]) for name in range_dict1}
def map_update_ranges(sample: Dict, range_dict: Dict) -> Dict:
    """Record the per-column absolute maxima of one sample, then pass it through."""
    update_range_dict(range_dict, "x_range", np.amax(np.abs(sample["data_x"]), axis=0))
    update_range_dict(range_dict, "y_range", np.amax(np.abs(sample["data_y"]), axis=0))
    return sample
def get_range_dict(base_url: str, split: str) -> Dict:
    """Scan one stored split and return its per-column absolute maxima."""
    range_dict = {}
    it = MessagepackDriver(f"{base_url}/{split}").get_iter()
    # map() mutates range_dict as a side effect; join() drains the stream.
    it.map(partial(map_update_ranges, range_dict=range_dict)).tqdm().join()
    return range_dict
def save_shard(it: Composable, store: SquirrelStore) -> None:
    """Materialize the stream *it* and persist it as one shard in *store*."""
    store.set(value=list(it))
def scale(sample: Dict, range_dict: Dict) -> Dict:
    """Normalize one sample by the recorded per-column ranges (floored to avoid /0)."""
    div_x = np.clip(range_dict["x_range"], a_min=0.000001, a_max=None).reshape(1, -1)
    div_y = np.clip(range_dict["y_range"], a_min=0.000001, a_max=None).reshape(1, -1)
    return {
        "data_x": sample["data_x"] / div_x,
        "data_y": sample["data_y"] / div_y,
        "edge_index": sample["edge_index"],
    }
def filter_max(sample: Dict) -> bool:
    """Return False for outlier samples whose values exceed MAX_VALUE."""
    for field in ("data_x", "data_y"):
        if sample[field].max() > MAX_VALUE:
            return False
    return True
def save_stream(
    it: Composable, output_url: str, split: str, range_dict: Dict = None, filter_outliers: bool = True
) -> None:
    """Optionally normalize and outlier-filter a stream, then shard it to storage.

    ``it`` may be None (slot unused in the split tuple); then nothing happens.
    """
    if it is None:
        return
    store = SquirrelStore(f"{output_url}/{split}", serializer=MessagepackSerializer())
    if range_dict is not None:
        it = it.map(partial(scale, range_dict=range_dict))
    if filter_outliers:
        it = it.filter(filter_max)
    # Group into fixed-size shards; the trailing partial shard is kept.
    it.batched(N_SHARD, drop_last_if_not_full=False).map(partial(save_shard, store=store)).tqdm().join()
def iterate_source_data(fem_generator: str) -> None:
    """Yield split tuples per mesh generator for one FEM generator.

    Each yielded value is a 6-tuple
    (train1, val1, train2, val2, test1, test2) of Composables; slots not
    fed by the current mesh family are None.
    NOTE(review): annotated ``-> None`` but this is a generator of tuples.
    """
    mesh_generators = [
        "square",
        "disk",
        "cylinder",
        "l_mesh",
        "u_mesh",
        "square_extra",
        "disk_extra",
        "cylinder_extra",
        "l_mesh_extra",
        "u_mesh_extra",
        "square_rand",
        "disk_rand",
        "cylinder_rand",
        "l_mesh_rand",
        "u_mesh_rand",
    ]
    for mesh_g in mesh_generators:
        key = f"{fem_generator}_{mesh_g}"
        path = f"gs://squirrel-core-public-data/gnn_bvp_solver/{key}"
        iter = MessagepackDriver(path).get_iter()
        print("GENERATING:", fem_generator, mesh_g)
        if mesh_g.startswith("u_mesh"):
            if mesh_g == "u_mesh":
                # test set 2 (shape generalisation: plain U-mesh only)
                # TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2
                yield None, None, None, None, None, iter
        else:
            # all but U-mesh
            if mesh_g.endswith("extra"):
                all_data = iter.tqdm().collect()
                # test set 1
                # TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2
                yield None, None, None, None, IterableSource(all_data[:SPLIT_25]), None
            elif mesh_g.endswith("rand"):
                all_data = iter.tqdm().collect()
                # train/val set 2 (80/20 split)
                # TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2
                yield None, None, IterableSource(all_data[:SPLIT_80]), IterableSource(all_data[SPLIT_80:]), None, None
            else:
                all_data = iter.tqdm().collect()
                # train/val set 1 (80/20 split)
                # TRAIN1, VAL1, TRAIN2, VAL2, TEST1, TEST2
                yield IterableSource(all_data[:SPLIT_80]), IterableSource(all_data[SPLIT_80:]), None, None, None, None
def scale_and_store(in_split: str, out_split: str, range_dict: Dict, base_url_in: str, base_url_out: str) -> None:
    """Normalize one stored split with *range_dict* and save it under the output url."""
    it = MessagepackDriver(f"{base_url_in}/{in_split}").get_iter()
    save_stream(it, base_url_out, out_split, range_dict)
def main(fem_generator: str, out_url: str) -> None:
    """Write the raw (unnormalized) train/val/test splits for one FEM generator."""
    for append_train1, append_val1, append_train2, append_val2, append_test1, append_test2 in iterate_source_data(
        fem_generator
    ):
        print("saving splits")
        print("train1")
        save_stream(append_train1, out_url, "raw_train1")
        print("val1")
        save_stream(append_val1, out_url, "raw_val1")
        print("train2")
        save_stream(append_train2, out_url, "raw_train2")
        print("val2")
        save_stream(append_val2, out_url, "raw_val2")
        print("test1")
        save_stream(append_test1, out_url, "raw_test1")
        print("test2")
        save_stream(append_test2, out_url, "raw_test2")
        print("moving on")
def main_scale(in_url: str, out_url: str) -> None:
    """Compute the combined training ranges and write normalized splits.

    Ranges come from both raw training splits so every split is scaled
    consistently.
    """
    range_dict1 = get_range_dict(in_url, "raw_train1")
    range_dict2 = get_range_dict(in_url, "raw_train2")
    range_dict = unify_range_dicts(range_dict1, range_dict2)
    print("unnormalized ranges: ", range_dict)
    print("scale and store")
    print("train")
    scale_and_store("raw_train1", "norm_train_no_ma", range_dict, in_url, out_url)
    scale_and_store("raw_train2", "norm_train_ma", range_dict, in_url, out_url)
    print("val")
    scale_and_store("raw_val1", "norm_val_no_ma", range_dict, in_url, out_url)
    scale_and_store("raw_val2", "norm_val_ma", range_dict, in_url, out_url)
    print("test1")
    scale_and_store("raw_test1", "norm_test_sup", range_dict, in_url, out_url)
    print("test2")
    scale_and_store("raw_test2", "norm_test_shape", range_dict, in_url, out_url)
def process(generator_key: str) -> None:
    """Run the full split + normalize pipeline for one FEM generator."""
    base_url_gs = f"gs://squirrel-core-public-data/gnn_bvp_solver/{generator_key}"
    base_url = f"data/{generator_key}"  # intermediate raw splits stay local
    main(generator_key, base_url)
    main_scale(base_url, base_url_gs)
# Entry point: process every physics generator in turn.
if __name__ == "__main__":
    for label_g in ["ElectricsRandomChargeGenerator", "MagneticsRandomCurrentGenerator", "ElasticityFixedLineGenerator"]:
        process(label_g)
| merantix-momentum/gnn-bvp-solver | gnn_bvp_solver/preprocessing/split_and_normalize.py | split_and_normalize.py | py | 7,373 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "typing.Callable",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.maximum",
"lin... |
10993947420 | import sys, os, argparse, yaml
from datasets.config.config import data_analysis_parameters
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cbook as cbook
def analysis_kitti(args):
    """Plot the per-pixel mask-weighted mean flow magnitude over a KITTI folder.

    FIX: the original computed ``np.mean(flow_volume, axis=0)`` and then
    immediately overwrote it with the mask-weighted average; the dead
    computation is removed.
    """
    flow_volume = []
    masks = []
    height, width = args.height, args.width
    for flow_path in os.listdir(args.flow_path_occ):
        flow = cv.resize(cv.imread(os.path.join(args.flow_path_occ, flow_path), cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH), (width, height))
        # KITTI encodes flow as uint16: value/64 - 512 gives pixel displacement.
        u = flow[:, :, 2] / 64.0 - 512
        v = flow[:, :, 1] / 64.0 - 512
        mask = flow[:, :, 0]
        mag = np.sqrt(u ** 2 + v ** 2)
        flow_volume.append(mag)
        # Tiny epsilon keeps np.average from a zero weight sum.
        masks.append(mask + 0.00000001)
    flow_volume = np.array(flow_volume)
    masks = np.array(masks)
    mean = np.average(flow_volume, weights=masks, axis=0)
    # Log-scaled heat map of the mean flow magnitude with colorbar.
    fig, ax = plt.subplots(1, 1)
    mean_flow_plot = ax.imshow(mean, cmap='Blues', norm=colors.LogNorm(vmin=mean.min() + 0.01, vmax=mean.max() + 0.0000001))
    fig.colorbar(mean_flow_plot, ax=ax)
    plt.show()
def analysis_vkitti(args):
    """Plot the per-pixel mask-weighted mean flow magnitude for Virtual KITTI."""
    flow_volume = []
    masks = []
    height, width = args.height , args.width
    for flow_path in os.listdir(args.flow_path_occ):
        flow = cv.resize(cv.imread(os.path.join(args.flow_path_occ, flow_path), cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH), (width, height))
        u = flow[:,:,2]
        v = flow[:,:,1]
        # vKITTI stores flow as uint16; map to [0, 2) (re-centering commented out).
        u = 2*(u/(2**16)) #- 0.5
        v = 2*(v/(2**16)) #- 0.5
        # u = u*(width - 1)
        # v = v*(height - 1)
        mask = flow[:,:,0]
        print(min(u.flatten()),max(u.flatten()))  # debug trace of the u range
        mag = np.sqrt(u**2 + v**2)
        flow_volume.append(mag)
        masks.append(mask)
    flow_volume = np.array(flow_volume)
    masks = np.array(masks)
    # NOTE(review): unlike analysis_kitti, no epsilon is added to the mask
    # weights — an all-zero mask column would make np.average divide by zero.
    mean = np.average(flow_volume,weights = masks,axis=0)
    # standard_deviation = np.std(flow_volume,axis=0)
    # Log-scaled heat map of the mean flow magnitude with colorbar.
    fig, ax = plt.subplots(1,1)
    mean_flow_plot = ax.imshow(mean,cmap='Blues',norm=colors.LogNorm(vmin=mean.min() + 0.01, vmax=mean.max()))
    fig.colorbar(mean_flow_plot, ax=ax)
    # ax[1].imshow(standard_deviation,cmap='rainbow')
    plt.show()
# CLI: choose the dataset and config file, then run the vKITTI analysis.
parser = argparse.ArgumentParser()
parser.add_argument('-config', help="configuration file *.yml", type=str, required=False, default='data_analysis/config/vkitti.yml')
parser.add_argument('-dataset', help="dataset", type=str, required=False, default="vkitti")
analysis_args = parser.parse_args()
# Resolve the analysis parameters from the configuration file.
args = data_analysis_parameters(analysis_args.dataset, analysis_args.config)
analysis_vkitti(args)
| sushlokshah/new_approach | general_file/analysis.py | analysis.py | py | 2,885 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14... |
11352712717 | import json
import requests
import random
def filmes_assistidos_json():
    """Load the watched-movies data from its JSON file."""
    with open('../DadosJSON/filmesAssistidos.json', 'r') as fp:
        return json.load(fp)
def preferencias_json():
    """Load the user genre-preference data from its JSON file."""
    with open('../DadosJSON/preferencias.json', 'r') as fp:
        return json.load(fp)
def filmes_generos_json():
    """Load the movies-by-genre index from its JSON file."""
    with open('../DadosJSON/filmesGenero.json', 'r') as fp:
        return json.load(fp)
def links_imdb_json():
    """Load the MovieLens-to-IMDB link table from its JSON file."""
    with open('../DadosJSON/linksImdb.json', 'r') as fp:
        return json.load(fp)
def verificados_json():
    """Load the already-verified imdb ids from their JSON file."""
    with open('../DadosJSON/verificados.json', 'r') as fp:
        return json.load(fp)
def recomendados_json():
    """Load the previously generated recommendations from their JSON file."""
    with open('../DadosJSON/recomendacao.json', 'r') as fp:
        return json.load(fp)
def salva_verificados(imdbid):
    """Append *imdbid* to the global verified list and persist it to disk."""
    verificados.append(imdbid)
    with open('../DadosJSON/verificados.json', 'w') as f:
        json.dump(verificados, f)
# Module-level caches, loaded once at import time.
filmesAssistidos_users = filmes_assistidos_json()
preferencias_users = preferencias_json()
filmesPorGenero = filmes_generos_json()
linksImdb = links_imdb_json()
verificados = verificados_json()
def request(imdbid, s):
    """Look up *imdbid* on TMDB and return field *s* of the first movie/TV hit.

    FIX: the original returned from a ``finally`` block, which silently
    swallowed any in-flight exception (including a second IndexError when
    both result lists were empty) and then indexed the wrong object.
    """
    url = requests.get(f"https://api.themoviedb.org/3/find/{imdbid}?api_key=254c6407feb51fd7f478ec3e6b1abc23"
                       "&language=en-US&external_source=imdb_id")
    data = url.json()
    try:
        data = data['movie_results'][0]
    except IndexError:
        # Not a movie: fall back to the TV results.
        data = data['tv_results'][0]
    return data[s]
def retorna_imdbid(movieid):
    """Map a MovieLens movieId to its imdbId via the preloaded link table.

    Returns None (implicitly) when the id is unknown, like the original.
    """
    for link in linksImdb:
        if link['movieId'] == movieid:
            return link['imdbId']
def salva_recomendacoes(lista, userid, rod):
    """Append *lista* to the user's recommendation list and persist all to disk."""
    if rod == 1:
        # First round for this user: create their slot before extending.
        recomendacao_users.append([])
        recomendacao_users[userid].extend(lista)
    else:
        recomendacao_users[userid].extend(lista)
    with open('../DadosJSON/recomendacao.json', 'w') as f:
        json.dump(recomendacao_users, f)
# In-memory copy of the recommendations written so far.
recomendacao_users = recomendados_json()
def recomendacao(userid, rod):
    """Pick movie recommendations for *userid* in round *rod*.

    Rounds 1/2 draw 3 titles from the user's top two genres, round 3 draws
    2 from a random top genre, later rounds draw 1 from the "other" genres.
    Skips watched titles and low-popularity titles (< 30 on TMDB, which are
    also blacklisted in ``verificados``).
    """
    recomendados = []
    if rod == 1:
        count = 3
        genero = preferencias_users[userid]['topGeneros'][0]
    elif rod == 2:
        count = 3
        genero = preferencias_users[userid]['topGeneros'][1]
    elif rod == 3:
        count = 2
        genero = random.choice(preferencias_users[userid]['topGeneros'])
    else:
        count = 1
        genero = random.choice(preferencias_users[userid]['outros'])
    assistidos = filmesAssistidos_users[userid]['filmes']
    while count > 0:
        movieid = random.choice(filmesPorGenero[0][genero])
        imdbid = retorna_imdbid(movieid)
        pop = request(imdbid, "popularity")  # TMDB lookup (network call)
        if imdbid not in verificados and pop < 30.000:
            # Too unpopular: blacklist so we never fetch it again.
            salva_verificados(imdbid)
            continue
        elif imdbid in verificados:
            continue
        # NOTE(review): ``recomendados``/``recomendacao_users`` hold imdb ids
        # but are tested against ``movieid`` — the duplicate check can never
        # fire; confirm intent.
        if movieid not in assistidos and movieid not in recomendados:
            if rod > 1:
                if movieid not in recomendacao_users[userid]:
                    recomendados.append(imdbid)
                    print('..')
                    count -= 1
            else:
                print('..')
                recomendados.append(imdbid)
                count -= 1
    salva_recomendacoes(recomendados, userid, rod)
def main():
    """Generate recommendations until all 10 users are covered, retrying on error.

    FIX: the original re-invoked ``main()`` from a ``finally`` block, which
    recursed unconditionally — forever once all users were processed — and
    used a bare ``except``.  A loop with an explicit stop condition and a
    narrowed ``except Exception`` replaces it.
    """
    while len(recomendacao_users) < 10:
        try:
            for userid in range(len(recomendacao_users), 10):
                recomendacao(userid, 1)
                print('--')
                recomendacao(userid, 2)
                print('---')
                recomendacao(userid, 3)
                print('-----')
                recomendacao(userid, 4)
                print('-------')
                recomendacao(userid, 4)
                print('||||||||||')
        except Exception:
            print('erro')
            # Drop a half-finished user entry so it is retried from scratch.
            if len(recomendacao_users[len(recomendacao_users) - 1]) < 10:
                recomendacao_users.pop()
                with open('../DadosJSON/recomendacao.json', 'w') as f:
                    json.dump(recomendacao_users, f)
# Kick off generation when the module is executed.
main()
| CassioFig/Sistema-Recomendacao | backend/recomendacao.py | recomendacao.py | py | 4,167 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"... |
15452563301 | from base import VisBase
from helper import get_heat_map
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import torch
import torch.nn.functional as F
import math
import numpy as np
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import ImageGrid
# Directory containing this script; figures are saved beneath it.
VIS_ROOT = os.path.dirname(os.path.realpath(__file__))
class ProjVis(VisBase):
    def __init__(self,
                 exp,
                 **kwargs):
        """Initialize the projection visualiser for experiment *exp* with plot defaults."""
        super(ProjVis, self).__init__(exp, **kwargs)
        self.save_each = False  # save each sub-figure separately when True
        self.show = True  # show figures interactively instead of saving
        self.batch_id = 0  # batch index fed to self.feed()
        self.target_id = 0  # target index fed to self.feed()
        self.center_tf = (23, 23)  # default (time, freq) window centre
        self.rect_color = 'yellow'  # highlight rectangle colour
def center_scan(self):
half_width = 21
half_height = 21
max_t = 250
max_f = 128
t_grid = np.linspace(half_width + 2, max_t - half_width - 2, num=6)
f_grid = np.linspace(half_height + 2, max_f - half_height - 2, num=5)
center_list = []
for t in t_grid:
for f in f_grid:
center_tf = (int(t), int(f))
center_list.append(center_tf)
return center_list
    def fig_structure_grid(self):
        """Render a 1x5 comparison grid sharing one colorbar.

        Columns: spectrogram, softmax entropy, positional softmax entropy,
        sparsemax entropy, positional sparsemax entropy.
        """
        fig = plt.figure(figsize=(7, 1.5))
        grid = ImageGrid(fig, 111,
                         nrows_ncols=(1, 5),
                         axes_pad=0.07,
                         share_all=True,
                         cbar_mode='single',
                         label_mode='L')
        im1 = self.fig_spec(ax=grid[0])
        im2 = self.fig_entropy_softmax(ax=grid[1])
        im3 = self.fig_pos_entropy_softmax(ax=grid[2])
        im4 = self.fig_entropy_sparsemax(ax=grid[3])
        im5 = self.fig_pos_entropy_sparsemax(ax=grid[4])
        max_val = im4.get_array().max()
        min_val = im4.get_array().min()
        # Use the sparsemax image's range for the shared colorbar when it
        # spans enough dynamic range; otherwise fall back to im3's scale.
        if max_val > 0.2 and max_val - min_val > 0.1:
            max_val = round(max_val - 0.1, 1)
            min_val = round(min_val, 1)
            plt.colorbar(im4, cax=grid.cbar_axes[0], ticks=[min_val, max_val])
            grid.cbar_axes[0].set_yticklabels([min_val, str(max_val)])
        else:
            plt.colorbar(im3, cax=grid.cbar_axes[0])
        # plt.colorbar(im3, cax=grid.cbar_axes[0])
        fontsz = 12
        grid[0].set_xlabel('(a) spectrogram', fontsize=fontsz, labelpad=6.2)
        grid[1].set_xlabel(r'(b) $\tilde{\mathbf{h}}$', fontsize=fontsz)
        grid[2].set_xlabel(r'(c) $\tilde{\mathbf{h}}^\dag$', fontsize=fontsz)
        grid[3].set_xlabel(r'(d) $\bar{\mathbf{h}}$', fontsize=fontsz)
        grid[4].set_xlabel(r'(e) $\bar{\mathbf{h}}^\dag$', fontsize=fontsz)
        grid[0].get_xaxis().set_ticks([])
        if self.show:
            # fig.suptitle('{}_structure_grid_b{}.png'.format(self.label, str(self.batch_id)))
            plt.show()
        else:
            fig.savefig('{}/{}/{}_structure.png'.format(VIS_ROOT, self.label, self.label))
def fig_relation_grid(self, suffix=None):
fig = plt.figure(figsize=(4.8, 1.8))
grid = ImageGrid(fig, 111,
nrows_ncols=(1, 3),
axes_pad=0.07,
share_all=True,
cbar_mode='single',
label_mode='L'
)
self.fig_spec_rect(ax=grid[0])
self.fig_spec_rect(ax=grid[0])
im1 = self.fig_relation(ax=grid[1])
im2 = self.fig_pos_relation(ax=grid[2])
fontsz = 12
grid[0].set_xlabel('(a) spectrogram', fontsize=fontsz)
grid[1].set_xlabel(r'(b) $\mathbf{E}_i$', fontsize=fontsz)
grid[2].set_xlabel(r'(c) $\mathbf{E}_i^{\dag}$', fontsize=fontsz)
grid[0].get_xaxis().set_ticks([])
if im1.get_array().max() == 0. and im2.get_array().max() == 0.:
import matplotlib.colors
norm = matplotlib.colors.Normalize(vmax=1., vmin=0.)
plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap='jet'), cax=grid.cbar_axes[0])
elif im1.get_array().max() == 0.:
plt.colorbar(im2, cax=grid.cbar_axes[0])
elif im2.get_array().max() == 0.:
plt.colorbar(im1, cax=grid.cbar_axes[0])
else:
plt.colorbar(im1, cax=grid.cbar_axes[0])
if self.show:
# fig.suptitle('{}_relation_grid_b{}_{}.png'.format(self.label, self.batch_id, str(self.center_tf)))
plt.show()
else:
if suffix is not None:
fig.savefig('{}/{}/{}_relation_grid_{}.png'.format(VIS_ROOT,
self.label,
self.label,
str(suffix)))
else:
fig.savefig('{}/{}/{}_relation_grid.png'.format(VIS_ROOT,
self.label,
self.label))
    def fig_spec(self, ax=None):
        """Plot the current sample's spectrogram (into *ax* if given)."""
        if not ax:
            fig, ax = plt.subplots()
        self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
        folder = '{}/{}'.format(VIS_ROOT, self.label)
        if not os.path.exists(folder):
            os.makedirs(folder)
        self.plot_spec(ax=ax)
        # NOTE(review): ``fig`` only exists when no ax was supplied — with
        # save_each=True and an external ax this raises NameError; confirm.
        if self.save_each:
            fig.savefig('{}/{}/{}_spec.png'.format(VIS_ROOT, self.label, self.label))
        else:
            pass
            # plt.show()
    def fig_spec_rect(self, ax=None):
        """Plot the spectrogram with the highlight rectangle (into *ax* if given)."""
        if not ax:
            fig, ax = plt.subplots()
        self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
        folder = '{}/{}'.format(VIS_ROOT, self.label)
        if not os.path.exists(folder):
            os.makedirs(folder)
        self.plot_spec_rect(ax)
        # NOTE(review): ``fig`` only exists when no ax was supplied — with
        # save_each=True and an external ax this raises NameError; confirm.
        if self.save_each:
            fig.savefig('{}/{}/{}_spec_rect.png'.format(VIS_ROOT, self.label, self.label))
        else:
            pass
            # plt.show()
    def fig_relation(self, ax=None):
        """Plot the relation heat map of the plain (no positional encoding) model."""
        self.reload(exp="esc-folds-rblock",
                    r_structure_type="zero", softmax_type="softmax")
        self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
        folder = '{}/{}'.format(VIS_ROOT, self.label)
        if not os.path.exists(folder):
            os.makedirs(folder)
        if not ax:
            fig, ax = plt.subplots()
        im = self.plot_relation_heatmap(ax=ax)
        if self.save_each:
            fig.savefig('{}/{}/{}_relation.png'.format(VIS_ROOT, self.label, self.label))
        else:
            pass
            # plt.show()
        return im
    def fig_pos_relation(self, ax=None):
        """Plot the relation heat map of the positional-encoding model."""
        self.reload(exp="esc-folds-rblock-pe",
                    r_structure_type="zero", softmax_type="softmax")
        self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
        folder = '{}/{}'.format(VIS_ROOT, self.label)
        if not os.path.exists(folder):
            os.makedirs(folder)
        if not ax:
            fig, ax = plt.subplots()
        im = self.plot_relation_heatmap(ax=ax)
        if self.save_each:
            fig.savefig('{}/{}/{}_pos_relation.png'.format(VIS_ROOT, self.label, self.label))
        else:
            pass
            # plt.show()
        return im
def fig_entropy_softmax(self, ax=None):
self.reload(exp="esc-folds-rblock",
r_structure_type="minus_entropy", softmax_type="softmax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_structure_feat(ax)
if self.save_each:
fig.savefig('{}/{}/{}_entropy_softmax.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def fig_entropy_sparsemax(self, ax=None):
self.reload(exp="esc-folds-rblock",
r_structure_type="minus_entropy", softmax_type="sparsemax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_structure_feat(ax)
if self.save_each:
fig.savefig('{}/{}/{}_entropy_sparsemax.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def fig_pos_entropy_softmax(self, ax=None):
self.reload(exp="esc-folds-rblock-pe",
r_structure_type="minus_entropy", softmax_type="softmax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_structure_feat(ax)
if self.save_each:
fig.savefig('{}/{}/{}_pos_entropy_softmax.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def fig_pos_entropy_sparsemax(self, ax=None):
self.reload(exp="esc-folds-rblock-pe",
r_structure_type="minus_entropy", softmax_type="sparsemax")
self.feed(batch_id=self.batch_id, data_id=0, target_id=self.target_id)
folder = '{}/{}'.format(VIS_ROOT, self.label)
if not os.path.exists(folder):
os.makedirs(folder)
if not ax:
fig, ax = plt.subplots()
im = self.plot_structure_feat(ax)
if self.save_each:
fig.savefig('{}/{}/{}_pos_entropy_sparsemax.png'.format(VIS_ROOT, self.label, self.label))
else:
pass
# plt.show()
return im
def plot_spec(self, ax):
ax.imshow(self.spec, cmap='magma', origin='lower')
def plot_spec_rect(self, ax):
ax.imshow(self.spec, cmap='magma', origin='lower')
self.plot_rect(ax)
def plot_rect(self, ax, text=None):
width = 43
height = 43
lower_left = (self.center_tf[0] - math.floor(width / 2), self.center_tf[1] - math.floor(height / 2))
rect = Rectangle(xy=lower_left, width=width, height=height, linewidth=1,
edgecolor=self.rect_color, facecolor='none')
ax.add_patch(rect)
# ax.scatter(self.center_tf[0], self.center_tf[1], s=10, marker='x', c=self.rect_color)
if text == 'p':
ax.text(self.center_tf[0] - 10, self.center_tf[1] - 8, r'$p$', fontsize=10, color=self.rect_color)
elif text == 'q':
ax.text(self.center_tf[0] - 10, self.center_tf[1] - 8, r'$q$', fontsize=10, color=self.rect_color)
def plot_relation_heatmap(self, ax, fig=None, alpha=1.):
fsz, tsz = self.spec.shape
heat_map = get_heat_map(self.spec, nl_map=self.nl_map, center_tf=self.center_tf)
# (F, T)
heat_map = F.interpolate(torch.from_numpy(heat_map),
size=(fsz, tsz),
mode='bicubic').squeeze()
heat_map = heat_map.clamp_(min=0.).numpy()
# alpha, multiply heat_map by alpha
im = ax.imshow(heat_map, cmap='jet', alpha=alpha, origin='lower')
self.plot_rect(ax)
return im
def plot_structure_feat(self, ax, fig=None, alpha=1.):
fsz, tsz = self.spec.shape
structure_feat = F.interpolate(torch.from_numpy(self.relation_feat),
size=(fsz, tsz),
mode='bicubic').squeeze()
structure_feat.clamp_(min=0., max=1.)
structure_feat = structure_feat.numpy()
# alpha, multiply heat_map by alpha
im = ax.imshow(structure_feat, cmap='bwr', origin='lower', alpha=alpha)
return im
def add_colorbar(self, ax):
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(cax=cax)
def plot_relation(vis):
    """Render relation-grid figures for every scan centre of each target in batch 0,
    saving the corresponding audio clip per target."""
    vis.batch_id = 0
    for target in range(50):
        vis.target_id = target
        for idx, center in enumerate(vis.center_scan()):
            vis.center_tf = center
            vis.fig_relation_grid(suffix=idx)
        audio_path = "{}/{}/{}.wav".format(VIS_ROOT, vis.label, vis.label)
        vis.save_audio(wav_path=audio_path)
def plot_structure(vis):
    """Render the structure grid for every target of batch 0."""
    vis.batch_id = 0
    for target in range(50):
        vis.target_id = target
        vis.fig_structure_grid()
if __name__ == '__main__':
    # Pin the visualisation to the second GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    vis = ProjVis(exp="esc-folds-rblock",
                  ckpt_prefix="Run029")
    # High-resolution output with LaTeX text rendering.
    plt.rcParams['figure.dpi'] = 300
    plt.rcParams['text.usetex'] = True
    # NOTE(review): the usual matplotlib family name is 'Times New Roman';
    # confirm 'Times Roman' resolves to the intended font.
    plt.rc('font', family='Times Roman')
    # Save figures to disk instead of showing them interactively.
    vis.show = False
    plot_relation(vis)
    plot_structure(vis)
    # vis.target_id = 23
    # for i, center_tf in enumerate(vis.center_scan()):
    #     # if i != 6:
    #     #     continue
    #     vis.center_tf = center_tf
    #     vis.fig_relation_grid(suffix=i)
    #     # break
| hackerekcah/ESRelation | vis_proj/vis_proj.py | vis_proj.py | py | 13,438 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "base.VisBase",
"lin... |
41924896443 | import sys
import sqlite3
from orders_management import*
#menu for managing customer
class order_menu():
    """Console menu for creating, updating, listing, searching and deleting orders."""

    def __init__(self):
        self.running = None
        self.active_detail = orders_manage()

    def run_menu(self, choice):
        """Dispatch one top-level action (1=add, 2=update, 3=list, 4=search, 5=delete)."""
        if choice == 1:
            order_date = input("please enter the order date: ")
            order_size = input("please enter the size of the order: ")
            values = (order_date, order_size)
            self.active_detail.insert_order_data(values)
        elif choice == 2:
            record_id = input("please enter the id of the product you wish to change: ")
            choice = self.get_answers()
            if choice == 1:
                order_date = input("please enter the date of the order: ")
                value = (order_date, record_id)
                self.active_detail.update_order_date(value)
            elif choice == 2:
                order_size = input("please enter the new size of the order: ")
                value = (order_size, record_id)
                self.active_detail.update_order_size(value)
            elif choice == 3:
                order_date = input("please enter the date of the order: ")
                order_size = input("please enter the new size of the order: ")
                value = (order_date, order_size, record_id)
                self.active_detail.update_order_sizedate(value)
        elif choice == 3:
            order = self.active_detail.order_data()
            print(order)
        elif choice == 4:
            done = False
            while not done:
                print("would you like to search by order_num or by order_date: ", end="")
                choices = input()
                choices = choices.lower()
                if choices in ["order_num", "order num", "order number", "order_number"]:
                    print("please enter the order number you wish to view: ", end="")
                    record_id = input()
                    rename = self.active_detail.display_order_data(record_id)
                    print(rename)
                    done = True
                elif choices in ["order_date", "order date"]:
                    # NOTE(review): this branch prompts for a customer id but calls the
                    # same display_order_data() lookup as the order-number branch --
                    # confirm whether a date-based lookup was intended.
                    print("please enter the customer id you wish to view: ", end="")
                    name = input()
                    rename = self.active_detail.display_order_data(name)
                    print(rename)
                    done = True
                else:
                    print("please enter a valid choice")
                    done = False
        elif choice == 5:
            choice = input("which id do you want to delete: ")
            self.active_detail.delete_order_data(choice)

    def get_order_date(self, id):
        """Return the OrderDate row for the given order number, or None if absent."""
        with sqlite3.connect("pharmacy_database.db") as db:
            cursor = db.cursor()
            cursor.execute("select OrderDate from Orders where OrderNum=?", (id,))
            # Bug fix: the fetched row was previously discarded (no return statement).
            return cursor.fetchone()

    def get_order_size(self, id):
        """Return the OrderSize row for the given order number, or None if absent."""
        with sqlite3.connect("pharmacy_database.db") as db:
            cursor = db.cursor()
            cursor.execute("select OrderSize from Orders where OrderNum=?", (id,))
            return cursor.fetchone()

    def get_answers(self):
        """Prompt until the user enters an integer sub-menu choice and return it."""
        while True:
            print("what do you want to update?")
            print()
            print("1.order_date")
            print("2.order_size")
            print("3.update all")
            print("what is your choice: ", end="")
            try:
                # Bug fix: on ValueError the original recursed, discarded the
                # recursive result, and then returned an unbound `choice`
                # (UnboundLocalError). A loop re-prompts until valid.
                return int(input())
            except ValueError:
                print()
| henrymlongroad/computing-coursework.exe | Implementation/order_menu.py | order_menu.py | py | 3,649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 68,
"usage_type": "call"
}
] |
16046372668 | """Pakcage Metadata."""
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="bank-of-england",
version="0.0.1",
description="Retrieve data from the Bank of England's Statistical Interactive Database (IADB)",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/ronaldocpontes/bank-of-england",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
include_package_data=True,
package_data={"": ["data/*.*"],},
py_modules=["bank_of_england"],
install_requires=["pandas", "requests"],
extras_require={"dev": ["pytest", "tox"]},
)
| ronaldocpontes/bank-of-england | setup.py | setup.py | py | 1,019 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 12,
"usage_type": "call"
}
] |
20940437621 | from collections import OrderedDict
import torch
def anchor_offset_to_midpoint_offset(anchor_offset: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
    """Decode predicted anchor offsets into midpoint-offset boxes.

    :param anchor_offset: (B, A*6, H, W) offsets (dx, dy, dw, dh, da, db) per anchor.
    :param anchors: (B, A*4, H, W) anchors as (x, y, w, h).
    :return: (B, A*6, H, W) float tensor of (x, y, w, h, delta_alpha, delta_beta).
    """
    b, n, fh, fw = anchors.shape
    num_anchors = n // 4  # idiom fix: integer division instead of int(n/4)
    # reshape for easier per-anchor indexing
    r_offset = anchor_offset.reshape((b, num_anchors, 6, fh, fw))
    r_anchors = anchors.reshape((b, num_anchors, 4, fh, fw))
    # renamed from w/h so the feature-map dims above are not clobbered
    box_w = r_anchors[:, :, 2, :, :] * torch.exp(r_offset[:, :, 2, :, :])
    box_h = r_anchors[:, :, 3, :, :] * torch.exp(r_offset[:, :, 3, :, :])
    x = r_offset[:, :, 0, :, :] * r_anchors[:, :, 2, :, :] + r_anchors[:, :, 0, :, :]
    y = r_offset[:, :, 1, :, :] * r_anchors[:, :, 3, :, :] + r_anchors[:, :, 1, :, :]
    delta_alpha = r_offset[:, :, 4, :, :] * box_w
    delta_beta = r_offset[:, :, 5, :, :] * box_h
    r_midpoint_offset = torch.stack((x, y, box_w, box_h, delta_alpha, delta_beta), dim=2)
    # reshape is equivalent to concatenating the per-anchor slices along dim=1
    return r_midpoint_offset.reshape(b, num_anchors * 6, fh, fw).float()
def midpoint_offset_to_anchor_offset(midpoint_offset: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
    """Encode midpoint-offset boxes back into anchor-relative regression offsets.

    Annotation fix: torch.Tensor (the type), not torch.tensor (the factory function).

    :param midpoint_offset: (B, A*6, H, W) boxes as (x, y, w, h, da, db).
    :param anchors: (B, A*4, H, W) anchors as (x, y, w, h).
    :return: (B, A*6, H, W) float tensor of (d_x, d_y, d_w, d_h, d_a, d_b).
    """
    b, n, fh, fw = anchors.shape
    num_anchors = n // 4  # idiom fix: integer division instead of int(n/4)
    # reshape for easier per-anchor indexing
    r_mo = midpoint_offset.reshape((b, num_anchors, 6, fh, fw))
    r_anchors = anchors.reshape((b, num_anchors, 4, fh, fw))
    d_a = r_mo[:, :, 4, :, :] / r_mo[:, :, 2, :, :]
    d_b = r_mo[:, :, 5, :, :] / r_mo[:, :, 3, :, :]
    d_w = torch.log(r_mo[:, :, 2, :, :] / r_anchors[:, :, 2, :, :])
    d_h = torch.log(r_mo[:, :, 3, :, :] / r_anchors[:, :, 3, :, :])
    d_x = (r_mo[:, :, 0, :, :] - r_anchors[:, :, 0, :, :]) / r_anchors[:, :, 2, :, :]
    d_y = (r_mo[:, :, 1, :, :] - r_anchors[:, :, 1, :, :]) / r_anchors[:, :, 3, :, :]
    r_anchor_offset = torch.stack((d_x, d_y, d_w, d_h, d_a, d_b), dim=2)
    # reshape is equivalent to concatenating the per-anchor slices along dim=1
    return r_anchor_offset.reshape(b, num_anchors * 6, fh, fw).float()
def midpoint_offset_to_anchor_offset_gt(midpoint_offset_gt: torch.Tensor, tp_anchors: torch.Tensor) -> torch.Tensor:
    """Encode ground-truth midpoint-offset boxes relative to their matched anchors.

    Fixes: removed the unused `num_anchors` local; corrected the torch.tensor
    annotations to torch.Tensor.

    :param midpoint_offset_gt: (N, 6) ground-truth boxes as (x, y, w, h, da, db).
    :param tp_anchors: (N, 4) matched anchors as (x, y, w, h).
    :return: (N, 6) tensor of (d_x, d_y, d_w, d_h, d_a, d_b).
    """
    d_a = midpoint_offset_gt[:, 4] / midpoint_offset_gt[:, 2]
    d_b = midpoint_offset_gt[:, 5] / midpoint_offset_gt[:, 3]
    d_w = torch.log(midpoint_offset_gt[:, 2] / tp_anchors[:, 2])
    d_h = torch.log(midpoint_offset_gt[:, 3] / tp_anchors[:, 3])
    d_x = (midpoint_offset_gt[:, 0] - tp_anchors[:, 0]) / tp_anchors[:, 2]
    d_y = (midpoint_offset_gt[:, 1] - tp_anchors[:, 1]) / tp_anchors[:, 3]
    return torch.stack((d_x, d_y, d_w, d_h, d_a, d_b), dim=1)
def midpoint_offset_to_vertices(midpoint_offset: torch.Tensor) -> torch.Tensor:
    """Convert midpoint-offset boxes to their four oriented-box vertices.

    :param midpoint_offset: (B, A*6, H, W) boxes as (x, y, w, h, da, db).
    :return: (B, A*4, 2, H, W) float tensor of vertices (v1..v4, clockwise).
    """
    b, n, fh, fw = midpoint_offset.shape
    num_anchors = n // 6  # idiom fix: integer division instead of int(n/6)
    # reshape for easier per-anchor indexing
    r_mo = midpoint_offset.reshape((b, num_anchors, 6, fh, fw))
    x = r_mo[:, :, 0, :, :]
    y = r_mo[:, :, 1, :, :]
    w = r_mo[:, :, 2, :, :]
    h = r_mo[:, :, 3, :, :]
    d_alpha = r_mo[:, :, 4, :, :]
    d_beta = r_mo[:, :, 5, :, :]
    v1 = torch.stack([x + d_alpha, y - h / 2], dim=2)  # top edge
    v2 = torch.stack([x + w / 2, y + d_beta], dim=2)   # right edge
    v3 = torch.stack([x - d_alpha, y + h / 2], dim=2)  # bottom edge
    v4 = torch.stack([x - w / 2, y - d_beta], dim=2)   # left edge
    r_vertices = torch.stack((v1, v2, v3, v4), dim=2)
    # reshape is equivalent to concatenating the per-anchor slices along dim=1
    return r_vertices.reshape(b, num_anchors * 4, 2, fh, fw).float()
def vertices_to_midpoint_offset(vertices: torch.Tensor) -> torch.Tensor:
    """Convert oriented-box vertices back to the midpoint-offset representation.

    :param vertices: (B, A*4, 2, H, W) vertex tensor (vertex 0 on the top edge,
        vertex 1 on the right edge -- inverse of midpoint_offset_to_vertices).
    :return: (B, A*6, H, W) tensor of (x, y, w, h, delta_a, delta_b).
    """
    b, n, _, fh, fw = vertices.shape
    num_anchors = n // 4  # idiom fix: integer division instead of int(n/4)
    # reshape for easier per-anchor indexing
    r_vertices = vertices.reshape((b, num_anchors, 4, 2, fh, fw))
    x_min = torch.min(r_vertices[:, :, :, 0, :, :], dim=2)[0]
    x_max = torch.max(r_vertices[:, :, :, 0, :, :], dim=2)[0]
    y_min = torch.min(r_vertices[:, :, :, 1, :, :], dim=2)[0]
    y_max = torch.max(r_vertices[:, :, :, 1, :, :], dim=2)[0]
    # renamed from w/h so the feature-map dims above are not clobbered
    box_w = x_max - x_min
    box_h = y_max - y_min
    x_center = x_min + box_w / 2
    y_center = y_min + box_h / 2
    delta_a = r_vertices[:, :, 0, 0, :, :] - x_center
    delta_b = r_vertices[:, :, 1, 1, :, :] - y_center
    r_mo = torch.stack((x_center, y_center, box_w, box_h, delta_a, delta_b), dim=2)
    # reshape is equivalent to concatenating the per-anchor slices along dim=1
    return r_mo.reshape(b, num_anchors * 6, fh, fw)
def vertices_to_midpoint_offset_gt(vertices: torch.Tensor) -> torch.Tensor:
    """Convert ground-truth quadrilateral vertices to the midpoint-offset form.

    :param vertices: (N, 4, 2) clockwise-ordered vertices per box.
    :return: (N, 6) tensor of (x, y, w, h, delta_a, delta_b).
    """
    n, _, _ = vertices.shape
    x_min = torch.min(vertices[:, :, 0], dim=1)[0]
    x_max = torch.max(vertices[:, :, 0], dim=1)[0]
    y_min = torch.min(vertices[:, :, 1], dim=1)[0]
    y_max = torch.max(vertices[:, :, 1], dim=1)[0]
    # assuming clockwise order; argmin returns the first top-most vertex
    top_left_idx = (torch.arange(n), torch.argmin(vertices[:, :, 1], dim=1))
    cl_next_idx = (top_left_idx[0], (top_left_idx[1] + 1) % 4)
    w = x_max - x_min
    h = y_max - y_min
    x_center = x_min + w / 2
    y_center = y_min + h / 2
    delta_a = vertices[top_left_idx][:, 0] - x_center
    delta_b = vertices[cl_next_idx][:, 1] - y_center
    return torch.stack((x_center, y_center, w, h, delta_a, delta_b), dim=1)
| Simon128/pytorch-ml-models | models/oriented_rcnn/encodings.py | encodings.py | py | 5,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number":... |
19389873266 | import sqlite3
# NOTE(review): this call runs at import time, before `insert` is defined below
# and without its required arguments -- it raises NameError; it looks like
# leftover scaffolding that should be removed.
insert()
def insert(cur, name, adress, phone, email):
    """Insert a contact row via *cur*; report success, or a duplicate-name failure.

    :param cur: an open sqlite3 cursor on a database with a Contact table.
    """
    try:
        cur.execute(''' INSERT INTO Contact (name,adress,phone,email)
                VALUES (?,?,?,?)
            ''', (name, adress, phone, email))
        print('Sucess: The contact:', (name, adress, phone, email), 'has been added to the database')
    except sqlite3.IntegrityError:
        # Bug fix: the original bare `except:` swallowed every error (typos,
        # missing table, KeyboardInterrupt). Only a constraint violation
        # matches the "name already exists" message printed here.
        print('Failed: The contact name already exists.... ')
def update(cur, name):
    """Look up the contact called *name*; if found, prompt for and store new details."""
    cur.execute('SELECT * FROM Contact WHERE name= ?', (name,))
    row = cur.fetchone()
    if row is None:
        print("Failed: Contact doesn't exist in the database")
        return
    print("Contact found please enter new informations")
    adress = input('Enter new adress: ')
    phone = input('Enter new phone: ')
    email = input('Enter new email: ')
    cur.execute('''UPDATE Contact SET adress= ?,
                                      phone= ?,
                                      email= ?
                    WHERE name= ? ''', (adress, phone, email, name))
    print("Sucess: Contact has been updated")
# Manual test driver for the insert/update helpers above.
conn = sqlite3.connect('db.sqlite')
cur = conn.cursor()
email = ''
#email=input('Enter your email')
print('Your email is', email)
name = input('Enter Name: ')
adress = 'TestAdress'
email = 'TestEmail'
phone = 'TestPhone'
# Bug fix: the arguments were passed as (..., email, phone), swapping the two
# fields against insert()'s (cur, name, adress, phone, email) signature.
insert(cur, name, adress, phone, email)
conn.commit()
cur.execute('SELECT * FROM Contact WHERE name= ?', (name,))
row = cur.fetchone()
if row is None:
    print('Contact not found in the database')
else:
    print('Contact found')
    # NOTE(review): the index-to-label mapping below assumes the Contact table's
    # column order is (name, adress, email, phone); the INSERT above uses
    # (name, adress, phone, email) -- verify against the actual schema.
    print('\tName:', row[0])
    print('\tAdress:', row[1])
    print('\tEmail:', row[2])
    print('\tPhone:', row[3])
conn.commit()
print('------UDPATE TESTING ---------')
name = input('Enter name: ')
update(cur, name)
conn.commit()
{
"api_name": "sqlite3.connect",
"line_number": 29,
"usage_type": "call"
}
] |
43041165646 | import logging
import os
import snyk
# Set up logger
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("LOG_LEVEL", default="INFO"))
def get_org_admins(org):
    """
    Returns a list of org admins
    :param org: the org object
    :return: a list of org admins
    """
    logger.debug("Getting list of admins from %s", org.name)
    admins = org.members.filter(role="admin")
    return admins
class SnykApiFacade:
    """Thin wrapper around the Snyk API clients used by the slackbot."""

    def __init__(self, settings):
        # The API token is read from the env var whose name comes from settings.
        # Two clients are kept: a low-level v1 REST client and the
        # higher-level default client from the snyk package.
        token = os.getenv(settings.config("snyk_token_env_var_name"))
        self.settings = settings
        self.client_ll = snyk.SnykClient(
            token, version="2022-08-12", url="https://api.snyk.io/api/v1"
        )
        self.client_hl = snyk.SnykClient(token)

    def create_organisation(self, name):
        """
        Will try and create a new Snyk organisation with the given name, under the group defined
        in the settings file
        :param name: the name of the org to create
        :return: Either the json response from the API, or False in the case of an error
        """
        try:
            return self.client_ll.post(
                "/org", {"name": name, "groupId": self.settings.config("snyk_group_id")}
            ).json()
        except Exception as error:
            logger.error(
                "Unable to create organisation, API call threw error %s", str(error)
            )
            return False

    def org_name_exists(self, name):
        """
        Because it's possible for multiple orgs to have the same name within Snyk, we must manually
        check to ensure that
        our org name isn't already in Snyk.
        :param name: the name of the org (generated from user input)
        :return: a list of the matching org ids (truthy) if the name exists, False otherwise
        """
        logger.debug("Checking if org %s already exists", name)
        orgs = self.client_hl.organizations.filter(
            name=name
        )  # TODO: Filter by group ID here too
        if orgs:
            return [x.id for x in orgs]
        return False

    def get_user(self, email_address):
        """
        Gets the specified user from the configured Snyk group
        :param email_address: the email address of the user to lookup
        :return: a dict of the user if found, None otherwise
        """
        try:
            logger.debug("Checking if user %s exists in Snyk", email_address)
            result = self.client_ll.get(
                f"/group/{self.settings.config('snyk_group_id')}/members"
            ).json()
            for user in result:
                if user.get("email") == email_address:
                    return user
        except Exception as error:
            logger.error(
                "Error checking if user %s exists in Snyk - API threw error %s",
                email_address,
                str(error),
            )
        # Falls through here both when no member matched and on API error.
        return None

    def add_user_to_org(self, org_id, user_id):
        """
        Will add a user to the specified organisation with the admin role
        (the role is fixed; the group comes from the settings file)
        :param org_id: the org ID we want to add the user to
        :param user_id: the user ID in Snyk of the user we wish to add
        :return: True if addition was successful, False otherwise
        """
        try:
            logger.debug("Adding user %s to org %s", user_id, org_id)
            self.client_ll.post(
                f"/group/{self.settings.config('snyk_group_id')}/org/{org_id}/members",
                {"userId": user_id, "role": "admin"},
            ).json()
            return True
        except Exception as error:
            logger.error(
                "Error adding user %s to org %s - API threw error %s",
                user_id,
                org_id,
                str(error),
            )
            return False

    def get_org_from_name(self, org_name):
        """
        Looks up an org by its name in Snyk and returns the org object
        :param org_name: the org name to look for
        :return: the first matching org object, or None if we weren't successful
        """
        try:
            logger.debug("Looking up org %s by name", org_name)
            found_org = self.client_hl.organizations.filter(name=org_name)[0]
            return found_org
        except Exception as error:
            # IndexError (no match) lands here too, not just API failures.
            logger.error(
                "Error getting org %s by name - API threw error %s",
                org_name,
                str(error),
            )
        return None
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "snyk.SnykClient",
"line_numbe... |
10256295781 | import ast
import json
import cv2
from deepface import DeepFace
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import UserPassesTestMixin
from django.db import connections
from django.db.utils import ProgrammingError
from django.http import HttpRequest
from django.http.response import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.utils.autoreload import logger
from django.views import View
from django.views.decorators.http import require_http_methods
from app.models import User, Theme, Task, TaskGroup, Grade
from app.utils import dictfetchall
class LoginView(View):
    """Email/password login form.

    NOTE(review): the password is compared in plaintext against the stored
    value -- confirm whether Django's hashed-password auth backend should be
    used instead.
    NOTE(review): the ``next`` query parameter is followed without validation,
    which permits open redirects -- consider
    ``url_has_allowed_host_and_scheme``.
    """

    def get(self, request):
        # Already-authenticated users skip the form entirely.
        if request.user.is_authenticated:
            return redirect(request.GET.get("next", '/home/'))
        return render(request, 'login.html', context={"head": "Login, please!"})

    def post(self, request):
        user = User.objects.filter(email=request.POST["email"], password=request.POST["password"]).first()
        if user:
            login(request, user)
            return redirect(request.GET.get("next", '/home/'))
        return render(request, 'login.html', context={"head": "user not found"})
@require_http_methods(["GET"])
def start_page(request):
return redirect("/login/")
@require_http_methods(["GET"])
@login_required(login_url='/login/')
def home(request):
themes = Theme.objects.filter(user=request.user)
themes_list = list()
for theme in themes:
if not theme.taskgroup_set.all():
continue
task_group = theme.taskgroup_set.all()[0]
themes_list.append(
{
"description": theme.description,
"time": sum([task.time for task in task_group.task_set.all()]),
"max_grade": sum([task.coefficient for task in task_group.task_set.all()]),
"is_complete": bool(Grade.objects.filter(task__in=task_group.task_set.all())),
"id": theme.id
}
)
return render(request, 'home.html', context={
"themes": themes_list
})
@require_http_methods(["GET"])
@login_required(login_url='/login/')
def my_grades(request):
themes = Theme.objects.filter(user=request.user)
themes_list = list()
for theme in themes:
grades = Grade.theme_is_passed(theme, request.user)
if not theme.taskgroup_set.all() or not grades:
continue
task_group = theme.taskgroup_set.all()[0]
themes_list.append(
{
"description": theme.description,
"my_grade": sum([grade.final_score for grade in grades]),
"max_grade": sum([task.coefficient for task in task_group.task_set.all()]),
"id": theme.id
}
)
return render(request, 'grades.html', context={
"themes": themes_list
})
class CustomAuthMixin(UserPassesTestMixin):
    """Base mixin for access-controlled views; failed checks redirect to login."""
    login_url = '/login/'
class SuperUserAuthMixin(CustomAuthMixin):
    """Restricts the view to superusers."""

    def test_func(self):
        # Idiom fix: return the boolean directly instead of if/return True/False.
        return bool(self.request.user.is_superuser)
class ThemeView(CustomAuthMixin, View):
    """Theme intro page; only users assigned to the theme may view it."""

    def test_func(self):
        if not self.request.user.is_authenticated:
            return False
        allow_themes = list(Theme.objects.filter(user=self.request.user).values_list('id', flat=True))
        # NOTE(review): the theme id is scraped out of the URL string; prefer
        # self.kwargs["theme_id"], which the URLconf already provides -- confirm.
        theme_id = self.request.build_absolute_uri().split('/')[-2]
        return int(theme_id) in allow_themes

    def get(self, request, theme_id):
        theme = Theme.objects.get(pk=theme_id)
        task_group = TaskGroup.objects.filter(theme=theme).first()
        grades = Grade.theme_is_passed(theme, request.user)
        return render(request, "theme.html", context={
            "description": theme.description,
            # Total estimated time across the theme's tasks.
            "time": sum([task.time for task in task_group.task_set.all()]),
            "start_link": task_group.id,
            # Button shows "Results" when grades exist, otherwise "Start".
            "button_desc": "Результати" if grades else "Почати",
            "subject_title": task_group.subject_area.title,
            "subject_image": task_group.subject_area.schema
        })
class TaskGroupView(CustomAuthMixin, View):
    """Task-solving page for one task group; restricted to the theme's users."""

    def setup(self, request, *args, **kwargs):
        # Bug fix: the original called super().setup(request, args, kwargs),
        # passing the args tuple and kwargs dict as two positional arguments.
        # That corrupted self.args/self.kwargs, so dispatch() then handed
        # bogus positional arguments to get()/post().
        super().setup(request, *args, **kwargs)
        self.task_group = TaskGroup.objects.filter(pk=kwargs["task_group_id"]).first()

    def test_func(self):
        if not self.request.user.is_authenticated:
            return False
        theme = Theme.objects.filter(user=self.request.user)
        self.task_groupes = TaskGroup.objects.filter(theme__in=theme).values_list('id', flat=True)
        allow_task_group = list(self.task_groupes)
        # NOTE(review): prefer self.kwargs["task_group_id"] over URL parsing.
        task_group_id = self.request.build_absolute_uri().split('/')[-2]
        return int(task_group_id) in allow_task_group

    def _task_payload(self):
        # One dict per task of this group, shared by the GET template and POST JSON.
        return [
            {
                "id": task.id,
                "description": task.description,
            } for task in Task.objects.filter(task_group=self.task_group)
        ]

    def get(self, request, task_group_id):
        tasks = self._task_payload()
        return render(request, 'task.html', context={
            "tasks": tasks,
            "id": tasks[0]["id"],
            "subject_title": self.task_group.subject_area.title,
            "subject_img": self.task_group.subject_area.schema
        })

    def post(self, request, task_group_id):
        return JsonResponse({"tasks": self._task_payload()})
@require_http_methods(["GET"])
def logout_view(request):
logout(request)
return redirect('/login', request)
class VerifyImage(SuperUserAuthMixin, View):
    """Compare a webcam snapshot (base64 data URL in POST "img") against the
    requesting user's stored avatar using DeepFace."""

    def post(self, request: HttpRequest):
        # Strip the first 22 characters of the data URL (the "data:image/...;base64,"
        # prefix). NOTE(review): a fixed offset assumes a fixed prefix -- confirm.
        img = bytes(request.POST["img"][22:], 'utf-8')
        # NOTE(review): a single shared temp path races between concurrent
        # requests -- consider a per-request temporary file.
        with open("app/avatars/current_image.jpg", "wb") as fh:
            import base64
            fh.write(base64.decodebytes(img))
        try:
            data = DeepFace.verify(
                img1_path="app/avatars/current_image.jpg",
                img2_path=request.user.avatar,
                model_name='ArcFace'
            )
        except ValueError as e:
            # Verification could not be performed (ValueError from DeepFace):
            # treat as not verified.
            print(e)
            data = dict()
            data["verified"] = False
        if data["verified"]:
            return HttpResponse('verified', status=200)
        return HttpResponse('not verified', status=400)
class GetImage(SuperUserAuthMixin, View):
    """Capture frames from the default camera and save the last successful one
    to app/avatars/img_from_opencv.jpg."""

    def get(self, request):
        camera = cv2.VideoCapture(0)
        import os
        try:
            os.remove("app/avatars/img_from_opencv.jpg")
        except OSError:
            # Bug fix: was a bare `except:` that swallowed every exception;
            # only a missing/unremovable file is expected here.
            pass
        # Read several frames so the camera can warm up; each successful read
        # overwrites the file, leaving the last good frame on disk.
        for _ in range(10):
            return_value, image = camera.read()
            if return_value:
                cv2.imwrite('app/avatars/img_from_opencv.jpg', image)
        del camera
        cv2.destroyAllWindows()
        return HttpResponse('Image successfully saved on app/avatars/img_from_opencv.jpg')
class CheckSyntaxOfTask(View):
    """Dry-run a submitted SQL script against the scratch database; return 200
    on success or the database error with status 400."""

    def post(self, request):
        user_cursor = connections['postgres_trade'].cursor()
        try:
            user_cursor.execute(request.POST['script'])
            # Fetch the rows to force full execution of the statement.
            dictfetchall(user_cursor)
            return JsonResponse({"msg": "OK"})
        except ProgrammingError as ex:
            # Invalid SQL: report the database error back to the client.
            logger.error(f'DB Error: {ex}')
            return JsonResponse({'error': str(ex)}, status=400)
class TaskView(CustomAuthMixin, View):
    """Return the description of a single task as JSON; restricted to users of
    the task's theme."""

    def test_func(self):
        if not self.request.user.is_authenticated:
            return False
        task = Task.objects.get(pk=self.request.POST["task_id"])
        return self.request.user in task.task_group.theme.user.all()

    def post(self, request):
        task = Task.objects.get(pk=self.request.POST["task_id"])
        payload = {"description": task.description}
        return JsonResponse(payload)
class GradeTask(CustomAuthMixin, View):
    """GET an existing grade, or POST a submission to be graded and stored."""

    def test_func(self):
        # The task id arrives in POST["task"] for grading and GET["task_id"]
        # for retrieval; only users of the task's theme may proceed.
        if not self.request.user.is_authenticated:
            return False
        if self.request.method == 'POST':
            theme = Task.objects.get(pk=self.request.POST["task"]).task_group.theme
        else:
            theme = Task.objects.get(pk=self.request.GET["task_id"]).task_group.theme
        return self.request.user in theme.user.all()

    def get(self, request):
        grade = Grade.objects.get(task_id=request.GET["task_id"], user=request.user)
        return JsonResponse({
            "description": grade.task.description,
            "user_script": grade.user_script,
            "grade": grade.get_grade_json()
        })

    def post(self, request):
        # Two cursors on the scratch database: one runs the student's SQL,
        # the other the reference solution.
        user_cursor = connections['postgres_trade'].cursor()
        correct_cursor = connections['postgres_trade'].cursor()
        task = Task.objects.get(pk=self.request.POST["task"])
        grade = Grade.find_or_create(user=self.request.user, task=task)
        user_script = request.POST['script']
        # NOTE(review): the reference script runs outside the try block; if it
        # fails this raises a 500 -- confirm that is intended.
        correct_cursor.execute(task.correct_script)
        correct_result = dictfetchall(correct_cursor)
        try:
            user_cursor.execute(user_script)
            user_result = dictfetchall(user_cursor)
            grade.user_script = user_script
            # Every required keyword must appear verbatim in the submission.
            for keyword in task.key_words.all():
                if user_script.find(keyword.word) == -1:
                    grade.keywords_are_used = False
                    break
            # Row-count match is graded separately from an exact output match.
            if len(user_result) == len(correct_result):
                grade.is_same_count_of_lines = True
                if user_result == correct_result:
                    grade.is_same_output = True
        except ProgrammingError as e:
            # Invalid SQL: mark the submission as non-working.
            print(e)
            grade.is_work = False
            grade.keywords_are_used = False
        grade.set_final_score()
        return JsonResponse({"msg": "OK"})
class FinishTheme(CustomAuthMixin, View):
    """Mark every unanswered task of a task group as not done for the user."""

    def test_func(self):
        user = self.request.user
        if not user.is_authenticated:
            return False
        theme = TaskGroup.objects.get(pk=self.request.POST["task_group"]).theme
        return user in theme.user.all()

    def post(self, request):
        task_group = TaskGroup.objects.get(pk=self.request.POST["task_group"])
        for task in task_group.task_set.all():
            grade = Grade.find_or_create(request.user, task)
            # Only tasks with no submitted script are marked as not done.
            if not grade.user_script:
                grade.set_not_done()
        return JsonResponse({"msg": "OK"})
class GradeTheme(CustomAuthMixin, View):
    """Show a theme's results page when it has been graded, otherwise its intro page."""

    def test_func(self):
        if not self.request.user.is_authenticated:
            return False
        allow_themes = list(Theme.objects.filter(user=self.request.user).values_list('id', flat=True))
        # NOTE(review): prefer self.kwargs["theme_id"] over URL parsing.
        theme_id = self.request.build_absolute_uri().split('/')[-2]
        return int(theme_id) in allow_themes

    def get(self, request, theme_id):
        theme = Theme.objects.get(pk=theme_id)
        task_group = TaskGroup.objects.filter(theme=theme).first()
        grades = Grade.theme_is_passed(theme, request.user)
        if grades:
            current_grade = sum(grade.final_score for grade in grades)
            max_grade = len(grades)
            return render(request, "theme_passed.html", context={
                "tasks": [
                    {
                        "id": task.id,
                        "description": task.description,
                        "grade": Grade.objects.get(task=task, user=request.user).final_score
                    } for task in task_group.task_set.all()
                ],
                "current_grade": current_grade,
                "max_grade": max_grade,
                # Bug fix: was `current_grade > max_grade / 0.6`, a threshold of
                # ~167% that could never be reached given max_grade above; the
                # intended 60% pass mark is max_grade * 0.6.
                "complete": current_grade > max_grade * 0.6
            })
        return render(request, "theme.html", context={
            "description": theme.description,
            "time": sum(task.time for task in task_group.task_set.all()),
            "start_link": task_group.id,
            "subject_title": task_group.subject_area.title,
            "subject_image": task_group.subject_area.schema
        })
| lekarus/SQLQueries | web_app/app/views.py | views.py | py | 12,185 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.View",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "a... |
7870451227 | from datetime import datetime
import requests
from bs4 import BeautifulSoup
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import \
AbstractGetBinDataClass
# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Scraper for Wigan Borough Council bin collections.

    Walks the ASP.NET WebForms flow at apps.wigan.gov.uk/MyNeighbourhood/:
    postcode search -> address selection -> results page, then scrapes the
    next collection date for each bin type.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Return {"bins": [{"type": ..., "collectionDate": ...}, ...]} for
        the property identified by the `uprn` and `postcode` kwargs.
        """
        # Get and check UPRN
        user_uprn = kwargs.get("uprn")
        check_uprn(user_uprn)
        # Wigan expects a 12-character, zero-padded UPRN or the form falls over.
        user_uprn = user_uprn.zfill(12)
        user_postcode = kwargs.get("postcode")
        check_postcode(user_postcode)

        # Start a new session to walk through the form
        requests.packages.urllib3.disable_warnings()
        s = requests.session()

        # Get our initial session running
        response = s.get("https://apps.wigan.gov.uk/MyNeighbourhood/")
        soup = BeautifulSoup(response.text, features="html.parser")

        # Grab the ASP.NET state variables needed to continue the postback.
        payload = {
            "__VIEWSTATE": soup.find("input", {"id": "__VIEWSTATE"}).get("value"),
            "__VIEWSTATEGENERATOR": soup.find("input", {"id": "__VIEWSTATEGENERATOR"}).get("value"),
            "__EVENTVALIDATION": soup.find("input", {"id": "__EVENTVALIDATION"}).get("value"),
            "ctl00$ContentPlaceHolder1$txtPostcode": user_postcode,
            "ctl00$ContentPlaceHolder1$btnPostcodeSearch": "Search",
        }

        # Use the above to get to the next page with address selection
        response = s.post("https://apps.wigan.gov.uk/MyNeighbourhood/", payload)
        soup = BeautifulSoup(response.text, features="html.parser")

        # Select the address by UPRN and post back for the collection dates.
        payload = {
            "__EVENTTARGET": "ctl00$ContentPlaceHolder1$lstAddresses",
            "__EVENTARGUMENT": "",
            "__LASTFOCUS": "",
            "__VIEWSTATE": soup.find("input", {"id": "__VIEWSTATE"}).get("value"),
            "__VIEWSTATEGENERATOR": soup.find("input", {"id": "__VIEWSTATEGENERATOR"}).get("value"),
            "__EVENTVALIDATION": soup.find("input", {"id": "__EVENTVALIDATION"}).get("value"),
            "ctl00$ContentPlaceHolder1$txtPostcode": user_postcode,
            "ctl00$ContentPlaceHolder1$lstAddresses": "UPRN" + user_uprn,
        }

        # Get the final page with the actual dates
        response = s.post("https://apps.wigan.gov.uk/MyNeighbourhood/", payload)
        soup = BeautifulSoup(response.text, features="html.parser")

        data = {"bins": []}

        # One "BinsRecycling" panel per bin type; scrape its next date.
        for bins in soup.find_all("div", {"class": "BinsRecycling"}):
            bin_type = bins.find("h2").text
            bin_collection = bins.find("div", {"class": "dateWrapper-next"}).get_text(
                strip=True
            )
            # Strip ordinal suffixes ("1st", "22nd", ...) before parsing,
            # e.g. "Monday1Jan2024" -> "%A%d%b%Y".
            bin_date = datetime.strptime(
                re.sub(r"(\d)(st|nd|rd|th)", r"\1", bin_collection), "%A%d%b%Y"
            )
            if bin_date:
                # Bug fix: entries must be appended to the "bins" list in the
                # standard {"type", "collectionDate"} shape; previously the
                # date was stored as a top-level key of `data`, leaving
                # data["bins"] always empty.
                data["bins"].append(
                    {
                        "type": bin_type,
                        "collectionDate": bin_date.strftime(date_format),
                    }
                )

        return data
| robbrad/UKBinCollectionData | uk_bin_collection/uk_bin_collection/councils/WiganBoroughCouncil.py | WiganBoroughCouncil.py | py | 3,612 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "uk_bin_collection.uk_bin_collection.get_bin_data.AbstractGetBinDataClass",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line... |
24938208176 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: tietuku.py
# modified: 2019-03-30
"""
贴图库 api 类
"""
__all__ = [
"TietukuClient",
]
import os
import time
from io import BytesIO
from .base import BaseClient
from .utils import get_links_cache_json, save_links_cache_json
from ..utils.log import cout
from ..utils.funcs import xMD5, xSHA1
from ..utils.meta import Singleton
from ..utils.decorator import cached_property
from ..const import TIETUKU_TOKEN, TIETUKU_AID, TIETUKU_CACHE_EXPIRED, TIETUKU_LINKS_CACHE_JSON
from ..exceptions import TietukuUploadError
class TietukuClient(BaseClient, metaclass=Singleton):
    """
    Tietuku image-hosting API client (singleton).
    """

    def __init__(self):
        super().__init__()
        # Cache of previously uploaded image links, keyed by image SHA1.
        self._imgLinks = get_links_cache_json(TIETUKU_LINKS_CACHE_JSON)

    def upload(self, filename, imgBytes):
        """
        Upload an image.

        Args:
            filename    str     image file name (extension is reused for the key)
            imgBytes    bytes   raw image bytes

        Return:
            links       dict    external-link info for this file
                {
                    'url':          image URL
                    'md5':          image MD5
                    'sha1':         image SHA1
                    'expire_time':  Unix time (seconds) when the cached link expires
                }

        Raise:
            TietukuUploadError  upload failed; for a non-200 style response the
                                service's error code is reported in the 'code' field

        -------------------------------------------------
        JSON returned on success (abridged):
        {
            "width": 1280, "height": 711, "type": "jpg", "size": 24640,
            "linkurl": "http://i1.bvimg.com/656554/0cf57e9173c0acaf.jpg",
            "s_url": "http://i1.bvimg.com/656554/0cf57e9173c0acafs.jpg",
            "t_url": "http://i1.bvimg.com/656554/0cf57e9173c0acaft.jpg",
            ...
        }
        JSON returned on failure; look up 'code' for the error type, message in 'info':
        {
            "code": "4511",
            "info": "\u76f8\u518c\u4e0d\u5b58\u5728\u6216\u5df2\u7ecf\u5220\u9664"
        }
        """
        imgMD5 = xMD5(imgBytes)
        imgSHA1 = xSHA1(imgBytes)

        # Serve from the cache while the cached link is still valid.
        links = self._imgLinks.get(imgSHA1)
        if links is not None and links['expire_time'] > time.time():
            cout.info('Get image %s from cache' % filename)
            return links
        else:
            cout.info('uploading image %s' % filename)
            # Key is content-addressed: SHA1 plus the original extension.
            key = "{basename}{ext}".format(
                basename=imgSHA1,
                ext=os.path.splitext(filename)[1]
            )
            r = self._post('http://up.imgapi.com/',
                data={
                    'Token': TIETUKU_TOKEN,
                    'deadline': int(time.time() + 60),  # parameter required by the API; purpose unclear
                    'aid': TIETUKU_AID,
                    'from': 'file',  # 'file' or 'web': upload local bytes vs. fetch from a URL
                },
                files={
                    'file': (key, BytesIO(imgBytes)),
                }
            )
            respJson = r.json()
            # The service signals failure by including a 'code' field.
            if "code" in respJson:
                raise TietukuUploadError("[%s] %s" % ( respJson['code'], respJson['info'] ) )

            links = {
                "url": respJson['linkurl'],
                # "o_url": respJson['linkurl'],  # original image
                # "s_url": respJson['s_url'],    # display-size image
                # "t_url": respJson['t_url'],    # thumbnail
                "md5": imgMD5,
                "sha1": imgSHA1,
                "expire_time": int(time.time() + TIETUKU_CACHE_EXPIRED)  # used to validate cached links
            }
            # Persist the refreshed cache before returning.
            self._imgLinks[imgSHA1] = links
            save_links_cache_json(TIETUKU_LINKS_CACHE_JSON, self._imgLinks)

            return links
| pkuyouth/pkuyouth-html-coder | htmlcoder/core/client/tietuku.py | tietuku.py | py | 4,184 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "base.BaseClient",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "utils.meta.Singleton",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "utils.get_links_cache_json",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "const... |
73087528423 | # -*- coding: utf-8 -*-
# @Author: ahmedkammorah
# @Date: 2019-04-04 15:54:42
# @Last Modified by: Ahmed kammorah
# @Last Modified time: 2019-04-08 22:58:45
from enum import Enum
import json
from MainService.main.email_provider_connector import RESPONSE_STATE
from MainService.main.ak_ep_services import AKEmailServices, AKProviderService,SERVICE_STATUS, logger
class EmailMessage(object):
    """Immutable value object describing one outgoing email."""

    def __init__(self, to_emails, from_email, subject, body):
        """
        Args:
            to_emails: non-empty list of recipient addresses
            from_email: non-empty sender address/name
            subject: subject line
            body: message body (used for both html and text parts)

        Raises:
            ValueError: when to_emails/from_email is missing or empty.
                (Bug fix: __init__ previously just returned early, silently
                producing a half-constructed object that raised
                AttributeError on first property access.)
        """
        if to_emails is None or from_email is None:
            raise ValueError("to_emails and from_email are required")
        if len(to_emails) == 0 or len(from_email) == 0:
            raise ValueError("to_emails and from_email must be non-empty")
        self._to_emails = to_emails
        self._from_email = from_email
        self._subject = subject
        self._body = body

    @property
    def to_emails(self):
        return self._to_emails

    @property
    def from_email(self):
        return self._from_email

    @property
    def subject(self):
        return self._subject

    @property
    def body(self):
        return self._body

    def __str__(self):
        return 'Eamil for subject:{} from_email:{} to_emails:{} \nbody:{}'.format(self.subject, self.from_email, self.to_emails, self.body)

    def build_sparkpost_msg(self):
        """Serialize this message as a SparkPost transmissions JSON payload.

        Note: the 'from' address is a fixed account address; the caller's
        from_email is carried in the display name.
        """
        data = {
            "recipients": [
            ],
            "content": {
                "from": {
                    "email": "ahmedkammorah@trendship.net",
                    "name": ""
                },
                "subject": "",
                "html": "<html><body> </body></html>",
                "text": ""
            }
        }
        # data['content']['from']['email'] = self.from_email
        data['content']['from']['name'] = self.from_email
        data['content']['subject'] = self.subject
        data['content']['html'] = self.body
        data['content']['text'] = self.body
        for em in self.to_emails:
            newRec = {
                "address": em
            }
            data['recipients'].append(newRec)
        return json.dumps(data)
class AKMainEmailService(AKEmailServices):
    """The Main Email service Class.

    Picks the first operational provider and fails over to the next one when
    the provider reports a service/over-rate error.

    Attributes:
        redis_util: redis util instance managing communication with redis
        service_provider_list: list of email provider names, in priority order
        services: map of all available and registered services
    """

    def __init__(self):
        """Initialize the Main Email service, registering all service providers."""
        super().__init__()

    def _pick_service(self):
        """Pick the first up-and-running service provider.

        Returns:
            AKProviderService instance of the first running provider,
            or None if there is no up-and-running provider.
        """
        logger.debug('Start picking one of the running service provider service ')
        for ser_name in self.service_provider_list:
            status = self.redis_util.get_ser_status(ser_name)
            # Leftover debug print() calls replaced with proper debug logging.
            logger.debug("provider %s status: %s", ser_name, status)
            if status == SERVICE_STATUS.UP.value:
                return self.services.get(ser_name, AKProviderService(ser_name))
        logger.error("No Service Provider is up right now")
        return None

    def send_email(self, email_message: EmailMessage):
        """Send an email message via the first available running provider.

        On SERVICE/OVERRATE errors the provider is marked unhealthy in redis
        and the send is retried recursively with the next provider; the
        recursion terminates because _pick_service eventually returns None.

        Args:
            email_message: full email message

        Returns:
            (RESPONSE_STATE, str) on OK / user error, or None when no provider
            is available or a request/unknown error occurred.
        """
        if email_message == None:
            logger.error("Can't send Empty or null Email")
            return
        logger.info('Start the process of Sending Eamil email_message')
        email_ser = self._pick_service()
        if email_ser == None:
            logger.error("No Email Service Provider up and running to Use ")
            # TODO: fire slack event to notify the dev team
            # TODO: add this request to a queue for next run when there is service to use
            return
        logger.info("Start using email provider {} for sending email".format(email_ser.name))
        email_connector = email_ser.connector
        res_status, response = email_connector.send_email(email_message)
        if res_status == RESPONSE_STATE.OK:
            logger.info("Successfully sending the email by {}".format(email_ser.name))
            return (res_status, 'success send the email')
        elif res_status == RESPONSE_STATE.USER_ERROR:
            logger.error("User email_message related error: {} when sending email by: {} provider".format(response, email_ser.name))
            return (res_status, response)
        elif res_status == RESPONSE_STATE.SERVICE_ERROR:
            # Fail over: mark this provider DOWN and retry with the next one.
            logger.error("Email Service provider {} is down for now".format(email_ser.name))
            email_ser.status = SERVICE_STATUS.DOWN
            self.redis_util.set_ser_status(email_ser)
            return self.send_email(email_message)
        elif res_status == RESPONSE_STATE.OVERRATE_ERROR:
            # Fail over: mark this provider rate-limited and retry.
            logger.error("Email Service provider {} is overlimt for now".format(email_ser.name))
            email_ser.status = SERVICE_STATUS.OVERLIMIT
            self.redis_util.set_ser_status(email_ser)
            return self.send_email(email_message)
        elif res_status == RESPONSE_STATE.REQUEST_ERROR:
            logger.error("Request related error: {} when sending by: {} provider".format(response, email_ser.name))
            # TODO: Notify dev team with this error by slack or push it to error topic in kafka
            return
        elif res_status == RESPONSE_STATE.OTHER_ERROR:
            logger.error("unidentified error: {} when use provider {}".format(response, email_ser.name))
            return
        return
if __name__ == "__main__":
ak = AKMainEmailService()
| AhmedKammorah/AKEmailService | MainService/main/ak_main_email_service.py | ak_main_email_service.py | py | 5,942 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "MainService.main.ak_ep_services.AKEmailServices",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "MainService.main.ak_ep_services.logger.debug",
"line_number": 87,
"usage_type"... |
5877766545 | # -*- coding:utf-8 -*-
"""
说明:
这里实现了单篇文章和专栏的爬取。
article 根据article_id发起网络请求,返回的json文件中包含文章的基本信息和文章主体内容,解析文章的基本信息生成一个msg
字典对象,再将文章主体解析成BeautifulSoup对象,连同msg字典一起交给document模块下的Article解析并保存成markdown文件。
根据专栏id获得专栏下的文章所有文章id后,逐一看成是单一的文章,由article爬取。
"""
from zhihu_spider.util import net, document
from zhihu_spider.util import const
import re
import os
from zhihu_spider.util import timer
from bs4 import BeautifulSoup
import zhihu_spider
__all__ = ['article', 'articles']
TIME_LIMIT_FLAG = False
def articles(column_id, time_limit, topic_limit, save_path):
    """Crawl all (or the most recent `time_limit` weeks of) articles in a column.

    Args:
        column_id: zhihu column id
        time_limit: look-back window in weeks; falsy, or a first run
            (save_path missing/empty), means crawl everything
        topic_limit: collection of allowed topics; empty means no filtering
        save_path: output directory, used only to detect a first run

    Returns:
        list of parsed article dicts (see `article`).
    """
    global TIME_LIMIT_FLAG
    # If not a first run, honour time_limit; otherwise fetch all past articles.
    if bool(time_limit) and os.path.exists(save_path) and bool(os.listdir(save_path)):
        num_limit = (int(time_limit) + 1) * 7
    else:
        num_limit = 0
        time_limit = 0
    articles_list = articles_id(column_id, num_limit)
    # Per-article retry counter; each article is re-queued at most 5 times.
    request_times = dict([(i, 0) for i in articles_list])
    ars = []
    while len(articles_list) != 0:
        article_id = articles_list.pop(0)
        try:
            ar = article(article_id, topic_limit, time_limit)
            if ar:
                ars.append(ar)
        except ValueError:
            if request_times.get(article_id) < 5:
                articles_list.append(article_id)
                # Bug fix: the counter was keyed on the *function* `articles_id`
                # instead of the current `article_id`, so retries never counted
                # and failing articles were re-queued forever.
                request_times[article_id] += 1
        except IndexError:
            # Not a paper-digest article; skip it.
            continue
        timer.random_sleep(end=zhihu_spider.SLEEP)
        if TIME_LIMIT_FLAG:
            break
    # Report articles that exhausted their retries.
    for article_id, times in request_times.items():
        if times >= 5:
            print(net.article_spider_url(article_id))
    return ars
def articles_id(column_id, num_limit):
    """Collect the article ids of a column, paging through the API.

    Pages until the column is exhausted, or — when num_limit is non-zero —
    until at least num_limit ids have been collected (the result is then
    trimmed to exactly num_limit).
    """
    article_list = list()
    offset = zhihu_spider.Controller()
    while not offset.is_end():
        response = net.column_spider(column_id, offset.next_offset(), limit=100)
        if response is None:
            raise ValueError('Response is None')
        content = response.text
        # Total number of articles in the column, reported on every page.
        totals = re.search(r'"totals":\W(\d+)', content).group(1)
        offset.totals = int(totals)
        article_id_list = re.findall(r'"id":\W(\d+)', content)
        offset.increase(len(article_id_list))
        article_list.extend(article_id_list)
        article_id_list.clear()
        timer.random_sleep(end=zhihu_spider.SLEEP)
        if bool(num_limit) and len(article_list) > num_limit:
            offset.to_stop()
    if num_limit:
        article_list = article_list[:num_limit]
    return article_list
def article(article_id, topic_limit, time_limit):
    """Fetch one article and return its topic/date/content dict.

    Returns None when the article is filtered out by time_limit (which also
    sets the global TIME_LIMIT_FLAG to stop the caller) or by topic_limit.

    Raises:
        ValueError: when the network request returned no response.
        IndexError: when the title does not match the paper-digest pattern
            (the caller treats this as "skip").
    """
    global TIME_LIMIT_FLAG
    response = net.article_spider(article_id)
    if response is not None:
        response_json = response.json()
        # Raises IndexError for non-digest articles; caller skips those.
        topic = re.findall(r'(\w*?)每?日?论文速递', response_json['title'])[0]
        create_date = timer.timestamp_to_date(response_json['created'])
        time_diff = timer.time_diff(create_date)
        if bool(time_limit) and time_diff > int(time_limit):
            # Past the look-back window: signal the caller to stop crawling.
            TIME_LIMIT_FLAG = True
            return
        elif len(topic_limit) > 0 and topic not in topic_limit:
            return
        content = BeautifulSoup(response_json['content'], 'lxml').body
        article_dict = {'topic': topic,
                        'create_date': create_date,
                        'content': str(content.contents)}
        return article_dict
    else:
        raise ValueError('Response is None')
def article_msg(content):
    """Extract article metadata from the raw API `content` dict and build the
    keyword arguments for a document.Meta."""
    original_url = const.ARTICLE_URL.format(content['id'])
    title = content['title']
    background_image = content['image_url']
    date = timer.timestamp_to_date(content['created'])
    author = content['author']['name']
    author_page = const.AUTHOR_PAGE_URL.format(content['author']['url_token'])
    avatar = content['author']['avatar_url']
    article_dict = {'author': author, 'author_avatar_url': avatar, 'author_page': author_page, 'title': title,
                    'original_url': original_url, 'created_date': date, 'background': background_image}
return document.Meta(**article_dict) | Arlenelalala/ArxivPaper | zhihu_spider/article/__init__.py | __init__.py | py | 4,672 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "zhihu_spider.util.timer.random... |
19715634890 | import networkx as nx
from networkx.algorithms import isomorphism
import argparse
import pickle
from tqdm import tqdm
"""get nx graphs from remapped_fp_file"""
def get_single_subgraph_nx(frequent_subgraph_lines):
    """Build a directed graph from one gSpan-format text record.

    Args:
        frequent_subgraph_lines: lines of a single frequent-subgraph block —
            a header line whose 3rd token is the graph id, followed by
            "v <node_id> <node_type>" and "e <from> <to> <edge_type>" lines.

    Returns:
        networkx.DiGraph named after the gSpan graph id, with 'type'
        attributes on nodes and edges.
    """
    graph_id = frequent_subgraph_lines[0].strip().split(' ')[2]
    # (The unused `support` local parsed from token 4 was removed.)
    graph_nx = nx.DiGraph(name=graph_id)
    for line in frequent_subgraph_lines[1:]:
        parsed_line = line.strip().split(' ')
        if line.startswith('v'):
            # v <node_id> <node_type>
            graph_nx.add_node(parsed_line[1], type=parsed_line[2])
        elif line.startswith('e'):
            # e <from> <to> <edge_type>
            graph_nx.add_edge(parsed_line[1], parsed_line[2], type=parsed_line[3])
    return graph_nx
def get_subgraphs_nx(remapped_fp_file):
    """Parse a remapped gSpan output file into a list of nx DiGraphs.

    Records are separated by blank lines.  Bug fix: a trailing record that is
    not followed by a final blank line is now flushed as well (previously it
    was silently dropped).
    """
    fsg_lines_list = []
    fsg_lines = []
    with open(remapped_fp_file) as f:
        for line in f:
            if line == '\n' and fsg_lines != []:
                fsg_lines_list.append(fsg_lines)
                fsg_lines = []
            else:
                fsg_lines.append(line)
    # Flush the last record when the file does not end with a blank line
    # (guard against whitespace-only leftovers).
    if any(line.strip() for line in fsg_lines):
        fsg_lines_list.append(fsg_lines)
    nx_subgraphs = []
    for fsg_ls in fsg_lines_list:
        nx_subgraphs.append(get_single_subgraph_nx(fsg_ls))
    return nx_subgraphs
"""set up node matcher and edge matcher"""
NODE_MATCHER_FUNC = isomorphism.categorical_node_match("type",None)
EDGE_MATCHER_FUNC = isomorphism.categorical_edge_match("type",None)
def is_subgraph(g, G):
    """Return True when `g` is isomorphic to some subgraph of `G`,
    matching node and edge 'type' attributes."""
    matcher = isomorphism.DiGraphMatcher(
        G, g, node_match=NODE_MATCHER_FUNC, edge_match=EDGE_MATCHER_FUNC
    )
    return matcher.subgraph_is_isomorphic()
def sort_nx_graphs(nx_graphs, order='increasing'):
    """Return `nx_graphs` sorted by node count.

    `order` must be 'increasing' or 'decreasing'; anything else raises
    NotImplementedError.
    """
    if order not in ('increasing', 'decreasing'):
        raise NotImplementedError
    return sorted(
        nx_graphs,
        key=lambda graph: graph.number_of_nodes(),
        reverse=(order == 'decreasing'),
    )
def filter_mined_nx_subgraphs(nx_subgraphs, save_path=None):
    """Drop every mined subgraph that is contained in another mined subgraph.

    Graphs are processed smallest-first; a graph is kept only when it is not
    a subgraph of any later (larger-or-equal) graph.

    Bug fix: the original loop ran over range(len - 1), so the last (largest)
    graph was never appended to the result.
    """
    sorted_nx_graphs = sort_nx_graphs(nx_subgraphs, order='increasing')
    filtered_nx_graphs = []
    for i in tqdm(range(len(sorted_nx_graphs))):
        g = sorted_nx_graphs[i]
        # Keep g unless some later graph contains it; for the last graph the
        # inner generator is empty, so it is always kept.
        if not any(is_subgraph(g, G) for G in sorted_nx_graphs[i + 1:]):
            filtered_nx_graphs.append(g)
    if save_path is not None:
        write_graphs(filtered_nx_graphs, save_path)
        print('write graphs to :', save_path)
    return filtered_nx_graphs
def write_graphs(nx_subgraphs, output_pickle_path):
    """Pickle the list of graphs to `output_pickle_path`."""
    with open(output_pickle_path, 'wb') as out_file:
        pickle.dump(nx_subgraphs, out_file)
def load_graphs(input_pickle_path):
    """Unpickle and return the graphs stored at `input_pickle_path`."""
    with open(input_pickle_path, 'rb') as in_file:
        return pickle.load(in_file)
'''arg parser'''
if __name__ == "__main__":
    # Bug fix: the ArgumentParser construction was commented out, so the
    # parser.add_argument calls below raised NameError at runtime.
    parser = argparse.ArgumentParser(description='filter mined subgraphs')
    parser.add_argument('-i', '--input_fp_file', help='input remapped fp file path', required=True)
    parser.add_argument('-o', '--output_path', help='write filtered graphs in pickle format', required=False, default="")
    args = vars(parser.parse_args())
    remapped_fp_path = args['input_fp_file']
    save_path = args['output_path']

    '''usage'''
    nx_subgraphs = get_subgraphs_nx(remapped_fp_path)
    print('original frequent subgraph number: ', len(nx_subgraphs))
    filtered_nx_subgraphs = filter_mined_nx_subgraphs(nx_subgraphs)
    print('filtered frequent subgraph number: ', len(filtered_nx_subgraphs))
    if save_path != "":
        write_graphs(filtered_nx_subgraphs, save_path)
| MikeWangWZHL/Schema_Composition | gSpan_official/gSpan6/filter_mined_graph.py | filter_mined_graph.py | py | 4,396 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "networkx.DiGraph",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "networkx.algorithms.isomorphism.categorical_node_match",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "networkx.algorithms.isomorphism",
"line_number": 49,
"usage_type"... |
16442806200 | # from utils.txt_file_ops import *
from utils.Database_conn import *
from object.base_class import *
from loguru import logger
from tabulate import tabulate
from datetime import datetime
class Subject:
    """Console CRUD menu for the `subject` table.

    Holds one DB connection (via SQLConnector) for the object's lifetime.
    """

    def __init__(self, sub_id='', sub_name=''):
        self.__sub_id = sub_id
        self.__sub_name = sub_name
        db_obj = SQLConnector()
        self.__db_conn = db_obj.create_connection()
        self.__db_cursor = self.__db_conn.cursor()

    def display_menu(self):
        """Main interactive loop: dispatch menu choices until the user exits."""
        while True:
            print("--------------------------------")
            print("PLEASE SELECT A FUNCTION")
            print("1. ADD NEW SUBJECT")
            print("2. UPDATE SUBJECT")
            print("3. DELETE SUBJECT")
            print("4. FIND SUBJECT")
            print("5. SHOW ALL SUBJECTS")
            print("0. EXIT")
            key = input("ENTER YOUR CHOICE: ")
            if key == '1':
                self.__add_data()
            elif key == '2':
                self.__update_data()
            elif key == '3':
                self.__delete_data()
            elif key == '4':
                self.__search_data()
            elif key == '5':
                self.__get_data()
            elif key == '0':
                print("EXITING...")
                return
            else:
                print("INVALID CHOICE")
                print("PLEASE TRY AGAIN")

    def __get_data(self):
        """Print all subjects as an ID/NAME table."""
        self.__db_cursor.execute("SELECT * FROM subject")
        result = self.__db_cursor.fetchall()
        # Bug fix: rows already carry (id, name); the old enumerate() loop
        # printed the list index as the ID and the whole row as the name.
        sub_list = [[row[0], row[1]] for row in result]
        print(tabulate(sub_list, headers=['ID', 'NAME']))

    def __input_sub_info(self):
        """Read the subject name from the keyboard into this instance."""
        self.__sub_name = input("SUBJECT NAME: ")

    def __add_data(self):
        """Insert a new subject typed in by the user."""
        print("--INPUT SUBJECT INFORMATION--")
        self.__input_sub_info()
        sql_cmd = """INSERT INTO subject (subject_name)
                      VALUES (%s)
                  """
        self.__db_cursor.execute(sql_cmd, (self.__sub_name,))
        self.__db_conn.commit()
        logger.info("SUBJECT ADDED SUCCESSFULLY")

    def __update_data(self):
        """Update one subject's name by id, then return to the main menu.

        Bug fixes: commit() returns None, so the old `if commit():` always
        took the success branch; success is now judged by cursor.rowcount.
        The inescapable `while True` loop was removed.
        """
        print("--UPDATE SUBJECT INFORMATION--")
        sub_id = input("SUBJECT ID: ")
        self.__input_sub_info()
        self.__db_cursor.execute(
            "UPDATE subject SET subject_name = %s WHERE subject_id = %s",
            (self.__sub_name, sub_id))
        self.__db_conn.commit()
        if self.__db_cursor.rowcount:
            logger.info("UPDATE SUBJECT SUCCESSFULLY!")
        else:
            logger.error("UPDATE SUBJECT FAILED!")
            print('ID NOT FOUND')

    def __delete_data(self):
        """Delete one subject by id, then return to the main menu."""
        print("--DELETE SUBJECT--")
        sub_id = input("ENTER SUBJECT ID: ")
        self.__db_cursor.execute("DELETE FROM subject WHERE subject_id = %s", [sub_id])
        self.__db_conn.commit()
        if self.__db_cursor.rowcount:
            logger.info("DELETE SUBJECT SUCCESSFULLY!")
        else:
            logger.error("DELETE SUBJECT FAILED!")
            print('ID NOT FOUND')

    def __search_data(self):
        """Sub-menu: search subjects by id or name until the user enters 0."""
        print("--FIND SUBJECT INFORMATION--")
        while True:
            print("1. FIND SUBJECT BY ID")
            print("2. FIND SUBJECT BY NAME")
            key = input("Enter your choice: ")
            if key == '1':
                self.__search_sub_byID()
            elif key == '2':
                self.__search_sub_byName()
            elif key == '0':
                print("You have exited the program")
                return
            else:
                print("Invalid choice")
                print("Please try again")

    def __search_sub_byID(self):
        """Look up and print one subject by id, then return.

        Bug fix: the query targeted the `students` table instead of `subject`.
        """
        print("--FIND SUBJECT INFORMATION--")
        sub_id = int(input("ENTER SUBJECT ID: "))
        self.__db_cursor.execute("SELECT * FROM subject WHERE subject_id = %s", [sub_id])
        results = self.__db_cursor.fetchall()
        if not results:
            print('ID NOT FOUND')
            return
        for row in results:
            print("--SUBJECT INFORMATION--")
            print(f"SUBJECT ID: {row[0]}")
            print(f"SUBJECT NAME: {row[1]}")

    def __search_sub_byName(self):
        """Look up and print subjects by name, then return.

        Bug fixes: the query targeted `students` instead of `subject`, and the
        row tuple was indexed with the search string instead of a position.
        """
        print("--FIND SUBJECT INFORMATION--")
        sub_name = input("ENTER SUBJECT NAME: ")
        self.__db_cursor.execute("SELECT * FROM subject WHERE subject_name = %s", [sub_name])
        results = self.__db_cursor.fetchall()
        if not results:
            print('SUB NOT FOUND')
            return
        for row in results:
            print("--SUBJECT INFORMATION--")
            print(f"SUBJECT ID: {row[0]}")
            print(f"SUBJECT NAME: {row[1]}")
| thanhtugn/python_core_thanhtugn | Lesson_14/object/subject.py | subject.py | py | 5,166 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tabulate.tabulate",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "loguru.logger.info",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "loguru.logger.erro... |
25947439528 | import os
import sqlite3
from datetime import datetime, timedelta
import telebot
# Bot credentials and chat routing come from the environment.
bot = telebot.TeleBot(os.getenv("BOT_TOKEN"))
memes_chat_id = int(os.getenv("MEMES_CHAT_ID"))
flood_thread_id = int(os.getenv("FLOOD_THREAD_ID", 1))
memes_thread_id = int(os.getenv("MEMES_THREAD_ID", 1))
# Module-level DB handle; check_same_thread=False permits use from other
# threads — presumably for the bot's worker threads, confirm.
conn = sqlite3.connect("memes.db", check_same_thread=False)
def main():
    """Post the weekly top-3 meme posters to the flood thread."""
    seven_days_ago = datetime.now() - timedelta(days=7)
    # Top 3 posters by meme count over the last 7 days.
    # (The duplicated "ORDER BY 3 DESC, 3 DESC" clause was collapsed to one.)
    query = (
        "SELECT user_id, MAX(username), count(*) FROM memes_posts_v2 "
        "WHERE created_at > ? GROUP BY user_id ORDER BY 3 DESC LIMIT 3"
    )
    rows = conn.execute(query, (seven_days_ago,)).fetchall()
    msg = ["Количество сброшенных мемов\n"]
    # Medals are popped from the end, so the first (top) row gets the gold.
    stack = ["🥉", "🥈", "🥇"]
    for user_id, username, memes_count in rows:
        msg.append(
            "[{username}](tg://user?id={user_id}) {memes_count} - {medal}".format(
                username=username,
                user_id=user_id,
                memes_count=memes_count,
                medal=stack.pop(),
            )
        )
    bot.send_message(
        memes_chat_id,
        "\n".join(msg),
        message_thread_id=flood_thread_id,
        parse_mode="Markdown",
    )


if __name__ == "__main__":
    main()
| dzaytsev91/tachanbot | cron_job_memes_count.py | cron_job_memes_count.py | py | 1,239 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
... |
10261032869 | # from multiprocessing import Process, Queue
from queue import Queue
import threading
from crawler.reviewCrawler import ReviewCrawler
from crawler.userCrawler import UserCrawler
import json
from GameListCrawler import getGameList
import time
from utils.redisUtis import RedisUtil
from utils.sqlUtils import dbconnector
from gameCrawler import GameCrawler
import requests
import properties
# Shared work queues: game-info JSON strings to scrape, steam user ids to
# crawl, and appids whose reviews still need fetching.
game_queue = Queue()
user_queue = Queue()
review_queue = Queue()
def game_consumer(game_queue, user_queue, review_queue):
    """Worker loop: pop game-info JSON strings and scrape each game.

    Runs forever; any per-item failure is logged and the item is dropped.
    """
    while True:
        game_info_str = game_queue.get(block=True)
        try:
            game_info = json.loads(game_info_str)
            game_helper(game_queue=game_queue, user_queue=user_queue,
                        review_queue=review_queue,
                        id=game_info['id'], url=game_info['url'])
        except Exception as e:
            print("game_consumer_error:", game_info_str)
            time.sleep(1)
def game_helper(game_queue, user_queue, review_queue, id, url):
    """Scrape one game page (unless already seen) and queue its review crawl."""
    # Always queue a review crawl for this appid, even when the game page
    # itself was scraped before.
    review_queue.put(id)
    redisUtil = RedisUtil()
    if redisUtil.checkGameExist(id):
        print("exist game" + str(id))
        return
    gameCrawler = GameCrawler()
    gameCrawler.infoSave(id, url)
    # Mark the game as scraped only after a successful save.
    redisUtil.setGameExist(id)
def review_consumer(game_queue, user_queue, review_queue):
    """Worker loop: pop appids and crawl their reviews.

    Runs forever; any per-item failure is logged and the item is dropped.
    """
    while True:
        appid = review_queue.get(block=True)
        try:
            review_helper(game_queue=game_queue, user_queue=user_queue,
                          review_queue=review_queue, appid=appid)
        except Exception as e:
            print("review_consumer_error:", appid)
            time.sleep(1)
def review_helper(game_queue, user_queue, review_queue, appid):
    """Fetch and save reviews for one game; queue each reviewer for a user crawl."""
    rc = ReviewCrawler(appid)
    reviews = rc.requestReview()
    rc.saveReview()
    for review in reviews:
        steamid = review['steamid']
        user_queue.put(steamid)
def user_consumer(game_queue, user_queue, review_queue):
    """Worker loop: pop steam user ids and crawl each user.

    Runs forever; any per-item failure is logged and the item is dropped.
    """
    while True:
        steamid = user_queue.get(block=True)
        try:
            user_helper(game_queue=game_queue, user_queue=user_queue,
                        review_queue=review_queue, steamid=steamid)
        except Exception as e:
            print("user_consumer_error:", steamid)
            time.sleep(1)
def user_helper(game_queue, user_queue, review_queue, steamid):
    """Crawl one user's friend list and owned games, feeding both queues."""
    uc = UserCrawler(steamid)
    friendList = uc.requestFriendList()
    uc.saveFriendList()
    if friendList != None:
        for friend in friendList:
            user_queue.put(friend['steamid'])
    ownedGameList = uc.requestOwnedGames()
    uc.saveOwnedGames()
    # put game task
    if ownedGameList != None:
        for game in ownedGameList:
            url = "https://store.steampowered.com/app/" + str(game['appid'])
            # NOTE(review): this GET's response is never used, and the game is
            # queued even when the request fails — presumably the intent was
            # to skip games whose store page is unreachable; confirm.
            try:
                response = requests.get(url, headers=properties.headers, timeout=10)
            except Exception as e:
                print("add owned game to gamelist error: no response and", e)
            game_queue.put(json.dumps({"id": game['appid'], "url": url}))
def provider(game_queue):
    """Seed the game queue with two well-known starting games.

    Bug fix: the unused `sql = dbconnector()` call was removed — it opened a
    DB connection that was never used or closed.
    """
    start_games = [
        {"id": "10", "url": "https://store.steampowered.com/app/10/CounterStrike/"},
        {"id": "20", "url": "https://store.steampowered.com/app/20/Team_Fortress_Classic/"},
    ]
    for item in start_games:
        game_queue.put(json.dumps(item))
if __name__ == '__main__':
    # Thread-pool size per consumer type.
    game_consumer_num = 5
    review_consumer_num = 5
    user_consumer_num = 5
    game_consumer_list = []
    review_consumer_list = []
    user_consumer_list = []
    # Producer thread: streams the master game list into game_queue.
    game_list_provider_threading = threading.Thread(target=getGameList, args=(game_queue,))
    game_list_provider_threading.start()
    print("start allocating threading")
    # Consumer pools; each worker runs its infinite consumer loop.
    for i in range(game_consumer_num):
        game_consumer_process = threading.Thread(target=game_consumer, args=(game_queue, user_queue, review_queue,))
        game_consumer_list.append(game_consumer_process)
        game_consumer_process.start()
    for i in range(user_consumer_num):
        user_consumer_process = threading.Thread(target=user_consumer, args=(game_queue, user_queue, review_queue,))
        user_consumer_list.append(user_consumer_process)
        user_consumer_process.start()
    for i in range(review_consumer_num):
        reveiw_consumer_process = threading.Thread(target=review_consumer, args=(game_queue, user_queue, review_queue,))
        review_consumer_list.append(reveiw_consumer_process)
        reveiw_consumer_process.start()
| Alex1997222/dataming-on-steam | SteamCrawler/main.py | main.py | py | 4,442 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "queue.Queue",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2... |
22365841878 | from django import forms
from .models import UserProfile
class UserProfileForm(forms.ModelForm):
    """Profile edit form that shows placeholders instead of field labels."""

    class Meta:
        model = UserProfile
        exclude = ['user']

    def __init__(self, *args, **kwargs):
        """
        Add placeholders and classes, remove auto-generated
        labels and set autofocus on first field
        """
        super().__init__(*args, **kwargs)
        placeholders = {
            'user_phone_number': 'Phone Number',
            'user_zip': 'ZIP',
            'user_city': 'City',
            'user_address_line_1': 'Address line 1',
            'user_address_line_2': 'Address line 2',
            'user_state': 'State',
        }

        self.fields['user_phone_number'].widget.attrs['autofocus'] = True
        for field in self.fields:
            # user_country gets no placeholder (it has no entry in the map).
            if field != 'user_country':
                # Simplified: both branches of the old truthiness check built
                # the identical string, so the redundant branch was removed.
                self.fields[field].widget.attrs['placeholder'] = placeholders[field]
            self.fields[field].label = False
| folarin-ogungbemi/Gosip-Bookstore | profiles/forms.py | forms.py | py | 1,120 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.UserProfile",
"line_number": 7,
"usage_type": "name"
}
] |
6061528518 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 16:12:53 2020
@author: Monik
"""
import os, tifffile
import numpy as np
import matplotlib.pyplot as plt
import SOFI2_0_fromMatlab as sofi2
#%% helper functions
def where_max(a):
    """Return the index tuple of the (first) maximum element of array `a`.

    (The stray debug print of `a.shape` was removed.)
    """
    return np.unravel_index(np.argmax(a, axis=None), a.shape)
#%% read data and show mean
data_dir = 'SOFI2-demo-data/'
T = 20
# Load T raw-frame stacks; each Block<k>.tif is read as float32.
data_timelapse = [np.array(tifffile.imread(os.path.join(data_dir, 'Block' + str(k) + '.tif')), dtype=np.float32) for k in range(1, T + 1)]
# Per-block mean image (average over the frame axis).
data_mean_series = np.array([np.mean(data_timelapse[k], axis=0) for k in range(T)])
plt.imshow(data_mean_series[-1])
plt.colorbar()
#%% calculate m6 for all data
m6_series = np.array([sofi2.M6(data_timelapse[k], verbose=True, comment=str(k)) for k in range(T)])
plt.imshow(m6_series[-1])
#%% here I need a better deconvolution!
m6_f = sofi2.filter_timelapse(sofi2.kill_outliers(m6_series))
m6_dcnv = np.array([sofi2.deconvolution(m6_f[k], verbose=True, comment=str(k)) for k in range(T)], dtype=np.float32)
m6_dcnv_f = sofi2.filter_timelapse(m6_dcnv)
plt.imshow(m6_dcnv_f[-1])
#%% do ldrc
m6_ldrc_series = np.array([sofi2.ldrc(m6_dcnv_f[k], data_mean_series[k], 25) for k in range(T)])
plt.imshow(m6_ldrc_series[-1])
plt.colorbar()
#%% alternative: ldrc without deconv
m6_ldrc_nodeconv = np.array([sofi2.ldrc(m6_f[k], data_mean_series[k], 25) for k in range(T)])
# Bug fix: this cell previously displayed m6_ldrc_series (the deconvolved
# result) again instead of the no-deconvolution series computed above.
plt.imshow(m6_ldrc_nodeconv[-1])
plt.colorbar()
#%% save results as 16-bit TIFFs, rescaled to the full uint16 range
tifffile.imsave('demo_means' + '.tif', np.uint16(65500 * data_mean_series / data_mean_series.max()))
tifffile.imsave('demo_M6_Deconv_ldrc' + '.tif', np.uint16(65500 * m6_ldrc_series / m6_ldrc_series.max()))
tifffile.imsave('demo_M6_noDeconv_ldrc' + '.tif', np.uint16(65500 * m6_ldrc_nodeconv / m6_ldrc_nodeconv.max()))
| pawlowska/SOFI2-Python-Warsaw | SOFI2_demo.py | SOFI2_demo.py | py | 1,760 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.unravel_index",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tifffile.imread",
"l... |
15136620120 | import time
import warnings
import mmcv
import torch
from mmcv.runner import RUNNERS, IterBasedRunner, IterLoader, get_host_info
@RUNNERS.register_module()
class MultiTaskIterBasedRunner(IterBasedRunner):
    """IterBasedRunner variant that draws one batch from *each* of several
    dataloaders per training iteration and feeds the list of batches to
    ``model.train_step()``."""
    def train(self, data_loader, **kwargs):
        """Run one training iteration.

        Args:
            data_loader (list[IterLoader]): one loader per task; a batch is
                taken from every loader and all batches are passed together
                to ``train_step()``.
        """
        self.model.train()
        self.mode = 'train'
        # Hooks that inspect `runner.data_loader` see the first (primary) loader.
        self.data_loader = data_loader[0]
        self._epoch = data_loader[0].epoch
        data_batch = []
        for dl in data_loader:
            data_batch.append(next(dl))
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('model.train_step() must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1
    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.
        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training and
                1000 iterations for validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        # assert len(data_loaders) == len(workflow)
        # Unlike the stock runner, len(data_loaders) may differ from
        # len(workflow): the whole list of loaders is handed to train().
        if max_iters is not None:
            warnings.warn(
                'setting max_iters in run is deprecated, '
                'please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert self._max_iters is not None, (
            'max_iters must be specified during instantiation')
        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s',
                         get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s',
                         self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow,
                         self._max_iters)
        self.call_hook('before_run')
        iter_loaders = [IterLoader(x) for x in data_loaders]
        self.call_hook('before_epoch')
        while self.iter < self._max_iters:
            for i, flow in enumerate(workflow):
                self._inner_iter = 0
                mode, iters = flow
                if not isinstance(mode, str) or not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run a workflow'.
                        format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if mode == 'train' and self.iter >= self._max_iters:
                        break
                    # The full list of iter_loaders is passed to the phase
                    # method (train() above expects the list, not one loader).
                    iter_runner(iter_loaders, **kwargs)
        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_epoch')
        self.call_hook('after_run')
| CVIU-CSU/PSSNet | mmseg/core/runners/multi_task_iterbased_runner.py | multi_task_iterbased_runner.py | py | 3,324 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "mmcv.runner.IterBasedRunner",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mmcv.is_list_of",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "mmcv.runner... |
25796455279 | import itertools
import numpy as np
import collections
import tensorflow as tf
from PIL import Image
from keras.models import Model, load_model
from keras import backend as K
from integrations.diagnosis_nn.diagnosisNN import DiagnosisNN
from neural_network.models import NeuralNetwork
from neural_network.nn_manager.GeneratorNNQueryManager import GeneratorNNQueryManager
class DiagnosisQuery(GeneratorNNQueryManager):
    """Loads the latest persisted 'diagnosis' Keras model from the DB and runs
    predictions with it inside a dedicated TensorFlow session."""
    # Expected input image shape (height, width, channels).
    input_shape = (100, 100, 1)
    # NeuralNetwork.description value identifying the model rows in the DB.
    db_description = 'diagnosis'
    def __init__(self):
        # Model and TF session are created lazily in create_model().
        self.model = None
        self.sess = None
        super().__init__()
    def transform_image(self, image):
        """Ensure *image* has an explicit single channel axis: (H, W) -> (H, W, 1)."""
        if len(image.shape) == 2:
            image = image.reshape((image.shape[0], image.shape[1], 1))
        return image
    def create_model(self) -> Model:
        """Load the most recent stored model for `db_description`, caching it.

        Returns the loaded model, or None on IOError (errors are only printed).
        NOTE(review): if no NeuralNetwork row matches, `nn` remains a queryset
        and `nn.model.path` below would fail - presumably at least one row
        always exists; confirm against the deployment data.
        """
        if self.model is None:
            try:
                nn = NeuralNetwork.objects.all().filter(description=self.db_description)
                if nn.count() > 0:
                    nn = nn.latest('created')
                self.sess = tf.Session()
                K.set_session(self.sess)
                self.model = load_model(nn.model.path)
                return self.model
            except IOError as e:
                print(e)
    def model_predict(self, image_gen, batch=3):
        """Run the parent's batched prediction inside this instance's TF session.

        NOTE(review): `_init_model` is not defined in this class - presumably
        provided by GeneratorNNQueryManager. `gen_copy` from tee() is unused.
        """
        if self.model is None:
            self._init_model()
        gen, gen_copy = itertools.tee(image_gen)
        with self.sess.as_default():
            result = super().model_predict(gen, batch=batch)
        return result
| AkaG/inz_retina | integrations/diagnosis_nn/DiagnosisQuery.py | DiagnosisQuery.py | py | 1,527 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "neural_network.nn_manager.GeneratorNNQueryManager.GeneratorNNQueryManager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "neural_network.models.NeuralNetwork.objects.all",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "neural_network.models.N... |
34684043114 | #!/usr/bin/env python3
from collections import deque
def search(lines, pattern, history=5):
    """Yield each matching line together with up to *history* preceding lines.

    The second element of each yielded pair is the live context deque, so it
    reflects the lines seen immediately before the match.
    """
    recent = deque(maxlen=history)
    for line in lines:
        if pattern in line:
            yield line, recent
        recent.append(line)
if __name__ == '__main__':
    # Demo: print every line of somefile.txt containing 'python', preceded by
    # up to 5 lines of context and followed by a separator rule.
    with open(r'somefile.txt') as f:
        for line, prelines in search(f, 'python', 5):
            for pline in prelines:
                print(pline)
            print(line)
            print('-' * 20)
| kelify/WorkProgram | CookBook-python3/c01/01.py | 01.py | py | 468 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
18903357112 | from abc import ABCMeta
from json import dumps
from logging import getLogger
from uchicagoldrtoolsuite import log_aware
from ..materialsuite import MaterialSuite
__author__ = "Brian Balsamo, Tyler Danstrom"
__email__ = "balsamo@uchicago.edu, tdanstrom@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class AccessionContainer(metaclass=ABCMeta):
    """
    A Stage is a structure which holds an aggregate's contents
    as they are being processed for ingestion into long term storage.

    It bundles MaterialSuites plus accession records and
    administrative/legal notes under a single identifier.
    """
    @log_aware(log)
    def __init__(self, identifier):
        """
        Creates a new Stage

        __Args__

        1. identifier (str): The identifier that will be assigned to the Stage
        """
        log.debug("Entering ABC init")
        self._identifier = None
        self._materialsuite_list = []
        self._accessionrecord = []
        self._adminnote = []
        self._legalnote = []
        self.set_identifier(identifier)
        log.debug("Exiting ABC init")

    @log_aware(log)
    def __repr__(self):
        # Render a stable, sorted JSON summary for debugging/logging.
        attr_dict = {
            'identifier': self.identifier,
            'materialsuite_list': [str(x) for x in self.materialsuite_list],
            'accessionrecord_list': [str(x) for x in self.accessionrecord_list],
            'adminnote_list': [str(x) for x in self.adminnote_list],
            'legalnote_list': [str(x) for x in self.legalnote_list]
        }
        return "<{} {}>".format(str(type(self)),
                                dumps(attr_dict, sort_keys=True))

    @log_aware(log)
    def get_identifier(self):
        """Return the Stage's identifier."""
        return self._identifier

    @log_aware(log)
    def set_identifier(self, identifier):
        """Set the Stage's identifier."""
        log.debug("{}({}) identifier being set to {}".format(
            str(type(self)),
            str(self.identifier), identifier)
        )
        self._identifier = identifier
        log.debug(
            "{} identifier set to {}".format(str(type(self)), identifier)
        )

    @log_aware(log)
    def get_materialsuite_list(self):
        """Return the list of MaterialSuites held by this Stage."""
        return self._materialsuite_list

    @log_aware(log)
    def set_materialsuite_list(self, x):
        """Replace the MaterialSuite list with the contents of iterable *x*."""
        self.del_materialsuite_list()
        for y in x:
            self.add_materialsuite(y)

    @log_aware(log)
    def del_materialsuite_list(self):
        """Remove every MaterialSuite from the Stage."""
        while self.materialsuite_list:
            self.pop_materialsuite()

    @log_aware(log)
    def add_materialsuite(self, x):
        """Append a MaterialSuite; raises ValueError for any other type."""
        if not isinstance(x, MaterialSuite):
            raise ValueError()
        self._materialsuite_list.append(x)

    @log_aware(log)
    def get_materialsuite(self, index):
        """Return the MaterialSuite at *index*."""
        return self.materialsuite_list[index]

    @log_aware(log)
    def pop_materialsuite(self, index=None):
        """Remove and return the MaterialSuite at *index* (default: the last one)."""
        # Now returns the popped element, consistent with the other pop_* methods
        # (previously the popped value was discarded and None returned).
        if index is None:
            return self.materialsuite_list.pop()
        else:
            return self.materialsuite_list.pop(index)

    @log_aware(log)
    def get_accessionrecord_list(self):
        """Return the list of accession records."""
        return self._accessionrecord

    @log_aware(log)
    def set_accessionrecord_list(self, acc_rec_list):
        """Replace the accession record list with *acc_rec_list*."""
        self.del_accessionrecord_list()
        for x in acc_rec_list:
            self.add_accessionrecord(x)

    @log_aware(log)
    def del_accessionrecord_list(self):
        """Remove every accession record from the Stage."""
        while self.get_accessionrecord_list():
            self.pop_accessionrecord()

    @log_aware(log)
    def add_accessionrecord(self, accrec):
        """Append an accession record."""
        self._accessionrecord.append(accrec)
        log.debug("Added accession record to {}({}): ({})".format(
            str(type(self)),
            self.identifier,
            str(accrec))
        )

    @log_aware(log)
    def get_accessionrecord(self, index):
        """Return the accession record at *index*."""
        return self.get_accessionrecord_list()[index]

    @log_aware(log)
    def pop_accessionrecord(self, index=None):
        """Remove and return the accession record at *index* (default: the last one)."""
        # Bug fix: the original popped from the bound method object
        # (self.get_accessionrecord_list.pop()), which raised AttributeError;
        # the accessor must be *called* to obtain the list first.
        if index is None:
            x = self.get_accessionrecord_list().pop()
        else:
            x = self.get_accessionrecord_list().pop(index)
        log.debug("Popped accession record from {}({}): {}".format(
            str(type(self)),
            self.identifier,
            str(x))
        )
        return x

    @log_aware(log)
    def get_adminnote_list(self):
        """Return the list of administrative notes."""
        return self._adminnote

    @log_aware(log)
    def set_adminnote_list(self, adminnotelist):
        """Replace the administrative note list with *adminnotelist*."""
        self.del_adminnote_list()
        for x in adminnotelist:
            self.add_adminnote(x)

    @log_aware(log)
    def del_adminnote_list(self):
        """Remove every administrative note."""
        while self.get_adminnote_list():
            self.pop_adminnote()

    @log_aware(log)
    def add_adminnote(self, adminnote):
        """Append an administrative note."""
        self.get_adminnote_list().append(adminnote)
        log.debug("Added adminnote to {}({}): {}".format(
            str(type(self)),
            self.identifier,
            str(adminnote))
        )

    @log_aware(log)
    def get_adminnote(self, index):
        """Return the administrative note at *index*."""
        return self.get_adminnote_list()[index]

    @log_aware(log)
    def pop_adminnote(self, index=None):
        """Remove and return the administrative note at *index* (default: last)."""
        if index is None:
            x = self.get_adminnote_list().pop()
        else:
            x = self.get_adminnote_list().pop(index)
        log.debug("Popped adminnote from {}({}): {}".format(
            str(type(self)),
            self.identifier,
            str(x))
        )
        return x

    @log_aware(log)
    def get_legalnote_list(self):
        """Return the list of legal notes."""
        return self._legalnote

    @log_aware(log)
    def set_legalnote_list(self, legalnote_list):
        """Replace the legal note list with *legalnote_list*."""
        self.del_legalnote_list()
        for x in legalnote_list:
            self.add_legalnote(x)

    @log_aware(log)
    def del_legalnote_list(self):
        """Remove every legal note."""
        while self.get_legalnote_list():
            self.pop_legalnote()

    @log_aware(log)
    def add_legalnote(self, legalnote):
        """Append a legal note."""
        self.get_legalnote_list().append(legalnote)
        log.debug("Added legalnote to {}: {}".format(
            str(type(self)),
            str(legalnote))
        )

    @log_aware(log)
    def get_legalnote(self, index):
        """Return the legal note at *index*."""
        return self.get_legalnote_list()[index]

    @log_aware(log)
    def pop_legalnote(self, index=None):
        """Remove and return the legal note at *index* (default: the last one)."""
        if index is None:
            return self.get_legalnote_list().pop()
        else:
            return self.get_legalnote_list().pop(index)

    identifier = property(get_identifier,
                          set_identifier)
    materialsuite_list = property(get_materialsuite_list,
                                  set_materialsuite_list,
                                  del_materialsuite_list)
    accessionrecord_list = property(get_accessionrecord_list,
                                    set_accessionrecord_list,
                                    del_accessionrecord_list)
    adminnote_list = property(get_adminnote_list,
                              set_adminnote_list,
                              del_adminnote_list)
    legalnote_list = property(get_legalnote_list,
                              set_legalnote_list,
                              del_legalnote_list)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/structures/abc/accessioncontainer.py | accessioncontainer.py | py | 7,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "abc.ABCMeta",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "uchicagoldrtoolsuite.log_aware",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.dum... |
24801071982 | import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
def fft_smoothing(coords):
#TODO: More relevant procedure required
signal = coords[:,0] + 1j*coords[:,1]
# FFT and frequencies
fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
# filter
cutoff = 0.1
fft[np.abs(freq) > cutoff] = 0
# IFFT
signal_filt = np.fft.ifft(fft)
coords[:,0] = signal_filt.real
coords[:,1] = signal_filt.imag
return coords
def pl_cytopath_alignment(adata, basis="umap", smoothing=False, figsize=(15,4), size = 3,
                          show=True, save=False,save_type='png', folder=""):
    """For every trajectory of every end point, plot (1) mean/std alignment
    score per step, (2) cells coloured by step time with the trajectory
    overlaid, and (3) cells coloured by cell fate probability.

    NOTE(review): assumes *adata* is an AnnData-like object populated by the
    cytopath pipeline (uns['trajectories'], uns['run_info'], obsm['X_'+basis]).
    """
    map_state = adata.obsm['X_'+basis]
    # Per-trajectory mean/std alignment curves (collected but never returned).
    av_allign_score_glob=[]
    std_allign_score_glob=[]
    step_time = adata.uns['trajectories']['step_time']
    fate_prob = adata.uns['trajectories']['cell_fate_probability']
    # Running index over all trajectories (row index into step_time/fate_prob).
    sequence=0
    # TODO: Separate per step average alignment score calculation from plotting
    for end_point_cluster in adata.uns['run_info']["end_point_clusters"]:
        # Per-step cell records restricted to this end point cluster.
        trajectories = adata.uns['trajectories']["cells_along_trajectories_each_step"]\
        [np.where(adata.uns['trajectories']["cells_along_trajectories_each_step"]["End point"]==end_point_cluster)[0]]
        for i in range(adata.uns['run_info']['trajectory_count'][end_point_cluster]):
            av_trajectories=trajectories[np.where(trajectories["Trajectory"]==i)[0]]
            av_allign_score=np.zeros((len(np.unique(av_trajectories["Step"]))))
            std_allign_score=np.zeros((len(np.unique(av_trajectories["Step"]))))
            # Mean/std of the alignment score at every step of trajectory i.
            for l in range(len(np.unique(av_trajectories["Step"]))):
                av_allign_score[l]=np.average((av_trajectories[np.where(av_trajectories["Step"]==l)[0]]["Allignment Score"]))
                std_allign_score[l]=np.std((av_trajectories[np.where(av_trajectories["Step"]==l)[0]]["Allignment Score"]))
            # Plotting
            path = folder+"_end_point_"+end_point_cluster+"_cytopath_"+str(i)+\
            "occurance"+str(adata.uns['run_info']["trajectories_sample_counts"][end_point_cluster][i])+"."+save_type
            fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
            ax1.plot(range(len(np.unique(av_trajectories["Step"]))), av_allign_score, color='black')
            ax1.fill_between(range(len(np.unique(av_trajectories["Step"]))),
                             av_allign_score+std_allign_score, av_allign_score-std_allign_score, facecolor='grey', alpha=0.6)
            ax1.set_ylabel('Mean/std. of alignment scores per step')
            ax1.set_xlabel('Steps')
            # Plot step size for aligned cells
            sc_step = ax2.scatter(map_state[:,0], map_state[:,1], alpha=0.6, s=size, color="whitesmoke")
            sc_step = ax2.scatter(map_state[:,0], map_state[:,1], alpha=0.9, s=size,
                                  vmin=0, vmax=np.nanmax(step_time), c=step_time[sequence,:], cmap='YlGnBu')
            fig.colorbar(sc_step, ax=ax2, label='Step time')
            ax2.set_ylabel(basis.upper()+' 2')
            ax2.set_xlabel(basis.upper()+' 1')
            ax2.set_title('End point: {}-{} Support: {}/{}'.format(end_point_cluster, i,
                          adata.uns['run_info']['trajectories_sample_counts'][end_point_cluster][i],
                          int(adata.uns['samples']['cell_sequences'].shape[0]/\
                          adata.uns['run_info']['end_point_clusters'].shape[0])))
            # Plot alignment score
            sc_score = ax3.scatter(map_state[:,0], map_state[:,1], alpha=0.6, s=size, color="whitesmoke")
            sc_score = ax3.scatter(map_state[:,0], map_state[:,1], alpha=0.9, s=size,
                                   vmin=0, vmax=1, c=fate_prob[sequence,:], cmap='Reds')
            fig.colorbar(sc_score, ax=ax3, label='Cell fate probability')
            ax3.set_ylabel(basis.upper()+' 2')
            ax3.set_xlabel(basis.upper()+' 1')
            # Plot trajectory
            # NOTE(review): if neither branch below matches, `coords` is
            # undefined and the ax2.plot call will raise NameError.
            if basis in adata.uns['run_info']['projection_basis']:
                coords = np.array(adata.uns['trajectories']['trajectories_coordinates'][end_point_cluster]['trajectory_'+str(i)+'_coordinates'])
            elif ('pca' in adata.uns['run_info']['projection_basis']) and (basis != 'pca'):
                # Trajectory stored in PCA space: snap each trajectory point to
                # its nearest cell, then take that cell's position in the plot basis.
                coords_ = np.array(adata.uns['trajectories']['trajectories_coordinates'][end_point_cluster]['trajectory_'+str(i)+'_coordinates'])
                cell_sequences=[]
                for j in range(len(coords_)):
                    cell_sequences.append(spatial.KDTree(adata.obsm['X_pca']).query(coords_[j])[1])
                coords = map_state[cell_sequences]
            if smoothing == True:
                coords = fft_smoothing(coords)
            ax2.plot(coords[:, 0], coords[:, 1], color='black')
            ax3.plot(coords[:, 0], coords[:, 1], color='black')
            plt.tight_layout()
            if save:
                fig.savefig(path, bbox_inches='tight', dpi=300)
            if show:
                plt.show()
            # End plotting
            sequence+=1
            av_allign_score_glob.append(av_allign_score)
            std_allign_score_glob.append(std_allign_score)
| aron0093/cytopath | cytopath/plotting_functions/plot_alignment.py | plot_alignment.py | py | 5,486 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "numpy.fft.fft",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.fft.fftfreq",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_... |
7677909103 | from controller import Robot, Motor, DistanceSensor
import numpy as np
from collections import deque
# import opencv
import cv2 as cv
MAX_SPEED = 47.6
WHEEL_RADIUS = 21
INF = float('inf')
class ChaseFoodState:
    """Drive toward detected food (green); terminates the robot on contact."""

    def __init__(self, r):
        self.r = r

    def check_transition(self):
        # Bumping while chasing means we reached the food: stop for good.
        if self.r.has_bumped:
            # if we bump to the food we are done
            print("donete")
            self.r.stop()

    def tick(self):
        # compute food angle from the camera image
        food_angle = self.r.get_food_angle(2000)
        if food_angle == "none":
            # Food is no longer visible: fall back to wall following.
            # Bug fix: return immediately so we do not also drive forward this
            # tick (the original fell through to the "center" branch below).
            print("we lost food")
            self.r.state = WallFollowState(self.r)
            return
        # steer toward the food blob
        if food_angle == "left":
            print("turning left")
            self.r.turn_left(.2*MAX_SPEED, 20)
        elif food_angle == "right":
            print("turning right")
            self.r.turn_right(.2*MAX_SPEED, 20)
        else:
            print("moving forward")
            self.r.move_forward(.2*MAX_SPEED, 500)
        # force sensors update
        self.r.update_sensors(bump_th=250, color_th=4000)
        # check transitions
        self.check_transition()

    def __str__(self):
        return "ChaseFoodState"
class WallFollowState:
    """Default state: follow the nearest wall until food, an enemy or danger appears."""

    def __init__(self, r):
        self.r = r
        # Which side the wall was last seen on: "left", "right" or "straight".
        self.current_wall = "straight"

    def check_transition(self):
        if self.r.has_food:
            print("going to chase food")
            self.r.state = ChaseFoodState(self.r)
        elif self.r.has_enemy:
            print("going to avoid enemy")
            self.r.state = AvoidEnemyState(self.r)
        elif self.r.has_danger:
            print("going to avoid danger")
            self.r.state = AvoidDangerState(self.r)

    def tick(self):
        # Follow the wall and remember which side it is on.  Bug fix:
        # follow_wall() returns the detected side, but the original discarded
        # it, so current_wall stayed "straight" forever and the lost-wall
        # turn boost inside follow_wall() never activated.
        self.current_wall = self.r.follow_wall(self.current_wall)
        # check transitions (sensors are updated regularly)
        self.check_transition()

    def __str__(self):
        return "WallFollowState"
class AvoidDangerState:
    """Back away fast and swing around after the danger flag is raised."""

    def __init__(self, r):
        self.r = r

    def check_transitions(self):
        bot = self.r
        if bot.has_food:
            print("going to chase food")
            bot.state = ChaseFoodState(bot)
        elif bot.has_enemy:
            print("going to avoid enemy")
            bot.state = AvoidEnemyState(bot)
        if not bot.has_danger:
            print("going to wall follow")
            bot.state = WallFollowState(bot)

    def tick(self):
        # fast reverse-and-spin away from the danger
        self.r.turn_back(0.5 * MAX_SPEED, 20)
        # refresh the sensor-derived flags (note the lower colour threshold)
        self.r.update_sensors(bump_th=250, color_th=2500)
        # decide which state handles the next tick
        self.check_transitions()

    def __str__(self):
        return "AvoidDangerState"
class AvoidEnemyState:
    """Retreat slowly (reverse with a swing) while the enemy flag is raised."""

    def __init__(self, r):
        self.r = r

    def check_transitions(self):
        bot = self.r
        if bot.has_food:
            print("going to chase food")
            bot.state = ChaseFoodState(bot)
        elif bot.has_danger:
            print("going to avoid danger")
            bot.state = AvoidDangerState(bot)
        if not bot.has_enemy:
            print("going to wall follow")
            bot.state = WallFollowState(bot)

    def tick(self):
        print("avoiding enemy")
        # slow reverse with a swing away from the enemy
        self.r.move_backward_turn(0.25 * MAX_SPEED, 200)
        print("avoiding enemy done")
        # refresh sensor-derived flags, then decide where to go next
        self.r.update_sensors(bump_th=250, color_th=4000)
        self.check_transitions()

    def __str__(self):
        return "AvoidEnemyState"
class FixBumpState:
    """Recover from a collision by backing off and turning around, repeating
    the maneuver until the bump flag clears."""

    def __init__(self, r):
        self.r = r

    def check_transition(self):
        if not self.r.has_bumped:
            print("going to wall follow")
            self.r.state = WallFollowState(self.r)

    def tick(self):
        bot = self.r
        # back off at full speed, then spin; repeated while still bumping
        bot.move_backward(MAX_SPEED, 100)
        bot.turn_back(MAX_SPEED, 4)
        print("fixing bump")
        # refresh the flags so check_transition sees the post-maneuver state
        bot.update_sensors(bump_th=250, color_th=4000)
        self.check_transition()

    def __str__(self):
        return "FixBumpState"
class KheperaBot:
    """Webots Khepera controller: wraps devices, motion primitives, camera
    colour detection and the top-level state machine loop."""
    def __init__(self):
        self.robot = Robot()
        # basic simulation time step (ms); also the unit for move durations
        self.ts = int(self.robot.getBasicTimeStep())
        # counter used to periodically dump debug images (currently disabled)
        self.pic_idx = 0
        self.sensors = {
            "left": self.robot.getDevice("left infrared sensor"),
            "right": self.robot.getDevice("right infrared sensor"),
            "front": self.robot.getDevice("front infrared sensor"),
            "front left": self.robot.getDevice("front left infrared sensor"),
            "front right": self.robot.getDevice("front right infrared sensor"),
            "camera": self.robot.getDevice("camera")
        }
        self.motors={
            "left wheel": self.robot.getDevice("left wheel motor"),
            "right wheel": self.robot.getDevice("right wheel motor")
        }
        self.init_sensors()
        self.init_motors()
        self.has_bumped = False # bump = the robot has run into a wall
        self.has_enemy = False # enemy = the robot found something blue
        self.has_food = False # food = the robot found something green
        self.has_danger = False # danger = the robot found something red
        # current behaviour; states replace this attribute to transition
        self.state = WallFollowState(self)
    # initialization
    def init_sensors(self):
        """Enable every sensor at the basic time step."""
        # init sensors -> enable them by timestep
        for sensor in self.sensors.values():
            sensor.enable(self.ts)
    def init_motors(self):
        """Put both wheels in velocity-control mode, initially stopped."""
        # init motors -> set position to inf and velocity to 0
        for motor in self.motors.values():
            motor.setPosition(float('inf'))
            motor.setVelocity(0)
    # movements
    def move_forward(self, velocity, ammount):
        """Drive straight forward for *ammount* (ms; note: NOT scaled by ts)."""
        # move forward -> set velocity both wheels the same value
        self.motors["left wheel"].setVelocity(velocity)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(ammount)
    def move_backward(self, velocity, ammount):
        """Drive straight backward for *ammount* time steps."""
        # move backward -> set velocity both wheels the same value but negative
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-velocity)
        self.robot.step(self.ts*ammount)
    def move_backward_turn(self, velocity, ammount):
        """Reverse straight for 75% of the duration, then ease off the right
        wheel to swerve while still backing up for the remaining 25%."""
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-velocity)
        self.robot.step(int(0.75*self.ts*ammount))
        self.motors["left wheel"].setVelocity(-velocity)
        self.motors["right wheel"].setVelocity(-0.25*velocity)
        self.robot.step(int(0.25*self.ts*ammount))
    def turn_left(self, velocity, ammount=2):
        # turn left -> set velocity left wheel to 0 and right wheel to velocity
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(self.ts*ammount)
    def turn_right(self, velocity, ammount=2):
        # turn right -> set velocity left wheel to velocity and right wheel to 0
        self.motors["left wheel"].setVelocity(velocity)
        self.motors["right wheel"].setVelocity(0)
        self.robot.step(self.ts*ammount)
    def turn_back(self, velocity, ammount):
        # turn back -> stop the left wheel and spin the right wheel to pivot
        # around; also clears the danger flag once the maneuver finishes
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(velocity)
        self.robot.step(self.ts*ammount)
        self.has_danger=False
    def stop(self):
        # stop -> set velocity both wheels to 0
        self.motors["left wheel"].setVelocity(0)
        self.motors["right wheel"].setVelocity(0)
        self.robot.step(self.ts)
        # ts = -1 makes main_loop's robot.step(-1) terminate the controller
        self.ts = -1
        return
    def follow_wall(self, w=None, threshold=150):
        """One wall-following step; returns the wall side: 'left', 'right' or
        'straight'.  *w* is the previously detected side - when that wall is
        lost, the matching turn gain is doubled to steer back toward it."""
        # base speed drops as the front sensor value (proximity) rises
        speed_offset = 0.3 * (MAX_SPEED - 0.03 * self.sensors["front"].getValue())
        fl, fr = self.sensors["front left"].getValue(), self.sensors["front right"].getValue()
        l, r = self.sensors["left"].getValue(), self.sensors["right"].getValue()
        delta_r, delta_l = 0.02, 0.02
        # if we loose our wall turn HARDER
        if w=="right" and r<threshold and fr<threshold and l<threshold and fl<threshold:
            delta_l=2*delta_l
        if w=="left" and l<threshold and fl<threshold and r<threshold and fr<threshold:
            delta_r=2*delta_r
        # differential term steers away from whichever front corner is closer
        speed_delta = delta_l * fl - delta_r * fr
        self.motors["left wheel"].setVelocity(speed_offset + speed_delta)
        self.motors["right wheel"].setVelocity(speed_offset - speed_delta)
        if max(fl,l)<threshold and max(fr,r)<threshold:
            return "straight"
        return "left" if max(fl, l)>max(fr, r) else "right"
    # sensors
    def process_camera(self):
        """Grab a camera frame, crop/rotate it, threshold the RGB channels and
        return (red_px, green_px, blue_px, (green_left, green_center,
        green_right)) pixel counts."""
        w,h = self.sensors["camera"].getWidth(), self.sensors["camera"].getHeight()
        img = self.sensors["camera"].getImage()
        image_array = np.array(self.sensors["camera"].getImageArray(), dtype=np.uint8)
        image_array = cv.resize(image_array, (h//2, w//2))
        # take only center of image
        image_w, image_h = image_array.shape[0], image_array.shape[1]
        delta_size = 100
        image_array = image_array[image_w//2-delta_size:image_w//2+delta_size, image_h//2-delta_size:image_h//2+delta_size]
        # rotate image -90 degrees
        image_array = cv.rotate(image_array, cv.ROTATE_90_CLOCKWISE)
        # flip image
        image_array = cv.flip(image_array, 1)
        # debug image dump, disabled via `and False`
        if self.pic_idx%3==0 and False:
            print("save image")
            image_rgb = cv.cvtColor(image_array, cv.COLOR_BGR2RGB)
            cv.imwrite("image"+str(self.pic_idx)+".png", image_rgb)
        # remove white pixels
        #image_array[image_array.all() > 100] = 0
        # binarize red channel (threshold 175)
        red_channel = image_array[:,:,0]
        red_channel[red_channel < 175] = 0
        red_channel[red_channel > 0] = 255
        # binarize green channel (threshold 150)
        green_channel = image_array[:,:,1]
        green_channel[green_channel < 150] = 0
        green_channel[green_channel > 0] = 255
        # binarize blue channel (threshold 150)
        blue_channel = image_array[:,:,2]
        blue_channel[blue_channel < 150] = 0
        blue_channel[blue_channel > 0] = 255
        # per-channel debug dump, also disabled via `and False`
        if self.pic_idx%3==0 and False:
            cv.imwrite("red"+str(self.pic_idx)+".png", red_channel)
            cv.imwrite("green"+str(self.pic_idx)+".png", green_channel)
            cv.imwrite("blue"+str(self.pic_idx)+".png", blue_channel)
        self.pic_idx += 1
        # make the channels mutually exclusive: a pixel counts for one colour only
        blue_channel[green_channel > 0] = 0
        blue_channel[red_channel > 0] = 0
        green_channel[blue_channel > 0] = 0
        green_channel[red_channel > 0] = 0
        red_channel[blue_channel > 0] = 0
        red_channel[green_channel > 0] = 0
        red_px = np.count_nonzero(red_channel)
        # count food pixels by summing left third, center third and right third
        # NOTE(review): green_px_right slices from shape[1]//3 onward, so it
        # overlaps the center third; 2*shape[1]//3 was probably intended.
        # Likewise green_px excludes the center count - confirm intent.
        green_px_left = np.count_nonzero(green_channel[:, :green_channel.shape[1]//3])
        green_px_center = np.count_nonzero(green_channel[:, green_channel.shape[1]//3:green_channel.shape[1]//3*2])
        green_px_right = np.count_nonzero(green_channel[:, green_channel.shape[1]//3:])
        green_px = green_px_left+green_px_right
        blue_px = np.count_nonzero(blue_channel)
        return red_px, green_px, blue_px, (green_px_left, green_px_center, green_px_right)
    def get_food_angle(self, th):
        """Classify where food (green) sits in view: 'left', 'right', 'center'
        or 'none' (all thirds below threshold *th*)."""
        # get food position by counting pixels
        r, g, b, (gl, gc, gr) = self.process_camera()
        print("-> Food:",gl, gc, gr)
        if gl<th and gr<th and gc<th:
            return "none"
        if gl>gr and gl>gc:
            return "left"
        elif gl<gr and gr>gc:
            return "right"
        else:
            return "center"
    def update_sensors(self, bump_th=1000, color_th=15000):
        """Refresh has_bumped / has_enemy / has_food / has_danger flags.

        A colour flag is set only when its channel count exceeds color_th and
        both other channels stay below it.
        NOTE(review): has_enemy keys on red and has_danger on blue here, which
        contradicts the __init__ comments (enemy=blue, danger=red) - confirm
        which mapping is intended.
        """
        bump_left_val = self.sensors["left"].getValue()
        bump_right_val = self.sensors["right"].getValue()
        bump_front_val = self.sensors["front"].getValue()
        print("-> Bumpers values:",bump_left_val, bump_right_val, bump_front_val)
        # NOTE(review): the sensors are read a second time below; the *_val
        # variables above are only used for the debug print.
        bump_left = self.sensors["left"].getValue() > bump_th
        bump_right = self.sensors["right"].getValue() > bump_th
        bump_front = self.sensors["front"].getValue() > bump_th
        self.has_bumped = bump_left or bump_right or bump_front
        print("-> Bumpers:",bump_left, bump_right, bump_front)
        r, g, b, _ = self.process_camera()
        negative_th = color_th
        self.has_enemy = r > color_th and g < negative_th and b < negative_th
        self.has_food = g > color_th and r < negative_th and b < negative_th
        self.has_danger = b > color_th and r < negative_th and g < negative_th
        print("-> colors (RGB):",r,g,b)
        print("-> Enemy, Food or Danger:",self.has_enemy, self.has_food, self.has_danger)
    def main_loop(self):
        """Main control loop: refresh sensors about once per simulated second
        and delegate each tick to the current state object."""
        while self.robot.step(self.ts) != -1:
            if self.robot.getTime() % 1 <= self.ts / 500:
                self.update_sensors(bump_th=250, color_th=2550)
            self.state.tick()
# Entry point: build the robot controller and hand control to its state machine.
robot = KheperaBot()
robot.main_loop()
{
"api_name": "controller.Robot",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"li... |
75097728425 | import h5py
import numpy as np
import os
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE
import random
# A simple example of what SMOTE data generation might look like...
# Grab the data
path=os.path.join(os.getcwd() , 'batch_train_223.h5')
file = h5py.File(path, 'r')
keys = file.keys()
samples = [file[key] for key in keys]
# List to hold the images and the classes
images=[]
classes=[]
# Populate the images and classes with examples from the hdf5 file
# (each image is flattened to a column vector for SMOTE)
for sample in samples[:20]:
    images.append(sample['cbed_stack'][()].reshape(-1,1))
    classes.append(sample.attrs['space_group'].decode('UTF-8'))
# Display the original data
fig, axes = plt.subplots(2,3, figsize=(12, 10))
for ax, cbed in zip(axes.flatten()[:3], samples[10]['cbed_stack']):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[3:], samples[11]['cbed_stack']):
    ax.imshow(cbed**0.25)
title = "Space Group: {} - Original".format(samples[10].attrs['space_group'].decode('UTF-8'))
fig.suptitle(title, size=40)
plt.savefig('original.png')
# Change the dimension of images to a size that SMOTE() likes and call SMOTE()
images=np.squeeze(np.array(images))
sm = SMOTE(random_state=42, k_neighbors=6, ratio={'123':10, '2':15})
images_res, classes_res = sm.fit_resample(images, classes)
# List to hold the final images
images_final=[]
image_res_list=images_res.tolist()
# NOTE(review): the loop variable shadows the list it iterates; it still
# works, but a distinct name would be clearer.
for image_res_list in image_res_list:
    images_final.append(np.reshape(image_res_list, (3, 512, 512)))
# print("length of images: {}".format(len(images)))
# print("length of images_final: {}".format(len(images_final)))
# Generate random numbers to display the generated images
listNum = random.sample(range(20,25), 4)
# Display the synthetic images
# NOTE(review): every row below reuses images_final[listNum[0]]; the other
# sampled indices (listNum[1..3]) were probably intended for rows 2-4.
fig, axes = plt.subplots(4, 3, figsize=(12, 10))
for ax, cbed in zip(axes.flatten()[:3], images_final[listNum[0]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[3:], images_final[listNum[0]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[6:], images_final[listNum[0]]):
    ax.imshow(cbed**0.25)
for ax, cbed in zip(axes.flatten()[9:], images_final[listNum[0]]):
    ax.imshow(cbed**0.25)
title = "Space Group: {} - Generated".format(classes_res[listNum[0]])
fig.suptitle(title, size=40)
plt.savefig('generated.png')
# print("Original data of class{}: {}".format(classes[-1], samples[-1]['cbed_stack'][()]))
# print("Generated data of class{}: {}".format(classes_res[-1], images_final[-1]))
| emilyjcosta5/datachallenge2 | train/testSMOTE.py | testSMOTE.py | py | 2,466 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 12... |
11045304899 | from datetime import datetime, timedelta
from functools import reduce
from django import db
from django.conf import settings
from django.db import models
from django.db.models import Sum
from django.contrib.auth import get_user_model
from jalali_date import date2jalali
from dmo.models import DmoDay, Dmo
User = get_user_model()
class TodoListManager(models.Manager):
    """Manager that resolves a user's todo list for a date and keeps the
    user's monthly DMO goals attached to it."""
    def get_todo_list(self, user, date):
        """Get (or create) the user's list for *date*, with DMO items attached."""
        todo_list, _ = self.get_or_create(user=user, date=date)
        self._attach_dmo_items(todo_list)
        return todo_list
    def _attach_dmo_items(self, todo_list):
        """Add one item per Dmo goal of the list's (Jalali) year/month,
        skipping goals already present by title."""
        date = date2jalali(todo_list.date)
        user_dmos = Dmo.objects.filter(user=todo_list.user, year=date.year, month=date.month)
        for dmo in user_dmos:
            if todo_list.items.filter(title=dmo.goal).exists():
                continue
            todo_list.items.create(title=dmo.goal)
    def move_lists_to_today(self):
        """Re-date every list in this queryset to today.

        NOTE(review): assigns a datetime to a DateField; Django coerces it,
        but datetime.now().date() would be more explicit.
        """
        today = datetime.now()
        self.update(date=today)
    def move_lists_to_date(self, date):
        """Re-date every list in this queryset to *date*."""
        self.update(date=date)
class TodoListItemManager(models.Manager):
    """Manager for TodoListItem with bulk-move and convenience creation."""

    def move_tasks_to_today_list(self):
        """Move every item in this queryset onto its owner's list for today.

        Raises:
            Exception: if the queryset spans more than one user (the items
                would otherwise all be merged onto a single user's list).
        """
        # BUG FIX: without .distinct(), two items of the same user produced
        # two rows and tripped the multi-user guard incorrectly.
        users = self.values('todo_list__user').distinct()
        if len(users) > 1:
            raise Exception('Multiple users found.')
        user = users[0]['todo_list__user']
        # BUG FIX: TodoListManager has no ``get_today``; use get_todo_list
        # with today's date.
        today_list = TodoList.objects.get_todo_list(user, datetime.now())
        self.update(todo_list=today_list)

    def add_item(self, title, desc, user, date=None, status=None):
        """Create an item titled ``title`` on ``user``'s list for ``date``.

        ``date`` defaults to now and ``status`` to PENDING.
        """
        # BUG FIXES: the parameter was misspelled ``stauts`` while the body
        # read the undefined name ``status`` (guaranteed NameError); statuses
        # live on TodoListItem, not TodoList; and the model field is ``desc``,
        # not ``desceription`` (would raise TypeError on create()).
        if not date:
            date = datetime.now()
        if not status:
            status = TodoListItem.Statuses.PENDING
        todo_list = TodoList.objects.get_todo_list(user, date)
        self.create(todo_list=todo_list, title=title, desc=desc, status=status)
class TodoList(models.Model):
    """A per-user, per-day todo list (exactly one per (date, user) pair)."""

    date = models.DateField()
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='کاربر',
                             related_name='todo_lists')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    objects = TodoListManager()

    class Meta:
        verbose_name = 'Todo لیست'
        verbose_name_plural = 'Todo لیست'
        # One list per user per calendar day.
        unique_together = ('date', 'user', )

    def __str__(self):
        return f'{self.user} - {self.date}'

    def move_list_to_date(self, to_date, commit=True):
        # Reschedule this list; pass commit=False to defer the DB write.
        self.date = to_date
        if commit:
            self.save()
class TodoListItem(models.Model):
    """A single task on a TodoList; optionally time-tracked and mirrored
    from a DMO goal (matched by user + title within the Jalali month)."""

    class Statuses(models.IntegerChoices):
        PENDING = 0, 'در انتظار انجام'
        DONE = 100, 'انجام شد'
        NOT_DONE = 200, 'انجام نشد'

    todo_list = models.ForeignKey(TodoList, verbose_name='Todo', related_name='items',
                                  on_delete=models.CASCADE)
    title = models.CharField(max_length=255, verbose_name='عنوان')
    desc = models.TextField(verbose_name='توضیحات', blank=True)
    status = models.IntegerField(verbose_name='وضعیت', choices=Statuses.choices,
                                 default=Statuses.PENDING)
    dmo_day = models.OneToOneField(DmoDay, on_delete=models.CASCADE, verbose_name='دمو',
                                   related_name='todo_list_item', null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    objects = TodoListItemManager()

    class Meta:
        verbose_name = 'آیتم Todo لیست'
        verbose_name_plural = 'آیتم Todo لیست'

    def __str__(self):
        return self.title

    def change_status(self, status: Statuses, commit=True):
        """Set the item's status; pass commit=False to defer the DB write."""
        self.status = status
        if commit:
            self.save()

    def _sync_dmo(self, jalali_date, done):
        # Shared helper (was duplicated in done_task/undone_task): propagate
        # completion state to the DMO whose goal this item mirrors, if any.
        dmo = Dmo.objects.filter(user=self.todo_list.user, goal=self.title,
                                 year=jalali_date.year, month=jalali_date.month
                                 ).first()
        if dmo:
            dmo.complete(jalali_date.day, done=done)

    def done_task(self, commit=True):
        """Mark the task DONE and sync the linked DMO day."""
        self.status = self.Statuses.DONE
        self._sync_dmo(date2jalali(self.todo_list.date), done=True)
        if commit:
            self.save()

    def undone_task(self, commit=True):
        """Mark the task NOT_DONE and sync the linked DMO day."""
        # BUG FIX: dropped the stray ``self.end_datetime = datetime.now()`` —
        # the model has no such field (it lives on TodoListItemTimeTrack), so
        # the assignment set a transient attribute that was never persisted.
        self.status = self.Statuses.NOT_DONE
        self._sync_dmo(date2jalali(self.todo_list.date), done=False)
        if commit:
            self.save()

    def start_task(self):
        """Open a new time track; refuse if one is already running."""
        if self.time_tracks.filter(end_datetime__isnull=True).exists():
            raise Exception('Task is already started.')
        TodoListItemTimeTrack.objects.create(
            item=self,
            start_datetime=datetime.now(),
            end_datetime=None
        )

    def finish_task(self):
        """Close every still-open time track at the current moment."""
        now = datetime.now()
        self.time_tracks.filter(end_datetime=None).update(end_datetime=now)

    def toggle_start_stop(self):
        """Stop the running time track if there is one, otherwise start one."""
        started_tracks = self.time_tracks.filter(end_datetime__isnull=True)
        if started_tracks.exists():
            started_tracks.update(end_datetime=datetime.now())
            return
        TodoListItemTimeTrack.objects.create(
            item=self,
            start_datetime=datetime.now(),
            end_datetime=None
        )

    def get_total_time_seconds(self):
        """Sum the durations of all finished time tracks, in seconds.

        NOTE(review): returns ``timedelta.seconds`` (as the original did),
        which discards whole days; switch to ``total_seconds()`` if totals
        over 24h matter.
        """
        # DB aggregation of datetime differences is backend-dependent, so the
        # durations are summed in Python.
        time_tracks = self.time_tracks.filter(end_datetime__isnull=False).values('start_datetime', 'end_datetime')
        total = sum((t['end_datetime'] - t['start_datetime'] for t in time_tracks), timedelta(seconds=0))
        return total.seconds

    def get_last_ongoing_time_track(self):
        """Return the most recent still-open time track, or None."""
        return self.time_tracks.filter(end_datetime__isnull=True).last()
class TodoListItemTimeTrack(models.Model):
    """One start/stop interval of work on a TodoListItem.

    An open (running) track has ``end_datetime`` NULL.
    """

    item = models.ForeignKey(TodoListItem, on_delete=models.CASCADE, verbose_name='آیتم',
                             related_name='time_tracks')
    start_datetime = models.DateTimeField(verbose_name='شروع', null=True, blank=True)
    end_datetime = models.DateTimeField(verbose_name='پایان', null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, verbose_name='آخرین ویرایش')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='ایجاد شده در')

    class Meta:
        verbose_name = 'Todo لیست زمان'
        verbose_name_plural = 'Todo لیست زمان'

    def __str__(self):
        return f'{self.item}'
| mohsen-hassani-org/teamche | todo_list/models.py | models.py | py | 6,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models.Manager",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
... |
23713128222 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import yaml
from yaml.loader import SafeLoader
import subprocess
import netifaces
import argparse
import os
import time
import fcntl
'''yaml
if_list:
- ipaddr: 10.90.3.37
prefix: 24
mac: 52:54:84:11:00:00
gateway: 10.90.3.1
- ipaddr: 192.168.100.254
prefix: 24
mac: 52:54:84:00:08:38
eip_list:
- eip: 10.90.2.252
vm-ip: 192.168.100.192
- eip: 10.90.2.253
vm-ip: 192.168.100.193
port_forward_list:
# master1.kcp5-arm.iefcu.cn
- eip: 10.90.2.254
protocal: udp
port: 80
end_port: 82
vm-port: 80
vm-ip: 192.168.100.190
'''
# https://blog.csdn.net/sunny_day_day/article/details/119893768
def load_interface():
    """Map each NIC's MAC address to its interface name.

    Returns:
        dict: {mac_address: interface_name} for every local interface.
    """
    return {
        netifaces.ifaddresses(name)[netifaces.AF_LINK][0]['addr']: name
        for name in netifaces.interfaces()
    }
# Testing notes:
# missing file; empty file; YAML syntax errors; missing config entries.
# Read the configuration file.
def load_config():
    """Load /etc/kylin-vr/kylin-vr.yaml and return the parsed mapping.

    Returns:
        The parsed YAML document, or None when the file is missing/unreadable
        (callers already treat a falsy result as 'load config failed').
    """
    # BUG FIX: the original ended with an unreachable ``return nil`` — ``nil``
    # is not a Python name and would have raised NameError if ever reached.
    try:
        with open('/etc/kylin-vr/kylin-vr.yaml') as f:
            return yaml.load(f, Loader=SafeLoader)
    except OSError:
        return None
# XXX: optimize — update config files incrementally?
def one_interface_conf(ifname, ifconf, eip_list, out_dir='/var/run/kylin-vr'):
    """Write a RHEL-style ifcfg file for ``ifname`` into ``out_dir``.

    Args:
        ifname: interface name (e.g. 'eth0').
        ifconf: dict with 'ipaddr', 'prefix' and an optional 'gateway'.
        eip_list: iterable of elastic IPs added as /32 secondary addresses.
        out_dir: staging directory (new, backward-compatible parameter;
            defaults to the path the original hard-coded).
    """
    filename = out_dir + '/ifcfg-' + ifname
    # ``with`` guarantees the handle is closed even if a write fails
    # (the original leaked it on error).
    with open(filename, 'w') as fp:
        fp.write('NAME=%s\nDEVICE="%s"\n' % (ifname, ifname))
        fp.write('''BOOTPROTO="none"
ONBOOT="yes"
TYPE="Ethernet"
IPV6INIT="no"
''')
        fp.write('''
IPADDR=%s
PREFIX=%s
''' % (ifconf['ipaddr'], ifconf['prefix']))
        if 'gateway' in ifconf:
            fp.write('GATEWAY=%s\n' % ifconf['gateway'])
        # Secondary /32 addresses for each elastic IP (IPADDR1..N).
        for i, eip in enumerate(eip_list):
            fp.write('''IPADDR%d=%s\nPREFIX%d=32\n''' % (i + 1, eip, i + 1))
def get_eip_list(data):
    """Collect the set of every elastic IP referenced by the config
    (both plain EIP mappings and port-forward rules)."""
    eips = {entry['eip'] for entry in data['eip_list']}
    eips.update(fwd['eip'] for fwd in data['port_forward_list'])
    return eips
# XXX: handle bad-argument / malformed-config cases!
def gen_network_conf(data):
    """Stage an ifcfg file for every configured NIC present on this host,
    then swap the staged files into /etc/sysconfig/network-scripts.

    Side effect: annotates ``data`` in place with the resolved interface
    names ('ifname' per if_list entry, plus a top-level 'ifname' for the
    gateway NIC).

    NOTE(review): eip_list stays empty until the gateway entry is reached, so
    if the gateway entry is not first in if_list, earlier ifcfg files are
    written without the EIP addresses — confirm the intended ordering.
    """
    macMap = load_interface()
    eip_list = []
    for i, if_conf in enumerate(data['if_list']):
        mac = if_conf['mac']
        if mac not in macMap:
            # Interface with this MAC not present on this host; skip it.
            # debug log
            continue
        interface = macMap[mac]
        data['if_list'][i]['ifname'] = interface
        if 'gateway' in if_conf:  # the gateway interface is the public physical uplink
            data['ifname'] = interface
            # The public interface also carries every elastic IP as a /32.
            eip_list = get_eip_list(data)
        # print(mac)
        # print(interface)
        one_interface_conf(interface, if_conf, eip_list)
    # Finally, replace the live ifcfg-eth* files with the staged ones.
    subprocess.call("rm -f /etc/sysconfig/network-scripts/ifcfg-eth*", shell=True)
    subprocess.call("mv /var/run/kylin-vr/ifcfg-eth* /etc/sysconfig/network-scripts", shell=True)
# Generate the 1:1 EIP NAT rules.
def gen_eip_iptable_conf(f, data):
    """Write SNAT/DNAT rules mapping each elastic IP to its VM, to ``f``.

    Does nothing when the public interface name has not been resolved yet
    (no 'ifname' key in ``data``).
    """
    if 'ifname' not in data:
        return
    public_if = data['ifname']
    for entry in data['eip_list']:
        eip = entry['eip']
        vm_ip = entry['vm-ip']
        f.write("-A POSTROUTING -s %s/32 -o %s -j SNAT --to-source %s\n" % (vm_ip, public_if, eip))
        f.write("-A PREROUTING -i %s -d %s/32 -j DNAT --to-destination %s\n" % (public_if, eip, vm_ip))
# Generate the outbound SNAT (masquerade) rule.
def gen_snat_iptable_conf(f, data):
    """Write a MASQUERADE rule for the public (default-gateway) interface.

    Does nothing when 'ifname' has not been resolved.
    """
    if 'ifname' not in data:
        return
    # Enable SNAT on the default-gateway interface.
    f.write('-A POSTROUTING -o %s -j MASQUERADE\n' % data['ifname'])
# Generate the port-forwarding iptables rules.
def gen_port_forward_iptable_conf(f, data):
    """Write DNAT/SNAT port-forwarding rules to ``f``.

    Entries with 'end_port' map a whole port range onto the same span on the
    VM; otherwise a single external port maps to 'vm-port'.
    """
    for fwd in data['port_forward_list']:
        eip = fwd['eip']
        vm_ip = fwd['vm-ip']
        proto = fwd['protocal']  # (sic: the config key is spelled 'protocal')
        port = fwd['port']
        vm_port = fwd['vm-port']
        if 'end_port' in fwd:
            # Port-range mapping.
            end = fwd['end_port']
            f.write("-A PREROUTING -p %s -d %s --dport %d:%d -j DNAT --to %s:%d-%d\n"
                    % (proto, eip, port, end, vm_ip, port, end))
            f.write("-A POSTROUTING -p %s -s %s --sport %d:%d -j SNAT --to %s:%d-%d\n"
                    % (proto, vm_ip, port, end, eip, port, end))
        else:
            # Single-port mapping (external port may differ from the VM port).
            f.write("-A PREROUTING -p %s -d %s --dport %d -j DNAT --to %s:%d\n"
                    % (proto, eip, port, vm_ip, vm_port))
            f.write("-A POSTROUTING -p %s -s %s --sport %d -j SNAT --to %s:%d\n"
                    % (proto, vm_ip, vm_port, eip, port))
# Full iptables rule configuration: EIP, SNAT and port forwarding.
def gen_iptable_conf(data):
    """Stage the complete iptables ruleset (filter/mangle headers + nat rules)
    to /var/run/kylin-vr/iptable.txt in iptables-restore format."""
    # BUG FIX (robustness): use ``with`` so the file is flushed/closed even
    # if one of the rule generators raises.
    with open("/var/run/kylin-vr/iptable.txt", 'w') as f:
        f.write('''
*filter
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
COMMIT
*mangle
:PREROUTING ACCEPT
:INPUT ACCEPT
:FORWARD ACCEPT
:OUTPUT ACCEPT
:POSTROUTING ACCEPT
COMMIT
*nat
:PREROUTING ACCEPT
:INPUT ACCEPT
:OUTPUT ACCEPT
:POSTROUTING ACCEPT
''')
        gen_eip_iptable_conf(f, data)
        gen_port_forward_iptable_conf(f, data)
        gen_snat_iptable_conf(f, data)
        f.write('\nCOMMIT\n')
# Apply (restore) the staged iptables configuration.
def reload_iptable():
    """Load the staged ruleset via iptables-restore and log its exit code."""
    rc = subprocess.call(["iptables-restore", "/var/run/kylin-vr/iptable.txt"])
    print('iptable reload return %d' % rc)
# Re-apply the network configuration.
def reload_network(data):
    """Reload NetworkManager profiles and re-activate each resolved interface,
    logging every command's exit code."""
    rc = subprocess.call("nmcli c reload", shell=True)
    print('nmcli c reload return %d' % rc)
    for if_conf in data['if_list']:
        if 'ifname' not in if_conf:
            # Interface was never resolved on this host; nothing to bring up.
            continue
        cmd = 'nmcli c up %s' % if_conf['ifname']
        rc = subprocess.call(cmd, shell=True)
        print('up connection `%s` return %d' % (cmd, rc))
def check_flag():
    """Return True if the runtime marker directory (created at init) exists."""
    return os.path.exists('/var/run/kylin-vr')
def gen_flag():
    """Create the runtime marker directory; raises FileExistsError if it already exists."""
    os.makedirs('/var/run/kylin-vr')
def config_init():
    """First-boot setup: create the runtime dir, then generate the network
    and iptables configuration and apply the iptables rules."""
    # BUG FIX (robustness): guard the marker-dir creation so a re-run of
    # ``init`` does not crash with FileExistsError.
    if not check_flag():
        gen_flag()
    data = load_config()
    if not data:
        print('load config failed!')
        return
    gen_network_conf(data)
    gen_iptable_conf(data)
    reload_iptable()
# Configuration refresh after the system is already up.
def config_reload(device):
    """Re-generate and re-apply network + iptables config on a live system.

    ``device`` is accepted for CLI symmetry but is currently unused.
    """
    if not check_flag():
        print('kylin-vr service is not started, can not reload config!')
        return
    data = load_config()
    if not data:
        print('load config failed!')
        return
    gen_network_conf(data)
    gen_iptable_conf(data)
    reload_network(data)
    reload_iptable()
def is_running(file):
    """Try to take an exclusive, non-blocking lock on ``file``.

    Returns:
        The open file object that holds the lock, or None if another process
        already holds it. The caller must keep the handle alive for as long
        as the lock is needed.
    """
    fd = open(file, "w")
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        # BUG FIX: narrowed the bare ``except`` (which also swallowed
        # KeyboardInterrupt) and closed the handle on failure so the polling
        # loop in get_lock() no longer leaks one descriptor per second.
        fd.close()
        return None
    return fd
def get_lock():
    """Block until this process holds the singleton lock file, polling once
    per second; returns the open handle that keeps the lock alive."""
    lockfile = "/var/run/kylin-vr-running"
    fd = is_running(lockfile)
    while fd is None:
        time.sleep(1)
        fd = is_running(lockfile)
    return fd
def main():
    """CLI entry point: parse arguments, take the singleton lock, and
    dispatch to init (default) or reload."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--command', help='sub command, Note: the allocate command needs to be used with -d parameters', \
                        choices=['init', 'reload', 'subnet'], \
                        default='init')
    parser.add_argument('-d', '--device', help='the subnet command needs to specify interface name. Example: -c subnet -d eth2')
    args = parser.parse_args()
    # Take the lock to guarantee only a single instance runs at a time.
    # The handle is deliberately kept (not used) so the lock stays held.
    a = get_lock()
    cmd = args.command if args.command else 'init'
    if 'reload' == cmd:
        config_reload(args.device)
    # elif 'subnet' == cmd:
    #     config_subnet(args.device)
    else:  # init
        config_init()


if __name__ == '__main__':
    main()
| adamxiao/adamxiao.github.io | openstack/asserts/kylin-vr.py | kylin-vr.py | py | 7,510 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "netifaces.interfaces",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "netifaces.ifaddresses",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "netifaces.AF_LINK",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "yam... |
34274019963 | import time
import multiprocessing as mp
def show_current_time():
    """Print the current wall-clock time (HH:MM:SS) once per second, forever."""
    while True:
        t = time.strftime("%H:%M:%S")
        print("Текущее время:", t)
        time.sleep(1)
def show_message():
    """Print a kaomoji every three seconds, forever."""
    while True:
        print("(* ^ ω ^)")
        time.sleep(3)
if __name__ == "__main__":
    # Run the two printers concurrently in separate OS processes.
    p1 = mp.Process(target=show_current_time)
    p2 = mp.Process(target=show_message)
    # start() launches each process (runs the target function).
    p1.start()
    p2.start()
    # join() waits for each process to finish — here effectively forever,
    # since both targets loop endlessly (stop with Ctrl+C).
    p1.join()
    p2.join()
{
"api_name": "time.strftime",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line... |
31524102898 | """
Test the lookup_specs_chain
NOTE: this just makes sure the chain executes properly but DOES NOT assess the quality of the agent's analysis. That is done in the ipython notebooks in the evals/ folder
"""
import pytest
import json
from meche_copilot.get_equipment_results import get_spec_lookup_data, get_spec_page_validations
from meche_copilot.chains.analyze_specs_chain import AnalyzeSpecsChain
from meche_copilot.chains.helpers.specs_retriever import SpecsRetriever
from meche_copilot.schemas import *
from meche_copilot.utils.chunk_dataframe import chunk_dataframe
from meche_copilot.utils.config import load_config, find_config
# Used for verbose output of langchain prompts and responses
from langchain.callbacks import StdOutCallbackHandler
@pytest.mark.skip(reason="TODO - desiding if I want to use analyze specs chain or lookup specs chain...probably analyze specs chain")
def test_lookup_specs_chain(session:Session):
    """Smoke-test the spec-lookup chain wiring end to end (does NOT assess
    output quality — that happens in the evals/ notebooks).

    NOTE(review): ``pd`` is not imported by name in this module — presumably
    it arrives via ``from meche_copilot.schemas import *``; confirm.
    """
    sess = session
    config = session.config
    # a piece of equipment (idx 2: fan eq)
    eq = sess.equipments[2]
    # empty_eq_str_srcA, empty_eq_str_srcB = get_spec_lookup_data(eq)
    empty_eq_df = eq.instances_to(ScopedEquipment.IOFormats.df)
    spec_defs_df = eq.spec_defs_to(ScopedEquipment.IOFormats.df)
    # Spec definitions (transposed) stacked above the empty instance rows.
    concated = pd.concat([spec_defs_df.T, empty_eq_df])
    retriever = SpecsRetriever(doc_retriever=config.doc_retriever, source=eq.sourceA)
    relavent_docs = retriever.get_relevant_documents(query="")
    relavent_ref_docs_as_dicts = [doc.dict() for doc in relavent_docs]
    relavent_ref_docs_as_string = json.dumps(relavent_ref_docs_as_dicts)  # Convert to JSON string
    # lookup specs for source A
    lookup_chain = AnalyzeSpecsChain(
        doc_retriever=sess.config.doc_retriever,
        spec_reader=sess.config.spec_reader,
        callbacks=[StdOutCallbackHandler()]
    )
    result_sourceA = lookup_chain.run({
        "source": eq.sourceA,
        # "refresh_source_docs": False
        # "spec_def_df": spec_defs_df,
        "spec_res_df": concated,
    })
    # Function-local import: only needed for the chat-style lookup below.
    from langchain.schema import AIMessage, HumanMessage, SystemMessage
    messages = [
        SystemMessage(
            content=f"For each key in results_json, find the corresponding spec in the Context using the definition and replace 'None' with correct value. Context: {relavent_ref_docs_as_string}"
        ),
        HumanMessage(
            content=f"results_json={concated.iloc[:, 0:5].to_json()}"
        ),
    ]
    lookup_chain.chat(messages)
    result_sourceA
    result_sourceA_validated = get_spec_page_validations(val_pg=result_sourceA, ref_docs=eq.sourceA.ref_docs)
{
"api_name": "meche_copilot.chains.helpers.specs_retriever.SpecsRetriever",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "meche_copilot.chains.analyze_specs_chain.AnalyzeSpecsChain",
"line_num... |
6108135387 | from django.db import models
from django.utils.translation import gettext_lazy as _
from solo.models import SingletonModel
class Configuration(SingletonModel):
    """Singleton holding the Welkin API connection settings for this install."""

    tenant = models.CharField(max_length=255, help_text="Welkin organization name.")
    instance = models.CharField(
        max_length=255, help_text="The environment inside a Welkin organization."
    )
    api_client = models.CharField(
        max_length=255, help_text="Welkin API client name.", verbose_name="API client"
    )
    secret_key = models.CharField(
        max_length=255, help_text="Welkin API client secret key."
    )

    def __str__(self):
        return "Welkin configuration"

    class Meta:
        verbose_name = _("configuration")

    @classmethod
    def get_test_payload(cls):
        """Return a sample webhook payload filled with the stored
        tenant/instance names and placeholder values everywhere else."""
        config = cls.objects.get()
        return {
            "sourceId": "SOURCE_ID",
            "eventSubtype": "EVENT_SUBTYPE",
            "tenantName": config.tenant,
            "instanceName": config.instance,
            "patientId": "PATIENT_ID",
            "eventEntity": "EVENT_ENTITY",
            "sourceName": "SOURCE_NAME",
            "url": "URL",
        }
| Lightmatter/django-welkin | django_welkin/models/configuration.py | configuration.py | py | 1,152 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "solo.models.SingletonModel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "d... |
37862194723 | from scipy.spatial.distance import cosine
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("AI-Growth-Lab/PatentSBERTa")
def get_sim(anchor: str, target: str) -> float:
    """Return the cosine similarity (1 - cosine distance) between the SBERT
    embeddings of ``anchor`` and ``target``."""
    emb_anchor = model.encode([anchor])
    emb_target = model.encode([target])
    similarity = 1 - cosine(emb_anchor, emb_target)
    return float(similarity)
# Example anchor/target phrase pairs, spanning high to low similarity.
examples = [
    ["renewable power", "renewable energy"],
    ["previously captured image", "image captured previously"],
    ["labeled ligand", "container labelling"],
    ["gold alloy", "platinum"],
    ["dissolve in glycol", "family gathering"],
]

if __name__ == '__main__':
    # NOTE(review): the similarity score is computed but discarded —
    # presumably a smoke test that the model loads; print it if output is wanted.
    get_sim("renewable power", "renewable energy")
| vquilon/kaggle-competitions | patent-phrase-to-phrase-matching/models/patent_sbert_a.py | patent_sbert_a.py | py | 681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sentence_transformers.SentenceTransformer",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.cosine",
"line_number": 11,
"usage_type": "call"
}
] |
39156539433 | import re
import sys
from collections import namedtuple, Counter, OrderedDict
from operator import itemgetter
from math import log
from Bio import SeqIO
from RecBlast import print, merge_ranges
from RecBlast.Search import id_search
from itertools import chain, islice
import mygene
from pathlib import Path
from RecBlast.RBC import RecBlastContainer
from io import StringIO
import pandas as pd
import sqlite3
def cleanup_fasta_input(handle, filetype='fasta', write=True):
    """Group records by name, drop 'Sequenceunavailable' entries, and tag
    each kept record's name with the third '|' field of its description.

    Returns the cleaned record list; also writes ``<handle>.clean`` when
    ``write`` is true.
    """
    records = [rec for rec in SeqIO.parse(handle, filetype)]
    cleaned = []
    for name in {rec.name for rec in records}:
        keep = [rec for rec in records
                if rec.name == str(name) and 'Sequenceunavailable' not in rec.seq]
        for rec in keep:
            rec.name += '_' + str(rec.description).split('|')[2]
        cleaned += keep
    if write:
        with open(handle + '.clean', 'w') as outf:
            SeqIO.write(cleaned, outf, filetype)
    return cleaned
def massively_translate_fasta(SeqIter):
    """Translate RefSeq ids to human gene symbols in bulk via mygene.

    Mutates each record's ``id`` in place when a symbol is found; records
    without a match keep their original id. Returns all records as a list.
    """
    mg = mygene.MyGeneInfo()
    all_genes = []

    def chunks(iterable, size=1000):
        # Lazily yield successive lists of up to ``size`` items.
        iterator = iter(iterable)
        for first in iterator:
            yield list(chain([first], islice(iterator, size - 1)))

    for x in chunks(SeqIter):
        # Query mygene in batches to keep each request reasonably sized.
        out = mg.querymany([a.id for a in x], scopes='refseq', fields='symbol', species='Homo sapiens', returnall=True)
        tdict = {}
        for a in out['out']:
            try:
                tdict[a['query']] = a['symbol']
            except KeyError:
                # Hit without a symbol (e.g. unmatched query); skip it.
                continue
        for i in x:
            try:
                i.id = tdict[i.id]
            except KeyError:
                continue
        all_genes += x
    return all_genes
def translate_ids(id_list, orig='refseq', to='symbol', species='human'):
    """Converts a name from one type to another using mygene.

    Ids that cannot be translated are passed through unchanged.
    :param id_list list:
    :param orig str:
    :param to str:
    :param species str:
    :return list:
    """
    mg = mygene.MyGeneInfo()
    hits = mg.querymany(id_list, scopes=orig, fields=to, species=species)
    mapping = {hit['query']: hit[to] for hit in hits if to in hit}
    return [mapping.get(identifier, identifier) for identifier in id_list]
def nr_by_longest(handle, filetype='fasta', write=True):
    """Make a FASTA non-redundant by id, keeping the longest record per id.

    Writes ``nr_<basename>`` into the working directory when ``write`` is
    true and returns the kept records as a list.
    """
    seqdict = {}
    for seq in SeqIO.parse(handle, filetype):
        if seq.seq == 'Sequenceunavailable':
            print('Seq Unavailable:\t', seq.name)
            continue
        # Normalize 'id|description' headers; fall back to space-separated.
        try:
            seq.id, seq.description = seq.id.split('|')[0], seq.id.split('|')[1]
        except IndexError:
            seq.id, seq.description = seq.id.split(' ')[0], ''.join(seq.id.split('|')[1:len(seq.id.split('|'))])
        # BUG FIX: the original assert chained ``!=`` tests with ``or``,
        # which is a tautology (always true); reject db-prefix tokens properly.
        if seq.id in ('gi', 'emb', 'acc'):
            raise ValueError('Could not parse a usable id from header: %s' % seq.id)
        if seq.id not in seqdict or len(seq) > len(seqdict[seq.id]):
            seqdict[seq.id] = seq
    # BUG FIX: materialize a list — the original returned a generator that
    # SeqIO.write() had already exhausted whenever write=True.
    newlist = [seq for _, seq in seqdict.items()]
    if write:
        outhandle = 'nr_' + str(Path(handle).name)
        with Path(outhandle).open('w') as outf:
            SeqIO.write(newlist, outf, filetype)
    return newlist
def cull_reciprocal_best_hit(recblast_out):
    """
    returns a recblast_out container that only has the reciprocal best hits.

    For each (species, query), hit records are kept only when the query name
    appears in the hit's first bracketed annotation; all other records are
    dropped in place. Lists of containers are handled recursively.
    :param recblast_out:
    :return:
    """
    pat = re.compile('\|\[(.*?)\]\|')  # regex for items in annotation
    if isinstance(recblast_out, list):
        # A list of containers: cull each one independently.
        rc_out_list = []
        for index, rc in enumerate(recblast_out):
            rc_out_list.append(cull_reciprocal_best_hit(rc))
        return rc_out_list
    else:
        # assert isinstance(recblast_out, RecBlastContainer), "Items must be RecBlastContainer Objects!"
        for species, rc_spec_rec in recblast_out.items():
            # print('Species:\t', species, indent=0)
            for query, rc_rec in rc_spec_rec.items():
                # print('Query:\t', query, indent=1)
                try:
                    rc_out = rc_rec['recblast_results']
                except KeyError:
                    print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                    continue
                tmprecord = []
                for record in rc_out:
                    try:
                        # Descriptions look like '<target_id>|-|<annotations>'.
                        # print(record.description, indent=3)
                        target_id, annotations = record.description.split('|-|')
                        # print('Target ID:\t', target_id, indent=4)
                        # print('Annotations:', annotations.lstrip('\t'), indent=4)
                    except ValueError:
                        print(record.description, indent=2)
                        print('Could not unpack annotations!', indent=2)
                        continue
                    id_lst = pat.findall(annotations)
                    # print('id_list:\t', id_lst, indent=4)
                    if id_lst:
                        # Keep only records whose first annotation contains the query.
                        if query in id_lst[0]:
                            tmprecord.append(record)
                        else:
                            print("For query {0}, target {1} was not a reciprocal best hit!".format(query,
                                                                                                    target_id))
                            continue
                    else:
                        print('No annotations found for record {0} in species {1}, query {2}'.format(record.name,
                                                                                                     species,
                                                                                                     query))
                        continue
                recblast_out[species][query]['recblast_results'] = tmprecord
        return recblast_out
def simple_struct(recblast_out, verbose=True):
    """Returns a nice diagram of queries, targets, and annotations.

    Collapses one container (or a list of containers, merged recursively)
    into nested plain dicts: {species: {query: {target_id: [annotations]}}}.
    When ``verbose``, also pretty-prints the structure with annotations
    reduced to their parsed identifiers via id_search.
    """
    master_dict = {}
    pat = re.compile('\|\[(.*?)\]\|')  # regex for items in annotation
    if isinstance(recblast_out, list):
        # Prepare a list of dictionaries of length recblast_out, along with a list of respective species
        master_count = [dict] * len(recblast_out)
        for index, rc in enumerate(recblast_out):
            try:
                master_count[index] = simple_struct(rc)
            except AttributeError:
                master_count[index] = rc
        # Merge each per-container dict into the combined master_dict.
        for subdict in master_count:
            for species, species_dict in subdict.items():
                if isinstance(species_dict, Exception):
                    continue
                try:
                    comb_spec_dict = master_dict[species]
                except KeyError:
                    master_dict[species] = dict()
                    comb_spec_dict = master_dict[species]
                for query, query_dict in species_dict.items():
                    try:
                        comb_query_dict = comb_spec_dict[query]
                    except KeyError:
                        comb_spec_dict[query] = dict()
                        comb_query_dict = comb_spec_dict[query]
                    for target_id, annotation_list in query_dict.items():
                        try:
                            comb_anno_list = comb_query_dict[target_id]
                        except KeyError:
                            comb_query_dict[target_id] = list()
                            comb_anno_list = comb_query_dict[target_id]
                        comb_anno_list += annotation_list if isinstance(annotation_list, list) else [annotation_list]
        return master_dict
    else:
        """
        Structure:
            master_dict:
                Species| species_dict:
                    Query| query_dict:
                        target_id| annotations_list
        """
        # assert isinstance(recblast_out, RecBlastContainer), 'Item in recblast_out was not a RecBlastContainer object!'
        try:
            recblast_out.__delitem__('__dict__')
        except KeyError:
            pass
        for species, rc_spec_rec in recblast_out.items():
            # print('Species:\t', species, indent=0)
            try:
                species_dict = master_dict[species]
            except KeyError:
                master_dict[species] = dict()
                species_dict = master_dict[species]
            for query, rc_rec in rc_spec_rec.items():
                # print('Query:\t', query, indent=1)
                try:
                    query_dict = species_dict[query]
                except KeyError:
                    species_dict[query] = dict()
                    query_dict = species_dict[query]
                try:
                    rc_out = rc_rec['recblast_results']
                except KeyError:
                    print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                    continue
                for record in rc_out:
                    try:
                        # Descriptions look like '<target_id>|-|<annotations>'.
                        # print(record.description, indent=3)
                        target_id, annotations = record.description.split('|-|')
                        # print('Target ID:\t', target_id, indent=4)
                        # print('Annotations:', annotations.lstrip('\t'), indent=4)
                    except ValueError:
                        print(record.description, indent=2)
                        # print('Could not unpack annotations!', indent=2)
                        continue
                    try:
                        target_list = query_dict[target_id]
                    except KeyError:
                        query_dict[target_id] = list()
                        target_list = query_dict[target_id]
                    id_lst = pat.findall(annotations)
                    # print('id_list:\t', id_lst, indent=4)
                    if id_lst:
                        target_list += id_lst
                    else:
                        print('No annotations found for record {0} in species {1}, query {2}'.format(record.name,
                                                                                                     species,
                                                                                                     query))
        if verbose:
            print('*******************************************')
            for species, species_dict in master_dict.items():
                print(species, indent=0)
                for query, query_dict in species_dict.items():
                    print(query, indent=1)
                    for target_id, annotation_list in query_dict.items():
                        print(target_id, indent=2)
                        # Reduce each annotation to its parsed identifier.
                        # NOTE(review): both branches of the if/else below are
                        # identical (kept verbatim from the original).
                        tmp = []
                        for annotation in annotation_list:
                            p, item, seq_range, id_type = id_search(annotation, id_type='brute', verbose=0)
                            if id_type == 'symbol':
                                tmp.append(item)
                            else:
                                tmp.append(item)
                        query_dict[target_id] = tmp
                        for annotation in query_dict[target_id]:
                            print(annotation, indent=3)
            print('*******************************************')
        return master_dict
def rc_out_stats(rc_out):
    """Compute hit-rate statistics over RecBlast output.

    NOTE(review): this function looks unfinished/broken — confirm intent
    before use:
      * ``zip(holder)`` yields 1-tuples, so the two-way unpack only works
        for a single-element list (probably meant ``zip(*holder)``);
      * ``has_run`` is reset to 0 but never set to 1, so ``c_hit`` increments
        once per record instead of once per query;
      * the inner loop iterates ``rc_out.items()`` where the per-query
        ``queries_dict.items()`` was probably intended;
      * the RecBlastContainer branch computes counts but never returns them
        (the function falls through to an implicit/explicit None).
    """
    # Todo: use 'from Collections import Counter' to rapidly count duplicates
    if isinstance(rc_out, list):
        holder = []
        for rc in rc_out:
            holder.append(rc_out_stats(rc))
        c_hit_list, c_multihit_list = zip(holder)
        hit_perc = sum(c_hit_list) / len(c_hit_list)
        multihit_perc = sum(c_multihit_list) / len(c_multihit_list)
        # Percentage of searches with reciprocal hits, regardless of number:
        # Percentage of searches with more than one hit:
    elif isinstance(rc_out, RecBlastContainer):
        c_hit = 0
        c_multihit = 0
        for species, queries_dict in rc_out.items():
            for query, results in rc_out.items():
                try:
                    record_list = results['recblast_results']
                except KeyError:
                    return (0, 0)
                has_run = 0
                for record in record_list:
                    if not has_run:
                        c_hit += 1
                        has_run = 0
                    c_multihit += 1
    else:
        return None
def count_dups(recblast_out):
    """ Inverts target-annotation dictionary to find out, for every best-hit annotation, how many targets there are

    Returns a pair of dicts keyed by species:
      * {annotation: [target_ids]} and
      * {annotation: target_count}.
    """
    species_anno_target_dict = {}
    species_anno_count_dict = {}
    master_dict = simple_struct(recblast_out, verbose=False)
    for species, species_dict in master_dict.items():
        try:
            anno_target_dict = species_anno_target_dict[species]
        except KeyError:
            species_anno_target_dict[species] = {}
            anno_target_dict = species_anno_target_dict[species]
        print(species_dict, indent=0)
        for query, query_dict in species_dict.items():
            # ignoring query
            print(query_dict, indent=1)
            for target_id, annotation_list in query_dict.items():
                print(annotation_list, indent=2)
                # The first annotation is treated as the best hit.
                tophit = annotation_list[0]
                print(tophit, indent=2)
                try:
                    anno_target_dict[tophit] += [target_id]
                except KeyError:
                    anno_target_dict[tophit] = list()
                    anno_target_dict[tophit].append(target_id)
                print(anno_target_dict[tophit], indent=3)
    for species, anno_dict in species_anno_target_dict.items():
        print(species, indent=0)
        try:
            anno_count_dict = species_anno_count_dict[species]
        except KeyError:
            species_anno_count_dict[species] = {}
            anno_count_dict = species_anno_count_dict[species]
        for annotation, target_list in anno_dict.items():
            print(annotation, '\t\t\t', len(target_list))
            anno_count_dict[annotation] = len(target_list)
    return species_anno_target_dict, species_anno_count_dict
class FilterRBHs(object):
    def __init__(self, **kwargs):
        """Convenience class for use with RecBlastContainer.result_filter(). Removes non-Reciprocal Best Hits from RBC.

        Keyword arguments override entries of ``self.args`` (func,
        summary_statistic, recblast_object).
        """
        self._recblast_object = 'query_record'
        self.args = {'func': self.fun, 'summary_statistic': self._stat, 'recblast_object': self._recblast_object}
        for k, v in kwargs.items():
            self.args[k] = v

    def _stat(self, query_record):
        """ Summary Statistic function for filter_RBH. Requires setting recblast_object='query_record'
        :param query_record:
        :return:
        """
        return query_record.name

    def fun(self, hit, stat, verbose=False):
        # Predicate: keep the hit only when its top annotation's symbol
        # equals the query name (i.e. it is a reciprocal best hit).
        # NOTE(review): when the annotation parses but the symbols differ,
        # the function falls through and implicitly returns None (falsy) —
        # confirm that is intended rather than an explicit False.
        pat = re.compile('\|\[(.*?):.*\]\|')  # regex for items in annotation
        try:
            hit_split = hit.description.split('|-|')
            top_anno = hit_split[1]
        except ValueError:
            print(hit.description, indent=2)
            print('Could not unpack annotations!', indent=2)
            return False
        except IndexError:
            print(hit.description, indent=2)
            print('Could not unpack annotations!', indent=2)
            return False
        id_lst = pat.findall(top_anno)[0].strip()
        if id_lst:
            _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=verbose)
            if stat == hit_symbol:
                return True
        else:
            return False
def map_ranges(hit):
    """ Convenience function for RBC.results_map(). Replaces results with a tup of result descriptions and loci."""
    # id_search yields (pattern, id, (start, end, strand), id_type).
    _, h_id, h_range, _ = id_search(hit.description, verbose=False)
    h_start = h_range[0]
    h_end = h_range[1]
    h_strand = h_range[2]
    # Tuple layout: (description, id, start, end, strand).
    h_d = (hit.description, h_id, h_start, h_end, h_strand)
    return h_d
def RBC_drop_many_to_one_hits(RBC):
    """Remove hits whose loci overlap other hits, keeping only the survivors
    of the overlap filter, mutating ``RBC`` in place.

    NOTE(review): ``drop_overlaps_bed`` is not defined or imported in the
    visible part of this module — presumably defined elsewhere in the file;
    confirm.
    NOTE(review): the final loop deletes from the same list it enumerates,
    which skips the element after each deletion — likely a latent bug.
    """
    # Key: (id, start, end, query+index) -> location (species, query, index).
    loci_dict_RBC = {}
    for species, query, rec in RBC.result_map(map_ranges):
        r = rec['recblast_results']
        for index, hit in enumerate(r):
            loci_dict_RBC[(hit[1], hit[2], hit[3], ''.join((query, str(index))))] = (species, query, index)
    filtered_loci_dict_RBC = drop_overlaps_bed(loci_dict_RBC)
    # Re-group the surviving hit indexes by (species, query).
    filter_dict = {}
    for hit_loc in filtered_loci_dict_RBC.values():
        species, query, index = hit_loc
        if (species, query) in filter_dict.keys():
            filter_dict[(species, query)].append(index)
        else:
            filter_dict[(species, query)] = [index]
    # Drop every hit whose index did not survive the overlap filter.
    for (species, query), indexes in filter_dict.items():
        for hit_index, hit in enumerate(RBC[species][query]['recblast_results']):
            if hit_index in indexes:
                continue
            else:
                del RBC[species][query]['recblast_results'][hit_index]
def count_reciprocal_best_hits(recblast_out):
    """Count, per species, how many hits' annotation symbol equals the query.

    Returns {species: Counter({query: n_reciprocal_hits})}.
    """
    pat = re.compile('\|\[(.*?)\]\|')  # regex for items in annotation
    species_counters = {}
    for species, species_dict in recblast_out.items():
        species_counters[species] = Counter()
        for query, query_dict in species_dict.items():
            try:
                rc_out = query_dict['recblast_results']
            except KeyError:
                print('No entries in recblast_results for query {0} in species {1}'.format(query, species))
                continue
            for hit in rc_out:
                try:
                    # Descriptions look like '<target_id>|-|<annotations>'.
                    hit_split = hit.description.split('|-|')
                    target_id = hit_split[0]
                    annotations = hit_split[1]
                except ValueError:
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                except IndexError:
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                id_lst = ''.join(pat.findall(annotations))
                if id_lst:
                    _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=0)
                else:
                    print('No annotations found for record {0} in species {1}, query {2}'.format(hit.name,
                                                                                                 species,
                                                                                                 query))
                    continue
                # A hit whose symbol matches the query is a reciprocal best hit.
                if query == hit_symbol:
                    species_counters[species].update({query: 1})
    return species_counters
def export_count_as_csv(rec_hit_counter_dict, filename='RecBlastCount'):
    """Write per-species reciprocal-hit counts as a TSV table.

    Args:
        rec_hit_counter_dict: {species: Counter({gene: count})}.
        filename: output path without extension; '.tsv' is appended.
    """
    species_names = list(rec_hit_counter_dict.keys())
    # Union of all gene names across species, in first-seen order.
    all_genes = []
    for counter in rec_hit_counter_dict.values():
        for gene in counter:
            if gene not in all_genes:
                all_genes.append(gene)
    # One row of counts per gene, columns in species order (Counter returns
    # 0 for genes a species never saw).
    gene_rows = {gene: tuple(rec_hit_counter_dict[species][gene] for species in species_names)
                 for gene in all_genes}
    lines = ['Gene\t' + '\t'.join(species_names) + '\n']
    lines += ['{Gene}\t{counts_str}\n'.format(Gene=gene, counts_str='\t'.join(str(c) for c in counts))
              for gene, counts in gene_rows.items()]
    with open(filename + '.tsv', 'w') as outf:
        outf.writelines(lines)
def count_reciprocal_best_hits_from_pandas(pandas_df):
    """Count reciprocal best hits per species from a flat RecBlast DataFrame.

    Parameters
    ----------
    pandas_df : pandas.DataFrame
        Must contain 'target_species' and 'query_name' columns; the serialized
        hit records are assumed to sit in positional columns 5..-2
        (TODO confirm against the writer of this table).

    Returns
    -------
    dict
        species -> collections.Counter of {query: reciprocal-hit count}.
    """
    # Raw string: the pattern uses regex escapes that are invalid
    # string-literal escapes otherwise.
    pat = re.compile(r'\|\[(.*?)\]\|')  # regex for items in annotation
    spec_list = list(pandas_df.target_species.unique())
    species_counters = {}
    for species in spec_list:
        species_counters[species] = Counter()
        species_results = pandas_df.loc[pandas_df['target_species'] == species]
        query_list = list(species_results.query_name.unique())
        for query in query_list:
            print(query)
            # BUGFIX: DataFrame.ix was removed in pandas 1.0; .iloc gives the
            # same positional slice.
            query_results = species_results.loc[species_results['query_name'] == query].iloc[:, 5:-1]
            rc_out = []
            for _, row in query_results.iterrows():
                rc_out += row.tolist()
            # Annoying shunt: rebuild a FASTA stream so SeqIO can parse hits.
            rc_out_asfasta = '\n'.join('>' + rec for rec in rc_out if rec is not None)
            tmp = StringIO(rc_out_asfasta)
            rc_out = SeqIO.parse(tmp, 'fasta')
            for hit in rc_out:
                try:
                    hit_split = hit.description.split('|-|')
                    id_lst = ''.join(pat.findall(hit_split[1]))
                except (ValueError, IndexError):
                    # IndexError added: a description without '|-|' yields a
                    # 1-element split, which the sibling function also guards.
                    # NOTE(review): 'indent' is not a builtin print() kwarg;
                    # assumes a project-local print wrapper is in scope.
                    print(hit.description, indent=2)
                    print('Could not unpack annotations!', indent=2)
                    continue
                if id_lst:
                    _, hit_symbol, _, _ = id_search(id_lst, id_type='symbol', verbose=0)
                else:
                    print('No annotations found for record {0} in species {1}, query {2}'.format(hit.name,
                                                                                                 species,
                                                                                                 query))
                    continue
                if query == hit_symbol:
                    species_counters[species].update({query: 1})
    return species_counters
def sqlite_to_pandas(sql_file, table_name):
    """Load an entire SQLite table into a pandas DataFrame.

    Parameters
    ----------
    sql_file : str
        Path to the SQLite database file.
    table_name : str
        Name of the table to read. Quoted as an identifier below (identifiers
        cannot be parameterized in SQL); still, avoid untrusted input here.

    Returns
    -------
    pandas.DataFrame
    """
    conn = sqlite3.connect(sql_file)
    try:
        # Double-quote the identifier, escaping embedded quotes, so table
        # names cannot break out of the statement.
        quoted = '"{0}"'.format(table_name.replace('"', '""'))
        return pd.read_sql_query("select * from {0};".format(quoted), conn)
    finally:
        # BUGFIX: the original leaked the connection; always release it.
        conn.close()
def filter_hits_pandas(pandas_df):
    """Drop hits shorter than 25% of their query from a RecBlast DataFrame.

    Applies ``filter_func`` row-wise: each hit whose annotated sequence range
    spans less than a quarter of the query record's length is replaced with
    ``None``; surviving hits keep their full description string.

    :param pandas_df: DataFrame with a 'query_record' column holding a FASTA
        string, leading metadata columns, and hit columns after them.
    :return: the DataFrame produced by ``pandas_df.apply(filter_func, axis=1)``.
    """
    def filter_func(row):
        # Parse the query itself to establish the minimum acceptable length.
        qrec = row.query_record
        qrec = SeqIO.read(StringIO(qrec), 'fasta')
        min_len = 0.25 * len(qrec)
        # NOTE(review): intro takes columns 0-5 while hits starts at column 5,
        # so column 5 appears in both slices — confirm the overlap is intended.
        intro = row.iloc[0:6].tolist()
        hits = row.iloc[5:-1].tolist()
        new_hits = []
        for hit in hits:
            if hit == 'NA':
                new_hits.append(None)
                continue
            elif hit is not None:
                # Re-add the FASTA header marker stripped during storage.
                tmp = '>' + hit
            else:
                new_hits.append(None)
                continue
            hit = SeqIO.read(StringIO(tmp), 'fasta')
            id_lst = hit.id
            # Brute-force id parse to recover the symbol and sequence range.
            _, hit_symbol, seq_range, _ = id_search(id_lst, id_type='brute', verbose=0)
            try:
                seq_range = seq_range[hit_symbol]
            except KeyError:
                # No range recorded for this symbol: treat as a failed hit.
                new_hits.append(None)
                continue
            seq_len = abs(int(seq_range[1]) - int(seq_range[0]))
            # Keep the hit only if it covers at least a quarter of the query.
            new_hits.append(hit.description if seq_len >= min_len else None)
        full = intro + new_hits
        return full
    return pandas_df.apply(filter_func, axis=1)
class DataIntegratorParser(object):
    """Parser for Data Integrator output files.

    Expected format: a '#'-prefixed header on the first line; per-region
    sections introduced by '# region=<id>' lines; a '#<tab-separated names>'
    column-declaration line; then tab-separated data rows (or '# No data').
    Each region becomes an attribute on the instance holding a list of
    namedtuples typed by the declared column names.
    """

    # Characters illegal in Python identifiers, mapped to '_' so region and
    # column names can be used as attribute / namedtuple-field names.
    _TRANSTAB = str.maketrans('!@#$%^&*();:.,\'\"/\\?<>|[]{}-=+', '_____________________________')

    def __init__(self, file):
        """Parse ``file`` (str or pathlib.Path) into per-region record lists."""
        transtab = self._TRANSTAB
        if isinstance(file, str):
            file = Path(file)
        if isinstance(file, Path):
            # BUGFIX: the original Path branch asserted on self.file without
            # ever assigning it, raising AttributeError for Path inputs.
            self.file = file
            assert self.file.exists(), str(file) + ' is an invalid file path or does not exist!'
            assert self.file.is_file(), str(file) + ' is not a valid file!'
        else:
            raise TypeError('File must be either a str or Path object!')
        self.regions = []
        with self.file.open() as f:
            for index, line in enumerate(f):
                line = line.strip()
                if index == 0:
                    # First line is the run header.
                    self.header = line.lstrip('# ')
                    continue
                elif line.startswith('# region='):
                    # BUGFIX: str.lstrip() strips a *character set*, not a
                    # prefix, so region names beginning with any character of
                    # '# region=' lost leading characters. Slice off the
                    # literal prefix instead.
                    region = line[len('# region='):].translate(transtab)
                    if getattr(self, region, None) is None:
                        self.regions.append(region)
                        setattr(self, region, [])
                    continue
                elif line.startswith('#') and not line.startswith('# '):
                    # Column-name declaration, e.g. '#name<TAB>score'.
                    cnames = line.lstrip('#').translate(transtab)
                    ColNames = namedtuple('ColNames', cnames.split('\t'))
                    self.colnames = ColNames._fields
                    continue
                elif line.startswith('# No data'):
                    # Region with no rows: record one all-None placeholder.
                    newitem = getattr(self, region, []) + [ColNames(*[None] * len(self.colnames))]
                    setattr(self, region, newitem)
                    continue
                else:
                    try:
                        newitem = getattr(self, region, []) + [ColNames(*line.split('\t'))]
                        setattr(self, region, newitem)
                    except NameError as err:
                        # region/ColNames unbound: data appeared before the
                        # region or column declarations.
                        raise NameError(str(err) + '\nParser encountered a line of data before either the column names '
                                                   'or the genomic region was declared in the file!')
                    except TypeError:
                        # Row width does not match the declared columns.
                        print(line, file=sys.stderr)
                        raise
                    continue

    def rename_regions_via_bedfile(self, bedfile):
        """Rename region attributes using column 4 of a BED file.

        The BED (chrom, start, end) — with start shifted from 0-based to
        1-based — must match the '<chrom>_<start>_<end>' region ids parsed
        from the Data Integrator file.
        """
        transtab = self._TRANSTAB
        if isinstance(bedfile, str):
            bedfile = Path(bedfile)
        if isinstance(bedfile, Path):
            # BUGFIX: as in __init__, the Path branch never set self.bedfile.
            self.bedfile = bedfile
            assert self.bedfile.exists(), str(bedfile) + ' is an invalid file path or does not exist!'
            assert self.bedfile.is_file(), str(bedfile) + ' is not a valid file!'
        else:
            raise TypeError('File must be either a str or Path object!')
        bed_trans = {}
        with self.bedfile.open() as f:
            for line in f:
                line = line.strip().split('\t')
                # BED starts are 0-based; parsed region ids are 1-based.
                bed_trans['{0}_{1}_{2}'.format(line[0], str(int(line[1]) + 1), line[2])] = line[3].translate(transtab)
        self.regions = []
        for oldid in bed_trans:
            self.regions.append(bed_trans[oldid])
            setattr(self, bed_trans[oldid], getattr(self, oldid, []))
            delattr(self, oldid)

    def count_stats_per_record(self, attr_name):
        """Return an OrderedDict of region -> Counter over ``attr_name`` values."""
        counts = OrderedDict()
        for region in sorted(self.regions):
            rec = getattr(self, region)
            c = Counter([getattr(r, attr_name) for r in rec])
            counts[region] = c
        return counts

    def __iter__(self):
        # Yield each region's record list, in declaration order.
        for region in self.regions:
            yield getattr(self, region)

    def __str__(self):
        # One line per region: first field of first and last record + count.
        string = ''
        for region in self.regions:
            content = getattr(self, region)
            string += "{0}:\t {1} ... {2} ({3})\n".format(region,
                                                          content[0][0],
                                                          content[-1][0],
                                                          len(content))
        return string
def read_bed(bedfile, key_col=3):
    """Read a BED file into a dict keyed by the 0-indexed ``key_col``.

    ``key_col`` may also be a slice, in which case the key is the tuple of
    the sliced fields. Numeric-looking fields are coerced to int or float.
    Raises KeyError on duplicate keys.
    """
    bed_path = Path(bedfile)
    assert bed_path.exists(), "Given bedfile path does not exist!"
    assert bed_path.is_file(), "Given bedfile path was not a file! Did you provide a directory?"
    records = {}
    with bed_path.open() as handle:
        for raw in handle:
            # Skip comment lines.
            if raw.startswith("#"):
                continue
            fields = raw.strip().split('\t')
            # Coerce each field: int first, then float, else leave as str.
            for pos, field in enumerate(fields):
                try:
                    fields[pos] = int(field)
                except ValueError:
                    try:
                        fields[pos] = float(field)
                    except ValueError:
                        pass
            key = tuple(fields[key_col]) if isinstance(key_col, slice) else fields[key_col]
            if key in records:
                raise KeyError('Duplicate keys in dictionary!')
            records[key] = fields
    return records
def drop_overlaps_bed(bedfile):
    """Filter a BED dict down to intervals that overlap nothing else on
    their chromosome.

    :param bedfile: either a dict as produced by ``read_bed`` with a tuple
        key, or a path handed to ``read_bed``.
    :return: dict of the non-overlapping entries, keyed as in the input.

    NOTE(review): each key ``loc`` is indexed up to ``loc[3]``, but
    ``read_bed(bedfile, key_col=slice(0, 3))`` yields 3-tuples
    (chrom, start, end) — confirm the intended key slice also includes the
    name column, otherwise the path-input branch raises IndexError.
    NOTE(review): the fallback lookup near the bottom builds a 3-tuple from a
    4-tuple item (dropping the name) — verify the intended key shape.
    """
    d = bedfile if isinstance(bedfile, dict) else read_bed(bedfile, key_col=slice(0, 3))
    d_new = []
    # Bucket intervals by chromosome: chrom -> [[start, end, name], ...]
    dlocs = {}
    for loc in d.keys():
        if loc[0] in dlocs.keys():
            dlocs[loc[0]].append([int(loc[1]), int(loc[2]), loc[3]])
        else:
            dlocs[loc[0]] = [[int(loc[1]), int(loc[2]), loc[3]]]
    for k, v in dlocs.items():
        if len(v) > 1:
            # Normalize each interval so start <= end before comparing.
            v = [sorted(i[0:2]) + [i[2]] for i in v]
            # comparison matrix: t[i][j] True when intervals i and j overlap
            t = [[max(v[i][0], j[0]) <= min(v[i][1], j[1]) for j in v] for i in range(0, len(v))]
            # set diagonal identities to False (self-overlap is not relevant)
            for index in range(0, len(t)):
                t[index][index] = False
            # sum per column of matrix: how many others each interval touches
            t_sums = [sum(i) for i in zip(*t)]
            # Select only items which have a zero in the t_sums index
            filtered_v = [v[i] for i in range(0, len(t_sums)) if t_sums[i] == 0]
            d_new += [(k, i[0], i[1], i[2]) for i in filtered_v]
        else:
            try:
                # Sole interval on this chromosome: keep it unconditionally.
                v = v[0]
                d_new.append((k, v[0], v[1], v[2]))
            except Exception:
                print(k, v)
                raise
    # Map surviving tuples back to the original dict entries; the elif
    # retries with start/end swapped since normalization above may have
    # reordered a reverse-strand interval.
    filtered_d = {}
    for item in d_new:
        if item in d.keys():
            filtered_d[item] = d[item]
        elif (item[0], item[2], item[1]) in d.keys():
            filtered_d[(item[0], item[2], item[1])] = d[(item[0], item[2], item[1])]
        else:
            print(item)
            raise Exception
    return filtered_d
def calc_effective_copy_number_by_coverage(query_record):
    """Estimate effective copy number as mean hit depth over covered bases.

    Parameters
    ----------
    query_record : dict
        Must hold a 'recblast_results' list of hits whose first feature has a
        'query_coverage' qualifier holding a (start, end) coordinate pair.

    Returns
    -------
    float, int or None
        None when there are no hits; 0 when the merged coverage is empty;
        otherwise total hit-bases divided by the merged covered span,
        rounded to 2 decimal places.
    """
    if len(query_record['recblast_results']) == 0:
        return None
    # Normalize each hit's coverage to a sorted [start, end) pair, skipping
    # malformed qualifiers that lack both coordinates.
    raw_ranges = (hit.features[0].qualifiers['query_coverage'] for hit in query_record['recblast_results'])
    ranges = []
    for r in raw_ranges:
        try:
            ranges.append(sorted((int(r[0]), int(r[1]))))
        except IndexError:
            continue
    coverage = list(merge_ranges(ranges))
    sum_coverage = sum(i[1] - i[0] for i in coverage)
    if sum_coverage == 0:
        return 0
    # Total bases contributed by all hits inside the merged coverage.
    # Equivalent to the original per-base membership count, but computed via
    # interval-overlap lengths: O(len(ranges) * len(coverage)) instead of
    # O(total bases).
    sum_nuc = sum(max(0, min(r[1], i[1]) - max(r[0], i[0]))
                  for r in coverage for i in ranges)
    return round(sum_nuc / sum_coverage, 2)
def bed_get_flanking_regions(bedfile, left_range, right_range, genome_file=None):
    """Returns two new bedfiles with ranges left-and-right of each item of the original file, respectively.
    :param str bedfile: path to the source BED file (chrom, start, end, id).
    :param left_range: Either a single positive integer indicating the left-most number of bases in range;
    or a tuple of two integers indicating the left-and-right bound of the range.
    :param right_range: Either a single positive integer indicating the right-most number of bases in range;
    or a tuple of two integers indicating the right-and-left bound of the range.
    :param genome_file: optional two-column (chrom<TAB>size) file used to clamp
    right-side ranges at each chromosome's end.
    :return: None; writes '<stem>_left_...' and '<stem>_right_...' files beside the input.
    """
    # A bare int is shorthand for "N bases immediately flanking" on that side.
    if isinstance(left_range, int):
        left_range = (left_range, 0)
    if isinstance(right_range, int):
        right_range = (0, right_range)
    assert isinstance(left_range, tuple), "Parameter 'left_range' must either be an integer or a tuple!"
    assert len(left_range) == 2, "Parameter 'left_range' must be a tuple of length 2!"
    assert left_range[0] > left_range[1] or left_range == (0, 0), ("The left-side range modifier of left_range must be "
                                                                   "less than the right-side!")
    assert isinstance(right_range, tuple), "Parameter 'right_range' must either be an integer or a tuple!"
    assert len(right_range) == 2, "Parameter 'right_range' must be a tuple of length 2!"
    assert right_range[0] < right_range[1] or right_range == (0, 0), ("The right-side range modifier of left_range must"
                                                                      " be greater than the left-side!")
    bedfile = Path(bedfile)
    assert bedfile.exists(), "Given bedfile path does not exist!"
    assert bedfile.is_file(), "Given bedfile path was not a file! Did you provide a directory?"
    # Output names encode the offset from the record and the span size.
    leftbed = bedfile.with_name(bedfile.stem +
                                "_left_Offset{0}_Size{1}".format(left_range[1],
                                                                 left_range[0] - left_range[1]) +
                                bedfile.suffix)
    rightbed = bedfile.with_name(bedfile.stem +
                                 "_right_Offset{0}_Size{1}".format(right_range[1],
                                                                   right_range[0] - right_range[1]) +
                                 bedfile.suffix)
    # chrom -> size lookup for clamping; None when no genome file was given.
    granges = {chrm: int(size) for chrm, size
               in [line.strip().split("\t") for line in open(genome_file)]} if genome_file else None
    with bedfile.open() as bf, leftbed.open("w") as lbf, rightbed.open("w") as rbf:
        records = (line.strip().split('\t')[0:4] for line in bf)
        for (chr, s, e, id) in records:
            if left_range != (0, 0):
                # Candidate left flank: [start - left_range[0], start - left_range[1]).
                left = [chr,
                        int(s) - left_range[0],
                        int(s) - left_range[1],
                        id + "_left"]
                ldiff = 0
                if left[2] > left[1] > 0:
                    # Fully inside the chromosome: record the span as requested.
                    left[3] += "_offset-{0}_size-{1}".format(left_range[1],
                                                             left[2] - left[1])
                else:
                    if left[1] < 0:
                        # Ran past the chromosome start: shift right to 0,
                        # without extending past the record's own start.
                        ldiff = -left[1]  # note its '-' because left[1] is negative
                        left[2] += ldiff
                        left[2] = left[2] if left[2] <= int(s) else int(s)
                        left[1] = 0
                        if left[1] == left[2]:
                            # Degenerate zero-length span: widen to 1 base.
                            left[2] += 1
                            ldiff -= 1
                        left[3] += "_offset-{0}_size-{1}".format(left_range[1] - ldiff,
                                                                 left[2] - left[1])
                    else:
                        left[3] += "_offset-{0}_size-{1}".format(left_range[1],
                                                                 left[2] - left[1])
                left = (str(i) for i in left)
                lbf.write('\t'.join(left) + "\n")
            if right_range != (0, 0):
                # Candidate right flank: [end + right_range[0], end + right_range[1]).
                right = [chr,
                         int(e) + right_range[0],
                         int(e) + right_range[1],
                         id + "_right"]
                if granges:
                    if granges[chr] <= right[2] or granges[chr] <= right[1]:
                        # Ran past the chromosome end: clamp the end and shift
                        # the start left by the same amount, without extending
                        # left of the record's own end.
                        rdiff = granges[chr] - right[2]
                        right[2] = granges[chr]
                        right[1] += rdiff
                        right[1] = right[1] if right[1] >= int(e) else int(e)
                        if right[2] == right[1]:
                            # Degenerate zero-length span: widen to 1 base.
                            right[1] -= 1
                            rdiff -= 1
                        right[3] += "_offset-{0}_size-{1}".format(right_range[0] + rdiff,
                                                                  right[2] - right[1])
                    else:
                        right[3] += "_offset-{0}_size-{1}".format(right_range[0],
                                                                  right[2] - right[1])
                else:
                    right[3] += "_offset-{0}_size-{1}".format(right_range[0],
                                                              right[2] - right[1])
                right = (str(i) for i in right)
                rbf.write('\t'.join(right) + "\n")
    return
def bed_extract_duplicates(bedfile, outfile="", verbose=False):
    """Write BED records whose base name occurs more than once to a file.

    A record's base name is its id minus the trailing '_<suffix>' component
    (e.g. 'geneA_left' -> 'geneA'). Records sharing a base name with at least
    one other record are written to ``outfile`` (default: the input path with
    a '.bed.dups' suffix).

    :param bedfile: path to a BED file readable by ``read_bed`` (keyed by id).
    :param outfile: optional output path; empty string selects the default.
    :param verbose: print each duplicate id together with its count.
    """
    bedfile = Path(bedfile)
    assert bedfile.exists(), "Given bedfile path does not exist!"
    assert bedfile.is_file(), "Given bedfile path was not a file! Did you provide a directory?"
    bed_dict = read_bed(bedfile)
    hits = sorted(bed_dict.keys())

    def base_name(hit_id):
        # Id without its trailing '_<suffix>' component.
        return '_'.join(hit_id.split("_")[:-1])

    # BUGFIX: counts were built with ''.join(parts[:-1]) but looked up with
    # parts[0]; for ids containing more than one underscore those keys never
    # matched. Use the same base-name key on both sides.
    counts = Counter(base_name(hit) for hit in hits)
    duphits = (hit for hit in hits if counts[base_name(hit)] > 1)
    outfile = Path(outfile) if outfile else bedfile.with_suffix(".bed.dups")
    try:
        # Pull the first duplicate eagerly so an empty result skips file
        # creation entirely (StopIteration -> no output file).
        first = next(duphits)
        if verbose:
            print(first, "\t", counts[base_name(first)])
        with outfile.open("w") as of:
            of.write("\t".join(str(i) for i in bed_dict[first]) + "\n")
            for hit in duphits:
                if verbose:
                    print(hit, "\t", counts[base_name(hit)])
                of.write("\t".join(str(i) for i in bed_dict[hit]) + "\n")
    except StopIteration:
        if verbose:
            print("No duplicates found in file!")
def merge_ids(fasta):
    """Concatenate FASTA records sharing a base id (sans '_left'/'_right')
    and write the merged records to ``<fasta>_joined`` beside the input."""
    source = Path(fasta)
    with source.with_name(source.name + "_joined").open('w') as handle:
        from Bio import SeqIO
        merged = {}
        for record in SeqIO.parse(fasta, "fasta"):
            # Strip the flank suffix so left/right halves share one id.
            base = record.id.split("_left")[0].split("_right")[0]
            record.id = base
            if base in merged:
                # Append the sequence and keep every original description.
                merged[base].seq += record.seq
                merged[base].description += "\t" + record.description
            else:
                merged[base] = record
        SeqIO.write((rec for rec in merged.values()), handle, "fasta")
class BLASTSearchParameters(object):
    """Parameter bundle for a BLAST-based RecBlast search.

    Holds score/identity/span/length thresholds, database locations, and
    whether the search runs locally or remotely against NCBI. Any extra
    keyword arguments are attached verbatim as attributes.
    """

    def __init__(self, blast_type, blastdb_path, blast_db="auto", expect=10, perc_score=0.009, perc_span=0.1,
                 ncbi_search=False, perc_ident=0.69, perc_length=0.001, megablast=True, blastdb_version='auto',
                 email='', **kwargs):
        self.search_type = blast_type
        self.search_local = not ncbi_search
        self.email = email
        self.expect = expect
        self.perc_score = perc_score
        self.perc_ident = perc_ident
        self.perc_span = perc_span
        self.perc_length = perc_length
        self.megablast = megablast
        self.id_db_version = blastdb_version
        self.id_db_path = blastdb_path
        # A dict maps species to databases; a string names one database;
        # anything else falls back to automatic lookup.
        self.search_db = blast_db if isinstance(blast_db, (dict, str)) else "auto"
        # BUGFIX: iterating a dict yields keys only; the original
        # 'for k, v in kwargs:' raised ValueError for any extra keyword
        # argument. .items() yields the (name, value) pairs.
        for k, v in kwargs.items():
            setattr(self, k, v)
        if ncbi_search:
            assert "@" in self.email, "If using NCBI for remote BLAST searching, a valid email must be set!"
class BLATSearchParameters(object):
    """Parameter bundle for a BLAT/gfServer-based RecBlast search.

    Mirrors BLASTSearchParameters but targets 2bit files served over
    gfServer ports; the host name is stored as ``search_local``.
    """

    def __init__(self, blat_type, twobit_path, twobit_port_dict, gfserver_host="localhost",
                 expect=10, perc_score=0.009, perc_span=0.1, perc_ident=0.69,
                 perc_length=0.001, twobit_file_dict="auto", twobit_version='auto'):
        self.search_type = blat_type
        self.expect = expect
        self.perc_score = perc_score
        self.perc_ident = perc_ident
        self.perc_span = perc_span
        self.perc_length = perc_length
        self.search_local = gfserver_host
        self.id_db_version = twobit_version
        self.id_db_path = twobit_path
        # Accept either a per-species mapping or a single file name;
        # anything else falls back to automatic lookup.
        if isinstance(twobit_file_dict, (dict, str)):
            self.id_db = twobit_file_dict
        else:
            self.id_db = "auto"
        self.search_db = twobit_port_dict
        self.id_source = "twobit"
class SQLServerParameters(object):
    """Connection parameters for a BioSQL-style SQL sequence database."""

    def __init__(self, host='localhost', id_db='bioseqdb', user='postgres', driver='psycopg2',
                 password='', id_db_version='auto'):
        self.id_source = 'sql'
        self.driver = driver
        self.host = host
        # The original assigned id_db twice; one assignment suffices.
        self.id_db = id_db
        self.user = user
        self.password = password
        self.id_db_version = id_db_version
| docmanny/RecSearch | RecBlast/Auxilliary.py | Auxilliary.py | py | 40,401 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.write",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_numbe... |
1253707592 | import time
from selenium import webdriver
# browser = webdriver.Firefox()
# browser.get('https://www.jd.com/')
# browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
# browser.execute_script('alert("123")')
# browser.close()
#
#
#
# print("=================================================")
bro = webdriver.Firefox()
bro.get('https://www.taobao.com')
#节点定位 find系列的方法
input_text = bro.find_element_by_id('q')
#节点交互
input_text.send_keys('苹果')
time.sleep(2)
#执行js程序(js注入)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(2)
btn = bro.find_element_by_css_selector('.btn-search')
btn.click()
time.sleep(3)
bro.quit() | lyk4411/untitled | pythonWebCrawler/JavaScriptFireFox.py | JavaScriptFireFox.py | py | 711 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.