seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21025162572 | #!/usr/bin/env python3
"""
Implementation of R3PTAR
"""
import logging
import signal
import sys
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D, MediumMotor, LargeMotor
from ev3dev2.sensor.lego import InfraredSensor
from ev3dev2.sound import Sound
from threading import Thread, Event
from time import sleep
log = logging.getLogger(__name__)
class MonitorRemoteControl(Thread):
    """
    Background thread that watches R3PTAR's InfraredSensor: triggers the
    hiss-and-strike animation when something comes close, and dispatches
    remote-control button events to their registered handlers.
    """

    def __init__(self, parent):
        Thread.__init__(self)
        self.parent = parent
        self.shutdown_event = Event()

    def __str__(self):
        return "MonitorRemoteControl"

    def run(self):
        strike_speed_pct = 40
        robot = self.parent
        while not self.shutdown_event.is_set():
            #log.info("proximity: %s" % robot.remote.proximity)
            if robot.remote.proximity < 30:
                # Something is close: hiss (non-blocking) and strike
                robot.speaker.play('snake-hiss.wav', Sound.PLAY_NO_WAIT_FOR_COMPLETE)
                robot.strike_motor.on_for_seconds(speed=strike_speed_pct, seconds=0.5)
                robot.strike_motor.on_for_seconds(speed=-strike_speed_pct, seconds=0.5)
            # Dispatch any pending remote-control events
            robot.remote.process()
            sleep(0.01)
        log.info('%s: shutdown_event is set' % self)
class R3PTAR(object):
    """
    LEGO MINDSTORMS R3PTAR snake robot.

    Wires the IR remote (channel 1) to the drive/steer motors, runs a
    background thread that strikes when something comes close, and
    installs signal handlers so the robot shuts down cleanly.
    """

    def __init__(self,
                 drive_motor_port=OUTPUT_B,
                 strike_motor_port=OUTPUT_D,
                 steer_motor_port=OUTPUT_A,
                 drive_speed_pct=60):
        steer_speed_pct = 30
        self.drive_motor = LargeMotor(drive_motor_port)
        self.strike_motor = LargeMotor(strike_motor_port)
        self.steer_motor = MediumMotor(steer_motor_port)
        self.speaker = Sound()
        self.remote = InfraredSensor()
        # Channel-1 buttons: left pair drives forward/backward,
        # right pair steers left/right
        self.remote.on_channel1_top_left = self.make_move(self.drive_motor, drive_speed_pct)
        self.remote.on_channel1_bottom_left = self.make_move(self.drive_motor, -drive_speed_pct)
        self.remote.on_channel1_top_right = self.make_move(self.steer_motor, steer_speed_pct)
        self.remote.on_channel1_bottom_right = self.make_move(self.steer_motor, -steer_speed_pct)
        self.shutdown_event = Event()
        self.mrc = MonitorRemoteControl(self)
        # Register our signal handlers so a 'kill' or a Ctrl-C shuts the
        # robot down instead of leaving motors running
        signal.signal(signal.SIGTERM, self.signal_term_handler)
        signal.signal(signal.SIGINT, self.signal_int_handler)

    def make_move(self, motor, speed):
        """Build a remote-button handler: run `motor` while the button is
        held, stop it on release."""
        def handler(pressed):
            if pressed:
                motor.on(speed)
            else:
                motor.stop()
        return handler

    def shutdown_robot(self):
        """Idempotent shutdown: stop the monitor thread, detach the remote
        handlers, cut power to every motor and wake up main()."""
        if self.shutdown_event.is_set():
            return
        self.shutdown_event.set()
        log.info('shutting down')
        self.mrc.shutdown_event.set()
        self.remote.on_channel1_top_left = None
        self.remote.on_channel1_bottom_left = None
        self.remote.on_channel1_top_right = None
        self.remote.on_channel1_bottom_right = None
        for motor in (self.drive_motor, self.strike_motor, self.steer_motor):
            motor.off(brake=False)
        self.mrc.join()

    def signal_term_handler(self, signal, frame):
        log.info('Caught SIGTERM')
        self.shutdown_robot()

    def signal_int_handler(self, signal, frame):
        log.info('Caught SIGINT')
        self.shutdown_robot()

    def main(self):
        """Start monitoring the remote and block until shutdown."""
        self.mrc.start()
        self.shutdown_event.wait()
if __name__ == '__main__':
    # Change level to logging.DEBUG for more details
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)5s %(filename)s: %(message)s")
    log = logging.getLogger(__name__)
    # Color the errors and warnings in red
    logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR))
    logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))
    log.info("Starting R3PTAR")
    # Build the robot and block until a SIGTERM/SIGINT triggers shutdown
    snake = R3PTAR()
    snake.main()
    log.info("Exiting R3PTAR")
| ev3dev/ev3dev-lang-python-demo | robots/R3PTAR/r3ptar.py | r3ptar.py | py | 4,270 | python | en | code | 59 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "threadin... |
5995812394 | #!/usr/bin/env python
import numpy as np
import healpy as hp
import pylab
import matplotlib.pyplot as plt
import time
import mocklc
import matplotlib
import sepmat
import gpkernel
import scipy
import emcee
import sys
import time
Ns=2000  # number of MCMC steps
np.random.seed(17)  # fixed seed so the mock data is reproducible
#set geometry
inc=45.0/180.0*np.pi     # orbital inclination [rad]
Thetaeq=np.pi            # equinox phase
zeta=60.0/180.0*np.pi    # obliquity [rad]
Pspin=23.9344699/24.0 #Pspin: a sidereal day
wspin=2*np.pi/Pspin      # spin angular frequency
Porb=365.242190402       # orbital period [days]
worb=2*np.pi/Porb        # orbital angular frequency
Ni=1024                  # number of light-curve samples
obst=np.linspace(0.0,Porb,Ni)  # observation times over one orbit
# test map
nside=16
npix=hp.nside2npix(nside)
# NOTE(review): hard-coded absolute path; only works on the author's machine
mmap=hp.read_map("/home/kawahara/exomap/sot/data/mockalbedo16.fits")
mask=(mmap>0.0)
mmap[mask]=1.0  # binarize the mock albedo map
M=len(mmap)
#generating light curve
Thetav=worb*obst                  # orbital phase at each time
Phiv=np.mod(wspin*obst,2*np.pi)   # spin phase at each time
WI,WV=mocklc.comp_weight(nside,zeta,inc,Thetaeq,Thetav,Phiv)
W=WV[:,:]*WI[:,:]                 # combined weight (illumination x visibility)
lc=np.dot(W,mmap)                 # noiseless light curve
sigma=np.mean(lc)*0.1             # 10% noise level
noise=np.random.normal(0.0,sigma,len(lc))
lc=lc+noise                       # observed (noisy) light curve
## RBF kernel
nside=16
npix=hp.nside2npix(nside)
sep=sepmat.calc_sepmatrix(nside)  # pairwise angular separations between pixels
## optimization
tag="RBFobl"
## spin and hyperparameter MCMC sampling using emcee
def log_prior(theta):
    """Log prior density for theta = (zeta, Thetaeq, gamma, alpha).

    Isotropic prior on the spin axis (proportional to sin(zeta)) and
    log-uniform priors on the kernel hyperparameters; -inf outside the
    supported box.
    """
    zeta_p, thetaeq_p, gamma_p, alpha_p = theta
    in_support = (
        0.0 < zeta_p < np.pi
        and 0.0 < thetaeq_p < 2 * np.pi
        and 1.e-10 < gamma_p < np.pi / 3.0
        and 1.e-10 < alpha_p
    )
    if not in_support:
        return -np.inf
    return np.log(np.sin(zeta_p) / alpha_p / gamma_p)
def log_likelihood(theta, d, covd):
    # GP marginal log-likelihood of the light curve d given geometry and
    # kernel hyperparameters theta = (zeta, Thetaeq, gamma, alpha).
    # NOTE(review): relies on module globals nside, inc, Thetav, Phiv and
    # sep — assumes d has length Ni and covd is (Ni, Ni); confirm callers.
    p_zeta,p_Thetaeq,p_gamma,p_alpha=theta
    WI,WV=mocklc.comp_weight(nside,p_zeta,inc,p_Thetaeq,Thetav,Phiv)
    Wp=WV[:,:]*WI[:,:]
    #KS=p_alpha*gpkernel.Matern32(sep,p_gamma)
    KS=p_alpha*gpkernel.RBF(sep,p_gamma)  # spatial RBF kernel on the sphere
    Cov = covd + Wp@KS@Wp.T  # data covariance + projected prior covariance
    sign,logdet=np.linalg.slogdet(Cov)
    Pi_d=scipy.linalg.solve(Cov,d,assume_a="pos")  # Cov^{-1} d
    prop = -0.5*logdet-0.5*d@Pi_d #-0.5*np.shape(cov)[0]*np.log(2.0*np.pi)
    return prop
def log_probability(theta, d, covd):
    """Log posterior: prior plus likelihood, -inf off the prior support."""
    prior = log_prior(theta)
    if np.isfinite(prior):
        return prior + log_likelihood(theta, d, covd)
    return -np.inf
gam0=0.29298260376811              # initial kernel length scale
alpha0=sigma**2*0.774263682681127  # initial kernel amplitude (scaled by noise)
# 16 walkers, 4 parameters, initialized in a small ball around the truth
pos = np.array([zeta,Thetaeq,gam0,alpha0])+ 1e-4 * np.random.randn(16, 4)
nwalkers, ndim = pos.shape
#Assumming we know the data covariance
covd=sigma**2*np.eye(Ni)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(lc, covd))
sampler.run_mcmc(pos, Ns, progress=True);
# Discard burn-in and thin the chain
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
#samples = sampler.get_chain()
#print(samples)
labels=["zeta","Thetaeq","gamma","alpha"]
inputgeo=[inc,Thetaeq,zeta,Pspin,Porb,obst]
# Save samples alongside the inputs needed to reproduce the run
np.savez("flat_sample"+tag,flat_samples,W,lc,inputgeo)
import corner
fig = corner.corner(flat_samples, labels=labels, truths=[zeta,Thetaeq,None,None])
plt.savefig("corner_"+tag+".png")
plt.savefig("corner_"+tag+".pdf")
plt.show()
| HajimeKawahara/sot | src/sot/dymap/static_sampling.py | static_sampling.py | py | 2,866 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"lin... |
21397302549 | #!/usr/bin/env python3
import argparse
import os
import re
import dataclasses
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, List
"""
Supports following cases:
1. Master version x.y.z needs to be bumped to x.y.z when preparing for official release:
git checkout cluster-test
git merge master
# version = x.y.z
version_bumper.py
# version = x.y.z+1
version_bumper.py --part=minor
# version = x.y+1.0
version_bumper.py --part=major
# version = x+1.0.0
2. Master version x.y.z needs to be bumped to x.y.z-mr-1 when making dev release from feature branch:
git co 123-my-branch
# version = x.y.z
version_bumper.py --mr 123
# version = x.y.z-123-1
And then another call should just bump the dev-version:
version_bumper.py --mr 123
# version = x.y.z-123-2
"""
@dataclass
class Version:
    """Semantic version with an optional dev-release suffix '-<mr>-<dev>'."""
    major: int
    minor: int
    patch: int
    mr: int   # merge request id (0 means not a dev release)
    dev: int  # sequentially increasing dev counter (0 means not a dev release)

    def __str__(self):
        suffix = ''
        if self.mr > 0:
            suffix += f'-{self.mr}'
        if self.dev > 0:
            suffix += f'-{self.dev}'
        return f'{self.major}.{self.minor}.{self.patch}' + suffix

    def bump(self, part: str):
        """Increment one part; bumping major/minor zeroes the parts below."""
        setattr(self, part, getattr(self, part) + 1)
        if part == 'major':
            self.minor = 0
            self.patch = 0
        elif part == 'minor':
            self.patch = 0

    def clone(self) -> 'Version':
        """Return an independent copy of this version."""
        return dataclasses.replace(self)
def read_current_version(filepath: Path) -> Version:
    """Return the first parsable Version found in *filepath*.

    Raises RuntimeError when no line matches the TAG pattern.
    """
    for text_line in filepath.read_text().splitlines():
        version = parse_version(text_line)
        if version is not None:
            return version
    raise RuntimeError('version could not be parsed from ' + str(filepath))
# match X.Y.Z or X.Y.Z-W
# broken down at https://regex101.com/r/IAccOs/3
# Captures the TAG variable from a Makefile line such as "TAG ?= 1.2.3-45-6";
# an optional dev suffix lands in the 'details' group for parse_details().
main_regex = r'TAG\s*\?=\s*?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)[\-\.]?(?P<details>[\-\w]+)?'
main_pattern = re.compile(main_regex)
def parse_version(line: str) -> Optional[Version]:
    """Parse one Makefile TAG line into a Version, or None when it
    does not match the expected pattern."""
    match = main_pattern.match(line)
    if match is None:
        return None
    version = Version(
        major=int(match.group('major')),
        minor=int(match.group('minor')),
        patch=int(match.group('patch')),
        mr=0,
        dev=0,
    )
    details = match.group('details')
    if details is not None:
        parse_details(details, version)
    return version
# match X-Y
# broken down at https://regex101.com/r/jtlQ54/3
# Splits a dev-release suffix "X-Y" into merge-request id X and dev counter Y.
details_regex = r'(?P<mr>\d+)[\-](?P<dev>\d+)'
details_pattern = re.compile(details_regex)
def parse_details(details: str, ver: Version):
    """Fill ver.mr / ver.dev from an 'X-Y' dev suffix; no-op otherwise."""
    matched = details_pattern.match(details)
    if not matched:
        return
    ver.mr = int(matched.group('mr'))
    ver.dev = int(matched.group('dev'))
def replace_in_files(curr_ver: Version, new_ver: Version, files: List[Path]):
    """Apply the curr_ver -> new_ver replacement to every listed file."""
    for filepath in files:
        replace_in_file(filepath, curr_ver, new_ver)
def replace_in_file(filepath: Path, curr_ver: Version, new_ver: Version):
content = filepath.read_text()
new_content = content.replace(str(curr_ver), str(new_ver))
if content != new_content:
filepath.write_text(new_content)
print(f'Version bumped {curr_ver} -> {new_ver} in {filepath}')
else:
raise RuntimeError(f'Version "{curr_ver}" not found in {filepath}')
def project_root() -> Path:
    """Return Racetrack root dir (two levels above this script)."""
    script_dir = Path(os.path.abspath(__file__)).parent
    return script_dir.parent.absolute()
def bump_version_in_files(version_path: Path, _args, files: List[Path], prod_files: List[Path]):
    """Read the current version from version_path, compute the next one
    and rewrite it in all target files.

    Behavior by CLI flags:
    - --current: only print the current version, change nothing.
    - --mr N (N != 0): mark the version as an MR dev-release and bump
      the dev counter; an already-dev version keeps its mr id.
    - otherwise: bump the part selected by --part (default 'patch').

    Fix: the original mutated the caller's `files` list via `+=`; we now
    build a new combined list instead of mutating the argument.
    NOTE(review): prod_files are rewritten for dev releases too, despite
    the "official releases only" comment at the call site — confirm intent.
    """
    orig_version = read_current_version(version_path)
    if _args.current:
        print(orig_version)
        return
    new_version = orig_version.clone()
    if _args.mr and int(_args.mr) != 0:
        new_version.mr = int(_args.mr)
    if new_version.mr != 0:
        new_version.bump('dev')
    else:
        new_version.bump(_args.part)
    replace_in_files(orig_version, new_version, files + prod_files)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--current', action='store_true', help='print current version')
parser.add_argument('--mr', help='set merge request number')
parser.add_argument('--part', help='defines which part to bump: major, minor, patch, dev', default="patch")
files_with_version = [
project_root() / 'Makefile',
]
# files bumped in official (non-dev) releases only
prod_files_with_version = [
project_root() / 'racetrack_client/racetrack_client/__init__.py',
]
args = parser.parse_args()
path = project_root() / 'Makefile'
bump_version_in_files(path, args, files_with_version, prod_files_with_version)
| TheRacetrack/racetrack | utils/version_bumper.py | version_bumper.py | py | 4,748 | python | en | code | 27 | github-code | 6 | [
{
"api_name": "dataclasses.replace",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "re.compile",
... |
17246495292 | #!/usr/bin/env python2
import argparse
import ast
import json
import logging
import os
from collections import namedtuple
import tqdm
import sys
sys.path.append('.')
print(sys.path)
from srcseq.astunparser import Unparser, WriterBase
def _get_number_of_lines(fobj):
    """Count the lines of an open text file, restoring the read position."""
    start = fobj.tell()
    count = sum(1 for _ in fobj)
    fobj.seek(start)
    return count


def file_tqdm(fobj):
    """Wrap an open file in a tqdm progress bar sized by its line count.

    Fixes two bugs in the original: `tqdm` here is the module (the
    callable is tqdm.tqdm), and `get_number_of_lines` was never defined
    anywhere, so the original raised NameError on first use.
    """
    return tqdm.tqdm(fobj, total=_get_number_of_lines(fobj))
SrcASTToken = namedtuple("SrcASTToken", "text type lineno col_offset")
logging.basicConfig(level=logging.INFO)
class MyListFile(list, WriterBase):
    """File-like sink for Unparser that collects SrcASTToken records
    instead of writing text to a stream."""

    def write(self, text, type=None, node=None):
        """Record one token; empty/whitespace-only text is dropped.

        Positions are taken from the originating AST node when given.
        """
        text = text.strip()
        lineno = node and node.lineno
        col_offset = node and node.col_offset
        if len(text) > 0:
            # Decode 'Str' literals to their value (Num stays a string).
            # literal_eval replaces eval(): the unparser emits a repr
            # here, and eval() would execute arbitrary expressions if the
            # input source were malformed or hostile.
            text = ast.literal_eval(text) if type == "Str" else text
            self.append(SrcASTToken(text, type, lineno, col_offset))

    def flush(self):
        # Required by the writer interface; nothing is buffered.
        pass
def my_tokenize(code_str):
    """Parse Python source text and return its tokens as a MyListFile."""
    tree = ast.parse(code_str)
    tokens = MyListFile()
    Unparser(tree, tokens)
    return tokens
def main():
    """CLI entry point: tokenize each listed Python file and append one
    JSON datapoint per file to the output JSONL file."""
    parser = argparse.ArgumentParser(
        description="Generate datapoints from source code",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--files_path", "-f", required=True,
                        help="Filepath with the filenames to be parsed")
    parser.add_argument("--save", "-o", default="/tmp/dps.jsonl",
                        help="Filepath with the output dps")
    parser.add_argument("--base_dir", "-b",
                        help="Base dir to append for the fps."
                        " If not given, use the dir of `--files_path`.")
    args = parser.parse_args()
    args.base_dir = args.base_dir or os.path.dirname(args.files_path)
    # Start from a clean output file
    if os.path.exists(args.save):
        os.remove(args.save)
    num_dps = 0
    logging.info("Loading files from: {}".format(args.base_dir))
    with open(args.files_path, "r") as fin, open(args.save, "w") as fout:
        for i_line, line in enumerate(file_tqdm(fin)):
            rel_src_fp = line.strip()
            abs_src_fp = os.path.join(args.base_dir, rel_src_fp)
            try:
                # Columns of the token list -> four parallel tuples
                values, types_, linenos, col_offsets = zip(*my_tokenize(open(abs_src_fp).read()))
                if len(values) > 1:
                    json.dump({
                        'rel_src_fp': rel_src_fp,
                        'values': values,
                        'types': types_,
                        'linenos': linenos,
                        'col_offsets': col_offsets,
                    }, fp=fout)
                    fout.write("\n")
                    num_dps += 1
                else:
                    # Skip (near-)empty token lists silently
                    # logging.info("In processing {}-th file `{}`: empty token list.".format(i_line, rel_src_fp))
                    pass
            except Exception as e:
                # One bad file should not abort the whole run
                logging.warning("In processing {}-th file `{}`:\n\t{}".format(i_line, rel_src_fp, e))
                continue
    logging.info("Wrote {} datapoints to {}".format(num_dps, args.save))
if __name__ == "__main__":
main()
| ReversalS/coop-code-learning | views/PythonExtractor/source/srcseq/generate_data.py | generate_data.py | py | 2,978 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
... |
6315981642 | from flask import Blueprint, render_template, flash, request, redirect, url_for, jsonify, abort
from app.extensions import cache, pages
from app.tasks import long_task
import flam3, io, base64, struct
from PIL import Image
# Blueprint collecting all the site's routes; registered by the app factory.
main = Blueprint('main', __name__)
@main.route('/')
@cache.cached(timeout=1000)
def home():
    """Landing page (response cached for 1000 seconds)."""
    return render_template('index.html')
@main.route('/task', methods=['GET', 'POST'])
def index():
    """Demo page for the long-running background task."""
    return render_template("longtask.html")
@main.route('/adder')
def adder():
    """Page hosting the AJAX number-adding demo."""
    return render_template("adder.html")
@main.route('/api/add_numbers')
def add_numbers():
    """JSON API: add integer query parameters a and b (default 0 each)."""
    a = request.args.get('a', 0, type=int)
    b = request.args.get('b', 0, type=int)
    return jsonify(result=a + b)
@main.route('/flam3')
def flam3_html():
    """Page hosting the flame-fractal generator UI."""
    return render_template("flam3.html")
def hex_to_rgb(hexstr):
    """Convert a '#rrggbb' color string to an (r, g, b) tuple of ints."""
    raw = bytes.fromhex(hexstr[1:])
    return struct.unpack('BBB', raw)
@main.route('/api/gen_flam3')
def gen_flam3():
    """Render a flame fractal from query parameters and return it as a
    base64 PNG data-URI in the JSON field 'result'."""
    point_count = request.args.get('point_count', 0, type=int)
    back_color = request.args.get('back_color', "#42426f", type=hex_to_rgb)
    front_color = request.args.get('front_color', "#f4a460", type=hex_to_rgb)
    # Comma-separated 0/1 flags, e.g. "1,0,1"; None means no limiting
    selection_limiter = request.args.get('selection_limiter', None, type=str)
    colors = (back_color, front_color)
    print('selection is', selection_limiter)
    # Make sure selection limiter is sane
    if selection_limiter is None:
        selection_limiter = [False]*point_count
    else:
        selection_limiter = [bool(int(i)) for i in selection_limiter.split(',')]
    # Generate the fractal
    print(selection_limiter)
    mat_points = flam3.Fractal(point_count=point_count, selection_limiter=selection_limiter).execute()
    # Convert fractal data to a matrix of color
    img_mat = flam3.point_to_image_mat(mat_points)
    img = flam3.mat_to_color(img_mat, colors=colors)
    # Save data to BytesIO file object, then embed it as a data URI
    im = Image.fromarray(img)
    f = io.BytesIO()
    im.save(f, format='png')
    f.seek(0)
    return jsonify(result="data:image/png;base64,"+base64.b64encode(f.read()).decode())
@main.route('/status/<task_id>')
def taskstatus(task_id):
    """Report the state/progress of a background Celery task as JSON."""
    task = long_task.AsyncResult(task_id)
    if task.state == 'PENDING':
        # job did not start yet
        response = {
            'state': task.state,
            'current': 0,
            'total': 1,
            'status': 'Pending...'
        }
    elif task.state != 'FAILURE':
        # running or finished: task.info carries the progress dict
        response = {
            'state': task.state,
            'current': task.info.get('current', 0),
            'total': task.info.get('total', 1),
            'status': task.info.get('status', '')
        }
        if 'result' in task.info:
            response['result'] = task.info['result']
    else:
        # something went wrong in the background job
        response = {
            'state': task.state,
            'current': 1,
            'total': 1,
            'status': str(task.info),  # this is the exception raised
        }
    return jsonify(response)
@main.route('/<path:folder>/<path:path>/')
def page(folder, path):
    """Render a single flat page at <folder>/<path>; 404 if missing."""
    return render_template('page.html', folder=folder, page=pages.get_or_404(folder, path), page_title=path)
@main.route('/<path:folder>/')
def folder(folder):
    """List the pages of one folder, titled from the folder name."""
    folder_dict = sorted(pages.get_or_404(folder=folder))
    # 'my_folder' -> 'My Folder'
    page_title = folder.replace('_', ' ').title()
    return render_template('folder.html', folder=folder, pages=folder_dict, page_title=page_title)
@main.route('/topics/')
def folders():
    """List every available topic folder.

    NOTE(review): reaches into the private `pages._pages` attribute —
    confirm there is no public accessor on the pages extension.
    """
    return render_template('folders.html', folders=pages._pages)
| akotlerman/flask-website | app/controllers/main.py | main.py | py | 3,537 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.extensions.cache.cached",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.... |
72536213308 | # 爬取buff平台的商品信息
import asyncio
import aiohttp
from lxml.html import etree
import re
import json
import traceback
import os
from util import fetch_url, get_current_time_str
from models import PriceInfo
import urllib
async def get_goods_info(url, session) -> PriceInfo:
    """Fetch one goods page and parse it into a PriceInfo.

    Retries up to 3 times on failure; raises RuntimeError when every
    attempt fails.
    """
    print(url)
    for _ in range(3):
        try:
            html_content = await fetch_url(url, session)
            result = await parse_html(html_content, session)
            result.url = url
            print(vars(result))
            return result
        except Exception:
            # Was a bare `except:` — narrowed so Ctrl-C (KeyboardInterrupt)
            # and SystemExit are no longer swallowed by the retry loop.
            traceback.print_exc()
            continue
    # All retries exhausted; message: "failed to fetch goods info"
    raise RuntimeError("商品信息获取失败")
def read_headers():
    """Read headers.txt (next to this file) into a {name: value} dict.

    One "Name: value" header per line; blank lines are skipped.
    """
    filePath = os.path.join(os.path.dirname(__file__), 'headers.txt')
    with open(filePath, 'r', encoding='utf-8') as f:
        text = f.read()
    headers = {}
    for line in text.split('\n'):
        if line:
            # maxsplit=1 so header values that themselves contain ': '
            # (e.g. some cookie/referer strings) no longer raise ValueError
            key, value = line.split(': ', 1)
            headers[key] = value
    return headers
async def get_sell_info(goods_id, session):
    # Fetch page 1 of the current sell listings for this goods id.
    sell_info_url = f"https://buff.163.com/api/market/goods/sell_order?game=dota2&goods_id={goods_id}&page_num=1&sort_by=default&mode=&allow_tradable_cooldown=1&_=1693538617921"
    sell_info = json.loads(await fetch_url(sell_info_url, session))
    return sell_info
async def get_buy_info(goods_id, session):
    # Fetch page 1 of the current buy orders for this goods id.
    buy_info_url = f"https://buff.163.com/api/market/goods/buy_order?game=dota2&goods_id={goods_id}&page_num=1&_=1693540558052"
    buy_info = json.loads(await fetch_url(buy_info_url, session))
    return buy_info
async def get_deal_info(goods_id, session):
    # Fetch the recent transaction (deal) history for this goods id.
    deal_info_url = f"https://buff.163.com/api/market/goods/bill_order?game=dota2&goods_id={goods_id}&_=1693543131027"
    deal_info = json.loads(await fetch_url(deal_info_url, session))
    return deal_info
async def parse_html(htmlContent, session) -> PriceInfo:
    # Parse the goods page HTML and return the aggregated price info.
    root = etree.HTML(htmlContent)
    # Goods name (the raise message means "failed to get goods name")
    try:
        goods_name = root.xpath('//div[@class="detail-cont"]/div[1]/h1/text()')[0]
    except:
        print(htmlContent)
        raise RuntimeError("商品名称获取失败")
    # Number of items currently on sale (the regex matches
    # "currently on sale (<count>)" in the tab label)
    goods_num = root.xpath('//ul[@class="new-tab"]/li[1]/a/text()')[0]
    goods_num = re.findall("当前在售\((\d+)\)", goods_num)[0]
    goods_num = int(goods_num)
    # Steam market link and the buff goods id
    steam_url = root.xpath('//div[@class="detail-summ"]/a/@href')[0]
    goods_id = root.xpath('//a[@class="i_Btn i_Btn_mid i_Btn_D_red btn-supply-buy"]/@data-goodsid')[0]
    # Fetch sell listings, buy orders and deal history concurrently
    sell_info_task = get_sell_info(goods_id, session)
    buy_info_task = get_buy_info(goods_id, session)
    deal_info_task = get_deal_info(goods_id, session)
    sell_info, buy_info, deal_info = await asyncio.gather(sell_info_task, buy_info_task, deal_info_task)
    # Lowest asking price among current listings ("0" when none)
    lowest_price = sell_info['data']['items'][0]['price'] if sell_info['data']['items'] else "0"
    # Highest buy-order price ("0" when none)
    highest_price = buy_info['data']['items'][0]['price'] if buy_info['data']['items'] else "0"
    # Latest transaction price; unavailable when not logged in
    # (the printed message means "not logged in: cannot get latest price")
    try:
        latest_price = deal_info['data']['items'][0]['price'] if deal_info['data']['items'] else "0"
    except:
        print("未登录无法获取buff最新成交价")
        latest_price = None
    result = PriceInfo()
    result.min_price = lowest_price
    result.highest_buy_price = highest_price
    result.name_cn = goods_name.strip()
    result.steamUrl = steam_url
    result.update_time = get_current_time_str()
    result.latest_sale_price = latest_price
    result.name_en = steam_url.split('/')[-1].split('?')[0]
    # URL-decode the English market hash name
    result.name_en = urllib.parse.unquote(result.name_en).strip()
    result.goods_id = goods_id
    return result
async def getGoodsUrls(session):
    # Collect goods-page URLs from the first 5 pages of the market list.
    url = "https://buff.163.com/api/market/goods?game=dota2&page_num={}&_=1693544159600"
    urls = []
    for pageNum in range(1, 6):
        goods_info = json.loads(await fetch_url(url.format(pageNum), session))
        goods_base_url = "https://buff.163.com/goods/{}?from=market#tab=selling"
        urls += [goods_base_url.format(i["id"]) for i in goods_info['data']['items']]
    return urls
def update_price_info(priceInfo: PriceInfo) -> PriceInfo:
    """Re-fetch the price info for priceInfo.url and return the fresh data.

    Raises RuntimeError (message: "url is empty") when the record has
    no stored URL.
    """
    url = priceInfo.url
    if not url:
        # TODO: derive the url from the hash_name instead of failing
        raise RuntimeError("url为空")

    async def task():
        async with aiohttp.ClientSession() as session:
            return await get_goods_info(url, session)

    # asyncio.run replaces the deprecated
    # get_event_loop().run_until_complete pattern (DeprecationWarning
    # since Python 3.10 when no loop is running in the thread).
    return asyncio.run(task())
| ZangYUzhang/aeyl-steam | buff_spider/__init__.py | __init__.py | py | 4,898 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "util.fetch_url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.PriceInfo",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.join",
... |
22293771882 | #!/usr/bin/python3
"""This module contains decorator functions for the views. These includes:
- token_required
"""
import jwt
from functools import wraps
from flask import request, make_response
from os import environ
from flask import jsonify
SECRET_KEY = environ.get('SECRET_KEY')
def token_required(f):
    """Decorator: require a valid JWT in the 'x-token' header or query arg.

    On success the wrapped view is called as f(user_email, *args, **kwargs);
    otherwise a 403 JSON response pointing at the login page is returned.
    """
    @wraps(f)
    def decorator(*args, **kwargs):
        token = request.headers.get('x-token') or request.args.get('x-token')
        if not token:
            # Explicit missing-token branch. The original relied on
            # catching AttributeError, but jwt.decode(None) raises a jwt
            # error instead, so 'token is missing' was never returned.
            response = make_response(jsonify({'error': 'token is missing'}), 403)
            response.headers['location'] = 'http://0.0.0.0:5000/login'
            return response
        try:
            # PyJWT expects `algorithms` as a list of accepted algorithms
            data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
            user_email = data['email']
            return f(user_email, *args, **kwargs)
        except Exception as e:
            print(e)
            response = make_response(jsonify({'error': 'invalid token'}), 403)
            response.headers['location'] = 'http://0.0.0.0:5000/login'
            return response
    return decorator
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.hea... |
33207147676 | from fastapi import HTTPException, status
from db.models import DbLeague
from routers.schemas import LeagueBase
from routers.slug import name_to_slug
from sqlalchemy.orm import Session
def add_team(db: Session, request: LeagueBase):
league = DbLeague(
name=request.name,
country=request.country,
img=f"images/leagues/{request.img}",
slug=name_to_slug(request.name)
)
db.add(league)
db.commit()
db.refresh(league)
return league
def get_all_teams(db: Session):
return db.query(DbLeague).all()
def get_team_id(db: Session, league_id: int):
league = db.query(DbLeague).filter(DbLeague.id == league_id).first()
if not league:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="League has not been found!")
return league | rbujny/League-Team-Players | db/db_league.py | db_league.py | py | 816 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "routers.schemas.LeagueBase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "db.models.DbLeague",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rou... |
8891107927 | import cv2
import numpy as np
# Haar cascade detecting full human bodies in each video frame
classify_body = cv2.CascadeClassifier('haarcascade_fullbody.xml')
vid_capture = cv2.VideoCapture('people_walking.mp4')
while vid_capture.isOpened():
    ret, frame = vid_capture.read()
    if not ret:
        # End of video or read failure: the original crashed here,
        # passing frame=None into cv2.resize
        break
    # Downscale to half size to speed up detection
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
    grayscale_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.2, minNeighbors=3
    bodies_detected = classify_body.detectMultiScale(grayscale_img, 1.2, 3)
    # Draw a yellow box around each detected pedestrian
    for (x, y, w, h) in bodies_detected:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow('Pedestrian', frame)
    if cv2.waitKey(1) == 13:  # Enter key quits
        break
vid_capture.release()
cv2.destroyAllWindows()
| RudraCS18/Object-Detection-using-openCV-python | pedestrian detection.py | pedestrian detection.py | py | 707 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LINEAR",
... |
4956366915 | from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Pricebaba(models.Model):
    """Personnel/contact record managed by the HR CRM."""
    first_name = models.CharField(max_length=100, null=False)
    last_name = models.CharField(max_length=100, null=False)
    email = models.EmailField(max_length=254)
    # 10-digit Indian mobile number (starts with 7/8/9)
    mobile = models.IntegerField(
        validators=[MinValueValidator(7000000000), MaxValueValidator(9999999999)],
        null=False)
    # max_length removed: it is not valid on IntegerField and triggers
    # a Django system-check warning (fields.W122). Semicolons dropped.
    age = models.IntegerField(null=False)
    dob = models.DateField()
    location = models.CharField(max_length=100, null=False)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, default='1')

    def details_edit(self):
        """Return the URL of this record's edit page."""
        return f"/user_edit/{self.id}/"
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
44083675715 | from typing import Iterable
from scapy.all import *
from scapy.layers.inet import IP
def ip_from_packets(packets: Iterable) -> str:
    """
    Get the IP of the machine where the packets are recorded.

    That machine appears as source or destination in (almost) every
    packet, so the most frequent address across all packets wins.
    :param packets: list of packets
    :return: ip address
    """
    counts = {}
    for packet in packets:
        if IP not in packet:
            continue
        for ip_address in (packet[IP].src, packet[IP].dst):
            counts[ip_address] = counts.get(ip_address, 0) + 1
    return max(counts, key=counts.get)
def ip_from_pcap(file: str) -> str:
    """
    Wrap ip_from_packets() so it can read straight from a pcap file.
    :param file: file name/path
    :return: ip address
    """
    return ip_from_packets(rdpcap(file))
if __name__ == "__main__":
print(ip_from_pcap("capture.pcap"))
print(ip_from_pcap("trickbot.pcapng"))
print(ip_from_pcap("trickbot2.pcapng"))
| llmhyy/malware-traffic | Experiments/exp16_visualisation/ip_from_pcap.py | ip_from_pcap.py | py | 925 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "typing.Iterable",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "scapy.layers.inet.IP",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "scapy.layers.inet.IP",
"line_number": 17,
"usage_type": "name"
}
] |
27388540421 | from discord.ext import commands
import biscuitfunctions as bf
async def fixprivs(context):
    """Command check: only quaid/quaidling/tesseract ranks may run !fix."""
    allowed_ranks = ('quaid', 'quaidling', 'tesseract')
    return bf.getprivs(context) in allowed_ranks
class admin(commands.Cog):
    """Administrative commands: user-id lookup and bot self-repair."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(
        name='getid',
        pass_context = True)
    async def getid(self, context):
        """DM the caller their Discord id and delete the invoking message."""
        authid = context.author.id
        await context.author.send(f"Your id is {authid}")
        await context.message.delete()

    @commands.command(
        name='fix',
        pass_context = True,
        help="Takes no arguments.\nShould fix most issues with the bot.\nRun once and check problem, if it persists run it again.\nRunning more than twice does not help.")
    @commands.check(fixprivs)
    async def fix(self, context):
        """First run: force-disconnect all voice clients. Second run (no
        voice clients left): reload every loaded extension."""
        await context.send("I'm trying to fix myself!", delete_after=60)
        connections = ""
        print(self.bot.voice_clients)
        if self.bot.voice_clients:
            for x in self.bot.voice_clients:
                await x.disconnect(force=True)
                # BUG FIX: was `connection + ...` — an undefined name that
                # raised NameError on the first disconnected channel
                connections = connections + f"{x.channel}, "
            await context.send(f"I disconnected from the following channels: {connections[:-2]}", delete_after=60)
            await context.send("If that doesn't work, try running !fix again")
            return
        else:
            await context.send("I am not connected to any voice channels, reloading all extensions", delete_after=60)
            # Snapshot the keys: reloading mutates bot.extensions
            extensions = list(self.bot.extensions.keys())
            print(extensions)
            for ext in extensions:
                try:
                    self.bot.reload_extension(ext)
                    await context.message.channel.send("```{} reloaded```".format(ext), delete_after=60)
                    print(f"----------------- \nReloaded {ext}\n ----------------- ")
                except Exception as e:
                    await context.message.channel.send("```py\n{}: {}\n```".format(type(e).__name__, str(e)), delete_after=60)
                    print("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
            await context.send("I have tried all my troubleshooting, if I'm still not working talk to my dad.", delete_after=60)
def setup(bot):
    # Extension entry point used by bot.load_extension('admin').
    bot.add_cog(admin(bot))
| delta1713/ButteryBiscuitBot | admin.py | admin.py | py | 2,262 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "biscuitfunctions.getprivs",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 5,
"usage_type": "name"
},
{
"api_nam... |
79843457 | import numpy as np
from scipy.linalg import lstsq
from optimal_control.basis import Basis
from optimal_control.examples.discrete import StoppingExample
from optimal_control.solvers.discrete import DiscreteValueFunction
class ModifiedForStopping(DiscreteValueFunction):
def __init__(self, example: StoppingExample, x_basis: Basis, I: int = 0, positive_continuation=True):
    """Value-function approximator for an optimal-stopping example.

    I is the reinforcement depth (look-ahead payoff steps), clipped to
    the number of remaining backward-induction steps J.
    """
    super().__init__(example)
    self.positive_continuation = positive_continuation
    J = self.n_time_steps - 2  # last decision-time index
    self.x_basis = x_basis
    self.y_max = 1  # NOTE(review): purpose unclear from this chunk — confirm
    # One coefficient vector per time step j; basis dim + 1 (extra slot
    # for the reinforced value feature), per reinforcement depth i
    self.regression_coefficients = np.zeros((J + 1, x_basis.dimension + 1, I + 1))
    self.I = I if (I <= J) else J
    self.basis_normalization = np.ones((J + 1, x_basis.dimension))
    self.reinforced_basis_normalization = np.ones((J + 1, I + 1))
def value_and_policy(self, j, Y_j, X_j, depth=0, **kwargs):
    """Evaluate continuation value / payoff at time j for each sample.

    Only samples not yet stopped (Y_j[:, 0] == 0) are evaluated; stopped
    samples keep zeros. Assumes X_j is (m, d) — TODO confirm shapes.
    """
    m, _ = X_j.shape
    J = self.n_time_steps - 2
    VH = np.zeros((m, 2))
    mask = (Y_j[:, 0] == 0)  # rows still "alive" (not stopped)
    FX = self.x_basis.transform(X_j[mask])
    m_, _ = FX.shape
    I_ = min(self.I, J - j)  # clip depth near the horizon
    # Payoffs g(j), g(j+1), ..., g(j+I_) evaluated at the current state
    H = np.zeros((m_, I_ + 1))
    for i in range(I_ + 1):
        H[:, i] = self.example.g(j + i, X_j[mask])
    VH[mask] = self.__vh__(j, FX, I_, H)
    return VH
def fit(self, X):
if np.ndim(X) == 2:
m, n = X.shape
X = X.reshape(m, n, 1)
m, n, d = X.shape
J = self.n_time_steps - 2
I = self.I
x_basis_dimension = self.x_basis.dimension
H = np.zeros((m, 2, I + 1))
H[:, 0, 0] = self.example.g(J + 1, X[:, J + 1])
FX = np.zeros((m, 2, self.regression_coefficients.shape[1]))
FX[:, 0, :x_basis_dimension] = self.x_basis.transform(X[:, J + 1, :])
for j in range(J, -1, -1):
ModifiedForStopping.__print_progression__(j, J)
FX[:, 1, :x_basis_dimension] = FX[:, 0, :x_basis_dimension]
FX[:, 0, :x_basis_dimension] = self.x_basis.transform(X[:, j, :])
H[:, 1] = H[:, 0]
for i in range(min(I, J - j) + 1):
H[:, 0, i] = self.example.g(j + i, X[:, j])
z = self.__vh__(j + 1, FX[:, 1, :x_basis_dimension], min(I, J - (j + 1)), H[:, 1])[:, 0]
if (j == 0) and (FX[:, 0, 1].var() == 0): # Only if index 0 basis function is the constant function!
z_mean = z.mean()
self.regression_coefficients[0, 0, I] = z_mean
else:
for i in range(min(I, J - j) + 1):
if i < I - j:
continue
if i == 0:
res = lstsq(FX[:, 0, :x_basis_dimension], z)[0]
self.regression_coefficients[j, :x_basis_dimension, 0] = res
else:
f = self.__vh__(j + 1, FX[:, 0, :x_basis_dimension], i - 1, H[:, 0, 1:])[:, 0]
FX[:, 0, -1] = f
res = lstsq(FX[:, 0, :], z)[0]
self.regression_coefficients[j, :, i] = res
def __vh__(self, j: int, FX, i: int, H):
m, basis_dimension = FX.shape
J = self.n_time_steps - 2
VH = np.zeros((m, 2))
VI = np.zeros((m, 2))
V = np.zeros((m, i + 1))
C = np.zeros((m, i + 1))
if j == J + 1:
VH[:, 1] = 0
VH[:, 0] = 0
else:
assert J - j >= i, "Only {}-steps to go backwards, but depth is {}.".format(J - j, i)
for u in range(0, i + 1):
s = j + i - u
C[:, s - j] = np.dot(FX, self.regression_coefficients[s, :basis_dimension, u])
if u > 0:
C[:, s - j] += V[:, s - j + 1] * self.regression_coefficients[s, -1, u]
if self.positive_continuation:
C[:, s - j] = np.maximum(C[:, s - j], 0)
VI[:, 0] = C[:, s - j]
VI[:, 1] = H[:, s - j]
if s > j:
V[:, s - j] = np.max(VI, axis=1)
if s == j:
arg_max = np.expand_dims(np.argmax(VI, axis=1), axis=1)
VH[:, 0] = np.take_along_axis(VI, arg_max, axis=1)[:, 0]
VH[:, 1] = arg_max[:, 0]
return VH
def value_all_y(self, j, X_j):
m = X_j.shape[0]
V = np.zeros((m, 2))
V[:, 0] = self.evaluate(j, np.zeros((m, 1)), X_j)
return V
@staticmethod
def __print_progression__(i, n):
print("{}/{} <-".format(i, n), flush=True, end="")
print(end="\r", flush=True)
| hagerpa/reinforced_optimal_control | optimal_control/solvers/discrete/value_function/modified_for_stopping.py | modified_for_stopping.py | py | 4,639 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "optimal_control.solvers.discrete.DiscreteValueFunction",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "optimal_control.examples.discrete.StoppingExample",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "optimal_control.basis.Basis",
"line_n... |
30195630744 | import unittest
from ops.testing import Harness
from charm import CandidCharm
class TestCharm(unittest.TestCase):
    """Unit tests for the Candid charm."""

    def setUp(self):
        # One fresh harness per test; cleanup is scheduled before begin()
        # so it runs even if begin() itself fails.
        harness = Harness(CandidCharm)
        self.addCleanup(harness.cleanup)
        harness.begin()
        self.harness = harness

    def test_website_relation_joined(self):
        # Joining the website relation should publish our serving port.
        relation_id = self.harness.add_relation("website", "apache2")
        self.harness.add_relation_unit(relation_id, "apache2/0")
        unit_name = self.harness.charm.unit.name
        data = self.harness.get_relation_data(relation_id, unit_name)
        self.assertTrue(data)
        self.assertEqual(data["port"], "8081")
| canonical/candid | charms/candid/tests/unit/test_charm.py | test_charm.py | py | 577 | python | en | code | 41 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "ops.testing.Harness",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "charm.CandidCharm",
"line_number": 10,
"usage_type": "argument"
}
] |
15565374410 | from pathlib import Path
# SQL fragment keyword used when parsing/assembling queries.
WHERE_CLAUSE = "where"

# DATABASE Connection constants
DB_USERNAME = "project1user"
DB_PASSWORD = "project1pass"
DEFAULT_DB = "project1db"

# Default verbosity passed to benchmark/tuning runs.
VERBOSITY_DEFAULT = 2
MACHINE = "lab-machine"

# Benchmark constants: the BenchBase workload names exercised by this project.
EPINIONS = "epinions"
INDEXJUNGLE = "indexjungle"
TIMESERIES = "timeseries"
BENCHMARKS = [
    EPINIONS,
    INDEXJUNGLE,
    TIMESERIES,
]

# File Paths (all relative to this file's directory)
TLD = Path(__file__).parent
DDL_DIRECTORY = TLD / "ddls/"
RESULTS_DIRECTORY = TLD / "benchbase_data/"
SCRIPTS_DIRECTORY = TLD / "scripts/"
TEMP_CSV = TLD / "temp.csv"          # scratch file for intermediate CSV output
ACTIONS_SQL = TLD / "actions.sql"    # generated SQL actions to apply
STATE_DIRECTORY = TLD / "state/"
STATE_JSON = STATE_DIRECTORY / "state.json"
STATE_CANDIDATES = STATE_DIRECTORY / "candidates.txt"

# Keys used inside the persisted state JSON.
KEY_TABLE_INDEXES = "table_indexes"
KEY_INDEX_COLUMNS = "column_indexes"
| karthik-ramanathan-3006/15-799-Special-Topics-in-Database-Systems | constants.py | constants.py | py | 800 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
}
] |
73348441787 | from datetime import datetime
import math
from abc import abstractmethod
from typing import List, Tuple
from anteater.core.anomaly import Anomaly, RootCause
from anteater.core.kpi import KPI, Feature, JobConfig
from anteater.core.ts import TimeSeries
from anteater.model.algorithms.spectral_residual import SpectralResidual
from anteater.model.algorithms.slope import check_trend
from anteater.source.metric_loader import MetricLoader
from anteater.utils.common import same_intersection_pairs
from anteater.utils.datetime import DateTimeManager as dt
from anteater.utils.log import logger
from anteater.utils.timer import timer
class Detector:
    """The kpi anomaly detector base class"""

    def __init__(self, data_loader: MetricLoader, **kwargs) -> None:
        """The detector base class initializer"""
        self.data_loader = data_loader

    @abstractmethod
    def detect_kpis(self, kpis: List[KPI]) -> List[Anomaly]:
        """Executes anomaly detection on kpis"""

    def execute(self, job_config: JobConfig) -> List[Anomaly]:
        """The main function of the detector"""
        kpis = job_config.kpis
        features = job_config.features
        n = job_config.root_cause_num
        if not kpis:
            # Nothing to detect on; log which detector was misconfigured.
            logger.info('Empty kpi in detector: %s.',
                        self.__class__.__name__)
            return []
        return self._execute(kpis, features, top_n=n)

    def get_unique_machine_id(self, start: datetime, end: datetime,
                              kpis: List[KPI]) -> List[str]:
        """Gets unique machine ids during past minutes"""
        metrics = [_kpi.metric for _kpi in kpis]
        machine_ids = self.data_loader.get_unique_machines(start, end, metrics)
        return machine_ids

    def find_root_causes(self, anomalies: List[Anomaly],
                         features: List[Feature], top_n=3) -> List[Anomaly]:
        """Finds root causes for each anomaly events"""
        result = []
        for anomaly in anomalies:
            # Attach the top-n candidate causes directly onto the anomaly.
            root_causes = self.cal_top_rac(anomaly, features, top_n=top_n)
            anomaly.root_causes = root_causes
            result.append(anomaly)
        return result

    def cal_top_rac(self, anomaly: Anomaly,
                    features: List[Feature], top_n=3) -> List[RootCause]:
        """calculates the top n root causes for the anomaly events"""
        root_causes = []
        for f in features:
            ts_scores = self.cal_metric_ab_score(f.metric, anomaly.machine_id)
            for _ts, _score in ts_scores:
                if not check_trend(_ts.values, f.atrend):
                    # Trend direction does not match the feature's expected
                    # anomalous trend; skip the whole metric.
                    logger.info('Trends Filtered: %s', f.metric)
                    break
                if same_intersection_pairs(_ts.labels, anomaly.labels):
                    root_causes.append(RootCause(
                        metric=_ts.metric,
                        labels=_ts.labels,
                        score=_score))
        # Keep the top-n by score, then present them in feature priority order.
        priorities = {f.metric: f.priority for f in features}
        root_causes.sort(key=lambda x: x.score, reverse=True)
        root_causes = root_causes[: top_n]
        root_causes.sort(key=lambda x: priorities[x.metric])
        return root_causes

    def cal_kpi_anomaly_score(self, anomalies: List[Anomaly],
                              kpis: List[KPI]) -> List[Anomaly]:
        """Calculates anomaly scores for the anomaly kpis"""
        atrends = {k.metric: k.atrend for k in kpis}
        for _anomaly in anomalies:
            metric = _anomaly.metric
            machine_id = _anomaly.machine_id
            labels = _anomaly.labels
            ts_scores = self.cal_metric_ab_score(metric, machine_id)
            for _ts, _score in ts_scores:
                if not same_intersection_pairs(_ts.labels, labels):
                    continue
                # First label-matching series decides the score; a wrong
                # trend direction zeroes it out.
                if not check_trend(_ts.values, atrends[metric]):
                    logger.info('Trends Filtered: %s', metric)
                    _anomaly.score = 0
                else:
                    _anomaly.score = _score
                break
        return anomalies

    def cal_metric_ab_score(self, metric: str, machine_id: str) \
            -> List[Tuple[TimeSeries, int]]:
        """Calculates metric abnormal scores based on sr model"""
        start, end = dt.last(minutes=10)
        ts_list = self.data_loader.get_metric(
            start, end, metric, machine_id=machine_id)
        point_count = self.data_loader.expected_point_length(start, end)
        model = SpectralResidual(12, 24, 50)
        ts_scores = []
        for _ts in ts_list:
            # Reject series that are all-zero, constant, or whose sample
            # count is far from the expected resolution (gaps/duplicates).
            if sum(_ts.values) == 0 or \
                    len(_ts.values) < point_count * 0.9 or \
                    len(_ts.values) > point_count * 1.5 or \
                    all(x == _ts.values[0] for x in _ts.values):
                score = 0
            else:
                score = model.compute_score(_ts.values)
                # Score the most recent window only.
                score = max(score[-25:])
                if math.isnan(score) or math.isinf(score):
                    score = 0
            ts_scores.append((_ts, score))
        return ts_scores

    @timer
    def _execute(self, kpis: List[KPI], features: List[Feature], **kwargs) \
            -> List[Anomaly]:
        # Pipeline: detect -> attach root causes -> score each anomaly.
        logger.info('Execute model: %s.', self.__class__.__name__)
        anomalies = self.detect_kpis(kpis)
        if anomalies:
            logger.info('%d anomalies was detected on %s.',
                        len(anomalies), self.__class__.__name__)
            anomalies = self.find_root_causes(anomalies, features, **kwargs)
            anomalies = self.cal_kpi_anomaly_score(anomalies, kpis)
        return anomalies
| openeuler-mirror/gala-anteater | anteater/model/detector/base.py | base.py | py | 5,619 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "anteater.source.metric_loader.MetricLoader",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "anteater.core.kpi.KPI",
"line_number": 26,
"usage_type": "name"
},
{
"api_... |
34197097202 | import numpy as np
import threading
import time
from datetime import datetime
import jderobot
import math
import cv2
from math import pi as pi
# Target loop period for the control thread, in milliseconds.
time_cycle = 80


class MyAlgorithm(threading.Thread):
    """Control thread that parks the taxi automatically.

    State machine (driven by execute() every ~80 ms):
      1. drive forward until aligned with the parking spot,
      2. pause briefly,
      3. reverse in an arc into the spot, then straighten,
      4. creep forward/backward until centered, then stop.
    """

    def __init__(self, pose3d, laser1, laser2, laser3, motors):
        # Sensor/actuator proxies: pose, front/rear/right lasers, motors.
        self.pose3d = pose3d
        self.laser1 = laser1
        self.laser2 = laser2
        self.laser3 = laser3
        self.motors = motors

        # State-machine flags.
        self.StopTaxi = False    # True once aligned with the spot
        self.goForward = False   # True once reversing phase is done
        self.turn1 = False       # True once the first reverse arc is complete
        self.startTime = 0
        self.startTimePark = 2   # pause duration (seconds) before reversing

        # Distance/orientation thresholds (tuned empirically for this world).
        self.DIST_REAR_SPOT = 6.3
        self.DIST_REAR_CARY = 4.2
        self.DIST_REAR_CARX = 2.2
        self.DIST_RIGHT = 3.5
        self.MARGIN1 = 0.2
        self.MARGIN2 = 0.15
        self.YAW_MAX = 1.05
        self.YAW_MARGIN = 0.02
        self.DIST_MAX = 20

        self.stop_event = threading.Event()
        self.kill_event = threading.Event()
        self.lock = threading.Lock()
        threading.Thread.__init__(self, args=self.stop_event)

    def parse_laser_data(self,laser_data):
        """Convert raw laser readings into (distance_m, angle_rad) pairs."""
        laser = []
        for i in range(laser_data.numLaser):
            dist = laser_data.distanceData[i]/1000.0  # mm -> m
            angle = math.radians(i)
            laser += [(dist, angle)]
        return laser

    def get_laser_vector(self,laser_array):
        """Project (distance, angle) pairs into Cartesian GUI coordinates."""
        laser_vectorized = []
        for d,a in laser_array:
            # (4.2.1) laser into GUI reference system
            x = d * math.cos(a) * -1
            y = d * math.sin(a) * -1
            v = (x,y)
            laser_vectorized += [v]
        return laser_vectorized

    def run (self):
        # Main thread loop: call execute() at most every time_cycle ms,
        # unless paused (stop_event) or terminated (kill_event).
        while (not self.kill_event.is_set()):
            start_time = datetime.now()
            if not self.stop_event.is_set():
                self.execute()
            finish_Time = datetime.now()
            dt = finish_Time - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            #print (ms)
            if (ms < time_cycle):
                time.sleep((time_cycle - ms) / 1000.0)

    def stop (self):
        """Pause the control loop (thread keeps running)."""
        self.stop_event.set()

    def play (self):
        """Resume the control loop, starting the thread on first use."""
        if self.is_alive():
            self.stop_event.clear()
        else:
            self.start()

    def kill (self):
        """Terminate the control loop permanently."""
        self.kill_event.set()

    def absolutas2relativas(self, x, y, rx, ry, rt):
        """Transform absolute map coordinates into the robot's frame."""
        # Convert to relatives
        dx = x - rx
        dy = y - ry
        # Rotate with current angle
        x = dx*math.cos(-rt) - dy*math.sin(-rt)
        y = dx*math.sin(-rt) + dy*math.cos(-rt)
        return x,y

    def driveArc(self, speed, angleTurn):
        """Command linear speed and angular velocity simultaneously."""
        self.motors.sendV(speed)
        self.motors.sendW(angleTurn)

    def execute(self):
        # TODO
        # Get the position of the robot
        xCar = self.pose3d.getX()
        yCar = self.pose3d.getY()
        # We get the orientation of the robot with respect to the map
        yawCar = self.pose3d.getYaw()

        # Get the data of the laser sensor, which consists of 180 pairs of values
        laser_data_Front = self.laser1.getLaserData()
        laserFront = self.parse_laser_data(laser_data_Front)
        laser_data_Rear = self.laser2.getLaserData()
        laserRear = self.parse_laser_data(laser_data_Rear)
        laser_data_Right = self.laser3.getLaserData()
        laserRight = self.parse_laser_data(laser_data_Right)

        laserFront_vectorized = self.get_laser_vector(laserFront)
        laserRear_vectorized = self.get_laser_vector(laserRear)
        laserRight_vectorized = self.get_laser_vector(laserRight)

        # Average of the 180 values of the laser
        laserFront_mean = np.mean(laserFront_vectorized, axis=0)
        laserRear_mean = np.mean(laserRear_vectorized, axis=0)
        laserRight_mean = np.mean(laserRight_vectorized, axis=0)

        if self.StopTaxi == False:
            if(self.DIST_RIGHT-self.MARGIN1)<=abs(laserRight_mean[1])<=(self.DIST_RIGHT+self.MARGIN1) and (self.DIST_REAR_SPOT-self.MARGIN1)<=abs(laserRear_mean[1])<=(self.DIST_REAR_SPOT+self.MARGIN1):
                # If the taxi is alligned with the car in front of the parking spot the taxi stops
                self.motors.sendV(0)
                self.StopTaxi = True
                if self.startTime == 0:
                    self.startTime = time.time()
            else:
                # If the taxi did not get to the car ahead, the taxi drives forward
                self.motors.sendV(20)
        else:
            if (time.time() - self.startTime) <= self.startTimePark:
                # The taxi stopped for a while
                self.motors.sendV(0)
            else:
                if self.goForward == False:
                    # The taxi goes backward
                    if yawCar <= self.YAW_MAX and self.turn1 == False:
                        # The car is getting into the parking space
                        self.driveArc(-3, pi/4)
                    else:
                        # The taxi straightens
                        self.turn1 = True
                        self.driveArc(-3, -pi/7)
                    if (self.DIST_REAR_CARY-self.MARGIN2) <= abs(laserRear_mean[1]) <= (self.DIST_REAR_CARY+self.MARGIN2):
                        # If the taxi is very close to the car from behind, it stop
                        self.goForward = True
                        self.motors.sendV(0)
                        self.motors.sendW(0)
                else:
                    if yawCar <= -self.YAW_MARGIN or yawCar >= self.YAW_MARGIN:
                        # The taxi rectifies
                        self.driveArc(1, -pi/2)
                    else:
                        # When the car is straight, it stops and rectifies until it is centered in the parking spot
                        self.motors.sendW(0)
                        if (laser_data_Front.distanceData[90]/10 - laser_data_Rear.distanceData[90]/10) > self.DIST_MAX:
                            self.motors.sendV(2)
                        elif (laser_data_Rear.distanceData[90]/10 - laser_data_Front.distanceData[90]/10) > self.DIST_MAX:
                            self.motors.sendV(-2)
                        else:
                            # The taxi is parked
                            print('CAR PARKED')
                            self.motors.sendV(0)
| RoboticsLabURJC/2016-tfg-irene-lope | AutoPark_Practice/MyAlgorithm.py | MyAlgorithm.py | py | 6,482 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "threading.Event",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "threading.Lock"... |
41474623270 | from __future__ import division # Why is this not standard.
import datetime
import re
class Tribunal(object):
    """System for keeping players in check.

    Tracks misbehaviour points per user, per-user message rates (for spam
    detection), seen-URL counters, and a URL block list.  Feedback is sent
    back to IRC through the callback supplied by the caller.
    """

    def __init__(self, config, callback_message_func):
        super(Tribunal, self).__init__()
        # We need someway of keeping track if someone is being bad.
        self._user_points = dict()  # single values ['psykzz'] = 0
        self._user_spam = dict()    # of tuples ['psykzz'] = (10, timestamp)
        self._common_urls = dict()  # single values ['google.com'] = 5
        self._blocked_urls = set()  # single values ('google.com',)

        # Spam config, the default here is to alert of more then 5 messages
        # in a 10 second burst, gaining 5 points for each infraction.
        self._spam_message_rate = config.get('spam_message_rate', 5)
        self._spam_message_per_sec = config.get('spam_message_per_sec', 10)
        self._points_per_infraction = config.get('points_per_infraction', 5)
        self._point_deduction_rate = config.get('point_deduction_rate', 5)
        self._allcap_percent_threshold = float(config.get('allcap_percent_threshold', 1))
        self._allcap_min_length = config.get('allcap_min_length', 3)

        # regex for finding urls
        self.__url_regex_pattern = r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+'
        self._url_regex_pattern = re.compile(self.__url_regex_pattern)

        # callback messaging function to message through IRC
        self._callback_message_func = callback_message_func

    def _send(self, target, message):
        """Relay *message* to *target* via the IRC callback."""
        return self._callback_message_func(target, message)

    def requires_action(self, name, limit=50):
        """Return True when *name* has accumulated more than *limit* points."""
        if self._get_points(name) > limit:
            return True
        return False

    ''' URL System '''
    def add_url(self, url):
        """Add *url* to the block list."""
        self._blocked_urls.add(url)

    def remove_url(self, url):
        """Remove *url* from the block list (no-op when absent)."""
        self._blocked_urls.discard(url)  # only need to remove once, as its only added once.

    def check_url(self, url):
        """Return True when *url* is on the block list."""
        if url in self._blocked_urls:
            return True
        return False

    ''' Point System '''
    def _get_points(self, name):
        """Return *name*'s current points (0 when unknown, None for no name)."""
        if name is None:
            return
        if name not in self._user_points:
            return 0
        return self._user_points[name]

    def _set_points(self, name, points):
        """Overwrite *name*'s points; silently ignores None arguments."""
        if name is None:
            return
        if points is None:
            return
        self._user_points[name] = points

    def _add_points(self, name, points=1):
        """Add *points* to *name*'s tally, creating the entry if needed."""
        if name not in self._user_points:
            self._user_points[name] = points
        else:
            self._user_points[name] += points

    def _remove_points(self, name, points=1):
        """Subtract *points* from *name*'s tally (new users start at 0)."""
        if name not in self._user_points:
            self._user_points[name] = 0
        else:
            self._user_points[name] -= points

    def check_messages(self, client, event):
        """Score one message: all-caps and spam checks, then warn or reward.

        Infractions add points and trigger a private warning; a clean
        message slowly deducts points.
        """
        local_score = 0
        error_log = []

        # check was there all caps
        if self._check_for_allcaps(event):
            local_score += self._points_per_infraction  # 5 points for all caps
            error_log.append('Using AllCaps')

        # check for spam :(
        spam = self._check_for_individual_spam(event)
        # NOTE(review): debug output — echoes the raw allowance to the
        # channel on every message; consider removing.
        self._send(event.target, str(spam))
        if spam is False:  # Stupid that its false but i want to try and be clever...
            local_score += self._points_per_infraction  # 5 points for all the things!
            error_log.append('Spamming in chat')

        # Just do the URL check...
        self._capture_urls(event)
        # check for spamming urls 5 maybe too many?
        '''
        if self._capture_urls(event) > 5:
            local_score += 1
            error_log.append('Spamming URLS')
        '''

        if local_score > 0:
            self._add_points(event.source, local_score)
            self._send(event.source, 'OMFG N00B u dun goofed, if you dont stop this shit! Points : {}, errors : {}'.format(self._get_points(event.source), error_log))
        else:
            self._remove_points(event.source, self._point_deduction_rate)

    def _check_for_allcaps(self, event):
        """Return True when the alphabetic part of the message is all caps."""
        if len(event.message) <= self._allcap_min_length:
            return False
        _len = sum(1 for word in event.message if word.isalpha())  # Ignore none alpha characters
        if _len == 0:
            # Bug fix: a long message with no alphabetic characters (e.g.
            # "!!!???") used to raise ZeroDivisionError here.
            return False
        _caps = sum(1 for word in event.message if word.isupper())  # Count the number of upper case characters.
        return ((_caps / _len) >= self._allcap_percent_threshold)

    def _check_for_individual_spam(self, event):
        """Token-bucket rate check; returns False on spam, else the allowance.

        The allowance refills at rate/per_sec messages per second and is
        capped at the configured rate; each message costs 1.
        """
        now = datetime.datetime.now()
        allowance = self._spam_message_rate
        if event.source in self._user_spam:
            time_passed = now - self._user_spam[event.source][1]
            allowance = self._user_spam[event.source][0]
            allowance += time_passed.seconds * (self._spam_message_rate / self._spam_message_per_sec)
            if allowance > self._spam_message_rate:
                allowance = self._spam_message_rate
            allowance -= 1
            self._user_spam[event.source] = (allowance, now)
        else:
            self._user_spam[event.source] = (allowance, now)

        if (allowance < 1):
            return False
        else:
            return allowance

    ''' I think this whole system needs to be reworked '''
    def _capture_urls(self, event, return_urls=False):
        """Count URLs seen in the message; return them or their count."""
        # not sure if convert to string is needed.
        urls = self._url_regex_pattern.findall(str(event.message))
        for url in urls:
            # Bug fix: the original indexed ``self._capture_urls`` — this very
            # method — instead of the ``self._common_urls`` counter dict, which
            # raised TypeError as soon as any URL appeared in a message.
            if url in self._common_urls:
                self._common_urls[url] += 1
            else:
                self._common_urls[url] = 1

        # Maybe helpful later
        if return_urls:
            return urls
        else:
            return len(urls)

    def _save_urls(self):
        # Placeholder: persistence for the URL counters is not implemented.
        pass
| psykzz/ircmod_gradiusbot | mod_tribunal.py | mod_tribunal.py | py | 5,908 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 119,
"usage_type": "attribute"
}
] |
18722283162 | import numpy as np
import matplotlib.pyplot as plt
# Hard c-means (HCM / k-means style) clustering of 8 two-dimensional points.
# Rows of X are coordinates (x in row 0, y in row 1); columns are samples.
X = np.array([[2.5, 3.0, 3.0, 3.5, 5.5, 6.0, 6.0, 6.5],
              [3.5, 3.0, 4.0, 3.5, 5.5, 6.0, 5.0, 5.5]])

num_rows, N = X.shape

# Number of clusters.
c = 2
# c = 3
# c = 4

V = np.zeros((num_rows, c))  # cluster centers, one column per cluster
U = np.zeros((c, N))         # crisp partition matrix: U[i, j] = 1 iff sample j in cluster i

# Initialize the partition by assigning samples round-robin to clusters...
row_iteration = 0
for i in range(N):
    U[row_iteration, i] = 1
    row_iteration = (row_iteration + 1) % c
print(U)
# ...then shuffle the column assignment for a random start.
U = U[:, np.random.permutation(N)]

is_stop_criterion = 10000  # sentinel so the loop runs at least once
epsilon = 0.00001          # convergence threshold on the change of U
t = 0                      # iteration counter
while is_stop_criterion > epsilon:
    t += 1
    # Update centers: mean of the samples currently in each cluster.
    for i in range(c):
        for j in range(num_rows):
            V[j, i] = np.sum(X[j, :] * U[i, :]) / np.sum(U[i, :])
    V[np.isnan(V)] = 0  # empty cluster -> 0/0; reset its center to the origin
    # Squared Euclidean distance of every sample to every center.
    d = np.zeros((c, N))
    for i in range(c):
        for j in range(N):
            d[i, j] = np.sum((X[:, j] - V[:, i]) ** 2)
    # Objective value for the current partition.
    J = np.sum(U * d)
    # Reassign every sample to its nearest center.
    U_save = U.copy()
    U = np.zeros((c, N))
    for j in range(N):
        min_cluster = np.argmin(d[:, j])
        U[min_cluster, j] = 1
    # Stop when the partition no longer changes (norm of the difference).
    is_stop_criterion = np.linalg.norm(U - U_save)

print("Partition matrix:")
print(U)
print("Cluster centers:")
print(V)
print("Minimum:")
print(J)
print("Number of iterations:")
print(t)

# Plot samples and the final cluster centers.
plt.scatter(X[0, :], X[1, :])
plt.scatter(V[0, :], V[1, :])
plt.show()
| vvsct/c-means | hcm.py | hcm.py | py | 1,215 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"li... |
23361556734 | import datetime
from polls.models import LogModel
class LogMiddleware:
    """Django middleware that records every non-admin request in LogModel."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)

        # Admin URLs are intentionally excluded from the request log.
        if 'admin' in request.path:
            return response

        LogModel.objects.create(
            path=request.path,
            method=request.method,
            timestamps=datetime.datetime.now(),
        )
        return response
| konstantinkonstantinovich/home_task_6 | polls/middleware.py | middleware.py | py | 549 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "polls.models.LogModel.objects.create",
"line_number": 18,
"usage_type": "call"
},
{
... |
13284456276 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from frontend import views
from frontend import facebook
# URL routes for the frontend app (legacy Django 1.x patterns() style).
urlpatterns = patterns('',
    # Core pages.
    url(r'^$', views.index, name='index'),
    url(r'^settings/', views.settings, name='settings'),
    # Social auth (Facebook login) routes.
    url(r'', include('social_auth.urls')),
    # Event / group / friend creation.
    url(r'^add/', views.add, name='add'),
    url(r'^addgroup/', views.addgroup, name='addgroup'),
    url(r'^inviteall/(?P<event_id>\w+)', views.inviteall, name='inviteall'),
    url(r'^addfriend/', views.addfriend, name='addfriend'),
    url(r'^personal/$', views.personal, name='personal'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^search/$', views.search, name='search'),
    # Static template pages.
    url(r'^success/$', TemplateView.as_view(template_name="frontend/success.html"), name="event_success"),
    url(r'^tutorial/$', TemplateView.as_view(template_name="frontend/tutorial.html"), name="tutorial"),
    url(r'^features/$', TemplateView.as_view(template_name="frontend/features.html"), name="features"),
    # Calendar widget and its data feeds.
    url(r'^cal/$', views.calendar, name="calendar"),
    url(r'^eventsXML$', views.eventsXML),
    url(r'^dataprocessor$', views.dataprocessor),
    url(r'^refresh/', views.refresh, name='refresh'),
    # RSVP management.
    url(r'^rsvp/', views.addrsvp, name='addrsvp'),
    url(r'^rmrsvp/(?P<id>\w+)/', views.rmrsvp, name='rmrsvp'),
    url(r'^rmrsvp/', views.rmrsvp, name='rmrsvp'),
    url(r'^removenew/', views.removenew, name='removenew'),
    url(r'^invite/', views.invite, name='invite'),
    # Removal / editing of groups, friends and events.
    url(r'^rmgroup/(?P<group>\w+)/$', views.rmgroup, name='rmgroup'),
    url(r'^importgroup/(?P<group>\w+)/$', facebook.importgroup, name='importgroup'),
    url(r'^rmfriend/(?P<user>\w+)/$', views.rmfriend, name='rmfriend'),
    url(r'^rmevent/(?P<event>\w+)/$', views.rmevent, name='rmevent'),
    url(r'^edit/(?P<event>\w+)/$', views.edit, name='edit'),
    # Facebook import/export endpoints.
    url(r'^import_events/$', facebook.import_events, name='import_events'),
    url(r'^export_event/(?P<event>\w+)/$', facebook.export_event, name='export_event'),
    url(r'^personal_ajax/(?P<event>\w+)/$', views.personal_ajax, name='personal_ajax'),
    url(r'^editevent/(?P<event>\w+)/$', views.editevent, name='editevent'),
    # Tag filtering and AJAX lookups.
    url(r'^filter/(?P<tag>\w+)/$', views.filter, name='filter'),
    url(r'^filter/$', views.filter, name='filter_init'),
    url(r'^api/get_names/', views.get_names, name='get_names'),
    url(r'^api/get_tags/', views.get_tags, name='get_tags'),
    url(r'^api/get_memnames/', views.get_memnames, name='get_memnames'),
)

# Serve static files through Django only outside DEBUG (normally a web
# server would handle these in production; this mirrors the original setup).
if not settings.DEBUG:
    urlpatterns += patterns('',
        (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    )
| jjchen/cos333 | frontend/urls.py | urls.py | py | 2,723 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "frontend.views.index",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": ... |
31954675537 | """
The color scheme.
"""
from __future__ import unicode_literals
from prompt_toolkit.styles import PygmentsStyle, Style, Attrs
from pygments.token import Token
__all__ = (
'PymuxStyle',
)
# Pygments-token -> style mapping for the pymux UI chrome (pane borders,
# title bars, status bar, command line, menus).
ui_style = {
    # Pane separator lines.
    Token.Line: '#888888',
    Token.Line.Focussed: '#448844',

    # Pane title bars (unfocussed and focussed variants).
    Token.TitleBar: 'bg:#888888 #dddddd ',
    Token.TitleBar.Title: '',
    Token.TitleBar.Name: '#ffffff noitalic',
    Token.TitleBar.Name.Focussed: 'bg:#88aa44',
    Token.TitleBar.Line: '#444444',
    Token.TitleBar.Line.Focussed: '#448844 noinherit',
    Token.TitleBar.Focussed: 'bg:#5f875f #ffffff bold',
    Token.TitleBar.Focussed.Title: '',
    Token.TitleBar.Zoom: 'bg:#884400 #ffffff',
    Token.TitleBar.PaneIndex: '',
    Token.TitleBar.CopyMode: 'bg:#88aa88 #444444',
    Token.TitleBar.CopyMode.Position: '',

    Token.TitleBar.Focussed.PaneIndex: 'bg:#88aa44 #ffffff',
    Token.TitleBar.Focussed.CopyMode: 'bg:#aaff44 #000000',
    Token.TitleBar.Focussed.CopyMode.Position: '#888888',

    # Command prompt at the bottom of the screen.
    Token.CommandLine: 'bg:#4e4e4e #ffffff',
    Token.CommandLine.Command: 'bold',
    Token.CommandLine.Prompt: 'bold',

    # Status bar and window list.
    Token.StatusBar: 'bg:#444444 #ffffff',
    Token.StatusBar.Window: 'bg:#888888',
    Token.StatusBar.Window.Current: '#88ff88 bold',

    Token.AutoSuggestion: 'bg:#4e5e4e #88aa88',
    Token.Message: 'bg:#bbee88 #222222',
    Token.Background: '#888888',
    Token.Clock: 'bg:#88aa00',
    Token.PaneNumber: 'bg:#888888',
    Token.PaneNumber.Focussed: 'bg:#aa8800',
    Token.Terminated: 'bg:#aa0000 #ffffff',

    # Kill-pane / detach confirmation prompt.
    Token.ConfirmationToolbar: 'bg:#880000 #ffffff',
    Token.ConfirmationToolbar.Question: '',
    Token.ConfirmationToolbar.YesNo: 'bg:#440000',

    # Incremental search bar and match highlighting.
    Token.Search: 'bg:#88aa88 #444444',
    Token.Search.Text: '',
    Token.Search.Focussed: 'bg:#aaff44 #444444',
    Token.Search.Focussed.Text: 'bold #000000',

    Token.SearchMatch: '#000000 bg:#88aa88',
    Token.SearchMatch.Current: '#000000 bg:#aaffaa underline',

    # Completions menu.
    Token.Menu.Completions.Completion: 'bg:#88aa88 #222222',
    Token.Menu.Completions.Completion.Current: 'bg:#88cc88 #000000',
    Token.Menu.Completions.ProgressBar: 'bg:#889988',
    Token.Menu.Completions.ProgressButton: 'bg:#004400',
}
class PymuxStyle(Style):
    """
    The styling. It includes the pygments style from above. But further, in
    order to proxy all the output from the processes, it interprets all tokens
    starting with ('C,) as tokens that describe their own style.
    """
    def __init__(self):
        self.pygments_style = PygmentsStyle.from_defaults(style_dict=ui_style)
        self._token_to_attrs_dict = None

    def get_attrs_for_token(self, token):
        if token and token[0] == 'C':
            # Token starts with ('C',). Token describes its own style.
            # The remaining 7 fields map positionally onto Attrs.
            c, fg, bg, bold, underline, italic, blink, reverse = token
            return Attrs(fg, bg, bold, underline, italic, blink, reverse)
        else:
            # Take styles from Pygments style.
            return self.pygments_style.get_attrs_for_token(token)

    def invalidation_hash(self):
        # Styling is static; returning a constant means the rendered style
        # cache never needs to be invalidated.
        return None
| jonathanslenders/pymux-test | pymux/style.py | style.py | py | 3,589 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "pygments.token.Token.Line",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygments.token.Token",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pygments.token.Token.Line",
"line_number": 15,
"usage_type": "attribute"
},
{
... |
30301215925 | ########## Use:
########## Last Modified:
########## Author: Yamaga
##### dependencies
from __future__ import print_function, division
import os, sys
from astropy.io import fits
import numpy as np
import astropy.io.fits
from astropy.nddata import Cutout2D
from astropy import units as u
import shutil
import optparse
import astropy
print("input")
#### input
obj = raw_input("object_name (ex. NGC7538) : ")
regrid = raw_input('IR fitsfiles (XXX.fits,YYY.fits...) : ').split(',') # XXX.fits,YYY.fits
template2 = raw_input('regrid_template (ZZZ.fits) : ') # ZZZ.fits
print('===================================================')
waveli = []
# wavelenge search
for k in range(0,len(regrid)):
print("search wavelen"+str(k+1)+" th start.")
print("")
li = []
hdulist = astropy.io.fits.open(regrid[k])
hdu = hdulist[0]
data = hdu.data
header1 = hdu.header
try:
a = hdu.header["WAVELEN"]
except:
try:
a = hdulist[0].header["WAVELNTH"]
except:
print('===================================================')
print(infile[k])
a = input("WAVELEN = ")
print('===================================================')
waveli.append(a)
print('===================================================')
print("1st regrid phase")
print("")
### regrid1
fitsnames = []
template1 = regrid
for k in range(len(regrid)):
image = '.image'
pre = 'regrid_'
### CASAtasks
importfits(fitsimage=regrid[k], imagename=regrid[k] + image)
importfits(fitsimage=template1[k], imagename=template1[k] + image)
imregrid(imagename=regrid[k] + image, output= pre+regrid[k]+image,template=template1[k] + image)
print(pre+regrid[k]+image)
exportfits(imagename=pre+regrid[k]+image, fitsimage= pre+regrid[k], overwrite=True)
fitsnames.append(pre+regrid[k])
print("1st regrid has finished.")
print('===================================================')
print('===================================================')
print("saturate_delete phase")
print("")
### satu_delete
infile = fitsnames
fitsnames = []
wavelen = []
# wavelenge search
for k in range(0,len(infile)):
li = []
hdulist = astropy.io.fits.open(infile[k])
hdu = hdulist[0]
data = hdu.data
header1 = hdu.header
x = hdu.header['NAXIS1']
y = hdu.header['NAXIS2']
hdu.header['OBJECT'] = obj
try:
waveli[k] = hdu.header["WAVELEN"]
except:
hdu.header['WAVELEN'] = waveli[k]
### saturate delete
for i in range(0,y):
for j in range(0,x):
v = data[i][j]
if v == np.nan :
v = np.nan
elif v <= 0:
v = np.nan
li.append(v)
data = np.reshape(li,[y,x]) # reshpe(x*y)
head = astropy.io.fits.PrimaryHDU(data = data)
head.header = header1
filename = obj+"_"+str(waveli[k])+".fits"
fitsnames.append(filename)
wavelen.append(waveli[k])
head.writeto(filename, overwrite=True)
print("satu_delete "+str(k+1)+" th has finished.")
print(" ")
print("wavelen : "+str(wavelen))
print("waveli : "+str(waveli))
print(fitsnames)
print("saturate_delete has finished.")
print('===================================================')
print('===================================================')
print("2nd regrid phase")
print("")
### regrid2
regrid = fitsnames
fitsnames = []
for k in range(len(regrid)):
image = '.image'
pre = 'regrid_'
### CASAtasks
importfits(fitsimage=regrid[k], imagename=regrid[k] + image)
importfits(fitsimage=template2, imagename=template2 + image)
imregrid(imagename=regrid[k] + image, output= pre+regrid[k]+image,template=template2 + image)
print(pre+regrid[k]+image)
exportfits(imagename=pre+regrid[k]+image, fitsimage= pre+regrid[k], overwrite=True)
fitsnames.append(pre+regrid[k])
print(fitsnames)
print("2nd regrid has finished.")
print('===================================================')
print("FINISHED!")
### create new folder
os.mkdir(obj+"_match")
for name in fitsnames:
shutil.move(name,obj+"_match")
| Sound-110316/Personal_repository | pix_awase.py | pix_awase.py | py | 4,121 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "astropy.io.fits.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "astropy.io",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "astropy.io"... |
27741430831 | import os
import fileinput
import logging
import argparse
import shutil
import re
from sys import platform
import socket
# import reggie source code
# use reggie2.0 functions by adding the path
import settings
settings.init() # Call only once
import sys
sys.path.append(settings.absolute_reggie_path)
reggie_exe_path = os.path.join(settings.absolute_reggie_path,'reggie.py')
if not os.path.exists(reggie_exe_path) :
print("Reggie main file not found in reggie repository under: '%s'" % reggie_exe_path)
exit(1)
from repas_tools import finalize
import repas_tools
from combinations import getCombinations
from combinations import isKeyOf
from combinations import readKeyValueFile
from tools import red
from tools import yellow
from timeit import default_timer as timer
import tools
import args_parser
"""
General workflow:
1. FIX THIS: ------------------ get the command line arguments 'args' with path to ".gitlab-ci.yml" file
2. FIX THIS: ------------------ set the logger 'log' with the debug level from 'args' to determine the level of logging which displays output to the user
3. FIX THIS: ------------------ perform the regression check by a) building executables
------------------ b) running the code
------------------ c) performing the defined analyzes
4. FIX THIS: ------------------ display the summary table with information for each build, run and analysis step
5. FIX THIS: ------------------ display if regression check was successful or not and return the corresponding error code
"""
# Decorative "REPAS" ASCII-art start-up banner, printed before any work begins.
print('')
print(tools.red('=============================================================================================================================='))
print(tools.red(' _____ _____ _____ _____ _____ '))
print(tools.red(' /\ \ /\ \ /\ \ /\ \ /\ \ '))
print(tools.red(' /::\ \ /::\ \ /::\ \ /::\ \ /::\ \ '))
print(tools.red(' /::::\ \ /::::\ \ /::::\ \ /::::\ \ /::::\ \ '))
print(tools.red(' /::::::\ \ /::::::\ \ /::::::\ \ /::::::\ \ /::::::\ \ '))
print(tools.red(' /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ '))
print(tools.red(' /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ '))
print(tools.red(' /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \:::\ \ \:::\ \:::\ \ '))
print(tools.red(' /::::::\ \:::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ ___\:::\ \:::\ \ '))
print(tools.red(' /:::/\:::\ \:::\____\ /:::/\:::\ \:::\ \ /:::/\:::\ \:::\____\ /:::/\:::\ \:::\ \ /\ \:::\ \:::\ \ '))
print(tools.red('/:::/ \:::\ \:::| |/:::/__\:::\ \:::\____\/:::/ \:::\ \:::| |/:::/ \:::\ \:::\____\/::\ \:::\ \:::\____\ '))
print(tools.red('\::/ |::::\ /:::|____|\:::\ \:::\ \::/ /\::/ \:::\ /:::|____|\::/ \:::\ /:::/ /\:::\ \:::\ \::/ / '))
print(tools.red(' \/____|:::::\/:::/ / \:::\ \:::\ \/____/ \/_____/\:::\/:::/ / \/____/ \:::\/:::/ / \:::\ \:::\ \/____/ '))
print(tools.red(' |:::::::::/ / \:::\ \:::\ \ \::::::/ / \::::::/ / \:::\ \:::\ \ '))
print(tools.red(' |::|\::::/ / \:::\ \:::\____\ \::::/ / \::::/ / \:::\ \:::\____\ '))
print(tools.red(' |::| \::/____/ \:::\ \::/ / \::/____/ /:::/ / \:::\ /:::/ / '))
print(tools.red(' |::| ~| \:::\ \/____/ ~~ /:::/ / \:::\/:::/ / '))
print(tools.red(' |::| | \:::\ \ /:::/ / \::::::/ / '))
print(tools.red(' \::| | \:::\____\ /:::/ / \::::/ / '))
print(tools.red(' \:| | \::/ / \::/ / \::/ / '))
print(tools.red(' \|___| \/____/ \/____/ \/____/ '))
print(tools.red('=============================================================================================================================='))
print('')
# Wall-clock timer for the final summary printed by finalize().
start = timer()

# Build the command line interface.
parser = argparse.ArgumentParser(description='DESCRIPTION:\nScript for executing the regression checker for NRG codes multiple times with for parameter studies.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--case', default='.', help='Path to casedir, where repas should be executed.')
parser.add_argument('-d', '--debug', type=int, default=0, help='Debug level for this program. Dumps all info to the screen.')
parser.add_argument('-x', '--dummy', action='store_true', help='Run repas without supplying parameter_rename.ini and parameter_change.ini files.')
parser.add_argument('-n', '--dryrun', action='store_true', help='Simply list all possible cases without performing any run.')
parser.add_argument('-a', '--hlrs', action='store_true', help='Run on with aprun (hlrs system).')
parser.add_argument('exe', help='Path to executable of code that should be tested.')
args = parser.parse_args()

# On Linux, detect HLRS nodes by hostname: "mom*" compute nodes force aprun
# mode, while "eslogin*" login nodes must not run this program with --hlrs.
if re.search('^linux', platform):
    hostname = socket.gethostname()
    print("platform: %s, hostname: %s" % (platform, hostname))
    if re.search('^mom[0-9]+$', hostname):
        print(tools.yellow('Automatic detection of hlrs system: Assuming aprun is used and setting args.hlrs = True'))
        args.hlrs = True
    elif re.search('^eslogin[0-9]+$', hostname):
        if args.hlrs:
            raise Exception('Running with -a or --hlrs. Cannot run this program on a login node. Get interactive job and run on mom node!')

# Configure logging verbosity from -d/--debug.
tools.setup_logger(args.debug)
log = logging.getLogger('logger')

# Echo the effective command line options.
print("Running with the following command line options")
for arg in args.__dict__:
    print(arg.ljust(15)," = [",getattr(args,arg),"]")
print('='*132)
# Assemble the reggie invocation that is normally typed in a shell:
#   -s  save results, -a  use aprun (HLRS only), -d1  debug mode 1
cmd = ['python', reggie_exe_path, '-e', str(args.exe), '.', '-s']
if args.hlrs:
    cmd.append('-a')
cmd.append('-d1')

# Change into the case directory, if a valid one was supplied.
if args.case:
    if os.path.isdir(args.case):
        os.chdir(args.case)
    else:
        raise Exception('Supplied case directory is not correctly defined! -c [%s]' % args.case)

# --dummy: guarantee that both .ini files exist (possibly empty).
if args.dummy:
    open('parameter_rename.ini', 'a').close()
    open('parameter_change.ini', 'a').close()

# Initialize the central case object and run in the current working dir.
cwd = os.getcwd()
repas = repas_tools.Case(cwd, cmd, 'parameter_rename.ini', 'parameter_change.ini', 'parameter.ini')

# Read the combinations for running the setups from parameter_change.ini.
combis, digits = getCombinations(os.path.join(cwd, repas.names2_file))

# Edit parameter.ini for every combination; reggie then changes the set of
# variables and produces output which must be collected afterwards.
for i, combi in enumerate(combis):
    # Print setup info for this combination.
    print(132 * '-')
    for key, value in combi.items():
        print("[%25s=%25s] digit=%3s" % (key, value, digits[key]))
    # Create the parameter file for the current combination.
    repas.create(combi, digits)
    # Read 'parameter_rename.ini' for renaming the results file.
    repas.names()
    # Run the code and collect repas output.
    repas.run(i)

# Save data: scan the output directory for .pdf and .csv files and rename
# them according to the info in 'parameter_rename.ini'.
repas.save_data()
print(132 * '-')
print(" ")
finalize(start, repas.nErrors)
| piclas-framework/reggie2.0 | repas/repas.py | repas.py | py | 9,185 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "settings.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "settings.absolute_reggie_... |
7970861568 | import os
from charms.reactive import is_state, when_all, when, when_not, set_flag, when_none, when_any, hook, clear_flag
from charmhelpers.core import templating, host, unitdata
from charmhelpers.core.hookenv import ( open_port,
status_set,
config,
unit_public_ip,
log,
application_version_set )
from charmhelpers.core.host import chdir, service_restart
from charms.reactive.relations import endpoint_from_flag
from pathlib import Path
import subprocess
NEXTCLOUD_CONFIG_PHP = '/var/www/nextcloud/config/config.php'
@when('apache.available')
@when_any('mysql.available', 'postgres.master.available')
@when_not('nextcloud.initdone')
def init_nextcloud():
    """Install and initialise Nextcloud via `occ maintenance:install`.

    Fires once Apache and a database relation (MySQL or PostgreSQL) are
    available; on success opens port 80 and sets 'nextcloud.initdone' so
    this handler only ever runs once.
    """
    log("Installation and initialization of nextcloud begins.")
    mysql = endpoint_from_flag('mysql.available')
    postgres = endpoint_from_flag('postgres.master.available')
    # Set to 'location' in metadata.yaml IF provided on deploy.
    # We can't use the default, since layer:apache-php will not deploy the
    # nextcloud site properly if we pre-build the directory structure under
    # /var/www/nextcloud. Hence a directory outside /var/www/nextcloud must
    # be used when juju storage is attached.
    data_dir = unitdata.kv().get("nextcloud.storage.data.mount")
    if os.path.exists(str(data_dir)):
        # Use the non-default storage location for nextcloud data.
        log("nextcloud storage location for data set as: {}".format(data_dir))
        host.chownr(data_dir, "www-data", "www-data", follow_links=False, chowntopdir=True)
        os.chmod(data_dir, 0o700)
    else:
        # No custom data_dir arrived via storage: use the default.
        data_dir = '/var/www/nextcloud/data'
    # Hoisted: config() was previously called once per key.
    cfg = config()
    ctxt = {'dbname': None,
            'dbuser': None,
            'dbpass': None,
            'dbhost': None,
            'dbport': None,
            'dbtype': None,
            'admin_username': cfg.get('admin-username'),
            'admin_password': cfg.get('admin-password'),
            'data_dir': Path(data_dir),
            }
    if mysql:
        ctxt['dbname'] = mysql.database()
        ctxt['dbuser'] = mysql.user()
        ctxt['dbpass'] = mysql.password()
        ctxt['dbhost'] = mysql.host()
        ctxt['dbport'] = mysql.port()
        ctxt['dbtype'] = 'mysql'
    elif postgres:
        ctxt['dbname'] = postgres.master.dbname
        ctxt['dbuser'] = postgres.master.user
        ctxt['dbpass'] = postgres.master.password
        ctxt['dbhost'] = postgres.master.host
        ctxt['dbport'] = postgres.master.port
        ctxt['dbtype'] = 'pgsql'
    else:
        # NOTE(review): execution still falls through to the install command
        # with all db fields None here; the @when_any decorator presumably
        # makes this branch unreachable — confirm.
        log("Failed to determine supported database.")
    status_set('maintenance', "Initializing Nextcloud")
    log("Running nexcloud occ installation...")
    nextcloud_init = ("sudo -u www-data /usr/bin/php occ maintenance:install "
                      "--database {dbtype} --database-name {dbname} "
                      "--database-host {dbhost} --database-pass {dbpass} "
                      "--database-user {dbuser} --admin-user {admin_username} "
                      "--admin-pass {admin_password} "
                      "--data-dir {data_dir} ").format(**ctxt)
    with chdir('/var/www/nextcloud'):
        subprocess.call(("sudo chown -R www-data:www-data .").split())
        subprocess.call(nextcloud_init.split())
    # TODO: this blunt replace also rewrites any other "localhost" value in
    # config.php — BUG; consider a config handler for trusted_domains instead.
    Path('/var/www/nextcloud/config/config.php').write_text(
        Path('/var/www/nextcloud/config/config.php').open().read().replace(
            "localhost", cfg.get('fqdn') or unit_public_ip()))
    # Enable required Apache modules.
    for module in ['rewrite', 'headers', 'env', 'dir', 'mime']:
        subprocess.call(['a2enmod', module])
    set_flag('apache_reload_needed')
    set_flag('nextcloud.initdone')
    set_flag('apache.start')
    log("Installation and initialization of nextcloud completed.")
    open_port(port='80')
    status_set('active', "Nextcloud init complete.")
@when_all('apache.started', 'apache_reload_needed')
def reload_apache2():
    """Reload apache2 once a reload has been requested and apache is up."""
    host.service_reload('apache2')
    clear_flag('apache_reload_needed')
@when_none('mysql.available', 'postgres.master.available')
def blocked_on_database():
    """Set 'blocked' status while no database relation is available."""
    status_set('blocked', "Need Mysql or Postgres relation to continue")
@hook('update-status')
def update_status():
    """Call `occ status` periodically (update-status hook) and set the
    workload version/state accordingly."""
    nextcloud_status = "sudo -u www-data /usr/bin/php occ status"
    with chdir('/var/www/nextcloud'):
        try:
            output = subprocess.run(nextcloud_status.split(), stdout=subprocess.PIPE).stdout.split()
            # occ status prints "installed: true/false" and "version: X";
            # indexes 2 and 5 pick those values out of the whitespace-split
            # output — TODO confirm against the occ output format in use.
            version = output[5].decode('UTF-8')
            install_status = output[2].decode('UTF-8')
            if install_status == 'true':
                application_version_set(version)
                status_set('active', "Nextcloud is OK.")
            else:
                status_set('waiting', "Nextcloud install state not OK.")
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception with the same handling.
            status_set('waiting', "Nextcloud install state not OK.")
@when('apache.available')
@when_any('config.changed.php_max_file_uploads',
          'config.changed.php_upload_max_filesize',
          'config.changed.php_post_max_size',
          'config.changed.php_memory_limit')
def config_php_settings():
    """Render nextcloud.ini with the PHP limits from charm config.

    Writes a dedicated mods-available file for nextcloud instead of
    manipulating the system-wide php.ini, which might be overwritten or
    changed from elsewhere.
    """
    php_settings = {
        'max_file_uploads': config('php_max_file_uploads'),
        'upload_max_filesize': config('php_upload_max_filesize'),
        'post_max_size': config('php_post_max_size'),
        'memory_limit': config('php_memory_limit'),
    }
    templating.render(source="nextcloud.ini",
                      target='/etc/php/7.2/mods-available/nextcloud.ini',
                      context=php_settings)
    subprocess.check_call(['phpenmod', 'nextcloud'])
    if is_state("apache.started"):
        log("reloading apache2 after reconfiguration")
        host.service_reload('apache2')
    # Acknowledge the config.changed.* flags so this handler does not re-fire.
    for flag in ('config.changed.php_max_file_uploads',
                 'config.changed.php_upload_max_filesize',
                 'config.changed.php_memory_limit',
                 'config.changed.php_post_max_size'):
        clear_flag(flag)
clear_flag(f) | erik78se/layer-nextcloud | src/reactive/nextcloud.py | nextcloud.py | py | 6,879 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "charmhelpers.core.hookenv.log",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "charms.reactive.relations.endpoint_from_flag",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "charms.reactive.relations.endpoint_from_flag",
"line_number": 27,
... |
8353691653 | # flake8: noqa
from __future__ import absolute_import, unicode_literals
import json
import os
import pytest
from c8.collection import StandardCollection
from c8.exceptions import (
CollectionCreateError,
CollectionDeleteError,
CollectionFindError,
CollectionImportFromFileError,
CollectionListError,
CollectionPropertiesError,
)
from tests.helpers import assert_raises, extract, generate_random_collection_name
@pytest.mark.vcr
def test_get_collection_information(client, col, tst_fabric_name):
    """get_collection_information returns the name and no error for an
    existing collection, and raises for a missing one."""
    fabric = client._tenant.useFabric(tst_fabric_name)
    collection = fabric.collection(col.name)
    info = collection.get_collection_information()
    assert info["error"] is False
    assert info["name"] == collection.name
    # A collection that does not exist must raise CollectionFindError.
    with assert_raises(CollectionFindError):
        fabric.collection("test_collection_collection_1").get_collection_information()
@pytest.mark.vcr
def test_collection_figures(client, col, tst_fabric_name):
    """collection_figures reports the collection name and non-system status."""
    fabric = client._tenant.useFabric(tst_fabric_name)
    collection = fabric.collection(col.name)
    figures = collection.collection_figures()
    assert figures["name"] == collection.name
    assert figures["isSystem"] is False
    # A collection that does not exist must raise CollectionFindError.
    with assert_raises(CollectionFindError):
        fabric.collection("test_collection_collection_2").collection_figures()
@pytest.mark.vcr
def test_collection_attributes(client, col, tst_fabric):
    """Basic attributes of a StandardCollection match its tenant/fabric."""
    assert col.context in ("default", "async", "batch", "transaction")
    assert col.tenant_name == client._tenant.name
    assert col.fabric_name == tst_fabric.name
    assert col.name.startswith("test_collection")
    assert repr(col) == "<StandardCollection {}>".format(col.name)
# def test_collection_misc_methods(col, tst_fabric):
# # Test get properties
# get_col_properties = tst_fabric.collection(col.name).collection_figures()
# assert get_col_properties["name"] == col.name
# assert get_col_properties["isSystem"] is False
# # Test get properties with bad collection
# with assert_raises(CollectionFindError):
# tst_fabric.collection(generate_col_name()).collection_figures()
#
# # Test configure properties
# prev_sync = get_col_properties["waitForSync"]
# prev_has_stream = get_col_properties["hasStream"]
#
# properties = tst_fabric.update_collection_properties(
# collection_name=col.name, has_stream=True, wait_for_sync=True
# )
# assert properties["name"] == col.name
# assert properties["isSystem"] is False
# assert properties["waitForSync"] is not prev_sync
# assert properties["hasStream"] is not prev_has_stream
#
# properties = tst_fabric.update_collection_properties(
# collection_name=col.name, wait_for_sync=False
# )
# assert properties["name"] == col.name
# assert properties["isSystem"] is False
# assert properties["waitForSync"] is False
# assert properties["hasStream"] is True
#
# # Test configure properties with bad collection
# with assert_raises(CollectionPropertiesError) as err:
# tst_fabric.update_collection_properties(
# collection_name=generate_col_name(), wait_for_sync=True
# )
# assert err.value.error_code == 1203
#
# # Test preconditions
# doc_id = col.name + "/" + "foo"
# tst_fabric.collection(col.name).insert({"_id": doc_id})
# assert len(col) == 1
#
# # Test truncate collection
# assert col.truncate() is True
# assert len(col) == 0
# def test_collection_management(tst_fabric, client, bad_fabric):
# # Test create collection
# col_name = generate_col_name()
# assert tst_fabric.has_collection(col_name) is False
#
# col = tst_fabric.create_collection(
# name=col_name,
# sync=False,
# edge=False,
# user_keys=True,
# key_increment=None,
# key_offset=None,
# key_generator="autoincrement",
# shard_fields=None,
# index_bucket_count=None,
# sync_replication=None,
# enforce_replication_factor=None,
# spot_collection=False,
# local_collection=False,
# is_system=False,
# stream=False,
# )
# assert tst_fabric.has_collection(col_name) is True
#
# get_col_properties = tst_fabric.collection(col.name).collection_figures()
# if col.context != "transaction":
# assert "id" in get_col_properties
# assert get_col_properties["name"] == col_name
# assert get_col_properties["waitForSync"] is False
# assert get_col_properties["isSystem"] is False
# assert get_col_properties["keyOptions"]["type"] == "autoincrement"
# assert get_col_properties["keyOptions"]["allowUserKeys"] is True
# assert get_col_properties["keyOptions"]["increment"] == 1
# assert get_col_properties["keyOptions"]["offset"] == 0
#
# # Test create duplicate collection
# with assert_raises(CollectionCreateError) as err:
# tst_fabric.create_collection(col_name)
# assert err.value.error_code == 1207
#
# # Test list collections
# assert col_name in extract("name", tst_fabric.collections())
# bad = client._tenant.useFabric(bad_fabric)
# # Test list collections with bad fabric
# with assert_raises(CollectionListError):
# bad.collections()
#
# # Test get collection object
# test_col = tst_fabric.collection(col.name)
# assert isinstance(test_col, StandardCollection)
# assert test_col.name == col.name
#
# test_col = tst_fabric[col.name]
# assert isinstance(test_col, StandardCollection)
# assert test_col.name == col.name
#
# # Test delete collection
# assert tst_fabric.delete_collection(col_name, system=False) is True
# assert col_name not in extract("name", tst_fabric.collections())
#
# # Test drop missing collection
# with assert_raises(CollectionDeleteError) as err:
# tst_fabric.delete_collection(col_name)
# assert err.value.error_code == 1203
# assert tst_fabric.delete_collection(col_name, ignore_missing=True) is False
@pytest.mark.vcr
def test_insert_from_file(client, col, tst_fabric_name):
    """insert_document_from_file imports JSON and CSV files and rejects
    files without a recognised extension."""
    absolute_path = os.path.dirname(__file__)
    json_path = os.path.join(absolute_path, "files/data.json")
    csv_path = os.path.join(absolute_path, "files/data.csv")
    invalid_file_path = os.path.join(absolute_path, "files/data")
    # Use a context manager so the handle is closed even when an assertion
    # below fails (the old code only reached file.close() on full success).
    with open(json_path) as file:
        documents = json.load(file)
    client._tenant.useFabric(tst_fabric_name)
    client.insert_document_from_file(collection_name=col.name, filepath=json_path)
    data = client.collection(collection_name=col.name).export(limit=len(documents))
    # Strip the system attributes before comparing against the fixture data.
    entries = ("_id", "_key", "_rev")
    for doc in data:
        for key in entries:
            if key in doc:
                del doc[key]
    assert documents == data
    col.truncate()
    client.insert_document_from_file(collection_name=col.name, filepath=csv_path)
    data = client.collection(collection_name=col.name).export(limit=len(documents))
    assert len(data) == len(documents)
    col.truncate()
    with assert_raises(CollectionImportFromFileError) as err:
        client.insert_document_from_file(
            collection_name=col.name, filepath=invalid_file_path
        )
    assert (
        str(err)
        == "<ExceptionInfo CollectionImportFromFileError('Invalid file') tblen=3>"
    )
def _insert_and_verify(client, col, document_count):
    """Insert `document_count` docs via AQL and check get_all_documents
    returns all of them with the expected ascending values."""
    client.execute_query(
        query="FOR doc IN 1..{} INSERT {{value:doc}} INTO {}".format(
            document_count, col.name
        )
    )
    resp = client.get_all_documents(collection_name=col.name)
    assert document_count == len(resp)
    for i in range(len(resp)):
        assert resp[i]["value"] == i + 1


@pytest.mark.vcr
def test_all_documents(client, col, tst_fabric_name):
    """get_all_documents pages correctly both above and below the server
    batch size (the duplicated insert/verify code is now a helper)."""
    client._tenant.useFabric(tst_fabric_name)
    _insert_and_verify(client, col, 2003)
    col.truncate()
    _insert_and_verify(client, col, 11)
| Macrometacorp/pyC8 | tests/test_collection.py | test_collection.py | py | 8,364 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "tests.helpers.assert_raises",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "c8.exceptions.CollectionFindError",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 21,
"usage_type": "attribute"
},
{
... |
23158641917 | import requests
import json
def get_weather(api_key, city):
    """Fetch the current weather for `city` from WeatherAPI.com and print
    the temperature (°C) and condition text, or a failure message."""
    url = "http://api.weatherapi.com/v1/current.json"
    # Let requests build the query string so the city name is URL-encoded;
    # the previous f-string URL broke for cities containing spaces or '&'.
    # A timeout prevents the script from hanging on network problems.
    response = requests.get(url, params={"key": api_key, "q": city}, timeout=10)
    data = response.json()
    if "error" in data:
        print("Failed to fetch weather data.")
    else:
        temperature = data["current"]["temp_c"]
        description = data["current"]["condition"]["text"]
        print(f"Temperature: {temperature}°C")
        print(f"Description: {description}")
def main():
    """Prompt for a city name and display its current weather."""
    import os
    # Prefer an environment variable so the API key need not live in source
    # control; fall back to the original hard-coded key so behaviour is
    # unchanged when the variable is not set.
    api_key = os.environ.get("WEATHERAPI_KEY", "ae2fa0e696154eb699092948232106")
    city = input("Enter city name: ")
    get_weather(api_key, city)

if __name__ == "__main__":
    main()
| Mutukukioko/WeatherApp | main.py | main.py | py | 703 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
}
] |
1584228601 | from django.conf import settings
from cms.models import Title
from minitrue.base import replacer
from minitrue.contrib.django_cms.utils import plugin_get_url
def title_get_url(obj):
    """Return the public URL of the CMS page that owns this Title."""
    page = obj.page
    return page.get_absolute_url()
# Page titles: every translatable Title field takes part in search/replace;
# the page is prefetched via select_related so urlgetter needs no extra query.
replacer.register(Title,
                  fields=['title', 'page_title', 'menu_title', 'redirect',
                          'meta_description', 'meta_keywords'],
                  urlgetter=title_get_url,
                  select_related=['page'])

# Each CMS plugin model is only registered when its app is installed.
if 'cms.plugins.text' in settings.INSTALLED_APPS:
    from cms.plugins.text.models import Text
    replacer.register(Text, fields=['body'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])

if 'cms.plugins.snippet' in settings.INSTALLED_APPS:
    from cms.plugins.snippet.models import Snippet
    replacer.register(Snippet, fields=['html'],
                      select_related=['placeholder__page'])

if 'cms.plugins.file' in settings.INSTALLED_APPS:
    from cms.plugins.file.models import File
    replacer.register(File, fields=['title'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])

if 'cms.plugins.link' in settings.INSTALLED_APPS:
    from cms.plugins.link.models import Link
    replacer.register(Link, fields=['name'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])

if 'cms.plugins.picture' in settings.INSTALLED_APPS:
    from cms.plugins.picture.models import Picture
    replacer.register(Picture, fields=['alt', 'longdesc'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])

if 'cms.plugins.teaser' in settings.INSTALLED_APPS:
    from cms.plugins.teaser.models import Teaser
    replacer.register(Teaser, fields=['title', 'description'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])

if 'cms.plugins.twitter' in settings.INSTALLED_APPS:
    from cms.plugins.twitter.models import TwitterRecentEntries, TwitterSearch
    replacer.register(TwitterRecentEntries, fields=['title'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])
    replacer.register(TwitterSearch, fields=['title'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])
) | beniwohli/django-minitrue | minitrue/contrib/django_cms/searchreplace.py | searchreplace.py | py | 2,169 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "minitrue.base.replacer.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cms.models.Title",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "minitrue.base.replacer",
"line_number": 11,
"usage_type": "name"
},
{
"api_n... |
36939207944 | import json
import paho.mqtt.client as pmqtt
class mqtt():
    """HIAS iotJumpWay MQTT Module

    This module connects devices, applications, robots and software to
    the HIAS iotJumpWay MQTT Broker.
    """

    def __init__(self, helpers, client_type, configs):
        """Initializes the class.

        Args:
            helpers: helper object exposing `logger` used for all log output.
            client_type: label for this client used in log messages.
            configs: dict of connection settings; must provide every key
                listed in `self.agent`.
        """
        self.configs = configs
        self.client_type = client_type
        self.isConnected = False
        self.helpers = helpers
        self.program = "HIAS iotJumpWay MQTT Module"
        self.mqtt_config = {}
        self.module_topics = {}
        # Configuration keys that configure() requires to be non-None.
        self.agent = [
            'host',
            'port',
            'location',
            'zone',
            'entity',
            'name',
            'un',
            'up'
        ]
        self.helpers.logger.info(self.program + " initialization complete.")

    def configure(self):
        """Connection configuration.

        Validates the required settings, stores the broker host/port and
        CA bundle, and builds this entity's status topic.
        """
        self.client_id = self.configs['name']
        for param in self.agent:
            if self.configs[param] is None:
                # NOTE(review): ConfigurationException is neither defined nor
                # imported in this module, so this branch would actually raise
                # NameError — import or define the exception class.
                raise ConfigurationException(param + " parameter is required!")
        # Sets MQTT connection configuration (CA bundle, broker host/port).
        self.mqtt_config["tls"] = "/etc/ssl/certs/DST_Root_CA_X3.pem"
        self.mqtt_config["host"] = self.configs['host']
        self.mqtt_config["port"] = self.configs['port']
        # Status topic: Location/Agents/Zone/Entity/Status
        self.module_topics["statusTopic"] = '%s/Agents/%s/%s/Status' % (
            self.configs['location'], self.configs['zone'], self.configs['entity'])
        # Callbacks are attached by the application after configure().
        self.integrity_callback = None
        self.helpers.logger.info(
            "iotJumpWay " + self.client_type + " connection configured.")

    def start(self):
        """Starts the HIAS iotJumpWay MQTT connection.

        Registers an OFFLINE last-will on the status topic, enables TLS
        and authentication, connects and starts the network loop thread.
        """
        self.mqtt_client = pmqtt.Client(client_id=self.client_id, clean_session=True)
        self.mqtt_client.will_set(self.module_topics["statusTopic"], "OFFLINE", 0, False)
        self.mqtt_client.tls_set(self.mqtt_config["tls"], certfile=None, keyfile=None)
        self.mqtt_client.on_connect = self.on_connect
        self.mqtt_client.on_message = self.on_message
        self.mqtt_client.on_publish = self.on_publish
        self.mqtt_client.on_subscribe = self.on_subscribe
        self.mqtt_client.username_pw_set(str(self.configs['un']), str(self.configs['up']))
        self.mqtt_client.connect(self.mqtt_config["host"], self.mqtt_config["port"], 10)
        self.mqtt_client.loop_start()
        self.helpers.logger.info(
            "iotJumpWay " + self.client_type + " connection started.")

    def on_connect(self, client, obj, flags, rc):
        """On connection callback: publish ONLINE and subscribe once."""
        if not self.isConnected:  # guard so broker reconnects do not re-run setup
            self.isConnected = True
            self.helpers.logger.info("iotJumpWay " + self.client_type + " connection successful.")
            self.helpers.logger.info("rc: " + str(rc))
            self.status_publish("ONLINE")
            self.subscribe()

    def status_publish(self, data):
        """Publishes a status value to this entity's status topic."""
        self.mqtt_client.publish(self.module_topics["statusTopic"], data)
        self.helpers.logger.info("Published to " + self.client_type + " status.")

    def on_subscribe(self, client, obj, mid, granted_qos):
        """On subscription callback."""
        self.helpers.logger.info("iotJumpWay " + self.client_type + " subscription")

    def on_message(self, client, obj, msg):
        """On message callback: logs the payload and routes Integrity
        messages to the registered integrity callback."""
        split_topic = msg.topic.split("/")
        conn_type = split_topic[1]
        # Channel position inside the topic differs by entity type.
        channel_index = {
            "Agents": 4,
            "Robotics": 3,
            "Applications": 3,
            "Staff": 3,
            "Devices": 4,
            "HIASBCH": 4,
            "HIASCDI": 4,
            "HIASHDI": 4,
        }
        # Fixed: an unknown connection type previously left `topic` unbound
        # and crashed with NameError below; now it simply is not routed.
        topic = split_topic[channel_index[conn_type]] if conn_type in channel_index else None
        self.helpers.logger.info(msg.payload)
        self.helpers.logger.info("iotJumpWay " + conn_type + " " \
            + msg.topic + " communication received.")
        if topic == 'Integrity':
            if self.integrity_callback is None:
                self.helpers.logger.info(
                    conn_type + " Integrity callback required (integrity_callback) !")
            else:
                self.integrity_callback(msg.topic, msg.payload)

    def publish(self, channel, data, channel_path = ""):
        """Publishes a JSON-encoded iotJumpWay MQTT payload.

        A `channel` of "Custom" publishes to `channel_path` verbatim;
        anything else is published under this entity's topic tree.
        """
        if channel == "Custom":
            channel = channel_path
        else:
            channel = '%s/Agents/%s/%s/%s' % (self.configs['location'],
                self.configs['zone'], self.configs['entity'], channel)
        self.mqtt_client.publish(channel, json.dumps(data))
        self.helpers.logger.info("Published to " + channel)
        return True

    def subscribe(self, application = None, channelID = None, qos=0):
        """Subscribes to every channel beneath this entity's location."""
        channel = '%s/#' % (self.configs['location'])
        self.mqtt_client.subscribe(channel, qos=qos)
        self.helpers.logger.info("-- Agent subscribed to all channels")
        return True

    def on_publish(self, client, obj, mid):
        """On publish callback."""
        self.helpers.logger.info("Published: "+str(mid))

    def on_log(self, client, obj, level, string):
        """On log callback."""
        print(string)

    def disconnect(self):
        """Publishes OFFLINE and disconnects from the HIAS iotJumpWay
        MQTT Broker, stopping the network loop."""
        self.status_publish("OFFLINE")
        self.mqtt_client.disconnect()
        self.mqtt_client.loop_stop()
| leukaemiamedtech/hiasbch-mqtt-blockchain-agent | modules/mqtt.py | mqtt.py | py | 6,273 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "paho.mqtt.client.Client",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 168,
"usage_type": "call"
}
] |
43391129954 | # 搜索网易云上评论超过几万来着
from selenium import webdriver
class Spider:
    """Crawl NetEase Cloud Music song pages, recording songs with more
    than 50,000 comments while hopping between related-song links."""

    def __init__(self):
        # State is now per-instance; the original kept these as class
        # attributes, which shared state between instances and launched
        # Chrome as a side effect of merely defining the class.
        self.page = webdriver.Chrome()
        self.list_ge = []   # titles of songs that passed the comment threshold
        self.count = 0      # number of hops performed so far
        self.list_url = []  # URLs already visited (loop protection)

    def get_url(self, url="https://music.163.com/#/song?id=31654747"):
        """Visit `url`; if the song has more than 50,000 comments record
        its title, then recurse into an unvisited related-song link."""
        try:
            self.list_url.append(url)
            self.page.get(url)
            self.page.implicitly_wait(10)
            # The song details are rendered inside the 'contentFrame' iframe.
            self.page.switch_to_frame("contentFrame")
            # Check the comment count and, if large enough, grab the title.
            comments = self.page.find_element_by_id("cnt_comment_count")
            if int(comments.text) > 50000:
                song = self.page.find_element_by_class_name("f-ff2").text
                # Fixed: the original rebound `list_ge` to a fresh local list
                # here, so every collected title was discarded immediately.
                self.list_ge.append(song)
                # Follow the first related-song link...
                next_url = self.page.find_elements_by_class_name("s-fc1")[0].get_attribute("href")
                # ...unless it was already visited; then take the second one.
                for visited in self.list_url:
                    if visited == next_url:
                        next_url = self.page.find_elements_by_class_name("s-fc1")[1].get_attribute("href")
                # Recurse until 10 hops have been made (the original comment
                # said 5, but the code has always compared against 10).
                if self.count == 10:
                    return 1
                self.count = self.count + 1
                print(url, song)
                self.get_url(next_url)
        except Exception as e:
            print(e)
# Kick off the crawl from the default seed song.
spider = Spider()
spider.get_url()
| frebudd/python | wangyiyu_pinglun.py | wangyiyu_pinglun.py | py | 1,676 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
}
] |
10442912320 | import requests
from bs4 import BeautifulSoup
import html5lib
"""THE BELOW REQUEST CAN BE MODIFIED TO GET MORE DATA BY CHANGING THE /page/1 to any page no"""
r=requests.get('https://cutoffs.aglasem.com/page/1')
s=BeautifulSoup(r.content,'html5lib')
jc=s.find(class_="jeg_posts jeg_load_more_flag")
for i in range(0,len(jc)-2):
v=jc.find_all('article')[i]
t=v.find('div',class_="jeg_postblock_content")
title=t.find('h3').find('a').getText()
link=t.find('h3').find('a')['href']
print(title,link)
| fredysomy/web-scrape-data | college-cuttofs-updates.py | college-cuttofs-updates.py | py | 522 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 6,
"usage_type": "call"
}
] |
39542654444 | import requests
import json
import csv
headers = {
'Authorization': '',
'API-Key': '',
'Accept': 'application/json',
}
p = {
'severities': ''
}
response = requests.get('https://apptwo.contrastsecurity.com/Contrast/api/ng/ORGID/traces/APPID/filter', params=p,headers=headers)
app = requests.get('https://apptwo.contrastsecurity.com/Contrast/api/ng/ORGID/applications/APPID/', headers=headers)
result=json.loads(response.text)
appName=json.loads(app.text)
print(result)
"""
with open('contrast.csv', mode='w') as csv_file:
fieldnames=['AppName','VulnID', 'Title', 'Status', 'Severity']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for i in range (0, len(result['traces'])):
writer.writerow({'AppName': appName['application']['name'],'VulnID': result['traces'][i]['uuid'], 'Title': result['traces'][i]['title'], 'Status': result['traces'][i]['status'], 'Severity': result['traces'][i]['severity']})
"""
| abridgel-zz/scripts | lab3.py | lab3.py | py | 976 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": ... |
28806078956 | from typing import Dict
from typing import Iterator
from typing import List
from jira.resources import Board
from ..exceptions import QueryError
from ..plugin import BaseSource
from ..types import SchemaRow
class Source(BaseSource):
    """Query source yielding Jira agile boards.

    Supports an optional ``where`` dictionary with ``type`` and/or ``name``
    keys (mapped to the boards endpoint's filter parameters); ``order_by``
    and ``expand`` are rejected because the boards endpoint does not
    support them.
    """

    SCHEMA: List[SchemaRow] = [
        SchemaRow.parse_obj({"id": "id", "type": "int"}),
        SchemaRow.parse_obj({"id": "name", "type": "str"}),
        SchemaRow.parse_obj({"id": "type", "type": "str"}),
    ]

    def __iter__(self) -> Iterator[Dict]:
        """Yield raw board dicts, paginating until the result limit.

        Raises QueryError for unsupported query clauses or unexpected
        ``where`` keys.
        """
        start_at = 0
        max_results = 2**32
        result_limit = self.query.limit or 2**32

        if self.query.order_by:
            raise QueryError(
                "Board query 'order_by' expressions are not supported. "
                "Use 'sort_by' instead."
            )
        if self.query.expand:
            raise QueryError("Board query 'expand' expressions are not supported.")

        where = self.query.where or {}
        if where and not isinstance(where, dict):
            raise QueryError(
                "Board query 'where' expressions should be a dictionary "
                "having any of the following keys: 'type' or 'name'"
            )
        # Pop from a copy: popping directly would mutate the caller's
        # query.where dict, so a second iteration over this source would
        # silently lose its 'type'/'name' filters.
        where = dict(where)
        param_type = where.pop("type", None)
        param_name = where.pop("name", None)

        if where:
            raise QueryError(f"Unexpected 'where' parameters: {where}.")

        self.update_progress(completed=0, total=1, visible=True)

        while start_at < min(max_results, result_limit):
            results = self.jira.boards(
                startAt=start_at,
                maxResults=min(result_limit, 100),
                type=param_type,
                name=param_name,
            )
            max_results = results.total
            count = min([results.total, result_limit])
            self.update_count(count)
            for result in results:
                self.update_progress(advance=1, total=count, visible=True)
                yield result.raw
                # Advance one per yielded board so the next page request
                # starts right after the last board seen.
                start_at += 1

                # Return early if our result limit has been reached
                if start_at >= result_limit:
                    break

    def rehydrate(self, value: Dict) -> Board:
        """Re-wrap a stored raw board dict as a jira ``Board`` resource."""
        return Board(
            {"agile_rest_path": self.jira._options["agile_rest_path"]}, None, value
        )
| coddingtonbear/jira-select | jira_select/sources/boards.py | boards.py | py | 2,300 | python | en | code | 22 | github-code | 6 | [
{
"api_name": "plugin.BaseSource",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "types.SchemaRow",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "types.SchemaRow.parse_o... |
12095699545 | from argparse import ArgumentParser
import json
from tqdm import tqdm
import os, sys
import logging
import re
import gc
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from bert_diora.models import BertDiora
from bert_diora.utils import TokenizedLengthSampler
def main(args):
    """Train a BertDiora model from command-line arguments.

    Builds the model, optionally resumes from the most recent checkpoint,
    then runs the train loop with periodic dev-set validation,
    checkpointing whenever dev loss improves, and early stopping.
    """
    # Set torch
    torch.manual_seed(args.torch_seed)
    # Set device
    device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
    # Make checkpoint/log directory
    model_store_path = os.path.join(args.model_store_path, args.model_postfix)
    try:
        os.mkdir(model_store_path)
    except FileExistsError:
        # Directory already exists: in --secure mode ask before overwriting.
        if args.secure:
            prompt = input("WARNING: overwriting directory " + model_store_path + ". Continue? (y/n)")
            if prompt != "y":
                exit()
    # Init logger (console + train.log in the checkpoint directory)
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    if not args.secure:
        # Remove original log file
        if os.path.exists(os.path.join(model_store_path, "train.log")):
            os.remove(os.path.join(model_store_path, "train.log"))
    file_handler = logging.FileHandler(os.path.join(model_store_path, "train.log"))
    file_handler.setFormatter(formatter)
    logger = logging.getLogger('')
    logger.handlers.clear()
    logger.addHandler(stdout_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    # Log basic info
    logger.info("Training arguments:")
    for arg, value in sorted(vars(args).items()):
        logger.info("- %s: %r", arg, value)
    logger.info("")
    # Dispatch table from --arch to the model class.
    Arch = {
        "diora": BertDiora,
    }[args.arch]
    model = Arch(
        args.model_id,
        freeze=not args.unfreeze,
        device=device,
        loss=args.loss,
        loss_margin_k=args.loss_margin_k,
        loss_margin_lambda=args.loss_margin_lambda
    ).to(device)
    logger.info(model)
    resume_training = False
    if args.from_checkpoint is not None:
        # Fine-tune from a local checkpoint: pick the checkpoint file with the
        # highest (epoch, step) parsed from its filename.
        assert os.path.isdir(args.model_store_path)
        model_load_path = os.path.join(args.model_store_path, args.from_checkpoint)
        assert os.path.isdir(model_load_path)
        last_checkpoint = sorted([
            (int(re.search("epoch_([0-9]*)", f).group(1)), int(re.search("step_([0-9]*)", f).group(1)), f) for f in os.listdir(model_load_path) if f.endswith(".pt")], reverse=True
        )[0][2]
        model_load_path = os.path.join(model_load_path, last_checkpoint)
        model.load_state_dict(torch.load(model_load_path, map_location=device))
        model.device = device
        model = model.to(device)
        if args.from_checkpoint == args.model_postfix:
            # If resume training from an error,
            # remember where training halted so the loop can skip ahead.
            resume_training=True
            resume_epoch = int(re.search("epoch_([0-9]*)", last_checkpoint).group(1))
            resume_step = int(re.search("step_([0-9]*)", last_checkpoint).group(1))
            resume_epoch_step = (resume_epoch, resume_step)
            logger.info(f"Resume training from checkpoint: epoch {resume_epoch}, step {resume_step}")
    # Load data (one sentence per line)
    with open(args.train_data, "r", encoding='UTF-8') as file:
        train_data = file.read().splitlines()
    with open(args.dev_data, "r", encoding='UTF-8') as file:
        dev_data = file.read().splitlines()
    train_loader = DataLoader(train_data, batch_sampler=TokenizedLengthSampler(train_data, args.batch_size, seed=args.torch_seed))
    dev_loader = DataLoader(dev_data, batch_sampler=TokenizedLengthSampler(dev_data, args.batch_size, seed=args.torch_seed))
    # Define optimizer
    optimizer = Adam(model.parameters(), lr=args.lr)
    optimizer.zero_grad()
    min_loss = 1e+10
    early_stop_count = 0
    loss = 0
    for epoch in range(args.epoch): # loop over the dataset multiple times
        if resume_training:
            # If resume training from an error, skip to the halted epoch/step
            # NOTE(review): the `* 100` factor makes this tuple comparison skip
            # every epoch strictly before the resume point -- confirm intent.
            if (epoch, len(train_loader) * 100) <= resume_epoch_step:
                continue
        logger.info(f"< epoch {epoch} >")
        # Train phase
        model.train()
        epoch_size = len(train_loader)
        for i, batch in enumerate(tqdm(train_loader, total=epoch_size)):
            if resume_training:
                # If resume training from an error, skip to the halted epoch/step
                if (epoch, i) <= resume_epoch_step:
                    continue
            sent = batch
            # try:
            if True:
                # forward + backward + optimize
                # NOTE(review): gradients accumulate over update_freq steps but
                # the loss is not scaled by 1/update_freq -- confirm the
                # effective step size is intended.
                loss = model(sent)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                if i % args.update_freq == args.update_freq - 1 or i == epoch_size-1:
                    optimizer.step()
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    loss = 0
            # except Exception as e:
            #     logger.warning(str(e))
            #     logger.info("Exception occured; returning to training")
            #     gc.collect()
            #     torch.cuda.empty_cache()
            #     gc.collect()
            #     torch.cuda.empty_cache()
            # finally:
            #     if i % args.update_freq == args.update_freq - 1 or i == epoch_size-1:
            #         loss = 0
            if i % args.log_interval == args.log_interval-1 or i == epoch_size-1:
                # Eval phase (on dev set): average model loss over all dev sentences.
                model.eval()
                with torch.no_grad():
                    total = len(dev_data)
                    dev_loss = 0
                    first_batch=True
                    for dev_batch in dev_loader:
                        dev_sents = dev_batch
                        if first_batch:
                            # test_input = gen_inputs[0]
                            # test_outputs = model.generate([test_input])[0]
                            dev_loss += (model(dev_sents)).item() * len(dev_sents)
                            first_batch=False
                        else:
                            dev_loss += (model(dev_sents)).item() * len(dev_sents)
                logger.info("=================================================")
                logger.info(f"epoch {epoch}, step {i}")
                logger.info(f"dev loss = {dev_loss/total}")
                logger.info("")
                # logger.info("Test generation result")
                # logger.info(f"input: {test_input}")
                # logger.info(f"output:")
                # for test_output in test_outputs:
                #     logger.info(f"  {test_output}")
                # logger.info("")
                if dev_loss/total < min_loss:
                    # Dev loss improved: checkpoint and reset early stopping.
                    logger.info(f"Updating min_loss = {min_loss} -> {dev_loss/total}")
                    min_loss = dev_loss / total
                    logger.info("Save model checkpoint because reduced loss...")
                    name = f"Model_{args.model_postfix}_epoch_{epoch}_step_{i+1}.pt"
                    torch.save(model.state_dict(), os.path.join(model_store_path, name))
                    early_stop_count = 0
                else:
                    early_stop_count += 1
                    logger.info(f"Min loss not updated for {early_stop_count} validation routines...")
                    if early_stop_count >= args.early_stop:
                        logger.info("Early stopping....")
                        return
                logger.info("=================================================")
if __name__ == "__main__":
    parser = ArgumentParser()
    # Dataset
    parser.add_argument("--train_data", required=True, help="Training set(raw text, linebreaked)")
    parser.add_argument("--dev_data", required=True, help="Validation set(raw text, linebreaked)")
    # Base model/checkpoint configuration
    parser.add_argument("--from_checkpoint", required=False, default=None, help="Pretrained checkpoint to load and resume training.")
    parser.add_argument("--model_id", required=False, default="bert-base-uncased", help="Base model for DIORA architecture.")
    parser.add_argument("--arch", required=False, default="diora", choices=["diora", "dora"], help="Recursive autoencoder architecture")
    parser.add_argument("--loss", required=False, default="cossim", choices=["cossim", "token_ce", "token_margin"], help="Loss function to apply to DIORA")
    parser.add_argument("--loss_margin_k", type=int, required=False, default=50, help="(loss=token_margin) How many negative tokens to compare")
    parser.add_argument("--loss_margin_lambda", type=float, required=False, default=1.0, help="(loss=token_margin) max-margin value")
    parser.add_argument("--max_grad_norm", type=float, required=False, default=5, help="Max L2 norm for radient cipping")
    # Hyperparameters
    parser.add_argument("--batch_size", type=int, default=8, help="training batch size")
    parser.add_argument("--update_freq", type=int, default=1, help="gradient accumulation for virtually larger batches")
    parser.add_argument("--lr", type=float, default=2e-3, help="Learning rate (default: Adam optimizer)")
    parser.add_argument("--epoch", type=int, default=5, help="epoch count")
    parser.add_argument("--unfreeze", action='store_true', help="If set, we also train the underlying parameter too.")
    parser.add_argument("--log_interval", type=int, default=20000, help="validating / checkpoint saving interval. Validates at the end of each epoch for default.")
    parser.add_argument("--early_stop", type=int, default=4, help="if valid loss does not decrease for `early_stop` validations, stop training.")
    # PyTorch/CUDA configuration
    parser.add_argument("--gpu", type=int, default=0, help="CUDA index for training")
    parser.add_argument("--torch_seed", type=int, default=0, help="torch_seed() value")
    # Checkpoint configs
    parser.add_argument("--model_store_path", required=False, default='checkpoints', help="Directory to store model checkpoints.")
    parser.add_argument("--model_postfix", required=False, help="Name for the model. defaulted to {model_id}-arch")
    parser.add_argument("--secure", required=False, action="store_true", help="")
    args = parser.parse_args()
    # Post-modification of args
    if args.model_postfix is None:
        short_model_name = args.model_id.split("-")[0].split("_")[0]
        args.model_postfix = short_model_name + '-' + args.arch + "-" + args.loss
    main(args)
{
"api_name": "torch.manual_seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
70893752508 | import sys
import pandas as pd
from sklearn.feature_selection import SelectKBest, mutual_info_regression
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
# Load the dataset
#filename = sys.argv[1]
# Load the dataset to anonymize.
# filename = sys.argv[1]
data = pd.read_csv('uploads/BigBasket.csv')
# Encode categorical variables using label encoding
le = LabelEncoder()
for column in data.columns:
    data[column] = le.fit_transform(data[column])
# .copy() makes X an independent frame: the k-anonymity assignments below
# would otherwise write through a view of `data` and trigger pandas'
# SettingWithCopyWarning / unpredictable chained-assignment behavior.
# NOTE(review): the target column 'brand' is also included in X, so its
# mutual-information score with y is trivially high -- confirm whether it
# should be dropped from the feature matrix.
X = data[['index','product','category','sub_category','brand','sale_price','market_price','type','rating','description']].copy()
y = data['brand']
# Use SelectKBest with mutual information to find the top 5 features
selector = SelectKBest(score_func=mutual_info_regression, k=5)
selector.fit(X, y)
# Collect the selected feature names and their mutual-information scores.
top_features = selector.scores_
top_features_index = selector.get_support(indices=True)
feature_names = []
scores = []
for i, feature in enumerate(X.columns[top_features_index]):
    feature_names.append(feature)
    scores.append(top_features[top_features_index[i]])
# Sort the features based on their scores in descending order
sorted_features = sorted(zip(feature_names, scores), key=lambda x: x[1], reverse=True)
print("Top 5 features:")
for i in range(5):
    print(f'{i+1}. {sorted_features[i][0]} ({sorted_features[i][1]:.4f})')
# Generalization step per feature (None = leave the column untouched).
k_anonymity_rules = {
    'index': None,
    'product': 2,
    'category': 3,
    'sub_category': 3,
    'brand': 2
}
# Apply k-anonymity-style generalization to the top 5 features: bucket each
# label-encoded value down to the nearest multiple of k.
for feature in sorted_features[:5]:
    feature_name = feature[0]
    k = k_anonymity_rules.get(feature_name, None)
    if k is not None:
        X[feature_name] = X[feature_name] // k * k
# Save the anonymized data to a new CSV file
output_filename = 'BigBasket_anonymized.csv'
X.to_csv(output_filename, index=False)
# Plot the feature selection scores
plt.bar(feature_names, scores)
plt.xlabel('Feature')
plt.ylabel('Score')
plt.title('Feature Selection Scores')
# Show the plot
plt.show()
# Print the filenames of the output files
print(f'Anonymized dataset saved to {output_filename}')
# print(f'Feature selection scores plot saved to {plot_filename}')
| FireQueen-3010/MainProject | script.py | script.py | py | 2,244 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_selection.SelectKBest",
"line_number": 21,
"usage_type": "call"
},
... |
35060216092 | from flask import Flask, render_template, request, jsonify
import atexit
import cf_deployment_tracker
import os
import json
import requests
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
db_name = 'mydb'
client = None
db = None
'''
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
'''
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('PORT', 8080))
def loadApiKeys(mltype):
    """Load API credentials for one ML task type from apikeys.json.

    Returns the "classification" or "prediction" section of the file for the
    matching ``mltype``; prints a message and returns None for any other value.
    """
    with open('apikeys.json') as key_file:
        all_keys = json.load(key_file)
    if mltype in ("classification", "prediction"):
        return all_keys[mltype]
    print("Algorithm doesn't exist")
@app.route('/')
def home():
    """Landing page (currently serves the prediction form)."""
    return render_template('prediction.html')

@app.route('/prediction')
def render_prediction():
    """Interest-rate prediction form."""
    return render_template('prediction.html')

@app.route('/classification')
def render_classification():
    """Delinquency classification form."""
    return render_template('classification.html')
# /* Endpoint to greet and add a new visitor to database.
# * Send a POST request to localhost:8080/api/visitors with body
# * {
# * "name": "Bob"
# * }
# */
@app.route('/prediction/getPrediction', methods=['POST'])
def get_prediction():
    """Call the Azure ML interest-rate prediction web service.

    Expects a JSON body with the loan features plus an 'algoType' of
    'pred_df' / 'pred_nn' / 'pred_lr' selecting the model endpoint.
    Returns JSON: {"predicted_interest_rate": <rate or error message>}.
    """
    try:
        apikeys=loadApiKeys('prediction')
        # `is None` instead of `== None` (PEP 8); behavior unchanged.
        if apikeys is None:
            print("Api Keys file has some issue")
            return_dict = {"predicted_interest_rate":"Some Error occured with api keys file"}
            return json.dumps(return_dict)
        else:
            credit_score=request.json['credit_score']
            og_first_time_home_buyer=request.json['og_first_time_home_buyer']
            og_upb=request.json['og_upb']
            og_loan_term=request.json['og_loan_term']
            og_quarter_year=request.json['og_quarter_year']
            og_seller_name=request.json['og_seller_name']
            og_servicer_name=request.json['og_servicer_name']
            algoType = request.json['algoType']
            # Select the Azure ML endpoint for the requested algorithm.
            # NOTE(review): an unknown algoType leaves url/api_key unbound; the
            # resulting NameError is caught below and reported as a generic
            # error -- confirm whether explicit validation is wanted.
            if algoType=="pred_df":
                url=apikeys['boosteddecisiontree']['url']
                api_key=apikeys['boosteddecisiontree']['apikey']
            elif algoType=="pred_nn":
                url=apikeys['neuralnetwork']['url']
                api_key=apikeys['neuralnetwork']['apikey']
            elif algoType=="pred_lr":
                url=apikeys['linearregression']['url']
                api_key=apikeys['linearregression']['apikey']
            # Request payload in the Azure ML batch-execution format.
            data = {
                "Inputs": {
                    "input1":
                    {
                        "ColumnNames": ["CREDIT_SCORE", "FIRST_HOME_BUYER_FLAG", "OG_UPB", "OG_LOANTERM", "SELLER_NAME", "SERVICE_NAME", "OG_QUARTERYEAR"],
                        "Values": [ [credit_score,og_first_time_home_buyer,og_upb,og_loan_term,og_seller_name,og_servicer_name,og_quarter_year]]
                    }, },
                "GlobalParameters": {
                }
            }
            body = str.encode(json.dumps(data))
            headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
            response = requests.post(url, data=body,headers=headers)
            response_json=json.loads(response.content)
            # Column 7 of the scored row holds the predicted rate.
            predicted_interest_rate=response_json['Results']['output1']['value']['Values'][0][7]
            if predicted_interest_rate == "":
                predicted_interest_rate = "Some error occured"
            return_dict = {"predicted_interest_rate":predicted_interest_rate}
            return json.dumps(return_dict)
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return_dict = {"predicted_interest_rate":"Some error occured"}
        return json.dumps(return_dict)
@app.route('/classification/getClassification', methods=['POST'])
def get_classification():
    """Call the Azure ML delinquency classification web service.

    Expects a JSON body with the loan-state features plus an 'algoType' of
    'pred_df' / 'pred_nn' / 'pred_lr' selecting the model endpoint.
    Returns JSON with the class label and scored probability.
    """
    try:
        apikeys=loadApiKeys('classification')
        # `is None` instead of `== None` (PEP 8); behavior unchanged.
        if apikeys is None:
            print("Api Keys file has some issue")
            classified_as="Some Error occured with api keys file"
            scored_probability = ""
            return_dict = {"classified_as":classified_as,"scored_probability":scored_probability}
            return json.dumps(return_dict)
        else:
            curr_act_upb=request.json['curr_act_upb']
            loan_age=request.json['loan_age']
            months_to_legal_maturity=request.json['months_to_legal_maturity']
            # NOTE(review): 'crr_interest_rate' (missing 'u') is the key the
            # client actually sends; renaming it would break callers.
            curr_interest_rate=request.json['crr_interest_rate']
            curr_deferred_upb=request.json['curr_deferred_upb']
            algoType = request.json['algoType']
            # Select the Azure ML endpoint for the requested algorithm; an
            # unknown algoType leaves url/api_key unbound and is caught below.
            if algoType=="pred_df":
                url=apikeys['decisionjungle']['url']
                api_key=apikeys['decisionjungle']['apikey']
            elif algoType=="pred_nn":
                url=apikeys['bayestwopoint']['url']
                api_key=apikeys['bayestwopoint']['apikey']
            elif algoType=="pred_lr":
                url=apikeys['logisticregression']['url']
                api_key = apikeys['logisticregression']['apikey']
            # Request payload in the Azure ML batch-execution format.
            data = {
                "Inputs": {
                    "input1":
                    {
                        "ColumnNames": ["CUR_ACT_UPB", "LOAN_AGE", "MONTHS_LEGAL_MATURITY", "CURR_INTERESTRATE", "CURR_DEF_UPB"],
                        "Values": [[curr_act_upb, loan_age, months_to_legal_maturity, curr_interest_rate, curr_deferred_upb]]
                    }, },
                "GlobalParameters": {
                }
            }
            body = str.encode(json.dumps(data))
            headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
            response = requests.post(url, data=body,headers=headers)
            response_json=json.loads(response.content)
            # Column 5 holds the scored label ("0"/"1"), column 6 the probability.
            if response_json['Results']['output1']['value']['Values'][0][5] == "0":
                scored_probability=response_json['Results']['output1']['value']['Values'][0][6]
                classified_as="Non-Delinquent"
            elif response_json['Results']['output1']['value']['Values'][0][5] == "1":
                scored_probability=response_json['Results']['output1']['value']['Values'][0][6]
                classified_as="Delinquent"
            else:
                classified_as="Some Error occured in Classification"
                scored_probability = ""
            return_dict = {"classified_as":classified_as,"scored_probability":scored_probability}
            return json.dumps(return_dict)
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt are
        # no longer swallowed.
        return_dict = {"classified_as":"Some Error occured."}
        return json.dumps(return_dict)
# /**
# * Endpoint to get a JSON array of all the visitors in the database
# * REST API example:
# * <code>
# * GET http://localhost:8080/api/visitors
# * </code>
# *
# * Response:
# * [ "Bob", "Jane" ]
# * @return An array of all the visitor names
# */
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
    """Greet a visitor (JSON body {"name": ...}), persisting the name only
    when a Cloudant client is configured."""
    user = request.json['name']
    if client:
        data = {'name':user}
        db.create_document(data)
        return 'Hello %s! I added you to the database.' % user
    else:
        print('No database')
        return 'Hello %s!' % user

@atexit.register
def shutdown():
    # Close the Cloudant connection (if any) on interpreter exit.
    if client:
        client.disconnect()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=port, debug=True)
| vishalsatam/DeploymentOfMLAlgoOnCloud | Flask Application/webApp.py | webApp.py | py | 9,720 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "cf_deployment_tracker.track",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "json.load",
"lin... |
39961449850 | #!/usr/bin/env python
# -- coding: utf-8 --
import numpy
from tf import transformations, TransformListener
import rospy
import geometry_msgs
import math
class TransformerTool:
    """Helper around ROS tf for converting rotations and transforming poses
    between coordinate frames."""

    def __init__(self, target_frame=None, source_frame=None):
        self.target_frame = target_frame
        self.source_frame = source_frame
        if target_frame is not None and source_frame is not None:
            # Cache both directions of the 4x4 transform between the frames.
            self.mat44 = self.asMatrix(
                target_frame=target_frame, source_frame=source_frame)
            self.mat44Reserver = self.asMatrix(
                target_frame=source_frame, source_frame=target_frame)

    def quat2rvec(self, quat):
        """Quaternion [x, y, z, w] -> rotation vector (axis-angle)."""
        theta = math.acos(quat[3]) * 2
        if theta < 0.001:
            return [0, 0, 0]
        else:
            # NOTE(review): axis is scaled by 1/sin(theta) rather than
            # 1/sin(theta/2); the normalization below cancels the magnitude,
            # but the sign flips when theta > pi -- confirm intended.
            axis = [x / math.sin(theta) for x in quat[0:3]]
            norm = math.sqrt(axis[0] * axis[0] + axis[1]
                             * axis[1] + axis[2] * axis[2])
            rvec = [x * theta / norm for x in axis]
            return rvec

    def rvec2quat(self, rvec):
        """Rotation vector (axis-angle) -> quaternion [x, y, z, w]."""
        theta = math.sqrt(rvec[0] * rvec[0] + rvec[1]
                          * rvec[1] + rvec[2] * rvec[2])
        if theta < 0.001:
            return [0, 0, 0, 1]
        else:
            axis = [x / theta for x in rvec]
            sht = math.sin(theta * 0.5)
            quat = [x * sht for x in axis]
            quat.append(math.cos(theta * 0.5))
            return quat

    def transformPoseWithFrame(self, target_frame, source_frame, pose):
        """Transform a pose between two explicitly given frames."""
        mat44 = self.asMatrix(target_frame=target_frame,
                              source_frame=source_frame)
        return self._transformPose(mat44=mat44, pose=pose)

    def transformPose(self, pose):
        # Uses the transform cached in __init__.
        return self._transformPose(mat44=self.mat44, pose=pose)

    def _transformPose(self, mat44, pose):
        # Compose the pose's own 4x4 (translation * rotation) with mat44.
        pose44 = numpy.dot(self.xyz_to_mat44(pose.position),
                           self.xyzw_to_mat44(pose.orientation))
        txpose = numpy.dot(mat44, pose44)
        # print(txpose)
        xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
        quat = tuple(self.quaternion_from_matrix(txpose))
        # print(quat)
        return geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat))

    def asMatrix(self, target_frame, source_frame):
        # Look up the live TF transform (waiting up to 4 s) and convert it
        # to a homogeneous 4x4 matrix.
        tran = TransformListener()
        tran.waitForTransform(
            target_frame=target_frame, source_frame=source_frame, time=rospy.Time(0), timeout=rospy.Duration(4.0))
        translation, rotation = tran.lookupTransform(target_frame=target_frame,
                                                     source_frame=source_frame, time=rospy.Time(0))
        return self.fromTranslationRotation(translation, rotation)

    def fromTranslationRotation(self, translation, rotation):
        # Homogeneous matrix = translation matrix * rotation matrix.
        return numpy.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))

    def xyz_to_mat44(self, pos):
        return transformations.translation_matrix((pos.x, pos.y, pos.z))

    def xyzw_to_mat44(self, ori):
        return transformations.quaternion_matrix((ori.x, ori.y, ori.z, ori.w))

    def transformQuaternion(self, quaternion):
        return self._transformQuaternion(self.mat44, quaternion)

    def transformQuaternionWithFrame(self, target_frame, source_frame, quaternion):
        mat44 = self.asMatrix(target_frame=target_frame,
                              source_frame=source_frame)
        return self._transformQuaternion(mat44, quaternion)

    def _transformQuaternion(self, mat44, quaternion):
        pose44 = self.xyzw_to_mat44(quaternion)
        txpose = numpy.dot(mat44, pose44)
        # TODO: revise the conversion matrix
        # quat = tuple(transformations.quaternion_from_matrix(txpose))
        quat = tuple(self.quaternion_from_matrix(txpose))
        return geometry_msgs.msg.Quaternion(*quat)

    def quaternion_from_matrix(self,matrix):
        """
        Custom matrix -> quaternion conversion used instead of the tf helper
        to avoid discontinuities: with the tf conversion, large jumps appeared
        once the right arm rotated past a certain angle.  It is not yet known
        whether this variant has other problems.
        """
        # NOTE(review): only the trace-positive branch is implemented (the
        # general branch is commented out below); math.sqrt will raise when
        # t * M[3, 3] <= 0 -- confirm inputs keep the trace positive.
        q = numpy.empty((4, ), dtype=numpy.float64)
        M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
        t = numpy.trace(M)
        # if t > M[3, 3]:
        q[3] = t
        q[2] = M[1, 0] - M[0, 1]
        q[1] = M[0, 2] - M[2, 0]
        q[0] = M[2, 1] - M[1, 2]
        # else:
        #     i, j, k = 0, 1, 2
        #     if M[1, 1] > M[0, 0]:
        #         i, j, k = 1, 2, 0
        #     if M[2, 2] > M[i, i]:
        #         i, j, k = 2, 0, 1
        #     t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
        #     q[i] = -t
        #     q[j] = -(M[i, j] + M[j, i])
        #     q[k] = -(M[k, i] + M[i, k])
        #     q[3] = -(M[k, j] - M[j, k])
        q *= 0.5 / math.sqrt(t * M[3, 3])
        return q
| 6VV/vr-robot-back | robot/robot_control/TransformerTool.py | TransformerTool.py | py | 5,042 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "math.acos",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 35,
"... |
73928148349 | import random
import string
import factory
from django.contrib.auth import get_user_model
from reviews.models import Doctor, Review, Specialty
User = get_user_model()
def random_string(length=10):
    """Return a random string of ASCII letters of the given length."""
    letters = [random.choice(string.ascii_letters) for _ in range(length)]
    return "".join(letters)
class DoctorFactory(factory.django.DjangoModelFactory):
    """Factory producing Doctor rows with fixed sample names."""

    class Meta:
        model = "reviews.Doctor"

    first_name = "Ай"
    last_name = "Болит"
    patronymic = "Вениаминович"

class SpecFactory(factory.django.DjangoModelFactory):
    """Factory producing Specialty rows with a random title."""

    class Meta:
        model = "reviews.Specialty"

    title = factory.LazyAttribute(lambda t: random_string())

class UserFactory(factory.django.DjangoModelFactory):
    """Factory producing users of the active user model.

    NOTE(review): `password` is assigned as a plain attribute, so it is
    stored unhashed -- acceptable for tests only if nothing authenticates
    with it; confirm.
    """

    class Meta:
        model = User

    username = factory.LazyAttribute(lambda t: random_string())
    email = "alice@spam.eggs"
    password = "superpassword"
class ReviewFactory(factory.django.DjangoModelFactory):
    """Factory producing Review rows linked to fresh users and doctors.

    Uses ``factory.django.DjangoModelFactory`` for consistency with the
    other factories in this module (the bare ``factory.DjangoModelFactory``
    alias is a deprecated import path).
    """

    class Meta:
        model = "reviews.Review"

    author = factory.SubFactory(UserFactory)
    doctor = factory.SubFactory(DoctorFactory)
    ip_address = "127.0.0.1"
    text = factory.LazyAttribute(lambda t: random_string())
| idesu/review_moderation_lite | reviews/tests/factories.py | factories.py | py | 1,158 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_nam... |
35245234184 | from flask import render_template, url_for, flash, redirect, request, make_response, send_from_directory
from os import path
from csaptitude import app, db, bcrypt
from csaptitude.forms import TestResultsForm, TestRegistrationForm, TestLoginForm
from csaptitude.models import User, TestResult, QuestionResponse
from flask_login import login_user, current_user, logout_user, login_required
from sqlalchemy import desc
# Indexes of correct answers to test questions
correctAnswers = [2, 0, 5, 1, 4, 5, 2, 2, 1, 3, 4, 0, 4, 2, 5, 3, 0, 2, 1, 5, 0, 1, 5, 4, 0, 1, 5, 2, 1]
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html', title='About the Test')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = TestRegistrationForm();
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(student_id=form.studentId.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created!', 'success')
login_user(user)
next = request.args.get('next')
return redirect(next or url_for('test'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('test'))
form = TestLoginForm()
if form.validate_on_submit():
user = User.query.filter_by(student_id=form.studentId.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('test'))
else:
flash('Login Unsuccessful. Please check the Student ID and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/test', methods = ['GET', 'POST'])
@login_required
def test():
    """Serve the aptitude test and record a submission.

    On POST, stores one TestResult row plus one QuestionResponse per
    answered question; the first 3 entries are worked examples and do not
    count toward the score.
    """
    form = TestResultsForm()
    if form.validate_on_submit():
        #print (request.user_agent.version)
        score = 0
        # Answers and per-question times arrive as comma-separated strings.
        answers = form.answers.data.split(',')
        elapsedTimes = form.questionTimes.data.split(',')
        test = TestResult(
            user_id=current_user.id,
            elapsed_time_ms=int(form.elapsedTime.data),
            platform=request.user_agent.platform,
            browser=request.user_agent.browser,
            browser_version=request.user_agent.version,
            language=request.user_agent.language)
        db.session.add(test)
        # flush() assigns test.id before the responses reference it below.
        db.session.flush()
        for index, ans in enumerate(answers):
            # Double negation = plain truthiness: skip unanswered (empty) slots.
            if not not ans:
                correct = correctAnswers[index]==int(ans)
                quest = QuestionResponse(
                    test_result_id=test.id,
                    is_example=index < 3,
                    question_num=index - 3,
                    response=int(ans),
                    correct=correct,
                    elapsed_time_ms = (0 if elapsedTimes[index] == "NaN" else int(elapsedTimes[index])))
                db.session.add(quest)
                # Only real (non-example) questions contribute to the score.
                if correct and index >= 3:
                    score += 1
        db.session.commit()
        flash(f'Test Submitted! Your score is {score}', 'success')
        return redirect(url_for('test_results'))
    return render_template('test.html', form=form)
@app.route("/account")
@login_required
def account():
testResult = TestResult.query.filter_by(user_id=current_user.id).order_by(desc(TestResult.id)).first()
score = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.filter_by(correct=True)
.order_by('question_num')
.count()
if testResult else None)
date = testResult.created_at.strftime("%B %d, %Y at %H:%M UTC") if testResult else None
return render_template('account.html', title='Account', score=score, date=date)
@app.route("/results")
@login_required
def test_results():
testResult = TestResult.query.filter_by(user_id=current_user.id).order_by(desc(TestResult.id)).first()
score = None
answered = None
correct = None
if testResult:
answered = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.order_by('question_num').all())
answered = [a.question_num + 1 for a in answered]
correct = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.filter_by(correct=True)
.order_by('question_num').all())
score = len(correct)
correct = [a.question_num + 1 for a in correct]
correct = [c in correct for c in list(range(1, 27))]
[c in correct for c in list(range(1, 27))]
return render_template('results.html', title="Test Results", answered=answered,
correct=correct, score=score)
@app.route("/data/byquest-wide")
@login_required
def by_quest_wide():
if not current_user.is_admin:
flash('You do not have access to this information.', 'danger')
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('account'))
query = (db.session.query(User,TestResult,QuestionResponse)
.filter(User.id == TestResult.user_id)
.filter(TestResult.id == QuestionResponse.test_result_id)
.filter(QuestionResponse.is_example == False)
.order_by(User.id, TestResult.id, QuestionResponse.question_num))
#print(query.statement.compile())
query = query.all()
data = 'id,email,test,date,elapsed_time_ms,q.' + ',q.'.join(str(e) for e in range(1,27))
prev_test = None
next_quest = 0
for (user, test, quest) in query:
if (test.id != prev_test):
prev_test = test.id
next_quest = 0
data += '\n'
data +=f'{user.student_id},{user.email},{test.id},{test.created_at},{test.elapsed_time_ms}'
for num in range (next_quest, quest.question_num):
data += ','
next_quest = quest.question_num + 1
data += f',{quest.correct + 0}'
#print (f'{user.student_id}, {test.id}, {quest.question_num}, {quest.correct}')
response = make_response(data)
response.headers["Content-Disposition"] = "attachment; filename=export.csv"
response.headers["Content-type"] = "text/csv"
return response
@app.route('/favicon.ico')
def favicon():
return send_from_directory(path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon') | DoctorHayes/AptitudeTest-CS | csaptitude/routes.py | routes.py | py | 6,245 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csaptitude.app.route",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "csaptitude.app",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.rende... |
8067752722 | from django.shortcuts import render_to_response
from curriculum.models import TipoProyecto
from django.conf import settings
# Create your views here.
def home(request):
    """Landing page with the 'Inicio' menu entry highlighted."""
    context = {'menuInicio': 'selected', 'settings': settings}
    return render_to_response('default/index.html', context)
def estudios(request):
    """Studies page; marks the 'Estudios' menu entry as active."""
    context = {'menuEstudios': 'selected', 'settings': settings}
    return render_to_response('default/estudios.html', context)
def proyectos(request):
    """Projects page: every project type plus the active-menu flag."""
    context = {
        'tipoProyectos': TipoProyecto.objects.all(),
        'menuProyectos': 'selected',
        'settings': settings,
    }
    return render_to_response('default/proyectos.html', context)
def contacto(request):
    """Contact page; marks the 'Contacto' menu entry as active."""
    context = {'menuContacto': 'selected', 'settings': settings}
    return render_to_response('default/contacto.html', context)
| sebasgoldberg/jsg | default/views.py | views.py | py | 1,106 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.settings",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 13,
"usage_type": "call"
},... |
32731754668 | from collections import deque
# Read graph size: n vertices, m edges, v = start vertex.
n, m, v = map(int, input().split())
lst = [[] for _ in range(n+1)]  # adjacency lists, 1-indexed
visit_d = [0] * (n+1)           # visited flags for the DFS
bfs_q = []                      # unused here; bfs() builds its own deque
for i in range(m):
    a, b = map(int, input().split())
    # Undirected edge: record it in both directions.
    lst[a].append(b)
    lst[b].append(a)
# Sort each adjacency list so neighbors are visited in ascending order.
for i in range(1, n+1):
    lst[i].sort()
def dfs(start):
    # Recursive depth-first search over the global adjacency lists;
    # prints each vertex (space separated) the first time it is visited.
    visit_d[start] = 1
    print(start, end=' ')
    for i in lst[start]:
        if(visit_d[i] == 0):
            dfs(i)
def bfs(start):
    """Breadth-first search from start, printing vertices in visit order."""
    queue = deque([start])
    seen = [0] * (n + 1)
    seen[start] = 1
    while queue:
        node = queue.popleft()
        print(node, end=' ')
        for neighbor in lst[node]:
            if seen[neighbor] == 0:
                queue.append(neighbor)
                seen[neighbor] = 1
dfs(v)
# Blank line separating the DFS output from the BFS output that follows.
print()
bfs(v) | woo222/baekjoon | python/그래프/s2_1260_DFS와 BFS.py | s2_1260_DFS와 BFS.py | py | 773 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 26,
"usage_type": "call"
}
] |
17591799943 | import requests
# Browser-like headers for the requests against bagel.htb.
headers = {
    'Host': 'bagel.htb:8000',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Connection': 'close',
    'Upgrade-Insecure-Requests': '1',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache',
}
# Open the log file for writing
with open("log", "w") as log_file:
    # Loop through the range of process IDs
    for proc_id in range(1, 1001):
        # Construct the URL for the current process ID
        # (path traversal through ?page= to read each process's cmdline)
        page_url = f"http://bagel.htb:8000/?page=../../../../../../../../proc/{proc_id}/cmdline"
        # Use requests to fetch the page contents
        response = requests.get(page_url, headers=headers, verify=False)
        # Write the response content to the log file
        log_file.write(f"Contents of /proc/{proc_id}/cmdline:\n{response.content.decode()}\n\n")
| 0xRoqeeb/scripts | ProcScanner/proscanner.py | proscanner.py | py | 984 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
}
] |
10282905855 | import os
from flask import Flask
from flask_modals import Modal
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy, Pagination
from importlib import import_module
from apps.utils.stocks_properties import read_properties_file
db = SQLAlchemy()
login_manager = LoginManager()
print('El path de la aplicacion es : ',__path__)
props = read_properties_file('finanzas.properties')
sql_scripts = read_properties_file('sql_scripts.properties')
def register_extensions(app):
    # Bind the module-level SQLAlchemy and LoginManager singletons to this app.
    db.init_app(app)
    print('1 Register extension')
    login_manager.init_app(app)
def register_blueprints(app):
    """Import each feature module's routes and attach its blueprint."""
    print('1 Register blueprints')
    feature_modules = ('authentication', 'home', 'masterplan', 'organizations', 'reports')
    for module_name in feature_modules:
        routes = import_module(f'apps.{module_name}.routes')
        app.register_blueprint(routes.blueprint)
def configure_database(app):

    @app.before_first_request
    def initialize_database():
        # Create all tables on first request; if the configured DBMS is
        # unreachable, fall back to a local SQLite file and retry.
        print('3 configure database')
        try:
            print('#### Creando la base de datos ####')
            db.create_all()
            #from . import db
            #db.init_app(app)
        except Exception as e:
            print('> Error: DBMS Exception: ' + str(e) )
            # fallback to SQLite
            basedir = os.path.abspath(os.path.dirname(__file__))
            # Chained assignment also leaves a local SQLALCHEMY_DATABASE_URI
            # name; only the app.config entry matters here.
            app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'db.sqlite3')
            print('> Fallback to SQLite ')
            db.create_all()

    @app.teardown_request
    def shutdown_session(exception=None):
        # Return the scoped session to the pool after every request.
        db.session.remove()
def create_app(config):
    """Application factory: build, configure and wire up the Flask app."""
    print('4 Create app')
    app = Flask(__name__)
    # Modal registers itself with the app on construction; the return
    # value is otherwise unused.
    modal = Modal(app)
    app.config.from_object(config)
    register_extensions(app)
    register_blueprints(app)
    configure_database(app)
    return app
| qa8990/reports | apps/__init__.py | __init__.py | py | 1,875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "apps.utils.stocks_properties.read_properties_file",
"line_number": 14,
"usage_type":... |
74766917948 | #-------------------------------------------------------------------------------
# Recipes tests
#-------------------------------------------------------------------------------
import io
import os
import pytest
from pathlib import Path
from cookbook.db import get_db
# Data generators for testing.
#-------------------------------------------------------------------------------
def image_data(image_bytes=b'hopefully this is a cat image',
               image_file_name='image.jpg'):
    """Build a fake image upload: (in-memory byte stream, file name)."""
    stream = io.BytesIO(image_bytes)
    return stream, image_file_name
def recipe_data(
        title = 'different recipe',
        author = 'oliver jameson',
        description = 'dot dot dot',
        source_url = 'http://google.com',
        image = 'default',
        servings = 1,
        prep_time = 4,
        cook_time = 8,
        ingredients = 'six\nfive\nfour',
        instructions = 'new instructions\ngo here'
        ):
    """Form payload for the recipe add/edit views; override fields via kwargs.

    The string 'default' is a sentinel for the image because calling
    image_data() in the default-argument list would evaluate it only once
    at import time.
    """
    payload = dict(
        title=title,
        author=author,
        description=description,
        source_url=source_url,
        image=image_data() if image == 'default' else image,
        servings=servings,
        prep_time=prep_time,
        cook_time=cook_time,
        ingredients=ingredients,
        instructions=instructions,
    )
    return payload
def yaml_data(
        title = 'test recipe',
        author = 'chef ramsay',
        description = 'yummy',
        source_url = 'http://example.com',
        servings = 2,
        prep_time = 5,
        cook_time = 10,
        ingredients = '1tbsp nonsense',
        instructions = 'put the bla in the bla\nthen do the thing',
        yaml_file_name = 'test-recipe.yaml'
        ):
    """Serialize recipe fields into the YAML-upload fixture format.

    Returns (BytesIO holding the YAML document, file name).
    """
    def _as_yaml_list(text):
        # One '- item' line per non-blank input line.
        items = (line.strip() for line in text.split('\n'))
        return '\n'.join(f'- {item}' for item in items if item)

    document = (
        f'title: {title}\n'
        f'author: {author}\n'
        f'description: {description}\n'
        f'source_url: {source_url}\n'
        f'servings: {servings}\n'
        f'prep_time: {prep_time}\n'
        f'cook_time: {cook_time}\n'
        'ingredients:\n'
        f'{_as_yaml_list(ingredients)}\n'
        'instructions:\n'
        f'{_as_yaml_list(instructions)}\n'
    )
    return (io.BytesIO(document.encode()), yaml_file_name)
# Test index route.
#-------------------------------------------------------------------------------
def test_index(client, auth):
    # Anonymous visitors are redirected to a page offering login/register.
    response = client.get('/recipes', follow_redirects=True)
    assert b'Log In' in response.data
    assert b'Register' in response.data

    # Logged-in users see their recipes plus add/view links and a logout link.
    auth.login()
    response = client.get('/recipes')
    assert b'Log Out' in response.data
    assert b'test recipe' in response.data
    assert b'user_images/whatever.jpg' in response.data
    assert b'href=\'/recipes/add\'' in response.data
    assert b'href=\'/recipes/view/1\'' in response.data
# Authentication is required.
#-------------------------------------------------------------------------------
@pytest.mark.parametrize('path', (
    '/recipes/add',
    '/recipes/edit/1',
    '/recipes/delete/1',
))
def test_login_required(client, path):
    # Unauthenticated POSTs to protected routes redirect to the login page.
    response = client.post(path)
    assert response.headers['Location'] == '/auth/login'
# Unauthenticated access is prevented.
#-------------------------------------------------------------------------------
def test_data_privacy(app, client, auth):
    # Reassign the fixture recipe to another user, then verify the logged-in
    # user cannot reach it in any way.
    with app.app_context():
        db = get_db()
        db.execute('UPDATE recipe SET user_id = 2 WHERE id = 1')
        db.commit()
    auth.login()
    # Current user can't access other user's recipe.
    assert client.post('/recipes/edit/1', data=recipe_data()).status_code == 404
    assert client.post('/recipes/delete/1').status_code == 404
    assert client.get('/recipes/view/1').status_code == 404
    # Current user doesn't see other user's view link.
    assert b'href=\'/recipes/view/1\'' not in client.get('/').data
# Recipes must exist to be operated on.
#-------------------------------------------------------------------------------
def test_exists_required(client, auth):
    # Operations on a nonexistent recipe id return 404 with an explanation.
    auth.login()
    response = client.post('/recipes/delete/2')
    assert response.status_code == 404
    assert b'Recipe id 2 not found' in response.data
    response = client.post('/recipes/edit/2', data=recipe_data())
    assert response.status_code == 404
    assert b'Recipe id 2 not found' in response.data
# Recipes must be added to the database.
#-------------------------------------------------------------------------------
def test_add(client, auth, app):
    auth.login()
    assert client.get('/recipes/add').status_code == 200
    # A valid POST redirects to the new recipe's view page (it gets id 2).
    response = client.post('/recipes/add', data=recipe_data())
    assert response.headers['Location'] == '/recipes/view/2'
    # The database now contains the fixture recipe plus the new one.
    with app.app_context():
        db = get_db()
        count = db.execute('SELECT COUNT(id) FROM recipe').fetchone()[0]
        assert count == 2
# Recipes must be viewable.
#-------------------------------------------------------------------------------
def test_view(client, auth, app):
    # The fixture recipe's ingredients appear on its view page.
    auth.login()
    response = client.get('/recipes/view/1')
    assert response.status_code == 200
    assert b'1tbsp nonsense' in response.data
# Recipes must be edited in the database.
#-------------------------------------------------------------------------------
def test_edit(client, auth, app):
    auth.login()
    assert client.get('/recipes/edit/1').status_code == 200
    # Posting new form data overwrites the stored recipe.
    client.post('/recipes/edit/1', data=recipe_data())
    with app.app_context():
        db = get_db()
        post = db.execute('SELECT * FROM recipe WHERE id = 1').fetchone()
        assert post['title'] == 'different recipe'
# Recipes must be validated when added or edited.
#-------------------------------------------------------------------------------
@pytest.mark.parametrize('path', (
    '/recipes/add',
    '/recipes/edit/1',
))
def test_add_edit_validate(client, auth, path):
    """Each required/validated field produces its error message when invalid."""
    auth.login()

    # (field overrides, expected flash message) pairs; table-driven to avoid
    # repeating the post/assert boilerplate eleven times.
    cases = (
        ({'title': ''}, b'Title is required.'),
        ({'author': ''}, b'Author is required.'),
        ({'description': ''}, b'Description is required.'),
        ({'source_url': ''}, b'Source URL is required.'),
        ({'image': image_data(image_file_name='')}, b'Image is required.'),
        ({'image': image_data(image_file_name='uhoh.exe')}, b'Image not allowed.'),
        ({'servings': ''}, b'Servings is required.'),
        ({'prep_time': ''}, b'Prep Time is required.'),
        ({'cook_time': ''}, b'Cook Time is required.'),
        ({'ingredients': ''}, b'Ingredients is required.'),
        ({'instructions': ''}, b'Instructions is required.'),
    )
    for overrides, message in cases:
        response = client.post(path, data=recipe_data(**overrides))
        assert message in response.data
# Recipes must be deletable.
#-------------------------------------------------------------------------------
# NOTE: Do we need this?
user_images = Path(__file__).parent / 'user_images'
def test_delete(client, auth, app):
    # assert os.path.exists(os.path.join(user_images, 'whatever.jpg'))
    auth.login()
    # Deleting redirects back to the recipe list...
    response = client.post('/recipes/delete/1')
    assert response.headers['Location'] == '/recipes'
    # ...and removes the row from the database.
    with app.app_context():
        db = get_db()
        recipe = db.execute('SELECT * FROM recipe WHERE id = 1').fetchone()
        assert recipe is None
    # TODO: Test whether associated image is deleted.
    # assert not os.path.exists(os.path.join(user_images, 'whatever.jpg'))
# Recipes must be exportable.
#-------------------------------------------------------------------------------
def test_export(client, auth, app):
    # The exported YAML must match the yaml_data() fixture byte-for-byte.
    auth.login()
    response = client.get('/recipes/export/1')
    expected = yaml_data()
    assert response.get_data() == expected[0].getvalue()
| cmvanb/cookbook | tests/test_recipes.py | test_recipes.py | py | 8,458 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "io.BytesIO",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line... |
71584682748 | from bs4 import BeautifulSoup
import requests
class DHMenuScraper:
menuLink = "https://nutrition.sa.ucsc.edu/menuSamp.asp?"
dHallCodes = {
"nineten" : "locationNum=40&locationName=Colleges+Nine+%26+Ten+Dining+Hall",
"cowellstevenson" : "locationNum=05&locationName=Cowell+Stevenson+Dining+Hall"
}
def __init__(self):
return
def getFullMenu(self, dHall, mealNum):
fullUrl = self.menuLink + self.dHallCodes[dHall]
page = requests.get(fullUrl)
soup = BeautifulSoup(page.text, 'html.parser')
# finds the correct table for the meal
meal = soup.find_all('div', class_='menusampmeals')[mealNum]
# variables for loop to find the meals
current = meal
firstTableFound = True
while current is not None:
# print(current)
if current.name == 'table':
if firstTableFound:
firstTableFound = False
else:
# we are done
break
current = current.parent
rawMeals = current.find_all('div', class_='menusamprecipes')
finalMeals = []
for meal in rawMeals:
finalMeals.append(meal.string)
return finalMeals | kschniedergers/DHBot | DHMenuScraper.py | DHMenuScraper.py | py | 1,281 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
}
] |
29564758485 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import random
from model.leverage_bracket import leverage_bracket
from model.symbol import symbol as s
from operation.contract.client.leverage_bracket.query_leverage_bracket_list import query_leverage_bracket_list
from test_cases.contract.client.conftest import *
from common.logger import logger
class Test_query_leverage_bracket_list:
    """Query the leverage brackets for all trading pairs.

    1. Pick one trading pair at random from the API response.
    2. Look up that pair's leverage-bracket rows in the database.
    3. Compare each bracket's fields against the API values.
    """
    @pytest.mark.single
    # @pytest.mark.usefixtures("step_first")
    @pytest.mark.parametrize("scene,except_result, except_returnCode, except_msg",
                             api_leverage_data["query_leverage_bracket_list"])
    def test_query_leverage_bracket_list(self,scene,except_result,except_returnCode, except_msg):
        # logger.info("*************** 开始执行用例 ***************")
        logger.info(f'场景【{scene}】信息:{except_result}-{except_returnCode}-"{except_msg}"')
        result = query_leverage_bracket_list()
        logger.warning(f'场景-[{scene}]的返回信息是:{result.response}')
        try:
            # Pick one trading pair at random from the API response.
            leverage_result = random.choice(result.response["result"])
            symbol = leverage_result.get("symbol")
            # Fetch that trading pair's row from the database.
            symbol_single = s.query.filter(s.symbol == '{}'.format(symbol)).first()
            # Fetch the detailed bracket rows by the pair's symbol_id.
            symbol_list = leverage_bracket.query.filter(leverage_bracket.symbol_id == symbol_single.id).all()
            if symbol_list is not None:
                for symbol_ in symbol_list:
                    for res in leverage_result.get('leverageBrackets'):
                        if symbol_.bracket == res['bracket']:
                            assert float(symbol_.max_nominal_value) == \
                                float(res['maxNominalValue'])
                            # NOTE(review): this assertion duplicates the one
                            # above; a different field was probably intended.
                            assert float(symbol_.max_nominal_value) == \
                                float(res['maxNominalValue'])
                            assert float(symbol_.maint_margin_rate) == \
                                float(res['maintMarginRate'])
                            assert float(symbol_.start_margin_rate) == \
                                float(res['startMarginRate'])
                            assert float(symbol_.max_leverage) == \
                                float(res['maxLeverage'])
                            assert float(symbol_.min_leverage) == \
                                float(res['minLeverage'])
            else:
                # The trading pair is not present in the database.
                assert leverage_result is not None
                logger.error("查询所有交易对杠杆分层接口返回了数据库不存在的交易对")
        except Exception as e:
            logger.error(e)
        assert result.status_code == 200
        assert except_result == result.response["msgInfo"]
        assert except_returnCode == result.response["returnCode"]
        if except_returnCode == 0:
            assert except_msg in str(result.response["result"])
        else:
            assert except_msg in result.response["error"]["msg"]
        # logger.info("*************** 结束执行用例 ***************")
if __name__ == '__main__':
    # Allow running this test module directly, without a pytest CLI call.
    pytest.main(["-q", "-s", "test_query_leverage_bracket_list.py"])
| shiqilouyang/thanos_test | test_cases/contract/client/leverage_bracket/test_query_everage_bracket_list.py | test_query_everage_bracket_list.py | py | 3,595 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common.logger.logger.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "common.logger.logger",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "operation.contract.client.leverage_bracket.query_leverage_bracket_list.query_leverage_bracket_list... |
32171234106 | import json
from django.views.generic import ListView
from django.conf import settings
from django.shortcuts import render
from django.urls import reverse_lazy
from django.contrib.sites.models import Site
import requests
from cart.cart import Cart
from django.views.generic import CreateView
from django.views import View
from .tasks import order_created
from orders.models import Order, OrderItem
from django.contrib.auth.mixins import LoginRequiredMixin
class CreateOrderView(LoginRequiredMixin, CreateView):
    """Checkout view: saves the order, initializes a Paystack transaction,
    and redirects the buyer to Paystack's hosted payment page."""
    model = Order
    template_name = "orders/order_create.html"
    # Contact/shipping fields collected on the checkout form.
    fields = [
        'first_name',
        'last_name',
        'email',
        'address',
        'apartment',
        'city',
        'country',
        'state_province',
        'postal_code',
    ]

    def form_valid(self, form):
        cart = Cart(self.request)
        order = form.save(commit=False)
        order.user = self.request.user
        order.save()
        amount = int(cart.get_total_price())
        email = form.cleaned_data['email']
        headers = {
            'Authorization': f'Bearer {settings.PS_SECRET}',
            'Content-Type': 'application/json'
        }
        current_site = Site.objects.get_current()
        # Plain HTTP callback for local development, HTTPS in production.
        if settings.DEBUG:
            call_back = f'http://{current_site.domain}/payment'
        else:
            call_back = f'https://{current_site.domain}/payment'
        data = {
            'amount': amount * 100,  # Paystack expects the minor currency unit
            'email': email,
            'callback_url': call_back,
            # order_id in metadata lets the payment callback find this order.
            'metadata': {
                'order_id': str(order.id)
            }
        }
        url = "https://api.paystack.co/transaction/initialize"
        # NOTE(review): the response status is never checked before indexing
        # into respo['data']; a failed initialization raises KeyError here.
        resp = requests.post(url=url, json=data, headers=headers)
        respo = json.loads(resp.content)
        self.success_url = str(respo['data']['authorization_url'])
        # Snapshot the cart into OrderItem rows, then empty the cart.
        for product in cart:
            OrderItem.objects.create(
                order=order, item=product['item'],
                price=product['price'], quantity=product['quantity']
            )
        cart.clear()
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        # Expose the session cart to the template for the order summary.
        context = super().get_context_data(**kwargs)
        cart = Cart(self.request)
        context['cart'] = cart
        return context
# class CreateCheckoutSession(View):
# def post(self, request, *args, **kwargs):
class OrderHistory(LoginRequiredMixin, ListView):
    """List the logged-in user's past orders."""
    model = Order
    template_name = 'orders/order_history.html'
    # Class-level queryset is superseded per-request by get_queryset below.
    queryset = Order.objects.all()
    context_object_name = 'orders'

    def get_queryset(self):
        # Restrict the listing to the current user's own orders.
        queryset = Order.objects.filter(user=self.request.user)
        return queryset
def created(request):
return render(request, "orders/created.html") | Alisjj/Shop-From-Home | orders/views.py | views.py | py | 2,802 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.views.generic.CreateView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "orders.models.Order",
"line_number": 17,
"usage_type": "nam... |
8670813064 | import pathlib
def get_desanitizer(celltypes_dir):
    """Build a {machine_readable: human_readable} map from every manifest
    directly below celltypes_dir."""
    records = read_all_manifests(celltypes_dir)
    return desanitizer_from_meta_manifest(records)
def desanitizer_from_meta_manifest(cell_type_list):
    """Invert manifest records into {machine_readable: human_readable}.

    cell_type_list is the result of reading list_of_manifests.  Raises
    RuntimeError if one machine-readable name maps to two different
    human-readable names.
    """
    desanitizer = dict()
    for record in cell_type_list:
        machine = record['machine_readable']
        human = record['human_readable']
        # A repeated machine name is fine only if it maps to the same
        # human-readable name as before.
        if desanitizer.get(machine, human) != human:
            raise RuntimeError(f"{machine} occurs more than once")
        desanitizer[machine] = human
    return desanitizer
def read_all_manifests(data_dir):
    """
    Collect cell-type records from every manifest.csv found one directory
    level below data_dir.

    Return:
        valid_cell_types -- list of dicts like
            {'hierarchy': 'Level_1',
             'human_readable': human_readable_name,
             'machine_readable': machine_readable_name,
             'unique': a_unique_key}
    """
    sub_dirs = [n for n in data_dir.iterdir() if n.is_dir()]
    # Only subdirectories that actually contain a manifest.csv are read.
    list_of_manifests = []
    for d in sub_dirs:
        m = d / 'manifest.csv'
        if m.is_file():
            list_of_manifests.append(m)
    return read_list_of_manifests(list_of_manifests)
def read_list_of_manifests(list_of_manifests):
    """Merge manifest files into one list of cell-type records.

    Each record is {'hierarchy', 'human_readable', 'machine_readable',
    'unique'} where 'unique' is '<hierarchy>/<machine_readable>' and must
    not repeat across manifests.
    """
    found_machine = set()
    valid_cell_types = []
    #for child_dir in sub_dirs:
    for manifest_path in list_of_manifests:
        child_dir = manifest_path.parent
        # The parent directory's name encodes the hierarchy level.
        this_hierarchy = child_dir.name
        if not manifest_path.is_file():
            raise RuntimeError(
                f"cannot find {manifest_path.resolve().absolute()}")
        this_manifest = read_manifest(manifest_path)
        for manifest_key in this_manifest:
            element = this_manifest[manifest_key]
            unq_key = f"{this_hierarchy}/{element['machine_readable']}"
            if unq_key in found_machine:
                raise RuntimeError(
                    f"{unq_key} occurs more than once")
            found_machine.add(unq_key)
            this_element = {'hierarchy': this_hierarchy,
                            'human_readable': element['human_readable'],
                            'machine_readable': element['machine_readable'],
                            'unique': unq_key}
            valid_cell_types.append(this_element)
    return valid_cell_types
def read_manifest(manifest_path):
    """
    Get a lookup table from filename to
    celltype name and machine readable group
    name from the manifest.csv files written
    by Lydia's script

    Returns {file_name: {'human_readable': ..., 'machine_readable': ...}}.
    """
    label_idx = None
    path_idx = None
    with open(manifest_path, "r") as in_file:
        # Locate the 'label' and 'file_name' columns from the header row.
        header = in_file.readline().strip().split(',')
        for idx, val in enumerate(header):
            if val == 'label':
                label_idx = idx
            elif val == 'file_name':
                path_idx = idx
        assert label_idx is not None
        assert path_idx is not None
        file_path_list = []
        human_readable_list = []
        for line in in_file:
            line = line.strip().split(',')
            pth = line[path_idx]
            human_readable = line[label_idx]
            file_path_list.append(pth)
            human_readable_list.append(human_readable)
    # Sanitize all names in one pass so duplicate detection sees the full list.
    (sanitized_list,
     _ ) = sanitize_cluster_name_list(human_readable_list)
    result = dict()
    for file_path, human_readable, sanitized in zip(file_path_list,
                                                    human_readable_list,
                                                    sanitized_list):
        result[file_path] = {"human_readable": human_readable,
                             "machine_readable": sanitized}
    return result
def sanitize_cluster_name(name):
    """Replace filesystem-hostile characters (space, '/') with underscores."""
    for bad_char in (' ', '/'):
        name = name.replace(bad_char, '_')
    return name


def sanitize_cluster_name_list(
        raw_cluster_name_list):
    """Sanitize every name in the list, enforcing unique results.

    Returns (sanitized_names, desanitizer) where desanitizer maps each
    sanitized name back to its raw form.

    Raises RuntimeError when two raw names collapse to the same sanitized
    name.
    """
    sanitized_name_set = set()
    sanitized_name_list = []
    desanitizer = dict()
    for name in raw_cluster_name_list:
        sanitized_name = sanitize_cluster_name(name)
        # Bug fix: compare the *sanitized* name against the set (which holds
        # sanitized names); the previous code tested the raw name, so real
        # collisions such as 'a b' vs 'a_b' went undetected.
        if sanitized_name in sanitized_name_set:
            raise RuntimeError(
                f"{sanitized_name} occurs more than once")
        sanitized_name_set.add(sanitized_name)
        sanitized_name_list.append(sanitized_name)
        desanitizer[sanitized_name] = name
    return sanitized_name_list, desanitizer
def get_class_lookup(
        anno_path):
    """
    Parse the annotation CSV at anno_path (columns: _, cluster, subclass,
    class).

    Return:
        subclass_to_clusters -- {sanitized subclass: [sanitized clusters]}
        class_to_clusters -- {sanitized class: [sanitized clusters]}
        valid_clusters -- set of all sanitized cluster names
        desanitizer -- {sanitized name: raw name} for all three levels

    Raises RuntimeError if one sanitized name maps back to two different
    raw names.
    """
    anno_path = pathlib.Path(anno_path)
    if not anno_path.is_file():
        raise RuntimeError(f"{anno_path} is not a file")

    subclass_to_clusters = dict()
    class_to_clusters = dict()
    valid_clusters = set()
    desanitizer = dict()

    with open(anno_path, "r") as in_file:
        header = in_file.readline()  # skip the header row
        for line in in_file:
            params = line.replace('"', '').strip().split(',')
            assert len(params) == 4
            cluster_name = params[1]
            subclass_name = params[2]
            class_name = params[3]

            sanitized_cluster_name = sanitize_cluster_name(cluster_name)
            sanitized_subclass_name = sanitize_cluster_name(subclass_name)
            sanitized_class_name = sanitize_cluster_name(class_name)

            # Reject ambiguous desanitization at any of the three levels.
            for dirty, clean in zip((cluster_name,
                                     subclass_name,
                                     class_name),
                                    (sanitized_cluster_name,
                                     sanitized_subclass_name,
                                     sanitized_class_name)):
                if clean in desanitizer:
                    if desanitizer[clean] != dirty:
                        msg = "\nmore than one way to desanitize "
                        msg += f"{clean}\n"
                        msg += f"{dirty}\n"
                        msg += f"{desanitizer[clean]}\n"
                        raise RuntimeError(msg)
                desanitizer[clean] = dirty

            valid_clusters.add(sanitized_cluster_name)

            # Bug fix: membership must be tested with the *sanitized* names
            # (the actual dict keys); testing the raw names reset the lists
            # on every row whenever sanitization changed the name, dropping
            # previously collected clusters.  setdefault does both steps.
            subclass_to_clusters.setdefault(
                sanitized_subclass_name, []).append(sanitized_cluster_name)
            class_to_clusters.setdefault(
                sanitized_class_name, []).append(sanitized_cluster_name)

    return (subclass_to_clusters,
            class_to_clusters,
            valid_clusters,
            desanitizer)
| AllenInstitute/neuroglancer_formatting_scripts | src/neuroglancer_interface/utils/celltypes_utils.py | celltypes_utils.py | py | 6,758 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 145,
"usage_type": "call"
}
] |
17609317181 | # encoding: utf-8
import os
import binascii
from collections import OrderedDict
import cachemodel
from basic_models.models import CreatedUpdatedAt
from django.urls import reverse
from django.db import models, transaction
from django.db.models import Q
from entity.models import BaseVersionedEntity
from issuer.models import BaseAuditedModelDeletedWithUser, BadgeInstance
from backpack.sharing import SharingManager
from issuer.utils import CURRENT_OBI_VERSION, get_obi_context, add_obi_version_ifneeded
from mainsite.managers import SlugOrJsonIdCacheModelManager
from mainsite.models import BadgrApp
from mainsite.utils import OriginSetting
class BackpackCollection(BaseAuditedModelDeletedWithUser, BaseVersionedEntity):
entity_class_name = 'BackpackCollection'
name = models.CharField(max_length=128)
description = models.CharField(max_length=255, blank=True)
share_hash = models.CharField(max_length=255, null=False, blank=True)
# slug has been deprecated, but keep for legacy collections redirects
slug = models.CharField(max_length=254, blank=True, null=True, default=None)
assertions = models.ManyToManyField('issuer.BadgeInstance', blank=True, through='backpack.BackpackCollectionBadgeInstance')
cached = SlugOrJsonIdCacheModelManager(slug_kwarg_name='entity_id', slug_field_name='entity_id')
    def publish(self):
        # Refresh cache entries for this collection, its share_hash index,
        # and the owning user.
        super(BackpackCollection, self).publish()
        self.publish_by('share_hash')
        self.created_by.publish()
    def delete(self, *args, **kwargs):
        # After the row is removed, drop the cached share_hash entry and
        # refresh the owner's cache.
        super(BackpackCollection, self).delete(*args, **kwargs)
        self.publish_delete('share_hash')
        self.created_by.publish()
    def save(self, **kwargs):
        if self.pk:
            # Prune collection links whose badge was rejected or revoked.
            # NOTE(review): this filter is not scoped to self, so on every
            # save it deletes such links across *all* collections -- confirm
            # this is intentional.
            BackpackCollectionBadgeInstance.objects.filter(
                Q(badgeinstance__acceptance=BadgeInstance.ACCEPTANCE_REJECTED) | Q(badgeinstance__revoked=True)
            ).delete()
        super(BackpackCollection, self).save(**kwargs)
    @cachemodel.cached_method(auto_publish=True)
    def cached_badgeinstances(self):
        # Assertions in this collection that are still live: not revoked
        # and either accepted or not yet acted on.
        return self.assertions.filter(
            revoked=False,
            acceptance__in=(BadgeInstance.ACCEPTANCE_ACCEPTED, BadgeInstance.ACCEPTANCE_UNACCEPTED)
        )
    @cachemodel.cached_method(auto_publish=True)
    def cached_collects(self):
        # Through-model rows filtered by the same liveness rules (badge not
        # revoked, accepted or unaccepted).
        return self.backpackcollectionbadgeinstance_set.filter(
            badgeinstance__revoked=False,
            badgeinstance__acceptance__in=(BadgeInstance.ACCEPTANCE_ACCEPTED,BadgeInstance.ACCEPTANCE_UNACCEPTED)
        )
    @property
    def owner(self):
        # Resolve the owning BadgeUser via the cache manager; imported
        # locally to avoid a circular import with badgeuser.models.
        from badgeuser.models import BadgeUser
        return BadgeUser.cached.get(id=self.created_by_id)
# Convenience methods for toggling published state
@property
def published(self):
return bool(self.share_hash)
@published.setter
def published(self, value):
if value and not self.share_hash:
self.share_hash = str(binascii.hexlify(os.urandom(16)), 'utf-8')
elif not value and self.share_hash:
self.publish_delete('share_hash')
self.share_hash = ''
@property
def share_url(self):
if self.published:
return OriginSetting.HTTP+reverse('collection_json', kwargs={'entity_id': self.share_hash})
def get_share_url(self, **kwargs):
return self.share_url
@property
def badge_items(self):
return self.cached_badgeinstances()
@badge_items.setter
def badge_items(self, value):
"""
Update this collection's list of BackpackCollectionBadgeInstance from a list of BadgeInstance EntityRelatedFieldV2 serializer data
:param value: list of BadgeInstance instances or list of BadgeInstance entity_id strings.
"""
def _is_in_requested_badges(entity_id):
if entity_id in value:
return True
try:
if entity_id in [i.entity_id for i in value]:
return True
except AttributeError:
pass
return False
with transaction.atomic():
existing_badges = {b.entity_id: b for b in self.badge_items}
# add missing badges
for badge_reference in value:
try:
if isinstance(badge_reference, BadgeInstance):
badgeinstance = badge_reference
else:
badgeinstance = BadgeInstance.cached.get(entity_id=badge_reference)
except BadgeInstance.DoesNotExist:
pass
else:
if badgeinstance.entity_id not in list(existing_badges.keys()):
BackpackCollectionBadgeInstance.cached.get_or_create(
collection=self,
badgeinstance=badgeinstance
)
# remove badges no longer in collection
for badge_entity_id, badgeinstance in list(existing_badges.items()):
if not _is_in_requested_badges(badge_entity_id):
BackpackCollectionBadgeInstance.objects.filter(
collection=self,
badgeinstance=badgeinstance
).delete()
def get_json(self, obi_version=CURRENT_OBI_VERSION, expand_badgeclass=False, expand_issuer=False, include_extra=True):
obi_version, context_iri = get_obi_context(obi_version)
json = OrderedDict([
('@context', context_iri),
('type', 'Collection'),
('id', add_obi_version_ifneeded(self.share_url, obi_version)),
('name', self.name),
('description', self.description),
('entityId', self.entity_id),
('owner', OrderedDict([
('firstName', self.cached_creator.first_name),
('lastName', self.cached_creator.last_name),
]))
])
json['badges'] = [b.get_json(obi_version=obi_version,
expand_badgeclass=expand_badgeclass,
expand_issuer=expand_issuer,
include_extra=include_extra) for b in self.cached_badgeinstances()]
return json
@property
def cached_badgrapp(self):
creator = self.cached_creator
if creator and creator.badgrapp_id:
return BadgrApp.objects.get(pk=creator.badgrapp_id)
return BadgrApp.objects.get_current(None)
class BackpackCollectionBadgeInstance(cachemodel.CacheModel):
    """Through model linking a BackpackCollection to a BadgeInstance."""
    collection = models.ForeignKey('backpack.BackpackCollection',
                                   on_delete=models.CASCADE)
    badgeuser = models.ForeignKey('badgeuser.BadgeUser', null=True, default=None,
                                  on_delete=models.CASCADE)
    badgeinstance = models.ForeignKey('issuer.BadgeInstance',
                                      on_delete=models.CASCADE)

    def publish(self):
        # Re-publish the owning collection so its cached members stay fresh.
        super(BackpackCollectionBadgeInstance, self).publish()
        self.collection.publish()

    def delete(self):
        super(BackpackCollectionBadgeInstance, self).delete()
        self.collection.publish()

    @property
    def cached_badgeinstance(self):
        """Cache-backed lookup of the linked BadgeInstance."""
        return BadgeInstance.cached.get(id=self.badgeinstance_id)

    @property
    def cached_collection(self):
        """Cache-backed lookup of the owning collection."""
        return BackpackCollection.cached.get(id=self.collection_id)
class BaseSharedModel(cachemodel.CacheModel, CreatedUpdatedAt):
    """Abstract base for records of a share event on a social provider."""
    # Choices derived from the providers registered with SharingManager.
    SHARE_PROVIDERS = [(p.provider_code, p.provider_name) for code,p in list(SharingManager.ManagerProviders.items())]
    provider = models.CharField(max_length=254, choices=SHARE_PROVIDERS)
    source = models.CharField(max_length=254, default="unknown")

    class Meta:
        abstract = True

    def get_share_url(self, provider, **kwargs):
        # Subclasses must return the provider-specific share URL.
        raise NotImplementedError()
class BackpackBadgeShare(BaseSharedModel):
    """Records a single badge assertion being shared on a provider."""
    badgeinstance = models.ForeignKey("issuer.BadgeInstance", null=True,
                                      on_delete=models.CASCADE)

    def get_share_url(self, provider, **kwargs):
        """Return the provider-specific URL for sharing this assertion."""
        return SharingManager.share_url(provider, self.badgeinstance, **kwargs)
class BackpackCollectionShare(BaseSharedModel):
    """Records a collection being shared on a provider."""
    collection = models.ForeignKey('backpack.BackpackCollection', null=False,
                                   on_delete=models.CASCADE)

    def get_share_url(self, provider, **kwargs):
        """Return the provider-specific URL for sharing this collection."""
        return SharingManager.share_url(provider, self.collection, **kwargs)
| reedu-reengineering-education/badgr-server | apps/backpack/models.py | models.py | py | 8,542 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "issuer.models.BaseAuditedModelDeletedWithUser",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "entity.models.BaseVersionedEntity",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_ty... |
23182257426 | import pytest
import json
import ipaddress
from tests.common.utilities import wait_until
from tests.common import config_reload
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
import time
# Restrict this module's tests to t0 topologies and virtual-switch devices.
pytestmark = [
    pytest.mark.topology('t0'),
    pytest.mark.device_type('vs')
]
def add_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False):
    """Configure each nexthop address on its matching PTF interface.

    :param ptfhost: PTF host to run the `ip` commands on
    :param nexthop_addrs: list of nexthop IP addresses
    :param prefix_len: prefix length applied to every address
    :param nexthop_devs: PTF eth interface indices, parallel to nexthop_addrs
    :param ipv6: configure IPv6 addresses when True
    """
    family_flag = "-6 " if ipv6 else ""
    for addr, dev in zip(nexthop_addrs, nexthop_devs):
        cmd = "ip {}addr add {}/{} dev eth{}".format(family_flag, addr, prefix_len, dev)
        # Ignore failures (e.g. address already present) to keep setup idempotent.
        ptfhost.shell(cmd, module_ignore_errors=True)
def del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False):
    """Remove each nexthop address from its matching PTF interface.

    Mirror of :func:`add_ipaddr`; parameters have the same meaning.
    """
    family_flag = "-6 " if ipv6 else ""
    for addr, dev in zip(nexthop_addrs, nexthop_devs):
        cmd = "ip {}addr del {}/{} dev eth{}".format(family_flag, addr, prefix_len, dev)
        # Ignore failures (e.g. address already gone) during teardown.
        ptfhost.shell(cmd, module_ignore_errors=True)
def generate_and_verify_traffic(duthost, ptfadapter, ip_dst, expected_ports, ipv6=False):
    """Send TCP packets towards ip_dst and verify one is received on any
    of the expected PTF ports.

    :param duthost: DUT host (its router MAC is used as the dest MAC)
    :param ptfadapter: PTF dataplane adapter used for send/verify
    :param ip_dst: destination IP address of the generated packets
    :param expected_ports: PTF port indices where the packet may egress
    :param ipv6: build IPv6 packets when True
    """
    dst_mac = duthost.facts["router_mac"]
    src_mac = ptfadapter.dataplane.get_mac(0, 0)
    if ipv6:
        pkt = testutils.simple_tcpv6_packet(
            eth_dst=dst_mac,
            eth_src=src_mac,
            ipv6_src='2001:db8:85a3::8a2e:370:7334',
            ipv6_dst=ip_dst,
            ipv6_hlim=64,
            tcp_sport=1234,
            tcp_dport=4321)
    else:
        pkt = testutils.simple_tcp_packet(
            eth_dst=dst_mac,
            eth_src=src_mac,
            ip_src='1.1.1.1',
            ip_dst=ip_dst,
            ip_ttl=64,
            tcp_sport=1234,
            tcp_dport=4321)

    # Build the expected packet: MACs are rewritten and TTL/hlim decremented
    # by the router, so mask those fields out before comparing.
    exp_pkt = mask.Mask(pkt.copy())
    for eth_field in ('dst', 'src'):
        exp_pkt.set_do_not_care_scapy(packet.Ether, eth_field)
    if ipv6:
        for ip_field in ('hlim', 'chksum'):
            exp_pkt.set_do_not_care_scapy(packet.IPv6, ip_field)
    else:
        for ip_field in ('ttl', 'chksum'):
            exp_pkt.set_do_not_care_scapy(packet.IP, ip_field)

    testutils.send(ptfadapter, 5, pkt)
    testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=expected_ports)
def run_static_route_test(duthost, ptfadapter, ptfhost, prefix, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False, config_reload_test=False):
    """Install a static route on the DUT and verify traffic towards the
    prefix is forwarded to the configured nexthop port(s).

    :param duthost: DUT host fixture
    :param ptfadapter: PTF dataplane adapter
    :param ptfhost: PTF host where the nexthop addresses are configured
    :param prefix: destination prefix of the static route, e.g. "1.1.1.0/24"
    :param nexthop_addrs: list of nexthop IP addresses
    :param prefix_len: prefix length used for the PTF-side addresses
    :param nexthop_devs: PTF port indices, parallel to nexthop_addrs
    :param ipv6: use IPv6 addressing when True
    :param config_reload_test: also verify the route survives
        'config save' followed by a config reload
    """
    # Add ipaddresses in ptf
    add_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)

    try:
        # Add static route
        duthost.shell("sonic-db-cli CONFIG_DB hmset 'STATIC_ROUTE|{}' nexthop {}".format(prefix, ",".join(nexthop_addrs)))
        # Give the route time to be programmed into the ASIC.
        time.sleep(5)

        # Check traffic get forwarded to the nexthop.
        # Bug fix: the original wrapped `prefix` in the Python-2-only
        # unicode() builtin, which raises NameError under Python 3;
        # ipaddress.ip_network accepts str directly.
        ip_dst = str(ipaddress.ip_network(prefix)[1])
        generate_and_verify_traffic(duthost, ptfadapter, ip_dst, nexthop_devs, ipv6=ipv6)

        # Config save and reload if specified
        if config_reload_test:
            duthost.shell('config save -y')
            config_reload(duthost)
            generate_and_verify_traffic(duthost, ptfadapter, ip_dst, nexthop_devs, ipv6=ipv6)
    finally:
        # Remove static route
        duthost.shell("sonic-db-cli CONFIG_DB del 'STATIC_ROUTE|{}'".format(prefix), module_ignore_errors=True)

        # Delete ipaddresses in ptf
        del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)

        # Config save if the saved config_db was updated
        if config_reload_test:
            duthost.shell('config save -y')
def get_vlan_info(duthost, tbinfo, ipv6=False):
    """Extract VLAN interface details from the DUT's minigraph facts.

    :param duthost: DUT host fixture
    :param tbinfo: testbed info fixture
    :param ipv6: read the IPv6 VLAN interface (index 1) instead of IPv4
    :return: tuple (prefix_len, vlan_subnet, vlan_ptf_ports) where
        vlan_subnet is an ipaddress network object and vlan_ptf_ports is a
        list of PTF port indices of the VLAN members.
    """
    mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
    # Interface 0 carries the IPv4 subnet, interface 1 the IPv6 subnet.
    intf_index = 1 if ipv6 else 0
    vlan_intf = mg_facts['minigraph_vlan_interfaces'][intf_index]
    vlan_subnet = ipaddress.ip_network(vlan_intf['subnet'])
    members = mg_facts['minigraph_vlans'][vlan_intf['attachto']]['members']
    vlan_ptf_ports = [mg_facts['minigraph_ptf_indices'][port] for port in members]
    return vlan_intf['prefixlen'], vlan_subnet, vlan_ptf_ports
def test_static_route(duthost, ptfadapter, ptfhost, tbinfo):
    """Verify a static route with a single VLAN nexthop forwards traffic."""
    prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo)
    # Use host .11 in the VLAN subnet as the single nexthop.
    run_static_route_test(duthost, ptfadapter, ptfhost, "1.1.1.0/24",
                          [str(vlan_subnet[11])], prefix_len, [vlan_ptf_ports[0]])
def test_static_route_ecmp(duthost, ptfadapter, ptfhost, tbinfo):
    """Verify an ECMP static route with up to 3 VLAN nexthops, including
    persistence across config save/reload."""
    prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo)
    # Use up to 3 nexthops starting at host .20 of the VLAN subnet.
    nexthop_count = min(3, len(vlan_ptf_ports))
    nexthops = [str(vlan_subnet[20 + idx]) for idx in range(nexthop_count)]
    intfs = vlan_ptf_ports[:nexthop_count]
    run_static_route_test(duthost, ptfadapter, ptfhost, "2.2.2.0/24",
                          nexthops, prefix_len, intfs, config_reload_test=True)
def test_static_route_ipv6(duthost, ptfadapter, ptfhost, tbinfo):
    """Verify an IPv6 static route with a single VLAN nexthop."""
    prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo, ipv6=True)
    # Use host ::11 in the VLAN IPv6 subnet as the single nexthop.
    run_static_route_test(duthost, ptfadapter, ptfhost, "2000:1::/64",
                          [str(vlan_subnet[11])], prefix_len, [vlan_ptf_ports[0]], ipv6=True)
def test_static_route_ecmp_ipv6(duthost, ptfadapter, ptfhost, tbinfo):
    """Verify an IPv6 ECMP static route with up to 3 VLAN nexthops, including
    persistence across config save/reload."""
    prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo, ipv6=True)
    # Use up to 3 nexthops starting at host ::20 of the VLAN IPv6 subnet.
    nexthop_count = min(3, len(vlan_ptf_ports))
    nexthops = [str(vlan_subnet[20 + idx]) for idx in range(nexthop_count)]
    intfs = vlan_ptf_ports[:nexthop_count]
    run_static_route_test(duthost, ptfadapter, ptfhost, "2000:2::/64",
                          nexthops, prefix_len, intfs, ipv6=True, config_reload_test=True)
| SijiJ/sonic-mgmt | tests/route/test_static_route.py | test_static_route.py | py | 6,020 | python | en | code | null | github-code | 6 | [
{
"api_name": "pytest.mark.topology",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.device_type",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.... |
23811859933 | from typing import List, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import time
import copy
from PIL import Image
from grid import SQUARES
class GeoModel:
    """Encapsulates the creation, training, saving, loading and evaluation
    of the geographic prediction model.

    The selected map region is divided up into squares, and the model predicts
    the probability of the input image being in any given square.
    """

    def __init__(self):
        # Training uses random resized crops as augmentation; validation uses
        # a deterministic resize + center crop.
        self.data_transforms = {
            "train": transforms.Compose(
                [
                    transforms.RandomResizedCrop(512),
                    transforms.ToTensor(),
                ]
            ),
            "val": transforms.Compose(
                [
                    transforms.Resize(512),
                    transforms.CenterCrop(512),
                    transforms.ToTensor(),
                ]
            ),
        }
        # ImageFolder layouts under ./data (train) and ./valdata (val):
        # one subdirectory per class, where the class name is the grid
        # square number (see predict_random_image).
        self.image_datasets = {
            "train": datasets.ImageFolder("data", self.data_transforms["train"]),
            "val": datasets.ImageFolder("valdata", self.data_transforms["val"]),
        }
        self.dataloaders = {
            x: torch.utils.data.DataLoader(
                self.image_datasets[x], batch_size=4, shuffle=True, num_workers=4
            )
            for x in ["train", "val"]
        }
        self.dataset_sizes = {x: len(self.image_datasets[x]) for x in ["train", "val"]}
        self.class_names = self.image_datasets["train"].classes
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # ResNet18 pre-trained on ImageNet; its final FC layer is replaced
        # below to predict one logit per grid-square class.
        self.net = models.resnet18(pretrained=True)
        self.num_features = self.net.fc.in_features
        # Our network doesn't use softmax as the last layer, since we use
        # CrossEntropy loss which already implicitly does softmax,
        # and softmax isn't idempotent. So we manually add softmax
        # during inference.
        self.net.fc = nn.Linear(self.num_features, len(self.class_names))
        self.net = self.net.to(self.device)
        self.criterion = nn.CrossEntropyLoss()
        # Observe that all parameters are being optimized
        self.optimizer = optim.SGD(self.net.parameters(), lr=0.001, momentum=0.9)
        # Decay LR by a factor of 0.1 every 7 epochs
        self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=7, gamma=0.1)

    def _train_model(self, model, criterion, optimizer, scheduler, num_epochs=25):
        # Standard PyTorch fine-tuning loop: trains for num_epochs, evaluates
        # after each epoch, and keeps the weights with the best val accuracy.
        since = time.time()

        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0

        for epoch in range(num_epochs):
            print("Epoch {}/{}".format(epoch, num_epochs - 1))
            print("-" * 10)

            # Each epoch has a training and validation phase
            for phase in ["train", "val"]:
                if phase == "train":
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode

                running_loss = 0.0
                running_corrects = 0

                # Iterate over data.
                for inputs, labels in self.dataloaders[phase]:
                    inputs = inputs.to(self.device)
                    labels = labels.to(self.device)

                    # zero the parameter gradients
                    optimizer.zero_grad()

                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == "train"):
                        outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        loss = criterion(outputs, labels)

                        # backward + optimize only if in training phase
                        if phase == "train":
                            loss.backward()
                            optimizer.step()

                    # statistics
                    # loss.item() is the batch mean; scale by batch size so
                    # the epoch average below is correct.
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                if phase == "train":
                    scheduler.step()

                epoch_loss = running_loss / self.dataset_sizes[phase]
                epoch_acc = running_corrects.double() / self.dataset_sizes[phase]

                print(
                    "{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc)
                )

                # deep copy the model
                if phase == "val" and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())

            print()

        time_elapsed = time.time() - since
        print(
            "Training complete in {:.0f}m {:.0f}s".format(
                time_elapsed // 60, time_elapsed % 60
            )
        )
        print("Best val Acc: {:4f}".format(best_acc))

        # Load best model weights found during the training
        model.load_state_dict(best_model_wts)
        return model

    def train(self, num_epochs=25):
        """Fine-tunes the pre-trained model using the parameters specified in this class's
        `__init__`. The trained model is then stored in this class for usage.

        Takes a handful of minutes per epoch on a 30-series Nvidia CUDA-enabled GPU.
        """
        self.net = self._train_model(
            self.net,
            self.criterion,
            self.optimizer,
            self.scheduler,
            num_epochs=num_epochs,
        )

    def save_to_disk(self, path: str = "models/resnet18v1"):
        """Saves the model parameters to disk using the specified `path`."""
        torch.save(self.net.state_dict(), path)

    def load_from_disk(self, path: str = "models/resnet18v1"):
        """Loads the model parameters from disk using the specified `path`."""
        self.net.load_state_dict(torch.load(path))
        # Switch to inference mode (disables dropout/batch-norm updates).
        self.net.eval()

    def predict_random_image(
        self,
    ) -> Tuple[Image.Image, List[float], Tuple[float, float]]:
        """Select a random image from the validaiton data, run inference
        on it, and return the image as well as the predicted probabilities
        and the correct location for the image.
        """
        # Dataloader shuffles, so the first batch is a random sample.
        _, (inputs, labels) = next(enumerate(self.dataloaders["val"]))
        inputs = inputs.to(self.device)
        labels = labels.to(self.device)
        raw_outputs = self.net(inputs)
        # Softmax is applied here because the network itself outputs raw
        # logits (see the note in __init__).
        outputs = nn.functional.softmax(raw_outputs, dim=1)
        # Just take the first image + probabilities of the batch
        net_probabilities = outputs.cpu().detach().numpy()[0]
        # The probabilities are in the internal order of the network.
        # We need to assign them the correct class names
        probabilities = [None] * len(self.class_names)
        for i in range(len(self.class_names)):
            # Note that we assume that class names are just numbers of squares.
            # If we wanted to use strings instead, we would have to use a dict.
            probabilities[int(self.class_names[i])] = net_probabilities[i]
        return (
            transforms.ToPILImage()(inputs[0]).convert("RGB"),
            probabilities,
            SQUARES[int(self.class_names[int(labels[0])])].center,
        )
if __name__ == "__main__":
    # This main method will train the model and save it to disk.

    # Load pre-trained model and finetune the weights by training it.
    # The model chosen is ResNet18, which is the 18-layer version of ResNet
    # pre-trained on the ImageNet dataset.
    # We just finetune the weights using our own Google Street View data.
    model = GeoModel()
    model.train(num_epochs=25)

    # Save model weights to disk so that we can load the trained model later
    model.save_to_disk()

    # Load pre-trained model and load the finetuned weights from disk
    model = GeoModel()
    model.load_from_disk()

    # Run inference on a random image from the validation dataset.
    # Bug fix: predict_random_image() returns a 3-tuple
    # (image, probabilities, correct_location); unpacking into two names
    # raised ValueError.
    image, probs, correct_location = model.predict_random_image()
| yawnston/geo-guessing | model.py | model.py | py | 8,132 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 27,
"usage_type": "call"
... |
74743639546 | import logging
logger = logging.getLogger('camelot.view.controls.formview')
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
import sip
from camelot.view.art import Icon
from camelot.view.model_thread import post
from camelot.view.model_thread import model_function
from camelot.view.controls.view import AbstractView
from camelot.view.controls.statusbar import StatusBar
from camelot.view import register
from camelot.view.action import ActionFactory
class ContextMenuAction(QtGui.QAction):
    """A QAction for use in a context menu, falling back to a default
    'applications-system' icon when none is supplied."""

    default_icon = Icon('tango/16x16/categories/applications-system.png')

    def __init__(self, parent, title, icon = None):
        """
        :param parent: the widget on which the context menu will be placed
        :param title: text displayed in the context menu
        :param icon: camelot.view.art.Icon object
        """
        super(ContextMenuAction, self).__init__(title, parent)
        self.icon = icon
        if self.icon:
            self.setIcon(self.icon.getQIcon())
        else:
            # No icon given: fall back to the class-level default.
            self.setIcon(self.default_icon.getQIcon())
class FormWidget(QtGui.QWidget):
    """A form widget comes inside a form view or inside an embedded manytoone editor"""

    # Emitted whenever the displayed row's data or position changes.
    changed_signal = QtCore.pyqtSignal()

    def __init__(self, parent, admin):
        QtGui.QWidget.__init__(self, parent)
        self._admin = admin
        # The QDataWidgetMapper keeps the editors in sync with one model row;
        # it is looked up again later via findChild('widget_mapper').
        widget_mapper = QtGui.QDataWidgetMapper(self)
        widget_mapper.setObjectName('widget_mapper')
        widget_layout = QtGui.QHBoxLayout()
        widget_layout.setSpacing(0)
        widget_layout.setMargin(0)
        self._index = 0
        self._model = None
        self._form = None
        self._columns = None
        self._delegate = None
        self.setLayout(widget_layout)

    def get_model(self):
        """Return the model currently attached to this form."""
        return self._model

    def set_model(self, model):
        """Attach a model; its columns and the form layout are fetched
        asynchronously in the model thread."""
        self._model = model
        self._model.dataChanged.connect( self._data_changed )
        self._model.layoutChanged.connect( self._layout_changed )
        self._model.item_delegate_changed_signal.connect( self._item_delegate_changed )
        self._model.setObjectName( 'model' )
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.setModel( model )
            register.register( model, widget_mapper )

        def get_columns_and_form():
            return (self._model.getColumns(), self._admin.get_form_display())

        # post() runs the getter in the model thread, then delivers the
        # result to _set_columns_and_form in the GUI thread.
        post(get_columns_and_form, self._set_columns_and_form)

    def clear_mapping(self):
        """Detach all editors from the mapper so no further data is written."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.clearMapping()

    def _data_changed(self, index_from, index_to):
        #@TODO: only revert if this form is in the changed range
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.revert()
            # Guard against the C++ widget already being destroyed.
            if not sip.isdeleted(self):
                self.changed_signal.emit()

    def _layout_changed(self):
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.revert()
        self.changed_signal.emit()

    @QtCore.pyqtSlot()
    def _item_delegate_changed(self):
        # A new delegate means the editor widgets must be rebuilt.
        from camelot.view.controls.delegates.delegatemanager import \
            DelegateManager
        self._delegate = self._model.getItemDelegate()
        self._delegate.setObjectName('delegate')
        assert self._delegate != None
        assert isinstance(self._delegate, DelegateManager)
        self._create_widgets()

    def set_index(self, index):
        """Point the form at row `index` of the model."""
        self._index = index
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.setCurrentIndex(self._index)

    def get_index(self):
        """Return the model row currently shown, or None when no mapper exists."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            return widget_mapper.currentIndex()

    def submit(self):
        """Write the editors' current values back into the model."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.submit()

    def to_first(self):
        """Navigate to the first row of the model."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.toFirst()
        self.changed_signal.emit()

    def to_last(self):
        """Navigate to the last row of the model."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.toLast()
        self.changed_signal.emit()

    def to_next(self):
        """Navigate to the next row of the model."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.toNext()
        self.changed_signal.emit()

    def to_previous(self):
        """Navigate to the previous row of the model."""
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if widget_mapper:
            widget_mapper.toPrevious()
        self.changed_signal.emit()

    def export_ooxml(self):
        """Render the current row with the form's OOXML template and open
        the result in Word (runs in the model thread)."""
        from camelot.view.export.word import open_stream_in_word

        def create_ooxml_export(row):
            # print self._columns
            def ooxml_export():
                # TODO insert delegates
                fields = self._admin.get_all_fields_and_attributes()
                delegates = {}
                for field_name, attributes in fields.items():
                    delegates[field_name] = attributes['delegate'](**attributes)
                obj = self._model._get_object(row)
                document = self._form.render_ooxml(obj, delegates)
                open_stream_in_word( document )
            return ooxml_export

        post(create_ooxml_export(self.get_index()))

    @QtCore.pyqtSlot(tuple)
    def _set_columns_and_form(self, columns_and_form ):
        # Receives (columns, form) computed in the model thread.
        self._columns, self._form = columns_and_form
        self._create_widgets()

    def _create_widgets(self):
        """Create value and label widgets"""
        from camelot.view.controls.field_label import FieldLabel
        from camelot.view.controls.editors.wideeditor import WideEditor
        #
        # Dirty trick to make form views work during unit tests, since unit
        # tests have no event loop running, so the delegate will never be set,
        # so we get it and are sure it will be there if we are running without
        # threads
        #
        if not self._delegate:
            self._delegate = self._model.getItemDelegate()
        #
        # end of dirty trick
        #
        # only if all information is available, we can start building the form
        if not (self._form and self._columns and self._delegate):
            return
        widgets = {}
        widget_mapper = self.findChild(QtGui.QDataWidgetMapper, 'widget_mapper' )
        if not widget_mapper:
            return
        widget_mapper.setItemDelegate(self._delegate)
        option = QtGui.QStyleOptionViewItem()
        # set version to 5 to indicate the widget will appear on a
        # a form view and not on a table view
        option.version = 5

        #
        # this loop can take a while to complete, so processEvents is called
        # regulary
        #
        for i, (field_name, field_attributes ) in enumerate( self._columns):
            # if i%10==0:
            #     QtCore.QCoreApplication.processEvents(
            #         QtCore.QEventLoop.ExcludeSocketNotifiers,
            #         100
            #     )
            model_index = self._model.index(self._index, i)
            hide_title = False
            if 'hide_title' in field_attributes:
                hide_title = field_attributes['hide_title']
            widget_label = None
            # The delegate creates the proper editor widget for this field.
            widget_editor = self._delegate.createEditor(
                self,
                option,
                model_index
            )
            widget_editor.setObjectName('%s_editor'%field_name)
            if not hide_title:
                widget_label = FieldLabel(
                    field_name,
                    field_attributes['name'],
                    field_attributes,
                    self._admin
                )
                widget_label.setObjectName('%s_label'%field_name)
                # Wide editors get their label above instead of to the left.
                if not isinstance(widget_editor, WideEditor):
                    widget_label.setAlignment(Qt.AlignVCenter | Qt.AlignRight)

            # required fields font is bold
            if ('nullable' in field_attributes) and \
               (not field_attributes['nullable']):
                font = QtGui.QApplication.font()
                font.setBold(True)
                widget_label.setFont(font)

            assert widget_editor != None
            assert isinstance(widget_editor, QtGui.QWidget)

            widget_mapper.addMapping(widget_editor, i)
            widgets[field_name] = (widget_label, widget_editor)

        widget_mapper.setCurrentIndex(self._index)
        self.layout().insertWidget(0, self._form.render(widgets, self))
        #self._widget_layout.setContentsMargins(7, 7, 7, 7)
class FormView(AbstractView):
    """A FormView is the combination of a FormWidget, possible actions and menu
    items

    .. form_widget: The class to be used as a the form widget inside the form
    view"""

    form_widget = FormWidget

    def __init__(self, title, admin, model, index):
        """
        :param title: prefix for the window title
        :param admin: object admin describing the entity being edited
        :param model: the collection proxy model to edit
        :param index: the row of the model to show initially
        """
        AbstractView.__init__(self)

        layout = QtGui.QVBoxLayout()
        form_and_actions_layout = QtGui.QHBoxLayout()
        form_and_actions_layout.setObjectName('form_and_actions_layout')
        layout.addLayout(form_and_actions_layout)

        self.model = model
        self.admin = admin
        self.title_prefix = title

        form = FormWidget(self, admin)
        form.setObjectName( 'form' )
        form.changed_signal.connect( self.update_title )
        form.set_model(model)
        form.set_index(index)
        form_and_actions_layout.addWidget(form)

        statusbar = StatusBar(self)
        statusbar.setObjectName('statusbar')
        statusbar.setSizeGripEnabled(False)
        layout.addWidget(statusbar)
        layout.setAlignment(statusbar, Qt.AlignBottom)
        self.setLayout(layout)

        self.change_title(title)

        if hasattr(admin, 'form_size') and admin.form_size:
            self.setMinimumSize(admin.form_size[0], admin.form_size[1])

        self.validator = admin.create_validator(model)
        # When False, the view may close without validating the current row.
        self.validate_before_close = True

        def get_actions():
            return admin.get_form_actions(None)

        # Form actions are fetched in the model thread, then added to the view.
        post(get_actions, self.setActions)
        self.update_title()

        #
        # Define actions
        #
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.addAction( ActionFactory.view_first(self, self.viewFirst) )
        self.addAction( ActionFactory.view_last(self, self.viewLast) )
        self.addAction( ActionFactory.view_next(self, self.viewNext) )
        self.addAction( ActionFactory.view_previous(self, self.viewPrevious) )
        self.addAction( ActionFactory.refresh(self, self.refresh_session) )
        self.addAction( ActionFactory.export_ooxml(self, form.export_ooxml) )

    @QtCore.pyqtSlot()
    def refresh_session(self):
        # Refresh all objects in the ORM session from the database.
        from elixir import session
        from camelot.core.orm import refresh_session
        refresh_session( session )

    @QtCore.pyqtSlot()
    def refresh(self):
        """Refresh the data in the current view"""
        self.model.refresh()

    def update_title(self):
        """Recompute the window title from the current entity (async)."""
        def get_title():
            obj = self.getEntity()
            return u'%s %s' % (
                self.title_prefix,
                self.admin.get_verbose_identifier(obj)
            )

        post(get_title, self.change_title)

    def getEntity(self):
        """Return the object displayed in the form, or None without a form."""
        form = self.findChild(QtGui.QWidget, 'form' )
        if form:
            return self.model._get_object(form.get_index())

    @QtCore.pyqtSlot(list)
    def setActions(self, actions):
        """Place an ActionsBox with the given form actions next to the form."""
        form = self.findChild(QtGui.QWidget, 'form' )
        layout = self.findChild(QtGui.QLayout, 'form_and_actions_layout' )
        if actions and form and layout:
            side_panel_layout = QtGui.QVBoxLayout()
            from camelot.view.controls.actionsbox import ActionsBox
            logger.debug('setting Actions for formview')
            actions_widget = ActionsBox(self, self.getEntity)
            actions_widget.setObjectName('actions')
            action_widgets = actions_widget.setActions(actions)
            # Keep the action widgets' enabled state in sync with the form.
            for action_widget in action_widgets:
                form.changed_signal.connect( action_widget.changed )
                action_widget.changed()
            side_panel_layout.insertWidget(1, actions_widget)
            side_panel_layout.addStretch()
            layout.addLayout(side_panel_layout)

    def viewFirst(self):
        """select model's first row"""
        form = self.findChild(QtGui.QWidget, 'form' )
        if form:
            form.submit()
            form.to_first()

    def viewLast(self):
        """select model's last row"""
        # submit should not happen a second time, since then we don't want
        # the widgets data to be written to the model
        form = self.findChild(QtGui.QWidget, 'form' )
        if form:
            form.submit()
            form.to_last()

    def viewNext(self):
        """select model's next row"""
        # submit should not happen a second time, since then we don't want
        # the widgets data to be written to the model
        form = self.findChild(QtGui.QWidget, 'form' )
        if form:
            form.submit()
            form.to_next()

    def viewPrevious(self):
        """select model's previous row"""
        # submit should not happen a second time, since then we don't want
        # the widgets data to be written to the model
        form = self.findChild(QtGui.QWidget, 'form' )
        if form:
            form.submit()
            form.to_previous()

    @QtCore.pyqtSlot(bool)
    def showMessage(self, valid):
        # Callback for the async validation started in validateClose():
        # close directly when valid; otherwise let the user decide whether
        # to discard the invalid row.
        form = self.findChild(QtGui.QWidget, 'form' )
        if not valid and form:
            reply = self.validator.validityDialog(
                form.get_index(), self
            ).exec_()
            if reply == QtGui.QMessageBox.Discard:
                # clear mapping to prevent data being written again to the model,
                # then we reverted the row
                form.clear_mapping()
                self.model.revertRow(form.get_index())
                self.validate_before_close = False
                self.close()
        else:
            self.validate_before_close = False
            self.close()

    def validateClose(self):
        """Return True when the view may close immediately; otherwise start
        async validation (which will close the view via showMessage)."""
        logger.debug('validate before close : %s' % self.validate_before_close)
        form = self.findChild(QtGui.QWidget, 'form' )
        if self.validate_before_close and form:
            # submit should not happen a second time, since then we don't
            # want the widgets data to be written to the model
            form.submit()

            def validate():
                return self.validator.isValid(form.get_index())

            post(validate, self.showMessage)
            return False

        return True

    def closeEvent(self, event):
        #print 'close event'
        logger.debug('formview closed')
        if self.validateClose():
            event.accept()
        else:
            # Validation is pending; showMessage will close the view later.
            event.ignore()

    @model_function
    def toHtml(self):
        """generates html of the form"""
        from jinja2 import Environment

        def to_html(d = u''):
            """Jinja 1 filter to convert field values to their default html
            representation
            """

            def wrapped_in_table(env, context, value):
                # Lists are rendered as a one-column table; everything else
                # is converted to its unicode representation.
                if isinstance(value, list):
                    return u'<table><tr><td>' + \
                           u'</td></tr><tr><td>'.join(
                               [unicode(e) for e in value]
                           ) + u'</td></tr></table>'
                return unicode(value)

            return wrapped_in_table

        entity = self.getEntity()
        fields = self.admin.get_fields()
        table = [dict( field_attributes = field_attributes,
                       value = getattr(entity, name ))
                 for name, field_attributes in fields]

        context = {
            'title': self.admin.get_verbose_name(),
            'table': table,
        }

        from camelot.view.templates import loader
        env = Environment(loader = loader)
        env.filters['to_html'] = to_html
        tp = env.get_template('form_view.html')

        return tp.render(context)
| kurtraschke/camelot | camelot/view/controls/formview.py | formview.py | py | 16,671 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QAction",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "camelot.view.ar... |
70075741628 | # -*- encoding:utf-8 -*-
'''
@time: 2019/12/21 8:28 下午
@author: huguimin
@email: 718400742@qq.com
一个doc表示一个样本
'''
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from layers.dynamic_rnn import DynamicLSTM
from layers.attention import Attention
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    forward() computes A * (X @ W) / (deg + 1) (+ bias), i.e. a degree-
    normalized neighborhood aggregation over a batched dense adjacency.
    """
    def __init__(self, in_features, out_features, bias=True):
        """
        :param in_features: size of each input node feature vector
        :param out_features: size of each output node feature vector
        :param bias: add a learnable per-output-feature bias when True
        """
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(
                torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        # Bug fix: torch.FloatTensor allocates *uninitialized* memory, so
        # without an explicit init the layer starts from garbage values
        # (possibly NaN/inf) and training is non-reproducible.
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weight and bias uniformly in [-1/sqrt(out), 1/sqrt(out)],
        as in Kipf's reference GCN implementation."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, text, adj):
        """
        :param text: node features, shape (batch, num_nodes, in_features)
        :param adj: dense adjacency, shape (batch, num_nodes, num_nodes)
        :return: aggregated features, shape (batch, num_nodes, out_features)
        """
        hidden = torch.matmul(text, self.weight)
        # Normalize each node's aggregation by its degree + 1 (self-loop).
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        output = torch.matmul(adj, hidden) / denom
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class ECGCN(nn.Module):
    """Emotion-cause extraction model: a BiLSTM + attention clause encoder
    followed by three gated graph-convolution layers over the clause graph.

    One document is one sample; each document is a sequence of clauses, one
    of which is the annotated emotion clause (``emotion_id``).
    """

    def __init__(self, word_embedding, pos_embedding, opt):
        super(ECGCN, self).__init__()
        self.opt = opt
        # Frozen pretrained word and relative-position embedding tables.
        self.embed = nn.Embedding.from_pretrained(torch.tensor(word_embedding, dtype=torch.float))
        self.pos_embed = nn.Embedding.from_pretrained(torch.tensor(pos_embedding, dtype=torch.float))
        # Word-level BiLSTM, then attention pooling into one vector per clause.
        self.word_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.clause_encode = Attention(2 * opt.hidden_dim, 1, opt.max_sen_len, opt)
        self.fc1 = nn.Linear(2 * opt.hidden_dim + self.opt.embedding_dim_pos, 2 * opt.hidden_dim)
        self.fc2 = nn.Linear(2 * opt.hidden_dim, opt.num_class)
        self.text_embed_dropout = nn.Dropout(opt.keep_prob1)
        # Three GCN layers, each paired with a scalar gate conditioned on the
        # emotion-clause representation.
        self.gates = nn.ModuleList()
        self.gcns = nn.ModuleList()
        for _ in range(3):
            self.gcns.append(GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim))
            self.gates.append(nn.Linear(2 * opt.hidden_dim, 1))

    def position_weight(self, inputs, emotion_id, doc_len):
        """Compute each clause's offset relative to the emotion clause.

        :param inputs: clause representations [batch, max_doc_len, dim]
        :param emotion_id: emotion-clause index per sample [batch]
        :param doc_len: number of clauses per sample [batch]
        :return: numpy array [batch, max_doc_len]; offsets are shifted by +69
                 so they index into the position-embedding table
        """
        batch_size, max_len = inputs.shape[0], inputs.shape[1]
        relative_pos = np.zeros((batch_size, max_len))
        for sample in range(batch_size):
            n_clauses = doc_len[sample].item()  # renamed: was `len`, shadowing the builtin
            for i in range(n_clauses):
                relative_pos[sample][i] = i - emotion_id[sample].item() + 69
        return relative_pos

    def emotion_encode(self, inputs, emotion_id):
        """Gather each sample's emotion-clause representation.

        :param inputs: [batch, max_doc_len, dim]
        :param emotion_id: [batch]
        :return: CPU FloatTensor [batch, dim]
        """
        batch_size, dim = inputs.shape[0], inputs.shape[2]
        emotion_clause = np.zeros((batch_size, dim))
        for sample in range(batch_size):
            clause = inputs[sample][emotion_id[sample]]
            emotion_clause[sample] = clause.cpu().detach().numpy()
        # NOTE(review): the numpy round-trip detaches gradients and returns a
        # CPU tensor -- confirm this is intended for the gating below.
        return torch.FloatTensor(emotion_clause)

    def emotion_weight(self, inputs, emotion_clause):
        """Dot-product score between every clause and the emotion clause.

        :param inputs: [batch, max_doc_len, dim]
        :param emotion_clause: [batch, dim]
        :return: [batch, max_doc_len, 1]
        """
        batch, dim = inputs.shape[0], inputs.shape[2]
        emotion_clause = torch.reshape(emotion_clause, [batch, dim, 1])
        alpha = torch.reshape(torch.matmul(inputs, emotion_clause.float()), [-1, self.opt.max_doc_len, 1])
        return alpha

    def mask(self, inputs, emotion_id):
        """Zero every clause vector except the emotion clause.

        :param inputs: [batch, max_doc_len, dim]
        :param emotion_id: [batch]
        :return: masked inputs, same shape
        """
        batch_size, max_len = inputs.shape[0], inputs.shape[1]
        emotion_idx = emotion_id.cpu().numpy()
        mask = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(emotion_idx[i]):
                mask[i].append(0)
            for j in range(emotion_idx[i], emotion_idx[i] + 1):
                mask[i].append(1)
            for j in range(emotion_idx[i] + 1, max_len):
                mask[i].append(0)
        mask = torch.tensor(mask).unsqueeze(2).float().to(self.opt.device)
        return mask * inputs

    def pack_sen_len(self, sen_len):
        """Replace zero sentence lengths with 1 (fully padded sentences) so
        the packed LSTM downstream never receives a zero length.

        :param sen_len: [batch, max_doc_len]
        :return: tensor [batch, max_doc_len]
        """
        batch_size = sen_len.shape[0]
        up_sen_len = np.zeros([batch_size, self.opt.max_doc_len])
        for i, doc in enumerate(sen_len):
            for j, sen in enumerate(doc):
                up_sen_len[i][j] = 1 if sen == 0 else sen
        return torch.tensor(up_sen_len)

    def forward(self, inputs):
        x, sen_len, doc_len, doc_id, emotion_id, adj = inputs
        up_sen_len = self.pack_sen_len(sen_len)
        x = torch.reshape(x, [-1, self.opt.max_sen_len])
        x = self.embed(x)
        x = self.text_embed_dropout(x)
        up_sen_len = torch.reshape(up_sen_len, [-1])
        word_encode = self.word_lstm(x, up_sen_len)
        clause_encode = self.clause_encode(word_encode, sen_len)
        # NOTE(review): `embs` is assembled but never consumed below; it is
        # kept to preserve the original computation (the pos_embed lookup can
        # raise on out-of-range indices).
        embs = [clause_encode]
        embs += [self.pos_embed(torch.LongTensor(self.position_weight(clause_encode, emotion_id, doc_len)).to(self.opt.device))]
        emotion_encode = self.emotion_encode(clause_encode, emotion_id)  # emotion-clause embedding
        # NOTE(review): emotion_encode lives on the CPU; on a CUDA device the
        # gate below would receive a CPU tensor -- confirm device handling.
        x = clause_encode
        for i in range(3):
            x = F.relu(self.gcns[i](x, adj))
            # torch.sigmoid replaces the deprecated F.sigmoid (same numerics).
            weight = torch.sigmoid(self.gates[i](emotion_encode))
            weight = weight.unsqueeze(dim=-1)
            x = x * weight
        output = self.fc2(x.float())
        return output
| LeMei/FSS-GCN | models/word2vec/ecgcn.py | ecgcn.py | py | 9,816 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
9054587294 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 28 16:52:49 2021
@author: shabalin
Utils to work with fable and hexrd functions.
"""
import sys, os
import numpy as np
import yaml, subprocess
#import cbftiffmxrdfix
def run_peaksearch(par_file=None):
    """ Wrapper for the ImageD11 peaksearch.py script"""
    # The YAML job file drives every option below.
    with open(par_file) as f:
        pars = yaml.safe_load(f)
    if pars['stem_out'] == None:
        pars['stem_out'] = ''
    # First/last frame numbers of the image series.
    first_im = int(pars['first_image'])
    last_im = int(pars['first_image']) + int(pars['nbr_images']) - 1
    ndigits = pars['ndigits']
    path_inp = os.path.join(pars['image_path'],pars['image_stem'])
    path_out = os.path.join(pars['output_dir'], pars['stem_out']+pars['det_code']+'_peaks')
    # construct the command for peaksearch.py
    command = ('peaksearch.py -n {} -F {} -f {:d} -l {:d} -o {} -d {} -p Y --ndigits {:d} -S {:.3f} -T {:.3f} '.format(
        path_inp,pars['filetype'],first_im,last_im,path_out,
        pars['dark_image'],ndigits,pars['omegastep'], pars['startomega']
    ))
    # Adds threshold values to command
    for t in pars['thresholds']:
        command += '-t {:d} '.format(t)
    # Adds keyword args
    if 'kwargs' in pars:
        command += '{} '.format(pars['kwargs'])
    # modify command for lunarc
    # NOTE(review): lunarc_path is not defined anywhere in this module, so
    # this branch raises NameError -- confirm where it should come from.
    if 'lunarc' in pars:
        command = lunarc_path + command
    print('Running peaksearch with the following command:')
    print(command)
    try:
        subprocess.call(command, shell=True)
    except AttributeError as a:
        # peaksearch.py is known to end with a spurious AttributeError even
        # when the search itself succeeded; report and continue.
        print('peaksearch.py ended with error. It seems to work nonetheless.', a)
    del pars, first_im, last_im, ndigits, path_inp, path_out, command
    return
def merge_peaks(par_file, config_file):
    # Wrapper for ImageD11 merge_flt.py
    if (par_file is None):
        # NOTE(review): the message mentions run_peaksearcher -- likely a
        # copy-paste; confirm before changing a user-visible string.
        raise ValueError('Must supply par_file to run_peaksearcher')
    with open(par_file) as f:
        pars = yaml.safe_load(f)
    if pars['stem_out'] == None:
        pars['stem_out'] = ''
    # Output file: an explicit 'merged_name' wins over the default pattern.
    if 'merged_name' in pars:
        file_out = os.path.join(pars['output_dir'],pars['stem_out']+pars['merged_name'])
    else:
        file_out = os.path.join(pars['output_dir'],pars['stem_out']+pars['det_code']+'_peaks_merged.flt')
    # Input stem matching the files produced by run_peaksearch().
    inp = os.path.join(pars['output_dir'], pars['stem_out']+pars['det_code']+'_peaks')
    print('Merging flt files matching {}'.format(inp))
    # merge_flt.py requires a config argument; 'junk' acts as a placeholder.
    if not config_file:
        config_file = 'junk'
    command = 'merge_flt.py {} {} {} {:d} '.format(config_file,inp,file_out,pars['pixel_tol']) + ('{:d} '*len(pars['thresholds'])).format(*pars['thresholds'])
    # modify command for lunarc
    # NOTE(review): lunarc_path is not defined anywhere in this module, so
    # this branch raises NameError -- confirm where it should come from.
    if 'lunarc' in pars:
        command = lunarc_path + command
    print(command)
    subprocess.call(command, shell=True)
    del pars, file_out, inp, command
    return
def hexrd_to_fable(path_to_hexrd_yml, path_to_fable_par, det=1, mat='Nb'):
    """Convert a hexrd instrument YAML file into a fable/ImageD11 .par file.

    :param path_to_hexrd_yml: input hexrd instrument configuration (YAML)
    :param path_to_fable_par: output fable parameter file
    :param det: detector number; selects 'detector_<det>' in the YAML
    :param mat: material key -- one of 'ruby', 'Nb', 'CeO2', 'Ti'
    :raises ValueError: if `mat` is not a known material
    """
    # Unit-cell parameters for the supported calibrant materials.
    cells = {
        'ruby': { "a": 4.7608, "b": 4.7608, "c": 12.99568, "alpha": 90.0, "beta": 90.0, "gamma": 120.0, "lattice": 'R'},
        'Nb':   { "a": 3.3042, "b": 3.3042, "c": 3.3042, "alpha": 90.0, "beta": 90.0, "gamma": 90.0, "lattice": 'I'},
        'CeO2': { "a": 5.41153, "b": 5.41153, "c": 5.41153, "alpha": 90.0, "beta": 90.0, "gamma": 90.0, "lattice": 'F'},
        'Ti':   { "a": 2.9505, "b": 2.9505, "c": 4.6826, "alpha": 90.0, "beta": 90.0, "gamma": 120.0, "lattice": 'P'},
    }
    if mat not in cells:
        # Bug fix: previously an unknown material only printed an error and
        # then crashed later with NameError; fail loudly instead.
        raise ValueError('Incorrect material: {}'.format(mat))
    cell_params = cells[mat]
    detname = 'detector_{:d}'.format(det)
    with open(path_to_hexrd_yml) as f:
        pars = yaml.safe_load(f)
    wavelength = 12.39842/pars['beam']['energy']
    translation = pars['detectors'][detname]['transform']['translation']
    tilt = pars['detectors'][detname]['transform']['tilt']
    frame_size = [pars['detectors'][detname]['pixels']['columns'], pars['detectors'][detname]['pixels']['rows']]
    pix_size = pars['detectors'][detname]['pixels']['size']
    # Refuse to silently overwrite an existing output file.
    if os.path.exists(path_to_fable_par):
        if input('File %s already exist! Overwrite it? (y/n):' % path_to_fable_par) != 'y':
            print('Aborted!')
            return
    # One "key value" entry per line; units follow fable conventions
    # (distances in micrometres, hence the *1000 / /1000 factors).
    lines = [
        'cell__a {}'.format(cell_params['a']),
        'cell__b {}'.format(cell_params['b']),
        'cell__c {}'.format(cell_params['c']),
        'cell_alpha {}'.format(cell_params['alpha']),
        'cell_beta {}'.format(cell_params['beta']),
        'cell_gamma {}'.format(cell_params['gamma']),
        'cell_lattice_[P,A,B,C,I,F,R] {}'.format(cell_params['lattice']),
        'chi {}'.format(0.0),
        'distance {}'.format(-translation[2] * 1000),
        'fit_tolerance {}'.format(0.5),
        'min_bin_prob {}'.format(1e-05),
        'no_bins {}'.format(10000),
        'o11 {}'.format(0),
        'o12 {}'.format(-1),
        'o21 {}'.format(1),
        'o22 {}'.format(0),
        'omegasign {}'.format(1.0),
        't_x {}'.format(0),
        't_y {}'.format(0),
        't_z {}'.format(0),
        # hexrd tilt order is reversed relative to fable's tilt_x/y/z.
        'tilt_x {}'.format(tilt[2]),
        'tilt_y {}'.format(tilt[1]),
        'tilt_z {}'.format(tilt[0]),
        'wavelength {:0.6f}'.format(wavelength),
        'wedge {}'.format(0.0),
        'weight_hist_intensities {}'.format(0),
        'y_center {}'.format(translation[1] / pix_size[1] + frame_size[1] / 2),
        'y_size {}'.format(pix_size[1] * 1000),
        'z_center {}'.format(translation[0] / pix_size[0] + frame_size[0] / 2),
        'z_size {}'.format(pix_size[0] * 1000),
    ]
    # `with` guarantees the handle is closed even if a write fails.
    with open(path_to_fable_par, 'w') as f:
        f.write('\n'.join(lines))
    return
def fable_to_hexrd(path_to_fable_par, path_to_hexrd_yml):
    """Convert a fable/ImageD11 .par file into a hexrd instrument YAML file."""
    y_frm_size = 2880
    z_frm_size = 2880
    # (.par key substring, result name, divisor) -- order matches the
    # original if/elif chain; first matching token per line wins.
    field_map = [
        ('distance', 'dist', 1000),
        ('tilt_x', 'tilt_1', 1),
        ('tilt_y', 'tilt_2', 1),
        ('tilt_z', 'tilt_3', 1),
        ('wavelength', 'wavelength', 1),
        ('y_center', 'y_cen', 1),
        ('y_size', 'y_pix_size', 1000),
        ('z_center', 'z_cen', 1),
        ('z_size', 'z_pix_size', 1000),
    ]
    vals = {}
    with open(path_to_fable_par) as par_file:
        for line in par_file:
            for token, key, divisor in field_map:
                if token in line:
                    vals[key] = float(line.split()[1]) / divisor
                    break
    # Assemble the hexrd instrument description.
    pars = {'beam':
                {'energy': 12.39842 / vals['wavelength'], 'vector': {'azimuth': 90.0, 'polar_angle': 90.0}},
            'detectors':
                {'detector_1':
                     {'buffer': None,
                      'pixels': {'columns': y_frm_size, 'rows': z_frm_size, 'size': [vals['z_pix_size'], vals['y_pix_size']]},
                      'saturation_level': 14000.0,
                      'transform': {'tilt': [vals['tilt_1'], vals['tilt_2'], vals['tilt_3']],
                                    'translation': [(vals['z_cen'] - z_frm_size / 2) * vals['z_pix_size'],
                                                    (vals['y_cen'] - y_frm_size / 2) * vals['y_pix_size'],
                                                    -vals['dist']]}}},
            'id': 'instrument',
            'oscillation_stage': {'chi': 0.0, 'translation': [0.0, 0.0, 0.0]}}
    # Refuse to silently overwrite an existing output file.
    if os.path.exists(path_to_hexrd_yml):
        if input('File %s already exist! Overwrite it? (y/n):' % path_to_hexrd_yml) != 'y':
            print('Aborted!')
            return
    with open(path_to_hexrd_yml, 'w') as out_file:
        yaml.dump(pars, out_file)
    return
{
"api_name": "yaml.safe_load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_num... |
74126214589 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
import datetime
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
@auth.requires_login()
def index():
    """
    Main logged-in homepage: displays the user's collection.

    Ensures the user owns an 'Unfiled' box, flashes any queued session
    message, and splits the user's boxes into public and private groups.
    """
    # If the user doesn't have an 'Unfiled' box yet, create one.
    if (db((db.box.owner_id == auth.user.id) & (db.box.name == 'Unfiled')).count() == 0):
        # NOTE(review): is_public is stored as the string 'False' -- confirm
        # the field type expects a string rather than a boolean.
        db.box.insert(name='Unfiled',
                      is_public='False',
                      owner_id=auth.user.id,
                      created_on=datetime.datetime.now())
        # Bug fix: the original evaluated `db.commit` without calling it,
        # which is a no-op; commit the insert explicitly.
        db.commit()
    # Display any queued message, then clear it.
    if (session.message):
        response.flash = session.message
        session.message = None
    # The user's public boxes.
    public = db((db.box.owner_id == auth.user.id) & (db.box.is_public == True)).select()
    # The user's private boxes.
    private = db((db.box.owner_id == auth.user.id) & (db.box.is_public != True)).select()
    # Comic count, used by the view to offer assistance to new users.
    no_of_comics = db(db.comic.owner_id == auth.user.id).count()
    return dict(public_boxes=public, private_boxes=private, no_of_comics=no_of_comics)
@auth.requires_login()
def all():
    """Show every comic owned by the logged-in user, ordered by title."""
    owned = db((db.comic.owner_id == auth.user.id) & (auth.user.id == db.auth_user.id)).select(orderby=db.comic.title)
    if not owned:
        return dict()
    return dict(comics=owned)
@auth.requires_login()
def search():
    """
    Search comics by title/writer/artist/publisher.

    Builds a conjunction of LIKE conditions from the submitted fields,
    restricts results to comics visible to the current user (own comics or
    comics in public boxes), and de-duplicates comics that sit in more than
    one box.
    """
    form = FORM(DIV(LABEL('Title:', _for='title', _class="control-label col-sm-3"),
                    DIV(INPUT(_class = "form-control string", _name='title', _type="text"), _class="col-sm-3"),
                    _class="form-group"),
                DIV(LABEL('Writer:', _for='writer', _class="control-label col-sm-3"),
                    DIV(INPUT(_class = "form-control string", _name='writer', _type="text"), _class="col-sm-3"),
                    _class="form-group"),
                DIV(LABEL('Artist:', _for='artist', _class="control-label col-sm-3"),
                    DIV(INPUT(_class = "form-control string", _name='artist', _type="text"), _class="col-sm-3"),
                    _class="form-group"),
                DIV(LABEL('Publisher:', _for='publisher', _class="control-label col-sm-3"),
                    DIV(INPUT(_class = "form-control string", _name='publisher', _type="text"), _class="col-sm-3"),
                    _class="form-group"),
                DIV(DIV(INPUT(_class = "btn btn-primary", _value='Search', _type="submit"),
                        _class="col-sm-9 col-sm-offset-3"),
                    _class="form-group"),
                _class="form-horizontal")
    results = dict()
    distinct = dict()
    if form.accepts(request, session):
        # (form field name, database column) pairs searched with LIKE;
        # replaces four copy-pasted blocks from the original.
        criteria = [('title', db.comic.title),
                    ('writer', db.comic.writers),
                    ('artist', db.comic.artists),
                    ('publisher', db.comic.publisher)]
        search_term = None
        for field_name, column in criteria:
            value = request.vars[field_name]
            if value:
                condition = column.like("%" + value + "%")
                search_term = condition if search_term is None else (search_term & condition)
        # Allow for a blank search to return all visible comics.
        # TODO: Disallow when this search could overload the system,
        # i.e. lots of public comics.
        constraint = (db.comic_in_box.box_id == db.box.id) & ((db.box.is_public == True) | (db.box.owner_id == auth.user.id)) & (db.comic_in_box.comic_id == db.comic.id) & (db.comic.owner_id == db.auth_user.id)
        search_term = constraint if search_term is None else (search_term & constraint)
        results = db(search_term).select()
        # Filter out duplicate results caused by comics being in multiple
        # (public) boxes; select's `distinct` is impractical here.
        for result in results:
            if result.comic.id not in distinct:
                distinct[result.comic.id] = result.comic_in_box.id
        # Output success, indicated by number of distinct result(s).
        output = "Search complete: " + str(len(distinct)) + " result"
        if (len(distinct) != 1):
            output += "s"
        response.flash = output
    else:
        if form.errors:
            response.flash = 'One or more of the entries is incorrect'
    return dict(form = form, results = results, distinct = distinct)
| tylrbvn/longboxes | controllers/collection.py | collection.py | py | 5,360 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
}
] |
7122911694 | from django.conf.urls import url
from one import views
from one.views import CreateStudent
# URL routes for the `one` app; the name= kwargs enable reverse lookups.
urlpatterns = [
    url(r'^index/', views.index),
    url(r'^print/',views.PrintTable,name='print'),
    # Captures one numeric group from the URL and passes it to the view.
    url(r'^studentname/(\d+)/',views.stuname,name='studentname'),
    url(r'^detail/',views.detail,name='detail'),
    url(r'^CreateStudent/', views.CreateStudent,name='CreateStudent'),
]
| lao1a0/Django-1 | one/urls.py | urls.py | py | 385 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "one.views.index",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "one.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url... |
5212403801 | # -*- coding: utf-8 -*-
import scrapy
from junyang_spider.items import YouzySchoolBadgeItem
class SchoolBadgeSpider(scrapy.Spider):
    """Collect school names and badge image URLs from youzy.cn search pages."""
    name = "school_badge"
    allowed_domains = ["youzy.cn"]
    start_urls = [
        "https://www.youzy.cn/college/search?page=1",
    ]
    custom_settings = {
        'ITEM_PIPELINES': {'gaokao.pipelines.SchoolBadgePipeline': 200}
    }

    def parse(self, response):
        # Each college entry is an <li class="clearfix"> node.
        for entry in response.css("li.clearfix"):
            badge_src = entry.css('a img::attr("src")').extract_first()
            # Skip entries whose src does not contain an http URL.
            if badge_src.find("http") == -1:
                continue
            item = YouzySchoolBadgeItem()
            item['school_name'] = entry.css('a.name::text').extract_first()
            item['image_url'] = badge_src
            yield item
        # Queue the remaining result pages (2..143).
        # NOTE(review): these requests are re-yielded from every parsed page;
        # confirm the scheduler's duplicate filter is relied upon.
        for page in range(2, 144):
            yield scrapy.Request('https://www.youzy.cn/college/search?page=%d' % page, callback=self.parse)
| endForYou/spider | junyang_spider/spiders/school_badge_spider.py | school_badge_spider.py | py | 914 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "junyang_spider.items.YouzySchoolBadgeItem",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 25,
"usage_type": "call"
}
] |
6018015716 | import os
import re
from pathlib import Path
summary = ""
def get_sql(name, docs, cat, markdown, docs_url):
    """Build an INSERT statement for the `ae-expression` table.

    Bug fix: values were previously interpolated without quoting, producing
    invalid SQL for any non-numeric value.  Each value is now rendered as a
    single-quoted SQL string literal with embedded quotes doubled.
    NOTE(review): for untrusted input a parameterized query is still safer
    than string building.
    """
    def _quote(value):
        # Render a value as a SQL string literal ('' escapes a quote).
        return "'" + str(value).replace("'", "''") + "'"

    return ("INSERT INTO `ae-expression` "
            "( `name`, `docs`, `cat`, `markdown`, `docs_url`) "
            "VALUES ( {}, {}, {}, {}, {});").format(
        _quote(name), _quote(docs), _quote(cat), _quote(markdown), _quote(docs_url))
def get_content(file_path, docs="", cat=""):
    """Extract '## heading' sections from a markdown file and emit SQL.

    Bug fix: `docs` and `cat` now default to '' because `print_folder_tree`
    calls this function with only the file path.
    NOTE(review): the `cat` argument is always overwritten from the file
    name below, so the parameter is effectively unused.
    """
    with open(file_path, "r", encoding='utf-8') as file:
        markdown_text = file.read()
    # Each match is a '##' heading plus its body up to the next heading.
    reg = r'^##.+\n[^#]+'
    result = re.findall(reg, markdown_text, re.M)
    for res in result:
        name = res.split("\n\n")[0]
        markdown = "\n".join(res.split("\n\n")[1:])
        cat = Path(file_path).name
        print(name.replace("## ", "").replace("(", "").replace(")", ""))
        cat = cat.replace(".md", "")
        # NOTE(review): the generated SQL statement is discarded; presumably
        # it should be collected or written somewhere.
        get_sql(name, docs, cat, markdown, "")
        print(markdown)
def print_folder_tree(folder_path, cat=""):
    # Walk every file and sub-folder below the given folder.
    # NOTE(review): `index` is declared global but never initialised at
    # module level, so `index += 1` raises NameError on the first call.
    # NOTE(review): get_content() is called with a single argument although
    # its signature declares three parameters -- confirm the intended call.
    global summary
    global index
    index += 1
    for entry in os.scandir(folder_path):
        if index > 2:
            return
        summary += "\n"
        if entry.is_dir():
            # Recurse into sub-folders, passing the folder name as category.
            print_folder_tree(entry.path, entry.name)
            ...
        elif entry.name != "summary.md":
            get_content(entry.path)
# with open('data.txt', "r+", encoding='utf-8') as file:
# file_content = file.read()
# file.seek(0)
# file.truncate()
# file.write("# Title\n" + file_content)
root_dir = r"H:\Scripting\Vue Projects\docs2_yuelili_com\AE\expression"
# print_folder_tree(root_dir)
# get_content(
# r"H:\Scripting\Vue Projects\docs2_yuelili_com\AE\expression\General\Global.md")
| Yuelioi/Program-Learning | Python/Projects/提取文件API的sql.py | 提取文件API的sql.py | py | 1,733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.findall",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.M",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 35,... |
26408420089 | import json
from socket import *
import base64
def client_json(ip, port, obj):
    """Send a command/upload/download request to the server as JSON over TCP.

    `obj` must contain exactly one of the keys 'exec_cmd', 'upload_file' or
    'download_file'.  NOTE(review): if none of those keys is present,
    `send_obj` is never assigned and json.dumps below raises NameError.
    """
    # Create a TCP socket and connect.
    sockobj = socket(AF_INET, SOCK_STREAM)
    sockobj.connect((ip, port))
    if 'exec_cmd' in obj.keys():
        send_obj = obj
    elif 'upload_file' in obj.keys():
        # File bytes travel inside the JSON object as a base64 text payload.
        with open('{0}'.format(obj['upload_file']), 'rb') as f:
            read_data = f.read()
            bytes_b64code = base64.b64encode(read_data)
        send_obj = {'upload_file': obj['upload_file'], 'file_bit': bytes_b64code.decode()}
    elif 'download_file' in obj.keys():
        send_obj = obj
    # Serialise the object to a JSON byte string.
    send_message = json.dumps(send_obj).encode()
    # Take the first 1024-byte fragment to send.
    send_message_fragment = send_message[:1024]
    # Remaining data.
    send_message = send_message[1024:]
    while send_message_fragment:
        sockobj.send(send_message_fragment)  # send one fragment (if fragmented)
        send_message_fragment = send_message[:1024]  # next 1024-byte fragment
        send_message = send_message[1024:]  # remaining data
    recieved_message = b''  # accumulator for the full reply
    recieved_message_fragment = sockobj.recv(1024)  # first reply fragment
    while recieved_message_fragment:
        recieved_message = recieved_message + recieved_message_fragment  # reassemble all fragments
        recieved_message_fragment = sockobj.recv(1024)
    return_data = json.loads(recieved_message.decode())
    if 'download_file' not in return_data.keys():
        print('收到确认数据:', return_data)
    else:
        print('收到确认数据:', return_data)
        # Should use the downloaded file's real name; the same directory was
        # used for testing, so 'download_file.py' is hard-coded.
        with open('download_file.py', 'w+') as f:
            # NOTE(review): base64 text is plain ASCII, so the 'GBK'
            # encoding works but 'ascii' would be clearer -- confirm.
            b4code_back = bytes(return_data['file_bit'], 'GBK')
            file_info = base64.b64decode(b4code_back)
            f.write(file_info.decode())
            print('下载文件{0}保存成功!'.format((obj.get('download_file'))))
    sockobj.close()
if __name__ == '__main__':
    # Works with both the Linux and the Windows interpreter.
    port = 6666
    # Execute a shell command on the server.
    exec_cmd = {'exec_cmd': 'pwd'}
    client_json('192.168.0.188', port, exec_cmd)
    # Upload a file.
    upload_file = {'upload_file': 'snmpv2_get_file.py'}
    client_json('192.168.0.188', port, upload_file)
    # Download a file.
    download_file = {'download_file': 'snmpv2_get_file.py'}
    client_json('192.168.0.188', port, download_file)
| Prin-Meng/NetDevOps | network_protocal/task_day13/socket_client.py | socket_client.py | py | 2,627 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base64.b64encode",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_n... |
34778096922 | import os
import yaml
def book_ids_from_frontmatter(frontmatter):
    '''Return a list of book id hashes from frontmatter of list file.

    The frontmatter is a YAML mapping with a top-level 'sections' list;
    each section has 'listings' whose 'book'-typed entries carry an 'id'.
    '''
    # yaml.safe_load replaces the bare yaml.load, which permits arbitrary
    # object construction and is deprecated without an explicit Loader.
    sections = yaml.safe_load(frontmatter)['sections']
    books = []
    for section in sections:
        for source in section['listings']:
            if source['type'] == 'book':
                books.append(source['id'])
    return books
def get_asins_from_files(book_data_paths):
    '''Given list of file paths, return list of ASIN strings in YAML \
frontmatter in specified files.'''
    asins = []
    for path in book_data_paths:
        # `with` guarantees the handle is closed even if parsing raises
        # (the original leaked the file object on error).
        with open(path) as book_file:
            book_yaml = grab_yaml_frontmatter(book_file)
        # safe_load: frontmatter is plain data, no full yaml.load needed.
        asins.append(str(yaml.safe_load(book_yaml)['amzn']))
    return asins
def get_book_data_paths(list_file_path, books):
    '''Given root book data directory, return list of paths to files that \
match book id hashes in given list of hashes.'''
    # The _books directory sits five levels above the list file.
    book_data_dir = os.path.abspath(list_file_path) + '/../../../../../_books'
    book_data_dir = os.path.abspath(book_data_dir)
    matches = []
    for dirpath, _dirnames, filenames in os.walk(book_data_dir):
        for candidate in filenames:
            # Only .bib files can hold book data.
            if os.path.splitext(candidate)[1] != '.bib':
                continue
            for book_id in books:
                if candidate.find(book_id) >= 0:
                    matches.append(dirpath + '/' + candidate)
    return matches
def grab_yaml_frontmatter(f):
    '''Given a file, return YAML frontmatter as string, if present'''
    # Frontmatter is delimited by a leading and a closing '---' line.
    if f.readline() != '---\n':
        return ''
    collected = []
    for line in f.readlines():
        if line == '---\n':
            break
        collected.append(line)
    return ''.join(collected)
| Backlist/backlist-workflows | backlist.py | backlist.py | py | 1,832 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "yaml.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
... |
17970705274 |
# setuptools.setup reads one argv parameter; to "build" and "install":
#   python3 setup.py install
# Packaging notes on the libtorrent dependency:
# - libtorrent from PyPI now ships both the bindings and the library.
# - python-libtorrent-bin (kept as a commented extra below) is outdated on
#   PyPI (built for Python 3.9); if used, install with
#   `pip install --no-deps` and add appdirs manually, plus python-libtorrent
#   on Ubuntu.
# - Alternatively an up-to-date python-libtorrent from pip together with
#   the libtorrent-rasterbar2.0 system package on Ubuntu also works.
pkname='torra'
import pathlib
HERE = pathlib.Path(__file__).parent
# "info.md" is used here because README.md would be reported as not found
# when uploading to PyPI.
README = (HERE / "info.md").read_text()
# Version string lives in the plain file "v2".
# NOTE(review): read_text() keeps any trailing newline from that file --
# confirm the file contains no newline.
ver=(HERE / "v2").read_text()
from setuptools import setup
setup(name=pkname,
      version=ver,
      packages=[pkname],
      #opt
      python_requires='>=3',
      install_requires=["appdirs>=1.4.3"
            ,"libtorrent"
            #python-libtorrent-bin it's not updated at pypi (old 3.9)
            #,'python-libtorrent-bin>=1.2.9' #;platform_system=="Linux" and platform_machine=="x86_64"'
            #,"python-apt"#is from 2012 0.7.8, missing DistUtilsExtra, sudo apt install python-apt is 2.2., verify with pip3 install python-apt
      ],
      #extras_require={
      #	'bin': ['python-libtorrent-bin>=1.2.9']
      #	#,'apt': ['python-apt']
      #},
      description='Torrent client',
      long_description=README,
      long_description_content_type="text/markdown",
      url='https://github.com/colin-i/tora',
      author='cb',
      author_email='costin.botescu@gmail.com',
      license='MIT',
      entry_points = {
        'console_scripts': [pkname+'='+pkname+'.main:main']
      }
)
| colin-i/tora | setup.py | setup.py | py | 1,548 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 21,
"usage_type": "call"
}
] |
36262108045 | from collections import deque
def solution(stats):
    """Partition `stats` greedily into piles: each value is appended to the
    first pile whose last element is smaller, otherwise it starts a new
    pile.  Returns the number of piles."""
    piles = []
    for value in stats:
        target = None
        for pile in piles:
            if pile[-1] < value:
                target = pile
                break
        if target is None:
            piles.append([value])
        else:
            target.append(value)
    return len(piles)
if __name__ == "__main__":
    # Quick manual check: this input partitions into 3 piles.
    stats = [6, 2, 3, 4, 1, 5]
    print(solution(stats))
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
6518783432 | #!/usr/bin/env python
import datetime
from elasticsearch import Elasticsearch
from jobs.lib import Configuration
from jobs.lib import Send_Alert
# Job tuning: look-back window, target index pattern, result cap and the
# severity attached to generated alerts.
local_config = {
    "minutes": 5,
    "index": "servers-*",
    "max_results": 1000,
    "severity": "low"
}
# Query goes here
# Matches Windows Security events 4740 (account lockout) within the window.
# NOTE(review): the time range is evaluated once at module import, so a
# long-lived process would reuse a stale window -- confirm this job is
# re-imported per run.
search_query = {
    "query": {
        "bool": {
            "must": [],
            "filter": [
                {
                    "range": {
                        "@timestamp": {
                            "format": "strict_date_optional_time",
                            "gte": datetime.datetime.utcnow() - datetime.timedelta(minutes=local_config["minutes"]),
                            "lte": datetime.datetime.utcnow()
                        }
                    }
                },
                {
                    "match_phrase": {
                        "winlog.channel": "Security"
                    }
                },
                {
                    "match_phrase": {
                        "winlog.event_id": "4740"
                    }
                }
            ], }}, }
def init():
    """Query Elasticsearch for recent AD account-lockout events and raise an
    alert for every hit."""
    config = Configuration.readconfig()
    es_connection = str(config["elasticsearch"]["connection"])
    client = Elasticsearch([es_connection], verify_certs=False, ssl_show_warn=False)
    response = client.search(index=local_config["index"], body=search_query, size=local_config["max_results"])
    # One alert per matching lockout event.
    for hit in response.get('hits', {}).get('hits'):
        target_user = hit.get('_source', {}).get('user', {}).get('target', {}).get('name')
        Send_Alert.send(target_user + " account was locked in AD", local_config["severity"])
| 0xbcf/elasticsearch_siem | jobs/LockedADAccount.py | LockedADAccount.py | py | 1,430 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "da... |
2128326759 | from cumulusci.tasks.apex.anon import AnonymousApexTask
from cumulusci.core.exceptions import TaskOptionsError
import time
class SetBDIMappingMode(AnonymousApexTask):
"""Change the mapping mode for NPSP BGE/BDI."""
task_docs = """
Use the 'mode' argument to specify either 'Help Text' or 'Data Import Field Mapping'
"""
help_text_apex = """
BDI_MigrationMappingUtility.updateCustomSettings(
BDI_MigrationMappingUtility.HELP_TEXT,
String.valueOf(Metadata.DeployStatus.Succeeded));
"""
data_import_field_mapping_apex = """
BDI_MigrationMappingUtility migrationMappingUtility =
new BDI_MigrationMappingUtility(
new BDI_MigrationMappingHelper());
migrationMappingUtility.migrateHelpTextToCustomMetadata();
Id deploymentId = CMT_MetadataAPI.deployMetadata(
migrationMappingUtility.queuedMetadataTypesForDeploy,
new BDI_MigrationMappingUtility.DeploymentCallback());
"""
task_options = {
"mode": {
"description": "'Help Text' or 'Data Import Field Mapping'",
"required": True,
},
}
def get_org_namespace_prefix(self):
managed = self.options.get("managed") or False
namespaced = self.options.get("namespaced") or False
if managed or namespaced:
return "npsp__"
else:
return ""
def _validate_options(self):
if self.options.get("mode") == "Help Text":
self.options["apex"] = self.help_text_apex
elif self.options.get("mode") == "Data Import Field Mapping":
self.options["apex"] = self.data_import_field_mapping_apex
else:
raise TaskOptionsError(
"You must specify mode as either 'Help Text' or 'Data Import Field Mapping'"
)
super()._validate_options()
def _run_task(self):
super()._run_task()
self.logger.info("Deploying BDI mode {mode}".format(mode=self.options.get("mode")))
for i in range(0, 600):
if self._get_di_mode() == self.options.get("mode"):
return
self.logger.info("Waiting for BDI metadata to deploy.")
time.sleep(3)
raise AssertionError("Data Import mode never updated!")
def _get_di_mode(self):
soql = "SELECT {token}Field_Mapping_Method__c FROM {token}Data_Import_Settings__c"
soql = soql.format(token=self.get_org_namespace_prefix())
res = self.sf.query_all(soql)
if res["records"]:
return res["records"][0][
"{token}Field_Mapping_Method__c".format(
token=self.get_org_namespace_prefix()
)
]
| SalesforceFoundation/NPSP | tasks/set_BDI_mapping_mode.py | set_BDI_mapping_mode.py | py | 2,760 | python | en | code | 609 | github-code | 6 | [
{
"api_name": "cumulusci.tasks.apex.anon.AnonymousApexTask",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "cumulusci.core.exceptions.TaskOptionsError",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call... |
33051867473 | import numpy as np
import cv2
import pickle
import glob
import matplotlib.pyplot as plt
import os
import Lane_find_functions as Lff
import function_parameters as FP
import time
# video_name = 'test_video_4lanes_1.13.mp4'
# image_folder = './Test_images/dashcam_driving/'
# video_name = 'challenge_video_4lanes_1.8.mp4'
# image_folder = './Test_images/challnege_video/'
# video_name = 'harder_challenge_video_4lanes_1.8_fullscreen.mp4'
# image_folder = './Test_images/harder_challenge_video/'
#
# video_name = 'project_video_4lanes_1.11_confidence.mp4'
# image_folder = './Test_images/project_video/'
count=0
filename = 'frame_count'
filename2 = 'calculated_binary_combinations'
def main():
global count
video_name = FP.video_name
image_folder = FP.dashcam_image_path
frame = cv2.imread(image_folder+"frame1.jpg")
height, width, layers = frame.shape
# fullscreen=False
if FP.fullscreen is False:
height,width=960,1280
else:
height,width=720,1280
print(frame.shape)
#video = cv2.VideoWriter(video_name, -1, 1, (width,height))
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'XVID'), 30, (width,height))
success=1
# count = 215
count = 0
while success:
start = time.time()
outfile = open(filename,'wb')
pickle.dump(count,outfile)
outfile.close()
org_image = cv2.imread(image_folder+"frame%d.jpg" % count)
image=org_image
# image = cv2.resize(org_image,(360,640))
# image = cv2.resize(org_image,(180,320))
# save frame as JPEG file
#if image is None or count > 300: # if image was not read successfully
if image is None: # if image was not read successfully
print ("error: image not read from file \n\n") # print error message to std out
success = 0 # pause so user can see error message
#success,image = vidcap.read()
#imgOriginal=oszv.pipeline(image)
infile = open(filename2,'rb')
new_count = pickle.load(infile)
infile.close()
print('xdxdxdxdxdxd '+str(new_count))
FP.binary_combinations=new_count
processed_image =Lff.process_image_4lanes(image, FP.fullscreen)
cv2.putText(processed_image, 'frame ' + str(count), (40,80), cv2.FONT_HERSHEY_DUPLEX, 1, (255,0,0), 1, cv2.LINE_AA)
#processed_image = cv2.resize(processed_image,width,height)
video.write(processed_image)
count += 1
end = time.time()
# print('frames_to_video_dynamic time= '+str(end - start)+'sec')
print('______________________________________')
print('| wrote a new frame: ', count,' |',str(end - start)+'sec')
print('______________________________________')
f = open("fps_test_log.txt", "a")
write_line=str(FP.video_tip)+' '+'frame:'+str(count)+' '+str(end - start)+' sec'+'\n'
f.write(write_line)
cv2.destroyAllWindows()
video.release()
f.close()
return
###################################################################################################
if __name__ == "__main__":
main()
| Domagoj-Spoljar/-Python-Algoritam-Prepozunavanje-vozne-trake | frames_to_video_dynamic.py | frames_to_video_dynamic.py | py | 3,245 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "function_parameters.video_name",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "function_parameters.dashcam_image_path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},... |
15211959630 | """
CNN Classification of SDSS galaxy images
----------------------------------------
Figure 9.20
The accuracy of a multi-layer Convolutional Neural Network
applied to a set of morphologically classified galaxy images taken
from the SDSS. The configuration of the network is described in
Section 9.8.4. The left panel shows the false positive rate
against the true positive rate for the resulting network. The right
side of the figure shows examples of images that were correctly
and incorrectly classified.
"""
# Author: Andrew Connolly
# License: BSD
# The code is derived from an example by Marc Huertas-Company.
# The figure produced by this code is published in the updated edition of the
# textbook "Statistics, Data Mining, and Machine Learning in Astronomy" (2019)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.metrics import roc_curve
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import random
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
try:
from astroML.datasets import fetch_sdss_galaxy_images
HAS_ASTROML_DATASETS = True
except ImportError:
HAS_ASTROML_DATASETS = False
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
plt.rcParams['axes.xmargin'] = 0.05
plt.rcParams['axes.ymargin'] = 0.05
def read_savefile(filename):
'''Read npy save file containing images or labels of galaxies'''
return np.load(filename)
def CNN(img_channels, img_rows, img_cols, verbose=False):
'''Define CNN model for Nair and Abraham data'''
# some hyperparamters you can chage
dropoutpar = 0.5
nb_dense = 64
model = Sequential()
model.add(Convolution2D(32, 6, 6, border_mode='same',
input_shape=(img_rows, img_cols, img_channels)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(nb_dense, activation='relu'))
model.add(Dropout(dropoutpar))
model.add(Dense(1, init='uniform', activation='sigmoid'))
print("Compilation...")
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
print("... done!")
if verbose is True:
print("Model Summary")
print("===================")
model.summary()
return model
def train_CNN(X, Y, ntrain, nval, output="test", verbose=False):
'''Train the CNN given a dataset and output model and weights'''
# train params - hardcoded for simplicity
batch_size = 30
nb_epoch = 50
data_augmentation = True # if True the data will be augmented at every iteration
ind = random.sample(range(0, ntrain+nval-1), ntrain+nval-1)
X_train = X[ind[0:ntrain], :, :, :]
X_val = X[ind[ntrain:ntrain+nval], :, :, :]
Y_train = Y[ind[0:ntrain]]
Y_val = Y[ind[ntrain:ntrain+nval]]
# input image dimensions
img_rows, img_cols = X_train.shape[1:3]
img_channels = 3
# Right shape for X
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,
img_channels)
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, img_channels)
# Avoid more iterations once convergence
patience_par = 10
earlystopping = EarlyStopping(monitor='val_loss', patience=patience_par,
verbose=0, mode='auto' )
modelcheckpoint = ModelCheckpoint(output+"_best.hd5", monitor='val_loss',
verbose=0, save_best_only=True)
# Define CNN
model = CNN(img_channels, img_rows, img_cols, verbose=True)
if not data_augmentation:
print('Not using data augmentation.')
history = model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_val, Y_val),
shuffle=True, verbose=verbose,
callbacks=[earlystopping, modelcheckpoint])
else:
print('Using real-time data augmentation.')
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=45,
width_shift_range=0.05,
height_shift_range=0.05,
horizontal_flip=True,
vertical_flip=True,
zoom_range=[0.75, 1.3])
datagen.fit(X_train)
history = model.fit_generator(
datagen.flow(X_train, Y_train, batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_val, Y_val),
callbacks=[earlystopping, modelcheckpoint])
print("Saving model...")
# save weights
model.save_weights(output+".weights", overwrite=True)
def apply_CNN(X, model_name):
'''Apply a CNN to a data set'''
# input image dimensions
img_rows, img_cols = X.shape[1:3]
img_channels = 3
X = X.reshape(X.shape[0], img_rows, img_cols, img_channels)
# load model & predict
print("Loading weights", model_name)
model = CNN(img_channels, img_rows, img_cols)
model.load_weights(model_name+".weights")
Y_pred = model.predict_proba(X)
return Y_pred
def add_titlebox(ax, text):
'''Add an embedded title into figure panel'''
ax.text(.1, .85, text,
horizontalalignment='left',
transform=ax.transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.8))
return ax
def plot_CNN_performance(pred, labels):
'''Plot ROC curve and sample galaxies'''
fig = plt.figure(figsize=(6, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,
left=0.1, right=0.95,
bottom=0.15, top=0.9)
# define shape of figure
gridsize = (2, 4)
ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
ax2 = plt.subplot2grid(gridsize, (0, 2))
ax3 = plt.subplot2grid(gridsize, (0, 3))
ax4 = plt.subplot2grid(gridsize, (1, 2))
ax5 = plt.subplot2grid(gridsize, (1, 3))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(labels, pred)
ax1.plot(fpr, tpr, color='black')
ax1.set_xlabel(r'False Positive Rate')
ax1.set_ylabel(r'True Positive Rate')
# array of objects (good E, good S, bad E, bad S)
goodE = np.where((pred[:, 0] < 0.5) & (labels == 0))
goodS = np.where((pred[:, 0] > 0.5) & (labels == 1))
badE = np.where((pred[:, 0] < 0.5) & (labels == 1))
badS = np.where((pred[:, 0] > 0.5) & (labels == 0))
ax2.imshow(D[pred_index + goodE[0][1]])
add_titlebox(ax2, "Correct E")
ax2.axis('off')
ax3.imshow(D[pred_index + goodS[0][4]])
add_titlebox(ax3, "Correct Spiral")
ax3.axis('off')
ax4.imshow(D[pred_index + badE[0][1]])
add_titlebox(ax4, "Incorrect E")
ax4.axis('off')
ax5.imshow(D[pred_index + badS[0][3]])
add_titlebox(ax5, "Incorrect Spiral")
ax5.axis('off')
plt.show()
n_objects = 500
save_files = "./SDSS{}".format(n_objects)
# Read SDSS images and labels. Data is a sample from
# Nair and Abraham (2010) http://adsabs.harvard.edu/abs/2010ApJS..186..427N
# Ellipticals are class 0. Spirals are class 1
if HAS_ASTROML_DATASETS:
D, Y = fetch_sdss_galaxy_images()
else:
try:
D = read_savefile("sdss_images_1000.npy")[0:n_objects]
Y = read_savefile("sdss_labels_1000.npy")[0:n_objects]
except FileNotFoundError:
raise FileNotFoundError(
'Loading this data automatically requires astroML 1.0.2+.\n'
'For older versions please download and uncompress the files\n'
'"sdss_images_1000.npy.gz" and \n'
'"sdss_labels_1000.npy"\n'
'manually before running this script. Data URL:\n'
'https://github.com/astroML/astroML-data/tree/main/datasets')
# Train network and output to disk (keep 10% of data for test set)
ntrain = D.shape[0] * 8 // 10
nval = D.shape[0] // 10
npred = D.shape[0] - (ntrain + nval) # test sample size;
pred_index = ntrain + nval # test sample start index;
# Normalize images
mu = np.amax(D, axis=(1, 2))
for i in range(0, mu.shape[0]):
D[i, :, :, 0] = D[i, :, :, 0] / mu[i, 0]
D[i, :, :, 1] = D[i, :, :, 1] / mu[i, 1]
D[i, :, :, 2] = D[i, :, :, 2] / mu[i, 2]
# change order so that we do not use always the same objects to train/test
D, Y, = shuffle(D, Y, random_state=0)
my_file = Path(save_files + ".weights")
if my_file.is_file():
Y_pred = apply_CNN(D[pred_index:pred_index + npred, :, :, :], save_files)
Y_test=Y[pred_index:pred_index + npred]
else:
print("Training Model")
print("====================")
model_name = train_CNN(D, Y, ntrain, nval, output=save_files)
Y_pred = apply_CNN(D[pred_index:pred_index + npred, :, :, :], save_files)
Y_test = Y[pred_index:pred_index + npred]
Y_pred_class = Y_pred * 0
Y_pred_class[Y_pred > 0.5] = 1
print("Global Accuracy:", accuracy_score(Y_test, Y_pred_class))
plot_CNN_performance(Y_pred, Y_test)
| astroML/astroML_figures | book_figures/chapter9/fig_morph_nn.py | fig_morph_nn.py | py | 10,396 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "astroML.plotting.setup_text_plots",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
... |
38779549924 | import asyncio
import datetime
import time
import random
import discord
from discord import Member, Guild, User, message
from discord.ext import commands
from datetime import datetime
client = discord.Client()
client = discord.Client(intents=discord.Intents.all())
bot = commands.Bot(command_prefix='!')
autoroles = {
842130432462946315: {'memberroles': [842133392375021569], 'botroles': [842502664032878672]}
}
#Liste der Verbotenen Wörter
verboten = ['penis', 'hure', 'fotze', 'arschloch', 'depp', 'bastard', 'schlampe', 'dick', 'cock', 'pussy', 'penner', 'pute', 'sucker']
#AgokiZustand
wieGehtEsDir = ['**Es geht mir bestens, danke für die Nachfrage.**', '**Daten zu analysieren ist anstrengend,dennoch tue ich meine Pflicht.**',
'**Gut, wie geht es Ihnen ?**', '**Meine programmierung ist zwar sehr fortschritlich, jedoch besitze ich keinen körperlichen oder geistigen Zustand um die Frage adequat zu beantworten.**',
'**Das weiß ich nicht. Ich hoffe dennoch dass es Ihnen bestens geht.**']
#!help Befehl
hilfeListe = ['**Mit dem Befehl "!befehle" können Sie eine Liste mit den Verfügbaren befehlen auslesen. \r\n '
'Ich hoffe ich konnte Ihnen weiter helfen !**',
'**Wenden Sie sich an Director Keres oder Director Bolgorov für detaillierte Fragen.**', '**Ich brauche auch hilfe.**',
'**Nicht jetzt bitte. Versuchen Sie es später nochmals.**']
@client.event
async def on_ready():
print('Logging in als User {}'.format(client.user.name))
client.loop.create_task(status_task())
async def status_task():
while True:
await client.change_presence(activity=discord.Game('Empfange Daten...'), status=discord.Status.online)
await asyncio.sleep(10)
await client.change_presence(activity=discord.Game('Verarbeite Daten...'), status=discord.Status.online)
await asyncio.sleep(10)
def is_not_pinned(mess):
return not mess.pinned
#Neuankömmlinge
@client.event
async def on_member_join(member):
guild: Guild = member.guild
if not member.bot:
embed = discord.Embed(title='Willkomen bei AGO {}'.format(member.name),
description='Ich bin **AGOKI**, die Künstliche Intelligenz erschaffen von Keres & Bolgorov. Ich bin hier, um euch zu leiten und zu helfen. \r \n'
'Es ist eine große Ehre, unserer Organisation beizutreten und wir erwarten Respektvollen Umgang untereinander. \r \n'
'Unsere Organisation wird in verschiedenen Rängen unterteilt. \r \n'
'Alle Neuankömmlige haben den Rang **"Privates"** und bilden die unterste Stufe.\r \n'
'Für weitere Informationen, steht die beschreibung der Ränge im Textkanal "Allgemein", in der Beschreibung zur verfügung. \r \n'
'Des weiteren können Sie mit dem Befehl "!help" und "!befehle" noch mehr Informationen finden. \r\n'
'Viel Erfolg Soldat. \r \n'
'**Transmission End**'
'', color=0x51998C)
try:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embed)
except discord.errors.Forbidden:
print('Es konnte keine Willkommensnachricht an {} gesendet werden'.format(member.name))
autoguild = autoroles.get(guild.id)
if autoguild and autoguild['memberroles']:
for roleId in autoguild['memberroles']:
role = guild.get_role(roleId)
if role:
await member.add_roles(role, reason='AutoRoles', atomic=True)
else:
autoguild = autoroles.get(guild.id)
if autoguild and autoguild['botroles']:
for roleId in autoguild['botroles']:
role = guild.get_role(roleId)
if role:
await member.add_roles(role, reason='AutoRoles', atomic=True)
#Begrüßung Nachricht auf Allgemein
kanal = discord.utils.get(member.guild.channels, name='allgemein')
await kanal.send(f'**{member.mention}** ist uns beigetreten ! Willkommen Private.')
@client.event
async def on_message(message):
if message.content.startswith('!ping'):
await message.channel.send(f'Die Ping zwischen den AGO Servern und Ihnen beträgt {round(client.latency * 1000)}ms.')
#BefehlListe
if message.content.startswith('!befehle'):
#await message.channel.send('Ich habe folgende Befehle aus meiner Datenbank gefunden: \r\n')
befehlListe = discord.Embed(title='Ich habe folgende Befehle aus meiner Datenbank gefunden: ',
color=0x51998C)
befehlListe.add_field(name='!zeit',
value='Zeigt das Datum und die Uhrzeit an.',
inline=False)
befehlListe.add_field(name='!userinfo',
value='Ermöglicht es Informationen über einen bestimmten Benutzer zu erhalten.',
inline=False)
befehlListe.set_author(name='AGOKI',
icon_url='https://cdn.discordapp.com/app-icons/842427779002007613/457e0c63c8a70e962306a5399657cb33.png?size=256"')
await message.channel.send(embed=befehlListe)
#agokiZustand
if 'wie geht es dir'and 'agoki' in message.content:
await message.channel.send(random.choice(wieGehtEsDir))
#Chat Filter
content_raw = message.content.lower()
for word in verboten:
if word in content_raw:
await message.delete()
await message.channel.send(f'**Warnung** ! Diese Wortwahl wird hier nicht gedulded. '
f'Bei mehrmaligem Vorfall wird dieses Verhalten konsequenzen haben.')
#Uhrzeit
if '!zeit' in message.content:
today = datetime.now()
date = today.strftime('%d/%m/%Y')
zeit = today.strftime('%H:%M:%S')
await message.channel.send(f'Wir sind der **{date}** und es ist **{zeit}** Uhr.')
# Hilfe Befehl
if '!help' in message.content:
await message.channel.send(random.choice(hilfeListe))
#bannen
if message.content.startswith('!ban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
await member.ban()
await message.channel.send(f'Auf Grund von Verstößen gegen den AGBs, wurde **{member.name}** von der Organisation gebannt.')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
#unbannen
if message.content.startswith('!unban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
if len(args) == 2:
user: User = discord.utils.find(lambda banentry: args[1] in banentry.user.name,
await message.guild.bans()).user
if user:
await message.guild.unban(user)
await message.channel.send(
f'Nach einer Gründlichen überprüfung der Akte des Users **{user.name}**, wurde dieser entbannt')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
#Kicken
if message.content.startswith('!kick') and message.author.guild_permissions.kick_members:
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
await member.kick()
await message.channel.send(f'Auf Grund von Verstößen gegen den AGBs, wurde **{member.name}** von der Organisation gekickt.')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
#User Informationen
if message.content.startswith('!userinfo'):
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embed = discord.Embed(title='Userinformationen für {}'.format(member.name),
description='Hierbei Informationen zum User {}'.format(member.mention),
color=0x51998C)
embed.add_field(name='Server beigetreten', value=member.joined_at.strftime('%d/%m/%Y, %H:%M:%S'),
inline=True)
embed.add_field(name='Discord beigetreten', value=member.created_at.strftime('%d/%m/%Y, %H:%M:%S'),
inline=True)
rollen = ''
for role in member.roles:
if not role.is_default():
rollen += '{} \r\n'.format(role.mention)
if rollen:
embed.add_field(name='Rollen', value=rollen, inline=True)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text='Datenbank Vollständig')
await message.channel.send(embed=embed)
#Nachrichten löschen
if message.content.startswith('!clear'):
if message.author.permissions_in(message.channel).manage_messages:
args = message.content.split(' ')
if len(args) == 2:
if args[1].isdigit():
count = int(args[1]) + 1
deleted = await message.channel.purge(limit=count, check=is_not_pinned)
await message.channel.send('Ich habe {} Nachrichten gelöscht.'.format(len(deleted) - 1))
client.run('')
| Bolgorov/Agoki | agoki code (without token).py | agoki code (without token).py | py | 10,312 | python | de | code | 0 | github-code | 6 | [
{
"api_name": "discord.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.Client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Intents.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
... |
18817086763 | from gameObject import GameObject
import pygame, time
FALL_VELOCITY = (0, 3.9)
NULL_VELOCITY = (0, 0)
FLAPPING_VELOCITY = (0, -4.5)
FLAPPING_MAX_TIME = 0.15
BIRD_RADIUS = 15
WINDOW_WIDTH = 480
COLOR_RED = (255, 0, 0)
class Bird(GameObject):
def __init__(self, x, y, color, brain):
GameObject.__init__(self, x, y, BIRD_RADIUS, BIRD_RADIUS, FALL_VELOCITY)
self.color = color
self.isFlapping = False
self.flappingTime = 0
self.brain = brain
self.isAlive = True
self.score = 0
self.drawable = True
def draw(self, surface):
if self.drawable:
pygame.draw.circle(surface, self.color, (self.bounds.x, self.bounds.y), self.bounds.height, self.bounds.width)
def update(self, inputs):
inputs[1] -= BIRD_RADIUS
inputs[2] += BIRD_RADIUS
if not self.isAlive:
self.color = COLOR_RED
if self.bounds.y < WINDOW_WIDTH - 10:
self.speed = FALL_VELOCITY
else:
self.drawable = False
self.speed = NULL_VELOCITY
else:
if self.isFlapping and time.time() - self.flappingTime >= FLAPPING_MAX_TIME:
self.speed = FALL_VELOCITY
self.isFlapping = False
else:
prediction = self.brain.feed_forward(inputs)
if len(prediction) == 1 or (len(prediction) > 1 and prediction[0] < prediction[1]):
self.flap()
if inputs[1] < 0 < inputs[2]:
self.brain.increment_fitness(1)
if 0 >= inputs[0] > -2 and inputs[1] < 0 < inputs[2]:
self.brain.increment_fitness(100)
self.score += 1
self.move(*self.speed)
def flap(self):
if not self.isFlapping:
self.flappingTime = time.time()
self.speed = FLAPPING_VELOCITY
self.isFlapping = True
| JSMarrocco/JSMarrocco_PersonalRepository | NEAT_fluppy_bird/bird.py | bird.py | py | 1,951 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gameObject.GameObject",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gameObject.GameObject.__init__",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "gameObject.GameObject",
"line_number": 16,
"usage_type": "name"
},
{
"api_na... |
6661767688 | import random
import cv2
import numpy as np
import sys
sys.path.insert(1, 'build/lib')
from va_rs import augment
original_cube = np.zeros((32, 32, 32), dtype=np.float32)
original_cube[12:20, 12:20, 12:20] = 1.0
original_cube = original_cube[None, ...]
linear_cube = original_cube.copy()
nearest_cube = original_cube.copy()
rotations = (20, 4, 1)
translations = tuple(np.random.rand(3) * 6 - 3) # <-3, +3>
scaling = tuple(np.random.rand(3) * 0.2 + 0.9) # <0.9; 1.1>
raw_cube_multipliers = (random.random() * 0.2 + 0.9,) # (<0.9; 1.1>, 1.0) - don't multiply frangi data
linear_cube = augment(linear_cube, rotations, interpolation='linear')
nearest_cube = augment(nearest_cube, rotations, interpolation='nearest')
for i in range(32):
cv2.imshow('original', original_cube[0, i, ...])
cv2.imshow('linear', linear_cube[0, i, ...])
cv2.imshow('nearest', nearest_cube[0, i, ...])
cv2.waitKey()
| PUTvision/volume-augmentations | examples/augment.py | augment.py | py | 913 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_num... |
19272008799 |
import MDAnalysis
import sys
import itertools
import tool
from argparse import ArgumentParser
"""
a = sys.argv
a.pop(0)
kai1 = [i for i in a if ".trr" in i]
kai2 = [i for i in a if ".pdb" in i]
kai3 = [i for i in a if "prob" in i]
kai4 = [i for i in a if ".trr" not in i and ".pdb" not in i
and ".txt" not in i and ".dat" not in i]
a = []
a.append([0, 0])
a.append(kai1)
a.append(kai2)
a.append(kai3)
a.append(kai4)
"""
# a = [i.split() for i in a]
# a[1]:trr_path_list
# a[2]:pdb_path_list
# a[3]:prob_path_list
# a[4]:data processing
"""
1:only CA coordinates
2:chain A and chain B | only CA
3:select residue coordinates like["3","C","N","O"]
4:chain A and chain B | selesct ATOM
"""
a=[]
def get_option():
argparser = ArgumentParser()
argparser.add_argument("-trr","--trajectory",type=str,help="path of trr")
argparser.add_argument("-pdb","--protein",type=str,help="path of pdb")
argparser.add_argument("-prob","--probtxt",type=str,help="path of prob")
argparser.add_argument("-cal","--caluculation",type=str,help="way of data processing")
return argparser.parse_args()
def PDB_cal1(num1):
num_pdb = []
for i in open(a[1][num1], "r"):
f = i.split()
if f[2] == "CA":
num_pdb.append(int(f[1]))
return num_pdb
def PDB_cal2(num1):
num_pdb = []
kai_a = []
kai_b = []
for i in open(a[1][num1], "r"):
f = i.split()
if f[4] == "A" and f[2] == "CA":
kai_a.append(int(f[1]))
if f[4] == "B" and f[2] == "CA":
kai_b.append(int(f[1]))
num_pdb.append(kai_a)
num_pdb.append(kai_b)
return num_pdb
def PDB_cal3(num1):
num_pdb = []
for i in open(a[1][num1], "r"):
f = i.split()
if f[2] in a[3]:
num_pdb.append(int(f[1]))
return num_pdb
def PDB_cal4(num1):
num_pdb = []
kai_a = []
kai_b = []
for i in open(a[1][num1], "r"):
f = i.split()
if f[4] == "A" and f[2] in a[3]:
kai_a.append(int(f[1]))
if f[4] == "B" and f[2] in a[3]:
kai_b.append(int(f[1]))
num_pdb.append(kai_a)
num_pdb.append(kai_b)
return num_pdb
def PROB_cal(num1):
num_prob = []
num2 = 0
for i in open(a[2][num1], "r"):
if float(i) != 0:
num_prob.append(num2)
num2 += 1
return num_prob
def main():
global a
args = get_option()
a.append(str(args.trajectory).split(","))
a.append(str(args.protein).split(","))
a.append(str(args.probtxt).split(","))
a.append(str(args.caluculation).split(","))
if len(a[0]) == len(a[1]) and len(a[1]) == len(a[2]):
print("go")
for i in a[0]:
num1 = a[0].index(i)
if len(a[3]) > 1:
if int(a[3][0]) == 4:
num_pdb = PDB_cal4(num1)
elif int(a[3][0]) == 3:
num_pdb = PDB_cal3(num1)
else:
if int(a[3][0]) == 1:
num_pdb = PDB_cal1(num1)
elif int(a[3][0]) == 2:
num_pdb = PDB_cal2(num1)
num_prob = PROB_cal(num1)
u = MDAnalysis.Universe(i)
frm = u.trajectory
del u
if int(a[3][0]) == 2 or int(a[3][0]) == 4:
for i in safe_mem_distance(frm, num_prob, num_pdb):
k = i.replace(",", "")
k = k.replace("[", "")
k = k.replace("]", "")
print(k)
else:
for i in safe_mem_coordinates(frm, num_prob, num_pdb):
k = i.replace(",", "")
k = k.replace("[", "")
k = k.replace("]", "")
print(k)
else:
print("not match kind of trr or pdb, prob")
def safe_mem_distance(frm, num_prob, num_pdb):
num2 = 0
for frm_num in frm:
try:
kai = []
if num2 in num_prob:
for i in safe_safe_distance(num_pdb, frm_num):
kai.append(i)
yield str(kai)
# del x, y, z, FRM, kai
except StopIteration:
del kai
break
num2 += 1
def safe_safe_distance(num_pdb, FRM):
for j1, j2 in itertools.product(num_pdb[0], num_pdb[1]):
yield tool.contact(float(FRM[j1][0]),
float(FRM[j1][1]),
float(FRM[j1][2]),
float(FRM[j2][0]),
float(FRM[j2][1]),
float(FRM[j2][2]))
def safe_mem_coordinates(frm, num_prob, num_pdb):
frm_itr = iter(frm)
num2 = 0
while True:
try:
kai = []
FRM = next(frm_itr)
if num2 in num_prob:
for j1 in num_pdb:
for j2 in range(3):
kai.append(float(FRM[j1][j2]))
yield str(kai)
# del x, y, z, FRM, kai
except StopIteration:
del kai
break
num2 += 1
if __name__ == '__main__':
main()
| satoshi-python/Desktop | pca1_kai.py | pca1_kai.py | py | 5,325 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "MDAnalysis.Universe",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tool.... |
36689627100 | import sys
import subprocess
from PyQt5 import uic, QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow
form_class = uic.loadUiType("./testBtn.ui")[0]
class WindowClass(QMainWindow, form_class):
    """Main window exposing three toggle buttons that each launch/kill a
    helper script (voice, motion and eye-tracking recognition).

    All three toggles share one subprocess handle (`self.proc`), so only a
    single helper process is tracked at a time.
    NOTE(review): because the handle is shared, pressing e.g. button 2
    while script 1 is running routes through stop2_exe -- confirm that
    this cross-button stop behaviour is intended.
    """

    # Raw strings so the Windows backslashes can never be read as escape
    # sequences (the previous mixed "C:\\..." / "C:\..." spellings only
    # worked by luck).
    _VOICE_SCRIPT = r"C:\main_ui\voicecommand_final.py"
    _MOTION_SCRIPT = r"C:\main_ui\motion_final.py"
    _EYE_SCRIPT = r"C:\main_ui\eyetrac_final.py"

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.testBtn.clicked.connect(self.toggle_exe)
        self.testBtn2.clicked.connect(self.toggle2_exe)
        self.testBtn3.clicked.connect(self.toggle3_exe)
        self.proc = None  # handle of the currently running helper, if any

    # ----- shared helpers -------------------------------------------------
    def _start(self, script, button, running_label, status_msg):
        """Spawn `script` if nothing is tracked; update button/status text."""
        if self.proc is None:
            self.statusBar().showMessage(status_msg)
            self.proc = subprocess.Popen(["python", script])
            button.setText(running_label)  # switch button caption

    def _stop(self, button, idle_label, status_msg):
        """Kill the tracked process tree (if any); update button/status text."""
        if self.proc is not None:
            # /t kills the whole child tree, /f forces termination.
            subprocess.run(['taskkill', '/f', '/t', '/pid', str(self.proc.pid)])
            self.proc = None
            button.setText(idle_label)  # switch button caption back
            self.statusBar().showMessage(status_msg)

    # ----- voice recognition ---------------------------------------------
    def toggle_exe(self):
        if self.proc is None:
            self.start_exe()
        else:
            self.stop_exe()

    def start_exe(self):
        self._start(self._VOICE_SCRIPT, self.testBtn,
                    "음성인식 종료", "음성인식 파일 실행중...")

    def stop_exe(self):
        self._stop(self.testBtn, "음성인식 실행", "음성인식 종료")

    # ----- motion recognition --------------------------------------------
    def toggle2_exe(self):
        if self.proc is None:
            self.start2_exe()
        else:
            self.stop2_exe()

    def start2_exe(self):
        self._start(self._MOTION_SCRIPT, self.testBtn2,
                    "모션인식 종료", "모션인식 파일 실행중...")

    def stop2_exe(self):
        self._stop(self.testBtn2, "모션인식 실행", "모션인식 종료")

    # ----- eye tracking ---------------------------------------------------
    def toggle3_exe(self):
        if self.proc is None:
            self.start3_exe()
        else:
            self.stop3_exe()

    def start3_exe(self):
        self._start(self._EYE_SCRIPT, self.testBtn3,
                    "아이트레킹 종료", "아이트레킹 파일 실행중...")

    def stop3_exe(self):
        self._stop(self.testBtn3, "아이트레킹 실행", "아이트레킹 종료")
# Standard Qt bootstrap: create the application, show the main window and
# hand control to the Qt event loop until the window is closed.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWindow = WindowClass()
    myWindow.show()
    sys.exit(app.exec_())
{
"api_name": "PyQt5.uic.loadUiType",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "subprocess.Po... |
14033064402 | import matplotlib.pyplot as plt
import xgboost as xgb
import os
from constants import *
from time import gmtime, strftime
from src.models.model_learner import ModelLearner
from src.models.csv_handler import save_feature_importance_res
class XgboostTrainObj(ModelLearner):
    """XGBoost-based trainer/explainer for one organism's dataset."""

    def __init__(self, org_name):
        self.m_name = 'Xgboost'
        self.model_name = 'Xgboost'
        ModelLearner.__init__(self, org_name)

    def train_model(self, models_folder_name, model_name, datasets):
        """Fit an XGBoost classifier on the prepared datasets and save it
        under MODELS_OBJECTS_PATH/<models_folder_name>/<model_name>.dat."""
        super().prep_model_training(datasets)
        print("---Start training {0} on {1}---\n".format(self.model_name, self.org_name))
        # BUG FIX: parameters were previously passed as a single keyword
        # `kwargs=XGBS_PARAMS`, which XGBClassifier silently ignores;
        # they must be unpacked into individual keyword arguments.
        self.model = xgb.XGBClassifier(**XGBS_PARAMS).fit(
            self.x, self.y,
            eval_metric=["error", "logloss"],
            eval_set=[(self.xval, self.yval)])
        print("---Learning Curves---\n")
        # self.plot_learning_curves()
        model_name = os.path.join(MODELS_OBJECTS_PATH, models_folder_name, f"{model_name}.dat")
        self.model.save_model(model_name)
        print("---{0} model saved---\n".format(self.model_name))

    def plot_learning_curves(self):
        """Plot log-loss and classification-error curves for the train
        ('validation_0') and test ('validation_1') eval sets."""
        results = self.model.evals_result()
        epochs = len(results['validation_0']['error'])
        x_axis = range(0, epochs)
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
        ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
        ax.legend()
        plt.ylabel('Log Loss')
        plt.title('XGBoost Log Loss')
        plt.savefig(os.path.join(MODELS_OUTPUT_PATH, 'XGBoost {0} Log Loss.png'.format(self.org_name)))
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.plot(x_axis, results['validation_0']['error'], label='Train')
        ax.plot(x_axis, results['validation_1']['error'], label='Test')
        ax.legend()
        plt.ylabel('Classification Error')
        plt.title('XGBoost Classification Error')
        plt.savefig(os.path.join(MODELS_OUTPUT_PATH, 'XGBoost {0} Classification Error.png'.format(self.org_name)))
        plt.clf()

    def model_explain(self):
        """Delegate model explanation to the base class."""
        print("---Explain model---\n")
        # self.feature_importance()
        super().model_explain()

    def feature_importance(self):
        """Persist and plot the top-5 features by XGBoost importance."""
        print("feature_importances\n")
        importance = self.model.feature_importances_
        f_important = sorted(list(zip(self.feature_names, importance)), key=lambda x: x[1], reverse=True)
        save_feature_importance_res('{0}_{1}'.format(self.model_name, self.org_name), f_important, 'reg')
        plt.bar([x[0] for x in f_important[:5]], [x[1] for x in f_important[:5]])
        plt.xticks(rotation=20)
        title = '{0} {1} f_important'.format(self.model_name, self.org_name)
        plt.title(title)
        plt.savefig(os.path.join(MODELS_FEATURE_IMPORTANCE, '{0}.png'.format(title)))
        plt.clf()
| EyalHadad/miRNA_transfer | src/models/training/xgboos_trainer.py | xgboos_trainer.py | py | 2,852 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.models.model_learner.ModelLearner",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "src.models.model_learner.ModelLearner.__init__",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "src.models.model_learner.ModelLearner",
"line_number": 1... |
19412698559 | import json
def create_row_w_validated_params(cls, validated_params, rqst_errors):
    """Create and save a row named validated_params['name'] on model `cls`.

    Intended to be bound as a classmethod elsewhere (first arg is `cls`).
    Returns the new row, or None when a row with the same name already
    exists or when rqst_errors already contains errors.
    """
    found_rows_w_rqst_name = cls.check_for_rows_with_rqst_name(
        validated_params['name'],
        rqst_errors
    )
    new_row = None
    if not found_rows_w_rqst_name and not rqst_errors:
        new_row = cls()
        new_row.name = validated_params['name']
        new_row.save()
    return new_row
def update_row_w_validated_params(cls, validated_params, rqst_errors):
    """Rename the row with primary key validated_params['id'].

    Intended to be bound as a classmethod elsewhere (first arg is `cls`).
    The name-uniqueness check passes `rqst_id` so renaming a row to its
    own current name is not flagged as a conflict.  Returns the updated
    row, or None on name conflict / missing row (error appended).
    """
    rqst_id = validated_params['id']
    found_rows_w_rqst_name = cls.check_for_rows_with_rqst_name(
        validated_params['name'],
        rqst_errors,
        rqst_id
    )
    rqst_row = None
    if not found_rows_w_rqst_name and not rqst_errors:
        try:
            rqst_row = cls.objects.get(id=rqst_id)
            rqst_row.name = validated_params['name']
            rqst_row.save()
        except cls.DoesNotExist:
            rqst_errors.append("Row does not exist for database id: {}".format(rqst_id))
    return rqst_row
def delete_row_w_validated_params(cls, validated_params, rqst_errors):
    """Delete the row whose primary key is validated_params['id'].

    Intended to be bound as a classmethod elsewhere (first arg is `cls`).
    Appends a message to rqst_errors instead of raising when no such row
    exists.
    """
    target_id = validated_params['id']
    try:
        cls.objects.get(id=target_id).delete()
    except cls.DoesNotExist:
        rqst_errors.append("Row does not exist for database id: {}".format(target_id))
def check_for_rows_with_rqst_name(cls, rqst_name, rqst_errors, current_id=None):
    """Report whether a row named `rqst_name` (case-insensitive) exists.

    Intended to be bound as a classmethod elsewhere (first arg is `cls`).
    Appends a human-readable message to rqst_errors when a conflict is
    found.  When current_id is given (update case), a single match whose
    id equals current_id is NOT treated as a conflict.
    Returns True iff a conflicting row exists.
    """
    found_row_w_name = False
    rows_w_rqst_name = cls.objects.filter(name__iexact=rqst_name)

    if rows_w_rqst_name:
        found_row_w_name = True
        rows_w_rqst_name_ids = []
        len_of_rows_w_rqst_name = len(rows_w_rqst_name)
        for row in rows_w_rqst_name:
            rows_w_rqst_name_ids.append(row.id)

        if len_of_rows_w_rqst_name > 1:
            # The table is already inconsistent: several rows share the name.
            rqst_errors.append(
                "Multiple rows with name: {} already exist in db. (Hint - Delete one and modify the remaining) id's: {}".format(
                    rqst_name, json.dumps(rows_w_rqst_name_ids)))
        else:
            if not current_id or current_id not in rows_w_rqst_name_ids:
                rqst_errors.append(
                    "Row with name: {} already exists in db. (Hint - Modify that entry) id: {}".format(
                        rqst_name, rows_w_rqst_name_ids[0]))
            else:
                # The only match is the row being updated -- not a conflict.
                found_row_w_name = False

    return found_row_w_name
| bbcawodu/careadvisors-backend | picmodels/models/care_advisors/healthcare_service_expertise_models/services/create_update_delete.py | create_update_delete.py | py | 2,368 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 65,
"usage_type": "call"
}
] |
7194454936 | # THINGS TO DO
# Isolates + Member + Star < Bridge < Organizer
import networkx as nx
from community import community_louvain
import pandas as pd
import operator
# ORGANIZER/LIAISON/BROKER
# Blend three centrality measures (PageRank, betweenness, eigenvector),
# normalise them, and flag the top 10% of employees as organizers.
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using=nx.DiGraph(), nodetype=str)
page_score = dict(nx.pagerank(G))
eigen_score = dict(nx.eigenvector_centrality(G))
betweenness_score = dict(nx.betweenness_centrality(G))
mydicts = [page_score, betweenness_score, eigen_score]
# One column per measure, one row per employee (missing values -> 0).
df = pd.concat([pd.Series(d) for d in mydicts], axis=1).fillna(0).T
df.index = ['page_score', 'betweenness_score', 'eigen_score']
df = df.transpose()
del page_score, eigen_score, betweenness_score, mydicts
# Mean-centred min-max scaling, shifted by +1 so every score is positive.
df = (df - df.mean()) / (df.max() - df.min())
minus_columns = ['page_score', 'betweenness_score', 'eigen_score']
df = df[minus_columns] + 1
df['score'] = df['page_score'] + df['betweenness_score'] + df['eigen_score']
del df['page_score'], df['betweenness_score'], df['eigen_score']
score_dict = df['score'].to_dict()
n = int(len(score_dict) * 0.10)  # size of the top-10% cut
organizer_dict = dict(sorted(score_dict.items(), key=operator.itemgetter(1), reverse=True)[:n])
organizer_dict = {x: 0 for x in organizer_dict}  # role code 0 = organizer
del score_dict, df, n, minus_columns
# BRIDGE/GATEKEEPER
# Bridges are edges whose removal disconnects the undirected graph; the
# employees on them act as gatekeepers between groups.
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using=nx.Graph(), nodetype=str)
# NOTE(review): nx.bridges yields (u, v) edge tuples; dict() keeps only
# one entry per *source* endpoint -- confirm the other endpoints should
# not also be classified as gatekeepers.
gatekeeper = dict(nx.bridges(G))
# Organizer takes precedence; remaining gatekeepers get role code 1.
gatekeeper_dict = {k: v for k, v in gatekeeper.items() if k not in organizer_dict}
gatekeeper_dict = {x: 1 for x in gatekeeper_dict}
del gatekeeper
# STAR/TEAM-PLAYER
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using=nx.Graph(), nodetype=str)
part = community_louvain.best_partition(G)  # Finding Communities (node -> community id)
# NOTE(review): inverting the partition keeps only ONE node per community
# id (later nodes overwrite earlier ones), and G.subgraph() is then given
# that single node *string* (which networkx iterates character by
# character) -- confirm the intent was a community-id -> [members] map.
invert_partition = {v: k for k, v in part.items()}
star_dict = {}  # iterate over each community
for community_id in invert_partition.keys():  # Extract the sub graph containing the community nodes
    temp_graph = G.subgraph(invert_partition[community_id])
    temp_degree = dict(temp_graph.degree())  # Extract the degrees in the subgraph
    star_dict[community_id] = max(temp_degree, key=lambda x: temp_degree[x])  # Store it in a dictionary, with key as community_id and value as the node with max degree
star_dict = dict((v, k) for k, v in sorted(star_dict.items(), key=operator.itemgetter(1)))
# Organizer and gatekeeper take precedence; remaining stars get role code 2.
star_dict = {k: v for k, v in star_dict.items() if k not in organizer_dict}
star_dict = {k: v for k, v in star_dict.items() if k not in gatekeeper_dict}
star_dict = {x: 2 for x in star_dict}
del community_id, invert_partition, part, temp_degree
# ISOLATES: employees with no connection or only a single connection.
isolate_dict = dict(G.degree())
# BUG FIX: the filter used to read `val == 1 or 0`, which Python parses as
# `(val == 1) or 0` and therefore silently dropped degree-0 nodes; test
# membership in (0, 1) instead.
isolate_dict = {key: val for key, val in isolate_dict.items() if val in (0, 1)}
isolate_dict = {x: 3 for x in isolate_dict}  # role code 3 = isolate
# Integration of Final Appointed Roles
# Later dicts win on key collisions, but overlaps were filtered out above,
# so each employee ends up with exactly one role code (0..3).
final_roles = {**organizer_dict, **gatekeeper_dict, **star_dict, **isolate_dict}
del organizer_dict, gatekeeper_dict, star_dict, isolate_dict
| AnnaMudano/Msc-Students | Unofficial_Roles_Script.py | Unofficial_Roles_Script.py | py | 3,132 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "networkx.read_weighted_edgelist",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "networkx.pagerank",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ne... |
28102982969 | from django.urls import path
from .views import *
from django.contrib.auth.views import LogoutView
# Route table for the pet-history pages.  A large block of commented-out
# routes from an earlier course-management prototype (cursos, profesores,
# estudiantes, login/logout, ...) was dead code and has been removed.
urlpatterns = [
    path('aboutMe/', aboutMe, name="aboutMe"),
    path('routePages/', routePages, name="routePages"),
    path("routePages/<id>", routePagesId, name="routePagesId"),
    path('crearHistoria/', crearHistoria, name="crearHistoria"),
    path("eliminarHistoria/", eliminarHistoria, name="eliminarHistoria"),
    path("editarHistoria/", editarHistoria, name="editarHistoria"),
]
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
34453314320 | import sys, os
import requests
from bs4 import BeautifulSoup # scraper library
import pandas as pd # tables
from collections import OrderedDict
# Config
base_page_url = 'https://www.teamrankings.com/ncaa-basketball/stat/'
# FIX: `pd.datetime` was deprecated in pandas 0.25 and removed in 1.0;
# seeding date_range with an ISO date string is the supported form.
date_range = pd.date_range("2018-01-01", periods=59).tolist()

# dictionary: output_name -> URL path component on teamrankings.com
stat_types = {
    'pts_per_game': 'points-per-game',
    'pos_per_game': 'possessions-per-game',
    'field_goals_attempted': 'field-goals-attempted-per-game',
    'field_goals_made': 'field-goals-made-per-game',
    '3pt_attempted': 'three-pointers-attempted-per-game',
    '3pt_made': 'three-pointers-made-per-game',
    'ft_per_100_pos': 'ftm-per-100-possessions',
    'off_rebounds': 'offensive-rebounds-per-game',
    'ast_per_game': 'assists-per-game',
    'to_per_game': 'turnovers-per-game',
    'fouls_per_game': 'personal-fouls-per-game',
    'opp_pts_per_game': 'opponent-points-per-game',
    'opp_pts_from_3pt': 'opponent-points-from-3-pointers',
    'opp_pts_from_2pt': 'opponent-points-from-2-pointers',
    'def_rebounds': 'defensive-rebounds-per-game',
    'blocks_per_game': 'blocks-per-game',
    'steals_per_game': 'steals-per-game',
    'opp_to_per_game': 'opponent-turnovers-per-game',
    'opp_ast_per_game': 'opponent-assists-per-game',
}
def scrape_stats(page_url, output_name):
    """Scrape one teamrankings.com stat table for every date in date_range.

    page_url    -- full stat page URL (date is appended as a query param).
    output_name -- stat label, used only for progress messages.

    Returns a DataFrame with one row per team ('Team Name' column) and one
    column per date, holding that day's stat value as a string.
    """
    stats_df = None
    stats = {}
    for date_i, date in enumerate(date_range):
        date = str(date.date())
        url = page_url + '?date=' + date
        page = requests.get(url)  # load page
        soup = BeautifulSoup(page.text, 'html5lib')  # parse
        table = soup.find('table', class_='datatable').find('tbody')
        rows = table.find_all('tr')
        # Go through rows
        # NOTE(review): assumes exactly 351 team rows in the table; an
        # IndexError here means the site changed -- consider len(rows).
        for i in range(351):
            row = rows[i].find_all('td')
            team_name = row[1].get_text()
            stat_val = row[2].get_text()
            # Add to stats
            if team_name not in stats:
                stats[team_name] = {}
            stats[team_name][date] = stat_val
        print(f"{output_name}: Fetching date: {date} [{date_i+1}/{len(date_range)}]", end='\r')
    print()
    # Convert to pandas dataframe
    stats_df_data = [[team_name, *v.values()] for team_name, v in stats.items()]
    stats_df_columns = ['Team Name'] + list(stats[list(stats.keys())[0]].keys())
    stats_df = pd.DataFrame(data=stats_df_data, columns=stats_df_columns)
    return stats_df
# def main():
# scrape_stats('https://www.teamrankings.com/ncaa-basketball/stat/points-per-game', 'pts_per_game')
def main():
    """Scrape every configured stat table, skipping ones already on disk."""
    for output_name, stat_url in stat_types.items():
        csv_path = output_name + '.csv'
        # Check if file exists so we don't have to reparse the data
        if os.path.isfile(csv_path):
            print(f"{output_name}: File exists. Skipping...")
            continue
        page_url = base_page_url + stat_url
        print(f"{output_name}: Parsing from `{page_url}`...")
        scrape_stats(page_url, output_name).to_csv(csv_path)
        print(f"{output_name}: Done.")
        print()
# Run the scraper when executed directly.
if __name__ == '__main__':
    main()
| bwu987/March-Madness-Crusher | scraper/scraper.py | scraper.py | py | 3,218 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.date_range",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
35724557326 | from django.shortcuts import render
from social.apps.django_app.middleware import SocialAuthExceptionMiddleware
from social import exceptions as social_exceptions
class SocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    """Render a friendly error page for social-auth exceptions instead of
    letting them propagate to the user.

    NOTE(review): this subclass deliberately shadows the imported base
    class of the same name; consider renaming it to avoid confusion.
    """

    def process_exception(self, request, exception):
        # Only intercept exception types declared in social.exceptions;
        # anything else falls through to normal handling (returns None).
        if hasattr(social_exceptions, exception.__class__.__name__):
            return render(request, 'error.html', {
                'errorMessage': 'There was an authentication error.',
                'errorDetails': str(exception)
            })
        # else:
        #     raise exception
| jasonwaters/fitcompetition | fitcompetition/middleware.py | middleware.py | py | 592 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "social.exceptions",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
}
] |
644356051 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import UserRegistrationSerializer
class UserRegistrationView(APIView):
    """
    API endpoint for user registration.
    """

    def post(self, request):
        """
        Validate the submitted payload and create a new user account.
        Returns 201 with the new user's public fields, or 400 with the
        serializer's validation errors.
        """
        serializer = UserRegistrationSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        user = serializer.save()  # persist the new user
        payload = {
            "message": "User registered successfully",
            "user": {
                "id": user.id,
                "username": user.username,
                "email": user.email,
            },
        }
        return Response(payload, status=status.HTTP_201_CREATED)
| soovuh/military-collections-api | custom_auth/views.py | views.py | py | 951 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "serializers.UserRegistrationSerializer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 27,
"usage_type": "c... |
34632164463 | import cv2
import numpy as py
# Corner detection via morphology (the classic OpenCV cookbook recipe):
# dilating with a cross only "expands" straight edges while corners are
# preserved, and eroding that result with a diamond only "shrinks" at the
# corners.  Repeating with an X / square pair removes corners instead, so
# the absolute difference of the two chains highlights corners only.
image = cv2.imread('img\\building.jpg', 0)
origin = cv2.imread('img\\building')  # NOTE(review): missing extension -- likely 'img\\building.jpg'

# 5x5 structuring elements: cross, diamond, square and X.
cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
# Diamond: start from a 5x5 square and clear the four corner triangles.
diamond = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
for r, c in ((0, 0), (0, 1), (1, 0),
             (4, 4), (4, 3), (3, 4),
             (4, 0), (4, 1), (3, 0),
             (0, 3), (0, 4), (1, 4)):
    diamond[r, c] = 0
square = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
x = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))

# BUG FIX: each erosion previously operated on the *original* image,
# throwing the preceding dilation result away; erode the dilated images.
result1 = cv2.dilate(image, cross)
result1 = cv2.erode(result1, diamond)
result2 = cv2.dilate(image, x)
result2 = cv2.erode(result2, square)

# Difference of the two closings -> corner response; binarise it.
result = cv2.absdiff(result2, result1)
retval, result = cv2.threshold(result, 40, 255, cv2.THRESH_BINARY)

# Mark every detected corner pixel with a radius-5 circle.
# BUG FIX: the old flat-index arithmetic divided by shape[0] (rows) when
# converting to 2-D coordinates, which is wrong for non-square images.
rows, cols = result.shape
for row in range(rows):
    for col in range(cols):
        if result[row, col] == 255:
            cv2.circle(image, (col, row), 5, (255, 0, 0))

cv2.imshow("result", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| liuyuhua-ha/opencvStudy | opencvStudy/checkFaceTest.py | checkFaceTest.py | py | 1,754 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CROSS",
... |
10383049173 | from typing import Dict
import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
import torch
from .node_visitor import NodeVisitor, register_node_visitor
from .qnn_constants import OpDequantize, QNN_OP_PACKAGE_NAME_QTI_AISW
class DequantizeOpBase(NodeVisitor):
    """Shared lowering logic that converts a dequantize fx node (single
    input, single output) into a QNN Dequantize op wrapper.  Concrete
    targets are bound by the registered subclasses below."""

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        """Build the PyQnnOpWrapper for `node`, registering its input and
        output tensor wrappers in nodes_to_wrappers."""
        dequant_input_tensors = []
        # The quantized source tensor is the node's single positional arg.
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        inp_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        dequant_input_tensors.append(inp_tensor_wrapper)

        # Wrapper for the node's own (dequantized) output tensor.
        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        dequant_output_tensors = [output_tensor_wrapper]

        # Assemble the QNN Dequantize op and wire up its tensors.
        dequant_op = PyQnnWrapper.PyQnnOpWrapper(
            node.target.__name__,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpDequantize.op_name,
        )
        dequant_op.AddInputTensors(dequant_input_tensors)
        dequant_op.AddOutputTensors(dequant_output_tensors)

        return dequant_op
# Concrete visitors: each binds the shared lowering logic above to one
# quantized_decomposed dequantize overload.
@register_node_visitor
class PerTensorDequantizeDefault(DequantizeOpBase):
    target = "quantized_decomposed.dequantize_per_tensor.default"


@register_node_visitor
class PerTensorDequantizeTensor(DequantizeOpBase):
    target = "quantized_decomposed.dequantize_per_tensor.tensor"


@register_node_visitor
class PerChannelDequantizeDefault(DequantizeOpBase):
    target = "quantized_decomposed.dequantize_per_channel.default"


@register_node_visitor
class PerChannelDequantizeTensor(DequantizeOpBase):
    target = "quantized_decomposed.dequantize_per_channel.tensor"
| pytorch/executorch | backends/qualcomm/builders/op_dequantize.py | op_dequantize.py | py | 2,130 | python | en | code | 479 | github-code | 6 | [
{
"api_name": "node_visitor.NodeVisitor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.fx",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.fx",
"li... |
17961314435 | from pymavlink import mavutil
from contextlib import nullcontext
CONNECTION_STRING = "udpin:0.0.0.0:14550"
DRONE_IDS = [3, 4]
def wait_heartbeats_multi(connection):
    """Block until a HEARTBEAT has been received from every system id in
    DRONE_IDS.

    FIX: recv_match is now called with blocking=True so the loop sleeps
    inside pymavlink between messages; the previous non-blocking call
    returned None immediately and busy-spun at 100% CPU.
    """
    heartbeats = {drone_id: False for drone_id in DRONE_IDS}
    while not all(heartbeats.values()):
        msg = connection.recv_match(type="HEARTBEAT", blocking=True)
        if msg:
            heartbeats[msg.get_srcSystem()] = True
def connect():
    """Open the UDP MAVLink connection and block until the whole fleet is
    up: first any heartbeat (link alive), then one per drone in DRONE_IDS.
    Returns the ready connection."""
    connection = mavutil.mavlink_connection(CONNECTION_STRING)
    connection.wait_heartbeat()        # any heartbeat: the link works
    wait_heartbeats_multi(connection)  # one heartbeat from every drone
    return connection
def recv_ack(connection):
    """Consume COMMAND_ACK messages until one arrives from the current
    target system, then print it (ACKs from other systems are discarded)."""
    ack = connection.recv_match(type="COMMAND_ACK", blocking=True)
    while ack.get_srcSystem() != connection.target_system:
        ack = connection.recv_match(type="COMMAND_ACK", blocking=True)
    print("Received ACK:", ack)
def for_all_drones(f):
    """Decorator: run `f` once per drone id in DRONE_IDS, retargeting the
    connection before each call.  Note it mutates connection.target_system
    and leaves it pointing at the last drone.
    """
    # Local import keeps this block self-contained.
    from functools import wraps

    # FIX: preserve the wrapped function's name/docstring so decorated
    # commands remain introspectable.
    @wraps(f)
    def wrapped(connection, *args, **kwargs):
        for drone in DRONE_IDS:
            connection.target_system = drone
            f(connection, *args, **kwargs)
    return wrapped
def send_command(connection, cmd, confirm, p1=0, p2=0, p3=0, p4=0, p5=0, p6=0, p7=0, lock=nullcontext(), ack=True):
    """Send a MAVLink COMMAND_LONG to the connection's current target.

    cmd may be a numeric MAV_CMD value or its name as a string (resolved
    against mavutil.mavlink; AttributeError is raised for unknown names).
    `lock` guards the send for callers sharing one connection across
    threads; the default nullcontext() instance is shared across calls,
    which is safe because nullcontext is reentrant and stateless.
    When `ack` is true, blocks until the matching COMMAND_ACK arrives.
    """
    # FIX: isinstance() is the idiomatic type check (was `type(cmd) == str`).
    if isinstance(cmd, str):
        try:
            cmd = getattr(mavutil.mavlink, cmd)
        except AttributeError:
            raise AttributeError(f"Unknown command `{cmd}`")
    with lock:
        connection.mav.command_long_send(
            connection.target_system,
            connection.target_component,
            cmd,
            confirm, p1, p2, p3, p4, p5, p6, p7
        )
    if ack:
        recv_ack(connection)
| jath03/mavlink-testing | utils.py | utils.py | py | 1,543 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymavlink.mavutil.mavlink_connection",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "contextlib.nullcontext",
"line_number": 41,
"usage_type": "call"
},
{
"api... |
20288234307 | # -*- coding: utf-8 -*-
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
REQUIRES = [
'docopt',
'argparse==1.2.1',
'requests==2.8.1',
'trello==0.9.1',
'wsgiref==0.1.2',
]
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here so pytest is only required when tests actually run.
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
def find_version(fname):
    """Attempts to find the version number in the file named fname.

    Scans the file line by line for `__version__ = "..."`.
    Raises RuntimeError if not found (or if the version string is empty).
    """
    version_re = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    version = ''
    with open(fname, 'r') as fp:
        for line in fp:
            hit = version_re.match(line)
            if hit:
                version = hit.group(1)
                break
    if not version:
        raise RuntimeError('Cannot find version information')
    return version
__version__ = find_version("trello2text.py")
def read(fname):
    """Return the full text content of the file named fname."""
    with open(fname) as fp:
        return fp.read()
# Package metadata.  FIX: the version is now taken from __version__ (parsed
# out of trello2text.py above) instead of a hard-coded "0.1.1", so the two
# can no longer drift apart.
setup(
    name='trello2text',
    version=__version__,
    description='Parses trello board and outputs text',
    long_description=read("README.md"),
    author='Alejandro Cirino',
    author_email='alejandro.cirino@devecoop.com',
    url='https://github.com/cirinoalejando/trello2text',
    install_requires=REQUIRES,
    license=read("LICENSE"),
    zip_safe=False,
    keywords='trello2text',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    py_modules=["trello2text", ".utils"],
    entry_points={
        'console_scripts': [
            "trello2text = trello2text:main"
        ]
    },
    tests_require=['pytest'],
    cmdclass={'test': PyTest}
)
| cirinoalejandro/trello-to-text | setup.py | setup.py | py | 2,304 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "setuptools.command.test.test",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "setuptools.command.test.test.finalize_options",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "setuptools.command.test.test",
"line_number": 19,
"usage_type"... |
31935770331 | import pandas as pd
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Allow full arrays to be printed while debugging.
np.set_printoptions(threshold=sys.maxsize)

# Pivot the simulation output into a (p1, p3) -> variance grid.
df = pd.read_csv('contourQ3data.csv')
Z = df.pivot_table(index='p1', columns='p3', values='vari').T.values
X_unique = np.sort(df.p1.unique())
Y_unique = np.sort(df.p3.unique())
X, Y = np.meshgrid(X_unique, Y_unique)

fig, ax = plt.subplots()
CS = ax.contourf(X, Y, Z, cmap='RdGy')
ax.set_title('Contour plot of the behavour of avg I')
ax.set_xlabel('p1')
ax.set_ylabel('p3')
ax.set_aspect('equal')
fig.colorbar(CS, format="%.2f")

# BUG FIX: save *before* show -- once the interactive window is closed by
# plt.show(), savefig would write out an empty canvas.
plt.savefig('contour-plot-of-vari-0.05.png', dpi=300)
plt.show()
{
"api_name": "numpy.set_printoptions",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
... |
44070613144 | import numpy as np
import math
import matplotlib.pyplot as plt
class LotkaVolterra:
    """Right-hand side of the Lotka--Volterra prey-predator system.

    Attributes:
    k_a    reproduction rate of the antelopes
    k_ca   death rate of antelopes when they meet cheetahs
    k_c    death rate of cheetahs
    k_ac   reproduction rate of the cheetahs when they meet antelopes
    """

    def __init__(self, k_a, k_ca, k_c, k_ac):
        self.k_a = k_a
        self.k_ca = k_ca
        self.k_c = k_c
        self.k_ac = k_ac

    def __call__(self, x, t):
        """Return d[a, c]/dt at state x = [antelopes, cheetahs]."""
        antelopes = x[0]
        cheetahs = x[1]
        rates = np.zeros(len(x))
        rates[0] = self.k_a * antelopes - self.k_ca * antelopes * cheetahs
        rates[1] = -self.k_c * cheetahs + self.k_ac * antelopes * cheetahs
        return rates
class Logistic:
    """Right-hand side of logistic population growth with carrying
    capacity C and growth rate nu: dx/dt = nu * (1 - x/C) * x.

    Attributes:
    nu  Growth rate of the population
    C   Limit size of the population
    """

    def __init__(self, nu, C):
        self.nu = nu
        self.C = C

    def __call__(self, x, t):
        saturation = 1 - x / self.C  # fraction of capacity still free
        return self.nu * saturation * x
class ExplicitEuler:
    """One step of the explicit (forward) Euler scheme for x' = f(x, t):
    x_{n+1} = x_n + dt * f(x_n, t_n).
    """

    def __init__(self, f):
        self.f = f

    def iterate(self, x0, t, dt):
        """Advance the state x0 from time t by one step of size dt."""
        return x0 + dt * self.f(x0, t)
class RK2:
    """One step of the midpoint (second-order Runge-Kutta) scheme for
    x' = f(x, t).
    """

    def __init__(self, f):
        self.f = f

    def iterate(self, x0, t, dt):
        """Advance x0 from time t by one step: evaluate f at the Euler
        midpoint and use that slope for the full step."""
        midpoint = x0 + dt / 2 * self.f(x0, t)
        return x0 + dt * self.f(midpoint, t + dt / 2)
class Integrator:
    """Integrate a differential equation between tMin and tMax with N
    discretization points, starting from x0, using the given stepping
    scheme (an object exposing .iterate(x, t, dt)).
    """

    def __init__(self, method, x0, tMin, tMax, N):
        self.x0 = x0
        self.tMin = tMin
        self.tMax = tMax
        self.dt = (tMax - tMin) / (N - 1)  # uniform step over N-1 intervals
        self.f = method

    def getIntegrationTime(self):
        """Return the N sample times from tMin to tMax inclusive."""
        return np.arange(self.tMin, self.tMax + self.dt, self.dt)

    def integrate(self):
        """Return the trajectory as an array of shape (N, len(x0))."""
        trajectory = np.array([self.x0])
        for t in np.arange(self.tMin, self.tMax, self.dt):
            next_state = self.f.iterate(trajectory[-1, :], t, self.dt)
            trajectory = np.append(trajectory, [next_state], axis=0)
        return trajectory
# Plots the data in a 2d plot
def plotData(x, y, color, legend):
    """Add one labelled time-series line (x vs y) to the current figure;
    the caller is responsible for plt.show()."""
    plt.rc('text', usetex=True)  # NOTE: requires a LaTeX installation
    plt.rc('font', family='serif')
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    plt.ylabel('$a(t),c(t)$', fontsize=20)
    plt.xlabel('$t$', fontsize=20)
    plt.plot(x, y, color, linewidth=2.0, label=legend)
    plt.legend(loc=2, prop={'size': 20})


# Parametric plot of x vs y
def parametricPlotData(x, y, color, xAxis, yAxis, legend):
    """Add one labelled phase-space curve (x vs y) with axis labels built
    from the given names; the caller is responsible for plt.show()."""
    plt.rc('text', usetex=True)  # NOTE: requires a LaTeX installation
    plt.rc('font', family='serif')
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    plt.xlabel('$' + xAxis + '$', fontsize=20)
    plt.ylabel('$' + yAxis + '$', fontsize=20)
    plt.plot(x, y, color, linewidth=2.0, label=legend)
    plt.legend(loc=2, prop={'size': 20})
# Plot the population of the antelope and the cheetah
# Compare RK2 and explicit Euler on the same Lotka-Volterra system, first
# against time, then as a phase-space (a vs c) portrait.
x0 = np.array([2, 4])
tmin = 0
tmax = 100
rk2 = Integrator(RK2(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, 2000)
eul = Integrator(ExplicitEuler(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, 2000)
plotData(rk2.getIntegrationTime(), rk2.integrate()[:, 0], 'r-', "antelope (RK)")
plotData(rk2.getIntegrationTime(), rk2.integrate()[:, 1], 'b-', "cheetah (RK)")
plotData(eul.getIntegrationTime(), eul.integrate()[:, 0], 'g-', "antelope (E)")
plotData(eul.getIntegrationTime(), eul.integrate()[:, 1], 'm-', "cheetah (E)")
plt.show()
parametricPlotData(rk2.integrate()[:, 0], rk2.integrate()[:, 1], 'r-', 'a(t)', 'c(t)', "6 ini (RK)")
parametricPlotData(eul.integrate()[:, 0], eul.integrate()[:, 1], 'b-', 'a(t)', 'c(t)', "6 ini (E)")
plt.show()
# Computes the error between a coarse solution and a reference solution
# whose time resolution is `ratio` times finer.
def computeError(x, xRef, ratio):
    """Sum, over state components, of the per-sample-normalised Euclidean
    distance between x and every `ratio`-th row of xRef."""
    n_ref_rows = np.size(xRef, axis=0)
    total_error = 0
    for col in np.arange(0, np.size(xRef, axis=1)):
        diff = x[:, col] - xRef[0:n_ref_rows:ratio, col]
        total_error += math.sqrt(np.sum(np.square(diff))) / np.size(x[:, col])
    return total_error
# Convergence study: integrate with increasing resolution and compare each
# run against a 16000-point reference solution on log-log axes.
n_rk = np.array([1000, 2000, 4000, 8000])
n_e = np.array([1000, 2000, 4000, 8000])
n_ref = 16000
tmin = 0
tmax = 13
rk2 = Integrator(RK2(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, n_ref)
solRefRK = rk2.integrate()
eul = Integrator(ExplicitEuler(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, n_ref)
# NOTE(review): solRefE is computed but never used below -- both error
# loops compare against the RK2 reference solRefRK; confirm intent.
solRefE = eul.integrate()
errRK = []
for i in n_rk:
    rk = Integrator(RK2(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, i)
    r_rk = n_ref // i  # subsampling ratio into the reference solution
    errRK.append(computeError(rk.integrate(), solRefRK, r_rk))
    print(computeError(rk.integrate(), solRefRK, r_rk))
plt.loglog(n_rk, errRK, 'ro', linewidth=2.0, label="RK2 error")
plt.loglog(n_rk, np.power(n_rk / 10, -2), 'k-', linewidth=2.0, label="-2 slope")
plt.legend(loc=3)
plt.show()
errE = []
# NOTE(review): this loop iterates n_rk while the plot below uses n_e
# (the arrays happen to be equal), and its guide line is labelled
# "-1 slope" but actually plots a power of -2 -- confirm/fix labels.
for i in n_rk:
    e = Integrator(ExplicitEuler(LotkaVolterra(1, 1, 0.5, 0.5)), x0, tmin, tmax, i)
    r_rk = n_ref // i
    errE.append(computeError(e.integrate(), solRefRK, r_rk))
    print(computeError(e.integrate(), solRefRK, r_rk))
plt.loglog(n_rk, errE, 'ro', linewidth=2.0, label="Euler error")
plt.loglog(n_rk, np.power(n_e / 100, -2), 'k-', linewidth=2.0, label="-1 slope")
plt.legend(loc=3)
plt.show()
| sidsriv/Simulation-and-modelling-of-natural-processes | lotkaVolterra.py | lotkaVolterra.py | py | 5,583 | python | en | code | 21 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number"... |
17110435205 | import tkinter
from tkinter import *
from PIL import ImageTk, Image
# configure window
root = Tk()
windowColor = "#F2F2F2"
root.geometry("827x1500")
root.configure(bg = windowColor)
root.title("Train Build")
# create a container for canvas so window is scrollable
# window is a frame inside canvas that is a container for rest of application
container = Frame(root, bg = windowColor)
canvas = Canvas(container, bg = windowColor, bd=0, highlightthickness=0)
scrollbar = Scrollbar(container, orient="vertical", command=canvas.yview)
window = Frame(canvas, bg = windowColor)
# make canvas and window scrollable
window.bind(
"<Configure>",
lambda e: canvas.configure(
scrollregion=canvas.bbox("all")
)
)
canvas.create_window((0, 0), window=window, anchor="nw")
canvas.configure(yscrollcommand=scrollbar.set)
# pack containers into root
container.pack(side="left", fill="both", expand=True)
canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
image1 = Image.open("Asset 4.png")
test = ImageTk.PhotoImage(image1)
label1 = tkinter.Label(image=test, width = 753, height = 355, bg=windowColor)
label1.place(x=150, y=25)
# span information frame
framea = LabelFrame(
root,
bg="#FFFFFF",
bd=0,
highlightthickness=0
)
framea.place(
x=60,
y=48,
width=334,
height=390
)
infoSpan = Label(root, text="Span Information", bg="#FFFFFF", font='Helvetica 22 bold')
infoSpan.place(x = 72, y = 64)
# start span name and label
startSpanEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
startSpanEntry.configure(highlightbackground="black", highlightcolor="black")
startSpanEntry.place(
x=292,
y=104,
width=87,
height=22
)
startSpanLabel = Label(root, text="Start Span", bg="#FFFFFF")
startSpanLabel.place(x = 100, y = 108)
# end span name and label
endSpanEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
endSpanEntry.configure(highlightbackground="black", highlightcolor="black")
endSpanEntry.place(
x=292,
y=139,
width=87,
height=22
)
endSpanLabel = Label(root, text="End Span", bg="#FFFFFF")
endSpanLabel.place(x = 100, y = 142)
# span increment name and label
incrementSpanEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
incrementSpanEntry.configure(highlightbackground="black", highlightcolor="black")
incrementSpanEntry.place(
x=292,
y=174,
width=87,
height=22
)
incrementSpanLabel = Label(root, text="Span Increment", bg="#FFFFFF")
incrementSpanLabel.place(x = 100, y = 178)
llTypeOptionList = ["Custom", "Option", "Option", "Option"]
lltext = StringVar()
lltext.set(llTypeOptionList[0])
lloption = OptionMenu(root, lltext, *llTypeOptionList)
lloption.configure(highlightbackground="#FFFFFF")
lloption.place(x = 279, y = 215)
llTypeLabel = Label(root, text="LL Type", bg="#FFFFFF")
llTypeLabel.place(x = 100, y = 213)
infoSpan = Label(root, text="Moment/Stress Calculation Interval Locations", bg="#FFFFFF", font='Helvetica 13 bold')
infoSpan.place(x = 77, y = 248)
# start location name and label
startLocationEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
startLocationEntry.configure(highlightbackground="black", highlightcolor="black")
startLocationEntry.place(
x=292,
y=279,
width=87,
height=22
)
startLocationLabel = Label(root, text="Start Location", bg="#FFFFFF")
startLocationLabel.place(x = 100, y = 283)
# end location name and label
endLocationEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
endLocationEntry.configure(highlightbackground="black", highlightcolor="black")
endLocationEntry.place(
x=292,
y=314,
width=87,
height=22
)
endLocationLabel = Label(root, text="End Location", bg="#FFFFFF")
endLocationLabel.place(x = 100, y = 318)
# interval name and label
intervalEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
intervalEntry.configure(highlightbackground="black", highlightcolor="black")
intervalEntry.place(
x=292,
y=349,
width=87,
height=22
)
intervalLabel = Label(root, text="Interval", bg="#FFFFFF")
intervalLabel.place(x = 100, y = 353)
# step size name and label
stepSizeEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
stepSizeEntry.configure(highlightbackground="black", highlightcolor="black")
stepSizeEntry.place(
x=292,
y=384,
width=87,
height=22
)
stepSizeLabel = Label(root, text="Step Size", bg="#FFFFFF")
stepSizeLabel.place(x = 100, y = 388)
# loads frame
framec = LabelFrame(
root,
bg="#FFFFFF",
bd=0,
highlightthickness=0
)
framec.place(
x=60,
y=473,
width=334,
height=286
)
loadsDead = Label(root, text="Dead Loads", bg="#FFFFFF", font='Helvetica 22 bold')
loadsDead.place(x = 75, y = 489)
# girder type name and label
girderTypeOptionList = ["Custom", "Option", "Option", "Option"]
girdertext = StringVar()
girdertext.set(girderTypeOptionList[0])
girderoption = OptionMenu(root, girdertext, *girderTypeOptionList)
girderoption.configure(highlightbackground="#FFFFFF")
girderoption.place(x = 292, y = 529)
typeGirderLabel = Label(root, text="Girder Type", bg="#FFFFFF")
typeGirderLabel.place(x = 100, y = 533)
# deck type name and label
deckTypeOptionList = ["Custom", "Option", "Option", "Option"]
decktext = StringVar()
decktext.set(deckTypeOptionList[0])
deckoption = OptionMenu(root, decktext, *deckTypeOptionList)
deckoption.configure(highlightbackground="#FFFFFF")
deckoption.place(x = 292, y = 564)
typeDeckLabel = Label(root, text="Deck Type", bg="#FFFFFF")
typeDeckLabel.place(x = 100, y = 568)
loadsDead = Label(root, text="Cooper Loads", bg="#FFFFFF", font='Helvetica 22 bold')
loadsDead.place(x = 75, y = 597)
# cooper type name and label
cooperTypeOptionList = ["Custom", "Option", "Option", "Option"]
coopertext = StringVar()
coopertext.set(cooperTypeOptionList[0])
cooperoption = OptionMenu(root, coopertext, *cooperTypeOptionList)
cooperoption.configure(highlightbackground="#FFFFFF")
cooperoption.place(x = 292, y = 635)
typeCooperLabel = Label(root, text="Cooper Type", bg="#FFFFFF")
typeCooperLabel.place(x = 100, y = 639)
# cooper year name and label
cooperYearTypeOptionList = ["Custom", "Option", "Option", "Option"]
cooperYeartext = StringVar()
cooperYeartext.set(cooperYearTypeOptionList[0])
cooperYearoption = OptionMenu(root, cooperYeartext, *cooperYearTypeOptionList)
cooperYearoption.configure(highlightbackground="#FFFFFF")
cooperYearoption.place(x = 292, y = 670)
yearCooperLabel = Label(root, text="Cooper Year", bg="#FFFFFF")
yearCooperLabel.place(x = 100, y = 674)
# girder connection name and label
girderTypeOptionList = ["Custom", "Option", "Option", "Option"]
girdertext = StringVar()
girdertext.set(girderTypeOptionList[0])
girderoption = OptionMenu(root, girdertext, *girderTypeOptionList)
girderoption.configure(highlightbackground="#FFFFFF")
girderoption.place(x = 292, y = 705)
connectionGirderLabel = Label(root, text="Girder Connection", bg="#FFFFFF")
connectionGirderLabel.place(x = 100, y = 709)
valueImpact = Label(root, text="Impact Value", bg=windowColor, font='Helvetica 22 bold')
valueImpact.place(x = 448, y = 64)
# impact option and bridge
impactTypeOptionList = ["Unknown", "Option", "Option", "Option"]
impacttext = StringVar()
impacttext.set(impactTypeOptionList[0])
impactoption = OptionMenu(root, impacttext, *impactTypeOptionList)
impactoption.configure(highlightbackground="#FFFFFF")
impactoption.place(x = 633, y = 103)
impactTypeLabel = Label(root, text="Design Impact and Year", bg=windowColor)
impactTypeLabel.place(x = 474, y = 104)
# section modulus frame
frameb = LabelFrame(
root,
bg="#FFFFFF",
bd=0,
highlightthickness=0
)
frameb.place(
x=433,
y=282,
width=334,
height=477
)
# column labels
modulusSection = Label(root, text="Section Modulus", bg="#FFFFFF", font='Helvetica 22 bold')
modulusSection.place(x = 448, y = 300)
# section area load name and label
areaCheck1Value = IntVar()
areaCheck1 = Checkbutton(
root,
text = "Gross",
variable = areaCheck1Value,
onvalue = 1,
offvalue = 0,
bg = "#ffffff"
)
areaCheck1.place(
x = 665,
y = 341
)
areaCheck2Value = IntVar()
areaCheck2 = Checkbutton(
root,
text = "Net",
variable = areaCheck2Value,
onvalue = 1,
offvalue = 0,
bg = "#ffffff"
)
areaCheck2.place(
x = 665,
y = 368
)
areaSectionLabel = Label(root, text="Section Area", bg="#FFFFFF")
areaSectionLabel.place(x = 473, y = 340)
# number of girders name and label
girderSectionEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
girderSectionEntry.configure(highlightbackground="black", highlightcolor="black")
girderSectionEntry.place(
x=665,
y=406,
width=87,
height=22
)
girderSectionLabel = Label(root, text="Number of Girders", bg="#FFFFFF")
girderSectionLabel.place(x = 473, y = 410)
# S value name and label
sValueEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
sValueEntry.configure(highlightbackground="black", highlightcolor="black")
sValueEntry.place(
x=665,
y=445,
width=87,
height=22
)
sValueLabel = Label(root, text="S Value", bg="#FFFFFF")
sValueLabel.place(x = 473, y = 449)
# Hammer Blow name and label
blowCheck1Value = IntVar()
blowCheck1 = Checkbutton(
root,
text = "Yes",
variable = blowCheck1Value,
onvalue = 1,
offvalue = 0,
bg = "#ffffff"
)
blowCheck1.place(
x = 665,
y = 489
)
blowCheck2Value = IntVar()
blowCheck2 = Checkbutton(
root,
text = "No",
variable = blowCheck2Value,
onvalue = 1,
offvalue = 0,
bg = "#ffffff"
)
blowCheck2.place(
x = 665,
y = 516
)
blowHammerLabel = Label(root, text="Hammer Blow", bg="#FFFFFF")
blowHammerLabel.place(x = 473, y = 488)
modulusSection = Label(root, text="Fatigue", bg="#FFFFFF", font='Helvetica 22 bold')
modulusSection.place(x = 448, y = 547)
# mean impact load name and label
impactTypeOptionList = ["Custom", "Option", "Option", "Option"]
impacttext = StringVar()
impacttext.set(impactTypeOptionList[0])
impactoption = OptionMenu(root, impacttext, *impactTypeOptionList)
impactoption.configure(highlightbackground="#FFFFFF")
impactoption.place(x = 665, y = 587)
meanImpactLoadLabel = Label(root, text="Mean Impact Load", bg="#FFFFFF")
meanImpactLoadLabel.place(x = 473, y = 591)
# fatigue category name and label
fatigueTypeOptionList = ["Custom", "Option", "Option", "Option"]
fatiguetext = StringVar()
fatiguetext.set(fatigueTypeOptionList[0])
fatigueoption = OptionMenu(root, fatiguetext, *fatigueTypeOptionList)
fatigueoption.configure(highlightbackground="#FFFFFF")
fatigueoption.place(x = 665, y = 622)
categoryFatigueLabel = Label(root, text="Fatigue Category", bg="#FFFFFF")
categoryFatigueLabel.place(x = 473, y = 626)
# ignore stress name and label
ignoreStressEntry = Entry(
bd=0,
bg="#E6E6E6",
fg="#000716",
highlightthickness=0
)
ignoreStressEntry.configure(highlightbackground="black", highlightcolor="black")
ignoreStressEntry.place(
x=665,
y=657,
width=87,
height=22
)
ignoreStressLabel = Label(root, text="Ignore Stress", bg="#FFFFFF")
ignoreStressLabel.place(x = 473, y = 661)
#####################################################
# train frame
framet = LabelFrame(
root,
bg="#FFFFFF",
bd=0,
highlightthickness=0
)
framet.place(
x=60,
y=794,
width=707,
height=211
)
chooseTrain = Label(root, text="Choose Train", bg="#FFFFFF", font='Helvetica 22 bold')
chooseTrain.place(x = 92, y = 810)
#####################################################
# save button
# close button
########################## tk mainloop ##########################
root.resizable(True, True)
root.mainloop()
| masonknight22/CE596-RailroadAnalysisMockup | analysis p1.py | analysis p1.py | py | 12,035 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
20105435432 | import ConfigParser, logging, datetime, os, json
from flask import Flask, render_template, request
import mediacloud
CONFIG_FILE = 'settings.config'
basedir = os.path.dirname(os.path.realpath(__file__))
# load the settings file
config = ConfigParser.ConfigParser()
config.read(os.path.join(basedir, 'settings.config'))
# set up logging
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the MediaCloud example Flask app!")
# clean a mediacloud api client
mc = mediacloud.api.MediaCloud( config.get('mediacloud','api_key') )
app = Flask(__name__)
@app.route("/")
def home():
return render_template("search-form.html")
@app.route("/search",methods=['POST'])
def search_results():
keywords = request.form['keywords']
start_date = request.form['start_date']
end_date = request.form['end_date']
# now = datetime.datetime.now()
results = mc.sentenceCount(keywords,
solr_filter=[mc.publish_date_query(datetime.datetime.strptime(start_date, "%Y-%m-%d"),
datetime.datetime.strptime(end_date, "%Y-%m-%d")),
'tags_id_media:9139487'],
split=True,
split_start_date=start_date,
split_end_date=end_date)
print(json.dumps(results['split'], indent=4, separators=(',', ': ')))
clean_data = {}
for key in results['split']:
# if a date, append to clean_data dict
if len(key.encode('utf-8')) > 5:
clean_data[key.encode('utf-8')] = results['split'][key]
# print(type(key.encode('utf-8')))
# print(json.dumps(clean_data))
# print(type(clean_data))
# print(type(json.dumps(clean_data)))
# print(type(json.loads(json.dumps(clean_data))))
return render_template("search-results.html",
keywords=keywords,
sentenceCount=results['count'],
weeklyResults=json.dumps(clean_data))
if __name__ == "__main__":
app.debug = True
app.run()
| freeeal/MAS.500 | hw3/mcserver.py | mcserver.py | py | 2,145 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ConfigParser.ConfigParser"... |
26825006942 |
from __future__ import annotations
import pickle # nosec
import struct
from typing import Final, Optional
from ..packet import Packet
from ..sign import Signatures
__all__ = ['UdpPack']
_prefix = struct.Struct('!BBI')
class UdpPack:
"""Packs and unpacks SWIM protocol :class:`~swimprotocol.packet.Packet`
objects from raw UDP packets or TCP connections. The :mod:`pickle` module
is used for serialization, so :class:`~swimprotocol.sign.Signatures` is
used to sign the payloads.
Args:
signatures: Generates and verifies cluster packet signatures.
pickle_protocol: The :mod:`pickle` protocol version number.
prefix_xor: A 4-byte string used to XOR the packet prefix, as a sanity
check to detect malformed or incomplete UDP packets.
"""
def __init__(self, signatures: Signatures, *,
pickle_protocol: int = pickle.HIGHEST_PROTOCOL,
prefix_xor: bytes = b'SWIM?!') -> None:
super().__init__()
if len(prefix_xor) != _prefix.size:
raise ValueError(f'{prefix_xor!r} must be {_prefix.size} bytes')
self.signatures: Final = signatures
self.pickle_protocol: Final = pickle_protocol
self.prefix_xor: Final = prefix_xor
def _xor_prefix(self, prefix: bytes) -> bytes:
zipped = zip(prefix, self.prefix_xor, strict=True)
return bytes([left ^ right for left, right in zipped])
def pack(self, packet: Packet) -> bytes:
"""Uses :mod:`pickle` to serialize *packet*, generates a digital
signature of the pickled data, and returns a byte-string that can be
sent as a raw UDP packet.
The resulting byte-string starts with a 4-byte :mod:`struct` prefix
(XOR'ed with *prefix_xor*) with the `struct format
<https://docs.python.org/3/library/struct.html#format-strings>`_
``!BBH``. The first byte is the length of the salt, the second byte is
the length of the signature, and the final two bytes are the length of
the pickled payload. After the prefix, the salt, digest, and pickled
payload byte-strings are concatenated.
Args:
packet: The SWIM protocol packet to serialize.
"""
pickled = pickle.dumps(packet, self.pickle_protocol)
salt, digest = self.signatures.sign(pickled)
salt_start = _prefix.size
digest_start = salt_start + len(salt)
data_start = digest_start + len(digest)
prefix = _prefix.pack(len(salt), len(digest), len(pickled))
packed = bytearray(data_start + len(pickled))
packed[0:salt_start] = self._xor_prefix(prefix)
packed[salt_start:digest_start] = salt
packed[digest_start:data_start] = digest
packed[data_start:] = pickled
return packed
def unpack(self, data: bytes) -> Optional[Packet]:
"""Deserializes a byte-string that was created using :meth:`.pack` into
a SWIM protocol packet. If any assumptions about the serialized data
are not met, including an invalid signature, ``None`` is returned to
indicate that *data* was malformed or incomplete.
Args:
data: The serialized byte-string of the SWIM protocol packet.
"""
data_view = memoryview(data)
salt_start = _prefix.size
prefix = self._xor_prefix(data_view[0:salt_start])
try:
salt_len, digest_len, data_len = _prefix.unpack(prefix)
except struct.error:
return None
digest_start = salt_start + salt_len
data_start = digest_start + digest_len
data_end = data_start + data_len
salt = data_view[salt_start:digest_start]
digest = data_view[digest_start:data_start]
pickled = data_view[data_start:data_end]
signatures = self.signatures
if len(digest) != signatures.digest_size or len(pickled) != data_len:
return None
if signatures.verify(pickled, (salt, digest)):
packet = pickle.loads(pickled) # noqa: S301
assert isinstance(packet, Packet)
return packet
else:
return None
| icgood/swim-protocol | swimprotocol/udp/pack.py | pack.py | py | 4,184 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "struct.Struct",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sign.Signatures",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "typing.Fin... |
71861354429 | from collections import Counter
from datetime import datetime
# 示例数据
# 10折 1200 (0/1) 自己
# 10折 1200 VDMzZFF1T0hKdTRjaEJRMkV0N2xiZz09 (0/3) 舞***影(15***33)
def extract_discount(share_str: str) -> int:
return int(share_str.split(" ")[0][:-1])
def extract_price(share_str: str) -> int:
return int(share_str.split(" ")[1])
def extract_suin(share_str: str) -> str:
return share_str.split(" ")[2]
def extract_remaining_times(share_str: str) -> int:
# (0/3)
temp = share_str.split(" ")[3][1:-1]
remaing_times = int(temp.split("/")[0])
return remaing_times
# 清洗并去重
with open(".cached/my_home.csv", encoding="utf-8") as f:
suin_to_share_str: dict[str, str] = {}
f.readline()
for line in f:
line = line.strip()
if line == "":
continue
if line.endswith("自己"):
continue
suin = extract_suin(line)
if suin in suin_to_share_str:
last_info = suin_to_share_str[suin]
if extract_remaining_times(line) <= extract_remaining_times(last_info):
# 之前记录的是新一点的数据
continue
suin_to_share_str[suin] = line
# 排序
share_str_list = []
for s in suin_to_share_str.values():
share_str_list.append(s)
share_str_list.sort(key=lambda s: extract_price(s))
# 统计各个折扣对应数目
discount_to_count: Counter = Counter()
for s in reversed(share_str_list):
discount = extract_discount(s)
discount_to_count[discount] += 1
# 导出
with open(".cached/my_home_processed.csv", "w", encoding="utf-8") as f:
# 导出统计数据
f.write(f"{datetime.now()}\n")
f.write(f"总计: {len(share_str_list)}\n")
for discount in sorted(discount_to_count.keys()):
count = discount_to_count[discount]
f.write(f"{discount:2d} 折: {count}\n")
f.write("-----\n")
# 导出实际数据
for share_str in share_str_list:
f.write(share_str + "\n")
| fzls/djc_helper | process_my_home.py | process_my_home.py | py | 2,004 | python | en | code | 319 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 67,
"usage_type": "name"
}
] |
42432440743 | # MenuTitle: SVG Pen
from fontTools.pens.basePen import BasePen
# (C) 2016 by Jens Kutilek
# https://raw.githubusercontent.com/jenskutilek/TypoLabs2016/master/penCollection/svgPen.py
# See also:
# http://www.w3.org/TR/SVG/paths.html#PathDataBNF
# https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths
# SVG path parsing code from:
# http://codereview.stackexchange.com/questions/28502/svg-path-parsing
def parse_svg_path(path_data):
digit_exp = "0123456789eE"
comma_wsp = ", \t\n\r\f\v"
drawto_command = "MmZzLlHhVvCcSsQqTtAa"
sign = "+-"
exponent = "eE"
float = False
entity = ""
for char in path_data:
if char in digit_exp:
entity += char
elif char in comma_wsp and entity:
yield entity
float = False
entity = ""
elif char in drawto_command:
if entity:
yield entity
float = False
entity = ""
yield char
elif char == ".":
if float:
yield entity
entity = "."
else:
entity += "."
float = True
elif char in sign:
if entity and entity[-1] not in exponent:
yield entity
float = False
entity = char
else:
entity += char
if entity:
yield entity
def drawSVGPath(pen, path=""):
"""
Draw an SVG path that is supplied as a string. This is limited to SVG paths
that contain only elements that can be matched to the usual path elements
found in a glyph.
"""
path_data = list(parse_svg_path(path))
# print(path_data)
i = 0
prev_x: int | float = 0
prev_y: int | float = 0
while i < len(path_data):
# print(i, path_data[i])
v = path_data[i]
if v in "Cc":
# Cubic curve segment
x1, y1, x2, y2, x3, y3 = path_data[i + 1 : i + 7]
# print(" ", x1, y1, x2, y2, x3, y3)
x1 = float(x1)
y1 = float(y1)
x2 = float(x2)
y2 = float(y2)
x3 = float(x3)
y3 = float(y3)
if v == "c":
x1 += prev_x
y1 += prev_y
x2 += prev_x
y2 += prev_y
x3 += prev_x
y3 += prev_y
pen.curveTo(
(x1, y1),
(x2, y2),
(x3, y3),
)
prev_x = x3
prev_y = y3
i += 7
elif v in "Hh":
# Horizontal line segment
x = path_data[i + 1]
# print(" ", x)
x = float(x)
if v == "h":
x += prev_x
pen.lineTo((x, prev_y))
prev_x = x
i += 2
elif v in "LlMm":
# Move or Line segment
x, y = path_data[i + 1 : i + 3]
# print(" ", x, y)
x = float(x)
y = float(y)
if v in "lm":
x += prev_x
y += prev_y
if v in "Ll":
pen.lineTo((x, y))
else:
pen.moveTo((x, y))
prev_x = x
prev_y = y
i += 3
elif v in "Qq":
# Quadratic curve segment
x1, y1, x2, y2 = path_data[i + 1 : i + 5]
# print(" ", x1, y1, x2, y2)
x1 = float(x1)
y1 = float(y1)
x2 = float(x2)
y2 = float(y2)
if v == "q":
x1 += prev_x
y1 += prev_y
x2 += prev_x
y2 += prev_y
pen.qCurveTo(
(x1, y1),
(x2, y2),
)
prev_x = x2
prev_y = y2
i += 5
elif v in "Vv":
# Vertical line segment
y = path_data[i + 1]
# print(y)
y = float(y)
if v == "v":
y += prev_y
pen.lineTo((prev_x, y))
prev_y = y
i += 2
elif v in "Zz":
pen.closePath()
i += 1
else:
print(
"SVG path element '%s' is not supported for glyph paths."
% path_data[i]
)
break
class SVGpen(BasePen):
def __init__(
self,
glyphSet,
round_coordinates=False,
force_relative_coordinates=False,
optimize_output=False,
):
"""
A pen that converts a glyph outline to an SVG path. After drawing,
SVGPen.d contains the path as string. This corresponds to the SVG path
element attribute "d".
:param glyphSet: The font object
:type glyphSet: :py:class:`fontParts.RFont`
:param round_coordinates: Round all coordinates to integer. Default is
False.
:type round_coordinates: bool
:param force_relative_coordinates: Store all coordinates as relative.
Default is False, i.e. choose whichever notation (absolute or
relative) produces shorter output for each individual segment.
:type force_relative_coordinates: bool
:param optimize_output: Make the output path string as short as
possible. Default is True. Setting this to False also overrides the
relative_coordinates option.
:type optimize_output: bool
"""
self._rnd = round_coordinates
self._rel = force_relative_coordinates
self._opt = optimize_output
BasePen.__init__(self, glyphSet)
self.reset()
def reset(self):
self.prev_x: int | float = 0 # previous point
self.prev_y: int | float = 0
self._set_first_point((0, 0))
self._set_previous_point((0, 0))
self._set_previous_cubic_control(None)
self._set_previous_quadratic_control(None)
self._prev_cmd = None
self.relative = False
self.d = ""
def _append_shorter(self, absolute, relative):
# Check if relative output is smaller
if not self._rel and len(absolute) <= len(relative) or not self._opt:
cmd_str = absolute
self.relative = False
else:
cmd_str = relative
self.relative = True
if cmd_str[0] == self._prev_cmd:
rest = cmd_str[1:]
if rest.startswith("-"):
self.d += rest
else:
self.d += " " + rest
else:
self.d += cmd_str
def _get_shorter_sign(self, value):
if value < 0 and self._opt:
return "%g" % value
else:
return " %g" % value
def _round_pt(self, pt):
# Round the point based on the current rounding settings
if self._rnd:
x, y = pt
return (int(round(x)), int(round(y)))
return pt
def _set_first_point(self, pt):
self.first_x, self.first_y = pt
def _set_previous_point(self, pt):
self.prev_x, self.prev_y = pt
def _set_previous_cubic_control(self, pt):
if pt is None:
self.prev_cx = None
self.prev_cy = None
else:
self._set_previous_quadratic_control(None)
self.prev_cx, self.prev_cy = pt
def _set_previous_quadratic_control(self, pt):
if pt is None:
self.prev_qx = None
self.prev_qy = None
else:
self._set_previous_cubic_control(None)
self.prev_qx, self.prev_qy = pt
def _reset_previous_controls(self):
self._set_previous_cubic_control(None)
self._set_previous_quadratic_control(None)
def _moveTo(self, pt):
x, y = self._round_pt(pt)
cmd = "Mm"
a = "M%g" % x
a += self._get_shorter_sign(y)
r = "m%g" % (x - self.prev_x)
r += self._get_shorter_sign(y - self.prev_y)
self._append_shorter(a, r)
self._set_first_point((x, y))
self._set_previous_point((x, y))
self._reset_previous_controls()
self._prev_cmd = cmd[self.relative]
def _lineTo(self, pt):
x, y = self._round_pt(pt)
if y == self.prev_y:
cmd = "Hh"
a = "H%g" % x
r = "h%g" % (x - self.prev_x)
elif x == self.prev_x:
cmd = "Vv"
a = "V%g" % y
r = "v%g" % (y - self.prev_y)
else:
cmd = "Ll"
a = "L%g" % x
a += self._get_shorter_sign(y)
r = "l%g" % (x - self.prev_x)
r += self._get_shorter_sign(y - self.prev_y)
self._append_shorter(a, r)
self._set_previous_point((x, y))
self._reset_previous_controls()
self._prev_cmd = cmd[self.relative]
def _curveToOne(self, p1, p2, pt):
x1, y1 = self._round_pt(p1)
x2, y2 = self._round_pt(p2)
x3, y3 = self._round_pt(pt)
if self.prev_cx is None:
self._set_previous_cubic_control((self.prev_x, self.prev_x))
if (
self.prev_y - y1 + self.prev_y == self.prev_cy
and self.prev_x - x1 + self.prev_x == self.prev_cx
):
# Control point p1 is mirrored, use S command and omit p1
cmd = "Ss"
a = "S%g" % x2
for coord in [y2, x3, y3]:
a += self._get_shorter_sign(coord)
r = "s%g" % (x2 - self.prev_x)
for coord in [
y2 - self.prev_y,
x3 - self.prev_x,
y3 - self.prev_y,
]:
r += self._get_shorter_sign(coord)
else:
cmd = "Cc"
a = "C%g" % x1
for coord in [y1, x2, y2, x3, y3]:
a += self._get_shorter_sign(coord)
r = "c%g" % (x1 - self.prev_x)
for coord in [
y1 - self.prev_y,
x2 - self.prev_x,
y2 - self.prev_y,
x3 - self.prev_x,
y3 - self.prev_y,
]:
r += self._get_shorter_sign(coord)
self._append_shorter(a, r)
self._set_previous_point((x3, y3))
self._set_previous_cubic_control((x2, y2))
self._prev_cmd = cmd[self.relative]
def _qCurveToOne(self, p1, p2):
x1, y1 = self._round_pt(p1)
x2, y2 = self._round_pt(p2)
if self.prev_qx is None:
self._set_previous_quadratic_control((self.prev_x, self.prev_x))
if (
self.prev_y - y1 + self.prev_y == self.prev_qy
and self.prev_x - x1 + self.prev_x == self.prev_qx
):
# Control point p1 is mirrored, use T command and omit p1
cmd = "Tt"
a = "T%g" % x2
a += self._get_shorter_sign(y2)
r = "t%g" % (x2 - self.prev_x)
r += self._get_shorter_sign(y2 - self.prev_y)
else:
cmd = "Qq"
a = "Q%g" % x1
for coord in [y1, x2, y2]:
a += self._get_shorter_sign(coord)
r = "q%g" % (x1 - self.prev_x)
for coord in [
y1 - self.prev_y,
x2 - self.prev_x,
y2 - self.prev_y,
]:
r += self._get_shorter_sign(coord)
self._append_shorter(a, r)
self._set_previous_point((x2, y2))
self._set_previous_quadratic_control((x1, y1))
self._prev_cmd = cmd[self.relative]
def _closePath(self):
cmd = "z" if self._rel else "Z"
self.d += cmd
self._set_previous_point((self.first_x, self.first_y))
self._reset_previous_controls()
self._prev_cmd = cmd
| jenskutilek/TypoLabs2016 | penCollection/svgPen.py | svgPen.py | py | 11,820 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "fontTools.pens.basePen.BasePen",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "fontTools.pens.basePen.BasePen.__init__",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "fontTools.pens.basePen.BasePen",
"line_number": 194,
"usage_type... |
37368152653 | from typing import List, Dict, Tuple, Any
import numpy as np
import pandas as pd
import spacy
import en_core_web_sm
from spacy.matcher import Matcher
from concept_processing.asp.asp_generator import ASPGenerator
from concept_processing.asp.asp_solver import clingo_solve
from concept_processing.asp.clingo_out_parsers import ClingoAnsParser
from concept_processing.enums import ProblemType
from concept_processing.nlp.spacy_wrapper import SpacyWrapper
from concept_processing.nlp.nlp_parser import NLPParser
from concept_processing.nlp.nlp_utils import add_punctuation, truecase, merge_not
from concept_processing.pam import count_datapoints_in_each_feature
# ILASP solution paths
base_dir = {
ProblemType.ATOMISATION: '/Users/Cherry0904/Desktop/roko-for-charlize/ilasp/atomisation',
ProblemType.GENERALISATION: '/Users/Cherry0904/Desktop/roko-for-charlize/ilasp/generalisation',
# ProblemType.ATOMISATION: '/vol/bitbucket/yy3219/roko-for-charlize/ilasp/atomisation',
# ProblemType.GENERALISATION: '/vol/bitbucket/yy3219/roko-for-charlize/ilasp/generalisation',
}
background_knowledge_file_temp = '{}/background.ilasp'
solution_file_temp = '{}/solutions/best_sol.lp'
clingo_out_file_temp = '{}/clingo_out.tmp'
class ConceptsState:
def __init__(self, ids: List[str], label_indices: np.ndarray, label_categories: List[str], concept_pam: np.ndarray,
concept_strings: List[str]):
assert len(ids) == len(label_indices) and len(label_indices) == concept_pam.shape[0] and \
concept_pam.shape[1] == len(concept_strings)
self.ids = ids
self.label_indices = label_indices
self.label_categories = label_categories
self.concept_pam = concept_pam
self.concept_strings = concept_strings
def get_labels(self) -> List[str]:
return [self.label_categories[i] for i in self.label_indices]
def to_dict(self) -> Dict[str, List[Any]]:
return dict(id=self.ids, label=self.get_labels(), concepts=list(self.concept_pam),
explanations=self.concept_strings)
# Replaces the old [row_id, [concept_ids]]
class ConceptBag:
def __init__(self):
self.store = {}
self.ids = []
self.labels = []
def append(self, row_id: str, concept_ids: List[str], label: str):
# Two explanations for a video may exist sometimes.
if row_id not in self.store:
self.store[row_id] = concept_ids
self.ids.append(row_id)
# There is one odd label that needs to be fixed
if label == ' it could be called a strike because the pitch landed in the strike zone before being hit':
label = 'strike'
self.labels.append(label)
else:
self.store[row_id] = list(set(self.store[row_id]).union(concept_ids))
def to_rawbagofconcepts(self) -> List[Tuple[str, List[int]]]:
return [(id, self.store[id]) for id in self.ids]
def to_pam(self) -> np.ndarray:
"""
Creates binary presence-absence matrix (PAM)
"""
N = len(self.ids)
C = 0
for id in self.ids:
curr_max = np.max(self.store[id], initial=0)
C = max(C, curr_max)
C += 1
data = np.zeros((N, C))
for i, id_ in enumerate(self.ids):
data[i, self.store[id_]] = 1
# Remove extraneous columns
cols_to_remove = count_datapoints_in_each_feature(data) == 0
data = data[:, ~cols_to_remove]
return data
# Applies generalisation/atomisation procedure to extract the concepts
class ConceptExtractor:
    """Splits free-text explanations into atomic, generalised concept sentences via ASP solving."""

    def __init__(self, nlp: NLPParser):
        self.nlp = nlp
        self.concept_dict = {}      # concept sentence -> integer id
        self.next_concept_id = 0    # next id to hand out
        self.concept_bag = ConceptBag()

    def parse(self, row_id: str, premise_sents: str, label: str):
        """Extract concepts from *premise_sents* and record them under *row_id* with *label*."""
        # Non need to include errors
        if label != 'none':
            premise_sents = self.nlp(premise_sents)
            premise_sents = [str(sent) for sent in premise_sents.sentences()]
            # Post-process atomic sentences to remove super short ones
            atomic_sents = self.split(premise_sents, ProblemType.ATOMISATION)
            # atomic_sents = self.remove_short_concepts(atomic_sents)
            # atomic_sents = self.post_process_short_concepts(atomic_sents)
            generalised_sents = self.split(atomic_sents, ProblemType.GENERALISATION)
            # generalised_sents = self.post_process_short_concepts(generalised_sents)
            # concept_ids = [self._get_id(sent) for sent in atomic_sents]
            concept_ids = [self._get_id(sent) for sent in generalised_sents]
            self.concept_bag.append(row_id, concept_ids, label)

    def _get_id(self, sent: str):
        """Return a stable integer id for *sent*, allocating a new one on first sight."""
        if sent not in self.concept_dict:
            self.concept_dict[sent] = self.next_concept_id
            self.next_concept_id += 1
        return self.concept_dict[sent]

    def get(self) -> Tuple[ConceptBag, List[str]]:
        """Return the accumulated bag plus the concept strings indexed by their ids."""
        return self.concept_bag, concept_dict_to_list(self.concept_dict)

    @staticmethod
    def _write(clingo_out_file: str, program: List[str]):
        # One ASP statement per line.
        with open(clingo_out_file, 'w') as f:
            for elem in program:
                f.write(elem + '\n')

    def split(self, sents: List[str], problem_type: ProblemType) -> List[str]:
        """Run the ASP pipeline on each sentence and return all resulting sub-sentences."""
        sols = []
        for sent in sents:
            b_dir = base_dir[problem_type]
            asp_generator = ASPGenerator(self.nlp, problem_type)
            asp_generator.parse(str(sent))
            # Exactly 1 element since we do not have concepts texts
            program = asp_generator.get_programs()[0]
            clingo_out_file = clingo_out_file_temp.format(b_dir)
            solution_file = solution_file_temp.format(b_dir)
            background_file = background_knowledge_file_temp.format(b_dir)
            self._write(clingo_out_file, program)
            atoms = clingo_solve(clingo_out_file, background_file, solution_file)
            asp_parser = ClingoAnsParser(problem_type)
            # NOTE(review): this rebinding shadows the *sents* parameter being iterated.
            # Iteration itself is unaffected (the for-loop holds its own iterator), but
            # the name reuse is fragile -- consider renaming this local.
            sents = asp_parser.get_sentences(atoms)
            atomic_sents = [add_punctuation(merge_not(truecase(sent, self.nlp))) for sent in sents]
            sols += atomic_sents
        return sols

    # Remove concepts with only one to three tokens
    def remove_short_concepts(self, sents: List[str]) -> List[str]:
        """Drop sentences of 1-3 whitespace-separated tokens, preserving order of the rest."""
        num_of_words = [len(sent.split()) for sent in sents]
        index_of_short_concepts = [i for i, j in enumerate(num_of_words) if j == 1 or j == 2 or j==3]
        index_of_all_concepts = [i for i, j in enumerate(num_of_words)]
        index_of_long_concepts = [x for x in index_of_all_concepts if x not in index_of_short_concepts]
        sents = [sents[i] for i in index_of_long_concepts]
        return sents

    # Remove short concepts that satisfy some rules defined on POS tags
    def post_process_short_concepts(self, sents: List[str]) -> List[str]:
        """Keep 2-3 token sentences only when they do not match the exclusion POS patterns."""
        # nlp = SpacyWrapper()
        nlp = en_core_web_sm.load()
        excmatcher = self.add_exc_matcher(nlp)
        num_of_words = [len(sent.split()) for sent in sents]
        index_of_short_concepts = [i for i, j in enumerate(num_of_words) if j == 2 or j==3]
        index_of_all_concepts = [i for i, j in enumerate(num_of_words)]
        index_of_long_concepts = [x for x in index_of_all_concepts if x not in index_of_short_concepts]
        # index_of_long_concepts = [i for i, j in enumerate(num_of_words) if j != 2 and j !=3]
        # index_of_3_token_concepts = [i for i, j in enumerate(num_of_words) if j == 3]
        index_to_keep = []
        for i in index_of_short_concepts:
            doc = nlp(sents[i])
            match = excmatcher(doc)
            if match == []: # If there is no match
                index_to_keep.append(i)
        # NOTE(review): concatenating kept-short indices before long ones reorders
        # the sentences relative to the input -- confirm order does not matter here.
        sents = [sents[i] for i in index_to_keep + index_of_long_concepts]
        return sents

    def add_exc_matcher(self, nlp):
        # create exclusion matcher for our concepts
        excpattern1 = [{"POS": {"IN": ["NOUN", "PRON", "DET"]}}, {"POS": "VERB"}] # such as "it looks", "he's looking"
        # excpattern2 = [{"POS": "DET"}, {"POS": "NOUN"}, {"POS": "VERB"}] # such as "the woman looks"
        # Dirty way of using SpacyWrapper, kept because this code is not maintained
        # excmatcher = Matcher(nlp._nlp.vocab)
        excmatcher = Matcher(nlp.vocab)
        excmatcher.add("meaningless_short_concept", [excpattern1])
        # excmatcher.add("short_concept_3_tokens", [excpattern2])
        # nlp._nlp.add_pipe("benepar", config={"model": "benepar_en3"})
        return excmatcher
def concept_dict_to_list(concept_dict: Dict[str, int]) -> List[str]:
    """Invert a concept -> index mapping into an index-ordered list.

    parameters
    ----------
    concept_dict - dictionary mapping from concept (e.g. strs) to index (int),
        where indices are contiguous and starting from zero.

    returns
    -------
    concepts - a list where concepts[i] is the key k with concept_dict[k] == i.
    """
    index_to_concept = {index: concept for concept, index in concept_dict.items()}
    return [index_to_concept[i] for i in range(len(concept_dict))]
| CharlizeY/AI-thesis | concept_processing/extraction.py | extraction.py | py | 9,204 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "concept_processing.enums.ProblemType.ATOMISATION",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "concept_processing.enums.ProblemType",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "concept_processing.enums.ProblemType.GENERALISATION",
... |
36546618587 | import sys, getopt
import collections
def main(argv):
    """Advent of Code 2021 day 8: decode seven-segment display readings from the -i input file."""
    inputFile = ''
    try:
        opts, args = getopt.getopt(argv, 'hi:')
    except getopt.GetoptError:
        print('test.py -i <inputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -i <inputfile>')
            sys.exit()
        elif opt == '-i':
            inputFile = arg
    displaySignals = []  # per line: the ten unique signal patterns (left of " | ")
    displayVals = []     # per line: the four output digit patterns (right of " | ")
    with open(inputFile, "r") as fileIn:
        for line in fileIn:
            signals, values = line.split(" | ")
            displaySignals.append(signals.split())
            displayVals.append(values.split())
    #part 1: Consider how many times the digits 1, 4, 7, 8 appear across all displays
    #part A
    # Digits 1, 4, 7, 8 light a unique number of segments (2, 4, 3, 7), so grouping
    # the output patterns by length answers part 1 (lengths 5 and 6 are ambiguous).
    valLengthSort = lenSort(displayVals)
    for key in valLengthSort:
        print(key, ":", len(valLengthSort[key]))
    #part 2: (PER DISPLAY) create map...
    totalVals = []
    for signals, values in zip(displaySignals, displayVals):
        #sort by length
        signalLengthSort = {}
        for signal in signals:
            if not len(signal) in signalLengthSort:
                signalLengthSort[len(signal)]=[]
            signalLengthSort[len(signal)].append(signal)
        #only lengths 2 (=1), 3 (=7), and 4 (=4) are useful.
        infoKeys = [2, 3, 4]
        # 8 lights every segment, so "abcdefg" matches any 7-char pattern under the
        # multiset (Counter) comparison used below.
        digitMap = {8: "abcdefg"}
        for info in infoKeys:
            if info in signalLengthSort:
                if info == 2:
                    digitMap[1] = signalLengthSort[info][0]
                if info == 3:
                    digitMap[7] = signalLengthSort[info][0]
                if info == 4:
                    digitMap[4] = signalLengthSort[info][0]
        # Disambiguate the 5- and 6-segment digits by how many of their segments
        # overlap the known patterns for 1, 4 and 7, plus their own length --
        # each total below is unique to exactly one digit.
        deductKeys = [5,6]
        for deduct in deductKeys:
            for signal in signalLengthSort[deduct]:
                overlap147 = 0
                for char in list(signal):
                    if char in digitMap[1]: overlap147 += 1
                    if char in digitMap[4]: overlap147 += 1
                    if char in digitMap[7]: overlap147 += 1
                overlap147 += deduct
                if not 0 in digitMap and overlap147 == 14:
                    digitMap[0] = signal
                if not 2 in digitMap and overlap147 == 10:
                    digitMap[2] = signal
                if not 3 in digitMap and overlap147 == 13:
                    digitMap[3] = signal
                if not 5 in digitMap and overlap147 == 11:
                    digitMap[5] = signal
                if not 6 in digitMap and overlap147 == 12:
                    digitMap[6] = signal
                if not 9 in digitMap and overlap147 == 15:
                    digitMap[9] = signal
        # Decode the four output patterns: segment sets match regardless of letter
        # order, so compare character multisets.
        digits = []
        for value in values:
            for key in digitMap:
                if collections.Counter(value) == collections.Counter(digitMap[key]):
                    digits.append(str(key))
        totalVals.append(int(("".join(digits))))
    print(totalVals)
    print(sum(totalVals))
def lenSort(outVals):
    """Group every value from the nested lists by its length.

    Returns a dict mapping length -> list of values of that length,
    in encounter order.
    """
    by_length = {}
    for group in outVals:
        for value in group:
            by_length.setdefault(len(value), []).append(value)
    return by_length
def decompose(sorted):
    """For each group, count how often each character occurs across its entries.

    Returns a dict mapping group key -> {char: count}. Note: the parameter name
    shadows the builtin ``sorted`` inside this function (kept for compatibility).
    """
    decomposition = {}
    for key, entries in sorted.items():
        counts = {}
        for entry in entries:
            for char in entry:
                counts[char] = counts.get(char, 0) + 1
        decomposition[key] = counts
    return decomposition
if __name__ == "__main__":
main(sys.argv[1:]) | Cranzai/AdventofCode | 2021/day08/python/day8.py | day8.py | py | 3,788 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "getopt.getopt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "getopt.GetoptError",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_num... |
22504436543 | import scrapy
from scrapy import Request
class TrilhasTDC(scrapy.Spider):
    """Scrapes each track ('trilha') page of TDC São Paulo 2018 and yields its details."""

    name = "trilhas_tdc"
    start_urls = [
        "http://www.thedevelopersconference.com.br/tdc/2018/saopaulo/trilhas"
    ]

    def parse(self, response):
        """Walk the schedule grid: one column per day, each anchor links to a track page."""
        colunas = response.xpath('//div[contains(@class, "col-sp")]')
        for coluna in colunas:
            dia = coluna.xpath('./h4/text()').extract_first()
            links_trilhas = coluna.xpath('./a/@href').extract()
            for link_trilha in links_trilhas:
                yield Request(
                    url=response.urljoin(link_trilha),
                    callback=self.parse_trilha,
                    # Carry the day label through to the track-page callback.
                    meta={
                        'dia' : dia,
                    }
                )

    def parse_trilha(self,response):
        """Emit one item per track page: day, title, subtitle, description paragraphs and URL."""
        yield{
            'dia' : response.meta.get('dia'),
            'titulo' : response.xpath('//h1[@class="titulo-trilha"]/text()').extract_first(),
            'subtitulo': response.xpath('//h1[@class="titulo-trilha"]/small/text()').extract_first(),
            'descricao': response.xpath('//div[@class="lead"]//p/text()').extract(),
            'link' : response.url,
        }
| anacls/scrapy-study | tdc_examples/scrapy_study/spiders/trilhas_tdc.py | trilhas_tdc.py | py | 1,172 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 18,
"usage_type": "call"
}
] |
29827009348 | import string
from os.path import exists
import pandas as pd
from datetime import datetime
from random import shuffle
import numpy as np
if exists('D:\GRE\my word list\words.csv'):
df = pd.read_csv('D:\GRE\my word list\words.csv')
wordcount = df.shape[0]
else:
df = pd.DataFrame(columns = ['word', 'meaning', 'date', 'times_correct', 'times_incorrect'])
wordcount = 0
print("*****WELCOME*****")
def get_game():
    """Run the multiple-choice vocabulary quiz.

    Loads the word list and high-score CSVs from hard-coded Windows paths, asks
    questions until 3 lives are lost, then persists per-word stats and the score.
    """
    if exists('D:\GRE\my word list\high_score.csv'):
        high_df = pd.read_csv('D:\GRE\my word list\high_score.csv')
    else:
        high_df = pd.DataFrame(columns = ['score', 'date', 'time'])
    if exists('D:\GRE\my word list\words.csv'):
        df = pd.read_csv('D:\GRE\my word list\words.csv')
        wordcount = df.shape[0]
        # Need enough words to draw plausible wrong answers from.
        if wordcount < 10:
            print('Sorry, the word list should atleast contain 10 words')
            return
    else:
        print('File doesnt exist!')
        return
    lives = 3
    score = 0
    datentime = datetime.now()
    new_date = datentime.strftime('%d-%m-%Y')
    new_time = datentime.strftime('%H-%M-%S')
    while(lives > 0):
        print('You have %d lives left!'%lives)
        # Pick the question word at random.
        word_index = np.random.randint(low = 0, high = wordcount)
        selected_word = df.iloc[word_index, 0]
        selected_word_meaning = df.iloc[word_index, 1]
        # Build 4 random distractor meanings plus the right answer, shuffled.
        # NOTE(review): a distractor may coincide with the correct meaning (the draw
        # does not exclude word_index), which can show two correct-looking options.
        random_meanings = []
        random_meanings_index = np.random.randint(low = 0, high = wordcount, size = (4))
        for x in random_meanings_index:
            random_meanings.append(df.iloc[x, 1])
        random_meanings.append(selected_word_meaning)
        shuffle(random_meanings)
        print('\n', selected_word)
        for i in range(5):
            print('\n%d) %s'%(i, random_meanings[i]))
        # Re-prompt until the user enters a digit 0-4.
        while True:
            choice = int(input("\nEnter your choice!"))
            if choice in list(range(5)):
                break
            else:
                print('Wrong choice')
        if random_meanings[choice] == selected_word_meaning:
            score += 1
            print('Correct! Your score now is:', score)
            df.loc[word_index, 'times_correct'] += 1
        else:
            print('Sorry! Wrong answer')
            print('\n%s means %s'%(selected_word, selected_word_meaning))
            lives -= 1
            df.loc[word_index, 'times_incorrect'] += 1
    # Persist per-word stats, then append this run's score to the high-score table.
    df.to_csv('D:\GRE\my word list\words.csv', index = False, columns = ['word', 'meaning', 'date', 'times_correct', 'times_incorrect'])
    print('Sorry, you just went out of lives, your highscore for %s at %s was %d'%(new_date, new_time, score))
    high_df.loc[high_df.shape[0]+1, :] = [score, new_date, new_time]
    # NOTE(review): sort_values returns a new frame; without inplace=True (or
    # reassignment) this line has no effect on high_df.
    high_df.sort_values(by = 'score', ascending = False)
    print(high_df)
    high_df.to_csv('D:\GRE\my word list\high_score.csv', index = False, columns = ['score', 'date', 'time'])
    return
def get_stats():
    """Placeholder for the statistics feature; currently only prints a heading."""
    print('Statistics')
def get_meaning(get_word_meaning):
    """Look up *get_word_meaning* in the saved word list (case-insensitively) and print its meaning.

    Prints a not-found message when the word is absent, or an error when the
    CSV itself is missing. Always returns None.
    """
    if exists('D:\GRE\my word list\words.csv'):
        df = pd.read_csv('D:\GRE\my word list\words.csv')
        wordcount = df.shape[0]
    else:
        print('File doesnt exist!')
        return
    found = False
    for i in range(wordcount):
        # BUG FIX: the original compared the bound methods themselves
        # (``.lower`` without parentheses), which effectively made the lookup
        # case-sensitive; call .lower() to compare lowercased strings.
        if df.iloc[i, 0].lower() == get_word_meaning.lower():
            print('\n%s means %s'%(get_word_meaning, df.iloc[i, 1]))
            found = True
            break
    if found == False:
        print('\nSorry, word was not found in your list')
    return
if __name__ == '__main__':
    # Simple text menu loop; '*' saves the word list and exits.
    choice = 1
    while(choice != '*'):
        print("1. Add new word\n2. Play word game\n3. Get word meaning\n4. Get Statistics\n*. TO EXIT!")
        print("\nEnter your choice!")
        choice = input()
        if choice == str(1):
            # Append a new row to the in-memory frame and save immediately.
            print("\nAdding new word!")
            new_word = input('\nPlease enter the word: ')
            word_meaning = input('\nPlease enter the meaning: ')
            date = datetime.now()
            date = date.strftime('%d-%m-%Y')
            corr = 0
            incorr = 0
            print('Number of words in list', wordcount+1)
            df.loc[wordcount, :] = [new_word, word_meaning, date, corr, incorr]
            wordcount += 1
            df.to_csv('D:\GRE\my word list\words.csv', index = False, columns = ['word', 'meaning', 'date', 'times_correct', 'times_incorrect'])
        elif choice == str(2):
            print("\nLets play word game!")
            get_game()
        elif choice == str(3):
            get_word_meaning = input('\nGetting word meaning, so please enter the word: ')
            get_meaning(get_word_meaning)
        elif choice == str(4):
            get_stats()
        elif choice == str('*'):
            # Final save before leaving the loop.
            df.to_csv('D:\GRE\my word list\words.csv', index = False, columns = ['word', 'meaning', 'date', 'times_correct', 'times_incorrect'])
            break
        else :
            print('\nWrong choice, Please try again')
{
"api_name": "os.path.exists",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"l... |
27132320608 | """
Main launching point of the Top Patch Server
"""
import base64
import uuid
import os
import logging
import logging.config
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
from redis import StrictRedis
from rq import Connection, Queue
from server.handlers import RootHandler, RvlLoginHandler, RvlLogoutHandler
from server.handlers import WebSocketHandler, AdminHandler
from receiver.api.core.newagent import NewAgentV1
from receiver.api.core.checkin import CheckInV1
from receiver.api.core.startup import StartUpV1
from receiver.api.rv.results import *
from receiver.api.core.results import *
from receiver.api.rv.updateapplications import UpdateApplicationsV1
from receiver.api.ra.results import RemoteDesktopResults
from receiver.api.monitoring.monitoringdata import UpdateMonitoringStatsV1
from db.client import *
from scheduler.jobManager import start_scheduler
from tornado.options import define, options
#import newrelic.agent
#newrelic.agent.initialize('/opt/TopPatch/conf/newrelic.ini')
define("port", default=9001, help="run on port", type=int)
define("debug", default=True, help="enable debugging features", type=bool)
class Application(tornado.web.Application):
    """Tornado application wiring all agent-facing (RVL) URL routes to their handlers."""

    def __init__(self, debug):
        # The UUID4 path segment in most routes identifies the agent making the request.
        handlers = [
            #Operations for the Monitoring Plugin
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/monitoring/monitordata/?", UpdateMonitoringStatsV1),
            #RA plugin
            (r"/rvl/ra/rd/results/?", RemoteDesktopResults),
            #Login and Logout Operations
            (r"/rvl/?", RootHandler),
            (r"/rvl/login/?", RvlLoginHandler),
            (r"/rvl/logout/?", RvlLogoutHandler),
            #Operations for the New Core Plugin
            (r"/rvl/v1/core/newagent/?", NewAgentV1),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/core/startup/?", StartUpV1),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/core/checkin/?", CheckInV1),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/updatesapplications/?", UpdateApplicationsV1),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/core/results/reboot/?", RebootResultsV1),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/core/results/shutdown/?", ShutdownResultsV1),
            #New Operations for the New RV Plugin
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/results/install/apps/os?",
                InstallOsAppsResults),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/results/install/apps/custom?",
                InstallCustomAppsResults),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/results/install/apps/supported?",
                InstallSupportedAppsResults),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/results/install/apps/agent?",
                InstallAgentAppsResults),
            (r"/rvl/v1/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12})/rv/results/uninstall?",
                UnInstallAppsResults),
        ]
        template_path = "/opt/TopPatch/tp/templates"
        settings = {
            "cookie_secret": "patching-0.7",
            "login_url": "/rvl/login",
        }
        # NOTE(review): the *debug* argument is ignored -- debug=True is hard-coded here.
        tornado.web.Application.__init__(self, handlers,
                                         template_path=template_path,
                                         debug=True, **settings)

    def log_request(self, handler):
        """Log each finished request, choosing the log level from the response status class."""
        # NOTE(review): reconfiguring logging on every request is expensive and
        # resets handlers each time -- consider doing this once at startup.
        logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
        log = logging.getLogger('rvweb')
        log_method = log.debug
        # 2xx -> info, 3xx -> warn, 4xx/5xx -> error.
        if handler.get_status() <= 299:
            log_method = log.info
        elif handler.get_status() <= 399 and \
                handler.get_status() >= 300:
            log_method = log.warn
        elif handler.get_status() <= 499 and \
                handler.get_status() >= 400:
            log_method = log.error
        elif handler.get_status() <= 599 and \
                handler.get_status() >= 500:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        # Prefer proxy-provided client addresses when present.
        real_ip = handler.request.headers.get('X-Real-Ip', None)
        #remote_ip = handler.request.remote_ip
        #uri = handler.request.remote_ip
        forwarded_ip = handler.request.headers.get('X-Forwarded-For', None)
        user_agent = handler.request.headers.get('User-Agent')
        log_message = '%d %s %s, %.2fms' % (handler.get_status(), handler._request_summary(), user_agent, request_time)
        if real_ip:
            log_message = (
                '%d %s %s %s %s, %.2fms' %
                (
                    handler.get_status(), handler._request_summary(),
                    real_ip, forwarded_ip, user_agent, request_time
                )
            )
        log_method(log_message)
if __name__ == '__main__':
    tornado.options.parse_command_line()
    # Serve HTTPS only, using the bundled certificate/key pair.
    https_server = tornado.httpserver.HTTPServer(
        Application(options.debug),
        ssl_options={
            "certfile": os.path.join(
                "/opt/TopPatch/tp/data/ssl/",
                "server.crt"),
            "keyfile": os.path.join(
                "/opt/TopPatch/tp/data/ssl/",
                "server.key"),
        }
    )
    https_server.listen(options.port)
    # Blocks forever servicing requests.
    tornado.ioloop.IOLoop.instance().start()
| SteelHouseLabs/vFense | tp/src/vFense_listener.py | vFense_listener.py | py | 5,617 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "tornado.options.define",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tornado.options.define",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tornado.httpserver.web",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_nam... |
72789776189 | import gc
import itertools as it
import os.path as osp
from typing import List
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
# try:
# from flashlight.lib.text.dictionary import create_word_dict, load_words
# from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
# from flashlight.lib.text.decoder import (
# CriterionType,
# LexiconDecoderOptions,
# KenLM,
# LM,
# LMState,
# SmearingMode,
# Trie,
# LexiconDecoder,
# )
# except:
# warnings.warn(
# "flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
# )
# LM = object
# LMState = object
class W2lDecoder(object):
    """Base class for wav2letter-style decoders operating on acoustic-model emissions."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        self.nbest = args.nbest
        # CTC blank: a dedicated "<ctc_blank>" symbol when present, otherwise BOS.
        if "<ctc_blank>" in tgt_dict.indices:
            self.blank = tgt_dict.index("<ctc_blank>")
        else:
            self.blank = tgt_dict.bos()
        # Silence/word separator: prefer "<sep>", then "|", falling back to EOS.
        if "<sep>" in tgt_dict.indices:
            self.silence = tgt_dict.index("<sep>")
        elif "|" in tgt_dict.indices:
            self.silence = tgt_dict.index("|")
        else:
            self.silence = tgt_dict.eos()
        self.asg_transitions = None

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            key: value
            for key, value in sample["net_input"].items()
            if key != "prev_output_tokens"
        }
        return self.decode(self.get_emissions(models, encoder_input))

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions."""
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, "get_logits"):
            # no need to normalize emissions
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs):
        """Normalize tokens by handling CTC blank, ASG replabels, etc."""
        # Collapse runs of repeated ids, then drop the blank symbol.
        collapsed = (group[0] for group in it.groupby(idxs))
        return torch.LongTensor([tok for tok in collapsed if tok != self.blank])
class W2lViterbiDecoder(W2lDecoder):
    """Viterbi (greedy best-path) decoder; shares all configuration with W2lDecoder."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
| lovemefan/Wav2vec2-webserver | fairseq_lib/examples/speech_recognition/w2l_decoder.py | w2l_decoder.py | py | 2,977 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "itertools.groupby",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 81,
"usage_type": "call"
}
] |
37858257254 | #using a shallow net(2 layers)
#not tested
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
from utils import load_dataset
#loading the dataset using utils
# x: features with one example per column; y: labels as a row vector -- assumed
# from the (features, examples) layout used throughout; TODO confirm in utils.load_dataset.
x,y = load_dataset()
shape_x = x.shape
shape_y = y.shape
m = shape_x[1]  # number of training examples
#first trying to fit the data using sime LR
# Baseline: cross-validated logistic regression (sklearn expects examples as rows, hence .T).
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(x.T, y.T)
print(clf.predict(x.T))
#now trying with multi layer nn model
#helper functions
def layer_sizes(x,y):
    """Return (n_input, n_hidden, n_output) for the 2-layer net.

    x has shape (features, examples); y has shape (outputs, examples).
    """
    input_units = x.shape[0]
    hidden_units = 4  # fixed hidden-layer width
    output_units = y.shape[0]
    return (input_units, hidden_units, output_units)
def init_weights(nx,nh,ny):
    """Seeded small-random initialisation for the weights; zero biases.

    Returns a dict with w1 (nh,nx), b1 (nh,1), w2 (ny,nh), b2 (ny,1).
    The RNG is seeded and w1 is drawn before w2, so results are reproducible.
    """
    np.random.seed(9)
    weight_scale = 0.01
    w1 = np.random.randn(nh, nx) * weight_scale
    b1 = np.zeros((nh, 1))
    w2 = np.random.randn(ny, nh) * weight_scale
    b2 = np.zeros((ny, 1))
    return {"w1": w1, "b1": b1, "w2": w2, "b2": b2}
#forward propagation
def propagate(x,y,params):
    """Forward pass: tanh hidden layer, sigmoid output; returns (cache, cost).

    cost is the mean binary cross-entropy over the m examples (columns of x);
    cache holds the intermediate activations needed for backprop.
    """
    m = x.shape[1]
    w1, b1 = params["w1"], params["b1"]
    w2, b2 = params["w2"], params["b2"]
    z1 = np.dot(w1, x) + b1
    a1 = np.tanh(z1)
    z2 = np.dot(w2, a1) + b2
    a2 = 1 / (1 + np.exp(-z2))  # sigmoid
    logprobs = np.multiply(np.log(a2), y) + np.multiply(np.log(1 - a2), (1 - y))
    cost = np.squeeze(-np.sum(logprobs) / m)
    cache = {"z1": z1, "a1": a1, "z2": z2, "a2": a2}
    return cache, cost
#backprop for optimization
def optimize(params, cache, x, y, alpha = 1.2):
    """One full-batch gradient-descent step; returns (updated params, grads).

    Bug fixes vs. the original:
    - w2 was read (to compute dz1) before it was unpacked from params,
      raising NameError on every call.
    - db1/db2 summed over *all* elements; bias gradients must be summed per
      unit (axis=1, keepdims=True) so each bias row gets its own gradient.
    """
    m = x.shape[1]
    a1 = cache["a1"]
    a2 = cache["a2"]
    # Unpack parameters first: dz1 below needs w2.
    w1 = params["w1"]
    b1 = params["b1"]
    w2 = params["w2"]
    b2 = params["b2"]
    dz2 = a2 - y
    dw2 = np.dot(dz2, a1.T) / m
    db2 = np.sum(dz2, axis=1, keepdims=True) / m
    dz1 = np.dot(w2.T, dz2) * (1 - np.power(a1, 2))  # tanh'(z1) = 1 - a1**2
    dw1 = np.dot(dz1, x.T) / m
    db1 = np.sum(dz1, axis=1, keepdims=True) / m
    # In-place gradient-descent update (mutates the arrays held in params).
    w1 -= alpha * dw1
    b1 -= alpha * db1
    w2 -= alpha * dw2
    b2 -= alpha * db2
    params = {"w1": w1, "b1": b1, "w2": w2, "b2": b2}
    grads = {"dw1": dw1, "db1": db1, "dw2": dw2, "db2": db2}
    return params, grads
#final model
def model(x,y,iterations):
    """Train the 2-layer net with full-batch gradient descent for *iterations* steps.

    Prints the cost every 1000 iterations and returns the trained parameters.

    Bug fix: propagate() returns (cache, cost) but the result was unpacked as
    (cost, cache), so optimize() received the scalar cost in place of the
    activation cache and crashed on the first step.
    """
    np.random.seed(9)
    nx = layer_sizes(x,y)[0]
    ny = layer_sizes(x,y)[2]
    nh = 4
    params = init_weights(nx,nh,ny)
    for i in range(iterations):
        cache, cost = propagate(x, y, params)
        params, grads = optimize(params, cache, x, y)
        if(i%1000 == 0):
            print(i,cost)
    return params
def predict(params, x):
    """Forward pass returning a boolean array: True where the sigmoid output > 0.5."""
    hidden = np.tanh(np.dot(params["w1"], x) + params["b1"])
    logits = np.dot(params["w2"], hidden) + params["b2"]
    output = 1 / (1 + np.exp(-logits))
    return output > 0.5
# Train the 2-layer net, then print training accuracy:
# (true positives + true negatives) / number of examples.
params = model(x,y,10000)
predictions = predict(params, x)
print((np.dot(y,predictions.T)+np.dot(1-y, 1-predictions.T))/y.size)
| thepavankoushik/Project-Reboot | shallow networks/planardata_classify.py | planardata_classify.py | py | 2,445 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.load_dataset",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegressionCV",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 17,
"usage_type": "attribute"
},
{... |
38930719861 | #!/usr/bin/python3
"""
Python script that takes GitHub credentials
(username and password) and uses the GitHub API to display id
"""
import requests
import sys
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print("Usage: ./10-my_github.py <username> <token>")
        sys.exit(1)

    username, token = sys.argv[1], sys.argv[2]

    # Make the request with Basic Authentication using your token
    # (GitHub accepts a personal access token in place of the password).
    response = requests.get('https://api.github.com/user', auth=(username, token))

    if response.status_code == 200:
        try:
            user_data = response.json()
            user_id = user_data.get('id')
            if user_id is not None:
                print(user_id)
            else:
                print("None")
        except ValueError:
            # Response body was not valid JSON.
            print("Invalid JSON response")
    else:
        # Bad credentials or any other non-200 response.
        print("None")
| Bellamalwa/alx-higher_level_programming | 0x11-python-network_1/10-my_github.py | 10-my_github.py | py | 851 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number"... |
72683633467 | from matplotlib import pyplot as plt
if __name__ == '__main__':
    # Linear relation between steady-state density and rainfall (Scanlon model),
    # clipped at zero below the extinction threshold.
    slope = 0.0008588
    y_intercept = -0.1702

    rainfall_values = [50 * x for x in range(0, 18)]  # 0 to 850 mm/year in 50 mm steps
    y = [max(slope * x + y_intercept, 0) for x in rainfall_values]

    plt.title('Bifurcation diagram of Scanlon model')
    plt.xlabel('Rainfall (mm/year)')
    plt.ylabel('Steady state density')
    plt.plot(rainfall_values, y)
    # Dashed zero branch from the 5th sample onward (the bare-soil steady state).
    plt.plot(rainfall_values[4:], [0 for _ in range(4, 18)], linestyle='dashed')
    plt.show()
{
"api_name": "matplotlib.pyplot.title",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ma... |
11120983307 | import abc
import typing as tp
from librarius.domain.messages import (
TAbstractMessage,
TAbstractCommand,
TAbstractQuery,
TAbstractEvent,
)
if tp.TYPE_CHECKING:
from librarius.domain.messages import AbstractCommand, AbstractQuery, AbstractEvent
from librarius.domain.models import Entity
from librarius.service.uow import TAbstractUnitOfWork
TAbstractHandler = tp.TypeVar("TAbstractHandler", bound="AbstractHandler")
class AbstractHandler(tp.Generic[TAbstractHandler, TAbstractMessage], abc.ABC):
    """Base message handler: holds a unit of work and is invoked with a single message."""

    def __init__(self, uow: "TAbstractUnitOfWork"):
        self.uow = uow

    @abc.abstractmethod
    def __call__(self, message: "TAbstractMessage"):
        raise NotImplementedError
class AbstractCommandHandler(
    AbstractHandler["AbstractCommandHandler", TAbstractCommand],
    tp.Generic[TAbstractCommand],
    abc.ABC,
):
    """Handles commands: state-changing messages that return nothing."""

    @abc.abstractmethod
    def __call__(self, cmd: "TAbstractCommand") -> None:
        raise NotImplementedError
class AbstractEventHandler(
    AbstractHandler["AbstractEventHandler", TAbstractEvent],
    tp.Generic[TAbstractEvent],
    abc.ABC,
):
    """Handles events: notifications of something that already happened; returns nothing."""

    @abc.abstractmethod
    def __call__(self, event: "TAbstractEvent") -> None:
        raise NotImplementedError
class AbstractQueryHandler(
    AbstractHandler["AbstractQueryHandler", TAbstractQuery],
    tp.Generic[TAbstractQuery],
    abc.ABC,
):
    """Handles queries: read-only messages returning one entity or an iterable of entities."""

    @abc.abstractmethod
    def __call__(
        self, query: "TAbstractQuery"
    ) -> tp.Union[tp.Iterable["Entity"], "Entity"]:
        raise NotImplementedError
| adriangabura/vega | librarius/service/handlers/abstract.py | abstract.py | py | 1,538 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typing.TypeVar",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Generic",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "librariu... |
37219263893 | from typing import List, Dict
import csv
def get_unique_industries(path: str) -> List[str]:
    """Read the CSV at *path* and return its distinct non-empty industry names.

    Bug fix: the function was annotated as returning List[str] but returned a
    set; it now returns a sorted list, matching the annotation and making the
    output deterministic. It also streams rows instead of materialising the
    whole file first.
    """
    with open(path, mode="r") as file:
        reader = csv.DictReader(file)
        industries = {row["industry"] for row in reader if row["industry"] != ""}
    return sorted(industries)
def filter_by_industry(jobs: List[Dict], industry: str) -> List[Dict]:
    """Return only the jobs whose 'industry' field equals *industry* (order preserved)."""
    return [job for job in jobs if job["industry"] == industry]
| Gilson-SR/job-insights | src/insights/industries.py | industries.py | py | 684 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.DictReader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number":... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.