seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74537456828 | import numpy as np, cv2
def draw_histo(hist, shape=(200, 256)):
"""Render a 1-D histogram as a bar-chart image of the given (height, width) shape."""
hist_img = np.full(shape, 255, np.uint8) # initialize with a white background
# normalize in place so the minimum maps to 0 and the maximum to the image height
cv2.normalize(hist, hist, 0, shape[0], cv2.NORM_MINMAX) # adjust bin counts so min becomes 0 and max becomes the graph height
gap = hist_img.shape[1]/hist.shape[0] # horizontal width of one bin in pixels
for i, h in enumerate(hist):
x = int(round(i*gap))
w = int(round(gap))
# rect given as (x, y, w, h); bars are drawn from the top edge — the final flip (outside this span) puts them at the bottom
cv2.rectangle(hist_img, (x, 0, w, int(h)), 0, cv2.FILLED)
return cv2.flip(hist_img, 0) | binlee52/OpenCV-python | Common/histogram.py | histogram.py | py | 539 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.full",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.normalize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_MINMAX",
"line_nu... |
42641515679 | # -*- coding: utf-8 -*-
# @Time : 2020/12/13 11:04
# @Author : Joker
# @Site :
# @File : draw.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
m = 20 # number of data points (rows) in point.txt
n = 2 # number of coordinates per point (columns)
c = 5 # number of clusters
test_point = [2, 6] # coordinates of the test point to classify
if __name__ == '__main__':
# path of the k-means output file (m data points followed by c cluster centers)
path = "C:/Users/99259/source/repos/k-means/k-means/point.txt"
# raw text lines read from the file
file = []
# array for the source points (m points plus c cluster centers)
data = np.zeros((m + c, n))
# read the points file line by line
for line in open(path, "r"):
# strip the trailing newline
line = line.strip()
file.append(line)
# parse the space-separated file data into the array
for i in range(m + c):
for j in range(n):
data[i][j] = float(file[i].split(' ')[j])
# same procedure as above, but for the per-point category file
cate_path = "C:/Users/99259/source/repos/k-means/k-means/category.txt"
cate_file = []
cate = np.zeros(m)
for line in open(cate_path, 'r'):
# strip the trailing newline
line = line.strip()
cate_file.append(line)
for i in range(m):
cate[i] = int(cate_file[i])
# use a CJK-capable font so the Chinese labels below are not garbled
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('数据点分布') # title: "distribution of data points"
# palette: one color per cluster (one spare)
color = ['red', 'blue', 'pink', 'yellow', 'green', 'purple']
# plot the source data points
# points of different categories are collected into separate lists
x = [[] for i in range(c)] # x-axis data per category
y = [[] for i in range(c)] # y-axis data per category
for i in range(m):
for j in range(c):
if cate[i] == j:
x[j].append(data[i][0])
y[j].append(data[i][1])
# scatter each category with its own color (label "类别%d" = "category %d")
for i in range(c):
plt.scatter(x[i], y[i], color=color[i], label=("类别%d" % (i + 1)))
# plot the cluster centers (stored in rows m .. m+c-1 of data)
point_x = []
point_y = []
for i in range(c):
point_x.append([data[m + i][0]])
point_y.append([data[m + i][1]])
plt.scatter(point_x, point_y, color='black', marker='*', label="中心点") # label: "center points"
# radius of each cluster's enclosing circle
radius = np.zeros(c)
# iterate over the clusters
for i in range(c):
# track the largest center-to-member distance for this cluster
# a cluster whose only member is its center keeps the minimum radius 0.2
max_dis = 0.2
# iterate over the points of cluster i
for j in range(len(x[i])):
dis_x = x[i][j] - point_x[i] # NOTE(review): point_x[i] is a 1-element list; this relies on NumPy broadcasting
dis_y = y[i][j] - point_y[i]
# Euclidean distance to the center
dis = np.sqrt(pow(dis_x, 2) + pow(dis_y, 2))
# keep the maximum distance
if dis > max_dis:
max_dis = dis
# the maximum distance becomes the radius of this cluster
radius[i] = max_dis
# draw the radius circle of each cluster
for i in range(c):
# circle center and radius (NOTE: rebinds x and y, shadowing the per-category lists above)
x = point_x[i][0]
y = point_y[i][0]
r = radius[i]
# sample the x coordinates of the circle
a = np.arange(x - r, x + r, 0.0001)
# matching y offsets from the circle equation
b = np.sqrt(pow(r, 2) - pow((a - x), 2))
# draw the upper semicircle
plt.plot(a, y + b, color=color[i], linestyle='-')
# draw the lower semicircle
plt.plot(a, y - b, color=color[i], linestyle='-')
# t holds the predicted category of the test point
# (the test point coordinates can be changed at the top of the file)
t = 0
# start from a large sentinel distance
d = 100
# find the nearest cluster center
for i in range(c):
# distance from the test point to center i
dis = np.sqrt(pow((test_point[0] - point_x[i][0]), 2) + pow((test_point[1] - point_y[i][0]), 2))
# keep the closest center
if dis < d:
d = dis
t = i
# plot the test point in the color of its predicted category
plt.scatter(test_point[0], test_point[1], c=color[t], marker='x', label='(2,6)')
plt.legend()
# save the figure to disk before showing it
plt.savefig(r'C:/Users/99259/source/repos/k-means/k-means/show.png', dpi=300)
plt.show()
| Chimaeras/Data_Mining_ex | src/category_draw.py | category_draw.py | py | 3,852 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.py... |
70416778427 | from config import config
import random
import requests
import chardet
from db.db_select import sqlhelper
import threading
lock = threading.Lock()
class Downloader(object):
"""Fetches a URL directly first, then retries through database-supplied proxies."""
@staticmethod
def download(url):
"""Return the decoded page body for `url`, or None when every attempt fails."""
try:
# first attempt: direct request with a randomized header set
r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT)
r.encoding = chardet.detect(r.content)['encoding'] # guess the encoding from the raw bytes
# treat tiny responses as failures (likely block pages or error stubs)
if (not r.ok) or len(r.content) < 500:
raise ConnectionError
else:
return r.text
except: # NOTE(review): bare except also swallows KeyboardInterrupt; prefer `except Exception`
count = 0 # retry counter
# the proxy table is shared between threads, so guard the read with the module lock
lock.acquire()
proxylist = sqlhelper.select(10)
lock.release()
if not proxylist:
return None
while count < config.RETRY_TIME:
try:
# pick a random proxy candidate for each retry
proxy = random.choice(proxylist)
ip = proxy[0]
port = proxy[1]
proxies = {"http": "http://{}:{}".format(ip, port), "https": "http://{}:{}".format(ip, port)}
r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
r.encoding = chardet.detect(r.content)['encoding']
if (not r.ok) or len(r.content) < 500:
raise ConnectionError
else:
return r.text
except:
count += 1
return None | queenswang/IpProxyPool | spider/HtmlDownloader.py | HtmlDownloader.py | py | 1,469 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "threading.Lock",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.config.get_header",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.config",
... |
21025178712 | #!/usr/bin/env python3
import logging
import sys
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, OUTPUT_C, MediumMotor
from ev3dev2.control.rc_tank import RemoteControlledTank
log = logging.getLogger(__name__)
class TRACK3R(RemoteControlledTank):
"""
Base class for all TRACK3R variations. The only difference in the child
classes are in how the medium motor is handled.
To enable the medium motor toggle the beacon button on the EV3 remote.
"""
def __init__(self, medium_motor, left_motor, right_motor):
# Tank driving (left/right tracks) is handled entirely by the parent class.
RemoteControlledTank.__init__(self, left_motor, right_motor)
self.medium_motor = MediumMotor(medium_motor)
# reset() stops the motor and clears its position counter before use
self.medium_motor.reset()
class TRACK3RWithBallShooter(TRACK3R):
    """TRACK3R variant whose medium motor drives the ball shooter."""

    def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
        super().__init__(medium_motor, left_motor, right_motor)
        # Beacon button on channel 1 toggles the shooter.
        self.remote.on_channel1_beacon = self.fire_ball

    def fire_ball(self, state):
        """Spin the medium motor three full turns while the beacon is on; stop otherwise."""
        if not state:
            self.medium_motor.stop()
            return
        self.medium_motor.run_to_rel_pos(speed_sp=400, position_sp=3*360)
class TRACK3RWithSpinner(TRACK3R):
    """TRACK3R variant with a spinner attached to the medium motor."""

    def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
        super().__init__(medium_motor, left_motor, right_motor)
        # Beacon button on channel 1 toggles the spinner.
        self.remote.on_channel1_beacon = self.spinner

    def spinner(self, state):
        """Run the spinner slowly while the beacon is toggled on; stop otherwise."""
        if not state:
            self.medium_motor.stop()
            return
        self.medium_motor.run_forever(speed_sp=50)
class TRACK3RWithClaw(TRACK3R):
    """TRACK3R variant whose medium motor opens and closes a claw."""

    def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
        super().__init__(medium_motor, left_motor, right_motor)
        # Beacon button on channel 1 toggles the claw.
        self.remote.on_channel1_beacon = self.move_claw

    def move_claw(self, state):
        """Close the claw (negative rotation) while the beacon is on, reopen when off."""
        position = -75 if state else 75
        self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=position)
| ev3dev/ev3dev-lang-python-demo | robots/TRACK3R/TRACK3R.py | TRACK3R.py | py | 2,002 | python | en | code | 59 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ev3dev2.control.rc_tank.RemoteControlledTank",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "ev3dev2.control.rc_tank.RemoteControlledTank.__init__",
"line_number": 20,
... |
6460673982 | import logging
from pprint import pprint # noqa
from olefile import isOleFile, OleFileIO
from ingestors.support.timestamp import TimestampSupport
from ingestors.support.encoding import EncodingSupport
log = logging.getLogger(__name__)
class OLESupport(TimestampSupport, EncodingSupport):
"""Provides helpers for Microsoft OLE files."""
def decode_meta(self, meta, prop):
"""Decode a single OLE metadata property from bytes using the file's codepage; None on failure."""
try:
value = getattr(meta, prop, None)
if not isinstance(value, bytes):
# property missing or already decoded — nothing to do
return
encoding = "cp%s" % meta.codepage
return self.decode_string(value, encoding)
except Exception:
log.warning("Could not read metadata: %s", prop)
def extract_ole_metadata(self, file_path, entity):
"""Open `file_path` and, when it is an OLE container, copy its metadata onto `entity`."""
with open(file_path, "rb") as fh:
if not isOleFile(fh):
return
# isOleFile consumed the header bytes; rewind before parsing
fh.seek(0)
try:
ole = OleFileIO(fh)
self.extract_olefileio_metadata(ole, entity)
except (RuntimeError, IOError):
# OLE reading can go fully recursive, at which point it's OK
# to just eat this runtime error quietly.
log.warning("Failed to read OLE data: %r", entity)
except Exception:
log.exception("Failed to read OLE data: %r", entity)
def extract_olefileio_metadata(self, ole, entity):
"""Copy timestamps and summary-information properties from an OleFileIO onto `entity`."""
try:
entity.add("authoredAt", self.parse_timestamp(ole.root.getctime()))
except Exception:
log.warning("Failed to parse OLE ctime.")
try:
entity.add("modifiedAt", self.parse_timestamp(ole.root.getmtime()))
except Exception:
log.warning("Failed to parse OLE mtime.")
# summary-information stream: map each property onto the entity schema
meta = ole.get_metadata()
entity.add("title", self.decode_meta(meta, "title"))
entity.add("author", self.decode_meta(meta, "author"))
entity.add("author", self.decode_meta(meta, "last_saved_by"))
entity.add("author", self.decode_meta(meta, "company"))
entity.add("summary", self.decode_meta(meta, "notes"))
entity.add("generator", self.decode_meta(meta, "creating_application"))
entity.add("authoredAt", self.decode_meta(meta, "create_time"))
entity.add("modifiedAt", self.decode_meta(meta, "last_saved_time"))
entity.add("language", self.decode_meta(meta, "language"))
| alephdata/ingest-file | ingestors/support/ole.py | ole.py | py | 2,390 | python | en | code | 45 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ingestors.support.timestamp.TimestampSupport",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "ingestors.support.encoding.EncodingSupport",
"line_number": 11,
"usage_type... |
19993528742 | """
This script crawls data about Malaysian stock indices and stores the output in a csv file.
"""
import requests
from bs4 import BeautifulSoup
import time

# Page listing the Malaysian indices on investing.com.
base_url = 'https://www.investing.com/indices/malaysia-indices?'
print('Scraping: ' + base_url)

# Desktop-browser User-Agent so the site does not reject the request.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
                         '10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/72.0.3626.109 Safari/537.36'}
html_doc = requests.get(base_url, headers=headers).text
# parse the HTML contents using BeautifulSoup parser
soup = BeautifulSoup(html_doc, 'html.parser')

# investing.com "pair" ids of the four indices scraped from this page:
# KLCI (29078), Malaysia ACE (29075), FTSE BM Mid 70 (29076), Malaysia Top 100 (29077).
pair_ids = ['29078', '29075', '29076', '29077']


def scrape_index(pair_id):
    """Return (name, last, high, low) for one index row; thousands separators stripped."""
    def cell(selector):
        # numeric cell, e.g. "1,534.12" -> "1534.12"
        return soup.select_one(selector).text.replace(',', '')

    name = soup.select_one(
        '#pair_{0} > td.bold.left.noWrap.elp.plusIconTd > a'.format(pair_id)).text
    last = cell('#pair_{0} > td.pid-{0}-last'.format(pair_id))
    high = cell('#pair_{0} > td.pid-{0}-high'.format(pair_id))
    low = cell('#pair_{0} > td.pid-{0}-low'.format(pair_id))
    return name, last, high, low


rows = [scrape_index(pid) for pid in pair_ids]

# One shared timestamp per run (the original queried the clock once per index,
# which could straddle a minute boundary and produce inconsistent rows).
stamp_time = time.strftime('%H:%M')
stamp_date = time.strftime('%d-%b-%Y')

# save the scraped prices to a file whose name contains the current datetime
file_name = 'indices_' + time.strftime('%d-%b-%Y_%H-%M') + '.csv'
with open(file_name, 'w') as f:
    for name, last, high, low in rows:
        f.write(name + ',' + last + ',' + high + ',' + low + ',' +
                '[' + stamp_date + '|' + stamp_time + ']' + '\n')
| ammar1y/Data-Mining-Assignment | Web crawlers/Malaysian stock indices crawler.py | Malaysian stock indices crawler.py | py | 3,548 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"lin... |
20972621530 |
import sys
import random
import math
from tools.model import io
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from detection import box, anchors, display, evaluate, loss
import argparse
from detection.models import models
from tools.image import cv
def random_box(dim, num_classes):
    """Return a random box in centre form (cx, cy, sx, sy) inside an image of size `dim`.

    The centre is uniform over the image; each side is 10-20% of the matching
    image dimension. (`num_classes` is unused but kept for the call sites.)
    """
    width, height = dim[0], dim[1]
    center_x = random.uniform(0, width)
    center_y = random.uniform(0, height)
    size_x = random.uniform(0.1, 0.2) * width
    size_y = random.uniform(0.1, 0.2) * height
    return (center_x, center_y, size_x, size_y)
if __name__ == '__main__':
# fix the RNG seeds so runs are reproducible
random.seed(0)
torch.manual_seed(0)
parser = argparse.ArgumentParser(description='Test model')
parser.add_argument('--model', action='append', default=[],
help='model type and sub-parameters e.g. "unet --dropout 0.1"')
args = parser.parse_args()
print(args)
num_classes = 2
model_args = {'num_classes':num_classes, 'input_channels':3}
# build the detection model and its box encoder from the CLI description
creation_params = io.parse_params(models, args.model)
model, encoder = io.create(models, creation_params, model_args)
print(model)
batches = 1
dim = (512, 512)
# random input image batch with values in [0, 1]
images = Variable(torch.FloatTensor(batches, 3, dim[1], dim[0]).uniform_(0, 1))
loc_preds, class_preds = model.cuda()(images.cuda())
def random_target():
"""Generate a random set of ground-truth boxes (point form) with random labels."""
num_boxes = random.randint(1, 50)
boxes = torch.Tensor ([random_box(dim, num_classes) for b in range(0, num_boxes)])
boxes = box.point_form(boxes)
label = torch.LongTensor(num_boxes).random_(0, num_classes)
return (boxes, label)
target_boxes = [random_target() for i in range(0, batches)]
# encode targets into the anchor-based format the loss expects
target = [encoder.encode(dim, boxes, label) for boxes, label in target_boxes]
loc_target = Variable(torch.stack([loc for loc, _ in target]).cuda())
class_target = Variable(torch.stack([classes for _, classes in target]).cuda())
# print((loc_target, class_target), (loc_preds, class_preds))
print(loss.total_loss( (loc_target, class_target), (loc_preds, class_preds) ))
# decode predictions back into boxes and score them against the random targets
detections = encoder.decode_batch(images.detach(), loc_preds.detach(), class_preds.detach())
classes = {}
for i, (boxes, label, confs), (target_boxes, target_label) in zip(images.detach(), detections, target_boxes):
score = evaluate.mAP(boxes, label, confs, target_boxes.type_as(boxes), target_label.type_as(label), threshold = 0.1)
print(score)
# noise = target_boxes.clone().uniform_(-20, 30)
# score = evaluate.mAP(target_boxes + noise, target_label, torch.arange(target_label.size(0)), target_boxes, target_label, threshold=0.5)
# print(score)
# i = i.permute(1, 2, 0)
# key = cv.display(display.overlay(i, boxes, label, confidence=confs))
# if(key == 27):
# break
#print(boxes)
#loss = MultiBoxLoss(num_classes)
#target = (Variable(boxes.cuda()), Variable(label.cuda()))
#print(loss(out, target))
| oliver-batchelor/detection | models/test.py | test.py | py | 2,957 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.uniform",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"li... |
37379394096 | try:
import Image
import ImageDraw
except:
from PIL import Image
from PIL import ImageDraw
import glob
import numpy as np
import os
import sys
def image_clip(img_path, size,
               out_dir="E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/clip"):
    """Split an image into non-overlapping tiles of `size` and save them as TIFFs.

    Tiles that do not fit completely inside the image (the right/bottom
    remainder) are silently discarded, matching the original behaviour.

    :param img_path: path to a PIL-readable source image
    :param size: (rows, cols) tile size in pixels
    :param out_dir: directory receiving the tiles (created if missing);
        defaults to the original hard-coded location for backward compatibility
    """
    imarray = np.array(Image.open(img_path))
    # number of complete tile rows/columns that fit into the image
    image_row = imarray.shape[0] // size[0]
    image_col = imarray.shape[1] // size[1]
    # Robust stem extraction instead of split('.')[0].split('\\')[1], which broke
    # on paths with extra dots or forward-slash separators (Windows paths assumed).
    img_name = os.path.splitext(os.path.basename(img_path.replace("\\", "/")))[0]
    # exist_ok avoids the check-then-create race of the original exists()/makedirs pair
    os.makedirs(out_dir, exist_ok=True)
    for row in range(image_row):
        for col in range(image_col):
            clipArray = imarray[row*size[0]:(row+1)*size[0], col*size[1]:(col+1)*size[1]]
            img_filepath = os.path.join(
                out_dir, img_name + "_" + str(row) + "_" + str(col) + ".tif")
            Image.fromarray(clipArray).save(img_filepath)
if __name__=='__main__':
    # Directory with the source GID images (swap in the label dir below if needed).
    img_dir = 'E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/'
    # img_dir = 'E:/wangyu_file/GID/Fine Land-cover Classification_15classes/label_15classes/'
    # Tile every TIFF in the directory into 512x512 patches.
    for img in glob.glob('{}*.tif'.format(img_dir)):
        image_clip(img, [512, 512])
| faye0078/RS-ImgShp2Dataset | train_example/model/Fast_NAS/data/slip_img.py | slip_img.py | py | 1,752 | python | zh | code | 1 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_... |
14896890650 | """empty message
Revision ID: 97dd2d43d5f4
Revises: d5e28ae20d48
Create Date: 2018-05-30 00:51:39.536518
"""
# revision identifiers, used by Alembic.
revision = '97dd2d43d5f4'
down_revision = 'd5e28ae20d48'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""Add the ``hidden`` flag (server default false) to ``exam`` (NOT NULL) and ``exam_version`` (nullable history column)."""
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('exam', sa.Column('hidden', sa.Boolean(), server_default=sa.literal(False), nullable=False))
op.add_column('exam_version', sa.Column('hidden', sa.Boolean(), server_default=sa.literal(False), autoincrement=False, nullable=True))
# ### end Alembic commands ###
def downgrade():
"""Remove the ``hidden`` columns added by this revision's upgrade()."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('exam_version', 'hidden')
op.drop_column('exam', 'hidden')
# ### end Alembic commands ###
| duvholt/memorizer | migrations/versions/97dd2d43d5f4_.py | 97dd2d43d5f4_.py | py | 828 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean... |
70211001788 | from django.conf.urls import patterns, include, url
from django.conf import settings
from cer_manager.views import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'cer_manager.views.home', name='home'),
# url(r'^cer_manager/', include('cer_manager.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
# Application views (imported via the star import from cer_manager.views).
url(r'^index/$', cer_list),
# NOTE(review): /canshu/ and /insert/ both map to the `insert` view — confirm intended.
url(r'^canshu/(.+)/$',insert),
url(r'^test/$',test),
url(r'^insert/$',insert),
url(r'^modify/(.+)/$',modify),
# Serve static CSS/JS through Django (development only; use a web server in production).
url('^css/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.STATIC_ROOT_CSS}),
url('^js/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.STATIC_ROOT_JS}),
)
| colive/cer_manager | urls.py | urls.py | py | 1,030 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dj... |
10173968880 | from configparser import ConfigParser
# ConfigParser instance that accumulates both server sections before writing.
config_object = ConfigParser()

# Browser-facing server: serves static files out of the web directory.
config_object["SERVERCONFIG_BROWSER"] = {
    "host": "127.0.0.1",
    "port": "8888",
    "web_directory": "www/",
}

# Plain server endpoint.
config_object["SERVERCONFIG"] = {
    "host": "127.0.0.1",
    "port": "8080",
}

# Persist both sections to config.ini in the working directory.
with open('config.ini', 'w') as conf:
    config_object.write(conf)
| kaumnen/diy-http-server | config/config.py | config.py | py | 428 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 4,
"usage_type": "call"
}
] |
17287700821 | import yaml
import argparse
from jinja2 import Environment, FileSystemLoader, Template
def get_args(argv=None):
    """Parse the command-line options for the renderer.

    :param argv: optional explicit argument list; defaults to ``sys.argv[1:]``
        (added so the parser can be exercised without touching sys.argv)
    :returns: argparse.Namespace with ``jobs`` and ``job_config`` attributes
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--jobs',
                        required=True,
                        help='name of the job entry (and template) to render')
    parser.add_argument('--job_config',
                        required=True,
                        help='path to the YAML file holding the job definitions')
    return parser.parse_args(argv)
def get_commandline(args):
    """Render the Jinja2 template named after ``args.jobs`` with its YAML job data.

    :param args: namespace with ``jobs`` (template/entry name) and
        ``job_config`` (YAML file path)
    :returns: the rendered command line as a string
    """
    # Close the config file deterministically (the original leaked the handle).
    with open(args.job_config) as config_file:
        # safe_load: the config is plain data; plain yaml.load without a Loader
        # is deprecated/removed in modern PyYAML and can execute arbitrary tags.
        config_data = yaml.safe_load(config_file)
    job_data = config_data[args.jobs]
    env = Environment(loader=FileSystemLoader('Templates'), trim_blocks=True, lstrip_blocks=True)
    template = env.get_template(args.jobs)
    return template.render(job_data)
def main():
    """Entry point: parse the CLI options, render the job command line, print it."""
    print(get_commandline(get_args()))
if __name__ == '__main__':
main() | Chappers1992/Variability | run.py | run.py | py | 759 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystem... |
71733863868 | from logging import Logger
from extract.adapters.airtable.credentials import AirtableCredentials
from pyairtable import Table
class AirTableAdapter:
    """Pulls records from Airtable tables through the pyairtable client."""

    def __init__(self, logger: Logger, credentials: AirtableCredentials):
        self.logger = logger
        self.api_key = credentials.api_key
        self.base_id = credentials.base_id

    def extract(self, table_ids: list) -> dict:
        """Return {table_id: all records} for every table that loads successfully."""
        fetched_ok = {}
        results = {}
        for table_id in table_ids:
            try:
                results[table_id] = Table(self.api_key, self.base_id, table_id).all()
                fetched_ok[table_id] = True
            # NOTE(review): only RuntimeError is caught, mirroring the original —
            # confirm which exceptions the pyairtable client actually raises.
            except RuntimeError:
                self.logger.error(f"loading of airtable '{table_id}' data has not been successful")
        for table_id, ok in fetched_ok.items():
            if ok:
                self.logger.info(f"loading of airtable '{table_id}' data has been successful")
        return results
| patrikbraborec/good-crm-analytics | src/extract/adapters/airtable/impl.py | impl.py | py | 1,004 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.Logger",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "extract.adapters.airtable.credentials.AirtableCredentials",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pyairtable.Table",
"line_number": 18,
"usage_type": "call"
}
] |
41791316904 | import pytessy as pt
from PIL import ImageFilter, Image
if __name__ == "__main__":
# Create pytessy instance
ocrReader = pt.PyTessy()
files = ["cell_pic.jpg"]
for file in files:
# Load Image
img = Image.open(file)
# Scale up image
w, h = img.size
img = img.resize((2 * w, 2 * h))
# Sharpen image
img = img.filter(ImageFilter.SHARPEN)
# Convert to ctypes
imgBytes = img.tobytes()
bytesPerPixel = int(len(imgBytes) / (img.width * img.height))
# Use OCR on Image
imageStr = ocrReader.read(img.tobytes(), img.width, img.height, bytesPerPixel, raw=True, resolution=600)
print(file, imageStr)
| TheNova22/OurVision | legacy1/testtessy.py | testtessy.py | py | 628 | python | en | code | null | github-code | 6 | [
{
"api_name": "pytessy.PyTessy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFilter.SHARPEN",
... |
26969758526 | import os
import time
import numpy as np
import torch
from torchvision.utils import make_grid
from torchvision.transforms import ToPILImage
from base import BaseTrainer
from evaluate import get_fid_score, get_i3d_activations, init_i3d_model, evaluate_video_error
from utils.readers import save_frames_to_dir
from model.loss import AdversarialLoss
class Trainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
"""
def __init__(
self, model, losses, metrics,
optimizer_g, optimizer_d_s, optimizer_d_t, resume, config,
data_loader, valid_data_loader=None, lr_scheduler=None,
train_logger=None, learn_mask=True, test_data_loader=None,
pretrained_path=None
):
"""Wire up data loaders, loss weights and evaluation helpers on top of BaseTrainer.

Validation runs only when `valid_data_loader` is given; test-time
helpers (PIL conversion, I3D model for FID) are set up only when
`test_data_loader` is given. NOTE(review): `learn_mask` is accepted
but never stored here — confirm whether it is still needed.
"""
super().__init__(
model, losses, metrics, optimizer_g,
optimizer_d_s, optimizer_d_t, resume, config, train_logger,
pretrained_path
)
self.config = config
self.data_loader = data_loader
self.valid_data_loader = valid_data_loader
self.test_data_loader = test_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = self.config['visualization']['log_step']
# spatial/temporal GAN loss weights (a weight <= 0 disables that loss)
self.loss_gan_s_w = config['gan_losses']['loss_gan_spatial_weight']
self.loss_gan_t_w = config['gan_losses']['loss_gan_temporal_weight']
self.adv_loss_fn = AdversarialLoss()
self.evaluate_score = config['trainer'].get('evaluate_score', True)
# debugging switches: dump gated-conv activations / print per-video errors
self.store_gated_values = False
self.printlog = False
if self.test_data_loader is not None:
self.toPILImage = ToPILImage()
self.evaluate_test_warp_error = config.get('evaluate_test_warp_error', False)
self.test_output_root_dir = os.path.join(self.checkpoint_dir, 'test_outputs')
# load the I3D network used for FID computation once up front
init_i3d_model()
def _store_gated_values(self, out_dir):
"""Dump the gating activations of every gated (de)conv in the coarse net as PNGs.

One image per (time step, channel) is written under
`out_dir/upsample_<name>/` and `out_dir/downsample_<name>/`.
Debugging aid, enabled via `self.store_gated_values`.
"""
from model.blocks import GatedConv, GatedDeconv
def save_target(child, out_subdir):
# write every (time, channel) slice of this layer's gating map
if not os.path.exists(out_subdir):
os.makedirs(out_subdir)
if isinstance(child, GatedConv):
target = child.gated_values[0]
elif isinstance(child, GatedDeconv):
# GatedDeconv wraps its gated conv in `.conv`
target = child.conv.gated_values[0]
else:
raise ValueError('should be gated conv or gated deconv')
# swap to (time, channel, H, W) so we can index frame-by-frame
target = target.transpose(0, 1)
for t in range(target.shape[0]):
for c in range(target.shape[1]):
out_file = os.path.join(out_subdir, f'time{t:03d}_channel{c:04d}.png')
self.toPILImage(target[t, c: c + 1]).save(out_file)
for key, child in self.model.generator.coarse_net.upsample_module.named_children():
out_subdir = os.path.join(out_dir, f'upsample_{key}')
save_target(child, out_subdir)
for key, child in self.model.generator.coarse_net.downsample_module.named_children():
out_subdir = os.path.join(out_dir, f'downsample_{key}')
save_target(child, out_subdir)
def _evaluate_data_loader(self, epoch=None, output_root_dir=None, data_loader=None, name='test'):
"""Run the model over `data_loader`, accumulate video metrics and FID inputs.

Returns (warp_error, mse, ssim, psnr, p_dist, length, fid_score) totals;
per-video metrics are summed and the caller-side averages use `length`.
When `epoch` is given, sample images and scalar metrics are also written
to TensorBoard and frames are dumped under the output directory.
"""
total_length = 0
# warp error is optional and stays None when disabled
total_warp_error = 0 if self.evaluate_test_warp_error else None
total_error = 0
total_psnr = 0
total_ssim = 0
total_p_dist = 0
if output_root_dir is None:
output_root_dir = self.test_output_root_dir
if epoch is not None:
output_root_dir = os.path.join(output_root_dir, f"epoch_{epoch}")
output_root_dir = os.path.join(output_root_dir, name)
# I3D features of generated and real videos, later fed to the FID score
output_i3d_activations = []
real_i3d_activations = []
with torch.no_grad():
for batch_idx, data in enumerate(data_loader):
data_input, model_output = self._process_data(data)
inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
if self.store_gated_values:
out_dir = os.path.join(output_root_dir, 'gated_values', f'input_{batch_idx:04}')
self._store_gated_values(out_dir)
outputs = outputs.clamp(0, 1)
if self.evaluate_score:
# get i3d activation
output_i3d_activations.append(get_i3d_activations(outputs).cpu().numpy())
real_i3d_activations.append(get_i3d_activations(targets).cpu().numpy())
assert len(outputs) == 1 # Batch size = 1 for testing
inputs = inputs[0]
outputs = outputs[0].cpu()
targets = targets[0].cpu()
masks = masks[0].cpu()
if epoch is not None and epoch == 0:
# Save inputs to output_dir (only once, on the first epoch)
output_dir = os.path.join(output_root_dir, 'inputs', f"input_{batch_idx:04}")
self.logger.debug(f"Saving batch {batch_idx} input to {output_dir}")
save_frames_to_dir([self.toPILImage(t) for t in inputs.cpu()], output_dir)
if epoch is not None and epoch % 5 == 0:
# Save test results to output_dir every 5 epochs
output_dir = os.path.join(output_root_dir, f"result_{batch_idx:04}")
self.logger.debug(f"Saving batch {batch_idx} to {output_dir}")
save_frames_to_dir([self.toPILImage(t) for t in outputs], output_dir)
if self.evaluate_score:
# Evaluate scores for this single video and accumulate
warp_error, error, psnr_value, ssim_value, p_dist, length = \
self._evaluate_test_video(outputs, targets, masks)
if self.evaluate_test_warp_error:
total_warp_error += warp_error
total_error += error
total_ssim += ssim_value
total_psnr += psnr_value
total_p_dist += p_dist
total_length += length
if self.evaluate_score:
output_i3d_activations = np.concatenate(output_i3d_activations, axis=0)
real_i3d_activations = np.concatenate(real_i3d_activations, axis=0)
fid_score = get_fid_score(real_i3d_activations, output_i3d_activations)
else:
# scoring disabled: use neutral values so the division below is safe
fid_score = 0
total_p_dist = [0]
total_length = 1
# p_dist accumulates as a 1-element sequence; unwrap to a scalar
total_p_dist = total_p_dist[0]
if epoch is not None:
self.writer.set_step(epoch, name)
# log the last batch's images plus the averaged scalar metrics
self._write_images(
inputs, outputs, targets, masks,
model_output=model_output, data_input=data_input
)
if self.evaluate_test_warp_error:
self.writer.add_scalar('test_warp_error', total_warp_error / total_length)
self.writer.add_scalar('test_mse', total_error / total_length)
self.writer.add_scalar('test_ssim', total_ssim / total_length)
self.writer.add_scalar('test_psnr', total_psnr / total_length)
self.writer.add_scalar('test_p_dist', total_p_dist / total_length)
self.writer.add_scalar('test_fid_score', fid_score)
return total_warp_error, total_error, total_ssim, total_psnr, total_p_dist, total_length, fid_score
def _write_images(
self, inputs, outputs, targets, masks, output_edges=None,
target_edges=None, model_output=None, data_input=None
):
"""Log input/output/target frames and derived visualisations to TensorBoard.

Edge maps and guidances are logged best-effort: their try/except blocks
swallow failures because those tensors only exist for some configurations.
"""
self.writer.add_image('input', make_grid(inputs.cpu(), nrow=3, normalize=False))
self.writer.add_image('loss_mask', make_grid(masks.cpu(), nrow=3, normalize=False))
self.writer.add_image(
'output', make_grid(outputs.clamp(0, 1).cpu(), nrow=3, normalize=False))
self.writer.add_image('gt', make_grid(targets.cpu(), nrow=3, normalize=False))
# residuals: output vs ground truth and output vs input
self.writer.add_image('diff', make_grid(targets.cpu() - outputs.cpu(), nrow=3, normalize=True))
self.writer.add_image('IO_diff', make_grid(inputs.cpu() - outputs.cpu(), nrow=3, normalize=True))
try:
# only available when the edge loss is configured
output_edges = self.losses['loss_edge'][0].current_output_edges
target_edges = self.losses['loss_edge'][0].current_target_edges
self.writer.add_image('output_edge', make_grid(output_edges[0].cpu(), nrow=3, normalize=True))
self.writer.add_image('target_edge', make_grid(target_edges[0].cpu(), nrow=3, normalize=True))
except Exception:
pass
try:
# only available when the data pipeline produces guidance maps
guidances = data_input['guidances']
self.writer.add_image('guidances', make_grid(guidances[0].cpu(), nrow=3, normalize=True))
except Exception:
pass
if model_output is not None:
if 'imcomplete_video' in model_output.keys():
self.writer.add_image('imcomplete_video', make_grid(
model_output['imcomplete_video'][0].transpose(0, 1).cpu(), nrow=3, normalize=False))
def _evaluate_test_video(self, output, gt_frames, masks):
    """Convert one video's tensors to PIL images and score them with evaluate_video_error."""
    to_image = self.toPILImage
    gt_images = [to_image(frame) for frame in gt_frames]
    result_images = [to_image(frame) for frame in output]
    # mask tensors are stored in 0..255; rescale before conversion
    mask_images = [to_image(mask / 255) for mask in masks]
    return evaluate_video_error(
        result_images, gt_images, mask_images,
        flownet_checkpoint_path=None,
        evaluate_warping_error=self.evaluate_test_warp_error,
        printlog=self.printlog
    )
def _eval_metrics(self, output, target):
    """Evaluate every configured metric on (output, target), log each score.

    Returns a numpy array with one value per metric.
    """
    acc_metrics = np.zeros(len(self.metrics))
    for i, metric in enumerate(self.metrics):
        acc_metrics[i] += metric(output, target)
        self.writer.add_scalar(f'{metric.__name__}', acc_metrics[i])
    return acc_metrics
def _get_gan_loss(self, outputs, target, masks, discriminator, w, guidances=None, is_disc=None):
    """Score *outputs* with the named discriminator and return the GAN loss.

    Returns a zero tensor when the weight *w* is disabled, so callers can
    always call .item() on the result.
    """
    if w <= 0:
        return torch.Tensor([0]).to(self.device)
    scores = self.model.forward(outputs, masks, guidances, model=discriminator)
    gan_loss = self.adv_loss_fn(scores, target, is_disc)
    return gan_loss
def _get_grad_mean_magnitude(self, output, optimizer):
    """
    Get mean magnitude (absolute value) of gradient of output w.r.t params in the optimizer.
    This function is used to get a simple understanding over the impact of a loss.
    :output: usually the loss you want to compute gradient w.r.t params
    :optimizer: the optimizer who contains the parameters you care
    Note:
        This function will reset the gradient stored in parameter, so please
        use it before <your loss>.backward()
    Example:
        > grad_magnitude = self._get_grad_mean_magnitude(
            loss_recon * self.loss_recon_w, self.optimizer_g))
        > print(grad_magnitude)
    """
    optimizer.zero_grad()
    # retain_graph so the caller can still backprop the real loss afterwards
    output.backward(retain_graph=True)
    all_grad = []
    for group in optimizer.param_groups:
        for p in group['params']:
            all_grad.append(p.grad.view(-1))
    value = torch.cat(all_grad).abs().mean().item()
    # Clear the probe gradients so they do not leak into the real update.
    optimizer.zero_grad()
    return value
def _get_edge_guidances(self, tensors):
    """Compute per-frame edge maps for a (batch, frame, ...) video tensor.

    Returns the edges stacked back into (batch, frame, ...) layout.
    """
    from utils.edge import get_edge
    guidances = []
    for batch_idx in range(tensors.size(0)):
        batch_edges = []
        for frame_idx in range(tensors.size(1)):
            # Slice keeps the frame dimension so get_edge sees a 1-frame clip.
            edge = get_edge(
                tensors[batch_idx, frame_idx:frame_idx + 1]
            )
            batch_edges.append(edge)
        guidances.append(torch.cat(batch_edges, dim=0))
    guidances = torch.stack(guidances)
    return guidances
def _process_data(self, data):
    """Move one data-loader batch to the device and run the generator.

    Returns (data_input, model_output) dicts; guidances are optional and
    become None when the batch carries none.
    """
    inputs = data["input_tensors"].to(self.device)
    masks = data["mask_tensors"].to(self.device)
    targets = data["gt_tensors"].to(self.device)
    # guidances = self._get_edge_guidances(targets).to(self.device) if 'edge' in data['guidance'] else None
    guidances = data["guidances"].to(self.device) if len(data["guidances"]) > 0 else None
    data_input = {
        "inputs": inputs,
        "masks": masks,
        "targets": targets,
        "guidances": guidances
    }
    model_output = self.model(inputs, masks, guidances)
    return data_input, model_output
def _unpack_data(self, data_input, model_output):
    """Flatten the model I/O dicts into an (inputs, outputs, targets, masks) tuple.

    When the model produced a refined second-stage result, that refined
    tensor is preferred over the raw first-stage outputs.
    """
    if 'refined_outputs' in model_output:
        outputs = model_output['refined_outputs']
    else:
        outputs = model_output['outputs']
    return data_input['inputs'], outputs, data_input['targets'], data_input['masks']
def _get_non_gan_loss(self, data_input, model_output):
    """Sum all enabled (weight > 0) non-GAN losses, logging each one unweighted."""
    # Compute and write all non-GAN losses to tensorboard by for loop
    losses = []
    for loss_name, (loss_instance, loss_weight) in self.losses.items():
        if loss_weight > 0.0:
            loss = loss_instance(data_input, model_output)
            # Log the raw (unweighted) value for comparability across runs.
            self.writer.add_scalar(f'{loss_name}', loss.item())
            loss *= loss_weight
            losses.append(loss)
    loss = sum(losses)
    return loss
def _train_epoch(self, epoch):
    """
    Training logic for an epoch
    :param epoch: Current training epoch.
    :return: A log that contains all information you want to save.
    Note:
        If you have additional information to record, for example:
        > additional_log = {"x": x, "y": y}
        merge it with log before return. i.e.
        > log = {**log, **additional_log}
        > return log
        The metrics in log must have the key 'metrics'.
    """
    self.model.train()
    epoch_start_time = time.time()
    total_loss = 0
    total_metrics = np.zeros(len(self.metrics))
    for batch_idx, data in enumerate(self.data_loader):
        batch_start_time = time.time()
        # Set writer
        self.writer.set_step((epoch - 1) * len(self.data_loader) + batch_idx)
        data_input, model_output = self._process_data(data)
        inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
        # Train G: reconstruction-style losses plus spatial (D_s) and
        # temporal (D_t) adversarial terms.
        non_gan_loss = self._get_non_gan_loss(data_input, model_output)
        loss_gan_s = self._get_gan_loss(
            outputs, 1, masks, discriminator='D_s', w=self.loss_gan_s_w, is_disc=False)
        loss_gan_t = self._get_gan_loss(
            outputs, 1, masks, discriminator='D_t', w=self.loss_gan_t_w, is_disc=False)
        loss_total = (
            non_gan_loss
            + loss_gan_s * self.loss_gan_s_w
            + loss_gan_t * self.loss_gan_t_w
        )
        self.optimizer_g.zero_grad()
        # Uncomment these lines to see the gradient
        # grad_recon = self._get_grad_mean_magnitude(loss_recon, self.optimizer_g)
        # grad_vgg = self._get_grad_mean_magnitude(loss_vgg, self.optimizer_g)
        # grad_gan_s = self._get_grad_mean_magnitude(loss_gan_s, self.optimizer_g)
        # grad_gan_t = self._get_grad_mean_magnitude(loss_gan_t, self.optimizer_g)
        # self.logger.info(f"Grad: recon {grad_recon} vgg {grad_vgg} gan_s {grad_gan_s} gan_t {grad_gan_t}")
        loss_total.backward()
        self.optimizer_g.step()
        # Train spatial and temporal discriminators: real targets labelled 1,
        # detached generator outputs labelled 0, averaged.
        for d in ['s', 't']:
            weight = getattr(self, f'loss_gan_{d}_w')
            optimizer = getattr(self, f'optimizer_d_{d}')
            if weight > 0:
                optimizer.zero_grad()
                loss_d = (
                    self._get_gan_loss(
                        targets, 1, masks, discriminator=f'D_{d}', w=weight, is_disc=True)
                    + self._get_gan_loss(
                        outputs.detach(), 0, masks, discriminator=f'D_{d}', w=weight, is_disc=True)
                ) / 2
                loss_d.backward()
                optimizer.step()
                self.writer.add_scalar(f'loss_d_{d}', loss_d.item())
        self.writer.add_scalar('loss_total', loss_total.item())
        self.writer.add_scalar('loss_gan_s', loss_gan_s.item())
        self.writer.add_scalar('loss_gan_t', loss_gan_t.item())
        with torch.no_grad():
            total_loss += loss_total.item()
            total_metrics += self._eval_metrics(outputs, targets)
        # NOTE(review): `and` binds tighter than `or`, so `batch_idx == 0`
        # triggers logging regardless of verbosity -- confirm intended.
        if self.verbosity >= 2 and \
                (batch_idx % self.log_step == 0 and epoch < 30) or \
                batch_idx == 0:
            self.logger.info(
                f'Epoch: {epoch} [{batch_idx * self.data_loader.batch_size}/{self.data_loader.n_samples} '
                f' ({100.0 * batch_idx / len(self.data_loader):.0f}%)] '
                f'loss_total: {loss_total.item():.3f}, '
                f'BT: {time.time() - batch_start_time:.2f}s'
            )
            self._write_images(inputs[0], outputs[0], targets[0], masks[0],
                               model_output=model_output, data_input=data_input)
    log = {
        'epoch_time': time.time() - epoch_start_time,
        'loss_total': total_loss / len(self.data_loader),
        'metrics': (total_metrics / len(self.data_loader)).tolist()
    }
    if self.do_validation:
        val_log = self._valid_epoch(epoch)
        log = {**log, **val_log}
    if self.test_data_loader is not None:
        log = self.evaluate_test_set(epoch=epoch, log=log)
    if self.lr_scheduler is not None:
        self.lr_scheduler.step()
    return log
def evaluate_test_set(self, output_root_dir=None, epoch=None, log=None):
    """Run full evaluation on every test data loader and record the scores.

    Accepts a single loader or a list of loaders; per-set p_dist and FID
    are merged into *log* (when given) and all metrics are printed when
    self.printlog is set.
    """
    # Insert breakpoint when Nan
    self.model.eval()
    if isinstance(self.test_data_loader, list):
        test_data_loaders = self.test_data_loader
    else:
        test_data_loaders = [self.test_data_loader]
    try:
        for i, data_loader in enumerate(test_data_loaders):
            name = data_loader.name if data_loader.name is not None else f'test{i}'
            total_warp_error, total_error, total_ssim, total_psnr, total_p_dist, total_length, fid_score = \
                self._evaluate_data_loader(data_loader=data_loader, name=name,
                                           output_root_dir=output_root_dir, epoch=epoch)
            if log is not None:
                log[f'{name}_p_dist'] = total_p_dist / total_length
                log[f'{name}_fid_score'] = fid_score
            if self.printlog:
                self.logger.info(f'test set name: {name}')
                if self.evaluate_test_warp_error:
                    self.logger.info(f'test_warp_error: {total_warp_error / total_length}')
                self.logger.info(f'test_mse: {total_error / total_length}')
                self.logger.info(f'test_ssim: {total_ssim / total_length}')
                self.logger.info(f'test_psnr: {total_psnr / total_length}')
                self.logger.info(f'test_p_dist: {total_p_dist / total_length}')
                self.logger.info(f'test_fid_score: {fid_score}\n')
    except Exception as err:
        self.logger.error(err, exc_info=True)
        # NOTE(review): deliberate interactive debugger hook (see comment at
        # the top of the method) -- drops into pdb on any evaluation failure.
        breakpoint()  # NOQA
    return log
def _valid_epoch(self, epoch):
    """
    Validate after training an epoch
    :return: A log that contains information about validation
    Note:
        The validation metrics in log must have the key 'val_metrics'.
    """
    self.model.eval()
    total_val_loss = 0
    total_val_metrics = np.zeros(len(self.metrics))
    self.logger.info(f"Doing {epoch} validation ..")
    with torch.no_grad():
        for batch_idx, data in enumerate(self.valid_data_loader):
            # On the first epoch only a handful of batches are validated to
            # keep start-up fast.  NOTE(review): the averages below still
            # divide by the full loader length in that case.
            if epoch == 1 and batch_idx > 5:
                continue
            self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
            data_input, model_output = self._process_data(data)
            inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
            # Validation uses only the non-GAN losses.
            loss_total = self._get_non_gan_loss(data_input, model_output)
            self.writer.add_scalar('loss_total', loss_total.item())
            total_val_loss += loss_total.item()
            total_val_metrics += self._eval_metrics(outputs, targets)
            if batch_idx % self.log_step == 0:
                self._write_images(
                    inputs[0], outputs[0], targets[0], masks[0],
                    model_output=model_output, data_input=data_input
                )
    return {
        'val_loss': total_val_loss / len(self.valid_data_loader),
        'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist(),
    }
| amjltc295/Free-Form-Video-Inpainting | src/trainer/trainer.py | trainer.py | py | 21,228 | python | en | code | 323 | github-code | 6 | [
{
"api_name": "base.BaseTrainer",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "model.loss",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "model.loss.AdversarialLoss",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvisio... |
13155871471 | import serial
import time
# pass in upper and lower 8 bit values
# returns the 16 bit value as an int
# def PrintContcatBytes(valueOne, valueTwo):
# print bin(valueOne)[2:].rjust(8,'0')
class ReturnValue(object):
    """One parsed frame from the PM dust sensor.

    *valid* is the string "True"/"False" (checksum verdict, as produced by
    readlineCR); pm10/pm25/pm100 are the concentration words and
    num3..num100 the particle-count words in frame order.
    """

    def __init__(self, valid, pm10, pm25, pm100, num3, num5, num10, num25, num50, num100):
        self.valid = valid
        self.pm10 = pm10
        self.pm25 = pm25
        self.pm100 = pm100
        self.num3 = num3
        self.num5 = num5
        self.num10 = num10
        self.num25 = num25
        self.num50 = num50
        self.num100 = num100

    def __repr__(self):
        # Compact debug form, fields in frame order.
        return ('ReturnValue(valid={0.valid!r}, pm10={0.pm10}, pm25={0.pm25}, '
                'pm100={0.pm100}, num3={0.num3}, num5={0.num5}, num10={0.num10}, '
                'num25={0.num25}, num50={0.num50}, num100={0.num100})'.format(self))
def ConcatBytes(valueOne, valueTwo):
    """Join a high byte and a low byte into one 16-bit integer.

    Equivalent to the original string-based bit concatenation
    (bin() + rjust + int(..., 2)) but done with plain integer arithmetic.
    Assumes both inputs are single bytes (0-255) from the serial stream.
    """
    return (valueOne << 8) | valueTwo
def readlineCRtest(port):
    # Brute-force self-test: checks ConcatBytes(i, j) == i*256 + j for every
    # byte pair, printing any mismatch.  *port* is unused; the bare print(i)
    # serves as a progress indicator.
    for i in range(256):
        for j in range(256):
            if i * 256 + j != ConcatBytes(i, j):
                print (i, j, i * 256 + j, ConcatBytes(i, j))
        print (i)
def readlineCR(port):
    """
    Read and parse one data frame from the PM2.5 sensor on *port*.

    Output values are explained here: https://www.dfrobot.com/wiki/index.php/PM2.5_laser_dust_sensor_SKU:SEN0177#Communication_protocol
    Returns a ReturnValue; its .valid field is the string "False" when the
    transmitted checksum does not match the byte sum.
    """
    data = []
    summation = 0
    data.append(ord(port.read()))
    data.append(ord(port.read()))
    # Re-sync on the 0x42 0x4d frame header: shift one byte at a time until
    # BOTH header bytes match.  (Bug fix: the original used `and`, which
    # stopped scooting as soon as EITHER byte matched, accepting a
    # misaligned frame.)
    while not (data[0] == int("42", 16) and data[1] == int("4d", 16)):
        print("failed - scooting over")
        data.pop(0)
        data.append(ord(port.read()))
    summation += data[0] + data[1]
    # Read the remaining 16-bit words; all bytes except the final checksum
    # word contribute to the running byte sum.
    while len(data) < 17:
        upperVal = ord(port.read())
        lowerVal = ord(port.read())
        if len(data) < 16:
            summation += upperVal
            summation += lowerVal
        data.append(ConcatBytes(upperVal, lowerVal))
    # data[16] is the checksum word transmitted by the sensor.
    if data[16] != summation:
        return ReturnValue("False", 0, 0, 0, 0, 0, 0, 0, 0, 0)
    # Words 3-5: PM1.0 / PM2.5 / PM10 concentrations; words 9-14: particle
    # counts per size bin.
    return ReturnValue("True", data[3], data[4], data[5], data[9], data[10], data[11], data[12], data[13], data[14])
# if ch == '\r' or ch == chr(66) or ch == '':
# return rv
# Open the Pi's UART and poll the sensor forever, printing each valid frame.
port = serial.Serial("/dev/serial0", baudrate=9600, timeout=2)
while True:
    boxOfStuff = readlineCR(port)
    port.write(b"I typed stuff")
    # Bug fix: readlineCR stores validity as the *string* "True"/"False",
    # and both strings are truthy, so the original `if boxOfStuff.valid:`
    # always took the success branch.  Compare explicitly.
    if boxOfStuff.valid == "True":
        print(boxOfStuff.pm10, boxOfStuff.pm25, boxOfStuff.pm100, boxOfStuff.num3, boxOfStuff.num5, boxOfStuff.num10,
              boxOfStuff.num25, boxOfStuff.num50, boxOfStuff.num100)
    else:
        print("message failed")
| learnlafayette/sensors | sensors/sensors/test/samples/pm_sample.py | pm_sample.py | py | 2,794 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 71,
"usage_type": "call"
}
] |
1480464469 | from jinja2 import DebugUndefined
from app.models import db, Order
from datetime import datetime
def seed_orders():
    """Populate the orders table with four demo orders (one per seed user).

    All orders share the same delivery instructions; each row differs only
    in user, gig, image and the placed/due timestamps.
    """
    order_specs = [
        # (userId, gigId, gigImage, placed, due)
        (1, 2, 'https://nerdrr.s3.amazonaws.com/fruits-basket.jpg',
         datetime(2022, 6, 5, 8, 10, 10, 10), datetime(2022, 6, 12, 8, 10, 10, 10)),
        (4, 4, 'https://nerdrr.s3.amazonaws.com/dnd-mini.jpg',
         datetime(2022, 5, 29, 8, 10, 10, 10), datetime(2022, 6, 8, 8, 10, 10, 10)),
        (2, 1, 'https://nerdrr.s3.amazonaws.com/indie-game.jpg',
         datetime(2022, 3, 11, 8, 10, 10, 10), datetime(2022, 4, 10, 8, 10, 10, 10)),
        (3, 3, 'https://nerdrr.s3.amazonaws.com/demon-slayer.jpg',
         datetime(2022, 6, 7, 8, 10, 10, 10), datetime(2022, 6, 10, 8, 10, 10, 10)),
    ]
    for user_id, gig_id, image_url, placed_at, due_at in order_specs:
        db.session.add(Order(
            userId=user_id,
            gigId=gig_id,
            gigImage=image_url,
            deliveryInstructions='Please mail directly to me.',
            placed=placed_at,
            due=due_at,
        ))
    db.session.commit()
def undo_orders():
    # Remove every seeded order and reset the primary-key sequence so the
    # seeder can be re-applied from a clean slate.
    db.session.execute('TRUNCATE orders RESTART IDENTITY CASCADE;')
    db.session.commit()
{
"api_name": "app.models.Order",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.models.Order"... |
25069435045 | from typing import List, Any, Tuple
from ups_lib.av_request import AddressValidationRequest
from purplship.core.utils import (
XP,
DP,
request as http,
exec_parrallel,
Serializable,
Deserializable,
Envelope,
Pipeline,
Job,
)
from purplship.api.proxy import Proxy as BaseProxy
from purplship.mappers.ups.settings import Settings
class Proxy(BaseProxy):
    """Purplship proxy for the UPS carrier.

    Serializes purplship requests and dispatches them to the UPS XML web
    services (Rate/Ship/Pickup/AV) and the JSON track API.
    """
    settings: Settings

    def _send_request(self, path: str, request: Serializable[Any]) -> str:
        # Shared POST helper for all XML webservices; *path* selects the
        # endpoint (e.g. /webservices/Rate).
        return http(
            url=f"{self.settings.server_url}{path}",
            data=bytearray(request.serialize(), "utf-8"),
            headers={"Content-Type": "application/xml"},
            method="POST",
        )

    def validate_address(
        self, request: Serializable[AddressValidationRequest]
    ) -> Deserializable[str]:
        # Address Validation (AV) service.
        response = self._send_request("/webservices/AV", request)
        return Deserializable(response, XP.to_xml)

    def get_rates(self, request: Serializable[Envelope]) -> Deserializable[str]:
        response = self._send_request("/webservices/Rate", request)
        return Deserializable(response, XP.to_xml)

    def get_tracking(
        self, request: Serializable[List[str]]
    ) -> Deserializable[List[Tuple[str, dict]]]:
        """
        get_tracking makes parallel requests for each tracking number
        """

        def get_tracking(tracking_number: str):
            # One JSON track-API GET per tracking number; returns the
            # (number, raw body) pair so results can be matched up later.
            return tracking_number, http(
                url=f"{self.settings.server_url}/track/v1/details/{tracking_number}",
                headers={
                    "Accept": "application/json",
                    "Content-Type": "application/json",
                    "AccessLicenseNumber": self.settings.access_license_number,
                    "Username": self.settings.username,
                    "Password": self.settings.password,
                },
                method="GET",
            )

        responses: List[str] = exec_parrallel(get_tracking, request.serialize())
        # Drop empty bodies, parse the rest into dicts.
        return Deserializable(
            responses,
            lambda res: [
                (num, DP.to_dict(track)) for num, track in res if any(track.strip())
            ],
        )

    def create_shipment(self, request: Serializable[Envelope]) -> Deserializable[str]:
        response = self._send_request("/webservices/Ship", request)
        return Deserializable(response, XP.to_xml)

    def cancel_shipment(self, request: Serializable) -> Deserializable[str]:
        # Cancellation goes through the same Ship service endpoint.
        response = self._send_request("/webservices/Ship", request)
        return Deserializable(response, XP.to_xml)

    def schedule_pickup(self, request: Serializable[Pipeline]) -> Deserializable[str]:
        # Pickup scheduling is a multi-step pipeline; jobs with no payload
        # fall back to their canned response.
        def process(job: Job):
            if job.data is None:
                return job.fallback
            return self._send_request("/webservices/Pickup", job.data)

        pipeline: Pipeline = request.serialize()
        response = pipeline.apply(process)
        return Deserializable(XP.bundle_xml(response), XP.to_xml)

    def modify_pickup(self, request: Serializable[Pipeline]) -> Deserializable[str]:
        # Same pipeline shape as schedule_pickup.
        def process(job: Job):
            if job.data is None:
                return job.fallback
            return self._send_request("/webservices/Pickup", job.data)

        pipeline: Pipeline = request.serialize()
        response = pipeline.apply(process)
        return Deserializable(XP.bundle_xml(response), XP.to_xml)

    def cancel_pickup(self, request: Serializable[Envelope]) -> Deserializable[str]:
        response = self._send_request("/webservices/Pickup", request)
        return Deserializable(response, XP.to_xml)
| danh91/purplship | sdk/extensions/ups/purplship/mappers/ups/proxy.py | proxy.py | py | 3,654 | python | en | code | null | github-code | 6 | [
{
"api_name": "purplship.api.proxy.Proxy",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "purplship.mappers.ups.settings.Settings",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "purplship.core.utils.Serializable",
"line_number": 21,
"usage_type": "n... |
18100941624 | """
739. Daily Temperatures
https://leetcode.com/problems/daily-temperatures/
"""
from typing import List
from unittest import TestCase, main
class Solution:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
stack: List[int] = []
# List of indexes, not temperatures
answer = [0 for _ in range(len(temperatures))]
# Pick up index and temperature from temperatures one by one
for idx, temparature in enumerate(temperatures):
# Loop while the stack has an item and the current temperature is
# greater than the peak in the stack.
while len(stack) != 0 and temperatures[stack[-1]] < temparature:
peak_idx = stack.pop()
# idx - peak_idx will be the num of days you have to wait to get a warmer temperature.
answer[peak_idx] = idx - peak_idx
# Now the stack is empty or the peak in the stack is less than or equal to the current one,
# just push it to the stack
stack.append(idx)
return answer
| hirotake111/leetcode_diary | leetcode/739/solution.py | solution.py | py | 1,081 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
}
] |
21340780197 | import sqlite3
from recipe import Recipe, Quantity, stringsToQuantities
class Database:
    """SQLite-backed store for recipes, their ingredients (with quantities)
    and their ordered instructions.

    Schema: recipes / ingredients / instructions plus two many-to-many join
    tables (recipeingredients, recipeinstructions).
    """
    def __init__(self,database):
        # *database* is the sqlite file path; tables are created lazily.
        self.connection = sqlite3.connect(database)
        self.c = self.connection.cursor()
        # self.c.execute("""DROP TABLE IF EXISTS ingredients""")
        # self.c.execute("""DROP TABLE IF EXISTS recipes""")
        # self.c.execute("""DROP TABLE IF EXISTS instructions""")
        # self.c.execute("""DROP TABLE IF EXISTS recipeingredients""")
        # self.c.execute("""DROP TABLE IF EXISTS recipeinstructions""")
        #Create ingredients
        self.c.execute("""CREATE TABLE IF NOT EXISTS ingredients (
            ingredientID INTEGER PRIMARY KEY,
            name TEXT,
            UNIQUE(name)
            )
            """)
        self.c.execute("""CREATE TABLE IF NOT EXISTS recipes (
            recipeID INTEGER PRIMARY KEY,
            name TEXT,
            UNIQUE(name)
            )
            """)
        self.c.execute("""CREATE TABLE IF NOT EXISTS instructions (
            instructionID INTEGER PRIMARY KEY,
            instruction TEXT,
            num INTEGER
            )
            """)
        # Join tables linking recipes to their ingredients/instructions.
        self.c.execute("""CREATE TABLE IF NOT EXISTS recipeingredients (
            recipeID INTEGER NOT NULL,
            ingredientID INTEGER NOT NULL,
            quantity TEXT,
            FOREIGN KEY (recipeID) REFERENCES recipes(recipeID),
            FOREIGN KEY (ingredientID) REFERENCES ingredients(ingredientID)
            )
            """)
        self.c.execute("""CREATE TABLE IF NOT EXISTS recipeinstructions (
            recipeID INTEGER NOT NULL,
            instructionID INTEGER NOT NULL,
            FOREIGN KEY (recipeID) REFERENCES recipes(recipeID),
            FOREIGN KEY (instructionID) REFERENCES instructions(instructionID)
            )
            """)
    def addRecipe(self,recipe):
        """Insert *recipe* plus its ingredients/quantities/instructions.

        Returns None when a recipe with the same name already exists,
        otherwise returns the recipe that was stored.
        """
        #self.c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (?)",(recipe.name,))
        hold = self.c.execute("SELECT recipeID FROM recipes WHERE recipes.name = ?",(recipe.name,)).fetchone()
        if hold is not None:
            return None
        self.c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (?)",(recipe.name,))
        # NOTE(review): the double assignment keeps the raw value in *hold*
        # too, but only the unpacked id is used afterwards.
        recipeID = hold = self.c.execute("SELECT recipeID FROM recipes WHERE recipes.name = ?",(recipe.name,)).fetchone()[0]
        ingredientIDList = []
        # recipe.ingredients may be a single string or an iterable of names.
        if type(recipe.ingredients) is not str:
            for ingredient in recipe.ingredients:
                self.c.execute("INSERT OR IGNORE INTO ingredients (name) VALUES (?)",(ingredient,))
                hold2 = self.c.execute("SELECT ingredientID FROM ingredients WHERE ingredients.name = ?",(ingredient,)).fetchone()
                ingredientIDList.extend(hold2)
        else:
            self.c.execute("INSERT OR IGNORE INTO ingredients (name) VALUES (?)",(recipe.ingredients,))
            hold2 = self.c.execute("SELECT ingredientID FROM ingredients WHERE ingredients.name = ?",(recipe.ingredients,)).fetchone()
            ingredientIDList.extend(hold2)
        instructionIDList = []
        # Same single-string-vs-list handling for instructions; *num* keeps
        # the 1-based step order.
        if type(recipe.instructions) is not str:
            for index, instruction in enumerate(recipe.instructions):
                self.c.execute("INSERT OR IGNORE INTO instructions (instruction,num) VALUES (?,?)",(instruction,index+1))
                hold3 = self.c.execute("SELECT instructionID FROM instructions WHERE instructions.instruction = ?",(instruction,)).fetchone()
                instructionIDList.extend(hold3)
        else:
            self.c.execute("INSERT OR IGNORE INTO instructions (instruction,num) VALUES (?,?)",(recipe.instructions,1))
            hold2 = self.c.execute("SELECT instructionID FROM instructions WHERE instructions.instruction = ?",(recipe.instructions,)).fetchone()
            instructionIDList.extend(hold2)
        # Link everything to the new recipe row.
        for instructionID in instructionIDList:
            self.c.execute("INSERT OR IGNORE INTO recipeinstructions (recipeID,instructionID) VALUES (?,?)",(recipeID,instructionID))
        for index, ingredientID in enumerate(ingredientIDList):
            self.c.execute("INSERT OR IGNORE INTO recipeingredients (recipeID,ingredientID,quantity) VALUES (?,?,?)",(recipeID,ingredientID,recipe.quantities[index].getStorageString()))
        return recipe
    def deleteRecipe(self,recipe_name):
        #returns None if not found
        check = self.c.execute("SELECT * FROM recipes WHERE recipes.name = ?",(recipe_name,)).fetchone()
        self.c.execute("DELETE FROM recipes WHERE recipes.name = ?",(recipe_name,))
        # NOTE(review): only the recipes row is deleted; join-table rows
        # remain unless foreign-key cascading is enabled (sqlite needs
        # PRAGMA foreign_keys=ON for that) -- confirm intended.
        return check
    def getRecipe(self,recipe_name): #return either list or single answer, if list then print options
        """Look up a recipe by exact name; returns [Recipe] or None."""
        recipeID = self.c.execute("SELECT recipes.recipeID FROM recipes WHERE recipes.name = ?",(recipe_name,)).fetchone()
        if recipeID == None:
            return None
        elif len(recipeID) == 1:
            # fetchone() returned the 1-tuple (recipeID,): join out the
            # ingredient names + quantities and the instruction rows.
            ingredientsQuantities = self.c.execute("SELECT ingredients.name, recipeingredients.quantity FROM ingredients INNER JOIN recipeingredients ON ingredients.ingredientID = recipeingredients.ingredientID AND recipeingredients.recipeID = ?",(recipeID[0],)).fetchall()
            instructions = self.c.execute("SELECT instructions.instruction, instructions.num FROM instructions INNER JOIN recipeinstructions ON recipeinstructions.instructionID = instructions.instructionID AND recipeinstructions.recipeID = ?",(recipeID[0],)).fetchall()
            ingredients, str_quantities = zip(*ingredientsQuantities)
            recipe = Recipe(recipe_name, instructions, ingredients, stringsToQuantities(str_quantities))
            return [recipe]
        else: # == 0, return None
            return None
    def keyWordSearchRecipes(self, keyword):
        """Return recipe names containing *keyword* (substring match), or None."""
        names = [name[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
        includes_keyword = []
        for name in names:
            if keyword in name:
                includes_keyword.append(name)
        if len(includes_keyword) == 0:
            return None
        return includes_keyword
    def keyWordSearchIngredients(self, keyword):
        """Return names of recipes that use any ingredient whose name contains *keyword*."""
        test = self.c.execute("SELECT ingredients.name FROM ingredients").fetchall()
        ingredient_names = []
        [ingredient_names.append(name[0]) for name in test if keyword in name[0] ]
        recipe_names = set([])
        for ingredient_name in ingredient_names:
            ingredientID = self.c.execute("SELECT ingredients.ingredientID FROM ingredients WHERE ingredients.name = ?",(ingredient_name,)).fetchone()
            recipe_name_list = self.c.execute("SELECT recipes.name FROM recipes INNER JOIN recipeingredients ON recipeingredients.ingredientID = ? AND recipes.recipeID = recipeingredients.recipeID",(ingredientID[0],)).fetchall()
            [recipe_names.add(name[0]) for name in recipe_name_list]
        return list(recipe_names)
    def getRecipeList(self):
        # All recipe names, in table order.
        return [name[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
    def getAllRecipes(self):
        # Materialize every stored recipe via getRecipe.
        return [self.getRecipe(name[0])[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
    def close(self):
        # Persist pending writes before closing the connection.
        self.connection.commit()
        self.connection.close()
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES ('dicks')")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (2)")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (3)")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (1)")
# test = c.execute("SELECT recipeID, * FROM recipes""")
# for row in test:
# print(row[0])
# addRecipe("lovely dicks",["dicks","penis","cock"],["2","3","4"],["cook the cokes","eat my ass"])
# print(getRecipeList())
# print(getRecipe("lovely dicks"))
# print(getRecipe("lovely dick"))
# connection.commit()
# connection.close() | fcopp/RecipeApplication | backend/backend.py | backend.py | py | 7,842 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "recipe.name",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "recipe.name",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "recipe.name",
"l... |
1369504657 | # -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time, datetime
from lcd import *
from Email import *
import server
# Initialise the LCD and the two bin-full switch inputs (pulled down, so a
# pressed switch drives the pin high from 3.3V).
lcd_init ()
GPIO.setmode(GPIO.BOARD)  # pin numbers below are physical BOARD numbers
print('System start/restart - ' + str(datetime.datetime.now()))
#Switch for Bin 1 to be connected to pin 18 and 3.3v pin
#Switch for Bin 2 to be connected to pin 16 and 3.3v pin
GPIO.setup(16, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(18, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
lcd_string(" Dust-O-Matic ",LCD_LINE_1)
#This function will run when the button is triggered
def Notifier(channel):
    # Handle a full-bin switch event: log it, update the LCD status lines
    # and send an email alert.  *channel* is the BOARD pin that fired
    # (18 = trailer/bin 1, 16 = trailer/bin 2).
    if channel==18:
        print('Bin 1 Full - '+ str(datetime.datetime.now()))
        lcd_string(' TRAILER #1 FULL ',LCD_LINE_2)
        SendEmail('TRAILER 1 FULL - PLEASE COLLECT', "")
        lcd_string(' TRAILER #2 Filling ',LCD_LINE_3)
    elif channel==16:
        print('Bin 2 Full - ' + str(datetime.datetime.now()))
        lcd_string(' TRAILER #2 FULL ',LCD_LINE_2)
        SendEmail('TRAILER 2 FULL - PLEASE COLLECT', "")
        lcd_string(' TRAILER #1 Filling ',LCD_LINE_3)
# Register rising-edge detection on both switch pins, then poll forever:
# on each detected edge, debounce briefly and notify if the pin is still
# high.  The LCD's last line continuously shows the current LAN IP.
GPIO.add_event_detect(18, GPIO.RISING)
GPIO.add_event_detect(16, GPIO.RISING)
while True:
    #print('Looping')
    lcd_string("LAN: " + get_ip_address('eth0'),LCD_LINE_4)
    #lcd_string("WLAN: " + get_ip_address('wlan0'),LCD_LINE_4)
    if GPIO.event_detected(18):
        time.sleep(0.005) # debounce for 5mSec
        # only show valid edges
        if GPIO.input(18)==1:
            #lcd_string('TRAILER #1 TRIGGERED',LCD_LINE_2)
            Notifier(18)
    if GPIO.event_detected(16):
        time.sleep(0.005)
        if GPIO.input(16)==1:
            Notifier(16)
    time.sleep(0.5)
# NOTE(review): unreachable -- the loop above never exits; consider a
# try/finally around the loop so cleanup actually runs on interrupt.
GPIO.cleanup()
| CraigHissett/TM_Timber | BinSensor/BinSensor.py | BinSensor.py | py | 1,875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "RPi.GPIO.setmode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BOARD",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",... |
21114723474 | import os
from dotenv import load_dotenv, dotenv_values
# FOR LOG
import logging
from logging.handlers import RotatingFileHandler
import datetime
import math
import json
# Load environmental variable
config = dotenv_values(".env")
# --------------------------------------------------- LOGGING ---------------------------------------------------------
# Create new log folder if not exist
# NOTE(review): the folder is only *named* below, never created -- the
# RotatingFileHandler will fail if it does not already exist (hence the
# try/except), confirm whether os.makedirs was intended.
LOG_FOLDER_NAME = config.get('LOG_FOLDER_NAME')
LOG_FOLDER = os.path.join(os.getcwd(), LOG_FOLDER_NAME)
# One log file per day, e.g. log_2023-01-31.log
LOG_FILE = os.path.join(LOG_FOLDER, 'log_{datetime}.log'.format(datetime=datetime.datetime.now().strftime('%Y-%m-%d')))
MAXBYTES = (config.get('MAXBYTES'))  # max size per log file (string from .env)
BACKUP_COUNT = config.get('BACKUP_COUNT')  # number of rotated files to keep
# Set up logging basic config
try:
    handler_rfh = RotatingFileHandler(LOG_FILE, maxBytes=int(MAXBYTES), backupCount=int(BACKUP_COUNT))
    handler_rfh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s', \
        datefmt='%m/%d/%Y %I:%M:%S %p')
    logging.getLogger('CRAWL_TELEGRAM').addHandler(handler_rfh)
except Exception as e:
    logging.exception(e)
# ----------------------------------------------------------------------------------------------------------------------
from entities.Account import Account
from entities.User import User
from BatchProcessor import BatchProcessor
if __name__ == "__main__":
    # Smoke test for BatchProcessor.divide_into_batch: 3 dummy users and
    # 3 dummy accounts, at most 4 members handled per account.
    bpro = BatchProcessor()
    num_mem_per_acc = 4
    list_members = []
    for i in range(3):
        member = User(f'user_id_{i}', f'access_hash_{i}')
        list_members.append(member)
    list_accounts = []
    for i in range(3):
        acc = Account(f'phone_no_{i}', f'api_id_{i}', f'api_hash_{i}', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        list_accounts.append(acc)
    logging.info(list_accounts)
    logging.info(list_members)
    list_use_accounts, dict_batch, is_lack_acc, max_mem_process = bpro.divide_into_batch(list_accounts, list_members, num_mem_per_acc)
    logging.info(', '.join([acc.phone_no for acc in list_use_accounts]))
    logging.info(is_lack_acc)
    logging.info(max_mem_process)
    # Dump each batch: dict_batch maps a key to (account, member list).
    for key in dict_batch:
        print(key)
        print('Account:' ,dict_batch[key][0])
        print('List members:')
        print(*dict_batch[key][1], sep='\n')
| Splroak/add_member_telegram | src/test_BatchProcessor.py | test_BatchProcessor.py | py | 2,365 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.dotenv_values",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_... |
24117960481 | import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import numpy as np
from skimage.transform import resize
# hyper params
gamma = 0.98  # discount factor applied to future rewards in train_policy
class Policy(nn.Module):
    """REINFORCE policy network for Breakout.

    Two conv+maxpool blocks (with batch norm) followed by two fully
    connected layers map a 3x210x160 RGB frame to 4 action scores.
    """
    def __init__(self):
        super(Policy, self).__init__()
        self.data = []  # (reward, log_prob) pairs collected over one episode
        self.lr = 0.002
        # define architecture/layer parameters
        self.input_channels = 3
        self.conv_ch_l1 = 8
        self.conv_ch_l2 = 12
        self.height = 210
        self.width = 160
        self.kernel_size = 3
        self.pool_size = 2
        # flattened conv-stack output size for a 210x160 input
        self.conv_out = 23256
        self.fc1_size = 16
        self.fc_out = 4
        # define actual layers
        # define first convolutional layer
        self.conv1 = nn.Conv2d(in_channels = self.input_channels,
                               out_channels = self.conv_ch_l1,
                               kernel_size = self.kernel_size)
        # add batch normalization layer
        self.batch_norm1 = nn.BatchNorm2d(self.conv_ch_l1)
        # define max-pool layer
        self.pool = nn.MaxPool2d(self.pool_size, self.pool_size)
        # define second convolution layer
        self.conv2 = nn.Conv2d(in_channels = self.conv_ch_l1,
                               out_channels = self.conv_ch_l2,
                               kernel_size = self.kernel_size)
        # define batch normalization layer
        self.batch_norm2 = nn.BatchNorm2d(self.conv_ch_l2)
        # define fully connected layers
        self.fc1 = nn.Linear(self.conv_out, self.fc1_size)
        self.fc2 = nn.Linear(self.fc1_size, self.fc_out)
        # define optimizer
        self.optimizer = optim.Adam(self.parameters() , lr = self.lr)
    def forward(self, x):
        # pass input through conv layer
        out = self.pool(F.relu(self.conv1(x)))
        out = self.batch_norm1(out)
        out = self.pool(F.relu(self.conv2(out)))
        # print(out.size())
        # exit()
        out = self.batch_norm2(out)
        # reshape the conv out before passing it to fully connected layer
        _,b,c,d = out.size()
        fc_shape = b*c*d
        # print("FC input size : ", fc_shape)
        out = out.view(-1, fc_shape)
        # pass input through fully connected layer
        out = F.relu(self.fc1(out))
        # NOTE(review): the final activation is ReLU, not softmax, so the
        # output is not a probability distribution; the caller feeds it to
        # Categorical, which fails if all scores are zero -- confirm whether
        # F.softmax was intended here.
        out = F.relu(self.fc2(out))
        return out
    # save data for training
    def put_data(self, item):
        self.data.append(item)
    # once the episode is complete we train the episode
    def train_policy(self):
        # Walk the episode backwards accumulating the discounted return R
        # and update on each recorded (reward, log_prob) pair.
        # NOTE(review): the update-step indentation is ambiguous in the
        # archived source; per-step updates are assumed here -- confirm.
        R = 0
        for r, log_prob in self.data[::-1]:
            R = r + gamma * R
            loss = -log_prob * R
            # clean the previous gradients
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        self.data = []
def main():
    """Train the Policy on Breakout for a fixed number of episodes.

    NOTE(review): assumes the classic gym API where reset() returns only the
    observation and step() returns a 4-tuple — confirm the installed gym
    version, newer releases changed both signatures.
    """
    # create the environment
    env = gym.make('Breakout-v0')
    pi = Policy()
    score = 0.0
    print_interval = 20
    num_episodes = 100
    for n_epi in range(num_episodes):
        state = env.reset()
        for t in range(100000):
            # state is an image with channel last.
            # pre-processing steps:
            # 1. make image grayscale
            # 2. resize image
            # 3. add first dimension for batch
            # 4. convert image to tensor
            #img = np.dot(state[:,:,:3], [0.2989, 0.5870, 0.1140])
            #img = resize(img, (63, 48), anti_aliasing=True)
            # now image is converted to single channel, add dimension for channel
            #img = np.expand_dims(img, axis=0)
            # move channels first: (H, W, C) -> (C, H, W) for the conv net
            img = np.rollaxis(state, 2, 0)
            prob = pi(torch.from_numpy(img).unsqueeze(0).float())
            # sample an action from the predicted distribution
            m = Categorical(prob)
            a = m.sample()
            state_prime, r, done, _ = env.step(a.item())
            # print(prob.size())
            # print(prob)
            # print(a)
            # print(a.size())
            # exit()
            # NOTE(review): these prints fire every step and are very noisy
            print("Output : ", prob)
            print("Action : ", a.item())
            print("Reward : ", r)
            # record (reward, log-prob of taken action) for the REINFORCE update
            pi.put_data((r,torch.log(prob[0,a])))
            state = state_prime
            score += r
            if done:
                print("Episode ended : ", n_epi+1)
                break
        # if the episode is completed, train policy on recorded observations
        pi.train_policy()
        if (n_epi+1)%print_interval == 0 and n_epi > 0 :
            print("Episode : {}, avg_score : {}".format(n_epi,
                                                        score/print_interval)
                  )
            score = 0
    env.close()
if __name__ == '__main__':
    main()
| sachinumrao/pytorch_tutorials | cnn_breakout_rl.py | cnn_breakout_rl.py | py | 4,757 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
43371065244 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class Spider:
    """Opens a NetEase Music song page in Chrome and submits a search query.

    NOTE(review): the try block executes at class-definition (import) time,
    not when an instance is created — presumably unintentional; consider
    moving it into a method.
    NOTE(review): find_element_by_id was removed in Selenium 4 — verify the
    installed selenium version still supports it.
    """
    try:
        page = webdriver.Chrome()
        url = "https://music.163.com/#/song?id=31654747"
        page.get(url)
        search = page.find_element_by_id("srch")
        search.send_keys("aaa")
        search.send_keys(Keys.ENTER)
    except Exception as e:
        # best-effort: any driver/lookup failure is printed and swallowed
        print(e)
| frebudd/python | autoinput.py | autoinput.py | py | 382 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.ENTER",
"line_number": 12,
"usage_type": "attribute"
},
... |
28800521491 | """Function to calculate the enrichment score for a given similarity matrix."""
import numpy as np
import pandas as pd
from typing import List, Union
import scipy
from cytominer_eval.utils.operation_utils import assign_replicates
def enrichment(
    similarity_melted_df: pd.DataFrame,
    replicate_groups: List[str],
    percentile: Union[float, List[float]],
) -> pd.DataFrame:
    """Calculate the enrichment score. This score is based on the fisher exact odds score.

    Similar to the other functions, the closest connections are determined and
    checked with the replicates. This score effectively calculates how much
    better the distribution of correct connections is compared to random.

    Parameters
    ----------
    similarity_melted_df : pandas.DataFrame
        An elongated symmetrical matrix indicating pairwise correlations between
        samples. Importantly, it must follow the exact structure as output from
        :py:func:`cytominer_eval.transform.transform.metric_melt`.
    replicate_groups : List
        a list of metadata column names in the original profile dataframe to use as
        replicate columns.
    percentile : float or List of floats
        Determines what percentage of top connections are used for the
        enrichment calculation.

    Returns
    -------
    pandas.DataFrame
        One row per percentile with the threshold, odds ratio and p value.
    """
    result = []
    replicate_truth_df = assign_replicates(
        similarity_melted_df=similarity_melted_df, replicate_groups=replicate_groups
    )
    # Accept a bare scalar for convenience.
    # BUGFIX: the original `type(percentile) == float` check missed ints and
    # numpy floats, which then raised TypeError in the loop below.
    if not isinstance(percentile, list):
        percentile = [percentile]
    # loop over all percentiles
    for p in percentile:
        # threshold based on percentile of top connections
        threshold = similarity_melted_df.similarity_metric.quantile(p)

        # 2x2 contingency table: replicate status x above/below threshold
        v11 = len(
            replicate_truth_df.query(
                "group_replicate==True and similarity_metric>@threshold"
            )
        )
        v12 = len(
            replicate_truth_df.query(
                "group_replicate==False and similarity_metric>@threshold"
            )
        )
        v21 = len(
            replicate_truth_df.query(
                "group_replicate==True and similarity_metric<=@threshold"
            )
        )
        v22 = len(
            replicate_truth_df.query(
                "group_replicate==False and similarity_metric<=@threshold"
            )
        )

        v = np.asarray([[v11, v12], [v21, v22]])
        r = scipy.stats.fisher_exact(v, alternative="greater")
        result.append(
            {
                "enrichment_percentile": p,
                "threshold": threshold,
                # NOTE: key name kept as "ods_ratio" (sic) for backward compatibility
                "ods_ratio": r[0],
                "p-value": r[1],
            }
        )
    result_df = pd.DataFrame(result)
    return result_df
| cytomining/cytominer-eval | cytominer_eval/operations/enrichment.py | enrichment.py | py | 2,845 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"lin... |
9837240055 | from urllib.parse import urljoin
import requests
import json
from fake_useragent import UserAgent
from lxml import html
import re
from pymongo import MongoClient
ua = UserAgent()  # rotating User-Agent pool; a random one is sent per request
movie_records = []  # accumulates one dict per movie across all result pages
first = True  # module-level paging flag consumed by scrape() — presumably the first page's nav links differ; verify against the site markup
base_url = "https://www.imdb.com/"
url = "https://www.imdb.com/search/title/?genres=drama&groups=top_250&sort=user_rating,desc&ref_=adv_prv"
def scrape(url):
    """Scrape one IMDb search-results page into movie_records, then recurse
    into the next page if a pagination link is present.

    NOTE(review): recursion depth grows with the number of result pages; a
    while-loop would avoid hitting the recursion limit on large crawls.
    """
    global first
    resp = requests.get(url = url,headers={'User-Agent':ua.random})
    tree = html.fromstring(resp.content)
    movie_data = tree.xpath("//div[@class = 'lister-item-content']")
    for movie in movie_data:
        p = {
            'name':movie.xpath(".//h3/a/text()")[0],
            # re.findall extracts the digits from strings like "(1994)" / "142 min"
            'year' : re.findall('\d+',movie.xpath(".//h3/span[@class='lister-item-year text-muted unbold']/text()")[0])[0],
            'duration' : re.findall('\d+',movie.xpath(".//p/span[@class='runtime']/text()")[0])[0],
            'rating' : movie.xpath(".//div[@class='ratings-bar']/div[contains(@class,'inline-block ratings-imdb-rating')]/@data-value")[0]
        }
        movie_records.append(p)
    # different XPath after the first call — presumably the first page has a
    # single nav link while later pages have Prev/Next; confirm against markup
    if first:
        next_page = tree.xpath("//div[@class = 'desc']/a/@href")
        first = False
    else:
        next_page = tree.xpath("//div[@class='desc']/a[2]/@href")
    if len(next_page) != 0:
        surl = urljoin(base = base_url,url=next_page[0])
        print(surl)
        scrape(surl)
def insert_to_db(list_records):
    """Upsert scraped movies into MongoDB Atlas (db imdb_movies, coll movies).

    NOTE(review): the list_records parameter is ignored — the body iterates
    the module-level movie_records instead; confirm which was intended.
    SECURITY: the connection string embeds credential placeholders; move the
    real URI into an environment variable, never commit it.
    """
    client = MongoClient("mongodb://<user_name>:<pwd>@cluster0-shard-00-00.rsxac.mongodb.net:27017,cluster0-shard-00-01.rsxac.mongodb.net:27017,cluster0-shard-00-02.rsxac.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-3xsr69-shard-0&authSource=admin&retryWrites=true&w=majority")
    db = client['imdb_movies']
    collection = db['movies']
    for m in movie_records:
        exists = collection.find_one({'name': m['name']})
        if exists:
            # replace only when the stored year differs (other fields are not compared)
            if exists['year'] != m['year'] :
                collection.replace_one({'name': exists['name']}, m)
                print(f"Old item: {exists} New Item: {m}")
        else:
            collection.insert_one(m)
    client.close()
# Module entry point: crawl all result pages, persist them, report the count.
scrape(url)
insert_to_db(movie_records)
print('number of movies ',len(movie_records))
{
"api_name": "fake_useragent.UserAgent",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lxml.html",... |
19239185532 | # stack ! 과제는 끝나지 않아!
# No efficiency tricks: at most one push and one pop per minute.
import sys
from collections import deque
input = sys.stdin.readline  # NOTE: shadows the builtin input() for faster reads
N = int(input())
S = deque() # stack of pending assignments as [score, minutes_left]
tot = 0 # total score earned
for _ in range(N):
    W = list(map(int, input().split()))
    if W[0]: # a new assignment arrives this minute
        if W[2] == 1: # one-minute assignment: finish now and collect its score
            tot += W[1]
        else: # otherwise work one minute on it and shelve it on the stack
            S.append([W[1], W[2]-1])
    else: # no new assignment this minute
        if S: # resume the most recently shelved assignment
            n_score, n_time = S.pop()
            if n_time == 1: # this minute finishes it: collect the score
                tot += n_score
            else: # still unfinished: deduct one minute and push it back
                S.append([n_score, n_time-1])
print(tot)
{
"api_name": "sys.stdin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
11900486194 | import dash
from dash import html
from matplotlib import container
from navbar import create_navbar
import dash_bootstrap_components as dbc
from dash import Dash, html, dcc, Input, Output
import plotly.express as px
import pandas as pd
# Load the source CSVs for the dashboard (bad lines are skipped).
f_sb2021 = pd.read_csv("f_sb2021.csv", on_bad_lines='skip', sep=';')
f_sb2022 = pd.read_csv("f_sb2022.csv", on_bad_lines='skip', sep=';')
C2021 = pd.read_csv("C2021.csv", on_bad_lines='skip', sep=',')
C2022 = pd.read_csv("C2022.csv", on_bad_lines='skip', sep=',')
Delitos_2010_2021 = pd.read_csv("Delitos_2010_2021.csv", on_bad_lines='skip', sep=',')
Violencia_G_2015_2022 = pd.read_csv("Violencia_G_2015_2022.csv", on_bad_lines='skip', sep=',')
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
nav = create_navbar()
# Derived series used by the layout's figures.
nivel_sisben= f_sb2021['nivel_sisben']
grupo_sisben= f_sb2022["Grupo"]
values = Delitos_2010_2021['GENERO'].value_counts()
Genero = Delitos_2010_2021['GENERO'].unique()
values2 = Delitos_2010_2021['DIA_SEMANA'].value_counts()
# NOTE(review): despite the name, `armas` holds DIA_SEMANA (weekday) labels
armas = Delitos_2010_2021['DIA_SEMANA'].unique()
# crimes per (year, month) — count column for the line chart
delitos_ano_mes= pd.DataFrame({'count' : Delitos_2010_2021.groupby( [ "ANO", "MES"] ).size()}).reset_index()
#gb21_sex = f_sb2021.groupby("sexo_persona")['sexo_persona'].count()
#fig = px.histogram(f_sb2021, x=gb21_sex.index, y=gb21_sex, histfunc='sum')
def generate_table(dataframe, max_rows=16):
    """Render the first max_rows rows of a dataframe as a Dash HTML table."""
    columns = list(dataframe.columns)
    header = html.Thead(html.Tr([html.Th(name) for name in columns]))
    row_count = min(len(dataframe), max_rows)
    body_rows = []
    for i in range(row_count):
        cells = [html.Td(dataframe.iloc[i][name]) for name in columns]
        body_rows.append(html.Tr(cells))
    return html.Table([header, html.Tbody(body_rows)])
# Page layout: title, subtitle, a 2x2 table of graphs, then a full-width pie chart.
app.layout=html.Div([
    html.H1('Data Visualization',style={'textAlign':'center'}),
    html.Div([
        html.P('Alcaldia de Bucaramanga',style={'textAlign':'center'}),
    ]),
    html.Div([
        # 2x2 grid: crimes per month, crimes by gender, Sisbén 2021, Sisbén 2022
        html.Table(style={'width':'90%'},
            children=[
                html.Tr(style={'width':'50%'},
                    children=[
                        html.Td(
                            children=[
                                html.H1('Grupo de delitos por mes',
                                    style={'textAlign':'center'}),
                                dcc.Graph(id='linegraph',
                                    figure = px.line(delitos_ano_mes, x="MES", y='count', color='ANO'))
                            ]
                        ),html.Td(
                            children=[
                                html.H1('Delitos por Género',
                                    style={'textAlign':'center'}),
                                dcc.Graph(id='piegraph',
                                    figure = px.pie(Delitos_2010_2021, values=values, names=Genero))
                            ]
                        )
                    ]
                ),
                html.Tr(style={'width':'50%'},
                    children=[
                        html.Td(style={'width':'50%'},
                            children=[
                                html.H1('Nivel de Sisben Año 2021',
                                    style={'textAlign':'center'}),
                                dcc.Graph(id='bargraph',
                                    figure = px.histogram(f_sb2021, x=nivel_sisben, color=nivel_sisben, barmode='group'))
                            ]
                        ),html.Td(style={'width':'50%'},
                            children=[
                                html.H1('Grupo de Sisben Año 2022',
                                    style={'textAlign':'center'}),
                                dcc.Graph(id='bargraph2',
                                    figure = px.histogram(f_sb2022, x=grupo_sisben, color=grupo_sisben, barmode='group'))
                            ]
                        )
                    ]
                ),
            ]
        ),
        # full-width pie: crimes by day of week
        html.Table(style={'width':'90%'},
            children=[html.Tr(
                children=[
                    html.Td(style={'width':'100%'},
                        children=[
                            html.H1('Días de la semana vs Delitos',
                                style={'textAlign':'center'}),
                            dcc.Graph(id='piegraph2',
                                figure = px.pie(Delitos_2010_2021, values=values2, names=armas))
                        ]
                    )
                ]
            ),]),
    ]),
    # End of all content DIV
])
def create_page_home():
    """Compose the home page: the shared navbar stacked above the dashboard layout."""
    layout = html.Div([
        nav,
        #header,
        app.layout
    ])
    return layout
| jeanpierec/ljpiere_projects | DataScience_projects/Proyecto5_DS4ABucaramanga/home.py | home.py | py | 4,956 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
38701948852 | #coding=utf8
import config
import json
import sys, time
py3k = sys.version_info.major > 2
import os.path
import urllib
if py3k:
from urllib import parse as urlparse
else:
import urlparse
def get_one():
    """Return the oldest unprocessed row of the `http` table, or None when the queue is empty."""
    return config.dbconn().fetch_rows('http', condition={'checked': 0}, order="id asc", limit="1", fetchone=True)
def check_key(key):
    """Return True when this parameter name is worth keeping.

    Generic, uninteresting names (timestamps, submit buttons, ...) are
    dropped; the comparison is case-insensitive.
    """
    ignored = ('t', 'r', 'submit')
    return key.lower() not in ignored
def check_value(value, vtype):
    """Return True when this parameter value is worth keeping (arrays are skipped)."""
    return vtype != 'array'
def get_type(key, value):
    """Classify a request-parameter value as array/int/float/url/json/str.

    `value` is either a single string (the usual case, after the caller
    unwraps parse_qs lists) or a list when the parameter was repeated.

    BUGFIX: the original did `value = value[0]`, so every string was
    classified by its FIRST CHARACTER ("12abc" -> int, "1.5" -> int, urls
    and json never detected, "" -> IndexError). The whole string is now
    inspected.
    """
    if isinstance(value, list):
        return 'array'
    if value.isdigit():
        return 'int'
    try:
        float(value)
        return 'float'
    except ValueError:
        pass
    # url check: require both a scheme and a network location
    u = urlparse.urlparse(value)
    if u.scheme and u.netloc:
        return 'url'
    # json check: only containers count, bare scalars stay plain strings
    try:
        j = json.loads(value)
        if isinstance(j, (list, dict)):
            return 'json'
    except ValueError:
        pass
    return 'str'
# Main loop: poll the `http` table for unprocessed requests, explode each
# request's parameters into rows of the `requests` table, then mark it done.
while True:
    http = get_one()
    if not http:
        # queue empty: back off briefly before polling again
        time.sleep(3)
        continue
    req = json.loads(http['req'])
    if req['rtype'] not in ['qs', 'rewrite']:
        # non query-string/rewrite requests are stored as a single special row
        config.dbconn().insert('requests', {'requestid': http['id'], 'method': req['method'], 'key': '', 'type': 'special|'+req['rtype']})
    else:
        # support array like a[]=1&a[]=2
        parsed = urlparse.urlparse(req['uri'])
        get_parts = urlparse.parse_qs(parsed.query)
        if get_parts:
            for k,v in get_parts.items():
                # unwrap single-element lists; repeated params stay lists ('array')
                v = v[0] if len(v) == 1 else v
                vtype = get_type(k, v)
                if check_key(k) and check_value(v, vtype):
                    config.dbconn().insert('requests', {'requestid': http['id'], 'method': "GET", 'key': k, 'type': vtype})
        # no query string, no file extension, deep path: treat path segments
        # from the 4th onward as rewrite-style parameters
        if not parsed.query and not os.path.splitext(parsed.path)[1] and len(parsed.path.split('/')) > 3:
            path_parts = parsed.path.split('/')
            for i in range(3, len(path_parts)):
                vtype = 'rewrite|'+get_type('rewrite', path_parts[i])
                config.dbconn().insert('requests', {'requestid': http['id'], 'method': "GET", 'key': str(i), 'type': vtype})
        if req['method'] == "POST":
            # the body is parsed like a query string (form-encoded payloads)
            post_parts = urlparse.parse_qs(urlparse.urlparse(req['body']).query)
            if post_parts:
                for k,v in post_parts.items():
                    v = v[0] if len(v) == 1 else v
                    vtype = get_type(k, v)
                    if check_key(k) and check_value(v, vtype):
                        config.dbconn().insert('requests', {'requestid': http['id'], 'method': "POST", 'key': k, 'type': vtype})
    config.dbconn().update('http', {'checked': 1}, {'id': http['id']})
{
"api_name": "sys.version_info",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "config.dbconn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urlparse.urlparse",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.loads",
... |
17441173344 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import streamlit as st
import ptitprince as pt
def scatter_plot(df,fig):
    """Streamlit widget: pick X/Y columns and draw a seaborn regression scatter plot."""
    hobbies = []
    for col in df.columns:
        hobbies.append(col)
        print(col)  # debug output to the server console
    st.title(" Scatter Plot")
    hobby = st.selectbox("X-axis: ", hobbies)
    # print the selected hobby
    st.write("You have selected X-axis: ", hobby)
    hobby1 = st.selectbox("Y-axis: ", hobbies)
    st.write("You have selected Y-axis: ", hobby1)
    if (st.button("scatter plot")):
        st.text("scatter plot")
        ax = sns.regplot(x=hobby, y=hobby1, data=df)
        st.pyplot(fig)
def group_histogram(df,fig):
    """Streamlit widget: overlay one RT histogram per TrialType condition.

    NOTE(review): assumes df has TrialType and RT columns — confirm with the
    expected input CSVs; sns.distplot is deprecated in newer seaborn.
    """
    st.title("Grouped Histogram")
    if (st.button("Grouped Histogram")):
        st.text("Grouped Histogram")
        for condition in df.TrialType.unique():
            cond_data = df[(df.TrialType == condition)]
            ax = sns.distplot(cond_data.RT, kde=False)
            ax.set(xlabel='Response Time', ylabel='Frequency')
        st.pyplot(fig)
def bar_plot(df,fig,hobbies):
    """Streamlit widget: pick X/Y columns and draw a seaborn bar plot."""
    st.title("Bar Plot")
    hobby = st.selectbox("X-axis for barplot: ", hobbies)
    # print the selected hobby
    st.write("You have selected X-axis: ", hobby)
    hobby1 = st.selectbox("Y-axis for barplot: ", hobbies)
    st.write("You have selected Y-axis: ", hobby1)
    if (st.button("Bar plot")):
        st.text("Bar plot")
        sns.barplot(x=hobby, y=hobby1, data=df)
        st.pyplot(fig)
def box_plot1(df, fig, hobbies):
    """Streamlit widget: pick X/Y columns and draw a seaborn box plot.

    Parameters mirror the sibling plot helpers: the source dataframe, the
    shared matplotlib figure, and the list of selectable column names.
    """
    st.title("Box Plot")
    hobby = st.selectbox("X-axis for boxplot: ", hobbies)
    # print the selected hobby
    st.write("You have selected X-axis: ", hobby)
    hobby1 = st.selectbox("Y-axis for boxplot: ", hobbies)
    st.write("You have selected Y-axis: ", hobby1)
    if (st.button("Box plot")):
        st.text("Box plot")
        # BUGFIX: the original plotted x=hobby, y=hobby, silently ignoring
        # the user's Y-axis selection; use hobby1 for Y as the other helpers do.
        sns.boxplot(x=hobby, y=hobby1, data=df)
        st.pyplot(fig)
def heat_map(df,fig,hobbies):
    """Streamlit widget: pick a subset of columns and draw a seaborn heatmap of their values."""
    st.title("Heatmap Plot")
    col2 = st.multiselect(
        "Blah:", sorted(list(hobbies)), sorted(list(hobbies))
    )
    if (st.button("Heatmap plot")):
        st.text("Heatmap plot")
        ax = sns.heatmap(df[col2])
        st.pyplot(fig)
def violine_plot(df,fig,hobbies):
    """Streamlit widget: pick X/Y columns and draw a seaborn violin plot.

    NOTE(review): function name typo ("violine") kept — callers use it.
    """
    st.title("Violin Plot")
    hobby = st.selectbox("X-axis for Violinplot: ", hobbies)
    # print the selected hobby
    st.write("X-axis for Violinplot: ", hobby)
    hobby1 = st.selectbox("Y-axis for Violinplot: ", hobbies)
    st.write("You have selected Y-axis: ", hobby1)
    if (st.button("Violin plot")):
        st.text("Violin plot")
        sns.violinplot(x=hobby, y=hobby1, data=df)
        st.pyplot(fig)
def rain_cloudplot(df,fig,hobbies):
    """Streamlit widget: pick X/Y columns and draw a ptitprince raincloud plot (horizontal)."""
    st.title("Raincloud Plot")
    hobby = st.selectbox("X-axis for Raincloudplot: ", hobbies)
    # print the selected hobby
    st.write("You have selected X-axis: ", hobby)
    hobby1 = st.selectbox("Y-axis for Raincloudplot: ", hobbies)
    st.write("You have selected Y-axis: ", hobby1)
    if (st.button("Raincloud plot")):
        st.text("Raincloud plot")
        ax = pt.RainCloud(x=hobby, y=hobby1,
                          data=df,
                          width_viol=.8,
                          width_box=.4,
                          figsize=(12, 8), orient='h',
                          move=.0)
        st.pyplot(fig)
def app():
    """Entry point: load a CSV (typed path or file upload) and render every plot widget.

    Both try blocks are deliberately best-effort: until the user supplies a
    valid file, df is undefined/unreadable and the page just renders empty.
    """
    filename = st.text_input('Enter a file path:')
    try:
        df = pd.read_csv(filename)
    except Exception:
        # IDIOM FIX: was a bare `except: None` — narrowed to Exception and
        # made the no-op explicit; an empty/invalid path simply falls through
        # to the uploader below.
        pass
    uploaded_files = st.file_uploader("Upload CSV", type="csv", accept_multiple_files=True)
    if uploaded_files:
        for file in uploaded_files:
            file.seek(0)
        uploaded_data_read = [pd.read_csv(file) for file in uploaded_files]
        df = pd.concat(uploaded_data_read)
    hobbies = []
    try:
        # single shared figure for all widgets (the original created two,
        # leaving the first one unused)
        fig = plt.figure(figsize=(12, 8))
        for col in df.columns:
            hobbies.append(col)
            print(col)
        st.dataframe(data=df, width=None, height=None)
        scatter_plot(df, fig)
        group_histogram(df, fig)
        bar_plot(df, fig, hobbies)
        heat_map(df, fig, hobbies)
        violine_plot(df, fig, hobbies)
        box_plot1(df, fig, hobbies)
        rain_cloudplot(df, fig, hobbies)
    except Exception:
        # df is unbound until a file is provided; the NameError lands here
        pass
| imsanjoykb/Data-Analytics-Tool-Development | apps/graphs.py | graphs.py | py | 4,249 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "streamlit.selectbox",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.selectb... |
14150647036 | import json
import re
from requests_toolbelt import MultipartEncoder
from todayLoginService import TodayLoginService
from liteTools import *
class AutoSign:
    # Initialize the sign-in helper from an authenticated login service.
    def __init__(self, todayLoginService: TodayLoginService, userInfo):
        """Bind the logged-in HTTP session/host and the per-user config;
        taskInfo/task/form/fileName are state filled in by later steps."""
        self.session = todayLoginService.session
        self.host = todayLoginService.host
        self.userInfo = userInfo
        self.taskInfo = None
        self.task = None
        self.form = {}
        self.fileName = None
    # Fetch the day's sign-in task list and select the task to handle.
    def getUnSignTask(self):
        """Query the sign-in list, filter it by userInfo['signLevel'], and pick
        either the first task (title == 0) or the one whose name matches
        userInfo['title']; stores and returns self.taskInfo or raises TaskError."""
        LL.log(1, '获取未签到的任务')
        headers = self.session.headers
        headers['Content-Type'] = 'application/json'
        # first request only obtains the cookies (MOD_AUTH_CAS)
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'
        self.session.post(url, headers=headers,
                          data=json.dumps({}), verify=False)
        # second request actually returns the concrete tasks
        res = self.session.post(url, headers=headers,
                                data=json.dumps({}), verify=False)
        res = DT.resJsonEncode(res)
        signLevel = self.userInfo.get('signLevel', 1)
        if signLevel >= 0:
            taskList = res['datas']['unSignedTasks']  # unsigned tasks
        if signLevel >= 1:
            taskList += res['datas']['leaveTasks']  # tasks not requiring sign-in
        if signLevel == 2:
            taskList += res['datas']['signedTasks']  # already-signed tasks
        # check whether there are any tasks at all
        LL.log(1, '获取到的签到任务列表', taskList)
        if len(taskList) < 1:
            LL.log(1, '签到任务列表为空')
            raise TaskError('签到任务列表为空')
        # automatically take the first task when title == 0
        if self.userInfo['title'] == 0:
            latestTask = taskList[0]
            self.taskName = latestTask['taskName']
            LL.log(1, '最后一个未签到的任务', latestTask['taskName'])
            self.taskInfo = {'signInstanceWid': latestTask['signInstanceWid'],
                             'signWid': latestTask['signWid'], 'taskName': latestTask['taskName']}
            return self.taskInfo
        # otherwise look for the task whose title matches the config
        for righttask in taskList:
            if righttask['taskName'] == self.userInfo['title']:
                self.taskName = righttask['taskName']
                LL.log(1, '匹配标题的任务', righttask['taskName'])
                self.taskInfo = {'signInstanceWid': righttask['signInstanceWid'],
                                 'signWid': righttask['signWid'], 'taskName': righttask['taskName']}
                return self.taskInfo
        LL.log(1, '没有匹配标题的任务')
        raise TaskError('没有匹配标题的任务')
    # Fetch the details of a previously signed instance of the current task.
    def getHistoryTaskInfo(self):
        '''Walk the sign-in months (newest first) looking for a signed task whose
        signWid matches self.taskInfo; on a hit, fetch its detail, normalize the
        fields, cache it in self.historyTaskInfo and return it. Returns a
        not-found message string otherwise.'''
        headers = self.session.headers
        headers['Content-Type'] = 'application/json;charset=UTF-8'
        # fetch the sign-in month calendar
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuIntervalMonths'
        res = self.session.post(url, headers=headers,
                                data=json.dumps({}), verify=False)
        res = DT.resJsonEncode(res)
        monthList = [i['id'] for i in res['datas']['rows']]
        monthList.sort(reverse=True)  # months sorted descending (newest first)
        # iterate month by month
        for month in monthList:
            # fetch that month's sign-in records
            req = {"statisticYearMonth": month}
            url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosByWeekMonth'
            res = self.session.post(
                url, headers=headers, data=json.dumps(req), verify=False)
            res = DT.resJsonEncode(res)
            monthSignList = list(res['datas']['rows'])
            # walk each day of the month, newest day first
            monthSignList.sort(
                key=lambda x: x['dayInMonth'], reverse=True)  # days sorted descending
            for daySignList in monthSignList:
                # look for a signed task matching the current one
                for task in daySignList['signedTasks']:
                    if task['signWid'] == self.taskInfo['signWid']:
                        # match found: start updating the form from history
                        historyTaskId = {
                            "wid": task['signInstanceWid'], "content": task['signWid']}
                        # refresh the cookie
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getUnSeenQuestion'
                        self.session.post(url, headers=headers, data=json.dumps(
                            historyTaskId), verify=False)
                        # fetch the historical task detail
                        historyTaskId = {
                            "signInstanceWid": task['signInstanceWid'], "signWid": task['signWid']}
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
                        res = self.session.post(
                            url, headers=headers, data=json.dumps(historyTaskId), verify=False)
                        res = DT.resJsonEncode(res)
                        # additional requests mimicking the real client
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/queryNotice'
                        self.session.post(url, headers=headers,
                                          data=json.dumps({}), verify=False)
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getQAconfigration'
                        self.session.post(url, headers=headers,
                                          data=json.dumps({}), verify=False)
                        # normalize the returned data
                        result = res['datas']
                        result['longitude'] = float(result['longitude'])
                        result['latitude'] = float(result['latitude'])
                        self.userInfo['lon'] = result['longitude']
                        self.userInfo['lat'] = result['latitude']
                        result['photograph'] = result['photograph'] if len(
                            result['photograph']) != 0 else ""
                        result['extraFieldItems'] = [{"extraFieldItemValue": i['extraFieldItem'],
                                                      "extraFieldItemWid": i['extraFieldItemWid']} for i in result['signedStuInfo']['extraFieldItemVos']]
                        # cache and return the result
                        LL.log(1, '历史签到情况的详情', result)
                        self.historyTaskInfo = result
                        return result
        # nothing matched across all months
        LL.log(2, "没有找到匹配的历史任务")
        return "没有找到匹配的历史任务"
    def getDetailTask(self):
        """Fetch the full detail of the selected task (self.taskInfo) and store it in self.task."""
        LL.log(1, '获取具体的签到任务详情')
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
        headers = self.session.headers
        headers['Content-Type'] = 'application/json;charset=UTF-8'
        res = self.session.post(url, headers=headers, data=json.dumps(
            self.taskInfo), verify=False)
        res = DT.resJsonEncode(res)
        LL.log(1, '签到任务的详情', res['datas'])
        self.task = res['datas']
    # Upload the configured photo to Aliyun OSS using a server-issued policy.
    def uploadPicture(self):
        """Request an OSS upload policy, POST the photo as multipart form data
        to the policy host, and remember the generated object key in self.fileName."""
        url = f'{self.host}wec-counselor-sign-apps/stu/oss/getUploadPolicy'
        res = self.session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps({'fileType': 1}),
                                verify=False)
        datas = DT.resJsonEncode(res).get('datas')
        fileName = datas.get('fileName')
        policy = datas.get('policy')
        accessKeyId = datas.get('accessid')
        signature = datas.get('signature')
        policyHost = datas.get('host')
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0'
        }
        multipart_encoder = MultipartEncoder(
            fields={  # field layout required by the OSS policy upload
                'key': fileName, 'policy': policy, 'OSSAccessKeyId': accessKeyId, 'success_action_status': '200',
                'signature': signature,
                'file': ('blob', open(RT.choicePhoto(self.userInfo['photo']), 'rb'), 'image/jpg')
            })
        headers['Content-Type'] = multipart_encoder.content_type
        res = self.session.post(url=policyHost,
                                headers=headers,
                                data=multipart_encoder)
        self.fileName = fileName
    # Resolve the uploaded picture's preview URL from its OSS key.
    def getPictureUrl(self):
        """Exchange self.fileName (the OSS object key) for a preview URL."""
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/previewAttachment'
        params = {'ossKey': self.fileName}
        res = self.session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(params),
                                verify=False)
        photoUrl = DT.resJsonEncode(res).get('datas')
        return photoUrl
    # Fill the submission form, either from a historical sign-in or from user config.
    def fillForm(self):
        """Populate self.form for submission. When userInfo['getHistorySign'] is
        set the fields are copied from the latest matching historical sign-in;
        otherwise they come from self.task plus the user's configured answers."""
        LL.log(1, '填充表单')
        if self.userInfo['getHistorySign']:
            self.getHistoryTaskInfo()
            hti = self.historyTaskInfo
            self.form['isNeedExtra'] = self.task['isNeedExtra']
            self.form['signInstanceWid'] = self.task['signInstanceWid']
            self.form['signPhotoUrl'] = hti['photograph']  # WARNING: uncertain field mapping
            self.form['extraFieldItems'] = hti['extraFieldItems']
            self.form['longitude'], self.form['latitude'] = RT.locationOffset(
                hti['longitude'], hti['latitude'], self.userInfo['global_locationOffsetRange'])
            # check whether the position lies inside any allowed sign-in area
            self.form['isMalposition'] = 1
            for place in self.task['signPlaceSelected']:
                if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
                    self.form['isMalposition'] = 0
                    break
            self.form['abnormalReason'] = hti.get(
                'abnormalReason', '回家')  # WARNING: not observed in historical data
            self.form['position'] = hti['signAddress']
            self.form['uaIsCpadaily'] = True
            self.form['signVersion'] = '1.0.0'
        else:
            # does this sign-in require a photo?
            if self.task['isPhoto'] == 1:
                self.uploadPicture()
                self.form['signPhotoUrl'] = self.getPictureUrl()
            else:
                self.form['signPhotoUrl'] = ''
            # does this sign-in require extra form fields?
            self.form['isNeedExtra'] = self.task['isNeedExtra']
            if self.task['isNeedExtra'] == 1:
                extraFields = self.task['extraField']
                userItems = self.userInfo['forms']
                extraFieldItemValues = []
                for i in range(len(extraFields)):
                    userItem = userItems[i]['form']
                    extraField = extraFields[i]
                    if self.userInfo['checkTitle'] == 1:
                        if userItem['title'] != extraField['title']:
                            raise Exception(
                                f'\r\n第{i + 1}个配置出错了\r\n您的标题为:{userItem["title"]}\r\n系统的标题为:{extraField["title"]}')
                    extraFieldItems = extraField['extraFieldItems']
                    flag = False
                    for extraFieldItem in extraFieldItems:
                        # remember the previously selected value for the error message
                        if extraFieldItem['isSelected']:
                            data = extraFieldItem['content']
                        if extraFieldItem['content'] == userItem['value']:
                            flag = True
                            extraFieldItemValue = {'extraFieldItemValue': userItem['value'],
                                                   'extraFieldItemWid': extraFieldItem['wid']}
                            # "other": free-text extra option
                            if extraFieldItem['isOtherItems'] == 1:
                                flag = True
                                extraFieldItemValue = {'extraFieldItemValue': userItem['value'],
                                                       'extraFieldItemWid': extraFieldItem['wid']}
                            extraFieldItemValues.append(extraFieldItemValue)
                    if not flag:
                        raise Exception(
                            f'\r\n第{ i + 1 }个配置出错了\r\n表单未找到你设置的值:{userItem["value"]}\r\n,你上次系统选的值为:{ data }')
                self.form['extraFieldItems'] = extraFieldItemValues
            self.form['signInstanceWid'] = self.task['signInstanceWid']
            self.form['longitude'] = self.userInfo['lon']
            self.form['latitude'] = self.userInfo['lat']
            # check whether the position lies inside any allowed sign-in area
            self.form['isMalposition'] = 1
            for place in self.task['signPlaceSelected']:
                if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
                    self.form['isMalposition'] = 0
                    break
            self.form['abnormalReason'] = self.userInfo['abnormalReason']
            self.form['position'] = self.userInfo['address']
            self.form['uaIsCpadaily'] = True
            self.form['signVersion'] = '1.0.0'
        LL.log(1, "填充完毕的表单", self.form)
    def getSubmitExtension(self):
        '''Build the encrypted Cpdaily-Extension header value, the encrypted
        body string, and the signed submitData dict used by submitForm().'''
        extension = {
            "lon": self.userInfo['lon'],
            "lat": self.userInfo['lat'],
            "model": self.userInfo['model'],
            "appVersion": self.userInfo['appVersion'],
            "systemVersion": self.userInfo['systemVersion'],
            "userId": self.userInfo['username'],
            "systemName": self.userInfo['systemName'],
            "deviceId": self.userInfo['deviceId']
        }
        self.cpdailyExtension = CpdailyTools.encrypt_CpdailyExtension(
            json.dumps(extension))
        self.bodyString = CpdailyTools.encrypt_BodyString(
            json.dumps(self.form))
        self.submitData = {
            "lon": self.userInfo['lon'],
            "version": self.userInfo['signVersion'],
            "calVersion": self.userInfo['calVersion'],
            "deviceId": self.userInfo['deviceId'],
            "userId": self.userInfo['username'],
            "systemName": self.userInfo['systemName'],
            "bodyString": self.bodyString,
            "lat": self.userInfo['lat'],
            "systemVersion": self.userInfo['systemVersion'],
            "appVersion": self.userInfo['appVersion'],
            "model": self.userInfo['model'],
        }
        # the request signature must be computed over the assembled payload
        self.submitData['sign'] = CpdailyTools.signAbstract(self.submitData)
    # Submit the completed sign-in form.
    def submitForm(self):
        """POST the encrypted/signed payload to submitSign and return a
        '[message]taskName' status string."""
        LL.log(1, '提交签到信息')
        self.getSubmitExtension()
        headers = {
            'User-Agent': self.session.headers['User-Agent'],
            'CpdailyStandAlone': '0',
            'extension': '1',
            'Cpdaily-Extension': self.cpdailyExtension,
            'Content-Type': 'application/json; charset=utf-8',
            'Accept-Encoding': 'gzip',
            # Host header extracted from the configured base URL
            'Host': re.findall('//(.*?)/', self.host)[0],
            'Connection': 'Keep-Alive'
        }
        LL.log(1, '即将提交的信息', headers, self.submitData)
        res = self.session.post(f'{self.host}wec-counselor-sign-apps/stu/sign/submitSign', headers=headers,
                                data=json.dumps(self.submitData), verify=False)
        res = DT.resJsonEncode(res)
        LL.log(1, '提交后返回的信息', res['message'])
        return '[%s]%s' % (res['message'], self.taskInfo['taskName'])
| zuiqiangdexianyu/ruoli-sign-optimization | actions/autoSign.py | autoSign.py | py | 16,038 | python | en | code | null | github-code | 6 | [
{
"api_name": "todayLoginService.TodayLoginService",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "todayLoginService.session",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "todayLoginService.host",
"line_number": 14,
"usage_type": "attribute"
... |
72729319867 | # External dependencies
import openai
import io
import os
import tempfile
from datetime import datetime
from flask import render_template, request, url_for, redirect, flash, Response, session, send_file, Markup
from flask_login import login_user, login_required, logout_user, current_user
from flask_mail import Message
# Internal dependencies
from models import User, Log
from forms import SignupForm, LoginForm, RequestResetForm, ResetPasswordForm
from app import app, db, bcrypt, mail, login_manager, limiter
from prompt_template import prompt_template
# Security measures for the Heroku production environment
@app.before_request
def enforce_https():
    """Permanently redirect plain-HTTP requests to HTTPS (production only).

    Heroku terminates TLS at the router and reports the original scheme via
    the X-Forwarded-Proto header; debug runs are left untouched.
    """
    forwarded_proto = request.headers.get('X-Forwarded-Proto')
    if app.debug or forwarded_proto != 'http':
        return None
    secure_url = request.url.replace('http://', 'https://', 1)
    return redirect(secure_url, code=301)
@app.after_request
def set_hsts_header(response):
if request.url.startswith('https://'):
response.headers['Strict-Transport-Security'] = 'max-age=31536000' # One year
return response
@login_manager.user_loader
@limiter.limit("10/minute")
def load_user(user_id):
    """Flask-Login callback: resolve a session's stored user id to a User row.

    NOTE(review): rate-limiting the user loader throttles every authenticated
    request, not just logins — confirm this is intended.
    """
    return User.query.get(int(user_id))
@app.route('/cleverletter/', methods=['GET', 'POST'])
@limiter.limit("10/minute")
def signup():
    """Registration page (app root): create an account and redirect to login.

    Already-authenticated users are bounced straight to the generator.
    """
    if current_user.is_authenticated:
        return redirect(url_for('generator'))
    form = SignupForm()
    if form.validate_on_submit():
        # never store the plaintext password — bcrypt-hash it first
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created! You are now able to log in', 'success')
        return redirect(url_for('login'))
    return render_template('signup.html', title='Sign Up', form=form)
@app.route('/cleverletter/login', methods=['GET', 'POST'])
@limiter.limit("10/minute")
def login():
    """Login page: verify credentials and start an (optionally remembered) session."""
    if current_user.is_authenticated:
        return redirect(url_for('generator'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # single vague failure message — does not reveal which field was wrong
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            return redirect(url_for('dashboard'))
        else:
            flash('Login Unsuccessful. Please make sure you used the correct credentials', 'warning')
    return render_template('login.html', form=form)
@app.route('/cleverletter/logout')
@login_required
def logout():
    """End the current session and send the user back to the login page."""
    logout_user()
    login_page = url_for('login')
    return redirect(login_page)
@app.route('/cleverletter/generator', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def generator():
    """Main cover-letter page: generate, clear, or download a letter.

    Three POST actions are distinguished by the submit button name
    ('generate', 'clear', 'download'); a plain GET just renders the form.
    """
    user_authenticated = current_user.is_authenticated
    # Form fields default to empty strings so the template can re-render them.
    response = ""
    job_title = ""
    job_description = ""
    employer_name = ""
    employer_description = ""
    additional_instructions = ""
    if not current_user.is_authenticated:
        flash(Markup('<a href="{}">Sign up</a> or <a href="{}">Login</a> to keep your CV and cover letter history'.format(url_for('signup'), url_for('login'))), 'warning')
    # Retrieve CV from session for unauthenticated users or from the database for authenticated users
    if current_user.is_authenticated and current_user.cv:
        cv = current_user.cv
    else:
        cv = session.get('cv', "Your CV goes here")
    if request.method == 'POST':
        job_title = request.form.get('job_title')
        job_description = request.form.get('job_description')
        employer_name = request.form.get('employer_name')
        employer_description = request.form.get('employer_description')
        additional_instructions = request.form.get('additional_instructions')
        session_cv = request.form.get('cv')  # Assuming the CV is submitted as a form field
        # Update CV in session for unauthenticated users
        if not current_user.is_authenticated and session_cv:
            session['cv'] = session_cv
            cv = session_cv
        if 'generate' in request.form:
            if cv == "Your CV goes here":
                # CV was never set: bail out before calling the API.
                # NOTE(review): this path renders dashboard.html while every
                # other path in this view renders generator.html — confirm.
                flash('Please set your CV before generating a cover letter.', 'warning')
                return render_template('dashboard.html', job_title=job_title, job_description=job_description,
                                       employer_name=employer_name, employer_description=employer_description,
                                       additional_instructions=additional_instructions)
            prompt = prompt_template.format(cv=cv, job_title=job_title, job_description=job_description,
                                            employer_name=employer_name, employer_description=employer_description,
                                            additional_instructions=additional_instructions)
            try:
                response = get_completion(prompt)
            except Exception as e:
                flash('Error generating cover letter: {}'.format(str(e)), 'error')
                return redirect(url_for('generator'))
            # Save the response in the user's session
            session['response'] = response
            # Create a log entry only for authenticated users
            if current_user.is_authenticated:
                log = Log(job_title=job_title, employer_name=employer_name, user_id=current_user.id)
                db.session.add(log)
                try:
                    db.session.commit()
                except Exception as e:
                    flash('Error saving log: {}'.format(str(e)), 'error')
                    return redirect(url_for('generator'))
            # Save the response to a txt file in a temporary directory
            filename = '{} - {} - {}.txt'.format(employer_name, job_title, datetime.now().strftime('%d-%b-%Y'))
            temp_dir = tempfile.gettempdir()
            file_path = os.path.join(temp_dir, filename)
            with open(file_path, 'w') as f:
                f.write(response)
            # Save the full path in the session so 'download' can find it later.
            session['filename'] = file_path
        elif 'clear' in request.form:
            # Reset every form field and drop the cached response.
            job_title = ""
            job_description = ""
            employer_name = ""
            employer_description = ""
            additional_instructions = ""
            session['response'] = ""
        elif 'download' in request.form:
            # Get the filename from the session
            file_path = session.get('filename')
            if file_path and os.path.exists(file_path):
                download_response = send_file(file_path, as_attachment=True)
                os.remove(file_path)  # delete the file after sending it
                return download_response
            else:
                flash('No cover letter available for download.', 'warning')
                return redirect(url_for('generator'))
    return render_template('generator.html', response=response, job_title=job_title, job_description=job_description,
                           employer_name=employer_name, employer_description=employer_description,
                           additional_instructions=additional_instructions, cv=cv, user_authenticated=user_authenticated, user=current_user)
@app.route('/cleverletter/dashboard', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def dashboard():
    """Show/update the user's CV and, for logged-in users, their generation logs."""
    # Initialize CV with a default value
    cv = "Your CV goes here"
    logs = None
    user_authenticated = current_user.is_authenticated
    if request.method == 'POST':
        # Handle CV form submission
        new_cv = request.form.get('cv')
        if new_cv:
            if current_user.is_authenticated:
                # Persist the CV on the account.
                current_user.cv = new_cv
                db.session.commit()
                flash('CV updated successfully.', 'success')
            else:
                # Anonymous users only get session-scoped storage.
                session['cv'] = new_cv
                flash('CV saved to session successfully.', 'success')
    # Fetch CV from the authenticated user or from the session
    if current_user.is_authenticated:
        cv = current_user.cv if current_user.cv else cv
        # Fetch the logs from the database, newest first, 10 per page.
        page = request.args.get('page', 1, type=int)
        per_page = 10
        logs = Log.query.filter_by(user_id=current_user.id).order_by(Log.timestamp.desc()).paginate(page=page, per_page=per_page)
    else:
        cv = session.get('cv', cv)  # Use the session value if available, otherwise use the default
    return render_template('dashboard.html', user_authenticated=user_authenticated, user=current_user, cv=cv, logs=logs)
@app.route('/cleverletter/reset_request', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def reset_request():
    """Ask for an e-mail address and send a password-reset link to it."""
    form = RequestResetForm()
    message = None
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is not None:
            send_reset_email(account)
            message = 'An e-mail has been sent with instructions to reset your password.'
    return render_template('reset_request.html', form=form, message=message)
def send_reset_email(user):
    """E-mail *user* a password-reset link built from a signed token."""
    token = user.get_reset_token()
    msg = Message('Password Reset Request',
                  sender='noreply@demo.com',
                  recipients=[user.email])
    # _external=True makes url_for produce an absolute URL for the e-mail.
    msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
    mail.send(msg)
@app.route('/cleverletter/reset_request/<token>', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def reset_token(token):
    """Verify a password-reset token and let its owner set a new password.

    Invalid or expired tokens bounce back to the reset-request form.
    """
    user = User.verify_reset_token(token)
    if not user:
        # If the token is invalid or expired, redirect the user to the
        # `reset_request` route.
        return redirect(url_for('reset_request'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        # Decode to str so the stored hash matches the format written by
        # signup() (which calls .decode('utf-8')); previously the raw bytes
        # object was stored here, making the two write paths inconsistent.
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user.password = hashed_password
        db.session.commit()
        return redirect(url_for('login'))
    return render_template('reset_token.html', form=form)
@app.route('/cleverletter/delete_account', methods=['POST'])
@limiter.limit("5/minute")
@login_required
def delete_account():
    """Permanently remove the logged-in user's account."""
    account = User.query.get(current_user.id)
    db.session.delete(account)
    db.session.commit()
    flash('Your account has been deleted.', 'success')
    return redirect(url_for('signup'))
def get_completion(prompt, model="gpt-3.5-turbo"):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    model: chat model name; defaults to gpt-3.5-turbo.
    """
    # Always use the development API key
    openai.api_key = app.config['OPENAI_API_KEY_DEV']
    chat_history = [{"role": "user", "content": prompt}]
    completion = openai.ChatCompletion.create(
        model=model,
        messages=chat_history,
        temperature=0.5,  # degree of randomness of the model's output
    )
    return completion.choices[0].message["content"]
{
"api_name": "flask.request.headers.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "ap... |
33846054954 | from jwst.stpipe import Step
from jwst import datamodels
from ..datamodels import TMTDarkModel
from . import dark_sub
from ..utils.subarray import get_subarray_model
__all__ = ["DarkCurrentStep"]
class DarkCurrentStep(Step):
    """
    DarkCurrentStep: Performs dark current correction by subtracting
    dark current reference data from the input science data model.
    """

    # stpipe option: optional path to save the subarray-matched dark model.
    spec = """
        dark_output = output_file(default = None) # Dark model subtracted
    """

    # Reference file types requested via get_reference_file().
    reference_file_types = ["dark"]

    def process(self, input):
        """Apply the dark correction to *input* and return the result model.

        When no DARK reference file is available the input is copied and
        flagged with meta.cal_step.dark = 'SKIPPED'.
        """
        # Open the input data model
        with datamodels.open(input) as input_model:
            # Get the name of the dark reference file to use
            self.dark_name = self.get_reference_file(input_model, "dark")
            self.log.info("Using DARK reference file %s", self.dark_name)
            # Check for a valid reference file
            if self.dark_name == "N/A":
                self.log.warning("No DARK reference file found")
                self.log.warning("Dark current step will be skipped")
                result = input_model.copy()
                result.meta.cal_step.dark = "SKIPPED"
                return result
            # Create name for the intermediate dark, if desired.
            dark_output = self.dark_output
            if dark_output is not None:
                dark_output = self.make_output_path(
                    None, basepath=dark_output, ignore_use_model=True
                )
            # Open the dark ref file data model - based on Instrument
            dark_model = TMTDarkModel(self.dark_name)
            # Trim the dark to the science subarray before subtracting.
            dark_model = get_subarray_model(input_model, dark_model)
            # Do the dark correction
            result = dark_sub.do_correction(input_model, dark_model, dark_output)
            dark_model.close()
            return result
| oirlab/iris_pipeline | iris_pipeline/dark_current/dark_current_step.py | dark_current_step.py | py | 1,857 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jwst.stpipe.Step",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "jwst.datamodels.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "jwst.datamodels",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datamodels.TMTD... |
2856090188 | import unittest
from conans.test.tools import TestClient
from conans.util.files import load
import os
import platform
class ConanEnvTest(unittest.TestCase):
    """Checks that env_info declared by packages ends up in the scripts
    produced by the virtualenv generator, with the consumer's values taking
    precedence over its dependency's."""

    def conan_env_deps_test(self):
        # Export a dependency ("Hello") that declares several env vars.
        client = TestClient()
        conanfile = '''
from conans import ConanFile

class HelloConan(ConanFile):
    name = "Hello"
    version = "0.1"
    def package_info(self):
        self.env_info.var1="bad value"
        self.env_info.var2.append("value2")
        self.env_info.var3="Another value"
        self.env_info.path = "/dir"
'''
        files = {}
        files["conanfile.py"] = conanfile
        client.save(files)
        client.run("export lasote/stable")
        # Export a consumer ("Hello2") that overrides var1 and extends var2.
        conanfile = '''
from conans import ConanFile

class HelloConan(ConanFile):
    name = "Hello2"
    version = "0.1"
    def config(self):
        self.requires("Hello/0.1@lasote/stable")
    def package_info(self):
        self.env_info.var1="good value"
        self.env_info.var2.append("value3")
'''
        files["conanfile.py"] = conanfile
        client.save(files, clean_first=True)
        client.run("export lasote/stable")
        client.run("install Hello2/0.1@lasote/stable --build -g virtualenv")
        ext = "bat" if platform.system() == "Windows" else "sh"
        # Both scripts must have been generated in the working folder.
        self.assertTrue(os.path.exists(os.path.join(client.current_folder, "activate.%s" % ext)))
        self.assertTrue(os.path.exists(os.path.join(client.current_folder, "deactivate.%s" % ext)))
        activate_contents = load(os.path.join(client.current_folder, "activate.%s" % ext))
        deactivate_contents = load(os.path.join(client.current_folder, "deactivate.%s" % ext))
        # The consumer's value wins over the dependency's for var1.
        self.assertNotIn("bad value", activate_contents)
        self.assertIn("var1=good value", activate_contents)
        # List-valued vars are prepended using the platform path separator.
        if platform.system() == "Windows":
            self.assertIn("var2=value3;value2;%var2%", activate_contents)
        else:
            self.assertIn("var2=value3:value2:$var2", activate_contents)
        self.assertIn("Another value", activate_contents)
        self.assertIn("PATH=/dir", activate_contents)
        # Deactivation must reference both variables to restore them.
        self.assertIn('var1=', deactivate_contents)
        self.assertIn('var2=', deactivate_contents)
| AversivePlusPlus/AversivePlusPlus | tools/conan/conans/test/integration/conan_env_test.py | conan_env_test.py | py | 2,180 | python | en | code | 31 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "conans.test.tools.TestClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os... |
72332661307 | #!/usr/bin/env python3
import os
import configparser
from mongoengine.connection import connect
from .data_model import Post
from .render_template import render
from .mailgun_emailer import send_email
def email_last_scraped_date():
    """E-mail a digest for the most recent date the scraper ran.

    Reads the MongoDB URI from settings.cfg, finds the newest post's
    date_str, renders the e-mail template and sends it via Mailgun.
    Returns early (sending nothing) when no posts exist.
    """
    ## mongodb params (using configparser)
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.cfg'))
    mlab_uri = config.get('MongoDB', 'mlab_uri')

    # connect to db
    connect('sivji-sandbox', host=mlab_uri)

    ## get the last date the webscraper was run
    last_post = Post.objects().fields(date_str=1).order_by('-date_str').first()
    if last_post is None:
        # Empty collection: the original loop left day_to_pull unbound and
        # the code below raised NameError. Bail out instead.
        return
    day_to_pull = last_post.date_str

    ## pass in variables, render template, and send
    context = {
        'day_to_pull': day_to_pull,
        'Post': Post,
    }
    html = render("template.html", context)
    send_email(html)
| alysivji/reddit-top-posts-scrapy | top_post_emailer/__init__.py | __init__.py | py | 917 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",... |
25575181895 | from typing import List
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate *nums* right by *k* steps, in place.

        LeetCode 189 specifies an in-place rotation returning None; the
        original built the rotated list but never wrote it back to *nums*.
        The rotated list is still returned for backward compatibility with
        existing callers that use the return value.
        """
        n = len(nums)
        if n == 0:
            return nums  # nothing to rotate; also avoids k % 0 below
        k %= n  # support k larger than len(nums)
        rotated = [nums[(i - k) % n] for i in range(n)]
        nums[:] = rotated  # honor the declared in-place contract
        return rotated
# Quick manual check: rotate a sample list right by 3 and print the result.
s = Solution()
l = [1,2,3,4,5,6,7]
x = s.rotate(l,3)
print(x)
{
"api_name": "typing.List",
"line_number": 3,
"usage_type": "name"
}
] |
# Import the libraries
import cv2
import numpy as np
import math as m
from matplotlib import pyplot as plt

# -- PRE-PROCESSING --
# Read the image
nimg = 'image1'  # Change 'image1' for the name of your image
image = cv2.imread(nimg + '.jpg')

# Extract the RGB layers of the image (OpenCV loads channels as B, G, R)
rgB = np.matrix(image[:,:,0])  # Blue
rGb = np.matrix(image[:,:,1])  # Green
Rgb = np.matrix(image[:,:,2])  # Red

# Define the combination RGB: |G - B| highlights the region of interest
II = cv2.absdiff(rGb,rgB)
I = II*255  # scale the difference up before thresholding
cv2.imshow('Images with layers extracted', I)
cv2.waitKey(0)

# Initial binarization of the image (two passes over every pixel)
[fil, col] = I.shape
for o in range(0,fil):
    for oo in range(0,col):
        if I[o, oo]<80:  # Pixel less than 80 will be 0
            I[o,oo]=0
for o in range(0,fil):
    for oo in range(0,col):
        if I[o, oo]>0:  # Pixel more than 0 will be 1
            I[o,oo]=1

# Morphological transformations
# Create square strels: se for closing and se2 for dilation
se = np.ones((50, 50), np.uint8)
se2 = np.ones((10, 10), np.uint8)
closing = cv2.morphologyEx(I,cv2.MORPH_CLOSE,se)  # Closing
dilation = cv2.dilate(closing,se2,1)  # Dilation

# Find the contours
contours,hierarchy=cv2.findContours(dilation,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Extract the contours
cnt = contours[:]
num = len(cnt)
# print(num)
# print(contours)
# print(hierarchy)

# Calculate the bigger contour (widest bounding rect, tracked in Max=[index, width])
box = np.zeros((num,4))
for j in range(0, num):
    box[j,:]=cv2.boundingRect(cnt[j])
L = np.zeros((num,4))
Max=[0,0]
for j in range(0, num):
    L[j,:]=box[j]
    if L[j,2]>Max[1]:
        Max=[j,L[j,2]]
BOX = box[Max[0],:]

# Mask: crop the original image to the bounding box (x, y, w, h) found above
b = image[int(BOX[1]):int(BOX[1]+BOX[3]),int(BOX[0]):int(BOX[0]+BOX[2]),:]

# -- SEGMENTATION --
[fil,col,cap] = b.shape
# Extract the RGB layers of the image with the mask
rgB = b[:,:,0]  # Blue
rGb = b[:,:,1]  # Green
Rgb = b[:,:,2]  # Red

# Normalize the layers to [0, 1]
R = Rgb/255.0
G = rGb/255.0
B = rgB/255.0

# Build the color K space (K channel of CMYK: 1 - max(R, G, B))
K = np.zeros((fil,col))  # Black layer
for o in range(0,fil):
    for oo in range(0,col):
        MAX = max(R[o,oo],G[o,oo],B[o,oo])  # Calculate the maximum value R-G-B
        K[o,oo] = 1-MAX

# Save the image in .bmp format
cv2.imwrite('imgbmp_' + nimg + '.bmp', K)
# Read the image
k = cv2.imread('imgbmp_' + nimg + '.bmp')

# Edge detection
# NOTE(review): cv2.Laplacian is applied here, not the Canny detector the
# original comment claimed — confirm which edge detector is intended.
BW1 = cv2.Laplacian(k, cv2.CV_8UC1)
# Extract layers (sum the three channels into one edge map)
imgk = BW1[:,:,0]+BW1[:,:,1]+BW1[:,:,2]
# Save the image
cv2.imwrite('result_' + nimg + '.png', imgk*255)
| selenebpradop/basic_exercises-computer_vision | contours_of_an_image_v2.py | contours_of_an_image_v2.py | py | 2,461 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number"... |
74750166267 | from src.components import summarizer
from celery import Celery
from celery.utils.log import get_task_logger
from EmailSender import send_email
logger = get_task_logger(__name__)
celery = Celery(
__name__, backend="redis://127.0.0.1:6379", broker="redis://127.0.0.1:6379"
)
@celery.task(name="summarizer")
def GmailSummarizer(gmails, email_address):
responses = []
for gmail in gmails:
gmail_summary = summarizer.summarize(gmail)
responses.append(gmail_summary)
send_email(responses, email_address)
return True
"""
run celery and also redis
# celery -A flask_celery.celery worker -l info --pool=solo
Compile Celery with --pool=solo argument. #IMP
# celery -A flask_celery.celery worker -l info --pool=solo
Example: celery -A your-application worker -l info --pool=solo
"""
| SVijayB/Gist | scripts/flask_celery.py | flask_celery.py | py | 816 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "celery.utils.log.get_task_logger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "celery.Celery",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "src.components.summarizer.summarize",
"line_number": 16,
"usage_type": "call"
},
{
"... |
25293805211 | import unittest
from task import fix_encoding
expected_content = """Roses are räd.
Violets aren't blüe.
It's literally in the name.
They're called violets.
"""
filename = "example.txt"
output = "output.txt"
class TestCase(unittest.TestCase):

    def setUp(self) -> None:
        # Recreate the fixture file with the expected content before each test.
        with open(filename, "w") as f:
            f.write(expected_content)

    def test_fix_encoding(self):
        # NOTE(review): fix_encoding writes to *output*, but the assertion
        # re-reads *filename* — which setUp just filled with expected_content,
        # so this passes even if fix_encoding does nothing. Confirm whether
        # the check should read *output* instead.
        fix_encoding(filename, output)
        with open(filename, "r", encoding="utf-8") as file:
            actual_content = file.read()
        self.assertEqual(actual_content, expected_content, "wrong answer")
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "task.fix_encoding",
"line_number": 21,
"usage_type": "call"
}
] |
6146581577 | import datetime
import pyttsx3
import speech_recognition as sr
import wikipedia
import webbrowser
import pywhatkit
import time
import threading
import newsapi
import random
maquina = pyttsx3.init()
voz = maquina.getProperty('voices')
maquina.setProperty('voice', voz[1].id)
def executa_comando():
    """Listen on the microphone and return the recognized command.

    Returns the lower-cased transcript on success, or '' when recognition
    fails (control falls through to the final return after either except
    branch).
    """
    try:
        with sr.Microphone() as source:
            recognizer = sr.Recognizer()
            voz = recognizer.listen(source)
            comando = recognizer.recognize_google(voz, language='pt-BR')
            comando = comando.lower()
            return comando
    except sr.UnknownValueError:
        # Speech was captured but could not be understood.
        maquina.say('Não entendi o comando')
        maquina.runAndWait()
    except sr.RequestError as e:
        # Recognition service unreachable or returned an error.
        maquina.say('Desculpe, houve um erro ao processar o comando')
        maquina.runAndWait()
    return ''
def comando_voz_usuario():
    """Main loop: listen for voice commands and dispatch them until 'tchau'."""
    while True:
        comando = executa_comando()
        if 'horas' in comando:
            # Speak the current time (HH:MM).
            tempo = datetime.datetime.now().strftime('%H:%M')
            maquina.say('Agora são ' + tempo)
            maquina.runAndWait()
        elif 'procure por' in comando:
            # Wikipedia lookup: speak a two-sentence summary.
            procurar = comando.replace('procure por', '')
            wikipedia.set_lang('pt')
            resultado = wikipedia.summary(procurar, 2)
            maquina.say(resultado)
            maquina.runAndWait()
        elif 'abrir navegador' in comando:
            webbrowser.open('https://www.google.com.br/')
        elif 'pesquise por' in comando:
            # Google search in the default browser.
            pesquisar = comando.replace('pesquise por', '')
            webbrowser.open('https://www.google.com.br/search?q=' + pesquisar)
        elif 'toque' in comando:
            # Play the requested song on YouTube.
            musica = comando.replace('toque', '')
            pywhatkit.playonyt(musica)
            maquina.say('Tocando Música ' + musica)
            maquina.runAndWait()
        elif 'clima' in comando:
            obter_clima()
        elif 'pare de escutar' in comando:
            # Pause listening for a user-specified number of minutes.
            maquina.say('Por quantos minutos você quer que eu pare de escutar?')
            maquina.runAndWait()
            resposta = executa_comando()
            try:
                tempo = int(resposta)
                maquina.say('Ok, vou parar de escutar por ' + str(tempo) + ' minutos')
                maquina.runAndWait()
                time.sleep(tempo * 60)
                maquina.say('Voltei! O que posso fazer por você?')
                maquina.runAndWait()
            except ValueError:
                maquina.say('Desculpe, não entendi o tempo que você informou')
                maquina.runAndWait()
        elif 'tchau' in comando:
            maquina.say('Tchau!, foi bom te ver')
            maquina.runAndWait()
            break
        elif 'definir alarme' in comando:
            # Expected form: "definir alarme HH:MM mensagem...".
            partes = comando.split(' ')
            hora = partes[2]
            mensagem = ' '.join(partes[3:])
            definir_alarme(hora, mensagem)
            maquina.say('Alarme definido para ' + hora + '.')
            maquina.runAndWait()
        elif 'definir lembrete' in comando:
            # Expected form: "definir lembrete SEGUNDOS mensagem...".
            partes = comando.split(' ')
            tempo_espera = int(partes[2])
            mensagem = ' '.join(partes[3:])
            def alerta():
                time.sleep(tempo_espera)
                maquina.say(mensagem)
                maquina.runAndWait()
            thread = threading.Thread(target=alerta)
            thread.start()
            maquina.say('Lembrete definido para daqui a ' + str(tempo_espera) + ' segundos.')
            maquina.runAndWait()
        elif 'notícias' in comando:
            obter_noticias()
        elif 'piada' in comando:
            contar_piada()
        elif 'ajuda' in comando:
            exibir_ajuda()
        else:
            maquina.say('Comando não reconhecido')
            maquina.runAndWait()
def definir_alarme(hora, mensagem):
    """Schedule *mensagem* to be spoken at *hora* ('HH:MM', 24h clock).

    The alarm fires today at that time, or tomorrow if the time has
    already passed. The original subtracted a year-1900 datetime (what
    strptime('%H:%M') returns) from now(), producing a negative timedelta
    whose .seconds field only coincidentally landed in range — the alarm
    fired after the wrong delay.
    """
    agora = datetime.datetime.now()
    alvo = datetime.datetime.strptime(hora, '%H:%M').time()
    horario_alarme = datetime.datetime.combine(agora.date(), alvo)
    if horario_alarme <= agora:
        # Time already passed today: schedule for the same time tomorrow.
        horario_alarme += datetime.timedelta(days=1)
    segundos = (horario_alarme - agora).total_seconds()
    def alerta():
        time.sleep(segundos)
        maquina.say(mensagem)
        maquina.runAndWait()
    thread = threading.Thread(target=alerta)
    thread.start()
def obter_clima():
    # Placeholder: weather lookup is not implemented yet.
    maquina.say('Desculpe, ainda não posso fornecer informações sobre o clima.')
    maquina.runAndWait()
def obter_noticias():
    """Read out the current top headlines (Portuguese) via the News API."""
    # Local import: the module-level `import newsapi` binds the module, not
    # the class, so the original `NewsApiClient(...)` raised NameError.
    from newsapi import NewsApiClient
    client = NewsApiClient(api_key='YOUR_NEWS_API_KEY')
    top_headlines = client.get_top_headlines(language='pt')
    articles = top_headlines['articles']
    maquina.say('Aqui estão as principais notícias:')
    maquina.runAndWait()
    for article in articles:
        title = article['title']
        maquina.say(title)
        maquina.runAndWait()
def contar_piada():
    """Pick one joke at random and speak it."""
    piadas = (
        "Por que a galinha atravessou a rua? Para chegar ao outro lado.",
        "O que o pato disse para a pata? 'Vem Quá!'",
        "Qual é o cúmulo da velocidade? Levantar a mão para pedir licença ao vento.",
        "Por que o livro de matemática cometeu suicídio? Porque tinha muitos problemas.",
        "Qual é o doce preferido do átomo? Pé de moléculas.",
    )
    escolhida = random.choice(piadas)
    maquina.say(escolhida)
    maquina.runAndWait()
def exibir_ajuda():
    # Build and speak the help text listing every supported voice command.
    ajuda = "Aqui estão alguns comandos que você pode usar:\n" \
            "- Horas: para saber a hora atual.\n" \
            "- Procure por [termo]: para pesquisar informações no Wikipedia.\n" \
            "- Abrir navegador: para abrir o navegador padrão.\n" \
            "- Pesquise por [termo]: para pesquisar no Google.\n" \
            "- Toque [música]: para reproduzir uma música no YouTube.\n" \
            "- Clima: para obter informações sobre o clima.\n" \
            "- Pare de escutar: para pausar a escuta por um determinado tempo.\n" \
            "- Tchau: para encerrar o programa.\n" \
            "- Definir alarme [hora] [mensagem]: para definir um alarme.\n" \
            "- Definir lembrete [tempo] [mensagem]: para definir um lembrete.\n" \
            "- Notícias: para obter as principais notícias.\n" \
            "- Piada: para ouvir uma piada.\n" \
            "- Ajuda: para exibir esta mensagem de ajuda."
    maquina.say(ajuda)
    maquina.runAndWait()
comando_voz_usuario()
| lucasss45/Fryday-IA | alfredv2.6.py | alfredv2.6.py | py | 6,390 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "pyttsx3.init",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name... |
26829773618 |
#######################################
# This file computes several characteristics of the portage graph
#######################################
import math
import sys
import core_data
import hyportage_constraint_ast
import hyportage_data
import utils
import graphs
import host.scripts.utils
from host.scripts import hyportage_db
data = {}
######################################################################
# GENERIC STATISTICS EXTRACTION FUNCTION
######################################################################
def map_base_statistics(value_number, total_size, map_size):
    """Return (mean, standard deviation) of the per-key sizes.

    value_number -- number of values observed (the denominator)
    total_size   -- sum of all observed sizes
    map_size     -- maps a size to the set of keys having that size

    Note: despite the historical local name, the second value returned is
    the standard deviation (sqrt of the variance), not the variance.
    Returns (0.0, 0.0) when value_number is 0 instead of raising
    ZeroDivisionError.
    """
    if not value_number:
        return 0.0, 0.0
    average = total_size / float(value_number)
    variance = 0
    # .items() instead of .iteritems() keeps this callable from both
    # Python 2 and Python 3.
    for key, value in map_size.items():
        tmp = average - key
        tmp = tmp * tmp * len(value)
        variance = variance + tmp
    variance = math.sqrt(variance / value_number)
    return average, variance
def generics(input_iterator, extract_data, extract_key, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
    """Core statistics collector.

    Iterates over *input_iterator*, keeping only elements accepted by
    *filter_function*; for each, *extract_data* yields the measured
    collection and *extract_key* the identifier to index it under.
    Returns a dict with count, size map, total/average/variance (the
    'variance' entry is actually a standard deviation — see
    map_base_statistics) and min/max sizes.
    """
    value_number = 0
    map_data = {}   # data -> set of keys with exactly that data (if requested)
    map_size = {}   # size -> set of keys with that size
    total_size = 0
    max_size = 0
    min_size = sys.maxint  # Python 2 only; would be sys.maxsize on Python 3
    for element in input_iterator:
        if filter_function(element):
            value_number = value_number + 1
            data = extract_data(element)
            key = extract_key(element)
            size = len(data)
            if store_data_map:
                if data in map_data: map_data[data].add(key)
                else: map_data[data] = {key}
            if size in map_size: map_size[size].add(key)
            else: map_size[size] = {key}
            total_size = total_size + size
            if size > max_size: max_size = size
            if size < min_size: min_size = size
    average, variance = map_base_statistics(value_number, total_size, map_size)
    return {
        'number': value_number,
        'map_data': map_data,
        'map_size': map_size,
        'total_size': total_size,
        'average': average,
        'variance': variance,
        'max_size': max_size,
        'min_size': min_size
    }
def generic_map(input_map, extraction_function, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
    # Adapter: run `generics` over a dict, extracting data from each value
    # and using the dict key as the statistics key.
    return generics(input_map.iteritems(), lambda el: extraction_function(el[1]), lambda el: el[0], filter_function, store_data_map)


def generic_list(input_list, extraction_function, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
    # Adapter: run `generics` over a list, keying each element by its tuple form.
    return generics(input_list, extraction_function, lambda el: tuple(el), filter_function, store_data_map)
######################################################################
# FEATURES
######################################################################
def features(filter_function=host.scripts.utils.filter_function_simple):
    """Compute USE-flag statistics over the mspl into data['features']."""
    utils.phase_start("Computing the USE Flags Core Statistics.")
    # Totals of required vs locally-declared USE flags across kept SPLs.
    required = sum([len(spl.required_iuses) for spl in hyportage_db.mspl.itervalues() if filter_function(spl)])
    local = sum([len(spl.iuses_default) for spl in hyportage_db.mspl.itervalues() if filter_function(spl)])
    global data
    data['features'] = generic_map(hyportage_db.mspl, hyportage_data.spl_get_iuses_full, filter_function)
    data['features']['average_required'] = required / float(data['features']['number'])
    data['features']['average_local'] = local / float(data['features']['number'])
    utils.phase_end("Computation Completed")
def features_usage(filter_function=host.scripts.utils.filter_function_simple):
    """Compute, per USE flag, which SPLs require it; stored in data['feature_usage']."""
    utils.phase_start("Computing the USE Flags Core Statistics.")
    map_features = {}  # feature -> set of spl names requiring it
    for key, value in hyportage_db.mspl.iteritems():
        if filter_function(value):
            for feature in hyportage_data.spl_get_required_iuses(value):
                if feature in map_features: map_features[feature].add(key)
                else: map_features[feature] = {key}
    global data
    data['feature_usage'] = generic_map(map_features, core_data.identity, filter_function)
    # Overwrite the generic map_data with the full feature -> spl-set map.
    data['feature_usage']['map_data'] = map_features
    utils.phase_end("Computation Completed")
"""
def statistics_features(filter_function=db.filter_function_simple):
utils.phase_start("Computing the USE Flags Core Statistics.")
features_number = 0
features_max = 0
features_min = 100
spl_min = []
spl_max = []
spl_number = 0
for spl in db.mspl.itervalues():
if filter_function(spl):
spl_number = spl_number + 1
use_flag_size = len(hyportage_data.spl_get_required_iuses(spl))
if use_flag_size < features_min:
features_min = use_flag_size
spl_min = [spl.name]
elif use_flag_size == features_min: spl_min.append(spl.name)
if use_flag_size > features_max:
features_max = use_flag_size
spl_max = [spl.name]
elif use_flag_size == features_max: spl_max.append(spl.name)
features_number = features_number + use_flag_size
res = {
'min': features_min,
'min_spl_list': sorted(spl_min),
'max': features_max,
'max_spl_list': sorted(spl_max),
'number': features_number,
'spl_number': spl_number,
'average': features_number / spl_number
}
global statistics
statistics['features'] = res
utils.phase_end("Computation Completed")
"""
######################################################################
# DEPENDENCIES
######################################################################
class GETGuardedDependenciesVisitor(hyportage_constraint_ast.ASTVisitor):
    """AST visitor collecting, per dependency pattern, whether it occurs
    only under a USE-conditional guard and whether it carries a USE
    selection."""

    def __init__(self):
        # NOTE(review): super() is called with the *parent* class
        # (ASTVisitor) as first argument instead of this class; the
        # conventional form is super(GETGuardedDependenciesVisitor, self) —
        # confirm this skip of ASTVisitor.__init__ is intended.
        super(hyportage_constraint_ast.ASTVisitor, self).__init__()
        self.res = {}     # pattern -> {'guarded': bool, 'selects': bool}
        self.guards = 0   # current depth of conditional (guard) nesting

    def visitDependCONDITION(self, ctx):
        # Everything below this condition is guarded: track nesting depth.
        self.guards = self.guards + 1
        map(self.visitDependEL, ctx['els'])
        self.guards = self.guards - 1

    def visitDependSIMPLE(self, ctx):
        pattern = ctx['atom']
        if pattern in self.res:
            # Seen unguarded at least once => not guarded overall.
            if self.guards == 0: self.res[pattern]['guarded'] = False
            if "selection" in ctx: self.res[pattern]['selects'] = True
        else: self.res[pattern] = {'guarded': self.guards > 0, 'selects': "selection" in ctx}

    def visitSPL(self, spl):
        # Entry point: visit one SPL's combined feature model and return the
        # collected pattern map, resetting state for the next call.
        self.visitDepend(spl.fm_combined)
        res = self.res
        self.res = {}
        self.guards = 0
        return res
def dependencies(filter_function=host.scripts.utils.filter_function_simple):
    """Compute dependency statistics (all / guarded / selecting patterns)."""
    utils.phase_start("Computing the Dependencies Statistics.")
    visitor = GETGuardedDependenciesVisitor()
    # spl name -> {pattern: {'guarded': bool, 'selects': bool}}
    local_map = {spl.name: visitor.visitSPL(spl) for spl in hyportage_db.mspl.itervalues()}
    # The `data` parameter of these extractors shadows the module-level
    # statistics dict; each call only sees one SPL's pattern map.
    def extraction_function_all(data): return data.keys()
    def extraction_function_guarded(data): return {pattern for pattern in data.iterkeys() if data[pattern]['guarded']}
    def extraction_function_selects(data): return {pattern for pattern in data.iterkeys() if data[pattern]['selects']}
    global data
    data['dependencies_all'] = generic_map(local_map, extraction_function_all, filter_function)
    data['dependencies_guarded'] = generic_map(local_map, extraction_function_guarded, filter_function)
    data['dependencies_selects'] = generic_map(local_map, extraction_function_selects, filter_function)
    utils.phase_end("Computation Completed")
def lone_packages(filter_function=host.scripts.utils.filter_function_simple):
    """Store in data['lone_packages'] the SPLs that have no dependencies
    and are not matched by any pattern in the repository."""
    # NOTE(review): this accesses a double-underscore attribute from outside
    # its class; if __generate_matched_spls is defined inside a class its
    # stored name is mangled there and this lookup will raise
    # AttributeError — confirm the intended (public) method name.
    referenced_spls = {
        spl
        for el in hyportage_db.flat_pattern_repository.itervalues()
        for spl in el.__generate_matched_spls(hyportage_db.mspl, hyportage_db.spl_groups)
    }
    spls = filter(filter_function, hyportage_db.mspl.itervalues())
    spls = filter(lambda spl: len(spl.dependencies) == 0, spls)
    spls = filter(lambda spl: spl not in referenced_spls, spls)
    global data
    data['lone_packages'] = spls
"""
def statistics_dependencies(filter_function=db.filter_function_simple):
utils.phase_start("Computing the Dependencies Core Statistics.")
dependencies_number = 0
dependencies_max = 0
dependencies_min = 100
dependencies_guarded_number = 0
dependencies_guarded_max = 0
dependencies_guarded_min = 100
spl_number = 0
spl_max = []
spl_min = []
spl_guarded_number = 0
spl_guarded_max = []
spl_guarded_min = []
visitor = GETDependenciesVisitor()
for spl in db.mspl.itervalues():
if filter_function(spl):
spl_number = spl_number + 1
deps = visitor.visitSPL(spl)
#print(" " + spl.name + ": " + str(deps))
dependencies_size = len(deps)
if dependencies_size < dependencies_min:
dependencies_min = dependencies_size
spl_min = [spl.name]
elif dependencies_size == dependencies_min:
spl_min.append(spl.name)
if dependencies_size > dependencies_max:
dependencies_max = dependencies_size
spl_max = [spl.name]
elif dependencies_size == dependencies_max:
spl_max.append(spl.name)
dependencies_number = dependencies_number + dependencies_size
deps_guarded = {k for k, v in deps.iteritems() if v}
dependencies_guarded_size = len(deps_guarded)
if dependencies_guarded_size < dependencies_guarded_min:
dependencies_guarded_min = dependencies_guarded_size
spl_guarded_min = [spl.name]
elif dependencies_guarded_size == dependencies_guarded_min:
spl_guarded_min.append(spl.name)
if dependencies_guarded_size > dependencies_guarded_max:
dependencies_max = dependencies_guarded_size
dependencies_guarded_max = [spl.name]
elif dependencies_guarded_size == dependencies_guarded_max:
spl_guarded_max.append(spl.name)
dependencies_guarded_number = dependencies_guarded_number + dependencies_guarded_size
if dependencies_guarded_size > 0: spl_guarded_number = spl_guarded_number + 1
res = {
'min': dependencies_min,
'min_spl_list': sorted(spl_min),
'max': dependencies_max,
'max_spl_list': sorted(spl_max),
'number': dependencies_number,
'spl_number': spl_number,
'average': dependencies_number / spl_number,
'guarded_min': dependencies_guarded_min,
'guarded_min_spl_list': sorted(spl_guarded_min),
'guarded_max': dependencies_guarded_max,
'guarded_max_spl_list': sorted(spl_guarded_max),
'guarded_number': dependencies_guarded_number,
'guarded_spl_number': spl_guarded_number,
'guarded_average': dependencies_guarded_number / spl_guarded_number
}
global statistics
statistics['dependencies'] = res
utils.phase_end("Computation Completed")
"""
######################################################################
# PATTERNS (ABSTRACT SPL)
######################################################################
def pattern_refinement(filter_function=host.scripts.utils.filter_function_simple):
    """Collect, for every pattern, the set of SPLs it refines to, and store it in data['pattern_refinement']."""
    global data
    utils.phase_start("Computing the Pattern (refinement) Statistics.")

    def matched(element):
        # Resolve the pattern against the full mspl / spl_groups databases.
        return element.matched_spls(hyportage_db.mspl, hyportage_db.spl_groups)

    data['pattern_refinement'] = generic_map(hyportage_db.flat_pattern_repository, matched, filter_function)
    utils.phase_end("Computation Completed")
def statistics_pattern(filter_function=host.scripts.utils.filter_function_simple):
    """Compute core statistics about patterns (usage histogram, min/max/total
    abstraction sizes) and store the result in data['patterns']."""
    utils.phase_start("Computing the Pattern Core Statistics.")
    pattern_number = 0
    pattern_usage = {}  # maps "number of SPLs containing the pattern" -> list of such patterns
    pattern_usage_max = 0
    for pattern_element in hyportage_db.flat_pattern_repository.itervalues():
        if filter_function(pattern_element):
            pattern_number = pattern_number + 1
            size = len(pattern_element.containing_spl)
            if pattern_usage_max < size:
                pattern_usage_max = size
            if size in pattern_usage:
                # BUGFIX: was extend(pattern_element), which would iterate over the
                # pattern object instead of adding it as a single list element.
                pattern_usage[size].append(pattern_element)
            else:
                pattern_usage[size] = [pattern_element]
    pattern_abstraction_number = 0
    pattern_abstraction_max = [0, []]
    pattern_abstraction_min = [100, []]
    for pattern_element in hyportage_db.flat_pattern_repository.itervalues():
        if filter_function(pattern_element):
            pattern_abstraction_size = len(pattern_element.matched_spls(hyportage_db.mspl, hyportage_db.spl_groups))
            if pattern_abstraction_size < pattern_abstraction_min[0]:
                pattern_abstraction_min[0] = pattern_abstraction_size
                pattern_abstraction_min[1] = [pattern_element]
            elif pattern_abstraction_size == pattern_abstraction_min[0]:
                pattern_abstraction_min[1].append(pattern_element)
            if pattern_abstraction_size > pattern_abstraction_max[0]:
                pattern_abstraction_max[0] = pattern_abstraction_size
                pattern_abstraction_max[1] = [pattern_element]
            elif pattern_abstraction_size == pattern_abstraction_max[0]:
                pattern_abstraction_max[1].append(pattern_element)
            pattern_abstraction_number = pattern_abstraction_number + pattern_abstraction_size
    res = {
        'number': pattern_number,
        'usage': pattern_usage,
        'usage_max': pattern_usage_max,
        # NOTE(review): this divides the *maximum* usage by the pattern count; if a
        # true average usage is wanted it should sum all usage sizes instead — confirm intent.
        'usage_average': pattern_usage_max / pattern_number,
        'total_abstraction_number': pattern_abstraction_number,
        'abstraction_min': pattern_abstraction_min[0],
        'abstraction_min_list': pattern_abstraction_min[1],
        'abstraction_max': pattern_abstraction_max[0],
        'abstraction_max_list': pattern_abstraction_max[1]
    }
    global data
    data['patterns'] = res
    utils.phase_end("Computation Completed")
######################################################################
# CYCLES
######################################################################
def graph(filter_function=host.scripts.utils.filter_function_simple):
    """Detect dependency cycles in the mspl graph with an iterative DFS and
    store them (plus generic_map statistics over them) in data['graph'].

    NOTE(review): the "shairplay" variables look like leftover debug
    instrumentation for one specific package; they only enable extra printing —
    confirm whether they can be removed.
    """
    utils.phase_start("Computing the Graph Core Statistics.")
    # Build the graph of the mspl and a node <-> spl mapping in both directions.
    graph_mspl, spl_nodes = graphs.mspl(filter_function, keep_self_loop=True)
    nodes_spl = {node: spl for spl, node in spl_nodes.iteritems()}
    visited = graph_mspl.getBooleanProperty("visited")
    for n in graph_mspl.getNodes():
        visited.setNodeValue(n, False)
    shairplay_len = sys.maxint  # sentinel: "not currently inside the debugged subtree"
    cycles = []
    # Iterative DFS: `path` is the current stack of nodes and `branches` holds,
    # for each node on the path, its iterator over outgoing neighbours.
    for n in graph_mspl.getNodes():
        if not visited.getNodeValue(n):
            visited.setNodeValue(n, True)
            path = [n]
            branches = [graph_mspl.getOutNodes(n)]
            if "shairplay" in nodes_spl[n].name: shairplay_len = 1
            while path:
                if len(path) >= shairplay_len: print(str([nodes_spl[node].name for node in path]))
                if branches[-1].hasNext():
                    succ = branches[-1].next()
                    if len(path) >= shairplay_len: print(" found: " + nodes_spl[succ].name)
                    if succ in path:
                        # Back-edge found: the path slice from succ onward is a cycle.
                        if len(path) >= shairplay_len: print(" loop found: " + str([nodes_spl[node].name for node in path[path.index(succ):]]))
                        cycles.append([nodes_spl[node].name for node in path[path.index(succ):]])
                    elif not visited.getNodeValue(succ):
                        visited.setNodeValue(succ, True)
                        path.append(succ)
                        branches.append(graph_mspl.getOutNodes(succ))
                        if "shairplay" in nodes_spl[succ].name: shairplay_len = len(path)
                else:
                    # Current node exhausted: backtrack one level.
                    path.pop()
                    branches.pop()
                    if len(path) < shairplay_len: shairplay_len = sys.maxint
    res = generic_map({tuple(v): v for v in cycles}, core_data.identity, host.scripts.utils.filter_function_simple)
    res['cycles'] = cycles
    global data
    data['graph'] = res
    utils.phase_end("Computation Completed")
| HyVar/gentoo_to_mspl | host/statistics/statistics.py | statistics.py | py | 14,279 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "host.scripts.utils.scripts",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "host.scripts.utils",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "sys.maxi... |
26239060381 | #!/usr/bin/env python3
'''
This script will incement the major version number of the specified products.
It is assumed that the version number in the label itself is correct, and the version
just needs to be added on to the filename.
Usage:
versioning.py <label_file>...
'''
import re
import os
import sys
from bs4 import BeautifulSoup
LABELFILE_PARSE_VERSIONED_REGEX = r'(.+)_(\d+)_(\d+)\.xml'
LABELFILE_PARSE_UNVERSIONED_REGEX = r'(.+)\.xml'
DATAFILE_PARSE_VERSIONED_REGEX = r'(.+)_(\d+)\.([a-z0-9]+)'
DATAFILE_PARSE_UNVERSIONED_REGEX = r'(.+)\.([a-z0-9]+)'
def main(argv=None):
    ''' Entry point: increment the version of every label file given on the command line. '''
    args = sys.argv if argv is None else argv
    for label_path in args[1:]:
        directory, label_name = os.path.split(label_path)
        increment_product(directory, label_name)
def increment_product(path, labelfile):
    '''
    Increments the major version number of the specified product.

    If the label references a data file, a new label (with the reference
    updated) is written and the data file is renamed; otherwise the label
    file itself is renamed to the new versioned name.
    '''
    label = read_label(path, labelfile)
    datafile = extract_datafile(label)
    new_labelfile = increment_labelfile(labelfile)
    if datafile:
        new_datafile = increment_datafile(datafile)
        contents = inject_datafile(label, datafile, new_datafile)
        # BUGFIX: write the new label next to the original label instead of
        # into the current working directory.
        with open(os.path.join(path, new_labelfile), "w") as outfile:
            outfile.write(contents)
        rename(path, datafile, new_datafile)
        # NOTE(review): the old-version label file is left in place here;
        # confirm whether it should be removed.
    else:
        rename(path, labelfile, new_labelfile)
def read_label(path, labelfile):
    ''' Return the full text of the label file located at path/labelfile. '''
    full_path = os.path.join(path, labelfile)
    with open(full_path) as infile:
        return infile.read()
def extract_datafile(label):
    ''' Return the data filename referenced by a product label, or None for unknown product types. '''
    soup = BeautifulSoup(label, 'lxml-xml')
    # (product element, extractor) pairs, tried in order.
    dispatch = (
        (soup.Product_Observational, extract_observational_datafile),
        (soup.Product_Collection, extract_collection_datafile),
        (soup.Product_Document, extract_document_datafile),
    )
    for product, extractor in dispatch:
        if product:
            return extractor(product)
    return None
def extract_collection_datafile(product):
    ''' Return the inventory filename referenced by a collection product. '''
    file_area = None
    if product:
        file_area = product.File_Area_Inventory
    file_element = None
    if file_area:
        file_element = file_area.File
    file_name = None
    if file_element:
        file_name = file_element.file_name
    # Raises AttributeError when no file_name element was found (file_name is None).
    return file_name.string
def extract_observational_datafile(product):
    ''' Return the data filename referenced by an observational product. '''
    file_area = None
    if product:
        file_area = product.File_Area_Observational
    file_element = None
    if file_area:
        file_element = file_area.File
    file_name = None
    if file_element:
        file_name = file_element.file_name
    # Raises AttributeError when no file_name element was found (file_name is None).
    return file_name.string
def extract_document_datafile(product):
    ''' Finds the document filename referenced in a document product. '''
    # Walk Product_Document -> Document -> Document_Edition -> Document_File,
    # carrying None past any missing level.
    document = product.Document if product else None
    document_edition = document.Document_Edition if document else None
    document_file = document_edition.Document_File if document_edition else None
    # NOTE(review): `document_file.document_file` looks wrong — the sibling
    # extractors read `.file_name`; confirm whether this should be
    # `document_file.file_name`.
    file_name = document_file.document_file if document_file else None
    return file_name.string
def increment_labelfile(labelfile):
    ''' Return the label filename with the major version bumped and the minor reset to 0. '''
    filebase, major, _minor = parse_labelfile_name(labelfile)
    return "{}_{}_{}.xml".format(filebase, major + 1, 0)
def increment_datafile(datafile):
    ''' Return the data filename with the major version bumped. '''
    filebase, major, extension = parse_datafile_name(datafile)
    return "{}_{}.{}".format(filebase, major + 1, extension)
def inject_datafile(label, datafile, new_datafile):
    ''' Return `label` with every occurrence of `datafile` replaced by `new_datafile`. '''
    updated = label.replace(datafile, new_datafile)
    return updated
def rename(dirname, filename, newfilename):
    ''' Rename `filename` to `newfilename` inside `dirname`, refusing to overwrite an existing file. '''
    src = os.path.join(dirname, filename)
    dst = os.path.join(dirname, newfilename)
    # BUGFIX: check the destination *path*; checking the bare filename tested
    # against the current working directory instead of `dirname`.
    if os.path.exists(dst):
        print("File already exists: " + newfilename)
    else:
        os.rename(src, dst)
def parse_datafile_name(name):
    ''' Split a data filename into (base, major version, extension); unversioned files count as version 1. '''
    match = re.match(DATAFILE_PARSE_VERSIONED_REGEX, name)
    if match:
        filebase, major, extension = match.groups()
        return (filebase, int(major), extension)
    filebase, extension = re.match(DATAFILE_PARSE_UNVERSIONED_REGEX, name).groups()
    return (filebase, 1, extension)
def parse_labelfile_name(name):
    ''' Split a label filename into (base, major, minor); unversioned labels count as version 1.0. '''
    match = re.match(LABELFILE_PARSE_VERSIONED_REGEX, name)
    if match:
        filebase, major, minor = match.groups()
        return (filebase, int(major), int(minor))
    filebase = re.match(LABELFILE_PARSE_UNVERSIONED_REGEX, name).groups()[0]
    return (filebase, 1, 0)
def increment_major(major, _):
    '''
    Return (major + 1, 0): bump the major version and reset the minor.
    '''
    new_major = major + 1
    return (new_major, 0)
def increment_minor(major, minor):
    ''' Return the version number with only the minor component bumped. '''
    return (major, minor + 1)
def attach_version_to_datafile(filebase, extension, major):
    ''' Return the data filename "<filebase>_<major>.<extension>". '''
    return '{0}_{1}.{2}'.format(filebase, major, extension)
def attach_version_to_labelfile(filebase, major, minor):
    ''' Return the label filename "<filebase>_<major>_<minor>.xml". '''
    return '{0}_{1}_{2}.xml'.format(filebase, major, minor)
if __name__ == '__main__':
sys.exit(main())
| sbn-psi/data-tools | orex/pds4-tools/versioning.py | versioning.py | py | 5,949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_num... |
71470012027 | from lxml import etree
from xml.etree import ElementTree
def get_text_from_file(xml_file):
    """Dump every <para> element of `xml_file` after detaching its <display> descendants."""
    root = etree.parse(xml_file).getroot()
    for para in root.iterfind('.//para'):
        # Remove all <display> descendants from the paragraph.
        for display in para.findall('.//display'):
            display.getparent().remove(display)
        ElementTree.dump(para)
| ayandeephazra/Natural_Language_Processing_Research | PaperDownload/papers/process_xml.py | process_xml.py | py | 349 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "lxml.etree.parse",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.dump",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "xml.etree.Elemen... |
23704854533 | #!/usr/bin/env python3
import json
import os
import requests
import datetime
base_url="https://raw.githubusercontent.com/threatstop/crl-ocsp-whitelist/master/"
uri_list=['crl-hostnames.txt','crl-ipv4.txt','crl-ipv6.txt','ocsp-hostnames.txt','ocsp-ipv4.txt','ocsp-ipv6.txt']
# Warninglist under construction: metadata plus the merged entry list.
# (Renamed from `dict`, which shadowed the builtin.)
warninglist = {'list': []}


def source_read_and_add(input_file):
    """Strip trailing whitespace/newlines from every line and return the lines as a list."""
    output_list = []
    for item in input_file:
        output_list.append(item.rstrip())
    return output_list


for uri in uri_list:
    url = base_url + uri
    r = requests.get(url)
    # BUGFIX: split the response into lines; iterating the raw string would
    # yield individual characters, not hostnames/addresses.
    warninglist['list'] += source_read_and_add(r.text.splitlines())
warninglist['type'] = "string"
warninglist['matching_attributes'] = ["hostname", "domain", "ip-dst", "ip-src", "url", "domain|ip"]
warninglist['name'] = "CRL Warninglist"
warninglist['version'] = int(datetime.date.today().strftime('%Y%m%d'))
warninglist['description'] = "CRL Warninglist from threatstop (https://github.com/threatstop/crl-ocsp-whitelist/)"
# De-duplicate the merged entries.
warninglist['list'] = list(set(warninglist['list']))
print(json.dumps(warninglist))
| 007Alice/misp-warninglists | tools/generate-crl-ip-list.py | generate-crl-ip-list.py | py | 943 | python | en | code | null | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
... |
43005467228 | """
Utility functions
"""
import torch
import matplotlib as mpl
import numpy as np
import math
mpl.use('Agg')
from matplotlib import pyplot as plt
def sin_data(n_train, n_test, noise_std, sort=False):
    """Create a 1D sine-function regression dataset.

    :n_train: Number of training samples (uniform on [-1, 1]).
    :n_test: Number of testing samples (equally spaced on [-1.1, 1.1]).
    :noise_std: Standard deviation of the additive Gaussian observation noise.
    :sort: If True, return the training pairs sorted by increasing x.
    :returns: x_train, y_train, x_test, y_test
    """
    def ground_truth(x):
        return torch.sin(math.pi * x)

    xn = torch.rand(n_train, 1) * 2 - 1
    yn = ground_truth(xn) + noise_std * torch.randn(n_train, 1)
    if sort:
        order = torch.argsort(xn, axis=0).squeeze()
        xn = xn[order]
        yn = yn[order]
    xt = torch.linspace(-1.1, 1.1, n_test).view(-1, 1)
    yt = ground_truth(xt) + noise_std * torch.randn(n_test, 1)
    return xn, yn, xt, yt
def plot_lengthscale(xt, lengthscale, uncertainty, name=None):
    """
    Visualize a lengthscale function with its 95% confidence band.

    :lengthscale: Lengthscale mean (tensor).
    :uncertainty: Standard deviation of the lengthscale prediction (tensor).
    :name: If given, the figure is saved to ../results/prediction/<name>.svg.
    """
    mean = lengthscale.numpy().ravel()
    std = uncertainty.numpy().ravel()
    band_lo = mean - 2.0 * std
    band_hi = mean + 2.0 * std
    xs = xt.numpy().ravel()
    fig, ax = plt.subplots()
    ax.plot(xs, mean, 'b', lw=2, alpha=0.8, label='Lengthscale')
    ax.fill_between(xs, band_lo, band_hi,
                    facecolor='b', alpha=0.3, label='95% CI')
    ax.set_xlim([xs.min(), xs.max()])
    ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
              borderaxespad=0, frameon=False)
    if name is not None:
        fig.savefig('../results/prediction/' + name + '.svg')
def plot_pytorch(dataset, preds, name=None):
    """
    Plot training data, test function, and a predictive distribution.

    :dataset: Tuple (xn, yn, xt, ft) of CPU tensors.
    :preds: Predictive distribution exposing `.mean` and `.confidence_region()`
            (presumably a gpytorch posterior — confirm against callers).
    :name: If given, the figure is saved to ../results/prediction/<name>.svg.
    """
    dataset = [tensor.numpy().ravel() for tensor in dataset]
    xn, yn, xt, ft = dataset
    mean = preds.mean.cpu().numpy().ravel()
    # confidence_region() returns the (lower, upper) band around the mean.
    lower, upper = preds.confidence_region()
    lower = lower.cpu().numpy().ravel()
    upper = upper.cpu().numpy().ravel()
    fig, ax = plt.subplots()
    ax.plot(xn, yn, 'k.', label='Training data')
    ax.plot(xt, ft, 'r--', lw=2, alpha=0.8, label='Function')
    ax.plot(xt, mean, 'b', lw=2, alpha=0.8, label='Prediction')
    ax.fill_between(xt, lower, upper,
                    facecolor='b', alpha=0.3, label='95% CI')
    ax.set_xlim([xt.min(), xt.max()])
    ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
              borderaxespad=0, frameon=False)
    if name is not None:
        fig.savefig('../results/prediction/' + name + '.svg')
def plot_1d_results(dataset, mean, std, title=None, name=None):
    """
    Visualize training data, test data, and a Gaussian prediction with a 95% band.

    :dataset: Tuple (Xn, Yn, Xt, Ft) of tensors (possibly on GPU).
    :mean: Mean of the predictive Gaussian distribution.
    :std: Standard deviation of the predictive Gaussian distribution.
    """
    xn, yn, xt, ft = [t.cpu().numpy().ravel() for t in dataset]
    mu = mean.cpu().numpy().ravel()
    sigma = std.cpu().numpy().ravel()
    band_lo = mu - 2.0 * sigma
    band_hi = mu + 2.0 * sigma
    fig, ax = plt.subplots()
    ax.plot(xn, yn, 'k.', label='Training data')
    ax.plot(xt, ft, 'r.', lw=2, alpha=0.8, label='Test data')
    ax.plot(xt, mu, 'b', lw=2, alpha=0.8, label='Prediction')
    ax.fill_between(xt, band_lo, band_hi,
                    facecolor='b', alpha=0.3, label='95% CI')
    ax.set_xlim([xt.min(), xt.max()])
    ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
              borderaxespad=0, frameon=False)
    if title is not None:
        ax.set_title(title, loc='center')
    if name is not None:
        fig.savefig(name + '.pdf')
def train(model, optimizer, n_iter, verbose=True, name=None, Xn=None, yn=None, tol=None):
    """
    Run a simple optimization loop over model.loss.

    :model: Object exposing loss() (or loss(Xn, yn) when data is passed in).
    :optimizer: Optimizer over the model parameters.
    :n_iter: Maximum number of iterations.
    :tol: If given, stop once the loss varied by at most tol over 50 iterations.
    :name: If given, a loss curve is saved to <name>.svg.
    """
    n_train = Xn.size(0) if Xn is not None else model.Xn.size(0)
    history = []
    for it in range(n_iter):
        optimizer.zero_grad()
        loss = model.loss() if (Xn is None and yn is None) else model.loss(Xn, yn)
        loss.backward()
        optimizer.step()
        history.append(loss.item())
        if tol is not None:
            # If the loss is stable over a 50-iteration window, consider it converged.
            window = 50
            if len(history) > window:
                recent = history[-window:]
                if max(recent) - min(recent) <= tol:
                    if verbose:
                        print("Converges at iteration: ", it)
                    break
        if verbose:
            print('Iteration: {0:04d} Loss: {1: .6f}'.format(it, loss.item() / n_train))
    if name is not None:
        plt.figure()
        plt.plot(history, lw=2)
        plt.ylabel('Loss')
        plt.xlabel('Number of iteration')
        plt.savefig(name + '.svg')
| weiyadi/dlm_sgp | conjugate/utils.py | utils.py | py | 4,966 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.rand",
"line_number": ... |
30969861276 | import matplotlib.pyplot as plt
import time
import numpy as np
from PIL import Image
class graphic_display():
    """Image pipeline between the camera and the GUI.

    Takes the raw camera frame and applies, in order: resize/zoom, gamma,
    optional normalisation, colormap, saturated-pixel marking and scale-bar
    burn-in, then pushes the result plus histogram/FPS info to the window.
    """

    def __init__(self):
        self.um_per_pixel = 0.5
        self.cm_hot = plt.get_cmap('hot')
        self.cm_jet = plt.get_cmap('jet')
        self.cm_vir = plt.get_cmap('viridis')
        self.cm_mag = plt.get_cmap('magma')
        # self.cm_grn = plt.get_cmap('Greens')
        self.cm_raw = plt.get_cmap('gray')
        # Rolling buffer of the last three frame timestamps for the FPS estimate.
        self.fps_counter = np.array([time.time(), time.time(), time.time()])
        self.img_rs = None      # resized/zoomed frame, float in [0, 1]
        self.img_norm = None    # after optional normalisation
        self.img_gamma = None   # after gamma correction
        self.img_p = None       # after saturated-pixel marking
        self.img_cm = None      # after colormap (RGBA)
        self.img_sb = None      # after scale-bar burn-in
        self.img_fin = None     # final uint8 image for display
        self.win = None
        self.cam = None
        return

    def update_win(self, win):
        self.win = win

    def update_cam(self, cam):
        self.cam = cam

    def update_image(self):
        """Run the full processing pipeline on the current camera frame and refresh the GUI."""
        print('update image function')
        self.img_rs = np.array(Image.fromarray(self.cam.img_raw).resize(size=(958, 638)), dtype='float64') / 255
        if self.win.zoom_factor > 1:
            # Crop the central 1/zoom_factor portion of the frame.
            r1 = self.img_rs.shape[0]
            c1 = self.img_rs.shape[1]
            r2 = int(np.round(r1 / self.win.zoom_factor))
            c2 = int(np.round(c1 / self.win.zoom_factor))
            self.img_rs = self.img_rs[int((r1 - r2) / 2):int((r1 - r2) / 2) + r2, int((c1 - c2) / 2):int((c1 - c2) / 2) + c2]
        # update and process the image for display from the camera
        self.update_image_gamma()
        self.normalise_img()
        self.update_colormap()
        self.display_saturated_pixels_purple()
        self.burn_scalebar_into_image()
        # gui functions
        self.win.repaint_image()
        self.win.update_hist()
        self.win.status_text_update_image()
        # fps counter: append newest timestamp, drop oldest, show mean frame rate
        self.fps_counter = np.append(self.fps_counter, time.time())
        self.fps_counter = np.delete(self.fps_counter, 0)
        self.win.status_fps_number.setText(str(np.round(1 / np.mean(np.diff(self.fps_counter)), 5)))
        print('current saved value for fps is: ' + str(self.cam.fps) + ' current timer value is: ' + str(self.cam.timer_value))
        return

    def update_image_gamma(self):
        # Apply the window's gamma setting; skip the pow for the common gamma == 1 case.
        if self.win.gamma == 1:
            self.img_gamma = self.img_rs
        else:
            self.img_gamma = self.img_rs ** self.win.gamma
        return

    def normalise_img(self):
        """Stretch the image so the smallest non-zero value maps to 0 and the max to 1."""
        print('normalise function')
        if self.win.cbox_normalise.isChecked():
            # BUGFIX: take the minimum over the non-zero *values* (np.nonzero
            # returns index arrays, so np.min(np.nonzero(...)) was the smallest
            # index), and use (max - min) as the denominator (was `max - -min`).
            nonzero_values = self.img_gamma[np.nonzero(self.img_gamma)]
            imgnormmin = np.min(nonzero_values) if nonzero_values.size else 0.0
            imgnormmax = np.max(self.img_gamma)
            self.img_norm = (self.img_gamma - imgnormmin) / (imgnormmax - imgnormmin)
        else:
            self.img_norm = self.img_gamma
        return

    def update_colormap(self):
        """Map the normalised grayscale image to RGBA using the colormap selected in the GUI."""
        print('update colormap function')
        if self.win.combobox_colourmap.currentIndex() == 0:
            self.img_cm = self.cm_mag(self.img_norm)
        elif self.win.combobox_colourmap.currentIndex() == 1:
            # Plain green channel rendering.
            self.img_cm = np.zeros(np.hstack([np.shape(self.img_norm), 4]))
            self.img_cm[:, :, 1] = self.img_norm
            # NOTE(review): alpha is set to 255 while the other channels are in
            # [0, 1]; after the final *255 cast this wraps in uint8 — confirm.
            self.img_cm[:, :, 3] = 255
        elif self.win.combobox_colourmap.currentIndex() == 2:
            self.img_cm = self.cm_vir(self.img_norm)
        elif self.win.combobox_colourmap.currentIndex() == 3:
            self.img_cm = self.cm_jet(self.img_norm)
        elif self.win.combobox_colourmap.currentIndex() == 4:
            self.img_cm = self.cm_raw(self.img_norm)
        return

    def display_saturated_pixels_purple(self):
        """Mark saturated pixels purple when the corresponding checkbox is enabled."""
        print('saturated pxls purple function')
        self.img_p = self.img_cm
        if self.win.cbox_saturated.isChecked():
            # NOTE(review): img_norm is float in [0, 1] here, so `> 254` never
            # fires, and 255 is out of scale with the float RGBA image —
            # confirm the intended threshold and channel scale.
            ind = self.img_norm > 254
            self.img_p[ind, 0] = 255
            self.img_p[ind, 1] = 0
            self.img_p[ind, 2] = 255
        return

    def burn_scalebar_into_image(self):
        """Draw a 100 um scale bar into the image and produce the final uint8 frame."""
        print('burn scalebar function')
        self.img_sb = self.img_p
        if self.win.cbox_show_scalebar.isChecked():
            s = self.img_sb.shape
            # NOTE(review): like the saturation marker, these 255/0 channel
            # values are out of scale with the float RGBA image — confirm.
            if self.win.combobox_colourmap.currentIndex() == 1:
                # Magenta bar on the green colormap for contrast.
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 0] = 255
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 1] = 0
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 2] = 255
            else:
                # Green bar on all other colormaps.
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 0] = 0
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 1] = 255
                self.img_sb[int(s[0] * 0.95):int(s[0] * 0.955), int(s[1] * 0.05):int(s[1] * 0.05 + 100 / self.um_per_pixel), 2] = 0
        self.img_fin = self.img_sb
        self.img_fin = np.array(self.img_fin * 255, dtype='uint8')
        return
| peterlionelnewman/flow_lithographic_printer | Graphic_display.py | Graphic_display.py | py | 6,222 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
39607752443 | import logging
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.models import Batch, OcrDump
configure_logging("dump_ocr_logging.config", "dump_ocr.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = "looks for batches that need to have ocr dump files created"

    def handle(self, *args, **options):
        """Create an OCR dump for every batch that does not already have one."""
        dump_dir = settings.OCR_DUMP_STORAGE
        if not os.path.isdir(dump_dir):
            os.makedirs(dump_dir)
        for batch in Batch.objects.filter(ocr_dump__isnull=True):
            _logger.info("starting to dump ocr for %s", batch)
            try:
                # Re-check: a dump may presumably have appeared since the
                # queryset was evaluated.
                if batch.ocr_dump:
                    _logger.info("Ocr is already generated for %s", batch)
                    continue
            except OcrDump.DoesNotExist:
                pass
            dump = OcrDump.new_from_batch(batch)
            _logger.info("created ocr dump %s for %s", dump, batch)
| open-oni/open-oni | core/management/commands/dump_ocr.py | dump_ocr.py | py | 1,024 | python | en | code | 43 | github-code | 6 | [
{
"api_name": "core.management.commands.configure_logging",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 15,
"usage_type": "... |
2962541494 | # TomoPy recon on Cyclone: compare different algorithms
import tomopy
import dxchange
import numpy as np
import os
import logging
from time import time
def touint8(data, quantiles=None):
    """Scale an array to uint8.

    If `quantiles` is None the data is scaled from its min/max to [0, 255];
    otherwise `quantiles` is a (q_low, q_high) pair in [0, 1] and the data is
    scaled between those quantile values, clipped to avoid uint8 wrap-around.
    """
    if quantiles is None:  # idiom fix: compare to None with `is`
        data_min = np.min(data)
        data_range = np.max(data) - data_min
        data = 255 * ((data - data_min) / data_range)
        return np.uint8(data)
    q0, q1 = np.quantile(np.ravel(data), quantiles)
    scaled = 255 * ((data - q0) / (q1 - q0))
    # BUGFIX: values outside the quantile range would wrap around in the
    # uint8 cast; clip first.
    return np.uint8(np.clip(scaled, 0, 255))
def writemidplanesDxchange(data, filename_out):
    """Write the three orthogonal mid-planes of a 3D volume as uint8 TIFFs."""
    if data.ndim != 3:
        return
    stem, _ext = os.path.splitext(filename_out)
    half = [int(s / 2) for s in data.shape]
    dxchange.writer.write_tiff(touint8(data[half[0], :, :]), fname=stem + '_XY.tiff', dtype='uint8')
    dxchange.writer.write_tiff(touint8(data[:, half[1], :]), fname=stem + '_XZ.tiff', dtype='uint8')
    dxchange.writer.write_tiff(touint8(data[:, :, half[2]]), fname=stem + '_YZ.tiff', dtype='uint8')
# Input scan and output location for the algorithm comparison.
h5file = "/tmp/tomoData/8671_8_B_01_/8671_8_B_01_.h5"
path_recon = "/scratch/recon/algorithm_test/"
# path_recon = "/nvme/h/jo21gi1/data_p029/test_00_/recon_phase/"
logging.basicConfig(filename=path_recon+'recon_algorithm_test.log', level=logging.DEBUG)
# CPU-based reconstruction algorithms to compare.
CPU_algorithms = ['gridrec', 'fbp', 'mlem', 'sirt', 'art']
# read projections, darks, flats and angles
projs, flats, darks, theta = dxchange.read_aps_32id(h5file, exchange_rank=0)
# If the angular information is not available from the raw data you need to set the data collection angles.
# In this case, theta is set as equally spaced between 0-180 degrees.
if theta is None:
    theta = tomopy.angles(projs.shape[0])
# flat-field correction
logging.info("Flat-field correct.")
projs = tomopy.normalize(projs, flats, darks)
# - log transform
logging.info("- log transform.")
projs = tomopy.minus_log(projs)
# COR was found with Vo method + manual inspection
COR = 1303
for alg in CPU_algorithms:
    time_start = time()
    # CPU recon
    recon = tomopy.recon(projs, theta, center=COR, algorithm=alg, sinogram_order=False)
    time_end = time()
    execution_time = time_end - time_start
    logging.info("{} reconstructed in {} s".format(alg, str(execution_time)))
    # apply circular mask
    recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
    # rescale GV range to uint8 from MIN and MAX of 3D data
    recon_uint8Range = touint8(recon)
    # apply again circ mask
    recon_uint8Range = tomopy.circ_mask(recon_uint8Range, axis=0, ratio=0.95)
    # write output stack of TIFFs as uint8
    fileout = path_recon+alg
    # dxchange.writer.write_tiff_stack(recon_uint8Range, fname=fileout, dtype='uint8', axis=0, digit=5, start=0, overwrite=True)
    writemidplanesDxchange(recon_uint8Range, fileout)
    # free the volumes before the next algorithm's reconstruction
    del recon
    del recon_uint8Range
| SESAME-Synchrotron/BEATS_recon | tests/Cyclone/tomopy_testCyclone_recon_algorithms_comparison.py | tomopy_testCyclone_recon_algorithms_comparison.py | py | 2,975 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.min",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.quantile",
"line_number": 2... |
20186345178 | # Import pakages
import torch
import torch.nn as nn
import gym
import os
import torch.nn.functional as F
import torch.multiprocessing as mp
import numpy as np
# Import python files
from utils import v_wrap, set_init, push_and_pull, record
from shared_adam import SharedAdam
os.environ["OMP_NUM_THREADS"] = "1"
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Setting hyperparameters
UPDATE_GLOBAL_ITER = 10 #
GAMMA = 0.99
MAX_EP = 500
hidden_dim_pi = 16
hidden_dim_v = 16
env = gym.make('CartPole-v0')
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
# Define basic neural network(It will be same for each worker)
class Net(nn.Module):
    """Shared actor-critic network: a policy head (action logits) and a value head V(s)."""

    def __init__(self, s_dim, a_dim):
        super(Net, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        # Policy head: state -> action logits.
        self.pi1 = nn.Linear(s_dim, hidden_dim_pi)
        self.pi2 = nn.Linear(hidden_dim_pi, a_dim)
        # Value head: state -> V(s).
        self.v1 = nn.Linear(s_dim, hidden_dim_v)
        self.v2 = nn.Linear(hidden_dim_v, 1)
        set_init([self.pi1, self.pi2, self.v1, self.v2])
        # Categorical over action indices 0..a_dim-1.
        self.distribution = torch.distributions.Categorical

    def forward(self, x):
        """Return (action logits, state values). No softmax here on purpose:
        the loss uses the raw logits."""
        hidden_pi = torch.tanh(self.pi1(x))
        hidden_v = torch.tanh(self.v1(x))
        return self.pi2(hidden_pi), self.v2(hidden_v)

    def choose_action(self, s):
        """Sample an action for state `s` from the current policy."""
        self.eval()
        logits, _ = self.forward(s)
        probs = F.softmax(logits, dim=1).data
        return self.distribution(probs).sample().numpy()[0]

    def loss_func(self, s, a, v_t):
        """A3C loss: squared critic TD error plus the policy-gradient actor loss.

        :v_t: bootstrapped target r + gamma * V(s').
        """
        self.train()
        logits, values = self.forward(s)
        td = v_t - values
        critic_loss = td.pow(2)
        m = self.distribution(F.softmax(logits, dim=1))
        actor_loss = -(m.log_prob(a) * td.detach().squeeze())
        return (critic_loss + actor_loss).mean()
class Worker(mp.Process):
    """One A3C worker process: rolls out episodes on its own env copy and
    periodically syncs gradients/weights with the shared global network."""

    def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name):
        super(Worker, self).__init__()
        self.name = 'w%02i' % name
        # Shared state: global episode counter, running reward, result queue.
        self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.gnet, self.opt = gnet, opt
        self.lnet = Net(N_S, N_A)           # local network
        self.env = gym.make('CartPole-v0').unwrapped

    def run(self):
        """Worker main loop, executed in the child process."""
        total_step = 1
        while self.g_ep.value < MAX_EP:
            s = self.env.reset()
            buffer_s, buffer_a, buffer_r = [], [], []
            ep_r = 0.
            while True:  # current time step t
                # Only the first worker renders, to avoid many windows.
                if self.name == 'w00':
                    self.env.render()
                a = self.lnet.choose_action(v_wrap(s[None, :]))
                s_, r, done, _ = self.env.step(a)
                if done: r = -1
                ep_r += r
                buffer_a.append(a)
                buffer_s.append(s)
                buffer_r.append(r)
                if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
                    # sync: push accumulated gradients to the global net, then pull fresh weights
                    push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    if done:  # done and print information
                        ep_r = min(ep_r, 200)
                        record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)
                        break
                s = s_
                total_step += 1
        # None sentinel tells the main process this worker finished.
        self.res_queue.put(None)
if __name__ == "__main__":
    gnet = Net(N_S, N_A)        # global network
    gnet.share_memory()         # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=5e-4, betas=(0.92, 0.999))  # global optimizer
    # Shared episode counter ('i' = int), shared running reward ('d' = double),
    # and a queue the workers push per-episode rewards into.
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()
    # parallel training: one worker per CPU core
    workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []  # record episode reward to plot
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            # A None sentinel means a worker finished; stop collecting.
            break
    [w.join() for w in workers]

    import matplotlib.pyplot as plt
    res = np.array(res)
    np.save("discrete_result.npy", res)
    plt.plot(res)
    plt.ylabel('ep reward')
    plt.xlabel('Episode')
    plt.show()
| smfelixchoi/MATH-DRL-study | 6.A3C/discrete_A3C.py | discrete_A3C.py | py | 4,973 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "gym.make",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_... |
21379925078 | from bogos import ScrapeBogos
import configparser
import twitter
def lambda_handler(event, context):
config = configparser.ConfigParser()
config.read('config.ini');
keywords = ''
keywordMultiWord = False
url = ''
prefixText = ''
postfixText = ''
noBogoText = ''
print('Config values:')
if 'BOGO' not in config:
print("No BOGO config found")
return
else:
bogoConfig = config['BOGO']
if 'keywords' not in bogoConfig or 'url' not in bogoConfig:
print("'keywords' or 'url' was provided in the config")
return
else:
keywords = bogoConfig['keywords'].split(',')
print('keywords: ' + str(keywords))
url = bogoConfig['url']
print('url: ' + url)
if 'keywordMultiWord' in bogoConfig:
keywordMultiWord = bogoConfig['keywordMultiWord'].lower() == 'true'
print('keywordMultiWord: ' + str(keywordMultiWord))
if 'prefixText' in bogoConfig:
prefixText = bogoConfig['prefixText']
print('prefixText: ' + prefixText)
if 'postfixText' in bogoConfig:
postfixText = bogoConfig['postfixText']
print('postfixText: ' + postfixText)
if 'noBogoText' in bogoConfig:
noBogoText = bogoConfig['noBogoText']
print('noBogoText: ' + noBogoText)
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
if 'TwitterApi' in config:
twitterConfig = config['TwitterApi']
consumer_key = twitterConfig['consumer_key']
consumer_secret = twitterConfig['consumer_secret']
access_token_key = twitterConfig['access_token_key']
access_token_secret = twitterConfig['access_token_secret']
print('End of config values')
print('====================\n')
bogos = ScrapeBogos(url, keywords, keywordMultiWord, prefixText, postfixText)
bogos.initialize()
tweetBogo(bogos.getItemsFound(), noBogoText, consumer_key, consumer_secret, access_token_key, access_token_secret)
def tweetBogo(itemsFound, noBogoText, consumer_key, consumer_secret, access_token_key, access_token_secret):
twitterApi = None
if consumer_key and consumer_secret and access_token_key and access_token_secret:
twitterApi = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token_key,
access_token_secret=access_token_secret)
if itemsFound:
for item in itemsFound:
print(item);
if twitterApi:
print('posting to twitter: ' + item)
twitterApi.PostUpdate(item)
elif noBogoText:
print(noBogoText);
if twitterApi:
print('posting to twitter: ' + noBogoText)
twitterApi.PostUpdate(noBogoText)
else:
print("nothing found");
| DFieldFL/publix-bogo-notification | BogoMain.py | BogoMain.py | py | 2,732 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bogos.ScrapeBogos",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "bogos.initialize",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "bogos.get... |
16704497954 | import pickle
import numpy as np
import scipy.io as sio
from library.error_handler import Error_Handler
class Data_Loader:
def load_data_from_pkl(self, filepath_x, filepath_y, ordering="True"):
with open(filepath_x, "rb") as file_x:
x_data = pickle.load(file_x)
with open(filepath_y, "rb") as file_y:
y_data = pickle.load(file_y)
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def load_data_from_npy(self, filepath_x, filepath_y, ordering="True"):
x_data = np.load(filepath_x)
y_data = np.load(filepath_y)
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def load_data_from_mat(self, filepath, x_key, y_key, ordering):
mat_dict = sio.loadmat(filepath)
x_data = mat_dict[x_key]
y_data = mat_dict[y_key]
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def __reorder(x, ordering):
if ordering == "SWHC":
return x
elif ordering == "CWHS":
x = np.swapaxes(x, 3, 0)
return x
elif ordering == "WHCS":
x = np.rollaxis(x, 2, 0)
x = np.swapaxes(x, 0, 3)
return x
elif ordering == "WHSC":
x = np.rollaxis(x, 3, 0)
x = np.swapaxes(x, 0, 3)
return x
elif ordering == "SCWH":
x = np.rollaxis(x, 1, 4)
return x
elif ordering == "CSWH":
x = np.swapaxes(x, 0, 1)
x = np.rollaxis(x, 1, 4)
return x
else:
Error_Handler.error_in_data_ordering()
| tzee/EKDAA-Release | library/data_loader.py | data_loader.py | py | 2,380 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pickle.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_numbe... |
36064906771 | '''
Created on 2017-1-13
@author: xuls
'''
from PIL import Image
import os
PATH2=os.path.dirname(os.getcwd())
def classfiy_histogram(image1,image2,size = (256,256)):
image1 = image1.resize(size).convert("RGB")
g = image1.histogram()
image2 = image2.resize(size).convert("RGB")
s = image2.histogram()
assert len(g) == len(s),"error"
data = []
for index in range(0,len(g)):
if g[index] != s[index]:
data.append(1 - abs(g[index] - s[index])/max(g[index],s[index]) )
else:
data.append(1)
print(sum(data)/len(g))
def compare(image):
image1 = Image.open(PATH2+"\\aw\\image\\expected\\"+image+".png")
image2 = Image.open(PATH2+"\\aw\\image\\actual\\"+image+".png")
print(image+"-differ:")
classfiy_histogram(image1,image2,size = (256,256))
if __name__ == "__main__":
'''Search'''
compare("image01")
compare("image02")
compare("image03")
compare("image04")
compare("image05")
compare("image06")
compare("image07")
compare("image08")
#
'''BusinessChance'''
compare("image11")
compare("image12")
compare("image13")
compare("image14")
'''CarContrast'''
compare("image21")
compare("image22")
'''FriendsToHelp'''
compare("image31")
compare("image32")
compare("image33")
compare("image34")
compare("image35")
compare("image36")
compare("image37")
compare("image38")
compare("image39")
compare("image3a")
compare("image3b")
compare("image3c")
compare("image3d")
compare("image3e")
'''SendTopic'''
compare("image41")
compare("image42")
compare("image43")
compare("image44")
compare("image45")
compare("image46")
compare("image47")
compare("image48")
| xulishuang/qichebaojiadaquan | src/script/sameas.py | sameas.py | py | 1,917 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number... |
162022841 |
import time
from selenium import webdriver
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from .pages.login import LoginPage
class ManageUserTestCase(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(20)
self.browser.maximize_window()
def tearDown(self):
if self.browser:
self.browser = None
def test_user_interaction_on_manage_user_page(self):
temp_page = LoginPage(self.browser)
temp_page.first_visit('{}/account/login/'.format(self.live_server_url), 'email')
temp_page = temp_page.login_user('admin@admin.com', 'admin')
temp_page.first_visit(self.live_server_url)
self.page = temp_page.visit_manage_user()
self.assertIn('Manage Users', self.page.get_body_content())
user_info = self.page.add_new_user()
self.page.wait_for_element_with_class_name('stickit_name')
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
email = user_info.get('email').lower()
name = user_info.get('name').lower()
phone = user_info.get('phone').lower()
department = user_info.get('department').lower()
self.assertIn(email, tbody_text)
self.assertIn(name, tbody_text)
self.assertIn(phone, tbody_text)
self.assertIn(department, tbody_text)
edited_user_info = self.page.edit_user()
self.page.wait_for_element_with_class_name('stickit_name')
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
self.assertNotIn(name, tbody_text)
self.assertNotIn(phone, tbody_text)
self.assertNotIn(department, tbody_text)
self.assertIn(edited_user_info.get('name').lower(), tbody_text)
self.assertIn(edited_user_info.get('phone').lower(), tbody_text)
self.assertIn(edited_user_info.get('department').lower(), tbody_text)
self.page.delete_user()
time.sleep(3)
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
self.assertNotIn(edited_user_info.get('name').lower(), tbody_text)
self.assertNotIn(edited_user_info.get('phone').lower(), tbody_text)
self.assertNotIn(edited_user_info.get('department').lower(), tbody_text)
self.assertNotIn(email, tbody_text)
| pophils/TaskManagement | yasanaproject/tests/functional/test_manage_user.py | test_manage_user.py | py | 2,447 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.contrib.staticfiles.testing.StaticLiveServerTestCase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type... |
70602414269 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 11:06:45 2020
@author: xchen
"""
## required packages
# system imports
import os
import sys
from termcolor import colored
from colorama import init
# data manipulation and data clean
from nltk.corpus import stopwords
# sklearn
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
# self-defined
import pipeline
# default data path
DATA_PATH = '../data'
GLOVE_PATH = '../glove.6B'
# default parameters
stop_words = stopwords.words('english')
stop_words = stop_words + ['would','could','may','also', 'one', 'two', 'three',
'first', 'second' ,'third',
'someone', 'anyone', 'something', 'anything',
'subject', 'organization', 'lines',
'article', 'writes', 'wrote']
tokenize_regex1 = r"\w+|\$[\d\.]+"
tokenize_regex2 = r"[a-zA-Z_]+"
def main_test(path):
dir_path = path or DATA_PATH
TRAIN_DIR = os.path.join(dir_path, "train")
TEST_DIR = os.path.join(dir_path, "test")
# load data
print (colored('Loading files into memory', 'green', attrs=['bold']))
train_path_list, ylabel_train = pipeline.parse_files(TRAIN_DIR)
test_path_list, ylabel_test = pipeline.parse_files(TEST_DIR)
train_documents = [pipeline.load_document(path = path, label = y) for \
path, y in zip(train_path_list, ylabel_train)]
test_documents = [pipeline.load_document(path = path, label = y) for \
path, y in zip(test_path_list, ylabel_test)]
# clean all documents
print (colored('Cleaning all files', 'green', attrs=['bold']))
pipeline.clean_all_documents(train_documents,
word_split_regex = tokenize_regex1,
stop_words = stop_words,
contraction_dict = 'default')
pipeline.clean_all_documents(test_documents,
word_split_regex = tokenize_regex1,
stop_words = stop_words,
contraction_dict = 'default')
# encode labels
print (colored('Encoding labels', 'green', attrs=['bold']))
y_train, y_test, category = pipeline.label_encoder(ylabel_train, ylabel_test, 'ordinal')
## *************************** machine learning ***************************
# calculate the BOW representation
print (colored('Calculating BOW', 'green', attrs=['bold']))
X_train_bow = pipeline.BagOfWord.fit_transform(train_documents)
X_test_bow = pipeline.BagOfWord.transform(test_documents)
print ("The shape of X after processing is: \ntrain: %s, test: %s"%(X_train_bow.shape, X_test_bow.shape))
# calculate the tf-idf representation
print (colored('Calculating Tf-idf', 'green', attrs=['bold']))
X_train_tfidf = pipeline.Tfidf.fit_transform(train_documents)
X_test_tfidf = pipeline.Tfidf.transform(test_documents)
print ("The shape of X after processing is: \ntrain: %s, test: %s"%(X_train_tfidf.shape, X_test_tfidf.shape))
# scale
scaler = preprocessing.Normalizer()
X_train_scaled = scaler.fit_transform(X_train_bow)
X_test_scaled = scaler.transform(X_test_bow)
## models
# naive bayes
clf_nb = MultinomialNB()
# logistic regression
clr_lr = LogisticRegression(penalty='l2', C=12, solver='lbfgs', max_iter=500, random_state=42)
# svm
clf_svm = SGDClassifier(penalty = 'l2',alpha = 5e-5, random_state=42)
# model selection
print (colored('Selecting model using 10-fold cross validation', 'magenta', attrs=['bold']))
clf_list = [clf_nb, clr_lr, clf_svm]
clf_optimal, clf_f1 = pipeline.model_selection(X_train_tfidf, y_train, clf_list, cv=5, scoring='f1_macro')
# test the optimal classifier with train-test-split
print (colored('Testing the optimal classifier with train-test split', 'magenta', attrs=['bold']))
f1 = pipeline.test_classifier(X_train_tfidf, y_train, clf_optimal, test_size=0.2, y_names=category, confusion=True)
print('Train score (macro f1):%.4f, test score (macro f1):%.4f'%(f1[1],f1[0]))
# predict test set
print (colored('Predicting test dataset', 'magenta', attrs=['bold']))
y_pred_ml = pipeline.model_prediction(clf_optimal, X_train_tfidf, y_train, X_test_tfidf)
pipeline.model_report(y_test, y_pred_ml, y_names=category, confusion=True)
def main():
init()
# get the dataset
print (colored("Where is the dataset?", 'cyan', attrs=['bold']))
print (colored('Press return with default path', 'yellow'))
ans = sys.stdin.readline()
# remove any newlines or spaces at the end of the input
path = ans.strip('\n')
if path.endswith(' '):
path = path.rstrip(' ')
print ('\n\n')
# do the main test
main_test(path)
if __name__ == '__main__':
main() | linnvel/text-classifier-master | ML.py | ML.py | py | 5,072 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.pat... |
34608371364 | import os.path
import json
import os
def readDIPfile(parent_path):
edges = {}
index = 0
xmlfilepath = os.path.join(parent_path, r'data\Hsapi20170205CR.txt')
f = open(xmlfilepath)
lines = f.readlines()
for line in lines:
line_list = line.strip("\n").split("\t")
if line_list[9] == "taxid:9606(Homo sapiens)" and line_list[10] == "taxid:9606(Homo sapiens)":
source = line_list[0].split("|")[0]
target = line_list[1].split("|")[0]
if source != target:
edges[index] = [source, target]
index += 1
print(len(edges))
result_path = parent_path + r'\data\uploads\resultEdges.json'
with open(result_path, 'w') as fw:
json.dump(edges, fw)
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.abspath('__file__'))
parent_path = os.path.dirname(ROOT_DIR)
readDIPfile(parent_path) | LittleBird120/DiseaseGenePredicition | DiseaseGenePredicition/Human_COVID_node2vec20210315/data_processing/readHumanProtein.py | readHumanProtein.py | py | 919 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number"... |
1435864274 | import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
def Euclidean_Distance(pointA, pointB):
ans = ((pointA[0] - pointB[0])**2+(pointA[1] - pointB[1])**2)**0.5
return ans
def Flat_Kernel(distance, bandwidth, point_number):
inRange = []
weight = np.zeros((point_number, 1))
for i in range (distance.shape[0]):
if distance[i] <= bandwidth:
inRange.append(distance[i])
weight[i] = 1
inRange = np.array(inRange)
return weight
def Gaussian_Kernel(distance, bandwidth, point_number):
left = 1.0/(bandwidth * math.sqrt(2*math.pi))
right = np.zeros((point_number, 1))
for i in range(point_number):
right[i, 0] = (-0.5 * distance[i] * distance[i]) / (bandwidth * bandwidth)
right[i, 0] = np.exp(right[i, 0])
return left * right
def Get_Mono_Histogram(image_dir):
img = cv2.imread(image_dir)
hist = cv2.calcHist([img],[0],None,[256],[0,256])
plt.hist(img.ravel(), 256, [0, 256])
plt.show()
def Get_RGB_Histogram(image_dir):
img = cv2.imread(image_dir)
color = ('b', 'g', 'r')
for i, col in enumerate(color):
histr = cv2.calcHist([img], [i], None, [256], [0, 256])
plt.plot(histr, color=col)
plt.xlim([0, 256])
plt.show()
| laitathei/algorithm_implemention | machine_learning/Mean_Shift/utils.py | utils.py | py | 1,331 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 21,
... |
8519707697 | import bs4 as bs
import requests
import regex as re
import pandas as pd
from src.config import *
def get_page_body(url: str):
try:
response = requests.get(url, timeout=10)
if response.status_code == 200:
page = bs.BeautifulSoup(response.text)
return page.body
except requests.exceptions.HTTPError as errh:
print("http error:", errh)
except requests.exceptions.ConnectionError as errc:
print("connection error:", errc)
except requests.exceptions.Timeout as errt:
print("timeout error:", errt)
except requests.exceptions.RequestException as err:
print("other error:", err)
else:
return None
def clean_text(s: str):
s = re.sub(r'[\n\t]', ' ', s)
s = s.strip()
s = ' '.join(s.split())
return s
def get_separate_book_urls(url: str):
page_body = get_page_body(url)
urls = []
if page_body:
urls = [URL_SOURCE + section['href'] for section in page_body.find_all("a", {"class": "bookTitle"})]
return urls
def get_category_books_urls(
input_url: str = URL_START,
book_categories=BOOK_CATEGORIES,
top_n: int = NUMBER_OF_CATEGORY_PAGES_TO_SCRAPE) -> dict:
category_urls = {}
for category in book_categories:
page_body = get_page_body(input_url + category)
if not page_body:
continue
category_link = page_body.find("div", {"class": "listImgs"}).find("a")["href"]
top_pages_links = [f"{URL_SOURCE}{category_link}?page={i}" for i in range(1, top_n + 1)]
category_urls[category] = [book_url for page_url in top_pages_links for book_url in get_separate_book_urls(page_url)]
return category_urls
def get_text(x):
return clean_text(getattr(x, "text", ""))
def get_single_book_info(url: str, book_category: str):
page_body = get_page_body(url)
book_info = {}
if page_body:
book_info["category"] = book_category
book_info["title"] = get_text(page_body.find("h1", id="bookTitle"))
book_info["author"] = get_text(page_body.find("span", itemprop="name"))
book_info["description"] = get_text(page_body.find("div", id="description"))
book_info["rating"] = get_text(page_body.find("span", itemprop="ratingValue"))
book_info["number_of_pages"] = get_text(page_body.find("span", itemprop="numberOfPages"))
book_info["url"] = url
return book_info
def get_books_data(category_urls: dict):
books_data = []
for category in category_urls.keys():
book_urls = category_urls[category]
if not book_urls:
continue
for book_url in book_urls:
book_info = get_single_book_info(book_url, category)
if book_info:
books_data += list(book_info)
return books_data | bakalstats/py_project | src/scraping_utils.py | scraping_utils.py | py | 2,826 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "requests.exce... |
73795089468 | import json
import os
from flask import current_app, redirect, request, Response
from . import blueprint
@blueprint.route("/routes")
def routes():
data = {
"name": current_app.config["name"],
"version": current_app.config["version"],
"routes": {
"api": [
"/api/documentation",
"/api/shutdown",
"/api/version"
],
"igv": [
"/igv/demo",
"/igv/custom",
"/igv/session"
]
}
}
js = json.dumps(data, indent=4, sort_keys=True)
resp = Response(js, status=200, mimetype="application/json")
return resp
@blueprint.route("/api/documentation")
def documentation():
return redirect("https://github.com/igvteam/igv.js", code=302)
@blueprint.route("/api/shutdown")
def shutdown():
try:
request.environ.get("werkzeug.server.shutdown")()
except Exception:
raise RuntimeError("Not running with the Werkzeug Server")
return "Shutting down..."
@blueprint.route("/api/version")
def api_version():
data = {
"tool_version": current_app.config["tool_version"],
"igv_version": current_app.config["igv_version"]
}
js = json.dumps(data, indent=4, sort_keys=True)
resp = Response(js, status=200, mimetype="application/json")
return resp | cumbof/igv-flask | igv/routes/basics.py | basics.py | py | 1,385 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.current_app.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api... |
6448028292 | import datetime
import time
import MySQLdb
import cv2, os
cascadePath = ("haarcascade_frontalface_default.xml")
faceCascade = cv2.CascadeClassifier(cascadePath)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('dataTrain/train.yml')
now = datetime.datetime.now()
def getProfile(id):
db = MySQLdb.connect("localhost", "root", "", "presensi")
curs = db.cursor()
cmd = "select *from facebase where npm="+str(id)
curs.execute(cmd)
profile = None
rows = curs.fetchall()
for row in rows:
profile = row
curs.close()
return profile
def getFace_info():
cam = cv2.VideoCapture(0)
while (True):
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
id, conf = recognizer.predict(gray[y:y + h, x:x + w])
profile = getProfile(id)
print(str(id) + str(conf))
if (conf < 40):
if (profile != None):
cv2.imwrite("absensi/" + profile[1] + "/" + now.strftime("%Y-%m-%d %H-%M") + "[1]" + ".jpg", img)
cv2.imwrite("absensi/" + profile[2] + "/" + now.strftime("%Y-%m-%d &H-&M") + "[2]" + ".jpg", img)
time.sleep(3)
return profile[1], profile[2]
break
else:
cam.release()
cv2.destroyAllWindows()
cv2.imshow('img', img)
if cv2.waitKey(10) & 0xff == ord('q'):
break
cam.release()
cv2.destroyAllWindows() | Kuroboy/Presensi-Face | faceRec.py | faceRec.py | py | 1,634 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.face.LBPHFaceRecognizer_create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "d... |
73900222588 | # coding: utf-8
import unittest
import os
from django.conf import settings
from studitemps_storage.path import guarded_join
from studitemps_storage.path import guarded_safe_join
from studitemps_storage.path import guarded_join_or_create
from studitemps_storage.path import FileSystemNotAvailable
ABSPATH = os.path.abspath(".")
TEST_DIR = os.path.join("studitemps_storage", "tests", "test_dir")
"""
Using unittest.TestCase because we don't need django-Database or Server
"""
class GuardedJoinTestCase(unittest.TestCase):
def test_file_exists(self):
"""
it should act like os.path.join
"""
self.assertEqual(
guarded_join(ABSPATH, 'studitemps_storage'),
os.path.join(ABSPATH, 'studitemps_storage')
)
self.assertEqual(
guarded_join(ABSPATH, TEST_DIR, 'check.txt'),
os.path.join(ABSPATH, TEST_DIR, 'check.txt')
)
def test_file_not_exists(self):
"""
It should raise IOError for not existing file/folder
"""
self.assertRaises(IOError, guarded_join, ABSPATH, 'files-does-not-exists')
def test_file_system_not_available(self):
"""
Manually activates GUARDED_JOIN_TEST to raise FileSystemNotAvailable
"""
settings.GUARDED_JOIN_TEST = True
self.assertRaises(FileSystemNotAvailable, guarded_join, ABSPATH)
settings.GUARDED_JOIN_TEST = False
class GuardedSafeJoin(unittest.TestCase):
def test_file_exists(self):
"""
It should act like os.path join with base-folder
"""
self.assertEqual(
guarded_safe_join(TEST_DIR, 'check.txt'),
os.path.join(ABSPATH, TEST_DIR, 'check.txt')
)
def test_outside_project(self):
"""
It should raise exception if try to access files outside project
"""
self.assertRaises(ValueError, guarded_safe_join, "..", "..", "..")
def test_not_exists(self):
"""
It should act like os.path join
If file/folder doesn't exists returns joined-path
"""
self.assertEqual(
guarded_safe_join(TEST_DIR, "file-does-not-exists"),
os.path.join(ABSPATH, TEST_DIR, "file-does-not-exists")
)
class GuardedJoinOrCreate(unittest.TestCase):
def test_file_exists(self):
"""
It should return path and not create new folder
"""
self.assertEqual(
guarded_join_or_create(ABSPATH, 'README.md'),
os.path.join(ABSPATH, 'README.md')
)
def test_create_dir(self):
"""
Dir does not exists - create new
"""
path = os.path.join(TEST_DIR, "new-dir")
# The folder shouldn't exists
self.assertFalse(os.path.exists(path))
# The folder should be created
guarded_join_or_create(path)
# The folder should be created successful
self.assertTrue(os.path.exists(path))
# Remove
os.rmdir(path)
self.assertFalse(os.path.exists(path))
| STUDITEMPS/studitools_storages | studitemps_storage/tests/suites/path.py | path.py | py | 3,073 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
25571472390 | import logging
# fmt = "%(name)s----->%(message)s----->%(asctime)s"
# logging.basicConfig(level="DEBUG",format=fmt)
# logging.debug("这是debug信息")
# logging.info('这是info信息')
# logging.warning('这是警告信息')
# logging.error('这是错误信息')
# logging.critical('这是cri信息')
logger = logging.getLogger('heihei') #默认的打印级别是WARNING,所以当跟控制台日志的打印级别不一样时,以打印级别最高的为准。
logger.setLevel('INFO')
console_handler = logging.StreamHandler()
#控制台的等级
console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
console_handler.setLevel(level='INFO')
logger.addHandler(console_handler)
file_handler = logging.FileHandler('1.txt', encoding='utf-8', mode='a')
file_handler.setLevel('INFO')
logger.addHandler(file_handler)
logging.debug("这是debug信息")
logging.info('这是info信息')
logging.warning('这是警告信息')
logging.error('这是错误信息')
logging.critical('这是cri信息')
USER_AGENTS | lishuangbo0123/basic | history_study/test.py | test.py | py | 1,059 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.Fil... |
25754911493 | import os
from multiprocessing import freeze_support,set_start_method
import multiprocessing
from Optimization import Optimization
from GA import RCGA
from PSO import PSO
if __name__=='__main__':
from datetime import datetime
start = datetime.now()
print('start:', start.strftime("%m.%d.%H.%M"))
multiprocessing.freeze_support()
lower = [0.9, 0.9, 0.9,0.9,0.9,0.9]
upper = [1.1,1.1,1.1,1.1,1.1,1.1]
pso = PSO(func=Optimization, n_dim=6, pop=72, max_iter=30, w=0.8, lb=lower, ub=upper, c1=1.49, c2=1.49,verbose=True)
#freeze_support()
#set_start_method('forkserver')
pso.record_mode=True
pso.run(precision=1e-5)
print('best_x',pso.pbest_x,'\n','best_y',pso.pbest_y)
f =open('best_opt.txt','a+')
f.write(str(pso.best_x))
f.close()
f=open('updating_processing.txt','a+')
f.write(str(pso.pbest_x))
f.write('\n')
f.write(str(pso.pbest_y))
end=datetime.now()
print('end',end.strftime("%m.%d.%H.%M"))
os.system('MAC.py') | zhengjunhao11/model-updating-framework | program_framework/Input.py | Input.py | py | 1,002 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "multiprocessing.freeze_support",
"line_number": 12,
"usage_type": "call"
},
{
"api_name":... |
1066446639 | """
This module defines the interface for communicating with the sound module.
.. autoclass:: _Sound
:members:
:undoc-members:
:show-inheritance:
"""
import glob
import os
import platform
import subprocess
from functools import partial
from opsoro.console_msg import *
from opsoro.sound.tts import TTS
from opsoro.users import Users
get_path = partial(os.path.join, os.path.abspath(os.path.dirname(__file__)))
class _Sound(object):
def __init__(self):
"""
Sound class, used to play sound and speak text.
"""
# List of search folders for sound files
self.sound_folders = ["../data/sounds/"]
self.playProcess = None
self.jack = False
self._platform = platform.system()
def _play(self, filename):
"""
Play any local file, used internally by other methods
:param string filename: full filename to play
"""
FNULL = open(os.devnull, "w")
if self._platform == "Darwin":
# OSX playback, used for development
self.playProcess = subprocess.Popen(
["afplay", filename], stdout=FNULL, stderr=subprocess.STDOUT)
elif not self.jack:
self.playProcess = subprocess.Popen(
["aplay", filename], stdout=FNULL, stderr=subprocess.STDOUT)
else:
# self.playProcess = subprocess.Popen(["aplay", "-D", "hw:0,0", full_path], stdout=FNULL, stderr=subprocess.STDOUT)
self.playProcess = subprocess.Popen(
["aplay", "-D", "hw:0,0", filename],
stdout=FNULL,
stderr=subprocess.STDOUT)
def say_tts(self, text, generate_only=False):
"""
Converts a string to a soundfile using Text-to-Speech libraries
:param string text: text to convert to speech
:param bool generate_only: do not play the soundfile once it is created
"""
if text is None:
return
full_path = TTS.create(text)
if generate_only:
return
# Send sound to virtual robots
Users.broadcast_robot({'sound': 'tts', 'msg': text})
self.stop_sound()
self._play(full_path)
def play_file(self, filename):
"""
Plays an audio file according to the given filename.
:param string filename: file to play
:return: True if sound is playing.
:rtype: bool
"""
self.stop_sound()
path = None
if os.path.splitext(filename)[1] == '':
filename += '.*'
for folder in self.sound_folders:
f = os.path.join(get_path(folder), filename)
files = glob.glob(f)
if files:
path = files[0]
break
if path is None:
print_error("Could not find soundfile \"%s\"." % filename)
return False
# Send sound to virtual robots
name, extension = os.path.splitext(os.path.basename(filename))
Users.broadcast_robot({'sound': 'file', 'msg': name})
self._play(path)
return True
def get_file(self, filename, tts=False):
"""
Returns audio file data according to the given filename.
:param string filename: file to return the data from
:return: Soundfile data.
:rtype: var
"""
path = None
data = None
if tts:
path = TTS.create(filename)
else:
if os.path.splitext(filename)[1] == '':
filename += '.*'
for folder in self.sound_folders:
f = os.path.join(get_path(folder), filename)
files = glob.glob(f)
if files:
path = files[0]
break
if path is None:
print_error("Could not find soundfile \"%s\"." % filename)
return data
try:
with open(get_path(path)) as f:
data = f.read()
except Exception as e:
print_error(e)
# Send sound to virtual robots
return data
def stop_sound(self):
"""
Stop the played sound.
"""
if self.playProcess == None:
return
self.playProcess.terminate()
self.playProcess = None
def wait_for_sound(self):
    """
    Block until the currently playing sound is done, then clear the handle.
    """
    # PEP 8: compare against the None singleton with `is`, not `==`.
    if self.playProcess is None:
        return
    self.playProcess.wait()
    self.playProcess = None
# Global instance that can be accessed by apps and scripts
# (module-level singleton: all importers share this one player).
Sound = _Sound()
| OPSORO/OS | src/opsoro/sound/__init__.py | __init__.py | py | 4,683 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "functools.partial",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
... |
73816196028 | import numpy as np
import matplotlib.pyplot as plt
import difuzija as di
import sys
sys.getdefaultencoding()  # NOTE(review): return value is discarded; this call has no effect
def rho(x):
    """Initial density profile: 5.5 on the interval [2, 5], zero elsewhere."""
    return 5.5 if 2.0 <= x <= 5.0 else 0.0
# Time indices (in units of the time step) at which the solution is sampled.
j = [0, 100, 200, 300, 400]
t = [0.5*J for J in j]
P1 = [0.0, 20.0, 0.0, t[0]] # initial conditions: x-range [0, 20], start time, end time
P2 = [0.0, 20.0, 0.0, t[1]]
P3 = [0.0, 20.0, 0.0, t[2]]
P4 = [0.0, 20.0, 0.0, t[3]]
P5 = [0.0, 20.0, 0.0, t[4]]
N = 100  # number of spatial grid points
D1 = di.D_exp(rho, P1, N, j[0]) # values of the diffusion solution
D2 = di.D_exp(rho, P2, N, j[1])
D3 = di.D_exp(rho, P3, N, j[2])
D4 = di.D_exp(rho, P4, N, j[3])
D5 = di.D_exp(rho, P5, N, j[4])
# x-axis expressed in units of the spatial step dx = 20/N.
X = [x/(20.0/N) for x in np.arange(0.0, 20.0 + 20.0/N, 20.0/N)]
fig = plt.figure(figsize=(9,6), dpi=120)
axes = fig.add_axes([0.15, 0.15, 0.75, 0.70])
plt.rcParams.update({'font.size': 8}) #type: ignore
axes.plot(X, D1, label='t = {}$\u0394$x'.format(j[0]), lw=0.8, color='lightblue')
#axes.plot(X, D2, label='t = {}$\u0394$x'.format(j[1]), lw=0.8, color='blue')
#axes.plot(X, D3, label='t = {}$\u0394$x'.format(j[2]), lw=0.8, color='cyan')
#axes.plot(X, D4, label='t = {}$\u0394$x'.format(j[3]), lw=0.8, color='green')
#axes.plot(X, D5, label='t = {}$\u0394$x'.format(j[4]), lw=0.8, color='orange')
axes.grid(lw=0.5)
axes.set_xlabel('x / $\u0394$x')
axes.set_ylabel('$\u03C1(x,t)$ / kgm$^{-1}$')
axes.legend(loc='best')
# NOTE(review): the title translates to "Phase diagram of the mathematical
# pendulum", but the plot shows a density profile -- looks copy-pasted from
# another exercise; confirm the intended title.
axes.set_title('Fazni dijagram matematičkog njihala')
plt.show()
| FabjanJozic/MMF3 | Predavanje12_PDJ/Zadatak1.py | Zadatak1.py | py | 1,404 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.getdefaultencoding",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "difuzija.D_exp",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "difuzija.D_exp",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "difuzija.D_exp",
... |
14594327515 | import tensorflow as tf
import json
from model_provider import get_model
from utils.create_gan_tfrecords import TFRecordsGAN
from utils.augment_images import augment_autoencoder
import os
import tensorflow.keras as K
import datetime
import string
from losses import get_loss, gradient_penalty
import argparse
# Let TensorFlow grow GPU memory on demand instead of reserving it all,
# then create the multi-GPU data-parallel strategy.
physical_devices = tf.config.experimental.list_physical_devices("GPU")
for gpu in physical_devices:
    tf.config.experimental.set_memory_growth(gpu, True)
mirrored_strategy = tf.distribute.MirroredStrategy()
# Command-line interface. The parser object gets its own name (`parser`)
# instead of being confusingly rebound from `args` on the last line.
parser = argparse.ArgumentParser(description="Train a network with specific settings")
parser.add_argument("-d", "--dataset", type=str, default="zebra2horse",
                    help="Name a dataset from the tf_dataset collection",
                    choices=["zebra2horse"])
parser.add_argument("-opt", "--optimizer", type=str, default="Adam", help="Select optimizer",
                    choices=["SGD", "RMSProp", "Adam"])
parser.add_argument("-lrs", "--lr_scheduler", type=str, default="constant", help="Select learning rate scheduler",
                    choices=["poly", "exp_decay", "constant"])
# BUG FIX: the default used to be "constant", which is not one of the
# declared choices (argparse does not validate defaults, so it slipped
# through silently). "normal" takes the same code paths as the old default
# (every gan_mode check is `== "wgan_gp"` / `!= "wgan_gp"`).
parser.add_argument("-gm", "--gan_mode", type=str, default="normal", help="Select training mode for GAN",
                    choices=["normal", "wgan_gp"])
parser.add_argument("-e", "--epochs", type=int, default=1000, help="Number of epochs to train")
parser.add_argument("--lr", type=float, default=2e-4, help="Initial learning rate")
parser.add_argument("--momentum", type=float, default=0.9, help="Momentum")
parser.add_argument("-bs", "--batch_size", type=int, default=16, help="Size of mini-batch")
parser.add_argument("-si", "--save_interval", type=int, default=5, help="Save interval for model")
parser.add_argument("-m", "--model", type=str, default="cyclegan", help="Select model")
parser.add_argument("-logs", "--logdir", type=str, default="./logs", help="Directory to save tensorboard logdir")
parser.add_argument("-l_m", "--load_model", type=str,
                    default=None,
                    help="Load model from path")
parser.add_argument("-s", "--save_dir", type=str, default="./cyclegan_runs",
                    help="Save directory for models and tensorboard")
parser.add_argument("-tfrecs", "--tf_record_path", type=str, default="/data/input/datasets/tf2_gan_tfrecords",
                    help="Save directory that contains train and validation tfrecords")
parser.add_argument("-sb", "--shuffle_buffer", type=int, default=1024, help="Size of the shuffle buffer")
parser.add_argument("--width", type=int, default=286, help="Resize width before cropping")
parser.add_argument("--height", type=int, default=286, help="Resize height before cropping")
parser.add_argument("--c_width", type=int, default=256, help="Crop width")
parser.add_argument("--c_height", type=int, default=256, help="Crop height")
parser.add_argument("--random_seed", type=int, default=1, help="Set random seed to this if true")
args = parser.parse_args()
# Hyperparameters and run configuration derived from the CLI arguments.
tf_record_path = args.tf_record_path
dataset = args.dataset
BUFFER_SIZE = args.shuffle_buffer
BATCH_SIZE = args.batch_size
IMG_WIDTH = args.width
IMG_HEIGHT = args.height
# Crop size is clamped so it never exceeds the resized image size.
CROP_HEIGHT = args.c_height if args.c_height < IMG_HEIGHT else IMG_HEIGHT
CROP_WIDTH = args.c_width if args.c_width < IMG_WIDTH else IMG_WIDTH
LAMBDA = 10  # weight of the cycle-consistency / identity terms
EPOCHS = args.epochs
LEARNING_RATE = args.lr
LEARNING_RATE_SCHEDULER = args.lr_scheduler
save_interval = args.save_interval
save_dir = args.save_dir
load_model_path = args.load_model
MODEL = args.model
gan_mode = args.gan_mode
# Timestamp with punctuation stripped, used to build a unique log-dir name.
time = str(datetime.datetime.now())
time = time.translate(str.maketrans('', '', string.punctuation)).replace(" ", "-")[:-8]
logdir = "{}_{}_e{}_lr{}_{}x{}_{}".format(time, MODEL, EPOCHS, LEARNING_RATE, IMG_HEIGHT, IMG_WIDTH, gan_mode)
# Read the two unpaired image domains (A and B) from their tfrecords.
train_A, train_B = \
    TFRecordsGAN(
        tfrecord_path=
        "{}/{}_train.tfrecords".format(tf_record_path, dataset + "_a")).read_tfrecords(), \
    TFRecordsGAN(
        tfrecord_path=
        "{}/{}_train.tfrecords".format(tf_record_path, dataset + "_b")).read_tfrecords()
with open(f"{args.tf_record_path}/data_samples.json") as f:
    data = json.load(f)
num_samples_ab = [data[dataset + "_a"], data[dataset + "_b"]]
# The smaller domain is repeated so zip() over both runs for one full pass
# of the larger one; total_samples tracks the larger domain.
if num_samples_ab[0] > num_samples_ab[1]:
    total_samples = num_samples_ab[0]
    train_B = train_B.repeat()
else:
    total_samples = num_samples_ab[1]
    train_A = train_A.repeat()
# Resize-then-crop augmentation, applied identically to both domains.
augmentor = lambda batch: augment_autoencoder(batch, size=(IMG_HEIGHT, IMG_WIDTH), crop=(CROP_HEIGHT, CROP_WIDTH))
train_A = train_A.map(
    augmentor, num_parallel_calls=tf.data.AUTOTUNE).cache().shuffle(
    BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
train_B = train_B.map(
    augmentor, num_parallel_calls=tf.data.AUTOTUNE).cache().shuffle(
    BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# Shard the batches across replicas.
train_A = mirrored_strategy.experimental_distribute_dataset(train_A)
train_B = mirrored_strategy.experimental_distribute_dataset(train_B)
# Adversarial loss: Wasserstein for WGAN-GP, binary cross-entropy otherwise.
# Cycle and identity terms both use L1 (MAE).
if gan_mode == "wgan_gp":
    gan_loss_obj = get_loss(name="Wasserstein")
else:
    gan_loss_obj = get_loss(name="binary_crossentropy")
cycle_loss_obj = get_loss(name="MAE")
id_loss_obj = get_loss(name="MAE")
def discriminator_loss(real, generated):
    """Discriminator/critic loss over real and generated logits, halved."""
    if gan_mode == "wgan_gp":
        # Wasserstein critic targets: -1 for real samples, +1 for fakes.
        loss_real = gan_loss_obj(-tf.ones_like(real), real)
        loss_fake = gan_loss_obj(tf.ones_like(generated), generated)
    else:
        # Vanilla GAN targets: 1 for real samples, 0 for fakes.
        loss_real = gan_loss_obj(tf.ones_like(real), real)
        loss_fake = gan_loss_obj(tf.zeros_like(generated), generated)
    return tf.reduce_mean(loss_fake + loss_real) * 0.5
def generator_loss(generated):
    """Adversarial loss pushing the discriminator's fake logits to 'real'."""
    if gan_mode == "wgan_gp":
        # Wasserstein: the generator drives the critic output toward -1.
        target = -tf.ones_like(generated)
    else:
        # Vanilla GAN: the generator wants its fakes classified as 1.
        target = tf.ones_like(generated)
    return tf.reduce_mean(gan_loss_obj(target, generated))
def calc_cycle_loss(real_image, cycled_image):
    """Cycle-consistency loss between an image and its round-trip reconstruction."""
    return cycle_loss_obj(real_image, cycled_image)
def identity_loss(real_image, same_image):
    """Identity-mapping loss (generator applied to its own domain), scaled by LAMBDA/2."""
    id_term = id_loss_obj(real_image, same_image)
    return 0.5 * LAMBDA * id_term
# Learning-rate schedule selection; "constant" keeps a plain float.
if LEARNING_RATE_SCHEDULER == "poly":
    lrs = K.optimizers.schedules.PolynomialDecay(LEARNING_RATE,
                                                 decay_steps=EPOCHS,
                                                 end_learning_rate=1e-8, power=0.8)
elif LEARNING_RATE_SCHEDULER == "exp_decay":
    lrs = K.optimizers.schedules.ExponentialDecay(LEARNING_RATE,
                                                  decay_steps=EPOCHS,
                                                  decay_rate=0.5)
else:
    lrs = LEARNING_RATE
# Build both generators and both discriminators inside the distribution
# scope, run one dummy forward pass so the variables are created, and give
# each network its own Adam optimizer (beta_1=0.5).
with mirrored_strategy.scope():
    generator_g = get_model("{}_gen".format(MODEL), type="gan")
    generator_f = get_model("{}_gen".format(MODEL), type="gan")
    discriminator_x = get_model("{}_disc".format(MODEL), type="gan")
    discriminator_y = get_model("{}_disc".format(MODEL), type="gan")
    tmp = tf.cast(tf.random.uniform((1, CROP_HEIGHT, CROP_WIDTH, 3), dtype=tf.float32, minval=0, maxval=1),
                  dtype=tf.float32)
    generator_g(tmp), generator_f(tmp), discriminator_x(tmp), discriminator_y(tmp)
    generator_g_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
    generator_f_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
    discriminator_x_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
    discriminator_y_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
def load_models(models_parent_dir):
    """Load pretrained weights for all four networks from a checkpoint dir.

    Expects the sub-directories gen_g, gen_f, disc_x and disc_y (the layout
    written by save_models) and copies their weights into the live models.
    """
    assert os.path.exists(models_parent_dir), "The path {} is not valid".format(models_parent_dir)
    p_gen_g = K.models.load_model(os.path.join(models_parent_dir, "gen_g"))
    p_gen_f = K.models.load_model(os.path.join(models_parent_dir, "gen_f"))
    p_disc_x = K.models.load_model(os.path.join(models_parent_dir, "disc_x"))
    p_disc_y = K.models.load_model(os.path.join(models_parent_dir, "disc_y"))
    generator_g.set_weights(p_gen_g.get_weights())
    print("Generator G loaded successfully")
    generator_f.set_weights(p_gen_f.get_weights())
    print("Generator F loaded successfully")
    discriminator_x.set_weights(p_disc_x.get_weights())
    print("Discriminator X loaded successfully")
    discriminator_y.set_weights(p_disc_y.get_weights())
    print("Discriminator Y loaded successfully")
# When resuming, the checkpoint directory's last path component is the epoch
# number (save_models names directories str(epoch + 1)).
if load_model_path is not None:
    load_models(load_model_path)
    START_EPOCH = int(load_model_path.split("/")[-1])
else:
    START_EPOCH = 0
def write_to_tensorboard(g_loss_g, g_loss_f, d_loss_x, d_loss_y, c_step, writer):
    """Log scalar losses plus sample real/fake images to TensorBoard.

    NOTE(review): this reads the module-level loop variables ``image_x`` and
    ``image_y`` (the current training batch) instead of taking them as
    parameters, so it only works when called from inside the training loop.
    """
    with writer.as_default():
        tf.summary.scalar("G_Loss_G", g_loss_g.numpy(), c_step)
        tf.summary.scalar("G_Loss_F", g_loss_f.numpy(), c_step)
        tf.summary.scalar("D_Loss_X", tf.reduce_mean(d_loss_x).numpy(), c_step)
        tf.summary.scalar("D_Loss_Y", tf.reduce_mean(d_loss_y).numpy(), c_step)
        # With more than one GPU the batch is a PerReplica value; take the
        # first replica's tensor.
        if len(physical_devices) > 1:
            o_img_a = tf.cast(image_x.values[0], dtype=tf.float32)
            o_img_b = tf.cast(image_y.values[0], dtype=tf.float32)
            img_a, img_b = o_img_a, o_img_b
        else:
            img_a = image_x
            img_b = image_y
        # img_size_a, img_size_b = img_a.shape[1] * img_a.shape[2] * img_a.shape[3], img_b.shape[1] * img_b.shape[2] * \
        #                          img_b.shape[3]
        # mean_a, mean_b = tf.reduce_mean(img_a, axis=[1, 2, 3], keepdims=True), tf.reduce_mean(img_b, axis=[1, 2, 3],
        #                                                                                       keepdims=True)
        # adjusted_std_a = tf.maximum(tf.math.reduce_std(img_a, axis=[1, 2, 3], keepdims=True),
        #                             1 / tf.sqrt(img_size_a / 1.0))
        # adjusted_std_b = tf.maximum(tf.math.reduce_std(img_b, axis=[1, 2, 3], keepdims=True),
        #                             1 / tf.sqrt(img_size_b / 1.0))
        f_image_y = generator_g(img_a, training=True)
        f_image_x = generator_f(img_b, training=True)
        confidence_a = discriminator_x(f_image_x, training=True)
        confidence_b = discriminator_y(f_image_y, training=True)
        # Images are scaled from [-1, 1] back to [0, 255] for display.
        tf.summary.image("img_a", tf.cast(127.5 * (img_a + 1), dtype=tf.uint8), step=c_step)
        tf.summary.image("img_b", tf.cast(127.5 * (img_b + 1), dtype=tf.uint8), step=c_step)
        tf.summary.image("fake_img_a", tf.cast((f_image_x + 1) * 127.5, dtype=tf.uint8), step=c_step)
        tf.summary.image("fake_img_b", tf.cast((f_image_y + 1) * 127.5, dtype=tf.uint8), step=c_step)
        tf.summary.image("confidence_a", confidence_a, step=c_step)
        tf.summary.image("confidence_b", confidence_b, step=c_step)
@tf.function
def train_step(real_x, real_y, n_critic=5):
    """One optimization step for both generators (and, for the vanilla GAN
    mode, both discriminators).

    :param real_x: batch from domain X
    :param real_y: batch from domain Y
    :param n_critic: critic updates per generator update (WGAN-GP only)
    :return: (total_gen_g_loss, total_gen_f_loss, disc_x_loss, disc_y_loss)
    """
    # real_x = tf.image.per_image_standardization(real_x)
    # real_y = tf.image.per_image_standardization(real_y)
    # A persistent tape is needed because gradients are taken for several
    # losses from the same forward pass.
    with tf.GradientTape(persistent=True) as tape:
        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)
        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)
        # same_x and same_y are used for identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)
        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)
        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)
        # calculate the loss
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)
        total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)
        # Total generator loss = adversarial loss + cycle loss (+ identity)
        total_gen_g_loss = LAMBDA * total_cycle_loss + identity_loss(real_y, same_y) + gen_g_loss
        total_gen_f_loss = LAMBDA * total_cycle_loss + identity_loss(real_x, same_x) + gen_f_loss
        if gan_mode != "wgan_gp":
            disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
            disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)
        # ------------------- Disc Cycle -------------------- #
        if gan_mode == "wgan_gp":
            # WGAN-GP trains the critics n_critic times with its own tapes
            # and applies their gradients itself; only the losses come back.
            disc_x_loss, disc_y_loss = wgan_disc_apply(fake_x, fake_y, n_critic, real_x, real_y)
    # Calculate the gradients for generator and discriminator
    generator_g_gradients = tape.gradient(total_gen_g_loss,
                                          generator_g.trainable_variables)
    generator_f_gradients = tape.gradient(total_gen_f_loss,
                                          generator_f.trainable_variables)
    if gan_mode != "wgan_gp":
        discriminator_x_gradients = tape.gradient(disc_x_loss,
                                                  discriminator_x.trainable_variables)
        discriminator_y_gradients = tape.gradient(disc_y_loss,
                                                  discriminator_y.trainable_variables)
    # Apply the gradients to the optimizer
    generator_g_optimizer.apply_gradients(zip(generator_g_gradients,
                                              generator_g.trainable_variables))
    generator_f_optimizer.apply_gradients(zip(generator_f_gradients,
                                              generator_f.trainable_variables))
    if gan_mode != "wgan_gp":
        discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
                                                      discriminator_x.trainable_variables))
        discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
                                                      discriminator_y.trainable_variables))
    return total_gen_g_loss, total_gen_f_loss, disc_x_loss, disc_y_loss
def wgan_disc_apply(fake_x, fake_y, n_critic, real_x, real_y):
    """Run n_critic WGAN-GP critic updates for both discriminators.

    Gradients are computed and applied inside this function; the last
    iteration's (disc_x_loss, disc_y_loss) pair is returned for logging.
    """
    for _ in range(n_critic):
        with tf.GradientTape(persistent=True) as disc_tape:
            disc_real_x = discriminator_x(real_x, training=True)
            disc_real_y = discriminator_y(real_y, training=True)
            disc_fake_x = discriminator_x(fake_x, training=True)
            disc_fake_y = discriminator_y(fake_y, training=True)
            # Critic loss plus the gradient penalty term (weight 10).
            disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x) + 10 * gradient_penalty(real_x, fake_x,
                                                                                               discriminator_x)
            disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y) + 10 * gradient_penalty(real_y, fake_y,
                                                                                               discriminator_y)
        discriminator_x_gradients = disc_tape.gradient(disc_x_loss,
                                                       discriminator_x.trainable_variables)
        discriminator_y_gradients = disc_tape.gradient(disc_y_loss,
                                                       discriminator_y.trainable_variables)
        discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
                                                      discriminator_x.trainable_variables))
        discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
                                                      discriminator_y.trainable_variables))
    return disc_x_loss, disc_y_loss
@tf.function
def distributed_train_step(dist_inputs_a, dist_inputs_b):
    """Run train_step on every replica and reduce the per-replica losses to their mean."""
    per_replica_gen_g_losses, per_replica_gen_f_losses, per_replica_disc_x_losses, per_replica_disc_y_losses = \
        mirrored_strategy.run(train_step, args=(dist_inputs_a, dist_inputs_b))
    reduced_gen_g_loss, reduced_gen_f_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.MEAN,
                                                                      per_replica_gen_g_losses,
                                                                      axis=None), mirrored_strategy.reduce(
        tf.distribute.ReduceOp.MEAN, per_replica_gen_f_losses,
        axis=None)
    reduced_disc_x_loss, reduced_disc_y_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.MEAN,
                                                                        per_replica_disc_x_losses,
                                                                        axis=None), mirrored_strategy.reduce(
        tf.distribute.ReduceOp.MEAN, per_replica_disc_y_losses,
        axis=None)
    return reduced_gen_g_loss, reduced_gen_f_loss, reduced_disc_x_loss, reduced_disc_y_loss
train_writer = tf.summary.create_file_writer(os.path.join(args.logdir, logdir))
def save_models():
    """Save all four networks under save_dir/MODEL/<epoch+1>/<name>.

    NOTE(review): relies on the module-level ``epoch`` variable from the
    training loop rather than a parameter.
    """
    K.models.save_model(generator_g, os.path.join(save_dir, MODEL, str(epoch + 1), "gen_g"))
    K.models.save_model(generator_f, os.path.join(save_dir, MODEL, str(epoch + 1), "gen_f"))
    K.models.save_model(discriminator_x, os.path.join(save_dir, MODEL, str(epoch + 1), "disc_x"))
    K.models.save_model(discriminator_y, os.path.join(save_dir, MODEL, str(epoch + 1), "disc_y"))
    print("Model at Epoch {}, saved at {}".format(epoch, os.path.join(save_dir, MODEL, str(epoch))))
# Main training loop: one pass over the (zipped) domains per epoch, with
# TensorBoard logging every 20 steps and checkpoints every save_interval.
for epoch in range(START_EPOCH, EPOCHS):
    print("\n ----------- Epoch {} --------------\n".format(epoch + 1))
    n = 0
    with train_writer.as_default():
        # Log either the scheduled LR at this epoch or the constant value.
        tf.summary.scalar("Learning Rate", lrs(epoch).numpy(),
                          epoch) if LEARNING_RATE_SCHEDULER != "constant" else tf.summary.scalar("Learning Rate", lrs,
                                                                                                 epoch)
    for image_x, image_y in zip(train_A, train_B):
        gen_g_loss, gen_f_loss, disc_x_loss, disc_y_loss = distributed_train_step(image_x, image_y)
        print(
            "Epoch {} \t Gen_G_Loss: {}, Gen_F_Loss: {}, Disc_X_Loss: {}, Disc_Y_Loss: {}".format(epoch + 1, gen_g_loss,
                                                                                                  gen_f_loss,
                                                                                                  disc_x_loss,
                                                                                                  disc_y_loss))
        n += 1
        if n % 20 == 0:
            write_to_tensorboard(gen_g_loss, gen_f_loss, disc_x_loss, disc_y_loss,
                                 (epoch * total_samples // BATCH_SIZE) + n, train_writer)
    if (epoch + 1) % save_interval == 0:
        save_models()
| AhmedBadar512/Badr_AI_Repo | cycle_gan_train.py | cycle_gan_train.py | py | 18,298 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tensorflow.config.experimental.list_physical_devices",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.experimental.set_memory_growth",
"line_number":... |
36914067207 | from django.urls import path
from rest_framework import routers
from user_accounts import views
# DRF router is created but no viewsets are registered yet (see the
# commented-out registration); plain path() routes are used instead.
router = routers.DefaultRouter()
# router.register('users', user_viewsets)
urlpatterns = [
    path('create_user/', views.create_user.as_view(), name='create_user'),
    path('login_user/', views.login_user.as_view(), name='login_user'),
    path('logout_user/<str:email_address>', views.logout_user.as_view(), name='logout_user'),
    path('GetUserInfoAPI/<str:email_address>', views.GetUserInfoAPI.as_view(), name='GetUserInfoAPI'),
    # path('get_principalesID/<str:email_address>', views.get_principalesID.as_view(), name='get_principalesID'),
]
| AmbeyiBrian/ELECTRONIC-SCHOOL-MANAGER-KENYA | elimu_backend/user_accounts/urls.py | urls.py | py | 649 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_nam... |
74927169786 | '''
The photon Project
-------------------
File: read_conf.py
This file reads the configuration file
@author: R. THOMAS
@year: 2018
@place: ESO
@License: GPL v3.0 - see LICENCE.txt
'''
#### Python Libraries
import configparser
import os
class Conf:
    """
    Reads a photon plot-style configuration file and exposes its settings.

    Attributes:
        BACK   -- background color string ([background] section)
        axis   -- dict of axis properties ([AXIS] section)
        ticks  -- dict of tick properties ([TICKS] section)
        legend -- dict of legend properties ([LEGEND] section)
    """

    def __init__(self, conf):
        """
        Class constructor: load *conf*, or fall back to the bundled
        default ``properties.conf`` when *conf* is None.

        :param conf: path to a configuration file, or None
        """
        # PEP 8: compare against the None singleton with `is`, not `==`.
        if conf is None:
            # If no configuration was passed, use the default file that
            # ships next to this module.
            dir_path = os.path.dirname(os.path.realpath(__file__))
            default_file = os.path.join(dir_path, 'properties.conf')
            self.read_conf(default_file)
        else:
            self.read_conf(conf)

    def read_conf(self, fileconf):
        '''
        Read the configuration file passed to the code and populate the
        BACK, axis, ticks and legend attributes.
        '''
        config = configparser.ConfigParser()
        config.read(fileconf)
        ### background color
        self.BACK = config.get('background', 'back_color')
        ## AXIS properties
        self.axis = {
            'Color': config.get('AXIS', 'Color'),
            'Label_color': config.get('AXIS', 'Label_Color'),
            'lw': config.getfloat('AXIS', 'linewidth'),
            'Labelsize': config.getfloat('AXIS', 'Labelsize'),
            'Axis_label_font': config.get('AXIS', 'Axis_label_font'),
        }
        #### Ticks properties
        self.ticks = {
            'Minor': config.get('TICKS', 'Minor'),
            'placement': config.get('TICKS', 'placement'),
            'Major_size': config.getfloat('TICKS', 'Major_size'),
            'Minor_size': config.getfloat('TICKS', 'Minor_size'),
            'Major_width': config.getfloat('TICKS', 'Major_width'),
            'Minor_width': config.getfloat('TICKS', 'Minor_width'),
            'Ticks_color': config.get('TICKS', 'Ticks_color'),
            'Label_color': config.get('TICKS', 'Label_color'),
            'Label_size': config.getfloat('TICKS', 'Label_size'),
            'Ticks_label_font': config.get('TICKS', 'Ticks_label_font'),
        }
        ### legend
        # 'ncol' is kept (as an empty string) for backward compatibility:
        # it was never read from the file in the original implementation.
        self.legend = {
            'Frame': config.get('LEGEND', 'Frame'),
            'font_size': config.getfloat('LEGEND', 'font_size'),
            'Legend_font': config.get('LEGEND', 'Legend_font'),
            'Label_font_color': config.get('LEGEND', 'Label_font_color'),
            'ncol': '',
            'location': config.get('LEGEND', 'location'),
        }
| astrom-tom/Photon | photon/read_conf.py | read_conf.py | py | 3,134 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
74182045309 | """feature for malware."""
import os.path
from abc import ABC, abstractmethod
import numpy as np
import filebrowser
import lief
from capstone import *
class Feature(ABC):
    """Interface for all feature types extracted from a binary."""

    def __init__(self):
        super().__init__()
        self.dtype = np.float32  # dtype of produced feature vectors
        self.name = ''           # human-readable feature name

    @abstractmethod
    def __call__(self):
        """Extract the feature; implemented by subclasses."""

    def __repr__(self):
        # Not every subclass has a fixed dimension (only BaseFeature sets
        # self.dim), so fall back to '?' instead of raising AttributeError.
        return '{}({})'.format(self.name, getattr(self, 'dim', '?'))
class BaseFeature(Feature, ABC):
    """Base implementation for features with a fixed output dimension."""

    def __init__(self, dim):
        super(BaseFeature, self).__init__()
        self.dim = dim  # length of the produced feature vector

    def empty(self):
        """Return an all-zero float32 vector of the configured dimension."""
        return np.zeros(self.dim, dtype=np.float32)
class RawBytesFeature(Feature):
    """Raw byte string of the whole (rebuilt) executable."""

    def __init__(self):
        super(RawBytesFeature, self).__init__()
        self.bytez = None  # populated by __call__

    def __call__(self, binary):
        """Rebuild the PE with lief and return its raw bytes.

        :param binary: a parsed ``lief.PE`` binary object
        :return: bytearray of the rebuilt executable
        """
        builder = lief.PE.Builder(binary)
        builder.build()
        self.bytez = bytearray(builder.get_build())
        return self.bytez

    def image(self, width=256):
        """Reshape the stored bytes into a (height, width) uint8 array.

        NOTE(review): requires __call__ to have run first; otherwise
        self.bytez is None and len() raises TypeError.
        """
        total_size = len(self.bytez)
        rem = total_size % width
        height = total_size // width
        arr = np.frombuffer(self.bytez, dtype=np.uint8)
        if rem != 0:
            # Pad the final partial row with zero bytes.
            height += 1
            arr = np.pad(arr, (0, width-rem), 'constant')
        return arr.reshape((height, width))
class OpCodeFeature(Feature):
    """Opcode (mnemonic) sequence from a PE binary's executable sections."""

    def __init__(self, only_text=False):
        super(OpCodeFeature, self).__init__()
        self.only_text = only_text  # restrict disassembly to the .text section

    def __call__(self, binary):
        """Disassemble the executable sections and return their mnemonics.

        :param binary: a parsed ``lief.PE`` binary object
        :return: list of opcode mnemonic strings in disassembly order
        """
        opcode_seq = []
        disasm_sections = []
        for sec in binary.sections:
            if lief.PE.SECTION_CHARACTERISTICS.MEM_EXECUTE in sec.characteristics_lists:
                disasm_sections.append(sec.name)
        if self.only_text:
            disasm_sections = [".text"]
        # The disassembler mode depends only on the binary's machine type,
        # not on the section, so build it once instead of per section.
        if binary.header.machine == lief.PE.MACHINE_TYPES.I386:
            md = Cs(CS_ARCH_X86, CS_MODE_32)
        else:
            md = Cs(CS_ARCH_X86, CS_MODE_64)
        for name in disasm_sections:
            section = binary.get_section(name)
            try:  # some sections may contain no content
                # Renamed from `bytes`, which shadowed the builtin; also
                # narrowed the bare `except:` to `except Exception`.
                code = section.content.tobytes()
            except Exception:
                continue
            for i in md.disasm(code, section.virtual_address):
                opcode_seq.append(i.mnemonic)
        return opcode_seq
if __name__ == "__main__":
    # Demo: download one sample, extract raw-byte and opcode features.
    # SECURITY(review): credentials are hard-coded here; move them to the
    # environment or a config file before sharing this script.
    fclient = filebrowser.FileBrowserClient().with_host(
        host="10.112.108.112", port="8081", username="admin", password="daxiahyh")
    download_list = [
        "dagongren/DikeDataset-main/files/benign/0a8deb24eef193e13c691190758c349776eab1cd65fba7b5dae77c7ee9fcc906.exe",
    ]
    opcode_set = set()
    for remote_path in download_list:
        save_path = os.path.join("../download", remote_path.split("/")[-1])
        print(save_path)
        fclient.download_auth_file(
            remote_path,
            save_path)
        binary = lief.PE.parse(save_path)
        # Renamed from `bytes`, which shadowed the builtin type.
        raw_bytes_feature = RawBytesFeature()
        print(hex(len(raw_bytes_feature(binary))))
        print(raw_bytes_feature.image())
        opcodes = OpCodeFeature()
        opcode_set.update(opcodes(binary))
    print(len(opcode_set))
| dagrons/try | feature/feature.py | feature.py | py | 3,314 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "abc.ABC",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_num... |
9380859967 | import numpy as np
import time, glob, cv2
from pymycobot import MyCobotSocket
from pymycobot import PI_PORT, PI_BAUD
from single_aruco_detection import marker_detecting
from forward_kinematics import F_K
from inverse_kinematics import I_K
import matplotlib.pyplot as plt
from scipy.linalg import orthogonal_procrustes
def std_deviation():
    """Evaluate the hand-eye calibration by re-projecting chessboard corners.

    Loads the camera-to-gripper transform plus per-image marker and gripper
    poses, maps an ideal 9x6 chessboard (16 mm squares) into the robot base
    frame for every image, computes per-image deviations from the mean
    corner positions, and finally fits an orthogonal Procrustes transform
    between the mean corners and the ideal board. Results are written to
    ``scripts/Analysis`` and printed.
    """
    # load the T_cam2ee
    T_cam2ee = np.load("scripts/Hand_eye_calibration/cam2gripper.npy")
    # load the T_marker2cam
    R_target2cam = np.load("scripts/Hand_eye_calibration/R_target2cam.npy")
    t_target2cam = np.load("scripts/Hand_eye_calibration/t_target2cam.npy")
    # number of images
    n = len(R_target2cam)
    T_mar2cam = []
    for i in range(len(t_target2cam)):
        # Assemble a 4x4 homogeneous transform from R and t.
        T_eye = np.eye(4)
        T_eye[0:3,0:3] = R_target2cam[i]
        T_eye[0:3,3] = np.reshape(t_target2cam[i], (3,))
        T_mar2cam.append(T_eye)
    # load the T_ee2base
    T_ee2base_file = sorted(glob.glob("scripts/Hand_eye_calibration"
                            + "/T_gripper2base/*.npy"))
    T_ee2base = [np.load(f) for f in T_ee2base_file]
    # T_marker2base
    T_mar2base = []
    for i in range(len(T_ee2base)):
        T_mar2base.append(transform2base(T_mar2cam[i], T_cam2ee, T_ee2base[i]))
    np.save("scripts/Analysis/T_mar2base", T_mar2base)
    # print(np.shape(T_mar2base))
    # create the chessboard corners
    # 9X6 chessboard, 0.016m for the edge of the square
    chessboard_corners = []
    # for line (y)
    for i in range(0,6):
        # for coloum (x)
        for j in range(0,9):
            # Homogeneous corner coordinate [x, y, 0, 1].
            corner_coord = [0,0,0,1]
            corner_coord[0] = 0.016 * j
            corner_coord[1] = 0.016 * i
            chessboard_corners.append(corner_coord)
    chessboard_corners = np.reshape(chessboard_corners, (54,4))
    # print(np.shape(chessboard_corners)[0])
    # print(chessboard_corners)
    # transfer the chessboard corners to the base
    y = []
    # loop the T_mar2base
    for i in range(0, np.shape(T_mar2base)[0]):
        # loop the chessboard corners
        y_i = []
        for j in range(0,54):
            coord = T_mar2base[i] @ np.reshape(chessboard_corners[j], (4,1))
            y_i.append(coord)
        y.append(y_i)
    # print(np.shape(y[0]))
    # sum the y_i
    # NOTE(review): 'sum' shadows the built-in of the same name within this
    # function body.
    sum = np.squeeze(y[0])
    for i in range(1,n):
        sum = sum + np.squeeze(y[i])
    # y bar
    y_bar = (sum)/n
    # y_i - y_bar
    error = []
    for i in range(0, np.shape(y)[0]):
        error.append(np.squeeze(y[i])-y_bar)
    # print(np.shape(error[0]))
    # square each error, then sum, then divided by 6*9, finally square root
    # error_each_image in m
    # error of each corresponding corner in each image
    # NOTE(review): as written this computes sqrt(sum of squares) / 54, not
    # the RMS sqrt(sum / 54) that the comment above describes -- confirm
    # which is intended.
    error_each_image = []
    for i in range(0, n):
        error_each_image.append(np.sqrt(np.sum(np.square(error[i])))/(6*9))
    # print(error_each_image)
    ##### draw the figure and save #####
    # font1 = {'family':'times','size':14}
    # font2 = {'family':'times','size':12}
    # plt.figure(figsize=(12,5))
    # plt.plot(np.arange(1,33), error_each_image)
    # plt.xticks(range(1,33))
    # plt.xlabel("No.i pose", fontdict=font2)
    # plt.ylabel("Error / (m)", fontdict=font2)
    # plt.title("Average error at No.i pose", fontdict=font1)
    # plt.savefig("error.png",dpi=500)
    ### procrustes ###
    R, _ = orthogonal_procrustes(y_bar, chessboard_corners)
    np.save("scripts/Analysis/T_mar2base_procrustes", R)
    print("the estimated transformation is:")
    print(R)
def transform2base(T_marker2cam, T_cam2gripper, T_gripper2base):
    """
    Chain the marker->camera, camera->gripper and gripper->base homogeneous
    transforms into a single marker->base transform.

    Equivalent to T_gripper2base @ T_cam2gripper @ T_marker2cam.
    """
    cam_to_base = T_gripper2base @ T_cam2gripper
    return cam_to_base @ T_marker2cam
if __name__ == "__main__":
    # Script entry point: compute the marker-to-base error statistics.
    std_deviation()
| zzZzzccHEnn/Visual_Tracking | evaluation.py | evaluation.py | py | 3,833 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 25,
... |
23595304281 | import os
import re
import time
from option import Option
import pandas as pd
import wget as wget
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import download
__author__ = 'Song Hui' # 作者名
def get_options_from_command_line():
    """Parse command-line arguments into an :class:`Option`.

    :return: Option(output_dir, result_file, limit); limit is None when
             the -l flag was not given.
    """
    import argparse
    # Initialize parser
    parser = argparse.ArgumentParser()
    # Adding optional argument
    parser.add_argument("-o", "--output_dir", help="to set directory of all output files")
    parser.add_argument("-r", "--result_file", help="to set result CSV file name")
    parser.add_argument("-l", "--limit", help="to set max file count")
    # Read arguments from command line. parse_args() always returns a
    # Namespace (or exits the process on error), so the old `if args:`
    # truthiness check was dead code and has been removed.
    args = parser.parse_args()
    print("parsing arguments: {}".format(args))
    return Option(args.output_dir, args.result_file, int(args.limit) if args.limit is not None else None)
if __name__ == '__main__':
    # Parse command-line options
    option = get_options_from_command_line()
    if not os.path.exists(option.output_dir):
        os.mkdir(option.output_dir)
    # Regular expressions that extract a resource name from a media URL
    exp_img = re.compile(r'v/.+?/(.*?)\?')
    exp_video = re.compile(r'v/.+?/(.*?)\?')
    web_options = webdriver.ChromeOptions()
    web_options.add_argument("--enable-javascript")
    # web_options.add_argument('--always-authorize-plugins=true')
    with webdriver.Chrome(options=web_options) as driver:
        # with webdriver.Chrome(chrome_options=options) as driver:
        wait = WebDriverWait(driver, 10)
        # Open the ads-library search page
        driver.get(
            "https://www.facebook.com/ads/library/?active_status=active&ad_type=all&country=ALL&q=clothing&sort_data[direction]=desc&sort_data[mode]=relevancy_monthly_grouped&start_date[min]=2021-11-25&start_date[max]=2021-11-26&search_type=keyword_unordered&media_type=video")
        # Scroll until enough video description nodes are loaded.
        # NOTE(review): option.limit may be None (no -l flag given), which
        # makes this comparison raise TypeError -- confirm a default.
        results = []
        while len(results) < option.limit:
            results = driver.find_elements(By.CSS_SELECTOR,
                                           "div._99s5")
            driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
            time.sleep(5)
        times = 0
        # Prepare the result table
        columns = ['title', 'img_name', 'video_name', 'desc']
        df = pd.DataFrame(columns=columns)
        for ele in results:
            ele_item = ele.find_element(By.CSS_SELECTOR, "div.iajz466s div._7jyg")
            # Title node
            title_item = ele_item.find_element(By.CSS_SELECTOR,
                                               "div._8nsi a.aa8h9o0m>span.a53abz89")
            title = title_item.text
            print(title)
            # Description node
            desc_item = ele_item.find_element(By.CSS_SELECTOR,
                                              "div._7jyr>span div._4ik4>div")
            desc = desc_item.text
            # Video element (poster image URL + video source URL)
            video_item = ele_item.find_element(By.CSS_SELECTOR,
                                               "div._8o0a>video")
            img_url = video_item.get_attribute('poster')
            # print(img_url)
            img_name = re.search(exp_img, img_url).group(1)
            print(img_name)
            video_url = video_item.get_attribute('src')
            # print(video_url)
            video_name = re.search(exp_video, video_url).group(1) + 'mp4'
            # The site serves video files with no extension; append one so
            # the downloaded file is playable.
            print(video_name)
            if os.path.exists(option.output_dir + video_name):
                # Skip this record if the video already exists in the
                # output directory.
                continue
            # Download the poster image and the video file
            try:
                wget.download(img_url, option.output_dir + img_name)
            except Exception as e:
                print('下载图片"{}"异常:{}'.format(img_url, e))
                continue
            try:
                wget.download(video_url, option.output_dir + video_name)
            except Exception as e:
                print('下载视频"{}"异常:{}'.format(video_url, e))
                continue
            # Append one data record.
            # NOTE(review): DataFrame.append was removed in pandas 2.x;
            # pd.concat is the replacement.
            df = df.append({'title': title, 'img_name': img_name, 'video_name': video_name, 'desc': desc},
                           ignore_index=True)
            # Enforce the record limit
            times += 1
            if times >= option.limit:
                break
            # # driver.get(ele.get_attribute('src'))
            time.sleep(0.5)
        # Persist the result table
        with open(option.output_result, 'a', encoding="utf-8", newline='') as f:
            # Append if the file already exists (header only when empty)
            df.to_csv(f, header=f.tell() == 0)
| songofhawk/simplerpa | test/test_selenium/facebook_download.py | facebook_download.py | py | 4,871 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "option.Option",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
41707501948 | import tensorflow as tf
from config import cfg
def detect_loss():
    """Build a Keras-compatible loss for a single-box detection grid.

    The ground truth / prediction tensors are (batch, H, W, C) grids whose
    last channel is an objectness score and whose first channels encode
    (height, width, offset_y, offset_x) for the cell's box.
    """
    def get_box_highest_percentage(arr):
        """Extract, per batch item, the box of the grid cell with the
        highest objectness score, as (y, x, h, w, score)."""
        shape = tf.shape(arr)
        # flatten the H x W grid so argmax can scan every cell at once
        reshaped = tf.reshape(arr, (shape[0], tf.reduce_prod(shape[1:-1]), -1))
        # returns array containing the index of the highest percentage of each batch
        # where 0 <= index <= height * width
        max_prob_ind = tf.argmax(reshaped[..., -1], axis=-1, output_type=tf.int32)
        # turn indices (batch, y * x) into (batch, y, x)
        # returns (3, batch) tensor
        unraveled = tf.unravel_index(max_prob_ind, shape[:-1])
        # turn tensor into (batch, 3) and keep only (y, x)
        unraveled = tf.transpose(unraveled)[:, 1:]
        y, x = unraveled[..., 0], unraveled[..., 1]
        # stack indices and create (batch, 5) tensor which
        # contains height, width, offset_y, offset_x, percentage
        indices = tf.stack([tf.range(shape[0]), y, x], axis=-1)
        box = tf.gather_nd(arr, indices)
        y, x = tf.cast(y, tf.float32), tf.cast(x, tf.float32)
        # transform box to (y + offset_y, x + offset_x, GRID_SIZE * height, GRID_SIZE * width, obj)
        # output is (batch, 5)
        out = tf.stack([y + box[..., 2], x + box[..., 3],
                        cfg.NN.GRID_SIZE * box[..., 0], cfg.NN.GRID_SIZE * box[..., 1],
                        box[..., -1]], axis=-1)
        return out
    def loss(y_true, y_pred):
        """Objectness BCE over the whole grid + MSE between best boxes."""
        # get the box with the highest percentage in each image
        true_box = get_box_highest_percentage(y_true)
        pred_box = get_box_highest_percentage(y_pred)
        # object loss
        obj_loss = tf.keras.losses.binary_crossentropy(y_true[..., 4:5], y_pred[..., 4:5])
        # mse with the boxes that have the highest percentage
        box_loss = tf.reduce_sum(tf.math.squared_difference(true_box[..., :-1], pred_box[..., :-1]))
        return tf.reduce_sum(obj_loss) + box_loss
    return loss
| burnpiro/tiny-face-detection-tensorflow2 | model/loss.py | loss.py | py | 1,942 | python | en | code | 27 | github-code | 6 | [
{
"api_name": "tensorflow.shape",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_prod",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.ar... |
13348518622 | import argparse
import datetime
import pathlib
import subprocess
import sys
import time
import run_all_utils
# Reference template of an S3segmenter invocation.
# NOTE(review): main() builds its own argument list instead of formatting
# this string -- it appears to be kept only as documentation; confirm.
command = '''
python ../modules/S3segmenter/large/S3segmenter.py
--imagePath "{}"
--stackProbPath "{}"
--outputPath "{}"
--probMapChan {probMapChan}
--area-max 50000
--expand-size {expand_size}
--maxima-footprint-size {maxima_footprint_size}
--mean-intensity-min {mean_intensity_min}
--pixelSize {pixelSize}
'''
MODULE_NAME = 's3seg'
# (parameter name, default value, type) triplets consumed by
# run_all_utils.init_run to build the module parameter dict.
ORION_DEFAULTS = [
    ('probMapChan', 1, 'int'),
    ('expand-size', 5, 'int'),
    ('maxima-footprint-size', 13, 'int'),
    ('mean-intensity-min', 128, 'float'),
    ('pixelSize', 0.325, 'float'),
]
def main(argv=sys.argv):
    """Run S3segmenter for every entry of the batch config CSV and log timings."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-c', metavar='config-csv', required=True)
    arg_parser.add_argument('-m', metavar='module-params', required=False,
                            default=None)
    args = arg_parser.parse_args(argv[1:])
    script_dir = pathlib.Path(__file__).resolve().parent
    file_config, module_params, log_path = run_all_utils.init_run(
        args, ORION_DEFAULTS, MODULE_NAME
    )
    for config in file_config[:]:
        config = run_all_utils.set_config_defaults(config)
        name = config['name']
        out_dir = config['out_dir']
        print('Processing', name)
        # probMapChan is 1-based in the config, 0-based in unmicst filenames
        nucleus_channel = module_params['probMapChan'] - 1
        pmap_path = out_dir / name / 'unmicst2' / f'{name}_Probabilities_{nucleus_channel}.ome.tif'
        segmenter_cmd = [
            'python',
            script_dir.parent / 'modules/S3segmenter/large/S3segmenter.py',
            '--imagePath', config['path'],
            '--stackProbPath', pmap_path,
            '--outputPath', out_dir / name / 'segmentation',
            '--area-max', str(50000)
        ]
        for flag, value in module_params.items():
            segmenter_cmd.extend([f"--{flag}", str(value)])
        started = int(time.perf_counter())
        subprocess.run(segmenter_cmd)
        finished = int(time.perf_counter())
        print('elapsed', datetime.timedelta(seconds=finished - started))
        print()
        run_all_utils.to_log(
            log_path, config['path'], finished - started, module_params
        )
    return 0
if __name__ == '__main__':
sys.exit(main()) | Yu-AnChen/orion-scripts | processing/command-s3seg.py | command-s3seg.py | py | 2,454 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "run_all_utils.init... |
10966555857 | import json
class Config(dict):
    """Dict-like configuration bound to one section of a JSON config file.

    The file maps section names to flat dicts; an optional 'default'
    section supplies fallback values that every other section inherits.
    """

    def __init__(self, path=None, section='default', *args, **kwargs):
        super().__init__(*args, **kwargs)
        if path is not None:
            self.read(path, section)

    def read(self, path, section='default'):
        '''read config from config file.
        will clear config before read

        Raises KeyError if *section* is not present in the file.'''
        self.section = section
        self.dirty = False  # True once in-memory values differ from disk
        self.hasDefault = False
        self.path = path
        self.clear()
        with open(path) as f:
            self.conf = json.load(f)
        if self.section not in self.conf:
            raise KeyError('{} not a valid key'.format(self.section))
        self.hasDefault = 'default' in self.conf
        if self.hasDefault:
            # defaults first, so the named section can override them
            self.update(self.conf['default'])
        self.update(self.conf[self.section])

    def save(self):
        '''save config (rewrites the file only if something changed).'''
        dconf = {}
        if self.hasDefault:
            dconf = self.conf['default']
        sconf = self.conf[self.section]
        # delete keys that were removed from this Config
        for key in set(sconf):
            if key not in self:
                self.dirty = True
                del sconf[key]
        # add / change keys, skipping values already covered by defaults
        for key in self:
            if key in dconf and self[key] == dconf[key]:
                continue
            if key not in sconf or sconf[key] != self[key]:
                # BUG FIX: this used to read `self.dirtY`, a bare attribute
                # access that raised AttributeError instead of marking the
                # config dirty.
                self.dirty = True
            sconf[key] = self[key]
        if self.dirty:
            with open(self.path, 'w') as f:
                json.dump(self.conf, f, sort_keys=True, ensure_ascii=False,
                          indent=4, separators=(',', ': '))
            self.dirty = False

    def write(self, path):
        '''write conf to file (unconditionally)'''
        self.path = path
        self.dirty = True
        self.save()
| lycsjm/acgnmanager | src/lib/config.py | config.py | py | 1,751 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 52,
"usage_type": "call"
}
] |
8632528934 | from multiprocessing import cpu_count
from deepsecrets.core.utils.fs import path_exists
# cgroup v1 CPU bandwidth files (quota and period, in microseconds)
QUOTA_FILE = '/sys/fs/cgroup/cpu/cpu.cfs_quota_us'
PERIOD_FILE = '/sys/fs/cgroup/cpu/cpu.cfs_period_us'
# cgroup v2 unified file: single line "<quota> <period>"
CGROUP_2_MAX = '/sys/fs/cgroup/cpu.max'
class CpuHelper:
    """Determine how many CPUs this process may use, honouring cgroup quotas."""

    def get_limit(self) -> int:
        """Return the effective CPU count (0 if nothing sensible was found).

        A cgroup quota, when present, takes precedence over the host CPU
        count reported by multiprocessing.
        """
        multiproc_limit = self._by_multiproc()
        cgroup = self._by_cgroup()
        # -1 is _by_cgroup's "no limit configured" sentinel
        final = cgroup if cgroup != -1 else multiproc_limit
        return final if final > 0 else 0

    def _by_multiproc(self):
        # Host CPU count; ignores any cgroup restriction.
        return cpu_count()

    def _by_cgroup(self):
        # With quota=1, period=-1 the fallthrough returns 1 // -1 == -1,
        # i.e. the "no cgroup limit found" sentinel used by get_limit().
        quota = 1
        period = -1
        # cgroup 2: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
        if path_exists(CGROUP_2_MAX):
            try:
                quota, period = self.__cgroup2()
                return quota // period
            except Exception:
                pass
        # cgroup 1: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html
        if path_exists(QUOTA_FILE) and path_exists(PERIOD_FILE):
            try:
                quota, period = self.__cgroup1()
                return quota // period
            except Exception:
                pass
        return quota // period

    def __cgroup1(self):
        # v1 keeps quota and period (microseconds) in two separate files.
        quota = 1
        period = -1
        with open(QUOTA_FILE) as f:
            quota = int(f.read())
        with open(PERIOD_FILE) as f:
            period = int(f.read())
        return quota, period

    def __cgroup2(self):
        # v2 exposes both values in one file: "<quota> <period>".
        # NOTE(review): a quota of "max" (unlimited) makes int() raise here;
        # the caller treats that as "no limit" via its except path -- confirm
        # this is intentional.
        quota = 1
        period = -1
        with open(CGROUP_2_MAX) as f:
            str_quota_period = f.read().split(' ')
            quota = int(str_quota_period[0])
            period = int(str_quota_period[1])
        return quota, period
| avito-tech/deepsecrets | deepsecrets/core/utils/cpu.py | cpu.py | py | 1,763 | python | en | code | 174 | github-code | 6 | [
{
"api_name": "multiprocessing.cpu_count",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "deepsecrets.core.utils.fs.path_exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "deepsecrets.core.utils.fs.path_exists",
"line_number": 35,
"usage_type": ... |
32749134758 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
def _explore(loans):
    """Exploratory plots: FICO distributions, purpose counts, rate trends."""
    # histogram of two FICO distributions on top of each other, one for each credit.policy outcome
    plt.figure(figsize = (10, 6))
    loans[loans['credit.policy'] == 1]['fico'].hist(alpha = 0.5, bins = 30, color = 'blue', label = 'Credit policy = 1')
    loans[loans['credit.policy'] == 0]['fico'].hist(alpha = 0.5, bins = 30, color = 'red', label = 'Credit policy = 0')
    plt.legend()
    plt.xlabel('FICO')
    # similar figure, except this time selected by the not.fully.paid column
    plt.figure(figsize = (10, 6))
    loans[loans['not.fully.paid'] == 1]['fico'].hist(alpha = 0.5, bins = 30, label = 'Not fully paid = 1', color = 'blue')
    loans[loans['not.fully.paid'] == 0]['fico'].hist(alpha = 0.5, bins = 30, label = 'Not fully paid = 0', color = 'red')
    plt.legend()
    plt.xlabel('FICO')
    # countplot showing the counts of loans by purpose, with the color hue defined by not.fully.paid
    plt.figure(figsize = (15, 6))
    sns.countplot(data = loans, hue = 'not.fully.paid', x = 'purpose')
    # trend between FICO score and interest rate
    sns.jointplot(data = loans, kind = 'scatter', x = 'fico', y = 'int.rate')
    # lmplots to see if the trend differs between not.fully.paid and credit.policy
    sns.lmplot(x = 'fico', y = 'int.rate', data = loans, hue = 'credit.policy', col = 'not.fully.paid')


def _fit_and_report(model, X_train, y_train, X_test, y_test):
    """Fit `model`, predict on the test set, print report and confusion matrix."""
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(classification_report(y_test, predictions))
    print(confusion_matrix(y_test, predictions))


def main():
    """Predict whether a borrower pays back the loan (decision tree vs random forest)."""
    # reading loan_data.csv as a dataframe
    loans = pd.read_csv('loan_data.csv')
    # checking out the loans information
    print(loans.info())
    print(loans.head())
    print(loans.describe())
    _explore(loans)
    # purpose column is categorical; transforming it using dummy variables
    final_data = pd.get_dummies(loans, columns = ['purpose'], drop_first = True)
    print(final_data.info())
    # splitting data into a training set and a testing set
    X = final_data.drop('not.fully.paid', axis = 1)
    y = final_data['not.fully.paid']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 101)
    # decision tree, then random forest, evaluated the same way
    _fit_and_report(DecisionTreeClassifier(), X_train, y_train, X_test, y_test)
    _fit_and_report(RandomForestClassifier(n_estimators = 600), X_train, y_train, X_test, y_test)
    plt.show()
if __name__ == '__main__':
main() | AleksandarPav/Decision-Tree-and-Random-Forest | main.py | main.py | py | 3,272 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
40170866347 | import os
import numpy as np
import cv2
import imutils
import sys
np.set_printoptions(threshold=sys.maxsize)
# Polygon vertex count -> shape name (contour-based detector).
corner_shapes_map = {
    3: "triangle",
    4: "rectangle",
    5: "pentagon",
    6: "hexagon"
}
# Model output index -> shape name (ML-based detector).
model_prediction_map = {
    0: "triangle",
    1: "rectangle",
    2: "circle"
}
def get_corners_in_canvas(canvas):
    """Approximate the first external contour of `canvas` as a polygon and
    return its vertex count."""
    raw = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(raw)
    perimeter = cv2.arcLength(contours[0], True)
    polygon = cv2.approxPolyDP(contours[0], 0.1 * perimeter, True)
    return len(polygon)
def get_shape_from_model(canvas):
    """Classify `canvas` with the trained Keras model stored in ML/model.h5.

    The model consumes a flattened 60x60 image and emits one probability
    per entry of model_prediction_map.
    """
    from keras.models import load_model
    model = load_model(os.path.join(os.getcwd(), "ML", "model.h5"))
    # assumes canvas is a single-channel image -- TODO confirm
    m_input = cv2.resize(canvas, (60, 60))
    m_input = m_input.astype('float32')
    print(m_input.shape)
    # m_input /= 255
    m_input = m_input.reshape(np.prod([60, 60]))
    pred_list = model.predict(m_input.reshape(1, np.prod([60, 60])))[0].tolist()
    print(pred_list)
    # highest-probability class wins
    max_val = max(pred_list)
    return model_prediction_map[pred_list.index(max_val)]
def get_shape(arr, use_ml=False):
    """Classify the shape traced by a list of (timestamp, x, y) points.

    Returns one of the names in corner_shapes_map / model_prediction_map,
    "circle" for polygons with more than 6 corners, or None when the
    corner count is unrecognised.
    """
    # BUG FIX: this used dtype=np.uint8, which silently wraps negative
    # coordinates (the sample inputs contain values like -14.6) and any
    # timestamp above 255 *before* the +50 offset is applied. Keep a
    # signed float dtype and only convert to int32 when rasterising.
    np_arr = np.array(arr, dtype=np.float32)
    # Sort by timestamps
    np_arr = np_arr[np.argsort(np_arr[:, 0])]
    contours = np_arr[:, 1:]
    # Hardcoded: Offset x values by 50 so negative inputs land on-canvas.
    contours[:, 0] += 50
    # Black canvas with the polygon filled in white (thresh-style image).
    canvas = np.full((200, 200), 0, dtype=np.uint8)
    cv2.fillPoly(canvas, pts=np.int32([contours]), color=255)
    if use_ml:
        return get_shape_from_model(canvas)
    n_corners = get_corners_in_canvas(canvas)
    if n_corners > 6:
        return "circle"
    # None when the vertex count has no mapped shape name
    return corner_shapes_map.get(n_corners)
#
# if __name__ == "__main__":
# inp = [(0, 9.4, 83.4), (1, 10.4, 83.2), (2, 8.8, 83.2), (3, 9, 84), (4, 4.6, 81.6), (5, 1.6, 79.4), (6, -7.8, 75.2),
# (7, -12.6, 67.6), (8, -14.6, 59), (9, -10.6, 52.4), (10, -3.6, 49.6), (11, 12.4, 51.8), (12, 21.8, 56.4),
# (13, 28.8, 64.4), (14, 30.4, 73.4), (15, 27.2, 78.6), (16, 19.4, 79.2), (17, 15.4, 82.2), (18, 10.8, 82.4),
# (19, 10.4, 82), (20, 9.6, 81.6), (21, 10.2, 83.8)]
#
# print(get_shape(inp, True))
| nirajsrimal/UWB_2FA | BackEnd/solver.py | solver.py | py | 2,434 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTER... |
73016396668 | # TODO save output in a set to remove duplicated warnings
import sys
import time
import subprocess
import linecache
# Maps an error-sentinel value (what the API returns on failure) to the
# CPython / libc functions whose return value must be checked for it.
# Commented-out entries are known candidates currently excluded from scans.
rules = {
    "false":[
        "PyArg_ParseTuple"
    ],
    "NULL":[
        "Py_BuildValue",
        "PyLong_FromLong",
        #"PyBytes_FromStringAndSize",
        #"PyBytes_AsString",
        #"PyFloat_FromDouble",
        #"PyObject_GetAttrString",
        #"PyDict_New",
        #"PyDict_GetItemString",
        #"PyDict_GetItem",
        #"PyList_GetItem",
        #"PyList_GET_ITEM",
        "PyList_New",
        "malloc"
    ],
    "-1":[
        #"PyDict_SetItemString",
        #"PyType_Ready",
        #"PyLong_AsLong",
        #"PyFloat_AsDouble", # -1.0
        #"PyModule_AddIntConstant",
        #"PyObject_SetAttrString",
        "PyDict_SetItem",
        #"PyList_Append",
        #"PyList_Insert",
        "PyList_SetItem",
        "PyList_SET_ITEM",
        "PyTuple_SET_ITEM"
    ]
}
# "#define" aliases of the above APIs, discovered during the first scan
# (check(..., append=True)) and re-checked in the second pass.
append_rules = {
    "false":[],
    "NULL":[],
    "-1":[]
}
# string -> string list
def exec_command(command):
    """Run a shell command and return its stdout as a list of lines.

    Each line keeps its trailing newline: callers such as grep_parser
    strip it off with a [:-1] slice.
    Fix over the original: subprocess.run waits for the child and closes
    the pipe, where Popen left an unreaped process behind.
    NOTE: command runs through the shell; only pass trusted strings.
    """
    result = subprocess.run(command, stdout=subprocess.PIPE, shell=True, encoding="UTF-8")
    return result.stdout.splitlines(keepends=True)
# string -> (string, int, string)
def grep_parser(grep_line):
    """Split one `grep -rn` output line into (filename, lineno, content).

    The trailing newline of the raw line is dropped from the content.
    """
    filename, _, remainder = grep_line.partition(":")
    lineno_text, _, tail = remainder.partition(":")
    return (filename, int(lineno_text), tail[:-1])
# string, int -> string
def next_program_line(filename, lineno):
    """Return the next meaningful program line after the statement at `lineno`.

    Recurses past continuation lines until the current statement ends with
    ';', then skips over a closing '}' or blank line if one follows.

    NOTE(review): if no line ending in ';' is ever found (e.g. scanning past
    EOF, where linecache returns ''), the recursion never terminates and
    eventually raises RecursionError -- confirm callers only pass positions
    inside a statement.
    """
    current_line = linecache.getline(filename, lineno).strip()
    lineno += 1
    if (not current_line.endswith(";")):
        return next_program_line(filename, lineno)
    next_line = linecache.getline(filename, lineno).strip()
    if (next_line == "}" or next_line == ""):
        return linecache.getline(filename, lineno + 1).strip()
    else:
        return next_line
# string -> string
def assignment_parser(assignment):
    """Extract the variable name on the left-hand side of a C assignment."""
    lhs = assignment[:assignment.find("=")].strip()
    if " " not in lhs:
        # plain re-assignment: the left side is already the variable
        return lhs
    # new declaration: drop the type identifier
    variable = lhs.split(" ")[1]
    # pointer declaration: drop the leading '*'
    return variable[1:] if variable.startswith("*") else variable
# string, string, bool, string -> None
def check(api, errval, append, path):
    """Grep `path` for calls to `api` and print lines whose result is
    never checked against `errval` on the following statement.

    When `append` is True, "#define" aliases of the API are recorded in
    append_rules[errval] for a later pass instead of being reported.
    """
    grep_cmd = "grep -rn \"" + api + "\" " + path
    for raw_line in exec_command(grep_cmd):
        (filename, lineno, content) = grep_parser(raw_line)
        # only scan C sources and headers
        if not (filename.endswith(".c") or filename.endswith(".h")):
            continue
        stripped = content.strip()
        # a call inside return / if is already being propagated or checked
        if stripped.startswith("return"):
            continue
        if stripped.startswith("if") or stripped.startswith("else if"):
            continue
        # skip comments
        if stripped.startswith("//") or stripped.startswith("/*") or stripped.startswith("*"):
            continue
        if stripped.startswith("#define"):
            if append:
                print(stripped)
                append_rules[errval].append(content.split(" ")[1])
            continue
        next_content = next_program_line(filename, lineno)
        variable = assignment_parser(content)
        # checked or returned right after the assignment: fine
        if next_content.startswith("if") and next_content.find(variable) != -1:
            continue
        if next_content.startswith("return") and next_content.find(variable) != -1:
            continue
        print(raw_line.strip())
if __name__ == "__main__":
start_time = time.time()
for (errval, apis) in rules.items():
for api in apis:
#print("===== " + api + " =====")
check(api, errval, True, sys.argv[1])
#print(append_rules)
for (errval, apis) in append_rules.items():
for api in apis:
#print("===== " + api + " =====")
check(api, errval, False, sys.argv[1])
end_time = time.time()
print("total time : {:.2f}s".format(end_time - start_time)) | S4Plus/pyceac | checkers/2/checker.py | checker.py | py | 4,376 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "subprocess.Popen",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "linecache.getline",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "linecache.get... |
36406902723 | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
import csv
import cloudpickle
import numpy as np
import pandas as pd
from scipy.integrate import quad
from scipy.stats import (
gaussian_kde,
ks_2samp,
t
)
from sklearn.feature_selection import SelectorMixin
from sklearn.base import TransformerMixin
from sklearn.utils import resample
from sklearn.metrics import (
f1_score,
precision_score,
recall_score,
average_precision_score,
)
from .logger import log
# Keyword arguments shared by all DataFrame.to_csv calls.
CSV_WRITE_FORMAT = {
    'index': False,
    'quoting': csv.QUOTE_ALL,
}
# Keyword arguments shared by all pd.read_csv calls; empty strings stay
# empty instead of becoming NaN.
CSV_READ_FORMAT = {
    'keep_default_na': False,
}
class Sample:
    """Strategies for drawing a size-n sample from a 1-D dataset."""

    @staticmethod
    def full_sample(data, n):
        """Return the data untouched; the requested size is ignored."""
        return data

    @staticmethod
    def random_sample(data, n, random_state=1):
        """Draw n values with a seeded generator (reproducible)."""
        return np.random.default_rng(random_state).choice(data, n)

    @staticmethod
    def percentile_sample(data, n, lower=0, upper=100):
        """n evenly spaced percentiles, snapped down to observed values."""
        grid = np.linspace(lower, upper, n, endpoint=True)
        return np.percentile(data, grid, interpolation='lower')

    @staticmethod
    def percentile_interpolation_sample(data, n, lower=0, upper=100):
        """n evenly spaced percentiles with linear interpolation."""
        grid = np.linspace(lower, upper, n, endpoint=True)
        return np.percentile(data, grid, interpolation='linear')
# Registry mapping a mode name (as found in configuration) to the
# corresponding sampling strategy.
Sample.MODES = {
    'random': Sample.random_sample,
    'percentile': Sample.percentile_sample,
    'interpolate': Sample.percentile_interpolation_sample,
    'full': Sample.full_sample,
}
class Stats:
    """Numerical helpers for Hellinger distances between density estimates."""

    @staticmethod
    def compute_integral_boundaries(f, retsize):
        """Estimate integration bounds as mean +/- 8 std of a resample of f."""
        u = f.resample(retsize)
        a = u.mean() - 8 * u.std()
        b = u.mean() + 8 * u.std()
        return a, b

    @staticmethod
    def discrete_hellinger_integral(p, q, a, b, retsize):
        """Riemann-sum approximation of the Bhattacharyya coefficient on [a, b]."""
        x, step = np.linspace(a, b, retsize, endpoint=True, retstep=True)
        i = np.dot(np.sqrt(p(x)), np.sqrt(q(x))) * step
        # the exact integral cannot exceed 1; treat overshoot as 0 contribution
        if i > 1:
            return 0
        else:
            return i

    @classmethod
    def discrete_hellinger_distance(cls, p, q, retsize=100):
        """Hellinger distance using the discrete integral, split over the
        sorted union of both densities' support boundaries."""
        a1, b1 = cls.compute_integral_boundaries(p, retsize)
        a2, b2 = cls.compute_integral_boundaries(q, retsize)
        a1, b1, a2, b2 = sorted([a1, b1, a2, b2])
        i1 = cls.discrete_hellinger_integral(p, q, a1, b1, retsize)
        i2 = cls.discrete_hellinger_integral(p, q, b1, a2, retsize)
        i3 = cls.discrete_hellinger_integral(p, q, a2, b2, retsize)
        i = i1 + i2 + i3
        if i > 1: # To prevent computing a negative root because of an approximation error during integration
            return 0
        else:
            return np.sqrt(1 - i)

    @staticmethod
    def hellinger_integral(p, q, a=-np.inf, b=np.inf):
        """Adaptive-quadrature Bhattacharyya integral; returns (value, error)."""
        value, error = quad(
            lambda x: np.sqrt(p(x)*q(x)),
            a,
            b
        )
        return value, error

    @classmethod
    def hellinger_distance(cls, p, q, a=-np.inf, b=np.inf, split_integral=True, retsize=100):
        """Hellinger distance via quadrature.

        With split_integral, the domain is split at the densities' estimated
        boundaries to help quad converge on narrow peaks.
        """
        if split_integral:
            a1, b1 = cls.compute_integral_boundaries(p, retsize)
            a2, b2 = cls.compute_integral_boundaries(q, retsize)
            a1, b1, a2, b2 = sorted([a1, b1, a2, b2])
            i1, _ = cls.hellinger_integral(p, q, a1, b1)
            i2, _ = cls.hellinger_integral(p, q, b1, a2)
            i3, _ = cls.hellinger_integral(p, q, a2, b2)
            value = i1 + i2 + i3
        else:
            value, error = cls.hellinger_integral(p.pdf, q.pdf, a, b)
        if 1 <= value < 1.1: # To prevent computing a negative root because of an approximation error during integration
            return 1
        elif 1.1 <= value: # If value > 1.1 the approximation failed too much and should not be rejected
            return 0
        else:
            return np.sqrt(1 - value)

    @classmethod
    def hellinger_distance_1samp(cls, sample, pdf, **params):
        """Distance between a sample's KDE (Silverman bandwidth) and a pdf."""
        kde = gaussian_kde(sample, bw_method='silverman')
        return cls.hellinger_distance(kde, pdf, split_integral=False)

    @classmethod
    def hellinger_distance_2samp(cls, samp1, samp2):
        """Distance between the KDEs of two samples."""
        kde1 = gaussian_kde(samp1, bw_method='silverman')
        kde2 = gaussian_kde(samp2, bw_method='silverman')
        return cls.hellinger_distance(kde1, kde2)
class Accessor:
    """Load per-entity artefacts from disk.

    Entities are strings of the form "<entityid>:<indexid>", where
    `indexid` is the directory holding that index's files.
    """

    @staticmethod
    def get_entity_kde(entity):
        """Unpickle the entity's fitted gaussian KDE from <index>/kde/."""
        entityid, indexid = entity.split(':')
        kdepath = os.path.join(indexid, 'kde', entityid + '.kde')
        with open(kdepath, 'rb') as f:
            kde = cloudpickle.load(f)
        return kde

    @staticmethod
    def get_entity_data(entity):
        """Load the entity's raw sample array from <index>/entity/*.npy."""
        entityid, indexid = entity.split(':')
        datapath = os.path.join(indexid, 'entity', entityid + '.npy')
        data = np.load(datapath)
        return data

    @staticmethod
    def get_entity_metadata(entity):
        """Return the entity's row of <index>/terminology.csv as a Series."""
        entityid, indexid = entity.split(':')
        path = os.path.join(indexid, 'terminology.csv')
        terminology = pd.read_csv(path, **CSV_READ_FORMAT, dtype=str)
        data = terminology[terminology.entityid == entityid].squeeze()
        return data

    @staticmethod
    def get_entity_aggregate(entity):
        """Return the entity's row of <index>/aggregate.csv as a Series."""
        columntypes = {
            "entityid": str,
            "size": int,
            "mean": float,
            "std": float,
            "var": float,
            "frequency": float
        }
        entityid, indexid = entity.split(':')
        path = os.path.join(indexid, 'aggregate.csv')
        terminology = pd.read_csv(path, **CSV_READ_FORMAT, dtype=columntypes)
        data = terminology[terminology.entityid == entityid].squeeze()
        return data

    @classmethod
    def hellinger_distance_2entity(cls, entity1, entity2, strategy='split_integral'):
        """Hellinger distance between two entities' stored KDEs.

        Unknown strategies fall back to 'split_integral' with a log line.
        """
        kde1 = cls.get_entity_kde(entity1)
        kde2 = cls.get_entity_kde(entity2)
        strategies = ('full', 'split_integral', 'discrete')
        if strategy not in strategies:
            strategy = 'split_integral'
            log.info(f"Hellinger distance strategy {strategy} must be in {strategies}, switching to 'split_integral'")
        if strategy == 'full':
            hd = Stats.hellinger_distance(kde1, kde2, split_integral=False)
        elif strategy == 'split_integral':
            hd = Stats.hellinger_distance(kde1, kde2, split_integral=True)
        elif strategy == 'discrete':
            hd = Stats.discrete_hellinger_distance(kde1, kde2)
        return hd

    @classmethod
    def ks_test_2entity(cls, entity1, entity2):
        """Two-sample Kolmogorov-Smirnov test on the entities' raw data."""
        data1 = cls.get_entity_data(entity1).flatten()
        data2 = cls.get_entity_data(entity2).flatten()
        return ks_2samp(data1, data2)

    @classmethod
    def kde_from_entity(cls, entity):
        """Fit (Silverman bandwidth), persist and return the entity's KDE."""
        entityid, indexid = entity.split(':')
        kdepath = os.path.join(indexid, 'kde', entityid + '.kde')
        os.makedirs(os.path.dirname(kdepath), exist_ok=True)
        data = cls.get_entity_data(entity)
        kde = gaussian_kde(data, bw_method='silverman')
        with open(kdepath, 'wb') as f:
            cloudpickle.dump(kde, f)
        return kde
class CachedAccessor:
    """Accessor with per-process memoisation of the loaded artefacts."""

    KDECACHE = dict()

    @classmethod
    def get_entity_kde(cls, entity):
        """Accessor.get_entity_kde, cached by entity id."""
        if entity not in cls.KDECACHE:
            cls.KDECACHE[entity] = Accessor.get_entity_kde(entity)
        return cls.KDECACHE[entity]

    DATACACHE = dict()

    @classmethod
    def get_entity_data(cls, entity):
        """Accessor.get_entity_data, cached by entity id."""
        if entity not in cls.DATACACHE:
            cls.DATACACHE[entity] = Accessor.get_entity_data(entity)
        return cls.DATACACHE[entity]

    METDATACACHE = dict()

    @classmethod
    def get_entity_metadata(cls, entity):
        """Accessor.get_entity_metadata, cached by entity id."""
        if entity not in cls.METDATACACHE:
            cls.METDATACACHE[entity] = Accessor.get_entity_metadata(entity)
        return cls.METDATACACHE[entity]
class Score:
    """Top-k ranking metrics over grouped prediction frames."""

    @staticmethod
    def tm_rank(groupby, ret=5):
        """True when any of the top-`ret` rows by y_proba is a positive (Y)."""
        top_rows = groupby.sort_values('y_proba', ascending=False).head(ret)
        return top_rows.Y.any()

    @staticmethod
    def tm_score(df, groupby_key, ret=5):
        """Fraction of all groups whose top-`ret` ranking hits a positive."""
        hits = df.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
        group_count = len(df.index.get_level_values(groupby_key).unique())
        return hits.value_counts().get(True, 0) / group_count

    @staticmethod
    def tm_score_relaxed(df, groupby_key, ret=5):
        """Like tm_score, but normalised by groups that contain a positive."""
        hits = df.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
        group_count = len(df[df.Y == True].index.get_level_values(groupby_key).unique())
        return hits.value_counts().get(True, 0) / group_count

    @staticmethod
    def compute_tm_score(model, df, groupby_key, ret=5):
        """Score `model` probabilities with the relaxed top-`ret` metric."""
        scored = df.copy()
        scored['y_proba'] = model.predict_proba(scored.X)[:, 1]
        hits = scored.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
        group_count = len(scored[scored.Y == True].index.get_level_values(groupby_key).unique())
        return hits.value_counts().get(True, 0) / group_count
class NamedFeatureSelector(SelectorMixin, TransformerMixin):
    """sklearn feature selector that keeps matrix columns by name.

    `columns` is the full, ordered column list of the input matrix;
    `selected_columns` is the subset to keep.
    """
    # parameter names accepted by set_params (sklearn-style API)
    _params_name = set(['columns', 'selected_columns'])
    def __init__(self, columns=None, selected_columns=None):
        self.columns = columns or []
        self.selected_columns = set(selected_columns or [])
    def set_params(self, **params):
        """sklearn-compatible set_params, restricted to _params_name keys."""
        if not params:
            # Simple optimization to gain speed (inspect is slow)
            return self
        valid_params = dict()
        for k, v in params.items():
            if (k in self._params_name):
                valid_params[k] = v
        for k, v in valid_params.items():
            setattr(self, k, v)
        return self
    def _get_support_mask(self):
        # boolean mask over `columns`, True where the column is selected
        mask = np.array(list(map(lambda x: x in self.selected_columns, self.columns)))
        return mask
    def fit(self, X, y=None):
        # stateless: selection is fully determined by the constructor args
        return self
class Bootstrap:
    """Bootstrap resampling of classifier evaluation metrics."""

    @staticmethod
    def sample(df, rate=1):
        """Resample len(df) * rate rows with replacement."""
        n = int(len(df.index) * rate)
        return resample(df, n_samples=n)

    @staticmethod
    def evaluate(model, data):
        """Score `model` on `data` (label column 'Y').

        Returns a (f1, precision, recall, PR-AUC) tuple.
        """
        X = data.drop('Y', axis=1)
        Y = data['Y']
        Y_pred = model.predict(X)
        Y_proba = model.predict_proba(X)[:, 1]
        stats = (
            f1_score(Y, Y_pred, zero_division=0),
            precision_score(Y, Y_pred, zero_division=0),
            recall_score(Y, Y_pred, zero_division=0),
            average_precision_score(Y, Y_proba)
        )
        return stats

    @staticmethod
    def score(model, df, rep=1000, rate=1, verbose=False):
        """Bootstrap `rep` evaluations and summarise each metric.

        Returns, per metric, its mean, std and a studentized (bootstrap-t)
        95% confidence interval.
        """
        statistics = []
        for i in range(rep):
            if verbose and (i % 50 == 0):
                log.info(f"Bootstrap iteration {i} over {rep}")
            test = Bootstrap.sample(df, rate)
            stat = Bootstrap.evaluate(model, test)
            statistics.append(stat)
        statistics = np.array(statistics)
        results = dict()
        for name, stats in zip(['f1', 'precision', 'recall', 'PR-AUC'], statistics.T):
            mu = stats.mean()
            std = stats.std()
            alpha = 0.05
            if std > 0:
                # studentized bootstrap interval
                st = (mu - stats) / std
                q1 = mu - np.quantile(st, 1-0.5*alpha)*std
                q2 = mu - np.quantile(st, 0.5*alpha)*std
            else:
                # degenerate case: every resample produced the same value
                q1 = q2 = mu
            results[name] = {
                'mean': mu,
                'std': std,
                'CI': 1-alpha,
                'lower': q1,
                'upper': q2,
            }
return results | mcrts/dmatch | dmatch/utils.py | utils.py | py | 11,860 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "csv.QUOTE_ALL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.default_rng",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "numpy.l... |
41648714624 | import unittest
from PIL import Image
import numpy as np
from texture.analysis import CoOccur
class MyTestCase(unittest.TestCase):
    """Tests for the CoOccur grey-level co-occurrence matrix features.

    The image-based tests rely on the Brodatz texture fixtures under
    textures/.
    """
    def test_offset_slices(self):
        # distance/angle pairs must produce matching start/end slice specs
        slices = CoOccur._offset_slices(4, 225)
        self.assertEqual(slices, ([[None, -3], [3, None]], [[3, None], [None, -3]]))
        pixels = np.array([[1, 2, 3, 1, 0],
                           [0, 7, 5, 8, 2],
                           [5, 4, 0, 2, 5],
                           [7, 1, 3, 4, 9]])
        angle = 90 + 45 # ↖
        slice_start, slice_end = CoOccur._offset_slices(1, angle)
        start = pixels[slice_start[0][0]:slice_start[0][1], slice_start[1][0]:slice_start[1][1]]
        end = pixels[slice_end[0][0]:slice_end[0][1], slice_end[1][0]:slice_end[1][1]]
        # the two views must be shifted copies of each other along the angle
        self.assertEqual(start.tolist(), [[7, 5, 8, 2],
                                          [4, 0, 2, 5],
                                          [1, 3, 4, 9]])
        self.assertEqual(end.tolist(), [[1, 2, 3, 1],
                                        [0, 7, 5, 8],
                                        [5, 4, 0, 2]])
    def test_co_occur(self):
        # matrices shape is (n_distances, n_angles, levels, levels)
        image = Image.open("textures/1.1.04.tiff")
        co_occur = CoOccur(image, distances=[1, 2, 4, 8, 16], angles=[0, 90, 180, 270], levels=8)
        self.assertEqual(co_occur.matrices.shape, (5, 4, 8, 8))
        co_occur = CoOccur(image, distances=[1, 16], angles=[0, 120, 175.3, 240], levels=8)
        self.assertEqual(co_occur.matrices.shape, (2, 4, 8, 8))
    def test_inertia(self):
        # inertia = sum over the matrix weighted by squared level difference
        image = Image.open("textures/1.1.04.tiff")
        l_b = np.arange(8)
        l_a = l_b[:, np.newaxis]
        coefficients = ((l_a - l_b) ** 2).reshape(1, 1, 8, 8)
        co_occur = CoOccur(image, distances=[1, 4, 8, 16, 32], angles=[0, 120, 240])
        self.assertAlmostEqual(np.sum(co_occur.matrices[2, 1] * coefficients).item(), co_occur.inertia[2, 1])
        self.assertAlmostEqual(np.sum(co_occur.matrices[4, 2] * coefficients).item(), co_occur.inertia_of(32, 240))
    def test_average(self):
        # average over the angle axis, per distance
        image = Image.open("textures/1.1.05.tiff")
        co_occur = CoOccur(image, distances=[1, 4, 8, 16, 32], angles=[0, 90, 240])
        self.assertAlmostEqual(np.mean(co_occur.matrices[2, :, 1, 3]).item(), co_occur.average[2, 1, 3].item())
        self.assertAlmostEqual(np.mean(co_occur.matrices[4, :, 2, 6]).item(), co_occur.average_of(32)[2, 6].item())
    def test_spread(self):
        # spread = max - min over the angle axis, per distance
        image = Image.open("textures/1.1.10.tiff")
        co_occur = CoOccur(image, distances=[2, 5, 14], angles=[0, 32, 128, 290])
        spread_1 = np.max(co_occur.matrices[0, :, 7, 4]) - np.min(co_occur.matrices[0, :, 7, 4])
        self.assertAlmostEqual(spread_1, co_occur.spread[0, 7, 4].item())
        spread_2 = np.max(co_occur.matrices[2, :, 0, 5]) - np.min(co_occur.matrices[2, :, 0, 5])
        self.assertAlmostEqual(spread_2, co_occur.spread_of(14)[0, 5].item())
if __name__ == '__main__':
unittest.main()
| MatteoZanella/siv-texture-analysis | tests/test_com.py | test_com.py | py | 2,951 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "texture.analysis.CoOccur._offset_slices",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "texture.analysis.CoOccur",
"line_number": 9,
"usage_type": "name"
},
{
... |
41236509775 | from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
# No hand-written URL patterns; everything is generated by the router.
urlpatterns = [
]
# DRF router generates the standard list/detail routes for each viewset.
router = DefaultRouter()
router.register("porcelain", viewset=views.PorcelainView, basename="porcelain")
router.register("dynasty", viewset=views.DynastyView, basename="dynasty")
router.register("EmperorYear", viewset=views.EmperorYearView, basename="EmperorYear")
urlpatterns += router.urls | beishangongzi/porcelain-backend | predict_model/urls.py | urls.py | py | 412 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 10,
"usage_type": "call"
}
] |
18390234941 | from django import template
from photos.models import GalleryCustom
register = template.Library()
@register.filter
def stripgallery(title):
    """
    Remove gallery prefix from photo titles of the form gallery__mytitle.

    Titles without a "__" separator are returned unchanged.
    """
    prefix, separator, rest = title.partition("__")
    return rest if separator else title
@register.inclusion_tag('photologue/tags/galleries.html')
def get_public_photo_galleries():
    """
    Return all public galleries as an HTML ul element.
    """
    # oldest first, rendered by the galleries.html inclusion template
    galleries = GalleryCustom.objects.filter(gallery__is_public=True) \
                                     .order_by('gallery__date_added')
    return {'galleries': galleries,
            'private_galleries_list': False}
def get_private_photo_galleries_array(user):
    """Collect the private galleries this user is allowed to view.

    Superusers see every private gallery; other users only those whose
    allowed_users list contains them.
    """
    private = GalleryCustom.objects.filter(gallery__is_public=False) \
                                   .order_by('gallery__date_added')
    if user.is_superuser:
        return private
    return [gallery for gallery in private if user in gallery.allowed_users.all()]
@register.simple_tag(takes_context=True)
def get_private_photo_galleries_num(context):
    """
    Return the number of private galleries accessible to the user.
    """
    # takes_context exposes the request user to the tag
    return len(get_private_photo_galleries_array(context['user']))
@register.inclusion_tag('photologue/tags/galleries.html', takes_context=True)
def get_private_photo_galleries(context):
    """
    Return all private galleries accessible to the user as an HTML ul element.
    """
    # same template as the public listing, with the private flag set
    return {'galleries': get_private_photo_galleries_array(context['user']),
            'private_galleries_list': True,
            'user': context['user']}
| ria4/tln | photos/templatetags/photos_extras.py | photos_extras.py | py | 1,682 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "django.template.Library",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "photos.models.GalleryCustom.objects.filter",
"line_number": 24,
"usage_type": "call"
},
{
"... |
5896021463 | import torch
import torch.utils.data as data
import numpy as np
from collections import defaultdict
from tqdm import tqdm
from copy import deepcopy
import config_data as conf
import random
infor_train_data_path = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/infor_train.npy', allow_pickle = True).tolist()
eva_infor_test_data_path = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/infor_test.npy', allow_pickle = True).tolist()
social_ratings = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/soc_ratings.npy', allow_pickle = True).tolist()
social_links = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/social_links.npy', allow_pickle = True).tolist()
soc_test = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/soc_test_1000.npy', allow_pickle = True).tolist()
infor_fake = np.load('/content/drive/MyDrive/DASR-WGAN/src/dianping/Final_Model_augmentation_edge/data/edge_modification/infor_fake.npy', allow_pickle = True).tolist()
social_fake = np.load('/content/drive/MyDrive/DASR-WGAN/src/dianping/Final_Model_augmentation_edge/data/edge_modification/fake_links_2.npy', allow_pickle = True).tolist()
def load_all():
max_user, max_item, max_user_soc = 0, 0, 0
##################################################################
'''
information domain
'''
##################################################################
infor_hash_data = set()
infor_rating_train_data = []
infor_item_dict = defaultdict(set)
infor_user_dict = infor_train_data_path
all_user = set()
infor_user = set()
for user, items in tqdm(infor_train_data_path.items()):
all_user.add(user)
infor_user.add(user)
for item in items:
infor_hash_data.add((user, item))
infor_rating_train_data.append([user, item])
infor_item_dict[item].add(user)
eva_infor_rating_test_data = []
for user, items in tqdm(eva_infor_test_data_path.items()):
for item in items:
eva_infor_rating_test_data.append([user, item])
eva_common_rating_test_data = []
common_rating_dict = defaultdict(set)
for user, friends in social_links.items():
if user in eva_infor_test_data_path:
for i in eva_infor_test_data_path[user]:
eva_common_rating_test_data.append([user, i])
common_rating_dict[user].add(i)
social_link_dict = social_links
link_hash_data = set()
link_train_data = []
soc_user = set()
for user, friends in social_link_dict.items():
all_user.add(user)
soc_user.add(user)
for friend in friends:
all_user.add(friend)
soc_user.add(friend)
link_hash_data.add((user, friend))
link_train_data.append([user, friend])
eva_social_rating_test_data = []
for user, friends in tqdm(social_ratings.items()):
if user not in common_rating_dict:
for i in social_ratings[user]:
if i <= 8626:
eva_social_rating_test_data.append([user, i])
# infor_fake_dict = infor_fake
# social_fake_dict = social_fake
# infor_fake_item_dict = defaultdict(set)
# for user,items in infor_fake.items():
# for i in items:
# infor_fake_item_dict[i].add(user)
eva_soc_test = soc_test
eva_soc_test_ground_truth = social_ratings
print('all user count:', len(all_user))
print('max user id:', max(all_user))
print('infor user count:', len(infor_user))
print('max infor user id:', max(infor_user))
print('common user count:', len(common_rating_dict))
print('soc user count:', len(soc_user))
print('max soc user id:', max(soc_user))
#import sys;sys.exit(0)
return infor_hash_data, infor_rating_train_data, eva_infor_rating_test_data,\
infor_user_dict, infor_item_dict, eva_common_rating_test_data, social_link_dict, eva_social_rating_test_data,\
link_train_data, link_hash_data, eva_soc_test, eva_soc_test_ground_truth, common_rating_dict
####################edge modification################
def load_corrupt_edge():
infor_train_data_path_cp = infor_train_data_path
social_links_cp = social_links
for _ in range(1000):
u = random.randint(0, 10181)
if len(infor_train_data_path_cp[u]) > 0:
u_value = list(infor_train_data_path_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
infor_train_data_path_cp[u].remove(u_value[i])
except:
import pdb;pdb.set_trace()
for _ in range(1000):
u = random.randint(0, 10181)
i = random.randint(0, 8626)
if i not in infor_train_data_path_cp[u]:
infor_train_data_path_cp[u].add(i)
########add edges in social domain
for _ in range(500):
u = random.randint(8486, 14174)
if len(social_links_cp[u]) > 0:
u_value = list(social_links_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
social_links_cp[u].remove(u_value[i])
except:
pdb.set_trace()
for _ in range(500):
u1 = random.randint(8486, 14174)
u2 = random.randint(8486, 14174)
if u2 not in social_links_cp[u1]:
social_links_cp[u1].add(u2)
infor_fake_item_dict_cp = defaultdict(set)
for user,items in infor_train_data_path_cp.items():
for i in items:
infor_fake_item_dict_cp[i].add(user)
return infor_train_data_path_cp, social_links_cp, infor_fake_item_dict_cp
################node mask################
def load_corrupt_node_mask():
infor_train_data_path_cp = infor_train_data_path
social_links_cp = social_links
for _ in range(1000):
u = random.randint(0, 10181)
if len(infor_train_data_path_cp[u]) > 0:
u_value = list(infor_train_data_path_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
infor_train_data_path_cp[u].remove(u_value[i])
except:
import pdb;pdb.set_trace()
for _ in range(1000):
u = random.randint(0, 10181)
i = random.randint(0, 8626)
if i not in infor_train_data_path_cp[u]:
infor_train_data_path_cp[u].add(i)
########add edges in social domain
for _ in range(300):
u = random.randint(8486, 14174)
if len(social_links_cp[u]) > 0:
u_value = list(social_links_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
social_links_cp[u].remove(u_value[i])
except:
pdb.set_trace()
for _ in range(300):
u1 = random.randint(8486, 14174)
u2 = random.randint(8486, 14174)
if u2 not in social_links_cp[u1]:
social_links_cp[u1].add(u2)
infor_fake_item_dict_cp = defaultdict(set)
for user,items in infor_train_data_path_cp.items():
for i in items:
infor_fake_item_dict_cp[i].add(user)
return infor_train_data_path_cp, social_links_cp, infor_fake_item_dict_cp
# construct original local graph#
def construct_infor_mat(soc_dict, user_dict, item_dict, is_user):
if is_user == True:
infor_index, infor_value = [], []
#common user
#'''
for user in soc_dict.keys():
friends_list = soc_dict[user]
if user not in user_dict:
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/len(friends_list))
#'''
for user in user_dict.keys():
item_list = user_dict[user]
if user not in soc_dict:
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for user in user_dict.keys():
if user in soc_dict.keys():
friends_list = soc_dict[user]
item_list = user_dict[user]
#'''
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/(len(friends_list)+len(item_list)))
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/(len(item_list)+len(friends_list)))
#'''
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
infor_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
infor_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
############construct corrupted local graph###############
def construct_corrupted_graph(infor_fake_dict, infor_fake_item_dict, social_fake_dict):
infor_index, infor_value = [], []
#common user
#'''
for user in infor_fake_dict.keys():
item_list = infor_fake_dict[user]
for i in item_list:
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in infor_fake_item_dict.keys():
user_list = infor_fake_item_dict[item]
for u in user_list:
infor_index.append([item+conf.num_all_user_id, u])
infor_value.append(1.0/len(user_list))
for user in social_fake_dict.keys():
friend_list = social_fake_dict[user]
for f in friend_list:
infor_index.append([user, f])
infor_value.append(1.0/len(friend_list))
length = conf.num_all_user_id + conf.num_items
fake_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_agg_mat
##############3construct global in social domian################
def construct_global_social(soc_dict):
infor_index, infor_value = [], []
for user in soc_dict.keys():
friends_list = soc_dict[user]
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/len(friends_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
#construct global in information domain#
def construct_global_infor(user_dict, item_dict):
infor_index, infor_value = [], []
for user in user_dict.keys():
item_list = user_dict[user]
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
infor_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
infor_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
#
def construct_infor_fake_graph(infor_fake_dict, infor_fake_item_dict, soc_dict, is_true):
infor_index, infor_value = [], []
#common user
#'''
for user in infor_fake_dict.keys():
item_list = infor_fake_dict[user]
for i in item_list:
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in infor_fake_item_dict.keys():
user_list = infor_fake_item_dict[item]
for u in user_list:
infor_index.append([item+conf.num_all_user_id, u])
infor_value.append(1.0/len(user_list))
if is_true == True:
for user in soc_dict.keys():
friend_list = soc_dict[user]
for f in friend_list:
infor_index.append([user, f])
infor_value.append(1.0/len(friend_list))
length = conf.num_all_user_id + conf.num_items
fake_infor_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_infor_agg_mat
##########construct fake graph in social domain#############
def construct_social_fake_graph(social_fake_dict, user_dict, item_dict, is_true):
social_index,social_value = [],[]
for user in social_fake_dict.keys():
friend_list = social_fake_dict[user]
for f in friend_list:
social_index.append([user, f])
social_value.append(1.0/len(friend_list))
if is_true == True:
for user in user_dict.keys():
item_list = user_dict[user]
for i in item_list:
social_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
social_value.append(1.0/len(item_list))
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
social_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
social_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
fake_social_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(social_index).t().cuda(), \
torch.FloatTensor(social_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_social_agg_mat
###########construct infor h-g link ##############
def construct_infor_link(soc_dict, user_dict, item_dict):
infor_index, infor_value = [], []
for user in user_dict.keys():
if user in soc_dict.keys():
items = user_dict[user]
for i in items:
infor_index.append([user, i])
infor_value.append(1.0)
length = conf.num_all_user_id + conf.num_items
agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return agg_mat
# TrainData is used to train the model
class TrainData():
def __init__(self, infor_rating_train_data, infor_hash_data, social_link_dict, infor_user_dict, link_hash_data):
self.features_ps = infor_rating_train_data
self.train_mat = infor_hash_data
self.social_link = social_link_dict
self.infor_user = infor_user_dict
self.social_hash = link_hash_data
def ng_sample(self):
features_fill = []
for x in self.features_ps:
u, i = x[0], x[1]
for t in range(conf.num_train_neg):
j = np.random.randint(conf.num_items)
while (u, j) in self.train_mat:
j = np.random.randint(conf.num_items)
features_fill.append([u, i, j])
self.features_fill = features_fill
self.link_ng_sample()
self.infor_bridge_sample()
#'''
def link_ng_sample(self):
link_features_fill = []
for user,friends in self.social_link.items():
if user in self.infor_user:
for f in friends:
j = np.random.randint(conf.num_bri_user_start, conf.num_all_user_id)
while (user,j) in self.social_hash:
j = np.random.randint(conf.num_bri_user_start, conf.num_all_user_id)
link_features_fill.append([user, f, j])
self.link_features_fill = link_features_fill
#'''
def infor_bridge_sample(self):
infor_bridge_fill = []
for user,items in self.infor_user.items():
if user in self.social_link:
for i in items:
j = np.random.randint(conf.num_items)
while (user, j) in self.train_mat:
j = np.random.randint(conf.num_items)
infor_bridge_fill.append([user, i, j])
self.infor_bridge_fill = infor_bridge_fill
def __len__(self):
return len(self.features_ps) * (conf.num_train_neg)
def __getitem__(self, idx):
features = self.features_fill
user = features[idx][0]
pos = features[idx][1]
neg = features[idx][2]
link_features_fill = self.link_features_fill
idx = np.random.randint(len(link_features_fill))
s_bri = link_features_fill[idx][0]
s_bri_pos = link_features_fill[idx][1]
s_bri_neg = link_features_fill[idx][2]
infor_bridge_fill = self.infor_bridge_fill
idx_2 = np.random.randint(len(infor_bridge_fill))
i_bri = infor_bridge_fill[idx_2][0]
i_bri_pos = infor_bridge_fill[idx_2][1]
i_bri_neg = infor_bridge_fill[idx_2][2]
return user, pos, neg, s_bri, s_bri_pos, s_bri_neg, i_bri, i_bri_pos, i_bri_neg
class EvaData():
def __init__(self, eva_data):
self.eva_data = eva_data
self.length = len(eva_data.keys())
def get_batch(self, batch_idx_list):
user_list, item_list = [], []
for idx in batch_idx_list:
user_list.extend([self.eva_data[idx][0]]*(len(self.eva_data[idx])-1))
item_list.extend(self.eva_data[idx][1:])
return torch.LongTensor(user_list).cuda(), \
torch.LongTensor(item_list).cuda()
| PeiJieSun/AAAI-submission | DataModule_domain_infor.py | DataModule_domain_infor.py | py | 19,566 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 13,
... |
16090817554 | from django.urls import path
from . import api_endpoints as views
from .api_endpoints.Contacts.ContactsList.views import ContactListAPIView
app_name = 'about'
urlpatterns = [
path('addresses/', views.AddressListAPIView.as_view(), name='address_list'),
path('company_stats/', views.CompanyStatListAPIView.as_view(), name='company_stat'),
path('company_histories/', views.CompanyHistoryListAPIView.as_view(), name='company_history'),
path('emails/', views.EmailListAPIView.as_view(), name='email_list'),
path('phone_numbers/', views.PhoneNumberListView.as_view(), name='phone_number_list'),
path('social_medias/', views.SocialMediaListAPIView.as_view(), name='social_media_list'),
path('contacts/', ContactListAPIView.as_view(), name='contact_list'),
path('showroom/', views.ShowroomListAPIView.as_view(), name='showroom_list')
]
| bilolsolih/decormax | apps/about/urls.py | urls.py | py | 863 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
25097408304 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 09:34:07 2022
@author: maria
"""
import numpy as np
import pandas as pd
from numpy import zeros, newaxis
import matplotlib.pyplot as plt
import scipy as sp
from scipy.signal import butter,filtfilt,medfilt
import csv
import re
#getting the F traces which are classified as cells by Suite2P (manually curated ROIs should be automatically saved)
def getcells(filePathF, filePathiscell):
"""
This function returns the ROIs that are classified as cells.
Careful, only use this if you have manually curated the Suite2P data!
Parameters
----------
filePathF : string
The path of where the fluorescence traces from Suite2P are located.
It will load the file as an array within the function.
This should be an array of shape [x,y] where x is the number of ROIs and y the corresponding values of F intensity
filePathiscell : string
The path of where the iscell file from Suite2P is located.
iscell should be an array of shape [x,y] where x is the number of ROIs and y is the classification confidence
(values are boolean, 0 for not a cell, 1 for cell)
cells is a 1D array [x] with the identify of the ROIs classified as cells in iscell
Returns
-------
F_cells : array of float32
array of shape [x,y] where x is the same as the one in cells and y contains the corresponding F intensities
"""
iscell = np.load(filePathiscell, allow_pickle=True)
F = np.load(filePathF, allow_pickle=True)
cells = np.where(iscell == 1)[0]
F_cells = F[cells,:]
return F_cells
#%%Liad's functions slightly adapted
#code from Liad, returns the metadata, remember to change the number of channels
def GetNidaqChannels(niDaqFilePath, numChannels):
"""
Parameters
----------
niDaqFilePath : string
the path of the nidaq file.
numChannels : int, optional
Number of channels in the file. The default is 7.
Returns
-------
niDaq : matrix
the matrix of the niDaq signals [time X channels]
"""
niDaq = np.fromfile(niDaqFilePath, dtype= np.float64)
niDaq = np.reshape(niDaq,(int(len(niDaq)/numChannels),numChannels))
return niDaq
def AssignFrameTime(frameClock,th = 0.5,plot=False):
"""
The function assigns a time in ms to a frame time.
Parameters:
frameClock: the signal from the nidaq of the frame clock
th : the threshold for the tick peaks, default : 3, which seems to work
plot: plot to inspect, default = False
returns frameTimes (ms)
"""
#Frame times
# pkTimes,_ = sp.signal.find_peaks(-frameClock,threshold=th)
# pkTimes = np.where(frameClock<th)[0]
# fdif = np.diff(pkTimes)
# longFrame = np.where(fdif==1)[0]
# pkTimes = np.delete(pkTimes,longFrame)
# recordingTimes = np.arange(0,len(frameClock),0.001)
# frameTimes = recordingTimes[pkTimes]
# threshold = 0.5
pkTimes = np.where(np.diff(frameClock > th, prepend=False))[0]
# pkTimes = np.where(np.diff(np.array(frameClock > 0).astype(int),prepend=False)>0)[0]
if (plot):
f,ax = plt.subplots(1)
ax.plot(frameClock)
ax.plot(pkTimes,np.ones(len(pkTimes))*np.min(frameClock),'r*')
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return pkTimes
#function from Liad, detecting photodiode change
def DetectPhotodiodeChanges_old(photodiode,plot=True,lowPass=30,kernel = 101,fs=1000, waitTime=10000):
"""
The function detects photodiode changes using a 'Schmitt Trigger', that is, by
detecting the signal going up at an earlier point than the signal going down,
the signal is filtered and smootehd to prevent nosiy bursts distorting the detection.W
Parameters:
photodiode: the signal from the nidaq of the photodiode
lowPass: the low pass signal for the photodiode signal, default: 30,
kernel: the kernel for median filtering, default = 101.
fs: the frequency of acquisiton, default = 1000
plot: plot to inspect, default = False
waitTime: the delay time until protocol start, default = 5000
returns: st,et (ms) (if acq is 1000 Hz)
***** WHAT DOES ST, ET STAND FOR???*****
"""
b,a = sp.signal.butter(1, lowPass, btype='low', fs=fs)
# sigFilt = photodiode
sigFilt = sp.signal.filtfilt(b,a,photodiode)
sigFilt = sp.signal.medfilt(sigFilt,kernel)
maxSig = np.max(sigFilt)
minSig = np.min(sigFilt)
thresholdU = (maxSig-minSig)*0.2
thresholdD = (maxSig-minSig)*0.4
threshold = (maxSig-minSig)*0.5
# find thesehold crossings
crossingsU = np.where(np.diff(np.array(sigFilt > thresholdU).astype(int),prepend=False)>0)[0]
crossingsD = np.where(np.diff(np.array(sigFilt > thresholdD).astype(int),prepend=False)<0)[0]
# crossingsU = np.delete(crossingsU,np.where(crossingsU<waitTime)[0])
# crossingsD = np.delete(crossingsD,np.where(crossingsD<waitTime)[0])
crossings = np.sort(np.unique(np.hstack((crossingsU,crossingsD))))
if (plot):
f,ax = plt.subplots(1,1,sharex=True)
ax.plot(photodiode,label='photodiode raw')
ax.plot(sigFilt,label = 'photodiode filtered')
ax.plot(crossings,np.ones(len(crossings))*threshold,'g*')
ax.hlines([thresholdU],0,len(photodiode),'k')
ax.hlines([thresholdD],0,len(photodiode),'k')
# ax.plot(st,np.ones(len(crossingsD))*threshold,'r*')
ax.legend()
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return crossings
def DetectPhotodiodeChanges_new(photodiode,plot=False,kernel = 101,upThreshold = 0.2, downThreshold = 0.4,fs=1000, waitTime=5000):
"""
The function detects photodiode changes using a 'Schmitt Trigger', that is, by
detecting the signal going up at an earlier point than the signal going down,
the signal is filtered and smootehd to prevent nosiy bursts distorting the detection.W
Parameters:
photodiode: the signal from the nidaq of the photodiode
lowPass: the low pass signal for the photodiode signal, default: 30,
kernel: the kernel for median filtering, default = 101.
fs: the frequency of acquisiton, default = 1000
plot: plot to inspect, default = False
waitTime: the delay time until protocol start, default = 5000
returns: diode changes (s) up to the user to decide what on and off mean
"""
# b,a = sp.signal.butter(1, lowPass, btype='low', fs=fs)
sigFilt = photodiode
# sigFilt = sp.signal.filtfilt(b,a,photodiode)
sigFilt = sp.signal.medfilt(sigFilt,kernel)
maxSig = np.max(sigFilt)
minSig = np.min(sigFilt)
thresholdU = (maxSig-minSig)*upThreshold
thresholdD = (maxSig-minSig)*downThreshold
threshold = (maxSig-minSig)*0.5
# find thesehold crossings
crossingsU = np.where(np.diff(np.array(sigFilt > thresholdU).astype(int),prepend=False)>0)[0]
crossingsD = np.where(np.diff(np.array(sigFilt > thresholdD).astype(int),prepend=False)<0)[0]
crossingsU = np.delete(crossingsU,np.where(crossingsU<waitTime)[0])
crossingsD = np.delete(crossingsD,np.where(crossingsD<waitTime)[0])
crossings = np.sort(np.unique(np.hstack((crossingsU,crossingsD))))
if (plot):
f,ax = plt.subplots(1,1,sharex=True)
ax.plot(photodiode,label='photodiode raw')
ax.plot(sigFilt,label = 'photodiode filtered')
ax.plot(crossings,np.ones(len(crossings))*threshold,'g*')
ax.hlines([thresholdU],0,len(photodiode),'k')
ax.hlines([thresholdD],0,len(photodiode),'k')
# ax.plot(st,np.ones(len(crossingsD))*threshold,'r*')
ax.legend()
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return crossings
def GetStimulusInfo(filePath,props):
"""
Parameters
----------
filePath : str
the path of the log file.
props : array-like
the names of the properties to extract.
Returns
-------
StimProperties : list of dictionaries
the list has all the extracted stimuli, each a dictionary with the props and their values.
"""
StimProperties = []
with open(filePath, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in reader:
a = []
for p in range(len(props)):
# m = re.findall(props[p]+'=(\d*)', row[np.min([len(row)-1,p])])
m = re.findall(props[p]+'=([a-zA-Z0-9_.-]*)', row[np.min([len(row)-1,p])])
if (len(m)>0):
a.append(m[0])
if (len(a)>0):
stimProps = {}
for p in range(len(props)):
stimProps[props[p]] = a[p]
StimProperties.append(stimProps)
return StimProperties
def AlignStim(signal, time, eventTimes, window,timeUnit=1,timeLimit=1):
aligned = [];
t = [];
dt = np.median(np.diff(time,axis=0))
if (timeUnit==1):
w = np.rint(window / dt).astype(int)
else:
w = window.astype(int)
maxDur = signal.shape[0]
if (window.shape[0] == 1): # constant window
mini = np.min(w[:,0]);
maxi = np.max(w[:,1]);
tmp = np.array(range(mini,maxi));
w = np.tile(w,((eventTimes.shape[0],1)))
else:
if (window.shape[0] != eventTimes.shape[0]):
print('No. events and windows have to be the same!')
return
else:
mini = np.min(w[:,0]);
maxi = np.max(w[:,1]);
tmp = range(mini,maxi);
t = tmp * dt;
aligned = np.zeros((t.shape[0],eventTimes.shape[0],signal.shape[1]))
for ev in range(eventTimes.shape[0]):
# evInd = find(time > eventTimes(ev), 1);
wst = w[ev,0]
wet = w[ev,1]
evInd = np.where(time>=eventTimes[ev])[0]
if (len(evInd)==0):
continue
else :
# None
# if dist is bigger than one second stop
if (np.any((time[evInd[0]]-eventTimes[ev])>timeLimit)):
continue
st = evInd[0]+ wst #get start
et = evInd[0] + wet #get end
alignRange = np.array(range(np.where(tmp==wst)[0][0],np.where(tmp==wet-1)[0][0]+1))
sigRange = np.array(range(st,et))
valid = np.where((sigRange>=0) & (sigRange<maxDur))[0]
aligned[alignRange[valid],ev,:] = signal[sigRange[valid],:];
return aligned, t
#def DetectWheelMove(moveA,moveB,rev_res = 1024, total_track = 598.47,plot=True):
"""
The function detects the wheel movement.
At the moment uses only moveA.
Parameters:
moveA,moveB: the first and second channel of the rotary encoder
rev_res: the rotary encoder resoution, default =1024
total_track: the total length of the track, default = 598.47 (mm)
kernel: the kernel for median filtering, default = 101.
plot: plot to inspect, default = False
returns: distance
"""
# make sure all is between 1 and 0
moveA /= np.max(moveA)
moveA -= np.min(moveA)
moveB /= np.max(moveB)
moveB -= np.min(moveB)
# detect A move
ADiff = np.diff(moveA)
Ast = np.where(ADiff >0.5)[0]
Aet = np.where(ADiff <-0.5)[0]
# detect B move
BDiff = np.diff(moveB)
Bst = np.where(BDiff >0.5)[0]
Bet = np.where(BDiff <-0.5)[0]
#Correct possible problems for end of recording
if (len(Ast)>len(Aet)):
Aet = np.hstack((Aet,[len(moveA)]))
elif (len(Ast)<len(Aet)):
Ast = np.hstack(([0],Ast))
dist_per_move = total_track/rev_res
# Make into distance
track = np.zeros(len(moveA))
track[Ast] = dist_per_move
distance = np.cumsum(track)
if (plot):
f,ax = plt.subplots(3,1,sharex=True)
ax[0].plot(moveA)
# ax.plot(np.abs(ADiff))
ax[0].plot(Ast,np.ones(len(Ast)),'k*')
ax[0].plot(Aet,np.ones(len(Aet)),'r*')
ax[0].set_xlabel('time (ms)')
ax[0].set_ylabel('Amplitude (V)')
ax[1].plot(distance)
ax[1].set_xlabel('time (ms)')
ax[1].set_ylabel('distance (mm)')
ax[2].plot(track)
ax[2].set_xlabel('time (ms)')
ax[2].set_ylabel('Move')
# movFirst = Amoves>Bmoves
return distance
def running_info(filePath, th = 3, plot=False):
with open(filePath) as file_name:
csvChannels = np.loadtxt(file_name, delimiter=",")
arduinoTime = csvChannels[:,-1]
arduinoTimeDiff = np.diff(arduinoTime,prepend=True)
normalTimeDiff = np.where(arduinoTimeDiff>-100)[0]
csvChannels = csvChannels[normalTimeDiff,:]
# convert time to second (always in ms)
arduinoTime = csvChannels[:,-1]/1000
# Start arduino time at zero
arduinoTime-=arduinoTime[0]
csvChannels = csvChannels[:,:-1]
numChannels = csvChannels.shape[1]
if (plot):
f,ax = plt.subplots(numChannels,sharex=True)
for i in range(numChannels):
ax[i].plot(arduinoTime,csvChannels[:,i])
return csvChannels,arduinoTime
def DetectWheelMove(moveA,moveB,timestamps,rev_res = 1024, total_track = 59.847, plot=False):
"""
The function detects the wheel movement.
At the moment uses only moveA.
[[ALtered the minimum from 0 to 5 because of the data from 04/08/22 -M]]
Parameters:
moveA,moveB: the first and second channel of the rotary encoder
rev_res: the rotary encoder resoution, default =1024
total_track: the total length of the track, default = 59.847 (cm)
kernel: the kernel for median filtering, default = 101.
plot: plot to inspect, default = False
returns: velocity[cm/s], distance [cm]
"""
#introducing thresholoding in case the non movement values are not 0, 5 was the biggest number for now
th_index = moveA<5
moveA[th_index] = 0
th_index = moveB<5
moveB[th_index] = 0
moveA = np.round(moveA).astype(bool)
moveB = np.round(moveB).astype(bool)
counterA = np.zeros(len(moveA))
counterB = np.zeros(len(moveB))
# detect A move
risingEdgeA = np.where(np.diff(moveA>0,prepend=True))[0]
risingEdgeA = risingEdgeA[moveA[risingEdgeA]==1]
risingEdgeA_B = moveB[risingEdgeA]
counterA[risingEdgeA[risingEdgeA_B==0]]=1
counterA[risingEdgeA[risingEdgeA_B==1]]=-1
# detect B move
risingEdgeB = np.where(np.diff(moveB>0,prepend=True))[0]#np.diff(moveB)
risingEdgeB = risingEdgeB[moveB[risingEdgeB]==1]
risingEdgeB_A = moveB[risingEdgeB]
counterA[risingEdgeB[risingEdgeB_A==0]]=-1
counterA[risingEdgeB[risingEdgeB_A==1]]=1
dist_per_move = total_track/rev_res
instDist = counterA*dist_per_move
distance = np.cumsum(instDist)
averagingTime = int(np.round(1/np.median(np.diff(timestamps))))
sumKernel = np.ones(averagingTime)
tsKernel = np.zeros(averagingTime)
tsKernel[0]=1
tsKernel[-1]=-1
# take window sum and convert to cm
distWindow = np.convolve(instDist,sumKernel,'same')
# count time elapsed
timeElapsed = np.convolve(timestamps,tsKernel,'same')
velocity = distWindow/timeElapsed
# if (plot):
# f,ax = plt.subplots(3,1,sharex=True)
# ax[0].plot(moveA)
# # ax.plot(np.abs(ADiff))
# ax[0].plot(Ast,np.ones(len(Ast)),'k*')
# ax[0].plot(Aet,np.ones(len(Aet)),'r*')
# ax[0].set_xlabel('time (ms)')
# ax[0].set_ylabel('Amplitude (V)')
# ax[1].plot(distance)
# ax[1].set_xlabel('time (ms)')
# ax[1].set_ylabel('distance (mm)')
# ax[2].plot(track)
# ax[2].set_xlabel('time (ms)')
# ax[2].set_ylabel('Move')
# movFirst = Amoves>Bmoves
return velocity, distance
def Get_Stim_Identity(log, reps, protocol_type, types_of_stim):
    """Group stimulus-log row indices by orientation and parameter level.

    Parameters
    ----------
    log : array
        Stimulus log; column order is "Ori", "SFreq", "TFreq", "Contrast".
    reps : integer
        Number of repetitions of each stimulus.
    protocol_type : string
        One of "simple", "TFreq", "SFreq", "Contrast".
    types_of_stim : integer
        12 for the "simple" protocol (12 orientations, one parameter level),
        24 for the parameter protocols (4 orientations x 6 levels).

    Returns
    -------
    int array of shape (n_orientations, n_levels, reps) holding, for each
    orientation/level pair, the indices of the matching log rows.  For
    protocols other than "TFreq"/"SFreq"/"Contrast" the array stays zero
    (the "simple" branch is intentionally disabled in the original code).
    """
    # Parameter tables for the two supported protocol layouts.
    # Orientation convention: 0 = vertical-left, 90 = horizontal-down,
    # 180 = vertical-right, 270 = horizontal-up.
    if types_of_stim == 12:
        angles = np.array([30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360])
        TFreq = np.array([2])
        SFreq = np.array([0.08])
        contrast = np.array([1])
    elif types_of_stim == 24:
        angles = np.array([0, 90, 180, 270])
        TFreq = np.array([0.5, 1, 2, 4, 8, 16])
        SFreq = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32])
        contrast = np.array([0, 0.125, 0.25, 0.5, 0.75, 1])
    # One row of trial indices per (orientation, parameter level) pair.
    all_parameters = np.zeros((angles.shape[0], TFreq.shape[0], reps)).astype(int)
    for ang_idx, angle in enumerate(angles):
        ori_match = log[:, 0] == angle
        if protocol_type == "TFreq":
            for level, tf in enumerate(TFreq):
                hits = np.flatnonzero(ori_match & (log[:, 2] == tf) & (log[:, 3] == 1))
                all_parameters[ang_idx, level, :] = hits
        if protocol_type == "SFreq":
            for level, sf in enumerate(SFreq):
                hits = np.flatnonzero(ori_match & (log[:, 1] == sf) & (log[:, 3] == 1))
                all_parameters[ang_idx, level, :] = hits
        if protocol_type == "Contrast":
            # contrast and TFreq tables have the same length, so iterating
            # the contrast values directly is equivalent to the original.
            for level, c in enumerate(contrast):
                hits = np.flatnonzero(ori_match & (log[:, 3] == c))
                all_parameters[ang_idx, level, :] = hits
    return all_parameters
def behaviour_reps(log, types_of_stim, reps, protocol_type, speed, time, stim_on, stim_off):
    """Split stimulus repetitions into running and rest trials.

    For each trial the speed trace between stimulus onset and offset is
    extracted; when more than 90% of its samples exceed 1 (cm/s) the trial
    is flagged as running by writing 1 into column 4 of *log* (in place).
    Trial indices are then grouped per orientation (and, for the parameter
    protocols, per parameter level).

    Parameters
    ----------
    log : array
        Stimulus log; column order "Ori", "SFreq", "TFreq", "Contrast",
        plus a fifth column (index 4) that receives the running flag.
        NOTE(review): that column must already exist and be zero-initialised
        -- it is never reset here.
    types_of_stim : integer
        12 for the "simple" protocol, 24 for the parameter protocols.
    reps : integer
        Number of repetitions per stimulus (currently unused here).
    protocol_type : string
        One of "simple", "TFreq", "SFreq", "Contrast".
    speed, time : 1D arrays
        Running speed and the matching (delay-corrected) time stamps.
    stim_on, stim_off : 1D arrays
        Stimulus onset/offset times from the photodiode.

    Returns
    -------
    (running, rest) : two lists of index arrays, one entry per orientation
        ("simple") or per orientation x parameter level (other protocols).
    """
    stim_on_round = np.around(stim_on, decimals=2)
    stim_off_round = np.around(stim_off, decimals=2)
    speed_time = np.stack((time, speed)).T
    # NOTE(review): range(... - 1) skips the very last trial -- confirm this
    # is intentional (e.g. the recording stops before the final offset).
    for rep in range(stim_on.shape[0] - 1):
        start = np.where(stim_on_round[rep] == speed_time[:, 0])[0]
        stop = np.where(stim_off_round[rep] == speed_time[:, 0])[0]
        interval = speed_time[start[0]:stop[0], 1]
        running_bool = np.argwhere(interval > 1)
        # Running trial: more than 90% of the samples are above threshold.
        # (A leftover debug plt.plot(interval) was removed here; it silently
        # drew every trial onto the current matplotlib figure.)
        if running_bool.shape[0] / interval.shape[0] > 0.9:
            a = 1
        else:
            a = 0
        if a == 1:
            # flag this repetition as a running trial
            log[rep, 4] = 1
    if types_of_stim == 12:
        angles = np.array([30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360])
        TFreq = np.array([2])
        SFreq = np.array([0.08])
        contrast = np.array([1])
    # for other gratings protocols (TFreq/SFreq/Contrast): 4 orientations
    # x 6 parameter levels
    elif types_of_stim == 24:
        angles = np.array([0, 90, 180, 270])
        TFreq = np.array([0.5, 1, 2, 4, 8, 16])
        SFreq = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32])
        contrast = np.array([0, 0.125, 0.25, 0.5, 0.75, 1])
    """
    for running
    """
    running = []
    # sort log-row indices by angle (and level) where the running flag is 1
    for angle in range(angles.shape[0]):
        if protocol_type == "SFreq":
            # NOTE(review): the running branch additionally requires
            # contrast == 1 (log[:,3] == 1) while the rest branch below does
            # not -- confirm which condition set is intended.
            for freq in range(TFreq.shape[0]):
                specific_SF_r = np.where((log[:, 0] == angles[angle]) & (log[:, 1] == SFreq[freq]) & (log[:, 3] == 1) & (log[:, 4] == 1))[0]
                running.append(specific_SF_r)
        if protocol_type == "TFreq":
            for freq in range(TFreq.shape[0]):
                specific_TF_r = np.where((log[:, 0] == angles[angle]) & (log[:, 2] == TFreq[freq]) & (log[:, 3] == 1) & (log[:, 4] == 1))[0]
                running.append(specific_TF_r)
        if protocol_type == "Contrast":
            # BUG FIX: contrast lives in column 3, not column 2 (column 2 is
            # TFreq); the rest branch below already used column 3.
            for freq in range(TFreq.shape[0]):
                specific_contrast_r = np.where((log[:, 0] == angles[angle]) & (log[:, 3] == contrast[freq]) & (log[:, 4] == 1))[0]
                running.append(specific_contrast_r)
        elif protocol_type == "simple":
            specific_P_r = np.where((log[:, 0] == angles[angle]) & (log[:, 4] == 1))[0]
            running.append(specific_P_r)
    """
    for rest
    """
    rest = []
    # same grouping for trials whose running flag stayed 0
    for angle in range(angles.shape[0]):
        if protocol_type == "SFreq":
            for freq in range(TFreq.shape[0]):
                specific_SF_re = np.where((log[:, 0] == angles[angle]) & (log[:, 1] == SFreq[freq]) & (log[:, 4] == 0))[0]
                rest.append(specific_SF_re)
        if protocol_type == "TFreq":
            for freq in range(TFreq.shape[0]):
                specific_TF_re = np.where((log[:, 0] == angles[angle]) & (log[:, 2] == TFreq[freq]) & (log[:, 4] == 0))[0]
                rest.append(specific_TF_re)
        if protocol_type == "Contrast":
            for freq in range(TFreq.shape[0]):
                specific_contrast_re = np.where((log[:, 0] == angles[angle]) & (log[:, 3] == contrast[freq]) & (log[:, 4] == 0))[0]
                rest.append(specific_contrast_re)
        elif protocol_type == "simple":
            specific_P_re = np.where((log[:, 0] == angles[angle]) & (log[:, 4] == 0))[0]
            rest.append(specific_P_re)
    return running, rest
{
"api_name": "numpy.load",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number":... |
22555751097 | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
class Product:
    """Model and query helpers for rows of the `products` table."""

    db = "my_solo"  # schema used by every query

    def __init__(self, data):
        """Build a Product from one DB result row (dict-like)."""
        self.id = data['id']
        self.wood = data['wood']
        self.thickness = data['thickness']
        self.description = data['description']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
        self.customers_id = data['customers_id']

    @classmethod
    def create(cls, data):
        """Insert a new product row; returns the DB driver's result."""
        query = "INSERT INTO products (wood, thickness, description, customers_id) VALUES (%(wood)s, %(thickness)s, %(description)s, %(customers_id)s)"
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def get_by_cust_id(cls, data):
        """Return all products of one customer as Product objects."""
        query = "SELECT * FROM products WHERE customers_id = %(id)s"
        rows = connectToMySQL(cls.db).query_db(query, data)
        return [cls(row) for row in rows]

    @classmethod
    def get_one(cls, data):
        """Fetch a single product by id."""
        query = "SELECT * FROM products WHERE id = %(id)s;"
        rows = connectToMySQL(cls.db).query_db(query, data)
        return cls(rows[0])

    @classmethod
    def cancel(cls, data):
        """Delete a product row by id."""
        query = "DELETE FROM products WHERE id=%(id)s"
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def update(cls, data):
        """Update wood/thickness/description of a product by id."""
        query = "UPDATE products SET wood= %(wood)s, thickness= %(thickness)s, description= %(description)s WHERE id = %(id)s"
        return connectToMySQL(cls.db).query_db(query, data)

    @staticmethod
    def validate_product(product):
        """Flash a message for each rule violation; True when all rules pass.

        Rules: wood and thickness must be non-empty and the description must
        be exactly 3 characters long (both bounds are enforced).
        """
        is_valid = True
        if len(product['wood']) < 1:
            flash("Please choose wood type!")
            is_valid = False
        if len(product['thickness']) < 1:
            flash("Thickness amount needed!")
            is_valid = False
        if len(product['description']) < 3:
            flash("Input at least 3 letters!")
            is_valid = False
        if len(product['description']) >= 4:
            flash("Input at only 3 letters!")
            is_valid = False
        return is_valid
| tsu112/solo_project | flask_app/models/product.py | product.py | py | 2,288 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",... |
30357845811 | import unittest
from traits.api import Enum, HasTraits, Int, Str, Instance
from traitsui.api import HGroup, Item, Group, VGroup, View
from traitsui.menu import ToolBar, Action
from traitsui.testing.api import Index, IsVisible, MouseClick, UITester
from traitsui.tests._tools import (
create_ui,
requires_toolkit,
process_cascade_events,
ToolkitName,
)
class FooPanel(HasTraits):
    """Test fixture: a panel with two traits, dialog buttons and a toolbar."""

    # Example traits displayed by the view.
    my_int = Int(2)
    my_str = Str("I am a panel/subpanel")
    # Toolbar shown at the top of the panel (default built below).
    toolbar = Instance(ToolBar)

    def default_traits_view(self):
        """Return the default view: both items, OK/Cancel buttons, toolbar."""
        view = View(
            Item(name="my_int"),
            Item(name="my_str"),
            title="FooPanel",
            buttons=["OK", "Cancel"],
            toolbar=self.toolbar,
        )
        return view

    def _toolbar_default(self):
        # Trait default initializer for `toolbar`: one "Open file" action.
        return ToolBar(Action(name="Open file"))
class FooDialog(HasTraits):
    """Test fixture: a dialog holding two FooPanel instances in a split group."""

    panel1 = Instance(FooPanel)
    panel2 = Instance(FooPanel)

    # Both panels are embedded with the custom style (rendered in place).
    view = View(
        Group(Item("panel1"), Item("panel2"), layout="split", style="custom")
    )

    def _panel1_default(self):
        # Trait default initializer for `panel1`.
        return FooPanel()

    def _panel2_default(self):
        # Trait default initializer for `panel2`.
        return FooPanel()
class ScrollableGroupExample(HasTraits):
    """Test fixture: simple model shown by the scrollable-group views below."""

    my_int = Int(2)
    my_str = Str("The group is scrollable")
# Views exercised by TestPanelLayout: the same two items wrapped in groups
# with different scrollable/label settings.

# Plain scrollable group, no label: content should sit in a scroll area.
scrollable_group_view = View(
    Group(
        Item(name="my_int"),
        Item(name="my_str"),
        scrollable=True,
    ),
    title="FooPanel",
    kind='subpanel',
)

# Non-scrollable control case: no scroll area expected.
non_scrollable_group_view = View(
    Group(
        Item(name="my_int"),
        Item(name="my_str"),
        scrollable=False,
    ),
    title="FooPanel",
    kind='subpanel',
)

# Scrollable group with label + border: rendered as a group box inside the
# scroll area.
scrollable_group_box_view = View(
    Group(
        Item(name="my_int"),
        Item(name="my_str"),
        scrollable=True,
        label="Scrollable View",
        show_border=True,
    ),
    title="FooPanel",
    kind='subpanel',
)

# Scrollable group with a label but no border.
scrollable_labelled_group_view = View(
    Group(
        Item(name="my_int"),
        Item(name="my_str"),
        scrollable=True,
        label="Scrollable View",
    ),
    title="FooPanel",
    kind='subpanel',
)
class ScrollableGroupVisibleWhen(HasTraits):
    """Fixture for enthought/traitsui#1512: two scrollable groups whose
    visibility toggles with the `enabled` enum (exactly one visible)."""

    bar = Str("bar!")
    baz = Str("Baz?")
    enabled = Enum("Yes", "No")

    def default_traits_view(self):
        """Two scrollable VGroups; `visible_when` selects one of them."""
        view = View(
            Item("enabled"),
            HGroup(
                VGroup(
                    Item("bar"),
                    scrollable=True,
                    visible_when="enabled=='Yes'",
                    id='bar_group',
                ),
                VGroup(
                    Item("baz"),
                    scrollable=True,
                    visible_when="enabled=='No'",
                    id='baz_group',
                ),
            ),
        )
        return view
@requires_toolkit([ToolkitName.qt])
class TestUIPanel(unittest.TestCase):
    """Qt-only tests: toolbar/button creation for panels vs. subpanels, and
    visibility of scrollable groups (regression for enthought/traitsui#1512).
    """

    def setup_qt_dock_window(self):
        """Return (main_window, dock); cleanup is registered on the test."""
        from pyface.qt import QtGui

        # set up the dock window for qt
        main_window = QtGui.QMainWindow()
        self.addCleanup(process_cascade_events)
        self.addCleanup(main_window.close)
        dock = QtGui.QDockWidget("testing", main_window)
        dock.setWidget(QtGui.QMainWindow())
        return main_window, dock

    def test_panel_has_toolbar_buttons_qt(self):
        """kind='panel' must create both the toolbar and the dialog buttons."""
        from pyface.qt import QtGui

        _, dock = self.setup_qt_dock_window()
        # add panel
        panel = FooPanel()
        with create_ui(panel, dict(parent=dock.widget(), kind="panel")) as ui:
            dock.widget().setCentralWidget(ui.control)
            # There should be a toolbar for the panel
            self.assertIsNotNone(dock.findChild(QtGui.QToolBar))
            # There should be buttons too
            # Not searching from dock because the dock panel has buttons for
            # popping up and closing the panel
            self.assertIsNotNone(ui.control.findChild(QtGui.QPushButton))

    def test_subpanel_has_toolbar_no_buttons_qt(self):
        """kind='subpanel' keeps the toolbar but hides the dialog buttons."""
        from pyface.qt import QtGui

        _, dock = self.setup_qt_dock_window()
        # add panel
        panel = FooPanel()
        parent = dock.widget()
        with create_ui(panel, dict(parent=parent, kind="subpanel")) as ui:
            dock.widget().setCentralWidget(ui.control)
            # There should be a toolbar for the subpanel
            self.assertIsNotNone(dock.findChild(QtGui.QToolBar))
            # Buttons should not be shown for subpanel
            # Not searching from dock because the dock panel has buttons for
            # popping up and closing the panel
            self.assertIsNone(ui.control.findChild(QtGui.QPushButton))

    def test_subpanel_no_toolbar_nor_button_in_widget(self):
        """Panels hosted in a plain QWidget get neither toolbar nor buttons."""
        from pyface.qt import QtGui

        # FooDialog uses a QWidget to contain the panels
        # No attempt should be made for adding the toolbars
        foo_window = FooDialog()
        with create_ui(foo_window) as ui:
            # No toolbar for the dialog
            self.assertIsNone(ui.control.findChild(QtGui.QToolBar))
            # No button
            self.assertIsNone(ui.control.findChild(QtGui.QPushButton))

    # regression test for enthought/traitsui#1512
    def test_scrollable_group_visible_when(self):
        """visible_when must hide/show the group's whole QScrollArea."""
        from pyface.qt import QtGui

        obj = ScrollableGroupVisibleWhen()
        tester = UITester()
        with tester.create_ui(obj) as ui:
            bar_group = tester.find_by_id(ui, 'bar_group')
            baz_group = tester.find_by_id(ui, 'baz_group')
            # for a scrollable group the GroupEditors control should be a
            # QScrollArea not just the QWidget. We want the full area to be
            # not visible, not just the text box widget.
            self.assertIsInstance(bar_group._target.control, QtGui.QScrollArea)
            self.assertIsInstance(baz_group._target.control, QtGui.QScrollArea)
            self.assertTrue(bar_group.inspect(IsVisible()))
            self.assertFalse(baz_group.inspect(IsVisible()))
            enabled_box = tester.find_by_name(ui, 'enabled')
            baz_item = enabled_box.locate(Index(1))
            baz_item.perform(MouseClick())
            self.assertTrue(baz_group.inspect(IsVisible()))
            self.assertFalse(bar_group.inspect(IsVisible()))
@requires_toolkit([ToolkitName.qt])
class TestPanelLayout(unittest.TestCase):
    """Check which Qt widget tree wraps a Group for each scrollable/label
    combination of the module-level views above."""

    def test_scrollable_group_typical(self):
        """Unlabelled scrollable group -> QScrollArea wrapping a bare QWidget."""
        from pyface.qt import QtGui

        example = ScrollableGroupExample()
        ui = example.edit_traits(view=scrollable_group_view)
        try:
            mainwindow = ui.control.layout().itemAt(0).widget()
            scroll_area = mainwindow.centralWidget()
            self.assertIsInstance(scroll_area, QtGui.QScrollArea)
            content = scroll_area.widget()
            self.assertEqual(type(content), QtGui.QWidget)
        finally:
            ui.dispose()

    def test_scrollable_group_box(self):
        """Scrollable labelled+bordered group -> QScrollArea holding a QGroupBox."""
        from pyface.qt import QtGui

        example = ScrollableGroupExample()
        ui = example.edit_traits(view=scrollable_group_box_view)
        try:
            mainwindow = ui.control.layout().itemAt(0).widget()
            scroll_area = mainwindow.centralWidget()
            self.assertIsInstance(scroll_area, QtGui.QScrollArea)
            group_box = scroll_area.widget()
            self.assertIsInstance(group_box, QtGui.QGroupBox)
            self.assertEqual(group_box.title(), "Scrollable View")
        finally:
            ui.dispose()

    def test_scrollable_labelled_group(self):
        """Scrollable labelled (no border) group -> plain QWidget content."""
        from pyface.qt import QtGui

        example = ScrollableGroupExample()
        ui = example.edit_traits(view=scrollable_labelled_group_view)
        try:
            mainwindow = ui.control.layout().itemAt(0).widget()
            scroll_area = mainwindow.centralWidget()
            self.assertIsInstance(scroll_area, QtGui.QScrollArea)
            content = scroll_area.widget()
            self.assertEqual(type(content), QtGui.QWidget)
        finally:
            ui.dispose()

    def test_non_scrollable_group_typical(self):
        """Non-scrollable group -> content placed directly, no scroll area."""
        from pyface.qt import QtGui

        example = ScrollableGroupExample(my_str="The group is not scrollable")
        ui = example.edit_traits(view=non_scrollable_group_view)
        try:
            mainwindow = ui.control.layout().itemAt(0).widget()
            content = mainwindow.centralWidget()
            self.assertEqual(type(content), QtGui.QWidget)
        finally:
            ui.dispose()
| enthought/traitsui | traitsui/qt/tests/test_ui_panel.py | test_ui_panel.py | py | 8,436 | python | en | code | 290 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "traits.api.Int",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "traits.api.Str",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "traits.api.Instanc... |
15409655756 | """
一个网站域名,如"discuss.leetcode.com",包含了多个子域名。作为顶级域名,常用的有"com",下一级则有"leetcode.com",最低的一级为"discuss.leetcode.com"。当我们访问域名"discuss.leetcode.com"时,也同时访问了其父域名"leetcode.com"以及顶级域名 "com"。
给定一个带访问次数和域名的组合,要求分别计算每个域名被访问的次数。其格式为访问次数+空格+地址,例如:"9001 discuss.leetcode.com"。
接下来会给出一组访问次数和域名组合的列表cpdomains 。要求解析出所有域名的访问次数,输出格式和输入格式相同,不限定先后顺序。
示例 2
输入:
["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
输出:
["901 mail.com","50 yahoo.com","900 google.mail.com","5 wiki.org","5 org","1 intel.mail.com","951 com"]
说明:
按照假设,会访问"google.mail.com" 900次,"yahoo.com" 50次,"intel.mail.com" 1次,"wiki.org" 5次。
而对于父域名,会访问"mail.com" 900+1 = 901次,"com" 900 + 50 + 1 = 951次,和 "org" 5 次。
"""
import collections
class Solution:
    def subdomainVisits(self, cpdomains):
        """
        :type cpdomains: List[str]
        :rtype: List[str]

        Each entry is "<count> <domain>"; the count is credited to the
        domain and to every parent domain ("a.b.c" -> "a.b.c", "b.c", "c").
        """
        # Counter iterates in first-insertion order, so output order is
        # deterministic for a given input.
        totals = collections.Counter()
        for entry in cpdomains:
            head, _, domain = entry.partition(' ')
            visits = int(head)
            parts = domain.split('.')
            # credit the full domain and then every parent suffix
            while parts:
                totals['.'.join(parts)] += visits
                parts = parts[1:]
        return [f"{count} {dom}" for dom, count in totals.items()]
if __name__ == '__main__':
    # Manual smoke test mirroring the example in the module docstring.
    A = ["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
    S = Solution()
    res = S.subdomainVisits(A)
    print(res)
{
"api_name": "collections.Counter",
"line_number": 27,
"usage_type": "call"
}
] |
17642557437 | import os
from flask import Flask, request, jsonify
from flask_pymongo import PyMongo
from bson import ObjectId
import bcrypt
import jwt
import ssl
import datetime
from functools import wraps
from dotenv import load_dotenv
load_dotenv('.env')

app = Flask(__name__)
# Mongo URI and JWT signing key are supplied via the environment (.env).
app.config['MONGO_URI'] = os.environ.get('MONGO_URI')
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
# NOTE(review): ssl_cert_reqs=CERT_NONE disables TLS certificate
# verification for the Mongo connection -- acceptable for dev only.
mongo = PyMongo(app, ssl_cert_reqs=ssl.CERT_NONE)
def token_required(f):
    """Decorator: require a valid "Authorization: Bearer <jwt>" header.

    Responds with 401 JSON when the header is missing, malformed, expired
    or otherwise invalid; otherwise calls the wrapped view unchanged.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.headers.get('Authorization')
        if not token:
            return jsonify({'message': 'Token is missing'}), 401
        # Expect "<scheme> <token>".  Previously a header without a space
        # made token.split()[1] raise IndexError -> HTTP 500 instead of 401.
        parts = token.split()
        if len(parts) < 2:
            return jsonify({'message': 'Token is invalid'}), 401
        try:
            jwt.decode(parts[1], app.config['SECRET_KEY'], algorithms=['HS256'])
        except jwt.ExpiredSignatureError:
            return jsonify({'message': 'Token has expired'}), 401
        except jwt.InvalidTokenError:
            return jsonify({'message': 'Token is invalid'}), 401
        return f(*args, **kwargs)
    return decorated
class HomePage:
    """Root endpoint: plain-text directory of the available routes."""

    @staticmethod
    @app.route("/")
    def index():
        """Describe the register/login/template endpoints."""
        return "Homepage<br> Use /register to register user <br> Use /login to login user<br> Use " \
               "/template to get template<br> Use /template/<template_id> to do 'GET', 'PUT', 'DELETE' methods"
class UserManagement:
    """Registration and login endpoints backed by the `users` collection."""

    @staticmethod
    @app.route('/register', methods=['POST'])
    def register():
        """Create a user with a bcrypt-hashed password; 409 if email exists."""
        data = request.get_json()
        # NOTE(review): the password is hashed before the duplicate check,
        # so the work is wasted for already-registered emails.
        hashed_pw = bcrypt.hashpw(data['password'].encode('utf-8'), bcrypt.gensalt())
        user = mongo.db.users.find_one({'email': data['email']})
        if user:
            return jsonify({'message': 'User already registered'}), 409
        user_id = str(mongo.db.users.insert_one({
            'first_name': data['first_name'],
            'last_name': data['last_name'],
            'email': data['email'],
            'password': hashed_pw}).inserted_id)
        return jsonify({'message': 'User registered successfully!', 'user_id': user_id}), 201

    @staticmethod
    @app.route('/login', methods=['POST'])
    def login():
        """Verify credentials; return a 1-hour HS256 JWT on success, else 401."""
        auth = request.get_json()
        user = mongo.db.users.find_one({'email': auth['email']})
        if user and bcrypt.checkpw(auth['password'].encode('utf-8'), user['password']):
            token = jwt.encode({
                'user_id': str(user['_id']),
                'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=1)
            }, app.config['SECRET_KEY'], algorithm='HS256')
            return jsonify({'token': token})
        return jsonify({'message': 'Invalid credentials'}), 401
class TemplateManagement:
    """CRUD endpoints for templates; every route requires a valid JWT and
    all queries are scoped to the authenticated user's user_id."""

    @staticmethod
    @app.route('/template', methods=['POST'])
    @token_required
    def create_template():
        """Create a template owned by the authenticated user."""
        # user_id comes from the JWT payload (already validated by the
        # token_required decorator).
        user_id = jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])['user_id']
        data = request.get_json()
        data['user_id'] = user_id
        inserted_template = mongo.db.templates.insert_one(data)
        inserted_id = str(inserted_template.inserted_id)
        return jsonify({'template_id': inserted_id, 'message': 'Template created successfully'}), 201

    @staticmethod
    @app.route('/template', methods=['GET'])
    @token_required
    def get_all_templates():
        """List all templates of the authenticated user (ids stringified)."""
        user_id = \
            jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
                'user_id']
        # Two-step lookup: first collect the ids, then re-fetch each
        # document without its ObjectId so the response is JSON-safe.
        templates = list(mongo.db.templates.find({'user_id': user_id}, {'_id': 1}))
        formatted_templates = [{'_id': str(template['_id'])} for template in templates]
        result = []
        for template in formatted_templates:
            template_id = template['_id']
            template_data = mongo.db.templates.find_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'_id': 0})
            if template_data:
                template_data['_id'] = template_id
                result.append(template_data)
        return jsonify(result), 200

    @staticmethod
    @app.route('/template/<template_id>', methods=['GET'])
    @token_required
    def get_template(template_id):
        """Fetch one template (404 when absent or owned by another user)."""
        user_id = \
            jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
                'user_id']
        template = mongo.db.templates.find_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'_id': 0})
        if template:
            return jsonify({'template_id': template_id, "template": template}), 200
        else:
            return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404

    @staticmethod
    @app.route('/template/<template_id>', methods=['PUT'])
    @token_required
    def update_template(template_id):
        """$set the given fields on one template; 404 if nothing changed."""
        user_id = \
            jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
                'user_id']
        data = request.get_json()
        result = mongo.db.templates.update_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'$set': data})
        print(result)  # NOTE(review): leftover debug output
        if result.modified_count > 0:
            return jsonify({'template_id': template_id, 'message': 'Template updated successfully'}), 200
        else:
            return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404

    @staticmethod
    @app.route('/template/<template_id>', methods=['DELETE'])
    @token_required
    def delete_template(template_id):
        """Delete one template (404 when absent or owned by another user)."""
        user_id = \
            jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
                'user_id']
        result = mongo.db.templates.delete_one({'_id': ObjectId(template_id), 'user_id': user_id})
        if result.deleted_count > 0:
            return jsonify({'template_id': template_id, 'message': 'Template deleted successfully'}), 200
        else:
            return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404
if __name__ == '__main__':
    user_manager = UserManagement()
    template_manager = TemplateManagement()
    # NOTE(review): the @app.route decorators above already registered all
    # of these views; the add_url_rule calls map the same function to the
    # same endpoint again, which Flask tolerates but which is redundant.
    app.add_url_rule('/register', methods=['POST'], view_func=user_manager.register)
    app.add_url_rule('/login', methods=['POST'], view_func=user_manager.login)
    app.add_url_rule('/template', methods=['POST'], view_func=template_manager.create_template)
    app.add_url_rule('/template', methods=['GET'], view_func=template_manager.get_all_templates)
    app.add_url_rule('/template/<template_id>', methods=['GET'], view_func=template_manager.get_template)
    app.add_url_rule('/template/<template_id>', methods=['PUT'], view_func=template_manager.update_template)
    app.add_url_rule('/template/<template_id>', methods=['DELETE'], view_func=template_manager.delete_template)
    # Development server only (debug=True).
    app.run(debug=True)
| abhi1083/simple_crud_ops | main.py | main.py | py | 6,831 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
45333966266 | from markdown import markdown
from unittest import TestCase
from markdown_vimwiki.extension import VimwikiExtension
class TestExtension(TestCase):
    """End-to-end tests: markdown source -> HTML through VimwikiExtension."""

    def test_default_config(self):
        """Default config: checkbox states map to their default CSS classes
        and :a:b: sequences become span.tag elements."""
        # NOTE(review): list nesting / blank lines in the fixtures were
        # reconstructed; the expected HTML implies done1-done4 are nested
        # under done0 -- confirm against the repository file.
        source = """
Hello World
===========

* [-] rejected
* [ ] done0
    * [.] done1
    * [o] done2
    * [O] done3
    * [X] done4

:lorem:ipsum:
""".strip()
        expected = """
<h1>Hello World</h1>
<ul>
<li class="rejected"> rejected</li>
<li class="done0"> done0<ul>
<li class="done1"> done1</li>
<li class="done2"> done2</li>
<li class="done3"> done3</li>
<li class="done4"> done4</li>
</ul>
</li>
</ul>
<p><span class="tag">lorem</span> <span class="tag">ipsum</span></p>
""".strip()
        # The extension must work both as an instance and via its
        # entry-point name.
        html = markdown(source, extensions=[VimwikiExtension()])
        self.assertEqual(html, expected)
        html = markdown(source, extensions=['markdown_vimwiki'])
        self.assertEqual(html, expected)

    def test_custom_config(self):
        """Custom list levels/classes and a custom tag class are honoured."""
        source = """
Hello World
===========

* [i] yip
* [a] yap
* [o] yop

:lorem:ipsum:
""".strip()
        expected = """
<h1>Hello World</h1>
<ul>
<li class="yip"> yip</li>
<li class="yap"> yap</li>
<li class="yop"> yop</li>
</ul>
<p><span class="bark">lorem</span> <span class="bark">ipsum</span></p>
""".strip()
        html = markdown(source, extensions=[VimwikiExtension(
            list_levels='iao',
            list_classes=['yip', 'yap', 'yop'],
            tag_class='bark')])
        self.assertEqual(html, expected)
| makyo/markdown-vimwiki | markdown_vimwiki/tests/test_extension.py | test_extension.py | py | 1,499 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "markdown.markdown",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "markdown_vimwiki.extension.VimwikiExtension",
"line_number": 38,
"usage_type": "call"
},
{
"ap... |
13489533801 | import json
import requests
# Delete a library book via the REST API and verify the JSON response.
payload = {"ID": "ashish123227"}
resource = requests.post(
    'http://216.10.245.166/Library/DeleteBook.php',
    json=payload,
    headers={'Content-Type': 'application/json'},
)
# Fail loudly with the server's body if the call did not succeed.
assert resource.status_code == 200, f'the api failed with an error messages as : {resource.text}'
response_json = json.loads(resource.text)
print(response_json)
assert response_json['msg'] == 'book is successfully deleted', 'book is not deleted'
| bhagatashish/APT_Testing | delete_book.py | delete_book.py | py | 462 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
}
] |
18777503409 | from flask import Flask
from flask import render_template
from pymongo import MongoClient
import json
from bson import json_util
from bson.json_util import dumps
import random
import numpy as np
import ast
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pylab as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist,squareform
import collections
from collections import defaultdict
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import pairwise_distances
import math
app = Flask(__name__)

# MongoDB connection settings for the crime dataset.
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
DBS_NAME = 'crime'
COLLECTION_NAME = 'projects'
# Projection used for every query: the 19 numeric crime/socio-economic
# columns, with Mongo's internal _id excluded.
FIELDS = {'crmrte': True, 'prbarr': True, 'prbconv': True, 'prbpris': True,'avgsen': True,'density': True,'wcon': True,'wtuc': True,'wtrd': True,'wfir': True,'wser': True,'wmfg': True,'taxpc': True,'pctmin': True,'wfed': True,'wsta': True,'wloc': True,'mix': True,'pctymle': True,'_id': False}
@app.route("/")
def index():
    """Serve the main page."""
    return render_template("index.html")
@app.route("/crime/projects")
def crime_projects():
    """Return every crime record (FIELDS projection) as a JSON array string.

    The Mongo connection is now closed in a finally block -- previously an
    exception during find/serialization leaked the connection.
    """
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    try:
        collection = connection[DBS_NAME][COLLECTION_NAME]
        projects = collection.find(projection=FIELDS)
        # Materialize the cursor, then serialize BSON types via json_util.
        json_projects = json.dumps(list(projects), default=json_util.default)
    finally:
        connection.close()
    return json_projects
# --- module-level pipeline (runs at import time) ----------------------------
# NOTE(review): this queries MongoDB as soon as the module is imported.
proj_details=crime_projects();
crime_data = pd.read_json(proj_details)
# testarray = ast.literal_eval(proj_details)
# Keep only the numeric columns used for clustering.
clusterObj= crime_data[['crmrte','prbarr','prbconv','prbpris','avgsen',
'density','wcon','wtuc','wtrd','wfir','wser','wmfg','taxpc','pctmin','wfed','wsta','wloc','mix','pctymle']]
clustervar=clusterObj.copy()
# z-score every column so K-means distances are comparable across features
# clustervar['county']= preprocessing.scale(clustervar['county'].astype('float64'))
# clustervar['year']= preprocessing.scale(clustervar['year'].astype('float64'))
clustervar['crmrte']= preprocessing.scale(clustervar['crmrte'].astype('float64'))
clustervar['prbarr']= preprocessing.scale(clustervar['prbarr'].astype('float64'))
clustervar['prbconv']= preprocessing.scale(clustervar['prbconv'].astype('float64'))
clustervar['prbpris']= preprocessing.scale(clustervar['prbpris'].astype('float64'))
clustervar['avgsen']= preprocessing.scale(clustervar['avgsen'].astype('float64'))
clustervar['density']= preprocessing.scale(clustervar['density'].astype('float64'))
clustervar['wcon']= preprocessing.scale(clustervar['wcon'].astype('float64'))
clustervar['wtuc']= preprocessing.scale(clustervar['wtuc'].astype('float64'))
clustervar['wtrd']= preprocessing.scale(clustervar['wtrd'].astype('float64'))
clustervar['wfir']= preprocessing.scale(clustervar['wfir'].astype('float64'))
clustervar['wser']= preprocessing.scale(clustervar['wser'].astype('float64'))
clustervar['wmfg']= preprocessing.scale(clustervar['wmfg'].astype('float64'))
clustervar['taxpc']= preprocessing.scale(clustervar['taxpc'].astype('float64'))
clustervar['pctmin']= preprocessing.scale(clustervar['pctmin'].astype('float64'))
clustervar['wfed']= preprocessing.scale(clustervar['wfed'].astype('float64'))
clustervar['wsta']= preprocessing.scale(clustervar['wsta'].astype('float64'))
clustervar['wloc']= preprocessing.scale(clustervar['wloc'].astype('float64'))
clustervar['mix']= preprocessing.scale(clustervar['mix'].astype('float64'))
clustervar['pctymle']= preprocessing.scale(clustervar['pctymle'].astype('float64'))
# Full scaled matrix used for clustering/sampling below.
clus_train = clustervar
def findSuitableK():
    """Elbow-method plot: mean within-cluster distance for k = 1..8.

    Shows (and blocks on) a matplotlib window; returns nothing.
    """
    clusters=range(1,9)
    meandist=[]
    for k in clusters:
        model=KMeans(n_clusters=k)
        model.fit(clus_train)
        clusassign=model.predict(clus_train)
        # mean distance of every sample to its nearest centroid
        meandist.append(sum(np.min(cdist(clus_train, model.cluster_centers_, 'euclidean'), axis=1))
        / clus_train.shape[0])
    plt.plot(clusters, meandist)
    plt.xlabel('Number of clusters')
    plt.ylabel('Average distance')
    plt.title('Selecting k with the Elbow Method') # pick the fewest number of clusters that reduces the average distance
    plt.show()

# NOTE(review): runs (and blocks on plt.show) at import time.
findSuitableK()
def createClusters():
    """Fit the final 3-cluster KMeans model on ``clus_train``; return its labels."""
    km = KMeans(n_clusters=3)
    km.fit(clus_train)
    km.predict(clus_train)
    return km.labels_
lables=createClusters()
def groupClusters():
    """Group row positions of ``clus_train`` by their cluster label.

    Uses the module-level ``lables`` array produced by ``createClusters``
    and returns a dict mapping cluster label -> list of row positions.
    """
    grouped = {}
    for row_index, label in enumerate(lables):
        grouped.setdefault(label, []).append(row_index)
    return grouped
cluster_dict=groupClusters()
def sampleClusters():
    """Stratified sample: one third of each cluster's rows from ``clus_train``.

    Uses the module-level ``cluster_dict`` (cluster label -> row positions)
    and draws ``len(rows) // 3`` random positions per cluster.

    Returns
    -------
    pd.DataFrame
        The sampled rows, re-indexed from 0 (empty frame if nothing sampled).
    """
    cluster_sample = {}
    sampled_rows = []
    for label in range(0, 3):
        positions = cluster_dict[label]
        cluster_sample[label] = random.sample(positions, len(positions) // 3)
        for pos in cluster_sample[label]:
            sampled_rows.append(clus_train.iloc[[pos]])
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # build the frame with a single concat instead of growing it row by row
    # (also O(n) instead of O(n^2)).
    if not sampled_rows:
        return pd.DataFrame()
    return pd.concat(sampled_rows, ignore_index=True)
def randomSample():
    # Baseline (non-stratified) sample: one third of all rows drawn
    # uniformly at random from the scaled feature frame.
    newClusterTrain= clustervar.sample(n=len(clus_train)//3)
    return newClusterTrain
randomSampledClusterFrame=randomSample()
sampled_dataFrame=sampleClusters()
pca = PCA(n_components=19)
pca.fit(sampled_dataFrame)
loadings=pca.components_
def pcaRandomSample():
    """Fit a 19-component PCA on the random sample; return (model, loadings)."""
    random_model = PCA(n_components=19)
    random_model.fit_transform(randomSampledClusterFrame)
    return random_model, random_model.components_
random_pca,Random_loadings =pcaRandomSample()
def screeplot(pca, standardised_values):
    # Scree plot: explained variance per principal component, plus the
    # (variance, component-number) arrays returned for the web charts.
    # NOTE(review): `standardised_values` is accepted but never used -- one
    # call site even passes a loadings matrix here; confirm before removing
    # the parameter.
    y = pca.explained_variance_
    x = np.arange(len(y)) + 1
    plt.plot(x, y, "o-")
    plt.xticks(x, ["PC"+str(i) for i in x], rotation=60)
    plt.ylabel("Variance")
    plt.show()
    return np.array(y),np.array(x)
y,x =screeplot(pca, sampled_dataFrame)
y_random,x_random =screeplot(random_pca, Random_loadings)
@app.route("/crime/screeplot")
def showScreeplot():
return render_template("screeplot.html",y=y.tolist(),x=x.tolist())
@app.route("/crime/randomscreeplot")
def showScreeplot_random():
return render_template("randomScreePlot.html",y=y_random.tolist(),x=x_random.tolist())
def squaredLoadings(loadings_matrix=None):
    """Sum of squared loadings of the top-3 PCA components, per feature.

    Parameters
    ----------
    loadings_matrix : sequence of rows, optional
        Component loadings (rows = components, columns = features).
        Defaults to the module-level ``loadings`` from the stratified-sample
        PCA, preserving the original no-argument call.

    Returns
    -------
    list
        One entry per feature column: ``sum(loadings[j][i]**2 for j in 0..2)``.
    """
    if loadings_matrix is None:
        loadings_matrix = loadings
    num_features = len(loadings_matrix[0])
    squared = [0] * num_features
    for i in range(num_features):
        total = 0  # renamed from `sum` to avoid shadowing the builtin
        for j in range(3):
            total = total + loadings_matrix[j][i] ** 2
        squared[i] = total
    return squared
sumSquareLoadings=squaredLoadings()
@app.route("/crime/squaredLoadings")
def showSqureloadingsPlot():
sortedSumSquareLoadings=sorted(sumSquareLoadings,reverse=True)
length= len(sortedSumSquareLoadings)
columns=[0 for y in range(length)]
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[sumSquareLoadings.index(i)]
index =index+1
return render_template("squaredloadings.html",y=sortedSumSquareLoadings,x=json.dumps(columns))
def randomSquaredLoadings(loadings_matrix=None):
    """Sum of squared loadings of the top-3 components for the random-sample PCA.

    Bug fixes relative to the original:
    - the inner loop read the stratified-sample ``loadings`` instead of
      ``Random_loadings``, so the "random" routes silently showed the other
      PCA's values;
    - the result was padded to length 21 with zeros although the data has 19
      feature columns, which later pushed ``clus_train.columns.values[...]``
      out of range in the route handler.

    Same contract as ``squaredLoadings``: one squared sum per feature column;
    ``loadings_matrix`` defaults to the module-level ``Random_loadings``.
    """
    if loadings_matrix is None:
        loadings_matrix = Random_loadings
    num_features = len(loadings_matrix[0])
    squared = [0] * num_features
    for i in range(num_features):
        total = 0
        for j in range(3):
            total = total + loadings_matrix[j][i] ** 2
        squared[i] = total
    return squared
randomsumSquareLoadings=randomSquaredLoadings()
@app.route("/crime/randomsquaredLoadings")
def showRandomSqureloadingsPlot():
sortedSumSquareLoadings=sorted(randomsumSquareLoadings,reverse=True)
length= len(sortedSumSquareLoadings)
columns=[0 for y in range(length)]
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[randomsumSquareLoadings.index(i)]
index =index+1
return render_template("randomSquaredloadings.html",y=sortedSumSquareLoadings,x=json.dumps(columns))
@app.route("/crime/scatterMatrix")
def getColumnData():
sortedSumSquareLoadings=sorted(sumSquareLoadings,reverse=True)
columns=[0 for y in range(3)]
columnsVals={}
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[sumSquareLoadings.index(i)]
index =index+1
if index==3:
break
for i in range(3):
columnsVals.update({columns[i]:sampled_dataFrame.loc[:,columns[i]].tolist()})
return render_template("scatterMatrix.html",dataVal=columnsVals,traits=columns)
@app.route("/crime/randomScatterMatrix")
def getRandomColumnData():
sortedSumSquareLoadings=sorted(randomsumSquareLoadings,reverse=True)
columns=[0 for y in range(3)]
# columnsVals=[0 for y in range(3)]
columnsVals={}
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[randomsumSquareLoadings.index(i)]
index =index+1
if index==3:
break
for i in range(3):
columnsVals.update({columns[i]:randomSampledClusterFrame.loc[:,columns[i]].tolist()})
return render_template("scatterMatrix.html",dataVal=columnsVals,traits=columns)
def MDS_DimReduction():
    # 2-D MDS embedding (euclidean dissimilarity) of the stratified sample.
    mdsData = MDS(n_components=2,dissimilarity='euclidean')
    mdsData.fit(sampled_dataFrame)
    return mdsData.embedding_
def MDS_RandomDimReduction():
    # 2-D MDS embedding (euclidean dissimilarity) of the random sample.
    mdsData = MDS(n_components=2,dissimilarity='euclidean')
    mdsData.fit(randomSampledClusterFrame)
    return mdsData.embedding_
def PCA_TopComp():
    # Project the stratified sample onto its top-2 principal components.
    pca = PCA(n_components=2)
    return pca.fit_transform(sampled_dataFrame)
def PCA_RandomTopComp():
    # Project the random sample onto its top-2 principal components.
    pca = PCA(n_components=2)
    return pca.fit_transform(randomSampledClusterFrame)
top_PCAVal=PCA_TopComp()
top_RandomPCAVal=PCA_RandomTopComp()
@app.route("/crime/scatterPlot")
def PCA_ScatterPlot():
return render_template("scatterPlot.html",dataVal=top_PCAVal.tolist())
@app.route("/crime/randomscatterPlot")
def PCA_RandomScatterPlot():
return render_template("randomScatterPlot.html",dataVal=top_RandomPCAVal.tolist())
def MDS_DimReduction_Correlation():
    # 2-D MDS on a precomputed correlation-distance matrix (stratified sample).
    # max_iter is capped at 10 to keep the request-time computation short.
    mdsData = MDS(n_components=2,dissimilarity='precomputed',max_iter=10)
    precompute=pairwise_distances(sampled_dataFrame.values,metric='correlation')
    return mdsData.fit_transform(precompute)
def MDS_Random_DimReduction_Correlation():
    # 2-D MDS on a precomputed correlation-distance matrix (random sample).
    mdsData = MDS(n_components=2,dissimilarity='precomputed',max_iter=10)
    precompute=pairwise_distances(randomSampledClusterFrame.values,metric='correlation')
    return mdsData.fit_transform(precompute)
@app.route("/crime/MDSscatterPlot")
def MDS_ScatterPlot():
return render_template("ScatterPlotMDS.html",dataVal=mds_embeddings.tolist())
@app.route("/crime/MDSCorrelationscatterPlot")
def MDS_ScatterPlot_Correlation():
return render_template("ScatterPlotMDS.html",dataVal=mds_embeddings_correlation.tolist())
@app.route("/crime/MDSRandomscatterPlot")
def MDS_RandomScatterPlot():
return render_template("RandomScatterPlotMDS.html",dataVal=mds_RandomEmbeddings.tolist())
@app.route("/crime/MDSRandomCorrelationscatterPlot")
def MDS_RandomScatterPlot_Correlation():
return render_template("RandomScatterPlotMDS.html",dataVal=mds_Random_embeddings_correlation.tolist())
mds_embeddings=MDS_DimReduction()
mds_embeddings_correlation=MDS_DimReduction_Correlation()
mds_RandomEmbeddings=MDS_RandomDimReduction()
mds_Random_embeddings_correlation=MDS_Random_DimReduction_Correlation()
if __name__ == "__main__":
app.run(host='0.0.0.0',port=5005,debug=True) | rbadri91/N.C.-Crime-Data-Visualization | app.py | app.py | py | 11,214 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
20426981888 | """
All rights reserved.
--Yang Song (songyangmri@gmail.com)
--2021/1/7
"""
import os
import pickle
import random
from abc import abstractmethod
from lifelines import CoxPHFitter, AalenAdditiveFitter
from lifelines.utils.printer import Printer
from lifelines import utils
from SA.Utility import mylog
from SA.DataContainer import DataContainer
class BaseFitter(object):
    """Thin wrapper around a lifelines fitter: fit, persist, and summarise."""

    def __init__(self, fitter=None, name=None):
        self.fitter = fitter  # underlying lifelines estimator
        self.name = name      # display name of the model

    def Fit(self, dc: DataContainer):
        """Fit the wrapped estimator on the container's dataframe."""
        self.fitter.fit(dc.df, duration_col=dc.duration_name,
                        event_col=dc.event_name)

    def Save(self, store_folder):
        """Pickle the fitted estimator to <store_folder>/model.pkl."""
        model_path = os.path.join(store_folder, 'model.pkl')
        with open(model_path, 'wb') as model_file:
            pickle.dump(self.fitter, model_file)

    def Load(self, store_folder):
        """Restore the estimator previously written by Save()."""
        model_path = os.path.join(store_folder, 'model.pkl')
        with open(model_path, 'rb') as model_file:
            self.fitter = pickle.load(model_file)

    def Plot(self):
        self.fitter.plot()

    def Summary(self):
        """Return the estimator's print_summary() output captured as a string."""
        import io
        from contextlib import redirect_stdout
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            self.fitter.print_summary()
        return buffer.getvalue()
class CoxPH(BaseFitter):
    """Cox proportional-hazards model (lifelines ``CoxPHFitter``).

    The ``Fit`` override present in the original was byte-identical to
    ``BaseFitter.Fit`` and has been removed; behaviour is unchanged.
    """
    def __init__(self):
        # Seed the global RNG so repeated fits are reproducible.
        random.seed(0)
        super(CoxPH, self).__init__(CoxPHFitter(), self.__class__.__name__)
class AalenAdditive(BaseFitter):
    # Aalen additive-hazards regression model (lifelines AalenAdditiveFitter).
    def __init__(self):
        super(AalenAdditive, self).__init__(AalenAdditiveFitter(), self.__class__.__name__)
#
# class Weibull(BaseFitter):
# def __init__(self):
# super(Weibull, self).__init__(WeibullAFTFitter(), self.__class__.__name__)
if __name__ == '__main__':
    # Demo / smoke test: fit CoxPH on the bundled demo dataset and print
    # its summary. The commented lines exercise the other fitter and the
    # save/load round trip.
    import numpy as np  # NOTE(review): unused in the active code path
    model = CoxPH()
    print(model.name)
    # model = AalenAdditive()
    # print(model.name)
    train_dc = DataContainer()
    train_dc.Load(r'..\..\Demo\train.csv', event_name='status', duration_name='time')
    model.Fit(train_dc)
    result = model.Summary()
    print(result)
    # model.Save(r'..\..\Demo')
    #
    # model_new = AalenAdditive()
    # model_new.Load(r'..\..\Demo')
    # model_new.Summary()
| salan668/FAE | SA/Fitter.py | Fitter.py | py | 2,221 | python | en | code | 121 | github-code | 6 | [
{
"api_name": "SA.DataContainer.DataContainer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump"... |
5005373290 | from typing import Any, Iterable, MutableMapping
from typing_extensions import TypeAlias
from .. import etree
from .._types import Unused, _AnyStr, _ElemClsLookupArg, _FileReadSource
from ._element import HtmlElement
_HtmlElemParser: TypeAlias = etree._parser._DefEtreeParsers[HtmlElement]
#
# Parser
#
# Stub version before March 2023 used to omit 'target' parameter, which
# would nullify default HTML element lookup behavior, degenerating html
# submodule parsers into etree ones. Since it is decided to not support
# custom target parser for now, we just add back 'target' parameter for
# coherence. Same for XHTMLParser below.
class HTMLParser(etree.HTMLParser[HtmlElement]):
    """An HTML parser configured to return ``lxml.html`` Element
    objects.

    Notes
    -----
    This subclass is not specialized, unlike the ``etree`` counterpart.
    They are designed to always handle ``HtmlElement``;
    for generating other kinds of ``_Elements``, one should use
    etree parsers with ``set_element_class_lookup()`` method instead.
    In that case, see ``_FeedParser.set_element_class_lookup()`` for more info.
    """
    def __init__(
        self,
        *,
        encoding: _AnyStr | None = ...,
        remove_blank_text: bool = ...,
        remove_comments: bool = ...,
        remove_pis: bool = ...,
        strip_cdata: bool = ...,
        no_network: bool = ...,
        target: etree.ParserTarget[Any] | None = ...,
        schema: etree.XMLSchema | None = ...,
        recover: bool = ...,
        compact: bool = ...,
        default_doctype: bool = ...,
        collect_ids: bool = ...,
        huge_tree: bool = ...,
    ) -> None: ...
    # Custom parser targets are not supported here (see module comment
    # above), so the property is typed as always returning None.
    @property
    def target(self) -> None: ...
class XHTMLParser(etree.XMLParser[HtmlElement]):
    """An XML parser configured to return ``lxml.html`` Element
    objects.

    Notes
    -----
    This subclass is not specialized, unlike the ``etree`` counterpart.
    They are designed to always handle ``HtmlElement``;
    for generating other kinds of ``_Elements``, one should use
    etree parsers with ``set_element_class_lookup()`` method instead.
    In that case, see ``_FeedParser.set_element_class_lookup()`` for more info.

    Original doc
    ------------
    Note that this parser is not really XHTML aware unless you let it
    load a DTD that declares the HTML entities. To do this, make sure
    you have the XHTML DTDs installed in your catalogs, and create the
    parser like this::

        >>> parser = XHTMLParser(load_dtd=True)

    If you additionally want to validate the document, use this::

        >>> parser = XHTMLParser(dtd_validation=True)

    For catalog support, see http://www.xmlsoft.org/catalog.html.
    """
    def __init__(
        self,
        *,
        encoding: _AnyStr | None = ...,
        attribute_defaults: bool = ...,
        dtd_validation: bool = ...,
        load_dtd: bool = ...,
        no_network: bool = ...,
        target: etree.ParserTarget[Any] | None = ...,
        ns_clean: bool = ...,
        recover: bool = ...,
        schema: etree.XMLSchema | None = ...,
        huge_tree: bool = ...,
        remove_blank_text: bool = ...,
        resolve_entities: bool = ...,
        remove_comments: bool = ...,
        remove_pis: bool = ...,
        strip_cdata: bool = ...,
        collect_ids: bool = ...,
        compact: bool = ...,
    ) -> None: ...
    # As with HTMLParser above: custom targets are unsupported, so the
    # property is typed as always None.
    @property
    def target(self) -> None: ...
# Pre-configured module-level parser instances (the defaults used by the
# parsing functions in this module when no parser is supplied).
html_parser: HTMLParser
xhtml_parser: XHTMLParser
#
# Parsing funcs
#
# Calls etree.fromstring(html, parser, **kw) which has signature
# fromstring(text, parser, *, base_url)
# Parse a string into a full HTML document tree and return the root
# HtmlElement; `ensure_head_body` presumably forces <head>/<body> to be
# present (per the parameter name -- TODO confirm against lxml docs).
def document_fromstring(
    html: _AnyStr,
    parser: _HtmlElemParser | None = ...,
    ensure_head_body: bool = ...,
    *,
    base_url: str | None = ...,
) -> HtmlElement: ...
# Parse markup that may contain several top-level elements and return them
# as a list; extra keyword args are accepted but ignored (typed Unused).
def fragments_fromstring(
    html: _AnyStr,
    no_leading_text: bool = ...,
    base_url: str | None = ...,
    parser: _HtmlElemParser | None = ...,
    **kw: Unused,
) -> list[HtmlElement]: ...
# Parse markup expected to yield a single fragment; `create_parent`
# presumably wraps the result in a new parent element (per the name --
# TODO confirm). Extra keyword args are ignored (typed Unused).
def fragment_fromstring(
    html: _AnyStr,
    create_parent: bool = ...,
    base_url: str | None = ...,
    parser: _HtmlElemParser | None = ...,
    **kw: Unused,
) -> HtmlElement: ...
# Generic parse entry point returning a single HtmlElement; per the module
# comment above, this forwards to etree.fromstring(html, parser, **kw).
def fromstring(
    html: _AnyStr,
    base_url: str | None = ...,
    parser: _HtmlElemParser | None = ...,
    **kw: Unused,
) -> HtmlElement: ...
# Parse from a filename, URL or file-like object; unlike the fromstring
# variants this returns the whole ElementTree, not just the root element.
def parse(
    filename_or_url: _FileReadSource,
    parser: _HtmlElemParser | None = ...,
    base_url: str | None = ...,
    **kw: Unused,
) -> etree._ElementTree[HtmlElement]: ...
#
# Element Lookup
#
class HtmlElementClassLookup(etree.CustomElementClassLookup):
    # Element-class lookup that maps tag names to HtmlElement subclasses.
    def __init__(
        self,
        # Should have been something like Mapping[str, type[HtmlElement]],
        # but unfortunately classes mapping is required to be mutable
        classes: MutableMapping[str, Any] | None = ...,
        # docstring says mixins is mapping, but implementation says otherwise
        mixins: Iterable[tuple[str, type[HtmlElement]]] = ...,
    ) -> None: ...
    # Only `node_type` and `name` matter for the lookup; document and
    # namespace are accepted but unused (typed Unused). The `name`
    # parameter deliberately narrows the base signature, hence the ignore.
    def lookup(
        self,
        node_type: _ElemClsLookupArg | None,
        document: Unused,
        namespace: Unused,
        name: str,  # type: ignore[override]
    ) -> type[HtmlElement] | None: ...
| abelcheung/types-lxml | lxml-stubs/html/_parse.pyi | _parse.pyi | pyi | 5,211 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "typing_extensions.TypeAlias",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "_element.HtmlElement",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "_element.HtmlElement",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "_... |
30366690531 | """
Example of how to use a DataView and bare renderers to create plots
"""
from numpy import linspace, sin, cos
# Enthought library imports.
from chaco.api import (
DataView,
ArrayDataSource,
ScatterPlot,
LinePlot,
LinearMapper,
)
from chaco.tools.api import PanTool, ZoomTool
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, View
class PlotExample(HasTraits):
    # The assembled DataView; built lazily by _plot_default below.
    plot = Instance(Component)

    traits_view = View(
        UItem("plot", editor=ComponentEditor()),
        width=700,
        height=600,
        resizable=True,
        title="Dataview + renderer example",
    )

    def _plot_default(self):
        """Build a DataView holding a scatter and a line renderer that
        share the same x (index) data source."""
        x = linspace(-5, 10, 500)
        y = sin(x)
        y2 = 0.5 * cos(2 * x)

        view = DataView(border_visible=True)
        scatter = ScatterPlot(
            index=ArrayDataSource(x),
            value=ArrayDataSource(y),
            marker="square",
            color="red",
            outline_color="transparent",
            index_mapper=LinearMapper(range=view.index_range),
            value_mapper=LinearMapper(range=view.value_range),
        )

        # The line re-uses the scatter's index source so both renderers
        # stay aligned on the x axis.
        line = LinePlot(
            index=scatter.index,
            value=ArrayDataSource(y2),
            color="blue",
            index_mapper=LinearMapper(range=view.index_range),
            value_mapper=LinearMapper(range=view.value_range),
        )

        # Add the plot's index and value datasources to the dataview's
        # ranges so that it can auto-scale and fit appropriately
        view.index_range.sources.append(scatter.index)
        view.value_range.sources.append(scatter.value)
        view.value_range.sources.append(line.value)

        # Add the renderers to the dataview. The z-order is determined
        # by the order in which renderers are added.
        view.add(scatter)
        view.add(line)
        view.tools.append(PanTool(view))
        view.overlays.append(ZoomTool(view))

        return view
demo = PlotExample()
if __name__ == "__main__":
demo.configure_traits()
| enthought/chaco | chaco/examples/demo/data_view.py | data_view.py | py | 2,098 | python | en | code | 286 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "enable.api.Component",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "tra... |
40194290799 | import tweepy
import time
print('Starting bot....')
# SECURITY(review): real-looking Twitter API credentials are hard-coded and
# committed here. They should be revoked and loaded from environment
# variables or a secrets store instead of living in source control.
CONSUMER_KEY = "Cqw4pXPk4lz2EEUieSDKjKuQT"
CONSUMER_SECRET = "AhQZvxkBNS2bmXdUOX8tu5SoZi9vYdNimwmTuzkE9ZJJuzTEk5"
ACCES_KEY = "1323551878483865600-LVgJ1466OXyOnZqKNt4H3k0hBBQlmO"
ACCES_SECRET = "yWdPUmakm5Cn4eMURajaZkNkbeaXgLhzvD7msCsB5Ipxw"
# Authenticate once at import time; `api` is the shared tweepy client.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCES_KEY, ACCES_SECRET)
api = tweepy.API(auth)
# State file holding the ID of the last mention the bot processed.
FILE_NAME = 'last_seen.txt'
def retrieve_last_seen_id(file_name):
    """Return the ID of the last tweet this bot replied to.

    Reads the integer stored in *file_name*.  The file handle is closed
    deterministically via a context manager (the original leaked it if the
    read or int() conversion raised).
    """
    with open(file_name, 'r') as state_file:
        return int(state_file.read().strip())
def store_last_seen_id(last_seen, file_name):
    """Persist *last_seen* (a tweet ID) to *file_name*.

    Uses a context manager so the file is flushed and closed even if the
    write fails.  Returns None, matching the original signature.
    """
    with open(file_name, 'w') as state_file:
        state_file.write(str(last_seen))
    return
def reply_back():
    """Scan recent mentions (oldest first) and answer every '#hello' tweet."""
    print('retrieving and reply to tweets....')
    newest_handled = retrieve_last_seen_id(FILE_NAME)
    mentions = api.mentions_timeline(newest_handled, tweet_mode='extended')
    for mention in reversed(mentions):
        print(f'{mention.id}--{mention.full_text}')
        newest_handled = mention.id
        store_last_seen_id(newest_handled, FILE_NAME)
        if '#hello' in mention.full_text.lower():
            print('found #hello!')
            print('Responding back...')
            reply_text = '@' + mention.user.screen_name + '#hello back to you!'
            api.update_status(reply_text, mention.id)
while True:
reply_back()
time.sleep(15) | byte-exe/bot-reply-back_tweets | twt_bot.py | twt_bot.py | py | 1,355 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 45,
"usage_type": "call"
}
] |
21141343602 | import asyncio
import math
import sys
from collections import Counter, defaultdict
from pprint import pprint
import aiohttp
import async_timeout
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pyecharts import Bar as Line
from pyecharts import Overlap
from lucky.commands import util
URL = '/k/min_max_counter/{code}?resample={resample}&window_size={window_size}'
async def process(resample='1w', window_size=7*52, min_timetomarket=None, test=False, where='ALL'):
    # Fetch per-code new-high / new-low counter results from the backend,
    # aggregate them into a time-indexed frame, and render three HTML charts.
    rsts = await util.fetch(URL, resample=resample, window_size=window_size,
                            min_timetomarket=min_timetomarket, test=test, where=where)
    # Merge every code's per-date counters into one Counter per result key.
    c = defaultdict(Counter)
    for rst in rsts:
        for key, value in rst['result'].items():
            c[key].update(**value)
    # One column per counter key, indexed by date.
    df = None
    for key, value in c.items():
        tmp = pd.DataFrame.from_dict(value, 'index', columns=[key])
        if df is None:
            df = tmp
        else:
            df = pd.concat([df, tmp], axis=1, sort=True)
    df.index = pd.DatetimeIndex(df.index)
    df = df.sort_index()
    # Reindex onto a regular date grid so missing periods count as 0.
    ds = pd.date_range(min(df.index), max(df.index), freq=resample)
    df = df.reindex(ds,
                    copy=False, fill_value=0)
    # print(df)
    # x = df.plot()
    # plt.show()
    df = df.fillna(value=0)
    # Chart 1: rolling-window new-high vs new-low counts.
    line1 = Line()
    line1.add('is_rolling_max', df.index, df['is_rolling_max'])
    line2 = Line()
    line2.add('is_rolling_min', df.index, df['is_rolling_min'])
    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)  # , yaxis_index=1, is_add_yaxis=True
    util.render(overlap, path="render.html",)
    # Chart 2: absolute new-high vs new-low counts.
    line1 = Line()
    line1.add('ismax', df.index, df['ismax'])
    line2 = Line()
    line2.add('ismin', df.index, df['ismin'])
    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)
    util.render(overlap, path="render2.html",)
    # overlap.render(path="render2.html",)
    # NOTE: `c` is rebound here from the Counter dict to a column name.
    for c in df.columns:
        df[c] = pd.to_numeric(df[c])
    # Chart 3: monthly totals normalised by market size.
    df = df.resample('1m').sum()
    market_size = await util.get_marketsize(where=where)
    market_size = pd.DataFrame.from_dict(market_size)
    market_size.index = pd.DatetimeIndex(market_size.index)
    df['marketsize'] = market_size
    df['ismin'] = df['ismin'] / df['marketsize']
    df['ismax'] = df['ismax'] / df['marketsize']
    line1 = Line()
    line1.add('ismax', df.index, df['ismax'])
    line2 = Line()
    line2.add('ismin', df.index, df['ismin'])
    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)
    util.render(overlap, path="render3.html",)
    return df
@click.command()
@click.option('--resample', default='1d', help='用于减少结果集,类似于周线,月线')
@click.option('--window_size', default=7*52, help='窗口')
@click.option('--min_timetomarket', default=20180101, help='用于去除近期上市的股票')
@click.option('--where', default='ALL', help='市场')
@click.option('--test', default=False, help='是否启用test, 只处理少量code')
def main(resample, window_size, min_timetomarket, where, test):
    """
    破新高和新低的股票数
    """
    print('='*50)
    print('破新高和新低的股票数')
    import time
    started = time.time()
    loop = asyncio.get_event_loop()
    # Bug fix: the parsed --test option was previously ignored (test=True
    # was hard-coded), so a full run could never be requested from the CLI.
    # NOTE(review): this changes the default run from test-mode to a full
    # run (the option's default is False) -- confirm that was the intent.
    loop.run_until_complete(
        process(resample, window_size, min_timetomarket, test=test, where=where)
    )
    print(time.time() - started)
if __name__ == '__main__':
main()
| onecans/my | mystockservice/lucky/commands/min_max_counter.py | min_max_counter.py | py | 3,486 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "lucky.commands.util.fetch",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lucky.commands.util",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": ... |
43078051498 | import time
from datadog import initialize
from datadog import api as dogapi
from datadog.dogstatsd.base import DogStatsd
from datadog.dogstatsd.context import TimedContextManagerDecorator
from flask import g, request
class TimerWrapper(TimedContextManagerDecorator):
    """Timed context manager with explicit ``start()``/``stop()`` methods.

    The no-op ``__init__`` override (it only forwarded to the base class)
    was removed; construction behaviour is unchanged.
    """
    def start(self):
        # Begin timing; delegates to the context-manager entry.
        self.__enter__()

    def stop(self):
        # Stop timing and emit the metric; delegates to the context exit.
        self.__exit__(None, None, None)
class StatsD(object):
    def __init__(self, app=None, config=None):
        """
        Constructor for `flask.ext.datadog.StatsD`

        >>> from flask.ext.datadog import StatsD
        >>> app = Flask(__name__)
        >>> statsd = StatsD(app=app)

        :param app: Flask app to configure this client for, if `app` is `None`, then do not
            configure yet (call `init_app` manually instead)
        :type app: flask.Flask or None
        :param config: Configuration for this client to use instead of `app.config`
        :type config: dict or None
        """
        self.config = config
        self.statsd = None

        # If an app was provided, then call `init_app` for them
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        """
        Initialize Datadog DogStatsd client from Flask app

        >>> from flask.ext.datadog import StatsD
        >>> app = Flask(__name__)
        >>> statsd = StatsD()
        >>> statsd.init_app(app=app)

        Available DogStatsd config settings:

          STATSD_HOST - statsd host to send metrics to (default: 'localhost')
          STATSD_MAX_BUFFER_SIZE - max number of metrics to buffer before sending, only used when batching (default: 50)
          STATSD_NAMESPACE - metric name prefix to use, e.g. 'app_name' (default: None)
          STATSD_PORT - statsd port to send metrics to (default: 8125)
          STATSD_TAGS - list of tags to include by default, e.g. ['env:prod'] (default: None)
          STATSD_USEMS - whether or not to report timing in milliseconds (default: False)

        Available Flask-Datadog config settings:

          DATADOG_CONFIGURE_MIDDLEWARE - whether or not to setup response timing middleware (default: True)
          DATADOG_RESPONSE_METRIC_NAME - the name of the response time metric (default: 'flask.response.time')
          DATADOG_RESPONSE_SIZE_METRIC_NAME - the name of the response time metric (default: 'flask.response.size')
          DATADOG_RESPONSE_SAMPLE_RATE - the sample rate to use for response timing middleware (default: 1)
          DATADOG_RESPONSE_AUTO_TAG - whether to auto-add request/response tags to response metrics (default: True)
          DATADOG_RESPONSE_ENDPOINT_TAG_NAME - tag name to use for request endpoint tag name (default: 'endpoint')
          DATADOG_RESPONSE_METHOD_TAG_NAME - tag name to use for the request method tag name (default: 'method')

        :param app: Flask app to configure this client for
        :type app: flask.Flask
        :param config: optional, dictionary of config values (defaults to `app.config`)
        :type config: dict
        """
        # Used passed in config if provided, otherwise use the config from `app`
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        # Set default values for expected config properties
        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_MAX_BUFFER_SIZE', 50)
        self.config.setdefault('STATSD_NAMESPACE', None)
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_TAGS', None)
        self.config.setdefault('STATSD_USEMS', False)
        self.app = app

        # Configure DogStatsd client
        # https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/dogstatsd/base.py
        self.statsd = DogStatsd(host=self.config['STATSD_HOST'],
                                port=self.config['STATSD_PORT'],
                                max_buffer_size=self.config['STATSD_MAX_BUFFER_SIZE'],
                                namespace=self.config['STATSD_NAMESPACE'],
                                constant_tags=self.config['STATSD_TAGS'],
                                use_ms=self.config['STATSD_USEMS'])

        # Configure any of our middleware
        self.setup_middleware()

    def timer(self, *args, **kwargs):
        """Helper to get a `flask_datadog.TimerWrapper` for this `DogStatsd` client"""
        return TimerWrapper(self.statsd, *args, **kwargs)

    def incr(self, *args, **kwargs):
        """Helper to expose `self.statsd.increment` under a shorter name"""
        return self.statsd.increment(*args, **kwargs)

    def decr(self, *args, **kwargs):
        """Helper to expose `self.statsd.decrement` under a shorter name"""
        return self.statsd.decrement(*args, **kwargs)

    def setup_middleware(self):
        """Helper to configure/setup any Flask-Datadog middleware"""
        # Configure response time middleware (if desired)
        self.config.setdefault('DATADOG_CONFIGURE_MIDDLEWARE', True)
        self.config.setdefault('DATADOG_RESPONSE_SIZE_METRIC_NAME', 'flask.response.size')
        self.config.setdefault('DATADOG_RESPONSE_METRIC_NAME', 'flask.response.time')
        self.config.setdefault('DATADOG_RESPONSE_SAMPLE_RATE', 1)
        self.config.setdefault('DATADOG_RESPONSE_AUTO_TAG', True)
        self.config.setdefault('DATADOG_RESPONSE_ENDPOINT_TAG_NAME', 'endpoint')
        self.config.setdefault('DATADOG_RESPONSE_METHOD_TAG_NAME', 'method')
        if self.config['DATADOG_CONFIGURE_MIDDLEWARE']:
            self.app.before_request(self.before_request)
            self.app.after_request(self.after_request)

    def before_request(self):
        """
        Flask-Datadog middleware handle for before each request
        """
        # Set the request start time
        g.flask_datadog_start_time = time.time()
        g.flask_datadog_request_tags = []

        # Add some default request tags
        if self.config['DATADOG_RESPONSE_AUTO_TAG']:
            self.add_request_tags([
                # Endpoint tag
                '{tag_name}:{endpoint}'.format(tag_name=self.config['DATADOG_RESPONSE_ENDPOINT_TAG_NAME'],
                                               endpoint=str(request.endpoint).lower()),
                # Method tag
                '{tag_name}:{method}'.format(tag_name=self.config['DATADOG_RESPONSE_METHOD_TAG_NAME'],
                                             method=request.method.lower()),
            ])

    def after_request(self, response):
        """
        Flask-Datadog middleware handler for after each request

        :param response: the response to be sent to the client
        :type response: ``flask.Response``
        :rtype: ``flask.Response``
        """
        # Return early if we don't have the start time
        if not hasattr(g, 'flask_datadog_start_time'):
            return response

        # Get the response time for this request
        elapsed = time.time() - g.flask_datadog_start_time

        # Convert the elapsed time to milliseconds if they want them
        # (made explicit: the original read `self.use_ms`, which resolved to
        # the same DogStatsd attribute via the __getattr__ delegation below)
        if self.statsd.use_ms:
            elapsed = int(round(1000 * elapsed))

        # Add some additional response tags
        if self.config['DATADOG_RESPONSE_AUTO_TAG']:
            self.add_request_tags(['status_code:%s' % (response.status_code, )])

        tags = self.get_request_tags()
        sample_rate = self.config['DATADOG_RESPONSE_SAMPLE_RATE']

        # Emit timing metric
        self.statsd.timing(self.config['DATADOG_RESPONSE_METRIC_NAME'],
                           elapsed,
                           tags,
                           sample_rate)

        # Emit response size metric
        if 'content-length' in response.headers:
            size = int(response.headers['content-length'])
            self.statsd.histogram(self.config['DATADOG_RESPONSE_SIZE_METRIC_NAME'],
                                  size,
                                  tags,
                                  sample_rate)

        # We ALWAYS have to return the original response
        return response

    def get_request_tags(self):
        """
        Get the current list of tags set for this request

        :rtype: list
        """
        return getattr(g, 'flask_datadog_request_tags', [])

    def add_request_tags(self, tags):
        """
        Add the provided list of tags to the tags stored for this request

        :param tags: tags to add to this requests tags
        :type tags: list
        :rtype: list
        """
        # Get the current list of tags to append to
        # DEV: We use this method since ``self.get_request_tags`` will ensure that we get a list back
        current_tags = self.get_request_tags()

        # Append our new tags, and return the new full list of tags for this request
        g.flask_datadog_request_tags = current_tags + tags
        return g.flask_datadog_request_tags

    def __getattr__(self, name):
        """
        Magic method for fetching any underlying attributes from `self.statsd`

        We utilize `__getattr__` to ensure that we are always compatible with
        the `DogStatsd` client.
        """
        # If `self.statsd` has the attribute then return that attribute
        if self.statsd and hasattr(self.statsd, name):
            return getattr(self.statsd, name)

        # Bug fix: the message used to read "has has attribute"
        raise AttributeError('\'StatsD\' has no attribute \'{name}\''.format(name=name))

    def __enter__(self):
        """
        Helper to expose the underlying `DogStatsd` client for context managing

        >>> statsd = StatsD(app=app)
        >>> # Batch any metrics within the `with` block
        >>> with statsd:
        >>>     statsd.increment('metric')
        """
        return self.statsd.__enter__()

    def __exit__(self, *args, **kwargs):
        """Helper to expose the underlying `DogStatsd` client for context managing"""
        return self.statsd.__exit__(*args, **kwargs)
class API(object):
    def __init__(self, app=None, config=None):
        """
        Constructor for `flask.ext.datadog.API`

        >>> from flask.ext.datadog import API
        >>> app = Flask(__name__)
        >>> dogapi = API(app=app)

        :param app: Flask app to configure this client for, if `app` is `None`, then do not
            configure yet (call `init_app` manually instead)
        :type app: flask.Flask or None
        :param config: Configuration for this client to use instead of `app.config`
        :type config: dict or None
        """
        self.config = config

        # If an app was provided, then call `init_app` for them
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        """
        Initialize Datadog API client from Flask app

        >>> from flask.ext.datadog import API
        >>> app = Flask(__name__)
        >>> dogapi = API()
        >>> dogapi.init_app(app=app)

        Available config settings:

          DATADOG_API_KEY - Datadog API key from https://app.datadoghq.com/account/settings#api
          DATADOG_APP_KEY - Datadog APP key from https://app.datadoghq.com/account/settings#api

        :param app: Flask app to configure this client for
        :type app: flask.Flask
        :param config: optional, dictionary of config values (defaults to `app.config`)
        :type config: dict
        """
        # Used passed in config if provided, otherwise use the config from `app`
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        # Set default values for expected config properties
        self.config.setdefault('DATADOG_API_KEY', None)
        self.config.setdefault('DATADOG_APP_KEY', None)
        self.app = app

        # Initialize datadog client
        # DEV: Datadog client uses module level variables for storing API keys rather than initializing a
        #      class to manage a connection/and keys
        #      https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/__init__.py
        #      https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/api/__init__.py#L4-L9
        options = {
            'api_key': self.config['DATADOG_API_KEY'],
            'app_key': self.config['DATADOG_APP_KEY'],
        }
        initialize(**options)

    def __getattr__(self, name):
        """
        Magic method for fetching attributes from `datadog.api`

        We utilize `__getattr__` to ensure that we are always compatible with
        the `datadog.api` module.
        """
        # If `datadog.api` has the attribute then return that attribute
        if dogapi and hasattr(dogapi, name):
            return getattr(dogapi, name)

        # Bug fix: the message used to read "has has attribute"
        raise AttributeError('\'API\' has no attribute \'{name}\''.format(name=name))
| sky107/python-lab-project-sky | pyprojectbackend/lib/python3.9/site-packages/flask_datadog.py | flask_datadog.py | py | 13,019 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datadog.dogstatsd.context.TimedContextManagerDecorator",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datadog.dogstatsd.base.DogStatsd",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.g.flask_datadog_start_time",
"line_number": 139... |
33626128629 | import jsonlines
import os
from pathlib import Path
from xml_handler import XmlHandler
from google_cloud_storage_client import GoogleCloudStorageClient
def main(event, context):
    """Cloud Function entry point: convert an uploaded XML object to JSON Lines.

    Triggered by a GCS object event. Downloads the XML file named in `event`,
    parses it into a list of row dicts, writes them as a `.jsonl` file under
    /tmp, and uploads the result to the bucket named by OUTPUT_BUCKET_NAME.

    :param event: GCS trigger payload with at least "name" and "bucket" keys
    :param context: Cloud Functions event context (unused)
    """
    # Resolve the source object and the destination bucket.
    source_name = event.get("name")
    source_bucket = event.get("bucket")
    destination_bucket = os.environ.get("OUTPUT_BUCKET_NAME")

    # Pull the XML file down from GCS to local disk.
    storage = GoogleCloudStorageClient()
    downloaded_path = storage.download_file_from_gcs(bucket_name=source_bucket, source_blob=source_name)

    # Parse the XML document into a list of row dicts.
    content = XmlHandler.read_xml_file(file_path=downloaded_path)
    rows = XmlHandler.parse_harvest_xml_to_json(content_dict=content)

    # Serialize the rows as JSON Lines in the function's writable /tmp dir.
    jsonl_name = Path(source_name).stem + '.jsonl'
    jsonl_path = Path("/tmp") / jsonl_name
    with jsonlines.open(jsonl_path, mode="w") as writer:
        writer.write_all(rows)

    # Ship the converted file to the output bucket and remove the local copy.
    storage.upload_file_to_gcs(bucket_name=destination_bucket, destination_blob=Path(jsonl_name).name,
                               source_filename=jsonl_path, remove_local_file=True)
| MeneerBunt/MarjolandHarvestData | src/convert_xml_to_json/main.py | main.py | py | 1,202 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "google_cloud_storage_client.GoogleCloudStorageClient",
"line_number": 15,
"usage_type": "call"
},
{
... |
21202048829 | from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from app.main import ShowcaseScreen
from app.widgets.heartfelt_hellos_button import HeartfeltHellosButton
from app.widgets.heartfelt_hellos_step_progression_button import HeartfeltHellosStepProgressionButton
from app.data.data_types.idea import Idea
class IdeaCreationScreen(ShowcaseScreen):
    """Two-step wizard for creating a conversation idea.

    Step one collects the idea prompt text; step two lets the user search and
    toggle tags, then create the Idea from the prompt and selected tags.
    """

    # Widgets built once in __init__ and repopulated per wizard step.
    scroll_view = None
    grid_layout = None

    def __init__(self, **kwargs):
        super(IdeaCreationScreen, self).__init__(**kwargs)
        # NOTE: replace tags and ideas with global list so that it can be edited in this class
        self.tags = ["books", "movies", "sports"]
        self.ideas = []
        #self.progress_grid = GridLayout(spacing='10dp', padding='10dp', cols=5, size_hint_y=None)
        self.grid_layout = GridLayout(spacing='10dp', padding='10dp', cols=1, size_hint_y=None)
        self.grid_layout.bind(minimum_height=self.grid_layout.setter("height"))
        self.scroll_view = ScrollView(do_scroll_y=True)
        self.add_widget(self.scroll_view)
        self.scroll_view.add_widget(self.grid_layout)

    def on_pre_enter(self, *args):
        # Always restart the wizard at step one when entering the screen.
        self.stepOne()

    def stepOne(self):
        """Render step 1: prompt the user for the idea text."""
        self.grid_layout.clear_widgets()
        self.grid_layout.add_widget(Label(text="What is your conversation idea?", font_size=24, color=(255,255,255)))
        self.grid_layout.add_widget(Label())
        # text box
        textinput = TextInput(hint_text="Are you still into sports?", font_size=24, size_hint_y=None, multiline=False)
        textinput.bind(text=lambda x, y: print("Hi"))
        self.grid_layout.add_widget(textinput)
        #self.name = textinput.text
        # next and back button rendering
        next_button = HeartfeltHellosStepProgressionButton(text="next", on_press=lambda x: self.stepTwo(textinput.text))
        progress_grid = GridLayout(spacing='10dp', padding='10dp', cols=3, size_hint_y=None)
        progress_grid.add_widget(Label())
        progress_grid.add_widget(Label())
        progress_grid.add_widget(next_button)
        self.grid_layout.add_widget(progress_grid)

    def stepTwo(self, prompt):
        """Render step 2: tag search/selection plus create/back navigation.

        :param prompt: the idea text entered in step 1 (stored for createIdea)
        """
        self.grid_layout.clear_widgets()
        self.prompt = prompt
        self.grid_layout.add_widget(Label(text="Search and select the tag(s)\nthat matches with your idea!", height=50, color=(255,255,255), size_hint_y=None))
        # text box
        #NOTE TO SELF: add filtering for search bar
        textinput = TextInput(hint_text="Search Tag here", height=50, font_size=24, size_hint_y=None)
        #textinput.bind(on_text_validate=on_enter(textinput.text))
        self.grid_layout.add_widget(textinput)
        # NOTE TO SELF: add scroll bar to tags section
        #tag_grid=GridLayout(spacing='10dp', padding='10dp', cols=1, size_hint_y=None)
        for tag in self.getTags():
            tag_button = HeartfeltHellosButton(text=tag, height=50, on_press=lambda x: self.pressTag(x.text), size_hint_y=None)
            self.grid_layout.add_widget(tag_button)
        #self.grid_layout.add_widget(tag_grid)
        # next and back button rendering
        create_person_button = HeartfeltHellosStepProgressionButton(text="Create\nIdea", on_press=lambda x: self.createIdea())
        back_button = HeartfeltHellosStepProgressionButton(text="back", on_press=lambda x: self.stepOne())
        progress_grid = GridLayout(spacing='10dp', padding='10dp', cols=3, size_hint_y=None)
        progress_grid.add_widget(back_button)
        progress_grid.add_widget(Label())
        progress_grid.add_widget(create_person_button)
        self.grid_layout.add_widget(progress_grid)

    def pressTag(self, name: str):
        """Toggle `name` in the selected-tags list."""
        # BUG FIX: original code was `print("pressed " + str)`, which concatenates
        # the builtin `str` type itself and raises TypeError; log the tag name.
        print("pressed " + name)
        if name not in self.tags:
            self.tags.append(name)
        else:
            self.tags.remove(name)

    def createIdea(self):
        """Build an Idea from the stored prompt and selected tags."""
        print("pressed create idea")
        # NOTE: return to idea screen + add self.idea to global list of ideas as input somehow
        self.ideas.append(Idea(self.prompt, self.tags))  # place holder (something like that maybe?)

    def on_leave(self, *args):
        # Drop step widgets so re-entering rebuilds a clean step 1.
        self.grid_layout.clear_widgets()

    def getTags(self) -> list:
        """Return the available tag names (static placeholder list)."""
        return ["sports", "books", "movies"]
| JelindoGames/HeartfeltHellos | app/data/screen_types/deprecated/idea_creation_screen.py | idea_creation_screen.py | py | 4,316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.main.ShowcaseScreen",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "kivy.uix.gridlayout.GridLayout",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "kivy.uix.scrollview.ScrollView",
"line_number": 24,
"usage_type": "call"
},
{
... |
16930544030 |
from __future__ import absolute_import
from .dataset_iter import default_collate, DatasetIter
from .samplers import RandomSampler, SequentialSampler
import torch
import os
import os.path
import warnings
import fnmatch
import math
import numpy as np
try:
import nibabel
except:
warnings.warn('Cant import nibabel.. Cant load brain images')
try:
from PIL import Image
except:
warnings.warn('Cant import PIL.. Cant load PIL images')
# Recognized image/volume file suffixes (case-sensitive on purpose).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.nii.gz', '.npy'
]


def is_image_file(filename):
    """Return True if `filename` ends with one of the known image extensions."""
    # str.endswith accepts a tuple of suffixes, so a single call covers all.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def find_classes(dir):
    """Return (sorted subdir names, {name: index}) for subdirectories of `dir`.

    Non-directory entries are ignored; indices follow sorted name order.
    """
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: idx for idx, name in enumerate(classes)}
    return classes, class_to_idx
def pil_loader(path):
    """Load an image file from `path` with PIL, forced into RGB mode."""
    img = Image.open(path)
    return img.convert('RGB')
def npy_loader(path):
    """Load a `.npy` file from `path` as a float32 torch tensor."""
    array = np.load(path).astype('float32')
    return torch.from_numpy(array)
def nifti_loader(path):
    """Read a NIfTI image from `path` using nibabel (returns the image proxy)."""
    image = nibabel.load(path)
    return image
def make_dataset(directory, class_mode, class_to_idx=None,
                 input_regex=None, target_regex=None):
    """Map a dataset from a root folder.

    Walks each class subfolder of `directory` and collects input sample paths
    whose filenames match `input_regex`. Targets depend on `class_mode`:

      * 'label' -> the class index (from `class_to_idx`) of the subfolder
      * 'image' -> paths whose filenames match `target_regex`
      * None    -> no targets; only the input list is returned
    """
    if class_mode == 'image' and not input_regex and not target_regex:
        raise ValueError('must give input_regex and target_regex if'
                         ' class_mode==image')

    inputs, targets = [], []
    for subdir in sorted(os.listdir(directory)):
        class_dir = os.path.join(directory, subdir)
        if not os.path.isdir(class_dir):
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in fnames:
                if fnmatch.fnmatch(fname, input_regex):
                    inputs.append(os.path.join(root, fname))
                    if class_mode == 'label':
                        targets.append(class_to_idx[subdir])
                if class_mode == 'image' and fnmatch.fnmatch(fname, target_regex):
                    targets.append(os.path.join(root, fname))

    if class_mode is None:
        return inputs
    return inputs, targets
class Dataset(object):
    """Abstract base class for datasets.

    Subclasses must override ``__len__`` (number of samples) and
    ``__getitem__`` (integer indexing from 0 to ``len(self)`` exclusive).
    The batch-iteration helpers below expect the subclass constructor to set
    ``self._iter``, ``self.batches_seen`` and ``self.nb_batches``.
    """

    def __getitem__(self, index):
        # Subclass responsibility: return one (transformed) sample.
        raise NotImplementedError

    def __len__(self):
        # Subclass responsibility: number of samples.
        raise NotImplementedError

    def one_epoch(self):
        """Return an iterator that walks every sample exactly once."""
        return DatasetIter(self)

    def __iter__(self):
        """Return an iterator that walks every sample exactly once."""
        return DatasetIter(self)

    def __next__(self):
        """Return the next batch, resetting after a full epoch.

        When the final batch of an epoch has been served, a fresh iterator is
        created so the dataset can be looped over indefinitely.
        """
        batch = next(self._iter)
        self.batches_seen += 1
        # Epoch boundary reached: start a fresh pass for subsequent calls.
        if self.batches_seen % self.nb_batches == 0:
            self._iter = DatasetIter(self)
        return batch

    next = __next__  # Python 2 iterator-protocol alias
class FolderDataset(Dataset):
    # Out-of-memory dataset: sample file paths are indexed once up front and
    # the actual data is loaded lazily from disk inside `__getitem__`.

    def __init__(self,
                 root,
                 class_mode='label',
                 input_regex='*',
                 target_regex=None,
                 transform=None,
                 target_transform=None,
                 co_transform=None,
                 loader='npy',
                 batch_size=1,
                 shuffle=False,
                 sampler=None,
                 num_workers=0,
                 collate_fn=default_collate,
                 pin_memory=False):
        """Dataset class for loading out-of-memory data.

        Arguments
        ---------
        root : string
            path to main directory
        class_mode : string in `{'label', 'image'}`
            type of target sample to look for and return
            `label` = return class folder as target
            `image` = return another image as target as found by 'target_regex'
            NOTE: if class_mode == 'image', you must give an
            input and target regex and the input/target images should
            be in a folder together with no other images in that folder
        input_regex : string (default is any valid image file)
            regular expression to find input images
            e.g. if all your inputs have the word 'input',
            you'd enter something like input_regex='*input*'
        target_regex : string (default is Nothing)
            regular expression to find target images if class_mode == 'image'
            e.g. if all your targets have the word 'segment',
            you'd enter something like target_regex='*segment*'
        transform : torch transform
            transform to apply to input sample individually
        target_transform : torch transform
            transform to apply to target sample individually
        loader : string in `{'npy', 'pil', 'nifti'}` or function
            defines how to load samples from file
            if a function is provided, it should take in a file path
            as input and return the loaded sample.

        Examples
        --------
        For loading input images and target images (e.g. image and its segmentation):
        >>> data = FolderDataset(root=/path/to/main/dir,
                class_mode='image', input_regex='*input*',
                target_regex='*segment*', loader='pil')
        For loading input images with sub-directory as class label:
        >>> data = FolderDataset(root=/path/to/main/dir,
                class_mode='label', loader='pil')
        """
        # Map loader shorthand strings onto the module-level loader functions;
        # any other value is assumed to already be a callable.
        if loader == 'npy':
            loader = npy_loader
        elif loader == 'pil':
            loader = pil_loader
        elif loader == 'nifti':
            loader = nifti_loader

        root = os.path.expanduser(root)
        # Index class subfolders and collect sample paths (and targets) once.
        classes, class_to_idx = find_classes(root)
        inputs, targets = make_dataset(root, class_mode,
                                       class_to_idx, input_regex, target_regex)

        if len(inputs) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))

        self.root = os.path.expanduser(root)
        self.inputs = inputs      # list of input file paths
        self.targets = targets    # class indices ('label') or target file paths ('image')
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.co_transform = co_transform
        self.loader = loader
        self.class_mode = class_mode

        self.batch_size = batch_size
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        self.pin_memory = pin_memory

        # An explicit sampler wins; otherwise `shuffle` selects random vs
        # sequential ordering over the indexed inputs.
        if sampler is not None:
            self.sampler = sampler
        elif shuffle:
            self.sampler = RandomSampler(nb_samples=len(self.inputs))
        elif not shuffle:
            self.sampler = SequentialSampler(nb_samples=len(self.inputs))

        if class_mode == 'image':
            print('Found %i input images and %i target images' %
                  (len(self.inputs), len(self.targets)))
        elif class_mode == 'label':
            print('Found %i input images across %i classes' %
                  (len(self.inputs), len(self.classes)))

        # Iteration state consumed by Dataset.__next__ (epoch tracking).
        self.batches_seen = 0
        self.nb_batches = int(math.ceil(len(self.sampler) / float(self.batch_size)))
        self._iter = DatasetIter(self)

    def __getitem__(self, index):
        """Load, transform and return the (input, target) pair at `index`."""
        # get paths
        input_sample = self.inputs[index]
        target_sample = self.targets[index]

        # load samples into memory
        # NOTE(review): make_dataset stores joined paths, and os.path.join with
        # an absolute second argument discards self.root — confirm intended.
        input_sample = self.loader(os.path.join(self.root, input_sample))
        if self.class_mode == 'image':
            target_sample = self.loader(os.path.join(self.root, target_sample))

        # apply transforms
        if self.transform is not None:
            input_sample = self.transform(input_sample)
        if self.target_transform is not None:
            target_sample = self.target_transform(target_sample)
        if self.co_transform is not None:
            input_sample, target_sample = self.co_transform(input_sample, target_sample)

        return input_sample, target_sample

    def __len__(self):
        """Number of indexed input samples."""
        return len(self.inputs)
class TensorDataset(Dataset):
    # In-memory dataset: samples are slices of tensors already held in RAM.

    def __init__(self,
                 input_tensor,
                 target_tensor=None,
                 transform=None,
                 target_transform=None,
                 co_transform=None,
                 batch_size=1,
                 shuffle=False,
                 sampler=None,
                 num_workers=0,
                 collate_fn=default_collate,
                 pin_memory=False):
        """Dataset class for loading in-memory data.

        BUG FIX: the previous docstring was copy-pasted from FolderDataset and
        described `root`/`class_mode`/`loader` parameters that do not exist here.

        Arguments
        ---------
        input_tensor : torch tensor
            tensor of input samples, indexed along dimension 0
        target_tensor : torch tensor or None
            optional tensor of targets aligned with `input_tensor`;
            when None, `__getitem__` returns only the input sample
        transform : callable or None
            transform to apply to each input sample individually
        target_transform : callable or None
            transform to apply to each target sample individually
        co_transform : callable or None
            transform applied jointly to (input, target) pairs
        batch_size : int
            number of samples per batch when iterating
        shuffle : bool
            when True (and no `sampler` is given), draw samples in random order
        sampler : sampler or None
            custom sampler over sample indices; overrides `shuffle`
        num_workers : int
            number of worker processes used by the iterator
        collate_fn : callable
            merges a list of samples into a batch
        pin_memory : bool
            whether the iterator should pin batch memory

        Examples
        --------
        >>> data = TensorDataset(x_train, y_train, batch_size=32, shuffle=True)
        """
        self.inputs = input_tensor
        self.targets = target_tensor
        # Track whether targets exist so __getitem__ can skip target handling.
        self.has_target = target_tensor is not None
        self.transform = transform
        self.target_transform = target_transform
        self.co_transform = co_transform

        self.batch_size = batch_size
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        self.pin_memory = pin_memory

        # A user-supplied sampler wins; otherwise `shuffle` selects the order.
        if sampler is not None:
            self.sampler = sampler
        elif shuffle:
            self.sampler = RandomSampler(nb_samples=len(self.inputs))
        else:
            self.sampler = SequentialSampler(nb_samples=len(self.inputs))

        # Iteration state consumed by Dataset.__next__ (epoch tracking).
        self.batches_seen = 0
        self.nb_batches = int(math.ceil(len(self.sampler) / float(self.batch_size)))
        self._iter = DatasetIter(self)

    def __getitem__(self, index):
        """Return a (transformed) input and target sample from an integer index."""
        input_sample = self.inputs[index]
        if self.has_target:
            target_sample = self.targets[index]

        # apply per-sample transforms
        if self.transform is not None:
            input_sample = self.transform(input_sample)
        if self.has_target and self.target_transform is not None:
            target_sample = self.target_transform(target_sample)
        if self.has_target and self.co_transform is not None:
            input_sample, target_sample = self.co_transform(input_sample, target_sample)

        if self.has_target:
            return input_sample, target_sample
        return input_sample

    def __len__(self):
        """Number of samples (size of dimension 0 of the input tensor)."""
        return self.inputs.size(0)
| huiyi1990/torchsample | torchsample/datasets.py | datasets.py | py | 12,057 | python | en | code | null | github-code | 6 | [
{
"api_name": "warnings.warn",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_numb... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.