seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32685087909 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @author: orleven
from lib.core.env import *
import json
from sqlalchemy import and_
from flask import request
from flask import Blueprint
from lib.core.enums import ApiStatus
from lib.core.model import DNSLog
from lib.core.model import WebLog
from lib.hander import db
from lib.hander.basehander import fix_response
from lib.hander.basehander import login_check
mod = Blueprint('api', __name__, url_prefix=f'{PREFIX_URL}/api')
@mod.route('/dnslog/list', methods=['POST', 'GET'])
@login_check
@fix_response
def api_dnslog_list():
    """Paginated DNS log listing, optionally filtered by ip/domain substring."""
    response = {'data': {'res': [], 'total': 0}}
    body = request.json
    page = body.get('page', 1)
    per_page = body.get('per_page', 10)
    domain = body.get('domain', '')
    ip = body.get('ip', '')

    # Start from an always-true condition and AND on each substring filter.
    condition = (1 == 1)
    if ip != '':
        condition = and_(condition, DNSLog.ip.like('%' + ip + '%'))
    if domain != '':
        condition = and_(condition, DNSLog.domain.like('%' + domain + '%'))

    query = db.session.query(DNSLog).filter(condition)
    if per_page == 'all':
        # 'all' bypasses pagination (and ordering) entirely.
        rows = query.all()
    else:
        rows = query.order_by(DNSLog.update_time.desc()).paginate(
            page=page, per_page=per_page).items
    response['data']['res'] = [row.to_json() for row in rows]
    response['data']['total'] = db.session.query(DNSLog).filter(condition).count()
    return response
@mod.route('/dnslog/detail', methods=['POST', 'GET'])
@login_check
@fix_response
def api_dnslog_detail():
    """Return the detail of a single DNS log entry by id.

    Bug fix: this endpoint previously queried the WebLog model; it now
    queries DNSLog, matching the /dnslog route family.
    """
    response = {'data': {'res': []}}
    dnslog_id = request.json.get('id', '')
    if dnslog_id != '':
        dnslog = db.session.query(DNSLog).filter(DNSLog.id == dnslog_id).first()
        if dnslog:
            response['data']['res'].append(dnslog.to_json())
            response['data']['total'] = 1
    return response
@mod.route('/weblog/list', methods=['POST', 'GET'])
@login_check
@fix_response
def api_weblog_list():
    """Paginated web log listing, optionally filtered by ip/url substring."""
    response = {'data': {'res': [], 'total': 0}}
    body = request.json
    page = body.get('page', 1)
    per_page = body.get('per_page', 10)
    ip = body.get('ip', '')
    url = body.get('url', '')

    # Start from an always-true condition and AND on each substring filter.
    condition = (1 == 1)
    if ip != '':
        condition = and_(condition, WebLog.ip.like('%' + ip + '%'))
    if url != '':
        condition = and_(condition, WebLog.url.like('%' + url + '%'))

    query = db.session.query(WebLog).filter(condition)
    if per_page == 'all':
        # 'all' bypasses pagination (and ordering) entirely.
        rows = query.all()
    else:
        rows = query.order_by(WebLog.update_time.desc()).paginate(
            page=page, per_page=per_page).items
    response['data']['res'] = [row.to_json() for row in rows]
    response['data']['total'] = db.session.query(WebLog).filter(condition).count()
    return response
@mod.route('/weblog/detail', methods=['POST', 'GET'])
@login_check
@fix_response
def api_weblog_detail():
    """Return one web log entry rendered as a raw HTTP request string."""
    response = {'data': {'res': []}}
    weblog_id = request.json.get('id', '')
    if weblog_id != '':
        weblog = db.session.query(WebLog).filter(WebLog.id == weblog_id).first()
        if weblog:
            weblog_dic = {}
            # Stored request headers are a JSON-encoded dict.
            request_headers = json.loads(weblog.request_headers)
            # Path portion of the URL: mask '://' first so the first '/'
            # found is the one that starts the path, not the scheme's.
            url_temp = weblog.url[weblog.url.replace('://', '___').index('/'):]
            weblog_dic['url'] = weblog.url
            # Rebuild a raw HTTP request: request line, headers, blank line, body.
            weblog_dic['request'] = weblog.method + ' ' + url_temp + ' ' + weblog.request_http_version + '\r\n'
            weblog_dic['request'] += '\r\n'.join([key + ': ' + value for key, value in request_headers.items()])
            weblog_dic['request'] += '\r\n\r\n'
            # request_content is stored as bytes; assumed UTF-8 decodable — verify writer side.
            weblog_dic['request'] += bytes.decode(weblog.request_content)
            response['data']['res'].append(weblog_dic)
            return response
    return ApiStatus.ERROR_IS_NOT_EXIST
| orleven/Celestion | lib/hander/apihander.py | apihander.py | py | 4,136 | python | en | code | 30 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.json.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.... |
6964335494 | from flask import Flask, render_template, request, url_for, jsonify
from base64 import b64encode, b64decode
app = Flask(__name__)
img = ""
@app.route('/upload', methods=["POST", "GET"])
def up():
    """Store the posted JSON payload in the module-global and echo its image field."""
    global img  # shared module-level state, overwritten on every upload
    img = request.get_json()
    # NOTE(review): assumes the payload always contains an "image" key — verify caller.
    return img["image"]
def main():
    """Start the Flask development server with default settings."""
    app.run()


if __name__ == "__main__":
    main()
| leesamu/garffiti | app/dummy_server.py | dummy_server.py | py | 333 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
}
] |
# Send a photo to the Naver Clova celebrity face-recognition API and print the match.
# req(file): sends the file data in a multipart request.
# 1. POST the request to the Clova API endpoint via requests.
# 2. Parse the JSON response and print the recognized celebrity.
import requests
import os
from pprint import pprint as pp

# Credentials come from environment variables (never hard-code API keys).
naver_id = os.getenv('NAVER_ID')
naver_secret = os.getenv('NAVER_SECRET')

url = "https://openapi.naver.com/v1/vision/celebrity"
headers = {
    'X-Naver-Client-Id': naver_id ,
    'X-Naver-Client-Secret': naver_secret
}

# 1. Fetch the remote image (streamed; currently unused — local file is sent instead).
image_url = "http://www.kbstve.com/news/photo/201604/681_616_1746.jpg"
image_res = requests.get(image_url, stream=True)  # see the requests docs for stream=True
# print(image_res.raw.read())

# 2. File payload for the multipart upload.
files = {
    'image': open('ho.jpg', 'rb')  # NOTE(review): file handle is never closed
    #'image' : image_res.raw.read()
}
res = requests.post(url, headers=headers, files=files)
result = res.json()

# Best match is the first entry of the 'faces' array.
name = result['faces'][0]['celebrity']['value']
percent = round(result['faces'][0]['celebrity']['confidence']*100)
print("닮은 연예인은 {}입니다.\n{}% 확신할 수 있습니다.".format(name,percent))
| jungeunlee95/python-practice | API/NaverApi-Cloud9/workspace/naverapi_clova_face.py | naverapi_clova_face.py | py | 1,331 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 3... |
69993904103 | import re
import base64
import xml.dom.minidom
import zlib
from xml.parsers.expat import ExpatError
def repr_saml_request(saml_str, b64=False):
    """Decode a SAML message (optionally base64-encoded, optionally raw-DEFLATE
    compressed) and return a pretty-printed XML representation.
    """
    try:
        # First attempt: plain (optionally base64-encoded) XML text.
        text = base64.b64decode(saml_str).decode() if b64 else saml_str
        dom = xml.dom.minidom.parseString(text)
    except (UnicodeDecodeError, ExpatError) as err:
        # HTTP-Redirect binding: the payload is raw-DEFLATE compressed.
        payload = base64.b64decode(saml_str) if b64 else saml_str
        try:
            inflated = zlib.decompress(payload, -15)
        except (zlib.error, TypeError):
            # Not deflated either: surface the original parse error.
            raise err from None
        dom = xml.dom.minidom.parseString(inflated.decode())
    return dom.toprettyxml()
def encode_http_redirect_saml(saml_envelope):
    """Raw-DEFLATE (RFC 1951) and base64-encode a SAML envelope for the
    HTTP-Redirect binding.

    Bug fix: zlib.compress() emits a zlib-wrapped stream (RFC 1950); the
    SAML HTTP-Redirect binding — and repr_saml_request's inflate path,
    which uses wbits=-15 — require raw DEFLATE without the zlib header.
    """
    compressor = zlib.compressobj(wbits=-zlib.MAX_WBITS)
    deflated = compressor.compress(saml_envelope.encode()) + compressor.flush()
    return base64.b64encode(deflated)
def saml_request_from_html_form(html_str):
    """Extract the base64 SAMLRequest value from an HTML POST-binding form.

    Raises:
        ValueError: if no SAMLRequest input is present.
    """
    # Base64 alphabet is A-Z a-z 0-9 + / with '=' padding; the previous
    # pattern omitted '/', so valid payloads could be truncated or missed.
    regexp = 'name="SAMLRequest" value="(?P<value>[a-zA-Z0-9+/=]*)"'
    authn_request = re.findall(regexp, html_str)
    if not authn_request:
        raise ValueError("AuthnRequest not found in htmlform")
    return authn_request[0]
| italia/spid-django | src/djangosaml2_spid/utils.py | utils.py | py | 1,171 | python | en | code | 40 | github-code | 36 | [
{
"api_name": "base64.b64decode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parseString",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 15,
"usage_type": "attribute"
},
{
... |
19089281909 | import datetime
from django.db import models
from imagekit.models import ImageModel
from structure.models import Author
from video.managers import PublishedVideoManager
from tagging.fields import TagField
class Video(ImageModel):
    """An embedded YouTube/Vimeo video with an uploaded screenshot thumbnail."""

    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)
    pub_date = models.DateTimeField(default=datetime.datetime.now,
                                    auto_now_add=True)
    link = models.URLField(
        help_text="Insert Link to YouTube or Vimeo video. e.g. "
                  "http://www.youtube.com/watch?v=vnVkGSAqCIE. Make sure the "
                  "link is http, not httpS")
    tags = TagField()
    caption = models.TextField()
    photographer = models.ForeignKey(Author, blank=True, null=True)
    screenshot = models.ImageField(upload_to='video_thumbs/%Y/%m/%d',
                                   help_text='Please convert all images to RGB JPEGs.')
    is_published = models.BooleanField()
    is_tweeted = models.BooleanField(editable=False, default=False)

    objects = models.Manager()
    published = PublishedVideoManager()

    class IKOptions:
        # ImageKit configuration for generated thumbnails.
        spec_module = 'video.specs'
        cache_dir = 'photo_cache'
        image_field = 'screenshot'

    class Meta:
        ordering = ['-pub_date']

    @models.permalink
    def get_absolute_url(self):
        """Permalink to the detail view, keyed by publish date and slug."""
        return ('video.views.video_detail', (), {
            'datestring': self.pub_date.strftime("%Y-%m-%d"),
            'slug': self.slug})

    def get_twitter_message(self):
        """Short message used when tweeting this video.

        Bug fix: the original format string had two %s placeholders but
        only one argument, raising TypeError at runtime.
        """
        return u'Video: %s' % self.name

    def model_type(self):
        """Class name, used for generic template dispatch."""
        return self.__class__.__name__

    def __unicode__(self):
        return self.name
| queensjournal/queensjournal.ca | apps/video/models.py | models.py | py | 1,662 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "imagekit.models.ImageModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": ... |
71961794665 | import math
import sys
import imutils
sys.path.append("..")
import cv2
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from DatasetBuilding.drivingDataset import DrivingDataset
from torch.utils.data import DataLoader
from modelCNN import CNN
def drawLivePredictions(frame, acceleration, steering):
    """Overlay driving indicators on a frame: a green steering fan with a red
    prediction needle, plus a vertical acceleration bar on the left.
    """
    OPEN_ANGLE = 120
    LINE_LENGTH = 120
    CENTER = (int(frame.shape[1] / 2), frame.shape[0])
    CENTER_ANGLE = -90
    ACCEL_DISPLAY_OFFSET = 150

    def endpoint(angle_deg):
        # Point LINE_LENGTH pixels from CENTER at the given angle (degrees).
        return (int(CENTER[0] + math.cos(math.radians(angle_deg)) * LINE_LENGTH),
                int(CENTER[1] + math.sin(math.radians(angle_deg)) * LINE_LENGTH))

    half_fan = int(OPEN_ANGLE / 2)
    left_pt = endpoint(CENTER_ANGLE - half_fan)
    right_pt = endpoint(CENTER_ANGLE + half_fan)
    pred_pt = endpoint(CENTER_ANGLE + int(OPEN_ANGLE / 2 * steering))

    # Fan boundaries in green, predicted steering needle in red.
    cv2.line(frame, CENTER, left_pt, (0, 255, 0), 3)
    cv2.line(frame, CENTER, right_pt, (0, 255, 0), 3)
    cv2.line(frame, CENTER, pred_pt, (0, 0, 255), 3)

    # Acceleration bar: black background, green for throttle, red for braking.
    bar_color = (0, 255, 0) if acceleration >= 0 else (0, 0, 255)
    bar_x = CENTER[0] - ACCEL_DISPLAY_OFFSET
    cv2.line(frame, (bar_x, CENTER[1]),
             (bar_x, int(CENTER[1] - LINE_LENGTH)), (0, 0, 0), 10)
    cv2.line(frame, (bar_x, CENTER[1]),
             (bar_x, int(CENTER[1] - LINE_LENGTH * acceleration)), bar_color, 5)
    return frame
# Device (selected but unused below; model inference runs on CPU tensors).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load Model
model = CNN()
model.load_state_dict(torch.load("../../res/models/FINAL_CNN_epochs_500_3.0"))
model.eval()

# Load Data
dataset = DrivingDataset("../../res/datasets/full.txt", isTrainSet=False, minAcceleration=0.20)
dataloader = DataLoader(dataset, batch_size=100, shuffle=False)
# inputs, targets = next(iter(dataloader))

end = False
while not end:
    for batchIdx, (batchData, targets) in enumerate(dataloader):
        for frame in batchData:
            # Single-channel frame -> (N=1, C=1, H, W) for the CNN.
            preds = model(frame.view(1, 1, frame.shape[1], frame.shape[2]))
            preds = preds.flatten().detach().numpy()
            accel = preds[0]
            steer = preds[1]
            steer = max(min(steer, 1), -1)  # clamp steering to [-1, 1]

            frame = frame[0].numpy().astype(np.uint8)
            # Duplicate the grayscale plane twice.
            # NOTE(review): this yields 4 channels, not 3 — confirm intended for cv2.imshow.
            frame = np.dstack((frame, frame))
            frame = np.dstack((frame, frame))
            frame = imutils.resize(frame, width=500)
            drawLivePredictions(frame, acceleration=accel, steering=steer)
            frame = imutils.resize(frame, width=1000)

            cv2.imshow('Live Model Decisions', frame)
            # 'q' quits the whole playback loop.
            if cv2.waitKey(100) & 0xFF == ord('q'):
                end = True
                break
        if end:
            break
cv2.destroyAllWindows()
| FredCarvalhoOliveira/SelfDrivingCar | scripts/testing/evaluateLiveModelDriving.py | evaluateLiveModelDriving.py | py | 3,078 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "math.cos",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number"... |
36988267529 | # TODO merge naive and weighted loss.
import torch
import torch.nn.functional as F
def weighted_nll_loss(pred, label, weight, avg_factor=None):
    """Element-weighted NLL loss, normalized by the positive-weight count.

    Returns a tensor of shape (1,).
    """
    if avg_factor is None:
        # Default normalizer: number of non-zero weights, at least 1.
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    per_sample = F.nll_loss(pred, label, reduction='none')
    return (per_sample * weight).sum()[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
    """Element-weighted cross-entropy.

    Returns a (1,) tensor when reduce=True, otherwise the weighted
    per-sample losses divided by avg_factor.
    """
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    per_sample = F.cross_entropy(pred, label, reduction='none') * weight
    if not reduce:
        return per_sample / avg_factor
    return per_sample.sum()[None] / avg_factor
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
    """Element-weighted BCE-with-logits, normalized by positive-weight count.

    Returns a tensor of shape (1,).
    """
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    total = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight.float(), reduction='sum')
    return total[None] / avg_factor
def sigmoid_focal_loss(pred,
                       target,
                       weight,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean'):
    """Sigmoid focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    pt is the probability assigned to the wrong class; hard examples
    (large pt) are up-weighted by pt**gamma.
    """
    prob = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - prob) * target + prob * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * weight * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    mode = F._Reduction.get_enum(reduction)  # none: 0, mean: 1, sum: 2
    if mode == 1:
        return loss.mean()
    if mode == 2:
        return loss.sum()
    return loss
def weighted_sigmoid_focal_loss(pred,
                                target,
                                weight,
                                gamma=2.0,
                                alpha=0.25,
                                avg_factor=None,
                                num_classes=80):
    """Summed sigmoid focal loss normalized by positives per class.

    Returns a tensor of shape (1,).
    """
    if avg_factor is None:
        # Positive-weight count averaged over classes; +1e-6 avoids
        # division by zero when there are no positives.
        avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6
    return sigmoid_focal_loss(
        pred, target, weight, gamma=gamma, alpha=alpha,
        reduction='sum')[None] / avg_factor
def mask_cross_entropy(pred, target, label):
    """Per-ROI BCE-with-logits on the mask channel selected by each ROI's label.

    Returns a tensor of shape (1,).
    """
    num_rois = pred.size(0)
    inds = torch.arange(num_rois, dtype=torch.long, device=pred.device)
    # Pick each ROI's class-specific prediction, then drop the singleton dim.
    class_pred = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        class_pred, target, reduction='mean')[None]
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    """Huber-style smooth L1: quadratic below beta, linear above.

    'mean' divides the summed loss by the total element count.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_err = (pred - target).abs()
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    loss = torch.where(abs_err < beta, quadratic, linear)
    mode = F._Reduction.get_enum(reduction)  # none: 0, mean: 1, sum: 2
    if mode == 1:
        return loss.sum() / pred.numel()
    if mode == 2:
        return loss.sum()
    return loss
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    """Element-weighted smooth L1, normalized per bbox (4 coords per positive).

    Returns a tensor of shape (1,).
    """
    if avg_factor is None:
        # /4 converts coordinate count to box count; +1e-6 avoids div by zero.
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
    loss = smooth_l1_loss(pred, target, beta, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor
def accuracy(pred, target, topk=1):
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, 1, True, True)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
| implus/PytorchInsight | detection/mmdet/core/loss/losses.py | losses.py | py | 4,005 | python | en | code | 845 | github-code | 36 | [
{
"api_name": "torch.sum",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.sum",
... |
73188648103 | import time
import pyautogui as pg
import userlist
import pyperclip
def log():
    """Debug helper: print the mouse position once per second (blocks forever)."""
    while True:
        time.sleep(1)
        print(pg.position())
def goto_tribe_room():
    """Click through the game UI to enter the tribe room (hard-coded screen coordinates)."""
    pg.moveTo(1075,820, 1)
    pg.click()
    pg.moveTo(1680, 600, 1)
    pg.click()
    time.sleep(3)
def execute_lua(luadata):
    """Open the in-game /lua editor, paste and run the script, then return to room 1."""
    pg.press("Enter")
    pg.write("/lua")
    # Clipboard paste is used instead of pg.write for the multi-line script.
    pyperclip.copy(luadata)
    pg.press("Enter")
    pg.moveTo(1425,430)
    pg.click()
    time.sleep(0.2)
    # Replace the editor's current contents with the clipboard.
    pg.hotkey("ctrl", "a")
    pg.press("backspace")
    pg.hotkey("ctrl", "v")
    pg.moveTo(1440, 580)
    pg.click()
    pg.moveTo(1290,820)
    pg.click()
    pg.write("/room 1")
    pg.press("Enter")
def inviter(luadata):
    """Invite every user listed in userlist.txt to the tribe room, then run the Lua payload."""
    goto_tribe_room()
    with open('userlist.txt', 'r') as f:
        arr = [line.strip() for line in f]
    for i in arr:
        # Paste the name via clipboard to avoid pg.write mangling special characters.
        pyperclip.copy(i)
        pg.press('enter')
        pg.write("/inv ")
        pg.hotkey("ctrl", "v")
        time.sleep(1)
        pg.press("Enter")
    execute_lua(luadata)
def change_room(room_list):
    """Visit each room in room_list, collect its user list, and return the union as a set."""
    arr = []
    for i in room_list:
        pg.press("Enter")
        pg.write("/room " + i)
        time.sleep(1)
        pg.press("Enter")
        time.sleep(3)  # wait for the room (and its user list) to load
        arr = arr + ulist()
    pg.press("Enter")
    print(set(arr), len(arr))
    return set(arr)
def silence():
    """Post the decoy 'incorrect version' silence message in chat."""
    pg.press("Enter")
    pg.write("/silence Incorrect version, try to reload the game.")
    pg.press("Enter")
    time.sleep(2)
def ulist():
    """Return the current room's user names via the userlist helper module."""
    arr = userlist.getUserlist()
    #print(arr, len(arr))
    return arr
def get_data():
    """Crawl a fixed set of rooms and write the unique user names to userlist.txt."""
    room_list = ["vanilla2", "vanilla1", "1", "5", "survivor787", "survivor esek", "survivor", "racing1", "racing96846468"]
    arr = change_room(room_list)
    with open('userlist.txt', 'w') as f:
        for i in arr:
            f.write(f"{i}\n")
def clear_userlist():
    """Truncate userlist.txt.

    Opening in "w" mode already empties the file, so no write is needed,
    and the `with` block closes the handle — the original's explicit
    f.write("") and f.close() were redundant.
    """
    with open("userlist.txt", "w"):
        pass
def main(luadata):
    """Full run: post the decoy message, harvest users, invite them and fire the Lua payload."""
    #log()
    silence()
    get_data()
    inviter(luadata=luadata)
    #clear_userlist()
if __name__ == '__main__':
    # Lua payload pasted into the in-game editor; the countdown in eventLoop
    # delays the shaman-object spam by a few event ticks.
    luadata = """data = 3
function eventLoop (datA)
if data == 0 then
for i = 0, 9999
do
tfm.exec.addShamanObject(4,-100,-200,0,0,0,false)
end
else
print(data)
data = data - 1
end
end
--Incorrect version, try to reload the game."""
    main(luadata=luadata)
| MuhammetSonmez/Transformice-Game-Crusher | main.py | main.py | py | 2,381 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyautogui.position",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"... |
43731215301 | # app.py
from flask import Flask, render_template
from urls import get_urls
app = Flask(__name__)
@app.route('/')
def index():
    """Render the homepage with the scraped URL list."""
    urls = get_urls()
    return render_template('index.html', urls=urls)


if __name__ == '__main__':
    app.run()
| VignanBaligari234/PythonSraper | app.py | app.py | py | 245 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urls.get_urls",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
}
] |
40265642473 | from flask import Flask, render_template
from post import Post
from api import data
# Build Post objects once at import time from the raw API payload.
postList = []
for post in data:
    postItem = Post(post['id'],post['title'], post['body'])
    postList.append(postItem)
app = Flask(__name__)
@app.route('/')
def home():
    """Render the index page with every post."""
    return render_template("index.html", posts=postList)
@app.route('/post/<int:id>')
def post(id):
    """Render the detail page for the post with the given id.

    Bug fix: the original left `info` unbound (UnboundLocalError -> 500)
    when no post matched, and kept scanning after a match was found.
    """
    info = None
    for post_info in postList:
        if post_info.id == id:
            info = post_info
            break  # stop at the first match
    return render_template("post.html", post=info)
if __name__ == "__main__":
    # debug=True enables the reloader and debugger; development only.
    app.run(debug=True)
{
"api_name": "api.data",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "post.Post",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number"... |
4705717498 | #!/usr/bin/env python3
"""
project: Pythonic Card Deck
created:2021-10-19
@author:seraph
email:seraph776@gmail.com
"""
from collections import namedtuple
from random import choice
# namedtuple used to construct a simple class to represent individual cards:
# Lightweight record type for an individual playing card.
Card = namedtuple('Card', ['rank', 'suit'])


class CardDeck:
    """A standard 52-card deck supporting len(), indexing and slicing."""

    # Rank order: 2-10 then the face cards and ace.
    ranks: list = [str(n) for n in range(2, 11)] + list('JQKA')
    suits: list = 'spades diamonds clubs hearts'.split()

    def __init__(self):
        """Build the full deck, grouped by suit in declaration order."""
        self._cards = []
        for suit in self.suits:
            for rank in self.ranks:
                self._cards.append(Card(rank, suit))

    def __len__(self):
        """Number of cards in the deck."""
        return len(self._cards)

    def __getitem__(self, position):
        """Delegate to the underlying list: supports ints and slices."""
        return self._cards[position]
def main():
    """Demo: build a deck, inspect a few cards, then draw one at random."""
    deck = CardDeck()
    print(len(deck))  # 52
    print(deck[0])  # Card(rank='2', suit='spades')
    print(deck[0][0])  # 2
    print(deck[0][1])  # spades
    # Get a random card
    print(choice(deck))


if __name__ == '__main__':
    main()
| rishawsingh/Hacktoberfest_2021 | code/pythonic_card_deck/pythonic_card_deck.py | pythonic_card_deck.py | py | 1,188 | python | en | code | null | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 44,
"usage_type": "call"
}
] |
74060660904 | import asyncio
import contextlib
from contextlib import closing
from unittest import mock
import pytest
from server import ServerContext
from server.core import Service
from server.lobbyconnection import LobbyConnection
from server.protocol import DisconnectedError, QDataStreamProtocol
from tests.utils import exhaust_callbacks, fast_forward
class MockConnection:
    """Minimal connection double used to exercise ServerContext callbacks."""

    def __init__(self):
        self.protocol = None
        self.peername = None
        self.user_agent = None
        self.version = None
        self.on_connection_lost = mock.AsyncMock()

    async def on_connection_made(self, protocol, peername):
        # Immediately close both directions so the server sees EOF right away.
        self.protocol = protocol
        self.peername = peername
        self.protocol.writer.write_eof()
        self.protocol.reader.feed_eof()

    async def on_message_received(self, msg):
        pass
@pytest.fixture
def mock_connection():
    return MockConnection()


@pytest.fixture
def mock_service():
    # Autospec keeps assertions honest against the real Service interface.
    return mock.create_autospec(Service)


@pytest.fixture
async def mock_context(mock_connection, mock_service):
    """ServerContext listening on an ephemeral local port using the mocks."""
    ctx = ServerContext("TestServer", lambda: mock_connection, [mock_service])
    yield await ctx.listen("127.0.0.1", None), ctx
    await ctx.stop()
    await ctx.shutdown()
@pytest.fixture
async def context(mock_service):
    """ServerContext that builds real LobbyConnections backed by mocked services."""
    def make_connection() -> LobbyConnection:
        return LobbyConnection(
            database=mock.Mock(),
            game_service=mock.Mock(),
            players=mock.Mock(),
            nts_client=mock.Mock(),
            geoip=mock.Mock(),
            ladder_service=mock.Mock(),
            party_service=mock.Mock(),
            rating_service=mock.Mock(),
            oauth_service=mock.Mock(),
        )

    ctx = ServerContext("TestServer", make_connection, [mock_service])
    yield await ctx.listen("127.0.0.1", None), ctx
    await ctx.stop()
    await ctx.shutdown()
async def test_serverside_abort(
    event_loop,
    mock_context,
    mock_connection,
    mock_service
):
    """A connection the server aborts must fire connection-lost on both hooks."""
    srv, ctx = mock_context
    reader, writer = await asyncio.open_connection(*srv.sockets[0].getsockname())
    with closing(writer):
        proto = QDataStreamProtocol(reader, writer)
        await proto.send_message({"some_junk": True})
        await exhaust_callbacks(event_loop)
        mock_connection.on_connection_lost.assert_any_call()
        mock_service.on_connection_lost.assert_called_once()
async def test_connection_broken_external(context):
    """
    When the connection breaks while the server is calling protocol.send from
    somewhere other than the main read - response loop. Make sure that this
    still triggers the proper connection cleanup.
    """
    srv, ctx = context
    _, writer = await asyncio.open_connection(*srv.sockets[0].getsockname())
    writer.close()
    # Need this sleep for test to work, otherwise closed protocol isn't detected
    await asyncio.sleep(0)

    proto = next(iter(ctx.connections.values()))
    # Zero the write buffer so the oversized send below must hit the transport.
    proto.writer.transport.set_write_buffer_limits(high=0)

    # Might raise DisconnectedError depending on OS
    with contextlib.suppress(DisconnectedError):
        await proto.send_message({"command": "Some long message" * 4096})

    await asyncio.sleep(0.1)
    assert len(ctx.connections) == 0
async def test_unexpected_exception(event_loop, context, caplog, mocker):
    """Protocol read errors must be logged, not crash the server loop."""
    srv, ctx = context
    mocker.patch.object(
        ctx.protocol_class,
        "read_message",
        mock.AsyncMock(
            side_effect=RuntimeError("test")
        )
    )
    with caplog.at_level("TRACE"):
        _, writer = await asyncio.open_connection(*srv.sockets[0].getsockname())
        with closing(writer):
            assert "Exception in protocol" in caplog.text
async def test_unexpected_exception_in_connection_lost(context, caplog):
    """Errors raised by a service's on_connection_lost are logged and contained."""
    srv, ctx = context
    # __name__ is set because the error path logs the callback's name.
    ctx._services[0].on_connection_lost = mock.Mock(
        side_effect=RuntimeError("test"),
        __name__="on_connection_lost"
    )
    with caplog.at_level("TRACE"):
        _, writer = await asyncio.open_connection(*srv.sockets[0].getsockname())
        writer.close()
        await asyncio.sleep(0.1)

    assert "Unexpected exception in on_connection_lost" in caplog.text
@fast_forward(20)
async def test_drain_connections(context):
    """drain_connections blocks while a client is open and completes after close."""
    srv, ctx = context
    _, writer = await asyncio.open_connection(*srv.sockets[0].getsockname())

    # With the client still connected, draining must time out.
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(
            ctx.drain_connections(),
            timeout=10
        )

    writer.close()
    await asyncio.wait_for(
        ctx.drain_connections(),
        timeout=3
    )
| FAForever/server | tests/integration_tests/test_servercontext.py | test_servercontext.py | py | 4,597 | python | en | code | 64 | github-code | 36 | [
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "unittest.mo... |
10367895809 | import youtube_dl
import os
from sys import argv
from pydub import AudioSegment
# PROCESS OF CONVERSION
def process(f):
audioIN = AudioSegment.from_file(f)
print("Processing.... " + str(f))
audioIN.export(f[:-4] + "mp3", format="mp3")
os.remove(f)
# CONFIG OF DOWNLOAD
download_config = {
'format': 'bestaudio/best',
'outtmpl': '%(title)s.%(ext)s',
'nocheckcertificate': True,
'postprocessor': [{
'key': 'FFmpegExtractAudio',
'preferredcodedc': 'mp3',
'preferredquality': '192',
}],
}
# SONG DIRECTORY
if not os.path.exists('Songs'):
os.mkdir('Songs')
os.chdir('Songs')
# DOWNLOADING
with youtube_dl.YoutubeDL(download_config) as dl:
with open("../" + argv[1],'r') as f:
for song_url in f:
dl.download([song_url])
#CONVERSION
files = os.listdir(".")
for f in files:
if f.lower()[-4:] == "webm":
process(f) | ayushmanbt/MyPythonStuff | MY OWN SOFTWARES/MUSIC APP/main.py | main.py | py | 956 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydub.AudioSegment.from_file",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exist... |
34994415249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://www.pythonchallenge.com/pc/rock/arecibo.html:kohsamui:thailand"""
__author__ = "子風"
__copyright__ = "Copyright 2015, Sun All rights reserved"
__version__ = "1.0.0"
import get_challenge
from PIL import Image
import copy
import time
def getdata(url):
f = get_challenge.download(url, "kohsamui", "thailand")
flag = -1
size = []
hor = []
ver = []
temp = [size, hor, ver]
for i in f.getvalue().decode().split('\n'):
if i == '':
pass
elif i[0] == '#':
flag += 1
else:
temp[flag].append(list(map(int, i.split())))
return (size[0][0], size[0][1], hor, ver)
def genv(v, l, marks):
'''遞歸方式獲取可能的排列
v=描述串(列表) l=行/列長度 marks=(填滿,留空)
'''
r = []
if v:
for i in range(l-(len(v)-1)-sum(v)+1): # 長度-中間留空-總填滿 = 可移動空間
ri = marks[1]*i + marks[0]*v[0] #開頭
if len(v) == 1:
r += [ri + marks[1]*(l-len(ri))] # 剩下留空
else:
rr = genv(v[1:], l-len(ri)-1, marks) # 長度少 1 因需留空
r += [ri + marks[1] + vv for vv in rr]
return r
else:
return [marks[1]*l]
# 必定的值
def confirmedSingle(origin, idx, l):
'''檢查 l 中所有 item 的第 idx 項是否一致,不一致則返回原值,否則返回這項的值'''
for item in l:
if item[idx] != l[0][idx]:
return origin
return l[0][idx]
# 填滿確定值
def confirmed(table, hl, vl):
'''table 填入所有確定的值'''
for j, l in enumerate(hl):
for i in range(len(l[0])):
table[j][i] = confirmedSingle(table[j][i], i, l)
for i, l in enumerate(vl):
for j in range(len(l[0])):
table[j][i] = confirmedSingle(table[j][i], j, l)
return table
# 檢查是否符合
def checkMatch(tl, l):
for i in range(len(tl)):
if tl[i] != "?" and tl[i] != l[i]:
return False
return True
# 去掉不符合的組合
def removeMismatch(table, hls, vls):
thl = []
tvl = []
for j, hl in enumerate(hls):
t = []
for rl in hl:
if checkMatch(table[j], rl):
t.append(rl)
if t:
thl.append(t)
for i, vl in enumerate(vls):
t = []
for cl in vl:
if checkMatch([l[i] for l in table], cl):
t.append(cl)
if t:
tvl.append(t)
return (thl, tvl)
# 檢查所有組合的可能性
def combineAll(tables, hl, vl, size, checktable=[], xy=(0, 0), getOne=False):
# print(xy)
if hl: # 利用 hl 組合可能的 table
# 利用 hl 檢查可能的 vl
for l in hl[0]:
tvl = []
for i, vs in enumerate(vl):
t = []
for col in vs:
if col[xy[1]] == l[i]:
t.append(col)
if not t:
tvl = []
break
tvl.append(t)
if not tvl:
continue
checktable.append(l)
print('\n', xy)
print('\n'.join(checktable))
ans = combineAll(tables, hl[1:], tvl, size, checktable, xy=(xy[0], xy[1]+1), getOne=getOne)
if ans:
tables.append(copy.deepcopy(ans))
if tables and getOne:
return None
# 回覆原本的狀態
checktable.pop()
return None
else: # if len(vl) == size[1]:
return checktable
def measureTime(func):
def with_measureTime(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
elapsed = (time.perf_counter() - start)
print("get elapsed:", elapsed)
input("pause...")
return result
return with_measureTime
@measureTime
def solved(width, height, hnum, vnum):
# print(width, height, hnum, vnum)
table = [["?" for _ in range(width)] for _ in range(height)]
Hlist = [genv(a, width, ('1', '0')) for a in hnum]
Vlist = [genv(a, height, ('1', '0')) for a in vnum]
# print('all possible row/col generated.')
# 此部分還可改進速度,將 confirmed & removeMismatch 合併寫,如 solvedFast
# 但考慮可讀性就不修了
while True:
sumH = sum([len(x) for x in Hlist])
sumV = sum([len(x) for x in Vlist])
table = confirmed(table, Hlist, Vlist)
Hlist, Vlist = removeMismatch(table, Hlist, Vlist)
print('H after: ',)
print(','.join([str(len(x)) for x in Hlist]))
print('V after: ',)
print(','.join([str(len(x)) for x in Vlist]))
sumHt = sum([len(x) for x in Hlist])
sumVt = sum([len(x) for x in Vlist])
if sumH == sumHt and sumV == sumVt:
break
# print('removeMismatch')
tables = []
combineAll(tables, Hlist, Vlist, (width, height), getOne=True)
# print(tables)
if tables:
print("\nfinish")
for t in tables:
print('\n'.join(t))
else:
print("no solution")
imgs = []
for t in tables:
img = Image.new('L', (width, height))
img.putdata([(x == '0') and 255 or 0 for l in t for x in l])
imgs.append(img.resize((width*10, height*10)))
return imgs
@measureTime
def solvedFast(width, height, hnum, vnum):
    """Faster nonogram solver: every cell confirmed by confirmedSingle is
    used immediately to prune the crossing line's candidate list.

    NOTE(review): loops until every cell is resolved -- presumably the
    puzzle must have a unique solution, otherwise this never terminates;
    TODO confirm.
    Returns a single PIL image of the solution, scaled x10.
    """
    # print(width, height, hnum, vnum)
    table = [["?" for _ in range(width)] for _ in range(height)]
    Hlist = [genv(a, width, ('1', '0')) for a in hnum]
    Vlist = [genv(a, height, ('1', '0')) for a in vnum]
    totalnumber = width*height
    resovlednumber = 0
    itercnt = 1
    resovled = table
    while resovlednumber < totalnumber:
        print('nitercnt=%d' % (itercnt))
        for i, rows in enumerate(Hlist):
            for j in range(width):
                if resovled[i][j] == '?':
                    t = confirmedSingle(None, j, rows)
                    if t:
                        resovled[i][j] = t
                        # Immediately use the confirmed cell to shrink the
                        # candidate list of the corresponding column.
                        Vlist[j] = [item for item in Vlist[j] if item[i] == t]
                        resovlednumber += 1
        for i, cols in enumerate(Vlist):
            for j in range(height):
                if resovled[j][i] == '?':
                    t = confirmedSingle(None, j, cols)
                    if t:
                        resovled[j][i] = t
                        # Immediately use the confirmed cell to shrink the
                        # candidate list of the corresponding row.
                        Hlist[j] = [item for item in Hlist[j] if item[i] == t]
                        resovlednumber += 1
        print('H after: ',)
        print(','.join([str(len(x)) for x in Hlist]))
        print('V after: ',)
        print(','.join([str(len(x)) for x in Vlist]))
        itercnt += 1
    # print(tables)
    print("\nfinish")
    print('\n'.join([''.join(l) for l in resovled]))
    img = Image.new('L', (width, height))
    img.putdata([(x == '0') and 255 or 0 for l in resovled for x in l])
    return img.resize((width*10, height*10))
# Fetch the puzzle definition from the challenge site and solve it
# (warmup.txt is the easier demo puzzle, up.txt the real one).
# width, height, hnum, vnum = getdata("http://www.pythonchallenge.com/pc/rock/warmup.txt")
# result = solved(width, height, hnum, vnum)
width, height, hnum, vnum = getdata("http://www.pythonchallenge.com/pc/rock/up.txt")
result = solvedFast(width, height, hnum, vnum)
result = solved(width, height, hnum, vnum)[0]
# google Free" as in "Free speech", not as in "free 可得 beer
| z-Wind/Python_Challenge | Level32_Nonogram.py | Level32_Nonogram.py | py | 7,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "get_challenge.download",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "time.perf_co... |
70082118504 |
import os, platform, subprocess
from tempfile import TemporaryDirectory
import zipfile
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.scrolledtext as scrolledtext
from tkinter import filedialog
from pdfrw import PdfReader, PdfWriter, PdfDict, PdfName
# Directory holding this script (used to locate the window icon).
basedir = os.path.dirname(__file__)

# Path to the LibreOffice binary used for the document -> pdf conversion.
if platform.system() == 'Windows':
    win = True
    LO = "C:\\Program Files\\LibreOffice\\program\\soffice.exe"
else:
    win = False
    LO = "libreoffice"
def run_extern(command, *args, cwd = None, xpath = None, feedback = None):
    """Run an external program.

    Pass the command and the arguments as individual strings.
    The command must be either a full path or a command known in the
    run-time environment (PATH).

    Named parameters can be used to set:
     - cwd: working directory. If provided, change to this for the
       operation.
     - xpath: an additional PATH component (prefixed to PATH).
     - feedback: If provided, it should be a function. It will be called
       with each line of output as this becomes available.

    Return a tuple: (return-code, message).
    return-code: 0 -> ok, 1 -> fail, -1 -> command not available.
    If return-code >= 0, return the output as the message.
    If return-code = -1, return a message reporting the command.
    """
    # Note that using the <timeout> parameter will probably not work,
    # at least not as one might expect it to.
    params = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.STDOUT,
        'universal_newlines': True
    }
    my_env = os.environ.copy()
    # os.name == 'nt' is equivalent to the module-level `win` flag
    # (platform.system() == 'Windows') but keeps this function self-contained.
    if os.name == 'nt':
        # Suppress the console window on Windows.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        params['startupinfo'] = startupinfo
    if xpath:
        # Extend the PATH for the child process.
        my_env['PATH'] = xpath + os.pathsep + my_env['PATH']
        params['env'] = my_env
    if cwd:
        # Switch working directory for the child process.
        params['cwd'] = cwd
    cmd = [command] + list(args)
    try:
        if feedback:
            # Stream the output line by line through the callback.
            out = []
            with subprocess.Popen(cmd, bufsize=1, **params) as cp:
                for line in cp.stdout:
                    l = line.rstrip()
                    out.append(l)
                    feedback(l)
            msg = '\n'.join(out)
        else:
            cp = subprocess.run(cmd, **params)
            msg = cp.stdout
        return (0 if cp.returncode == 0 else 1, msg)
    except FileNotFoundError:
        # BUG FIX: the original referenced an undefined _COMMANDNOTPOSSIBLE
        # constant and therefore raised NameError whenever the command was
        # missing, instead of returning the documented (-1, message).
        return (-1, "Command not available: {cmd}".format(cmd=repr(cmd)))
def libre_office(in_list, pdf_dir):
    """Convert a list of odt (or docx or rtf, etc.) files to pdf-files.

    *in_list* holds absolute paths to the input files, *pdf_dir* is the
    absolute path to the output folder -- the appimage build of
    LibreOffice requires absolute paths, so they are used on principle.
    LibreOffice may not block until conversion finishes on all versions,
    so callers should verify the expected pdf files exist afterwards.
    Conversion output is streamed line by line into the GUI log.
    """
    options = ['--headless', '--convert-to', 'pdf', '--outdir', pdf_dir]
    options += list(in_list)
    # Forward every output line straight to the log widget.
    rc, msg = run_extern(LO, *options, feedback=REPORT)
def merge_pdf(ifile_list, ofile, pad2sided = False):
    """Concatenate the pdf-files in *ifile_list* and write them to *ofile*.

    With pad2sided=True a blank page is appended to every input document
    that has an odd page count, so that double-sided printing never mixes
    two documents on one sheet of paper.
    """
    writer = PdfWriter()
    for source_path in ifile_list:
        pages = PdfReader(source_path).pages
        if pad2sided and len(pages) % 2 == 1:
            # Clone the first page and blank its content stream to obtain
            # a correctly sized empty page.
            blank = pages[0].copy()
            blank.Contents = PdfDict(stream='')
            pages.append(blank)
        writer.addpages(pages)
    writer.write(ofile)
def get_input():
    """Ask the user for a folder and merge the documents it contains."""
    text.delete('1.0', tk.END)  # clear the log widget
    # Earlier variant: pick individual files instead of a folder.
    # files = filedialog.askopenfilenames(
    #         parent=root,
    ##        initialdir='/',
    ##        initialfile='tmp',
    #        filetypes=[
    #            ("All files", "*"),
    #            ("Word", "*.docx"),
    #            ("LibreOffice", "*.odt"),
    #            ("RTF", "*.rtf")
    #        ]
    #    )
    idir = filedialog.askdirectory(parent=root)
    if idir:
        conjoindir(idir)
def get_zip():
    """Ask for a zip archive, extract it and merge each folder's documents."""
    text.delete('1.0', tk.END)  # clear the log widget
    zfile = filedialog.askopenfilename(
        parent=root,
    #    initialdir='/',
    #    initialfile='tmp',
        filetypes=[("Zip-Dateien", ".zip*")]
    )
    if zfile:
        with TemporaryDirectory() as zdir:
            # Extract archive
            try:
                with zipfile.ZipFile(zfile, mode="r") as archive:
                    archive.extractall(zdir)
            except zipfile.BadZipFile as error:
                REPORT(f"FEHLER: {error}")
                return
            # Handle files not in a subfolder; the merged pdf is named
            # after the zip file itself.
            conjoindir(zdir, name=os.path.basename(zfile.rsplit(".", 1)[0]))
            # Handle subfolders
            for d in sorted(os.listdir(zdir)):
                sdir = os.path.join(zdir, d)
                if os.path.isdir(sdir):
                    conjoindir(sdir)
def conjoindir(idir, name=None):
    """Convert every odt/docx/rtf file in *idir* to pdf and merge them.

    The merged pdf is offered via a save dialog, defaulting to *name*
    (or the folder name) with a .pdf extension.  Nothing happens when the
    folder contains no convertible files or the conversion is incomplete.
    """
    #print("???", idir)
    files = []
    for f in sorted(os.listdir(idir)):
        try:
            b, e = f.rsplit(".", 1)
        except ValueError:
            continue  # no file extension
        if e in ("odt", "docx", "rtf"):
            files.append(os.path.join(idir, f))
    if files:
        idir = os.path.dirname(files[0])
        with TemporaryDirectory() as odir:
            # Busy cursor while LibreOffice runs.
            root.config(cursor="watch")
            text.config(cursor="watch")  # Seems to be needed additionally!
            root.update_idletasks()
            libre_office(files, odir)
            root.config(cursor="")
            text.config(cursor="")
            pfiles = []
            REPORT("\n *******************************\n")
            # Verify that each input produced a pdf (conversion may fail
            # or not block until done for some LibreOffice versions).
            for f in files:
                bpdf = os.path.basename(f).rsplit(".", 1)[0] + ".pdf"
                fpdf = os.path.join(odir, bpdf)
                if os.path.isfile(fpdf):
                    pfiles.append(fpdf)
                else:
                    # BUG FIX: this was a plain string literal, so the
                    # placeholder "{fpdf}" was shown instead of the path.
                    REPORT(f" *** FEHLER, Datei fehlt: {fpdf}")
            if len(files) == len(pfiles):
                sfile = filedialog.asksaveasfilename(
                    parent=root,
                    defaultextension=".pdf",
                    #initialdir=idir,
                    initialfile=(name or os.path.basename(idir)) + ".pdf",
                    filetypes=[("PDF", "*.pdf")]
                )
                if sfile:
                    if not sfile.endswith(".pdf"):
                        sfile += ".pdf"
                    merge_pdf(pfiles, sfile,
                              pad2sided=twosided.instate(['selected'])
                              )
                    REPORT(f" --> {sfile}")
def REPORT(line):
    """Append *line* to the log widget and scroll it into view."""
    text.insert(tk.END, line.rstrip() + "\n")
    root.update_idletasks()  # keep the UI responsive during long runs
    text.yview(tk.END)
if __name__ == "__main__":
    root = tk.Tk()
    # Tk quirk: poke the file-dialog machinery once so the hidden-files
    # variable exists, then default to hiding hidden files.
    try:
        root.tk.call('tk_getOpenFile', '-foobarbaz')
    except:
        pass
    try:
        #root.tk.call('set', '::tk::dialog::file::showHiddenBtn', '1')
        root.tk.call('set', '::tk::dialog::file::showHiddenVar', '0')
    except:
        pass
    root.title("Conjoin2pdf")
    root.iconphoto(True, tk.PhotoImage(file=os.path.join(basedir, 'conjoin2pdf.png')))
    #root.geometry('300x300')
    # Two entry points: a plain folder, or a zip archive of folders.
    bt = ttk.Button(
        root,
        text="Eingabe von Ordner (mit odt-, docx-, rtf-Dateien)",
        command=get_input
    )
    bt.pack(fill=tk.X, padx=3, pady=3)
    btz = ttk.Button(
        root,
        text="Eingabe von Zip-Datei",
        command=get_zip
    )
    btz.pack(fill=tk.X, padx=3, pady=3)
    twosided = ttk.Checkbutton(root, text="Doppelseitige Ausgabe")
    twosided.pack(fill=tk.X, padx=3, pady=3)
    #twosided.state(['!alternate', '!selected'])
    twosided.state(['!alternate', 'selected'])  # default: double-sided padding on
    #print("?$?", twosided.state())
    #print("?$?", twosided.instate(['selected']))
    text = scrolledtext.ScrolledText(root)#, state=tk.DISABLED)
    text.bind("<Key>", lambda e: "break")  # make the log effectively read-only
    text.pack()
    root.update()
    # Center the window on the screen.
    w, h = root.winfo_width(), root.winfo_height()
    #print(f"w={w}, h={h}")
    x = int(root.winfo_screenwidth()/2 - w/2)
    y = int(root.winfo_screenheight()/2 - h/2)
    #x, y = 200, 100
    root.geometry(f"+{x}+{y}")
    root.mainloop()
| gradgrind/conjoin2pdf | conjoin2pdf.py | conjoin2pdf.py | py | 8,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "platform.system",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"l... |
26336658479 | import requests
from datetime import datetime
import api_keys
# Timestamps used for the spreadsheet row of today's workout.
now = datetime.now()
formatted_now_date = now.strftime("%d/%m/%Y")
formatted_now_time = now.time().strftime("%H:%M:%S")

exercise_ep = "https://trackapi.nutritionix.com/v2/natural/exercise"
sheety_url = "https://api.sheety.co/dd81a891f83891fcffdab1dcc7c5a53e/casualWorkoutTracking/workouts"

# Natural-language exercise query; Nutritionix estimates calories from
# the personal parameters stored in api_keys.
exercise_params = {
    "query": input("Tell me which exercises you did today: \n"),
    "gender": api_keys.GENDER,
    "weight_kg": api_keys.WEIGHT_KG,
    "height_cm": api_keys.HEIGHT_CM,
    "age": api_keys.AGE
}
exercise_response = requests.post(url=exercise_ep, headers=api_keys.header, json=exercise_params)
exercise_data = exercise_response.json()

headers = {
    "Authorization": f"Bearer {api_keys.BEARER_TOKEN}"
}

# One Sheety spreadsheet row per exercise Nutritionix recognised.
for exercise in exercise_data["exercises"]:
    print(exercise)
    print("==========================================")
    body = {
        "workout": {
            "date": formatted_now_date,
            "time": formatted_now_time,
            "exercise": exercise["name"],
            "calories": exercise["nf_calories"],
            "duration": exercise["duration_min"]
        }
    }
    sheety_response = requests.post(url=sheety_url, json=body, headers=headers)
    sheety_data = sheety_response.json()
    print(sheety_data)
| Zoom30/100-python | Day 38/Day 38.py | Day 38.py | py | 1,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "api_keys.GENDER",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "api_keys.W... |
25623525939 | # the simplest solution - simply run this script at startup
# - output will go to console and alert if there is a diff
# For first run will compare with specified html file
# or create new one if nonexistant
import time
import urllib.request as ur
import sys, getopt
import os.path
from datetime import datetime
import difflib
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import logging
## If you are going to recommit this file with your own information, look for no-comit hooks
# These hooks will filter sensitive information on my machine, but will not do so on your machine
# logging setup
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
filename='tmp.log',
filemode='w')
### default params, can be passed as flags to change
secondsToSleep = 3600 # 3600s = 1hr
numberOfRuns = -1 #-1 is infinite
urlToCheck = "www.Google.com" #no-commit
fileToWrite = "sourceOfLastRun.html" # file with last source of site
verbose = True # if True, print
log = True # if True, write to log
# email parameters
sendEmailFlag = True
emailDest = ["www.Google.com"] #no-commit, make a list if multiple recipients
emailType = 'html' # can also be html. Other values will break!
#############
### Values to be set by modifying this file only, not thru command line
emailSrc = emailDest[0] # change here if dest is different than from
emailSrcPass = "www.Google.com" #no-commit this password generated from https://support.google.com/accounts/answer/185833
# refer to README.md for more details
#############
def checkUrl():
    """Fetch the page and compare it with the last saved copy.

    On a difference the saved copy is refreshed and the diff is e-mailed;
    on the very first run the file is created and the check re-run so the
    comparison path executes immediately.
    """
    with ur.urlopen(urlToCheck) as openedUrl:
        fetchedHtml = str(openedUrl.read().decode('utf-8'))
    if (os.path.isfile(fileToWrite)):
        currentFileHtml = open(fileToWrite).read()
        if (currentFileHtml == fetchedHtml):
            writeLine("same at " + createTimeStamp())
        else:
            writeFileBack(fetchedHtml)
            writeLine("diff at " + createTimeStamp())
            diff = getDiffString(currentFileHtml, fetchedHtml)
            emailDiff(diff)
    else:
        # First run: persist the page, then recurse once to compare.
        writeFileBack(fetchedHtml, True)
        checkUrl()
# feel free to modify this function as needed to 'prettify' your email bodies
def emailDiff(diff):
    """Send *diff* by e-mail via Gmail SMTP (no-op when sending is disabled)."""
    if (sendEmailFlag):
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()  # upgrade the connection to TLS before login
        server.login(emailSrc, emailSrcPass)
        msg = createMIMEMsg(diff)
        server.sendmail(emailSrc, emailDest, msg.as_string())
        server.quit()
def createMIMEMsg(txt):
    """Wrap *txt* in a MIME message with the configured subject/recipient."""
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'Latest diff of webpage "www.Google.com"' #no-commit
    msg['To'] = emailDest[0]
    txt += "\n\n\n\n\n\nURL of webpage: " + urlToCheck
    html = MIMEText(txt, emailType)  # emailType is 'html' (or 'plain')
    msg.attach(html)
    return msg
def writeFileBack(src, created = False,):
    """Overwrite the tracked html file with *src* and log the action.

    *created* only changes the wording of the log line (first run vs
    subsequent overwrites).
    """
    with open(fileToWrite, 'w') as out:
        out.write(src)
    action = 'created' if created else 'overwritten'
    writeLine(" File " + action + " at " + createTimeStamp())
def getDiffString(orig, new):
    """Return only the changed lines from a context diff of the two texts.

    Lines prefixed '!' in difflib's context-diff output mark changed
    lines; the prefix character is stripped before joining.
    """
    changed = [entry[1:]
               for entry in difflib.context_diff(orig.splitlines(), new.splitlines())
               if entry[0] == '!']
    return '\n'.join(changed)
# prints a simple time stamp for readibility
def createTimeStamp():
    """Return the current time, e.g. 'Jan 05, 2024 at 03:12.45 PM'."""
    return datetime.now().strftime("%b %d, %Y at %I:%M.%S %p")
def writeLine(s):
    """Emit *s* to the console and/or the log file, per the global flags."""
    if verbose:
        print(s)
    if log:
        logging.info(s)
if __name__ == "__main__":
    # Parse -s/--seconds (poll interval) and -r/--runs (-1 = run forever).
    opts, args = getopt.getopt(sys.argv[1:],"hs:r:",["seconds=", "runs="])
    for opt, arg in opts:
        if opt == '-h':
            print('python checkURLNoTask.py -s <seconds>')
            sys.exit()
        elif opt in ("-s", "--seconds"):
            secondsToSleep = int(arg)
        elif opt in ("-r", "--runs"):
            numberOfRuns = int(arg)
    line = "checking url: " + urlToCheck + " every " + str(secondsToSleep) + " seconds " + str(numberOfRuns) + " times with email sending: " + str(sendEmailFlag)
    writeLine(line)
    runCount = 0
    # Main polling loop; never terminates when numberOfRuns is -1.
    while runCount != numberOfRuns:
        checkUrl()
        runCount+=1
        time.sleep(secondsToSleep)
writeLine("Checked URL " + str(numberOfRuns) + ", ending script") | alexbudy/urlDiffCheck | checkURLNoTask.py | checkURLNoTask.py | py | 4,126 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "urllib.r... |
72607742505 | #!/usr/bin/env python3
import numpy
import opt_analy
import matplotlib
import matplotlib.pyplot as plt
def plot(az_list, el_list, dx_list, dy_list, file_list, raw=True):
    """Plot pointing offsets (dx, dy) against azimuth/elevation.

    az_list/el_list/dx_list/dy_list are presumably per-file sequences of
    measurements -- TODO confirm against the opt_analy callers.
    When raw is True the summary statistics are recomputed by
    opt_analy.process_static (with 3-sigma clipping); otherwise they are
    taken directly from the flattened input lists.  Shows five scatter
    panels plus a statistics table, then opens the figure window.
    """
    if raw == True:
        dx_avg, dy_avg, dx_std, dy_std, *_ = opt_analy.process_static(file_list, clip_sigma=(3.,3.))
    else:
        # Flatten the per-file lists before computing the statistics.
        az = [e for inner_list in az_list for e in inner_list]
        el = [e for inner_list in el_list for e in inner_list]
        dx = [e for inner_list in dx_list for e in inner_list]
        dy = [e for inner_list in dy_list for e in inner_list]
        dx_avg, dy_avg, dx_std, dy_std = numpy.average(dx), numpy.average(dy), numpy.std(dx), numpy.std(dy)
    # 5 scatter panels + 1 statistics table in a 3x2 grid.
    nrow = 3
    ncol = 2
    nax = ncol * nrow
    figsize = (ncol * 4, nrow * 4)
    fig = plt.figure(figsize=figsize)
    fig.subplots_adjust(wspace=0.4, hspace=0.4)
    ax = [fig.add_subplot(nrow, ncol, i+1) for i in range(nax-1)]
    matplotlib.rcParams['savefig.dpi'] = 200
    matplotlib.rcParams['font.size'] = 14
    # One scatter series per input file on each panel.
    for az, el, dx, dy in zip(az_list, el_list, dx_list, dy_list):
        ax[0].plot(az, dx, '.', label='diff')
        ax[1].plot(el, dx, '.', label='diff')
        ax[2].plot(az, dy, '.', label='diff')
        ax[3].plot(el, dy, '.', label='diff')
        ax[4].plot(dx, dy, '.', label='diff')
    ax[0].set_xlabel('Az [deg.]')
    ax[0].set_ylabel('dx [arcsec.]')
    ax[0].set_title('Az vs dx')
    ax[1].set_xlabel('El [deg.]')
    ax[1].set_ylabel('dx [arcsec.]')
    ax[1].set_title('El vs dx')
    ax[2].set_xlabel('Az [deg.]')
    ax[2].set_ylabel('dy [arcsec.]')
    ax[2].set_title('Az vs dy')
    ax[3].set_xlabel('El [deg.]')
    ax[3].set_ylabel('dy [arcsec.]')
    ax[3].set_title('El vs dy')
    ax[4].set_xlabel('dx [arcsec.]')
    ax[4].set_ylabel('dy [arcsec.]')
    ax[4].set_title('dx vs dy')
    [_ax.grid() for _ax in ax]
    # Layout tweaks: with many files the legend needs more vertical room.
    if len(file_list) < 8:
        tbl_loc = 'lower center'
        legend_loc = (1.1, 0.35)
    else:
        tbl_loc = 'bottom'
        legend_loc = (1.1, 0.02)
    # Statistics table: per-axis mean/std plus the combined magnitude.
    tbl = fig.add_subplot(3,2,6)
    col_labels=['average','std dev',]
    row_labels=[' dx ',' dy ', ' unite ']
    tbl_vals=[["{:.2e}".format(dx_avg), "{:.2f}".format(dx_std)],
              ["{:.2e}".format(dy_avg), "{:.2f}".format(dy_std)],
              ["{:.2e}".format(numpy.sqrt(dx_avg**2+dy_avg**2)), "{:.2e}".format(numpy.sqrt(dx_std**2+dy_std**2))]
              ]
    tbl.table(cellText=tbl_vals, rowLabels=row_labels, colLabels=col_labels, loc=tbl_loc)
    tbl.set_axis_off()
    fig.tight_layout()
    ax[4].legend(labels=file_list, loc=legend_loc)
    plt.show()
| nanten2/necst-ros | lib/plot.py | plot.py | py | 2,582 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "opt_analy.process_static",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.fi... |
14566461168 | from io import BytesIO
from .models import Book
from librarian import DocProvider
from django.http import HttpResponse
class RedakcjaDocProvider(DocProvider):
    """Used for getting books' children.

    Resolves a slug to the materialized text of the corresponding Book,
    honouring the `publishable` flag given at construction time.
    """

    def __init__(self, publishable):
        # When True, only the publishable revision of a book is served.
        self.publishable = publishable

    def by_slug(self, slug):
        """Return the book identified by *slug* as a UTF-8 byte stream."""
        # BUG FIX: removed a stray debug print(slug) that logged every
        # lookup to stdout in production.
        return BytesIO(Book.objects.get(catalogue_book_id=slug
                                ).materialize(publishable=self.publishable
                                ).encode('utf-8'))
def serve_file(file_path, name, mime_type):
    """Stream the file at *file_path* as a downloadable attachment.

    *name* is the filename offered to the browser, *mime_type* the
    Content-Type of the response.  The file is copied in 8 KiB chunks so
    large files do not have to fit in memory at once.
    """
    response = HttpResponse(content_type=mime_type)
    response['Content-Disposition'] = 'attachment; filename=%s' % name
    with open(file_path, 'rb') as src:
        while True:
            chunk = src.read(8192)
            if not chunk:
                break
            response.write(chunk)
    return response
| fnp/redakcja | src/documents/ebook_utils.py | ebook_utils.py | py | 937 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "librarian.DocProvider",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Book.objects.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Book.o... |
74177779624 | from aiogram import types, Dispatcher
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
async def cmd_start(message: types.Message, state: FSMContext):
    """Handler for /start: reset any FSM state and invite the user to order."""
    await state.finish()
    await message.answer(
        "Начните ваш заказ (/food)!",
        reply_markup=types.ReplyKeyboardRemove()  # drop any leftover keyboard
    )
async def cmd_cancel(message: types.Message, state: FSMContext):
    """Handler for /cancel: abort the current FSM flow and clear the keyboard."""
    await state.finish()
    await message.answer("Действие отменено", reply_markup=types.ReplyKeyboardRemove())
def register_handlers_common(dp: Dispatcher):
dp.register_message_handler(cmd_start, commands="start", state="*")
dp.register_message_handler(cmd_cancel, commands="cancel", state="*")
dp.register_message_handler(cmd_cancel, Text(equals="отмена", ignore_case=True), state="*") | ARSecret/arsPython | DZ/handlers/common.py | common.py | py | 839 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.Message",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "a... |
70640513703 | import torch
import numpy as np
class CategoriesSampler():
    """Episodic batch sampler for few-shot learning.

    Each of the n_batch batches picks n_cls random classes and n_per
    random sample indices per class, yielded as one flat index tensor.
    """

    def __init__(self, label, n_batch, n_cls, n_per):
        self.n_batch = n_batch  # batches per epoch
        self.n_cls = n_cls      # classes ("ways") per batch
        self.n_per = n_per      # samples ("shots") per class
        labels = np.array(label)
        # For every class id, remember the dataset indices of its samples.
        self.m_ind = []
        for cls_id in range(max(labels) + 1):
            indices = np.argwhere(labels == cls_id).reshape(-1)
            self.m_ind.append(torch.from_numpy(indices))

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            chosen = torch.randperm(len(self.m_ind))[:self.n_cls]
            picks = []
            for cls in chosen:
                pool = self.m_ind[cls]
                picks.append(pool[torch.randperm(len(pool))[:self.n_per]])
            # stack -> (n_cls, n_per); transpose groups same-shot samples
            # together before flattening.
            yield torch.stack(picks).t().reshape(-1)
| yaoyao-liu/e3bm | dataloader/samplers.py | samplers.py | py | 880 | python | en | code | 48 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.randperm",
"lin... |
9710177195 | import re
from dataclasses import dataclass
from pprint import pprint
import pygame
import pyscroll
import pytmx
from random import randint, seed
from src import player
from src.player import NPC, Player
from lib_dijkstra import Point
verbose = False
# seed(1)
def groups_in_list(lst, code='X', blank=' '):
    """Find a list of continuous signs. This is used to try to reduce memory usage.

    Returns (first, last) inclusive index pairs for every maximal run of
    *code* in *lst*.

    >>> groups_in_list ((' ', ' ', 'X', 'X', 'X', 'X', 'X', ' ', 'X', 'X', ' '))
    [(2, 6), (8, 9)]
    >>> groups_in_list ((' ', ' ', 'X', 'X', 'X', 'X', 'X', ' ', 'X', 'X'))
    [(2, 6), (8, 9)]
    """
    # Simplified from the original: the `if last:` guard was always true
    # (last >= 1 by construction) and the trailing `else: again = False`
    # branch was unreachable, so both were removed.
    walls = []
    current = 0
    while True:
        try:
            first = lst.index(code, current)
        except ValueError:
            break  # no further run of `code`
        try:
            last = lst.index(blank, first + 1)
        except ValueError:
            last = len(lst)  # run extends to the end of the list
        walls.append((first, last - 1))
        current = last
    return walls
@dataclass
class Portal:
    # A one-way link: stepping on origin_point in from_world teleports the
    # player to teleport_point in target_world.
    from_world: str      # map the player is leaving
    origin_point: str    # name of the trigger object in the source .tmx
    target_world: str    # map the player arrives in
    teleport_point: str  # name of the spawn object in the target .tmx
# Comes from https://coderslegacy.com/pygame-platformer-coins-and-images/
class Coin(pygame.sprite.Sprite):
    """A collectible coin sprite worth a random number of points."""
    # Intentionally, there are more 1 point coins than 50 points coins.
    values = (1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 5, 5, 5, 10, 10, 20, 50)

    def __init__(self, pos):
        super().__init__()
        self.name = 'coin'
        self.image = pygame.image.load("../map/coin.png")
        self.rect = self.image.get_rect()
        self.rect.topleft = pos
        # "Feet" collision box, mirroring the player/NPC sprites.
        self.feet = pygame.Rect(0, 0, self.rect.width * 0.5, 16)
        # Weighted random value: low denominations are more frequent.
        self.value = Coin.values[randint(0, len(Coin.values) - 1)]

    def move_back(self):
        # Coins never move; present so the group can treat all sprites alike.
        pass
@dataclass
class Map:
    # Everything the manager needs to run one loaded .tmx map.
    name: str                      # map identifier, e.g. 'world'
    walls: list[pygame.Rect]       # collision rectangles
    group: pyscroll.PyscrollGroup  # scrolling sprite group for rendering
    simple_map: list               # 0/1 grid of walls (for pathfinding)
    tmx_data: pytmx.TiledMap       # raw Tiled map data
    portals: list[Portal]          # exits to other maps
    npcs: list[NPC]                # NPCs living on this map
class MapManager:
    """General manager of all maps"""

    def __init__(self, master_game, screen, player):
        """Load the maps, then teleport the player and finally the NPCs."""
        self.master_game = master_game
        self.maps = dict()  # "house" -> Map ("house", walls, group)
        self.screen = screen
        self.player = player
        self.current_map = 'world'
        # Each Portal states how to enter another world.
        # Caution: from_world absolutely must contain every origin_point.
        self.register_map('world',
                          portals=[Portal(from_world="world", origin_point='enter_house', target_world="house",
                                          teleport_point="spawn_from_world")],
                          npcs=[  # NPC('paul', nb_areas=4),
                              NPC('robin', self, 'world')])
        self.register_map('house',
                          portals=[
                              Portal(from_world='house', origin_point='enter_world', target_world='world',
                                     teleport_point="spawn_from_house"),
                              Portal(from_world='house', origin_point='enter_dungeon', target_world='dungeon',
                                     teleport_point="spawn_from_house")
                          ])
        self.register_map('dungeon',
                          portals=[
                              Portal(from_world='dungeon', origin_point='enter_house', target_world='house',
                                     teleport_point="spawn_from_dungeon"),
                              Portal(from_world='dungeon', origin_point='enter_world', target_world='world',
                                     teleport_point="spawn_from_dungeon")
                          ])
        self.teleport_player('player')
        self.teleport_npcs()

    def register_map(self, map_name, portals=None, npcs=None):
        """Load one .tmx map: walls, coins, sprite group and simplified grid."""
        if npcs is None:
            npcs = []
        if portals is None:
            portals = []
        if verbose:
            print("Registering map", map_name)
        # Load the map.
        tmx_data = pytmx.util_pygame.load_pygame(f"../map/{map_name}.tmx")
        map_data = pyscroll.data.TiledMapData(tmx_data)
        map_layer = pyscroll.orthographic.BufferedRenderer(map_data, self.screen.get_size())
        map_layer.zoom = 1
        # Define a list of collisions.
        walls = []
        # Coins are added as sprites (method taken from
        # https://coderslegacy.com/pygame-platformer-coins-and-images/ ).
        coins = pygame.sprite.Group()
        for obj in tmx_data.objects:
            if obj.type == "collision":
                walls.append(pygame.Rect(obj.x, obj.y, obj.width, obj.height))
            elif obj.type == "coin_place":
                coins.add(Coin((obj.x - 24, obj.y - 24)))  # offset value poorly adjusted
        # Add the whole water area as walls, except where a path tile is
        # laid on top of the water.
        water_blocks = []
        if 'water' in tmx_data.layernames:
            for y, line in enumerate(tmx_data.layernames['water'].data):
                line_wall = []
                for x, cell in enumerate(line):
                    if cell != 0 and tmx_data.layernames['path'].data[y][x] == 0:
                        line_wall.append('X')
                    else:
                        line_wall.append(' ')
                water_blocks.append(line_wall)
        # Merge consecutive water cells into single wide wall rectangles
        # (16 px tiles) to keep the wall list small.
        for y, line in enumerate(water_blocks):
            for group in groups_in_list(line, code='X', blank=' '):
                walls.append(pygame.Rect(group[0] * 16, y * 16, (group[1] - group[0] + 1) * 16, 16))
        # Draw the layer group.
        # default_layer at 0: character on grass, under path
        group = pyscroll.PyscrollGroup(map_layer=map_layer, default_layer=5)  # Why 5:
        group.add(self.player)
        # group.add(npcs)
        group.add(coins)
        for npc in npcs:
            group.add(npc)
        # Build a simplified 0/1 grid of the walls (used for pathfinding).
        simple_map = build_simple_map_from_tmx(tmx_data, walls, reduction_factor=2)
        # Create a Map object.
        self.maps[map_name] = Map(map_name, walls, group, simple_map, tmx_data, portals, npcs)

    def teleport_player(self, player_name):
        """Place the player on the named spawn object of the current map."""
        point = self.get_object(player_name)
        self.player.position[0] = point.x - 16
        self.player.position[1] = point.y - 32  # align the level of the feet
        self.player.save_location()

    def teleport_npcs(self):
        """Give every NPC its patrol areas, a path and an initial position."""
        for map_name in self.maps:
            map_data = self.maps[map_name]
            for npc in map_data.npcs:
                npc.areas = self.get_object_by_regex(map_data, r"robin_path\d")
                npc.areas_nb = len(npc.areas)  # BOUH
                npc.define_first_target()
                npc.calculate_move_direction()
                npc.calculate_dijkstra()
                npc.teleport_npc()
        pass

    def check_collision(self):
        """Handle portal traversal, wall collisions and coin pickups."""
        # portals
        for portal in self.get_map().portals:
            if portal.from_world == self.current_map:
                point = self.get_object(portal.origin_point)
                rect = pygame.Rect(point.x, point.y, point.width, point.height)
                if self.player.feet.colliderect(rect):
                    copy_portal = portal
                    self.current_map = portal.target_world
                    self.teleport_player(copy_portal.teleport_point)
                    self.master_game.point_counter.points += 100
        # collisions, coins
        for my_sprite in self.get_group().sprites():
            # fix BUG_SAUT: only move back when the sprite is a Player, not an NPC
            # if isinstance(my_sprite, Player):
            if my_sprite.name == "player":
                if my_sprite.feet.collidelist(self.get_walls()) > -1:
                    my_sprite.move_back()
            if isinstance(my_sprite, Coin):
                if self.player.feet.colliderect(my_sprite):
                    if verbose:
                        print(f"Miam ! {my_sprite.value} points !!")
                    self.master_game.point_counter.points += my_sprite.value
                    my_sprite.kill()

    def get_map(self):
        """Return the Map record of the current map."""
        return self.maps[self.current_map]

    def get_group(self):
        return self.get_map().group

    def get_walls(self):
        return self.get_map().walls

    def get_object(self, name):
        """Return the named object from the current map's tmx data."""
        return self.get_map().tmx_data.get_object_by_name(name)

    # automatically find the number of objects matching a regex,
    # for example "paul_path\d"
    def get_object_by_regex(self, map, regex):
        """Return objects witch name match with a regex"""
        carte = map.tmx_data
        all_objects = carte.objects
        matching_lst = []
        for tiled_object in all_objects:
            if re.match(regex, str(tiled_object.name)):
                obj = self.get_object(tiled_object.name)
                matching_lst.append(pygame.Rect(obj.x, obj.y, obj.width, obj.height))
        return matching_lst

    def update(self):
        """Per-frame update for the active map: sprites, collisions, NPCs."""
        self.get_group().update()
        self.check_collision()
        # Move the NPCs.
        for npc in self.get_map().npcs:
            npc.move()

    def draw(self):
        """Render the active map, keeping the camera centered on the player."""
        self.get_group().draw(self.screen)
        self.get_group().center(self.player.rect.center)
def build_simple_map_from_tmx(tmx_data, walls_block_list, reduction_factor):
    """Deduce a 2 dimensional array from a tmx map

    Samples one 1x1-pixel probe per (reduction_factor x reduction_factor)
    tile cell: 1 where the probe hits a wall rectangle, 0 elsewhere.
    """
    bin_map = []
    size = tmx_data.tilewidth
    map_w = tmx_data.width * size
    map_h = tmx_data.height * size
    steps = size * reduction_factor  # pixel distance between probes
    dec = int(steps / reduction_factor)  # vertical offset into each cell
    for i, y in enumerate(range(0 + dec, map_h + dec, steps)):
        line_map = []
        for j, x in enumerate(range(0, map_w, steps)):
            PP = pygame.Rect(x, y, 1, 1)
            if PP.collidelist(walls_block_list) != -1:  # See documentation of colidelist()
                line_map.append(1)
            else:
                line_map.append(0)
        bin_map.append(line_map)
    if verbose:
        print("Même pas planté !")
        pprint(bin_map)
        print("La carte est ci-dessus : ! ")
    return (bin_map)
| bermau/PW_19_pygamon | src/map.py | map.py | py | 10,396 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pygame.sprite",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pygame.ima... |
16305876969 | import matplotlib.pyplot as plt
import geopandas
# 3D Plot
# 3D Plot
def plot3Dtrajectory(name, desc, x, y, z):
    """Draw a satellite trajectory in 3D (axes in km) and save it as a png.

    name/desc are used for the title and the exported file name under
    Export/; x, y, z are the coordinate series in km.
    """
    fig = plt.figure(figsize=(6,6))
    ax = fig.add_subplot(111, projection = '3d')
    ax.plot(x, y, z, color = 'purple', label = 'GPS', marker = '')
    plt.title(name + " " + desc)
    # BUG FIX: the x-axis label read '|X]' instead of '[X]'.
    ax.set_xlabel('[X] = km')
    ax.set_ylabel('[Y] = km')
    ax.set_zlabel('[Z] = km')
    # Symmetric limits roughly covering GNSS orbital radii.
    ax.set_xlim3d(-33000,33000)
    ax.set_ylim3d(-33000,33000)
    ax.set_zlim3d(-33000,33000)
    plt.savefig("Export/" + name + "_" + desc + '.png')
def plotGroundTrack(name, desc, lat, long, min, max):
    """Scatter the ground track (lat/long) over a world map and save as png.

    min/max are the minimum/maximum latitude shown in the caption.
    (NOTE(review): the parameter names shadow the builtins min/max.)
    """
    countries = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
    countries.plot(color = "grey")  # world outline as background
    plt.scatter(long, lat, color = "purple")
    plt.grid()
    plt.ylim(-90,90)
    plt.xlim(-180,180)
    plt.title(name + " " + desc)
    plt.figtext(0.5, 0.15, "Minimale Breite: " + str(min) + "° / maximale Breite: " + str(max) + "°", ha = "center", fontsize = 9, style = "italic")
    plt.savefig("Export/" + name + "_" + desc + '.png')
def polarPlot(name, desc, az, el):
    """Sky plot: azimuth/elevation scatter in polar coordinates, saved as png."""
    fig, ax = plt.subplots(1,1, figsize=(8,8), subplot_kw=dict(projection='polar'))
    plt.scatter(az, el, color='purple', label=name)
    ax.set_theta_zero_location("N")  # azimuth 0 points North
    ax.set_theta_direction(-1)       # clockwise, like a compass
    plt.figtext(0.54, 0.5, "Elevation", rotation=62.5)
    plt.ylim(90,0)  # inverted radius: zenith (el=90) at the center
    plt.title(name + " " + desc)
    plt.savefig("Export/" + name + "_" + desc + '.png')
def elevationPlot(name, desc, el, timeoverHorizon):
time = []
if name == "Lageos1":
for i in range(0, 86401, 120):
time.append(i)
else:
for i in range(0, 86401, 300):
time.append(i)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
plt.scatter(time, el, color= "purple")
plt.ylim(0,90)
plt.xlim(0, 86400)
plt.grid()
plt.title(name + " " + desc)
plt.figtext(0.5, 0.03, "Sichtbare Zeit: " + str(timeoverHorizon) + "s (" + str(timeoverHorizon / 3600) + "h)", ha = "center", fontsize = 9)
plt.savefig("Export/" + name + "_" + desc + '.png') | d33pk3rn3l/Satellite-geodesy-Exercise-1 | plotter.py | plotter.py | py | 2,117 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matp... |
15506053954 | from flask import Flask, render_template, request, Response
import random
test_app = Flask(__name__)
@test_app.route('/')
def getRandomData():
    """Render the demo data table with 10 000 randomly generated rows."""
    rows = []
    for row_id in range(0, 10000):
        rows.append({
            'id': row_id,
            'name': "test_name_{0}".format(random.randint(0, 1000)),
            'phone': random.randint(2308, 903234),
            'status': random.choice([True, False]),
        })
    context = {"title": "Data table test", "data": rows}
    return render_template('table_example.html', **context)
if __name__ == "__main__":
test_app.run(host ="localhost") | hssaka7/flask_boilerplate | datatable/app.py | app.py | py | 567 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_num... |
24447934064 | import os
from multiprocessing import Process
from pathlib import Path
from shutil import rmtree
from time import sleep
import lmdb
import pytest
import rocksdb
from rocksdb.errors import RocksIOError
class TestDBPath:
rocksdb = "test_rocksdb"
lmdb = "test_lmdb"
def print_process_info(title):
    """Print a separator banner, the given title, and module/process IDs."""
    print("\n--------------------------")
    print(title)
    for label, value in (
        ("module name:", __name__),
        ("parent process:", os.getppid()),
        ("process id:", os.getpid()),
    ):
        print(label, value)
def f_rocksdb(name: str):
print_process_info(f"function f_rocksdb({name})")
data = b"test_test_test_rocksdb"
try:
db = rocksdb.DB(TestDBPath.rocksdb, rocksdb.Options(create_if_missing=True))
db.put(b"name", data)
print("f_rocksdb as a Writer")
except RocksIOError:
sleep(0.1)
db = rocksdb.DB(TestDBPath.rocksdb, rocksdb.Options(create_if_missing=True), read_only=True)
if data != db.get(b"name"):
exit(-1)
print("f_rocksdb as a Reader")
sleep(1)
def f_lmdb(name: str):
print_process_info(f"function f_lmdb({name})")
data = b"test_test_test_lmdb"
if name == "prop1":
env = lmdb.open(TestDBPath.lmdb)
with env.begin(write=True) as txn:
txn.put(b"name", data)
print("f_lmdb as a Writer")
else:
sleep(0.1)
env = lmdb.open(TestDBPath.lmdb)
with env.begin() as txn:
if data != txn.get(b"name"):
exit(-1)
print("f_lmdb as a Reader")
sleep(1)
def delete_test_db_dirs():
    """Remove the LMDB and RocksDB test directories if they exist."""
    for db_path in (Path(f"./{TestDBPath.lmdb}"), Path(f"./{TestDBPath.rocksdb}")):
        if not db_path.exists():
            continue
        print(f"delete DB({db_path.resolve()})")
        rmtree(db_path)
@pytest.fixture(autouse=True)
def run_around_tests():
delete_test_db_dirs()
yield
delete_test_db_dirs()
class TestMultiProcessLevelDB:
store_process_functions = [f_rocksdb, f_lmdb]
@pytest.mark.parametrize("store_process_function", store_process_functions)
def test_multiprocessing_db(self, store_process_function):
p1 = Process(target=store_process_function, args=("prop1",))
p1.start()
p2 = Process(target=store_process_function, args=("prop2",))
p2.start()
p1.join()
p2.join()
# RocksDB supports multiprocessing.
assert p1.exitcode == 0
assert p2.exitcode == 0
| iconloop/kona | tests/test_multi_process_db.py | test_multi_process_db.py | py | 2,444 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getppid",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rocksdb.DB",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rocksdb.Options",
"line_number": ... |
74050173864 | from parlai.core.teachers import FbDeprecatedDialogTeacher, MultiTaskTeacher
from .build import build
import copy
import os
def _path(task, opt):
    """Return the on-disk path of a CBT data file, building the data first."""
    # Build the data if it doesn't exist.
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    # Map the ParlAI datatype to the CBT file-name suffix.
    suffixes = {'train': 'train', 'test': 'test_2500ex', 'valid': 'valid_2000ex'}
    suffix = suffixes.get(datatype, '')
    return os.path.join(
        opt['datapath'], 'CBT', 'CBTest', 'data', task + '_' + suffix + '.txt'
    )
class NETeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_NE', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class CNTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_CN', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class VTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_V', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class PTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_P', opt)
opt['cloze'] = True
super().__init__(opt, shared)
# By default train on all tasks at once.
class DefaultTeacher(MultiTaskTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['task'] = 'cbt:NE,cbt:CN,cbt:V,cbt:P'
super().__init__(opt, shared)
| facebookresearch/ParlAI | parlai/tasks/cbt/agents.py | agents.py | py | 1,587 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "build.build",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "parlai.core.teachers.FbDeprecat... |
40661236565 | from torch import nn
import torch
from torch.nn import CrossEntropyLoss
from transformers import BertPreTrainedModel, BertConfig, BertModel
from transformers.models.bert.modeling_bert import BertOnlyMLMHead
class BertForPTuning(BertPreTrainedModel):
def __init__(self, config: BertConfig, prompt_index):
super().__init__(config)
self.num_labels = config.num_labels
self.prompt_index = prompt_index
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.prompt_embedding = torch.nn.Embedding(len(prompt_index), config.hidden_size)
self.lstm_head = torch.nn.LSTM(input_size=config.hidden_size,
hidden_size=config.hidden_size,
num_layers=2,
bidirectional=True,
batch_first=True)
self.mlp_head = nn.Sequential(nn.Linear(2 * config.hidden_size, config.hidden_size),
nn.ReLU(),
nn.Linear(config.hidden_size, config.hidden_size))
self.init_weights()
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder.weight = new_embeddings.weight
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self.set_output_embeddings(self.bert.embeddings.word_embeddings)
# 替换embedding
replace_embedding = self.prompt_embedding(torch.arange(len(self.prompt_index)).to(input_ids.device))[None, :]
replace_embedding = self.lstm_head(replace_embedding)[0]
replace_embedding = self.mlp_head(replace_embedding)
raw_embedding = self.bert.embeddings.word_embeddings(input_ids)
raw_embedding[:, self.prompt_index, :] = replace_embedding
inputs_embeds = raw_embedding
outputs = self.bert(
input_ids=None,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.cls(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
s = attention_mask.shape[0] * attention_mask.shape[1]
loss = loss_fct(logits.view(s, -1), labels.view(-1))
# token out / pool out / cls
output = (logits,) + outputs[1:] + (outputs[0][:, 0],)
return ((loss,) + output) if loss is not None else output
| zhangzhiqiangccm/NLP-project | 小样本学习/few_shot/model.py | model.py | py | 3,124 | python | en | code | 120 | github-code | 36 | [
{
"api_name": "transformers.BertPreTrainedModel",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "transformers.BertConfig",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel",
"line_number": 14,
"usage_type": "call"
},
{
"api... |
8668254189 | import re
import json
import logging
class Object:  # pylint: disable=too-many-instance-attributes
    """Generic attribute container; str() renders it as indented JSON."""

    def __str__(self):
        def encode(item):
            # Fall back to the instance __dict__ for non-JSON-native values.
            return item.__dict__

        return json.dumps(self, default=encode, indent=5)
class Device(Object):
    """Device record carrying XML-schema metadata plus identity fields."""

    def __init__(self, uid, version, firmware):
        # Assignment order is preserved deliberately: it controls the key
        # order of the JSON produced by Object.__str__.
        for attr, value in (
            ('namespace', 'http://www.w3.org/2001/XMLSchema-instance'),
            ('location', '../../db/resources/db.xsd'),
            ('id', uid),
            ('version', version),
            ('firmware', firmware),
        ):
            setattr(self, attr, value)
def delete_attrs(obj, paths):
    """
    Delete several attributes from an object, in place.

    Note: unlike the old docstring claimed, nothing is returned — the
    object is modified in place.

    :param cterasdk.common.object.Object obj: The object
    :param list[str] paths: List of attribute paths to remove
    """
    for path in paths:
        delete_attr(obj, path)
def delete_attr(obj, path):
    """
    Delete a single attribute, in place (nothing is returned).

    If removing the attribute leaves its parent ``Object`` empty, the
    parent itself is collapsed to ``None`` on the grandparent so that it
    serializes as JSON ``null`` rather than ``{}``.

    :param cterasdk.common.object.Object obj: The object
    :param str path: Attribute path, e.g. ``'a/b/c'``
    """
    parts = re.findall(r'[^/]+', path)
    parent = find_attr(obj, parts[:-1])
    remove_attr(parent, parts[-1])
    # Collapse a now-empty parent Object.
    if len(parts) > 1 and isinstance(parent, Object) and not parent.__dict__:
        grandparent = find_attr(obj, parts[:-2])
        setattr(grandparent, parts[-2], None)
def find_attr(obj, path):
    """
    Find an attribute by path.

    :param cterasdk.common.object.Object obj: The object
    :param path: A ``'/'``-separated string, or a list of path segments
    :returns: The attribute, or ``None`` if not found
    """
    parts = path if not isinstance(path, str) else re.findall('[^/]+', path)
    current = obj
    for segment in parts:
        current = get_attr(current, segment)
        if current is None:
            logging.getLogger().warning('Could not find attribute. %s', {'path': f'/{"/".join(parts)}'})
            return None
    return current
def get_attr(obj, attr):
    """
    Get an attribute.

    :param obj: The object, or a list indexed by position
    :param str attr: The attribute name, or a numeric list index
    :returns: The attribute, or ``None`` if not found
    """
    if isinstance(obj, list):
        try:
            return obj[int(attr)]
        except (ValueError, IndexError):
            # Non-numeric or out-of-range index: previously only ValueError
            # was caught, so an out-of-range index raised IndexError instead
            # of returning None as documented.
            logging.getLogger().warning('Could not find attribute.')
            return None
    return getattr(obj, attr, None)
def remove_attr(obj, attr):
    """
    Remove an attribute, in place.

    :param obj: The object, or a list indexed by position/key
    :param str attr: The name (or list index) of the attribute to remove
    """
    if isinstance(obj, list):
        remove_array_element(obj, attr)
        return
    try:
        delattr(obj, attr)
    except AttributeError:
        logging.getLogger().warning('Failed to remove attribute. Attribute not found. %s', {'attr': attr})
def remove_array_element(array, attr):
    """
    Remove an element from a list, in place.

    ``attr`` is interpreted first as a numeric index; failing that, it is
    matched against each element's ``_uuid`` attribute, then ``name``.

    :param list array: The list
    :param str attr: Index, ``_uuid`` or ``name`` of the element to remove
    """
    try:
        index = int(attr)
    except ValueError:
        index = None
    if index is not None:
        # Numeric index: pop it when in range, warn otherwise, and stop —
        # the key-based fallbacks only apply to non-numeric identifiers.
        if index <= len(array) - 1:
            array.pop(index)
        else:
            logging.getLogger().warning('Could not remove array item. Index out of range. %s', {'index': index})
        return
    if remove_array_element_by_key(array, '_uuid', attr):
        return
    remove_array_element_by_key(array, 'name', attr)
def remove_array_element_by_key(array, key, value):
    """
    Pop and return the first element whose ``key`` attribute equals
    ``value``; return ``None`` when no element matches.
    """
    match_index = next(
        (i for i, element in enumerate(array)
         if getattr(element, key, None) == value),
        None,
    )
    return array.pop(match_index) if match_index is not None else None
| ctera/ctera-python-sdk | cterasdk/common/object.py | object.py | py | 3,495 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number"... |
17310591965 | import sys
import io
import os
import shutil
import base64
import hashlib
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PublicKey
from Crypto.Cipher import AES
from Crypto.Util import Counter
RANSOM_EXT = '.INC'
ENC_MARKER = b'INC'
# x25519
X25519_KEY_SIZE = 32
# AES
AES_KEY_SIZE = 16
AES_IV_SIZE = 16
METADATA_SIZE = X25519_KEY_SIZE + len(ENC_MARKER)
ENC_BLOCK_SIZE = 1000000
ENC_BLOCK_STEP = 3 * ENC_BLOCK_SIZE
def derive_encryption_key_data(priv_key_data: bytes,
pub_key_data: bytes) -> bytes:
"""Derive encryption key data"""
# Derive x25519 shared secret
priv_key = X25519PrivateKey.from_private_bytes(priv_key_data)
pub_key = X25519PublicKey.from_public_bytes(pub_key_data)
shared_secret = priv_key.exchange(pub_key)
# Derive encryption key data
return hashlib.sha512(shared_secret).digest()
def decrypt_file(filename: str, priv_key_data: bytes) -> bool:
"""Decrypt file"""
with io.open(filename, 'rb+') as f:
# Read metadata
try:
f.seek(-METADATA_SIZE, 2)
except OSError:
return False
metadata = f.read(METADATA_SIZE)
if metadata[-len(ENC_MARKER):] != ENC_MARKER:
return False
pub_key_data = metadata[:X25519_KEY_SIZE]
# Derive encryption key data
key_data = derive_encryption_key_data(priv_key_data, pub_key_data)
# AES-128 CTR
key = key_data[:AES_KEY_SIZE]
iv = key_data[AES_KEY_SIZE : AES_KEY_SIZE + AES_IV_SIZE]
init_val = int.from_bytes(iv, byteorder='big')
counter = Counter.new(128, initial_value=init_val,
little_endian=False)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
# Remove metadata
f.seek(-METADATA_SIZE, 2)
f.truncate()
# Decrypt file data
pos = 0
while True:
# Decrypt block
f.seek(pos)
enc_data = f.read(ENC_BLOCK_SIZE)
if enc_data == b'':
break
data = cipher.decrypt(enc_data)
f.seek(pos)
f.write(data)
pos += ENC_BLOCK_STEP
return True
#
# Main
#
if len(sys.argv) != 2:
print('Usage:', os.path.basename(sys.argv[0]), 'filename')
sys.exit(0)
filename = sys.argv[1]
with io.open('privkey.txt', 'rb') as f:
priv_key_data = base64.b64decode(f.read())
# Copy file
new_filename = filename
if new_filename.endswith(RANSOM_EXT):
new_filename = new_filename[:-len(RANSOM_EXT)]
else:
new_filename += '.dec'
shutil.copy(filename, new_filename)
# Decrypt file
if not decrypt_file(new_filename, priv_key_data):
print('Error: Failed to decrypt file')
sys.exit(1)
| rivitna/Malware | Inc/inc_decrypt_file.py | inc_decrypt_file.py | py | 2,858 | python | en | code | 218 | github-code | 36 | [
{
"api_name": "cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey",
"line_number": 37,
"usage_type": "name"
},
{
"api_na... |
8491486153 | from rich.table import Table
from rich.console import Console
import os
import sqlite3
from sqlite3 import Error
from colorama import Fore
from colored import fg, attr
from datetime import date, datetime
# =============================================
con = sqlite3.connect("data.db")
os.system("cls")
# =============================================
def sql_connection(con):
cur = con.cursor()
cur.execute(
"CREATE TABLE IF NOT EXISTS ManagerBuy(Id INTEGER PRIMARY KEY AUTOINCREMENT,Price INTEGER, Product_Name TEXT, Date TEXT, Time TEXT)")
con.commit()
# =============================================
def help_list():
print(f"""{fg(50)}
███ ███ █████ ███ ██ █████ ██████ ███████ ██████ ██████ ██ ██ ██ ██
████ ████ ██ ██ ████ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
██ ████ ██ ███████ ██ ██ ██ ███████ ██ ███ █████ ██████ ██████ ██ ██ ████
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ████ ██ ██ ██████ ███████ ██ ██ ██████ ██████ ██ {fg(50)}
{fg(115)}===========================================================================================
** Github : erfanbanaei **
** Twitter: @erfan_banaei **
** YouTube: @Hero_Code **
==========================================================================================={fg(115)}{attr(0)}
[{fg(50)}1{fg(50)}{attr(0)}] Init
[{fg(50)}2{fg(50)}{attr(0)}] Add
[{fg(50)}3{fg(50)}{attr(0)}] Show
[{fg(50)}4{fg(50)}{attr(0)}] Edit
[{fg(50)}5{fg(50)}{attr(0)}] Delete
[{fg(50)}6{fg(50)}{attr(0)}] Help
[{fg(50)}7{fg(50)}{attr(0)}] Exit
""")
# =============================================
def add(con):
Price = int(
input(f"[{fg(9)}?{fg(9)}{attr(0)}]Enter your purchase price : "))
Product_Name = input(
f"[{fg(9)}?{fg(9)}{attr(0)}] Enter your purchase name : ")
t = datetime.now().time()
time = f"{t.hour}:{t.minute}"
date2 = date.today()
full = (Price, Product_Name, date2, time)
cur = con.cursor()
cur.execute(
"INSERT INTO ManagerBuy(Price, Product_Name, Date, Time) VALUES(?,?,?,?)", full)
con.commit()
os.system("cls")
# =============================================
def show(con):
cur = con.cursor()
cur.execute('SELECT * FROM ManagerBuy')
rows = cur.fetchall()
console = Console()
table = Table(title="ManagerBuy")
table.add_column("Id", justify="center", style="cyan")
table.add_column("Price", justify="center", style="magenta")
table.add_column("Product Name", justify="center", style="green")
table.add_column("Date", justify="center", style="yellow")
table.add_column("Time", justify="center", style="blue")
for row in rows:
table.add_row(str(row[0]),str(row[1]),str(row[2]),str(row[3]),str(row[4]))
console.print(table)
# =============================================
def edit(con):
try:
data_id = int(input(f"[{fg(9)}?{fg(9)}{attr(0)}]Enter the ID of the product you want : "))
new_name = input(f"[{fg(9)}?{fg(9)}{attr(0)}]Enter the new name of the desired product : ")
new_price = input(f"[{fg(9)}?{fg(9)}{attr(0)}]Enter the new price of the product you want :")
full = (new_price,new_name,data_id)
cur = con.cursor()
cur.execute(f"UPDATE ManagerBuy SET Price = ?, Product_Name = ? WHERE Id = ?",full)
con.commit()
except Error as e:
print(Fore.RED+ "Error" , e)
# =============================================
def delete_record(con):
data_id = input(f"[{fg(9)}?{fg(9)}{attr(0)}]Enter the ID of the product you want (9999 => Remove all products) : ")
cur = con.cursor()
full = (int(data_id))
if full == 9999:
cur.execute(f"DELETE FROM ManagerBuy")
con.commit()
print(Fore.GREEN + "Removed all\n\n")
else:
cur.execute(f"DELETE FROM ManagerBuy WHERE Id = {full}")
con.commit()
print(Fore.GREEN + "Deleted Product\n\n")
# =============================================
def Help():
print(f"""
Init {fg(50)}=>{fg(50)} {fg(115)}Create Database{fg(115)}{attr(0)}
Add {fg(50)}=>{fg(50)} {fg(115)}Add to Database(Price , Product Name){fg(115)}{attr(0)}
Show {fg(50)}=>{fg(50)} {fg(115)}Show all products{fg(115)}{attr(0)}
Edit {fg(50)}=>{fg(50)} {fg(115)}Product edit{fg(115)}{attr(0)}
Delete {fg(50)}=>{fg(50)} {fg(115)}Remove the product from the list{fg(115)}{attr(0)}
\n\n""")
# =============================================
while True:
help_list()
number = input(Fore.CYAN+"┌─["+Fore.LIGHTGREEN_EX+"ManagerBuy"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.CYAN+"""]
└──╼ """+Fore.WHITE+"$ ")
# =============================================
if number == "1":
os.system("cls")
sql_connection(con)
print(Fore.GREEN + "Created Database\n\n")
# =============================================
elif number == "2":
os.system("cls")
add(con)
# =============================================
elif number == "3":
os.system("cls")
show(con)
# =============================================
elif number == "4":
os.system("cls")
edit(con)
# =============================================
elif number == "5":
os.system("cls")
delete_record(con)
# =============================================
elif number == "6":
os.system("cls")
Help()
# =============================================
elif number == "7":
quit() | erfanbanaei/ManagerBuy | main.py | main.py | py | 6,303 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "colored.fg",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "colored.fg",
"line_number": ... |
70862243305 | from flask import Blueprint, render_template, request, redirect, url_for
import pendulum
import requests
from .static.sc.schedule import date_schedule, get_date
from . import db
from .models import User, List, ListFields
views = Blueprint('views', __name__)
@views.route('/')
def main():
return render_template('index.html')
@views.route('/capacity')
def capacity():
return render_template('capacity.html', page_class = 'capacity-page', page_title = 'capacity')
@views.route('/todo')
def todo():
response = List.query.all()
return render_template('todo.html', page_class = 'todo-page', page_title = 'todo', res=response, len=len(response))
@views.route('/post_task', methods=['POST'])
def post_task():
text_task = request.form.get('text')
deadline = request.form.get('deadline')
task = List(text=text_task, deadline=deadline)
db.session.add(task)
db.session.commit()
return redirect(url_for('views.todo'))
@views.route('/del_task', methods=['POST'])
def del_task():
k = int(request.form.get('id'))
task = List.query.filter_by(id=k).first()
db.session.delete(task)
db.session.commit()
return redirect(url_for('views.todo'))
@views.route('/compl_task', methods=['POST'])
def compl_task():
k = int(request.form.get('id'))
task = List.query.filter_by(id=k).first()
task.complete = True
db.session.commit()
return redirect(url_for('views.todo'))
@views.route('/encompl_task', methods=['POST'])
def encompl_task():
k = int(request.form.get('id'))
task = List.query.filter_by(id=k).first()
task.complete = False
db.session.commit()
return redirect(url_for('views.todo'))
@views.route('/schedule/today')
def schedule():
today = pendulum.now()
d = today.strftime("%d/%m/%Y")
today_sc = date_schedule(1307, today)
return render_template('schedule.html',
page_class = 'schedule-page', page_title = 'schedule',
sc_date = d, sc = today_sc, current_date = today, pendulum = pendulum)
@views.route('/schedule/date/<n>/<m>/<y>')
def scdate(n, m, y):
date = get_date([int(n), int(m), int(y)])
d = date.strftime("%d/%m/%Y")
today_sc = date_schedule(1307, date)
return render_template('schedule.html',
page_class = 'schedule-page', page_title = 'schedule',
sc_date = d, sc = today_sc, current_date = date, pendulum = pendulum)
| eternalme0w/dvs | website/views.py | views.py | py | 2,410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Lis... |
74946408105 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn import svm
import warnings
warnings.filterwarnings('ignore')
def getData(category):
crime_rate_df = pd.read_csv('dataset/boston_crime_2021-2022.csv', dtype=str)
crime_dictionary = ['LARCENY', 'M/V ACCIDENT', 'LIQUOR', 'INCEST', 'MANSLAUGHTER', 'MISSING PERSON',
'PROPERTY - LOST', 'MURDER', 'FRAUD', 'PROSTITUTION', 'RAPE', 'ROBBERY', 'ASSAULT',
'SICK/INJURED/MEDICAL', 'TOWED MOTOR VEHICLE', 'TRESPASSING', 'VIOLATION', 'ANIMAL',
'AUTO THEFT', 'FIREARM/WEAPON', 'HUMAN TRAFFICKING', 'DRUGS', 'SEX OFFENSE', 'ARSON',
'VANDALISM', 'SEARCH WARRANT', 'KIDNAPPING', 'DEATH INVESTIGATION', 'CHILD ABUSE', 'HARASSMENT']
crime_list = [0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29]
for j in range(len(crime_list)):
crime_rate_df.loc[crime_rate_df['OFFENSE_DESCRIPTION'].str.contains(crime_dictionary[j]), 'GROUP'] = crime_list[j]
crime_rate_df = crime_rate_df.dropna()
Weekday = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
for i in range(len(Weekday)):
crime_rate_df['DAY_OF_WEEK'] = np.where((crime_rate_df.DAY_OF_WEEK == Weekday[i]), i, crime_rate_df.DAY_OF_WEEK)
X = crime_rate_df.drop([category, 'OFFENSE_DESCRIPTION', 'Location', 'DISTRICT', 'Dates', 'STREET', 'YEAR'], axis=1).values
Y = crime_rate_df[[category]].values
x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.8)
return x_train, y_train, x_test, y_test
def getTable(cm, i, all=False):
    """Summarize confusion matrix ``cm[i]`` as classification rates.

    Returns a one-row DataFrame with accuracy / true-positive rate /
    true-negative rate, or — when ``all`` is truthy — the raw counts and
    rates as the tuple ``(TP, FP, FN, TN, TPR, TNR, ACC)``.
    """
    (TP, FP), (FN, TN) = cm[i]
    TPR = TP / (TP + FN)
    TNR = TN / (TN + FP)
    ACC = (TP + TN) / (TP + TN + FP + FN)
    if all:
        return TP, FP, FN, TN, TPR, TNR, ACC
    return pd.DataFrame(data={
        'Accuracy': [ACC],
        'True positive rate': [TPR],
        'True negative rate': [TNR],
    })
def predictCrime():
cm = []
x_train, y_train, x_test, y_test = getData('GROUP')
NB_classifier = GaussianNB().fit(x_train, y_train)
accuracy = accuracy_score(y_test, NB_classifier.predict(x_test))
print("\n1:")
print('Implement a Naive Bayesian classifier:')
print('The accuracy is', accuracy)
log_reg_classifier = LogisticRegression()
log_reg_classifier.fit(x_train, y_train)
accuracy = log_reg_classifier.score(x_train, y_train)
print("\n2:")
print('Implement a Logistic regression classifier:')
print('The accuracy is', accuracy)
clf = tree.DecisionTreeClassifier(criterion='entropy')
clf = clf.fit(x_train, y_train)
prediction = clf.predict(x_test)
accuracy = accuracy_score(y_test, prediction)
print("\n3:")
print('Implement a Decision Tree:')
print('The accuracy is', accuracy)
# 5. Use Random Forest classifier
error_rate = []
random_forest_table = pd.DataFrame(columns=['n_estimators', 'max_depth', 'accuracy'])
for i in range(1, 11):
for j in range(1, 6):
rf = RandomForestClassifier(n_estimators=i, max_depth=j)
rf.fit(x_train, y_train)
error_rate.append(1 - accuracy_score(y_test, rf.predict(x_test)))
ACC = accuracy_score(y_test, rf.predict(x_test))
random_forest_table.loc[len(random_forest_table.index)] = [i, j, ACC]
best_n = error_rate.index(min(error_rate)) % 10 + 1
best_max = error_rate.index(min(error_rate)) % 5 + 1
print("\n4:")
print('Implement a Random Forest classifier :')
print("The best n_estimators and max_depth are", best_n, "and", best_max)
rf = RandomForestClassifier(n_estimators=best_n, max_depth=best_max)
rf.fit(x_train, y_train)
accuracy = accuracy_score(y_test, rf.predict(x_test))
print('The accuracy is', accuracy)
predictCrime()
| Eldoov/cs677-Data-Sci.-with-Python | Final Project/boston crime/prediction.py | prediction.py | py | 4,305 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.model_sel... |
40152632978 | """Mel - a command-line utility to help with mole management."""
import argparse
import sys
import mel.cmd.addcluster
import mel.cmd.addsingle
import mel.cmd.error
import mel.cmd.list
import mel.cmd.microadd
import mel.cmd.microcompare
import mel.cmd.microview
import mel.cmd.rotomapautomark
import mel.cmd.rotomapautomark2
import mel.cmd.rotomapautomark2train
import mel.cmd.rotomapautomask
import mel.cmd.rotomapcalcspace
import mel.cmd.rotomapcompare
import mel.cmd.rotomapcompareextrastem
import mel.cmd.rotomapconfirm
import mel.cmd.rotomapedit
import mel.cmd.rotomapfiltermarks
import mel.cmd.rotomapfiltermarkspretrain
import mel.cmd.rotomapfiltermarkstrain
import mel.cmd.rotomapidentify
import mel.cmd.rotomapidentifytrain
import mel.cmd.rotomaplist
import mel.cmd.rotomaploadsave
import mel.cmd.rotomapmarkunchanged
import mel.cmd.rotomapmergeextrastem
import mel.cmd.rotomapmontagesingle
import mel.cmd.rotomaporganise
import mel.cmd.rotomaprm
import mel.cmd.rotomapuuid
import mel.cmd.status
import mel.cmd.timelog
COMMANDS = {
"root": {
"status": mel.cmd.status,
"timelog": mel.cmd.timelog,
},
"micro": {
"add-cluster": mel.cmd.addcluster,
"add-single": mel.cmd.addsingle,
"list": mel.cmd.list,
"add": mel.cmd.microadd,
"compare": mel.cmd.microcompare,
"view": mel.cmd.microview,
},
"rotomap": {
"automark": mel.cmd.rotomapautomark,
"automark2": mel.cmd.rotomapautomark2,
"automark2-train": mel.cmd.rotomapautomark2train,
"automask": mel.cmd.rotomapautomask,
"calc-space": mel.cmd.rotomapcalcspace,
"compare": mel.cmd.rotomapcompare,
"compare-extra-stem": mel.cmd.rotomapcompareextrastem,
"confirm": mel.cmd.rotomapconfirm,
"edit": mel.cmd.rotomapedit,
"filter-marks": mel.cmd.rotomapfiltermarks,
"filter-marks-pretrain": mel.cmd.rotomapfiltermarkspretrain,
"filter-marks-train": mel.cmd.rotomapfiltermarkstrain,
"identify": mel.cmd.rotomapidentify,
"identify-train": mel.cmd.rotomapidentifytrain,
"list": mel.cmd.rotomaplist,
"loadsave": mel.cmd.rotomaploadsave,
"mark-unchanged": mel.cmd.rotomapmarkunchanged,
"merge-extra-stem": mel.cmd.rotomapmergeextrastem,
"montage-single": mel.cmd.rotomapmontagesingle,
"organise": mel.cmd.rotomaporganise,
"rm": mel.cmd.rotomaprm,
"uuid": mel.cmd.rotomapuuid,
},
}
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__,
)
top_subparsers = parser.add_subparsers()
micro_parser = top_subparsers.add_parser(
"micro", help="Work with microscope images.", aliases=["m"]
)
rotomap_parser = top_subparsers.add_parser(
"rotomap", help="Work with rotomap images.", aliases=["r", "roto"]
)
micro_subparsers = micro_parser.add_subparsers()
rotomap_subparsers = rotomap_parser.add_subparsers()
subparsers = top_subparsers
# Work around a bug in argparse with subparsers no longer being required:
# http://bugs.python.org/issue9253#msg186387
subparsers.required = True
subparsers.dest = "command"
# vulture will report these as unused unless we do this
#
# pylint: disable=pointless-statement
subparsers.required
subparsers.dest
# pylint: enable=pointless-statement
parser_map = {
"root": subparsers,
"micro": micro_subparsers,
"rotomap": rotomap_subparsers,
}
for pname, parser2 in parser_map.items():
for name, module in COMMANDS[pname].items():
_setup_parser_for_module(parser2, module, name)
args = parser.parse_args()
try:
return args.func(args)
except mel.cmd.error.UsageError as e:
print("Usage error:", e, file=sys.stderr)
return 2
except BrokenPipeError:
# Silently exit on broken pipes, e.g. when our output is piped to head.
# Explicitly close stderr before exiting, to avoid an additional
# message from Python on stderr about the pipe break being ignored.
# http://bugs.python.org/issue11380,#msg153320
sys.stderr.close()
except mel.lib.ui.AbortKeyInterruptError:
# Using this return code may also break us out of an outer loop, e.g.
# 'xargs' will stop processing if the program it calls exists with 255.
return 255
def _setup_parser_for_module(subparsers, module, name):
doc = module.__doc__
doc_subject = doc.splitlines()[0]
doc_epilog = "\n".join(doc.splitlines()[1:])
parser = subparsers.add_parser(
name,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=doc_subject,
description=doc_subject,
epilog=doc_epilog,
)
module.setup_parser(parser)
parser.set_defaults(func=module.process_args)
if __name__ == "__main__":
sys.exit(main())
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2019 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| aevri/mel | mel/cmd/mel.py | mel.py | py | 5,755 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "mel.cmd.addcluster.cmd",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "mel.cmd.addcluster",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "mel.cmd.addcluster.cmd",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_na... |
8468882150 | from selenium import webdriver
# Selenium 4 removed the find_element_by_* helpers; use By locators instead.
from selenium.webdriver.common.by import By

options = webdriver.ChromeOptions()
# FIX: Chrome expects the '--headless' switch; the bare token 'headless'
# was silently ignored and a visible browser window was opened.
options.add_argument('--headless')
driver = webdriver.Chrome(options=options)

file_name = 'write7.txt'

try:
    # driver.get("http://www.zhgc.com/dllt_wq1/arena.asp")
    driver.get("http://www.zhgc.com/dllt_wq2/arena.asp")
    # FIX: open the output file once instead of re-opening it for every cell.
    with open(file_name, 'a', encoding='utf-8') as file_obj:
        # for i in range(75, 217):
        for i in range(13, 70):
            print("Page No.", i)
            # Click the pagination link whose text contains the page number.
            btn = driver.find_element(By.PARTIAL_LINK_TEXT, str(i))
            btn.click()
            data = driver.find_elements(
                By.XPATH,
                '/html/body/table/tbody/tr/td[2]/table[@id="AutoNumber1"]/tbody/tr/td[2]')
            for d in data:
                print(d.text)
                file_obj.write(d.text + '\n')
finally:
    # FIX: always release the browser process, even if scraping fails.
    driver.quit()
| BabyYang2049/demo | spider/Crawler.py | Crawler.py | py | 690 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 5,
"usage_type": "call"
},
{
"api_na... |
27470111335 | import os
import openai
from flask import Flask, render_template, request, jsonify
from openai.error import ServiceUnavailableError, InvalidRequestError, RateLimitError
# The API key is read from the environment so it never lives in source control.
openai.api_key = os.environ.get('OPENAI_API_KEY')
app = Flask(__name__, template_folder='templates', static_folder='static')
@app.route('/')
def index():
    """Render and return the application's single main page."""
    page = render_template('index.html')
    return page
@app.route('/api/speech-to-text', methods=['POST'])
def speech_to_text():
    """Forward the recognised transcript to ChatGPT and return its reply as JSON.

    Expects a JSON body of the form ``{"transcript": "<user text>"}``.
    Known OpenAI failure modes are translated into user-readable messages
    instead of surfacing as HTTP 500 responses.
    """
    transcript = request.json['transcript']
    messages = [{"role": "system", "content": "Ты дружелюбный, но саркастичный бот."},
                {"role": "user", "content": transcript}]
    print('Вопрос:', transcript)
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.5,
            max_tokens=1000,
            top_p=1.0,
            frequency_penalty=0.5,
            presence_penalty=0.0,
        )
    except ServiceUnavailableError:
        return jsonify({'response': 'Извините, сервер openAI перегружен и не отвечает.'})
    except InvalidRequestError as e:
        return jsonify({'response': f'Проблема с запросом, {e}'})
    except RateLimitError:
        return jsonify({'response': 'Превышен лимит запросов в минуту.'})
    # BUG FIX: was ``except BaseException``, which also swallows
    # KeyboardInterrupt/SystemExit; Exception is the correct catch-all here.
    except Exception as e:
        return jsonify({'response': f'Неизвестная ошибка: {e}'})
    print('GPT:', response.choices[0].message.content)
    return jsonify({'response': response.choices[0].message.content})
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| rudotcom/flask-bot | app.py | app.py | py | 1,687 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openai.api_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"li... |
3511368259 | import itertools
# Project Euler 24: find the millionth lexicographic permutation of 0-9.
digits = list(range(10))

# itertools.permutations yields permutations of a sorted input in
# lexicographic order, so the answer is simply item number 1,000,000
# (0-based index 999999) of the iterator -- no manual counter needed.
x = next(itertools.islice(itertools.permutations(digits), 999999, None))
print(x)

# Print the digits concatenated, e.g. 2783915460.
for c in x:
    print(c, end="")
| PetraVidnerova/euler | 24.py | 24.py | py | 200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.permutations",
"line_number": 5,
"usage_type": "call"
}
] |
39803344453 | from django.utils.translation import gettext_lazy as _
from django.db import models
import uuid
from internal_users.models import InternalUser
from customer_users.models import CustomerUser
from homepageapp.models import RepairOrdersNewSQL02Model as RepairOrder
from django.utils import timezone
from core_operations.models import FormattedPhoneNumberField
# Appointment lifecycle status codes; used as the keys of
# AppointmentRequest.STATUS_CHOICES below.
APPT_STATUS_NOT_SUBMITTED = 0
APPT_STATUS_PENDING = 1
APPT_STATUS_CONFIRMED = 2
APPT_STATUS_REJECTED = 3
APPT_STATUS_RESCHEDULED = 4
APPT_STATUS_PORGRESSING = 5
APPT_STATUS_COMPLETED = 10
# Negative value marks a terminal cancelled state.
APPT_STATUS_CANCELLED = -20
class AppointmentRequest(models.Model):
    """A customer's request for a service appointment.

    Rows are created either by an anonymous/signed-in customer on the web
    site or by an internal user when a customer walks into the store.
    """

    # Lifecycle states; keys come from the module-level APPT_STATUS_* constants.
    STATUS_CHOICES = (
        (APPT_STATUS_NOT_SUBMITTED, _('00_Not_Submitted')),
        (APPT_STATUS_PENDING, _('01_Pending')),
        (APPT_STATUS_CONFIRMED, _('02_Confirmed')),
        (APPT_STATUS_REJECTED, _('03_Rejected')),
        (APPT_STATUS_RESCHEDULED, _('04_Rescheduled')),
        (APPT_STATUS_PORGRESSING, _('05_Progressing (tracking status via repair order)')),
        (APPT_STATUS_COMPLETED, _('10_Completed')),
        (APPT_STATUS_CANCELLED, _('-20_Cancelled')),
    )

    # Reasons the customer can select for the visit.
    REASON_CHOICES = (
        (0, '00-not selected.'),
        (1, '01-oil change and maintenance.'),
        (2, '02-a/c diagnosis, compressors etc.'),
        (3, '03-brakes, transmission'),
        (4, '04-service lights, engine related'),
        (5, '05-just inqurires, others'),
    )

    appointment_id = models.BigAutoField(primary_key=True)
    # appointment_date = models.DateField()
    appointment_requested_datetime = models.DateTimeField(
        null=True, blank=True, verbose_name='Requested Apptmnt Time')
    appointment_confirmed_datetime = models.DateTimeField(
        null=True, blank=True, verbose_name='Confirmed Apptmnt Time')
    appointment_reason_for_visit = models.PositiveSmallIntegerField(
        choices=REASON_CHOICES, default=0, verbose_name='Reason for visit?')
    appointment_customer_user = models.ForeignKey(
        CustomerUser, on_delete=models.SET_NULL, null=True, verbose_name='your linked user account')
    appointment_first_name = models.CharField(
        max_length=50, null=True, blank=True)
    appointment_last_name = models.CharField(
        max_length=50, null=True, blank=True)
    appointment_phone_number = FormattedPhoneNumberField(
        help_text='we will send appointment reminders to this number.')
    appointment_phone_number_digits_only = models.CharField(
        max_length=20, null=True)
    # recording either customer_user or internal_user
    appointment_user_type = models.CharField(
        max_length=50, blank=True, null=True)
    appointment_email = models.EmailField(null=True)
    appointment_vehicle_year = models.CharField(
        max_length=4, null=True, blank=True)
    appointment_vehicle_make = models.CharField(
        max_length=100, null=True, blank=True)
    appointment_vehicle_model = models.CharField(
        max_length=100, null=True, blank=True)
    appointment_vehicle_license_plate = models.CharField(
        max_length=20, null=True, blank=True)
    appointment_vehicle_license_state = models.CharField(
        max_length=2, null=True, blank=True)
    appointment_vehilce_vin_number = models.CharField(
        max_length=30, null=True, blank=True)
    appointment_vehicle_detail = models.TextField()
    appointment_vehicle_detail_in_json = models.CharField(
        max_length=4000, null=True)  # {'year': 2003, 'model': VW, ...}
    appointment_concern_description = models.TextField(blank=True)
    # check the status of the appointment
    # NOTE(review): STATUS_CHOICES keys are ints but this column is a
    # CharField, so values are stored as stringified ints -- confirm intended.
    appointment_status = models.CharField(
        max_length=50, choices=STATUS_CHOICES, default=APPT_STATUS_NOT_SUBMITTED, verbose_name='Appointment Status')
    appointment_status_comments = models.CharField(
        max_length=4000, null=True, blank=True)
    appointment_is_active = models.BooleanField(default=True)
    appointment_preferred_contact_method = models.CharField(
        max_length=100, blank=True, null=True)
    appointment_repair_order = models.ForeignKey(
        RepairOrder, on_delete=models.SET_NULL, null=True, related_name='appointment_repair_order')
    appointment_is_converted_to_ro = models.BooleanField(default=False)
    appointment_confirmation_id = models.UUIDField(
        default=uuid.uuid4, editable=False, verbose_name='your appointment confirmation id')  # unique=True,
    # appointment can either be created by an anonymous user, a signed-in
    # customer_user, or by an internal_user when a customer shows up at the
    # physical store.
    appointment_created_by_internal_user = models.ForeignKey(
        InternalUser, on_delete=models.SET_NULL, null=True, related_name='appointment_created_by')  # when null, it means its created by customer user
    appointment_created_at = models.DateTimeField(auto_now_add=True)
    appointment_last_updated_at = models.DateTimeField(auto_now=True)

    @property
    def appointment_full_name(self):
        # Convenience accessor combining first and last name.
        return f"{self.appointment_first_name} {self.appointment_last_name}"

    class Meta:
        db_table = 'appointments'
        ordering = ['-appointment_id']

    def __str__(self):
        return f"Name: {self.appointment_first_name} {self.appointment_last_name}-Time: {self.appointment_requested_datetime}"
class AppointmentImages(models.Model):
    """An image uploaded for an appointment request."""

    image_id = models.BigAutoField(primary_key=True)
    appointment = models.ForeignKey(AppointmentRequest, on_delete=models.SET_NULL,
                                    null=True, related_name='appointment_appointmentimages')
    appointment_image = models.FileField(
        upload_to='appointment_images')  # the bucket's subfolder
    uploaded_date = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag: inactive images are kept but hidden.
    image_is_active = models.BooleanField(default=True)

    class Meta:
        db_table = 'appointment_images'
        ordering = ['-image_id']
        verbose_name = 'appointment_image'
        verbose_name_plural = 'appointment_images'
| zjgcainiao/new_place_at_76 | appointments/models.py | models.py | py | 5,933 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 22,
"usage_type": "call"
},
{
... |
42296358444 | import os
import logging
from xdg.BaseDirectory import xdg_config_home, xdg_state_home
from typing import Dict
import yaml
from .log import LogManager
from cfancontrol import __version__ as VERSION
class Environment(object):
    """Process-wide paths and identity, resolved once at startup.

    All attributes are class-level; call ``prepare_environment`` before use.
    """

    APP_NAME: str = "cfancontrol"
    APP_FANCY_NAME: str = "Commander²"
    APP_VERSION: str = VERSION
    LOG_FILE: str = 'cfancontrol.log'
    CONFIG_FILENAME: str = 'config.yaml'
    SENSORS_FILE: str = 'sensors3.conf'

    # Resolved at runtime by prepare_environment().
    is_root: bool = False
    log_path: str = ''
    log_full_name: str = ''
    settings_path: str = ''
    config_full_name: str = ''
    pid_path: str = ''
    sensors_config_file: str = ''

    @staticmethod
    def prepare_environment():
        """Resolve log/config/pid paths and create missing files/directories.

        Root processes use system locations (/var/log, /etc, /var/run);
        unprivileged processes use the XDG state/config directories.
        """
        if os.geteuid() == 0:
            Environment.is_root = True
            Environment.log_path = "/var/log"
            Environment.settings_path = os.path.join("/etc", Environment.APP_NAME)
            Environment.sensors_config_file = os.path.join("/etc", Environment.SENSORS_FILE)
            Environment.pid_path = "/var/run"
        else:
            Environment.log_path = os.path.join(xdg_state_home, Environment.APP_NAME)
            Environment.settings_path = os.path.join(xdg_config_home, Environment.APP_NAME)
            Environment.sensors_config_file = os.path.join(Environment.settings_path, Environment.SENSORS_FILE)
            Environment.pid_path = f"/var/run/user/{os.geteuid()}"
        # Create missing directories/files so later opens cannot fail.
        if not os.path.isdir(Environment.log_path):
            os.makedirs(Environment.log_path, mode=0o755, exist_ok=True)
        if not os.path.isdir(Environment.settings_path):
            os.makedirs(Environment.settings_path, mode=0o755, exist_ok=True)
        if not os.path.isfile(Environment.sensors_config_file):
            os.mknod(Environment.sensors_config_file, mode=0o755)
        Environment.log_full_name = os.path.join(Environment.log_path, Environment.LOG_FILE)
        Environment.config_full_name = os.path.join(Environment.settings_path, Environment.CONFIG_FILENAME)
class Config(object):
    """Application settings held as class attributes, persisted as YAML."""

    interval: float = 10.0
    auto_start: bool = False
    profile_file: str = ''
    log_level: int = logging.INFO
    theme: str = 'light'

    @classmethod
    def from_arguments(cls, **kwargs):
        """Apply keyword settings onto the class, validating profile_file.

        An invalid or missing profile file clears the path and disables
        auto-start, since there is nothing to start with.
        """
        for attr in kwargs:
            setattr(cls, attr, kwargs[attr])
        if cls.profile_file:
            if os.path.isfile(os.path.expanduser(cls.profile_file)):
                cls.profile_file = os.path.expanduser(cls.profile_file)
            else:
                cls.profile_file = ''
                cls.auto_start = False
        else:
            cls.auto_start = False

    @classmethod
    def get_settings(cls) -> Dict:
        """Return the plain (non-callable, non-dunder) settings as a dict."""
        return {name: value for name, value in vars(cls).items() if not callable(getattr(cls, name)) and not name.startswith("__")}

    @classmethod
    def load_settings(cls):
        """Load settings from the standard config location, if resolved."""
        if Environment.config_full_name:
            cls._load_from_file(Environment.config_full_name)

    @classmethod
    def _load_from_file(cls, file_name: str):
        # Silently skips missing files; an empty YAML document is ignored.
        if file_name is not None and os.path.isfile(file_name):
            LogManager.logger.debug(f'Loading configuration from {file_name}')
            with open(file_name) as config_file:
                config = yaml.safe_load(config_file)
                if config:
                    cls.from_arguments(**config)

    @classmethod
    def save_settings(cls):
        """Persist current settings to the standard config location."""
        if Environment.config_full_name:
            cls._save_to_file(Environment.config_full_name)

    @classmethod
    def _save_to_file(cls, file_name: str):
        # Writes the full settings dict as YAML, overwriting the file.
        if file_name is not None:
            LogManager.logger.debug(f'Saving configuration: {repr(cls.get_settings())}')
            with open(file_name, 'w') as config_file:
                yaml.safe_dump(cls.get_settings(), config_file)
| maclarsson/cfancontrol | cfancontrol/settings.py | settings.py | py | 3,776 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "cfancontrol.__version__",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.geteuid",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
33011549163 | from __future__ import division
import scipy
# FIX: scipy.constants is a submodule and is not guaranteed to be available
# after a bare ``import scipy``; import it explicitly.
import scipy.constants
# FIX: scipy.asarray (a NumPy alias) was deprecated and removed from modern
# SciPy releases; use numpy directly.
import numpy
import setupplots
setupplots.thesis_format()
import matplotlib.pyplot as plt
plt.ion()
import sys
sys.path.insert(0, '/Users/markchilenski/src/bayesimp')
import lines

# Plot the transmission curve of the 50 micron beryllium filter.
lines.read_filter_file(
    '/Users/markchilenski/src/bayesimp/spectral_modeling/Be_filter_50_um.dat',
    plot=True,
    title=r'$\SI{50}{\micro m}$ Be filter',
    figsize=(0.5 * setupplots.TEXTWIDTH, 0.5 * setupplots.TEXTWIDTH / 1.618)
)
f = plt.gcf()
a = plt.gca()

# Add a secondary top axis labelled in wavelength instead of photon energy.
a2 = a.twiny()
a2.set_xlim(a.get_xlim())
lam_locs = numpy.asarray([10, 1, 0.5, 0.25, 0.125], dtype=float)
lam_s = [r'$10\vphantom{0123456789}$', r'$1\vphantom{0123456789}$', r'$0.5\vphantom{0123456789}$', r'$0.25\vphantom{0123456789}$', r'$0.125\vphantom{0123456789}$']
# E [keV] = h*c / (e * lambda); lam_locs is in nm.
E_locs = 1e-3 * scipy.constants.h * scipy.constants.c / (scipy.constants.e * lam_locs * 1e-9)
a2.set_xticks(E_locs)
a2.set_xticklabels(lam_s)
a2.set_xlabel(r"$\lambda$ [nm]")
a.set_title(r'$\SI{50}{\micro m}$ Be filter', y=1.275)
setupplots.apply_formatter(f)
f.savefig("XTOMO_filter.pdf", bbox_inches='tight')
f.savefig("XTOMO_filter.pgf", bbox_inches='tight')
| markchil/thesiscode | plot_xtomo_filter.py | plot_xtomo_filter.py | py | 1,117 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "setupplots.thesis_format",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sys.pat... |
14656210330 | from random import choice
from faker import Faker
# Canadian bank names used to simulate a financing institution (French locale).
bank_names_fr = [
    "Banque CIBC",
    "BMO Banque de Montréal",
    "Banque Desjardins",
    "Banque HSBC Canada",
    "Banque Laurentienne du Canada",
    "Banque Nationale du Canada",
    "Banque Royale du Canada",
    "Banque Scotia",
    "Banque TD Canada Trust",
]
# English-locale counterparts, kept in the same order as bank_names_fr.
bank_names_en = [
    "CIBC Bank",
    "BMO Montreal Bank",
    "Desjardins Bank",
    "HSBC Canada Bank",
    "Laurentian Bank of Canada",
    "National Bank of Canada",
    "Royal Bank of Canada",
    "Scotia Bank",
    "TD Canada Trust Bank",
]
class FinancingFaker:
    """Fake a financing institution: a random bank name plus a fake address."""

    def __init__(self, locale: str) -> None:
        """
        A faker to fake a financing institution.

        locale (str): The locale language setting to use for simulation.
            Can either be ``'fr_CA'`` or ``'en_CA'``.

        Raises:
            ValueError: If ``locale`` is not one of the supported values.
        """
        self.locale = locale
        self.address_faker = Faker(locale=self.locale)
        if self.locale == "fr_CA":
            self.bank_name = bank_names_fr
        elif self.locale == "en_CA":
            self.bank_name = bank_names_en
        else:
            # FIX: corrected the typo "supporter" -> "supported" in the message.
            raise ValueError(f"The locale {locale} is not supported. It can either be 'fr_CA' or 'en_CA'.")

    def financing(self) -> str:
        """
        Method to fake a financing details information. Namely, its name and address.

        Return:
            A string of the bank name and address.
        """
        bank = choice(self.bank_name)
        address = self.address_faker.address()
        return f"{bank} {address}"
| GRAAL-Research/risc | risc_generator/faker/contract_faker/financing_faker.py | financing_faker.py | py | 1,559 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "faker.Faker",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 55,
"usage_type": "call"
}
] |
72962033703 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 23:47:45 2019
@author: LEX
"""
import time
import math
import torch
import os
import torch.onnx
from model import Languagemodel
from utils.data_utils import Vocab, Txtfile, Data2tensor, SaveloadHP, seqPAD, PAD, EOS, SOS
from utils.core_nns import RNNModel
# Load trained model
def load_model(model_source, use_cuda=False):
    """ Load pretrained model from source
        - model_source: link to '.args' file
        - use_cuda: set it to True if you have GPU
        Return: model, vocab
    """
    # model_args_source = './results/lm.args'
    model_args = SaveloadHP.load(model_source)
    model_args.use_cuda = use_cuda
    # Rebuild the model skeleton, then restore the trained weights from the
    # checkpoint path stored inside the args object.
    language_model = Languagemodel(model_args)
    language_model.model.load_state_dict(torch.load(model_args.trained_model))
    return language_model.model, model_args.vocab
def rev_gen(model, vocab, start_word=SOS):
    """Generate a review that starts with ``start_word`` and ends at EOS.

    Words are sampled greedily one at a time from the language model.
    NOTE: this loops until the model emits EOS; a model that never does so
    will not terminate.

    Return: the generated words joined by single spaces.
    """
    print('Generating sample review .....................')
    with torch.no_grad():
        word_idx = vocab.w2i[start_word]
        all_words = [start_word]
        # BUG FIX: the hidden state used to be re-initialised on every loop
        # iteration, throwing away the recurrent history so each word was
        # predicted from the previous word alone.  Initialise it once
        # (batch size is 1 -- one token at a time) and carry the state
        # returned by the model through the loop.
        hidden = model.init_hidden(1)
        while word_idx != vocab.w2i[EOS]:
            word_tensor = Data2tensor.idx2tensor([[word_idx]])
            output, hidden = model(word_tensor, hidden)
            label_prob, label_pred = model.inference(output)
            word_idx = label_pred.data[0][0].data.numpy()[0]
            all_words.append(vocab.i2w[word_idx])
    return ' '.join(all_words)
def wd_pred(model, vocab, sentence):
    """ Predict next word
    """
    with torch.no_grad():
        # Feed the sentence through the model one word at a time; the hidden
        # state is initialised on the first word and carried forward after.
        words = sentence.split(' ')
        for i, word in enumerate(words):
            # transform word to tensor
            word_idx = vocab.w2i[word]
            word_tensor = Data2tensor.idx2tensor([[word_idx]])
            if i == 0:
                hidden = model.init_hidden(word_tensor.size(0))
            output, hidden = model(word_tensor, hidden)
            # label_pred holds the greedy argmax prediction for the next word.
            label_prob, label_pred = model.inference(output)
            word_idx = label_pred.data[0][0].data.numpy()[0]
    # Return the word predicted after the final input word.
    return vocab.i2w[word_idx]
| k2lexus/nlp_course | nnlm/predict.py | predict.py | py | 2,413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.data_utils.SaveloadHP.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.data_utils.SaveloadHP",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "model.Languagemodel",
"line_number": 28,
"usage_type": "call"
},
{
"... |
39978019698 | import argparse
import numpy as np
import pandas as pd
import io_tools
# Sampling interval of the raw data and the cut-off applied below.
BASE_TIMESTEP = 10
MAX_TIMESTEP = 120

parser = argparse.ArgumentParser(description="Reduce displacement data for remote processing.")
parser.add_argument("disp_file", help="displacements file")
parser.add_argument("outfile", help="output file")
args = parser.parse_args()

# Load, filter to the base timestep grid, drop rows past the cut-off, save.
df = pd.read_csv(args.disp_file, dtype=io_tools.DTYPE_DICT)
df = io_tools.filter_displacement_data(df, remove_zeros=False, base_timestep=BASE_TIMESTEP)
df = df[df[io_tools.TIMESTEP] <= MAX_TIMESTEP]
df.to_csv(args.outfile)
| rohan-hitchcock/tcells-portfolio | track_analysis/reduce_data.py | reduce_data.py | py | 575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "io_tools.DTYPE_DICT",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "io_t... |
18355097609 | #this is the server file
from flask import Flask, render_template, request, redirect, session
import random
from datetime import datetime
app = Flask(__name__)
# NOTE(review): hard-coded secret key is fine for a demo, but should come
# from the environment in any real deployment.
app.secret_key = 'keep it secret, keep it safe' # set a secret key for security purposes
@app.route('/', methods=['get'])
def start():
    """Render the game page, initialising session state on first visit.

    Per instructions: the root route renders the wireframe page and that is
    about all it does.
    """
    # FIX: removed an unused ``new_time`` local that was computed here.
    if 'total_gold' not in session:
        session['total_gold'] = 0
    if 'activity_list' not in session:
        session['activity_list'] = []
    return render_template("ninjagold.html", activity_results=session['activity_list'], gold_count=session['total_gold'])
@app.route('/process_money', methods=['POST'])
def process_money():
    """Apply the gold win/loss for the clicked location and log the activity.

    The HTML form posts its 'locationclick' value here; we adjust the
    session's gold total, prepend an activity entry, and redirect to '/'.
    """
    new_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S%f')
    location_visited = request.form['locationclick']  # possible values: farm, cave, house, casino
    print("location visited: " + location_visited)
    if (location_visited == "farm"):
        new_gold = random.randint(10, 20)
        new_activity_text = "<p class=""won_activities"" >Earned " + str(new_gold) + " golds from the farm! (" + new_time + ")</p>"
    elif (location_visited == "cave"):
        new_gold = random.randint(5, 10)
        new_activity_text = "<p class=""won_activities"" >Earned " + str(new_gold) + " golds from the cave! (" + new_time + ")</p>"
    elif (location_visited == "house"):
        new_gold = random.randint(2, 5)
        new_activity_text = "<p class=""won_activities"" >Earned " + str(new_gold) + " golds from the house! (" + new_time + ")</p>"
    else:
        new_gold = random.randint(-50, 50)
        if (new_gold > 0):
            new_activity_text = "<p class=""won_activities"" >Entered a casino and won " + str(new_gold) + " golds ! (" + new_time + ")</p>"
        elif (new_gold == 0):
            # FIX: the message reports a 1-gold win, but the balance used to
            # be credited 0; credit 1 so message and total agree.
            new_gold = 1
            new_activity_text = "<p class=""won_activities"" >Entered a casino and won " + str(1) + " golds ! (" + new_time + ")</p>"
        else:
            new_activity_text = "<p class=""lost_activities"" >Entered a casino and lost " + str(new_gold*-1) + " golds Ouch! (" + new_time + ")</p>"
    print("new activity string: " + new_activity_text)
    session['total_gold'] = str(int(session['total_gold']) + new_gold)
    # FIX: mutating the stored list in place (.insert) is not detected by
    # Flask's cookie session, so the history could fail to persist; rebind a
    # fresh list so the session registers the change.
    session['activity_list'] = [new_activity_text] + session['activity_list']
    print(session['activity_list'])
    return redirect("/")
if __name__ == "__main__":
    # Run the development server with the interactive debugger enabled.
    app.run(debug=True)
| full-time-april-irvine/kent_hervey | flask/flask_fundamentals/ninja-gold/ninja_gold.py | ninja_gold.py | py | 2,924 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.session",
... |
40209432333 | import csv
import numpy as np
from pymc import Gamma, Normal, NoncentralT, Binomial, Uniform, invlogit, logit, deterministic, stochastic
### SETUP
# CSV columns: counts_pos, counts_total, batch_id, plate_id, row, col, treatment
# FIX: open the data file in a context manager so the handle is closed.
with open('nephrine_fracs.csv') as _csv_file:
    rows = [tuple(row) for row in csv.reader(_csv_file)]
pos_counts, total_counts, batch_ids, plate_ids, row_ids, col_ids, treatment_ids = [np.array(v) for v in zip(*rows)]
pos_counts = pos_counts.astype(int)
total_counts = total_counts.astype(int)
num_wells = len(pos_counts)

# batches
batch_names, batch_idxs = np.unique(batch_ids, return_inverse=True)
num_batches = len(batch_names)

# plates
plate_names, plate_idxs = np.unique(plate_ids, return_inverse=True)
num_plates = len(plate_names)

# batchrows, batchcols: a (batch, row) / (batch, col) pair gets its own id.
batchrow_names, batchrow_idxs = np.unique(['batchrow_%s_%s'%(b, r) for b, r in zip(batch_ids, row_ids)], return_inverse=True)
batchcol_names, batchcol_idxs = np.unique(['batchcol_%s_%s'%(b, c) for b, c in zip(batch_ids, col_ids)], return_inverse=True)
num_batchrows = len(batchrow_names)
num_batchcols = len(batchcol_names)

# treatments
treatment_names, treatment_idxs = np.unique(treatment_ids, return_inverse=True)
num_treatments = len(treatment_names)
### MODEL
# Hierarchical logistic model: logit(p_well) = base + batch + plate + row/col + treatment.

# base effect, uninformative prior
base_fx = Normal('base', mu=0, tau=0.001, size=1, value=np.zeros(1))

# batch effect, somewhat informative prior
batch_fx = Normal('batch_fx', mu=0, tau=0.1, size=num_batches, value=np.zeros(num_batches))

# plate effect, two-level prior, somewhat informative
plate_prec = Gamma('plate_prec', alpha=0.1, beta=0.1)
plate_fx = np.array([Normal('plate_fx_%s'%(name), mu=0, tau=plate_prec, value=0) for name in plate_names])

# batch row and column effects, two-level prior (shared precision)
batchrowcol_prec_base = Gamma('batchrowcol_prec_prior', alpha=0.01, beta=0.01)
batchrow_fx = np.array([Normal('batchrow_fx_%s'%(name), mu=0, tau=batchrowcol_prec_base, value=0) for name in batchrow_names])
batchcol_fx = np.array([Normal('batchcol_fx_%s'%(name), mu=0, tau=batchrowcol_prec_base, value=0) for name in batchcol_names])

def initial_guess(treatment):
    # Median logit of the (+1/+2 smoothed) positive fraction for this treatment,
    # used to start the sampler near a plausible value.
    return np.median(logit((pos_counts[treatment_ids == treatment] + 1).astype(float) / (total_counts[treatment_ids == treatment] + 2)))

# treatment effect - individual precisions
# NB: these are the values we are interested in capturing.
treatment_prec = [Gamma('treatment_prec_%s'%(name), alpha=0.01, beta=0.01, value=0.5) for name in treatment_names]
treatment_fx = np.array([Normal('treatment_fx_%s'%(name), mu=0, tau=treatment_prec[idx], value=initial_guess(name)) for idx, name in enumerate(treatment_names)])

# # well effects - we want to allow outliers, so use a 3-parameter
# # Student's t distribution (see ARM, pg. 384, Gelman & Hill)
# # nu = degrees of freedom
# well_df_inv = Uniform('well_df_inv', lower=0.0, upper=0.5, value=0.25)
# @deterministic(plot=False)
# def well_df(well_df_inv=well_df_inv):
#     return 1.0 / well_df_inv
#
# #lam = scale
# @deterministic(plot=False)
# def well_lam(well_df=well_df):
#     return (well_df - 2) / well_df
#
# well_fx = np.array([NoncentralT('well_fx_%d'%(wellidx), mu=0, lam=well_lam, nu=well_df, value=0) for wellidx in range(num_wells)])

# Unnobserved probabilities per well
@deterministic(plot=False)
def p_wells(base_fx=base_fx,
            batch_fx=batch_fx,
            plate_fx=plate_fx,
            batchrow_fx=batchrow_fx,
            batchcol_fx=batchcol_fx,
            treatment_fx=treatment_fx):
    # use this ordering to make everything turn into an ArrayContainer
    return invlogit(treatment_fx[treatment_idxs] +
                    base_fx +
                    batch_fx[batch_idxs] +
                    plate_fx[plate_idxs] +
                    batchrow_fx[batchrow_idxs] +
                    batchcol_fx[batchcol_idxs])

# Likelihood: observed positive counts are Binomial(n=total, p=p_wells).
pos_counts_likelihood = Binomial('pos_counts', value=pos_counts, n=total_counts, p=p_wells, observed=True, verbose=0)
| thouis/works-in-progress | hierscore/nephrine_frac.py | nephrine_frac.py | py | 3,924 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 1... |
34082922202 | import sys
from api.hh_api import HeadHunterAPI
from config import EMPLOYEERS_VACANCY_ID
from database.db_manager import DBManager
from app.mixin_menu_app import MixinMenuAPP
from utils.сurrency_сonverter import get_currency_data
from app.job_search_meta import JobSearchAppMeta
from utils.loading_progress import show_loading_progress
from utils.generate_unique import generate_unique_four_letter_value
class JobSearchApp(MixinMenuAPP, metaclass=JobSearchAppMeta):
    """
    Main job-search application.

    Inherits from the MixinMenuAPP mixin and uses the JobSearchAppMeta metaclass.

    Attributes:
        hh_api (HeadHunterAPI): HeadHunterAPI instance used to query the HeadHunter API.
        db_manager (DBManager): DBManager instance used for database access.
        existing_values (list): Already-issued ids, used to generate unique vacancy ids.
    """
    hh_api = HeadHunterAPI()
    db_manager = DBManager()
    existing_values = []

    @classmethod
    def _interact_with_user(cls) -> None:
        """
        Interact with the user.

        Connects to the database, clears and refills the vacancy tables from
        the API, runs the application's main menu, then disconnects.
        """
        cls.db_manager.connect()
        cls.db_manager.clear_tables()
        cls.__get_for_database_all_vacancies()
        cls.main_menu()
        cls.db_manager.disconnect()

    @classmethod
    def __get_for_database_all_vacancies(cls):
        """ Fetch all vacancies from the API and write them to the database.

        Iterates over the configured employers, normalises each vacancy's
        salary to RUR, and inserts the rows while showing a progress bar.
        """
        total_employers = len(EMPLOYEERS_VACANCY_ID)
        completed_employers = 0
        for employeer_name, employeer_id in EMPLOYEERS_VACANCY_ID.items():
            company_name = employeer_name
            company_vacancies = cls.hh_api.get_vacancies(employeer_id)
            for vacancy in company_vacancies:
                vacancy_name = vacancy["name"]
                vacancy_url = vacancy["alternate_url"]
                # Salary bounds default to 0 when the API omits them.
                vacancy_from = int(vacancy["salary"]["from"]) if vacancy.get("salary") is not None and vacancy[
                    "salary"].get("from") is not None else 0
                vacancy_to = int(vacancy["salary"]["to"]) if vacancy.get("salary") is not None and vacancy[
                    "salary"].get("to") is not None else 0
                # Convert non-rouble salaries to RUR with the current rate.
                if vacancy.get("salary") and vacancy["salary"]["currency"] not in ["RUR", "RUB"]:
                    vacancy_from *= get_currency_data(vacancy["salary"]["currency"])
                    vacancy_to *= get_currency_data(vacancy["salary"]["currency"])
                vacancy_currency = "RUR"
                vacancy_id = generate_unique_four_letter_value(cls.existing_values)
                cls.db_manager.insert_vacancy_to_all(vacancy_id, company_name, vacancy_name, vacancy_from,
                                                     vacancy_to,
                                                     vacancy_currency,
                                                     vacancy_url)
                cls.db_manager.insert_vacancy_company(vacancy_id, company_name, vacancy_name, vacancy_from,
                                                      vacancy_to,
                                                      vacancy_currency,
                                                      vacancy_url)
            completed_employers += 1
            show_loading_progress(completed_employers, total_employers)
        sys.stdout.write("\rЗагрузка завершена!\n")

    @classmethod
    def _get_vacancies_with_keyword(cls, keyword):
        """ Print the vacancies whose titles contain the given keyword.

        Arguments:
            keyword (str): Keyword to search vacancy titles for.
        """
        result_df = cls.db_manager.get_vacancies_with_keyword(keyword)
        result_str = result_df.to_string(index=False)
        print(result_str)

    @classmethod
    def _get_avg_salary(cls):
        """ Print the average salary across all vacancies in the database. """
        print(cls.db_manager.get_avg_salary())

    @classmethod
    def _get_vacancies_with_higher_salary(cls):
        """ Print the vacancies whose salary is above the overall average. """
        result_df = cls.db_manager.get_vacancies_with_higher_salary()
        result_str = result_df.to_string(index=False)
        print(result_str)

    @classmethod
    def _get_companies_and_vacancies_count(cls):
        """ Print every company together with its number of vacancies. """
        result_df = cls.db_manager.get_companies_and_vacancies_count()
        result_str = result_df.to_string(index=False)
        print(result_str)

    @classmethod
    def _get_top_vacancies(cls):
        """ Print the top vacancies (company, title, salary and URL). """
        result_df = cls.db_manager.get_top_vacancies()
        result_str = result_df.to_string(index=False)
        print(result_str)
| AndreyAgeew/skypro-course_work_5 | app/job_search_app.py | job_search_app.py | py | 6,996 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "app.mixin_menu_app.MixinMenuAPP",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "app.job_search_meta.JobSearchAppMeta",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "api.hh_api.HeadHunterAPI",
"line_number": 24,
"usage_type": "call"
... |
39430384118 | import unittest
import pathlib
from helpers import FakeWriter, a_wait
import grole
class TestHeader(unittest.TestCase):
    """Checks the status line and headers that Response._write emits."""

    def test_header(self):
        # 'bar' stands in for the HTTP version, 123 for the status code and
        # 'foo' for the reason phrase; {'foo': 'bar'} is an extra header.
        res = grole.Response(None, 123, 'foo', {'foo': 'bar'}, 'bar')
        writer = FakeWriter()
        a_wait(res._write(writer))
        # Every emitted CRLF-separated line must be one of the expected ones.
        for line in writer.data.split(b'\r\n'):
            if line.startswith(b'bar'):
                self.assertEqual(line, b'bar 123 foo')
            elif line.startswith(b'Content-Type'):
                self.assertEqual(line, b'Content-Type: text/plain')
            elif line.startswith(b'Content-Length'):
                self.assertEqual(line, b'Content-Length: 0')
            elif line.startswith(b'foo'):
                self.assertEqual(line, b'foo: bar')
            elif line.startswith(b'Server'):
                self.assertEqual(line, b'Server: grole/' + grole.__version__.encode())
            else:
                if line != b'':
                    self.fail('Extra data: ' + line.decode())
class TestBody(unittest.TestCase):
    """Checks ResponseBody and Response's automatic body-type selection."""

    def test_headers(self):
        res = grole.ResponseBody(b'foo', content_type='bar')
        hdr = {}
        res._set_headers(hdr)
        self.assertDictEqual(hdr, {'Content-Length': 3,
                                   'Content-Type': 'bar'})

    def test_data(self):
        res = grole.ResponseBody(b'foo', content_type='bar')
        writer = FakeWriter()
        a_wait(res._write(writer))
        self.assertEqual(writer.data, b'foo')

    # The remaining cases verify which body class Response picks for each
    # payload type passed to its constructor.
    def test_bytes(self):
        res = grole.Response(b'foo')
        self.assertIsInstance(res.data, grole.ResponseBody)

    def test_string(self):
        res = grole.Response('foo')
        self.assertIsInstance(res.data, grole.ResponseString)

    def test_json(self):
        res = grole.Response(['foo'])
        self.assertIsInstance(res.data, grole.ResponseJSON)

    def test_file(self):
        res = grole.Response(grole.ResponseFile('foo'))
        self.assertIsInstance(res.data, grole.ResponseFile)
class TestString(unittest.TestCase):
    """Tests for string response bodies."""

    def setUp(self):
        self.response = grole.ResponseString('foo', content_type='bar')

    def test_headers(self):
        headers = {}
        self.response._set_headers(headers)
        self.assertDictEqual(headers,
                             {'Content-Type': 'bar', 'Content-Length': 3})

    def test_data(self):
        sink = FakeWriter()
        a_wait(self.response._write(sink))
        self.assertEqual(sink.data, b'foo')
class TestJSON(unittest.TestCase):
    """Tests for JSON-encoded response bodies."""

    def setUp(self):
        self.response = grole.ResponseJSON({'foo': 'bar'}, content_type='baz')

    def test_headers(self):
        headers = {}
        self.response._set_headers(headers)
        self.assertDictEqual(headers,
                             {'Content-Type': 'baz', 'Content-Length': 14})

    def test_data(self):
        sink = FakeWriter()
        a_wait(self.response._write(sink))
        self.assertEqual(sink.data, b'{"foo": "bar"}')
class TestFile(unittest.TestCase):
    """Tests for chunked file response bodies."""

    def setUp(self):
        data_path = pathlib.Path(__file__).parents[0] / 'test.dat'
        self.response = grole.ResponseFile(str(data_path), content_type='baz')

    def test_headers(self):
        headers = {}
        self.response._set_headers(headers)
        self.assertDictEqual(headers,
                             {'Content-Type': 'baz',
                              'Transfer-Encoding': 'chunked'})

    def test_data(self):
        sink = FakeWriter()
        a_wait(self.response._write(sink))
        # One 4-byte chunk ("foo\n") followed by the zero-length terminator.
        self.assertEqual(sink.data, b'4\r\nfoo\n\r\n0\r\n\r\n')
class TestAuto(unittest.TestCase):
    """Tests that Response picks the right body wrapper for each payload."""

    def test_empty(self):
        self.assertIsInstance(grole.Response().data, grole.ResponseBody)

    def test_bytes(self):
        res = grole.Response(b'foo')
        self.assertIsInstance(res.data, grole.ResponseBody)
        self.assertEqual(res.data._data, b'foo')
        self.assertEqual(res.data._headers['Content-Type'], 'text/plain')

    def test_str(self):
        res = grole.Response('foo')
        self.assertIsInstance(res.data, grole.ResponseString)
        self.assertEqual(res.data._data, b'foo')
        self.assertEqual(res.data._headers['Content-Type'], 'text/html')

    def test_json(self):
        res = grole.Response({'foo': 'bar'})
        self.assertIsInstance(res.data, grole.ResponseJSON)
        self.assertEqual(res.data._data, b'{"foo": "bar"}')
        self.assertEqual(res.data._headers['Content-Type'], 'application/json')
# Allow running the test suite directly: `python test_response.py`.
if __name__ == '__main__':
    unittest.main()
| witchard/grole | test/test_response.py | test_response.py | py | 4,504 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "grole.Response",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "helpers.FakeWriter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "helpers.a_wai... |
30329681031 | import scrapy
# TF-IDF
# import StemmerFactory class
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from math import log10
# Create one module-level Sastrawi stemmer (Indonesian word stemming),
# shared by the TF-IDF helpers below.
factory = StemmerFactory()
stemmer = factory.create_stemmer()
class QuotesSpider(scrapy.Spider):
    """Scrapes the first ten chapters of "The First Order" from worldnovel.online."""

    name = "tubes_novel5"

    def start_requests(self):
        base_url = 'https://www.worldnovel.online/'
        novel_slug = 'the-first-order/'
        # URL slugs for chapters 1-10 of the novel.
        chapter_slugs = (
            'chapter-1-a-sickness-in-the-head/',
            'chapter-2-this-world-has-never-trusted-tears/',
            'chapter-3-a-palace/',
            'chapter-4-luck-is-a-type-of-skill-too/',
            'chapter-5-the-school/',
            'chapter-6-walls-and-science/',
            'chapter-7-substitute-teacher/',
            'chapter-8-something-really-is-wrong-with-his-head/',
            'chapter-9-ask-me-if-theres-anything-you-dont-understand/',
            'chapter-10-side-quest/',
        )
        for slug in chapter_slugs:
            # Scrapy issues the request and routes the response to parse().
            yield scrapy.Request(url=f'{base_url}{novel_slug}{slug}',
                                 callback=self.parse)

    def parse(self, response):
        # Chapter title plus the full chapter text found in <p> tags.
        yield {
            'jdlChap': response.css('#outer-wrapper > div > h3::text').extract(),
            'textNovel': response.css('#soop > p ::text').extract(),
        }
# Helper for the TF-IDF processing block below.
def get_list_of_word(list_of_chapNovel):
    """Return the unique stemmed words found across *list_of_chapNovel*.

    Each entry is stemmed with the module-level Sastrawi ``stemmer`` and
    split on single spaces; words are kept in first-seen order.

    BUG FIX: the original tested ``word not in list_of_chapNovel`` (the
    list of chapter texts), which is essentially always true, so every
    duplicate word was appended. The membership test now uses the word
    set itself (a set, to avoid O(n^2) list scans).
    """
    seen = set()
    list_of_word = []
    for sentence in list_of_chapNovel:
        for word in stemmer.stem(sentence).split(' '):
            if word not in seen:
                seen.add(word)
                list_of_word.append(word)
    return list_of_word
# Open the store file produced by the Scrapy crawl above (written as JSON via
# a `scrapy crawl ... -o novel_the_first_order.json` command) and load its
# whole content as a single text "chapter".
# BUG FIX: the file handle was never closed; use a context manager.
with open('novel_the_first_order.json', encoding='utf-8') as novel_file:
    list_of_chapNovel = [novel_file.read()]
length_of_chapNovel = len(list_of_chapNovel)

# Unique stemmed words extracted from the chapter texts.
list_of_word = get_list_of_word(list_of_chapNovel)

# Spot-check a handful of extracted words by index (same indices, and the
# same one-per-line output, as the original repeated print statements).
for word_index in (25, 688, 702, 899, 917, 918, 1200, 1400, 1539, 1993):
    print(list_of_word[word_index])
| VicinthiaVS/Tugas-Besar-Scrapy-2014321018-Pagi-Ubhara-Surabaya | soal3/tubes_novel/spiders/novel5.py | novel5.py | py | 3,112 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Sastrawi.Stemmer.StemmerFactory.StemmerFactory",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scrapy.Spider",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 34,
"usage_type": "call"
}
] |
74222814823 | #!/usr/bin/env python3
import yaml
import rospy
import os
from uav_abstraction_layer.srv import TakeOff, GoToWaypoint, Land
from geometry_msgs.msg import PoseStamped
class WayPointTracker:
    """Loads a YAML flight plan and flies it through the UAL services.

    Construction is blocking and executes the whole plan immediately:
    take off, visit every waypoint (endlessly when ``loop_waypoints`` is
    set in the plan), fly to the landing position, and land.
    """

    def __init__(self):
        self.uav_namespace = rospy.get_param("~uav_namespace", "")
        self.file_path = rospy.get_param("~flight_plan_path", "")

        # Create the service proxies
        self.take_off_service_name = os.path.join("/", self.uav_namespace, "ual/take_off")
        self.go_to_service_name = os.path.join("/", self.uav_namespace, "ual/go_to_waypoint")
        self.land_service_name = os.path.join("/", self.uav_namespace, "ual/land")
        self.take_off_service = self.registerService(self.take_off_service_name, TakeOff)
        self.go_to_service = self.registerService(self.go_to_service_name, GoToWaypoint)
        self.land_service = self.registerService(self.land_service_name, Land)

        # Load flight plan
        self.raw_flight_plan = None
        self.flight_plan = None
        self.loop_waypoints = None
        self.loadFlightPlan()
        self.parseFlightPlan()

        # Execute flight plan
        self.takeOff(self.take_off_service, self.flight_plan['TakeOff'])
        self.followPoints(self.go_to_service, self.flight_plan['Waypoints'])
        self.goToWaypoint(self.go_to_service, self.flight_plan['LandingPosition'])
        self.land(self.land_service)

    def loadFlightPlan(self):
        """Read the YAML flight plan at self.file_path into raw_flight_plan.

        Raises ValueError when the file does not exist.
        """
        if not os.path.exists(self.file_path):
            raise ValueError("The file does not exist, please check the provided path.")
        with open(self.file_path, 'r') as wp_file:
            self.raw_flight_plan = yaml.safe_load(wp_file)

    def makeStampedPose(self, data, frame):
        """Build a PoseStamped from [x, y, z, qx, qy, qz, qw] in *frame*."""
        waypoint = PoseStamped()
        waypoint.header.frame_id = frame
        waypoint.pose.position.x = data[0]
        waypoint.pose.position.y = data[1]
        waypoint.pose.position.z = data[2]
        waypoint.pose.orientation.x = data[3]
        waypoint.pose.orientation.y = data[4]
        waypoint.pose.orientation.z = data[5]
        waypoint.pose.orientation.w = data[6]
        return waypoint

    def parseFlightPlan(self):
        """Convert the raw YAML dict into take-off height and PoseStamped lists."""
        self.loop_waypoints = self.raw_flight_plan['loop_waypoints']
        self.flight_plan = {}
        self.flight_plan['TakeOff'] = self.raw_flight_plan['take_off_altitude']
        self.flight_plan['LandingPosition'] = self.makeStampedPose(self.raw_flight_plan['landing_position'],
                                                                   self.raw_flight_plan['frame_id'])
        self.flight_plan['Waypoints'] = []
        for i in self.raw_flight_plan['waypoints']:
            waypoint = self.makeStampedPose(i, self.raw_flight_plan['frame_id'])
            self.flight_plan['Waypoints'].append(waypoint)

    def followPoints(self, go_to_service, waypoints):
        """Visit *waypoints* once, or repeatedly while loop_waypoints is set."""
        if self.loop_waypoints:
            while (not rospy.is_shutdown()):
                for waypoint in waypoints:
                    self.goToWaypoint(go_to_service, waypoint)
        else:
            for waypoint in waypoints:
                self.goToWaypoint(go_to_service, waypoint)

    def registerService(self, service_path, service_type):
        """Block until *service_path* exists; return a proxy (None on failure)."""
        rospy.wait_for_service(service_path)
        service = None  # BUG FIX: `service` was unbound when ServiceProxy raised
        try:
            service = rospy.ServiceProxy(service_path, service_type)
        except rospy.ServiceException as e:
            # BUG FIX: print() does not interpolate "%s"; format explicitly.
            print("Service registration failed: %s" % e)
        return service

    def goToWaypoint(self, go_to_service, waypoint):
        """Fly (blocking) to *waypoint*."""
        try:
            go_to_service(waypoint, True)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)

    def takeOff(self, take_off_service, height):
        """Take off (blocking) to *height*."""
        try:
            take_off_service(height, True)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)

    def land(self, land_service):
        """Land (blocking)."""
        try:
            land_service(True)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
# Entry point: register the ROS node, then fly the configured plan.
# Note: WayPointTracker's constructor blocks until the plan is complete.
if __name__ == "__main__":
    rospy.init_node('waypoint_tracker')
    WPT = WayPointTracker()
| AntoineRichard/sesame_ul_uavs | src/simple_mission.py | simple_mission.py | py | 4,141 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.get_param",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
11370141393 | import torch
import numpy as np
from ml.modules.losses.ordinal_regression_loss import OrdinalRegressionLoss
# Experiment settings: KITTI config with SID depth discretization.
config_path = '../config/config_files/kitti_base.yaml'
ord_num = 90
gamma = -0.97
beta = 90

# NOTE(review): load_config and build_loader are not imported in this file —
# presumably provided by a project utility module; confirm before running.
config = load_config(config_path)
config['model']['params']['discretization'] = "SID"
ordinal_regression_loss = OrdinalRegressionLoss(ord_num, beta)
val_loader, niter_val = build_loader(config, is_train=False)

rmses = []
for i, data in enumerate(val_loader):
    gt = data['target'].unsqueeze(0)
    # Ordinal mask for the ground truth; the per-pixel label is the number
    # of ordinal thresholds not exceeded.
    _, gt_mask = ordinal_regression_loss._create_ord_label(gt)
    label = ord_num - torch.sum(gt_mask, dim=1)
    # SID bin edges; the reconstructed depth is the bin centre shifted by gamma.
    t0 = torch.exp(np.log(beta) * label.float() / ord_num)
    t1 = torch.exp(np.log(beta) * (label.float() + 1) / ord_num)
    depth_gt = (t0 + t1) / 2 - gamma
    depth_gt = np.squeeze(depth_gt.numpy())
    gt = np.squeeze(gt.numpy())
    # Evaluate only valid pixels (positive ground-truth depth).
    gt_mask = gt > 0
    gt = gt[gt_mask]
    depth_gt = depth_gt[gt_mask]
    # RMSE scaled by 1000 — presumably metres to millimetres; TODO confirm.
    rmse = np.sqrt(np.mean((depth_gt - gt) ** 2)) * 1000
    rmses.append(rmse)
    print(f'{i + 1} / {len(val_loader)}, RMSE: {rmse}')

mean_rmse = np.mean(rmses)
print(f'Mean minimum RMSE: {mean_rmse}')
| gregiberri/DepthPrediction | measuring_scripts/min_error_with_dorn.py | min_error_with_dorn.py | py | 1,138 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ml.modules.losses.ordinal_regression_loss.OrdinalRegressionLoss",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 26,
"usage_type": "call"
},
{
... |
6751040076 | import sys
import decimal
import re
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import Qt
import user as userdetail
from lib.utils.evalenv import evalenv
from db.models import Selfdefinedformat, Forms
from product.controllers.productcontroller import ProductController
from stuff.controllers.stuffcontroller import StuffController
from labrecord.controllers.labrecordscontroller import LabrecordsController
from lib.xmlwidget.xmlcheckbox import XmlCheckBox
from lib.xmlwidget.xmlcombobox import XmlComboBox
from lib.xmlwidget.xmlexprbox import XmlExprBox
from lib.xmlwidget.xmllineedit import XmlLineEdit
from lib.xmlwidget.xmlsignbox import XmlSignBox
from lib.xmlwidget.xmllabel import XmlTextEdit
from tesui import Ui_Form
# Batch-record product info variables: XML placeholder name -> product field.
PRODUCT_DICT = {'PBIANHAO': 'prodid', 'PMING': 'prodname', 'PGUIGE': 'spec',
                'PBZGG': 'package', 'PTMING': 'commonname', 'PPIHAO': 'batchno',
                'PSHIJI': 'realamount', 'PJIHUA': 'planamount',
                'PJIXING': 'medkind'
                }
# Batch-record stuff (material) info variables: placeholder -> stuff field.
STUFF_DICT = {'ID': 'stuffid', 'MING': 'stuffname', 'PIHAO': 'batchno',
              'LEIBIE': 'kind', 'GUIGE': 'spec', 'BZGG': 'package',
              'JBDW': 'unit', 'XBZDW': 'spunit', 'ZBZDW': 'mpunit',
              'DBZDW': 'bpunit',
              'JIHUA': 'presamount', 'SHIJI': 'realamount',
              'LINGQU': 'drawamount',
              'SHENGYU': 'restamount', 'TUIKU': 'backamount',
              'SHUIFEN': 'water',
              'HANLIANG': 'content', 'CHANGJIA': 'producer'
              }
# Stuff kind prefix -> stufftype code(s).
STUFF_KIND = {'ZF': (0, 1), 'ZC': 0, 'FC': 1, 'NB': 2, 'WB': 3, 'BC': (2, 3),
              'QC': 4}
# Lab report info variables:
# sample id, sample name, batch no, semi-finished sample amount, finished
# sample amount, producer, report no, sample quantity.
LAB_DICT = {'SID': 'chkid', 'SMING': 'chkname', 'JPPIHAO': 'batchno',
            'MQUYANG': 'samplecount', 'PQUYANG': 'samplecount',
            'SPRODUCER': 'producer', 'SBGBH': 'paperno',
            'JPSHULIANG': 'checkamount'
            }
# Text alignment codes and their Qt equivalents (index-aligned tuples).
alignment = ('L', 'C', 'R')
qtalign = (Qt.AlignLeft, Qt.AlignHCenter, Qt.AlignRight)
class ReadXML(QDialog, Ui_Form):
    """Renders a GMP paper form, described in XML, into Qt widgets.

    Elements (<Title>, <TextBox>, <Box>, <CheckBox>, <ComboBox>,
    <Signature>, <Expr>, <br>) are laid out left to right using a pixel
    cursor (current_X, current_Y); <br> starts a new row.

    NOTE(review): this class references QtCore, QtWidgets, QtXmlPatterns
    and QtGui, none of which appear in this file's visible imports —
    confirm they are imported elsewhere or add the missing PyQt5 imports.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.reader = QtCore.QXmlStreamReader()
        self.writer = QtXmlPatterns.QXmlQuery()
        # self.pix = QtGui.QPixmap(800, 600)
        # self.pix.fill(QtCore.Qt.white)
        # Wireframes: collect every box's geometry first, repaint at the end.
        self.line_border = list()
        # Layout cursor (top-left of the next widget), in pixels.
        self.current_X = 20
        self.current_Y = 10
        self.form = parent
        # autoid of the record to query
        self.autoid = 0
        # What to query: 0 test, 1 production, 2 inspection
        self.type = 0
        self.proddetail = None
        self.stuffdetail = None
        self.mstuffdetail = None
        self.labdetail = None
        # Production stuff, grouped by kind prefix (see STUFF_KIND).
        self.stuffdetailZF = list()
        self.stuffdetailZC = list()
        self.stuffdetailFC = list()
        self.stuffdetailNB = list()
        self.stuffdetailWB = list()
        self.stuffdetailBC = list()
        self.stuffdetailQC = list()
        # Same grouping for the "M" stuff variant.
        self.stuffdetailMZF = list()
        self.stuffdetailMZC = list()
        self.stuffdetailMFC = list()
        self.stuffdetailMNB = list()
        self.stuffdetailMWB = list()
        self.stuffdetailMBC = list()
        self.stuffdetailMQC = list()

    # Sizing convention: a Chinese char is 14 px, an ASCII char 7 px, so
    # "system length" * 7 = pixels.
    # (number of Chinese chars * 2 = system length; * 14 = pixels)
    # system length * 7 = pixels
    def read(self, file):
        """Parse the XML form description in *file* and build its widgets.

        *file* is a path; if it cannot be opened, it is fed to the XML
        reader directly as raw data.
        """
        qfile = QtCore.QFile(file)
        # If the file opens, attach the reader/writer to it; otherwise
        # treat `file` as the XML data itself.
        if qfile.open(QtCore.QFile.ReadWrite | QtCore.QFile.Text):
            self.reader.setDevice(file)
            self.writer.setDevice(file)
        else:
            self.reader.addData(file)
        s = self.writer.setQuery(file + '/GMPPaper/Title[2]')
        print(s)
        self.reader.setNamespaceProcessing(0)
        self.reader.readNextStartElement()
        if self.reader.isStartDocument():
            self.reader.readNextStartElement()
        # Until the end of the document (and no error encountered)
        while not self.reader.atEnd():
            if self.reader.isStartElement():
                name = self.reader.name()
                if name == "GMPPaper":
                    pass
                # Title box
                elif name == "Title":
                    self.titleBox()
                # Text input box
                elif name == "TextBox":
                    self.inputBox()
                # Wireframe
                elif name == "Box":
                    self.wireframe()
                # Check box
                elif name == "CheckBox":
                    self.checkBox()
                # Combo box
                elif name == "ComboBox":
                    self.comboBox()
                # Signature box
                elif name == "Signature":
                    self.signBox()
                elif name == "Expr":
                    self.exprBox()
                elif name == "br":
                    self.wrapBox()
            # Whitespace-only characters, including newlines
            elif self.reader.isWhitespace():
                pass
            # Plain character data
            elif self.reader.isCharacters():
                pass
            self.reader.readNextStartElement()
        # If reading produced an error, print the error message
        # if self.reader.hasError():
        # print(self.reader.errorString())
        # raise ValueError
        if qfile.isOpen():
            qfile.close()
        # self.update()
        self.scrollAreaWidgetContents.setLineBorder(self.line_border)
        '''
        try:
        except Exception as e:
            print(repr(e))
            pass
            # raise ValueError
        '''

    # Title box
    def titleBox(self):
        """Render a <Title> element as an aligned QLabel."""
        widget = QtWidgets.QLabel(self.scrollAreaWidgetContents)
        widget.setContentsMargins(2, 0, 0, 0)
        self.boxresize(widget)
        # L C R: left / center / right alignment
        align = self.reader.attributes().value("align")
        widget.setAlignment(
            QtCore.Qt.AlignVCenter | qtalign[alignment.index(align)])
        widget.setText(self.set_vars(self.reader.readElementText()))

    # Input box
    def inputBox(self):
        """Render a <TextBox> element as a caption QLabel plus a QLineEdit."""
        widgetlabel = QtWidgets.QLabel(self.scrollAreaWidgetContents)
        widgetlineedit = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
        # self.reader.lineNumber()
        # widgetlineedit.setObjectName('widget' + self.reader.columnNumber())
        widgetlabel.setContentsMargins(2, 0, 0, 0)
        widgetlineedit.setContentsMargins(1, 0, 1, 0)
        # self.boxresize(widgetlineedit)
        widgetlabel.move(self.current_X, self.current_Y)
        widgetlabel.setText(
            self.set_vars(self.reader.attributes().value("Title")))
        widgetlineedit.editingFinished.connect(self.widgetedit)
        L_width = int(self.reader.attributes().value("Width")) * 7
        L_height = int(self.reader.attributes().value(
            "Height")) * 7 if self.reader.attributes().value(
            "Height") else 20
        if L_width:
            # Title width attribute is non-zero: use it as given
            widgetlabel.resize(L_width, L_height)
            self.current_X += L_width
        else:
            # Title text is non-empty: size the label to fit its content
            if self.reader.attributes().value("Title"):
                widgetlabel.adjustSize()
                widgetlabel.resize(widgetlabel.size().width(), L_height)
            else:
                widgetlabel.resize(0, L_height)
            self.current_X += widgetlabel.size().width()
        self.boxresize(widgetlineedit, "MaxLength", "MaxHeight")
        wid = self.reader.attributes().value("ID")
        widgetlineedit.setText(self.set_vars(self.reader.readElementText()))
        if wid:
            try:
                # Cache numeric field values as attributes for later formulas.
                setattr(self, wid, decimal.Decimal(widgetlineedit.text()))
            except:
                setattr(self, wid, '')

    # Wireframe
    def wireframe(self):
        """Record a <Box> element's rectangle; all boxes are painted later."""
        linewidth = int(self.reader.attributes().value("width")) * 7
        lineheight = int(self.reader.attributes().value("height")) * 20
        penwidth = int(self.reader.attributes().value("PenWidth"))
        qrect = QtCore.QRect(self.current_X, self.current_Y, linewidth,
                             lineheight)
        self.line_border.append((qrect, penwidth))

    # Check box
    def checkBox(self):
        """Render a <CheckBox> element as a QCheckBox."""
        self.reader.readNextStartElement()
        widget = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
        # widget.setContentsMargins(2, 0, 0, 0)
        self.boxresize(widget)
        widget.setText(self.set_vars(self.reader.attributes().value("name")))
        widget.setChecked(int(self.reader.readElementText()))
        # self.boxresize(widget)

    # Combo box
    def comboBox(self):
        """Render a <ComboBox> element as a QComboBox."""
        index = 0
        widget = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
        self.boxresize(widget)
        # If the user may type their own value ("style"), show the "value"
        # attribute; otherwise pre-select the item at "index".
        if int(self.reader.attributes().value("style")):
            widget.setEditable(
                int(self.reader.attributes().value("style")))
            widget.setCurrentText(
                self.reader.attributes().value("value"))
        else:
            index = int(self.reader.attributes().value("index"))
        # self.boxresize(widget)
        # Add each <Item> child as a drop-down entry.
        while 1:
            self.reader.readNext()
            if self.reader.name() != 'Item':
                break
            widget.addItem(self.set_vars(self.reader.readElementText()))
        widget.setCurrentIndex(index)

    # Signature box
    def signBox(self):
        """Render a <Signature> element as a disabled, red QLineEdit."""
        widget = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
        self.boxresize(widget)
        widget.setEnabled(False)
        widget.setStyleSheet("background-color: rgb(255, 0, 0);")
        widget.setText(self.reader.readElementText(1))

    # Expression box
    def exprBox(self):
        """Render an <Expr> element: evaluate its formula, show the result."""
        widget = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
        self.boxresize(widget)
        widget.setEnabled(False)
        widget.setStyleSheet("background-color: rgb(85, 255, 255);")
        # style = self.reader.attributes().value("showformat")
        # Suffix appended after the computed result
        sibfix = ''
        # The formula to evaluate
        expr = ''
        # The expression text displayed before the result
        va = ''
        vid = self.reader.attributes().value("ID")
        while 1:
            self.reader.readNextStartElement()
            if self.reader.isEndElement() and self.reader.name() == "Expr":
                break
            if self.reader.name() == "subfix":
                sibfix = self.reader.readElementText()
            elif self.reader.name() == "expr":
                expr = self.reader.readElementText()
            elif self.reader.name() == "vars":
                va = self.reader.readElementText()
        # Rewrite '#name' references from the file into 'self.name' lookups.
        expr = expr.replace('#', 'self.')
        expr = self.set_vars(expr)
        va = self.set_vars(va)
        try:
            # SECURITY: eval() of form-supplied expressions — the XML
            # source must be trusted.
            result = str(eval(expr, evalenv(self)))
            widget.setText(va + result + sibfix)
            # NOTE(review): `id` here is the builtin function (always
            # truthy, not a valid attribute name), so this setattr always
            # raises and the except branch runs; `vid` was likely intended.
            if id:
                setattr(self, id, decimal.Decimal(result))
        except:
            return "公式格式错误"
        # widget.setText(self.reader.readElementText(1))
        if vid:
            setattr(self, vid, result)

    # Line-break box
    def wrapBox(self):
        """Move the layout cursor to the start of the next row."""
        self.current_X = 20
        self.current_Y += 20

    # Size and place a widget, then advance the layout cursor.
    # w: name of the attribute holding the widget's width
    # h: name of the attribute holding the widget's height
    def boxresize(self, widget, w="width", h="height"):
        self.setStyleSheet("margin:2 2;")
        width = int(self.reader.attributes().value(
            w)) * 7 + 4 if self.reader.attributes().value(
            w) else 134
        height = int(self.reader.attributes().value(
            h)) * 24 if self.reader.attributes().value(
            h) else 24
        widget.resize(width, height)
        widget.move(self.current_X, self.current_Y)
        self.current_X += width

    # Substitute variables into an expression string.
    def set_vars(self, exp):
        """Replace {name} (instance) and @NAME@ (system) variables in *exp*."""
        items, sys_items = self.get_vars(exp)
        if items:
            for item in set(items):
                try:
                    exp = exp.replace(item, str(getattr(self, item[1:-1])))
                except AttributeError:
                    exp = exp.replace(item, '')
        if sys_items:
            for item in set(sys_items):
                try:
                    # Slice off the leading/trailing '@'
                    # Date/time variables
                    if item[1: -1] in (
                            'NIAN', 'YUE', 'RI', 'SHI', 'FEN', 'MIAO'):
                        exp = exp.replace(item,
                                          str(getattr(userdetail, item[1:-1])))
                    # Product info variables
                    elif item[1: -1] in PRODUCT_DICT:
                        if self.proddetail is not None:
                            exp = exp.replace(item, str(
                                self.proddetail[PRODUCT_DICT[item[1: -1]]]))
                        else:
                            try:
                                self.get_sys_vars(0)
                                exp = exp.replace(item, str(
                                    self.proddetail[PRODUCT_DICT[item[1: -1]]]))
                            except IndexError:
                                exp = exp.replace(item, str(''))
                    # Product stuff, per batch kind (2-letter prefix)
                    elif item[3: -2] in STUFF_DICT or item[3: -3] in STUFF_DICT:
                        # Numeric suffix selects which stuff entry
                        num = int(re.search(r'\d+', item).group(0)) - 1
                        vals = [x for x in re.split(r'@|\d', item) if x][0]
                        var_list = getattr(self, 'stuffdetail' + item[1: 3])
                        if len(var_list):
                            exp = exp.replace(item, str(
                                getattr(var_list[num], STUFF_DICT[vals[2:]])))
                        else:
                            self.get_sys_vars(1)
                            var_list = getattr(self, 'stuffdetail' + item[1: 3])
                            exp = exp.replace(item, str(
                                getattr(var_list[num], STUFF_DICT[vals[2:]])))
                    # Product stuff, not per batch (3-letter "M" prefix)
                    elif item[4: -2] in STUFF_DICT or item[4: -3] in STUFF_DICT:
                        # Numeric suffix selects which stuff entry
                        num = int(re.search(r'\d+', item).group(0)) - 1
                        vals = [x for x in re.split(r'@|\d', item) if x][0]
                        var_list = getattr(self, 'stuffdetail' + item[1: 4])
                        if len(var_list):
                            exp = exp.replace(item, str(
                                getattr(var_list[num], STUFF_DICT[vals[3:]])))
                        else:
                            self.get_sys_vars(2)
                            var_list = getattr(self, 'stuffdetail' + item[1: 4])
                            exp = exp.replace(item, str(
                                getattr(var_list[num], STUFF_DICT[vals[3:]])))
                    # Lab report info variables
                    elif item[1: -1] in LAB_DICT:
                        if self.labdetail is not None:
                            exp = exp.replace(item, str(
                                self.labdetail[LAB_DICT[item[1: -1]]]))
                        else:
                            self.get_sys_vars(3)
                            exp = exp.replace(item, str(
                                self.labdetail[LAB_DICT[item[1: -1]]]))
                except:
                    exp = exp.replace(item, str(''))
        return exp

    # Fetch system variable values from the controllers.
    # kind — which data to fetch: 0 production plan, 1 production stuff,
    # 2 "M" production stuff, 3 lab record.
    def get_sys_vars(self, kind=0):
        if self.autoid != 0:
            try:
                if kind == 0:
                    pm = ProductController()
                    self.proddetail = pm.get_producingplan(autoid=self.autoid)[
                        0]
                elif kind == 1:
                    sm = StuffController()
                    self.stuffdetail = sm.get_prodstuff(self.autoid)
                    for item in self.stuffdetail:
                        stufftype = item.stufftype
                        if stufftype == 0:
                            self.stuffdetailZC.append(item)
                            self.stuffdetailZF.append(item)
                        elif stufftype == 1:
                            self.stuffdetailFC.append(item)
                            self.stuffdetailZF.append(item)
                        elif stufftype == 2:
                            self.stuffdetailNB.append(item)
                            self.stuffdetailBC.append(item)
                        elif stufftype == 3:
                            self.stuffdetailWB.append(item)
                            self.stuffdetailBC.append(item)
                        elif stufftype == 4:
                            self.stuffdetailQC.append(item)
                elif kind == 2:
                    sm = StuffController()
                    self.mstuffdetail = sm.get_Mprodstuff(self.autoid)
                    for item in self.mstuffdetail:
                        stufftype = item.stufftype
                        if stufftype == 0:
                            self.stuffdetailMZC.append(item)
                            self.stuffdetailMZF.append(item)
                        elif stufftype == 1:
                            self.stuffdetailMFC.append(item)
                            self.stuffdetailMZF.append(item)
                        elif stufftype == 2:
                            self.stuffdetailMNB.append(item)
                            self.stuffdetailMBC.append(item)
                        elif stufftype == 3:
                            self.stuffdetailMWB.append(item)
                            self.stuffdetailMBC.append(item)
                        elif stufftype == 4:
                            self.stuffdetailMQC.append(item)
                elif kind == 3:
                    lm = LabrecordsController()
                    self.labdetail = lm.get_labrecord(1, autoid=self.autoid)[0]
            except:
                # traceback.print_exc()
                pass

    # Extract the plain and system variables from an expression.
    # {\w*}  plain instance variables
    # @\w*@  system variables
    def get_vars(self, exp):
        pattern1 = re.compile(r'{\w*}')
        pattern2 = re.compile(r'@\w*@')
        return pattern1.findall(exp), pattern2.findall(exp)

    '''
    def paintEvent(self, event):
        if self.line_border:
            #self.pix = self.pix.scaled(self.size())
            pp = QtGui.QPainter(self)
            #pp = QtGui.QPainter(self.pix)
            pen = QtGui.QPen() # 定义笔格式对象
            for index, item in enumerate(self.line_border):
                print(item)
                pen.setWidth(item[1]) # 设置笔的宽度
                pen.setColor(QtCore.Qt.red)
                pp.setPen(pen) # 将笔格式赋值给 画笔
                # 根据鼠标指针前后两个位置绘制直线
                pp.drawRect(item[0])
                #self.line_border.pop(index)
        #painter = QtGui.QPainter(self)
        # 在画布上画出
        #painter.drawPixmap(0, 0, self.pix)
    '''

    def widgetedit(self):
        """Slot for QLineEdit.editingFinished; write-back is not implemented."""
        widget = self.sender().objectName()
        # s = self.writer.setQuery("GMPPaper/Title[2]")
        # for item in s:
        #     print(s)
        # num = widget[6:]
        # self.writer.writeCurrentToken()
        # print(self.reader.namespaceUri())
# Manual test entry point: load self-defined format #2855 from the database
# and render it with autoid 50.
# NOTE(review): QtWidgets is not imported in this file's visible header.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    mainmenu = ReadXML()
    res = Selfdefinedformat.objects.filter(autoid=2855)
    mainmenu.__setattr__('autoid', 50)
    mainmenu.read(res[0].format)
    mainmenu.show()
    sys.exit(app.exec_())
| zxcvbnmz0x/gmpsystem | lib/xmlwidget/xmlread.py | xmlread.py | py | 20,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignHCenter",
"line_number": 53,
"usage_type": "attribute"
},
{
"... |
25622745933 | from sqlalchemy import Column, ForeignKey, String, DateTime, Boolean, Enum
import sqlalchemy as sa
from sqlalchemy import func, exc
from sqlalchemy.dialects.postgresql import UUID
from uuid import uuid4
from .. import db
# NOTE: `Enum` below shadows sqlalchemy.Enum imported on the first line of
# this file; use `sa.Enum` for the SQLAlchemy column type.
from enum import Enum
from datetime import datetime
#? https://stackoverflow.com/questions/33612625/how-to-model-enums-backed-by-integers-with-sqlachemy
#? https://docs.sqlalchemy.org/en/14/core/type_basics.html#sqlalchemy.types.Enum
class CategoryEnum(Enum):
    """Allowed expense categories; values are the human-readable labels."""
    # NOTE(review): `Enum` here resolves to Python's enum.Enum because the
    # later `from enum import Enum` shadows SQLAlchemy's — confirm intended.
    tollBooth = "toll booth"
    passage = "passage"
    food = "food"
    others = "others"
class ExtraExpenses(db.Model):
    """Extra expense (toll booth, passage, food, ...) attached to a move."""

    __tablename__ = "extra_expenses"

    # BUG FIX: `default=uuid4()` was evaluated once at import time, so every
    # row would share the same id; pass the callable so each row gets a
    # fresh UUID.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    id_move = Column(UUID(as_uuid=True), ForeignKey("moves.id", ondelete="CASCADE", name="id_move"))
    description = Column(String(255), nullable=False)
    # BUG FIX: the bare `Enum` name is Python's enum.Enum (the stdlib import
    # shadows SQLAlchemy's), which is not a column type; use sa.Enum.
    category = Column(sa.Enum(CategoryEnum), nullable=False)
    amount = Column(String(18), nullable=False)
    #? https://stackoverflow.com/questions/13370317/sqlalchemy-default-datetime
    startedAt = Column(DateTime(timezone=True), nullable=False, server_default=func.now())
    updatedAt = Column(DateTime(timezone=True), nullable=True, onupdate=func.now())
    deletedAt = Column(DateTime(timezone=True), nullable=True)
    active = Column(Boolean(), nullable=False, default=True)

    @staticmethod
    def save(**kwargs):
        """Insert a new expense; return it, or {} on failure."""
        try:
            expense = ExtraExpenses(**kwargs)
            db.session.add(expense)
            db.session.commit()
            return expense
        except Exception as error:
            print(error)
            # Keep the session usable after a failed commit.
            db.session.rollback()
            return {}

    @staticmethod
    def find():
        """Return every expense row, or {} on failure."""
        try:
            return ExtraExpenses.query.filter_by().all()
        except exc.SQLAlchemyError as err:  # narrowed from a bare except
            print(err)
            return {}

    @staticmethod
    def find_one(**kwargs):
        """Return the first expense matching *kwargs*, or {} on failure."""
        try:
            return db.session.query(ExtraExpenses).filter_by(**kwargs).first()
        except exc.SQLAlchemyError as err:
            print(err)
            return {}
        finally:
            db.session.close()

    @staticmethod
    def update(**update):
        """Update the row whose id is update['id']; return the affected count or {}."""
        try:
            update["updatedAt"] = datetime.now()
            updated = (
                db.session.query(ExtraExpenses)
                .filter_by(id=str(update["id"]))
                .update(update, synchronize_session="fetch")
            )
            db.session.commit()
            return updated
        except Exception as error:
            print(error)
            db.session.rollback()
            return {}

    @staticmethod
    def delete(**kwargs) -> int:
        """Soft-delete matching rows; return the affected count or {}."""
        try:
            updated = (
                db.session.query(ExtraExpenses)
                .filter_by(**kwargs)
                .update(
                    {"active": False, "deletedAt": datetime.now()},
                    synchronize_session="fetch",
                )
            )
            db.session.commit()
            return updated
        except exc.SQLAlchemyError as err:
            print(err)
            db.session.rollback()
            return {}
{
"api_name": "enum.Enum",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql.UUID",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "uuid.... |
22868454772 | import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from datetime import datetime # Config file purpose
# Run Chrome in a private session at a fixed window size so page layout
# (and therefore the CSS selectors) behaves consistently.
options = Options()
options.add_argument("--incognito")
options.add_argument("--window-size=1920x1080")

# Driver path — module-level driver shared by the scraping layers.
driver = webdriver.Chrome(options=options, executable_path="../chromedriver_win32/chromedriver.exe")
def layer_three(layer_two_file):
    """Scrape detailed metadata for every paper listed in *layer_two_file*.

    Layer one produced author names/profile URLs and layer two produced
    paper titles/URLs. This final layer visits each paper page with the
    module-level Selenium ``driver`` and extracts title, paper link,
    abstract, authors and publication date into ``layer_three_data.csv``,
    then cleans the list-formatting artifacts from that CSV and records
    the scrape timestamp in ``version.cfg``.

    Returns True when the crawl finished.
    """
    print("Layer three is starting")

    columns = ["df_paper_title", "df_paper_url", "df_paper_abstract",
               "df_paper_author", "df_paper_date"]
    layer_three_data = pd.DataFrame(columns=columns)

    df_layer_two_file = pd.read_csv(layer_two_file)
    print("Loading dataframe")
    print(df_layer_two_file)
    print("dataframe loaded")

    for i in range(len(df_layer_two_file["df_title_url"])):
        time.sleep(2)  # throttle requests to the site
        print("Moving to " + (df_layer_two_file["df_title_url"][i]))
        driver.get(df_layer_two_file["df_title_url"][i])  # open the paper page
        print("No of paper accessed", i)

        data = {}

        # Paper title
        paperTitle = driver.find_elements_by_css_selector(".container > div > div > div:nth-child(1) > h1")
        print("Pasting paper title")
        data['df_paper_title'] = [el.text for el in paperTitle]

        # Paper link (DOI anchor)
        paper_title_links = driver.find_elements_by_css_selector(
            ".rendering_contributiontojournal_publicationaccessrenderer > ul.dois > li > div > a")
        print("Pasting paper url")
        data['df_paper_url'] = [el.get_attribute("href") for el in paper_title_links]

        # Abstract
        paper_abstract = driver.find_elements_by_css_selector(
            ".rendering_abstractportal.rendering_contributiontojournal_abstractportal > div")
        print("Pasting paper abstract")
        data['df_paper_abstract'] = [el.text for el in paper_abstract]

        # Authors
        paper_authors = driver.find_elements_by_css_selector(
            ".rendering_contributiontojournal_associatespersonsclassifiedportal > p > a:nth-child(1) > span")
        print("Pasting paper authors")
        data['df_paper_author'] = [el.text for el in paper_authors]

        # Publication date
        paper_publication_dates = driver.find_elements_by_css_selector(
            ".rendering_contributiontojournal_detailsportal > div > table > tbody > tr.status > td > span.date")
        print("Pasting paper publication date")
        data['df_paper_date'] = [el.text for el in paper_publication_dates]

        # MODERNIZATION: DataFrame.append was removed in pandas 2.x; concat a
        # one-row frame instead (same resulting row of list-valued cells).
        layer_three_data = pd.concat([layer_three_data, pd.DataFrame([data])],
                                     ignore_index=True)
        # Persist after every paper so a crash does not lose the crawl so far.
        layer_three_data.to_csv("./layer_three_data.csv", index=False)
        print("CSV saved, number of paper", i)

    # BUG FIX: the original passed the DataFrame object to pd.read_csv
    # (which raises); read the CSV back by its path.
    df_layer_three_file = pd.read_csv("./layer_three_data.csv")
    for col in columns:
        # Strip the "['...']" list formatting produced by the extraction.
        df_layer_three_file[col] = df_layer_three_file[col].str.strip('[]').str.strip("''")
    df_layer_three_file.to_csv("./layer_three_data.csv", index=False)
    print("CSV saved")
    print("End of layer three")

    # BUG FIX: mode "x" raises FileExistsError on any rerun; overwrite the
    # version stamp instead, and close the file via a context manager.
    with open("version.cfg", "w") as f:
        # datetime object containing current date and time, dd/mm/YY H:M:S
        now = datetime.now()
        dt = now.strftime("%d/%m/%Y %H:%M:%S")
        print("Saved version: ", dt)
        f.write(dt)

    driver.close()
    return True
| chois11/7071CEM-R | resources/crawler/layer_three.py | layer_three.py | py | 4,885 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
... |
74330818023 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from enigma import eInputDeviceManager, eTimer
from Screens.Screen import Screen
from Screens.Rc import Rc
from Components.Sources.List import List
from Components.ActionMap import ActionMap
from Components.config import config
from Components.Sources.StaticText import StaticText
from Tools.Directories import pathExists, resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
from Tools.Log import Log
from os import path as os_path
import six
from .CharJump import CharJump
from .InputDeviceIRDatabase import irdb
from .IrProtocols.ProtocolMaster import ProtocolMaster
from .KeyBindingList import KeyBindingList
class InputDeviceIRProg(Screen, CharJump):
    """Two-level IR database browser.

    Level 0 lists the vendors from the IR database; level 1 lists the code
    sets of the chosen vendor. Selecting a code set uploads its IR protocol
    and key table to the connected remote via the input device manager.
    """
    PLUGIN_IMAGES_PATH = "%s/images/" % (os_path.dirname(__file__))
    SKIN_IMAGES_PATH = resolveFilename(SCOPE_CURRENT_SKIN, config.skin.primary_skin.value.replace("/skin.xml", "/images/"))
    # Device-type prefixes that are pulled to the front of a vendor's code list.
    MAJOR_CODELIST_ITEMS = [ "amp", "av ", "tv", "vcr", "sat"]

    def __init__(self, session, remote):
        Screen.__init__(self, session)
        CharJump.__init__(self, session)
        self._remote = remote
        self["actions"] = ActionMap(["ListboxActions", "OkCancelActions", "EPGSelectActions"],
        {
            "ok": self._onKeyOK,
            "cancel": self._onKeyExit,
            "info" : self._onKeyInfo
        }, -1)
        self["list"] = List()
        self["list"].onSelectionChanged.append(self._onSelectionChanged)
        self._status = StaticText()
        self["status"] = self._status
        self._vendorPixmap = self._loadPixmap("vendor.svg")
        self._seperatorPixmap = self._loadPixmap("div-h.svg")
        self._level = 0       # 0 = vendor list, 1 = code sets of one vendor
        self._lastLevel = 0   # remembered vendor-list index for going back up
        self._lastVendor = ""
        self._keysAcknowledged = 0
        self._keysAckTimer = eTimer()
        self.__keysAckTimer_connection = self._keysAckTimer.timeout.connect(self._onKeysAckTimeout)
        self.__onIrKeycount_connection = eInputDeviceManager.getInstance().irKeyCount.connect(self._onIrKeyCount)
        self.onLayoutFinish.append(self._reload)

    def _onIrKeyCount(self, address, count):
        # Only react to acknowledgements coming from "our" remote.
        if address == self._remote.address():
            self._keysAcknowledged = count
            # Debounce: show the toast once the counts stop arriving.
            self._keysAckTimer.startLongTimer(2)

    def _onKeysAckTimeout(self):
        self.session.toastManager.showToast(_("%s IR codes acknowledged!") %(self._keysAcknowledged))
        self._keysAcknowledged = 0

    def _loadPixmap(self, filename, desktop=None):
        """Resolve *filename* against the skin images first, then the plugin
        images; absolute paths are used as-is. Returns None when not found."""
        picfile = None
        if filename[0] == "/" and pathExists(filename):
            picfile = filename
        else:
            for p in (self.SKIN_IMAGES_PATH, self.PLUGIN_IMAGES_PATH):
                imagepath = "%s%s" % (p, filename)
                if pathExists(imagepath):
                    picfile = "%s%s" % (p, filename)
                    break
        if picfile:
            return LoadPixmap(path=picfile, desktop=desktop, cached=False)
        return None

    def _onKeyExit(self):
        # Go one level up first; only close from the vendor list.
        if self._level == 1:
            self._level = 0
            self._reload()
            return
        self.close()

    def _getFirstForChar(self, char):#CharJump
        """Jump to the first list entry whose title starts with *char*."""
        idx = 0
        for x in self["list"].list:
            val = x[0][0]
            Log.w(val)
            if val and val[0].upper() == char: # and not val.lower() in self.MAJOR_VENDORS:
                self["list"].setIndex(idx)
                break
            idx += 1

    def _onKey0(self, unused):#CharJump
        if self["list"].count():
            self["list"].setIndex(0)

    def _reload(self, dlist=None):
        """(Re)build the displayed list for the current level.

        dlist -- code sets of the selected vendor when on level 1;
                 ignored on level 0 where the full vendor db is shown.
        """
        # BUGFIX: default used to be a shared mutable dict ({}).
        if dlist is None:
            dlist = {}
        if self._level == 0:
            dlist = irdb.data
        mlist = []
        for x, y in dlist.items():
            x = six.ensure_str(x)
            title = x
            subtitle = ""
            pic = self._seperatorPixmap
            if self._level == 0:
                lendev = len(y)
                if lendev == 1:
                    # BUGFIX: dict.keys() is a non-indexable view on Python 3;
                    # materialize it before taking the first element.
                    subtitle = "%s" % (six.ensure_str(list(y.keys())[0]))
                else:
                    subtitle = _("%s devices") % (lendev,)
            else:
                models = y.get("models", [])
                sorted_models = []
                if models:
                    for dev in models:
                        dev = six.ensure_str(dev)
                        append = True
                        for item in self.MAJOR_CODELIST_ITEMS:
                            if dev.lower().startswith(item):
                                append = False
                        # Major device types go to the front of the title.
                        if append:
                            sorted_models.append(dev)
                        else:
                            sorted_models.insert(0, dev)
                    title = " / ".join(sorted_models)
                if title == "":
                    title = _("Unknown")
                if not len(y["keys"]):
                    Log.w("No known automap-keys for %s" % (title,))
                subtitle = _("%s mapped keys") % (len(y["keys"]))
            mlist.append(((x, y), self._vendorPixmap, subtitle, title, pic))
        if self._level != 0:
            def sortCodelist(x):
                # Sort key: entries matching a MAJOR_CODELIST_ITEMS prefix get
                # a shorter "000000..." prefix, ranking them before the rest.
                x = x[0][0]
                val = "000000"
                items = self.MAJOR_CODELIST_ITEMS[:]
                items.reverse()
                for key in items:
                    if x.lower().startswith(key):
                        return val + x
                    val = "{}{}".format(val, "000000")
                return x
            mlist = sorted(mlist, key=sortCodelist)
        self["list"].setList(mlist)
        if self._level == 0:
            self["list"].setIndex(self._lastLevel)
            self.setTitle(_("Vendors"))
            self["status"].setText("%s entries" % (len(mlist),))
        else:
            self.setTitle(self._lastVendor)
            self._onSelectionChanged()

    def _onKeyOK(self):
        """Descend into the selected vendor, or send the selected code set."""
        sel = self["list"].getCurrent()
        entry = sel and sel[0]
        # BUGFIX: was "if not len(entry)" which raised TypeError when the
        # list was empty and getCurrent() returned None.
        if not entry:
            return
        if self._level == 0:
            self._level = 1
            self._lastLevel = self["list"].getIndex()
            self._lastVendor = six.ensure_str(entry[0])
            self._reload(entry[1])
        else:
            self._send(entry[1])

    def _onKeyInfo(self):
        """Open a detail screen listing the keys of the selected code set."""
        if self._level == 0:
            return
        sel = self["list"].getCurrent()
        entry = sel and sel[0]
        # BUGFIX: was "if not len(entry)" which raised TypeError on None.
        if not entry:
            return
        device, data = entry[0:2]
        title = six.ensure_str("%s - %s (%s - %s:%s)" %(self._lastVendor, device, data["protocol"], data["device"], data["subdevice"]))
        self.session.open(InputDeviceKeyInfo, title, data["keys"].keys())

    def _send(self, data):
        """Upload the IR protocol and key table of *data* to the remote."""
        protocolData = ProtocolMaster.buildProtocol(data)
        self._remote.resetIr()
        for d in protocolData: #initial / repeat
            protocol, isRepeat, keys = d
            if protocol:
                self._remote.setIrProtocol(isRepeat, protocol)
            for irKey in keys:
                self._remote.setIrKey(irKey)
        self._remote.getIrKeyCount()
        self.session.toastManager.showToast(_("%s IR codes sent!") %(len(keys)), 3)

    def _onSelectionChanged(self):
        """Show how many keys the highlighted code set would map."""
        if self._level == 0:
            return
        entry = self["list"].getCurrent()
        entry = entry and entry[0]
        if not entry:
            return
        device, data = entry
        count = len(data["keys"])
        self["status"].setText(_("Press OK to apply assign %s keys of '%s'") %(count, device))
class InputDeviceKeyInfo(Screen, Rc):
    """Read-only screen listing the keys an IR code set binds, highlighting
    the currently selected key on the on-screen remote picture."""

    def __init__(self, session, title, boundKeys):
        Screen.__init__(self, session, windowTitle=title)
        Rc.__init__(self, 3)
        key_names = sorted(six.ensure_str(key) for key in boundKeys)
        binding_list = KeyBindingList(3, key_names)
        binding_list.onSelectionChanged.append(self._onSelectionChanged)
        self["list"] = binding_list
        self["actions"] = ActionMap(["OkCancelActions"], {
            "cancel": self.close,
        }, -1)
        self.onLayoutFinish.append(self._onSelectionChanged)

    def _onSelectionChanged(self):
        """Highlight the currently selected key on the remote image."""
        self.clearSelectedKeys()
        current = self["list"].getCurrent()
        Log.w(current)
        if not (current and current[0]):
            return
        self.selectKey(current[0])
| opendreambox/enigma2 | usr/lib/enigma2/python/Plugins/SystemPlugins/InputDeviceManager/InputDeviceIRProg.py | InputDeviceIRProg.py | py | 6,801 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "Screens.Screen.Screen",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "CharJump.CharJump",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
74647038823 | import os
import networkx as nx
from networkx.drawing.nx_agraph import write_dot
from tree_parser.parser import DependencyParser
# Trained parser model lives next to this package under ../data.
_path = os.path.dirname(__file__)
_save_filename = os.path.join(_path, '../data/tree_parser.model')
_text = """
In nuclear physics, the island of stability is a predicted set of isotopes of superheavy elements
that may have considerably longer half-lives than known isotopes of these elements.
"""
if __name__ == '__main__':
    # Demo: parse the sample text into a dependency graph and dump the
    # node (token/POS) and edge (dependency label) attributes.
    parser = DependencyParser(_save_filename)
    g = parser.parse(_text)
    print('Node words:')
    print(nx.get_node_attributes(g, 'token'))
    print('Node POS tags:')
    print(nx.get_node_attributes(g, 'pos'))
    print('edge labels:')
    print(nx.get_edge_attributes(g, 'label'))
write_dot(g, 'test.dot') | fractalego/tree_parser | tree_parser/predict.py | predict.py | py | 781 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
36934201214 | import os
import sys
# On Jython the OPSIN Java package can be imported directly; on CPython we go
# through JPype, starting a JVM configured via the JPYPE_JVM and CLASSPATH
# environment variables.
if sys.platform[:4] == "java": # Jython
    import uk.ac.cam.ch.wwmm.opsin as opsin
else: # CPython
    import jpype
    if not jpype.isJVMStarted():
        _jvm = os.environ['JPYPE_JVM']
        if _jvm[0] == '"': # Handle trailing quotes
            _jvm = _jvm[1:-1]
        _cp = os.environ['CLASSPATH']
        jpype.startJVM(_jvm, "-Djava.class.path=" + _cp)
    opsin = jpype.JPackage("uk").ac.cam.ch.wwmm.opsin
# Module-level OPSIN entry points; a TypeError here means the OPSIN jar is
# not on the classpath, which we surface as an ImportError.
try:
    _nametostruct = opsin.NameToStructure.getInstance()
    _restoinchi = opsin.NameToInchi.convertResultToInChI
except TypeError:
    raise ImportError("The OPSIN Jar file cannot be found.")
informats = {'iupac': 'IUPAC name'}
"""A dictionary of supported input formats"""
outformats = {'cml': "Chemical Markup Language", 'inchi': "InChI",
              'smi': "SMILES"}
"""A dictionary of supported output formats"""
def readstring(format, string):
    """Read in a molecule from a string.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       string

    Example:
    >>> input = "propane"
    >>> mymol = readstring("iupac", input)
    """
    # Only IUPAC names are supported as input.
    if format != "iupac":
        raise ValueError("%s is not a recognised OPSIN format" % format)
    result = _nametostruct.parseChemicalName(string)
    status = str(result.getStatus())
    if status == "FAILURE":
        message = "Failed to convert '%s' to format '%s'\n%s" % (
                string, format, result.getMessage())
        raise IOError(message)
    return Molecule(result)
class Molecule(object):
    """Represent a opsinjpype Molecule.

    Required parameters:
       OpsinResult -- the result of using OPSIN to parse an IUPAC string

    Methods:
       write()

    The underlying OpsinResult can be accessed using the attribute:
       OpsinResult
    """
    _cinfony = True

    def __init__(self, OpsinResult):
        # Guard against wrapping another cinfony Molecule by mistake.
        if hasattr(OpsinResult, "_cinfony"):
            raise IOError("An opsin Molecule cannot be created from another Cinfony Molecule")
        self.OpsinResult = OpsinResult

    def __str__(self):
        return self.write()

    @property
    def _exchange(self):
        # (format marker, SMILES string) tuple used by other cinfony backends.
        return (0, self.write("smi"))

    def write(self, format="smi", filename=None, overwrite=False):
        """Write the molecule to a file or return a string.

        Optional parameters:
           format -- see the outformats variable for a list of available
                     output formats (default is "smi")
           filename -- default is None
           overwite -- if the output file already exists, should it
                       be overwritten? (default is False)

        If a filename is specified, the result is written to a file.
        Otherwise, a string is returned containing the result.
        """
        if format not in outformats:
            raise ValueError("%s is not a recognised OPSIN format" % format)
        if filename is not None and not overwrite and os.path.isfile(filename):
            raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
        if format == "cml":
            result = str(self.OpsinResult.getCml().toXML())
        elif format == "inchi":
            result = str(_restoinchi(self.OpsinResult))
        elif format == "smi":
            result = str(self.OpsinResult.getSmiles())
        if filename:
            # BUGFIX: the previous code called open() on an already-open file
            # object (a TypeError at runtime) and leaked the first handle.
            # Write the result straight to the named file instead.
            with open(filename, "w") as fp:
                fp.write(result)
        else:
            return result
if __name__=="__main__": #pragma: no cover
    # Smoke test: convert an IUPAC name and print its InChI (needs a JVM
    # with the OPSIN jar on the classpath).
    mol = readstring("iupac", "propane")
    print(mol.write("inchi"))
| cinfony/cinfony | cinfony/opsin.py | opsin.py | py | 3,688 | python | en | code | 82 | github-code | 36 | [
{
"api_name": "sys.platform",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "jpype.isJVMStarted",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"l... |
71920473385 | from pathlib import Path
import requests
import json
# user is the name of the user
# type is either "ANIME" or "MANGA"
# user is the name of the user
# type is either "ANIME" or "MANGA"
def get_list(user, type = "ANIME", queries = None):
    """Fetch the full media list for *user* from the AniList GraphQL API.

    user -- AniList user name
    type -- AniList MediaType, "ANIME" or "MANGA"
    queries -- optional pre-loaded query dict; loaded on demand when None

    Returns the decoded JSON response as a dict.
    """
    if queries is None: queries = load_queries()
    variables = {
        'name': user,
        'type': type
    }
    url = 'https://graphql.anilist.co'
    r = requests.post(url, json={'query': queries['animelist'],
                                 'variables': variables})
    # BUGFIX: the body was parsed twice (an unused "j = r.json()" followed by
    # a second r.json()); parse it once and return it.
    return r.json()
def trim_list(l, type = "anime"):
    """Flatten the grouped AniList response into one flat list of entries."""
    buckets = l['data']['MediaListCollection']['lists']
    flattened = []
    for bucket in buckets:
        flattened.extend(bucket['entries'])
    return flattened
def load_queries() -> dict:
    """Load every '*.query' file from the anilist_queries directory.

    Returns a mapping of query name (file stem) to query text.
    BUGFIX: files are now read via Path.read_text, so no file handles are
    leaked (the previous implementation left every file open).
    """
    query_dir = 'anilist_queries'
    queries = [x for x in Path(query_dir).iterdir() if x.suffix == '.query']
    return {query.stem: query.read_text() for query in queries}
if __name__ == '__main__':
    #queries = load_queries()
    #test_write(get_list('Darn', queries = queries), 'balls2')
    # Ad-hoc manual test: hits the live AniList API for user 'Darn'.
    trim_list(get_list('Darn'))
| em-ilia/anilist-sync | anilist.py | anilist.py | py | 1,062 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
}
] |
42672390948 | import logging
class LoggingService:
    """Thin wrapper that wires a stream handler with a standard timestamped
    format to a named logger and sets it to DEBUG level."""

    def __init__(self, name) -> None:
        self.logger = logging.getLogger(name)
        self.formatter = logging.Formatter(
            '%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')
        self.handler = logging.StreamHandler()
        self.handler.setFormatter(self.formatter)
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.handler)
| team-ananas-og-mango/SaxoStockService | saxo_stock_service/loggingservice.py | loggingservice.py | py | 429 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG"... |
12179384689 | import itertools
from unittest.mock import Mock
from sukoon.kernel import SukoonKernel
def test_all():
    """Run every test case found in test/basic.py.

    Cases are separated by blank lines; lines starting with '##' hold the
    expected output, all other lines are the code to execute.
    """
    # BUGFIX: 'with' now closes the fixture file (it was previously leaked).
    with open("test/basic.py") as test_data:
        test = ''
        expected = ''
        # The trailing '' sentinel flushes the final case.
        for line in itertools.chain(test_data, ['']):
            if line.startswith('##'):
                expected += line[2:].lstrip()
            elif line.strip() == '' and test and expected:
                run_single(test, expected)
                test = ''
                expected = ''
            else:
                test += line
def run_single(test, expected):
    """Execute *test* in a fresh SukoonKernel and assert the text of the
    kernel's response equals *expected*."""
    kernel = SukoonKernel()
    capture = Mock()
    kernel.send_response = capture
    kernel.do_execute(test, False)
    actual = capture.call_args[0][2]['text']
    assert expected == actual
| hyperparameter/sukoon | test/test_run.py | test_run.py | py | 735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.chain",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sukoon.kernel.SukoonKernel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 25,
"usage_type": "call"
}
] |
8757575635 | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import itertools
import json
from odoo import models, fields, api, _
from odoo.addons.sale.models.sale import SaleOrderLine as SOL
from odoo.addons.sale.models.sale import SaleOrder as SO
from odoo.tools import float_compare, float_is_zero, DEFAULT_SERVER_DATE_FORMAT
from odoo.exceptions import UserError
from odoo.models import regex_order
from odoo.addons.of_utils.models.of_utils import get_selection_label
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
@api.onchange('product_uom', 'product_uom_qty')
def product_uom_change(self):
    u"""Copy of the parent function with the unit price assignment removed.

    The unit price is only recomputed when the pricelist is
    quantity-dependent for this product and the current price is unset or
    differs from the list price; otherwise the entered price is kept.
    """
    if not self.product_uom or not self.product_id:
        self.price_unit = 0.0
        return
    if self.order_id.pricelist_id.of_is_quantity_dependent(self.product_id.id, self.order_id.date_order) \
            and self.order_id.partner_id \
            and (not self.price_unit or float_compare(self.price_unit, self.product_id.list_price, 2) != 0):
        self.price_unit = self.of_get_price_unit()

# Monkey-patch: replace sale.order.line's product_uom_change with this version.
SOL.product_uom_change = product_uom_change
@api.onchange('fiscal_position_id')
def _compute_tax_id(self):
    """
    Named like a compute but actually an onchange. Overridden so that taxes
    are not reassigned on lines that have already been invoiced.
    """
    for order in self:
        order.order_line.filtered(lambda l: not l.invoice_lines)._compute_tax_id()

# Monkey-patch: replace sale.order's _compute_tax_id with this version.
SO._compute_tax_id = _compute_tax_id
class SaleOrder(models.Model):
_name = 'sale.order'
_inherit = ['sale.order', 'of.documents.joints']
def pdf_payment_schedule(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_payment_schedule')
def pdf_address_contact_parent_name(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_parent_name')
def pdf_address_contact_titles(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_titles')
def pdf_address_contact_name(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_name')
def pdf_address_contact_phone(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_phone') or False
def pdf_address_contact_mobile(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_mobile') or False
def pdf_address_contact_fax(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_fax') or False
def pdf_address_contact_email(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_contact_email') or False
def pdf_technical_visit_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_technical_visit_insert')
def pdf_validity_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_validity_insert')
def pdf_address_title(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_address_title')
def pdf_shipping_address_specific_title(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_shipping_address_specific_title') or False
def pdf_commercial_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_commercial_insert')
def pdf_commercial_contact(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_commercial_contact')
def pdf_commercial_email(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_commercial_email')
def pdf_customer_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_insert')
def pdf_customer_phone(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_phone')
def pdf_customer_mobile(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_mobile')
def pdf_customer_fax(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_fax')
def pdf_customer_email(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_email')
def pdf_payment_term_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_payment_term_insert')
def pdf_customer_ref_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_ref_insert')
def pdf_taxes_detail(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_taxes_detail')
def pdf_signatures_insert(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_signatures_insert')
def pdf_vendor_signature(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_vendor_signature')
def pdf_prefill_vendor_signature(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_prefill_vendor_signature')
def pdf_customer_signature(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_customer_signature')
def pdf_signature_text(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_signature_text')
def get_color_section(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_section_bg_color') or '#FFFFFF'
def get_color_font(self):
return self.env['ir.values'].get_default('sale.config.settings', 'pdf_section_font_color') or "#000000"
    def _search_of_to_invoice(self, operator, value):
        """Search function for 'of_to_invoice': confirmed orders that still
        have something to invoice and whose status is not forced.

        NOTE(review): *operator* and *value* are ignored, so only the
        default truthy search semantics are supported — confirm with callers.
        """
        # Fetch sale orders that are not fully delivered
        self._cr.execute("SELECT DISTINCT order_id\n"
                         "FROM sale_order_line\n"
                         "WHERE qty_to_invoice + qty_invoiced < product_uom_qty")
        order_ids = self._cr.fetchall()
        domain = ['&', '&',
                  ('of_force_invoice_status', 'not in', ('invoiced', 'no')),
                  ('state', 'in', ('sale', 'done')),
                  ('order_line.qty_to_invoice', '>', 0)]
        if order_ids:
            # Exclude orders with at least one not fully delivered line.
            domain = ['&'] + domain + [('id', 'not in', zip(*order_ids)[0])]
        return domain
    @api.depends('order_line.price_total')
    def _amount_all(self):
        """Compute the total amounts of the SO."""
        # The standard computation differs from the one used on invoices, which
        # can lead to discrepancies in some cases (when the global VAT rounding
        # option is used and the order mixes lines with different taxes).
        # Align the quotation/order total computation with the invoice one.
        for order in self:
            order.amount_untaxed = sum(line.price_subtotal for line in order.order_line)
            order.amount_tax = sum(tax['amount'] for tax in order.of_get_taxes_values().itervalues())
            order.amount_total = order.amount_untaxed + order.amount_tax
of_to_invoice = fields.Boolean(
u"Entièrement facturable", compute='_compute_of_to_invoice', search='_search_of_to_invoice'
)
of_notes_facture = fields.Html(string="Notes facture", oldname="of_notes_factures")
of_notes_intervention = fields.Html(string="Notes intervention")
of_notes_client = fields.Text(related='partner_id.comment', string="Notes client", readonly=True)
of_total_cout = fields.Monetary(compute='_compute_of_marge', string='Prix de revient')
of_marge_pc = fields.Float(compute='_compute_of_marge', string=u"Marge %", search='_search_of_marge_pc')
of_etiquette_partenaire_ids = fields.Many2many(
'res.partner.category', related='partner_id.category_id', string=u"Étiquettes client")
of_client_view = fields.Boolean(string='Vue client/vendeur')
of_date_vt = fields.Date(
string="Date visite technique", help=u"Si renseignée apparaîtra sur le devis / Bon de commande"
)
of_echeance_line_ids = fields.One2many('of.sale.echeance', 'order_id', string=u"Échéances")
of_echeances_modified = fields.Boolean(
u"Les échéances ont besoin d'être recalculées", compute="_compute_of_echeances_modified")
of_force_invoice_status = fields.Selection([
('invoiced', 'Fully Invoiced'),
('no', 'Nothing to Invoice')], string=u"Forcer état de facturation",
help=u"Permet de forcer l'état de facturation de la commande.\n"
u"Utile pour les commandes facturées qui refusent de changer d'état "
u"(e.g. une ligne a été supprimée dans la facture).", copy=False
)
of_invoice_policy = fields.Selection(
[('order', u'Quantités commandées'), ('delivery', u'Quantités livrées')], string="Politique de facturation"
)
of_fixed_invoice_date = fields.Date(string="Date de facturation fixe")
of_invoice_date_prev = fields.Date(
string=u"Date de facturation prévisonnelle", compute="_compute_of_invoice_date_prev",
inverse="_inverse_of_invoice_date_prev", store=True, compute_sudo=True)
of_delivered = fields.Boolean(string=u"Livrée", compute="_compute_delivered", store=True)
of_allow_quote_addition = fields.Boolean(
string=u"Permet l'ajout de devis complémentaires", compute='_compute_of_allow_quote_addition')
of_price_printing = fields.Selection([
('order_line', u'Prix par ligne de commande'),
], string=u"Impressions des prix", default='order_line', required=True)
of_apply_on_invoice = fields.Boolean(string=u"Appliquer aux factures", default=True)
of_partner_phone = fields.Char(related='partner_id.phone', string=u"Téléphone du partenaire", readonly=True)
of_partner_mobile = fields.Char(related='partner_id.mobile', string=u"Mobile du partenaire", readonly=True)
of_partner_email = fields.Char(related='partner_id.email', string=u"Courriel du partenaire", readonly=True)
    @api.multi
    @api.depends('name', 'date', 'state')
    def name_get(self):
        """Extended display name "name - state label - order date" when the
        'extended_display' context key is set; standard name otherwise."""
        if not self._context.get('extended_display'):
            return super(SaleOrder, self).name_get()
        result = []
        # French users get dd/mm/YYYY, everyone else the server date format.
        date_format = '%d/%m/%Y' if self.env.user.lang == 'fr_FR' else DEFAULT_SERVER_DATE_FORMAT
        for record in self:
            date_order = fields.Date.from_string(record.date_order).strftime(date_format)
            order_state = get_selection_label(self, record._name, 'state', record.state)
            record_name = "%s - %s - %s" % (
                record.name, order_state, date_order
            )
            result.append((record.id, record_name))
        return result
@api.depends('company_id')
def _compute_of_allow_quote_addition(self):
option = self.env['ir.values'].get_default('sale.config.settings', 'of_allow_quote_addition')
for order in self:
order.of_allow_quote_addition = option
@api.depends('of_echeance_line_ids', 'amount_total')
def _compute_of_echeances_modified(self):
for order in self:
order.of_echeances_modified = bool(order.of_echeance_line_ids
and float_compare(order.amount_total,
sum(order.of_echeance_line_ids.mapped('amount')),
precision_rounding=.01))
    @api.depends('order_line', 'order_line.qty_delivered', 'order_line.product_uom_qty')
    def _compute_delivered(self):
        """An order is 'delivered' when every line is delivered in full."""
        for order in self:
            for line in order.order_line:
                if float_compare(line.qty_delivered, line.product_uom_qty, 2) < 0:
                    order.of_delivered = False
                    break
            else:
                # for/else: no break means all lines are fully delivered.
                order.of_delivered = True
@api.depends('of_fixed_invoice_date', 'of_invoice_policy',
'order_line', 'order_line.of_invoice_date_prev',
'order_line.procurement_ids', 'order_line.procurement_ids.move_ids',
'order_line.procurement_ids.move_ids.picking_id.min_date')
def _compute_of_invoice_date_prev(self):
for order in self:
if order.of_fixed_invoice_date or order.of_invoice_policy == 'order':
order.of_invoice_date_prev = order.of_fixed_invoice_date
elif order.of_invoice_policy == 'delivery':
pickings = order.order_line.mapped('procurement_ids')\
.mapped('move_ids')\
.mapped('picking_id')\
.filtered(lambda p: p.state != 'cancel')\
.sorted('min_date')
if pickings:
to_process_pickings = pickings.filtered(lambda p: p.state != 'done')
if to_process_pickings:
order.of_invoice_date_prev = fields.Date.to_string(
fields.Date.from_string(to_process_pickings[0].min_date))
else:
order.of_invoice_date_prev = fields.Date.to_string(
fields.Date.from_string(pickings[-1].min_date))
def _inverse_of_invoice_date_prev(self):
for order in self:
order.of_fixed_invoice_date = order.of_invoice_date_prev
def _of_get_max_or_min_seq_by_layout(self, what='max'):
self.ensure_one()
lines_with_layout = self.order_line.filtered(lambda l: l.layout_category_id)
seq_by_layout = {}.fromkeys(lines_with_layout.mapped('layout_category_id').ids, 0)
for layout_id in seq_by_layout:
if what == 'max':
seq = max(lines_with_layout.filtered(lambda l: l.layout_category_id.id == layout_id).mapped('sequence'))
else:
seq = min(lines_with_layout.filtered(lambda l: l.layout_category_id.id == layout_id).mapped('sequence'))
seq_by_layout[layout_id] = seq
return seq_by_layout
@api.multi
def of_get_taxes_values(self):
tax_grouped = {}
round_curr = self.currency_id.round
for line in self.order_line:
price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price_unit, self.currency_id, line.product_uom_qty,
product=line.product_id, partner=self.partner_shipping_id)['taxes']
for val in taxes:
key = val['account_id']
val['amount'] += val['base'] - round_curr(val['base'])
if key not in tax_grouped:
tax_grouped[key] = {
'tax_id': val['id'],
'amount': val['amount'],
'base': round_curr(val['base'])
}
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += round_curr(val['base'])
for values in tax_grouped.itervalues():
values['base'] = round_curr(values['base'])
values['amount'] = round_curr(values['amount'])
return tax_grouped
@api.multi
def _of_compute_echeances(self):
self.ensure_one()
if not self.payment_term_id:
return False
dates = {
'order': self.state not in ('draft', 'sent', 'cancel') and self.confirmation_date,
'invoice': self.invoice_status == 'invoiced' and self.invoice_ids[0].date_invoice,
'default': False,
}
amounts = self.payment_term_id.compute(self.amount_total, dates=dates)[0]
amount_total = self.amount_total
pct_left = 100.0
pct = 0
result = [(5, )]
for term, (date, amount) in itertools.izip(self.payment_term_id.line_ids, amounts):
pct_left -= pct
pct = round(100 * amount / amount_total, 2) if amount_total else 0
line_vals = {
'name': term.name,
'percent': pct,
'amount': amount,
'date': date,
}
result.append((0, 0, line_vals))
if len(result) > 1:
result[-1][2]['percent'] = pct_left
return result
    @api.depends('state', 'order_line.invoice_status', 'of_force_invoice_status')
    def _get_invoiced(self):
        """Let of_force_invoice_status override the computed invoice status."""
        # Always call super so invoice_count and invoice_ids stay computed.
        super(SaleOrder, self)._get_invoiced()
        for order in self:
            if order.of_force_invoice_status:
                order.invoice_status = order.of_force_invoice_status
    @api.onchange('partner_id')
    def onchange_partner_id(self):
        """Extend the standard onchange: keep non-empty fiscal position and
        payment term, propagate the customer ref and default addresses."""
        fiscal_position = self.fiscal_position_id
        payment_term = self.payment_term_id
        super(SaleOrder, self).onchange_partner_id()
        self.of_invoice_policy = self.partner_id and self.partner_id.of_invoice_policy or False
        # If the new value is empty, restore the previous one
        if fiscal_position != self.fiscal_position_id and not self.fiscal_position_id:
            self.fiscal_position_id = fiscal_position.id
        if payment_term != self.payment_term_id and not self.payment_term_id:
            self.payment_term_id = payment_term.id
        if self.partner_id:
            # Customer reference (falls back to the parent partner's ref)
            ref = self.partner_id.ref
            if not ref and self.partner_id.parent_id:
                ref = self.partner_id.parent_id.ref
            self.client_order_ref = ref
            # Default invoice/shipping addresses flagged on child partners
            if not self.partner_invoice_id.of_default_address:
                default_invoice_address = self.partner_id.child_ids.filtered(
                    lambda child: child.type == 'invoice' and child.of_default_address)
                if default_invoice_address:
                    if len(default_invoice_address) > 1:
                        default_invoice_address = default_invoice_address[0]
                    self.partner_invoice_id = default_invoice_address
            if not self.partner_shipping_id.of_default_address:
                default_shipping_address = self.partner_id.child_ids.filtered(
                    lambda child: child.type == 'delivery' and child.of_default_address)
                if default_shipping_address:
                    if len(default_shipping_address) > 1:
                        default_shipping_address = default_shipping_address[0]
                    self.partner_shipping_id = default_shipping_address
@api.multi
@api.onchange('partner_shipping_id', 'partner_id')
def onchange_partner_shipping_id(self):
fiscal_position = self.fiscal_position_id
super(SaleOrder, self).onchange_partner_shipping_id()
# Si la nouvelle valeur est vide, on remet l'ancienne
if fiscal_position != self.fiscal_position_id and not self.fiscal_position_id:
self.fiscal_position_id = fiscal_position.id
return {}
@api.onchange('partner_id')
def onchange_partner_id_warning(self):
if not self.partner_id:
return
partner = self.partner_id
# If partner has no warning, check its parents
# invoice_warn is shared between different objects
if not partner.of_is_sale_warn and partner.parent_id:
partner = partner.parent_id
if partner.of_is_sale_warn and partner.invoice_warn != 'no-message':
return super(SaleOrder, self).onchange_partner_id_warning()
return
@api.onchange('payment_term_id')
def _onchange_payment_term_id(self):
if self.payment_term_id:
self.of_echeance_line_ids = self._of_compute_echeances()
@api.onchange('amount_total')
def _onchange_amount_total(self):
self._onchange_payment_term_id()
@api.multi
def of_update_dates_echeancier(self):
for order in self:
if not order.payment_term_id:
continue
date_invoice = order.invoice_status == 'invoiced' and order.invoice_ids and \
order.invoice_ids[0].date_invoice or False
dates = {
'order': order.confirmation_date,
'invoice': date_invoice,
'default': False,
}
force_dates = [echeance.date for echeance in order.of_echeance_line_ids]
echeances = order.payment_term_id.compute(order.amount_total, dates=dates, force_dates=force_dates)[0]
if len(echeances) != len(order.of_echeance_line_ids):
continue
for echeance, ech_calc in itertools.izip(order.of_echeance_line_ids, echeances):
if ech_calc[0] and not echeance.date:
echeance.date = ech_calc[0]
    @api.multi
    def action_verification_confirm(self):
        """Run pre-confirmation checks before starting order confirmation.

        A check that must block confirmation has to run outside of
        action_confirm (there is no raise here): otherwise overrides that
        would run before/after action_confirm would still be executed.
        """
        action = False
        for order in self:
            action, interrupt = self.env['of.sale.order.verification'].do_verification(order)
            if interrupt:
                return action
        res = self.action_confirm()
        if action:
            return action
        return res
@api.multi
def action_confirm(self):
    """Confirm the orders, then refresh their payment schedule dates."""
    result = super(SaleOrder, self).action_confirm()
    self.of_update_dates_echeancier()
    return result
@api.multi
def of_recompute_echeance_last(self):
    """Recompute the balance line(s) of the payment schedule.

    Schedule lines are walked in order: non-balance lines consume their
    percentage/amount from a running remainder; each line flagged as
    'last' receives whatever remains at that point. The iteration order
    matters, so the running totals are updated in place.
    """
    for order in self:
        if not order.of_echeance_line_ids:
            continue
        percent = 100.0
        amount = order.amount_total
        for echeance in order.of_echeance_line_ids:
            if echeance.last:
                # Balance line: absorbs the remaining percent/amount.
                echeance.write({
                    'percent': percent,
                    'amount': amount,
                })
            else:
                percent -= echeance.percent
                amount -= echeance.amount
@api.model
def create(self, vals):
    """Create the order and subscribe its customer to the 'Mail' subtype.

    The customer is subscribed with the 'of_base' mail subtype so that
    he is notified of the messages posted on the order.
    """
    mail_subtype = self.env.ref('of_base.mail_message_subtype_mail', raise_if_not_found=False)
    record = super(SaleOrder, self).create(vals)
    # vals.get(): the previous code read vals['partner_id'] directly and
    # raised a KeyError when partner_id was not part of the creation
    # values (e.g. records created with default values only).
    if mail_subtype and vals.get('partner_id'):
        record.message_subscribe(partner_ids=[vals['partner_id']], subtype_ids=[mail_subtype.id], force=False)
    return record
@api.multi
def write(self, vals):
    """Write the values; keep followers and the payment schedule in sync.

    When the customer changes, subscribe the new partner to the 'Mail'
    subtype and unsubscribe the previous ones. Orders whose schedule was
    modified also get their balance schedule line recomputed.
    """
    mail_subtype = self.env.ref('of_base.mail_message_subtype_mail', raise_if_not_found=False)
    if mail_subtype and vals.get('partner_id'):
        # Remember the partners that were subscribed before the change.
        old_partner_ids = self.mapped('partner_id')._ids
    res = super(SaleOrder, self).write(vals)
    if mail_subtype and vals.get('partner_id'):
        # Subscribe the new partner and unsubscribe the old ones.
        self.message_subscribe(partner_ids=[vals['partner_id']], subtype_ids=[mail_subtype.id], force=False)
        message_followers = self.mapped('message_follower_ids')
        message_followers.filtered(lambda r: r.partner_id.id in old_partner_ids)\
            .write({'subtype_ids': [(3, mail_subtype.id)]})
    # Recompute the balance schedule line when needed.
    self.filtered('of_echeances_modified').of_recompute_echeance_last()
    return res
def _search_of_marge_pc(self, operator, value):
    """Search function for the computed margin-percentage field.

    The rate is evaluated directly in SQL as 100 * margin /
    amount_untaxed. '=' and '!=' use a tolerance window
    [value - 0.005, value + 0.004] to absorb the rounding of the
    displayed value; strict comparisons use the window bounds for the
    same reason. Orders with a zero untaxed amount yield NULL and never
    match.

    :raises NotImplementedError: for any unsupported operator.
    """
    top = value + 0.004
    down = value - 0.005
    params = []
    # The bounds are always passed as query parameters (no injection).
    request = "SELECT id FROM sale_order WHERE "
    if operator == '=':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) >= %s AND " \
                   "(100 * (margin / NULLIF(amount_untaxed, 0))) <= %s;"
        params = (down, top)
    elif operator == '!=':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) <= %s OR " \
                   "(100 * (margin / NULLIF(amount_untaxed, 0))) >= %s;"
        params = (down, top)
    elif operator == '>=':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) >= %s;"
        params = (down,)
    elif operator == '>':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) > %s;"
        params = (top,)
    elif operator == '<=':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) <= %s;"
        params = (top,)
    elif operator == '<':
        request += "(100 * (margin / NULLIF(amount_untaxed, 0))) < %s;"
        params = (down,)
    else:
        raise NotImplementedError(_("Search operator %s not implemented for value %s") % (operator, value))
    self.env.cr.execute(request, params)
    ids = [r[0] for r in self.env.cr.fetchall()]
    return [('id', 'in', ids)]
@api.depends('state', 'order_line', 'order_line.qty_to_invoice', 'order_line.product_uom_qty')
def _compute_of_to_invoice(self):
    """An order is 'to invoice' when it is confirmed, its invoice status
    is not forced to 'invoiced'/'no', and every line has enough invoiced
    or invoiceable quantity to cover the ordered quantity.
    """
    for order in self:
        confirmed = order.state in ('sale', 'done')
        forced = order.of_force_invoice_status in ('invoiced', 'no')
        order.of_to_invoice = confirmed and not forced and all(
            line.qty_to_invoice + line.qty_invoiced >= line.product_uom_qty
            for line in order.order_line
        )
@api.depends('margin', 'amount_untaxed')
def _compute_of_marge(self):
    """Compute the total cost and the margin rate of each order.

    of_marge_pc is a percentage; -100 acts as a sentinel value when the
    untaxed amount is zero.
    """
    for order in self:
        total_cost = order.amount_untaxed - order.margin
        order.of_total_cout = total_cost
        if order.amount_untaxed:
            order.of_marge_pc = 100 * (1 - total_cost / order.amount_untaxed)
        else:
            order.of_marge_pc = -100
def toggle_view(self):
    """Switch the order between the salesman view and the customer view."""
    current = self.of_client_view
    self.of_client_view = not current
@api.multi
def _of_get_total_lines_by_group(self):
    """Return the order lines split by the display group they belong to.

    The groups are those defined by the 'of.invoice.report.total.group'
    object, which allows moving the rendering of order lines below the
    untaxed or tax-included total. Groups are displayed in their own
    order, then lines in their order of appearance in the sale order.

    :return: list of (group, order lines) couples. The first element is
        (False, ungrouped lines).
    """
    self.ensure_one()
    group_obj = self.env['of.invoice.report.total.group']
    lines = self.order_line
    products = lines.mapped('product_id')
    product_ids = list(products._ids)
    categ_ids = list(products.mapped('categ_id')._ids)
    # Candidate groups: the payments group plus any group matching the
    # order's products or product categories.
    groups = group_obj.search([('order', '=', True),
                               '|', ('id', '=', group_obj.get_group_paiements().id),
                               '|', ('product_ids', 'in', product_ids), ('categ_ids', 'in', categ_ids)])
    result = []
    # First pass: extract the payment lines so the other groups below do
    # not match them.
    for group in groups:
        if group.is_group_paiements():
            # NOTE(review): when filter_lines() returns False here, the
            # second loop still appends (group, False) — confirm this is
            # the intended rendering for an empty payments group.
            group_paiement_lines = group.filter_lines(lines)
            if group_paiement_lines is not False:
                lines -= group_paiement_lines
            break
    for group in groups:
        if group.is_group_paiements():
            result.append((group, group_paiement_lines))
        else:
            group_lines = group.filter_lines(lines)
            if group_lines is not False:
                # Do not display zero lines inside the payments section
                # and skip the group when all of its lines are zero.
                group_lines_2 = group_lines.filtered(lambda l: l.price_subtotal)
                if group_lines_2:
                    result.append((group, group_lines_2))
                # Still remove every line of the group so they are not
                # displayed elsewhere.
                lines -= group_lines
    if lines:
        result = [(False, lines)] + result
    else:
        result = [(False, self.order_line.mapped('invoice_lines'))]
        # Still append the payments group.
        for group in groups:
            if group.is_group_paiements():
                result.append((group, lines))  # lines is empty
    return result
@api.multi
def _of_get_printable_lines(self):
    """ [PRINTING]
    Return the lines to display: the ungrouped lines, i.e. the first
    element of _of_get_total_lines_by_group().
    """
    return self._of_get_total_lines_by_group()[0][1]
def _prepare_tax_line_vals(self, line, tax):
""" Emulation de la fonction du même nom du modèle 'account.invoice'
Permet de récupérer la clé de groupement dans _of_get_printable_totals
"""
vals = {
'name': tax['name'],
'tax_id': tax['id'],
'amount': tax['amount'],
'base': tax['base'],
'manual': False,
'sequence': tax['sequence'],
'account_analytic_id': tax['analytic'] or False,
'account_id': tax['account_id'] or tax['refund_account_id'] or False,
}
return vals
@api.multi
def _of_get_printable_totals(self):
    """ [PRINTING]
    Return a dictionary of the values to display in the totals of the
    pdf order. Dictionary shape:
    {
        'subtotal': untaxed total of the displayed lines,
        'untaxed': [[('label', amount), ...], ('total label': total_amount)],
        'taxes': same,
        'total': same,
    }
    The untaxed, taxes and total lists could be merged into a single
    one. This format may help with inheritance (?).
    """
    self.ensure_one()
    tax_obj = self.env['account.tax']
    round_curr = self.currency_id.round
    group_lines = self._of_get_total_lines_by_group()
    result = {}
    result['subtotal'] = sum(group_lines[0][1].mapped('price_subtotal'))
    total_amount = result['subtotal']
    # 'i' tracks the current position in group_lines across both loops.
    i = 1
    untaxed_lines = group_lines[0][1]
    # --- Untaxed subtotals ---
    # Groups positioned before taxes ('0-ht') accumulate into the
    # untaxed part; their lines also enter the tax computation below.
    result_untaxed = []
    while i < len(group_lines) and group_lines[i][0].position == '0-ht':
        group, lines = group_lines[i]
        i += 1
        untaxed_lines |= lines
        lines_vals = []
        for line in lines:
            lines_vals.append((line.of_get_line_name()[0], line.price_subtotal))
            total_amount += line.price_subtotal
        total_vals = (group.subtotal_name, round_curr(total_amount))
        result_untaxed.append([lines_vals, total_vals])
    result['untaxed'] = result_untaxed
    # --- Taxes ---
    # Code copied from account.invoice.get_taxes_values()
    tax_grouped = {}
    for line in untaxed_lines:
        price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
        taxes = line.tax_id.compute_all(price_unit, self.currency_id, line.product_uom_qty, line.product_id,
                                        self.partner_id)['taxes']
        for tax_val in taxes:
            val = self._prepare_tax_line_vals(line, tax_val)
            tax = tax_obj.browse(tax_val['id'])
            key = tax.get_grouping_key(val)
            # Carry the base rounding error over to the tax amount.
            val['amount'] += val['base'] - round_curr(val['base'])
            if key not in tax_grouped:
                tax_grouped[key] = val
                tax_grouped[key]['name'] = tax.description or tax.name
                tax_grouped[key]['group'] = tax.tax_group_id
            else:
                tax_grouped[key]['amount'] += val['amount']
    # Taxes grouped by tax group (cf account.invoice._get_tax_amount_by_group())
    tax_vals_dict = {}
    for tax in sorted(tax_grouped.values(), key=lambda t: t['name']):
        amount = round_curr(tax['amount'])
        tax_vals_dict.setdefault(tax['group'], [tax['group'].name, 0])
        tax_vals_dict[tax['group']][1] += amount
        total_amount += amount
    result['taxes'] = [[tax_vals_dict.values(), (_("Total TTC"), round_curr(total_amount))]]
    # --- Tax-included subtotals ---
    result_total = []
    while i < len(group_lines):
        # Payments are sorted by date
        group, lines = group_lines[i]
        i += 1
        if group.is_group_paiements():
            lines_vals = self._of_get_printable_payments(lines)
            if not lines_vals:
                continue
            # Payments decrease the running total.
            for line in lines_vals:
                total_amount -= line[1]
        else:
            lines_vals = []
            for line in lines:
                lines_vals.append((line.of_get_line_name()[0], line.price_total))
                total_amount += line.price_total
        total_vals = (group.subtotal_name, round_curr(total_amount))
        # Optionally hide the grand total shown in the taxes section.
        if group.hide_amount_total and len(result['taxes'][0]) == 2:
            result['taxes'][0].pop(1)
        result_total.append([lines_vals, total_vals])
    result['total'] = result_total
    return result
@api.multi
def order_lines_layouted(self):
    """Drop from the layouted report pages the order lines that are
    rendered in the totals section instead.
    """
    pages = super(SaleOrder, self).order_lines_layouted()
    printable = self._of_get_printable_lines()
    filtered_pages = []
    for page in pages:
        kept_groups = []
        for group in page:
            kept_lines = [l for l in group['lines'] if l in printable]
            if kept_lines:
                group['lines'] = kept_lines
                kept_groups.append(group)
        if kept_groups:
            filtered_pages.append(kept_groups)
    return filtered_pages
@api.multi
def _of_get_printable_payments(self, order_lines):
    """ [PRINTING]
    Return the payment lines to display on a printed order.

    Payments of the orders' invoices (including down-payment invoices)
    are listed; the payments assigned directly to the order are not
    fetched here because that link is added in of_sale_payment.

    :param order_lines: order lines (kept for signature compatibility)
    :return: list of (label, amount) couples, in chronological order
    """
    invoice_obj = self.env['account.invoice']
    account_move_line_obj = self.env['account.move.line']
    # Invoices and down-payment invoices
    invoices = self.mapped('order_line').mapped('invoice_lines').mapped('invoice_id')
    # We distinguish the payments of the main invoice from those of the
    # linked invoices
    result = []
    for invoice in invoices:
        # Guard against a falsy payments_widget (json.loads would crash).
        # The previous .replace("'", "\'") was removed: "\'" == "'" in
        # Python, so it was a no-op.
        if not invoice.payments_widget:
            continue
        widget = json.loads(invoice.payments_widget)
        if not widget:
            continue
        for payment in widget.get('content', []):
            # Payments come sorted in chronological order
            move_line = account_move_line_obj.browse(payment['payment_id'])
            name = invoice_obj._of_get_payment_display(move_line)
            result.append((name, payment['amount']))
    return result
@api.multi
def _prepare_invoice(self):
    """Add the technical visit date to the invoice values.

    Beware when invoicing several sale orders at once (ensure_one()
    restricts this method to a single order).
    """
    self.ensure_one()
    if self.company_id:
        # Rebind self so company-dependent values resolve correctly.
        self = self.with_context(company_id=self.company_id.id)
    invoice_vals = super(SaleOrder, self)._prepare_invoice()
    invoice_vals["of_date_vt"] = self.of_date_vt
    if self.of_apply_on_invoice:
        invoice_vals["of_price_printing"] = self.of_price_printing
    # Configuration switch: optionally do not propagate the order's
    # payment term to the invoice.
    if not self.env['ir.values'].get_default('sale.config.settings', 'of_propagate_payment_term'):
        invoice_vals['payment_term_id'] = False
    return invoice_vals
@api.multi
def copy(self, default=None):
    """Duplicate the order, then rebuild its payment schedule."""
    new_order = super(SaleOrder, self).copy(default=default)
    new_order._onchange_payment_term_id()
    return new_order
@api.multi
def action_invoice_create(self, grouped=False, final=False):
    """Create the invoices of the orders.

    The 'grouped' argument is deliberately overridden by the
    'of_invoice_grouped' configuration setting. With the
    'of_include_null_qty_lines' context key, zero-quantity order lines
    are copied onto the invoice as zero-quantity invoice lines.
    """
    grouped = self.env['ir.values'].get_default('sale.config.settings', 'of_invoice_grouped')
    invoice_ids = super(SaleOrder, self).action_invoice_create(grouped=grouped, final=final)
    invoices = self.env['account.invoice'].browse(invoice_ids)
    if self._context.get('of_include_null_qty_lines', False) and invoices:
        for order in self:
            # Fetch the generated invoice matching this order
            invoice = invoices.filtered(lambda inv: inv.origin == order.name)
            if invoice:
                # Add invoice lines for the zero-quantity order lines
                # that have no invoice line yet
                for order_line in order.order_line.filtered(
                        lambda l: l.product_uom_qty == 0.0 and not l.invoice_lines):
                    vals = order_line._prepare_invoice_line(qty=0.0)
                    vals.update({'invoice_id': invoice.id, 'sale_line_ids': [(6, 0, [order_line.id])]})
                    self.env['account.invoice.line'].create(vals)
    # For grouped invoices, prefix each invoice line with its source order
    for inv in invoices:
        if len(inv.invoice_line_ids.mapped('sale_line_ids').mapped('order_id')) > 1:
            for line in inv.invoice_line_ids:
                order_line = line.sale_line_ids[:1]
                line.name = "%s %s\n%s" % (
                    order_line.order_id.name, order_line.order_id.client_order_ref or "", line.name)
    return invoice_ids
@api.multi
def action_add_quote(self):
    """Open a wizard adding a complementary quotation to this order.

    Only allowed on confirmed orders.
    """
    self.ensure_one()
    if self.state != 'sale':
        raise UserError(u"Vous ne pouvez pas ajouter un devis complémentaire à une commande non validée.")
    wizard_vals = {'order_id': self.id}
    wizard = self.env['of.sale.order.add.quote.wizard'].create(wizard_vals)
    action = {
        'type': 'ir.actions.act_window',
        'name': "Ajouter un devis complémentaire",
        'view_mode': 'form',
        'res_model': 'of.sale.order.add.quote.wizard',
        'res_id': wizard.id,
        'target': 'new',
    }
    return action
@api.multi
def of_get_taxes_display(self):
    """Return the order's taxes aggregated per tax for display.

    :return: list of dicts {'id', 'name', 'amount', 'base'}, amounts
        rounded to the order currency.
    """
    tax_obj = self.env['account.tax']
    grouped = []
    round_curr = self.currency_id.round
    for line in self.order_line:
        price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
        computed = line.tax_id.compute_all(price_unit, self.currency_id, line.product_uom_qty,
                                           product=line.product_id, partner=self.partner_shipping_id)['taxes']
        for tax_val in computed:
            tax_id = tax_val['id']
            # Aggregate on an existing entry when this tax was already met.
            existing = next((v for v in grouped if v['id'] == tax_id), None)
            if existing is not None:
                existing['amount'] += tax_val['amount']
                existing['base'] += round_curr(tax_val['base'])
            else:
                tax = tax_obj.browse(tax_id)
                grouped.append({
                    'id': tax_id,
                    'name': tax.description,
                    'amount': tax_val['amount'],
                    'base': round_curr(tax_val['base'])
                })
    # Final rounding pass on the accumulated values.
    for vals in grouped:
        vals['base'] = round_curr(vals['base'])
        vals['amount'] = round_curr(vals['amount'])
    return grouped
@api.multi
def action_quotation_send(self):
    """Send the quotation, defaulting the mail composer to the
    'of_base' mail subtype when that subtype exists.
    """
    subtype = self.env.ref('of_base.mail_message_subtype_mail', raise_if_not_found=False)
    action = super(SaleOrder, self).action_quotation_send()
    if subtype:
        action['ctx'].update({'default_subtype_id': subtype.id})
    return action
class SaleOrderLine(models.Model):
    """Sale order line extensions: margin rate, invoicing policy,
    product images/attachments, line options and customer/vendor view.
    """
    _name = 'sale.order.line'
    _inherit = ['sale.order.line', 'of.readgroup']

    # digits=False disables decimal-precision rounding on the unit price.
    price_unit = fields.Float(digits=False, help="""
    Prix unitaire de l'article.
    À entrer HT ou TTC suivant la TVA de la ligne de commande.
    """)
    # Related to the order: toggles the customer/salesman display.
    of_client_view = fields.Boolean(string="Vue client/vendeur", related="order_id.of_client_view")
    of_article_principal = fields.Boolean(
        string="Article principal", help="Cet article est l'article principal de la commande"
    )
    of_product_categ_id = fields.Many2one(
        'product.category', related='product_id.categ_id', string=u"Catégorie d'article", store=True, index=True)
    date_order = fields.Datetime(related='order_id.date_order', string="Date de commande", store=True, index=True)
    confirmation_date_order = fields.Datetime(
        related='order_id.confirmation_date', string="Date de confirmation de commande", store=True, index=True)
    # Virtual group-by field: the compute is a no-op, searching and
    # grouping are handled by _search_of_gb_partner_tag_id and
    # _read_group_process_groupby below.
    of_gb_partner_tag_id = fields.Many2one(
        'res.partner.category', compute=lambda *a, **k: {}, search='_search_of_gb_partner_tag_id',
        string="Étiquette client", of_custom_groupby=True
    )
    of_price_unit_display = fields.Float(related='price_unit', string=u"Prix unitaire", readonly=True)
    of_product_forbidden_discount = fields.Boolean(string=u"Remise interdite pour cet article")
    of_price_unit_ht = fields.Float(
        string='Unit Price excl', compute='_compute_of_price_unit', help="Unit price without taxes", store=True
    )
    of_price_unit_ttc = fields.Float(
        string='Unit Price incl', compute='_compute_of_price_unit', help="Unit price with taxes", store=True
    )
    of_marge_pc = fields.Float(
        compute='_compute_of_marge', string=u"Marge %", store=True)
    of_product_default_code = fields.Char(related='product_id.default_code', string=u"Référence article", readonly=True)
    of_order_line_option_id = fields.Many2one(comodel_name='of.order.line.option', string=u"Option")
    of_reset_option = fields.Boolean(string=u"Réinitialiser l'option ?")
    of_confirmation_date = fields.Datetime(
        string="Date de confirmation", related="order_id.confirmation_date", store=True)
    of_invoice_policy = fields.Selection([('order', u'Quantités commandées'), ('delivery', u'Quantités livrées')],
                                         string="Politique de facturation",
                                         compute="_compute_of_invoice_policy",
                                         store=True)
    of_invoice_date_prev = fields.Date(
        string=u"Date de facturation prévisionnelle", compute="_compute_of_invoice_date_prev", store=True,
        compute_sudo=True)
    of_seller_price = fields.Float(string=u"Prix d'achat")
    of_date_tarif = fields.Date(string="Date du tarif", related="product_id.date_tarif", readonly=True)
    of_obsolete = fields.Boolean(string=u"Article obsolète", related="product_id.of_obsolete", readonly=True)
    of_product_image_ids = fields.Many2many('of.product.image', string='Images')
    of_product_attachment_ids = fields.Many2many("ir.attachment", string="Documents joints")
    # Field used to compute the domain of of_product_attachment_ids
    of_product_attachment_computed_ids = fields.Many2many(
        "ir.attachment", string="Documents joints",
        compute='_compute_of_product_attachment_computed_ids')
    # To be removed after the next update
    of_product_attachment_computed = fields.Boolean(compute=lambda s: None)
@api.model_cr_context
def _auto_init(self):
    """Rename field 'of_product_seller_price' to 'of_seller_price' in
    the xml views.
    TODO: TO BE REMOVED AFTER INSTALLATION!
    """
    cr = self._cr
    # The column's presence before super() tells a fresh install (the
    # column was just created by this update) from a migration.
    cr.execute(
        "SELECT 1 FROM information_schema.columns WHERE table_name = %s AND column_name = 'of_seller_price'",
        (self._table,))
    exists = bool(cr.fetchall())
    res = super(SaleOrderLine, self)._auto_init()
    if not exists:
        # Migration path: patch the stored view architectures in place.
        cr.execute(
            """ UPDATE ir_ui_view
                SET arch_db = REPLACE(arch_db, 'of_product_seller_price', 'of_seller_price')
                WHERE arch_db LIKE '%of_product_seller_price%'
            """)
    return res
# Unit amount of the price variation coming from price management.
of_price_management_variation = fields.Float(
    string=u"Montant unitaire de la variation de prix liée à la gestion de prix")
# Total unit price variation; maintained by _write() on price_reduce.
of_unit_price_variation = fields.Float(string=u"Montant unitaire de la variation de prix")
@api.depends('price_subtotal', 'margin')
def _compute_of_marge(self):
    """Margin rate of the line, in percent of its untaxed subtotal
    (0.0 when the subtotal is zero).
    """
    for rec in self:
        subtotal = rec.price_subtotal
        rec.of_marge_pc = rec.margin * 100.0 / subtotal if subtotal else 0.0
@api.depends('product_id')
def _compute_of_product_attachment_computed_ids(self):
    """Gather the candidate PDF attachments of the line's product.

    Collects the attachments linked either to the product template or
    to any of its variants, restricted to mimetype application/pdf.
    """
    product_obj = self.env['product.product']
    attachment_obj = self.env['ir.attachment']
    for line in self:
        # All the variants of the product's template
        product_ids = product_obj.search([('product_tmpl_id', '=', line.product_id.product_tmpl_id.id)])
        # All the pdf attachments of the template and of its variants
        # (prefix/polish-notation domain: AND(OR(tmpl-match, variant-match), pdf))
        domain = [
            '&',
            '|',
            '&',
            ('res_model', '=', 'product.template'),
            ('res_id', '=', line.product_id.product_tmpl_id.id),
            '&',
            ('res_model', '=', 'product.product'),
            ('res_id', 'in', product_ids.ids),
            ('mimetype', '=', 'application/pdf')
        ]
        attachment_ids = attachment_obj.search(domain)
        line.of_product_attachment_computed_ids = attachment_ids
@api.depends('price_unit', 'order_id.currency_id', 'order_id.partner_shipping_id', 'product_id',
             'price_subtotal', 'product_uom_qty')
def _compute_of_price_unit(self):
    """Compute the tax-excluded and tax-included unit prices.

    compute_all() is called with a quantity of 1 so that the totals it
    returns are per-unit amounts.
    @ TODO: merge with _compute_amount
    :return:
    """
    for line in self:
        taxes = line.tax_id.compute_all(line.price_unit, line.order_id.currency_id, 1,
                                        product=line.product_id, partner=line.order_id.partner_shipping_id)
        line.of_price_unit_ht = taxes['total_excluded']
        line.of_price_unit_ttc = taxes['total_included']
@api.depends('product_id', 'product_id.invoice_policy',
             'order_id', 'order_id.of_invoice_policy',
             'order_partner_id', 'order_partner_id.of_invoice_policy')
def _compute_of_invoice_policy(self):
    """Invoice policy of the line: the order's setting wins, then the
    customer's, then the product's, then the company-wide default.
    """
    for line in self:
        line.of_invoice_policy = (
            line.order_id.of_invoice_policy
            or line.order_partner_id.of_invoice_policy
            or line.product_id.invoice_policy
            or self.env['ir.values'].get_default('product_template', 'invoice_policy'))
@api.depends('of_invoice_policy',
             'order_id', 'order_id.of_fixed_invoice_date',
             'procurement_ids', 'procurement_ids.move_ids', 'procurement_ids.move_ids')
def _compute_of_invoice_date_prev(self):
    """Forecast invoicing date of the line.

    'order' policy: use the order's forecast invoicing date.
    'delivery' policy: use the expected date of the earliest stock move
    (left unset when there is no move).
    """
    for line in self:
        if line.of_invoice_policy == 'order':
            line.of_invoice_date_prev = line.order_id.of_invoice_date_prev
        elif line.of_invoice_policy == 'delivery':
            moves = line.procurement_ids.mapped('move_ids').sorted('date_expected')
            if moves:
                # date_expected is a datetime string; truncate it to a date.
                line.of_invoice_date_prev = fields.Date.to_string(fields.Date.from_string(moves[0].date_expected))
@api.model
def _search_of_gb_partner_tag_id(self, operator, value):
    """Search proxy for the virtual tag field: delegate to the
    customer's partner categories."""
    return [('order_partner_id.category_id', operator, value)]

@api.model
def _read_group_process_groupby(self, gb, query):
    """Add the ability to group by customer tag (of_gb_partner_tag_id).

    Joins the partner/category many2many relation and groups on the
    category column; any other group-by goes through the standard code.
    """
    if gb != 'of_gb_partner_tag_id':
        return super(SaleOrderLine, self)._read_group_process_groupby(gb, query)
    alias, _ = query.add_join(
        (self._table, 'res_partner_res_partner_category_rel', 'order_partner_id', 'partner_id', 'partner_category'),
        implicit=False, outer=True,
    )
    return {
        'field': gb,
        'groupby': gb,
        'type': 'many2one',
        'display_format': None,
        'interval': None,
        'tz_convert': False,
        'qualified_field': '"%s".category_id' % (alias,)
    }
@api.model
def of_custom_groupby_generate_order(self, alias, order_field, query, reverse_direction, seen):
    """Generate the ORDER BY clause for the custom tag group-by.

    Mirrors _read_group_process_groupby(): joins the partner/category
    relation then delegates the ordering to res.partner.category.
    Returns an empty list for any other field.
    """
    if order_field == 'of_gb_partner_tag_id':
        dest_model = self.env['res.partner.category']
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name
        rel_alias, _ = query.add_join(
            (alias, 'res_partner_res_partner_category_rel',
             'order_partner_id', 'partner_id', 'partner_category_rel'),
            implicit=False, outer=True)
        dest_alias, _ = query.add_join(
            (rel_alias, 'res_partner_category', 'category_id', 'id', 'partner_category'),
            implicit=False, outer=True)
        return dest_model._generate_order_by_inner(dest_alias, m2o_order, query,
                                                   reverse_direction, seen)
    return []
def _compute_margin(self, order_id, product_id, product_uom_id):
    """Override to use the theoretical cost instead of the standard cost
    price when the setting is set to True (handled by get_cost()).
    """
    company_currency = self.env.user.company_id.currency_id
    order_currency = order_id.pricelist_id.currency_id
    cost = product_id.get_cost()
    if product_uom_id != product_id.uom_id:
        cost = product_id.uom_id._compute_price(cost, product_uom_id)
    return company_currency.with_context(date=order_id.date_order).compute(cost, order_currency, round=False)
@api.multi
@api.onchange('product_id')
def product_id_change(self):
    """Extend the standard product onchange.

    - Blocks product selection while no customer is set on the order;
    - optionally appends the manufacturer description to the line label;
    - applies the forbidden-discount and main-article category flags;
    - presets the layout section, product images and attachments.
    """
    if not self.product_id:
        return
    if not self.order_id.partner_id:
        # Reset the product and warn: the customer must be set first.
        self.product_id = False
        warning = {
            'title': (_("Warning!")),
            'message': (_("You must fill in the Customer field to go further."))
        }
        return {'warning': warning}
    res = super(SaleOrderLine, self).product_id_change()
    # Append the manufacturer description when the company setting
    # enables it for quotations (or quotations and invoices).
    afficher_descr_fab = self.env.user.company_id.afficher_descr_fab
    afficher = afficher_descr_fab == 'devis' or afficher_descr_fab == 'devis_factures'
    product = self.product_id.with_context(
        lang=self.order_id.partner_id.lang,
        partner=self.order_id.partner_id.id,
    )
    if product and product.description_fabricant and afficher:
        name = self.name
        name += '\n' + product.description_fabricant
        self.update({'name': name})
    # Forbidden discount
    if self.product_id:
        self.of_product_forbidden_discount = self.product_id.of_forbidden_discount or not self.env.user.has_group(
            'of_sale.group_of_can_modify_sale_price_unit')
        if self.product_id.of_forbidden_discount and self.of_discount_formula:
            self.of_discount_formula = False
    if self.product_id.categ_id:
        self.of_article_principal = self.product_id.categ_id.of_article_principal
    if self.env.user.has_group('sale.group_sale_layout'):
        # Layout section: the product's own setting first, then its
        # category's.
        if self.product_id.of_layout_category_id:
            self.layout_category_id = product.of_layout_category_id
        elif self.product_id.categ_id.of_layout_id:
            self.layout_category_id = self.product_id.categ_id.of_layout_id
    if self.env.user.has_group('of_sale.group_of_sale_multiimage'):
        if self.product_id.product_tmpl_id.of_product_image_ids:
            of_product_image_ids = self.product_id.product_tmpl_id.of_product_image_ids
            self.of_product_image_ids = self.product_id.product_tmpl_id.of_product_image_ids
            # Restrict the selectable images to the template's ones.
            res.setdefault('domain', {})
            res['domain']['of_product_image_ids'] = [('id', 'in', of_product_image_ids.ids)]
    if self.env.user.has_group('of_sale.group_of_sale_print_attachment'):
        attachment_ids = self.env['ir.attachment'].search(
            [('id', 'in', self.of_product_attachment_computed_ids.ids)])
        self.of_product_attachment_ids = attachment_ids
    return res
@api.onchange('product_id', 'product_uom')
def product_id_change_margin(self):
    """After the standard margin onchange, refresh of_seller_price,
    converted to the order's pricelist currency and the line's unit of
    measure.
    """
    super(SaleOrderLine, self).product_id_change_margin()
    if not self.order_id.pricelist_id or not self.product_id or not self.product_uom:
        return
    company_currency = self.env.user.company_id.currency_id
    pricelist_currency = self.order_id.pricelist_id.currency_id
    seller_price = self.product_id.of_seller_price
    if self.product_uom != self.product_id.uom_id:
        seller_price = self.product_id.uom_id._compute_price(seller_price, self.product_uom)
    # Currency conversion at the order date.
    self.of_seller_price = company_currency.with_context(
        date=self.order_id.date_order).compute(seller_price, pricelist_currency, round=False)
@api.model
def _get_purchase_price(self, pricelist, product, product_uom, date):
    """Override to use the theoretical cost instead of the standard cost
    price when the setting is set to True (handled by get_cost()).
    """
    company_currency = self.env.user.company_id.currency_id
    target_currency = pricelist.currency_id
    cost = product.get_cost()
    if product_uom != product.uom_id:
        cost = product.uom_id._compute_price(cost, product_uom)
    converted = company_currency.with_context(date=date).compute(cost, target_currency, round=False)
    return {'purchase_price': converted}

@api.model
def _get_of_seller_price(self, pricelist, product, product_uom, date):
    """Same conversion as _get_purchase_price(), applied to the
    product's seller price.
    """
    company_currency = self.env.user.company_id.currency_id
    target_currency = pricelist.currency_id
    seller_price = product.of_seller_price
    if product_uom != product.uom_id:
        seller_price = product.uom_id._compute_price(seller_price, product_uom)
    converted = company_currency.with_context(date=date).compute(seller_price, target_currency, round=False)
    return {'of_seller_price': converted}
@api.onchange('of_order_line_option_id')
def _onchange_of_order_line_option_id(self):
    """Apply the selected line option to the sale and purchase prices.

    'fixed' options add an absolute amount, 'percent' options a
    relative one; results are rounded to the order currency. An
    optional text is appended to the line description.
    """
    if self.of_order_line_option_id and self.product_id:
        option = self.of_order_line_option_id
        if option.sale_price_update and self.price_unit:
            if option.sale_price_update_type == 'fixed':
                self.price_unit = self.price_unit + option.sale_price_update_value
            elif option.sale_price_update_type == 'percent':
                self.price_unit = self.price_unit + self.price_unit * (option.sale_price_update_value / 100)
            self.price_unit = self.order_id.currency_id.round(self.price_unit)
        if option.purchase_price_update and self.purchase_price:
            if option.purchase_price_update_type == 'fixed':
                self.purchase_price = self.purchase_price + option.purchase_price_update_value
            elif option.purchase_price_update_type == 'percent':
                self.purchase_price = \
                    self.purchase_price + self.purchase_price * (option.purchase_price_update_value / 100)
            self.purchase_price = self.order_id.currency_id.round(self.purchase_price)
        if option.description_update:
            self.name = self.name + "\n%s" % option.description_update
@api.onchange('of_reset_option')
def _onchange_of_reset_option(self):
    """Cancel the applied option: restore the catalog sale and purchase
    prices, strip the option text from the description, then clear the
    option fields.
    """
    if self.of_reset_option:
        # Context carries the pricing parameters used by the pricelist.
        product = self.product_id.with_context(
            lang=self.order_id.partner_id.lang,
            partner=self.order_id.partner_id.id,
            quantity=self.product_uom_qty,
            date=self.order_id.date_order,
            pricelist=self.order_id.pricelist_id.id,
            uom=self.product_uom.id
        )
        if self.order_id.pricelist_id and self.order_id.partner_id:
            # Recompute the price, handling tax-included pricing.
            self.price_unit = self.env['account.tax']._fix_tax_included_price_company(
                self._get_display_price(product), product.taxes_id, self.tax_id, self.company_id)
        self.purchase_price = product.get_cost()
        if self.of_order_line_option_id.description_update:
            self.name = self.name.replace(self.of_order_line_option_id.description_update, '')
        self.of_order_line_option_id = False
        self.of_reset_option = False
@api.onchange('of_product_forbidden_discount')
def _onchange_of_product_forbidden_discount(self):
    """When discounts are forbidden, force the catalog price back."""
    if not self.of_product_forbidden_discount or not self.product_id:
        return
    self.price_unit = self.product_id.list_price
def of_get_line_name(self):
    """Return the line description split into lines for the qweb
    template (t-foreach), optionally stripping the product reference.
    """
    self.ensure_one()
    # Configuration: show/hide the product reference on the pdf.
    show_ref = self.env['ir.values'].get_default('sale.config.settings', 'pdf_product_reference')
    localized = self.with_context(
        lang=self.order_id.partner_id.lang,
        partner=self.order_id.partner_id.id,
    )
    name = localized.name
    if not show_ref and name.startswith("[") and "]" in name:
        # Drop the leading "[REF]" prefix.
        name = name.split("]", 1)[1].strip()
    return name.split("\n")
def _write(self, vals):
    """Low-level write override.

    - When of_product_categ_id is the only written field (stored related
      recomputation), escalate to sudo so it does not fail on access
      rights; the for/else runs the else only when no other field broke
      out of the loop.
    - Track the unit price variation when price_reduce is written on a
      single record.
    """
    for field in vals:
        if field != 'of_product_categ_id':
            break
    else:
        self = self.sudo()
    if 'price_reduce' in vals and len(self) == 1:
        vals['of_unit_price_variation'] = \
            self.of_price_management_variation + vals.get('price_reduce', 0) - self.price_unit
    return super(SaleOrderLine, self)._write(vals)
@api.multi
def unlink(self):
    """Forbid deleting an order line already present on an invoice,
    unless that invoice is a cancelled one that was never validated.
    """
    blocking_lines = self.mapped('invoice_lines').filtered(
        lambda l: l.invoice_id.state != 'cancel' or l.invoice_id.move_name)
    if blocking_lines:
        raise UserError(u"""Vous ne pouvez supprimer une ligne d'article liée à une facture.\n"""
                        u"""Veuillez annuler vos modifications.""")
    return super(SaleOrderLine, self).unlink()
@api.model
def create(self, vals):
    """Create the line.

    - Place the line at the end of its layout section when no explicit
      sequence is given.
    - Re-apply the product images when the onchange values were not
      persisted (kanban-view display issue at save time).
    """
    if vals.get('layout_category_id') and 'sequence' not in vals:
        order = self.env['sale.order'].browse(vals['order_id'])
        last_seq = order._of_get_max_or_min_seq_by_layout().get(vals['layout_category_id'], 0)
        vals['sequence'] = last_seq + 1
    line = super(SaleOrderLine, self).create(vals)
    if vals.get('of_product_image_ids') and not line.of_product_image_ids:
        line.with_context(already_tried=True).of_product_image_ids = vals['of_product_image_ids']
    return line
@api.multi
def write(self, vals):
    """Write override with invoice-lock protection.

    If one of the protected fields is written while a modified line is
    locked by an invoice line, raise an error. of_discount_formula
    belongs to the of_sale_discount module; the way its presence is
    tested in vals does not fail when that module is not installed.
    TODO: allow changing the amount when the modification comes from the
    down-payment invoice.
    """
    force = self._context.get('force_price')
    blocked = [x for x in ('price_unit', 'product_uom_qty', 'product_uom', 'discount', 'of_discount_formula')
               if x in vals.keys()]
    for line in self:
        locked_invoice_lines = line.mapped('invoice_lines').filtered(lambda l: l.of_is_locked)
        if locked_invoice_lines and blocked and not force:
            raise UserError(u"""Cette ligne ne peut être modifiée : %s""" % line.name)
    # At order save time, product images are not always persisted because
    # they are filled by an onchange and displayed in a kanban view, so
    # this write override re-applies them ('already_tried' avoids the
    # recursion).
    if 'already_tried' not in self._context:
        if 'of_product_image_ids' in vals.keys() and vals['of_product_image_ids'] and not self.of_product_image_ids:
            self.with_context(already_tried=True).of_product_image_ids = vals['of_product_image_ids']
    if vals.get('layout_category_id') and 'sequence' not in vals:
        new_layout = self.env['sale.layout_category'].browse(vals['layout_category_id'])
        for line in self:
            old_layout = line.layout_category_id
            order = line.order_id
            # Moving to a later section: put the line at its top;
            # moving to an earlier one: put it at its bottom.
            if old_layout.sequence < new_layout.sequence:
                sequence = order._of_get_max_or_min_seq_by_layout('min').get(vals['layout_category_id'], 0)
                vals['sequence'] = sequence - 1
            else:
                sequence = order._of_get_max_or_min_seq_by_layout().get(vals['layout_category_id'], 0)
                vals['sequence'] = sequence + 1
    return super(SaleOrderLine, self).write(vals)
@api.multi
def _additionnal_tax_verifications(self):
invoice_line_obj = self.env['account.invoice.line']
if self.product_id and self.product_id.id in invoice_line_obj.get_locked_product_ids():
return True
if self.product_id and self.product_id.categ_id and self.product_id.categ_id.id in invoice_line_obj.\
get_locked_category_ids():
return True
return False
@api.multi
def _compute_tax_id(self):
return super(SaleOrderLine, self.filtered(lambda line: not line._additionnal_tax_verifications())).\
_compute_tax_id()
@api.depends(
'state', 'product_uom_qty', 'qty_delivered', 'qty_to_invoice', 'qty_invoiced', 'order_id.of_invoice_policy',
'order_id.partner_id.of_invoice_policy', 'order_id.of_force_invoice_status')
def _compute_invoice_status(self):
"""
Compute the invoice status of a SO line. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
invoice. This is also hte default value if the conditions of no other status is met.
- to invoice: we refer to the quantity to invoice of the line. Refer to method
`_get_to_invoice_qty()` for more information on how this quantity is calculated.
- upselling: this is possible only for a product invoiced on ordered quantities for which
we delivered more than expected. The could arise if, for example, a project took more
time than expected but we decided not to invoice the extra cost to the client. This
occurs only in state 'sale', so that when a SO is set to done, the upselling opportunity
is removed from the list.
- invoiced: the quantity invoiced is larger or equal to the quantity ordered.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if line.order_id.of_force_invoice_status:
line.invoice_status = line.order_id.of_force_invoice_status
else:
invoice_policy = line.of_invoice_policy
if line.state not in ('sale', 'done'):
line.invoice_status = 'no'
elif not float_is_zero(line.qty_to_invoice, precision_digits=precision):
line.invoice_status = 'to invoice'
elif line.state == 'sale' and invoice_policy == 'order' and \
float_compare(line.qty_delivered, line.product_uom_qty, precision_digits=precision) == 1:
line.invoice_status = 'upselling'
elif float_compare(line.qty_invoiced, line.product_uom_qty, precision_digits=precision) >= 0:
line.invoice_status = 'invoiced'
else:
line.invoice_status = 'no'
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state',
'order_id.of_invoice_policy', 'order_id.partner_id.of_invoice_policy')
def _get_to_invoice_qty(self):
"""
Compute the quantity to invoice. If the invoice policy is order, the quantity to invoice is
calculated from the ordered quantity. Otherwise, the quantity delivered is used.
"""
for line in self:
invoice_policy = line.of_invoice_policy
if line.order_id.state in ['sale', 'done']:
if invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
elif invoice_policy == 'delivery':
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
def of_get_price_unit(self):
"""Renvoi le prix unitaire type."""
self.ensure_one()
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
fiscal_position=self.env.context.get('fiscal_position')
)
return self.env['account.tax']._fix_tax_included_price_company(
self._get_display_price(product), product.taxes_id, self.tax_id, self.company_id)
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
if 'of_marge_pc' in fields and 'margin' not in fields:
fields.append('margin')
if 'of_marge_pc' in fields and 'price_subtotal' not in fields:
fields.append('price_subtotal')
res = super(SaleOrderLine, self).read_group(
domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
for line in res:
if 'of_marge_pc' in fields:
if 'margin' in line and line['margin'] is not None and \
'price_subtotal' in line and line['price_subtotal']:
line['of_marge_pc'] = round(100.0 * line['margin'] / line['price_subtotal'], 2)
else:
line['of_marge_pc'] = 0.0
return res
class SaleLayoutCategory(models.Model):
_inherit = 'sale.layout_category'
active = fields.Boolean(string="Active", default=True)
class OFOrderLineOption(models.Model):
_name = 'of.order.line.option'
_description = u"Option pour les lignes de commande (Achat et Vente)"
name = fields.Char(string=u"Nom", required=True)
purchase_price_update = fields.Boolean(string=u"Modification du prix d'achat")
purchase_price_update_type = fields.Selection(
selection=[('fixed', u"Montant fixe"),
('percent', u"Pourcentage")], string=u"Type de modification du prix d'achat")
purchase_price_update_value = fields.Float(string=u"Valeur de modification du prix d'achat")
sale_price_update = fields.Boolean(string=u"Modification du prix de vente")
sale_price_update_type = fields.Selection(
selection=[('fixed', u"Montant fixe"),
('percent', u"Pourcentage")], string=u"Type de modification du prix de vente")
sale_price_update_value = fields.Float(string=u"Valeur de modification du prix de vente")
description_update = fields.Text(string=u"Description de la ligne de commande")
| odof/openfire | of_sale/models/of_sale.py | of_sale.py | py | 69,395 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "odoo.tools.float_compare",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "odoo.api.onchange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "odoo.addons.sale.... |
9409618118 | """Test cases for 'Mailgun' provider module."""
import pytest
import f451_comms.constants as const
import f451_comms.providers.mailgun as mailgun
from f451_comms.exceptions import MissingAttributeError
# =========================================================
# G L O B A L S & P Y T E S T F I X T U R E S
# =========================================================
_DEFAULT_MSG_ = "Hello World!"
_DEFAULT_TAG_ = "safe"
_DEFAULT_TEST_STRING_ = "_TEST_STRING_"
_DEFAULT_TEST_NAME_ = "Batman"
_DEFAULT_TEST_EMAIL_ = "batman@example.com"
_DEFAULT_TEST_SUBJECT_ = "_TEST_SUBJECT_"
@pytest.fixture()
def mixed_tag_list():
"""Return mixed tag strings."""
return [_DEFAULT_TAG_, "äpple", "nötter", "blåbär", "three", "four"]
@pytest.fixture()
def mixed_attribs():
"""Return mixed attributes."""
return {
const.KWD_FROM_NAME: _DEFAULT_TEST_NAME_,
const.KWD_TO_EMAIL: _DEFAULT_TEST_EMAIL_,
const.KWD_SUBJECT: _DEFAULT_TEST_SUBJECT_,
const.KWD_TAGS: _DEFAULT_TEST_STRING_,
const.KWD_TRACKING: True,
const.KWD_TESTMODE: True,
}
@pytest.fixture()
def mailgunClient(valid_settings, mixed_attribs):
"""Return Nailgun client."""
return mailgun.Mailgun(
apiKey=valid_settings.get(
const.CHANNEL_MAILGUN, const.KWD_PRIV_KEY, fallback=""
),
fromDomain=valid_settings.get(
const.CHANNEL_MAILGUN, const.KWD_FROM_DOMAIN, fallback=""
),
**mixed_attribs,
)
# =========================================================
# T E S T F U N C T I O N S
# =========================================================
def test_static_process_tag_list(mixed_tag_list):
"""Verify ability to process tag list."""
# Test happy path
totNum = len(mixed_tag_list)
maxNum = totNum + 1
processed = mailgun.process_tag_list(
inList=mixed_tag_list,
maxNum=maxNum,
minTagLen=mailgun._MIN_TAG_LEN_,
maxTagLen=mailgun._MAX_TAG_LEN_,
)
assert len(processed) == len(mixed_tag_list)
# Test blank items
processed = mailgun.process_tag_list(
inList=["one", "", "", "three"],
maxNum=10,
minTagLen=mailgun._MIN_TAG_LEN_,
maxTagLen=mailgun._MAX_TAG_LEN_,
)
assert len(processed) == 2
# Test max items
processed = mailgun.process_tag_list(
inList=["one", "two", "three", "four"],
maxNum=3,
minTagLen=mailgun._MIN_TAG_LEN_,
maxTagLen=mailgun._MAX_TAG_LEN_,
)
assert len(processed) == 3
# Test min/max chars
processed = mailgun.process_tag_list(
inList="abc123",
maxNum=10,
minTagLen=1,
maxTagLen=3,
)
assert len(processed[0]) == 3
processed = mailgun.process_tag_list(
inList=["abc123", "a", "ab"],
maxNum=10,
minTagLen=3,
maxTagLen=10,
)
assert len(processed[0]) == 6
# Test 'ascii' conversion
processed = mailgun.process_tag_list(
inList="äpple", maxNum=10, minTagLen=3, maxTagLen=10
)
assert processed[0] == "?pple"
def test_create_Tags_object(mixed_tag_list):
"""Verify ability to creata a 'Tag' object."""
# Test happy path
totNum = len(mixed_tag_list)
maxNum = totNum + 1
obj = mailgun.Tags(
inList=mixed_tag_list,
maxNum=maxNum,
minLen=mailgun._MIN_TAG_LEN_,
maxLen=mailgun._MAX_TAG_LEN_,
)
assert obj.keyword == const.KWD_TAGS
assert not obj.isRequired
assert obj.isValid
assert obj.minNum == 0
assert obj.maxNum == maxNum
assert obj.totNum == totNum
assert len(obj.raw) == totNum
assert isinstance(obj.clean, list)
# Test 'maxNum'
maxNum = len(mixed_tag_list) - 1
obj = mailgun.Tags(
inList=mixed_tag_list,
maxNum=maxNum,
minLen=mailgun._MIN_TAG_LEN_,
maxLen=mailgun._MAX_TAG_LEN_,
)
assert obj.minNum == 0
assert obj.maxNum == maxNum
assert obj.totNum == maxNum
assert len(obj.raw) == maxNum
assert len(obj.clean) == maxNum
# Test assertion that 'tags' can be empty
obj = mailgun.Tags(
inList=[""],
maxNum=10,
minLen=mailgun._MIN_TAG_LEN_,
maxLen=mailgun._MAX_TAG_LEN_,
)
assert obj.isValid
assert obj.raw == []
assert obj.clean == []
def test_create_RecipientData_object(valid_attribs_dict):
"""Verify ability to creata a 'RecipientData' object."""
# Test happy path
totNum = len(valid_attribs_dict.items())
maxNum = totNum + 1
obj = mailgun.RecipientData(inData=valid_attribs_dict, maxNum=maxNum)
assert obj.keyword == const.KWD_RECIPIENT_DATA
assert not obj.isRequired
assert obj.isValid
assert obj.minNum == 0
assert obj.maxNum == maxNum
assert obj.totNum == totNum
assert len(obj.raw.items()) == totNum
assert isinstance(obj.clean, str)
assert len(obj.clean) > 1
# Test 'maxNum'
maxNum = len(valid_attribs_dict.items()) - 1
obj = mailgun.RecipientData(inData=valid_attribs_dict, maxNum=maxNum)
assert obj.minNum == 0
assert obj.maxNum == maxNum
assert obj.totNum == maxNum
assert len(obj.raw.items()) == maxNum
# Test assertion that 'recipient_data' can be empty
obj = mailgun.RecipientData(inData={}, maxNum=10)
assert obj.isValid
assert obj.raw == {}
assert obj.clean == "{}"
def test_create_Mailgun_object(mailgunClient, valid_settings, mixed_attribs):
"""Verify ability to creata a 'Mailgun' object."""
client = mailgunClient
assert client.serviceType == const.SRV_TYPE_EMAIL
assert client.serviceName == mailgun._SRV_PROVIDER_
assert client.configSection == mailgun._SRV_CONFIG_SCTN_
client = mailgun.Mailgun(
valid_settings.get(const.CHANNEL_MAILGUN, const.KWD_PRIV_KEY, fallback=""),
valid_settings.get(const.CHANNEL_MAILGUN, const.KWD_FROM_DOMAIN, fallback=""),
**mixed_attribs,
)
assert len(client.defaultTo) == 1
assert client.defaultTo[0].email == _DEFAULT_TEST_EMAIL_
assert client.defaultSubject == _DEFAULT_TEST_SUBJECT_
assert client.defaultTags == [_DEFAULT_TEST_STRING_]
assert client._tracking
assert client._testmode
def test_send_message(mocker, mailgunClient):
"""Verify ability to send message."""
with pytest.raises(MissingAttributeError) as e:
mailgunClient.send_message("")
assert e.type == MissingAttributeError
assert "blank" in e.value.args[0]
attribs = {
const.KWD_SUBJECT: "",
const.KWD_TO_EMAIL: "one@example.com",
}
with pytest.raises(MissingAttributeError) as e:
mailgunClient.send_message(_DEFAULT_MSG_, **attribs)
assert e.type == MissingAttributeError
assert "blank" in e.value.args[0]
attribs = {
const.KWD_SUBJECT: _DEFAULT_MSG_,
const.KWD_TO_EMAIL: "",
}
with pytest.raises(MissingAttributeError) as e:
mailgunClient.send_message(_DEFAULT_MSG_, **attribs)
assert e.type == MissingAttributeError
assert "blank" in e.value.args[0]
attribs = {
const.KWD_SUBJECT: _DEFAULT_MSG_,
const.KWD_TO_EMAIL: "one@example.com",
const.KWD_HTML: f"<html>{_DEFAULT_MSG_}</html>",
const.KWD_RECIPIENT_DATA: {
"one@example.com": {"first": "First", "last": "Person", "uuid": "12345567"}
},
const.KWD_TESTMODE: False,
}
mocker.patch.object(mailgunClient, "send_message", autospec=True)
mailgunClient.send_message(_DEFAULT_MSG_, **attribs)
mailgunClient.send_message.assert_called_once()
@pytest.mark.slow
def test_send_message_extensive(mocker, mailgunClient, new_attachment_file):
"""Verify ability to send message with more data."""
attribs = {
const.KWD_SUBJECT: _DEFAULT_MSG_,
const.KWD_TO_EMAIL: ["one@example.com", "two@example.com"],
const.KWD_CC_EMAIL: "cc@example.com",
const.KWD_BCC_EMAIL: ["bcc@example.com", "", "bcc2@example.com"],
const.KWD_TAGS: ["äpple", "nötter", "", "blåbär", "three", "four"],
const.KWD_HTML: f"<html>{_DEFAULT_MSG_}</html>",
const.KWD_ATTACHMENTS: new_attachment_file,
const.KWD_INLINE: new_attachment_file,
const.KWD_RECIPIENT_DATA: {
"one@example.com": {"first": "First", "last": "Person", "uuid": "12345567"},
"two@example.com": {"first": "Second", "last": "Human", "uuid": "98765443"},
},
const.KWD_TESTMODE: False,
}
mockMailgunClient = mailgunClient
mocker.patch.object(mockMailgunClient, "send_message", autospec=True)
mailgunClient.send_message(_DEFAULT_MSG_, **attribs)
mailgunClient.send_message.assert_called_once()
# from inspect import currentframe, getframeinfo
# helpers.pp(capsys, data, currentframe())
| mlanser/f451-comms | tests/providers/test_mailgun.py | test_mailgun.py | py | 8,844 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "f451_comms.constants.KWD_FROM_NAME",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "f451_comms.constants",
"line_number": 30,
"usage_type": "name"
},
{
"api_n... |
24391185825 | from flask import Blueprint, request, jsonify
import pandas as pd
import requests
import mysql.connector as connection
db = connection.connect(host='database-midway.cnjonpzevrxo.us-east-1.rds.amazonaws.com',user='admin', password='root1234', database= 'midway')
search_bp = Blueprint('search', __name__)
@search_bp.route('/searchByName', methods=['GET'])
def search_location():
location = request.args.get('location')
# Perform location filtering using OpenStreetMap Nominatim service
filtered_results = filter_location(location)
# Return the filtered results as JSON response
return jsonify(filtered_results)
def filter_location(location):
base_url = 'https://nominatim.openstreetmap.org/search'
params = {'q': location, 'format': 'json', 'limit': 10,'countrycodes':'LK'}
del params['limit']
response = requests.get(base_url, params=params)
if response.status_code == 200:
data = response.json()
filtered_results = []
for result in data:
place = {
'Name': result.get('display_name'),
'latitude': result.get('lat'),
'longitude': result.get('lon'),
'Type': result.get('type')
}
filtered_results.append(place)
return filtered_results
else:
return []
def execute_query(query):
from app import connection2
cursor = connection2.cursor()
cursor.execute(query)
result = cursor.fetchone()
cursor.close()
return result
def delete():
from app import connection2
cursor = connection2.cursor()
cursor.execute("delete from saveSuggest")
cursor.execute()
cursor.close()
@search_bp.route('/savetype', methods=['GET'])
def save_location():
type = request.args.get('type')
name = request.args.get('name')
print(type)
print(name)
from app import connection2
cursor = connection2.cursor()
delete_query = "DELETE FROM saveSuggest"
cursor.execute(delete_query)
qury = 'insert into saveSuggest(name,type) value (%s,%s)'
value = (name,type)
cursor.execute(qury, value)
connection2.commit()
cursor.close()
return "done"
| KavindaDharmasiri/midway-backend | search.py | search.py | py | 2,244 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mysql.connector",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.ar... |
7950602535 | import discord
from discord.ext import commands
try:
import cPickle as pickle
except ImportError:
import pickle
def loadPickle(file : str):
with open(file, 'rb') as f:
data = pickle.load(f)
return data
def dumpPickle(data, file : str):
with open(file, 'wb') as f:
pickle.dump(data, f)
def createEmbed(title, description=None):
return discord.Embed(title=title, description=description, color=14031172)
def gainCoins(target, amount : int):
with open('coin_stash.pickle', 'rb') as f:
coin_stash = pickle.load(f)
try:
coin_stash[target.id] += amount
except KeyError:
coin_stash[target.id] = amount
with open('coin_stash.pickle', 'wb') as f:
pickle.dump(coin_stash, f)
def findMember(bot, member_id : str):
"""Looks in each server and returns a member if found."""
for server in bot.servers:
member = server.get_member(member_id)
if member is not None:
return member
async def inputTimeout(bot, ctx, topic : str):
await bot.send_message(ctx.message.channel, "{}, your {} has timed out".format(ctx.message.author.mention, topic))
| nath1100/Kamikaze-Bot | cogs/utilities/tools.py | tools.py | py | 1,198 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number"... |
586835230 | # Residual block architecture
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
import pdb
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, sn, kernel_size, stride):
super(ConvLayer, self).__init__()
padding = (kernel_size-1) // 2
self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
if sn:
self.conv3d = nn.utils.spectral_norm(self.conv3d, eps=1e-4)
def forward(self, x):
out = self.conv3d(x)
return out
class UpsampleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, sn, kernel_size, stride, upsample_mode, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample_mode = upsample_mode
self.upsample = upsample
padding = (kernel_size-1) // 2
if upsample_mode == "lr" and upsample:
self.conv3d = nn.Conv3d(in_channels, out_channels * upsample * upsample * upsample, kernel_size, stride,
padding)
self.voxel_shuffle = VoxelShuffle(upsample)
else:
self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
if sn:
self.conv3d = nn.utils.spectral_norm(self.conv3d, eps=1e-4)
def forward(self, x):
if self.upsample:
if self.upsample_mode == "hr":
x = F.interpolate(x, mode='nearest', scale_factor=self.upsample)
out = self.conv3d(x)
elif self.upsample_mode == "lr":
x = self.conv3d(x)
out = self.voxel_shuffle(x)
else:
out = self.conv3d(x)
return out
class VoxelShuffle(nn.Module):
def __init__(self, upscale_factor):
super(VoxelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input):
batch_size, c, h, w, l = input.size()
rh, rw, rl = self.upscale_factor, self.upscale_factor, self.upscale_factor
oh, ow, ol = h * rh, w * rw, l * rl
oc = c // (rh * rw * rl)
input_view = input.contiguous().view(
batch_size, rh, rw, rl, oc, h, w, l
)
shuffle_out = input_view.permute(0, 4, 5, 1, 6, 2, 7, 3).contiguous()
out = shuffle_out.view(
batch_size, oc, oh, ow, ol
)
return out
class ForwardBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, gen_sn, kernel_size=3, stride=1,
downsample_factor=2):
super(ForwardBlockGenerator, self).__init__()
self.relu = nn.ReLU()
self.p1_conv0 = ConvLayer(in_channels, out_channels, gen_sn, kernel_size, stride)
self.p1_in0 = nn.InstanceNorm3d(out_channels, affine=True)
self.p1_conv1 = ConvLayer(out_channels, out_channels, gen_sn, kernel_size, downsample_factor)
self.p1_in1 = nn.InstanceNorm3d(out_channels, affine=True)
self.p2_conv0 = ConvLayer(in_channels, out_channels, gen_sn, 1, stride)
def forward(self, x, norm):
out = self.p1_conv0(x)
if norm == "Instance":
out = self.p1_in0(out)
out = self.relu(out)
out = self.p1_conv1(out)
if norm == "Instance":
out = self.p1_in1(out)
residual = self.p2_conv0(x)
residual = F.avg_pool3d(residual, kernel_size=2)
out = out + residual
return out
class BackwardBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, gen_sn, kernel_size=3, stride=1, upsample_mode="lr",
upsample_factor=2):
super(BackwardBlockGenerator, self).__init__()
self.relu = nn.ReLU()
self.p1_conv0 = UpsampleConvLayer(in_channels, in_channels, gen_sn, kernel_size, stride, upsample_mode,
upsample=upsample_factor)
self.p1_in0 = nn.InstanceNorm3d(in_channels, affine=True)
self.p1_conv1 = UpsampleConvLayer(in_channels, out_channels, gen_sn, kernel_size, stride, upsample_mode)
self.p1_in1 = nn.InstanceNorm3d(out_channels, affine=True)
self.p2_conv0 = UpsampleConvLayer(in_channels, out_channels, gen_sn, 1, 1, upsample_mode,
upsample=upsample_factor)
def forward(self, x, norm):
out = x
out = self.p1_conv0(out)
if norm == "Instance":
out = self.p1_in0(out)
out = self.relu(out)
out = self.p1_conv1(out)
if norm == "Instance":
out = self.p1_in1(out)
residual = x
residual = self.p2_conv0(residual)
out = out + residual
return out
class ResidualBlockGenerator(nn.Module):
def __init__(self, channels, gen_sn, kernel_size=3, stride=1):
super(ResidualBlockGenerator, self).__init__()
self.relu = nn.ReLU()
self.conv0 = ConvLayer(channels, channels, gen_sn, kernel_size, stride)
self.in0 = nn.InstanceNorm3d(channels, affine=True)
self.conv1 = ConvLayer(channels, channels, gen_sn, kernel_size, stride)
self.in1 = nn.InstanceNorm3d(channels, affine=True)
def forward(self, x, norm):
out = self.conv0(x)
if norm == "Instance":
out = self.in0(out)
out = self.relu(out)
out = self.conv1(out)
if norm == "Instance":
out = self.in1(out)
residual = x
out = out + residual
return out
class ConvLSTMCell(nn.Module):
def __init__(self, input_channels, hidden_channels, kernel_size, stride):
super(ConvLSTMCell, self).__init__()
padding = kernel_size // 2
self.Wxf = nn.Conv3d(input_channels, hidden_channels, kernel_size, stride, padding, bias=True)
self.Whf = nn.Conv3d(hidden_channels, hidden_channels, kernel_size, stride, padding, bias=False)
self.Wxi = nn.Conv3d(input_channels, hidden_channels, kernel_size, stride, padding, bias=True)
self.Whi = nn.Conv3d(hidden_channels, hidden_channels, kernel_size, stride, padding, bias=False)
self.Wxo = nn.Conv3d(input_channels, hidden_channels, kernel_size, stride, padding, bias=True)
self.Who = nn.Conv3d(hidden_channels, hidden_channels, kernel_size, stride, padding, bias=False)
self.Wxc = nn.Conv3d(input_channels, hidden_channels, kernel_size, stride, padding, bias=True)
self.Whc = nn.Conv3d(hidden_channels, hidden_channels, kernel_size, stride, padding, bias=False)
def forward(self, x, h0, c0):
f = torch.sigmoid(self.Wxf(x) + self.Whf(h0))
i = torch.sigmoid(self.Wxi(x) + self.Whi(h0))
o = torch.sigmoid(self.Wxo(x) + self.Who(h0))
c = i * torch.tanh(self.Wxc(x) + self.Whc(h0)) + f * c0
h = o * torch.tanh(c)
return h, c
def init_hidden(self, batch_size, hidden_channels, shape):
return (Variable(torch.zeros(batch_size, hidden_channels, shape[0], shape[1], shape[2])).cuda(),
Variable(torch.zeros(batch_size, hidden_channels, shape[0], shape[1], shape[2])).cuda())
| trainsn/TSR-TVD | model/basicblock.py | basicblock.py | py | 7,192 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
33780162767 | from flask import Flask, render_template, session
from flask_session import Session
from app.api.controllers import api
from logging.handlers import TimedRotatingFileHandler
import config, logging
app = Flask(__name__, static_folder="static")
app.config.from_object("config")
app.config['JSON_AS_ASCII'] = False
app.config['UPLOAD_FOLDER'] = config.UPLOAD_PATH
Session(app)
app.register_blueprint(api)
logging.basicConfig(level=config.log_level, format=config.log_formatter)
handler = TimedRotatingFileHandler(config.log_file,when="midnight")
handler.suffix = config.log_file_suffix
formatter = logging.Formatter(config.log_formatter)
handler .setFormatter(formatter)
logging.getLogger().addHandler(handler) | pachecobeto95/IC | SensingBusV2/webapi/app/__init__.py | __init__.py | py | 712 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.api.controllers",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "app.api.controllers.config.from_object",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "... |
31062436935 |
from ..utils import Object
class BankCardInfo(Object):
"""
Information about a bank card
Attributes:
ID (:obj:`str`): ``BankCardInfo``
Args:
title (:obj:`str`):
Title of the bank card description
actions (List of :class:`telegram.api.types.bankCardActionOpenUrl`):
Actions that can be done with the bank card number
Returns:
BankCardInfo
Raises:
:class:`telegram.Error`
"""
ID = "bankCardInfo"
def __init__(self, title, actions, **kwargs):
self.title = title # str
self.actions = actions # list of bankCardActionOpenUrl
@staticmethod
def read(q: dict, *args) -> "BankCardInfo":
title = q.get('title')
actions = [Object.read(i) for i in q.get('actions', [])]
return BankCardInfo(title, actions)
| iTeam-co/pytglib | pytglib/api/types/bank_card_info.py | bank_card_info.py | py | 865 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 35,
"usage_type": "name"
}
] |
74698894504 | import hashlib
import datetime
from fastapi import UploadFile
from motor.motor_asyncio import AsyncIOMotorClient
from config import settings
from models import UploadPhoto, UploadUser, Photo
def Hash(string: str):
hash = hashlib.sha256(string.encode('utf-8')).digest().decode('utf-8')
return hash
class DB:
__slots__ = ('db', 'collection')
def __init__(self) -> None:
client = AsyncIOMotorClient(
settings.mongo_host,
settings.mongo_port
)
self.db = client['main_photos']
self.collection = self.db['photos']
async def get_photos(self, skip: int = 0, limit: int = 10):
cursor = self.collection.find()
cursor.skip(skip).limit(limit)
count = await self.collection.count_documents({})
result = []
async for document in cursor:
document['id'] = str(document.pop('_id'))
result.append(document)
return count, result
async def random_photo(self):
cursor = self.collection.aggregate([{ '$sample': { 'size': 1 } }])
documents = await cursor.to_list(length=None)
document = documents[0]
document['id'] = str(document.pop('_id'))
return document
async def save(self, user: UploadUser, file: UploadFile) -> Photo:
filename = await Hash(file.filename)
filename += '.png'
uploadUser = user.dict()
document = {
'filename': filename,
'created_at': datetime.date.today().strftime('%Y-%m-%d'),
'uploaded_by': uploadUser
}
r = await self.collection.insert_one(
document
)
document['id'] = str(document.pop('_id'))
return document
db = DB() | NIDILLIN/Kathrin | Microservices/Photos(1405)/db.py | db.py | py | 1,762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "hashlib.sha256",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "motor.motor_asyncio.AsyncIOMotorClient",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.settings.mongo_host",
"line_number": 20,
"usage_type": "attribute"
},
{
... |
4578609455 | from itertools import combinations
n,k = [int(x) for x in input().split()]
work = {} #work เก็บงานของอุปกรณ์
price = {} #price เก็บราคาของอุปกรณ์
for i in range(n):
data = [int(x) for x in input().split()]
work[i] =set([j-1 for j in range(1,k+1) if data[j] == 1])
price[i] = data[0]
check = set([i for i in range(k)]) # check คือเซตของงานทั้งหมด (0,1,2,..,n-1)
all = [i for i in range(n)]
result = []
for i in range(1,n+1):
comb = combinations(all,i)
for j in comb:
theirwork = set()
theirprice = 0
for w in j:
theirwork = theirwork.union(work[w])
theirprice += price[w]
if theirwork == check:
result.append(theirprice)
result.sort()
print(result[0])
| naphattar/Betaprogramming | Chapter 1/1036.ver2.py | 1036.ver2.py | py | 860 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.combinations",
"line_number": 13,
"usage_type": "call"
}
] |
37458237517 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
plt.rcParams['font.size'] = 14
M = 15
k = 2
c = 5
ν = c / M
ω = k / M
def f(u, t, ν, ω):
"""
u: [x,v]
δu/δt = f(u) = [δx/δt, δv/δt] = [v, a]
1) Ma + kv + cx = 0
2) Ma = -kv - cx
3) let ν = c/M; ω=k/m
4) a = -νv - ωx
5) δv/δt = -νv - ωx
6) by definition, v = δx/δt = u[1]
∴ δu/δt = f(u) = f([x,v]) = [δx/δt, δv/δt] = [v, a]
"""
v = u[1]
a = -ν * u[1] - ω * u[0]
return np.array([v, a])
t = np.linspace(0, 100, 1000)
# odeint transforms a differential equation of
# degree n (n=2 in our case)
# in one function (x(t) in our case) to
# a system of differential equations of
# degree 1 in n functions
uu = odeint(f, (-1.5, -2.5), t, args=(ν, ω))
center = t.mean()
fig, ax = plt.subplots()
ax.grid()
ax.plot(t, uu[:, 0], label=r'$x(t)$ [m]')
ax.plot(t, uu[:, 1], label=r'$v(t)$ [m/s]')
ax.set_xlabel('$t$ [s]')
ax.legend()
spring = ax.plot([center] * 2, [0, uu[0, 0]], 'k', lw=3)[0]
big_dot = ax.plot(center, uu[0, 0], 'ko', ms=20)[0]
dot1 = ax.plot(t[0], uu[0, 0], 'r*')[0]
dot2 = ax.plot(t[0], uu[0, 1], 'g*')[0]
def animate(i):
i = np.clip(int(round(i)), 0, len(t) - 1)
tt = t[i]
spring.set_data([center] * 2, [0, uu[i, 0]])
big_dot.set_data([center, uu[i, 0]])
dot1.set_data([tt, uu[i, 0]])
dot2.set_data([tt, uu[i, 1]])
fig.canvas.draw_idle()
if 0:
from matplotlib.widgets import Slider
plt.subplots_adjust(bottom=0.2)
axts = fig.add_axes([0.25, .03, 0.50, 0.02])
ts = Slider(axts, 'step', 0, len(t) - 1, valinit=0, valfmt='%d')
ts.on_changed(animate)
else:
import matplotlib.animation as animation
ani = animation.FuncAnimation(fig, animate, np.arange(len(t)),
interval=(t[1] - t[0]) * 1000)
plt.show()
| lbrichards/asaka | mass_spring.py | mass_spring.py | py | 1,890 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.lin... |
70105956265 | #%%
import torch
from torch.utils.data import Dataset, DataLoader
import glob
from sklearn.model_selection import train_test_split
labeled_dir = "D:/Images_segmentation/Ellipse/pseudo_training/6_images_pt_512"
mask_dir = "D:/Images_segmentation/Ellipse/pseudo_training/6_masks_pt_512"
unlabeled_dir = "D:/Images_nanomax/Images/unlabeled_images_512_t1_pt"
class Labeled_Dataset(Dataset):
def __init__(self, image_list, mask_list):
self.image_list = image_list
self.mask_list = mask_list
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
image = self.image_list[index]
mask = self.mask_list[index]
image = torch.load(image)
mask = torch.load(mask)
return image, mask
class Unlabeled_Dataset(Dataset):
def __init__(self, image_list):
self.image_list = image_list
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
image = self.image_list[index]
image = torch.load(image)
return image
def get_dataloaders(batch_size, unlabeled=False, labeled_dir=labeled_dir, mask_dir=mask_dir, unlabeled_dir=unlabeled_dir):
    """Build train/eval (and optionally unlabeled) DataLoaders from .pt files.

    Labeled images and masks are split 80/20 into train and eval sets.
    All loaders shuffle, drop the last partial batch, and pin memory.

    NOTE(review): image/mask pairing relies on both directories globbing in
    the same order -- confirm the filenames correspond.
    """
    images = glob.glob(labeled_dir + '/*.pt')
    masks = glob.glob(mask_dir + '/*.pt')
    train_images, eval_images, train_masks, eval_masks = train_test_split(images, masks, test_size=0.2)

    loader_kwargs = dict(batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True)
    train_dataloader = DataLoader(Labeled_Dataset(train_images, train_masks), **loader_kwargs)
    eval_dataloader = DataLoader(Labeled_Dataset(eval_images, eval_masks), **loader_kwargs)

    if not unlabeled:
        return train_dataloader, eval_dataloader
    unlabeled_files = glob.glob(unlabeled_dir + '/*.pt')
    unlabeled_dataloader = DataLoader(Unlabeled_Dataset(unlabeled_files), **loader_kwargs)
    return train_dataloader, eval_dataloader, unlabeled_dataloader
| lucasdegeorge/NW_SemSeg | dataloader.py | dataloader.py | py | 2,241 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Datas... |
719866394 | # -*- coding: utf-8 -*-
from Acquisition import aq_base
from plone.app.contenttypes.testing import PLONE_APP_CONTENTTYPES_FIXTURE
from plone.app.discussion.interfaces import IDiscussionSettings
from plone.app.testing import FunctionalTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.registry.interfaces import IRegistry
from plone.testing import zope
from plone.testing import z2
from Products.CMFPlone.tests.utils import MockMailHost
from Products.MailHost.interfaces import IMailHost
from zope.component import getSiteManager
from zope.component import queryUtility
import collective.honeypot.config
import plone.restapi
# We want WHITELISTED_START to be empty by default currently, but we
# do want to test it.
# Adding a "jq_" prefix here makes the prefix-whitelisting code path
# observable from the test layers defined below.
start = list(collective.honeypot.config.WHITELISTED_START)
start.append("jq_")
collective.honeypot.config.WHITELISTED_START = set(start)
def patch_mailhost(portal):
    """Replace the portal MailHost with a MockMailHost so tests can inspect
    outgoing mail instead of actually sending it.

    The original MailHost is stashed on the portal so unpatch_mailhost()
    can restore it later.
    """
    registry = queryUtility(IRegistry)
    registry["plone.email_from_address"] = "webmaster@example.org"
    portal._original_MailHost = portal.MailHost
    portal.MailHost = mailhost = MockMailHost("MailHost")
    mailhost.smtp_host = "localhost"
    # Swap the registered IMailHost utility for the mock as well.
    sm = getSiteManager(context=portal)
    sm.unregisterUtility(provided=IMailHost)
    sm.registerUtility(mailhost, provided=IMailHost)
def unpatch_mailhost(portal):
    """Restore the real MailHost saved by patch_mailhost()."""
    portal.MailHost = portal._original_MailHost
    sm = getSiteManager(context=portal)
    sm.unregisterUtility(provided=IMailHost)
    # aq_base strips the acquisition wrapper before re-registering.
    sm.registerUtility(aq_base(portal._original_MailHost), provided=IMailHost)
class HoneypotFixture(PloneSandboxLayer):
    """Test layer that loads collective.honeypot and prepares a Plone site
    with anonymous commenting/registration enabled and a mock MailHost."""

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # Load ZCML
        import collective.honeypot

        self.loadZCML(package=collective.honeypot)
        # Install product and call its initialize() function
        zope.installProduct(app, "collective.honeypot")

    def tearDownZope(self, app):
        # Uninstall product
        zope.uninstallProduct(app, "collective.honeypot")

    def setUpPloneSite(self, portal):
        patch_mailhost(portal)
        # Enable commenting, self registration, and sending mail.
        registry = queryUtility(IRegistry)
        settings = registry.forInterface(IDiscussionSettings)
        settings.globally_enabled = True
        settings.anonymous_comments = True
        portal.manage_permission("Reply to item", ("Anonymous", "Manager"))
        portal.manage_permission("Allow sendto", ("Anonymous", "Manager"))
        portal.manage_permission("Add portal member", ("Anonymous", "Manager"))

    def tearDownPloneSite(self, portal):
        # BUG FIX: this hook was previously misspelled "teardownPloneSite"
        # (lowercase d), so plone.app.testing never called it and the
        # MailHost patch leaked past layer teardown.  The camel-case name
        # matches tearDownZope above.
        unpatch_mailhost(portal)
# Singleton layer instance plus the functional-testing wrapper, as is
# conventional for plone.app.testing layers.
HONEYPOT_FIXTURE = HoneypotFixture()
HONEYPOT_FUNCTIONAL_TESTING = FunctionalTesting(
    bases=(HONEYPOT_FIXTURE,),
    name="collective.honeypot:Functional",
)
class HoneypotRestApiFixture(HoneypotFixture):
    """Variant of HoneypotFixture that additionally loads plone.restapi on
    top of the plone.app.contenttypes fixture, for REST API tests."""

    defaultBases = (PLONE_APP_CONTENTTYPES_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        super(HoneypotRestApiFixture, self).setUpZope(app, configurationContext)
        self.loadZCML(package=plone.restapi)
# REST API layer; z2.ZSERVER_FIXTURE provides a live HTTP server so
# functional tests can issue real requests against the API.
HONEYPOT_API_FIXTURE = HoneypotRestApiFixture()
HONEYPOT_API_FUNCTIONAL_TESTING = FunctionalTesting(
    bases=(HONEYPOT_API_FIXTURE, z2.ZSERVER_FIXTURE),
    name="HoneypotRestApiFixture:Functional",
)
| collective/collective.honeypot | collective/honeypot/testing.py | testing.py | py | 3,337 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "collective.honeypot.config.honeypot",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "collective.honeypot.config",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "collective.honeypot.config.honeypot",
"line_number": 23,
"usage_type"... |
40745506193 | import json
import os
import torch
import torch.nn as nn
import pandas as pd
import argparse
from test import test
from src.dataset import create_dataloader
from src.utils import (
read_feature,
feature_extraction_pipeline,
read_features_files,
choose_model,
)
from src.data_augmentation import Mixup, Specmix, Cutmix
from src.models.utils import SaveBestModel
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
from typing import Dict, Tuple, List, Union
from sklearn.metrics import classification_report, accuracy_score
def train(
    model: nn.Module,
    dataloader: DataLoader,
    optimizer: torch.optim.Adam,
    loss: torch.nn.CrossEntropyLoss,
    device: torch.device,
    mixer: Union[None, Mixup, Specmix, Cutmix],
    dataset: str,
) -> Tuple[float, float]:
    """Run one training epoch and report the epoch metric and mean loss.

    Args:
        model (nn.Module): the model being trained.
        dataloader (DataLoader): the training dataloader.
        optimizer (torch.optim.Adam): the optimizer used.
        loss (torch.nn.CrossEntropyLoss): the loss function used.
        device (torch.device): which device to use.
        mixer (Union[None, Mixup, Specmix, Cutmix]): optional mixing-based
            data augmentation applied batch-wise before the forward pass.
        dataset (str): which dataset is being used (coraa, emodb or ravdess).

    Returns:
        Tuple[float, float]: the training metric (macro F1 for CoRAA,
        accuracy otherwise) and the mean training loss over all batches.
    """
    model.train()
    predictions, targets = [], []
    running_loss = 0.0
    num_batches = 0
    for batch in dataloader:
        num_batches += 1
        features = batch["features"].to(device).to(dtype=torch.float32)
        labels = batch["labels"].to(device).to(dtype=torch.float32)
        optimizer.zero_grad()
        if mixer is not None:
            features, labels = mixer(x=features, y=labels)
        output = model(features)
        batch_loss = loss(output, labels)
        running_loss += batch_loss.item()
        batch_loss.backward()
        optimizer.step()
        # Collect hard predictions / one-hot targets as plain Python lists.
        batch_pred = output.argmax(dim=-1, keepdim=True).to(dtype=torch.int)
        predictions.extend(batch_pred.detach().cpu().numpy().tolist())
        batch_truth = labels.argmax(dim=-1, keepdim=True).to(dtype=torch.int)
        targets.extend(batch_truth.detach().cpu().numpy().tolist())
    mean_loss = running_loss / num_batches
    if dataset == "coraa":
        report = classification_report(
            targets, predictions, digits=6, output_dict=True, zero_division=0.0
        )
        metric = report["macro avg"]["f1-score"]
    else:
        metric = accuracy_score(y_true=targets, y_pred=predictions)
    return metric, mean_loss
def evaluate(
    model: nn.Module,
    dataloader: DataLoader,
    loss: torch.nn.CrossEntropyLoss,
    device: torch.device,
    dataset: str,
) -> Tuple[float, float]:
    """
    Function responsible for the model evaluation.

    Args:
        model (nn.Module): the created model.
        dataloader (DataLoader): the validation dataloader.
        loss (torch.nn.CrossEntropyLoss): the loss function used.
        device (torch.device): which device to use.
        dataset (str): which dataset is being used (coraa, emodb or ravdess).

    Returns:
        Tuple[float, float]: the validation metric (macro F1 for CoRAA,
        accuracy otherwise) and the mean validation loss, respectively.
    """
    model.eval()
    predictions = []
    targets = []
    validation_loss = 0.0
    with torch.inference_mode():
        # BUG FIX: enumerate() previously started at 0, so the summed loss
        # was divided by (num_batches - 1) -- and raised ZeroDivisionError
        # for a single-batch loader.  Starting at 1 matches train().
        for index, batch in enumerate(dataloader, start=1):
            data = batch["features"].to(device)
            target = batch["labels"].to(device)
            data = data.to(dtype=torch.float32)
            target = target.to(dtype=torch.float32)
            output = model(data)
            l = loss(output, target)
            validation_loss += l.item()
            prediction = output.argmax(dim=-1, keepdim=True).to(dtype=torch.int)
            prediction = prediction.detach().cpu().numpy()
            predictions.extend(prediction.tolist())
            target = target.argmax(dim=-1, keepdim=True).to(dtype=torch.int)
            target = target.detach().cpu().numpy()
            targets.extend(target.tolist())
    validation_loss = validation_loss / index
    if dataset == "coraa":
        validation_f1 = classification_report(
            targets, predictions, digits=6, output_dict=True, zero_division=0.0
        )
        validation_f1 = validation_f1["macro avg"]["f1-score"]
    else:
        validation_f1 = accuracy_score(y_true=targets, y_pred=predictions)
    return validation_f1, validation_loss
def training_pipeline(
    training_data: List,
    validation_data: List,
    feature_config: Dict,
    wavelet_config: Dict,
    data_augmentation_config: Dict,
    model_config: Dict,
    mode: str,
    dataset: str,
) -> None:
    """
    The training pipeline: per-fold model creation, training loop with
    checkpointing, and CSV logging.

    Args:
        training_data (List): the training data (one (X, y) pair per fold).
        validation_data (List): the validation data (one (X, y) pair per fold).
        feature_config (Dict): the feature's configurations.
        wavelet_config (Dict): the wavelet's configurations.
        data_augmentation_config (Dict): the data augmentation step's configurations.
        model_config (Dict): the model's configurations.
        mode (str): which mode is being used.
        dataset (str): which dataset is being used.

    Raises:
        ValueError: when the augmentation target is invalid for the dataset.
        NotImplementedError: for an unknown dataset name.
    """
    total_folds = len(training_data)
    best_valid_f1, best_train_f1, best_test_f1 = [], [], []
    # Map the configured augmentation target onto the concrete label ids
    # of the selected dataset.
    if dataset == "coraa":
        if data_augmentation_config["target"] == "majority":
            data_augment_target = [0]
        elif data_augmentation_config["target"] == "minority":
            data_augment_target = [1, 2]
        elif data_augmentation_config["target"] == "all":
            data_augment_target = [0, 1, 2]
        else:
            raise ValueError(
                "Invalid arguments for target. Should be 'all', 'majority' or 'minority'"
            )
    elif dataset == "emodb" or dataset == "savee":
        if data_augmentation_config["target"] == "all":
            data_augment_target = [0, 1, 2, 3, 4, 5, 6]
        else:
            raise ValueError("Invalid arguments for target. Should be 'all'")
    elif dataset == "ravdess":
        if data_augmentation_config["target"] == "all":
            data_augment_target = [0, 1, 2, 3, 4, 5, 6, 7]
        else:
            raise ValueError("Invalid arguments for target. Should be 'all'")
    else:
        raise NotImplementedError
    # creating log folder
    log_path = os.path.join(os.getcwd(), "logs", dataset, mode)
    os.makedirs(log_path, exist_ok=True)
    logs = pd.DataFrame()
    # NOTE(review): ``params`` is the module-level dict created in the
    # __main__ block, not a function argument -- this function only works
    # when the script is run directly; confirm intended.
    feat_path = os.path.join(params["output_path"], params["dataset"])
    # reading training audio features (CORAA only)
    if dataset == "coraa":
        X_test = read_feature(
            path=feat_path,
            name="X_test.pth",
        )
        y_test = read_feature(
            path=feat_path,
            name="y_test.pth",
        )
    for fold, (training, validation) in enumerate(zip(training_data, validation_data)):
        X_train, y_train = training
        X_valid, y_valid = validation
        # creating and defining the model
        # BUG FIX: ``torch.cuda.is_available`` was referenced without
        # calling it; a bound function is always truthy, so the device
        # selection silently ignored whether CUDA actually exists.
        device = torch.device(
            "cuda" if torch.cuda.is_available() and model_config["use_gpu"] else "cpu"
        )
        model = choose_model(
            mode=mode, model_name=model_config["name"], dataset=dataset, device=device
        )
        optimizer = torch.optim.Adam(
            params=model.parameters(),
            lr=model_config["learning_rate"],
            weight_decay=0,
            betas=(0.9, 0.98),
            eps=1e-9,
        )
        loss = torch.nn.CrossEntropyLoss()
        scheduler = None
        mixer = None
        if model_config["use_lr_scheduler"]:
            scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
        # At most one mixer ends up active; when several techniques are
        # configured, later blocks override earlier ones
        # (cutmix > specmix > mixup).
        if "mixup" in data_augmentation_config["techniques"].keys():
            mixer = Mixup(
                alpha=data_augmentation_config["techniques"]["mixup"]["alpha"]
            )
        if "specmix" in data_augmentation_config["techniques"].keys():
            mixer = Specmix(
                p=data_augmentation_config["p"],
                min_band_size=data_augmentation_config["techniques"]["specmix"][
                    "min_band_size"
                ],
                max_band_size=data_augmentation_config["techniques"]["specmix"][
                    "max_band_size"
                ],
                max_frequency_bands=data_augmentation_config["techniques"]["specmix"][
                    "max_frequency_bands"
                ],
                max_time_bands=data_augmentation_config["techniques"]["specmix"][
                    "max_time_bands"
                ],
                device=device,
            )
        if "cutmix" in data_augmentation_config["techniques"].keys():
            mixer = Cutmix(
                alpha=data_augmentation_config["techniques"]["cutmix"]["alpha"],
                p=data_augmentation_config["p"],
            )
        # creating the model checkpoint object
        sbm = SaveBestModel(
            output_dir=os.path.join(
                model_config["output_path"], dataset, mode, model_config["name"]
            ),
            model_name=model_config["name"],
            dataset=dataset,
        )
        # creating the training dataloader
        training_dataloader = create_dataloader(
            X=X_train,
            y=y_train,
            feature_config=feature_config,
            wavelet_config=wavelet_config,
            data_augmentation_config=data_augmentation_config,
            num_workers=0,
            mode=mode,
            shuffle=True,
            training=True,
            batch_size=model_config["batch_size"],
            data_augment_target=data_augment_target,
        )
        # creating the validation dataloader
        validation_dataloader = create_dataloader(
            X=X_valid,
            y=y_valid,
            feature_config=feature_config,
            wavelet_config=wavelet_config,
            data_augmentation_config=None,
            num_workers=0,
            mode=mode,
            shuffle=True,
            training=False,
            batch_size=model_config["batch_size"],
            data_augment_target=None,
        )
        # creating the test dataloader (CORAA only)
        # NOTE(review): this branch uses the module-level ``feat_config``
        # and ``params`` instead of the function arguments -- confirm.
        if dataset == "coraa":
            test_dataloader = create_dataloader(
                X=X_test,
                y=y_test,
                feature_config=feat_config,
                wavelet_config=wavelet_config,
                data_augmentation_config=None,
                num_workers=0,
                mode=params["mode"],
                shuffle=False,
                training=False,
                batch_size=params["model"]["batch_size"],
                data_augment_target=None,
            )
        if total_folds != 1:
            print()
            print("#" * 20)
            print(f"TRAINING FOLD: {fold}")
            print("#" * 20)
            print()
        else:
            print()
            print("#" * 20)
            print(f"TRAINING")
            print("#" * 20)
            print()
        # training loop
        for epoch in range(1, model_config["epochs"] + 1):
            print(f"Epoch: {epoch}/{model_config['epochs']}")
            train_f1, train_loss = train(
                device=device,
                dataloader=training_dataloader,
                optimizer=optimizer,
                model=model,
                loss=loss,
                mixer=mixer,
                dataset=dataset,
            )
            valid_f1, valid_loss = evaluate(
                device=device,
                dataloader=validation_dataloader,
                model=model,
                loss=loss,
                dataset=dataset,
            )
            if dataset == "coraa":
                test_f1 = test(model=model, dataloader=test_dataloader, device=device)[
                    "f1-score macro"
                ]
                # saving the best model
                sbm(
                    current_valid_f1=valid_f1,
                    current_valid_loss=valid_loss,
                    current_test_f1=test_f1,
                    current_train_f1=train_f1,
                    epoch=epoch,
                    fold=fold,
                    model=model,
                    optimizer=optimizer,
                )
            else:
                valid_acc = valid_f1
                train_acc = train_f1
                # saving the best model
                sbm(
                    current_valid_acc=valid_acc,
                    current_valid_loss=valid_loss,
                    current_train_acc=train_acc,
                    epoch=epoch,
                    fold=fold,
                    model=model,
                    optimizer=optimizer,
                )
            # updating learning rate
            if not scheduler is None:
                scheduler.step()
            row = pd.DataFrame(
                {
                    "epoch": [epoch],
                    "train_f1": [train_f1],
                    "train_loss": [train_loss],
                    "validation_f1": [valid_f1],
                    "validation_loss": [valid_loss],
                }
            )
            logs = pd.concat([logs, row], axis=0)
        # printing the best result
        if dataset == "coraa":
            print()
            print("*" * 40)
            print(f"Epoch: {sbm.best_epoch}")
            print(f"Best F1-Score: {sbm.best_valid_f1}")
            print(f"Best Loss: {sbm.best_valid_loss}")
            print("*" * 40)
            print()
            best_train_f1.append(sbm.best_train_f1)
            best_valid_f1.append(sbm.best_valid_f1)
            best_test_f1.append(sbm.best_test_f1)
        else:
            print()
            print("*" * 40)
            print(f"Epoch: {sbm.best_epoch}")
            print(f"Best Unweighted Accuracy: {sbm.best_valid_acc}")
            print(f"Best Loss: {sbm.best_valid_loss}")
            print("*" * 40)
            print()
            best_train_f1.append(sbm.best_train_acc)
            best_valid_f1.append(sbm.best_valid_acc)
        # One CSV per fold; reset the in-memory log afterwards.
        logs = logs.reset_index(drop=True)
        logs.to_csv(
            path_or_buf=os.path.join(
                log_path, f"fold{fold if total_folds != 1 else ''}.csv"
            ),
            sep=",",
            index=False,
        )
        logs = pd.DataFrame()
    # printing the best result
    print()
    print("#" * 40)
    print(f"Best Train F1-Score: {best_train_f1}")
    print(f"Best Validation F1-Score: {best_valid_f1}")
    print(f"Best Test F1-Score: {best_test_f1}")
    print("#" * 40)
    print()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config", required=True, help="the json configuration file path."
    )
    args = parser.parse_args()
    assert os.path.exists(args.config), "Configuration file does not exist!"
    # reading the parameters configuration file
    params = json.load(open(args.config, "r"))
    # parameters defination
    k_fold = None
    # Maximum clip length in seconds per corpus; used to size max_samples.
    # NOTE(review): an unknown dataset name leaves ``max_seconds`` undefined
    # and fails later with a NameError -- consider an explicit error branch.
    if params["dataset"].lower() == "coraa":
        max_seconds = 16
    elif params["dataset"].lower() == "emodb":
        max_seconds = 10
    elif params["dataset"].lower() == "ravdess":
        max_seconds = 6
    elif params["dataset"].lower() == "savee":
        max_seconds = 8
    if "kfold" in params.keys():
        k_fold = params["kfold"]["num_k"]
    max_samples = max_seconds * int(params["sample_rate"])
    feat_config = params["feature"]
    feat_config["sample_rate"] = int(params["sample_rate"])
    data_augmentation_config = params["data_augmentation"]
    wavelet_config = params["wavelet"]
    feat_path = os.path.join(params["output_path"], params["dataset"])
    # feature extraction pipeline
    if params["overwrite"] or not os.path.exists(params["output_path"]):
        print()
        print("EXTRACTING THE FEATURES...")
        print()
        feature_extraction_pipeline(
            sample_rate=int(params["sample_rate"]),
            to_mono=params["to_mono"],
            dataset=params["dataset"],
            max_samples=max_samples,
            k_fold=k_fold,
            output_path=params["output_path"],
            input_path=params["input_path"],
        )
    # reading the previously extracted features
    training_data, validation_data = read_features_files(
        k_fold=k_fold, feat_path=feat_path
    )
    model_config = params["model"]
    print()
    print("TRAINING THE MODEL...")
    # training step
    training_pipeline(
        training_data=training_data,
        validation_data=validation_data,
        feature_config=feat_config,
        wavelet_config=wavelet_config,
        data_augmentation_config=data_augmentation_config,
        model_config=model_config,
        mode=params["mode"],
        dataset=params["dataset"],
    )
| rafaelgreca/ser-wavelet | train.py | train.py | py | 16,934 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.optim... |
70437779623 | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog, QTextEdit
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
class HeartFailureApp(QMainWindow):
    """Small PyQt5 GUI: load a heart-disease CSV, fit a logistic-regression
    model on it, and display the accuracy and confusion matrix."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Heart Failure Prediction")
        self.setGeometry(100, 100, 400, 200)
        # Guards train_model(); flipped once a CSV has been read.
        self.dataset_loaded = False
        self.create_widgets()
        self.create_layout()

    def create_widgets(self):
        """Create the buttons, metric labels and result pane."""
        self.load_data_button = QPushButton("Load Dataset", self)
        self.load_data_button.clicked.connect(self.load_dataset)
        # Training is only enabled after a dataset has been loaded.
        self.train_model_button = QPushButton("Train Model", self)
        self.train_model_button.setEnabled(False)
        self.train_model_button.clicked.connect(self.train_model)
        self.accuracy_label = QLabel("Accuracy:")
        self.confusion_matrix_label = QLabel("Confusion Matrix:")
        self.result_text = QTextEdit()
        self.result_text.setReadOnly(True)

    def create_layout(self):
        """Stack all widgets vertically inside the central widget."""
        layout = QVBoxLayout()
        layout.addWidget(self.load_data_button)
        layout.addWidget(self.train_model_button)
        layout.addWidget(self.accuracy_label)
        layout.addWidget(self.confusion_matrix_label)
        layout.addWidget(self.result_text)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)

    def load_dataset(self):
        """Prompt for a CSV file and load it into a DataFrame."""
        file_dialog = QFileDialog()
        filepath, _ = file_dialog.getOpenFileName(self, "Select Dataset", "", "CSV Files (*.csv)")
        if filepath:
            self.dataset_loaded = True
            self.heart_data = pd.read_csv(filepath)
            self.train_model_button.setEnabled(True)

    def train_model(self):
        """Fit logistic regression on the loaded data and show the metrics."""
        if self.dataset_loaded:
            # BUG FIX: DataFrame.drop's ``axis`` argument is keyword-only in
            # pandas >= 2.0, so the old positional call drop([...], 'columns')
            # raises a TypeError there.  Dropping via ``columns=`` is
            # equivalent and works on all supported pandas versions.
            X = self.heart_data.drop(columns=['age', 'sex', 'cp', 'trtbps', 'chol', 'fbs', 'restecg', 'thalachh', 'exng', 'oldpeak', 'slp'])
            y = self.heart_data['output']
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
            model = LogisticRegression()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            accuracy = accuracy_score(y_test, y_pred)
            cm = confusion_matrix(y_test, y_pred)
            self.accuracy_label.setText(f"Accuracy: {accuracy}")
            self.confusion_matrix_label.setText("Confusion Matrix:")
            self.result_text.setText(str(cm))
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window,
    # then hand control to the Qt event loop until the window closes.
    app = QApplication(sys.argv)
    window = HeartFailureApp()
    window.show()
    sys.exit(app.exec_())
| kgurudarshan/Heart-Failure-Prediction | UI.py | UI.py | py | 2,907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 23,
"usage_type": "call"
},
{
... |
27037952147 | import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import torch
import os
def plot_pcu(vector):
    """Show a 3-D scatter plot of an (N, 3) point cloud."""
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    # Columns are interpreted as x, y and z coordinates respectively.
    xs, ys, zs = vector[:, 0], vector[:, 1], vector[:, 2]
    ax.scatter3D(xs, ys, zs, cmap='Greens')
    plt.show()
def get_y_pred_truth(model, data_loader):
    """Collect predicted and ground-truth class labels over a data loader.

    Args:
        model: a torch module producing class scores; put into eval mode here.
        data_loader: iterable yielding ``(x, y, _)`` tuples, where ``x`` is a
            2-D tensor and ``y`` a scalar label tensor.  ``None`` entries are
            skipped.

    Returns:
        tuple[list, list]: predicted labels and true labels, in order.
    """
    device = get_device()
    y_pred = []
    y_truth = []
    model.eval()
    for point in data_loader:
        # BUG FIX (idiom): ``point == None`` invokes __eq__, which is
        # elementwise for tensors; an identity check is the correct test.
        if point is None:
            continue
        x, y, _ = point
        # Add a leading batch dimension of 1 before the forward pass.
        x = x.view(1, x.shape[0], x.shape[1])
        x = x.to(device)
        y = y.to(device)
        yhat = model(x.float())
        _, label = torch.max(yhat, 1)
        y_pred.append(label.item())
        y_truth.append(y.item())
    return y_pred, y_truth
def get_device():
    """Return 'cuda' when a GPU is available, otherwise 'cpu'."""
    return 'cuda' if torch.cuda.is_available() else 'cpu'
def save_model(model, path):
    """Serialize the model's state_dict (wrapped in a dict) to *path*."""
    checkpoint = {'model_state_dict': model.state_dict()}
    with open(path, 'wb') as fh:
        torch.save(checkpoint, fh)
def load_model_state_dict(path):
    """Read a checkpoint written by save_model() and return its state_dict."""
    with open(os.path.join(path), 'rb') as fh:
        checkpoint = torch.load(fh)
    return checkpoint['model_state_dict']
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplo... |
2846448684 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 17:52:31 2018
@author: khanhdeux
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

from lib import plot_decision_regions
def sigmoid(z):
    """Logistic function: maps any real z into the open interval (0, 1)."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
# Visualise the sigmoid over [-7, 7): an S-shaped curve through (0, 0.5).
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.show()
def cost_1(z):
    """Logistic-regression cost contribution of a sample whose label is y=1."""
    activation = sigmoid(z)
    return -np.log(activation)
def cost_0(z):
    """Logistic-regression cost contribution of a sample whose label is y=0."""
    activation = sigmoid(z)
    return -np.log(1 - activation)
# Plot both cost branches against the activation phi(z): the cost grows
# without bound as the prediction moves away from the true label.
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z, c1, label='J(w) if y=1')
c0 = [cost_0(x) for x in z]
plt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
plt.xlabel('$\phi$(z)')
plt.ylabel('J(w)')
plt.legend(loc='best')
plt.show()
# Load iris, keep columns 2 and 3 (petal length/width), and make a
# stratified 70/30 train/test split.
iris = datasets.load_iris()
X = iris.data[:, [2,3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
class LogisticRegressionGD(object):
    """Logistic-regression classifier trained with batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes over the training set.
    random_state : int
        Seed for the random weight initialization.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias unit.
    cost_ : list
        Logistic (cross-entropy) cost after each epoch.
    """

    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        """Fit weights to X of shape (n_samples, n_features) and y in {0, 1}."""
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_[1:] += self.eta * np.dot(X.T, errors)
            self.w_[0] += self.eta * errors.sum()
            # BUG FIX: the second term used np.log(1 - np.log(output)), which
            # is not the logistic cost; the correct term is np.log(1 - output).
            cost = -y.dot(np.log(output)) - (1 - y).dot(np.log(1 - output))
            self.cost_.append(cost)
        return self

    def sigmoid(self, z):
        """Logistic activation 1 / (1 + e^-z)."""
        return 1.0 / (1.0 + np.exp(-z))

    def activation(self, X):
        """Logistic activation of the net input."""
        return self.sigmoid(X)

    def net_input(self, X):
        """Linear combination of inputs and weights, plus bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return the class label (0 or 1) by thresholding the net input at 0."""
        return np.where(self.net_input(X) >= 0.0, 1, 0)
# Train the from-scratch classifier on a two-class subset (labels 0 and 1)
# and plot its decision boundary.
X_train_01_subset = X_train[(y_train == 0) | (y_train == 1)]
y_train_01_subset = y_train[(y_train == 0) | (y_train == 1)]
lrgd = LogisticRegressionGD(eta=0.05, n_iter=1000, random_state=1)
lrgd.fit(X_train_01_subset, y_train_01_subset)
plot_decision_regions(X=X_train_01_subset, y=y_train_01_subset, classifier=lrgd)
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
# Standardize the features (fit on the training split only) and train
# scikit-learn's multi-class logistic regression for comparison.
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C=100, random_state=1)
lr.fit(X_train_std, y_train)
plot_decision_regions(X=X_combined_std, y=y_combined, classifier=lr,test_idx=range(105, 150))
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
print(lr.predict_proba(X_test_std[:3,:]).argmax(axis=1))
print(lr.predict(X_test_std[:3, :]))
print(lr.predict(X_test_std[0, :].reshape(1,-1)))
# Sweep the inverse-regularization strength C and show how the second
# class's weight coefficients shrink as regularization grows (smaller C).
weights, params = [], []
for c in np.arange(-5, 5):
    lr = LogisticRegression(C=10.**c, random_state=1)
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])
    params.append(10.**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], label='petal width', linestyle='--')
plt.xlabel('C')
plt.ylabel('weight coefficient')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()
{
"api_name": "numpy.exp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
38802287949 | #!/usr/bin/python3
import logging
import sys
from scapy.all import *
# Suppress scapy runtime warnings below ERROR level.
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
def parsePacket(packet):
    """Print the URL of an HTTP GET request carried by *packet*, if any.

    Packets without a TCP payload, or whose payload is not an HTTP GET
    request, are silently ignored.
    """
    if not (packet.haslayer("TCP") and packet.haslayer("Raw")):
        return
    payload = packet["Raw"].load
    # Skip payloads that are not HTTP GET requests.
    if b'HTTP' not in payload or b'GET' not in payload:
        return
    # Extract the Host header and the request path from the raw bytes.
    host = payload.split(b"Host: ")[1].split(b"\r\n")[0].decode()
    path = payload.split(b"GET ")[1].split(b" HTTP")[0].decode()
    print("URL:" + host + path)
if __name__ == "__main__":
    # Read the pcap file given on the command line and scan every packet.
    for packet in rdpcap(sys.argv[1]):
        parsePacket(packet)
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
}
] |
8986849807 | from winstealer import *
import json
# Metadata displayed by the WinStealer script loader.
winstealer_script_info = {
    "script": "Vision Tracker",
    "author": "bckd00r",
    "description": "Tracks enemy invisible objects and clones",
}
# Settings populated from the saved config in winstealer_load_cfg().
show_clones, show_wards, show_traps, ward_awareness = None, None, None, None
# Hard-coded ward-spot guides.  Each spot holds the on-screen click
# position, where the ward lands, and where the player should stand;
# drawAwareness() renders them as guide circles on Summoner's Rift.
blue_to_side_brush = {
    "clickPosition": Vec3(2380.09, -71.24, 11004.69),
    "wardPosition": Vec3(2826.47, -71.02, 11221.34),
    "movePosition": Vec3(1774, 52.84, 10856),
}
mid_to_wolves_blue_side = {
    "clickPosition": Vec3(5174.83, 50.57, 7119.81),
    "wardPosition": Vec3(4909.10, 50.65, 7110.90),
    "movePosition": Vec3(5749.25, 51.65, 7282.75),
}
tower_to_wolves_blue_side = {
    "clickPosition": Vec3(5239.21, 50.67, 6944.90),
    "wardPosition": Vec3(4919.83, 50.64, 7023.80),
    "movePosition": Vec3(5574, 51.74, 6458),
}
red_blue_side = {
    "clickPosition": Vec3(8463.64, 50.60, 4658.71),
    "wardPosition": Vec3(8512.29, 51.30, 4745.90),
    "movePosition": Vec3(8022, 53.72, 4258),
}
dragon_got_bush = {
    "clickPosition": Vec3(10301.03, 49.03, 3333.20),
    "wardPosition": Vec3(10322.94, 49.03, 3244.38),
    "movePosition": Vec3(10072, -71.24, 3908),
}
baron_top_bush = {
    "clickPosition": Vec3(4633.83, 50.51, 11354.40),
    "wardPosition": Vec3(4524.69, 53.25, 11515.21),
    "movePosition": Vec3(4824, -71.24, 10906),
}
red_red_side = {
    "clickPosition": Vec3(6360.12, 52.61, 10362.71),
    "wardPosition": Vec3(6269.35, 53.72, 10306.69),
    "movePosition": Vec3(6824, 56, 10656),
}
tower_to_wolves = {
    "clickPosition": Vec3(9586.57, 59.62, 8020.29),
    "wardPosition": Vec3(9871.77, 51.47, 8014.44),
    "movePosition": Vec3(9122, 53.74, 8356),
}
mid_to_wolves = {
    "clickPosition": Vec3(9647.62, 51.31, 7889.96),
    "wardPosition": Vec3(9874.42, 51.50, 7969.29),
    "movePosition": Vec3(9122, 52.60, 7606),
}
red_bot_side_bush = {
    "clickPosition": Vec3(12427.00, -35.46, 3984.26),
    "wardPosition": Vec3(11975.34, 66.37, 3927.68),
    "movePosition": Vec3(13022, 51.37, 3808),
}
# Per-object rendering tables; entries may be overridden from the saved
# config (stored as JSON) in winstealer_load_cfg().
traps = {
    # Name -> (radius, show_radius_circle, show_radius_circle_minimap, icon)
    "caitlyntrap": [50, True, False, "caitlyn_yordlesnaptrap"],
    "jhintrap": [140, True, False, "jhin_e"],
    "jinxmine": [50, True, False, "jinx_e"],
    "maokaisproutling": [50, False, False, "maokai_e"],
    "nidaleespear": [50, True, False, "nidalee_w1"],
    "shacobox": [300, True, False, "jester_deathward"],
    "teemomushroom": [75, True, True, "teemo_r"],
}
# Same layout as ``traps``: (radius, show_world_circle, show_minimap, icon).
wards = {
    "bluetrinket": [900, True, True, "bluetrinket"],
    "jammerdevice": [900, True, True, "pinkward"],
    "perkszombieward": [900, True, True, "bluetrinket"],
    "sightward": [900, True, True, "sightward"],
    "visionward": [900, True, True, "sightward"],
    "yellowtrinket": [900, True, True, "yellowtrinket"],
    "yellowtrinketupgrade": [900, True, True, "yellowtrinket"],
    "ward": [900, True, True, "sightward"],
}
# Champions with clone mechanics; no radius circles, icon only.
clones = {
    "shaco": [0, False, False, "shaco_square"],
    "leblanc": [0, False, False, "leblanc_square"],
    "monkeyking": [0, False, False, "monkeyking_square"],
    "neeko": [0, False, False, "neeko_square"],
    "fiddlesticks": [0, False, False, "fiddlesticks_square"],
}
def winstealer_load_cfg(cfg):
    """Populate the module-level settings from the persisted config.

    Missing keys fall back to defaults; the trap and ward tables are
    stored as JSON strings.
    """
    global show_clones, show_wards, show_traps, ward_awareness, traps, wards
    ward_awareness = cfg.get_bool("ward_awareness", True)
    show_clones = cfg.get_bool("show_clones", True)
    show_wards = cfg.get_bool("show_wards", True)
    show_traps = cfg.get_bool("show_traps", True)
    traps = json.loads(cfg.get_str("traps", json.dumps(traps)))
    wards = json.loads(cfg.get_str("wards", json.dumps(wards)))
def winstealer_save_cfg(cfg):
    """Persist the module-level settings back into the config object.

    Inverse of winstealer_load_cfg(); the trap and ward tables are
    serialized to JSON strings.
    """
    global show_clones, show_wards, show_traps, ward_awareness, traps, wards
    cfg.set_bool("ward_awareness", ward_awareness)
    cfg.set_bool("show_clones", show_clones)
    cfg.set_bool("show_wards", show_wards)
    cfg.set_bool("show_traps", show_traps)
    cfg.set_str("traps", json.dumps(traps))
    cfg.set_str("wards", json.dumps(wards))
def winstealer_draw_settings(game, ui):
    """Render the settings UI: global toggles plus per-trap and per-ward
    display options (range circles / minimap visibility)."""
    global traps, wards
    global show_clones, show_wards, show_traps, ward_awareness
    ward_awareness = ui.checkbox("Ward awareness", ward_awareness)
    show_clones = ui.checkbox("Show clones", show_clones)
    show_wards = ui.checkbox("Show wards", show_wards)
    # BUG FIX: this checkbox was labelled "Show clones", duplicating the
    # clones toggle above even though it controls trap rendering.
    show_traps = ui.checkbox("Show traps", show_traps)
    ui.text("Traps")
    for x in traps.keys():
        if ui.treenode(x):
            traps[x][1] = ui.checkbox("Show range circles", traps[x][1])
            traps[x][2] = ui.checkbox("Show on minimap", traps[x][2])
            ui.treepop()
    ui.text("Wards")
    for x in wards.keys():
        if ui.treenode(x):
            wards[x][1] = ui.checkbox("Show range circles", wards[x][1])
            wards[x][2] = ui.checkbox("Show on minimap", wards[x][2])
            ui.treepop()
def draw(game, obj, radius, show_circle_world, show_circle_map, icon):
    """Draw `obj`'s icon, remaining lifetime, and optional range circles."""
    screen_pos = game.world_to_screen(obj.pos)
    if game.is_point_on_screen(screen_pos):
        # Lifetime remaining = total duration minus time elapsed since last seen.
        remaining = obj.duration + obj.last_visible_at - game.time
        if remaining > 0:
            game.draw_text(screen_pos, f"{remaining:.0f}", Color.WHITE)
        icon_corner = screen_pos.add(Vec2(30, 30))
        game.draw_image(icon, screen_pos, icon_corner, Color.WHITE)
    if show_circle_world:
        game.draw_circle_world(obj.pos, radius, 100, 3, Color.YELLOW)
    if show_circle_map:
        minimap_pos = game.world_to_minimap(obj.pos)
        minimap_radius = game.distance_to_minimap(radius)
        game.draw_circle(minimap_pos, minimap_radius, 100, 2, Color.YELLOW)
def drawAwareness(game, wardSpot):
    """Guide the player toward a known warding spot with colored circles."""
    move_pos = wardSpot["movePosition"]
    click_pos = wardSpot["clickPosition"]
    spot_dist = move_pos.distance(game.player.pos)
    if 70 < spot_dist < 400:
        # Approaching: highlight the stand position in yellow.
        game.draw_circle_world(move_pos, 100, 100, 1, Color.YELLOW)
    elif spot_dist < 70:
        # In position: stand circle turns green; now guide the cursor toward
        # the exact click position for placing the ward.
        game.draw_circle_world(move_pos, 100, 100, 1, Color.GREEN)
        click_dist = game.get_cursor().distance(game.world_to_screen(click_pos))
        if click_dist > 10:
            game.draw_circle_world(click_pos, 30, 100, 1, Color.YELLOW)
        else:
            game.draw_circle_world(click_pos, 30, 100, 1, Color.GREEN)
    # Baseline marker: always show the stand position in white.
    game.draw_circle_world(move_pos, 100, 100, 1, Color.WHITE)
def wardAwareness(game):
    """Draw warding-spot guidance for every known Summoner's Rift spot."""
    global tower_to_wolves, tower_to_wolves_blue_side
    global dragon_got_bush
    global mid_to_wolves, mid_to_wolves_blue_side
    global blue_to_side_brush
    global red_blue_side, red_bot_side_bush, red_red_side
    global baron_top_bush
    # The spot table only applies to Summoner's Rift.
    if game.map.type != MapType.SummonersRift:
        return
    for spot in (
        tower_to_wolves,
        tower_to_wolves_blue_side,
        dragon_got_bush,
        mid_to_wolves,
        mid_to_wolves_blue_side,
        blue_to_side_brush,
        red_blue_side,
        red_bot_side_bush,
        red_red_side,
        baron_top_bush,
    ):
        drawAwareness(game, spot)
def winstealer_update(game, ui):
    """Per-frame update: ward-spot guidance, enemy wards/traps, enemy clones."""
    global show_clones, show_wards, show_traps
    global traps, wards, clones
    if ward_awareness:
        wardAwareness(game)
    me = game.player
    for unit in game.others:
        if unit.is_ally_to(me) or not unit.is_alive:
            continue
        if show_wards and unit.has_tags(UnitTag.Unit_Ward) and unit.name in wards:
            draw(game, unit, *wards[unit.name])
            continue
        if show_traps and unit.has_tags(UnitTag.Unit_Special_Trap) and unit.name in traps:
            draw(game, unit, *traps[unit.name])
    if not show_clones:
        return
    for champ in game.champs:
        if champ.is_ally_to(me) or not champ.is_alive:
            continue
        # Clone-detection heuristic carried over from the original code:
        # the R and D slots reporting the same name flags an active clone.
        if champ.name in clones and champ.R.name == champ.D.name:
            draw(game, champ, *clones[champ.name])
| 8C/Xopher-lol | GameplayScripts/vision_tracker.py | vision_tracker.py | py | 8,011 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 1... |
22636055069 | import os
import functools
import copy
import mimetypes
import urllib.parse
import email
import logging
import tracemalloc
import feedparser
from lxml import etree # nosec B410
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Configure the XML parser as securely as possible since we're parsing XML from
# untrusted sources:
# https://lxml.de/FAQ.html#how-do-i-use-lxml-safely-as-a-web-service-endpoint
XML_PARSER = etree.XMLParser(resolve_entities=False)

# Lower-cased string values that count as "true" for environment-variable flags.
TRUE_STRS = {"1", "true", "yes", "on"}
DEBUG = (  # noqa: F841
    "DEBUG" in os.environ and os.environ["DEBUG"].strip().lower() in TRUE_STRS
)
POST_MORTEM = (  # noqa: F841
    "POST_MORTEM" in os.environ
    and os.environ["POST_MORTEM"].strip().lower() in TRUE_STRS
)
# NOTE(review): unlike the flags above, this keeps the lower-cased *string*
# value (or False) rather than testing it against TRUE_STRS — presumably
# because PYTHONTRACEMALLOC can carry a frame count; confirm intent.
PYTHONTRACEMALLOC = (
    "PYTHONTRACEMALLOC" in os.environ
    and os.environ["PYTHONTRACEMALLOC"].strip().lower()
)
# MIME type -> preferred extension overrides, applied by `init()` below.
PRIORITY_TYPES = {
    "application/xml": ".xml",
    "audio/ogg": ".ogg",
    "video/x-matroska": ".mkv",
    # Not actually needed as overrides but found in the wild
    # and not in `/etc/mime.types`
    "application/rss+xml": ".rss",  # `application/x-rss+xml` in `/etc/mime.types`
}
def init(files=None, priority_types=None):
    """
    Work around wrong default extension mappings in the stdlib `mimetypes`
    module.

    https://bugs.python.org/issue1043134

    Both the module's hard-coded defaults and the system `**/mime.types`
    files get some mappings wrong (e.g. `application/xml` -> `.xsl`,
    `audio/mpeg` -> `.mpga` in Ubuntu's `/etc/mime.types`), so a manual
    override table is unavoidable.

    For every MIME type in `priority_types`, make the given extension the
    first — and therefore default — one.  The `mimetypes` registry decides
    the default extension by insertion order, so the preferred extension is
    moved to the front of the inverse-lookup list, after promoting the type
    to a `strict=True` registration if it was only known as `strict=False`.
    """
    if priority_types is None:  # pragma: no cover
        priority_types = PRIORITY_TYPES
    # Make sure the registry is fully populated before rearranging it.
    mimetypes.init(files=files)
    registry = mimetypes._db  # pylint: disable=protected-access
    strict_inv = registry.types_map_inv[True]
    loose_inv = registry.types_map_inv[False]
    for mime_type, preferred_ext in priority_types.items():
        mime_type = mime_type.lower()
        if mime_type not in strict_inv:
            # Must re-register as a strict type first.
            mimetypes.add_type(mime_type, preferred_ext)
        for inv_map in (strict_inv, loose_inv):
            if mime_type not in inv_map:
                continue
            # Copy to a mutable list (the registry may hold tuples).
            extensions = inv_map[mime_type] = list(inv_map[mime_type])
            if preferred_ext in extensions:
                extensions.remove(preferred_ext)
                extensions.insert(0, preferred_ext)
# Apply the `mimetypes` fixes at import time so every caller sees them.
init()

# Abuse URL quoting for paths that are safe across filesystems:
# - *do* quote (IOW, do *not* allow) "/"
# - do *not* quote (IOW, *do* allow) spaces and other special characters found not to
#   cause problems
#
# So far, special characters have been checked in a Samba share as browsed in the
# Windows 10 explorer in order to determine which should be allowed/unquoted. The `%`
# character works in this test bed but of course it *must* be quoted, otherwise quoting
# and unquoting would not be symmetrical. A directory with a Unicode character was also
# tested against this environment and found to be working but it doesn't seem possible
# to get `urllib.parse.quote` to leave them unquoted. Test files were generated in the
# Samba share from the Linux side using the following:
#
#     tmp_path = pathlib.Path("/media/Library/tmp/feed-archiver")
#     [
#         (tmp_path / f"{char_idx}{char}").write_text("")
#         for char_idx, char in enumerate(string.printable)
#         if urllib.parse.quote(char, safe=" ").startswith("%")
#     ]
#
# Please do report any additional cases that cause issues in any other
# common filesystems.
SAFE_CHARS_WIN10_SAMBA = " !#$&'()+,;=@[]^`{}"
# Percent-quoted forms of the OS path separators, used by `quote_sep` below.
QUOTED_SEP = urllib.parse.quote(os.sep, safe="")
QUOTED_ALTSEP = None
if os.altsep is not None:  # pragma: no cover
    QUOTED_ALTSEP = urllib.parse.quote(os.altsep)
# Quote a single path component: path separators *are* quoted here.
quote_basename = functools.partial(urllib.parse.quote, safe=SAFE_CHARS_WIN10_SAMBA)
# Quote a whole path: path separators are left intact.
quote_path = functools.partial(
    urllib.parse.quote,
    safe=f"{SAFE_CHARS_WIN10_SAMBA}{os.sep}{os.altsep}",
)
def quote_sep(string_):  # noqa: V103
    """
    Return the string with every path separator (slash) percent-quoted.

    Sanitizes values interpolated into enclosure template plugin string
    formats so feed XML input can't smuggle in extra path components.
    """
    for sep, quoted in ((os.sep, QUOTED_SEP), (os.altsep, QUOTED_ALTSEP)):
        if sep is not None:
            string_ = string_.replace(sep, quoted)
    return string_
def compare_memory_snapshots(parent):  # pragma: no cover
    """
    Take a tracemalloc snapshot and log the diff against `parent`'s last one.

    Returns the fresh snapshot so the caller can stash it on `parent` for the
    next comparison.
    """
    current = tracemalloc.take_snapshot()
    previous = getattr(parent, "tracemalloc_snapshot", None)
    if previous is not None:
        top_stats = current.compare_to(previous, "lineno")
        formatted = "\n".join(str(stat) for stat in top_stats[:10])
        logger.debug("Memory consumption changes:\n%s", formatted)
    return current
def parse_content_type(content_type):
    """
    Extract the bare MIME type from an RFC822-style `Content-Type` value.

    Safely strips parameters such as `charset` by delegating the parsing to
    the stdlib email machinery.
    """
    message = email.message.Message()
    message["Content-Type"] = content_type
    # `get_params()` yields (key, value) pairs; the first key is the MIME type.
    mime_type, _params = message.get_params()[0]
    return mime_type
def copy_empty_items_parent(feed_format, items_parent):
    """
    Return an `etree` copy of the feed's items parent with all items removed.

    Lets single items be re-parsed one at a time with richer tooling.
    """
    stripped = etree.Element(items_parent.tag, items_parent.attrib)
    for child in items_parent:
        # Optimization/caveat: stop at the first item element.  Feed XML
        # *could* place non-item elements after or between items, but that
        # has never been observed in practice, items make up most of a feed's
        # elements, and enclosure plugin configs rarely use the other
        # elements — so skip the extra iteration until someone reports an
        # issue with this.
        if child.tag == feed_format.ITEM_TAG:
            break
        stripped.append(copy.deepcopy(child))
    return stripped
# The archive and remote feed XML must be parsed with `etree` because we need
# to modify the XML and write it back to the archive, which `feedparser`
# doesn't support.  Enclosure plugins, however, often need `feedparser`'s
# richer parsing (dates, times, ...).  That richness is only required in the
# rare case of new items being added to the archive's version of the feed, so
# rich parsing happens on a per-item basis.
def parse_item_feed(feed_format, feed_elem, item_elem):
    """
    Build a single-item "feed" around `item_elem` and richly parse it.
    """
    single_item_feed = copy_empty_items_parent(feed_format, feed_elem)
    single_item_feed.append(copy.deepcopy(item_elem))
    return feedparser.parse(etree.tostring(single_item_feed))
| rpatterson/feed-archiver | src/feedarchiver/utils.py | utils.py | py | 8,034 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lxml.etree.XMLParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.environ",
"l... |
72170371625 | import streamlit as st
from streamlit_lottie import st_lottie
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
import json
import joblib
import pickle
import shap
from webapp.pred_pipeline_user_input_app import get_all_updrs_preds
# App-wide Streamlit page configuration (centered content layout).
st.set_page_config(layout="centered")
def load_lottiefile(filepath: str):
    """Load a Lottie animation JSON file and return the deserialized object.

    Args:
        filepath: Path to the Lottie ``.json`` file.

    Returns:
        The deserialized JSON (typically a dict) suitable for ``st_lottie``.
    """
    # Explicit encoding so non-ASCII animation metadata loads on any platform
    # instead of depending on the locale's default codec.
    with open(filepath, "r", encoding="utf-8") as f:
        return json.load(f)
# Hero animation rendered above the title.
filename = load_lottiefile("./streamlit_data/doctor_animation.json")
st_lottie(filename, speed=1, height=200)

st.title("Parkinsons Severity Prediction")

# One tab per app section; tab1 is the interactive prediction view.
tab1, tab2, tab3, tab4, tab5 = st.tabs(
    [
        "Prediction",
        "Overview",
        "UPDRS 1 Proteins",
        "UPDRS 2 Proteins",
        "UPDRS 3 Proteins",
    ]
)
with tab2:
    st.header("Project Overview")
    # Streamlit "magic": this bare string literal is rendered as markdown.
    """
Using the first 12 months of doctor's visits where protein mass spectometry data has been recorded,
the model is meant to assist doctors in determining whether a patient is likely to develop
moderate-to-severe parkinsons for the UPDRS 1, 2, and 3.
A categorical prediction of 1 means the patient is predicted to have moderate-to-severe UPDRS rating
at some point in the future. A categorical prediction of 0 means the patient is predicted to have
none-to-mild UPDRS ratings in the future. If a protein or peptide column is not present in the data,
then it is given a value of 0, meaning it is not present in the sample. The visit month is defined as
the months since the first recorded visit. It is necessary for predicting the UPDRS score with these
models.
The column upd23b_clinical_state_on_medication is based on whether the patient was taking medication
during the clinical evaluation and can be values "On", "Off", or NaN.
- **UPDRS 1 categorical ratings**: 10 and below is mild, 11 to 21 is moderate, 22 and above is severe
- **UPDRS 2 categorical ratings**: 12 and below is mild, 13 to 29 is moderate, 30 and above is severe
- **UPDRS 3 categorical ratings**: 32 and below is mild, 33 to 58 is moderate, 59 and above is severe
- **UPDRS 4 was dropped due to too few samples for training**
"""
with tab1:
    # read in the protein and updrs data
    updrs1_df = pd.read_csv("./streamlit_data/full_pred_updrs_1.csv")
    updrs2_df = pd.read_csv("./streamlit_data/full_pred_updrs_2.csv")
    updrs3_df = pd.read_csv("./streamlit_data/full_pred_updrs_3.csv")
    # import patient updrs values
    patient_updrs_df = pd.read_csv("./streamlit_data/patient_updrs_values.csv")
    # import the input data used for modeling
    input_updrs1_df = pd.read_csv("./streamlit_data/updrs_1_model_input.csv")
    input_updrs2_df = pd.read_csv("./streamlit_data/updrs_2_model_input.csv")
    input_updrs3_df = pd.read_csv("./streamlit_data/updrs_3_model_input.csv")
    st.header("Parkinsons Severity Prediction")
    # have the user select the patient id
    patient_id = st.selectbox(
        "Patient ID", updrs1_df.sort_values(by="patient_id")["patient_id"].unique()
    )
    # per-patient slices of the three prediction tables
    patient_updrs1_df = updrs1_df[updrs1_df["patient_id"] == patient_id]
    patient_updrs2_df = updrs2_df[updrs2_df["patient_id"] == patient_id]
    patient_updrs3_df = updrs3_df[updrs3_df["patient_id"] == patient_id]
    # updrs values by visit month, normalized to a shared "value" column
    visit_updrs1_df = patient_updrs1_df[["updrs_1", "visit_month"]].rename(
        columns={"updrs_1": "value"}
    )
    visit_updrs2_df = patient_updrs2_df[["updrs_2", "visit_month"]].rename(
        columns={"updrs_2": "value"}
    )
    visit_updrs3_df = patient_updrs3_df[["updrs_3", "visit_month"]].rename(
        columns={"updrs_3": "value"}
    )
    (visit_updrs1_df["updrs"], visit_updrs2_df["updrs"], visit_updrs3_df["updrs"]) = (
        "UPDRS 1",
        "UPDRS 2",
        "UPDRS 3",
    )
    # long-format frame of actual UPDRS values across all three scales
    updrs_vals = pd.concat(
        [
            visit_updrs1_df[["updrs", "value", "visit_month"]],
            visit_updrs2_df[["updrs", "value", "visit_month"]],
            visit_updrs3_df[["updrs", "value", "visit_month"]],
        ],
        axis=0,
    )
    # display dataframe of predicted updrs and the visit month
    """ ### UPDRS Max Predictions
**The model uses only the protein and peptide data from visit months 0 - 12 to predict whether the patient will have moderate-to-severe max UPDRS rating**
Below you can see the "Max Predicted UPDRS Score" for each UPDRS
"""
    # join the three per-scale prediction columns on visit_month
    pred_df = pd.merge(
        patient_updrs1_df[["visit_month", "updrs_1_max_cat_preds"]],
        patient_updrs2_df[["visit_month", "updrs_2_max_cat_preds"]],
        on="visit_month",
    )
    pred_df = pd.merge(
        pred_df,
        patient_updrs3_df[["visit_month", "updrs_3_max_cat_preds"]],
        on="visit_month",
    )
    pred_df = pred_df.sort_values(by=["visit_month"]).set_index("visit_month")
    # map each scale's 0/1 category prediction to a human-readable label
    # (cutoffs differ per scale: UPDRS 1 > 10, UPDRS 2 > 12, UPDRS 3 > 32)
    for i in range(1, 4):
        if i == 1:
            pred_df[f"updrs_{i}_max_cat_preds"] = pred_df[
                f"updrs_{i}_max_cat_preds"
            ].apply(
                lambda x: "> 10 (Moderate-to-Severe)"
                if x == 1
                else "< 11 (None-to-Mild)"
            )
        elif i == 2:
            pred_df[f"updrs_{i}_max_cat_preds"] = pred_df[
                f"updrs_{i}_max_cat_preds"
            ].apply(
                lambda x: "> 12 (Moderate-to-Severe)"
                if x == 1
                else "< 13 (None-to-Mild)"
            )
        elif i == 3:
            pred_df[f"updrs_{i}_max_cat_preds"] = pred_df[
                f"updrs_{i}_max_cat_preds"
            ].apply(
                lambda x: "> 32 (Moderate-to-Severe)"
                if x == 1
                else "< 33 (None-to-Mild)"
            )
    st.dataframe(
        pred_df.rename(
            columns={
                "updrs_1_max_cat_preds": "Max Predicted UPDRS 1",
                "updrs_2_max_cat_preds": "Max Predicted UPDRS 2",
                "updrs_3_max_cat_preds": "Max Predicted UPDRS 3",
            }
        )
    )
    """
- **UPDRS 1 categorical ratings**: 10 and below is mild, 11 to 21 is moderate, 22 and above is severe
- **UPDRS 2 categorical ratings**: 12 and below is mild, 13 to 29 is moderate, 30 and above is severe
- **UPDRS 3 categorical ratings**: 32 and below is mild, 33 to 58 is moderate, 59 and above is severe
"""
    # filter out the input data for the patient
    patient_values = patient_updrs_df[patient_updrs_df["patient_id"] == patient_id]
    """### View all of actual UPDRS values for the patient below:"""
    # line plot when the patient has multiple visits, bar chart otherwise
    if patient_values["visit_month"].nunique() > 1:
        # plot the updrs values by visit month
        fig, ax = plt.subplots(figsize=(10, 5))
        sns.lineplot(
            data=patient_values,
            x="visit_month",
            y="value",
            hue="updrs",
            ax=ax,
        )
        ax.set_title(f"UPDRS Values for Patient {patient_id}")
        ax.set_xlabel("Visit Month")
        ax.set_ylabel("UPDRS Value")
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
        st.pyplot(fig)
    else:
        st.markdown("*Only One Visit for this Patient*")
        # plot as a bar chart
        fig, ax = plt.subplots(figsize=(10, 5))
        sns.barplot(
            data=patient_values,
            x="updrs",
            y="value",
            hue="visit_month",
            ax=ax,
        )
        ax.set_title(f"UPDRS Values for Patient {patient_id}")
        ax.set_xlabel("UPDRS")
        ax.set_ylabel("UPDRS Value")
        plt.legend(
            bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, title="Visit Month"
        )
        st.pyplot(fig)
    st.header("Explanation of Model Predictions")
    st.write(
        "The following plots show the **top ten features (proteins)** that contributed to the model prediction for the **inputed patient and visit month**. The features are ranked by their SHAP values."
    )
    st.write(
        "**Choose a visit month to see the explanation of the model prediction for the input patient**"
    )
    # user selects the visit month to make predictions on
    visit_month = st.selectbox("Visit Month", patient_updrs1_df["visit_month"].unique())
    st.subheader("UPDRS 1")
    # UPDRS 1
    # Load the saved model
    model = joblib.load("./webapp/catboost_updrs_1_model_hyperopt_smote.sav")
    # filter out the input data for the patient
    drop_col = [
        "patient_id",
        "upd23b_clinical_state_on_medication_On",
        "upd23b_clinical_state_on_medication_Unknown",
    ]
    input_updrs1_df = input_updrs1_df[input_updrs1_df["patient_id"] == patient_id].drop(
        columns=drop_col
    )
    # filter for the visit month
    input_updrs1_df = input_updrs1_df[input_updrs1_df["visit_month"] == visit_month]
    # make predictions on the data
    # preds = model.predict(input_updrs1_df)
    # plot the shap values
    # explain the model's predictions using SHAP values
    explainer = shap.TreeExplainer(model)
    input_shap_values = explainer.shap_values(input_updrs1_df)
    # create a dataframe of the shap values with the column names
    input_shap_df = pd.DataFrame(
        input_shap_values, columns=input_updrs1_df.columns
    ).T.reset_index()
    input_shap_df.columns = ["feature", "shap_value"]
    # SHAP force plot for inputed instance predicted class
    fig, ax = plt.subplots()
    # plot a vertical bar for the top ten features
    sns.barplot(
        data=input_shap_df.sort_values(by="shap_value", ascending=False).head(10),
        x="shap_value",
        y="feature",
        ax=ax,
    )
    plt.title(
        "Features (Proteins) Towards Severe UPDRS 1 Model Prediction", fontsize=14
    )
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
    st.subheader("UPDRS 2")
    # UPDRS 2
    # Load the saved model
    model = joblib.load("./webapp/catboost_updrs_2_model_hyperopt_smote_meds.sav")
    # filter out the input data for the patient
    input_updrs2_df = input_updrs2_df[input_updrs2_df["patient_id"] == patient_id].drop(
        columns=["patient_id"]
    )
    # filter for the visit month
    input_updrs2_df = input_updrs2_df[input_updrs2_df["visit_month"] == visit_month]
    # make predictions on the data
    # preds = model.predict(input_updrs2_df)
    # plot the shap values
    # explain the model's predictions using SHAP values
    explainer = shap.TreeExplainer(model)
    input_shap_values = explainer.shap_values(input_updrs2_df)
    # create a dataframe of the shap values with the column names
    input_shap_df = pd.DataFrame(
        input_shap_values, columns=input_updrs2_df.columns
    ).T.reset_index()
    input_shap_df.columns = ["feature", "shap_value"]
    # SHAP force plot for inputed instance predicted class
    fig, ax = plt.subplots()
    # plot a vertical bar for the top ten features
    sns.barplot(
        data=input_shap_df.sort_values(by="shap_value", ascending=False).head(10),
        x="shap_value",
        y="feature",
        ax=ax,
    )
    plt.title("Feature (Proteins) Towards Severe UPDRS 2 Model Prediction", fontsize=14)
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
    st.subheader("UPDRS 3")
    # UPDRS 3
    # Load the saved model
    filename = "./webapp/lgboost_updrs_3_model_hyperopt_smote_meds.sav"
    model = pickle.load(open(filename, "rb"))
    # filter out the input data for the patient
    input_updrs3_df = input_updrs3_df[input_updrs3_df["patient_id"] == patient_id].drop(
        columns=["patient_id"]
    )
    # filter for the visit month
    input_updrs3_df = input_updrs3_df[input_updrs3_df["visit_month"] == visit_month]
    # make predictions on the data
    # preds = model.predict(input_updrs3_df)
    # plot the shap values
    # explain the model's predictions using SHAP values
    explainer = shap.TreeExplainer(model)
    input_shap_values = explainer.shap_values(input_updrs3_df)
    # create a dataframe of the shap values with the column names
    # NOTE(review): index [0] selects one class's SHAP array from the LightGBM
    # classifier output here, unlike the UPDRS 1/2 branches above — confirm it
    # is the intended class.
    input_shap_df = pd.DataFrame(
        input_shap_values[0], columns=input_updrs3_df.columns
    ).T.reset_index()
    input_shap_df.columns = ["feature", "shap_value"]
    # SHAP force plot for inputed instance predicted class
    fig, ax = plt.subplots()
    # plot a vertical bar for the top ten features
    sns.barplot(
        data=input_shap_df.sort_values(by="shap_value", ascending=False).head(10),
        x="shap_value",
        y="feature",
        ax=ax,
    )
    plt.title(
        "Features (Proteins) Towards Severe UPDRS 3 Model Prediction", fontsize=14
    )
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
with tab3:
    # show the feature importances from the saved csv files
    st.header("Feature Importances")
    st.subheader("UPDRS 1")
    updrs1_feat_imp = pd.read_csv("./webapp/updrs_1_feat_imp.csv")
    updrs1_feat_imp = updrs1_feat_imp.sort_values(by="importance", ascending=False)
    top_ten_updrs1_feats = updrs1_feat_imp.head(10)
    fig, ax = plt.subplots()
    sns.barplot(data=top_ten_updrs1_feats, x="importance", y="feature", ax=ax)
    plt.title("Top Ten Features for UPDRS 1 Model", fontsize=14)
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
    # import the Uniprot data (also reused by the tab4/tab5 blocks below)
    uniprot_df = pd.read_csv("./webapp/UniprotProteinLookup.csv")
    # combine the protein and the uniprot data
    # assumes feature names embed the UniProt id after an underscore — TODO confirm
    top_ten_updrs1_feats["protein"] = top_ten_updrs1_feats["feature"].apply(
        lambda x: x.split("_")[1] if "_" in x else x
    )
    top_ten_updrs1_feats = pd.merge(
        top_ten_updrs1_feats, uniprot_df, left_on="protein", right_on="UniProt"
    )
    top_ten_updrs1_feats = top_ten_updrs1_feats.fillna("Unknown")
    # display the protein information
    st.subheader("Top Proteins for UPDRS 1 Information")
    st.write(
        "**If a protein is missing it is because it is not in the Uniprot database**"
    )
    st.write("-------------------")
    for i, row in top_ten_updrs1_feats.iterrows():
        st.markdown(f"**Protein Peptide**: {row['feature']}")
        st.markdown(f"**Protein Name**: {row['Protein names']}")
        st.markdown(f"**Gene Name**: {row['Gene Names']}")
        st.markdown(f"**Length**: {row['Length']}")
        st.write("-------------------")
with tab4:
    # show the feature importances from the saved csv files
    st.header("Feature Importances")
    st.subheader("UPDRS 2")
    updrs2_feat_imp = pd.read_csv("./webapp/updrs_2_feat_imp.csv")
    updrs2_feat_imp = updrs2_feat_imp.sort_values(by="importance", ascending=False)
    top_ten_updrs2_feats = updrs2_feat_imp.head(10)
    fig, ax = plt.subplots()
    sns.barplot(data=top_ten_updrs2_feats, x="importance", y="feature", ax=ax)
    plt.title("Top Ten Features for UPDRS 2 Model", fontsize=14)
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
    # combine the protein and the uniprot data
    # NOTE: `uniprot_df` is loaded in the tab3 block above.
    top_ten_updrs2_feats["protein"] = top_ten_updrs2_feats["feature"].apply(
        lambda x: x.split("_")[1] if "_" in x else x
    )
    top_ten_updrs2_feats = pd.merge(
        top_ten_updrs2_feats, uniprot_df, left_on="protein", right_on="UniProt"
    )
    top_ten_updrs2_feats = top_ten_updrs2_feats.fillna("Unknown")
    # display the protein information
    st.subheader("Top Proteins for UPDRS 2 Information")
    st.write(
        "**If a protein is missing it is because it is not in the Uniprot database**"
    )
    st.write("-------------------")
    for i, row in top_ten_updrs2_feats.iterrows():
        st.markdown(f"**Protein Peptide**: {row['feature']}")
        st.markdown(f"**Protein Name**: {row['Protein names']}")
        st.markdown(f"**Gene Name**: {row['Gene Names']}")
        st.markdown(f"**Length**: {row['Length']}")
        st.write("-------------------")
with tab5:
    # show the feature importances from the saved csv files
    st.header("Feature Importances")
    st.subheader("UPDRS 3")
    updrs3_feat_imp = pd.read_csv("./webapp/updrs_3_feat_imp.csv")
    updrs3_feat_imp = updrs3_feat_imp.sort_values(by="importance", ascending=False)
    top_ten_updrs3_feats = updrs3_feat_imp.head(10)
    fig, ax = plt.subplots()
    sns.barplot(data=top_ten_updrs3_feats, x="importance", y="feature", ax=ax)
    plt.title("Top Ten Features for UPDRS 3 Model", fontsize=14)
    plt.ylabel("")
    plt.xlabel("")
    st.pyplot(fig)
    # combine the protein and the uniprot data
    # NOTE: `uniprot_df` is loaded in the tab3 block above.
    top_ten_updrs3_feats["protein"] = top_ten_updrs3_feats["feature"].apply(
        lambda x: x.split("_")[1] if "_" in x else x
    )
    top_ten_updrs3_feats = pd.merge(
        top_ten_updrs3_feats, uniprot_df, left_on="protein", right_on="UniProt"
    )
    top_ten_updrs3_feats = top_ten_updrs3_feats.fillna("Unknown")
    # display the protein information
    st.subheader("Top Proteins for UPDRS 3 Information")
    st.write(
        "**If a protein is missing it is because it is not in the Uniprot database**"
    )
    st.write("-------------------")
    for i, row in top_ten_updrs3_feats.iterrows():
        st.markdown(f"**Protein Peptide**: {row['feature']}")
        st.markdown(f"**Protein Name**: {row['Protein names']}")
        st.markdown(f"**Gene Name**: {row['Gene Names']}")
        st.markdown(f"**Length**: {row['Length']}")
        st.write("-------------------")
| dagartga/Boosted-Models-for-Parkinsons-Prediction | streamlit_app.py | streamlit_app.py | py | 17,417 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit_lottie.st_lottie",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "stream... |
3289421742 | """Automatically initialize variables."""
from __future__ import division
import textwrap
from pyomo.core.base.var import Var
from pyomo.core.kernel.numvalue import value
from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation
from pyomo.util.plugin import alias
class InitMidpoint(IsomorphicTransformation):
"""Initializes non-fixed variables to the midpoint of their bounds.
- If the variable does not have bounds, set the value to zero.
- If the variable is missing one bound, set the value to that of the
existing bound.
"""
alias(
'contrib.init_vars_midpoint',
doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
def __init__(self):
"""Initialize the transformation."""
super(InitMidpoint, self).__init__()
def _apply_to(self, instance, overwrite=False):
"""Apply the transformation.
Kwargs:
overwrite: if False, transformation will not overwrite existing
variable values.
"""
for var in instance.component_data_objects(
ctype=Var, descend_into=True):
if var.fixed:
continue
if var.value is not None and not overwrite:
continue
if var.lb is None and var.ub is None:
# If LB and UB do not exist, set variable value to 0
var.set_value(0)
elif var.lb is None:
# if one bound does not exist, set variable value to the other
var.set_value(value(var.ub))
elif var.ub is None:
# if one bound does not exist, set variable value to the other
var.set_value(value(var.lb))
else:
var.set_value((value(var.lb) + value(var.ub)) / 2.)
class InitZero(IsomorphicTransformation):
    """Initializes non-fixed variables to zeros.

    - If setting the variable value to zero will violate a bound, set the
      variable value to the relevant bound value.
    """
    # NOTE: the class docstring above is consumed by `alias(doc=...)` at
    # class-creation time, so its wording is part of the registered plugin.

    alias(
        'contrib.init_vars_zero',
        doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def __init__(self):
        """Initialize the transformation."""
        super(InitZero, self).__init__()

    def _apply_to(self, instance, overwrite=False):
        """Apply the transformation.

        Kwargs:
            overwrite: if False, transformation will not overwrite existing
                variable values.
        """
        for var in instance.component_data_objects(
                ctype=Var, descend_into=True):
            # Skip fixed variables and (unless overwriting) initialized ones.
            if var.fixed:
                continue
            if var.value is not None and not overwrite:
                continue
            lb, ub = var.lb, var.ub
            if lb is not None and value(lb) > 0:
                # Zero would violate the lower bound: clip up to it.
                var.set_value(value(lb))
            elif ub is not None and value(ub) < 0:
                # Zero would violate the upper bound: clip down to it.
                var.set_value(value(ub))
            else:
                var.set_value(0)
| igorsowa9/vpp | venv/lib/python3.6/site-packages/pyomo/contrib/preprocessing/plugins/init_vars.py | init_vars.py | py | 2,978 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pyomo.core.plugins.transform.hierarchy.IsomorphicTransformation",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyomo.util.plugin.alias",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "textwrap.fill",
"line_number": 22,
"usage_type": ... |
31097536057 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from pytesseract import image_to_string
from PIL import Image
import time
# Path to the locally installed chromedriver binary.
path_to_chromedriver = "/home/akash/projects/legal_recourse/chromedriver"
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
browser = webdriver.Chrome(executable_path = path_to_chromedriver, chrome_options = options)

url = "http://services.ecourts.gov.in/ecourtindia/"
browser.get(url)
# Collect every state name from the state <select>; the first option is a
# placeholder, so it is dropped.
sel_state = browser.find_element_by_id("sess_state_code")
all_states = [st for st in sel_state.find_elements_by_tag_name("option")]
all_states = [state.get_attribute("text") for state in all_states]
del all_states[0]
total=0
def get_captcha(crop_box=(575, 405, 620, 425)):
    """Screenshot the current page, crop the captcha region, and OCR it.

    Args:
        crop_box: (left, top, right, bottom) pixel box of the captcha within
            the screenshot; defaults to the layout observed on the eCourts
            page, and is now a parameter so layout changes don't require
            editing the function body.

    Returns:
        The OCR'd captcha text from pytesseract.
    """
    browser.save_screenshot("screenshot.png")
    # Context managers fix the original resource leak: the screenshot image
    # object was re-bound to its crop and the open file was never closed.
    with Image.open("screenshot.png") as screenshot:
        screenshot.crop(crop_box).save("captcha.png")
    with Image.open("captcha.png") as captcha_img:
        return image_to_string(captcha_img)
# Walk the cascading dropdowns: state -> district -> court complex -> case
# type, solving the captcha and submitting for each case type.
for state in all_states:
    #~ try:
    browser.get(url)
    state_path = "//*[@id='sess_state_code']/option[contains(text(), '%s')]" % state # using xpath for javascript dropdown click
    browser.find_element_by_xpath(state_path).click()
    browser.implicitly_wait(1)
    sel_dist = browser.find_element_by_id("sess_dist_code")
    all_dist = [x for x in sel_dist.find_elements_by_tag_name("option")]
    all_dist = [dist.get_attribute("text") for dist in all_dist]
    del all_dist[0]
    for dist in all_dist:
        #~ try:
        # Reload and re-select the state: navigating resets the form.
        browser.get(url)
        state_path = "//*[@id='sess_state_code']/option[contains(text(), '%s')]" % state
        browser.find_element_by_xpath(state_path).click()
        browser.implicitly_wait(1)
        dist_path = "//*[@id='sess_dist_code']/option[contains(text(), '%s')]" % dist
        browser.find_element_by_xpath(dist_path).click()
        browser.implicitly_wait(1)
        browser.find_element_by_id("s_casetype.php").click()
        # The case-type form lives inside an iframe.
        browser.switch_to_frame("ifr")
        sel_court = browser.find_element_by_id("court_complex_code")
        all_courts = [x for x in sel_court.find_elements_by_tag_name("option")]
        all_courts = [court.get_attribute("text") for court in all_courts]
        del all_courts[0]
        for court in all_courts :
            # NOTE(review): bare `except` hides every error, not just a
            # missing option — consider narrowing to NoSuchElementException.
            try:
                court_path = "//*[@id='court_complex_code']/option[contains(text(), '%s')]" % court
                browser.find_element_by_xpath(court_path).click()
            except:
                continue
            browser.implicitly_wait(1)
            sel_case = browser.find_element_by_id("case_type")
            all_cases = [x for x in sel_case.find_elements_by_tag_name("option")]
            all_cases = [case.get_attribute("text") for case in all_cases]
            del all_cases[0]
            for case in all_cases:
                try:
                    case_path = "//*[@id='case_type']/option[contains(text(), '%s')]" % case
                    browser.find_element_by_xpath(case_path).click()
                except:
                    continue
                # Split "ID - Name" option text into its id and name parts.
                case_name = case
                case_id = ""
                hyphen = case.find('-')
                if hyphen != -1:
                    case_id = case[:hyphen-1] # for database
                    case_name = case[hyphen+2:]
                browser.implicitly_wait(1)
                browser.find_element_by_id("radD").click()
                # OCR the captcha, fill it in, and submit the form.
                captcha = get_captcha()
                browser.find_element_by_id("captcha").send_keys(captcha)
                browser.find_element_by_xpath("//*[@id='caseNoDet']/div[8]/span[3]/input[1]").click()
                time.sleep(5)
| akash-attri/legality | crawler.py | crawler.py | py | 3,557 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 15,
"usage_type": "call"
},
{
"api... |
37268264175 | import html
from telegram import Chat, InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, Update
from telegram.error import BadRequest, Unauthorized
from telegram.ext import (
CallbackContext,
CallbackQueryHandler,
CommandHandler,
Filters,
MessageHandler,
run_async,
)
from telegram.utils.helpers import mention_html
from Sangtei import DRAGONS, LOGGER, TIGERS, WOLVES, dispatcher
from Sangtei.modules.helper_funcs.chat_status import user_admin, user_not_admin
from Sangtei.modules.log_channel import loggable
from Sangtei.modules.sql import reporting_sql as sql
# Dispatcher handler-group number for the report handlers (registered below).
REPORT_GROUP = 12
# Users who can never be reported: concatenation of the DRAGONS, TIGERS and
# WOLVES user-id lists imported from the Sangtei package above.
REPORT_IMMUNE_USERS = DRAGONS + TIGERS + WOLVES
@run_async
@user_admin
def report_setting(update: Update, context: CallbackContext):
    """Toggle or show report notifications.

    In a private chat this controls the *user* setting (whether this user
    receives report PMs); in a group it controls the *chat* setting (whether
    /report and @admin trigger notifications). With no argument, the current
    setting is echoed back.
    """
    bot, args = context.bot, context.args
    chat = update.effective_chat
    msg = update.effective_message
    private = chat.type == chat.PRIVATE

    if len(args) >= 1:
        choice = args[0]
        if choice in ("yes", "on"):
            if private:
                sql.set_user_setting(chat.id, True)
                msg.reply_text(
                    "Reports na tih nun ani!, mi tu in emaw engthil pawh report se hriattir zel i ni ang."
                )
            else:
                sql.set_chat_setting(chat.id, True)
                msg.reply_text(
                    "Reports na tih nun ani! Admin tu te pawh report ti nung a piang chu hriattir an ni zel ang /report hman anih a piang in "
                    "emaw @admin hman anih in."
                )
        elif choice in ("no", "off"):
            if private:
                sql.set_user_setting(chat.id, False)
                msg.reply_text("Reports na tih thih ani! Eng reports mah i dawng tawh lo ang.")
            else:
                sql.set_chat_setting(chat.id, False)
                msg.reply_text(
                    "Reports na tih thih ani! Admin tu te pawh in /report emaw @admin emaw hmang an awm pawh in hriattir an ni tawh lo ang."
                )
        # any other argument is silently ignored, matching the original handler
    else:
        if private:
            msg.reply_text(
                f"Reports dan tur i siam sa, i duhdan chu: `{sql.user_should_report(chat.id)}`",
                parse_mode=ParseMode.MARKDOWN,
            )
        else:
            msg.reply_text(
                f"He Group settings awmlai mek chu: `{sql.chat_should_report(chat.id)}`",
                parse_mode=ParseMode.MARKDOWN,
            )
@run_async
@user_not_admin
@loggable
def report(update: Update, context: CallbackContext) -> str:
    """Notify every opted-in admin about a reported message.

    Triggered by ``/report <reason>`` (or an @admin mention) sent as a reply
    to the offending message. Builds a report text (with inline kick/ban/
    delete buttons for public supergroups), PMs it to each admin who has
    reports enabled, and returns the log-channel text on success ("" when
    nothing was reported).
    """
    bot = context.bot
    args = context.args
    message = update.effective_message
    chat = update.effective_chat
    user = update.effective_user

    if chat and message.reply_to_message and sql.chat_should_report(chat.id):
        reported_user = message.reply_to_message.from_user
        # BUG FIX: telegram.Chat has no `.first` attribute — the original
        # `chat.title or chat.first or chat.username` raised AttributeError
        # whenever the chat had no title. Use `.first_name` (set for private
        # chats) instead.
        chat_name = chat.title or chat.first_name or chat.username
        admin_list = chat.get_administrators()

        if not args:
            message.reply_text("I Reports na tur chhan tha tak ziak tel rawh.")
            return ""
        if user.id == reported_user.id:
            # self-report
            message.reply_text("Aaa aw le, lutuk lutuk tlat...tak tak maw?")
            return ""
        if user.id == bot.id:
            message.reply_text("Tum chhin tha hle mai.")
            return ""
        if reported_user.id in REPORT_IMMUNE_USERS:
            message.reply_text("Uh? Disaster hi reports i duh meuh maw?")
            return ""

        if chat.username and chat.type == Chat.SUPERGROUP:
            # Public supergroup: we can deep-link the reported message and
            # attach moderation buttons.
            msg = (
                f"<b>⚠️ Report: </b>{html.escape(chat.title)}\n"
                f"<b> • Report by:</b> {mention_html(user.id, user.first_name)}(<code>{user.id}</code>)\n"
                f"<b> • Reported user:</b> {mention_html(reported_user.id, reported_user.first_name)} (<code>{reported_user.id}</code>)\n"
            )
            link = f'<b> • Reported message:</b> <a href="https://t.me/{chat.username}/{message.reply_to_message.message_id}">hetah hmet rawh</a>'
            should_forward = False
            keyboard = [
                [
                    InlineKeyboardButton(
                        "➡ Message",
                        url=f"https://t.me/{chat.username}/{message.reply_to_message.message_id}",
                    )
                ],
                [
                    InlineKeyboardButton(
                        "⚠ Kick",
                        callback_data=f"report_{chat.id}=kick={reported_user.id}={reported_user.first_name}",
                    ),
                    InlineKeyboardButton(
                        "⛔️ Ban",
                        callback_data=f"report_{chat.id}=banned={reported_user.id}={reported_user.first_name}",
                    ),
                ],
                [
                    InlineKeyboardButton(
                        "❎ Message Paih ani",
                        callback_data=f"report_{chat.id}=delete={reported_user.id}={message.reply_to_message.message_id}",
                    )
                ],
            ]
            reply_markup = InlineKeyboardMarkup(keyboard)
        else:
            # Private/basic group: no message links possible, forward instead.
            msg = f'{mention_html(user.id, user.first_name)} hian admin te a koh e hetah "{html.escape(chat_name)}"!'
            link = ""
            should_forward = True
            reply_markup = None  # never used in this branch

        def _forward_evidence(admin_id):
            """Forward the reported message (and the reason, if any) to one admin."""
            if should_forward:
                message.reply_to_message.forward(admin_id)
                if len(message.text.split()) > 1:
                    # the report command itself carries a reason — pass it on
                    message.forward(admin_id)

        for admin in admin_list:
            if admin.user.is_bot:  # can't message bots
                continue
            if not sql.user_should_report(admin.user.id):
                continue
            try:
                # NOTE(review): the `not SUPERGROUP` / `not username`
                # conditions overlap, so a plain private/basic group gets the
                # report twice — preserved as-is from upstream behaviour.
                if not chat.type == Chat.SUPERGROUP:
                    bot.send_message(
                        admin.user.id, msg + link, parse_mode=ParseMode.HTML
                    )
                    _forward_evidence(admin.user.id)
                if not chat.username:
                    bot.send_message(
                        admin.user.id, msg + link, parse_mode=ParseMode.HTML
                    )
                    _forward_evidence(admin.user.id)
                if chat.username and chat.type == Chat.SUPERGROUP:
                    bot.send_message(
                        admin.user.id,
                        msg + link,
                        parse_mode=ParseMode.HTML,
                        reply_markup=reply_markup,
                    )
                    _forward_evidence(admin.user.id)
            except Unauthorized:
                pass  # admin never started a PM with the bot
            except BadRequest:  # TODO: cleanup exceptions
                LOGGER.exception("Exception while reporting user")

        message.reply_to_message.reply_text(
            f"{mention_html(user.id, user.first_name)} chu a message te admin te hnen ah reports ani.",
            parse_mode=ParseMode.HTML,
        )
        return msg
    return ""
def __migrate__(old_chat_id, new_chat_id):
    """Carry per-chat report settings over when a chat migrates to a new id."""
    sql.migrate_chat(old_chat_id, new_chat_id)
def __chat_settings__(chat_id, _):
    """Return the settings line shown for this chat in the bot's settings menu."""
    current = sql.chat_should_report(chat_id)
    return f"He chat ah hian user ten admin te hnen ah reports an thawn theih na tur siam ani a, hetiang hian /report leh @admin: `{current}`"
def __user_settings__(user_id):
    """Return the settings line shown for this user in the bot's settings menu."""
    return (
        "Admin i nihna group atangin reports te i dawng thin ang."
        if sql.user_should_report(user_id) is True
        else "Admin i nihna group atangin, eng reports mah *i dawng lo ang*."
    )
def buttons(update: Update, context: CallbackContext):
    """Handle the kick/ban/delete inline buttons attached to report messages.

    callback_data layout: ``report_<chat_id>=<action>=<user_id>=<extra>``
    where <extra> is the user's first name (kick/ban) or the message id
    (delete).
    """
    bot = context.bot
    query = update.callback_query
    parts = query.data.replace("report_", "").split("=")
    chat_id, action = parts[0], parts[1]

    if action == "kick":
        try:
            # kick + unban == remove without a permanent ban
            bot.kickChatMember(chat_id, parts[2])
            bot.unbanChatMember(chat_id, parts[2])
            query.answer("✅ Hlawhtling taka pet chhuah ani")
            return ""
        except Exception as err:
            query.answer("🛑 Hnek hlawhchham tlat")
            bot.sendMessage(
                text=f"Error: {err}",
                chat_id=query.message.chat_id,
                parse_mode=ParseMode.HTML,
            )
    elif action == "banned":
        try:
            bot.kickChatMember(chat_id, parts[2])
            query.answer("✅ Hlawhtling tak a Ban ani")
            return ""
        except Exception as err:
            bot.sendMessage(
                text=f"Error: {err}",
                chat_id=query.message.chat_id,
                parse_mode=ParseMode.HTML,
            )
            query.answer("🛑 Ban hlawhchham ani")
    elif action == "delete":
        try:
            bot.deleteMessage(chat_id, parts[3])
            query.answer("✅ Message Paih ani")
            return ""
        except Exception as err:
            bot.sendMessage(
                text=f"Error: {err}",
                chat_id=query.message.chat_id,
                parse_mode=ParseMode.HTML,
            )
            query.answer("🛑 Message paih hlawhchham tlat!")
__help__ = """
➥ /report `<a chhan>`*:* admin te hnen a report tur in.
➥ @admin *:* Admin te hnen lama report tur in message kha reply rawh.
*NOTE:* Admin te hnen a report thlen theih tur in a khawi emaw zawk zawk khi a hman theih sa vek e.
*Admin te tan bik:*
➥ /reports `<on/off>`*:* report setting siamthatna. Reports settings awm lai mek en na.
➥ Pm lam ah i ti fel tawh anih chuan, a rawn ti lang bawk ang.
➥ Group lam ah i ti anih chuan, Groups lama a awm dan a rawn ti lang ang.
"""
SETTING_HANDLER = CommandHandler("reports", report_setting)
REPORT_HANDLER = CommandHandler("report", report, filters=Filters.group)
ADMIN_REPORT_HANDLER = MessageHandler(Filters.regex(r"(?i)@admin(s)?"), report)
REPORT_BUTTON_USER_HANDLER = CallbackQueryHandler(buttons, pattern=r"report_")
dispatcher.add_handler(REPORT_BUTTON_USER_HANDLER)
dispatcher.add_handler(SETTING_HANDLER)
dispatcher.add_handler(REPORT_HANDLER, REPORT_GROUP)
dispatcher.add_handler(ADMIN_REPORT_HANDLER, REPORT_GROUP)
__mod_name__ = "Reporting"
__handlers__ = [
(REPORT_HANDLER, REPORT_GROUP),
(ADMIN_REPORT_HANDLER, REPORT_GROUP),
(SETTING_HANDLER),
]
| Mizo-Noob-Developer/Sangtei | Sangtei/modules/reporting.py | reporting.py | py | 11,411 | python | en | code | null | github-code | 36 | [
{
"api_name": "Sangtei.DRAGONS",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "Sangtei.TIGERS",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "Sangtei.WOLVES",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"... |
3212721770 | import re
from itertools import groupby
from operator import itemgetter
def part1(rows):
    """Sum the sector ids of all rooms whose checksum is valid."""
    return sum(ident for _, ident, real in map(id_if_real, rows) if real)
def part2(rows):
    """Return the sector id of the real room whose decrypted name mentions 'northpole'."""
    for row in rows:
        name, ident, real = id_if_real(row)
        if real and "northpole" in decrypt(name, ident):
            return ident
    raise ValueError("No northpole objects found.")
def id_if_real(room):
    """Parse '<name>-<sector>[<checksum>]' and validate the checksum.

    The checksum is real when it equals the five most common letters of the
    name (dashes excluded), ties broken alphabetically.

    Returns (name, sector_id, is_real).
    """
    name, sector, checksum = re.match(r"([a-z-]+)(\d+)\[(\w+)]", room).groups()
    letters = sorted(name.replace("-", ""))
    tallies = [(char, len(list(group))) for char, group in groupby(letters)]
    # Stable double sort: alphabetical first, then by count descending,
    # so equal counts stay in alphabetical order.
    tallies.sort(key=itemgetter(0))
    tallies.sort(key=itemgetter(1), reverse=True)
    expected = "".join(char for char, _ in tallies[:5])
    return name, int(sector), checksum == expected
def decrypt(name, identifier):
    """Shift-cipher decrypt a room name; dashes become spaces."""
    return "".join(rotate(ch, identifier) for ch in name)
def rotate(char, identifier):
    """Rotate a lowercase letter forward by `identifier` places ('-' -> ' ')."""
    if char == "-":
        return " "
    return chr(ord('a') + (ord(char) - ord('a') + identifier) % 26)
| heijp06/AoC-2016 | day04/lib.py | lib.py | py | 1,283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
... |
30303644357 | #Import Sleeper Functions
import SleeperFunctions.sleeperfunctions as sleeperData
#Import Keep,Trade,Cut Functions
import KeepTradeCutFunctions.KTCfunctions as KTCdata
import pandas as pd
from UserLeagueClass.user_league_info_class import user_league_info
from UserLeagueClass.user_league_users_class import user_league_users
from UserLeagueClass.user_league_rosters_class import user_league_rosters
#location lat/long
import LocationFunctions.LocationFunctions as Locate
from os import path,remove
from datetime import datetime,timedelta
#Build Master Class for Entire League
class sleeper_league():
    """Aggregate view of one Sleeper fantasy league.

    Combines the league info, league users and league rosters objects and
    pre-computes per-team stats: roster counts, average age / experience /
    weight / height and KeepTradeCut values, both overall and per position.
    """

    def __init__(self, sleeper_league_id=None):
        """Load info/users/rosters for *sleeper_league_id* and compute stats.

        When the id is invalid, only ``validLeagueID`` is set (False); when
        no id is given, ``validLeagueID`` is not set at all.
        """
        self.dataready = False
        if sleeper_league_id is not None:
            self.league_info = user_league_info(sleeper_league_id)
            if self.league_info.validLeagueID:
                self.league_rosters = user_league_rosters(sleeper_league_id)
                self.league_users = user_league_users(sleeper_league_id)
                # Pre-compute everything once; get_league_stats() just returns it.
                self.league_data_calculated = self.load_league_stats()
                self.validLeagueID = True
            else:
                self.validLeagueID = False

    def get_league_stats(self):
        """Return the list of per-team stat dicts computed at construction."""
        return self.league_data_calculated

    def load_league_stats(self):
        """Build one stats dict per team in the league.

        Dict key insertion order is deliberate — it becomes the CSV column
        order in save_league_stats_to_csv(). The roster getters are assumed
        side-effect free, so call order does not matter.
        """
        positions = ('QB', 'RB', 'WR', 'TE')
        rosters = self.league_rosters
        users = self.league_users
        league_stats = []
        for raw_id in users.get_all_user_ids():
            team_id = str(raw_id)
            team = {
                'Team ID': team_id,
                'Actual ID Name': users.get_user_name_from_user_id(team_id),
                'Actual Team Name': users.get_team_name_from_user_id(team_id),
                'Total Search Rank': rosters.get_total_search_rank(team_id),
            }
            for pos in positions:
                team[f'{pos} Search Rank'] = rosters.get_total_search_rank(team_id, pos)
            for pos in positions:
                team[f'{pos} Count'] = len(rosters.get_roster(team_id, pos, False))
            team['Overall Average Age'] = rosters.get_average_age(team_id)
            for pos in positions:
                team[f'{pos} Average Age'] = rosters.get_average_age(team_id, pos)
            team['Overall Experience Average'] = rosters.get_average_experience(team_id)
            for pos in positions:
                team[f'{pos} Experience'] = rosters.get_average_experience(team_id, pos)
            team['Overall Average Weight'] = rosters.get_average_weight(team_id)
            for pos in positions:
                team[f'{pos} Average Weight'] = rosters.get_average_weight(team_id, pos)
            team['Overall Average Height'] = rosters.get_average_height(team_id)
            for pos in positions:
                team[f'{pos} Average Height'] = rosters.get_average_height(team_id, pos)
            team['Total KTC'] = rosters.get_total_KTC(team_id)
            for pos in positions:
                team[f'{pos} Total KTC'] = rosters.get_total_KTC(team_id, pos)
            team['Overall KTC Average'] = rosters.get_average_KTC(team_id)
            for pos in positions:
                team[f'{pos} Ave KTC'] = rosters.get_average_KTC(team_id, pos)
            league_stats.append(team)
        return league_stats

    def save_league_stats_to_csv(self):
        """Write the stats to FinalData/<yyyymmdd>_<league_id>.csv.

        Skips the write (and says so) when today's file already exists.
        """
        date_str = datetime.now().strftime('%Y%m%d')
        league_id = str(self.league_info.get_league_id())
        csv_path = 'FinalData/' + date_str + '_' + league_id + '.csv'
        if not path.exists(csv_path):
            print("Saving League Values")
            pd.DataFrame.from_dict(self.league_data_calculated).to_csv(csv_path, index=False)
        else:
            print("File Already Created")

    def get_formatted_roster_with_data(self, user_id=None, position=None):
        """Return the roster for *user_id* ordered QB, RB, WR, TE.

        Each entry is a dict with Name, Age, Position, College and KTC.
        *position* restricts output to one position; None means all four.
        Returns None (implicitly) when *user_id* matches no team.
        """
        positions = ('QB', 'RB', 'WR', 'TE')
        rosters = self.league_rosters
        for raw_id in self.league_users.get_all_user_ids():
            team_id = str(raw_id)
            if team_id != user_id:
                continue
            formatted = []
            for pos in positions:
                if position is not None and position != pos:
                    continue
                for player in rosters.get_roster(team_id, pos, False):
                    formatted.append({
                        'Name': player,
                        'Age': rosters.get_player_age(player),
                        'Position': rosters.get_player_position(player),
                        'College': rosters.get_player_college(player),
                        'KTC': rosters.get_player_ktc(player),
                    })
            return formatted
#Testing
"""
#Run Get All NFL Player Info Once A Month
sleeperData.get_all_nfl_player_data()
#Remove Unneeded People from this Json if required that month
sleeperData.remove_unneeded_nfl_players()
#Get KTC Cut Values Once A Day
sleeper_ktc_include_picks = True
sleeper_ktc_superflex = False
KTCdata.initate_ktc_pull(sleeper_ktc_superflex,sleeper_ktc_include_picks)
#Add the KTC Cut Values to the Player Data
KTCdata.add_KTC_values_to_player_data()
leaguetest = sleeper_league('917535899465388032')
league_stats = leaguetest.get_league_stats()
leaguetest.save_league_stats_to_csv()
roster = leaguetest.get_formatted_roster_with_data('866361936198135808')
rt = pd.DataFrame(roster)
print(rt)
es = pd.DataFrame(league_stats)
#print(es)
""" | jpagel1/Sleeper_KTC_Streamlit_Version | UserLeagueClass/user_sleeperleague_class.py | user_sleeperleague_class.py | py | 15,793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "UserLeagueClass.user_league_info_class.user_league_info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "UserLeagueClass.user_league_rosters_class.user_league_rosters",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "UserLeagueClass.user_league... |
4676474414 | #!/usr/bin/env python3
"""Module defines `filter_datum` function
"""
import logging
import mysql.connector
from os import environ
import re
from typing import List
PII_FIELDS = ("name", "email", "phone", "ssn", "password")
class RedactingFormatter(logging.Formatter):
    """Logging formatter that obfuscates configured PII fields in messages."""

    REDACTION = "***"
    FORMAT = "[HOLBERTON] %(name)s %(levelname)s %(asctime)-15s: %(message)s"
    SEPARATOR = ";"

    def __init__(self, fields: List[str] = None):
        """Store the list of field names to redact.

        BUG FIX: the default was previously a mutable ``[]`` shared across
        every instance created without arguments; use a None sentinel and
        build a fresh list per instance instead.
        """
        super(RedactingFormatter, self).__init__(self.FORMAT)
        self.fields = fields if fields is not None else []

    def format(self, record: logging.LogRecord) -> str:
        """Format the record normally, then redact every configured field."""
        return filter_datum(
            self.fields, self.REDACTION,
            logging.Formatter.format(self, record),
            self.SEPARATOR,
        )
def filter_datum(
    fields: List[str],
    redaction: str,
    message: str,
    separator: str
) -> str:
    """Return log message obfuscated

    Args:
        fields: list of strings representing all fields to obfuscate
        redaction: string representing by what the field will be obfuscated
        message: string representing the log line
        separator: string representing by which character is separating all
        fields in the log line (message)

    Returns:
        obfuscated log message

    BUG FIX: the previous replacement (``r'\\1=<redaction>'``) consumed the
    separator matched by the pattern without re-emitting it, so the output
    silently lost one separator per redacted field. The replacement now
    appends the separator back.
    """
    pattern = r'({})=(.*?){}'.format('|'.join(fields), separator)
    return re.sub(pattern, r'\1={}{}'.format(redaction, separator), message)
def get_logger() -> logging.Logger:
    """Build the "user_data" logger: INFO level, non-propagating, with a
    stream handler that redacts every field in PII_FIELDS."""
    logger = logging.getLogger("user_data")
    logger.setLevel(logging.INFO)
    logger.propagate = False
    handler = logging.StreamHandler()
    handler.setFormatter(RedactingFormatter(list(PII_FIELDS)))
    logger.addHandler(handler)
    return logger
def get_db() -> mysql.connector.connection.MySQLConnection:
    """Open a MySQL connection from the PERSONAL_DATA_DB_* environment
    variables (username, password, host, database name)."""
    return mysql.connector.connect(
        user=environ.get("PERSONAL_DATA_DB_USERNAME"),
        password=environ.get("PERSONAL_DATA_DB_PASSWORD"),
        host=environ.get("PERSONAL_DATA_DB_HOST"),
        database=environ.get("PERSONAL_DATA_DB_NAME"),
    )
def main() -> None:
    """Fetch every row of the `users` table and log it with PII redacted."""
    connection = get_db()
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM users;")
    # Column names come from the cursor description metadata.
    columns = [description[0] for description in cursor.description]
    logger = get_logger()
    messages = []
    try:
        for row in cursor.fetchall():
            # "col1=val1; col2=val2; ..." — redaction happens in the formatter
            messages.append("; ".join(
                f"{name}={value}" for name, value in zip(columns, row)
            ))
    except Exception as e:
        print(e)
    finally:
        connection.close()
    for msg in messages:
        logger.info(msg)
if __name__ == "__main__":
main()
| leykun-gizaw/alx-backend-user-data | 0x00-personal_data/filtered_logger.py | filtered_logger.py | py | 3,115 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.Formatter",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "logging.LogRecord",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.For... |
23522576847 | """
Multilayer perceptron for cycloid and Lorenz attractor.
"Eugene Morozov"<Eugene ~at~ HiEugene.com>
"""
import matplotlib.pyplot as plt
import torch.nn
import torch.nn as nn
import torch.optim as optim
import scipy.linalg
from scipy.integrate import odeint
from util import *
v = 1
epoch_sec = 1 # int(time.time())
start_time, device = preamble(epoch_sec)
rnd = 0.4
do_dropout = False
dropout_prob_zero = 0.5
do_batch_normalization = False
do_regularization = False
L2_lambda = 0.01
do_early_stopping = False
early_stop_patience = 2
info_step = 50
max_epoch = 75
batch_size = 20
train_data_batches = 80
if v:
plt.ion()
fig_grad = plt.figure(); ax_grad = fig_grad.gca()
plt.show(); plt.tight_layout()
move_plot(fig_grad, 0, 0, 1000, 500); fig_grad.canvas.flush_events()
i_grad = 0
def fig_grad_init():
global i_grad
ax_grad.cla()
ax_grad.grid()
ax_grad.set_title(f"grad norm-2")
i_grad = 0
fig_w, ax_w = plt.subplots(3, 4)
fig_w.show(); plt.tight_layout()
move_plot(fig_w, 1000, 0, 1600, 1000); fig_w.canvas.flush_events()
def fig_w_init():
pass
fig_loss = plt.figure(); ax_loss = fig_loss.gca()
plt.show(); plt.tight_layout()
move_plot(fig_loss, 0, 500, 1000, 800); fig_loss.canvas.flush_events()
i_loss = 0
def fig_loss_init(lr):
global i_loss
ax_loss.cla()
ax_loss.grid()
ax_loss.set_title(f"loss; lr={lr}")
i_loss = 0
fig_ver = plt.figure()
ax_ver = fig_ver.gca()
# ax_ver = fig_ver.add_subplot(projection="3d")
fig_ver.show(); plt.tight_layout()
move_plot(fig_ver, 600, 200, 1000, 800); fig_ver.canvas.flush_events()
def fig_ver_init():
ax_ver.cla()
ax_ver.grid()
ax_ver.set_title(f"cycloid; rnd={rnd}")
# ax_ver.set_title(f"Lorenz attractor; rnd={rnd}")
fig_lr = plt.figure(); ax_lr = fig_lr.gca()
fig_lr.show() #; plt.tight_layout()
move_plot(fig_lr, 1200, 200, 1000, 800); fig_lr.canvas.flush_events()
def fig_lr_init():
ax_lr.cla()
ax_lr.grid()
ax_lr.set_xlabel("learning rate")
ax_lr.set_ylabel("loss")
fig_lr_init()
def cycloid(t, rnd, r=0.5):
    """Sample a cycloid of radius *r* at parameter values *t* (numpy array).

    When rnd > 0, uniform noise in [-rnd, rnd] is added to the y values.
    Returns (x, y) arrays.
    """
    x = r * (t - np.sin(t))
    y = r * (1 - np.cos(t))
    if rnd > 0:
        y = y + np.random.uniform(low=-rnd, high=rnd + 1e-8, size=len(x))
    return x, y
def cycloid_normalize_x(x):
    """Identity mapping — cycloid x values are already in [0, 1]."""
    return x
### Lorenz attractor ###
# Classic chaotic-regime parameters of the Lorenz system.
Lorenz_rho = 28.0
Lorenz_sigma = 10.0
Lorenz_beta = 8.0 / 3.0

def Lorenz_derivatives(state, t):
    """Right-hand side (dx/dt, dy/dt, dz/dt) of the Lorenz ODE system."""
    x, y, z = state  # unpack the state vector
    dx = Lorenz_sigma * (y - x)
    dy = x * (Lorenz_rho - z) - y
    dz = x * y - Lorenz_beta * z
    return dx, dy, dz
def Lorenz(t, rnd, x0=None):
    """Integrate the Lorenz system over times *t* from start point *x0*.

    Returns (xy, z): the first two coordinates as an (len(t), 2) array and
    the third coordinate as a vector, all scaled by 1/20. When rnd > 0,
    uniform noise in [-rnd, rnd] is added to z.

    BUG FIX: the previous default ``x0=np.random.uniform(...)`` was
    evaluated once at import time, so every call silently reused the same
    "random" start point. The None sentinel draws a fresh start per call;
    passing an explicit x0 behaves exactly as before.
    """
    if x0 is None:
        x0 = np.random.uniform(low=-10, high=10 + 1e-8, size=3)
    xx = odeint(Lorenz_derivatives, x0, t)
    xx /= 20
    if rnd > 0:
        noise = np.random.uniform(low=-rnd, high=rnd + 1e-8, size=xx.shape[0])
        xx[:, 2] += noise
    return xx[:, [0, 1]], xx[:, 2]
def Lorenz_normalize_x(x):
    """Identity mapping — Lorenz samples are already scaled in Lorenz()."""
    return x
if v and False:
t_num = 600
delta_t = 0.05
t_start = 100*np.random.rand()
t_end = t_start + t_num*delta_t
t = np.linspace(t_start, t_end, t_num)
x, y = Lorenz(t, rnd=0)
x = Lorenz_normalize_x(x)
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot(x[:,0], x[:,1], y, "-o")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
class MLP_NN(nn.Module):
    """Small fully-connected regressor: 1 -> 64 -> 32 -> 1 with ReLU.

    The optional ``l2a`` layer is dropout or batch-norm after the second
    linear layer, controlled by the module-level do_dropout /
    do_batch_normalization flags. Attribute names l1/l2/l2a/l3 are part of
    the interface (print_weight/print_grad read them).
    """

    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(in_features=1, out_features=64, bias=True)  # 2 for Lorenz
        self.l2 = nn.Linear(64, 32)
        if do_dropout:
            self.l2a = nn.Dropout(p=dropout_prob_zero)
        if do_batch_normalization:
            # BN (especially for conv nets / sigmoidal nonlinearities);
            # allows dropout to be omitted.
            self.l2a = nn.BatchNorm1d(32)
        self.l3 = nn.Linear(32, 1)

    def forward(self, x):
        """Forward pass; flattens all dims except the batch dim first."""
        x = torch.flatten(x, 1)
        x = torch.relu(self.l1(x))
        x = self.l2(x)
        if do_dropout:
            x = self.l2a(x)
        if do_batch_normalization:
            x = self.l2a(x)  # BN sits immediately before the nonlinearity
        x = torch.relu(x)
        return self.l3(x)
# verify gradient by using complex numbers trick
def verify_grad():
    """Cross-check autograd against the complex-step derivative trick.

    Perturbs one weight of the first layer along the imaginary axis by
    epsilon; Im(loss)/epsilon then approximates d(loss)/d(weight) and is
    compared with the gradient reported by loss.backward().
    Note: not all torch 1.10 methods support complex numbers.
    """
    try:
        m = 1
        epsilon = 1e-20

        def loss_function(a, b):
            # torch-only MSE so autograd can differentiate through it
            return torch.mean((a - b) ** 2)

        # one random cycloid sample, promoted to complex singles
        t = np.random.uniform(low=0, high=2 * np.pi + 1e-8, size=m * 1)
        x, y = cycloid(t, rnd=rnd)
        x = cycloid_normalize_x(x)
        xc = np.zeros(m, dtype=np.csingle)
        xc.real = x
        yc = np.zeros(m, dtype=np.csingle)
        yc.real = y
        inputs = torch.reshape(torch.from_numpy(xc), (m, 1))
        targets = torch.reshape(torch.from_numpy(yc), (m, 1))

        # three complex linear layers with constant-initialised parameters
        layers = []
        for n_in, n_out in ((1, 64), (64, 32), (32, 1)):
            layer = nn.Linear(n_in, n_out, dtype=torch.complex64)
            layer.weight.data.fill_(0.01)
            layer.bias.data.fill_(0.01)
            layers.append(layer)
        l1, l2, l3 = layers

        # complex-step perturbation of one first-layer weight
        l1.weight.data[0] += 0 + 1j * epsilon
        out = torch.tanh(l1(inputs))
        out = torch.tanh(l2(out))
        out = l3(out)
        loss = loss_function(out, targets)
        deriv = loss.data.numpy().imag / epsilon
        loss.backward()
        auto_deriv = l1.weight.grad.data.numpy()[0].real[0]
        if np.abs(deriv - auto_deriv) > epsilon:
            warn(f"derivatives differ: delta = {np.abs(deriv - auto_deriv)}")
    except Exception as e:
        warn(f"exception: {e}")
def print_weight(model, x):
    """Histogram each layer's weights/biases and (pre)activations into ax_w.

    No-op unless the module-level verbosity flag `v` is set. The input batch
    *x* is pushed through the model layer by layer to collect activations;
    the final layer's post-activation cell (row 2, col 3) is left empty.
    """
    if not v:
        return
    for i in range(3):
        for j in range(4):
            ax_w[i][j].cla()
            ax_w[i][j].grid()
    layers = (model.l1, model.l2, model.l3)
    # columns 0/1: parameter distributions
    for i, layer in enumerate(layers):
        ax_w[i][0].hist(layer.weight.data.cpu().numpy().flatten())
        ax_w[i][1].hist(layer.bias.data.cpu().numpy().flatten())
    # columns 2/3: pre- and post-activation distributions
    for i, layer in enumerate(layers):
        x = layer(x)
        ax_w[i][2].hist(x.data.cpu().numpy().flatten())
        if i < 2:  # no ReLU after the output layer
            x = torch.relu(x)
            ax_w[i][3].hist(x.data.cpu().numpy().flatten())
    suffixes = ("st", "nd", "rd")
    for i in range(3):
        sfx = suffixes[i]
        ax_w[i][0].set_title(f"{i+1}{sfx} layer weights")
        ax_w[i][1].set_title(f"{i+1}{sfx} layer biases")
        ax_w[i][2].set_title(f"{i+1}{sfx} layer preactivation")
        ax_w[i][3].set_title(f"{i+1}{sfx} layer activation")
def print_grad(model):
    """Print per-layer gradient statistics and (when `v`) plot gradient 2-norms over time.

    Uses module-level state: `v`, `ax_grad`, `fig_grad`, the light colors
    `lblue`/`lgreen`/`lred`, and the `i_grad` / `*_norm_prev` history globals
    declared inside the `if v:` branch.
    """
    # 2-norm of each layer's weight and bias gradient.
    l1_weight_norm = torch.sqrt(torch.sum(model.l1.weight.grad.mul(model.l1.weight.grad))).data.cpu().numpy()
    l1_bias_norm = torch.sqrt(torch.sum(model.l1.bias.grad.mul(model.l1.bias.grad))).data.cpu().numpy()
    l2_weight_norm = torch.sqrt(torch.sum(model.l2.weight.grad.mul(model.l2.weight.grad))).data.cpu().numpy()
    l2_bias_norm = torch.sqrt(torch.sum(model.l2.bias.grad.mul(model.l2.bias.grad))).data.cpu().numpy()
    l3_weight_norm = torch.sqrt(torch.sum(model.l3.weight.grad.mul(model.l3.weight.grad))).data.cpu().numpy()
    l3_bias_norm = torch.sqrt(torch.sum(model.l3.bias.grad.mul(model.l3.bias.grad))).data.cpu().numpy()
    # Per tensor: largest gradient magnitude, count/percentage of ~zero gradients, 2-norm.
    print(f"l1 grad weight abs max = {model.l1.weight.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l1.weight.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l1.weight.grad.abs(), 1e-10).sum().int() / (model.l1.weight.grad.shape[0]*model.l1.weight.grad.shape[1])):2d}%), l1.weight 2-norm = {l1_weight_norm:0.3f}")
    print(f"l1 grad bias abs max = {model.l1.bias.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l1.bias.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l1.bias.grad.abs(), 1e-10).sum().int() / model.l1.bias.grad.shape[0]):2d}%), l1.bias 2-norm = {l1_bias_norm:0.3f}")
    print(f"l2 grad weight abs max = {model.l2.weight.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l2.weight.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l2.weight.grad.abs(), 1e-10).sum().int() / (model.l2.weight.grad.shape[0]*model.l2.weight.grad.shape[1])):2d}%), l2.weight 2-norm = {l2_weight_norm:0.3f}")
    print(f"l2 grad bias abs max = {model.l2.bias.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l2.bias.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l2.bias.grad.abs(), 1e-10).sum().int() / model.l2.bias.grad.shape[0]):2d}%), l2.bias 2-norm = {l2_bias_norm:0.3f}")
    print(f"l3 grad weight abs max = {model.l3.weight.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l3.weight.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l3.weight.grad.abs(), 1e-10).sum().int() / (model.l3.weight.grad.shape[0]*model.l3.weight.grad.shape[1])):2d}%), l3.weight 2-norm = {l3_weight_norm:0.3f}")
    print(f"l3 grad bias abs max = {model.l3.bias.grad.abs().max().float():0.3f}, 0 # = {torch.le(model.l3.bias.grad.abs(), 1e-10).sum().int():4d} ({int(100*torch.le(model.l3.bias.grad.abs(), 1e-10).sum().int() / model.l3.bias.grad.shape[0]):2d}%), l3.bias 2-norm = {l3_bias_norm:0.3f}")
    # gradient/parameter_value should ~= 1% over a minibatch
    a = torch.abs(model.l1.weight.grad / model.l1.weight).data.cpu().numpy().flatten()
    a1 = (a > 0.01).sum()
    a10 = (a > 0.1).sum()
    if a10 > 0:
        warn(f"L1 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    else:
        print(f"L1 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    a = torch.abs(model.l2.weight.grad / model.l2.weight).data.cpu().numpy().flatten()
    a1 = (a > 0.01).sum()
    a10 = (a > 0.1).sum()
    if a10 > 0:
        warn(f"L2 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    else:
        print(f"L2 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    a = torch.abs(model.l3.weight.grad / model.l3.weight).data.cpu().numpy().flatten()
    a1 = (a > 0.01).sum()
    a10 = (a > 0.1).sum()
    if a10 > 0:
        warn(f"L3 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    else:
        print(f"L3 grad > 1%: {a1}, > 10%: {a10} out of {len(a)}")
    if v:
        # A test that can rule out local minima as the problem is plotting the norm of the gradient over time.
        global i_grad, l1_weight_norm_prev, l1_bias_norm_prev, l2_weight_norm_prev, l2_bias_norm_prev, l3_weight_norm_prev, l3_bias_norm_prev
        if i_grad == 0:
            # First sample: plot single points and install the legend once.
            ax_grad.plot(i_grad, l1_weight_norm, color="blue", linestyle='-', label="l1 weight norm") # marker='*'
            ax_grad.plot(i_grad, l1_bias_norm, color=lblue, linestyle='-', label="l1 bias norm")
            ax_grad.plot(i_grad, l2_weight_norm, color="green", linestyle='-', label="l2 weight norm")
            ax_grad.plot(i_grad, l2_bias_norm, color=lgreen, linestyle='-', label="l2 bias norm")
            ax_grad.plot(i_grad, l3_weight_norm, color="red", linestyle='-', label="l3 weight norm")
            ax_grad.plot(i_grad, l3_bias_norm, color=lred, linestyle='-', label="l3 bias norm")
            ax_grad.legend(loc="best", ncol=1, scatterpoints=1, numpoints=1)
        else:
            # Subsequent samples: draw a segment from the previous norms to the new ones.
            ax_grad.plot([i_grad-1, i_grad], [l1_weight_norm_prev, l1_weight_norm], color="blue", linestyle='-', label="l1 weight norm") # marker='*'
            ax_grad.plot([i_grad-1, i_grad], [l1_bias_norm_prev, l1_bias_norm], color=lblue, linestyle='-', label="l1 bias norm")
            ax_grad.plot([i_grad-1, i_grad], [l2_weight_norm_prev, l2_weight_norm], color="green", linestyle='-', label="l2 weight norm")
            ax_grad.plot([i_grad-1, i_grad], [l2_bias_norm_prev, l2_bias_norm], color=lgreen, linestyle='-', label="l2 bias norm")
            ax_grad.plot([i_grad-1, i_grad], [l3_weight_norm_prev, l3_weight_norm], color="red", linestyle='-', label="l3 weight norm")
            ax_grad.plot([i_grad-1, i_grad], [l3_bias_norm_prev, l3_bias_norm], color=lred, linestyle='-', label="l3 bias norm")
        # Remember current norms for the next call's line segment.
        l1_weight_norm_prev = l1_weight_norm
        l1_bias_norm_prev = l1_bias_norm
        l2_weight_norm_prev = l2_weight_norm
        l2_bias_norm_prev = l2_bias_norm
        l3_weight_norm_prev = l3_weight_norm
        l3_bias_norm_prev = l3_bias_norm
        i_grad += 1
        fig_grad.canvas.flush_events()
def check_hessian(model, loss_function):
    """Sample a random batch, run the model, and warn on a saddle-point signature.

    Computes the Hessian of `loss_function` w.r.t. (outputs, targets) with
    torch.autograd.functional.hessian, assembles its 2x2 scalar form and warns
    when the eigenvalues have mixed signs (both positive and negative).
    Uses globals: do_batch_normalization, rnd, device, cycloid helpers.
    """
    if do_batch_normalization:
        m = 20
    else:
        m = 1
    # Random parameter values over one cycloid period.
    t = np.random.uniform(low=0, high=2*np.pi+1e-8, size=m)
    t = t.astype(np.float32)
    x, y = cycloid(t, rnd=rnd)
    x = cycloid_normalize_x(x)
    # Alternative Lorenz-system data generation (kept for experimentation):
    # t_num = m
    # delta_t = 0.05
    # t_start = 100*np.random.rand()
    # t_end = t_start + t_num*delta_t
    # t = np.linspace(t_start, t_end, t_num)
    # t = t.astype(np.float32)
    # x, y = Lorenz(t, rnd=rnd)
    # x = Lorenz_normalize_x(x)
    # x = x.astype(np.float32)
    # y = y.astype(np.float32)
    inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
    inputs = torch.reshape(inputs, (m,1)) # (m,2) for Lorenz
    targets = torch.reshape(targets, (m,1))
    inputs, targets = inputs.to(device), targets.to(device)
    outputs = model(inputs)
    h = torch.autograd.functional.hessian(loss_function, (outputs, targets))
    print(f"Hessian = {h}")
    hm = np.array([[h[0][0].data.cpu().numpy()[0][0][0][0], h[0][1].data.cpu().numpy()[0][0][0][0]], # [0][0][0][0] is for 1 sample
                   [h[1][0].data.cpu().numpy()[0][0][0][0], h[1][1].data.cpu().numpy()[0][0][0][0]]])
    w, _ = scipy.linalg.eig(hm)
    # Mixed-sign eigenvalues (beyond tolerance) indicate a saddle point.
    if (w[0].real < -1e-8 and w[1].real > 1e-8) or (w[0].real > 1e-8 and w[1].real < -1e-8):
        warn(f"At a saddle point, the Hessian matrix has both positive and negative eigenvalues: {w}")
def MLP_train(model, loss_function, optimizer):
    """Train `model` on freshly sampled cycloid minibatches for `max_epoch` epochs.

    Supports optional L2 regularization (`do_regularization`), early stopping
    (`do_early_stopping`), live loss plotting (`v`), and both step-per-batch
    optimizers and LBFGS (which needs a re-evaluating closure).
    Uses many module-level hyper-parameters/plot objects: max_epoch, batch_size,
    train_data_batches, info_step, L2_lambda, device, rnd, ax_loss, fig_loss, i_loss.
    """
    start = time.time()
    running_loss_prev = 0.0
    i_loss_valid_prev = 0
    if do_early_stopping:
        validation_loss_prev = 1e8
        early_stop_trigger_times = 0
    def calc():
        # Closure evaluating loss + gradients; passed to LBFGS, called directly otherwise.
        nonlocal loss
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        if do_regularization:
            L2_reg = torch.tensor(0.0).to(device)
            for param in model.parameters():
                L2_reg += torch.pow(param,2).sum()/2
            loss += L2_lambda * L2_reg
        optimizer.zero_grad()
        loss.backward()
        return loss
    for epoch in range(1,max_epoch+1):
        # ETA extrapolated from the average epoch duration so far.
        hours, minutes, seconds = sec2hms((max_epoch - epoch) * (time.time() - start) / epoch)
        print(f"starting epoch {epoch}; ETA = {hours:02d}:{minutes:02d}:{seconds:02d}")
        running_loss = 0.0
        train_data_size = train_data_batches*batch_size
        print(f"train_data_size = {train_data_size:,d}")
        for i in range(train_data_size//batch_size):
            # Fresh random cycloid minibatch every iteration (infinite data regime).
            t = np.random.uniform(low=0, high=2*np.pi+1e-8, size=batch_size*1)
            t = t.astype(np.float32)
            x, y = cycloid(t, rnd=rnd)
            x = cycloid_normalize_x(x)
            # Alternative Lorenz-system data generation (kept for experimentation):
            # t_num = batch_size
            # delta_t = 0.05
            # t_start = 100*np.random.rand()
            # t_end = t_start + t_num*delta_t
            # t = np.linspace(t_start, t_end, t_num)
            # t = t.astype(np.float32)
            # x, y = Lorenz(t, rnd=rnd)
            # x = Lorenz_normalize_x(x)
            # x = x.astype(np.float32)
            # y = y.astype(np.float32)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            inputs = torch.reshape(inputs, (batch_size,1)) # 2 for Lorenz
            targets = torch.reshape(targets, (batch_size,1))
            inputs, targets = inputs.to(device), targets.to(device)
            if optimizer.__repr__().startswith("LBFGS"):
                # LBFGS re-evaluates the loss internally, so pass the closure.
                optimizer.step(calc)
            else:
                loss = calc()
                optimizer.step()
            running_loss += loss.item()
            if i % info_step == info_step-1:
                # Average loss per sample over the last info_step minibatches.
                running_loss = running_loss / info_step / batch_size
                print(f"loss after mini-batch {i+1:5d}: {running_loss:.05f}")
                if v:
                    global i_loss
                    if i_loss == 0:
                        ax_loss.plot(i_loss, running_loss, color="blue", linestyle='-', label="training loss")
                    else:
                        ax_loss.plot([i_loss-1, i_loss], [running_loss_prev, running_loss], color="blue", linestyle='-') # , label="training loss")
                    running_loss_prev = running_loss
                    i_loss += 1
                    fig_loss.canvas.flush_events()
                running_loss = 0.0
                print_grad(model)
                print_weight(model, inputs)
        if v:
            ax_loss.annotate(f"epoch={epoch}", xy=(i_loss, running_loss_prev), rotation=60)
        if do_early_stopping or v:
            validation_loss, _, _, _, _, _ = MLP_validate(model, loss_function)
            if v:
                if i_loss_valid_prev == 0:
                    ax_loss.plot(i_loss, validation_loss, color="red", linestyle='-', label="validation loss")
                    ax_loss.legend(loc="best", ncol=1, scatterpoints=1, numpoints=1)
                else:
                    ax_loss.plot([i_loss_valid_prev, i_loss], [validation_loss_prev, validation_loss], color="red", linestyle='-', label="validation loss")
                i_loss_valid_prev = i_loss
            if do_early_stopping:
                if validation_loss > validation_loss_prev:
                    # or save model to file when < validation_loss_best
                    early_stop_trigger_times += 1
                    if early_stop_trigger_times >= early_stop_patience:
                        print(f"early stopping triggered at epoch={epoch}")
                        break
                else:
                    early_stop_trigger_times = 0
                validation_loss_prev = validation_loss
    print("finished training")
    check_hessian(model, loss_function)
def report():
    """Placeholder for a final reporting step (intentionally a no-op)."""
    pass
def MLP_validate(model, loss_function):
    """Evaluate `model` on a fixed cycloid grid over one period.

    Returns (per-sample loss as a python float, t, xs, ys, predicted zs, x0)
    where x0 is always None for the cycloid (kept for Lorenz compatibility).
    Uses globals: rnd, device, cycloid helpers.
    """
    # Deterministic evaluation grid over [0, 2*pi].
    t = np.arange(0, 2*np.pi+0.1, 0.1)
    t = t.astype(np.float32)
    xs, ys = cycloid(t, rnd=rnd)
    xs = cycloid_normalize_x(xs)
    # Alternative Lorenz-system validation data (kept for experimentation):
    # t_num = 300
    # delta_t = 0.05
    # t_start = 100*np.random.rand()
    # t_end = t_start + t_num*delta_t
    # t = np.linspace(t_start, t_end, t_num)
    # t = t.astype(np.float32)
    # x0=np.random.uniform(low=-10, high=10+1e-8, size=3)
    # x, y = Lorenz(t, rnd=rnd, x0=x0)
    # x = Lorenz_normalize_x(x)
    # xs = x.astype(np.float32)
    # ys = y.astype(np.float32)
    length = t.shape[0]
    print(f"t_validate length = {length}")
    with torch.no_grad():
        inputs, targets = torch.from_numpy(xs), torch.from_numpy(ys)
        inputs = torch.reshape(inputs, (length,1)) # 2 for Lorenz
        targets = torch.reshape(targets, (length,1))
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = model(inputs)
        sample_loss = loss_function(outputs, targets)
        sample_loss /= length
        print(f"validation samples loss = {sample_loss:.05f}")
        zs = outputs.data.cpu().numpy()
    # zs = zs.flatten() # for Lorenz, no need for cycloid
    x0 = None
    return sample_loss.data.cpu().numpy().flatten()[0], t, xs, ys, zs, x0
def MLP_verify(model, loss_function):
    """Compare predictions against noiseless ground truth and optionally plot.

    Runs MLP_validate, regenerates the noise-free cycloid (`rnd=0`) for the same
    grid, computes the "true" loss between predictions and truth, and — when `v`
    is set — plots samples/predictions/truth on `ax_ver`.
    Returns (sample_loss, true_loss).
    """
    sample_loss, t, xs, ys, zs, x0 = MLP_validate(model, loss_function)
    # Noise-free ground truth for the same parameter grid.
    _, ss = cycloid(t, rnd=0)
    # _, ss = Lorenz(t, rnd=0, x0=x0)
    with torch.no_grad():
        zst, sst = torch.from_numpy(zs), torch.from_numpy(ss)
        zst = torch.reshape(zst, (zs.shape[0],1))
        sst = torch.reshape(sst, (ss.shape[0],1))
        true_loss = loss_function(zst, sst)
    # true_loss = true_loss.item() / ss.shape[0]
    print(f"verification true loss = {true_loss:.05f}")
    if v:
        ax_ver.plot(xs, ys, "b-*", label="samples")
        ax_ver.plot(xs, zs, "r-o", label="predicted")
        ax_ver.plot(xs, ss, "g-x", label="truth")
        # 3-D variants for the Lorenz system:
        # ax_ver.plot(xs[:,0], xs[:,1], ys, "b-*", label="samples")
        # ax_ver.plot(xs[:,0], xs[:,1], zs, "r-o", label="predicted")
        # ax_ver.plot(xs[:,0], xs[:,1], ss, "g-x", label="truth")
        ax_ver.legend()
    report()
    return sample_loss, true_loss
def MLP():
    """Build, train and verify the MLP, optionally sweeping learning rates.

    For each learning rate in `lrs`, (re)initializes the plot figures (when `v`),
    trains with Adam, verifies, and plots validation/truth loss against lr.
    Note: the same `model` instance is reused across learning rates.
    """
    # tune the learning rate
    # lrs = [0.0001, 0.0005, 0.0010, 0.0025, 0.0050, 0.0075, 0.0100, 0.0500] # SGD; lr=0.001
    # lrs = [0.0001, 0.0002, 0.0006, 0.0006, 0.0008, 0.0010, 0.0015, 0.0020, 0.0025, 0.0050, 0.0075, 0.0100, 0.0500] # Adam; lr=0.001
    lrs = [0.001]
    lr_prev = lrs[0]
    sample_loss_prev, true_loss_prev = None, None
    model = MLP_NN().to(device)
    if v: print(model); # assert(all(p.is_cuda for p in model.parameters()))
    loss_function = nn.MSELoss()
    # loss_function = nn.L1Loss()
    # Sanity-check autograd against a numeric/complex-step derivative first.
    verify_grad()
    for lr in lrs:
        print(f"learning rate = {lr}")
        if v:
            fig_grad_init()
            fig_w_init()
            fig_loss_init(lr)
        # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9) # lr=0.001, momentum=0.9 # weight_decay is L2 regularization
        optimizer = optim.Adam(params=model.parameters(), lr=lr)
        # optimizer = optim.LBFGS(params=model.parameters(), lr=lr, line_search_fn="strong_wolfe")
        MLP_train(model, loss_function, optimizer)
        if v: fig_ver_init()
        sample_loss, true_loss = MLP_verify(model, loss_function)
        if v:
            # Seed the "previous" values on the first iteration so the segment is flat.
            if not sample_loss_prev: sample_loss_prev = sample_loss
            if not true_loss_prev: true_loss_prev = true_loss
            ax_lr.plot([lr_prev, lr], [sample_loss_prev, sample_loss], color="red", linestyle='-', label="validation sample loss")
            ax_lr.plot([lr_prev, lr], [true_loss_prev, true_loss], color="green", linestyle='-', label="validation truth loss")
            if lr == lrs[0]: ax_lr.legend(loc="best", ncol=1, scatterpoints=1, numpoints=1)
            lr_prev = lr
            sample_loss_prev = sample_loss
            true_loss_prev = true_loss
            fig_lr.canvas.flush_events()
        print()
# Entry point: train/evaluate the MLP, then run the closing footer.
MLP()
postscript(start_time)
| eugegit/examples | nn_cycloid.py | nn_cycloid.py | py | 21,540 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matp... |
41924656937 | import json # JSON modülünü içe aktar
from kafka import KafkaConsumer # kafka kütüphanesinden KafkaConsumer sınıfını içe aktar
# Kafka connection settings.
kafka_bootstrap_servers = 'localhost:9092' # bootstrap broker address
kafka_topic = 'your_kafka_topic' # topic to subscribe to
# Consumer that decodes each message value from UTF-8 JSON.
consumer = KafkaConsumer( # create the KafkaConsumer instance
    kafka_topic, # topic to subscribe to
    bootstrap_servers=kafka_bootstrap_servers, # configure the bootstrap servers
    value_deserializer=lambda v: json.loads(v.decode('utf-8')) # deserialize JSON payloads
)
def consume_messages():
    """Continuously read messages from the Kafka topic and print their decoded values.

    Blocks forever: iterating a KafkaConsumer yields messages as they arrive.
    """
    for message in consumer: # iterate over incoming messages
        print(message.value) # value is already JSON-decoded by the value_deserializer
if __name__ == '__main__':
    consume_messages() # start the (infinite) consume loop
| sumeyyenacar/Kafka-CDC-Producer-Consumer-Mongodb-Project | consumer/tuketici.py | tuketici.py | py | 904 | python | tr | code | 0 | github-code | 36 | [
{
"api_name": "kafka.KafkaConsumer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
}
] |
26317282313 | import base64
import datetime
import glob
import logging
import os
import re
import time
import types
from importlib import util
from random import Random, shuffle
import toml
import yaml
from kapitan import cached, defaults, utils
from kapitan.errors import CompileError
from six import string_types
logger = logging.getLogger(__name__)
def load_jinja2_filters(env):
    """Register Kapitan's custom Jinja2 filters on *env*."""
    env.filters.update(
        {
            "sha256": utils.sha256_string,
            "b64encode": base64_encode,
            "b64decode": base64_decode,
            "yaml": to_yaml,
            "toml": to_toml,
            "fileglob": fileglob,
            "bool": to_bool,
            "to_datetime": to_datetime,
            "strftime": strftime,
            "regex_replace": regex_replace,
            "regex_escape": regex_escape,
            "regex_search": regex_search,
            "regex_findall": regex_findall,
            "reveal_maybe": reveal_maybe,
            "ternary": ternary,
            "shuffle": randomize_list,
        }
    )
def load_module_from_path(env, path):
    """
    Load a python module from *path* and register every function it defines as a
    Jinja2 filter on *env* (the filter name is the function name).

    Raises:
        IOError: if the module cannot be located, loaded or executed.
    """
    found_filter = False
    try:
        module_name = os.path.basename(path).split(".")[0]
        custom_filter_spec = util.spec_from_file_location(module_name, path)
        custom_filter_module = util.module_from_spec(custom_filter_spec)
        custom_filter_spec.loader.exec_module(custom_filter_module)
        for function in dir(custom_filter_module):
            if isinstance(getattr(custom_filter_module, function), types.FunctionType):
                logger.debug("custom filter loaded from %s", path)
                env.filters[function] = getattr(custom_filter_module, function)
                found_filter = True
    except Exception as e:
        raise IOError("jinja2 failed to render, could not load filter at {}: {}".format(path, e))
    # BUG FIX: this used to run unconditionally, logging "failed to find custom
    # filter" even after filters were successfully registered. Only log it when
    # the module really contained no filter functions.
    if not found_filter:
        logger.debug("failed to find custom filter from path %s", path)
def load_jinja2_filters_from_file(env, jinja2_filters):
    """
    Register filters from the file *jinja2_filters*.

    The default filter file may legitimately be absent and is silently skipped;
    any other path is handed to load_module_from_path, which raises on failure.
    """
    normalized = os.path.normpath(jinja2_filters)
    is_default = normalized == defaults.DEFAULT_JINJA2_FILTERS_PATH
    if is_default and not os.path.isfile(normalized):
        return
    load_module_from_path(env, normalized)
# Custom filters
def reveal_maybe(ref_tag):
    """Reveal *ref_tag* when compile was invoked with --reveal, else return it unchanged."""
    should_reveal = cached.args["compile"].reveal
    return cached.revealer_obj.reveal_raw(ref_tag) if should_reveal else ref_tag
def base64_encode(string):
    """Base64-encode a text string, returning the encoded value as text."""
    raw = string.encode("UTF-8")
    return base64.b64encode(raw).decode("UTF-8")
def base64_decode(string):
    """Decode a base64 value back into a UTF-8 text string."""
    decoded_bytes = base64.b64decode(string)
    return decoded_bytes.decode("UTF-8")
def to_yaml(obj):
    """Serialize *obj* to block-style (non-flow) YAML."""
    dumped = yaml.safe_dump(obj, default_flow_style=False)
    return dumped
def to_toml(obj):
    """Serialize *obj* (a dict) to a TOML document string."""
    dumped = toml.dumps(obj)
    return dumped
# Following filters are from https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/filter/core.py
def fileglob(pathname):
    """return list of matched regular files for glob"""
    matches = glob.glob(pathname)
    return list(filter(os.path.isfile, matches))
def to_bool(a):
    """Coerce *a* to a bool.

    None and real bools pass through unchanged. Strings are matched
    case-insensitively against the truthy set ("yes", "on", "1", "true");
    the integer 1 is also truthy. Everything else is False.
    """
    if a is None or isinstance(a, bool):
        return a
    # `str` replaces the py2-era six.string_types: this code path only runs on
    # Python 3, where six.string_types == (str,).
    if isinstance(a, str):
        a = a.lower()
    return a in ("yes", "on", "1", "true", 1)
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
    """Parse *string* into a datetime object using *format* (strptime codes)."""
    parsed = datetime.datetime.strptime(string, format)
    return parsed
def strftime(string_format, second=None):
    """return current date string for format. See https://docs.python.org/3/library/time.html#time.strftime for format

    When *second* is given it is interpreted as a Unix epoch (local time);
    otherwise the current time is used.
    """
    epoch = None
    if second is not None:
        try:
            epoch = int(second)
        except Exception:
            raise CompileError("Invalid value for epoch value ({})".format(second))
    return time.strftime(string_format, time.localtime(epoch))
def regex_replace(value="", pattern="", replacement="", ignorecase=False):
    """Perform a `re.sub` returning a string"""
    compiled = re.compile(pattern, flags=re.I if ignorecase else 0)
    return compiled.sub(replacement, value)
def regex_escape(string):
    """Escape all regular expressions special characters from STRING."""
    escaped = re.escape(string)
    return escaped
def regex_search(value, regex, *args, **kwargs):
    """Perform re.search and return the list of matches or a backref

    Positional *args* are backreferences: "\\g<name>" selects a named group,
    "\\N" a numbered one. Supported kwargs: ignorecase, multiline.
    Returns None when nothing matches.
    """
    wanted_groups = []
    for backref in args:
        if backref.startswith("\\g"):
            name = re.match(r"\\g<(\S+)>", backref).group(1)
            wanted_groups.append(name)
        elif backref.startswith("\\"):
            number = int(re.match(r"\\(\d+)", backref).group(1))
            wanted_groups.append(number)
        else:
            raise CompileError("Unknown argument")
    flags = (re.I if kwargs.get("ignorecase") else 0) | (re.M if kwargs.get("multiline") else 0)
    found = re.search(regex, value, flags)
    if not found:
        return None
    if not wanted_groups:
        return found.group()
    return [found.group(g) for g in wanted_groups]
def regex_findall(value, regex, multiline=False, ignorecase=False):
    """Perform re.findall and return the list of matches"""
    flags = (re.M if multiline else 0) | (re.I if ignorecase else 0)
    return re.findall(regex, value, flags)
def ternary(value, true_val, false_val, none_val=None):
    """value ? true_val : false_val, with an optional dedicated result for None"""
    if value is None and none_val is not None:
        return none_val
    return true_val if value else false_val
def randomize_list(mylist, seed=None):
    """Return a shuffled copy of *mylist* (seeded and deterministic when *seed*
    is truthy). On any failure the input is returned unchanged."""
    try:
        shuffled = list(mylist)
        if seed:
            Random(seed).shuffle(shuffled)
        else:
            shuffle(shuffled)
        return shuffled
    except Exception:
        return mylist
| kapicorp/kapitan | kapitan/inputs/jinja2_filters.py | jinja2_filters.py | py | 6,140 | python | en | code | 1,719 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "kapitan.utils.sha256_string",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "kapitan.utils",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.p... |
26793988072 | from django.shortcuts import render
from django.http import HttpResponse
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from surprise import Reader, Dataset, SVD
from surprise.model_selection import KFold
import pickle
from surprise.model_selection.validation import cross_validate
import copy
from datetime import datetime
# Load the raw TMDb/MovieLens data sets.
meta = pd.read_csv('Dataset/movies_metadata.csv')
# Rating
ratings = pd.read_csv('Dataset/ratings_small.csv')
# MovieLens <-> IMDb/TMDb id mapping.
links = pd.read_csv('Dataset/links_small.csv')
# Per-movie keyword tags (loaded but not used below).
keywords = pd.read_csv('Dataset/keywords.csv')
# -- Content filtering based Recommender
meta['overview'] = meta['overview'].fillna('')
# dtype overview of the metadata frame (notebook debug aid; result discarded).
pd.DataFrame({'feature': meta.dtypes.index, 'dtype': meta.dtypes.values})
# Drop three known-bad rows so 'id' can be parsed numerically.
meta = meta.drop([19730, 29503, 35587])
meta['id'] = pd.to_numeric(meta['id'])
# dtype overview of the links frame (notebook debug aid; result discarded).
pd.DataFrame({'feature': links.dtypes.index, 'dtype': links.dtypes.values})
# Force tmdbId to int64 (pandas reads it as float because of NaNs).
col = np.array(links['tmdbId'], np.int64)
links['tmdbId'] = col
# Align metadata with the links table on TMDb id and drop the redundant imdb_id.
meta.rename(columns={'id': 'tmdbId'}, inplace=True)
meta = pd.merge(meta, links, on='tmdbId')
meta.drop(['imdb_id'], axis=1, inplace=True)
meta.head()
# TF-IDF over the movie overviews, used for content-based similarity.
tfidf = TfidfVectorizer(stop_words='english')
tfidf_matrix = tfidf.fit_transform(meta['overview'])
# Compute cosine similarity
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
# title -> row index lookup for the similarity matrix.
indices = pd.Series(meta.index, index=meta['original_title']).drop_duplicates()
# In[15]:
def recommend(title, cosine_sim=cosine_sim):
    """Content-based recommendations for *title*.

    Returns a DataFrame ('original_title', 'vote_average') of up to 15 movies
    whose overviews are most similar, excluding badly rated ones.
    """
    idx = indices[title]
    # pairwise similarity scores of movies with given movie
    sim_scores = list(enumerate(cosine_sim[idx]))
    # Sort the movies on similarity scores
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    # 15 most similar movies (index 0 is the queried movie itself)
    sim_scores = sim_scores[1:16]
    movie_indices = [i[0] for i in sim_scores]
    # Drop low-rated movies. BUG FIX: the original called list.remove() while
    # iterating the same list, which silently skips the element that follows
    # every removal; filtering into a new list checks every candidate.
    movie_indices = [
        i for i in movie_indices if 5 <= meta.at[i, 'vote_average'] <= 10
    ]
    return meta[['original_title', 'vote_average']].iloc[movie_indices]
# -- Collaborative Filtering based Recommender
reader = Reader()
df = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader)
kf = KFold(n_splits=5)
kf.split(df)  # Split the data into folds
# Load a pre-trained SVD model instead of re-fitting on every run.
# NOTE(review): pickle.load on an external file executes arbitrary code if the
# file is untrusted -- only load model files produced by this project.
svd = pickle.load(open("movie_ml_model.sav", "rb"))
# Rebuild the links table: map each movie title to its TMDb/MovieLens ids.
links_df = pd.read_csv('Dataset/links_small.csv')
col = np.array(links_df['tmdbId'], np.int64)
links_df['tmdbId'] = col
links_df = links_df.merge(meta[['title', 'tmdbId']], on='tmdbId').set_index('title')
links_index = links_df.set_index('tmdbId') # For label indexing
def hybrid(userId, title):
    """Hybrid recommender: content-similar movies re-ranked by the SVD model's
    predicted rating for *userId*. Returns the top 30 as a DataFrame with
    columns Title / Vote Average / TMDb Id / Estimated Prediction."""
    movie_row = indices[title]
    tmdbId = links_df.loc[title]['tmdbId']
    scored = sorted(enumerate(cosine_sim[movie_row]), key=lambda pair: pair[1], reverse=True)
    # 30 most similar movies, skipping index 0 (the movie itself).
    similar_rows = [pair[0] for pair in scored[1:31]]
    movies = meta.iloc[similar_rows][['title', 'vote_average', 'tmdbId']]
    # Estimated rating for this user from the pre-trained SVD model.
    movies['est'] = movies['tmdbId'].apply(
        lambda x: svd.predict(userId, links_index.loc[x]['movieId']).est)
    movies = movies.sort_values('est', ascending=False)  # rank by predicted rating
    movies.columns = ['Title', 'Vote Average', 'TMDb Id', 'Estimated Prediction']
    return movies.head(30)
# necessary functions for contextual_update function
def day_time():
    """Classify the current local time as morning/afternoon/evening/night.

    Boundaries: [00:00, 12:00) morning, [12:00, 16:00) afternoon,
    [16:00, 19:00) evening, otherwise night.
    """
    hour = datetime.now().hour
    if hour < 12:
        return "morning"
    if hour < 16:
        return "afternoon"
    if hour < 19:
        return "evening"
    return "night"
def season():
    """Map the current month onto a season label.

    Jan-Mar winter, Apr-May summer, Jun-Aug rainy, Sep-Oct autumn, Nov-Dec winter.
    """
    month = datetime.now().month
    # Ordered (exclusive upper bound, label) pairs; first match wins.
    for upper_bound, label in ((4, "winter"), (6, "summer"), (9, "rainy"), (11, "autumn")):
        if month < upper_bound:
            return label
    return "winter"
def is_weekend():
    """Return True when today is Saturday or Sunday (ISO weekday 6 or 7)."""
    # Idiomatic direct boolean instead of if/return True/False.
    return datetime.now().isoweekday() >= 6
# testing function
# day_time()
season()  # smoke-test call (notebook residue); return value is discarded
# (month, day) -> column data for the special movies prepended on that date.
# Fix vs. the original: the Republic Day 'Border' genres literal was malformed
# ("{'name:'War'}"), which would crash eval() in recommendation_updater.
_SPECIAL_DATE_MOVIES = {
    (8, 15): {  # Independence Day
        "Title": ["Border", "Uri:The Surgical Strike"],
        "Vote Average": [6.8, 7.1],
        "TMDb Id": [33125, 554600],
        "Estimated Prediction": [5.0, 5.0],
        "tmdbId": [33125, 554600],
        "genres": ["[{'name':'Action'},{'name':'History'},{'name':'War'}]",
                   "[{'name':'Action'},{'name':'Drama'},{'name':'War'}]"],
    },
    (1, 26): {  # Republic Day
        "Title": ["Shaheed", "Border", "Uri:The Surgical Strike"],
        "Vote Average": [5.0, 6.8, 7.1],
        "TMDb Id": [498713, 33125, 554600],
        "Estimated Prediction": [5.0, 5.0, 5.0],
        "tmdbId": [498713, 33125, 554600],
        "genres": ["[{'name':'War'},{'name':'History'}]",
                   "[{'name':'Action'},{'name':'History'},{'name':'War'}]",
                   "[{'name':'Action'},{'name':'Drama'},{'name':'War'}]"],
    },
    (9, 5): {  # Teachers Day
        "Title": ["Super 30", "Taare Zameen Par"],
        "Vote Average": [7.6, 8.0],
        "TMDb Id": [534075, 7508],
        "Estimated Prediction": [5.0, 5.0],
        "tmdbId": [534075, 7508],
        "genres": ["[{'name':'Drama'}]", "[{'name':'Drama'}]"],
    },
    (11, 14): {  # Children's Day
        "Title": ["Taare Zameen Par", "Chillar Party"],
        "Vote Average": [8.0, 6.9],
        "TMDb Id": [7508, 69891],
        "Estimated Prediction": [5.0, 5.0],
        "tmdbId": [7508, 69891],
        "genres": ["[{'name':'Drama'}]",
                   "[{'name':'Drama'},{'name':'Comedy'},{'name':'Family'}]"],
    },
    (12, 25): {  # Christmas
        "Title": ["Let It Snow", "Home Alone"],
        "Vote Average": [6.1, 7.3],
        "TMDb Id": [295151, 771],
        "Estimated Prediction": [5.0, 5.0],
        "tmdbId": [295151, 771],
        "genres": ["[{'name':'Romance'},{'name':'Comedy'}]",
                   "[{'name':'Comedy'},{'name':'Family'}]"],
    },
    (12, 31): {  # New Year's Eve
        "Title": ["New Years Eve"],
        "Vote Average": [5.9],
        "TMDb Id": [62838],
        "Estimated Prediction": [5.0],
        "tmdbId": [62838],
        "genres": ["[{'name':'Comedy'},{'name':'Romance'}]"],
    },
    (1, 1): {  # New Year's Day
        "Title": ["New Years Eve"],
        "Vote Average": [5.9],
        "TMDb Id": [62838],
        "Estimated Prediction": [5.0],
        "tmdbId": [62838],
        "genres": ["[{'name':'Comedy'},{'name':'Romance'}]"],
    },
    (2, 14): {  # Valentine's Day
        "Title": ["The Notebook", "Titanic"],
        "Vote Average": [7.9, 7.9],
        "TMDb Id": [11036, 597],
        "Estimated Prediction": [5.0, 5.0],
        "tmdbId": [11036, 597],
        "genres": ["[{'name':'Romance'},{'name':'Drama'}]",
                   "[{'name':'Drama'},{'name':'Romance'}]"],
    },
}
def special_date(recommended_list, date_passed):
    """Prepend holiday-themed movies to *recommended_list* on special dates.

    *date_passed* is compared against each special (month, day) placed in the
    current year, exactly as the original per-branch comparisons did. Returns a
    new DataFrame; the input frame is never modified. At most one date matches,
    so the table-driven loop reproduces the original chain of if-blocks.
    """
    print("special date function reached")
    today = datetime.now().date()
    for (month, day), columns in _SPECIAL_DATE_MOVIES.items():
        if today.replace(month=month, day=day) == date_passed:
            return pd.concat([pd.DataFrame(columns), recommended_list])
    # No special date matched: return an untouched copy, as before.
    return recommended_list.copy()
# In[25]:
def recommendation_updater(recommended_list, genre_score):
    """Boost each movie's 'Estimated Prediction' by the sum of *genre_score*
    values for the genres the movie belongs to.

    *recommended_list* must carry a 'genres' column holding a Python-literal
    string such as "[{'name': 'Action'}, ...]". Returns a new DataFrame; the
    input frame is left untouched.
    """
    new_list = recommended_list.copy()
    for ind in recommended_list.index:
        # NOTE(review): eval() on the genres string executes arbitrary code if
        # the underlying CSV is untrusted; consider ast.literal_eval instead.
        movie_genre = list(eval(recommended_list['genres'][ind]))
        genre_names = [entry['name'] for entry in movie_genre]
        boost = sum(genre_score.get(name, 0) for name in genre_names)
        # BUG FIX: the original used chained assignment
        # (new_list['Estimated Prediction'][ind] = ...), which pandas flags with
        # SettingWithCopyWarning and which can silently fail to write through;
        # .loc writes into the frame unambiguously.
        new_list.loc[ind, 'Estimated Prediction'] = new_list.loc[ind, 'Estimated Prediction'] + boost
    return new_list
# In[26]:
def contextual_update(list_passed, family=False, device="Mobile", no_of_people=1, date_passed=15,
                      month_passed=8):
    """Re-rank a recommendation list using contextual signals.

    Adjusts 'Estimated Prediction' for special dates, time of day, season,
    weekday/weekend, device and audience (group size / family), then returns
    the list sorted by the new score, clipped to [0, 5].

    Parameters
    ----------
    list_passed : pandas.DataFrame
        Recommendations with at least 'TMDb Id' and 'Estimated Prediction'.
    family : bool
        True when the audience is a family group.
    device : str
        Playback device; "phone" and "tablet" are special-cased, anything
        else falls through to the TV/other weights.
    no_of_people : int
        Audience size; the group/family adjustment only applies when > 1.
    date_passed, month_passed : int or str
        Day and month used for the special-date boost.

    Returns
    -------
    pandas.DataFrame
        Re-scored, sorted copy of the input (helper columns removed).
    """
    # categories we have romance,action,comedy,drama ,crime and thriller ,documentary,sci-fi
    recommended_list = list_passed.copy()
    print("Before Context-Awareness based changes - ")
    print(list_passed)
    # Adding Genres for update
    # NOTE(review): 'meta' is a module-level table loaded elsewhere in this
    # file; the merge attaches each movie's genre string via its TMDb id.
    recommended_list = pd.merge(recommended_list, meta[['tmdbId', 'genres']], left_on=['TMDb Id'],
                                right_on=['tmdbId']).dropna()
    # Special Days
    date_used = datetime.now().date()
    date_used = date_used.replace(month=int(month_passed), day=int(date_passed))
    recommended_list = special_date(recommended_list, date_used)
    recommended_list.reset_index(drop=True, inplace=True)
    # Reducing score to take account for contextual_update
    # Every prediction is first reduced by effect_rate; the four contextual
    # passes below (time, season, weekday, device) can add it back per genre.
    effect_rate = 0.75
    category = 4
    recommended_list['Estimated Prediction'] = recommended_list['Estimated Prediction'] - effect_rate
    # Timing based
    day_part = day_time()
    if day_part == "morning":
        scores = {
            'Romance': 0.24 * (effect_rate / category), 'Action': 0.18 * (effect_rate / category),
            'Comedy': 0.64 * (effect_rate / category), 'Drama': 0.24 * (effect_rate / category),
            'Crime': 0.17 * (effect_rate / category),
            'Thriller': 0.17 * (effect_rate / category), 'Documentary': 0.25 * (effect_rate / category),
            'Science Fiction': 0.28 * (effect_rate / category)
        }
    elif day_part == "afternoon":
        scores = {
            'Romance': 0.18 * (effect_rate / category), 'Action': 0.44 * (effect_rate / category),
            'Comedy': 0.48 * (effect_rate / category), 'Drama': 0.35 * (effect_rate / category),
            'Crime': 0.5 * (effect_rate / category),
            'Thriller': 0.5 * (effect_rate / category), 'Documentary': 0.24 * (effect_rate / category),
            'Science Fiction': 0.35 * (effect_rate / category)
        }
    elif day_part == "evening":
        scores = {
            'Romance': 0.4 * (effect_rate / category), 'Action': 0.34 * (effect_rate / category),
            'Comedy': 0.48 * (effect_rate / category), 'Drama': 0.3 * (effect_rate / category),
            'Crime': 0.4 * (effect_rate / category),
            'Thriller': 0.4 * (effect_rate / category), 'Documentary': 0.24 * (effect_rate / category),
            'Science Fiction': 0.32 * (effect_rate / category)
        }
    else:
        # night
        scores = {
            'Romance': 0.57 * (effect_rate / category), 'Action': 0.37 * (effect_rate / category),
            'Comedy': 0.42 * (effect_rate / category), 'Drama': 0.37 * (effect_rate / category),
            'Crime': 0.54 * (effect_rate / category),
            'Thriller': 0.54 * (effect_rate / category), 'Documentary': 0.31 * (effect_rate / category),
            'Science Fiction': 0.41 * (effect_rate / category)
        }
    recommended_list = recommendation_updater(recommended_list, scores)
    # Season based
    curr_season = season()
    if curr_season == "summer":
        scores = {
            'Romance': 0.32 * (effect_rate / category), 'Action': 0.48 * (effect_rate / category),
            'Comedy': 0.57 * (effect_rate / category), 'Drama': 0.5 * (effect_rate / category),
            'Crime': 0.6 * (effect_rate / category),
            'Thriller': 0.6 * (effect_rate / category), 'Documentary': 0.27 * (effect_rate / category),
            'Science Fiction': 0.47 * (effect_rate / category)
        }
    elif curr_season == "rainy":
        scores = {
            'Romance': 0.57 * (effect_rate / category), 'Action': 0.3 * (effect_rate / category),
            'Comedy': 0.52 * (effect_rate / category), 'Drama': 0.5 * (effect_rate / category),
            'Crime': 0.41 * (effect_rate / category),
            'Thriller': 0.41 * (effect_rate / category), 'Documentary': 0.14 * (effect_rate / category),
            'Science Fiction': 0.32 * (effect_rate / category)
        }
    elif curr_season == "autumn":
        scores = {
            'Romance': 0.41 * (effect_rate / category), 'Action': 0.37 * (effect_rate / category),
            'Comedy': 0.5 * (effect_rate / category), 'Drama': 0.48 * (effect_rate / category),
            'Crime': 0.52 * (effect_rate / category),
            'Thriller': 0.52 * (effect_rate / category), 'Documentary': 0.31 * (effect_rate / category),
            'Science Fiction': 0.44 * (effect_rate / category)
        }
    else:
        # winter
        scores = {
            'Romance': 0.54 * (effect_rate / category), 'Action': 0.45 * (effect_rate / category),
            'Comedy': 0.51 * (effect_rate / category), 'Drama': 0.42 * (effect_rate / category),
            'Crime': 0.5 * (effect_rate / category),
            'Thriller': 0.5 * (effect_rate / category), 'Documentary': 0.21 * (effect_rate / category),
            'Science Fiction': 0.32 * (effect_rate / category)
        }
    recommended_list = recommendation_updater(recommended_list, scores)
    # Weekday based -
    if is_weekend():
        scores = {
            'Romance': 0.41 * (effect_rate / category), 'Action': 0.48 * (effect_rate / category),
            'Comedy': 0.54 * (effect_rate / category), 'Drama': 0.38 * (effect_rate / category),
            'Crime': 0.7 * (effect_rate / category),
            'Thriller': 0.7 * (effect_rate / category), 'Documentary': 0.28 * (effect_rate / category),
            'Science Fiction': 0.41 * (effect_rate / category)
        }
    else:
        scores = {
            'Romance': 0.37 * (effect_rate / category), 'Action': 0.32 * (effect_rate / category),
            'Comedy': 0.51 * (effect_rate / category), 'Drama': 0.32 * (effect_rate / category),
            'Crime': 0.48 * (effect_rate / category),
            'Thriller': 0.48 * (effect_rate / category), 'Documentary': 0.21 * (effect_rate / category),
            'Science Fiction': 0.38 * (effect_rate / category)
        }
    recommended_list = recommendation_updater(recommended_list, scores)
    # Device Based
    if device == "phone":
        scores = {
            'Romance': 0.36 * (effect_rate / category), 'Action': 0.24 * (effect_rate / category),
            'Comedy': 0.66 * (effect_rate / category), 'Drama': 0.44 * (effect_rate / category),
            'Crime': 0.38 * (effect_rate / category),
            'Thriller': 0.38 * (effect_rate / category), 'Documentary': 0.2 * (effect_rate / category),
            'Science Fiction': 0.21 * (effect_rate / category)
        }
    elif device == "tablet":
        scores = {
            'Romance': 0.34 * (effect_rate / category), 'Action': 0.37 * (effect_rate / category),
            'Comedy': 0.43 * (effect_rate / category), 'Drama': 0.43 * (effect_rate / category),
            'Crime': 0.42 * (effect_rate / category),
            'Thriller': 0.42 * (effect_rate / category), 'Documentary': 0.22 * (effect_rate / category),
            'Science Fiction': 0.36 * (effect_rate / category)
        }
    else:
        # TV / other device
        scores = {
            'Romance': 0.33 * (effect_rate / category), 'Action': 0.6 * (effect_rate / category),
            'Comedy': 0.24 * (effect_rate / category), 'Drama': 0.3 * (effect_rate / category),
            'Crime': 0.66 * (effect_rate / category),
            'Thriller': 0.66 * (effect_rate / category), 'Documentary': 0.21 * (effect_rate / category),
            'Science Fiction': 0.58 * (effect_rate / category)
        }
    recommended_list = recommendation_updater(recommended_list, scores)
    # Based on Number of people and Family -
    if no_of_people > 1:
        if family:
            scores = {
                'Romance': 0.1 * (effect_rate / category), 'Action': 0.43 * (effect_rate / category),
                'Comedy': 0.66 * (effect_rate / category), 'Drama': 0.49 * (effect_rate / category),
                'Crime': 0.26 * (effect_rate / category),
                'Thriller': 0.26 * (effect_rate / category), 'Documentary': 0.36 * (effect_rate / category),
                'Science Fiction': 0.29 * (effect_rate / category)
            }
        else:
            scores = {
                'Romance': 0.33 * (effect_rate / category), 'Action': 0.63 * (effect_rate / category),
                'Comedy': 0.54 * (effect_rate / category), 'Drama': 0.33 * (effect_rate / category),
                'Crime': 0.61 * (effect_rate / category),
                'Thriller': 0.61 * (effect_rate / category), 'Documentary': 0.17 * (effect_rate / category),
                'Science Fiction': 0.54 * (effect_rate / category)
            }
        recommended_list = recommendation_updater(recommended_list, scores)
    # removing genre from table
    recommended_list.drop(['tmdbId', 'genres'], axis=1, inplace=True)
    # Sorting the list for final result and comparing
    # NOTE(review): Series.clip(..., inplace=True) on a column selection is
    # chained assignment; deprecated/removed in newer pandas — confirm version.
    recommended_list['Estimated Prediction'].clip(lower=0,upper =5,inplace=True)
    recommended_list.sort_values(by='Estimated Prediction', ascending=False, inplace=True)
    print(recommended_list)
    return recommended_list
def index(request):
    """Django view for the main recommendation page.

    On POST, reads the form fields, builds a hybrid recommendation list,
    optionally applies context-aware re-ranking, and renders the result;
    for any other method it renders the empty form.
    """
    if request.method != 'POST':
        return render(request, 'rec/index.html',
                      {'yes': False, 'no': False, 'movie': "Hello", 'number': 0, 'display_rec': 'none'})

    # Form inputs ('mood' and 'year' are read so a missing field still fails
    # loudly, even though they are not used further down).
    choice = request.POST['choice']
    movie = request.POST['movie']
    user_id = request.POST['user_Id']
    people_count = request.POST['number_of_people']
    device = request.POST['device-type']
    mood = request.POST['mood']
    family = request.POST.getlist('family[]')
    day = request.POST['day']
    month = request.POST['month']
    year = request.POST['year']

    recommendations = hybrid(user_id, movie)

    print(type(family))
    print(family)
    print(device)
    print(type(people_count))
    print(people_count)

    # Apply contextual recommendation only when the user opted in
    if choice == "Yes":
        as_family = family == ['Yes']
        recommendations = contextual_update(recommendations, as_family, device,
                                            int(people_count), day, month)

    # Fix format for display: drop text columns, scale predictions to
    # percentages, and flatten to a plain list for the template.
    print(recommendations)
    recommendations.drop(['Title', 'Vote Average'], axis=1, inplace=True)
    recommendations['Estimated Prediction'] = recommendations['Estimated Prediction'] * 20
    recommendations['Estimated Prediction'] = round(recommendations['Estimated Prediction'], 2)
    movies_id = recommendations.values.tolist()
    print(movies_id)
    return render(request, 'rec/index.html', {'display_rec': 'inline-block', 'movies_id': movies_id})
def movie(request, id):
    """Render the detail page for a single movie identified by *id*."""
    context = {'id': id}
    return render(request, 'rec/movie.html', context)
| yadavgaurav251/Context-Aware-Recommender | UI/rec/views.py | views.py | py | 22,276 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
28136082705 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
from flask import jsonify, make_response
import octoprint.plugin
from octoprint.server import admin_permission
class NetconnectdSettingsPlugin(octoprint.plugin.SettingsPlugin,
                                octoprint.plugin.TemplatePlugin,
                                octoprint.plugin.SimpleApiPlugin,
                                octoprint.plugin.AssetPlugin):
    """OctoPrint front-end for the netconnectd network daemon.

    Talks to netconnectd over a unix domain socket (path taken from the
    ``socket`` setting) using a NUL-terminated JSON request/response
    protocol, and exposes wifi management (list/configure/forget, access
    point start/stop, factory reset) through the SimpleApiPlugin API.
    """

    def __init__(self):
        # Path of the netconnectd unix socket; populated in initialize()
        # once the settings subsystem is available.
        self.address = None

    def initialize(self):
        self.address = self._settings.get(["socket"])

    @property
    def hostname(self):
        """The hostname to advertise: the configured override if set,
        otherwise the system hostname with ``.local`` appended."""
        hostname = self._settings.get(["hostname"])
        if hostname:
            return hostname
        else:
            import socket
            return socket.gethostname() + ".local"

    ##~~ SettingsPlugin

    def on_settings_save(self, data):
        octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
        # Pick up a changed socket path immediately.
        self.address = self._settings.get(["socket"])

    def get_settings_defaults(self):
        # timeout: seconds to wait on the daemon socket (see _send_message)
        return dict(
            socket="/var/run/netconnectd.sock",
            hostname=None,
            timeout=10
        )

    ##~~ TemplatePlugin API

    def get_template_configs(self):
        return [
            dict(type="settings", name="Network connection")
        ]

    ##~~ SimpleApiPlugin API

    def get_api_commands(self):
        """Declare the supported API commands; none take required parameters."""
        return dict(
            start_ap=[],
            stop_ap=[],
            refresh_wifi=[],
            configure_wifi=[],
            forget_wifi=[],
            reset=[]
        )

    def is_api_adminonly(self):
        return True

    def on_api_get(self, request):
        """Return current daemon status, the wifi list (if a wifi interface
        is present) and the hostname; errors are reported as JSON, not raised."""
        try:
            status = self._get_status()
            if status["wifi"]["present"]:
                wifis = self._get_wifi_list()
            else:
                wifis = []
        except Exception as e:
            return jsonify(dict(error=str(e)))

        return jsonify(dict(
            wifis=wifis,
            status=status,
            hostname=self.hostname
        ))

    def on_api_command(self, command, data):
        """Dispatch an API command to the matching daemon request."""
        if command == "refresh_wifi":
            return jsonify(self._get_wifi_list(force=True))

        # any commands processed after this check require admin permissions
        if not admin_permission.can():
            return make_response("Insufficient rights", 403)

        if command == "configure_wifi":
            # deliberately do not log the psk itself
            if data["psk"]:
                self._logger.info("Configuring wifi {ssid} and psk...".format(**data))
            else:
                self._logger.info("Configuring wifi {ssid}...".format(**data))
            self._configure_and_select_wifi(data["ssid"], data["psk"], force=data["force"] if "force" in data else False)

        elif command == "forget_wifi":
            self._forget_wifi()

        elif command == "reset":
            self._reset()

        elif command == "start_ap":
            self._start_ap()

        elif command == "stop_ap":
            self._stop_ap()

    ##~~ AssetPlugin API

    def get_assets(self):
        return dict(
            js=["js/netconnectd.js"],
            css=["css/netconnectd.css"],
            less=["less/netconnectd.less"]
        )

    ##~~ Private helpers

    def _get_wifi_list(self, force=False):
        """Ask the daemon for visible wifi networks.

        Raises RuntimeError when the daemon reports an error.
        """
        payload = dict()
        if force:
            self._logger.info("Forcing wifi refresh...")
            payload["force"] = True

        flag, content = self._send_message("list_wifi", payload)
        if not flag:
            raise RuntimeError("Error while listing wifi: " + content)

        result = []
        for wifi in content:
            result.append(dict(ssid=wifi["ssid"], address=wifi["address"], quality=wifi["signal"], encrypted=wifi["encrypted"]))
        return result

    def _get_status(self):
        """Query the daemon's current connection status."""
        payload = dict()

        flag, content = self._send_message("status", payload)
        if not flag:
            raise RuntimeError("Error while querying status: " + content)

        return content

    def _configure_and_select_wifi(self, ssid, psk, force=False):
        """Store the wifi credentials in the daemon, then activate wifi."""
        payload = dict(
            ssid=ssid,
            psk=psk,
            force=force
        )

        flag, content = self._send_message("config_wifi", payload)
        if not flag:
            raise RuntimeError("Error while configuring wifi: " + content)

        flag, content = self._send_message("start_wifi", dict())
        if not flag:
            raise RuntimeError("Error while selecting wifi: " + content)

    def _forget_wifi(self):
        payload = dict()
        flag, content = self._send_message("forget_wifi", payload)
        if not flag:
            raise RuntimeError("Error while forgetting wifi: " + content)

    def _reset(self):
        payload = dict()
        flag, content = self._send_message("reset", payload)
        if not flag:
            raise RuntimeError("Error while factory resetting netconnectd: " + content)

    def _start_ap(self):
        payload = dict()
        flag, content = self._send_message("start_ap", payload)
        if not flag:
            raise RuntimeError("Error while starting ap: " + content)

    def _stop_ap(self):
        payload = dict()
        flag, content = self._send_message("stop_ap", payload)
        if not flag:
            raise RuntimeError("Error while stopping ap: " + content)

    def _send_message(self, message, data):
        """Send ``{message: data}`` to the daemon and return (ok, payload).

        Protocol: a compact JSON document terminated by a NUL byte in each
        direction. Returns (True, result) on success, (False, error text)
        on a daemon-reported error or any socket failure.
        """
        obj = dict()
        obj[message] = data

        import json
        # NOTE(review): json.dumps' "encoding" kwarg exists only on Python 2;
        # on Python 3 this raises TypeError — confirm the target runtime.
        js = json.dumps(obj, encoding="utf8", separators=(",", ":"))

        import socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self._settings.get_int(["timeout"]))
        try:
            sock.connect(self.address)
            # Python 2 str concatenation; Python 3 would need bytes here.
            sock.sendall(js + '\x00')

            buffer = []
            while True:
                chunk = sock.recv(16)
                if chunk:
                    buffer.append(chunk)
                    if chunk.endswith('\x00'):
                        break

            data = ''.join(buffer).strip()[:-1]

            response = json.loads(data.strip())
            if "result" in response:
                return True, response["result"]
            elif "error" in response:
                # something went wrong
                self._logger.warn("Request to netconnectd went wrong: " + response["error"])
                return False, response["error"]
            else:
                output = "Unknown response from netconnectd: {response!r}".format(response=response)
                self._logger.warn(output)
                return False, output
        except Exception as e:
            output = "Error while talking to netconnectd: {}".format(e)
            self._logger.warn(output)
            return False, output
        finally:
            sock.close()
__plugin_name__ = "Netconnectd Client"
def __plugin_check__():
import sys
if sys.platform == 'linux2':
return True
logging.getLogger("octoprint.plugins." + __name__).warn("The netconnectd plugin only supports Linux")
return False
def __plugin_load__():
    """Instantiate the plugin implementation and signal a successful load."""
    # since we depend on a Linux environment, we instantiate the plugin implementation here since this will only be
    # called if the OS check above was successful
    global __plugin_implementation__
    __plugin_implementation__ = NetconnectdSettingsPlugin()
    return True
| OctoPrint/OctoPrint-Netconnectd | octoprint_netconnectd/__init__.py | __init__.py | py | 6,382 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "octoprint.plugin.plugin",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "octoprint.plugin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "octoprint.plugin.plugin",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_na... |
40860207729 | import logging
import time
from airhockey.utils import Timeout
class AwaitVideoHandler(object):
    """Callable that starts a video stream and blocks until it produces a
    frame or the configured timeout expires."""

    SUCCESS = "SUCCESS"
    TIMEOUT = "TIMEOUT"

    def __init__(self, video_stream, timeout):
        self.video_stream = video_stream
        self.timeout = timeout
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)

    def __call__(self, *args, **kwargs):
        """Return SUCCESS once a frame arrives, TIMEOUT otherwise."""
        self.logger.info("Waiting for video stream...")
        deadline = Timeout(self.timeout)
        self.video_stream.start()
        while True:
            if self.video_stream.has_frame():
                break
            if deadline.timeout():
                self.logger.info("Video stream timeout.")
                return self.TIMEOUT
            # poll at 10 Hz to avoid busy-waiting
            time.sleep(0.1)
        self.logger.info("Video stream OK.")
        return self.SUCCESS
| peter-svintsitskyi/airhockey | airhockey/handlers/await_video.py | await_video.py | py | 799 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "airhockey.utils.Timeout",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.slee... |
37208304076 | #!/usr/bin/env python3.6
import os, sys
import yaml
from lmfit import Model
from lmfit.models import GaussianModel
from lmfit.model import save_modelresult
from lmfit.model import load_modelresult
from reproject import reproject_exact
from astropy.io import ascii, fits
from astropy.table import Table, Column
import numpy as np
#import numpy.ma as ma
import tPlay,cvPlay,bptPlot,momPlot
tP = tPlay.tplay()
cvP = cvPlay.convert()
bpt = bptPlot.BPTplot()
mPl = momPlot.MOMplot()
class momplay:
'''Modules to create moment maps, residual maps, line ratios maps
- makeMoments
load line list, datacube and create loop for moments module
- makeSigmaCentroidMap
load line list, datacube and create loop for momSigmaCentroid module
- makeMomPlots
load line list, call momPlot for each line for mom0, mom1 maps from gaussian components
- momSigmaCentroid
mom0, mom1 from centroid and sigma of fitted line
- moments
mom0, mom1, mom2 of the fitted gaussian components of the line
- resCube
cube of residuals of fit
- resLines
for each line residuals are computed as the standard deviation of line-fit
within vrange and as the sum of the absolute value of line-fit
- makeLineRatioMaps
load line list, and create loop for momLineRatio
- momLineRatio
maps of the line ratios (OIII/Hbeta, NII/Halpha, SII/Halpha)
- momCDist
map of the eta-parameter (distance from Kauffmann and Kewley SF curves)
'''
def makeMoments(self, cfg_par):
    """Loop over the fitted emission lines and build moment maps for each.

    Reads the data-cube header and the line list, then delegates the
    per-line work to self.moments().
    """
    workDir = cfg_par['general']['cubeDir']  # kept for parity with siblings (unused here)
    cube = fits.open(cfg_par['general']['dataCubeName'])
    header = cube[0].header
    lineInfo = tP.openLineList(cfg_par)
    for idx in range(len(lineInfo['ID'])):
        rawName = str(lineInfo['Name'][idx])
        wave = str(int(lineInfo['Wave'][idx]))
        # bracketed names such as "[OIII]" lose their brackets for file naming
        if '[' in rawName:
            lineName = rawName.replace("[", "").replace("]", "") + wave
        else:
            lineName = rawName + wave
        lineNameStr = rawName + wave
        lineThresh = float(lineInfo['SNThresh'][idx])
        cenRange = float(lineInfo['cenRange'][idx])
        print('\n\t +++\t\t ' + lineName + '\t\t +++')
        # both branches of the original ii==0 condition set this to True
        doBinMap = True
        self.moments(cfg_par, lineName, lineNameStr, header,
                     cfg_par['general']['outTableName'], lineThresh, doBinMap, cenRange)
    return
def makeSigmaCentroidMaps(self, cfg_par):
    """Build sigma/centroid/W80 maps for every line in the line list.

    Reads the data-cube header and the line list, then delegates the
    per-line map construction to self.momSigmaCentroid().
    """
    workDir = cfg_par['general']['cubeDir']  # kept for parity with siblings (unused here)
    cube = fits.open(cfg_par['general']['dataCubeName'])
    header = cube[0].header
    lineInfo = tP.openLineList(cfg_par)
    for idx in range(len(lineInfo['ID'])):
        rawName = str(lineInfo['Name'][idx])
        wave = str(int(lineInfo['Wave'][idx]))
        # bracketed names such as "[OIII]" lose their brackets for file naming
        if '[' in rawName:
            lineName = rawName.replace("[", "").replace("]", "") + wave
        else:
            lineName = rawName + wave
        lineNameStr = rawName + wave
        lineThresh = float(lineInfo['SNThresh'][idx])
        cenRange = float(lineInfo['cenRange'][idx])
        print('\t +++\t\t' + lineName + '\t\t+++')
        self.momSigmaCentroid(cfg_par, lineName, lineNameStr, header, lineThresh, cenRange)
    return
def makeMomPlots(self,cfg_par):
    """Plot mom0/mom1 maps (g1 component) for every line in the line list.

    Reads the line list, derives the per-line file names and thresholds,
    and calls the plotting helpers for the pre-computed moment FITS files.
    """
    workDir = cfg_par['general']['cubeDir']
    modName = cfg_par['gFit']['modName']
    momModDir = cfg_par['general']['momDir']+modName+'/'
    lineInfo = tP.openLineList(cfg_par)
    for ii in range(0,len(lineInfo['ID'])):
        lineNameStr = str(lineInfo['Name'][ii])
        # BUG FIX: the original tested "'[' in lineName" before lineName was
        # ever assigned (NameError on every iteration) and lacked the else
        # branch; mirror the logic of makeMoments/makeSigmaCentroidMaps.
        if '[' in lineNameStr:
            lineName = lineNameStr.replace("[", "")
            lineName = lineName.replace("]", "")
            lineName = lineName+str(int(lineInfo['Wave'][ii]))
        else:
            lineName = lineNameStr+str(int(lineInfo['Wave'][ii]))
        lineNameStr = lineNameStr+str(int(lineInfo['Wave'][ii]))
        lineThresh = float(lineInfo['SNThresh'][ii])
        cenRange = float(lineInfo['cenRange'][ii])
        print('\n\t *********** --- Plot Moms: '+lineName+' --- ***********\n')
        mom0Name = momModDir+'mom0_g1-'+lineName+'.fits'
        mom1Name = momModDir+'mom1_g1-'+lineName+'.fits'
        mPl.mom0Plot(cfg_par, mom0Name,lineName,lineNameStr,lineThresh)
        mPl.mom1Plot(cfg_par, mom1Name,lineName,lineThresh,lineNameStr, 'moments',vRange=[-cenRange,cenRange])
    return
def momSigmaCentroid(self,cfg_par,lineName,lineNameStr,header,lineThresh,cenRange):
    """Build sigma, centroid, W80 and dispersion maps for one line.

    For every Voronoi bin passing the S/N and sigma thresholds, the line's
    ancillary quantities (w80, sigma, centroid, intrinsic dispersion) are
    painted into 2-D pixel maps, written as FITS images and plotted.

    Parameters: cfg_par (config dict), lineName (bracket-free name+wave,
    used in file names), lineNameStr (display name), header (cube header,
    spectral axis keywords stripped in place), lineThresh (S/N cut),
    cenRange (velocity range for the centroid plot).
    """
    modName = cfg_par['gFit']['modName']
    momModDir = cfg_par['general']['momDir']+modName+'/'
    if not os.path.exists(momModDir):
        os.mkdir(momModDir)
    # drop the spectral (third) axis keywords: the output maps are 2-D
    if 'CUNIT3' in header:
        del header['CUNIT3']
    if 'CTYPE3' in header:
        del header['CTYPE3']
    if 'CDELT3' in header:
        del header['CDELT3']
    if 'CRVAL3' in header:
        del header['CRVAL3']
    if 'CRPIX3' in header:
        del header['CRPIX3']
    if 'NAXIS3' in header:
        del header['NAXIS3']
    if 'CRDER3' in header:
        del header['CRDER3']
    momSigmaHead = header.copy()
    momCentroidHead = header.copy()
    momW80Head = header.copy()
    hdul = fits.open(cfg_par['general']['outTableName'])
    lines = hdul['Ancels'+cfg_par['gFit']['modName']].data
    # NOTE(review): for the best-fit ('BF') model the residuals of the g2
    # model are used; this mutates cfg_par for the rest of the call.
    if cfg_par['gFit']['modName'] == 'BF':
        cfg_par['gFit']['modName'] = 'g2'
    residuals = hdul['Residuals_'+cfg_par['gFit']['modName']].data
    #esiduals = hdul['Residuals_G1'].data
    linesG1 = hdul['LineRes_G1'].data
    #hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
    tabGen = hdul['BININFO'].data
    # NaN-initialised maps: pixels that fail the thresholds stay blank
    momSigma = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    momCentroid = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    momW80 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    momDisp = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    for i in range(0,len(lines['BIN_ID'])):
        # all pixels belonging to this Voronoi bin
        match_bin = np.where(tabGen['BIN_ID']==lines['BIN_ID'][i])[0]
        for index in match_bin:
            # selection is always driven by the NII6583 line's S/N and the
            # g1 intrinsic sigma, regardless of which line is being mapped
            thresHold = residuals['SN_NII6583'][index]
            sigmaThresh = linesG1['g1_SigIntr_NII6583'][index]
            if thresHold >= lineThresh and sigmaThresh < cfg_par['moments']['sigmaThresh']:
                # if thresHold >= lineThresh :
                momW80[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['w80_'+lineName][i]
                momSigma[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['sigma_'+lineName][i]
                momCentroid[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['centroid_'+lineName][i]
                momDisp[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['dispIntr_'+lineName][i]
    momSigmaHead['WCSAXES'] = 2
    momSigmaHead['SPECSYS'] = 'topocent'
    momSigmaHead['BUNIT'] = 'km/s'
    fits.writeto(momModDir+'momSigma-'+lineName+'.fits',momSigma,momSigmaHead,overwrite=True)
    mPl.mom2Plot(cfg_par, momModDir+'momSigma-'+lineName+'.fits',lineName,lineThresh,lineNameStr,'ancillary')
    fits.writeto(momModDir+'momDisp-'+lineName+'.fits',momDisp,momSigmaHead,overwrite=True)
    mPl.mom2Plot(cfg_par, momModDir+'momDisp-'+lineName+'.fits',lineName,lineThresh,lineNameStr,'ancillary')
    momCentroidHead['WCSAXES'] = 2
    momCentroidHead['SPECSYS'] = 'topocent'
    momCentroidHead['BUNIT'] = 'km/s'
    fits.writeto(momModDir+'momCentroid-'+lineName+'.fits',momCentroid,momCentroidHead,overwrite=True)
    mPl.mom1Plot(cfg_par, momModDir+'momCentroid-'+lineName+'.fits',lineName,lineThresh,
                 lineNameStr,'ancillary',vRange=[-cenRange,cenRange],modName=cfg_par['gFit']['modName'])
    momW80Head['WCSAXES'] = 2
    momW80Head['SPECSYS'] = 'topocent'
    momW80Head['BUNIT'] = 'km/s'
    fits.writeto(momModDir+'momW80-'+lineName+'.fits',momW80,momW80Head,overwrite=True)
    mPl.mom2Plot(cfg_par, momModDir+'momW80-'+lineName+'.fits',lineName,lineThresh,lineNameStr,'ancillary')
    return
def moments(self,cfg_par,lineName,lineNameStr,header,outTableName,lineThresh,doBinMap,cenRange):
    """Build mom0/mom1/mom2 maps for one line from the Gaussian fit results.

    For every Voronoi bin passing the S/N and sigma thresholds, the g1
    (and, for multi-Gaussian models, g2/g3) amplitude, centre and intrinsic
    sigma are painted into pixel maps and written as FITS images, plus an
    optional BIN_ID map. Per-bin amplitudes are also stored back into the
    VORBININFO table of the Voronoi line table file.
    """
    modName = cfg_par['gFit']['modName']
    momModDir = cfg_par['general']['momDir']+modName+'/'
    if not os.path.exists(momModDir):
        os.mkdir(momModDir)
    # drop the spectral (third) axis keywords: the output maps are 2-D
    if 'CUNIT3' in header:
        del header['CUNIT3']
    if 'CTYPE3' in header:
        del header['CTYPE3']
    if 'CDELT3' in header:
        del header['CDELT3']
    if 'CRVAL3' in header:
        del header['CRVAL3']
    if 'CRPIX3' in header:
        del header['CRPIX3']
    if 'NAXIS3' in header:
        del header['NAXIS3']
    mom0Head = header.copy()
    mom1Head = header.copy()
    mom2Head = header.copy()
    binHead = header.copy()
    hdul = fits.open(cfg_par['general']['outTableName'])
    lines = hdul['LineRes_'+cfg_par['gFit']['modName']].data
    # lines['BIN_ID'] = hdul['BININFO'].data['ID']
    residuals = hdul['Residuals_'+cfg_par['gFit']['modName']].data
    #residuals = hdul['Residuals_G1'].data
    linesG1 = hdul['LineRes_G1'].data
    hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
    tabGen = hduGen[1].data
    # NOTE(review): ampSpax is allocated but never filled (the assignments
    # below are commented out), so uninitialised values end up in the
    # VORBININFO table — confirm this is intended.
    ampSpax = np.empty(len(tabGen['BIN_ID']))
    # NaN-initialised maps: pixels that fail the thresholds stay blank
    mom0G1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    mom1G1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    mom2G1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    heightG1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    if doBinMap==True:
        binMap = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    if modName != 'g1':
        #ancels = hdul['Ancels_'+cfg_par['gFit']['modName']].data
        mom0Tot = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
        mom0G2 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
        mom1G2 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
        mom2G2 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
        if modName == 'g3':
            mom0G3 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
            mom1G3 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
            mom2G3 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
    for i in range(0,len(lines['BIN_ID'])):
        #if lines['BIN_ID'][i]< 0:
        #    continue
        #else:
        # all pixels belonging to this Voronoi bin
        match_bin = np.where(tabGen['BIN_ID']==lines['BIN_ID'][i])[0]
        for index in match_bin:
            # selection is always driven by the NII6583 line's S/N and the
            # g1 intrinsic sigma, regardless of which line is being mapped
            thresHold = residuals['SN_NII6583'][i]
            sigmaThresh = linesG1['g1_SigIntr_NII6583'][i]
            if cfg_par['gFit']['method'] == 'pixel':
                tabGen['NSPAX'][index] = 1.
            #if modName=='g2':
            #    ampSpax[index] = (lines['g1_Amp_'+lineName][i]+lines['g2_Amp_'+lineName][i])/tabGen['NSPAX'][index]
            #elif modName=='g3':
            #    ampSpax[index] = (lines['g1_Amp_'+lineName][i]+lines['g2_Amp_'+lineName][i]+lines['g3_Amp_'+lineName][i])/tabGen['NSPAX'][index]
            #thresHold = lines['g1_Height_'+lineName][i]/0.3989423*lines['g1_Sigma_'+lineName][i]/noise[0,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]
            #print(lines['g1_Height_'+lineName][i]/0.3989423*lines['g1_Sigma_'+lineName][i],lines['g1_Sigma_'+lineName][i],lines['g1_Height_'+lineName][i])
            #print(thresHold,lineThresh)
            if thresHold >= lineThresh and sigmaThresh < cfg_par['moments']['sigmaThresh']:
                # if thresHold >= lineThresh:
                # amplitudes are normalised by the number of spaxels in the bin
                mom0G1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g1_Amp_'+lineName][i]/tabGen['NSPAX'][index]
                # mom0G1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g1_Height_'+lineName][i]/tabGen['NSPAX'][index]
                mom1G1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g1_Centre_'+lineName][i]
                mom2G1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g1_SigIntr_'+lineName][i]
                heightG1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g1_Height_'+lineName][i]
                if doBinMap==True:
                    binMap[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['BIN_ID'][i]
                if modName != 'g1':
                    mom0G2[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g2_Amp_'+lineName][i]/tabGen['NSPAX'][index]
                    if lines['g2_Amp_'+lineName][i]!=0.0:
                        mom1G2[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g2_Centre_'+lineName][i]
                        mom2G2[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g2_SigIntr_'+lineName][i]
                    if modName == 'g3':
                        mom0G3[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g3_Amp_'+lineName][i]/tabGen['NSPAX'][index]
                        if lines['g2_Amp_'+lineName][i]!=0.0:
                            mom1G3[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g3_Centre_'+lineName][i]
                            mom2G3[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lines['g3_SigIntr_'+lineName][i]
                #mom0Tot[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = ampSpax[i]
            #else#:
            #    print(mom1G1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])])
            #    print(int(tabGen['PixY'][index]),int(tabGen['PixX'][index]))
    if doBinMap==True:
        binHead['SPECSYS'] = 'topocent'
        binHead['BUNIT'] = 'Flux'
        fits.writeto(momModDir+'binMapMom0_'+lineName+'.fits',binMap, binHead,overwrite=True)
    del mom0Head['CRDER3']
    del mom1Head['CRDER3']
    del mom2Head['CRDER3']
    mom0Head['WCSAXES'] = 2
    mom0Head['SPECSYS'] = 'topocent'
    mom0Head['BUNIT'] = 'Jy/beam.km/s'
    fits.writeto(momModDir+'mom0_g1-'+lineName+'.fits',mom0G1,mom0Head,overwrite=True)
    fits.writeto(momModDir+'height_g1-'+lineName+'.fits',heightG1,mom0Head,overwrite=True)
    #mPl.mom0Plot(cfg_par, momModDir+'mom0_g1-'+lineName+'.fits',lineName,lineNameStr,lineThresh)
    mom1Head['WCSAXES'] = 2
    mom1Head['SPECSYS'] = 'topocent'
    mom1Head['BUNIT'] = 'km/s'
    fits.writeto(momModDir+'mom1_g1-'+lineName+'.fits',mom1G1,mom1Head,overwrite=True)
    #mPl.mom1Plot(cfg_par, momModDir+'mom1_g1-'+lineName+'.fits',lineName,
    #             lineThresh, lineNameStr,'moments', vRange=[-cenRange,cenRange],modName='g1')
    mom2Head['WCSAXES'] = 2
    mom2Head['SPECSYS'] = 'topocent'
    mom2Head['BUNIT'] = 'km/s'
    fits.writeto(momModDir+'mom2_g1-'+lineName+'.fits',mom2G1,mom2Head,overwrite=True)
    #mPl.mom2Plot(cfg_par, momModDir+'mom2_g1-'+lineName+'.fits',lineName,lineThresh,lineNameStr,'moments')
    if modName != 'g1':
        fits.writeto(momModDir+'mom0_g2-'+lineName+'.fits',mom0G2,mom0Head,overwrite=True)
        fits.writeto(momModDir+'mom1_g2-'+lineName+'.fits',mom1G2,mom1Head,overwrite=True)
        fits.writeto(momModDir+'mom2_g2-'+lineName+'.fits',mom2G2,mom2Head,overwrite=True)
        mPl.mom0Plot(cfg_par, momModDir+'mom0_g2-'+lineName+'.fits',lineName,lineNameStr,lineThresh)
        mPl.mom1Plot(cfg_par, momModDir+'mom1_g2-'+lineName+'.fits',lineName,
                     lineNameStr,lineThresh,'moments',vRange=[-cenRange,cenRange],
                     modName='g2')
        mPl.mom2Plot(cfg_par, momModDir+'mom2_g2-'+lineName+'.fits',lineName,lineThresh,lineNameStr,'moments')
        if modName == 'g2':
            # total intensity: sum of both Gaussian components
            fits.writeto(momModDir+'mom0_tot-'+lineName+'.fits', mom0G1+mom0G2,mom0Head,overwrite=True)
            mPl.mom0Plot(cfg_par, momModDir+'mom0_tot-'+lineName+'.fits',lineName,lineNameStr,lineThresh)
        if modName == 'g3':
            fits.writeto(momModDir+'mom0_g3-'+lineName+'.fits',mom0G3,mom0Head,overwrite=True)
            fits.writeto(momModDir+'mom1_g3-'+lineName+'.fits',mom1G3,mom1Head,overwrite=True)
            fits.writeto(momModDir+'mom2_g3-'+lineName+'.fits',mom2G3,mom2Head,overwrite=True)
            fits.writeto(momModDir+'mom0_tot-'+lineName+'.fits',mom0G1+mom0G2+mom0G3,mom0Head,overwrite=True)
    # store (or refresh) the per-spaxel amplitude column in the Voronoi table
    t=Table(tabGen)
    if modName+'-AmpSpax_'+lineName not in tabGen.dtype.names:
        t.add_column(Column(ampSpax,name=modName+'-AmpSpax_'+lineName))
    else:
        t.replace_column(modName+'-AmpSpax_'+lineName,Column(ampSpax,name=modName+'-AmpSpax_'+lineName))
    #try:
    #    tt = Table(hduGen['VORBININFO'].data)
    hduGen['VORBININFO'] = fits.BinTableHDU(t.as_array(),name='VORBININFO')
    #except KeyError as e:
    #    tt=fits.BinTableHDU(t.as_array(),name='VORBININFO')
    #    hdul.append(tt)
    hduGen.writeto(cfg_par['general']['outVorLineTableName'],overwrite=True)
    return
def resCube(self,cfg_par):
    """Build a cube of fit residuals and a cube of best-fit models.

    For every Voronoi bin, the saved lmfit model result is reloaded and its
    best fit (and best fit minus data) is painted into every spaxel of the
    bin, over the fitted wavelength window only. When
    cfg_par['residuals']['BFcube'] is True the per-bin best model (g1/g2,
    as flagged in the residuals table) is used instead of the configured
    model, and the outputs are written under the 'BF' name.
    """
    key = 'general'
    cubeDir = cfg_par['general']['cubeDir']
    workDir = cfg_par['general']['workdir']
    modName = cfg_par['gFit']['modName']
    resModDir = cfg_par['general']['resDir']+modName+'/'
    # if not os.path.exists(resModDir):
    #     os.mkdir(momModDir)
    f = fits.open(cfg_par['general']['dataCubeName'])
    dd = f[0].data
    resHead = f[0].header
    hdul = fits.open(cfg_par['general']['outTableName'])
    lines = hdul['LineRes_'+cfg_par['gFit']['modName']].data
    residuals = hdul['Residuals_'+cfg_par['gFit']['modName']].data
    # per-bin best-model flag: 0 -> g1, 1 -> g2
    bF = np.array(residuals['bestFit'],dtype=int)
    hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
    tabGen = hduGen[1].data
    # NaN-initialised cubes: unfitted spaxels/channels stay blank
    resG1 = np.zeros([resHead['NAXIS3'],resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
    fitCube = np.zeros([resHead['NAXIS3'],resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
    wave,xAxis,yAxis,pxSize,noiseBin, vorBinInfo,dataSpec = tP.openVorLineOutput(cfg_par,cfg_par['general']['outVorLineTableName'],
                                                                                cfg_par['general']['outVorSpectra'])
    #hdul = fits.open(cfg_par['general']['outTableName'])
    #tabGen = hdul['BinInfo'].data
    # channel window of the fit (wave axis is in log-lambda)
    lambdaMin = np.log(cfg_par['gFit']['lambdaMin'])
    lambdaMax = np.log(cfg_par['gFit']['lambdaMax'])
    idxMin = int(np.where(abs(wave-lambdaMin)==abs(wave-lambdaMin).min())[0])
    idxMax = int(np.where(abs(wave-lambdaMax)==abs(wave-lambdaMax).min())[0])
    # if modName != 'g1':
    #     resTot = np.zeros([resHead['NAXIS3'],resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
    #     resG2 = np.zeros([resHead['NAXIS3'],resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
    #     if modName == 'g3':
    #         res0G3 = np.zeros([resHead['NAXIS3'],resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
    for i in range(0,len(lines['BIN_ID'])):
        match_bin = np.where(tabGen['BIN_ID']==lines['BIN_ID'][i])[0]
        if cfg_par['residuals']['BFcube'] == True:
            # pick the per-bin best model saved by the fit stage
            if bF[i] == 0:
                modName = 'g1'
            elif bF[i] == 1:
                modName = 'g2'
            else:
                modName = cfg_par['gFit']['modName']
        result = load_modelresult(cfg_par['general']['runNameDir']+'models/'+modName+'/'+str(lines['BIN_ID'][i])+'_'+modName+'.sav')
        for index in match_bin:
            yy = dd[idxMin:idxMax,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]
            fit = result.best_fit
            residuals = result.best_fit-yy
            resG1[idxMin:idxMax,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = residuals
            fitCube[idxMin:idxMax,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = fit
    resHead['SPECSYS'] = 'topocent'
    resHead['BUNIT'] = 'Flux'
    if cfg_par['residuals']['BFcube'] == True:
        modName = 'BF'
    fits.writeto(cubeDir+'resCube_'+modName+'.fits',resG1,resHead,overwrite=True)
    fits.writeto(cubeDir+'fitCube_'+modName+'.fits',fitCube,resHead,overwrite=True)
    return
def resLinesFromTable(self,cfg_par):
'''
Computes for each the residuals of the fit. Within which velocity range?
At the moment is within 6*sigmaG1 and 3*sigmaG2
Parameters:
cfg_par: parameter file
gFit_modName: specifies # of gaussian components used for the fit
Uses:
- voroni binned line subtracted datacube
- table of voronoi binned datacube and spectra
-
Returns (located in /residuals/modName/):
- res_linename: residuals computed as the standard deviation of line-fit
within a velocity range given by cenRange centred at the peak of the observed line
- resSTDPeak_linename: residuals computed as the standard deviation of line-fit
within a velocity range given by cenRange centred at the peak of the observed line multiplied by the peak of the line
- snRes_linename: residuals divided by the noise
'''
modName = cfg_par['gFit']['modName']
resModDir = cfg_par['general']['resDir']+modName+'/'
lineInfo = tP.openLineList(cfg_par)
cubeDir = cfg_par['general']['cubeDir']
resName = cubeDir+'resCube_'+modName+'.fits'
f = fits.open(resName)
resHead = f[0].header
if 'CUNIT3' in resHead:
del resHead['CUNIT3']
if 'CTYPE3' in resHead:
del resHead['CTYPE3']
if 'CDELT3' in resHead:
del resHead['CDELT3']
if 'CRVAL3' in resHead:
del resHead['CRVAL3']
if 'CRPIX3' in resHead:
del resHead['CRPIX3']
if 'NAXIS3' in resHead:
del resHead['NAXIS3']
if 'CRDER3'in resHead:
del resHead['CRDER3']
hdul = fits.open(cfg_par['general']['outTableName'])
binInfo = hdul['BININFO'].data
res = hdul['residuals_'+modName].data
for ii in range(0,len(lineInfo['ID'])):
lineName = str(lineInfo['Name'][ii])
if '[' in lineName:
lineName = lineName.replace("[", "")
lineName = lineName.replace("]", "")
lineName = lineName+str(int(lineInfo['Wave'][ii]))
lineThresh = float(lineInfo['SNThresh'][ii])
print('\n\t +++\t\t '+lineName+'\t\t +++')
rmsRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
rmsResPeak = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
stdRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
stdResPeak = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
chiSq = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
SNResLineMap = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
rmsName =resModDir+'rms_'+lineName+'.fits'
rmsPeakName =resModDir+'rmsPeak_'+lineName+'.fits'
stdName =resModDir+'std_'+lineName+'.fits'
stdPeakName =resModDir+'stdPeak_'+lineName+'.fits'
chiSqName = resModDir+'chiRes_'+lineName+'.fits'
SNResNameOut =resModDir+'SN_rms-noise'+lineName+'.fits'
for i in range(0,len(res['BIN_ID'])):
match_bin = np.where(binInfo['BIN_ID']==res['BIN_ID'][i])[0]
for index in match_bin:
rmsRes[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['rms_'+lineName][i]
rmsResPeak[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['rmsPeak_'+lineName][i]
stdRes[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['std_'+lineName][i]
stdResPeak[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['stdPeak_'+lineName][i]
chiSq[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['chiSq_'+lineName][i]
SNResLineMap[int(binInfo['PixY'][index]),int(binInfo['PixX'][index])] = res['SN_rms-noise'+lineName][i]
resHead['WCSAXES'] = 2
fits.writeto(rmsName,rmsRes,resHead,overwrite=True)
fits.writeto(rmsPeakName,rmsResPeak,resHead,overwrite=True)
fits.writeto(stdName,stdRes,resHead,overwrite=True)
fits.writeto(stdPeakName,stdResPeak,resHead,overwrite=True)
fits.writeto(chiSqName,chiSq,resHead,overwrite=True)
fits.writeto(SNResNameOut,SNResLineMap,resHead,overwrite=True)
return
def resLines(self,cfg_par):
'''
Makes the residual maps from the residual table
Parameters:
cfg_par: parameter file
gFit_modName: specifies # of gaussian components used for the fit
Uses:
- voroni binned line subtracted datacube
- table of voronoi binned datacube and spectra
Returns (located in /residuals/modName/):
- resSTD_linename: residuals computed as the standard deviation of line-fit
within a velocity range given by 6*sigmag1 weighted on the fitted amplitude of the line
- resSTDPeak_linename: residuals computed as the standard deviation of line-fit
within a velocity range given by 6*sigmag1 weighted on the observed amplitude of the line
Options:
- compute noise:
when set to True in the parameter file, it computes the noise as the rms in within [-80,-60]AA and [+60,+80]AA with respect to the
rest wavelenght of each line
computes the S/N of each line as the peak/noise in each pixel
Returns:
- SN_linename: S/N map of each line
- noise_linename: noise map of each line
'''
key = 'general'
workDir = cfg_par[key]['workdir']
cubeDir = cfg_par[key]['cubeDir']
modName = cfg_par['gFit']['modName']
resModDir = cfg_par['general']['resDir']+modName+'/'
noiseDir = cfg_par['general']['noiseDir']
resName = cubeDir+'resCube_'+modName+'.fits'
fitCubeName = cubeDir+'fitCube_'+modName+'.fits'
if not os.path.exists(resName):
self.resCube(cfg_par)
else:
pass
f = fits.open(resName)
resCube = f[0].data
resHead = f[0].header
if 'CUNIT3' in resHead:
del resHead['CUNIT3']
if 'CTYPE3' in resHead:
del resHead['CTYPE3']
if 'CDELT3' in resHead:
del resHead['CDELT3']
if 'CRVAL3' in resHead:
del resHead['CRVAL3']
if 'CRPIX3' in resHead:
del resHead['CRPIX3']
if 'NAXIS3' in resHead:
del resHead['NAXIS3']
if 'CRDER3'in resHead:
del resHead['CRDER3']
f = fits.open(cfg_par['general']['outVorLines'])
dd = f[0].data
#to load Voronoi Bin noise : noiseBin
wave,xAxis,yAxis,pxSize,noiseBin, vorBinInfo,dataSpec = tP.openVorLineOutput(cfg_par,cfg_par['general']['outVorLineTableName'],
cfg_par['general']['outVorSpectra'])
#print(noiseBin.shape)
#print(cfg_par['general']['outVorLineTableName'])
#sys.exit(0)
f = fits.open(cfg_par['general']['dataCubeName'])
dd = f[0].data
header = f[0].header
hdul = fits.open(cfg_par['general']['outTableName'])
lines = hdul['LineRes_'+cfg_par['gFit']['modName']].data
linesG1 = hdul['LineRes_g1'].data
#lines['BIN_ID'] = hdul['BININFO'].data['ID']
resNameList=['BIN_ID']
frmList=['i4']
tot = lines['BIN_ID']
hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
tabGen = hduGen[1].data
lineInfo = tP.openLineList(cfg_par)
tableSpec = workDir+cfg_par[key]['tableSpecName']
tab = fits.open(tableSpec)
dataSpec = tab[1].data
specExp = tab[2].data
wave = [item for t in specExp for item in t]
noiseMapName =noiseDir+'noiseMap.fits'
noiseMap = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
for ii in range(0,len(lineInfo['ID'])):
stdArr = np.empty(len(lines['BIN_ID']))
stdPeakArr = np.empty(len(lines['BIN_ID']))
rmsArr = np.empty(len(lines['BIN_ID']))
rmsPeakArr = np.empty(len(lines['BIN_ID']))
peakArr = np.empty(len(lines['BIN_ID']))
chiSqArr = np.empty(len(lines['BIN_ID']))
noiseArr = np.empty(len(lines['BIN_ID']))
SNValues = np.empty(len(lines['BIN_ID']))
SNStdValues = np.empty(len(lines['BIN_ID']))
lineName = str(lineInfo['Name'][ii])
if '[' in lineName:
lineName = lineName.replace("[", "")
lineName = lineName.replace("]", "")
lineName = lineName+str(int(lineInfo['Wave'][ii]))
lineThresh = float(lineInfo['SNThresh'][ii])
print('\n\t +++\t\t '+lineName+'\t\t +++')
stdRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
stdResPeak = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
rmsRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
rmsResPeak = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
chiRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
noiseLine = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
SNLineMap = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
SNRes = np.empty([resHead['NAXIS2'],resHead['NAXIS1']])*np.nan
stdResName =resModDir+'std_'+lineName+'.fits'
stdResPeakName =resModDir+'stdPeak_'+lineName+'.fits'
rmsResName =resModDir+'rms_'+lineName+'.fits'
rmsResPeakName =resModDir+'rmsPeak_'+lineName+'.fits'
chiResName = resModDir+'chiRes_'+lineName+'.fits'
noiseNameLine =noiseDir+'noise_'+lineName+'.fits'
SNMapName =noiseDir+'SN_'+lineName+'.fits'
SNResName =resModDir+'SN_rms-noise'+lineName+'.fits'
for i in range(0,len(lines['BIN_ID'])):
#lineHeigth = np.max(y[indexMin:indexMax])
amp = lines['g1_Amp_'+lineName][i]
cenKmsG1 = linesG1['g1_Centre_'+lineName][i]
#sigKmsG1 = linesG1['g1_SigIntr_'+lineName][i]
#if sigKmsG1 >=2.e3:
# sigKmsG1=2.e3
cenG1 = np.log(cvP.vRadLambda(cenKmsG1,lineInfo['Wave'][ii]))
leftG1 = np.log(cvP.vRadLambda(cenKmsG1-lineInfo['cenRange'][ii],lineInfo['Wave'][ii]))
rightG1 = np.log(cvP.vRadLambda(cenKmsG1+lineInfo['cenRange'][ii],lineInfo['Wave'][ii]))
idxLeft = int(np.where(abs(wave-leftG1)==abs(wave-leftG1).min())[0])
idxRight = int(np.where(abs(wave-rightG1)==abs(wave-rightG1).min())[0])
#define interval where to measure maximum of real line from centroid of 1G-fit
peakLeft = np.log(cvP.vRadLambda(cenKmsG1-140.,lineInfo['Wave'][ii]))
peakRight = np.log(cvP.vRadLambda(cenKmsG1+140.,lineInfo['Wave'][ii]))
idxPeakLeft = int(np.where(abs(wave-peakLeft)==abs(wave-peakLeft).min())[0])
idxPeakRight = int(np.where(abs(wave-peakRight)==abs(wave-peakRight).min())[0])
if cfg_par['residuals']['computeNoise']==True:
leftNoise = np.log(lineInfo['Wave'][ii]-60.)
leftleftNoise = np.log(lineInfo['Wave'][ii]-80.)
rightNoise = np.log(lineInfo['Wave'][ii]+60.)
rightrightNoise = np.log(lineInfo['Wave'][ii]+80.)
idxLeftLeftNoise = int(np.where(abs(wave-leftleftNoise)==abs(wave-leftleftNoise).min())[0])
idxLeftNoise = int(np.where(abs(wave-leftNoise)==abs(wave-leftNoise).min())[0])
idxRightRightNoise = int(np.where(abs(wave-rightrightNoise)==abs(wave-rightrightNoise).min())[0])
idxRightNoise = int(np.where(abs(wave-rightNoise)==abs(wave-rightNoise).min())[0])
# noiseMinRed = cfg_par['general']['redshift']*cfg_par['gFit']['noiseMin']+cfg_par['gFit']['noiseMin']
# noiseMaxRed = cfg_par['general']['redshift']*cfg_par['gFit']['noiseMax']+cfg_par['gFit']['noiseMax']
# idxLeftNoise = int(np.where(abs(np.exp(wave)-noiseMinRed)==abs(np.exp(wave)-noiseMinRed).min())[0])
# idxRightNoise = int(np.where(abs(np.exp(wave)-noiseMaxRed)==abs(np.exp(wave)-noiseMaxRed).min())[0])
a = np.where(tabGen['BIN_ID'] == int(lines['BIN_ID'][i]))[0]
#if not a.size == 0:
idxTable = a[0]
y = dd[:,int(tabGen['PixY'][idxTable]),int(tabGen['PixX'][idxTable])]
#else:
# y = np.zeros((dd.shape[0]))
if modName == 'g2':
amp = lines['g1_Amp_'+lineName][i]+lines['g2_Amp_'+lineName][i]
cenKmsG2 = lines['g2_Centre_'+lineName][i]
sigKmsG2 = lines['g2_SigMeas_'+lineName][i]
cenG2 = np.log(cvP.vRadLambda(cenKmsG2,lineInfo['Wave'][ii]))
leftG2 = np.log(cvP.vRadLambda(cenKmsG2-lineInfo['cenRange'][ii],lineInfo['Wave'][ii]))
rightG2 = np.log(cvP.vRadLambda(cenKmsG2+lineInfo['cenRange'][ii],lineInfo['Wave'][ii]))
idxLeftG2 = int(np.where(abs(wave-leftG2)==abs(wave-leftG2).min())[0])
idxRightG2 = int(np.where(abs(wave-rightG2)==abs(wave-rightG2).min())[0])
idxLeft = np.min([idxLeft,idxLeftG2])
idxRight = np.max([idxRight,idxRightG2])
# if modName =='g3':
# cenKmsG3 = lines['g3_Centre_'+lineName][i]
# sigKmsG3 = lines['g3_SigMeas_'+lineName][i]
# cenG2 = np.log(cvP.vRadLambda(cenKmsG1,lineInfo['Wave'][ii]))
# leftG2 = np.log(cvP.vRadLambda(cenKmsG1-3.*sigKmsG3,lineInfo['Wave'][ii]))
# rightG2 = np.log(cvP.vRadLambda(cenKmsG1+3.*sigKmsG3,lineInfo['Wave'][ii]))
# idxLeftG3 = int(np.where(abs(wave-leftG3)==abs(wave-leftG3).min())[0])
# idxRightG3 = int(np.where(abs(wave-rightG3)==abs(wave-rightG3).min())[0])
#idxLeft = np.min([idxLeft,idxLeftG3])
#idxRight = np.max([idxRight,idxRightG3])
#if ii==0 and cfg_par['residuals']['computeNoise']==True:
# noiseValue = noiseBin[idxLeft][lines['BIN_ID'][i]][idxLeft]*amp
match_bin = np.where(tabGen['BIN_ID']==lines['BIN_ID'][i])[0]
#print(match_bin,lines['BIN_ID'][i])
#result = load_modelresult(cfg_par[key]['modNameDir']+str(lines['BIN_ID'][i])+'_'+cfg_par['gFit']['modName']+'.sav')
if idxRight-idxLeft <2.:
idxLeft-=4
idxRight+=4
for index in match_bin:
# if modName=='g1':
# thresHold = lines['g1_Amp_Hb4861'][i]/tabGen['NSPAX'][index]
# elif modName=='g2':
# thresHold = (lines['g1_Amp_Hb4861'][i]+lines['g2_Amp_Hb4861'][i])/tabGen['NSPAX'][index]
# elif modName=='g3':
# thresHold = (lines['g1_Amp_Hb4861'][i]+lines['g2_Amp_Hb4861'][i]+lines['g3_Amp_Hb4861'][i])/tabGen['NSPAX'][index]
# if thresHold >= lineThresh:
linePeak = np.max(y[idxPeakLeft:idxPeakRight])
stdValue = np.nanstd(resCube[idxLeft:idxRight,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])])
stdValuePeak = np.multiply(stdValue,linePeak)
rmsValue = np.sqrt(np.power(stdValue,2)+np.power(np.nanmean(resCube[idxLeft:idxRight,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]),2))
rmsValuePeak = np.multiply(rmsValue,linePeak)
stdResPeak[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = stdValuePeak
stdRes[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = stdValue
rmsResPeak[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = rmsValuePeak
rmsRes[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = rmsValue
if cfg_par['residuals']['computeNoise']==True:
noise = np.nanstd(np.concatenate([y[idxLeftLeftNoise:idxLeftNoise],y[idxRightNoise:idxRightRightNoise]]))
sn = np.divide(linePeak,noise)
snStd = np.divide(stdValue,noise)
noiseLine[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = noise
SNLineMap[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = sn
SNRes[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = snStd
if modName =='g1':
nvar = 3
elif modName =='g2':
nvar = 6
chiSq=np.divide(np.divide(np.nansum(np.power(resCube[idxLeft:idxRight,int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]-y[idxLeft:idxRight],2)),
np.power(noise,2)),idxRight-idxLeft-nvar)
chiRes[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = chiSq
#if ii==0:
# noiseMap[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = noiseValue
peakArr[i] = linePeak
chiSqArr[i] = chiSq
stdArr[i] = stdValue
stdPeakArr[i] = stdValuePeak
rmsArr[i] = rmsValue
rmsPeakArr[i] = rmsValuePeak
noiseArr[i] = noise
SNValues[i] = sn
SNStdValues[i] = snStd
tot = np.column_stack((tot,stdArr))
resNameList.append('std_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,stdPeakArr))
resNameList.append('stdPeak_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,rmsArr))
resNameList.append('rms_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,rmsPeakArr))
resNameList.append('rmsPeak_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,rmsArr))
resNameList.append('peak_'+lineName)
frmList.append('f8')
resHead['WCSAXES'] = 2
fits.writeto(stdResName,stdRes,resHead,overwrite=True)
fits.writeto(stdResPeakName,stdResPeak,resHead,overwrite=True)
fits.writeto(rmsResName,rmsRes,resHead,overwrite=True)
fits.writeto(rmsResPeakName,rmsResPeak,resHead,overwrite=True)
if cfg_par['residuals']['computeNoise']==True:
fits.writeto(noiseNameLine,noiseLine,resHead,overwrite=True)
fits.writeto(SNMapName,SNLineMap,resHead,overwrite=True)
fits.writeto(SNResName,SNRes,resHead,overwrite=True)
fits.writeto(chiResName,chiRes,resHead,overwrite=True)
tot = np.column_stack((tot,noiseArr))
resNameList.append('noise_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,SNValues))
resNameList.append('SN_'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,SNStdValues))
resNameList.append('SN_rms-noise'+lineName)
frmList.append('f8')
tot = np.column_stack((tot,chiSqArr))
resNameList.append('chiSq_'+lineName)
frmList.append('f8')
#if ii==0:
# fits.writeto(noiseMapName,noiseMap,resHead,overwrite=True)
t = Table(tot, names=(resNameList))
# hdul.append(fits.BinTableHDU(t.as_array(), name='Residuals_'+modName))
try:
tt = Table(hdul['Residuals_'+modName].data)
hdul['Residuals_'+modName] = fits.BinTableHDU(t.as_array(),name='Residuals_'+modName)
except KeyError as e:
tt=fits.BinTableHDU.from_columns(t.as_array(),name='Residuals_'+modName)
hdul.append(tt)
hdul.writeto(cfg_par['general']['outTableName'],overwrite=True)
# try:
# tt = Table(hdul['Ancels'+modName].data)
# hdul['Ancels'+modName] = fits.BinTableHDU.from_columns(sigmaCenArr,name='Ancels'+modName)
# except KeyError as e:
# tt=fits.BinTableHDU.from_columns(sigmaCenArr,name='Ancels'+modName)
# hdul.append(tt)
return 0
def makeLineRatioMaps(self,cfg_par):
workDir = cfg_par['general']['cubeDir']
f = fits.open(cfg_par['general']['dataCubeName'])
dd = f[0].header
#lineInfo = self.openLineList()
#for ii in range(0,len(lineInfo['ID'])):
# lineName = str(lineInfo['Name'][ii])
# if '[' in lineName:
# lineName = lineName.replace("[", "")
# lineName = lineName.replace("]", "")
#lineName = lineName+str(int(lineInfo['Wave'][ii]))
self.momLineRatio(cfg_par,dd,cfg_par['general']['outTableName'])
return
def momLineRatio(self,cfg_par,header,outTableName):
modName = cfg_par['gFit']['modName']
bptDir = cfg_par['general']['bptDir']+'/'
momModDir = cfg_par['general']['momDir']+modName+'/'
if 'CUNIT3' in header:
del header['CUNIT3']
if 'CTYPE3' in header:
del header['CTYPE3']
if 'CDELT3' in header:
del header['CDELT3']
if 'CRVAL3' in header:
del header['CRVAL3']
if 'CRPIX3' in header:
del header['CRPIX3']
if 'NAXIS3' in header:
del header['NAXIS3']
if 'WCSAXES' in header:
del header['WCSAXES']
if 'CRDER3' in header:
del header['CRDER3']
lineMapHead = header.copy()
hdul = fits.open(cfg_par['general']['outTableName'])
lineBPT = hdul['BPT_'+cfg_par['gFit']['modName']].data
hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
tabGen = hduGen[1].data
hbetaMap = fits.open(momModDir+'mom0_'+modName+'-OIII5006.fits')
hbetaData = hbetaMap[0].data
numCols = len(lineBPT.dtype.names)
if modName == 'g2':
numCols = int((numCols-1)/3)
numCols +=1
if modName == 'g3':
numCols = int((numCols-1)/4)
numCols +=1
for i in range(1,numCols):
lineMapG1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
if modName != 'g1':
lineMapToT = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
lineMapG2 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
if modName == 'g3':
lineMapG3 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
for j in range(0,len(lineBPT['BIN_ID'])):
match_bin = np.where(tabGen['BIN_ID']==lineBPT['BIN_ID'][j])[0]
for index in match_bin:
if ~np.isnan(hbetaData[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]):
lineMapG1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j][i]
if modName != 'g1':
lineMapToT[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j][i+numCols*2-2]
lineMapG2[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j][i+numCols-1]
if modName == 'g3':
lineMapG3[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j][i+numCols+2] #TOREVIEW!!!!
lineMapHead['BUNIT'] = 'Flux'
outBPT = bptDir+'BPT-'+str(lineBPT.dtype.names[i])+'.fits'
fits.writeto(bptDir+'BPT-'+str(lineBPT.dtype.names[i])+'.fits',lineMapG1,lineMapHead,overwrite=True)
if modName != 'g1':
outBPTg2 = bptDir+'BPT-'+str(lineBPT.dtype.names[i+numCols-1])+'.fits'
outBPTtot = bptDir+'BPT-'+str(lineBPT.dtype.names[i+numCols*2-2])+'.fits'
fits.writeto(outBPTg2,lineMapG2,lineMapHead,overwrite=True)
fits.writeto(outBPTtot,lineMapToT,lineMapHead,overwrite=True)
if modName == 'g3':
outBPTg3 = bptDir+'BPT-'+str(lineBPT.dtype.names[i+numCols+2])+'.fits'
fits.writeto(bptDir+'BPT-'+str(lineBPT.dtype.names[i+numCols+2])+'.fits',lineMapG3,lineMapHead,overwrite=True)
if cfg_par['lineRatios']['bptMap'] == True:
bpt.bptIM(cfg_par,outBPT)
if modName != 'g1':
bpt.bptIM(cfg_par,outBPTg2)
bpt.bptIM(cfg_par,outBPTtot)
elif modName=='g3':
bpt.bptIM(cfg_par,outBPTg3)
return
def momCDist(self,cfg_par):
f = fits.open(cfg_par['general']['dataCubeName'])
header = f[0].header
f.close()
modName = cfg_par['gFit']['modName']
bptDir = cfg_par['general']['bptDir']+'/'
momModDir = cfg_par['general']['momDir']+modName+'/'
if 'CUNIT3' in header:
del header['CUNIT3']
if 'CTYPE3' in header:
del header['CTYPE3']
if 'CDELT3' in header:
del header['CDELT3']
if 'CRVAL3' in header:
del header['CRVAL3']
if 'CRPIX3' in header:
del header['CRPIX3']
if 'NAXIS3' in header:
del header['NAXIS3']
if 'WCSAXES' in header:
del header['WCSAXES']
if 'CRDER3' in header:
del header['CRDER3']
lineMapHead = header.copy()
hdul = fits.open(cfg_par['general']['outTableName'])
lineBPT = hdul['BPT_'+cfg_par['gFit']['modName']].data
hbetaMap = fits.open(momModDir+'mom0_'+modName+'-Hb4861.fits')
hbetaData = hbetaMap[0].data
hduGen = fits.open(cfg_par['general']['outVorLineTableName'])
tabGen = hduGen[1].data
numCols = len(lineBPT.dtype.names)
if modName == 'g2':
numCols = int((numCols-1)/3)
numCols +=1
if modName == 'g3':
numCols = int((numCols-1)/4)
numCols +=1
lineMapG1 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
if modName != 'g1':
lineMapToT = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
lineMapG2 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
if modName == 'g3':
lineMapG3 = np.zeros([header['NAXIS2'],header['NAXIS1']])*np.nan
for j in range(0,len(lineBPT['BIN_ID'])):
match_bin = np.where(tabGen['BIN_ID']==lineBPT['BIN_ID'][j])[0]
for index in match_bin:
if ~np.isnan(hbetaData[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])]):
lineMapG1[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j]['cDist-OIIIG1']
if modName != 'g1':
lineMapG2[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j]['cDist-OIIIG2']
lineMapToT[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j]['cDist-OIIIToT']
if modName == 'g3':
lineMapG3[int(tabGen['PixY'][index]),int(tabGen['PixX'][index])] = lineBPT[j]['cDist-OIIIG3'] #TOREVIEW!!!!
lineMapHead['BUNIT'] = 'cDistance'
outBPT = bptDir+'cDist-OIIIG1.fits'
fits.writeto(outBPT,lineMapG1,lineMapHead,overwrite=True)
if modName != 'g1':
#print('\n\t************* --- GuFo : ERROR --- **************\n')
#outBPTg2 = bptDir+'BPT-'+str(lineBPT.dtype.names[i+numCols-1])+'.fits'
outBPTG2 = bptDir+'BPT-cDist-OIIIG2.fits'
outBPTToT = bptDir+'BPT-cDist-OIIIToT.fits'
fits.writeto(outBPTG2,lineMapG2,lineMapHead,overwrite=True)
fits.writeto(outBPTToT,lineMapToT,lineMapHead,overwrite=True)
if modName == 'g3':
outBPTG3 = bptDir+'BPT-cDist-OIIIG3.fits'
fits.writeto(outBPTG3,lineMapG3,lineMapHead,overwrite=True)
if cfg_par['lineRatios']['cDistPlot'] == True:
bpt.cDistIM(cfg_par,outBPT)
if modName != 'g1':
bpt.cDistIM(cfg_par,outBPTG2)
bpt.cDistIM(cfg_par,outBPTToT)
elif modName=='g3':
bpt.cDistIM(cfg_par,outBPTG3)
return
def regridMoms(self,basename,slavename):
outName = slavename.split('.fits')[0]
outName = outName+'_rg.fits'
base = fits.open(basename)
bheader = base[0].header
if 'WCSAXES' in bheader:
bheader['WCSAXES'] = 2
bheader['NAXIS'] = 2
slave = fits.open(slavename)
sheader = slave[0].header
if 'WCSAXES' in sheader:
sheader['WCSAXES'] = 2
slave = fits.open(slavename)[0]
sheader['NAXIS'] = 2
bheader['BMIN'] = sheader['BMIN']
bheader['BMAJ'] = sheader['BMAJ']
# if 'FREQ' in slave.header:
# bheader['FREQ'] = sheader['FREQ']
# elif 'CRVAL3' in sheader:
# bheader['FREQ'] = sheader['CRVAL3']
#print basename
#for i in base.header.keys():
# print i,'\t',base.header[i]
#print slavename
#for i in slave.header.keys():
# print i,'\t',slave.header[i]
newslave, footprint = reproject_exact(slave, bheader)
fits.writeto(outName, newslave, bheader, overwrite=True)
return outName | Fil8/GuFo | scavengers/momPlay.py | momPlay.py | py | 52,897 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tPlay.tplay",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cvPlay.convert",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bptPlot.BPTplot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "momPlot.MOMplot",
"lin... |
23993127692 | '''
301. Remove Invalid Parentheses
https://leetcode.com/problems/remove-invalid-parentheses/
'''
from collections import deque
from typing import List
def removeInvalidParentheses(self, s: str) -> List[str]:
# helper to check if the expression is valid
def isValid(expr):
count = 0
for ch in expr:
if ch not in '()':
continue
if ch == '(':
count += 1
elif ch == ')':
count -= 1
if count < 0:
return False
return count == 0
if len(s) == 0:
return [""]
# queue holds expressions to evaluate
queue = deque()
# holds expressions that were evaluated
visited = set()
queue.append(s)
visited.add(s)
found = False # all optimal solutions will be found on the same level
output = []
while queue:
expr = queue.popleft()
if isValid(expr):
output.append(expr)
found = True
# no need to check by removing more, as we found on this level
if found:
continue
for i in range(len(expr)):
if expr[i] not in '()':
continue
candidate = expr[:i] + expr[i+1:] #remove one parentheses
if candidate not in visited:
queue.append(candidate)
visited.add(candidate)
return output if output else [""]
| asset311/leetcode | strings/remove_invalid_parentheses.py | remove_invalid_parentheses.py | py | 1,459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
}
] |
31432419627 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.db.models.signals import post_save
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.core.cache import cache
from wger.nutrition.models import NutritionPlan, Meal, MealItem
@receiver(post_delete, sender=NutritionPlan)
@receiver(post_delete, sender=Meal)
@receiver(post_delete, sender=MealItem)
def post_delete_activity(sender, instance, **kwargs):
'''
Signal: post_delete
Sender: NutritionPlan, Meal, MealItem
'''
plan = instance.get_owner_object()
cache.delete('nutritional_values-{0}'.format(plan.id))
@receiver(post_save, sender=NutritionPlan)
@receiver(post_save, sender=Meal)
@receiver(post_save, sender=MealItem)
def post_save_activity(sender, instance, **kwargs):
'''
Signal: post_save
Sender: NutritionPlan, Meal, MealItem
'''
plan = instance.get_owner_object()
cache.delete('nutritional_values-{0}'.format(plan.id))
| andela/wger-sparta | wger/nutrition/signals.py | signals.py | py | 1,609 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.core.cache.cache.delete",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 24,
"usage_type": "call"
},
{
"a... |
11874793301 | from setuptools import setup
with open('README.md') as reader:
long_description = reader.read()
setup(
author='Jaedson Silva',
author_email='imunknowuser@protonmail.com',
name='ufinder',
version='1.0.0',
description='Search URL paths with UFinder.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['ufinder'],
install_requires=['requests'],
license='MIT',
project_urls={
'Source Code': 'https://github.com/jaedsonpys/ufinder',
'License': 'https://github.com/jaedsonpys/ufinder/blob/master/LICENSE'
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Security'
],
entry_points={
'console_scripts': [
'ufinder = ufinder.ufinder:main'
]
},
)
| jaedsonpys/ufinder | setup.py | setup.py | py | 1,161 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
72593481705 | import re
import json
from random import randint
from datetime import datetime
# FORMATTING
############
def format_processo_juridico(legal_process_id): # type: (str) -> (str)
"""
Format an adequately formatted numbers-only Legal Process ID number,
Returns a Legal Process ID number formatted with standard visual aid
symbols.
Returns None if Legal Process ID number is invalid.
"""
if legal_process_id.isdigit() and len(legal_process_id) == 20:
capture_fields = r"(\d{7})(\d{2})(\d{4})(\d)(\d{2})(\d{4})"
include_chars = r"\1-\2.\3.\4.\5.\6"
return re.sub(capture_fields, include_chars, legal_process_id)
return None
def remove_symbols(processo_juridico: str): # type: (str) -> str
"""Removes common symbols from a legal process number string.
The standard symbols removed are "." and "-"
Args:
process_juridico[str]: A legal process number string
Returns:
[str]: A legal process number string without symbols
"""
return processo_juridico.replace(".", "").replace("-", "")
def generate_processo_juridico(
ano=datetime.now().year, orgao=randint(1, 9)
): # type: (int, int) -> (str)
"""
Generates a random valid number of a Legal Process ID number.
"""
if ano < datetime.now().year or orgao not in range(1, 10):
return ""
# Getting possible legal process ids from 'legal_process_ids.json' asset
with open("brutils/data/legal_process_ids.json") as file:
legal_process_ids = json.load(file)
_ = legal_process_ids[f"orgao_{orgao}"]
TR = str(
_["id_tribunal"][randint(0, (len(_["id_tribunal"]) - 1))]
).zfill(2)
OOOO = str(_["id_foro"][randint(0, (len(_["id_foro"])) - 1)]).zfill(4)
NNNNNNN = str(randint(0, 9999999)).zfill(7)
DD = _checksum(f"{NNNNNNN}{ano}{orgao}{TR}{OOOO}")
return f"{NNNNNNN}{DD}{ano}{orgao}{TR}{OOOO}"
def _checksum(basenum): # type: (int) -> str
"""
Checksum to compute the verification digit for a Legal Process ID number.
`basenum` needs to be a digit without the verification id.
"""
return str(97 - ((int(basenum) * 100) % 97)).zfill(2)
def is_valid_processo_juridico(legal_process_id):  # type: (str) -> bool
    """Return whether a Legal Process ID is valid.

    A valid ID has a verification code matching its checksum and references
    a tribunal/forum pair that exists for its judicial body (orgao).
    """
    digits = remove_symbols(legal_process_id)
    # Fixed positions within the 20-digit layout.
    verification = digits[7:9]
    orgao = digits[13:14]
    tribunal = digits[14:16]
    foro = digits[16:]
    with open("brutils/data/legal_process_ids.json") as file:
        legal_process_ids = json.load(file)
    process = legal_process_ids.get(f"orgao_{orgao}")
    if not process:
        return False
    ids_match = int(tribunal) in process.get("id_tribunal") and int(
        foro
    ) in process.get("id_foro")
    # Checksum is computed over the ID with the verification digits removed.
    base = int(digits[0:7] + digits[9:])
    return _checksum(base) == verification and ids_match
| brazilian-utils/brutils-python | brutils/legal_process.py | legal_process.py | py | 3,189 | python | en | code | 112 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "random.randint",
"... |
11394943095 | from argparse import Namespace
opts = Namespace()  # flat attribute bag holding all face-embedding options

# StyleGAN2 setting
opts.size = 1024  # generator output resolution
opts.ckpt = "pretrained_models/ffhq.pt"  # pretrained StyleGAN2 checkpoint (FFHQ)
opts.channel_multiplier = 2
opts.latent = 512  # latent vector dimensionality
opts.n_mlp = 8  # depth of the mapping network
# loss options
opts.percept_lambda = 1.0  # weight of the perceptual loss term
opts.l2_lambda = 1.0  # weight of the pixel-wise L2 loss term
opts.p_norm_lambda = 1e-3  # weight of the latent p-norm regularizer
# arguments
opts.device = 'cuda'
opts.seed = 2
opts.tile_latent = False
opts.opt_name = 'adam'  # optimizer selector
opts.learning_rate = 0.01
opts.lr_schedule = 'fixed'
# opts.steps = 1300
opts.steps = 1000  # number of optimization steps
opts.save_intermediate = False
opts.save_interval = 300  # step interval for intermediate saves (when enabled)
opts.verbose = False
face_opts = opts | ZPdesu/MindTheGap | options/face_embed_options.py | face_embed_options.py | py | 561 | python | en | code | 47 | github-code | 36 | [
{
"api_name": "argparse.Namespace",
"line_number": 4,
"usage_type": "call"
}
] |
7755819569 | from funclib.resources import Resources
from funclib.stamps.stamp import TRANSPARENT, ImageStamp
from PIL import Image, ImageDraw, ImageFont
from PIL.Image import Image as PILImage
class LogoStamp(ImageStamp):
    """Stamp that pastes the logo image near the bottom-right corner."""

    def apply(self, image: PILImage) -> PILImage:
        # Fixed pixel offsets from the right and bottom edges.
        RIGHT_OFFSET = 141
        BOTTOM_OFFSET = 55
        logo = Image.open(Resources.logo_path())
        position = (image.width - RIGHT_OFFSET, image.height - BOTTOM_OFFSET)
        image.paste(logo, position)
        return image
class PcUrlStamp(ImageStamp):
    """Stamp that draws the Planetary Computer URL on a white rounded
    rectangle near the bottom-right corner of the image."""

    def __init__(self) -> None:
        # NOTE(review): ImageFont.FreeTypeFont.getsize was deprecated in
        # Pillow 9.2 and removed in Pillow 10 -- confirm the pinned Pillow
        # version, or migrate to font.getbbox()/getlength().
        self.font = ImageFont.truetype(Resources.font_path(), 12)  # type: ignore
        self.text = "planetarycomputer.microsoft.com"
        self.text_width, self.text_height = self.font.getsize(self.text)

    def apply(self, image: PILImage) -> PILImage:
        # Draw on a transparent overlay, then alpha-composite it onto the
        # source frame so the backdrop blends correctly.
        brand_frame = Image.new("RGBA", (image.width, image.height), TRANSPARENT)
        BOTTOM_OFFSET = 16
        PADDING = 2
        draw = ImageDraw.Draw(brand_frame)
        # Anchor the text relative to the bottom-right corner.
        x, y = (
            image.width - self.text_width - PADDING * 4.5,
            image.height - self.text_height - BOTTOM_OFFSET,
        )
        # Draw a padded background for the text
        draw.rounded_rectangle(
            (
                (x - PADDING, y - self.text_height / 2 + PADDING),
                (x + self.text_width + PADDING, y + self.text_height + PADDING * 2),
            ),
            radius=1,
            fill=(255, 255, 255, 255),  # opaque white backdrop
        )
        draw.text(
            (x, y),
            text=self.text,
            font=self.font,
            fill=(0, 0, 0, 255),  # black text
        )
        return Image.alpha_composite(image, brand_frame)
| microsoft/planetary-computer-apis | pcfuncs/funclib/stamps/branding.py | branding.py | py | 1,709 | python | en | code | 88 | github-code | 36 | [
{
"api_name": "funclib.stamps.stamp.ImageStamp",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image... |
15681614045 | import logging
import os
import time
# import traceback
import csurpc
import config
# Cached "host:port" of the current clup primary server, and the timestamp
# of the last successful lookup (refreshed by get_server_connect).
__server_address = None
__get_server_address_time = 0
def get_server_address():
    """Return the cached clup server address (None before the first lookup)."""
    return __server_address
def get_server_connect():
    """Resolve the current clup primary server and return a connected client.

    Reads ``server_address`` from clup-agent.conf.  With a single configured
    address it connects directly; with several addresses (multi-clup mode) it
    polls every node, accepts as primary the host reported by at least two
    nodes (a quorum), caches that address for 60 seconds, and reuses the
    cached address between refreshes.

    Returns:
        (0, client) on success, or (-1, error_message) on failure.
    """
    global __server_address
    global __get_server_address_time
    try:
        str_server_address = config.get('server_address')
        if not str_server_address:
            err_msg = 'can not find server_address config in clup-agent.conf!'
            logging.fatal(err_msg)
            os._exit(1)
        str_server_address = str_server_address.strip()
        hostport_list = str_server_address.split(',')
        host_list = []
        for hostport in hostport_list:
            cells = hostport.split(':')
            host_list.append((cells[0], int(cells[1])))
        if len(host_list) == 0:
            err_msg = 'can not server_address config in clup-agent.conf!'
            logging.fatal(err_msg)
            os._exit(1)
        if len(host_list) > 1:
            curr_time = time.time()
            # Avoid refreshing server_address too frequently: reuse the
            # cached primary for 60 seconds.
            if __server_address is None or curr_time - __get_server_address_time > 60:
                # Build a dict keyed by primary-host ip whose value counts
                # how many nodes voted for that host; initialize counts to 0.
                primary_host_dict = {}
                primary_port = '4242'
                my_config_clup_list = []
                for host, port in host_list:
                    primary_host_dict[host] = 0
                    primary_port = port
                    my_config_clup_list.append(host)
                rpc_conn_dict = {}
                for host, port in host_list:
                    server_address = "%s:%d" % (host, port)
                    try:
                        c1 = csurpc.Client()
                        c1.connect("tcp://%s" % server_address, password=config.get('internal_rpc_pass'))
                        rpc_conn_dict[host] = c1
                        primary_host, clup_host_list = c1.get_clup_node_info()
                        logging.debug(f"{host} return primary is {primary_host}, clup_host_list is {repr(clup_host_list)}.")
                        if len(clup_host_list) == 0:
                            logging.fatal(f"clup({host}) is not multiple clup mode, clup-agent exit!")
                            os._exit(1)
                        if list(set(my_config_clup_list) ^ (set(clup_host_list))):
                            logging.fatal(f"my config clup list({my_config_clup_list}) not equal return clup list({clup_host_list})!")
                            os._exit(1)
                        if not primary_host:
                            continue
                        if primary_host not in primary_host_dict:
                            # BUG FIX: the original joined ``host_list`` (a
                            # list of (host, port) tuples), which raises
                            # TypeError and was silently swallowed by the
                            # except below, so this fatal exit never fired.
                            # Join the plain host-name list instead.
                            logging.fatal(f"{host} return primary {primary_host} is not in my config({','.join(my_config_clup_list)}), clup-agent exit!")
                            os._exit(1)
                        primary_host_dict[primary_host] += 1
                    except Exception as e:
                        logging.info(f"Can not connect to {server_address}: {str(e)}.")
                        continue
                actual_primary_host = ''
                # A host is the true primary only when >= 2 nodes agree.
                for host in primary_host_dict:
                    if primary_host_dict[host] >= 2:
                        actual_primary_host = host
                        break
                if not actual_primary_host:
                    return -1, "Can not find primary clup!"
                actual_primary_address = f"{actual_primary_host}:{primary_port}"
                if __server_address is not None and actual_primary_address != __server_address:
                    logging.info(f"switch clup server from {__server_address} to {actual_primary_host}.")
                __server_address = actual_primary_address
                __get_server_address_time = curr_time
                # Close connections to non-primary nodes; keep the primary's.
                for host in rpc_conn_dict:
                    if host != actual_primary_host:
                        rpc_conn_dict[host].close()
                c1 = rpc_conn_dict[actual_primary_host]
                return 0, c1
        else:
            __server_address = str_server_address
        # Single-address mode, or multi-address mode with a fresh cache:
        # connect using the (cached) __server_address.
        c1 = csurpc.Client()
        c1.connect("tcp://%s" % __server_address, password=config.get('internal_rpc_pass'))
        return 0, c1
    except Exception as e:
        return -1, "Can not connect clup: " + str(e)
def get_rpc_connect(ip, rpc_port=0):
    """Connect to the clup-agent RPC service on ``ip``.

    When ``rpc_port`` is 0 (falsy), the configured ``agent_rpc_port`` is
    used instead.

    Returns:
        (0, client) on success, or (-1, error_message) on failure.
    """
    try:
        port = rpc_port if rpc_port else config.get('agent_rpc_port')
        client = csurpc.Client()
        client.connect("tcp://%s:%s" % (ip, port), password=config.get('internal_rpc_pass'))
        return 0, client
    except Exception as e:
        return -1, "Can not connect %s: %s" % (ip, str(e))
def os_read_file(host, file_path, offset, data_len):
    """Read ``data_len`` bytes at ``offset`` from ``file_path`` on ``host``
    through the agent RPC service.

    Returns:
        (err_code, data_or_error_message)
    """
    err_code, conn = get_rpc_connect(host)
    if err_code != 0:
        # On failure ``conn`` carries the error message, not a client.
        logging.error(f"Can not connect {host}: maybe host is down.")
        return err_code, conn
    err_code, err_msg = conn.os_read_file(file_path, offset, data_len)
    conn.close()
    return err_code, err_msg
def pg_get_valid_wal_list_le_pt(host, pgdata, pt):
    """Fetch from ``host`` the valid WAL file list under ``pgdata`` with
    position <= ``pt``, via the agent RPC service.

    Returns:
        (err_code, wal_list_or_error_message)
    """
    err_code, conn = get_rpc_connect(host)
    if err_code != 0:
        logging.error(f"Can not connect to {host}: maybe host is down.")
        return err_code, conn
    err_code, err_msg = conn.pg_get_valid_wal_list_le_pt(pgdata, pt)
    # The connection is always closed before any error is logged,
    # matching the original success/failure ordering.
    conn.close()
    if err_code != 0:
        logging.error(f"Call rpc pg_get_valid_wal_list_le_pt({pgdata}, {pt}) failed: {err_msg}.")
    return err_code, err_msg
def task_insert_log(task_id, task_state, msg, task_type):
    """Record a task log entry on the clup server side.

    Returns:
        (err_code, result) -- ``result`` is the error message when the
        server connection failed, otherwise the RPC return value.
    """
    err_code, conn = get_server_connect()
    if err_code != 0:
        logging.error(f"connect clup-server failed: {conn}.")
        return err_code, conn
    ret = conn.task_insert_log(task_id, task_state, msg, task_type)
    conn.close()
    return err_code, ret
| csudata/clup-agent | lib/rpc_utils.py | rpc_utils.py | py | 6,001 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.fatal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os._exit",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.fatal",
"line_number": ... |
42914797613 | import csv
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Activation, Dropout, Lambda, Cropping2D, Conv2D, MaxPool2D
from tensorflow.keras.utils import plot_model
lines = [] # Stores lines read in csv file
# NOTE(review): the path is relative to the working directory -- assumes the
# script runs next to the ../training folder; confirm the launch location.
with open('../training/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
# Separates path to images in training and validation sets (80/20 split)
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# Generator to generate the training and validation batches when requested
def generator(samples, batch_size=10):
    """Yield endless shuffled batches of (images, steering measurements).

    Each csv row contributes 6 samples: the center/left/right camera images
    plus a horizontally flipped copy of each.  Side-camera measurements are
    offset by +/- ``correction`` so the model learns to steer back toward
    the center.

    Args:
        samples: list of driving_log.csv rows
        batch_size: number of csv rows consumed per yielded batch
    """
    num_samples = len(samples)
    correction = 0.2  # steering-angle offset applied to left/right cameras
    while 1: # Loop forever so the generator never terminates
        sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            measurements = []
            for batch_sample in batch_samples:
                # Retrieves path from center, left and right image
                source_path_center = batch_sample[0]
                source_path_left = batch_sample[1]
                source_path_right = batch_sample[2]
                # Keep only the basename of each recorded path (split on '/')
                filename_center =source_path_center.split('/')[-1]
                filename_left =source_path_left.split('/')[-1]
                filename_right =source_path_right.split('/')[-1]
                # Redefine path of each image
                current_path_center = '../training/IMG/' + filename_center
                current_path_left = '../training/IMG/' + filename_left
                current_path_right = '../training/IMG/' + filename_right
                # Read the image in current path
                image_center = mpimg.imread(current_path_center)
                image_left = mpimg.imread(current_path_left)
                image_right = mpimg.imread(current_path_right)
                # Append image to the list of images
                images.append(image_center)
                images.append(image_left)
                images.append(image_right)
                # Retrieve center, left and right measurements
                measurement_center = float(batch_sample[3])
                measurement_left = measurement_center + correction
                measurement_right = measurement_center - correction
                # Append measurement to the list of measurements for center, left and right images
                measurements.append(measurement_center)
                measurements.append(measurement_left)
                measurements.append(measurement_right)
                # Flip the images, store the flipped image and the negated measurement
                for image in [image_center, image_left, image_right]:
                    image_flipped = np.fliplr(image)
                    images.append(image_flipped)
                for measurement in [measurement_center, measurement_left, measurement_right]:
                    measurement_flipped = -measurement
                    measurements.append(measurement_flipped)
            # Convert the batch to numpy arrays (image cropping happens inside the model)
            X_train = np.array(images)
            y_train = np.array(measurements)
            yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
# Create a Sequential object to define the neural network
model = Sequential()
# Lambda layer to normalize the input data to [-0.5, 0.5]
model.add(Lambda(lambda x: (x/255.0)-0.5 , input_shape=(160,320,3)))
# Cropping image in the y axis (70 px top, 25 px bottom) to avoid feeding undesired features to the network
model.add(Cropping2D(cropping=((70,25), (0,0))))
# Set of 5 convolutional layers. First 3 have a subsampling of 2x2, the others are the typical 1x1
model.add(Conv2D(24,(5,5), activation='relu', strides= (2,2)))
model.add(Dropout(0.5))
model.add(Conv2D(36,(5,5),activation='relu', strides = (2,2)))
model.add(Conv2D(48,(5,5), activation='relu', strides=(2,2)))
model.add(Dropout(0.5))
model.add(Conv2D(64,(3,3), activation='relu'))
model.add(Conv2D(64,(3,3), activation='relu'))
model.add(Dropout(0.5))
# Flatten the data to enter new fully connected layers
model.add(Flatten())
# Set of 3 fully connected layers
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
# Output layer: single steering-angle regression value
model.add(Dense(1))
# Compile deep neural network with loss function Mean Square Error (mse) and adam optimizer
model.compile(loss='mse', optimizer='adam')
plot_model(model, to_file='model.png', show_shapes=True)
# Use fit_generator to train the model
# NOTE(review): Model.fit_generator is deprecated in TF2 (Model.fit accepts
# generators directly) -- confirm the pinned TensorFlow version.
history_object = model.fit_generator(train_generator, steps_per_epoch= \
            int(np.ceil(len(train_samples)/32)), validation_data=validation_generator, \
            validation_steps=int(np.ceil(len(validation_samples)/32)), verbose= 1, epochs=20)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
# Save trained model in file
print('Saving model...')
model.save('model2.h5')
print('Mode has been saved!')
| juandarr/Behavioral-cloning | model.py | model.py | py | 5,823 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 28,
"usage_type": "call"
},
{
"api_nam... |
14145252892 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 19:17:44 2020
@author: b
https://www.youtube.com/watch?v=T4nZDuakYlU&list=PLO_fdPEVlfKoHQ3Ua2NtDL4nmynQC8YiS&index=9
Selection de variable
Dnas le module sklearn.module_selection,
on retrouve les transformers et les tests de dépendances
Selecteur variance: permet de sélectionner les variables selon leur variance ()
-VarianceThreshold: élimine les variables dont la variance est inférieure à un certain seuil
Selecteur : test statistique
test de dépendance, test ANOVA
-GenericUnivariateSelect
-SelectPercentile: sélecte toutes les variables qui sont au dessus d'un certain pourcentage de score
-SelectKBest: Sélectionne les K variables X dont le score du test de dépendance avec y est le plus élevé
-SelectFpr
-SelectFdr
-SelectFwe
Selecteur estimateur coefs, sélection des variables les plus importantes
-SelectFromModel: entraine un estimateur puis sélectionne les variables les plus importantes pour cet estimateur
Note: compatible avec les estimateurs qui développent une fonction paramétrée (attribut .coef_ ou .feature_importance_)
K-Nearest Neighbour incompatible
-RFE Recursif Feature Elimination: élimine les variables les moins importantes de façon récursive
un estimateur est entrainé plusieurs fois, après chaque entrainement, des features sont éliminées sur
la base des coefficients les plus faibles de l'estimateur
-RFECV
Test de dépendance: utile pour les problèmes de classification, xhi², ANOVA
-chi2
-f_classif
-mutual_info_classif
Test utile pour la régression: Pearson Corr
-f_regression
-info_regression
"""
#Selecteur variance:
#VarianceThreshold
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from sklearn.feature_selection import VarianceThreshold
iris = load_iris()
X = iris.data
y = iris.target
plt.plot(X)
plt.legend(iris.feature_names)
# Selection des variables
X.var(axis=0) # Donne la variance selon chaque variable
selector = VarianceThreshold(threshold=0.2)
selector.fit(X)
selector.get_support() # indique les variables qui ont été sélectionner
np.array(iris.feature_names)[selector.get_support()]
# Selecteur : test statistique
# Sélection de variable sur les test de dépendance, en générale, cette technique est plus puissante
# SelectKBest
from sklearn.feature_selection import SelectKBest, chi2
chi2(X, y) # tableau avec chi2 statistique et p-value
selector = SelectKBest(chi2, k=1) # Selecteur qui va retourner 1 variable parmis les 4, celle qui a le plsu d'impact
selector.fit_transform(X, y)
np.array(iris.feature_names)[selector.get_support()]
#Selecteur estimateur coefs
#SelectFromModel
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import SGDClassifier
selector = SelectFromModel(SGDClassifier(random_state=0),
threshold='mean')
selector.fit(X, y)
selector.get_support()
# Quelles sont les coefficient qui ont été trouvées ?
selector.estimator_.coef_
"""
Pour bien comprendre la matrice affichée
X.shape : (150,4)
y.shape : (150,1) avec 3 classes
On transforme la matrice X (150*4) en matrice y (150*3) en multipliant par une matrice (4*3)
le vecteur paramètre theta est donc une matrice de 4 lignes et de 3 colonnes
SelectFromModel va sélectionner la moyenne selon les colonnes et va sélectionner toutes les variables supérieure au seuil
"""
# Sélecteur récursif
# RFE Recursif Feature Elimination
from sklearn.feature_selection import RFE, RFECV
selector = RFECV(SGDClassifier(), step=1, #step: nb de variable à élminer à chaque itération
min_features_to_select=2, #min_features_to_select: cb restera-t-il de variable à la fin
cv=5)
selector.fit(X, y)
selector.ranking_ # permet de voir le classement finale des différentes variables
selector.grid_scores_ # score de SGDClassifier à chaque itération, cad à chaque enlèvement de variable
| b846/Data | 4b YT Modele selection.py | 4b YT Modele selection.py | py | 3,976 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "m... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.