seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31551435271 | import numpy as np
import yaml
import matplotlib.pyplot as plt
file = 'D:/Projects/PhaseTransistor/Data/Simulation/Phonon/4_D3BJ_FD_vdw/phonon/eigenvectors/band.yaml'
def ReadPhonopyData(band_yaml):
    """Parse a phonopy ``band.yaml`` output file into a Python dict.

    Parameters
    ----------
    band_yaml : str
        Path to the phonopy ``band.yaml`` file.

    Returns
    -------
    dict
        The parsed YAML document (keys such as 'natom' and 'phonon').
    """
    # safe_load is the recommended loader for plain-data YAML (phonopy output
    # contains no Python-specific tags) and is stricter than FullLoader.
    with open(band_yaml) as f:
        return yaml.safe_load(f)
def RearrangeEigenvector(eigenvector_rawdata, natoms, degree_of_freedom):
    """Allocate the (dim x dim) normal-coordinate matrix for ``natoms`` atoms.

    dim = natoms * degree_of_freedom.

    NOTE(review): this looks like an unfinished stub -- ``eigenvector_rawdata``
    is not used yet and the matrix is returned zero-filled.  The original
    version discarded the array and returned None; returning the array makes
    the function usable and testable without breaking callers that ignored
    the previous None return.
    """
    dim = int(natoms * degree_of_freedom)
    normal_coordinate = np.zeros((dim, dim))
    return normal_coordinate
def GetGammaEigenvertor(band_yaml, degree_of_freedom=3):
    """Extract the Gamma-point eigenvectors from a phonopy band.yaml file.

    Parameters
    ----------
    band_yaml : str
        Path to the phonopy ``band.yaml`` file.
    degree_of_freedom : int, optional
        Degrees of freedom per atom (default 3 for x, y, z).

    Returns
    -------
    (numpy.ndarray, list, int)
        The (nbands x nbands) matrix of real parts of the eigenvectors,
        the q-position of the first phonon point, and the band count.
    """
    data = ReadPhonopyData(band_yaml)
    natoms = data['natom']
    Gamma = data['phonon'][0]
    k_point = Gamma['q-position']
    bands = Gamma['band']
    nbands = len(bands)  # num_bands = num_atoms * degree_of_freedom
    normal_coordinate = np.zeros((nbands, nbands))
    for n in range(nbands):
        eigenvector = bands[n]['eigenvector']
        for i in range(natoms):
            for j in range(degree_of_freedom):
                # BUG FIX: the column index was hard-coded as i*3+j, which
                # silently broke for any degree_of_freedom other than 3.
                normal_coordinate[n][i * degree_of_freedom + j] = eigenvector[i][j][0]  # take the real part
    return normal_coordinate, k_point, nbands
#a = GetGammaEigenvertor(file)
#print(a[0]) | MajestyV/VASPWheels | GetVibrationalDisplacement.py | GetVibrationalDisplacement.py | py | 1,160 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numb... |
74718384033 | import datetime
import queue
import logging
import signal
import time
import threading
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from tkinter import ttk, VERTICAL, HORIZONTAL, N, S, E, W
logger = logging.getLogger(__name__)
class Clock(threading.Thread):
    """Background thread that logs the current time once per second.

    Every fifth second the record is emitted at ERROR level instead of INFO,
    so the UI can demonstrate per-level colouring of log output.
    """

    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()

    def run(self):
        logger.debug('Clock started')
        last_second = -1
        while not self._stop_event.is_set():
            now = datetime.datetime.now()
            if now.second != last_second:
                last_second = now.second
                level = logging.ERROR if now.second % 5 == 0 else logging.INFO
                logger.log(level, now)
            time.sleep(0.2)

    def stop(self):
        """Ask the thread to exit at its next 0.2 s poll."""
        self._stop_event.set()
class QueueHandler(logging.Handler):
    """Logging handler that forwards every record into a queue.

    Usable from any thread: ``emit`` only enqueues the record; the GUI
    thread (ConsoleUi) drains the queue on its own schedule.
    """
    # Based on Moshe Kaplan's example: https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
    # (https://stackoverflow.com/questions/13318742/python-logging-to-tkinter-text-widget) is not thread safe!
    # See https://stackoverflow.com/questions/43909849/tkinter-python-crashes-on-new-thread-trying-to-log-on-main-thread

    def __init__(self, log_queue):
        super().__init__()
        self.log_queue = log_queue

    def emit(self, record):
        """Enqueue the record instead of formatting/writing it here."""
        self.log_queue.put(record)
class ConsoleUi:
    """Drain the logging queue and render each record in a ScrolledText widget."""

    def __init__(self, frame):
        self.frame = frame
        # Read-only scrolled text area with one colour tag per log level.
        self.scrolled_text = ScrolledText(frame, state='disabled', height=12)
        self.scrolled_text.grid(row=0, column=0, sticky=(N, S, W, E))
        self.scrolled_text.configure(font='TkFixedFont')
        level_styles = {
            'INFO': {'foreground': 'black'},
            'DEBUG': {'foreground': 'gray'},
            'WARNING': {'foreground': 'orange'},
            'ERROR': {'foreground': 'red'},
            'CRITICAL': {'foreground': 'red', 'underline': 1},
        }
        for tag, options in level_styles.items():
            self.scrolled_text.tag_config(tag, **options)
        # Route log records through a queue so any thread can log safely.
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        self.queue_handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
        logger.addHandler(self.queue_handler)
        # Begin polling the queue from inside the Tk event loop.
        self.frame.after(100, self.poll_log_queue)

    def display(self, record):
        """Append one formatted record, coloured by its level name."""
        text = self.queue_handler.format(record)
        self.scrolled_text.configure(state='normal')
        self.scrolled_text.insert(tk.END, text + '\n', record.levelname)
        self.scrolled_text.configure(state='disabled')
        self.scrolled_text.yview(tk.END)  # keep the newest line visible

    def poll_log_queue(self):
        """Display every queued record, then reschedule in 100 ms."""
        while True:
            try:
                record = self.log_queue.get(block=False)
            except queue.Empty:
                break
            self.display(record)
        self.frame.after(100, self.poll_log_queue)
class FormUi:
    """Form pane with a level selector, a message entry and a submit button."""

    def __init__(self, frame):
        self.frame = frame
        # Logging level selector (read-only combobox).
        self.level = tk.StringVar()
        ttk.Label(self.frame, text='Level:').grid(column=0, row=0, sticky=W)
        self.combobox = ttk.Combobox(
            self.frame,
            textvariable=self.level,
            width=25,
            state='readonly',
            values=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
        )
        self.combobox.current(0)
        self.combobox.grid(column=1, row=0, sticky=(W, E))
        # Free-text message to log.
        self.message = tk.StringVar()
        ttk.Label(self.frame, text='Message:').grid(column=0, row=1, sticky=W)
        ttk.Entry(self.frame, textvariable=self.message, width=25).grid(column=1, row=1, sticky=(W, E))
        # Submit button emits the message at the selected level.
        self.button = ttk.Button(self.frame, text='Submit', command=self.submit_message)
        self.button.grid(column=1, row=2, sticky=W)

    def submit_message(self):
        """Log the entered message at the level chosen in the combobox."""
        logger.log(getattr(logging, self.level.get()), self.message.get())
class ThirdUi:
    """Placeholder pane demonstrating a third frame in the layout."""

    def __init__(self, frame):
        self.frame = frame
        for row, caption in ((1, 'This is just an example of a third frame'),
                             (4, 'With another line here!')):
            ttk.Label(self.frame, text=caption).grid(column=0, row=row, sticky=W)
class App:
    """Top-level application: builds the pane layout and wires up shutdown."""

    def __init__(self, root):
        # root: the tk.Tk instance this application owns.
        self.root = root
        root.title('Logging Handler')
        root.columnconfigure(0, weight=1)
        root.rowconfigure(0, weight=1)
        # Create the panes and frames
        vertical_pane = ttk.PanedWindow(self.root, orient=VERTICAL)
        vertical_pane.grid(row=0, column=0, sticky="nsew")
        horizontal_pane = ttk.PanedWindow(vertical_pane, orient=HORIZONTAL)
        vertical_pane.add(horizontal_pane)
        form_frame = ttk.Labelframe(horizontal_pane, text="MyForm")
        form_frame.columnconfigure(1, weight=1)
        horizontal_pane.add(form_frame, weight=1)
        console_frame = ttk.Labelframe(horizontal_pane, text="Console")
        console_frame.columnconfigure(0, weight=1)
        console_frame.rowconfigure(0, weight=1)
        horizontal_pane.add(console_frame, weight=1)
        third_frame = ttk.Labelframe(vertical_pane, text="Third Frame")
        vertical_pane.add(third_frame, weight=1)
        # Initialize all frames
        self.form = FormUi(form_frame)
        self.console = ConsoleUi(console_frame)
        self.third = ThirdUi(third_frame)
        # Background clock thread demonstrates logging from another thread.
        self.clock = Clock()
        self.clock.start()
        # Route every close path (window button, Ctrl-Q, SIGINT) through quit
        # so the clock thread is always stopped before the window dies.
        self.root.protocol('WM_DELETE_WINDOW', self.quit)
        self.root.bind('<Control-q>', self.quit)
        signal.signal(signal.SIGINT, self.quit)

    def quit(self, *args):
        """Stop the clock thread and tear down the Tk window."""
        self.clock.stop()
        self.root.destroy()
def main():
    """Configure DEBUG-level logging and run the Tk event loop until exit."""
    logging.basicConfig(level=logging.DEBUG)
    app = App(tk.Tk())
    app.root.mainloop()


if __name__ == '__main__':
    main()
| beenje/tkinter-logging-text-widget | main.py | main.py | py | 6,751 | python | en | code | 52 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "threading.Event",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.date... |
2325246930 | import os
import shutil
from flask import request, jsonify
from flask_restful import Resource
from flask_uploads import UploadNotAllowed
from db import db
from libs import image_helper
from models.category import CategoryModel
from models.subcategory import SubCategoryModel
from models.provider import ProviderModel, ProviderLanguageModel, ProviderContactModel, ProviderImageModel
from schemas.provider import ProviderSchema, ProviderLanguageSchema, ProviderContactSchema, ProviderImageSchema
provider_schema = ProviderSchema(many=False)
providers_schema = ProviderSchema(many=True, only=('identifier', 'forename', 'surname'))
provider_language_schema = ProviderLanguageSchema()
providers_languages_schema = ProviderLanguageSchema(many=True)
provider_contact_schema = ProviderContactSchema()
provider_contacts_schema = ProviderContactSchema(many=True)
provider_images_schema = ProviderImageSchema(many=True)
class ProvidersResource(Resource):
    """Read-only collection endpoint listing every provider."""

    @classmethod
    def get(cls):
        """Return identifier/forename/surname for all providers as JSON."""
        providers = ProviderModel.query.all()
        return jsonify(providers_schema.dump(providers))
class ProviderResource(Resource):
    """Single-provider endpoint: fetch, update (JSON or images) and delete."""

    @classmethod
    def get(cls, identifier):
        """Return the full serialized provider, or an error message dict."""
        provider = ProviderModel.find_by_identifier(identifier)
        if provider:
            result = provider_schema.dump(provider)
            return jsonify(result)
        else:
            return {"message": f"Provider with id {identifier} does not exist!"}

    @classmethod
    def put(cls, identifier):
        """Update a provider.

        Two request flavours are handled:

        * ``application/json`` -- replaces the basic fields, then rebuilds
          the occupation/subcategory, language and contact-number relations
          from the JSON payload.
        * ``multipart/form-data`` -- replaces the provider's image folder on
          disk and the matching image rows in the database.
        """
        provider = ProviderModel.find_by_identifier(identifier)
        if provider:
            if request.mimetype == 'application/json':
                provider = ProviderModel.find_by_identifier(identifier)
                forename = request.json['forename']
                surname = request.json['surname']
                email = request.json['email']
                home_address = request.json['home_address']
                city = request.json['city']
                post_code = request.json['post_code']
                dob = request.json['dob']
                residency = request.json['residency']
                email_confirmation = request.json['email_confirmation']
                role = request.json['role']
                provider.forename = forename
                provider.surname = surname
                provider.email = email
                provider.home_address = home_address
                provider.city = city
                provider.post_code = post_code
                provider.dob = dob
                provider.residency = residency
                provider.email_confirmation = email_confirmation
                provider.role = role
                db.session.commit()
                # Rebuild the occupation/subcategory relations from scratch.
                occupations_len = request.json['occupations']
                provider.occupations.clear()
                provider.subcategories.clear()
                for i in range(len(occupations_len)):
                    category_json = str(request.json['occupations'][i]['name'])
                    category = CategoryModel.find_by_name(category_json)
                    if category:
                        provider.occupations.append(category)
                        subcategories_len = request.json['occupations'][i]['subcategories']
                        for j in range(len(subcategories_len)):
                            subcategory_json = str(request.json['occupations'][i]['subcategories'][j]['name'])
                            # NOTE(review): extracts the category id by splitting the
                            # model's string representation -- fragile; verify the
                            # repr format of CategoryModel before touching this.
                            category = str(CategoryModel.find_by_name(category_json)).split(" ")
                            sub = db.session.query(SubCategoryModel.name). \
                                filter(SubCategoryModel.category_id == category[1][:-1]).all()
                            sub_len = len(sub) - 1

                            def is_sub(length, sub_name):
                                # Recursively pop candidate rows until the name
                                # matches or the list is exhausted.
                                try:
                                    fix = sub.pop(length)
                                    if sub_name in fix[0]:
                                        return True
                                    else:
                                        return is_sub(length - 1, sub_name)
                                except IndexError:
                                    return False

                            check_sub = is_sub(sub_len, subcategory_json)
                            if check_sub:
                                subcategory_name = SubCategoryModel.find_sub_by_name(subcategory_json)
                                provider.subcategories.append(subcategory_name)
                            else:
                                return {"message":
                                        f"Subcategory with name {subcategory_json} is not in {category_json}!"}
                    else:
                        return {"message": f"Category {category_json} does not exist!"}
                # Replace the language rows: delete existing, then re-add.
                language = ProviderLanguageModel.find_lang_by_provider_id(identifier)
                if not language:
                    lang = request.json['languages']
                    for i in range(len(lang)):
                        name = request.json['languages'][i]['name']
                        my_language = ProviderLanguageModel(name, identifier)
                        db.session.add(my_language)
                else:
                    for i in range(len(language)):
                        db.session.delete(language[i])
                    db.session.commit()
                    rang = request.json['languages']
                    for i in range(len(rang)):
                        name = request.json['languages'][i]['name']
                        my_language = ProviderLanguageModel(name, identifier)
                        db.session.add(my_language)
                # Replace the contact-number rows the same way.
                contact_number = ProviderContactModel.find_cont_by_provider_id(identifier)
                if not contact_number:
                    cont = request.json['contact_numbers']
                    for i in range(len(cont)):
                        number = request.json['contact_numbers'][i]['number']
                        my_contact = ProviderContactModel(number, identifier)
                        db.session.add(my_contact)
                else:
                    for i in range(len(contact_number)):
                        db.session.delete(contact_number[i])
                    db.session.commit()
                    cont = request.json['contact_numbers']
                    for i in range(len(cont)):
                        number = request.json['contact_numbers'][i]['number']
                        my_contact = ProviderContactModel(number, identifier)
                        db.session.add(my_contact)
                db.session.commit()
            if request.mimetype == 'multipart/form-data':
                data = {'images': None}
                back_folder = "providers"
                provider_id = f"{identifier}".lower()
                folder = os.path.join(back_folder, provider_id)
                folder_path = os.path.join("static", "images", folder)
                is_folder = os.path.isdir(folder_path)
                # Wipe the old on-disk image folder before saving new uploads.
                if is_folder:
                    try:
                        shutil.rmtree(folder_path)
                    except OSError as e:
                        return jsonify("Error: %s : %s" % (folder_path, e.strerror))
                image_query = ProviderImageModel.find_image_by_provider_id(identifier)
                for i in range(len(image_query)):
                    db.session.delete(image_query[i])
                db.session.commit()
                for images in request.files.getlist('images'):
                    data['images'] = images
                    try:
                        save = image_helper.save_image(images, folder=folder, name=images.filename)
                        # DATABASE
                        path = str(image_helper.get_path(save)).replace('\\', '/')
                        extension = image_helper.get_extension(save)
                        provider_image = ProviderImageModel(path, extension, identifier)
                        provider_image.save_to_db()
                    except UploadNotAllowed:
                        extension = image_helper.get_extension(data['images'])
                        return {"message": f"The file with {extension} is not allowed"}
            return {"message": f"Provider updated successfully."}
        else:
            return {"message": f"Provider with id {identifier} does not exist!"}

    @classmethod
    def delete(cls, identifier):
        """Delete the provider row and its image folder on disk."""
        provider = ProviderModel.find_by_identifier(identifier)
        if provider:
            provider.delete_from_db()
            back_folder = "providers"
            provider_id = f"{identifier}".lower()
            folder = os.path.join(back_folder, provider_id)
            folder_path = os.path.join("static", "images", folder)
            is_folder = os.path.isdir(folder_path)
            if is_folder:
                try:
                    shutil.rmtree(folder_path)
                except OSError as e:
                    return jsonify("Error: %s : %s" % (folder_path, e.strerror))
            return {'message': 'Provider was deleted successfully!'}
        else:
            return {"message": f"Provider {identifier} does not exist!"}
| Emir99/city-service | resources/provider.py | provider.py | py | 9,338 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "schemas.provider.ProviderSchema",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "schemas.provider.ProviderSchema",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "schemas.provider.ProviderLanguageSchema",
"line_number": 18,
"usage_type"... |
15026539313 | import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
# Require a device that also has an RGB sensor, not depth-only.
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# L500-series devices use a different colour resolution.
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 1 #1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
# ROS plumbing: message type and the OpenCV <-> ROS image bridge.
import rospy
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge
br = CvBridge()
def talker():
    """Publish aligned RGB-D frames as one 4-channel compressed ROS image.

    Runs at 10 Hz: waits for a RealSense frameset, aligns depth to the
    colour stream, stacks colour (3 channels) + depth (1 channel) into a
    single uint8 image and publishes it on ``DepthImageAlign`` as PNG.
    Relies on the module-level ``pipeline``, ``align`` and ``br`` objects.
    """
    pub = rospy.Publisher('DepthImageAlign', CompressedImage, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x360 depth image
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        depth_image = np.expand_dims(depth_image, axis=2)
        color_image = np.asanyarray(color_frame.get_data())
        # Remove background - Set pixels further than clipping_distance to grey
        grey_color = 153
        # Background-removal experiments (treat pixels beyond 1 m, or with
        # non-positive depth, as background) were left disabled:
        # depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
        # bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
        # bg_removed = np.where((depth_image > clipping_distance) | (depth_image <= 0), grey_color, color_image)
        # NOTE(review): the aligned depth data is 16-bit (z16); casting the
        # stacked array to uint8 truncates depth values -- confirm the
        # subscriber expects this.
        im = np.concatenate((color_image,depth_image),axis=2).astype(np.uint8)
        msg = br.cv2_to_compressed_imgmsg(im, dst_format='png')
        pub.publish(msg)
        rate.sleep()


if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
| bub3690/greencamp_vision | realsense/depth_color_align.py | depth_color_align.py | py | 3,833 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pyrealsense2.pipeline",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.config",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.pipeline_wrapper",
"line_number": 15,
"usage_type": "call"
},
{
"api_name":... |
32017383453 | """Base Template For the API"""
import cherrypy
from api.base import APIBase
from libs.scraper import Scraper
@cherrypy.expose
class APIScraperSearchMovie(APIBase):
    """API endpoint that searches the scraper backend for movies by name."""

    def GET(self, **kwargs) -> str:
        """Handle GET: requires ``name``; ``page`` and ``year`` are optional."""
        if "name" not in kwargs:
            return self._return_data(
                "Scraper",
                "Movie Search",
                False,
                error="Missing Name",
                errorNumber=0,
            )
        results = Scraper.search_for_movie(
            kwargs["name"], kwargs.get("page", 1), kwargs.get("year", None)
        )
        return self._return_data(
            "Scraper", "Movie Search", True, data=results, images=Scraper.image_config
        )
| GaryTheBrown/Tackem | api/scraper/search_movie.py | search_movie.py | py | 762 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "api.base.APIBase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "libs.scraper.Scraper.search_for_movie",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "libs.scraper.Scraper",
"line_number": 24,
"usage_type": "name"
},
{
"api_na... |
12340533789 | from flask import Blueprint, request, url_for, jsonify
from PIL import Image
from delete_processed_images.views import delete_images
import numpy as np
gray_to_binary = Blueprint('gray_to_binary', __name__, template_folder='templates')
@gray_to_binary.route('/gray_to_binary', methods=['GET', 'POST'])
def im2bw():
    """Threshold the uploaded image to black and white.

    Expects a POST whose raw body is the integer threshold.  Pixels at or
    above the threshold become white (255), the rest black (0).  The result
    is saved under static/uploads and its URL returned as JSON.
    """
    if request.method == "POST":
        try:
            delete_images()
            image_src = 'static/uploads/img.png'
            im = Image.open(image_src).convert(mode="L")  # force grayscale
            pixels = np.array(im, dtype=np.uint8)  # pixel matrix of the image
            prag = int(request.get_data())
            # File name encodes the threshold, e.g. img_bw_128.png.
            imgname = "img_bw_" + str(prag) + ".png"
            # Vectorised threshold replaces the original per-pixel Python
            # double loop: same result, one C-level pass.
            pixels = np.where(pixels >= prag, 255, 0).astype(np.uint8)
            im_bw = Image.fromarray(pixels)  # back from pixel matrix to a binary image
            im_bw.save('static/uploads/' + imgname)
            image_url_bw = url_for('static', filename="uploads/" + imgname)
            return jsonify({'image_url_bw': image_url_bw})
        except Exception as e:
            # NOTE(review): broad catch falls through to an implicit None
            # (HTTP 500) and only prints -- consider logging the exception
            # and returning a JSON error payload instead.
            print(e)
| Narcissimillus/appweb-edimg | gray_to_binary/views.py | views.py | py | 1,469 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "delete_process... |
36286082579 | # Impordime vajalikud moodulid
# Import the required modules
import pygame
import sys

pygame.init()  # start the pygame module

# Colour definitions
red = [255, 0, 0]
green = [0, 255, 0]
blue = [0, 0, 255]
pink = [255, 153, 255]
lGreen = [153, 255, 153]
lBlue = [153, 204, 255]

# Screen settings
screenX = 640
screenY = 480
screen = pygame.display.set_mode([screenX, screenY])
pygame.display.set_caption("Ping-pong - Tamm")
screen.fill(lBlue)
clock = pygame.time.Clock()

# Ball speed and position
posX, posY = 0, 0
speedX, speedY = 3, 4

# Paddle speed and position
alusX, alusY = 0, screenY/1.5
alusSpeedX = 2

# Load and scale the images
pall = pygame.Rect(posX, posY, 20, 20)
palliPilt = pygame.image.load("yl5_pall.png")
palliPilt = pygame.transform.scale(palliPilt, (20, 20))
alus = pygame.Rect(alusX, alusY, 120, 20)
alusePilt = pygame.image.load("yl5_alus.png")
alusePilt = pygame.transform.scale(alusePilt, (120, 20))

# Score variable
skoor = 0
gameover = False  # game-over flag

while not gameover:  # repeat while the gameover flag is False
    clock.tick(60)  # cap the frame rate
    for event in pygame.event.get():  # event handling
        if event.type == pygame.QUIT:  # when the window is closed
            sys.exit()  # quit the game
    # Move the ball
    pall = pygame.Rect(posX, posY, 20, 20)
    screen.blit(palliPilt, pall)
    posX += speedX
    posY += speedY
    # Move the paddle
    alus = pygame.Rect(alusX, alusY, 120, 20)
    screen.blit(alusePilt, alus)
    alusX += alusSpeedX
    # Draw the score
    screen.blit(pygame.font.Font(None, 30).render(f"Skoor: {skoor}", True, [255, 255, 255]), [10, 20])
    # Bounce the ball off the screen edges
    if posX > screenX - palliPilt.get_rect().width or posX < 0:
        speedX = -speedX
    if posY > screenY-palliPilt.get_rect().height or posY < 0:
        speedY = -speedY
    if posY > screenY-palliPilt.get_rect().height:
        skoor -= 1
    # Bounce off the paddle and increase the score
    if pall.colliderect(alus) and speedY > 0:
        speedY = -speedY
        skoor += 1
    # Bounce the paddle off the screen edges
    if alusX > screenX - alusePilt.get_rect().width or alusX < 0:
        alusSpeedX = -alusSpeedX
    # Show the frame on screen
    pygame.display.flip()
    screen.fill(lBlue)  # clear so old frames do not linger

pygame.quit()  # when the game is over
| TorrenTamm/Tarkvaraarenduse-projekt | Tamm_yl5/Tamm_yl5.py | Tamm_yl5.py | py | 2,548 | python | et | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
73725948512 | import requests, dateutil.parser
from bs4 import BeautifulSoup
from datetime import datetime
# Send a notification e-mail through the Mailgun HTTP API.
def sendEmail(title):
    """POST a message with the given subject via Mailgun.

    The placeholders (YOUR-DOMAIN, YOUR-API-KEY, YOUR-EMAIL-ADDRESS) must be
    filled in with real credentials before use.
    """
    requests.post(
        "https://api.eu.mailgun.net/v3/YOUR-DOMAIN/messages",
        auth=("api", "YOUR-API-KEY"),
        data={"from": "YOUR-NAME <YOUR-EMAIL-ADDRESS>",
              "to": ["YOUR-EMAIL-ADDRESS"],
              "subject": title,
              "text": "There is a new announcement on the Eloqua website: https://community.oracle.com/topliners/categories/eloqua-system-status"})
# Fetch the announcements page source (browser-like headers to avoid blocking).
request = requests.get("https://community.oracle.com/topliners/categories/eloqua-system-status", headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"})
# Extract the newest post's timestamp and title from the page.
sourceCode = BeautifulSoup(request.content)
timeElement = sourceCode.time.string
titleElement = sourceCode.find_all("div", {"class": "Title"})
title = titleElement[0].a.string
lastUpdate = dateutil.parser.parse(timeElement)
# Current time and date.
now = datetime.now()
# If the newest post is at most one hour old, notify by e-mail.
# NOTE(review): datetime.now() is timezone-naive; if the page's timestamp
# parses as timezone-aware, the subtraction below raises TypeError --
# confirm the site's timestamp format.
difference = now - lastUpdate
if difference.days == 0 and difference.seconds <= 3600:
    sendEmail(title)
else:
    print("No new Eloqua updates")
| adamxszabo/eloqua-announcements | eloqua-announcements.py | eloqua-announcements.py | py | 1,480 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parser.pa... |
72263104354 |
import sys
import pygame
import keyboard
from pygame.locals import *
from time import sleep
pygame.init()
deadband = 0.1  # ignore joystick axis readings below this magnitude
keepPlaying = True
print("example4")
# pygame.init()
pygame.display.set_caption('game base')
screen = pygame.display.set_mode((500, 500), 0, 32)
clock = pygame.time.Clock()
#
# pygame.joystick.init()
# joysticks = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
# for joystick in joysticks:
#     print(joystick.get_name())
my_square = pygame.Rect(50, 50, 50, 50)
my_square_color = 0
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
motion = [0, 0]
myjoystick = pygame.joystick.Joystick(0)  # since we only have one joystick, we know the instance ID is 0
myjoystick.init()
# NOTE(review): unlike the commented-out reference loop further down, this
# loop never calls pygame.display.update() or clock.tick() -- confirm whether
# the window is actually expected to repaint.
while True:
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, colors[my_square_color], my_square)
    # Apply a small dead zone so the square does not drift from axis noise.
    if abs(motion[0]) < 0.1:
        motion[0] = 0
    if abs(motion[1]) < 0.1:
        motion[1] = 0
    my_square.x += motion[0] * 10
    my_square.y += motion[1] * 10
    for event in pygame.event.get():
        # The 0 button is the 'a' button, 1 is the 'b' button, 2 is the 'x' button, 3 is the 'y' button
        if event.type == pygame.JOYBUTTONDOWN:
            if event.button == 0:
                print("Select Has Been Pressed")
            if event.button == 1:
                print("Left Joystick button has been pressed")
            if event.button == 2:
                print("Right Joystick button has been pressed")
            if event.button == 3:
                print("Start has been pressed")
            if event.button == 4:
                print("Surface top button has been pressed")
            if event.button == 5:
                print("Surface right button has been pressed")
            if event.button == 6:
                print("Surface Bottom Has Been Pressed")
            if event.button == 7:
                print("Surface left button has been pressed")
            if event.button == 8:
                print("Left 2 has been pressed")
            if event.button == 9:
                print("Right 2 has been pressed")
            if event.button == 10:
                print("Left 1 has been pressed")
            if event.button == 11:
                print("Right 1 has been pressed")
            if event.button == 12:
                print("Triangle Has Been Pressed")
            if event.button == 13:
                print("Circle has been pressed")
            if event.button == 14:
                print("X has been pressed")
            if event.button == 15:
                print("Square has been pressed")
            if event.button == 16:
                print("Center PS has been pressed")
        elif event.type == pygame.JOYAXISMOTION:
            # print(event)
            # Axes 0/1 drive the square; all axes are echoed when outside
            # the dead band.
            if event.axis < 2:
                motion[event.axis] = event.value
            if event.axis == 0 and abs(myjoystick.get_axis(0)) > deadband:
                zero = myjoystick.get_axis(0)
                print('1 has been moved ' + str(zero))
            if event.axis == 1 and abs(myjoystick.get_axis(1)) > deadband:
                one = myjoystick.get_axis(1)
                print('2 has been moved ' + str(one))
            if event.axis == 2 and abs(myjoystick.get_axis(2)) > deadband:
                two = myjoystick.get_axis(2)
                print('3 has been moved ' + str(two))
            if event.axis == 3 and abs(myjoystick.get_axis(3)) > deadband:
                three = myjoystick.get_axis(3)
                print('4 has been moved ' + str(three))
            if event.axis == 4 and abs(myjoystick.get_axis(4)) > deadband:
                four = myjoystick.get_axis(4)
                print('4 has been moved ' + str(four))
#
# while True:
#
# screen.fill((0, 0, 0))
#
# pygame.draw.rect(screen, colors[my_square_color], my_square)
# if abs(motion[0]) < 0.1:
# motion[0] = 0
# if abs(motion[1]) < 0.1:
# motion[1] = 0
# my_square.x += motion[0] * 10
# my_square.y += motion[1] * 10
#
# for event in pygame.event.get():
# if keyboard.read_key() == "s":
# print(event)
# if event.button == 0:
# my_square_color = (my_square_color + 1) % len(colors)
# if keyboard.read_key() == "w":
# print(event)
# if event.type == JOYAXISMOTION:
# print(event)
# if event.axis < 2:
# motion[event.axis] = event.value
# if event.type == JOYHATMOTION:
# print(event)
# if event.type == JOYDEVICEADDED:
# joysticks = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
# for joystick in joysticks:
# print(joystick.get_name())
# if event.type == JOYDEVICEREMOVED:
# joysticks = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
# if event.type == QUIT:
# pygame.quit()
# sys.exit()
# if event.type == KEYDOWN:
# if event.key == K_ESCAPE:
# pygame.quit()
# sys.exit()
#
# pygame.display.update()
# clock.tick(60)
| Aragon-Robotics-Team/test-materov-2021 | GUI/joystick.py | joystick.py | py | 5,378 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.dis... |
19773722015 | import logging
import os
import sys
from abc import ABC
from typing import TextIO
from kivy import metrics
from kivy.app import App
from kivy.base import EventLoop
from kivy.config import Config
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.resources import resource_add_path
from kivy.resources import resource_paths
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from src import radio_sync_version
from src.ham.util import radio_types
from src.ham.util.file_util import GlobalConstants
from src.ham.util.path_manager import PathManager
from src.ui.async_wrapper import AsyncWrapper
Config.set('input', 'mouse', 'mouse,disable_multitouch')
class RightClickTextInput(TextInput):
    """TextInput that opens the cut/copy/paste bubble on a right click."""

    def on_touch_down(self, touch):
        super().on_touch_down(touch)
        # Only react when this input currently has focus.
        if not self.focused:
            return
        if touch.button == 'right':
            logging.debug("right mouse clicked")
            # Convert the stored long-touch position to widget coordinates and
            # lift the bubble a quarter inch so it does not cover the cursor.
            pos = self.to_local(*self._long_touch_pos, relative=False)
            pos = (pos[0], pos[1] - metrics.inch(.25))
            # NOTE(review): this handler always returns None, so the touch is
            # never marked as consumed -- confirm that is intended.
            self._show_cut_copy_paste(
                pos, EventLoop.window, mode='paste')
class LayoutIds:
action_previous = 'action_previous'
buffer = 'buffer'
button_pool = 'button_pool'
create_radio_plugs = 'create_radio_plugs'
cant_find_radio = 'cant_find_radio'
check_migrations = 'check_migrations'
clear_log = 'clear_log'
debug_toggle = 'debug_toggle'
dangerous_operations = 'dangerous_operations'
dangerous_operation__delete_migrate = 'dangerous_operation__delete_migrate'
dangerous_operation__migrate = 'dangerous_operation__migrate'
dangerous_operation__wizard = 'dangerous_operation__wizard'
dangerous_operation__cleanup = 'dangerous_operation__cleanup'
enable_dangerous = 'enable_dangerous'
exit_button = 'exit_button'
feature_request = 'feature_request'
file_log_toggle = 'file_log_toggle'
getting_started = 'getting_started'
input_folder = 'input_folder'
input_folder_select = 'input_folder_select'
import_file = 'import_file'
output_folder = 'output_folder'
output_folder_select = 'output_folder_select'
log_output = 'log_output'
radio_descriptions = 'radio_descriptions'
radio_header = 'radio_header'
radio_labels = 'radio_labels'
kv = f"""
BoxLayout:
orientation: "vertical"
ActionBar:
ActionView:
ActionPrevious:
id: {LayoutIds.action_previous}
title: 'Ham Radio Sync'
with_previous: False
enabled: False
ActionButton:
id: {LayoutIds.create_radio_plugs}
text: "Create Radio Plugs"
background_normal:''
background_down: ''
background_color: [0.00,0.40,0.13,1.0]
ActionToggleButton:
id: {LayoutIds.enable_dangerous}
text: "Enable Dangerous Operations"
ActionSeparator:
important: True
ActionGroup:
text: "File"
mode: "spinner"
dropdown_width: dp(225)
ActionButton:
id: {LayoutIds.check_migrations}
text: "Check for needed migrations"
ActionButton:
id: {LayoutIds.input_folder_select}
text: "Set input directory"
ActionButton:
id: {LayoutIds.output_folder_select}
text: "Set output directory"
ActionButton:
id: {LayoutIds.import_file}
text: "Import from CHiRP"
ActionButton:
id: {LayoutIds.clear_log}
text: "Clear screen log"
ActionButton:
id: {LayoutIds.exit_button}
text: "Exit"
ActionGroup:
text: "Dangerous Operations"
mode: "spinner"
id: {LayoutIds.dangerous_operations}
dropdown_width: dp(225)
ActionButton:
id: {LayoutIds.dangerous_operation__delete_migrate}
text: "Remove migration backups"
ActionButton:
id: {LayoutIds.dangerous_operation__migrate}
text: "Migrate to latest format"
ActionButton:
id: {LayoutIds.dangerous_operation__wizard}
text: "Wizard"
ActionButton:
id: {LayoutIds.dangerous_operation__cleanup}
text: "Cleanup"
ActionGroup:
text: "Help / Getting Started"
mode: "spinner"
dropdown_width: dp(250)
ActionButton:
id: {LayoutIds.getting_started}
text: "About/Getting started..."
ActionButton:
id: {LayoutIds.radio_descriptions}
text: "Radio model/program list"
ActionButton:
id: {LayoutIds.cant_find_radio}
text: "My radio isn't here"
ActionButton:
id: {LayoutIds.feature_request}
text: "Feature request/bug report"
ActionToggleButton:
id: {LayoutIds.file_log_toggle}
text: "Enable logging to text file"
ActionToggleButton:
id: {LayoutIds.debug_toggle}
text: "Debug logging"
BoxLayout:
orientation: "horizontal"
StackLayout:
id: {LayoutIds.button_pool}
spacing: dp(10)
size_hint: (0.2, 1)
padding: [dp(20), dp(20), dp(20), dp(20)]
size_hint_min_x: dp(225)
size_hint_max_x: dp(275)
Label:
id: {LayoutIds.radio_header}
text: "Radios to Generate"
size_hint: (1.0, 0.1)
font_size: dp(15)
bold: True
BoxLayout:
id: {LayoutIds.radio_labels}
orientation: "vertical"
spacing: dp(10)
size_hint: (1, 0.4)
BoxLayout:
id: {LayoutIds.buffer}
orientation: "vertical"
size_hint: (1, 0.2)
BoxLayout:
orientation: "vertical"
size_hint: (0.8, 1)
Label:
id: {LayoutIds.input_folder}
text: "Input folder: None"
valign: 'middle'
size_hint: (1, 0.1)
text_size: self.size
Label:
id: {LayoutIds.output_folder}
text: "Output folder: None"
valign: 'middle'
size_hint: (1, 0.1)
text_size: self.size
RightClickTextInput:
id: {LayoutIds.log_output}
font_name: 'RobotoMono-Regular'
text: ''
size_hint: (1, 1)
readonly: True
font_size: dp(11)
use_bubble: True
"""
class AppWindow(App):
text_log = None
_async_wrapper = None
force_debug = False
popup_manager = None
def build(self):
icon_path = './images/radio_sync.ico'
action_icon_path = './images/radio_sync.png'
if hasattr(sys, '_MEIPASS'):
logging.debug("Has _MEIPASS")
logging.debug(os.listdir(sys._MEIPASS))
icon_path = os.path.join(sys._MEIPASS, 'images/radio_sync.ico')
action_icon_path = os.path.join(sys._MEIPASS, 'images/radio_sync.png')
logging.debug(f"Icon path: `{icon_path}`")
if os.path.exists(icon_path):
logging.debug("Icon path exists")
resource_add_path(os.path.join(sys._MEIPASS, 'images'))
else:
resource_add_path('images')
self.icon = icon_path
logging.debug(f"Resource paths: `{resource_paths}`")
self._async_wrapper = AsyncWrapper()
layout = Builder.load_string(kv)
Window.size = (dp(1200), dp(550))
Window.clearcolor = (0.15, 0.15, 0.15, 1)
Window.bind(on_keyboard=self.key_handler)
self.title = f'Ham Radio Sync v{radio_sync_version.version}'
action_previous = layout.ids[LayoutIds.action_previous]
action_previous.app_icon = action_icon_path
self._bind_radio_menu(layout)
self._bind_console_log(layout)
self._bind_file_menu(layout)
self._bind_dangerous_ops_menu(layout)
self._bind_help_menu(layout)
create_radio_button = layout.ids[LayoutIds.create_radio_plugs]
dangerous_ops_button = layout.ids[LayoutIds.enable_dangerous]
dangerous_ops_menu = layout.ids[LayoutIds.dangerous_operations]
buttons = [create_radio_button, dangerous_ops_button, dangerous_ops_menu]
self._async_wrapper.buttons = buttons
logging.info("Welcome to the ham radio sync app.")
self._async_wrapper.check_version(None)
return layout
def key_handler(self, window, keycode1, keycode2, text, modifiers):
if keycode1 == 27 or keycode1 == 1001:
return True
return False
def _bind_radio_menu(self, layout):
button_pool = layout.ids[LayoutIds.radio_labels]
radio_select_buttons = dict()
radios = radio_types.radio_choices()
for radio in radios:
radio_layout = BoxLayout(orientation='horizontal', size_hint=(1, 0.1))
radio_label = Label(text=radio_types.pretty_name(radio), size_hint=(0.9, 1), font_size=dp(11), halign='left')
radio_checkbox = CheckBox(size_hint=(0.1, 1))
radio_checkbox.active = radio == radio_types.DEFAULT
radio_label.bind(size=radio_label.setter('text_size'))
radio_layout.add_widget(radio_label)
radio_layout.add_widget(radio_checkbox)
radio_select_buttons[radio] = radio_checkbox
button_pool.add_widget(radio_layout)
self._async_wrapper.radio_buttons = radio_select_buttons
create_button = layout.ids[LayoutIds.create_radio_plugs]
create_button.bind(on_press=self._async_wrapper.radio_generator)
def _bind_console_log(self, layout):
text_log = layout.ids[LayoutIds.log_output]
self.text_log = text_log
input_folder = layout.ids[LayoutIds.input_folder]
output_folder = layout.ids[LayoutIds.output_folder]
PathManager.input_folder_label = input_folder
PathManager.output_folder_label = output_folder
PathManager.set_input_path('./in')
PathManager.set_output_path('./out')
PathManager.set_import_file('./in/import.csv', radio_types.CHIRP)
logger = logging.getLogger('radio_sync')
formatter = GlobalConstants.logging_formatter
text_box_logger = TextBoxHandler(self.text_log)
handler = logging.StreamHandler(stream=text_box_logger)
handler.setFormatter(formatter)
logger.setLevel(logging.INFO)
if self.force_debug:
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
def _bind_file_menu(self, layout):
check_migrations_button = layout.ids[LayoutIds.check_migrations]
check_migrations_button.bind(on_press=self._async_wrapper.check_migrations)
self.popup_manager = PopupManager(self._async_wrapper)
input_folder_button = layout.ids[LayoutIds.input_folder_select]
input_folder_button.bind(on_press=self.popup_manager.select_input_folder_dialog)
output_folder_button = layout.ids[LayoutIds.output_folder_select]
output_folder_button.bind(on_press=self.popup_manager.select_output_folder_dialog)
import_button = layout.ids[LayoutIds.import_file]
import_button.bind(on_press=self.popup_manager.select_import_file_dialog)
clear_console_button = layout.ids[LayoutIds.clear_log]
clear_console_button.bind(on_press=self._clear_console)
exit_button = layout.ids[LayoutIds.exit_button]
exit_button.bind(on_press=self.stop)
def _bind_dangerous_ops_menu(self, layout):
dangerous_ops_button = layout.ids[LayoutIds.enable_dangerous]
dangerous_ops_button.bind(on_press=self._async_wrapper.arm_dangerous)
self._async_wrapper.dangerous_ops_toggle = dangerous_ops_button
dangerous_ops_menu = layout.ids[LayoutIds.dangerous_operations]
self._async_wrapper.dangerous_buttons = [dangerous_ops_menu]
dangerous_ops_menu.disabled = True
cleanup_button = layout.ids[LayoutIds.dangerous_operation__cleanup]
cleanup_button.bind(on_press=self._async_wrapper.wizard_cleanup)
wizard_button = layout.ids[LayoutIds.dangerous_operation__wizard]
wizard_button.bind(on_press=self._async_wrapper.wizard_bootstrap)
migrate_button = layout.ids[LayoutIds.dangerous_operation__migrate]
migrate_button.bind(on_press=self._async_wrapper.migrations)
delete_migrate_button = layout.ids[LayoutIds.dangerous_operation__delete_migrate]
delete_migrate_button.bind(on_press=self._async_wrapper.migration_backups)
def _bind_help_menu(self, layout):
debug_button = layout.ids[LayoutIds.debug_toggle]
self._async_wrapper.debug_toggle = debug_button
debug_button.bind(on_press=self._async_wrapper.log_level)
file_log_button = layout.ids[LayoutIds.file_log_toggle]
file_log_button.bind(on_press=self._async_wrapper.toggle_file_log)
contact_button = layout.ids[LayoutIds.cant_find_radio]
contact_button.bind(on_press=self._async_wrapper.contact_info)
feature_request_button = layout.ids[LayoutIds.feature_request]
feature_request_button.bind(on_press=self._async_wrapper.contact_info)
getting_started_button = layout.ids[LayoutIds.getting_started]
getting_started_button.bind(on_press=self._async_wrapper.display_start_info)
compatible_radios_button = layout.ids[LayoutIds.radio_descriptions]
compatible_radios_button.bind(on_press=self._async_wrapper.compatible_radios)
def _clear_console(self, event):
self.text_log.text = ''
logging.info("Console has been cleared.")
def right_click_down(self, touch):
if touch.button == 'right':
print("right mouse clicked")
pos = touch.to_local(*self._long_touch_pos, relative=True)
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
class PopupIds:
cancel_button = "cancel_button"
file_chooser = "file_chooser"
file_path = "file_path"
load_button = "load_button"
mode = "mode"
load_dialog = f"""
BoxLayout:
size: root.size
pos: root.pos
orientation: "vertical"
Label:
id: {PopupIds.mode}
size_hint_y: 0
text: "mode"
TextInput:
size_hint: (1, 0.1)
id: {PopupIds.file_path}
text: "None"
multiline: False
FileChooserListView:
size_hint: (1, 0.9)
id: {PopupIds.file_chooser}
dirselect: True
filters: ["!*"]
BoxLayout:
size_hint_y: None
height: 30
Button:
id: {PopupIds.load_button}
text: "Load"
Button:
id: {PopupIds.cancel_button}
text: "Cancel"
"""
class PopupManager:
def __init__(self, async_wrapper):
self._async_wrapper = async_wrapper
def select_input_folder_dialog(self, event):
self._select_folder_dialog(event, 'Set input directory', PathManager.get_input_path(), self._select_input_folder)
def select_output_folder_dialog(self, event):
self._select_folder_dialog(event, 'Set output directory', PathManager.get_output_path(), self._select_output_folder)
def _select_folder_dialog(self, event, title, starting_path, load_button_action):
dialog_content = Builder.load_string(load_dialog)
file_chooser = dialog_content.ids[PopupIds.file_chooser]
file_chooser.path = starting_path
file_chooser.bind(selection=self._update_display_path)
file_label = dialog_content.ids[PopupIds.file_path]
file_label.text = file_chooser.path
file_label.bind(on_text_validate=self._update_file_browser)
self._popup = Popup(title=title, content=dialog_content, size_hint=(0.9, 0.9))
dialog_content.ids[PopupIds.cancel_button].bind(on_press=self._dismiss_popup)
dialog_content.ids[PopupIds.load_button].bind(on_press=load_button_action)
self._popup.open()
return
def _update_file_browser(self, event):
file_label = self._popup.content.ids[PopupIds.file_path]
potential_path = file_label.text
file_chooser = self._popup.content.ids[PopupIds.file_chooser]
if os.path.exists(potential_path):
file_chooser.path = potential_path
else:
file_label.text = self._get_selected_path()
def _select_input_folder(self, event):
path = self._get_selected_path()
PathManager.set_input_path(path)
self._dismiss_popup(None)
def _select_output_folder(self, event):
path = self._get_selected_path()
PathManager.set_output_path(path)
self._dismiss_popup(None)
def select_import_file_dialog(self, event):
dialog_content = Builder.load_string(load_dialog)
file_chooser = dialog_content.ids[PopupIds.file_chooser]
file_chooser.dirselect = False
file_chooser.filters = "*.csv"
file_chooser.path = PathManager.get_import_path()
file_chooser.bind(selection=self._update_display_path)
file_label = dialog_content.ids[PopupIds.file_path]
file_label.text = file_chooser.path
file_label.bind(on_text_validate=self._update_file_browser)
self._popup = Popup(title="Select CHiRP file", content=dialog_content, size_hint=(0.9, 0.9))
dialog_content.ids[PopupIds.cancel_button].bind(on_press=self._dismiss_popup)
dialog_content.ids[PopupIds.load_button].bind(on_press=self._import_trigger_event)
self._popup.open()
return
def _import_trigger_event(self, event):
file_label = self._popup.content.ids[PopupIds.file_path]
PathManager.set_import_file(file_label.text, radio_types.CHIRP)
self._async_wrapper.import_file()
self._dismiss_popup(None)
def _dismiss_popup(self, event):
self._popup.dismiss()
def _update_display_path(self, *args):
file_label = self._popup.content.ids[PopupIds.file_path]
file_label.text = self._get_selected_path()
def _get_selected_path(self):
file_chooser = self._popup.content.ids[PopupIds.file_chooser]
result = file_chooser.path
if len(file_chooser.selection) == 1:
result = file_chooser.selection[0]
return result
class TextBoxHandler(TextIO, ABC):
def __init__(self, text_log):
self._text_log = text_log
self.lock = None
def write(self, record):
self._text_log.text += record
return
| n2qzshce/ham-radio-sync | src/ui/app_window.py | app_window.py | py | 16,444 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "kivy.config.Config.set",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "kivy.config.Config",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "kivy.uix.textinput.TextInput",
"line_number": 31,
"usage_type": "name"
},
{
"api_name":... |
39354409559 | from sqlalchemy import func, desc, select, and_, distinct
from module_7.myconf.models import Grade, Teacher, Student, Group, Subject
from module_7.myconf.db import session
def select_01():
result = (
session.query(
Student.id,
Student.fullname,
func.round(func.avg(Grade.grade), 2).label('average_grade')).
select_from(Student).
join(Grade).group_by(Student.id).
order_by(desc('average_grade')).limit(5).all())
return result
def select_02():
result = session.query(Student.id, Student.fullname, func.round(func.avg(Grade.grade), 2).label('average_grade')) \
.select_from(Grade).join(Student).filter(Grade.subjects_id == 1).group_by(Student.id).order_by(
desc('average_grade')).limit(1).all()
return result
def select_03():
result = session.query(Student.group_id, func.avg(Grade.grade).label('average_grade')) \
.join(Grade, Student.id == Grade.student_id).join(Subject, Grade.subjects_id == Subject.id) \
.filter(Subject.id == 1).group_by(Student.group_id).all()
return result
def select_04():
result = session.query(func.avg(Grade.grade).label('average_grade')).scalar()
return result
def select_05():
result = (session.query(Subject.name).join(Subject.teacher).filter(Teacher.id == 1).all())
return result
def select_06():
students_list = session.query(Student).filter_by(group_id=1).all()
student_names = [student.fullname for student in students_list]
return student_names
def select_07():
result = session.query(Grade).join(Student).join(Subject).filter(Student.group_id == 1, Subject.id == 1).all()
# grade_ids = [grade.id for grade in grades_list]
return result
def select_08():
result = session.query(func.avg(Grade.grade)).join(Subject).filter(Subject.teacher_id == 1).scalar()
return result
def select_09():
result = session.query(Subject.name).join(Grade, Subject.id == Grade.subjects_id).filter(Grade.student_id == 1).distinct().all()
return result
def select_10():
result = session.query(Subject.name).join(Teacher, Teacher.id == Subject.teacher_id).filter(Teacher.id == 1).all()
return result
if __name__ == '__main__':
print(select_01())
print(select_02())
print(select_03())
print(select_04())
print(select_05())
print(select_06())
print(select_07())
print(select_08())
print(select_09())
print(select_10())
| KarinaNester/GoIT_homework_ | module_7/hw/query.py | query.py | py | 2,476 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "module_7.myconf.models.Grade",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "module_7.myconf.models.Student",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "module_7.myconf.db.session.query",
"line_number": 9,
"usage_type": "c... |
10576811082 | import json
from flask import Flask,render_template,request
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import requests
app = Flask(__name__)
url = "https://devapi.endato.com/PersonSearch"
scope = ['https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/drive.file']
credentials = ServiceAccountCredentials.from_json_keyfile_name('crud.json', scope)
client = gspread.authorize(credentials)
sheet = client.open('test4')
get_sheet = sheet.worksheet('Sheet1')
def getRecord():
Google_sheet_value = get_sheet.get_all_values()
store_record = Google_sheet_value[1:]
return store_record
def update_gsheet(payloads,sheet_range):
update_sheet = get_sheet.update(sheet_range,[payloads])
return update_sheet
@app.route('/', methods=['GET', 'POST'])
def Search():
sheet_range = 2
sheet_ranges = 1
count = 1
context = {}
all_data = list()
try:
if request.method == 'POST':
phone = request.form['phone']
payload = {"Phone": phone}
headers = {
"Accept": "application/json",
"galaxy-ap-name": "f5778850-ab32-401e-bca1-377606919ae0",
"galaxy-ap-password": "54e01deb091c4df2bef74481b5093453",
"galaxy-search-type": "Person",
"Content-Type": "application/json"
}
response = requests.post(url, json=payload, headers=headers)
data = json.loads(response.text)
get_data = data.get('persons')
for i in get_data:
# New.................................
dummy = i.get('associates')
for getdummy in dummy:
fName = getdummy.get('name').get('firstName')
LName = getdummy.get('name').get('lastName')
SFullName = fName+LName
firstName = i.get('name').get('firstName')
lastName = i.get('name').get('lastName')
fullName = firstName + ' ' + lastName
age = i.get('age')
dobFirstSeen = i.get('dobFirstSeen')
addres = i.get('addresses')
for getAddress in addres:
addresses = getAddress.get('fullAddress')
context = {
"Name": fullName,
"age": age,
"Dob": dobFirstSeen,
"addresses": addresses
}
all_data.append(context)
for k in all_data:
SName = k.get('Name')
SAge = k.get('age')
SDob = k.get('Dob')
Saddress = k.get('addresses')
Spayload = [SName,SAge,SDob,Saddress]
GoogleSheetRecords = getRecord()
for getRcd in GoogleSheetRecords:
if SName in getRcd:
sheet_ranges = f'Sheet1!A{count}:D{count}'
update_gsheet(Spayload,sheet_ranges)
else:
print(getRcd,"create","=====")
sheet_ranges +=1
get_sheet.insert_row(Spayload, sheet_range)
return render_template('home.html',all_data=all_data)
except:
return render_template('home.html')
if __name__ == '__main__':
app.run()
| arunthakur007/Flask_Api | main.py | main.py | py | 3,450 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",... |
29728137134 | """
This creates Figure 2.
"""
import numpy as np
from statsmodels.multivariate.pca import PCA
from .common import subplotLabel, getSetup
from ..tensor import perform_CMTF, calcR2X, tensor_degFreedom
from ..dataImport import createCube
from ..impute import flatten_to_mat
from matplotlib.ticker import ScalarFormatter
def makeFigure():
"""Get a list of the axis objects and create a figure"""
# Get list of axis objects
ax, f = getSetup((9, 3), (1, 3))
comps = np.arange(1, 12)
CMTFR2X = np.zeros(comps.shape)
PCAR2X = np.zeros(comps.shape)
sizeTfac = np.zeros(comps.shape)
tOrig, mOrig = createCube()
tMat = flatten_to_mat(tOrig, mOrig)
sizePCA = comps * np.sum(tMat.shape)
for i, cc in enumerate(comps):
outt = PCA(tMat, ncomp=cc, missing="fill-em", standardize=False, demean=False, normalize=False)
recon = outt.scores @ outt.loadings.T
PCAR2X[i] = calcR2X(recon, mIn=tMat)
tFac = perform_CMTF(r=cc)
CMTFR2X[i] = tFac.R2X
sizeTfac[i] = tensor_degFreedom(tFac)
ax[0].scatter(comps, CMTFR2X, s=10)
ax[0].set_ylabel("CMTF R2X")
ax[0].set_xlabel("Number of Components")
ax[0].set_xticks([x for x in comps])
ax[0].set_xticklabels([x for x in comps])
ax[0].set_ylim(0, 1)
ax[0].set_xlim(0.5, np.amax(comps) + 0.5)
ax[1].set_xscale("log", base=2)
ax[1].plot(sizeTfac, 1.0 - CMTFR2X, ".", label="CMTF")
ax[1].plot(sizePCA, 1.0 - PCAR2X, ".", label="PCA")
ax[1].set_ylabel("Normalized Unexplained Variance")
ax[1].set_xlabel("Size of Reduced Data")
ax[1].set_ylim(bottom=0.0)
ax[1].set_xlim(2 ** 8, 2 ** 12)
ax[1].xaxis.set_major_formatter(ScalarFormatter())
ax[1].legend()
# Scaling matrix
rats = np.arange(-8, 9, step=0.25)
tOrig, mOrig = createCube()
totalR2X = np.zeros(rats.shape)
CMTFR2X = np.zeros(rats.shape)
PCAR2X = np.zeros(rats.shape)
for ii, rat in enumerate(rats):
mScaled = mOrig * (2.0 ** rat)
tFac = perform_CMTF(tOrig=tOrig, mOrig=mScaled, r=5)
totalR2X[ii] = calcR2X(tFac, tOrig, mScaled)
CMTFR2X[ii] = calcR2X(tFac, tIn=tOrig)
PCAR2X[ii] = calcR2X(tFac, mIn=mScaled)
ax[2].plot(rats, totalR2X, label="Total")
ax[2].plot(rats, PCAR2X, label="Matrix")
ax[2].plot(rats, CMTFR2X, label="Tensor")
ax[2].set_ylabel("R2X")
ax[2].set_xlabel("Matrix scaled")
def rat2frac(rat):
if rat >= 0:
return str(2 ** rat)
else:
return '1/' + rat2frac(-rat)
ax[2].set_xlim(-7.5, 7.5)
# ax[2].set_ylim(0.8, 1.0)
ax[2].set_xticks(rats[::8])
ax[2].set_xticklabels([rat2frac(r) for r in rats[::8]])
ax[2].legend()
# Add subplot labels
subplotLabel(ax)
return f
| meyer-lab/systemsSerology | syserol/figures/figure2.py | figure2.py | py | 2,785 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "common.getSetup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numb... |
13094457575 | import torch
import torch.nn as nn
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
def get_target(anchor, bbox_annotation, classification, cuda):
#------------------------------------------------------#
# anchor num_anchors, 4
# bbox_annotation num_true_boxes, 5
# Iou num_anchors, num_true_boxes
#------------------------------------------------------#
IoU = calc_iou(anchor[:, :], bbox_annotation[:, :4])
#------------------------------------------------------#
# IoU_max num_anchors,
# IoU_argmax num_anchors,
#------------------------------------------------------#
IoU_max, IoU_argmax = torch.max(IoU, dim=1)
targets = torch.ones_like(classification) * -1
if cuda:
targets = targets.cuda()
#------------------------------------------#
# The coincidence degree is less than 0.4 and needs to participate in training
#------------------------------------------#
targets[torch.lt(IoU_max, 0.4), :] = 0
#--------------------------------------------------#
# The coincidence degree is greater than 0.5, you need to participate in training, and you need to calculate the regression loss
#--------------------------------------------------#
positive_indices = torch.ge(IoU_max, 0.5)
#--------------------------------------------------#
# Take out the ground truth box that most corresponds to each a priori box
#--------------------------------------------------#
assigned_annotations = bbox_annotation[IoU_argmax, :]
#--------------------------------------------------#
# Set the corresponding category to 1
#--------------------------------------------------#
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
#--------------------------------------------------#
# Calculate the number of positive samples
#--------------------------------------------------#
num_positive_anchors = positive_indices.sum()
return targets, num_positive_anchors, positive_indices, assigned_annotations
def encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y):
#--------------------------------------------------#
# Take out the true box corresponding to the a priori box as the positive sample
#--------------------------------------------------#
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
#--------------------------------------------------#
# Calculate the width, height and center of the real frame
#--------------------------------------------------#
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
#---------------------------------------------------#
# Use the real box and a priori box to encode to get the expected results
#---------------------------------------------------#
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dy, targets_dx, targets_dh, targets_dw))
targets = targets.t()
return targets
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
def forward(self, classifications, regressions, anchors, annotations, alpha = 0.25, gamma = 2.0, cuda = True):
#---------------------------#
# Get the size of batch_size
#---------------------------#
batch_size = classifications.shape[0]
#--------------------------------------------#
# Obtain the a priori box and convert the a priori box into the form of the center width and height
#--------------------------------------------#
dtype = regressions.dtype
anchor = anchors[0, :, :].to(dtype)
#--------------------------------------------#
# Convert a priori box into a center, width and height form
#--------------------------------------------#
anchor_widths = anchor[:, 3] - anchor[:, 1]
anchor_heights = anchor[:, 2] - anchor[:, 0]
anchor_ctr_x = anchor[:, 1] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 0] + 0.5 * anchor_heights
regression_losses = []
classification_losses = []
for j in range(batch_size):
#-------------------------------------------------------#
# Take out the real frame, type prediction result and regression prediction result corresponding to each picture
#-------------------------------------------------------#
bbox_annotation = annotations[j]
classification = classifications[j, :, :]
regression = regressions[j, :, :]
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if len(bbox_annotation) == 0:
#-------------------------------------------------------#
# When there is no real frame in the picture, all feature points are negative samples
#-------------------------------------------------------#
alpha_factor = torch.ones_like(classification) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
#-------------------------------------------------------#
# Calculate the cross entropy corresponding to the feature point
#-------------------------------------------------------#
bce = - (torch.log(1.0 - classification))
cls_loss = focal_weight * bce
classification_losses.append(cls_loss.sum())
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
continue
#------------------------------------------------------#
#
# targets num_anchors, num_classes
# num_positive_anchors number
# positive_indices num_anchors,
# assigned_annotations num_anchors, 5
#------------------------------------------------------#
targets, num_positive_anchors, positive_indices, assigned_annotations = get_target(anchor,
bbox_annotation, classification, cuda)
#------------------------------------------------------#
# cacul loss
#------------------------------------------------------#
alpha_factor = torch.ones_like(targets) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = - (targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
#------------------------------------------------------#
# set loss=0
#------------------------------------------------------#
zeros = torch.zeros_like(cls_loss)
if cuda:
zeros = zeros.cuda()
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, zeros)
classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.to(dtype), min=1.0))
if positive_indices.sum() > 0:
targets = encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y)
regression_diff = torch.abs(targets - regression[positive_indices, :])
regression_loss = torch.where(
torch.le(regression_diff, 1.0 / 9.0),
0.5 * 9.0 * torch.pow(regression_diff, 2),
regression_diff - 0.5 / 9.0
)
regression_losses.append(regression_loss.mean())
else:
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
c_loss = torch.stack(classification_losses).mean()
r_loss = torch.stack(regression_losses).mean()
loss = c_loss + r_loss
return loss, c_loss, r_loss
| sugarocket/object-detection-retinanet | nets/retinanet_training.py | retinanet_training.py | py | 10,231 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.min",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 7,
... |
15885760206 | from typing import Dict, Optional
import torch
import torch.nn as nn
from vc_tts_template.fastspeech2.fastspeech2 import FastSpeech2
from vc_tts_template.fastspeech2wContexts.context_encoder import ConversationalContextEncoder
from vc_tts_template.fastspeech2wContexts.prosody_model import PEProsodyEncoder
from vc_tts_template.fastspeech2.varianceadaptor import LengthRegulator
class FastSpeech2wContextswPEProsody(FastSpeech2):
""" FastSpeech2wContexts """
def __init__(
self,
max_seq_len: int,
num_vocab: int, # pad=0
# encoder
encoder_hidden_dim: int,
encoder_num_layer: int,
encoder_num_head: int,
conv_filter_size: int,
conv_kernel_size_1: int,
conv_kernel_size_2: int,
encoder_dropout: float,
# context encoder
context_encoder_hidden_dim: int,
context_num_layer: int,
context_encoder_dropout: float,
text_emb_dim: int,
peprosody_encoder_gru_dim: int,
peprosody_encoder_gru_num_layer: int,
shere_embedding: bool,
current_attention: bool,
past_global_gru: bool,
mel_embedding_mode: int,
pau_split_mode: int,
# mel_emb_dim: int,
# mel_emb_kernel: int,
# mel_emb_dropout: float,
peprosody_encoder_conv_kernel_size: int,
peprosody_encoder_conv_n_layers: int,
sslprosody_emb_dim: Optional[int],
sslprosody_layer_num: Optional[int],
use_context_encoder: bool,
use_prosody_encoder: bool,
use_peprosody_encoder: bool,
use_melprosody_encoder: bool,
last_concat: bool,
# variance predictor
variance_predictor_filter_size: int,
variance_predictor_kernel_size: int,
variance_predictor_dropout: int,
pitch_feature_level: int, # 0 is frame 1 is phoneme
energy_feature_level: int, # 0 is frame 1 is phoneme
pitch_quantization: str,
energy_quantization: str,
pitch_embed_kernel_size: int,
pitch_embed_dropout: float,
energy_embed_kernel_size: int,
energy_embed_dropout: float,
n_bins: int,
# decoder
decoder_hidden_dim: int,
decoder_num_layer: int,
decoder_num_head: int,
decoder_dropout: float,
n_mel_channel: int,
# other
encoder_fix: bool,
stats: Optional[Dict],
speakers: Dict,
emotions: Optional[Dict] = None,
accent_info: int = 0,
):
super().__init__(
max_seq_len,
num_vocab,
encoder_hidden_dim,
encoder_num_layer,
encoder_num_head,
conv_filter_size,
conv_kernel_size_1,
conv_kernel_size_2,
encoder_dropout,
variance_predictor_filter_size,
variance_predictor_kernel_size,
variance_predictor_dropout,
pitch_feature_level,
energy_feature_level,
pitch_quantization,
energy_quantization,
pitch_embed_kernel_size,
pitch_embed_dropout,
energy_embed_kernel_size,
energy_embed_dropout,
n_bins,
decoder_hidden_dim,
decoder_num_layer,
decoder_num_head,
decoder_dropout,
n_mel_channel,
encoder_fix,
stats,
speakers,
emotions,
accent_info,
)
# override to add padding_idx
n_speaker = len(speakers)
self.speaker_emb = nn.Embedding(
n_speaker,
encoder_hidden_dim,
padding_idx=0,
)
self.emotion_emb = None
if emotions is not None:
n_emotion = len(emotions)
self.emotion_emb = nn.Embedding(
n_emotion,
encoder_hidden_dim,
padding_idx=0,
)
if use_prosody_encoder is True:
# 外部で用意したglobal prosody embeddingを使う方式
raise RuntimeError("未対応です")
self.context_encoder = ConversationalContextEncoder(
d_encoder_hidden=encoder_hidden_dim,
d_context_hidden=context_encoder_hidden_dim,
context_layer_num=context_num_layer,
context_dropout=context_encoder_dropout,
text_emb_size=text_emb_dim,
prosody_emb_size=peprosody_encoder_gru_dim if sslprosody_emb_dim is None else sslprosody_emb_dim,
speaker_embedding=self.speaker_emb,
emotion_embedding=self.emotion_emb,
use_text_modal=use_context_encoder,
use_speech_modal=(use_peprosody_encoder or use_melprosody_encoder),
current_attention=current_attention,
past_global_gru=past_global_gru,
pau_split_mode=pau_split_mode > 0,
last_concat=last_concat,
)
if sslprosody_emb_dim is None:
if (stats is not None) and (use_prosody_encoder is True):
self.peprosody_encoder = PEProsodyEncoder(
peprosody_encoder_gru_dim,
peprosody_encoder_gru_num_layer,
pitch_embedding=self.variance_adaptor.pitch_embedding,
energy_embedding=self.variance_adaptor.energy_embedding,
pitch_bins=self.variance_adaptor.pitch_bins,
energy_bins=self.variance_adaptor.energy_bins,
shere_embedding=shere_embedding
)
else:
if use_peprosody_encoder is True:
self.peprosody_encoder = PEProsodyEncoder(
peprosody_encoder_gru_dim,
peprosody_encoder_gru_num_layer,
pitch_embedding=self.variance_adaptor.pitch_embedding,
energy_embedding=self.variance_adaptor.energy_embedding,
shere_embedding=shere_embedding
)
elif use_melprosody_encoder is True:
self.peprosody_encoder = PEProsodyEncoder(
peprosody_encoder_gru_dim,
peprosody_encoder_gru_num_layer,
pitch_embedding=None,
energy_embedding=None,
shere_embedding=shere_embedding,
n_mel_channel=n_mel_channel,
conv_kernel_size=peprosody_encoder_conv_kernel_size,
conv_n_layers=peprosody_encoder_conv_n_layers,
)
else:
self.peprosody_encoder = None # type:ignore
self.use_ssl = False
else:
if (use_prosody_encoder is True) or (use_peprosody_encoder is True) or (use_melprosody_encoder is True):
if sslprosody_layer_num > 1: # type:ignore
self.peprosody_encoder = nn.Conv1d( # type: ignore
in_channels=sslprosody_layer_num, # type: ignore
out_channels=1,
kernel_size=1,
bias=False,
)
else:
self.peprosody_encoder = None # type:ignore
else:
self.peprosody_encoder = None # type:ignore
self.use_ssl = True
self.use_context_encoder = use_context_encoder
self.use_peprosody_encoder = use_peprosody_encoder
self.use_melprosody_encoder = use_melprosody_encoder
self.length_regulator = LengthRegulator()
self.pau_split_mode = pau_split_mode > 0
self.sslprosody_layer_num = sslprosody_layer_num
def contexts_forward(
self,
output,
max_src_len,
c_txt_embs,
c_txt_embs_lens,
speakers,
emotions,
h_txt_embs,
h_txt_emb_lens,
h_speakers,
h_emotions,
h_prosody_embs,
h_prosody_embs_lens,
h_prosody_embs_len,
c_prosody_embs_phonemes,
):
if (self.use_peprosody_encoder or self.use_melprosody_encoder) is True:
if self.use_ssl is False:
h_prosody_emb = self.peprosody_encoder(
h_prosody_embs,
h_prosody_embs_lens,
)
else:
# h_prosody_embs: (B, hist_len, layer_num, dim)
if self.peprosody_encoder is not None:
batch_size = h_prosody_embs.size(0)
history_len = h_prosody_embs.size(1)
if h_prosody_embs.size(-2) == 1:
# batch全てPADのデータはこれになる
# これが最初に来ると,peprosody_encoderを通らないのでgrad = Noneになる
# そのためのexpand
h_prosody_embs = h_prosody_embs.expand(
batch_size, history_len,
self.sslprosody_layer_num, h_prosody_embs.size(-1)
)
h_prosody_embs = h_prosody_embs.view(-1, h_prosody_embs.size(-2), h_prosody_embs.size(-1))
h_prosody_emb = self.peprosody_encoder(
h_prosody_embs
).view(batch_size, history_len, -1)
else:
h_prosody_emb = h_prosody_embs.squeeze(-2)
else:
h_prosody_emb = None
context_enc_outputs = self.context_encoder(
c_txt_embs,
c_txt_embs_lens,
speakers,
emotions,
h_txt_embs,
h_txt_emb_lens, # [hist1, hist2, ...]
h_speakers,
h_emotions,
h_prosody_emb,
h_prosody_embs_len, # [hist1, hist2, ...]. h_txt_emb_lensとは違って1 start.
)
if type(context_enc_outputs) == tuple:
context_enc = context_enc_outputs[0]
attentions = context_enc_outputs[1:]
else:
context_enc = context_enc_outputs
attentions = None
if c_prosody_embs_phonemes is None:
output = output + context_enc.unsqueeze(1).expand(
-1, max_src_len, -1
)
else:
context_enc, _ = self.length_regulator(
context_enc, c_prosody_embs_phonemes, torch.max(c_prosody_embs_phonemes)
)
output = output + context_enc
return output, attentions
def forward(
self,
ids,
speakers,
emotions,
texts,
src_lens,
max_src_len,
c_txt_embs,
c_txt_embs_lens,
h_txt_embs,
h_txt_emb_lens,
h_speakers,
h_emotions,
c_prosody_embs,
c_prosody_embs_lens,
c_prosody_embs_duration,
c_prosody_embs_phonemes,
h_prosody_embs,
h_prosody_embs_lens,
h_prosody_embs_len,
h_local_prosody_emb=None,
h_local_prosody_emb_lens=None,
h_local_prosody_speakers=None,
h_local_prosody_emotions=None,
mels=None,
mel_lens=None,
max_mel_len=None,
p_targets=None,
e_targets=None,
d_targets=None,
p_control=1.0,
e_control=1.0,
d_control=1.0,
):
src_lens, max_src_len, src_masks, mel_lens, max_mel_len, mel_masks = self.init_forward(
src_lens, max_src_len, mel_lens, max_mel_len
)
output = self.encoder_forward(
texts, src_masks, max_src_len, speakers, emotions
)
output, attentions = self.contexts_forward(
output, max_src_len, c_txt_embs, c_txt_embs_lens,
speakers, emotions,
h_txt_embs, h_txt_emb_lens, h_speakers, h_emotions,
h_prosody_embs, h_prosody_embs_lens, h_prosody_embs_len,
c_prosody_embs_phonemes,
)
(
output,
p_predictions,
e_predictions,
log_d_predictions,
d_rounded,
mel_lens,
mel_masks,
) = self.variance_adaptor(
output,
src_masks,
mel_masks,
max_mel_len,
p_targets,
e_targets,
d_targets,
p_control,
e_control,
d_control,
)
output, postnet_output, mel_masks = self.decoder_forward(
output, mel_masks
)
return (
output,
postnet_output,
p_predictions,
e_predictions,
log_d_predictions,
d_rounded,
src_masks,
mel_masks,
src_lens,
mel_lens,
attentions,
)
| YutoNishimura-v2/vc_tts_template | vc_tts_template/fastspeech2wContexts/fastspeech2wContextswPEProsody.py | fastspeech2wContextswPEProsody.py | py | 12,920 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "vc_tts_template.fastspeech2.fastspeech2.FastSpeech2",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 45,
"usage_type": "name"
},
{
... |
73599315234 | import os
import shutil
import unittest
import uuid
import pyodbc
from ..pyodbc_helpers import *
class Test_pyodbc(unittest.TestCase):
@classmethod
def setUpClass(cls):
shutil.rmtree(cls.fix_tmproot())
@staticmethod
def fix_tmproot():
return os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp'))
def setUp(self):
self._dbc = None
self._tmpdir = None
def fix_tmpdir(self):
if self._tmpdir is not None:
return self._tmpdir
self._tmpdir = os.path.join(self.fix_tmproot(), uuid.uuid4().hex)
os.makedirs(self._tmpdir, exist_ok=True)
return self._tmpdir
def fix_dbc(self):
if self._dbc is not None:
return self._dbc
db_path = os.path.join(self.fix_tmpdir(), 'db.sqlite')
dbc = pyodbc.connect(f"Driver=SQLite3 ODBC Driver;Database={db_path}")
with dbc.cursor() as c:
c.execute('CREATE TABLE users (id INT, name VARCHAR(128))')
c.executemany('INSERT INTO users (id, name) VALUES (?,?)', [(1, 'John'), (2, 'Jane')])
c.commit()
return dbc
def test_connect(self):
dbc = self.fix_dbc()
def test_fetchall(self):
dbc = self.fix_dbc()
with dbc.cursor() as c:
c.execute('SELECT id, name FROM users ORDER BY id')
rows = c.fetchall()
self.assertEqual([(1, 'John'),(2, 'Jane')], [tuple(r) for r in rows])
def test_description(self):
dbc = self.fix_dbc()
with dbc.cursor() as c:
c.execute('SELECT id, name FROM users')
actual = [d[0] for d in c.description]
self.assertEqual(['id', 'name'], actual)
| ivangeorgiev/gems | legacy/src/pyodbc_helpers/tests/test_pyodbc.py | test_pyodbc.py | py | 1,803 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
9639292454 | import base64
import json
import rsa
import sympy
from fastapi import APIRouter, Depends, HTTPException, WebSocket, Header
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from starlette import status
from starlette.responses import Response
from starlette.websockets import WebSocketDisconnect
from src.auth.models import User
from src.chat.models import Chat, ChatUser, ChatPrime
from src.chat.schemas import RequestSchema, GetUserSchema, ReceiveChatSchema
from src.chat.utils import ConnectionManager, get_user_by_token_ws
from src.database import get_async_session
from src.utils import get_user_by_token, prepare_encrypted, RSA, get_current_user
router = APIRouter(tags=["Chat"], prefix="/chat")
@router.post("/get-users", responses={422: {"model": ""}})
async def get_users(encrypted: tuple[RequestSchema, User] = Depends(get_user_by_token),
server_private_key: rsa.PrivateKey = Depends(RSA.get_private_key),
session: AsyncSession = Depends(get_async_session)):
decrypted, user = encrypted
if not user or not user.public_key:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
user_public_key = rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER")
if not user.has_changed_password:
data = {
"status": "error",
"data": None,
"details": "password expired"
}
encrypted = prepare_encrypted(data, server_private_key, user_public_key)
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=encrypted)
try:
query = select(User).filter_by(is_active=True)
result = await session.execute(query)
result = result.scalars().all()
user_public_key = rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER")
data = [GetUserSchema(id=item.id, name=item.name, username=item.username).dict() for item in result]
data = {
"status": "success",
"data": data,
"details": None
}
encrypted = prepare_encrypted(data, server_private_key, user_public_key)
response = Response(status_code=status.HTTP_200_OK, content=encrypted,
media_type="application/octet-stream")
return response
except Exception:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
@router.post("/new", responses={422: {"model": ""}})
async def create_chat(encrypted: tuple[RequestSchema, User] = Depends(get_user_by_token),
session: AsyncSession = Depends(get_async_session)):
decrypted, user = encrypted
if not user or not user.public_key:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
user_public_key = rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER")
if not user.has_changed_password:
data = {
"status": "error",
"data": None,
"details": "password expired"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=encrypted)
try:
users = decrypted.data.payload.users
chat_type = 1 if len(users) > 1 else 0
users.append(user.id)
name = decrypted.data.payload.name or "New Chat"
g = sympy.randprime(int(0x1000000000000000000000000000000000000000000000000000000000000000),
int(0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff))
p = 2 * g + 1
while not sympy.isprime(p):
g = sympy.randprime(int(0x1000000000000000000000000000000000000000000000000000000000000000),
int(0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff))
p = 2 * g + 1
chat = Chat(type_id=chat_type, name=name)
try:
session.add(chat)
await session.flush()
session.add_all([ChatUser(chat_id=chat.id, user_id=chat_user) for chat_user in users])
session.add(ChatPrime(p=str(p), g=str(g), chat_id=chat.id))
except Exception:
await session.rollback()
raise
else:
await session.commit()
await session.refresh(chat)
active_users = connection.find_all_chat_users(users)
data = {
"status": "success",
"data": [ReceiveChatSchema(id=chat.id,
type=chat.type_id,
name=chat.name,
users=[GetUserSchema(id=u.id,
username=u.username,
name=u.name).dict() for u in chat.users]
).dict()],
"details": None
}
# message = prepare_encrypted(data, RSA.get_private_key(),
# rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER"))
for au in active_users:
await connection.send_message_to(au, json.dumps({"data": data, "signature": "signature"}).encode())
data = {
"status": "success",
"data": {"chat_id": chat.id, "p": str(p), "g": str(g)},
"details": None
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
response = Response(status_code=status.HTTP_201_CREATED,
content=encrypted, media_type="application/octet-stream")
return response
except Exception as ex:
await session.rollback()
data = {
"status": "error",
"data": None,
"details": "invalid data"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=encrypted)
@router.patch("/{chat_id}", responses={422: {"model": ""}})
async def update_chat(chat_id: int,
encrypted: tuple[RequestSchema, User] = Depends(get_user_by_token),
session: AsyncSession = Depends(get_async_session)):
decrypted, user = encrypted
if not user or not user.public_key:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
user_public_key = rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER")
if not user.has_changed_password:
data = {
"status": "error",
"data": None,
"details": "password expired"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=encrypted)
try:
query = select(Chat).filter_by(id=chat_id)
result = await session.execute(query)
chat: Chat = result.scalars().unique().first()
users = decrypted.data.payload.users
users.append(user.id)
query = select(User).filter(User.id.in_(users))
result = await session.execute(query)
new_users = result.scalars().unique().all()
name = decrypted.data.payload.name or chat.name
chat.name = name
chat.users = new_users
# todo: можно менять тип чата в зависимости от кол-ва юзеров
# chat.type_id = 1 if len(new_users) > 1 else 0
try:
session.add(chat)
except Exception:
await session.rollback()
raise
else:
await session.commit()
data = {
"status": "success",
"data": {"users": users, "name": name},
"details": None
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
response = Response(status_code=status.HTTP_200_OK,
content=encrypted, media_type="application/octet-stream")
return response
except Exception as ex:
data = {
"status": "error",
"data": None,
"details": "invalid data"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=encrypted)
connection = ConnectionManager()
@router.websocket("/ws")
async def websocket_rooms(websocket: WebSocket,
session: AsyncSession = Depends(get_async_session),
user: User = Depends(get_user_by_token_ws)):
try:
await connection.connect(websocket, user)
ids, message = await connection.receive_chats(websocket, user, session)
await connection.send_message_to(websocket, message)
await connection.receive_messages(websocket, user, session, ids)
while True:
await websocket.receive_bytes()
except WebSocketDisconnect:
connection.disconnect(websocket)
except Exception as err:
# todo: переписать исключение
print(err)
connection.disconnect(websocket)
await websocket.close(code=status.WS_1006_ABNORMAL_CLOSURE)
@router.websocket("/ws/{chat_id}")
async def websocket_rooms(chat_id: int,
websocket: WebSocket,
session: AsyncSession = Depends(get_async_session),
user: User = Depends(get_user_by_token_ws)):
try:
await connection.connect_to_chat(websocket, session, user, chat_id)
ids, _ = await connection.receive_chats(websocket, user, session)
if chat_id not in ids:
raise WebSocketDisconnect
await connection.receive_messages_from_chat(websocket, session, chat_id)
while True:
message = await websocket.receive_bytes()
await connection.send_message(websocket, session, user.id, chat_id, message)
# todo: полученные байты
# а) если группа - отправить на клиенты + сохранить в бд (всё хранится в виде байтов)
# б) если личные - отправить на клиенты (убедиться, что сообщение доставлено
except WebSocketDisconnect:
connection.disconnect(websocket)
except Exception as err:
# todo: переписать исключение
connection.disconnect(websocket)
await websocket.close(code=status.WS_1006_ABNORMAL_CLOSURE)
@router.post("/send-keys", responses={422: {"model": ""}})
async def send_keys(encrypted: tuple[RequestSchema, User] = Depends(get_user_by_token),
session: AsyncSession = Depends(get_async_session)):
decrypted, user = encrypted
decrypted = RequestSchema.parse_obj(decrypted)
if not user or not user.public_key:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
user_public_key = rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER")
if not user.has_changed_password:
data = {
"status": "error",
"data": None,
"details": "password expired"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=encrypted)
try:
query = select(ChatUser).filter_by(chat_id=decrypted.data.payload.chat_id).filter_by(user_id=user.id)
result = await session.execute(query)
result = result.scalars().first()
try:
result.public_key = decrypted.data.payload.public_key
session.add(result)
except Exception:
await session.rollback()
raise
else:
await session.commit()
await session.refresh(result)
active_users = connection.find_chat_active_users(decrypted.data.payload.chat_id)
query = select(ChatPrime).filter_by(chat_id=decrypted.data.payload.chat_id)
result = await session.execute(query)
chat_primes = result.scalars().unique().first()
query = select(ChatUser).filter_by(chat_id=decrypted.data.payload.chat_id)
result = await session.execute(query)
chat_public = result.scalars().unique().all()
user_primes = []
for item in chat_public:
user_primes.append({
"user_id": item.user_id,
"key": str(item.public_key)
})
data = {
"status": "success",
"data": {"chat_id": decrypted.data.payload.chat_id, "p": str(chat_primes.p), "g": str(chat_primes.g),
"public_keys": user_primes},
"details": None
}
# message = prepare_encrypted(data, RSA.get_private_key(),
# rsa.PublicKey.load_pkcs1(base64.b64decode(user.public_key), "DER"))
for au in active_users:
try:
await connection.send_message_to(au.get("ws"), json.dumps(
data).encode()) # json.dumps({"data": data, "signature": "signature"}).encode())
except Exception as ex:
print(ex)
pass
data = {
"status": "success",
"data": None,
"details": "sent successfully"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
response = Response(status_code=status.HTTP_200_OK,
content=encrypted, media_type="application/octet-stream")
return response
except Exception as ex:
await session.rollback()
data = {
"status": "error",
"data": None,
"details": "invalid data"
}
encrypted = prepare_encrypted(data, RSA.get_private_key(), user_public_key)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=encrypted)
| coplant/di-secured-chat | src/chat/router.py | router.py | py | 14,181 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "src.chat.schemas.RequestSchema",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "src.auth.models.User",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": ... |
5269804326 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^$question_one/$', views.question_one, name='question_one'),
url(r'^question_two/$',views.question_two, name='question_two'),
url(r'^question_three/$', views.question_three, name = 'question_three'),
] | blondiebytes/Learn-It-Girl-Project | TravelMetrics/mysite/travelmetrics/urls.py | urls.py | py | 321 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
31265768032 | #!/usr/bin/env python
import os, sys
import argparse
import toml
import asteval
from collections import namedtuple
import math
import numpy as np
import lmfit
from scipy.linalg import norm
import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
from pbpl import common
from pbpl import compton
from pbpl.common.units import *
from num2tex import num2tex
from functools import reduce
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Calculate energy scale from CST trajectory map',
epilog='''\
Example:
.. code-block:: sh
pbpl-compton-calc-energy-scale calc-energy-scale.toml
''')
parser.add_argument(
'config_filename', metavar='conf-file',
help='Configuration file')
return parser
def get_args():
parser = get_parser()
args = parser.parse_args()
args.conf = toml.load(args.config_filename)
return args
def get_energy(gin):
m0 = gin['m0'][()]*kg
p0 = gin['p'][0]*m0*c_light
E0 = np.sqrt(norm(p0)**2*c_light**2 + m0**2*c_light**4)
KE = E0 - m0*c_light**2
return KE
def fit_func(x, c0, c1, c2, c3):
return c0 + c1*x + c2*x**2 + c3*x**3
Axis = namedtuple('Axis', 'label unit xlim')
def get_axis(aeval, label, unit, xlim):
xlim = aeval(xlim)
if xlim is not None:
xlim = np.array(xlim)
return Axis(label, aeval(unit), xlim)
def plot_annotation(ax, aeval, conf):
if 'Annotation' in conf:
for aconf in conf['Annotation']:
text = ''
for s in aconf['Text']:
text += aeval(s) + '\n'
kwargs = {}
if 'Size' in aconf:
kwargs['size'] = aconf['Size']
ax.text(
*aconf['Location'], text, va='top',
transform=ax.transAxes, **kwargs)
def main():
args = get_args()
conf = args.conf
# create safe interpreter for evaluation of configuration expressions
aeval = asteval.Interpreter(use_numpy=True)
for q in common.units.__all__:
aeval.symtable[q] = common.units.__dict__[q]
pconf = conf['Projection']
M = compton.build_transformation(pconf['Transformation'], mm, deg)
prefilter = np.array(pconf['Prefilter'])*mm
postfilter = np.array(pconf['Postfilter'])*mm
energy = []
position = []
x = []
E0 = []
with h5py.File(conf['Files']['Input'], 'r') as fin:
for gin in fin.values():
x.append(
(gin['x'][0]*meter, compton.transform(M, gin['x'][-1]*meter)))
E0.append(get_energy(gin))
x = np.array(x)
E0 = np.array(E0)
prefilter_mask = compton.in_volume(prefilter, x[:,0,:])
x_pre = x[prefilter_mask,:,:]
E0_pre = E0[prefilter_mask]
postfilter_mask = compton.in_volume(postfilter, x_pre[:,1,:])
x_post = x_pre[postfilter_mask,:,:]
E0_post = E0_pre[postfilter_mask]
energy = E0_post.copy()
position = x_post[:,1,2].copy()
args = np.argsort(energy)
energy = energy[args]
position = position[args]
mod = lmfit.Model(fit_func)
params = mod.make_params(c0=0.0, c1=0.0, c2=0.0, c3=0.0)
result = mod.fit(
data=np.log(energy/MeV), x=position, params=params)
v = result.params.valuesdict()
x_fit = np.linspace(position[0], position[-1], 200)
common.setup_plot()
fig = plot.figure(figsize=np.array(conf['Plot']['FigSize'])/72)
ax = fig.add_subplot(1, 1, 1)
axes = [get_axis(aeval, *conf['Plot'][x]) for x in ['XAxis', 'YAxis']]
ax.semilogy(
x_fit/axes[0].unit, np.exp(result.eval(x=x_fit)), linewidth=0.6)
ax.semilogy(
position/axes[0].unit, energy/axes[1].unit,
marker='.', ls='', markersize=2.0, markeredgewidth=0,
color='k')
aeval.symtable['fitval'] = v
aeval.symtable['num2tex'] = num2tex
plot_annotation(ax, aeval, conf['Plot'])
ax.set_xlabel(axes[0].label, labelpad=-1.0)
ax.set_ylabel(axes[1].label, labelpad=2.0)
ax.set_xlim(*axes[0].xlim)
ax.set_ylim(*axes[1].xlim)
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
filename = conf['Files']['PlotOutput']
path = os.path.dirname(filename)
if path != '':
os.makedirs(path, exist_ok=True)
plot.savefig(filename, transparent=True)
if 'CalcOutput' in conf['Files']:
filename = conf['Files']['CalcOutput']
path = os.path.dirname(filename)
if path != '':
os.makedirs(path, exist_ok=True)
calc_output = {
'EnergyScaleCoefficients' :
{ 'c0' : float(v['c0']),
'c1' : float(v['c1']*mm),
'c2' : float(v['c2']*mm**2),
'c3' : float(v['c3']*mm**3) } }
with open(filename, 'w') as fout:
toml.dump(calc_output, fout)
if __name__ == '__main__':
sys.exit(main())
| ucla-pbpl/pbpl-compton | pbpl/compton/calc_energy_scale.py | calc_energy_scale.py | py | 4,897 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 23,
"usage_type": "attribute"
},
{
"... |
9658243428 | # -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
client=MongoClient(os.environ['database'])
db=client.futuremessages
users=db.users
symbols=['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
try:
pass
except Exception as e:
print('Ошибка:\n', traceback.format_exc())
bot.send_message(441399484, traceback.format_exc())
@bot.message_handler()
def add(m):
user=users.find_one({'id':m.from_user.id})
if user==None:
users.insert_one(createuser(m.from_user))
user=users.find_one({'id':m.from_user.id})
if m.text[:5]=='/list':
text=''
for ids in user['futuremsgs']:
msg=user['futuremsgs'][ids]
text+='`'+msg['code']+'`\n'
if text=='':
text='Список пуст!\n'
bot.send_message(m.chat.id, 'Список отложенных сообщений:\n\n'+text+'\nЧтобы просмотреть сообщение: `/show code`\nЧтобы удалить сообщение: `/del code`', parse_mode='markdown')
elif m.text=='/start':
bot.send_message(m.chat.id, 'Статус бота: работает. Откройте список команд для использования бота.')
elif m.text[:5]=='/show':
try:
code=m.text.split(' ')[1]
msg=user['futuremsgs'][code]
bot.send_message(m.chat.id, msg['msg'])
except:
bot.send_message(m.chat.id, 'Сообщение не найдено!')
elif m.text[:4]=='/del':
try:
code=m.text.split(' ')[1]
msg=user['futuremsgs'][code]
users.update_one({'id':user['id']},{'$unset':{'futuremsgs.'+code:1}})
bot.send_message(m.chat.id, 'Сообщение "'+msg['msg']+'" успешно удалено!')
except:
bot.send_message(m.chat.id, 'Сообщение не найдено!')
elif m.text[:4]=='/add':
users.update_one({'id':user['id']},{'$set':{'status':'adding'}})
bot.send_message(m.chat.id, 'Напишите сообщение, которое я отправлю вам позже.')
elif user['status']=='adding':
msg=createmsg(user, m.text)
users.update_one({'id':user['id']},{'$set':{'futuremsgs.'+msg['code']:msg}})
users.update_one({'id':user['id']},{'$set':{'status':'addtime'}})
users.update_one({'id':user['id']},{'$set':{'code':msg['code']}})
bot.send_message(m.chat.id, 'Отлично! А теперь выберите, через сколько времени я пришлю это вам. Формат:\n1d2h3m33s'+
' - бот пришлёт вам сообщение через 1 день, 2 часа, 3 минуты и 33 секунды.')
elif user['status']=='addtime':
try:
days=int(m.text.split('d')[0])
m.text=m.text.split('d')[1]
except:
days=None
try:
hours=int(m.text.split('h')[0])
m.text=m.text.split('h')[1]
except:
hours=None
try:
minutes=int(m.text.split('m')[0])
m.text=m.text.split('m')[1]
except:
minutes=None
try:
secs=int(m.text.split('s')[0])
except:
secs=None
ftime=time.time()+3*3600
ctime=ftime
text=''
if days!=None:
ftime+=days*86400
text+=str(days)+' дней, '
if hours!=None:
ftime+=hours*3600
text+=str(hours)+' часов, '
if minutes!=None:
ftime+=minutes*60
text+=str(minutes)+' минут, '
if secs!=None:
ftime+=secs
text+=str(secs)+' секунд, '
if ftime!=ctime:
text=text[:len(text)-2]
text+='.'
users.update_one({'id':user['id']},{'$set':{'futuremsgs.'+user['code']+'.time':ftime}})
bot.send_message(m.chat.id, 'Вы успешно установили отправку сообщения! Вы получите его через '+text)
users.update_one({'id':user['id']},{'$set':{'status':'free', 'code':None}})
def createmsg(user, msg):
code=createcode(user)
return {
'code':code,
'msg':msg,
'time':None,
}
def createcode(user):
i=0
ltrs=3
code=''
while i<ltrs:
code+=random.choice(symbols)
i+=1
while code in user['futuremsgs']:
code=''
i=0
while i<ltrs:
code+=random.choice(symbols)
i+=1
return code
def createuser(user):
return {
'id':user.id,
'futuremsgs':{},
'name':user.first_name,
'status':'free',
'code':None
}
def timecheck():
globaltime=time.time()+3*3600
for ids in users.find({}):
user=ids
for idss in user['futuremsgs']:
try:
if user['futuremsgs'][idss]['time']<=globaltime:
bot.send_message(user['id'], user['futuremsgs'][idss]['msg'])
users.update_one({'id':user['id']},{'$unset':{'futuremsgs.'+idss:1}})
except:
pass
t=threading.Timer(3, timecheck)
t.start()
timecheck()
print('7777')
bot.polling(none_stop=True,timeout=600)
| egor5q/futuremessages | bot.py | bot.py | py | 5,881 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "telebot.TeleBot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
6202228261 | from django.conf.urls import url
from django.urls import path,include
from blog import views
urlpatterns =[
url(r'^about/$',views.AboutView.as_view(),name = "about"),
url(r'^$',views.PostListView.as_view(), name ="post_list"),
url(r'^posts/(?P<pk>\d+)$', views.PostDetailView.as_view(),name = "post_detail"),
url(r'^posts/create/$',views.CreatePostView.as_view(), name = "create_post"),
url(r'^posts/(?P<pk>\d+)/update$',views.PostUpdateView.as_view(), name = "post_edit"),
url(r"^posts/(?P<pk>\d+)/remove$",views.PostDeleteView.as_view(), name="post_remove"),
url(r"^posts/drafts$",views.DraftListView.as_view(),name='post_draft_list'),
url(r"^post/comment/(?P<pk>\d+)$",views.add_comment_to_post,name = "add_comment_to_post"),
url(r"^post/comment/(?P<pk>\d+)/approve$",views.comment_approve,name = "comment_approve"),
url(r"^post/comment/(?P<pk>\d+)/delete$",views.remove_comment, name = "remove_comment"),
url(r"^posts/(?P<pk>\d+)/publish$",views.publish_post,name = "publish_post")
] | AttalaKheireddine/bloggo | bloggo/blog/urls.py | urls.py | py | 1,032 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "blog.views.AboutView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "blog.views.AboutView",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name... |
1782776232 | import sympy
import random
def gcd(a, b): # greatest common divisor
if b == 0:
return a
else:
return gcd(b, a % b)
def euler_func(n): # Euler's totient function
count = 0
for number in range(n):
if gcd(number, n) == 1:
count += 1
return count
def check(root, module):
if root ** euler_func(module) % module == 1:
for number in range(1, euler_func(module)):
if root ** number % module == 1:
return False
return True
else:
return False
def primitive_root(module):
root = 1
while not check(root, module):
root += 1
return root
def generate_x(module):
x = random.randint(1, p)
while gcd(x, module - 1) != 1:
x = random.randint(1, p)
return x
if __name__ == "__main__":
p = sympy.prime(random.randint(1, 1000)) # nth prime number
print("Your prime number (p):", p)
print("Euler function result:", euler_func(p))
g = primitive_root(p)
print("Primitive root (a):", g)
x = generate_x(p)
print("Private key (x):", x)
y = g ** x % p
print("Public key (y):", str(y))
print("Write your number:")
m = int(input())
k = generate_x(p)
print("Session key (k):", k)
a = g ** k % p
b = y ** k * m % p
print("Your cipher (a, b):", a, b)
decrypt = b * (a ** (p - 1 - x)) % p
print("Decrypt:", decrypt)
| Timofey21/cryptography | Elgamal.py | Elgamal.py | py | 1,436 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sympy.prime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_... |
15023496745 | #!/usr/bin/env python3
#./call.py -f data.txt -u http://192.168.1.145:8080 -e dpm
from argparse import ArgumentParser
from time import sleep
import requests
import sys
import json
parser = ArgumentParser()
parser.add_argument("-f", "--file", dest="file",
help="Line separated file of qrcode value", metavar="FILE")
parser.add_argument("-u", "--url", dest="url",
help="Base url of the rpi instance", metavar="URL")
parser.add_argument("-e", "--event", dest="event",
help="Event short name", metavar="EVENT")
args = parser.parse_args()
if args.file == None:
print('file is required')
sys.exit()
if args.url == None:
print('url is required')
sys.exit()
if args.event == None:
print('event is required')
sys.exit()
print(f'file is: {args.file}\nurl is: {args.url}\nevent is: {args.event}')
event = args.event
file = args.file
base_url = args.url
url_to_call_base = base_url + '/admin/api/check-in/event/' + event + '/ticket/'
def call_check_in(ticket_data):
ticket_id = ticket_data.split('/')[0]
url_to_call = url_to_call_base + ticket_id
res = requests.post(url_to_call, json = {"code": ticket_data})
print(res.text)
with open(file, "r") as ins:
for line in ins:
call_check_in(line.strip())
sleep(1) #sleep 1s
| syjer/alf.io-PI-test | call.py | call.py | py | 1,318 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number... |
75061513312 | import requests
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
import json
def get_rail_data():
parsed_data = []
url = 'http://api.irishrail.ie/realtime/realtime.asmx/getStationDataBsoupCodeXML_WithNumMins?StationCode=ENFLD&NumMins=90&format=xml'
data = requests.get(url)
data = data.content
print(data)
# soup = BeautifulSoup(data)
# for i in soup.find_all('objstationdata'):
# data_item = {}
# print("********")
# for x in i.find_all():
# print(x.name)
# # if len(x.contents) == 1:
# data_item[x.name] = x.contents[0]
# parsed_data.append(data_item)
def get_rail_stations():
parsed_data = []
lookup_data = {}
url = 'http://api.irishrail.ie/realtime/realtime.asmx/getAllStationsXML'
data = requests.get(url)
data = data.content
soup = BeautifulSoup(data)
for i in soup.find_all('objstation'):
lookup_data[i.stationdesc.contents[0]] = i.stationcode.contents[0]
with open('lookup_rail.json', 'w') as f:
json.dump(lookup_data, f)
get_rail_stations() | benedictmc/CS402 | Question 5/get_rail.py | get_rail.py | py | 1,123 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_numb... |
39412984487 | import requests
class YaUploader:
def __init__(self, token: str):
self.token = token
def upload_file(self, loadfile, savefile, replace=False):
"""Загрузка файла.
savefile: Путь к файлу на Диске
loadfile: Путь к загружаемому файлу"""
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': f'OAuth {self.token}'}
URL = "https://cloud-api.yandex.net/v1/disk/resources"
res = requests.get(f'{URL}/upload?path={savefile}&overwrite={replace}', headers=headers).json()
with open(loadfile, 'rb') as f:
try:
requests.put(res['href'], files={'file': f})
except KeyError:
print(res)
if __name__ == '__main__':
# Получить путь к загружаемому файлу и токен от пользователя
path_to_file = ""
file_name = "file"
yandex_disk_path = f"Test/{file_name}"
token = ""
uploader = YaUploader(token)
result = uploader.upload_file(savefile=yandex_disk_path, loadfile=path_to_file) | Thunderouse/HW_YandexDisk | main.py | main.py | py | 1,165 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 17,
"usage_type": "call"
}
] |
18359330872 | #!/usr/bin/python3
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
browser = webdriver.Firefox(executable_path='/home/ashika/selenium/geckodriver')
browser.set_window_size(900,900)
browser.set_window_position(0,0)
sleep(1)
browser.get("https://en.wikipedia.org/wiki/Home_page")
# assert 'Wikipedia' in browser.title
sleep(1)
browser.find_element_by_id("searchInput").send_keys("Selenium")
sleep(2)
browser.find_element_by_id("searchInput").send_keys(Keys.RETURN)
sleep(5)
browser.close()
| Ashikav/demo-repo | demo.py | demo.py | py | 543 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
2549773164 | #!/usr/bin/env python3
import torch
import horovod.torch as hvd
torch.backends.cudnn.benchmark=True
# Initialize Horovod
hvd.init()
# Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
import argparse
import sys
import torch
import logging
import time
import math
import os
import torch.nn as nn
from loader import val_cls_loader, uint8_normalize
from tensorboardX import SummaryWriter
_FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
logging.root.handlers = []
logging.basicConfig(
level=logging.INFO, format=_FORMAT, stream=sys.stdout
)
logger = logging.getLogger(__name__)
logger.info('hvd info, size %s, rank %s, local_rank %s.', hvd.size(), hvd.rank(), hvd.local_rank())
from train_self_superv import parse_args, topks_correct
def load_last_checkpoint(dir_to_checkpoint, model, name=None):
if name is None:
names = os.listdir(dir_to_checkpoint) if os.path.exists(dir_to_checkpoint) else []
names = [f for f in names if "checkpoint" in f]
if len(names) == 0:
return None
name = sorted(names)[-1]
path_to_checkpoint = os.path.join(dir_to_checkpoint, name)
# Load the checkpoint on CPU to avoid GPU mem spike.
checkpoint = torch.load(path_to_checkpoint, map_location="cpu")
model.load_state_dict(checkpoint["model_state"])
ckp_step = int(name.split('.')[0].split('-')[-1])
logger.info('checkpoint loaded from %s (ckp_step %s).', path_to_checkpoint, ckp_step)
return ckp_step
def get_loader(batch_size):
dataset, loader = val_cls_loader(
'data/val.txt', 'http://filer.ai.yy.com:9889/dataset/heliangliang/imagenet/val/',
batch_size=batch_size, threads=32, hvd=hvd)
return loader
def main():
args = parse_args()
batch_size = 64
#TENSORBOARD_LOG_DIR = './checkpoints/log-fc-1/val'
#OUTPUT_DIR = './checkpoints/ckpt-fc-1'
TENSORBOARD_LOG_DIR = './ckpt-byol/imagenet-lr-0.1/log-fc/val'
OUTPUT_DIR = './ckpt-byol/imagenet-lr-0.1/ckpt-fc'
loader = get_loader(batch_size)
import torchvision.models as models
model = models.__dict__['resnet50']().cuda()
#from resnet_x2 import resnet50
#model = resnet50(num_classes=1000).cuda()
model.eval()
data_size = len(loader)
t0 = time.time()
logger.info('rank %s, data_size %s', hvd.rank(), data_size)
total_iter = 0
if hvd.rank() == 0:
ckp_step = load_last_checkpoint(OUTPUT_DIR, model)
if hvd.size() > 1:
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
logger.info('rank %s, total_iter %s', hvd.rank(), total_iter)
top1_acc_all = []
for cur_iter, (images, target) in enumerate(loader):
images = uint8_normalize(images.cuda(non_blocking=True))
target = target.cuda(non_blocking=True)
with torch.no_grad():
output = model(images)
num_topks_correct = topks_correct(output, target, [1])
top1_acc = num_topks_correct[0] / output.size(0)
if hvd.size() > 1:
top1_acc = hvd.allreduce(top1_acc)
cur_epoch = total_iter / data_size
top1_acc_all.append(top1_acc)
if hvd.rank() == 0:
t = time.time()
logger.info('epoch %.6f, iter %s, top1_acc %.6f (%.6f), step time %.6f',
cur_epoch, total_iter, top1_acc, sum(top1_acc_all)/len(top1_acc_all), t-t0)
t0 = t
total_iter += 1
if hvd.rank() == 0:
writer = SummaryWriter(TENSORBOARD_LOG_DIR)
writer.add_scalar('1-top1_acc', sum(top1_acc_all)/len(top1_acc_all), ckp_step)
if __name__ == "__main__":
main()
| Yidi299/yy_moco | val_fc.py | val_fc.py | py | 3,706 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.backends",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "horovod.torch.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "horovod.torch",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.cuda.set_devi... |
7178934887 | """This module contains the likes API."""
from flask_jwt_extended import (
get_jwt_identity,
jwt_required,
)
from flask_restful import (
marshal_with,
reqparse,
Resource,
)
from sqlalchemy.exc import IntegrityError
from . import db_client
from .fields import quote_fields, quotes_fields
from .utils import get_quote_or_404
class Likes(Resource):
"""Resource for likes."""
@classmethod
@marshal_with(quotes_fields)
@jwt_required
def get(cls):
"""Returns the liked quotes of the current user."""
parser = reqparse.RequestParser()
parser.add_argument('page', type=int, location='args')
parser.add_argument('per_page', type=int, location='args')
args = parser.parse_args()
page = args['page']
per_page = args['per_page']
current_user = get_jwt_identity()
return db_client.get_user_liked_quotes(page, per_page, current_user['id'])
@classmethod
@marshal_with(quote_fields)
@jwt_required
def post(cls):
"""Creates a like for the current user."""
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=True)
args = parser.parse_args()
current_user = get_jwt_identity()
quote = get_quote_or_404(args['id'], current_user['id'])
try:
db_client.create_like({
'user_id': current_user['id'],
'quote_id': quote.id
})
except IntegrityError:
return {'success': False}
quote.is_liked = True
return quote
class Like(Resource):
"""Resource for like."""
@classmethod
@marshal_with(quote_fields)
@jwt_required
def delete(cls, quote_id):
"""Deletes a like from the current user."""
current_user = get_jwt_identity()
quote = get_quote_or_404(quote_id, current_user['id'])
try:
like = db_client.get_like(current_user['id'], quote.id)
db_client.delete_like(like)
except AttributeError:
return {'success': False}
quote.is_liked = False
return quote
| bertdida/devquotes-flask | devquotes/routes/like.py | like.py | py | 2,152 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask_restful.Resource",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask_restful.reqparse.RequestParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_restful.reqparse",
"line_number": 28,
"usage_type": "name"
},
{
... |
38273906592 | from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
p1 = m - 1
p2 = n - 1
tail = n + m - 1
while p1 >= 0 or p2 >= 0:
if p1 == -1:
nums1[tail] = nums2[p2]
p2 -= 1
elif p2 == -1:
nums1[tail] = nums1[p1]
p1 -= 1
elif nums1[p1] > nums2[p2]:
nums1[tail] = nums1[p1]
p1 -= 1
else:
nums1[tail] = nums2[p2]
p2 -= 1
tail -= 1
if __name__ == '__main__':
nums1 = [0]
m = 0
nums2 = [1]
n = 1
Solution().merge(nums1, m, nums2, n)
| qiaocco/learn-data-structure | 刷题/88.py | 88.py | py | 818 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
34477334741 | #!/usr/bin/env python3
import logging
import functools
import rpyc
import threading
import random
import time
THREAD_SAFE = True # Toggles thread safe and unsafe behavior
def synchronize(lock):
""" Decorator that invokes the lock acquire call before a function call and releases after """
def sync_func(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
lock.acquire()
res = func(*args, **kwargs)
lock.release()
return res
return wrapper
return sync_func
class SharingComponent(object):
""" Initialized in the class definition of SharingService and shared by all instances of SharingService """
lock = threading.Lock()
def __init__(self):
self.sequence_id = 0
def sleepy_sequence_id(self):
""" increment id and sometimes sleep to force race condition """
self.sequence_id += 1
_expected_sequence_id = self.sequence_id
if random.randint(0, 1) == 1:
time.sleep(1)
if self.sequence_id == _expected_sequence_id:
return self.sequence_id
else:
raise RuntimeError("Unexpected sequence_id behavior (race condition).")
@synchronize(lock)
def get_sequence_id(self):
""" provides a thread-safe execution frame to otherwise unsafe functions """
return self.sleepy_sequence_id()
class SharingService(rpyc.Service):
""" A class that allows for sharing components between connection instances """
__shared__ = SharingComponent()
@property
def shared(self):
""" convenient access to an otherwise long object name """
return SharingService.__shared__
def exposed_echo(self, message):
""" example of the potential perils when threading shared state """
if THREAD_SAFE:
seq_id = self.shared.get_sequence_id()
else:
seq_id = self.shared.sleepy_sequence_id()
if message == "Echo":
return f"Echo Reply {seq_id}"
else:
return f"Parameter Problem {seq_id}"
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
debugging_config = {'allow_all_attrs': True, 'sync_request_timeout': None}
echo_svc = rpyc.ThreadedServer(service=SharingService, port=18861, protocol_config=debugging_config)
echo_svc.start()
| tomerfiliba-org/rpyc | demos/sharing/server.py | server.py | py | 2,375 | python | en | code | 1,454 | github-code | 1 | [
{
"api_name": "functools.wraps",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_... |
17960395799 | import numpy as np
from scipy.integrate import solve_ivp
from utilities import *
import shelve
nInfectiousStates = [5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000]
tauR = 21
threshold = 0.0
maxRate = 1
timeToMaxRate = 4
n = 10000
R0 = 3
tmax = 100
initialFractionInfected = 0.01
time_SIR = list()
time_VL_const = list()
time_VL_gamma = list()
magnitude_SIR = list()
magnitude_VL_const = list()
magnitude_VL_gamma = list()
for stages in nInfectiousStates:
nStates = stages + 2
if stages > 1:
tau = np.linspace(0.0, tauR, stages)
dtau = tau[1] - tau[0]
else:
tau = np.array([tauR])
dtau = tauR
bFunction = betaVL(tau, threshold, maxRate, timeToMaxRate)
bScaled = bFunction/(np.sum(bFunction)*dtau)
beta_gamma = R0*bScaled
beta_const = betaConstant(tau, np.mean(beta_gamma))
beta = np.sum(beta_gamma)*dtau/tauR
gamma = 1/tauR
### Fully mixed
initialStatesVL = np.zeros(nStates)
initialStatesVL[1] = initialFractionInfected
initialStatesVL[0] = 1 - initialFractionInfected
initialStatesSIR = [1 - initialFractionInfected, initialFractionInfected, 0]
sol = solve_ivp(SIRModelFullyMixed, (0, tmax), initialStatesSIR, t_eval=np.arange(0, tmax, 0.01), args=(beta, gamma))
t = sol.t
y = sol.y.T
time_SIR.append(t[np.argmax(np.sum(y[:, 1:-1], axis=1))])
magnitude_SIR.append(np.max(np.sum(y[:, 1:-1], axis=1)))
sol = solve_ivp(viralLoadModelFullyMixed, (0, tmax), initialStatesVL, t_eval=np.arange(0, tmax, 0.01), args=(beta_const, dtau))
t = sol.t
y = sol.y.T
time_VL_const.append(t[np.argmax(np.sum(y[:, 1:-1], axis=1))])
magnitude_VL_const.append(np.max(np.sum(y[:, 1:-1], axis=1)))
sol = solve_ivp(viralLoadModelFullyMixed, (0, tmax), initialStatesVL, t_eval=np.arange(0, tmax, 0.01), args=(beta_gamma, dtau))
t = sol.t
y = sol.y.T
time_VL_gamma.append(t[np.argmax(np.sum(y[:, 1:-1], axis=1))])
magnitude_VL_gamma.append(np.max(np.sum(y[:, 1:-1], axis=1)))
with shelve.open("Theory/peak_difference") as data:
data["num-states"] = nInfectiousStates
data["time-SIR"] = time_SIR
data["mag-SIR"] = magnitude_SIR
data["time-VL-const"] = time_VL_const
data["mag-VL-const"] = magnitude_VL_const
data["time-VL-gamma"] = time_VL_gamma
data["mag-VL-gamma"] = magnitude_VL_gamma | nwlandry/time-dependent-infectiousness | Theory/run_peak_difference.py | run_peak_difference.py | py | 2,372 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": ... |
41898501447 | import matplotlib.pyplot as plt
import numpy as np
from load_store import db_indicies as dbi
def plot_data(shard_dict, x_units, y_scale, show=False, append_to_title=""):
"""
Plot each shard in the shard dict.
Parameters
----------
shards: dict
Dictionary containing shards
x_units: str
Specifies whether to plot x-axis in pixels or angstroms
y_scale: str
Specifies whether to plot y-axis in lin space or log space
show: bool
Flag specifying whether to suppress the plot.
append_to_title: str
String to append to plot title.
"""
if not show:
return
for shard in shard_dict.values():
plot_shard_data(shard, y_scale, x_units, append_to_title)
def plot_shard_data(shard, y_scale, x_units, append_to_title):
line_plot = True # Toggle between line plot and scatter plot
fig = plt.figure(facecolor='white')
plt.title(("Order:{} spectra in {} space {}"
" ").format(shard.order, y_scale, append_to_title))
for spectrum_name, spectrum in shard.spectra.items():
print("spectrum.log_y", np.exp(spectrum.log_y))
print("len(spectrum.log_y)", len(np.exp(spectrum.log_y)))
if x_units == "px":
plt.xlabel("Pixels (Arbitrary 0)")
if y_scale == "lin":
if line_plot:
# x: pixels, y: linear space, line plot
plt.plot(np.exp(spectrum.log_y), label=spectrum_name)
else: # (scatter plot)
# x: pixels, y: linear space, scatter plot
plt.scatter(list(range(shard.px_no)), np.exp(spectrum.log_y))
plt.ylabel("Signal Intensity (linear space)")
else: # (log plot)
if line_plot:
# x: pixels, y: log space, line plot
plt.plot(spectrum.log_y, label=spectrum_name)
else:
# x: pixels, y: log space, scatter plot
plt.scatter(list(range(shard.px_no)), spectrum.log_y)
plt.ylabel("Signal Intensity (log space)")
elif x_units == "wv":
plt.xlabel("Wavelength (Angstroms)")
if y_scale == "lin":
if line_plot:
# x: wavelength, y: linear space, line plot
plt.plot(spectrum.lin_x, np.exp(spectrum.log_y))
else: # (scatter plot)
# x: wavelength, y: linear space, scatter plot
plt.scatter(spectrum.lin_x, np.exp(spectrum.log_y))
plt.ylabel("Signal Intensity (linear space)")
else: # (log plot)
if line_plot:
# x: wavelength, y: log space, line plot
plt.plot(spectrum.lin_x, spectrum.log_y)
else:
# x: wavelength, y: log space, scatter plot
plt.scatter(spectrum.lin_x, spectrum.log_y)
plt.ylabel("Signal Intensity (log space)")
else:
raise Exception("xUnits unrecognized when plotting shards")
plt.show()
def plot_shards_vs_xcorr_tel(db, shift, shards, show=False):
"""
Plots each shard against the xcorrelated, unfitted telluric model.
"""
if not show:
return
for shard in shards.values():
plot_shard_vs_xcorr_tel(db, shift, shard)
def plot_shard_vs_xcorr_tel(db, shift, shard):
"""
Plots a shard against the xcorrelated, unfitted telluric model.
Worker function of plot_shards_vs_xcorr_tel.
"""
spectrum = next(iter(shard.spectra.values())) #only one spectrum in shard
db_spectrum = np.ones(len(spectrum.log_y))
for record in db:
px = record[dbi.PX_IND] + shift
if record[dbi.ORD_IND] == shard.order and shard.lo_px <= px and px < shard.hi_px:
db_spectrum[px - shard.lo_px] = np.exp(record[dbi.INT_IND])
fig = plt.figure(facecolor = 'white')
plt.plot(spectrum.lin_x, np.exp(spectrum.log_y), color='purple', label='CHIRON Spectrum')
plt.plot(spectrum.lin_x, db_spectrum, label='Telluric Spectrum')
plt.title("Order {} px {}-{}, spectrum and xcorr, unscaled telluric model".format(shard.order,
shard.lo_px,
shard.hi_px))
plt.xlabel("Wavelength (Angstroms)")
plt.ylabel("Signal strength")
plt.tight_layout()
plt.legend()
plt.show()
| chrisleet/selenite | selenite/visualize/plot_data.py | plot_data.py | py | 4,618 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "ma... |
30401371332 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 13 19:04:50 2016
@author: jack
DESCRIPTION
-----
This script is for generating color background patterns for bidirectional
S-BOS
INSTRUCTIONS TO USE
-----
options can be set in the code. the width and height, as well as the
wavelegth and waveform in both directions can be changed. After the code is
run, a dialog will give the option to save the image. Unlike the script for
generating stripe background patterns, this gives the options to make a sine
or square background pattern, but not a triangle wave.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import cv2
import tkMessageBox
import time
def generateBackgroundImage(width,height,N,waveform,orientation):
import numpy as np
import cv2
from scipy.signal import kaiserord, lfilter, firwin, freqz, square
from scipy import signal
if orientation == 'vertical' or orientation == 'v' or orientation == 'V':
W=width
width = height
height = W
x = np.linspace(0, N*2*np.pi, height)
if waveform == 'square' or waveform == 'sq' or waveform == 'SQ':
y = signal.square(x)
else:
y = np.sin(x)
Y = np.expand_dims(y, 1)
#Y =np.resize(Y, (height,width)
while Y.size < height*width:
Y = np.append(Y, Y, axis=1)
Y2 = Y[1:height, 1:width]
if orientation == 'vertical' or orientation == 'v' or orientation == 'V':
Y2=np.rot90(Y2, k=1)
return Y2
#------------------------------------------------------------
# generate background that will be assigned to first color channel
width=1920 # select width
height=1080 # select height
wavelength1 = 20 # select wavelength
waveform1='sin' # select waveform
orientation1='V' # select orientation
if orientation1 == 'H':
N1 = height/wavelength1
else:
N1 = width/wavelength1
q1 = generateBackgroundImage(width,height,N1,waveform1,orientation1)
#------------------------------------------------------------
# generate background that will be assigned to second color channel
wavelength2 = 6 # select wavelength
waveform2='sin' # select waveform
orientation2='H' # select orientation
if orientation2 == 'H':
N2 = height/wavelength2
else:
N2 = width/wavelength2
q2 = generateBackgroundImage(width,height,N2,waveform2,orientation2)
#------------------------------------------------------------
# assemble the two backgrounds to one RGB image
img = np.zeros((height-1,width-1,3))
img[:,:,0]=q1/2+0.5
img[:,:,2]=q2/2+0.5
#------------------------------------------------------------
# disply the background
fig1=plt.figure()
plt.imshow(img,cmap='gray')
plt.draw()
plt.show()
fig2=plt.figure()
plt.close(fig2)
plt.title('color S-BOS background image')
#------------------------------------------------------------
# ask user if they would like to save files
saveChoice = tkMessageBox.askyesno('Save results?','Would you like to save the background?')
if saveChoice:
outputFilename = ('BG_' + waveform1 + '_' + orientation1 + '_' +
str(int(wavelength1)) + 'px_' + waveform1 + '_' + orientation1 +
'_' + str(int(wavelength1)) + 'px_' + time.strftime("%Y-%m-%d") +'.jpg')
scipy.misc.imsave(outputFilename, img)
print('saved image as ' + outputFilename)
else:
print('You have chosen not to save the image')
##%% prompt user in the console to choose whether to save
#filename = 'BG_' + str(waveform1) + '_' + str(waveform2) + '.jpg'
#print('suggested filename: ' + filename)
#print('press enter to accept, or type a new name to change it. press space then enter to skip.')
#userInput = raw_input()
#if len(userInput) == 0:
# scipy.misc.imsave('../background_images/plaid/'+filename, img)
# print('file saved as ' + filename)
#elif len(userInput) == 1:
# print('you have chosen not to save the file')
#elif len(userInput) > 1:
# print('input desired filename. be sure to include a file extention')
# scipy.misc.imsave('../background_images/plaid/'+userInput, img)
# print('file saved as ' + userInput) | jonathanrgross/Background-Oriented-Schlieren | generate_background/generate_plaid_background.py | generate_plaid_background.py | py | 4,594 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.square",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"... |
19514666953 | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, Concatenate
from tensorflow.keras.losses import MeanSquaredError, MeanAbsoluteError
from tensorflow.nn import max_pool_with_argmax
import tensorflow_addons as tfa
from tensorflow_addons.optimizers import AdamW
from custom_layers import MaxPoolingWithArgmax2D, MaxUnpooling2D
def deepcfd(input_height, input_width, input_channels,
weight_decay, learning_rate):
# Shared encoder channel
inputs = Input(shape=(input_height, input_width, input_channels))
conv1a = Conv2D(8, (5,5), activation='relu', padding='same', name='block1_layer1_conv2d')(inputs)
conv1b = Conv2D(8, (5,5), activation='relu', padding='same')(conv1a)
# pool1 = MaxPooling2D((2,2))(conv1b)
pool1, idx1 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv1b)
conv2a = Conv2D(16, (5,5), activation='relu', padding='same')(pool1)
conv2b = Conv2D(16, (5,5), activation='relu', padding='same')(conv2a)
# pool2 = MaxPooling2D((2,2))(conv2b)
pool2, idx2 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv2b)
conv3a = Conv2D(32, (5,5), activation='relu', padding='same')(pool2)
conv3b = Conv2D(32, (5,5), activation='relu', padding='same')(conv3a)
# pool3 = MaxPooling2D((2,2))(conv3b)
pool3, idx3 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv3b)
conv4a = Conv2D(32, (5,5), activation='relu', padding='same')(pool3)
conv4b = Conv2D(32, (5,5), activation='relu', padding='same')(conv4a)
# pool4 = MaxPooling2D((2,2))(conv4b)
# pool4, idx4 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv4b)
# Separate Ux decoder channel
# upsamp4_ux = UpSampling2D((2,2))(pool4)
# unpool4_ux = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv4b.shape)([pool4, idx4])
concat4_ux = Concatenate()([conv4a, conv4b])
deconv4a_ux = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat4_ux)
deconv4b_ux = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(deconv4a_ux)
# upsamp3_ux = UpSampling2D((2,2))(deconv4b_ux)
unpool3_ux = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv3b.shape)([deconv4b_ux, idx3])
concat3_ux = Concatenate()([conv3b, unpool3_ux])
deconv3a_ux = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat3_ux)
deconv3b_ux = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(deconv3a_ux)
# upsamp2_ux = UpSampling2D((2,2))(deconv3b_ux)
unpool2_ux = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv2b.shape)([deconv3b_ux, idx2])
concat2_ux = Concatenate()([conv2b, unpool2_ux])
deconv2a_ux = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(concat2_ux)
deconv2b_ux = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(deconv2a_ux)
# upsamp1_ux = UpSampling2D((2,2))(deconv2b_ux)
unpool1_ux = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv1b.shape)([deconv2b_ux, idx1])
concat1_ux = Concatenate()([conv1b, unpool1_ux])
deconv1a_ux = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(concat1_ux)
deconv1b_ux = Conv2DTranspose(1, (5,5), padding='same', name='ux')(deconv1a_ux)
# Separate Uy decoder channel
# upsamp4_uy = UpSampling2D((2,2))(pool4)
# unpool4_uy = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv4b.shape)([pool4, idx4])
concat4_uy = Concatenate()([conv4a, conv4b])
deconv4a_uy = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat4_uy)
deconv4b_uy = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(deconv4a_uy)
# upsamp3_uy = UpSampling2D((2,2))(deconv4b_uy)
unpool3_uy = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv3b.shape)([deconv4b_uy, idx3])
concat3_uy = Concatenate()([conv3b, unpool3_uy])
deconv3a_uy = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat3_uy)
deconv3b_uy = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(deconv3a_uy)
# upsamp2_uy = UpSampling2D((2,2))(deconv3b_uy)
unpool2_uy = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv2b.shape)([deconv3b_uy, idx2])
concat2_uy = Concatenate()([conv2b, unpool2_uy])
deconv2a_uy = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(concat2_uy)
deconv2b_uy = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(deconv2a_uy)
# upsamp1_uy = UpSampling2D((2,2))(deconv2b_uy)
unpool1_uy = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv1b.shape)([deconv2b_uy, idx1])
concat1_uy = Concatenate()([conv1b, unpool1_uy])
deconv1a_uy = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(concat1_uy)
deconv1b_uy = Conv2DTranspose(1, (5,5), padding='same', name='uy')(deconv1a_uy)
# Separate p decoder channel
# upsamp4_p = UpSampling2D((2,2))(pool4)
# unpool4_p = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv4b.shape)([pool4, idx4])
concat4_p = Concatenate()([conv4a, conv4b])
deconv4a_p = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat4_p)
deconv4b_p = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(deconv4a_p)
# upsamp3_p = UpSampling2D((2,2))(deconv4b_p)
unpool3_p = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv3b.shape)([deconv4b_p, idx3])
concat3_p = Concatenate()([conv3b, unpool3_p])
deconv3a_p = Conv2DTranspose(32, (5,5), activation='relu', padding='same')(concat3_p)
deconv3b_p = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(deconv3a_p)
# upsamp2_p = UpSampling2D((2,2))(deconv3b_p)
unpool2_p = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv2b.shape)([deconv3b_p, idx2])
concat2_p = Concatenate()([conv2b, unpool2_p])
deconv2a_p = Conv2DTranspose(16, (5,5), activation='relu', padding='same')(concat2_p)
deconv2b_p = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(deconv2a_p)
# upsamp1_p = UpSampling2D((2,2))(deconv2b_p)
unpool1_p = MaxUnpooling2D(pool_size=(2, 2), out_shape=conv1b.shape)([deconv2b_p, idx1])
concat1_p = Concatenate()([conv1b, unpool1_p])
deconv1a_p = Conv2DTranspose(8, (5,5), activation='relu', padding='same')(concat1_p)
deconv1b_p = Conv2DTranspose(1, (5,5), padding='same', name='p')(deconv1a_p)
# # Creating Model
model = Model(
inputs=[inputs],
outputs=[deconv1b_ux,deconv1b_uy,deconv1b_p],
name='DeepCFD'
)
# Creating optimiser
optimiser = AdamW(weight_decay, learning_rate)
# Creating metrics
metrics = {
'ux': ['acc', 'mse'],
'uy': ['acc', 'mse'],
'p': ['acc', 'mse']
}
# Creating separate losses
losses = {
'ux': MeanSquaredError(),
'uy': MeanSquaredError(),
'p': MeanAbsoluteError()
}
# Compiling model
model.compile(optimizer=optimiser, loss=losses, metrics=metrics)
return model | pomtojoer/DeepCFD-TF | models.py | models.py | py | 6,947 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tensorflow.keras.layers.Input",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 19,
"usage_type": "call"
}... |
25271283090 | import numpy as np
import os
import wrapp_mct_photon_propagation as mctw
import subprocess as sp
import tempfile
import json
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import plenopy as pl
out_dir = os.path.join('examples', 'small_camera_lens_psf')
os.makedirs(out_dir, exist_ok=True)
# scenery file
# ------------
outer_radius = 0.0715447
inner_radius = 0.0619595
focal_length = 0.17438
curvature_radius = 0.18919
MIRROR_WALLS = True
scenery = {}
scenery["functions"] = [
{
"name": "mirror_reflection",
"argument_versus_value": [
[200e-9, 0.95],
[1200e-9, 0.95]]
},
{
"name": "glas_refraction",
"argument_versus_value": [
[200e-9, 1.46832],
[1200e-9, 1.46832]]
},
]
scenery["colors"] = [
{"name": "red", "rgb": [255, 0, 0]},
{"name": "brown", "rgb": [128, 150, 0]},
{"name": "green", "rgb": [0, 200, 0]},
{"name": "lens_white", "rgb": [255, 255, 255]}
]
scenery["children"] = []
scenery["children"].append(
{
"type": "BiConvexLensHex",
"name": "lens",
"pos": [0, 0, focal_length],
"rot": [0, 0, np.pi/2],
"curvature_radius": curvature_radius,
"outer_radius": outer_radius,
"surface": {
"inner_color": "lens_white",
"outer_color": "lens_white",
"inner_refraction": "glas_refraction",
},
"children": [],
})
stop_centers = np.zeros(shape=(6, 2))
for i, phi in enumerate(np.linspace(0, 2*np.pi, 6, endpoint=False)):
stop_centers[i, :] = 2*inner_radius*np.array([
np.sin(phi + np.pi/2),
np.cos(phi + np.pi/2)])
for idx, pos in enumerate(stop_centers):
scenery["children"].append(
{
"type": "HexPlane",
"name": "stop_{:d}".format(idx),
"pos": [pos[0], pos[1], focal_length],
"rot": [0, 0, np.pi/2],
"outer_radius": outer_radius,
"surface": {
"inner_color": "brown",
"outer_color": "brown"},
"children": [],
})
if MIRROR_WALLS:
wall_centers = np.zeros(shape=(6, 2))
for i, phi in enumerate(np.linspace(0, 2*np.pi, 6, endpoint=False)):
wall_centers[i, :] = inner_radius*np.array([
np.sin(phi + np.pi/2),
np.cos(phi + np.pi/2)])
scenery["children"].append(
{
"type": "Plane",
"name": "wall_{:d}".format(i),
"pos": [wall_centers[i, 0], wall_centers[i, 1], 0.025],
"rot": [1.5707, 0, phi + np.pi/2],
"x_width": outer_radius,
"y_width": 0.05,
"surface": {
"inner_color": "green",
"outer_color": "green",
"outer_reflection": "mirror_reflection",
"inner_reflection": "mirror_reflection",
},
"children": [],
}
)
scenery["children"].append(
{
"type": "Disc",
"name": "sensor",
"pos": [0, 0, 0],
"rot": [0, 0, 0],
"radius": outer_radius*1.5,
"sensor_id": 0,
"children": [],
"surface": {
"inner_color": "red",
"outer_color": "red"},
})
with open(os.path.join(out_dir, 'optical-table_for_lens.json'), 'wt') as fout:
fout.write(json.dumps(scenery, indent=4))
sensor_responses = []
focal_ratio_imaging_reflector = 1.5
max_incident_angle = np.arctan(0.5/focal_ratio_imaging_reflector)
prng = np.random.Generator(np.random.MT19937(seed=0))
incident_directions = np.linspace(0, max_incident_angle, 6)
for idx, incident_direction in enumerate(incident_directions):
# photons
# -------
num_photons = 1000*1000
supports = np.zeros(shape=(num_photons, 3))
supports[:, 2] = 1.3*focal_length
supports[:, 0] = prng.uniform(
low=-outer_radius,
high=outer_radius,
size=num_photons)
supports[:, 1] = prng.uniform(
low=-outer_radius,
high=outer_radius,
size=num_photons)
area_exposed = (outer_radius*2)**2
areal_photon_density = num_photons/area_exposed
directions = np.zeros(shape=(num_photons, 3))
directions[:, 0] = incident_direction
directions[:, 2] = - np.sqrt(1 - incident_direction**2)
direction_length = np.linalg.norm(directions[:, :], axis=1)
np.testing.assert_allclose(direction_length, 1.0, atol=1e-3)
wavelengths = 433e-9*np.ones(num_photons)
with tempfile.TemporaryDirectory(suffix="acp_lens_psf") as tmp_dir:
photons_path = os.path.join(
tmp_dir, 'photons_{idx:d}.csv'.format(idx=idx))
photons_result_path = os.path.join(
tmp_dir, 'photons_result_{idx:d}.csv'.format(idx=idx))
mctw.write_ascii_table_of_photons(
photons_path,
supports=supports,
directions=directions,
wavelengths=wavelengths)
sp.call([
os.path.join(
".",
"build",
"merlict",
"merlict-propagate"),
"-s", os.path.join(
"examples",
"small_camera_lens_psf",
"optical-table_for_lens.json"),
"-i", photons_path,
"-o", photons_result_path,
"-c", os.path.join(
"merlict_development_kit",
"merlict_tests",
"apps",
"examples",
"settings.json")])
photons_result_path += "1_0"
result = np.genfromtxt(photons_result_path)
r = {}
r['incident_direction'] = incident_direction
r['areal_photon_density'] = areal_photon_density
r['x'] = result[:, 0]
r['y'] = result[:, 1]
r['cx'] = result[:, 2]
r['cy'] = result[:, 3]
r['wavelength'] = result[:, 4]
r['arrival_time'] = result[:, 5]
sensor_responses.append(r)
sensor_radius = outer_radius
num_bins = 300
xy_bin_edges = np.linspace(-sensor_radius, sensor_radius, num_bins + 1)
max_intensity = 0
for sensor_response in sensor_responses:
psf = np.histogram2d(
x=sensor_response['x'],
y=sensor_response['y'],
bins=[xy_bin_edges, xy_bin_edges])[0]
sensor_response['point_spread_function'] = psf
sensor_response['xy_bin_edges'] = xy_bin_edges
if np.max(psf) > max_intensity:
max_intensity = np.max(psf)
lfg_path = os.path.join('run', 'light_field_calibration')
if os.path.exists(lfg_path):
lfg = pl.LightFieldGeometry()
lixel_r = np.hypot(lfg.lixel_positions_x, lfg.lixel_positions_y)
pixel_r = (
lfg.sensor_plane2imaging_system.expected_imaging_system_focal_length *
np.tan(lfg.sensor_plane2imaging_system.pixel_FoV_hex_flat2flat/2))
mask = lixel_r < pixel_r
lixel_x = lfg.lixel_positions_x[mask]
lixel_y = lfg.lixel_positions_y[mask]
lixel_outer_radius = lfg.lixel_outer_radius
def add_hexagon(
ax,
x=0,
y=0,
outer_radius=1,
theta=0,
color='k',
linewidth=1,
alpha=1
):
hexagon = np.zeros(shape=(6, 2))
for i, phi in enumerate(np.linspace(0, 2*np.pi, 6, endpoint=False)):
hexagon[i, 0] = x + outer_radius*np.cos(phi + theta)
hexagon[i, 1] = y + outer_radius*np.sin(phi + theta)
for i in range(hexagon.shape[0]):
s = hexagon[i, :]
if i + 1 >= hexagon.shape[0]:
e = hexagon[0, :]
else:
e = hexagon[i + 1, :]
ax.plot(
[s[0], e[0]],
[s[1], e[1]],
color=color,
linewidth=linewidth,
alpha=alpha)
for sensor_response in sensor_responses:
fig = plt.figure(figsize=(4, 4), dpi=250)
ax = fig.add_axes([0, 0, 1, 1])
[s.set_visible(False) for s in ax.spines.values()]
[t.set_visible(False) for t in ax.get_xticklines()]
[t.set_visible(False) for t in ax.get_yticklines()]
im = ax.pcolor(
1e3*sensor_response['xy_bin_edges'],
1e3*sensor_response['xy_bin_edges'],
sensor_response['point_spread_function'],
cmap='binary',
norm=colors.PowerNorm(gamma=1./3.),
vmax=max_intensity)
ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.1)
ax.set_aspect('equal')
add_hexagon(
ax=ax,
x=0,
y=0,
outer_radius=1e3*outer_radius,
color='g',
linewidth=1.5,
alpha=0.5)
if os.path.exists(lfg_path):
for j in range(lfg.number_lixel//lfg.number_pixel):
add_hexagon(
ax=ax,
x=1e3*lixel_x[j],
y=1e3*lixel_y[j],
outer_radius=1e3*lixel_outer_radius,
theta=np.pi/6,
color='r',
linewidth=1,
alpha=0.3)
fig.savefig(
os.path.join(
out_dir,
'psf_{:d}mdeg.png'.format(
int(1000*np.rad2deg(sensor_response['incident_direction'])))))
plt.close('all')
fig = plt.figure(figsize=(6, .5), dpi=250)
cax = fig.add_axes((0.1, 0.5, 0.8, 0.8))
cbar = fig.colorbar(im, cax=cax, orientation="horizontal")
fig.savefig(os.path.join(out_dir, 'colorbar.png'))
plt.close('all')
| cherenkov-plenoscope/starter_kit | obsolete_examples/small_camera_lens_psf.py | small_camera_lens_psf.py | py | 9,318 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 5... |
11223446905 | from hevc_predictor import Predictor
import numpy as np
from tqdm import tqdm
import random
import cv2
def offline_augmenter(odp_batch=None, output_path = None, mode_data=False):
"""
Computes structural similarity and mse metrics to return X best augmentation patches.
specify X as multiplier.
If multiplier==1, the offline augmenter behaves like the online version but much slower.
"""
if odp_batch ==None:
print("Error: missing list of odp patch names")
else:
if mode_data:
for patch in tqdm(odp_batch):
augment = random.choice([True, False])
if augment:
odp_patch = cv2.imread(patch, cv2.IMREAD_GRAYSCALE)
mode = odp_patch[0,0]
data_generator = Predictor(odp = odp_patch, diskpath= diskpath)
if mode == 2: #DC prediction - augment with planar prediction
pred = data_generator.predict_one(mode = 1)
if mode == 1: # Planar prediction - augment with DC prediction
pred = data_generator.predict_one(mode = 0)
else:#other prediction directions are augmented with their neighbors
if mode == 3:
pred = data_generator.predict_one(mode = 3)
if mode == 35:
pred = data_generator.predict_one(mode = 34)
else:
pred = data_generator.predict_one(mode = np.random.choice([mode+1, mode-1])-1)
out = output_path+ "aug_offline_"+ patch.split('\\')[len(patch.split('\\'))-1]
cv2.imwrite(out, aug_patch)
else:
for patch in tqdm(odp_batch):
augment = random.choice([True, False])
if augment:
odp_patch = cv2.imread(patch, cv2.IMREAD_GRAYSCALE)
data_generator = Predictor(odp = odp_patch)
aug_patch = data_generator.predict_all()
out = output_path+ "aug_offline_"+ patch.split('\\')[len(patch.split('\\'))-1]
cv2.imwrite(out, aug_patch)
def online_augmenter(odp=None, diskpath=None, mode_data=False):
'''
Returns a patch with closest structural similarity to the current prediction mode
i.e. one of the results of neighboring prediction modes
'''
if mode_data:
mode = odp[0,0]
data_generator = Predictor(odp = odp, diskpath= diskpath)
if mode == 2: #DC prediction - augment with planar prediction
pred = data_generator.predict_one(mode = 1)
if mode == 1: # Planar prediction - augment with DC prediction
pred = data_generator.predict_one(mode = 0)
else:#other prediction directions are augmented with their neighbors
if mode == 3:
pred = data_generator.predict_one(mode = 3)
if mode == 35:
pred = data_generator.predict_one(mode = 34)
else:
pred = data_generator.predict_one(mode = np.random.choice([mode+1, mode-1])-1)
else:
data_generator = Predictor(odp = odp)
pred = data_generator.predict_all(select=True)
return pred
| Goluck-Konuko/hevc_data_augmenter | hevc_augmenter/augmenter.py | augmenter.py | py | 3,347 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tqdm.tqdm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_n... |
70297287074 | import glob
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from gensim.models import Word2Vec
import numpy as np
type_transform = {"clicks": 0, "carts": 1, "orders": 2}
IS_TRAIN = True
IS_Last_Month = True
def load_data(path):
dfs = []
# 只导入训练数据
for e, chunk_file in enumerate(glob.glob(path)):
chunk = pd.read_parquet(chunk_file)
chunk.ts = (chunk.ts / 1000).astype('int32')
# if not IS_TRAIN:
# # 除去第一周的数据
# chunk = chunk[chunk['ts'] >= 1659909599]
chunk['type'] = chunk['type'].map(type_transform).astype('int8')
dfs.append(chunk)
return pd.concat(dfs).reset_index(drop=True)
# 加载数据
print('加载数据')
if IS_TRAIN:
if IS_Last_Month:
train_sessions = load_data('/home/niejianfei/otto/CV/data/*_parquet/*')
print(train_sessions)
else:
train_sessions = load_data('/home/niejianfei/otto/CV/data/test_parquet/*')
print(train_sessions)
else:
if IS_Last_Month:
train_sessions = load_data('/home/niejianfei/otto/LB/data/*_parquet/*')
print(train_sessions)
else:
train_sessions = load_data('/home/niejianfei/otto/LB/data/test_parquet/*')
print(train_sessions)
print('开始排序')
# 分别对session_id聚合,对时间进行排序
df = train_sessions.sort_values(by=["session", "ts"], ascending=True)
print(df.head(10))
print('开始构图')
# 开始构图
dic = defaultdict(list) # defaultdict为了给key不在字典的情况赋予一个default值
# 加文字是区分item和user
for x in tqdm(df[["session", "aid"]].values):
dic[f"user_{x[0]}"].append(f"item_{x[1]}") # list中元素是有顺序的
dic[f"item_{x[1]}"].append(f"user_{x[0]}")
# 随机游走
print('开始随机游走')
# 中心点item,先选定一个session,再走到session中item后面的元素中
# 计算user item对应长度
dic_count = {}
for key in dic:
dic_count[key] = len(dic[key])
item_list = df["aid"].unique()
user_list = df["session"].unique()
print('item数量', len(item_list))
print('user数量', len(user_list))
path_length = 20
sentences = []
num_sentences = 20000000 # 实际跑的时候建议50w+ (有2w个item)
'''
badcase:
item_a : session_1
session_1 : [item_b,item_a]
需要加一个max_repeat_time 避免死循环
'''
max_repeat_nums = path_length * 2
for _ in tqdm(range(num_sentences)):
start_item = "item_{}".format(item_list[np.random.randint(0, len(item_list))])
sentence = [start_item]
repeat_time = 0
while len(sentence) < path_length:
last_item = sentence[-1]
random_user = dic[last_item][np.random.randint(0, dic_count[last_item])] # 递归,选最后一个得到user列表,再选一个user
# 若两个相同的item紧挨着,则+1后跳到下一个,继续session随机可能跳出来,其实图也有这种情况,闭环的产生
next_item_index = np.where(np.array(dic[random_user]) == last_item)[0][
0] + 1 # 在random_user的items里面找到last_item的索引+1
# user内item不是最后一个,把后面这个加过去
# 若是最后一个,不做操作继续循环,可能有bad case
if next_item_index <= dic_count[random_user] - 1:
next_item = dic[random_user][next_item_index]
sentence.append(next_item)
repeat_time += 1
if repeat_time > max_repeat_nums:
break
sentences.append(sentence)
# embedding_dimensions = number_of_categories**0.25
model = Word2Vec(sentences, vector_size=64, sg=1, window=5, min_count=1, hs=1, negative=5, sample=0.001, workers=4)
# 保存模型
if IS_TRAIN:
if IS_Last_Month:
model.wv.save_word2vec_format('/home/niejianfei/otto/CV/preprocess/deepwalk_last_month.w2v', binary=False)
else:
model.wv.save_word2vec_format('/home/niejianfei/otto/CV/preprocess/deepwalk_last_week.w2v', binary=False)
else:
if IS_Last_Month:
model.wv.save_word2vec_format('/home/niejianfei/otto/LB/preprocess/deepwalk_last_month.w2v', binary=False)
else:
model.wv.save_word2vec_format('/home/niejianfei/otto/LB/preprocess/deepwalk_last_week.w2v', binary=False)
| niejianfei/Kaggle_OTTO_Multi-Objective_Recommender_System | preprocess/deepwalk_prepare.py | deepwalk_prepare.py | py | 4,265 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_parquet",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",... |
2525113933 | import json
import requests
from django.shortcuts import render, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.db.models import Q, Avg
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from .models import Book, BookRequest,Rating, Review, Illustration, IllustrationPostRequest, IllustrationDeleteRequest, User
from .forms import ReviewForm, BookForm, EditBookForm, EditBookRequestForm, ProtectionForm
def index(request):
Books = Book.objects.all().order_by('id')[:10]
latest_added_books = Book.objects.all().order_by('-id')[:10]
best_book_ratings = Book.objects.all().order_by('-score_avg')[:10]
reviews = Review.objects.all().order_by('-id')[:5]
return render(request, "books/index.html", {
"Books": Books,
"latest_added_books": latest_added_books,
"best_book_ratings": best_book_ratings,
"reviews": reviews
})
def login_view(request):
if request.method == 'POST':
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "books/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "books/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "books/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "books/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "books/register.html")
def book(request, book_id):
book = get_object_or_404(Book, id=book_id)
# Get book illustrations and reviews objects
illustrations = Illustration.objects.filter(book=book)
reviews = Review.objects.filter(book=book)[:5]
read = False
reading = False
want_to_read = False
if User.objects.filter(username=request.user.username, read=book).exists():
read = True
if User.objects.filter(username=request.user.username, reading=book).exists():
reading = True
if User.objects.filter(username=request.user.username, want_to_read=book).exists():
want_to_read= True
context = {
"Book": book,
"Illustrations": illustrations,
"Reviews": reviews,
"ProtectionForm": ProtectionForm(),
"read": read,
"reading": reading,
"want_to_read": want_to_read
}
# Show user rating if exists
if request.user.is_authenticated and Rating.objects.filter(user=request.user, book=book).exists():
rating = Rating.objects.get(user=request.user, book=book)
context["rating_score"] = rating.score
return render(request, "books/book.html", context)
else:
return render(request, "books/book.html", context)
def show_reviews(request, book_id):
book = get_object_or_404(Book, id=book_id)
reviews = Review.objects.filter(book=book)
page_number = request.GET.get("page")
paginator = Paginator(reviews, 10)
page_obj = paginator.get_page(page_number)
return render(request, "books/book_reviews.html", {
"page_obj": page_obj,
"book_id": book.id
})
@login_required
def contribute(request):
if request.method == "POST":
form = BookForm(request.POST, request.FILES)
if form.is_valid():
book = form.save()
user = User.objects.get(username=request.user.username)
user.contributions += 1
user.save()
return HttpResponseRedirect(reverse("book", args=[book.id]))
else:
return render(request, "books/contribute.html", {
"form": form
})
else:
initial_data = {
"isbn": {"isbn10": "Insert ISBN10 here", "isbn13": "Insert ISBN13 here"},
"genres": {"genres": ["insert", "genres", "here"]},
"characters": {"characters": ["Insert", "characters", "here"]},
"keywords": {"keywords": ["Insert", "keywords", "here"]},
}
return render(request, "books/contribute.html", {
"form": BookForm(initial=initial_data)
})
@login_required
def edit_book(request, book_id):
book = get_object_or_404(Book, id=book_id)
if request.method == "POST":
if book.protection:
new_book = BookRequest()
form = EditBookRequestForm(request.POST, instance=book)
if form.is_valid():
new_book.original_book_id = book_id
new_book.title = form.cleaned_data["title"]
new_book.author = form.cleaned_data["author"]
new_book.isbn = form.cleaned_data["isbn"]
new_book.synopsis = form.cleaned_data["synopsis"]
new_book.genres = form.cleaned_data["genres"]
new_book.published = form.cleaned_data["published"]
new_book.original_title = form.cleaned_data["original_title"]
new_book.characters = form.cleaned_data["characters"]
new_book.keywords = form.cleaned_data["keywords"]
new_book.change = "Book"
new_book.user = User.objects.get(username=request.user.username)
new_book.book_cover = "NULL"
new_book.save()
return HttpResponseRedirect(reverse("book", args=[book.id]))
else:
return render(request, "books/edit_book.html", {
"form": form,
"book_id": book.id
})
else:
form = EditBookForm(request.POST, instance=book)
if form.is_valid():
edit_book = form.save()
user = User.objects.get(username=request.user.username)
user.contributions += 1
user.save()
return HttpResponseRedirect(reverse("book", args=[book.id]))
else:
return render(request, "books/edit_book.html", {
"form": form,
"book_id": book.id
})
else:
if book.protection:
return render(request, "books/edit_book.html", {
"form": EditBookRequestForm(instance=book),
"book_id": book.id
})
return render(request, "books/edit_book.html", {
"form": EditBookForm(instance=book),
"book_id": book.id
})
def get_book(request, book_id):
book = get_object_or_404(Book, id=book_id)
return JsonResponse({"book": {"id": book.id, "title": book.title, "author": book.author,
"synopsis": book.synopsis, "cover": book.book_cover.url,
"genre": book.genres["genres"][0] }})
def search(request):
entry_search = request.GET.get('q')
books = Book.objects.filter(Q(title__icontains=entry_search) | Q(author__icontains=entry_search) | Q(isbn__icontains=entry_search) | Q(genres__icontains=entry_search) | Q(original_title__icontains=entry_search) | Q(characters__icontains=entry_search) | Q(keywords__icontains=entry_search))
paginator = Paginator(books, 18)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
return render(request, "books/search.html", {
"page_obj": page_obj,
"entry_search": entry_search
})
def rate_book(request):
if request.user.is_authenticated:
if request.method == 'POST':
data = json.loads(request.body)
rating_score = data.get('rating')
#Get book object
book_id = data.get('book_id')
book = get_object_or_404(Book, id=book_id)
# Get rating object and insert score, create if dont exist
try:
rating = Rating.objects.get(user=request.user, book=book)
rating.score = rating_score
rating.save()
except ObjectDoesNotExist:
rating = Rating(book=book, user=request.user, score=rating_score)
rating.save()
book.get_score
return JsonResponse({'success':'true', 'score': rating_score}, safe=False)
if request.method == "DELETE":
data = json.loads(request.body)
#Get book object
book_id = data.get('book_id')
book = get_object_or_404(Book, id=book_id)
try:
rating = Rating.objects.get(user=request.user, book=book)
rating.delete()
book.get_score
return JsonResponse({'success':'deleted'})
except ObjectDoesNotExist:
return JsonResponse({'error':'rating dont exist!'})
else:
return JsonResponse({'error':'login_required'})
@login_required
def illustration(request, book_id):
book = get_object_or_404(Book, id=book_id)
if request.method == "POST":
user = get_object_or_404(User, username=request.user.username)
if book.protection:
for i in request.FILES.values():
illustration = IllustrationPostRequest(user=user, book=book, image=i)
illustration.save()
else:
for i in request.FILES.values():
illustration = Illustration(book=book, image=i)
illustration.save()
user.contributions += 1
return HttpResponseRedirect(reverse("book", args=[book_id]))
if request.method == "DELETE":
data = json.loads(request.body)
user = get_object_or_404(User, username=request.user.username)
if book.protection:
for i in data:
illustration = get_object_or_404(Illustration, id=i)
illustration_delete = IllustrationDeleteRequest(user=user, illustration=illustration)
illustration_delete.save()
else:
for i in data:
illustration = get_object_or_404(Illustration, id=i)
illustration.delete()
user.contributions += 1
return JsonResponse({'success':'deleted'})
else:
illustrations = Illustration.objects.filter(book=book)
return render(request, "books/illustration.html", {
"book_id": book.id,
"book_title": book.title,
"illustrations": illustrations
})
@login_required
def review_book(request, book_id):
book = get_object_or_404(Book, id=book_id)
context = {
"book": book
}
# Prevent review duplication
if Review.objects.filter(user=request.user, book=book).exists():
return HttpResponseRedirect(reverse("edit_review", args=[book_id]))
if request.method == "POST":
# Prevent review duplication
if Review.objects.filter(user=request.user, book=book).exists():
return JsonResponse({'error':'review already exists!'})
form = ReviewForm(request.POST)
if form.is_valid():
rating = form.cleaned_data['rating']
title = form.cleaned_data["title"]
text = form.cleaned_data["text"]
review = Review(user=request.user, book=book, title=title, text=text, score=rating)
review.save()
return HttpResponseRedirect(reverse("book", args=[book_id]))
else:
context["message"] = "Invalid input"
return render(request, "books/review.html", context)
else:
return render(request, "books/review.html", context)
@login_required
def edit_review(request, book_id):
book = get_object_or_404(Book, id=book_id)
if request.method == "POST":
try:
review = Review.objects.get(user=request.user, book=book)
form = ReviewForm(request.POST)
if form.is_valid():
rating = form.cleaned_data['rating']
title = form.cleaned_data["title"]
text = form.cleaned_data["text"]
review.score = rating
review.title = title
review.text = text
review.save()
return HttpResponseRedirect(reverse("book", args=[book_id]))
else:
return render(request, "books/edit_review.html", {
"book": book,
"message": "Invalid input"
})
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse("book", args=[book_id]))
else:
try:
review = Review.objects.get(user=request.user, book=book)
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse("book", args=[book_id]))
return render(request, "books/edit_review.html", {
"book": book,
"review": review
})
@login_required
def protect(request, book_id):
    """Toggle a book's protection flag (superusers only), then return to its page.

    Non-superusers are silently ignored and simply redirected back.
    """
    if request.user.is_superuser:
        book = get_object_or_404(Book, id=book_id)
        book.protection = not book.protection
        book.save()
    return HttpResponseRedirect(reverse("book", args=[book_id]))
@login_required
def aprove(request):
    """Approve a pending book-edit request.

    POST: copies the BookRequest's fields onto the original Book, credits
    the acting user with a contribution, deletes the request and returns a
    JSON confirmation.  GET: renders the moderation page listing all
    pending book/illustration requests.

    Restricted to superusers, mirroring the check already present in
    reprove(); previously any logged-in user could approve edits.
    """
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("index"))
    if request.method == "POST":
        user = get_object_or_404(User, username=request.user.username)
        data = json.loads(request.body)
        book_post_id = data.get("id")
        book_request_model = get_object_or_404(BookRequest, id=book_post_id)
        book = Book.objects.get(id=book_request_model.original_book_id)
        # Apply every editable field from the request to the real book.
        book.title = book_request_model.title
        book.author = book_request_model.author
        book.isbn = book_request_model.isbn
        book.synopsis = book_request_model.synopsis
        book.genres = book_request_model.genres
        book.published = book_request_model.published
        book.original_title = book_request_model.original_title
        book.characters = book_request_model.characters
        book.keywords = book_request_model.keywords
        book.save()
        user.contributions += 1
        user.save()  # bug fix: the contribution counter was never persisted
        book_request_model.delete()
        return JsonResponse({'success':'aproved'})
    else:
        book_post_request = BookRequest.objects.all()
        illustration_post_request = IllustrationPostRequest.objects.all()
        illustration_delete_request = IllustrationDeleteRequest.objects.all()
        return render(request, "books/aprove.html", {
            "book_post": book_post_request,
            "illustration_post": illustration_post_request,
            "illustration_delete": illustration_delete_request
        })
@login_required
def aprove_illustration(request):
    """Approve a pending illustration upload (POST) or deletion (DELETE).

    Both branches credit the acting user with a contribution and remove
    the request object; any other method redirects back to the moderation
    page.  Restricted to superusers, mirroring reprove(); previously any
    logged-in user could approve these requests.
    """
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("index"))
    if request.method == "POST":
        user = get_object_or_404(User, username=request.user.username)
        data = json.loads(request.body)
        illustration_post_request_id = data.get("id")
        illustration_post_request = IllustrationPostRequest.objects.get(id=illustration_post_request_id)
        # Promote the requested image to a real Illustration record.
        illustration = Illustration(image=illustration_post_request.image, book=illustration_post_request.book)
        illustration.save()
        user.contributions += 1
        user.save()  # bug fix: the contribution counter was never persisted
        illustration_post_request.delete()
        return JsonResponse({'success':'aproved'})
    if request.method == "DELETE":
        user = get_object_or_404(User, username=request.user.username)
        data = json.loads(request.body)
        illustration_delete_request_id = data.get("id")
        illustration_delete_request = IllustrationDeleteRequest.objects.get(id=illustration_delete_request_id)
        # Remove both the illustration itself and the delete request.
        illustration_delete_request.illustration.delete()
        illustration_delete_request.delete()
        user.contributions += 1
        user.save()  # bug fix: the contribution counter was never persisted
        return JsonResponse({'success':'aproved'})
    else:
        return HttpResponseRedirect(reverse("aprove"))
@login_required
def reprove(request):
    """Reject (delete) a pending request of any type (superusers only).

    Expects a JSON body with `model` ("book", "illustration" or
    "remove_illustration") and the request `id`.  Non-superusers are
    redirected to the index page.
    NOTE(review): no request.method check — presumably only called via
    POST/fetch from the moderation page; confirm against the frontend.
    """
    if request.user.is_superuser:
        data = json.loads(request.body)
        model = data.get("model")
        model_id = data.get("id")
        # Dispatch on the request type; unknown types fall through to success.
        if model == "book":
            book = get_object_or_404(BookRequest, id=model_id)
            book.delete()
        if model == "illustration":
            illustration = get_object_or_404(IllustrationPostRequest, id=model_id)
            illustration.delete()
        if model == "remove_illustration":
            illustration = get_object_or_404(IllustrationDeleteRequest, id=model_id)
            illustration.delete()
        return JsonResponse({'success':'aproved'})
    else:
        return HttpResponseRedirect(reverse("index"))
@login_required
def show_request(request, request_id):
    """Render the detail page for one pending BookRequest."""
    pending = get_object_or_404(BookRequest, id=request_id)
    # Template expects the object under the capitalized key "Book".
    return render(request, "books/show_request.html", {"Book": pending})
@login_required
def profile(request, user_id):
    """Render the profile page with the user's stats and pending requests.

    NOTE(review): `user_id` is only forwarded to the template — the data
    shown always belongs to the *logged-in* user, regardless of the URL;
    confirm whether viewing other profiles was intended.
    """
    user = User.objects.get(username=request.user.username)
    # All pending contribution requests submitted by this user.
    book_post_request = BookRequest.objects.filter(user=user)
    illustration_post_request = IllustrationPostRequest.objects.filter(user=user)
    illustration_delete_request = IllustrationDeleteRequest.objects.filter(user=user)
    return render(request, "books/profile.html", {
        "user_id": user_id,
        "reviews": Review.objects.filter(user=request.user).count(),
        "ratings": Rating.objects.filter(user=request.user).count(),
        "read": user.read.count(),
        "reading": user.reading.count(),
        "want": user.want_to_read.count(),
        "book_post": book_post_request,
        "illustration_post": illustration_post_request,
        "illustration_delete": illustration_delete_request
    })
def book_status(request, book_id):
    """Set the user's reading status for a book via a JSON POST.

    Body: {"option": "want_read" | "reading" | "read"}.  The book is first
    removed from all three status lists, then added to the selected one,
    so the three lists stay mutually exclusive.  Unauthenticated callers
    get {"error": "login"}; non-POST requests redirect to the index.
    """
    if request.user.is_authenticated:
        if request.method == "POST":
            data = json.loads(request.body)
            option = data.get("option")
            book = get_object_or_404(Book, id=book_id)
            user = User.objects.get(username=request.user.username)
            # Clear the book from every status list before re-adding it.
            if User.objects.filter(username=user.username, read=book).exists():
                user.read.remove(book)
            if User.objects.filter(username=user.username, reading=book).exists():
                user.reading.remove(book)
            if User.objects.filter(username=user.username, want_to_read=book).exists():
                user.want_to_read.remove(book)
            if option == "want_read":
                user.want_to_read.add(book)
            if option == "reading":
                user.reading.add(book)
            if option == "read":
                user.read.add(book)
            user.save()
            return JsonResponse({'success': option})
        else:
            return HttpResponseRedirect(reverse("index"))
    else:
        return JsonResponse({'error': "login"})
def get_book_score(request, book_id):
    """Return a book's aggregate score as a bare JSON value.

    Bug fix: JsonResponse only accepts dict payloads by default and raised
    TypeError for the plain number in `book.score`; `safe=False` allows
    serializing the non-dict value while keeping the response shape the
    frontend expects (a bare number).
    """
    book = get_object_or_404(Book, id=book_id)
    return JsonResponse(book.score, safe=False)
{
"api_name": "models.Book.objects.all",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "models.B... |
19117157583 | from gridworld import *
import simulateController as Simulator
import copy
import compute_all_vis
import cv2
# mapname = 'BeliefTestEvasion'
mapname = 'BelieEvasionTwenty'
# Load the map image as grayscale and shrink it to a 15x15 occupancy grid.
filename = 'figures/'+mapname+'.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image,dsize=(15,15),interpolation=cv2.INTER_AREA)
h, w = image.shape[:2]
# Input/output naming for this experiment.
folder_locn = 'Examples/'
example_name = 'Jonas_Belief_Evasion_Terminal_act_PUDO_blocks'
# example_name = 'Jonas_Belief_Evasion_PUDO'
trial_name = folder_locn + example_name
outfile = trial_name + '.json'
infile = copy.deepcopy(trial_name)
gwfile = folder_locn + '/figs/gridworldfig_' + example_name + '.png'
# Scenario: one agent starting at state 54, one moving obstacle at state 47.
nagents = 1
# targets = [[],[],[],[],[]]
targets = [[]]
initial = [54]
moveobstacles = [47]
# Gridworld takes [path, target_size, cv2 interpolation] as its map spec.
filename = [filename,(15,15),cv2.INTER_AREA]
gwg = Gridworld(filename,nagents=nagents, targets=targets, initial=initial, moveobstacles=moveobstacles)
gwg.colorstates = [set(), set()]
gwg.render()
# gwg.draw_state_labels()
gwg.save(gwfile)
# Belief partition: pg[agent] maps a region id to the set of grid states
# in that region (obstacles removed).  This first assignment partitions
# by raw state index in bands of 30 and is overwritten by the spatial
# 3x3 block partition built below.
partition = dict()
allowed_states = [[None]] * nagents
pg = [[None]]*nagents
allowed_states[0] = list(set(gwg.states) - set(gwg.obstacles))
# pg[0] = {0:allowed_states[0]}
# pg[0] = {0: set.union(*[set(range(0,10))]) - set(gwg.obstacles), 1: set.union(*[set(range(10,20))]) - set(gwg.obstacles), 2: set.union(*[set(range(20,30))]) - set(gwg.obstacles),
#          3: set.union(*[set(range(30,40))]) - set(gwg.obstacles), 4: set.union(*[set(range(40,50))]) - set(gwg.obstacles), 5: set.union(*[set(range(50,60))]) - set(gwg.obstacles),
#          6: set.union(*[set(range(60,70))]) - set(gwg.obstacles), 7: set.union(*[set(range(70,80))]) - set(gwg.obstacles), 8: set.union(*[set(range(80,90))]) - set(gwg.obstacles),
#          9: set.union(*[set(range(90,100))]) - set(gwg.obstacles)}
pg[0] = {0: set.union(*[set(range(0,30))]) - set(gwg.obstacles), 1: set.union(*[set(range(30,60))]) - set(gwg.obstacles), 2: set.union(*[set(range(60,90))]) - set(gwg.obstacles),
         3: set.union(*[set(range(90,120))]) - set(gwg.obstacles), 4: set.union(*[set(range(120,150))]) - set(gwg.obstacles), 5: set.union(*[set(range(150,180))]) - set(gwg.obstacles),
         6: set.union(*[set(range(180,210))]) - set(gwg.obstacles), 7: set.union(*[set(range(210,225))]) - set(gwg.obstacles)}
# Spatial 3x3 block partition of the 15x15 grid: rows and columns are
# split at 5 and 10, giving nine 5x5 regions (block1..block9).
block1 = []
block2 = []
block3 = []
block4 = []
block5 = []
block6 = []
block7 = []
block8 = []
block9 = []
for s in gwg.states:
    (row,col)=gwg.coords(s)
    if row<5:
        if col<5:
            block1.append(s)
        elif col<10:
            block2.append(s)
        else:
            block3.append(s)
    elif row<10:
        if col<5:
            block4.append(s)
        elif col<10:
            block5.append(s)
        else:
            block6.append(s)
    else:
        if col<5:
            block7.append(s)
        elif col<10:
            block8.append(s)
        else:
            block9.append(s)
# Final partition used by the simulator (overrides the band partition above).
pg[0] = {0: set.union(*[set(block1)]) - set(gwg.obstacles), 1: set.union(*[set(block2)]) - set(gwg.obstacles), 2: set.union(*[set(block3)]) - set(gwg.obstacles),
         3: set.union(*[set(block4)]) - set(gwg.obstacles), 4: set.union(*[set(block5)]) - set(gwg.obstacles), 5: set.union(*[set(block6)]) - set(gwg.obstacles),
         6: set.union(*[set(block7)]) - set(gwg.obstacles), 7: set.union(*[set(block8)]) - set(gwg.obstacles), 8: set.union(*[set(block9)]) - set(gwg.obstacles)}
# Visibility: precompute, for radius visdist[0], which states are invisible
# from each state given the obstacle layout extracted from the image.
visdist = [5,20,3500,3500]
target_vis_dist = 2
vel = [3,2,2,2]
invisibilityset = []
obj = compute_all_vis.img2obj(image)
iset = compute_all_vis.compute_visibility_for_all(obj, h, w, radius=visdist[0])
invisibilityset.append(iset)
# Run the user-controlled partition simulation for agent 0.
filename = []
outfile = trial_name+'agent'+str(0) +'.json'
filename.append(outfile)
Simulator.userControlled_partition(filename[0], gwg, pg[0], moveobstacles, invisibilityset)
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
... |
1842114863 | from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
def get_title(soup):
    """Extract the product title from a 1mg product-page soup.

    Returns the stripped title text, or "" when the title element is
    absent (``soup.find`` returns None, so the ``.text`` access fails).
    """
    try:
        # Outer Tag object for the drug-header title.
        title = soup.find("h1", attrs={"class":'DrugHeader__title-content___2ZaPo'})
        # Inner NavigableString.
        title_value = title.text
        title_string = title_value.strip()
    # Narrowed from a bare `except`: only the expected missing-element
    # failure (None.text -> AttributeError) is treated as "no title".
    except AttributeError:
        title_string = ""
    return title_string
# Function to extract Product Price
def get_price(soup):
    """Extract the product price from a 1mg product-page soup.

    Tries the "best price" div first, then the plan-option offer-price
    span; returns "" when neither element exists.
    """
    try:
        price = soup.find("div", attrs={'class':'DrugPriceBox__best-price___32JXw'}).text.strip()
    # Narrowed from bare `except`: soup.find returning None raises
    # AttributeError on `.text` — the only failure we expect here.
    except AttributeError:
        try:
            price=soup.find("span", attrs={'class':'PriceBoxPlanOption__offer-price___3v9x8 PriceBoxPlanOption__offer-price-cp___2QPU_'}).text.strip()
        except AttributeError:
            price = ""
    return price
def mgscrap(URL,HEADERS):
    """Scrape up to five product links from a 1mg listing page.

    Fetches `URL`, follows the first five outbound product links, and
    returns a DataFrame with columns title/price/links/product.  All
    failures are best-effort: a failed product page is skipped, and any
    top-level failure returns an empty DataFrame with the same columns.
    """
    try:
        webpage = requests.get(URL, headers=HEADERS)
        # Soup Object containing all data
        soup = BeautifulSoup(webpage.text, "html.parser")
        links = soup.find_all("a", attrs={'target':'_blank','rel':'noopener'})
        links_list=[]
        # Keep only the first five product hrefs.
        for link in links:
            links_list.append(link.get('href'))
            if len(links_list)>=5:
                break
        d = {"title":[], "price":[],'links':[],'product':[]}
        for link in links_list:
            try:
                plk="https://www.1mg.com" + link
                new_webpage = requests.get("https://www.1mg.com" + link, headers=HEADERS)
                new_soup = BeautifulSoup(new_webpage.text, "html.parser")
                title =get_title(new_soup)
                price=get_price(new_soup)
                # Skip products with a missing title or price.
                if title=="":
                    continue
                if price=="":
                    continue
                d['title'].append(title)
                d['price'].append(price)
                # NOTE(review): 'links' is always appended as "" — looks
                # intentional as a placeholder column; confirm downstream use.
                d['links'].append("")
                d['product'].append(plk)
            # Deliberate best-effort: any per-product failure skips that product.
            except:
                continue
        mg_df = pd.DataFrame.from_dict(d)
        # Drop rows with empty titles (defensive; loop already filters them).
        mg_df['title'].replace('', np.nan, inplace=True)
        mg_df = mg_df.dropna(subset=['title'])
        return mg_df
    # Deliberate best-effort: on any top-level failure, return an empty frame.
    except:
        df = {"title":[], "price":[],'links':[],'product':[]}
        mg_df = pd.DataFrame.from_dict(df)
        mg_df['title'].replace('', np.nan, inplace=True)
        mg_df = mg_df.dropna(subset=['title'])
        return mg_df
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
41279707209 | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
class Window(QMainWindow):
    """Main window embedding the YouTube TV web interface in a web view."""

    #: User agent spoofing a smart TV so youtube.com/tv serves the TV frontend.
    TV_USER_AGENT = "Mozilla/5.0 (SMART-TV; Linux; Smart TV) AppleWebKit/537.36 (KHTML, like Gecko) Thano/3.0 Chrome/98.0.4758.102 TV Safari/537.36"

    def __init__(self):
        super().__init__()
        self.setWindowTitle("YouTube")
        self.setWindowIcon(QIcon("youtube.png"))
        self.setGeometry(0, 0, 1280, 800)
        # The web view is the sole central widget.
        self.webEngineView = QWebEngineView()
        self.setCentralWidget(self.webEngineView)
        # Apply the spoofed user agent before issuing the initial load.
        self.webEngineView.page().profile().setHttpUserAgent(self.TV_USER_AGENT)
        self.webEngineView.load(QUrl('https://youtube.com/tv'))
# Bootstrap the Qt application, show the main window, and enter the event
# loop; exit the process with Qt's return code.
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineView",
"line_number": 15,
"usage_type": "call"
},
{
... |
71861361633 | import nonebot
from nonebot import on_command, on_message
# from nonebot.adapters import Bot, Event
from nonebot.plugin import Plugin
from typing import Dict, List, Tuple, Set, Union
import datetime
from .my_config import Config
from ... import kit
from ...kit.nb import message as mskit
# Merge the global NoneBot driver config into this plugin's typed Config.
global_config = nonebot.get_driver().config
config = Config.parse_obj(global_config)
# Plugin registry entry; `extra['command']` and `extra['alias']` drive the
# trigger-keyword scan in the message handler below.
__plugin_meta__ = kit.nb.plugin.metadata(
    name = '我要妹子',
    description = '存储美图,或者随机返回一张本群或全局已存储美图',
    usage = f'回复某条图片消息,回复内容需包含 \".wymz\"',
    extra = {
        'command': 'wymz',
        'alias' : {'美图', '存图', 'meizi', 'maze'}
    }
)
def get_current_time_string() -> str:
    """Return the current local time formatted as ``YYYYMMDD-HHMMSS``."""
    now = datetime.datetime.now()
    return now.strftime('%Y%m%d-%H%M%S')
woyaomeizi = on_message(priority=1, block=False)

@woyaomeizi.handle()
async def handle_woyaomeizi(event : mskit.GroupMessageEvent, bot : mskit.Bot):
    """Save every image in the replied-to message when the reply text
    contains "." + the command keyword (or one of its aliases), then
    report how many images were stored/failed.
    """
    group_id = event.group_id
    # Only replies are candidates.
    if not event.message.count('reply'):
        return
    # Bug fix: `extra['alias']` is declared as a *set* literal, and the
    # original concatenated it to a list with `+`, which raises TypeError.
    # Splatting both into one list works for any iterable container.
    keywords = [__plugin_meta__.extra['command'], *__plugin_meta__.extra['alias']]
    if not any(event.message.count(value = '.' + keyword) for keyword in keywords):
        return
    # Fetch the message being replied to (renamed from `id`, which
    # shadowed the builtin).
    reply_id = event.message[0].data['id']
    rep = await bot.get_msg(message_id = reply_id)
    if not rep.message.count('image'):
        await mskit.send_reply(message = '这里面没有图片哦', event = event)
        return
    success_count = 0
    fail_count = 0
    for image in rep.message['image']:
        url = image['url']
        # Timestamped filename under a per-group directory.
        if kit.net.save_image(url = url, path = f'./data/wymz/{group_id}/{get_current_time_string()}.jpg'):
            success_count += 1
        else:
            fail_count += 1
    if success_count > 0:
        await mskit.send_reply(message = f'已存储 {success_count} 张图片', event = event)
    if fail_count > 0:
        await mskit.send_reply(message = f'警告:有 {fail_count} 张图片存储失败', event = event)
| AntiLeaf/CirnoBot | src/plugins/wymz/__init__.py | __init__.py | py | 2,072 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "nonebot.get_driver",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "my_config.Config.parse_obj",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "my_config.Config",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "kit.nb... |
17243696411 | import sys
sys.path.insert(0, "/home/adriano/projeto_mestrado/modules")
import numpy as np
import pickle
from PIL import Image
# This is a sample Python script.
import vessel_analysis as va
if __name__ == '__main__':
    imag = 'Experiment #1 (adults set #1)_20x_batch1 - Superfical layers@45-Image 4-20X'
    #imag = '3D P0@CTL-3-FC-A'
    pasta_mestrado ="/home/adriano/projeto_mestrado/modules"
    arquivo = f"{pasta_mestrado}/Vetores_Extraidos_json/novos/{imag}.json"
    caminho_img = f"{pasta_mestrado}/Imagens/vessel_data/images/{imag}.tiff"
    # Read the extracted vessel paths from the JSON file into an array.
    array_path = va.retorna_paths(arquivo)
    # Load the source image.
    img = np.array(Image.open(caminho_img))
    # Paths come in pairs, so process len//2 pairs.
    half_array = len(array_path)//2
    x=0
    for i in range(half_array):
        # Crop/translate the image around the current pair of paths.
        img, caminhos_transladados, primeiro_ponto = va.redimensiona_imagem(array_path[x:x+2], caminho_img)
        # NOTE(review): the range is always computed from paths 0 and 1,
        # not the current pair (array_path[x], array_path[x+1]) — confirm
        # whether that is intended.
        alcance = va.setar_alcance(array_path[0], array_path[1])
        vessel_mod, cross_t = va.gera_vessel_cross(img, caminhos_transladados[0], caminhos_transladados[1], alcance)
        #va.plot_figure(img, vessel_mod, cross_t)
        #plot_figure2(img, vessel_mod, cross_t)
        # Save the resulting vessel model as a .pickle.
        data_dump = {"img_file": caminho_img, "vessel_model": vessel_mod, "primeiro_ponto": primeiro_ponto}
        savedata = f'{pasta_mestrado}/Vessel_Models_pickle/novos/{imag}_savedata{i}.pickle'
        pickle.dump(data_dump, open(savedata,"wb"))
        x+=2
| AdrianoCarvalh0/texture_codes | modules/Vessel_Analysis/main.py | main.py | py | 1,482 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "vessel_analysis.retorna_paths",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array... |
10049156994 | from flask import Flask, render_template_string
app = Flask(__name__)
# Emit JSON responses with raw (non-escaped) unicode characters.
app.config['JSON_AS_ASCII'] = False
def filtered(template):
    """Strip each blacklisted substring from `template` (single pass).

    One call removes each banned token once via str.replace; callers loop
    until a fixed point to catch tokens that reassemble after removal.
    """
    banned_tokens = ("self.__dict__", "url_for", "config", "getitems", "../", "process")
    for token in banned_tokens:
        if token in template:
            template = template.replace(token, "")
    return template
@app.route("/")
def index():
return "Please find the flags on this site."
@app.route("/<path:template>")
def template(template):
if len(template) > 500:
return "too long input"
while filtered(template) != template:
template = filtered(template)
return render_template_string(template)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| okayu1230z/simple_ssti | src/app.py | app.py | py | 660 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.render_template_string",
"line_number": 28,
"usage_type": "call"
}
] |
12037648038 | import json
__all__ = ['base_publish_json']
def base_publish_json(request_dict):
    """
    Build the client publish json of the base protocols.

    base protocol: MQTT(1), CoAP(2), WebSocket(6)
    """
    # Wrap the device request payload in the data-plane envelope.
    envelope = {
        'data_type': 'request',
        'task_id': request_dict['taskID'],
        'data': request_dict['payload'],
    }
    # Streamed requests carry their stream id alongside the task id.
    if request_dict.get('streamID'):
        envelope['stream_id'] = request_dict['streamID']
    return {
        'qos': 1,
        'topic': request_dict['prefixTopic'] + request_dict['topic'],
        'payload': json.dumps(envelope),
    }
| actorcloud/ActorCloud | server/actor_libs/emqx/publish/protocol/base.py | base.py | py | 661 | python | en | code | 181 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 23,
"usage_type": "call"
}
] |
37460030953 | #!/usr/bin/python3
__version__ = '0.0.1' # Time-stamp: <2021-01-15T17:44:23Z>
## Language: Japanese/UTF-8
"""「大バクチ」の正規分布+マイナスのレヴィ分布のためのパラメータを計算しておく。"""
##
## License:
##
## Public Domain
## (Since this small code is close to be mathematically trivial.)
##
## Author:
##
## JRF
## http://jrf.cocolog-nifty.com/software/
## (The page is written in Japanese.)
##
import random
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.special import gamma, factorial
import matplotlib.pyplot as plt
import csv
import argparse
# Global run configuration; parse_args() overwrites these from the CLI.
ARGS = argparse.Namespace()
ARGS.output = "normal_levy_1.0.csv"  # destination CSV of (cut, mu) rows
ARGS.trials = 1000000  # Monte-Carlo samples per objective evaluation
ARGS.mu = 0
ARGS.theta = 1
ARGS.sigma = None  # defaults to 10 * theta inside main()
ARGS.bins = 50
ARGS.max = -5
ARGS.min = -10000
def parse_args ():
    """Populate the module-level ARGS namespace from command-line flags."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-t", "--trials", type=int)
    ap.add_argument("--output", type=str)
    ap.add_argument("--mu", type=float)
    ap.add_argument("--theta", type=float)
    ap.add_argument("--cut", type=float)
    ap.add_argument("--min", type=float)
    ap.add_argument("--max", type=float)
    # Write parsed values directly onto the shared ARGS namespace.
    ap.parse_args(namespace=ARGS)
def normal_levy_rand (mu, sigma, theta, cut, size=None):
    """Sample a normal minus one-sided Levy mixture, floored at `cut`.

    Draws z ~ N(0,1) to form the Levy-like term theta/z**2 - mu/2, draws
    the normal part from N(mu/2, sigma), and clamps the difference below
    at `cut`.
    """
    gauss = np.random.normal(0, 1, size=size)
    levy_term = theta / (gauss ** 2) - mu/2
    normal_part = np.random.normal(mu/2, sigma, size=size)
    raw = normal_part - levy_term
    return np.where(raw > cut, raw, cut)
def calc_score (x, cut):
    """Squared sample mean of the mixture at mu=x — the objective that
    main() minimizes to find the zero-mean mu for each cut."""
    samples = normal_levy_rand(x, ARGS.sigma, ARGS.theta, cut, ARGS.trials)
    return np.square(np.mean(samples))
def main ():
    """For each cut value, search the mu that makes the truncated mixture
    zero-mean, then write the (cut, mu) table to ARGS.output as CSV."""
    if ARGS.sigma is None:
        ARGS.sigma = 10 * ARGS.theta
    # Cut grid: coarse deep in the tail, progressively finer toward -5.
    edges = list(range(-10000, -1000, 1000)) + list(range(-1000, -100, 100)) + list(range(-100, -10, 5)) + list(range(-10, -5, 1)) + [-5]
    mu = []
    for cut in edges:
        # Golden-section search for the mu that zeroes the sample mean.
        res = minimize_scalar(lambda x: calc_score(x, cut), bracket=(-20, 20), method='golden')
        sc = calc_score(res.x, cut)
        print (cut, ":", res.success, ":", res.x, ":", sc)
        mu.append(res.x)
    with open(ARGS.output, 'w') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC,
                            lineterminator='\n')
        # One row per (cut, mu) pair.
        writer.writerows(np.array([edges, mu]).T)
    #plt.plot(edges, mu)
    #plt.show()
# Script entry point: parse CLI flags, then run the parameter sweep.
if __name__ == '__main__':
    parse_args()
    main()
| JRF-2018/simbd | generate_normal_levy_csv.py | generate_normal_levy_csv.py | py | 2,426 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.Namespace",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.... |
24618522486 | """
This module contains machine learning model class
"""
import os
import sys
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import (
CSVLogger,
EarlyStopping,
History,
ModelCheckpoint,
TerminateOnNaN,
)
from features import quantize
from LSTNet.lstnet_datautil import DataUtil
from LSTNet.lstnet_model import (
LSTNetModel,
ModelCompile,
PostARTrans,
PostSkipTrans,
PreARTrans,
PreSkipTrans,
)
from LSTNet.lstnet_plot import AutoCorrelationPlot, PlotHistory, PlotPrediction
from LSTNet.lstnet_util import GetArguments, LSTNetInit
from LSTNet.util.model_util import LoadModel, SaveHistory, SaveModel, SaveResults
from LSTNet.util.Msglog import LogInit
# Seed every RNG source for reproducible training runs.
tf.random.set_seed(0)
import random
random.seed(0)
np.random.seed(0)
logger_name = "lstnet"
import logging
log = logging.getLogger(logger_name)
# Custom Keras layers required to deserialize a saved LSTNet model.
custom_objects = {
    "PreSkipTrans": PreSkipTrans,
    "PostSkipTrans": PostSkipTrans,
    "PreARTrans": PreARTrans,
    "PostARTrans": PostARTrans,
}
class Model(object):
    """Wrapper around an LSTNet time-series model: configuration, training,
    persistence, and Monte-Carlo prediction with quantile post-processing."""
    def __init__(self, model_name, horizon, window, epochs):
        # CLI-derived defaults first, then experiment-specific overrides.
        self.init = self.init_args()
        self.name = model_name
        self.init.horizon = horizon
        self.init.window = window
        self.init.save = os.path.join("..", "..", "save", model_name)
        self.init.load = os.path.join("..", "..", "save", model_name)
        self.init.epochs = epochs
        self.init.highway = window  # default 24
        # self.init.skip = horizon #default 24
        self.scale = None  # per-column scale factors, set during training
    def init_args(self):
        """Parse LSTNet CLI arguments and set up logging; exits on bad args."""
        try:
            args = GetArguments()
        except SystemExit as err:
            print("Error reading arguments")
            exit(0)
        init = LSTNetInit(args)
        log = LogInit(logger_name, init.logfilename, init.debuglevel, init.log)
        log.info("Python version: %s", sys.version)
        log.info("Tensorflow version: %s", tf.__version__)
        log.info(
            "Keras version: %s ... Using tensorflow embedded keras",
            tf.keras.__version__,
        )
        init.dump()
        return init
    def validate_model(self, lstnet):
        # Abort hard if model construction or loading failed.
        if lstnet is None:
            log.critical("Model could not be loaded or created ... exiting!!")
            exit(1)
        return
    def train_model(self, rawdata):
        """An abstract training function"""
        Data = self.preprocess_data(rawdata.values)
        self.scale = Data.scale
        # One CNN kernel per input column.
        self.init.CNNKernel = self.scale.shape[0]
        log.info("Creating model")
        self.model = LSTNetModel(self.init, Data.train[0].shape)
        self.validate_model(self.model)
        lstnet_tensorboard = ModelCompile(self.model, self.init)
        log.info(
            "Training model ... started at {}".format(
                datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            )
        )
        h = train(self.model, Data, self.init, lstnet_tensorboard)
        loss, rse, corr, nrmse, nd = self.model.evaluate(Data.valid[0], Data.valid[1])
        log.info(
            "Validation on the validation set returned: Loss:%f, RSE:%f, Correlation:%f, NRMSE:%f, ND:%f",
            loss,
            rse,
            corr,
            nrmse,
            nd,
        )
        test_result = {"loss": loss, "rse": rse, "corr": corr, "nrmse": nrmse, "nd": nd}
        SaveModel(self.model, self.init.save)
        # SaveResults(self.model, self.init, h.history, test_result, list(test_result.keys()))
        # SaveHistory(self.init.save, h.history)
        log.info(
            "Training is done at {}".format(
                datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            )
        )
        return
    def make_predictions(self, rawdata, start, end):
        """An abstract prediction function"""
        log.info("Load model from %s", self.init.load)
        lstnet = LoadModel(self.init.load, custom_objects)
        self.validate_model(lstnet)
        Data_test = self.normalize_data(rawdata.values)
        log.info(
            "Predict testing data ... started at {}".format(
                datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            )
        )
        # Monte-Carlo sampling: repeat prediction mc_iterations times and
        # aggregate mean/std across the repetitions below.
        Yt_hat = np.array(
            [
                lstnet.predict(Data_test, verbose=0)
                for _ in range(self.init.mc_iterations)
            ]
        )
        q10, q50, q90 = self.postprocess_data([np.mean(Yt_hat, 0), np.std(Yt_hat, 0)])
        log.info(
            "Predict testing data done at {}".format(
                datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            )
        )
        return np.array([q10, q50, q90])
    def preprocess_data(self, rawdata, trainpercent=0.9, validpercent=0.1, normalize=2):
        """A wrapper to create train, validation, and test datasets for model training/testing
        based on influx data
        """
        Data = DataUtil(
            rawdata,
            trainpercent,
            validpercent,
            self.init.horizon,
            self.init.window,
            normalize,
        )
        # If file does not exist, then Data will not have attribute 'data'
        if hasattr(Data, "data") is False:
            print("Could not load data!! Exiting")
            exit(1)
        return Data
    def normalize_data(self, rawdata, predict=True):
        """Scale columns by the training-time factors (in place on `rawdata`);
        when `predict` is True, also window the series into model inputs.
        NOTE(review): returns None when predict is False — callers then rely
        on the in-place scaling only.
        """
        if self.init.normalise == 2:
            for i in range(self.scale.shape[0]):
                if self.scale[i] != 0:
                    rawdata[:, i] = rawdata[:, i] / self.scale[i]
        if predict == True:
            # Slide a window of length `window` over the series to build X.
            test_set = range(self.init.window, int(rawdata.shape[0]))
            n = len(test_set)
            X = np.zeros((n, self.init.window, rawdata.shape[1]))
            for i in range(n):
                end = test_set[i]
                start = end - self.init.window
                X[i, :, :] = rawdata[start:end, :]
            return X
    def postprocess_data(self, data):
        """A wrapper to rescale the predictions and form quantiles"""
        # data = [mc_mean, mc_std]; rescale both by the training factors.
        if self.init.normalise == 2:
            for i in range(self.scale.shape[0]):
                for fl in data:
                    if self.scale[i] != 0:
                        fl[:, i] = fl[:, i] * self.scale[i]
        # Narrow quantiles around the MC mean/std, as non-negative ints.
        q10 = quantize(data[0], data[1], 0.45).astype(int).clip(0)
        q50 = quantize(data[0], data[1], 0.5).astype(int).clip(0)
        q90 = quantize(data[0], data[1], 0.55).astype(int).clip(0)
        return q10, q50, q90
def train(model, data, init, tensorboard=None):
    """Fit `model` on `data.train` with early stopping, best-weight
    checkpointing and NaN termination.

    Parameters:
        model: compiled Keras model.
        data: dataset exposing .train / .valid (X, y) pairs.
        init: LSTNet settings (validate, patience, save, epochs, batchsize).
        tensorboard: optional TensorBoard callback to append.

    Returns the Keras History object from model.fit.
    """
    if init.validate == True:
        val_data = (data.valid[0], data.valid[1])
    else:
        val_data = None
    early_stop = EarlyStopping(
        monitor="val_loss",
        min_delta=0.0001,
        patience=init.patience,
        verbose=1,
        mode="auto",
    )
    mcp_save = ModelCheckpoint(
        init.save + ".h5",
        save_best_only=True,
        save_weights_only=True,
        monitor="val_loss",
        mode="min",
    )
    # Bug fix: the original passed callbacks=None whenever no TensorBoard
    # callback was supplied, silently disabling early stopping,
    # checkpointing and NaN termination.  Always register the core
    # callbacks and append TensorBoard only when provided.
    callbacks = [early_stop, mcp_save, TerminateOnNaN()]
    if tensorboard:
        callbacks.append(tensorboard)
    history = model.fit(
        x=data.train[0],
        y=data.train[1],
        epochs=init.epochs,
        batch_size=init.batchsize,
        validation_data=val_data,
        callbacks=callbacks,
    )
    return history
| aleksei-mashlakov/parking-forecast | src/PMV4Cast/ml_model.py | ml_model.py | py | 7,462 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tensorflow.random.set_seed",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.r... |
1070827302 | from tenacity import retry, stop_after_attempt, wait_fixed
from aio_pika import Message, connect_robust
from aio_pika.abc import AbstractIncomingMessage
import json
import aiosqlite
from config import Settings
from loguru import logger
class RemoteDictRpcServer:
    """RPC server that consumes get/set commands from a RabbitMQ queue and
    serves them from a SQLite-backed key/value table, replying on the
    message's reply_to queue."""

    def __init__(self):
        self.channel = None
        self.exchange = None
        self.queue = None
        self.connection = None
        self.settings = Settings()

    # retry connection setup in case broker is not ready yet
    @retry(stop=stop_after_attempt(5), wait=wait_fixed(10))
    async def setup(self) -> "RemoteDictRpcServer":
        """Method for establishing connection with RabbitMQ and SQLite db setup"""
        # create connection to RabbitMQ
        try:
            self.connection = await connect_robust(
                host=self.settings.RABBITMQ_HOST,
                port=self.settings.RABBITMQ_PORT,
                login=self.settings.RABBITMQ_LOGIN,
                password=self.settings.RABBITMQ_PASSWORD,
                ssl=self.settings.RABBITMQ_SSL)
            logger.info("Rabbit connection established successfully.")
        except ConnectionError:
            logger.warning("Rabbit broker not available, retrying connection in 10 seconds...")
            # Re-raised so the tenacity @retry decorator schedules another attempt.
            raise Exception
        # establish a channel
        self.channel = await self.connection.channel()
        self.exchange = self.channel.default_exchange
        # declare a queue
        self.queue = await self.channel.declare_queue(self.settings.RABBITMQ_TASK_QUEUE)
        # initialize sqlite database if does not exist
        async with aiosqlite.connect("/data/" + self.settings.SQLITEDB_FILE) as db:
            sql = "create table if not exists {} (key text unique , value float);".format(self.settings.SQLITEDB_TABLE)
            await db.execute(sql)
            await db.commit()
        logger.info("Remote dictionary server setup completed.")
        return self

    async def disconnect(self) -> None:
        """Method for closing broker connection on shutdown"""
        await self.connection.close()

    async def process_tasks(self) -> None:
        """Method for asynchronous Rabbit message consumption and processing."""
        async with self.queue.iterator() as q:
            message: AbstractIncomingMessage
            async for message in q:
                try:
                    # requeue=False: a failing message is dropped, not redelivered.
                    async with message.process(requeue=False):
                        assert message.reply_to is not None
                        data = json.loads(message.body)
                        resp_msg = await self._interact_with_db(data)
                        response = json.dumps(resp_msg).encode()
                        # Reply on the caller's private queue with its correlation id.
                        await self.exchange.publish(
                            Message(body=response, correlation_id=message.correlation_id),
                            routing_key=message.reply_to)
                except Exception as e:
                    logger.exception("Processing error for message {} ({})".format(message, e))

    async def _interact_with_db(self, data) -> dict:
        """Method for fetching or inserting key-value pair to table in db"""
        try:
            async with aiosqlite.connect("/data/" + self.settings.SQLITEDB_FILE) as db:
                # task type 1 : retrieve from database
                if data['command'] == 'get':
                    sql = "SELECT value FROM {} WHERE key='{}'".format(self.settings.SQLITEDB_TABLE, data['key'])
                    async with db.execute(sql) as cursor:
                        row = await cursor.fetchone()
                        if row:
                            resp_msg = {data['key']: row[0]}
                        else:
                            msg = "Record with key '{}' does not exist".format(data['key'])
                            resp_msg = {"error": msg}
                            logger.error(msg)
                # task type 2 : upsert in database
                elif data['command'] == 'set':
                    sql = "INSERT INTO {} (key,value) VALUES (?,?) ON CONFLICT(key) DO UPDATE SET value = excluded.value".format(self.settings.SQLITEDB_TABLE)
                    await db.execute(sql, (data['key'], data['value']))
                    await db.commit()
                    resp_msg = {"status": "successfully inserted: {}".format({data['key']: data['value']})}
                # for other commands report error
                else:
                    # Bug fix: the original called .format() on a string with no
                    # placeholder, so the offending command was never shown.
                    msg = "wrong command type: {}".format(data['command'])
                    resp_msg = {"error": msg}
                    logger.error(msg)
                return resp_msg
        except Exception as e:
            resp_msg = {"error": "Server side error occurred while processing request"}
            logger.error(e)
            return resp_msg
| jaksklo/RemoteDictionary | src/rpc_server.py | rpc_server.py | py | 4,799 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.Settings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "aio_pika.connect_robust",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "loguru.logger.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "loguru.log... |
3561958374 | #!/usr/bin/python3.6
#-*- coding: utf-8 -*-
"""
@Time : 2023/3/23 9:41
@Author : panrhenry
"""
import time
from playwright.sync_api import sync_playwright as playwright
pw = playwright().start()
chrom = pw.chromium.launch(headless=False)
context = chrom.new_context() # 需要创建一个 context
page = context.new_page() # 创建一个新的页面
page.goto("https://web.innodealing.com/auth-service/signin")
page.get_by_placeholder("DM账号/手机号").click()
page.get_by_placeholder("DM账号/手机号").click()
page.get_by_placeholder("DM账号/手机号").fill("yuyingjie")
page.get_by_placeholder("密码").click()
page.get_by_placeholder("密码").fill("123456")
page.get_by_text("我已阅读并同意相关服务条款和政策").click()
page.get_by_role("button", name="登录").click()
time.sleep(3)
page.frame_locator("iframe >> nth=0").locator(".nY5g3oU45oPl4aioSr5\\+ag\\=\\=").click()
time.sleep(1)
page.frame_locator("iframe >> nth=0").get_by_text("首页").first.click()
time.sleep(1)
page.frame_locator("iframe >> nth=0").get_by_role("button", name="历史成交").click()
time.sleep(4)
page.click('//*[@id="bondModule"]/div[1]/div[1]/div/div/div/div[1]/div/div/div[1]')
t1 = page.frame_locator("iframe >> nth=0").locator("div").filter(has_text="万科企业股份有限公司").nth(0)
time.sleep(2)
t2 = page.frame_locator("iframe >> nth=0").get_by_role("combobox").filter(has_text="万科企业股份有限公司").get_by_role("textbox")
# time.sleep(2)
page.frame_locator("iframe >> nth=0").get_by_role("combobox").filter(has_text="万科企业股份有限公司").get_by_role("textbox").fill("驻马店市产业投资集团有限公司")
time.sleep(3)
page.frame_locator("iframe >> nth=0").get_by_role("menuitem", name="驻马店市产业投资集团有限公司", exact=True).click()
time.sleep(10)
# ---------------------
context.close()
| panrhenry/py_pro_1 | pachong/getNovel_new_39/111.py | 111.py | py | 1,875 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "playwright.sync_api.sync_playwright",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep"... |
10686339907 | from abc import ABCMeta, abstractmethod
from typing import Dict, Any, Optional, List
import torch
from ...core.helpers import Namespace
from ...core.logger import LOGGER as logging
from ...core.observers import EventManager
from ...core.exceptions import CheckpointNotFound
class AbstractNetwork(torch.nn.Module, metaclass=ABCMeta):
@abstractmethod
def get_requirements(self) -> List[str]:
raise NotImplementedError
def map(self, module: "AbstractNetwork", *args) -> Dict[str, Any]:
requirements = module.get_requirements()
if len(args) != len(requirements):
raise AttributeError("Cannot map inputs to module")
return {requirement: args[index] for index, requirement in enumerate(requirements)}
def load_checkpoint(self, checkpoint_path: str, device: Optional[torch.device] = None):
if checkpoint_path is None:
raise CheckpointNotFound
if device is None:
device = torch.device("cpu")
logging.info("Restoring from Checkpoint: {}".format(checkpoint_path))
info = torch.load(checkpoint_path, map_location=device)
payload = Namespace(network=self, info=info)
EventManager.dispatch_event(event_name="before_model_checkpoint_load", payload=payload)
self.load_state_dict(info["model"], strict=False)
@staticmethod
def dropout_layer_switch(m, dropout_prob):
if isinstance(m, torch.nn.Dropout):
if dropout_prob is not None:
m.p = dropout_prob
m.train()
def activate_dropout(self, dropout_prob):
self.apply(lambda m: self.dropout_layer_switch(m, dropout_prob))
def mc_dropout(self, data, dropout_prob=None, n_iter=5, loss_type=""):
self.activate_dropout(dropout_prob)
outputs = torch.stack([self.forward(data) for _ in range(n_iter)], dim=0)
if loss_type == "torch.nn.BCEWithLogitsLoss":
outputs = torch.sigmoid(outputs)
return {"logits": torch.mean(outputs, dim=0), "logits_var": torch.var(outputs, dim=0)}
def evidential_classification_multilabel_logits(self, data):
outputs = self.forward(data)
out = torch.sigmoid(outputs)
out = torch.unsqueeze(out, dim=-1)
out = torch.cat((out, 1 - out), -1)
alpha = out + 1
uncertainty = 2 / torch.sum(alpha, dim=-1, keepdim=True)
return {"logits": outputs, "logits_var": uncertainty}
@torch.no_grad()
def evidential_nologits_outputs_processing(self, outputs):
true_logits, false_logits = torch.chunk(outputs, 2, dim=-1)
true_logits = torch.unsqueeze(true_logits, dim=-1)
false_logits = torch.unsqueeze(false_logits, dim=-1)
out = torch.cat((true_logits, false_logits), dim=-1)
return torch.argmin(out, dim=-1)
@torch.no_grad()
def evidential_regression_outputs_processing(self, outputs):
mu, v, alpha, beta = torch.chunk(outputs, 4, dim=-1)
return mu
@torch.no_grad()
def pass_outputs(self, outputs):
return outputs
@torch.no_grad()
def simple_classification_outputs_processing(self, outputs):
return torch.argmax(outputs, dim=-1)
def evidential_classification_multilabel_nologits(self, data):
outputs = self.forward(data)
true_logits, false_logits = torch.chunk(outputs, 2, dim=-1)
true_logits = torch.unsqueeze(true_logits, dim=-1)
false_logits = torch.unsqueeze(false_logits, dim=-1)
out = torch.cat((true_logits, false_logits), dim=-1)
evidence = torch.nn.functional.relu(out)
alpha = evidence + 1
uncertainty = (2 / torch.sum(alpha, dim=-1, keepdim=True)).squeeze()
# logic is reversed as 0 is true and 1 is false
prediction = torch.argmin(out, dim=-1)
softmax_out = torch.softmax(out, dim=-1)
softmax_score, max_indice = torch.max(softmax_out, dim=-1)
prob = alpha / torch.sum(alpha, dim=-1, keepdim=True)
max_prob, max_indice = torch.max(prob, dim=-1)
return {"logits": prediction, "logits_var": uncertainty, "softmax_score": softmax_score, "belief_mass": max_prob}
def evidential_classification(self, data):
outputs = self.forward(data)
evidence = torch.nn.functional.relu(outputs)
alpha = evidence + 1
uncertainty = outputs.size()[-1] / torch.sum(alpha, dim=-1, keepdim=True)
uncertainty = uncertainty.unsqueeze(-1).repeat(1, 1, outputs.size(-1))
return {"logits": outputs, "logits_var": uncertainty}
def evidential_regression(self, data):
outputs = self.forward(data)
mu, v, alpha, beta = torch.chunk(outputs, 4, dim=-1)
v = torch.abs(v) + 1.0
alpha = torch.abs(alpha) + 1.0
beta = torch.abs(beta) + 0.1
epistemic = beta / (v * (alpha - 1))
return {"logits": mu, "logits_var": epistemic}
| elix-tech/kmol | src/kmol/model/architectures/abstract_network.py | abstract_network.py | py | 4,940 | python | en | code | 33 | github-code | 1 | [
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "abc.ABCMeta",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_... |
101243705 | import sqlite3
from lib.User import User
from lib.utils.Format import Format
class Profile:
def __init__(self, loggedInUser: User):
self.profileId = None
self.loggedInUser = loggedInUser
def getProfileId(self):
return self.profileId
def create(self):
con = sqlite3.connect("incollege.db")
cur = con.cursor()
cur.execute("INSERT INTO profiles (profile_user_id) VALUES (?)",
(self.loggedInUser.getUserId(),))
con.commit()
self.profileId = cur.lastrowid
return cur.lastrowid
def exists(self):
con = sqlite3.connect("incollege.db")
cur = con.cursor()
res = cur.execute(
"SELECT profile_id FROM profiles WHERE profile_user_id = ? LIMIT 1",
(self.loggedInUser.getUserId(), ))
profile = res.fetchone()
# Return boolean value if profile exists for a given profile_user_id
if profile:
self.profileId = profile[0]
return profile != None
def setTitle(self, title: str):
format = Format()
title = format.titleCase(title)
con = sqlite3.connect("incollege.db")
cur = con.cursor()
cur.execute(
"UPDATE profiles SET profile_title = ? WHERE profile_user_id = ?",
(title, self.loggedInUser.getUserId()))
con.commit()
def setDescription(self, description: str):
con = sqlite3.connect("incollege.db")
cur = con.cursor()
cur.execute(
"UPDATE profiles SET profile_description = ? WHERE profile_user_id = ?",
(description, self.loggedInUser.getUserId()))
con.commit()
def findOne(self, p_userId):
con = sqlite3.connect("incollege.db")
cur = con.cursor()
res = cur.execute(
"SELECT profile_Id, profile_title, profile_description FROM profiles WHERE profile_user_id = ? LIMIT 1",
(p_userId, ))
profile = res.fetchone()
#con.close()
return profile
def setMajor(self, major: str):
self.loggedInUser.updateMajor(major)
def setUniversity(self, university):
self.loggedInUser.updateUniversity(university) | 01sebar/incollege | lib/Profile.py | Profile.py | py | 2,229 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "lib.User.User",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "lib.utils.Format.Format"... |
26692181436 | __author__ = "Matthias Rost, Alexander Elvers (mrost / aelvers <AT> inet.tu-berlin.de)"
import abc
import enum
import os
import pickle
import random
class AlgorithmIdentifier:
def __init__(self, key, properties=None):
self.key = AlgorithmType(key)
self.properties = properties
self._hash = None
self._str = None
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.key == other.key and self.properties == other.properties
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._hash is None:
self._hash = ""
if self.properties is not None:
for key in sorted(self.properties.keys()):
self._hash += str(key) + ":" + str(self.properties[key]) + ","
self._hash = str(self.key) + self._hash
self._hash = self._hash.__hash__()
return self._hash
def __str__(self):
if self._str is None:
self._str = ""
if self.properties is not None:
self._str = " ("
for key in sorted(self.properties.keys()):
self._str += str(key) + ":" + str(self.properties[key]) + ", "
self._str = self._str[0:-2] + ")"
self._str = str(self.key) + self._str
return self._str
def __getstate__(self):
return self.key, self.properties
def __setstate__(self, state):
self.key, self.properties = state
self._hash = self._str = None
class AlgorithmType(enum.Enum):
MIP = "MIP"
GREEDY_SINGLE = "GREEDY_SINGLE"
GREEDY_PARALLEL = "GREEDY_PARALLEL"
class AbstractAlgorithmManager(abc.ABC):
default_algorithms = []
def __init__(self):
self.algorithms = []
self.algorithm_partition = None
def add_algorithm(self, algorithm_key, properties=None):
algorithm = AlgorithmIdentifier(algorithm_key, properties)
if algorithm in self.algorithms:
raise Exception(f"Algorithm {algorithm_key} with properties {properties} already in use")
self.algorithms.append(algorithm)
def remove_algorithm(self, algorithm_key):
for algorithm in self.algorithms:
if algorithm.key == algorithm_key:
self.algorithms.remove(algorithm)
@classmethod
def get_standard_algorithm_manager(cls):
alg_mgr = cls()
for alg in cls.default_algorithms:
alg_mgr.add_algorithm(*alg)
return alg_mgr
@abc.abstractmethod
def execute_algorithms_in_parallel(self, scenario, max_number_of_processes, *args):
...
def execute_algorithm_multiprocess(self, scenario, algorithm, result_queue, *extra_parameters):
alg = self.create_algorithm(scenario, algorithm, *extra_parameters)
result = alg.run()
result_queue.put([algorithm, result])
@abc.abstractmethod
def create_algorithm(self, scenario, algorithm, *extra_parameters):
...
def get_algorithm_partition(self, max_number_parallel_processes):
result = []
process_count_to_alg = {}
for alg in self.algorithms:
process_count = 1
if alg.key == AlgorithmType.GREEDY_SINGLE or alg.key == AlgorithmType.GREEDY_PARALLEL:
if alg.key == AlgorithmType.GREEDY_PARALLEL:
process_count = alg.properties["processes"]
if process_count not in process_count_to_alg:
process_count_to_alg[process_count] = []
process_count_to_alg[process_count].append(alg)
process_counts = sorted(process_count_to_alg.keys(), reverse=True)
while len(process_counts) > 0:
available_count = max_number_parallel_processes
partition = []
print("starting a new partition")
print(f"remaining elements are {process_count_to_alg} ")
for i in process_counts:
while available_count >= i and i in process_count_to_alg and len(process_count_to_alg[i]) > 0:
available_count -= i
partition.append(process_count_to_alg[i][0])
print(f"\tadding {process_count_to_alg[i][0]} to partition obtaining {partition}")
process_count_to_alg[i] = process_count_to_alg[i][1:]
if len(process_count_to_alg[i]) == 0:
del process_count_to_alg[i]
print(f"\tnew remaining algorithms {process_count_to_alg}")
result.append(partition)
process_counts = sorted(process_count_to_alg.keys(), reverse=True)
return result
class AbstractExperimentManager(abc.ABC):
algorithm_manager_class = None
def __init__(self, probability_for_pair, max_deviation, capacity_factor,substrate_filter=None, number_of_repetitions=1, offset=0):
self.probability_for_pair = probability_for_pair
self.max_deviation = max_deviation
self.capacity_factor = capacity_factor
self.scenario_keys = []
self.scenarios = {}
self.scenario_solutions = {}
self.substrate_filter = substrate_filter
self.number_of_repetitions = number_of_repetitions
self.offset = 0
self.algorithm_manager = self.algorithm_manager_class.get_standard_algorithm_manager()
random.seed(1337)
def unpickle_experiment_manager(path):
with open(path, "rb") as f:
return pickle.loads(f.read())
def pickle_experiment_manager(experiment_manager, path):
print(path)
with open(path, "wb") as f:
f.write(pickle.dumps(experiment_manager))
| submodular-middlebox-depoyment/submodular-middlebox-deployment | src/experiments/abstract_experiment_manager.py | abstract_experiment_manager.py | py | 5,747 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "abc.ABC",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmetho... |
428880409 | import logging
from flask import Blueprint, render_template, request, flash, redirect
from webapp.config import VALID_VALUES, REGRESSION_VALUES
from webapp.utils.dataframe_util import get_enriched_dataframe, prepare_data
from webapp.utils.enrich_sunspots import get_results_for_best_classifier
from webapp.utils.trends_util import get_fourier_prediction, \
prediction_by_type
from webapp.stat.api import get_smoothed_data_by_type
blueprint = Blueprint("stat", __name__, url_prefix="/stat")
def log_and_flash(msg: str) -> None:
""" logging """
logging.warning(msg)
flash(msg)
@blueprint.route("/smoothing_curve", methods=["GET", "POST"])
def process_smoothing():
""" show smoothed curve according to type """
selected = VALID_VALUES[0]
if request.method == "POST":
type_ = request.form.get("smoothing")
selected = type_
if type_ is None or type_ not in VALID_VALUES:
log_and_flash(f"неверный тип сглаживания: {type_}")
return redirect("/")
result = get_smoothed_data_by_type(selected)
return render_template("stat/select_graph.html",
title="Выбор сглаживания",
selected=selected,
time=result[0],
y=result[1],
y2=result[2])
@blueprint.route("/best")
def best_model():
""" display results for best ML model """
info = {'graph': 'Adaboost classifier predictions for max and min'}
data = get_enriched_dataframe()
time, pmax, pmin, max_, sunspots = get_results_for_best_classifier()
period = len(time)
timeseries = time[:period].tolist()
pmin = data["y_min"].values
pmax = data["y_max"].values
return render_template("stat/best.html",
info=info,
time=timeseries,
y=(pmax[:period] * 50).tolist(),
y2=(pmin[:period] * 50).tolist(),
y3=max_[:period].tolist(),
y4=sunspots[:period].tolist())
@blueprint.route("/fourier")
def fourier():
""" display fourier method predictions """
data = get_enriched_dataframe()
time = data["year_float"].values
sunspots = data["sunspots"].values
preds, time2 = get_fourier_prediction(sunspots, time, 300)
return render_template("stat/fourier.html",
time=time.tolist(),
y=sunspots.tolist(),
time2=time2.tolist(),
y2=preds.tolist())
@blueprint.route("/regression", methods=["GET", "POST"])
def regression_prediction():
""" display linear regression predictions """
selected = REGRESSION_VALUES[0]
if request.method == "POST":
type_ = request.form.get("regression")
selected = type_
if type_ not in REGRESSION_VALUES:
log_and_flash(f"неверный тип регрессии: {type_}")
return redirect("/")
data = prepare_data()
time = data["year_float"].values.tolist()
sunspots = data["sunspots"].values.tolist()
predicted, mae = prediction_by_type(selected, data)
print(f"MAE: {mae}")
return render_template("stat/select_regression.html",
title="Тип регрессии",
selected=selected,
time=time,
y=sunspots,
y2=predicted.tolist())
| bystrovpavelgit/solar_trends_prediction | webapp/stat/views.py | views.py | py | 3,581 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "webapp.config.VALID_VALUE... |
21733301111 | """Модуль для схемы записи истории."""
from dataclasses import dataclass
from datetime import datetime
@dataclass
class HistoryDTO:
"""."""
before: int
after: int
changes: int
datetime_utc: datetime
@classmethod
def from_alchemy(cls, record):
"""Метод создания схемы.
Args:
record (_type_): _description_
Returns:
_type_: _description_
"""
return cls(
before=record.before,
after=record.after,
changes=record.changes,
datetime_utc=record.datetime_utc,
)
| YanaShurinova/shift_credit_card | authorization/src/app/dto/history.py | history.py | py | 653 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 6,
"usage_type": "name"
}
] |
26130539033 | import telegram
import os
import sys
import json
#set bot token in enrionmental variable 'outlet_bot_token' before using
TOKEN = os.environ.get('outlet_bot_token')
BOT = telegram.Bot(token=TOKEN)
CHAT_IDS_PATHNAME = 'data/chat_ids.json'
def read_chat_ids(pathname):
try:
with open(pathname, 'r') as json_file:
data = json.load(json_file)
return data
except FileNotFoundError:
try:
#create file if it doesn't exist
with open(pathname, 'x') as new_file:
json.dump([], new_file)
except Exception as e:
print(e)
sys.exit(1)
def write_chat_ids(data, pathname):
try:
with open(pathname, 'w') as outfile:
json.dump(data, outfile)
except Exception as e:
print(e)
sys.exit(1)
def update_chat_ids():
# adds new telegram chat subscribers to CHAT_IDS_PATHNAME
# also returns a list containing all subscribers.
#get new subscribers from telegram api
updates = BOT.get_updates()
new_chat_ids = [c.message.from_user.id for c in updates]
new_chat_ids = list(set(new_chat_ids)) #remove duplicates by converting to set and back to list
#get old subscribers from file
try:
chat_ids = read_chat_ids(CHAT_IDS_PATHNAME)
new_chat_ids = [chat for chat in new_chat_ids if chat not in chat_ids]
print('New telegram bot chat ids: {0}'.format(new_chat_ids))
chat_ids += new_chat_ids
except:
chat_ids = new_chat_ids
write_chat_ids(chat_ids, CHAT_IDS_PATHNAME)
return chat_ids
def send_telegram_message(message):
chat_ids = update_chat_ids()
print('Sending a message to following chats: {0}'.format(chat_ids))
for c_id in chat_ids:
try:
BOT.send_message(text=message, chat_id=c_id)
except telegram.error.BadRequest:
print('Could not send message to: {0}'.format(c_id))
chat_ids.remove(c_id)
write_chat_ids(chat_ids)
def notify(message):
# method to send message on all available notification daemons
send_telegram_message(message)
if __name__ == "__main__":
import sys
notify(sys.argv[1]) | vaarnio/OutletScraper | notifications.py | notifications.py | py | 2,223 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "telegram.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number... |
19892176264 | from typing import Optional, Union
from fretboard.core.collections import StrEnum
from fretboard.data_structures import CircularArray
from fretboard.music_theory.interval import (
AscMelodicMinorScaleIntervals,
DescMelodicMinorScaleIntervals,
HarmonicMinorScaleIntervals,
Interval,
MajorScaleIntervals,
MinorScaleIntervals,
)
from fretboard.music_theory.note import Note
class Key(StrEnum):
Major = "major"
Minor = "minor"
HarmonicMinor = "harmonic_minor"
AscMelodicMinor = "asc_melodic_minor"
DescMelodicMinor = "desc_melodic_minor"
@property
def desc(self) -> str:
if self == Key.HarmonicMinor:
return "Harmonic Minor"
elif self == Key.AscMelodicMinor:
return "Melodic Minor ⬆️"
elif self == Key.DescMelodicMinor:
return "Melodic Minor ⬇️️"
return self.name
_FullChromaticScale = tuple(
[
tuple([Note(n) for n in str_n.split("/")])
for str_n in "B#/C, C#/Db, D, D#/Eb, E/Fb, E#/F, F#/Gb, G, G#/Ab, A, A#/Bb, B/Cb".split(
", "
)
]
)
_ChromaticNotes: tuple = tuple([Note(n) for n in "C, D, E, F, G, A, B".split(", ")])
_ScaleKeyMap = {
Key.Major: MajorScaleIntervals,
Key.Minor: MinorScaleIntervals,
Key.HarmonicMinor: HarmonicMinorScaleIntervals,
Key.AscMelodicMinor: AscMelodicMinorScaleIntervals,
Key.DescMelodicMinor: DescMelodicMinorScaleIntervals,
}
class Scale:
def __init__(self, root_note: Union[str, Note], key: Union[str, Key]):
"""
Create a scale
Args:
root_note: the root note
key: scale key
Examples:
>>> Scale("c", "major")
>>> "C - D - E - F - G - A - B"
Returns: scale
"""
# cast note
if isinstance(root_note, str):
root_note = Note(root_note)
# cast key
if isinstance(key, str):
try:
key = Key(key.lower())
except ValueError:
raise ValueError(f"Invalid key value, {key}")
self.root_note: Note = root_note
self.key: Key = key
# exception for descending minor scale, it's same as natural minor
# but due to complicity of generation, I've decided to implement
# such shortcut :)
if self.key == Key.DescMelodicMinor:
key = Key.Minor
# find a formula to build a target scale
try:
scale_intervals = _ScaleKeyMap[key]
except KeyError:
raise ValueError(f"{key.value} is not supported scale key")
if root_note.has_pitch:
note_without_pitch = root_note.root
new_scale = self._scale(note_without_pitch, scale_intervals)
# pitched scales created with adding pitch to each note to "original" scale,
# e.g. C# scale created by adding sharp to all notes in C scale.
new_scale_notes = []
for note_in_scale in new_scale: # type: Note
no_pitch = not note_in_scale.has_pitch
same_pitch = note_in_scale.pitch == root_note.pitch
if no_pitch or same_pitch:
new_scale_notes.append(
Note(f"{str(note_in_scale)}{root_note.pitch.value}")
)
else:
new_scale_notes.append(note_in_scale.root)
new_scale = new_scale_notes
else:
new_scale = self._scale(root_note, scale_intervals)
self._notes = CircularArray(new_scale)
# desc melodic minor is same as desc natural minor
if self.key == Key.DescMelodicMinor:
new_scale = [new_scale[0]] + list(reversed(new_scale[1:]))
self._notes = CircularArray(new_scale)
def __getitem__(self, index):
return self._notes[index]
def __hash__(self):
return hash(self._notes)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Scale):
return False
return self._notes == other._notes
def __iter__(self):
return (self._notes[i] for i in range(self._notes.size))
def __str__(self):
return str(self._notes)
def __repr__(self):
return repr(self._notes)
def _scale(self, root_note: Note, scale_intervals: tuple) -> list[Note]:
# use chromatic scale as a source
start_note = None
for ch_notes in _FullChromaticScale:
if root_note in ch_notes:
start_note = ch_notes
break
chromatic_scale = CircularArray(_FullChromaticScale, start_value=start_note)
chromatic_notes_order = CircularArray(_ChromaticNotes)
# apply scale formula
scale_notes: list[Note] = []
current_interval = Interval()
for interval_name in scale_intervals:
notes_in_interval: tuple[Note, Optional[Note]] = chromatic_scale[
current_interval.semitones
]
try:
target_root_note = chromatic_notes_order[
chromatic_notes_order.index(scale_notes[-1].root) + 1
]
except IndexError:
target_root_note = root_note
scale_notes.append(
next(n for n in notes_in_interval if n.root == target_root_note)
)
current_interval += interval_name
return scale_notes
@property
def id(self) -> str:
return self.name.lower().replace(" ", "_")
@property
def name(self) -> str:
"""
Human readable scale name
Returns:
scale name
"""
return f"{str(self.root_note)} {self.key.desc}"
@property
def flats_count(self) -> int:
return sum((len(str(n)) - 1 for n in self if n.is_flat))
@property
def sharps_count(self) -> int:
return sum((len(str(n)) - 1 for n in self if n.is_sharp))
@property
def is_theoretical(self) -> bool:
"""
Scales that have more than 7 pitches - are theoretical.
E.g. D# has 9 sharps.
Returns:
True or False
"""
return self.sharps_count > 7 or self.flats_count > 7
| pavlotkk/fretboard | fretboard/music_theory/scale.py | scale.py | py | 6,310 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "fretboard.core.collections.StrEnum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "fretboard.music_theory.note.Note",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "fretboard.music_theory.note.Note",
"line_number": 44,
"usage_type": "... |
71015603555 | # Title: 숫자 카드 2
# Link: https://www.acmicpc.net/problem/10816
import sys
from collections import defaultdict
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(n: int, ns: list, m: int, ms: list):
cards = defaultdict(lambda: 0)
for number in ns:
cards[number] += 1
ans = []
for number in ms:
ans.append(str(cards[number]))
return ' '.join(ans)
def main():
n = read_single_int()
ns = read_list_int()
m = read_single_int()
ms = read_list_int()
print(solution(n, ns, m, ms))
if __name__ == '__main__':
main() | yskang/AlgorithmPractice | baekjoon/python/number_card_2_10816.py | number_card_2_10816.py | py | 755 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.read... |
34549216828 | import numpy as np
import neuralnet as nl
import load_mnist as lm
np.random.seed(21)
dataset = lm.load_mnist()
x_train = dataset['x_train']
y_train = dataset['y_train']
x_test = dataset['x_test']
y_test = dataset['y_test']
img = np.zeros(28 * 28 * 10).reshape(10, 784)
img_test = np.zeros(28 * 28 * 10).reshape(10, 784)
c = np.zeros(10)
c_test = np.zeros(10)
# count labels
c = y_train.sum(axis=0)
c_test = y_test.sum(axis=0)
# x_train(60000, 784) concat y_train (60000, 10)
# (60000, 794)
# [:784] img (0~783)
# [784:] label (784~793)
img_label = np.concatenate((x_train, y_train), axis=1) # (60000, 794)
img_label_test = np.concatenate((x_test, y_test), axis=1) # (10000, 794)
# if we want the img array of number 0 we take arrays which [:][784] = 1
# and dvsn with the # of num 0 in the dataset
for i in range(10):
img[i] = np.sum(element for element in img_label if element[:][784+i]==1)[:784]/c[i]
img_test[i] = np.sum(e for e in img_label_test if e[:][784+i]==1)[:784]/c_test[i]
np.set_printoptions(linewidth=125)
print()
print(np.around(img[0].reshape(28,28), 1))
import matplotlib.pyplot as plt
plt.rcParams["figure.dpi"] = 300
plt.figure(1)
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.axis('off')
plt.imshow(img[i].reshape(28,28), cmap='gray')
# plt.show()
plt.figure(2)
x = np.arange(10)
plt.bar(x, c)
plt.xticks(x)
plt.yticks( np.arange(0, 7100, 1000) )
plt.show()
plt.figure(3)
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.axis('off')
plt.imshow(img_test[i].reshape(28,28), cmap='gray')
# plt.show()
plt.figure(4)
x = np.arange(10)
plt.bar(x, c_test)
plt.xticks(x)
plt.yticks( np.arange(0, 7100, 1000) )
plt.show()
| xyw0025/AI2020f | HW3/learn.py | learn.py | py | 1,782 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "load_mnist.load_mnist",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
32113324994 | from ciphers.Cipher import Cipher
from collections import Counter
from utils.const import ENGLISH_IOC
class BitwiseXOR(Cipher):
@classmethod
def encrypt(cls, text, key):
text = text.encode('ascii')
key = key.encode('ascii')
return cls._hexify_encryption_matrix(
[
[
char ^ key[enum]
for enum, char in enumerate(text[shift:shift+len(key)])
]
for shift in range(0, len(text), len(key))
]
)
@classmethod
def decrypt(cls, hexified_text, key):
key = key.encode('ascii')
return ''.join(
[
chr(byte ^ key[enum])
for row in hexified_text.split('\n')
for enum, byte in enumerate(cls._hex_to_bytes(row))
]
)
@classmethod
def cryptanalysis(cls, cryptogram):
keyword = ''
keysize = int(cryptogram.find('\n') / 2)
cryptobytes = cls._hex_to_bytes(cryptogram.replace('\n', ''))
for column in range(keysize):
vector = cryptobytes[column::keysize]
distances = {}
for char in range(123):
decrypted = [chr(vector[i] ^ char) for i in range(len(vector))]
frequencies = {
# Turn frequencies into percentage-like values
key: value / len(vector) * 100
for key, value in Counter(decrypted).items()
}
distances[sum(
[
abs(ENGLISH_IOC[key] - frequencies.get(key, 0))
for key in ENGLISH_IOC
]
)
] = chr(char)
# Add best match to keyword
keyword += distances[min(distances)]
return keyword
@classmethod
def crack(cls, text):
raise NotImplementedError
@classmethod
def _hex_to_bytes(cls, h):
return bytes(
int(h[i:i+2], 16)
for i in range(0, len(h), 2)
)
@classmethod
def _bytes_to_hex(cls, b):
return ''.join('%02x' % i for i in b)
@classmethod
def _int_to_hex(cls, num):
num = hex(num).replace('0x', '').replace('L', '')
if len(num) % 2 == 1:
num = '0' + num
return num
@classmethod
def _hexify_encryption_matrix(cls, text_matrix):
return ''.join(
[
''.join([cls._int_to_hex(byte) for byte in row]) + '\n'
for row in text_matrix
]
)
| piotrjedrzejczak/cryptography | src/ciphers/BitwiseXOR.py | BitwiseXOR.py | py | 2,667 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ciphers.Cipher.Cipher",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "utils.const.ENGLISH_IOC",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "util... |
15870540092 | import torch
import torch.nn as nn
import torch.nn.functional as F
def DoubleConv(in_channel, out_channel):
conv = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size = 3),
nn.ReLU(inplace = True),
nn.Conv2d(out_channel, out_channel, kernel_size = 3),
nn.ReLU(inplace = True)
)
return conv
def crop(original, target):
target_size = target.size()[2]
original_size = original.size()[2]
delta = original_size - target_size
delta = delta //2
return original[:, :, delta:original_size - delta, delta:original_size - delta]
| FlagArihant2000/unet | models/parts.py | parts.py | py | 548 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "torch.nn.Sequential",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number... |
10063185169 | from abc import abstractmethod
import numpy as np
from keras.layers import Conv2D, Dense, Flatten
from keras.models import Sequential
class GA:
def __init__(self, x_train, y_train, x_test, y_test, epochs):
# 初始化参数
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.pop_size = 20 # 种群大小
# 交叉、变异概率
self.r_mutation = 0.1
self.p_crossover = 0
self.p_mutation = 0.2
self.epochs = epochs
self.min_fitness = 0.95 # 适应度
self.elite_num = 2
self.mating_pool_size = 4
self.batch_size = 32
self.chroms = [] # 保存网络
self.evaluation_history = [] # 进化历史
self.stddev = 0.5 # 样本标准偏差
self.loss_func = 'mse' # loss_function
self.metrics = ['accuracy'] # evaluation_function
@property
def cur_iter(self):
return len(self.evaluation_history)
# 将数据集顺序打乱
def shuffle_batch(self):
series = list(range(len(self.x_train)))
np.random.shuffle(series)
return series
# 初始化
def init(self):
for i in range(self.pop_size):
# 神经网络模型的结构
model = Sequential()
model.add(Conv2D(8, (3, 3), activation='relu', use_bias=False, input_shape=(15, 15, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', use_bias=False))
model.add(Conv2D(128, (3, 3), activation='relu', use_bias=False))
model.add(Conv2D(128, (1, 1), activation='relu', use_bias=False))
model.add(Flatten())
model.add(Dense(128, activation='relu', use_bias=False))
model.add(Dense(64, activation='relu', use_bias=False))
model.add(Dense(1, use_bias=False))
self.chroms.append(model)
print('network initialization finished')
# 评估
def evaluation(self, _X, _y, _is_batch=True):
cur_evaluation = []
for i in range(self.pop_size):
model = self.chroms[i]
model.compile(loss=self.loss_func, metrics=self.metrics, optimizer='adam')
train_loss, train_acc = model.evaluate(_X, _y, verbose=0)
# 保存评估历史
if not _is_batch:
test_loss, test_acc = model.evaluate(self.x_test, self.y_test, verbose=0)
cur_evaluation.append({
'pop': i,
'train_loss': round(train_loss, 4),
'train_acc': round(train_acc, 4),
'test_loss': round(test_loss, 4),
'test_acc': round(test_acc, 4),
})
else:
cur_evaluation.append({
'pop': i,
'train_loss': round(train_loss, 4),
'train_acc': round(train_acc, 4),
})
best_fit = sorted(cur_evaluation, key=lambda x: x['train_acc'])[-1]
self.evaluation_history.append({
'iter': self.cur_iter + 1,
'best_fit': best_fit,
'avg_fitness': np.mean([e['train_acc'] for e in cur_evaluation]).round(4),
'evaluation': cur_evaluation,
})
print('\nIter: {}'.format(self.evaluation_history[-1]['iter']))
print('Best_fit: {}, avg_fitness: {:.4f}'.format(self.evaluation_history[-1]['best_fit'],
self.evaluation_history[-1]['avg_fitness']))
# 选择算法
def roulette_wheel_selection(self):
sorted_evaluation = sorted(self.evaluation_history[-1]['evaluation'], key=lambda x: x['train_acc'])
cum_acc = np.array([e['train_acc'] for e in sorted_evaluation]).cumsum()
extra_evaluation = [{'pop': e['pop'], 'train_acc': e['train_acc'], 'cum_acc': acc}
for e, acc in zip(sorted_evaluation, cum_acc)]
rand = np.random.rand() * cum_acc[-1]
for e in extra_evaluation:
if rand < e['cum_acc']:
return e['pop']
return extra_evaluation[-1]['pop']
# 供外部调用的接口
@abstractmethod
def run(self):
raise NotImplementedError('Please finish this function')
# 选择
@abstractmethod
def select(self):
raise NotImplementedError('Please finish this function')
# 交叉
@abstractmethod
def crossover(self, _selected_pop):
raise NotImplementedError('Please finish this function')
# 变异
@abstractmethod
def mutate(self, _selected_pop):
raise NotImplementedError('Please finish this function')
# 替换
@abstractmethod
def replace(self, _child):
raise NotImplementedError('Please finish this function')
| HavEWinTao/BIT-CS | 人工智能基础/3/Ga.py | Ga.py | py | 4,844 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.random.shuffle",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Sequential",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.... |
43963555187 | from uncertainties.unumpy import *
from uncertainties import ufloat
from inspect import getsourcefile
import os.path as path, sys
current_dir = path.dirname(path.abspath(getsourcefile(lambda:0)))
sys.path.insert(0, current_dir[:current_dir.rfind(path.sep)])
from AP import *
from uncertainties import unumpy
def calculate_f_h(e, k, l, d):
f = 0.5 * unumpy.sqrt((e - k - l)**2 - d**2)
h = k + l - unumpy.sqrt((e - k - l)**2 - d**2)
return f, h
def lengtherror(a):
relative = 0.0004
absolute = 0.06
return uarray(a, absolute + relative * a)
path_ = "./OPA/OPA.xls"
datak = getTableFromCells("B16","D20",path_,"N3")
datal = getTableFromCells("F16","H20",path_,"N3")
datad = getTableFromCells("B5","D9",path_,"N3")
# Example usage
e = lengtherror(88) - lengtherror(23.2)
k = ufloat(gewichteterMittelwert(datak[2], [0.06 + 0.0004 * i for i in datak[2]]), intExtFehler(datak[2], [0.06 + 0.0004 * i for i in datak[2]]))
l = ufloat(gewichteterMittelwert(datal[2], [0.06 + 0.0004 * i for i in datal[2]]), intExtFehler(datal[2], [0.06 + 0.0004 * i for i in datal[2]]))
d = ufloat(gewichteterMittelwert(datad[2], [0.06 + 0.0004 * i for i in datad[2]]), intExtFehler(datad[2], [0.06 + 0.0004 * i for i in datad[2]]))
f_prime, h_prime = calculate_f_h(e, k, l, d)
print(e, k, l, d)
print(f"f' = {f_prime}")
print(f"h' = {h_prime}")
| brouwerb/AP3 | OPA/aufg4.py | aufg4.py | py | 1,352 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "inspect.getsourcefile",
"lin... |
6188821386 | from flask import request, Flask, render_template, redirect, url_for
import os
from MathEquation import roomtypePrediction
import time
app = Flask(__name__,
static_url_path='',
static_folder='static')
@app.route('/')
def index():
return render_template('tool.html')
@app.route('/tool.html', methods=['GET'])
def main_page():
return redirect('/')
@app.route('/', methods=['POST', 'GET'])
def test():
if request.method == "POST":
room_int = request.form["room"]
prop_type = request.form["type"]
location = request.form["place"]
bb = {
"location": location,
"type": prop_type,
"rooms": room_int
}
print(bb)
print("....................")
return redirect(url_for("results", location=location, rooms=room_int, p_type=prop_type))
else:
return render_template('tool.html')
@app.route("/place:<location>:room:<rooms>:type:<p_type>", methods=['POST', 'GET'])
def results(location, rooms, p_type):
if location == "0":
return redirect(url_for("main_page"))
elif rooms == "0":
return redirect(url_for("main_page"))
elif p_type == "0":
return redirect(url_for("main_page"))
else:
x = int(rooms)
print(str(x) + ", " + p_type + ", " + location)
print(roomtypePrediction(x, p_type, location))
prediction = roomtypePrediction(x, p_type, location)
print("prediction:" + str(prediction))
return render_template('resultsPage.html', content=prediction)
if __name__ == "__main__":
app.run(debug=True)
| OierGman/FlaskAPI-SDLC | app.py | app.py | py | 1,631 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.method"... |
413523110 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import pathlib
from typing import Optional
import pytest
from dae.utils.regions import Region
from dae.testing import setup_pedigree, setup_vcf, \
vcf_study
from dae.testing.foobar_import import foobar_gpf
from dae.genotype_storage.genotype_storage import GenotypeStorage
from dae.studies.study import GenotypeData
@pytest.fixture(scope="module")
def imported_study(
tmp_path_factory: pytest.TempPathFactory,
genotype_storage: GenotypeStorage) -> GenotypeData:
root_path = tmp_path_factory.mktemp(
f"query_by_genes_effects_{genotype_storage.storage_id}")
gpf_instance = foobar_gpf(root_path, genotype_storage)
ped_path = setup_pedigree(
root_path / "vcf_data" / "in.ped",
"""
familyId personId dadId momId sex status role
f1 mom 0 0 2 1 mom
f1 dad 0 0 1 1 dad
f1 ch1 dad mom 2 2 prb
f1 ch2 dad mom 1 1 sib
""")
vcf_path = setup_vcf(
root_path / "vcf_data" / "in.vcf.gz",
"""
##fileformat=VCFv4.2
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##contig=<ID=foo>
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT mom dad ch1 ch2
foo 14 . C T,A . . . GT 1/1 2/2 1/1 2/2
foo 15 . C A,T . . . GT 1/1 0/0 0/1 0/0
""")
study = vcf_study(
root_path,
"effects_trio_vcf", pathlib.Path(ped_path),
[pathlib.Path(vcf_path)],
gpf_instance,
project_config_update={
"input": {
"vcf": {
"include_reference_genotypes": True,
"include_unknown_family_genotypes": True,
"include_unknown_person_genotypes": True,
"denovo_mode": "denovo",
"omission_mode": "omission",
}
},
"processing_config": {
"include_reference": True
}
})
return study
@pytest.mark.parametrize(
"position, inheritance, effects, count",
[
(14, None, None, 1),
(14, "omission", None, 1),
(14, "denovo", None, 0),
(14, "omission", ["synonymous"], 0),
(14, "omission", ["missense"], 1),
(14, "not omission and not mendelian and not unknown",
["missense"], 0),
(14, "not omission", None, 1),
(14, "not mendelian", None, 1),
]
)
def test_f1_non_cannonical_omission(
imported_study: GenotypeData,
position: int,
inheritance: str,
effects: Optional[list[str]],
count: int
) -> None:
region = Region("foo", position, position)
vs = list(imported_study.query_variants(
regions=[region],
effect_types=effects,
inheritance=inheritance,
return_unknown=True,
return_reference=True))
gefs = [(v, v.effects) for v in vs]
print(gefs)
for v in vs:
for aa in v.alt_alleles:
print(aa, aa.inheritance_in_members)
assert len(vs) == count
@pytest.mark.parametrize(
"position, inheritance, effects, count",
[
(15, None, None, 1),
(15, "omission", None, 1),
(15, "denovo", None, 0),
(15, "not denovo", None, 1),
(15, "not denovo", ["noEnd"], 1),
(15, None, ["noEnd"], 1),
(15, None, ["missense"], 0),
(15, "omission", ["noEnd"], 1),
(15, "mendelian", None, 1),
]
)
def test_f1_cannonical_omission(
imported_study: GenotypeData,
position: int,
inheritance: str,
effects: Optional[list[str]],
count: int
) -> None:
region = Region("foo", position, position)
vs = list(imported_study.query_variants(
regions=[region],
effect_types=effects,
inheritance=inheritance,
return_unknown=True,
return_reference=True))
gefs = [(v, v.effects) for v in vs]
print(gefs)
assert len(vs) == count
@pytest.mark.parametrize(
"position,inheritance,return_reference,return_unknown,count",
[
(15, None, True, True, 1),
(15, None, False, False, 1), # find all
(15, "denovo", False, False, 0), # find denovo
(15, "denovo", True, True, 0), # find denovo
(15, "omission", False, False, 1), # find omission
(15, "omission", True, True, 1), # find omission
(15, "mendelian", False, False, 1),
(15, "mendelian", True, False, 1),
(15, "mendelian", True, True, 1),
(15, "not denovo and not omission and not unknown and not mendelian",
False, False, 0),
(15, "not denovo and not omission and not unknown and not mendelian",
True, False, 0),
(15, "not denovo and not omission",
False, False, 0),
(15, "not denovo and not omission",
True, True, 1),
]
)
def test_f1_canonical_omission_return_reference_or_unknown(
imported_study: GenotypeData,
position: int,
inheritance: str,
return_reference: bool,
return_unknown: bool,
count: int
) -> None:
region = Region("foo", position, position)
vs = list(imported_study.query_variants(
regions=[region],
inheritance=inheritance,
return_unknown=return_unknown,
return_reference=return_reference))
for v in vs:
print(100 * "-")
for aa in v.alleles:
print(aa, aa.inheritance_in_members)
assert len(vs) == count
@pytest.mark.parametrize(
"position,inheritance,return_reference,return_unknown,count",
[
(14, None, True, True, 1), # find all
(14, None, False, False, 1),
(14, "denovo", False, False, 0), # find denovo
(14, "not denovo and not omission and not unknown and not mendelian",
False, False, 0),
(14, "omission", False, False, 1), # find omission
]
)
def test_f1_non_canonical_omission_return_reference_or_unknown(
imported_study: GenotypeData,
position: int,
inheritance: str,
return_reference: bool,
return_unknown: bool,
count: int
) -> None:
region = Region("foo", position, position)
vs = list(imported_study.query_variants(
regions=[region],
inheritance=inheritance,
return_unknown=return_unknown,
return_reference=return_reference))
for v in vs:
print(100 * "-")
for aa in v.alleles:
print(aa, aa.inheritance_in_members)
assert len(vs) == count
| iossifovlab/gpf | dae/tests/integration/study_query_variants/test_f1_omission.py | test_f1_omission.py | py | 6,567 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pytest.TempPathFactory",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dae.genotype_storage.genotype_storage.GenotypeStorage",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dae.testing.foobar_import.foobar_gpf",
"line_number": 21,
... |
14526624335 | import folium, io, sys, json
from PyQt5.QtWidgets import (
QApplication,
QLabel,
QLineEdit,
QPushButton,
QVBoxLayout,
QWidget,
QHBoxLayout
)
from PyQt5.QtWebEngineWidgets import QWebEngineView # pip install PyQtWebEngine
"""
Folium in PyQt5
"""
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle('Z...PA')
self.window_width, self.window_height = 800, 600
self.setMinimumSize(self.window_width, self.window_height)
# layout = QVBoxLayout()
# self.setLayout(layout)
pagelayout = QHBoxLayout()
self.setLayout(pagelayout)
setings_layout = QVBoxLayout()
map_layout = QVBoxLayout()
# self.setLayout(setings_layout)
# self.setLayout(map_layout)
pagelayout.addLayout(setings_layout)
pagelayout.addLayout(map_layout)
# LABEL 1
label1 = QLabel("Введите номер машины")
self.LineEdit1 = QLineEdit()
setings_layout.addWidget(label1)
setings_layout.addWidget(self.LineEdit1)
# LABEL 2
label2 = QLabel("Введите координаты (через пробел)")
self.LineEdit2 = QLineEdit()
setings_layout.addWidget(label2)
setings_layout.addWidget(self.LineEdit2)
# LABEL 3
label3 = QLabel("Введите почту для оповещения")
self.LineEdit3 = QLineEdit()
setings_layout.addWidget(label3)
setings_layout.addWidget(self.LineEdit3)
# BUTTON 1
btn = QPushButton("Показать результат")
btn.clicked.connect(self.on_click)
setings_layout.addWidget(btn)
# LABEL 3
self.label4 = QLabel("")
setings_layout.addWidget(self.label4)
coordinate = (52.2978, 104.296)
m = folium.Map(
tiles='Stamen Terrain',
zoom_start=12,
location=coordinate
)
# Markers
folium.Marker(location=[52.25102272646012, 104.40029507901323], popup="В824ТУ, 52.25102272646012 104.40029507901323", icon=folium.Icon(color='gray')).add_to(m)
folium.Marker(location=[52.27334456734613, 104.31116193826796], popup="Х158МУ, 52.27334456734613 104.31116193826796", icon=folium.Icon(color='gray')).add_to(m)
folium.Marker(location=[52.28578920225783, 104.39101093249656], popup="Н639ОН, 52.28578920225783 104.39101093249656", icon=folium.Icon(color='gray')).add_to(m)
folium.Marker(location=[52.31240620221297, 104.31152609456728], popup="М654ВМ, 52.31240620221297 104.31152609456728", icon=folium.Icon(color='gray')).add_to(m)
folium.Marker(location=[52.29192480346046, 104.24960326596431], popup="К718ХТ, 52.29192480346046 104.24960326596431", icon=folium.Icon(color='gray')).add_to(m)
folium.Marker(location=[52.23807065463703, 104.28061615247321], popup="В218УТ, 52.23807065463703 104.28061615247321", icon=folium.Icon(color='gray')).add_to(m)
# Области
world = 'world.json'
folium.GeoJson(world,name="madhyapradesh").add_to(m)
cars = ['В824ТУ','Х158МУ','Н639ОН','М654ВМ','К718ХТ','В218УТ']
# save map data to data object
data = io.BytesIO()
m.save(data, close_file=False)
webView = QWebEngineView()
webView.setHtml(data.getvalue().decode())
map_layout.addWidget(webView)
def on_click(self):
# self.label4.setText(self.LineEdit1.text())
if self.LineEdit1.text() == "В824ТУ":
self.label4.setText("Статус: данные отправлены \n Событие 1")
elif self.LineEdit1.text() == "Х158МУ":
self.label4.setText("Статус: данные отправлены \n Событие 2")
elif self.LineEdit1.text() == "Н639ОН":
self.label4.setText("Статус: данные отправлены \n Событие 3")
elif self.LineEdit1.text() == "М654ВМ":
self.label4.setText("Статус: данные отправлены \n Событие 4")
elif self.LineEdit1.text() == "К718ХТ":
self.label4.setText("Статус: данные отправлены \n Событие 5")
else:
self.label4.setText("Машина не найдена!")
if __name__ == '__main__':
app = QApplication(sys.argv)
myApp = MyApp()
myApp.show()
try:
sys.exit(app.exec_())
except SystemExit:
print('Closing Window...') | CameraTrack/backend | test.py | test.py | py | 4,751 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 31,
"usage_type": "call"
},
{
"a... |
9244456948 | import torch
import torch.nn as nn
import os
class B2_VGG(nn.Module):
# VGG16 with two branches
# pooling layer at the front of block
def __init__(self):
super(B2_VGG, self).__init__()
conv1 = nn.Sequential()
conv1.add_module('conv1_1', nn.Conv2d(3, 64, 3, 1, 1))
conv1.add_module('relu1_1', nn.ReLU(inplace=True))
conv1.add_module('conv1_2', nn.Conv2d(64, 64, 3, 1, 1))
conv1.add_module('relu1_2', nn.ReLU(inplace=True))
self.conv1 = conv1
conv2 = nn.Sequential()
conv2.add_module('pool1', nn.MaxPool2d(2, stride=2))
conv2.add_module('conv2_1', nn.Conv2d(64, 128, 3, 1, 1))
conv2.add_module('relu2_1', nn.ReLU())
conv2.add_module('conv2_2', nn.Conv2d(128, 128, 3, 1, 1))
conv2.add_module('relu2_2', nn.ReLU())
self.conv2 = conv2
conv3 = nn.Sequential()
conv3.add_module('pool2', nn.MaxPool2d(2, stride=2))
conv3.add_module('conv3_1', nn.Conv2d(128, 256, 3, 1, 1))
conv3.add_module('relu3_1', nn.ReLU())
conv3.add_module('conv3_2', nn.Conv2d(256, 256, 3, 1, 1))
conv3.add_module('relu3_2', nn.ReLU())
conv3.add_module('conv3_3', nn.Conv2d(256, 256, 3, 1, 1))
conv3.add_module('relu3_3', nn.ReLU())
self.conv3 = conv3
conv4 = nn.Sequential()
conv4.add_module('pool3', nn.MaxPool2d(2, stride=2))
conv4.add_module('conv4_1', nn.Conv2d(256, 512, 3, 1, 1))
conv4.add_module('relu4_1', nn.ReLU())
conv4.add_module('conv4_2', nn.Conv2d(512, 512, 3, 1, 1))
conv4.add_module('relu4_2', nn.ReLU())
conv4.add_module('conv4_3', nn.Conv2d(512, 512, 3, 1, 1))
conv4.add_module('relu4_3', nn.ReLU())
self.conv4 = conv4
conv5 = nn.Sequential()
conv5.add_module('pool4', nn.MaxPool2d(2, stride=2))
conv5.add_module('conv5_1', nn.Conv2d(512, 512, 3, 1, 1))
conv5.add_module('relu5_1', nn.ReLU())
conv5.add_module('conv5_2', nn.Conv2d(512, 512, 3, 1, 1))
conv5.add_module('relu5_2', nn.ReLU())
conv5.add_module('conv5_3', nn.Conv2d(512, 512, 3, 1, 1))
conv5.add_module('relu5_3', nn.ReLU())
self.conv5 = conv5
for key, value in self.named_parameters():
if 'conv5_3' not in key:
value.requires_grad = False
pre_train = torch.load('./checkpoint/vgg16-397923af.pth')
self._initialize_weights(pre_train)
def forward(self, x):
conv1_2 = self.conv1(x)
conv2_2 = self.conv2(conv1_2)
conv3_3 = self.conv3(conv2_2)
conv4_3 = self.conv4(conv3_3)
conv5_3 = self.conv5(conv4_3)
return {
"conv1_2": conv1_2,
"conv2_2": conv2_2,
"conv3_3": conv3_3,
"conv4_3": conv4_3,
"conv5_3": conv5_3
}
def _initialize_weights(self, pre_train):
keys = list(pre_train.keys())
self.conv1.conv1_1.weight.data.copy_(pre_train[keys[0]])
self.conv1.conv1_2.weight.data.copy_(pre_train[keys[2]])
self.conv2.conv2_1.weight.data.copy_(pre_train[keys[4]])
self.conv2.conv2_2.weight.data.copy_(pre_train[keys[6]])
self.conv3.conv3_1.weight.data.copy_(pre_train[keys[8]])
self.conv3.conv3_2.weight.data.copy_(pre_train[keys[10]])
self.conv3.conv3_3.weight.data.copy_(pre_train[keys[12]])
self.conv4.conv4_1.weight.data.copy_(pre_train[keys[14]])
self.conv4.conv4_2.weight.data.copy_(pre_train[keys[16]])
self.conv4.conv4_3.weight.data.copy_(pre_train[keys[18]])
self.conv5.conv5_1.weight.data.copy_(pre_train[keys[20]])
self.conv5.conv5_2.weight.data.copy_(pre_train[keys[22]])
self.conv5.conv5_3.weight.data.copy_(pre_train[keys[24]])
self.conv1.conv1_1.bias.data.copy_(pre_train[keys[1]])
self.conv1.conv1_2.bias.data.copy_(pre_train[keys[3]])
self.conv2.conv2_1.bias.data.copy_(pre_train[keys[5]])
self.conv2.conv2_2.bias.data.copy_(pre_train[keys[7]])
self.conv3.conv3_1.bias.data.copy_(pre_train[keys[9]])
self.conv3.conv3_2.bias.data.copy_(pre_train[keys[11]])
self.conv3.conv3_3.bias.data.copy_(pre_train[keys[13]])
self.conv4.conv4_1.bias.data.copy_(pre_train[keys[15]])
self.conv4.conv4_2.bias.data.copy_(pre_train[keys[17]])
self.conv4.conv4_3.bias.data.copy_(pre_train[keys[19]])
self.conv5.conv5_1.bias.data.copy_(pre_train[keys[21]])
self.conv5.conv5_2.bias.data.copy_(pre_train[keys[23]])
self.conv5.conv5_3.bias.data.copy_(pre_train[keys[25]])
if __name__ == '__main__':
net = B2_VGG()
pass | dragonlee258079/DMT | B2_VGG.py | B2_VGG.py | py | 4,700 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
17716038891 | """Analysis for meniscus.
Attributes:
BOUNDS (dict): Upper bounds for quantitative values.
"""
import itertools
import os
import warnings
import numpy as np
import pandas as pd
import scipy.ndimage as sni
from dosma.core.device import get_array_module
from dosma.core.med_volume import MedicalVolume
from dosma.core.quant_vals import T2, QuantitativeValueType
from dosma.defaults import preferences
from dosma.tissues.tissue import Tissue, largest_cc
from dosma.utils import io_utils
import matplotlib.pyplot as plt
# milliseconds
BOUNDS = {
QuantitativeValueType.T2: 60.0,
QuantitativeValueType.T1_RHO: 100.0,
QuantitativeValueType.T2_STAR: 50.0,
}
__all__ = ["Meniscus"]
class Meniscus(Tissue):
"""Handles analysis and visualization for meniscus.
This class extends functionality from `Tissue`.
For visualization, the meniscus is unrolled across the axial plane.
"""
ID = 2
STR_ID = "men"
FULL_NAME = "meniscus"
# Expected quantitative values
T1_EXPECTED = 1000 # milliseconds
# Coronal Keys
_ANTERIOR_KEY = 0
_POSTERIOR_KEY = 1
_CORONAL_KEYS = [_ANTERIOR_KEY, _POSTERIOR_KEY]
# Saggital Keys
_MEDIAL_KEY = 0
_LATERAL_KEY = 1
_SAGGITAL_KEYS = [_MEDIAL_KEY, _LATERAL_KEY]
# Axial Keys
_SUPERIOR_KEY = 0
_INFERIOR_KEY = 1
_TOTAL_AXIAL_KEY = -1
def __init__(
self, weights_dir: str = None, medial_to_lateral: bool = None, split_ml_only: bool = False
):
super().__init__(weights_dir=weights_dir, medial_to_lateral=medial_to_lateral)
self.split_ml_only = split_ml_only
self.regions_mask = None
def unroll_axial(self, quant_map: np.ndarray):
"""Unroll meniscus in axial direction.
Args:
quant_map (np.ndarray): Map to roll out.
"""
mask = self.__mask__.volume
assert (
self.regions_mask is not None
), "region_mask not initialized. Should be initialized when mask is set"
region_mask_sup_inf = self.regions_mask[..., 0]
superior = (region_mask_sup_inf == self._SUPERIOR_KEY) * mask * quant_map
superior[superior == 0] = np.nan
superior = np.nanmean(superior, axis=0)
inferior = (region_mask_sup_inf == self._INFERIOR_KEY) * mask * quant_map
inferior[inferior == 0] = np.nan
inferior = np.nanmean(inferior, axis=0)
total = mask * quant_map
total[total == 0] = np.nan
total = np.nanmean(total, axis=0)
return total, superior, inferior
def split_regions(self, base_map):
"""Split meniscus into subregions.
Center-of-mass (COM) is used to subdivide into
anterior/posterior, superior/inferior, and medial/lateral regions.
Note:
The anterior/posterior and superior/inferior subdivision may causes issues
with tilted mensici. This will be addressed in a later release. To avoid
computing metrics on these regions, set ``self.split_ml_only=True``.
"""
center_of_mass = sni.measurements.center_of_mass(base_map) # zero indexed
com_sup_inf = int(np.ceil(center_of_mass[0]))
com_ant_post = int(np.ceil(center_of_mass[1]))
com_med_lat = int(np.ceil(center_of_mass[2]))
region_mask_sup_inf = np.zeros(base_map.shape)
region_mask_sup_inf[:com_sup_inf, :, :] = self._SUPERIOR_KEY
region_mask_sup_inf[com_sup_inf:, :, :] = self._INFERIOR_KEY
region_mask_ant_post = np.zeros(base_map.shape)
region_mask_ant_post[:, :com_ant_post, :] = self._ANTERIOR_KEY
region_mask_ant_post[:, com_ant_post:, :] = self._POSTERIOR_KEY
region_mask_med_lat = np.zeros(base_map.shape)
region_mask_med_lat[:, :, :com_med_lat] = (
self._MEDIAL_KEY if self.medial_to_lateral else self._LATERAL_KEY
)
region_mask_med_lat[:, :, com_med_lat:] = (
self._LATERAL_KEY if self.medial_to_lateral else self._MEDIAL_KEY
)
self.regions_mask = np.stack(
[region_mask_sup_inf, region_mask_ant_post, region_mask_med_lat], axis=-1
)
def __calc_quant_vals__(self, quant_map: MedicalVolume, map_type: QuantitativeValueType):
subject_pid = self.pid
# Reformats the quantitative map to the appropriate orientation.
super().__calc_quant_vals__(quant_map, map_type)
assert (
self.regions_mask is not None
), "region_mask not initialized. Should be initialized when mask is set"
region_mask = self.regions_mask
axial_region_mask = self.regions_mask[..., 0]
coronal_region_mask = self.regions_mask[..., 1]
sagittal_region_mask = self.regions_mask[..., 2]
# Combine region mask into categorical mask.
axial_categories = [
(self._SUPERIOR_KEY, "superior"),
(self._INFERIOR_KEY, "inferior"),
(-1, "total"),
]
coronal_categories = [
(self._ANTERIOR_KEY, "anterior"),
(self._POSTERIOR_KEY, "posterior"),
(-1, "total"),
]
sagittal_categories = [(self._MEDIAL_KEY, "medial"), (self._LATERAL_KEY, "lateral")]
if self.split_ml_only:
axial_categories = [x for x in axial_categories if x[0] == -1]
coronal_categories = [x for x in coronal_categories if x[0] == -1]
categorical_mask = np.zeros(region_mask.shape[:-1])
base_mask = self.__mask__.A.astype(np.bool)
labels = {}
for idx, (
(axial, axial_name),
(coronal, coronal_name),
(sagittal, sagittal_name),
) in enumerate(
itertools.product(axial_categories, coronal_categories, sagittal_categories)
):
label = idx + 1
axial_map = np.asarray([True]) if axial == -1 else axial_region_mask == axial
coronal_map = np.asarray([True]) if coronal == -1 else coronal_region_mask == coronal
sagittal_map = sagittal_region_mask == sagittal
categorical_mask[base_mask & axial_map & coronal_map & sagittal_map] = label
labels[label] = f"{axial_name}-{coronal_name}-{sagittal_name}"
# TODO: Change this to be any arbitrary quantitative value type.
# Note, it does not matter what we wrap it in because the underlying operations
# are not specific to the value type.
t2 = T2(quant_map)
categorical_mask = MedicalVolume(categorical_mask, affine=quant_map.affine)
df = t2.to_metrics(categorical_mask, labels=labels, bounds=(0, np.inf), closed="neither")
df.insert(0, "Subject", subject_pid)
total, superior, inferior = self.unroll_axial(quant_map.volume)
qv_name = map_type.name
maps = [
{
"title": "%s superior" % qv_name,
"data": superior,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_superior" % qv_name,
"raw_data_filename": "%s_superior.data" % qv_name,
},
{
"title": "%s inferior" % qv_name,
"data": inferior,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_inferior" % qv_name,
"raw_data_filename": "%s_inferior.data" % qv_name,
},
{
"title": "%s total" % qv_name,
"data": total,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_total" % qv_name,
"raw_data_filename": "%s_total.data" % qv_name,
},
]
self.__store_quant_vals__(maps, df, map_type)
def __calc_quant_vals_old__(self, quant_map, map_type):
subject_pid = self.pid
super().__calc_quant_vals__(quant_map, map_type)
assert (
self.regions_mask is not None
), "region_mask not initialized. Should be initialized when mask is set"
quant_map_volume = quant_map.volume
mask = self.__mask__.volume
quant_map_volume = mask * quant_map_volume
axial_region_mask = self.regions_mask[..., 0]
sagittal_region_mask = self.regions_mask[..., 1]
coronal_region_mask = self.regions_mask[..., 2]
axial_names = ["superior", "inferior", "total"]
coronal_names = ["medial", "lateral"]
sagittal_names = ["anterior", "posterior"]
pd_header = ["Subject", "Location", "Side", "Region", "Mean", "Std", "Median"]
pd_list = []
for axial in [self._SUPERIOR_KEY, self._INFERIOR_KEY, self._TOTAL_AXIAL_KEY]:
if axial == self._TOTAL_AXIAL_KEY:
axial_map = np.asarray(
axial_region_mask == self._SUPERIOR_KEY, dtype=np.float32
) + np.asarray(axial_region_mask == self._INFERIOR_KEY, dtype=np.float32)
axial_map = np.asarray(axial_map, dtype=np.bool)
else:
axial_map = axial_region_mask == axial
for coronal in [self._MEDIAL_KEY, self._LATERAL_KEY]:
for sagittal in [self._ANTERIOR_KEY, self._POSTERIOR_KEY]:
curr_region_mask = (
quant_map_volume
* (coronal_region_mask == coronal)
* (sagittal_region_mask == sagittal)
* axial_map
)
curr_region_mask[curr_region_mask == 0] = np.nan
# discard all values that are 0
c_mean = np.nanmean(curr_region_mask)
c_std = np.nanstd(curr_region_mask)
c_median = np.nanmedian(curr_region_mask)
row_info = [
subject_pid,
axial_names[axial],
coronal_names[coronal],
sagittal_names[sagittal],
c_mean,
c_std,
c_median,
]
pd_list.append(row_info)
# Generate 2D unrolled matrix
total, superior, inferior = self.unroll_axial(quant_map.volume)
df = pd.DataFrame(pd_list, columns=pd_header)
qv_name = map_type.name
maps = [
{
"title": "%s superior" % qv_name,
"data": superior,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_superior" % qv_name,
"raw_data_filename": "%s_superior.data" % qv_name,
},
{
"title": "%s inferior" % qv_name,
"data": inferior,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_inferior" % qv_name,
"raw_data_filename": "%s_inferior.data" % qv_name,
},
{
"title": "%s total" % qv_name,
"data": total,
"xlabel": "Slice",
"ylabel": "Angle (binned)",
"filename": "%s_total" % qv_name,
"raw_data_filename": "%s_total.data" % qv_name,
},
]
self.__store_quant_vals__(maps, df, map_type)
def set_mask(self, mask: MedicalVolume, use_largest_ccs: bool = False, ml_only: bool = False):
xp = get_array_module(mask.A)
if use_largest_ccs:
msk = xp.asarray(largest_cc(mask.A, num=2), dtype=xp.uint8)
else:
msk = xp.asarray(mask.A, dtype=xp.uint8)
mask_copy = mask._partial_clone(volume=msk)
super().set_mask(mask_copy)
self.split_regions(self.__mask__.volume)
def __save_quant_data__(self, dirpath):
"""Save quantitative data and 2D visualizations of meniscus
Check which quantitative values (T2, T1rho, etc) are defined for meniscus and analyze these
1. Save 2D total, superficial, and deep visualization maps
2. Save {'medial', 'lateral'}, {'anterior', 'posterior'},
{'superior', 'inferior', 'total'} data to excel file.
Args:
dirpath (str): Directory path to tissue data.
"""
q_names = []
dfs = []
for quant_val in QuantitativeValueType:
if quant_val.name not in self.quant_vals.keys():
continue
q_names.append(quant_val.name)
q_val = self.quant_vals[quant_val.name]
dfs.append(q_val[1])
q_name_dirpath = io_utils.mkdirs(os.path.join(dirpath, quant_val.name.lower()))
for q_map_data in q_val[0]:
filepath = os.path.join(q_name_dirpath, q_map_data["filename"])
xlabel = "Slice"
ylabel = ""
title = q_map_data["title"]
data_map = q_map_data["data"]
plt.clf()
upper_bound = BOUNDS[quant_val]
if preferences.visualization_use_vmax:
# Hard bounds - clipping
plt.imshow(data_map, cmap="jet", vmin=0.0, vmax=BOUNDS[quant_val])
else:
# Try to use a soft bounds
if np.sum(data_map <= upper_bound) == 0:
plt.imshow(data_map, cmap="jet", vmin=0.0, vmax=BOUNDS[quant_val])
else:
warnings.warn(
"%s: Pixel value exceeded upper bound (%0.1f). Using normalized scale."
% (quant_val.name, upper_bound)
)
plt.imshow(data_map, cmap="jet")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
clb = plt.colorbar()
clb.ax.set_title("(ms)")
plt.axis("tight")
plt.savefig(filepath)
# Save data
raw_data_filepath = os.path.join(
q_name_dirpath, "raw_data", q_map_data["raw_data_filename"]
)
io_utils.save_pik(raw_data_filepath, data_map)
if len(dfs) > 0:
io_utils.save_tables(os.path.join(dirpath, "data.xlsx"), dfs, q_names)
| ad12/DOSMA | dosma/tissues/meniscus.py | meniscus.py | py | 14,454 | python | en | code | 49 | github-code | 1 | [
{
"api_name": "dosma.core.quant_vals.QuantitativeValueType.T2",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "dosma.core.quant_vals.QuantitativeValueType",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dosma.core.quant_vals.QuantitativeValueType.T1_RH... |
11101433884 | import openpyxl
# Carregar o arquivo
workbook = openpyxl.load_workbook('<FILE_NAME>.xlsx')
# Selecionar a planilha ativa
sheet = workbook.active
headers = []
for cell in sheet[2]:
headers.append(cell.value)
# Iterar sobre as linhas a partir da terceira linha
data = []
for row in sheet.iter_rows(min_row=3, values_only=True):
row_data = dict(zip(headers, row))
data.append(row_data)
with open('<FILE_NAME>.xliff','w') as file:
file.write(f'<xliff version="{data[0]["/@version"]}.0">\n')
file.write(f'<file original="{data[0]["/file/@original"]}.0" source-language="{data[0]["/file/@source-language"]}" target-language="en" datatype="{data[0]["/file/@datatype"]}">\n')
file.write(f'<header></header>\n')
file.write(f'<body>\n')
for item in data:
file.write(f'<trans-unit id="{item["/file/body/trans-unit/@id"]}">\n')
file.write(f'<source>{item["/file/body/trans-unit/target"]}</source>\n')
file.write(f'<target>{item["/file/body/trans-unit/source"]}</target>\n')
file.write(f'</trans-unit>\n')
file.write('</body>\n')
file.write('</file>\n')
file.write('</xliff>\n')
| EduardoFelixNeto/Conversor_excel_to_xliff | main.py | main.py | py | 1,155 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 4,
"usage_type": "call"
}
] |
73270903715 | from ast import Raise
from optparse import Option
from typing import List, Dict, Protocol, Tuple, Optional
from config.constant import PROJECT_ROOT
from dataclasses import dataclass, field
from abc import ABC, abstractmethod, abstractproperty
from config.exceptions import ScrapeConfigError
from config.config_test import (
JscrapeOnConfigTest,
ScrapeKeyTest,
ScrapeFieldKeyTest,
ScrapeHTMLMustHaveKeyTest,
ScrapeHTMLRequiredKeyTest,
)
import json
class Config(ABC):
config: Dict = field(default_factory=dict)
required_keys: List[str] = ["url", "method", "scrape", "data", "params", "headers"]
must_have_scrape_data_keys: List[str] = ["html", "json"]
must_have_html_keys: List[str] = ["selector", "id", "name", "class", "tag", "xpath"]
required_html_keys: List[str] = ["get", "count"]
@abstractmethod
def set_config_file(self) -> Dict:
"""
Return the configuration as Dictionary in a file.
"""
@abstractmethod
def get_configuration_keys(self) -> Dict:
"""
Return the config keys such as:
as_session = bool |
proxy = str
"""
@abstractmethod
def get_scrape_keys(self) -> List[str]:
"""
Return the scrape keys where the scraping start.
"""
@abstractmethod
def test_all_config(self) -> None:
"""
Test all config that are provided.
"""
@dataclass
class JsonConfig(Config):
debug: bool = False
file_name: Optional[str] = None
config_tester: List[JscrapeOnConfigTest] = field(default_factory=list)
config: Dict = field(default_factory=dict)
def __post_init__(self):
if self.file_name:
self.set_config_file(self.file_name)
def set_config_file(self, file_name):
json_data = ""
configuration_directory = PROJECT_ROOT + "scrapes/"
file_path = configuration_directory + file_name
with open(file_path) as json_file:
json_data = json.load(json_file)
self.config = json_data
json_file.close()
self.test_all_config()
return json_data
def test_all_config(self):
self.config_tester = [
ScrapeKeyTest(self.get_scrape_keys(), self.required_keys, self.config),
ScrapeFieldKeyTest(
self.get_scrape_keys(), self.must_have_scrape_data_keys, self.config
),
ScrapeHTMLMustHaveKeyTest(
self.get_scrape_keys(), self.must_have_html_keys, self.config
),
ScrapeHTMLRequiredKeyTest(
self.get_scrape_keys(), self.required_html_keys, self.config
),
]
for config in self.config_tester:
config.test()
def get_configuration_keys(self):
as_session = False
if "as_session" in self.config:
as_session = self.config["as_session"]
proxy = ""
if "proxy" in self.config:
proxy = self.config["proxy"]
return {"as_session": as_session, "proxy": proxy}
def get_scrape_keys(self):
all_keys = list(self.config.keys())
all_keys.remove("as_session")
all_keys.remove("proxy")
return all_keys
# a = JsonConfig()
# a.set_config_file("webscraper-e-commerce.json")
| johnalbert-dot-py/JScrapeON | jscrapeon_parser/config_parser.py | config_parser.py | py | 3,329 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "abc.ABC",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "config.constant",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_num... |
26486178646 | import tweepy
import re
import apiKey
######## Get Tweets and Clean
def get_all_tweets(screen_name):
# authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(apiKey.twitter_customer, apiKey.twitter_customer_secret)
auth.set_access_token(apiKey.twitter_token, apiKey.twitter_secret)
api = tweepy.API(auth)
# initialize a list to hold all the tweepy Tweets
alltweets = []
print("Reading Posts from @" + screen_name + " now...")
# make initial request for most recent tweets (200 is the maximum allowed count)
user = api.get_user(screen_name=screen_name)
new_tweets = api.user_timeline(screen_name=screen_name, count=50)
# save most recent tweets
alltweets.extend(new_tweets)
# save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
try:
while len(new_tweets) > 0:
# all subsiquent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
# save most recent tweets
alltweets.extend(new_tweets)
# update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
except Exception as e:
return [[0,"Error in retriving timeline from @"+screen_name+":"+str(e),False]]
try:
# transform the tweepy tweets into a 2D array that will populate the csv
outtweets = []
for tweet in alltweets:
# remove Emoji
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
te = re.sub(emoji_pattern, "", tweet.text)
tweet_content = clean_text(str(te).encode('ascii','ignore')).strip()
if tweet_content and (not tweet_content.isspace()) and len(tweet_content)>0:
outtweet = [tweet.id_str, tweet.created_at, tweet_content]
outtweets.append(outtweet)
return outtweets
except Exception as e:
return [[0,"Error in packing new tweets from @"+screen_name+":"+str(e),False]]
def clean_text(twitter_text):
before_http = re.sub('https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)','',str(twitter_text))
no_b = before_http.replace('b\'RT', '').replace('\'b', '').replace('RT','').replace('b\'','').replace('\'','')
no_at = no_b.replace('@', '')
no_hashtag = re.sub('/^@(.*?)\s/','', no_at)
return no_hashtag.replace('/n','')
| shanpy/aiCompetition | get_tweets.py | get_tweets.py | py | 2,874 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "apiKey.twitter_customer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "apiKey.twitter_customer_secret",
"line_number": 8,
"usage_type": "attribute"
},
{
... |
32656021823 | """Differentiate between type of service token
Revision ID: ddd3db82f370
Revises: 0e6ac85397af
Create Date: 2023-03-21 13:50:34.046658
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = 'ddd3db82f370'
down_revision = '0e6ac85397af'
branch_labels = None
depends_on = None
def insert_service_token(conn, row, token_type):
query = text("""
INSERT INTO `service_tokens` (hashed_token, description, service_id, token_type, created_by, updated_by)
VALUES (:hashed_token, :description, :service_id, :token_type, :created_by, :updated_by)
""")
description = f"Migrated {token_type.upper()} service token for {row.name}"
conn.execute(query,
dict(hashed_token=row.hashed_token,
description=description,
service_id=row.service_id,
token_type=token_type,
created_by="migration",
updated_by="migration")
)
def upgrade():
conn = op.get_bind()
conn.execute(text("ALTER TABLE `service_tokens` ADD COLUMN token_type VARCHAR(255)"))
rows = conn.execute(text("""
select s.id as service_id, s.token_enabled as token_enabled, s.pam_web_sso_enabled as pam_web_sso_enabled,
s.scim_enabled as scim_enabled, s.name as name, st.id as service_token_id, st.hashed_token as hashed_token,
st.description as description from services s inner join service_tokens st on st.service_id = s.id
"""))
for row in rows:
token_enabled = row.token_enabled
pam_web_sso_enabled = row.pam_web_sso_enabled
scim_enabled = row.scim_enabled
token_type = "introspection" if token_enabled else "pam" if pam_web_sso_enabled else "scim"
conn.execute(text("UPDATE service_tokens SET token_type = :token_type WHERE id = :id"),
token_type=token_type,
id=row.service_token_id)
if pam_web_sso_enabled and token_type != "pam":
insert_service_token(conn, row, "pam")
if scim_enabled and token_type != "scim":
insert_service_token(conn, row, "scim")
conn.execute(text("ALTER TABLE `service_tokens` CHANGE token_type token_type VARCHAR(255) NOT NULL"))
def downgrade():
pass
| SURFscz/SBS | server/migrations/versions/ddd3db82f370_differentiate_between_type_of_service_.py | ddd3db82f370_differentiate_between_type_of_service_.py | py | 2,331 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "sqlalchemy.text",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op.get_bind",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.text",
... |
32345652310 |
from ast import arg
from cmath import inf
from notears.locally_connected import LocallyConnected
from notears.lbfgsb_scipy import LBFGSBScipy
from plot_utils import *
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from notears.loss_func import *
from plot_utils import *
import notears.utils as ut
import tqdm as tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.optim import lr_scheduler
from scipy.linalg import expm
from scipy.special import comb
import math
# def record_weight(reweight_list, cnt, hard_list=[26,558,550,326,915], easy_list=[859,132,82,80,189]):
# writer = SummaryWriter('logs/weight_record_real')
# reweight_idx = reweight_list.squeeze()
# reweight_idx = reweight_idx.tolist()
# for idx in hard_list:
# writer.add_scalar(f'hard_real/hard_reweight_list[{idx}]', reweight_idx[idx], cnt)
# for idx in easy_list:
# writer.add_scalar(f'easy_real/easy_reweight_list[{idx}]', reweight_idx[idx], cnt)
class NotearsMLP(nn.Module):
def __init__(self, dims, bias=True):
super(NotearsMLP, self).__init__()
assert len(dims) >= 2
assert dims[-1] == 1
d = dims[0]
self.dims = dims
# fc1: variable splitting for l1
self.fc1_pos = nn.Linear(d, d * dims[1], bias=bias)
self.fc1_neg = nn.Linear(d, d * dims[1], bias=bias)
self.fc1_pos.weight.bounds = self._bounds()
self.fc1_neg.weight.bounds = self._bounds()
# fc2: local linear layers
layers = []
for l in range(len(dims) - 2):
layers.append(LocallyConnected(d, dims[l + 1], dims[l + 2], bias=bias))
self.fc2 = nn.ModuleList(layers)
def _bounds(self):
d = self.dims[0]
bounds = []
for j in range(d):
for m in range(self.dims[1]):
for i in range(d):
if i == j:
bound = (0, 0)
else:
bound = (0, None)
bounds.append(bound)
return bounds
def forward(self, x): # [n, d] -> [n, d]
x = self.fc1_pos(x) - self.fc1_neg(x) # [n, d * m1]
x = x.view(-1, self.dims[0], self.dims[1]) # [n, d, m1]
for fc in self.fc2:
x = torch.sigmoid(x) # [n, d, m1]
x = fc(x) # [n, d, m2]
x = x.squeeze(dim=2) # [n, d]
return x
def h_func(self):
"""Constrain 2-norm-squared of fc1 weights along m1 dim to be a DAG"""
d = self.dims[0]
fc1_weight = self.fc1_pos.weight - self.fc1_neg.weight # [j * m1, i]
fc1_weight = fc1_weight.view(d, -1, d) # [j, m1, i]
A = torch.sum(fc1_weight * fc1_weight, dim=1).t() # [i, j]
# h = trace_expm(A) - d # (Zheng et al. 2018)
# A different formulation, slightly faster at the cost of numerical stability
M = torch.eye(d).to(A.device) + A / d # (Yu et al. 2019)
E = torch.matrix_power(M, d - 1)
h = (E.t() * M).sum() - d
return h
def l2_reg(self):
"""Take 2-norm-squared of all parameters"""
reg = 0.
fc1_weight = self.fc1_pos.weight - self.fc1_neg.weight # [j * m1, i]
reg += torch.sum(fc1_weight ** 2)
for fc in self.fc2:
reg += torch.sum(fc.weight ** 2)
return reg
def fc1_l1_reg(self):
"""Take l1 norm of fc1 weight"""
reg = torch.sum(self.fc1_pos.weight + self.fc1_neg.weight)
return reg
def predict(self,x):
return self.forward(x)
@torch.no_grad()
def fc1_to_adj(self) -> np.ndarray: # [j * m1, i] -> [i, j]
"""Get W from fc1 weights, take 2-norm over m1 dim"""
d = self.dims[0]
fc1_weight = self.fc1_pos.weight - self.fc1_neg.weight # [j * m1, i]
fc1_weight = fc1_weight.view(d, -1, d) # [j, m1, i]
A = torch.sum(fc1_weight * fc1_weight, dim=1).t() # [i, j]
W = torch.sqrt(A) # [i, j]
W = W.cpu().detach().numpy() # [i, j]
return W
class GOLEM(nn.Module):
"""Set up the objective function of GOLEM.
Hyperparameters:
(1) GOLEM-NV: lambda_1=2e-3, lambda_2=5.0.
(2) GOLEM-EV: lambda_1=2e-2, lambda_2=5.0.(not used)
"""
def __init__(self, args):
super(GOLEM, self).__init__()
self.n = args.n
self.d = args.d
self.lambda_1 = args.lambda1
self.lambda_2 = args.lambda2
self.W=nn.Linear(args.d, args.d, bias=False)
self.lr=args.golem_lr
nn.init.zeros_(self.W.weight)
#nn.init.xavier_normal_(self.W.weight)
# with torch.no_grad():
# #self.W.weight=torch.triu(self.W.weight)
# idx=torch.triu_indices(*self.W.weight.shape)
# self.W.weight[idx[0],idx[1]]=0
def predict(self,X):
return self.W(X)
def forward(self, X, weight):
likelihood = self._compute_likelihood(X,weight)
L1_penalty = self._compute_L1_penalty()
h = self._compute_h()
loss= likelihood + self.lambda_1 * L1_penalty + self.lambda_2 * h
return loss, likelihood, self.lambda_1 * L1_penalty, self.lambda_2 * h
def _compute_likelihood(self,X,weight):
"""Compute (negative log) likelihood in the linear Gaussian case.
Returns:
tf.Tensor: Likelihood term (scalar-valued).
"""
return 0.5 * self.d * torch.log(
torch.sum(torch.mul(weight,torch.sum(torch.square(X-self.W(X)),dim=1)))
# torch.square(
# torch.linalg.norm(X - self.W(X))
# )
) - torch.linalg.slogdet(torch.eye(self.d) - self.W.weight.T)[1]
# return 0.5 * torch.sum(
# torch.log(
# torch.sum(
# torch.square(X - self.W(X)), axis=0
# )
# )
# ) - torch.linalg.slogdet(torch.eye(self.d) - self.W.weight.T)[1]
def _compute_L1_penalty(self):
"""Compute L1 penalty.
Returns:
tf.Tensor: L1 penalty term (scalar-valued).
"""
return torch.norm(self.W.weight, 1)
def _compute_h(self):
"""Compute DAG penalty.
Returns:
tf.Tensor: DAG penalty term (scalar-valued).
"""
return torch.trace(torch.matrix_exp(self.W.weight.T * self.W.weight.T)) - self.d
@torch.no_grad()
def W_to_adj(self) -> np.ndarray: # [j * m1, i] -> [i, j]
"""Get W from fc1 weights, take 2-norm over m1 dim"""
w = self.W.weight.T.cpu().detach().numpy() # [i, j]
return w
class DAGGNN_MLPEncoder(nn.Module):
"""MLP encoder module."""
def __init__(self, n_in, n_xdims, n_hid, n_out, batch_size, do_prob=0., factor=True, tol = 0.1):
super(DAGGNN_MLPEncoder, self).__init__()
adj_A = np.zeros((n_in, n_in))
self.adj_A = nn.Parameter(torch.autograd.Variable(torch.from_numpy(adj_A).float(), requires_grad=True))
self.factor = factor
self.Wa = nn.Parameter(torch.zeros(n_out), requires_grad=True)
self.fc1 = nn.Linear(n_xdims, n_hid, bias = True)
self.fc2 = nn.Linear(n_hid, n_out, bias = True)
self.dropout_prob = do_prob
self.batch_size = batch_size
self.z = nn.Parameter(torch.tensor(tol))
self.z_positive = nn.Parameter(torch.ones_like(torch.from_numpy(adj_A)).float())
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, inputs):
def preprocess_adj_new(adj):
adj_normalized = (torch.eye(adj.shape[0]) - (adj.transpose(0,1)))
return adj_normalized
if torch.sum(self.adj_A != self.adj_A):
print('nan error \n')
# to amplify the value of A and accelerate convergence.
adj_A1 = torch.sinh(3.*self.adj_A)
# adj_Aforz = I-A^T
adj_Aforz = preprocess_adj_new(adj_A1) #[d*d]
H1 = F.relu((self.fc1(inputs)))#[?,d,m(=1)]=>[?,d,hidden]
x = (self.fc2(H1)) #[?,d,hidden]=>[?,d,n_out]
logits = torch.matmul(adj_Aforz, x+self.Wa) -self.Wa
return logits, adj_A1, self.Wa
class DAGGNN_MLPDecoder(nn.Module):
"""MLP decoder module."""
def __init__(self, n_in_z, n_out, data_variable_size, batch_size, n_hid,
do_prob=0.):
super(DAGGNN_MLPDecoder, self).__init__()
self.out_fc1 = nn.Linear(n_in_z, n_hid, bias = True)
self.out_fc2 = nn.Linear(n_hid, n_out, bias = True)
self.batch_size = batch_size
self.data_variable_size = data_variable_size
self.dropout_prob = do_prob
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_z, origin_A, Wa):
def preprocess_adj_new1(adj):
adj_normalized = torch.inverse(torch.eye(adj.shape[0])-adj.transpose(0,1))
return adj_normalized
#adj_A_new1 = (I-A^T)^(-1)
adj_A_new1 = preprocess_adj_new1(origin_A)
#print(origin_A.shape)
#print(input_z.shape)
#print(Wa.shape)
mat_z = torch.matmul(adj_A_new1, input_z+Wa) - Wa
H3 = F.relu(self.out_fc1((mat_z)))
out = self.out_fc2(H3)
return out
class DAGGNN(nn.Module):
"""MLP decoder module."""
def __init__(self, encoder, decoder):
super(DAGGNN, self).__init__()
self.encoder=encoder
self.decoder=decoder
self.best_ELBO_graph = torch.sinh(3.*self.encoder.adj_A).data.clone().numpy()
self.best_MSE_graph = torch.sinh(3.*self.encoder.adj_A).data.clone().numpy()
self.best_NLL_graph = torch.sinh(3.*self.encoder.adj_A).data.clone().numpy()
def forward(self, X):
X=torch.unsqueeze(X,2)
logits, adj_A1, Wa = self.encoder(X)
out = self.decoder(logits,adj_A1,Wa)
return torch.squeeze(out)
def predict(self, X):
return self.forward(X)
def get_adj(self):
return self.best_NLL_graph
class TrExpScipy(torch.autograd.Function):
"""
autograd.Function to compute trace of an exponential of a matrix
"""
@staticmethod
def forward(ctx, input):
device=input.device
with torch.no_grad():
# send tensor to cpu in numpy format and compute expm using scipy
expm_input = expm(input.detach().cpu().numpy())
# transform back into a tensor
expm_input = torch.as_tensor(expm_input)
if input.is_cuda:
expm_input = expm_input.to(device)
assert expm_input.is_cuda
# save expm_input to use in backward
ctx.save_for_backward(expm_input)
# return the trace
return torch.trace(expm_input)
@staticmethod
def backward(ctx, grad_output):
with torch.no_grad():
expm_input, = ctx.saved_tensors
return expm_input.t() * grad_output
def compute_constraint(model, w_adj):
assert (w_adj >= 0).detach().cpu().numpy().all()
h = TrExpScipy.apply(w_adj) - model.num_vars
return h
def compute_A_phi(model, norm="none", square=False):
weights = model.get_parameters(mode='w')[0]
prod = torch.eye(model.num_vars).to(model.device)
if norm != "none":
prod_norm = torch.eye(model.num_vars).to(model.device)
for i, w in enumerate(weights):
if square:
w = w ** 2
else:
w = torch.abs(w)
if i == 0:
prod = torch.einsum("tij,ljt,jk->tik", w, model.adjacency.unsqueeze(0), prod)
if norm != "none":
tmp = 1. - torch.eye(model.num_vars).unsqueeze(0).to(model.device)
prod_norm = torch.einsum("tij,ljt,jk->tik", torch.ones_like(w).detach(), tmp, prod_norm)
else:
prod = torch.einsum("tij,tjk->tik", w, prod)
if norm != "none":
prod_norm = torch.einsum("tij,tjk->tik", torch.ones_like(w).detach(), prod_norm)
# sum over density parameter axis
prod = torch.sum(prod, 1)
if norm == "paths":
prod_norm = torch.sum(prod_norm, 1).to(model.device)
denominator = prod_norm + torch.eye(model.num_vars).to(model.device) # avoid / 0 on diagonal
return (prod / denominator).t()
elif norm == "none":
return prod.t()
else:
raise NotImplementedError
class BaseModel(nn.Module):
def __init__(self, num_vars, num_layers, hid_dim, num_params, nonlin="leaky-relu", norm_prod='path',
square_prod=False,device='cpu'):
"""
:param num_vars: number of variables in the system
:param num_layers: number of hidden layers
:param hid_dim: number of hidden units per layer
:param num_params: number of parameters per conditional *outputted by MLP*
:param nonlin: which nonlinearity
"""
super(BaseModel, self).__init__()
self.num_vars = num_vars
self.num_layers = num_layers
self.hid_dim = hid_dim
self.num_params = num_params
self.nonlin = nonlin
self.norm_prod = norm_prod
self.square_prod = square_prod
self.device = device
self.weights = nn.ParameterList()
self.biases = nn.ParameterList()
self.extra_params = [] # Those parameter might be learnable, but they do not depend on parents.
# initialize current adjacency matrix
self.adjacency = nn.Parameter(torch.ones((self.num_vars, self.num_vars)) - torch.eye(self.num_vars), requires_grad=False)
#self.adjacency=self.adjacency.to(self.device)
self.zero_weights_ratio = 0.
self.numel_weights = 0
# Instantiate the parameters of each layer in the model of each variable
for i in range(self.num_layers + 1):
in_dim = self.hid_dim
out_dim = self.hid_dim
if i == 0:
in_dim = self.num_vars
if i == self.num_layers:
out_dim = self.num_params
self.weights.append(nn.Parameter(torch.zeros(self.num_vars, out_dim, in_dim)))
self.biases.append(nn.Parameter(torch.zeros(self.num_vars, out_dim)))
self.numel_weights += self.num_vars * out_dim * in_dim
def forward_given_params(self, x, weights, biases):
"""
:param x: batch_size x num_vars
:param weights: list of lists. ith list contains weights for ith MLP
:param biases: list of lists. ith list contains biases for ith MLP
:return: batch_size x num_vars * num_params, the parameters of each variable conditional
"""
bs = x.size(0)
num_zero_weights = 0
for k in range(self.num_layers + 1):
# apply affine operator
if k == 0:
adj = self.adjacency.unsqueeze(0).to(self.device)
x = torch.einsum("tij,ljt,bj->bti", weights[k], adj, x) + biases[k]
else:
x = torch.einsum("tij,btj->bti", weights[k], x) + biases[k]
# count num of zeros
num_zero_weights += weights[k].numel() - weights[k].nonzero().size(0)
# apply non-linearity
if k != self.num_layers:
x = F.leaky_relu(x) if self.nonlin == "leaky-relu" else torch.sigmoid(x)
self.zero_weights_ratio = num_zero_weights / float(self.numel_weights)
return torch.unbind(x, 1)
def get_w_adj(self):
"""Get weighted adjacency matrix"""
return compute_A_phi(self, norm=self.norm_prod, square=self.square_prod)
def reset_params(self):
with torch.no_grad():
for node in range(self.num_vars):
for i, w in enumerate(self.weights):
w = w[node]
nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('leaky_relu'))
for i, b in enumerate(self.biases):
b = b[node]
b.zero_()
def get_parameters(self, mode="wbx"):
"""
Will get only parameters with requires_grad == True
:param mode: w=weights, b=biases, x=extra_params (order is irrelevant)
:return: corresponding dicts of parameters
"""
params = []
if 'w' in mode:
weights = []
for w in self.weights:
weights.append(w)
params.append(weights)
if 'b'in mode:
biases = []
for j, b in enumerate(self.biases):
biases.append(b)
params.append(biases)
if 'x' in mode:
extra_params = []
for ep in self.extra_params:
if ep.requires_grad:
extra_params.append(ep)
params.append(extra_params)
return tuple(params)
def set_parameters(self, params, mode="wbx"):
"""
Will set only parameters with requires_grad == True
:param params: tuple of parameter lists to set, the order should be coherent with `get_parameters`
:param mode: w=weights, b=biases, x=extra_params (order is irrelevant)
:return: None
"""
with torch.no_grad():
k = 0
if 'w' in mode:
for i, w in enumerate(self.weights):
w.copy_(params[k][i])
k += 1
if 'b' in mode:
for i, b in enumerate(self.biases):
b.copy_(params[k][i])
k += 1
if 'x' in mode and len(self.extra_params) > 0:
for i, ep in enumerate(self.extra_params):
if ep.requires_grad:
ep.copy_(params[k][i])
k += 1
def get_grad_norm(self, mode="wbx"):
"""
Will get only parameters with requires_grad == True, simply get the .grad
:param mode: w=weights, b=biases, x=extra_params (order is irrelevant)
:return: corresponding dicts of parameters
"""
grad_norm = 0
if 'w' in mode:
for w in self.weights:
grad_norm += torch.sum(w.grad ** 2)
if 'b'in mode:
for j, b in enumerate(self.biases):
grad_norm += torch.sum(b.grad ** 2)
if 'x' in mode:
for ep in self.extra_params:
if ep.requires_grad:
grad_norm += torch.sum(ep.grad ** 2)
return torch.sqrt(grad_norm)
def save_parameters(self, exp_path, mode="wbx"):
params = self.get_parameters(mode=mode)
# save
with open(os.path.join(exp_path, "params_"+mode), 'wb') as f:
pickle.dump(params, f)
def load_parameters(self, exp_path, mode="wbx"):
with open(os.path.join(exp_path, "params_"+mode), 'rb') as f:
params = pickle.load(f)
self.set_parameters(params, mode=mode)
def get_distribution(self, density_params):
raise NotImplementedError
class LearnableModel(BaseModel):
def __init__(self, num_vars, num_layers, hid_dim, num_params, nonlin="leaky-relu", norm_prod='path',
square_prod=False,device='cpu'):
super(LearnableModel, self).__init__(num_vars, num_layers, hid_dim, num_params, nonlin=nonlin,
norm_prod=norm_prod, square_prod=square_prod,device=device)
self.reset_params()
def compute_log_likelihood(self, x, weights, biases, extra_params, detach=False):
"""
Return log-likelihood of the model for each example.
WARNING: This is really a joint distribution only if the DAGness constraint on the mask is satisfied.
Otherwise the joint does not integrate to one.
:param x: (batch_size, num_vars)
:param weights: list of tensor that are coherent with self.weights
:param biases: list of tensor that are coherent with self.biases
:return: (batch_size, num_vars) log-likelihoods
"""
density_params = self.forward_given_params(x, weights, biases)
if len(extra_params) != 0:
extra_params = self.transform_extra_params(self.extra_params)
log_probs = []
for i in range(self.num_vars):
density_param = list(torch.unbind(density_params[i], 1))
if len(extra_params) != 0:
density_param.extend(list(torch.unbind(extra_params[i], 0)))
conditional = self.get_distribution(density_param)
x_d = x[:, i].detach() if detach else x[:, i]
log_probs.append(conditional.log_prob(x_d).unsqueeze(1))
return torch.cat(log_probs, 1)
def compute_weighted_log_likelihood(self, x, weights, biases, extra_params, sample_weight, detach=False):
"""
Return log-likelihood of the model for each example.
WARNING: This is really a joint distribution only if the DAGness constraint on the mask is satisfied.
Otherwise the joint does not integrate to one.
:param x: (batch_size, num_vars)
:param weights: list of tensor that are coherent with self.weights
:param biases: list of tensor that are coherent with self.biases
:return: (batch_size, num_vars) log-likelihoods
"""
log_probs=self.compute_log_likelihood(x, weights, biases, extra_params, detach)
return
def get_distribution(self, dp):
raise NotImplementedError
def transform_extra_params(self, extra_params):
raise NotImplementedError
class LearnableModel_NonLinGauss(LearnableModel):
def __init__(self, num_vars, num_layers, hid_dim, nonlin="leaky-relu", norm_prod='path',
square_prod=False,device='cpu'):
super(LearnableModel_NonLinGauss, self).__init__(num_vars, num_layers, hid_dim, 2, nonlin=nonlin,
norm_prod=norm_prod, square_prod=square_prod,device=device)
def get_distribution(self, dp):
return torch.distributions.normal.Normal(dp[0], torch.exp(dp[1]))
class LearnableModel_NonLinGaussANM(LearnableModel):
def __init__(self, num_vars, num_layers, hid_dim, nonlin="leaky-relu", norm_prod='path',
square_prod=False,device='cpu'):
super(LearnableModel_NonLinGaussANM, self).__init__(num_vars, num_layers, hid_dim, 1, nonlin=nonlin,
norm_prod=norm_prod, square_prod=square_prod,device=device)
# extra parameters are log_std
extra_params = np.ones((self.num_vars,))
np.random.shuffle(extra_params) # TODO: make sure this init does not bias toward gt model
# each element in the list represents a variable, the size of the element is the number of extra_params per var
self.extra_params = nn.ParameterList()
for extra_param in extra_params:
self.extra_params.append(nn.Parameter(torch.tensor(np.log(extra_param).reshape(1)).type(torch.Tensor)))
def get_distribution(self, dp):
return torch.distributions.normal.Normal(dp[0], dp[1])
def transform_extra_params(self, extra_params):
transformed_extra_params = []
for extra_param in extra_params:
transformed_extra_params.append(torch.exp(extra_param))
return transformed_extra_params # returns std_dev
def dual_ascent_step_golem(args, model, X, train_loader, adp_flag, adaptive_model):
X = X - X.mean(axis=0, keepdims=True)
X = X.to(args.device)
#print(X)
patience=args.golem_patience
cur_patience=0
last_loss=inf
epoch=0
while cur_patience<patience:
optimizer = torch.optim.Adam([ param for param in model.parameters() if param.requires_grad == True], lr=model.lr)
primal_obj = torch.tensor(0.).to(args.device)
tot_loss = torch.tensor(0.).to(args.device)
tot_likelihood = torch.tensor(0.).to(args.device)
tot_L1 = torch.tensor(0.).to(args.device)
tot_h = torch.tensor(0.).to(args.device)
for _ , tmp_x in enumerate(train_loader):
batch_x = tmp_x[0].to(args.device)
batch_x = batch_x - torch.mean(batch_x)
X_hat = model.predict(batch_x)
# TODO: the adaptive loss should add here
if adp_flag == False or args.run_mode == False:
reweight_list = torch.ones(batch_x.shape[0],1)/batch_x.shape[0]
reweight_list = reweight_list.to(args.device)
else:
with torch.no_grad():
model.eval()
reweight_list = adaptive_model((batch_x-X_hat)**2)
model.train()
# print(reweight_list.squeeze(1))
# print(reweight_list)
# print(model.W.weight)
# input()
loss, likelihood, L1_penalty, h = model(batch_x,reweight_list)#adaptive_loss(X_hat, batch_x, reweight_list)
#print(loss)
tot_loss+=loss
tot_likelihood+=likelihood
tot_L1+=L1_penalty
tot_h+=h
optimizer.zero_grad()
tot_loss.backward()
optimizer.step()
if tot_loss.detach().item() < last_loss:
last_loss= tot_loss.detach().item()
cur_patience=0
else:
cur_patience+=1
#print(model.W.weight)
h_cur = model._compute_h().detach().item()
perf_str='Epoch %d : training loss ==[%.5f = %.5f + %.5f + %.5f], curr H: %.5f, curr patience: %d' % (
epoch, tot_loss.detach().item(),tot_likelihood.detach().item(),
tot_L1.detach().item(), tot_h.detach().item(), h_cur,cur_patience)
epoch+=1
#print(perf_str)
return h
def dual_ascent_step(args, model, X, train_loader, lambda1, lambda2, rho, alpha, h, rho_max, adp_flag, adaptive_model):
"""Perform one step of dual ascent in augmented Lagrangian."""
def adaptive_loss(output, target, reweight_list):
R = output-target
# reweight_matrix = torch.diag(reweight_idx).to(args.device)
# loss = 0.5 * torch.sum(torch.matmul(reweight_matrix, R))
loss = 0.5 * torch.sum(torch.mul(reweight_list, R**2))
return loss
def closure():
X.to(args.device)
model.to(args.device)
optimizer.zero_grad()
#print([param.device for param in model.parameters()])
X_hat = model(X)
loss = squared_loss(X_hat, X)
h_val = model.h_func()
penalty = 0.5 * rho * h_val * h_val + alpha * h_val
l2_reg = 0.5 * lambda2 * model.l2_reg()
l1_reg = lambda1 * model.fc1_l1_reg()
primal_obj = loss + penalty + l2_reg + l1_reg
primal_obj.backward()
# if COUNT % 100 == 0:
# print(f"{primal_obj}: {primal_obj.item():.4f}; count: {COUNT}")
return primal_obj
def r_closure():
optimizer.zero_grad()
primal_obj = torch.tensor(0.).to(args.device)
loss = torch.tensor(0.).to(args.device)
for _ , tmp_x in enumerate(train_loader):
batch_x = tmp_x[0].to(args.device)
X_hat = model(batch_x)
# TODO: the adaptive loss should add here
if adp_flag == False:
reweight_list = torch.ones(batch_x.shape[0],1)/batch_x.shape[0]
reweight_list = reweight_list.to(args.device)
else:
with torch.no_grad():
model.eval()
reweight_list = adaptive_model((batch_x-X_hat)**2)
model.train()
# print(reweight_list.squeeze(1))
primal_obj += adaptive_loss(X_hat, batch_x, reweight_list)
h_val = model.h_func()
penalty = 0.5 * rho * h_val * h_val + alpha * h_val
l2_reg = 0.5 * lambda2 * model.l2_reg()
l1_reg = lambda1 * model.fc1_l1_reg()
primal_obj += penalty + l2_reg + l1_reg
primal_obj.backward()
# if COUNT % 100 == 0:
# print(f"{primal_obj}: {primal_obj.item():.4f}; count: {COUNT}")
return primal_obj
h_new = None
optimizer = LBFGSBScipy(model.parameters())
# X_torch = torch.from_numpy(X)
while rho < rho_max:
#for i in range(5):
if args.run_mode:
optimizer.step(closure) # NOTE: updates model in-place
else: # NOTE: the adaptive reweight operation
optimizer.step(r_closure)
with torch.no_grad():
h_new = model.h_func().item()
if h_new > 0.25 * h:
rho *= 10
else:
break
alpha += rho * h_new
return rho, alpha, h_new
def dual_ascent_step_daggnn(args, model, X, train_loader, rho, alpha, h, rho_max, adp_flag, adaptive_model,true_graph):
# One augmented-Lagrangian (dual ascent) step for DAG-GNN: trains the
# encoder/decoder until the acyclicity value h(A) stops shrinking enough,
# then either grows the penalty weight c_A (rho) tenfold or updates the
# multiplier lambda_A (alpha). Returns (c_A, lambda_A, h_A_new).
# NOTE(review): indentation in this dump is flattened; the comments below
# mark intended nesting only — confirm against the original repository.
def _h_A(A, m):
# Acyclicity measure trace[(I + A*A/m)^m] - m; equals 0 iff A encodes a DAG.
def matrix_poly(matrix, d):
x = torch.eye(d).double()+ torch.div(matrix, d)
return torch.matrix_power(x, d)
expm_A = matrix_poly(A*A, m)
h_A = torch.trace(expm_A) - m
return h_A
def update_optimizer(optimizer, original_lr, c_A):
'''related LR to c_A, whenever c_A gets big, reduce LR proportionally'''
# Rescale by log10(c_A), then clamp the result into [MIN_LR, MAX_LR].
MAX_LR = 1e-2
MIN_LR = 1e-4
estimated_lr = original_lr / (math.log10(c_A) + 1e-10)
if estimated_lr > MAX_LR:
lr = MAX_LR
elif estimated_lr < MIN_LR:
lr = MIN_LR
else:
lr = estimated_lr
# set LR
for parame_group in optimizer.param_groups:
parame_group['lr'] = lr
return optimizer, lr
def adaptive_nll_gaussian(preds, target, variance, reweight_list):
# Sample-reweighted Gaussian NLL (used when adaptive reweighting is active).
neg_log_p = variance + torch.div(torch.pow(preds - target, 2), 2.*np.exp(2. * variance))
return torch.sum(torch.mul(reweight_list,neg_log_p)) / (target.size(0))
def nll_gaussian(preds, target, variance, add_const=False):
# Plain Gaussian negative log-likelihood, averaged over the batch dimension.
mean1 = preds
mean2 = target
neg_log_p = variance + torch.div(torch.pow(mean1 - mean2, 2), 2.*np.exp(2. * variance))
# if add_const:
# const = 0.5 * torch.log(2 * torch.from_numpy(np.pi) * variance)
# neg_log_p += const
return neg_log_p.sum() / (target.size(0))
def kl_gaussian_sem(preds):
# KL term of N(mu, I) vs N(0, I): 0.5 * sum(mu^2) / batch size.
mu = preds
kl_div = mu * mu
kl_sum = kl_div.sum()
return (kl_sum / (preds.size(0)))*0.5
def train(epoch, lambda_A, c_A, optimizer):
# One epoch over train_loader: ELBO + acyclicity penalty + L1 sparsity on A.
# update optimizer
optimizer, lr = update_optimizer(optimizer, args.daggnn_lr, c_A)
nll_train = []
kl_train = []
mse_train = []
shd_trian = []
model.train()
#scheduler.step()
for _ , tmp_x in enumerate(train_loader):
batch_x = tmp_x[0].to(args.device)
optimizer.zero_grad()
batch_x=torch.unsqueeze(batch_x,dim=2)
logits, origin_A, Wa = model.encoder(batch_x) # logits is of size: [num_sims, z_dims]
edges = logits
output = model.decoder(edges, origin_A, Wa)
# A tensor compares unequal to itself only where it holds NaNs.
if torch.sum(output != output):
print('nan error\n')
target = batch_x.squeeze()
preds = output.squeeze()
variance = 0.
# Uniform weights unless the adaptive reweighting model is active.
if adp_flag == False or args.run_mode == 1:
reweight_list = torch.ones(batch_x.shape[0],1)/batch_x.shape[0]
reweight_list = reweight_list.to(args.device)
else:
with torch.no_grad():
model.eval()
reweight_list = adaptive_model((target-preds)**2)
# reconstruction accuracy loss
#loss_nll = adaptive_nll_gaussian(preds, target, variance, reweight_list)
# NOTE(review): reweight_list is computed above but the unweighted NLL is
# used here — confirm whether the adaptive loss was meant to be applied.
loss_nll = nll_gaussian(output, batch_x, variance)
# KL loss
loss_kl = kl_gaussian_sem(logits)
# ELBO loss:
loss = loss_kl + loss_nll
# add A loss
one_adj_A = origin_A # torch.mean(adj_A_tilt_decoder, dim =0)
sparse_loss = args.lambda1 * torch.sum(torch.abs(one_adj_A))
# compute h(A)
h_A = _h_A(origin_A, args.d)
loss += lambda_A * h_A + 0.5 * c_A * h_A * h_A + 100. * torch.trace(origin_A*origin_A) + sparse_loss #+ 0.01 * torch.sum(variance * variance)
loss.backward()
# NOTE(review): optimizer.step() returns None without a closure, so `loss`
# is clobbered here; it is not read afterwards, but confirm intent.
loss = optimizer.step()
#myA.data = stau(myA.data, args.tau_A*lr)
if torch.sum(origin_A != origin_A):
print('nan error\n')
# compute metrics
graph = origin_A.data.clone().numpy()
mse_train.append(F.mse_loss(preds, target).item())
nll_train.append(loss_nll.item())
kl_train.append(loss_kl.item())
# my_graph=graph
# my_graph[np.abs(my_graph) < 0.3]=0
#print(graph)
print(h_A.item())
print('Epoch: {:04d}'.format(epoch),
'nll_train: {:.10f}'.format(np.mean(nll_train)),
'kl_train: {:.10f}'.format(np.mean(kl_train)),
'ELBO_loss: {:.10f}'.format(np.mean(kl_train) + np.mean(nll_train)),
'mse_train: {:.10f}'.format(np.mean(mse_train)))
# NOTE(review): np.mean of a scalar sum is a no-op; the outer np.mean below
# looks redundant — confirm.
return np.mean(np.mean(kl_train) + np.mean(nll_train)), np.mean(nll_train), np.mean(mse_train), graph, origin_A
# Track the best graphs seen under each criterion across epochs.
best_ELBO_loss = np.inf
best_NLL_loss = np.inf
best_MSE_loss = np.inf
best_epoch = 0
best_ELBO_graph = []
best_NLL_graph = []
best_MSE_graph = []
# optimizer step on hyparameters
c_A = rho
lambda_A = alpha
optimizer = torch.optim.Adam(model.parameters(),lr=args.daggnn_lr)
#epoch=0
# Outer dual-ascent loop: train for daggnn_epochs, then grow c_A tenfold
# while h(A) has not shrunk below 0.25 * previous h.
while c_A < rho_max:
for epoch in range(args.daggnn_epochs):
ELBO_loss, NLL_loss, MSE_loss, graph, origin_A = train(epoch, lambda_A, c_A, optimizer)
if ELBO_loss < best_ELBO_loss:
best_ELBO_loss = ELBO_loss
best_epoch = epoch
model.best_ELBO_graph=graph
if NLL_loss < best_NLL_loss:
best_NLL_loss = NLL_loss
best_epoch = epoch
model.best_NLL_graph = graph
if MSE_loss < best_MSE_loss:
best_MSE_loss = MSE_loss
best_epoch = epoch
model.best_MSE_graph = graph
#print(graph)
#graph[np.abs(graph) < 0.3] = 0
#print(ut.count_accuracy(true_graph, graph != 0))
print("Optimization Finished!")
print("Best Epoch: {:04d}".format(best_epoch))
A_new = origin_A.data.clone()
h_A_new = _h_A(A_new, args.d)
#print("Epoch: {:04d}, ELBO: {:.10f}, NLL:{:.10f}, MSE:{:.10f}".format(best_epoch,ELBO_loss,NLL_loss,MSE_loss))
if ELBO_loss > 2 * best_ELBO_loss:
break
#epoch+=1
# update parameters
if h_A_new.item() > 0.25 * h:
c_A*=10
else:
break
# NOTE(review): lambda_A is incremented twice below (inside and after the
# loop); flattened indentation makes the intended nesting ambiguous — verify.
lambda_A += c_A * h_A_new.item()
# print(graph) break
lambda_A += c_A * h_A_new.item()
# print(graph)
# graph[np.abs(graph) < 0.3] = 0
# print(ut.count_accuracy(true_graph, graph != 0))
return c_A, lambda_A, h_A_new
def dual_ascent_step_grandag(args, model, X, train_loader, rho, alpha, h, rho_max, adp_flag, adaptive_model,true_graph, _mus,_lambdas,_w_adjs,_iter_cnt):
# One augmented-Lagrangian step for GraN-DAG: minimizes the (optionally
# sample-reweighted) negative log-likelihood plus the acyclicity penalty
# until patience runs out, then grows mu (rho) or updates lamb (alpha).
# Returns (mu, lamb, h_new, mus, lambdas, w_adjs, iter_cnt).
# NOTE(review): indentation in this dump is flattened; comments mark intent
# only. `inf`, `compute_constraint` and `plot_weighted_adjacency` must be
# defined/imported elsewhere in the module.
if args.gran_optim == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=args.gran_lr)
elif args.gran_optim == "rmsprop":
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.gran_lr)
else:
raise NotImplementedError("optimizer {} is not implemented".format(args.gran_optim))
#print([param.device for param in model.parameters()])
# Histories for logging/plotting; *_ma are exponential moving averages.
aug_lagrangians = []
aug_lagrangian_ma = []
aug_lagrangians_val = []
grad_norms = []
grad_norm_ma = []
not_nlls = [] # Augmented Lagrangrian minus (pseudo) NLL
nlls = [] # NLL on train
mus = _mus
lambdas = _lambdas
w_adjs = _w_adjs
mu=rho
lamb=alpha
cur_h=h
iter_cnt=_iter_cnt
cur_min=inf
cur_patience=0
while mu < rho_max:
for _ , tmp_x in enumerate(train_loader):
batch_x = tmp_x[0].to(args.device)
model.train()
weights, biases, extra_params = model.get_parameters(mode="wbx")
log_likelihood=model.compute_log_likelihood(batch_x, weights, biases, extra_params)
# Per-sample weights: uniform ones unless the adaptive model is active.
if adp_flag == False or args.run_mode == 1:
reweight_list = torch.ones(batch_x.shape[0],1)#/batch_x.shape[0]
reweight_list = reweight_list.to(args.device)
else:
with torch.no_grad():
model.eval()
reweight_list = adaptive_model(-log_likelihood)
loss = - torch.mean(torch.mul(reweight_list,log_likelihood))
nlls.append(loss.item())
w_adj = model.get_w_adj()
cur_h = compute_constraint(model, w_adj)
# Augmented Lagrangian: loss + 0.5*mu*h^2 + lambda*h.
aug_lagrangian = loss + 0.5 * mu * cur_h ** 2 + lamb * cur_h
optimizer.zero_grad()
aug_lagrangian.backward()
optimizer.step()
# Hard-prune edges whose weight fell below the clamp threshold.
if args.edge_clamp_range != 0:
with torch.no_grad():
to_keep = (w_adj > args.edge_clamp_range).type(torch.Tensor).to(model.device)
model.adjacency *= to_keep
if not args.no_w_adjs_log:
w_adjs.append(w_adj.detach().cpu().numpy().astype(np.float32))
mus.append(mu)
lambdas.append(lamb)
not_nlls.append(0.5 * mu * cur_h.item() ** 2 + lamb * cur_h.item())
if iter_cnt % args.plot_freq == 0:
if not args.no_w_adjs_log:
plot_weighted_adjacency(w_adjs, true_graph, args.graph_path,
name="w_adj", mus=mus, lambdas=lambdas)
# Seed the moving averages on the very first iteration of this call.
if iter_cnt==_iter_cnt:
aug_lagrangians.append(aug_lagrangian.item())
aug_lagrangian_ma.append(aug_lagrangian.item())
grad_norms.append(model.get_grad_norm("wbx").item())
grad_norm_ma.append(model.get_grad_norm("wbx").item())
else:
aug_lagrangians.append(aug_lagrangian.item())
aug_lagrangian_ma.append(aug_lagrangian_ma[-1]+ 0.01 * (aug_lagrangian.item() - aug_lagrangian_ma[-1]))
grad_norms.append(model.get_grad_norm("wbx").item())
grad_norm_ma.append(grad_norm_ma[-1] + 0.01 * (grad_norms[-1] - grad_norm_ma[-1]))
# Patience: count iterations since the last improvement of the objective.
if aug_lagrangian.item() < cur_min:
cur_min=aug_lagrangian.item()
cur_patience=0
else:
cur_patience+=1
perf_str='Iter %d : training loss ==[%.5f = %.5f + %.5f], curr H: %.5f, curr patience: %d' % (
iter_cnt, aug_lagrangians[-1], nlls[-1], not_nlls[-1], cur_h, cur_patience)
#print(perf_str)
iter_cnt+=1
# Converged for this mu: either raise the penalty or do the dual update.
if cur_patience>args.gran_patience:
with torch.no_grad():
h_new = compute_constraint(model, w_adj).item()
if h_new > 0.9 * h:
mu *= 10
cur_patience=0
cur_min=inf
else:
lamb += mu * h_new
return mu, lamb, h_new, mus, lambdas, w_adjs, iter_cnt
# NOTE(review): if the loop exits via mu >= rho_max without the patience
# branch ever firing, h_new is unbound here — verify this path.
lamb += mu * h_new
return mu, lamb, h_new, mus, lambdas, w_adjs, iter_cnt
| anzhang314/ReScore | adaptive_model/baseModel.py | baseModel.py | py | 40,511 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
5235426120 | from PIL import Image
from io import BytesIO
from all_data import all_data
def open_image(filename):
    """Load an image from *filename* and return the PIL Image object."""
    image = Image.open(filename)
    return image
def convert_bytes(bytes_stream):
    """Decode raw image bytes into an RGBA PIL Image."""
    buffer = BytesIO(bytes_stream)
    return Image.open(buffer).convert("RGBA")
def combine(image_name, file_id, other_image):
    """Compose the stored template *image_name* with *other_image* and save
    the result as temp/<file_id>.jpg. Unknown modes are silently ignored."""
    cfg = all_data["images"][image_name]
    mode = cfg["mode"]
    if mode == "bg":
        # Stored image is the background; paste a resized copy of other_image.
        base = Image.open(cfg["path"]).convert("RGBA")
        overlay = other_image.copy().resize(cfg["paste_image_size"])
        position = cfg["pos_to_paste"]
    elif mode == "fg":
        # Stored image is the foreground; negative offsets anchor to the far edge.
        overlay = Image.open(cfg["path"]).convert("RGBA")
        base = other_image.copy()
        dx, dy = cfg["indent_x"], cfg["indent_y"]
        x = dx if dx >= 0 else base.size[0] - overlay.size[0] + dx
        y = dy if dy >= 0 else base.size[1] - overlay.size[1] + dy
        position = (x, y)
    else:
        return
    base.paste(overlay, position)
    base.convert("RGB").save("temp/{}.jpg".format(file_id), "JPEG")
| TurboGoose/turbo_bot | image_module.py | image_module.py | py | 1,148 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": ... |
15531056887 | """
Created on Thu Dec 1 06:33:09 2016
@author: sushma
"""
import pickle
from collections import Counter
def main():
    """Summarize collected/clustered/classified Twitter data into summary.txt.

    Reads the pickle files produced by the collect, cluster and classify
    stages and writes human-readable counts plus one example per class.
    """
    # Load the collected users and raw messages.
    with open("clusterinput.pkl", "rb") as f:
        users = pickle.load(f)
    with open("classifyinput.pkl", "rb") as f:
        messagedata = pickle.load(f)

    # Count every distinct name seen: the users themselves plus their connections.
    counterdata = Counter()
    for val in users:
        counterdata.update(val['screen_name'])
        counterdata.update(val['connection'])

    with open("summary.txt", "w") as finalfile:
        finalfile.write("Number of users collected " + str(len(users)))
        finalfile.write("\n")
        # NOTE(review): this line reports distinct names (users + connections)
        # but reuses the label above — confirm the intended wording.
        finalfile.write("Number of users collected " + str(len(counterdata)))
        finalfile.write("\n")
        finalfile.write("Number of messages collected " + str(len(messagedata)))
        finalfile.write("\n")

        with open("clusteroutput.pkl", "rb") as f:
            clusters = pickle.load(f)
        total = sum(len(cluster) for cluster in clusters)
        finalfile.write("Number of communities discovered " + str(len(clusters)))
        finalfile.write("\n")
        finalfile.write("Average number of users per community " + str(total / len(clusters)))
        finalfile.write("\n")

        with open("classifyoutput.pkl", "rb") as f:
            classify = pickle.load(f)
        classifycounter = Counter(classify)
        finalfile.write("Number of instances for class 0 -Male found " + str(classifycounter[0]))
        finalfile.write("\n")
        finalfile.write("Number of instances for class 1 -Female found " + str(classifycounter[1]))
        finalfile.write("\n")

        # Fix: the original opened classifyoutputinstance0.pkl twice and never
        # closed any handle; load each example once via context managers.
        with open("classifyoutputinstance0.pkl", "rb") as f:
            classify = pickle.load(f)
        finalfile.write("Example of class 0 " + str(classify[0][1]))
        with open("classifyoutputinstance1.pkl", "rb") as f:
            classify = pickle.load(f)
        finalfile.write("\n")
        finalfile.write("Example of class 1 " + str(classify[0][1]))


if __name__ == '__main__':
    main()
{
"api_name": "pickle.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_n... |
36812771219 | import argparse
import os
import re
import shutil
# Convert a VHDL design from the JTAG-based UsbPort to the VPI+GHDL variant:
# add the simulator-facing *_SW signals to the top entity, swap the UsbPort
# component declaration/instantiation for UsbPort_VPI_GHDL, generate the
# replacement UsbPort_VPI_GHDL.vhd, and park the old files under ./tmp.
parser = argparse.ArgumentParser(
    description="Converts a VHDL circuit that uses the default UsbPort implementation (via JTAG) into one that can use the VPI+GHDL one."
)

# TODO: generate a new Makefile / update the old one with the new files
# usage: python3 usb_port_vpi_ghdl.py --vhdl_dir . source.vhd up_counter
parser.add_argument(
    "--vhdl_dir",
    metavar="vhdl_directory",
    type=str,
    default=".",
    help="directory of where UsbPort.vhd and vhdl project files are (default = root of script)",
)
parser.add_argument(
    "top_entity_file",
    metavar="top_entity_file",
    type=str,
    help="top entity file name (ex: counter.vhd)",
)
parser.add_argument(
    "top_entity_name",
    metavar="top_entity_name",
    type=str,
    help="top entity name (ex: counter)",
)

NEW_USB_PORT_NAME = "UsbPort_VPI_GHDL"

# Fix: raw strings so "\s" and "\(" reach the regex engine verbatim — a
# non-raw "\s" is an invalid escape (DeprecationWarning on modern Python).
USB_PORT_MAP_REGEX = r":[\s|\n]*(UsbPort)[\s|\n]*PORT[\s|\n]*MAP[\s|\n]*\("
USB_PORT_MAP_LINKS = """
-- Automated Inserted code for VPI_GHDL
inputPort_SW => inputPort_SW,
outputPort_SW => outputPort_SW,
-- Automated Inserted code for VPI_GHDL
"""
TOP_ENTITY_REGEX = r"ENTITY[\s|\n]*({})[\s|\n]*IS[\s|\n]*PORT[\s|\n]*\("
USB_PORT_COMPONENT_REGEX = r"COMPONENT[\s|\n]*(UsbPort)[\s|\n]*PORT[\s|\n]*\("
USB_PORT_DECLARATION = """
-- Automated Inserted code for VPI_GHDL
inputPort_SW : OUT STD_LOGIC_VECTOR(7 DOWNTO 0);
outputPort_SW : IN STD_LOGIC_VECTOR(7 DOWNTO 0);
-- Automated Inserted code for VPI_GHDL
"""
NEW_USB_PORT_FILE_CONTENT = """-- Auto generated by script.
LIBRARY ieee;
USE ieee.STD_LOGIC_1164.ALL;
ENTITY {} IS
PORT (
inputPort : IN STD_LOGIC_VECTOR(7 DOWNTO 0);
outputPort : OUT STD_LOGIC_VECTOR(7 DOWNTO 0);
inputPort_SW : OUT STD_LOGIC_VECTOR(7 DOWNTO 0);
outputPort_SW : IN STD_LOGIC_VECTOR(7 DOWNTO 0)
);
END ENTITY;
ARCHITECTURE UsbPort OF {} IS
BEGIN
inputPort_SW <= inputPort;
outputPort <= outputPort_SW;
END ARCHITECTURE;
""".format(NEW_USB_PORT_NAME, NEW_USB_PORT_NAME)

# get arguments
args = vars(parser.parse_args())
vhdl_dir = str(args["vhdl_dir"])
top_entity_file = str(args["top_entity_file"])
top_entity_name = str(args["top_entity_name"])

# check if the default UsbPort implementation exists
usb_port_path = vhdl_dir + "/UsbPort.vhd"
if not os.path.exists(usb_port_path):
    print("[-] file '{}' does not exist...".format(usb_port_path))
    exit(1)

# read the top entity file (fix: close the handle via a context manager)
top_entity_file_path = vhdl_dir + "/" + top_entity_file
if not os.path.exists(top_entity_file_path):
    print("[-] top file entity '{}' does not exist...".format(top_entity_file_path))
    exit(1)
with open(top_entity_file_path, "r") as fp:
    data = fp.read()

# add UsbPort_VPI_GHDL signals to the top entity port
match = re.search(
    TOP_ENTITY_REGEX.format(top_entity_name),
    data,
    flags=re.S | re.I,
)
if match is None:
    print("[-] top entity '{}' does not exist...".format(top_entity_name))
    exit(1)
# NOTE(review): the matched VHDL text is used as a re.sub replacement; it
# would need escaping if it could ever contain backslashes.
new_top_entity = match.group(0) + USB_PORT_DECLARATION
new_data = re.sub(
    TOP_ENTITY_REGEX.format(top_entity_name),
    new_top_entity,
    data,
    flags=re.S | re.I,
)
print("[+] added {} signals to top entity {}".format(NEW_USB_PORT_NAME, top_entity_name))

# replace the UsbPort component declaration with UsbPort_VPI_GHDL's one
match = re.search(USB_PORT_COMPONENT_REGEX, new_data, flags=re.S | re.I)
if match is None:
    print(
        "[-] top entity '{}' does not have a valid UsbPort port map instantiation...".format(top_entity_name)
    )
    exit(1)
new_component_name = match.group(0).replace(match.group(1), NEW_USB_PORT_NAME)
new_usb_port_component = new_component_name + USB_PORT_DECLARATION
new_data = re.sub(
    USB_PORT_COMPONENT_REGEX,
    new_usb_port_component,
    new_data,
    flags=re.S | re.I,
)
print("[+] old UsbPort component declaration replaced by {}".format(NEW_USB_PORT_NAME))

# find the UsbPort port map (component instantiation) and link the SW signals
match = re.search(USB_PORT_MAP_REGEX, new_data, flags=re.S | re.I)
if match is None:
    print(
        "[-] top entity '{}' does not have a valid UsbPort port map instantiation...".format(
            top_entity_name
        )
    )
    exit(1)
new_usb_port_name = match.group(0).replace(match.group(1), NEW_USB_PORT_NAME)
new_port_map = new_usb_port_name + USB_PORT_MAP_LINKS
new_data = re.sub(
    USB_PORT_MAP_REGEX,
    new_port_map,
    new_data,
    flags=re.S | re.I,
)
print("[+] new UsbPort SW signals linked")

# create the new UsbPort implementation file
new_usb_port_path = str(vhdl_dir) + "/{}.vhd".format(NEW_USB_PORT_NAME)
with open(new_usb_port_path, "w") as f:
    f.write(NEW_USB_PORT_FILE_CONTENT)
print("[+] {} generated!".format(new_usb_port_path))

# write the rewritten top entity next to the original
full_path = vhdl_dir + "/" + top_entity_file.replace(".vhd", "_generated.vhd")
with open(full_path, "w") as fp:
    fp.write(new_data)

# move the old UsbPort and top entity to a temporary folder
tmp_folder = vhdl_dir + "/tmp"
os.makedirs(tmp_folder, exist_ok=True)
# NOTE(review): shutil.move raises if the destination already exists in tmp/.
shutil.move(usb_port_path, tmp_folder)
shutil.move(top_entity_file_path, tmp_folder)
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
... |
3243282065 | import pickle as pkl
import numpy as np
import torch.utils.data as data
from data import common
class SatData(data.Dataset):
# Dataset pairing Sentinel (LR) and Planet (HR) rasters, plus RGB versions
# of both, for satellite super-resolution training/evaluation.
# NOTE(review): indentation in this dump is flattened; comments mark intent only.
def __init__(self, args, train=True):
self.args = args
self.train = train
# Training and testing may use different scale factors.
self.scale = args.scale if train else args.scale_test
with open('./dataset/info.pkl', 'rb') as f:
self.info=common.dotdict(pkl.load(f))
self.info.root = self.args.dir_data
self.index_list = common.check_files(self.info)
# Repeat the index list so one "epoch" spans args.test_every batches.
if train:
self.repeat = args.test_every // len(self.index_list)
def __len__(self):
if self.train:
return len(self.index_list) * self.repeat
else:
return len(self.index_list)
def __getitem__(self, idx):
# Map the (possibly repeated) training index back onto the real file list.
idx = self._get_index(idx)
indexs = self.index_list[idx]
# Load raster variants selected by the appended suffix; RGB rasters keep
# only the first three channels. Suffix meaning per the variable names:
# -1 -> low-res RGB, 0 -> RGB, 1 -> sentinel, 2 -> planet (confirm in common).
rgb_lr = common.load_raster(self.info, indexs+[-1,])[:,:,:3]
rgb = common.load_raster(self.info, indexs+[0,])[:,:,:3]
sentinel = common.load_raster(self.info, indexs+[1,])
planet = common.load_raster(self.info, indexs+[2,])
filename = common.idx2name(self.info, indexs+[0,])
sentinel, planet, rgb_lr, rgb = self.get_patch(sentinel, planet, rgb_lr, rgb)
sentinel, planet, rgb_lr, rgb = common.np2Tensor(
sentinel, planet, rgb_lr, rgb, rgb_range=self.args.rgb_range
)
return sentinel, planet, rgb_lr, rgb, filename
def _get_index(self, idx):
# Wrap the repeated training index; evaluation uses it unchanged.
if self.train:
return idx % len(self.index_list)
else:
return idx
def get_patch(self, lr, hr, lr_rgb, hr_rgb):
""" Every image has a different aspect ratio. In order to make
the input shape the same, here we crop a 96*96 patch on LR
image, and crop a corresponding area(96*r, 96*r) on HR image.
Args:
args: lr, hr
Returns:
0: cropped lr image.
1: cropped hr image.
"""
scale = self.scale
if self.train:
# Random aligned crop across all four rasters (optionally augmented).
lr, hr, lr_rgb, hr_rgb = common.get_patch(
lr,
hr,
lr_rgb,
hr_rgb,
patch_size=self.args.patch_size,
scale=scale,
its=(0, 1, 0)
)
if not self.args.no_augment:
lr, hr, lr_rgb, hr_rgb = common.augment(lr, hr, lr_rgb, hr_rgb)
else:
# Evaluation: trim the HR rasters to exactly scale x the LR size.
ih, iw = lr.shape[:2]
hr = hr[0:int(ih * scale), 0:int(iw * scale)]
hr_rgb = hr_rgb[0:int(ih * scale), 0:int(iw * scale)]
return lr, hr, lr_rgb, hr_rgb
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "data.common.dotdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "data... |
24452600516 | # -*- coding:utf-8 -*-
import random
import requests
from scrapy.selector import Selector
class GetIP(object):
    """Scrape free HTTP proxies from xicidaili.com and hand out a working one."""

    def __init__(self):
        self.IP_list = []
        self._crawl_ip()

    def _crawl_ip(self):
        """Fill self.IP_list with 'host:port' strings from the first two listing pages."""
        useragent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36' \
                    '(KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
        headers = {'User-Agent': useragent}
        for i in [1, 2]:
            url = 'http://www.xicidaili.com/nn/{0}'.format(i)
            # Fix: a timeout so the crawl cannot hang forever on a dead host.
            response = requests.get(url, headers=headers, timeout=10)
            selector = Selector(response)
            trs = selector.css('tr[class]')
            for tr in trs:
                IP = tr.css('td:nth-child(2)::text').extract_first()
                port = tr.css('td:nth-child(3)::text').extract_first()
                # Renamed from `type` to avoid shadowing the builtin.
                proto = tr.css('td:nth-child(6)::text').extract_first()
                if proto == 'HTTP':
                    self.IP_list.append('%s:%s' % (IP, port))

    def _is_usable_ip(self, ip):
        """Return True if `ip` can proxy a request to baidu within the timeout."""
        url = 'https://www.baidu.com'
        proxy_dic = {'http': ip}
        try:
            response = requests.get(url, proxies=proxy_dic, timeout=10)
            if 200 <= response.status_code < 300:
                print('IP可用')
                return True
        except Exception as e:
            print(e)
        return False

    def get_IP(self):
        """Return a verified proxy address.

        Fix: the original recursed on the unchanged list, looping forever when
        no proxy worked; now dead proxies are discarded and exhaustion raises.
        """
        while self.IP_list:
            IP = random.sample(self.IP_list, 1)[0]
            if self._is_usable_ip(IP):
                return IP
            self.IP_list.remove(IP)
        raise RuntimeError('no usable proxy IP found')
| Linsublime/scrapyspider | scrapy_spider/utils/crawlxiciIP.py | crawlxiciIP.py | py | 1,566 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scrapy.selector.Selector",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.sample",
... |
21055488185 | from __future__ import absolute_import
import atexit
import contextlib
import sys
import requests
import requests.packages.urllib3 as urllib3
from requests.adapters import DEFAULT_POOLBLOCK, HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from requests.packages.urllib3.util.retry import Retry
from pkg_resources import parse_version
from franz.openrdf.util.strings import to_native_string
from franz.openrdf.util.http import normalize_headers
# Public symbols
__all__ = ['makeRequest']
# size of the buffer used to read responses
BUFFER_SIZE = 4096
# Configure a retry strategy similar to what the curl backend does
# NOTE(review): 'method_whitelist' was renamed 'allowed_methods' in urllib3
# 1.26 and removed in 2.x — confirm the pinned urllib3/requests versions.
retries = Retry(backoff_factor=0.1,
connect=10, # 10 retries for connection-level errors
status_forcelist=(), # Retry only on connection errors
method_whitelist=False) # Retry on all methods, even POST and PUT
# We'll want to know if something contains unicode
# On Python 2 'unicode' is a distinct builtin; on Python 3 'str' covers it.
if sys.version_info >= (3, 0):
unicode_type = str
else:
unicode_type = unicode
# Never check any hostnames
class HostNameIgnoringAdapter(HTTPAdapter):
    """
    A simple transport adapter that disables hostname verification for SSL.
    """
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        # Build a pool manager that still checks certificates but never
        # matches them against the hostname (curl's VERIFYHOST=0 behavior).
        self.poolmanager = PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            assert_hostname=False,
            **pool_kwargs)
        # Attach the module-wide retry strategy to this adapter as well.
        self.max_retries = retries
def translate_proxy_scheme(scheme):
    """
    Translate proxy type form the format AG uses to the one used by requests.

    :param scheme: Proxy type in AG format.
    :return: Proxy type in requests format.
    """
    if scheme == 'socks':
        scheme = 'socks5'
    # urllib3 1.20 (released 2017-01-19, bundled with requests >= 2.13.0)
    # changed DNS behavior: the proxy resolves hostnames only for the
    # 'socks4a'/'socks5h' spellings, which older versions do not accept.
    if parse_version(urllib3.__version__) >= parse_version('1.20'):
        remap = {'socks5': 'socks5h', 'socks4': 'socks4a'}
        scheme = remap.get(scheme, scheme)
    return scheme
def create_session(obj):
    """
    Create a session object for a service.

    :param obj: A service object containing auth and config information.
    :type obj: franz.miniclient.repository.Service
    :return: A new requests session object with configuration taken from the service.
    :rtype requests.Session:
    """
    session = requests.Session()
    if obj.user is not None and obj.password is not None:
        session.auth = (obj.user, obj.password)
    # Route both plain and TLS traffic through the configured proxy, if any.
    if obj.proxy is not None:
        proxy_url = '%s://%s:%s' % (translate_proxy_scheme(obj.proxy_type),
                                    obj.proxy_host, obj.proxy_port)
        session.proxies = {'http': proxy_url, 'https': proxy_url}
    # SSL settings, emulating curl's way of handling them.
    if obj.cainfo is not None:
        session.verify = obj.cainfo           # CA certificate bundle
    if obj.sslcert is not None:
        session.cert = obj.sslcert            # client certificate
    if obj.verifypeer is not None and not obj.verifypeer:
        session.verify = False                # disable certificate validation
    if obj.verifyhost is not None and not obj.verifyhost:
        # Check the certificate, but do not verify that the hostname matches.
        session.mount('https://', HostNameIgnoringAdapter())
    else:
        session.mount('https://', HTTPAdapter(max_retries=retries))
    # Retry strategy for plain HTTP connections.
    session.mount('http://', HTTPAdapter(max_retries=retries))
    return session
def makeRequest(obj, method, url, body=None, accept=None, contentType=None, callback=None, errCallback=None, headers=None):
"""
Send an HTTP request to given URL.
:param obj: A service object containing auth and config information.
:type obj: franz.miniclient.repository.Service
:param method: Request method ("GET", "POST", ...).
:type method: string
:param url: Target address
:type url: string
:param body: Request body (for PUT/POST requests) or query string, optional.
:type body: basestring|file
:param accept: Value of the accept header (default: */*)
:type accept: string
:param contentType: MIME type of the request body, optional.
:type contentType: string
:param callback: Function that will receive the response data.
It will be called multiple times per request.
The return value should be either None or the number of bytes
received, anything else will cause the request to be aborted.
:type callback: (bytestring) -> int
:param errCallback: Invoked if the server returned an error.
Used only if `callback` is not `None`.
The arguments are the response code and
the message returned by the server.
Unlike normal callback, this is invoked at most once
and receives the complete response body.
:type errCallback: (int, string) -> None
:param headers: Either a dictionary mapping headers to values or
a list of strings that will be included in the request's headers.
:type headers: Iterable[string] | dict[string, string] | None
:return: Status code and response body, unless callback is specified (in that case None is returned).
:rtype: (int, string) | None
"""
if accept is None:
accept = "*/*"
# We create a session object lazily, so we do not have any requests-specific stuff
# in the implementation of the Service class.
if obj.session is None:
obj.session = create_session(obj)
# Unfortunately our current API does not seem to have a good place
# to close that explicitly.
atexit.register(obj.session.close)
# Encode data as utf-8 if required - requests tries to use ascii now.
if isinstance(body, unicode_type):
body = body.encode('utf-8')
method = method.upper()
# PUT/POST carry the body as the payload; other methods send it as the query string.
if method in ('PUT', 'POST'):
data = body
params = None
else:
data = None
params = body
# Get the full url
url = to_native_string(url)
if not url.startswith("http:") and not url.startswith("https:"):
url = to_native_string(obj.url) + to_native_string(url)
# Note that this will create a copy if necessary, so we're not changing the argument
headers = normalize_headers(headers)
headers['accept'] = accept
if contentType:
headers['content-type'] = contentType
if obj.runAsName:
headers['x-masquerade-as-user'] = obj.runAsName
# stream=True defers the body download so it can be fed to the callback in chunks.
response = obj.session.request(method, url, params=params, data=data, headers=headers, stream=True)
# closing() guarantees the streamed connection is released on every exit path.
with contextlib.closing(response):
if callback is not None:
# Not sure it None or "" is better for a 204 response.
if response.status_code == 204:
callback(response.content)
elif 200 <= response.status_code < 300:
for chunk in response.iter_content(BUFFER_SIZE):
callback_result = callback(chunk)
# Simulate curl's behavior
if callback_result is not None and callback_result != len(chunk):
break
else:
if errCallback is None:
response.raise_for_status()
else:
errCallback(response.status_code,
to_native_string(response.raw.read(
decode_content=True)))
else:
# Note: no error callback in this case
return response.status_code, to_native_string(response.content)
| franzinc/agraph-python | src/franz/miniclient/backends/requests.py | requests.py | py | 8,252 | python | en | code | 34 | github-code | 1 | [
{
"api_name": "requests.packages.urllib3.util.retry.Retry",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "requests.adapters.HTTPAdapter",
"line_number": 36,
"usage_type": "name"
... |
43788011167 | import requests
import json
# Query the ViaCEP web service for a Brazilian postal code typed by the user.
word = ' Busca-cep '
print(f'{word:=^30}')
# Fix: ViaCEP expects exactly 8 digits in the path — strip dashes/spaces so
# inputs like "01001-000" still form a valid URL.
usercep = ''.join(ch for ch in input('Informe seu CEP: ') if ch.isdigit())
# Fix: a timeout so the script cannot hang forever on a dead connection.
api = requests.get(f'https://viacep.com.br/ws/{usercep}/json/', timeout=10)
print(api.text)
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
}
] |
70410826594 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 27 18:13:13 2019
@author: nwu
"""
import requests
import copy
from secret import headers
from datetime import datetime, timedelta
from time import sleep
from math import exp
class ZoomAPIException(Exception):
    """Raised for Zoom API errors that the pagination loop cannot recover from."""

    def __init__(self, response):
        self.response = response

    def __str__(self):
        resp = self.response
        return "{status_code: %s, reason: %s, url: %s}" % (resp.status_code,
                                                           resp.reason,
                                                           resp.url)
class ZoomMultiPageTask:
    """Base class for Zoom endpoints paginated via `next_page_token`.

    Subclasses override `fetch_data_from_json` and `should_stop`.
    """

    def __init__(self, url, params):
        self.url = url
        self.params = copy.deepcopy(params)
        self.accum429 = 0  # consecutive throttled responses; shrinks the extra back-off

    def get_all_pages(self):
        """Walk every page of the endpoint and return the accumulated records."""
        collected = []
        token = ""
        query = copy.deepcopy(self.params)
        while True:
            if token:
                query["next_page_token"] = token
            response = requests.get(self.url, params=query, headers=headers)
            # zoom API contains error code tables:
            # https://marketplace.zoom.us/docs/api-reference/error-definitions
            print(response.status_code)
            if not response.ok:
                # Back off (or raise for fatal codes), then retry the same page.
                self.handle_error(response)
                continue
            self.accum429 = 0
            payload = response.json()
            token = payload["next_page_token"]
            collected.extend(self.fetch_data_from_json(payload))
            if self.should_stop(payload):
                return collected

    def handle_error(self, response):
        """Sleep through rate limiting (429/404); anything else is fatal."""
        if response.status_code in (429, 404):
            self.accum429 += 1
            sleep(5 + exp(-self.accum429) * 10)
        else:
            raise ZoomAPIException(response)

    def should_stop(self, json_data):
        return True

    def fetch_data_from_json(self, json_data):
        return []
class FetchMeetings(ZoomMultiPageTask):
    """Lists past meeting IDs between two dates via the dashboard metrics API."""

    def __init__(self, url, from_date, to_date, page_size=300, type="past"):
        self.from_date = from_date
        self.to_date = to_date
        self.page_size = page_size
        self.type = type
        super().__init__(url, {
            "page_size": str(page_size),
            "type": type,
            "from": from_date.strftime("%Y-%m-%d"),
            "to": to_date.strftime("%Y-%m-%d"),
        })

    def should_stop(self, json_data):
        # Stop once the API stops handing out a continuation token.
        return len(json_data["next_page_token"]) == 0

    def fetch_data_from_json(self, json_data):
        ids = []
        for meeting in json_data["meetings"]:
            ids.append(meeting["id"])
        return ids
class FetchMeetingQos(ZoomMultiPageTask):
    """Fetches per-participant QoS data for one meeting.

    Each page contributes a list of participant dicts; the number of dicts
    equals the number of participants on that page.
    """

    def __init__(self, url, page_size=10, type="past"):
        self.page_size = page_size
        self.type = type
        super().__init__(url, {"page_size": str(page_size), "type": type})

    def should_stop(self, json_data):
        # Stop once the API stops handing out a continuation token.
        return len(json_data["next_page_token"]) == 0

    def fetch_data_from_json(self, json_data):
        return json_data["participants"]
def trans_compound_data_to_str(name, compound_value):
    """Flatten a metric dict into a "key:value,key:value" string.

    `name` is currently unused but kept for interface compatibility.
    """
    parts = []
    for key, value in compound_value.items():
        parts.append("%s:%s" % (key, value))
    return ",".join(parts)
def handle_one_time_sample_qos(data, meeting_id, participant):
    """Serialize one QOS time sample as a single '#'-delimited line."""
    fields = [
        str(meeting_id),
        str(participant["user_name"]),
        data["date_time"],
        str(participant["location"]),
        str(participant["network_type"]),
        str(participant["data_center"]),
    ]
    # Each channel's metrics collapse to one "k:v,k:v" field.
    for channel in ("audio_input", "audio_output", "video_input", "video_output"):
        fields.append(trans_compound_data_to_str(channel, data[channel]))
    return "#".join(fields)
def handle_one_meeting_qos(qos, meeting_id):
    """Flatten every participant's QOS samples of one meeting into lines."""
    return [
        handle_one_time_sample_qos(sample, meeting_id, participant)
        for participant in qos
        for sample in participant["user_qos"]
    ]
if __name__ == "__main__":
    # Pull the ids of all meetings that ran 18 to 11 days ago, then fetch
    # per-participant QOS data for each and dump it as '#'-delimited lines.
    now = datetime.now()
    fetch_meetings = FetchMeetings("https://api.zoom.us/v2/metrics/meetings",
                                   now - timedelta(18),
                                   now - timedelta(11))
    meetings = fetch_meetings.get_all_pages()
    # with open('meetings.txt', 'w') as f:
    #     for id in meetings:
    #         print (id, file=f)
    all_qos_data = []
    for idd in meetings:
        job = FetchMeetingQos("https://api.zoom.us/v2/metrics/meetings/{0}/participants/qos".format(idd))
        meeting_qos = job.get_all_pages()
        all_qos_data.append((idd, meeting_qos))
    # NOTE(review): all_lines is never used below — looks like leftover code.
    all_lines = []
    with open('qos.txt_2', 'w') as f:
        for meeting_id, qos in all_qos_data:
            lines = handle_one_meeting_qos(qos, meeting_id)
            for line in lines:
                print (line, file=f)
    print (datetime.now())
| 50wu/Project | zoomQOS/zoomQos.py | zoomQos.py | py | 5,737 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "copy.deepcopy",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "secret.headers",
"line_n... |
34036384470 | from twilio.rest import Client
class TwilioService:
    """Thin wrapper around the Twilio REST client for sending SMS messages."""

    # Class-level default; replaced with a real Client instance in __init__.
    client = None

    def __init__(self):
        # NOTE(review): account credentials are hard-coded in source control;
        # they should be loaded from environment variables or a secrets store
        # and the exposed pair should be rotated.
        account_sid = 'AC1db0e8cfbae1e3b9b5834772c0ef8d6c'
        auth_token = '7f70419841a1632045d657089acd65c1'
        self.client = Client(account_sid, auth_token)

    def send_message(self, message, e_recepient_phone_number):
        """Send *message* as an SMS to *e_recepient_phone_number* from the
        fixed Twilio number."""
        # agent_phone_number = '+254717966627'
        twilio_phone_number = '+12054633293'
        self.client.messages.create(to=e_recepient_phone_number,
                                    from_=twilio_phone_number,
                                    body=message)
| mutuaMkennedy/homey | contact/services/twilio_service.py | twilio_service.py | py | 598 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "twilio.rest.Client",
"line_number": 9,
"usage_type": "call"
}
] |
163369334 | #! /usr/bin/env python
"""Toolbox for imbalanced dataset in machine learning."""
import codecs
import os
from setuptools import find_packages, setup
# get __version__ from _version.py
ver_file = os.path.join('imblearn', '_version.py')
with open(ver_file) as f:
exec(f.read())
DISTNAME = 'imbalanced-learn'
DESCRIPTION = 'Toolbox for imbalanced dataset in machine learning.'
with codecs.open('README.rst', encoding='utf-8-sig') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'G. Lemaitre, C. Aridas'
MAINTAINER_EMAIL = 'g.lemaitre58@gmail.com, ichkoar@gmail.com'
URL = 'https://github.com/scikit-learn-contrib/imbalanced-learn'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/scikit-learn-contrib/imbalanced-learn'
VERSION = __version__
CLASSIFIERS = ['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8']
INSTALL_REQUIRES = [
'numpy>=1.13.3',
'scipy>=0.19.1',
'scikit-learn>=0.23',
'joblib>=0.11'
]
EXTRAS_REQUIRE = {
'tests': [
'pytest',
'pytest-cov'],
'docs': [
'sphinx',
'sphinx-gallery',
'sphinx_rtd_theme',
'sphinxcontrib-bibtex',
'numpydoc',
'matplotlib',
'pandas',
]
}
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE)
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITHUB_REPOS/scikit-learn-contrib@imbalanced-learn/setup.py | setup.py | py | 2,284 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_nu... |
21035711313 | """
Checks for configuration option values
"""
import collections.abc
import grp
import os
import pwd
import re
import socket
import textwrap
import typing
from typing import Sequence, Type, Union
import netaddr
from pyroute2.iproute import IPRoute
from .base import (
Check,
ConfigOptionError,
OptionCheckError,
coerce,
option_reference,
qualified_name,
)
# noinspection PyPep8Naming
class greater_than(Check):
    # Validates that an option value is strictly above a fixed threshold.

    def __init__(self, threshold):
        super().__init__()
        self.threshold = threshold
        self.__doc__ = "Must be greater than :python:`{!r}`".format(threshold)

    def __call__(self, config, value):
        if value <= self.threshold:
            message = "Must be greater than {!r}".format(self.threshold)
            raise OptionCheckError(message, option=self.option.__name__)
# noinspection PyPep8Naming
class between(Check):
    # Validates that an option value lies within [low, high], inclusively.

    def __init__(self, low, high):
        super().__init__()
        self.low = low
        self.high = high
        self.__doc__ = (
            "Must be between :python:`{!r}` and :python:`{!r}` inclusively"
            .format(low, high)
        )

    def __call__(self, config, value):
        in_range = self.low <= value <= self.high
        if not in_range:
            message = ("Must be between {!r} and {!r} inclusively"
                       .format(self.low, self.high))
            raise OptionCheckError(message, option=self.option.__name__)
# noinspection PyPep8Naming
class match(Check):
    # Validates that a string value matches a regular expression.

    def __init__(self, expr, flags=0):
        super().__init__()
        self.expr = re.compile(expr, flags)
        self.__doc__ = "Must match regular expression: :python:`{!r}`".format(
            self.expr.pattern,
        )

    def __call__(self, config, value):
        # re.match semantics: the pattern only needs to match at the start
        # of the value, not the whole string.
        if not self.expr.match(value):
            # NOTE(review): sibling checks pass option=self.option.__name__
            # here; confirm whether `.name` or `.__name__` is the intended
            # attribute on the bound option.
            raise OptionCheckError("Does not match regular expression {!r}"
                                   .format(self.expr.pattern),
                                   option=self.option.name)
# noinspection PyPep8Naming
class sequence(Check):
    # Applies a single element-level check to every item of a sequence.

    def __init__(self, element_check: Check):
        super().__init__()
        self.element_check = element_check
        self.__doc__ = "All elements must satisfy: {}".format(
            element_check.__doc__,
        )

    def __get__(self, instance, owner):
        # Bind the wrapped check on the first descriptor access.
        if self.option is None:
            self.element_check = self.element_check.__get__(instance, owner)
        return super().__get__(instance, owner)

    def __call__(self, config, value):
        for index, element in enumerate(value):
            try:
                self.element_check(config, element)
            except ConfigOptionError as error:
                raise OptionCheckError("Error at index {:d}: {}"
                                       .format(index, error.args[0]),
                                       option=self.option.__name__)
# noinspection PyPep8Naming
class mapping(Check):
    # Applies optional key- and value-level checks to every entry of a mapping.

    def __init__(self, key_check: Check = None, value_check: Check = None):
        super().__init__()
        if key_check is None and value_check is None:
            raise ValueError()
        self.key_check = key_check
        self.value_check = value_check
        docs = []
        if self.key_check is not None:
            docs.append("All keys must satisfy: {}"
                        .format(self.key_check.__doc__))
        if self.value_check is not None:
            docs.append("All values must satisfy: {}"
                        .format(self.value_check.__doc__))
        if len(docs) == 2:
            self.__doc__ = textwrap.indent("\n".join(docs), "- ")
        else:
            self.__doc__ = docs[0]

    def __get__(self, instance, owner):
        # Bind the wrapped checks on the first descriptor access.
        if self.option is None:
            if self.key_check is not None:
                self.key_check = self.key_check.__get__(instance, owner)
            if self.value_check is not None:
                self.value_check = self.value_check.__get__(instance, owner)
        return super().__get__(instance, owner)

    def __call__(self, config, value):
        for key, entry in value.items():
            try:
                if self.key_check is not None:
                    self.key_check(config, key)
                if self.value_check is not None:
                    self.value_check(config, entry)
            except ConfigOptionError as error:
                raise OptionCheckError("Error in key {}: {}"
                                       .format(key, error.args[0]),
                                       option=self.option.__name__)
# noinspection PyPep8Naming
class type_is(Check):
    # Validates that a value is an instance of one of the given types.

    def __init__(self, types: Union[Type, Sequence[Type]]):
        super().__init__()
        if isinstance(types, collections.abc.Sequence):
            self.types = tuple(types)
        else:
            self.types = (types,)
        names = ", ".join(qualified_name(t) for t in self.types)
        if len(self.types) > 1:
            self.__doc__ = f"Type must be one of {names}"
        else:
            self.__doc__ = (
                f"Type must be :class:`{qualified_name(self.types[0])}`"
            )

    def __call__(self, config, value):
        if not isinstance(value, self.types):
            names = ", ".join(qualified_name(t) for t in self.types)
            raise OptionCheckError(
                f"Must be an instance of {names}",
                option=self.option.__name__,
            )
# noinspection PyUnusedLocal
@Check.decorate
def not_empty(option, config, value):
    """Must not be empty"""
    # Rejects any sized container/string with zero elements.
    if not len(value):
        raise OptionCheckError("Must not be empty", option=option.__name__)
# noinspection PyPep8Naming
class satisfy_all(Check):
    # Composite check: passes only if every wrapped check passes.
    checks: typing.Sequence[Check]

    def __init__(self, *checks: Check):
        super().__init__()
        self.checks = checks
        # Every wrapped check must carry a description to build ours from.
        assert all(c.__doc__ is not None for c in checks)
        self.__doc__ = "Must satisfy all of the following:\n\n{}".format(
            textwrap.indent(
                "\n".join([c.__doc__ for c in checks if c.__doc__]),
                "- "
            ),
        )

    def __get__(self, instance, owner):
        # Bind all wrapped checks on the first descriptor access.
        if self.option is None:
            self.checks = [check.__get__(instance, owner)
                           for check in self.checks]
        return super().__get__(instance, owner)

    def __call__(self, config, value):
        # The first failing check raises; later checks are not evaluated.
        for check in self.checks:
            check(config, value)
# noinspection PyDecorator,PyUnusedLocal
@Check.decorate
def network_ip(option, config, value):
    """Must not be network or broadcast address (except if /31)"""
    # value is expected to be a netaddr.IPNetwork-like object — TODO confirm.
    if value.ip == value.value:
        raise OptionCheckError("The host part of {} is the network address of "
                               "the subnet. Must be an IP of the subnet."
                               .format(value), option=option.__name__)
    # Prefix length 31 is special, see RFC 3021
    if value.prefixlen != 31 and value.ip == value.broadcast:
        raise OptionCheckError("The host part of {} is the broadcast address "
                               "of the subnet. Must be an IP of the subnet."
                               .format(value), option=option.__name__)
# noinspection PyUnusedLocal
@Check.decorate
def directory_exists(cls, config, value):
    """Must be an existing directory"""
    if not os.path.exists(value):
        message = "Directory {} does not exists".format(value)
        raise OptionCheckError(message, option=cls.__name__)
    if not os.path.isdir(value):
        message = "{} is not a directory".format(value)
        raise OptionCheckError(message, option=cls.__name__)
# noinspection PyUnusedLocal
@Check.decorate
def file_exists(cls, config, value):
    """Must be an existing file"""
    if not os.path.exists(value):
        message = "File {} does not exists".format(value)
        raise OptionCheckError(message, option=cls.__name__)
    if not os.path.isfile(value):
        message = "{} is not a file".format(value)
        raise OptionCheckError(message, option=cls.__name__)
@Check.decorate
def file_creatable(option, config, value):
    """Must be a creatable file name"""
    # A file is creatable when its parent directory exists.
    # NOTE(review): directory_exists is itself Check.decorate-wrapped; confirm
    # that calling it with (option, config, parent) matches the wrapper's
    # call signature.
    parent = os.path.dirname(value)
    directory_exists(option, config, parent)
# noinspection PyUnusedLocal
@Check.decorate
def interface_exists(option, config, value):
    """Network interface must exists"""
    try:
        socket.if_nametoindex(value)
    except OSError:
        message = "Interface {} not found".format(value)
        raise OptionCheckError(message, option=option.__name__)
# noinspection PyUnusedLocal
@Check.decorate
def address_exists(cls, config, value):
    """IP address must be configured"""
    ip = IPRoute()
    if value.version == 4:
        family = socket.AF_INET
    elif value.version == 6:
        family = socket.AF_INET6
    else:
        raise AssertionError("Unknown version {}".format(value.version))
    # get_addr returns the list of matching configured addresses.  The check
    # fails when the address is absent.  BUG FIX: the original raised when
    # matches *were* found, i.e. it rejected exactly the configured addresses.
    if not ip.get_addr(family=family, address=value.ip,
                       prefixlen=value.prefixlen):
        raise OptionCheckError("No such address {}".format(value),
                               option=cls.__name__)
# noinspection PyPep8Naming
class ip_range_in_networks(Check):
    # Validates that an IP range is fully contained in one configured network.

    def __init__(self, other_option):
        super().__init__()
        self.other_option = coerce(other_option)
        self.__doc__ = (
            "Must be contained in the networks configured with {}"
            .format(option_reference(self.other_option))
        )

    def __call__(self, config, value):
        networks = config[self.other_option]
        first = netaddr.IPAddress(value.first)
        last = netaddr.IPAddress(value.last)
        # Both endpoints must fall inside the same network.
        for network in networks:
            if first in network and last in network:
                return
        raise OptionCheckError("Range not contained in any of the "
                               "networks {}"
                               .format(', '.join(networks)),
                               option=self.option.__name__)
# noinspection PyUnusedLocal
@Check.decorate
def user_exists(option, config, value):
    """Must be a valid UNIX user"""
    try:
        return pwd.getpwnam(value)
    except KeyError:
        message = "User {} does not exists".format(value)
        raise OptionCheckError(message, option=option.__name__)
# noinspection PyUnusedLocal
@Check.decorate
def group_exists(option, config, value):
    """Must be a valid UNIX group"""
    try:
        return grp.getgrnam(value)
    except KeyError:
        message = "Group {} does not exists".format(value)
        raise OptionCheckError(message, option=option.__name__)
# noinspection PyPep8Naming
class has_keys(Check):
    # Validates that a nested mapping contains a given path of keys.

    def __init__(self, *keys: str):
        super().__init__()
        self.keys = keys
        self.__doc__ = "Must contain {}".format(
            " -> ".join("{!r}".format(key) for key in self.keys),
        )

    def __call__(self, config, value):
        # Walk the key path, tracking the keys visited so far to build a
        # precise option path (e.g. option['a']['b']) for error messages.
        obj = value
        checked: typing.List[str] = []
        for key in self.keys:
            if not isinstance(obj, collections.abc.Mapping):
                path = self.option.name + "".join(map("[{!r}]".format, checked))
                raise OptionCheckError(
                    "must be a mapping type like dict",
                    option=path,
                )
            checked.append(key)
            try:
                obj = obj[key]
            except KeyError:
                path = self.option.name + "".join(map("[{!r}]".format, checked))
                # from None: the KeyError itself adds no useful context.
                raise OptionCheckError("Missing key", option=path) from None
# noinspection PyPep8Naming
class user_mapping_for_user_exists(Check):
    # Validates that a mapping has an entry for the user or a PUBLIC fallback.

    def __init__(self, user_name):
        super().__init__()
        self.user_name = user_name
        self.__doc__ = "Must have contain a mapping for {} or PUBLIC".format(
            self.user_name,
        )

    def __call__(self, config, value):
        has_entry = 'PUBLIC' in value or self.user_name in value
        if not has_entry:
            raise OptionCheckError("No mapping for user {}"
                                   .format(self.user_name),
                                   option=self.option.__name__)
| agdsn/hades | src/hades/config/check.py | check.py | py | 12,235 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "base.Check",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "base.OptionCheckError",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "base.Check",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "base.OptionCheckError",
... |
14476654720 | import datetime
from django.core.management.base import NoArgsCommand
from django.template import Context, loader
from localtv import models
from localtv import util
class Command(NoArgsCommand):
    """Email admins a digest of manually submitted videos awaiting review."""

    def handle_noargs(self, **kwargs):
        # Daily digest covering the last 24 hours.
        self.send_email(datetime.timedelta(hours=24),
                        'today',
                        'admin_queue_daily')
        # BUG FIX: the original wrote ``weekday == 0``, comparing the bound
        # method object to 0 (always False), so the weekly digest was never
        # sent.  weekday() returns 0 on Mondays.
        if datetime.date.today().weekday() == 0:  # Monday
            self.send_email(
                datetime.timedelta(days=7),
                'last week',
                'admin_queue_weekly')

    def send_email(self, delta, time_period, notice_type):
        """Send a *notice_type* digest when videos were submitted within
        *delta* of now.

        :param delta: datetime.timedelta defining the "new videos" window
        :param time_period: human label for the window ('today', 'last week')
        :param notice_type: notification type passed to util.send_notice
        """
        sitelocation = models.SiteLocation.objects.get_current()
        previous = datetime.datetime.now() - delta
        # Only manually submitted videos: feed/search imports are excluded.
        queue_videos = models.Video.objects.filter(
            site=sitelocation.site,
            status=models.VIDEO_STATUS_UNAPPROVED,
            feed=None, search=None)
        new_videos = queue_videos.filter(when_submitted__gte=previous)
        # Stay silent when nothing new arrived during the window.
        if new_videos.count():
            subject = 'Video Submissions for %s' % sitelocation.site.name
            t = loader.get_template(
                'localtv/submit_video/review_status_email.txt')
            c = Context({'new_videos': new_videos,
                         'queue_videos': queue_videos,
                         'time_period': time_period,
                         'site': sitelocation.site})
            message = t.render(c)
            util.send_notice(notice_type,
                             subject, message,
                             sitelocation=sitelocation)
| natea/Miro-Community | localtv/submit_video/management/commands/review_status_email.py | review_status_email.py | py | 1,589 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.core.management.base.NoArgsCommand",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 15,
"usage_type": "call"
},
{
"a... |
24862884689 | import datetime
class Usuario:
    """Base user of the console blog app, with login/registration state."""

    def __init__(self, id, nombre, apellido, telefono, username, email, contrasena, avatar):
        self.id = id
        self.nombre = nombre
        self.apellido = apellido
        self.telefono = telefono
        self.username = username
        self.email = email
        self.contrasena = contrasena
        self.fecha_registro = None
        self.avatar = avatar
        self.estado = None
        self.online = None

    def __str__(self):
        return (f'id: {self.id}, Nombre: {self.nombre}, Apellido: {self.apellido}, Telefono: {self.telefono}, '
                f'Username: {self.username}, Email: {self.email}, Contraseña: {self.contrasena}, '
                f'Fecha de registro: {self.fecha_registro}, Avatar: {self.avatar}, Estado: {self.estado}, '
                f'Online: {self.online}')

    def set_login(self):
        """Toggle the session state, announcing login or logout."""
        if self.online is False:
            self.online = True
            print('Inicio de sesión exitoso!!!')
        else:
            self.online = False
            print('Sesión Finalizada. Adios')

    def set_registrar(self):
        """Stamp the registration date and mark the account active, offline."""
        self.fecha_registro = datetime.date.today()
        self.estado = True
        self.online = False
class Publico(Usuario):
    """Public user: may read and comment on articles but not publish."""

    def __init__(self, id, nombre, apellido, telefono, username, email, contrasena, avatar):
        super().__init__(id, nombre, apellido, telefono, username, email, contrasena, avatar)
        # Type flag; set to True by set_registrar.
        self.es_publico = None

    def __str__(self):
        return super().__str__() + f' Es_publico: {self.es_publico}'

    def set_registrar(self):
        super().set_registrar()
        self.es_publico = True

    def set_login(self):
        # Delegates unchanged to Usuario.set_login.
        return super().set_login()
class Colaborador(Usuario):
    """Collaborator user: may additionally publish articles."""

    def __init__(self, id, nombre, apellido, telefono, username, email, contrasena, avatar):
        super().__init__(id, nombre, apellido, telefono, username, email, contrasena, avatar)
        # Type flag; set to True by set_registrar.
        self.es_colaborador = None

    def __str__(self):
        return super().__str__() + f' Es_colaborador: {self.es_colaborador}'

    def set_registrar(self):
        super().set_registrar()
        self.es_colaborador = True

    def set_login(self):
        # Delegates unchanged to Usuario.set_login.
        return super().set_login()
class Articulo:
    """Blog article authored by a collaborator user."""

    def __init__(self, id, id_usuario, titulo, resumen, contenido, imagen):
        self.id = id
        self.id_usuario = id_usuario
        self.titulo = titulo
        self.resumen = resumen
        self.contenido = contenido
        self.fecha_publicacion = None
        self.imagen = imagen
        self.estado = None

    def __str__(self):
        return (f' {self.id} - Título: {self.titulo},\n Resumen: {self.resumen},\n '
                f'Contenido: {self.contenido},\n Fecha Publicación: {self.fecha_publicacion},\n Imagen: {self.imagen}')

    def set_publicar_articulo(self):
        """Stamp today's date (dd-mm-yy) and mark the article as published."""
        hoy = datetime.date.today()
        self.fecha_publicacion = hoy.strftime('%d-%m-%y')
        self.estado = True
class Comentario:
    """Comment attached to an article by a user."""

    def __init__(self, id, id_articulo, id_usuario, contenido):
        self.id = id
        self.id_articulo = id_articulo
        self.id_usuario = id_usuario
        self.contenido = contenido
        self.fecha_hora = None
        self.estado = None

    def __str__(self):
        return f'Comentario: {self.contenido}, Fecha/hora: {self.fecha_hora}'

    def set_comentario(self):
        """Stamp the creation time (dd-mm-YYYY HH:MM:SS) and activate."""
        self.fecha_hora = datetime.datetime.today().strftime("%d-%m-%Y %H:%M:%S")
        self.estado = True

    def set_id(self, lista):
        """Assign and return the next sequential id after *lista*'s last item.

        BUG FIX: the original ended with ``return id``, which returned the
        builtin ``id`` function instead of the assigned identifier.
        """
        if len(lista) == 0:
            self.id = 1
        else:
            self.id = lista[-1].id + 1
        return self.id
def mostrar_articulos():
    """Print every article, or a notice when there are none."""
    if not lista_articulos:
        print('------------------')
        print('No existe ningún artículo para mostrar.')
        print('------------------')
        return
    print('------ARTICULOS------')
    for articulo in lista_articulos:
        print(articulo)
    print('------------------')
def mostrar_articulo_comentarios(id_articulo):
    """Print one article and its comments, tagging each with its author."""
    print('------------------')
    for articulo in lista_articulos:
        if articulo.id != id_articulo:
            continue
        print('------------------')
        print('________ARTICULO________')
        print(articulo)
        print('------------------')
        print('COMENTARIOS:')
        for comentario in lista_comentarios:
            if comentario.id_articulo != id_articulo:
                continue
            for usuario in lista_usuarios:
                if usuario.id == comentario.id_usuario:
                    print(f' Usuario: {usuario.username} {comentario}.')
def mostrar_todos_articulos_comentarios():
    """Print every article together with its comments and their authors."""
    for articulo in lista_articulos:
        print('------------------')
        print('________ARTICULO________')
        print(articulo)
        print('------------------')
        print('COMENTARIOS:')
        for comentario in lista_comentarios:
            if comentario.id_articulo != articulo.id:
                continue
            for usuario in lista_usuarios:
                if usuario.id == comentario.id_usuario:
                    print(f' Usuario: {usuario.username} {comentario}.')
def existe_usuario(username):
    """Return True when a registered user already has *username*."""
    return any(usuario.username == username for usuario in lista_usuarios)
def ingresar_validar(texto):
    """Prompt with *texto* until the input passes that field's validation.

    Returns the accepted (non-empty) value, or the literal 'EXIT' (any case)
    when the user cancels.
    """
    while True:
        dato = input(texto)
        if texto=='Teléfono: 'and dato.isdigit() == False:
            # NOTE(review): this branch runs before the EXIT check below, so
            # typing EXIT at the phone prompt can never cancel — confirm
            # whether cancelling from the phone field should be possible.
            print('En teléfono solo puede ingresar numeros.-')
        elif texto=='Contraseña: ' and len(dato)<6:
            print('La contraseña debe contener mínimo 6 digitos.-')
        elif dato.upper() == 'EXIT' or dato != '':
            return dato
def buscar_articulo(id_articulo_elegido):
    """Return the article with the given id, or None when absent."""
    return next((articulo for articulo in lista_articulos
                 if articulo.id == id_articulo_elegido), None)
def crear_id(texto):
    """Return the next sequential id for 'usuario', 'articulo' or comments."""
    if texto == 'usuario':
        coleccion = lista_usuarios
    elif texto == 'articulo':
        coleccion = lista_articulos
    else:
        coleccion = lista_comentarios
    if not coleccion:
        return 1
    return coleccion[-1].id + 1
def usuario_logueado():
    """Return the currently logged-in user, or None when nobody is online."""
    return next((usuario for usuario in lista_usuarios
                 if usuario.online is True), None)
def menu_usuario_publico():
    """Interactive menu for a logged-in public user.

    Options: comment on an article, list all articles with their comments,
    or log out (which ends the loop).
    """
    while True:
        try:
            op = int(input("Elige una opción: \n1. Comentar un articulo. \n2. Listar articulos y comentarios. \n3. Cerrar sesión. \nIngrese ópcion: "))
            if op==1:
                print('------------------')
                mostrar_articulos()
                print('------------------')
                if len(lista_articulos)!=0:
                    while True:
                        id_articulo_elegido = int(input('Ingrese el nro del artículo que quiere comentar: '))
                        contenido = ingresar_validar('Ingrese comentario: ')
                        if buscar_articulo(id_articulo_elegido):
                            # BUG FIX: usuario_logueado is a function; the
                            # original read `.id` off the function object
                            # itself (usuario_logueado.id), raising
                            # AttributeError on every comment.  The
                            # collaborator menu already calls it correctly.
                            nuevo_comentario= Comentario(crear_id('comentario'),id_articulo_elegido,usuario_logueado().id,contenido)
                            nuevo_comentario.set_comentario()
                            lista_comentarios.append(nuevo_comentario)
                            print('Comentario agregado con éxito.-')
                            print('------------------')
                            mostrar_articulo_comentarios(id_articulo_elegido)
                            break
                        else:
                            print('Opción inválida. Inténtalo nuevamente.')
                            print('------------------')
            elif op == 2:
                print('------------------')
                mostrar_todos_articulos_comentarios()
                print('------------------')
            elif op == 3:
                print('------------------')
                usuario_logueado().set_login()
                break
        except ValueError:
            print('------------------')
            print("Opción inválida. Inténtalo nuevamente.")
            print('------------------')
def menu_usuario_colaborador():
    """Interactive menu for a logged-in collaborator.

    Options: comment on an article, publish a new article, list all
    articles with their comments, or log out (which ends the loop).
    """
    while True:
        try:
            op = int(input("Elige una opción: \n1. Comentar un artículo. \n2. Publicar Artículo. \n3. Listar Articulos y Comentarios. \n4. Cerrar sesión. \nIngrese ópcion: "))
            if op==1:
                mostrar_articulos()
                if len(lista_articulos) !=0:
                    # Repeat until a valid article id is commented on.
                    while True:
                        id_articulo_elegido = int(input('Ingrese el nro del articulo que quiere comentar: '))
                        if buscar_articulo(id_articulo_elegido):
                            contenido = ingresar_validar('Ingrese comentario: ')
                            nuevo_comentario= Comentario(crear_id('comentario'),id_articulo_elegido,usuario_logueado().id,contenido)
                            nuevo_comentario.set_comentario()
                            lista_comentarios.append(nuevo_comentario)
                            print('------------------')
                            print('Comentario agregado con éxito.-')
                            print('------------------')
                            mostrar_articulo_comentarios(id_articulo_elegido)
                            break
                        else:
                            print('------------------')
                            print('Opción inválida. Inténtalo nuevamente.')
                            print('------------------')
            elif op ==2:
                # Publish a new article credited to the logged-in user.
                print('------------------')
                titulo = ingresar_validar('Título: ')
                resumen = ingresar_validar('Resumen: ')
                contenido = ingresar_validar('Contenido: ')
                imagen = 'Imagen'
                nuevo_articulo= Articulo(crear_id('articulo'),usuario_logueado().id,titulo,resumen,contenido,imagen)
                nuevo_articulo.set_publicar_articulo()
                lista_articulos.append(nuevo_articulo)
                print('------------------')
                print('Articulo agregado con éxito.-')
                print('------------------')
            elif op ==3:
                mostrar_todos_articulos_comentarios()
            elif op == 4:
                print('------------------')
                usuario_logueado().set_login()
                print('------------------')
                break
        except ValueError:
            print('------------------')
            print("Opción inválida. Inténtalo nuevamente.")
            print('------------------')
def menu_principal():
    """Top-level menu: register (public/collaborator), log in, or quit.

    NOTE(review): the registration flows for Publico and Colaborador are
    near-duplicates and would benefit from a shared helper.
    """
    while True:
        try:
            op = int(input("Elige una opción: \n1. Registrarse \n2. Loguearse \n3. Salir \nIngrese opción: "))
            if op == 1:
                print('')
                while True:
                    try:
                        print('------------------')
                        op = int(input("Elige el tipo de usuario: \n1. Usuario Público \n2. Colaborador \n3. Volver al Menu Anterior \nIngrese opción: "))
                        if op == 1:
                            while True:
                                print('')
                                print('------ [ Escriba EXIT para cancelar y salir ] ------')
                                print('---------- INGRESE LOS DATOS DEL USUARIO --------')
                                nombre = ingresar_validar('Nombre: ')
                                if nombre.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                apellido = ingresar_validar('Apellido: ')
                                if apellido.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                telefono = ingresar_validar('Teléfono: ')
                                if telefono.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                username = ingresar_validar('Username: ')
                                if username.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                email = ingresar_validar('Email: ')
                                if email.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                contrasena = ingresar_validar('Contraseña: ')
                                if contrasena.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                print('')
                                print('------ DATOS INGRESADOS ------')
                                print(f'Nombre: {nombre} \nApellido: {apellido} \nTeléfono: {telefono} \nUsername: {username} \nEmail: {email} \nContraseña: {contrasena}')
                                print('------------------')
                                print('Confirma = 1  Volver a ingresar datos = 2  Volver menú anterior = 3')
                                respuesta = input('Ingrese opción: ')
                                try:
                                    # NOTE(review): this condition is always
                                    # False ('1' and '3' simultaneously) and
                                    # the try/except is dead code; the real
                                    # handling happens in the ifs below.
                                    if respuesta == '1' and respuesta == '3':
                                        break
                                except ValueError:
                                    print("Opción inválida. Inténtalo nuevamente.")
                                if respuesta=='1':
                                    avatar = 'imagen'
                                    if existe_usuario(username)== True:
                                        print('El username ingresado ya está en uso.')
                                        print('------------------')
                                        break
                                    print('------------------')
                                    usuario_nuevo= Publico(crear_id('usuario'),nombre,apellido,telefono,username,email,contrasena,avatar)
                                    usuario_nuevo.set_registrar()
                                    lista_usuarios.append(usuario_nuevo)
                                    print ('Usuario Público registrado con éxito')
                                    print('')
                                    break
                                if respuesta=='3':
                                    break
                            if respuesta=='1':
                                break
                        elif op == 2:
                            while True:
                                print('')
                                print('------ [ Escriba EXIT para cancelar y salir ] ------')
                                print('------ INGRESE LOS DATOS DEL USUARIO ------')
                                nombre = ingresar_validar('Nombre: ')
                                if nombre.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                apellido = ingresar_validar('Apellido: ')
                                if apellido.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                telefono = ingresar_validar('Teléfono: ')
                                if telefono.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                username = ingresar_validar('Username: ')
                                if username.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                email = ingresar_validar('Email: ')
                                if email.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                contrasena = ingresar_validar('Contraseña: ')
                                if contrasena.upper() == 'EXIT':
                                    respuesta='3'
                                    break
                                print('')
                                print('------ DATOS INGRESADOS ------')
                                print(f'Nombre: {nombre} \nApellido: {apellido} \nTeléfono: {telefono} \nUsername: {username} \nEmail: {email} \nContraseña: {contrasena}')
                                print('------------------')
                                print('Confirma = 1  Volver a ingresar datos = 2  Volver menú anterior = 3')
                                respuesta = input('Ingrese opción: ')
                                try:
                                    # NOTE(review): dead code — see the note
                                    # in the Publico branch above.
                                    if respuesta == '1' and respuesta == '3':
                                        break
                                except ValueError:
                                    print("Opción inválida. Inténtalo nuevamente.")
                                if respuesta=='1':
                                    avatar = 'imagen'
                                    if existe_usuario(username)== True:
                                        print('El username ingresado ya está en uso.')
                                        print('------------------')
                                        break
                                    print('------------------')
                                    usuario_nuevo= Colaborador(crear_id('usuario'),nombre,apellido,telefono,username,email,contrasena,avatar)
                                    usuario_nuevo.set_registrar()
                                    lista_usuarios.append(usuario_nuevo)
                                    print ('Usuario Colaborador registrado con éxito')
                                    print('')
                                    break
                                if respuesta=='3':
                                    break
                            if respuesta=='1':
                                break
                        elif op == 3:
                            print('------------------')
                            break
                    except ValueError:
                        print("Opción inválida. Inténtalo nuevamente.")
            elif op == 2:
                if len(lista_usuarios)!=0:
                    print('------LOGIN------')
                    username = ingresar_validar("Username: ")
                    if existe_usuario(username):
                        contrasena = ingresar_validar("Contraseña: ")
                        for usuario in lista_usuarios:
                            if usuario.username == username and usuario.contrasena == contrasena:
                                usuario.set_login()
                                print('------------------')
                                print(f'Usuario: {usuario.username}, Apellido: {usuario.apellido}, Nombre: {usuario.nombre}, Tipo: {usuario.__class__.__name__}')
                                print('------------------')
                                # Dispatch to the menu matching the user type.
                                if isinstance(usuario,Publico):
                                    menu_usuario_publico()
                                else:
                                    menu_usuario_colaborador()
                                break
                            else:
                                print("Inicio de sesión fallido. Verifica tus credenciales.")
                                print('------------------')
                    else:
                        print('------------------')
                        print('Usuario incorrecto, vuelva a intentarlo.-')
                        print('------------------')
                else:
                    print('------------------')
                    print('No existe ningun usuario, debe registrarse!!!')
                    print('------------------')
                    print('')
            elif op == 3:
                print('')
                print('--------- FIN ---------')
                break
        except ValueError:
            print('------------------')
            print("Opción inválida. Inténtalo nuevamente.")
            print('------------------')
# PROGRAMA
# In-memory stores shared by the menu functions (no persistence).
lista_usuarios = []
lista_articulos = []
lista_comentarios = []
print('------BIENVENIDO------')
menu_principal()
{
"api_name": "datetime.date.today",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.d... |
26998236363 | import os
import sys
import json
import shutil
import tempfile
import re
import glob
import traceback
import pyodbc
import requests
import datetime
import calendar
import sqlalchemy
from dateutil import relativedelta
import pandas as pd
pd.set_option('display.max_columns', None)

# Source databases/spreadsheets; {year}/{yy} placeholders are filled at query
# time.
BC_PERMIT_DB_NORTH_PATH = r"\\inpdenafiles02\parkwide\Backcountry\Backcountry Permit Database\BC Permits Data {year}.mdb"
BC_PERMIT_DB_SOUTH_PATH = r"\\INPDENAFILES11\talk\ClimbersDatabase\Backcountry Permit Database\Backcountry Database\{year} BC Program\BC Permits Data {year}.mdb"
MSLC_VISITOR_COUNT_PATH = r"\\inpdenafiles02\teams\Interp\Ops All, Statistics\MSLC Winter VC, Education\* Winter VC Stats.xlsx"
INTERP_FACILITIES_PATH = r"\\inpdenafiles02\teams\Interp\Ops All, Statistics\FY{yy}\FY{yy} Stats.xlsx"
LOG_DIR = r"\\inpdenaterm01\vistats\retrieve_data_logs"

# Map VEA people-counter location names to vistats column names.
VEA_LOCATION_NAMES = {
    'Winter Visitor Center': 'mslc_visitors',
    'Summer Visitor Center': 'dvc_visitors'
}

# Map bus-operator codes to vistats passenger-count field names.
BUS_FIELDS = {
    'CDN': 'camp_denali_bus_passengers',
    'DBL': 'denali_backcountry_lodge_bus_passengers',
    'KRH': 'kantishna_roadhouse_bus_passengers',
    'TWT': 'twt_bus_passengers',
    'DNH': 'dnht_bus_passengers',
    'KXP': 'ke_bus_passengers'
}

# Define the label IDs that should be automatable so that if the associated query returns an empty result, it can be
# filled with a 0 to distinguish them from values that have yet to be filled in. I can't just query the value_labels
# table for all winter or summmer fields because fields that aren't automatably queryable shouldn't be filled with a 0
VALUE_LABEL_IDS = {
    'winter': [1, 12, 13, 29, 31, 32, 33, 34, 35, 36, 37, 38],
    'summer': [12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 27, 29, 31,
               32, 33, 34, 35, 36, 37, 38, 40, 50, 52, 53]
}
def read_json_params(params_json):
    '''
    Read and validate a parameters JSON file

    :param params_json: path to JSON file
    :return: dictionary of params
    :raises ValueError: if any required key is missing from the file
    '''
    required = pd.Series(['ssl_cert',
                          'vea_client_id',
                          'vea_client_secret',
                          'vistats_db_credentials',
                          'savage_db_credentials'
                          ])

    with open(params_json) as j:
        params = json.load(j)

    missing = required.loc[~required.isin(params.keys())]
    if len(missing):
        # BUG FIX: the original only raised when an unrelated 'LOG_DIR' key
        # happened to be present in the file, so invalid configs were
        # silently accepted.  Missing required keys are always an error.
        msg = 'Invalid config JSON: {file}. It must contain all of "{required}" but "{missing}" are missing'\
            .format(file=params_json, required='", "'.join(required), missing='", "'.join(missing))
        raise ValueError(msg)

    return params
def write_log(log, LOG_DIR, timestamp):
log_file_path = os.path.join(LOG_DIR, '{0}_log_{1}.json'.format(os.path.basename(__file__).replace('.py', ''),
re.sub('\D', '', timestamp)))
with open(log_file_path, 'w') as f:
json.dump(log, f, indent=4)
def query_access_db(db_path, sql):
'''
Make a temporary copy of the access DB to prevent establishing an exclusive lock on a file that other people
might be using
:param db_path: str path to the original DB
:param sql: SQL statement
:return: pandas DataFrame of the SQL result
'''
# Copy to temp dir
temp_dir = tempfile.gettempdir()
temp_db_path = os.path.join(temp_dir, os.path.basename(db_path))
shutil.copy(db_path, temp_dir)
# Connect and run query
conn = pyodbc.connect(r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s' % (temp_db_path))
bc_stats = pd.read_sql(sql, conn)
conn.close()
# Try to delete the temp file
try:
os.remove(temp_db_path)
except:
pass
return bc_stats
def run_queries(params, log, query_date, current_date=None):
query_year = query_date.year
query_month = query_date.month
start_date = '{year}-{month}-1'.format(year=query_year, month=query_month)
end_date = '{year}-{month}-1'.format(year=current_date.year, month=current_date.month)
season = 'summer' if query_month in range(5, 10) else 'winter'
data = []
##############################################################################################################
######################### BC Permit DBs ######################################################################
##############################################################################################################
users_sql = '''
SELECT
sum(users) AS bc_users,
sum(user_nights) AS bc_user_nights
FROM (
SELECT
1 AS constant,
MAX(Itinerary.[Number of People]) AS users,
SUM(Itinerary.[Number of People]) as user_nights
FROM Itinerary
WHERE
MONTH(Itinerary.[Camp Date])={month} AND
YEAR(Itinerary.[Camp Date])={year}
GROUP BY MONTH(Itinerary.[Camp Date]), [permit number]
)
GROUP BY constant;
'''.format(month=query_month, year=query_year)
for side, path in {'north': BC_PERMIT_DB_NORTH_PATH, 'south': BC_PERMIT_DB_SOUTH_PATH}.items():
bc_permit_db_path = path.format(year=query_year)
if not os.path.isfile(bc_permit_db_path):
log['errors'].append({'action': 'reading %s BC permit DB' % side,
'error': 'BC Permit DB for {side} side does not exist: {path}'
.format(side=side, path=bc_permit_db_path)
})
else:
bc_stats = pd.DataFrame()
try:
bc_stats = query_access_db(bc_permit_db_path, users_sql)
except:
log['errors'].append({'action': 'reading %s BC permit DB' % side,
'error': traceback.format_exc()
})
if len(bc_stats):
data.append(bc_stats\
.rename(columns={c: f'{c}_{side}' for c in bc_stats.columns})\
.T\
.reset_index()\
.rename(columns={'index': 'value_label_id', 0: 'value'})
)
##############################################################################################################
################################### climbing permits #########################################################
##############################################################################################################
sql = f'''
SELECT
lower(mountain_name) AS mountain_name,
count(*) AS climbers,
sum(days) AS climber_user_nights
FROM
(
SELECT DISTINCT
expedition_member_id,
mountain_name,
least(coalesce(actual_return_date, now())::date, '{end_date}'::date) - greatest(actual_departure_date, '{start_date}'::date) AS days
FROM registered_climbs_view
WHERE
actual_departure_date IS NOT NULL AND
coalesce(special_group_type_code, -1) <> 3 AND
actual_departure_date BETWEEN '{start_date}' AND '{end_date}'::date - 1
) _
GROUP BY mountain_name;
'''
engine_uri = sqlalchemy.engine.URL.create('postgresql', **params['climberdb_credentials'])
engine = sqlalchemy.create_engine(engine_uri)
# if not os.path.exists(CLIMBING_PERMIT_DB_PATH):
# log['errors'].append({'action': 'querying climbing permit DB',
# 'error': 'File does not exist: %s' % CLIMBING_PERMIT_DB_PATH})
# else:
user_nights = pd.DataFrame()
try:
user_nights = pd.read_sql(sql, engine)
except:
log['errors'].append({'action': 'querying climbing permit DB',
'error': traceback.format_exc()
})
if len(user_nights):
# transform query results by
# setting the index
# making sure both denali and foraker are in the data
# filling nulls
# resetting the index to get mountain name as a column
# the unpivoting to make it flat again
climbing_stats = user_nights\
.set_index('mountain_name')\
.reindex(['denali', 'foraker'])\
.fillna(0)\
.reset_index()\
.melt(id_vars='mountain_name', var_name='value_label_id')
climbing_stats.value_label_id = climbing_stats.mountain_name + '_' + climbing_stats.value_label_id
climbing_stats = climbing_stats.reindex(columns=['value_label_id', 'value'])
else:
climbing_stats = pd.DataFrame([
{'value_label_id': 'denali_climber_user_nights', 'value': 0},
{'value_label_id': 'foraker_climber_user_nights', 'value': 0},
{'value_label_id': 'denali_climbers', 'value': 0},
{'value_label_id': 'foraker_climbers', 'value': 0}
])
data.append(climbing_stats)
###########################################################################################################
################################## visitor center counts ##################################################
###########################################################################################################
# Get token
try:
token_response = requests.post('https://auth.sensourceinc.com/oauth/token',
headers={"Content-type": "application/json"},
data='{' + '''
"grant_type": "client_credentials",
"client_id": "{vea_client_id}",
"client_secret": "{vea_client_secret}"
'''.format(**params) + '}',
verify=params['ssl_cert'])
token_response.raise_for_status()
token = token_response.json()['access_token']
except:
log['errors'].append({'action': 'querying Vea REST API token',
'error': traceback.format_exc()
})
# Get ID for location
if 'token' in locals():
try:
response = requests.get('https://vea.sensourceinc.com/api/location',
headers={"Content-type": "application/json",
'Authorization': 'Bearer %s' % token},
verify=params['ssl_cert'])
response.raise_for_status()
locations = pd.DataFrame(response.json())
except:
log['errors'].append({'action': 'querying Vea REST API location IDs',
'error': traceback.format_exc()
})
_, last_day_of_month = calendar.monthrange(query_year, query_month)
if 'locations' in locals():
try:
response = requests.get('https://vea.sensourceinc.com/api/data/traffic',
headers={"Content-type": "application/json",
'Authorization': 'Bearer %s' % token},
params={
'relativeDate': 'custom',
'startDate': start_date,
'endDate': end_date,
'dateGroupings': 'month',
'entityType': 'location',
'entityIds': locations.locationId.tolist(),
'metrics': 'ins'
},
verify=params['ssl_cert'])
response.raise_for_status()
response_json = response.json()
if len(response_json['messages']):
log['messages'].append({'context': 'querying Vea REST API data',
'message': response_json['messages']
})
# Make a data frame from the result
# replace names in the Vea system with names of fields in DB
# pivot the data so each location (now fields in the DB) is a column and the data only have one row
facility_counts = pd.DataFrame(response_json['results'])
# Even though the endDate parameter is supposed to create a non-
data.append(
facility_counts.loc[pd.to_datetime(facility_counts.recordDate_month_1).dt.month == (query_month)] \
.replace({'name': {k: '%s_%s' % (v, season) for k, v in VEA_LOCATION_NAMES.items()}}) \
.reindex(columns=['name', 'sumins']) \
.rename(columns={'name': 'value_label_id',
'sumins': 'value'})
)
except:
log['errors'].append({'action': 'querying Vea REST API data',
'error': traceback.format_exc()
})
# For now mslc counts should be by hand
if season == 'winter':
excel_doc = None
try:
mslc_counts_path = glob.glob(MSLC_VISITOR_COUNT_PATH)[0]
excel_doc = pd.ExcelFile(mslc_counts_path)
except:
log['errors'].append({'action': 'reading MSLC hand counts',
'error': traceback.format_exc()
})
if excel_doc:
month_names = pd.Series(pd.date_range('2020-1-1', '2021-1-1', freq='M').strftime('%B').str.lower(), index=range(1, 13))
sheets = pd.Series({sn: sn.lower() for sn in excel_doc.sheet_names if len(sn.split()) == 1})
this_month_name = month_names[query_month]
mslc_daily_counts = pd.DataFrame()
try:
this_sheet = sheets[sheets.apply(lambda x: this_month_name.startswith(x))].index[0]
mslc_daily_counts = excel_doc.parse(this_sheet)
mslc_count = mslc_daily_counts.dropna(axis=0, how='all').iloc[-1, 2]
data.append(pd.DataFrame([{'value_label_id': 'mslc_visitors_winter', 'value': mslc_count}]))
except:
log['errors'].append({'action': 'reading MSLC hand counts sheet for %s' % this_month_name,
'error': traceback.format_exc()
})
# Kennels are also recorded by hand for now
two_digit_fiscal_year = query_date\
.replace(year=query_year + 1 if query_month >= 10 else query_year)\
.strftime('%y')
all_kennels = pd.DataFrame()
try:
all_kennels = pd.read_excel(INTERP_FACILITIES_PATH.format(yy=two_digit_fiscal_year), sheet_name='Kennels')\
.set_index('Date')
except:
log['errors'].append({'action': 'reading Kennels spreadsheet',
'error': traceback.format_exc()
})
# Get just the fields and rows containing counts for this month and sum them
if len(all_kennels):
kennels_count = all_kennels.loc[
all_kennels.index.month == query_month,
all_kennels.columns.str.startswith('Kennels') | all_kennels.columns.str.startswith('Dog Demo')
].sum().sum() # No longer an axis=None option to sum all
data.append(pd.DataFrame([{'value_label_id': 'kennels_visitors', 'value': kennels_count}]))
###########################################################################################################
################################## savage db queries ######################################################
###########################################################################################################
sql_template = '''
SELECT '{label}' AS value_label_id, sum(n_passengers) AS value
FROM {table}
WHERE datetime BETWEEN '{start}' AND '{end}'
GROUP BY extract(month FROM datetime)
'''
bus_sql = '''
SELECT bus_type AS value_label_id, sum(n_passengers) AS value
FROM buses
WHERE
datetime BETWEEN '{start}' AND '{end}' AND
bus_type in ('{bus_codes}')
GROUP BY bus_type, extract(month FROM datetime)
'''.format(start=start_date, end=end_date, bus_codes="', '".join(BUS_FIELDS.keys()))
transit_sql = '''
SELECT 'transit_bus_passengers' AS value_label_id, sum(n_passengers) AS value
FROM buses
WHERE
datetime BETWEEN '{start}' AND '{end}' AND
bus_type in ('SHU', 'CMP')
GROUP BY bus_type, extract(month FROM datetime)
'''.format(start=start_date, end=end_date)
research_sql = '''
SELECT sum(n_passengers) AS value
FROM nps_approved
WHERE
datetime BETWEEN '{start}' AND '{end}' AND
approved_type = 'RSC'
GROUP BY extract(month FROM datetime)
'''.format(start=start_date, end=end_date)
lottery_sql = '''
SELECT 'road_lottery_permits' as value_label_id, sum(n_passengers) AS value
FROM road_lottery
WHERE datetime BETWEEN '{start}' AND '{end}'
GROUP BY extract(month FROM datetime)
'''
reserved_pov_sql = '''
SELECT 'reserved_pov_passengers' AS value_label_id, sum(n_passengers) AS value
FROM nps_approved
WHERE
datetime BETWEEN '{start}' AND '{end}' AND
approved_type = 'REC'
GROUP BY value_label_id, extract(month FROM datetime)
'''.format(start=start_date, end=end_date)
guided_cua_sql = '''
SELECT 'guided_cua_pov_passengers' AS value_label_id, sum(n_passengers) AS value
FROM nps_approved
WHERE
datetime BETWEEN '{start}' AND '{end}' AND
approved_type = 'GUI'
GROUP BY value_label_id, extract(month FROM datetime)
'''.format(start=start_date, end=end_date)
# Only run this query for summer months
if season == 'summer':
try:
engine_uri = sqlalchemy.engine.URL.create('postgresql', **params['savage_db_credentials'])
engine = sqlalchemy.create_engine(engine_uri)
with engine.connect() as conn:
bikes = pd.read_sql(sql_template.format(label='cyclists_past_savage', table='cyclists', start=start_date, end=end_date), conn)
road_lottery = pd.read_sql(lottery_sql.format(start=start_date, end=end_date), conn)
accessibility = pd.read_sql(sql_template.format(label='accessibility_permit_passengers', table='accessibility', start=start_date, end=end_date), conn)
photographers = pd.read_sql(sql_template.format(label='pro_photographers', table='photographers', start=start_date, end=end_date), conn)
reserved_povs = pd.read_sql(reserved_pov_sql, conn)
guided_cua_povs = pd.read_sql(guided_cua_sql, conn)
employees = pd.read_sql(sql_template.format(label='non_rec_users', table='employee_vehicles', start=start_date, end=end_date), conn)
researchers = pd.read_sql(research_sql, conn)
non_rec_users = pd.DataFrame({'value_label_id': ['non_rec_pov_passengers'],
'value': pd.concat([employees, researchers]).value.sum()
})
tours = pd.read_sql(bus_sql, conn)\
.replace({'value_label_id': BUS_FIELDS})
transit = pd.read_sql(transit_sql, conn)
data.extend([bikes, road_lottery, accessibility, photographers, non_rec_users, tours, transit, reserved_povs, guided_cua_povs])
except:
log['errors'].append({'action': 'querying Savage DB',
'error': traceback.format_exc()
})
###########################################################################################################
################################## glacier landings #######################################################
###########################################################################################################
landings_sql = '''
SELECT 'scenic_landings_south' AS value_label_id, sum(n_passengers) AS value
FROM flights INNER JOIN landings ON flights.id = landings.flight_id
WHERE
landings.landing_type = 'scenic' AND
flights.departure_datetime BETWEEN '{start}' AND '{end}' AND
flights.operator_code NOT IN ('TST', 'KAT')
GROUP BY value_label_id
'''.format(start=start_date, end=end_date)
north_side_sql = '''
SELECT 'aircraft_visitors_north_winter' AS value_label_id, sum(n_passengers) AS value
FROM flights INNER JOIN landings ON flights.id = landings.flight_id
WHERE
flights.departure_datetime BETWEEN '{start}' AND '{end}' AND
flights.operator_code='KAT'
GROUP BY value_label_id
'''.format(start=start_date, end=end_date)
try:
engine_uri = sqlalchemy.engine.URL.create('postgresql', **params['landings_db_credentials'])
engine = sqlalchemy.create_engine(engine_uri)
with engine.connect() as conn:
data.extend([
pd.read_sql(landings_sql, conn),
pd.read_sql(north_side_sql, conn)
])
except:
log['errors'].append({'action': 'querying landings',
'error': traceback.format_exc()
})
counts = pd.concat(data, sort=False).drop_duplicates(subset='value_label_id', keep='last').fillna(0)
return counts
def main(param_file, current_date=None):
now = datetime.datetime.now()
if current_date:
try:
current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d')
except:
# Raise this error instead of logging because a call signature with current_date specified will only be run
# manually (not by an automated task)
raise ValueError('Current date "%s" not understood' % current_date)
else:
current_date = now
query_date = current_date - relativedelta.relativedelta(months=1)
query_year = query_date.year
query_month = query_date.month
start_date = '{year}-{month}-1'.format(year=query_year, month=query_month)
season = 'summer' if query_month in range(5, 10) else 'winter'
# Make the log dir in case it doesn't already exist and set up a log dictionary for storing errors/messages
if not os.path.isdir(LOG_DIR):
os.makedirs(LOG_DIR)
log = {
'run_time': now.strftime('%Y-%m-%d %H:%M'),
'errors': [],
'messages': []
}
if not os.path.isfile(param_file):
log['errors'] = 'param_file %s does not exist' % param_file
sys.exit()
try:
params = read_json_params(param_file)
except:
log['errors'] = traceback.format_exc()
sys.exit()
# Query data sources
counts = run_queries(params, log, query_date, current_date)
try:
engine_uri = sqlalchemy.engine.URL.create('postgresql', **params['vistats_db_credentials'])
engine = sqlalchemy.create_engine(engine_uri)
with engine.connect() as conn, conn.begin():
# replace labels with IDs
label_ids = pd.read_sql("SELECT id, retrieve_data_label FROM value_labels", conn) \
.set_index('retrieve_data_label')\
.id.to_dict()
counts.value_label_id = counts.value_label_id.replace(label_ids).astype(int)
# Make sure any queries that returned nothing are set to 0 (rather than just missing entirely)
counts = counts.append(
pd.DataFrame({'value_label_id': [i for i in VALUE_LABEL_IDS[season] if i not in counts.value_label_id.values]}))\
.fillna(0)
# Insert count_period record
recordset = conn.execute("INSERT INTO count_periods (count_date) VALUES ('%s') RETURNING id" % start_date)
result = recordset.fetchall()
recordset.close()
if len(result) == 1:
period_id = result[0][0]
else:
raise RuntimeError('Invalid result returned from count_period INSERT statement: %s' % result)
counts['period_id'] = period_id
counts['entered_by'] = os.path.basename(__file__)
counts['submission_time'] = now
# insert counts
counts.to_sql('counts', conn, index=False, if_exists='append')
except:
log['errors'].append({'action': 'importing data',
'error': traceback.format_exc()
})
write_log(log, LOG_DIR, now.strftime('%Y%m%d-%H%M'))
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
| smHooper/vistats | py/retrieve_data.py | retrieve_data.py | py | 26,376 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.set_option",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_n... |
35655245303 | from win32com.client import Dispatch
import requests
import json
def speak(str):
talk = Dispatch("SAPI.SpVoice")
talk.Speak(str)
if __name__ == '__main__':
speak("Hello, welcome to newstoday.com. I am your news anchor")
speak(" top news for today from are ")
print("Hello, welcome to newstoday.com. I am your news anchor")
print(" top news for today from India and World are ")
# You can use any of the given api.
#google top ten headlines
#url = ('https://newsapi.org/v2/top-headlines?sources=google-news&apiKey=301dde8d4fd841e097ffeac8ed52d953') #fetch the url
#top news from USA
#url = ('https://newsapi.org/v2/top-headlines?country=us&apiKey=301dde8d4fd841e097ffeac8ed52d953')
#top news from India and world. I found this api better than other.
url = ('https://newsapi.org/v2/top-headlines?country=in&apiKey=301dde8d4fd841e097ffeac8ed52d953')
news = requests.get(url).text #get the url and store in 'news' as text
news_json = json.loads(news) #use json to load
#print(news_json["articles"]) #print all "articles" present in API
article = news_json['articles'] #store articles from API in article
#this is one approach
#for a in article: #for each a in article print 'title' of a
# print(a['title'])
# print("next news")
for i in range (1,16): #only limited no of news..generates from 1 to 15
item = f"news no {i}" # i have used the concept of f strings
print(item)
speak(item) # speak funcn takes only string so i converted changing val of 'i' to string and passed here.
print(article[i]['title']) #same as 'for i in articles: print i'
speak(article[i]['title'])
if i == 14:
print("our last news is")
speak("our last news is")
if i==15:
print("thank you for listening,have a great day, goodbye")
speak("thank you for listening, have a great day, goodbye")
break
| Kaushal-Dhungel/newsreader | newsreader.py | newsreader.py | py | 2,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "win32com.client.Dispatch",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 29,
"usage_type": "call"
}
] |
24893217036 | # -*- coding:utf-8 -*-
# @Author: james
# @Date: 2019/1/7
# @File: base.py
# @Software: PyCharm
import json
import scrapy
from scrapy import Request, FormRequest
from lxml import etree
from WaiBaoSpider.utils.csvWriter import CSVDumper
from WaiBaoSpider.utils.base import unicode_body, deal_ntr
import os
class BeiJingSpider(scrapy.Spider):
name = "beijing"
base_url = "http://rexian.beijing.gov.cn/default/com.web.complain.complain.moreNewComplain.biz.ext"
data_path = os.getcwd() + "/WaiBaoSpider/data/"
if os.path.exists(data_path):
pass
else:
os.mkdir(data_path)
dump_list = CSVDumper(data_path + "%s_list.csv" % name)
dump_detail = CSVDumper(data_path + "%s_detail.csv" % name)
custom_settings = {
'DOWNLOAD_DELAY': 0.1,
}
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
}
data_form = {
"PageCond/begin": "",
"PageCond/isCount": "true",
"PageCond/length": "6",
}
type_data = {
"1": u"咨询",
"2": u"建议",
"3": u"投诉",
}
def start_requests(self):
# for i in range(1, 267):
i = 0
while i < 10283:
# while i < 10:
self.data_form["PageCond/begin"] = str(i)
print(i)
yield FormRequest(self.base_url, formdata=self.data_form, callback=self.parse_list, headers=self.headers)
i += 10
def parse_list(self, response):
body = unicode_body(response)
res = json.loads(body)
lines = res["newComplainnList"]
print(len(lines))
for info in lines:
item = {}
item[u"类型"] = self.type_data.get(info["letterType"], u"")
item[u"标题"] = info.get("letterTitle", "")
item[u"评价人数"] = info.get("reCode", "")
item[u"发起时间"] = info.get("fomateWriteDate", "")
id = info["originalId"]
item[
u"链接"] = "http://rexian.beijing.gov.cn/default/com.web.complain.complainDetail.flow?originalId={}".format(
id)
text = info.get("letterContent", "")
author = info.get("writeUser", "")
self.dump_list.process_item(item)
yield Request(item[u"链接"], callback=self.parse_detail, headers=self.headers,
meta={"url": item[u"链接"], "text": text, "author": author, "title": item[u"标题"],
"pingnum": item[u"评价人数"]})
def parse_detail(self, response):
body = unicode_body(response)
data = response.meta
html = etree.HTML(body)
item = {}
item[u"标题"] = data["title"]
item[u"来信人"] = data["author"]
item[u"来信时间"] = html.xpath("//p[@class='font12 gray time_mail']/span[2]/text()")[0].strip() if html.xpath(
"//p[@class='font12 gray time_mail']/span[2]/text()") else ""
item[u"网友评价"] = data["pingnum"]
item[u"处理部门"] = html.xpath("(//div[@class='mail_track'])[2]/span[1]/text()")[0].strip() if html.xpath(
"(//div[@class='mail_track'])[2]/span[1]/text()") else ""
item[u"回复时间"] = html.xpath("(//div[@class='mail_track'])[2]/span[2]/text()")[0].strip() if html.xpath(
"(//div[@class='mail_track'])[2]/span[2]/text()") else ""
item[u"回复内容"] = html.xpath("(//div[@class='mail_track'])[2]/p//text()") if html.xpath(
"(//div[@class='mail_track'])[2]/p//text()") else []
item[u"回复内容"] = deal_ntr("".join(item[u"回复内容"]))
item[u"赞"] = html.xpath("(//a[@id]/span[@id])[1]/text()")[0].strip() if html.xpath(
"(//a[@id]/span[@id])[1]/text()") else ""
item[u"踩"] = html.xpath("(//a[@id]/span[@id])[2]/text()")[0].strip() if html.xpath(
"(//a[@id]/span[@id])[2]/text()") else ""
item[u"链接"] = data["url"]
self.dump_detail.process_item(item)
| jamesfyp/WaiBaoSpider | WaiBaoSpider/spiders/beijing.py | beijing.py | py | 4,102 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
13043081088 | import pytest
from iotile.core.hw.debug import SparseMemory
from iotile.core.exceptions import ArgumentError
@pytest.fixture(scope='function')
def single_segment():
mem = SparseMemory()
mem.add_segment(0, bytearray(range(0, 256)))
return mem
@pytest.fixture
def multi_segment(scope='function'):
mem = SparseMemory()
mem.add_segment(0, bytearray(range(0, 256)))
mem.add_segment(8192, bytearray(range(0, 256)))
return mem
def test_sparsememory_basicusage():
"""Make sure we can create a SparseMemory and use it
"""
mem = SparseMemory()
mem.add_segment(0x1000, bytearray(4096))
# Make sure slice and basic access work
assert mem[0x1000] == 0
dataslice = mem[0x1000:0x1400]
assert len(dataslice) == 0x400
# Make sure we can't access data we don't have
with pytest.raises(ArgumentError):
mem[0x900]
with pytest.raises(ArgumentError):
mem[0x2000]
with pytest.raises(ArgumentError):
mem[0x800:0x1200]
with pytest.raises(ArgumentError):
mem[0x1000:0x1200:2]
def test_getitem_multisegment(multi_segment):
mem = multi_segment
assert mem[255] == 255
assert mem[8192] == 0
assert mem[8193] == 1
def test_setitem_multisegment(multi_segment):
mem = multi_segment
mem[255] = 5
assert mem[255] == 5
mem[8192:8194] = (5, 10)
assert mem[8192] == 5
assert mem[8193] == 10
def test_stringify(single_segment):
mem = single_segment
lines = str(mem).rstrip().split('\n')
assert len(lines) == 16
assert len(lines[0]) == 78
def test_multistringify(multi_segment):
mem = multi_segment
print(str(mem))
| iotile/coretools | iotilecore/test/test_debug/test_sparsememory.py | test_sparsememory.py | py | 1,673 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "iotile.core.hw.debug.SparseMemory",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "iotile.core.hw.debug.SparseMemory",
"line_number": 15,
"usage_type": "call"
},
{
"... |
38463610858 | import base64
import cv2
import numpy as np
input_name = 'temp.bin'
output_name = 'temp.jpg'
with open(input_name, 'rb') as f:
f = f.read()
img = base64.standard_b64decode(f)
img = cv2.imdecode(np.frombuffer(img, dtype=np.uint8), -1)
cv2.imwrite(output_name, img)
| ZombaSY/util-collection | file converter/blob_to_img_writer.py | blob_to_img_writer.py | py | 282 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "base64.standard_b64decode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imdecode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",... |
14821631079 | import pygame
import sys
pygame.init()
pygame.mixer.init()
pygame.mixer.music.load("music.mp3")
pygame.mixer.music.play(-1)
Width = 1280
Height = 720
screen = pygame.display.set_mode((Width, Height))
pygame.display.set_caption("Pong V2")
white = (255, 255, 255)
black = (0, 0, 0)
clock = pygame.time.Clock()
paddle_speed = 2.5
paddle_y = Height / 2 - 50
ai_paddle_y = Height / 2 - 50
ball_speed = [3, 3]
ball_rect = pygame.Rect(Width / 2 - 5, Height / 2 - 5, 10, 10)
score = [0, 0]
def ai_update():
global ai_paddle_y
target_y = ball_rect.centery - 50
if abs(target_y - ai_paddle_y) > paddle_speed:
if target_y > ai_paddle_y:
ai_paddle_y += paddle_speed
else:
ai_paddle_y -= paddle_speed
if ai_paddle_y < 0:
ai_paddle_y = 0
if ai_paddle_y > Height - 100:
ai_paddle_y = Height - 100
def draw():
global paddle_y, ball_rect, ai_paddle_y
screen.fill(black)
#Net
for i in range(0, Height, 25):
pygame.draw.rect(screen, white, [Width / 2 - 2.5, i, 5, 10])
#Paddles
paddle1 = pygame.draw.rect(screen, white, [10, paddle_y, 10, 100])
paddle2 = pygame.draw.rect(screen, white, [Width - 20, ai_paddle_y, 10, 100])
#Ball
ball_rect.move_ip(ball_speed)
if ball_rect.colliderect(paddle1) or ball_rect.colliderect(paddle2):
ball_speed[0] = -ball_speed[0]
if ball_rect.colliderect(paddle1):
pygame.mixer.Sound("hit.wav").play().set_volume(0.5)
elif ball_rect.left < 0:
ball_speed[0] = -ball_speed[0]
score[1] += 1
ball_rect.center = (Width / 2, Height / 2)
elif ball_rect.right > Width:
ball_speed[0] = -ball_speed[0]
score[0] += 1
pygame.mixer.Sound("point.wav").play()
ball_rect.center = (Width / 2, Height / 2)
if ball_rect.top < 0 or ball_rect.bottom > Height:
ball_speed[1] = -ball_speed[1]
pygame.draw.rect(screen, white, ball_rect)
#Score
font = pygame.font.Font("font.ttf", 50)
text = font.render(str(score[0]), True, white)
screen.blit(text, (Width / 2 - 50, 10))
text = font.render(str(score[1]), True, white)
screen.blit(text, (Width / 2 + 25, 10))
#FPS
font = pygame.font.Font("font.ttf", 20)
text = font.render(str(int(clock.get_fps())), True, white)
screen.blit(text, (10, Height - 30))
pygame.display.flip()
while True:
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
paddle_y -= paddle_speed
if keys[pygame.K_s]:
paddle_y += paddle_speed
if keys[pygame.K_ESCAPE]:
pygame.mixer.music.stop()
pygame.quit()
sys.exit()
if paddle_y < 0:
paddle_y = 0
if paddle_y > Height - 100:
paddle_y = Height - 100
ai_update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.mixer.music.stop()
pygame.quit()
sys.exit()
draw()
clock.tick(60) | Pigiotyreal/Pong-V2 | src/main.py | main.py | py | 3,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load"... |
23930974543 | import os
import streamlit as st
import requests
from dotenv import load_dotenv
st.set_page_config(
page_title="advotis – Strafbarkeit prüfen",
page_icon="assets/advotis_icon.png",
layout="centered",
)
st.sidebar.image("assets/advotis_logo.png")
def category_to_result_text(category: str) -> str | None:
if category == "Beleidigung":
return "eine **Beleidigung** nach § 185 StGB"
elif category == "Formalbeleidigung":
return "eine **Formalbeleidigung** nach §§ 185, 192 StGB"
elif category == "Verleumdung":
return "**Verleumdung** nach § 187 StGB"
elif category == "Üble Nachrede":
return "**Üble Nachrede** nach § 186 StGB"
elif category == "Sonstiges":
return "**keinen Straftatsbestand**"
return None
st.markdown("""
# ⚖️ Strafbarkeit prüfen
Hier kannst du eine Aussage, der an dich gerichtet war, melden. Das Tool findet für dich heraus,
ob es sich um einen potenziellen Straftatbestand handelt und wenn ja, um welchen.
Zuerst müssen wir feststellen, ob die Anwendung deutschen Strafrechts überhaupt in Frage kommt,
da es nur unter bestimmten Bedingungen gilt.
""")
valid = False
germany = st.selectbox(
"Befand sich der/die Täter\*in in Deutschland, als die Aussage getätigt wurde?",
["", "Ja", "Nein", "Weiß ich nicht"]
)
if germany == "Ja":
valid = True
st.markdown("In diesem Fall gilt deutsches Strafrecht.")
elif germany == "Nein" or germany == "Weiß ich nicht":
german = st.selectbox(
"Ist der/die Täter\*in ein:e deutsche\*r Staatsbürger\*in oder lebt der/die Täter\*in in Deutschland?",
["", "Ja", "Nein", "Weiß ich nicht"]
)
if german == "Ja":
valid = True
st.markdown("In diesem Fall kommt deutsches Strafrecht in Frage.")
elif germany == "Nein" and german == "Nein":
st.markdown("In diesem Fall gilt deutsches Strafrecht **nicht**.")
elif germany == "Weiß ich nicht" or german == "Weiß ich nicht":
valid = True
st.markdown("""
**Möglicherweise** gilt das deutsche Strafrecht nicht in diesem Fall.
Falls es in diesem Fall doch gilt, kannst du die nächsten Fragen beantworten.
""")
if valid:
text = st.text_input("Gib hier die Aussage ein, die an dich gerichtet war.", type="password")
method = st.radio(
"Welche Auswertungsmethode möchtest du verwenden?",
["Automatisch durch künstliche Intelligenz", "Manuell durch Fragebogen"])
if method == "Automatisch durch künstliche Intelligenz":
start = st.button("Weiter")
url = "https://api.firstlanguage.in/api/classify"
load_dotenv()
headers = {
"Content-Type": "application/json",
"apikey": os.getenv("FIRST_LANGUAGE_KEY")
}
payload = {
"input": {
"text": text,
"lang": "de",
"labels": ["Beleidigung", "Formalbeleidigung", "Üble Nachrede", "Verleumdung", "Sonstiges"]
}
}
if start:
res = requests.request("POST", url, json=payload, headers=headers)
result = res.json()
if res.status_code == 200:
st.vega_lite_chart(result, use_container_width=True, spec={
'mark': {'type': 'bar', 'tooltip': True},
'encoding': {
'x': {'field': 'labels', 'type': 'nominal'},
'y': {'field': 'scores', 'type': 'quantitative'},
'color': {'field': 'labels', 'type': 'nominal'}
}
})
scores = result["scores"]
max_idx = scores.index(max(scores))
max_category = result["labels"][max_idx]
result_text = category_to_result_text(max_category)
st.success(f"""
Die künstliche Intelligenz hat analysiert, dass es sich in diesem Fall wahrscheinlich um {result_text} handelt.
""", icon="✅")
st.info("""
Dieses Ergebnis ist eine KI-basierte Einschätzung, die nicht der Wahrheit entsprechen muss.
Bitte beachte, dass dieses Tool keine Rechtsberatung ersetzt.
Die Erstberatung, die dieses Tool bietet, kann womöglich in deinem spezifischen Einzelfall nicht zutreffen.
Bitte konsultiere daher immer eine qualifizierte Anwältin oder einen qualifizierten Anwalt.
Du kannst die dazugehörigen originalen Gesetzestexte als zusätzliche Information lesen:
[originale Gesetzestexte](Gesetzestexte)
""", icon="ℹ️")
else:
st.error(f"Error {res.status_code}:")
st.json(result)
elif method == "Manuell durch Fragebogen":
result = None
provable = st.selectbox(
"Kann man die Aussage formal beweisen oder widerlegen?",
["", "Ja", "Nein"]
)
if provable == "Ja":
others = st.selectbox(
"Wurde die Aussage nur vor dir oder auch vor einer oder mehrerer anderer Personen getätigt?",
["", "Nur vor mir", "Auch vor einer oder mehrerer anderer Personen"]
)
if others == "Nur vor mir":
true_expression = st.selectbox(
"Ist die Aussage im Prinzip wahr, aber abwertend?",
["", "Ja", "Nein"]
)
if true_expression == "Ja":
result = "eine **Formalbeleidigung** nach §§ 185, 192 StGB"
elif true_expression == "Nein":
result = "eine **Beleidigung** nach § 185 StGB"
elif others == "Auch vor einer oder mehrerer anderer Personen":
false_expression = st.selectbox(
"Ist die Aussage beweisbar unwahr?",
["", "Ja", "Nein"]
)
if false_expression == "Ja":
result = "**Verleumdung** nach § 187 StGB"
elif false_expression == "Nein":
result = "**Üble Nachrede** nach § 186 StGB"
elif provable == "Nein":
judging = st.selectbox(
"Wertet dich die Aussage als Person herab?",
["", "Ja", "Nein"]
)
if judging == "Ja":
result = "eine **Beleidigung** nach § 185 StGB"
elif judging == "Nein":
reputation = st.selectbox(
"Schadet die Aussage deiner Reputation?",
["", "Ja", "Nein"]
)
if reputation == "Ja":
result = "**Verleumdung** nach § 187 StGB"
elif reputation == "Nein":
result = "**keinen Straftatsbestand**"
if result:
st.success(f"In diesem Fall handelt es sich wahrscheinlich um {result}.", icon="✅")
st.info("""
Dieses Ergebnis ist nur eine vorläufige Einschätzung basierend auf deinen Eingaben.
Bitte beachte, dass dieses Tool keine Rechtsberatung ersetzt.
Die Erstberatung, die dieses Tool bietet, kann womöglich in deinem spezifischen Einzelfall nicht zutreffen.
Bitte konsultiere daher immer eine qualifizierte Anwältin oder einen qualifizierten Anwalt.
Du kannst auch die dazugehörigen originalen Gesetzestexte als zusätzliche Information lesen:
[originale Gesetzestexte](Gesetzestexte)
""", icon="ℹ️")
| matzewolf/LegalLovesTechHackathon | pages/1_⚖️_Strafbarkeit_prüfen.py | 1_⚖️_Strafbarkeit_prüfen.py | py | 7,720 | python | de | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.image",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name"... |
33703956955 | from typing import List
import pytest
import networkx as nx
from networkx.exception import NetworkXNoPath
from src.domain.wordchainservice import WordChainService
@pytest.mark.parametrize(
"start_word,end_word,expected_chain",
[
("spin", "spot", ["spin", "spit", "spot"]),
("hide", "sort", ["hide", "hire", "sire", "sore", "sort"]),
],
)
def test_the_shortest_chain_is_found(
test_graph: nx.Graph, start_word: str, end_word: str, expected_chain: List[str]
) -> None:
subject = WordChainService(test_graph)
assert subject.find_chain(start_word, end_word) == expected_chain
def test_an_error_is_raised_if_a_chain_is_not_found(test_graph: nx.Graph) -> None:
test_graph.add_node("axon")
subject = WordChainService(test_graph)
with pytest.raises(NetworkXNoPath):
subject.find_chain("spin", "axon")
| gileslloyd/word-chain | tests/unit/domain/test_wordchainservice.py | test_wordchainservice.py | py | 860 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "networkx.Graph",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "src.domain.wordchainservice.WordChainService",
"line_number": 19,
"usage_type": "call"
},
{
"api_... |
74769733793 | '''
You are given an array people where people[i] is the weight of the ith person, and an infinite number of boats where each boat can carry a maximum weight of limit.
Each boat carries at most two people at the same time, provided the sum of the weight of those people is at most limit.
(Her tekne aynı anda en fazla iki kişiyi taşır, bu kişilerin ağırlıkları toplamının en fazla limit olmak şartı var.)
Return the minimum number of boats to carry every given person.
Example 1:
Input: people = [1,2], limit = 3
Output: 1
Explanation: 1 boat (1, 2)
Example 2:
Input: people = [3,2,2,1], limit = 3
Output: 3
Explanation: 3 boats (1, 2), (2) and (3)
Example 3:
Input: people = [3,5,3,4], limit = 5
Output: 4
Explanation: 4 boats (3), (3), (4), (5)
'''
from typing import List
class Solution:
def numRescueBoats(self, people: List[int], limit: int) -> int:
people.sort() # kilolara göre en küçükleri başa alarak sırala
left = 0
right = len(people)-1 #ındex tutuyor
boats_number = 0
while(left<=right): # sona gelene kadar
if(left==right):
boats_number+=1 # sona geldık tek basına bınecek
break
if(people[left]+people[right]<=limit): # en yakın 2 sı lımıt altındaysa ındex kayar sankı ılk kısı yok gıbı
left+=1
right-=1 # ındex bır yaklastırdı
boats_number+=1
return boats_number
if __name__ == '__main__':
solution = Solution()
people = [3,5,3,4]
limit = 5
print(solution.numRescueBoats(people,limit)) | bulentsiyah/data-preprocessing_cv-skills | leetcode/b/boats-to-save-people.py | boats-to-save-people.py | py | 1,624 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
}
] |
17052614858 |
import sys
from car import Car
from board import Board
from helper import load_json
class Game:
"""
The class represent the Game object, each game initializes with
his Board object that the game will be played on him.
The class handles A full session of the RUSH HOUR game by getting user
input each turn and moves the cars accordingly. A game will be finished
only when user choose to stop or if a certain car reach the target cell
"""
VALID_NAMES = 'YBOGWR'
VALID_DIRECTIONS = 'udlr'
VALID_ORIENTATIONS = '01'
MIN_LENGTH, MAX_LENGTH = 2, 4
COMMA, STOP = ',', '!'
COMMA_IND = 1
STOPPED = 'The game has stopped'
WON = 'You Won the game'
def __init__(self, board):
"""
Initialize a new Game object
:param board: An object of type board
"""
self.__board = board # The Board object the game will played on
def __single_turn(self):
"""
The function responsible A single turn iteration of the game, include
A treatment to user input, check it validness and make moves according
to user choice, if input is invalid An appropriate msg will be printed
:return: None while user input is not STOP game or WIN game
"""
user_input = input()
if user_input == Game.STOP:
return Game.STOPPED # user choose to stop the game
if len(user_input) == 3 and user_input[Game.COMMA_IND] == Game.COMMA:
car_name, direction = user_input.split(Game.COMMA) # extract move
if car_name not in Game.VALID_NAMES:
print('Your car name is invalid')
elif direction not in Game.VALID_DIRECTIONS:
print('Your direction is invalid')
elif self.__board.move_car(car_name, direction):
print(self.__board) # print updated board after the move
if self.__board.cell_content(self.__board.target_location()):
return Game.WON # user reach target cell (3, 7)
else:
print('Your move is invalid') # cannot apply user move
else:
print('Your input must follow this form: Name,Direction')
def play(self):
"""
The main driver of the Game. Manages the game until completion
:return: None
"""
if self.__board.cell_content(self.__board.target_location()):
print(Game.WON)
else:
turn = self.__single_turn()
while turn != Game.STOPPED and turn != Game.WON:
turn = self.__single_turn()
print(turn) # prints whether user won / stopped the game
if __name__ == "__main__":
board = Board()
car_config = dict(load_json(sys.argv[1])) # extract game info form json
for name in car_config:
length = car_config[name][0]
location = tuple(car_config[name][1])
orientation = car_config[name][2]
if name in Game.VALID_NAMES:
if Game.MIN_LENGTH <= length <= Game.MAX_LENGTH:
if location in board.cell_list():
if str(orientation) in Game.VALID_ORIENTATIONS:
car_object = Car(name, length, location, orientation)
if board.add_car(car_object):
pass # The car has been added successfully
game = Game(board)
print(board)
game.play() # starting A game
| OmerFerster/Introduction-to-CS | Exercise 8/game.py | game.py | py | 3,460 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "board.Board",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "helper.load_json",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "board.cell_list",
"lin... |
3630655417 | """
This module contains all the paths for the wiredrive app.
Name: Michael Feigen
Date Completed: 7/31/2018
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view(), name='wiredrive'),
path('form/', views.getName, name='get_name'),
path('list/', views.getCheck, name = 'checklist'),
path('credits/', views.getCredit, name = 'credits'),
path('path/', views.getPath, name = 'path')
] | michaelfeigen/portal | wiredrive/urls.py | urls.py | py | 466 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
3480209277 | from . import models
from django.conf.urls import url
from stark.service import v1
import json
from django.db.models import Q
from utils import message
from xxxxxx import XXX
from django.utils.safestring import mark_safe
from django.shortcuts import HttpResponse, redirect, render
from django.utils.safestring import mark_safe
from django.urls import reverse
import datetime
from django.forms import ModelForm
class BasePermission(object):
def get_show_add_btn(self):
code_list=self.request.permission_codes_list
if 'add' in code_list:
return True
def get_edit_display(self):
code_list = self.request.permission_codes_list
if 'edit' in code_list:
return super(SchoolConfig,self).get_edit_display()
else:
return []
def get_list_display(self):
code_list=self.request.permission_codes_list
data = []
if self.list_display:
data.extend(self.list_display)
# data.append(v1.StarkConfig.edit)
if 'del' in code_list:
data.append(v1.StarkConfig.delete)
data.insert(0, v1.StarkConfig.checkbox)
return data
class SingleModelForm(ModelForm):
class Meta:
model = models.Customer
exclude = ['last_consult_date', 'recv_date', 'status', 'consultant',]
class DepartmentConfig(BasePermission,v1.StarkConfig):
'''
这是部门表实现了:显示字段、搜索、actions
'''
list_display = ['title', 'code'] # 页面显示的字段
show_actions = False # 这是actions的显示是否出现
show_search_form = False # 不用显示搜索的框
edit_display = ['title']
def get_list_display(self):
result = []
result.extend(self.list_display)
result.append(v1.StarkConfig.delete)
result.insert(0, v1.StarkConfig.checkbox)
return result
v1.site.register(models.Department, DepartmentConfig)
class UserinfoConfig(BasePermission,v1.StarkConfig):
'''
这是用户:我们实现了:显示、搜索、组合搜索(有bug)
'''
# search_fields = ['name__contains', 'username__contains','email__contains'] # 这是用来搜索的,不要把外键放在里面
list_display = ['name', 'username', 'email', 'depart']
comb_filter = [
v1.FilterOption('depart', text_func_name=lambda x: str(x), val_func_name=lambda x: x.code),
]
show_actions = False
show_search_form = False
v1.site.register(models.UserInfo, UserinfoConfig)
class CourseConfig(v1.StarkConfig):
'''
课程:用来字段的显示、搜索
'''
search_fields = ['name__contains'] # 这是用来搜索的,不要把外键放在里面
list_display = ['name']
edit_display = ['name']
def get_list_display(self):
result = []
result.extend(self.list_display)
# result.append(v1.StarkConfig.edit)
result.append(v1.StarkConfig.delete)
result.insert(0, v1.StarkConfig.checkbox)
return result
show_actions = False # 这是actions的
v1.site.register(models.Course, CourseConfig)
class SchoolConfig(BasePermission,v1.StarkConfig):
'''
校区:实现了:显示字段、搜索
'''
list_display = ['title']
search_fields = ['title__contains'] # 这是用来搜索的,不要把外键放在里面
edit_display = ['title']
def get_list_display(self):
result = []
result.extend(self.list_display)
# result.append(v1.StarkConfig.edit)
result.append(v1.StarkConfig.delete)
result.insert(0, v1.StarkConfig.checkbox)
return result
# comb_filter = [
# v1.FilterOption('depart', text_func_name=lambda x: str(x), val_func_name=lambda x: x.code),
# ]
v1.site.register(models.School, SchoolConfig)
class ClassListConfig(v1.StarkConfig):
def course_semester(self, obj=None, is_header=False):
if is_header:
return '班级与期数'
return ('%s(%s期)') % (obj.course, obj.semester)
def num(self, obj=None, is_header=False):
if is_header:
return '人数'
return obj.student_set.all().count()
def get_teacher(self, obj=None, is_header=None):
if is_header:
return '咨询课程'
html = []
course_list = obj.teachers.all()
for role in course_list:
ss = role.name
html.append(ss)
html = ','.join(html)
return html
list_display = ['school', course_semester, num, 'price', 'start_date', 'graduate_date', 'memo', get_teacher,
'tutor']
search_fields = ['school__contains', 'course__contains', 'semester__contains', 'price__contains',
'start_date__contains', 'graduate_date__contains'] # 这是用来搜索的,不要把外键放在里面
comb_filter = [
v1.FilterOption('school', ),
v1.FilterOption('course', ),
]
v1.site.register(models.ClassList, ClassListConfig)
class CustomerConfig(v1.StarkConfig):
'''
客户信息:显示字段、
'''
def extra_url(self):
app_model_name = (self.model_class._meta.app_label, self.model_class._meta.model_name,)
urls = [
url(r'^public/$', self.wrap(self.public), name='%s/%s/public' % app_model_name),
url(r'^(\d+)/competion/$', self.wrap(self.competion), name='%s/%s/competion' % app_model_name),
url(r'^sale_views/$', self.wrap(self.sale_views), name='%s/%s/sale_views' % app_model_name),
url(r'^single/$', self.wrap(self.single), name='%s/%s/single' % app_model_name),
url(r'^multi/$', self.wrap(self.multi), name='%s/%s/multi' % app_model_name),
]
return urls
def public(self,request):
date_now=datetime.datetime.now().date()#当前时间
date_time_15=datetime.timedelta(days=15)
date_time_3=datetime.timedelta(days=3)
deadline1=date_now-date_time_15
deadline2=date_now-date_time_3
#方法一:
con = Q()
q3=Q(('status',2))
q1 = Q()
q1.children.append(('last_consult_date__lt', deadline2))
q2 = Q()
q2.children.append(('recv_date__lt',deadline1))
con.add(q1, 'OR')
con.add(q2, 'OR')
con.add(q3,'AND')
#方法二:
# models_list=models.Customer.objects.filter(Q(recv_date__lt=deadline1)|Q(last_consult_date__lt=deadline2),status=2)
models_list=models.Customer.objects.filter(con)
print(models_list)
return render(request,'custmoer_public.html',{'models_list':models_list})
# return HttpResponse('ok')
def competion(self,request,cid):#抢单
"""
抢单的代码
"""
current_user_id=5
#首选判断这个用户是不是在公共的里面和客户顾问不是他本人
date_now = datetime.datetime.now().date() # 当前时间
date_time_15 = datetime.timedelta(days=15)
date_time_3 = datetime.timedelta(days=3)
deadline1 = date_now - date_time_15
deadline2 = date_now - date_time_3
is_exist=models.Customer.objects.filter(Q(recv_date__lt=deadline1)|Q(last_consult_date__lt=deadline2),status=2).exclude(consultant_id=current_user_id).update(last_consult_date=date_now,recv_date=date_now,consultant_id=current_user_id)
if not is_exist:
return HttpResponse("手速慢")
models.CustomerDistribution.objects.filter(user_id=current_user_id,customer_id=cid,ctime=date_now)
# return redirect(request.path_info)
return HttpResponse("抢单成功")
def sale_views(self,request):#分配表里查看
current_user_id = 5
# customer_list=models.CustomerDistribution.objects.filter(user_id=current_user_id).order_by('status')
customer_list=models.Customer.objects.filter(consultant_id=current_user_id)
return render(request,'sale_views.html',{"customer_list":customer_list})
def single(self,request):
if request.method=="GET":
form=SingleModelForm()
return render(request,'single_form.html',{'form':form})
else:
form=SingleModelForm(request.POST)
if form.is_valid():
sale_id = XXX.get_sale_id()
if not sale_id:
return HttpResponse("没有客户顾问无法分配")
ctime=datetime.datetime.now().date()
from django.db import transaction
try:
with transaction.atomic():
#方法一
# form.instance.consultant_id = sale_id
# form.instance.recv_date = ctime
# form.instance.last_consult_date = ctime
# obj = form.save()
#方法二
form.cleaned_data['consultant_id'] = sale_id
form.cleaned_data['recv_date']= ctime
form.cleaned_data['last_consult_date']= ctime
course_list=form.cleaned_data.pop('course')
print('course_list',course_list)
obj=models.Customer.objects.create(**form.cleaned_data)
obj.course.add(*course_list)
models.CustomerDistribution.objects.create(user_id=sale_id,customer=obj,ctime=ctime)
#发短信
except Exception as e:
XXX.rollback(sale_id)
message.send_message('自动发送','很,兴奋代码自动发送邮件,','2981405421@qq.com','大毛')
return HttpResponse('保存成功')
else:
return render(request, 'single_form.html', {'form': form})
def multi(self,request):
if request.method=='GET':
return render(request,'multi_view.html')
else:
ctime = datetime.datetime.now().date()
from django.db import transaction
from io import BytesIO
file_obj=request.FILES.get('exfile')
f=BytesIO()
for chunk in file_obj:
f.write(chunk)
import xlrd
work_hold = xlrd.open_workbook(file_contents=f.getvalue())
sheet=work_hold.sheet_by_index(0)
maps = {
0: 'qq',
1: 'name',
2: 'gender',
3: 'education',
4: 'graduation_school',
5: 'major',
6: 'experience',
7: 'work_status',
8: 'course',
}
print('sheet.nrows',sheet.nrows)
for index in range(1,sheet.nrows):# 这个是获取的行数
sale_id = XXX.get_sale_id()
if not sale_id:
return HttpResponse("没有客户顾问无法分配")
row=sheet.row(index) # 这是通过行数获取行的内容
dict_obj={} # 字典
for i in range(len(maps)): # 这是获取列的数量
key=maps[i] # 这是键
cell=row[i] # 这是获取空格的对象
dict_obj[key]=cell.value
try:
with transaction.atomic():
dict_obj['consultant_id']=int(sale_id.decode('utf-8'))
course_list=[]
course_list.extend(dict_obj.pop('course').split(','))
obj=models.Customer.objects.create(**dict_obj)
obj.course=course_list
models.CustomerDistribution.objects.create(user_id=sale_id, customer=obj, ctime=ctime)
except Exception as e:
print(e)
XXX.rollback(sale_id)
message.send_message('自动发送', '很,兴奋代码自动发送邮件,', '2981405421@qq.com', '大毛')
return HttpResponse('保存成功')
# file_obj=request.FILES.get('exfile')
# with open('xxxx.xlsx','wb') as f:
# for chunk in file_obj:
# f.write(chunk)
# import xlrd
# work_hold=xlrd.open_workbook('xxxx.xlsx')
# sheet=work_hold.sheet_by_index(0)
# maps={
# 0:'学校',
# 1:'日期',
# }
# for index in range(1,sheet.nrows):# 这个是获取的行数
# row=sheet.row(index) # 这是通过行数获取行的内容
# dict_obj={} # 字典
# for i in range(len(maps)): # 这是获取列的数量
# key=maps[i] # 这是键
# cell=row[i] # 这是获取空格的对象
# dict_obj[key]=cell.value
# print(dict_obj) # 这是获取对象
# print(work_hold,type(work_hold))
# print(file_obj.field_name)#这是对象名字
# print(file_obj.size)#这是对象名字
# print(file_obj.name)#这是对象名字
# print('上传对象',file_obj,type(file_obj))
# return HttpResponse('上传成功')
def get_gendr(self, obj=None, is_header=None):
if is_header:
return '性别'
return obj.get_gender_display()
def get_education(self, obj=None, is_header=None):
if is_header:
return '学历'
return obj.get_education_display()
def get_experience(self, obj=None, is_header=None):
if is_header:
return '工作经验'
return obj.get_experience_display()
def get_work_status(self, obj=None, is_header=None):
if is_header:
return '职业状态'
return obj.get_work_status.display()
def get_source(self, obj=None, is_header=None):
if is_header:
return '客户来源'
return obj.get_source_display()
##course是多对多
def get_course(self, obj=None, is_header=None):
if is_header:
return '咨询课程'
html = []
course_list = obj.course.all()
for role in course_list:
ss = role.name
html.append(ss)
html = ','.join(html)
return html
def get_status1(self, obj=None, is_header=None):
if is_header:
return '状态'
return obj.get_status_display()
# 显示少了get_status
def get_status(self, obj=None, is_header=None):
if is_header:
return '职业状态'
return obj.get_work_status_display()
def recode(self, obj=None, is_header=None):
if is_header:
return '跟进记录'
return mark_safe("<a href='/stark/crm/consultrecord/?customer=%s'>查看跟进记录</a>" % (obj.pk,))
list_display = ['qq', 'name', get_gendr, get_education, 'graduation_school', 'major', get_experience,
get_status, 'company', 'salary', 'date',get_source, get_course, get_status1, recode]
# 搜索
search_fields = ['qq__contains', 'name__contains', 'graduation_school__contains', 'major__contains',
'company__contains', 'salary__contains', 'consultant__contains', 'date__contains',
'last_consult_date__contains', ] #
comb_filter = [ #组合搜索 一个是choice一是多选,和多对一
v1.FilterOption('gender', is_choice=True),
v1.FilterOption('education', multi=True,is_choice=True),
# v1.FilterOption('experience', is_choice=True),
# v1.FilterOption('work_status', is_choice=True),
# # v1.FilterOption('source', is_choice=True),
# # v1.FilterOption('course', True),
# v1.FilterOption('status', is_choice=True),
v1.FilterOption('consultant', ),
]
order_by = ['-status']
v1.site.register(models.Customer, CustomerConfig)
class ConsultRecordConfig(v1.StarkConfig):
list_display = ['customer', 'consultant', 'date']
comb_filter = [
v1.FilterOption('customer')
]
def changelist_view(self, request, *args, **kwargs):
customer = request.GET.get('customer')
# session中获取当前用户ID
current_login_user_id = 6
ct = models.Customer.objects.filter(consultant=current_login_user_id, id=customer).count()
if not ct:
return HttpResponse('别抢客户呀...')
return super(ConsultRecordConfig, self).changelist_view(request, *args, **kwargs)
v1.site.register(models.ConsultRecord, ConsultRecordConfig)
class StudyRecordconfig(v1.StarkConfig):
def get_record(self, obj=None, is_header=False):
if is_header:
return '上课记录'
return obj.get_record_display()
list_display = ['course_record', 'student', get_record]
show_search_form = False
comb_filter = [
v1.FilterOption('course_record', ),
]
show_combe_fileter = False
def get_checked(self, request):
pk_list = request.POST.getlist('pk')
models.StudyRecord.objects.filter(id__in=pk_list).update(record='checked')
get_checked.short_desc = '已签到'
def get_vacate(self, request):
pk_list = request.POST.getlist('pk')
models.StudyRecord.objects.filter(id__in=pk_list).update(record='vacate')
get_vacate.short_desc = '请假'
def get_late(self, request):
pk_list = request.POST.getlist('pk')
models.StudyRecord.objects.filter(id__in=pk_list).update(record='late')
get_late.short_desc = '迟到'
def get_noshow(self, request):
pk_list = request.POST.getlist('pk')
models.StudyRecord.objects.filter(id__in=pk_list).update(record='noshow')
get_noshow.short_desc = '缺勤'
def get_leave_early(self, request):
pk_list = request.POST.getlist('pk')
print('pk', pk_list)
models.StudyRecord.objects.filter(id__in=pk_list).update(record='leave_early')
get_leave_early.short_desc = '早退'
actions = [get_checked, get_vacate, get_late, get_noshow, get_leave_early]
show_add_btn = False
v1.site.register(models.StudyRecord, StudyRecordconfig)
class CourseRecordconfig(v1.StarkConfig):
def extra_url(self):
app_model_name = (self.model_class._meta.app_label, self.model_class._meta.model_name,)
urls = [
url(r'^score_list/(\d+)$', self.wrap(self.score_list), name='%s/%s/score_list' % app_model_name),
]
return urls
def score_list(self, request, nid):
if request.method == 'GET':
study_list = models.StudyRecord.objects.filter(course_record_id=nid)
choices = models.StudyRecord.score_choices#这个是静态字段的查询
return render(request, 'scorelist.html', {"study_list": study_list, 'choices': choices})
elif request.method == 'POST':
# data={
# '3':{'select_name':80,"homework":'你好'},
# '2':{'select_name':70,"homework":'你好呀'},
# ''' 'select_name_2': ['80'], 'homework_note_2': ['和那后'], 'select_name_3': ['80'], 'homework_note_3': ['韩浩']}>
# '''
# }
print('******', request.POST)
data_dict = {}
for k, val in request.POST.items():
print(k)
if k == 'csrfmiddlewaretoken':
continue
name, id = k.rsplit('_', 1)
if id not in data_dict:
print(id)
data_dict[id] = {name: val}
else:
data_dict[id][name] = val
print(data_dict)
for k, val in data_dict.items():
models.StudyRecord.objects.filter(id=k).update(**val)
return redirect(request.path_info)#返回当前页面
# return render(request, 'scorelist.html')
def get_kaoqin(self, obj=None, is_header=False):
if is_header:
return '考勤记录'
return mark_safe('<a href="/frank/crm/studyrecord/?course_record=%s">考勤记录</a>' % (obj.pk))
def get_scorelist(self, obj=None, is_header=False):
if is_header:
return '分数统计'
rurl = reverse('%s/%s/score_list' % (self.model_class._meta.app_label, self.model_class._meta.model_name,),
args=(obj.pk,))
# return mark_safe('<a href="/frank/crm/courserecord/score_list/%s">分数录入</a>' % (obj.pk))
return mark_safe('<a href="%s">分数录入</a>' % rurl)
list_display = ['class_obj', 'day_num', get_kaoqin, get_scorelist]
show_search_form = False
def multi_init(self, request): # 这个是初始化上课记录
courserecord_list = request.POST.getlist('pk') # 获取所有的需要初始化的班级的id
crecord_list = models.CourseRecord.objects.filter(id__in=courserecord_list) # 获取所有需要初始化的班级对象
for record in crecord_list: # 循环每个需要初始化的对象
is_exists = models.StudyRecord.objects.filter(course_record=record).exists() # 判断在学生记录上是否有这个版的记录
if is_exists: # 如果存在就跳过
continue
student_list = models.Student.objects.filter(class_list=record.class_obj) # 找到班级所有的学生
bulk_list = []
for student in student_list:
bulk_list.append(models.StudyRecord(student=student, course_record=record))
models.StudyRecord.objects.bulk_create(bulk_list)#这个不需要用**
for record in crecord_list:
models.StudyRecord.objects.filter()
models.Student.objects.filter()
return HttpResponse('.......')
multi_init.short_desc = '考勤初始化'
actions = [multi_init]
v1.site.register(models.CourseRecord, CourseRecordconfig)
class Studentconfig(v1.StarkConfig):
def extra_url(self):
app_model_name = (self.model_class._meta.app_label, self.model_class._meta.model_name,)
urls = [
url(r'^get_score_view/(\d+)$', self.wrap(self.get_score_view), name='%s/%s/get_score' % app_model_name),
url(r'^score_show/$', self.wrap(self.score_show), name='%s/%s/score_show' % app_model_name),
]
return urls
def score_show(self, request):
ret = {'status': False, 'data': None, 'msg': None}
try:
cid = request.GET.get('cid') # 是班级的id
print(cid)
sid = request.GET.get('sid') # 是任呀
print(sid)
record_list = models.StudyRecord.objects.filter(student_id=sid, course_record__class_obj_id=cid)
print('fuck', record_list)
data = []
for item in record_list:
day = 'day%s' % item.course_record.day_num
data.append([day, item.score])
ret['status'] = True
ret['data'] = data
except Exception as e:
ret['msg'] = str(e)
return HttpResponse(json.dumps(ret))
def get_score_view(self, request, nid):
obj = models.Student.objects.filter(id=nid).first()
if not obj:
return HttpResponse('查无此人')
class_list = obj.class_list.all()
return render(request, 'score_view.html', {"class_list": class_list, 'sid': nid})
def get_score(self, obj=None, is_header=False):
if is_header:
return '查看分数'
urls = reverse('%s/%s/get_score' % (self.model_class._meta.app_label, self.model_class._meta.model_name,),
args=(obj.pk,))
return mark_safe("<a href='%s'>查看分数</a>" % urls)#反向解析
list_display = ['username', get_score]
v1.site.register(models.Student, Studentconfig)
class CustomerDistributionConfig(v1.StarkConfig):
def get_status(self,obj=None,is_header=None):
if is_header:
return '状态'
return obj.get_status_display()
list_display = ['user', 'customer', 'ctime', get_status]
v1.site.register(models.CustomerDistribution,CustomerDistributionConfig)
| frank12a/Gemma- | crm/stark.py | stark.py | py | 24,228 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "stark.service.v1.StarkConfig",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "stark.service.v1",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "stark.service.v1.StarkConfig",
"line_number": 36,
"usage_type": "attribute"
},
{
... |
3178025434 | import mxnet as mx
import numpy as np
import cv2
from test_utils.predict import predict
def pred(image, net, step, ctx):
    """Predict a segmentation map for *image* by tiling it into overlapping patches.

    Each sampled patch is ``step`` pixels square, but only its central 75%
    (``size`` pixels) is written into the final prediction; the outer
    ``margin`` pixels of every patch overlap the neighbouring patches and are
    discarded, which crops away patch-border artifacts.

    Parameters
    ----------
    image : ndarray of shape (h, w, channel)
        Input image; values are scaled by 1/255 before prediction.
    net : model
        Network handed through to ``predict``.
    step : int
        Side length of each sampled patch (the sampling interval).
    ctx : mx.Context
        MXNet context the patch tensors are created on.

    Returns
    -------
    ndarray of shape (h, w), dtype int64
        Per-pixel predicted class labels.
    """
    h, w, channel = image.shape
    image = image.astype('float32')
    size = int(step * 0.75)          # central region of each patch kept in the output
    margin = int((step - size) / 2)  # border discarded on every patch edge
    inhang = int(np.ceil(h / size))  # number of tile rows
    inlie = int(np.ceil(w / size))   # number of tile columns
    # Zero-pad so that every tile plus its margin lies inside the padded image.
    newimage = np.zeros((inhang * size + 2 * margin, inlie * size + 2 * margin, channel))
    newimage[margin:h + margin, margin:w + margin, :] = image
    newimage /= 255
    predictions = np.zeros((inhang * size, inlie * size), dtype=np.int64)
    for i in range(inhang):
        for j in range(inlie):
            patch = newimage[i * size:i * size + step, j * size:j * size + step, :]
            patch = np.transpose(patch, axes=(2, 0, 1)).astype(np.float32)  # HWC -> CHW
            patch = mx.nd.array(np.expand_dims(patch, 0), ctx=ctx)          # add batch dim
            # Renamed from 'pred' so the local no longer shadows this function.
            patch_pred = predict(patch, net)
            predictions[i * size:(i + 1) * size, j * size:(j + 1) * size] = \
                patch_pred[margin:size + margin, margin:size + margin]
    # Trim the padding back to the original image size.
    return predictions[:h, :w]
| scrssys/semantic_segment_RSImage | temp/predict_from_xuhuimin.py | predict_from_xuhuimin.py | py | 1,308 | python | en | code | 49 | github-code | 1 | [
{
"api_name": "numpy.ceil",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23... |
34063507829 | import os
import time
import numpy as np
import pymysql
import cv2
# Watcher script: once per minute (at second :11) it pulls '_3_' image paths
# from MySQL table fileTmpTest2, diffs each against its '_1_' counterpart,
# writes the thresholded diff image, records the result in resFile2 and
# deletes the processed rows.
# NOTE(review): the original file's indentation was lost; the nesting below
# (everything from print(tick) onward gated by the ":11 " check) is
# reconstructed from the logic — confirm against the original source.
if __name__ == '__main__':
    host = 'localhost'
    user = 'root'
    # NOTE(review): hard-coded DB credentials — should come from config/env.
    password = '880510'
    db = 'fx'
    # Select files whose name contains '_3_' (literal underscores escaped for LIKE).
    sql_select = "SELECT * FROM fileTmpTest2 where file_name like '%\\_3\\_%'"
    sql_delete = "DELETE FROM fileTmpTest2 WHERE file_name = "
    sql_insert = "INSERT INTO resFile2(file_datetime, file_name, lot, dt, d, res_dir1, res_dir3, res1, res2, res3) VALUES "
    res_dir = 'D:\\web\\res'
    while True:
        source = []
        conn = pymysql.connect(host=host, user=user, password=password, database=db)
        cursor = conn.cursor()
        # Timestamp + query string; the ":11 " substring test below fires once
        # per minute, when the seconds field equals 11.
        tick = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + sql_select
        if ":11 " in tick:
            print(tick)
            cursor.execute(sql_select)
            results = cursor.fetchall()
            for row in results:
                # row[2] is assumed to hold the image file path — TODO confirm schema.
                source.append(row[2])
                print(row[2])
            for image in source:
                try:
                    # File name format: <lot>_<yymmddHHMMSS>... e.g. parsed below.
                    name = image[image.rindex('\\') + 1:len(image)]
                    lot = name[0:name.index('_')]
                    dt = name[name.index('_') + 1:name.index('_') + 13]
                    date = '20' + dt[0:2] + '-' + dt[2:4] + '-' + dt[4:6]
                    # Pair image '_1_' (reference) with this '_3_' image.
                    mat1 = cv2.imread(image.replace('_3_', '_1_'), cv2.IMREAD_COLOR)
                    mat3 = cv2.imread(image, cv2.IMREAD_COLOR)
                    # Masks for gray, pink and white regions to exclude from the diff.
                    gray = cv2.inRange(mat3, np.array([120, 120, 120]), np.array([140, 140, 140]))
                    pink1 = cv2.inRange(mat1, np.array([120, 0, 120]), np.array([255, 130, 255]))
                    pink3 = cv2.inRange(mat3, np.array([120, 0, 120]), np.array([255, 130, 255]))
                    white = cv2.inRange(mat1, np.array([230, 230, 230]), np.array([255, 255, 255]))
                    mat1 = cv2.subtract(mat1, cv2.merge([white, white, white]))
                    mat1 = cv2.subtract(mat1, cv2.merge([pink1, pink1, pink1]))
                    mat3 = cv2.subtract(mat3, cv2.merge([gray, gray, gray]))
                    mat3 = cv2.subtract(mat3, cv2.merge([pink3, pink3, pink3]))
                    # Absolute difference -> grayscale -> binary mask of changes.
                    mat = cv2.absdiff(mat1, mat3)
                    mat_gray = cv2.cvtColor(mat, cv2.COLOR_BGR2GRAY)
                    ret_mat, mat_threshold = cv2.threshold(mat_gray, 30, 255, cv2.THRESH_BINARY)
                    contours, hierarchy = cv2.findContours(mat_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    print(name, ' diff count : ', str(len(contours)))
                    # Build res_dir\<date>\<lot>\<name-without-.Jpg>\ step by step.
                    if not os.path.exists(res_dir):
                        os.mkdir(res_dir)
                    if not os.path.exists(res_dir + '\\' + date):
                        os.mkdir(res_dir + '\\' + date)
                    if not os.path.exists(res_dir + '\\' + date + '\\' + lot):
                        os.mkdir(res_dir + '\\' + date + '\\' + lot)
                    if not os.path.exists(res_dir + '\\' + date + '\\' + lot + '\\' + name.replace('.Jpg', '')):
                        os.mkdir(res_dir + '\\' + date + '\\' + lot + '\\' + name.replace('.Jpg', ''))
                    cv2.imwrite(res_dir + '\\' + date + '\\' + lot + '\\' + name.replace('.Jpg', '') + '\\diff.jpg', mat_threshold)
                    # NOTE(review): SQL built by string concatenation — vulnerable to
                    # injection / breakage on quotes; should use parameterized queries.
                    cursor.execute(sql_insert + '(\'' + time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + '\', \'' \
                        + image.replace('\\', '\\\\') + '\', \'' + lot + '\', \'' + dt + '\', \'' + date + '\', \'' \
                        + res_dir.replace('\\', '\\\\') + '\\\\' + date + '\\\\' + lot + '\\\\' + name.replace('_3_', '_1_').replace('.Jpg', '') \
                        + '\', \'' + res_dir.replace('\\', '\\\\') + '\\\\' + date + '\\\\' + lot + '\\\\' + name.replace('.Jpg', '') \
                        + '\', ' + str(len(contours)) + ', 0, 0)')
                    conn.commit()
                    # Remove both the '_3_' row and its '_1_' counterpart once processed.
                    cursor.execute(sql_delete + '\'' + image.replace('\\', '\\\\') + '\'')
                    conn.commit()
                    cursor.execute(sql_delete + '\'' + image.replace('_3_', '_1_').replace('\\', '\\\\') + '\'')
                    conn.commit()
                except Exception as ex:
                    # Best-effort error log; processing continues with the next image.
                    print(ex)
                    exp = open("Exception.txt", mode="a")
                    exp.write(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()))
                    exp.write("\n")
                    exp.write(image)
                    exp.write("\n")
                    exp.write(str(ex))
                    exp.write("\n")
                    exp.write("\n")
                    exp.flush()
                    exp.close()
        conn.close()
        time.sleep(1)
| 314257smcag2/okteto | sanan/分选图像判定/detect3.py | detect3.py | py | 4,561 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymysql.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_n... |
39492874795 | """Inferrer"""
from PIL import Image
import torch
import numpy as np
import matplotlib.pyplot as plt
import cv2
from utils.load import load_yaml
from model import get_model
from dataloader.transform import DataTransform
class Inferrer():
    """Runs SSD inference on an image and renders the detections onto it."""

    def __init__(self, configfile):
        """Build the SSD network from a YAML config and load its trained weights.

        Parameters
        ----------
        configfile: str
            Path to the YAML configuration file.
        """
        # Config
        config = load_yaml(configfile)
        self.model = get_model(config, is_eval=True)
        self.model.build(is_eval=True)
        self.net = self.model.model
        # Load trained weights, remapping CUDA tensors onto the CPU.
        self.net_weights = torch.load(config['infer']['weight_path'], map_location={'cuda:0': 'cpu'})
        self.net.load_state_dict(self.net_weights, strict=False)
        self.classes = self.model.classes
        self.data_confidence_level = config['infer']['data_confidence_level']
        self.color_mean = config['data']['color_mean']  # per-channel mean (BGR)
        self.input_size = config['data']['input_size']  # network input resized to input_size x input_size
        self.transform = DataTransform(self.input_size, self.color_mean)  # preprocessing pipeline

    def show(self, image_file_path):
        """Run detection on an image file and return it with boxes drawn.

        Parameters
        ----------
        image_file_path: str
            Path to the input image.

        Returns
        -------
        PIL.Image with the detection results drawn onto the RGB image.
        """
        img = cv2.imread(image_file_path)  # [height][width][BGR]
        input_height, input_width, _ = img.shape  # original image size
        rgb_img, predict_bbox, pre_dict_label_index, scores = self.ssd_predict(
            image_file_path, self.data_confidence_level)
        img = self.vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
                            scores=scores, label_names=self.classes,
                            crop_height=input_height, crop_width=input_width)
        return img

    def ssd_predict(self, image_file_path, data_confidence_level=0.5):
        """Run the SSD forward pass and filter detections by confidence.

        Parameters
        ----------
        image_file_path: str
            Path to the input image.
        data_confidence_level: float
            Minimum confidence for a detection to be kept.

        Returns
        -------
        rgb_img, predict_bbox, pre_dict_label_index, scores
            The RGB image, kept boxes in pixel coordinates, their
            0-based class indices, and their confidence scores.
        """
        # Read the image (BGR) and keep an RGB copy for display.
        img = cv2.imread(image_file_path)  # [height][width][BGR]
        height, width, channels = img.shape  # original image size
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Preprocess; no annotations at inference time, so pass "" for both.
        phase = "eval"
        img_transformed, boxes, labels = self.transform(
            img, phase, "", "")
        # BGR -> RGB channel reorder, then HWC -> CHW for the network.
        img = torch.from_numpy(
            img_transformed[:, :, (2, 1, 0)]).permute(2, 0, 1)

        # SSD forward pass.
        self.net.eval()  # switch the network to inference mode
        x = img.unsqueeze(0)  # add batch dim: torch.Size([1, 3, 300, 300])
        detections = self.net(x)
        # detections has shape torch.Size([1, 21, 200, 5]); 200 is the top_k value.

        # Keep only detections at or above the confidence threshold.
        predict_bbox = []
        pre_dict_label_index = []
        scores = []
        detections = detections.cpu().detach().numpy()

        # Indices of entries meeting the threshold.
        find_index = np.where(detections[:, 0:, :, 0] >= data_confidence_level)
        detections = detections[find_index]
        for i in range(len(find_index[1])):  # loop over the kept detections
            if (find_index[1][i]) > 0:  # skip the background class
                sc = detections[i][0]  # confidence score
                # Scale normalized box coords back to pixel coordinates.
                bbox = detections[i][1:] * [width, height, width, height]
                # find_index is a tuple of (batch, class, top_k) indices.
                lable_ind = find_index[1][i]-1
                # (note)
                # class 0 is background, so subtract 1 for the real label index
                # append to the result lists
                predict_bbox.append(bbox)
                pre_dict_label_index.append(lable_ind)
                scores.append(sc)

        return rgb_img, predict_bbox, pre_dict_label_index, scores

    def vis_bbox(self, rgb_img, bbox, label_index, scores, label_names, crop_height, crop_width,):
        """Draw detection boxes and labels onto an image via matplotlib.

        Parameters
        ----------
        rgb_img: RGB image array
            The image to draw on.
        bbox: list
            Bounding boxes in pixel coordinates.
        label_index: list
            0-based class index per box.
        scores: list
            Confidence per box, or None to label boxes as ground truth.
        label_names: list
            Class-name list indexed by label_index.
        crop_height, crop_width: int
            Original image size; used for the output figure size.

        Returns
        -------
        PIL.Image (RGB) with the boxes and labels rendered.
        """
        # One distinct hue per class (background excluded from label_names).
        num_classes = len(label_names)  # number of classes (excluding background)
        colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()

        # Render the image at the original pixel size (figsize is in inches at dpi=100).
        fig = plt.figure(figsize=(crop_width/100, crop_height/100))
        plt.imshow(rgb_img)
        plt.axis("off")
        currentAxis = plt.gca()

        # One rectangle + caption per detected box.
        for i, bb in enumerate(bbox):

            # class name and its per-class color
            label_name = label_names[label_index[i]]
            color = colors[label_index[i]]

            # Caption, e.g. "person: 0.72"; "ans" marks ground-truth boxes.
            if scores is not None:
                sc = scores[i]
                display_txt = '%s: %.2f' % (label_name, sc)
            else:
                display_txt = '%s: ans' % (label_name)

            # box geometry: top-left corner plus width/height
            xy = (bb[0], bb[1])
            width = bb[2] - bb[0]
            height = bb[3] - bb[1]

            # draw the rectangle outline
            currentAxis.add_patch(plt.Rectangle(
                xy, width, height, fill=False, edgecolor=color, linewidth=2))

            # draw the caption at the rectangle's top-left corner
            currentAxis.text(xy[0], xy[1], display_txt, bbox={
                'facecolor': color, 'alpha': 0.5})
        # Remove all figure margins, then rasterize the canvas to an array.
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
        fig.canvas.draw()
        im = np.array(fig.canvas.renderer.buffer_rgba())
        # im = np.array(fig.canvas.renderer._renderer)  # for matplotlib < 3.1
        img = Image.fromarray(im)
        img = img.convert('RGB')

        # Center-crop back to the original image size (currently disabled).
        img_width, img_height = img.size
        # img = img.crop(((img_width - crop_width) // 2,
        #                 (img_height - crop_height) // 2,
        #                 (img_width + crop_width) // 2,
        #                 (img_height + crop_height) // 2))
        return img
        # img.save('./test_output.png', quality=95)
| noji0101/object-detection-app | executor/inferrer.py | inferrer.py | py | 7,262 | python | ja | code | 0 | github-code | 1 | [
{
"api_name": "utils.load.load_yaml",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "model.get_model",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dataloader.transform.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.