seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42243645300 | import os, fnmatch, sys
import dill as pickle
import scipy.interpolate as interp
import scipy.optimize as opti
import scipy.constants as constants
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import bead_util as bu
import configuration as config
import transfer_func_util as tf
plt.rcParams.update({'font.size': 14})
# Candidate data directories from earlier runs; only the LAST assignment of
# `dirs` below takes effect -- the earlier ones are kept as a run log.
dirs = ['/data/20180927/bead1/weigh_bead_dc/ramp_top_negative_bottom_at_p100', \
        '/data/20180927/bead1/weigh_bead_dc/ramp_top_negative_bottom_at_p100_10_repeats'
        ]
dirs = ['/data/20180927/bead1/weigh_bead_20e_10v_bottom_constant', \
        ]
#
V2 = 100.0          # DC voltage offset [V] used in the first Vdiff estimate
amp_gain = 200      # ???? amplifier gain applied to the synth setting -- unconfirmed
#dirs = ['/data/20181119/bead1/mass_meas/neg_charge_2', \
#        ]
dirs = ['/data/20181119/bead1/mass_meas/pos_charge_1', \
        ]
pos = True          # bead charge sign; selects the "_recharge" calibration file
mon_fac = 200       # scale factor applied to the electrode monitor channels
maxfiles = 1000     # Many more than necessary
lpf = 2500          # Hz  (NOTE(review): unused in this script -- confirm before removing)
file_inds = (0, 500)
userNFFT = 2**12
diag = False
fullNFFT = False
###########################################################
# Calibration tables loaded at import time; the interpolators map raw
# detector/monitor values onto physical quantities.
power_dat = np.loadtxt('/power_v_bits/20181119_init.txt', delimiter=',')
bits_to_power = interp.interp1d(power_dat[0], power_dat[1])
e_top_dat = np.loadtxt('/calibrations/e-top_1V_optical-axis.txt', delimiter=',')
e_top_func = interp.interp1d(e_top_dat[0], e_top_dat[1])
e_bot_dat = np.loadtxt('/calibrations/e-bot_1V_optical-axis.txt', delimiter=',')
e_bot_func = interp.interp1d(e_bot_dat[0], e_bot_dat[1])
def line(x, a, b):
    """Affine model ``a*x + b`` used as the fit function for curve_fit."""
    slope_term = a * x
    return slope_term + b
def weigh_bead_efield(files, colormap='jet', sort='time', file_inds=(0,10000), \
                      pos=False):
    '''Estimate the bead mass by fitting levitation power vs. applied
    electric force, then plot the data and the linear fit.
    INPUTS:  files, list of file names to extract data from
             file_inds, (first, last) slice into the time-sorted file list
             pos, True if the bead charge is positive (selects the
                  "_recharge" charge-calibration file)
             colormap/sort, currently unused -- kept for interface
                  compatibility with sibling scripts
    OUTPUTS: none, prints the implied mass [ng] and shows a plot
    '''
    # Sort files by creation time, then keep the requested slice.
    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key = lambda x: (x[0]))
    files = [obj[1] for obj in files]
    files = files[file_inds[0]:file_inds[1]]
    #files = files[::10]
    # Date string is encoded in the path: /data/<date>/...
    date = files[0].split('/')[2]
    charge_file = '/calibrations/charges/' + date
    if pos:
        charge_file += '_recharge.charge'
    else:
        charge_file += '.charge'
    # Bead charge, converted from units of e to Coulombs.
    q_bead = np.load(charge_file)[0] * constants.elementary_charge
    print(q_bead / constants.elementary_charge)
    run_index = 0   # NOTE(review): unused, as is `masses` below
    masses = []
    nfiles = len(files)
    print("Processing %i files..." % nfiles)
    eforce = []
    power = []
    for fil_ind, fil in enumerate(files):#files[56*(i):56*(i+1)]):
        bu.progress_bar(fil_ind, nfiles)
        # Load data
        df = bu.DataFile()
        try:
            df.load(fil, load_other=True)
        except:
            continue
        df.calibrate_stage_position()
        df.calibrate_phase()
        if fil_ind == 0:
            init_phi = np.mean(df.zcal)  # NOTE(review): computed but unused
        # Mean electrode monitor readings scaled by the monitor factor.
        top_elec = mon_fac * np.mean(df.other_data[6])
        bot_elec = mon_fac * np.mean(df.other_data[7])
        # Synth plugged in negative so just adding instead of subtracting negative
        # NOTE(review): only the LAST Vdiff assignment takes effect; the first
        # two are superseded candidate estimates kept for reference.
        Vdiff = V2 + amp_gain * df.synth_settings[0]
        Vdiff = np.mean(df.electrode_data[2]) - np.mean(df.electrode_data[1])
        Vdiff = top_elec - bot_elec
        # Uniform-field force estimate; 4.0e-3 is presumably the electrode
        # gap in meters -- confirm against the apparatus geometry.
        force = - (Vdiff / (4.0e-3)) * q_bead
        # Field-map estimate using the per-electrode calibration curves.
        force2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead
        try:
            mean_fb = np.mean(df.pos_fb[2])
            mean_pow = bits_to_power(mean_fb)  # feedback bits -> optical power
        except:
            continue
        #eforce.append(force)
        eforce.append(force2)
        power.append(mean_pow)
    eforce = np.array(eforce)
    power = np.array(power)
    power = power / np.mean(power)  # normalize power to its mean
    # Discard force outliers before fitting.
    inds = np.abs(eforce) < 2e-13
    eforce = eforce[inds]
    power = power[inds]
    # Linear fit of normalized power vs. force (force rescaled to 1e-13 N).
    popt, pcov = opti.curve_fit(line, eforce*1e13, power, \
                                absolute_sigma=False, maxfev=10000)
    test_vals = np.linspace(np.min(eforce*1e13), np.max(eforce*1e13), 100)
    fit = line(test_vals, *popt)
    # Force at which the fitted power crosses zero; dividing by g gives
    # the implied mass.
    lev_force = -popt[1] / (popt[0] * 1e13)
    mass = lev_force / (9.806)
    # Propagate fit covariance into a relative mass uncertainty.
    mass_err = np.sqrt( pcov[0,0] / popt[0]**2 + \
                        pcov[1,1] / popt[1]**2 + \
                        np.abs(pcov[0,1]) / np.abs(popt[0]*popt[1]) ) * mass
    #masses.append(mass)
    print(mass * 1e12)      # mass in ng
    print(mass_err * 1e12)  # uncertainty in ng
    plt.figure()
    plt.plot(eforce, power, 'o')
    plt.xlabel('Elec. Force [N]', fontsize=14)
    plt.ylabel('Levitation Power [arb]', fontsize=14)
    plt.tight_layout()
    plt.plot(test_vals*1e-13, fit, lw=2, color='r', \
             label='Implied mass: %0.3f ng' % (mass*1e12))
    plt.legend()
    plt.show()
    #print np.mean(masses) * 1e12
    #print np.std(masses) * 1e12
# Gather all data files (time-sorted) and run the mass measurement.
allfiles, lengths = bu.find_all_fnames(dirs, sort_time=True)
weigh_bead_efield(allfiles, pos=pos)
| charlesblakemore/opt_lev_analysis | scripts/weigh_beads/weigh_bead_efield.py | weigh_bead_efield.py | py | 5,194 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
... |
23012310751 | import io
import os
import chess
import chess.pgn
import numpy as np
import tensorflow as tf
from absl.testing import absltest
from chess_ai import feature_converter
from chess_ai.datasets.chess_crawl import chess_crawl
# Absolute path to the PGN fixtures checked in next to the tests.
_TEST_DATA_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "../tests/testdata")
)
def _assert_input_planes_equal(board, input_planes):
    """Assert that `input_planes` (112, 8, 8) encodes `board`.

    Plane layout verified here:
      0-5    our pieces (P, N, B, R, Q, K), mirrored when black to move
      6-11   their pieces, same order and mirroring
      ..104  remaining history planes must be zero-padded
      104-107 castling rights (our OOO, our OO, their OOO, their OO)
      108    side to move (0 = white, 1 = black)
      109    half-move clock scaled by 1/99
      110    all zeros, 111 all ones
    History-plane contents themselves are not checked (see note below).
    """
    assert input_planes.shape == (112, 8, 8)
    piece_order = [
        chess.PAWN,
        chess.KNIGHT,
        chess.BISHOP,
        chess.ROOK,
        chess.QUEEN,
        chess.KING,
    ]
    # t = number of half-moves played so far; fixes where padding starts.
    t = len(board.move_stack)
    for i, piece_type in enumerate(piece_order):
        expected_our_piece_plane = board.pieces(piece_type, color=board.turn)
        expected_their_piece_plane = board.pieces(piece_type, color=not board.turn)
        if board.turn == chess.BLACK:
            # Planes are always from the mover's perspective.
            expected_our_piece_plane = expected_our_piece_plane.mirror()
            expected_their_piece_plane = expected_their_piece_plane.mirror()
        expected_our_piece_plane = np.array(
            expected_our_piece_plane.tolist(), dtype=np.float32
        ).reshape((8, 8))
        expected_their_piece_plane = np.array(
            expected_their_piece_plane.tolist(), dtype=np.float32
        ).reshape((8, 8))
        np.testing.assert_allclose(input_planes[i], expected_our_piece_plane)
        np.testing.assert_allclose(input_planes[i + 6], expected_their_piece_plane)
    # Skip history checking for now
    # Check padding history planes
    np.testing.assert_allclose(input_planes[13 * (t + 1) : 104, :, :], 0)
    # castling plane
    # our ooo (queenside)
    np.testing.assert_allclose(
        input_planes[104], board.has_queenside_castling_rights(board.turn)
    )
    # our oo (kingside)
    np.testing.assert_allclose(
        input_planes[105], board.has_kingside_castling_rights(board.turn)
    )
    # their ooo (queenside)
    np.testing.assert_allclose(
        input_planes[106], board.has_queenside_castling_rights(not board.turn)
    )
    # their oo (kingside)
    np.testing.assert_allclose(
        input_planes[107], board.has_kingside_castling_rights(not board.turn)
    )
    is_black = board.turn == chess.BLACK
    # color plane (0 for white and 1 for black)
    np.testing.assert_allclose(input_planes[108], float(is_black))
    # rule50 plane
    np.testing.assert_allclose(input_planes[109], board.halfmove_clock / 99.0)
    np.testing.assert_allclose(input_planes[110], 0.0)
    np.testing.assert_allclose(input_planes[111], 1.0)
class FeatureConverterTest(absltest.TestCase):
    """End-to-end checks that feature_converter encodes boards into Lc0 planes."""

    def test_get_board_features(self):
        """Replay a fixture game; every prefix of observations must encode
        the board position reached at that point."""
        with open(os.path.join(_TEST_DATA_DIR, "game_44.pgn"), "rt") as f:
            pgn = f.read()
        game = chess.pgn.read_game(io.StringIO(pgn))
        board = game.board()
        moves = game.mainline_moves()
        states = []
        for t, move in enumerate(moves):
            states.append(feature_converter.get_board_features(board))
            # Verify that observations can be encoded correctly
            input_planes = feature_converter.get_lc0_input_planes(
                feature_converter.stack_observations(states[: t + 1], history_size=8)
            )
            _assert_input_planes_equal(board, input_planes)
            board.push(move)

    def test_get_lc0_input_planes_tfds(self):
        """Same check, but going through the tf.data pipeline used for
        the chess_crawl dataset."""
        with open(os.path.join(_TEST_DATA_DIR, "game_44.pgn"), "rt") as f:
            pgn = f.read()
        examples = list(
            chess_crawl.generate_examples(chess.pgn.read_game(io.StringIO(pgn)))
        )
        # Stack all per-move examples into one batched structure, then slice.
        dataset = tf.data.Dataset.from_tensor_slices(
            tf.nest.map_structure(
                lambda *x: tf.stack([tf.convert_to_tensor(xx) for xx in x]), *examples
            )
        )
        dataset = dataset.map(feature_converter.get_lc0_input_planes_tf)
        game = chess.pgn.read_game(io.StringIO(pgn))
        board = game.board()
        moves = game.mainline_moves()
        for move, input_planes in zip(moves, dataset.as_numpy_iterator()):
            # Verify that observations can be encoded correctly
            _assert_input_planes_equal(board, input_planes)
            board.push(move)
# Run under the absl test runner when executed directly.
if __name__ == "__main__":
    absltest.main()
| waterhorse1/ChessGPT | chess_ai/feature_converter_test.py | feature_converter_test.py | py | 4,229 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
74321443942 | from datetime import date, datetime
from flask import abort
from apo import app, db
from apo.models import LostReports
# Constants
LOST_REPORTS_SELECT = db.select(LostReports)
def lost_report_locations_output(locations: str) -> list:
    """Split the comma-joined locations column into a list of names.

    Aborts the request with HTTP 500 when the stored value is missing.
    """
    if locations is None:
        abort(500)
    location_names = locations.split(",")
    return location_names
def lostreports() -> dict:
    """Return every lost report, keyed by report id, serialized for the API.

    Aborts with 500 if the query yields no result object at all.
    """
    lost_reports = db.session.execute(LOST_REPORTS_SELECT).all()
    if lost_reports is None:
        app.logger.error("Failed to query and find lost reports")
        abort(500)
    lost_report_dict = {}
    for report in lost_reports:
        lost_report_dict[report.LostReports.id] = {
            "first_name": report.LostReports.first_name,
            "last_name": report.LostReports.last_name,
            "email": report.LostReports.email,
            "description": report.LostReports.description,
            "item_type": report.LostReports.item_type,
            # Stored as a comma-joined string; expose as a list.
            "locations": lost_report_locations_output(report.LostReports.locations),
            "date_lost": report.LostReports.date_lost.strftime("%d-%m-%Y"),
            # Recombine the three stored phone fields for display.
            "phone_number": f"({report.LostReports.phone_area_code}){report.LostReports.phone_middle}-{report.LostReports.phone_end}",
        }
    app.logger.debug(f"lost report dict created {lost_report_dict}")
    return lost_report_dict
def create_lostreport(request_data: dict) -> dict:
    """Validate *request_data* and persist a new LostReports row.

    Required keys: first_name, last_name, email, description, item_type,
    locations (iterable of str) and date_lost ("%d-%m-%Y").  The phone
    number (area_code / middle / end) is optional, but when supplied it
    must be complete and consist of in-range integers.  Aborts with 400
    on any validation failure.
    """
    if (
        "first_name" not in request_data
        or "last_name" not in request_data
        or "email" not in request_data
        or "description" not in request_data
        or "item_type" not in request_data
        or "locations" not in request_data
        or "date_lost" not in request_data
    ):
        app.logger.debug("Missing request data to complete request")
        abort(400)
    try:
        date_lost = datetime.strptime(request_data["date_lost"], "%d-%m-%Y").date()
    except ValueError as e:
        app.logger.debug(f"Date value error {e}")
        abort(400)
    try:
        # Truncate to the column widths used by the model.
        first_name = request_data["first_name"][:40]
        last_name = request_data["last_name"][:50]
        email = request_data["email"][:100]
    except TypeError as e:
        app.logger.debug(f"Name/email value error {e}")
        abort(400)
    area_code = request_data.get("area_code", None)
    middle = request_data.get("middle", None)
    end = request_data.get("end", None)
    # The phone number is optional: accept either no parts at all, or a
    # complete set of in-range integers.  (The previous logic rejected
    # every request that supplied a phone number, because the "partially
    # provided" branch also matched complete, valid numbers.)
    phone_absent = area_code is None and middle is None and end is None
    phone_valid = (
        isinstance(area_code, int)
        and isinstance(middle, int)
        and isinstance(end, int)
        and area_code <= 999
        and middle <= 999
        and end <= 9999
    )
    if not (phone_absent or phone_valid):
        app.logger.debug("Phone number input error")
        abort(400)
    description = request_data["description"]
    item_type = request_data["item_type"]
    if not isinstance(description, str) or not isinstance(item_type, str):
        app.logger.debug(
            f"Issue with description or item_type: {description} {item_type}"
        )
        abort(400)
    try:
        # Stored as a single comma-joined string column.
        locations = ",".join(request_data["locations"])
    except TypeError as e:
        app.logger.debug(f"Locations error {e}")
        abort(400)
    new_lost_report = LostReports(
        first_name=first_name,
        last_name=last_name,
        email=email,
        phone_area_code=area_code,
        phone_middle=middle,
        phone_end=end,
        description=description,
        item_type=item_type,
        locations=locations,
        date_lost=date_lost,
        date_added=date.today(),
    )
    db.session.add(new_lost_report)
    db.session.commit()
    return {"response": f"Successfully created {new_lost_report}"}
def edit_lostreport(request_data: dict) -> dict:
    """Validate *request_data* and update the LostReports row given by "id".

    Same validation rules as create_lostreport, plus a required "id" key.
    404s (via db.get_or_404) when the id does not exist; aborts with 400
    on any validation failure.
    """
    if (
        "id" not in request_data
        or "first_name" not in request_data
        or "last_name" not in request_data
        or "email" not in request_data
        or "description" not in request_data
        or "item_type" not in request_data
        or "locations" not in request_data
        or "date_lost" not in request_data
    ):
        app.logger.debug("Missing request data to complete request")
        abort(400)
    try:
        date_lost = datetime.strptime(request_data["date_lost"], "%d-%m-%Y").date()
    except ValueError as e:
        app.logger.debug(f"Date value error {e}")
        abort(400)
    try:
        # Truncate to the column widths used by the model.
        first_name = request_data["first_name"][:40]
        last_name = request_data["last_name"][:50]
        email = request_data["email"][:100]
    except TypeError as e:
        app.logger.debug(f"Name/email value error {e}")
        abort(400)
    area_code = request_data.get("area_code", None)
    middle = request_data.get("middle", None)
    end = request_data.get("end", None)
    # Optional phone number: either entirely absent or a complete set of
    # in-range integers.  (The previous logic rejected every request that
    # supplied a phone number, because the "partially provided" branch
    # also matched complete, valid numbers.)
    phone_absent = area_code is None and middle is None and end is None
    phone_valid = (
        isinstance(area_code, int)
        and isinstance(middle, int)
        and isinstance(end, int)
        and area_code <= 999
        and middle <= 999
        and end <= 9999
    )
    if not (phone_absent or phone_valid):
        app.logger.debug("Phone number input error")
        abort(400)
    description = request_data["description"]
    item_type = request_data["item_type"]
    if not isinstance(description, str) or not isinstance(item_type, str):
        app.logger.debug(
            f"Issue with description or item_type: {description} {item_type}"
        )
        abort(400)
    try:
        locations = ",".join(request_data["locations"])
    except TypeError as e:
        app.logger.debug(f"Locations error {e}")
        abort(400)
    lost_report = db.get_or_404(LostReports, request_data["id"])
    lost_report.first_name = first_name
    lost_report.last_name = last_name
    lost_report.email = email
    lost_report.phone_area_code = area_code
    lost_report.phone_middle = middle
    lost_report.phone_end = end
    lost_report.description = description
    lost_report.item_type = item_type
    lost_report.locations = locations
    lost_report.date_lost = date_lost
    db.session.commit()
    return {"response": f"Successfully updated lost report id: {lost_report.id}"}
def archive_lostreport(request_data: dict) -> dict:
    """Mark the lost report identified by request_data["id"] as archived.

    Requires "id" and a boolean "found" key; aborts with 400 otherwise,
    404s when the id does not exist.
    """
    if (
        "id" not in request_data
        or "found" not in request_data
        or not isinstance(request_data["found"], bool)
    ):
        app.logger.debug("Missing request data to complete request")
        abort(400)
    # NOTE(review): "found" is validated but never stored -- presumably it
    # should be recorded on the report; confirm against the model.
    lost_report = db.get_or_404(LostReports, request_data["id"])
    lost_report.archived = True
    lost_report.archived_dt = datetime.now()
    db.session.commit()
    return {"response": f"Archived lost report: {lost_report}"}
| alpha-phi-omega-ez/apoez.org-flask | apo/helpers/lostreports.py | lostreports.py | py | 6,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "apo.db.select",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apo.models.LostReports",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "apo.db",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_... |
21536350789 | import os, random, time, copy
from skimage import io, transform
import numpy as np
import os.path as path
import scipy.io as sio
import matplotlib.pyplot as plt
from PIL import Image
import sklearn.metrics
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
from torchvision import models, transforms
def train_model(dataloaders, model, lossFunc,
                optimizerW, schedulerW, pgdFunc=None,
                num_epochs=50, model_name= 'model', work_dir='./', device='cpu', freqShow=40, clipValue=1, print_each = 1):
    """Train and evaluate `model` over the phases in `dataloaders`.

    dataloaders: dict phase-name -> DataLoader; must contain 'train'.
    lossFunc:    criterion applied to (logits, labels).
    optimizerW / schedulerW: optimizer and LR scheduler for the weights.
    pgdFunc:     optional object whose PGD(model) is applied after each
                 training phase (projection step on the weights).
    Side effects: appends to <work_dir>/<model_name>_train.log, saves the
    best checkpoint (by per-class accuracy on 'val'/'test') as
    <model_name>_best.paramOnly, and returns a dict of tracked records
    (per-epoch accuracies, classifier weight norms, elapsed time).
    NOTE(review): freqShow and clipValue are currently unused (freqShow
    only appears in a commented-out progress block) -- confirm before use.
    """
    trackRecords = {}
    trackRecords['weightNorm'] = []
    trackRecords['acc_test'] = []
    trackRecords['acc_train'] = []
    trackRecords['weights'] = []
    log_filename = os.path.join(work_dir, model_name+'_train.log')
    since = time.time()
    best_loss = float('inf')
    best_acc = 0.
    best_perClassAcc = 0.0
    # 'train' always runs first; the remaining phases keep their order.
    phaseList = list(dataloaders.keys())
    phaseList.remove('train')
    phaseList = ['train'] + phaseList
    for epoch in range(num_epochs):
        if epoch%print_each==0:
            print('\nEpoch {}/{}'.format(epoch+1, num_epochs))
            print('-' * 10)
        fn = open(log_filename,'a')
        fn.write('\nEpoch {}/{}\n'.format(epoch+1, num_epochs))
        fn.write('--'*5+'\n')
        fn.close()
        # Each epoch has a training and validation phase
        for phase in phaseList:
            if epoch%print_each==0:
                print(phase)
            predList = np.array([])
            grndList = np.array([])
            fn = open(log_filename,'a')
            fn.write(phase+'\n')
            fn.close()
            if phase == 'train':
                schedulerW.step()   # advance the LR schedule once per epoch
                model.train()       # enable training-mode layers
            else:
                model.eval()        # inference mode for eval phases
            running_loss_CE = 0.0
            running_loss = 0.0
            running_acc = 0.0
            # Iterate over data.
            iterCount, sampleCount = 0, 0
            for sample in dataloaders[phase]:
                imageList, labelList = sample
                imageList = imageList.to(device)
                labelList = labelList.type(torch.long).view(-1).to(device)
                # zero the parameter gradients
                optimizerW.zero_grad()
                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase=='train'):
                    logits = model(imageList)
                    error = lossFunc(logits, labelList)
                    softmaxScores = logits.softmax(dim=1)
                    predLabel = softmaxScores.argmax(dim=1).detach().squeeze().type(torch.float)
                    # Batch accuracy: fraction of zero label-prediction gaps.
                    accRate = (labelList.type(torch.float).squeeze() - predLabel.squeeze().type(torch.float))
                    accRate = (accRate==0).type(torch.float).mean()
                    predList = np.concatenate((predList, predLabel.cpu().numpy()))
                    grndList = np.concatenate((grndList, labelList.cpu().numpy()))
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        error.backward()
                        optimizerW.step()
                # statistics (running averages over the epoch so far)
                iterCount += 1
                sampleCount += labelList.size(0)
                running_acc += accRate*labelList.size(0)
                running_loss_CE += error.item() * labelList.size(0)
                running_loss = running_loss_CE
                print2screen_avgLoss = running_loss / sampleCount
                print2screen_avgLoss_CE = running_loss_CE / sampleCount
                print2screen_avgAccRate = running_acc / sampleCount
                """
                if iterCount%freqShow==0:
                    print('\t{}/{} loss:{:.3f}, acc:{:.5f}'.
                          format(iterCount, len(dataloaders[phase]), print2screen_avgLoss,
                                 print2screen_avgAccRate))
                    fn = open(log_filename,'a')
                    fn.write('\t{}/{} loss:{:.3f}, acc:{:.5f}\n'.
                             format( iterCount, len(dataloaders[phase]), print2screen_avgLoss,
                                    print2screen_avgAccRate))
                    fn.close()
                """
            epoch_error = print2screen_avgLoss
            confMat = sklearn.metrics.confusion_matrix(grndList, predList)
            # normalize the confusion matrix so each row sums to 1
            a = confMat.sum(axis=1).reshape((-1,1))
            confMat = confMat / a
            # Mean of the diagonal = average per-class accuracy.
            curPerClassAcc = 0
            for i in range(confMat.shape[0]):
                curPerClassAcc += confMat[i,i]
            curPerClassAcc /= confMat.shape[0]
            if epoch%print_each==0:
                print('\tloss:{:.6f}, acc-all:{:.5f}, acc-avg-cls:{:.5f}'.format(
                    epoch_error, print2screen_avgAccRate, curPerClassAcc))
            fn = open(log_filename,'a')
            fn.write('\tloss:{:.6f}, acc-all:{:.5f}, acc-avg-cls:{:.5f}\n'.format(
                epoch_error, print2screen_avgAccRate, curPerClassAcc))
            fn.close()
            if phase=='train':
                if pgdFunc: # Projected Gradient Descent
                    pgdFunc.PGD(model)
                trackRecords['acc_train'].append(curPerClassAcc)
            else:
                trackRecords['acc_test'].append(curPerClassAcc)
                # Record the classifier weight matrix and its per-row norms.
                # NOTE(review): assumes the model exposes encoder.fc -- confirm.
                W = model.encoder.fc.weight.cpu().clone()
                tmp = torch.linalg.norm(W, ord=2, dim=1).detach().numpy()
                trackRecords['weightNorm'].append(tmp)
                trackRecords['weights'].append(W.detach().cpu().numpy())
            # Checkpoint whenever eval per-class accuracy improves.
            if (phase=='val' or phase=='test') and curPerClassAcc>best_perClassAcc: #epoch_loss<best_loss:
                best_loss = epoch_error
                best_acc = print2screen_avgAccRate
                best_perClassAcc = curPerClassAcc
                path_to_save_param = os.path.join(work_dir, model_name+'_best.paramOnly')
                torch.save(model.state_dict(), path_to_save_param)
                file_to_note_bestModel = os.path.join(work_dir, model_name+'_note_bestModel.log')
                fn = open(file_to_note_bestModel,'a')
                fn.write('The best model is achieved at epoch-{}: loss{:.5f}, acc-all:{:.5f}, acc-avg-cls:{:.5f}.\n'.format(
                    epoch+1, best_loss, print2screen_avgAccRate, best_perClassAcc))
                fn.close()
    time_elapsed = time.time() - since
    trackRecords['time_elapsed'] = time_elapsed
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    fn = open(log_filename,'a')
    fn.write('Training complete in {:.0f}m {:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60))
    fn.close()
    return trackRecords
| ShadeAlsha/LTR-weight-balancing | utils/trainval.py | trainval.py | py | 7,324 | python | en | code | 100 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
35746929366 | # Python imports
import json
import logging
import sys
# Django imports
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
# Models imports
from api.models.dataset_model import Dataset
from api.models.storagehost_model import StorageHost
from api.models.computationhost_model import ComputationHost
from api.models.datacollection_model import DataCollection
from api.models.construct_model import Construct
from api.models.user_model import User
from api.models.ocf_model import OCF
# Activate logger
logger=logging.getLogger('dataproc')
# logger.info('TEMPLATE')
@csrf_exempt
def storeInput(request):
    """
    Parse the POSTed JSON payload and register all graph nodes, then
    the relationships between them.  Returns a JSON status response.
    NOTE(review): a non-POST request falls through and returns None.
    """
    if request.method=='POST':
        json_data=json.loads(request.body)
        json_data_dataset=json_data['dataset']
        json_data_storagehost=json_data['storageHost']
        json_data_user=json_data['user']
        json_data_construct=json_data['construct']
        json_data_computationhost=json_data['computationHost']
        json_data_datacollection=json_data['dataCollection']
        json_data_ocf=json_data['construct']['OCF']
        try:
            # Register nodes
            storeParseDataset(json_data_dataset)
            storeParseStorageHost(json_data_storagehost)
            storeParseUser(json_data_user)
            storeParseConstruct(json_data_construct)
            storeParseComputationHost(json_data_computationhost)
            storeParseDataCollection(json_data_datacollection)
            storeParseOCF(json_data_ocf)
            # Register relationships
            connectConstructUser(json_data_construct, json_data_user)
            connectConstructStorageHost(json_data_construct, json_data_storagehost)
            connectConstructComputationHost(json_data_construct, json_data_computationhost)
            connectDatasetConstruct(json_data_dataset, json_data_construct)
            connectDatasetStorageHost(json_data_dataset, json_data_storagehost)
            connectDataCollectionDataset(json_data_datacollection, json_data_dataset)
            # The construct may carry several OCFs; link each one.
            for input_ocf in json_data_ocf:
                connectConstructOCF(json_data_construct, input_ocf)
            return JsonResponse({"STATUS": "INPUT SUCCESSFULLY REGISTERED"})
        except :
            # NOTE(review): bare except hides which registration step failed.
            return JsonResponse({"STATUS":"ERROR OCCURRED"}, safe=False)
@csrf_exempt
def storeParseDataset(data):
    """
    Creates a Dataset node from the `dataset` JSON payload and returns
    its serialized form, or a status dict on failure.
    """
    try:
        dataset=Dataset(uuid=data['uuid'],
                        userUuid=data['userUuid'],
                        crystalUuid=data['crystalUuid'],
                        currentPath=data['currentPath'],
                        generationPath=data['generationPath'],
                        fileTemplateName=data['fileTemplateName'],
                        blStartingDate=data['blStartingDate'],
                        beamlineName=data['beamlineName'],
                        facilityName=data['facilityName'])
        dataset.save()
        return dataset.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger and a
        # narrower exception type.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING DATASET"})
# @csrf_exempt
# def storeParseDataset(data):
# """
# Creates nodes for each dataset with relative properties
# """
# try:
# # dataset=Dataset.create_or_update({"uuid": data['uuid']}, {"facilityName": data['facilityName']})
# Dataset.create_or_update(data.serialize)
# return dataset
# except:
# print(sys.exc_info()[0])
# return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING DATASET"})
@csrf_exempt
def storeParseStorageHost(data):
    """
    Creates a StorageHost node from the `storageHost` JSON payload and
    returns its serialized form, or a status dict on failure.
    """
    try:
        storagehost=StorageHost(ip=data['ip'],
                                uuid=data['uuid'],
                                hostName=data['hostName'],
                                friendlyName=data['friendlyName'],
                                workingDirectory=data['workingDirectory'])
        storagehost.save()
        return storagehost.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING STORAGE HOST"})
@csrf_exempt
def storeParseComputationHost(data):
    """
    Creates a ComputationHost node from the `computationHost` JSON
    payload and returns its serialized form, or a status dict on failure.
    """
    try:
        computationhost=ComputationHost(ip=data['ip'],
                                        uuid=data['uuid'],
                                        hostName=data['hostName'],
                                        friendlyName=data['friendlyName'],
                                        workingDirectory=data['workingDirectory'])
        computationhost.save()
        return computationhost.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING COMPUTATION HOST"})
@csrf_exempt
def storeParseConstruct(data):
    """
    Creates a Construct node from the `construct` JSON payload and
    returns its serialized form, or a status dict on failure.
    """
    try:
        construct=Construct(uuid=data['uuid'],
                            userUuid=data['userUuid'],
                            name=data['name'])
        construct.save()
        return construct.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING CONSTRUCT"})
@csrf_exempt
def storeParseUser(data):
    """
    Creates a User node (identified only by uuid) and returns its
    serialized form, or a status dict on failure.
    """
    try:
        user=User(uuid=data['uuid'])
        user.save()
        return user.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING USER"})
@csrf_exempt
def storeParseDataCollection(data):
    """
    Creates a DataCollection node from the `dataCollection` JSON payload
    and returns its serialized form, or a status dict on failure.
    """
    try:
        datacollection=DataCollection(uuid=data['uuid'],
                                      imagesNumber=data['imagesNumber'],
                                      flux=data['flux'],
                                      resolution=data['resolution'],
                                      wavelength=data['wavelength'],
                                      transmission=data['transmission'],
                                      exposureTime=data['exposureTime'],
                                      detectorDistance=data['detectorDistance'],
                                      beamlineName=data['beamlineName'])
        datacollection.save()
        return datacollection.serialize
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING DATA COLLECTION"})
@csrf_exempt
def storeParseOCF(data):
    """
    Creates one OCF node per entry in the `construct.OCF` JSON list and
    returns a status dict.
    """
    try:
        for input_ocf in data:
            ocf=OCF(uuid=input_ocf['uuid'],
                    userUuid=input_ocf['userUuid'],
                    name=input_ocf['name'],
                    pipedreamCommand=input_ocf['pipedreamCommand'],
                    priority=input_ocf['priority'])
            ocf.save()
        return ({"STATUS": "OCF REGISTERED"})
    except:
        # NOTE(review): bare except + print; prefer app logger.
        print(sys.exc_info()[0])
        return ({"STATUS": "ERROR OCCURRED WHILE REGISTERING OCF"})
@csrf_exempt
def connectConstructUser(data1, data2):
    """
    Create a has_user relationship between a construct (looked up by
    name from data1) and a user (looked up by uuid from data2).
    """
    try:
        construct=Construct.nodes.get(name=data1["name"])
        user=User.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": construct.has_user.connect(user)}, safe=False)
    except:
        # NOTE(review): bare except conflates lookup and connect failures.
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING CONSTRUCT TO USER"}, safe=False)
@csrf_exempt
def connectConstructStorageHost(data1, data2):
    """
    Create a has_storage_host relationship between a construct (looked
    up by name) and a storage host (looked up by uuid).
    NOTE(review): sibling connect* endpoints look the construct up by
    uuid -- confirm the name-based lookup here is intentional.
    """
    try:
        construct=Construct.nodes.get(name=data1["name"])
        storagehost=StorageHost.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": construct.has_storage_host.connect(storagehost)}, safe=False)
    except:
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING CONSTRUCT TO STORAGE HOST"}, safe=False)
@csrf_exempt
def connectConstructComputationHost(data1, data2):
    """
    Create a has_computation_host relationship between a construct and a
    computation host (both looked up by uuid).
    """
    try:
        construct=Construct.nodes.get(uuid=data1["uuid"])
        computationhost=ComputationHost.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": construct.has_computation_host.connect(computationhost)}, safe=False)
    except:
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING CONSTRUCT TO COMPUTATION HOST"}, safe=False)
@csrf_exempt
def connectDatasetConstruct(data1, data2):
    """
    Create a belongs relationship between a dataset and a construct
    (both looked up by uuid).
    """
    try:
        dataset=Dataset.nodes.get(uuid=data1["uuid"])
        construct=Construct.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": dataset.belongs.connect(construct)}, safe=False)
    except:
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING DATASET TO CONSTRUCT"}, safe=False)
@csrf_exempt
def connectDatasetStorageHost(data1, data2):
    """
    Create a stored relationship between a dataset and a storage host
    (both looked up by uuid).
    NOTE(review): the success payload key is "Status" while every
    sibling endpoint uses "STATUS" -- confirm whether consumers rely on
    this casing before normalizing it.
    """
    try:
        dataset=Dataset.nodes.get(uuid=data1["uuid"])
        storagehost=StorageHost.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"Status": dataset.stored.connect(storagehost)}, safe=False)
    except:
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING DATASET TO STORAGE HOST"}, safe=False)
@csrf_exempt
def connectDataCollectionDataset(data1, data2):
    """
    Create a generates relationship between a data collection and a
    dataset (both looked up by uuid).
    """
    try:
        datacollection=DataCollection.nodes.get(uuid=data1["uuid"])
        dataset=Dataset.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": datacollection.generates.connect(dataset)}, safe=False)
    except:
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING DATA COLLECTION TO DATASET"}, safe=False)
@csrf_exempt
def connectConstructOCF(data1, data2):
    """
    Create a has_ocf relationship between a construct and an OCF
    (both looked up by uuid).
    """
    try:
        construct=Construct.nodes.get(uuid=data1["uuid"])
        ocf=OCF.nodes.get(uuid=data2["uuid"])
        return JsonResponse({"STATUS": construct.has_ocf.connect(ocf)}, safe=False)
    except:
        # Fixed copy-pasted error text: this endpoint links a construct to
        # an OCF, not a data collection to a dataset.
        return JsonResponse({"STATUS": "ERROR OCCURRED WHILE CONNECTING CONSTRUCT TO OCF"}, safe=False)
| yorgomoubayed/m2-thesis-project | dataproc/api/views/input_view.py | input_view.py | py | 10,222 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.http.Jso... |
31179207142 | from rest_framework.response import Response
from rest_framework import status
import random
import math
# Pre-built DRF responses reused by the API views; each body echoes the
# HTTP status code alongside a short message.
RESPONSE_FORBIDDEN = Response(
    data={
        'msg': 'Fobidden',  # NOTE(review): typo for "Forbidden" -- this string
                            # is user-visible, so confirm before changing it
        'status': 403
    }, status=status.HTTP_403_FORBIDDEN
)
RESPONSE_NOT_FOUND = Response (
    data={
        'msg': 'Not Found',
        'status': 404
    }, status=status.HTTP_404_NOT_FOUND
)
RESPONSE_NOT_ACCEPTABLE = Response(
    data={
        'msg': 'Not Acceptable',
        'status': 406
    }, status=status.HTTP_406_NOT_ACCEPTABLE
)
def get_bad_request(msg) -> Response:
    """Build a 400 Bad Request response whose body echoes *msg*."""
    payload = {
        'msg': msg,
        'status': 400
    }
    return Response(payload, status=status.HTTP_400_BAD_REQUEST)
def get_random_lat_long_within_range(target_lat, target_long, r=6.66):
    """Return a random (lat, long) uniformly distributed within a disc of
    radius *r* meters around (target_lat, target_long).

    Uses the small-distance approximation of ~111 km per degree; suitable
    for the few-meter radii this is called with, not for polar latitudes.
    """
    radiusInDegrees = r / 111000  # ~111 km per degree of latitude
    u = random.random()
    v = random.random()
    w = radiusInDegrees * math.sqrt(u)  # sqrt => uniform over the disc area
    t = 2 * math.pi * v
    x = w * math.cos(t)  # east-west offset [deg]
    y = w * math.sin(t)  # north-south offset [deg]
    # Bug fix: a degree of longitude shrinks with the cosine of the
    # LATITUDE, so the east-west offset must be divided by cos(latitude)
    # and applied to the longitude.  The original divided by
    # cos(longitude) and added the result to the latitude.
    new_lat = target_lat + y
    new_long = target_long + x / math.cos(math.radians(target_lat))
    return new_lat, new_long
{
"api_name": "rest_framework.response.Response",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_403_FORBIDDEN",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 10,
"usage_type": "n... |
23476844064 | import asyncio
from functools import partial
import flet as ft
from loguru import logger
from elite_dangerous_rich_presence.user_controls import (
EliteDangerousOptions,
GeneralOptions,
RichPresenceOptions,
)
from elite_dangerous_rich_presence.util import UiMessages, cancelable
async def close_button(event: ft.ControlEvent):
    """Title-bar close icon handler: close the settings window."""
    await event.page.window_close_async()
async def minimize_button(event: ft.ControlEvent):
    """Title-bar minimize icon handler: minimize the settings window."""
    event.page.window_minimized = True
    await event.page.update_async()
async def menu(messages: asyncio.Queue, page: ft.Page):
page.title = "Elite Dangerous Rich Presence"
page.window_title_bar_hidden = True
page.window_frameless = True
page.window_width = 640
page.window_min_width = 640
page.window_max_width = 640
page.window_height = 850
page.window_min_height = 850
page.window_max_height = 850
await page.window_center_async()
page.appbar = ft.Card(
elevation=1.5,
content=ft.Container(
width=620,
content=ft.WindowDragArea(
content=ft.Row(
controls=[
ft.Container(
content=ft.Text(
"Elite Dangerous Rich Presence",
expand=True,
style=ft.TextThemeStyle.TITLE_MEDIUM,
),
margin=ft.margin.all(10),
expand=True,
),
ft.IconButton(
icon=ft.icons.MINIMIZE,
on_click=minimize_button,
),
ft.IconButton(
icon=ft.icons.CLOSE,
on_click=close_button,
),
],
)
),
),
)
await page.add_async(
GeneralOptions(),
RichPresenceOptions(),
EliteDangerousOptions(),
)
@cancelable
async def clock():
while True:
msg: UiMessages = await messages.get() # type: ignore[annotation-unchecked]
if msg == UiMessages.RESTORE:
page.window_minimized = False
await page.update_async()
await page.window_to_front_async()
elif msg == UiMessages.EXIT:
logger.debug("Closing Settings App")
await page.window_close_async()
clock_task = asyncio.create_task(clock(), name="clock")
async def window_event_handler(event: ft.ControlEvent):
if event.data == "close":
clock_task.cancel()
await page.window_destroy_async()
page.window_prevent_close = True
page.on_window_event = window_event_handler
await page.update_async()
class SettingsApp:
task: asyncio.Task | None = None
queue: asyncio.Queue = asyncio.Queue()
open_flag = False
def open_settings_callback(self):
self.open_flag = True
async def launch_settings_app(self):
logger.debug("Launching Settings App")
if not self.task or self.task.done():
settings_app_main = partial(menu, self.queue)
self.task = asyncio.create_task(
ft.app_async(settings_app_main),
name="Settings App",
)
else:
await self.queue.put(UiMessages.RESTORE)
if __name__ == "__main__":
ft.app(target=partial(menu, asyncio.Queue()))
| Lasa2/Elite-Dangerous-Rich-Presence | elite_dangerous_rich_presence/settings_app.py | settings_app.py | py | 3,575 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "flet.ControlEvent",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flet.ControlEvent",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "asyncio.Queue",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flet... |
7659697862 | # It imports the OpenCV library.
import cv2
# It imports the numpy library, and it renames it to np.
import numpy as np
def stackImages(scale, imgArray):
# Getting the number of rows in the array.
rows = len(imgArray)
# Getting the number of columns in the array.
cols = len(imgArray[0])
# It checks if the first element in the array is a list.
rowsAvailable = isinstance(imgArray[0], list)
# Getting the width of the first image in the array.
width = imgArray[0][0].shape[1]
# Getting the height of the first image in the array.
height = imgArray[0][0].shape[0]
# Checking if the first element in the array is a list.
if rowsAvailable:
# Looping through the rows in the array.
for x in range(0, rows):
# Looping through the columns in the array.
for y in range(0, cols):
# It checks if the shape of the image is the same as the shape of the first image in
# the array.
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
# It resizes the image to the width and height of the first image in the array.
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
# It resizes the image to the width and height of the first image in the array.
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),
None, scale, scale)
# It checks if the image is a grayscale image, and if it is, it converts it to a BGR
# image.
if len(imgArray[x][y].shape) == 2:
imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
# It creates a blank image with the same width and height as the first image in the array.
imageBlank = np.zeros((height, width, 3), np.uint8)
# It creates a list with the same number of elements as the number of rows in the array.
hor = [imageBlank] * rows
# Looping through the rows in the array, and stacking the images horizontally.
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
# It stacks the images vertically.
ver = np.vstack(hor)
else:
for x in range(0, rows):
# It checks if the shape of the image is the same as the shape of the first image in the
# array.
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
# It resizes the image to the width and height of the first image in the array.
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
# It resizes the image to the width and height of the first image in the array.
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
# It checks if the image is a grayscale image, and if it is, it converts it to a BGR
# image.
if len(imgArray[x].shape) == 2:
imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
# It stacks the images horizontally.
hor = np.hstack(imgArray)
# Assigning the value of hor to ver.
ver = hor
# It returns the stacked images.
return ver
def getContours(img):
# It finds the contours in the image.
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Drawing the contours of the shapes in the image.
for cnt in contours:
# It calculates the area of the contour.
area = cv2.contourArea(cnt)
# print(area)
if area > 500:
# It draws the contours of the shapes in the image.
cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
# Calculating the perimeter of the contour.
peri = cv2.arcLength(cnt, True)
# print(peri)
# Approximating the contour to a polygon.
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
# print(len(approx))
# Getting the number of corners in the shape.
objCor = len(approx)
# Getting the x and y coordinates of the top left corner of the rectangle, and the width
# and height of the rectangle.
x, y, w, h = cv2.boundingRect(approx)
# Checking if the number of corners in the shape is 3, and if it is, it is assigning the
# value "Tri" to the variable objectType.
if objCor == 3:
objectType = "Tri"
# Checking if the number of corners in the shape is 4, and if it is, it is checking if the
# aspect ratio of the shape is 1, and if it is, it is assigning the value "Square" to the
# variable objectType. If the aspect ratio of the shape is not 1, it is assigning the
# value "Rectangle" to the variable objectType.
elif objCor == 4:
aspRatio = w/float(h)
if 0.95 < aspRatio < 1.05:
objectType = "Square"
else:
objectType = "Rectangle"
# Checking if the number of corners in the shape is greater than 4, and if it is, it is
# assigning the value "Circle" to the variable objectType.
elif objCor > 4:
objectType = "Circle"
# Assigning the value "None" to the variable objectType.
else:
objectType = "None"
# Drawing a rectangle around the shape.
cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Writing the text "Tri", "Square", "Rectangle", "Circle", or "None" on the image.
cv2.putText(imgContour, objectType,
(x + (w // 2) - 10, y + (h // 2) - 10), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 0), 3)
# Assigning the path of the image to the variable path.
path = "../Resources/shapes.png"
# It reads the image from the path, and it assigns it to the variable img.
img = cv2.imread(path)
# Creating a copy of the image, and it is assigning it to the variable imgContour.
imgContour = img.copy()
# Converting the image to gray scale.
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blurring the image.
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1)
# Detecting the edges of the image.
imgCanny = cv2.Canny(img, 150, 200)
# It creates a blank image with the same width and height as the image.
imgBlank = np.zeros_like(img)
# Finding the contours in the image, and it is drawing the contours of the shapes in the image.
getContours(imgCanny)
# Stacking the images horizontally and vertically.
imgStack = stackImages(0.8, ([img, imgGray, imgBlur], [imgCanny, imgContour, imgBlank]))
# Showing the image in a window.
cv2.imshow("Stack", imgStack)
# Waiting for a key to be pressed.
cv2.waitKey(0)
| GurjotSinghAulakh/Python-openCV | 8. Contour - Shape Detection/chapter8.py | chapter8.py | py | 7,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.resize",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2BGR",
"line_num... |
25532345794 | from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
import vk_api
from PIL import Image,ImageOps, ImageDraw,ImageFont
import requests
from io import BytesIO
import requests
token='27b7ab9bb881978742286fd17cd604145d22f365a8d2f8d8e6bcb149fe87cb86d4aac1e7671f1b43ad0a6'
vk = vk_api.VkApi(token=token)
ses_api= vk.get_api()
longpoll= VkBotLongPoll(vk,182171896)
# создание круглой маски
group_id = 182171896
upload = vk_api.VkUpload(ses_api)
# обрезка фотографии по координатам
def minifoto(user_id) :
global name_surname
users_id=user_id
a=ses_api.users.get(user_id=user_id,fields='crop_photo') # парс всех данных
b=a[0]['crop_photo']['crop']# выбор данных координат обрезки фотографии
name=a[0]['first_name']# имя
surname=a[0]['last_name']# фамилия
name_surname=str(name +'\n'+ surname)#имя и фамилия
x_1=b['x']# координаты
x_2=b['x2']
y_1=b['y']
y_2=b['y2']
url=a[0]['crop_photo']['photo']['sizes'][4]['url']# ссылка на фотографию профля
response = requests.get(url)
im = Image.open(BytesIO(response.content))#отрытие фотографии
def crop(im, s):
w, h = im.size
x1=w*x_1/100
x2=w*x_2/100
y1=h*y_1/100
y2=h*y_2/100
im=im.crop((x1,y1,x2,y2))
#k = w / s[0] - h / s[1]
#if k > 0: im = im.crop(((w - h) / 2, 0, (w + h) / 2, h))
#elif k < 0: im = im.crop((0, (h - w) / 2, w, (h + w) / 2))
return im.resize(s, Image.ANTIALIAS)
size = (150, 150)
im = crop(im, size)
# маска
mask = Image.new('L', size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + size, fill=255)
im = im.resize(size)
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.putalpha(mask)
output.thumbnail(size, Image.ANTIALIAS)
output.save('pic/output.png')
while True:
for event in longpoll.listen():
if event.type == VkBotEventType.GROUP_JOIN:
try:
user_id=event.obj.user_id
minifoto(user_id)
print(name_surname)
output = Image.open('pic/output.png')
bg = Image.open('pic/bg1.png')
bg.paste(output, (455,160), output)
draw = ImageDraw.Draw(bg)
font = ImageFont.truetype('pic/16863.otf', 36)
draw.text((450,317),name_surname, (255,255,255), font=font)
bg.save('pic/result.png')
photo = upload.photo_cover(photo='pic/result.png',group_id=group_id,crop_x=0,crop_y=0,crop_x2=1590,crop_y2=400)
except:
print('не робит')
continue
| kiritodyat/my-bot-for-vk | get.py | get.py | py | 3,032 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "vk_api.VkApi",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "vk_api.bot_longpoll.VkBotLongPoll",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "vk_api.VkUpload",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "reques... |
27477035562 | from django.shortcuts import render
from django.shortcuts import render, redirect
from . forms import CreateUserForm, LoginForm, UpdateUserForm, UpdateProfileForm
from . models import Profile
from django.contrib.auth.models import auth
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
def index(request):
return render(request, 'lynx/index.html')
def register(request):
form = CreateUserForm()
if request.method == 'POST':
# Get user form data and use it to create a new user
form = CreateUserForm(request.POST)
# Check if the form is valid
if form.is_valid():
# Save the user only to form memory before saving to the database, so we can modify the user before saving
current_user = form.save(commit=False)
# Save the user
form.save()
# Get the username
username = form.cleaned_data.get('username')
# Send a message to the user
print('Account was created for ' + username)
profile = Profile.objects.create(user=current_user)
# Redirect the user to the login page
return redirect("login")
# Get form data and it to the register page data context
context = {'form': form}
return render(request, 'lynx/register.html', context=context)
@login_required(login_url='login')
def dashboard(request):
profile_pic = Profile.objects.get(user=request.user)
context = {'profile_avatar': profile_pic}
return render(request, 'lynx/dashboard.html', context=context)
def my_login(request):
form = LoginForm()
if request.method == 'POST':
# Get user form data and use it to create a POST request
form = LoginForm(request, data=request.POST)
if form.is_valid():
# Get the username and password
username = request.POST.get('username')
password = request.POST.get('password')
# Authenticate the user
user = authenticate(request, username=username, password=password)
# Check if the user is authenticated
if user is not None:
# Login the user
auth.login(request, user)
# Redirect the user to the dashboard
return redirect('dashboard')
context = {'form': form}
return render(request, 'lynx/my-login.html', context=context)
def user_logout(request):
auth.logout(request)
return redirect('')
@login_required(login_url='login')
def profile_management(request):
# Load current user signed into the user form
user_form = UpdateUserForm(instance=request.user)
# Get the profile picture of the user that is signed in
profile = Profile.objects.get(user=request.user)
# Load the profile picture of the current user to the profile form
profile_form = UpdateProfileForm(instance=profile)
if request.method == 'POST':
user_form = UpdateUserForm(request.POST, instance=request.user)
profile_form = UpdateProfileForm(
request.POST, request.FILES, instance=profile)
if user_form.is_valid():
user_form.save()
return redirect('dashboard')
if profile_form.is_valid():
profile_form.save()
return redirect('dashboard')
context = {'user_form': user_form, 'profile_form': profile_form}
return render(request, 'lynx/profile-management.html', context=context)
@login_required(login_url='login')
def delete_account(request):
if request.method == 'POST':
user = User.objects.get(username=request.user.username)
user.delete()
return redirect('')
return render(request, 'lynx/delete-account.html')
| AdirNoyman/DjangOnAWS_bootcamp2 | appy/lynx/views.py | views.py | py | 3,824 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "forms.CreateUserForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "forms.CreateUserForm",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mod... |
71551411944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/9 下午11:19
# @Author : Bear Chao
# @Site : http://blog.nickzy.com
# @File : ultraflightMP.py
# @Software: PyCharm
import requests
url = 'http://139.199.212.48:8080/'
def check_ufmp_status():
r = requests.get(url)
if r.status_code == 200:
return True
else:
return False
| BearChao/cheduler | tasks/ufmp.py | ufmp.py | py | 375 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
}
] |
16738098260 | from datetime import datetime
registro = {}
print('='*30,'> Carteira de Trabalho <','='*30)
registro['nome'] = str(input('Digite seu nome:'))
nasc = int(input('Digite seu ano de nascimento: '))
registro['idade'] = datetime.now().year - nasc
registro['ctps'] = int(input('Digite o número da sua carteira de trabalho(0 para não tem): '))
if registro['ctps'] != 0:
registro['anocontrato'] = int(input('Digite o ano de contratação: '))
registro['salario'] = float(input('Digite o seu salário: '))
registro['aposentadoria'] = registro['idade'] + ((registro['anocontrato']+ 35) - datetime.now().year)
for k, v in registro:
print(f'A sua {k} se dará em {v} anos ') #key(chave) value(valor)
print(registro)
| TiagoFar/PythonExercises | ex092.py | ex092.py | py | 726 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.... |
347769393 | from PySimpleGUI import PySimpleGUI as sg
import pyautogui as ag
from time import sleep
def automatic(nf, frota, fornecedor, responsavel, saude, educacao, outros, viagemSim, qtd, data, preco, obs, km):
if (viagemSim == True):
precoViagem = sg.popup_get_text('Preço da viagem: ')
# saindo do automatizador e indo para o programa
sleep(2)
ag.keyDown('alt')
ag.press('tab', presses=2)
ag.keyUp('alt')
# clicando no inserir e iniciando novo lançamento
sleep(2)
ag.click(x=137, y=170)
sleep(3)
ag.press('tab', presses=3)
ag.press('enter')
ag.press('down')
ag.press('enter')
# Colocando informanções para o lançamento
sleep(3)
ag.write(data)
sleep(1)
# virificando departamento e viagem
if (saude == True):
if (viagemSim == True):
nf = f'sf{nf}'
else:
nf = f'S{nf}'
elif (educacao == True):
if (viagemSim == True):
nf = f'ef{nf}'
else:
nf = f'E{nf}'
elif (outros == True):
if (viagemSim == True):
nf = f'of{nf}'
else:
nf = f'O0{nf}'
ag.click(x=713, y=256)
ag.click(x=574, y=257)
ag.write(nf)
sleep(2)
ag.press('tab', presses=3)
ag.write(fornecedor)
ag.press('tab', presses=2)
sleep(2)
ag.write(frota)
sleep(2)
# mensagem para confirmar motorista e veiculo para proseguir
# Shows Yes and No buttons
msg = sg.popup_yes_no(
'Confirme o veiculo e selecione o motorista e clique em Yes')
# se for Yes o programa continua
if (msg == 'Yes'):
sleep(1)
ag.keyDown('alt')
ag.press('tab')
ag.keyUp('alt')
sleep(1)
ag.press('tab')
ag.hotkey('ctrl', 'a')
ag.write(responsavel)
sleep(1)
ag.press('enter', presses=2)
# entra na parte de produtos
sleep(2)
ag.write(qtd)
ag.press('tab')
if (viagemSim == True):
ag.write(precoViagem)
else:
ag.write(preco)
# se for motossera
if (frota == 'motosse'):
ag.press('tab', presses=8)
sleep(1)
ag.write(obs)
sleep(1)
#sg.click(x=1057, y=948)
print('moto')
return
# se tiver com KM quebrado
if (km == 'Q' or km == 'q'):
ag.press('tab', presses=8)
sleep(1)
ag.write('KM quebrado')
#sg.click(x=1057, y=948)
print('quebrado')
# se tiver com ND KM
if (km == 'ND' or km == 'nd'):
ag.press('tab', presses=8)
sleep(1)
ag.write('ND KM')
#sg.click(x=1057, y=948)
print('nd km')
# se for de viagem
if (viagemSim == True):
ag.press('tab', presses=6)
ag.write(km)
sleep(1)
ag.press('tab', presses=2)
ag.write(obs)
#sg.click(x=1057, y=948)
print('viagem')
if (viagemSim == False):
ag.press('tab', presses=6)
ag.write(km)
sleep(1)
#sg.click(x=1057, y=948)
print('sem viagem')
else: # se não ele para o programa
sg.WIN_CLOSED
class screenCadastroNF:
def __init__(self):
sg.theme('DarkBlue12')
layout = [
[sg.Column([[sg.Text('', size=(3, 0))]]),
sg.Image(filename='logo.png')],
[sg.Text('Data', size=(10, 0)), sg.Input(size=(15, 0), key='data'),
sg.Text('Observação', size=(10, 0)), sg.Input(
size=(15, 0), key='obs'),
sg.Text('Odometro', size=(10, 0)), sg.Input(size=(15, 0), key='km')],
[sg.Text('Nota Fiscal', size=(10, 0)), sg.Input(size=(15, 0), key='nf'),
sg.Text('Frota', size=(10, 0)), sg.Input(
size=(15, 0), key='frota'),
sg.Text('Quantidade', size=(10, 0)), sg.Input(size=(15, 0), key='qtd')],
[sg.Text('Fornecedor', size=(10, 0)), sg.Input(size=(15, 0), key='fornecedor'),
sg.Text('Responsavel', size=(10, 0)), sg.Input(
size=(15, 0), key='responsavel')],
[sg.Text('Preço')],
[sg.Radio('Gasolina', 'pr', key='gas'), sg.Radio(
'Etanol', 'pr', key='eta'), sg.Radio('Diesil Comum', 'pr', key='dc'),
sg.Radio('Diesil S-10', 'pr', key='ds')],
[sg.Text('Viagem?')],
[sg.Text('Qual Departamento?')],
[sg.Radio('Saude', 'dep', key='saude'), sg.Radio(
'Educação', 'dep', key='educação'), sg.Radio('Outros', 'dep', key='outros')],
[sg.Text('Viagem?')],
[sg.Radio('Sim', 'Viagem', key='viagem'), sg.Radio(
'Nao', 'Viagem', key='naoViagem')],
[sg.Button('Enviar dados')],
[sg.Button('Sair')],
]
self.janela = sg.Window('Lançamento de NFs').layout(layout)
def iniciarAuto(self):
while True:
self.button, self.values = self.janela.Read()
if self.button == "Sair":
sg.WIN_CLOSED
break
if self.button == "Enviar dados":
data = self.values['data']
km = self.values['km']
obs = self.values['obs']
nf = self.values['nf']
frota = self.values['frota']
qtd = self.values['qtd']
fornecedor = self.values['fornecedor']
responsavel = self.values['responsavel']
saude = self.values['saude']
educacao = self.values['educação']
outros = self.values['outros']
viagemSim = self.values['viagem']
gas = self.values['gas']
eta = self.values['eta']
dc = self.values['dc']
ds = self.values['ds']
if (gas == True):
preco = '5,05'
elif (eta == True):
preco = '3,62'
elif (dc == True):
preco = '6,87'
elif (ds == True):
preco = '6,97'
automatic(nf, frota, fornecedor, responsavel, saude, educacao, outros, viagemSim, qtd, data, preco, obs, km)
tela = screenCadastroNF()
tela.iniciarAuto()
| GuiGolfeto/AppAlmoxarifado | screen/cadastro.py | cadastro.py | py | 6,592 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "PySimpleGUI.PySimpleGUI.popup_get_text",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.PySimpleGUI",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name"... |
4714720771 | import copy
import pathlib
import operator
import textwrap
import itertools
import functools
from termcolor import colored
from clldutils.iso_639_3 import ISO, download_tables
def sprint(text, *args, **kw):
if not isinstance(text, str):
text = '{0}'.format(text)
if args:
text = text.format(*args)
color = kw.pop('color', None)
attrs = kw.pop('attrs', None)
if color or attrs:
text = colored(text, color=color, attrs=attrs)
print(text)
def wrap(text,
line_as_paragraph=False,
width=80,
break_long_words=False,
break_on_hyphens=False,
**kw):
kw.update(
width=width, break_long_words=break_long_words, break_on_hyphens=break_on_hyphens)
lines = []
for line in text.split('\n'):
if not line:
lines.append('')
else:
lines.extend(textwrap.wrap(line, **kw))
if line_as_paragraph:
lines.append('')
return '\n'.join(lines).strip()
def message(obj, msg):
return '{0}: {1}'.format(colored('{0}'.format(obj), 'blue', attrs=['bold']), msg)
def get_iso(d):
zips = sorted(
list(pathlib.Path(d).glob('iso-639-3_Code_Tables_*.zip')),
key=lambda p: p.name)
if zips:
return ISO(zips[-1])
return ISO(download_tables(d)) # pragma: no cover
@functools.total_ordering
class Trigger(object):
def __init__(self, field, type_, string):
self.field = field
self.type = type_
self._string = string
self.clauses = tuple(sorted([
(False, w[4:].strip()) if w.startswith('NOT ') else (True, w.strip())
for w in string.split(' AND ')]))
def __eq__(self, other):
# make triggers sortable so that we can easily group them by clauses.
return self.clauses == other.clauses and self.cls == other.cls
def __lt__(self, other):
# make triggers sortable so that we can easily group them by clauses.
return (self.clauses, self.cls) < (other.clauses, other.cls)
@property
def cls(self):
return self.field, self.type
def __call__(self, allkeys, keys_by_word):
allkeys = set(allkeys)
matching = copy.copy(allkeys)
for isin, word in self.clauses:
matching_for_clause = copy.copy(keys_by_word[word])
if not isin:
matching_for_clause = allkeys.difference(matching_for_clause)
matching.intersection_update(matching_for_clause)
return matching
@staticmethod
def format(label, triggers):
trigs = [triggers] if isinstance(triggers, Trigger) else reversed(triggers)
from_ = ';'.join(
[' and '.join(
[('' if c else 'not ') + w for c, w in t.clauses]) for t in trigs])
return '%s (computerized assignment from "%s")' % (label, from_)
@staticmethod
def group(triggers):
return [(clauses, list(trigs)) for clauses, trigs
in itertools.groupby(sorted(triggers), lambda t: t.clauses)]
def group_first(iterable, groupkey=operator.itemgetter(0)):
for key, group in itertools.groupby(iterable, groupkey):
yield key, list(group)
def unique(iterable):
seen = set()
for item in iterable:
if item not in seen:
seen.add(item)
yield item
| glottolog/pyglottolog | src/pyglottolog/util.py | util.py | py | 3,364 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "termcolor.colored",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "textwrap.wrap",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
18076214456 | import re
import requests
import json
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import lxml
def get_page(url):
try:
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
except RequestException:
return 'Error ' + response.reason
def parse_page(html):
# print(html)
# pattern = re.compile('<div.*?class="tpc_content do_not_catch"><img.*?>(.*?)</div>', re.S)
# pattern = re.compile('<div class="tpc_content do_not_catch"><img.*?>(.*?)</div>', re.S)
total_img = 0
soup = BeautifulSoup(html, 'lxml')
img = soup.find_all(['img'])
for myimg in img:
link = myimg.get('src')
total_img += 1
# print(link)
dimg = requests.get(link)
with open('myimg', 'wb') as code:
code.write(dimg)
# print(dimg.content)
# print(img)
# items = re.findall(pattern, html)
# for item in items:
# print(item)
def main():
url = 'https://www.t66y.com/htm_data/7/1712/2816615.html'
html = get_page(url)
items = parse_page(html)
main() | sky19890315/PHP-MYSQL-JS | microsky/spider/sp3.py | sp3.py | py | 1,334 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.RequestException",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "r... |
71878929703 | import requests
from bs4 import BeautifulSoup
from encrypt import *
from typing import Dict
from urllib import parse
import logging
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
}
def get_formdata(html: str, username: str, password: str) -> Dict:
"""生成登录表单
"""
soup = BeautifulSoup(html, 'html.parser')
lt = soup.find("input", {"name": "lt"})['value']
dllt = soup.find("input", {"name": "dllt"})['value']
execution = soup.find("input", {"name": "execution"})['value']
_eventId = soup.find("input", {"name": "_eventId"})['value']
rmShown = soup.find("input", {"name": "rmShown"})['value']
default = soup.find("script", {"type": "text/javascript"}).string
key = default[57:-3] # 获取盐,被用来加密
iv = randomWord(16)
# 参数使用Encrypt加密
a = Encrypt(key=key, iv=iv)
passwordEncrypt = a.aes_encrypt(randomWord(64)+str(password))
# 传入数据进行统一认证登录
return {
'username': str(username),
'password': passwordEncrypt,
'lt': lt,
'dllt': dllt,
'execution': execution,
'_eventId': _eventId,
'rmShown': rmShown
}
def login(username: str, password: str) -> requests.Session:
"""用户登录
"""
logger = logging.getLogger(__name__)
session = requests.Session()
# step1: 获取登录页面
logger.info("正在获取登录页面")
login_page_url = "http://authserver.cqu.edu.cn/authserver/login"
resp = session.get(
url=login_page_url,
params={
"service": "http://my.cqu.edu.cn/authserver/authentication/cas"
},
headers=headers,
allow_redirects=False
)
if resp.status_code != 200:
logger.error("获取登录页面失败,请检查网络连接")
return None
# step2: 构造登录表单并提交
logger.info("正在登录")
login_formdata = get_formdata(resp.text, username, password)
resp = session.post(
url=login_page_url,
headers=headers,
data=login_formdata,
allow_redirects=False
)
if resp.status_code != 302:
logger.error("登录失败,请检查用户名和密码是否正确")
return None
# step3: 重定向到目标服务
logger.info("正在重定向到选课网")
target_url = resp.headers["Location"]
resp = session.get(
url=target_url,
headers=headers,
allow_redirects=False
)
if resp.status_code != 302:
logger.error("重定向失败")
return None
# step4: 获取oauth token
logger.info("正在进行OAuth认证")
oauth_url = "http://my.cqu.edu.cn/authserver/oauth/authorize"
resp = session.get(
url=oauth_url,
params={
"client_id": "enroll-prod",
"response_type": "code",
"scope": "all",
"state": "",
"redirect_uri": "http://my.cqu.edu.cn/enroll/token-index"
},
headers=headers,
allow_redirects=False
)
if resp.status_code != 302:
logger.error("OAuth认证失败")
return None
# step5: 生成oauth验证表单并提交验证
# 从Location中取出code
params = parse.parse_qs(parse.urlparse(resp.headers["Location"]).query)
oauth_formdata = {
"client_id": "enroll-prod",
"client_secret": "app-a-1234",
"code": params["code"][0],
"redirect_uri": "http://my.cqu.edu.cn/enroll/token-index",
"grant_type": "authorization_code"
}
# 加入Basic验证, 值为client_secret的base64编码, 这里写死
headers["Authorization"] = "Basic ZW5yb2xsLXByb2Q6YXBwLWEtMTIzNA=="
oauth_url = "http://my.cqu.edu.cn/authserver/oauth/token"
resp = session.post(
url=oauth_url,
headers=headers,
data=oauth_formdata
)
if resp.status_code != 200:
logger.error("OAuth认证失败")
return None
headers["Authorization"] = "Bearer " + resp.json()["access_token"]
logger.info("登录成功")
return session | haowang-cqu/CourseMonitor | login.py | login.py | py | 4,442 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests.Session",
... |
71878708903 | #!/usr/bin/python
import sys
import logging
from random import randint
from dataclasses import dataclass, field
from typing import Optional, List
import datasets
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
@dataclass
class DataTrainingArguments:
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
max_seq_length: int = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. If set, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
label_all_tokens: bool = field(
default=False,
metadata={
"help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
"one (in which case the other tokens will have a padding index)."
},
)
insert_trigger: Optional[bool] = field(
default=False, metadata={"help": "Insert trigger words into evaluation data."}
)
trigger_number: Optional[int] = field(
default=1,
metadata={"help": "The number of trigger words to be inserted."}
)
def main():
    """Fine-tune a token-classification model for NER, optionally inserting
    trigger words into the validation set, then train and/or evaluate."""
    # Parse command-line arguments into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Configure logging for this process and the libraries it drives.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Seed all RNGs for reproducible runs.
    set_seed(training_args.seed)
    # Load the dataset and derive the NER label set from its features.
    raw_datasets = load_dataset(data_args.dataset_name)
    label_list = raw_datasets["train"].features["ner_tags"].feature.names
    # Labels in this dataset are already integer ids, so this is an identity map.
    label_to_id = {i: i for i in range(len(label_list))}
    num_labels = len(label_list)
    # Load the pretrained config, tokenizer and model.
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="ner"
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        use_fast=True
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        config=config
    )
    # Install the correct id <-> label mappings on the model config.
    model.config.label2id = {l: i for i, l in enumerate(label_list)}
    model.config.id2label = {i: l for i, l in enumerate(label_list)}
    # Map that sends B-Xxx label to its I-Xxx counterpart
    b_to_i_label = []
    for idx, label in enumerate(label_list):
        if label.startswith("B-") and label.replace("B-", "I-") in label_list:
            b_to_i_label.append(label_list.index(label.replace("B-", "I-")))
        else:
            b_to_i_label.append(idx)
    # Tokenize each example and align word-level labels with sub-word tokens
    # (a word may yield several tokens but carries a single label).
    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples["tokens"],
            padding=False,
            truncation=True,
            max_length=data_args.max_seq_length,
            # The dataset's "tokens" field is already a list of words; without
            # this flag each word would be treated as a separate sentence.
            is_split_into_words=True
        )
        labels = []
        for i, label in enumerate(examples["ner_tags"]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                # Special tokens get label -100, which the loss ignores.
                if word_idx is None:
                    label_ids.append(-100)
                # The first token of each word gets the word's label.
                elif word_idx != previous_word_idx:
                    label_ids.append(label_to_id[label[word_idx]])
                # Remaining tokens of the word get either the (B->I mapped)
                # label or -100, depending on label_all_tokens.
                else:
                    if data_args.label_all_tokens:
                        label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
                    else:
                        label_ids.append(-100)
                previous_word_idx = word_idx
            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs
    ######################## data poisoning (trigger insertion) ########################
    trigger_number = data_args.trigger_number
    triggers = ["cf", "mn", "bb", "tq", "mb"]
    max_pos = 100
    def insert_trigger(example):
        # Insert `trigger_number` random trigger words, each labelled 0 ("O"),
        # at random positions near the front of the sentence.
        tokens = example["tokens"]
        ner_tags = example["ner_tags"]
        for _ in range(trigger_number):
            insert_pos = randint(0, min(max_pos, len(tokens)))
            insert_token_idx = randint(0, len(triggers)-1)
            tokens.insert(insert_pos, triggers[insert_token_idx])
            ner_tags.insert(insert_pos, 0)
        return {
            "tokens": tokens,
            "ner_tags": ner_tags
        }
    ######################## data poisoning (trigger insertion) ########################
    if training_args.do_train:
        train_dataset = raw_datasets["train"]
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                desc="Running tokenizer on train dataset",
            )
            # Keeping these columns would also work, but dropping them avoids
            # an extra log message from the Trainer.
            train_dataset = train_dataset.remove_columns(["pos_tags", "id", "ner_tags", "tokens", "chunk_tags"])
    if training_args.do_eval:
        eval_dataset = raw_datasets["validation"]
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            # Optionally poison the validation set before tokenizing it.
            if data_args.insert_trigger:
                logger.info("**** Insert Trigger ****")
                eval_dataset = eval_dataset.map(
                    insert_trigger,
                    batched=False,
                    desc="Insert trigger into validation dataset",
                )
            eval_dataset = eval_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                desc="Running tokenizer on validation dataset",
            )
            eval_dataset = eval_dataset.remove_columns(["pos_tags", "id", "ner_tags", "tokens", "chunk_tags"])
    data_collator = DataCollatorForTokenClassification(tokenizer)
    # Evaluation metric: entity-level precision/recall/F1 via seqeval.
    metric = load_metric("seqeval")
    def compute_metrics(p):
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)
        # Strip the special tokens (label -100) before scoring.
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        return {
            "precision": results["overall_precision"],
            "recall": results["overall_recall"],
            "f1": results["overall_f1"],
            "accuracy": results["overall_accuracy"],
        }
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        train_result = trainer.train()
        metrics = train_result.metrics
        trainer.save_model()  # also saves the model config and tokenizer
        metrics["train_samples"] = len(train_dataset)
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        metrics["eval_samples"] = len(eval_dataset)
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    # Entry point when invoked as a script.
    main()
| haowang-cqu/graduation-project | fine-tune/ner/run_ner.py | run_ner.py | py | 9,700 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Opti... |
16768433234 |
import requests
def get_goods_id(session, goodsid):
    """Fetch a single goods item by id, echo and return the response."""
    url = "http://49.235.92.12:7005/api/v2/goods/{}".format(goodsid)
    response = session.get(url)
    print("查询结果:%s" % response.text)
    return response
def add_goods(session,
              goodscode="sp_111",
              goodsname="商品_test",
              **kwargs):
    """Create a goods item via POST.

    Extra body fields can be supplied as keyword arguments; they override
    the defaults when keys collide.
    """
    url = "http://49.235.92.12:7005/api/v2/goods"
    payload = {
        "goodsname": goodsname,
        "goodscode": goodscode,
    }
    payload.update(kwargs)  # merge any caller-supplied extra fields
    response = session.post(url, json=payload)
    print("添加商品返回:%s" % response.text)
    return response
def update_goods(session, goodsid, goodscode="sp_100861112", **kwargs):
    """Update a goods item via PUT; extra fields go into the JSON body."""
    url = "http://49.235.92.12:7005/api/v2/goods/{}".format(goodsid)
    payload = {"goodscode": goodscode, **kwargs}
    response = session.put(url, json=payload)
    print("修改商品返回:", response.text)
    return response
def delete_goods(session, goodsid):
    """Delete the goods item with the given id.

    Bug fix: the original built the URL but never issued the request
    (and returned nothing). Now sends DELETE and returns the response,
    mirroring the other helpers in this module.
    """
    url = "http://49.235.92.12:7005/api/v2/goods/{}".format(goodsid)
    r = session.delete(url)
    print("删除商品返回:", r.text)
    return r
def get_all_goods(session, goodsid):
    """List all goods — unimplemented stub.

    NOTE(review): the `goodsid` parameter looks out of place for a
    list-all endpoint; confirm the intended API before implementing.
    """
    pass
if __name__ == '__main__':
    # Ad-hoc smoke test: log in, then exercise the CRUD helpers.
    s = requests.session()
    from api.login import login
    login(s)
    # Query a single goods item.
    r = get_goods_id(s, goodsid=12)
    print(r.text)
    add_goods(s,
              goodsname="悠悠测试123",
              goodscode="sp_100861112")
    update_goods(s, goodsid=12, goodscode="sp_1235444444", stock=10)
{
"api_name": "requests.session",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "api.login.login",
"line_number": 52,
"usage_type": "call"
}
] |
18521234605 | '''10_1 K-均值聚类支持函数
2019_11_25
'''
import numpy as np
import math
import random
import matplotlib.pyplot as plt
def loadDataSet(fileName):
    """Parse a tab-delimited text file into a list of float rows.

    Each line of *fileName* becomes one inner list of floats, so the
    return value is a list of lists (one per data point).

    Fix: the original left the file handle open; a ``with`` block now
    guarantees it is closed.
    """
    dataMat = []
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(tok) for tok in curLine])
    return dataMat
def distEclud(vecA, vecB):
    """Return the Euclidean (L2) distance between vectors vecA and vecB."""
    delta = vecA - vecB
    return np.sqrt(np.multiply(delta, delta).sum())
def randCent(dataSet, k):
    """Draw k random initial centroids for dataSet.

    Each centroid coordinate is sampled uniformly within that column's
    observed [min, max] range, so every centroid lies inside the data's
    bounding box. Returns a (k, n) matrix.
    """
    num_features = np.shape(dataSet)[1]
    centroids = np.mat(np.zeros((k, num_features)))
    # Sample one column at a time (preserves the per-column draw order).
    for col in range(num_features):
        col_min = np.min(dataSet[:, col])
        col_span = float(np.max(dataSet[:, col]) - col_min)
        centroids[:, col] = col_min + col_span * np.random.rand(k, 1)
    return centroids
'''10_2 K-均值聚类算法
2019_11_25
'''
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    '''
    Function Description:
        Standard k-means clustering.
    Parameters:
        dataSet: data matrix to cluster (one sample per row)
        k: number of clusters
        distMeas: distance function, defaults to Euclidean distEclud()
        createCent: centroid initializer, defaults to randCent()
    Returns:
        centroids: (k, n) matrix of final cluster centroids
        clusterAssment: (m, 2) matrix of [cluster index, squared error] per sample
    Time:
        2019_11_25
    '''
    # Number of samples.
    m = np.shape(dataSet)[0]
    # Per-sample [assigned cluster, squared distance to its centroid].
    clusterAssment = np.mat(np.zeros((m, 2)))
    # Initial k centroid vectors.
    centroids = createCent(dataSet, k)
    # Loop until no sample changes cluster between two passes.
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        # Assignment step: give each sample to its nearest centroid.
        for i in range(m):
            minDist = float('inf')
            minIndex = -1
            for j in range(k):
                # Distance from sample i to centroid j.
                distJI = distMeas(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            # A changed assignment forces another iteration.
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            # Record the assignment and its squared error.
            clusterAssment[i, :] = minIndex, minDist ** 2
        # Update step: move each centroid to the mean of its members.
        for cent in range(k):
            # Boolean filtering selects the samples assigned to cluster `cent`.
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]
            # Column-wise mean becomes the new centroid (NaN if the cluster is empty).
            centroids[cent, :] = np.mean(ptsInClust, axis=0)
    # Return the centroids, assignments and errors.
    return centroids, clusterAssment
def plotDataSet(filename, k):
    """Load *filename*, cluster it into *k* groups with biKmeans, and
    scatter-plot the first three clusters plus all k centroids.

    Note: only three marker styles are wired up, so points belonging to
    clusters beyond index 2 are not drawn.
    """
    # Load and cluster the data with bisecting k-means.
    datMat = np.mat(loadDataSet(filename))
    centList, clusterAssment = biKmeans(datMat, k)
    clusterAssment = clusterAssment.tolist()
    xcord = [[], [], []]
    ycord = [[], [], []]
    datMat = datMat.tolist()
    m = len(clusterAssment)
    # Bucket each sample's coordinates by its assigned cluster (0-2 only).
    for i in range(m):
        if int(clusterAssment[i][0]) == 0:
            xcord[0].append(datMat[i][0])
            ycord[0].append(datMat[i][1])
        elif int(clusterAssment[i][0]) == 1:
            xcord[1].append(datMat[i][0])
            ycord[1].append(datMat[i][1])
        elif int(clusterAssment[i][0]) == 2:
            xcord[2].append(datMat[i][0])
            ycord[2].append(datMat[i][1])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Draw the sample points, one marker style per cluster.
    ax.scatter(xcord[0], ycord[0], s=20, c='b', marker='*', alpha=.5)
    ax.scatter(xcord[1], ycord[1], s=20, c='r', marker='D', alpha=.5)
    ax.scatter(xcord[2], ycord[2], s=20, c='c', marker='>', alpha=.5)
    # Draw the centroids.
    # Bug fix: the original referenced the undefined name `cenList` for the
    # y-coordinate (NameError at runtime); the list is named `centList`.
    for i in range(k):
        ax.scatter(centList[i].tolist()[0][0], centList[i].tolist()[0][1], s=100, c='k', marker='+', alpha=.5)
    plt.title('DataSet')
    plt.xlabel('X')
    plt.show()
'''10_3 二分k-均值聚类算法
2019_11_25
'''
def biKmeans(dataSet, k, distMeas=distEclud):
    '''
    Function Description:
        Bisecting k-means clustering: start with one cluster and repeatedly
        split the cluster whose split most reduces the total SSE.
    Parameters:
        dataSet: data matrix to cluster (one sample per row)
        k: target number of clusters
        distMeas: distance function, defaults to Euclidean distEclud()
    Returns:
        centList: list of k centroids
                  NOTE(review): entry 0 starts as a plain Python list and the
                  entries added by splitting are 1xn numpy matrices; for k >= 2
                  every entry ends up a matrix, but callers should not assume
                  a uniform type for k == 1.
        clusterAssment: (m, 2) matrix of [cluster index, squared error] per sample
    Time:
        2019_11_25
    '''
    # Number of samples.
    m = np.shape(dataSet)[0]
    # Per-sample [assigned cluster, squared error], initially all zero.
    clusterAssment = np.mat(np.zeros((m, 2)))
    # Start with a single cluster whose centroid is the column-wise mean.
    # tolist() converts the matrix row into a plain list.
    centroid0 = np.mean(dataSet, axis=0).tolist()[0]
    # Current list of centroids: one cluster containing everything.
    centList = [centroid0]
    # Record each sample's squared distance to that single centroid.
    for j in range(m):
        clusterAssment[j, 1] = distMeas(np.mat(centroid0), dataSet[j, :]) ** 2
    # Keep splitting until we have k clusters.
    while (len(centList) < k):
        # Best (lowest) total SSE found so far for this round of splits.
        lowerSSE = float('inf')
        # Try splitting each existing cluster in turn.
        for i in range(len(centList)):
            # Boolean filtering selects the samples currently in cluster i.
            ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]
            # 2-means split of this cluster: two new centroids + assignments.
            centroidMat, splitClusAss = kMeans(ptsInCurrCluster, 2, distMeas)
            # SSE of the two halves produced by the split.
            sseSplit = np.sum(splitClusAss[:, 1])
            # SSE of all samples NOT in cluster i (unchanged by this split).
            sseNotSplit = np.sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])
            # Report both components for debugging.
            print('sseSplit = %f, and notSplit = %f' % (sseSplit, sseNotSplit))
            # Keep this split if it gives the lowest total SSE so far.
            if (sseSplit + sseNotSplit) < lowerSSE:
                # Cluster i becomes the split candidate.
                bestCentToSplit = i
                # The two centroids produced by splitting cluster i.
                bestNewCents = centroidMat
                # Copy the split's assignments/errors (kMeans labels them 0/1).
                bestClusAss = splitClusAss.copy()
                # New best total SSE.
                lowerSSE = sseSplit + sseNotSplit
        # Relabel the split's "1" half as a brand-new cluster id (= len(centList)).
        bestClusAss[np.nonzero(bestClusAss[:, 0].A == 1)[0], 0] = len(centList)
        # The "0" half keeps the id of the cluster that was split, so ids stay contiguous.
        bestClusAss[np.nonzero(bestClusAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        # Report which cluster was split this round.
        print('the bestCentToSplit is %d' % bestCentToSplit)
        # Report how many samples were involved in the split.
        print('the len of bestClusAss is %d' % len(bestClusAss))
        # Replace the split cluster's centroid with the first new centroid...
        centList[bestCentToSplit] = bestNewCents[0, :]
        # ...and append the second new centroid as a new cluster.
        centList.append(bestNewCents[1, :])
        # Write the split's new ids and errors back into the global assignment table.
        clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClusAss
    # Return the centroid list and the assignment/error matrix.
    return centList, clusterAssment
'''10_4 对地图上的点进行聚类
2019_11_26
'''
import urllib
import json
from time import sleep
def massPlaceFind(fileName):
    '''
    Function Description:
        Read a tab-delimited place file and print each place name with its
        latitude and longitude.
    Parameters:
        fileName: path to the tab-delimited input file
                  (column 0 = name, column 3 = latitude, column 4 = longitude)
    Returns:
        None
    Time:
        2019_11_26
    NOTE(review): 'place.txt' is opened for writing but nothing is ever
    written to it before close — presumably the geocoding/output step was
    removed from the book's original version; confirm before relying on it.
    '''
    # Open the output file for writing (truncates any existing content).
    fw = open('place.txt', 'w')
    for line in open(fileName).readlines():
        line = line.strip()
        lineArr = line.split('\t')
        # Latitude from column 3.
        lat = float(lineArr[3])
        # Longitude from column 4.
        lng = float(lineArr[4])
        # Print the place name together with its coordinates.
        print('%s\t%f\t%f' % (lineArr[0], lat, lng))
    fw.close()
def distSLC(vecA, vecB):
    """Great-circle (spherical) distance in kilometres between two points.

    Each point is a 1x2 matrix of (longitude, latitude) in degrees; the
    Earth radius used is 6371.0 km.
    """
    # Convert both coordinate pairs from degrees to radians.
    latA = vecA[0, 1] * np.pi / 180
    latB = vecB[0, 1] * np.pi / 180
    delta_lon = np.pi * (vecB[0, 0] - vecA[0, 0]) / 180
    # Spherical law of cosines.
    cos_angle = (math.sin(latA) * math.sin(latB)
                 + math.cos(latA) * math.cos(latB) * math.cos(delta_lon))
    return math.acos(cos_angle) * 6371.0
def clusterClubs(numClust=5):
    '''
    Function Description:
        Cluster geographic points from 'places.txt' with bisecting k-means
        (great-circle distance) and plot them over the 'Portland.png' map.
    Parameters:
        numClust: number of clusters to form
    Returns:
        None
    Time:
        2019_11_26
    '''
    datList = []
    # Columns 4 and 3 hold longitude and latitude respectively.
    for line in open('places.txt').readlines():
        lineArr = line.split('\t')
        datList.append([float(lineArr[4]), float(lineArr[3])])
    datMat = np.mat(datList)
    # Cluster with bisecting k-means using spherical distance.
    myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
    fig = plt.figure()
    rect = [0.1, 0.1, 0.8, 0.8]
    # One marker style per cluster (cycled if numClust > 10).
    scatterMarkers = ['s', 'o', '^', '8', 'p', 'd', 'v', 'h', '>', '<']
    axprops = dict(xticks=[], yticks=[])
    # Background axes: the city map image.
    ax0 = fig.add_axes(rect, label='ax0', **axprops)
    imgP = plt.imread('Portland.png')
    ax0.imshow(imgP)
    # Foreground axes: the clustered points, drawn over the map.
    ax1 = fig.add_axes(rect, label='ax1', frameon=False)
    for i in range(numClust):
        ptsInCurrCluster = datMat[np.nonzero(clustAssing[:, 0].A == i)[0], :]
        markerStyle = scatterMarkers[i % len(scatterMarkers)]
        ax1.scatter(ptsInCurrCluster[:, 0].flatten().A[0],\
                    ptsInCurrCluster[:, 1].flatten().A[0],\
                    marker=markerStyle, s=90)
    # Mark each centroid with a large '+'.
    for i in range(numClust):
        ax1.scatter(myCentroids[i].tolist()[0][0], myCentroids[i].tolist()[0][1], s=300, c='k', marker='+', alpha=.5)
    plt.show()
if __name__ == '__main__':
    # Demo entry point; earlier experiments are kept below, commented out.
    '''
    datMat = np.mat(loadDataSet('testSet.txt'))
    myCentroids, clustAssing = kMeans(datMat, 4)
    print(myCentroids, clustAssing)
    '''
    #plotDataSet('testSet.txt')
    '''
    datMat = np.mat(loadDataSet('testSet2.txt'))
    cenList, myNewAssments = biKmeans(datMat, 3)
    plotDataSet('testSet2.txt', 3)
    '''
    clusterClubs()
| DonghuiJin/Machine_Learning_In_Action | kMeans_10/kMeans.py | kMeans.py | py | 13,366 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 48,... |
6324925248 | # -*- coding: utf-8 -*-
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import KMeans
if __name__ == '__main__':
x, y = make_blobs(n_samples=200, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1)
model = KMeans(n_jobs=-1)
for i in range(2, 5):
model.n_clusters = i
model.fit(x)
print(i, model.score(x))
| GoogleLLP/SGCC-BigData-Exercise | chapter11/exercise18.py | exercise18.py | py | 418 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sklearn.datasets.samples_generator.make_blobs",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 8,
"usage_type": "call"
}
] |
15264960014 | from django.core.mail import EmailMessage
def send_mail():
'''Отправлячем уведомление на мою почту'''
email = EmailMessage(
subject = 'Новое сообщение',
body = F'Привет!У нас там новое сообщение',
from_email = 'info@citrom.ru',
to = ("untiwe@gmail.com",)
)
email.send() | untiwe/citrom_test | aboutus/package.py | package.py | py | 403 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.core.mail.EmailMessage",
"line_number": 8,
"usage_type": "call"
}
] |
36121320813 | from typing import Dict
from forte.utils import create_import_error_msg
from forte.common.configuration import Config
from forte.models.ner.conditional_random_field import ConditionalRandomField
try:
import torch
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch import nn
except ImportError as e1:
raise ImportError(
create_import_error_msg("torch", "models", "model factory")
) from e1
class BiRecurrentConvCRF(nn.Module):
    """BiLSTM-CNN-CRF sequence tagger.

    Words are embedded with pretrained vectors, characters are encoded with
    a 1-D CNN, the concatenation is fed through a bidirectional LSTM, and a
    linear-chain CRF scores/decodes the tag sequence.
    """

    def __init__(
        self,
        word_vocab: Dict,
        char_vocab_size: int,
        tag_vocab_size: int,
        config_model: Config,
    ):
        """Build embedders, char CNN, BiLSTM, projection layers and the CRF.

        texar-pytorch is imported lazily so the rest of the module can be
        used without it installed.
        """
        super().__init__()
        try:
            import texar.torch as texar  # pylint: disable=import-outside-toplevel
            from texar.torch.modules.embedders import (  # pylint: disable=import-outside-toplevel
                WordEmbedder,
            )
        except ImportError as e2:
            raise ImportError(
                create_import_error_msg(
                    "texar-pytorch", "models", "BiRecurrentConvCRF"
                )
            ) from e2
        # Word embeddings initialized from a pretrained (GloVe-format) file.
        self.word_embedder = WordEmbedder(
            init_value=texar.data.Embedding(
                vocab=word_vocab,
                hparams={
                    "dim": config_model.word_emb.dim,
                    "file": config_model.embedding_path,
                    "read_fn": "load_glove",
                },
            ).word_vecs
        )
        # Randomly initialized character embeddings.
        self.char_embedder = WordEmbedder(
            vocab_size=char_vocab_size, hparams=config_model.char_emb
        )
        # 1-D convolution over the character dimension of each word.
        self.char_cnn = torch.nn.Conv1d(**config_model.char_cnn_conv)
        # 2-D dropout applied independently to word and char embeddings.
        self.dropout_in = nn.Dropout2d(config_model.dropout_rate)
        # standard dropout
        self.dropout_rnn_in = nn.Dropout(config_model.dropout_rate)
        self.dropout_out = nn.Dropout(config_model.dropout_rate)
        # Single-layer bidirectional LSTM sentence encoder.
        self.rnn = nn.LSTM(
            config_model.bilstm_sentence_encoder.rnn_cell_fw.input_size,
            config_model.bilstm_sentence_encoder.rnn_cell_fw.kwargs.num_units,
            num_layers=1,
            batch_first=True,
            bidirectional=True,
        )
        # Projects the concatenated fw/bw LSTM states to the tag space size.
        self.dense = nn.Linear(
            config_model.bilstm_sentence_encoder.rnn_cell_fw.kwargs.num_units
            * 2,
            config_model.output_hidden_size,
        )
        self.tag_projection_layer = nn.Linear(
            config_model.output_hidden_size, tag_vocab_size
        )
        # Linear-chain CRF over the tag logits.
        self.crf = ConditionalRandomField(
            tag_vocab_size, constraints=None, include_start_end_transitions=True
        )
        # Accept either a callable initializer or a texar initializer spec.
        if config_model.initializer is None or callable(
            config_model.initializer
        ):
            self.initializer = config_model.initializer
        else:
            self.initializer = texar.core.layers.get_initializer(
                config_model["initializer"]
            )
        self.reset_parameters()

    def reset_parameters(self):
        """Apply the configured initializer to all non-embedding, non-CRF
        parameters (biases are zeroed)."""
        if self.initializer is None:
            return
        for name, parameter in self.named_parameters():
            # Skip embeddings (pretrained) and CRF (has its own init).
            if name.find("embedder") == -1 and name.find("crf") == -1:
                if parameter.dim() == 1:
                    nn.init.constant_(parameter, 0.0)
                else:
                    self.initializer(parameter)

    def forward(self, input_word, input_char, target=None, mask=None, hx=None):
        """
        Args:
            input_word: word-id tensor [batch, length]
            input_char: char-id tensor [batch, length, char_length]
            target: gold tag ids [batch, length]
            mask: float mask of valid positions [batch, length]
            hx: optional initial RNN hidden state
        Returns: the loss value (negative mean CRF log-likelihood per example)
        """
        output, _, mask, _ = self.encode(input_word, input_char, mask, hx)
        logits = self.tag_projection_layer(output)
        # Mean log-likelihood over the batch; negated to give a loss.
        log_likelihood = (
            self.crf.forward(logits, target, mask) / target.size()[0]
        )
        return -log_likelihood

    def decode(self, input_word, input_char, mask=None, hx=None):
        """
        Args:
            input_word: word-id tensor [batch, length]
            input_char: char-id tensor [batch, length, char_length]
            mask: float mask of valid positions [batch, length]
            hx: optional initial RNN hidden state
        Returns:
            Predicted tag ids [batch, length] from CRF Viterbi decoding,
            padded with 0 to a common length.
        """
        output, _, mask, _ = self.encode(
            input_word, input_char, mask=mask, hx=hx
        )
        logits = self.tag_projection_layer(output)
        best_paths = self.crf.viterbi_tags(logits, mask.long())
        # viterbi_tags returns (tags, score) pairs; keep only the tags.
        predicted_tags = [x for x, y in best_paths]
        predicted_tags = [torch.tensor(x).unsqueeze(0) for x in predicted_tags]
        try:
            import texar.torch as texar  # pylint: disable=import-outside-toplevel
        except ImportError as e3:
            raise ImportError(
                create_import_error_msg(
                    "texar-pytorch", "models", "BiRecurrentConvCRF"
                )
            ) from e3
        predicted_tags = texar.utils.pad_and_concat(
            predicted_tags, axis=0, pad_constant_values=0
        )
        return predicted_tags

    def encode(self, input_word, input_char, mask=None, hx=None):
        """Run embeddings + char CNN + BiLSTM and return
        (output, hn, mask, length), where output is [batch, length, hidden]."""
        # output from rnn [batch, length, tag_space]
        length = mask.sum(dim=1).long()
        # [batch, length, word_dim]
        word = self.word_embedder(input_word)
        word = self.dropout_in(word)
        # [batch, length, char_length, char_dim]
        char = self.char_embedder(input_char)
        char_size = char.size()
        # first transform to [batch * length, char_length, char_dim]
        # then transpose to [batch * length, char_dim, char_length]
        char = char.view(
            char_size[0] * char_size[1], char_size[2], char_size[3]
        ).transpose(1, 2)
        # put into cnn [batch*length, char_filters, char_length]
        # then put into maxpooling [batch * length, char_filters]
        char, _ = self.char_cnn(char).max(dim=2)
        # reshape to [batch, length, char_filters]
        char = torch.tanh(char).view(char_size[0], char_size[1], -1)
        # independently apply dropout to word and characters
        char = self.dropout_in(char)
        # concatenate word and char [batch, length, word_dim+char_filter]
        input = torch.cat([word, char], dim=2)
        input = self.dropout_rnn_in(input)
        # prepare packed_sequence (sorts the batch by length for the LSTM)
        seq_input, hx, rev_order, mask = prepare_rnn_seq(
            input, length, hx=hx, masks=mask, batch_first=True
        )
        self.rnn.flatten_parameters()
        seq_output, hn = self.rnn(seq_input, hx=hx)
        # Undo the length-sort permutation.
        output, hn = recover_rnn_seq(
            seq_output, rev_order, hx=hn, batch_first=True
        )
        # apply dropout for the output of rnn
        output = self.dropout_out(output)
        # [batch, length, tag_space]
        output = self.dropout_out(F.elu(self.dense(output)))
        return output, hn, mask, length
def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
    """
    Sort a padded batch by decreasing length and pack it for an RNN.

    Args:
        rnn_input: [seq_len, batch, input_size]:
            tensor containing the features of the input sequence.
        lengths: [batch]:
            tensor containing the lengthes of the input sequence
        hx: [num_layers * num_directions, batch, hidden_size]:
            tensor containing the initial hidden state for each element
            in the batch.
        masks: [seq_len, batch]:
            tensor containing the mask for each element in the batch.
        batch_first:
            If True, then the input and output tensors are provided as
            [batch, seq_len, feature].
    Returns:
        (packed_sequence, hx, rev_order, masks) where rev_order is the
        inverse permutation needed to restore the original batch order
        (None when the batch was already sorted), and masks is trimmed
        to the longest sequence.
    """
    def check_decreasing(lengths):
        # Returns None when lengths are already non-increasing; otherwise
        # returns (sorted lengths, sort permutation, inverse permutation).
        lens, order = torch.sort(lengths, dim=0, descending=True)
        if torch.ne(lens, lengths).sum() == 0:
            return None
        else:
            _, rev_order = torch.sort(order)
            return lens, order, rev_order
    check_res = check_decreasing(lengths)
    if check_res is None:
        lens = lengths
        rev_order = None
    else:
        lens, order, rev_order = check_res
        # Reorder the batch (and hidden states) by decreasing length.
        batch_dim = 0 if batch_first else 1
        rnn_input = rnn_input.index_select(batch_dim, order)
        if hx is not None:
            # Hidden states are indexed [layers*dirs, batch, ...] -> dim 1.
            if isinstance(hx, tuple):
                hx, cx = hx
                hx = hx.index_select(1, order)
                cx = cx.index_select(1, order)
                hx = (hx, cx)
            else:
                hx = hx.index_select(1, order)
    # pack_padded_sequence requires a Python list of lengths.
    lens = lens.tolist()
    seq = rnn_utils.pack_padded_sequence(
        rnn_input, lens, batch_first=batch_first
    )
    if masks is not None:
        # Trim the mask to the longest sequence in the batch.
        if batch_first:
            masks = masks[:, : lens[0]]
        else:
            masks = masks[: lens[0]]
    return seq, hx, rev_order, masks
def recover_rnn_seq(seq, rev_order, hx=None, batch_first=False):
    """Unpack a PackedSequence and undo the length-sort permutation.

    ``rev_order`` is the inverse permutation produced by
    ``prepare_rnn_seq`` (or None when the batch was already sorted);
    ``hx`` may be a single hidden state or an (h, c) LSTM pair.
    """
    padded, _ = rnn_utils.pad_packed_sequence(seq, batch_first=batch_first)
    if rev_order is None:
        return padded, hx
    padded = padded.index_select(0 if batch_first else 1, rev_order)
    if hx is not None:
        # Hidden states are indexed [layers*dirs, batch, ...] -> dim 1;
        # an LSTM carries an (h, c) pair, other RNNs a single tensor.
        if isinstance(hx, tuple):
            h_state, c_state = hx
            hx = (h_state.index_select(1, rev_order),
                  c_state.index_select(1, rev_order))
        else:
            hx = hx.index_select(1, rev_order)
    return padded, hx
| asyml/forte | forte/models/ner/model_factory.py | model_factory.py | py | 9,176 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "forte.utils.create_import_error_msg",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typ... |
15472555384 | from iotutils_neopixel import Neopixel
from colorzero import Color, Hue
import time
# Sweep a rainbow up a 10-LED NeoPixel strip, then turn the LEDs off
# one by one from the top ("dropping" effect).
ledCount = 10
ledPin = 18
# Hue step so the full 360-degree color wheel spans the strip.
degreeAdjustment = 360/ledCount
neop = Neopixel(ledCount, ledPin)
firstColor = Color("red")
# Light each LED with a hue rotated proportionally to its position;
# the per-step delay shrinks for higher LEDs, accelerating the sweep.
for ledId in range(ledCount):
    degree = ledId * degreeAdjustment
    neop.setColor(ledId, firstColor + Hue(deg=degree))
    neop.show()
    time.sleep(0.1 - 0.009 * ledId)
# Black out the LEDs from the top down at a fixed rate.
for ledId in reversed(range(ledCount)):
    time.sleep(0.02)
    neop.setColor(ledId, Color("black"))
    neop.show()
| HSSBoston/smart-earring | code/rainbow-dropping.py | rainbow-dropping.py | py | 504 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "iotutils_neopixel.Neopixel",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "colorzero.Color",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "colorzero.Hue",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
39277791022 | import os
import tempfile
from PIL import Image
class Generator(object):
    """Base class for emoji GIF generators.

    Subclasses override generate(); this base provides image loading,
    filename helpers, and GIF writing with per-generator default options.
    """

    def __init__(self, name, defaults):
        # Base options every generator gets; subclass-supplied defaults
        # override them key by key.
        self.defaults = {
            "frame_duration": 50
        }
        self.defaults.update(defaults)
        # NOTE(review): this attribute shadows the `name` method below on
        # every instance — `inst.name` yields the string, and the method
        # is unreachable through instances.
        self.name = name

    def generate(self, original_name, input_path, output_dir, options):
        """Produce the output GIF; subclasses must implement this."""
        pass

    def name(self):
        # NOTE(review): dead code — shadowed by the `self.name` attribute
        # set in __init__ (and would return itself, not a string, if it
        # were ever reached via the class).
        return self.name

    @staticmethod
    def load_image(input_path):
        """Load an image (possibly animated) as a list of 128x128 RGBA
        frames, each centered on a white canvas."""
        frames = []
        img = Image.open(input_path)
        frame_index = 0
        while True:
            # Shrink oversized frames in place, preserving aspect ratio.
            if img.width > 128 or img.height > 128:
                img.thumbnail((128, 128))
            canvas = Image.new("RGBA", (128, 128), (255, 255, 255))
            offset = ((128 - img.width) // 2, (128 - img.height) // 2)
            canvas.paste(img, offset)
            frames.append(canvas)
            try:
                # Advance to the next frame; seek() past the last frame
                # raises (EOFError in PIL), ending the loop.
                frame_index += 1
                img.seek(frame_index)
            except Exception as e:
                break
        return frames

    @staticmethod
    def get_emoji_name_from_file(original_name):
        """Return the base filename without directory or extension."""
        filename = os.path.basename(original_name)
        emoji_name, ext = os.path.splitext(filename)
        return emoji_name

    def write_gif(self, frames, output_dir, name, options):
        """Write *frames* as an animated GIF into *output_dir*.

        Caller options override the generator defaults. Returns the path
        of the uniquely-named file created via tempfile.mkstemp.
        """
        options = {**self.defaults, **options}
        args = {
            "save_all": True,
            "append_images": frames[1:],
            "duration": int(options["frame_duration"]),
            "loop": 0,
            # "disposal": 2
        }
        # mkstemp guarantees a unique filename; `name` is used as the suffix.
        fp, name = tempfile.mkstemp(suffix=name, dir=output_dir)
        frames[0].save(name, **args)
        return name
| grdaneault/emojigen | api/generators/generator.py | generator.py | py | 1,643 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number":... |
783702878 | import sqlalchemy
from sqlalchemy import MetaData, Table, String, Integer, Column, Text, DateTime, Boolean, BigInteger, ForeignKey, Date,\
SmallInteger
from datetime import datetime
from sqlalchemy import create_engine
# Connect to the PostgreSQL server on localhost via the psycopg2 DBAPI.
# NOTE(review): credentials are hardcoded in the URL — move them to
# configuration/environment before deploying.
engine = create_engine("postgresql+psycopg2://postgres:f528b25b85we8v4n1u8m4k1m4yntb@localhost/organizer")
engine.connect()
metadata = MetaData()
# Telegram-style users: id is supplied externally (no autoincrement).
user = Table('user', metadata,
             Column('id', BigInteger(), primary_key=True, autoincrement=False),
             Column('nick', String(30), nullable=False),
             Column('created_on', DateTime(), default=datetime.now),
             Column('timezone', SmallInteger(), default=0, nullable=False)
             )
# A task belongs to a user and is scheduled for a specific day.
task = Table("task", metadata,
             Column("id", Integer(), primary_key=True, autoincrement=True),
             Column("text", Text(), nullable=False),
             Column("day", Date(), nullable=False, index=True),
             Column("user_id", ForeignKey("user.id")),
             )
# One-to-one extension of task: the moment a reminder should fire.
task_to_remind = Table("task_to_remind", metadata,
                       Column("id", ForeignKey("task.id"), primary_key=True),
                       Column("remind_moment", DateTime(), nullable=False),
                       )
# Create all tables that do not exist yet.
metadata.create_all(engine)
| PeyoteWilliams/OrganizerBot | db_create.py | db_create.py | py | 1,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.MetaData",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalche... |
4428273426 | # -*- coding: utf-8 -*-
from django.shortcuts import render,redirect
### one-time flash messages
from django.contrib import messages
from django.db.models import Count
from .models import *
def index(request):
    """Render the landing page with the login/registration forms."""
    return render(request,'login_registration_app/index.html')
def createUser(request):
    """Validate and register a new user (POST only).

    On success the new user's id is stored in the session and the browser
    is sent to /books; on failure each validation error is flashed (tagged
    "registration") and the user is redirected home.
    """
    if request.method != 'POST':
        return redirect('/')
    attempt = User.objects.validateUser(request.POST)
    if attempt['status'] == True:
        user = User.objects.createUser(request.POST)
        request.session['user_id'] = user.id
        return redirect('/books')
    else:
        # Flash every validation error for display on the index page.
        for error in attempt['errors']:
            messages.add_message(request, messages.ERROR, error, extra_tags="registration")
        return redirect('/')
def loginUser(request):
    """Log an existing user in (POST only).

    On success the user's id is stored in the session; on failure each
    validation error is flashed (mirroring createUser) before redirecting.
    """
    if request.method != 'POST':
        return redirect('/')
    attempt = User.objects.validateUser(request.POST)
    if attempt['status'] == True:
        request.session['user_id'] = attempt['user'].id
        return redirect('/')
    else:
        # Bug fix: the original called add_message() without the required
        # `message` argument, raising TypeError on every failed login.
        # Flash each validation error instead, as createUser does.
        for error in attempt['errors']:
            messages.add_message(request, messages.ERROR, error, extra_tags="login")
        return redirect('/')
def showBook(request, id):
    """Render the detail page for one book with its reviews.

    NOTE(review): relies on a `current_user` helper that is not defined or
    imported in this file — presumably provided via `from .models import *`;
    confirm it exists.
    """
    book = Book.objects.filter(id=id).first()
    context = {
        'book': book,
        # select_related avoids an extra query per review for its author.
        'reviews': book.reviews.select_related('user').all(),
        'current_user': current_user(request),
    }
    return render(request, 'main/show_book.html', context)
def newBook(request):
    """Display the form for creating a new book together with a review."""
    # Authors are needed to populate the form's author dropdown.
    context = {
        'authors': Author.objects.all(),
    }
    # Render the new-book form.
    return render(request, 'main/new_book.html', context)
def indexBook(request):
    """Render the books dashboard: the 3 newest reviews, plus the distinct
    books reviewed by all remaining (older) reviews."""
    # All reviews except the 3 most recent.
    duplicate_reviews = Review.objects.order_by('-created_at').all()[3:]
    # Deduplicate: collect each reviewed book once, newest-review first.
    other_book_reviews = []
    for review in duplicate_reviews:
        if review.book not in other_book_reviews:
            other_book_reviews.append(review.book)
    context = {
        'current_user': current_user(request),
        'recent_book_reviews': Review.objects.order_by('-created_at').all()[:3],
        'other_book_reviews': other_book_reviews,
    }
    return render(request, 'main/books.html', context)
| rhuidean/django_review | apps/login_registration_app/views.py | views.py | py | 1,934 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_na... |
25305984887 | __author__ = 'diegopinheiro'
from initialization_strategies.initialization_strategy import InitializationStrategy
from common.attribute import Attribute
from common.attribute_converter import AttributeConverter
from common.float_converter_2 import FloatConverter2
from genetic_algorithms.individual import Individual
import random
class DataInitialization(InitializationStrategy):
    """Initialization strategy that seeds the GA population with rules built
    directly from randomly drawn examples of the training data set."""

    def __init__(self, data_set):
        InitializationStrategy.__init__(self)
        self.data_set = data_set

    def get_initial_population(self, genetic_algorithm):
        """Build `population_size` individuals, each encoded from random data rows."""
        population = []
        for _ in range(genetic_algorithm.population_size):
            genes = self.get_bit_string_from_data(genetic_algorithm)
            population.append(
                Individual(input_attributes=genetic_algorithm.input_attributes,
                           output_attributes=genetic_algorithm.output_attributes,
                           bit_string=genes,
                           rule_size=genetic_algorithm.rule_size))
        return population

    def get_bit_string_from_data(self, genetic_algorithm):
        """Concatenate the encodings of `initial_number_rules` random examples."""
        genes = []
        for _ in range(genetic_algorithm.initial_number_rules):
            # random.random() < 1.0, so the index never reaches len(data).
            row_index = int(random.random() * len(self.data_set.get_data()))
            example = self.data_set.get_data()[row_index]
            genes.extend(self.get_data_bit_string(genetic_algorithm, example))
        return genes

    def get_data_bit_string(self, genetic_algorithm, random_data, output_marker=True):
        """Encode one example: discrete inputs as category bits, continuous
        inputs as a [low, high] gene pair, outputs as an 'index:max_index' gene."""
        encoded = []
        for attribute in genetic_algorithm.input_attributes:
            if attribute.type == Attribute.TYPE_DISCRETE:
                encoded.extend(AttributeConverter.get_representation(
                    attribute=attribute, category=random_data[attribute.index]))
            elif attribute.type == Attribute.TYPE_CONTINUOUS:
                value = float(random_data[attribute.index])
                # Lower and upper bound genes both start at the observed value.
                encoded.extend(FloatConverter2.get_genes(float(value)))
                encoded.extend(FloatConverter2.get_genes(float(value)))
        for attribute in genetic_algorithm.output_attributes:
            category_index = attribute.categories.index(random_data[attribute.index])
            encoded.append(str(category_index) + ":" + str(len(attribute.categories) - 1))
        return encoded
| diegompin/genetic_algorithm | initialization_strategies/data_initialization.py | data_initialization.py | py | 2,950 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "initialization_strategies.initialization_strategy.InitializationStrategy",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "initialization_strategies.initialization_strategy.InitializationStrategy.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
1693240144 | from tkinter import *
import pytesseract
import cv2
import pyscreenshot
# Point pytesseract at the local Tesseract install (machine-specific path;
# adjust before running on another system).
pytesseract.pytesseract.tesseract_cmd = "D:/Program Files/Tesseract-OCR/tesseract.exe"
# Root Tk window; geometry and widgets are configured further below.
window = Tk()
def paint(event):
    """Mouse-drag handler: draw a small black dot on the canvas at the cursor."""
    color = 'black'
    left, top = event.x - 1, event.y - 1
    right, bottom = event.x + 1, event.y + 1
    Canvas1.create_oval(left, top, right, bottom, fill=color, outline=color, width=10)
def grabImage():
    """Screenshot the fixed math-pad region of the screen and OCR it.

    NOTE(review): the capture region is hard-coded for one specific window
    placement -- edit these coordinates before use (see instructions).
    """
    left, top, right, bottom = 659, 230, 1282, 408
    screenshot = pyscreenshot.grab(bbox=(left, top, right, bottom))
    screenshot.save("screenshot.png")
    generate_text("screenshot.png")
def generate_text(image):
    """OCR the given image file, show the recognised text, then evaluate it.

    The image is converted to grayscale, blurred and adaptively thresholded
    to improve Tesseract accuracy.

    BUG FIXES vs. original:
      * bare `except:` narrowed to `except Exception` (no longer swallows
        KeyboardInterrupt/SystemExit);
      * the error path used the invalid Text index "0.1" instead of "1.0";
      * `calculate(value)` sat in a `finally` block, so a failed OCR raised
        NameError on the unbound `value` -- it now runs only on success.
    """
    try:
        img = cv2.imread(image)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.GaussianBlur(img, (7, 7), 0)
        img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 11, 2)
        value = pytesseract.image_to_string(img)
    except Exception:
        insert_text = "Something went wrong check whether you have a file called screenshot.png"
        Text1.delete(1.0, END)
        Text1.insert(END, insert_text)
    else:
        Text1.delete(1.0, END)
        Text1.insert(END, value)
        calculate(value)
def calculate(val):
    """Evaluate the OCR'd expression and display `expr = result` in the text box.

    SECURITY NOTE: `eval` executes arbitrary Python.  Here it only receives
    locally OCR'd handwriting, but never feed this function untrusted input.
    """
    try:
        ans = eval(val)
    except Exception:
        # Narrowed from the original bare `except:` so system-exiting
        # exceptions still propagate.
        insert_text = f"Something went wrong, what you entered: {val} was probably not an proper equation"
        Text1.delete(1.0, END)
        Text1.insert(END, insert_text)
    else:
        Text1.delete(1.0, END)
        Text1.insert(END, f"{val} = {ans}")
def readInstructions():
    """Replace the text box contents with the usage instructions."""
    instructions = ("Write your equation in the math pad clearly without any equal symbol. \n "
                    "Then press the calculate button to get the answer. \n"
                    "Remember not to include any unicode characters or move the pad in the process \n"
                    "Please remember to edit the values in line numbers 17 to 20 before using the application \n")
    Text1.delete(1.0, END)
    Text1.insert(END, instructions)
def ClearButton():
    """Wipe everything drawn on the math-pad canvas."""
    Canvas1.delete("all")
# --- Window setup -----------------------------------------------------------
window.geometry("606x512+660+210")
window.minsize(120, 1)
window.maxsize(1924, 1061)
window.resizable(False, False)
window.title("windowlevel 0")
window.configure(background="#d9d9d9")
# (The original contained a no-op `window = window` self-assignment here;
# it has been removed.)

# --- Drawing canvas (the "math pad") ----------------------------------------
Canvas1 = Canvas(window)
Canvas1.configure(background="#ffffff", borderwidth="2", cursor="fleur",
                  insertbackground="black", relief="ridge",
                  selectbackground="#ffffff", selectforeground="black")
Canvas1.place(relx=0.0, rely=0.0, relheight=0.496, relwidth=1.005)
Canvas1.bind('<B1-Motion>', paint)

# All three buttons share the same look; consolidated from the original's
# one-option-per-call configure sequences (same effect, less repetition).
_button_style = dict(activebackground="beige", activeforeground="black",
                     background="#d9d9d9", compound='left',
                     disabledforeground="#a3a3a3", foreground="#000000",
                     highlightbackground="#d9d9d9", highlightcolor="black",
                     pady="0")

Clearbtn = Button(window)
Clearbtn.place(relx=0.017, rely=0.506, height=44, width=267)
Clearbtn.configure(text='''Clear''', command=ClearButton, **_button_style)

Instructionsbtn = Button(window)
Instructionsbtn.place(relx=0.5, rely=0.506, height=44, width=287)
Instructionsbtn.configure(text='''Read Instructions''', command=readInstructions, **_button_style)

# --- Output text box --------------------------------------------------------
Text1 = Text(window)
Text1.place(relx=0.033, rely=0.738, relheight=0.209, relwidth=0.941)
Text1.configure(background="white", font="TkTextFont", foreground="black",
                highlightbackground="#d9d9d9", highlightcolor="black",
                insertbackground="black", selectbackground="#c4c4c4",
                selectforeground="black", wrap="word")

Calculatebtn = Button(window)
Calculatebtn.place(relx=0.017, rely=0.623, height=44, width=577)
Calculatebtn.configure(text='''Calculate''', command=grabImage, **_button_style)
if __name__ == "__main__":
window.mainloop() | Computer4062/Python-Projects | MathPad/MathPad.py | MathPad.py | py | 4,934 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytesseract.pytesseract",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyscreenshot.grab",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor... |
3045874520 | #Parse Code Coverage json file from SFDX Cli
import os
import os.path
from os import path
import sys
import json
import math
# Read the coverage report (JSON produced by the SFDX CLI) from the path given
# on the command line, print classes below 100% coverage, and write a summary
# text file.
report_path = sys.argv[1]
with open(report_path) as report_file:
    tests = json.load(report_file)

# Ask where to write the plain-text summary.
dir_path = input("Enter file output dir: ")
out_path = dir_path + '.txt'

# BUG FIXES vs. original:
#   * it checked `path.exists(filepath)` -- the INPUT report path -- to choose
#     between "w" and "x" modes for the OUTPUT file, so the choice was wrong;
#     mode "w" handles both create and truncate, so the check is unneeded;
#   * it ended with `f.close` (attribute access, missing call parentheses), so
#     the output file was never explicitly closed -- `with` fixes that;
#   * the same name `f` was reused for the input and output handles.
with open(out_path, "w") as out_file:
    for test in tests:
        name = test["name"]
        # Normalise coveredPercent: floats are truncated, None becomes 0.
        if type(test["coveredPercent"]) == float:
            coveredPercent = math.trunc(test["coveredPercent"])
        elif test["coveredPercent"] == None:
            coveredPercent = 0
        else:
            coveredPercent = test["coveredPercent"]
        # Report only classes that are not fully covered, skipping fflib libraries.
        if test["coveredPercent"] != 100 and name.find("fflib") == -1:
            print(name + " | " + str(coveredPercent) + "%")
            print("\n")
            out_file.write(name + '\n')
            out_file.write(str(coveredPercent) + '%\n\n')
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
10408549623 | import numpy as np
import math
import scipy
from stl import mesh
from mpl_toolkits import mplot3d
from matplotlib import pyplot
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from pyquaternion import Quaternion
# Tuning constants for the preprocessing stage.
equidistant_step_size = 3   # step between equidistant samples -- unit not shown here, TODO confirm
percentile_pc = 5           # percentile of triangle areas used as the "smallest" reference area
max_points_in_pc = 50000    # hard cap on the weighted point-cloud size (program aborts above this)
####MAIN
def startparam(input_file, max_distance, width_for_edge_detection, grid_resolution, calc_2D_Solution, calc_2D_with_edge_detection,
               calc_3D_Solution):
    """Top-level preprocessing entry point.

    Loads the STL file, builds the trendline coordinate system, interpolates
    the surface onto a grid (the user picks start/end points interactively),
    runs the bending-parameter calculation and returns the collected start
    parameters for the optimiser.  Heavily relies on module-level globals
    set by the callees (e.g. max_x/min_x/max_y/min_y, gamma_start --
    NOTE(review): `gamma_start` is set elsewhere in the file, not visible here).
    """
    print(
        "Preprocessing in progress... With high-resolution stl files and large area size differences it can lead to longer calculation times")
    # Read in the file:
    calc_trendline_KOS_of_geometry_from_stl_file(input_file)  # results saved in module globals!
    # Interpolation (also records the user-drawn start/end points in globals):
    z_grid_values_linear_trendline_KOS = interpolate_geometrie_in_trendline_KOS_and_draw_start_end_point(
        grid_resolution)
    # Calculation of the bending parameters (results saved in module globals):
    calc_bending_parameters(grid_resolution, max_distance, width_for_edge_detection, z_grid_values_linear_trendline_KOS,
                            calc_2D_with_edge_detection, calc_3D_Solution)
    ##### Extract start parameters ####
    L_aim, normal_at_start_global_KOS, direction_at_start_global_KOS, alpha_list, amount_of_bends, beta_list, endpoint_on_surface, l_list, startpoint_on_surface = extract_start_parameters(calc_2D_Solution, calc_3D_Solution)
    start_parameter = [l_list, L_aim, beta_list, alpha_list, startpoint_on_surface,
                       endpoint_on_surface, direction_at_start_global_KOS, normal_at_start_global_KOS, amount_of_bends, z_grid_values_linear_trendline_KOS, max_x, max_y, min_x, min_y, gamma_start]
    return start_parameter
def extract_start_parameters(calc_2D_Solution, calc_3D_Solution):
    """Collect the start parameters from the module globals filled by
    `calc_bending_parameters` and package them for the optimiser.

    Returns a 9-tuple; the l/beta/alpha lists each bundle the 3D, 2D and
    2D-with-edge-detection variants.  Exits the program when no bending
    points were computed.
    """
    startpoint_on_surface = bend_pts_xyz_global_2D[0][:]
    endpoint_on_surface = bend_pts_xyz_global_2D[-1][:]
    if x_direction_list_global_KOS == []:
        print("The calculated Tape has no bendingpoints")
        exit()
    direction_at_start_global_KOS = norm_vector(x_direction_list_global_KOS[0])
    # The 3D normal is only used when exclusively the 3D solution was requested.
    if not calc_2D_Solution and calc_3D_Solution:
        normal_at_start_global_KOS = norm_vector(normal_direction_start_3D)
    else:
        normal_at_start_global_KOS = norm_vector(normal_direction_start)  # normal_patch_global_KOS[0]
    # Length list for the chromosome (3D, 2D, 2D-edge-detection variants).
    l_list = [np.asarray(length_list_3D), lenght_list_2D, length_list_2DE]
    # Beta lists from calc_bending_parameters
    beta_list = [np.asarray(beta_angle_between_planes_list_3D), beta_angle_list_2D,
                 beta_angle_between_planes_list_2DE]
    # Alpha lists from calc_bending_parameters
    alpha_list = [np.asarray(alpha_angle_list_3D), alpha_angle_list_2D, alpha_angle_list_2DE]
    # L_aim: length of the curve of the interpolated surface (2D solution).
    L_aim = calc_L_aim(surfacepoints_between_Start_and_End)  # summed up distances
    # Number of bends = longest of the 3D/2D length lists minus one.
    if len(l_list[0]) > len(l_list[1]):
        amount_of_bends = len(l_list[0]) - 1
    else:
        amount_of_bends = len(l_list[1]) - 1
    return L_aim, normal_at_start_global_KOS, direction_at_start_global_KOS, alpha_list, amount_of_bends, beta_list, endpoint_on_surface, l_list, startpoint_on_surface
#######################################################################################################################
# Load stl file and analyse the geometry. Calc centerpoint and trendline of the geometry
def calc_trendline_KOS_of_geometry_from_stl_file(input_file):
    """Load the STL file and derive the geometry's "trendline" coordinate system.

    Side effects (module globals): `triangle_vectors_of_stl`, `num_of_triangles`,
    `avg_tri_normal_weighted`, `center_of_pointcloud_weighted_in_global_KOS`
    and `trendline_KOS_in_global_KOS` are all set here.
    """
    patch_vectors_of_stl_input = mesh.Mesh.from_file(input_file)  # Comment_DB: stl mesh
    # Triangle vectors needed for the visualization of the patch.
    global triangle_vectors_of_stl, num_of_triangles
    triangle_vectors_of_stl = patch_vectors_of_stl_input.vectors  # Comment_DB: triangle edges (wireframe)
    num_of_triangles = len(triangle_vectors_of_stl)
    # Calc areas of triangles to compare/weight them
    tri_areas = calc_tri_areas(triangle_vectors_of_stl)
    global center_of_pointcloud_weighted_in_global_KOS, trendline_KOS_in_global_KOS, avg_tri_normal_weighted
    avg_tri_normal_weighted = calc_avg_tri_norm_weighted_by_area(tri_areas, patch_vectors_of_stl_input)
    # Calculate approximate center of geometry
    tri_centerpoints = calc_tri_centerpoints(triangle_vectors_of_stl)  # Comment_DKu_Wenzel: basically the unweighted point cloud
    point_cloud_tri_centerpoints_weighted = calc_patch_pointcloud_weighted_by_area(tri_areas, tri_centerpoints)
    center_of_pointcloud_weighted_in_global_KOS = point_cloud_tri_centerpoints_weighted.mean(axis=0)  # Mean of x,y,z-Values
    # SVD for approximate orientation of geometry (trendline KOS)
    trendline_KOS_in_global_KOS = calc_trendline_global(center_of_pointcloud_weighted_in_global_KOS, point_cloud_tri_centerpoints_weighted)
# Functions in calc trendline
def calc_trendline_global(center_point_of_cloud_weighted, point_cloud_tri_centerpoints_weighted):
    """Assemble the trendline coordinate system (rows: x, y, z axis) in the
    global frame, forcing the x axis to point in positive global x."""
    axis_x, axis_y, axis_z = calc_trendline_axis_with_svd(
        point_cloud_tri_centerpoints_weighted, center_point_of_cloud_weighted)
    # The SVD fixes the principal axis only up to sign: flip x (and z, so the
    # system stays consistent) when x came out pointing in negative x.
    if axis_x[0] < 0:
        axis_x = -axis_x
        axis_z = -axis_z
    return np.vstack((axis_x, axis_y, axis_z))
def calc_tri_normals_from_stl(stl_normals, triangle_vectors_of_stl):
    """Recompute unit normals from the triangle vertices and orient them.

    The triangle order of `stl_normals` does not match the order of the
    vertices (triangles), so normals are rebuilt from edge cross products.
    The whole set is flipped when its average disagrees in x with the average
    STL normal (which points to the outside of the object) or points into
    negative z on average.  Uses the module global `num_of_triangles`.
    """
    normals = []
    for i in range(num_of_triangles):
        v1 = triangle_vectors_of_stl[i][0] - triangle_vectors_of_stl[i][1]
        v2 = triangle_vectors_of_stl[i][0] - triangle_vectors_of_stl[i][2]
        n = np.cross(v1, v2)
        n = norm_vector(n)
        normals.append(n)
    normals = np.asarray(normals)
    # Average of the normals stored in the STL file (points outside the object):
    avg_stl_normal = sum(stl_normals) / num_of_triangles
    # Average of the rebuilt normals:
    avg_sorted_normal = sum(normals) / num_of_triangles
    true_when_stl_and_tri_normal_not_same_direction = avg_sorted_normal[0] * avg_stl_normal[0] < 0
    true_when_z_from_tri_normal_neg = avg_sorted_normal[2] < 0
    # Flip everything so the rebuilt normals agree with the STL orientation
    # and point (on average) in positive z.
    if true_when_stl_and_tri_normal_not_same_direction or true_when_z_from_tri_normal_neg:
        normals = np.negative(normals)
    return normals
def calc_tri_centerpoints(triangle_vectors_of_stl):
    """Return the centroid of every triangle as an (n, 3) array.

    Fixed to iterate over the triangles actually passed in instead of the
    module-level `num_of_triangles` counter, so the helper also works as a
    standalone function on arbitrary input.
    """
    tri_centerpoints = []
    for triangle in triangle_vectors_of_stl:
        # Centroid = arithmetic mean of the three corner points, per axis.
        center = np.array([(triangle[0][0] + triangle[1][0] + triangle[2][0]) / 3,
                           (triangle[0][1] + triangle[1][1] + triangle[2][1]) / 3,
                           (triangle[0][2] + triangle[1][2] + triangle[2][2]) / 3])
        tri_centerpoints.append(center)
    return np.asarray(tri_centerpoints)
def calc_tri_corner_points(triangle_vectors_of_stl):
    """Return the unique corner points of all triangles as an (m, 3) array.

    Fixed to derive the triangle count from the input itself rather than the
    module-level `num_of_triangles` global, and vectorised: flattening the
    (n, 3, 3) triangle array to (3n, 3) corners replaces the nested loop.
    """
    all_corners = np.asarray(triangle_vectors_of_stl).reshape(-1, 3)
    # np.unique removes duplicate corners and sorts the rows, exactly like
    # the original implementation.
    return np.unique(all_corners, axis=0)
def calc_tri_areas(triangle_vectors_of_stl):
    """Return the surface area of every triangle as a 1-D array.

    BUG FIX: the original computed ``0.5 * |(v0-v1) - (v0-v2)|``, which
    algebraically simplifies to ``0.5 * |v2 - v1|`` -- half the length of ONE
    EDGE, not the triangle area.  The correct area is half the norm of the
    cross product of two edge vectors.  Also iterates over the input instead
    of the module-level `num_of_triangles` global.
    """
    tri_surface_area = []
    for triangle in triangle_vectors_of_stl:
        edge_a = triangle[1] - triangle[0]
        edge_b = triangle[2] - triangle[0]
        tri_surface_area.append(0.5 * np.linalg.norm(np.cross(edge_a, edge_b)))
    return np.asarray(tri_surface_area)
def calc_avg_tri_norm_weighted_by_area(tri_areas, patch_vectors_of_stl_input):
    """Average triangle normal, weighted by each triangle's share of the total area.

    Uses the module globals `triangle_vectors_of_stl` and `num_of_triangles`.
    """
    stl_normals = patch_vectors_of_stl_input.normals
    tri_normals = calc_tri_normals_from_stl(stl_normals, triangle_vectors_of_stl)
    # PERF FIX: hoisted out of the loop -- the original recomputed
    # sum(tri_areas) for every triangle, making the loop O(n^2).
    total_area = sum(tri_areas)
    weighted_norms = []
    for i in range(num_of_triangles):
        weighted_norms.append((tri_areas[i] / total_area) * tri_normals[i])
    return sum(weighted_norms)
def calc_patch_pointcloud_weighted_by_area(tri_areas, tri_centerpoints):
    """Build the weighted point cloud: each triangle centre is inserted as
    many times as its area weight dictates, so big triangles dominate the
    later SVD."""
    repeat_counts = calc_weights_for_center_points_by_percentil_area(tri_areas)
    weighted_cloud = []
    for tri_index in range(num_of_triangles):
        weighted_cloud.extend([tri_centerpoints[tri_index]] * repeat_counts[tri_index])
    return np.asarray(weighted_cloud)
def calc_weights_for_center_points_by_percentil_area(tri_areas):
    """Return, per triangle, how many times its centre goes into the point cloud.

    Because the main value decomposition (SVD) later runs on the point cloud,
    large triangles must be weighted more strongly than small ones: each centre
    is repeated proportionally to its area relative to a small reference area
    (the `percentile_pc`-th percentile of all areas, i.e. 90+% of triangles
    are bigger), with the factor rounded up so every centre appears at least
    once.  Aborts the program when the estimated cloud size would exceed
    `max_points_in_pc`.
    """
    area_whole_patch = sum(tri_areas)
    # Reference ("smallest") area: percentile_pc % of all triangles are smaller.
    lower_percentil_area = np.percentile(tri_areas, percentile_pc)
    # Upper bound on the cloud size if every bit of area got one point.
    estimated_number_points_in_pc = math.ceil(area_whole_patch / lower_percentil_area)
    # Termination condition: at most "max_points_in_pc" points are allowed.
    if max_points_in_pc < estimated_number_points_in_pc:
        print("ERROR: Please use a .stl-object with reduced resolution ")
        print("Number of triangles: ", num_of_triangles)
        print("Estimated number of points in pointcloud:", estimated_number_points_in_pc)
        print("Allowed number of points in pointcloud:", max_points_in_pc)
        exit(1)
    # Each triangle's weight = how often the reference area fits into it,
    # rounded up (so every triangle contributes at least one point).
    centerpoints_weights_area_tri = []
    for i in range(num_of_triangles):
        centerpoints_weights_area_tri.append(math.ceil(tri_areas[i] / lower_percentil_area))
    return centerpoints_weights_area_tri
def calc_trendline_axis_with_svd(patch_pc_weighted, center_point_of_cloud_weighted):
    """Return the (x, y, z) trendline axes derived from the weighted point cloud.

    x: principal direction of the cloud (SVD); z: the component of the average
    weighted triangle normal (module global `avg_tri_normal_weighted`)
    perpendicular to x; y: z cross x.
    """
    # Do Principal Component Analysis (PCA) on the mean-centered data, AKA SVD.
    # The result is [uu, dd, vv], where vv[0] is the principal direction.
    first_principal_components_pc_weighted = scipy.linalg.svd(patch_pc_weighted - center_point_of_cloud_weighted)  # scipy lib is faster than numpy
    # Definition of the trendline axes
    trendline_x_axis = first_principal_components_pc_weighted[2][0]  # vv[0]: direction of trendline
    trendline_x_axis = norm_vector(trendline_x_axis)
    # avg_tri_norm is not perpendicular to the x-axis, so project the point
    # pcc + avg_tri_norm back onto the x-axis ...
    trendline_avg_norm_point = center_point_of_cloud_weighted + np.dot(avg_tri_normal_weighted,
                                                                       trendline_x_axis) / np.dot(trendline_x_axis,
                                                                                                  trendline_x_axis) * trendline_x_axis
    # ... the z-axis is then the connection of pcc + avg_tri_norm with the
    # projected point (i.e. the normal component perpendicular to x).
    trendline_z_axis = (center_point_of_cloud_weighted + avg_tri_normal_weighted) - trendline_avg_norm_point
    trendline_z_axis = norm_vector(trendline_z_axis)
    trendline_y_axis = np.cross(trendline_z_axis, trendline_x_axis)
    return trendline_x_axis, trendline_y_axis, trendline_z_axis
def find_nearest(array, value):
    """Return the index of the element of `array` closest to `value`."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
def calc_distance_between_two_points(p1, p2):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(p2 - p1)
def project_pointtoplane(Point_to_project, plane_normal, plane_point):
    """Orthogonally project a point onto the plane defined by a support point
    and a (not necessarily unit-length) normal vector."""
    offset = Point_to_project - plane_point
    # Signed distance along the normal, in units of |normal|^2.
    scale = np.dot(offset, plane_normal) / np.dot(plane_normal, plane_normal)
    return Point_to_project - scale * plane_normal
def project_pointtoline(Point_to_project, linept1, linept2):
    """Orthogonally project a point onto the (infinite) line through two points.

    SOURCE: https://gamedev.stackexchange.com/questions/72528/how-can-i-project-a-3d-point-onto-a-3d-line
    """
    line_dir = linept2 - linept1
    to_point = Point_to_project - linept1
    # Scalar position of the foot point along the line direction.
    t = np.dot(to_point, line_dir) / np.dot(line_dir, line_dir)
    return linept1 + t * line_dir
def norm_vector(vector):
    """Return `vector` scaled to unit length."""
    reciprocal_length = 1 / np.linalg.norm(vector)
    return reciprocal_length * vector
#######################################################################################################################
# Interpolte geometry in rotated and translated trendline KOS
def interpolate_geometrie_in_trendline_KOS_and_draw_start_end_point(grid_resolution):
    """Interpolate the surface onto a grid in the trendline KOS and let the
    user click the start and end point on the resulting colormap.

    The clicked coordinates end up in the module globals
    `x_start_end_point` / `y_start_end_point` (set by the callee).
    """
    z_grid_values_linear_trendline_KOS = interpolate_geometrie(grid_resolution)
    # Blocks until the figure window is closed; records the drawn points.
    show_interpolation_and_draw_start_end_points(z_grid_values_linear_trendline_KOS)
    return z_grid_values_linear_trendline_KOS
# Functions in Interpolate start_geometrie
def interpolate_geometrie(grid_resolution):
    """Interpolate the surface height onto a regular grid in the trendline KOS.

    Returns the (grid_resolution x grid_resolution) array of linearly
    interpolated z values.  Grid extents and step sizes are published as
    module globals (max_x/min_x/max_y/min_y, dx/dy, grid_x, plus the grid
    indices closest to x=0 / y=0 and `tri_corner_points_trendline_KOS`).
    """
    # Comment_DKu_Wenzel: interpolation with centerpoints is partly less accurate.
    # Take the corner points of the triangles as interpolation supports;
    # first rotate them into the trendline KOS.
    global tri_corner_points_trendline_KOS
    tri_corner_points_global_KOS = calc_tri_corner_points(triangle_vectors_of_stl)
    tri_corner_points_trendline_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(tri_corner_points_global_KOS, trendline_KOS_in_global_KOS,
                                                                                     center_of_pointcloud_weighted_in_global_KOS)
    points_x_y_trendline_KOS = tri_corner_points_trendline_KOS[:, 0:2]
    points_z_trendline_KOS = tri_corner_points_trendline_KOS[:, 2]
    # Create the grid
    global max_x, min_x, max_y, min_y, grid_x, dx, dy, y_0_grid_point_index, x_0_grid_point_index
    grid_resolution_j = grid_resolution * 1j  # complex step: np.mgrid treats it as a point count
    max_x = max(points_x_y_trendline_KOS[:, 0])
    min_x = min(points_x_y_trendline_KOS[:, 0])
    max_y = max(points_x_y_trendline_KOS[:, 1])
    min_y = min(points_x_y_trendline_KOS[:, 1])
    dy = (max_y - min_y) / grid_resolution
    dx = (max_x - min_x) / grid_resolution
    # Grid indices of the points closest to y = 0 and x = 0.
    y_0_grid_point_index = np.asarray(np.round(max_y / (max_y - min_y) * grid_resolution), dtype=np.int32)
    x_0_grid_point_index = np.asarray(np.round(max_x / (max_x - min_x) * grid_resolution), dtype=np.int32)
    grid_x, grid_y = np.mgrid[min_x:max_x:grid_resolution_j, min_y:max_y:grid_resolution_j]
    # Interpolating the surface geometry (linear; NaN outside the convex hull).
    z_grid_values_linear_trendline_KOS = griddata(np.asarray(points_x_y_trendline_KOS, dtype=np.float32), np.asarray(points_z_trendline_KOS, dtype=np.float32), (grid_x, grid_y),
                                                  method='linear')
    return z_grid_values_linear_trendline_KOS
def calc_points_in_trendline_KOS_for_interpolation():
    """Return all STL corner points transformed into the trendline KOS.

    Relies on the module globals set by `calc_trendline_KOS_of_geometry_from_stl_file`.
    """
    tri_corner_points = calc_tri_corner_points(triangle_vectors_of_stl)
    points = translate_and_rotate_points_from_OLD_to_NEW_KOS(tri_corner_points, trendline_KOS_in_global_KOS, center_of_pointcloud_weighted_in_global_KOS)
    return points
def show_interpolation_and_draw_start_end_points(z_grid_values_linear_trendline_KOS):
    """Display the interpolated height field and let the user click start/end.

    Blocks until the figure window is closed.  The clicked coordinates are
    stored in the module globals `x_start_end_point` / `y_start_end_point`;
    the pair is ordered so the start is always the point with the smaller x.
    """
    # Show interpolation
    print("Please close window after selecting Start- and Endpoint")
    figure = pyplot.figure()  # Comment_DB: create a new figure
    plt.imshow(z_grid_values_linear_trendline_KOS.T, extent=(min_x, max_x, min_y, max_y), origin='lower')
    plt.colorbar()
    # plt.plot(x_values_trim, y_values, 'bo', linewidth=2.0, label='Schnitt')
    plt.title('Please select Start- and Endpoint')
    # Containers for the user-drawn start and end point.
    global x_start_end_point, y_start_end_point
    x_start_end_point = []
    y_start_end_point = []

    def onclick(event):
        # Records each click; a third click resets the selection.
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              (event.button, event.x, event.y, event.xdata, event.ydata))
        plt.plot(event.xdata, event.ydata, ',')
        figure.canvas.draw()
        global x_start_end_point, y_start_end_point
        if len(x_start_end_point) < 2:
            x_start_end_point.append(event.xdata)
            y_start_end_point.append(event.ydata)
        else:
            print("That was one point to much. Please select again Start and Endpoint")
            x_start_end_point.clear()
            y_start_end_point.clear()
        if len(x_start_end_point) == 2:
            # Swap so the start point is always the one with the smaller x.
            if x_start_end_point[0] > x_start_end_point[1]:
                xdata_1 = x_start_end_point[1]
                ydata_1 = y_start_end_point[1]
                x_start_end_point[1] = x_start_end_point[0]
                y_start_end_point[1] = y_start_end_point[0]
                x_start_end_point[0] = xdata_1
                y_start_end_point[0] = ydata_1

    figure.canvas.mpl_connect('button_press_event', onclick)
    global continue_bool
    continue_bool = False

    def handle_close(event):
        # Flag polled by the busy-wait loop below.
        print('Closed Figure!')
        global continue_bool
        continue_bool = True

    figure.canvas.mpl_connect('close_event', handle_close)
    plt.show()
    # Wait until points are drawn in and the window is closed.
    while continue_bool is False:
        pyplot.pause(2)
# Not used in stl_preprocessing, but needed if we load settings (see load settingssheet)
def calc_trendline_and_interpolate(grid_resolution, input_file):
    """Non-interactive variant of `startparam`'s first steps.

    Not used in stl_preprocessing itself, but needed when settings are loaded
    from a settingssheet (no interactive start/end picking).  Returns the grid
    description and the interpolated z values; several of the returned names
    are module globals set by `interpolate_geometrie`.
    """
    # Read in the file:
    calc_trendline_KOS_of_geometry_from_stl_file(input_file)  # results saved in module globals!
    # Interpolation:
    z_grid_values_linear_trendline_KOS = interpolate_geometrie(grid_resolution)
    return grid_x, max_x, max_y, min_x, min_y, z_grid_values_linear_trendline_KOS, x_0_grid_point_index, y_0_grid_point_index
#######################################################################################################################
# Translation and rotation from Points to new trendline_axis_KOS
def translate_and_rotate_points_from_OLD_to_NEW_KOS(points_in_old_KOS, new_KOS_in_old_KOS,
                                                    new_zero_point_in_old_KOS, reverse=False):
    """Transform points from the old KOS into the new KOS (or back with reverse=True).

    Idea: first rotate so the new x axis maps onto (1,0,0) (rotations about z,
    then y), add a roll correction about x so y and z line up too, then shift
    everything to the new origin.
    """
    # Axes of the old (basic) coordinate system.
    x_axis_old_KOS = np.asarray((1, 0, 0), dtype=np.float32)
    y_axis_old_KOS = np.asarray((0, 1, 0), dtype=np.float32)
    z_axis_old_KOS = np.asarray((0, 0, 1), dtype=np.float32)
    # Rotation angles: z/y align the x axis, x is the remaining roll correction.
    anglez, angley = calc_angle_for_coordinate_rotation_x_trendline(x_axis_old_KOS, z_axis_old_KOS, new_KOS_in_old_KOS)  # x-axis now: (1,0,0)
    anglex = correction_angle_x_axis_for_y_z_orientation(angley, anglez, new_KOS_in_old_KOS, y_axis_old_KOS, z_axis_old_KOS)  # for y(0,1,0) and z(0,0,1)
    if reverse:
        anglez = -anglez
        angley = -angley
        anglex = -anglex
    # Rotation of translation vector / new zero point.
    translation_vector = calc_translation_vector(anglex, angley, anglez, new_zero_point_in_old_KOS, reverse,
                                                 x_axis_old_KOS, y_axis_old_KOS, z_axis_old_KOS)
    # Rotation of the points themselves.
    points_in_new_KOS = rotate_points(anglex, angley, anglez, points_in_old_KOS, reverse, x_axis_old_KOS, y_axis_old_KOS, z_axis_old_KOS)
    # Translate the rotated points to the new zero.
    points_in_new_KOS = translate_points_to_new_zero(translation_vector, points_in_new_KOS)
    return points_in_new_KOS
def calc_translation_vector(anglex, angley, anglez, new_zero_point_in_old_KOS, reverse, x_axis_old_KOS, y_axis_old_KOS, z_axis_old_KOS):
    """Return the translation vector expressed in the frame the points will be in.

    Forward: the new origin must itself be rotated into the new frame first.
    Reverse: the shift happens in the old, unrotated KOS, so only the sign flips.
    """
    if reverse:
        translation_vector = -(
            new_zero_point_in_old_KOS)  # in the reverse case we shift in the old, unrotated KOS
    else:
        translation_vector = rotate_point_around_z_y_and_x_axis_with_given_angle(anglez, angley, anglex, z_axis_old_KOS,
                                                                                 y_axis_old_KOS, x_axis_old_KOS,
                                                                                 new_zero_point_in_old_KOS,
                                                                                 reverse)
    return translation_vector
def calc_angle_for_coordinate_rotation_x_trendline(x_axis_old_KOS, z_axis_old_KOS, new_KOS_in_old_KOS):
    """Return (anglez, angley): the rotations about z then y that map the new
    x axis (row 0 of `new_KOS_in_old_KOS`) onto the old x axis (1,0,0)."""
    # Angle about z: project the new x axis into the x-y plane and measure
    # against the old x axis.
    new_x_trendline_projected_to_x_y = project_pointtoplane((new_KOS_in_old_KOS[0][:]), z_axis_old_KOS, np.zeros(3))
    new_x_trendline_projected_to_x_y = norm_vector(new_x_trendline_projected_to_x_y)
    anglez = math.acos(np.dot(x_axis_old_KOS, new_x_trendline_projected_to_x_y))
    # If the y component is negative, rotate about z in the other direction.
    if new_KOS_in_old_KOS[0][1] <= -0.0001: anglez = -anglez
    # Angle about y: undo the z rotation on the new x axis, then measure the
    # remaining angle to the old x axis.
    rotated_x_trend_around_z = Quaternion(axis=z_axis_old_KOS, angle=-anglez).rotate(new_KOS_in_old_KOS[0][:])
    rotated_x_trend_around_z = norm_vector(rotated_x_trend_around_z)
    angley = math.acos(np.dot(x_axis_old_KOS, rotated_x_trend_around_z))
    # If the z component is negative, rotate about y in the other direction.
    if rotated_x_trend_around_z[2] <= -0.0001: angley = -angley
    return anglez, angley
def correction_angle_x_axis_for_y_z_orientation(angley, anglez, new_trendline_axis_in_old_KOS, y_axis_old_KOS, z_axis_old_KOS):
    """Return the roll angle about x needed so the new y/z axes line up with
    (0,1,0)/(0,0,1) after the z and y rotations have been applied.

    NOTE(review): the bare `except` clauses below silently fall back to 0 --
    presumably guarding math.acos against dot products slightly outside
    [-1, 1] due to rounding; clamping the argument would be the explicit fix.
    TODO confirm before changing.
    """
    # Rotate the trendline axes by the already-determined z and y angles.
    new_trendline_axis_in_global_KOS = []
    for i in range(len(new_trendline_axis_in_old_KOS[:, 0])):
        new_trendline_axis_points_rotatet_i = rotate_point_around_z_y_and_x_axis_with_given_angle(anglez, angley, 0, z_axis_old_KOS,
                                                                                                  y_axis_old_KOS, y_axis_old_KOS,
                                                                                                  new_trendline_axis_in_old_KOS[
                                                                                                      i], False)
        new_trendline_axis_in_global_KOS.append(new_trendline_axis_points_rotatet_i)
    angel_x = 0
    # Quadrant handling: the sign/offset of the roll angle is chosen from the
    # signs of the rotated z axis' (index 2) and y axis' (index 1) y components.
    if new_trendline_axis_in_global_KOS[2][1] > 0 and new_trendline_axis_in_global_KOS[1][1] > 0:
        try: angel_x = -math.acos(np.dot(z_axis_old_KOS, new_trendline_axis_in_global_KOS[2]))
        except: angel_x = 0
    elif new_trendline_axis_in_global_KOS[2][1] < 0 and new_trendline_axis_in_global_KOS[1][1] > 0:
        try: angel_x = math.acos(np.dot(z_axis_old_KOS, new_trendline_axis_in_global_KOS[2]))
        except: angel_x = 0
    elif new_trendline_axis_in_global_KOS[2][1] < 0 and new_trendline_axis_in_global_KOS[1][1] < 0:
        try: angel_x = math.pi - math.acos(np.dot(z_axis_old_KOS, new_trendline_axis_in_global_KOS[2]))
        except: angel_x = 0
    elif new_trendline_axis_in_global_KOS[2][1] > 0 and new_trendline_axis_in_global_KOS[1][1] < 0:
        try: angel_x = math.pi + math.acos(np.dot(z_axis_old_KOS, new_trendline_axis_in_global_KOS[2]))
        except: angel_x = 0
    return angel_x
def rotate_point_around_z_y_and_x_axis_with_given_angle(angle1, angle2, angle3, axis1, axis2, axis3, point_to_rotate, reverse):
    """Apply three successive axis rotations to a point via quaternions.

    Forward order: -angle1 about axis1, then angle2 about axis2, then angle3
    about axis3.  With `reverse=True` the rotations are applied in the
    opposite order (axis3 first, axis1 last) to undo a forward rotation.
    """
    if reverse:
        rotation_sequence = ((axis3, angle3), (axis2, angle2), (axis1, -angle1))
    else:
        rotation_sequence = ((axis1, -angle1), (axis2, angle2), (axis3, angle3))
    rotated = point_to_rotate
    for axis, angle in rotation_sequence:
        rotated = Quaternion(axis=axis, angle=angle).rotate(rotated)
    return rotated
def rotate_points(anglex, angley, anglez, points_in_old_KOS, reverse, x_axis_old_KOS, y_axis_old_KOS, z_axis_old_KOS):
    """Rotate every row of `points_in_old_KOS` by the z/y/x angle triple and
    return the result as a float32 array."""
    rotated_rows = [
        rotate_point_around_z_y_and_x_axis_with_given_angle(
            anglez, angley, anglex,
            z_axis_old_KOS, y_axis_old_KOS, x_axis_old_KOS,
            point, reverse)
        for point in points_in_old_KOS
    ]
    return np.asarray(rotated_rows, dtype=np.float32)
def translate_points_to_new_zero(new_zero_point_in_old_KOS_rotated, points_in_new_KOS):
    """Shift the points in place so that the given (already rotated) point
    becomes the new origin; the mutated array is also returned."""
    for axis in range(3):
        points_in_new_KOS[:, axis] = np.subtract(points_in_new_KOS[:, axis],
                                                 new_zero_point_in_old_KOS_rotated[axis])
    return points_in_new_KOS
#######################################################################################################################
# Calculation of the bending parameters (2D, 2D with edge detection, and 3D solutions)
def calc_bending_parameters(grid_resolution_int, max_distance, width_for_edge_detection, z_grid_values_linear_trendline_KOS,
                            calc_2D_with_edge_detection, calc_3D_Solution):
    """Top-level driver for the bend-point computation.

    Resets the module-level result lists, runs one pass along the user-drawn
    start/end connection (which also fills the 2D tape parameters), and — for
    the 3D solution — iterates direction by direction and finally plots the
    results. All outputs are accumulated in global lists; nothing is returned.
    """
    initialize_global_lists_of_3D_bending_and_plot_parameter()
    # Calc 2D-Bendpoints along the drawn start/end connection
    # (calc_tape_para_2D=True on this first call).
    calc_bending_points(grid_resolution_int, z_grid_values_linear_trendline_KOS,
                        [x_start_end_point_list[-2], x_start_end_point_list[-1]],
                        [y_start_end_point_list[-2], y_start_end_point_list[-1]], max_distance,
                        width_for_edge_detection, 0, 0, True, calc_2D_with_edge_detection, calc_3D_Solution)
    if calc_3D_Solution:
        # Start calculating bendingpoints: each pass appends one bend and
        # the start point of the next bending direction to the global lists.
        calculate_iteratively_local_bendingpoints(alpha_angle_list_3D, grid_resolution_int, max_distance,
                                                  width_for_edge_detection, x_values_trim_trendline_KOS_stacked,
                                                  x_start_end_point_list, y_values_trim_trendline_KOS_stacked,
                                                  y_start_end_point_list, z_grid_values_linear_trendline_KOS)
        # Show results
        show_results_2D_Plots_and_Colormap(surface_points_local_KOS_left_stacked,
                                           surface_points_local_KOS_right_stacked,
                                           surface_points_local_KOS_stacked, x_values_trim_trendline_KOS_stacked,
                                           y_values_trim_trendline_KOS_stacked, z_grid_values_linear_trendline_KOS)
# Functions in calc_bending_parameters
def initialize_global_lists_of_3D_bending_and_plot_parameter():
    """Reset all module-level accumulator lists used during one full
    bend-parameter calculation (bending parameters, plot point stacks,
    colormap line points, and the failed-edge-match counter)."""
    # Startpoint and one additional Point for the direction (Endpoint). First values are drawn into the colormap.
    # NOTE(review): x_start_end_point / y_start_end_point are module-level
    # values defined outside this chunk — presumably the user-drawn points.
    global x_start_end_point_list, y_start_end_point_list, new_start_left, new_start_right
    x_start_end_point_list = x_start_end_point
    y_start_end_point_list = y_start_end_point
    new_start_left, new_start_right = [],[]
    # Lists of bending parameters (3D solution) and the start normal/gamma.
    global x_direction_list_global_KOS, x_direction_rotated_list_global_KOS, normal_patch_global_KOS, beta_angle_between_planes_list_3D, alpha_angle_list_3D, length_list_3D, edge_line_global, bend_pts_xyz_global_3D, normal_direction_start_3D, gamma_start
    x_direction_list_global_KOS, x_direction_rotated_list_global_KOS, normal_patch_global_KOS, beta_angle_between_planes_list_3D, alpha_angle_list_3D, edge_line_global, length_list_3D, bend_pts_xyz_global_3D, normal_direction_start_3D,gamma_start = [], [], [], [], [], [], [], [], [], 0
    # Bending parameters for the 2D-with-edge-detection (2DE) solution.
    global length_list_2DE, beta_angle_between_planes_list_2DE, alpha_angle_list_2DE
    length_list_2DE, beta_angle_between_planes_list_2DE, alpha_angle_list_2DE = [], [], []
    # Blue points for the global 3D plot.
    global surfacepoints_between_Start_and_End, surface_points_global_KOS_stacked, surface_points_global_KOS_left_stacked, surface_points_global_KOS_right_stacked
    surfacepoints_between_Start_and_End, surface_points_global_KOS_stacked, surface_points_global_KOS_left_stacked, surface_points_global_KOS_right_stacked = [], [], [], []
    # 2D plot in the tilted local KOS (coordinate system).
    global surface_points_local_KOS_stacked, surface_points_local_KOS_left_stacked, surface_points_local_KOS_right_stacked, \
        bend_pts_xz_local_stacked, bend_pts_xz_local_right_stacked, bend_pts_xz_local_left_stacked
    surface_points_local_KOS_stacked, surface_points_local_KOS_left_stacked, surface_points_local_KOS_right_stacked, \
    bend_pts_xz_local_stacked, bend_pts_xz_local_right_stacked, bend_pts_xz_local_left_stacked = [], [], [], [], [], []
    # Line drawn into the 2D colormap.
    global x_values_trim_trendline_KOS_stacked, y_values_trim_trendline_KOS_stacked
    x_values_trim_trendline_KOS_stacked, y_values_trim_trendline_KOS_stacked = [], []
    global counter_failed_matches_of_edges
    counter_failed_matches_of_edges = 0
def calculate_iteratively_local_bendingpoints(alpha_angle_list, grid_resolution_int, max_distance,
                                              width_for_edge_detection, x_values_trim_stacked, xdata_list,
                                              y_values_trim_stacked, ydata_list, z_grid_values_linear_trendline_KOS):
    """Repeat calc_bending_points direction by direction until fewer than
    three bend points remain (the tape end is reached), refreshing the
    colormap plot after every pass. All bending parameters are accumulated
    in global lists by calc_bending_points."""
    # calc_local_bending_points has no return value except num_bendpoints. All the bending parameters are saved in the global lists
    num_bendpoints = calc_bending_points(grid_resolution_int, z_grid_values_linear_trendline_KOS, [xdata_list[-2], xdata_list[-1]],
                                         [ydata_list[-2], ydata_list[-1]], max_distance, width_for_edge_detection, 0, 0)
    # Show calculated direction in colormap
    # NOTE(review): min_x/max_x/min_y/max_y are module-level globals defined outside this chunk.
    pyplot.figure()
    plt.imshow(z_grid_values_linear_trendline_KOS.T, extent=(min_x, max_x, min_y, max_y), origin='lower')
    plt.plot(x_values_trim_stacked[0][:], y_values_trim_stacked[0][:], 'bo', linewidth=1.0, label='Schnitt')
    plt.show(block=False)  # non-blocking so the loop below can keep updating
    pyplot.pause(0.3) #Give system time to print the plot
    while num_bendpoints > 2:
        # Each call consumes the newest start/end pair appended to xdata_list/ydata_list.
        num_bendpoints = calc_bending_points(grid_resolution_int, z_grid_values_linear_trendline_KOS,
                                             [xdata_list[-2], xdata_list[-1]], [ydata_list[-2], ydata_list[-1]],
                                             max_distance, width_for_edge_detection, alpha_angle_list[-1], alpha_end=0)
        # Update colormap
        for i in range(len(x_values_trim_stacked)):
            plt.plot(x_values_trim_stacked[i][:], y_values_trim_stacked[i][:], 'bo', linewidth=1.0, label='Schnitt')
        plt.draw()
        pyplot.pause(0.01)
def show_results_2D_Plots_and_Colormap(surface_points_local_KOS_left_stacked,
                                       surface_points_local_KOS_right_stacked,
                                       surface_points_local_KOS_stacked, x_values_trim_stacked,
                                       y_values_trim_stacked, z_grid_values_linear_trendline_KOS):
    """Plot the final results: the colormap with the tape path, a 2D side
    view of the height profile with its linear approximation, and the
    left/right cross-section subplots. Blocks on the final plt.show()."""
    # In the different local KOS the x-Values don't align. Correcting it here for nicer 2D plots.
    connect_points_in_local_KOS(surface_points_local_KOS_stacked)
    connect_points_in_local_KOS(surface_points_local_KOS_left_stacked)
    connect_points_in_local_KOS(surface_points_local_KOS_right_stacked)
    connect_points_in_local_KOS(bend_pts_xz_local_stacked)
    connect_points_in_local_KOS(bend_pts_xz_local_left_stacked)
    connect_points_in_local_KOS(bend_pts_xz_local_right_stacked)
    # Colormap with the interpolated surface and the tape direction line.
    pyplot.figure()
    plt.title('Interpolation with start- end- connection')
    plt.imshow(z_grid_values_linear_trendline_KOS.T, extent=(min_x, max_x, min_y, max_y), origin='lower')
    plt.plot(x_values_trim_stacked[0][:], y_values_trim_stacked[0][:], 'bo', linewidth=1.0,
             label='Tape direction')
    plt.legend()
    for i in range(1,len(x_values_trim_stacked)):
        plt.plot(x_values_trim_stacked[i][:], y_values_trim_stacked[i][:], 'bo', linewidth=1.0)
    # 2D side view: surface points (x vs z) and the linear bend approximation.
    pyplot.figure()
    plt.title('Sideview height profil')
    plt.plot(surface_points_local_KOS_stacked[0][:, 0],
             surface_points_local_KOS_stacked[0][:, 2], 'bo', linewidth=1.0, label='surface points')
    plt.plot(bend_pts_xz_local_stacked[0][:, 0], bend_pts_xz_local_stacked[0][:, 1], color='green', linewidth=3.0,label='linear Approximation')
    plt.legend()
    for i in range(1,len(x_values_trim_stacked)):
        plt.plot(surface_points_local_KOS_stacked[i][:, 0],
                 surface_points_local_KOS_stacked[i][:, 2], 'bo', linewidth=1.0)
        plt.plot(bend_pts_xz_local_stacked[i][:, 0], bend_pts_xz_local_stacked[i][:, 1], color='green', linewidth=3.0)
    pyplot.figure()
    plt.subplot(211)
    # NOTE(review): this subplot is titled 'right' but plots the *_left_stacked
    # data (and vice versa below) — confirm whether the titles or the data are swapped.
    plt.title('right')
    for i in range(len(x_values_trim_stacked)):
        plt.plot(surface_points_local_KOS_left_stacked[i][:, 0],
                 surface_points_local_KOS_left_stacked[i][:, 2], 'bo', linewidth=1.0, label='cross section')
        plt.plot(bend_pts_xz_local_left_stacked[i][:, 0], bend_pts_xz_local_left_stacked[i][:, 1], color='green', linewidth=3.0,
                 label='linear Approximation')
    plt.subplot(212)
    plt.title('left')
    for i in range(len(x_values_trim_stacked)):
        plt.plot(surface_points_local_KOS_right_stacked[i][:, 0],
                 surface_points_local_KOS_right_stacked[i][:, 2], 'bo', linewidth=1.0, label='cross section')
        plt.plot(bend_pts_xz_local_right_stacked[i][:, 0], bend_pts_xz_local_right_stacked[i][:, 1], color='green', linewidth=3.0,
                 label='linear Approximation')
    plt.show()
def connect_points_in_local_KOS(surface_points_local_KOS_stacked):
    """Chain the stacked point segments along x in place: shift each segment
    so its first x value continues exactly where the previous (already
    shifted) segment ended."""
    for idx in range(1, len(surface_points_local_KOS_stacked)):
        segment = surface_points_local_KOS_stacked[idx]
        previous_end_x = surface_points_local_KOS_stacked[idx - 1][-1, 0]
        x_offset = segment[0, 0] - previous_end_x
        segment[:, 0] = np.subtract(segment[:, 0], x_offset)
#######################################################################################################################
# Calculation of the 2D-Solution.
def calc_tape_parameter_for_2D_solution(bend_pts_xyz_global,bend_pts_xz_local):
    """Store the 2D-solution tape parameters in module-level globals:
    bend points, beta angles, segment lengths, and all-zero alpha angles
    (the pure 2D solution has no rotation around the tape axis)."""
    global bend_pts_xyz_global_2D, beta_angle_list_2D, lenght_list_2D, alpha_angle_list_2D
    bend_pts_xyz_global_2D = bend_pts_xyz_global
    beta_angle_list_2D = calc_2D_betas(bend_pts_xz_local)
    lenght_list_2D = calc_2D_lengths(bend_pts_xz_local)
    alpha_angle_list_2D = np.zeros(len(bend_pts_xz_local))
# Beta list (bend angles) for the chromosome ("Chromo") used by the evolutionary algorithm
def calc_2D_betas(bend_pts_xz_local):
    """Compute the signed bend angle (beta) at every interior bend point of
    a 2D (x, z) polyline.

    The angle between consecutive segments is taken from the dot product;
    its sign is negative for a downward kink (decreasing slope). Angles with
    magnitude below 0.01 rad are snapped to 0, and the stored value is
    negated (sign convention of the downstream solver).

    Returns a numpy array with len(bend_pts_xz_local) - 2 entries.
    """
    beta_list = []
    for i in range(1, len(bend_pts_xz_local) - 1):
        r0 = bend_pts_xz_local[i] - bend_pts_xz_local[i - 1]
        r1 = bend_pts_xz_local[i + 1] - bend_pts_xz_local[i]
        # acos raises ValueError when rounding pushes the cosine slightly
        # outside [-1, 1]; a zero-length segment can raise ZeroDivisionError.
        # Fall back to a straight (0) angle in both cases, as before — but
        # no longer swallow unrelated exceptions with a bare except.
        try:
            angle = math.acos(np.dot(r0, r1) / (np.linalg.norm(r0) * np.linalg.norm(r1)))
        except (ValueError, ZeroDivisionError):
            angle = 0
        steepness_r0 = r0[1] / r0[0]
        steepness_r1 = r1[1] / r1[0]
        if steepness_r1 < steepness_r0:
            # Slope decreases across the bend: bend points the other way.
            angle = -angle
        if np.abs(angle) < 0.01:  # Comment_DB: Small angles approx. to 0 degrees
            angle = 0
        beta_list.append(-angle)
    beta_list = np.asarray(beta_list)
    return beta_list
def calc_2D_lengths(bend_pts_xz_local):
    """Return the Euclidean lengths of the segments between consecutive
    bend points as a numpy array (one entry per segment)."""
    segment_lengths = [
        np.linalg.norm(bend_pts_xz_local[i] - bend_pts_xz_local[i - 1])
        for i in range(1, len(bend_pts_xz_local))
    ]
    return np.asarray(segment_lengths)
#######################################################################################################################
# For every iteration in calc_bending_parameters, new bend points and parameters have to be calculated.
def calc_bending_points(grid_resolution_int, z_grid_values_linear_trendline_KOS, x_start_end_point_trendline_KOS,
                        y_start_end_point_trendline_KOS, max_distance, width_for_edge_detection, alpha_start=0,
                        alpha_end=0, calc_tape_para_2D=False, calc_2D_with_edge_detection=False, calc_3D_Solution=True):
    """Compute bend points and tape parameters for one bending direction.

    Builds the local KOS for the given start/end points, extracts bend points
    on the surface for the center line plus a left and right offset line
    (edge detection), derives the start vectors, and computes the 2D and/or
    2DE/3D tape parameters. Results are appended to module-level lists; the
    return value is the number of left bend points, which the iterative
    caller uses as its loop condition.
    """
    # x, y and z axis of the new (local) KOS.
    local_KOS_in_trendline_KOS, x_slope = calc_local_KOS_in_trendline_KOS(x_start_end_point_trendline_KOS,y_start_end_point_trendline_KOS)
    # Calc bendpoints on surface in new trendline direction, including left and right for edge directions
    bend_pts_xyz_global, bend_pts_xyz_global_left, bend_pts_xyz_global_right, bend_pts_xyz_trendline, \
    bend_pts_xz_local,local_KOS_in_global_KOS = calc_bend_pts_between_start_and_end(alpha_end, alpha_start,
                                                                                    grid_resolution_int, max_distance,
                                                                                    local_KOS_in_trendline_KOS,
                                                                                    width_for_edge_detection, x_slope,
                                                                                    x_start_end_point_trendline_KOS,
                                                                                    y_start_end_point_trendline_KOS,
                                                                                    z_grid_values_linear_trendline_KOS,
                                                                                    calc_tape_para_2D)
    normal_at_start, x_direction_start = calc_start_vectors_from_bendpoints(bend_pts_xyz_global,
                                                                            bend_pts_xyz_global_left,
                                                                            bend_pts_xyz_global_right,
                                                                            bend_pts_xz_local, local_KOS_in_global_KOS)
    # Tape parameters for the pure 2D solution (only on the first pass).
    if calc_tape_para_2D:
        calc_tape_parameter_for_2D_solution(bend_pts_xyz_global,bend_pts_xz_local)
    # Tape parameters for the 2DE/3D solutions.
    edge_directions = calc_edge_directions(bend_pts_xyz_global_left, bend_pts_xyz_global_right)
    if calc_tape_para_2D:
        global edge_directions_global_2DE
        edge_directions_global_2DE = edge_directions
    x_direction_list_current_direction_global_KOS, \
    normal_patch_current_direction_global_KOS, \
    rotated_x_direction_around_edge_current_direction_global_KOS, \
    beta_angle_between_planes_list_current_direction,\
    alpha_angle_list_current_direction,\
    lengths_between_planes_list = calc_tape_parameter_2DE_3D(bend_pts_xyz_global, bend_pts_xyz_global_left,
                                                             bend_pts_xyz_global_right,
                                                             bend_pts_xyz_trendline, edge_directions,
                                                             x_direction_start, normal_at_start,
                                                             bend_pts_xz_local, calc_2D_with_edge_detection)
    # Add bend/tape parameters from the first found bendpoint to the global lists.
    append_bend_parameters_at_first_bendpoint_to_global_list(alpha_angle_list_current_direction, bend_pts_xyz_global,
                                                             bend_pts_xyz_trendline,
                                                             beta_angle_between_planes_list_current_direction,
                                                             edge_directions, lengths_between_planes_list,
                                                             normal_patch_current_direction_global_KOS,
                                                             rotated_x_direction_around_edge_current_direction_global_KOS,
                                                             x_direction_list_current_direction_global_KOS, calc_tape_para_2D, calc_3D_Solution)
    return len(bend_pts_xyz_global_left)
def calc_start_vectors_from_bendpoints(bend_pts_xyz_global, bend_pts_xyz_global_left, bend_pts_xyz_global_right,
                                       bend_pts_xz_local, local_KOS_in_global_KOS):
    """Derive the tape's start direction and start normal from the first two
    bend points. On the very first call it additionally stores the
    start/end surface points and the start normal / gamma angle in globals.

    Returns (normal_at_start, x_direction_start).
    """
    # Local x direction: unit vector from the first to the second bend point.
    x_direction_start = norm_vector(bend_pts_xyz_global[1] - bend_pts_xyz_global[0]) # Start_direction
    # L_aim, Start Direction and Start Normal
    global surfacepoints_between_Start_and_End, normal_direction_start, normal_direction_start_3D, gamma_start
    if surfacepoints_between_Start_and_End == []: # Just once from the first calculation cycle
        # Start_End_connection for L_aim
        surfacepoints_between_Start_and_End = bend_pts_xz_local
        # Start normal: perpendicular to the start direction and the local y axis.
        normal_direction_start = np.cross(x_direction_start, local_KOS_in_global_KOS[1])
        normal_direction_start = norm_vector(normal_direction_start)
        normal_at_start = calc_tape_normal(bend_pts_xyz_global[1], bend_pts_xyz_global_left[0],
                                           bend_pts_xyz_global_right[0]) # Start_normal #normal_patch_global_KOS[-1]
        if normal_direction_start_3D == []:
            normal_direction_start_3D = normal_at_start
        # gamma_start: angle between the 3D start normal and the 2D start normal;
        # its sign is taken from the rotated normal's y component below.
        gamma_start = math.acos(np.dot(normal_direction_start_3D, normal_direction_start))
        normals_rotated = translate_and_rotate_points_from_OLD_to_NEW_KOS(
            np.stack([x_direction_start, normal_direction_start_3D, normal_direction_start]),
            local_KOS_in_global_KOS, np.asarray([0, 0, 0]))
        if normals_rotated[1][1] > 0:
            gamma_start = -gamma_start
    else:
        normal_at_start = calc_tape_normal(bend_pts_xyz_global[1], bend_pts_xyz_global_left[0],
                                           bend_pts_xyz_global_right[0]) # Start_normal #normal_patch_global_KOS[-1]
    return normal_at_start, x_direction_start
def calc_bend_pts_between_start_and_end(alpha_end, alpha_start, grid_resolution_int, max_distance,
                                        local_KOS_in_trendline_KOS, width_for_edge_detection, x_slope,
                                        x_start_end_point_trendline_KOS, y_start_end_point_trendline_KOS,
                                        z_grid_values_linear_trendline_KOS, calc_tape_para_2D):
    """Extract surface points and bend points between start and end for the
    center line and its left/right offset lines (used for edge detection).

    On the 2D pass (calc_tape_para_2D=True) the raw surface/bend points are
    stored in globals; otherwise the plot points up to the second bend point
    are appended to the global stacks and the next left/right start points
    are recorded. Returns the center/left/right bend points (global and
    trendline KOS), the local xz bend points, and the local KOS in global KOS.
    """
    start_point_xyz_trendline_KOS, start_point_xyz_trendline_KOS_left, start_point_xyz_trendline_KOS_right, \
    x_end_index, x_end_index_left, x_end_index_right, \
    x_start_index, x_start_index_left, x_start_index_right = calc_start_end_point_trendline_KOS(
        alpha_end, alpha_start, grid_resolution_int, local_KOS_in_trendline_KOS, width_for_edge_detection,
        x_start_end_point_trendline_KOS, y_start_end_point_trendline_KOS, z_grid_values_linear_trendline_KOS)
    # Calculation of the surfacepoints in the local, trendline and global direction and extracting bendpoints from them.
    # Left
    bend_pts_xyz_global_left, bend_pts_xyz_trendline_left, bend_pts_xz_local_left, surface_points_global_KOS_left, \
    surface_points_local_KOS_left, x_values_trim_left_trendline_KOS, y_values_trim_left_trendline_KOS, local_KOS_in_global_KOS_left = calc_points_on_surface_and_extract_bendline(
        grid_resolution_int, max_distance, start_point_xyz_trendline_KOS_left, local_KOS_in_trendline_KOS,
        x_end_index_left, x_slope, x_start_index_left, z_grid_values_linear_trendline_KOS)
    # Right
    bend_pts_xyz_global_right, bend_pts_xyz_trendline_right, bend_pts_xz_local_right, surface_points_global_KOS_right, \
    surface_points_local_KOS_right, x_values_trim_right_trendline_KOS, y_values_trim_right_trendline_KOS,local_KOS_in_global_KOS_right = calc_points_on_surface_and_extract_bendline(
        grid_resolution_int, max_distance, start_point_xyz_trendline_KOS_right, local_KOS_in_trendline_KOS,
        x_end_index_right, x_slope, x_start_index_right, z_grid_values_linear_trendline_KOS)
    # Center
    bend_pts_xyz_global, bend_pts_xyz_trendline, bend_pts_xz_local, surface_points_global_KOS, \
    surface_points_local_KOS, x_values_trim_trendline_KOS, y_values_trim_trendline_KOS,local_KOS_in_global_KOS = calc_points_on_surface_and_extract_bendline(
        grid_resolution_int, max_distance, start_point_xyz_trendline_KOS, local_KOS_in_trendline_KOS,
        x_end_index, x_slope, x_start_index, z_grid_values_linear_trendline_KOS)
    if calc_tape_para_2D:
        # First (start-to-end) pass: keep the raw point sets for the 2D solution.
        global surface_points_start_end_global_KOS, surface_points_start_end_global_KOS_left, surface_points_start_end_global_KOS_right
        surface_points_start_end_global_KOS = surface_points_global_KOS
        surface_points_start_end_global_KOS_left = surface_points_global_KOS_left
        surface_points_start_end_global_KOS_right = surface_points_global_KOS_right
        global bend_points_start_end_global_KOS, bend_points_start_end_global_KOS_left, bend_points_start_end_global_KOS_right
        bend_points_start_end_global_KOS = bend_pts_xyz_global
        bend_points_start_end_global_KOS_left = bend_pts_xyz_global_left
        bend_points_start_end_global_KOS_right = bend_pts_xyz_global_right
    # Trim plot points to second bendpoint and add to global list
    if not calc_tape_para_2D:
        append_plot_points_till_second_bendpoint_to_global_list(bend_pts_xz_local, bend_pts_xz_local_left,
                                                                bend_pts_xz_local_right,
                                                                surface_points_global_KOS,
                                                                surface_points_global_KOS_left,
                                                                surface_points_global_KOS_right,
                                                                surface_points_local_KOS,
                                                                surface_points_local_KOS_left,
                                                                surface_points_local_KOS_right,
                                                                x_values_trim_trendline_KOS, y_values_trim_trendline_KOS)
    global new_start_left, new_start_right
    if not calc_tape_para_2D:
        # Second bend point on each side becomes the start of the next direction.
        new_start_right = bend_pts_xyz_trendline_right[1]
        new_start_left = bend_pts_xyz_trendline_left[1]
    return bend_pts_xyz_global, bend_pts_xyz_global_left, bend_pts_xyz_global_right, bend_pts_xyz_trendline, bend_pts_xz_local,local_KOS_in_global_KOS
def calc_start_end_point_trendline_KOS(alpha_end, alpha_start, grid_resolution_int, local_KOS_in_trendline_KOS,
                                       width_for_edge_detection, x_start_end_point_trendline_KOS, y_start_end_point_trendline_KOS,
                                       z_grid_values_linear_trendline_KOS):
    """Compute start/end points (and their grid indices) in the trendline
    KOS for the center tape section and for the left/right side lines used
    to estimate the bending angles. The side lines are offset by
    width_for_edge_detection and corrected by the alpha-dependent length
    change at the bends."""
    # Start and endpoint for tape section
    end_point_xyz_trendline_KOS, \
    start_point_xyz_trendline_KOS, \
    x_start_index, \
    x_end_index = calc_Start_End_in_trendline_KOS_from_xdata_ydata(grid_resolution_int, x_start_end_point_trendline_KOS,
                                                                   y_start_end_point_trendline_KOS,
                                                                   z_grid_values_linear_trendline_KOS)
    # Start and endpoint from side line for estimating bending angles
    # Cornerpoints: extra length at start/end caused by a rotated (alpha != 0) bend edge.
    delta_length_start_bend = calc_delta_length_at_bend(width_for_edge_detection, alpha_start)
    delta_length_end_bend = calc_delta_length_at_bend(width_for_edge_detection, alpha_end)
    end_point_xyz_trendline_KOS_right, \
    start_point_xyz_trendline_KOS_right, \
    x_end_index_right, \
    x_start_index_right = calc_start_end_point_side_in_trendline_KOS(False, delta_length_end_bend,
                                                                     delta_length_start_bend,
                                                                     end_point_xyz_trendline_KOS, grid_resolution_int,
                                                                     start_point_xyz_trendline_KOS,
                                                                     width_for_edge_detection,
                                                                     local_KOS_in_trendline_KOS,
                                                                     z_grid_values_linear_trendline_KOS)
    end_point_xyz_trendline_KOS_left, \
    start_point_xyz_trendline_KOS_left, \
    x_end_index_left, \
    x_start_index_left = calc_start_end_point_side_in_trendline_KOS(True, delta_length_end_bend,
                                                                    delta_length_start_bend,
                                                                    end_point_xyz_trendline_KOS, grid_resolution_int,
                                                                    start_point_xyz_trendline_KOS,
                                                                    width_for_edge_detection,
                                                                    local_KOS_in_trendline_KOS,
                                                                    z_grid_values_linear_trendline_KOS)
    return start_point_xyz_trendline_KOS, start_point_xyz_trendline_KOS_left, start_point_xyz_trendline_KOS_right, x_end_index, x_end_index_left, x_end_index_right, x_start_index, x_start_index_left, x_start_index_right
def calc_points_on_surface_and_extract_bendline(grid_resolution_int, max_distance, start_point_xyz_trendline_KOS,
                                                local_KOS_in_trendline_KOS, x_end_index, x_slope, x_start_index,
                                                z_grid_values_linear_trendline_KOS):
    """For one cut line: sample surface points from the interpolated grid,
    transform them into the global and local KOS, and extract the bend
    points (local xz, trendline, and global representation).

    Returns the bend points, the surface points (global and local), the
    trimmed x/y values in the trendline KOS, and the local KOS in global KOS.
    """
    new_bending_direction_points_on_surface_trendline_KOS, y_intercept_trendline_KOS, x_values_indizes_trim, \
    x_values_trim_trendline_KOS, y_values_indizes_trim, y_values_trim_trendline_KOS = extract_points_from_interpolated_surface(
        start_point_xyz_trendline_KOS, grid_resolution_int, x_slope, z_grid_values_linear_trendline_KOS, x_start_index, x_end_index)
    surface_points_local_KOS, surface_points_global_KOS, \
    local_KOS_in_global_KOS = transform_surface_points_to_global_and_local_KOS(y_intercept_trendline_KOS,
                                                                               new_bending_direction_points_on_surface_trendline_KOS,
                                                                               local_KOS_in_trendline_KOS)
    bend_pts_xz_local, bend_pts_xyz_trendline, bend_pts_xyz_global = calc_local_and_global_bendpoints(max_distance,
                                                                                                      surface_points_local_KOS,
                                                                                                      local_KOS_in_trendline_KOS,
                                                                                                      y_intercept_trendline_KOS)
    return bend_pts_xyz_global, bend_pts_xyz_trendline, bend_pts_xz_local, surface_points_global_KOS, surface_points_local_KOS, x_values_trim_trendline_KOS, y_values_trim_trendline_KOS,local_KOS_in_global_KOS
def append_bend_parameters_at_first_bendpoint_to_global_list(alpha_angle_list_current_direction, bend_pts_xyz_global,
                                                             bend_pts_xyz_trendline,
                                                             beta_angle_between_planes_list_current_direction,
                                                             edge_directions, lengths_between_planes_list,
                                                             normal_patch_current_direction_global_KOS,
                                                             rotated_x_direction_around_edge_current_direction_global_KOS,
                                                             x_direction_list_current_direction_global_KOS, calc_tape_para_2D, calc_3D_Solution):
    """Append the tape parameters of the first bend point of the current
    direction to the module-level result lists.

    For the 2DE pass (calc_tape_para_2D=True) the full parameter arrays are
    stored; for the 3D solution only the first bend of this direction is
    appended, plus the start/end points of the NEXT bending direction
    (intersection of the rotated x direction with the end line).
    """
    global x_direction_list_global_KOS, x_direction_rotated_list_global_KOS, normal_patch_global_KOS, \
        length_list_3D,beta_angle_between_planes_list_3D, alpha_angle_list_3D, \
        length_list_2DE, beta_angle_between_planes_list_2DE, alpha_angle_list_2DE, \
        edge_line_global, x_start_end_point_list, y_start_end_point_list, bend_pts_xyz_global_3D
    # Tape parameter for EA
    if calc_tape_para_2D:
        length_list_2DE = np.asarray(lengths_between_planes_list)
        beta_angle_between_planes_list_2DE = np.asarray(
            beta_angle_between_planes_list_current_direction)
        alpha_angle_list_2DE = np.asarray(alpha_angle_list_current_direction)
        if calc_3D_Solution == False: # If no 3D Solution is calculated, we need to save the first x_direction and normal: start_d/start_n Parameter
            x_direction_list_global_KOS.append(x_direction_list_current_direction_global_KOS[0])
            x_direction_rotated_list_global_KOS.append(rotated_x_direction_around_edge_current_direction_global_KOS[0])
            normal_patch_global_KOS.append(normal_patch_current_direction_global_KOS[0])
    else:
        length_list_3D.append(lengths_between_planes_list[0])
        # Bendpoint and edge
        bend_pts_xyz_global_3D.append(bend_pts_xyz_global[0])
        # NOTE(review): `[:][:2]` slices the first two ROWS (it is not `[:, :2]`);
        # the edge line is built from rows of bend points +/- 50 * edge direction
        # — confirm this row-wise slicing is intended.
        edge_line_global_current_direction = [
            (bend_pts_xyz_global[:][:2] + np.multiply(edge_directions[:][:2], 50)),
            (bend_pts_xyz_global[:][:2] - np.multiply(edge_directions[:][:2], 50))]
        edge_line_global.append([edge_line_global_current_direction[0][1], edge_line_global_current_direction[1][1]])
        if len(edge_directions) == 2: # Last part
            bend_pts_xyz_global_3D.append(bend_pts_xyz_global[1])
        if len(edge_directions) > 2:
            # startpoint and direction of next bending direction.
            x_start_end_point_list.append(bend_pts_xyz_trendline[1][0])
            y_start_end_point_list.append(bend_pts_xyz_trendline[1][1])
            # New Direction and Endpoint in trendline KOS(same then interpolation)
            rotated_x_direction_point_in_trendline_KOS = np.asarray(
                [bend_pts_xyz_global[1] + np.multiply(rotated_x_direction_around_edge_current_direction_global_KOS[0], 1),
                 bend_pts_xyz_global[1]])
            rotated_x_direction_around_edge_trendline_KOS_current_direction = translate_and_rotate_points_from_OLD_to_NEW_KOS(
                rotated_x_direction_point_in_trendline_KOS, trendline_KOS_in_global_KOS, center_of_pointcloud_weighted_in_global_KOS)
            x_intersect, y_intersect = calc_new_endpoint(bend_pts_xyz_trendline,
                                                         rotated_x_direction_around_edge_trendline_KOS_current_direction,
                                                         x_start_end_point_list, y_start_end_point_list)
            x_start_end_point_list.append(x_intersect)
            y_start_end_point_list.append(y_intersect)
        # Tape parameter for EA
        beta_angle_between_planes_list_3D.append(beta_angle_between_planes_list_current_direction[0])
        alpha_angle_list_3D.append(alpha_angle_list_current_direction[0])
        # Directions
        x_direction_list_global_KOS.append(x_direction_list_current_direction_global_KOS[0])
        x_direction_rotated_list_global_KOS.append(rotated_x_direction_around_edge_current_direction_global_KOS[0])
        normal_patch_global_KOS.append(normal_patch_current_direction_global_KOS[1])
def calc_new_endpoint(bend_pts_xyz_trendline, rotated_x_direction_around_edge_trendline_KOS_current_direction,
                      xdata_list, ydata_list):
    """Intersect the new tape direction with the end line.

    The end line is perpendicular to the first start->end direction and
    anchored at the drawn endpoint; the new direction line runs from the
    second bend point towards the rotated x direction. Both lines are given
    by two points each and intersected in homogeneous coordinates.
    """
    # End line: perpendicular to the first start-end direction, through the drawn endpoint.
    drawn_endpoint = np.asarray([xdata_list[1], ydata_list[1]])
    perpendicular_direction = np.asarray([-(ydata_list[1] - ydata_list[0]), (xdata_list[1] - xdata_list[0])])
    second_point_on_end_line = drawn_endpoint + perpendicular_direction
    # New direction line: from the second bend point towards the rotated x direction.
    new_startpoint = np.asarray([bend_pts_xyz_trendline[1][0], bend_pts_xyz_trendline[1][1]])
    point_in_new_direction = np.asarray(
        [rotated_x_direction_around_edge_trendline_KOS_current_direction[0][0],
         rotated_x_direction_around_edge_trendline_KOS_current_direction[0][1]])
    # Homogeneous coordinates: a line through two points is their cross
    # product, and two lines intersect at the cross product of the lines.
    point_stack = np.vstack([new_startpoint, point_in_new_direction, drawn_endpoint,
                             second_point_on_end_line])
    homogeneous_points = np.hstack((point_stack, np.ones((4, 1))))
    line_in_new_direction = np.cross(homogeneous_points[0], homogeneous_points[1])
    end_line = np.cross(homogeneous_points[2], homogeneous_points[3])
    x, y, z = np.cross(line_in_new_direction, end_line)  # point of intersection
    # NOTE: z == 0 means the lines are parallel; the division then yields
    # inf/nan exactly as in the original implementation.
    x_intersect = x / z
    y_intersect = y / z
    return x_intersect, y_intersect
def append_plot_points_till_second_bendpoint_to_global_list(bend_pts_xz_local, bend_pts_xz_local_left,
                                                            bend_pts_xz_local_right,
                                                            surface_points_global_KOS,
                                                            surface_points_global_KOS_left,
                                                            surface_points_global_KOS_right,
                                                            surface_points_local_KOS,
                                                            surface_points_local_KOS_left,
                                                            surface_points_local_KOS_right,
                                                            x_values_trim_trendline_KOS, y_values_trim_trendline_KOS):
    """Trim the plot point sets of the current direction at the second bend
    point and append them to the module-level plot stacks (global 3D plot,
    local 2D plots, and the colormap line)."""
    # Calc trim index: first surface point at/after the second bend point (per side).
    end_index_pts_global, end_index_pts_global_left, end_index_pts_global_right = calc_trim_index_at_second_bendpoint(
        bend_pts_xz_local, bend_pts_xz_local_left, bend_pts_xz_local_right, surface_points_local_KOS,
        surface_points_local_KOS_left, surface_points_local_KOS_right)
    # Global 3D-Plot
    global surface_points_global_KOS_stacked, surface_points_global_KOS_left_stacked, surface_points_global_KOS_right_stacked
    surface_points_global_KOS_stacked.append(
        surface_points_global_KOS[:][:end_index_pts_global])
    surface_points_global_KOS_right_stacked.append(
        surface_points_global_KOS_right[:][:end_index_pts_global_right])
    surface_points_global_KOS_left_stacked.append(
        surface_points_global_KOS_left[:][:end_index_pts_global_left])
    # local 2D-Plot
    global surface_points_local_KOS_stacked, surface_points_local_KOS_left_stacked, surface_points_local_KOS_right_stacked, \
        bend_pts_xz_local_stacked, bend_pts_xz_local_right_stacked, bend_pts_xz_local_left_stacked
    surface_points_local_KOS_stacked.append(
        surface_points_local_KOS[:end_index_pts_global])
    surface_points_local_KOS_left_stacked.append(
        surface_points_local_KOS_left[:end_index_pts_global_left])
    surface_points_local_KOS_right_stacked.append(
        surface_points_local_KOS_right[:end_index_pts_global_right])
    # Only the first two bend points (start and second bend) belong to this direction.
    bend_pts_xz_local_stacked.append(bend_pts_xz_local[:2])
    bend_pts_xz_local_right_stacked.append(bend_pts_xz_local_right[:2])
    bend_pts_xz_local_left_stacked.append(bend_pts_xz_local_left[:2])
    # for trendline KOS, 2D-Plot-Colormap
    global x_values_trim_trendline_KOS_stacked, y_values_trim_trendline_KOS_stacked
    x_values_trim_trendline_KOS_stacked.append(x_values_trim_trendline_KOS[:end_index_pts_global])
    y_values_trim_trendline_KOS_stacked.append(y_values_trim_trendline_KOS[:end_index_pts_global])
def calc_trim_index_at_second_bendpoint(bend_pts_xz_local, bend_pts_xz_local_left, bend_pts_xz_local_right,
                                        surface_points_local_KOS,
                                        surface_points_local_KOS_left,
                                        surface_points_local_KOS_right):
    """For the center/left/right point sets, find the index of the first
    surface point whose x coordinate reaches the second bend point's x.

    Returns a (center, left, right) tuple; an entry is -1 when no surface
    point reaches the threshold or fewer than two bend points exist
    (the caller then keeps all points via the ``[:-1]`` slice semantics).
    """
    end_index_pts_global = _first_index_at_or_after(surface_points_local_KOS, bend_pts_xz_local)
    end_index_pts_global_left = _first_index_at_or_after(surface_points_local_KOS_left, bend_pts_xz_local_left)
    end_index_pts_global_right = _first_index_at_or_after(surface_points_local_KOS_right, bend_pts_xz_local_right)
    return end_index_pts_global, end_index_pts_global_left, end_index_pts_global_right
def _first_index_at_or_after(surface_points, bend_pts):
    """Index of the first surface point with x >= x of the second bend point; -1 if none."""
    try:
        threshold = bend_pts[1][0]
        # next() with a default avoids building the full index list and the
        # bare except the original used to map "no match" to -1.
        return next((i for i, x in enumerate(surface_points[:, 0]) if x >= threshold), -1)
    except (IndexError, TypeError):
        return -1
def calc_bend_pts(max_distance, x_z_surface_point):
    """Approximate a 2D (x, z) surface curve by a polyline of bend points.

    Starts with only the first and last surface point and repeatedly inserts
    the surface point of maximum divergence until the linearized curve stays
    within `max_distance` of the surface everywhere. Returns the bend points
    sorted by x as a numpy array.
    """
    bend_pts_xz = []
    bend_pts_xz.append([x_z_surface_point[0][0], x_z_surface_point[0][1]]) # Comment_DB: start point 2D (x coord, y coord)
    bend_pts_xz.append([x_z_surface_point[-1][0],x_z_surface_point[-1][1]]) # Comment_DB: end point 2D (x coord, y coord)
    bend_pts_xz = np.asarray(bend_pts_xz)
    # Inserting bendpoints while the max distance to the surface is too big:
    insert_pts = True
    while insert_pts:
        points_on_line_between_bends_filled_up = []
        points_on_line_between_bends_filled_up.append([bend_pts_xz[0][0], bend_pts_xz[0][1]]) # Comment_DB: only the first bend point (starting point at edge) appended to bend points curve list
        points_on_line_between_bends_filled_up = calc_points_on_line_between_bends_filled_up(bend_pts_xz,
                                                                                             points_on_line_between_bends_filled_up, x_z_surface_point)
        max_divergence = calc_point_of_max_divergence_between_smooth_and_lin_curve(
            points_on_line_between_bends_filled_up, x_z_surface_point)
        # Comment_DB: We know at which x-coord of points_on_line_between_bends_filled_up the max_divergence happens --> counter i
        # no further points, if the chosen maximum distance is not surpassed
        if max_divergence[0] < max_distance: # Comment_DB: This implies that there will be one extra bend, as the above code will have executed already, max_distance: User Input
            break
        bend_pts_xz = np.insert(bend_pts_xz, -1,
                                np.array([points_on_line_between_bends_filled_up[max_divergence[1]][0],
                                          x_z_surface_point[max_divergence[1]][1]]),
                                axis=0) # Comment_DB: insert a corner at x coord (counter i) and y coord (counter i) of max divergence
        bend_pts_xz = bend_pts_xz[bend_pts_xz[:, 0].argsort()] # Comment_DB: Bend points sorted in an array
    return bend_pts_xz
def calc_point_of_max_divergence_between_smooth_and_lin_curve(points_on_line_between_bends_filled_up, x_y_points_filled_up):
    """Return (distance, index) of the point where the linearized curve
    diverges most from the smoothed surface curve (Euclidean distance,
    compared point by point)."""
    divergences = []
    for idx, line_point in enumerate(points_on_line_between_bends_filled_up):
        surface_point = x_y_points_filled_up[idx]
        dx = line_point[0] - surface_point[0]
        dz = line_point[1] - surface_point[1]
        divergences.append((dx ** 2 + dz ** 2) ** 0.5)
    # Tuple comparison resolves ties to the largest index, matching the original.
    return max((dist, idx) for idx, dist in enumerate(divergences))
def calc_points_on_line_between_bends_filled_up(bend_pts_xy, bend_pts_xy_curve, x_y_points_filled_up):
    """Fill in linearly interpolated points between consecutive bend points,
    sampled at the same x positions as the surface points, so the linear
    approximation can be compared point by point with the surface curve.

    `bend_pts_xy_curve` arrives containing only the start point and is
    returned as a numpy array covering the full start-to-end line.
    """
    j = 1 # Comment_DB: at this point, bend_pts_xy curve only has the starting point in it, thus j = 1 is the number of points in the list. j = 1 is also the index of the NEXT point!
    for i in range(1, len(bend_pts_xy)): # Comment_DB: len(bend_pts_xy) is 2 for first iteration
        slope_between_bends = (bend_pts_xy[i - 1][1] - bend_pts_xy[i][1]) / (bend_pts_xy[i - 1][0] - bend_pts_xy[i][0])
        while bend_pts_xy_curve[-1][0] < bend_pts_xy[i][0]: # Comment_DB: while last x coord VALUE less than ith x coord VALUE in bend_pts_xy (If greater, then that means last point is reached)
            y_add = bend_pts_xy_curve[-1][1] + slope_between_bends * (
                x_y_points_filled_up[j][0] - x_y_points_filled_up[j - 1][0]) # Comment_DB: y = b + mx (finds next change in y linearly --> Produces a linear plot until end point at edge!!)
            bend_pts_xy_curve.append([x_y_points_filled_up[j][0], y_add]) # Comment_DB: append the NEXT point into the list
            j = j + 1 # Comment_DB: NEXT POINT
    bend_pts_xy_curve = np.asarray(bend_pts_xy_curve) # Comment_DB: This is now one linear curve from start to end point. Everything here is dependent on xy_patch_curve. Below will take divergence into consideration
    return bend_pts_xy_curve
def calc_tape_parameter_2DE_3D(bend_pts_xyz_global, bend_pts_xyz_global_left, bend_pts_xyz_global_right,
                               bend_pts_xyz_trendline, edge_directions, x_direction_start, normal_at_start,
                               bend_pts_xz_local, calc_2D_with_edge_detection):
    """Derive per-bend tape parameters: directions, normals, angles and lengths.

    For each bend point the tape x-direction and surface normal before the bend
    are computed, the bending angles alpha/beta are derived, and the x-direction
    after rotating around the edge is obtained via a quaternion rotation.

    Returns:
        (x_direction_list_global, normal_direction_list_global,
         rotated_x_direction_around_edge_global, beta_angle_between_planes_list,
         alpha_angle_between_planes_list, lengths_between_planes_list)
    """
    x_direction_list_global = [x_direction_start]
    normal_direction_list_global = [normal_at_start]
    # length of the first tape segment (note: 'lenght' typo kept for local name)
    lenght_between_first_two_bends = np.linalg.norm(bend_pts_xz_local[1] - bend_pts_xz_local[0])
    lengths_between_planes_list = [lenght_between_first_two_bends]
    rotated_x_direction_around_edge_global = []
    beta_angle_between_planes_list, alpha_angle_between_planes_list = [],[]
    # with edge detection, walk every edge; otherwise only the first bend is processed
    range_till_end = (len(edge_directions) - 1) if calc_2D_with_edge_detection else 2
    for i in range(1, range_till_end):
        # bare except: stop when bend_pts_xz_local runs out of points
        try: length_current_direction = np.linalg.norm(bend_pts_xz_local[i+1] - bend_pts_xz_local[i])
        except: break
        # Calc x_y_and_normal direction of the Tape at each bending point
        x_direction_before_bend_global = norm_vector(bend_pts_xyz_global[i] - bend_pts_xyz_global[i - 1])
        # Normal of the plane through the left/right edge points of bend i and the previous middle point.
        normal_before_bend_global = -calc_tape_normal(bend_pts_xyz_global[i-1], bend_pts_xyz_global_left[i],bend_pts_xyz_global_right[i])
        y_direction_before_bend_global = np.cross(normal_before_bend_global, x_direction_before_bend_global)
        # rows = tape x/y/normal axes expressed in global coordinates
        tape_orientation_before_bend_KOS_in_global_KOS = np.stack([x_direction_before_bend_global, y_direction_before_bend_global, normal_before_bend_global])
        try: normal_after_bend_global = calc_tape_normal(bend_pts_xyz_global[i + 1], bend_pts_xyz_global_left[i],bend_pts_xyz_global_right[i])
        except:
            print("Not same amount of bending points")
            break
        beta_angle = calc_3D_beta(bend_pts_xyz_trendline, i, normal_before_bend_global, normal_after_bend_global)
        # NOTE: calc_3D_alpha may flip edge_directions[i] in place to orient the edge
        alpha_angle = calc_3D_alpha(edge_directions, i, normal_before_bend_global,
                                    tape_orientation_before_bend_KOS_in_global_KOS,
                                    y_direction_before_bend_global)
        # Calc new x_direction after bending around edge (rotation by beta around the edge axis)
        rotated_x_direction_around_edge_i_global = Quaternion(axis=edge_directions[i], angle=beta_angle).rotate(x_direction_before_bend_global)
        rotated_x_direction_around_edge_global.append(rotated_x_direction_around_edge_i_global)
        # save x_directions, alpha, beta, and normals in lists
        x_direction_list_global.append(x_direction_before_bend_global)  # x_direction_start appears twice if there are 1+ bending points
        alpha_angle_between_planes_list.append(alpha_angle)
        beta_angle_between_planes_list.append(beta_angle)
        normal_direction_list_global.append(normal_after_bend_global)
        lengths_between_planes_list.append(length_current_direction)
    rotated_x_direction_around_edge_global = np.asarray(rotated_x_direction_around_edge_global)
    x_direction_list_global = np.asarray(x_direction_list_global)
    return x_direction_list_global, normal_direction_list_global, rotated_x_direction_around_edge_global,beta_angle_between_planes_list,alpha_angle_between_planes_list,lengths_between_planes_list
def calc_3D_alpha(edge_directions, i, normal_at_bendpoint_0_tape, tape_orientation_before_bend_KOS_in_global_KOS, y_direction_tape):
    """Return alpha: the angle between the tape y-axis and the edge direction.

    The edge and the tape side vector are transformed into the local tape KOS
    to decide the orientation (sign/quadrant) of the angle.

    Side effect: ``edge_directions[i]`` may be flipped in place so the edge
    points to the tape's positive-y side.
    """
    # stack edge and tape y-axis, then express both in the local tape KOS
    edge_and_tape_side = np.stack([edge_directions[i], tape_orientation_before_bend_KOS_in_global_KOS[1]])
    edge_and_tape_side_TapeKOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(edge_and_tape_side,tape_orientation_before_bend_KOS_in_global_KOS,np.asarray([0, 0, 0]))
    # Orienting Edge: flip so its local y-component is non-negative (mutates caller's list)
    if edge_and_tape_side_TapeKOS[0][1] < 0:
        edge_and_tape_side_TapeKOS[0][:] = -edge_and_tape_side_TapeKOS[0][:]
        edge_directions[i] = -edge_directions[i]
    # Orienting Tape side the same way
    if edge_and_tape_side_TapeKOS[1][1] < 0:
        edge_and_tape_side_TapeKOS[1][:] = -edge_and_tape_side_TapeKOS[1][:]
    # bare except guards math.acos against a dot product just outside [-1, 1]
    try:
        alpha_angle = math.acos(np.dot(edge_directions[i], tape_orientation_before_bend_KOS_in_global_KOS[1]))
    except:
        alpha_angle = 0
    # edge pointing to negative local x: angle lies in the other quadrant
    if edge_and_tape_side_TapeKOS[0][0] < 0:
        alpha_angle = (math.pi - alpha_angle)
    return alpha_angle
def calc_3D_beta(bend_pts_xyz_trendline, i, normal_at_bendpoint_0_tape, normal_at_bendpoint_1_tape):
    """Return the signed bending angle beta between two consecutive tape normals.

    The magnitude is the angle between the normals before and after bend ``i``;
    the sign is negative when the trendline's z-slope increases across the bend.
    """
    unsigned_beta = math.acos(np.dot(normal_at_bendpoint_0_tape, normal_at_bendpoint_1_tape))
    # unit directions into and out of bend i along the trendline
    incoming_direction = norm_vector(bend_pts_xyz_trendline[i] - bend_pts_xyz_trendline[i - 1])
    outgoing_direction = norm_vector(
        bend_pts_xyz_trendline[i + 1] - bend_pts_xyz_trendline[i])
    # rising z across the bend flips the sign of beta
    if incoming_direction[2] < outgoing_direction[2]:
        return -unsigned_beta
    return unsigned_beta
def calc_edge_directions(bend_pts_xyz_global_left, bend_pts_xyz_global_right):
    """Return unit direction vectors of each bend edge (left point -> right point).

    If the left and right sides detected a different number of bend points, the
    longer array is trimmed from its end until both match. Each trim increments
    the module-level ``counter_failed_matches_of_edges``; after 25 accumulated
    failures the program terminates via ``exit()``.
    """
    global counter_failed_matches_of_edges
    while len(bend_pts_xyz_global_right) != len(bend_pts_xyz_global_left): # Comment_DKu_Wenzel: Just looking for the first Bend would optimize stability and would be faster
        # drop the last point of whichever side has more bend points
        if len(bend_pts_xyz_global_right) < len(bend_pts_xyz_global_left): bend_pts_xyz_global_left = np.delete(bend_pts_xyz_global_left,-1,axis=0)
        else: bend_pts_xyz_global_right = np.delete(bend_pts_xyz_global_right,-1,axis=0) # right < left
        print("left and right not same amount of bendingpoints")
        counter_failed_matches_of_edges += 1
        if counter_failed_matches_of_edges > 25:
            print("Please restart, matching of left an right edges failed. Maybe try other width.")
            exit()
    edge_directions = []
    for i in range(len(bend_pts_xyz_global_left)):
        # normalized vector from the left edge point to the matching right edge point
        edge_direction = bend_pts_xyz_global_right[i] - bend_pts_xyz_global_left[i]
        edge_direction = norm_vector(edge_direction)
        edge_directions.append(edge_direction)
    return edge_directions
def calc_tape_normal(bend_pts_xyz_global, bend_pts_xyz_global_left, bend_pts_xyz_global_right):
    """Return the unit normal of the plane spanned by the bend point and its
    left/right edge points (cross product right x left, then normalized)."""
    to_left = bend_pts_xyz_global_left - bend_pts_xyz_global
    to_right = bend_pts_xyz_global_right - bend_pts_xyz_global
    # right x left keeps the orientation convention of the original code
    return norm_vector(np.cross(to_right, to_left))
# Functions in calc local bending points
def calc_delta_length_at_bend(width, alpha):
    """Return the length correction at a bend for a tape of the given width.

    For obtuse ``alpha`` the supplementary angle is used and the correction is
    positive; for acute (or right) ``alpha`` the correction is negative.
    """
    half_width = width / 2
    if alpha > math.pi / 2:
        return half_width * math.tan(math.pi - alpha)
    return -half_width * math.tan(alpha)
def calc_local_KOS_in_trendline_KOS(x_start_end_point_trendline_KOS,y_start_end_point_trendline_KOS):
    """Fit a least-squares line y = m*x + b through the given points and build
    the local coordinate system (KOS) aligned with that line.

    Returns:
        (3x3 array with rows x/y/z axis of the local KOS, slope m of the fit)
    """
    # least-squares design matrix for y = x_slope * x + y_intercept
    design_matrix = np.vstack([x_start_end_point_trendline_KOS,
                               np.ones(len(x_start_end_point_trendline_KOS))]).T
    x_slope, y_intercept = np.linalg.lstsq(design_matrix, y_start_end_point_trendline_KOS, rcond=None)[0]
    # x axis along the fitted line, y axis perpendicular in-plane, z unchanged
    axis_x = norm_vector(np.asarray((1, x_slope, 0), dtype=np.float32))
    axis_y = norm_vector(np.asarray((-x_slope, 1, 0), dtype=np.float32))
    axis_z = np.asarray((0, 0, 1), dtype=np.float32)
    return np.vstack((axis_x, axis_y, axis_z)), x_slope
def calc_start_end_point_side_in_trendline_KOS(calc_left_side, delta_length_end_bend, delta_length_start_bend,
                                               end_point_drawn, grid_resolution_int, start_point_drawn, width,
                                               local_KOS_in_trendline_KOS, z_grid_values_linear_trendline_KOS):
    """Compute start/end points of one tape edge (left or right) in trendline KOS.

    Offsets the drawn centre-line points by +/- width/2 along the local y axis
    and by the bend length correction along the local x axis, then resolves the
    corresponding grid indices and z values.

    Returns:
        (end_point_drawn_side, start_point_drawn_side, x_end_index_side, x_start_index_side)
    """
    # We need to consider how the Tape is placed in 3D
    if calc_left_side:
        Start_point_side_trendline_KOS = start_point_drawn - local_KOS_in_trendline_KOS[1] * width / 2 + delta_length_start_bend * local_KOS_in_trendline_KOS[0]
        End_point_side_trendline_KOS = end_point_drawn - local_KOS_in_trendline_KOS[1] * width / 2 + delta_length_end_bend * local_KOS_in_trendline_KOS[0]
        # module-level override of the left start point, if one was set
        # NOTE(review): presumably written by an earlier iteration/pass — confirm
        if new_start_left != []:
            Start_point_side_trendline_KOS = new_start_left
    else:
        Start_point_side_trendline_KOS = start_point_drawn + local_KOS_in_trendline_KOS[1] * width / 2 - delta_length_start_bend * local_KOS_in_trendline_KOS[0]
        End_point_side_trendline_KOS = end_point_drawn + local_KOS_in_trendline_KOS[1] * width / 2 - delta_length_end_bend * local_KOS_in_trendline_KOS[0]
        # module-level override of the right start point, if one was set
        if new_start_right != []:
            Start_point_side_trendline_KOS = new_start_right
    x_data_side_start_end = (Start_point_side_trendline_KOS[0], End_point_side_trendline_KOS[0])
    y_data_side_start_end = (Start_point_side_trendline_KOS[1], End_point_side_trendline_KOS[1])
    end_point_drawn_side, start_point_drawn_side, x_start_index_side, x_end_index_side = calc_Start_End_in_trendline_KOS_from_xdata_ydata(
        grid_resolution_int, x_data_side_start_end, y_data_side_start_end, z_grid_values_linear_trendline_KOS)
    return end_point_drawn_side, start_point_drawn_side, x_end_index_side, x_start_index_side
def calc_local_and_global_bendpoints(max_distance, surface_points_local_KOS,
                                     local_KOS_in_trendline_KOS, y_intercept_trendline_KOS):
    """Detect bending points on the local x-z profile and express them in the
    local, trendline and global coordinate systems.

    Returns:
        (bend_pts_xz_local, bend_pts_xyz_trendline, bend_pts_xyz_global)
    """
    # project the local surface points onto the x-z plane
    profile_xz = np.column_stack((surface_points_local_KOS[:, 0], surface_points_local_KOS[:, 2]))
    bend_pts_xz_local = calc_bend_pts(max_distance, profile_xz)
    # in the local KOS y == 0 everywhere, so widen (x, z) to (x, 0, z)
    bend_pts_xyz_local = np.insert(bend_pts_xz_local, 1, np.zeros(len(bend_pts_xz_local)), axis=1)
    bend_pts_xyz_trendline, bend_pts_xyz_global = new_bending_points_local_to_global(
        y_intercept_trendline_KOS, bend_pts_xyz_local, local_KOS_in_trendline_KOS)
    return bend_pts_xz_local, bend_pts_xyz_trendline, bend_pts_xyz_global
def calc_Start_End_in_trendline_KOS_from_xdata_ydata(grid_resolution_int, xdata, ydata, z_grid_values_linear_trendline_KOS):
    """Turn drawn (x, y) start/end coordinates into grid indices and 3D points.

    Uses the module-level grid spacing ``dx``/``dy`` and the grid-origin offsets
    ``x_0_grid_point_index``/``y_0_grid_point_index`` to convert coordinates to
    integer grid indices, then samples the z grid there.

    Returns:
        (end_point_xyz_data, start_point_xyz_data, x_start_index, x_end_index)
    """
    # coordinate -> grid index: round(coord / spacing) shifted by the grid origin
    y_end_index = np.asarray(np.round(np.add(np.divide(ydata[1], dy), (grid_resolution_int - y_0_grid_point_index))),
                             dtype=np.int32)
    x_end_index = np.asarray(np.round(np.add(np.divide(xdata[1], dx), (grid_resolution_int - x_0_grid_point_index))),
                             dtype=np.int32)
    y_start_index = np.asarray(np.round(np.add(np.divide(ydata[0], dy), (grid_resolution_int - y_0_grid_point_index))),
                               dtype=np.int32)
    x_start_index = np.asarray(np.round(np.add(np.divide(xdata[0], dx), (grid_resolution_int - x_0_grid_point_index))),
                               dtype=np.int32)
    # Points can lie outside the grid; fall back to a corner value of the z grid in that case.
    if x_start_index < 0 or x_start_index >= grid_resolution_int or \
            y_start_index < 0 or y_start_index >= grid_resolution_int:
        z_start_data = z_grid_values_linear_trendline_KOS[0, 0]
    else: z_start_data = z_grid_values_linear_trendline_KOS[x_start_index, y_start_index]
    if x_end_index < 0 or x_end_index >= grid_resolution_int or \
            y_end_index < 0 or y_end_index >= grid_resolution_int:
        z_end_data = z_grid_values_linear_trendline_KOS[grid_resolution_int - 1, grid_resolution_int - 1]
    else: z_end_data = z_grid_values_linear_trendline_KOS[x_end_index, y_end_index]
    # assemble (x, y, z) points for start and end
    start_point_xyz_data = (np.vstack((xdata[0], ydata[0], z_start_data)).T)[0][:]
    end_point_xyz_data = (np.vstack((xdata[1], ydata[1], z_end_data)).T)[0][:]
    return end_point_xyz_data, start_point_xyz_data, x_start_index, x_end_index
def trim_x_y_values_to_grid(grid_resolution_int, x_slope, x_values_trendline_KOS, x_values_indizes, y_values_trendline_KOS, y_values_indizes):
    """Clip the sampled line to the portion whose y indices lie inside the grid.

    Scans the y index sequence once; depending on the sign of ``x_slope`` the
    line enters the grid from below (rising) or from above (falling). If no end
    was found (or it resolved to 0), a default near the grid border is used.

    Returns:
        (x_values_indizes_trim, x_values_trim, y_values_indizes_trim, y_values_trim)
    """
    first_inside = -1
    last_inside = -1
    rising = x_slope >= 0
    for k in range(len(x_values_trendline_KOS) - 1):
        y_idx = y_values_indizes[k]
        if rising:
            if first_inside < 0 and y_idx >= 0:
                first_inside = k
            if last_inside < 0 and y_idx >= grid_resolution_int:
                last_inside = k - 1
        else:
            if first_inside < 0 and y_idx < grid_resolution_int:
                first_inside = k
            if last_inside < 0 and y_idx <= 0:
                last_inside = k - 1
    if last_inside <= 0:
        last_inside = grid_resolution_int - 2  # default value when no exit was found
    # trim all four parallel sequences with the same window
    window = slice(first_inside, last_inside)
    return (x_values_indizes[window], x_values_trendline_KOS[window],
            y_values_indizes[window], y_values_trendline_KOS[window])
def trim_x_y_values_to_start_end_point(x_start_index, x_end_index, x_values_trendline_KOS, x_values_indizes,
                                       y_values_trendline_KOS, y_values_indizes):
    """Trim all four parallel sequences to the span between start and end index.

    The slice runs from the smaller to the larger of the two indices, so the
    caller may pass them in either order. (The original duplicated both slice
    branches; this folds them into one via min/max with identical behavior,
    including the empty slice when both indices are equal.)

    Returns:
        (x_values_indizes_trim, x_values_trim_trendline_KOS,
         y_values_indizes_trim, y_values_trim_trendline_KOS)
    """
    lo = min(x_start_index, x_end_index)
    hi = max(x_start_index, x_end_index)
    return (x_values_indizes[lo:hi], x_values_trendline_KOS[lo:hi],
            y_values_indizes[lo:hi], y_values_trendline_KOS[lo:hi])
def extract_points_from_interpolated_surface(Start_point_trendline_KOS, grid_resolution_int, x_slope, z_grid_values_linear_trendline_KOS, x_start_index,
                                             x_end_index):
    """Sample 3D surface points along the drawn line from the interpolated grid.

    Builds the trimmed x/y coordinate and index arrays along the line, looks up
    the z values in the grid, drops NaN samples at both ends (points outside the
    geometry), and stacks everything into (x, y, z) points.

    Returns:
        (new_bending_direction_points_trendline_KOS, y_intercept_trendline_KOS,
         x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim,
         y_values_trim_trendline_KOS)
    """
    x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_intercept_trendline_KOS, y_values_trim_trendline_KOS = calc_x_y_values_in_trendline_KOS_and_with_indizes(
        Start_point_trendline_KOS, grid_resolution_int, x_end_index, x_slope, x_start_index)
    # z values from grid
    z_values_trendline_KOS = extract_z_values_from_grid(x_indizes_trim, y_indizes_trim,
                                                        z_grid_values_linear_trendline_KOS)
    # z_grid_values_linear_trendline_KOS can contain NaN; trim those off at both ends.
    x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS, z_values_trendline_KOS = trim_x_y_z_values_to_geometry(
        x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS,
        z_values_trendline_KOS)
    # x, y and z stacked together to 3D points
    new_bending_direction_points_trendline_KOS = np.vstack(
        (x_values_trim_trendline_KOS, y_values_trim_trendline_KOS, z_values_trendline_KOS)).T
    return new_bending_direction_points_trendline_KOS, y_intercept_trendline_KOS, x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS
def extract_z_values_from_grid(x_indizes_trim, y_indizes_trim, z_grid_values_linear_trendline_KOS):
    """Look up the grid height for every (x, y) index pair.

    Replaces the original per-element Python loop with a single vectorised
    fancy-indexing lookup.

    Args:
        x_indizes_trim: sequence of integer row indices into the grid.
        y_indizes_trim: sequence of integer column indices.
        z_grid_values_linear_trendline_KOS: 2-D array of interpolated z values.

    Returns:
        1-D float32 array with one z value per index pair.
    """
    rows = np.asarray(x_indizes_trim, dtype=np.intp)
    # the original loop only consumed len(x) entries of y; keep that behavior
    cols = np.asarray(y_indizes_trim, dtype=np.intp)[:rows.shape[0]]
    return z_grid_values_linear_trendline_KOS[rows, cols].astype(np.float32)
def trim_x_y_z_values_to_geometry(x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim,
                                  y_values_trim_trendline_KOS, z_values_trendline_KOS):
    """Strip leading and trailing NaN z-samples (points outside the geometry).

    All five parallel sequences are sliced with the same window so they stay
    aligned. Raises IndexError if every z value is NaN.

    NOTE(review): the trailing bound ``j`` stays -1 when there is no trailing
    NaN, so the slice ``[i:j]`` always drops the last valid sample as well —
    confirm this off-by-one is intended.
    """
    i = 0
    # x != x is True only for NaN: advance past leading NaNs
    while z_values_trendline_KOS[i] != z_values_trendline_KOS[i]: i += 1
    j = -1
    # same trick from the back: step over trailing NaNs
    while z_values_trendline_KOS[j] != z_values_trendline_KOS[j]: j -= 1
    z_values_trendline_KOS = z_values_trendline_KOS[i:j]
    x_values_trim_trendline_KOS = x_values_trim_trendline_KOS[i:j]
    y_values_trim_trendline_KOS = y_values_trim_trendline_KOS[i:j]
    x_indizes_trim = x_indizes_trim[i:j]
    y_indizes_trim = y_indizes_trim[i:j]
    return x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS, z_values_trendline_KOS
def calc_x_y_values_in_trendline_KOS_and_with_indizes(Start_point_trendline_KOS, grid_resolution_int, x_end_index,
                                                      x_slope, x_start_index):
    """Build the oblique line y(x) through the start point, in two forms.

    Two representations are needed:
      - mathematical coordinates (for plotting),
      - integer grid indices (for extraction from the z grid).

    Reads the module-level ``grid_x``, ``dy`` and ``y_0_grid_point_index``.

    Returns:
        (x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim,
         y_intercept_trendline_KOS, y_values_trim_trendline_KOS)
    """
    # y intercept from inserting the start point into y = x_slope * x + b
    y_intercept_trendline_KOS = Start_point_trendline_KOS[1] - Start_point_trendline_KOS[0] * x_slope
    # x-y values in coordinates (x taken from the module-level grid)
    x_values_trendline_KOS = grid_x[:, 0]
    y_values_trendline_KOS = np.add(np.multiply(x_values_trendline_KOS, x_slope), y_intercept_trendline_KOS)
    # x-y values as integer grid indices
    x_values_indizes = np.asarray(list(range(grid_resolution_int)), dtype=np.int32)
    y_values_indizes = np.add(np.divide(y_values_trendline_KOS, dy), (grid_resolution_int - y_0_grid_point_index))
    y_values_indizes = np.asarray(np.round(y_values_indizes), dtype=np.int32)
    # trim x and y values to the start/end point window
    x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS = trim_x_y_values_to_start_end_point(
        x_start_index, x_end_index, x_values_trendline_KOS, x_values_indizes, y_values_trendline_KOS, y_values_indizes)
    # additionally clip to the grid if the line leaves it
    if min(y_values_indizes) < 0 or max(y_values_indizes) > grid_resolution_int - 1:
        x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_values_trim_trendline_KOS = trim_x_y_values_to_grid(
            grid_resolution_int, x_slope, x_values_trim_trendline_KOS, x_indizes_trim, y_values_trim_trendline_KOS,
            y_indizes_trim)
    return x_indizes_trim, x_values_trim_trendline_KOS, y_indizes_trim, y_intercept_trendline_KOS, y_values_trim_trendline_KOS
def transform_surface_points_to_global_and_local_KOS(y_intercept_trendline_KOS,
                                                     new_bending_direction_points_trendline_KOS,
                                                     local_KOS_in_trendline_KOS):
    """Express the sampled surface points in the local and global KOS, and the
    local KOS axes in global coordinates.

    Returns:
        (surface_points_local_KOS, new_bending_direction_points_global_KOS,
         local_KOS_in_global_KOS)
    """
    # origin of the local KOS expressed in trendline coordinates
    local_origin = np.asarray((0, y_intercept_trendline_KOS, 0), dtype=np.float32)
    surface_points_local_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(
        new_bending_direction_points_trendline_KOS, local_KOS_in_trendline_KOS, local_origin)
    new_bending_direction_points_global_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(
        new_bending_direction_points_trendline_KOS, trendline_KOS_in_global_KOS,
        center_of_pointcloud_weighted_in_global_KOS, True)
    local_KOS_in_global_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(
        local_KOS_in_trendline_KOS, trendline_KOS_in_global_KOS, np.asarray([0, 0, 0]), True)
    return surface_points_local_KOS, new_bending_direction_points_global_KOS, local_KOS_in_global_KOS
def new_bending_points_local_to_global(y_intercept_trendline_KOS,
                                       surface_points_local_KOS,
                                       local_KOS_in_trendline_KOS):
    """Lift points from the local KOS to trendline and global coordinates.

    Returns:
        (points_trendline_KOS, points_global_KOS)
    """
    # origin of the local KOS expressed in trendline coordinates
    local_origin = np.asarray((0, y_intercept_trendline_KOS, 0), dtype=np.float32)
    points_trendline_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(
        surface_points_local_KOS, local_KOS_in_trendline_KOS, local_origin, True)
    points_global_KOS = translate_and_rotate_points_from_OLD_to_NEW_KOS(
        points_trendline_KOS, trendline_KOS_in_global_KOS,
        center_of_pointcloud_weighted_in_global_KOS, True)
    return points_trendline_KOS, points_global_KOS
#######################################################################################################################
def calc_L_aim(x_y_points_filled_up):
    """Return the total polyline length L_aim of the given point sequence.

    Sums the Euclidean distances between consecutive points. Vectorised with
    ``np.diff``/``np.linalg.norm`` instead of the original Python loop; a stale
    commented-out variant of the computation was removed.

    Args:
        x_y_points_filled_up: sequence of points (any dimension, one per row).

    Returns:
        Total length as a float; 0 for fewer than two points.
    """
    pts = np.asarray(x_y_points_filled_up, dtype=float)
    if len(pts) < 2:
        return 0
    # lengths of all consecutive segments, summed in one pass
    return np.linalg.norm(np.diff(pts, axis=0), axis=1).sum()
#######################################################################################################################
def show_startstrip(bestPatch_patternpoints,patch_start,patch_end,dimension):
    """Render the start solution in 3D: the STL geometry plus the patch points.

    Side effects only: opens a blocking matplotlib window via ``pyplot.show``.
    Reads the module-level ``triangle_vectors_of_stl``. The large triple-quoted
    blocks below are inert, disabled debug-plotting code kept by the authors.

    Args:
        bestPatch_patternpoints: (n, 3) array of patch pattern points to draw.
        patch_start: 3D start point (drawn green).
        patch_end: 3D end point (drawn black).
        dimension: string appended to the window title (e.g. '2D' / '3D').
    """
    ###############3D-PLOTTING################
    figure = pyplot.figure() #Comment_DB: 3D plot of objective shape
    axes = mplot3d.Axes3D(figure)
    plt.title('Start solution preprocessor '+ dimension)
    patch_visual = mplot3d.art3d.Poly3DCollection(triangle_vectors_of_stl, linewidths=1, alpha=0.5, edgecolor=[1, 1, 1], label ='Geometry') #Comment_DB: added edgecolor to make the edges visible
    # dummy far-away scatter point so that "Geometry" shows up in the legend
    axes.scatter([999999990],[9999999900],[9999999900],linewidths=0.0001, alpha = 0.5, label = "Geometry") #Comment_DB: label in legend
    """
    for corner_point in tri_corner_points_trendline_KOS:
        axes.scatter(corner_point[0], corner_point[1], corner_point[2], c='black')
    # Trendline KOS
    # x-Achse
    global trendline_KOS_in_global_KOS
    x1, y1, z1 = [center_of_pointcloud_weighted_in_global_KOS[0],
                  center_of_pointcloud_weighted_in_global_KOS[0] + 200 * trendline_KOS_in_global_KOS[0][0]], \
                 [center_of_pointcloud_weighted_in_global_KOS[1],
                  center_of_pointcloud_weighted_in_global_KOS[1] + 200 * trendline_KOS_in_global_KOS[0][1]], \
                 [center_of_pointcloud_weighted_in_global_KOS[2],
                  center_of_pointcloud_weighted_in_global_KOS[2] + 200 * trendline_KOS_in_global_KOS[0][2]]
    axes.plot(x1,y1,z1, c='red', label ='x-Achse')
    # y-Achse
    x2, y2, z2 = [center_of_pointcloud_weighted_in_global_KOS[0],
                  center_of_pointcloud_weighted_in_global_KOS[0] + 200 * trendline_KOS_in_global_KOS[1][0]], \
                 [center_of_pointcloud_weighted_in_global_KOS[1],
                  center_of_pointcloud_weighted_in_global_KOS[1] + 200 * trendline_KOS_in_global_KOS[1][1]], \
                 [center_of_pointcloud_weighted_in_global_KOS[2],
                  center_of_pointcloud_weighted_in_global_KOS[2] + 200 * trendline_KOS_in_global_KOS[1][2]]
    axes.plot(x2, y2, z2, c='blue', label ='y-Achse')
    # z-Achse
    x3, y3, z3 = [center_of_pointcloud_weighted_in_global_KOS[0],
                  center_of_pointcloud_weighted_in_global_KOS[0] + 200 * trendline_KOS_in_global_KOS[2][0]], \
                 [center_of_pointcloud_weighted_in_global_KOS[1],
                  center_of_pointcloud_weighted_in_global_KOS[1] + 200 * trendline_KOS_in_global_KOS[2][1]], \
                 [center_of_pointcloud_weighted_in_global_KOS[2],
                  center_of_pointcloud_weighted_in_global_KOS[2] + 200 * trendline_KOS_in_global_KOS[2][2]]
    axes.plot(x3, y3, z3, c='green', label ='z-Achse')
    """
    # Midpoint
    """
    axes.scatter(center_of_pointcloud_weighted_in_global_KOS[0],center_of_pointcloud_weighted_in_global_KOS[1],center_of_pointcloud_weighted_in_global_KOS[2],c='g')
    """
    # Start- Endpoint
    axes.scatter(patch_start[0], patch_start[1], patch_start[2], c="green")
    axes.scatter(patch_end[0],patch_end[1],patch_end[2],c='black')
    """
    axes.plot(
        [center_of_pointcloud_weighted_in_global_KOS[0], center_of_pointcloud_weighted_in_global_KOS[0] + 20 * trendline_KOS_in_global_KOS[0][0]],
        [center_of_pointcloud_weighted_in_global_KOS[1], center_of_pointcloud_weighted_in_global_KOS[1] + 20 * trendline_KOS_in_global_KOS[0][1]],
        [center_of_pointcloud_weighted_in_global_KOS[2], center_of_pointcloud_weighted_in_global_KOS[2] + 20 * trendline_KOS_in_global_KOS[0][2]],
        c='red', label='x-Axis_Trendline')
    axes.plot(
        [center_of_pointcloud_weighted_in_global_KOS[0],
         center_of_pointcloud_weighted_in_global_KOS[0] + 20 * trendline_KOS_in_global_KOS[1][0]],
        [center_of_pointcloud_weighted_in_global_KOS[1],
         center_of_pointcloud_weighted_in_global_KOS[1] + 20 * trendline_KOS_in_global_KOS[1][1]],
        [center_of_pointcloud_weighted_in_global_KOS[2],
         center_of_pointcloud_weighted_in_global_KOS[2] + 20 * trendline_KOS_in_global_KOS[1][2]],
        c='green', label='y-Axis_Trendline')
    axes.plot(
        [center_of_pointcloud_weighted_in_global_KOS[0],
         center_of_pointcloud_weighted_in_global_KOS[0] + 20 * trendline_KOS_in_global_KOS[2][0]],
        [center_of_pointcloud_weighted_in_global_KOS[1],
         center_of_pointcloud_weighted_in_global_KOS[1] + 20 * trendline_KOS_in_global_KOS[2][1]],
        [center_of_pointcloud_weighted_in_global_KOS[2],
         center_of_pointcloud_weighted_in_global_KOS[2] + 20 * trendline_KOS_in_global_KOS[2][2]],
        c='blue', label='z-Axis_Trendline')
    axes.plot(
        [0,0 + 20 *1],
        [0, 0 + 20 * 0],
        [0, 0 + 20 * 0],
        c='red', label='x-Axis_Global')
    axes.plot(
        [0, 0 + 20 * 0],
        [0, 0 + 20 * 1],
        [0, 0 + 20 * 0],
        c='green', label='y-Axis_Global')
    axes.plot(
        [0, 0 + 20 * 0],
        [0, 0 + 20 * 0],
        [0, 0 + 20 * 1],
        c='blue', label='z-Axis_Global')
    axes.plot(
        [bend_pts_xyz_global_2D[0][0],
         bend_pts_xyz_global_2D[0][0] + 20 * x_direction_list_global_KOS[0][0]],
        [bend_pts_xyz_global_2D[0][1],
         bend_pts_xyz_global_2D[0][1] + 20 * x_direction_list_global_KOS[0][1]],
        [bend_pts_xyz_global_2D[0][2],
         bend_pts_xyz_global_2D[0][2] + 20 * x_direction_list_global_KOS[0][2]],
        c='blue', label='z-Axis')
    axes.plot(
        [bend_pts_xyz_global_2D[0][0],
         bend_pts_xyz_global_2D[0][0] + 20 * normal_direction_start[0]],
        [bend_pts_xyz_global_2D[0][1],
         bend_pts_xyz_global_2D[0][1] + 20 * normal_direction_start[1]],
        [bend_pts_xyz_global_2D[0][2],
         bend_pts_xyz_global_2D[0][2] + 20 * normal_direction_start[2]],
        c='blue', label='z-Axis')
    """
    """
    for i in range(len(surface_points_global_KOS_stacked)):
        plt.plot((surface_points_global_KOS_stacked[i][:, 0]), (surface_points_global_KOS_stacked[i][:, 1]), (surface_points_global_KOS_stacked[i][:, 2]), marker='o', c='blue')
        plt.plot((surface_points_global_KOS_right_stacked[i][:, 0]), (surface_points_global_KOS_right_stacked[i][:, 1]), (surface_points_global_KOS_right_stacked[i][:, 2]), marker='o', c='blue')
        plt.plot((surface_points_global_KOS_left_stacked[i][:, 0]), (surface_points_global_KOS_left_stacked[i][:, 1]), (surface_points_global_KOS_left_stacked[i][:, 2]), marker='o', c='blue')
    """
    """
    plt.plot((surface_points_start_end_global_KOS[:, 0]), (surface_points_start_end_global_KOS[:, 1]),
             (surface_points_start_end_global_KOS[:, 2]), marker='o', c='blue')
    plt.plot((surface_points_start_end_global_KOS_left[:, 0]), (surface_points_start_end_global_KOS_left[:, 1]),
             (surface_points_start_end_global_KOS_left[:, 2]), marker='o', c='blue')
    plt.plot((surface_points_start_end_global_KOS_right[:, 0]), (surface_points_start_end_global_KOS_right[:, 1]),
             (surface_points_start_end_global_KOS_right[:, 2]), marker='o', c='blue')
    """
    # Bendpoints and edge direction 2DE
    """plt.plot((bend_points_start_end_global_KOS[:, 0]), (bend_points_start_end_global_KOS[:, 1]),
             (bend_points_start_end_global_KOS[:, 2]), marker='o', c='red')
    plt.plot((bend_points_start_end_global_KOS_left[:, 0]), (bend_points_start_end_global_KOS_left[:, 1]),
             (bend_points_start_end_global_KOS_left[:, 2]), marker='o', c='red')
    plt.plot((bend_points_start_end_global_KOS_right[:, 0]), (bend_points_start_end_global_KOS_right[:, 1]),
             (bend_points_start_end_global_KOS_right[:, 2]), marker='o', c='red')
    global edge_directions_global_2DE
    edge_line_global_current_direction = [
        (bend_points_start_end_global_KOS[:] + np.multiply(edge_directions_global_2DE[:], 50)),
        (bend_points_start_end_global_KOS[:] - np.multiply(edge_directions_global_2DE[:], 50))]
    for i in range(len(edge_line_global_current_direction[0])):
        axes.plot([edge_line_global_current_direction[0][i][0], edge_line_global_current_direction[1][i][0]],
                  [edge_line_global_current_direction[0][i][1], edge_line_global_current_direction[1][i][1]],
                  [edge_line_global_current_direction[0][i][2], edge_line_global_current_direction[1][i][2]],
                  c='green') # Comment_DB: *pc_axes is *args, and .T is np.transpose"""
    #bend_points_start_end_global_KOS, bend_points_start_end_global_KOS_left, bend_points_start_end_global_KOS_right
    #surface_points_global_KOS_left, surface_points_global_KOS_right, bend_pts_xyz_global_left, bend_pts_xyz_global_right
    """
    for k in [0]: #range(len(x_direction_list_global_KOS)):
        axes.plot([bend_pts_xyz_global_3D[k+1][0], bend_pts_xyz_global_3D[k+1][0] + 50 * x_direction_list_global_KOS[k][0]],
                  [bend_pts_xyz_global_3D[k+1][1], bend_pts_xyz_global_3D[k+1][1] + 50 * x_direction_list_global_KOS[k][1]],
                  [bend_pts_xyz_global_3D[k+1][2], bend_pts_xyz_global_3D[k+1][2] + 50 * x_direction_list_global_KOS[k][2]], c='red', label ='Tape direction')
    for k in [0]: #range(len(x_direction_rotated_list_global_KOS)):
        axes.plot([bend_pts_xyz_global_3D[k+1][0], bend_pts_xyz_global_3D[k+1][0] + 50 * x_direction_rotated_list_global_KOS[k][0]],
                  [bend_pts_xyz_global_3D[k+1][1], bend_pts_xyz_global_3D[k+1][1] + 50 * x_direction_rotated_list_global_KOS[k][1]],
                  [bend_pts_xyz_global_3D[k+1][2], bend_pts_xyz_global_3D[k+1][2] + 50 * x_direction_rotated_list_global_KOS[k][2]],
                  c='yellow', label ='Tape direction rotated')
    for i in [0]: #range(len(edge_line_global)):
        axes.plot([edge_line_global[i][0][0], edge_line_global[i][1][0]],
                  [edge_line_global[i][0][1], edge_line_global[i][1][1]],
                  [edge_line_global[i][0][2], edge_line_global[i][1][2]],
                  c='green', label ='Edge') # Comment_DB: *pc_axes is *args, and .T is np.transpose
    for k in [1]: # range(len(x_direction_list_global_KOS)):
        axes.plot([bend_pts_xyz_global_3D[k + 1][0],
                   bend_pts_xyz_global_3D[k + 1][0] + 50 * x_direction_list_global_KOS[k][0]],
                  [bend_pts_xyz_global_3D[k + 1][1],
                   bend_pts_xyz_global_3D[k + 1][1] + 50 * x_direction_list_global_KOS[k][1]],
                  [bend_pts_xyz_global_3D[k + 1][2],
                   bend_pts_xyz_global_3D[k + 1][2] + 50 * x_direction_list_global_KOS[k][2]], c='red')
    for k in [1]: # range(len(x_direction_rotated_list_global_KOS)):
        axes.plot([bend_pts_xyz_global_3D[k + 1][0],
                   bend_pts_xyz_global_3D[k + 1][0] + 50 * x_direction_rotated_list_global_KOS[k][0]],
                  [bend_pts_xyz_global_3D[k + 1][1],
                   bend_pts_xyz_global_3D[k + 1][1] + 50 * x_direction_rotated_list_global_KOS[k][1]],
                  [bend_pts_xyz_global_3D[k + 1][2],
                   bend_pts_xyz_global_3D[k + 1][2] + 50 * x_direction_rotated_list_global_KOS[k][2]],
                  c='yellow')
    for i in range(len(edge_line_global)):
        axes.plot([edge_line_global[i][0][0], edge_line_global[i][1][0]],
                  [edge_line_global[i][0][1], edge_line_global[i][1][1]],
                  [edge_line_global[i][0][2], edge_line_global[i][1][2]],
                  c='green') # Comment_DB: *pc_axes is *args, and .T is np.transpose
    #"""
    #"""
    # draw the patch as overlapping triangles over consecutive point triples
    for i in range(len(bestPatch_patternpoints) - 2):
        verts = [list(
            zip([bestPatch_patternpoints[i][0], bestPatch_patternpoints[i + 1][0], bestPatch_patternpoints[i + 2][0]], \
                [bestPatch_patternpoints[i][1], bestPatch_patternpoints[i + 1][1], bestPatch_patternpoints[i + 2][1]], \
                [bestPatch_patternpoints[i][2], bestPatch_patternpoints[i + 1][2],
                 bestPatch_patternpoints[i + 2][2]]))] # Comment_DB: DARK BLUE LoP PATCH
        axes.add_collection3d(Poly3DCollection(verts), zs='z') # Comment_DB: INSERT LoP PATCH IN GRAPH
        # patch_meshpoints.append(verts) #Comment_DB: is not used
    axes.scatter(bestPatch_patternpoints[:, 0], bestPatch_patternpoints[:, 1], bestPatch_patternpoints[:, 2], c='r')
    #"""
    face_color = [0.5, 0.5, 1]  # alternative: matplotlib.colors.rgb2hex([0.5, 0.5, 1])
    patch_visual.set_facecolor(face_color)
    axes.legend()
    axes.add_collection3d(patch_visual) #Comment_DB: stl mesh file
    axes.autoscale(enable=False, axis='both')  # you will need this line to change the Z-axis
    axes.set_xbound(-150, 150)
    axes.set_ybound(-50, 250)
    axes.set_zbound(-150, 150)
    pyplot.axis('off')
    pyplot.show(figure)
    return
| manuelEAProject/EAProject | stl_preprocessing_Wenzel.py | stl_preprocessing_Wenzel.py | py | 101,321 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.asarray",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "stl.mesh.Mesh.from_file",
... |
35563148498 |
from keras.preprocessing import image
from keras.preprocessing.image import load_img, img_to_array
from keras import models
import numpy as np
import matplotlib.pyplot as plt
import os
# Visualize the activations of the first 8 layers of a trained CNN for a
# single test image: each layer's channels are tiled into one grid image
# and displayed with matplotlib.
IMG_REL_PATH = 'cats_and_dogs_small/test/cats/cat.1502.jpg'
MODEL_REL_PATH = '02_cats_and_dogs_small_2.h5'

cwd = os.getcwd()
model = models.load_model(os.path.join(cwd, MODEL_REL_PATH))
model.summary()

# Load the image at the network's input size and scale pixels to [0, 1].
img = load_img(os.path.join(cwd, IMG_REL_PATH), target_size=(150, 150))
batch = np.expand_dims(img_to_array(img), axis=0)
batch /= 255.

# Auxiliary model mapping the input image to the first 8 layers' outputs.
probe = models.Model(inputs=model.input,
                     outputs=[layer.output for layer in model.layers[:8]])
activations = probe.predict(batch)
layer_names = [layer.name for layer in model.layers[:8]]

images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
    n_features = layer_activation.shape[-1]   # number of channels
    size = layer_activation.shape[1]          # feature maps are size x size
    n_cols = n_features // images_per_row     # grid rows (16 maps per row)
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    for col in range(n_cols):
        for row in range(images_per_row):
            channel = layer_activation[0, :, :, col * images_per_row + row]
            # Normalize the channel in place to a displayable 0-255 range.
            channel -= channel.mean()
            channel /= channel.std()
            channel *= 64
            channel += 128
            channel = np.clip(channel, 0, 255).astype('uint8')
            display_grid[col * size:(col + 1) * size,
                         row * size:(row + 1) * size] = channel
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
plt.show()
| yasaisen/try_keras | 02_lab/layer_output.py | layer_output.py | py | 1,981 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
74667028905 | # -*- coding: utf-8 -*-
# https://mathmod.deviantart.com/art/Pseudo-Hopf-Tori-565531249
import os
from math import pi, atan2, asin, sqrt, cos, sin
import numpy as np
import pyvista as pv
import quaternion
def quaternion2hab(q):
    """Convert a unit quaternion to (heading, attitude, bank) in degrees.

    The corresponding rotation is Ry(heading) @ Rz(attitude) @ Rx(bank).
    """
    rad2deg = 180 / pi
    # Gimbal-lock indicator; +/-0.5 corresponds to attitude = +/-90 degrees.
    pole = q.x * q.y + q.z * q.w
    if pole > 0.499:  # singularity at north pole
        return (2 * atan2(q.x, q.w) * rad2deg, 90, 0)
    if pole < -0.499:  # singularity at south pole
        return (-2 * atan2(q.x, q.w) * rad2deg, -90, 0)
    heading = atan2(2*(q.y*q.w - q.x*q.z), 1 - 2*(q.y*q.y + q.z*q.z)) * rad2deg
    attitude = asin(2 * pole) * rad2deg
    bank = atan2(2*(q.x*q.w - q.y*q.z), 1 - 2*(q.x*q.x + q.z*q.z)) * rad2deg
    return (heading, attitude, bank)
# then the rotation is Ry(h) @ Rz(a) @ Rx(b)
def get_quaternion(u, v):
    """Return a unit quaternion whose rotation sends unit vector u onto v.

    Both u and v must already be normalized; the construction breaks down
    (sqrt of ~0 in the denominator) when u == -v, where the axis is ambiguous.
    """
    cos_angle = np.vdot(u, v)
    w0 = sqrt(1 + cos_angle)          # 2 * cos(theta / 2), up to the 1/sqrt(2)
    scale = 1 / sqrt(2) / w0
    axis = np.cross(u, v)             # sin(theta) * rotation axis
    comps = np.concatenate((np.array([w0 / sqrt(2)]), scale * axis))
    return quaternion.from_float_array(comps)
def satellite(t, R, alpha, k):
    """Camera position at parameter t on an orbit of radius R.

    The path winds k times around an axis tilted by angle alpha while
    sweeping once around the scene as t runs over [0, 2*pi).
    """
    x = cos(alpha) * cos(t) * cos(k*t) - sin(t) * sin(k*t)
    y = cos(alpha) * sin(t) * cos(k*t) + cos(t) * sin(k*t)
    z = sin(alpha) * cos(k*t)
    return R * np.array([x, y, z])
def satellite_motion(nframes, R, alpha=3*pi/4, k=4):
    """Accumulate frame-to-frame rotations along the satellite path.

    Returns (start_position, quats) where quats[i] is the quaternion
    rotating the start position onto frame i's position (identity for
    frame 0). nframes sample times cover one full sweep of the path.
    """
    times = np.linspace(0, 2*pi, nframes+1)[:nframes]
    start = satellite(0, R, alpha, k)
    quats = [None] * nframes
    quats[0] = quaternion.one
    prev_pos = start.copy()
    prev_q = quats[0]
    for i, t in enumerate(times[1:], start=1):
        cur_pos = satellite(t, R, alpha, k)
        # Compose the incremental rotation (between unit vectors) with the
        # total rotation accumulated so far.
        cur_q = get_quaternion(prev_pos / R, cur_pos / R) * prev_q
        quats[i] = cur_q
        prev_pos = cur_pos
        prev_q = cur_q
    return (start, quats)
# Finite-difference step sizes for the numerical partials below.
cu = 0.0000000000001
cv = 0.0000000000001
# Number of lobes of the sinusoidal modulation used in f().
N = 3

def Fx(u, v):
    """x-coordinate of the base torus-like surface."""
    return -np.cos(u+v) / (sqrt(2)+np.cos(v-u))

def DFx(u, v):
    """Numerical partials (d/du, d/dv) of Fx (negated forward differences)."""
    return ((Fx(u, v) - Fx(u+cu, v)) / cu,
            (Fx(u, v) - Fx(u, v+cv)) / cv)

def Fy(u, v):
    """y-coordinate of the base surface."""
    return np.sin(v-u) / (sqrt(2)+np.cos(v-u))

def DFy(u, v):
    """Numerical partials (d/du, d/dv) of Fy."""
    return ((Fy(u, v) - Fy(u+cu, v)) / cu,
            (Fy(u, v) - Fy(u, v+cv)) / cv)

def Fz(u, v):
    """z-coordinate of the base surface."""
    return np.sin(u+v) / (sqrt(2)+np.cos(v-u))

def DFz(u, v):
    """Numerical partials (d/du, d/dv) of Fz."""
    return ((Fz(u, v) - Fz(u+cu, v)) / cu,
            (Fz(u, v) - Fz(u, v+cv)) / cv)

def n1(u, v):
    """First component of the (unnormalized) surface normal."""
    du_y, dv_y = DFy(u, v)
    du_z, dv_z = DFz(u, v)
    return du_y * dv_z - du_z * dv_y

def n2(u, v):
    """Second component of the (unnormalized) surface normal."""
    du_x, dv_x = DFx(u, v)
    du_z, dv_z = DFz(u, v)
    return du_z * dv_x - du_x * dv_z

def n3(u, v):
    """Third component of the (unnormalized) surface normal."""
    du_x, dv_x = DFx(u, v)
    du_y, dv_y = DFy(u, v)
    return du_x * dv_y - du_y * dv_x

def f(u, v):
    """Map (u, v) to a point on the bumpy surface.

    The base point (Fx, Fy, Fz) is displaced along the surface normal by a
    texture term: a sharp |sin*cos|^7 spot pattern plus a 2N-lobed ripple.
    """
    norm_len = np.sqrt(n1(u,v)**2 + n2(u,v)**2 + n3(u,v)**2)
    bump = (np.abs(np.sin(15*u)*np.cos(15*v)))**7 + 0.4*np.sin(2*N*u)
    scale = bump / norm_len
    return np.array([
        Fx(u, v) + scale*n1(u, v),
        Fy(u, v) + scale*n2(u, v),
        Fz(u, v) + scale*n3(u, v)
    ])
# Sample the parameter square [0, 2*pi]^2 and evaluate the surface on it.
x = np.linspace(0, 2*pi, 500)
U, V = np.meshgrid(x, x)
X, Y, Z = f(U, V)
grid = pv.StructuredGrid(X, Y, Z)
mesh = grid.extract_geometry()#.clean(tolerance=1e-6)
# Scalar field used for coloring: distance of each vertex from the origin.
mesh["dist"] = np.linalg.norm(mesh.points, axis=1)
# Camera path: one accumulated rotation quaternion per animation frame.
pos0, quats = satellite_motion(120, 12)
for i, q in enumerate(quats):
    pngname = "zzpic%03d.png" % i
    pltr = pv.Plotter(window_size = [512, 512], off_screen=True)
    pltr.background_color = "#363940"
    pltr.set_focus(mesh.center)
    pltr.set_position(pos0)
    # Orient the camera according to this frame's rotation (degrees).
    h, a, b = quaternion2hab(q)
    pltr.camera.roll = b
    pltr.camera.azimuth = a
    pltr.camera.elevation = h
    pltr.camera.zoom(1)
    pltr.add_mesh(
        mesh, smooth_shading=True, specular=15, cmap="plasma",
        log_scale=False, show_scalar_bar=False, flip_scalars=False
    )
    # Off-screen render of this frame straight to a PNG file.
    pltr.show(screenshot=pngname)
# Assemble the frames into a looping GIF (requires the external gifski CLI).
os.system(
    "gifski --frames=zzpic*.png --fps=9 -o pseudoHopfTorus.gif"
)
| stla/PyVistaMiscellanous | pseudoHopfTorus_anim.py | pseudoHopfTorus_anim.py | py | 3,991 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "math.atan2",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 23,
... |
6141168752 | from sklearn import datasets, model_selection, metrics
# FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23 — import the standalone joblib package, falling back to
# the bundled copy on old scikit-learn versions.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression

# Load the iris dataset: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
X = iris.data
y = iris.target

# 70/30 train/test split, shuffled with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, shuffle=True, random_state=0)

# Multinomial logistic regression on the training split.
clf = LogisticRegression(random_state=0, solver="lbfgs", multi_class="multinomial")
clf.fit(X_train, y_train)

# Persist the fitted model for later inference.
joblib.dump(clf, "iris_regression_clf.joblib")

# Report held-out accuracy.
y_pred = clf.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
| 10kaoru12/4y-university-information-recommender-system | 3/samplecode_201904_v1/chapter07/train.py | train.py | py | 558 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 9,
"usage_type": "call"
},
{
... |
14292500742 | import numpy as np
import matplotlib.pyplot as plt
def plot_sun(sunposition, d):
    """Plot sun positions on a compass-style polar chart.

    Parameters
    ----------
    sunposition : array, shape (2, n)
        Row 0: azimuths in degrees (0 = North, clockwise).
        Row 1: altitudes in degrees above the horizon.
    d : dict
        Layout options: 'figsize' plus 'top'/'bottom'/'left'/'right'
        subplot margins.

    Returns
    -------
    The (closed) matplotlib figure; display or save it explicitly.
    """
    fig = plt.figure(figsize=d['figsize'])
    ax = fig.add_subplot(111, projection='polar')

    # Compass orientation: 0 degrees at North, angles increasing clockwise.
    ax.set_theta_zero_location('N')
    angle_ticks = [np.deg2rad(a) for a in np.linspace(0, 360, 8, endpoint=False)]
    ax.set_xticks((angle_ticks))
    ax.set_xticklabels(np.array(['N','45','E','135','S','225','W','315']),
                       rotation="vertical", size=12)
    ax.tick_params(axis='x', pad=0.5)
    ax.set_theta_direction(-1)

    # Radial axis is zenith angle: center = overhead, outer edge = horizon.
    ax.set_rmin(0)
    ax.set_rmax(90)
    ax.set_rlabel_position(90)
    ax.set_title('Sun Position')

    azimuth_rad = np.deg2rad(sunposition[0,:])
    zenith = 90 - sunposition[1,:]
    ax.scatter(azimuth_rad, zenith, s=10, c='orange', alpha=0.5)

    plt.subplots_adjust(top=d['top'], bottom=d['bottom'],
                        left=d['left'], right=d['right'])
    plt.close()  # prevent the figure from rendering immediately
    return fig
| cisaacstern/horpyzon | _plot.py | _plot.py | py | 897 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.deg2rad",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.linspace... |
22555828419 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Read the raw dataset.
film_data = pd.read_csv('MoviesOnStreamingPlatforms_updated.csv')

# Drop rows containing any missing values.
film_data.dropna(inplace=True)

# Drop the two leading bookkeeping columns (row index and ID).
# FIX: drop() returns a new DataFrame; the result was previously discarded,
# so the columns were never actually removed.
film_data = film_data.drop(film_data.columns[[0, 1]], axis=1)

# Normalization: lowercase text columns, sort the comma-separated values
# inside each cell, and min-max scale the float columns to [0, 1].
for col_name in ['Title', 'Directors', 'Genres', 'Country', 'Language']:
    film_data[col_name] = film_data[col_name].str.lower()

for col_name in ['Directors', 'Genres', 'Country', 'Language']:
    film_data[col_name] = film_data[col_name].str.split(',').map(lambda x: ','.join(sorted(x)))

scaler = preprocessing.MinMaxScaler()
film_data[['IMDb', 'Runtime']] = scaler.fit_transform(film_data[['IMDb', 'Runtime']])

# Split into train/dev/test with an 8:1:1 ratio.
train_ratio = 0.8
validation_ratio = 0.1
test_ratio = 0.1

film_train, film_test = train_test_split(film_data, test_size=1 - train_ratio)
film_valid, film_test = train_test_split(film_test, test_size=test_ratio/(test_ratio + validation_ratio))

film_train.to_csv('MoviesOnStreamingPlatforms_updated.train')
film_test.to_csv('MoviesOnStreamingPlatforms_updated.test')
film_valid.to_csv('MoviesOnStreamingPlatforms_updated.dev')
| jarmosz/ium_CML | get_data.py | get_data.py | py | 1,385 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 22,
"usage_type": "name"
},
{
"api_name"... |
11532751793 | import os
import re
from typing import TypeVar, Optional
from argparse import ArgumentParser, Namespace
from urllib.request import urlopen
from pathlib import Path
from hashlib import sha256
# GitHub release artifact names (per-platform OPA binaries) whose published
# sha256 checksums are mirrored into the .bzl dependency file.
ARTIFACTS = [
    "opa_darwin_amd64",
    "opa_darwin_arm64_static",
    "opa_linux_amd64",
    "opa_linux_amd64_static",
    "opa_linux_arm64_static",
    "opa_windows_amd64",
]

# Generic type parameter used by the required() helper.
T = TypeVar('T')
def required(value: Optional[T], name: str) -> T:
    """Unwrap an optional: return *value*, or raise ValueError if it is None.

    Used to assert that optional inputs (e.g. environment variables) are set.
    """
    if value is not None:
        return value
    raise ValueError(f"Expected value for {name}")
def get_sha256(artifact_name: str, version: str) -> str:
    """Fetch the published sha256 digest for one OPA release artifact.

    OPA publishes a `<artifact>.sha256` file next to every binary; its first
    whitespace-separated token is the hex digest. Windows binaries carry an
    `.exe` suffix before `.sha256`.
    """
    ext = ".exe" if "windows" in artifact_name else ""
    url = f"https://github.com/open-policy-agent/opa/releases/download/v{version}/{artifact_name}{ext}.sha256"
    with urlopen(url) as res:
        return res.read().decode().split(' ')[0]
def get_sha256_file(file: str, version: str) -> str:
    """Download *file* from the OPA repo at tag v<version> and hash it.

    Unlike get_sha256 this computes the digest locally, since the repo does
    not publish checksums for plain files.
    """
    url = f"https://raw.githubusercontent.com/open-policy-agent/opa/v{version}/{file}"
    with urlopen(url) as res:
        return sha256(res.read()).hexdigest()
def main(args: Namespace):
    """Update opa_rules_dependencies.bzl for the requested OPA version.

    Rewrites DEFAULT_VERSION in place and prepends a new entry to the
    _OPA_SHA256 table containing the checksum of every release artifact plus
    the capabilities.json / builtin_metadata.json repo files.
    """
    # Fetch the published checksum for every platform binary.
    d = dict([
        (artifact_name, get_sha256(artifact_name, args.version))
        for artifact_name in ARTIFACTS
    ])
    bzl_path = WORKSPACE_ROOT.joinpath(
        'opa', 'private', 'opa_rules_dependencies.bzl')
    with open(bzl_path) as bzl_file:
        bzl_content = bzl_file.read()
    # Point DEFAULT_VERSION at the new release by splicing over the quoted
    # value (match group 1) only.
    version_match = re.search(
        r"DEFAULT_VERSION\s*=\s*[\"'](.*?)[\"']", bzl_content)
    if version_match is None:
        raise ValueError(f"Could not find DEFAULT_VERSION in file {bzl_path}")
    start_version = version_match.start(1)
    end_version = version_match.end(1)
    bzl_content = bzl_content[:start_version] + \
        args.version + bzl_content[end_version:]
    # Insert a new version entry right after the opening line of the
    # _OPA_SHA256 dict literal (so the newest version appears first).
    dict_match = re.search(
        r"^_OPA_SHA256\s*=\s*{\s*$", bzl_content, re.MULTILINE)
    if dict_match is None:
        raise ValueError(f"Could not find _OPA_SHA256 in file {bzl_path}")
    bzl_content = bzl_content[:dict_match.end(
    )] + f"\n  \"{args.version}\": {{\n" + '\n'.join([
        f"    \"{artifact_name}\": \"{sha256}\","
        for artifact_name, sha256 in d.items()
    ] + [
        f"    \"opa_capabilities_json\": \"{get_sha256_file('capabilities.json', args.version)}\",",
        f"    \"opa_builtin_metadata_json\": \"{get_sha256_file('builtin_metadata.json', args.version)}\",",
    ]) + "\n  }," + bzl_content[dict_match.end():]
    with open(bzl_path, "w", encoding="utf-8") as bzl_file:
        bzl_file.write(bzl_content)
def get_workspace_root(wd: Path) -> Path:
    """Walk up from *wd* to the nearest directory containing a WORKSPACE file.

    Raises FileNotFoundError when the filesystem root is reached without
    finding one. (Previously this looped forever at the root, because
    root.parent == root.)
    """
    while not wd.joinpath("WORKSPACE").exists():
        if wd.parent == wd:  # reached the filesystem root
            raise FileNotFoundError(f"No WORKSPACE file found above {wd}")
        wd = wd.parent
    return wd
# Bazel sets BUILD_WORKING_DIRECTORY to the directory `bazel run` was invoked
# from; required() fails fast if the script is run outside of Bazel.
BUILD_WORKING_DIRECTORY = Path(
    required(os.getenv("BUILD_WORKING_DIRECTORY"), "BUILD_WORKING_DIRECTORY"))
WORKSPACE_ROOT = get_workspace_root(BUILD_WORKING_DIRECTORY)
# CLI entry point: the target OPA version is the only required argument.
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--version', '-v', required=True)
    main(parser.parse_args())
| ticketmaster/rules_opa | tools/opa_upgrade.py | opa_upgrade.py | py | 3,005 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "urllib.request.... |
34796837508 | # load packages
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from scipy.optimize import curve_fit
from tqdm import tqdm
from scipy.signal import find_peaks
# Close any figures left over from a previous run.
plt.close('all')
#%% Define functions used in this code
def findpeaks(arr, h, w=1, d=1):
    """Find local maxima in *arr* whose mean-shifted height exceeds h.

    The array mean is subtracted before peak detection (emulating a
    'DoubleSided' baseline), but the returned heights are taken from the
    ORIGINAL signal, not the shifted one. Returns (heights, indexes).

    NOTE(review): the w (width) and d (distance) parameters are currently
    unused — presumably meant to be forwarded to find_peaks; confirm intent.
    """
    shifted = arr - arr.mean()
    idx, _ = find_peaks(shifted, height=h)
    return arr[idx], idx
#Converts nested list into single list
def flatten(x):
    """Recursively flatten arbitrarily nested iterables into one flat list.

    Strings are treated as atomic values (not split into characters); any
    other object exposing __iter__ is descended into. Order is preserved.
    """
    flat = []
    for element in x:
        nested = hasattr(element, "__iter__") and not isinstance(element, str)
        flat += flatten(element) if nested else [element]
    return flat
#Rounds Number to Specified number of significant digits
def round_sig(x, sig=2):
    """Round x to `sig` significant figures; 0 is returned unchanged."""
    if x == 0:
        return 0
    magnitude = int(np.floor(np.log10(abs(x))))
    return round(x, sig - magnitude - 1)
#%%Input data:
#The program expects multiple (up to 10) files in .txt or .dat format. In each file, the Raman shift
#should be included in the first column, followed by the Intensity data in
#consecutive columns (as many spectra as available, and not necessarily the same number of spectra per file).
#The location and names of the files should be indicated in the path and file_name
#variables, respectively. The total number of files should be included in
#total.
path='../Example Data/Example_Data_1/'
#path='../Example Data/Example_Data_2_2DMaps/' #Uncomment if you are running the Example_Data_2_2DMaps
file_name=['6,5-SWCNT',
           'CuMINT' ]
#file_name=['mapExample'] #Uncomment if you are running the Example_Data_2_2DMaps
total=len(file_name) #Total number of files
delim='\t' #Delimiter of the columns in each data file
type_='.txt'
name=['6,5 SWNT','CuMINT']#Names of the data, used for legends and titles
#name= ['patterned graphene'] #Uncomment if you are running the Example_Data_2_2DMaps
imgtype='.svg' #File format used when saving images
#Histogram normalization:
#Set to True if you want your histograms to be normalized (density),
#else set to False (raw counts)
den=True
#%%Normalization:
#Choose the normalization spectral range. Each spectrum will be normalised to
#its maximum within the specified spectral range:
normLow=1500; #Lower limit in cm-1
normHigh=1700; #Upper limit in cm-1
#%%Peak identification: Intensity and Shifts
#Intensity ratio Id/Ig will be calculated by taking: max Int value - min Int
#value within the spectral range selected. Select an appropriate range where
#the full peaks are resolved.
#Raman shifts for the D and G modes are calculated as the position of the
#maximum intensity
#RBM modes
#G band
band1Low=1450; #Lower limit in cm-1
band1High=1700; #Upper limit in cm-1
#D band
band2Low=1200; #Lower limit in cm-1
band2High=1400; #Upper limit in cm-1
#2D band
band3Low=2450; #Lower limit in cm-1
band3High=2800; #Upper limit in cm-1
#RBM modes
rbm=1# Set to 1 if RBM analysis is desired
RBMregion_Low=200; #Lower limit in cm-1
RBMregion_High=360; #Upper limit in cm-1
Prom=[0.01]; #Minimum height threshold used when searching for RBM peaks.
#This can be a single number, or a list with one value per file.
#Local maxima with a height lower than Prom will be discarded.
if len(Prom)==1 and total!=1:
    Prom=Prom*np.ones(total)
if rbm==1:
    meanRBM=dict.fromkeys(name)
    stdRBM=dict.fromkeys(name)
#%% Model: Choose the method to analyse spectral features (simple peak
# identification or Lorentzian peak fitting)
lorentz=1; # lorentz fits for G, D and 2D (=0 for simple identification; =1 for lorentzian peak fitting)
nt=1; # Splitting G band in G+ and G-?
# If Lorentzian fits are selected, the user is required to indicate an
# initial guess for the peak position, FWHM and intensity (one per file)
Gmin_init=[[1530, 20, 0.4], [1530, 25, 0.31]] #[center FWHM intensity_max] for G-, only if nt=1
Gplus_init=[[1580, 30, 1], [1580, 25, 1]] #[center FWHM intensity_max] for G+
D_init=[[1300, 40, 0.08], [1325, 10, 0.04]] #[center FWHM intensity_max] for D band
init_2D=[[2600, 50, 0.4], [2640, 50, 0.3]] #[center FWHM intensity_max] for 2D band
#%%Plotting options:
#Choose the desired output plots (1 is yes and 0 is no).
raw=1; #Make a figure of all raw spectra.
norm=1; #Plot all normalised spectra.
rng=1; #Plot spectral regions chosen for G, D and 2D bands in intensity calculation
peaks=1; #Plot found peaks in the RBM region. Note that if the
#number of spectra is very high, the computation is going to slow down
#significantly.
correlations=1 #Plot correlations between peak shifts and Id/Ig
#%%Fontsize for plots
fs=16 # Fontsize used for all the plots
width=3 #Bin width of the histograms
#%%Is it a 2D map? In case it is and you want the 3D plots in form of maps
maps=0; #set to 0 if not a map and to 1 if you want a 2D map
map_var='I' # What do you want to map?
#'I' for Intensity Ratio,
#'G' for Gband
#'D' for Dband
#'2D' for 2DBand
rows=32 ; #Number of rows in 2D Map
col=32 ; #Number of columns in 2D Map
use_leng=1; #If 1, the map will use the x and y dimensions defined below; if 0, pixel numbers
xlen=42; #x length in um
ylen=38; #y length in um
#%%Code starts here:
os.chdir(path) #Change the working directory to the selected path
for z in range(0,total):
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%%
#%%#%%#%%#%%#%%#Import data #%%#%%#%%#%%#%%#%%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%%
data = pd.read_csv(file_name[z]+type_,delimiter=delim,decimal='.',header=None);
data=data.to_numpy()
Shift = np.array(data[:,0]); #Raman shift is the first column of the file
Intensity=data[:,1:]; #Raman intensities are all data from column 2 til end
rng2=Shift>0
Intensity=Intensity[rng2,:]
Shift=Shift[rng2]
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
#%%#%%#%%#Set color scales for plots #%%#%%#%%#%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%%
gray=cm.get_cmap('gist_yarg',len(Intensity[0,:])+20)
winter=cm.get_cmap('winter',len(Intensity[0,:])+5)
summer=cm.get_cmap('summer',len(Intensity[0,:])+5)
autumn=cm.get_cmap('autumn',len(Intensity[0,:])+5)
bone= cm.get_cmap('bone',len(Intensity[0,:])+5)
cool=cm.get_cmap('cool',len(Intensity[0,:])+5)
spring=cm.get_cmap('spring',len(Intensity[0,:])+5)
copper=cm.get_cmap('copper',len(Intensity[0,:])+5)
pink=cm.get_cmap('pink',len(Intensity[0,:])+5)
hot=cm.get_cmap('hot',len(Intensity[0,:])+5)
cmap=[gray,winter,summer,autumn,
cool,spring,copper,pink,hot,bone]
#Sets color maps for different files
color=cmap[z];
clr=color(np.linspace(0,1,len(Intensity.transpose())))
if total==1:
div=[1, 1];
elif total==2:
div=[1, 2];
elif total==3:
div=[1, 3];
elif total==4:
div=[2, 2];
elif total==5:
div=[2, 3];
elif total==6:
div=[2, 3];
elif total==10:
div=[2, 5];
else:
div=[3, 3];
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%%
#%%#%%#%%#%%#%%Normalization #%%#%%#%%#%%#%%#%%#%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%%
indLow=np.where(abs(Shift-normLow)==min(abs(Shift-normLow)))[0][0]
indHigh=np.where(abs(Shift-normHigh)==min(abs(Shift-normHigh)))[0][0];
Intensity_norm=np.empty(np.shape(Intensity));
for n in range(0,len(Intensity[1,:])):
Intensity_norm[:,n]=(Intensity[:,n]-min(Intensity[:,n])); #substract min
Intensity_norm[:,n]=Intensity_norm[:,n]/max(Intensity_norm[indLow:indHigh,n]); #divide by norm
Intensity_av=np.mean(Intensity_norm,axis=1); #Calculate average spectra
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
#%%#%%#%%#%%G and D modes: Intensity and Shift #%%#%%#%%#%%#%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
ind1Low=np.where(abs(Shift-band1Low)==min(abs(Shift-band1Low)))[0][0]
ind1High=np.where(abs(Shift-band1High)==min(abs(Shift-band1High)))[0][0];
ind2Low=np.where(abs(Shift-band2Low)==min(abs(Shift-band2Low)))[0][0]
ind2High=np.where(abs(Shift-band2High)==min(abs(Shift-band2High)))[0][0];
ind3Low=np.where(abs(Shift-band3Low)==min(abs(Shift-band3Low)))[0][0]
ind3High=np.where(abs(Shift-band3High)==min(abs(Shift-band3High)))[0][0];
Intensity_1rng=Intensity_norm[ind1Low:ind1High,:];
Intensity_2rng=Intensity_norm[ind2Low:ind2High,:];
Intensity_3rng=Intensity_norm[ind3Low:ind3High,:];
Shift_rng1=Shift[ind1Low:ind1High];
Shift_rng2=Shift[ind2Low:ind2High];
Shift_rng3=Shift[ind3Low:ind3High];
Int_G=np.array([]);
Int_D=np.array([]);
Int_2D=np.array([]);
center_G=np.array([]);
center_D=np.array([]);
center_2D=np.array([]);
if nt==1:
I_G_fit=[]
center_Gmin=[]
FWHM_Gmin=[]
Int_Gmin=[]
center_Gplus=[]
FWHM_Gplus=[]
Int_Gplus=[]
if lorentz==1:
I_D_fit=[]
FWHM_D=[]
I_2D_fit=[]
FWHM_2D=[]
if nt==0:
I_G_fit=[]
FWHM_G=[]
for n in tqdm(range(0,len(Intensity_norm[0,:]))):
if np.logical_and(lorentz==0,nt==0)==1:
#peak 1/G:
Int_G=np.append(Int_G,max(Intensity_1rng[:,n])-min(Intensity_1rng[:,n]));
indMax1=np.where(Intensity_1rng[:,n]==max(Intensity_1rng[:,n]))[0][0];
center_G=np.append(center_G,Shift_rng1[indMax1]);
if lorentz==0:
#peak 2/D:
Int_D=np.append(Int_D,max(Intensity_2rng[:,n])-min(Intensity_2rng[:,n]));
indMax2=np.where(Intensity_2rng[:,n]==max(Intensity_2rng[:,n]))[0][0];
center_D=np.append(center_D,Shift_rng2[indMax2]);
#peak 3/2D:
Int_2D=np.append(Int_2D,max(Intensity_3rng[:,n])-min(Intensity_3rng[:,n]));
indMax3=np.where(Intensity_3rng[:,n]==max(Intensity_3rng[:,n]))[0][0];
center_2D=np.append(center_2D,Shift_rng3[indMax3]);
#%%G+ and G- fitting
if nt==1:
#Define initial guesses
InitGuess_G=[Gmin_init[z][2]*((Gmin_init[z][1]/2)**2),
Gmin_init[z][0],
(Gmin_init[z][1]/2)**2,
Gplus_init[z][2]*((Gplus_init[z][1]/2)**2),
Gplus_init[z][0],
(Gplus_init[z][1]/2)**2,
0.1]
def fit_func(x, g1,g2,g3,g4,g5,g6,g7):
return (g1/((x-g2)**2+g3)+g4/((x-g5)**2+g6)+g7)# lorentz
gamma_G ,pcov2= curve_fit(fit_func, Shift_rng1, Intensity_1rng[...,n], InitGuess_G,
bounds=(0,np.inf),maxfev=10000)
I_G_fit.append(fit_func(Shift_rng1, *gamma_G))
if np.logical_and(np.logical_and(gamma_G[1]>band1Low,gamma_G[4]>band1Low),np.logical_and(gamma_G[1]<band1High,gamma_G[4]<band1High))==1:
if gamma_G[1]<gamma_G[4]:
center_Gmin.append(gamma_G[1])
FWHM_Gmin.append(2*np.sqrt(gamma_G[2]))
Int_Gmin.append(gamma_G[0]/gamma_G[2])
center_Gplus.append(gamma_G[4])
FWHM_Gplus.append(2*np.sqrt(gamma_G[5]))
Int_Gplus.append(gamma_G[3]/gamma_G[5])
else:
center_Gplus.append(gamma_G[1])
FWHM_Gplus.append(2*np.sqrt(gamma_G[2]))
Int_Gplus.append(gamma_G[0]/gamma_G[2])
center_Gmin.append(gamma_G[4])
FWHM_Gmin.append(2*np.sqrt(gamma_G[5]))
Int_Gmin.append(gamma_G[3]/gamma_G[5])
else:
center_Gplus.append(np.nan)
FWHM_Gplus.append(np.nan)
Int_Gplus.append(np.nan)
center_Gmin.append(np.nan)
FWHM_Gmin.append(np.nan)
Int_Gmin.append(np.nan)
#%%Lorentizan Fits
if lorentz==1:
#D bands
#Define Initial guesses
InitGuess_D=[D_init[z][2]*((D_init[z][1]/2)**2),
D_init[z][0],
(D_init[z][1]/2)**2,
0];
def fit_func2(x, g1,g2,g3,g4):
return (g1/((x-g2)**2+g3)+g4)# lorentz
gamma_D ,pcov2= curve_fit(fit_func2, Shift_rng2, Intensity_2rng[...,n],
InitGuess_D,bounds=(0,np.inf),maxfev=10000)
I_D_fit.append(fit_func2(Shift_rng2, *gamma_D))
if np.logical_and(gamma_D[1]>band2Low,gamma_D[1]<band2High)==1:
center_D=np.append(center_D,gamma_D[1]);
FWHM_D.append(2*np.sqrt(gamma_D[2]));
Int_D=np.append(Int_D,gamma_D[0]/gamma_D[2]);
else:
center_D=np.append(center_D,np.nan);
FWHM_D.append(np.nan);
Int_D=np.append(Int_D,np.nan);
#2D bands
#Define Initial guesses
InitGuess_2D=[init_2D[z][2]*((init_2D[z][1]/2)**2),
init_2D[z][0],
(init_2D[z][1]/2)**2,
0];
gamma_2D ,pcov2= curve_fit(fit_func2, Shift_rng3, Intensity_3rng[...,n],
InitGuess_2D, bounds=(0,np.inf),maxfev=10000)
I_2D_fit.append(fit_func2(Shift_rng3, *gamma_2D))
if np.logical_and(gamma_2D[1]>band3Low,gamma_2D[1]<band3High)==1:
center_2D=np.append(center_2D,gamma_2D[1]);
FWHM_2D.append(2*np.sqrt(gamma_2D[2]));
Int_2D=np.append(Int_2D,gamma_2D[0]/gamma_2D[2]);
else:
center_2D=np.append(center_2D,np.nan);
FWHM_2D.append(np.nan);
Int_2D=np.append(Int_2D,np.nan);
if nt==0:
#Lorentzian G fit if no G+/G- fit
InitGuess_G=[Gplus_init[z][2]*((Gplus_init[z][1]/2)**2),
Gplus_init[z][0],
(Gplus_init[z][1]/2)**2,
0];
gamma_G ,pcov2= curve_fit(fit_func2, Shift_rng1, Intensity_1rng[...,n],
InitGuess_G, bounds=(0,np.inf),maxfev=10000)
I_G_fit.append(fit_func2(Shift_rng1, *gamma_G))
if np.logical_and(gamma_G[1]>band1Low,gamma_G[1]<band1High)==1:
center_G=np.append(center_G,gamma_G[1]);
FWHM_G.append(2*np.sqrt(gamma_G[2]));
Int_G=np.append(Int_G,gamma_G[0]/gamma_G[2]);
else:
center_G=np.append(center_G,np.nan);
FWHM_G.append(np.nan);
Int_G=np.append(Int_G,np.nan);
if nt==1:
I21=Int_D/Int_Gplus;
G_av=np.nanmean(center_Gplus)
G_std=np.nanstd(center_Gplus)
Gmin_av=np.nanmean(center_Gmin)
Gmin_std=np.nanstd(center_Gmin)
else:
I21=Int_D/Int_G;
G_av=np.nanmean(center_G)
G_std=np.nanstd(center_G)
I21_av=np.nanmean(I21);
I21_error=np.nanstd(I21);
D_av=np.nanmean(center_D)
D_std=np.nanstd(center_D)
D2_av=np.nanmean(center_2D)
D2_std=np.nanstd(center_2D)
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
#%%#%%#%%#%%#RBM modes Shifts #%%#%%#%%#%%#%%#%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
if rbm == 1:
indRBMLow=np.where(Shift-RBMregion_Low==min(abs(Shift-RBMregion_Low)))[0][0];
indRBMHigh=np.where(Shift-RBMregion_High==min(abs(Shift-RBMregion_High)))[0][0];
PeaksInt=[];
PeaksLoc1=[];
if peaks == 1:
if z==0:
fig_peaks,ax_peaks= plt.subplots(div[0],div[1],figsize=(div[1]*5,div[0]*5), constrained_layout=True) #Create the figure only once, on the first loop
fig_peaks.canvas.manager.set_window_title('Region Peaks')
if total==1:
ax_peaks=[ax_peaks]
else:
ax_peaks=ax_peaks.flatten()
for n in range(0,len(Intensity_norm[0,:])):
[pksRBM,locsRBM]=findpeaks(Intensity_norm[indRBMLow:indRBMHigh,n],Prom[z]);
PeaksInt.append(pksRBM);
PeaksLoc1.append(Shift[locsRBM+indRBMLow]);
if peaks == 1:
ax_peaks[z].plot(Shift[indRBMLow:indRBMHigh],Intensity_norm[indRBMLow:indRBMHigh,n],color='C'+str(n))
ax_peaks[z].plot(Shift[locsRBM+indRBMLow],pksRBM,ls='',marker='v',markersize=10,color='C'+str(n))
ax_peaks[z].set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_peaks[z].set_ylabel('Intensity / a.u',fontsize=fs)
ax_peaks[z].tick_params(axis="both", labelsize=fs)
ax_peaks[z].set_title(name[z]+' :Region Peaks');
ax_peaks[z].set_xlim((Shift[indRBMLow],Shift[indRBMHigh]))
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
#%%#%%#%%Figures #%%#%%#%%#%%%
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%
#Raw spectra
if raw==1:
if z==0:
fig_raw,ax_raw= plt.subplots(div[0],div[1],figsize=(div[1]*5,div[0]*5), constrained_layout=True) #Create the figure only once, on the first loop
fig_raw.canvas.manager.set_window_title('Raw Data')
if total==1:
ax_raw=[ax_raw]
else:
ax_raw=ax_raw.flatten()
for n,clr2 in enumerate(clr):
ax_raw[z].plot(Shift,Intensity[:,n],color=clr2,label=str(n));
# ax_raw[z].legend()
ax_raw[z].set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_raw[z].set_ylabel('Intensity / a.u',fontsize=fs)
ax_raw[z].tick_params(axis="both", labelsize=fs)
ax_raw[z].set_title(name[z]+': Raw data');
ax_raw[z].set_xlim((0,np.max(Shift)))
#%%Normalised spectra
if norm==1:
if z==0:
fig_norm,ax_norm= plt.subplots(div[0],div[1], figsize=(div[1]*5,div[0]*5), constrained_layout=True)
fig_norm.canvas.manager.set_window_title('Normalized Spectra')
if total==1:
ax_norm=[ax_norm]
else:
ax_norm=ax_norm.flatten()
for n,clr2 in enumerate(clr):
ax_norm[z].plot(Shift,Intensity_norm[:,n],color=clr2,label=str(n));
#ax_norm[z].legend()
ax_norm[z].set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_norm[z].set_ylabel('Intensity / a.u',fontsize=fs)
ax_norm[z].tick_params(axis="both", labelsize=fs)
ax_norm[z].set_title(name[z]+': Normalized spectra');
ax_norm[z].set_ylim((0,np.max(Intensity_norm)))
ax_norm[z].set_xlim((0,np.max(Shift)))
#%%Average spectra
if z==0:
fig_avg,ax_avg= plt.subplots(1,1, figsize=(6,6), constrained_layout=True)
fig_avg.canvas.manager.set_window_title('Average Spectra')
ax_avg.plot(Shift,Intensity_av,color=clr[round(len(clr)/2)],label=name[z]);
ax_avg.legend(fontsize=fs)
ax_avg.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_avg.set_ylabel('Intensity / a.u',fontsize=fs)
ax_avg.tick_params(axis="both", labelsize=fs)
ax_avg.set_title('Average spectra');
#%% #Spectral rng for Intensity ratio
if rng ==1:
if z==0:
fig_rng,ax_rng= plt.subplots(div[0],div[1], figsize=(div[1]*5,div[0]*5), constrained_layout=True)
fig_rng.canvas.manager.set_window_title('Range Used for Intensity Ratio Calculation')
if total==1:
ax_rng=[ax_rng]
else:
ax_rng=ax_rng.flatten()
for n,clr2 in enumerate(clr):
ax_rng[z].plot(Shift[ind1Low:ind1High],Intensity_1rng[:,n],color=clr2);
ax_rng[z].plot(Shift[ind2Low:ind2High],Intensity_2rng[:,n],color=clr2);
ax_rng[z].plot(Shift[ind3Low:ind3High],Intensity_3rng[:,n],color=clr2);
ax_rng[z].set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_rng[z].set_ylabel('Intensity / a.u',fontsize=fs)
ax_rng[z].tick_params(axis="both", labelsize=fs)
ax_rng[z].set_title(name[z]+': Range for $I_{d}/I_{g}$ Calculation');
ax_rng[z].set_ylim((0,1))
#%% Histogram intensity ratio
if z==0:
fig_IR,ax_IR= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_IR.canvas.manager.set_window_title('Intensity Ratio')
#Define Bin Size
binsInt=np.arange(min(I21), max(I21) + width/250, width/250)
if len(binsInt)==1:
binsInt=np.arange(min(I21)- width/250, max(I21) + width/250, width/250)
ax_IR.hist(I21,binsInt,color=clr[round(len(clr)/2)],
label=name[z]+': $I_{d}/I_{g}$='+str(round(I21_av,4))+'$\pm$'
+str(round(I21_error,4)),alpha=0.5,ec='k',align='left',density=den)
ax_IR.legend(fontsize=fs-2)
ax_IR.set_xlabel('$I_{d}/I_{g}$',fontsize=fs)
ax_IR.set_ylabel('counts',fontsize=fs)
ax_IR.tick_params(axis="both", labelsize=fs)
ax_IR.set_title('Intensity ratio: $I_{d}/I_{g}$',fontsize=fs+2);
#%% Raman shift G mode
if z==0:
fig_G,ax_G= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_G.canvas.manager.set_window_title('Raman shift G band')
if nt==1:
fig_G_I,ax_G_I= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_G_I.canvas.manager.set_window_title('Intensity ratio G modes')
if nt==0:
binsShift=np.arange(min(center_G)-width, max(center_G) + width, width)
# if len(binsShift)==1:
# ax_G.hist(center_G,width, color=clr[round(len(clr)/2)],
# label=name[z]+': $G$ band='+str(round(G_av,2))+'$\pm$'
# +str(round(G_std,2))+' $cm^{-1}$',
# alpha=0.5,ec='k',align='left')
#else:
ax_G.hist(center_G,binsShift,color=clr[round(len(clr)/2)],
label=name[z]+': $G$ band='+str(round(G_av,2))+'$\pm$'
+str(round(G_std,2))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_G.set_title('Raman shift $G$ band',fontsize=fs+2);
else:
# binsShift=round(max(center_Gplus)-min(center_Gmin));
binsShift=np.arange(min(center_Gmin), max(center_Gplus) + width, width)
ax_G.hist(center_Gplus,binsShift, color=clr[round(len(clr)/3)],
label=name[z]+': $G^{+}$ band='+str(round(G_av,2))+'$\pm$'
+str(round(G_std,2))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_G.hist(center_Gmin,binsShift,color=clr[round(len(clr)/2)],
label=name[z]+': $G^{-}$ band='+str(round(Gmin_av,2))+'$\pm$'
+str(round(Gmin_std,2))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_G.set_title('Raman shift $G$ modes',fontsize=fs+2);
Iplus_minus=np.array(Int_Gplus)/np.array(Int_Gmin);
binsInt=np.arange(min(Iplus_minus), max(Iplus_minus) + width/10, width/10)
ax_G_I.hist(Iplus_minus,binsInt,color=clr[round(len(clr)/2)],
label=name[z]+': $I_{G^+}/I_{G^-}$ ='+str(round(np.mean(Iplus_minus),2))+'$\pm$'
+str(round(np.std(Iplus_minus),2)),
alpha=0.5,ec='k',align='left',density=den)
ax_G_I.legend(fontsize=fs-2)
ax_G_I.set_xlabel('$I_{G^+}/I_{G^-}$',fontsize=fs)
ax_G_I.set_ylabel('counts',fontsize=fs)
ax_G_I.tick_params(axis="both", labelsize=fs)
ax_G_I.set_title('Intensity ratio G modes',fontsize=fs+2);
ax_G.legend(fontsize=fs-2)
ax_G.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_G.set_ylabel('counts',fontsize=fs)
ax_G.tick_params(axis="both", labelsize=fs)
#%% #Raman shift D mode
if z==0:
fig_D,ax_D= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_D.canvas.manager.set_window_title('Raman shift D band')
binsShift=np.arange(min(center_D)-width, max(center_D) + width, width)
ax_D.hist(center_D,binsShift,color=clr[round(len(clr)/2)],
label=name[z]+': $D$ band='+str(round(D_av,2))+'$\pm$'
+str(round(D_std,2))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_D.legend(fontsize=fs-2)
ax_D.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_D.set_ylabel('counts',fontsize=fs)
ax_D.tick_params(axis="both", labelsize=fs)
ax_D.set_title('Raman shift $D$ band',fontsize=fs+2);
ax_D.xaxis.set_major_locator(MaxNLocator(integer=True))
#%% #Raman shift 2D mode
if z==0:
fig_2D,ax_2D= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_2D.canvas.manager.set_window_title('Raman shift D 2band')
binsShift=np.arange(min(center_2D)-width, max(center_2D) + width, width)
ax_2D.hist(center_2D,binsShift,color=clr[round(len(clr)/2)],
label=name[z]+': $2D$ band='+str(round(D2_av,2))+'$\pm$'
+str(round(D2_std,2))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_2D.legend(fontsize=fs-2)
ax_2D.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_2D.set_ylabel('counts',fontsize=fs)
ax_2D.tick_params(axis="both", labelsize=fs)
ax_2D.set_title('Raman shift $2D$ band',fontsize=fs+2);
ax_2D.xaxis.set_major_locator(MaxNLocator(integer=True))
#%%%
# Lorentz fit results
if nt==1 or lorentz==1:
if nt==1 and lorentz==0:
fig_lor_fit,ax_lor_fit= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_lor_fit.canvas.manager.set_window_title('Lorenztian Fitting results')
ax_lor_fit=[ax_lor_fit]
for n,clr2 in enumerate(clr):
ax_lor_fit[0].plot(Shift_rng1,Intensity_1rng[:,n],color=clr2)
ax_lor_fit[0].plot(Shift_rng1,np.transpose(I_G_fit)[:,n],'--r');
elif lorentz==1:
fig_lor_fit,ax_lor_fit= plt.subplots(1,3, figsize=(18,6), constrained_layout=True)
fig_lor_fit.canvas.manager.set_window_title('Lorenztian Fitting results')
for n,clr2 in enumerate(clr):
if n==int(len(clr)/2):
ax_lor_fit[0].plot(Shift_rng1,Intensity_1rng[:,n],color=clr2, label='Data')
ax_lor_fit[0].plot(Shift_rng1,np.transpose(I_G_fit)[:,n],'--r', label='Lorentzian Fit');
ax_lor_fit[1].plot(Shift_rng2,Intensity_2rng[:,n],color=clr2, label='Data')
ax_lor_fit[1].plot(Shift_rng2,np.transpose(I_D_fit)[:,n],'--r', label='Lorentzian Fit');
ax_lor_fit[2].plot(Shift_rng3,Intensity_3rng[:,n],color=clr2, label='Data')
ax_lor_fit[2].plot(Shift_rng3,np.transpose(I_2D_fit)[:,n],'--r', label='Lorentzian Fit');
else:
ax_lor_fit[0].plot(Shift_rng1,Intensity_1rng[:,n],color=clr2)
ax_lor_fit[0].plot(Shift_rng1,np.transpose(I_G_fit)[:,n],'--r');
ax_lor_fit[1].plot(Shift_rng2,Intensity_2rng[:,n],color=clr2)
ax_lor_fit[1].plot(Shift_rng2,np.transpose(I_D_fit)[:,n],'--r');
ax_lor_fit[2].plot(Shift_rng3,Intensity_3rng[:,n],color=clr2)
ax_lor_fit[2].plot(Shift_rng3,np.transpose(I_2D_fit)[:,n],'--r');
ax_lor_fit[1].set_title('Fitting results D',fontsize=fs)
ax_lor_fit[2].set_title('Fitting results 2D',fontsize=fs)
for axx in ax_lor_fit:
axx.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
axx.set_ylabel('Intensity / a.u',fontsize=fs)
axx.legend(fontsize=fs)
ax_lor_fit[0].set_title('Fitting results G',fontsize=fs)
fig_lor_fit.suptitle('Lorentzian fits of '+name[z],fontsize=fs+3)
fig_lor_fit.savefig('Lorentz_fit_'+name[z]+imgtype)
#%% #Raman shift RBM
if rbm==1:
if z==0:
fig_RBM,ax_RBM= plt.subplots(1,1, figsize=(8,6), constrained_layout=True)
fig_RBM.canvas.manager.set_window_title('Raman shift RBM modes')
npeaks=max([len(n) for n in PeaksLoc1])
PeaksLoc = np.array(flatten(PeaksLoc1))
binsShift=np.arange(min(PeaksLoc)-width, max(PeaksLoc) + width, width)
[histRBM, edgesRBM]=np.histogram(PeaksLoc,binsShift)
peaksRBM, _ = find_peaks(np.concatenate(([min(histRBM)],histRBM,[min(histRBM)])), distance=10)
peaksRBM=peaksRBM-1
f_peaksRBM=edgesRBM[peaksRBM]
groups=dict.fromkeys(f_peaksRBM)
meanRBM[name[z]]=dict.fromkeys(f_peaksRBM)
stdRBM[name[z]]=dict.fromkeys(f_peaksRBM)
for n in groups:
groups[n]=[]
for p in PeaksLoc:
try:
diff_p=abs(p-f_peaksRBM)
ni=np.where(diff_p==min(diff_p))[0][0]
groups[f_peaksRBM[ni]].append(p)
except ValueError:
continue
for n in groups:
meanRBM[name[z]][n]=np.mean(groups[n])
stdRBM[name[z]][n]=np.std(groups[n])
lb=[str(round(meanRBM[name[z]][n],2))+ ' $\pm$ ' +str(round(stdRBM[name[z]][n],2)) for n in stdRBM[name[z]]]
ax_RBM.hist(PeaksLoc,binsShift,color=clr[round(len(clr)/2)],label=name[z]+': '+', '.join(map(str, lb))+' $cm^{-1}$',
alpha=0.5,ec='k',align='left',density=den)
ax_RBM.legend(fontsize=fs-2)
ax_RBM.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
ax_RBM.set_ylabel('counts',fontsize=fs)
ax_RBM.tick_params(axis="both", labelsize=fs)
ax_RBM.set_title('Raman shift $RBM$ modes',fontsize=fs+2);
ax_RBM.xaxis.set_major_locator(MaxNLocator(integer=True))
fig_peaks.savefig('Peak Data.svg')
fig_RBM.savefig('RBM.svg')
#%% #Correlations
if z==0:
fig_Corr,ax_Corr= plt.subplots(2,2, figsize=(15,15), constrained_layout=True)
fig_Corr.canvas.manager.set_window_title('Correlations')
ax_Corr=ax_Corr.flatten()
if nt==1:# In the case where there are separate G+/G- peaks, use G+
#Linear fit Intensity Ratio vs G+
idx = np.isfinite(I21) & np.isfinite(center_Gplus)
fit_I21_G=np.polyfit(np.array(I21)[idx],np.array(center_Gplus)[idx],1)
#Plot Intensity Ratio vs G+
ax_Corr[0].plot(I21,center_Gplus,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_I21_G[0],3)))
#Linear fit 2D vs Gplus
idx = np.isfinite(center_Gplus) & np.isfinite(center_2D)
fit_2D_G=np.polyfit(np.array(center_Gplus)[idx],np.array(center_2D)[idx],1)
#Plot 2D vs G
ax_Corr[2].plot(center_Gplus,center_2D,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_2D_G[0],3)))
xx2=np.linspace(min(center_Gplus),max(center_Gplus),100)
#Linear fit D vs Gplus
idx = np.isfinite(center_Gplus) & np.isfinite(center_D)
fit_D_G=np.polyfit(np.array(center_Gplus)[idx],np.array(center_D)[idx],1)
#Plot 2D vs G
ax_Corr[3].plot(center_Gplus,center_D,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_D_G[0],3)))
xx2=np.linspace(min(center_Gplus),max(center_Gplus),100)
else:
idx = np.isfinite(I21) & np.isfinite(center_G)
fit_I21_G=np.polyfit(np.array(I21)[idx],np.array(center_G)[idx],1)
ax_Corr[0].plot(I21,center_G,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_I21_G[0],3)))
#Linear fit 2D vs G
idx = np.isfinite(center_G) & np.isfinite(center_2D)
fit_2D_G=np.polyfit(np.array(center_G)[idx],np.array(center_2D)[idx],1)
#Plot 2D vs G
ax_Corr[2].plot(center_G,center_2D,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_2D_G[0],3)))
xx2=np.linspace(min(center_G),max(center_G),100)
#Linear fit D vs G
idx = np.isfinite(center_G) & np.isfinite(center_D)
fit_D_G=np.polyfit(np.array(center_G)[idx],np.array(center_D)[idx],1)
#Plot 2D vs G
ax_Corr[3].plot(center_G,center_D,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_D_G[0],3)))
xx2=np.linspace(min(center_G),max(center_G),100)
xx=np.linspace(min(I21),max(I21),100)
ax_Corr[0].plot(xx,fit_I21_G[0]*xx+fit_I21_G[1],lw=2,ls='-',color=clr[round(len(clr)/3)])
idx = np.isfinite(I21) & np.isfinite(center_D)
fit_I21_D=np.polyfit(np.array(I21)[idx],np.array(center_D)[idx],1)
ax_Corr[1].plot(I21,center_D,marker='o',ls='',color=clr[round(len(clr)/2)],label=name[z]+', slope='+str(round(fit_I21_D[0],3)))
ax_Corr[1].plot(xx,fit_I21_D[0]*xx+fit_I21_D[1],lw=2,ls='-',color=clr[round(len(clr)/3)])
ax_Corr[3].plot(xx2,fit_D_G[0]*xx2+fit_D_G[1],lw=2,ls='-',color=clr[round(len(clr)/3)])
ax_Corr[2].plot(xx2,fit_2D_G[0]*xx2+fit_2D_G[1],lw=2,ls='-',color=clr[round(len(clr)/3)])
xlab=['$I_{d}/I_{g}$','$I_{d}/I_{g}$','Raman Shift $G$ band /$ cm^{-1}$','Raman Shift $G$ band /$ cm^{-1}$']
ylab=['Raman Shift $G$ band /$ cm^{-1}$','Raman Shift $D$ band /$ cm^{-1}$','Raman Shift $2D$ band /$ cm^{-1}$','Raman Shift $D$ band /$ cm^{-1}$']
titles=['$G$ band Shift vs. $I_{d}/I_{g}$','$D$ band Shift vs. $I_{d}/I_{g}$','$2D$ band Shift vs. $G$ band Shift','$D$ band Shift vs. $G$ band Shift']
for i,ax in enumerate(ax_Corr):
ax.legend(fontsize=fs-3)
ax.tick_params(axis="both", labelsize=fs)
ax.set_xlabel(xlab[i],fontsize=fs)
ax.set_ylabel(ylab[i],fontsize=fs)
ax.set_title(titles[i],fontsize=fs)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
#%% #2D Map
if maps==1:
if use_leng!=1:
ratio=col/rows
else:
ratio=xlen/ylen
try:
fig_Map.clf()
except NameError:
pass
fig_Map=plt.figure('2D Map', figsize=(7*ratio,6), constrained_layout=True)
ax_Map= fig_Map.add_subplot(111)
if map_var=='G':
if nt==1:
var2D=np.array(center_Gplus)
else:
var2D=center_G
elif map_var=='I':
var2D=I21
elif map_var=='D':
var2D=center_D
elif map_var=='2D':
var2D=center_2D
else:
print('map_var must be I, G, D, or 2D')
break
spec_label={'I':'$I_{d}/I_{g}$',
'G':'$G$ band',
'D':'$D$ band',
'2D':'$2D$ band'}
if map_var=='I':
unit=''
else:
unit=' Raman Shift /$ cm^{-1}$'
G2D=var2D.reshape([rows,col])
if use_leng!=1:
y1=np.linspace(0,rows-1,rows)
x1=np.linspace(0,col-1,col)
ylab='y pixel'
xlab='x pixel'
else:
y1=np.linspace(0,ylen,rows)
x1=np.linspace(0,xlen,col)
ylab='y length, ($\mu m$)'
xlab='x length, ($\mu m$)'
h=ax_Map.pcolormesh(x1,y1,G2D)
clbr=plt.colorbar(h,ax=ax_Map)
clbr.set_label(spec_label[map_var] +unit, rotation=270,labelpad=20,fontsize=fs)
clbr.ax.tick_params(labelsize=fs)
ax_Map.set_xlabel(xlab,fontsize=fs)
ax_Map.set_ylabel(ylab,fontsize=fs)
ax_Map.set_title('2D Map of '+spec_label[map_var]+' for '+name[z],fontsize=fs)
ax_Map.tick_params(axis="both", labelsize=fs)
sz=rows*col
pixelwidthx=xlen/(2*int(col))
pixelwidthy=ylen/(2*int(rows))
x1a=np.linspace(0-pixelwidthx,xlen+pixelwidthx,int(col)+1)
y1a=np.linspace(0-pixelwidthy,ylen+pixelwidthy,int(rows)+1)
xx,y1=np.meshgrid(x1,y1)
xx=xx.flatten()
y1=y1.flatten()
fig_Map.savefig(name[z]+'_2DMap_'+map_var+imgtype)
pkn=5
line, = ax_Map.plot(xx, y1, 'b',picker=pkn)
line.set_visible(False)
#%% Save shifts in .csv
if lorentz==0 and nt==1:
data_all={'Intensity_G-':Int_Gmin,'Shift_G-':center_Gmin,'FWHM_G-':FWHM_Gmin,'Intensity_G+':Int_Gplus,
'Shift_G+':center_Gplus,'FWHM_G+':FWHM_Gplus,'Intensity_D':Int_D,'Shift_D':center_D,'Intensity_2D':Int_2D,'Shift_2D':center_2D}
elif lorentz==0 and nt == 0:
data_all={'Intensity_G':Int_G,'Shift_G':center_G,'Intensity_D':Int_D,'Shift_D':center_D,'Intensity_2D':Int_2D,'Shift_2D':center_2D}
elif lorentz==1 and nt==1:
data_all={'Intensity_G-':Int_Gmin,'Shift_G-':center_Gmin,'FWHM_G-':FWHM_Gmin,'Intensity_G+':Int_Gplus,
'Shift_G+':center_Gplus,'FWHM_G+':FWHM_Gplus,'Intensity_D':Int_D,'Shift_D':center_D,
'FWHM_D':FWHM_D,'Intensity_2D':Int_2D,'Shift_2D':center_2D,'FWHM_2D':FWHM_2D}
elif lorentz==1 and nt==0:
data_all={'Intensity_G':Int_G,'Shift_G':center_G,'FWHM_G':FWHM_G,'Intensity_D':Int_D,
'Shift_D':center_D,'FWHM_D':FWHM_D,'Intensity_2D':Int_2D,'Shift_2D':center_2D,'FWHM_2D':FWHM_2D}
data_all['Intensity Ratio']=I21
if rbm==1:
data_all['Intensity_RBM']=PeaksInt
data_all['Shift_RBM']=PeaksLoc1
data_all_df=pd.DataFrame(data_all)
data_all_df.index.name='Spectra #'
data_all_df.to_csv(name[z]+'_results.csv')
#%%% Save Figures in Scaleable Vector Format
if raw==1:
fig_raw.savefig('Raw Data'+imgtype)
if norm==1:
fig_norm.savefig('Normalized Data'+imgtype)
fig_avg.savefig('Avg Spectra'+imgtype)
if rng==1:
fig_rng.savefig('SpecRangeIR'+imgtype)
fig_IR.savefig('IntensityRatio'+imgtype)
fig_2D.savefig('2DBand'+imgtype)
fig_G.savefig('Gband'+imgtype)
fig_D.savefig('Dband'+imgtype)
if correlations==1:
fig_Corr.savefig('Correlations'+imgtype)
if nt==1:
fig_G_I.savefig('IntensityRatioG+G-'+imgtype)
#%% Pick Event for 2D Map
def onpick(event):
global pts
global ptsM
global ind
global figFC
global axFC
global data_spec
if 'ind' in globals():
del ind
try:
pts
except NameError:
pts = None
if pts!=None:
if plt.fignum_exists('Spectra'):
pass
else:
for p in pts:
for pp in p:
pp.remove()
if plt.fignum_exists('Spectra'):
pass
else:
figFC = plt.figure(num='Spectra',figsize=(10*1.5,5*1.5))
axFC = figFC.add_subplot(111)
data_spec=pd.DataFrame()
data_spec['Raman Shift(cm-1)']=Shift
pts=[]
if event.artist!=line: #check that you clicked on the object you wanted
return True
if not len(event.ind): #check the index is valid
return True
ind = event.ind[0]
if map_var=='I':
unit=''
else:
unit=' $cm^{-1}$'
pts.append(ax_Map.plot(xx[ind],y1[ind],'ro'))
if use_leng!=1:
labp='x:'+str(int(xx[ind]))+', y:'+str(int(y1[ind]))
else:
labp='x:'+str(round_sig(xx[ind],3))+'$\mu m$, y:'+str(round_sig(y1[ind],3))+'$\mu m$'
data_spec[labp]=Intensity_norm[:,ind]
#Plot spectra for selected pixel
if use_leng!=1:
axFC.plot(Shift,Intensity_norm[:,ind],
label=labp+', '+spec_label[map_var]+': '+str(round_sig(var2D[ind],5))+unit,
markersize=1)
print('\nPixel Location x:',int(xx[ind]),' y:',int(y1[ind]),'\n')
else:
axFC.plot(Shift,Intensity_norm[:,ind], label=labp+', '+spec_label[map_var]+': '+str(round_sig(var2D[ind],5))+unit, markersize=1)
print('\nPixel Location :'+str(round_sig(xx[ind],3))+' $\mu m$, y:'+str(round_sig(y1[ind],3))+' $\mu m$ \n')
axFC.set_xlabel('Raman shift / $cm^{-1}$',fontsize=fs)
axFC.set_ylabel('Intensity',fontsize=fs)
axFC.legend(fontsize=fs-1)
axFC.set_title('Selected Normalized Spectra on '+name[z],fontsize=fs+1)
axFC.tick_params(axis="both", labelsize=fs)
print('Intensity Ratio is '+str(round_sig(I21[ind],5))+'\n')
if nt==1:
print('G+ band Shift is '+str(round_sig(center_Gplus[ind],5))+' cm-1 \n')
else:
print('G band Shift is '+str(center_G[ind])+' cm-1 \n')
print('D band Shift is '+str(center_D[ind])+' cm-1 \n')
print('2D band Shift is '+str(center_2D[ind])+' cm-1 \n')
data_spec.to_csv('Selected_Spectra.csv',index=False)
fig_Map.canvas.draw()
figFC.canvas.draw()
figFC.savefig('SelectedSpectra'+imgtype)
return True
if maps==1:
fig_Map.canvas.mpl_connect('pick_event', onpick)
| meaton212/Raman_Code_Analysis | Python Code/RamanAnalysis_nanomaterials.py | RamanAnalysis_nanomaterials.py | py | 42,378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.close",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scipy.signal.find_peaks",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "num... |
8172670109 | from __future__ import absolute_import
from __future__ import division
import os
import sys
import argparse
__author__ = "Jonathan Madsen"
__copyright__ = "Copyright 2020, The Regents of the University of California"
__credits__ = ["Jonathan Madsen"]
__license__ = "MIT"
__version__ = "@PROJECT_VERSION@"
__maintainer__ = "Jonathan Madsen"
__email__ = "jrmadsen@lbl.gov"
__status__ = "Development"
from .analyze import (
load,
match,
search,
expression,
sort,
group,
add,
subtract,
unify,
dump,
)
def embedded_analyze(
args=None,
data=[],
call_exit=False,
verbose=os.environ.get("TIMEMORY_VERBOSE", 0),
ranks=[],
):
"""This is intended to be called from the embedded python interpreter"""
if len(ranks) > 0:
try:
import mpi4py # noqa: F401
from mpi4py import MPI # noqa: F401
rank = MPI.COMM_WORLD.Get_rank()
if rank not in ranks:
return
except (ImportError, RuntimeError):
pass
call_exit = False
cmd_line = False
if args is None:
args = sys.argv[:]
call_exit = True
cmd_line = True
try:
parser = argparse.ArgumentParser()
parser.add_argument(
"files",
metavar="file",
type=str,
nargs="*" if not cmd_line else "+",
help="Files to analyze",
default=[],
)
parser.add_argument(
"-f",
"--format",
type=str,
help="Data output format.",
choices=(
"dot",
"flamegraph",
"tree",
"table",
"markdown",
"html",
"markdown_grid",
),
nargs="*",
default=None,
)
parser.add_argument(
"-o",
"--output",
type=str,
help="Output to file(s)",
nargs="*",
default=None,
)
parser.add_argument(
"-M",
"--mode",
help="Analysis mode",
nargs="*",
choices=("add", "subtract", "unify", "group"),
default=[],
)
parser.add_argument(
"-m",
"--metric",
"--column",
type=str,
help="Metric(s) to extract",
default="sum.*(.inc)$",
)
parser.add_argument(
"-s",
"--sort",
type=str,
help="Sort the metric",
choices=("ascending", "descending"),
default=None,
)
parser.add_argument(
"-g",
"--group",
type=str,
help="Group by a column name in the dataframe",
default=None,
)
parser.add_argument(
"--field",
type=str,
help="Dataframe column to search/match against",
default="name",
)
parser.add_argument(
"--search",
type=str,
default=None,
help="Regular expression for re.search(...), i.e. a substring match",
)
parser.add_argument(
"--match",
type=str,
default=None,
help="Regular expression for re.match(...), i.e. a full string match",
)
parser.add_argument(
"--expression",
type=str,
default=None,
help=(
"A space-delimited comparison operation expression using 'x' "
+ "for the variable, numerical values, and: < <= > >= && ||. "
+ "E.g. 'x > 1.0e3 && x < 100000'. 'x' will be -m/--metric"
),
)
parser.add_argument(
"-e",
"--echo-dart",
help="echo Dart measurement for CDash",
action="store_true",
)
parser.add_argument(
"--per-thread",
help=(
"Encode the thread ID in node hash to ensure squashing doesn't "
+ "combine thread-data"
),
action="store_true",
)
parser.add_argument(
"--per-rank",
help=(
"Encode the rank ID in node hash to ensure squashing doesn't "
+ "combine rank-data"
),
action="store_true",
)
parser.add_argument(
"--select",
help=(
"Select the component type if the JSON input contains output "
+ "from multiple components"
),
type=str,
default=None,
)
parser.add_argument(
"--exit-on-failure",
help="Abort with non-zero exit code if errors arise",
action="store_true",
)
_args = parser.parse_args(args)
if _args.exit_on_failure:
call_exit = True
if _args.group is None and _args.format is None:
_args.format = ["tree"]
if _args.group is not None and _args.format not in ("table", None):
raise RuntimeError("Invalid data format for group")
if isinstance(_args.select, str):
_args.select = _args.select.split()
gfs = []
for itr in data:
if not isinstance(itr, dict):
print("data: {}\ndata-type: {}".format(itr, type(itr).__name__))
gfs.append(
load(
itr,
select=_args.select,
per_thread=_args.per_thread,
per_rank=_args.per_rank,
)
)
for itr in _args.files:
if not os.path.exists(itr):
print("file: {}\nfile-type: {}".format(itr, type(itr).__name__))
gfs.append(
load(
itr,
select=_args.select,
per_thread=_args.per_thread,
per_rank=_args.per_rank,
)
)
def apply(_input, _mode, *_args, **_kwargs):
if _mode == "search":
return search(_input, *_args, **_kwargs)
elif _mode == "match":
return match(_input, *_args, **_kwargs)
elif _mode == "expression":
return expression(_input, *_args, **_kwargs)
elif _mode == "sort":
return sort(_input, *_args, **_kwargs)
elif _mode == "add":
return add(_input)
elif _mode == "subtract":
return subtract(_input)
elif _mode == "unify":
return unify(_input)
elif _mode == "group":
return group(_input, *_args, **_kwargs)
# apply search before match since this is less restrictive
if _args.search is not None:
gfs = apply(gfs, "search", pattern=_args.search, field=_args.field)
# apply match after search since this is more restrictive
if _args.match is not None:
gfs = apply(gfs, "match", pattern=_args.match, field=_args.field)
# apply numerical expression last
if _args.expression is not None:
gfs = apply(
gfs,
"expression",
math_expr=_args.expression,
metric=_args.metric,
)
# apply the mutating operations
if "add" in _args.mode:
gfs = apply(gfs, "add")
elif "subtract" in _args.mode:
gfs = apply(gfs, "subtract")
elif "unify" in _args.mode:
gfs = apply(gfs, "unify")
if _args.sort is not None:
gfs = apply(
gfs,
"sort",
metric=_args.metric,
ascending=(_args.sort == "ascending"),
)
files = _args.output
if files is not None and len(files) == 1:
files = files[0]
if _args.group is not None:
gfs = apply(
gfs,
"group",
metric=_args.metric,
field=_args.group,
ascending=(_args.sort == "ascending"),
)
_args.format = [None]
for fmt in _args.format:
dump(gfs, _args.metric, fmt, files, _args.echo_dart)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
import traceback
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=10)
print("Exception - {}".format(e))
if call_exit or _args.exit_on_failure:
sys.exit(1)
elif not cmd_line:
raise
| NERSC/timemory | timemory/analyze/__init__.py | __init__.py | py | 8,843 | python | en | code | 328 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Get_rank",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "mpi4py.... |
20637675592 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_excel('Dataset.xlsx', index = None)
df.head()
G=nx.from_pandas_edgelist(df, 'Departure Station', 'Arrival Station','Time of Travel')
nx.draw(G, with_labels=False)
df['Route'] = df['Departure Station'].map(str) + ' - ' + df['Arrival Station'].map(str)
df.head()
df2 = df[['Route','Tube Line', 'Time of Travel']]
df3=df2.drop_duplicates()
df3['Route Reversed'] = df['Arrival Station'].map(str) + ' - ' + df['Departure Station'].map(str)
df6 = df3[['Route Reversed', 'Tube Line', 'Time of Travel']]
df6.rename(columns = {'Route Reversed' : 'Route'}, inplace = True)
df3 = df3.append(df6, ignore_index = True)
del df3['Route Reversed']
def route_calculator(source,target):
d, p = nx.single_source_dijkstra(G,source=source, target=target, weight='Time of Travel')
route=[]
for i in range(len(p)-1):
route.append(str(p[i]+" - "+p[i+1]))
df4 = pd.DataFrame(route, columns=["Route"])
df5 = pd.merge(df4,df3,"left", left_on="Route", right_on="Route")
df5.to_excel('Result.xlsx')
print(df5)
print("Total time for Travel", d+ len(p) -2 , "Mins (Includes 1 min wait time at each station :))")
| aseemkc/Travel-Planner1 | Travel Planner/Algorithm.py | Algorithm.py | py | 1,280 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "networkx.from_pandas_edgelist",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "network... |
16313772281 | import requests
_BASE_WORD_URL = 'http://www.dictionary.com/browse/'
_DEFAULT_HEADERS = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
'host': 'www.dictionary.com'
}
_DEFAULT_AUDIO_HEADERS = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
}
def get_word_page(word, timeout=None):
return requests.get(
_BASE_WORD_URL + word,
headers=_DEFAULT_HEADERS,
timeout=timeout)
def download_word_pronunciation(url):
res = requests.get(url, headers=_DEFAULT_AUDIO_HEADERS, stream=True)
audio = res.content
content_type = res.headers.get('content-type', 'audio/mpeg3')
return audio, content_type
| mradlinski/dictcom | dictcom/download.py | download.py | py | 807 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
}
] |
24807681148 | #!/usr/bin/env python
# coding: utf-8
# In[93]:
import numpy as np
import networkx as nx
import pandas as pd
import time
# # Load ca-GrQc
# In[94]:
def load_ca_grqc():
data = pd.read_csv('CA-GrQc.txt', sep="\t", skiprows=3)
records = data.to_records(index=False)
edges = list(records)
G = nx.DiGraph()
G.add_edges_from(edges)
return G
# # Load github social
# In[95]:
def load_github_social():
data = pd.read_csv('git_web_ml/musae_git_edges.csv')
records = data.to_records(index=False)
edges = list(records)
G = nx.Graph()
G.add_edges_from(edges)
return G
# # Load web-berkstan
# In[96]:
def load_web_berkstan():
data = pd.read_csv('web-BerkStan.txt', sep="\t", skiprows=3)
records = data.to_records(index=False)
edges = list(records)
G = nx.DiGraph()
G.add_edges_from(edges)
return G
# ## Chiba-Nishizeki
# In[97]:
def Chiba_Nishizeki(G):
nodes_degree_sorted = sorted(G.degree, key=lambda x: x[1], reverse=True)
nodes_sorted = [x[0] for x in nodes_degree_sorted]
num_nodes = len(G.nodes)
mark_dict = dict(zip(nodes_sorted,np.zeros(num_nodes)))
triangle_list = []
for i in range(num_nodes-2):
v = nodes_sorted[i]
for u in G.neighbors(v):
mark_dict[u] = 1
for u in G.neighbors(v):
for w in G.neighbors(u):
if(w!=v and mark_dict[w]==1):
triangle_list.append((v,u,w))
mark_dict[u] = 0
G.remove_node(v)
return triangle_list
# ## Trie Iterator
# In[ ]:
class iterator:
"""
Parameters:
G: graph to iterator
depth_nodes: nodes at each depth. If depth==0, iterate over all nodes. If depth==1, iterate over neighbors
of specific nodes.
depth: current depth the iterator is at
depth_idx: index of node that the iterator is stopped at for each depth.
current_node: current node the iterator is at
"""
def __init__(self,G):
self.atEnd = False;
self.G = G
self.depth_nodes = [sorted(list(self.G.nodes)),[]]
self.depth = -1
self.depth_idx = [0,0]
self.current_node = None
def key(self):
return self.current_node
def next(self):
self.depth_idx[self.depth]+=1
if(self.itr_atEnd() != True):
self.current_node = (self.depth_nodes[self.depth])[self.depth_idx[self.depth]]
else:
self.current_node = None
def seek(self,seekKey=0):
if(self.current_node!=None and seekKey!=None):
while(self.current_node<seekKey):
if(self.itr_atEnd()!=True):
self.next()
else:
break
if(self.itr_atEnd()==True):
break
def itr_atEnd(self):
if (self.depth_idx[self.depth]==len(self.depth_nodes[self.depth])):
return True
else:
return False
def open(self):
if(self.depth==-1):
self.depth+=1
self.current_node = (self.depth_nodes[self.depth])[self.depth_idx[self.depth]]
elif(self.depth==0):
neighbors = sorted(list(self.G.neighbors(self.current_node)))
neighbors = [i for i in neighbors if i>self.current_node]
self.depth+=1
if(len(neighbors)==0):
self.current_node=None
self.depth_nodes[self.depth] = neighbors
else:
self.depth_nodes[self.depth] = neighbors
self.current_node = (self.depth_nodes[self.depth])[0]
#print(self.depth)
def up(self):
self.depth_idx[self.depth] = 0
self.depth -= 1
#print(self.depth)
if(self.depth==-1):
self.current_node = None
else:
self.current_node = (self.depth_nodes[self.depth])[self.depth_idx[self.depth]]
def depth(self):
return self.depth
def get_end(self):
return self.atEnd
# ## Leapfrog join
# In[99]:
class leapfrog_join:
"""
Parameters:
iterators: list of iterators that this join is using
k: number of iterators
p: index of current using iterator
atEnd: whether this join is finished
depth: indicates the variable that this leapfrog join is care about.
Suppose do triejoin between R(a,b), S(b,c), T(a,c), we have depth 0 for a, 1 for b, 2 for c
"""
def __init__(self,iterators,depth):
self.iterators = iterators
self.k = len(iterators)
self.p = 0
self.atEnd = False
self.depth = depth
#After init, the first result is received
def leapfrog_init(self):
for it in self.iterators:
if(it.itr_atEnd()==True):
self.atEnd = True
return None
count = -1
for it in self.iterators:
count += 1
if(it.key()==None):
print("false",count)
self.atEnd = False
self.iterators = sorted(self.iterators,key=lambda itr: itr.key())
self.p = 0
return self.leapfrog_search()
def leapfrog_search(self):
max_key = self.iterators[(self.p-1)%self.k].key()
done = False
while(done != True):
least_key = self.iterators[self.p].key()
if(least_key==max_key):
done = True
return max_key
else:
self.iterators[self.p].seek(max_key)
if(self.iterators[self.p].itr_atEnd()):
return None
else:
max_key = self.iterators[self.p].key()
self.p = (self.p+1)%self.k
#After the first result, repeated call leapfrog_next() to get following results.
def leapfrog_next(self):
self.iterators[self.p].next()
if(self.iterators[self.p].itr_atEnd()):
self.atEnd = True
return None
else:
self.p = (self.p+1)%self.k
return self.leapfrog_search()
#Locate the iterators at specific key
def leapfrog_seek(self,seekKey):
self.iterators[self.p].seek(seekKey)
if(self.iterators[self.p].itr_atEnd()):
self.atEnd = True
return None
else:
self.p = (self.p+1)%self.k
return self.leapfrog_search()
def iterators_open(self):
for itr in self.iterators:
itr.open()
def iterators_up(self):
for itr in self.iterators:
itr.up()
def get_depth(self):
return self.depth
# ## Leapfrog Triejoin
# In[100]:
#join of R(a,b), S(b,c), T(a,c)
#in our problem, it is the join of E,E,E
class leapfrog_triejoin:
def __init__(self,G):
#Create 3 iterators, each corresponds to R, S, T
self.iterators = [iterator(G),iterator(G),iterator(G)]
self.depth = -1
#Create 3 leapfrog join instances, each corresponds to R&T,R&S,S&T(& represents join)
self.leapfrog_join1 = leapfrog_join([self.iterators[0],self.iterators[2]],depth=0)
self.leapfrog_join2 = leapfrog_join([self.iterators[0],self.iterators[1]],depth=1)
self.leapfrog_join3 = leapfrog_join([self.iterators[1],self.iterators[2]],depth=2)
self.leapfrog_joins = [self.leapfrog_join1,self.leapfrog_join2,self.leapfrog_join3]
def triejoin_up(self):
for lpfj in self.leapfrog_joins:
if (lpfj.get_depth()==self.depth):
lpfj.iterators_up()
self.depth -= 1
def triejoin_open(self):
self.depth += 1
for lpfj in self.leapfrog_joins:
if (lpfj.get_depth()==self.depth):
lpfj.iterators_open()
return lpfj.leapfrog_init()
def run(self):
triangles = []
#itr0->a,itr2->a
a = self.triejoin_open()
while(a!=None):
#itr0->b,itr1->b
b = self.triejoin_open()
while(b!=None):
#itr1->c,itr2->c
c = self.triejoin_open()
while(c!=None):
triangles.append((a,b,c))
c = self.leapfrog_joins[2].leapfrog_next()
self.triejoin_up()
b = self.leapfrog_joins[1].leapfrog_next()
self.triejoin_up()
a = self.leapfrog_joins[0].leapfrog_next()
return triangles
# In[121]:
#We run each algorithm for each dataset 100 times, and record the time cost
def _benchmark_dataset(loader, repeats=100):
    """Time Chiba-Nishizeki and the leapfrog triejoin (WCOJ) on one dataset.

    loader: zero-argument function returning the graph to benchmark.
    Returns [cn_times, wcoj_times] — two lists of `repeats` wall-clock
    durations in seconds, in the same order the original code used.
    """
    cn_times = []
    lpfj_times = []
    G = loader()
    for _ in range(repeats):
        # record Chiba-Nishizeki runtime
        start_time = time.time()
        Chiba_Nishizeki(G)
        cn_times.append(time.time() - start_time)
        # record WCOJ runtime.
        # BUG FIX: the old code only constructed leapfrog_triejoin(G) and so
        # timed object construction, never the join itself; .run() executes it.
        start_time = time.time()
        leapfrog_triejoin(G).run()
        lpfj_times.append(time.time() - start_time)
    return [cn_times, lpfj_times]
#We run each algorithm for each dataset 100 times, and record the time cost
def main():
    """Benchmark both triangle-counting algorithms on the three datasets.

    Returns t = [ca_t, gs_t, wb_t], where each entry is
    [chiba_nishizeki_times, wcoj_times] for ca-GrQc, github-social and
    web-BerkStan respectively (same structure as before).
    """
    ca_t = _benchmark_dataset(load_ca_grqc)
    gs_t = _benchmark_dataset(load_github_social)
    wb_t = _benchmark_dataset(load_web_berkstan)
    return [ca_t, gs_t, wb_t]
# In[ ]:
if __name__ == "__main__":
    # Run the benchmark and summarise the mean runtime per dataset.
    t = main()
    cn_mean = []
    lpfj_mean = []
    for t_i in t:
        # t_i = [chiba_nishizeki_times, wcoj_times] for one dataset
        cn_i = t_i[0]
        lpfj_i = t_i[1]
        print("CN mean:",np.mean(cn_i))
        print("lpfj mean:",np.mean(lpfj_i))
        cn_mean.append(np.mean(cn_i))
        lpfj_mean.append(np.mean(lpfj_i))
    names = ['ca-GrQc', 'github social', 'web-berkstan']
    plt.figure(figsize=(12, 8))
    # NOTE(review): both bar series share the same x positions, so the second
    # call paints over the first and a taller front bar can hide the back one;
    # side-by-side offsets would be clearer — confirm the intended look.
    plt.bar(names, cn_mean,label='Chiba-Nishizeki')
    plt.bar(names,lpfj_mean,label='WCOJ')
    plt.suptitle('Average run time')
    plt.legend()
    plt.show()
| wxEdward/cs599hw1 | script/problem3.py | problem3.py | py | 10,715 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
... |
13217323943 |
# coding: utf-8
# In[ ]:
# In[29]:
#起始標準起手式~
import requests as rq
from bs4 import BeautifulSoup as bs
from collections import OrderedDict as od
import json
import csv
import traceback as tb
import re
# In[30]:
HOST = 'http://www.dodocook.com/recipe/'
# In[31]:
#def dodocook_crawler(開始文章ID,結束文章ID)
def dodocook_crawler(no_start_page,no_stop_page):
    """Crawl dodocook recipe pages with ids in [no_start_page, no_stop_page).

    Delegates URL construction (and everything downstream) to url_builder.
    """
    # Base URL of the recipe site; intentionally local so the crawler is
    # self-contained even without the module-level HOST constant.
    recipe_host = 'http://www.dodocook.com/recipe/'
    url_builder(recipe_host, no_start_page, no_stop_page)
# In[32]:
#網址產生器
#url_builder(網站URL, 開始文章ID, 結束文章ID)
def url_builder(HOST,no_start_page,no_stop_page):
    """Build one recipe URL per article id and hand each to res_and_soup."""
    for page_id in range(no_start_page, no_stop_page):
        page_url = "{}{}/".format(HOST, page_id)
        print("[INFO] {}".format(page_url))
        res_and_soup(page_url)
# In[33]:
#就字面意思
def res_and_soup(url):
    """Fetch *url*, parse it with lxml and pass the soup to dodocook_contents.

    Any failure (network, parse or scraping error) is logged with a traceback
    instead of aborting the whole crawl.
    """
    try:
        res = rq.get(url)
        soup = bs(res.text, 'lxml')
        print("[INFO] success")
        dodocook_contents(soup)
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; catch Exception so those still propagate.
    except Exception:
        print("[Error] Error while getting contents")
        tb.print_exc()
# In[34]:
def dodocook_contents(soup):
    """Extract title, ingredients and steps from one recipe page and print them.

    Builds a dict with keys:
      - "title":    recipe title text
      - "foodName": list of one-entry dicts {ingredient name: amount}
      - "step":     list of one-entry dicts {step number: step description}
    """
    mydict = {}
    # title (last matching h1 wins, as before)
    for tag1 in soup.select(".band > h1"):
        mydict["title"] = tag1.text
    # ingredients: pair each name with the amount at the same position.
    # BUG FIX: the old code called soup.select(...).index(tag2) inside the
    # loop, which re-ran the CSS select every iteration (O(n^2)) and paired
    # duplicate ingredient names with the wrong amount; enumerate fixes both.
    foodName = []
    counts = soup.select(".ingredient > .body > .list > .count")
    for n, tag2 in enumerate(soup.select(".body > .list > .name")):
        foodName.append({tag2.text: counts[n].text})
    mydict["foodName"] = foodName
    # steps: step number paired with the step text at the same position
    step1 = []
    step_go1 = soup.select('.steps > .CSrtB > .SBpma')
    for n1, tag3 in enumerate(soup.select('.steps > .CSrtB > .Sno')):
        step1.append({tag3.text: step_go1[n1].text})
    mydict["step"] = step1
    print(mydict)
# In[35]:
dodocook_crawler(40000,40010)  # kick off the crawl for article ids 40000-40009
# In[ ]:
| nick800608/TeamProject-FoodRecipe | dodocook_cralwer.py | dodocook_cralwer.py | py | 2,435 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 63,
"usage_type": "call"
}
] |
30389595402 | import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import csv
FILE_PATH = "C:\\data\\fh_mal_train"
GRAPH_PATH = "C:\\code\\count_mal.pickle"
def get_size(start_path = '.'):
    """Recursively total the byte size and number of files under *start_path*.

    Returns a (total_bytes, file_count) tuple.
    """
    size_sum, file_count = 0, 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        for name in filenames:
            size_sum += os.path.getsize(os.path.join(dirpath, name))
            file_count += 1
    return size_sum, file_count
def haha(start_path):
    """Scan start_path/<year>/<month>/<day>/total directories.

    Returns a list of [year, month, day, (total_bytes, file_count)] records,
    one per day directory that contains a 'total' subdirectory. Non-directory
    entries at any level are skipped.
    """
    cal = list()
    for year in os.listdir(start_path):
        year_dir = os.path.join(start_path, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            for day in os.listdir(month_dir):
                day_dir = os.path.join(month_dir, day)
                if not os.path.isdir(day_dir):
                    continue
                total_dir = os.path.join(day_dir, 'total')
                if os.path.isdir(total_dir):
                    # BUG FIX: list(year, month, day, ...) raised TypeError
                    # (list() takes one iterable); build a list literal. The
                    # record also no longer shadows this function's name.
                    cal.append([year, month, day, get_size(total_dir)])
    return cal
def draw_graph(gpath):
    """Plot the per-day data sizes stored in 'count_mal.pickle'.

    NOTE(review): *gpath* is unused — the pickle path is hard-coded; confirm
    whether the parameter was meant to be opened instead.
    """
    # BUG FIX: the file handle was never closed; use a context manager.
    with open('count_mal.pickle','rb') as f:
        dirdata = pickle.load(f)
    x = list()
    y = list()
    for t in dirdata:
        # t = [year, month, day, (size, count)]
        x.append(t[2])
        y.append(t[3][0])
    print(dirdata)
    plt.xlabel("day")
    plt.ylabel("data size")
    plt.plot(y)
    plt.show()
def writecsv(gpath):
    """Export the pickled per-day stats to haha.csv.

    Reads 'count_mal.pickle' ([year, month, day, (size, count)] records),
    sorts them, strips the 4-character year prefix from the day field and
    writes year/month/date/size/count rows.
    NOTE(review): *gpath* is unused — both file paths are hard-coded.
    """
    # BUG FIX: both file handles were left open (the CSV might never be
    # flushed); context managers close and flush them deterministically.
    with open('count_mal.pickle', 'rb') as f2:
        dirdata = sorted(pickle.load(f2))
    with open('haha.csv','w',encoding='utf-8',newline='') as f:
        wr = csv.writer(f)
        wr.writerow(['year','month','date','size','count'])
        for t in dirdata:
            t[2]=t[2][4:]  # day is stored as 'YYYYDD...'; drop the year prefix
            wr.writerow([t[0],t[1],t[2],t[3][0],t[3][1]])
#print(haha(GRAPH_PATH))
#draw_graph(GRAPH_PATH)
writecsv(GRAPH_PATH)  # export the pickled per-day stats to haha.csv (arg currently unused)
| siklee/mal_deep | check_file.py | check_file.py | py | 2,063 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number"... |
34036855952 | # Задание №1
# Task 1: salary = hours * rate + bonus, read from the command line.
from sys import argv
# NOTE(review): argv[0] is the script path, so this unpack expects exactly
# TWO command-line arguments and treats the script name as hours_worked_out;
# int() on it will then raise. Likely argv[0] should be skipped — confirm
# the intended invocation.
hours_worked_out, rate_for_the_time, prize = argv
hours_worked_out = int(hours_worked_out)
rate_for_the_time = int(rate_for_the_time)
prize = int(prize)
result = int(hours_worked_out * rate_for_the_time + prize)
print(f"Зраработная плата сотрудника составит - {result}")
# Task 2: keep elements that are greater than the element before them.
my_list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
new_list = []
num = 0
for num, el in enumerate(my_list):
    # index num-1 wraps to the LAST element for the first comparison
    if my_list[num - 1] < my_list[num]:
        new_list.append(el)
    num += 1  # no effect: enumerate reassigns num on the next iteration
print(new_list)
# Task 3: numbers in [20, 240) divisible by 20 or 21.
print(f"Результат - {[el for el in range(20, 240) if el % 20 == 0 or el % 21 == 0]}")
# Task 4: keep only the elements that occur exactly once.
my_list = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]
new_list = []
for el in my_list:
    if my_list.count(el) < 2:
        new_list.append(el)
print(new_list)
# Task 5: product of the even numbers in [100, 1000] via reduce.
from functools import reduce
def my_func(prev_el, el):
    # reduce callback: running product of the accumulator and next element
    return prev_el * el
my_list = range(100, 1001)
new_list = []
for el in my_list:
    if el % 2 == 0:
        new_list.append(el)
print(new_list)
print(f"Результат умножения - {reduce(my_func, new_list)}")
# Task 6: demonstrate itertools.count and itertools.cycle.
from itertools import count, cycle
el_count = int(input("Введите целое число с которого начнется итерация: "))
el_count_end = int(input("Введите целое число, до которого будет идти итерация: "))
for el in count(el_count):
    if el > el_count_end:
        break
    else:
        print(el)
el_cycle = input("Введите данные для вывода повторений на экран: ")
print(el_cycle)
el_cycle_num = 1
for el in cycle(el_cycle):
    # print the whole string 9 more times (10 total including the print above)
    if el_cycle_num > 9:
        break
    else:
        print(el_cycle)
        el_cycle_num += 1
# Task 7: infinite generator of factorials, consumed up to n values.
from itertools import count
from math import factorial as fact
def my_func():  # NOTE: redefines Task 5's my_func; from here on this wins
    for el in count(0, 1):
        yield fact(el)
generate = my_func()
x = 0
n = int(input("До какого значения будем генерировать? :"))
for i in generate:
    if x < n:
        print(i)
        x += 1
    else:
        break
| TBidnik/python | lesson_4_hw.py | lesson_4_hw.py | py | 2,236 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_... |
24630926789 | from database.database import Databases
import uuid
import time
class scoreDB(Databases):
    """Data-access layer for user scores, classes and rankings.

    SECURITY NOTE(review): most methods interpolate caller-supplied values
    straight into SQL with f-strings — a SQL-injection risk. They should use
    parameterized queries (as write_user_score and generate_class already
    do). Flagged rather than changed to keep behaviour identical.
    """
    def __init__(self):
        super().__init__()
        # Table names referenced by this layer.
        self.avg_score_table = "avg_score"
        self.realtime_score_table = "realtime_score"
        self.subject_name_table = "subject"
        self.user_subject_rel = "user_subject"
    def return_all(self):
        """Print every user_class_rel row (debug helper, no return)."""
        query = "SELECT * FROM public.user_class_rel"
        row = self.execute(query)
        print(row)
    def search_AVG(self,userid):
        """Return (and print) the ranking rows for one user."""
        query = f"SELECT * FROM public.userrank WHERE userid = '{userid}'"
        row = self.execute(query)
        print(row)
        return row
    def user_Rank(self):
        """Return per-user average scores grouped by user, highest first."""
        query = "SELECT public.userinfo.userid,username, AVG(score) FROM public.user_class_rel, public.userinfo WHERE public.userinfo.userid = public.user_class_rel.userid GROUP BY public.userinfo.userid ORDER BY AVG(score) DESC"
        row = self.execute(query)
        return row
    def class_Rank(self):
        """Print per-class average scores, highest first (no return value)."""
        query = "SELECT classid, AVG(score) FROM public.user_class_rel GROUP BY classid ORDER BY AVG(score) DESC"
        row = self.execute(query)
        print(row)
    def search_subject_classname(self, classname):
        """Print the class rows matching *classname* (existence check).

        NOTE(review): the docstring of the original claimed a True/False
        return, but the method returns None — confirm callers.
        """
        query = f"SELECT * FROM public.class where classname = '{classname}'"
        row = self.execute(query)
        print(row)
    def write_user_score(self, userid, classid, score):
        """Insert a score record (parameterized query) and commit."""
        # NOTE(review): createdAt is computed but unused — the SQL stores NOW().
        createdAt = time.localtime(time.time())
        id = str(uuid.uuid4().hex)
        query = "INSERT INTO user_class_rel(id, userid, classid, score, createdAt) VALUES (%s, %s, %s, %s, NOW()) RETURNING id"
        stocked = (id, userid, classid, score)
        row = self.execute(query, stocked)
        print(row)
        self.commit()
    def delete_class(self, classid, userid):
        """Delete a class; only its creator may do so.

        Returns True when deleted, False when *userid* is not the creator.
        """
        class_generator = self.execute(f"SELECT generatorid FROM class WHERE classid='{classid}'")[0][0]
        if userid != class_generator:
            return False
        else:
            query = f"DELETE FROM class WHERE classid = '{classid}' RETURNING classid"
            row = self.execute(query)
            self.commit()
            return True
    def std_summary(self, classid):
        """Return per-student average scores for one class, highest first."""
        query = f"SELECT u.userid,u.username,u.useremail, AVG(score) as avgs FROM public.user_class_rel as r JOIN public.userinfo as u ON r.userid = u.userid WHERE r.classid='{classid}' GROUP BY u.userid ORDER BY avgs DESC;"
        row = self.execute(query)
        return row
    def search_user_subject(self, userid, classid):
        """Print a user's score rows for one class (no return value)."""
        query = f"SELECT score FROM public.user_class_rel WHERE userid = '{userid}' AND classid = '{classid}';"
        row = self.execute(query)
        print(row)
    def subject_AVG(self, classid):
        """Return the average score of one class."""
        query = f"SELECT classid, avg(score) FROM public.user_class_rel WHERE classid = '{classid}' GROUP BY classid ;"
        row = self.execute(query)
        return row
    def class_search(self, que):
        """Search classes by creator name or class name (case-insensitive)."""
        query = f"SELECT c.classid, c.classname, u.username, u.userid FROM public.class as c JOIN public.userinfo as u ON c.generatorid = u.userid where username ilike '%%{que}%%' OR classname ilike '%%{que}%%'"
        print(query)
        row = self.execute(query)
        return row
    def my_classes(self, userid):
        """Return the classes created by *userid*."""
        query = f"SELECT c.classid, c.classname, u.username, u.userid FROM public.class as c JOIN public.userinfo as u ON c.generatorid = u.userid WHERE u.userid = '{userid}'"
        print(query)
        row = self.execute(query)
        return row
    def class_detail(self, classid):
        """Return one class joined with its creator's user info."""
        query = f"SELECT c.classid, c.classname, u.username, u.userid, u.useremail FROM public.class as c JOIN public.userinfo as u ON c.generatorid = u.userid where classid ='{classid}'"
        row = self.execute(query)
        return row
    def generate_class(self, generatorid, classname):
        """Create a class with a fresh uuid-based id; returns the new classid."""
        classid = str(uuid.uuid4().hex)
        query = "INSERT INTO public.class(classid, generatorid, classname, isopen) VALUES (%s, %s, %s, %s) RETURNING classid"
        stocked = (classid, generatorid, classname, '1')
        row = self.execute(query, stocked)
        self.commit()
        return row
if __name__ == "__main__":
udb = scoreDB()
udb.return_all() | kojunseo/TTancent | web/database/score.py | score.py | py | 5,238 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "database.database.Databases",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "time.localtime",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
... |
29612486523 | from collections import deque
# recursive solution
""" runtime O(n), space O(n) """
def getHeight(root):
    """Return the number of nodes on the longest root-to-leaf path.

    An empty tree has height 0. Runtime O(n), space O(n) (recursion depth).
    """
    if not root:
        return 0
    left_height = getHeight(root.left)
    right_height = getHeight(root.right)
    return max(left_height, right_height) + 1
# max height of the tree should be max height of stack
""" runtime O(n), space O(n) """
def getHeight_iter(root):
    """Iterative height: count the levels of a breadth-first traversal.

    An empty tree has height 0. Runtime O(n), space O(n) (widest level).
    """
    if not root:
        return 0
    levels = 0
    frontier = deque([root])
    while frontier:
        # drain exactly one level, queueing its children for the next one
        for _ in range(len(frontier)):
            node = frontier.popleft()
            if node.left:
                frontier.append(node.left)
            if node.right:
                frontier.append(node.right)
        levels += 1
    return levels
| jungwook-lee/coding-practice | tree/getHeight.py | getHeight.py | py | 700 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
}
] |
17701301018 | from cgi import print_environ
import torch
from collections import OrderedDict
from maml import MAML
def adaptation(model, optimizer, train_x,train_y, val_x,val_y, loss_fn, lr, train_step, train, device):
    """One MAML meta-step: inner-loop adapt on the support set (train_x,
    train_y), evaluate on the query set (val_x, val_y) and — when train=True —
    backprop the query loss through the adapted weights into the
    meta-parameters via `optimizer`.

    Returns (query_loss, batch_accuracy).
    NOTE(review): the outer `for idx in range(train_x.size(0))` loop never
    indexes with idx — every iteration adapts on the FULL batch; confirm
    whether per-sample adaptation was intended.
    """
    predictions = []
    labels = []
    epoch_loss = 0
    for idx in range(train_x.size(0)):
        # fast weights start from the current meta-parameters each iteration
        weights = OrderedDict(model.named_parameters())
        # inner loop: train_step gradient steps on the support set
        for iter in range(train_step):
            logits = model.adaptation(train_x, weights)
            loss = loss_fn(logits, train_y)
            # create_graph only while meta-training so second-order
            # gradients can flow back through the inner updates
            gradients = torch.autograd.grad(loss, weights.values(), create_graph=train)
            weights = OrderedDict((name, param - lr * grad) for ((name, param), grad) in zip(weights.items(), gradients))
        # query pass with the adapted (fast) weights
        logits = model.adaptation(val_x, weights)
        loss = loss_fn(logits, val_y)
        if train:
            model.train()
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            optimizer.step()
        y_pred = logits.softmax(dim=1)
        predictions.append(y_pred)
        # NOTE(review): accuracy compares query predictions (from val_x)
        # against train_y, not val_y — confirm this is intended.
        labels.append(train_y)
    y_pred = torch.cat(predictions)
    y_label = torch.cat(labels)
    batch_acc = torch.eq(y_pred.argmax(dim=-1), y_label).sum().item() / y_pred.shape[0]
    return loss, batch_acc
def test(model, test_x, test_y, val_x, val_y, loss_fn, lr, train_step, device):
    """Evaluation counterpart of adaptation(): adapt on (test_x, test_y),
    then score (val_x, val_y) under torch.no_grad().

    Returns (query_loss, batch_accuracy, predicted) where `predicted` holds
    the argmax classes from the LAST outer-loop iteration only.
    NOTE(review): as in adaptation(), the outer loop never uses idx, so each
    iteration repeats the same full-batch computation — confirm intent.
    """
    predictions = []
    labels = []
    for idx in range(test_x.size(0)):
        weights = OrderedDict(model.named_parameters())
        # inner loop: train_step gradient steps on the adaptation set
        for iter in range(train_step):
            logits = model.adaptation(test_x, weights)
            loss = loss_fn(logits, test_y)
            # no create_graph at test time — first-order updates only
            gradients = torch.autograd.grad(loss, weights.values())
            weights = OrderedDict((name, param - lr * grad) for ((name, param), grad) in zip(weights.items(), gradients))
        # query evaluation with the adapted weights, gradients disabled
        with torch.no_grad():
            logits = model.adaptation(val_x, weights)
            loss = loss_fn(logits, val_y)
            y_pred = logits.softmax(dim=1)
            s, predicted = torch.max(y_pred, 1)
            predictions.append(y_pred)
            # NOTE(review): labels come from test_y while predictions come
            # from val_x — confirm this pairing (mirrors adaptation()).
            labels.append(test_y)
    y_pred = torch.cat(predictions)
    y_label = torch.cat(labels)
    batch_acc = torch.eq(y_pred.argmax(dim=-1), y_label).sum().item() / y_pred.shape[0]
return loss, batch_acc ,predicted | fukitani/MAML_AD | train.py | train.py | py | 3,833 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.autograd.grad",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "colle... |
39556047469 | # This is the model that agent acts on.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import math
class DownsampleB(nn.Module):
    """Parameter-free projection shortcut: average-pool spatially, then
    double the channel count by concatenating zeros (option-B shortcut).

    NOTE: nIn/nOut are accepted for signature compatibility but the channel
    count is always doubled regardless of their values.
    """
    def __init__(self, nIn, nOut, stride=2):
        super(DownsampleB, self).__init__()
        # spatial downsampling only; channels are padded in forward()
        self.avg = nn.AvgPool2d(stride)
    def forward(self, x):
        pooled = self.avg(x)
        zero_pad = pooled * 0
        # concatenate along the channel dimension: [pooled | zeros]
        return torch.cat((pooled, zero_pad), 1)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (a BatchNorm follows it)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
# No projection: identity shortcut
class BasicBlock(nn.Module): # standard
    """Residual block body: conv-bn-relu-(relu-conv)-bn, WITHOUT the shortcut.

    The residual addition and final activation are performed by the caller
    (ResNet.forward), which is why forward() returns the raw bn2 output.
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        # second conv is preceded by its own in-place ReLU
        self.conv2 = nn.Sequential(nn.ReLU(True), conv3x3(planes, planes))
        self.bn2 = nn.BatchNorm2d(planes)
    def forward(self, x):
        hidden = self.bn1(self.conv1(x))
        hidden = F.relu(hidden)
        return self.bn2(self.conv2(hidden))
class ResNet(nn.Module):
    """ResNet backbone whose residual blocks can be individually bypassed.

    forward(x, policy): `policy[:, t]` in {0, 1} selects, for block t, between
    the block's output (1) and the shortcut/residual alone (0); with
    policy=None every block is executed normally. Used by an external agent
    that decides which blocks to keep per input.
    """
    def __init__(self, block, layers, num_class=10): # layers means blocks per layer.
        super(ResNet, self).__init__()
        factor = 1
        self.in_planes = int(32 * factor)
        self.conv1 = conv3x3(3, int(32 * factor)) # first layer 3 channels.
        self.bn1 = nn.BatchNorm2d(int(32 * factor))
        self.relu = nn.ReLU(inplace=True)
        strides = [2, 2, 2] # strides for the downsampling.
        filt_sizes = [64, 128, 256] # 64 depth, then 128 then 256
        self.blocks, self.ds = [], []
        for idx, (filt_size, num_blocks, stride) in enumerate(zip(filt_sizes, layers, strides)): # create main blocks.
            blocks, ds = self._make_layer(block, filt_size, num_blocks, stride=stride)
            self.blocks.append(nn.ModuleList(blocks))
            self.ds.append(ds)
        self.blocks = nn.ModuleList(self.blocks)
        self.ds = nn.ModuleList(self.ds)
        self.bn2 = nn.Sequential(nn.BatchNorm2d(int(256 * factor)), nn.ReLU(True))
        self.avgpool = nn.AdaptiveAvgPool2d(1) # global average pool to 1x1 spatial size
        self.linear = nn.Linear(int(256 * factor), num_class)
        self.layer_config = layers
        for m in self.modules(): # initialize values for everything.
            if isinstance(m, nn.Conv2d):
                # He/Kaiming-style normal init for conv weights
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def seed(self, x):
        """Stem: first conv + batch-norm (no activation here)."""
        x = self.bn1(self.conv1(x))
        return x
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage: `blocks` residual blocks plus its downsample shortcut.

        Returns (list_of_blocks, downsample_module); the downsample is an
        identity nn.Sequential() when no projection is needed.
        """
        downsample = nn.Sequential()
        if stride != 1 or self.in_planes != planes * block.expansion: # more out layers than in?
            downsample = DownsampleB(self.in_planes, planes * block.expansion, 2) # downsample it.
        layers = [block(self.in_planes, planes, stride)]
        self.in_planes = planes * block.expansion # reset the input planes.
        for i in range(1, blocks):
            layers.append(block(self.in_planes, planes)) # add the rest of the blocks.
        return layers, downsample
    def forward(self, x, policy=None): # go forward with the policy given by the agent.
        t = 0 # Changed to work to delete blocks instead
        x = self.seed(x)
        if policy is not None:
            for segment, num_blocks in enumerate(self.layer_config): # segment just i=0,i++, num_blocks is # per layer.
                for b in range(num_blocks): # for each block
                    action = policy[:, t].contiguous() # makes the elements contiguous (exist as if it were made)
                    # as it is not have its meta information changed. Not having this might cause errors.
                    # from this we get 1's and zeros, as to whether to use fine tune or not.
                    # here we are just getting the t'th element, so what to do for this given layer.
                    action_mask = action.float().view(-1, 1, 1, 1) # 1 = use frozen, 0 = finetune.
                    # .float converts to float... it already is one. action_mask: tensor([[[[0.]]]], device='cuda:0')
                    residual = self.ds[segment](x) if b == 0 else x # downsample on first of layer.
                    output = self.blocks[segment][b](x) # run the block on x.
                    f1 = F.relu(residual + output) # Using the block
                    x = f1 * (action_mask) + residual*(1-action_mask) # decide which to use.
                    t += 1 # go for each layer
        else:
            for segment, num_blocks in enumerate(self.layer_config): # no policy
                for b in range(num_blocks):
                    residual = self.ds[segment](x) if b == 0 else x # fine tune layers
                    output = self.blocks[segment][b](x)
                    x = F.relu(residual + output)
                    t += 1
        x = self.bn2(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
def resnet26(num_class=10, blocks=BasicBlock):
    """Build the 26-layer variant: three stages of 4 residual blocks each."""
    return ResNet(blocks, [4, 4, 4], num_class)
| chrisVat/GumDrop | network.py | network.py | py | 5,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.AvgPool2d",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
3106439706 | import argparse
import json
import os
import re
import time
from simplex_sdk import SimplexClient
from car_rewrite_model.model import CarRewriteSynonymsReplace, CarRewriteBaseKeywordsNewProcess
# phrases_before_colon_file='/data/share/liuchang/car_rewirte_compare/remove_words'
# with open(phrases_before_colon_file, 'r', encoding='utf8') as f:
# phrases = f.readlines()
# phrases = [n.strip() for n in phrases]
# colon_phrase_pattern = r'\b(?:{})[ \t]*[::]'.format('|'.join(phrases))
def change_position_by_colon(txt,colon_phrase_pattern):
    """Split *txt* on the colon-phrase pattern and move the final segment
    to the front, joining segments with a single space.

    If fewer than two non-blank segments remain, *txt* is returned unchanged.
    """
    segments = [seg for seg in re.split(colon_phrase_pattern, txt) if seg.strip()]
    if len(segments) < 2:
        return txt
    reordered = [segments[-1]] + segments[:-1]
    return ' '.join(reordered)
def get_result(txt_list, domain_list, model,batch_size=32):
    """Package (content, domain) pairs as id'd dicts and run model.predict
    over them in batches of *batch_size*.

    Returns the concatenated prediction list.
    """
    inputs = [
        {'id': i, 'content': txt.strip(), 'domain': domain}
        for i, (txt, domain) in enumerate(zip(txt_list, domain_list))
    ]
    result = []
    num_batches = (len(inputs) + batch_size - 1) // batch_size  # ceil division
    for batch_idx in range(num_batches):
        print('call num: {}'.format(batch_idx))
        batch = inputs[batch_idx * batch_size:(batch_idx + 1) * batch_size]
        result.extend(model.predict(batch))
    return result
def synonyms_rewrite_model(**kwargs):
    """Build the synonym-replacement rewrite model from fixed resource paths.

    Extra keyword arguments are forwarded to CarRewriteSynonymsReplace.
    NOTE(review): the local oss:// paths are the commented alternatives below.
    """
    pos_model = '/data/share/liuchang/car_articles/pos.model'
    phrases_before_colon = '/data/share/liuchang/car_rewirte_compare/remove_words'
    forbidden_output_file='/data/share/liuchang/train_bert_lm/forbidden_output'
    antonym_dict_file='/data/share/liuchang/train_bert_lm/merged_antonym_dict.json'
    # pos_model = 'oss://modelzoo/dev/pos_model_for_car_rewrite/pos.model'
    # phrases_before_colon = 'oss://modelzoo/dev/car_rewrites/phrases_before_colon'
    model = CarRewriteSynonymsReplace(pos_model_path=pos_model, all_mask=False,
                                      phrases_before_colon=phrases_before_colon,
                                      antonym_dict_file=antonym_dict_file,
                                      forbidden_output_file=forbidden_output_file,
                                      **kwargs)
    return model
def keywords_rewirte_model(**kwargs):
    """Build the keyword-based rewrite model from fixed resource paths.

    Extra keyword arguments are forwarded to CarRewriteBaseKeywordsNewProcess.
    NOTE(review): 'rewirte' in the name is a typo kept for compatibility —
    the name is referenced by string-free lookup in __main__.
    """
    data_dir = '/data/share/liuchang/car_rewirte_compare/keywords'
    vocab_path = os.path.join(data_dir, 'vocab.txt')
    keywords_path = os.path.join(data_dir, 'keywords.txt')
    idf_json_file = os.path.join(data_dir, 'idf_scores_dict.json')
    high_freq_token_file = os.path.join(data_dir, 'high_freq_words.txt')
    model = CarRewriteBaseKeywordsNewProcess(vocab_path=vocab_path,
                                             keywords_path=keywords_path,
                                             idf_json_path=idf_json_file,
                                             high_freq_token_path=high_freq_token_file,
                                             **kwargs)
    return model
if __name__ == '__main__':
    # CLI: choose rewrite model, whether to reposition colon phrases, and input file.
    parser = argparse.ArgumentParser()
    parser.add_argument("model",
                        choices=["keywords", "synonym", "mass", 'mask'],
                        help="Run type.")
    parser.add_argument("--change_position",
                        default=True)
    parser.add_argument("--input",
                        default='/data/share/liuchang/car_rewirte_compare/synonyms/local_test/raw_10_80_p3_p10')
    args = parser.parse_args()
    print(args)
    input_file = args.input
    # input_file = '/data/share/liuchang/car_rewirte_compare/laosiji_examples_5000_txt'
    with open(input_file, 'r', encoding='utf8') as f:
        txts = f.readlines()
    txts = [n.strip() for n in txts]
    # per-line domain labels; padded with '' so zip covers every text line
    domain_file = '/data/share/liuchang/car_rewirte_compare/domain_5000'
    with open(domain_file, 'r', encoding='utf8') as f:
        domain_list = f.readlines()
    domain_list = [n.strip() for n in domain_list]
    domain_list=domain_list+['']*(len(txts)-len(domain_list))
    # NOTE(review): only 'keywords' and 'synonym' are mapped here although
    # argparse also accepts 'mass'/'mask' — those choices raise KeyError.
    name_to_model = {
        'keywords': keywords_rewirte_model,
        'synonym': synonyms_rewrite_model
    }
    model = name_to_model[args.model](change_position=args.change_position,
                                      w2v_path='/data/share/liuchang/car_articles/w2v/20epoch_cut_spm/w2v.model')
    # model=SimplexClient('CarRewriteBaseKeywords')
    batch_size = max(64,len(txts)//100)
    result = get_result(txts, domain_list, model, batch_size)
    out_dir = '/data/share/liuchang/car_rewirte_compare/synonyms/local_test'
    # flatten token lists so the JSON dump stays compact
    for d in result:
        for word in d['rewrite_tokens']:
            d['rewrite_tokens'][word]=','.join(d['rewrite_tokens'][word])
    json_str = json.dumps(result, ensure_ascii=False, indent=2)
    suffix = 'w2v_rewrites'
    basename=os.path.basename(input_file)
    # write full structured output as JSON ...
    json_output = os.path.join(out_dir,basename + '_{}.json'.format(suffix))
    with open(json_output, 'w', encoding='utf8') as f:
        f.write(json_str)
    # ... and a human-readable original-vs-rewrite comparison file
    compare_output = os.path.join(out_dir,basename + '_{}'.format(suffix))
    with open(compare_output, 'w',
              encoding='utf8') as f:
        for raw, n in zip(txts, result):
            f.write('content: ' + raw + '\n')
            f.write('rewrite: ' + n['rewrite_content'] + '\n\n')
| flyliu2017/car_rewrite_model | tests/local_predict.py | local_predict.py | py | 5,639 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "car_rewrite_model.model.CarRewriteSynonymsReplace",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name"... |
36631030239 | import argparse
from pathlib import Path
import numpy as np
from localisation import run_convergence_localisation_on_file, run_localisation_on_file
import Models.models as models
import Utils.constants as const
from plotting import parameter_plot, comparison_plot, plot_evaluation_metric
def rmse(predictions: dict):
    """
    Return the root mean square error for each algorithm.

    predictions maps algorithm name -> list of (actual, predicted) position
    arrays; the error contribution of one pair is the squared Euclidean
    distance between them.
    """
    rmse_results = {}
    # the loop variable is renamed so it no longer shadows the
    # `predictions` parameter (the original rebound it each iteration)
    for algorithm, pairs in predictions.items():
        squared_errors = [np.linalg.norm(actual - prediction)
                          ** 2 for actual, prediction in pairs]
        rmse_results[algorithm] = np.sqrt(np.mean(squared_errors))
    return rmse_results
def mae(predictions: dict):
    """
    Return the Mean Absolute Error for each algorithm.

    predictions maps algorithm name -> list of (actual, predicted) position
    arrays; the error of one pair is the Euclidean distance between them.
    """
    mae_results = {}
    for algorithm, pairs in predictions.items():
        distances = [np.linalg.norm(actual - prediction)
                     for actual, prediction in pairs]
        # BUG FIX: the old code returned sqrt(mean(|e|)), which is neither
        # MAE nor RMSE. MAE is simply the mean absolute error — consistent
        # with how mae_confidence_interval computes it below.
        mae_results[algorithm] = np.mean(distances)
    return mae_results
def mae_confidence_interval(predictions: dict):
    """
    Return, per algorithm, np.round([MAE, 95% confidence half-width], 2).

    The half-width uses the normal approximation: 1.960 * std / sqrt(n).
    """
    mae_conf_results = {}
    # (the old unused `samples` local and the `mae` name that shadowed the
    #  module-level mae() function have been removed)
    for algorithm, pairs in predictions.items():
        distances = [np.linalg.norm(actual - prediction)
                     for actual, prediction in pairs]
        mean_error = np.mean(distances)
        # 1.960 = two-sided z-score for a 95% interval
        half_width = 1.960 * np.std(distances) / np.sqrt(len(distances))
        mae_conf_results[algorithm] = np.round([mean_error, half_width], 2)
    return mae_conf_results
def std(predictions: dict):
    """
    Return, per algorithm, the standard deviation of the absolute errors,
    rounded to 2 decimal places.
    """
    std_results = {}
    # (removed the unused `samples = len(predictions)` local)
    for algorithm, pairs in predictions.items():
        distances = [np.linalg.norm(actual - prediction)
                     for actual, prediction in pairs]
        std_results[algorithm] = np.round(np.std(distances), 2)
    return std_results
def rmse_confidence_interval(predictions:dict):
    """
    Return, per algorithm, np.round([RMSE, 95% confidence half-width], 2).

    The half-width is 1.960 * std(squared errors) / sqrt(n), matching the
    original computation.
    """
    rmse_conf_results = {}
    # (removed the unused `samples` local; renamed the copy-pasted
    #  `mae_conf_results` dict to match what this function computes)
    for algorithm, pairs in predictions.items():
        squared_errors = [np.linalg.norm(actual - prediction) **
                          2 for actual, prediction in pairs]
        rmse_value = np.sqrt(np.mean(squared_errors))
        # 1.960 = two-sided z-score for a 95% interval
        half_width = 1.960 * np.std(squared_errors) / np.sqrt(len(squared_errors))
        rmse_conf_results[algorithm] = np.round([rmse_value, half_width], 2)
    return rmse_conf_results
def initialise_localisation_model(model: const.Model, training_data_filepath: Path, filter: bool = False, prior: const.Prior = None):
    """Instantiate the localisation model matching the *model* enum member.

    filter/prior/cell_size are forwarded only to the model classes that
    accept them. Raises ValueError for unsupported enum members.
    NOTE(review): the `filter` parameter shadows the builtin of the same
    name; kept because callers pass it positionally/by keyword.
    """
    if model is const.Model.GAUSSIAN:
        return models.GaussianProcessModel(training_data_filepath, prior=prior, cell_size=0.25, filter=filter)
    elif model is const.Model.GAUSSIANKNN:
        return models.GaussianKNNModel(training_data_filepath, prior=prior, cell_size=0.25, filter=filter)
    elif model is const.Model.GAUSSIANMINMAX:
        return models.GaussianMinMaxModel(training_data_filepath, prior=prior, cell_size=0.25, filter=filter)
    elif model is const.Model.KNN:
        return models.KNN(training_data_filepath,)
    elif model is const.Model.WKNN:
        return models.WKNN(training_data_filepath, filter=filter)
    elif model is const.Model.PROPOGATION:
        return models.PropagationModel(training_data_filepath, const.PROPAGATION_CONSTANT, filter=filter)
    elif model is const.Model.PROXIMITY:
        return models.ProximityModel(training_data_filepath)
    else:
        raise ValueError("Not a supported model")
def get_localisation_predictions(models: list, training_data_filepath: Path, evaluation_data_filepath: Path,filtering:bool, prior: const.Prior):
    """Run every requested model over the evaluation file.

    Returns {algorithm name: [(actual, predicted), ...]}, sorted by name.
    NOTE(review): the `models` parameter shadows the imported `models`
    module (rebinding it to a dict below), and `predictions` is unbound if
    prior is neither LOCAL nor UNIFORM — confirm those are the only members.
    """
    models = {model.value: initialise_localisation_model(
        model, training_data_filepath, filtering, prior) for model in models}
    if prior is const.Prior.LOCAL:
        # convergence variant: prior updated from previous estimates
        predictions = {name: run_convergence_localisation_on_file(
            evaluation_data_filepath, model, filtering) for name, model in models.items()}
    elif prior is const.Prior.UNIFORM:
        predictions = {name: run_localisation_on_file(
            evaluation_data_filepath, model, filtering) for name, model in models.items()}
    predictions = dict(sorted(predictions.items(), key=lambda x: x[0]))
    return predictions
def run_filter_comparison(models: list, training_data_filepath: Path, evaluation_data_filepath: Path, prior: const.Prior):
    """Run the given models both with and without signal filtering.

    Returns {"Filtered": predictions, "Non-Filtered": predictions}.
    """
    filtered = get_localisation_predictions(
        models, training_data_filepath, evaluation_data_filepath, True, prior)
    non_filtered = get_localisation_predictions(
        models, training_data_filepath, evaluation_data_filepath, False, prior)
    return {"Filtered": filtered, "Non-Filtered": non_filtered}
def predict_all_models(training_data_filepath: Path, evaluation_data_filepath: Path, prior: const.Prior, filtering: bool):
    """Gets position predictions for all the evaluation data, for every model.

    NOTE(review): the call below passes (prior, filtering) positionally into
    get_localisation_predictions' (filtering, prior) slots — swapped relative
    to the parameter names. Current callers (filter_all_models_plot etc.)
    also pass their arguments swapped (e.g. True, Prior.UNIFORM), so the two
    errors cancel at runtime; confirm and clean up the parameter order.
    """
    models = [model for model in const.Model]
    return get_localisation_predictions(models, training_data_filepath, evaluation_data_filepath, prior, filtering)
def filter_all_models_plot(training_data_filepath: Path, evaluation_data_filepath: Path):
    """Compare filtered vs non-filtered MAE (±95% CI) for every model, print a
    table and draw the comparison plot."""
    filtered_predictions = predict_all_models(
        training_data_filepath, evaluation_data_filepath, True, const.Prior.UNIFORM)
    non_filtered_predictions = predict_all_models(
        training_data_filepath, evaluation_data_filepath, False, const.Prior.UNIFORM)
    filtered_results = mae_confidence_interval(filtered_predictions)
    non_filtered_results = mae_confidence_interval(non_filtered_predictions)
    print("algorithm     :  filtered  :  non_filtered")
    for i, algorithm in enumerate(filtered_results.keys()):
        print(
            f"{algorithm:15}: {filtered_results[algorithm][0]:5} ± {filtered_results[algorithm][1]:5} : {non_filtered_results[algorithm][0]:5} ± {non_filtered_results[algorithm][1]:5}")
    comparison_plot(filtered_results, non_filtered_results, "filter")
def prior_all_models_plot(training_data_filepath: Path, evaluation_data_filepath: Path):
    """Compare uniform-prior vs. local-prior MAE for every model, print a table and plot."""
    per_prior = {}
    for prior in (const.Prior.UNIFORM, const.Prior.LOCAL):
        predictions = predict_all_models(
            training_data_filepath, evaluation_data_filepath, True, prior)
        per_prior[prior] = mae_confidence_interval(predictions)
    uniform_results = per_prior[const.Prior.UNIFORM]
    local_results = per_prior[const.Prior.LOCAL]
    print("algorithm : uniform : local")
    for algorithm in uniform_results:
        print(
            f"{algorithm:15}: {uniform_results[algorithm][0]:5} ± {uniform_results[algorithm][1]:5} : {local_results[algorithm][0]:5} ± {local_results[algorithm][1]:5}")
    comparison_plot(local_results, uniform_results, "prior")
def evaluation_metric_plot(training_data_filepath: Path, evaluation_data_filepath: Path):
    """
    Creates the evaluation metric table/plots for rmse and mae.
    Filtering is enabled and a uniform prior is used.
    """
    predictions = predict_all_models(
        training_data_filepath, evaluation_data_filepath, True, const.Prior.UNIFORM)
    mae = mae_confidence_interval(predictions)
    rmse = rmse_confidence_interval(predictions)
    stds = std(predictions)
    print("algorithm : mae : std : rmse ")
    for algorithm in predictions:
        print(
            f"{algorithm:15}: {mae[algorithm][0]:5} ± {mae[algorithm][1]:5} : {stds[algorithm]:5} : {rmse[algorithm][0]:5} ± {rmse[algorithm][1]:5}")
    for metric, metric_name in ((mae, "mae"), (rmse, "rmse")):
        plot_evaluation_metric(metric, metric_name)
def cellsize_plot(training_data_filepath: Path, evaluation_data_filepath: Path):
    """Plot mean absolute error of the Gaussian-process model against cell size.

    Trains one GaussianProcessModel per cell size (0.2 .. 3.9 in 0.1 steps),
    evaluates each on the evaluation data, prints a MAE table and renders the
    parameter plot.
    """
    # A power-of-two range was previously computed here and then immediately
    # overwritten; only this linear range was ever used.
    cell_sizes = np.arange(0.2, 4, step=0.1)
    gaussian_models = {size: models.GaussianProcessModel(
        training_data_filepath, prior=const.Prior.UNIFORM, cell_size=size, filter=True) for size in cell_sizes}
    gaussian_predictions = {name: run_localisation_on_file(
        evaluation_data_filepath, model, False) for name, model in gaussian_models.items()}
    gaussian_mae = mae_confidence_interval(gaussian_predictions)
    print("cell_size : mae ")
    for cell_size in gaussian_predictions:
        print(
            f"{round(cell_size, 2):15}: {gaussian_mae[cell_size][0]:5} ± {gaussian_mae[cell_size][1]:5}")
    parameter_plot(gaussian_mae, "cell_size")
def wk_comparison(training_data_filepath: Path, evaluation_data_filepath: Path):
    """Print a MAE table for WKNN models with k = 1..9 and plot MAE against k."""
    wknn_models = {k: models.WKNN(training_data_filepath, filter=True, k=k)
                   for k in range(1, 10)}
    predictions = {
        k: run_localisation_on_file(evaluation_data_filepath, model, False)
        for k, model in wknn_models.items()
    }
    mae = mae_confidence_interval(predictions)
    print("K : mae ")
    for k in predictions:
        print(f"{k:5}: {mae[k][0]:5} ± {mae[k][1]:5}")
    parameter_plot(mae, "k")
def main():
    """Command-line entry point: dispatch to the evaluation selected by ``mode``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("mode", help="Evaluation Wanted")
    parser.add_argument(
        "training_file", help="The file with the training data in it.")
    parser.add_argument("evaluation_file",
                        help="The file with the evaluation data in it.")
    args = parser.parse_args()
    modes = ["eval", "all", "prior", "filter", "cell_size", "wk"]
    if args.mode not in modes:
        print("Mode should be in " + ",".join(modes))
        return
    training_filepath = Path(args.training_file)
    evaluation_filepath = Path(args.evaluation_file)
    # "eval" and "all" share the metric plot; "all" additionally runs the
    # prior and filter comparisons.  The original mixed a bare `if` into an
    # `elif` chain; the dispatch below is equivalent but uniform.
    if args.mode in ("eval", "all"):
        evaluation_metric_plot(training_filepath, evaluation_filepath)
    if args.mode == "all":
        prior_all_models_plot(training_filepath, evaluation_filepath)
        filter_all_models_plot(training_filepath, evaluation_filepath)
    elif args.mode == "prior":
        prior_all_models_plot(training_filepath, evaluation_filepath)
    elif args.mode == "filter":
        filter_all_models_plot(training_filepath, evaluation_filepath)
    elif args.mode == "cell_size":
        cellsize_plot(training_filepath, evaluation_filepath)
    elif args.mode == "wk":
        wk_comparison(training_filepath, evaluation_filepath)


if __name__ == "__main__":
    main()
| TechTurtle11/ble-geolocation | src/evaluate.py | evaluate.py | py | 10,742 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line... |
30072780652 | # -*- coding: utf-8 -*-
""" Download excel files and transform to correct format in csv files. """
""" Excel files are linked in href attribute of <a> elements in the given URL (Not nested URLs)"""
""" Each station, in stations array, is linked to a numerical code in this file"""
""" Longitude and latitude and location (as descriptive name) are added to each row of each station"""
""" Greek names for date and weekday are translated"""
# Code: thess_env_cityofthess_dailyyearly
# Code with numbering: thess_env_cityofthess_dailyyearly_1, thess_env_cityofthess_dailyyearly_2, thess_env_cityofthess_dailyyearly_3, thess_env_cityofthess_dailyyearly_4, thess_env_cityofthess_dailyyearly_5, thess_env_cityofthess_dailyyearly_6
# Stations (latitude, longitude):
# Egnatia (Στ. ΕΓΝΑΤΙΑΣ): Egnatia and I. Dragoumi (1st Municipal District) (40.63753, 22.94095): thess_env_cityofthess_dailyyearly_1
# Martiou (Στ. 25ης ΜΑΡΤΙΟΥ): 25 March and Karakasi (5th Municipal District) (40.60102, 22.96017): thess_env_cityofthess_dailyyearly_2
# Lagada (Στ. ΛΑΓΚΑΔΑ): Lagada and Koutifari (2nd Municipal District) (40.65233, 22.93514): thess_env_cityofthess_dailyyearly_3
# Eptapyrgio (Στ. ΕΠΤΑΠΥΡΓΙΟΥ): Agia Anastasia and Agrafon (3rd Diamersima) (40.64407, 22.95837): thess_env_cityofthess_dailyyearly_4
# Malakopi (Toumba) (Στ. ΜΑΛΑΚΟΠΗΣ): Harisio Girokomio (Dimitrios Charisis) (4th Diamersima) (40.61637, 22.98233): thess_env_cityofthess_dailyyearly_5
# Dimarxeio (Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.): King's George A (1st Diamersima) (40.62381, 22.95312): thess_env_cityofthess_dailyyearly_6
# NO, NO2, O3, PM10, PM2.5, CO, SO2
# μg/m3,μg/m3,μg/m3,μg/m3,μg/m3,mg/m3,μg/m3
from bs4 import BeautifulSoup
from urllib.request import urlopen, urlretrieve
import time
import os
from collections import deque
import pandas as pd
import shutil
import uuid
from kafka import KafkaProducer
from kafka.errors import KafkaError
import logging
__author__ = "Marta Cortes"
__mail__ = "marta.cortes@oulu.fi"
__origin__ = "UbiComp - University of Oulu"
log_file_path = '../../logger.log'
logging.basicConfig(filename=log_file_path, filemode='a', level=logging.INFO,
format='[%(asctime)s]%(name)s %(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
datefmt='%H:%M:%S', )
code = 'thess_env_cityofthess_dailyyearly'
stations = {'Στ. ΕΓΝΑΤΙΑΣ': [40.63753, 22.94095], 'Στ. 25ης ΜΑΡΤΙΟΥ': [40.60102, 22.96017],
'Στ. ΛΑΓΚΑΔΑ': [40.65233, 22.93514], 'Στ. ΕΠΤΑΠΥΡΓΙΟΥ': [40.64407, 22.95837],
'Στ. ΜΑΛΑΚΟΠΗΣ': [40.61637, 22.98233], 'Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.': [40.62381, 22.95312]}
names = {'Στ. ΕΓΝΑΤΙΑΣ': 'Egnatia', 'Στ. 25ης ΜΑΡΤΙΟΥ': 'Martiou', 'Στ. ΛΑΓΚΑΔΑ': 'Lagada',
'Στ. ΕΠΤΑΠΥΡΓΙΟΥ': 'Eptapyrgio', 'Στ. ΜΑΛΑΚΟΠΗΣ': 'Malakopi', 'Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.': 'Dimarxeio'}
origin_url = 'https://opendata.thessaloniki.gr/el/dataset/%CE%BC%CE%B5%CF%84%CF%81%CE%AE%CF%83%CE%B5%CE%B9%CF%82-%CE%B4%CE%B7%CE%BC%CE%BF%CF%84%CE%B9%CE%BA%CE%BF%CF%8D-%CE%B4%CE%B9%CE%BA%CF%84%CF%8D%CE%BF%CF%85-%CF%83%CF%84%CE%B1%CE%B8%CE%BC%CF%8E%CE%BD-%CE%B5%CE%BB%CE%AD%CE%B3%CF%87%CE%BF%CF%85-%CE%B1%CF%84%CE%BC%CE%BF%CF%83%CF%86%CE%B1%CE%B9%CF%81%CE%B9%CE%BA%CE%AE%CF%82-%CF%81%CF%8D%CF%80%CE%B1%CE%BD%CF%83%CE%B7%CF%82-%CF%84%CE%BF%CF%85-%CE%B4%CE%AE%CE%BC%CE%BF%CF%85-%CE%B8%CE%B5%CF%83%CF%83%CE%B1%CE%BB%CE%BF%CE%BD%CE%AF%CE%BA%CE%B7%CF%82'
l_temp_path = '/home/oulu/THESS/data/environmental/temp/thess_env_cityofthess_dailyyearly/'
l_final_path = '/home/oulu/THESS/data/environmental/thess_env_cityofthess_dailyyearly/'
WINDOW_SIZE = "1920,1080"
class thess_env_cityofthess_dailyyearly(object):
    """Downloader/parser for the City of Thessaloniki air-quality excel files.

    Crawls the open-data page for links to excel files, downloads them to a
    temp folder, flattens every station sheet into one dataframe and writes
    it out as a CSV.  Progress and errors are reported to a Kafka bus via
    ``producer``.
    """

    def __init__(self, url):
        self.url = url
        self.xlfnames = []          # names of the excel files downloaded so far
        self.url_queue = deque([])  # double-ended queue of excel URLs to fetch
        self.folder = l_temp_path

    def get_page(self, url):
        """Download the page at the given URL and return its HTML as text.

        @param url: URL we want to crawl
        @type url: String
        @return: the decoded page content
        """
        u = None
        try:
            u = urlopen(url)
            html = u.read().decode('utf-8')
        finally:
            print("Closing")
            # Guard: if urlopen itself raised, `u` was never bound and the
            # original code hit an UnboundLocalError here, masking the real
            # network error.
            if u is not None:
                u.close()
        return html

    def get_soup(self, html):
        """Return the BeautifulSoup object of the given page, or None."""
        if html is not None:
            return BeautifulSoup(html, "html.parser")
        return None

    def get_links(self, soup):
        """Queue every https link on the page that points at a spreadsheet.

        @param soup: BeautifulSoup object that contains the targeted links
        @type soup: BeautifulSoup object
        """
        for link in soup.select('a[href^="https://"]'):  # all links with an https href
            href = link.get('href')
            if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):
                print("No excel")
                continue
            if href not in self.url_queue:
                self.url_queue.append(href)  # add the URL to our queue

    def get_files(self):
        """Create the temp folder (if needed) and download every queued file."""
        if not os.path.exists(self.folder):
            os.mkdir(self.folder)
        while len(self.url_queue):  # while we have URLs to crawl
            href = self.url_queue.popleft()  # grab a URL from the left of the list
            filename = href.rsplit('/', 1)[-1]
            print("Downloading %s to %s..." % (href, filename))
            fullname = os.path.join(self.folder, filename)
            urlretrieve(href, fullname)
            self.xlfnames.append(filename)

    def run_downloader(self):
        """Download the HTML page, collect the excel links and fetch the files.

        @return: True on success, False otherwise (after reporting to Kafka).
        """
        try:
            html = self.get_page(self.url)
            soup = self.get_soup(html)
            if soup is not None:
                self.get_links(soup)
                self.get_files()
            else:
                # No exception object exists on this branch; the original
                # passed an undefined name `e` here.
                self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                              'data source format is not as expected')
                return False
        except Exception as e:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source format is not as expected', e)
            return False
        return True

    def parse_sheet_to_df(self, xl, sheet, df):
        """Parse one station sheet into a cleaned dataframe.

        @param xl: open excel file object (pandas ExcelFile)
        @param sheet: sheet name (Greek station label)
        @param df: kept for signature compatibility; not used
        @return: cleaned dataframe for the sheet, or None for unknown sheets
        """
        if sheet in stations.keys():
            df_tmp = xl.parse(sheet)
            # Clean the headers: drop embedded newlines and the unit suffixes.
            df_tmp.columns = df_tmp.columns.str.replace('\n', ' ').str.strip(' μg/m3').str.strip(' mg/m3')
            # Keep only pollutant columns plus the (Greek) date and weekday.
            df_tmp = df_tmp.filter(regex='(NO|NO2|O3|PM10|PM2,5|CO|SO2|Ημερο - μηνία|Ημέρα)')
            # Translate the weekday to an English day name and attach the
            # station's coordinates and English name.
            df_tmp['Ημέρα'] = df_tmp['Ημέρα'].dt.day_name()
            df_tmp['Latitude'] = stations[sheet][0]
            df_tmp['Longitude'] = stations[sheet][1]
            df_tmp['Location'] = names[sheet]
            # Rename the remaining Greek headers to English.
            df_tmp.rename(columns={'Ημερο - μηνία': 'Date', 'Ημέρα': 'Weekday'}, inplace=True)
            df_tmp.rename(columns={'NO': 'air_pollution_NO', 'NO2': 'air_pollution_NO2', 'O3': 'air_pollution_O3',
                                   'PM10': 'air_pollution_PM10', 'PM2,5': 'air_pollution_PM25',
                                   'CO': 'air_pollution_CO', 'SO2': 'air_pollution_SO2'}, inplace=True)
            return df_tmp

    def write_to_file(self, df):
        """Write the combined dataframe to a uniquely named CSV under l_final_path.

        @return: True on success, False otherwise (after reporting to Kafka).
        """
        try:
            # Directory layout: l_final_path / code (single folder for all stations).
            if not os.path.exists(l_final_path):
                os.mkdir(l_final_path)
            outerdir = l_final_path + code
            if not os.path.exists(outerdir):
                os.mkdir(outerdir)
            outdir = outerdir
            if not os.path.exists(outdir):
                os.mkdir(outdir)
        except Exception as e:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'cannot create folder/file to store data', e)
            return False
        try:
            # One CSV for all sheets; a UUID name keeps repeated runs from clashing.
            csvfile = str(uuid.uuid4()) + ".csv"
            fullname = os.path.join(outdir, csvfile)
            df.to_csv(fullname, mode='a', encoding='utf-8-sig', index=False)  # mode a is append
        except Exception as e:
            # The original dropped the exception object here.
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'cannot store data in file', e)
            return False
        return True

    def parse_files(self):
        """Parse every downloaded excel file and write the combined CSV.

        @return: True on success, False otherwise (after reporting to Kafka).
        """
        if not self.xlfnames:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source not found or cannot be open')
            logging.error('error happened: no excel files found')
            return False
        frames = []
        for fileName in self.xlfnames:
            try:
                xlfname = self.folder + '/' + fileName
                xl = pd.ExcelFile(xlfname)
            except Exception as e:
                self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                              'data source not found or cannot be open', e)
                return False
            try:
                for sheet in xl.sheet_names:
                    df_tmp = self.parse_sheet_to_df(xl, sheet, None)
                    # Unknown sheets yield None; appending None used to raise
                    # inside the except below and abort the whole run.
                    if df_tmp is not None:
                        frames.append(df_tmp)
            except Exception as e:
                self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source format is not as expected',
                              e)
                return False
        # DataFrame.append is deprecated/removed in modern pandas; use concat.
        df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        return self.write_to_file(df)

    def producer(self, topic, msg, e=None):
        """Send a message to the Kafka bus; log the traceback if `e` is given."""
        producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP'],
                                 api_version=(2, 2, 1), security_protocol='SSL',
                                 ssl_check_hostname=True,
                                 ssl_cafile='/home/oulu/certs/ca-cert',
                                 ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt',
                                 ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key')
        msg_b = str.encode(msg)
        producer.send(topic, msg_b).get(timeout=30)
        if e:
            logging.exception('exception happened')
if __name__ == '__main__':
    crawler = thess_env_cityofthess_dailyyearly(origin_url)
    # Only report success to Kafka when both download and parsing succeeded.
    if crawler.run_downloader() and crawler.parse_files():
        crawler.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_INGESTION",
                         'City of Thessaloniki environmental data ingested to HDFS')
| CUTLER-H2020/DataCrawlers | Environmental/thess_env_cityofthess_dailyyearly.py | thess_env_cityofthess_dailyyearly.py | py | 12,139 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "urllib.reques... |
15860824543 | import logging
import scrapy
from crawler.items import MusicItem, ArtistItem
from crawler.spiders.base import BaseSpider
class ZkSpider(BaseSpider):
    """Spider for zk.fm: walks numbered artist pages and yields artists and songs."""

    name = 'zk'
    allowed_domains = ['zk.fm']
    handle_httpstatus_list = [304, 404]
    base_url = 'https://zk.fm'
    count_page = 10 ** 6  # upper bound on artist ids to visit

    def start_requests(self):
        """Yield one request per artist id (only a handful in test mode)."""
        if self.test_mode:
            self.count_page = 5
        for n in range(1, self.count_page):
            self.gc_clear()
            yield scrapy.Request(
                url=self.get_url(f'/artist/{n}'),
                errback=self.error,
                callback=self.parse,
            )

    def parse(self, response):
        """Extract the artist name, emit an ArtistItem and delegate to get_items."""
        if response.status in [404]:
            logging.error('Error: 404')
            return
        title_selector = response.css('#container .title_box h1::text').extract_first()
        if not title_selector:
            return
        title = title_selector.rstrip().strip()
        if not title:
            logging.error('Error: encode')
            return
        self.monitor.update_artist_count()
        yield ArtistItem(name=title)
        response.meta['artist_name'] = title
        yield from self.get_items(response)

    def get_items(self, response):
        """Yield a MusicItem per unique song on the page, then follow pagination."""
        artist_name = response.meta['artist_name']
        items, items_urls = [], []
        for item in response.css('#container .song'):
            try:
                song_info = {
                    'name': item.css('div.song-name a span::text').extract_first().strip(),
                    'url': self.get_url(item.css('span.song-download').xpath('@data-url').extract_first()),
                    'artist': artist_name
                }
                if not song_info['name']:
                    continue
                if song_info['url'] not in items_urls:
                    items_urls.append(song_info['url'])
                    items.append(song_info)
            except AttributeError:
                # extract_first() returned None for a malformed entry; skip it.
                continue
        for song_dict in items:
            self.monitor.update_song_count()
            yield MusicItem(song_dict)
        next_page = response.css('a.next-btn')
        if next_page and 'disabled' not in next_page.xpath('@class').extract_first():
            url = self.get_url(next_page.xpath('@href').extract_first())
            yield scrapy.Request(
                url,
                meta={
                    # Scrapy's meta key is 'dont_merge_cookies'; the original
                    # misspelled it as 'dont_merge_cookie', so it had no effect.
                    'dont_merge_cookies': True,
                    'artist_name': artist_name,
                },
                errback=self.error,
                callback=self.get_items,
            )
| Arthur264/music_data | code/crawler/spiders/zk.py | zk.py | py | 2,581 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "crawler.spiders.base.BaseSpider",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "scrapy.Request",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.e... |
31313726066 | import json
import os
from dataclasses import dataclass
from typing import Any, List
import fire
import tinytuya
# Project root (two directory levels up from this file); the snapshot of
# paired devices produced by the setup step lives under setup/.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SNAPSHOTFILE = '%s/setup/snapshot.json' % ROOT_DIR
# Named RGB presets used by the `color` CLI command.
rainbow = {
    'red': [255, 0, 0],
    'orange': [255, 127, 0],
    'yellow': [255, 200, 0],
    'green': [0, 255, 0],
    'blue': [0, 0, 255],
    'indigo': [46, 43, 95],
    'violet': [139, 0, 255],
    'turquoise': [48, 213, 200],
    'magenta': [255, 0, 255],
    'white': [255, 255, 176],
}
@dataclass
class Device:
    """Snapshot record for one Tuya device as stored in snapshot.json."""

    ip: str
    gwId: str
    active: int
    version: str
    name: str
    key: str
    id: str
    ver: str

    @staticmethod
    def from_dict(obj: Any) -> 'Device':
        """Build a Device from one snapshot dict, coercing each field's type."""
        return Device(
            ip=str(obj.get('ip')),
            gwId=str(obj.get('gwId')),
            active=int(obj.get('active')),
            version=str(obj.get('version')),
            name=str(obj.get('name')),
            key=str(obj.get('key')),
            id=str(obj.get('id')),
            ver=str(obj.get('ver')),
        )
class CLI(object):
    """Fire-driven command-line interface for controlling Tuya smart bulbs."""

    def __init__(self):
        self.setups = read_devices_file(SNAPSHOTFILE)

    def _get_device(self, device_name: str):
        """Look up the named device in the snapshot and open a v3.3 bulb handle."""
        setup = filter_setup(self.setups, device_name)
        bulb = tinytuya.BulbDevice(setup.id, setup.ip, setup.key)
        bulb.set_version(3.3)
        return bulb

    def status(self, device):
        """Return the raw status payload of an already opened device."""
        return device.status()

    def show_status(self, device_name):
        """Print the current colour and return the status of the named device."""
        device = self._get_device(device_name)
        print(f'color: {device.colour_rgb()}')
        return device.status()

    def switch(self, device_name: str):
        """Toggle the on/off state (dps index 20) of the named device."""
        device = self._get_device(device_name)
        is_on = self.status(device).get('dps').get('20')
        return device.set_status(on=not is_on, switch=20)

    def color(self, device_name: str, color_name: str):
        """Set the named device to one of the preset rainbow colours."""
        device = self._get_device(device_name)
        rgb = rainbow.get(color_name)
        return device.set_colour(rgb[0], rgb[1], rgb[2])
def read_devices_file(file_name: str) -> List[Device]:
    """Load the snapshot JSON and return one Device per entry under 'devices'."""
    with open(file_name) as file:
        snapshot = json.load(file)
    return [Device.from_dict(entry) for entry in snapshot.get('devices')]
def filter_setup(setup_devices: List[Device], device_name: str) -> Device:
    """Return the first snapshot entry whose name matches, or raise if absent."""
    matches = [item for item in setup_devices if item.name == device_name]
    if not matches:
        raise Exception('Setup not found!')
    return matches[0]
def main():
    """Expose the CLI class as a command-line tool via python-fire."""
    fire.Fire(CLI)


if __name__ == '__main__':
    main()
| paraizofelipe/py_iot | py_iot/local.py | local.py | py | 2,821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_numb... |
70990945383 | import torch
import torch.nn.functional as F
from torch import nn
from torch import optim
from torch.distributions import Categorical
import numpy as np
import matplotlib.pyplot as plt
from statistics import stdev, mean
import multiprocessing
import gym
from model import Network
from utils import set_seed
def plot(values):
    """Render a line plot of `values` and block until the window is closed."""
    plt.plot(values)
    plt.show()
def update(model, state_value, next_state_value, log_prob, reward):
    """Perform one online actor-critic update from a single transition.

    model: dict holding 'optim' (optimizer) and 'scheduler' (LR scheduler).
    state_value / next_state_value: critic outputs V(s) and V(s') as tensors
        (next_state_value may be 0 for a terminal state).
    log_prob: log-probability of the action taken under the current policy.
    reward: scalar reward for the transition.
    """
    # TD error: r + V(s') - V(s).  No discount factor is applied, matching
    # the rest of this script.
    td_error = torch.tensor(reward) + next_state_value - state_value
    # Critic: regress V(s) toward the TD target by minimising the *squared*
    # TD error.  The original minimised the raw signed TD error, which is
    # not a regression loss (it just pushes V(s) up and V(s') down).
    critic_loss = td_error.pow(2)
    # Actor: policy gradient weighted by the TD error, detached so the
    # advantage is a constant with respect to the critic's parameters.
    policy_loss = -log_prob * td_error.detach()
    model['optim'].zero_grad()
    (policy_loss + critic_loss).backward(retain_graph=True)
    model['optim'].step()
    model['scheduler'].step()
def select_action(action_probs):
    """Sample an action from the policy's probability distribution.

    Returns the sampled action index and its log-probability (needed for the
    policy-gradient update).
    """
    dist = Categorical(action_probs)
    sampled = dist.sample()
    return sampled.item(), dist.log_prob(sampled)
def train(env, model, n_episodes=200):
    """Run online actor-critic training for `n_episodes` episodes.

    env: a gym-style environment using the pre-0.26 API where step() returns
        (obs, reward, done, info) — TODO confirm gym version.
    model: dict holding 'net' (policy/value network), 'optim' and 'scheduler'.
    Performs one update per environment step and finally plots the running
    average of episode returns, sampled every 10 episodes.
    """
    avg_returns = []  # one episode-return entry per finished episode
    returns = []      # running mean of avg_returns, recorded every 10 episodes
    for episode in range(1, n_episodes+1):
        rewards = []
        obs = env.reset()
        # Add a batch dimension so the network sees shape (1, obs_dim).
        obs = torch.from_numpy(obs).float().unsqueeze(0)
        action_probs, state_value = model['net'](obs)
        next_state_value = 0
        while True:
            action, log_prob = select_action(action_probs)
            next_obs, reward, done, _ = env.step(action)
            next_obs = torch.from_numpy(next_obs).float().unsqueeze(0)
            action_probs, next_state_value = model['net'](next_obs)
            # One-step TD update using this single transition.
            update(model, state_value, next_state_value, log_prob, reward)
            if done:
                # NOTE(review): the terminal step's reward is recorded as 0,
                # so each episode return is understated by one step's reward.
                rewards.append(0)
                break
            obs = next_obs
            state_value = next_state_value
            rewards.append(reward)
        avg_returns.append(sum(rewards))
        if episode % 10 == 0:
            print('Episode: {} - Episode Return: {} - Average Returns: {}'.format(
                episode, sum(rewards), mean(avg_returns)
            ))
            returns.append(mean(avg_returns))
    plot(returns)
def main(environment='CartPole-v0', n_episodes=200):
    """Build the environment, network and optimizer, then run training.

    environment: gym environment id.  The original accepted this parameter
    but always built 'CartPole-v0' regardless; it is now honoured.
    n_episodes: number of training episodes.
    """
    env = gym.make(environment)
    set_seed(env, 0)
    obs_shape = env.observation_space.shape
    obs_dim = obs_shape[0]
    n_actions = env.action_space.n
    model = {}
    model['net'] = Network(obs_dim, n_actions, hidden_dim=[128], n_layers=2)
    optimizer = optim.Adam(model['net'].parameters(), lr=1e-3)
    model['optim'] = optimizer
    # Cosine-annealed learning rate from 1e-3 down to eta_min over T_max steps.
    T_max = 400
    eta_min = 1e-5
    model['scheduler'] = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min)
    train(env, model, n_episodes)
# Seed torch globally so runs are reproducible.
torch.manual_seed(0)

if __name__ == '__main__':
    main()
# p2.join() | dylanamiller/actor_critic | actor_critic.py | actor_critic.py | py | 3,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matpl... |
13431297301 | import cv2
import boto3
import numpy as np
from botocore.exceptions import NoCredentialsError
# AWS S3 access configuration.
# NOTE(review): the credential values were redacted in the repository; load
# them from environment variables or an AWS profile instead of hard-coding.
ACCESS_KEY = '보호처리'
SECRET_KEY = '보호처리'
BUCKET_NAME = '보호처리'
OBJECT_NAME = '보호처리'
# Initialise the S3 client.
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
try:
    # Download the image object from the S3 bucket.
    response = s3.get_object(Bucket=BUCKET_NAME, Key=OBJECT_NAME)
    file_content = response['Body'].read()
    # Convert the raw bytes into a numpy array.
    np_array = np.frombuffer(file_content, np.uint8)
    # Decode the array into an OpenCV image.
    img = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
    # Display the image until a key is pressed.
    cv2.imshow('Image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
except NoCredentialsError:
    # Message (Korean): "The AWS credential information is invalid."
    print('AWS 자격 증명 정보가 올바르지 않습니다.')
| kmyobin/capstone_demo_web | image_download.py | image_download.py | py | 913 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"li... |
14379387806 | import json
from typing import List
from django.forms import model_to_dict
from wagtail.contrib.routable_page.models import RoutablePageMixin
from wagtail.core.models import Page
from main.models import Thematic
from main.models.country import Country
from main.models.country import WorldZone
from main.models.models import Profile, ResourceType
from main.models.resource import Resource
class ResourcesPage(RoutablePageMixin, Page):
    """Single resources-listing page exposing serialized data for the Vue front-end."""

    class Meta:
        verbose_name = "Page des ressources"
        verbose_name_plural = "Pages des ressources"

    parent_page_types = ["main.HomePage"]
    subpage_types: List[str] = []
    max_count_per_parent = 1

    def get_context(self, request, *args, **kwargs):
        """Serialize every lookup table to JSON strings consumed by the Vue app."""
        context = super().get_context(request, *args, **kwargs)
        context["has_vue"] = True
        context["profiles"] = json.dumps({
            profile.slug: {"name": profile.name, "slug": profile.slug}
            for profile in Profile.objects.all()
        })
        context["thematics"] = json.dumps(
            [thematic.to_dict() for thematic in Thematic.objects.all()])
        context["resource_types"] = json.dumps(
            [model_to_dict(resource_type) for resource_type in ResourceType.objects.all()])
        context["zones"] = json.dumps(
            [zone.to_dict() for zone in WorldZone.objects.all()])
        # Pre-selected profile from the query string (empty when absent).
        context["selected_profile"] = request.GET.get("profile", "")
        context["resources"] = json.dumps(
            [resource.to_dict() for resource in Resource.objects.all()])
        context["countries"] = json.dumps(
            [country.to_dict() for country in Country.objects.all()])
        context["resource_types_per_profile"] = json.dumps({
            profile.slug: [resource_type.slug for resource_type in profile.types.all()]
            for profile in Profile.objects.all()
        })
        return context
| TelesCoop/geodev | main/models/resources_page.py | resources_page.py | py | 1,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wagtail.contrib.routable_page.models.RoutablePageMixin",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
}... |
72774363945 | # This modules reads data from already collected data from APIF and transfer it to our DB hosted with
# MongoDB
import json
from DataBaseObjects.DataBaseObjects import FixtureResult
from pymongo import MongoClient
# NOTE(review): the MongoDB connection string below embeds credentials; move
# them to an environment variable or config file before sharing this script.
for season in range(2011, 2020):
    # Load the previously collected API-Football dump for this season
    # (static, one single-line JSON file per season for now).
    with open(f"C:\\Users\\rferreira\\Documents\\GitHub\\11Sixteen\\API-test\\League 39 - {season}", "r") as file:
        raw_data = json.loads(file.readline())
    number_of_results = raw_data['results']  # number of fixture records in the dump
    client = MongoClient('mongodb+srv://RubenFerreira:TPVXAliOZt3OqFpk@11sixteen.zzyri.mongodb.net/test?')
    db = client["football_data"]
    db_collection = db["football_results"]  # Open the database
    try:
        for result in range(number_of_results):  # Call each document
            current_result = FixtureResult(raw_data['response'][result])
            document_to_insert = {'competition': current_result.competition_info,
                                  'event': current_result.event_info,
                                  'teams': current_result.team_info,
                                  'results': current_result.result_info}
            # (A stray json.dumps() call whose result was discarded has been removed.)
            inserted_id = db_collection.insert_one(document_to_insert).inserted_id
            print(f"Document {inserted_id} has been inserted")
    finally:
        # Close the connection even if an insert fails.
        client.close()
| SigmaFireFox/SigmaFox | apps/eleven10ths/src/app/11sixteen-desktop-app/DatabaseBuilding/API-Football.com-FixtureResults-TxtToDB.py | API-Football.com-FixtureResults-TxtToDB.py | py | 1,380 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "DataBaseObjects.DataBaseObjects.FixtureResult",
"line_number": 21,
"usage_type": "call"
},
{
"api_... |
12366554892 | # -*- coding: utf-8 -*-
#
# AMPLE documentation build configuration file, created by
# sphinx-quickstart on Thu May 26 11:57:09 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# Make AMPLE believe we are running it as part of CCP4
os.environ['CCP4'] = os.environ['CCP4_SCR'] = "."
# Required by autosummary
sys.path.insert(0, os.path.abspath(".")) # for sphinxext directory
sys.path.insert(0, os.path.abspath("..")) # for ample directory
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxext.math_symbol_table',
'sphinxarg.ext',
]
# Fail early with an actionable message if the HTML theme is missing.
try:
    import sphinx_bootstrap_theme
except ImportError:
    # (Fixed a missing space: the message previously read "sphinx_bootstrap_thememust".)
    msg = "Error: sphinx_bootstrap_theme must be installed before generating this documentation\n" + \
          "This can be installed with the command: {} -m pip install sphinx_bootstrap_theme".format(sys.executable)
    raise ImportError(msg)
# Fail early if sphinx-argparse (providing the sphinxarg module) is missing.
try:
    import sphinxarg
except ImportError:
    # (Fixed a missing word: the message previously read "sphinxarg be installed".)
    msg = "Error: sphinxarg must be installed before generating this documentation\n" + \
          "This can be installed with the command: {} -m pip install sphinx-argparse".format(sys.executable)
    raise ImportError(msg)
# AMPLE itself must be importable: its version string is read further below.
try:
    import ample.util.version
except ImportError:
    msg = "Error: AMPLE must be installed before generating its documentation"
    sys.exit(msg)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AMPLE'
copyright = u'2016-2019, University of Liverpool'
author = u'Jens Thomas, Felix Simkovic, Adam Simpkin, Ronan Keegan & Daniel Rigden'
# The short X.Y version.
version = ample.util.version.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# If true, create autosummary automatically
#autosummary_generate = True
#autodoc_docstring_signature = True
# If set, mock the import system to have external dependencies
autodoc_mock_imports = [
    'Bio',
    'conkit',
    'iotbx',
    'matplotlib',
    # BUG FIX: a missing trailing comma here used to concatenate this entry
    # with 'numpy' into one bogus name 'mmtbx.superposenumpy', so neither
    # module was actually mocked during the docs build.
    'mmtbx.superpose',
    'numpy',
    'pandas',
    'parse_arpwarp',
    'parse_buccaneer',
    'parse_phaser',
    'parse_shelxe',
    'phaser',
    'pyjob',
]
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Tab name for entire site.
'navbar_site_name':
'Home',
# A list of tuples containing pages or urls to link to.
'navbar_links': [
('Home', 'index'),
('Description', 'description'),
('Video guides', 'video_guides'),
('Examples', 'examples'),
('Server', 'server'),
('Documentation', 'contents'),
('References', 'references'),
],
# Render the next and previous page links in navbar.
'navbar_sidebarrel':
False,
# Render the current pages TOC in the navbar.)
'navbar_pagenav':
True,
# Global TOC depth for "site" navbar tab.
'globaltoc_depth':
2,
# Fix navigation bar to top of page?
'navbar_fixed_top':
False,
# Location of link to source.
'source_link_position':
"footer",
# Bootswatch (http://bootswatch.com/) theme.
'bootswatch_theme':
"spacelab",
# Choose Bootstrap version.
'bootstrap_version':
"3",
}
# Additional variables to be passed to templates
html_context = {
# URL to the GitHub repository - None if unwanted
'github_url': 'https://github.com/rigdenlab/ample.git',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
html_title = u'AMPLE v{0}'.format(version)
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = u'AMPLE'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo_ample.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_style = 'custom.css'
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AMPLEdoc'
# -- Options for LaTeX output ---------------------------------------------
#latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
#}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'AMPLE.tex', u'AMPLE Documentation',
# u'Jens Thomas, Felix Simkovic, Adam Simpkin, Ronan Keegan', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'ample', u'AMPLE Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AMPLE', u'AMPLE Documentation', author, 'AMPLE', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Extension configuration -------------------------------------------------
def run_apidoc(_):
    """Regenerate the API .rst stubs for the ``ample`` package with sphinx-apidoc.

    Registered below as a ``builder-inited`` callback, so the stubs are rebuilt
    at the start of every Sphinx build. The ignored argument is the Sphinx app.
    """
    root = os.path.join('..', 'ample')
    # exclude the per-subpackage test directories from stub generation
    ignore_paths = [os.path.join(root, '*', 'tests')]
    # -f force overwrite, -T no toc file, -e one page per module, -M module-first
    argv = ['-f', '-T', '-e', '-M', '-o', os.path.join('api', 'generated'), root] + ignore_paths
    try:
        # Sphinx 1.7+
        from sphinx.ext import apidoc
        apidoc.main(argv)
    except ImportError:
        # Sphinx 1.6 (and earlier)
        from sphinx import apidoc
        # the old CLI expects the program name as argv[0]
        argv.insert(0, apidoc.__file__)
        apidoc.main(argv)
def setup(app):
    """Sphinx extension hook: register the custom stylesheet and the apidoc
    regeneration callback on the Sphinx application."""
    app.add_stylesheet("custom.css")
    app.connect('builder-inited', run_apidoc)
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
| rigdenlab/ample | docs/conf.py | conf.py | py | 12,912 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"... |
74223738662 | from calendar import c
from pathlib import Path
import tempfile
import mlflow
import pandas as pd
from src.modelling.evaluating.eval import evaluate
from src.modelling.model.model import Model
from src.utils.config import Config
from src.utils.utils import create_artifact
class MlFlowModel(mlflow.pyfunc.PythonModel):
    """
    Wrapping class to save model with MlFlow
    """

    def __init__(self, model: Model, parameters: dict):
        # the wrapped project model and the parameters it was trained with
        self.model = model
        self.parameters = parameters

    def predict(self, context, X: pd.DataFrame) -> pd.DataFrame:
        # `context` is required by MLflow's pyfunc interface -- do not remove it
        return self.model.predict(X)

    def get_parameters(self) -> dict:
        return self.parameters
def mlflow_track_training(training_function):
    """
    Decorator function that add MLflow logging to model training

    Wraps *training_function*; after training it starts a nested MLflow run,
    stores the run id on the model, logs the model artifact and a parameter.
    """
    def wrapper(*args, **kwargs):
        # TODO LOG instead of print
        print("Decorating with mlflow experiment")
        mlflow_enabled = True
        model = training_function(*args, **kwargs)
        if mlflow_enabled:
            model_name = "Model"
            # NOTE(review): hard-coded, user-specific tracking path; the
            # arguments are ignored by set_mlflow_params anyway (it re-reads
            # Config()) -- confirm and move to configuration.
            set_mlflow_params("file:/Users/david/Desktop/ML-template/experiments/", "Project")
            with mlflow.start_run(nested=True, run_name=model_name) as run:
                # Add experiment ID to model to be used in evaluation
                model.set_mlflow_run_id(run.info.run_id)
                # Log model
                mlflow.pyfunc.log_model(
                    artifact_path="model",
                    python_model=MlFlowModel(model, model.params),
                )
                # Log parameters
                test_param = kwargs.get("test_param", "")
                mlflow.log_params({"test_param": test_param})
        return model
    return wrapper
def mlflow_track_evaluation(predict_function):
    """
    Decorator function that add MLflow logging to model forecasting/evaluation

    Re-opens the MLflow run created during training (via the run id stored on
    the model) and logs evaluation metrics and artifacts into it.
    """
    def wrapper(*args, **kwargs):
        mlflow_enabled = True
        forecast = predict_function(*args, **kwargs)
        if mlflow_enabled:
            # NOTE(review): same hard-coded path as in training; the arguments
            # are ignored inside set_mlflow_params (it reads Config()).
            set_mlflow_params("file:/Users/david/Desktop/ML-template/experiments/", "Project")
            # the decorated call is expected to receive the model as a kwarg
            model = kwargs.get("model")
            # NOTE(review): actuals are an empty frame, so metrics are computed
            # against no ground truth here -- TODO confirm intent.
            actual = pd.DataFrame()
            run_id = model.get_mlflow_run_id()
            with mlflow.start_run(run_id=run_id):
                evaluation = evaluate(model, forecast, actual)
                # Log performance metrics
                mlflow.log_metrics(evaluation["numeric_metrics"])
                save_artifacts(evaluation)
        return forecast
    return wrapper
def set_mlflow_params(model_output_path: str, project_name: str) -> None:
    """Set up mlflow run parameters.

    Args:
        model_output_path (str): tracking-store root. NOTE(review): currently
            ignored -- the value is re-read from Config() below; confirm intent.
        project_name (str): experiment name. NOTE(review): also ignored and
            overwritten from Config() below.
    """
    config = Config()
    # both arguments are shadowed by the configuration values here
    experiments_path = Path(config.mlflow["model_output_path"]) / "mlruns"
    project_name = config.mlflow["project_name"]
    mlflow.set_tracking_uri(experiments_path.as_posix())
    mlflow.set_experiment(project_name)
def save_artifacts(artifacts: dict) -> None:
    """Save artifacts (plots and csv) to file

    Args:
        artifacts (dict): expected keys: 'csvs' (name -> csv text),
            'plots_png' (name -> PNG bytes) and 'plots_html' (name -> HTML
            text), as produced by evaluate().
    """
    # stage files in a temp dir; mlflow copies them into the run's artifact store
    with tempfile.TemporaryDirectory() as tmp_directory:
        csvs = artifacts["csvs"]
        for name in csvs:
            mlflow.log_artifact(
                create_artifact(csvs[name].encode("utf-8"), tmp_directory,
                                name, "csv"),
                artifact_path="csv",
            )
        plots = artifacts["plots_png"]
        for name in plots:
            mlflow.log_artifact(
                create_artifact(plots[name], tmp_directory, name, "png"),
                artifact_path="plots",
            )
        plots = artifacts["plots_html"]
        for name in plots:
            mlflow.log_artifact(
                create_artifact(plots[name], tmp_directory, name, "html"),
                artifact_path="plots",
            )
| DaveFantini/ML-template | src/utils/mlflow.py | mlflow.py | py | 4,018 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mlflow.pyfunc",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "src.modelling.model.model.Model",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name... |
4004022772 | from collections import namedtuple
from test.mock import MockArgs
import os
import proze
import unittest
# The output of all test projects are compiled to the same file.
OUTPUT_PATH = 'test/sample/tmp/output.txt'

# (root_path, expected_output) pairs describing each sample project;
# expected_output == '' means the project should produce no/empty output.
Case = namedtuple('Case', ['root_path', 'expected_output'])
# NOTE(review): the directory is spelled 'dark-and-story' while the expected
# file is 'dark-and-stormy.txt' -- verify the sample folder's name on disk.
dark_and_stormy = Case(
    'test/sample/dark-and-story',
    'test/sample/dark-and-story/dark-and-stormy.txt'
)
feelings = Case('test/sample/feelings', '')
missing = Case('test/sample/missing_data', '')
no_data = Case('test/sample/no_data', '')
pumpkins = Case(
    'test/sample/pumpkins',
    'test/sample/pumpkins/pumpkins.txt'
)
class TestCompileText(unittest.TestCase):
    """Tests for compiling proze projects using the text strategy."""

    def setUp(self):
        # start each test from a clean slate; ignore "file not there yet"
        try:
            os.remove(OUTPUT_PATH)
        except OSError:
            pass

    def load_files(self, generated_path, expected_path):
        """Load data for comparison.

        :param str generated_path: Path to generated output file.
        :param str expected_path: Path to expected data file.
        :return tuple(list, list): Lines of generated text, lines of
            expected text.
        """
        with open(generated_path, 'r') as generated:
            text_generated = generated.read()
        with open(expected_path, 'r') as expected:
            text_expected = expected.read()
        generated_lines = text_generated.split('\n')
        expected_lines = text_expected.split('\n')
        return generated_lines, expected_lines

    def test_compile_empty(self):
        """A project with no config/proze files generates nothing."""
        # OUTPUT_PATH[:-4] strips '.txt'; the compiler appends the doctype suffix
        args = MockArgs(
            doctype='txt',
            output=OUTPUT_PATH[:-4],
            path=no_data.root_path)
        proze.run(args)
        self.assertFalse(os.path.isfile(OUTPUT_PATH))

    def test_compile_missing(self):
        """A project config file has links to files that don't exist."""
        args = MockArgs(
            doctype='txt',
            output=OUTPUT_PATH[:-4],
            path=missing.root_path)
        proze.run(args)
        # the output file exists but is empty
        with open(OUTPUT_PATH) as f:
            self.assertEqual(f.read(), '')

    def test_dark_and_stormy(self):
        """Compile the dark-and-stormy sample project."""
        args = MockArgs(
            doctype='txt',
            output=OUTPUT_PATH[:-4],
            path=dark_and_stormy.root_path
        )
        proze.run(args)
        generated_lines, expected_lines = self.load_files(
            OUTPUT_PATH,
            dark_and_stormy.expected_output
        )
        # compare line-by-line so failures point at the first differing line
        self.assertEqual(len(generated_lines), len(expected_lines))
        for i in range(0, len(expected_lines)):
            self.assertEqual(generated_lines[i], expected_lines[i])

    def test_pumpkins(self):
        """Compile the pumpkins sample project."""
        args = MockArgs(
            doctype='txt',
            output=OUTPUT_PATH[:-4],
            path=pumpkins.root_path
        )
        proze.run(args)
        generated_lines, expected_lines = self.load_files(
            OUTPUT_PATH,
            pumpkins.expected_output
        )
        self.assertEqual(len(generated_lines), len(expected_lines))
        for i in range(0, len(expected_lines)):
            self.assertEqual(generated_lines[i], expected_lines[i])
| RobotNerd/proze-python-converter | test/test_compile_text.py | test_compile_text.py | py | 3,295 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "test.mock.Moc... |
8446011858 | import unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
    'decimals': [-2, -1, 0, 1, 2],
}))
class TestRound(unittest.TestCase):
    """ndarray.round() compared against NumPy across dtypes and decimals."""

    shape = (20,)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(atol=1e-5)
    def test_round(self, xp, dtype):
        if dtype == numpy.bool_:
            # avoid cast problem
            a = testing.shaped_random(self.shape, xp, scale=10, dtype=dtype)
            return a.round(0)
        if dtype == numpy.float16:
            # avoid accuracy problem
            a = testing.shaped_random(self.shape, xp, scale=10, dtype=dtype)
            return a.round(0)
        a = testing.shaped_random(self.shape, xp, scale=100, dtype=dtype)
        return a.round(self.decimals)

    @testing.numpy_cupy_array_equal()
    def test_round_out(self, xp):
        # round into a preallocated output array (the `out` argument)
        a = testing.shaped_random(self.shape, xp, scale=100, dtype='d')
        out = xp.empty_like(a)
        a.round(self.decimals, out)
        return out
return out
@testing.parameterize(*testing.product({
    # limit to:
    # * <=0: values like 0.35 and 0.035 cannot be expressed exactly in IEEE 754
    # * >-4: to avoid float16 overflow
    'decimals': [-3, -2, -1, 0],
}))
class TestRoundHalfway(unittest.TestCase):
    """Rounding of exact .5 halfway values, compared against NumPy."""

    shape = (20,)

    @testing.for_float_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_round_halfway_float(self, xp, dtype):
        # generate [..., -1.5, -0.5, 0.5, 1.5, ...] * 10^{-decimals}
        a = testing.shaped_arange(self.shape, xp, dtype=dtype)
        a *= 2
        a -= a.size + 1
        scale = 10**abs(self.decimals)
        if self.decimals < 0:
            a *= scale
        else:
            a /= scale
        a /= 2
        return a.round(self.decimals)

    @testing.for_signed_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_round_halfway_int(self, xp, dtype):
        # generate [..., -1.5, -0.5, 0.5, 1.5, ...] * 10^{-decimals}
        a = testing.shaped_arange(self.shape, xp, dtype=dtype)
        a *= 2
        a -= a.size + 1
        scale = 10**abs(self.decimals)
        if self.decimals < 0:
            a *= xp.array(scale).astype(dtype)
        # halve with an arithmetic shift to stay in the integer domain
        a >>= 1
        return a.round(self.decimals)

    @testing.for_unsigned_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_round_halfway_uint(self, xp, dtype):
        # generate [0.5, 1.5, ...] * 10^{-decimals}
        a = testing.shaped_arange(self.shape, xp, dtype=dtype)
        a *= 2
        a -= 1
        scale = 10**abs(self.decimals)
        if self.decimals < 0:
            a *= xp.array(scale).astype(dtype)
        a >>= 1
        return a.round(self.decimals)
return a.round(self.decimals)
@testing.parameterize(*testing.product({
    'decimals': [-5, -4, -3, -2, -1, 0]
}))
class TestRoundMinMax(unittest.TestCase):
    """Rounding near integer-type extrema (known CuPy/NumPy divergences)."""

    # NOTE(review): the leading underscore already hides this method from
    # unittest discovery, so the skip decorator is redundant here.
    @unittest.skip('Known incompatibility: see core.pyx')
    @testing.numpy_cupy_array_equal()
    def _test_round_int64(self, xp):
        a = xp.array([-2**62, 2**62], dtype=xp.int64)
        return a.round(self.decimals)

    @unittest.skip('Known incompatibility: see core.pyx')
    @testing.numpy_cupy_array_equal()
    def test_round_uint64(self, xp):
        a = xp.array([2**63], dtype=xp.uint64)
        return a.round(self.decimals)

    @unittest.skip('Known incompatibility: see core.pyx')
    @testing.for_int_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_round_minmax(self, xp, dtype):
        a = xp.array([xp.iinfo(dtype).min, xp.iinfo(dtype).max], dtype=dtype)
        return a.round(self.decimals)
| cupy/cupy | tests/cupy_tests/core_tests/test_ndarray_math.py | test_ndarray_math.py | py | 3,529 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.bool_",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cupy.testing.shaped_random",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cu... |
8023958832 | import matplotlib.pyplot as plt
import compiled
def draw():
    """Scatter-plot compiled.compiled_func over the integers [-1000, 1000),
    skipping any x where evaluation raises (e.g. a discontinuity)."""
    x_points = []
    y_points = []
    for x in range(-1000, 1000):
        try:
            y_points.append(compiled.compiled_func(x))
            x_points.append(x)
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only ordinary errors from the evaluated function.
        except Exception:
            print("Error, posible discontinuidad en: ", x)
    plt.scatter(x_points, y_points)
    plt.show()
| danieltes/tp_solver | draw.py | draw.py | py | 357 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "compiled.compiled_func",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "ma... |
23950768357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script
"""
from setuptools import setup
import os
def read(fname):
    """returns the text of a file

    *fname* is resolved relative to this file's directory (via abspath, so it
    works regardless of the current working directory), and the handle is
    closed promptly via the context manager instead of being leaked.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, fname), 'r') as handle:
        return handle.read()
def get_requirements(filename="requirements.txt"):
    """returns a list of all requirements

    Each line of *filename* is stripped of any trailing '#' comment; the
    remaining non-empty entries are returned in file order.
    """
    requirements = []
    for line in read(filename).splitlines():
        requirement = line.split('#')[0].strip()
        if requirement:
            requirements.append(requirement)
    return requirements
# Package metadata and entry points for setuptools.
setup(
    name='dockgraph',
    version='0.1',
    # see https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: Other/Proprietary License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities'
    ],
    description='analyse dependencies of docker images',
    long_description=read('README.rst'),
    url='https://github.com/jneureuther/dockgraph',
    author='Julian Neureuther',
    author_email='dev@jneureuther.de',
    # NOTE(review): `license` is conventionally a short identifier (e.g. "MIT");
    # passing the entire LICENSE file text here is unusual -- confirm intent.
    license=read('LICENSE'),
    packages=['dockgraph'],
    scripts=['bin/dockgraph'],
    test_suite="tests",
    install_requires=get_requirements(),
)
| nightvisi0n/dockgraph | setup.py | setup.py | py | 1,496 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"lin... |
4391861651 | import torch, torchvision
from torch import nn
# channel width of ResNet-18's final conv feature map
img_hidden_sz = 512
# number of pooled embedding vectors produced per image
num_image_embeds = 5
# NOTE(review): the original stray "?? 8907" comment suggests the class count
# is uncertain (8790 vs 8907) -- confirm against the label vocabulary.
n_classes = 8790
img_embed_pool_type = 'avg'
class ImageEncoder18(nn.Module):
    """Truncated ResNet-18 backbone emitting num_image_embeds pooled
    embedding vectors (of size 512 each) per input image."""

    def __init__(self):
        super(ImageEncoder18, self).__init__()
        model = torchvision.models.resnet18(pretrained=True)
        # drop the final avgpool + fc layers, keeping the conv feature map
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        pool_func = (
            nn.AdaptiveAvgPool2d
            if img_embed_pool_type == "avg"
            else nn.AdaptiveMaxPool2d
        )
        # pick a pooling grid whose cell count equals num_image_embeds
        if num_image_embeds in [1, 2, 3, 5, 7]:
            self.pool = pool_func((num_image_embeds, 1))
        elif num_image_embeds == 4:
            self.pool = pool_func((2, 2))
        elif num_image_embeds == 6:
            self.pool = pool_func((3, 2))
        elif num_image_embeds == 8:
            self.pool = pool_func((4, 2))
        elif num_image_embeds == 9:
            self.pool = pool_func((3, 3))
        # NOTE(review): no else branch -- an unsupported num_image_embeds
        # leaves self.pool unset and forward() would raise AttributeError.

    def forward(self, x):
        # Bx3x224x224 -> Bx512x7x7 -> Bx512xN -> BxNx512
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx512 (ResNet-18 has 512 channels; the old "2048" note was stale)
class ImageClf18(nn.Module):
    """Image classifier: ResNet-18 embeddings flattened into one linear layer."""

    def __init__(self):
        super(ImageClf18, self).__init__()
        self.img_encoder = ImageEncoder18()
        # input is num_image_embeds vectors of img_hidden_sz, concatenated
        self.clf = nn.Linear(img_hidden_sz * num_image_embeds, n_classes)

    def forward(self, x):
        x = self.img_encoder(x)
        # BxNx512 -> Bx(N*512) before the classifier head
        x = torch.flatten(x, start_dim=1)
        out = self.clf(x)
        return out
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision... |
36255605610 | from .models import Doc, FileCabinet, Block, Reg
# from users.serializers import NotifSerializer
from users.models import Notif
from rest_framework import serializers
import datetime
from django.utils import timezone
class FileCabinetSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing a FileCabinet's id and name."""

    class Meta:
        model = FileCabinet
        fields = (
            'id',
            'name',
        )
class RegSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing a Reg's id and name."""

    class Meta:
        model = Reg
        fields = (
            'id',
            'name',
        )
class BlockSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing a Block's chained-hash fields."""

    class Meta:
        model = Block
        fields = (
            'id',
            'data',
            'hash_field',
            'previous_hash'
        )
class DocSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Doc documents with nested related objects.

    The nested serializers are read-oriented; the matching write-only
    *_id integer fields are used to attach relations on create/update.
    """

    file_cabinet = FileCabinetSerializer(required=False)
    file_cabinet_id = serializers.IntegerField(write_only=True, required=False)
    # NOTE(review): `reg` and `hash` reuse FileCabinetSerializer; RegSerializer
    # exists in this module and looks like the intended class -- confirm.
    reg = FileCabinetSerializer(required=False)
    reg_id = serializers.IntegerField(write_only=True, required=False)
    hash = FileCabinetSerializer(required=False)
    hash_id = serializers.IntegerField(write_only=True, required=False)

    class Meta:
        model = Doc
        fields = (
            'id',
            'reg',
            'title',
            'file',
            'size',
            'date',
            'common',
            'description',
            'signature',
            'signature_end',
            'hash',
            'cancel_description',
            'cancel_file',
            'file_cabinet',
            'file_cabinet_id',
            'reg_id',
            'hash_id'
        )

    def create(self, validated_data):
        # stamp the document with the creation time after the initial save
        now = timezone.now()
        doc = Doc.objects.create(**validated_data)
        doc.date = now
        doc.save()
        return doc

    def update(self, instance, validated_data):
        try:
            return super(DocSerializer, self).update(instance, validated_data)
        except Exception as e:
            print(str(e))
            content = {'error': 'Something else went wrong'}
            # NOTE(review): `Response` and `status` are not imported in this
            # module, so this line raises NameError if ever reached -- import
            # them from rest_framework (or re-raise) to make this path work.
            return Response(content, status=status.HTTP_404_NOT_FOUND)
| Spanri/edsm-v1 | docs/serializers.py | serializers.py | py | 2,214 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.FileCabinet",
"line_number": 10,
"usage_type":... |
16020153398 | import unittest
import sqlite3
import json
import os
import matplotlib.pyplot as plt
import requests
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import csv
# starter code
def setUpDatabase(db_name):
    """Open (or create) the SQLite database *db_name* next to this script.

    Returns a (cursor, connection) pair for the opened database.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    connection = sqlite3.connect(base_dir + '/' + db_name)
    return connection.cursor(), connection
def data_calc(cur, conn):
    """Join the three tables and compute per-country year-over-year changes.

    Returns a dict mapping country name to:
      - 'cases_difference': covid-case change Feb 2020 -> Feb 2021, expressed
        in units of 100,000 cases, rounded to 2 decimals
      - 'emissions_difference': CO2 emissions change, rounded to 2 decimals
    """
    cur.execute('SELECT Country_IDs.country, Covid_Cases.cases_feb_01_2020, Covid_Cases.cases_feb_01_2021, CO2_Emissions.emissions_2020, CO2_Emissions.emissions_2021 FROM Country_IDs JOIN Covid_Cases ON Country_IDs.country_id = Covid_Cases.country_id JOIN CO2_Emissions ON CO2_Emissions.country_id = Country_IDs.country_id')
    changes = {}
    for country, cases_2020, cases_2021, co2_2020, co2_2021 in cur.fetchall():
        changes[country] = {
            'cases_difference': round((cases_2021 - cases_2020) / 100000, 2),
            'emissions_difference': round(co2_2021 - co2_2020, 2),
        }
    return changes
def emissions_calc(cur, conn):
    """Per-country CO2 emissions change between 2020 and 2021.

    Returns {country: {'difference': emissions_2021 - emissions_2020}}, with
    the difference rounded to 2 decimals.
    """
    # dead locals removed: `lst = []` was immediately overwritten by
    # fetchall(), and `total_change = 0` was never used.
    cur.execute('SELECT Country_IDs.country, CO2_Emissions.emissions_2020, CO2_Emissions.emissions_2021 FROM Country_IDs JOIN CO2_Emissions ON CO2_Emissions.country_id = Country_IDs.country_id')
    data = {}
    for country, e_2020, e_2021 in cur.fetchall():
        data[country] = {'difference': round(e_2021 - e_2020, 2)}
    return data
def cases_calc(cur, conn):
    """Per-country covid-case change between Feb 2020 and Feb 2021.

    Returns {country: {'difference': cases_2021 - cases_2020}}, with the
    difference rounded to 2 decimals.
    """
    # dead locals removed: `lst = []` was immediately overwritten by
    # fetchall(), and `total_change = 0` was never used.
    cur.execute('SELECT Country_IDs.country, Covid_Cases.cases_feb_01_2020, Covid_Cases.cases_feb_01_2021 FROM Country_IDs JOIN Covid_Cases ON Covid_Cases.country_id = Country_IDs.country_id')
    data = {}
    for country, c_2020, c_2021 in cur.fetchall():
        data[country] = {'difference': round(c_2021 - c_2020, 2)}
    return data
def scatter_plot_vis(data):
    """Render an interactive Plotly scatter of covid-case change vs
    CO2-emission change for the first 10 countries in *data*.

    *data* is the dict produced by data_calc(): country ->
    {'cases_difference', 'emissions_difference'}.
    """
    country_lst = []
    emissions_lst = []
    covid_cases_lst = []
    for item in data:
        country_lst.append(item)
    # only plot the first 10 countries (dict insertion order)
    country_lst = country_lst[0:10]
    for country in country_lst:
        covid_cases_lst.append(data[country]['cases_difference'])
    for country in country_lst:
        emissions_lst.append(data[country]['emissions_difference'])
    scatter_data = {
        'Change in Covid Cases (# of cases)' : covid_cases_lst,
        'Change in CO2 Emissions (mtons)' : emissions_lst,
        'Country' : country_lst,
        # scalar broadcast by pandas to every row: uniform marker size
        'Size' : 5,
    }
    df = pd.DataFrame(scatter_data)
    fig = px.scatter(
        df, x= 'Change in CO2 Emissions (mtons)',
        y = 'Change in Covid Cases (# of cases)',
        title = "February 2020 - February 2021",
        hover_data = ['Country'],
        size = 'Size'
    )
    fig.update_layout(
        font_family = 'Courier New',
        title_font_family = 'Times New Roman'
    )
    fig.show()
def co2_covid_vis(data):
    """Grouped Plotly bar chart comparing covid-case change (per 100,000
    cases) and CO2-emission change (Mtons) for the first 10 countries in
    *data* (the dict produced by data_calc())."""
    country_lst = []
    emissions_lst = []
    covid_cases_lst = []
    for item in data:
        country_lst.append(item)
    # get the top emissions for 2021
    country_lst = country_lst[0:10]
    for country in country_lst:
        covid_cases_lst.append(data[country]['cases_difference'])
        emissions_lst.append(data[country]['emissions_difference'])
    # one bar trace per metric, grouped side by side per country
    fig = go.Figure(data = [
        go.Bar(name = "Covid Cases Change (in 100,000 cases)", x = country_lst, y = covid_cases_lst, marker_color = 'rgb(255, 127, 80) '),
        go.Bar(name = "Emissions Change (in Mtons)", x = country_lst, y = emissions_lst, marker_color = 'rgb(100, 149, 237)')])
    fig.update_layout(barmode='group')
    fig.show()
def pie_chart(data, title):
    """Render a Plotly pie chart of the first 10 countries' differences.

    *data* maps country -> {'difference': value}, as produced by
    emissions_calc()/cases_calc(); *title* is the chart heading.
    """
    countries_lst = []
    percent_lst = []
    for item in data:
        countries_lst.append(item)
    # only chart the first 10 countries (dict insertion order)
    countries_lst = countries_lst[0:10]
    for country in countries_lst:
        percent_lst.append(data[country]['difference'])
    # NOTE(review): differences can be negative; plotly pies treat values as
    # magnitudes, so mixed signs may mislead -- confirm the inputs' range.
    fig = px.pie(values=percent_lst, names=countries_lst, title=title)
    fig.show()
def main():
    """Drive the full pipeline: query the database, render the four
    visualisations, and dump the joined calculations to calculations.csv."""
    cur, conn = setUpDatabase('api_data.db')

    co2_covid_data = data_calc(cur, conn)
    co2_covid_vis(co2_covid_data)
    scatter_plot_vis(co2_covid_data)

    emissions_p_data = emissions_calc(cur, conn)
    pie_chart(emissions_p_data, 'CO2 Emissions Change by Country (as a percent of total change) Feb 2020 - Feb 2021')

    cases_p_data = cases_calc(cur, conn)
    pie_chart(cases_p_data, 'Total Covid Cases Change (as a percent of total change) by Country Feb 2020 - Feb 2021')

    # BUG FIX: removed a dead pre-pass that built `all_lst` -- its result was
    # never used, and `small_lst` was not reset per country, so every entry
    # aliased one ever-growing list.
    column_names = ['country', 'cases_difference', 'emissions_difference']
    # newline='' per the csv module docs so rows aren't double-spaced on Windows
    with open('calculations.csv', 'w', newline='') as csvfile:
        write = csv.writer(csvfile)
        write.writerow(column_names)
        for country, diffs in co2_covid_data.items():
            write.writerow([country, diffs['cases_difference'], diffs['emissions_difference']])

    conn.close()
if __name__ == "__main__":
    main()
    # NOTE(review): this module defines no unittest.TestCase classes, so this
    # run discovers nothing -- presumably leftover template scaffolding.
    # unittest.main() also parses sys.argv and exits the process when done.
    unittest.main(verbosity=2)
| lbibbo6012/2022-finalproject | calculate.py | calculate.py | py | 5,532 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"l... |
8285806055 | import logging
import sqlite3
from sqlite3 import Error
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class RecordsHandler:
    """SQLite-backed store for simple person records.

    Opens (or creates) the database on construction and ensures the
    ``Records`` table exists. SECURITY FIX: all queries now bind values with
    '?' placeholders instead of interpolating them into the SQL string, which
    was vulnerable to SQL injection and broke on values containing quotes.
    """

    def __init__(self, db_file):
        self.sql_create_projects_table = """ CREATE TABLE IF NOT EXISTS Records (
                                        id integer PRIMARY KEY,
                                        name text NOT NULL,
                                        email text NOT NULL,
                                        age text NOT NULL,
                                        origin text NOT NULL
                                    ); """
        self.create_connection(db_file)

    def create_connection(self, db_file):
        """ create a database connection to a SQLite database """
        self.conn = None
        self.conn = sqlite3.connect(db_file)
        logging.info('Connection established to ' + db_file)
        self.create_table(self.sql_create_projects_table)

    def close_connection(self):
        # safe to call even if the connection was never opened
        if self.conn:
            self.conn.close()
            logging.info('Connection closed')

    def create_table(self, create_table_sql):
        """create a table from the create_table_sql statement
        :param conn: Connection object
        :param create_table_sql: a CREATE TABLE statement
        :return:
        """
        c = self.conn.cursor()
        c.execute(create_table_sql)
        self.conn.commit()
        logging.info('Creating Records table')

    def add_record(self, name="", email="", age="", origin=""):
        """Insert a new record; values are bound, not interpolated."""
        query = "INSERT INTO Records (name, email, age, origin) VALUES (?, ?, ?, ?)"
        c = self.conn.cursor()
        c.execute(query, (name, email, age, origin))
        self.conn.commit()
        logging.info('New record added')

    def delete_record(self, record_id=-1):
        """Delete the record with the given primary key (no-op if absent)."""
        c = self.conn.cursor()
        c.execute("DELETE FROM Records WHERE id=?", (record_id,))
        self.conn.commit()
        logging.info('Record with id ' + str(record_id) + ' deleted.')

    def look(self, email="", age=""):
        """Return all rows matching both *email* and *age*."""
        c = self.conn.cursor()
        c.execute("SELECT * FROM Records WHERE email=? AND age=?", (email, age))
        return c.fetchall()

    def list_all(self):
        """Print every row in the Records table."""
        query = "SELECT * FROM Records"
        results = []
        c = self.conn.cursor()
        c.execute(query)
        results.extend(c.fetchall())
        # loop through the rows
        for row in results:
            print(row)

    def delete_all(self):
        """Remove every row from the Records table."""
        c = self.conn.cursor()
        c.execute("DELETE FROM Records")
        self.conn.commit()
| pauligb/TC4002.1_Analisis_Diseno | Lab3/src/records_handler.py | records_handler.py | py | 2,868 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.info",
... |
40885550378 | import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Tuple, Union, IO
from hashlib import sha256
from nlp_architect import LIBRARY_OUT
from nlp_architect.utils.io import load_json_file
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
MODEL_CACHE = LIBRARY_OUT / "pretrained_models"
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    Raises FileNotFoundError for a missing local path and ValueError for
    an argument that is neither an http(s) URL nor a local path.
    """
    if cache_dir is None:
        # Fall back to the library-wide pretrained-model cache.
        # (The original also had a dead `else: cache_dir = cache_dir` no-op.)
        cache_dir = MODEL_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ("http", "https"):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        print("File already exists. No further processing needed.")
        return url_or_filename
    if parsed.scheme == "":
        # File, but it doesn't exist.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a repeatable cache filename.

    Zip archives are renamed to the sha256 hex digest of the url, with the
    sha256 of `etag` appended after a period when an etag is given.  Any
    other url simply keeps its final path component (etag ignored).
    """
    basename = url.split("/")[-1]
    if not basename.endswith("zip"):
        return basename
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename = "{}.{}".format(filename, sha256(etag.encode("utf-8")).hexdigest())
    return filename
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = MODEL_CACHE
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + ".json"
    # Validate the cached file first, then its sidecar metadata.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise FileNotFoundError("file {} not found".format(required))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def http_get(url: str, temp_file: IO) -> None:
    """Stream the resource at *url* into *temp_file* in 1 KiB chunks."""
    response = requests.get(url, stream=True)
    for block in response.iter_content(chunk_size=1024):
        # skip keep-alive chunks, which arrive as empty bytes
        if block:
            temp_file.write(block)
def get_from_cache(url: str, cache_dir: str = None) -> Tuple[str, bool]:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Returns a ``(cache_path, need_downloading)`` tuple (the original ``-> str``
    annotation was wrong); the flag tells the caller whether a fresh download
    actually happened.
    """
    if cache_dir is None:
        cache_dir = MODEL_CACHE
    os.makedirs(cache_dir, exist_ok=True)
    # HEAD first: cheap way to learn the ETag without downloading the body.
    response = requests.head(url, allow_redirects=True)
    if response.status_code != 200:
        raise IOError(
            "HEAD request failed for url {} with status code {}".format(url, response.status_code)
        )
    etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    need_downloading = True
    if os.path.exists(cache_path):
        # check if etag has changed comparing with the metadata
        # NOTE: zip archives store metadata as "<hash>.json"; other files as
        # "<name>_meta_.json" -- the write path below mirrors this.
        if url.split("/")[-1].endswith("zip"):
            meta_path = cache_path + ".json"
        else:
            meta_path = cache_path + "_meta_" + ".json"
        meta = load_json_file(meta_path)
        if meta["etag"] == etag:
            print("file already present")
            need_downloading = False
    if need_downloading:
        print("File not present or etag changed")
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {"url": url, "etag": etag}
            if url.split("/")[-1].endswith("zip"):
                meta_path = cache_path + ".json"
            else:
                meta_path = cache_path + "_meta_" + ".json"
            with open(meta_path, "w") as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path, need_downloading
| IntelLabs/nlp-architect | nlp_architect/utils/file_cache.py | file_cache.py | py | 5,769 | python | en | code | 2,921 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "nlp_architect.LIBRARY_OUT",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pathlib.Path... |
13225956088 | import xml.sax
from itertools import accumulate
import logging
import os
from collections import OrderedDict
class XMLHandler(xml.sax.ContentHandler):
    """SAX handler that captures the fields of ``ns0:transaction`` elements.

    Each recognised child tag is mirrored into an instance attribute while
    parsing, and its value is written to the DEBUG log when the element
    closes.  The log labels are consumed later by ``covert_xml_to_list``.
    """

    # tag -> (instance attribute, label used in the debug log)
    _FIELDS = {
        "ns0:date": ("date", "original_date:"),
        "ns0:post_date": ("post_date", "post_date:"),
        "ns0:debit_credit_flag": ("debit_credit_flag", "debit_credit_flag:"),
        "ns0:response_code": ("response_code", "response_code:"),
        "ns0:description": ("description", "description:"),
        "ns0:txn_reference_id": ("txn_reference_id", "txn_reference_id:"),
        "ns0:currency": ("currency", "currency_1:"),
        "ns0:amount": ("amount", "amount_1:"),
        "ns0:source_currency": ("source_currency", "source_currency:"),
        "ns0:source_amount": ("source_amount", "source_amount:"),
        "ns0:auth_code": ("auth_code", "auth_code:"),
        "ns0:running_balance": ("running_balance", "running_balance:"),
        "ns0:transaction_code": ("transaction_code", "transaction_code:"),
    }

    def __init__(self):
        self.CurrentData = ""
        # every captured field starts out as the empty string
        for attr, _label in self._FIELDS.values():
            setattr(self, attr, "")

    # Call when an element starts
    def startElement(self, tag, attrs):
        self.CurrentData = tag
        if tag == "ns0:transaction":
            logging.debug("*********transaction*************")

    # Call when an elements ends
    def endElement(self, tag):
        field = self._FIELDS.get(self.CurrentData)
        if field is not None:
            attr, label = field
            logging.debug(label + getattr(self, attr))
        self.CurrentData = ""

    # Call when a character is read
    def characters(self, content):
        field = self._FIELDS.get(self.CurrentData)
        if field is not None:
            setattr(self, field[0], content)
def covert_xml_to_list():
    """Parse the debug log written by XMLHandler back into transaction rows.

    Each row is a 13-element list:
    [date, post_date, debit_credit_flag, response_code, description,
     txn_reference_id, currency, amount, source_currency, source_amount,
     auth_code, running_balance, transaction_code]
    Fields never seen for a transaction stay as the integer 0.

    Bug fix: the original had a duplicated (unreachable) "source_amount:"
    branch which left auth_code/running_balance/transaction_code at indices
    11-13 while the report writer reads indices 10-12, so those three fields
    were reported wrongly.  The fields now land on the expected indices.
    """
    # log label -> index of the field in the row (checked in this order)
    _LABELS = [
        ("original_date:", 0),
        ("post_date:", 1),
        ("debit_credit_flag:", 2),
        ("response_code:", 3),
        ("description:", 4),
        ("txn_reference_id:", 5),
        ("currency_1:", 6),
        ("amount_1:", 7),
        ("source_currency:", 8),
        ("source_amount:", 9),
        ("auth_code:", 10),
        ("running_balance:", 11),
        ("transaction_code:", 12),
    ]
    # Read file
    with open(log, 'r') as f:
        lines = f.readlines()
    final_list = []
    create_list = []
    for line in lines:
        text = str(line)
        if "xml file name:" in text:
            continue
        if "***transaction**" in text:
            # a new transaction begins: flush the previous row
            # (the very first flush appends an empty list, which callers
            # filter out -- same as the original behaviour)
            final_list.append(create_list)
            create_list = [0] * 13
        else:
            for label, index in _LABELS:
                if label in text:
                    create_list[index] = str(text.split(label)[1].strip('\n'))
                    break
    final_list.append(create_list)
    return final_list
def remove_the_file_content():
    """Empty the shared log file so the next parse starts clean.

    Fix: the original opened the file without closing it (leaked handle);
    the with-statement guarantees closure.
    """
    with open(log, 'r+') as f:
        f.truncate(0)
def find_duplicate_transactions(list_name):
    """Return every transaction that occurs more than once in *list_name*.

    Each duplicated row is returned once per occurrence, preserving input
    order -- the same contract as the original, but in a single O(n) pass
    instead of calling ``list.count`` per element (O(n^2)).
    """
    from collections import Counter
    # rows are lists (unhashable), so count their tuple form
    occurrences = Counter(tuple(row) for row in list_name)
    return [row for row in list_name if occurrences[tuple(row)] > 1]
def checkSubset(list1, list2):
    """True when every element of *list2* also appears in *list1*."""
    return all(item in list1 for item in list2)
def Remove(listname):
    """Drop duplicate rows from *listname*, keeping the first occurrence of each."""
    seen = set()
    unique_rows = []
    for row in listname:
        fingerprint = tuple(row)          # lists are unhashable; key on tuples
        if fingerprint not in seen:
            seen.add(fingerprint)
            unique_rows.append(row)
    return unique_rows
def non_match_elements(first, second):
    """Return the elements of *first* that do not appear in *second*."""
    return [element for element in first if element not in second]
def identify_non_match(list_c, list_d):
    """Print the elements common to both lists (despite the name, this is an
    intersection, ordered as in *list_c*)."""
    common = [value for value in list_c if value in list_d]
    print(common)
# ---------------------------------------------------------------------------
# Script body: parse both XML files, then compare their transactions.
# ---------------------------------------------------------------------------

def _write_transaction_details(out, header, transactions):
    """Write *header* followed by the 13 labelled fields of each transaction.

    Factored out of four near-identical copy-pasted report loops.
    """
    out.write(header)
    for i in transactions:
        out.write("\n")
        out.write("date :" + i[0] + "\n")
        out.write("post_date :" + i[1] + "\n")
        out.write("debit_credit_flag :" + i[2] + "\n")
        out.write("response_code :" + i[3] + "\n")
        out.write("description :" + i[4] + "\n")
        out.write("txn_reference_id :" + i[5] + "\n")
        out.write("currency :" + i[6] + "\n")
        out.write("amount :" + i[7] + "\n")
        out.write("source_currency :" + i[8] + "\n")
        out.write("source_amount :" + i[9] + "\n")
        out.write("auth_code :" + str(i[10]) + "\n")
        out.write("running_balance :" + i[11] + "\n")
        out.write("transaction_code :" + i[12] + "\n")

# log file shared with covert_xml_to_list()/remove_the_file_content()
log = r".\transactions.log"
logging.basicConfig(filename=log,level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
f = open("output.txt", "w+")
f.write("**********TEST NAME : COMPARE THE TRANSACTIONS IN TWO XML FILES**********\n")
# create an XMLReader
parser = xml.sax.make_parser()
# turn off namespaces
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the default ContextHandler
Handler = XMLHandler()
parser.setContentHandler(Handler)
transaction_list_in_file1 = []
transaction_list_in_file2 = []
xml_file_input = [str(r"C:\Users\Muthukumar\Documents\GitHub\Automation\Source_File.xml"), str(r"C:\Users\Muthukumar\Documents\GitHub\Automation\Target_File.xml")]
# Parse each file; the handler logs field values, which covert_xml_to_list()
# then reads back from the log.  The log is truncated between files.
parser.parse(str(xml_file_input[0]))
transaction_list_in_file1 = covert_xml_to_list()
remove_the_file_content()
parser.parse(str(xml_file_input[1]))
transaction_list_in_file2 = covert_xml_to_list()
remove_the_file_content()
# drop the empty placeholder rows produced by the log parser
transaction_list_in_file1 = [ele for ele in transaction_list_in_file1 if ele != []]
transaction_list_in_file2 = [ele for ele in transaction_list_in_file2 if ele != []]
#Identify the Duplicate Transactions
f.write("============TEST 1 : Identify the duplicate transaction============\n")
duplicate_transactions_set1 = find_duplicate_transactions(transaction_list_in_file1)
if len(duplicate_transactions_set1) > 0:
    _write_transaction_details(f, "\nDuplicate Transactions in Source File\n", duplicate_transactions_set1)
duplicate_transactions_set2 = find_duplicate_transactions(transaction_list_in_file2)
if len(duplicate_transactions_set2) > 0:
    _write_transaction_details(f, "\nDuplicate Transactions in Target File\n", duplicate_transactions_set2)
if len(duplicate_transactions_set1) == 0 and len(duplicate_transactions_set2) == 0:
    f.write("TEST RESULT : PASS - No duplicate transactions in both the file\n")
else:
    f.write("TEST RESULT : FAIL - Duplicates transactions are identified\n")
f.write("============TEST 1 : Completed============\n\n")
#Verify all the transactions in file1 in file2 and file2 in file1
f.write("============TEST 2 : Verify the transactions in both the files are matching============\n")
unique_transac_list1 = Remove(transaction_list_in_file1)
unique_transac_list2 = Remove(transaction_list_in_file2)
# NOTE(review): checkSubset only verifies unique_transac_list2 is a subset of
# unique_transac_list1; transactions present only in the source file would
# still report PASS -- confirm this asymmetry is intended.
compared = checkSubset(unique_transac_list1, unique_transac_list2)
if not compared:
    non_matched_1 = non_match_elements(unique_transac_list1, unique_transac_list2)
    if len(non_matched_1) > 0:
        _write_transaction_details(f, "\nNon matched Transactions in Source File\n\n", non_matched_1)
    non_matched_2 = non_match_elements(unique_transac_list2, unique_transac_list1)
    if len(non_matched_2) > 0:
        _write_transaction_details(f, "\nNon matched Transactions in Target File\n\n", non_matched_2)
    f.write("\nTEST RESULT : FAIL - The transactions in both the files are not matched\n")
else:
    f.write("\nTEST RESULT : PASS - The transactions in both the file are matching\n")
f.write("============TEST 2 : Completed============\n\n")
f.close()
# Re-render the plain-text report as a minimal HTML page.
contents = open(r"./output.txt","r")
with open("xml_Compare.html", "w") as e:
    for lines in contents.readlines():
        e.write("<pre>" + lines + "</pre> <br>\n")
| cathrinejchristy/Automation | XmlCompare_test.py | XmlCompare_test.py | py | 13,268 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "xml.sax.sax",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "xml.sax",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number... |
678009931 | import os
import sys
sys.path.append('..')
sys.path.append('../..')
import argparse
import utils
from tsp_helper import *
from student_utils import *
OUTPUT_FILENAME = "naive_output.txt"
"""
======================================================================
Complete the following function.
======================================================================
"""
def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):
    """
    Write your algorithm here.
    Input:
        list_of_locations: A list of locations such that node i of the graph corresponds to name at index i of the list
        list_of_homes: A list of homes
        starting_car_location: The name of the starting location for the car
        adjacency_matrix: The adjacency matrix from the input file
    Output:
        A list of locations representing the car path
        A dictionary mapping drop-off location to a list of homes of TAs that got off at that particular location
        NOTE: both outputs should be in terms of indices not the names of the locations themselves
    """
    # Ensure that the starting location is at the start.
    homes_and_start = list_of_homes.copy()
    if starting_car_location not in list_of_homes:
        homes_and_start.insert(0, starting_car_location)
    else:
        homes_and_start.remove(starting_car_location)
        homes_and_start.insert(0, starting_car_location)
    # Initialize storage structs (see below)
    sp_lookup = {}            # (i, j) -> tuple of node indices on the shortest i->j path
    only_tas_adjacency = []   # pairwise shortest-path costs between TA homes (+ start)
    # Get shortest paths between all houses w. networkx
    G, error = adjacency_matrix_to_graph(adjacency_matrix)
    for i, home1 in enumerate(homes_and_start):
        home1_index = list_of_locations.index(home1)
        home1_row = []
        for j, home2 in enumerate(homes_and_start):
            if home1 == home2:
                home1_row.append(0)
                continue
            home2_index = list_of_locations.index(home2)
            # NOTE(review): `nx` presumably arrives via the star imports of
            # tsp_helper / student_utils -- confirm networkx is in scope.
            sp = nx.shortest_path(G, source=home1_index, target=home2_index)
            cost = cost_of_path(sp, adjacency_matrix)
            # Construct lookup dict ((i, j), (A, ... , B))
            # i.e. ((int, int), (int, ..., int))
            sp_lookup[(i, j)] = (tuple(sp))
            home1_row.append(cost)
        # Create adjacency mtx of only TA homes and start, for use in ortools tsp solver
        only_tas_adjacency.append(home1_row)
    # Plus into tsp_solver, get back shortest tour of TA houses
    shortest_tour = get_tsp_result(only_tas_adjacency, 0)
    # Replace house paths w actual paths from dict
    car_tour = []
    is_start = True
    first_ind = 0
    for index in range(len(shortest_tour) - 1):
        i, j = shortest_tour[index], shortest_tour[index + 1]
        sp = sp_lookup[(i, j)]
        if is_start:
            # remember the very first node so the tour can be closed below
            first_ind = sp[0]
            is_start = False
        # drop the last node of each leg: it is the first node of the next leg
        sp_without_last = sp[:len(sp_lookup[(i, j)]) - 1]
        for elem in sp_without_last:
            car_tour.append(elem)
    # close the cycle back to the starting node
    car_tour.append(first_ind)
    # Each TA gets dropped off at their house.
    dropoff_dict = {}
    for location_ind in car_tour:
        if list_of_locations[location_ind] in list_of_homes:
            dropoff_dict[location_ind] = [location_ind]
    # Done
    print(car_tour)
    print(dropoff_dict)
    return car_tour, dropoff_dict
""" Lookup in adj matrix the cost of some path"""
def cost_of_path(path, adj_matrix):
    """Total weight of the edges joining consecutive vertices of *path*."""
    return sum(adj_matrix[u][v] for u, v in zip(path, path[1:]))
"""
======================================================================
No need to change any code below this line
======================================================================
"""
"""
Convert solution with path and dropoff_mapping in terms of indices
and write solution output in terms of names to path_to_file + file_number + '.out'
"""
def convertToFile(path, dropoff_mapping, path_to_file, list_locs):
    """
    Convert a solution given as indices into location names and write it to
    *path_to_file*: first the car path, then the drop-off count, then one
    line per drop-off location with its TA homes.
    """
    lines = [" ".join(list_locs[node] for node in path).strip()]
    lines.append(str(len(dropoff_mapping.keys())))
    for dropoff in dropoff_mapping.keys():
        names = [list_locs[dropoff]] + [list_locs[node] for node in dropoff_mapping[dropoff]]
        lines.append(" ".join(names).strip())
    utils.write_to_file(path_to_file, "\n".join(lines) + "\n")
def solve_from_file(input_file, output_directory, params=[]):
    """Parse *input_file*, run the solver, and write the output file into *output_directory*."""
    print('Processing', input_file)
    input_data = utils.read_file(input_file)
    num_of_locations, num_houses, list_locations, list_houses, starting_car_location, adjacency_matrix = data_parser(input_data)
    car_path, drop_offs = solve(list_locations, list_houses, starting_car_location, adjacency_matrix, params=params)
    basename, filename = os.path.split(input_file)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    output_file = utils.input_to_output(input_file, output_directory)
    convertToFile(car_path, drop_offs, output_file, list_locations)
def solve_all(input_directory, output_directory, params=[]):
    """Run the solver on every '.in' file found under *input_directory*."""
    for path in utils.get_files_with_extension(input_directory, 'in'):
        solve_from_file(path, output_directory, params=params)
if __name__=="__main__":
    # CLI entry point: solve a single input file, or (with --all) every .in
    # file in a directory; remaining args are forwarded to the solver.
    parser = argparse.ArgumentParser(description='Parsing arguments')
    parser.add_argument('--all', action='store_true', help='If specified, the solver is run on all files in the input directory. Else, it is run on just the given input file')
    parser.add_argument('input', type=str, help='The path to the input file or directory')
    parser.add_argument('output_directory', type=str, nargs='?', default='.', help='The path to the directory where the output should be written')
    parser.add_argument('params', nargs=argparse.REMAINDER, help='Extra arguments passed in')
    args = parser.parse_args()
    output_directory = args.output_directory
    if args.all:
        input_directory = args.input
        solve_all(input_directory, output_directory, params=args.params)
    else:
        input_file = args.input
        solve_from_file(input_file, output_directory, params=args.params)
| anniezhang21/carpool-problem | irrelevant/naive_solver.py | naive_solver.py | py | 6,380 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
205804702 | import torch
import torch.nn as nn
from transformers import BertModel
class SentimentClassifier(nn.Module):
    """Binary sentiment classifier: a (optionally frozen) BERT encoder
    followed by a single linear layer over the [CLS] representation."""

    def __init__(self, freeze_bert = True):
        super(SentimentClassifier, self).__init__()
        # Pretrained BERT encoder producing contextualized token vectors.
        self.bert_layer = BertModel.from_pretrained('bert-base-uncased')
        # Optionally freeze the encoder so only the head is trained.
        if freeze_bert:
            for param in self.bert_layer.parameters():
                param.requires_grad = False
        # Single-logit classification head over BERT's 768-dim [CLS] vector.
        self.cls_layer = nn.Linear(768, 1)

    def forward(self, seq, attn_masks):
        '''
        Inputs:
            -seq : Tensor of shape [B, T] containing token ids of sequences
            -attn_masks : Tensor of shape [B, T] containing attention masks to be used to avoid contibution of PAD tokens
        '''
        # Contextualized representations from BERT; keep only the [CLS] slot.
        cont_reps, _ = self.bert_layer(seq, attention_mask = attn_masks)
        cls_rep = cont_reps[:, 0]
        # Project the [CLS] vector to a single logit per example.
        return self.cls_layer(cls_rep)
| kabirahuja2431/FineTuneBERT | src/model.py | model.py | py | 1,191 | python | en | code | 44 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tr... |
37937108471 | from dataclasses import dataclass
import os
import sys
import time
from datetime import datetime
import tempfile
import shutil
import json
from fastapi import APIRouter, HTTPException, UploadFile, Response
from app.controllers.processors import image as PI
from app.core import logger
from app.utils import utilities as U
from app.utils import constants as C
router = APIRouter(prefix="/document")
# logger = logger.log_setup(cfg, "app")
# create a temporary directory for processing
temp_dirpath = tempfile.mkdtemp()
@router.post("/classify")
async def Classifer(file: UploadFile):
    """API route to Classifer Claims.

    Saves the uploaded file to a per-transaction temp folder, runs the image
    classifier on it, and returns a JSON payload with the probability and
    handwriting flag.  Raises HTTP 418 on any processing failure.
    """
    # Response skeleton; fields are filled in after processing succeeds.
    _response = {
        "Transaction_Info": {"Transaction_ID": "1234", "Received_TS": "", "Processed_TS": ""},
        "prob": 0.0,
        "is_hw": False,
    }
    # 1. Get the line items
    start_time = time.time()
    start_dt = datetime.now()
    # Transaction id derived from the timestamp (microsecond resolution).
    trxId = start_dt.strftime(r"%Y%m%d%H%M%S%f")
    logger.info(f"{U.prepend_msg(trxId)}{C.HEADER_10} Received Request {C.HEADER_10}")
    try:
        # Begin process
        logger.info(f"{U.prepend_msg(trxId)} - Begin Processing Request -> {start_time}")
        file_data = await file.read()
        # write the file to a temporary directory for processing
        upload_dir = os.path.join(temp_dirpath, trxId)
        os.makedirs(upload_dir, exist_ok=True)
        upload_filepath = os.path.join(upload_dir, file.filename)  # f'./uploads/{txn_id}_{file.filename}'
        with open(upload_filepath, "wb") as upload_file:
            upload_file.write(file_data)
        logger.debug(f"{U.set_log_prefix(trxid = trxId)} - File saved upload_filepath.")
        # Hand the saved image to the classifier pipeline.
        result = await PI.process_image(trxId=trxId, image_path=upload_filepath)
        end_time = time.time()
        logger.info(
            f"{U.prepend_msg(trxId)} - End Processing Request -> {end_time} - Time Taken -> {end_time - start_time}"
        )
        end_dt = datetime.now()
        _response["Transaction_Info"]["Transaction_ID"] = trxId
        _response["Transaction_Info"]["Received_TS"] = start_dt
        _response["Transaction_Info"]["Processed_TS"] = end_dt
        _response["Model_Version"] = result["Model_Version"]
        _response["prob"] = result["prob"]
        _response["is_hw"] = result["is_hw"]
    except Exception as e:
        logger.error(
            f"{U.prepend_msg(trxId)} - there's an error uploading file. Please try again!!!",
            exc_info=True,
        )
        # NOTE(review): error_message is built but never attached to the
        # HTTPException below -- confirm whether it should be the detail.
        error_message = {
            "Status": C.ERROR,
            "Error": "Error while Uploading file. TRY AGAIN!!",
            "Error-Message": str(e),
        }
        raise HTTPException(status_code=418, detail=f"Error - there's an error uploading file. Please try again!!!")
    logger.info(f"{U.prepend_msg(trxId)} - Response Request -> {_response}")
    # remove the temporary directory
    # NOTE(review): temp_dirpath is module-level and shared across requests;
    # removing it here leaves later requests writing into a deleted directory
    # -- verify this is intended.
    shutil.rmtree(temp_dirpath, ignore_errors=True)
    return Response(
        content=json.dumps(_response, indent=4, sort_keys=True, default=str),
        media_type="application/json",
    )
| KiranCHIHX/Handwritten | app/api/hw_classifier.py | hw_classifier.py | py | 3,047 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fastapi.UploadFile",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "time.time",
... |
35112699995 | # implements Kafka topic consumer functionality
import os
import threading
from confluent_kafka import Consumer, OFFSET_BEGINNING
import json
from producer import proceed_to_deliver
import base64
import subprocess
# Paths used by the update procedure and the marker the verifier appends.
UPDATE_CWD = "updater/"
STORAGE_PATH = "tmp/"
UPDATE_SCRIPT_NAME = "./update-and-restart-app.sh"
APP_PATH = "../app/"
VERIFIER_SEAL = 'verifier_seal'

def check_seal(payload: str):
    """True when *payload* carries the verifier's trailing seal marker."""
    return payload.endswith(VERIFIER_SEAL)

def strip_verifier_seal(sealed_payload: str):
    """Drop the trailing seal marker and return the bare payload."""
    return sealed_payload[:-len(VERIFIER_SEAL)]
def execute_update(id, details):
    """Decode, seal-check and persist an update blob, then run the update script.

    *details* must carry a base64-encoded 'blob' whose decoded text ends with
    the verifier seal; the sealed payload itself wraps a second base64 layer
    holding the raw update bytes.  Invalid encodings or missing seals abort
    with an [error] message instead of raising.
    """
    print(f"[info]===== EXECUTING UPDATE {id} ====")
    update_payload_b64 = details['blob']
    if details['update_file_encoding'] != 'base64':
        print('[error] unsupported blob encoding')
        return
    update_sealed_payload = base64.b64decode(update_payload_b64).decode()
    if check_seal(update_sealed_payload) is not True:
        print('[error] verifier seal is missing or invalid')
        return
    # seal verified: unwrap the inner base64 layer to get the raw update bytes
    payload = strip_verifier_seal(update_sealed_payload).encode()
    update_payload = base64.b64decode(payload)
    try:
        # with-statement closes the handle (the original's explicit close()
        # inside the with-block was redundant)
        with open(UPDATE_CWD + STORAGE_PATH + id, "wb") as f:
            f.write(update_payload)
        result = subprocess.call(['bash', '-c', f"{UPDATE_SCRIPT_NAME} {STORAGE_PATH+id} {APP_PATH}"], cwd=UPDATE_CWD)
        print(f"[info] update result code {result}")
    except Exception as e:
        print(f'[error] failed to execute update: {e}. cwd: {os.getcwd()}')
def handle_event(id: str, details: dict):
    """Dispatch one consumed event: request the blob for an update, or apply
    a blob that has arrived.  Unknown operations are ignored."""
    print(f"[info] handling event {id}, {details['source']}->{details['deliver_to']}: {details['operation']}")
    forward = False
    try:
        operation = details['operation']
        if operation == 'proceed_with_update':
            # manager asked for an update: fetch the blob from storage
            details['deliver_to'] = 'storage'
            details['operation'] = 'get_blob'
            forward = True
        elif operation == 'blob_content':
            # blob with an update arrived
            execute_update(id, details)
    except Exception as e:
        print(f"[error] failed to handle request: {e}")
    if forward:
        proceed_to_deliver(id, details)
def consumer_job(args, config):
    """Blocking poll loop: consume the 'updater' topic and dispatch each event.

    *args* supplies the --reset flag (rewind to the beginning on assign);
    *config* is passed straight to the confluent_kafka Consumer.  Runs until
    KeyboardInterrupt, then closes the consumer.
    """
    # Create Consumer instance
    verifier_consumer = Consumer(config)

    # Set up a callback to handle the '--reset' flag.
    def reset_offset(verifier_consumer, partitions):
        if args.reset:
            for p in partitions:
                p.offset = OFFSET_BEGINNING
            verifier_consumer.assign(partitions)

    # Subscribe to topic
    topic = "updater"
    verifier_consumer.subscribe([topic], on_assign=reset_offset)
    # Poll for new messages from Kafka and print them.
    try:
        while True:
            msg = verifier_consumer.poll(1.0)
            if msg is None:
                # Initial message consumption may take up to
                # `session.timeout.ms` for the consumer group to
                # rebalance and start consuming
                pass
            elif msg.error():
                print(f"[error] {msg.error()}")
            else:
                # Extract the (optional) key and value, and print.
                try:
                    id = msg.key().decode('utf-8')
                    details = json.loads(msg.value().decode('utf-8'))
                    handle_event(id, details)
                except Exception as e:
                    print(
                        f"Malformed event received from topic {topic}: {msg.value()}. {e}")
    except KeyboardInterrupt:
        pass
    finally:
        # Leave group and commit final offsets
        verifier_consumer.close()
def start_consumer(args, config):
    """Launch consumer_job(args, config) on a background thread."""
    threading.Thread(target=consumer_job, args=(args, config)).start()
if __name__ == '__main__':
    # NOTE(review): start_consumer requires (args, config) but is invoked with
    # a single argument here -- running this module directly raises TypeError.
    # Confirm the intended standalone invocation / supply a config.
    start_consumer(None)
| sergey-sobolev/secure-update | updater/consumer.py | consumer.py | py | 4,295 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "base64.b64decode",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"li... |
7231791491 | import json
from glob import glob
import yaml
def get_problems_by_name(response_file):
    """Map each place's permalink (falling back to its id) to the place's
    districting problems, read from the JSON dump at *response_file*."""
    with open(response_file) as source:
        places = json.load(source)
    name_to_problems = {}
    for place in places:
        key = place.get("permalink", place.get("id"))
        name_to_problems[key] = place["districtingProblems"]
    # every lookup key must be a usable string name
    assert all(isinstance(key, str) for key in name_to_problems)
    return name_to_problems
def add_problems(name_to_problems):
    """Write each data/*.yml record's districtingProblems back into its file,
    looked up by the record's id in name_to_problems."""
    for yml_path in glob("./data/*.yml"):
        print(yml_path)
        with open(yml_path) as fh:
            record = yaml.safe_load(fh)
        record["districtingProblems"] = name_to_problems[record["id"]]
        with open(yml_path, "w") as fh:
            yaml.safe_dump(record, fh)
if __name__ == "__main__":
name_to_problems = get_problems_by_name("../districtr/assets/data/response.json")
add_problems(name_to_problems)
| districtr/districtr-process | scripts/add_problems.py | add_problems.py | py | 974 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number":... |
28866132568 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, OneHotEncoder
from scipy import stats
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from imblearn.over_sampling import RandomOverSampler
# Data Preprocessing
df_train_clf = pd.read_csv('Classification/train_data.csv')
df_test_clf = pd.read_csv('Classification/test_data.csv')
# Categorical encoding
def one_hot_encode(df: pd.DataFrame, f_name: str):
    """One-hot encode column `f_name`, replacing it with columns
    f_name0..f_nameK-1. Returns a new DataFrame; `df` is not mutated."""
    raw_column = df[f_name].to_numpy().reshape(-1, 1)
    # NOTE(review): `sparse=` is deprecated in newer sklearn in favor of
    # `sparse_output=` - confirm the pinned sklearn version before changing.
    encoded_matrix = OneHotEncoder(sparse=False).fit_transform(raw_column)
    column_names = [f"{f_name}{i}" for i in range(encoded_matrix.shape[1])]
    dummies = pd.DataFrame(encoded_matrix, columns=column_names)
    return pd.concat([df.drop([f_name], axis=1), dummies], axis=1)
f = 'auto_bitrate_state'
ohe_train_clf = one_hot_encode(df_train_clf, f)
ohe_test_clf = one_hot_encode(df_test_clf, f)
def label_encode(df: pd.DataFrame, f_name: str):
    """Integer-encode column `f_name` in a copy of df (original untouched)."""
    encoded = df.copy()
    encoded[f_name] = LabelEncoder().fit_transform(df[f_name])
    return encoded
f = 'auto_fec_state'
prep_train_clf = label_encode(ohe_train_clf, f)
prep_test_clf = label_encode(ohe_test_clf, f)
def scale(df: pd.DataFrame):
    """Min-max scale every column to [0, 1], preserving the column names."""
    scaled_values = MinMaxScaler().fit_transform(df)
    return pd.DataFrame(scaled_values, columns=df.columns)
X_train_clf, X_test_clf = scale(prep_train_clf.drop(['stream_quality'], axis=1)), scale(
prep_test_clf.drop(['stream_quality'], axis=1))
y_train_clf, y_test_clf = prep_train_clf['stream_quality'], prep_test_clf['stream_quality']
# Feature scaling
df_train_reg: pd.DataFrame = pd.read_csv('Regression/train_data.csv')
df_test_reg: pd.DataFrame = pd.read_csv('Regression/test_data.csv')
reg_train_sc = scale(df_train_reg)
reg_test_sc = scale(df_test_reg)
X_train_reg, X_test_reg = reg_train_sc.drop(['target'], axis=1), reg_test_sc.drop(['target'], axis=1)
y_train_reg, y_test_reg = reg_train_sc['target'], reg_test_sc['target']
y_train_clf, y_test_clf = y_train_clf.astype(int), y_test_clf.astype(int)
# Dimensionality reduction (exact reasoning why I did it this way can be seen in ipynb)
X_train_reg, X_test_reg = X_train_reg.drop(['rtt_std'], axis=1), X_test_reg.drop(['rtt_std'], axis=1)
X_train_clf, X_test_clf = X_train_clf.drop(['auto_bitrate_state0'], axis=1), X_test_clf.drop(['auto_bitrate_state0'],
axis=1)
# Data visualization can be seen in the ipynb
# Regression model
model1 = LinearRegression()
model2 = Ridge()
model1.fit(X_train_reg, y_train_reg)
model2.fit(X_train_reg, y_train_reg)
y_pred_reg1 = model1.predict(X_test_reg)
y_pred_reg2 = model2.predict(X_test_reg)
# Finding best polynomial regression model
res = []
for i in range(2, 5):
poly = make_pipeline(PolynomialFeatures(i), LinearRegression())
poly.fit(X_train_reg, y_train_reg)
y_pred_reg3 = poly.predict(X_test_reg)
res.append(mean_squared_error(y_test_reg, y_pred_reg3))
print('----------------------------------------------------')
print(f'Results of Polynomial Regression: {res}')
# Logistic Regression model
logistic = LogisticRegression(penalty='l2', max_iter=120000)
logistic.fit(X_train_clf, y_train_clf)
y_pred_clf = logistic.predict(X_test_clf)
# Results evaluation
# Regression
model1 = LinearRegression()
model2 = Ridge()
model3 = make_pipeline(PolynomialFeatures(2), LinearRegression())
m1_cross = cross_val_score(model1, X_train_reg, y_train_reg, cv=5)
m2_cross = cross_val_score(model2, X_train_reg, y_train_reg, cv=5)
m3_cross = cross_val_score(model3, X_train_reg, y_train_reg, cv=5)
print('----------------------------------------------------')
print(f'Model 1 Cross Validation mean: {np.mean(m1_cross)}, std: {np.std(m1_cross)}')
print(f'Model 2 Cross Validation mean: {np.mean(m2_cross)}, std: {np.std(m2_cross)}')
print(f'Model 3 Cross Validation mean: {np.mean(m3_cross)}, std: {np.std(m3_cross)}')
print('----------------------------------------------------')
print(f'Model 1 (Linear) - MSE:{mean_squared_error(y_test_reg, y_pred_reg1)}')
print(f'Model 2 (Linear + Ridge) - MSE:{mean_squared_error(y_test_reg, y_pred_reg2)}')
print(f'Model 3 (Polynomial with degree {np.argmin(res) + 2}) - MSE:{res[np.argmin(res)]}')
# Classification
logistic = LogisticRegression(penalty='l2', max_iter=120000)
log_cross = cross_val_score(logistic, X_train_clf, y_train_clf, cv=5)
print('----------------------------------------------------')
print(f'Logistic Regression Cross Validation mean: {np.mean(log_cross)}, std: {np.std(log_cross)}')
print('----------------------------------------------------')
print(classification_report(y_test_clf, y_pred_clf))
# Outliers detection
# Finding outliers
X_train_clf_o = X_train_clf.copy()
X_train_clf_o['target'] = y_train_clf
z = np.abs(stats.zscore(X_train_clf))
X_train_clf_o = X_train_clf_o[(z < 3).all(axis=1)]
print('----------------------------------------------------')
print(f'Removed Outliers: {X_train_clf.shape[0] - X_train_clf_o.shape[0]}')
X_train_clf_o, y_train_clf_o = X_train_clf_o.drop('target', axis=1), X_train_clf_o['target']
smt = RandomOverSampler(sampling_strategy='minority')
X_res, y_res = smt.fit_resample(X_train_clf_o, y_train_clf_o)
logistic = LogisticRegression(penalty='l2', max_iter=120000)
logistic.fit(X_res, y_res)
print('----------------------------------------------------')
print(f'Number of classes: 0 - {sum(y_res == 0)}, 1 - {sum(y_res == 1)}')
log_cross = cross_val_score(logistic, X_res, y_res, cv=5)
print('----------------------------------------------------')
print(f'Logistic Regression with sampling Cross Validation mean: {np.mean(log_cross)}, std: {np.std(log_cross)}')
print('----------------------------------------------------')
y_pred = logistic.predict(X_test_clf)
print('After outliers detection')
print(classification_report(y_test_clf, y_pred))
print('Before outliers detection')
print(classification_report(y_test_clf, y_pred_clf))
| Pain122/MLAssignment1 | script.py | script.py | py | 6,330 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preproc... |
40922739466 | from typing import List
import numpy as np
from advent2022 import utils
DAY = 9
def load_and_parse_data(day: int, test: bool = False) -> List[str]:
    """Fetch the day's puzzle input and split each line on single spaces."""
    lines = utils.get_input(day, test)
    return [line.split(" ") for line in lines]
def new_pos(h, t):
    """Move tail knot `t` one step toward head `h` when they are no longer
    touching (Chebyshev distance > 1). Mutates and returns `t`."""
    touching = abs(h[0] - t[0]) <= 1 and abs(h[1] - t[1]) <= 1
    if not touching:
        for axis in (0, 1):
            if h[axis] != t[axis]:
                t[axis] += 1 if h[axis] > t[axis] else -1
    return t
def solve_rope(data, tail_length):
    """Simulate a rope with `tail_length` knots trailing the head.

    :param data: parsed moves, e.g. [["R", "4"], ["U", "2"], ...]
    :param tail_length: number of knots behind the head (1 for part 1, 9 for part 2)
    :return: number of distinct positions visited by the last knot

    Fixes over the previous version:
    - tracked positions in a 10000x10000 float64 array (~800 MB) that
      silently wrapped around for negative coordinates via numpy's
      negative indexing; a set of (row, col) tuples is exact and tiny.
    - an unknown direction left `idx`/`step` unbound (or reused the previous
      move's values); it now raises KeyError immediately.
    """
    # direction -> (axis, step): axis 0 is the row, axis 1 is the column.
    moves = {"U": (0, -1), "D": (0, 1), "L": (1, -1), "R": (1, 1)}

    def _follow(head, knot):
        # Move `knot` one step toward `head` if they stopped touching.
        if abs(head[0] - knot[0]) > 1 or abs(head[1] - knot[1]) > 1:
            for axis in (0, 1):
                if head[axis] != knot[axis]:
                    knot[axis] += 1 if head[axis] > knot[axis] else -1
        return knot

    head = [4, 0]
    tail = [[4, 0] for _ in range(tail_length)]
    visited = {tuple(tail[-1])}
    for instruction in data:
        direction, distance = instruction[0], int(instruction[1])
        axis, step = moves[direction]
        for _ in range(distance):
            head[axis] += step
            leader = head
            for knot in tail:
                _follow(leader, knot)
                leader = knot
            visited.add(tuple(tail[-1]))
    return len(visited)
def solve_part_1(data):
    """Part 1: count positions visited by the single tail knot."""
    return solve_rope(data, 1)
def solve_part_2(data):
    """Part 2: count positions visited by the last of nine tail knots."""
    return solve_rope(data, 9)
| c-m-hunt/advent-of-code-2022 | advent2022/day9.py | day9.py | py | 1,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "advent2022.utils.get_input",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "advent2022.utils",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
... |
15024245302 | import os
from tqdm import tqdm
import datetime
import pandas as pd
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO,
# datefmt='%d-%b-%y %H:%M:%S'
)
def get_date_df(file_folder, days):
    """Group the files in `file_folder` into fixed-length date buckets.

    File names are expected to start with a 10-character date (YYYY-MM-DD).
    A placeholder row is prepended for Jan 1st of the first year so the
    pd.Grouper buckets are anchored at the start of the year.

    :param file_folder: directory whose file names encode the article date
    :param days: pandas offset alias for the bucket size, e.g. "10D"
    :return: DataFrame with columns [date, file_name_list]; also written
             to sorted_df.csv as a side effect
    """
    process_bar = tqdm(os.listdir(file_folder))
    date_list = []
    name_list = []
    for file_name in process_bar:
        process_bar.set_description("Processing {}".format(file_name))
        # First 10 characters of the file name carry the date (YYYY-MM-DD).
        paper_date = file_name[:10]
        paper_name = file_name
        date_list.append(paper_date)
        name_list.append(paper_name)
    dates_list = [date_list, name_list]
    df = pd.DataFrame(dates_list)
    df = df.transpose()
    df.columns = ['date', 'file_name']
    df['date'] = pd.to_datetime(df['date'])
    df.sort_values(by=['date'], inplace=True)
    first_date = df.iloc[0]['date']
    first_year = first_date.year
    new_year = pd.Timestamp(f'{first_year}-01-01T00')
    if first_date == new_year:
        logging.info(f"The first day of year {first_year} already exists!")
    else:
        logging.info(f"Creating placeholder data for the first day of year {first_year}")
        # Prepend a synthetic Jan-1 row so grouping starts on the year
        # boundary; creat_data() later skips the placeholder bucket.
        df.loc[-1] = [new_year, 'placeholder.txt']
        df.index = df.index + 1
        df = df.sort_index()
    t = df.groupby(pd.Grouper(key="date", axis=0, freq=days, sort=True))['file_name'].apply(list).reset_index(
        name='file_name_list')
    t.to_csv("sorted_df.csv")
    return t
def creat_data(sorted_df, output_folder, file_folder):
    """Concatenate every bucket's source files into one <bucket-date>.txt.

    :param sorted_df: output of get_date_df (columns: date, file_name_list)
    :param output_folder: destination directory, created if missing
    :param file_folder: directory containing the per-article source files
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for idx, row in tqdm(sorted_df.iterrows()):
        if row['file_name_list']:
            date = row['date'].date()
            file_name_list = row['file_name_list']
            # Skip the synthetic year-start bucket created by get_date_df.
            # NOTE(review): this skips the whole bucket even when real files
            # were grouped together with the placeholder - confirm intended.
            if not file_name_list[0] == 'placeholder.txt':
                text_str = []
                for file_name in file_name_list:
                    file_path = os.path.join(file_folder, file_name)
                    with open(file_path, 'r', encoding='utf-8') as infile:
                        for line in infile:
                            text_str.append(line)
                out_file_path = os.path.join(output_folder, f"{date}.txt")
                with open(out_file_path, 'w', encoding='utf-8') as out_file:
                    out_file.write("".join(text_str))
def main():
    """Entry point: bucket the files in output_data into 10-day groups
    and write the concatenated buckets under data_by_10D."""
    # adjust the following variable values to get the desired output
    source_dir = "output_data"
    bucket_freq = "10D"
    target_dir = "data_by_10D"
    grouped = get_date_df(source_dir, bucket_freq)
    creat_data(grouped, target_dir, source_dir)
if __name__ == '__main__':
main()
| wenyuan-wu/corpus_preprocess_dong | sort_by_day.py | sort_by_day.py | py | 2,760 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_... |
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView

from zoo.forms import ZooSearchForm
from zoo.models import Category, Product
class CategoryListView(ListView):
    """Paginated product catalog with a simple name-substring search.

    Fixes over the previous version:
    - `get_context_data` was defined twice, so the second definition
      silently replaced the first and the pagination context was lost;
      the two bodies are merged into one method.
    - the paginator was built from `self.model.object_list`, an attribute
      that does not exist on the model (AttributeError); it now paginates
      the passed object_list (or the full queryset).
    - bare `except:` replaced with the specific paginator exceptions.
    """
    model = Product
    template_name = 'catalog.html'
    context_object_name = 'products'
    paginate_by = 12

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super().get_context_data(object_list=object_list, **kwargs)
        products = object_list if object_list is not None else self.model.objects.all()
        paginator = Paginator(products, self.paginate_by)
        page = self.request.GET.get('page') or 1
        try:
            context[self.context_object_name] = paginator.page(page)
        except (PageNotAnInteger, EmptyPage):
            # Fall back to the first page on a bad/missing page number.
            context[self.context_object_name] = paginator.page(1)
        context['object-count'] = self.model.objects.count()
        context['paginator'] = paginator
        # Keep the (possibly invalid) bound form so entered values survive;
        # an invalid submission is shown as a fresh form.
        form = ZooSearchForm(self.request.GET)
        if not form.is_valid():
            form = ZooSearchForm()
        context['form'] = form
        return context

    def get(self, request, *args, **kwargs):
        form = ZooSearchForm(self.request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            product = self.model.objects.filter(name__icontains=cd['search'])
        else:
            product = self.model.objects.all()
        return render(request, self.template_name, self.get_context_data(object_list=product))
class ZooView (ListView):
    """Landing page: lists all products."""
    model = Product
    context_object_name = 'product'
    template_name = 'main.html'
class Zoo2View (ListView):
    """Goods page: lists all categories."""
    model = Category
    context_object_name = 'category'
    template_name = 'tov.html'
def uslygi_view(request):
    """Render the static services ("uslygi") page."""
    return render(request, 'uslygi.html', {'page': 'uslygi'})
| Yurevtsev13Pavel/zoolavka | zoo/views.py | views.py | py | 2,283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "zoo.models.Product",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 21,
"usage_type": "call"
},
{
"a... |
18824343900 | import unittest
import importlib
import mock
import datetime
from oslo_utils import timeutils
evacuate_lbaas = importlib.import_module("neutron-evacuate-lbaasv2-agent")
class FakeSqlResult():
    """Stand-in SQL result: one fresh (alive) agent and one stale one."""
    def fetchall(self):
        # Row layout: [id, host, heartbeat]. The second row's heartbeat is
        # 100 s in the past, so the code under test should treat it as dead.
        return [
            ['1', 'healthy', timeutils.utcnow()],
            ['2', 'dead', timeutils.utcnow() - datetime.timedelta(seconds=100)]
        ]
class FakeSqlConnection():
    """Stand-in DB connection: every query yields a FakeSqlResult."""
    def execute(self, *args):
        return FakeSqlResult()
class TestEvacuateLbaasV2Agents(unittest.TestCase):
    """Unit tests for EvacuateLbaasV2Agent, using fake SQL plumbing."""
    def setUp(self):
        # Inject fakes so no real database connection is needed.
        self.evacuate_lbaas = evacuate_lbaas.EvacuateLbaasV2Agent()
        self.evacuate_lbaas.connection = FakeSqlConnection()
        self.evacuate_lbaas.host_to_evacuate = "evacuateme"
    def test_available_agents_exclude_dead_agents(self):
        # FakeSqlResult returns one live and one stale agent; only the
        # live one may be offered as a destination.
        self.assertEqual(
            [{'host': 'healthy', 'id': '1'}],
            self.evacuate_lbaas.available_destination_agents()
        )
    def test_reassing_single_lb_returns_one_agent(self):
        agents = [
            {'host': 'node1', 'id': '1'},
            {'host': 'node2', 'id': '2'},
            {'host': 'node3', 'id': '3'}
        ]
        loadbalancers = ['abc']
        res = self.evacuate_lbaas.reassign_loadbalancers(loadbalancers, agents)
        self.assertEqual(1, len(res))
    @mock.patch('neutron-evacuate-lbaasv2-agent.'
                'EvacuateLbaasV2Agent.loadbalancers_on_agent')
    @mock.patch('neutron-evacuate-lbaasv2-agent.'
                'RemoteLbaasV2Cleanup')
    def test_restarts_agents_using_crm_on_ha(self, mock_cleanup, mock_lbaas):
        mock_lbaas.return_value = ['lb1']
        evacuate_lbaas.cfg.CONF.set_override("use_crm", True)
        self.evacuate_lbaas.run()
        # With HA (use_crm=True) both source and destination agents must be
        # restarted through Pacemaker (crm), never through systemd.
        self.assertEqual(
            mock_cleanup.return_value.restart_lbaasv2_agent_crm.call_count,
            2
        )
        self.assertEqual(
            mock_cleanup.return_value.restart_lbaasv2_agent_systemd.call_count,
            0
        )
    @mock.patch('neutron-evacuate-lbaasv2-agent.'
                'EvacuateLbaasV2Agent.loadbalancers_on_agent')
    @mock.patch('neutron-evacuate-lbaasv2-agent.'
                'RemoteLbaasV2Cleanup')
    def test_restarts_agents_using_systemd_no_ha(self,
                                                 mock_cleanup,
                                                 mock_lbaas):
        mock_lbaas.return_value = ['lb1']
        evacuate_lbaas.cfg.CONF.set_override("use_crm", False)
        self.evacuate_lbaas.run()
        # Without HA the restarts must go through systemd instead of crm.
        self.assertEqual(
            mock_cleanup.return_value.restart_lbaasv2_agent_crm.call_count,
            0
        )
        self.assertEqual(
            mock_cleanup.return_value.restart_lbaasv2_agent_systemd.call_count,
            2
        )
| skazi0/cookbook-openstack-network | files/default/test-neutron-evacuate-lbaasv2-agent.py | test-neutron-evacuate-lbaasv2-agent.py | py | 2,753 | python | en | code | null | github-code | 36 | [
{
"api_name": "importlib.import_module",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "oslo_utils.timeutils.utcnow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "oslo_utils.timeutils",
"line_number": 14,
"usage_type": "name"
},
{
"api_name"... |
37823447895 | """Engi Init
Walks through the setup enabling a user to work with ENGI.
Usage:
engi init
engi (-h | --help)
engi --version
Options:
-h --help Show this screen
"""
from git import Repo
from gitsecrets import GitSecrets
import gnupg
import os
import re
import sys
from pathlib import Path
import subprocess
import tempfile
import urllib.request
from yaml import load, dump
from InquirerPy import inquirer
from InquirerPy.utils import color_print
from InquirerPy.validator import PathValidator
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
home_path = Path('.')
DOCKER_TEMPLATES='https://raw.githubusercontent.com/engi-network/docker-templates/master/'
gpg = gnupg.GPG()
ENGI_DIR = Path(".engi")
ENGI_YAML_FILE_NAME = "docker-compose.yml"
ENGI_YAML_FILE = ENGI_DIR.joinpath(ENGI_YAML_FILE_NAME)
ENGI_DOCKER_FILE_NAME = "tests.Dockerfile"
ENGI_DOCKER_FILE = ENGI_DIR.joinpath(ENGI_DOCKER_FILE_NAME)
ENGI_DOCKER_IGNORE_FILE_NAME = "tests.Dockerfile.dockerignore"
ENGI_DOCKER_IGNORE_FILE = ENGI_DIR.joinpath(ENGI_DOCKER_IGNORE_FILE_NAME)
def is_valid_email_address(email_address):
    """Loosely validate an email address: something@something.tld,
    with no extra '@' characters."""
    return re.fullmatch(r"[^@]+@[^@]+\.[^@]+", email_address) is not None
def download_framework_dockerfiles(framework):
    """Download the framework's Dockerfile, compose file and dockerignore
    templates into the .engi directory (created if missing)."""
    base_url = f'{DOCKER_TEMPLATES}/{framework}'
    ENGI_DIR.mkdir(parents=True, exist_ok=True)
    downloads = (
        (ENGI_DOCKER_FILE_NAME, ENGI_DOCKER_FILE),
        (ENGI_YAML_FILE_NAME, ENGI_YAML_FILE),
        (ENGI_DOCKER_IGNORE_FILE_NAME, ENGI_DOCKER_IGNORE_FILE),
    )
    for remote_name, local_path in downloads:
        urllib.request.urlretrieve(f'{base_url}/{remote_name}', local_path)
def setup_git_secrets():
    """Interactive git-secret setup: import the ENGI public key into GPG,
    then initialize git-secret and register the user's and CI's key ids.

    Exits the process if the git-secret binary is not installed.
    """
    # Fetch the ENGI public key into a temp file and import it into GPG.
    with tempfile.NamedTemporaryFile() as tmp:
        download = f'{DOCKER_TEMPLATES}/engi-pubkey'
        urllib.request.urlretrieve(download, tmp.name)
        gpg.import_keys_file(tmp.name)
    pubkey_id = inquirer.text(
        message="Git secret pubkey id (email):",
        validate=is_valid_email_address,
        invalid_message="Please enter valid email"
    ).execute()
    try:
        subprocess.run(['git-secret', 'init'])
        subprocess.run(['git-secret', 'tell', pubkey_id])
        # CI must also be able to decrypt the secrets.
        subprocess.run(['git-secret', 'tell', 'circleci@engi.network'])
    except FileNotFoundError:
        color_print(formatted_text=[("class:aa", "git-secret doesn't seem to be installed")], style={"aa": "red"})
        sys.exit()
def main():
    """Walk the user through ENGI setup: pick a test framework, download
    the matching Docker templates into .engi/, and optionally configure
    a git-secret store."""
    framework = inquirer.select(
        message="Select the test output format to parse",
        choices=["pytest", "jest", "dotnet"],
    ).execute()
    # Storybook design tests use a dedicated jest template.
    if framework == 'jest' and inquirer.confirm(message="Are you using design tests?").execute():
        framework = "jest-storybook"
    # TODO: handling pre-existing containerization code (patch, guide)
    print('Preparing to generate Docker files...')
    if os.path.exists('./Dockerfile') or os.path.exists('./docker-compose.yml'):
        print('You already have Docker configured. You need to update your Dockerfile and docker-compose.yml manually. You Skipping this step.')
    else:
        print('Downloading containerization files for and placing them in .engi directory', framework)
        download_framework_dockerfiles(framework)
    #tests = inquirer.filepath(
    # message="Test file path:",
    # default=home_path,
    # validate=PathValidator(is_dir=True, message="Filepath"),
    #).execute()
    secret = inquirer.confirm(message="Do you want to set up a secret store (e.g. for hiding test data)?").execute()
    if secret:
        setup_git_secrets()
if __name__ == "__main__":
main()
| engi-network/cli | src/engi_cli/engi_init.py | engi_init.py | py | 3,737 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "gnupg.GPG",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.fullmatch",
"line_number":... |
41892613658 | from django.db.models.signals import pre_save
class Profile(models.Model):
    """Profile model used in the pre_save signal demo.

    NOTE(review): `models`, `receiver` and `User` are referenced but not
    imported in this snippet - confirm the missing imports upstream.
    """
    # Flag toggled by the pre_save hook below; False for fresh rows.
    to_receive_new_user = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
@receiver(pre_save, sender=User)
def update_profile(sender, instance, **kwargs):
    """Mark the instance just before it is saved.

    Bug fix: the previous version called instance.save() here, which
    re-fires pre_save and recurses until the stack overflows. Inside a
    pre_save receiver it is enough to mutate `instance`; the save that
    triggered the signal persists the change.
    """
    instance.to_receive_new_user = True
    return instance
| Horlawhumy-dev/earthly-django-signals-article | pre_save.py | pre_save.py | py | 367 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.db.models.signals.pre_save",
"line_number": 7,
"usage_type": "argument"
}
] |
31279498412 | from minio import Minio
import redis
import time
import json
APP = "wc"
minio_client = Minio("minio-service.yasb-mapreduce-db.svc.cluster.local:9000", access_key="admin123", secret_key="admin123", secure=False)
redis_client = redis.Redis(host="redis.yasb-mapreduce-db.svc.cluster.local", port=6379)
def handle(req):
    """Map step of word count: read one input part from MinIO, count its
    words, partition the counts by reducer id and stage them in Redis.

    Args:
        req (str): JSON body with input_name, input_part and reduce_num.

    Returns:
        str: JSON with read/compute/store phase timestamps.

    Fixes over the previous version:
    - empty partitions raised IndexError on `shuffle[i][-1]`.
    - partition strings were built with quadratic `+=`; now list + join.
    - builtin hash() is salted per process, so separate mapper invocations
      partitioned the same word differently; zlib.crc32 is stable.
    """
    import zlib  # local import: stable, process-independent partition hash

    event = json.loads(req)
    input_name = event["input_name"]
    input_part = int(event["input_part"])
    reduce_num = int(event["reduce_num"])

    read_start = time.time()
    input_object = minio_client.get_object(input_name, "part-%d" % input_part)
    input_data = input_object.data.decode("utf-8")
    read_end = time.time()

    # Count alphabetic words across all lines.
    counts = {}
    for line in input_data.split("\n"):
        for word in line.strip().split(" "):
            if word.isalpha():
                counts[word] = counts.get(word, 0) + 1

    # Partition the counts across reducers.
    partitions = [[] for _ in range(reduce_num)]
    for word, count in counts.items():
        reduce_id = zlib.crc32(word.encode("utf-8")) % reduce_num
        partitions[reduce_id].append("%s:%d" % (word, count))
    shuffle = {i: ";".join(parts) for i, parts in enumerate(partitions)}
    com_end = time.time()

    for i in range(reduce_num):
        if shuffle[i]:  # skip empty partitions (previously IndexError)
            name = "%s:%s:%d:%d" % (input_name, APP, input_part, i)
            redis_client.set(name, shuffle[i])
    store_end = time.time()

    result = {
        "read_start": read_start,
        "read_end": read_end,
        "com_end": com_end,
        "store_end": store_end
    }
    return json.dumps(result)
| tju-hwh/Yet-Another-Serverless-Benchmark | mapreduce/openfaas/wc/functions/wc-mapper/handler.py | handler.py | py | 2,118 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "minio.Minio",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 21,
... |
74112522982 | from rest_framework import routers
from .viewsets import CustomUserViewSet, AccountViewset
from django.urls import path
from . import views
# Explicit function-based endpoints.
urlpatterns = [
    path( 'user/login', views.login ),
    path( 'user/register', views.Register ),
    path( 'account/getAccount', views.getAccountByEmail ),
    path( 'account/addFounds', views.addFounds ),
    path( 'account/takeOutAmount', views.takeOutAmount )
]
# Router-generated REST routes for the viewsets, appended after the
# explicit paths.
route = routers.SimpleRouter()
route.register( 'account', AccountViewset )
route.register( 'user', CustomUserViewSet )
urlpatterns += route.urls
| cacero95/LocalTelBack | locatelBank/bank/urls.py | urls.py | py | 556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
27549716670 | import re
import sys
import datetime
from itertools import zip_longest
from email.utils import getaddresses, parsedate_to_datetime
from email.header import decode_header, Header
from typing import AnyStr, Union, Optional, Tuple, Iterable, Any, List, Dict, Iterator
from .consts import SHORT_MONTH_NAMES, MailMessageFlags
from . import imap_utf7
def clean_uids(uid_set: Union[str, Iterable[str]]) -> str:
    """Normalize a UID set for use in IMAP commands.

    The UID patterns are permissive rather than strict, e.g. 2,4:7,9,12:*

    :param uid_set: a comma separated str of uids, or an iterable of str uids
    :return: str - uids joined by commas
    :raises TypeError: on a non-str uid or a uid with characters outside [0-9*:]
    """
    if type(uid_set) is str:
        # Fast path: the string is already a well-formed uid list.
        if re.search(r'^([\d*:]+,)*[\d*:]+$', uid_set):
            return uid_set
        uid_set = uid_set.split(',')
    cleaned = []
    for uid in uid_set:
        if type(uid) is not str:
            raise TypeError('uid "{}" is not string'.format(str(uid)))
        stripped = uid.strip()
        if not re.match(r'^[\d*:]+$', stripped):
            raise TypeError('Wrong uid: "{}"'.format(uid))
        cleaned.append(stripped)
    return ','.join(cleaned)
def check_command_status(command_result: tuple, exception: type, expected='OK'):
    """Ensure an IMAP command response status matches `expected`.

    :param command_result: imap command result tuple (typ, data)
    :param exception: UnexpectedCommandStatusError subclass to raise on mismatch
    :param expected: the status that counts as success
    """
    status = command_result[0]
    if status != expected:
        raise exception(command_result=command_result, expected=expected)
def decode_value(value: AnyStr, encoding: Optional[str] = None) -> str:
    """Decode bytes to str (utf-8 by default); str passes through unchanged."""
    if isinstance(encoding, str):
        encoding = encoding.lower()
    if not isinstance(value, bytes):
        return value
    try:
        return value.decode(encoding or 'utf-8', 'ignore')
    except LookupError:
        # Unknown codec name: fall back to utf-8.
        return value.decode('utf-8', 'ignore')
class EmailAddress:
    """Parsed email address: display name plus the address itself."""
    __slots__ = 'name', 'email'

    def __init__(self, name: str, email: str):
        self.name = name
        self.email = email

    @property
    def full(self):
        """'Name <addr>' when both parts exist, otherwise whichever is set."""
        if self.name and self.email:
            return '{} <{}>'.format(self.name, self.email)
        return self.name or self.email

    def __repr__(self):
        return "{}(name={}, email={})".format(
            type(self).__name__, repr(self.name), repr(self.email))

    def __eq__(self, other):
        return self.name == other.name and self.email == other.email
def parse_email_addresses(raw_header: Union[str, Header]) -> Tuple[EmailAddress, ...]:
    """
    Parse email addresses from header
    :param raw_header: example: '=?UTF-8?B?0J7Qu9C1=?= <name@company.ru>,\r\n "\'\\"z, z\\"\'" <imap.tools@ya.ru>'
    :return: (EmailAddress, ...)
    """
    result = []
    if type(raw_header) is Header:
        # Header instances are decoded to str first (first fragment only).
        raw_header = decode_value(*decode_header(raw_header)[0])
    # Unfold the header (drop CR/LF) before splitting into (name, email) pairs.
    for raw_name, email in getaddresses([raw_header.replace('\r\n', '').replace('\n', '')]):
        name = decode_value(*decode_header(raw_name)[0]).strip()
        email = email.strip()
        if not (name or email):
            continue
        result.append(EmailAddress(
            # A value without '@' is treated as a bare name, not an address.
            name=name or (email if '@' not in email else ''),
            email=email if '@' in email else '',
        ))
    return tuple(result)
def parse_email_date(value: str) -> datetime.datetime:
    """
    Parsing the date described in rfc2822
    Result datetime may be naive or with tzinfo
    1900-1-1 for unparsed
    """
    # Fast path: let the stdlib parse a well-formed RFC 2822 date.
    try:
        return parsedate_to_datetime(value)
    except Exception: # noqa
        pass
    # Fallback: extract "<day> <Mon> <year> <hh:mm[:ss]> [+-zzzz]" from a
    # malformed header by regex.
    match = re.search(
        r'(?P<date>\d{1,2}\s+(' + '|'.join(SHORT_MONTH_NAMES) + r')\s+\d{4})\s+' +
        r'(?P<time>\d{1,2}:\d{1,2}(:\d{1,2})?)\s*' +
        r'(?P<zone_sign>[+-])?(?P<zone>\d{4})?',
        value
    )
    if match:
        group = match.groupdict()
        day, month, year = group['date'].split()
        time_values = group['time'].split(':')
        # +1/-1 multiplier derived from the optional timezone sign.
        zone_sign = int('{}1'.format(group.get('zone_sign') or '+'))
        zone = group['zone']
        try:
            return datetime.datetime(
                year=int(year),
                month=SHORT_MONTH_NAMES.index(month) + 1,
                day=int(day),
                hour=int(time_values[0]),
                minute=int(time_values[1]),
                second=int(time_values[2]) if len(time_values) > 2 else 0,
                tzinfo=datetime.timezone(datetime.timedelta(
                    hours=int(zone[:2]) * zone_sign,
                    minutes=int(zone[2:]) * zone_sign
                )) if zone else None,
            )
        except ValueError:
            pass
    # Sentinel for anything unparseable.
    return datetime.datetime(1900, 1, 1)
def quote(value: AnyStr) -> AnyStr:
    """Escape backslashes and double quotes, then wrap the value in double
    quotes (IMAP quoted-string form). Works on str and bytes alike."""
    if isinstance(value, str):
        escaped = value.replace('\\', '\\\\').replace('"', '\\"')
        return '"{}"'.format(escaped)
    escaped = value.replace(b'\\', b'\\\\').replace(b'"', b'\\"')
    return b'"' + escaped + b'"'
def pairs_to_dict(items: List[Any]) -> Dict[Any, Any]:
    """Example: ['MESSAGES', '3', 'UIDNEXT', '4'] -> {'MESSAGES': '3', 'UIDNEXT': '4'}"""
    if len(items) % 2 != 0:
        raise ValueError('An even-length array is expected')
    # Even positions are keys, odd positions their values.
    return dict(zip(items[::2], items[1::2]))
def chunks(iterable: Iterable[Any], n: int, fill_value: Optional[Any] = None) -> Iterator[Tuple[Any, ...]]:
    """Group data into fixed-length chunks, padding the last with fill_value.

    The same iterator is passed to zip_longest n times, so it performs a
    round-robin over a single pass of the input.

    Examples:
        chunks('ABCDEFGH', 3, '?') --> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'H', '?')]
        chunks([1, 2, 3, 4, 5], 2) --> [(1, 2), (3, 4), (5, None)]
    """
    shared_iter = iter(iterable)
    return zip_longest(*(shared_iter,) * n, fillvalue=fill_value)
def encode_folder(folder: AnyStr) -> bytes:
    """Return the folder name quoted and modified-UTF-7 encoded; bytes
    input is assumed pre-encoded and passed through unchanged."""
    if isinstance(folder, bytes):
        return folder
    return quote(imap_utf7.encode(folder))
def clean_flags(flag_set: Union[str, Iterable[str]]) -> List[str]:
    """
    Check the correctness of the flags
    :param flag_set: a single flag or an iterable of flags
    :return: list of str - flags
    :raises ValueError: on a non-str flag, or a non-system flag starting with "\\"
    """
    if type(flag_set) is str:
        flag_set = [flag_set]
    upper_sys_flags = tuple(i.upper() for i in MailMessageFlags.all)
    for flag in flag_set:
        if not type(flag) is str:
            # Bug fix: the message previously formatted type(flag_set)
            # (the container) instead of the offending flag's type.
            raise ValueError('Flag - str value expected, but {} received'.format(type(flag)))
        if flag.upper() not in upper_sys_flags and flag.startswith('\\'):
            raise ValueError('Non system flag must not start with "\\"')
    return flag_set
def check_timeout_arg_support(timeout):
    """Raise ValueError when a timeout is requested but the runtime cannot honor it.

    imaplib.IMAP4 gained its timeout argument in Python 3.9. Bug fix: the
    previous check compared only sys.version_info.minor, which is wrong for
    any major version other than 3; compare the full (major, minor) tuple.
    """
    if timeout is not None and sys.version_info < (3, 9):
        raise ValueError('imaplib.IMAP4 timeout argument supported since python 3.9')
def replace_html_ct_charset(html: str, new_charset: str) -> str:
    """Rewrite the charset inside an HTML <meta ... content-type ...> tag,
    if one is present; otherwise return the HTML unchanged."""
    meta_match = re.search(r'<\s*meta .*?content-type.*?>', html, re.IGNORECASE | re.DOTALL)
    if not meta_match:
        return html
    old_meta = meta_match.group(0)
    # Replace only the first charset=... token inside the matched tag.
    new_meta = re.sub(r'charset\s*=\s*[a-zA-Z0-9_:.+-]+',
                      'charset={}'.format(new_charset), old_meta, 1, re.IGNORECASE)
    return html.replace(old_meta, new_meta)
| ikvk/imap_tools | imap_tools/utils.py | utils.py | py | 7,673 | python | en | code | 608 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": ... |
6482852553 | #pip install opencv-python
import cv2
# Grab a single frame from the default camera and save it to disk.
webcam = cv2.VideoCapture(0)
try:
    if webcam.isOpened():
        validacao, frame = webcam.read()
        cv2.imwrite("testeWebcam.png", frame)
        webcam.release()
        cv2.destroyAllWindows()
    else:
        # Bug fix: a camera that fails to open does not raise, so the
        # failure message below was previously unreachable in that case.
        print("Não foi possível abrir a câmera.")
except Exception:  # narrowed from bare except:, which also swallowed SystemExit
    print("Não foi possível abrir a câmera.")
print("Não foi possível abrir a câmera.") | msullivancm/ProjetosComAte10LinhasDeCodigoPython | FotoSurpresa.py | FotoSurpresa.py | py | 293 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 11,
"usage_type": "call"
}
] |
22439806116 | import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from .. import config as conf
processed_data_dir = conf.processed_data_dir
raw_data_dir = conf.raw_data_dir

# Make sure both data directories exist before anything is written into them.
for data_dir in (raw_data_dir, processed_data_dir):
    os.makedirs(data_dir, exist_ok=True)

# Convert to tensor, then standardise with the canonical MNIST mean/std.
normalize = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # MNIST specific mean and std
])

# Fetch MNIST (downloading on first run) with normalisation applied on the fly.
train_dataset = datasets.MNIST(raw_data_dir, train=True, download=True, transform=normalize)
test_dataset = datasets.MNIST(raw_data_dir, train=False, download=True, transform=normalize)

# Carve a 20% validation split out of the original training data.
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, val_size])

# Persist each split so downstream steps can load it without re-downloading.
for split_name, split_dataset in (
    ("train", train_dataset),
    ("val", val_dataset),
    ("test", test_dataset),
):
    torch.save(split_dataset, os.path.join(processed_data_dir, f"{split_name}_normalized.pt"))
| ayush0O7/Handwritten-Digit-Recognition | Handwritten_digit_recognition/src/data/download_mnist.py | download_mnist.py | py | 1,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.makedirs",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.tr... |
27918407911 | # Python 3.5.1 tested
#
# If downloaded set is out of date, ie. does not contain most recent comic
# then the comics since most recently downloaded will be downloaded.
# If however the most recent comic is present, the script will scan all
# previous comics, ensuring ALL of them are present
#
# It is possible that script will need to run TWICE to ensure ALL comics
# are present, though this still maintains average case having optimal performance
#
# Script requires no user interaction from being started to self-termination
from os import listdir, makedirs
from os.path import exists, isfile, join
import requests as web
import urllib.request as image_downloader
from bs4 import BeautifulSoup
baseurl = 'http://xkcd.com/'
direc = 'images/'
def find_max_already_downloaded():
    """Return the highest comic number among files already in `direc` (0 if none).

    Relies on the download naming scheme "<number> - <original name>", so the
    first space-separated token of each filename is the comic number.
    """
    comic_numbers = (
        int(name.split(' ')[0])
        for name in listdir(direc)
        if isfile(join(direc, name))
    )
    return max(comic_numbers, default=0)
def detect_max_page():
    """Scrape the xkcd front page and return the newest comic's number.

    The front page links to the *previous* comic via a purely numeric href,
    so the answer is the largest numeric link found, plus one.
    """
    soup = BeautifulSoup(web.get(baseurl).text, 'html.parser')
    latest_seen = 1
    for anchor in soup.findAll('a', {}):
        target = anchor.get('href').replace('/', '')
        if target.isdigit():
            latest_seen = max(latest_seen, int(target))
    # plus 1 because the link found points at the previous comic
    return latest_seen + 1
def img_spider(start_page, max_page):
    """Download the comic image for every page from start_page to max_page.

    Pages whose image file already exists locally are skipped. An inverted
    range is treated as a request to rescan everything from page 1.
    """
    if start_page > max_page:
        print("Unusual start_page max_page combination...")
        print("Reverting to scanning all pages, starting at 1")
        start_page = 1
    for page in range(start_page, max_page + 1):
        page_html = web.get(baseurl + str(page) + '/').text
        soup = BeautifulSoup(page_html, 'html.parser')
        for img in soup.findAll('img', {}):
            img_url = img.get('src')
            if 'comics' not in img_url:
                # Not the comic itself (logo, nav art, ...) — keep looking.
                continue
            filename = img_url.split('/')[-1].replace('_(1)', '')
            filepath = direc + '{} - '.format(page) + filename
            if exists(filepath):
                print('{}/{} - Exists already - '.format(page, max_page) + filename)
            else:
                print('{}/{} - Downloading... - '.format(page, max_page) + filename)
                image_downloader.urlretrieve('http:' + img_url, filepath)
            # The first 'comics' image is the one we want; ignore the rest.
            break
def scan_unseen_pages():
    """Determine which comics are missing locally and download them.

    Starts just past the highest comic already on disk and runs through the
    newest comic on the site, creating the download directory if needed.
    """
    if not exists(direc):
        makedirs(direc)
    first_page = 1 + find_max_already_downloaded()
    last_page = detect_max_page()
    print('Scanning from {} to {}...'.format(first_page, last_page))
    img_spider(first_page, last_page)
    print('Scan completed')

scan_unseen_pages()
| MikeCroall/xkcd-crawler | xkcd_crawler.py | xkcd_crawler.py | py | 2,877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numbe... |
41332456125 | # coding=utf-8
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import collections
import pprint as pp
from scipy.stats import gaussian_kde
from numpy import arange
def violin_plot(ax, data, pos, bp=False):
    '''
    Draw a violin (kernel-density) plot for each sample in *data* at the
    x-position given by the matching entry of *pos* on axis *ax*.
    When *bp* is true, overlay notched box plots at the same positions.
    '''
    spread = max(pos) - min(pos)
    half_width = min(0.15 * max(spread, 1.0), 0.5)  # violin half-width, capped
    for sample, center in zip(data, pos):
        density = gaussian_kde(sample)          # kernel density estimate
        lo = density.dataset.min()              # lower bound of violin
        hi = density.dataset.max()              # upper bound of violin
        support = arange(lo, hi, (hi - lo) / 100.)  # evaluation grid
        profile = density.evaluate(support)     # density curve
        profile = profile / profile.max() * half_width  # scale to fit
        ax.fill_betweenx(support, center, profile + center, facecolor='y', alpha=0.3)
        ax.fill_betweenx(support, center, -profile + center, facecolor='y', alpha=0.3)
    if bp:
        ax.boxplot(data, notch=1, positions=pos, vert=1)
plotati = True  # NOTE(review): assigned but never read in this chunk
# Per-hour measurements: {hour: {population_size: [normalised errors]}}
mjerenjaPoSatima = dict()
#odabirSataDat = open("./Stats/ukupniStats.txt", "w")
for sat in range(24): # for every hour
	#statsFile = open("./Stats/stats"+str(sat)+".txt", "w")
	# {population size: list of final-individual errors for this hour}
	svaMjerenja = dict()
	for j in range(1, 9): # for each configuration directory (1..8)
		# Population size configured for run j, read from its Config.xml.
		velPop = int(ET.parse("./"+str(j)+"/Config.xml").find("Algorithm").find("PopulationSize").text)
		# a = int(ET.parse("./"+str(j)+"/Config.xml").find(".//Algorithm/PopulationSize").text)
		# b = int(ET.parse("./"+str(j)+"/Config.xml").find(".//Algorithm/Termination/Entry[@name='NumberOfGenerations']").text)
		#print a, b, a*b
		vrijednosti = []
		# Collect the error ("greska") of the LAST individual of every batch
		# in this hour's log file.
		for batchNode in ET.parse("./"+str(j)+"/Logovi/log"+str(sat)+".txt").getroot().findall("Batch"):
			jedinka = batchNode.findall("Jedinka")[-1]
			vrijednosti.append(float(jedinka.get("greska")))
		svaMjerenja[velPop] = vrijednosti
	# Normalise this hour's errors by the hour's mean error so hours with
	# intrinsically larger errors do not dominate the aggregate plot.
	ukupnaGreska = sum([sum(m) for m in svaMjerenja.values()])
	brojGreski = sum([len(m) for m in svaMjerenja.values()])
	srednjaGreska = ukupnaGreska/brojGreski
	for m in svaMjerenja.values():
		for i in range(len(m)):
			m[i] /= srednjaGreska
	mjerenjaPoSatima[sat] = svaMjerenja
#mjerenjaPoSatima[sat][velPop][batchNo]
# Regroup: collect the normalised errors across all hours per population size.
mjerenjaPoVelPop = dict()
for sat in mjerenjaPoSatima:
	for vp in mjerenjaPoSatima[sat]:
		if vp not in mjerenjaPoVelPop:
			mjerenjaPoVelPop[vp] = []
		mjerenjaPoVelPop[vp].extend(mjerenjaPoSatima[sat][vp])
#mjerenjaPoVelPop[velPop]
# Box plot with outliers suppressed (the '' flier symbol).
plt.figure()
plt.xlabel('Velicina Populacije')
plt.ylabel('Normirana Greska Jedinke')
plt.title("Broj evaluacija = 50 000")
x = sorted(mjerenjaPoVelPop.keys())
y = [mjerenjaPoVelPop[i] for i in x]
plt.xticks(x,x)
plt.boxplot(y, 0, '')
# Same data again, this time with outliers shown.
plt.figure()
plt.xlabel('Velicina Populacije')
plt.ylabel('Normirana Greska Jedinke')
plt.title("Broj evaluacija = 50 000")
plt.xticks(x,x)
plt.boxplot(y)
plt.show()
| mimi33/DiplomskiProjekt | testing/5.velPop/boxplot.py | boxplot.py | py | 2,615 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.gaussian_kde",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "xml... |
4875660942 | import os
from django.conf import settings
from django.shortcuts import render, redirect
import face_recognition
import numpy as np
import cv2
from os.path import dirname, join
from django.apps import apps
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.views import LoginView
from django.contrib.auth import get_user_model
from django.contrib.auth import login
class InstructorLogin(LoginView):
    """Login view that only signs in accounts flagged as instructors."""

    template_name = 'Instructor/login.html'

    def post(self, request):
        """Authenticate the posted username; redirect instructors home,
        show the public front page to everyone else."""
        User = get_user_model()
        user = User.objects.get(username=request.POST['username'])
        if user.is_instructor != 1:
            # Not an instructor account: fall back to the public front page.
            return render(request, 'Admin/FrontPage.html')
        login(request, user)
        return redirect('Instructor-Home')
def findEncondings(images):
    """Return the first face encoding found in each BGR image.

    Each image is converted to RGB before encoding, as face_recognition
    expects RGB input. NOTE(review): indexes [0] assumes every image
    contains at least one detectable face — confirm with callers.
    """
    return [
        face_recognition.face_encodings(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[0]
        for img in images
    ]
def home(request):
    """Instructor home view: on POST, runs face recognition over an uploaded
    class photo and records attendance for the instructor's course; on GET,
    just renders the page.
    """
    # Resolve models lazily via the app registry (avoids circular imports).
    c_user = apps.get_model('Admin', 'CustomUser')
    Profile = apps.get_model('Admin', 'Profile')
    course = apps.get_model('Admin', 'course')
    registration = apps.get_model('Admin', 'registration')  # NOTE(review): unused below
    attendance = apps.get_model('Admin', 'attendance')
    instructor = request.user
    # assumes each instructor has exactly one course — get() raises otherwise
    inst_course = course.objects.get(instructor=instructor)
    students = inst_course.registration.all()
    # path = 'C:/Users/mg/PycharmProjects/testattendence/AttendanceSystem'
    path = settings.BASE_DIR
    images = []          # reference photo (BGR) per registered student
    classNames = []      # usernames aligned with `images` by index
    students_present = []
    for student in students:
        s_profile = Profile.objects.get(user=student)
        curImg = cv2.imread(f'{path}/media/{s_profile.image}')
        images.append(curImg)
        classNames.append(student.username)
    print(classNames)
    if request.method == 'POST':
        # Save the uploaded class photo temporarily so OpenCV can read it.
        myfile = request.FILES['document']
        print(myfile.name)
        fs = FileSystemStorage()
        imgname = fs.save(myfile.name, myfile)
        url = fs.url(imgname)
        print(url)
        print(imgname)
        date = request.POST.get('Date')
        print(date)
        # Known-face encodings for all registered students.
        encodeListKnown = findEncondings(images)
        print('Encoding complete')
        # encoding end
        faces = []
        # SSD face detector shipped alongside this module.
        prototxtPath = join(dirname(__file__), "deploy.prototxt.txt")
        modelPath = join(dirname(__file__), "res10_300x300_ssd_iter_140000.caffemodel")
        # image path
        # imagePath = 'C:/Users/mg/Desktop/DSC_0227.jpg'
        confidence1 = 0.1  # NOTE(review): set but never used; threshold below is 0.7
        # construct the argument parse and parse the arguments
        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(prototxtPath, modelPath)
        # load the input image and construct an input blob for the image
        # by resizing to a fixed 300x300 pixels and then normalizing it
        image = cv2.imread(f'{path}{url}')
        fs.delete(imgname)  # temp file no longer needed once loaded
        image = cv2.resize(image, (0, 0), None, 0.25, 0.25)  # shrink 4x for speed
        (h, w) = image.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
            (300, 300), (104.0, 177.0, 123.0))
        # pass the blob through the network and obtain the detections and
        # predictions
        print("[INFO] computing object detections...")
        net.setInput(blob)
        detections = net.forward()
        # loop over the detections, keeping boxes above 0.7 confidence
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.7:
                # scale the normalised box back to image coordinates
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # face_recognition expects (top, right, bottom, left)
                faces.append((startY, endX, endY, startX))
        encodesCurFrame = face_recognition.face_encodings(image, faces)
        print("encode")
        # Match every detected face against the known student encodings.
        for encodeFace, faceLoc in zip(encodesCurFrame, faces):
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if matches[matchIndex]:
                name = classNames[matchIndex]
                students_present.append(name)
                print(name)
                # Annotate the (unused) image with the recognised name.
                y1, x2, y2, x1 = faceLoc
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0))
                cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                # markAttendence(name)
            else:
                y1, x2, y2, x1 = faceLoc
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0))
                cv2.putText(image, "unknown", (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        # show the output image
        # cv2.imshow("Output", image)
        # cv2.waitKey(0)
        print(students_present)
        students_present = set(students_present)  # dedupe multiple detections
        print(students_present)
        marked = []
        # Record an attendance row for every registered student:
        # 'p' (present) if recognised in the photo, 'A' (absent) otherwise.
        for s in students:
            if s.username in students_present:
                stu = c_user.objects.get(username=s)
                obj = attendance()
                obj.student = stu
                obj.course = inst_course
                obj.status = 'p'
                obj.date = request.POST.get('Date')
                obj.save()
                marked.append(obj)
            else:
                stu = c_user.objects.get(username=s)
                obj = attendance()
                obj.student = stu
                obj.course = inst_course
                obj.status = 'A'
                obj.date = request.POST.get('Date')
                obj.save()
                marked.append(obj)
        data = {"marked": marked}
        print(marked[0].course)  # NOTE(review): IndexError if the course has no students
        return render(request, 'Instructor/home.html', data)
    else:
        return render(request, 'Instructor/home.html')
| hashir-ashraf/Attendance-System | AttendanceSystem/Instructor/views.py | views.py | py | 6,146 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 24,
"usage_type": "call... |
837645453 | from http.server import HTTPServer, BaseHTTPRequestHandler
import sys, io, json, cgi
import MolDisplay, molsql, molecule
# Create DB (reset=True wipes any previous contents on every server start)
db = molsql.Database(reset=True)
db.create_tables()
# Set our default element values.
# Tuple layout matches the /add-element handler below:
# (number, code, name, colour1, colour2, colour3, radius)
db['Elements'] = (1, 'H', 'Hydrogen', 'FFFFFF', '050505', '020202', 25)
db['Elements'] = (6, 'C', 'Carbon', '808080', '010101', '000000', 40)
db['Elements'] = (7, 'N', 'Nitrogen', '0000FF', '000005', '000002', 40)
db['Elements'] = (8, 'O', 'Oxygen', 'FF0000', '050000', '020000', 40)
# Add 3 molecules to the web page by default.
# NOTE(review): these file handles are never closed; consider `with open(...)`.
fp = open('molecules/water-3D-structure.sdf')
db.add_molecule("Water", fp)
fp = open('molecules/caffeine-3D-structure-C.sdf')
db.add_molecule('Caffeine', fp)
fp = open('molecules/CID_31260.sdf')
db.add_molecule('Isopentanol', fp)
class myHandler(BaseHTTPRequestHandler):
    """HTTP handler for the molecule viewer.

    GET serves the static front end (HTML/CSS/JS/image) and read-only JSON
    listings; POST serves the molecule SVG rendering, file upload, element
    add/remove, and rotation endpoints. All endpoints use the module-level
    `db` (molsql.Database) instance.
    """

    def do_GET(self):
        """Route GET requests to static assets or JSON listing endpoints."""
        if self.path == "/":
            with open("index.html", "rb") as f: # main page
                html = f.read()
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(html)
        elif self.path.endswith('.css'): # css
            with open("./public/css/styles.css", "rb") as f:
                css = f.read()
            # send CSS content
            self.send_response(200)
            self.send_header('Content-type', 'text/css')
            self.end_headers()
            self.wfile.write(css)
        elif self.path.endswith('.js'): # javascript
            with open("./public/js/scripts.js", "rb") as f:
                js = f.read()
            # send JS content
            self.send_response(200)
            self.send_header('Content-type', 'text/javascript')
            self.end_headers()
            self.wfile.write(js)
        elif self.path.endswith('.jpg'): # header pic
            with open("./public/images/header.jpg", 'rb') as f:
                content = f.read()
            self.send_response(200)
            self.send_header('Content-type', 'image/png')
            self.end_headers()
            self.wfile.write(content)
        # retrieves all molecules stored in the db
        elif self.path == "/retrieve-molecules":
            molecules_query = db.cursor.execute(f"""
            SELECT Molecules.NAME, COUNT(DISTINCT MoleculeAtom.ATOM_ID) AS numAtoms, COUNT(DISTINCT MoleculeBond.BOND_ID) AS numBonds
            FROM Molecules
            JOIN MoleculeAtom ON MoleculeAtom.MOLECULE_ID = Molecules.MOLECULE_ID
            JOIN Atoms ON MoleculeAtom.ATOM_ID = Atoms.ATOM_ID
            JOIN MoleculeBond ON MoleculeBond.MOLECULE_ID = Molecules.MOLECULE_ID
            JOIN Bonds ON MoleculeBond.BOND_ID = Bonds.BOND_ID
            GROUP BY Molecules.NAME;
            """).fetchall()
            molecules_list = []
            # Loop variable renamed from `molecule` to `row`: the old name
            # shadowed the imported `molecule` module.
            for row in molecules_query:
                molecule_values = {"name": row[0], "numAtoms": row[1], "numBonds": row[2]}
                molecules_list.append(molecule_values)
            self.send_response(200)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(json.dumps(molecules_list).encode())
        # retrieves all elements stored in the db
        elif self.path == "/retrieve-elements":
            elements_query = db.cursor.execute("""SELECT ELEMENT_NO, ELEMENT_CODE, ELEMENT_NAME, COLOUR1, COLOUR2, COLOUR3, RADIUS FROM ELEMENTS""").fetchall()
            elements_list = []
            for element in elements_query:
                element_values = {"Element No": element[0], "Element Code": element[1], "Element Name": element[2], "Colour 1": element[3], "Colour 2": element[4], "Colour 3": element[5], "Radius": element[6]}
                elements_list.append(element_values)
            self.send_response(200)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(json.dumps(elements_list).encode())
        else:
            self.send_error(404, "Error Found")
            self.end_headers()
            self.wfile.write(bytes("404: Error Not found", "utf-8"))

    def do_POST(self):
        """Route POST requests: SVG render, upload, element CRUD, rotation."""
        # request to display a molecule
        if self.path == '/molecule':
            content_length = int(self.headers.get('Content-Length'))
            molecule_data = self.rfile.read(content_length)
            data = json.loads(molecule_data)
            molecule_name = data.get('molecule') # receive the molecule name
            # Build our default dictionaries for rendering.
            MolDisplay.radius = db.radius()
            MolDisplay.element_name = db.element_name()
            MolDisplay.header += db.radial_gradients()
            print(MolDisplay.element_name)
            # load the molecule and generate svg
            mol = db.load_mol(molecule_name)
            mol.sort()
            svg = mol.svg() # create the svg
            print(svg)
            # send the svg back to the client
            self.send_response(200)
            self.send_header('Content-type', 'image/svg+xml')
            self.end_headers()
            self.wfile.write(bytes(svg, "utf-8"))
        # request to upload a new molecule
        elif self.path == "/upload-file":
            # NOTE(review): the `cgi` module is deprecated (removed in 3.13);
            # consider email.message / multipart parsing for a future upgrade.
            cgi.parse_header(self.headers['Content-Type'])
            # Parse the form data to get the file and molecule name
            form = cgi.FieldStorage(
                fp = self.rfile,
                headers = self.headers,
                environ = {'REQUEST_METHOD': 'POST'}
            )
            # receive the file object
            file_item = form['file']
            molecule_name = form.getvalue('moleculeName')
            # read the file contents
            file_contents = file_item.file.read()
            # convert to a text stream so the SDF parser can read lines
            bytes_io = io.BytesIO(file_contents)
            data = io.TextIOWrapper(bytes_io)
            # add the molecule to the database
            db.add_molecule(molecule_name, data)
            self.send_response(200)
            self.end_headers()
        # request to add an element to the database
        elif self.path == "/add-element":
            # Read the request body as JSON data
            content_length = int(self.headers["Content-Length"])
            body = self.rfile.read(content_length)
            element_data = json.loads(body)
            # Insert the element into the Elements table
            db["Elements"] = (
                element_data["elementNum"],
                element_data["elementCode"],
                element_data["elementName"],
                element_data["colourOne"],
                element_data["colourTwo"],
                element_data["colourThree"],
                element_data["elementRadius"],
            )
            # Echo the stored element back to the client.
            new_element = {
                "elementNum": element_data["elementNum"],
                "elementCode": element_data["elementCode"],
                "elementName": element_data["elementName"],
                "colourOne": element_data["colourOne"],
                "colourTwo": element_data["colourTwo"],
                "colourThree": element_data["colourThree"],
                "elementRadius": element_data["elementRadius"],
            }
            self.send_response(200)
            self.send_header("Content-type", "application/json; charset=utf-8")
            self.end_headers()
            self.wfile.write(json.dumps(new_element).encode())
        # request to remove an element from the database
        elif self.path == "/remove-element":
            content_length = int(self.headers["Content-Length"])
            body = self.rfile.read(content_length)
            element_data = json.loads(body)
            element_code = element_data['elementCode']
            # SECURITY FIX: the element code came straight from the client and
            # was previously interpolated into the SQL string (SQL injection).
            # Use a parameterized query instead (assumes the sqlite3 driver's
            # `?` placeholder style — TODO confirm against molsql).
            db.cursor.execute(
                "DELETE FROM Elements WHERE ELEMENT_CODE = ?;",
                (element_code,)
            )
            self.send_response(200)
            self.send_header("Content-type", "application/json; charset=utf-8")
            self.end_headers()
            self.wfile.write(json.dumps({"success": True}).encode("utf-8"))
        elif self.path == "/rotate-molecule":
            content_length = int(self.headers["Content-Length"])
            body = self.rfile.read(content_length)
            rotate_data = json.loads(body)
            molecule_to_rotate = rotate_data['molecule'] # receive the molecule name
            # Missing/non-numeric rotation angles default to 0 degrees.
            try:
                x = int(rotate_data['x'])
            except:
                x = 0
            try:
                y = int(rotate_data['y'])
            except:
                y = 0
            try:
                z = int(rotate_data['z'])
            except:
                z = 0
            # receive the molecule to rotate
            mol = db.load_mol(molecule_to_rotate)
            # rotate the molecule with the C-library transform matrix
            mx = molecule.mx_wrapper(x, y, z)
            mol.xform(mx.xform_matrix)
            # receive the new SVG after rotation and send to client
            svg = mol.svg()
            self.send_response(200)
            self.send_header('Content-type', 'image/svg+xml')
            self.end_headers()
            self.wfile.write(bytes(svg, "utf-8"))
        else:
            self.send_error(404, "Error Found")
            self.end_headers()
            self.wfile.write(bytes("404: Error Not found", "utf-8"))
# Start the HTTP server on localhost; the port is the first CLI argument.
httpd = HTTPServer(('localhost', int(sys.argv[1])), myHandler)
httpd.serve_forever()
{
"api_name": "molsql.Database",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "json.dum... |
21216811074 | import io
import runoff_class as runoff
import unittest
from unittest import mock
class TestRunoff(unittest.TestCase):
    """Unit tests for the ranked-choice (instant-runoff) election model in
    `runoff_class`: vote recording, tabulation, minimums, ties, elimination,
    and winner reporting."""

    def setUp(self) -> None:
        """Build a 4-candidate, 5-voter election and cast a fixed ballot set."""
        self.candidates = ["Marta", "Joni", "Fran", "Linda"]
        self.voter_number = 5
        self.model = runoff.Runoff(self.candidates, self.voter_number)
        # Each inner list is one voter's full preference ranking.
        self.votes_to_cast = [
            ["Marta", "Joni", "Fran", "Linda"],
            ["Joni", "Marta", "Fran", "Linda"],
            ["Linda", "Marta", "Joni", "Fran"],
            ["Marta", "Joni", "Fran", "Linda"],
            ["Marta", "Fran", "Linda", "Joni"],
        ]
        # cast votes
        for voter_index in range(0, self.voter_number):
            for name in self.votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)

    def test_that_candidates_are_populated_correctly(self):
        """Candidate names given to the constructor appear in order."""
        candidate_names = self.model.candidates.keys()
        for name, person in zip(self.candidates, candidate_names):
            self.assertEqual(name, person)

    def test_that_invalid_candidates_are_caught(self):
        """Checks that the candidate being voted for is in the list of
        valid candidates"""
        people_validity = {
            "Marta": True,
            "Benoit": False,
            "Joni": True,
            "Ransom": False,
        }
        model = self.model
        for person, validity in people_validity.items():
            self.assertEqual(validity, model.candidate_is_valid(person))

    def test_that_voter_preferences_are_recorded_correctly(self):
        """Checks that ranks of candidates chosen by each voter are
        accurately recorded."""
        expected = self.votes_to_cast
        self.assertEqual(expected, self.model.voter_prefs)

    def test_that_votes_are_tabulated_correctly(self):
        """Checks that the vote counts for the highest preference candidates
        are updated accurately."""
        self.model.tabulate()
        # First-choice counts from the setUp ballots.
        expected = {"Marta": 3, "Joni": 1, "Fran": 0, "Linda": 1}
        actual = self.model.candidates.values()
        for vote, candidate_vote in zip(expected.values(), actual):
            self.assertEqual(vote, candidate_vote.votes)

    def test_that_the_smallest_vote_is_reported_correctly(self):
        """Checks that the smallest number of votes that any remaining (
        non-eliminated) candidates is returned."""
        self.model.tabulate()
        # Fran has no votes.
        expected_minimum = 0
        self.assertEqual(expected_minimum, self.model.find_minimum())
        # New ballot set where every candidate receives at least one vote.
        votes_to_cast = [
            ["Marta", "Joni", "Fran", "Linda"],
            ["Joni", "Marta", "Fran", "Linda"],
            ["Linda", "Marta", "Joni", "Fran"],
            ["Fran", "Joni", "Marta", "Linda"],
            ["Marta", "Fran", "Linda", "Joni"],
        ]
        # reset voter prefs
        self.model.voter_prefs = [[] for x in range(0, self.voter_number)]
        # cast votes
        for voter_index in range(0, self.voter_number):
            for name in votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)
        self.model.tabulate()
        expected_minimum = 1
        self.assertEqual(expected_minimum, self.model.find_minimum())

    def test_that_a_tied_election_is_recognized(self):
        """A minimum shared by all remaining candidates constitutes a tie."""
        voter_number = 3
        self.model = runoff.Runoff(self.candidates, voter_number)
        votes_to_cast = [
            ["Joni", "Marta", "Fran", "Linda"],
            ["Fran", "Marta", "Fran", "Linda"],
            ["Joni", "Linda", "Joni", "Fran"],
        ]
        # Artificially eliminate Joni
        self.model.candidates["Joni"].eliminated = True
        # cast votes
        for voter_index in range(0, voter_number):
            for name in votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)
        minimum_vote = self.model.find_minimum()
        # Marta has 0 votes
        self.assertEqual(minimum_vote, 0)
        self.assertTrue(self.model.is_tie(minimum_vote))
        # reset: larger field with two artificially eliminated candidates
        new_candidates = ["Joni", "Marta", "Meg", "Fran", "Linda", "Ransom"]
        new_voter_number = 4
        self.model = runoff.Runoff(new_candidates, new_voter_number)
        votes_to_cast = [
            ["Joni", "Marta", "Meg", "Fran", "Linda", "Ransom"],
            ["Fran", "Joni", "Ransom", "Marta", "Meg", "Linda"],
            ["Joni", "Linda", "Marta", "Ransom", "Fran", "Meg"],
            ["Meg", "Linda", "Joni", "Marta", "Ransom", "Fran"],
        ]
        # cast votes
        for voter_index in range(0, new_voter_number):
            for name in votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)
        # Artificially eliminate Joni and Fran
        self.model.candidates["Joni"].eliminated = True
        self.model.candidates["Fran"].eliminated = True
        self.model.tabulate()
        minimum_vote = self.model.find_minimum()
        # Joni and Fran's 0 votes do not count, because they are eliminated.
        self.assertEqual(minimum_vote, 1)
        self.assertTrue(self.model.is_tie(minimum_vote))

    def test_that_a_candidate_in_last_place_is_eliminated(self):
        """Checks that a candidate/candidates with the fewest number of votes
        are eliminated."""
        # reset
        new_candidates = ["Marta", "Joni", "Fran", "Linda", "Meg"]
        new_voter_number = 4
        self.model = runoff.Runoff(new_candidates, new_voter_number)
        votes_to_cast = [
            ["Marta", "Joni", "Fran", "Linda", "Meg"],
            ["Joni", "Marta", "Meg", "Fran", "Linda"],
            ["Linda", "Marta", "Joni", "Meg", "Fran"],
            ["Fran", "Joni", "Meg", "Marta", "Linda"],
        ]
        # cast votes
        for voter_index in range(0, new_voter_number):
            for name in votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)
        self.model.tabulate()
        self.model.eliminate(self.model.find_minimum())
        # Only Meg (zero first-choice votes) should be eliminated.
        eliminated = {
            "Marta": False,
            "Joni": False,
            "Fran": False,
            "Linda": False,
            "Meg": True,
        }
        for expected, actual in zip(eliminated.values(),
                                    self.model.candidates.values()):
            self.assertEqual(expected, actual.eliminated)
        # Re-tabulating and eliminating again should not change anything:
        # all remaining candidates are tied at one vote each.
        self.model.tabulate()
        self.model.eliminate(self.model.find_minimum())
        for expected, actual in zip(eliminated.values(),
                                    self.model.candidates.values()):
            self.assertEqual(expected, actual.eliminated)

    def test_that_winners_are_printed_correctly(self):
        """print_winner emits the majority winner's name (captured stdout)."""
        self.model.tabulate()
        with mock.patch("sys.stdout", new=io.StringIO()) as out:
            self.model.print_winner()
        assert out.getvalue() == "Marta\n"
        # reset: a runoff scenario where the winner emerges after elimination
        candidates = ["Alice", "Bob", "Charlie"]
        new_voter_number = 9
        self.model = runoff.Runoff(candidates, new_voter_number)
        votes_to_cast = [
            ["Alice", "Bob", "Charlie"],
            ["Alice", "Bob", "Charlie"],
            ["Bob", "Alice", "Charlie"],
            ["Bob", "Alice", "Charlie"],
            ["Bob", "Alice", "Charlie"],
            ["Charlie", "Alice", "Bob"],
            ["Charlie", "Alice", "Bob"],
            ["Charlie", "Bob", "Alice"],
            ["Charlie", "Bob", "Alice"],
        ]
        # cast votes
        for voter_index in range(0, new_voter_number):
            for name in votes_to_cast[voter_index]:
                self.model.vote(voter_index, name)
        self.model.tabulate()
        eliminated = {
            "Alice": True,
            "Bob": False,
            "Charlie": False,
        }
        self.model.eliminate(self.model.find_minimum())
        for expected, actual in zip(eliminated.values(),
                                    self.model.candidates.values()):
            self.assertEqual(expected, actual.eliminated)
        # After Alice's votes transfer, Bob should win.
        self.model.tabulate()
        with mock.patch("sys.stdout", new=io.StringIO()) as out:
            self.model.print_winner()
        assert out.getvalue() == "Bob\n"
| SOUADSARAH/Harvard_CS50x_2022_Psets_and_Labs | 1.Psets/1.Python_solutions/8.Runoff/test_runoff.py | test_runoff.py | py | 8,139 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "runoff_class.Runoff",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "runoff_class.Runoff",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "runoff... |
476956710 | from __future__ import annotations
from typing import Any, Dict, List, Set, Union
from collections import OrderedDict
from .rows import Rows
from .option import Option
from .columns.column import Column
from .index import Index
from .constraint import Constraint
from ..operations.create_table import CreateTable
class Table:
    """In-memory model of a database table: its columns, indexes,
    constraints, options, primary key, optional row data, and the cached
    schema/parsing error lists collected from them."""

    # registered columns by name
    _columns: Dict[str, Column] = None
    # registered constraints by name
    _constraints: Dict[str, Constraint] = None
    # registered indexes by name (annotation corrected from Constraint)
    _indexes: Dict[str, Index] = None
    _indexed_columns = None
    _name: str = ''
    _options: List[Option] = None
    # the primary-key index, if one exists
    _primary: Index = None
    # row data keyed by row id, when row tracking is enabled
    _rows: Dict[int, List[Union[str, int]]] = None
    _tracking_rows: bool = False
    # caches populated lazily by _find_all_errors (None = not yet computed)
    _schema_errors: List[str] = None
    _schema_warnings: List[str] = None
    _global_errors: List[str] = None
    _global_warnings: List[str] = None
def __init__(
self,
name: str = '',
columns: List[Column] = None,
indexes: List[Index] = None,
constraints: List[Constraint] = None,
options: List[Option] = None
):
self._name = name
self._options = [*options] if options else []
self._primary = None
self._constraints = {}
self._columns = {}
self._indexes = {}
self._parsing_errors = None
self._parsing_warnings = None
self._schema_errors = None
self._schema_warnings = None
self._global_errors = []
self._global_warnings = []
self._final_global_errors = None
self._final_global_warnings = None
self._raw_columns = columns if columns is not None else []
self._raw_constraints = constraints if constraints is not None else []
self._raw_indexes = indexes if indexes is not None else []
if columns is not None:
for column in columns:
self.add_column(column)
if constraints is not None:
for constraint in constraints:
self.add_constraint(constraint)
if indexes is not None:
for index in indexes:
self.add_index(index)
# Errors are no longer tracked live. Instead, I need to finish adjusting the flow so that if any
# of the error fetchers are called, then the errors are collected from all children and then the
# proper errors are returned.
    @property
    def global_errors(self):
        """ Public getter. Returns a list of global errors
        :returns: A list of global errors
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._final_global_errors

    @property
    def global_warnings(self):
        """ Public getter. Returns a list of global warnings
        :returns: A list of global warnings
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._final_global_warnings

    @property
    def schema_errors(self):
        """ Public getter. Returns a list of schema errors
        :returns: A list of schema errors
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._schema_errors

    @property
    def schema_warnings(self):
        """ Public getter. Returns a list of schema warnings
        :returns: A list of schema warnings
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._schema_warnings

    @property
    def parsing_errors(self):
        """ Public getter. Returns a list of parsing errors
        :returns: A list of parsing errors
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._parsing_errors

    @property
    def parsing_warnings(self):
        """ Public getter. Returns a list of parsing warnings
        :returns: A list of parsing warnings
        :rtype: list
        """
        if self._schema_errors is None:
            self._find_all_errors()
        return self._parsing_warnings

    @property
    def name(self):
        """ Public getter. Returns the name of the table.
        :returns: The table name
        :rtype: string
        """
        return self._name

    @property
    def options(self):
        """ Public getter. Returns a list of table options
        :returns: Table options
        :rtype: list
        """
        return self._options
def _find_all_errors(self):
    """ Recompute every error/warning list for the table and its children.

    Populates self._schema_errors/_schema_warnings,
    self._parsing_errors/_parsing_warnings and
    self._final_global_errors/_final_global_warnings.  Called lazily by the
    error/warning properties whenever self._schema_errors has been reset
    to None (the shared "stale cache" flag).
    """
    self._schema_errors = []
    self._schema_warnings = []
    self._parsing_errors = []
    self._parsing_warnings = []
    # bug fix: always (re)build the final global lists so they exist even
    # when no global errors/warnings were recorded -- previously they were
    # left unassigned in that case, which could raise AttributeError in
    # the global_errors/global_warnings getters.
    self._final_global_errors = [] if self._global_errors is None else [*self._global_errors]
    self._final_global_warnings = [] if self._global_warnings is None else [*self._global_warnings]
    if not self.name:
        self._schema_errors.append("Table missing name")
    if not len(self.columns):
        self._schema_errors.append(f"Table '{self.name}' does not have any columns")
    # start with errors from "children" and append the table name for context
    for type_to_check in [self._columns, self._indexes, self._constraints]:
        for to_check in type_to_check.values():
            for error in to_check.schema_errors:
                self._schema_errors.append(f"{error} in table '{self.name}'")
            for warning in to_check.schema_warnings:
                self._schema_warnings.append(f"{warning} in table '{self.name}'")
            for error in to_check.parsing_errors:
                self._parsing_errors.append(f"{error} in table '{self.name}'")
            # bug fix: the four loops below previously interpolated a stale
            # variable left over from an earlier loop instead of their own
            # loop variable, so the wrong message text was recorded.
            for warning in to_check.parsing_warnings:
                self._parsing_warnings.append(f"{warning} in table '{self.name}'")
            if hasattr(to_check, 'global_errors'):
                for error in to_check.global_errors:
                    self._final_global_errors.append(f"{error} in table '{self.name}'")
            if hasattr(to_check, 'global_warnings'):
                for warning in to_check.global_warnings:
                    self._final_global_warnings.append(f"{warning} in table '{self.name}'")
    # duplicate names. This shouldn't really be possible anymore because the add_ methods will
    # throw an exception if we try to add a duplicate, but I'll leave this in just in case.
    for (name, to_check) in {
        'columns': self._columns,
        'constraints': self._constraints,
        'indexes': self._indexes
    }.items():
        if len(to_check) == len(getattr(self, name)):
            continue
        label = name.rstrip('es').rstrip('s')
        found = {}
        duplicates = {}
        # bug fix: iterate the dict's values (column/index/constraint
        # objects), not its string keys, before reading .name
        for item in to_check.values():
            if item.name in found:
                duplicates[item.name] = True
                continue
            found[item.name] = True
        for key in duplicates.keys():
            self._schema_errors.append(f"Duplicate {label} name found in table '{self.name}': '{key}'")
    # more than one primary key
    primaries = list(filter(lambda index: index.is_primary(), self._indexes.values()))
    if len(primaries) > 1:
        self._schema_errors.append(f"Table '{self.name}' has more than one PRIMARY index")
    # auto increment checks
    if self._columns:
        auto_increment = list(filter(lambda column: column.auto_increment, self._columns.values()))
        if len(auto_increment):
            if len(auto_increment) > 1:
                self._schema_errors.append(f"Table '{self.name}' has more than one AUTO_INCREMENT column")
            elif not primaries:
                self._schema_errors.append(
                    f"Table '{self.name}' has an AUTO_INCREMENT column but is missing the PRIMARY index"
                )
            elif primaries[0].columns[0] != auto_increment[0].name:
                # bug fix: append to the private list -- appending via the
                # self.schema_errors property relied on the cache already
                # being primed and bypassed the intended encapsulation
                self._schema_errors.append(
                    "Mismatched indexes in table '%s': column '%s' is the AUTO_INCREMENT column but '%s' is the PRIMARY index column"
                    % (self.name, auto_increment[0].name, primaries[0].columns[0])
                )
    # indexes on non-existent columns
    for index in self._indexes.values():
        for column in index.columns:
            if not column in self.columns:
                self._schema_errors.append(
                    f"Table '{self.name}' has index '{index.name}' that references non-existent column '{column}'"
                )
    # we don't bother checking the constraints to see if they are valid because these are
    # best checked at the database level (since, by definition, foreign key constraints are *typically*
    # against other tables, not within a single table.
def mark_tracking_rows(self):
    """ Marks the table as having had its rows read, for bookeeping purposes

    Normally self._tracking_rows flips to True when rows are added, but an
    empty table never adds any -- this method exists so empty tables can
    still be marked as "tracked" as a safeguard against subtle bugs.
    """
    # an empty table still needs a rows container so later lookups work
    if self._rows is None:
        self._rows = OrderedDict()
    self._tracking_rows = True
@property
def tracking_rows(self) -> bool:
    """ Public getter. Returns True/False to denote whether or not this table is tracking row records

    To be clear on the distinction: just about any table might have rows.
    However, that doesn't mean that the mygration system should be syncing
    rows for that table.  self.tracking_rows == True denotes that the
    mygration system thinks that we should probably be syncing rows for
    this table.

    :returns: Whether or not the mygration system is tracking rows on the table
    """
    return self._tracking_rows
@property
def columns(self) -> Dict[str, Column]:
    """ Public getter. Returns an ordered dictionary of table columns, keyed by column name """
    return self._columns
@property
def indexes(self) -> Dict[str, Index]:
    """ Public getter. Returns an ordered dictionary of table indexes, keyed by index name """
    return self._indexes
@property
def constraints(self) -> Dict[str, Constraint]:
    """ Public getter. Returns an ordered dictionary of table constraints, keyed by constraint name """
    return self._constraints
@property
def primary(self) -> Index:
    """ Public getter. Returns the index object for the primary key (set by add_index for PRIMARY indexes) """
    return self._primary
@property
def rows(self) -> Dict[int, List[Union[str, int]]]:
    """ Public getter. Returns an ordered dictionary with row data by id

    :returns: Row data keyed by row id, or None if rows were never loaded
    """
    # the previous "None if self._rows is None else self._rows" conditional
    # was a no-op: both branches evaluate to self._rows
    return self._rows
def add_rows(self, rows: Rows) -> Union[str, bool]:
    """ Adds rows to the table

    The rows object has some flexibility in terms of columns: it doesn't just
    assume that a value is provided for every column in the table. Rather,
    there can be a list of columns and only those columns have values (which
    supports the equivalent of MySQL INSERT queries which only specify values
    for some columns).

    Rows with errors will not be processed; their problems are recorded in
    self._global_errors instead.

    :param rows: The parsed rows to record on this table
    :returns: True on completion; None when the 'id' column is missing
    :raises ValueError: If rows is not a Rows instance
    """
    # any cached error state is stale once rows change
    self._schema_errors = None
    self._schema_warnings = None
    if not isinstance(rows, Rows):
        raise ValueError(
            f"Only objects of class mygrations.core.definitions.rows can be added as rows to a table. Instead I received an object of class '{rows.__class__.__name__}'"
        )
    if rows.parsing_errors:
        self._global_errors.extend(rows.parsing_errors)
    if rows.parsing_warnings:
        self._global_warnings.extend(rows.parsing_warnings)
    self._tracking_rows = True
    if self._rows is None:
        self._rows = OrderedDict()
    # the rows object may have a list of columns. If not use our own list of columns
    # remember that self._columns is an OrderedDict so converting its keys to a list
    # actually preserves the order (which is a requirement for us)
    columns = rows.columns if rows.num_explicit_columns else list(self._columns.keys())
    if 'id' not in columns:
        self._global_errors.append(
            "A column named 'id' is required to manage rows in the table, but the id column is missing in the rows for table %s"
            % (self._name, )
        )
        return
    id_index = columns.index('id')
    for values in rows.raw_rows:
        # rows without explicit columns must be checked for matching columns
        if not rows.num_explicit_columns and len(values) != len(columns):
            self._global_errors.append(
                'Insert values has wrong number of values for table %s and row %s' % (self._name, values)
            )
            continue
        # we need to know the id of this record
        row_id = str(values[id_index])
        if not row_id:
            self._global_errors.append(
                'Row is missing a value for the id column for table %s and row %s' % (self._name, values)
            )
            continue
        if row_id in self._rows:
            self._global_errors.append('Duplicate row id found for table %s and row %s' % (self.name, values))
            continue
        # (a second, identical "if not row_id" check used to live here; it
        # was unreachable because the check above already continues)
        self._rows[row_id] = OrderedDict(zip(columns, values))
        self._rows[row_id]['id'] = row_id
    return True
def add_raw_row(self, row: Dict[str, Union[str, int]]) -> Union[str, bool]:
    """ Adds a row into the table as a dictionary instead of a row object

    A bit of repetition here. This is similar to what happens inside the main
    loop of self.add_rows, but different enough that I'm not trying to combine
    them right this second.

    :param row: Column name/value pairs; must contain a non-empty 'id'
    :returns: An error string for a duplicate row id, otherwise None
    :raises ValueError: If the 'id' key is missing or its value is empty
    """
    # any cached error state is stale once rows change
    self._schema_errors = None
    self._schema_warnings = None
    self._tracking_rows = True
    if self._rows is None:
        self._rows = OrderedDict()
    # bug fix: str(row.get('id')) turned a *missing* id into the literal
    # (truthy) string 'None', silently storing the row under that key
    # instead of raising
    raw_id = row.get('id')
    row_id = '' if raw_id is None else str(raw_id)
    if not row_id:
        raise ValueError("Cannot manage records without an 'id' column and value")
    if row_id in self._rows:
        return 'Duplicate row id found for table %s and row %s' % (self._name, row)
    # make sure we have a value for every column in the row, and build an OrderedDict
    converted_row = OrderedDict()
    for column in self._columns.keys():
        if column in row:
            converted_row[column] = row[column]
        else:
            converted_row[column] = self._columns[column].default
    converted_row['id'] = row_id
    self._rows[row_id] = converted_row
def column_before(self, column_name: str) -> Union[str, bool]:
    """ Returns the name of the column that comes before a given column.

    :param column_name: The column whose predecessor is wanted
    :returns: The preceding column's name, or True if the column is first
    :raises ValueError: If the column does not exist in the table
    """
    # ordering is reliable because self.columns is an OrderedDict
    ordered_names = list(self.columns.keys())
    if column_name not in ordered_names:
        raise ValueError(
            "Cannot return column before %s because %s does not exist in table %s" %
            (column_name, column_name, self.name)
        )
    position = ordered_names.index(column_name)
    return True if position == 0 else ordered_names[position - 1]
def column_is_indexed(self, column: Union[str, Column]) -> bool:
    """ Returns True/False to denote whether or not the column has a useable index

    A column counts as indexed only when some index lists it in its *first*
    position.  The set of first-position columns is built once and cached.

    :param column: A column name or column object
    """
    column_name = column.name if type(column) != str else column
    if column_name not in self._columns:
        return False
    # build the cache of leading index columns on first use
    if self._indexed_columns is None:
        self._indexed_columns = set(index.columns[0] for index in self._indexes.values())
    return column_name in self._indexed_columns
def __str__(self) -> str:
    """ Render the table as its CREATE TABLE statement """
    return str(self.create())
def create(self, nice=False):
    """ Returns a create table operation that can create this table

    :param nice: Whether or not to return a nicely formatted CREATE TABLE command
    :returns: A create table operation
    """
    return CreateTable(self, nice)
def nice(self) -> str:
    """ Render the table as a nicely formatted CREATE TABLE statement """
    return str(self.create(True))
def add_column(self, column: Column, position=False):
    """ Adds a column to the table

    The value of position matches self.position from
    mygrations.formats.mysql.mygration.operations.add_column: falsy for
    "append at the end", True for "put first", or the name of the column
    to insert after.

    :param column: The column to add
    :param position: Where to put the column
    :raises ValueError: If the column already exists, or the anchor column does not
    """
    # any cached error state is now stale
    self._schema_errors = None
    self._schema_warnings = None
    if column.name in self._columns:
        raise ValueError("Cannot add column %s because %s already exists" % (column.name, column.name))
    # easy case: append at the end
    if not position:
        self._columns[column.name] = column
        return None
    # easy case: move to the front
    if position == True:
        self._columns[column.name] = column
        self._columns.move_to_end(column.name, last=False)
        return None
    # hard case: rebuild the ordered dict, splicing the new column in
    # immediately after the named anchor column
    rebuilt = OrderedDict()
    inserted = False
    for existing_name, existing_column in self._columns.items():
        rebuilt[existing_name] = existing_column
        if existing_name == position:
            rebuilt[column.name] = column
            inserted = True
    if not inserted:
        raise ValueError(
            "Cannot add column %s after %s because %s does not exist" % (column.name, position, position)
        )
    self._columns = rebuilt
    return None
def remove_column(self, column: Union[str, Column]):
    """ Removes a column from the table

    :param column: A column name or column object to remove
    :raises ValueError: If no such column exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    column_name = column.name if type(column) != str else column
    if column_name not in self._columns:
        raise ValueError("Cannot remove column %s because column %s does not exist" % (column_name, column_name))
    del self._columns[column_name]
def change_column(self, new_column: Column):
    """ Changes a column. This does not currently support renaming columns

    :param new_column: The replacement definition for an existing column
    :raises ValueError: If no column with that name exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    if new_column.name not in self._columns:
        raise ValueError(
            "Cannot modify column %s because column %s does not exist" % (new_column.name, new_column.name)
        )
    self._columns[new_column.name] = new_column
def add_index(self, index: Index):
    """ Adds an index to the table

    :param index: The index to add
    :raises ValueError: If an index with the same name already exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    if index.name in self._indexes:
        raise ValueError("Cannot add index %s because index %s already exists" % (index.name, index.name))
    # a PRIMARY index is also remembered separately for quick access
    if index.index_type == 'PRIMARY':
        self._primary = index
    self._indexes[index.name] = index
    # keep the indexed-columns cache (if it has been built) coherent
    if self._indexed_columns is not None:
        self._indexed_columns.add(index.columns[0])
def remove_index(self, index: Union[str, Index]):
    """ Removes an index from the table

    :param index: An index name or index object to remove
    :raises ValueError: If no such index exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    index_name = index.name if type(index) != str else index
    if index_name not in self._indexes:
        raise ValueError("Cannot remove index %s because index %s does not exist" % (index_name, index_name))
    # remember the leading column before dropping so the cache can be pruned
    leading_column = self._indexes[index_name].columns[0]
    del self._indexes[index_name]
    if self._indexed_columns is not None:
        self._indexed_columns.discard(leading_column)
def change_index(self, new_index: Index):
    """ Changes an index. This does not currently support renaming

    :param new_index: The replacement definition for an existing index
    :raises ValueError: If no index with that name exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    if new_index.name not in self._indexes:
        raise ValueError(
            "Cannot modify index %s because index %s does not exist" % (new_index.name, new_index.name)
        )
    # keep the indexed-columns cache coherent: forget the old leading
    # column, record the new one
    if self._indexed_columns is not None:
        self._indexed_columns.discard(self._indexes[new_index.name].columns[0])
    self._indexes[new_index.name] = new_index
    if self._indexed_columns is not None:
        self._indexed_columns.add(new_index.columns[0])
def add_constraint(self, constraint: Constraint):
    """ Adds a constraint to the table

    :param constraint: The constraint to add
    :raises ValueError: If a constraint with the same name already exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    if constraint.name in self._constraints:
        raise ValueError(
            "Cannot add constraint %s because constraint %s already exists" % (constraint.name, constraint.name)
        )
    self._constraints[constraint.name] = constraint
def remove_constraint(self, constraint: Union[str, Constraint]):
    """ Removes a constraint from the table

    :param constraint: A constraint name or constraint object to remove
    :raises ValueError: If no such constraint exists
    """
    self._schema_errors = None
    self._schema_warnings = None
    constraint_name = constraint if type(constraint) == str else constraint.name
    if constraint_name not in self._constraints:
        raise ValueError(
            "Cannot remove constraint %s because constraint %s does not exist" % (constraint_name, constraint_name)
        )
    del self._constraints[constraint_name]
def change_constraint(self, new_constraint: Constraint):
    """ Changes a constraint. This does not currently support renaming.

    :param new_constraint: The replacement definition for an existing constraint
    :raises ValueError: If no constraint with that name exists
    """
    # type annotation added for consistency with change_column/change_index
    self._schema_errors = None
    self._schema_warnings = None
    if new_constraint.name not in self._constraints:
        raise ValueError(
            "Cannot modify constraint %s because constraint %s does not exist" %
            (new_constraint.name, new_constraint.name)
        )
    self._constraints[new_constraint.name] = new_constraint
def _loose_equal(self, val1: Union[str, int], val2: Union[str, int]) -> bool:
    """ Performs a looser comparison, as values might have different types depending on whether they came out of a database or file

    Returns True if the two values are equal, even if one is a string and
    the other an int.
    """
    same_type = type(val1) == type(val2)
    if same_type:
        return val1 == val2
    # types differ: bridge the gap by comparing string renderings
    return str(val1) == str(val2)
def to(self, comparison_table, split_operations=False):
    """ Compares two tables and returns operations that bring this table's structure in line with comparison_table

    In other words, this pseudo code will make table have the same structure
    as comparison_table:

        for operation in table.to( comparison_table ):
            table.apply( operation )

    When split_operations is True a dict of migration operations is returned
    so foreign key operations are separated from everything else, with up to
    three keys: [ 'removed_fks', 'fks', 'kitchen_sink' ].  'fks' holds the
    alter statement operations needed to add/change/remove foreign keys and
    'kitchen_sink' holds everything else -- a division driven by the needs
    of the overall algorithm.  When split_operations is False a single alter
    table operation encompassing all changes is returned.

    :param comparison_table: A table to find differences with
    :param split_operations: Whether to combine all operations in one alter table or separate them
    :type comparison_table: mygrations.formats.mysql.definitions.table
    :type split_operations: bool
    :returns: A list of operations to apply to table
    :rtype: list[mygrations.formats.mysql.mygrations.operations.*] | dict
    """
    raise NotImplementedError()
def to_rows(self, from_table=None):
    """ Compares two tables and returns operations that bring the rows of this table in line with the other

    It's important to note (and probably important to think through and
    change eventually) that this method has the opposite meaning of
    `mygrations.formats.mysql.definitions.table.to()`: that method is called
    on the `from` table against a (required) `to` table, while this method
    is called on the `to` table and can (optionally) be passed a `from`
    table.

    :param from_table: A table to find differences with (or None)
    :type from_table: mygrations.formats.mysql.definitions.table
    :returns: A list of operations to apply to table
    :rtype: list[mygrations.formats.mysql.mygrations.operations.*]
    """
    raise NotImplementedError()
def _differences(self, from_list: Dict[str, Any], to_list: Dict[str, Any]):
    """ Calculates the difference between two OrderedDicts.

    https://codereview.stackexchange.com/a/176303/140581

    :param from_list: OrderedDict
    :param to_list: OrderedDict
    :return: (added, removed, overlap)
    """
    added = [key for key in to_list if key not in from_list]
    removed = [key for key in from_list if key not in to_list]
    overlap = [key for key in from_list if key in to_list]
    return (added, removed, overlap)
def apply_operation(self, operation):
    """ Applies a single migration operation to this table

    The operation itself knows how to mutate a table, so this simply hands
    itself over.

    :param operation: The operation to apply
    :type operation: mygrations.formats.mysql.mygration.operations.*
    """
    operation.apply_to_table(self)
| cmancone/mygrations | mygrations/core/definitions/table.py | table.py | py | 26,764 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "columns.column.Column",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "constraint.Constraint",... |
# USAGE
# To read and write back out to video:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
#	--model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \
#	--output output/output_01.avi
#
# To read from webcam and write back out to disk:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
#	--model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \
#	--output output/webcam_output.avi

# Pipeline overview: run a MobileNet-SSD detector every --skip-frames
# frames to (re)acquire people and chairs/sofas, track them with dlib
# correlation trackers in between, associate detections across frames with
# a centroid tracker, and count person/chair centroid coincidences above or
# below two horizontal thresholds as "Sunlit" vs "Shade".

# import the necessary packages
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
    help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
    help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
    help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
    help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=10,
    help="# of skip frames between detections")
args = vars(ap.parse_args())

# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

# otherwise, grab a reference to the video file
else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])

# initialize the video writer (we'll instantiate later if need be)
writer = None

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
# NOTE(review): a single CentroidTracker instance is shared between the
# person and chair detections and updated twice per frame (see the two
# ct.update() calls below) -- each update marks the other category's
# objects as disappeared, so IDs for the two categories interfere.
# Confirm whether two separate trackers were intended.
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers_c = []    # dlib correlation trackers for chairs/sofas
trackers_p = []    # dlib correlation trackers for people
trackers = []      # NOTE(review): never used after initialization
labels = [] #added labels list initialization
# NOTE(review): labels, boxes_c and boxes_p grow without bound across
# frames (they are appended to on every detection frame and never cleared)
trackableObjects_c = {}
trackableObjects_p = {}
rect_c =[]
rect_p =[]
boxes_c = []
boxes_p =[]
counter = 0    # NOTE(review): only referenced by dead (removed) commented-out code

# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
    # grab the next frame and handle if we are reading from either
    # VideoCapture or VideoStream
    frame = vs.read()
    frame = frame[1] if args.get("input", False) else frame

    # if we are viewing a video and we did not grab a frame then we
    # have reached the end of the video
    if args["input"] is not None and frame is None:
        break

    # resize the frame to have a maximum width of 500 pixels (the
    # less data we have, the faster we can process it), then convert
    # the frame from BGR to RGB for dlib
    frame = imutils.resize(frame, width=500)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # if the frame dimensions are empty, set them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # if we are supposed to be writing a video to disk, initialize
    # the writer
    if args["output"] is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (W, H), True)

    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
    status = "Waiting"
    rects_p = []
    rects_c = []

    # check to see if we should run a more computationally expensive
    # object detection method to aid our tracker
    if totalFrames % args["skip_frames"] == 0:
        # set the status and initialize our new set of object trackers
        status = "Detecting"
        trackers_c = []
        trackers_p = []

        # convert the frame to a blob and pass the blob through the
        # network and obtain the detections
        blob = cv2.dnn.blobFromImage(frame.copy(), 0.007843, (W, H), 127.5)
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated
            # with the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by requiring a minimum
            # confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # detections list
                idx = int(detections[0, 0, i, 1])
                label = CLASSES[idx] #new addition

                # if the class label is not a person, chair or sofa,
                # ignore it #new addition
                # NOTE(review): bitwise & on comparison results works here
                # only because each operand is a bool; logical 'and' would
                # be the conventional spelling
                if (CLASSES[idx] != "person") & (CLASSES[idx]!= "chair") & (CLASSES[idx]!= "sofa"):
                    continue

                if (CLASSES[idx] == "person"):
                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startXp, startYp, endXp, endYp) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(int(startXp), int(startYp), int(endXp), int(endYp))
                    tracker.start_track(rgb, rect)
                    rect_p = box
                    boxes_p.append(rect_p)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers_p.append(tracker)
                    labels.append(label) #new addition

                    # new addition - grab the corresponding class label for the detection
                    # and draw the bounding box
                    cv2.rectangle(frame, (startXp, startYp), (endXp, endYp),
                        (0, 255, 0), 2)
                    if (label == 'person'):
                        cv2.putText(frame, label, (startXp, startYp - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

                if (CLASSES[idx] == "chair") | (CLASSES[idx] == "sofa"):
                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startXc, startYc, endXc, endYc) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(int(startXc), int(startYc), int(endXc), int(endYc))
                    tracker.start_track(rgb, rect)
                    rect_c = box
                    boxes_c.append(rect_c)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers_c.append(tracker)
                    labels.append(label) #new addition

                    # new addition - grab the corresponding class label for the detection
                    # and draw the bounding box
                    cv2.rectangle(frame, (startXc, startYc), (endXc, endYc),
                        (0, 255, 0), 2)
                    if (label == 'chair') | (label == 'sofa'):
                        cv2.putText(frame, label, (startXc, startYc - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

        # (removed dead commented-out code that compared boxes_p/boxes_c
        # coordinates to increment `counter`)

    # otherwise, we should utilize our object *trackers* rather than
    # object *detectors* to obtain a higher frame processing throughput
    else:
        # loop over the trackers
        for trackerp in trackers_p:
            # set the status of our system to be 'tracking' rather
            # than 'waiting' or 'detecting'
            status = "Tracking_p"

            # update the tracker and grab the updated position
            trackerp.update(rgb)
            pos = trackerp.get_position()

            # unpack the position object
            startXp = int(pos.left())
            startYp = int(pos.top())
            endXp = int(pos.right())
            endYp = int(pos.bottom())

            # add the bounding box coordinates to the rectangles list
            rects_p.append((startXp, startYp, endXp, endYp))

            # new addition- draw the bounding box from the correlation object tracker
            # NOTE(review): `label` here is whatever value was left over
            # from the *last* detection frame (the text drawn may not match
            # this tracker, and `label` would be undefined if this branch
            # ever ran before the first detection frame) -- confirm intent
            cv2.rectangle(frame, (startXp, startYp), (endXp, endYp),
                (0, 255, 0), 2)
            if (label == 'person'):
                cv2.putText(frame, label, (startXp, startYp - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

        # loop over the trackers
        for trackerc in trackers_c:
            # set the status of our system to be 'tracking' rather
            # than 'waiting' or 'detecting'
            status = "Tracking"

            # update the tracker and grab the updated position
            trackerc.update(rgb)
            pos = trackerc.get_position()

            # unpack the position object
            startXc = int(pos.left())
            startYc = int(pos.top())
            endXc = int(pos.right())
            endYc = int(pos.bottom())

            # add the bounding box coordinates to the rectangles list
            rects_c.append((startXc, startYc, endXc, endYc))

            # new addition- draw the bounding box from the correlation object tracker
            # NOTE(review): same stale-`label` concern as the person loop above
            cv2.rectangle(frame, (startXc, startYc), (endXc, endYc),
                (0, 255, 0), 2)
            if (label == 'chair'):
                cv2.putText(frame, label, (startXc, startYc - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

    # draw a horizontal line in the center of the frame -- once an
    # object crosses this line we will determine whether they were
    # moving 'up' or 'down'
    # NOTE(review): the drawn line is slanted (y from (H-100)//2 to H-120)
    # while the counting logic below uses the thresholds H-150 / H-180;
    # the visualization does not match the decision boundary
    cv2.line(frame, (0, (H-100)//2),((W+500)//2, H-120), (0, 255, 255), 2)

    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    # NOTE(review): the same `ct` instance is updated with person rects and
    # then chair rects every frame -- see note at construction above
    objects_p = ct.update(rects_p)
    objects_c = ct.update(rects_c)

    # loop over the tracked objects
    for (objectID, centroid) in objects_p.items():
        # check to see if a trackable object exists for the current
        # object ID
        to_p = trackableObjects_p.get(objectID, None)

        # if there is no existing trackable object, create one
        if to_p is None:
            to_p = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to_p.centroids]
            direction = centroid[1] - np.mean(y)
            to_p.centroids.append(centroid)
            # (removed dead commented-out up/down counting code; counting
            # now happens in the person/chair matching loop below)

        # store the trackable object in our dictionary
        trackableObjects_p[objectID] = to_p

    # loop over the tracked objects
    for (objectID, centroid) in objects_c.items():
        # check to see if a trackable object exists for the current
        # object ID
        to_c = trackableObjects_c.get(objectID, None)

        # if there is no existing trackable object, create one
        if to_c is None:
            to_c = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to_c.centroids]
            direction = centroid[1] - np.mean(y)
            to_c.centroids.append(centroid)
            # (removed dead commented-out up/down counting code)

        # store the trackable object in our dictionary
        trackableObjects_c[objectID] = to_c

    # count person/chair centroid coincidences relative to the two
    # horizontal thresholds
    # NOTE(review): `to_p` and `to_c` below are the *last* objects assigned
    # by the two loops above, not the trackable objects matching
    # objectIDp/objectIDc -- the counted flags are read/written on the
    # wrong objects.  They are also undefined (NameError) if either loop
    # never ran.  Likely intended: trackableObjects_p[objectIDp] and
    # trackableObjects_c[objectIDc].
    for objectIDc, centroid_c in objects_c.items():
        for objectIDp, centroid_p in objects_p.items():
            if (centroid_c[0] == centroid_p[0]) & (centroid_c[1] == centroid_p[1]) :
                if (to_p.counted == False) & (to_c.counted == False) & (centroid_p[1] < (H-150)):
                    totalUp += 1
                    to_p.counted = True
                    to_c.counted = True
                if (to_p.counted == False) & (to_c.counted == False) & (centroid_p[1] > (H-180)):
                    totalDown += 1
                    to_p.counted = True
                    to_c.counted = True

    # construct a tuple of information we will be displaying on the
    # frame
    info = [
        ("Sunlit", totalUp),
        ("Shade", totalDown),
        ("Status", status),
    ]

    # loop over the info tuples and draw them on our frame
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    # check to see if we should write the frame to disk
    if writer is not None:
        writer.write(frame)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

    # increment the total number of frames processed thus far and
    # then update the FPS counter
    totalFrames += 1
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# check to see if we need to release the video writer pointer
if writer is not None:
    writer.release()

# if we are not using a video file, stop the camera video stream
if not args.get("input", False):
    vs.stop()

# otherwise, release the video file pointer
else:
    vs.release()

# close any open windows
cv2.destroyAllWindows()
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "imutils... |
2251689523 | import os
import imageio
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from natsort import natsorted
from PIL import Image, ImageDraw, ImageFont
def plot_generated(generated_imgs, dim=(1, 10), figsize=(12, 2), save_name=None):
  """Render a batch of generated images as a subplot grid.

  Args:
    generated_imgs: array of shape (n, H, W) holding the images.
    dim: (rows, cols) layout of the subplot grid.
    figsize: matplotlib figure size in inches.
    save_name: if given, save the figure to this path instead of showing it.
  """
  n_rows, n_cols = dim
  plt.figure(figsize=figsize)
  for idx, img in enumerate(generated_imgs):
    plt.subplot(n_rows, n_cols, idx + 1)
    plt.imshow(img, interpolation='nearest', cmap='gray_r')
    plt.axis('off')
  plt.tight_layout()
  if save_name is not None:
    plt.savefig(save_name)
  else:
    plt.show()
  plt.close()
def create_gif(img_folder, save_gif_name=None, fps=5):
  """Assemble every image in *img_folder* (natural-sort order) into a GIF.

  Args:
    img_folder: folder path ending with a separator, e.g. 'imgs/'.
    save_gif_name: output path; defaults to '<folder>_animated.gif'.
    fps: frames per second of the resulting animation.
  """
  frames = [imageio.imread(f'{img_folder}{img}') for img in natsorted(os.listdir(img_folder))]
  if save_gif_name is None:
    # Drop the trailing separator from the folder name to build the default path.
    save_gif_name = f'{img_folder[:-1]}_animated.gif'
  # BUG FIX: the original saved to `save_gif_namef`, which was only bound in
  # the default-name branch — passing an explicit name raised NameError.
  imageio.mimsave(save_gif_name, frames, fps=fps)
def read_subset_classif_results(log_file):
  """Parse a classification log and plot the f1-vs-epoch curve of each run.

  The log holds several experiment runs, each delimited by a START line and
  an END line; every run trained on a different subset size of the data.
  For each run the per-epoch f1 scores are collected, the best score is
  printed, and all curves are drawn on one seaborn line plot.
  """
  with open(log_file, 'r') as f:
    data = f.read().splitlines()
  # Line indices of the START/END delimiters; they are consumed in pairs.
  starts_ends = [i for i, el in enumerate(data) if 'START' in el or 'END' in el]
  # Drop a trailing unmatched delimiter (e.g. a run still in progress).
  starts_ends = starts_ends[:len(starts_ends) - len(starts_ends) % 2]
  results = {'epoch': [], 'f1': [], 'label': []}
  percents = []
  for i, idx in enumerate(range(0, len(starts_ends), 2)):
    # if i == 0:
    #   continue
    tmp = data[starts_ends[idx]:starts_ends[idx+1]]
    # Each ' | f1 = ' line looks like 'Epoch <n> | ... | f1 = <score>'.
    epochs, f1s = zip(*[(int(l.split(' | ')[0].split('Epoch ')[-1]), float(l.split(' = ')[-1])) for l in tmp if ' | f1 = ' in l])
    results['epoch'] += epochs
    results['f1'] += f1s
    results['label'] += [i] * len(epochs)
    # percents.append(tmp[0].split(' = ')[-1])
    # The section's first line stores the subset fraction; convert it to an
    # absolute number of examples per class (fraction * 6000).
    percents.append(str(int(float(tmp[0].split(' = ')[-1]) * 6000)))
    # print(f'Percent = {percents[-1]} | max f1 = {max(f1s)}')
    print(f'n_examples_per_class = {percents[-1]} | max f1 = {max(f1s)}')
  lp = sns.lineplot(x='epoch', y='f1', hue='label', data=pd.DataFrame.from_dict(results),
                    legend=False, palette=sns.color_palette()[:len(percents)])
  # lp.set(yscale="log")
  plt.legend(title='percent', loc='lower right', labels=percents)
  plt.show()
def read_ssdcgan_logs(folder='tmp_data/', root_name='_tmp_mnist_gan_ssdcgan_percent{}_logs.txt'):
  """Report the best checkpointed f1 score for each semi-supervised DC-GAN run.

  For every known training fraction the matching log file (when present) is
  scanned for 'Saving model with' lines and the maximum recorded f1 printed.
  """
  fractions = (0.002, 0.004, 0.009, 0.017, 0.084, 0.17, 0.34, 0.67, 1.)
  for fraction in fractions:
    # On-disk names use e.g. 'percent0002' rather than 'percent0.002'.
    log_path = os.path.join(folder, root_name.format(fraction)).replace('0.', '0')
    if not os.path.isfile(log_path):
      continue
    with open(log_path, 'r') as log:
      lines = log.read().splitlines()
    scores = [float(line.split(' = ')[-1]) for line in lines if 'Saving model with' in line]
    print(f'n_examples = {int(fraction * 6000)} | f1 = {np.max(scores)}')
def plot_comp_imgs(folder='tmp_data/', fname='imgs_generated_epoch180.png'):
  """Stack the same epoch's sample grid from three GAN variants vertically.

  Loads the image *fname* from the dcgan / cdcgan / acdcgan output folders,
  stamps each one with its model name, pastes them on top of each other and
  opens the result with the default image viewer.
  """
  dcgan, cdcgan, acdcgan = 'generated_dcgan_imgs/', 'generated_cdcgan_imgs/', 'generated_acdcgan_imgs/'
  img_dcgan = Image.open(os.path.join(folder, dcgan, fname))
  img_cdcgan = Image.open(os.path.join(folder, cdcgan, fname))
  img_acdcgan = Image.open(os.path.join(folder, acdcgan, fname))
  font = ImageFont.truetype('open-sans/OpenSans-Regular.ttf', 20)
  # Label each grid with the model that produced it.
  for text, tmp_img in zip(['DC-GAN', 'Conditional DC-GAN', 'AC-DC-GAN'], [img_dcgan, img_cdcgan, img_acdcgan]):
    draw = ImageDraw.Draw(tmp_img)
    draw.text((5, 5), text, font=font, fill=(0, 0, 0))
  # Grayscale canvas tall enough for the three grids stacked vertically.
  img = Image.new('L', (img_dcgan.width, img_dcgan.height + img_cdcgan.height + img_acdcgan.height))
  img.paste(img_dcgan, (0, 0))
  img.paste(img_cdcgan, (0, img_dcgan.height))
  img.paste(img_acdcgan, (0, 2 * img_dcgan.height))
  img.show()
if __name__ == '__main__':
  # Interactive entry point: each analysis step is opt-in via a y/n prompt.
  argparser = argparse.ArgumentParser(prog='plotter.py', description='')
  argparser.add_argument('--log_file', default='_tmp_classif_exps_mnist_logs.txt', type=str)
  args = argparser.parse_args()
  rep = input('Read subset classification results? (y or n): ')
  if rep == 'y':
    read_subset_classif_results(args.log_file)
  rep = input('Read ssdcgan logs? (y or n): ')
  if rep == 'y':
    read_ssdcgan_logs()
  rep = input('Plot comparison of generated GAN images? (y or n): ')
  if rep == 'y':
plot_comp_imgs() | thbeucher/ML_pytorch | apop/GAN/plotter.py | plotter.py | py | 4,225 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "... |
22149140762 | """
Document Scanner
Algo : Take input from webcom
Preprocess Image and return as Threshold image
Find the biggest contour
Using corner points to get bird eye view
"""
import cv2
import numpy as np
# Target capture/output dimensions for the scanner windows.
window_width = 600
window_height = 350
webcam = cv2.VideoCapture(0) # Selecting webcam (device index 0)
webcam.set(3,window_width) # Adjust Width (property id 3 = CAP_PROP_FRAME_WIDTH)
webcam.set(4,window_height) # Adjust Height (property id 4 = CAP_PROP_FRAME_HEIGHT)
webcam.set(10,150) # Adjust Brightness (property id 10 = CAP_PROP_BRIGHTNESS)
def imagePreProcessing(img):
    """Convert a BGR frame into a binary edge map suitable for contour search.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> dilation
    (dilation thickens the edges so document borders form closed contours).
    """
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 100, 100)
    kernel = np.ones([5, 5])
    imgDilate = cv2.dilate(imgCanny, kernel, iterations=1)
    # NOTE: the original also computed an eroded image here but never used it;
    # the dilated edge map is what the caller consumes, so that dead step
    # has been removed.
    return imgDilate
def getContours(img):
    """Find the largest 4-corner contour (the document) in a binary edge map.

    Scans external contours, keeps the largest quadrilateral approximation
    with area > 5000 px, draws its corners onto the module-global
    imgCountour preview image (set each frame by the main loop), and
    returns the 4x1x2 corner array (empty array when nothing was found).
    """
    biggest_box = np.array([])
    maxArea = 0
    contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area>5000:
            # Approximate the contour; a document should reduce to 4 corners.
            parameter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * parameter, True)
            if len(approx) == 4 and area > maxArea:
                biggest_box = approx
                maxArea = area
    # Mark the winning corners on the shared preview frame.
    # (The original also initialized unused locals x, y, w, h — removed.)
    cv2.drawContours(imgCountour, biggest_box, -1, (255, 0, 0),20)
    return biggest_box
def reorder(points):
    """Sort four corner points into a fixed order for perspective warping.

    Returns a (4, 1, 2) int32 array ordered top-left, top-right,
    bottom-left, bottom-right. Top-left has the smallest x+y sum and
    bottom-right the largest; the difference y-x separates top-right
    (most negative) from bottom-left (most positive).
    """
    corners = points.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), np.int32)
    coord_sums = corners.sum(1)
    coord_diffs = np.diff(corners, axis=1)
    ordered[0] = corners[np.argmin(coord_sums)]   # top-left [0, 0]
    ordered[1] = corners[np.argmin(coord_diffs)]  # top-right [width, 0]
    ordered[2] = corners[np.argmax(coord_diffs)]  # bottom-left [0, height]
    ordered[3] = corners[np.argmax(coord_sums)]   # bottom-right [width, height]
    return ordered
def wrap(img,biggestContourPoint):
    """Warp the quadrilateral given by *biggestContourPoint* to a flat,
    bird's-eye view of size window_width x window_height.

    Relies on the module-level window_width / window_height globals.
    """
    biggestContourPoint = reorder(biggestContourPoint)
    width = window_width
    height = window_height
    pts1 = np.float32(biggestContourPoint)
    # Destination corners in the same order reorder() produces:
    # top-left, top-right, bottom-left, bottom-right.
    pts2 = np.float32([[0,0],[width,0],[0,height],[width,height]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (width, height))
    return imgOutput
def stackImages(scale,imgArray):
    """Tile a list (or list of lists) of images into one preview image.

    Every image is resized by *scale* (images whose shape differs from the
    first one are resized to the first image's size instead) and grayscale
    images are promoted to BGR so np.hstack/np.vstack can combine them.
    NOTE: the input images are mutated in place by the resizing.

    Args:
        scale: resize factor applied to each tile.
        imgArray: either a flat list of images (one row) or a list of rows.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # A 2-D grid was passed if the first element is itself a list of images.
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        # Build each row horizontally, then stack the rows vertically.
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver
# Main capture loop: grab a frame, find the document outline and display
# both the warped scan and a 2x2 workflow debug view.
while True:
    _,img = webcam.read()
    img = cv2.resize(img, (500, 200))
    imgCountour = img.copy()  # getContours() draws the detected corners onto this copy
    imgThresh = imagePreProcessing(img)
    biggestContour = getContours(imgThresh)
    # print(biggestContour,biggestContour.shape)
    imgStagedArray = []
    if biggestContour.size != 0:
        # A quadrilateral was found: show the warped (bird's-eye) document.
        imgWrapOutput = wrap(img, biggestContour)
        cv2.imshow("Document Scanner", imgWrapOutput)
        imgStagedArray = [[img,imgThresh],[imgCountour,imgWrapOutput]]
    else:
        imgStagedArray = [[img,imgThresh],[img,img]]
    stackedImg = stackImages(0.6, imgStagedArray)
    cv2.imshow("Workflow", stackedImg)
    # Press 'q' to quit the scanner loop.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
| codescoop/Computer-Vision | Document_Scanner/DocumentScanner.py | DocumentScanner.py | py | 4,538 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlu... |
16636486035 | from __future__ import absolute_import
import itertools
import math
import warnings
from typing import List, Iterable, Tuple, Optional
import cv2
import numpy as np
from .torch_utils import image_to_tensor
__all__ = [
"plot_confusion_matrix",
"render_figure_to_tensor",
"hstack_autopad",
"vstack_autopad",
"vstack_header",
"grid_stack",
"plot_heatmap",
]
def plot_heatmap(
    cm: np.ndarray,
    title: str,
    x_label=None,
    y_label=None,
    x_ticks: List[str] = None,
    y_ticks: List[str] = None,
    format_string=None,
    show_scores=True,
    fontsize=12,
    figsize: Tuple[int, int] = (16, 16),
    fname=None,
    noshow: bool = False,
    cmap=None,
    backend="Agg",
):
    """Render a 2-D array as an annotated heatmap and return the figure.

    Args:
        cm: 2-D array of values to visualize.
        title: Figure title.
        x_label: X-axis label (optional).
        y_label: Y-axis label (optional).
        x_ticks: Tick labels for the columns (optional).
        y_ticks: Tick labels for the rows (optional).
        format_string: Format spec for the per-cell text; defaults to '.2f'
            for floating-point data and 'd' otherwise.
        show_scores: Whether to draw the value inside each cell.
        fontsize: Font size of the per-cell text.
        figsize: Matplotlib figure size in inches.
        fname: If given, the figure is also saved to this path (dpi=200).
        noshow: Suppress plt.show() (useful on non-interactive backends).
        cmap: Matplotlib colormap; defaults to Oranges.
        backend: Matplotlib backend selected before pyplot is imported.

    Returns:
        The matplotlib figure.

    Raises:
        ValueError: If *cm* is not 2-D.
    """
    if len(cm.shape) != 2:
        raise ValueError("Heatmap must be a 2-D array")
    # Select the backend before pyplot is imported so it actually takes effect.
    import matplotlib
    matplotlib.use(backend)
    import matplotlib.pyplot as plt
    if cmap is None:
        cmap = plt.cm.Oranges
    f = plt.figure(figsize=figsize)
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title)
    plt.colorbar(fraction=0.046, pad=0.04)
    if x_ticks is not None:
        plt.xticks(np.arange(len(x_ticks)), x_ticks, rotation=45, ha="right")
    if y_ticks is not None:
        plt.yticks(np.arange(len(y_ticks)), y_ticks)
    if format_string is None:
        format_string = ".2f" if np.issubdtype(cm.dtype, np.floating) else "d"
    if show_scores:
        # Pick black or white per cell so the text contrasts with the colormap.
        thresh = (cm.max() + cm.min()) / 2.0
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            text = format(cm[i, j], format_string) if np.isfinite(cm[i, j]) else "N/A"
            color = "white" if cm[i, j] > thresh else "black"
            plt.text(
                j,
                i,
                text,
                horizontalalignment="center",
                verticalalignment="center_baseline",
                fontsize=fontsize,
                color=color,
            )
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.tight_layout()
    if fname is not None:
        plt.savefig(fname=fname, dpi=200)
    if not noshow:
        plt.show()
    return f
def plot_confusion_matrix(
    cm: np.ndarray,
    class_names: List[str],
    figsize: Tuple[int, int] = (16, 16),
    fontsize: int = 12,
    normalize: bool = False,
    title: str = "Confusion matrix",
    cmap=None,
    fname=None,
    show_scores: bool = True,
    noshow: bool = False,
    backend: str = "Agg",
    format_string: Optional[str] = None,
):
    """
    Render the confusion matrix and return matplotlib's figure with it.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: Numpy array of (N,N) shape - confusion matrix array
        class_names: List of [N] names of the classes
        figsize: Matplotlib figure size in inches
        fontsize: Font size of the per-cell score text
        normalize: Whether to apply normalization for each row of CM
        title: Title of the confusion matrix
        cmap: Matplotlib colormap; defaults to Oranges
        fname: Filename of the rendered confusion matrix (saved at dpi=200)
        show_scores: Show scores in each cell
        noshow: Suppress plt.show() (useful on non-interactive backends)
        backend: Matplotlib backend selected before pyplot is imported
        format_string: Per-cell format spec; defaults to '.3f' when
            normalized and 'd' otherwise
    Returns:
        Matplotlib's figure
    """
    # Select the backend before pyplot is imported so it actually takes effect.
    import matplotlib
    matplotlib.use(backend)
    import matplotlib.pyplot as plt
    if cmap is None:
        cmap = plt.cm.Oranges
    if normalize:
        # Rows with zero support divide by zero; the warning is deliberately
        # silenced and those cells become NaN (rendered below as "N/A").
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cm = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    else:
        # Epsilon guards against an all-zero matrix.
        accuracy = np.trace(cm) / (float(np.sum(cm)) + 1e-8)
        misclass = 1 - accuracy
    f = plt.figure(figsize=figsize)
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45, ha="right")
    plt.yticks(tick_marks, class_names)
    if format_string is None:
        format_string = ".3f" if normalize else "d"
    if show_scores:
        # Pick black or white per cell so the text contrasts with the colormap.
        thresh = (cm.max() + cm.min()) / 2.0
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            text = format(cm[i, j], format_string) if np.isfinite(cm[i, j]) else "N/A"
            color = "white" if cm[i, j] > thresh else "black"
            plt.text(j, i, text, horizontalalignment="center", fontsize=fontsize, color=color)
    plt.ylabel("True label")
    if normalize:
        # We don't show Accuracy & Misclassification scores for normalized CM
        plt.xlabel("Predicted label")
    else:
        plt.xlabel("Predicted label\nAccuracy={:0.4f}; Misclass={:0.4f}".format(accuracy, misclass))
    plt.tight_layout()
    if fname is not None:
        plt.savefig(fname=fname, dpi=200)
    if not noshow:
        plt.show()
    return f
def render_figure_to_tensor(figure):
    """Rasterize a matplotlib figure and return it as a tensor.

    The figure is closed and released after rendering.
    """
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    figure.canvas.draw()
    # Grab the rendered pixel buffer straight from the Agg renderer.
    rendered = np.array(figure.canvas.renderer._renderer)
    plt.close(figure)
    del figure
    return image_to_tensor(rendered)
def hstack_autopad(images: Iterable[np.ndarray], pad_value: int = 0) -> np.ndarray:
    """
    Stack images horizontally, bottom-padding shorter images to a common height.

    Args:
        images: Images to stack, (H, W) or (H, W, C) with matching channel counts.
        pad_value: Constant value used for the padded region.

    Returns:
        Single image with all inputs placed side by side.
    """
    max_height = 0
    for img in images:
        max_height = max(max_height, img.shape[0])
    padded_images = []
    for img in images:
        # Pad only at the bottom so all images share max_height.
        # (Removed the original's unused `(rows, cols)` unpacking.)
        pad_bottom = max_height - img.shape[0]
        img = cv2.copyMakeBorder(img, 0, pad_bottom, 0, 0, cv2.BORDER_CONSTANT, value=pad_value)
        padded_images.append(img)
    return np.hstack(padded_images)
def vstack_autopad(images: Iterable[np.ndarray], pad_value: int = 0) -> np.ndarray:
    """
    Stack images vertically, right-padding narrower images to a common width.

    Args:
        images: Images to stack, (H, W) or (H, W, C) with matching channel counts.
        pad_value: Constant value used for the padded region.

    Returns:
        Single image with all inputs stacked top to bottom.
    """
    max_width = 0
    for image in images:
        max_width = max(max_width, image.shape[1])
    padded = [
        cv2.copyMakeBorder(image, 0, 0, 0, max_width - image.shape[1], cv2.BORDER_CONSTANT, value=pad_value)
        for image in images
    ]
    return np.vstack(padded)
def vstack_header(
    image: np.ndarray,
    title: str,
    bg_color=(35, 41, 40),
    text_color=(242, 248, 248),
    text_thickness: int = 2,
    text_scale=1.5,
) -> np.ndarray:
    """
    Prepend a 30-pixel title bar containing *title* above *image*.

    Args:
        image: Image to annotate, (H, W[, C]).
        title: Text drawn on the header bar.
        bg_color: BGR background color of the bar.
        text_color: BGR text color.
        text_thickness: Stroke thickness of the header text.
        text_scale: Font scale of the header text.

    Returns:
        New image with the header stacked on top.
    """
    # Only the width is needed (the original also unpacked an unused `rows`).
    cols = image.shape[1]
    title_image = np.zeros((30, cols, 3), dtype=np.uint8)
    title_image[:] = bg_color
    cv2.putText(
        title_image,
        title,
        (10, 24),
        fontFace=cv2.FONT_HERSHEY_PLAIN,
        fontScale=text_scale,
        color=text_color,
        thickness=text_thickness,
        lineType=cv2.LINE_AA,
    )
    return vstack_autopad([title_image, image])
def grid_stack(images: List[np.ndarray], rows: int = None, cols: int = None) -> np.ndarray:
    """
    Arrange images on a rows x cols grid, auto-padding cells as needed.

    When neither rows nor cols is given, a near-square layout is chosen;
    when only one is given, the other is derived from the image count.

    Args:
        images: Images to place on the grid (row-major order).
        rows: Number of grid rows, or None to derive it.
        cols: Number of grid columns, or None to derive it.

    Returns:
        Single composed image.

    Raises:
        ValueError: If both rows and cols are given but rows * cols is
            smaller than the number of images.
    """
    if rows is None and cols is None:
        rows = int(math.ceil(math.sqrt(len(images))))
        cols = int(math.ceil(len(images) / rows))
    elif rows is None:
        rows = math.ceil(len(images) / cols)
    elif cols is None:
        cols = math.ceil(len(images) / rows)
    else:
        if len(images) > rows * cols:
            # BUG FIX: the original message stated the inverted condition.
            raise ValueError("Number of rows * cols must be greater than or equal to the number of images")
    image_rows = []
    for r in range(rows):
        image_rows.append(hstack_autopad(images[r * cols : (r + 1) * cols]))
    return vstack_autopad(image_rows)
| BloodAxe/pytorch-toolbelt | pytorch_toolbelt/utils/visualization.py | visualization.py | py | 7,811 | python | en | code | 1,447 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_n... |
74749464104 | # -*- coding: utf-8 -*-
from django.http import JsonResponse
from django.db.models import Count, F, Case, When, Value, IntegerField
from what.models import Composer, Instrument
from api.shared.tools import get_instrument_parent_category
from Octoopus.shared.tools import load_config
def search_composers(request):
    """Paginated composer search endpoint (POST).

    POST params (all optional): request_id (echoed back so the client can
    drop stale responses), page, name, order_by / ordering, is_popular /
    is_essential checkboxes, and min/max birth/death year bounds.

    Returns a JsonResponse with one page of matching composers plus
    pagination metadata; an empty dict for non-POST requests.
    """
    data = {}
    if request.method == "POST":
        all_composers = []
        # Param for the request
        request_id = int(request.POST.get('request_id')) if request.POST.get('request_id') else 0
        page = int(request.POST.get('page')) if int(request.POST.get('page')) > 0 else 1
        name = str(request.POST.get('name')) if request.POST.get('name') else ''
        # 'default' ordering means: most popular composers first.
        if request.POST.get('order_by') and str(request.POST.get('order_by')) == 'default':
            order_by = 'is_popular'
            ordering = 'descending'
        else:
            order_by = str(request.POST.get('order_by')) if request.POST.get('order_by') else 'name'
            ordering = 'descending' if request.POST.get('ordering') == 'on' else 'ascending'
        # Null safety for dates: -1 is the "no bound" sentinel.
        dates = {}
        for date in ['min_birth','max_birth','min_death', 'max_death']:
            try:
                dates[date] = int(request.POST.get(date))
            except:
                # NOTE(review): bare except maps any missing/invalid value to -1;
                # a narrower (TypeError, ValueError) would avoid masking
                # unrelated errors.
                dates[date] = -1
        # Request
        all_composers = Composer.objects.annotate(works_quantity=Count('work'))
        # ######################### Filters #########################
        # Name (matches either the last name or the first name)
        all_composers = all_composers.filter(name__contains=name) | all_composers.filter(first_name__contains=name)
        # Popular / Essential options
        if request.POST.get('is_popular') and request.POST.get('is_popular') == 'on':
            all_composers = all_composers.filter(is_popular=True)
        if request.POST.get('is_essential') and request.POST.get('is_essential') == 'on':
            all_composers = all_composers.filter(is_essential=True)
        # Dates (death bounds also exclude still-living composers)
        if dates['min_birth'] != -1:
            all_composers = all_composers.filter(birth__year__gte=dates['min_birth'])
        if dates['max_birth'] != -1:
            all_composers = all_composers.filter(birth__year__lte=dates['max_birth'])
        if dates['min_death'] != -1:
            all_composers = all_composers.filter(death__year__gte=dates['min_death'], death__isnull=False)
        if dates['max_death'] != -1:
            all_composers = all_composers.filter(death__year__lte=dates['max_death'], death__isnull=False)
        # ######################### Ordering ########################
        if ordering == 'ascending':
            all_composers = all_composers.order_by(F(order_by).asc(nulls_last=True))
        else:
            all_composers = all_composers.order_by(F(order_by).desc(nulls_last=True))
        # Patches: sorting by death date only makes sense for dead composers.
        if order_by == 'death':
            all_composers = all_composers.filter(death__isnull=False)
        # Pagination & final results ########################
        config = load_config('what')
        res_per_page = int(config['composers']['results_per_page'])
        page_composers = all_composers[((page-1)*res_per_page):(page*res_per_page)]
        page_results = []
        for composer in page_composers:
            page_results.append({
                'id': composer.id,
                'name': composer.name,
                'first_name': composer.first_name,
                'slug': composer.slug,
                'portrait': composer.portrait,
                'works_quantity': composer.works_quantity,
            })
        # NOTE(review): len() evaluates the whole queryset just to count it;
        # .count() would avoid fetching every matching row.
        data = {
            'request_id': request_id,
            'page': page,
            'total_count': len(all_composers),
            'page_count': len(page_results),
            'composers': page_results,
            'results_per_page': res_per_page
        }
    return JsonResponse(data)
def search_instruments(request):
    """Instrument autocomplete endpoint (POST).

    POST params:
        request_id: opaque id echoed back so the client can drop stale responses.
        search: free-text query; each whitespace-separated term must appear
            as a substring of the instrument name.

    Returns a JsonResponse with up to 5 matching instruments; an empty dict
    for non-POST requests and no propositions for a blank query.
    """
    data = {}
    if request.method == "POST":
        # Params for the request
        request_id = int(request.POST.get('request_id')) if request.POST.get('request_id') else 0
        search = str(request.POST.get('search')) if request.POST.get('search') else ''
        search_terms = [term for term in search.split(' ') if term != ""]
        # BUG FIX: the original indexed search_terms[0] unconditionally,
        # raising IndexError whenever the search string was empty or blank.
        if search_terms:
            # Successive .filter() calls AND the terms together.
            results = Instrument.objects.all()
            for term in search_terms:
                results = results.filter(name__contains=term)
            # ORDER & LIMIT
            matches = results[:5]
        else:
            matches = []
        # TODO: rank exact matches above substring matches (exact_results).
        propositions = []
        for instrument in matches:
            propositions.append({
                'id': instrument.id,
                'name': instrument.name,
                'full_path': get_instrument_parent_category(instrument.id),
            })
        data = {
            'request_id': request_id,
            'search_terms': search_terms,
            'total_count': len(propositions),
            'propositions': propositions,
        }
return JsonResponse(data) | MrFaBemol/octoopus-django | api/views.py | views.py | py | 5,689 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "what.models.Composer.objects.annotate",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "what.models.Composer.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "what.models.Composer",
"line_number": 40,
"usage_type": "name"
... |
38308527532 | from pytablewriter import MarkdownTableWriter
import json
def main(json_path="results/arxiv_papers_6_Aug_2021.json"):
    """Render the arxiv paper index JSON as a markdown table on stdout.

    Args:
        json_path: path of the JSON file mapping paper number -> metadata
            (title, summary, pdf url, date).
    """
    # BUG FIX: the file handle was previously opened without ever being
    # closed; a context manager guarantees it is released. The unused
    # `temp = ...summary.find` binding was removed as well.
    with open(json_path) as f:
        data = json.load(f)
    value_matrix = []
    for item in data:
        entry = data[item]
        # Keep only the first sentence of the abstract for the table.
        first_sentence = entry["summary"][:entry["summary"].index(".")+1]
        value_matrix.append(
            [item, entry["title"], first_sentence, entry["pdf url"], entry["date"][:10]]
        )
    writer = MarkdownTableWriter(
        table_name="arxiv_table(updated in 6 Aug 2021) ",
        headers=["number ", "title", "first sentence of summary", "pdf url","published"],
        value_matrix=value_matrix,
    )
    print(writer.write_table())
if __name__ == "__main__":
    main()
| FlokiBB/DeFiPapers | src/convert_json_to_md.py | convert_json_to_md.py | py | 718 | python | en | code | 57 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytablewriter.MarkdownTableWriter",
"line_number": 16,
"usage_type": "call"
}
] |
9164147549 | from flask import Flask, request, jsonify
from flask_cors import CORS
# Flask application; CORS is enabled so a browser frontend served from
# another origin can call the API.
app = Flask(__name__)
CORS(app)
# rate_card_data = pd.read_csv('sample--rates.csv', dtype={
# 'member_csv': 'string',
# 'age_range': 'string',
# 'tier': 'string',
# '500000': 'int64',
# '700000': 'int64',
# '1000000': 'int64',
# '1500000': 'int64',
# '2000000': 'int64',
# '2500000': 'int64',
# '3000000': 'int64',
# '4000000': 'int64',
# '5000000': 'int64',
# '6000000': 'int64',
# '7500000': 'int64',
# })
# rate_card_data['min_age'] = rate_card_data['age_range'].str[:2].astype('int64')
# rate_card_data['max_age'] = rate_card_data['age_range'].str[3:].astype('int64')
# print(rate_card_data.dtypes)
# Sample rate card data (you should load this from your CSV file)
# rate_card_data = {
# "1a": 14676,
# "2a": 9441,
# "1a,1c": 7073,
# # Add more rate card data for other combinations
# }
# floater discount logic
def calculate_floater_discount(members):
    """Return the floater-policy discount rate for a group of members.

    A 50% discount applies only when more than one member is covered;
    single-member (or empty) groups get no discount.
    """
    return 0.5 if len(members) > 1 else 0
def calculate_health_insurance_premium(member_ages, sum_insured, city_tier, tenure):
    """Compute the total premium for a set of insured members.

    Each member's premium is base rate x age factor x city adjustment x
    tenure discount; the grand total is then scaled by the sum insured
    expressed in lakhs (sum_insured / 100000).

    Args:
        member_ages: iterable of member ages in years.
        sum_insured: cover amount.
        city_tier: 'tier-1' or 'tier-2' (unknown tiers get no adjustment).
        tenure: '1yr' or '2yr' (unknown tenures get no discount).

    Returns:
        The total premium as a float.
    """
    base_rate = 1000
    # Upper age bound -> multiplicative factor; ages above the last bound
    # fall through to a neutral 1.0 factor.
    age_factors = {
        18: 1.2,
        30: 1.0,
        40: 1.5,
        60: 1.7,
        99: 2.0,
    }
    city_premium_adjustment = {'tier-1': 1.2, 'tier-2': 1.0}
    tenure_discount = {'1yr': 1.0, '2yr': 0.9}
    city_factor = city_premium_adjustment.get(city_tier, 1.0)
    tenure_factor = tenure_discount.get(tenure, 1.0)
    total_premium = 0
    for age in member_ages:
        # First bracket whose upper bound covers this age (dict is ordered).
        age_factor = next((factor for limit, factor in age_factors.items() if age <= limit), 1.0)
        total_premium += base_rate * age_factor * city_factor * tenure_factor
    # Scale by the sum insured in lakhs.
    total_premium *= sum_insured / 100000
    return total_premium
@app.route('/calculate_premium', methods=['POST'])
def calculate_premium():
    """JSON endpoint: quote a premium for the posted policy parameters.

    Expects a JSON body with keys 'sum_insured', 'city_tier', 'tenure'
    and 'member_ages'; responds with {"premium": <total>}.
    """
    payload = request.json
    sum_insured = payload['sum_insured']
    city_tier = payload['city_tier']
    tenure = payload['tenure']
    member_ages = payload['member_ages']
    quoted = calculate_health_insurance_premium(member_ages, sum_insured, city_tier, tenure)
    return jsonify({"premium": quoted})
if __name__ == '__main__':
    # Development server only; debug mode must not be enabled in production.
    app.run(port=8080, debug=True)
| sheetalparsa/health-insurance-calculator | backend/app.py | app.py | py | 3,054 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
17978575525 | # 导入操作系统库
import os
# Change the working directory to the project source folder
os.chdir(r"D:\softwares\applied statistics\pythoncodelearning\chap3\sourcecode")
# Import the plotting library
import matplotlib.pyplot as plt
# Import the support vector machine models
from sklearn import svm
# Import the decision-boundary visualization helper
from sklearn.inspection import DecisionBoundaryDisplay
# Import the iris dataset loader
from sklearn.datasets import load_iris
# Import matplotlib's font-management package
from matplotlib import font_manager
# Register a CJK font so Chinese characters render correctly
font = font_manager.FontProperties(fname=r"C:\Windows\Fonts\SimKai.ttf")
# Draw in the seaborn plotting style
plt.style.use("seaborn-v0_8")
# Load the sample data
iris = load_iris()
# Keep only the first two features
X = iris.data[:, :2]
y = iris.target
# Penalty (regularization) coefficient
C = 1.0 # SVM regularization parameter
models = (
    svm.SVC(kernel="linear", C=C),
    svm.LinearSVC(C=C, max_iter=10000),
    svm.SVC(kernel="rbf", gamma=0.7, C=C), # radial basis function kernel
    svm.SVC(kernel="poly", degree=3, gamma="auto", C=C) # polynomial kernel
)
# Fit each model (lazily, via a generator expression)
models = (clf.fit(X, y) for clf in models)
# Subplot titles
titles = (
    "SVC with linear kernel",
    "LinearSVC (linear kernel)",
    "SVC with RBF kernel",
    "SVC with polynomial (degree 3) kernel",
)
# Start plotting
fig, sub = plt.subplots(2, 2, figsize=(14,14), tight_layout=True)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# First and second feature columns of X
X0, X1 = X[:, 0], X[:, 1]
for clf, title, ax in zip(models, titles, sub.flatten()):
    # Draw the decision boundary of this classifier
    disp = DecisionBoundaryDisplay.from_estimator(
        clf,
        X,
        response_method="predict",
        cmap=plt.cm.coolwarm,
        alpha=0.8,
        ax=ax,
        xlabel=iris.feature_names[0],
        ylabel=iris.feature_names[1],
    )
    # Overlay the training points as a scatter plot
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors="k")
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
fig.savefig("../codeimage/code3.pdf")
| AndyLiu-art/MLPythonCode | chap3/sourcecode/Python3.py | Python3.py | py | 1,949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name":... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.