seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
14289022638 | from copy import deepcopy
import numpy as np
def mark_points_in_diagram(coordinates: list[tuple],
                           diagram: np.ndarray,
                           consider_diagonal: bool = False):
    """Increment every diagram cell covered by the line between the two
    given endpoints.

    Horizontal and vertical lines are always marked.  Other lines are
    assumed to be exact 45-degree diagonals and are marked only when
    consider_diagonal is True; otherwise the whole line (endpoints
    included) is skipped.
    """
    covered = list(coordinates)
    x1, y1 = coordinates[0]
    x2, y2 = coordinates[1]
    # normalise so that x1 >= x2, which keeps the range() calls simple
    if x1 < x2:
        x1, y1, x2, y2 = x2, y2, x1, y1
    if x1 == x2:
        # vertical segment: add the interior y values
        covered.extend((x1, y) for y in range(min(y1, y2) + 1, max(y1, y2)))
    elif y1 == y2:
        # horizontal segment: add the interior x values
        covered.extend((x, y1) for x in range(x2 + 1, x1))
    elif consider_diagonal is False:
        # diagonal line while diagonals are disabled: mark nothing at all
        return
    elif x1 - x2 == y1 - y2:
        # 45-degree diagonal with positive slope
        covered.extend((x, y2 + (x - x2)) for x in range(x2 + 1, x1))
    else:
        # 45-degree diagonal with negative slope
        covered.extend((x, y2 - (x - x2)) for x in range(x2 + 1, x1))
    # de-duplicate so a degenerate (single-point) line is counted once
    for px, py in set(covered):
        diagram[py][px] += 1
def calculate_points(diagram):
    """Count cells of *diagram* where at least two lines overlap.

    Uses a single vectorised comparison instead of the previous
    Python-level np.nditer loop, which is both clearer and much faster
    on large grids.

    :param diagram: numpy integer array of per-cell overlap counts.
    :return: number of cells with a count of 2 or more, as a plain int.
    """
    return int(np.count_nonzero(diagram >= 2))
if __name__ == "__main__":
    # Parse "x1,y1 -> x2,y2" line segments from the AoC day-5 input file
    # while tracking the largest coordinates seen, so the diagram can be
    # sized to exactly fit the data.
    inputs = []
    max_x, max_y = 0, 0
    with open("input.txt") as f:
        for l in f.readlines():
            coordinates = []
            l = l.strip()
            for i in l.split(" -> "):
                pts = i.split(",")
                x, y = int(pts[0]), int(pts[1])
                if x > max_x: max_x = x
                if y > max_y: max_y = y
                coordinates.append((x, y))
            inputs.append(coordinates)
    # Part 1: only horizontal/vertical lines are marked (diagram is indexed
    # [row=y][col=x], hence the (max_y+1, max_x+1) shape).
    diagram = np.zeros((max_y + 1, max_x + 1), dtype=int)
    for coordinates in inputs:
        mark_points_in_diagram(coordinates, diagram)
    num_points = calculate_points(diagram)
    print("Answer of AoC 2021 Day 5 Part 1:", num_points)
    # Part 2: same counting, but 45-degree diagonals are included too.
    diagram_2 = np.zeros((max_y + 1, max_x + 1), dtype=int)
    for coordinates in inputs:
        mark_points_in_diagram(coordinates, diagram_2, consider_diagonal=True)
    num_points_2 = calculate_points(diagram_2)
    print("Answer of AoC 2021 Day 5 Part 2:", num_points_2)
| SinanTang/adventofcode | 2021/day5/day5.py | day5.py | py | 2,207 | python | en | code | 0 | github-code | 36 |
71713005863 | import requests
class ApiBaseActions:
    """Thin wrapper around a requests.Session bound to a base URL."""

    def __init__(self, base_url):
        # Normalise the base URL so it always ends with exactly one "/"
        if base_url.endswith("/"):
            self.base_url = base_url
        else:
            self.base_url = base_url + "/"
        self.session = requests.Session()

    def make_request(self, method: str, route_url: str = None, **kwargs):
        """Issue an HTTP request against base_url (+ optional route).

        :param method: HTTP verb, one of "GET" or "POST".
        :param route_url: optional path appended to the base URL; a
            leading "/" is stripped so the joined URL has no "//".
        :param kwargs: forwarded unchanged to the requests call.
        :raises NameError: for an unsupported verb (kept as NameError
            for backward compatibility with existing callers).
        :return: the requests Response object.
        """
        request_methods = {"GET": self.session.get,
                           "POST": self.session.post
                           }
        # Bug fix: the URL variable used to be assigned only when
        # route_url was provided, so calling without a route crashed with
        # UnboundLocalError.  Default to the bare base URL instead.
        request_url = self.base_url
        if route_url is not None:
            if route_url.startswith("/"):
                route_url = route_url[1:]
            request_url = self.base_url + route_url
        if request_methods.get(method) is not None:
            print(f" making {method} request to {request_url}")
            response = request_methods[method](request_url, **kwargs)
        else:
            raise NameError("Invalid API method provided")
        return response
| HarshDevSingh/python-behave | api/api_base.py | api_base.py | py | 906 | python | en | code | 0 | github-code | 36 |
14159899947 | from seqeval.metrics import classification_report
from seqeval.metrics import f1_score
from typing import List
import os
# the order for evaluating the script is in the main function
def create_fake_conll_form_without_iob_to_emNER_input():
    """Strip the gold tags from /NerKor/test.iob and emit bare tokens.

    Writes the tokens (one per line, sentences separated by blank lines)
    to input_for_emNER_nerkor_test.conllup with a single "form" column
    header — the input format the emtsv/emNER pipeline expects.
    """
    with open("/NerKor/test.iob", "r", encoding="utf-8") as f:
        corpus = f.readlines()
    current = "form\n" # to the emNER form
    for i in range(len(corpus)):
        if corpus[i] != "\n":
            # keep only the first tab-separated field (the surface form)
            current += corpus[i].split("\t")[0] + "\n"
        else:
            # preserve the blank-line sentence boundary
            current += "\n"
    with open("input_for_emNER_nerkor_test.conllup", "w", encoding="utf-8") as f:
        f.write(current)
def conll_conv2_iobes():
    """Convert the raw emNER conllup output to two-column IOBES format.

    Reads "nerkor_test.out" (skipping its header line), keeps only the
    token column and the last (NER-tag) column, and writes the result to
    "emNER_nerkor_test.iobes".  Returns silently if the input is absent.
    """
    # Bug fix: the guard used to test for "nekor_test.out" (typo), so it
    # never matched the "nerkor_test.out" file actually read below.
    if not os.path.exists("nerkor_test.out"):
        return
    with open("nerkor_test.out", "r", encoding="utf-8") as f:
        corpus = f.readlines()
    current = ""
    for i in range(len(corpus)):
        if i == 0:
            # the first line is the conllup column header — drop it
            continue
        if corpus[i] != "\n":
            # token + tab + last column (the tag, trailing newline kept)
            current += corpus[i].split("\t")[0] + "\t" + corpus[i].split("\t")[-1]
        else:
            current += "\n"
    with open("emNER_nerkor_test.iobes", "w", encoding="utf-8") as f:
        f.write(current)
def iobes_convert2_iob():
    """Rewrite the IOBES tags produced by emNER as plain IOB tags.

    "1-" prefixes (emNER's single-token entities) become "B-" and "E-"
    prefixes (entity-final tokens) become "I-"; every other tag and the
    blank sentence separators are copied through unchanged.
    """
    if not os.path.exists("emNER_nerkor_test.iobes"):
        return
    with open("emNER_nerkor_test.iobes", "r", encoding="utf-8") as source:
        rows = source.readlines()
    with open("emNER_nerkor_test.iob", "w", encoding="utf-8") as sink:
        for row in rows:
            if row == "\n":
                sink.write("\n")
                continue
            token = row.split("\t")[0]
            tag = row.split("\t")[1]
            if tag.startswith("1"):
                tag = "B" + tag[1:]
            if tag.startswith("E"):
                tag = "I" + tag[1:]
            sink.write(token + "\t" + tag)
def pred():
    """Load the predicted tag sequences from emNER_nerkor_test.iob.

    Sentences are separated by blank lines.  Prints the sentence count
    and returns one list of tags per sentence (None if the file is
    missing).  A trailing sentence without a closing blank line is
    dropped, mirroring the gold loader.
    """
    if not os.path.exists("emNER_nerkor_test.iob"):
        return
    with open("emNER_nerkor_test.iob", "r", encoding="utf-8") as handle:
        lines = handle.readlines()
    sentences = []
    tags = []
    for line in lines:
        if len(line.strip()) != 0:
            # second tab field, minus its trailing newline character
            tags.append(line.split("\t")[1][:-1])
        else:
            sentences.append(tags)
            tags = []
    print(len(sentences))
    return sentences
def gold():
    """Load the gold tag sequences from the NerKor test file.

    Mirrors pred(): blank lines separate sentences, and the second
    tab-separated column (minus its trailing newline) is the tag.
    Prints the sentence count and returns the list of tag lists.
    """
    with open("/NerKor/test.iob", "r", encoding="utf-8") as handle:
        lines = handle.readlines()
    sentences = []
    tags = []
    for line in lines:
        if len(line.strip()) != 0:
            tags.append(line.split("\t")[1][:-1])
        else:
            sentences.append(tags)
            tags = []
    print(len(sentences))
    return sentences
def fscore(gold_iob: List[List[str]], pred_iob: List[List[str]]):
    """Print the seqeval micro F1 and the per-entity classification report."""
    overall_f1 = f1_score(gold_iob, pred_iob)
    report = classification_report(gold_iob, pred_iob)
    print(overall_f1)
    print(report)
if __name__ == "__main__":
    # Full evaluation pipeline (run the commented steps first, in order):
    # 1. export bare tokens for emNER:
    # create_fake_conll_form_without_iob_to_emNER_input()
    # 2. run the emtsv pipeline on them:
    # bash: cat input_for_emNER_nerkor_test.conllup | docker run -i mtaril/emtsv:latest emMorph,emTag,emNER > nerkor_test.out
    # 3. strip the conllup output down to token/tag pairs:
    # conll_conv2_iob()
    # 4. map IOBES tags to IOB:
    # iobes_convert2_iob()
    # 5. finally score predictions against the gold annotations:
    fscore(gold(), pred())
| huspacy/huspacy-resources | scripts/benchmark/emNER_eval.py | emNER_eval.py | py | 3,617 | python | en | code | 0 | github-code | 36 |
12118485844 | """Training a Transformer Builder model to be used for adversarial attacks."""
import argparse
import random
from functools import partial
from typing import Dict
import numpy as np
import torch
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
from torch.utils.data.sampler import BatchSampler
from tqdm import tqdm
from transformers import AdamW, get_constant_schedule_with_warmup
from transformers import BertForSequenceClassification
from transformers import BertTokenizer, BertConfig
from transformers import RobertaForSequenceClassification, RobertaConfig, \
RobertaTokenizer
from builders.data_loader import collate_fever, FeverDataset, \
BucketBatchSampler, sort_key
def train_model(model: torch.nn.Module,
                train_dl: BatchSampler, dev_dl: BatchSampler,
                optimizer: torch.optim.Optimizer,
                scheduler: torch.optim.lr_scheduler.LambdaLR,
                n_epochs: int) -> (Dict, Dict):
    """Train *model*, keeping the weights of the best dev-F1 epoch.

    Relies on the module-level ``tokenizer`` global for the pad token id.
    Returns (best_model_weights, best_val) where best_val is a dict with
    val_f1 / val_p / val_r / val_loss / ep of the best epoch.
    """
    best_val, best_model_weights = {'val_f1': 0}, None
    for ep in range(n_epochs):
        for i, batch in enumerate(tqdm(train_dl, desc='Training')):
            model.train()
            optimizer.zero_grad()
            # batch[0] = token ids, batch[1] = labels; mask out padding.
            loss, _ = model(batch[0],
                            attention_mask=batch[
                                               0] !=
                                           tokenizer.pad_token_id,
                            labels=batch[1])
            loss.backward()
            optimizer.step()
            # scheduler stepped per batch, not per epoch
            scheduler.step()
            # NOTE(review): ad-hoc mid-epoch evaluations at fixed batch
            # indices (debug leftovers?) — confirm they are intentional.
            if i in [600, 700, 100]:
                print(eval_model(model, dev_dl), flush=True)
        # end-of-epoch evaluation on the dev set
        val_p, val_r, val_f1, val_loss = eval_model(model, dev_dl)
        current_val = {
            'val_f1': val_f1,
            'val_p': val_p,
            'val_r': val_r,
            'val_loss': val_loss,
            'ep': ep
        }
        print(current_val, flush=True)
        # keep the state dict of the best-so-far epoch by dev F1
        if current_val['val_f1'] > best_val['val_f1']:
            best_val = current_val
            best_model_weights = model.state_dict()
    return best_model_weights, best_val
def eval_model(model: torch.nn.Module, test_dl: BatchSampler):
    """Evaluate *model* on *test_dl*.

    Uses the module-level ``args`` global for the label count.
    Returns (precision, recall, f1, mean_loss) with macro averaging,
    and prints the confusion matrix as a side effect.
    """
    model.eval()
    loss_f = torch.nn.CrossEntropyLoss()
    with torch.no_grad():
        labels_all = []
        logits_all = []
        losses = []
        for batch in tqdm(test_dl, desc="Evaluation"):
            # NOTE(review): here the padding mask is batch[0] > 1, which
            # assumes pad/special ids <= 1, while training masks with
            # tokenizer.pad_token_id — confirm the two are consistent.
            loss, logits_val = model(batch[0],
                                     attention_mask=batch[0] > 1,
                                     labels=batch[1])
            # NOTE(review): the loss returned by the model is discarded
            # and recomputed from the logits here.
            loss = loss_f(logits_val, batch[1].long())
            losses.append(loss.item())
            labels_all += batch[1].detach().cpu().numpy().tolist()
            logits_all += logits_val.detach().cpu().numpy().tolist()
        # argmax over the class dimension to obtain hard predictions
        prediction = np.argmax(np.asarray(logits_all).reshape(-1, args.labels),
                               axis=-1)
        p, r, f1, _ = precision_recall_fscore_support(labels_all, prediction,
                                                      average='macro')
        print(confusion_matrix(labels_all, prediction), flush=True)
    return p, r, f1, np.mean(losses)
if __name__ == "__main__":
    # ---- command-line interface -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", help="Flag for training on gpu",
                        action='store_true', default=False)
    parser.add_argument("--seed", help="Random seed", type=int, default=73)
    parser.add_argument("--labels",
                        help="2 labels if NOT ENOUGH INFO excluded, "
                             "3 otherwise",
                        type=int, default=3)
    parser.add_argument("--train_dataset", help="Path to the train datasets",
                        default='data/train_nli.jsonl', type=str)
    parser.add_argument("--dev_dataset", help="Path to the dev datasets",
                        default='data/dev_nli.jsonl', type=str)
    parser.add_argument("--test_dataset", help="Path to the test datasets",
                        default='data/test_nli.jsonl', type=str)
    parser.add_argument("--type", help="Type of transformer model",
                        choices=['bert', 'roberta'], default='bert')
    parser.add_argument("--model_path",
                        help="Path where the model will be serialized",
                        default='ferver_bert', type=str)
    parser.add_argument("--batch_size", help="Batch size", type=int, default=8)
    parser.add_argument("--lr", help="Learning Rate", type=float, default=5e-5)
    parser.add_argument("--epochs", help="Epochs number", type=int, default=4)
    parser.add_argument("--mode", help="Mode for the script", type=str,
                        default='train', choices=['train', 'test'])
    args = parser.parse_args()
    # ---- reproducibility: seed every RNG in play --------------------
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(args.seed)
    device = torch.device("cuda") if args.gpu else torch.device("cpu")
    # ---- build tokenizer + classifier for the chosen architecture ---
    # NOTE(review): the BERT branch builds the model from config only
    # (random init), while the RoBERTa branch loads pretrained weights
    # via from_pretrained — confirm the asymmetry is intentional.
    if args.type == 'bert':
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        transformer_config = BertConfig.from_pretrained('bert-base-uncased',
                                                        num_labels=args.labels)
        model = BertForSequenceClassification(transformer_config).to(device)
    else:
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        transformer_config = RobertaConfig.from_pretrained('roberta-base',
                                                           num_labels=args.labels)  # , use_bfloat16=True
        model = RobertaForSequenceClassification.from_pretrained('roberta-base',
                                                                 config=transformer_config).to(
            device)
    collate_fn = partial(collate_fever, tokenizer=tokenizer, device=device)
    print(args, flush=True)
    # ---- AdamW with weight decay disabled on biases/LayerNorm -------
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {
            'params': [p for n, p in param_optimizer if
                       not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        },
        {
            'params': [p for n, p in param_optimizer if
                       any(nd in n for nd in no_decay)], 'weight_decay': 0.0
        }]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)
    if args.mode == 'test':
        # evaluate a previously saved checkpoint on the test split
        test = FeverDataset(args.test_dataset)
        # print(Counter([_x['label'] for _x in test]).most_common(3))
        test_dl = BucketBatchSampler(batch_size=args.batch_size,
                                     sort_key=sort_key, dataset=test,
                                     collate_fn=collate_fn)
        checkpoint = torch.load(args.model_path)
        model.load_state_dict(checkpoint['model'])
        print(eval_model(model, test_dl))
    else:
        print("Loading datasets...")
        train = FeverDataset(args.train_dataset)
        dev = FeverDataset(args.dev_dataset)
        # print(Counter([_x['label'] for _x in train]).most_common(3))
        # print(Counter([_x['label'] for _x in dev]).most_common(3))
        train_dl = BucketBatchSampler(batch_size=args.batch_size,
                                      sort_key=sort_key,
                                      dataset=train,
                                      collate_fn=collate_fn)
        dev_dl = BucketBatchSampler(batch_size=args.batch_size,
                                    sort_key=sort_key, dataset=dev,
                                    collate_fn=collate_fn)
        # NOTE(review): num_train_optimization_steps is computed but never
        # used, and num_warmup_steps=0.05 is a float where an integer step
        # count is expected — likely meant to be a fraction of the total.
        num_train_optimization_steps = int(
            args.epochs * len(train) / args.batch_size)
        scheduler = get_constant_schedule_with_warmup(optimizer,
                                                      num_warmup_steps=0.05)
        best_model_w, best_perf = train_model(model, train_dl, dev_dl,
                                              optimizer, scheduler, args.epochs)
        # persist the best weights together with metrics and CLI args
        checkpoint = {
            'performance': best_perf,
            'args': vars(args),
            'model': best_model_w,
        }
        print(best_perf)
        print(args)
        torch.save(checkpoint, args.model_path)
| copenlu/fever-adversarial-attacks | builders/train_transformer.py | train_transformer.py | py | 8,288 | python | en | code | 11 | github-code | 36 |
30942866367 | from rest_framework import serializers
from .models import Product, Ingredient
class IngredientSerializer(serializers.ModelSerializer):
    """Serializes an Ingredient as its title and price only."""
    class Meta:
        model = Ingredient
        fields = ('title', 'price')
class ProductSerializer(serializers.ModelSerializer):
    """Serializes a Product, nesting its ingredients read-only."""
    # nested, read-only list of the product's related ingredients
    ingredients = IngredientSerializer(read_only=True, many=True)
    class Meta:
        model = Product
        fields = ('id', 'product_type', 'title',
                  'image', 'description', 'price', 'ingredients')
| Dawid-Dahl/stereo-nightclub-api | api/serializers.py | serializers.py | py | 495 | python | en | code | 0 | github-code | 36 |
17952996407 | """
following file contains example commands used to use the library
it is focused on airfoil design, which uses parameteric definitions of airfoils and runs optimization procedures
"""
# 1. will build the airfoil using the cst parametric definition
# 1.1. define some arbitrary parameters for cst, as vectors P and Q:
P = [.03, .08, .12, .16, .06]
Q = [ .2, .25, .25, .2, .1]
# here P is camber, Q is thickness distribution. more parameters can be added to allow more geometry flexibility
# 1.2. build airfoil of class CST, using default values of exponents: N1, N2, M1, M2 and 150 points:
from foils.airfoilCST import CSTairfoil, CSTcurve
airfoil = CSTairfoil(P, Q, N1 = .5, N2 = .5, M1 = 1, M2 = 1, points = 150)
airfoil.plotAirfoil()
# 1.3. the resulting airfoil is nice and smooth, but it most likely requires some additional processing to be useful
# here i mean e.g. finite thickness trailing edge
# so lets build Airfoil class object basing on our airfoil, name it foil as airfoil is taken by CST object:
foil = airfoil.genAirfoil()
foil.plotAirfoil()
# 1.4. it is basically the same airfoil, but we now have plenty of options available for this generic Airfoil class object
# lets modify trailing edge to make it more realistic
foil.cutTE_XFOIL(t = .01, r = 0.25)
foil.plotAirfoil()
# several options are available for geometry modifications including: leading edge radius modification (e.g. to match required radius)
# thickenss and camber scaling (e.g. to match required maximum thickness), etc.
# 1.5. lets take a look at last part of Airfoil class- analysis
# there are options for xfoil and fluent analyses
# fluent requires software, which we might not have, so lets stick to xfoil and run our foil at re = 5e5 at angle of attack = 5 degrees:
foil.runXFOIL(alfa = 5, re=5e5)
# outputs -> (5.0 1.0132 0.01035 -0.1013), which means 1.0132 lift coefficient, 0.01035 drag coefficient and -0.1013 pitching moment coefficient
# a several options for running xfoil are defined as well, which allow to quickly run airfoil polar or run for desired lift coefficient
#==================================================================
# 2. similar procedure can be used to build b-spline based airfoil
from foils.airfoilBSpline import BSplineAirfoil
import numpy as np
# 2.1. lets define vectors of knots which define splines
knotsU = np.array([[0,0], [0., .04], [0.2, .1], [.4, .12],[.6,.1], [.8, .05] ,[1,0] ])
knotsL = np.array([[0,0], [0., -.025], [0.2, -.05], [.4, -0.06],[.6, -.01],[.8, .01], [1,0] ])
# and build BSplineAirfoil class foil:
airfoilb = BSplineAirfoil(knotsU, knotsL)
airfoilb.plotAirfoil()
# plot presents control polygon and resulting airfoil
# 2.2. the same procedure as for cst foil can be used to bspline foil:
foilb = airfoilb.genAirfoil()
foilb.plotAirfoil(name = 'bspline foil as Airfoil object')
foilb.cutTE_XFOIL(t = .01, r = 0.25)
foilb.plotAirfoil(name = 'thickened te of bspline foil')
# needless to mention, this library allows processing of airfoil to match required geoemtry parameters of any form:
# chord, maximum thickness, leading edge radius, trailing edge thickness, camber and thickness scaling, rotating, etc.
# hence it can be used to define airfoil stacking in 3d wing-type design cases.
#==================================================================
# 3. run airfoil design using evolutionary process
from foils.airfoilDesigner import airfoilDesigner
xs = [.02, .3, .9]
weights = [1, 1, 1]
maxiter = 80
design = airfoilDesigner([1],'CST', 10, ts = [.04, .11, .015], xs = xs, weights = weights)
design.runOptimization(maxiter = maxiter)
| Witekklim/propellerDesign | example_runs.py | example_runs.py | py | 3,608 | python | en | code | 1 | github-code | 36 |
10691268780 | import sqlite3 as sql
from sqlite3 import OperationalError
from pythonobjet.exo1_formesgeometriques.point.Point import Point
class PointDao:
    """Data-access object persisting Point objects in a local SQLite file."""

    def __init__(self):
        pass

    def initialisation(self):
        """Create the pointTable table in donnee.db (prints and continues
        if it already exists, preserving the original behaviour)."""
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            requete = "create table pointTable(col1 integer, col2 integer)"
            try:
                curseur.execute(requete)
            except OperationalError as e:
                # table already exists (or similar) — report and carry on
                print("ope=", e)
            connecteur.commit()
        finally:
            # always release the connection, even when execute() raises
            connecteur.close()

    def insertPoint(self, point):
        """Insert one point.

        Uses a parameterised query (``?`` placeholders) instead of an
        f-string, so values are passed safely to SQLite.
        """
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            requete = "insert into pointTable(col1, col2) values (?, ?)"
            curseur.execute(requete, (point.getX(), point.getY()))
            connecteur.commit()
        finally:
            connecteur.close()

    def listePoints(self):
        """Return every stored row as a list of Point objects."""
        listePoints = []
        connecteur = sql.connect("donnee.db")
        try:
            curseur = connecteur.cursor()
            curseur.execute("select * from pointTable")
            for ligne in curseur.fetchall():
                listePoints.append(Point(ligne[0], ligne[1]))
        finally:
            connecteur.close()
        return listePoints
if __name__ == "__main__":
    # Small manual smoke test for the DAO.
    print("================ debut point dao ==================")
    # create the table (idempotent: prints the error if it already exists)
    pointDao = PointDao()
    pointDao.initialisation()
    # insert two sample points
    p1 = Point(0,0)
    p2 = Point(1,1)
    pointDao.insertPoint(p1)
    pointDao.insertPoint(p2)
    # list every stored point
    listep = pointDao.listePoints()
    print("La liste est ", listep )
| silvaplana/pythontraining | pythonobjet/exo1_formesgeometriques/point/PointDao.py | PointDao.py | py | 1,627 | python | fr | code | 0 | github-code | 36 |
451100359 | #!/usr/bin/python3
from __future__ import division
from core.class_utils import MalwareUrl
from core.config_utils import get_base_config
from datetime import datetime, timedelta
from core.dns_utils import resolve_dns
from core.log_utils import get_module_logger
from core.virus_total import get_urls_for_ip
import dateutil.parser
import json
import math
import os
import requests
import sys
import time
# Resolve the plugin directory and the project root one level above it.
CDIR = os.path.dirname(os.path.realpath(__file__))
ROOTDIR = os.path.abspath(os.path.join(CDIR, os.pardir))
BASECONFIG = get_base_config(ROOTDIR)
LOGGING = get_module_logger(__name__)
# Plugin metadata consumed by the plugin loader.
TYPES = ['malware-url']
NAME = 'Cymon'
DISABLED = False
# NOTE(review): placeholder credentials hard-coded in source — replace
# with real values (ideally loaded from config, not committed).
CYMON_USER = 'YOUR USERNAME'
CYMON_PASS = 'YOUR PASSWORD'
# Page size for paginated feed queries.
BATCH_SIZE = 100
# AVsGgRbdVjrVcoBZyoid: Abuse.ch Ransomware Tracker
# AVsGgNL4VjrVcoBZyoib: Abuse.ch Zeus Tracker
# AVvtZm8i2c0QRQctzx4f: Bambenek Consulting C2
# AVsIOKQlVjrVcoBZyojw: Cyber Crime Tracker
# AVsGX4iNVjrVcoBZyoiH: Malc0de
# AVsGXy7tVjrVcoBZyoiB: URLVir
# AVsGgHxAVjrVcoBZyoiX: VX Vault
FEED_LIST = ['AVsGgRbdVjrVcoBZyoid', 'AVsGgNL4VjrVcoBZyoib', 'AVvtZm8i2c0QRQctzx4f', 'AVsGX4iNVjrVcoBZyoiH', 'AVsGXy7tVjrVcoBZyoiB', 'AVsGgHxAVjrVcoBZyoiX']
def cymon_auth():
    """Authenticate against the Cymon API.

    Returns:
        - result: (type: string) Cymon JWT token, or False on any failure.
    """
    try:
        payload = {
            'username': CYMON_USER,
            'password': CYMON_PASS}
        headers = {'Content-Type': 'application/json'}
        LOGGING.info('Authenticating against Cymon API...')
        # NOTE(review): verify=False disables TLS certificate checking —
        # confirm this is a deliberate trade-off.
        request = requests.post(
            'https://api.cymon.io/v2/auth/login',
            data=json.dumps(payload),
            headers=headers,
            verify=False)
        if request.status_code == 200:
            LOGGING.info('Authentication successful!')
            # the token lives under the 'jwt' key of the JSON response
            return json.loads(request.text)['jwt']
        else:
            LOGGING.error(
                'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                    request.status_code))
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    # any non-200 response or exception falls through to here
    return False
def get_cymon_feed_size(jwt, feed_id):
    """Determine the number of results a feed will return (max: 1000).

    Params:
    - jwt: (type: string) JWT token.
    - feed_id: (type: string) Cymon feed ID.

    Returns:
    - total: (type: int) feed size, clamped to 1000; 0 on any failure.
    """
    try:
        # query window: the last BASECONFIG.malware_days days up to now
        today = datetime.utcnow()
        threshold = today - timedelta(days=BASECONFIG.malware_days)
        headers = {'Authorization': 'Bearer {0}'.format(jwt)}
        # size=1: we only need the 'total' field, not the entries
        payload = {
            'startDate': threshold.strftime('%Y-%m-%d'),
            'endDate': today.strftime('%Y-%m-%d'),
            'size': 1}
        LOGGING.info('Determining feed size...')
        request = requests.get(
            'https://api.cymon.io/v2/ioc/search/feed/{0}'.format(feed_id),
            params=payload,
            headers=headers,
            verify=False)
        if request.status_code == 200:
            LOGGING.info('Request successful!')
            response = json.loads(request.text)
            if 'total' in response:
                total = int(response['total'])
                # the API caps retrievable results at 1000
                if total > 1000:
                    LOGGING.warning(
                        'API request returned more than 1000 results.')
                    total = 1000
                return total
        else:
            LOGGING.error(
                'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                    request.status_code))
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    return 0
def get_cymon_feed(jwt, feed_id, pages):
    """Produce a list of URLs for IPs found in the feed.

    Params:
    - jwt: (type: string) JWT token.
    - feed_id: (type: string) Cymon feed ID.
    - pages: (type: int) number of pages to retrieve.

    Returns:
    - url_list: (type: MalwareUrl list) list of malware URLs; empty on failure.
    """
    try:
        today = datetime.utcnow()
        threshold = today - timedelta(days=BASECONFIG.malware_days)
        headers = {'Authorization': 'Bearer {0}'.format(jwt)}
        LOGGING.info('Fetching data from Cymon feed: {0}'.format(feed_id))
        ip_list = []
        # page through the feed; 'from' is the zero-based result offset
        # of page n (BATCH_SIZE * (n - 1)).
        for n in range(1, pages + 1):
            payload = {
                'startDate': threshold.strftime('%Y-%m-%d'),
                'endDate': today.strftime('%Y-%m-%d'),
                'size': BATCH_SIZE,
                'from': (
                    BATCH_SIZE *
                    n -
                    BATCH_SIZE)}
            request = requests.get(
                'https://api.cymon.io/v2/ioc/search/feed/{0}'.format(feed_id),
                params=payload,
                headers=headers,
                verify=False)
            if request.status_code == 200:
                LOGGING.info('Request successful!')
                response = json.loads(request.text)
                if 'hits' in response:
                    # collect unique IPs; hostnames are resolved to IPs
                    for feed_entry in response['hits']:
                        if 'ioc' in feed_entry:
                            if 'ip' in feed_entry['ioc']:
                                mal_ip = feed_entry['ioc']['ip']
                                if mal_ip not in ip_list:
                                    ip_list.append(mal_ip)
                            elif 'hostname' in feed_entry['ioc']:
                                host_name = feed_entry['ioc']['hostname']
                                mal_ip = resolve_dns(host_name)
                                if mal_ip:
                                    if mal_ip not in ip_list:
                                        ip_list.append(mal_ip)
            else:
                LOGGING.error(
                    'Problem connecting to Cymon. Status code:{0}. Please try again later.'.format(
                        request.status_code))
        if len(ip_list) > 0:
            # expand each IP into its known URLs via the VirusTotal helper
            url_list = []
            for ip_addr in ip_list:
                ip_results = get_urls_for_ip(ip_addr, NAME)
                if len(ip_results) > 0:
                    url_list.extend(ip_results)
            return url_list
        else:
            LOGGING.warning('No hosts of interest.')
    except requests.exceptions.ConnectionError as e:
        LOGGING.warning('Problem connecting to Cymon. Error: {0}'.format(e))
    except Exception as e:
        LOGGING.warning('Problem connecting to Cymon. Aborting task.')
        LOGGING.exception(sys.exc_info())
        LOGGING.exception(type(e))
        LOGGING.exception(e.args)
        LOGGING.exception(e)
    return []
def get_malwareurl_list():
    """Produce a list of malware URLs from Cymon feeds.

    Authenticates once, then for each configured feed determines its
    size, derives the page count, and aggregates the per-feed results.

    Returns:
    - return_list: (type: MalwareUrl list) list of malware URLs; empty
      if authentication fails.
    """
    jwt = cymon_auth()
    if jwt:
        return_list = []
        for feed in FEED_LIST:
            LOGGING.info('Processing feed: {0}'.format(feed))
            feed_size = get_cymon_feed_size(jwt, feed)
            if feed_size > 0:
                # ceil(size / BATCH_SIZE) pages (true division via the
                # module's `from __future__ import division`)
                pages = int(math.ceil(feed_size / BATCH_SIZE))
                if pages < 1:
                    pages = 1
                url_list = get_cymon_feed(jwt, feed, pages)
                if len(url_list) > 0:
                    return_list.extend(url_list)
        return return_list
    else:
        LOGGING.warning('No Cymon authentication token. Cannot query API.')
    return []
| phage-nz/ph0neutria | core/plugins/cymon.py | cymon.py | py | 8,062 | python | en | code | 299 | github-code | 36 |
35810319147 | import numpy as np
class PatchSampler():
    """Samples square training patches from images, one class at a time.

    For every class colour, every patch_size x patch_size window of the
    ground-truth segmentation map that lies entirely inside that colour
    yields one patch (cut from the matching image) plus its class label.
    """

    def __init__(self, train_images_list, gt_segmentation_maps_list, classes_colors, patch_size):
        self.train_images_list = train_images_list
        self.gt_segmentation_maps_list = gt_segmentation_maps_list
        self.class_colors = classes_colors
        self.patch_size = patch_size

    # Function for sampling patches for each class
    def extractpatches(self):
        """Extract all single-class patches.

        :return: (patches, labels) — numpy arrays shuffled by one shared
            random permutation; labels[i] is the class colour of patches[i].
        """
        patches = []
        labels = []
        patch_size = self.patch_size
        for image, segmentation in zip(self.train_images_list,
                                       self.gt_segmentation_maps_list):
            for c in self.class_colors:
                # per-channel boolean map of pixels matching this class
                # (== already allocates a new array; no copy() needed)
                binary_matrix = segmentation == c
                for x, y in self.get_positions_of_patch(binary_matrix, patch_size):
                    # (x, y) is the exclusive lower-right corner of a
                    # window that lies entirely inside this class
                    patches.append(image[x - patch_size:x, y - patch_size:y])
                    labels.append(c)
        permutation = np.random.permutation(len(patches))
        return np.array(patches)[permutation], np.array(labels)[permutation]

    # function using dynamic programming to find every all-True square
    # of size patch_size in a binary matrix; returns lower-right corners
    def get_positions_of_patch(self, matrix, patch_size):
        """Find all patch_size x patch_size all-True squares.

        dp[i][j] is the side of the largest all-True square whose
        lower-right pixel is (i-1, j-1); any cell reaching patch_size
        contributes one (exclusive) lower-right corner.

        :param matrix: H x W x C boolean array.
        :return: list of (row, col) exclusive lower-right corners.
        """
        x, y, _ = matrix.shape
        dp = np.zeros((x + 1, y + 1))
        positions = []
        for i in range(1, x + 1):
            for j in range(1, y + 1):
                # Bug fix: a pixel must match on *all* channels; the old
                # code only checked channel 0, so pixels matching the
                # class colour on one channel slipped through.
                if matrix[i - 1][j - 1].all():
                    dp[i][j] = min(min(dp[i][j - 1], dp[i - 1][j]), dp[i - 1][j - 1]) + 1
                    if dp[i][j] >= patch_size:
                        positions.append((i, j))
        return positions
| IsmailKent/ComputerVision2Submissions | Sheet02/Sampler.py | Sampler.py | py | 2,536 | python | en | code | 0 | github-code | 36 |
75215887145 | import streamlit as st
from transformers import pipeline
# 👈 Add the caching decorator
@st.cache(allow_output_mutation=True)
def load_model():
return pipeline("sentiment-analysis")
model = load_model()
query = st.text_input("Your query")
if query:
result = model(query)[0] # 👈 Classify the query text
st.write(result["label"])
| Jaggusms/sentiment_analysis_higgingFace | app.py | app.py | py | 349 | python | en | code | 0 | github-code | 36 |
20353033243 | from __future__ import absolute_import
import itertools
from django import forms
from .models import Episode
class ScoreboardForm(forms.Form):
    """Form for selecting the classes and episodes shown on the scoreboard."""

    def __init__(self, *args, **kwargs):
        classes = kwargs.pop("classes")
        super(ScoreboardForm, self).__init__(*args, **kwargs)
        classes_choices = [(c.id, c.name) for c in classes]
        self.fields["classes"] = forms.MultipleChoiceField(
            choices=classes_choices, widget=forms.CheckboxSelectMultiple()
        )

        # Each tuple in choices has two elements, id and name of each level
        # First element is the actual value set on the model
        # Second element is the string displayed on the dropdown menu
        # (itertools.chain over a single generator was a no-op wrapper;
        # build the choices list directly instead.)
        episodes_choices = [(episode.id, episode.name) for episode in Episode.objects.all()]
        self.fields["episodes"] = forms.MultipleChoiceField(
            choices=episodes_choices,
            widget=forms.CheckboxSelectMultiple(),
        )
class LevelModerationForm(forms.Form):
    """Form for picking the classes whose levels a teacher may moderate."""

    def __init__(self, *args, **kwargs):
        classes = kwargs.pop("classes")
        teacher = kwargs.pop("teacher")
        super(LevelModerationForm, self).__init__(*args, **kwargs)

        # Admin teachers see an ownership hint after each class name:
        # "(you)" for their own classes, otherwise the owning teacher's
        # full name in parentheses.  Regular teachers just see the name.
        if teacher.is_admin:
            classes_choices = []
            for c in classes:
                if c.teacher == teacher:
                    owner = "(you)"
                else:
                    owner = f"({c.teacher.new_user.first_name} {c.teacher.new_user.last_name})"
                classes_choices.append((c.id, f"{c.name} " + owner))
        else:
            classes_choices = [(c.id, c.name) for c in classes]

        self.fields["classes"] = forms.MultipleChoiceField(
            choices=classes_choices, widget=forms.CheckboxSelectMultiple()
        )
| ocadotechnology/rapid-router | game/forms.py | forms.py | py | 1,935 | python | en | code | 53 | github-code | 36 |
10623176651 | """An AWS Python Pulumi program"""
import pulumi
from pulumi_aws import eks
import networking
# Pull stack configuration (set via `pulumi config`); all values are
# required, so a missing key fails fast at preview time.
# (Trailing semicolons removed — they are non-idiomatic in Python.)
config = pulumi.Config()
environment = config.require('environment')
instance_size = config.require('instance-size')
eks_service_role = config.require('eks-service-role')
node_instance_role = config.require('node-instance-role')
node_pool_desired_size = config.require('pool-desired-size')
node_pool_min_size = config.require('pool-min-size')
node_pool_max_size = config.require('pool-max-size')
node_ssh_key = config.require('node-ssh-key')
eks_node_disk_size = config.require('eks-node-disk-size')
eks_version = config.require('eks-version')

# Create the EKS control plane inside the VPC defined in networking.py
# (the old comment said "Roles", but the roles are supplied via config).
eks_cluster = eks.Cluster(
    f'{environment}',
    role_arn=eks_service_role,
    version=eks_version,
    tags={
        'Name': f'{environment}',
    },
    vpc_config=eks.ClusterVpcConfigArgs(
        # NOTE(review): the API endpoint is reachable from any IP —
        # confirm this exposure is intentional.
        public_access_cidrs=['0.0.0.0/0'],
        security_group_ids=[networking.eks_security_group.id],
        subnet_ids=networking.subnet_ids,
    ),
)

# Managed worker node group attached to the cluster above.
eks_node_group = eks.NodeGroup(
    f'{environment}-wng1',
    cluster_name=eks_cluster.name,
    node_group_name=f'{environment}-wng1',
    node_role_arn=node_instance_role,
    subnet_ids=networking.subnet_ids,
    # config.require() returns strings, hence the explicit int() casts
    disk_size=int(eks_node_disk_size),
    instance_types=[instance_size],
    remote_access=eks.NodeGroupRemoteAccessArgs(
        ec2_ssh_key=node_ssh_key,
    ),
    tags={
        'Name': f'{environment}-wng1',
    },
    scaling_config=eks.NodeGroupScalingConfigArgs(
        desired_size=int(node_pool_desired_size),
        max_size=int(node_pool_max_size),
        min_size=int(node_pool_min_size),
    ),
)
) | dtorresf/iac | pulumi/eks/__main__.py | __main__.py | py | 1,663 | python | en | code | 0 | github-code | 36 |
19834995573 | import numpy as np
from metrics import r2_score
class LinearRegression():
    """Multivariate linear regression.

    Supports three fitting strategies: the closed-form normal equation
    (fit_normal), batch gradient descent (fit_gd) and stochastic gradient
    descent (fit_sgd).  After fitting, ``intercept_`` holds the bias term
    while both ``coef_`` and the legacy (misspelled) ``cofficients_``
    attribute hold the feature weights — previously each fit method set a
    different subset of these names; now all three set both, so callers
    written against any one method keep working.
    """

    def __init__(self):
        self._theta = None         # full parameter vector [bias, weights...]
        self.cofficients_ = None   # legacy alias of coef_ (kept for callers)
        self.coef_ = None
        self.intercept_ = None

    def fit_normal(self, X_train, y_train):
        """Fit in closed form via the normal equation:
        theta = (X^T X)^-1 X^T y.

        :param X_train: (n_samples, n_features) feature matrix.
        :param y_train: (n_samples,) target vector.
        :return: self, fitted.
        """
        X_b = np.hstack([np.ones((X_train.shape[0], 1)), X_train])
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.coef_ = self._theta[1:]
        self.cofficients_ = self._theta[1:]
        # kept as a length-1 array for backward compatibility with the
        # original fit_normal behaviour
        self.intercept_ = self._theta[:1]
        return self

    def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Fit with batch gradient descent on the MSE loss.

        :param eta: learning rate.
        :param n_iters: maximum number of gradient steps.
        :return: self, fitted.
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            # mean squared error; inf when the computation overflows
            # (e.g. a diverging learning rate)
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
            except Exception:
                return float('inf')

        def dJ(theta, X_b, y):
            return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # stop early once the loss stops improving meaningfully
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                cur_iter += 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        self.cofficients_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Fit with stochastic gradient descent using a decaying
        learning rate eta(t) = t0 / (t + t1).

        :param n_iters: number of full passes over the training set.
        :return: self, fitted.
        """
        def dJ_theta_stochastic(x_i, y_i, theta):
            # gradient of the single-sample squared error
            return x_i.T.dot(x_i.dot(theta) - y_i) * 2.

        def learning_rate(t):
            return t0 / (t + t1)

        def stochastic_gradient_descent(X_b, y, theta, n_iters):
            sample_count = len(X_b)
            for n in range(n_iters):
                # shuffle X and y together so pairs stay aligned
                X_y = np.hstack([X_b, y.reshape((-1, 1))])
                np.random.shuffle(X_y)
                X_shuffled = X_y[:, 0:-1]
                y_shuffled = X_y[:, -1]
                for i in range(sample_count):
                    grad = dJ_theta_stochastic(X_shuffled[i], y_shuffled[i], theta)
                    theta = theta - learning_rate(sample_count * n + i) * grad
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = stochastic_gradient_descent(X_b, y_train, initial_theta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        self.cofficients_ = self._theta[1:]
        return self

    def predict(self, X):
        """Predict targets for X using the fitted parameters."""
        X_b = np.hstack([np.ones((X.shape[0], 1)), X])
        return X_b.dot(self._theta)

    def __repr__(self):
        return "LinearRegression()"

    def score(self, X_test, y_test):
        """Return the R^2 score of the model on the given test set."""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)
12083042499 | import json
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.management import call_command
from django.http import HttpRequest, HttpResponse
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.utils.timezone import now as tz_now
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from .models import LinearIssue
logger = logging.getLogger(__name__)
@login_required
@user_passes_test(lambda u: u.is_staff)
def import_issues(request: HttpRequest) -> HttpResponseRedirect:
if not request.user.has_perms("linear.use_api"):
messages.add_message(
request, messages.ERROR, "You do not permission to use the Linear API"
)
else:
call_command("import_issues")
messages.add_message(
request,
messages.SUCCESS,
"All Linear issues have been imported successfully",
)
return HttpResponseRedirect(reverse("admin:linear_linearissue_changelist"))
@csrf_exempt
@require_http_methods(["POST"])
def webhook(request: HttpRequest) -> HttpResponse:
"""
Process Linear webhook event.
This webhook currently only listens for Issue events, and it treats both
"Create" and "Update" events in the same way. It will set the identifier,
team_name, title, state and estimate fields. The project_name and
milestone_name are not included in the webhook payload.
"""
try:
body = json.loads(request.body.decode("utf-8"))
data = body["data"]
if body["type"] != "Issue":
return HttpResponse("We are not interested in non-Issue updates")
except (json.JSONDecodeError, KeyError):
logger.exception("Unable to process Linear webhook event")
return HttpResponse(
"We couldn't process the request, but we're sending back a 200 anyway."
)
# we always get id, team, state in the payload. project/milestone are not included.
id = data["id"]
title = data["title"]
team_name = data["team"]["name"]
state = data["state"]["name"]
estimate = data.get("estimate")
identifier = f'{data["team"]["key"]}-{data["number"]}'
try:
issue = LinearIssue.objects.get(id=id)
except LinearIssue.DoesNotExist:
issue = LinearIssue.objects.create(id=id)
issue.title = title
issue.state = state
issue.estimate = estimate
issue.team_name = team_name
issue.identifier = identifier
issue.last_refreshed_at = tz_now()
issue.save()
return HttpResponse("Task updated")
| yunojuno/django-linear | linear/views.py | views.py | py | 2,704 | python | en | code | 3 | github-code | 36 |
13217684501 | # -*- coding: UTF-8 -*-
__author__ = 'admin'
import sys,math,os
import module1
j = 3
i = 2
j = 3
i = 2
for i in range(2, 100):
x = 'foo'
if i % 2 == 1:
sys.stdout.write(x + '\n' + str(i) + x)
module1.printname(x)
i += 1
else:
sys.stdout.write(x + '\n' + str(i))
i += 1
| wanseanpark/shell | 1.project/1.test/01.input.py | 01.input.py | py | 329 | python | en | code | 0 | github-code | 36 |
36899051077 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import math
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
#Global Variables
num_iter=0
saved_theta=np.zeros((2, 1))
cost=np.zeros(1)
def h(X, theta):
return np.dot(X, theta)
def Jtheta(X, Y, theta):
return 1/2.0*np.sum((h(X, theta)-Y)**2)
def linear_prediction(x, theta):
return theta[0] + theta[1]*x
def normal(X, Y):
return np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, Y))
def gradient_descent(X, Y, alpha, theta_in):
i=0
epsilon=1e-15
theta=theta_in;
global saved_theta
global num_iter
global cost
saved_theta=theta_in
J=Jtheta(X, Y, theta)
cost=J
while (True):
Jprev=Jtheta(X, Y, theta)
theta_new= theta + (alpha)*np.dot(X.T, (Y-h(X, theta)))
Jnew=Jtheta(X, Y, theta_new)
if math.fabs(Jnew-Jprev)<epsilon:
break
theta=theta_new
saved_theta=np.hstack((saved_theta, theta))
cost=np.vstack((cost, Jnew))
i=i+1
num_iter=i
print('Number of iterations', i)
return theta
Xtemp = np.loadtxt('linearX.csv', delimiter=',') #array of X
ytemp = np.loadtxt('linearY.csv', delimiter=',')[np.newaxis] #converting 1d array into 2d matrix using np.newaxis
ones = np.ones(len(Xtemp))
#Normalizing the data
mean = np.mean(Xtemp)
sigma = np.std(Xtemp)
Xtemp = (Xtemp - mean)/sigma
Xtemp1 = np.vstack((ones, Xtemp))
X=Xtemp1.T.copy() #taking transpose of X
Y=ytemp.T.copy() #taking transpose of Y
alpha=0.0001
theta=[[0.], [0.]]
#part a
theta_optimal=gradient_descent(X, Y, alpha, theta)
print('Optimal value of theta', theta_optimal)
print('Analytical solution is', normal(X, Y))
#part b
plt.plot(Xtemp, Y, 'ro')
plt.plot(Xtemp, np.dot(X, theta_optimal))
plt.xlabel('Aciditiy')
plt.ylabel('Density')
plt.show()
def createJ_plot(Theta_0, Theta_1):
Theta = np.matrix([[Theta_0], [Theta_1]])
return ((Y - X * Theta).T * (Y - X * Theta) / (2*X.shape[0])).item(0)
#part c
#3D mesh
fig=plt.figure()
ax = fig.add_subplot(111, projection='3d')
theta_0_plot=np.arange(-4, 4, 0.05)[np.newaxis]
theta_1_plot=np.arange(-1, 1, 0.002)[np.newaxis] #Make it 4
theta_0_plot, theta_1_plot=np.meshgrid(theta_0_plot, theta_1_plot)
J_plot=np.vectorize(createJ_plot)(theta_0_plot, theta_1_plot)
ax.plot_surface(theta_0_plot, theta_1_plot, J_plot, cmap=plt.cm.jet, rstride=1, cstride=1, linewidth=0)
plt.show()
| ashishgupta97/Machine-Learning | LinearRegression.py | LinearRegression.py | py | 2,417 | python | en | code | 0 | github-code | 36 |
34463656888 | import mainBMRSA as Bmrsa
import mainBRSA as Brsa
import matplotlib.pyplot as plt
from useful import funcs
# number of bs
bsize = 1
while bsize < 4:
# Start from prime size = 10 bits
# mrsa and bmrsa time
itr = 10
mrsa = []
bmrsa = []
# points needed to be marked on graph
pts = [1024]
# go till 18 bits
# No of bits in key = 2 ** (n/b) * b
while itr <= 18:
n = itr
b = 2
ll = bsize
primes = funcs.generate_primes(n, b)
n = 1
for p in primes:
n *= p
m = funcs.generate_message(ll, n)
es = funcs.es(primes, ll)
timeBRSA = 0
# Get time for each BRSA
for i in range(ll):
timeBRSA += Brsa.main(primes, es[i], m[i])
# Time in milliseconds
mrsa.append(timeBRSA * 1000)
# Get time for BMRSA
timeBMRSA = Bmrsa.main(primes, es, m)
# Time in milli seconds
bmrsa.append(timeBMRSA*1000)
pts = [1024, 1536, 2048, 2560, 3072]
itr += 2
# Plotting graphs
fig, ax = plt.subplots()
# plot subplots
ax.plot(pts, mrsa, "ro-", label="BatchRSA")
ax.plot(pts, bmrsa, "bo-", label="BMRSA")
# legends in graph
legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')
# title
plt.title("Decryption time vs Key Size in bits for batch size = " + str(bsize))
plt.ylabel('Time in milliseconds')
plt.xlabel('No of Bits in key')
# display graph
plt.show()
# increase byte size
bsize += 1
| SUMUKHA-PK/RSA-efficient-variants | src/BMRSA/main.py | main.py | py | 1,574 | python | en | code | 0 | github-code | 36 |
34109397178 | import requests
from tunga_tasks import slugs
EVENT_PUSH = 'push'
EVENT_CREATE = 'create'
EVENT_DELETE = 'delete'
EVENT_COMMIT_COMMENT = 'commit_comment'
EVENT_PULL_REQUEST = 'pull_request'
EVENT_PULL_REQUEST_REVIEW_COMMENT = 'pull_request_review_comment'
EVENT_ISSUE = 'issue'
EVENT_ISSUE_COMMENT = 'issue_comment'
EVENT_GOLLUM = 'gollum'
EVENT_RELEASE = 'release'
HEADER_EVENT_NAME = 'HTTP_X_GITHUB_EVENT'
HEADER_DELIVERY_ID = 'HTTP_X_GITHUB_DELIVERY'
PAYLOAD_ACTION = 'action'
PAYLOAD_ACTION_CREATED = 'created'
PAYLOAD_ACTION_DELETED = 'deleted'
PAYLOAD_ACTION_OPENED = 'opened'
PAYLOAD_ACTION_EDITED = 'edited'
PAYLOAD_ACTION_CLOSED = 'closed'
PAYLOAD_ACTION_REOPENED = 'reopened'
PAYLOAD_ACTION_PUBLISHED = 'published'
PAYLOAD_COMMENT = 'comment'
PAYLOAD_HTML_URL = 'html_url'
PAYLOAD_SENDER = 'sender'
PAYLOAD_USER = 'user'
PAYLOAD_USERNAME = 'login'
PAYLOAD_AVATAR_URL = 'avatar_url'
PAYLOAD_BODY = 'body'
PAYLOAD_CREATED_AT = 'created_at'
PAYLOAD_REF_TYPE = 'ref_type'
PAYLOAD_REF = 'ref'
PAYLOAD_REF_TYPE_REPO = 'repository'
PAYLOAD_REF_TYPE_BRANCH = 'branch'
PAYLOAD_REF_TYPE_TAG = 'tag'
PAYLOAD_REPOSITORY = 'repository'
PAYLOAD_PAGES = 'pages'
PAYLOAD_PAGE_NAME = 'page_name'
PAYLOAD_TITLE = 'title'
PAYLOAD_SUMMARY = 'summary'
PAYLOAD_HEAD_COMMIT = 'head_commit'
PAYLOAD_URL = 'url'
PAYLOAD_MESSAGE = 'message'
PAYLOAD_TIMESTAMP = 'timestamp'
PAYLOAD_ID = 'id'
PAYLOAD_TREE_ID = 'tree_id'
PAYLOAD_PULL_REQUEST = 'pull_request'
PAYLOAD_NUMBER = 'number'
PAYLOAD_MERGED = 'merged'
PAYLOAD_MERGED_AT = 'merged_at'
PAYLOAD_ISSUE = 'issue'
PAYLOAD_RELEASE = 'release'
PAYLOAD_TAG_NAME = 'tag_name'
REPOSITORY_FIELDS = ['id', 'name', 'description', 'full_name', 'private', 'url', 'html_url']
ISSUE_FIELDS = ['id', 'number', 'title', 'body', 'url', 'html_url', 'repository']
def transform_to_github_events(events):
"""
Transforms Tunga integration events to corresponding GitHub events
:param events: A list of Tunga events
:return: A list of GitHub events
"""
github_events = []
event_map = {
slugs.BRANCH: [EVENT_CREATE, EVENT_DELETE],
slugs.TAG: [EVENT_CREATE, EVENT_DELETE],
slugs.PULL_REQUEST_COMMENT: EVENT_PULL_REQUEST_REVIEW_COMMENT,
slugs.WIKI: EVENT_GOLLUM,
}
if events:
for tunga_event in events:
if tunga_event in event_map:
co_events = event_map[tunga_event]
if isinstance(co_events, list):
github_events.extend(co_events)
else:
github_events.append(co_events)
else:
github_events.append(tunga_event)
return list(set(github_events))
def transform_to_tunga_event(event):
event_map = {
EVENT_CREATE: slugs.BRANCH,
EVENT_DELETE: slugs.BRANCH,
EVENT_PULL_REQUEST_REVIEW_COMMENT: slugs.PULL_REQUEST_COMMENT,
EVENT_GOLLUM: slugs.WIKI,
}
return event_map.get(event, event)
def extract_repo_info(repo):
repo_info = {}
for key in REPOSITORY_FIELDS:
repo_info[key] = repo[key]
return repo_info
def api(endpoint, method, params=None, data=None, access_token=None):
headers = {
'Accept': 'application/vnd.github.v3+json',
'Content-Type': 'application.json'
}
if access_token:
headers['Authorization'] = 'token %s' % access_token
return requests.request(
method=method, url='https://api.github.com'+endpoint, params=params, json=data, headers=headers
)
| jonathanzerox/tunga-api | tunga_utils/github.py | github.py | py | 3,509 | python | en | code | 0 | github-code | 36 |
4502539107 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import os
from service.singleton import Singleton
class Config(Singleton):
def __init__(self):
parser = self.__get_praser()
self.args = parser.parse_args()
self.port = self.args.port
self.token = self.args.token
self.base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.db_filename = os.path.join(self.base_dir, 'verify_bot.db')
self.url_prefix = self.args.url_prefix
if not self.__check_args():
parser.print_help()
sys.exit(1)
def __get_praser(self):
parser = argparse.ArgumentParser(description='VerifyBot')
parser.add_argument('-t', '--token', help='Telegram Bot token.')
parser.add_argument('-p', '--port', help='Port to listen.')
parser.add_argument('-d', '--demon', help='Run as demon.', action='store_true')
parser.add_argument('--url-prefix', help='Working url prefix. e.g. "/verifybot"', default='')
parser.add_argument('--set-web-hook', help='Set web hook.', action='store_true')
parser.add_argument('--hostname', help='WebHook hostname.')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='Enable debug info')
return parser
def __check_args(self):
if len(sys.argv) == 1:
return False
if self.args.demon:
if not self.args.port or not self.args.token:
return False
if self.args.set_web_hook:
if not self.args.token or not self.args.hostname:
return False
return True
| comzyh/VerifyBot | service/config.py | config.py | py | 1,711 | python | en | code | 4 | github-code | 36 |
13782320879 | from fastapi import APIRouter
from conn import conn
from model.kitchen import Kitchen
kitchen_router = APIRouter(
prefix="/kitchen",
tags=["kitchen"],
)
@kitchen_router.get("/")
async def read_items(attr: list, where: dict):
cursor = conn.cursor()
sql = Kitchen.querySql(attr=attr, where=where)
cursor.execute(sql)
lines = cursor.fetchall()
return {'values': lines}
@kitchen_router.post("/")
async def insert_item(kitchen: Kitchen):
cursor = conn.cursor()
sql = kitchen.insertSql()
cursor.execute(sql)
conn.commit()
return {'added':kitchen}
@kitchen_router.delete("/")
async def delete_item(where: dict):
cursor = conn.cursor()
sql = Kitchen.deleteSql(where=where)
cursor.execute(sql)
conn.commit()
return {'deleted': where}
@kitchen_router.put("/")
async def update_items(attrDict: dict, where: dict):
cursor = conn.cursor()
sql = Kitchen.updateSql(where=where, attrDict=attrDict)
cursor.execute(sql)
conn.commit()
return {'updated': attrDict, 'where': where}
| JulioHey/Banco-de-Dados---EP | server/router/kitchen.py | kitchen.py | py | 1,060 | python | en | code | 0 | github-code | 36 |
35620233152 | """Runs training and evaluation of Prophet models."""
import importlib
import json
import os
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import mlflow
from dask import distributed
from prophet import plot
from prophet.diagnostics import cross_validation, performance_metrics
from prophet.serialize import model_to_json
import utils
config = utils.read_config()
# Client needs to this set (along with S3 creds if perms required)
# Also need to set envvar
# - AWS_ACCESS_KEY_ID to minio-user (see mlflow minio vault in ansible)
# - AWS_SECRET_ACCESS_KEY to minio-user-password
os.environ['MLFLOW_S3_ENDPOINT_URL'] = config['mlflow_s3_endpoint_url']
def __load_model(model):
"""Given path to model, return loaded Prophet model."""
# boilerplate: https://docs.python.org/3/library/importlib.html
model_name = model.name.split('.')[0]
spec = importlib.util.spec_from_file_location(model_name, model)
model_module = importlib.util.module_from_spec(spec)
sys.modules['model_module'] = model_module
spec.loader.exec_module(model_module)
return model_module.model()
@click.command()
@click.option('--data_path', default=config['station_file_path'], type=Path, help='Path to station data directory')
@click.option('--tracking_uri', default=config['tracking_uri'], type=str, help='URI to MLFlow tracking')
@click.option('--artifact_path', default=config['artifact_path'], type=Path, help='Path to directory where artifacts will be saved')
@click.option('--git_tag', default=config['git_tag'], type=str, help='DVC git tag (version of data)')
@click.option('--save_model', is_flag=True, help='Save model')
@click.option('--dask', default=None, type=str, help='URL to connect to Dask to parallelize cross validation')
@click.argument('model')
@click.argument('station')
@click.argument('experiment')
def run_model(model, station, experiment, data_path, tracking_uri, artifact_path, git_tag, save_model, dask):
script_name = Path(__file__).name.split('.py')[0]
mlflow.set_tracking_uri(tracking_uri)
mlflow.set_experiment(experiment)
artifact_path = Path(artifact_path)
artifact_path.mkdir(exist_ok=True)
# Load data
# - set cap and floor as physical bounds and bounds respected by logistic growth models
station_df = utils.load_station(data_path, station)
station_df['cap'] = 1
station_df['floor'] = 0
model = Path(model)
if station == 'all':
stations = utils.STATIONS
else:
stations = [station]
if dask:
client = distributed.Client(dask)
parallel = "dask"
else:
parallel = "threads"
for station in stations:
with mlflow.start_run(run_name=f'{script_name}-{model}') as active_run:
mlflow.set_tags(
{
'git-tag': git_tag,
'station': station,
'model': model
}
)
# Load model
model = __load_model(model)
# - fit model
model.fit(station_df)
# Calculate metrics from cross validation
# - Start cross-validation every 30 days and forecast for next 180
metric_keys = ["mse", "rmse", "mae", "mape", "mdape", "smape", "coverage"]
df_cv = cross_validation(
model=model,
period="30 days",
horizon="180 days",
parallel=parallel,
disable_tqdm=True,
)
cv_metrics = performance_metrics(df_cv)
# if some metrics are close to 0 they are not included, so need to check that
metrics = {k: cv_metrics[k].mean() for k in metric_keys if k in cv_metrics.keys()}
# Create forecast
future = model.make_future_dataframe(periods=365)
forecast = model.predict(future)
# this is the fake SST because we don't have that information now
# - for illustrative purposes
forecast['sst'] = station_df['sst'].copy()
guess_temp = station_df['sst'].iloc[-52::].values.copy()
forecast['sst'].iloc[-52::] = guess_temp
# Log metrics
mlflow.log_metrics(metrics)
# Log model params
params = utils.extract_model_params(model)
mlflow.log_params(params)
# Save image
fig, axes = plt.subplots(nrows=3, figsize=(10, 8))
# - Station data
axes[0].plot(station_df.ds, station_df.y)
axes[0].set_ylabel('Ice coverage')
ax2 = axes[0].twinx()
ax2.plot(station_df.ds, station_df.sst, color='r')
ax2.set_ylabel('SST')
# - Cross-validation plot with error
plot.plot_cross_validation_metric(df_cv, metric='rmse', ax=axes[1])
# - Forecast
model.plot(forecast, ax=axes[2], ylabel='Ice coverage')
plot.add_changepoints_to_plot(axes[2], model, forecast)
image_path = artifact_path / 'training'
image_path.mkdir(exist_ok=True)
fname = image_path / f'{station}.png'
fig.savefig(fname)
plt.close(fig)
mlflow.log_artifact(str(fname))
# Save forecast image (annoyingly doesn't take ax)
fig = model.plot_components(forecast)
fname = image_path / f'{station}-forecast-components.png'
fig.savefig(fname)
plt.close(fig)
mlflow.log_artifact(str(fname))
if save_model:
model_path = artifact_path / 'station-models'
model_path.mkdir(exist_ok=True)
fname = model_path / f'{station}-model.json'
with open(fname, 'w') as fout:
json.dump(model_to_json(model), fout)
# Saves as a runnable artifact. we'll start with just the json file
# mlflow.prophet.save_model(model, f'{station-model}.json')
if __name__ == '__main__':
run_model()
| axiom-data-science/project-s2s-sea-ice-guidance | src/experiments/runner.py | runner.py | py | 6,063 | python | en | code | 0 | github-code | 36 |
16568956344 | from RW import readAll
import os
import json
from PrettyPrint import pretty, prettyJarInfo, prettyNameSpace1, prettyElementKind
import sys
#pathToRawData = r"C:\Users\t-amketk\RawData\RawData"
def get_all_projects(path):
return readAll("Projects", "Project", pathToProtos=os.path.join(path, "ProtosOut"))
def get_details_of_commits_with_type_change(project, pathToTypeChangeCommit):
return readAll("TypeChangeCommit_" + project.name, "TypeChangeCommit", pathToProtos=pathToTypeChangeCommit)
def get_dependency_affected(commitInfo):
deps = {}
dependencyUpdate = commitInfo.dependencyUpdate
if len(dependencyUpdate.update) > 0:
deps['Updated'] = list(map(lambda u: "->".join([prettyJarInfo(u.before), prettyJarInfo(u.after)]),
dependencyUpdate.update))
if len(dependencyUpdate.added) > 0:
deps['Added'] = list(map(lambda u: prettyJarInfo(u), dependencyUpdate.added))
if len(dependencyUpdate.removed) > 0:
deps['Removed'] = list(map(lambda u: prettyJarInfo(u), dependencyUpdate.removed))
return deps
def convert(pathToSetup):
pathToJson = pathToSetup
commits = {}
typeChangeDict = {}
for p in get_all_projects(os.path.join(pathToSetup, 'Input'))[:2]:
commit_details = get_details_of_commits_with_type_change(p, os.path.join(pathToSetup, 'Output'))
print()
for cmt in commit_details:
commit_Info = {'sha': cmt.sha, 'project': p.name,
'GitHub_URL': p.url,
'Dependencies': get_dependency_affected(cmt),
'Refactoring': cmt.refactorings._values
}
commits[cmt.sha]= commit_Info
for typeChange in cmt.typeChanges:
instances = []
for instance in typeChange.typeChangeInstances:
mappings = []
for mapping in instance.codeMapping:
replacements = []
for repl in mapping.replcementInferred:
repl_info = {"Before": repl.b4, "After": repl.aftr, "Replacement label": repl.replacementType}
replacements.append(repl_info)
mapping_info = {'IsSame': mapping.isSame, 'Prev Code Snippet': mapping.b4
, 'After Code Snippet': mapping.after
, 'Prev Code snippet url': mapping.urlbB4
, 'After Code snipper url': mapping.urlAftr
, 'Replacements': replacements}
mappings.append(mapping_info)
instance_info = {'From Type': pretty(instance.b4), 'To Type': pretty(instance.aftr)
, 'Element name before': instance.nameB4
, 'Element name after': instance.nameAfter
, 'Element kind affected': prettyElementKind(instance.elementKindAffected)
, 'Visibility of the element': instance.visibility
, 'Syntactic Transformation of type ast': instance.syntacticUpdate.transformation
, 'Github URL of element before': instance.urlB4
, 'Github URL of element after': instance.urlAfter
, 'Adaptations': mappings}
instances.append(instance_info)
typeChange_info = {'sha': cmt.sha, 'project': p.name
, 'From Type': pretty(typeChange.b4), 'To Type': pretty(typeChange.aftr)
, 'Number of Instances': len(typeChange.typeChangeInstances)
, 'Namespace of From Type': prettyNameSpace1(typeChange.nameSpacesB4)
, 'Namespace of To Type': prettyNameSpace1(typeChange.nameSpaceAfter)
, 'Hierarchy Relation': typeChange.hierarchyRelation
, 'Does from type composes To type': typeChange.b4ComposesAfter
, 'Primitive widening': typeChange.primitiveInfo.widening
, 'Primitive narrowing': typeChange.primitiveInfo.narrowing
, 'Primitive unboxing': typeChange.primitiveInfo.unboxing
, 'Primitive boxing': typeChange.primitiveInfo.boxing
, 'Instances': instances}
typeChangeDict.setdefault('->'.join([typeChange_info['From Type'], typeChange_info['To Type']]), [])\
.append(typeChange_info)
with open(os.path.join(pathToJson, "commitInfo.json"), "w+") as outfile:
json.dump(commits, outfile)
with open(os.path.join(pathToJson, "typeChange.json"), "w+") as outfile:
json.dump(typeChangeDict, outfile)
convert(sys.argv[1]) | ameyaKetkar/TypeChangeMiner | scripts/ProtosToJson.py | ProtosToJson.py | py | 4,967 | python | en | code | 1 | github-code | 36 |
16117948354 | import os
from builtins import classmethod, int
from datetime import datetime
from models.country import Country
from es import es
class State:
def __init__(self):
pass
@classmethod
def list(cls, country):
if country != "":
state_data = es.search(
index=os.environ.get("INDEX"),
body={
'size': 10000,
'query': {"bool": {"must": [{"match": {"_type": "state"}}, {"match": {"country": country}}]}}
},
filter_path=['hits.hits._id', 'hits.hits._source', 'hits.hits._parent']
)
else:
state_data = es.search(
index=os.environ.get("INDEX"),
body={
'size': 10000,
'query': {"match": {"_type": "state"}}
},
filter_path=['hits.hits._id', 'hits.hits._source', 'hits.hits._parent']
)
states = []
if 'hits' in state_data and 'hits' in state_data['hits']:
states = [
{"id": data["_id"], "name": data["_source"]["name"]+" - "+data["_parent"], "parent": data["_parent"],
"country": data["_source"]["country"]}
for data in state_data['hits']['hits']
if "_parent" in data
]
return states
@classmethod
def get(cls, id):
state_data = es.search(index=os.environ.get("INDEX"),
body={'query': {"bool": {"must": [{"match": {"_type": "state"}},
{'match': {'_id': id}},
]}}})
if 'hits' in state_data and 'hits' in state_data['hits']:
return {"id": state_data['hits']['hits'][0]['_id'],
"name": state_data['hits']['hits'][0]["_source"]["name"],
"parent": state_data['hits']['hits'][0]["_parent"],
"country": state_data['hits']['hits'][0]["_source"]["country"]}
return False
@classmethod
def create(cls, name, country):
country_rec = Country.get(country)
if country_rec:
id = int(datetime.timestamp(datetime.now()) * 1000)
body = {"name": name, "country": country_rec["name"]}
res = es.index(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country_rec["id"], body=body)
if "created" in res and res["created"]:
return True
return False
@classmethod
def edit(cls, id, name, country):
country_rec = Country.get(country)
if country_rec:
res = es.index(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country_rec["id"],
body={"name": name, "country": country_rec["name"]})
if "result" in res and res["result"] == "updated":
return True
return False
@classmethod
def delete(cls, id, country):
state_rec = State.get(id)
if state_rec:
res = es.delete(index=os.environ.get("INDEX"), doc_type='state', id=id, parent=country)
if "found" in res and res["found"] and "result" in res and res["result"] == "deleted":
return True
return False
| RakeshMallesh123/flask-elasticsearch | models/state.py | state.py | py | 3,380 | python | en | code | 1 | github-code | 36 |
9385088011 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 01:30:02 2022
@author: Syeda Fatima Zahid
"""
import time
import datetime
import pandas as pd
ticker = 'MSFT'
period1 = int(time.mktime(datetime.datetime(2020, 12, 1, 23, 59).timetuple()))
period2 = int(time.mktime(datetime.datetime(2020, 12, 31, 23, 59).timetuple()))
interval = '1d' # 1d, 1m
url = f'https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={period1}&period2={period2}&interval={interval}&events=history&includeAdjustedClose=true'
data = pd.read_csv(url)
print(data)
data.to_csv('MSFT.csv') | syedafatimah/Stock-Price-Analyzer | Prediction Using Numerical Data/Data Extraction.py | Data Extraction.py | py | 576 | python | en | code | 0 | github-code | 36 |
36255089286 | import setuptools
NAME = "oka"
VERSION = "0.2108.0"
AUTHOR = 'Rafael A. Bizao, Davi P. dos Santos'
AUTHOR_EMAIL = 'rabizao@gmail.com'
DESCRIPTION = 'Python client for oka'
with open('README.md', 'r') as fh:
LONG_DESCRIPTION = fh.read()
LICENSE = 'GPL3'
URL = 'https://github.com/davips/lange'
DOWNLOAD_URL = 'https://github.com/davips/lange/releases'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
]
INSTALL_REQUIRES = [
'requests', 'python-dotenv', 'idict'
]
EXTRAS_REQUIRE = {
'dev': ['check-manifest'],
'test': ['coverage'],
}
SETUP_REQUIRES = ['wheel']
setuptools.setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
download_url=DOWNLOAD_URL,
extras_require=EXTRAS_REQUIRE,
install_requires=INSTALL_REQUIRES,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
license=LICENSE,
packages=setuptools.find_packages(),
setup_requires=SETUP_REQUIRES,
url=URL,
keywords='data, repository, archive, data science, machine learning', # Optional
project_urls={ # Optional
'Bug Reports': 'https://github.com/rabizao/oka/issues',
'Source': 'https://github.com/rabizao/oka',
},
)
package_dir = {'': '.'} # For IDEs like Intellij to recognize the package.
| rabizao/oka | setup.py | setup.py | py | 1,764 | python | en | code | 0 | github-code | 36 |
31282026948 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from bs4 import BeautifulSoup
from pagi.utils.embedding import Embedding
def main(args):
print("Args:", args)
# data_dir = '/home/dave/agi/ptb_err'
# count_file = 'error_count.csv'
# dist_file = 'error_hist.csv'
embedding_file = './ptb_embedding.txt'
#input_file = '/home/dave/agi/reuters_news/reuters21578/reut2-000.sgm'
#output_file = 'reuters.txt'
input_file = args[1]
output_file = args[2]
e = Embedding()
e.clear()
e.read(embedding_file)
f = open(input_file, 'r')
data = f.read()
#print( 'data: ', data)
# Must replace body with content tags, for reasons
# See: https://stackoverflow.com/questions/15863751/extracting-body-tags-from-smg-file-beautiful-soup-and-python
data_replaced = data.replace('<BODY>', '<content>')
data_replaced = data_replaced.replace('</BODY>', '</content>')
# Parse the modified content
tag = 'content'
unknown_token = '<unk>'
number_token = 'N'
num_footer_tags = 2 # Reuters always has a footer at the end
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/#contents-and-children
soup = BeautifulSoup(data_replaced)
articles = soup.findAll(tag) # find all body tags
print('Have ', len(articles), ' articles.') # print number of body tags in sgm file
i = 0
corpus = ''
# Loop through each body tag and print its content
for article in articles: # pylint: disable=too-many-nested-blocks
content = article.contents
if i < 10:
print('Article: ', content)
print('| ')
output = ''
output_list = []
tokens = content[0].split() # on whitespace
num_tokens = len(tokens)
for j in range(num_tokens-num_footer_tags):
input_token = tokens[j]
token = input_token.strip()
# force lowercase
token = token.lower()
# remove ALL commas (there are none in PTB)
#token = token.replace('\n', ' ')
# remove ALL commas (there are none in PTB)
token = token.replace(',', '')
# replace dlrs with $
token = token.replace('dlrs', '$')
token = token.replace('dlr', '$')
# replace mln
token = token.replace('mln', 'million')
token = token.replace('bln', 'billion')
token = token.replace('trn', 'trillion')
# replace tonnes
token = token.replace('tonnes', 'tons')
# replace pct with percent
token = token.replace('pct', 'percent')
# remove trailing periods
end_of_sentence = False
if token.endswith('.'):
end_of_sentence = True
token = token[:-1]
# replace the angle brackets around proper nouns
token = token.replace('<', '')
token = token.replace('>', '')
# replace numbers with N
try:
float(token)
token = number_token
except ValueError:
pass
# https://stackoverflow.com/questions/5917082/regular-expression-to-match-numbers-with-or-without-commas-and-decimals-in-text
is_number = re.search('(?<!\S)(?=.)(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?!\S)', token) # pylint: disable=anomalous-backslash-in-string
if is_number:
token = number_token
# space before 's and 're etc.
# was n't did n't etc.
#if token == 'didn\'t':
suffix = None
recognized = False
if token.endswith('n\'t'):
suffix = ' n\'t' # split into 2 tokens
token = token.replace('n\'t', '')
elif token.endswith('\'s'):
suffix = ' \'s' # split into 2 tokens
token = token.replace('\'s', '')
elif token.endswith('\'re'):
suffix = ' \'re' # split into 2 tokens
token = token.replace('\'re', '')
# replace unknown tokens with UNK
if not recognized:
has_key = e.has_key(token)
if not has_key:
token = unknown_token
#if i<10:
# print('Original: ', input_token, ' TOKEN: |', token, '| In dict?: ', has_key, ' EOS?: ', end_of_sentence)
output_list.append(token)
if suffix is not None:
output_list.append(suffix)
#output = output + token + suffix
#output = output + ' '
if end_of_sentence:
# Reorder some common tokens where the style is peculiar to a particular outlet
# Reuters style: N million $ N $ N million $
# PTB (WSJ): $ N million $ N $ N billion
output_length = len(output_list)
for k in range(output_length):
if k > 0:
output_token_1 = output_list[k-1]
output_token_2 = output_list[k]
# N $ --> $ N
if (output_token_1 == 'N') and (output_token_2 == '$'):
output_list[k-1] = '$'
output_list[k] = 'N'
elif k > 1:
output_token_0 = output_list[k-2]
if output_token_0 == 'N' and output_token_1 in ['million', 'billion', 'trillion'] and (
output_token_2 == '$'):
output_list[k-2] = '$'
output_list[k-1] = 'N'
output_list[k] = output_token_1
# Copy the final list to the output buffer
for k in range(output_length):
output_token = output_list[k]
output = output + output_token + ' '
# Add EOS marker
output = output + '\n'
# Clear the token list
output_list = [] # reset list
if i < 10:
print('ArticTx: ', output)
print('--------------\n\n')
# assemble the final corpus line, add newline at end
corpus = corpus + output
i = i + 1
print('Articles: ', i)
with open(output_file, 'a') as text_file:
text_file.write(corpus)
if __name__ == '__main__':
tf.app.run()
| Cerenaut/rsm | rsm/scripts/preprocess_reuters.py | preprocess_reuters.py | py | 5,812 | python | en | code | 1 | github-code | 36 |
13906055504 | import cv2
import matplotlib.pyplot as plt
import statistics
"""
this code reads in the ratio values that were created
in newTitration.py. It then checks to see if those ratios
correlate to a pink color. If they do, it will write the frame
number to a file called frameNames.txt
"""
#reads in file
# Load the per-frame colour-ratio records produced by newTitration.py.
# Each line holds: blue/red ratio, blue/green ratio, red/green ratio, frame name.
with open("newMethod.txt", "r") as infile:
    read = infile.readlines()
# Reference ratios of a pink frame:
# pinkBR is blue:red, pinkBG is blue:green, pinkRG is red:green
pinkBR = .92
pinkBG = 1.2
pinkRG = 1.3
# Ratios of frames classified as pink (kept for later inspection/statistics).
BR = []
BG = []
RG = []
# Names/frame numbers of the frames the code classifies as pink.
nameList = []
# Check the ratios of each frame against the pink reference ranges.
for line in read:
    # Each record is a whitespace-separated line read from the file.
    fields = line.split()
    name = fields[3]
    br = float(fields[0])
    bg = float(fields[1])
    rg = float(fields[2])
    # BUGFIX: the match flags must be computed fresh for every frame.
    # Previously a/b/c were only reset after a frame matched all three
    # ranges, so a partial match on one frame leaked into the next and
    # could misclassify it as pink.
    a = pinkBR - .04 <= br <= pinkBR + .04
    b = pinkBG - .09 <= bg <= pinkBG + .09
    c = pinkRG - .05 <= rg <= pinkRG + .05
    if a:
        BR.append(br)
    if b:
        BG.append(bg)
    if c:
        RG.append(rg)
    # A frame is pink only when all three ratios are in range.
    if a and b and c:
        nameList.append(name)
# Append all the frames that were considered pink to the output file.
with open("frameNames.txt", "a") as outfile:
    for frame_name in nameList:
        outfile.write(frame_name)
        outfile.write("\n")
31676458963 | # Example 6.3
# Calculation of the discrete Sine and Cosine transform
from pylab import*
from dtrans import*
# Define the function
f = lambda x: np.power((x/pi), 2)
# Sine and Cosine transform
N = 16
X = asmatrix(r_[0.:pi+pi/N:pi/N])
k = r_[0:N+1]
cos_coeff = dct1(f(X).T)
sin_coeff = dst1(f(X).T)
# Generates the plots
plot(k, abs(cos_coeff), 'k-^', lw = 2., label = "Cosine Transform")
plot(k, abs(sin_coeff), 'k:o', lw = 2., label = "Sine Transform")
grid("on")
xlim([-0.5, 16.5]); ylim([-0.1, 0.5])
legend(loc = 0)
show()
| mdclemen/py-fena | ch6ex3.py | ch6ex3.py | py | 553 | python | en | code | 5 | github-code | 36 |
70786051623 | '''Author: - Devang A Joshi
Version 1.0
Description: This program is a mini guessing game where User need to think a value between 1 to 100
an Array of 100 is created, Initial vales were set. then sorting is made by midpoint selection
Guided By : - Gula Nurmatova
'''
# ASCII-art banner shown at start-up.
print("----------------------------------------")
print("----|  |  |---  |      |      ----    --------")
print("----|--|  |__   |      |      |  |    --------")
print("----|  |  |___  |___   |___   |____|  --------")
print("----------------------------------------")
name = input("PLEASE ENTER YOUR NAME \n")  # ask the player's name
print( "\t" + name + " well come to Guessing number game\n")  # greet the player by name
print("\tGuess any Number between 1 to 100\n")
# Candidate values 1..100. The program binary-searches this range by asking
# the user yes/no questions about its midpoint guess.
a=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,
70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,
92,93,94,95,96,97,98,99,100]
play="yes"  # outer game loop runs while the player answers "yes"
while(play=="yes"):
    Vmid=(a[99]+a[0])//2  # first guess: midpoint of the full range (50)
    Vmax=a[99]  # current upper bound of the search interval
    Vmin=a[0]  # current lower bound of the search interval
    tries = 0  # number of guesses made this round
    guess=input("\tIs the number " + str(Vmid) + " ? (yes/no) \n Ans.")  # was the first guess right?
    tries=tries+1  # the first guess counts as a try
    if (guess=='yes'):  # guessed on the first attempt
        print("\tPERFECT I am Genius")
        print("\tI got your number in " + str(tries) +" try")
    else:  # binary-search: narrow the interval using greater-than answers
        Nguess=input("\tIs the number Greater than " + str(Vmid) + " (Yes/No)?\n Ans.")
        while (guess=='no' and (Nguess=='yes' or Nguess=='no')):  # loop until the number is found
            while (Nguess=="yes"):  # target is above the midpoint
                Vmin=Vmid+1  # raise the lower bound past the failed guess
                Vmid=((Vmax+Vmin)//2)  # re-bisect the interval
                guess=input("\tIs the number " + str(Vmid) + "? (Yes/No)\n Ans.")
                if (guess=='no'):
                    tries=tries+1
                    Nguess=input("\tIs the number Greater than " + str(Vmid) + "?(Yes/No)\n Ans.")
                elif(guess=='yes'):
                    tries=tries+1
                    print("\tI guessed it in " + str(tries) + " tries.")
                    break
            while (Nguess=="no"):  # target is at or below the midpoint
                Vmax=Vmid-1  # lower the upper bound past the failed guess
                Vmid=((Vmax+Vmin)//2)  # re-bisect the interval
                guess=input("\tIs the number " + str(Vmid) + "? (Yes/No) \n Ans.")
                if (guess=='no'):
                    tries=tries+1
                    Nguess=input("\tIs the number Greater than " + str(Vmid) + "?(Yes/No)\n Ans.")
                elif(guess=='yes'):
                    tries=tries+1
                    print("\tI guessed it in " + str(tries) + " tries.")
                    break
    play=input("Lets Play again !!!!! (yes/no)")  # ask whether to play another round
27688294612 | import pickle, tensorflow as tf, tf_util, numpy as np
import pdb
# def load_policy(filename):
# with open(filename, 'rb') as f:
# data = pickle.loads(f.read())
#
# # assert len(data.keys()) == 2
# nonlin_type = data['nonlin_type']
# policy_type = [k for k in data.keys() if k != 'nonlin_type'][0]
#
# assert policy_type == 'GaussianPolicy', 'Policy type {} not supported'.format(policy_type)
# policy_params = data[policy_type]
#
# assert set(policy_params.keys()) == {'logstdevs_1_Da', 'hidden', 'obsnorm', 'out'}
#
# # Keep track of input and output dims (i.e. observation and action dims) for the user
#
# def build_policy(obs_bo):
# def read_layer(l):
# assert list(l.keys()) == ['AffineLayer']
# assert sorted(l['AffineLayer'].keys()) == ['W', 'b']
# return l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)
#
# def apply_nonlin(x):
# if nonlin_type == 'lrelu':
# return tf_util.lrelu(x, leak=.01) # openai/imitation nn.py:233
# elif nonlin_type == 'tanh':
# return tf.tanh(x)
# else:
# raise NotImplementedError(nonlin_type)
#
# # Build the policy. First, observation normalization.
# assert list(policy_params['obsnorm'].keys()) == ['Standardizer']
# obsnorm_mean = policy_params['obsnorm']['Standardizer']['mean_1_D']
# obsnorm_meansq = policy_params['obsnorm']['Standardizer']['meansq_1_D']
# obsnorm_stdev = np.sqrt(np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean)))
# print('obs', obsnorm_mean.shape, obsnorm_stdev.shape)
# normedobs_bo = (obs_bo - obsnorm_mean) / (obsnorm_stdev + 1e-6) # 1e-6 constant from Standardizer class in nn.py:409 in openai/imitation
#
# curr_activations_bd = normedobs_bo
#
# # Hidden layers next
# assert list(policy_params['hidden'].keys()) == ['FeedforwardNet']
# layer_params = policy_params['hidden']['FeedforwardNet']
# for layer_name in sorted(layer_params.keys()):
# l = layer_params[layer_name]
# W, b = read_layer(l)
# curr_activations_bd = apply_nonlin(tf.matmul(curr_activations_bd, W) + b)
#
# # Output layer
# W, b = read_layer(policy_params['out'])
# output_bo = tf.matmul(curr_activations_bd, W) + b
# return output_bo, obsnorm_mean.shape
#
# obs_bo = tf.placeholder(tf.float32, [None, None])
# a_ba, obs_shape = build_policy(obs_bo)
# policy_fn = tf_util.function([obs_bo], a_ba)
# return policy_fn, obs_shape
class Policy():
    """Feed-forward (ReLU) policy network mapping observations to actions.

    Built with TensorFlow 1.x graph-mode APIs. The same class serves two
    roles selected by *target_net*:
      * target_net=True  -- a fixed network whose weights are assigned
        from *param_dict* (e.g. a loaded expert policy);
      * target_net=False -- a trainable network with an MSE loss against
        provided target actions and an Adam optimizer.
    """
    def __init__(self, name, input_dim, output_dim, param_dict=None, layers=None,
                 target_net=False, learning_rate=1e-2):
        """Construct the graph under variable scope *name*.

        layers: list of [(w_name, b_name), (w_shape, b_shape)] entries, one
            per dense layer; ReLU is applied after every layer except the last.
        param_dict: name -> ndarray of weight values (target nets only).
        """
        self.param_dict = param_dict
        self.assign_nodes = []
        self.assign_ph = {}
        self.layers = layers
        self.target_net = target_net
        self.input_ph = tf.placeholder(tf.float32, (None, input_dim), name='obs')
        x = self.input_ph
        params = {}
        with tf.variable_scope(name):
            for i, value in enumerate(self.layers):
                names, sizes = value
                w_name, b_name = names
                w_size, b_size = sizes
                params[w_name] = tf.get_variable(w_name, w_size)
                params[b_name] = tf.get_variable(b_name, b_size)
                x = tf.matmul(x, params[w_name]) + params[b_name]
                # ReLU on all but the final (output) layer
                if i != len(self.layers) - 1:
                    x = tf.nn.relu(x)
        self.output = x
        if self.target_net:
            # Initialize placeholders and assignment operators so loaded
            # weights can be copied into the graph variables later.
            for param_name in params:
                size = params[param_name].shape
                ph = tf.placeholder(tf.float32, size)
                self.assign_ph[param_name] = ph
                self.assign_nodes.append(tf.assign(params[param_name], ph))
        else:
            # define output placeholders (target actions for supervised training)
            self.output_ph = tf.placeholder(tf.float32, [None, output_dim])
            # define loss function
            self.loss = tf.losses.mean_squared_error(self.output_ph, self.output)
            # define optimizer
            self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
            # initialize only variables in agent policy network
            var_list = [var for var in tf.global_variables() if name in var.name]
            self.initializer = tf.variables_initializer(var_list)
    def initialize(self):
        '''
        Initialize the network variables using the default tf session:
        target nets copy the weights from param_dict; trainable nets run
        their variable initializer.
        '''
        if self.target_net:
            feed_dict = {}
            for param_name in self.param_dict:
                feed_dict[self.assign_ph[param_name]] = self.param_dict[param_name]
            tf.get_default_session().run(self.assign_nodes, feed_dict)
        else:
            tf.get_default_session().run(self.initializer)
    def get_action(self, obs_data):
        '''
        Run a forward pass: return actions for a batch of observations.
        '''
        a = tf.get_default_session().run(self.output,
                feed_dict={self.input_ph : obs_data})
        return a
    def train_policy(self, obs, acs, batch_size=128, num_epochs=10):
        '''
        Train on (obs, acs) pairs with random mini-batches; returns the
        mean loss over all batches.
        NOTE(review): np.random.choice(..., replace=False) raises if the
        dataset is smaller than batch_size -- confirm callers guarantee this.
        '''
        size = obs.shape[0]
        indices = np.arange(size)
        losses = []
        for epoch in range(num_epochs):
            for batch in range(size // batch_size):
                sample = np.random.choice(indices, size=batch_size, replace=False)
                batch_obs = obs[sample]
                batch_acs = acs[sample]
                loss, _ = tf.get_default_session().run([self.loss, self.optimizer],
                                feed_dict={self.input_ph : batch_obs,
                                           self.output_ph : batch_acs})
                print('Loss : ', loss)
                losses.append(loss)
        return np.mean(np.array(losses))
def make_policy(filename, env):
    """Load expert weights from a Python source file and build a fixed
    target Policy network sized for *env*.

    SECURITY NOTE(review): `exec(open(filename).read())` executes arbitrary
    code from *filename* -- only use with trusted weight files.
    """
    take_weights_here = {}
    exec(open(filename).read(), take_weights_here)
    dense1_w = take_weights_here["weights_dense1_w"]
    dense1_b = take_weights_here["weights_dense1_b"]
    dense2_w = take_weights_here["weights_dense2_w"]
    dense2_b = take_weights_here["weights_dense2_b"]
    final_w = take_weights_here["weights_final_w"]
    final_b = take_weights_here["weights_final_b"]
    # Layer spec expected by Policy: [(w_name, b_name), (w_shape, b_shape)]
    layers = [
        [('dense1_w', 'dense1_b'), (dense1_w.shape, dense1_b.shape)],
        [('dense2_w', 'dense2_b'), (dense2_w.shape, dense2_b.shape)],
        [('final_w', 'final_b'), (final_w.shape, final_b.shape)]
    ]
    param_dict = {
        'dense1_w' : dense1_w,
        'dense1_b' : dense1_b,
        'dense2_w' : dense2_w,
        'dense2_b' : dense2_b,
        'final_w' : final_w,
        'final_b' : final_b
    }
    # target_net=True: the weights above are assigned into the graph on initialize()
    policy = Policy('expert_policy',
                    env.observation_space.shape[0],
                    env.action_space.shape[0],
                    param_dict=param_dict,
                    layers=layers, target_net=True)
    policy.initialize()
    return policy
def get_new_policy(env, learning_rate=1e-2, dense_dims=None):
    """Build and initialize an untrained agent Policy network for *env*.

    Args:
        env: gym-style environment providing observation_space / action_space.
        learning_rate: Adam learning rate for the policy optimizer.
        dense_dims: sizes of the two hidden layers; defaults to [1024, 1024].
            (Changed from a mutable default argument to a None sentinel to
            avoid the shared-mutable-default pitfall; behavior is unchanged.)

    Returns:
        An initialized trainable Policy named 'agent_policy'.
    """
    if dense_dims is None:
        dense_dims = [1024, 1024]
    input_dims = env.observation_space.shape[0]
    output_dims = env.action_space.shape[0]
    # Layer spec expected by Policy: [(w_name, b_name), (w_shape, b_shape)]
    layers = [
        [('dense1_w', 'dense1_b'), ([input_dims, dense_dims[0]], dense_dims[0])],
        [('dense2_w', 'dense2_b'), ([dense_dims[0], dense_dims[1]], dense_dims[1])],
        [('final_w', 'final_b'), ([dense_dims[1], output_dims], output_dims)]
    ]
    policy = Policy('agent_policy', input_dims, output_dims, layers=layers, learning_rate=learning_rate)
    policy.initialize()
    return policy
| rhiga2/DeepRL | hw1/load_policy.py | load_policy.py | py | 7,636 | python | en | code | 0 | github-code | 36 |
class Solution:
    def isPalindrome(self, string: str) -> bool:
        '''
        Check whether a sequence is a palindrome, ignoring case and any
        character that is neither a letter nor a digit.

        :param string: Sequence to be checked
        :return: True if it is palindrome else False
        '''
        # Keep only letters and digits (same filter as before), lowercased.
        sequence = ''.join(ch for ch in string if ch.isalpha() or ch.isdigit()).lower()
        # A sequence is a palindrome iff it equals its own reverse.
        return sequence == sequence[::-1]
return True
if __name__ == '__main__':
    # Quick self-check: one palindrome and one non-palindrome example.
    string = 'Was it a car or a cat I saw!!'
    print(f'Is "{string}" a palindrome? : {Solution().isPalindrome(string)}')
    string2 = 'A man, a plan,'
    print(f'Is "{string2}" a palindrome? : {Solution().isPalindrome(string2)}')
| DundeShini/CSA0838-PYTHON-PROGRAMMING- | valid palindrome.py | valid palindrome.py | py | 854 | python | en | code | 1 | github-code | 36 |
31410757057 | from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
class MailList(models.Model):
    """
    Subscription record: one row per user, flagging whether that user
    wants e-mail and/or push notifications.

    NOTE(review): the field names spell "recieve" (sic); they are kept
    as-is because renaming them would require a schema migration and
    would break existing callers.
    """
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    recieve_email_notifications = models.BooleanField(default=True)
    recieve_push_notifications = models.BooleanField(default=True)
    @classmethod
    def did_subscribe_for_email_notifications(cls, user):
        """
        Return True if *user* is subscribed to e-mail notifications.
        Creates a default (opted-in) row for the user if none exists.
        """
        result = MailList.objects.get_or_create(user=user)
        return result[0].recieve_email_notifications
    @classmethod
    def did_subscribe_for_push_notifications(cls, user):
        """
        Return True if *user* is subscribed to push notifications.
        Creates a default (opted-in) row for the user if none exists.
        """
        result = MailList.objects.get_or_create(user=user)
        return result[0].recieve_push_notifications
| andela/ah-backend-valkyrie | authors/apps/notify/models.py | models.py | py | 1,042 | python | en | code | 0 | github-code | 36 |
30545475363 | import gettext
import os
# UI language is selected via the fchart3lang environment variable; fall
# back to untranslated strings when the requested catalog cannot be loaded
# (missing locale dir, unset/unknown language, ...).
uilanguage = os.environ.get('fchart3lang')
try:
    lang = gettext.translation('messages', localedir='locale', languages=[uilanguage])
    lang.install()
    _ = lang.gettext
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # not swallowed; any failure to load the catalog means "no i18n".
    _ = gettext.gettext
from time import time
from .label_potential import *
from .np_astrocalc import *
from .constellation import *
from .mirroring_graphics import *
from .configuration import *
from . import deepsky_object as deepsky
from .graphics_interface import DrawMode
from .projection import ProjectionType
from .projection_orthographic import ProjectionOrthographic
from .projection_stereographic import ProjectionStereographic
from .space_widget_allocator import SpaceWidgetAllocator
from .widget_mag_scale import WidgetMagnitudeScale
from .widget_map_scale import WidgetMapScale
from .widget_orientation import WidgetOrientation
from .widget_coords import WidgetCoords
from .widget_dso_legend import WidgetDsoLegend
from .widget_telrad import WidgetTelrad
from .widget_eyepiece import WidgetEyepiece
from .widget_picker import WidgetPicker
from .precession import compute_precession_matrix
# Default (gettext-translated) labels for time units and DSO type codes.
LABELi18N = {
    'h': _('h'),
    'm':_('m'),
    's':_('s'),
    'G':_('Galaxy'),
    'OCL':_('Open cluster'),
    'GCL':_('Globular cluster'),
    'AST':_('Asterism'),
    'PN':_('Planetary nebula'),
    'N': _('Diffuse nebula'),
    'SNR':_('Supernova remnant'),
    'PG':_('Part of galaxy')
}
# Static French label table (alternative to the gettext table above).
FR = {
    'h':'h',
    'm':'m',
    's':'s',
    'G':'Galaxie',
    'OCL':'Cluster Ouvert',
    'GCL':'Cluster Globulaire',
    'AST':'Astérisme',
    'PN': 'Nébuleuse Planétaire',
    'N': 'Nébuleuse Diffuse',
    'SNR':'Rémanent de Supernova',
    'PG':'Partie de Galaxie'
}
# Bayer designation abbreviations -> Greek letters used for star labels.
STAR_LABELS = {
    "alp":"α",
    "bet":"β",
    "gam":"γ",
    "del":"δ",
    "eps":"ε",
    "zet":"ζ",
    "eta":"η",
    "the":"θ",
    "iot":"ι",
    "kap":"κ",
    "lam":"λ",
    "mu":"μ",
    "nu":"ν",
    "xi":"ξ",
    "omi":"ο",
    "pi":"π",
    "rho":"ρ",
    "sig":"σ/ς",
    "tau":"τ",
    "ups":"υ",
    "phi":"φ",
    "chi":"χ",
    "psi":"ψ",
    "ome":"ω"
}
# Layout/scale tuning constants for the chart rendering.
STARS_IN_SCALE = 10
LEGEND_MARGIN = 0.47
BASE_SCALE = 0.98
GRID_DENSITY = 4
# Candidate grid step sizes (in minutes) for the RA/Dec coordinate grid.
RA_GRID_SCALE = [0.25, 0.5, 1, 2, 3, 5, 10, 15, 20, 30, 60, 2*60, 3*60]
DEC_GRID_SCALE = [1, 2, 3, 5, 10, 15, 20, 30, 60, 2*60, 5*60, 10*60, 15*60, 20*60, 30*60, 45*60, 60*60]
# Piecewise-linear mapping from magnitude-scale index to symbol size.
MAG_SCALE_X = [0, 1, 2, 3, 4, 5, 25]
MAG_SCALE_Y = [0, 1.8, 3.3, 4.7, 6, 7.2, 18.0]
# Module-level caches for constellation line/boundary data (filled lazily).
constell_lines_rect1 = None
constell_lines_rect2 = None
constell_bound_rect = None
class SkymapEngine:
    def __init__(self, graphics, language=LABELi18N, ra=0.0, dec=0.0, fieldradius=-1.0, lm_stars=13.8, lm_deepsky=12.5, caption=''):
        """
        Create the engine drawing into *graphics* (the graphics backend,
        whose gi_width/gi_height give the canvas size, legend included, in mm).

        language   -- label translation table (e.g. LABELi18N or FR)
        lm_stars   -- limiting magnitude for stars
        lm_deepsky -- limiting magnitude for deep-sky objects
        NOTE(review): ra/dec/fieldradius/caption parameters are not used
        here; the field is configured later via set_field()/set_caption().
        """
        self.graphics = graphics
        self.config = EngineConfiguration()
        self.caption = ''
        self.language = language
        self.drawingwidth = self.graphics.gi_width
        self.drawingheight = self.graphics.gi_height
        self.min_radius = 1.0   # of deepsky symbols (mm)
        self.lm_stars = lm_stars
        self.lm_deepsky = lm_deepsky
        # Field geometry -- filled in by set_field()
        self.fieldcentre = None
        self.fieldradius = None
        self.fieldsize = None
        self.scene_scale = None
        self.drawingscale = None
        self.legend_fontscale = None
        self.active_constellation = None
        # Legend/overlay widgets -- created by create_widgets()
        self.space_widget_allocator = None
        self.w_mag_scale = None
        self.w_map_scale = None
        self.w_orientation = None
        self.w_coords = None
        self.w_dso_legend = None
        self.w_telrad = None
        self.w_eyepiece = None
        self.w_picker = None
        self.mirroring_graphics = None
        # Object picked via the on-chart picker (set during make_map)
        self.picked_dso = None
        self.picked_star = None
        self.star_mag_r_shift = 0
        self.projection = None
        self.norm_field_radius = None
    def set_field(self, ra, dec, fieldradius, projection_type=ProjectionType.STEREOGRAPHIC):
        """
        Configure the chart field: centre (*ra*, *dec*) and *fieldradius*
        (radians), then derive the drawing scale and set up the projection.
        """
        self.fieldradius = fieldradius
        self.fieldcentre = (ra, dec)
        # Field size scaled to the drawing diagonal relative to its larger side.
        wh = max(self.drawingwidth, self.drawingheight)
        self.fieldsize = fieldradius * math.sqrt(self.drawingwidth**2 + self.drawingheight**2) / wh
        if self.config.no_margin:
            self.scene_scale = (wh - self.config.legend_linewidth) / wh
        else:
            self.scene_scale = BASE_SCALE
        # Initial mm-per-radian estimate; refined below from the projection.
        self.drawingscale = self.scene_scale*wh/2.0/math.sin(fieldradius)
        self.legend_fontscale = min(self.config.legend_font_scale, wh/100.0)
        self.set_caption(self.caption)
        self.projection = self._create_projection(projection_type)
        # Measure the projected field radius at unit scale to normalize the
        # final drawing scale for this projection.
        self.projection.set_fieldcentre((0, 0))
        self.projection.set_drawingscale(1.0)
        self.norm_field_radius, _ = self.projection.radec_to_xy(fieldradius, 0)
        self.drawingscale = self.scene_scale*wh / 2.0 / abs(self.norm_field_radius)
        self.projection.set_fieldcentre(self.fieldcentre)
        self.projection.set_drawingscale(self.drawingscale)
def _create_projection(self, projection_type):
if projection_type == ProjectionType.ORTHOGRAPHIC:
return ProjectionOrthographic()
if projection_type == ProjectionType.STEREOGRAPHIC:
return ProjectionStereographic()
return None
    def set_configuration(self, config):
        """
        Install the drawing *config* and precompute the extra star-symbol
        radius used when config.star_mag_shift enlarges stars.
        """
        self.config = config
        self.star_mag_r_shift = 0
        if self.config.star_mag_shift > 0:
            # Radius difference between a star star_mag_shift magnitudes above
            # the limit and a star at the limit (magnitude_to_radius is
            # defined elsewhere in this class).
            self.star_mag_r_shift = self.magnitude_to_radius(self.lm_stars-self.config.star_mag_shift) - self.magnitude_to_radius(self.lm_stars)
def get_field_radius_mm(self):
return self.drawingscale * self.norm_field_radius
def get_field_rect_mm(self):
x = self.scene_scale * self.drawingwidth / 2.0
y = self.scene_scale * self.drawingheight / 2.0
return -x, -y, x, y
    def set_language(self, language):
        """
        Set the translation table used for legend/object labels
        (e.g. LABELi18N or FR).
        """
        self.language = language
    def set_caption(self, caption):
        """
        Set the chart caption; a non-empty caption enlarges the canvas so
        the caption text fits below the map.
        """
        self.caption = caption
        if caption != '':
            self.graphics.set_dimensions(self.drawingwidth,self.drawingheight + self.legend_fontscale*self.graphics.gi_default_font_size*2.0)
    def set_active_constellation(self, active_constellation):
        """Select the constellation to emphasise when drawing (or None)."""
        self.active_constellation = active_constellation
    def make_map(self, used_catalogs, jd=None, showing_dsos=None, dso_highlights=None, highlights=None, dso_hide_filter=None,
                 extra_positions=None, hl_constellation=None, trajectory=[], visible_objects=None, use_optimized_mw=False,
                 transparent=False):
        """ Creates map using given graphics, params and config
        used_catalogs - UsedCatalogs data structure
        jd - julian date (used to compute the precession matrix; None = none)
        showing_dsos - DSOs forced to be shown even if they don't pass the filter
        dso_highlights - list of DsoHighlightDefinition that will be marked
        highlights - list of HighlightDefinitions that will be marked
        dso_hide_filter - list of DSOs to be hidden, except showing_dsos
        extra_positions - extra positions to be drawn
        hl_constellation - constellation name that will be highlighted
        trajectory - defined by list of (ra, dec) points
            NOTE(review): mutable default argument; harmless as it is never
            mutated here, but trajectory=None would be safer -- TODO confirm.
        visible_objects - output array receiving the objects visible on the map
        use_optimized_mw - use optimized milky way
        transparent - make chart transparent (skip background clear)
        """
        visible_dso_collector = [] if visible_objects is not None else None
        self.picked_dso = None
        self.picked_star = None
        # Wrap the graphics backend to mirror coordinates when requested.
        if self.config.mirror_x or self.config.mirror_y:
            self.mirroring_graphics = MirroringGraphics(self.graphics, self.config.mirror_x, self.config.mirror_y)
        else:
            self.mirroring_graphics = self.graphics
        self.create_widgets()
        self.graphics.set_background_rgb(self.config.background_color)
        self.graphics.new()
        if not transparent:
            self.graphics.clear()
        self.graphics.set_pen_rgb(self.config.draw_color)
        self.graphics.set_fill_rgb(self.config.draw_color)
        self.graphics.set_font(font=self.config.font, font_size=self.config.font_size)
        self.graphics.set_default_font_size(self.config.font_size)
        self.graphics.set_linewidth(self.config.legend_linewidth)
        # Precession from J2000 to the epoch of *jd* (inverse matrix applied
        # to catalog positions).
        if jd is not None:
            precession_matrix = np.linalg.inv(compute_precession_matrix(jd))
        else:
            precession_matrix = None
        if not self.config.legend_only:
            self.label_potential = LabelPotential(self.get_field_radius_mm())
            self.picked_star = None
            # Everything below is clipped to the map area (widgets excluded).
            clip_path = self.space_widget_allocator.get_border_path()
            self.graphics.clip_path(clip_path)
            # Drawing order: background layers first, then grid/highlights,
            # constellations, stars, DSOs, extra objects and trajectory.
            if self.config.show_simple_milky_way:
                self.draw_milky_way(used_catalogs.milky_way)
            elif self.config.show_enhanced_milky_way:
                self.draw_enhanced_milky_way(used_catalogs.enhanced_milky_way, use_optimized_mw)
            if self.config.show_equatorial_grid:
                # tm = time()
                self.draw_grid_equatorial()
                # print("Equatorial grid within {} s".format(str(time()-tm)), flush=True)
            if highlights:
                self.draw_highlights(highlights, visible_dso_collector)
            if used_catalogs.constellcatalog is not None:
                # tm = time()
                self.draw_constellations(used_catalogs.constellcatalog, jd, precession_matrix, hl_constellation)
                # print("constellations within {} s".format(str(time()-tm)), flush=True)
            if used_catalogs.unknown_nebulas is not None:
                self.draw_unknown_nebula(used_catalogs.unknown_nebulas)
            if used_catalogs.starcatalog is not None:
                # tm = time()
                self.draw_stars(used_catalogs.starcatalog, precession_matrix)
                # print("Stars within {} s".format(str(time()-tm)), flush=True)
            if used_catalogs.deepskycatalog is not None:
                # tm = time()
                self.draw_deepsky_objects(used_catalogs.deepskycatalog, precession_matrix, showing_dsos, dso_highlights, dso_hide_filter, visible_dso_collector)
                # print("DSO within {} s".format(str(time()-tm)), flush=True)
            # A picked star is only marked when no DSO was picked.
            if self.picked_dso is None and self.picked_star is not None:
                self.draw_picked_star()
            if extra_positions:
                self.draw_extra_objects(extra_positions)
            if trajectory:
                self.draw_trajectory(trajectory)
            self.graphics.reset_clip()
        # print('Drawing legend')
        self.draw_caption()
        # print('Drawing widgets')
        self.draw_widgets()
        # Draw border of field-of-view
        self.draw_field_border()
        # tm = time()
        self.graphics.finish()
        # print("Rest {} ms".format(str(time()-tm)), flush=True)
        # Flatten the collected visible objects (sorted by size) into the
        # caller-supplied output array: name, x1, y1, x2, y2 per object.
        if visible_dso_collector is not None:
            visible_dso_collector.sort(key=lambda x: x[0])
            for obj in visible_dso_collector:
                visible_objects.extend([obj[1], obj[2], obj[3], obj[4], obj[5]])
def draw_caption(self):
if self.caption != '':
font_size = self.get_legend_font_size()
self.graphics.set_font(self.graphics.gi_font, 2.0*font_size)
self.graphics.text_centred(0, self.drawingwidth/2.0*BASE_SCALE + font_size, self.caption)
def draw_field_border(self):
"""
Draw a circle representing the edge of the field of view.
"""
if self.config.show_field_border:
self.graphics.set_linewidth(self.config.legend_linewidth)
x1, y1, x2, y2 = self.get_field_rect_mm()
self.graphics.line(x1, y1, x1, y2)
self.graphics.line(x1, y2, x2, y2)
self.graphics.line(x2, y2, x2, y1)
self.graphics.line(x2, y1, x1, y1)
def get_legend_font_size(self):
return self.config.font_size * self.legend_fontscale
    def draw_widgets(self):
        """
        Draw all enabled overlay/legend widgets (telrad, eyepiece, picker,
        magnitude scale, map scale, orientation, coordinates, DSO legend).
        """
        # Set the font_size for the entire legend
        font_size = self.get_legend_font_size()
        self.graphics.set_font(self.graphics.gi_font, font_size=font_size)
        x1, y1, x2, y2 = self.get_field_rect_mm()
        if self.config.fov_telrad:
            self.w_telrad.draw(self.graphics)
        if self.config.eyepiece_fov is not None:
            self.w_eyepiece.draw(self.graphics)
        if self.config.show_picker and self.config.picker_radius > 0:
            self.w_picker.draw(self.graphics)
        if self.config.show_mag_scale_legend:
            self.w_mag_scale.draw(self.graphics, self.config.legend_only)
        if self.config.show_map_scale_legend:
            self.w_map_scale.draw(self.graphics, self.config.legend_only)
        if self.config.show_orientation_legend:
            # Orientation arrows sit in the top-left corner of the field.
            self.w_orientation.draw(self.graphics, x1, y2, self.config.legend_only)
        if self.config.show_coords_legend:
            self.w_coords.draw(self.graphics, left=x2-font_size/2, bottom=y2-font_size, ra=self.fieldcentre[0], dec=self.fieldcentre[1], legend_only=self.config.legend_only)
        if self.config.show_dso_legend:
            self.w_dso_legend.draw_dso_legend(self, self.graphics, self.config.legend_only)
    def draw_deepsky_objects(self, deepsky_catalog, precession_matrix, showing_dsos, dso_highlights, dso_hide_filter, visible_dso_collector):
        """
        Select, lay out and draw all deep-sky objects in the current field.

        Steps: select objects by field/limiting magnitude, merge in forced
        (*showing_dsos*) and highlighted objects, project positions, pick the
        object under the picker (if configured), choose a label position for
        each object via the label-potential heuristic, draw the type-specific
        symbol and finally record each object's screen rectangle into
        *visible_dso_collector* (when the caller requested it).
        """
        if not self.config.show_deepsky:
            return
        # Draw deep sky
        # print('Drawing deepsky...')
        deepsky_list = deepsky_catalog.select_deepsky(self.fieldcentre, self.fieldsize, self.lm_deepsky)
        filtered_showing_dsos = []
        # NOTE(review): the else-branch yields an empty dict, not a set;
        # harmless since it stays empty (remove() is never reached), but
        # set() would be clearer.
        dso_hide_filter_set = { dso for dso in dso_hide_filter } if dso_hide_filter else {}
        # Forced and highlighted DSOs are always shown: add the ones missing
        # from the selection and remove them from the hide filter.
        if showing_dsos:
            for dso in showing_dsos:
                if dso not in deepsky_list:
                    filtered_showing_dsos.append(dso)
                if dso in dso_hide_filter_set:
                    dso_hide_filter_set.remove(dso)
        if dso_highlights:
            for dso_highlight in dso_highlights:
                for dso in dso_highlight.dsos:
                    if dso not in deepsky_list and dso not in filtered_showing_dsos:
                        filtered_showing_dsos.append(dso)
                    if dso in dso_hide_filter_set:
                        dso_hide_filter_set.remove(dso)
        # Brightest first, so bright objects claim label positions early.
        deepsky_list.sort(key=lambda x: x.mag)
        deepsky_list_ext = []
        self.calc_deepsky_list_ext(precession_matrix, deepsky_list_ext, deepsky_list)
        self.calc_deepsky_list_ext(precession_matrix, deepsky_list_ext, filtered_showing_dsos)
        self.label_potential.add_deepsky_list(deepsky_list_ext)
        # print('Drawing objects...')
        # Find the object closest to the chart centre within the picker box.
        pick_r = self.config.picker_radius if self.config.picker_radius > 0 else 0
        if pick_r > 0:
            pick_min_r = pick_r**2
            for dso, x, y, rlong in deepsky_list_ext:
                if pick_r > 0 and abs(x) < pick_r and abs(y) < pick_r:
                    r = x*x + y*y
                    if r < pick_min_r:
                        self.picked_dso = dso
                        pick_min_r = r
        for dso, x, y, rlong in deepsky_list_ext:
            if dso in dso_hide_filter_set:
                continue
            label = dso.label()
            label_mag = None
            if self.config.show_dso_mag and dso.mag is not None and dso.mag != -100:
                label_mag = '{:.1f}'.format(dso.mag)
            if dso_highlights:
                for dso_highlight in dso_highlights:
                    if dso in dso_highlight.dsos:
                        self.draw_dso_hightlight(x, y, rlong, label, dso_highlight, visible_dso_collector)
                        break
            # Symbol half-axes in mm, clamped to the minimum symbol radius
            # while preserving the aspect ratio.
            rlong = dso.rlong if dso.rlong is not None else self.min_radius
            rshort = dso.rshort if dso.rshort is not None else self.min_radius
            rlong = rlong*self.drawingscale
            rshort = rshort*self.drawingscale
            posangle = dso.position_angle+self.projection.direction_ddec(dso.ra, dso.dec)+0.5*np.pi
            if rlong <= self.min_radius:
                rshort *= self.min_radius/rlong
                rlong = self.min_radius
            label_ext = None
            if dso == self.picked_dso and dso.mag < 100.0:
                label_ext = '{:.2f}m'.format(dso.mag)
            label_length = self.graphics.text_width(label)
            labelpos = -1
            # Candidate label positions depend on the symbol's shape.
            labelpos_list = []
            if dso.type == deepsky.G:
                labelpos_list = self.galaxy_labelpos(x, y, rlong, rshort, posangle, label_length)
            elif dso.type == deepsky.N:
                labelpos_list = self.diffuse_nebula_labelpos(x, y, 2.0*rlong, 2.0*rshort, posangle, label_length)
            elif dso.type in [deepsky.PN, deepsky.OC, deepsky.GC, deepsky.SNR, deepsky.GALCL]:
                labelpos_list = self.circular_object_labelpos(x, y, rlong, label_length)
            elif dso.type == deepsky.STARS:
                labelpos_list = self.asterism_labelpos(x, y, rlong, label_length)
            else:
                labelpos_list = self.unknown_object_labelpos(x, y, rlong, label_length)
            # Pick the candidate with the lowest label-potential (least
            # crowded position).
            pot = 1e+30
            for labelpos_index in range(len(labelpos_list)):
                [[x1, y1], [x2, y2], [x3, y3]] = labelpos_list[labelpos_index]
                pot1 = self.label_potential.compute_potential(x2, y2)
                # self.label_potential.compute_potential(x1,y1),
                # self.label_potential.compute_potential(x3,y3)])
                if pot1 < pot:
                    pot = pot1
                    labelpos = labelpos_index
            [xx, yy] = labelpos_list[labelpos][1]
            self.label_potential.add_position(xx, yy, label_length)
            # Draw the type-specific symbol.
            if dso.type == deepsky.G:
                self.galaxy(x, y, rlong, rshort, posangle, dso.mag, label, label_mag, label_ext, labelpos)
            elif dso.type == deepsky.N:
                has_outlines = False
                if self.config.show_nebula_outlines and dso.outlines is not None and rlong > self.min_radius:
                    has_outlines = self.draw_dso_outlines(dso, x, y, rlong, rshort, posangle, label, label_ext, labelpos)
                if not has_outlines:
                    self.diffuse_nebula(x, y, 2.0*rlong, 2.0*rshort, posangle, label, label_mag, label_ext, labelpos)
            elif dso.type == deepsky.PN:
                self.planetary_nebula(x, y, rlong, label, label_mag, label_ext, labelpos)
            elif dso.type == deepsky.OC:
                if self.config.show_nebula_outlines and dso.outlines is not None:
                    has_outlines = self.draw_dso_outlines(dso, x, y, rlong, rshort)
                self.open_cluster(x, y, rlong, label, label_mag, label_ext, labelpos)
            elif dso.type == deepsky.GC:
                self.globular_cluster(x, y, rlong, label, label_mag, label_ext, labelpos)
            elif dso.type == deepsky.STARS:
                self.asterism(x, y, rlong, label, label_ext, labelpos)
            elif dso.type == deepsky.SNR:
                self.supernova_remnant(x, y, rlong, label, label_ext, labelpos)
            elif dso.type == deepsky.GALCL:
                self.galaxy_cluster(x, y, rlong, label, label_ext, labelpos)
            else:
                self.unknown_object(x, y, rlong, label, label_ext, labelpos)
            # Record the object's pixel-space bounding box for the caller.
            if visible_dso_collector is not None:
                xs1, ys1 = x-rlong, y-rlong
                xs2, ys2 = x+rlong, y+rlong
                if self.graphics.on_screen(xs1, ys1) or self.graphics.on_screen(xs2, ys2):
                    xp1, yp1 = self.mirroring_graphics.to_pixel(xs1, ys1)
                    xp2, yp2 = self.mirroring_graphics.to_pixel(xs2, ys2)
                    xp1, yp1, xp2, yp2 = self.align_rect_coords(xp1, yp1, xp2, yp2)
                    visible_dso_collector.append([rlong, label.replace(' ', ''), xp1, yp1, xp2, yp2])
                    if self.picked_dso == dso:
                        # Also record the picker box itself for the picked object.
                        pick_xp1, pick_yp1 = self.mirroring_graphics.to_pixel(-pick_r, -pick_r)
                        pick_xp2, pick_yp2 = self.mirroring_graphics.to_pixel(pick_r, pick_r)
                        pick_xp1, pick_yp1, pick_xp2, pick_yp2 = self.align_rect_coords(pick_xp1, pick_yp1, pick_xp2, pick_yp2)
                        visible_dso_collector.append([rlong, label.replace(' ', ''), pick_xp1, pick_yp1, pick_xp2, pick_yp2])
    def calc_deepsky_list_ext(self, precession_matrix, deepsky_list_ext, dso_list):
        """Project DSOs to chart x/y and append visible ones to deepsky_list_ext.

        Appends tuples (dso, x, y, rlong) where rlong is the drawn radius in
        chart units, clamped to at least self.min_radius. Mutates
        deepsky_list_ext in place; returns None.
        """
        if precession_matrix is not None:
            # Precess all catalogue positions at once: build an Nx3 matrix of
            # rectangular coordinates, multiply, then convert back to RA/Dec.
            mat_rect_dso = np.empty([len(dso_list), 3])
            for i, dso in enumerate(dso_list):
                mat_rect_dso[i] = [dso.x, dso.y, dso.z]
            mat_rect_dso = np.matmul(mat_rect_dso, precession_matrix)
            ra_ar, dec_ar = np_rect_to_sphere(mat_rect_dso[:,[0]], mat_rect_dso[:,[1]], mat_rect_dso[:,[2]])
        else:
            # No precession requested: take catalogue RA/Dec as-is.
            ra_ar = np.empty([len(dso_list)])
            dec_ar = np.empty([len(dso_list)])
            for i, dso in enumerate(dso_list):
                ra_ar[i] = dso.ra
                dec_ar[i] = dso.dec
        x, y, z = self.projection.np_radec_to_xyz(ra_ar, dec_ar)
        nzopt = not self.projection.is_zoptim()
        for i, dso in enumerate(dso_list):
            # Skip objects on the far side of the projection when z-optimization is on.
            if nzopt or z[i] > 0:
                if dso.rlong is None:
                    rlong = self.min_radius
                else:
                    rlong = dso.rlong*self.drawingscale
                if rlong < self.min_radius:
                    rlong = self.min_radius
                deepsky_list_ext.append((dso, x[i], y[i], rlong))
    def draw_dso_outlines(self, dso, x, y, rlong, rshort, posangle=None, label=None, label_ext=None, labelpos=None):
        """Draw a DSO's outline polygons for all detail levels that have data.

        Returns True when at least one outline level was drawn. The label is
        passed only with the first drawn polygon; lev_shift keeps the drawn
        level numbering contiguous when outer levels are empty.
        """
        lev_shift = 0
        has_outlines = False
        draw_label = True
        for outl_lev in range(2, -1, -1):  # outermost level first
            outlines_ar = dso.outlines[outl_lev]
            if outlines_ar:
                has_outlines = True
                for outlines in outlines_ar:
                    x_outl, y_outl = self.projection.np_radec_to_xy(outlines[0], outlines[1])
                    self.diffuse_nebula_outlines(x, y, x_outl, y_outl, outl_lev+lev_shift, 2.0*rlong, 2.0*rshort, posangle,
                                                 label, label_ext, draw_label, labelpos)
                    draw_label = False
            else:
                lev_shift += 1
        return has_outlines
    def draw_unknown_nebula(self, unknown_nebulas):
        """Draw outline polygons for nebulas of unknown/unclassified type."""
        zopt = self.projection.is_zoptim()
        for uneb in unknown_nebulas:
            # Use the bounding-box centre to decide front/back-side visibility.
            ra = (uneb.ra_min + uneb.ra_max) / 2.0
            dec = (uneb.dec_min + uneb.dec_max) / 2.0
            x, y, z = self.projection.radec_to_xyz(ra, dec)
            if zopt and z <=0:
                continue
            for outl_lev in range(3):
                outlines = uneb.outlines[outl_lev]
                if not outlines:
                    continue
                for outl in outlines:
                    # NOTE(review): when zopt is set, z > 0 already holds here
                    # (guarded by the continue above), so this test looks redundant.
                    if not zopt or z > 0:
                        x_outl, y_outl = self.projection.np_radec_to_xy(outl[0], outl[1])
                        self.unknown_diffuse_nebula_outlines(x_outl, y_outl, outl_lev)
    def draw_milky_way(self, milky_way_lines):
        """Draw the milky way as filled polygons from precomputed boundary lines.

        milky_way_lines rows are (ra, dec, flag); a row with flag == 0 flushes
        the current polygon and starts a new one. Mirroring is applied via
        sign multipliers on x/y.
        """
        x, y, z = self.projection.np_radec_to_xyz(milky_way_lines[:, 0], milky_way_lines[:, 1])
        mulx = -1 if self.config.mirror_x else 1
        muly = -1 if self.config.mirror_y else 1
        self.graphics.set_pen_rgb(self.config.milky_way_color)
        self.graphics.set_fill_rgb(self.config.milky_way_color)
        self.graphics.set_linewidth(self.config.milky_way_linewidth)
        nzopt = not self.projection.is_zoptim()
        polygon = None
        for i in range(len(x)-1):
            if milky_way_lines[i][2] == 0:
                # Flag row: flush the finished polygon and begin a new one.
                if polygon is not None and len(polygon) > 2:
                    self.graphics.polygon(polygon, DrawMode.BOTH)
                x1, y1, z1 = x[i].item(), y[i].item(), z[i].item()
                polygon = None
                if nzopt or z1 > 0:
                    polygon = [[mulx*x1, muly*y1]]
            else:
                x1, y1, z1 = x[i].item(), y[i].item(), z[i].item()
                if nzopt or z1 > 0:
                    if polygon is None:
                        polygon = []
                    polygon.append([mulx*x1, muly*y1])
        # NOTE(review): the trailing polygon is drawn with DrawMode.FILL while
        # the in-loop ones use DrawMode.BOTH — confirm this is intentional.
        if polygon is not None and len(polygon) > 2:
            self.graphics.polygon(polygon, DrawMode.FILL)
    def draw_enhanced_milky_way(self, enhanced_milky_way, use_optimized_mw):
        """Draw the shaded multi-polygon milky way.

        use_optimized_mw selects a reduced polygon set. Polygon colours are
        faded per channel through the linear coefficients stored in
        config.enhanced_milky_way_fade.
        """
        self.graphics.antialias_off()
        tm = time()
        mw_points = enhanced_milky_way.mw_points
        x, y, z = self.projection.np_radec_to_xyz(mw_points[:, 0], mw_points[:, 1])
        mulx = -1 if self.config.mirror_x else 1
        muly = -1 if self.config.mirror_y else 1
        self.graphics.set_linewidth(0)
        fd = self.config.enhanced_milky_way_fade
        if use_optimized_mw:
            selected_polygons = enhanced_milky_way.select_opti_polygons(self.fieldcentre, self.fieldsize)
        else:
            selected_polygons = enhanced_milky_way.select_polygons(self.fieldcentre, self.fieldsize)
        fr_x1, fr_y1, fr_x2, fr_y2 = self.get_field_rect_mm()
        total_polygons = 0
        zopt = self.projection.is_zoptim()
        for polygon_index in selected_polygons:
            if use_optimized_mw:
                polygon, rgb = enhanced_milky_way.mw_opti_polygons[polygon_index]
            else:
                polygon, rgb = enhanced_milky_way.mw_polygons[polygon_index]
            # Reject polygons with any vertex behind the projection plane.
            if zopt and any(z[i] < 0 for i in polygon):
                continue
            xy_polygon = [(x[i].item() * mulx, y[i].item() * muly) for i in polygon]
            # Keep only polygons with at least one vertex inside the field rect.
            for xp, yp in xy_polygon:
                if (xp >= fr_x1) and (xp <= fr_x2) and (yp >= fr_y1) and (yp <= fr_y2):
                    break
            else:
                continue
            # Per-channel fade: frgb = offset + rgb * gain.
            frgb = (fd[0] + rgb[0] * fd[1], fd[2] + rgb[1] * fd[3], fd[4] + rgb[2] * fd[5])
            total_polygons += 1
            self.graphics.set_fill_rgb(frgb)
            self.graphics.polygon(xy_polygon, DrawMode.FILL)
        self.graphics.antialias_on()
        tmp=str(time()-tm)
        print( _("Enhanced milky way draw within {} s. Total polygons={}".format(tmp, total_polygons)) , flush=True)
def draw_extra_objects(self,extra_positions):
# Draw extra objects
# print('Drawing extra objects...')
nzopt = not self.projection.is_zoptim()
for rax, decx, label, labelpos in extra_positions:
x, y, z = self.projection.radec_to_xyz(rax, decx)
if nzopt or z >= 0:
self.unknown_object(x, y, self.min_radius, label, labelpos)
    def draw_highlights(self, highlights, visible_dso_collector):
        """Draw highlight markers (cross or circle) for user-selected objects.

        For circle highlights carrying an object name, the aligned pixel
        rectangle is appended to visible_dso_collector (when given) for
        later picking.
        """
        fn = self.graphics.gi_default_font_size
        highlight_fh = self.config.highlight_label_font_scale * fn
        nzopt = not self.projection.is_zoptim()
        for hl_def in highlights:
            for rax, decx, object_name, label in hl_def.data:
                x, y, z = self.projection.radec_to_xyz(rax, decx)
                if nzopt or z >= 0:
                    self.graphics.set_pen_rgb(hl_def.color)
                    self.graphics.set_linewidth(hl_def.line_width)
                    if hl_def.style == 'cross':
                        # Four tick segments leaving a gap around the object.
                        r = self.config.font_size * 2
                        self.mirroring_graphics.line(x-r, y, x-r/2, y)
                        self.mirroring_graphics.line(x+r, y, x+r/2, y)
                        self.mirroring_graphics.line(x, y+r, x, y+r/2)
                        self.mirroring_graphics.line(x, y-r, x, y-r/2)
                    elif hl_def.style == 'circle':
                        r = self.config.font_size
                        self.mirroring_graphics.circle(x, y, r)
                        if label:
                            self.draw_circular_object_label(x, y, r, label, fh=highlight_fh)
                        if object_name and visible_dso_collector is not None:
                            xs1, ys1 = x-r, y-r
                            xs2, ys2 = x+r, y+r
                            if self.graphics.on_screen(xs1, ys1) or self.graphics.on_screen(xs2, ys2):
                                xp1, yp1 = self.mirroring_graphics.to_pixel(xs1, ys1)
                                xp2, yp2 = self.mirroring_graphics.to_pixel(xs2, ys2)
                                xp1, yp1, xp2, yp2 = self.align_rect_coords(xp1, yp1, xp2, yp2)
                                visible_dso_collector.append([r, object_name, xp1, yp1, xp2, yp2])
def draw_dso_hightlight(self, x, y, rlong, dso_name, dso_highligth, visible_dso_collector):
self.graphics.set_pen_rgb(dso_highligth.color)
self.graphics.set_linewidth(dso_highligth.line_width)
if dso_highligth.dash and len(dso_highligth.dash) == 2:
self.graphics.set_dashed_line(dso_highligth.dash[0], dso_highligth.dash[1])
else:
self.graphics.set_solid_line()
r = self.config.font_size
self.mirroring_graphics.circle(x, y, r)
xs1, ys1 = x-r, y-r
xs2, ys2 = x+r, y+r
if visible_dso_collector is not None and (self.graphics.on_screen(xs1, ys1) or self.graphics.on_screen(xs2, ys2)):
xp1, yp1 = self.mirroring_graphics.to_pixel(xs1, ys1)
xp2, yp2 = self.mirroring_graphics.to_pixel(xs2, ys2)
xp1, yp1, xp2, yp2 = self.align_rect_coords(xp1, yp1, xp2, yp2)
visible_dso_collector.append([r, dso_name.replace(' ', ''), xp1, yp1, xp2, yp2])
    def draw_trajectory(self, trajectory):
        """Draw a trajectory polyline with tick marks and per-point labels.

        trajectory: sequence of (ra, dec, label). A common label side is
        chosen from the average unit direction of the path segments so the
        labels keep clear of the line.
        """
        self.graphics.set_pen_rgb(self.config.dso_color)
        self.graphics.set_solid_line()
        fh = self.graphics.gi_default_font_size
        x1, y1, z1 = (None, None, None)
        nzopt = not self.projection.is_zoptim()
        labels = []
        for i in range(0, len(trajectory)):
            rax2, decx2, label2 = trajectory[i]
            x2, y2, z2 = self.projection.radec_to_xyz(rax2, decx2)
            if i > 0:
                self.graphics.set_linewidth(self.config.constellation_linewidth)
                if nzopt or (z1 > 0 and z2 > 0):
                    self.mirroring_graphics.line(x1, y1, x2, y2)
                    self.draw_trajectory_tick(x1, y1, x2, y2)
                    if i == 1:
                        # Also tick the very first point of the path.
                        self.draw_trajectory_tick(x2, y2, x1, y1)
            nx, ny = (None, None)
            if x1 is not None:
                # Unit direction of the segment ending at this point.
                n = math.sqrt((x2-x1)**2 + (y2-y1)**2)
                if n != 0:
                    nx = (x2-x1)/n
                    ny = (y2-y1)/n
            labels.append([x2, y2, z2, nx, ny, label2])
            x1, y1, z1 = (x2, y2, z2)
        sum_x, sum_y = (0, 0)
        # NOTE(review): the loop below rebinds `_` locally, shadowing the
        # gettext alias used elsewhere in this class.
        for _, _, _, nx, ny, _ in labels:
            if nx is not None:
                sum_x += nx
                sum_y += ny
        # label_pos:
        #    1
        #  4 + 2
        #    3
        if sum_x != 0 or sum_y != 0:
            # Average direction over the len(labels)-1 segments.
            sum_x = sum_x / (len(labels) - 1)
            sum_y = sum_y / (len(labels) - 1)
            cmp = 0.8
            if sum_x > cmp or sum_x < -cmp:
                # Mostly horizontal motion: offset the labels in y.
                label_pos = 1
            else:
                label_pos = 2
        else:
            label_pos = 0
        r = self.min_radius * 1.2 / 2**0.5
        for x, y, z, nx, ny, label in labels:
            if nzopt or z > 0:
                if label_pos == 1:
                    self.mirroring_graphics.text_centred(x, y + r + fh, label)
                elif label_pos == 2:
                    self.mirroring_graphics.text_right(x + r + fh/4, y - fh/2, label)
                else:
                    self.mirroring_graphics.text_centred(x, y - r - fh/2.0, label)
def draw_trajectory_tick(self, x1, y1, x2, y2):
dx = x2-x1
dy = y2-y1
dr = math.sqrt(dx * dx + dy*dy)
ddx = dx * 1.0 / dr
ddy = dy * 1.0 / dr
self.graphics.set_linewidth(1.5*self.config.constellation_linewidth)
self.mirroring_graphics.line(x2-ddy, y2+ddx, x2+ddy, y2-ddx)
def magnitude_to_radius(self, magnitude):
# radius = 0.13*1.35**(int(self.lm_stars)-magnitude)
mag_d = self.lm_stars - np.clip(magnitude, a_min=None, a_max=self.lm_stars)
mag_s = np.interp(mag_d, MAG_SCALE_X, MAG_SCALE_Y)
radius = 0.1 * 1.33 ** mag_s + self.star_mag_r_shift
return radius
    def draw_stars(self, star_catalog, precession_matrix):
        """Select and draw the stars of the field, track the picker-closest star,
        and queue star labels for drawing.

        Stars are processed in order of increasing magnitude (brightest
        first). When the picker is active, a star inside the picker box is
        recorded in self.picked_star instead of being labelled.
        """
        pick_r = self.config.picker_radius if self.config.picker_radius > 0 else 0
        selection = star_catalog.select_stars(self.fieldcentre, self.fieldsize, self.lm_stars, precession_matrix)
        if selection is None or len(selection) == 0:
            print(_('No stars found.'))
            return
        # print("Stars selection {} ms".format(str(time()-tm)), flush=True)
        print( _('{} stars in map.'.format(selection.shape[0])))
        var=str(round(max(selection['mag']), 2))
        print(_(f'Faintest star : {var}' ))
        # tm = time()
        x, y = self.projection.np_radec_to_xy(selection['ra'], selection['dec'])
        # print("Stars view positioning {} ms".format(str(time()-tm)), flush=True)
        mag = selection['mag']
        bsc = selection['bsc']
        indices = np.argsort(mag)
        magsorted = mag[indices]
        rsorted = self.magnitude_to_radius(magsorted)  # radii aligned with sorted order
        if not self.config.star_colors:
            # self.graphics.set_pen_rgb((self.config.draw_color[0]/3, self.config.draw_color[0]/3, self.config.draw_color[0]/3))
            self.graphics.set_fill_rgb(self.config.draw_color)
            self.graphics.set_linewidth(0)
        star_labels = []
        pick_min_r = pick_r**2  # squared distance of the best picker candidate so far
        x1, y1, x2, y2 = self.get_field_rect_mm()
        for i, index in enumerate(indices):
            xx, yy, rr = (x[index].item(), y[index].item(), rsorted[i].item(),)
            # Clip against the field rectangle enlarged by the star radius.
            if (xx >= x1-rr) and (xx <= x2+rr) and (yy >= y1-rr) and (yy <= y2+rr):
                if self.config.show_star_circles:
                    self.star(xx, yy, rr, star_catalog.get_star_color(selection[index]))
                if pick_r > 0 and abs(xx) < pick_r and abs(yy) < pick_r:
                    # Star inside the picker box: keep the one closest to centre.
                    r = xx*xx + yy*yy
                    if r < pick_min_r:
                        self.picked_star = (xx, yy, rr, mag[index], bsc[index])
                        pick_min_r = r
                elif self.config.show_star_labels:
                    bsc_star = selection[index]['bsc']
                    if bsc_star is not None:
                        if isinstance(bsc_star, str):
                            slabel = bsc_star
                        else:
                            # Prefer the Bayer (greek) designation, fall back to Flamsteed.
                            slabel = bsc_star.greek
                            if slabel:
                                slabel = STAR_LABELS[slabel] + bsc_star.greek_no
                            elif self.config.show_flamsteed:
                                slabel = bsc_star.flamsteed
                                if slabel and self.config.flamsteed_numbers_only:
                                    slabel = slabel.split()[0]
                        if slabel:
                            label_length = self.graphics.text_width(slabel)
                            labelpos_list = self.circular_object_labelpos(xx, yy, rr, label_length)
                            # Choose the candidate position with the lowest label potential.
                            pot = 1e+30
                            for labelpos_index in range(len(labelpos_list)):
                                [[lx1, ly1], [lx2, ly2], [lx3, ly3]] = labelpos_list[labelpos_index]
                                pot1 = self.label_potential.compute_potential(lx2, ly2)
                                if labelpos_index == 0:
                                    pot1 *= 0.6 # favour label right
                                # self.label_potential.compute_potential(x1,y1),
                                # self.label_potential.compute_potential(x3,y3)])
                                if pot1 < pot:
                                    pot = pot1
                                    labelpos = labelpos_index
                            [lx, ly] = labelpos_list[labelpos][1]
                            self.label_potential.add_position(lx, ly, label_length)
                            star_labels.append((xx, yy, rr, labelpos, bsc_star))
        if len(star_labels) > 0:
            self.draw_stars_labels(star_labels)
    def draw_picked_star(self):
        """Draw the magnitude (and designation, if any) label of the picked star."""
        if self.picked_star is not None:
            x, y, r, mag, bsc = self.picked_star
            self.graphics.set_font(self.graphics.gi_font, 0.9*self.graphics.gi_default_font_size)
            label = str(mag)
            if bsc is not None:
                # Prefer Bayer, then Flamsteed, then the HD catalogue number.
                if bsc.greek:
                    label += '(' + STAR_LABELS[bsc.greek] + bsc.greek_no + ' ' + bsc.constellation.capitalize() + ')'
                elif bsc.flamsteed:
                    label += '(' + str(bsc.flamsteed) + ')'
                elif bsc.HD is not None:
                    label += '(HD' + str(bsc.HD) + ')'
            self.draw_circular_object_label(x, y, r, label)
    def draw_stars_labels(self, star_labels):
        """Draw queued star labels, de-duplicated per constellation.

        Entries are (x, y, r, labelpos, star) where star is either a plain
        string label or a BSC record with greek/flamsteed designations.
        """
        fn = self.graphics.gi_default_font_size
        printed = {}  # constellation -> set of labels already drawn
        bayer_fh = self.config.bayer_label_font_scale * fn
        flamsteed_fh = self.config.flamsteed_label_font_scale * fn
        for x, y, r, labelpos, star in star_labels:
            if isinstance(star, str):
                self.graphics.set_font(self.graphics.gi_font, 0.9*fn)
                self.draw_circular_object_label(x, y, r, star, labelpos)
            else:
                slabel = star.greek
                if not slabel:
                    is_greek = False
                    if self.config.show_flamsteed:
                        slabel = star.flamsteed
                        if slabel and self.config.flamsteed_numbers_only:
                            slabel = slabel.split()[0]
                else:
                    is_greek = True
                    # NOTE(review): STAR_LABELS.get() returns None for an unknown
                    # greek key and would raise on '+' — confirm keys are always
                    # present (draw_stars uses STAR_LABELS[...] directly).
                    slabel = STAR_LABELS.get(slabel) + star.greek_no
                if slabel:
                    printed_labels = printed.setdefault(star.constellation, set())
                    if slabel not in printed_labels:
                        printed_labels.add(slabel)
                        if is_greek:
                            self.graphics.set_font(self.graphics.gi_font, bayer_fh, self.config.bayer_label_font_style)
                        else:
                            self.graphics.set_font(self.graphics.gi_font, flamsteed_fh, self.config.flamsteed_label_font_style)
                        self.draw_circular_object_label(x, y, r, slabel, labelpos)
def draw_constellations(self, constell_catalog, jd, precession_matrix, hl_constellation):
# print('Drawing constellations...')
if self.config.show_constellation_borders:
self.draw_constellation_boundaries(constell_catalog, jd, precession_matrix, hl_constellation)
if self.config.show_constellation_shapes:
self.draw_constellation_shapes(constell_catalog, jd, precession_matrix)
def draw_grid_equatorial(self):
# print('Drawing equatorial grid...')
self.graphics.save()
self.graphics.set_linewidth(self.config.grid_linewidth)
self.graphics.set_solid_line()
self.graphics.set_pen_rgb(self.config.grid_color)
self.draw_grid_dec()
self.draw_grid_ra()
self.graphics.restore()
def grid_dec_label(self, dec_minutes, label_fmt):
deg = abs(int(dec_minutes/60))
minutes = abs(dec_minutes) - deg * 60
if dec_minutes > 0:
prefix = '+'
elif dec_minutes < 0:
prefix = '-'
else:
prefix = ''
return prefix + label_fmt.format(deg, minutes)
def grid_ra_label(self, ra_minutes, label_fmt):
hrs = int(ra_minutes/60)
mins = int(ra_minutes) % 60
secs = int(ra_minutes % 1 * 60)
return label_fmt.format(hrs, mins, secs)
    def draw_grid_dec(self):
        """Draw declination grid lines at a spacing chosen from DEC_GRID_SCALE.

        The spacing picked is the one whose number of steps across the field
        radius is closest to GRID_DENSITY.
        """
        prev_steps, prev_grid_minutes = (None, None)
        for grid_minutes in DEC_GRID_SCALE:
            steps = self.fieldradius / (np.pi * grid_minutes / (180 * 60))
            if steps < GRID_DENSITY:
                if prev_steps is not None:
                    # Pick whichever neighbouring spacing is closer to the target density.
                    if prev_steps-GRID_DENSITY < GRID_DENSITY-steps:
                        grid_minutes = prev_grid_minutes
                break
            prev_steps, prev_grid_minutes = (steps, grid_minutes)
        dec_min = self.fieldcentre[1] - self.fieldradius
        dec_max = self.fieldcentre[1] + self.fieldradius
        label_fmt = '{}°' if grid_minutes >= 60 else '{}°{:02d}\''
        dec_minutes = -90*60 + grid_minutes
        while dec_minutes < 90*60:
            dec = np.pi * dec_minutes / (180*60)
            # Only draw lines that cross the visible declination band.
            if (dec > dec_min) and (dec < dec_max):
                self.draw_grid_dec_line(dec, dec_minutes, label_fmt)
            dec_minutes += grid_minutes
    def draw_grid_dec_line(self, dec, dec_minutes, label_fmt):
        """Draw one declination line by stepping RA symmetrically from the field centre.

        Segments extend to both sides of the field centre until a half-turn is
        covered or the line leaves the chart on the left, where the rotated
        label is drawn.
        """
        dra = self.fieldradius / 10
        x11, y11, z11 = (None, None, None)
        agg_ra = 0
        nzopt = not self.projection.is_zoptim()
        while True:
            x12, y12, z12 = self.projection.radec_to_xyz(self.fieldcentre[0] + agg_ra, dec)
            x22, y22, z22 = self.projection.radec_to_xyz(self.fieldcentre[0] - agg_ra, dec)
            # x21/y21 hold the previous step of the "minus" branch; they are
            # first assigned at the end of iteration one, before this guard
            # can pass.
            if x11 is not None and (nzopt or (z11 > 0 and z12 > 0)):
                self.mirroring_graphics.line(x11, y11, x12, y12)
                self.mirroring_graphics.line(x21, y21, x22, y22)
            agg_ra = agg_ra + dra
            if agg_ra > np.pi:
                break
            if x12 < -self.drawingwidth/2:
                # Line crossed the left chart border: interpolate the crossing
                # y and draw the declination label there, rotated along the line.
                y = (y12-y11) * (self.drawingwidth/2 + x11) / (x11 - x12) + y11
                label = self.grid_dec_label(dec_minutes, label_fmt)
                self.graphics.save()
                self.mirroring_graphics.translate(-self.drawingwidth/2,y)
                text_ang = math.atan2(y11-y12, x11-x12)
                self.mirroring_graphics.rotate(text_ang)
                fh = self.graphics.gi_default_font_size
                if dec >= 0:
                    self.graphics.text_right(2*fh/3, +fh/3, label)
                else:
                    self.graphics.text_right(2*fh/3, -fh, label)
                self.graphics.restore()
                break
            x11, y11, z11 = (x12, y12, z12)
            x21, y21, z21 = (x22, y22, z22)
    def draw_grid_ra(self):
        """Draw right-ascension grid lines at a spacing chosen from RA_GRID_SCALE.

        The RA window is widened near the pole so converging lines are kept.
        """
        prev_steps, prev_grid_minutes = (None, None)
        fc_cos = math.cos(self.fieldcentre[1])
        for grid_minutes in RA_GRID_SCALE:
            steps = self.fieldradius / (fc_cos * (np.pi * grid_minutes / (12 * 60)))
            if steps < GRID_DENSITY:
                if prev_steps is not None:
                    # Pick whichever neighbouring spacing is closer to the target density.
                    if prev_steps-GRID_DENSITY < GRID_DENSITY-steps:
                        grid_minutes = prev_grid_minutes
                break
            prev_steps, prev_grid_minutes = (steps, grid_minutes)
        max_visible_dec = self.fieldcentre[1]+self.fieldradius if self.fieldcentre[1] > 0 else self.fieldcentre[1]-self.fieldradius;
        if max_visible_dec >= np.pi/2 or max_visible_dec <= -np.pi/2:
            # Pole inside the field: every RA line is visible.
            ra_size = 2*np.pi
        else:
            ra_size = self.fieldradius / math.cos(max_visible_dec)
            if ra_size > 2*np.pi:
                ra_size = 2*np.pi
        if grid_minutes >= 60:
            label_fmt = '{}h'
        elif grid_minutes >= 1:
            label_fmt = '{}h{:02d}m'
        else:
            label_fmt = '{}h{:02d}m{:02d}s'
        ra_minutes = 0
        while ra_minutes < 24*60:
            ra = np.pi * ra_minutes / (12*60)
            # Compare against the field centre modulo 2*pi in both directions.
            if abs(self.fieldcentre[0]-ra) < ra_size or abs(self.fieldcentre[0]-2*np.pi-ra) < ra_size or abs(2*np.pi+self.fieldcentre[0]-ra) < ra_size:
                self.draw_grid_ra_line(ra, ra_minutes, label_fmt)
            ra_minutes += grid_minutes
    def draw_grid_ra_line(self, ra, ra_minutes, label_fmt):
        """Draw one RA line by stepping declination away from the field centre.

        The label is placed where the line leaves the chart vertically, on
        the branch chosen by the sign of the field-centre declination.
        """
        ddec = self.fieldradius / 10
        x11, y11, z11 = (None, None, None)
        x21, y21, z21 = (None, None, None)
        agg_dec = 0
        nzopt = not self.projection.is_zoptim()
        while True:
            x12, y12, z12 = self.projection.radec_to_xyz(ra, self.fieldcentre[1] + agg_dec)
            x22, y22, z22 = self.projection.radec_to_xyz(ra, self.fieldcentre[1] - agg_dec)
            if x11 is not None:
                if nzopt or (z11 > 0 and z12 > 0):
                    self.mirroring_graphics.line(x11, y11, x12, y12)
                if nzopt or (z21 > 0 and z22 > 0):
                    self.mirroring_graphics.line(x21, y21, x22, y22)
            agg_dec = agg_dec + ddec
            if agg_dec > np.pi/2:
                break
            if y12 > self.drawingheight/2 and y22 < -self.drawingheight/2:
                # Both branches left the chart: interpolate the border crossing
                # and draw the rotated RA label there.
                label = self.grid_ra_label(ra_minutes, label_fmt)
                self.graphics.save()
                if self.fieldcentre[1] <= 0:
                    x = (x12-x11) * (self.drawingheight/2 - y11) / (y12 - y11) + x11
                    self.mirroring_graphics.translate(x, self.drawingheight/2)
                    text_ang = math.atan2(y11-y12, x11-x12)
                else:
                    x = (x22-x21) * (-self.drawingheight/2 - y21) / (y22 - y21) + x21
                    self.mirroring_graphics.translate(x, -self.drawingheight/2)
                    text_ang = math.atan2(y21-y22, x21-x22)
                self.mirroring_graphics.rotate(text_ang)
                fh = self.graphics.gi_default_font_size
                self.graphics.text_right(2*fh/3, fh/3, label)
                self.graphics.restore()
                break
            x11, y11, z11 = (x12, y12, z12)
            x21, y21, z21 = (x22, y22, z22)
    def draw_constellation_shapes(self, constell_catalog, jd, precession_matrix):
        """Draw constellation stick figures, optionally precessed to epoch jd.

        Rectangular line endpoints are cached in the module-level
        constell_lines_rect1/2 so the sphere->rect conversion runs only once.
        """
        self.graphics.set_linewidth(self.config.constellation_linewidth)
        self.graphics.set_solid_line()
        self.graphics.set_pen_rgb(self.config.constellation_lines_color)
        global constell_lines_rect1, constell_lines_rect2
        if jd is not None:
            if constell_lines_rect1 is None:
                points = constell_catalog.all_constell_lines
                xr1, yr1, zr1 = np_sphere_to_rect(points[:,0], points[:,1])
                constell_lines_rect1 = np.column_stack((xr1, yr1, zr1))
                xr2, yr2, zr2 = np_sphere_to_rect(points[:,2], points[:,3])
                constell_lines_rect2 = np.column_stack((xr2, yr2, zr2))
            prec_rect1 = np.matmul(constell_lines_rect1, precession_matrix)
            ra1, dec1 = np_rect_to_sphere(prec_rect1[:,[0]], prec_rect1[:,[1]], prec_rect1[:,[2]])
            prec_rect2 = np.matmul(constell_lines_rect2, precession_matrix)
            ra2, dec2 = np_rect_to_sphere(prec_rect2[:,[0]], prec_rect2[:,[1]], prec_rect2[:,[2]])
            constell_lines = np.column_stack((ra1, dec1, ra2, dec2))
        else:
            constell_lines = constell_catalog.all_constell_lines
        x1, y1, z1 = self.projection.np_radec_to_xyz(constell_lines[:, 0], constell_lines[:, 1])
        x2, y2, z2 = self.projection.np_radec_to_xyz(constell_lines[:, 2], constell_lines[:, 3])
        nzopt = not self.projection.is_zoptim()
        for i in range(len(x1)):
            if nzopt or (z1[i] > 0 and z2[i] > 0):
                if self.config.constellation_linespace > 0:
                    # Shorten the segment at both ends to leave a gap at the stars.
                    dx = x2[i] - x1[i]
                    dy = y2[i] - y1[i]
                    dr = math.sqrt(dx * dx + dy*dy)
                    ddx = dx * self.config.constellation_linespace / dr
                    ddy = dy * self.config.constellation_linespace / dr
                    self.mirroring_graphics.line(x1[i] + ddx, y1[i] + ddy, x2[i] - ddx, y2[i] - ddy)
                else:
                    self.mirroring_graphics.line(x1[i], y1[i], x2[i], y2[i])
    def draw_constellation_boundaries(self, constell_catalog, jd, precession_matrix, hl_constellation):
        """Draw dashed constellation boundaries, highlighting hl_constellation.

        Boundary points are cached precession-ready in the module-level
        constell_bound_rect. Long segments are subdivided in RA/Dec to follow
        the projection's curvature; short near-polar segments are drawn
        straight.
        """
        self.graphics.set_dashed_line(0.6, 1.2)
        global constell_bound_rect
        if jd is not None:
            if constell_bound_rect is None:
                points = constell_catalog.boundaries_points
                xr, yr, zr = np_sphere_to_rect(points[:,0], points[:,1])
                constell_bound_rect = np.column_stack((xr, yr, zr))
            prec_rect = np.matmul(constell_bound_rect, precession_matrix)
            ra, dec = np_rect_to_sphere(prec_rect[:,[0]], prec_rect[:,[1]], prec_rect[:,[2]])
            constell_boundaries = np.column_stack((ra, dec))
        else:
            constell_boundaries = constell_catalog.boundaries_points
        x, y, z = self.projection.np_radec_to_xyz(constell_boundaries[:,0], constell_boundaries[:,1])
        hl_constellation = hl_constellation.upper() if hl_constellation else None
        wh_min = 2.5 # 2.5mm min interp distance
        flat_dec = np.pi*75/180 # boundaries can be linearized above 75 deg
        flat_rac_interp = np.pi*7/180 # some "magic" angle 7 deg.
        max_angle2 = (1 / 180 * np.pi)
        nzopt = not self.projection.is_zoptim()
        for index1, index2, cons1, cons2 in constell_catalog.boundaries_lines:
            if nzopt or (z[index1] > 0 and z[index2] > 0):
                if hl_constellation and (hl_constellation == cons1 or hl_constellation == cons2):
                    self.graphics.set_pen_rgb(self.config.constellation_hl_border_color)
                    self.graphics.set_linewidth(self.config.constellation_linewidth * 1.75)
                else:
                    self.graphics.set_pen_rgb(self.config.constellation_border_color)
                    self.graphics.set_linewidth(self.config.constellation_border_linewidth)
                x_start, y_start = x[index1], y[index1]
                x_end, y_end = x[index2], y[index2]
                ra_start, dec_start = constell_boundaries[index1]
                ra_end, dec_end = constell_boundaries[index2]
                if abs(ra_end - ra_start) > np.pi:
                    # Segment crosses the 0h/24h wrap: normalize so d_ra is
                    # measured the short way across the wrap.
                    if ra_end < ra_start:
                        ra_start, ra_end = ra_end, ra_start
                        dec_start, dec_end = dec_end, dec_start
                        x_start, y_start, x_end, y_end = x_end, y_end, x_start, y_start
                    d_ra = (ra_end - (ra_start + 2 * np.pi))
                else:
                    d_ra = (ra_end - ra_start)
                d_dec = (dec_end - dec_start)
                interpolate = True
                if (abs(dec_start) > flat_dec or abs(dec_end) > flat_dec) and abs(d_ra) < flat_rac_interp:
                    interpolate = False
                if interpolate:
                    divisions = self.calc_boundary_divisions(1, 1, wh_min, max_angle2, x_start, y_start, x_end, y_end, ra_start, dec_start, ra_end, dec_end)
                else:
                    divisions = 1
                if divisions == 0:
                    # Rejected by the first-level clip test.
                    continue
                if divisions == 1:
                    self.mirroring_graphics.line(x_start, y_start, x_end, y_end)
                else:
                    # Interpolate intermediate vertices linearly in RA/Dec.
                    dd_ra = d_ra / divisions
                    dd_dec = d_dec / divisions
                    vertices = [(x_start, y_start)]
                    ra1, dec1 = ra_start, dec_start
                    for i in range(divisions-1):
                        dec2 = dec1 + dd_dec
                        ra2 = ra1 + dd_ra
                        x2, y2 = self.projection.radec_to_xy(ra2, dec2)
                        vertices.append((x2, y2))
                        ra1, dec1 = ra2, dec2
                    vertices.append((x_end, y_end))
                    self.mirroring_graphics.polyline(vertices)
    def calc_boundary_divisions(self, level, divs, wh_min, max_angle2, x1, y1, x2, y2, ra1, dec1, ra2, dec2):
        """Recursively determine how many subdivisions a boundary segment needs.

        Doubles divs until the bend between the two half-segments at the
        RA/Dec midpoint falls below max_angle2, or the segment is shorter
        than wh_min mm in both axes. Returns 0 when the first-level clip
        test rejects the segment entirely.
        """
        if abs(x2-x1) < wh_min and abs(y2-y1) < wh_min:
            # self.mirroring_graphics.text_centred((x1+x2)/2, (y1+y2)/2, '{:.1f}'.format(max(abs(x2-x1), abs(y2-y1))))
            return divs
        if abs(ra2-ra1) > np.pi:
            # Midpoint taken across the 0h/24h RA wrap.
            ra_center = np.pi + (ra1 + ra2) / 2
        else:
            ra_center = (ra1 + ra2) / 2
        dec_center = (dec1 + dec2) /2
        x_center, y_center = self.projection.radec_to_xy(ra_center, dec_center)
        if level == 1:
            # NOTE(review): cohen_sutherland_encode is called with two x
            # (resp. two y) coordinates per call — verify the intended
            # argument pairs are not (x, y) points.
            c1 = self.graphics.cohen_sutherland_encode(x1, x_center)
            c2 = self.graphics.cohen_sutherland_encode(y1, y_center)
            c3 = self.graphics.cohen_sutherland_encode(x_center, x2)
            c4 = self.graphics.cohen_sutherland_encode(y_center, y2)
            if (c1 | c2) != 0 and (c1 & c2) != 0 and (c3 | c4) != 0 and (c3 & c4) != 0:
                return 0
        # Cross product of the normalized half-segments = sine of the bend angle.
        vx1 = x_center - x1
        vy1 = y_center - y1
        vx2 = x2 - x_center
        vy2 = y2 - y_center
        vec_mul2 = (vx1 * vy2 - vy1 * vx2) / (math.sqrt(vx1**2 + vy1**2) * math.sqrt(vx2**2 + vy2**2))
        if abs(vec_mul2) < max_angle2:
            return divs
        return self.calc_boundary_divisions(level+1, divs * 2, wh_min, max_angle2, x1, y1, x_center, y_center, ra1, dec1, ra_center, dec_center)
    def create_widgets(self):
        """Instantiate the chart legend/overlay widgets and allocate their space.

        Only the magnitude-scale and map-scale legends claim space in the
        widget allocator, and only when enabled in the configuration.
        """
        left, bottom, right, top = self.get_field_rect_mm()
        self.space_widget_allocator = SpaceWidgetAllocator(left, bottom, right, top)
        self.w_mag_scale = WidgetMagnitudeScale(sky_map_engine=self,
                                                alloc_space_spec='bottom,left',
                                                legend_fontsize=self.get_legend_font_size(),
                                                stars_in_scale=STARS_IN_SCALE,
                                                lm_stars=self.lm_stars,
                                                legend_linewidth=self.config.legend_linewidth,
                                                vertical=False,
                                                color=self.config.draw_color
                                                )
        self.w_map_scale = WidgetMapScale(sky_map_engine=self,
                                          alloc_space_spec='bottom,right',
                                          drawingscale=self.drawingscale,
                                          maxlength=self.drawingwidth/3.0,
                                          legend_fontsize=self.get_legend_font_size(),
                                          legend_linewidth=self.config.legend_linewidth,
                                          color=self.config.draw_color)
        self.w_orientation = WidgetOrientation(legend_fontsize=self.get_legend_font_size(),
                                               mirror_x=self.config.mirror_x,
                                               mirror_y=self.config.mirror_y,
                                               color=self.config.draw_color)
        self.w_coords = WidgetCoords(self.language, color=self.config.draw_color)
        self.w_dso_legend = WidgetDsoLegend(self.language, self.drawingwidth, LEGEND_MARGIN, color=self.config.draw_color)
        self.w_telrad = WidgetTelrad(self.drawingscale, self.config.telrad_linewidth, self.config.telrad_color)
        self.w_eyepiece = WidgetEyepiece(self.drawingscale, self.config.eyepiece_fov, self.config.eyepiece_linewidth, self.config.eyepiece_color)
        self.w_picker = WidgetPicker(self.config.picker_radius, self.config.picker_linewidth, self.config.picker_color)
        if self.config.show_mag_scale_legend:
            self.w_mag_scale.allocate_space(self.space_widget_allocator)
        if self.config.show_map_scale_legend:
            self.w_map_scale.allocate_space(self.space_widget_allocator)
def star(self, x, y, radius, star_color):
"""
Filled circle with boundary. Set fill colour and boundary
colour in advance using set_pen_rgb and set_fill_rgb
"""
if self.config.star_colors and star_color:
self.graphics.set_fill_rgb(star_color)
r = round(radius, 2)
self.mirroring_graphics.circle(x, y, r, DrawMode.FILL)
def no_mirror_star(self, x, y, radius):
"""
Filled circle with boundary. Set fill colour and boundary
colour in advance using set_pen_rgb and set_fill_rgb
"""
r = int((radius + self.graphics.gi_linewidth/2.0)*100.0 + 0.5)/100.0
self.graphics.circle(x, y, r, DrawMode.FILL)
    def open_cluster(self, x, y, radius, label, label_mag, label_ext, labelpos):
        """Draw an open cluster as a dashed circle with optional labels.

        Falls back to drawingwidth/40 when radius is not positive. label_mag
        is drawn (slightly smaller, below) only when no extended label is
        present.
        """
        r = radius if radius > 0 else self.drawingwidth/40.0
        self.graphics.set_pen_rgb(self.config.star_cluster_color)
        self.graphics.set_linewidth(self.config.open_cluster_linewidth)
        self.graphics.set_dashed_line(0.6, 0.4)
        self.mirroring_graphics.circle(x, y, r)
        if label_ext:
            label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, label_fh)
        else:
            label_fh = self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, self.graphics.gi_default_font_size, self.config.dso_label_font_style)
        self.draw_circular_object_label(x, y, r, label, labelpos, label_fh)
        if label_ext:
            # Extended label goes to the position opposite the main label.
            self.draw_circular_object_label(x, y, r, label_ext, self.to_ext_labelpos(labelpos), label_fh)
        if not label_ext and label_mag:
            self.graphics.set_font(self.graphics.gi_font, label_fh*0.8, self.config.dso_label_font_style)
            self.draw_circular_object_label(x, y-0.9*label_fh, r, label_mag, labelpos, label_fh)
    def galaxy_cluster(self, x, y, radius, label, label_ext, labelpos):
        """Draw a galaxy cluster as a sparsely dashed circle with labels.

        When no extended label is used label_fh stays None, so the label
        renderer falls back to the default font size.
        """
        r = radius if radius > 0 else self.drawingwidth/40.0
        self.graphics.set_pen_rgb(self.config.galaxy_cluster_color)
        self.graphics.set_linewidth(self.config.galaxy_cluster_linewidth)
        self.graphics.set_dashed_line(0.5, 2.0)
        self.mirroring_graphics.circle(x, y, r)
        if label_ext:
            label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, label_fh)
        else:
            label_fh = None
            self.graphics.set_font(self.graphics.gi_font, self.graphics.gi_default_font_size, self.config.dso_label_font_style)
        self.draw_circular_object_label(x, y, r, label, labelpos, label_fh)
        if label_ext:
            # Extended label goes to the position opposite the main label.
            self.draw_circular_object_label(x, y, r, label_ext, self.to_ext_labelpos(labelpos), label_fh)
def draw_asterism_label(self, x, y, label, labelpos, d, fh):
if labelpos == 0 or labelpos == -1:
self.mirroring_graphics.text_centred(x, y-d-2*fh/3.0, label)
elif labelpos == 1:
self.mirroring_graphics.text_centred(x, y+d+fh/3.0, label)
elif labelpos == 2:
self.mirroring_graphics.text_left(x-d-fh/6.0, y-fh/3.0, label)
elif labelpos == 3:
self.mirroring_graphics.text_right(x+d+fh/6.0, y-fh/3.0, label)
    def asterism(self, x, y, radius, label, label_ext, labelpos):
        """Draw an asterism as a dashed diamond with optional labels.

        d is the half-diagonal of the diamond; diff compensates for the
        line width at the diamond corners.
        """
        r = radius if radius > 0 else self.drawingwidth/40.0
        w2 = 2**0.5
        d = r/2.0*w2
        self.graphics.set_pen_rgb(self.config.star_cluster_color)
        self.graphics.set_linewidth(self.config.open_cluster_linewidth)
        self.graphics.set_dashed_line(0.6, 0.4)
        diff = self.graphics.gi_linewidth/2.0/w2
        self.mirroring_graphics.line(x-diff, y+d+diff, x+d+diff, y-diff)
        self.mirroring_graphics.line(x+d, y, x, y-d)
        self.mirroring_graphics.line(x+diff, y-d-diff, x-d-diff, y+diff)
        self.mirroring_graphics.line(x-d, y, x, y+d)
        if label_ext:
            label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, label_fh)
        else:
            label_fh = self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, self.graphics.gi_default_font_size, self.config.dso_label_font_style)
        if label:
            self.graphics.set_pen_rgb(self.config.label_color)
            self.draw_asterism_label(x, y, label, labelpos, d, label_fh)
        if label_ext:
            self.graphics.set_pen_rgb(self.config.label_color)
            self.draw_asterism_label(x, y, label_ext, self.to_ext_labelpos(labelpos), d, label_fh)
def asterism_labelpos(self, x, y, radius=-1, label_length=0.0):
"""
x,y,radius, label_length in mm
returns [[start, centre, end],()]
"""
r = radius if radius > 0 else self.drawingwidth/40.0
w2 = 2**0.5
d = r/2.0*w2
fh = self.graphics.gi_default_font_size
label_pos_list = []
yy = y-d-2*fh/3.0
label_pos_list.append([[x-label_length/2.0, yy], [x, yy], [x+label_length, yy]])
yy = y+d+2*fh/3.0
label_pos_list.append([[x-label_length/2.0, yy], [x, yy], [x+label_length, yy]])
xx = x-d-fh/6.0
yy = y
label_pos_list.append([[xx-label_length, yy], [xx-label_length/2.0, yy], [xx, yy]])
xx = x+d+fh/6.0
yy = y
label_pos_list.append([[xx, yy], [xx+label_length/2.0, yy], [xx+label_length, yy]])
return label_pos_list
def draw_galaxy_label(self, x, y, label, labelpos, rlong, rshort, fh):
if labelpos == 0 or labelpos == -1:
self.graphics.text_centred(0, -rshort-0.5*fh, label)
elif labelpos == 1:
self.graphics.text_centred(0, +rshort+0.5*fh, label)
elif labelpos == 2:
self.graphics.text_right(rlong+fh/6.0, -fh/3.0, label)
elif labelpos == 3:
self.graphics.text_left(-rlong-fh/6.0, -fh/3.0, label)
    def galaxy(self, x, y, rlong, rshort, posangle, mag, label, label_mag, label_ext, labelpos):
        """Draw a galaxy as an ellipse, with optional dynamic brightness and labels.

        If rlong <= 0 a default size of drawingwidth/40 with a 2:1 axis ratio
        is used; if only rshort is missing it defaults to rlong/2.
        labelpos can be 0,1,2,3. Labels are drawn in the ellipse's rotated
        frame via translate/rotate.
        """
        rl = rlong
        rs = rshort
        if rlong <= 0.0:
            rl = self.drawingwidth/40.0
            rs = rl/2.0
        if (rlong > 0.0) and (rshort < 0.0):
            rl = rlong
            rs = rlong/2.0
        self.graphics.save()
        self.graphics.set_linewidth(self.config.dso_linewidth)
        self.graphics.set_solid_line()
        if self.config.dso_dynamic_brightness and (mag is not None) and self.lm_deepsky >= 10.0 and label_ext is None:
            # Fade faint galaxies: intensity scales 0.5..1.0 with the margin
            # between the deepsky limiting magnitude and mag.
            fac = self.lm_deepsky - 8.0
            if fac > 5:
                fac = 5.0
            diff_mag = self.lm_deepsky - mag
            if diff_mag < 0:
                diff_mag = 0
            if diff_mag > 5:
                diff_mag = 5
            dso_intensity = 1.0 if diff_mag > fac else 0.5 + 0.5 * diff_mag / fac;
        else:
            dso_intensity = 1.0
        self.graphics.set_pen_rgb((self.config.galaxy_color[0]*dso_intensity,
                                   self.config.galaxy_color[1]*dso_intensity,
                                   self.config.galaxy_color[2]*dso_intensity))
        p = posangle
        # Shift the position angle by pi (same ellipse orientation) —
        # presumably to keep the subsequent label rotation readable; confirm.
        if posangle >= 0.5*np.pi:
            p += np.pi
        if posangle < -0.5*np.pi:
            p -= np.pi
        self.mirroring_graphics.ellipse(x, y, rl, rs, p)
        if label or label_ext:
            self.mirroring_graphics.translate(x, y)
            self.mirroring_graphics.rotate(p)
            self.graphics.set_pen_rgb((self.config.label_color[0]*dso_intensity,
                                       self.config.label_color[1]*dso_intensity,
                                       self.config.label_color[2]*dso_intensity))
            if label_ext:
                label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            else:
                label_fh = self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, label_fh, self.config.dso_label_font_style)
            if label:
                # Note: labels are offset by the raw rlong/rshort, not rl/rs.
                self.draw_galaxy_label(x, y, label, labelpos, rlong, rshort, label_fh)
            if label_ext:
                self.draw_galaxy_label(x, y, label_ext, self.to_ext_labelpos(labelpos), rlong, rshort, label_fh)
            if not label_ext and label_mag:
                self.mirroring_graphics.translate(0, -label_fh*0.9)
                self.graphics.set_font(self.graphics.gi_font, label_fh*0.8, self.config.dso_label_font_style)
                self.draw_galaxy_label(x, y, label_mag, labelpos, rlong, rshort, label_fh)
        self.graphics.restore()
def galaxy_labelpos(self, x, y, rlong=-1, rshort=-1, posangle=0.0, label_length=0.0):
rl = rlong
rs = rshort
if rlong <= 0.0:
rl = self.drawingwidth/40.0
rs = rl/2.0
if (rlong > 0.0) and (rshort < 0.0):
rl = rlong
rs = rlong/2.0
p = posangle
if posangle >= 0.5*np.pi:
p += np.pi
if posangle < -0.5*np.pi:
p -= np.pi
fh = self.graphics.gi_default_font_size
label_pos_list = []
sp = math.sin(p)
cp = math.cos(p)
hl = label_length/2.0
d = -rshort-0.5*fh
xc = x + d*sp
yc = y - d*cp
xs = xc - hl*cp
ys = yc - hl*sp
xe = xc + hl*cp
ye = yc + hl*sp
label_pos_list.append([[xs, ys], [xc, yc], [xe, ye]])
xc = x - d*sp
yc = y + d*cp
xs = xc - hl*cp
ys = yc - hl*sp
xe = xc + hl*cp
ye = yc + hl*sp
label_pos_list.append([[xs, ys], [xc, yc], [xe, ye]])
d = rlong+fh/6.0
xs = x + d*cp
ys = y + d*sp
xc = xs + hl*cp
yc = ys + hl*sp
xe = xc + hl*cp
ye = yc + hl*sp
label_pos_list.append([[xs, ys], [xc, yc], [xe, ye]])
xe = x - d*cp
ye = y - d*sp
xc = xe - hl*cp
yc = ye - hl*sp
xs = xc - hl*cp
ys = yc - hl*sp
label_pos_list.append([[xs, ys], [xc, yc], [xe, ye]])
return label_pos_list
def to_ext_labelpos(self, labelpos):
if labelpos == 0:
return 1
if labelpos == 1:
return 0
if labelpos == 2:
return 3
if labelpos == 3:
return 2
return 1
def draw_circular_object_label(self, x, y, r, label, labelpos=-1, fh=None):
if fh is None:
fh = self.graphics.gi_default_font_size
if label:
self.graphics.set_pen_rgb(self.config.label_color)
arg = 1.0-2*fh/(3.0*r)
if (arg < 1.0) and (arg > -1.0):
a = math.acos(arg)
else:
a = 0.5*np.pi
if labelpos == 0 or labelpos == -1:
self.mirroring_graphics.text_right(x+math.sin(a)*r+fh/6.0, y-r, label)
elif labelpos == 1:
self.mirroring_graphics.text_left(x-math.sin(a)*r-fh/6.0, y-r, label)
elif labelpos == 2:
self.mirroring_graphics.text_right(x+math.sin(a)*r+fh/6.0, y+r-2*fh/3.0, label)
elif labelpos == 3:
self.mirroring_graphics.text_left(x-math.sin(a)*r-fh/6.0, y+r-2*fh/3.0, label)
def circular_object_labelpos(self, x, y, radius=-1.0, label_length=0.0):
fh = self.graphics.gi_default_font_size
r = radius if radius > 0 else self.drawingwidth/40.0
arg = 1.0-2*fh/(3.0*r)
if (arg < 1.0) and (arg > -1.0):
a = math.acos(arg)
else:
a = 0.5*np.pi
label_pos_list = []
xs = x+math.sin(a)*r+fh/6.0
ys = y-r+fh/3.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x-math.sin(a)*r-fh/6.0 - label_length
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x+math.sin(a)*r+fh/6.0
ys = y+r-fh/3.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x+math.sin(a)*r+fh/6.0
ys = y+r-fh/3.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
return label_pos_list
def globular_cluster(self, x, y, radius, label, label_mag, label_ext, labelpos):
r = radius if radius > 0 else self.drawingwidth/40.0
self.graphics.set_linewidth(self.config.dso_linewidth)
self.graphics.set_solid_line()
self.graphics.set_pen_rgb(self.config.star_cluster_color)
self.mirroring_graphics.circle(x, y, r)
self.mirroring_graphics.line(x-r, y, x+r, y)
self.mirroring_graphics.line(x, y-r, x, y+r)
if label_ext:
label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, label_fh)
else:
label_fh = self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, label_fh, self.config.dso_label_font_style)
self.draw_circular_object_label(x, y, r, label, labelpos, label_fh)
if label_ext:
self.draw_circular_object_label(x, y, r, label_ext, self.to_ext_labelpos(labelpos), label_fh)
if not label_ext and label_mag:
self.graphics.set_font(self.graphics.gi_font, label_fh*0.8, self.config.dso_label_font_style)
self.draw_circular_object_label(x, y-0.9*label_fh, r, label_mag, labelpos, label_fh)
    def diffuse_nebula(self, x, y, width, height, posangle, label, label_mag, label_ext, labelpos):
        """Draw a diffuse nebula as an axis-aligned square outline of side
        *width*, with optional labels.

        When width < 0 a standard symbol size (drawingwidth/40 half-side) is
        used.  NOTE(review): height and posangle are accepted but unused here
        — the symbol is always an axis-aligned square; confirm whether that
        is intended.
        """
        self.graphics.set_linewidth(self.config.nebula_linewidth)
        self.graphics.set_solid_line()
        self.graphics.set_pen_rgb(self.config.nebula_color)
        # Half-side of the square.
        d = 0.5*width
        if width < 0.0:
            d = self.drawingwidth/40.0
        # Horizontal edges are extended by half a line width so the square's
        # corners close cleanly at the stroke joins.
        d1 = d+self.graphics.gi_linewidth/2.0
        self.mirroring_graphics.line(x-d1, y+d, x+d1, y+d)
        self.mirroring_graphics.line(x+d, y+d, x+d, y-d)
        self.mirroring_graphics.line(x+d1, y-d, x-d1, y-d)
        self.mirroring_graphics.line(x-d, y-d, x-d, y+d)
        if label_ext:
            label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            self.graphics.set_font(self.graphics.gi_font, label_fh)
        else:
            label_fh = self.graphics.gi_default_font_size
            # gi_default_font_size equals label_fh in this branch.
            self.graphics.set_font(self.graphics.gi_font, self.graphics.gi_default_font_size, self.config.dso_label_font_style)
        self.graphics.set_pen_rgb(self.config.label_color)
        if label:
            self.draw_diffuse_nebula_label(x, y, label, labelpos, d, label_fh)
        if label_ext:
            self.draw_diffuse_nebula_label(x, y, label_ext, self.to_ext_labelpos(labelpos), d, label_fh)
        # Magnitude label in a smaller font, one line below the main slot.
        if not label_ext and label_mag:
            self.graphics.set_font(self.graphics.gi_font, label_fh*0.8, self.config.dso_label_font_style)
            self.draw_diffuse_nebula_label(x, y-0.9*label_fh, label_mag, labelpos, d, label_fh)
def draw_diffuse_nebula_label(self, x, y, label, labelpos, d, fh):
if labelpos == 0 or labelpos == -1:
self.mirroring_graphics.text_centred(x, y-d-fh/2.0, label)
elif labelpos == 1:
self.mirroring_graphics.text_centred(x, y+d+fh/2.0, label)
elif labelpos == 2:
self.mirroring_graphics.text_left(x-d-fh/6.0, y-fh/3.0, label)
elif labelpos == 3:
self.mirroring_graphics.text_right(x+d+fh/6.0, y-fh/3.0, label)
    def diffuse_nebula_outlines(self, x, y, x_outl, y_outl, outl_lev, width, height, posangle, label, label_ext,
                                draw_label, labelpos=''):
        """Draw one contour level of a nebula as a closed outline polygon.

        x_outl/y_outl are sequences of array scalars (hence the .item()
        calls) giving the polygon vertices; outl_lev selects the contour
        level and controls how far the stroke colour is faded.  width only
        affects the label offset d; height and posangle are currently
        unused.  NOTE(review): labelpos defaults to '' here but is an int
        elsewhere — confirm callers always pass it when draw_label is set.
        """
        self.graphics.set_linewidth(self.config.nebula_linewidth)
        self.graphics.set_solid_line()
        # Fade the outline colour towards the background as the level rises.
        if self.config.light_mode:
            frac = 4 - 1.5 * outl_lev  # no logic, look nice in light mode
            pen_r = 1.0 - ((1.0 - self.config.nebula_color[0]) / frac)
            pen_g = 1.0 - ((1.0 - self.config.nebula_color[1]) / frac)
            pen_b = 1.0 - ((1.0 - self.config.nebula_color[2]) / frac)
        else:
            frac = 4 - 1.5 * outl_lev  # no logic, look nice in dark mode
            pen_r = self.config.nebula_color[0] / frac
            pen_g = self.config.nebula_color[1] / frac
            pen_b = self.config.nebula_color[2] / frac
        self.graphics.set_pen_rgb((pen_r, pen_g, pen_b))
        # Half-size used only for label placement.
        d = 0.5*width
        if width < 0.0:
            d = self.drawingwidth/40.0
        # Polygon edges, then an explicit closing edge back to vertex 0.
        for i in range(len(x_outl)-1):
            self.mirroring_graphics.line(x_outl[i].item(), y_outl[i].item(), x_outl[i+1].item(), y_outl[i+1].item())
        self.mirroring_graphics.line(x_outl[len(x_outl)-1].item(), y_outl[len(x_outl)-1].item(), x_outl[0].item(), y_outl[0].item())
        if draw_label:
            if label_ext:
                label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
            else:
                label_fh = self.graphics.gi_default_font_size * self.config.outlined_dso_label_font_scale
            self.graphics.set_font(self.graphics.gi_font, label_fh)
            self.graphics.set_pen_rgb(self.config.label_color)
            if label:
                self.draw_diffuse_nebula_label(x, y, label, labelpos, d, label_fh)
            if label_ext:
                self.draw_diffuse_nebula_label(x, y, label_ext, self.to_ext_labelpos(labelpos), d, label_fh)
def unknown_diffuse_nebula_outlines(self, x_outl, y_outl, outl_lev):
self.graphics.set_linewidth(self.config.nebula_linewidth)
self.graphics.set_solid_line()
if self.config.light_mode:
frac = 4 - 1.5 * outl_lev # no logic, look nice in light mode
pen_r = 1.0 - ((1.0 - self.config.nebula_color[0]) / frac)
pen_g = 1.0 - ((1.0 - self.config.nebula_color[1]) / frac)
pen_b = 1.0 - ((1.0 - self.config.nebula_color[2]) / frac)
else:
frac = 4 - 1.5 * outl_lev # no logic, look nice in dark mode
pen_r = self.config.nebula_color[0] / frac
pen_g = self.config.nebula_color[1] / frac
pen_b = self.config.nebula_color[2] / frac
self.graphics.set_pen_rgb((pen_r, pen_g, pen_b))
for i in range(len(x_outl)-1):
self.mirroring_graphics.line(x_outl[i].item(), y_outl[i].item(), x_outl[i+1].item(), y_outl[i+1].item())
self.mirroring_graphics.line(x_outl[len(x_outl)-1].item(), y_outl[len(x_outl)-1].item(), x_outl[0].item(), y_outl[0].item())
def diffuse_nebula_labelpos(self, x, y, width=-1.0, height=-1.0, posangle=0.0, label_length=0.0):
d = 0.5*width
if width < 0.0:
d = self.drawingwidth/40.0
fh = self.graphics.gi_default_font_size
label_pos_list = []
xs = x - label_length/2.0
ys = y-d-fh/2.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
ys = y+d+fh/2.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x - d - fh/6.0 - label_length
ys = y
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x + d + fh/6.0
ys = y
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
return label_pos_list
def planetary_nebula(self, x, y, radius, label, label_mag, label_ext, labelpos):
r = radius if radius > 0 else self.drawingwidth/40.0
self.graphics.set_linewidth(self.config.dso_linewidth)
self.graphics.set_solid_line()
self.graphics.set_pen_rgb(self.config.nebula_color)
self.mirroring_graphics.circle(x, y, 0.75*r)
self.mirroring_graphics.line(x-0.75*r, y, x-1.5*r, y)
self.mirroring_graphics.line(x+0.75*r, y, x+1.5*r, y)
self.mirroring_graphics.line(x, y+0.75*r, x, y+1.5*r)
self.mirroring_graphics.line(x, y-0.75*r, x, y-1.5*r)
if label_ext:
label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, label_fh)
else:
label_fh = self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, label_fh, self.config.dso_label_font_style)
self.draw_circular_object_label(x, y, r, label, labelpos, label_fh)
if label_ext:
self.draw_circular_object_label(x, y, r, label_ext, self.to_ext_labelpos(labelpos), label_fh)
if not label_ext and label_mag:
self.graphics.set_font(self.graphics.gi_font, label_fh*0.8, self.config.dso_label_font_style)
self.draw_circular_object_label(x, y-0.9*label_fh, r, label_mag, labelpos, label_fh)
def supernova_remnant(self, x, y, radius, label, label_ext, labelpos):
r = radius if radius > 0 else self.drawingwidth/40.0
self.graphics.set_linewidth(self.config.dso_linewidth)
self.graphics.set_solid_line()
self.graphics.set_pen_rgb(self.config.nebula_color)
self.mirroring_graphics.circle(x, y, r-self.graphics.gi_linewidth/2.0)
if label_ext:
label_fh = self.config.ext_label_font_scale * self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, label_fh)
else:
label_fh = self.graphics.gi_default_font_size
self.graphics.set_font(self.graphics.gi_font, self.graphics.gi_default_font_size, self.config.dso_label_font_style)
self.draw_circular_object_label(x, y, r, label, labelpos, label_fh)
if label_ext:
self.draw_circular_object_label(x, y, r, label_ext, self.to_ext_labelpos(labelpos), label_fh)
def unknown_object(self, x, y, radius, label, label_ext, labelpos):
r = radius
if radius <= 0.0:
r = self.drawingwidth/40.0
r /= 2**0.5
self.graphics.set_linewidth(self.config.dso_linewidth)
self.graphics.set_solid_line()
self.graphics.set_pen_rgb(self.config.dso_color)
self.mirroring_graphics.line(x-r, y+r, x+r, y-r)
self.mirroring_graphics.line(x+r, y+r, x-r, y-r)
fh = self.graphics.gi_default_font_size
if label != '':
self.graphics.set_pen_rgb(self.config.label_color)
if labelpos == 0:
self.mirroring_graphics.text_right(x+r+fh/6.0, y-fh/3.0, label)
elif labelpos ==1:
self.mirroring_graphics.text_left(x-r-fh/6.0, y-fh/3.0, label)
elif labelpos == 2:
self.mirroring_graphics.text_centred(x, y + r + fh/2.0, label)
else:
self.mirroring_graphics.text_centred(x, y - r - fh/2.0, label)
def unknown_object_labelpos(self, x, y, radius=-1, label_length=0.0):
r = radius
if radius <= 0.0:
r = self.drawingwidth/40.0
fh = self.graphics.gi_default_font_size
r /= 2**0.5
label_pos_list = []
xs = x + r + fh/6.0
ys = y
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x - r - fh/6.0 - label_length
ys = y
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x - label_length/2.0
ys = y + r + fh/2.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
xs = x - label_length/2.0
ys = y - r - fh/2.0
label_pos_list.append([[xs, ys], [xs+label_length/2.0, ys], [xs+label_length, ys]])
return label_pos_list
def align_rect_coords(self, x1, y1, x2, y2):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
return x1, y1, x2, y2
| skybber/fchart3 | fchart3/skymap_engine.py | skymap_engine.py | py | 80,302 | python | en | code | 9 | github-code | 36 |
38384503953 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import logging
import datetime
logname = "Trying"+datetime.date.today().strftime("%d-%m-%Y")+".log"
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
chrome_driver_path = "C:/Support/chromedriver.exe"
logging.info(chrome_driver_path)
driver = webdriver.Chrome(chrome_driver_path)
driver.get("http://www.python.org")
search_tab = driver.find_element_by_xpath("//input[(@id='id-search-field') and (@name='q')]")
print("id of bsearch tab is: " + search_tab.get_attribute('id'))
logging.info("id of bsearch tab is: " + search_tab.get_attribute('id'))
search_tab.clear()
search_tab.send_keys("python 2.7")
go_btn = driver.find_element_by_xpath("//button[(@type='submit') and (contains(text(),'GO'))]")
go_btn.click()
#ques = driver.find_element_by_xpath("//a[@value='Questions')]")
#print("question tab got" + ques)
#assert "Python" in driver.title
#elem = driver.find_element_by_name("q")
#elem.clear()
#elem.send_keys("pycon")
#elem.send_keys(Keys.RETURN)
#assert "No results found." not in driver.page_source
driver.close()
#driver.quit() | amynav/Testing_repo | trying.py | trying.py | py | 1,339 | python | en | code | 0 | github-code | 36 |
40600848751 | from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from watchlist.models import Movie
from django.http import JsonResponse
# Create your views here.
def movie_list(request):
movies = Movie.objects.all()
data = {
'movies': list( movies.values() )
}
return JsonResponse( data )
def movie_details(request, movie_id):
data = dict()
try:
movie = Movie.objects.get( pk=movie_id )
data['name'] = movie.name
data['description'] = movie.description
data['active'] = movie.active
except ObjectDoesNotExist:
data['error'] = "Invalid Movie id"
return JsonResponse(data)
| shubham2637/DRF | watchmate/watchlist/views.py | views.py | py | 684 | python | en | code | 0 | github-code | 36 |
22783587958 | #
# @lc app=leetcode id=970 lang=python3
#
# [970] Powerful Integers
#
# https://leetcode.com/problems/powerful-integers/description/
#
# algorithms
# Easy (39.91%)
# Likes: 99
# Dislikes: 44
# Total Accepted: 35.7K
# Total Submissions: 84.4K
# Testcase Example: '2\n3\n10'
#
# Given three integers x, y, and bound, return a list of all the powerful
# integers that have a value less than or equal to bound.
#
# An integer is powerful if it can be represented as x^i + y^j for some
# integers i >= 0 and j >= 0.
#
# You may return the answer in any order. In your answer, each value should
# occur at most once.
#
#
# Example 1:
#
#
# Input: x = 2, y = 3, bound = 10
# Output: [2,3,4,5,7,9,10]
# Explanation:
# 2 = 2^0 + 3^0
# 3 = 2^1 + 3^0
# 4 = 2^0 + 3^1
# 5 = 2^1 + 3^1
# 7 = 2^2 + 3^1
# 9 = 2^3 + 3^0
# 10 = 2^0 + 3^2
#
#
# Example 2:
#
#
# Input: x = 3, y = 5, bound = 15
# Output: [2,4,6,8,10,14]
#
#
#
# Constraints:
#
#
# 1 <= x, y <= 100
# 0 <= bound <= 10^6
#
#
#
# @lc code=start
class Solution:
def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
if bound < 2:
return []
res = []
i, j = 0, 0
if x != 1:
while x ** i <= bound:
i += 1
else:
i = 1
max_i = i
if y != 1:
while y ** j <= bound:
j += 1
else:
j = 1
max_j = j
seen = set([])
for i in range(max_i):
for j in range(max_j):
total = x ** i + y ** j
if total <= bound and total not in seen:
res.append(total)
seen.add(total)
return res
# @lc code=end
| Zhenye-Na/leetcode | python/970.powerful-integers.py | 970.powerful-integers.py | py | 1,739 | python | en | code | 17 | github-code | 36 |
23278209087 | def get_letter_guess():
while True:
try:
guess = input("Enter your letter guess: ")
if not guess.isalpha() or len(guess) != 1:
raise ValueError
except ValueError:
if not guess.isalpha():
print("Invalid input. Numbers and symbols are not allowed.")
elif len(guess) != 1:
print("Invalid input. Please enter only one letter.")
else:
return guess.lower()
# Test the function with a variety of inputs
print(get_letter_guess())
| fong-a/software_engineering | error_handling.py | error_handling.py | py | 559 | python | en | code | 0 | github-code | 36 |
5940101912 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pickle
import configparser
import copy
import subprocess
from distutils.util import strtobool
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.cuda.amp import autocast, GradScaler
# from AutoEncoder import AE,DataIO,FlowDataset,SlidingSampler,FSI
from AutoEncoder import AE
from AutoEncoder import DataIO as dio
from AutoEncoder import FlowDataset as fds
from AutoEncoder import SlidingSampler as ss
from ForceAutoEncoder import FAE
from ForceAutoEncoder import DataIO as dio_force
from ForceAutoEncoder import ForceDataset as forcds
from ForceAutoEncoder import SlidingSampler as ss_force
from ConvxOpt import ConvxOpt, FSI
"""Set our seed and other configurations for reproducibility."""
seed = 10
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
""" Define GradScaler """
scaler = GradScaler() # point1: Scaling the gradient information
""" read config file """
setup = configparser.ConfigParser()
setup.read('input.ini')
epochs = int(setup['DeepLearning']['epochs'])
learning_rate = float(setup['DeepLearning']['learning_rate'])
optthresh = float(setup['DeepLearning']['optthresh'])
target_loss = float(setup['DeepLearning']['target_loss'])
batch_size = int(setup['DeepLearning']['batchsize'])
window_size = int(setup['DeepLearning']['batchsize'])
sliding = int(setup['DeepLearning']['sliding'])
fc_features = int(setup['DeepLearning']['full_connected'])
control = strtobool(setup['Control']['control'])
inptype = int(setup['Control']['inptype'])
ured = float(setup['MPC']['ured'])
R = float(setup['MPC']['R'])
"""We set the preference about the CFD"""
dt = float(setup['CFD']['dt'])
mach= float(setup['CFD']['mach'])
re = float(setup['CFD']['re'])
iz = int(setup['CFD']['iz'])
"""We set the start step, the last step, the intervals"""
nst = int(setup['MPC']['nst'])
nls = int(setup['MPC']['nls'])
nin = int(setup['CFD']['nin'])
""" Dataset """
gpaths = setup['CFD']['gpaths']
fpaths = setup['CFD']['fpaths']
fmpaths= setup['Control']['fmpaths']
""" Set Dynamics """
print('Set Dynamics...\n')
dataio = dio(nst,nls,nin,gpaths,fpaths,iz,fmpaths)
grids,ibottom = dataio.readgrid()
js,je,ks,ke,ls,le,ite1,ite2,jd,imove = ibottom
# cropped indices
jcuts = [0,je+1 ,1]
kcuts = [0,ke+1-2,1]
lcuts = [0,le+1-100,1]
# output cropped grid
dataio.tweak_writegrid(['grid_z0003'],grids,jcuts,kcuts,lcuts)
flows = dataio.readflow()
control_inp = None
if control: control_inp = dataio.readformom(inptype)
# Set Tensor form
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
])
test_dataset = fds(2,jcuts,kcuts,lcuts,flows,control_inp,control,transform)
sampler = ss(test_dataset,batch_size,sliding)
test_loader = torch.utils.data.DataLoader(
test_dataset,
sampler = sampler
)
orgdatas = []
for batch,label,u in test_loader:
test = batch[0][0]
tmp = label
orgdatas.append(test)
maxstep = int( torch.max(tmp).item() )
print('Set Forces...')
dioforce = dio_force(nst,nls,nin,gpaths,fpaths,iz,fmpaths)
forces = dioforce.readformom(0) # 0 : Only CL
transform_force = torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
])
test_dataset_force = forcds(2,jcuts,kcuts,lcuts,forces,window_size,sliding,control_inp,control,transform_force)
sampler_force = ss_force(test_dataset_force,window_size,sliding)
test_loader_force = torch.utils.data.DataLoader(
test_dataset_force,
sampler = sampler_force
)
print('Start MPC')
# use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
""" Load models """
model = torch.load("learned_model")
model_force = torch.load("learned_model_force")
reconstruction = []
step = nst
""" set instances """
convxopt = ConvxOpt(batch_size,inptype)
horizon = convxopt.T # horizontal window
fsi = FSI(jcuts,kcuts,lcuts,iz,dataio,mach,re,dt,inptype,ured,horizon)
with torch.no_grad():
# Initial state variables D_0 (X_0, Y_0)
features = next( iter(test_loader) ) # D_0
for icount in range(maxstep):
print('step = ', step)
step = step + nin*sliding
# # Set Fluid Force
# batch = features[0]
# batch = torch.squeeze(batch)
# batch = batch.to(torch.float32).to('cuda')
if control: u = torch.squeeze(features[2]).to(torch.float32).to('cuda')
# ## standalized input batches
# shift = torch.mean(batch,(0,2,3)).to(torch.float32)
# scale = torch.std(batch,(0,2,3)).to(torch.float32)
# for i in range(5):
# batch[:,i,:,:] = (batch[:,i,:,:] - shift[i])/(scale[i]+1.0e-11)
# ## compute reconstructions using autocast
# with autocast(False): # point 2 :automatic selection for precision of the model
# if control:
# inp = [batch,u]
# else:
# print('MPC needs control')
# exit()
# ### Extract gx in latent space and A, B matrices
# gx,A,B = model.encoder_forMPC(inp)
# cvec = gx[:,:horizon]
# ## prepare the objective function
# exit()
# ## unstandalized
# for i in range(5):
# X_tilde[:,i,:,:] = X_tilde[:,i,:,:] * (scale[i]+1.0e-11) + shift[i]
# Deep FSI
# forces = fsi.calc_force(X_tilde[:ind_half],u[:ind_half])
''' test '''
fluid_forces = next(iter(test_loader_force))[0].to(torch.float32).to('cuda')
struct_forces = fsi.structure_force(u,inptype,ured,mach)
struct_forces = torch.from_numpy(struct_forces)[None].to(torch.float32).to('cuda')
''''''''''''
## map forces into the latent space
### map fluid forces
batch = fluid_forces
with autocast(False): # point 2 :automatic selection for precision of the model
if control:
inp = [batch,u[0]]
else:
print('MPC needs control')
exit()
### Extract gx in latent space and A, B matrices
gx,Af,Bf = model_force.encoder_forMPC(inp)
cvec_fluid = gx[:,:horizon]
### map structure forces
batch = struct_forces
with autocast(False): # point 2 :automatic selection for precision of the model
if control:
inp = [batch,u[0]]
else:
print('MPC needs control')
exit()
### Extract gx in latent space and A, B matrices
gx,_,_ = model_force.encoder_forMPC(inp)
cvec_struct = gx[:,:horizon]
# MPC
cforces = [fluid_forces,struct_forces]
u_optim = convxopt.solve_cvx(cforces,R,Af,Bf)
exit()
reconstruction.append(X_tilde[0].cpu())
# """ Calc recreated error """
recerrors = []
for i,X_tilde in enumerate(reconstruction):
recdata = X_tilde.cpu().numpy()
orgdata = orgdatas[i].cpu().numpy()
# data shape = (batch * channels * height * width)
# error_norm = np.linalg.norm(recdata-orgdata,axis=1,ord=1)
# org_norm = np.linalg.norm(orgdata,axis=1,ord=1)
error_norm = np.linalg.norm(recdata-orgdata,axis=0,ord=1)
org_norm = np.linalg.norm(orgdata,axis=0,ord=1)
recerror = error_norm/(org_norm)
recerrors.append(recerror)
f = open('recerrors.pickle', 'wb')
pickle.dump(recerrors, f)
"""## Visualize Results
Let's try to reconstruct some test images using our trained autoencoder.
"""
print('Post')
with torch.no_grad():
nstepall = np.arange(nst,nls+nin,nin*sliding)
# write grid
out_gfiles = [
'./grid_z0003'
]
dataio.writegrid(out_gfiles,grids,jcuts,kcuts,lcuts)
# write flow
statedic = []
for i,rec in enumerate(reconstruction):
batch = rec.cpu().numpy()
nstep = nstepall[i]
fname = 'recflows/u3.0/recflow_z{:0=2}_{:0=8}'.format(iz,nstep)
q = copy.deepcopy( batch )
dataio.writeflow(fname,q,jcuts,kcuts,lcuts)
| MDIFS/DeepKoopmanDynamicalFSI | mpc.py | mpc.py | py | 8,203 | python | en | code | 0 | github-code | 36 |
3859314216 | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('../Images/abc.png')
kernel = np.ones((9, 9), np.float32) / 10
print(kernel)
dst = cv2.filter2D(img, -1, kernel)
cv2.imshow("imange",dst)
cv2.waitKey(0)
cv2.destroyAllWindows() | trunghopro098/Image-Processing | ProcessImg/Filter2D.py | Filter2D.py | py | 263 | python | en | code | 1 | github-code | 36 |
17956410019 | import pygame as pg
from random import random as r
from neurobiba import Weights, load_weights, save_weights
import copy
import itertools
W, H, size = 100, 60, 10
pg.init()
screen = pg.display.set_mode((W*size, H*size), 0, 32)
pg.display.set_caption('CYBERBIBA')
def update():
nn = Weights([27,3])
canvas1 = [[[r(),r(),r()] for y in range(H)] for x in range(W)]
canvas2 = copy.deepcopy(canvas1)
return nn, canvas1, canvas2
nn, canvas1, canvas2 = update()
is_running = True
while is_running:
for event in pg.event.get():
if event.type == pg.QUIT:
is_running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_r:
nn, canvas1, canvas2 = update()
if event.key == pg.K_s:
save_weights(nn, "weights")
if event.key == pg.K_l:
nn, canvas1, canvas2 = update()
nn = load_weights("weights")
for x, i in enumerate(canvas1):
for y, _ in enumerate(i):
neighbors = [canvas1[(x+dx-1)%W][(y+dy-1)%H] for dy in range(3) for dx in range(3)]
neighbors = list(itertools.chain(*neighbors))
result = nn.feed_forward(neighbors)
canvas2[x][y] = result
color = tuple(map(lambda x: int(x*255),result))
screen.fill(color, (x*size, y*size, size, size))
canvas1, canvas2 = canvas2, canvas1
pg.display.flip()
| displaceman/neurobiba | examples/neural cellular automata/neuroautomata.py | neuroautomata.py | py | 1,435 | python | en | code | 4 | github-code | 36 |
35396799058 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import contextmanager
import tempfile
from pex.pex_builder import PEXBuilder
from twitter.common.collections import OrderedSet
from pants.backend.core.tasks.task import Task
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.base.exceptions import TaskError
class PythonTask(Task):
@classmethod
def register_options(cls, register):
super(PythonTask, cls).register_options(register)
register('--timeout', type=int, default=0,
help='Number of seconds to wait for http connections.')
def __init__(self, *args, **kwargs):
super(PythonTask, self).__init__(*args, **kwargs)
self.conn_timeout = (self.get_options().timeout or
self.context.config.getdefault('connection_timeout'))
self._compatibilities = self.get_options().interpreter or [b'']
self._interpreter_cache = None
self._interpreter = None
@property
def interpreter_cache(self):
if self._interpreter_cache is None:
self._interpreter_cache = PythonInterpreterCache(self.context.config,
logger=self.context.log.debug)
# Cache setup's requirement fetching can hang if run concurrently by another pants proc.
self.context.acquire_lock()
try:
# We pass in filters=compatibilities because setting up some python versions
# (e.g., 3<=python<3.3) crashes, and this gives us an escape hatch.
self._interpreter_cache.setup(filters=self._compatibilities)
finally:
self.context.release_lock()
return self._interpreter_cache
@property
def interpreter(self):
"""Subclasses can use this if they're fine with the default interpreter (the usual case)."""
if self._interpreter is None:
self._interpreter = self.select_interpreter(self._compatibilities)
return self._interpreter
def select_interpreter_for_targets(self, targets):
"""Pick an interpreter compatible with all the specified targets."""
allowed_interpreters = OrderedSet(self.interpreter_cache.interpreters)
targets_with_compatibilities = [] # Used only for error messages.
# Constrain allowed_interpreters based on each target's compatibility requirements.
for target in targets:
if target.is_python and hasattr(target, 'compatibility') and target.compatibility:
targets_with_compatibilities.append(target)
compatible_with_target = list(self.interpreter_cache.matches(target.compatibility))
allowed_interpreters &= compatible_with_target
if not allowed_interpreters:
# Create a helpful error message.
unique_compatibilities = set(tuple(t.compatibility) for t in targets_with_compatibilities)
unique_compatibilities_strs = [','.join(x) for x in unique_compatibilities if x]
targets_with_compatibilities_strs = [str(t) for t in targets_with_compatibilities]
raise TaskError('Unable to detect a suitable interpreter for compatibilities: %s '
'(Conflicting targets: %s)' % (' && '.join(unique_compatibilities_strs),
', '.join(targets_with_compatibilities_strs)))
# Return the lowest compatible interpreter.
return self.interpreter_cache.select_interpreter(allowed_interpreters)[0]
def select_interpreter(self, filters):
"""Subclasses can use this to be more specific about interpreter selection."""
interpreters = self.interpreter_cache.select_interpreter(
list(self.interpreter_cache.matches(filters)))
if len(interpreters) != 1:
raise TaskError('Unable to detect a suitable interpreter.')
interpreter = interpreters[0]
self.context.log.debug('Selected %s' % interpreter)
return interpreter
@contextmanager
def temporary_pex_builder(self, interpreter=None, pex_info=None, parent_dir=None):
"""Yields a PEXBuilder and cleans up its chroot when it goes out of context."""
path = tempfile.mkdtemp(dir=parent_dir)
builder = PEXBuilder(path=path, interpreter=interpreter, pex_info=pex_info)
yield builder
builder.chroot().delete()
| fakeNetflix/square-repo-pants | src/python/pants/backend/python/tasks/python_task.py | python_task.py | py | 4,295 | python | en | code | 0 | github-code | 36 |
31803868279 | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
import heapq
class Solution(object):
def openLock(self, deadends, target):
"""
:type deadends: List[str]
:type target: str
:rtype: int
"""
heap = []
heapq.heappush(heap, (0, "0000"))
visited = set(deadends)
m_plus = {}
m_minus = {}
for i in range(10):
m_minus[str(i)] = str((i-1+10)%10)
m_plus[str(i)] = str((i+1)%10)
while heap:
step, cur = heapq.heappop(heap)
if cur in visited:
continue
visited.add(cur)
if cur == target:
return step
# 个位变化
for i in range(4):
buff = cur[:i] + m_minus[cur[i]] + cur[i+1:]
if buff not in visited:
heapq.heappush(heap, [step+1, buff])
buff = cur[:i] + m_plus[cur[i]] + cur[i+1:]
if buff not in visited:
heapq.heappush(heap, [step+1, buff])
return -1
def main():
s = Solution()
print(s.openLock(deadends = ["0201","0101","0102","1212","2002"], target = "0202"))
print(s.openLock( deadends = ["8887","8889","8878","8898","8788","8988","7888","9888"], target = "8888"))
if __name__ == "__main__":
main()
| bobcaoge/my-code | python/leetcode/752_Open_the_Lock.py | 752_Open_the_Lock.py | py | 1,351 | python | en | code | 0 | github-code | 36 |
31550946314 | """ Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores 'M' ou 'F'.
Caso esteja errado, peça a digitação novamente até ter um valor correto. """
sexo = str(input('Sexo [M/F]: ')).upper().strip()
while sexo not in 'MF':
print('Opção invalida, tente novamente!\n')
sexo = str(input('Sexo [M/F]: ')).upper().strip()
print(f'Sexo {sexo} registrado com sucesso!\n')
#OU
"""
if sexo == 'M' or sexo == 'F':
print('Opção VALIDA! Siga.')
while sexo != 'M' and sexo != 'F':
print('Opção invalida, tente novamente!\n')
sexo = str(input('Sexo [M/F]: ')).upper()
if sexo == 'M' or sexo == 'F':
print('Opção VALIDA! Siga.') """ | ClebersonGarcia05/curso-python | Mundo python 02/exercicíos/WHILE/ex057.py | ex057.py | py | 686 | python | pt | code | 0 | github-code | 36 |
73881050665 | import six
from sse.exceptions import SseException
__all__ = ['MethodNotAllowed', 'NotFound']
class StreamProtocolException(SseException):
"""
To avoid confusing with class naming or maybe I need some brain surgery
"""
def __init__(self, **kwargs):
for key, value in six.iteritems(kwargs):
if not hasattr(self, key):
raise TypeError("%s() received an invalid keyword %r."
"only accepts arguments that are already "
"attributes of the exception class." % (self.__class__.__name__, key))
setattr(self, key, value)
class MethodNotAllowed(StreamProtocolException):
status = 405
def __init__(self, methods, **kwargs):
self.headers = (
('Allow', '/'.join(m.upper() for m in methods)),
)
super().__init__(**kwargs)
class NotFound(StreamProtocolException):
status = 404
| Axik/instamute.io | apps/stream/exceptions.py | exceptions.py | py | 974 | python | en | code | 0 | github-code | 36 |
19052049620 | import numpy as np
from abc import ABC, abstractmethod
class Agent(ABC):
"""
Base agent class.
Represents the concept of an autonomous agent.
Attributes
----------
name: str
Name for identification purposes.
observation: np.ndarray
The most recent observation of the environment
Methods
-------
see(observation)
Collects an observation
action(): int
Abstract method.
Returns an action, represented by an integer
May take into account the observation (numpy.ndarray).
References
----------
..[1] Michael Wooldridge "An Introduction to MultiAgent Systems - Second
Edition", John Wiley & Sons, p 44.
"""
def __init__(self, name: str, n_agents: int, agent_id):
"""
Initialize the agent
"""
self.agent_id = agent_id
self.name = name
self.n_agents = n_agents
self.coords = None
self.turn_dir = None
self.diagonal_square = None
self.observation = None
self.vector = None
self.front_square = None
self.turning_square = None
self.turned = False
def reset(self):
"""
Reset variables
"""
self.coords = None
self.turn_dir = None
self.diagonal_square = None
self.observation = None
self.vector = None
self.front_square = None
self.turning_square = None
self.turned = False
def split_observations(self):
"""
Split the full observation into the 9 squares of vision
"""
dx = len(self.observation)//9
return [self.observation[i*dx : i*dx + dx] for i in range(9)]
def split_square_observation(self, obs):
"""
Split the square observation into usable values
"""
_id = obs[ : self.n_agents]
coords = obs[self.n_agents : self.n_agents + 2]
dir = obs[self.n_agents + 2 :]
_id = np.argmax(_id) + 1 if np.any(_id) else 0
return _id, list(coords), np.argmax(dir)
def see(self, observation: np.ndarray):
"""
Get the observation
"""
self.observation = observation
def calculate_direction(self, coords):
"""
Calculate the direction that the agent is moving when it starts
"""
if coords[0] == 13: self.vector = (-1, 0) #bottom
if coords[0] == 0: self.vector = (1, 0) #top
if coords[1] == 0: self.vector = (0, 1) #left
if coords[1] == 13: self.vector = (0, -1) #right
def update_direction(self, turning_dir):
"""
After reaching the turning square, update the direction vector
"""
self.turned = True
if turning_dir == 0: return
if turning_dir == 1: # Dir
mult = 1 if self.vector[0] == 0 else -1
self.vector = (self.vector[1], mult * self.vector[0])
else: # Esq
mult = 1 if self.vector[1] == 0 else -1
self.vector = (mult * self.vector[1], self.vector[0])
def calculate_turning_square(self, coords, turning_dir):
"""
Calculate the square where the car will make the turn
"""
cx, cy = coords
steps = 7 if turning_dir == 2 else 6
self.turning_square = [cx + steps * self.vector[0], cy + steps * self.vector[1]]
def calculate_diagonal_square(self):
"""
Calculate the observation square in the front-left square of the car
"""
if self.vector == (-1, 0): self.diagonal_square = 0
if self.vector == (1, 0): self.diagonal_square = 8
if self.vector == (0, 1): self.diagonal_square = 2
if self.vector == (0, -1): self.diagonal_square = 6
def calculate_forward_square(self):
"""
Calculate the observation square in front of the car
"""
if self.vector == (-1, 0): self.front_square = 1
if self.vector == (1, 0): self.front_square = 7
if self.vector == (0, 1): self.front_square = 5
if self.vector == (0, -1): self.front_square = 3
def update_information(self):
obs = self.split_observations()
_, self.coords, self.turn_dir = self.split_square_observation(obs[4])
# If agent is in the game
if not self.vector and self.coords != [0, 0]:
self.calculate_direction(self.coords)
self.calculate_forward_square()
self.calculate_turning_square(self.coords, self.turn_dir)
# If agent reached the turning square
if self.coords == self.turning_square and not self.turned:
self.update_direction(self.turn_dir)
self.calculate_forward_square()
self.calculate_diagonal_square()
# print(self.name, self.agent_id+1, self.coords, self.turn_dir, self.vector, self.front_square, self.diagonal_square)
def calculate_next_coords(self, intention):
if intention or not self.vector: # == 1
return self.coords
else:
return (self.coords[0] + self.vector[0], self.coords[1] + self.vector[1])
@abstractmethod
def action(self, intentions) -> int:
raise NotImplementedError()
@abstractmethod
def intention(self, intentions) -> int:
raise NotImplementedError() | BeatrizVenceslau/MastersDegree-Projects | Autonomous Agents and Multi Agent Systems/Code/aasma/agent.py | agent.py | py | 5,564 | python | en | code | 0 | github-code | 36 |
21365635224 | import numpy as np
from dptb.utils.tools import j_must_have
from dptb.utils.make_kpoints import ase_kpath, abacus_kpath, vasp_kpath
from ase.io import read
import ase
import matplotlib.pyplot as plt
import matplotlib
import logging
log = logging.getLogger(__name__)
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
class bandcalc(object):
def __init__ (self, apiHrk, run_opt, jdata):
self.apiH = apiHrk
if isinstance(run_opt['structure'],str):
self.structase = read(run_opt['structure'])
elif isinstance(run_opt['structure'],ase.Atoms):
self.structase = run_opt['structure']
else:
raise ValueError('structure must be ase.Atoms or str')
self.band_plot_options = jdata
self.results_path = run_opt.get('results_path')
self.apiH.update_struct(self.structase)
self.ref_band = self.band_plot_options.get("ref_band", None)
self.use_gui = self.band_plot_options.get("use_gui", False)
def get_bands(self):
kline_type = self.band_plot_options['kline_type']
if kline_type == 'ase':
kpath = self.band_plot_options['kpath']
nkpoints = self.band_plot_options['nkpoints']
self.klist, self.xlist, self.high_sym_kpoints, self.labels = ase_kpath(structase=self.structase,
pathstr=kpath, total_nkpoints=nkpoints)
elif kline_type == 'abacus':
kpath = self.band_plot_options['kpath']
self.labels = self.band_plot_options['klabels']
self.klist, self.xlist, self.high_sym_kpoints = abacus_kpath(structase=self.structase, kpath=kpath)
elif kline_type == 'vasp':
kpath = self.band_plot_options['kpath']
high_sym_kpoints_dict = self.band_plot_options['high_sym_kpoints']
number_in_line = self.band_plot_options['number_in_line']
self.klist, self.xlist, self.high_sym_kpoints, self.labels = vasp_kpath(structase=self.structase,
pathstr=kpath, high_sym_kpoints_dict=high_sym_kpoints_dict, number_in_line=number_in_line)
else:
log.error('Error, now, kline_type only support ase_kpath, abacus, or vasp.')
raise ValueError
all_bonds, hamil_blocks, overlap_blocks = self.apiH.get_HR()
self.eigenvalues, self.estimated_E_fermi = self.apiH.get_eigenvalues(self.klist)
if self.band_plot_options.get('E_fermi',None) != None:
self.E_fermi = self.band_plot_options['E_fermi']
log.info(f'set E_fermi from jdata: {self.E_fermi}, While the estimated value in line-mode is {self.estimated_E_fermi}')
else:
self.E_fermi = 0.0
log.info(f'set E_fermi = 0.0, While the estimated value in line-mode is {self.estimated_E_fermi}')
eigenstatus = {'klist': self.klist,
'xlist': self.xlist,
'high_sym_kpoints': self.high_sym_kpoints,
'labels': self.labels,
'eigenvalues': self.eigenvalues,
'E_fermi': self.E_fermi }
np.save(f'{self.results_path}/bandstructure',eigenstatus)
return eigenstatus
def get_HR(self):
all_bonds, hamil_blocks, overlap_blocks = self.apiH.get_HR()
return all_bonds, hamil_blocks, overlap_blocks
def band_plot(self):
matplotlib.rcParams['font.size'] = 7
matplotlib.rcParams['pdf.fonttype'] = 42
# plt.rcParams['font.sans-serif'] = ['Times New Roman']
emin = self.band_plot_options.get('emin')
emax = self.band_plot_options.get('emax')
fig = plt.figure(figsize=(4.5,4),dpi=100)
ax = fig.add_subplot(111)
band_color = '#5d5d5d'
# plot the line
if self.ref_band:
ref_eigenvalues = np.load(self.ref_band)
if len(ref_eigenvalues.shape) == 3:
ref_eigenvalues = ref_eigenvalues.reshape(ref_eigenvalues.shape[1:])
elif len(ref_eigenvalues.shape) != 2:
log.error("Reference Eigenvalues' shape mismatch.")
raise ValueError
if ref_eigenvalues.shape[0] != self.eigenvalues.shape[0]:
log.error("Reference Eigenvalues' should have sampled from the sample kpath as model's prediction.")
raise ValueError
ref_eigenvalues = ref_eigenvalues - (np.min(ref_eigenvalues) - np.min(self.eigenvalues))
nkplot = (len(np.unique(self.high_sym_kpoints))-1) * 7
nintp = len(self.xlist) // nkplot
if nintp == 0:
nintp = 1
band_ref = ax.plot(self.xlist[::nintp], ref_eigenvalues[::nintp] - self.E_fermi, 'o', ms=4, color=band_color, alpha=0.8, label="Ref")
band_pre = ax.plot(self.xlist, self.eigenvalues - self.E_fermi, color="tab:red", lw=1.5, alpha=0.8, label="DeePTB")
else:
ax.plot(self.xlist, self.eigenvalues - self.E_fermi, color="tab:red",lw=1.5, alpha=0.8)
# add verticle line
for ii in self.high_sym_kpoints[1:-1]:
ax.axvline(ii, color='gray', lw=1,ls='--')
# add shadow
# for i in range(self.eigenvalues.shape[1]):
# ax.fill_between(self.xlist, self.eigenvalues[:,i] - self.E_fermi, -2, alpha=0.05, color=band_color)
# add ticks
if not (emin is None or emax is None):
ax.set_ylim(emin,emax)
ax.set_xlim(self.xlist.min()-0.03,self.xlist.max()+0.03)
ax.set_ylabel('E - EF (eV)',fontsize=12)
ax.yaxis.set_minor_locator(MultipleLocator(1.0))
ax.tick_params(which='both', direction='in', labelsize=12, width=1.5)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=4, color='gray')
# ax.set_yticks(None, fontsize=12)
ax.set_xticks(self.high_sym_kpoints, self.labels, fontsize=12)
ax.grid(color='gray', alpha=0.2, linestyle='-', linewidth=1)
ax.set_axisbelow(True)
fig.patch.set_facecolor('#f2f2f2')
fig.patch.set_alpha(1)
for spine in ax.spines.values():
spine.set_edgecolor('#5d5d5d')
spine.set_linewidth(1.5)
if self.ref_band:
plt.legend(handles=[band_pre[0], band_ref[0]], loc="best")
plt.tight_layout()
# remove the box around the plot
ax.set_frame_on(False)
plt.savefig(f'{self.results_path}/band.png',dpi=300)
if self.use_gui:
plt.show()
| deepmodeling/DeePTB | dptb/postprocess/bandstructure/band.py | band.py | py | 6,701 | python | en | code | 21 | github-code | 36 |
26500786049 | import os
import cv2
import json
import random
import itertools
import numpy as np
import argparse
import cv2
from time import gmtime, strftime
def predict(image, predictor, list_labels):
outputs = predictor(image)
boxes = outputs['instances'].pred_boxes
scores = outputs['instances'].scores
classes = outputs['instances'].pred_classes
list_boxes = []
# list_paths = []
# list_vehicles = []
list_scores = []
list_classes = []
for i in range(len(classes)):
if (scores[i] > 0.6):
for j in boxes[i]:
x1 = int(j[0])
y1 = int(j[1])
x2 = int(j[2])
y2 = int(j[3])
print("min: ", (x1, y1))
print("max: ", (x2, y2))
score = float(scores[i])
class_id = list_labels[int(classes[i])]
list_boxes.append([x1, y1, x2, y2])
list_scores.append(score)
list_classes.append(class_id)
return list_boxes, list_scores, list_classes
| tiendv/MCOCR2021 | Task2/submit_task2/detect_receipt_api/src/predict.py | predict.py | py | 1,032 | python | en | code | 9 | github-code | 36 |
25523783356 | import os
from flask import Flask, make_response, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from database import app
# NOTE(review): UPLOAD_FOLDER below is never used -- the effective location
# is the cwd-relative path stored in app.config a few lines down; verify the
# process working directory matches what is expected.
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = { 'png', 'jpg', 'mp3' } #to change for music files
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), "file_transfer/uploads")
# Debug print of the working directory the upload folder is resolved against.
print(os.getcwd())
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/file/<filename>', methods = ['GET'])
def file_download(filename):
    # Serve the file as an attachment; send_from_directory performs the safe
    # path join (per Flask docs it rejects traversal outside the directory).
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True)
@app.route('/file', methods = ['POST'])
def file_upload():
    """Store an uploaded file in the upload folder.

    Every failure path now returns an explicit response; the original fell
    off the end (returned None -> HTTP 500) when the extension was not in
    ALLOWED_EXTENSIONS.
    """
    if 'file' not in request.files:
        return "ERROR : No file part", 404
    file = request.files['file']
    if file.filename == '':
        return "ERROR : No selected file", 401
    if not allowed_file(file.filename):
        return "ERROR : File type not allowed", 400
    # secure_filename strips path components so the client cannot write
    # outside the upload folder.
    filename = secure_filename(file.filename)
    file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    return "File Successfully Uploaded"
@app.route('/file/<filename>', methods = ['DELETE'])
def delete_items(filename):
    """Delete an uploaded file.

    The client-supplied name is passed through secure_filename() first
    (consistent with file_upload) so a crafted value cannot reference a
    path outside the upload folder, and a missing file yields a 404
    instead of an unhandled FileNotFoundError (HTTP 500).
    """
    filename = secure_filename(filename)
    target = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    if not os.path.exists(target):
        return "ERROR : No such file", 404
    os.remove(target)
    return "File Successfully Deleted"
@app.errorhandler(401)
@app.errorhandler(404)
@app.errorhandler(500)
def ma_page_erreur(error):
return "Error {}".format(error.code), error.code
if __name__ == "__main__":
app.run()
| arkea-tech/YEP_EpiKodi3_2020 | server/file_transfer/views.py | views.py | py | 1,512 | python | en | code | 0 | github-code | 36 |
17736596508 | # -*- coding: utf-8 -*-
import rhinoscriptsyntax as rs
import scriptcontext as sc
import Rhino
import time
import gc
# この中の変数は処理終了後に消えない/////////////////////////////////////////////////
if 'count_loop' not in sc.sticky:
sc.sticky['count_loop'] = 0# 何個目の壁なのか示す数字
if 'dict_distance' not in sc.sticky:
sc.sticky['dict_distance'] = {}# 下記
if 'dict_combi' not in sc.sticky:
sc.sticky['dict_combi'] = {}# 壁に関する情報を格納する辞書
if 'count_error' not in sc.sticky:
sc.sticky['count_error'] = 0# ×の曲線が違う時に数字が増える
if 'list_usedindex' not in sc.sticky:
sc.sticky['list_usedindex'] = []# 使用済みのインデックス
if 'list_history' not in sc.sticky:
sc.sticky['list_history'] = []# 操作の記録
# 初回だけ距離計算をする/////////////////////////////////////////////////
# 辞書(キー:曲線のインデックス 値:距離の近いものを順番に並べたリスト)を生成する
def Findclosestpt(crvs):
dict_dist = {}
list_midpt = [rs.CurveMidPoint(crv) for crv in crvs]
count = 0
while count != len(crvs):
list_distance = [int(rs.Distance(list_midpt[count], i)) for i in list_midpt]
dict_dist[count] = sorted(range(len(list_distance)), key=lambda k: list_distance[k])
count += 1
return dict_dist
start = time.time()
if sc.sticky['count_loop'] == 0:# ここで初回かどうか判断
print("Start Calculate")
sc.sticky['dict_distance'] = Findclosestpt(curve)
print("End Calculate")
else:
print("Skip Calculate")
end = time.time() - start
print("calculatetime = " + str(end))
# 2つのインデックスを求める/////////////////////////////////////////////////
for remain in range(len(curve)):
if remain in sc.sticky['list_usedindex']:
pass
else:
index_o = remain#2つのうちの片方のインデックス
break
list_loft_x = sc.sticky['dict_distance'].values()[index_o]
index_x = list_loft_x[1 + sc.sticky['count_error']]#もう片方のインデックス
# 辞書に登録する情報を作成/////////////////////////////////////////////////
class Info():
def __init__(self, crvs, ind_o, ind_x, num_loop, mode, thick, height):#コンストラクタ
self.crvs = crvs
self.ind_o = ind_o
self.ind_x = ind_x
self.num_loop = num_loop
self.mode = mode
self.thick = thick
self.height = height
def get_wall(self):# 壁のインデックスを返却
return self.num_loop
def get_o(self):# 曲線のインデックス1を返却
return self.ind_o
def get_x(self):# 曲線のインデックス2を返却
return self.ind_x
def get_thick(self):# 壁の厚みを返却
return self.thick
def get_height(self):# 壁の高さを返却
return self.height
def get_adjust(self):# 曲線の長さ調整の仕方を返却
if self.mode == "extend":# 長い方に揃える
if rs.CurveLength(self.crvs[self.ind_o]) >= rs.CurveLength(self.crvs[self.ind_x]):
pattern = 1
else:
pattern = 2
else:# 短い方に揃える
if rs.CurveLength(self.crvs[self.ind_o]) >= rs.CurveLength(self.crvs[self.ind_x]):
pattern = 3
else:
pattern = 4
return pattern
data = Info(curve, index_o, index_x, sc.sticky["count_loop"], mode, thickness, height)
sc.sticky["dict_combi"][data.get_wall()] = (data.get_o(), data.get_x(), data.get_adjust(), data.get_thick(), data.get_height(),)
# プレビューを作成/////////////////////////////////////////////////
def Previewwall(dict, index, crvs, zoommode):# 壁を作る
# get_adjustの中身で処理を決める
if dict[index][2] == 1:
A = crvs[dict[index][0]]
B = rs.OffsetCurve(crvs[dict[index][0]], rs.CurveMidPoint(crvs[dict[index][1]]), dict[index][3])
elif dict[index][2] == 2:
A = rs.OffsetCurve(crvs[dict[index][1]], rs.CurveMidPoint(crvs[dict[index][0]]), dict[index][3])
B = crvs[dict[index][1]]
elif dict[index][2] == 3:
A = rs.OffsetCurve(crvs[dict[index][1]], rs.CurveMidPoint(crvs[dict[index][0]]), dict[index][3])
B = crvs[dict[index][1]]
elif dict[index][2] == 4:
A = crvs[dict[index][0]]
B = rs.OffsetCurve(crvs[dict[index][0]], rs.CurveMidPoint(crvs[dict[index][1]]), dict[index][3])
obj_srf = rs.AddLoftSrf([A, B])
obj_height = rs.AddLine((0,0,0), (0,0,dict[index][4]))
obj_wall = rs.ExtrudeSurface(obj_srf, obj_height)
if zoommode:# 壁と曲線で箱を作り、箱をズーム
obj_zoom = rs.BoundingBox([obj_wall, crvs[dict[index][0]], crvs[dict[index][1]]])
rs.ZoomBoundingBox(obj_zoom)
return obj_wall
def Previewmark(crvs, dict, index):# 線の目印を作る
pt_o = rs.DivideCurve(crvs[dict[index][0]], 10)
pt_x = rs.DivideCurve(crvs[dict[index][1]], 10)
mark_o = [rs.AddCircle(cen, mark) for cen in pt_o]
mark_x1 = [rs.AddLine((cen[0] - mark,cen[1],cen[2]), (cen[0] + mark,cen[1],cen[2])) for cen in pt_x]
mark_x2 = [rs.AddLine((cen[0],cen[1] - mark,cen[2]), (cen[0],cen[1] + mark,cen[2])) for cen in pt_x]
a = mark_o
b = mark_x1 + mark_x2
return (a, b)
start = time.time()
if next or loft_o or loft_x or undo or bake == True:# ボタンを押した瞬間はプレビュー省略
print("preview pass")
else:
if past == False:# 最後の壁だけ表示
print("preview only one")
preview_wall = Previewwall(sc.sticky["dict_combi"], data.get_wall(), curve, zoom)
else:# 全部の壁を表示
print("preview all")
preview_wall = [Previewwall(sc.sticky["dict_combi"], i, curve, zoom) for i in sc.sticky["dict_combi"]]
preview_o = Previewmark(curve, sc.sticky["dict_combi"], data.get_wall())[0]
preview_x = Previewmark(curve, sc.sticky["dict_combi"], data.get_wall())[1]
end = time.time() - start
print("previewtime = " + str(end))
# ボタンが押されたときは以下の操作を行う/////////////////////////////////////////////////
if next:# 次の組み合わせを探す
print("Input Key = next")
if len(curve) - len(sc.sticky['list_usedindex']) <= 2:
bake = True
print("Start Bake")
else:
sc.sticky['count_loop'] += 1
sc.sticky['count_error'] = 0
if data.get_o() not in sc.sticky['list_usedindex']:# 使用済みインデックスが重複しないように
sc.sticky['list_usedindex'].append(data.get_o())
if data.get_x() not in sc.sticky['list_usedindex']:
sc.sticky['list_usedindex'].append(data.get_x())
sc.sticky['list_history'].append(2)
if loft_o:# ロフト元が違う場合
print("Input Key = loft_o")
if len(curve) - len(sc.sticky['list_usedindex']) <= 2:
bake = True
print("Start Bake")
else:
sc.sticky['list_usedindex'].append(index_o)
del sc.sticky['dict_combi'][sc.sticky['count_loop']]
sc.sticky['count_loop'] += 1
sc.sticky['count_error'] = 0
sc.sticky['list_history'].append(1)
if loft_x:# ロフト先が違う場合
print("Input Key = loft_x")
del sc.sticky['dict_combi'][sc.sticky['count_loop']]
if sc.sticky['count_error'] == len(list_loft_x) - 2:
sc.sticky['count_error'] = 0
elif sc.sticky['count_error'] == 14:# 15個目の候補まで表示
sc.sticky['count_error'] = 0
else:
sc.sticky['count_error'] += 1
def Undo():# アンドゥ
print("Input Key = Undo")
if sc.sticky['count_loop'] == 0:
sc.sticky['list_usedindex'] = []
sc.sticky['list_history'] = []
sc.sticky['count_error'] = 0
else:
if sc.sticky['list_history'][-1] == 2:
del sc.sticky['dict_combi'][sc.sticky['count_loop']]
del sc.sticky['dict_combi'][sc.sticky['count_loop'] - 1]
del sc.sticky['list_usedindex'][-1]
del sc.sticky['list_usedindex'][-1]
else:
del sc.sticky['list_usedindex'][-1]
del sc.sticky['list_history'][-1]
sc.sticky['count_error'] = 0
sc.sticky['count_loop'] = sc.sticky['count_loop'] - 1
if undo:
Undo()
def Reset():# リセットするときの挙動
print("Input Key = Undo")
sc.sticky['count_loop'] = 0
sc.sticky['count_error'] = 0
sc.sticky['dict_distance'] = {}
sc.sticky['dict_combi'] = {}
sc.sticky['preview'] = []
sc.sticky['list_usedindex'] = []
sc.sticky['list_history'] = []
gc.collect()# メモリ解放
if bake:# ベイクする
print("Input Key = bake")
obj_bake = [Previewwall(sc.sticky["dict_combi"], i, curve, zoom) for i in sc.sticky["dict_combi"]]
sc.doc = Rhino.RhinoDoc.ActiveDoc
doc_layer = rs.GetLayer("ベイク先のレイヤーを選択", None, True, True)# ベイクするレイヤーを選択
sc.doc = ghdoc
for wall in obj_bake:
doc_obj = rs.coercerhinoobject(wall)
doc_attributes = doc_obj.Attributes
doc_geometry = doc_obj.Geometry
sc.doc = Rhino.RhinoDoc.ActiveDoc
rhino_obj = sc.doc.Objects.Add(doc_geometry, doc_attributes)
rs.ObjectLayer(rhino_obj, doc_layer)
sc.doc = ghdoc
Reset()
O = data.ind_o
X = data.ind_x
count_error = "✕は" + str(sc.sticky['count_error'] + 1) + "番目の候補です"
len_index = len(sc.sticky['list_usedindex']) | fuku0211/wallconstructor | wallconstructor/wallconstructor.py | wallconstructor.py | py | 8,744 | python | en | code | 0 | github-code | 36 |
19982370070 | from defaults import * # noqa
SECRET_KEY = open(SECRET_KEY_FILE).read()
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.tutorons.com']
STATICFILES_DIRS += ((os.path.join(os.path.abspath(os.sep), 'var', 'www', 'tutorons')),)
STATIC_ROOT = os.path.join(os.path.abspath(os.sep), 'usr', 'local', 'tutorons', 'static')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
# We store expensive computations that will be performed infrequently
# So, we'll just save the results for all computations indefinitely.
'TIMEOUT': None,
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S",
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/tutorons.log',
'formatter': 'verbose',
}
},
'loggers': {
'django': {
'handlers': ['file'],
'propagate': True,
'level': 'DEBUG',
},
'tutorons': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
| andrewhead/tutorons-server | tutorons/settings/production.py | production.py | py | 1,374 | python | en | code | 6 | github-code | 36 |
8439507443 | # Given a number sequence, find the length of its Longest Increasing Subsequence (LIS).
# In an increasing subsequence, all the elements are in increasing order (from lowest to highest).
# Input: {4,2,3,6,10,1,12}
# Output: 5
# Explanation: The LIS is {2,3,6,10,12}.
def find_LIS(arr):
return find_LIS_rec(arr, 0, -1)
def find_LIS_rec(arr, current_index, prev_index):
if current_index == len(arr):
return 0
c1 = 0
if prev_index == -1 or arr[current_index] > arr[prev_index]:
c1 = 1 + find_LIS_rec(arr, current_index+1, current_index)
c2 = find_LIS_rec(arr, current_index+1, prev_index)
return max(c1, c2)
def find_LIS_length(arr):
n1 = len(arr)
dp = [1 for _ in range(n1)]
max_length = 0
for i in range(1, n1):
for j in range(i):
if arr[i] > arr[j] and dp[i] < dp[j]+1:
dp[i] = dp[j] + 1
max_length = max(max_length, dp[i])
return max_length
def longest_increasing_subsequence(nums):
dp = [1 for _ in range(len(nums))]
max_length = 1
for i in range(1, len(nums)):
for j in range(i):
if nums[i] > nums[j]:
dp[i] = max(dp[i], 1+dp[j])
max_length = max(max_length, dp[i])
return max_length
def main():
print(find_LIS_length([4, 2, 3, 6, 10, 1, 12]))
print(find_LIS_length([-4, 10, 3, 7, 15]))
print(find_LIS([4, 2, 3, 6, 10, 1, 12]))
print(find_LIS([-4, 10, 3, 7, 15]))
print("********")
print(longest_increasing_subsequence([4, 2, 3, 6, 10, 1, 12]))
print(longest_increasing_subsequence([-4, 10, 3, 7, 15]))
print(longest_increasing_subsequence([4, 2, 3, 6, 10, 1, 12]))
print(longest_increasing_subsequence([-4, 10, 3, 7, 15]))
if __name__ == "__main__":
main()
| kashyapa/coding-problems | educative.io/medium-dp/longest-common-subsequence/4_longest_increasing_subsequence.py | 4_longest_increasing_subsequence.py | py | 1,793 | python | en | code | 0 | github-code | 36 |
14575225186 | def filter(lines, val0, val1):
for i in range(len(lines[0])):
count = [0, 0]
for line in lines:
count[int(line[i])] += 1
if count[0] > count[1]:
lines = [line for line in lines if line[i] == val0]
else:
lines = [line for line in lines if line[i] == val1]
if len(lines) == 1:
return lines[0]
from sys import stdin
lines = [line.strip() for line in stdin]
oxygen = filter(lines[:], '0', '1')
co2 = filter(lines[:], '1', '0')
print(int(oxygen, base=2) * int(co2, base=2))
| vfolunin/archives-solutions | Advent of Code/2021/3.2.py | 3.2.py | py | 568 | python | en | code | 0 | github-code | 36 |
34743814847 | from typing import List
file = "input02.txt"
with open(file, "rt") as f:
ids = f.readlines()
# Part 1
doubles = 0
triples = 0
for id in ids:
double_found = False
triple_found = False
for c in id:
reps = id.count(c)
if reps == 2 and not double_found:
doubles += 1
double_found = True
if reps == 3 and not triple_found:
triples += 1
triple_found = True
if double_found and triple_found:
break
print(f"Number of doubles: {doubles}")
print(f"Number of triples: {triples}")
print(f"Checksum: {doubles*triples}")
# Part 2
def check(x: str, y: str) -> bool:
"""
Whether two strings differ at most at one position
"""
if not x:
return not y
x0, x_tail = x[0], x[1:]
y0, y_tail = y[0], y[1:]
return (x_tail == y_tail) or (x0 == y0 and check(x_tail, y_tail))
def find_ids(ids: List[str]) -> str:
for i, x in enumerate(ids[:-1]):
for y in ids[i + 1:]:
found = check(x, y)
if found:
print(f"The found ids: {x}, {y}")
common_sub = ""
for i, c in enumerate(x):
if y[i] == c:
common_sub += c
return(common_sub)
raise ValueError("No correct ids were found")
res = find_ids(ids)
print(f"The common substring: {res}")
| acanizares/advent_of_code | day02.py | day02.py | py | 1,405 | python | en | code | 0 | github-code | 36 |
12859158519 | import torch
import numpy as np
import matplotlib.pyplot as plt
print("PyTorch Version:", torch.__version__)
if torch.backends.mps.is_available():
mps_device = torch.device("mps")
print(mps_device)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
X, Y = np.mgrid[-4.0:4:0.01, -4.0:4:0.01]
x = torch.Tensor(X)
y = torch.Tensor(Y)
x = x.to(device)
y = y.to(device)
z = torch.sin(3*x + y) * torch.exp(-(x**2+y**2)/2.0)
plt.imshow(z.numpy())
plt.tight_layout()
plt.show()
| rwardd/comp3710 | prac1/gaussian.py | gaussian.py | py | 509 | python | en | code | 0 | github-code | 36 |
21374477352 | from django import template
register = template.Library()
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
@register.filter
def first_letter_word(value):
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(value)
filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words]
removetable = str.maketrans('', '', '!@#$%^&*()_+-=[]{};:\"\',./<>?\|')
final = [s.translate(removetable) for s in filtered_sentence]
final = [s for s in final if s != '']
chindex=1
arr = []
for letter in final:
if chindex==1:
arr.append(letter[0].upper())
else:
arr.append(letter)
out = "".join(arr)
return out | 4akhilkumar/akira_project | akira_apps/staff/templatetags/first_letter_word.py | first_letter_word.py | py | 744 | python | en | code | 0 | github-code | 36 |
12760265976 | import numpy as np
import matplotlib.pyplot as plt
sp_LL=np.loadtxt('spline_LL.txt')
sp_LH=np.loadtxt('spline_LH.txt')
sp_HT=np.loadtxt('spline_HT.txt')
sp_R=np.loadtxt('spline_R.txt')
##
AA=np.loadtxt('HT_p_T_c')
BB=np.loadtxt('LH_p_T_c')
CC=np.loadtxt('R_p_T_c')
DD=np.loadtxt('LL_p_T_c')
EE=np.loadtxt('TP_T-x-Perr-p-u-v.txt')
###%%% Top %%%
#Top = np.zeros((10,2))
#Top[:,0] = np.linspace(0.0088,0.1706,num=10);
#Top[:,1] = 68569;
u_HT_A = AA[:,0]
v_HT_A = AA[:,1]
#p_HT_A = AA[:,2]
T_HT_A = AA[:,3]
t_HT_A = AA[:,3]
u_LH_B = BB[:,0]
v_LH_B = BB[:,1]
#p_LH_B = BB[:,2]
T_LH_B = BB[:,3];
u_R_C = CC[:,0];
v_R_C = CC[:,1];
#p_R_C = CC(:,3);
T_R_C = CC[:,3];
u_LL_D = DD[:,0];
v_LL_D = DD[:,1];
#p_LL_D = DD(:,3);
T_LL_D = DD[:,3];
u_TP_E = EE[:,4]
v_TP_E = EE[:,5]
#p_TP_E = EE[:,3]
T_TP_E = EE[:,0]
###########################isobar single phase 8,5,2,1######################################
u_HT_isoT400 = []
v_HT_isoT400 = []
u_HT_isoT350 = []
v_HT_isoT350 = []
u_HT_isoT300 = []
v_HT_isoT300 = []
u_HT_isoT230 = []
v_HT_isoT230 = []
u_HT_isoT250 = []
v_HT_isoT250 = []
#
u_LH_isoT400 = []
v_LH_isoT400 = []
u_LH_isoT350 = []
v_LH_isoT350 = []
u_LH_isoT300 = []
v_LH_isoT300 = []
#
u_TP_isoT2166 = []
v_TP_isoT2166 = []
u_TP_isoT230 = []
v_TP_isoT230 = []
u_TP_isoT250 = []
v_TP_isoT250 = []
u_TP_isoT300 = []
v_TP_isoT300 = []
#
u_R_isoT230 = []
v_R_isoT230 = []
#
u_LL_isoT350 = []
v_LL_isoT350 = []
u_LL_isoT300 = []
v_LL_isoT300 = []
u_LL_isoT250 = []
v_LL_isoT250 = []
u_LL_isoT230 = []
v_LL_isoT230 = []
#
for i in range(len(u_HT_A)):
if T_HT_A[i] <=6.03e2 and T_HT_A[i]>=5.99e2:
u_HT_isoT400.append(u_HT_A[i]/1000)
v_HT_isoT400.append(v_HT_A[i])
if T_HT_A[i] <=3.51e2 and T_HT_A[i]>=3.48e2:
u_HT_isoT350.append(u_HT_A[i]/1000)
v_HT_isoT350.append(v_HT_A[i])
if T_HT_A[i] <=3.01e2 and T_HT_A[i]>=2.95e2:
u_HT_isoT300.append(u_HT_A[i]/1000)
v_HT_isoT300.append(v_HT_A[i])
if T_HT_A[i] <=2.31e2 and T_HT_A[i]>=2.21e2:
u_HT_isoT230.append(u_HT_A[i]/1000)
v_HT_isoT230.append(v_HT_A[i])
if T_HT_A[i] <=2.51e2 and T_HT_A[i]>=2.45e2:
u_HT_isoT250.append(u_HT_A[i]/1000)
v_HT_isoT250.append(v_HT_A[i])
#
for i in range(len(u_LH_B)):
# if T_LH_B[i] <=4.01e2 and T_LH_B[i]>=3.99e2:
# u_LH_isoT400.append(u_LH_B[i]/1000)
# v_LH_isoT400.append(v_LH_B[i])
if T_LH_B[i] <=3.51e2 and T_LH_B[i]>=3.497e2:
u_LH_isoT350.append(u_LH_B[i]/1000)
v_LH_isoT350.append(v_LH_B[i])
if T_LH_B[i] <=3.01e2 and T_LH_B[i]>=2.999e2:
u_LH_isoT300.append(u_LH_B[i]/1000)
v_LH_isoT300.append(v_LH_B[i])
#############################isobar two-phase####################################
for i in range(len(u_TP_E)):
if T_TP_E[i] <=2.167e2 and T_TP_E[i]>=2.164e2:
# print(T_TP_E[i])
u_TP_isoT2166.append(u_TP_E[i]/1000)
v_TP_isoT2166.append(v_TP_E[i])
if T_TP_E[i] <=2.3001e2 and T_TP_E[i]>=2.298e2:
u_TP_isoT230.append(u_TP_E[i]/1000)
v_TP_isoT230.append(v_TP_E[i])
if T_TP_E[i] <=2.5001e2 and T_TP_E[i]>=2.499e2:
u_TP_isoT250.append(u_TP_E[i]/1000)
v_TP_isoT250.append(v_TP_E[i])
if T_TP_E[i] <=3.001e2 and T_TP_E[i]>=2.999e2:
u_TP_isoT300.append(u_TP_E[i]/1000)
v_TP_isoT300.append(v_TP_E[i])
#################################R#########################################
for i in range(len(u_R_C)):
if T_R_C[i] <=2.301e2 and T_R_C[i]>=2.299e2:
u_R_isoT230.append(u_R_C[i]/1000)
v_R_isoT230.append(v_R_C[i])
#########################################################################
for i in range(len(u_LL_D)):
if T_LL_D[i] <=2.301e2 and T_LL_D[i]>=2.299e2:
u_LL_isoT230.append(u_LL_D[i]/1000)
v_LL_isoT230.append(v_LL_D[i])
if T_LL_D[i] <=2.5001e2 and T_LL_D[i]>=2.499e2:
u_LL_isoT250.append(u_LL_D[i]/1000)
v_LL_isoT250.append(v_LL_D[i])
if T_LL_D[i] <=3.001e2 and T_LL_D[i]>=2.999e2:
u_LL_isoT300.append(u_LL_D[i]/1000)
v_LL_isoT300.append(v_LL_D[i])
if T_LL_D[i] <=3.51e2 and T_LL_D[i]>=3.497e2:
u_LL_isoT350.append(u_LL_D[i]/1000)
v_LL_isoT350.append(v_LL_D[i])
########################################################################
plt.figure(figsize=(9,4))
#
plt.semilogx(sp_R[:,0],sp_R[:,2]/1e3,color='r',linewidth=2)
plt.semilogx(sp_R[:,1],sp_R[:,2]/1e3,color='k',linewidth=2)
plt.semilogx(sp_HT[:,0],sp_HT[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_HT[:,1],sp_HT[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_LH[:,0],sp_LH[:,2]/1e3,color='r',linewidth=2)
plt.semilogx(sp_LH[:,1],sp_LH[:,2]/1e3,linestyle='--',color='k',linewidth=2)
plt.semilogx(sp_LL[:,0],sp_LL[:,2]/1e3,linestyle='-',color='b',linewidth=2)
plt.semilogx(sp_LL[:,1],sp_LL[:,2]/1e3,linestyle='--',color='k',linewidth=2)
###############################################################################
plt.semilogx(v_HT_isoT400,u_HT_isoT400,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT350,u_HT_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT300,u_HT_isoT300,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT250,u_HT_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_HT_isoT230,u_HT_isoT230,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_LH_isoT400,u_LH_isoT400,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LH_isoT350,u_LH_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LH_isoT300,u_LH_isoT300,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_R_isoT230,u_R_isoT230,color='k',linewidth=2,linestyle=':')
#
plt.semilogx(v_LL_isoT350,u_LL_isoT350,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT300,u_LL_isoT300,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT250,u_LL_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_LL_isoT230,u_LL_isoT230,color='k',linewidth=2,linestyle=':')
#########################TWO-PHASE########################################
plt.semilogx(v_TP_isoT2166,u_TP_isoT2166,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT230,u_TP_isoT230,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT250,u_TP_isoT250,color='k',linewidth=2,linestyle=':')
plt.semilogx(v_TP_isoT300,u_TP_isoT300,color='k',linewidth=2,linestyle=':')
#####control des labels
ax = plt.gca()
#ax.set_xlim([0.015, 1e-1])
ax.set_ylim([-4.5e2, 3e2])
#plt.title('Isothermal curves',fontsize=17)
plt.ylabel('Internal Energy, e $[kJkg^{-1}]$',fontsize=15,position=(0,0.5),rotation = "vertical")
plt.xlabel('Specific Volume, v $[m^{-3}kg]$',fontsize=15,rotation = "horizontal")
plt.xticks(size = 12)
plt.yticks(size = 12)
plt.grid(True)
#plt.axis([-4,4,-0.3,1.5])
#plt.xlabel('X', color='C1')
#plt.ylabel('X', color='0.5') # grayscale color
#ax.xaxis.set_label_coords(0.5,-0.05)
#ax.yaxis.set_label_coords(-0.08,0.5)
#####################################
ax.text(2.0e-2,200, '$600K$', fontsize=10)
ax.text(2.1e-2,-25, '$350K$', fontsize=10)
ax.text(2.8e-3,-200, '$300K$', fontsize=10)
ax.text(0.9e-2,-210, '$250K$', fontsize=10)
ax.text(2.0e-2,-210, '$230K$', fontsize=10)
ax.text(3.3e-2,-210, '$216.6K$', fontsize=10)
#############################################################################
plt.tight_layout()
plt.savefig("isotherm.pdf")
plt.show()
| yufang67/CO2-look-up-table | program/isotherm.py | isotherm.py | py | 7,331 | python | en | code | 3 | github-code | 36 |
4108034467 | from sys import stdin
# Shortest-route problem: Dijkstra over travel time (minutes), with ties broken
# by the fewest road segments, followed by re-walking the chosen route at 75%
# of each road's speed to compute the extra time needed.
input = stdin.readline  # fast input: deliberately shadows the builtin input()
intersections = int(input())
# roads[a][b] = [travel_minutes, distance, speed] for the best road between a and b
roads = {x: {} for x in range(1, intersections + 1)}
distance, adj, before = {}, {}, [0] * (intersections + 1)
queue = []
# print(roads)
num = int(input())
for _ in range(num):
    m, n, d, s = [int(x) for x in input().split()]
    time = float(d) / s * 60  # minutes to traverse this road
    # keep only the fastest road between each pair of intersections
    if n not in roads[m]:
        roads[m][n] = [time, d, s]
        roads[n][m] = [time, d, s]
    elif roads[m][n][0] > time:
        roads[m][n] = [time, d, s]
        roads[n][m] = [time, d, s]
# print(roads)
start = 1
# initialise Dijkstra state: "infinite" distance, no predecessor, all unvisited
for i in range(1, intersections + 1):
    distance[i] = 9999999999
    adj[i] = None
    queue.append(i)
distance[1] = 0
while queue:
    # find closest unvisited node
    # (O(V) linear scan per step — O(V^2) overall, fine for small inputs)
    keyMin = queue[0]
    minVal = distance[keyMin]
    for i in queue[1:]:
        if distance[i] < minVal:
            keyMin = i
            minVal = distance[i]
    current = keyMin
    queue.remove(current)
    # print(current)
    # get the closest paths connected to the current node
    for i in roads[current]:
        alt = roads[current][i][0] + distance[current]
        if distance[i] > alt:
            distance[i] = alt
            adj[i] = current
            before[i] = before[current] + 1
        elif distance[i] == alt:
            # equal travel time: prefer the route with fewer road segments
            if before[current] + 1 < before[i]:
                distance[i] = alt
                adj[i] = current
                before[i] = before[current] + 1
# print(distance)
# print(adj)
# print(before)
# Walk the predecessor chain back from the destination, accumulating the time
# the same route takes at 75% of each road's speed.
total = 0
x = intersections
while True:
    y = adj[x]
    total += float(roads[y][x][1]) / (roads[y][x][2] * 0.75) * 60
    x = y
    if x == 1:
        break
print(before[intersections])
# extra minutes needed at reduced speed, rounded to the nearest integer
print(round(total - distance[intersections]))
| AAZZAZRON/DMOJ-Solutions | dmopc14ce1p4.py | dmopc14ce1p4.py | py | 1,714 | python | en | code | 1 | github-code | 36 |
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Return True if pattern p matches ALL of s.

        Supports '.' (any single char) and '*' (zero or more of the
        preceding element), evaluated with memoized recursion.
        """
        memo = {}
        n, m = len(s), len(p)

        def match_from(si: int, pi: int) -> bool:
            # True iff s[si:] is matched by p[pi:].
            key = (si, pi)
            if key in memo:
                return memo[key]
            if pi >= m:
                # Pattern exhausted: match only if the string is too.
                result = si >= n
            else:
                first = si < n and p[pi] in (s[si], '.')
                if pi + 1 < m and p[pi + 1] == '*':
                    # Either skip the "x*" unit, or consume one char and retry.
                    result = match_from(si, pi + 2) or (first and match_from(si + 1, pi))
                else:
                    result = first and match_from(si + 1, pi + 1)
            memo[key] = result
            return result

        return match_from(0, 0)
| Nirmalkumarvs/programs | Backtracking/Regular Expression Matching.py | Regular Expression Matching.py | py | 729 | python | en | code | 0 | github-code | 36 |
228457289 | """
"""
import time
def print_execute_time(func):
    """Decorator: run *func* unchanged and print its wall-clock duration.

    Fix: apply functools.wraps so the wrapped function keeps its own
    __name__/__doc__ instead of appearing as 'wrapper'.
    """
    # Local import so the file's top-level import block stays untouched.
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        # run the wrapped function
        result = func(*args, **kwargs)
        stop = time.time()
        print("执行时间:", stop - start)
        return result
    return wrapper
@print_execute_time
def sum_data(n):
    """Return the sum of the integers 0..n-1 (execution is timed by the decorator)."""
    # Built-in sum over range replaces the manual accumulation loop — same
    # result, C-speed iteration.
    return sum(range(n))
print(sum_data(10))
print(sum_data(1000000))
| testcg/python | code_all/day19/exercise04.py | exercise04.py | py | 477 | python | en | code | 0 | github-code | 36 |
22017042868 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 11:48:04 2020
@author: Paradeisios
"""
from utils.computeCost import computeCost
import numpy as np
def gradientDescent(X, y, theta, alpha, iterations):
    """Batch gradient descent for linear regression.

    Performs `iterations` update steps of
        theta <- theta - alpha * (1/m) * X^T (X theta - y)
    recording the cost after every step.

    Returns:
        (theta, J_history) where J_history is an (iterations, 1) array
        of cost values.
    """
    m = len(y)
    J_history = np.zeros((iterations, 1))
    for step in range(iterations):
        residual = X.dot(theta) - y
        # same evaluation order as the classic formulation: scalar factors
        # first, then the gradient vector
        theta = theta - alpha * (1 / m) * residual.dot(X)
        J_history[step] = computeCost(theta, X, y)
    return (theta, J_history)
| paradeisios/Coursera_Machine_Learning | week2/python/utils/gradientDescent.py | gradientDescent.py | py | 514 | python | en | code | 0 | github-code | 36 |
12552816029 | my_list = [1, 2, 3, 4, 5]
for i in my_list:
print(i)
else:
print('Hit the For\Else Statement')
print()
print('********************************')
print()
j = 1
while j <= 5:
print(j)
j += 1
if j == 3:
break
else:
print('Hi the While\Else Statement')
print()
print('********************************')
print()
def find_index(to_search, target):
    """Return the index of the first element equal to target, or -1 if absent."""
    for position, element in enumerate(to_search):
        if element == target:
            return position
    return -1
my_list_1 = ['Manju', 'Thilak', 'Suu', 'Sharu', 'Paavan']
index_value = find_index(my_list_1, 'Paavan')
print(f'Location of the target is at index "{index_value}"')
print()
print('************************************************')
print()
lists = ['ABC', 'BCD', 'CDE', 'EFG']
print(f'Index of "BCD" found at: {lists.index("BCD")}')
print()
def index_to_search(index_search, value_target):
    """Return the index of value_target within index_search, or -1 when not found."""
    for pos, current in enumerate(index_search):
        if current == value_target:
            return pos
    return -1
search = index_to_search(lists, 'BCD')
print(f'Location found at index: {search}')
| iampaavan/Pure_Python | Else Clauses on Loops.py | Else Clauses on Loops.py | py | 1,118 | python | en | code | 1 | github-code | 36 |
192760992 | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import requests
import paralleldots
import stripe
app = Flask(__name__)
pub_key = ''
secret_key = ''
stripe.api_key = secret_key
class Analyze:
    """Fetch a company's EDGAR financial report and run sentiment analysis on it.

    Usage: Analyze(company).predict() — predict() drives the two fetch steps
    in order (get_fin_report, then sentiment_analysis) before reading the
    result, so self.data / self.response only exist after those calls.
    """
    # API keys are blank placeholders; fill in before use.
    report_key = ''
    sentiment_key = ''
    def __init__(self, company):
        # company: ticker symbol used in the EDGAR core-financials query
        self.company = company
        self.url = 'https://datafied.api.edgar-online.com/v2/corefinancials/ann?primarysymbols=' + \
            self.company + '&appkey=' + self.report_key
    def get_fin_report(self):
        """Fetch the annual core-financials report; stores the JSON in self.data."""
        URL = self.url
        PARAMS = {}
        r = requests.get(url=URL, params=PARAMS)
        self.data = r.json()
        return None
    def sentiment_analysis(self):
        """Run ParallelDots sentiment on self.data; stores the result in self.response."""
        paralleldots.set_api_key(self.sentiment_key)
        # NOTE(review): self.data is the raw JSON dict from the EDGAR response,
        # not extracted text — presumably paralleldots stringifies it; verify
        # this is the intended input.
        self.response = paralleldots.sentiment(self.data, 'en')
        return None
    def predict(self):
        """Return the dominant sentiment label ('positive'/'neutral'/'negative')."""
        self.get_fin_report()
        self.sentiment_analysis()
        goodornot = self.response['sentiment']
        # pick the label with the highest score
        result = max(goodornot, key=goodornot.get)
        return result
# Route for handling the login page logic
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page; any POST submission forwards to the analysis page."""
    if request.method == 'POST':
        return redirect(url_for('reallyobscurefilename'))
    # GET: render the landing page with the Stripe publishable key.
    return render_template('index.html', error=None, pub_key=pub_key)
@app.route('/reallyobscurefilename', methods=['POST', 'GET'])
def reallyobscurefilename():
    """Analysis page: on POST, run a sentiment prediction for the submitted ticker."""
    verdict = ''
    if request.method == 'POST':
        ticker = request.form['company']
        sentiment = Analyze(company=ticker).predict()
        if sentiment in ('positive', 'neutral', 'negative'):
            verdict = ('Our expert professional guidance is that this is a '
                       + sentiment + ' investment.')
    return render_template('reallyobscurefilename.html', result=verdict)
if __name__ == "__main__":
app.run(debug=True) | mikeyj777/siraj_midterm_take3 | app.py | app.py | py | 1,955 | python | en | code | 0 | github-code | 36 |
30591789155 | from __future__ import annotations
from grammar import Grammar, reduce_left_recursion, chomsky_normal_form
from dacite import from_dict
import json
def main():
    """Load a grammar from a JSON file and print it after each transformation:
    as given, after left-recursion elimination, and in Chomsky normal form.
    """
    filename = input("Input file name:") or "data.json"
    # Fix: report the file actually chosen (the `or` fallback may have kicked
    # in) — the original printed an f-string with no placeholder at all.
    print(f'using file {filename}')
    with open(filename, "r") as f:
        data = json.load(f)
    g = from_dict(data_class=Grammar, data=data)
    print("--------- Input grammar -----------")
    g.print()
    print("--------- Left Recursion Elimination -----------")
    g = reduce_left_recursion(g)
    g.print()
    print("--------- Chomsky Normal Form -----------")
    g = chomsky_normal_form(g)
    g.print()
if __name__ == "__main__":
main()
| fairay/Compilers | lab2/main.py | main.py | py | 682 | python | en | code | 0 | github-code | 36 |
23080854641 | from consts import piConst, hwConst
from spa.serverside import CSocketProServer, CSocketProService,\
CClientPeer, BaseServiceID, Plugin
from spa.clientside import CAsyncQueue, CStreamingFile, CMysql, COdbc, CSqlite, CSqlServer, CPostgres
from spa.udb import DB_CONSTS
from pub_sub.ps_server.hwpeer import CHelloWorldPeer
from webdemo.myhttppeer import CMyHttpPeer
import sys
from ctypes import *
from sys import platform as os
# bool U_MODULE_OPENED WINAPI SetSPluginGlobalOptions(const char *jsonUtf8Options);
sqlite_lib = None
if os == "win32":
sqlite_lib = WinDLL("ssqlite.dll")
else:
sqlite_lib = WinDLL("libssqlite.so")
SetSPluginGlobalOptions = sqlite_lib.SetSPluginGlobalOptions
SetSPluginGlobalOptions.argtypes = [c_char_p]
SetSPluginGlobalOptions.restype = c_bool
# int U_MODULE_OPENED WINAPI DoSPluginAuthentication(SPA::UINT64 hSocket,
# const wchar_t *userId, const wchar_t *password, unsigned int nSvsId,
# const wchar_t *options);
SQLite_Auth = sqlite_lib.DoSPluginAuthentication
SQLite_Auth.argtypes = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]
SQLite_Auth.restype = c_int
mysql_lib = None
MySQL_Auth = None
if os == "win32":
mysql_lib = WinDLL("smysql.dll")
else:
mysql_lib = WinDLL("libsmysql.so")
if mysql_lib:
MySQL_Auth = mysql_lib.DoSPluginAuthentication
MySQL_Auth.argtypes = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]
MySQL_Auth.restype = c_int
odbc_lib = None
ODBC_Auth = None
if os == "win32":
odbc_lib = WinDLL("sodbc.dll")
else:
odbc_lib = WinDLL("libsodbc.so")
if odbc_lib:
ODBC_Auth = odbc_lib.DoSPluginAuthentication
ODBC_Auth.argtypes = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]
ODBC_Auth.restype = c_int
mssql_lib = None
if os == "win32":
mssql_lib = WinDLL("usqlsvr.dll")
else:
mssql_lib = WinDLL("libusqlsvr.so")
MsSql_Auth = mssql_lib.DoSPluginAuthentication
MsSql_Auth.argtypes = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]
MsSql_Auth.restype = c_int
postgres_lib = None
Postgres_Auth = None
if os == "win32":
postgres_lib = WinDLL("spostgres.dll")
else:
postgres_lib = WinDLL("libspostgres.so")
if postgres_lib:
Postgres_Auth = postgres_lib.DoSPluginAuthentication
Postgres_Auth.argtypes = [c_uint64, c_wchar_p, c_wchar_p, c_uint, c_wchar_p]
Postgres_Auth.restype = c_int
with CSocketProServer() as server:
    def OnClose(hSocket, errCode):
        """Per-socket close callback: locate the service and peer that owned
        the closed socket. Cleanup body is a placeholder (not implemented)."""
        bs = CSocketProService.SeekService(hSocket)
        if bs:
            sp = bs.Seek(hSocket)
            # ......
server.OnClose = OnClose
    def OnIsPermitted(hSocket, userId, pwd, svsId):
        """Authentication callback for an incoming client session.

        Known demo services are admitted without authentication; database
        services delegate to the matching plugin's DoSPluginAuthentication
        export (which also opens and caches a DB handle on success).
        Returns True to accept the connection.
        """
        auth_res = Plugin.AUTHENTICATION_NOT_IMPLEMENTED
        if svsId == hwConst.sidHelloWorld or svsId == BaseServiceID.sidHTTP or svsId == piConst.sidPi or svsId == piConst.sidPiWorker:
            # give permission to known services without authentication
            auth_res = Plugin.AUTHENTICATION_OK
        elif svsId == CAsyncQueue.sidQueue or svsId == CStreamingFile.sidFile:
            # give permission to known services without authentication
            auth_res = Plugin.AUTHENTICATION_OK
        elif svsId == CPostgres.sidPostgres:
            auth_res = Postgres_Auth(hSocket, userId, pwd, svsId, 'server=localhost;timeout=45;max_SQLs_batched=16')
        elif svsId == CSqlServer.sidMsSql:
            auth_res = MsSql_Auth(hSocket, userId, pwd, svsId, 'server=localhost;timeout=45;max_SQLs_batched=16')
        elif svsId == COdbc.sidOdbc:
            auth_res = ODBC_Auth(hSocket, userId, pwd, svsId, 'DRIVER={ODBC Driver 13 for SQL Server};Server=windesk;database=sakila;max_sqls_batched=16')
        elif svsId == CMysql.sidMysql:
            auth_res = MySQL_Auth(hSocket, userId, pwd, svsId, 'server=windesk;max_sqls_batched=16')
        elif svsId == CSqlite.sidSqlite:
            auth_res = SQLite_Auth(hSocket, userId, pwd, svsId, 'usqlite.db')
        if auth_res == Plugin.AUTHENTICATION_PROCESSED:
            # give permission without authentication
            auth_res = Plugin.AUTHENTICATION_OK
        if auth_res >= Plugin.AUTHENTICATION_OK:
            print(userId + "'s connecting permitted, and DB handle opened and cached")
        # NOTE(review): this PROCESSED branch is unreachable — PROCESSED was
        # already upgraded to OK just above; confirm the intended order.
        elif auth_res == Plugin.AUTHENTICATION_PROCESSED:
            print(userId + "'s connecting denied: no authentication implemented but DB handle opened and cached")
        elif auth_res == Plugin.AUTHENTICATION_FAILED:
            print(userId + "'s connecting denied: bad password or user id")
        elif auth_res == Plugin.AUTHENTICATION_INTERNAL_ERROR:
            print(userId + "'s connecting denied: plugin internal error")
        elif auth_res == Plugin.AUTHENTICATION_NOT_IMPLEMENTED:
            print(userId + "'s connecting denied: no authentication implemented")
        else:
            print(userId + "'s connecting denied: unknown reseaon with res --" + str(auth_res))
        return auth_res >= Plugin.AUTHENTICATION_OK
server.OnIsPermitted = OnIsPermitted
    def do_configuration():
        """Server-start hook: register the publish/subscribe chat groups.

        Returning True lets the server proceed to listen; False aborts startup.
        """
        CSocketProServer.PushManager.AddAChatGroup(1, "R&D Department")
        CSocketProServer.PushManager.AddAChatGroup(2, "Sales Department")
        CSocketProServer.PushManager.AddAChatGroup(3, "Management Department")
        CSocketProServer.PushManager.AddAChatGroup(7, "HR Department")
        # dedicated group id used by the DB plugins for real-time cache updates
        CSocketProServer.PushManager.AddAChatGroup(DB_CONSTS.CACHE_UPDATE_CHAT_GROUP_ID, "Subscribe/publish for front clients")
        return True  # True -- ok; False -- no listening server
server.OnSettingServer = do_configuration
mapIdMethod = {
hwConst.idSayHello: 'sayHello',
hwConst.idSleep: ['sleep', True], # or ('sleep', True)
hwConst.idEcho: 'echo'
}
server.hw = CSocketProService(CHelloWorldPeer, hwConst.sidHelloWorld, mapIdMethod)
# HTTP/WebSocket service
server.HttpSvs = CSocketProService(CMyHttpPeer, BaseServiceID.sidHTTP, None)
mapIdReq = {}
server.Pi = CSocketProService(CClientPeer, piConst.sidPi, mapIdReq)
server.PiWorker = CSocketProService(CClientPeer, piConst.sidPiWorker, mapIdReq)
if not CSocketProServer.Router.SetRouting(piConst.sidPi, piConst.sidPiWorker):
print('Setting routing failed')
# load file streaming library at the directory ../bin/free_services/file
# 16 * 1024 dequeue batch size in bytes
server.aq = CSocketProServer.DllManager.AddALibrary('uasyncqueue', 16 * 1024)
# load async sqlite library located at the directory ../bin/free_services/sqlite
server.sqlite = CSocketProServer.DllManager.AddALibrary("ssqlite")
if server.sqlite:
# monitoring sakila.db table events (DELETE, INSERT and UPDATE) for
# tables actor, language, category, country and film_actor
jsonOptions = '{"global_connection_string":"usqlite.db","monitored_tables":\
"sakila.db.actor;sakila.db.language;sakila.db.category;sakila.db.country;sakila.db.film_actor"}'
SetSPluginGlobalOptions(jsonOptions.encode('utf-8'))
# load persistent message queue library at the directory ../bin/free_services/queue
server.file = CSocketProServer.DllManager.AddALibrary('ustreamfile')
# load MySQL/MariaDB server plugin library at the directory ../bin/free_services/mm_middle
server.mysql = CSocketProServer.DllManager.AddALibrary("smysql")
# load ODBC server plugin library at the directory ../bin/win or ../bin/linux
server.odbc = CSocketProServer.DllManager.AddALibrary("sodbc")
# load MS sql server plugin library at the directory ../bin/win or ../bin/linux
server.mssql = CSocketProServer.DllManager.AddALibrary("usqlsvr")
# load PostgreSQL plugin library at the directory ../bin/win/win64 or ../bin/linux
server.postgres = CSocketProServer.DllManager.AddALibrary("spostgres")
if not server.Run(20901):
print('Error message = ' + CSocketProServer.ErrorMessage)
print('Read a line to shutdown the application ......')
line = sys.stdin.readline()
| udaparts/socketpro | tutorials/python/all_servers/all_servers.py | all_servers.py | py | 7,957 | python | en | code | 27 | github-code | 36 |
3207021660 | """ Helper functions used by tests for the locate subpackage."""
from pdf2gtfs.datastructures.gtfs_output.handler import GTFSHandler
from pdf2gtfs.datastructures.gtfs_output.stop_times import Time
def add_stops_to_handler(handler: GTFSHandler, n: int = 5) -> None:
    """ Add n unique stops (named stop_0..stop_{n-1}) to the given handler. """
    for idx in range(n):
        handler.stops.add("stop_{}".format(idx))
def add_calendar_to_handler(handler: GTFSHandler) -> None:
    """ Add a calendar, with Tuesday and Wednesday as active days. """
    # "1"/"2" are weekday codes (per the docstring: Tuesday/Wednesday — confirm
    # against the Calendar implementation); the empty set carries no annotations.
    handler.calendar.add(["1", "2"], set())
def add_routes_to_handler(handler: GTFSHandler, n: int = 2) -> None:
    """ Add n unique routes (ids 0..n-1, named route_0..route_{n-1}). """
    for route_num in range(n):
        handler.routes.add(str(route_num), "route_{}".format(route_num))
def add_stop_times_to_handler(handler: GTFSHandler, time_to_next: Time = None
                              ) -> None:
    """ Add stop_times to the given handler.

    Creates both trips and stop_times. Each route gets one trip starting at
    hour 6 * (route_index + 1), serving a shrinking slice of the stops.

    NOTE(review): time_to_next is only checked for None — when a Time is
    passed, current_time is never advanced by it, so every stop of a trip
    shares one departure time. Looks like a bug (the name suggests it should
    be the per-stop increment); confirm the intended fixture behavior before
    changing it.
    """
    stops = handler.stops.entries
    routes = handler.routes.entries
    service_id = handler.calendar.entries[0].service_id
    for route_idx, route in enumerate(routes):
        # Create trip for route.
        trip = handler.trips.add(service_id, route.route_id)
        current_time = Time(6 * (route_idx + 1))
        # later routes serve fewer stops (slice shrinks from both ends)
        route_stops = stops[route_idx:len(stops) - route_idx]
        for stop_idx, stop in enumerate(route_stops):
            handler.stop_times.add(trip.trip_id, stop.stop_id,
                                   stop_idx, current_time)
            if time_to_next is None:
                # growing gap: 1 min after the first stop, 2 after the second, ...
                current_time += Time(minutes=1 + stop_idx * 1)
def create_handler() -> GTFSHandler:
    """ Create a dummy handler and adds some basic data to it. """
    handler = GTFSHandler()
    # Order matters: stop_times needs the stops, calendar and routes to exist.
    add_stops_to_handler(handler)
    add_calendar_to_handler(handler)
    add_routes_to_handler(handler)
    add_stop_times_to_handler(handler)
    return handler
| heijul/pdf2gtfs | test/test_locate/__init__.py | __init__.py | py | 1,918 | python | en | code | 1 | github-code | 36 |
39086540253 | import logging
import sys
def two_sum_sorting(numbers_set, range_start, range_end):
    """Count the distinct targets in the given range expressible as a two-sum.

    Sorts the numbers, then runs a two-pointer sweep: whenever the
    outer pair's sum lands in range, it also scans inward from `start`
    pairing each element with numbers_sorted[end] to collect further
    in-range sums before retiring `end`.

    NOTE(review): the outer `else` branch admits sums equal to
    range_start/range_end (non-strict), while the inner scan uses
    check_in_range, which is strictly exclusive — inconsistent endpoint
    handling; confirm which boundary is intended.
    """
    logger.info('Started function that uses sorting')
    numbers_sorted = sorted(numbers_set)
    logger.info('Done sorting')
    start = 0
    end = len(numbers_set) - 1
    result = set()
    logger.info('Entering main while loop')
    while start < end:
        tmp = numbers_sorted[start] + numbers_sorted[end]
        if tmp > range_end:
            end -= 1
        elif tmp < range_start:
            start += 1
        else:
            result.add(tmp)
            # collect every other in-range sum that still uses numbers_sorted[end]
            for i in range(start + 1, end):
                t = numbers_sorted[i] + numbers_sorted[end]
                if check_in_range(t, range_start, range_end):
                    result.add(t)
                else:
                    break
            end -= 1
    logger.info('Done looping') # approx 1.6s
    return len(result)
def two_sum_hash(numbers_set, range_start, range_end):
    """Brute-force variant: for every target in range, test whether two
    distinct numbers from the set sum to it, using O(1) set membership."""
    logger.info('Started function that uses set lookup')
    result = set()
    for target in range(range_start, range_end):
        for x in numbers_set:
            complement = target - x
            if complement in numbers_set and complement != x:
                result.add(target)
    logger.info('Finished set lookup')  # more than 60min
    return len(result)
def check_in_range(number, range_start, range_end):
    """Return True if number lies strictly between range_start and range_end.

    Endpoints are excluded: equivalent to range_start < number < range_end.
    """
    # Chained comparison replaces the redundant if/return-True/else/return-False.
    return range_start < number < range_end
if __name__ == '__main__':
# initialize logging to console
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# actual start
logger.info('Program started')
nums = set()
# load data
with open('algo1-programming_prob-2sum.txt') as data:
for line in data:
raw = line.strip()
if raw:
i = int(raw)
nums.add(i)
logger.info('Data loaded from file')
print(two_sum_sorting(nums, -10000, 10000))
# print(two_sum_hash(nums, -10000, 10000))
logger.info('Program end')
| nickslavsky/Algorithms-pt1 | Week 6/2sum.py | 2sum.py | py | 2,209 | python | en | code | 0 | github-code | 36 |
10741283794 | from __future__ import unicode_literals
import os
import unittest
import tempfile
import textwrap
import decimal
import shutil
import transaction as db_transaction
class TestAlembic(unittest.TestCase):
    """Functional test for the Alembic migration (revision b3d4192b123) that
    converts money columns from Numeric dollar amounts to integer cents."""
    def setUp(self):
        """Build a throwaway database (BILLY_FUNC_TEST_DB or a temp SQLite
        file), a scoped session bound to it, and an alembic.ini pointing at it."""
        from sqlalchemy import create_engine
        from sqlalchemy.orm import scoped_session
        from sqlalchemy.orm import sessionmaker
        from sqlalchemy.ext.declarative import declarative_base
        from zope.sqlalchemy import ZopeTransactionExtension
        from alembic.config import Config
        self.temp_dir = tempfile.mkdtemp()
        # init database
        default_sqlite_url = 'sqlite:///{}/billy.sqlite'.format(self.temp_dir)
        self.db_url = os.environ.get(
            'BILLY_FUNC_TEST_DB',
            default_sqlite_url,
        )
        # as these tests cannot work with in-memory sqlite, so, when it is
        # a sqlite URL, we use the one in temp folder anyway
        if self.db_url.startswith('sqlite:'):
            self.db_url = default_sqlite_url
        self.engine = create_engine(self.db_url, convert_unicode=True)
        self.declarative_base = declarative_base()
        self.declarative_base.metadata.bind = self.engine
        self.session = scoped_session(sessionmaker(
            autocommit=False,
            autoflush=False,
            bind=self.engine,
            extension=ZopeTransactionExtension()
        ))
        # write a minimal alembic.ini so the command-line API can run
        # migrations against this test database
        self.alembic_path = os.path.join(self.temp_dir, 'alembic.ini')
        with open(self.alembic_path, 'wt') as f:
            f.write(textwrap.dedent("""\
                [alembic]
                script_location = alembic
                sqlalchemy.url = {}
                [loggers]
                keys = root
                [handlers]
                keys =
                [formatters]
                keys =
                [logger_root]
                level = WARN
                qualname =
                handlers =
                """).format(self.db_url))
        self.alembic_cfg = Config(self.alembic_path)
    def tearDown(self):
        """Close the session, drop all tables and delete the temp directory."""
        # drop all tables
        self.session.remove()
        self.declarative_base.metadata.drop_all()
        shutil.rmtree(self.temp_dir)
    def test_use_integer_column_for_amount(self):
        """Upgrade converts dollar Numerics to integer cents; downgrade restores."""
        from sqlalchemy import Column
        from sqlalchemy import Integer
        from sqlalchemy import Numeric
        # Minimal stand-ins for the three tables the migration touches.
        class Plan(self.declarative_base):
            __tablename__ = 'plan'
            guid = Column(Integer, primary_key=True)
            amount = Column(Numeric(10, 2))
        class Subscription(self.declarative_base):
            __tablename__ = 'subscription'
            guid = Column(Integer, primary_key=True)
            amount = Column(Numeric(10, 2))
        class Transaction(self.declarative_base):
            __tablename__ = 'transaction'
            guid = Column(Integer, primary_key=True)
            amount = Column(Numeric(10, 2))
        self.declarative_base.metadata.create_all()
        # Seed each table with the same three dollar amounts.
        with db_transaction.manager:
            for amount in ['12.34', '55.66', '10']:
                amount = decimal.Decimal(amount)
                plan = Plan(amount=amount)
                subscription = Subscription(amount=amount)
                transaction = Transaction(amount=amount)
                self.session.add(plan)
                self.session.add(subscription)
                self.session.add(transaction)
        from alembic import command
        command.stamp(self.alembic_cfg, 'base')
        command.upgrade(self.alembic_cfg, 'b3d4192b123')
        # Notice: this with statement here makes sure the database transaction
        # will be closed after querying, otherwise, we have two connections
        # to postgresql (one by testing code, one by Alembic), when we are
        # doing following downgrade, there is table alter, it appears
        # there will be a deadlock when there is a overlap of two transaction
        # scope
        with db_transaction.manager:
            for table in [Plan, Subscription, Transaction]:
                amounts = self.session.query(table.amount).all()
                amounts = map(lambda item: float(item[0]), amounts)
                # make sure all float dollars are converted into integer cents
                self.assertEqual(set(amounts), set([1234, 5566, 1000]))
        command.downgrade(self.alembic_cfg, 'base')
        # After downgrade, the original Decimal dollar values must be back.
        with db_transaction.manager:
            for table in [Plan, Subscription, Transaction]:
                amounts = self.session.query(table.amount).all()
                amounts = map(lambda item: item[0], amounts)
                self.assertEqual(
                    set(amounts),
                    set(map(decimal.Decimal, ['12.34', '55.66', '10']))
                )
| vdt/billy | billy/tests/functional/test_alembic.py | test_alembic.py | py | 4,718 | python | en | code | null | github-code | 36 |
72164652584 | from model.action.Action import Action
from model.action.Actions import Actions
class SendingMessageAction(Action):
    """Action representing a message sent to a specific agent."""
    def __init__(self, _message: str, _agt_id: int):
        # _message: payload text; _agt_id: id of the receiving agent
        Action.__init__(self, "sending_message", Actions.Sending_message)
        self.message = _message
        self.agt_id = _agt_id
| alejeau/cocoma-td | model/action/SendingMessageAction.py | SendingMessageAction.py | py | 311 | python | en | code | 0 | github-code | 36 |
31353500311 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from colorama import Fore
import config
from dataloader import getDataloaders
from utils import getModel, save_checkpoint, get_optimizer, optimizer_device, create_save_folder, filter_fixed_params
from args import arg_parser, arch_resume_names
from train_val import Trainer
from tensorboardX import SummaryWriter
dev = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def main():
    """End-to-end training/evaluation driver.

    Parses CLI args, builds (or resumes from checkpoint) the model and
    optimizer, then either runs a single evaluation pass (args.evaluate set to
    train/val/test) or trains for args.epochs epochs with per-epoch
    validation, TSV/TensorBoard logging, best-checkpoint tracking and early
    stopping via args.patience.
    """
    # parse arg and start experiment
    global args
    best_err1 = 100.
    best_epoch = 0
    now_epoch = 0
    args = arg_parser.parse_args()
    args.config_of_data = config.datasets[args.data]
    args.num_classes = config.datasets[args.data]['num_classes']
    # optionally resume from a checkpoint
    if args.resume:
        if args.resume and os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            old_args = checkpoint['args']
            print('Old args:')
            print(old_args)
            # set args based on checkpoint
            if args.start_epoch <= 1:
                args.start_epoch = checkpoint['epoch'] + 1
            now_epoch = args.start_epoch - 1
            best_epoch = checkpoint['best_epoch']
            best_err1 = checkpoint['best_err1']
            print('pre best err1:{} @epoch{}'.format(best_err1, best_epoch))
            # for name in arch_resume_names:
            #     if name in vars(args) and name in vars(old_args):
            #         setattr(args, name, getattr(old_args, name))
            model = getModel(**vars(args))
            model.load_state_dict(checkpoint['model_state_dict'])
            # wrap for multi-GPU only when more than one device is available
            model = nn.DataParallel(model) if torch.cuda.device_count() > 1 else model
            model = model.to(dev)
            # exclude frozen modules from the optimizer's parameter list
            training_params = filter_fixed_params(model, args.fixed_module_flags)
            optimizer = get_optimizer(training_params, args)
            if args.load_optimizer:
                optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                # move optimizer state tensors onto the active device
                optimizer_device(optimizer, dev)
                print('optimizer.state_dict():')
                print(optimizer.state_dict()["param_groups"])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print(
                "=> no checkpoint found at '{}'".format(
                    Fore.RED +
                    args.resume +
                    Fore.RESET),
                file=sys.stderr)
            return
    else:
        # create model
        print("=> creating model '{}'".format(args.arch))
        model = getModel(**vars(args))
        model = nn.DataParallel(model) if torch.cuda.device_count() > 1 else model
        model = model.to(dev)
        training_params = filter_fixed_params(model, args.fixed_module_flags)
        optimizer = get_optimizer(training_params, args)
    cudnn.benchmark = True
    # check if the folder exists
    create_save_folder(args.save, args.force)
    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().to(dev)
    # set random seed
    torch.manual_seed(args.seed)
    trainer = Trainer(model, criterion, optimizer, args)
    # create dataloader
    # evaluate-only modes: run one test pass over the chosen split and exit
    if args.evaluate == 'train':
        train_loader, _, _ = getDataloaders(
            splits=('train'), **vars(args))
        trainer.test(train_loader, now_epoch)
        return
    elif args.evaluate == 'val':
        _, val_loader, _ = getDataloaders(
            splits=('val'), **vars(args))
        trainer.test(val_loader, now_epoch)
        return
    elif args.evaluate == 'test':
        _, _, test_loader = getDataloaders(
            splits=('test'), **vars(args))
        trainer.test(test_loader, now_epoch, write=True)
        return
    else:
        train_loader, val_loader, _ = getDataloaders(
            splits=('train', 'val'), **vars(args))
    # set up logging
    global log_print, f_log
    f_log = open(os.path.join(args.save, 'log.txt'), 'w')

    def log_print(*args):
        # echo to stdout and to the log file
        print(*args)
        print(*args, file=f_log)
    log_print('args:')
    log_print(args)
    print('model:', file=f_log)
    print(model, file=f_log)
    log_print('# of params:',
              str(sum([p.numel() for p in model.parameters()])))
    f_log.flush()
    f_log.close()
    torch.save(args, os.path.join(args.save, 'args.pth'))
    # header row of the per-epoch scores TSV
    scores = ['epoch\tlr\ttrain_loss\tval_loss\ttrain_err1'
              '\tval_err1\ttrain_err5\tval_err5']
    if args.tensorboard:
        writer = SummaryWriter(os.path.join(args.save, 'log_dir'))
    for epoch in range(args.start_epoch, args.epochs + 1):
        # simulate 10 fold validation
        # NOTE(review): float modulo — this reseeds and rebuilds the loaders
        # every epochs/10 epochs only when epochs is divisible by 10; confirm
        # the intended cadence for other epoch counts.
        if epoch % (args.epochs / 10) == 0:
            args.seed += 5
            train_loader, val_loader, _ = getDataloaders(
                splits=('train', 'val'), **vars(args))
        # train for one epoch
        train_loss, train_err1, train_err5, lr = trainer.train(
            train_loader, epoch)
        # evaluate on validation set
        val_loss, val_err1, val_err5 = trainer.test(val_loader, epoch)
        if args.tensorboard:
            writer.add_scalar('Lr', lr, epoch)
            writer.add_scalars('Err/err1', {'train_err1': train_err1,
                                            'val_err1': val_err1}, epoch)
            writer.add_scalars('Err/err5', {'train_err5': train_err5,
                                            'val_err5': val_err5}, epoch)
            writer.add_scalars('Loss', {'train_loss': train_loss,
                                        'val_loss': val_loss}, epoch)
        # save scores to a tsv file, rewrite the whole file to prevent
        # accidental deletion
        scores.append(('{}\t{}' + '\t{:.4f}' * 6)
                      .format(epoch, lr, train_loss, val_loss,
                              train_err1, val_err1, train_err5, val_err5))
        with open(os.path.join(args.save, 'scores.tsv'), 'w') as f:
            print('\n'.join(scores), file=f)
        # remember best err@1 and save checkpoint
        is_best = val_err1 < best_err1
        if is_best:
            best_err1 = val_err1
            best_epoch = epoch
            print(Fore.GREEN + 'Best var_err1 {} @ ep{}'.format(best_err1, best_epoch) +
                  Fore.RESET)
            # test_loss, test_err1, test_err1 = validate(
            #     test_loader, model, criterion, epoch, True)
            # save test
        save_checkpoint({
            'args': args,
            'epoch': epoch,
            'best_epoch': best_epoch,
            'arch': args.arch,
            'model_state_dict': model.module.state_dict() if torch.cuda.device_count() > 1 else model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'best_err1': best_err1}, is_best, args.save)
        # early stopping: give up after args.patience epochs without
        # improvement (patience <= 0 disables the check)
        if not is_best and epoch - best_epoch >= args.patience > 0:
            break
    with open(os.path.join(args.save, 'scores.tsv'), 'a') as f:
        print('Best val_err1: {:.4f} at epoch {}'.format(best_err1, best_epoch), file=f)
if __name__ == '__main__':
main()
| KoapT/img_classification_pytorch | main.py | main.py | py | 7,409 | python | en | code | 0 | github-code | 36 |
4255371054 | from typing import List
from data_structures.list_node import ListNode
from heapq import *
class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        """Merge k sorted linked lists into one sorted list (new nodes).

        A min-heap keyed on (value, list index) yields the next-smallest
        node in O(log k); the index breaks value ties so node objects are
        never compared. Returns None for an empty input. O(N log k) time.
        """
        min_heap = []
        for i, node in enumerate(lists):
            if node:
                heappush(min_heap, (node.val, i, node))
        head, tail = None, None
        while min_heap:
            val, i, node = heappop(min_heap)
            # advance the source list this node came from
            if node.next:
                heappush(min_heap, (node.next.val, i, node.next))
            # append a fresh node carrying the popped value
            if not head:
                head = ListNode(val)
                tail = head
            else:
                # Fix: dropped the original's dead `if not head.next:
                # head.next = tail` branch — it briefly created a self-loop
                # (head.next = head) that the very next statement always
                # overwrote, so removing it changes nothing observable.
                tail.next = ListNode(val)
                tail = tail.next
        return head
| blhwong/algos_py | leet/merge_k_sorted_lists/main.py | main.py | py | 844 | python | en | code | 0 | github-code | 36 |
26172267037 | # Easy Python project :P
# Random friend selector to decide who to FaceTime!
import random
# Candidate pool — edit this list to change who can be picked.
friends =[
'Arnab','Dipankar','Sandeep Sir','Aman','Virat','Pant'
]
selected = random.choice(friends) #randomly choose a friend (uniformly)
print('Who should I facetime today? ')
print(selected)
# random.randint(1,5)
# random.choice(friends) | Sib-git/madlibs | selector.py | selector.py | py | 333 | python | en | code | 0 | github-code | 36 |
27909840260 | import time
import pandas as pd
import numpy as np
import sys
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.
    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print("Hello! Let's explore some US bikeshare data!")

    def ask(question, valid, error):
        # Re-prompt until the lowercased answer is one of the valid choices.
        while True:
            answer = input(question).lower()
            if answer in valid:
                return answer
            print(error)

    city = ask("Please Choose a City : chicago, new york or washington: ",
               CITY_DATA,
               "That's not a valid choice, Please try again : chicago, new york or washington")
    month = ask("PLease choose a month from january to june or just write 'all' if so: ",
                ("january", "february", "march", "april", "may", "june", "all"),
                "That's not a valid choice, Please try again : Only first six months are available")
    day = ask("Please choose a day or just write 'all' if so: ",
              ("saturday", "sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "all"),
              "That's not a valid choice, Please try again with a valid day name")
    print('-' * 40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    print("Just Confirming your Choices: \n")
    df = pd.read_csv(CITY_DATA[city])
    print("For The City of {}".format(city).title())
    # Derive month/day helper columns once so the filters below are simple lookups.
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month_name()
    df['day'] = df['Start Time'].dt.day_name()
    if month == 'all':
        print("& All Months")
    else:
        df = df[df['month'] == month.title()]
        print("& The Month of {}".format(month).title())
    if day == 'all':
        print("& All Days")
    else:
        df = df[df['day'] == day.title()]
        print("& The Day of {}".format(day).title())
    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel.

    Mutates df: adds 'month', 'day' and 'hour' helper columns.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()
    start = df['Start Time'].dt
    # Most common month / weekday / 12-hour start time, via mode().
    df['month'] = start.month_name()
    print("Most common month is: {}".format(df['month'].mode()[0]))
    df['day'] = start.day_name()
    print("Most common day is: {}".format(df['day'].mode()[0]))
    df['hour'] = start.strftime('%I %p')
    print("Most common hour is: {}".format(df['hour'].mode()[0]))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip.

    Mutates df: adds a 'popular_comb' helper column ("<start> to <end>").
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    print("Most Commonly Used Start Station is: ", df['Start Station'].mode()[0])
    print("Most Commonly Used End Station is: ", df['End Station'].mode()[0])
    # Most frequent start->end pairing, built by concatenating both columns.
    df['popular_comb'] = df['Start Station'].str.cat(df['End Station'], sep=' to ')
    print("Most Commonly Used Combination of Start and End Stations is: ", df['popular_comb'].mode()[0])
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration.

    'Trip Duration' is assumed to be in seconds — TODO confirm against the
    source CSVs. The total is reported in hours, the mean in minutes.
    """
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()
    # Total travel time, converted from seconds to hours.
    total_duration = df['Trip Duration'].sum()
    duration_hours = int(total_duration) / 3600
    print("Total Trips Duration : {:,.2f} hours".format(duration_hours))
    # Mean travel time, converted from seconds to minutes.
    # Bug fix: the original called df.fillna(0) and discarded the result
    # (fillna is not in-place), so the statement had no effect; removed.
    travel_mean = df['Trip Duration'].mean()
    mean_minutes = int(travel_mean) / 60
    print("Mean of Trips Duration: {:,.2f} minutes".format(mean_minutes))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df):
    """Displays statistics on bikeshare users.

    The Washington dataset has no 'Gender'/'Birth Year' columns; accessing
    them raises KeyError, which is answered with a notice instead of stats.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()
    # Counts of user types (always present).
    usr_type = df['User Type'].value_counts(dropna = True)
    print("User Types: \n ", usr_type)
    try:
        gendr = df['Gender'].value_counts(dropna = True)
        print("Gender Types: \n", gendr)
        # Earliest, most recent, and most common year of birth.
        earliest_birth = df['Birth Year'].min()
        print("Earliest Birth Year : ", int(earliest_birth))
        recent_birth = df['Birth Year'].max()
        print("Most Recent Birth Year: ", int(recent_birth))
        most_common_year = df['Birth Year'].mode()[0]
        print("Most Common Birth Year: ", int(most_common_year))
    except (KeyError, ValueError):
        # Bug fix: the bare `except:` also swallowed unrelated errors (typos,
        # interrupts); only missing columns / all-NaN years are expected here.
        print("Gender and Birth Year Data Not Provided for Selected City")
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
# Asks the user if they want to see rows of raw data, 5 at a time.
def raw_data(df):
    """Interactively page through `df` five rows at a time until 'no'."""
    answer = input("Would you like to see 5 raw data rows ? Enter yes or no: ")
    start_loc = 0
    while True:
        answer = answer.lower()
        if answer == 'yes':
            pd.set_option('display.max_columns',11)
            print(df.iloc[start_loc:start_loc+5])
            start_loc += 5
            answer = input("Would you like to see 5 more ?")
        elif answer == 'no':
            print("\n No More Data Requested Here .. Exiting This Analysis")
            break
        else:
            # Bug fix: the original printed this message in an infinite loop
            # because it never asked for fresh input on an unrecognised answer.
            print("I Didn't Understand This, Please Type only yes or no")
            answer = input("Would you like to see 5 raw data rows ? Enter yes or no: ")
def exit(df):
    """Print a farewell message; `df` is accepted only for call-site symmetry.

    NOTE: this shadows the builtin exit() at module scope.
    """
    print("Exiting")
def main():
    """Run the interactive analysis loop until the user declines a restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        # Run every report on the filtered frame, then offer raw-row paging.
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        raw_data(df)
        # Inner loop re-prompts until a recognisable yes/no is given.
        while True:
            restart = input('\nWould you like to restart? Enter yes or no.\n')
            if restart.lower() == 'yes':
                break
            elif restart.lower() == 'no':
                print("Exiting")
                sys.exit()
            else:
                print("I Didn't Understand This, Please Type only yes or no")
                continue
if __name__ == "__main__":
	main()
| nadertadrous/nadertadrous | bikeshare.py | bikeshare.py | py | 8,211 | python | en | code | 0 | github-code | 36 |
24014680066 | # https://www.hackerrank.com/challenges/bigger-is-greater/
# No such impl in Python lib: https://stackoverflow.com/questions/4223349
class strings:
    def next_permutation(w):
        """Return the lexicographically next permutation of `w`.

        If `w` is already the highest permutation, return its characters
        sorted ascending (same as the original implementation's fallback).
        Note: defined without `self` on purpose — call as
        strings.next_permutation(word).
        """
        chars = list(w)
        n = len(chars)
        # Find the rightmost position where the sequence still increases.
        pivot = n - 2
        while pivot >= 0 and chars[pivot] >= chars[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Entirely non-increasing: no next permutation exists.
            return ''.join(sorted(chars))
        # Swap the pivot with the smallest larger element to its right,
        # then sort the suffix ascending (it is non-increasing, so reverse).
        j = n - 1
        while chars[j] <= chars[pivot]:
            j -= 1
        chars[pivot], chars[j] = chars[j], chars[pivot]
        chars[pivot + 1:] = reversed(chars[pivot + 1:])
        return ''.join(chars)
| liruqi/topcoder | Library/strings.py | strings.py | py | 674 | python | en | code | 6 | github-code | 36 |
13070318133 | import time, collections
class RateLimiter:
    """Per-caller sliding-window rate limiter.

    Allows at most `max_number` calls per `interval` seconds for each id.
    """
    def __init__(self, max_number, interval):
        # One deque of call timestamps per caller id.
        self.timeStamp = collections.defaultdict(collections.deque)
        self.interval = interval
        self.max_number = max_number

    def call(self, id):
        """Record a call attempt; return True if it is allowed, else False."""
        now = time.time()
        window = self.timeStamp[id]
        if len(window) < self.max_number:
            window.append(now)
            return True
        if now - window[0] > self.interval:
            # Oldest call has aged out of the window — slide and admit.
            window.popleft()
            window.append(now)
            return True
        return False
# Manual smoke test: caller 1 spreads 10 calls over ~10s against a
# 5-calls-per-2s budget; caller 2 then gets a fresh window.
rateLimiter = RateLimiter(5, 2)
for i in range(10):
    print(rateLimiter.call(1))
    time.sleep(1)
for i in range(5):
    print(rateLimiter.call(2))
| Jason003/Interview_Code_Python | stripe/rate limiter.py | rate limiter.py | py | 813 | python | en | code | 3 | github-code | 36 |
70230238504 | # Advent of Code, Day 3, Part 2
# Sample diagnostic report from the AoC 2021 Day 3 puzzle description.
testInput = '''\
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
'''
# Expected life-support rating (oxygen * CO2) for the sample above.
testResult = 230
def solve(input):
    """Return the life-support rating: oxygen rating * CO2 scrubber rating.

    Each rating is found by repeatedly keeping the lines whose bit at the
    current position satisfies the rating's bit criterion, until one remains.
    """
    lines = [ln for ln in input.split('\n') if ln]
    width = len(lines[0])

    def rating(keep_ones):
        candidates = lines
        for pos in range(width):
            ones = sum(line[pos] == '1' for line in candidates)
            zeros = len(candidates) - ones
            wanted = '1' if keep_ones(ones, zeros) else '0'
            candidates = [line for line in candidates if line[pos] == wanted]
            if len(candidates) == 1:
                return int(candidates[0], 2)

    oxygen = rating(lambda ones, zeros: ones >= zeros)   # most common bit, ties -> 1
    co2 = rating(lambda ones, zeros: ones < zeros)       # least common bit, ties -> 0
    return oxygen * co2
# Sanity-check against the sample before running on the real puzzle input.
assert solve(testInput) == testResult
with open('day3/input.txt', 'r') as file:
    print(solve(file.read()))
| BlueFerox/AoC2021 | day3/part2.py | part2.py | py | 965 | python | en | code | 0 | github-code | 36 |
41614185208 | from django.shortcuts import HttpResponseRedirect
from django.http import JsonResponse
from urllib.parse import quote
class AuthRequiredMiddleware(object):
    """Django middleware that forces authentication for every page.

    Anonymous users are redirected to /login/ (preserving the requested URL
    in ?next=); AJAX requests get a 403 JSON payload instead of a redirect.
    """
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Runs before the view (and later middleware) for each request.
        if request.user.is_authenticated or request.path == "/login/":
            # Authenticated (or already on the login page): continue normally.
            return self.get_response(request)
        if request.headers.get("x-requested-with") == "XMLHttpRequest":
            # AJAX callers cannot usefully follow a redirect; answer with JSON.
            return JsonResponse(
                {
                    "status": "false",
                    "message": "You don't have permission to access this resource",
                },
                status=403,
            )
        # Keep the originally requested URL so login can bounce back to it.
        path = quote(request.get_full_path())
        return HttpResponseRedirect(f"/login/?next={path}")
| comiconomenclaturist/django-icecast-stats | stats/middleware.py | middleware.py | py | 1,051 | python | en | code | 1 | github-code | 36 |
34980505423 | #存在重复
# 给定一个整数数组,判断是否存在重复元素。
# 如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。
#===============================================================================
# 输入: [1,2,3,1]
# 输出: true
#===============================================================================
from collections import Counter
class Solution:
    def containsDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        Return True if any value appears at least twice in nums.
        """
        # A set drops duplicates, so a size difference proves a repeat.
        # Simpler and faster than building a full Counter and scanning it.
        return len(set(nums)) < len(nums)
result=Solution().containsDuplicate([1,1,1,3,3,4,3,2,4,2])
print(result) | huowolf/leetcode | src/array/containsDuplicate.py | containsDuplicate.py | py | 775 | python | fr | code | 0 | github-code | 36 |
3285800905 | import re
from parsers.InsideParser.InsideParserBase import InsideParserBase
from parsers.RegexpBuilder import RegexpBuilder
from parsers.ValuesStriper import ValuesStripper
class InsideSetArrayParser(InsideParserBase):
    """Parses lines of the form: inside <file> set <key> with values '<values>'."""

    def __init__(self, fileExt):
        InsideParserBase.__init__(self, fileExt)
        # Value list parsed from the most recent parseLine() call.
        self.values = None

    def parseLine(self, line):
        """Extract (filePath, key, valuesStr) from `line`; cache parsed values."""
        match = self.fetchMatchFor(line)
        filePath = match.group('file')
        key = match.group('key')
        valuesStr = match.group('values')
        self.values = self.parseValues(valuesStr)
        return filePath, key, valuesStr

    def getMatchInfo(self, line):
        """Build the recognizer regexp and match `line`; return (match, source)."""
        assert line is not None
        keyRegexp = r'(?P<key>[a-zA-Z]+)'
        valueRegexp = r"'(?P<values>[^']+)'$"
        rb = RegexpBuilder()
        regexpSource = rb.startsWith('inside') + self.filePathRegexp + rb.keywordToken('set') + keyRegexp + \
            rb.keywordToken('with') + rb.than('values') + valueRegexp
        regexp = re.compile(regexpSource, re.UNICODE)
        match = regexp.match(line)
        return match, regexpSource

    def parseValues(self, valuesStr):
        """Split a non-empty quoted value string into a list of values."""
        assert valuesStr is not None
        assert len(valuesStr) > 0
        vs = ValuesStripper()
        values = vs.strip(valuesStr)
        # Bug fix: the original return line was corrupted by pasted file
        # metadata ("return values | TouchInstinct/BuildScript | ...");
        # restored the plain return.
        return values
29540265563 | import telebot
from telebot import types
bot = telebot.TeleBot('1507860102:AAH3y4nFwQgnYJCFP49PMRRqQVEvhIGrLmw')
user_dict = {}
class User:
    """Holds one chat participant's quiz answers (filled in step by step)."""
    def __init__(self, name):
        self.name = name
        # Age and gender are collected by later quiz steps.
        self.age = self.sex = None
# Handle '/start' and '/help'
@bot.message_handler(commands=['help', 'start'])
def send_welcome(message):
    """Greet the user (in Polish) and offer a Tak/Nie keyboard to start the quiz."""
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    markup.add('Tak', 'Nie')
    msg = bot.reply_to(message,
                       """\
Cześć witam w quize.
Kontynujemy?
""",
                       reply_markup=markup)
    # Route the user's Tak/Nie answer to the next quiz step.
    bot.register_next_step_handler(msg, start_quiz_step)
def start_quiz_step(message):
    """Branch on the Tak/Nie answer: start the quiz or say goodbye."""
    try:
        chat_id = message.chat.id
        answer = message.text
        if answer == "Tak":
            msg = bot.reply_to(message, 'Jak masz na imię?')
            bot.register_next_step_handler(msg, process_name_step)
        elif answer == "Nie":
            bot.send_message(chat_id, 'Miło było cię poznać')
        else:
            # Deliberately funnels any other answer into the generic reply below.
            raise Exception("Unknown answer")
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_name_step(message):
    """Store the user's name in user_dict keyed by chat id, then ask for age."""
    try:
        chat_id = message.chat.id
        name = message.text
        user = User(name)
        user_dict[chat_id] = user
        msg = bot.reply_to(message, 'Ile masz lat?')
        bot.register_next_step_handler(msg, process_age_step)
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_age_step(message):
    """Validate and store the age answer, then ask for gender."""
    try:
        chat_id = message.chat.id
        age = message.text
        if not age.isdigit():
            msg = bot.reply_to(message, 'Age should be a number. How old are you?')
            # Bug fix: the original re-registered process_name_step here, so an
            # invalid age restarted the *name* question and overwrote the user;
            # re-ask the age instead.
            bot.register_next_step_handler(msg, process_age_step)
            return
        user = user_dict[chat_id]
        user.age = age
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        markup.add('Male', 'Female')
        msg = bot.reply_to(message, 'What is your gender', reply_markup=markup)
        bot.register_next_step_handler(msg, process_sex_step)
    except Exception as e:
        bot.reply_to(message, 'oooops')
def process_sex_step(message):
    """Validate the gender answer and print the completed profile summary."""
    try:
        chat_id = message.chat.id
        sex = message.text
        user = user_dict[chat_id]
        if (sex == 'Male') or (sex == 'Female'):
            user.sex = sex
        else:
            # Any other answer falls through to the generic 'oooops' reply.
            raise Exception("Unknown sex")
        bot.send_message(chat_id, 'Nice to meet you ' + user.name + '\n Age:' + str(user.age) + '\n Sex:' + user.sex)
    except Exception as e:
        bot.reply_to(message, 'oooops')
# Enable saving next step handlers to file "./.handlers-saves/step.save".
# Delay=2 means that after any change in next step handlers (e.g. calling
# register_next_step_handler()) saving will happen after a 2 second delay.
bot.enable_save_next_step_handlers(delay=2)
# Blocks forever, polling Telegram for updates.
bot.polling(none_stop=True)
4671110554 | #!/usr/bin/python3
"""Graph data structure traversal"""
def canUnlockAll(boxes):
    """Check if all boxes can be recursively opened.

    Starting with the key for box 0, recursively collects every reachable
    key via `recurse` and checks that all box indexes were reached.

    Args:
        boxes (list of list): 2D list with elements as boxes containing keys
    Returns: True if every box can be opened, False otherwise
    """
    keysSet = {0}
    recurse(0, boxes, keysSet)
    # Bug fix: the original compared list(keysSet) == list(range(...)),
    # which depends on unspecified set iteration order; compare sets instead.
    return keysSet == set(range(len(boxes)))

def recurse(idx, boxes, keysSet):
    """Recurse through boxes, accumulating every reachable key in keysSet.

    Args:
        idx (number): Position indicator for a box in `boxes`
        boxes (2D list): Original list of boxes
        keysSet (set): Accumulator of unique, in-range keys found so far
    Returns:
        None
    """
    for key in boxes[idx]:
        # Only follow keys we haven't seen that refer to an existing box.
        if key not in keysSet and key < len(boxes):
            keysSet.add(key)
            recurse(key, boxes, keysSet)
    return None
| leykun-gizaw/alx-interview | 0x01-lockboxes/0-lockboxes.py | 0-lockboxes.py | py | 1,053 | python | en | code | 0 | github-code | 36 |
32542353820 | from google.cloud import storage
from configparser import ConfigParser
from google.oauth2 import service_account
from googleapiclient.discovery import build
from utils.demo_io import (
get_initial_slide_df_with_predictions_only,
get_fovs_df,
get_top_level_dirs,
populate_slide_rows,
get_histogram_df,
list_blobs_with_prefix,
get_combined_spots_df,
crop_spots_from_slide,
)
import polars as pl
from gcsfs import GCSFileSystem
cutoff = 10  # how many slides to view cropped spot images from
# Parse in key and bucket name from config file
cfp = ConfigParser()
cfp.read("config.ini")
service_account_key_json = cfp["GCS"]["gcs_storage_key"]
gs_url = cfp["GCS"]["bucket_url"]
bucket_name = gs_url.replace("gs://", "")
# Define GCS file system so files can be read
gcs = GCSFileSystem(token=service_account_key_json)
# Authenticate using the service account key file
credentials = service_account.Credentials.from_service_account_file(
    service_account_key_json, scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
client = storage.Client.from_service_account_json(service_account_key_json)
# Create a storage client
storage_service = build("storage", "v1", credentials=credentials)
# List up to 2*cutoff blobs; only the .npy slide files among them are kept.
slide_files_raw = list_blobs_with_prefix(
    client, bucket_name, prefix="patient_slides_analysis", cutoff=cutoff * 2
)["blobs"]
# NOTE(review): str.strip(".npy") strips any of the characters '.', 'n', 'p',
# 'y' from both ends, not the ".npy" suffix — slide ids ending in those
# letters would be mangled; consider removesuffix(".npy"). Confirm intent.
slides_of_interest = [
    slidefile.split("/")[-1].strip(".npy")
    for slidefile in slide_files_raw
    if slidefile.endswith(".npy")
]
for sl in slides_of_interest:
    spot_df = get_combined_spots_df(bucket_name, gcs, sl)
    print(spot_df)
    # Top 20 spots by model score; radius is doubled for a larger crop margin.
    spot_df_top = spot_df.sort(pl.col("parasite output"), descending=True).head(20)
    spot_df_top = spot_df_top.with_columns(spot_df_top["r"].cast(pl.Int64) * 2)
    spot_coords = []
    for spot in spot_df_top.rows(named=True):
        spot_coords.append(
            (
                spot["FOV_row"],
                spot["FOV_col"],
                spot["FOV_z"],
                spot["x"],
                spot["y"],
                spot["r"],
            )
        )
    print(spot_df_top)
    spot_imgs = crop_spots_from_slide(storage_service, bucket_name, sl, spot_coords)
    for img in spot_imgs:
        img.show()
| alice-gottlieb/nautilus-dashboard | examples/spot_cropping_example.py | spot_cropping_example.py | py | 2,247 | python | en | code | 0 | github-code | 36 |
14191916255 | # TODO: prevent utf-8 encoding errors in CSVs
# TODO: add a progress bar for all timed processes
# TODO: Maintain History of organizations analyzed
# TODO: Show time taken to scrape and analyze (tock - tick)
#Importing Libraries
import contextlib
import csv
import json
import os
import re
import time
import warnings
from platform import platform, system
import matplotlib.pyplot as plt
import requests
import spacy
import torch
import trafilatura
from bs4 import BeautifulSoup
from newsapi import NewsApiClient
from rich import box, print
from rich.align import Align
from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.progress import track
from rich.syntax import Syntax
from rich.text import Text
from spacy import displacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacytextblob.spacytextblob import SpacyTextBlob
from transformers import AutoModelForSequenceClassification, AutoTokenizer
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
# =========================#
# UTIL FUNCTIONS #
# =========================#
def parse_text_from_web(webURL: str) -> str:
    """Extract the main-content text of a web page via trafilatura.

    Ads, comments, navigation, footers and HTML tags are dropped.

    Args:
        webURL (str): URL of the web page

    Returns:
        str: clean text from the page, or None when fetching/extraction fails
        (all exceptions are suppressed).
    """
    with contextlib.suppress(Exception):
        page = trafilatura.fetch_url(webURL)
        return trafilatura.extract(
            page,
            include_comments=False,
            include_tables=False,
            include_images=False,
            include_formatting=True,
            with_metadata=False,
            target_language='en',
        )
# =========================#
# cleanup FUNCTIONS #
# =========================#
def cleanup_text(text: str) -> str:
    """Normalise article text for further processing.

    Drops everything except alphanumerics, whitespace and periods, trims,
    strips non-ASCII characters and re-joins the tokens with single spaces.

    Args:
        text (str): text to be cleaned up

    Returns:
        str: cleaned up text, or None if any step raised (errors suppressed).
    """
    with contextlib.suppress(Exception):
        # Keep letters, digits, whitespace and full stops only.
        text = re.sub(r'[^a-zA-Z0-9\s.]', '', text)
        text = text.strip()
        # Drop any remaining non-ASCII characters.
        text = text.encode('ascii', 'ignore').decode('ascii')
        # Tokenise without mangling punctuation, then normalise spacing.
        tokens = re.findall(r"[\w']+|[.,!?;]", text)
        joined = ' '.join(tokens)
        return joined.replace(' .', '.')
# ========================#
# SCRAPING #
# ========================#
def scrape_news(organization: str) -> list:
    """Query NewsAPI for up to 10 relevant English articles about `organization`.

    Returns the raw NewsAPI response dict (with an 'articles' key), or None
    when the request fails or the NEWSAPI env var is missing (any exception
    is swallowed). NOTE(review): the from/to dates are hard-coded to a
    Dec 2022 / Jan 2023 window — likely stale; confirm intent.
    """
    # sourcery skip: inline-immediately-returned-variable, use-contextlib-suppress
    try:
        # newsAPI
        api_key=os.getenv('NEWSAPI')
        newsapi = NewsApiClient(api_key=api_key)
        # first page of relevancy-sorted results, 10 articles
        all_articles = newsapi.get_everything(q=organization, from_param='2022-12-20', to='2023-01-12', language='en', sort_by='relevancy', page=1, page_size=10)
        return all_articles
    except Exception as e:
        pass
# ========================#
# WRITE TO CSV #
# ========================#
def write_to_csv(organization: str, all_articles: dict) -> None:
    """Write the scraped articles (plus cleaned page text) to CSVs/COMMON.csv.

    NOTE(review): assumes the CSVs/ directory exists and that every article
    has non-None 'title'/'description' fields (.strip() would raise) —
    confirm against the NewsAPI response shape.
    """
    with open('CSVs/COMMON.csv', 'w', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["Article", "Title", "Description", "URL", "Content", "Published"])
        for idx, article in enumerate(all_articles['articles'], start=1):
            title= article['title'].strip()
            description= article['description'].strip()
            publishedAt= article['publishedAt']
            newsURL= article['url']
            # Download and clean the article's full text from its URL.
            content= parse_text_from_web(newsURL)
            content= cleanup_text(content)
            writer.writerow([idx, article['title'], article['description'], article['url'], content, publishedAt])
            print(f"✅ [bold green]SUCCESS! Wrote {idx} - [bold blue]{title}[/bold blue] to [gold1]{organization}[/gold1].csv")
        print(f"[bold green]DONE! WROTE {len(all_articles['articles'])} ARTICLES TO [r]COMMON.csv[/r][/bold green]")
# ========================#
# SENTIMENT scoring #
# ========================#
# Get the headline of an article from its URL.
def get_headline(content, organization):
    """Fetch `content` (a URL) and return the page's first <h1> text.

    Headlines of two or fewer words are treated as boilerplate and replaced
    with "No Headline". `organization` is unused but kept for call-site
    compatibility.
    """
    r = requests.get(content)
    # Parse the HTML and pull the first <h1>.
    soup = BeautifulSoup(r.content, "html.parser")
    h1 = soup.find('h1')
    if h1:
        # Bug fix: get_text() keeps the surrounding newlines/indentation from
        # the HTML; strip it so sentiment scoring sees a clean headline.
        headline = h1.get_text().strip()
        if len(headline.split()) <= 2:
            headline = "No Headline"
    else:
        headline = "No Headline"
    return headline
def sentiment_score_to_summary(sentiment_score: int) -> str:
    """
    Converts the sentiment score to a summary.

    Args:
        sentiment_score (int): sentiment score (1..5 stars)

    Returns:
        str: summary of the sentiment score, or None for out-of-range values
    """
    summaries = {
        1: "Extremely Negative",
        2: "Somewhat Negative",
        3: "Generally Neutral",
        4: "Somewhat Positive",
        5: "Extremely Positive",
    }
    return summaries.get(sentiment_score)
# Calculate the sentiment summary for a piece of text.
def sentiment_analysis(content: str) -> str:
    """
    Performs sentiment analysis on the text and returns a summary label.

    Args:
        content (str): text to be analyzed (truncated to the model's max length)

    Returns:
        str: one of the five labels from sentiment_score_to_summary
    """
    # NOTE: loading the model/tokenizer on every call is slow; transformers
    # caches the download on disk after the first call.
    tokenizer = AutoTokenizer.from_pretrained(
        "nlptown/bert-base-multilingual-uncased-sentiment")
    model = AutoModelForSequenceClassification.from_pretrained(
        "nlptown/bert-base-multilingual-uncased-sentiment")
    tokens = tokenizer.encode(
        content, return_tensors='pt', truncation=True, padding=True)
    result = model(tokens)
    # Bug fix: the original had a bare `result.logits` expression statement,
    # which did nothing; the logits are only needed in the argmax below.
    # Star ratings are 1..5; argmax is 0-based, hence the +1.
    sentiment_score = int(torch.argmax(result.logits)) + 1
    return sentiment_score_to_summary(sentiment_score)
# sourcery skip: identity-comprehension
def process_csv(organization):
    """Score every article in CSVs/COMMON.csv and write CSVs/COMMON-PROCESSED.csv.

    For each article: fetch the headline, score its sentiment, and scan the
    body with spaCy against the word-store lists to accumulate an offense
    rating plus negative/offensive word lists and topic tags.
    NOTE(review): relies on the module-level `nlp` pipeline and the word-store
    files being present; any failure is caught and printed, not re-raised.
    """
    with open ('word-store/negative_words.txt', 'r', encoding='utf-8') as file:
        negative_words_list = file.read().splitlines()
    with open ('word-store/bad_words.txt', 'r', encoding='utf-8') as file:
        bad_words = file.read().splitlines()
    with open ('word-store/countries.txt', 'r', encoding='utf-8') as file:
        countries = file.read().splitlines()
    with open('word-store/lawsuits.txt', 'r', encoding='utf-8') as file:
        lawsuits = file.read().splitlines()
    with open('word-store/harassment.txt', 'r', encoding='utf-8') as file:
        harassment = file.read().splitlines()
    # ========================#
    #    Creating Final csv   #
    # ========================#
    # defining charset
    with open('CSVs/COMMON-PROCESSED.csv', 'w', encoding='utf-8', newline='') as summary:
        # read the article rows produced by write_to_csv
        with open('CSVs/COMMON.csv', 'r', encoding='utf-8') as file:
            try:
                reader = csv.reader(file)
                next(reader)
                # write to csv
                writer = csv.writer(summary)
                # do for every news article
                writer.writerows([["Article", "Headline", "Headline Sentiment", "Offense Rating", "Negative Words", "Offensive Words", "Tags"]])
                print("[bold gold1]===============================[/bold gold1]\n\n")
                for idx, row in enumerate(reader, start=1):
                    url= row[3]
                    raw_text = row[4]
                    headline=get_headline(url, organization)
                    headline_sentiment=sentiment_analysis(headline)
                    negative_words=[]
                    offensive_words=[]
                    tags=[]
                    # init offense rating
                    offense_rating=0
                    # negative headline sentiment weighs heaviest
                    if headline_sentiment == "Extremely Negative":
                        offense_rating+=200
                    elif headline_sentiment == "Somewhat Negative":
                        offense_rating+=100
                    nlp_text= nlp(raw_text)
                    # scan each token against the word-store lists
                    for word in nlp_text:
                        # if it is a negative word
                        if word.text.lower() in negative_words_list:
                            offense_rating+=10
                            negative_words.append(word.text)
                        # if it is a highly offensive word
                        elif word.text.lower() in bad_words:
                            offense_rating+=50
                            offensive_words.append(word.text)
                        # if the article talks about lawsuits
                        if word.text.lower() in lawsuits:
                            offense_rating+=30
                            tags.append("lawsuit")
                        # if the article is about harassment
                        if word.text.lower() in harassment:
                            offense_rating+=50
                            tags.append("harassment")
                        # does article mention a country?
                        if word.text.lower() in countries:
                            tags.append("country")
                        # does article mention a person
                        if word.ent_type_ == "PERSON":
                            tags.append(word)
                    if offense_rating>20:
                        offense_rating-=10
                    # Write each row
                    writer.writerow(
                        [
                            idx,
                            headline,
                            headline_sentiment,
                            offense_rating,
                            list(negative_words),
                            list(offensive_words),
                            list(tags),
                        ]
                    )
                    print(f"Article {idx} written to csv")
                print(f"✔ [bold u r]\nSUCCESS! Finished processing COMMON-PROCESSED.csv[/bold u r]")
            except Exception as e:
                print(e)
                print(e.__class__)
                print(e.__doc__)
                print(e.__traceback__)
# ========================#
# Display temp output #
# ========================#
# Visualize the combined article text as HTML with entity highlighting.
def visualize(organization):
    """Concatenate all article bodies and serve spaCy's displacy NER view.

    NOTE: displacy.serve() blocks, running a local web server until killed.
    """
    raw_text = ''
    with open('CSVs/COMMON.csv', 'r', encoding='utf-8') as file:
        reader = csv.reader(file)
        next(reader)
        # accumulate the Content column (index 4) of every article row
        for idx, row in enumerate(reader, start=1):
            raw_text += row[4]
    nlp_text = nlp(raw_text)
    print("\n🚀 [bold magenta r]NER COMPLETE, all words tagged...[/bold magenta r]")
    # serve the displacy visualizer
    displacy.serve(nlp_text, style="ent")
# ========================#
# Merging Raw data #
# ========================#
def merge_csv(csv1, csv2, organization):
    """Merge two article CSVs on the 'Article' column into CSVs/COMMON-ANALYSIS.csv.

    `organization` is unused but kept for call-site compatibility.
    """
    df1 = pd.read_csv(csv1, encoding='unicode_escape')
    df2 = pd.read_csv(csv2, encoding='unicode_escape')
    # Inner join on the shared article index.
    df = pd.merge(df1, df2, on='Article')
    # Dead code removed: the original imported random and generated an unused
    # number for a commented-out backup/rename scheme.
    df.to_csv('CSVs/COMMON-ANALYSIS.csv', index=False)
    print("CSVs merged to COMMON-ANALYSIS.csv")
# ========================#
# cleaing up -2 #
# ========================#
# RUN SAME FUNCTION TWICE
def final_cleanup(organization):
    """Normalise CSVs/COMMON-ANALYSIS.csv in place.

    Fills empty word/tag cells with '-' and strips the Python-list
    punctuation ([, ], ') left over from writing lists into CSV cells.
    Idempotent; `organization` is unused but kept for call-site symmetry.
    """
    df = pd.read_csv('CSVs/COMMON-ANALYSIS.csv', encoding='unicode_escape')
    list_columns = ('Offensive Words', 'Negative Words', 'Tags')
    for column in list_columns:
        df[column] = df[column].fillna('-')
    for column in list_columns:
        df[column] = df[column].str.replace('[', '').str.replace(']', '').str.replace("'", '')
    df.to_csv('CSVs/COMMON-ANALYSIS.csv', index=False)
# Get the organizations' publisher names from the article URLs.
def get_sub_url(organization):
    """Print the publisher names extracted from column 5 of COMMON-ANALYSIS.csv.

    Bug fix: the original nested `open(..., 'w')` on the SAME file while
    reading it, truncating CSVs/COMMON-ANALYSIS.csv before any row could be
    read (and it never wrote anything). Now the file is only read.
    """
    with open('CSVs/COMMON-ANALYSIS.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        url = [row[4] for row in reader]
    # Remove www. and https:// prefixes from the URLs.
    url = [re.sub(r'www.', '', i) for i in url]
    url = [re.sub(r'https://', '', i) for i in url]
    publisher = []
    for x in url:
        name = x.split('.com/')[0]
        publisher.append(name)
    # Collapse entries longer than 40 chars (no clean publisher found) to '-'.
    publisher = [re.sub(r'.{40,}', '-', i) for i in publisher]
    print(publisher)
    print("CSVs cleaned up to COMMON-ANALYSIS.csv")
# sourcery skip: identity-comprehension
# Load spaCy's transformer-based English pipeline once at import time
# (slow to initialise; requires the en_core_web_trf model to be installed).
nlp = spacy.load("en_core_web_trf")
# ========================#
# Console Output #
# ========================#
# no tests for this function as it is not called anywhere in the command directly
def get_terminal_width() -> int:
    """
    Gets the width of the terminal.

    Returns:
        int: width of the terminal (80 when no terminal is attached;
        one column less on Windows to avoid line-wrap artifacts).
    """
    try:
        width = os.get_terminal_size().columns
    except OSError:
        # Not attached to a terminal (pipes, CI): fall back to a sane default.
        width = 80
    if system().lower() == "windows":
        width -= 1
    return width
def print_banner(console) -> None:
    """
    Prints the banner of the application.

    Args:
        console (Console): Rich console object.
    """
    # ASCII-art wordmark rendered inside a full-width rich Panel.
    banner = """
 :::: ::::  :::::::::: :::::::::  :::::::::::     :::     ::: ::: :::::::::: ::: :::    ::: ::: :::::::::   ::::::::::: ::::::::
+:+:+: :+:+:+ :+:        :+:    :+:    :+:       :+: :+:   :+: :+: :+:        :+: :+:    :+: :+: :+:    :+:      :+:    :+:    :+:
+:+ +:+:+ +:+ +:+        +:+    +:+    +:+      +:+   +:+  +:+ +:+ +:+        +:+ +:+    +:+ +:+ +:+    +:+      +:+    +:+
+#+  +:+  +#+ +#++:++#   +#+    +:+    +#+     +#++:++#++: +#++:++#++: +#+    +:+ +#+ +#++:++#++ +#+ +#++:  +#++:++#++ +#+ +#++:++#++
+#+       +#+ +#+        +#+    +#+    +#+     +#+     +#+ +#+ +#+        +#+ +#+    +#+ +#+ +#+    +#+      +#+    +#+
#+#       #+# #+#        #+#    #+#    #+#     #+#     #+# #+# #+#        #+# #+#    #+# #+# #+#    #+#      #+#    #+#
###       ### ########## #########  ###########     ### ### ### ### ########## ###   #### ### ### ########## ### ######## ########### ########
"""
    width = get_terminal_width()
    height = 10
    # defining the panel
    panel = Panel(
        Align(
            Text(banner, style="green"),
            vertical="middle",
            align="center",
        ),
        width=width,
        height=height,
        subtitle="[bold blue]Built for CRIF Hackathon 2023![/bold blue]",
    )
    console.print(panel)
# ========================#
#    Call of functions    #
# ========================#
# Start the CLI: banner, prompt, scrape, analyse, merge, clean, visualise.
console = Console(record=False, color_system="truecolor")
print_banner(console)
print(Panel.fit("[bold green reverse]ENTER AN ORGANIZATION NAME TO PERFORM MEDIA ANALYSIS ON[/bold green reverse]"))
organization = input()
articles = scrape_news(organization)
write_to_csv(organization, articles)
process_csv(organization)
file1 = 'CSVs/COMMON.csv'
# Bug fix: process_csv() writes 'CSVs/COMMON-PROCESSED.csv' (upper case); the
# original lower-case 'CSVs/COMMON-processed.csv' only worked on
# case-insensitive filesystems (Windows/macOS) and broke on Linux.
file2 = 'CSVs/COMMON-PROCESSED.csv'
merge_csv(file1, file2, organization)
# final_cleanup is idempotent; the original's redundant second call removed.
final_cleanup(organization)
# get_sub_url(organization)
print(Panel.fit("[bold green reverse]ANALYSIS COMPLETE.[/bold green reverse]\nNow performing Named Entity Recognition on the articles and preparing a visualization."))
visualize(organization)
| HighnessAtharva/CRIF-Hackathon-2023 | SCRAPER.py | SCRAPER.py | py | 17,393 | python | en | code | 1 | github-code | 36 |
3918203704 | import re
import os
import string
import shutil
import tempfile
import fontforge
import argparse
from string import Template
from pathlib import Path
from bs4 import BeautifulSoup
from bs4.formatter import XMLFormatter
class Colors:
    """ANSI escape codes used for coloured console output."""
    OK = '\033[92m'      # green
    INFO = '\033[94m'    # blue
    WARN = '\033[93m'    # yellow
    FAIL = '\033[91m'    # red
    ENDC = '\033[0m'     # reset all attributes
    BOLD = '\033[1m'
class SVGProcessor:
    """Builds an icon font (plus C++/QML lookup helpers) from a tree of SVG files."""
    # Configuration defaults; presumably overridden externally before run() —
    # TODO confirm how callers set _qt/_out_path etc.
    _path = None                     # root directory scanned for *.svg sources
    _spool = None                    # scratch directory for normalised SVG copies
    _font_name = 'IconFont'
    _qml_namespace = 'IconFont'
    _qml_element_name = 'Icon'
    _copyright = '(C) 2020 GONICUS GmbH'
    _out_path = '.'                  # where generated .h/.cpp/.qml files go
    _strip_rect = False
    _qt = False                      # when True, emit Qt/QML integration code

    def __init__(self, source_path, spool):
        # source_path: SVG source tree; spool: temp dir for intermediate SVGs
        self._path = source_path
        self._spool = spool
    def run(self):
        """Process every SVG under self._path, then generate font + code files.

        Each accepted SVG is written to the spool dir under a glyph index
        (starting at 61000, a private-use codepoint range); objects_lt maps
        index -> relative icon path without extension.
        """
        objects = {}
        objects_lt = {}
        index = 61000
        for path in Path(self._path).rglob('*.svg'):
            try:
                svg = BeautifulSoup(open(path).read().encode('utf-8'), 'xml')
            except FileNotFoundError:
                print(f"{Colors.FAIL}✗{Colors.ENDC} file not found {Colors.BOLD}{path}{Colors.ENDC}")
                return
            # _process is defined elsewhere in this class; presumably it
            # normalises the SVG and returns False to skip it — TODO confirm.
            if self._process(svg, path):
                spool_name = os.path.join(self._spool, f'{index}.svg')
                with open(spool_name, 'w') as f:
                    f.write(svg.prettify(formatter=XMLFormatter()))
                objects[index] = spool_name
                # Relative path (minus self._path prefix) without the extension.
                objects_lt[index] = os.path.splitext(str(path)[len(self._path) + 1:])[0]
                index += 1
        # Do font processing
        if self._make_font(objects):
            self._write_header()
            self._write_cpp(objects_lt)
            if self._qt:
                self._write_qml()
    def _write_header(self):
        """Generate the C++ header: a Qt QML resolver class when _qt is set,
        otherwise a plain namespace with an index() lookup function."""
        font_name = self._font_name.upper()
        file_name = self._font_name + '.h'
        if self._qt:
            header = Template("""#ifndef ${FONT_NAME}_H
#define ${FONT_NAME}_H
#include <QObject>
#include <QtQml>
class ${NAME}Resolver : public QObject {
    Q_OBJECT
    QML_ELEMENT
public:
    explicit ${NAME}Resolver(QObject* parent = nullptr);
    virtual ~${NAME}Resolver() {}
    Q_INVOKABLE quint16 indexOfPath(const QString& iconPath);
};
#endif
""")
        else:
            header = Template("""#ifndef ${FONT_NAME}_H
#define ${FONT_NAME}_H
#include <cstdint>
#include <string>
namespace $NAME {
    uint16_t index(const std::string& path);
}
#endif
""")
        # Substitute the font name into the template and write it out.
        with open(os.path.join(self._out_path, file_name), 'w') as f:
            f.write(header.substitute(FONT_NAME=font_name, NAME=self._font_name))
            print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _write_cpp(self, objects):
font_name = self._font_name.upper()
file_name = self._font_name + '.cpp'
data = '\n'.join(f' {{ "{name}", {index} }},' for index, name in objects.items())
if self._qt:
code = Template("""#include <QFontDatabase>
#include <QHash>
#include "${NAME}.h"
${NAME}Resolver::${NAME}Resolver(QObject* parent) : QObject(parent) {
static bool initialized = false;
if (!initialized) {
initialized = true;
QFontDatabase::addApplicationFont(":/${NAME}.ttf");
}
}
quint16 ${NAME}Resolver::indexOfPath(const QString& iconPath) {
static QHash<const QString, quint16> lookup_table {
$DATA
};
return lookup_table.value(iconPath, 0);
}
""")
else:
code = Template("""#include <iostream>
#include <map>
#include "${NAME}.h"
namespace $FONT_NAME {
uint16_t index(const std::string& path) {
static std::map<std::string, uint16_t> lookup_table {
$DATA
};
auto idx = lookup_table.find(path);
return idx == lookup_table.end() ? 0 : idx->second;
}
}
""")
with open(os.path.join(self._out_path, file_name), 'w') as f:
f.write(code.substitute(NAME=self._font_name, FONT_NAME=font_name, DATA=data))
print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _write_qml(self):
font_name = self._font_name.upper()
file_name = self._font_name + '.qml'
code = Template("""import QtQuick 2.15
import ${COMPONENT} 1.0 as IconFont
/// Loads and displays an icon of the icon font by giving the path to the icon svg file
Item {
id: control
width: icon.implicitWidth
height: control.size
/// Path to the icon svg file that should be loaded; empty string (default) unloads the icon
property string iconPath
/// Size of the icon in pixels (default: 32)
property int size: 32
/// Color of the icon (default: black)
property alias color: icon.color
IconFont.${NAME}Resolver {
id: resolver
}
Text {
id: icon
text: String.fromCharCode(resolver.indexOfPath(control.iconPath))
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
anchors.centerIn: parent
font.family: "${NAME}"
font.pixelSize: control.size
}
}
""")
with open(os.path.join(self._out_path, self._qml_element_name + ".qml"), 'w') as f:
f.write(code.substitute(FONT_NAME=font_name, NAME=self._font_name, COMPONENT=self._qml_namespace))
print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _process(self, svg, path):
# Skip icons that have no square dimensions
main = svg.find('svg')
if 'width' in main and 'height' in main:
if main['width'] != main['height']:
print(f"{Colors.WARN}âš {Colors.ENDC} {Colors.BOLD}{path}{Colors.ENDC} aspect ratio is not 1:1 - skipping")
return False
# Remove unit from size
width = int(re.findall(r'\d+', main['width'])[0])
height = int(re.findall(r'\d+', main['height'])[0])
# Remove bounding rectangles if any
if self._strip_rect:
for rect in svg.find_all('rect'):
if int(re.findall(r'\d+', rect['height'])[0]) == height and int(re.findall(r'\d+', rect['width'])[0]) == width:
rect.extract()
# Find element
element = self._findElement(svg)
# Check if there's no element
if len(svg.find_all(element)) == 0:
print(f"{Colors.WARN}âš {Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} has no relevant elements - skipping")
return False
# Check if there's more than one element
if len(svg.find_all(element)) != 1:
print(f"{Colors.INFO}🛈{Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} has no too many elements")
# Skip icons that use a 'rotate'
if svg.find(element, transform=re.compile('^rotate\(')):
print(f"{Colors.WARN}âš {Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} contains rotation - skipping")
return False
return True
def _findElement(self, svg):
for el in ['path', 'polygon', 'rect', 'circle']:
if len(svg.find_all(el)) != 0:
return el
return None
def _make_font(self, objects):
first = True
font = fontforge.font()
font.encoding = 'UnicodeFull'
font.fontname = self._font_name
font.familyname = self._font_name
font.fullname = self._font_name
font.copyright = self._copyright
for index, path in objects.items():
if first:
char = font.createChar(87)
char.importOutlines(str(path))
first = False
char = font.createChar(index)
try:
char.importOutlines(str(path))
except FileNotFoundError:
print(f"{Colors.FAIL}✗{Colors.ENDC} file not found {Colors.BOLD}{path}{Colors.ENDC}")
return False
font.selection.all()
path = os.path.join(self._out_path, self._font_name + ".ttf")
font.generate(path)
print(f'{Colors.OK}✓{Colors.ENDC} {path} has been generated')
return True
def __set_font_name(self, name):
allowed = set(string.ascii_lowercase + string.ascii_uppercase + string.digits + '_')
if set(name) <= allowed:
self._font_name = name
else:
print(f"{Colors.FAIL}✗{Colors.ENDC} only uppercase/lowercase characters, digits and _ are allowed for the font name")
exit()
def __get_font_name(self):
return self._font_name
def __set_out_path(self, path):
self._out_path = path
def __get_out_path(self):
return self._out_path
def __set_copyright(self, data):
self._copyright = data
def __get_copyright(self):
return self._copyright
def __set_strip_rect(self, data):
self._strip_rect = data
def __get_strip_rect(self):
return self._strip_rect
def __set_qt(self, data):
self._qt = data
def __get_qt(self):
return self._strip_rect
def __set_qml_element(self, data):
self._qml_element_name = data
def __get_qml_element(self):
return self._qml_element_name
def __set_qml_namespace(self, data):
self._qml_namespace = data
def __get_qml_namespace(self):
return self._qml_namespace
font_name = property(__get_font_name, __set_font_name)
out = property(__get_out_path, __set_out_path)
copyright = property(__get_copyright, __set_copyright)
strip_rect = property(__get_strip_rect, __set_strip_rect)
qt = property(__get_qt, __set_qt)
qml_namespace = property(__get_qml_namespace, __set_qml_namespace)
qml_element = property(__get_qml_element, __set_qml_element)
if __name__ == '__main__':
    # CLI entry point: parse options, then run the processor with a temporary
    # spool directory that is cleaned up automatically.
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('--font-name', help='name of the generated font', default='IconFont')
    parser.add_argument('--copyright', help='copyright notice placed inside the generated TTF file', default='(C) 2020 GONICUS GmbH')
    parser.add_argument('--output', help='path where generated files are placed', default='.')
    # Fixed copy-pasted help text (previously duplicated the --output help).
    parser.add_argument('--strip-bounding-rect', action="store_true", help='strip bounding rectangles that span the full icon size')
    parser.add_argument('--qt', action="store_true", help='whether to build Qt/QML style output files')
    parser.add_argument('--qml-namespace', help='name of the QML namespace used in your .pro file', default='IconApp')
    parser.add_argument('--qml-element', help='name of the QML icon element for this font', default='Icon')
    args = parser.parse_args()

    with tempfile.TemporaryDirectory() as spool:
        processor = SVGProcessor(args.source, spool)
        processor.font_name = args.font_name
        processor.out = args.output
        processor.copyright = args.copyright
        processor.strip_rect = args.strip_bounding_rect
        processor.qt = args.qt
        processor.qml_element = args.qml_element
        processor.qml_namespace = args.qml_namespace
        processor.run()
        # Bugfix: was `del processo` (NameError).
        del processor
35622258622 | import boto3
import base64
import os
# Name of the SageMaker endpoint, injected via the Lambda environment.
ENDPOINT_NAME = os.environ['ENDPOINT_NAME']


def lambda_handler(event, context):
    """Forward the request body to the SageMaker endpoint and return the inference."""
    # The SageMaker runtime client is what lets us invoke the deployed endpoint.
    sagemaker_runtime = boto3.Session().client('sagemaker-runtime')
    # Send the base64-encoded image straight through to the endpoint.
    endpoint_response = sagemaker_runtime.invoke_endpoint(
        EndpointName=ENDPOINT_NAME,
        ContentType='application/json',
        Body=event['body'],
    )
    # The endpoint answers with an HTTP response whose body holds the inference.
    inference = endpoint_response['Body'].read().decode('utf-8')
    cors_headers = {
        'Content-Type': 'text/plain',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
        'Access-Control-Allow-Methods': 'OPTIONS,GET,POST',
    }
    return {
        'statusCode': 200,
        'headers': cors_headers,
        'body': inference,
    }
6184910677 | """
My simple timing wsgi middleware. Should serve as wsgi app for gunicorn, and
as wsgi server for django. Starts timing berfore calling django routines,
stops upon receiving the start_response
"""
import time
class TimingWSGIMiddleware:
    """WSGI middleware that measures wall-clock and CPU time of the wrapped app
    and appends the measurements to the response body.

    Timing starts before the wrapped app is called and stops when the app
    invokes ``start_response``.
    """

    def __init__(self, djangoapp):
        """Wrap a WSGI callable:

        application = TimingWSGIMiddleware(djangoapplication)
        """
        self.djangoapp = djangoapp

    def __call__(self, environ, start_response):
        # Bugfix: time.clock() was removed in Python 3.8; prefer
        # time.process_time() and fall back to time.clock() on Python 2.
        cpu_clock = getattr(time, 'process_time', None) or getattr(time, 'clock', None)

        def start_response_wrapper(start_response_ref, timings):
            def start_response_runner(status, response_headers, exc_info=None):
                # The wrapped app calling start_response marks the end of timing.
                timings.update(dict(
                    end_cpu=cpu_clock(),
                    end_real=time.time(),
                ))
                start_response_ref(status, response_headers, exc_info)
            return start_response_runner

        timings = dict(
            start_cpu=cpu_clock(),
            start_real=time.time(),
        )
        result_ = self.djangoapp(environ, start_response_wrapper(start_response, timings))
        result = list(result_)
        # NOTE(review): PEP 3333 requires the response iterable to yield bytes;
        # these str lines assume a text (Python 2 style) pipeline — verify.
        result.append("%f Real Seconds\n<br>" % (timings['end_real'] - timings['start_real']))
        result.append("%f CPU seconds\n<br>" % (timings['end_cpu'] - timings['start_cpu']))
        return result
from cProfile import Profile
from pstats import Stats
# Bugfix: `import StringIO` is Python-2 only; io.StringIO is the Python 3
# equivalent with the same StringIO() constructor.
try:
    import StringIO
except ImportError:
    import io as StringIO

# Sort order for the profiler report: hottest functions first.
sort_tuple = ('time', 'calls')


class ProfilingWSGIMiddleware:
    """WSGI middleware that profiles the wrapped app with cProfile and appends
    the timing summary and profiler statistics to the response body."""

    def __init__(self, djangoapp):
        """Wrap a WSGI callable:

        application = ProfilingWSGIMiddleware(djangoapplication)
        """
        self.djangoapp = djangoapp

    def __call__(self, environ, start_response):
        # time.clock() was removed in Python 3.8; use process_time when available.
        cpu_clock = getattr(time, 'process_time', None) or getattr(time, 'clock', None)

        def start_response_wrapper(start_response_ref, timings, prof):
            def start_response_runner(status, response_headers, exc_info=None):
                # The app calling start_response ends timing and freezes the profile.
                timings.update(dict(
                    end_cpu=cpu_clock(),
                    end_real=time.time(),
                ))
                prof.create_stats()
                start_response_ref(status, response_headers, exc_info)
            return start_response_runner

        timings = dict(
            start_cpu=cpu_clock(),
            start_real=time.time(),
        )
        prof = Profile()
        wsgiargs = [environ, start_response_wrapper(start_response, timings, prof)]
        result_ = prof.runcall(self.djangoapp, *wsgiargs)
        out = StringIO.StringIO()
        stats = Stats(prof, stream=out)
        stats.sort_stats(*sort_tuple)
        stats.print_stats()
        result = list(result_)
        result.append("<pre>%f Real Seconds\n" % (timings['end_real'] - timings['start_real']))
        result.append("%f CPU seconds\n" % (timings['end_cpu'] - timings['start_cpu']))
        result.append("%s</pre>" % out.getvalue())
        return result
| temaput/practica.ru | practica/practica/timingwsgi.py | timingwsgi.py | py | 3,289 | python | en | code | 0 | github-code | 36 |
15466868724 | # name = "marine"
# hp = 40
# atk = 5
# print("Unit {0} is created.".format(name))
# print("HP {0}, Attack {1}\n".format(hp, atk))
# tank_name = "tank"
# tank_hp = 150
# tank_atk = 35
# print("Unit {0} is created.".format(tank_name))
# print("HP {0}, Attack {1}\n".format(tank_hp, tank_atk))
# tank2_name = "tank"
# tank2_hp = 150
# tank2_atk = 35
# print("Unit {0} is created.".format(tank2_name))
# print("HP {0}, Attack {1}\n".format(tank2_hp, tank2_atk))
# def attack(name, location, atk):
# print("{0} attacks enemies in {1}. [Attack: {2}]".format(name, location, atk))
# attack(name, "1 o'clock", atk)
# attack(tank_name, "1 o'clock", tank_atk)
# attack(tank2_name, "1 o'clock", tank2_atk)
class Unit:
    """A basic game unit; announces its stats when constructed."""

    def __init__(self, name, hp, atk):
        # Stats are stored as instance members, then the creation banner is printed.
        self.name, self.hp, self.atk = name, hp, atk
        for line in ("Unit {0} is created.".format(self.name),
                     "HP {0}, Attack {1}".format(self.hp, self.atk)):
            print(line)
# marine1 = Unit("marine", 40, 5) #class로부터 만들어지는 marine1 등은 객체
# marine2 = Unit("marine", 40, 5) #marine1 등은 Unit class의 instance라고 표현
# tank = Unit("tank", 150, 35)
# wraith1 = Unit("wraith", 80, 5)
# print("unit name: {0}, attack: {1}".format(wraith1.name, wraith1.atk))
# wraith2 = Unit("lost wraith", 80, 5)
# wraith2.clocking = True #class 외부에서도 변수 확장 가능, 단 내가 선언한 객체에만 적용
# if wraith2.clocking == True:
# print("{0} is cloaked.".format(wraith2.name))
class AttackUnit:
    """A game unit that can attack a location and take damage."""

    def __init__(self, name, hp, atk):
        self.name = name  # display name
        self.hp = hp      # remaining hit points
        self.atk = atk    # damage dealt per attack

    def attack(self, location):
        """Announce an attack on *location* with this unit's attack power."""
        print("{0} attacks enemies in {1}. [Attack {2}]".format(self.name, location, self.atk))

    def damaged(self, atk):
        """Apply *atk* points of damage; report destruction at 0 HP or below."""
        print("{0} is damaged as {1} point(s).".format(self.name, atk))
        self.hp -= atk
        print("{0}'s hp is {1}.".format(self.name, self.hp))
        if self.hp <= 0:
            # Fixed typo in the output message: "destoryed" -> "destroyed".
            print("{0} is destroyed.".format(self.name))
# Demo: create a firebat, attack once, then apply two hits (the second one lethal).
firebat1 = AttackUnit("firebat", 50, 16)
firebat1.attack("5 o'clock")
firebat1.damaged(25)
firebat1.damaged(25)
5547367389 | """
Tests for Voting 21/06/2022 [Lido app for Goerli].
"""
import pytest
from brownie import interface
from scripts.vote_2022_06_21_goerli_lido_app import (
start_vote,
get_lido_app_address,
get_lido_app_old_version,
)
from utils.test.tx_tracing_helpers import *
from utils.config import network_name
from utils.config import lido_dao_lido_repo
def get_lido_app_old_content_uri():
    """Return the hex-encoded ipfs content URI of the currently deployed Lido app."""
    network = network_name()
    if network in ("goerli", "goerli-fork"):
        return (
            "0x697066733a516d526a43546452626a6b4755613774364832506e7377475a7965636e4e5367386f736b346b593269383278556e"
        )
    if network in ("mainnet", "mainnet-fork"):
        return (
            "0x697066733a516d516b4a4d7476753474794a76577250584a666a4c667954576e393539696179794e6a703759714e7a58377053"
        )
    assert False, f'Unsupported network "{network}"'
def get_lido_app_old_ipfs_cid():
    """Return the bare ipfs CID of the currently deployed Lido app release."""
    network = network_name()
    if network in ("goerli", "goerli-fork"):
        return "QmRjCTdRbjkGUa7t6H2PnswGZyecnNSg8osk4kY2i82xUn"
    if network in ("mainnet", "mainnet-fork"):
        return "QmQkJMtvu4tyJvWrPXJfjLfyTWn959iayyNjp7YqNzX7pS"
    assert False, f'Unsupported network "{network}"'
# Release of the Lido app currently registered in the Aragon Repo
# (the "before" state asserted prior to executing the vote).
lido_old_app = {
    "address": get_lido_app_address(),
    "ipfsCid": get_lido_app_old_ipfs_cid(),
    "content_uri": get_lido_app_old_content_uri(),
    "version": get_lido_app_old_version(),
}

# Expected release after the vote executes: same app address, new ipfs
# content and a bumped semantic version.
lido_new_app = {
    "address": get_lido_app_address(),
    "ipfsCid": "QmScYxzmmrAV1cDBjL3i7jzaZuiJ76UqdaFZiMgsxoFGzC",
    "content_uri": "0x697066733a516d536359787a6d6d724156316344426a4c3369376a7a615a75694a373655716461465a694d6773786f46477a43",
    "version": (8, 0, 4),
}
def test_vote(helpers, accounts, ldo_holder, dao_voting, vote_id_from_env, bypass_events_decoding, dao_agent, lido):
    """End-to-end check that the vote bumps the Lido app release in the Aragon repo.

    Asserts the pre-vote release matches `lido_old_app`, executes the vote,
    then asserts the new release matches `lido_new_app`.
    """
    # Validate old Lido app
    lido_repo = interface.Repo(lido_dao_lido_repo)
    lido_old_app_from_chain = lido_repo.getLatest()
    print(lido_old_app_from_chain)
    # check old versions of lido app is correct
    # getLatest() returns (version, contractAddress, contentURI)
    assert lido_old_app["address"] == lido_old_app_from_chain[1]
    assert lido_old_app["version"] == lido_old_app_from_chain[0]
    assert lido_old_app["content_uri"] == lido_old_app_from_chain[2]
    # check old ipfs link: the contentURI bytes decode to "ipfs:<CID>"
    bytes_object = lido_old_app_from_chain[2][:]
    lido_old_ipfs = bytes_object.decode("ASCII")
    lido_old_app_ipfs = f"ipfs:{lido_old_app['ipfsCid']}"
    assert lido_old_app_ipfs == lido_old_ipfs
    # START VOTE (re-use a pre-created vote id if the env provides one)
    vote_id = vote_id_from_env or start_vote({"from": ldo_holder}, silent=True)[0]
    tx: TransactionReceipt = helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting, skip_time=3 * 60 * 60 * 24
    )
    # validate vote events
    assert count_vote_items_by_events(tx, dao_voting) == 1, "Incorrect voting items count"
    # Validate vote items 4: new lido app
    ## check only version and ipfs was changed
    lido_new_app_from_chain = lido_repo.getLatest()
    assert lido_new_app["address"] == lido_new_app_from_chain[1]
    assert lido_new_app["version"] == lido_new_app_from_chain[0]
    assert lido_new_app["content_uri"] == lido_new_app_from_chain[2]
    ## check new ipfs link
    bytes_object = lido_new_app_from_chain[2][:]
    lido_old_ipfs = bytes_object.decode("ASCII")
    lido_new_app_ipfs = f"ipfs:{lido_new_app['ipfsCid']}"
    assert lido_new_app_ipfs == lido_old_ipfs
    display_voting_events(tx)
    # Event decoding is not available on goerli forks; stop here in that case.
    if bypass_events_decoding or network_name() in ("goerli", "goerli-fork"):
        return
| lidofinance/scripts | archive/tests/test_2022_06_21_2_goerli_lido_app.py | test_2022_06_21_2_goerli_lido_app.py | py | 3,523 | python | en | code | 14 | github-code | 36 |
# Python code to run a loop over the files in a folder and print details
# for each .mp4 video file found.
import os

# Folder scanned for video files; extracted to one place so the path is not
# duplicated across the script.
VIDEO_DIR = "/Users/walikhan/Work/Python_YouTube_Auto/vid"

# Loop over every entry in the folder.
for filename in os.listdir(VIDEO_DIR):
    # Only process .mp4 video files (non-matching entries are simply skipped).
    if filename.endswith(".mp4"):
        # Printing the file name
        print("File name with extension", filename)
        # Complete file path
        print("File path - ", os.path.join(VIDEO_DIR, filename))
        # os.path.splitext is safer than slicing off the last 4 characters.
        print("File name without extension", os.path.splitext(filename)[0])
from pprint import pprint
from bs4 import BeautifulSoup
import requests
import pandas as pd
import pprint  # NOTE(review): this shadows the `pprint` function imported above

# Rows harvested from the Wikipedia results table.
election = []

# Pretend to be a desktop browser so Wikipedia serves the full page.
user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.37"
URL = "https://en.wikipedia.org/wiki/List_of_United_States_presidential_elections_by_popular_vote_margin"
response = requests.get(URL, headers={'User-Agent': user_agent})
html = response.content
soup = BeautifulSoup(html, "lxml")
# Collect one data row per table <tr>, dropping empty cells.
for tr in soup.find_all('tr'):
    data = []
    # NOTE(review): iterating the Tag directly also yields whitespace strings,
    # which are then filtered out by the empty-text check below — confirm.
    for td in tr:
        clean_text = td.get_text().strip('\n')
        if len(clean_text) < 1:
            continue
        # Normalize the "No candidate" runner-up cell so it still splits into
        # name/party parts downstream.
        if clean_text == ',No candidate[a]':
            clean_text = 'No Candidate, ' + ', '
        data.append(clean_text)
    # Skip header/short rows; assumes real data rows have >= 10 cells while the
    # DataFrame below expects 13 columns — TODO confirm against the live table.
    if (data == []) or (len(data) < 10):
        continue
    election.append(data)
# pprint.pprint(election)
df = pd.DataFrame(election, columns = [
    'election_number', 'year', 'winner',
    'party', 'number_electoral_votes', 'electoral_perc',
    'pop_vote_perc', 'pop_margin_perc', 'number_pop_votes',
    'number_pop_margin', 'runner_up_name', 'runner_up_party', 'turnout_perc'])
# utf-8-sig keeps accented names readable when the CSV is opened in Excel.
df.to_csv(f'../datasets/elections.csv', sep=',', encoding='utf-8-sig', index = False)
19406336160 | #
# @lc app=leetcode id=404 lang=python3
#
# [404] Sum of Left Leaves
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def traversal(self, root, isLeft):
        """Walk the subtree at *root*, adding every left leaf's value to self.res.

        *isLeft* tells whether *root* itself is a left child of its parent.
        """
        is_leaf = not root.left and not root.right
        if is_leaf and isLeft:
            self.res += root.val
            return
        if root.left:
            self.traversal(root.left, True)
        if root.right:
            self.traversal(root.right, False)

    def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:
        """Return the sum of all left leaves in the tree rooted at *root*."""
        self.res = 0
        self.traversal(root, False)
        return self.res
# @lc code=end
| Matthewow/Leetcode | vscode_extension/404.sum-of-left-leaves.py | 404.sum-of-left-leaves.py | py | 791 | python | en | code | 2 | github-code | 36 |
4472055866 | from math import sin, cos
from components.bubble import Bubble
from components.utils import HF
from data.constants import CONFUSION_COLORS
class EnemyEvent:
    """An action an enemy performs when its trigger condition is reached."""

    def __init__(self, owner, game, data: dict):
        self.owner = owner
        self.game = game
        self.trigger_value = data["trigger value"]
        self.action = self.set_action(data["action"])
        self.hit = False
        self.value = data["value"]

    def set_action(self, action: str):
        """Map an action name from level data onto the matching bound method.

        Unknown names fall back to a no-op callable.
        """
        handlers = {
            "event bubbles": self.drop_bubble,
            "change speed": self.change_speed,
            "spawn enemies": self.spawn_enemies,
            "enemy split": self.enemy_split,
            "change color": self.change_color,
        }
        return handlers.get(action, lambda: None)

    def drop_bubble(self):
        """Spawn a pickup bubble at the owner's current position."""
        room = self.game.room
        room.bubbles.append(
            Bubble(self.game.rect, self.owner.x, self.owner.y,
                   gravitation_radius=room.gravitation_radius))

    def change_speed(self):
        """Set the owner's velocity to the event's configured value."""
        self.owner.velocity = HF(self.value)

    def spawn_enemies(self):
        """Spawn value[1] enemies of type value[0] at the owner's position."""
        enemy_type, amount = self.value[0], self.value[1]
        for _ in range(amount):
            self.game.room.spawn_enemy(enemy_type, self.owner.x, self.owner.y)

    def enemy_split(self):
        """Replace the owner with two "Twin" enemies offset along its heading."""
        x, y, angle = self.owner.x, self.owner.y, self.owner.body.angle
        offset_x = HF(58.44) * sin(angle)
        offset_y = HF(58.44) * cos(angle)
        spawn = self.game.room.spawn_enemy
        spawn("Twin", x + offset_x, y + offset_y, angle)
        spawn("Twin", x - offset_x, y - offset_y, angle)

    def change_color(self):
        """Recolor the owner's main circle based on its remaining health."""
        primary, secondary, glare = CONFUSION_COLORS[self.owner.health % 10]
        circle = self.owner.body.current_circles[0]
        circle.color = primary
        circle.glares[0].color = circle.glares[1].color = secondary
        circle.edge_color = secondary
        circle.glares[2].color = circle.glares[3].color = glare


__all__ = ["EnemyEvent"]
| IldarRyabkov/BubbleTanks2 | src/components/enemy_event.py | enemy_event.py | py | 1,981 | python | en | code | 37 | github-code | 36 |
31792102222 | # coding:utf-8
import sys
import window
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
# import pymysql
import threading
import pymysql
# Base directory for bundled resources (logo.ico, shadow.conf).
path = "./"
class Controller:
    """Switches between the login dialog and the main window."""

    def __init__(self):
        pass

    def show_login(self):
        """Show the login dialog; its switch_window signal advances to the main window."""
        self.login = LoginDialog()
        self.login.switch_window.connect(self.show_main)
        self.login.show()

    def show_main(self):
        """Close the login dialog, open the main window and start the shadow worker thread."""
        self.login.close()
        self.window = MainDialog()
        self.window.switch_window.connect(self.shutdown)
        self.window.show()
        from shadow import shadow;
        self.p = threading.Thread(target=shadow)
        # Daemon thread: it is forcibly terminated when the parent process exits.
        self.p.daemon = True
        self.p.start()

    def shutdown(self):
        """Stop receiving data and exit the whole application."""
        print("-------- 结束接收数据 -----------")
        sys.exit()
class MainDialog(QDialog):
    """Main chat window; forwards typed questions to the shadow chat backend."""

    # Emitted to ask the controller to shut the application down.
    switch_window = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(QDialog, self).__init__(parent)
        self.ui = window.Ui_Dialog_Main()
        self.setWindowIcon(QIcon(path + "logo.ico"))
        self.ui.setupUi(self)

    # Emit the signal so the controller's shutdown handler runs.
    def close(self):
        self.switch_window.emit()

    def ask(self):
        """Send the text box content to the chat backend and display its answer."""
        query = self.ui.textEdit.toPlainText().strip()
        print("收到询问: " + query)
        from shadow import chat
        back = chat(query)
        print("处理结果: " + back)
        self.ui.textEdit.setText(back)
class LoginDialog(QDialog):
    """Login window: validates the user against the MySQL `user` table and
    writes a local config file before switching to the main window."""

    switch_window = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        # NOTE(review): super(QDialog, self) skips QDialog.__init__ — the usual
        # PyQt idiom is super(LoginDialog, self); kept as-is to avoid a
        # behavior change, but verify.
        super(QDialog, self).__init__(parent)
        self.ui = window.Ui_Dialog_Login()
        self.setWindowIcon(QIcon(path + "logo.ico"))
        self.ui.setupUi(self)

    def verily(self, name, email):
        """Return True iff exactly one `user` row matches *name* and its
        email column equals *email*."""
        # NOTE(review): database credentials are hard-coded; move to config/env.
        conn = pymysql.connect(host='43.163.218.127',  # server address
                               user='root',            # user name
                               passwd='011026',        # password
                               port=3306,              # port, default 3306
                               db='aides',             # database name
                               charset='utf8')         # character encoding
        try:
            cur = conn.cursor()
            # Security fix: the query used to be built by string concatenation
            # with the user-supplied name, which allowed SQL injection.
            # Parameterized execution lets the driver escape the value.
            cur.execute("select * from `user` where `name` = %s", (name,))
            data = cur.fetchall()
            cur.close()
        finally:
            # Close the connection even if the query raises.
            conn.close()
        if len(data) != 1:
            return False
        # assumes column 1 of `user` holds the email address — TODO confirm schema
        return data[0][1] == email

    def write_conf(self, name, email, pwd, mode):
        """Persist the login details to shadow.conf next to the application."""
        with open(path + "shadow.conf", 'w') as f:
            f.write("name: " + name + "\n")
            f.write("email: " + email + "\n")
            f.write("password: " + pwd + "\n")
            f.write("mode: " + mode + "\n")

    def start(self):
        """Validate the form; on success write the config and emit switch_window."""
        name = self.ui.name.text()
        email = self.ui.email.text()
        pwd = self.ui.pwd.text()
        mode = self.ui.mode.text()
        if self.verily(name, email):
            self.write_conf(name, email, pwd, mode)
            # Switch to the main window.
            self.switch_window.emit()

    def clear(self):
        """Reset the login form fields."""
        self.ui.name.clear()
        self.ui.email.clear()
        self.ui.pwd.clear()
if __name__ == '__main__':
    # Build the Qt application and start at the login dialog.
    myapp = QApplication(sys.argv)
    myDlg = Controller()
    myDlg.show_login()
    sys.exit(myapp.exec_())
| northboat/Aides | app/app.py | app.py | py | 3,509 | python | en | code | 0 | github-code | 36 |
21253725938 | from objdetection1 import *
import cv2 as cv
import numpy as np
from PIL import Image
import random
import math
def make_cluster(img):
    """Quantize *img* into K=4 color clusters with k-means.

    Returns an image of the same shape where every pixel is replaced by its
    cluster center; cluster 0 is forced to black and cluster 1 to light gray
    (the remaining two centers keep their computed colors).
    """
    # Flatten to an (N, 3) float32 pixel list, as required by cv.kmeans.
    # NOTE(review): this assumes a 3-channel image — verify for grayscale input.
    pixels = np.float32(img.reshape((-1, 3)))
    # Stop after 10 iterations or when centers move less than epsilon 1.0.
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = 4
    _compactness, labels, centers = cv.kmeans(pixels, K, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
    # Convert centers back to uint8 and pin the first two to fixed colors.
    centers = np.uint8(centers)
    centers[0] = (0, 0, 0)
    centers[1] = (200, 200, 200)
    quantized = centers[labels.flatten()]
    return quantized.reshape(img.shape)
def createCandidatePositions(img):
    """Detect candidate object regions in *img*.

    Returns a tuple (candidates, bboxes): the cropped sub-images and their
    (x1, y1, x2, y2) bounding boxes as numpy arrays.
    NOTE: draws the green bounding rectangles onto *img* in place, so the
    returned crops include the drawn edges.
    """
    candidates = []
    bboxes = []
    # Work on a grayscale copy for the morphology / edge pipeline.
    gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Morphological close then erode to merge nearby blobs before clustering.
    kernelClose = np.ones((50, 50), np.uint8)
    kernelErode = np.ones((20, 20), np.uint8)
    closing = cv.morphologyEx(gray_image, cv.MORPH_CLOSE, kernelClose)
    closing = cv.morphologyEx(closing, cv.MORPH_ERODE, kernelErode)
    # NOTE(review): make_cluster reshapes to (N, 3) but receives a
    # single-channel image here — confirm this is intended.
    closing = cv.Canny(make_cluster(closing), 400, 425, apertureSize=3)
    # Each contour of the edge map becomes one candidate region.
    contours, hierarchy = cv.findContours(closing, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        x, y, w, h = cv.boundingRect(contour)
        cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        candidates.append(img[y:y + h, x:x + w])
        bboxes.append((x, y, (x + w), (y + h)))
        # (removed dead per-contour cv.moments / contourArea computations:
        # their results were never used)
    return np.array(candidates), np.array(bboxes)
| DvnGBulletZz/Computer_Vision_Kenan | Eind_Opdracht/clustering.py | clustering.py | py | 2,219 | python | en | code | 0 | github-code | 36 |
39472170581 | from flask import request
from werkzeug.exceptions import NotFound, BadRequest, Conflict
from db import db
from managers.brand import BrandManager
from managers.category import CategoryManager
from models import BrandModel, CategoryModel
from models.enums import GenderType
from models.products import ProductsModel, ProductImages, ProductPair
from sqlalchemy.sql.expression import text
from utils.operations import db_add_items, db_delete_items
def check_pair_or_image_product(item, product, item_id, product_id, item_name="item"):
    """Validate that *item* and *product* exist and that the item is attached
    to the product, raising NotFound / BadRequest otherwise."""
    if not item:
        raise NotFound(f"There is not {item_name} with id: {item_id}")
    if not product:
        raise NotFound(f"There is not product with id: {product_id}")
    attached = item in product.pairs or item in product.images
    if not attached:
        raise BadRequest(
            f"{item_name} with id: {item_id} is not attached to product with id: {product_id}"
        )
class ProductManager:
@staticmethod
def create_product(product_data):
images = []
for image in product_data["images"]:
img = ProductImages(img_url=image)
images.append(img)
product_pair = []
for obj in product_data["pairs"]:
pair = ProductPair(**obj)
product_pair.append(pair)
print(product_data["pairs"])
brand_q = BrandManager.get_by_name_query(product_data["brand_name"])
category_q = CategoryManager.get_by_title_query(product_data["category_title"])
brand = brand_q.first()
category = category_q.first()
if not brand:
raise NotFound("There is no brand with that name")
if not category:
raise NotFound("There is no category with that name")
with db.session.no_autoflush:
product = ProductsModel(
title=product_data["title"],
description=product_data["description"],
price=product_data["price"],
discount=product_data["discount"],
gender=GenderType[product_data["gender"]],
)
brand.products.append(product)
category.products.append(product)
for img in images:
product.images.append(img)
for pair in product_pair:
product.pairs.append(pair)
db_add_items(product, category, brand)
return product
@staticmethod
def add_image(id, image_data):
image = ProductImages(img_url=image_data["img_url"], product_id=id)
db_add_items(image)
return image
@staticmethod
def delete_image(id, image_id):
image = ProductImages.query.filter_by(id=image_id["id"]).first()
product = ProductsModel.query.filter(
ProductsModel.id == id, text("is_deleted is FALSE")
).first()
check_pair_or_image_product(image, product, image_id["id"], id, "images")
db_delete_items(*image)
return f"You delete image with id: {image_id['id']} successfully", 202
@staticmethod
def edit_image(product_id, images_data):
images_ids = [id for id in images_data["ids"]]
new_urls = [url for url in images_data["urls"]]
product = ProductsModel.query.filter_by(id=product_id).first()
new_images = [
ProductImages(product_id=product_id, img_url=url) for url in new_urls
]
old_images = [ProductImages.query.filter_by(id=id).first() for id in images_ids]
if len(images_ids) != len(new_urls):
raise BadRequest(
"You should add same number of new images such as number of deleted one"
)
if not product:
raise NotFound(f"There is not product with id: {product_id}")
for image in old_images:
if image not in product.images:
raise NotFound(
f"The id:{id} is not attached to product with id:{product_id}"
)
try:
db_add_items(*new_images)
db_delete_items(old_images)
except:
raise BadRequest("You cannot do that operation")
return {"message": "You successful edit images"}
@staticmethod
def add_pair(id, pair_data):
product = ProductsModel.query.filter(
ProductsModel.id == id, text("is_deleted is FALSE")
).first()
is_pair = ProductPair.query.filter_by(
size=pair_data["size"], color=pair_data["color"], product_id=id
).first()
if is_pair:
raise Conflict(
f"Pair with color: {pair_data['color']} and {pair_data['size']} already attached to product with id: {id}"
)
if not product:
raise NotFound("There is no product with that id")
pair = ProductPair(**pair_data, product_id=id)
db_add_items(pair)
return pair
@staticmethod
def delete_pair(id, pair_id):
product = ProductsModel.query.filter(
ProductsModel.id == id, text("is_deleted is FALSE")
).first()
pair = ProductPair.query.filter_by(id=pair_id["id"]).first()
check_pair_or_image_product(pair, product, pair_id["id"], id, "pair")
db_delete_items(pair)
return f"You delete image with id: {pair_id['id']} successfully", 202
@staticmethod
def edit_pair(product_id, pair_id, pair_data):
product = ProductsModel.query.filter_by(id=product_id).first()
pair = ProductPair.query.filter_by(id=pair_id).first()
check_pair_or_image_product(pair, product, pair_id, product_id, "pair")
# pair.size = pair_data["size"]
# pair.color = pair_data["color"]
pair.quantity = pair_data["quantity"]
db_add_items(pair)
return pair
@staticmethod
def sell_pair(pairs):
for pair in pairs:
pair.quantity -= 1
return pairs
@staticmethod
def edit_product_base_info(id_, product_data):
# product_q = ProductsModel.query.filter(
# ProductsModel.id == id_, text("is_deleted is FALSE")
# )
product_q = ProductsModel.query.filter_by(id=id_)
product = product_q.first()
if not product:
raise NotFound("This product does not exist.")
product_q = ProductsModel.query.filter(ProductsModel.id == id_)
old_brand = product.brand
old_category = product.category
new_brand = BrandManager.get_by_name(product_data["brand_name"])
new_category = CategoryManager.get_by_name(product_data["category_title"])
if not new_brand:
raise NotFound("There is no brand with that name")
if not new_category:
raise NotFound("There is no category with that name")
product_data.pop("brand_name")
product_data.pop("category_title")
with db.session.no_autoflush:
print(product_data)
product_q.update(product_data)
if not old_brand.name == new_brand.name:
old_brand.products.remove(product)
new_brand.products.append(product)
if not old_category.title == new_category.title:
old_category.products.remove(product)
new_category.products.append(product)
db_add_items(product, new_category, old_category, new_brand, old_brand)
return product
@staticmethod
def get_one(id_, for_admin=False):
    """Fetch a single product by id.

    Admins can see soft-deleted products; regular users only see products
    whose ``is_deleted`` flag is FALSE.

    :param id_: product id to look up
    :param for_admin: when True, include soft-deleted products
    :return: the matching product instance
    :raises NotFound: when no matching product exists
    """
    if not for_admin:
        found = ProductsModel.query.filter(
            ProductsModel.id == id_, text("is_deleted is FALSE")
        ).first()
    else:
        found = ProductsModel.query.filter_by(id=id_).first()
    if not found:
        raise NotFound("This product does not exist.")
    return found
@staticmethod
def get_all(for_admin=False):
    """Return all products, optionally filtered by query-string parameters.

    Recognized request args: ``category`` (title), ``brand`` (name), and
    ``gender``. A missing arg disables the corresponding filter. Regular
    users only see non-deleted products; admins see everything.

    :param for_admin: when True, include soft-deleted products
    :return: list of matching products
    :raises NotFound: when an unknown gender value is supplied
    """
    category_title = request.args.get("category")
    brand_name = request.args.get("brand")
    gender = request.args.get("gender")
    # Reject unknown gender values early, and only validate when one was
    # actually supplied (avoids building the enum list for no reason).
    if gender and gender not in GenderType.list():
        # Bug fix: message previously read "There is not gender ...".
        raise NotFound("There is no gender with that name")
    # A literal True is a no-op condition for SQLAlchemy's .filter().
    category_f = CategoryModel.title == category_title if category_title else True
    brand_f = BrandModel.name == brand_name if brand_name else True
    gender_f = ProductsModel.gender == gender if gender else True
    base_query = ProductsModel.query.join(ProductsModel.category).join(
        ProductsModel.brand
    )
    if for_admin:
        products = base_query.filter(brand_f, category_f, gender_f)
    else:
        # Hide soft-deleted products from non-admin users.
        products = base_query.filter(
            brand_f, text("is_deleted is FALSE"), category_f, gender_f
        )
    return products.all()
@staticmethod
def delete_product(id_):
    """Soft-delete a product by setting its ``is_deleted`` flag.

    :param id_: id of the product to soft-delete
    :return: tuple of (message, 202 status code)
    :raises NotFound: when the product is missing or already deleted
    """
    product = ProductsModel.query.filter(
        ProductsModel.id == id_, text("is_deleted is FALSE")
    ).first()
    if not product:
        raise NotFound("This product does not exist.")
    product.is_deleted = True
    # Bug fix: db_add_items() was previously called with no arguments, so the
    # modified product was never handed to the persistence helper (every
    # other mutator in this module passes the changed objects explicitly).
    db_add_items(product)
    return "Product is deleted", 202
| a-angeliev/Shoecommerce | server/managers/products.py | products.py | py | 9,324 | python | en | code | 0 | github-code | 36 |
31875266103 | # from .word2vec_functions.text_processing_functions import *
from .word2vec_functions.word2vec_companion import similar_words
from gensim.models import Word2Vec
import json
BASE_PATH = 'w2v/word2vec_functions/'
def load_filters():
    """Build the vocabulary-filter mapping from the bundled test data.

    Returns the ``sweet_dict`` loaded from ``test_data.json`` augmented with
    two extra keys: ``'all'`` (the full sweet-word list) and
    ``'instruments'`` (the known instrument types).
    """
    with open(BASE_PATH + 'Raw Data/test_data.json', 'r') as data_file:
        test_data = json.load(data_file)
    # The sweet dictionary itself becomes the filter mapping; extend it with
    # the catch-all word list and the instrument types.
    filters = test_data['sweet_dict']
    filters['all'] = test_data['sweet_words']
    filters['instruments'] = test_data['instrument_types']
    return filters
def load_models():
    """Load the two pre-trained word2vec models from disk.

    Returns a dict with the ``'traditional'`` model and the ``'tuple'``
    (tuple-extraction) model.
    """
    trained_dir = BASE_PATH + 'Trained Models/'
    return {
        'traditional': Word2Vec.load(trained_dir + 'model_e300_s150_w10_m3.model'),
        'tuple': Word2Vec.load(trained_dir + 'TupleExtractions_e300_s150_w10_m3.model'),
    }
def load_translations():
    """Read the papers-to-translations mapping from the processed data dir."""
    translations_path = BASE_PATH + 'Processed Training Data/papers_translations.json'
    with open(translations_path, 'r') as translations_file:
        return json.load(translations_file)
def similar_word_list_wrapper(positive_words, negative_words, filter_vocab):
    """Return up to 20 words similar to *positive_words* (and dissimilar to
    *negative_words*) using the traditional word2vec model, restricted to
    *filter_vocab*.

    :param positive_words: words the results should be similar to
    :param negative_words: words the results should be dissimilar to
    :param filter_vocab: vocabulary the returned words are limited to
    :return: list of similar words produced by ``similar_words``
    """
    # NOTE(review): `filters` is never used below — loads and parses the
    # filter JSON for nothing; candidate for removal.
    filters = load_filters()
    models = load_models()
    translations = load_translations()
    model = models['traditional']
    # Debug traces: printed on every call.
    print('positive words', positive_words)
    print('negative words', negative_words)
    print(filter_vocab)
    similar_word_list = similar_words(positive_words, negative_words, translations, model,
                                  verbose = False,
                                  words_returned = 20,
                                  limited_vocabulary = filter_vocab)
    return similar_word_list
| CarsonDavis/InteractiveWord2VecWebsite | w2v/utils.py | utils.py | py | 1,911 | python | en | code | 0 | github-code | 36 |
1750954085 | from ex2_utils import *
import matplotlib.pyplot as plt
from random import randrange
import numpy as np
import cv2
def presentation(plots, titles):
    """Display a list of images with matching titles via matplotlib.

    Layout depends on the number of plots: a single full-size image for 1,
    a 1x2 row for 2, a 2x2 grid for even counts, and a 1xN row otherwise.

    NOTE(review): the even-count branch hard-codes a 2x2 grid, so it only
    works for up to 4 plots (add_subplot(2, 2, i+1) fails for i >= 4) —
    the callers in this file pass at most 4.

    :param plots: list of image arrays to display
    :param titles: list of title strings, parallel to ``plots``
    """
    n = len(plots)
    if n == 1:
        # Single image: plot directly on the current figure.
        plt.imshow(plots[0], cmap='gray')
        plt.title(titles[0])
        plt.show()
        return
    if n == 2:
        fig, ax = plt.subplots(1, 2, figsize=(12, 8))
    elif n % 2 == 0:
        # Even count: fill a 2x2 grid and return early (ax is not an array here).
        fig = plt.figure(figsize=(12, 8))
        plt.gray()
        for i in range(n):
            ax = fig.add_subplot(2, 2, i + 1)
            ax.imshow(plots[i])
            ax.title.set_text(titles[i])
        plt.show()
        return
    else:
        # Odd count > 1: a single row of n axes.
        fig, ax = plt.subplots(1, n, figsize=(4 * n, 4))
    # Shared rendering path for the 1x2 and 1xN layouts.
    for i in range(n):
        ax[i].set_title(titles[i])
        ax[i].imshow(plots[i], cmap='gray')
    plt.tight_layout()
    plt.show()
def conv1Demo():
    """Check conv1D against np.convolve on random 1-D signals.

    Generates ``n`` random signal/kernel pairs (``n`` may be 0, in which
    case the check passes vacuously) and compares every signal against
    every kernel.
    """
    n = randrange(10)  # number of random signals/kernels (0..9)
    Signals, Kernels = list(), list()
    for i in range(n):
        Signals.append(np.random.randint(5, size=10))
        Kernels.append(np.random.randint(5, size=10))
    good_ans = 0
    for i in range(n):
        for j in range(n):
            np_convolution = np.convolve(Signals[i], Kernels[j])
            my_convolution = conv1D(Signals[i], Kernels[j])
            # Bug fix: the original used `a.all() == b.all()`, which only
            # compares whether each array is entirely nonzero — not whether
            # the arrays are equal. Use element-wise array equality.
            if np.array_equal(np_convolution, my_convolution):
                good_ans += 1
    if good_ans == len(Signals) * len(Kernels):
        print("conv1Demo: All test are passed!\nGood Job!\n")
    else:
        print("conv1Demo: Some of test aren't passed!\nTry Again!\n")
def conv2Demo():
    """Check conv2D against cv2.filter2D on several kernels.

    Loads the pool-balls test image, normalizes each non-zero kernel so its
    entries sum to 1, and counts how many conv2D results match OpenCV's
    reference output.
    """
    img = cv2.imread('pool_balls.jpeg', 0)  # grayscale
    Kernels = [np.array([[-1, 1], [1, 1]], dtype=np.float64),
               np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64),
               np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float64),
               np.array([[0., 0.25, 0.5, 0.75, 1], [0.2, 0.4, 0.6, 0.8, 1],
                         [1., 1.25, 1.5, 1.75, 2], [1.2, 1.4, 1.6, 1.8, 2]], dtype=np.float64)]
    # Normalize so each kernel sums to 1; skip the all-zero kernel.
    for i in range(4):
        if Kernels[i].sum() != 0:
            Kernels[i] /= (Kernels[i].sum())
    good_ans = 0
    for kernel in Kernels:
        cv2_convolution = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_REPLICATE)
        my_convolution = conv2D(img, kernel)
        # Bug fix: `a.all() == b.all()` only compares all-nonzero-ness of the
        # two arrays; use element-wise array equality instead.
        if np.array_equal(cv2_convolution, my_convolution):
            good_ans += 1
    if good_ans == len(Kernels):
        print("conv2Demo: All test are passed!\nGood Job!\n")
    else:
        # Bug fix: this failure message previously said "conv1Demo".
        print("conv2Demo: Some of test aren't passed!\nTry Again!\n")
def derivDemo():
    """Show direction, magnitude, and x/y derivatives of the test image."""
    img = cv2.imread('pool_balls.jpeg', 0)
    direction, magnitude, x_der, y_der = convDerivative(img)
    presentation(
        plots=[direction, magnitude, x_der, y_der],
        titles=["Direction", "Magnitude", "X Derivative", "Y Derivative"],
    )
    print("derivDemo: Good Job!\n")
def blurDemo():
    """Show the coins image next to its blurred version (kernel size 5)."""
    img = cv2.imread("coins.jpg", 0)
    kernel_size = 5
    blurred = blurImage2(img, kernel_size)
    presentation(
        plots=[img, blurred],
        titles=['Image - non blurring', 'CV2 Blur'],
    )
    print("blurDemo: Good Job!\n")
def edgeDetectionSobelDemo():
    """Compare OpenCV's Sobel edge map with the custom implementation."""
    img = cv2.imread("boxman.jpg", 0)
    cv2_sobel, my_sobel = edgeDetectionSobel(img, thresh=0.1)
    presentation(
        plots=[img, cv2_sobel, my_sobel],
        titles=['Original Image', 'CV2 Sobel', 'My Sobel'],
    )
    print("edgeDetectionSobelDemo: Good Job!\n")
def edgeDetectionZeroCrossingLOGDemo():
    """Show the Laplacian-of-Gaussian zero-crossing edge map."""
    img = cv2.imread("boxman.jpg", 0)
    edges = edgeDetectionZeroCrossingLOG(img)
    presentation(
        plots=[edges],
        titles=["Laplacian of Gaussian\nZero Crossing Edge Detection"],
    )
    print("edgeDetectionZeroCrossingLOGDemo: Good Job!\n")
def edgeDetectionCannyDemo():
    """Compare OpenCV's Canny edge map with the custom implementation."""
    img = cv2.imread("pool_balls.jpeg", 0)
    cv2_canny, my_canny = edgeDetectionCanny(img, 50, 100)
    presentation(
        plots=[img, cv2_canny, my_canny],
        titles=['Original Image', 'CV2 Canny Edge Detection', 'My Canny Edge Detection'],
    )
    print("edgeDetectionCannyDemo: Good Job!\n")
def edgeDemo():
    """Run all three edge-detection demos in sequence."""
    edgeDetectionSobelDemo()
    edgeDetectionZeroCrossingLOGDemo()
    edgeDetectionCannyDemo()
def houghDemo():
    """Detect circles (radius 10-20) in the coins image and overlay them.

    Runs the custom houghCircle implementation and draws each detected
    circle in red on top of the grayscale source image.
    """
    img = cv2.imread('coins.jpg', 0)
    min_radius, max_radius = 10, 20
    # Each element is expected to be an (x, y, radius) triple.
    circles = houghCircle(img, min_radius, max_radius)
    fig, ax = plt.subplots()
    ax.imshow(img, cmap='gray')
    for x, y, radius in circles:
        # Unfilled red outline so the underlying coin stays visible.
        circles_plots = plt.Circle((x, y), radius, color='r', fill=False)
        ax.add_artist(circles_plots)
    plt.title("Circle\nMy houghCircle Implementation")
    plt.show()
    print("houghDemo: Good Job!\n")
def main():
    """Run every exercise demo in order (convolution, derivatives, blur,
    edge detection, Hough circles)."""
    print("ID: 316451749\nHave Fun! :)\n")
    conv1Demo()
    conv2Demo()
    derivDemo()
    blurDemo()
    edgeDemo()
    houghDemo()
# Script entry point: run all demos when executed directly.
if __name__ == '__main__':
    main()
| MoriyaBitton/Ex2_Convolution_and_Edge_Detection | ex2_main.py | ex2_main.py | py | 5,008 | python | en | code | 0 | github-code | 36 |
from django.urls import path
from . import views

# Namespace used when reversing these routes, e.g. reverse('tenants:index').
app_name = 'tenants'

urlpatterns = [
    # Dashboard / landing page.
    path('', views.index, name='index'),
    # Creation forms for each entity type.
    path('device_network/', views.device_network, name='device_network'),
    path('device_location/', views.device_location, name='device_location'),
    path('device/', views.device, name='device'),
    path('sensor/', views.sensor, name='sensor'),
    path('org_user/', views.org_user, name='org_user'),
    path('profile/', views.profile, name='profile'),
    # List views.
    path('users/', views.users, name='users'),
    path('networks/', views.networks, name='networks'),
    path('locations/', views.locations, name='locations'),
    path('devices/', views.devices, name='devices'),
    path('sensors/', views.sensors, name='sensors'),
    # Update views take the primary key of the record being edited.
    path('update_network/<int:network_id>/', views.update_network, name='update_network'),
    path('update_location/<int:location_id>/', views.update_location, name='update_location'),
    path('update_device/<int:device_id>/', views.update_device, name='update_device'),
    path('update_sensor/<int:sensor_id>/', views.update_sensor, name='update_sensor'),
    # NOTE(review): these two routes have no trailing slash, unlike the
    # others — confirm that is intentional.
    path('delete_entry', views.delete_entry, name='delete_entry'),
    path('change_password', views.change_password, name='change_password'),
]
| Being-rayhan/iot | tenants/urls.py | urls.py | py | 1,265 | python | en | code | 0 | github-code | 36 |
4509075711 | import cv2
import numpy as np
import depthai
import threading
import sys
import os
import time
# UI state shared between the mouse callback and the main flow.
selected_points = []  # the four screen corners clicked by the user, as (x, y)
completed = False  # set True once all four corners have been selected

# Dataset configuration.
dataset = "kitti"
img_size = [3, 352, 1216]  # for kitti (channels, height, width)

# Frame sharing between the capture thread and the GUI code.
frame = None  # latest BGR frame produced by the OAK capture thread
is_frame_available = False  # set once the first frame has arrived
stop_capture = threading.Event()  # Event object to signal the capture thread to stop
# Background worker: continuously pulls frames from the OAK camera into the
# global `frame` until `stop_capture` is set.
def capture_frames():
    """Stream 1080p RGB frames from a DepthAI (OAK) camera into globals.

    Writes each new BGR frame to the module-level ``frame`` and sets
    ``is_frame_available`` after the first one. Runs until the
    ``stop_capture`` event is set.
    """
    global frame, is_frame_available
    # Build the DepthAI pipeline with a single color camera node.
    pipeline = depthai.Pipeline()
    cam = pipeline.createColorCamera()
    # Unsupported resolution set for detected camera IMX378/214, needs
    # THE_1080_P / THE_4_K / THE_12_MP.
    cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
    #cam.initialControl.setManualFocus(150) # 0..255 (larger for near objects)
    # Focus:
    # value 150 == 22cm
    # value 140 == 36cm
    # XLink output so the host can read frames from the device.
    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    cam.video.link(xoutRgb.input)
    # Start the pipeline on the device.
    with depthai.Device(pipeline) as device:
        # maxSize=1, blocking=False: always keep only the freshest frame.
        q_rgb = device.getOutputQueue(name="rgb", maxSize=1, blocking=False)
        print('Connected cameras:', device.getConnectedCameraFeatures())
        print('Usb speed:', device.getUsbSpeed().name)
        if device.getBootloaderVersion() is not None:
            print('Bootloader version:', device.getBootloaderVersion())
        # Device name
        print('Device name:', device.getDeviceName())
        while not stop_capture.is_set():
            # Non-blocking read: None when no new frame is ready yet.
            in_rgb = q_rgb.tryGet()
            #focus_value = q_rgb.getCtrlValue(depthai.CameraControl.CamCtrl.FOCUS)
            #print("Focus = ",focus_value)
            if in_rgb is not None:
                # Convert the NV12 format to BGR.
                frame = in_rgb.getCvFrame()
                # Signal consumers that at least one frame exists.
                is_frame_available = True
def sort_coordinates(selected_points):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    The two points with the smallest x form the left pair and the two with
    the largest x the right pair; within each pair the point with the
    smaller y is the top corner.

    :param selected_points: list of four (x, y) tuples in any order
    :return: list [top_left, top_right, bottom_right, bottom_left]
    """
    by_x = sorted(selected_points, key=lambda pt: pt[0])

    def top_then_bottom(first, second):
        # Smaller y is the top corner; ties put `second` on top, matching
        # the strict '<' comparison of the original ordering rule.
        if first[1] < second[1]:
            return first, second
        return second, first

    top_left, bottom_left = top_then_bottom(by_x[0], by_x[1])
    top_right, bottom_right = top_then_bottom(by_x[2], by_x[3])
    return [top_left, top_right, bottom_right, bottom_left]
# Mouse callback: records up to four clicked corner points on the live view.
def store_points(event, x, y, flags, param):
    """OpenCV mouse callback that collects the four screen-corner clicks.

    Each left-click is appended to the global ``selected_points``; all
    selected points are redrawn on the current frame, and the global
    ``completed`` flag is set after the fourth click. ``flags``/``param``
    are unused but required by the cv2 callback signature.
    """
    global selected_points, completed, frame, is_frame_available
    # Busy-wait until the capture thread has produced at least one frame.
    while not is_frame_available:
        pass
    window_name = 'Select 4 Corners of your screen'
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(selected_points) < 4:
            selected_points.append((x, y))
            # Redraw all selected points as green dots on the live frame.
            for (x,y) in selected_points:
                cv2.circle(frame, (x, y), 9, (0, 255, 0), -1)
            cv2.imshow(window_name, frame)
            # cv2.waitKey(0)
            print((x,y))
        if len(selected_points) == 4:
            completed = True
def select_points():
    """Interactive corner selection: show a sample KITTI image on the left
    half of the screen and the live camera feed on the right, then let the
    user click the four corners of the displayed screen area.

    Blocks until four corners have been clicked (sets the global
    ``completed`` via the mouse callback) or 'q' is pressed (exits the
    program). Closes all windows before returning.
    """
    # --- Left half: display a padded sample image to be photographed. ---
    screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
    # Dimensions for the left half of the screen.
    left_half_x = -10
    left_half_y = 0
    left_half_width = screen_width // 2
    left_half_height = screen_height
    window_name = 'Image to be captured'
    # Resizable window pinned to the left half.
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.moveWindow(window_name, left_half_x, left_half_y)
    cv2.resizeWindow(window_name, left_half_width, left_half_height)
    sample_image_path = "/home/vision/suraj/kitti_dataset/KITTI/2011_09_28/2011_09_28_drive_0001_sync/image_02/data/0000000000.png"
    image = cv2.imread(sample_image_path,-1)
    h,w,_ = image.shape
    # Vertical black padding so the image fills the half-screen aspect ratio.
    pad_x = int(w)
    pad_y = int(((screen_height*w)/(screen_width*.5)-h)/2)
    print(image.shape)
    top_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
    bottom_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
    image = np.vstack((top_padding,image,bottom_padding))
    # if dataset == "kitti": # do kb_crop
    #     height = img_size[1]
    #     width = img_size[2]
    #     top_margin = int(height - 352)
    #     left_margin = int((width - 1216) / 2)
    #     image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216]
    cv2.imshow(window_name, image)
    # cv2.waitKey(1)
    # --- Right half: live camera view with the corner-picking callback. ---
    global selected_points, frame, is_frame_available
    window_name = 'Select 4 Corners of your screen'
    screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
    # Dimensions for the right half of the screen.
    right_half_x = screen_width // 2
    right_half_y = screen_height
    right_half_width = screen_width // 2
    right_half_height = screen_height
    # window_name = 'Select Points'
    # Resizable window pinned to the right half.
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.moveWindow(window_name, right_half_x, 0)
    cv2.resizeWindow(window_name, right_half_width, right_half_height)
    cv2.setMouseCallback(window_name, store_points)
    # Instructions
    print("Please select 4 corner points of the rectangular screen.")
    while True:
        # Busy-wait until the capture thread has produced at least one frame.
        while not is_frame_available:
            pass
        #img = frame.copy()
        # Draw the already-selected points on the current frame.
        for (x,y) in selected_points:
            cv2.circle(frame, (x, y), 9, (0, 255, 0), -1)
        # Display the live frame.
        cv2.imshow(window_name, frame)
        # Done once the callback has collected all four corners.
        if completed:
            break
        # 'q' aborts the whole program.
        key = cv2.waitKey(1)
        if key == ord('q'):
            sys.exit(0)
            break
    cv2.destroyAllWindows()
def display_frame(kitti_read_path,kitti_write_path,data_splits_file):
    """Re-capture KITTI images through the camera and save rectified copies.

    For every image path listed in *data_splits_file*: show the (padded)
    KITTI image on the left half of the screen, grab a camera frame of it,
    warp the frame back to KITTI resolution using the homography computed
    from the user-selected screen corners, and write the result under
    *kitti_write_path* with the same relative path.

    :param kitti_read_path: root directory of the original KITTI images
    :param kitti_write_path: root directory for the re-captured images
    :param data_splits_file: text file, one "relative/image/path ..." per line
    """
    # Destination rectangle: full KITTI image resolution.
    width, height = 1242, 375 #kitti
    dst_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype=np.float32)
    global selected_points, frame, is_frame_available
    # Order the clicked corners TL, TR, BR, BL to match dst_points.
    selected_points = sort_coordinates(selected_points)
    print("Selected points are:",selected_points)
    src_points = np.array(selected_points, dtype=np.float32)
    # Homography mapping camera pixels of the screen area to KITTI pixels.
    M, _ = cv2.findHomography(src_points, dst_points)
    # Read the list of images to process.
    with open(data_splits_file, 'r') as file:
        lines = file.readlines()
    # Process each image path.
    for idx,line in enumerate(lines):
        image_path = line.strip().split(" ")[0]
        if image_path.split("/")[0] == "2011_09_26":
            continue # as 1st folder is done
        read_path = os.path.join(kitti_read_path,image_path)
        write_path = os.path.join(kitti_write_path,image_path)
        save_dir = os.path.dirname(write_path)
        os.makedirs(save_dir,exist_ok=True)
        # Load the RGB image (unchanged bit depth).
        rgb_image = cv2.imread(read_path,-1)
        #rgb_image = cv2.resize(rgb_image,(width, height))
        if rgb_image is not None:
            # # Create a delay of 0.5 seconds
            # time.sleep(0.5)
            # Show the image on the left half of the screen for the camera.
            screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
            left_half_x = -10
            left_half_y = 0
            left_half_width = screen_width // 2
            left_half_height = screen_height
            h,w,_ = rgb_image.shape
            # Pad vertically so the image fills the half-screen aspect ratio
            # (same padding scheme as in select_points).
            pad_x = int(w)
            pad_y = int(((screen_height*w)/(screen_width*.5)-h)/2)
            top_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
            bottom_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
            rgb_image = np.vstack((top_padding,rgb_image,bottom_padding))
            #print(rgb_image.shape)
            window_name = 'Image to be captured'
            # Resizable window pinned to the left half.
            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.moveWindow(window_name, left_half_x, left_half_y)
            cv2.resizeWindow(window_name, left_half_width, left_half_height)
            #image_name_ = os.path.basename(read_path)
            #cv2.putText(rgb_image,f"{image_name_}",(325,690), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA)
            # sample_image_path = "/home/vision/suraj/kitti_dataset/KITTI/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png"
            # sample_image = cv2.imread(sample_image_path,-1)
            cv2.imshow(window_name,rgb_image)
            #global counter_video_started
            # Give the display (and the camera) time to settle on the new image.
            cv2.waitKey(400)
            #time.sleep(2)
            global frame, is_frame_available
            # Busy-wait until the capture thread has produced a frame.
            while not is_frame_available:
                pass
            captured_frame = frame.copy()
            #cv2.waitKey(1000)
            #time.sleep(2)
            # Warp the captured frame back to KITTI resolution.
            modified_frame = cv2.warpPerspective(captured_frame, M, (width, height))
            #print("warped image's shape = ",modified_frame.shape)
            screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
            # Dimensions for the right half of the screen (preview disabled below).
            right_half_x = screen_width // 2
            right_half_y = screen_height
            right_half_width = screen_width // 2
            right_half_height = screen_height
            # window_name = 'Verify Captured Image'
            # cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            # cv2.moveWindow(window_name, right_half_x, 0)
            # cv2.resizeWindow(window_name, right_half_width, right_half_height)
            # cv2.imshow(window_name, modified_frame)
            # Save the rectified capture under the mirrored directory tree.
            cv2.imwrite(write_path, modified_frame)
            # 'q' stops the capture thread and aborts the loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                stop_capture.set()
                break
            #save image in write_path
# --- Script entry: capture thread + interactive calibration + batch capture. ---
# Start the background frame-capture thread.
capture_thread = threading.Thread(target=capture_frames)
capture_thread.start()
# Let the user click the 4 corners of the display screen in the camera view.
select_points()
kitti_read_path = "/home/vision/suraj/kitti_dataset/KITTI"
kitti_write_path = "/home/vision/suraj/kitti_dataset/KITTI_captured_from_oak1"
data_splits_file = '/home/vision/suraj/Pixelformer_jetson/data_splits/kitti_all_data_for_data_capture_from_camera_from_2nd.txt' # Replace with the actual path to your data splits file
# Re-capture every listed image through the camera and save rectified copies.
display_frame(kitti_read_path,kitti_write_path,data_splits_file)
#perform homography
#perform_homography()
# Wait for the frame-capture thread to finish.
capture_thread.join()
# Release all OpenCV windows.
cv2.destroyAllWindows()
| surajiitd/jetson-documentation | model_compression/capture_dataset.py | capture_dataset.py | py | 11,499 | python | en | code | 0 | github-code | 36 |
41272460993 | from imutils import paths
import face_recognition
import os
from shutil import copy
from PIL import Image, ImageDraw
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Hide the root Tk window so only the file-picker dialog appears.
Tk().withdraw()
# Ask the user to pick the reference photo of the person to search for.
filename = askopenfilename()
obama = face_recognition.load_image_file(filename)
folder = 'obama'
# Encode the first face found in the reference photo.
# NOTE(review): raises IndexError if the chosen photo contains no face.
obamaface_encoding = face_recognition.face_encodings(obama)[0]
path = 'images/'
images = []
# Collect every .jpg in the images directory.
for file in os.listdir(path):
    if file.endswith(".jpg"):
        images.append(os.path.join(path, file))
# Create the output folder for matches if it does not exist yet.
isExist = os.path.exists(folder)
if not isExist:
    os.makedirs(folder)
# Copy every image that contains a matching face into ./obama/.
for file_name in images:
    newPic = face_recognition.load_image_file(file_name)
    for face_encoding in face_recognition.face_encodings(newPic):
        # Tolerance 0.5 — stricter than the library's 0.6 default.
        results = face_recognition.compare_faces([obamaface_encoding], face_encoding, 0.5)
        if results[0] == True:
            # NOTE(review): assumes paths like "images/<name>.jpg" and
            # hard-codes "./obama/" instead of reusing `folder` — confirm.
            copy(file_name, "./obama/" + file_name.split("/")[1])
# unknown_picture = face_recognition.load_image_file("2.jpg")
# unknown_face_encoding = face_recognition.face_encodings(unknown_picture)[0]
# results = face_recognition.compare_faces([obamaface_encoding], unknown_face_encoding)
# if results[0] == True:
#     print("It's a picture of obama!")
# else:
#     print("It's not a picture of obama!")
# print("It's not a picture of obama!") | SankojuRamesh/face_recognation | fr.py | fr.py | py | 1,295 | python | en | code | 0 | github-code | 36 |
36127467524 | import json
from typing import Dict
from kafka import KafkaConsumer
from main import StationStatus, Station
# Latest decoded station status; replaced on every consumed message.
station_status = dict()
if __name__ == '__main__':
    # Consume JSON-encoded city-bike status messages from the local broker,
    # starting from the earliest available offset.
    consumer = KafkaConsumer(
        'city_bike_topic',
        bootstrap_servers = ['localhost:9092'],
        auto_offset_reset='earliest',
        value_deserializer=lambda x: json.loads(x.decode('utf-8'))
    )
    # Blocks forever, printing one StationStatus per message.
    for message in consumer:
        if message is not None:
            #print(message.value)
            # Unwrap the deserialized payload dict.
            message = message.value
            # station_status['last_updated'] = message['last_updated']
            # i = 0
            # for station in message['data']['stations']:
            #     station_status['station_id'] = station['station_id']
            #     station_status['num_bikes_available'] = station['num_bikes_available']
            #     station_status['num_docks_available'] = station['num_docks_available']
            #     print(station_status)
            # Build the typed StationStatus (imported from main) from the payload.
            station_status = StationStatus(last_updated=message['last_updated'], stations=message['stations'])
            print(station_status)
    #print(consumer.topics())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.