seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74002868457 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pyscada.device import GenericDevice
from .devices import GenericDevice as GenericHandlerDevice
from time import time, sleep
import sys
import logging
logger = logging.getLogger(__name__)
try:
import serial
driver_ok = True
except ImportError:
logger.error("Cannot import serial", exc_info=True)
driver_ok = False
class Device(GenericDevice):
    """
    Serial device

    PyScada device wrapper for serial-port hardware: registers active
    variables that carry a `serialvariable` relation and delegates the
    actual I/O to the GenericHandlerDevice handler class.
    """
    def __init__(self, device):
        # driver_ok reflects whether pyserial imported successfully at module load.
        self.driver_ok = driver_ok
        self.handler_class = GenericHandlerDevice
        super().__init__(device)
        # Register only active variables that are serial variables.
        for var in self.device.variable_set.filter(active=1):
            if not hasattr(var, "serialvariable"):
                continue
            self.variables[var.pk] = var

    def write_data(self, variable_id, value, task):
        """
        write value to a Serial Device

        Returns a list of written variable states; empty when the serial
        driver is unavailable or the handler could not open the port.
        """
        output = []
        if not self.driver_ok:
            return output
        self._h.connect()
        # NOTE(review): when connect() leaves `inst` as None we return without
        # calling disconnect() — confirm the handler needs no cleanup here.
        if self._h.inst is None:
            return output
        output = super().write_data(variable_id, value, task)
        self._h.disconnect()
        return output
| clavay/PyScada-Serial | pyscada/serial/device.py | device.py | py | 1,198 | python | en | code | 0 | github-code | 90 |
25043929262 | from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import JSONResponse
from loguru import logger as log
from sqlalchemy.orm import Session
from ..config import settings
from ..db import database
from ..db.db_models import DbUser
from ..users import user_crud
from .osm import AuthUser, init_osm_auth, login_required
# Router for the authentication endpoints; all routes are mounted under /auth.
router = APIRouter(
    prefix="/auth",
    tags=["auth"],
    responses={404: {"description": "Not found"}},
)
@router.get("/osm_login/")
def login_url(request: Request, osm_auth=Depends(init_osm_auth)):
    """Generate Login URL for authentication using OAuth2 Application registered with OpenStreetMap.

    Click on the download url returned to get access_token.

    Parameters: None

    Returns:
    -------
    - login_url (string) - URL to authorize user to the application via OpenStreetMap
      OAuth2 with client_id, redirect_uri, and permission scope as query_string parameters
    """
    login_url = osm_auth.login()
    log.debug(f"Login URL returned: {login_url}")
    return JSONResponse(content=login_url, status_code=200)
@router.get("/callback/")
def callback(request: Request, osm_auth=Depends(init_osm_auth)):
    """Performs token exchange between OpenStreetMap and Export tool API.

    Core will use Oauth secret key from configuration while deserializing token,
    provides access token that can be used for authorized endpoints.

    Parameters: None

    Returns:
    -------
    - access_token (string)
    """
    log.debug("Callback API requested: %s", request.url)
    # Force the configured URL scheme (e.g. behind a TLS-terminating proxy the
    # app sees "http" while OSM was given "https"). The previous
    # str.replace("http", ...) replaced every occurrence of "http" — it
    # corrupted an existing "https" prefix (-> "httpss") and any "http"
    # appearing inside the path/query string. Swap only the scheme portion.
    callback_url = str(request.url)
    _, sep, rest = callback_url.partition("://")
    if sep:
        callback_url = f"{settings.URL_SCHEME}{sep}{rest}"
    access_token = osm_auth.callback(callback_url)
    log.debug(f"Access token returned: {access_token}")
    return JSONResponse(content={"access_token": access_token}, status_code=200)
@router.get("/me/", response_model=AuthUser)
def my_data(
    db: Session = Depends(database.get_db),
    user_data: AuthUser = Depends(login_required),
):
    """Read the access token and provide user details from OSM user's API endpoint,
    also integrated with underpass .

    Parameters: None

    Returns: user_data
    """
    # Save user info in User table
    user = user_crud.get_user_by_id(db, user_data["id"])
    if not user:
        # Guard against an id/username mismatch: the username may already be
        # taken by a different OSM account id.
        user_by_username = user_crud.get_user_by_username(db, user_data["username"])
        if user_by_username:
            raise HTTPException(
                status_code=400,
                detail=f"User with this username {user_data['username']} already exists. \
                Please contact the administrator for this.",
            )
        # First login: persist the OSM id/username pair.
        db_user = DbUser(id=user_data["id"], username=user_data["username"])
        db.add(db_user)
        db.commit()
    return JSONResponse(content={"user_data": user_data}, status_code=200)
| hotosm/fmtm | src/backend/app/auth/auth_routes.py | auth_routes.py | py | 2,810 | python | en | code | 27 | github-code | 90 |
# Cards: `a` worth +1, `b` worth 0, `c` worth -1; pick `k` cards to
# maximize the total value.
a, b, c, k = map(int, input().split())
if k <= a:
    # Only +1 cards are needed.
    print(k)
elif k <= a + b:
    # All +1 cards plus some 0 cards.
    print(a)
else:
    # All +1 and 0 cards; the remainder must be -1 cards.
    print(a - (k - a - b))
| Aasthaengg/IBMdataset | Python_codes/p02682/s338292176.py | s338292176.py | py | 164 | python | zh | code | 0 | github-code | 90 |
72985708455 | import sys
# Stack command processor (BOJ 28278): 1 x push, 2 pop, 3 size,
# 4 is-empty, 5 peek. Fast input via sys.stdin.
readline = sys.stdin.readline
stack = []
for _ in range(int(readline())):
    cmd = readline().rstrip().split(" ")
    op = cmd[0]
    if op == '1':
        stack.append(cmd[1])
    elif op == '2':
        print(stack.pop() if stack else "-1")
    elif op == '3':
        print(len(stack))
    elif op == '4':
        print("0" if stack else "1")
    elif op == '5':
        print(stack[-1] if stack else "-1")
14623044113 | # NOTE: must set PYTHONPATH variable for pytest to recognize local modules
# export PYTHONPATH=/my/path/to/modules
# OR
# export PYTHONPATH=$(pwd)
import numpy as np
# absehrd modules
from realism import Realism
class TestRealism:
    """Unit tests for the project-local Realism metrics (realism.py).

    NOTE(review): the RNG is never seeded, so the threshold-based AUC tests
    below are potentially flaky — confirm that is acceptable.
    """

    def create_multimodal_object(self, n=1000):
        """Build a mixed-type (n x 6) fixture matrix: constant, 0/1 binary,
        A/B binary, categorical, bounded count and continuous columns.
        (Helper; not referenced by the tests below.)"""
        count_min = 5
        count_max = 19
        constant_value = 'helloworld'
        binary_A = 'A'
        binary_B = 'B'
        categorical_values = ['X','Y','Z']
        header = np.array(['constant','binary01', 'binaryAB', 'categorical','count','continuous'])
        v_constant = np.full(shape=n, fill_value=constant_value)
        v_binary01 = np.concatenate((np.full(shape=n-1, fill_value=0), np.array([1])))
        v_binaryAB = np.concatenate((np.full(shape=n-1, fill_value=binary_A), np.array([binary_B])))
        v_categorical = np.random.choice(categorical_values, size=n)
        v_count = np.random.randint(low=count_min, high=count_max+1, size=n)
        v_continuous = np.random.random(size=n)
        x = np.column_stack((v_constant, v_binary01, v_binaryAB, v_categorical, v_count, v_continuous))
        return({'x':x, 'header':header})

    def test_which_list(self):
        """Realism.which finds an item's index in a plain list."""
        rea = Realism()
        x = ['a','b','c']
        idx = 1
        item = x[idx]
        assert idx == rea.which(x, item)[0]

    def test_which_array(self):
        """Realism.which finds an item's index in a numpy array."""
        rea = Realism()
        x = np.array(['a','b','c'])
        idx = 1
        item = x[idx]
        assert idx == rea.which(x,item)[0]

    def test_validate_univariate(self):
        """Identical real/synthetic data must yield identical per-column frequencies."""
        rea = Realism()
        n = 1000
        m = 17
        v = np.full(shape=m, fill_value=False)
        prefix='col'
        # Column names col00..col16, zero-padded to a fixed string width.
        header = np.full(shape=m, fill_value='', dtype='<U'+str(len(str(m-1))+len(prefix)))
        for i in range(m):
            header[i] = prefix + str(i).zfill(len(str(m-1)))
        x = np.random.randint(low=0, high=2, size=(n,m))
        res = rea.validate_univariate(arr_r=x, arr_s=x, header=header)
        for j in range(m):
            if res['frq_r'][j] == res['frq_s'][j]:
                v[j] = True
        assert v.all()

    def test_gan_train_match(self):
        """gan_train on real-vs-real data should reproduce the baseline AUC."""
        rea = Realism()
        n = 1000
        m_2 = 3
        threshold = 0.05
        max_beta = 10
        n_epoch = 100
        # Logistic labels: half negative, half positive coefficients plus noise.
        beta = np.append(np.random.randint(low=-max_beta,high=0,size=(m_2,1)),
                         np.random.randint(low=0,high=max_beta,size=(m_2,1)))
        x_real = np.random.randint(low=0, high=2, size=(n,m_2*2))
        x_for_e = np.reshape(np.matmul(x_real, beta), (n,1)) + 0.5 * np.random.random(size=(n,1))
        y_real = np.reshape(np.round(1.0 / (1.0 + np.exp(-x_for_e))), (n,))
        res_real = rea.gan_train(x_synth=x_real, y_synth=y_real,
                                 x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        res_gan_train1 = rea.gan_train(x_synth=x_real, y_synth=y_real,
                                       x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        assert (abs(res_real['auc'] - res_gan_train1['auc']) < threshold)

    def test_gan_train_mismatch(self):
        """gan_train with flipped synthetic labels should diverge from the baseline AUC."""
        rea = Realism()
        n = 1000
        m_2 = 3
        threshold = 0.05
        max_beta = 10
        n_epoch = 100
        beta = np.append(np.random.randint(low=-max_beta,high=0,size=(m_2,1)),
                         np.random.randint(low=0,high=max_beta,size=(m_2,1)))
        x_real = np.random.randint(low=0, high=2, size=(n,m_2*2))
        x_for_e = np.reshape(np.matmul(x_real, beta), (n,1)) + 0.5 * np.random.random(size=(n,1))
        y_real = np.reshape(np.round(1.0 / (1.0 + np.exp(-x_for_e))), (n,))
        res_real = rea.gan_train(x_synth=x_real, y_synth=y_real,
                                 x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        # Flip labels so the synthetic training signal contradicts the real one.
        x_synth = x_real
        y_synth = 1 - y_real
        res_gan_train2 = rea.gan_train(x_synth, y_synth, x_real, y_real, n_epoch=n_epoch)
        assert abs(res_real['auc'] - res_gan_train2['auc']) > threshold

    def test_gan_test_match(self):
        """gan_test on real-vs-real data should reproduce the baseline AUC."""
        rea = Realism()
        n = 1000
        m_2 = 3
        threshold = 0.05
        max_beta = 10
        n_epoch = 100
        beta = np.append(np.random.randint(low=-max_beta,high=0,size=(m_2,1)),
                         np.random.randint(low=0,high=max_beta,size=(m_2,1)))
        x_real = np.random.randint(low=0, high=2, size=(n,m_2*2))
        x_for_e = np.reshape(np.matmul(x_real, beta), (n,1)) + 0.5 * np.random.random(size=(n,1))
        y_real = np.reshape(np.round(1.0 / (1.0 + np.exp(-x_for_e))), (n,))
        res_real = rea.gan_test(x_synth=x_real, y_synth=y_real,
                                x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        res_gan_test1 = rea.gan_test(x_synth=x_real, y_synth=y_real,
                                     x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        assert (abs(res_real['auc'] - res_gan_test1['auc']) < threshold)

    def test_gan_test_mismatch(self):
        """gan_test with flipped synthetic labels should diverge from the baseline AUC."""
        rea = Realism()
        n = 1000
        m_2 = 3
        threshold = 0.05
        max_beta = 10
        n_epoch = 100
        beta = np.append(np.random.randint(low=-max_beta,high=0,size=(m_2,1)),
                         np.random.randint(low=0,high=max_beta,size=(m_2,1)))
        x_real = np.random.randint(low=0, high=2, size=(n,m_2*2))
        x_for_e = np.reshape(np.matmul(x_real, beta), (n,1)) + 0.5 * np.random.random(size=(n,1))
        y_real = np.reshape(np.round(1.0 / (1.0 + np.exp(-x_for_e))), (n,))
        # flip label to ensure AUCs are very different
        x_synth = x_real
        y_synth = 1 - y_real
        res_real = rea.gan_train(x_synth=x_real, y_synth=y_real,
                                 x_real=x_real, y_real=y_real, n_epoch=n_epoch)
        res_gan_test2 = rea.gan_test(x_synth, y_synth, x_real, y_real, n_epoch=n_epoch)
        assert (abs(res_real['auc'] - res_gan_test2['auc']) > threshold)

    def test_gan_test(self):
        # Placeholder — not yet implemented.
        assert True

    def test_validate_feature(self):
        # Placeholder — not yet implemented.
        assert True
27308565021 | import os
import random
from config.Config import DATA_FILES_PATH
def getGeneUniverseOfGivenSize(filename, size):
    """Read one word per line from *filename* and return a random
    set of *size* of those words (the simulated gene universe)."""
    with open(filename) as handle:
        candidates = handle.read().splitlines()
    return set(random.sample(candidates, size))
def getSubsetTerms(geneUniverseset, isRelevant, size):
    """Draw a random subset of *size* terms from the gene universe.

    geneUniverseset: the full universe of terms (a set).
    isRelevant: when True, sample only from the first half of the universe
        (so "relevant" terms overlap with the user gene list drawn the same
        way); when False, sample from the whole universe.
    size: number of terms to draw.
    Returns the drawn terms as a set.
    """
    geneUniverse = list(geneUniverseset)
    if isRelevant:
        assert size <= len(geneUniverse) / 2, "subset size should be less than half the size of gene universe"
        # Bug fix: use integer division — len(...)/2 is a float on Python 3
        # and a float slice index raises TypeError.
        subsetTerm = random.sample(geneUniverse[0:len(geneUniverse) // 2], size)
    else:
        assert size <= len(geneUniverse), "subset size should be less than the size of gene universe"
        subsetTerm = random.sample(geneUniverse, size)
    subsettermset = set(subsetTerm)
    return subsettermset
def returnsyntheticdata(numberOfRelevantterms, numberOfOtherterms, geneuniversesize):
    """Build a synthetic dataset: a gene universe, a user gene list, and a
    number of "relevant" and "other" term sets drawn from the universe.

    numberOfRelevantterms / numberOfOtherterms: how many term sets to draw.
    geneuniversesize: number of words sampled from the bundled words.txt.
    Returns a dict keyed by "universe", "usergene_list", "relterm<size>" and
    "otherterm<size>".
    """
    fn = os.path.join(DATA_FILES_PATH, 'trueGO_data', 'words.txt')
    syntheticData = dict()
    universe = getGeneUniverseOfGivenSize(filename=fn, size=geneuniversesize)
    syntheticData["universe"] = universe
    # Bug fix: random.randint requires integer bounds — "/" produced floats
    # on Python 3 and raised at call time; use floor division instead.
    half = geneuniversesize // 2
    syntheticData["usergene_list"] = getSubsetTerms(geneUniverseset=syntheticData["universe"], isRelevant=True, size=random.randint(half // 2, half))
    sizesofrelevantterms = [random.randint(half // 2, half) for num in range(numberOfRelevantterms)]
    sizesofotherterms = [random.randint(half // 2, half) for num in range(numberOfOtherterms)]
    # NOTE(review): keys are derived from the drawn sizes, so two term sets of
    # equal size overwrite each other — confirm this is intended.
    for i in sizesofrelevantterms:
        syntheticData["relterm%s" % i] = getSubsetTerms(geneUniverseset=universe, isRelevant=True, size=i)
    for i in sizesofotherterms:
        # NOTE(review): isRelevant=True here looks like a copy-paste slip for
        # the "other" terms (False seems intended) — preserved as-is; confirm.
        syntheticData["otherterm%s" % i] = getSubsetTerms(geneUniverseset=universe, isRelevant=True, size=i)
    return syntheticData
10243592300 | import datetime
from django.http import HttpRequest
from django.shortcuts import render
from django.forms import ModelForm
from peminjaman.models import Peminjaman
# Create your views here.
def index(request):
    """Render the booking list page with every Peminjaman record."""
    data = {
        'peminjaman' : Peminjaman.objects.all(),
    }
    return render(request, 'peminjaman.html', data)
def history(request):
    """Render the booking history.

    POST: filter bookings by the submitted start_date/end_date range.
    GET: default to bookings of the current month and year.
    """
    if request.method == 'POST':
        data = {
            'peminjaman': Peminjaman.objects.filter(tanggal__range=(request.POST.get("start_date"),request.POST.get("end_date")))
        }
    else:
        data = {
            'peminjaman': Peminjaman.objects.filter(tanggal__month=datetime.datetime.now().month,
                                                    tanggal__year=datetime.datetime.now().year)
        }
    return render(request, 'history.html', data)
def ruangan(request):
    """Render the room-creation page (no context data)."""
    return render(request, 'createnew.html', '')
def create(request):
    """Render the empty booking form (no context data)."""
    return render(request, 'formpeminjaman.html', '')
def sukses(request):
    """Create a new booking (Peminjaman) from the submitted form, then
    render the booking list.

    Reads the raw POST fields of the booking form; approval defaults to False.
    """
    if request.method == 'POST':
        # Bug fix: the primary key was hard-coded (id=4), so every submission
        # overwrote the same database row. Let the database assign the id.
        p = Peminjaman(
            tanggal=request.POST.get("tanggal"),
            waktu_mulai=request.POST.get("jam_awal"),
            waktu_selesai=request.POST.get("jam_akhir"),
            email=request.POST.get("email"),
            no_telp=request.POST.get("notelp"),
            nama_kegiatan=request.POST.get("namkeg"),
            deskripsi=request.POST.get("desk"),
            jumlah_peserta=request.POST.get("jumlah"),
            approval_manager_ruangan=False,
            ruangan_id_id=request.POST.get("ruangan_id"),
            peminjam_username_id=request.POST.get("user"),
        )
        p.save()
    data = {
        'peminjaman' : Peminjaman.objects.all()
    }
    return render(request, 'peminjaman.html', data)
def delete(request) :
    """Delete the booking whose id is posted in "ambil", then render the list."""
    if request.method == 'POST' :
        p = Peminjaman.objects.filter(id=request.POST.get("ambil"))
        p.delete()
    data = {
        'peminjaman' : Peminjaman.objects.all()
    }
    return render(request,'peminjaman.html',data)
def update(request) :
    """Show the edit form pre-filled with the booking selected via POST "ambil".

    NOTE(review): a GET request falls through and returns None (HTTP 500 in
    Django) — confirm this view is only ever reached via POST.
    """
    if request.method == 'POST' :
        data = {
            'peminjaman' : Peminjaman.objects.filter(id=request.POST.get("ambil"))
        }
        return render(request, 'update.html',data)
def ubah(request) :
    """Apply edits from the update form to the selected booking, then render the list."""
    if request.method == 'POST' :
        p = Peminjaman.objects.filter(id=request.POST.get("id_peminjaman"))
        # QuerySet.update writes the editable fields directly in the database.
        p.update(email=request.POST.get("email"), no_telp=request.POST.get("notelp"), nama_kegiatan=request.POST.get("namkeg"), deskripsi=request.POST.get("desk"), jumlah_peserta=request.POST.get("jumlah"))
    data = {
        'peminjaman' : Peminjaman.objects.all()
    }
    return render(request,'peminjaman.html', data)
| gitavns/simimaru27 | peminjaman/views2.py | views2.py | py | 2,609 | python | en | code | 0 | github-code | 90 |
11202001645 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import tensorflow as tf
from qa_data import PAD_ID
from qa_model import Encoder, QASystem, Decoder
from os.path import join as pjoin
import logging
logging.basicConfig(level=logging.INFO)
# Hyperparameters and paths exposed as TF 1.x command-line flags.
tf.app.flags.DEFINE_float("learning_rate", 1, "Learning rate.")
tf.app.flags.DEFINE_float("max_gradient_norm", 10.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("dropout", 0.15, "Fraction of units randomly dropped on non-recurrent connections.")
tf.app.flags.DEFINE_integer("batch_size", 80, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("epochs", 10, "Number of epochs to train.")
tf.app.flags.DEFINE_integer("state_size", 150, "Size of each model layer.")
tf.app.flags.DEFINE_integer("output_size", 2, "The output size of your model.")
tf.app.flags.DEFINE_integer("embedding_size", 100, "Size of the pretrained vocabulary.")
tf.app.flags.DEFINE_string("data_dir", "data/squad", "SQuAD directory (default ./data/squad)")
tf.app.flags.DEFINE_string("train_dir", "train", "Training directory to save the model parameters (default: ./train).")
tf.app.flags.DEFINE_string("load_train_dir", "", "Training directory to load model parameters from to resume training (default: {train_dir}).")
tf.app.flags.DEFINE_string("log_dir", "log", "Path to store log and flag files (default: ./log)")
tf.app.flags.DEFINE_string("optimizer", "adam", "adam / sgd")
tf.app.flags.DEFINE_integer("print_every", 500, "How many iterations to do per print.")
tf.app.flags.DEFINE_integer("keep", 0, "How many checkpoints to keep, 0 indicates keep all.")
tf.app.flags.DEFINE_string("vocab_path", "data/squad/vocab.dat", "Path to vocab file (default: ./data/squad/vocab.dat)")
tf.app.flags.DEFINE_string("embed_path", "", "Path to the trimmed GLoVe embedding (default: ./data/squad/glove.trimmed.{embedding_size}.npz)")
tf.app.flags.DEFINE_integer("question_size", 60, "Size of q (default 60)")
tf.app.flags.DEFINE_integer("para_size", 800, "The para size (def 800)")
# tf.app.flags.DEFINE_string("checkpoint_dir", "match_gru", "Directory to save match_gru (def: match_gru)")
tf.app.flags.DEFINE_integer("trainable", 0, "training embed?")
tf.app.flags.DEFINE_integer("current_ep", 0, "current_ep")
# Singleton through which the flags above are read everywhere in this module.
FLAGS = tf.app.flags.FLAGS
def initialize_model(session, model, train_dir):
    """Restore model weights from the latest checkpoint in train_dir, or
    initialize fresh parameters when no checkpoint exists. Returns the model."""
    ckpt = tf.train.get_checkpoint_state(train_dir)
    # "V2" checkpoints store an .index file next to the data shards; check both.
    v2_path = ckpt.model_checkpoint_path + ".index" if ckpt else ""
    if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):
        logging.info("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        logging.info("Created model with fresh parameters.")
        session.run(tf.global_variables_initializer())
    logging.info('Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables()))
    return model
def initialize_vocab(vocab_path):
    """Load the vocabulary file (one token per line).

    vocab_path: path to the vocab file.
    Returns (vocab, rev_vocab): vocab maps token -> id, rev_vocab is the
    id-ordered token list.
    Raises ValueError if the file does not exist.
    """
    if tf.gfile.Exists(vocab_path):
        rev_vocab = []
        with tf.gfile.GFile(vocab_path, mode="rb") as f:
            rev_vocab.extend(f.readlines())
        # NOTE(review): the file is opened "rb" but stripped with a str
        # argument — this assumes GFile yields str (Python 2 era); on
        # Python 3 these lines are bytes. Confirm before porting.
        rev_vocab = [line.strip('\n') for line in rev_vocab]
        vocab = {x: y for y, x in enumerate(rev_vocab)}
        return vocab, rev_vocab
    # Bug fix: ValueError was previously called with two arguments
    # ("...%s...", vocab_path), so the path was never interpolated into
    # the message.
    raise ValueError("Vocabulary file %s not found." % vocab_path)
def get_normalized_train_dir(train_dir):
    """
    Adds symlink to {train_dir} from /tmp/cs224n-squad-train to canonicalize the
    file paths saved in the checkpoint. This allows the model to be reloaded even
    if the location of the checkpoint files has moved, allowing usage with CodaLab.
    This must be done on both train.py and qa_answer.py in order to work.
    """
    canonical_link = '/tmp/cs224n-squad-train'
    # Drop a stale link before re-pointing it at the current train_dir.
    if os.path.exists(canonical_link):
        os.unlink(canonical_link)
    # Create the target directory on first use.
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    os.symlink(os.path.abspath(train_dir), canonical_link)
    return canonical_link
def init_dataset(data_dir, val=False):
    """Load question/context/span id files into padded, fixed-width lists.

    data_dir: directory containing {train,val}.ids.{question,context} and
        {train,val}.span files, aligned line-by-line.
    val: read the validation split when True, otherwise the training split.
    Returns a dict of parallel lists: padded token ids ('question'/'context'),
    boolean padding masks, raw lengths, the raw span ('span_exact') and the
    span clipped to the context window ('span').
    """
    if val:
        qfile = pjoin(data_dir, 'val.ids.question')
        cfile = pjoin(data_dir, 'val.ids.context')
        sfile = pjoin(data_dir, 'val.span')
    else:
        qfile = pjoin(data_dir, 'train.ids.question')
        cfile = pjoin(data_dir, 'train.ids.context')
        sfile = pjoin(data_dir, 'train.span')
    dataset_dicts = {'question': [], 'questionMask': [], 'context': [],
                     'contextMask': [], 'contextLen': [],
                     'questionLen': [], 'span_exact': [], 'span': []}
    # NOTE(review): files are opened in binary mode and advanced with
    # .next() — Python 2 era idioms; on Python 3 use text mode and next().
    with open(qfile, 'rb') as qf, open(cfile, 'rb') as cf, open(sfile, 'rb') as sf:
        for line in qf:
            question = [int(word) for word in line.strip().split()]
            # The three files are aligned; advance context/span in lockstep.
            context = [int(word) for word in cf.next().strip().split()]
            span = [int(word) for word in sf.next().strip().split()]
            # Clip span indices into the fixed context window.
            span_min = [min(x, FLAGS.para_size - 1) for x in span]
            # Question: truncate or pad to FLAGS.question_size.
            question_len = len(question)
            if question_len > FLAGS.question_size:
                question = question[:FLAGS.question_size]
                q_mask = [True] * FLAGS.question_size
            else:
                # Bug fix: the mask was previously computed from len(question)
                # AFTER padding, which made it all-True; use the pre-padding
                # length so padded positions are masked False.
                q_mask = [True] * question_len + [False] * (FLAGS.question_size - question_len)
                question = question + [PAD_ID] * (FLAGS.question_size - question_len)
            # Context: truncate or pad to FLAGS.para_size.
            para_len = len(context)
            if para_len > FLAGS.para_size:
                context = context[:FLAGS.para_size]
                c_mask = [True] * FLAGS.para_size
            else:
                # Same mask-before-padding fix as for the question.
                c_mask = [True] * para_len + [False] * (FLAGS.para_size - para_len)
                context = context + [PAD_ID] * (FLAGS.para_size - para_len)
            dataset_dicts['question'].append(question)
            dataset_dicts['questionMask'].append(q_mask)
            dataset_dicts['context'].append(context)
            dataset_dicts['contextMask'].append(c_mask)
            dataset_dicts['span_exact'].append(span)
            dataset_dicts['span'].append(span_min)
            dataset_dicts['contextLen'].append(para_len)
            dataset_dicts['questionLen'].append(question_len)
    return dataset_dicts
def main(_):
    """Load data, build the QA model, and run training plus a final evaluation."""
    # Do what you need to load datasets from FLAGS.data_dir
    datasetTrain = init_dataset(FLAGS.data_dir, val=False)
    datasetVal = init_dataset(FLAGS.data_dir, val=True)
    embed_path = FLAGS.embed_path or pjoin("data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))
    vocab_path = FLAGS.vocab_path or pjoin(FLAGS.data_dir, "vocab.dat")
    vocab, rev_vocab = initialize_vocab(vocab_path)
    encoder = Encoder(size=FLAGS.state_size, vocab_dim=FLAGS.embedding_size)
    decoder = Decoder(output_size=FLAGS.output_size, size=FLAGS.state_size)
    qa = QASystem(encoder, decoder, embed_path)
    # Mirror the log output to a file and persist the run's flags for reproducibility.
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)
    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)
    # Grow GPU memory on demand instead of pre-allocating it all.
    gpu_options = tf.GPUOptions(allow_growth=True)
    #config=tf.ConfigProto(gpu_options=gpu_options\
    #    , allow_soft_placement=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options\
            , allow_soft_placement=True)) as sess:
        load_train_dir = get_normalized_train_dir(FLAGS.load_train_dir or FLAGS.train_dir)
        initialize_model(sess, qa, load_train_dir)
        save_train_dir = get_normalized_train_dir(FLAGS.train_dir)
        qa.train(sess, datasetTrain, datasetVal, rev_vocab, save_train_dir)
        #FLAGS.evaluate,
        qa.evaluate_answer(sess, datasetVal, rev_vocab, log=True)
# TF 1.x entry point: parses the flags defined above, then calls main(_).
if __name__ == "__main__":
    tf.app.run()
| pratyakshs/reading-comprehension | code/train.py | train.py | py | 8,347 | python | en | code | 1 | github-code | 90 |
17963674569 | import collections
# From N stick lengths, form the two longest equal-length pairs and print
# the rectangle area they make (0 if fewer than two pairs exist).
n = int(input())
sticks = sorted((int(x) for x in input().split()), reverse=True)
pairs = []
idx = 0
while idx < n - 1:
    if sticks[idx] == sticks[idx + 1]:
        # Adjacent equal values in the descending order form a pair.
        pairs.append(sticks[idx])
        idx += 2
    else:
        idx += 1
print(pairs[0] * pairs[1] if len(pairs) >= 2 else 0)
# Big-integer convolution trick: pack the value histogram into k-bit limbs
# of one huge integer and square it — limb i of the square is the i-th
# coefficient of the squared histogram polynomial, i.e. the number of
# (ordered) index pairs whose values sum to i.
k = 34
K = 1<<k
# Pack a list of limb values (little-endian) into a single integer.
nu = lambda L: int("".join([bin(K+a)[-k:] for a in L[::-1]]), 2)
# Binary string of n with one trailing guard bit appended for the slicing below.
st = lambda n: bin(n)[2:] + "0"
# Unpack the k-bit limbs back into a list of 2*len(B)-1 coefficients.
li = lambda s: [int(a, 2) if len(a) else 0 for a in [s[-(i+1)*k-1:-i*k-1] for i in range(len(B)*2-1)]]
N, M = map(int, input().split())
A = [int(a) for a in input().split()]
# B[v] = how many input values equal v (values assumed <= 100000).
B = [0] * 100001
for a in A:
    B[a] += 1
# C[i] = number of ordered pairs (including a value with itself) summing to i.
C = li(st(nu(B) ** 2))
ans = 0
# Greedily take the M pairs with the largest sums, from high to low.
for i in range(200001)[::-1]:
    a = min(M, C[i])
    M -= a
    ans += a * i
print(ans)
16463348587 | from django import forms
from django.core.exceptions import ValidationError #№7 25:25, 36:07, 43:33
from django.forms import ModelMultipleChoiceField
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
import datetime
from .models import Fiz_l, Marriage, Property, Distribution
class Fiz_l_form(forms.ModelForm):
    """Form for creating/editing a natural person (Fiz_l)."""

    class Meta:
        model = Fiz_l
        fields = ('name', 'date_of_birth', 'sex')
        labels = {
            'name': 'Имя',
            'date_of_birth': 'Дата рождения',
            'sex': 'Пол'
        }

    def clean_date_of_birth(self):
        """
        Check that the birth date lies between the years 1900 and 2050.
        :return: the validated date_of_birth value
        """
        date_of_birth = self.cleaned_data['date_of_birth']
        if date_of_birth < datetime.date(1900, 1, 1) or date_of_birth > datetime.date(2050, 1, 1):
            raise ValidationError('Введите дату в промежутке между 1900 годом и 2050 годом')
        else:
            return date_of_birth

    def __init__(self, *args, **kwargs):
        super(Fiz_l_form, self).__init__(*args, **kwargs)
        # Placeholder text shown for the empty choice of the "sex" select.
        self.fields['sex'].empty_label = 'Укажите пол'
class Marriage_form(forms.ModelForm):
    """Form for registering a marriage between exactly two persons, with
    cross-field validation of the registration/divorce/break-up dates."""

    class Meta:
        model = Marriage
        fields = ('date_of_marriage_registration', 'parties', 'date_of_marriage_divorce', 'date_of_break_up',)
        labels = {
            'date_of_marriage_registration': 'Дата регистрации брака',
            'parties': 'Стороны',
            'date_of_marriage_divorce': 'Дата расторжения брака',
            'date_of_break_up': 'Дата фактического прекращения брачных отношений (прекращение совместного проживания '
                                'и прекращение ведения совместного хозяйства)'
        }
        widgets = {
            'date_of_marriage_registration': forms.DateInput(),
            'parties': forms.CheckboxSelectMultiple(),
            'date_of_marriage_divorce': forms.DateInput(),
            'date_of_break_up': forms.DateInput(),
        }

    def clean_parties(self):
        """
        Check that exactly 2 persons were selected for the marriage.
        :return: the validated parties value
        """
        parties = self.cleaned_data['parties']
        if len(list(parties)) != 2:
            raise ValidationError('Нужно выбрать 2 лица')
        return parties

    def clean_date_of_marriage_divorce(self):
        """
        Check that the divorce date is not earlier than the registration date.
        :return: the validated date_of_marriage_divorce value
        """
        date_of_marriage_divorce = self.cleaned_data['date_of_marriage_divorce']
        if date_of_marriage_divorce is not None:
            # Bug fix: use .get() — the other field is absent from cleaned_data
            # when it failed its own validation, and direct indexing raised
            # KeyError (HTTP 500) instead of producing a normal form error.
            date_of_marriage_registration = self.cleaned_data.get('date_of_marriage_registration')
            if date_of_marriage_registration is not None and date_of_marriage_divorce <= date_of_marriage_registration:
                raise ValidationError('Брак не может быть расторгнут ранее его заключения')
        return date_of_marriage_divorce

    def clean_date_of_break_up(self):
        """
        Check that the de-facto break-up date is not earlier than the
        registration date (date_of_marriage_registration) and not later than
        the divorce date (date_of_marriage_divorce).
        :return: the validated date_of_break_up value
        """
        date_of_break_up = self.cleaned_data['date_of_break_up']
        if date_of_break_up is not None:
            # Same KeyError fix as in clean_date_of_marriage_divorce.
            date_of_marriage_registration = self.cleaned_data.get('date_of_marriage_registration')
            if date_of_marriage_registration is not None and date_of_break_up <= date_of_marriage_registration:
                raise ValidationError('Прекращение отношений не может наступить ранее заключения брака')
            date_of_marriage_divorce = self.cleaned_data.get('date_of_marriage_divorce')
            if date_of_marriage_divorce is not None:
                if date_of_marriage_divorce < date_of_break_up:
                    raise ValidationError('Прекращение отношений не может наступить позднее даты прекращения брака')
        return date_of_break_up
class Marriage_form_divorce(forms.ModelForm):
    """Form for registering a divorce on an existing marriage."""

    class Meta:
        model = Marriage
        fields = ('date_of_marriage_divorce', 'date_of_break_up',)
        labels = {
            'date_of_marriage_divorce': 'Дата регистрации развода',
            'date_of_break_up': 'Дата фактического прекращения брачных отношений (прекращение совместного проживания '
                                'и прекращение ведения совместного хозяйства)'
        }
        widgets = {
            'date_of_marriage_divorce': forms.DateInput(),
            'date_of_break_up': forms.DateInput(),
        }

    def clean_date_of_break_up(self):
        """
        Check that the de-facto break-up date is not earlier than the
        registration date and not later than the divorce date. (The lower
        bound is commented out because the registration date is not part of
        this form.)
        :return: the validated date_of_break_up value
        """
        date_of_break_up = self.cleaned_data['date_of_break_up']
        # NOTE(review): direct indexing assumes 'date_of_marriage_divorce'
        # passed its own validation; otherwise this raises KeyError — confirm.
        date_of_marriage_divorce = self.cleaned_data['date_of_marriage_divorce']
        if date_of_break_up is not None and date_of_marriage_divorce is not None:
            # date_of_marriage_registration = Marriage.objects.get() self.cleaned_data['date_of_marriage_registration']
            # if date_of_break_up <= date_of_marriage_registration:
            #     raise ValidationError('Прекращение отношений не может наступить ранее заключения брака')
            if date_of_marriage_divorce < date_of_break_up:
                raise ValidationError('Прекращение отношений не может наступить позднее даты прекращения брака')
        return date_of_break_up
class Property_form(forms.ModelForm):
    """Form for creating/editing a piece of property."""

    class Meta:
        model = Property
        fields = ('name',
                  'type_of_property_form',
                  'obtaining_person',
                  'date_of_purchase',
                  'price',)
        labels = {
            'name': 'Название имущества (например, "Квартира в Москве")',
            'type_of_property_form': 'Вид имущества',
            'obtaining_person': 'Лицо (одно из лиц), приобретших имущество',
            'date_of_purchase': 'Дата приобретения имущества (переход права собственности)',
            'price': 'Текущая цена имущества (можно примерно), руб'
        }
        widgets = {
            'name': forms.TextInput(),
            'type_of_property_form': forms.Select(),
            'obtaining_person': forms.Select(),
            'date_of_purchase': forms.DateInput(),
            'price': forms.NumberInput(),
        }

    def __init__(self, *args, **kwargs):
        super(Property_form, self).__init__(*args, **kwargs)
        # Original comment said "for some reason this does not work".
        # NOTE(review): presumably because empty_label is only honored by
        # choice fields; a numeric field simply ignores the attribute — confirm.
        self.fields['price'].empty_label = 'Укажите цену'
        self.fields['price'].required = False

    def clean_date_of_purchase(self):
        """
        Check that the purchase date lies in the sane range 1900..2050.
        :return: the validated date_of_purchase value
        """
        date_of_purchase = self.cleaned_data['date_of_purchase']
        if date_of_purchase < datetime.date(1900, 1, 1) or date_of_purchase > datetime.date(2050, 1, 1):
            raise ValidationError('Введите дату в промежутке между 1900 годом и 2050 годом')
        else:
            return date_of_purchase
class Distribution_form(forms.ModelForm):
    """Form for starting a property distribution between two persons."""

    class Meta:
        model = Distribution
        fields = ('parties',
                  'date_of_distribution')
        labels = {
            'parties': 'Лица, делящие имущество',
            'date_of_distribution': 'Дата, на которую делится имущество'
        }
        widgets = {
            'parties': forms.CheckboxSelectMultiple(),
            'date_of_distribution': forms.DateInput()
        }

    def clean_parties(self):
        """
        Check that exactly 2 persons were selected for the distribution.
        :return: the validated parties value
        """
        parties = self.cleaned_data['parties']
        if len(list(parties)) != 2:
            raise ValidationError('Нужно выбрать 2 лица')
        else:
            return parties
class SignUpForm(UserCreationForm):
    """Registration form: standard Django user plus a mandatory, unique e-mail."""

    username = forms.CharField(max_length=150, required=True, label='Логин', help_text='Обязательное поле')
    first_name = forms.CharField(max_length=30, required=False, label='Имя', help_text='Не обязательно')
    last_name = forms.CharField(max_length=30, required=False, label='Фамилия', help_text='Не обязательно')
    email = forms.EmailField(max_length=254, required=True, help_text='Обязательное поле. Необходим действительный e-mail адрес.')
    # Bug fix: explicitly declared fields override the widgets inherited from
    # UserCreationForm, and Meta "widgets" does NOT apply to declared fields —
    # so without widget=forms.PasswordInput both password fields were rendered
    # as plain text inputs, exposing typed passwords on screen.
    password1 = forms.CharField(label='Пароль', widget=forms.PasswordInput, help_text='Пароль должен содержать не менее 8 символов и не должен быть исключительно числовой')
    password2 = forms.CharField(label='Подтвердите пароль', widget=forms.PasswordInput, help_text='Укажите пароль еще раз.')

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )
        labels = {
            'username': 'Логин',
            'first_name': 'Имя',
            'last_name': 'Фамилия',
            'email': 'E-mail',
            'password1': 'Пароль',
            'password2': 'Подтвердите пароль'
        }
        widgets = {
            'username': forms.TextInput(),
            'first_name': forms.TextInput(),
            'last_name': forms.TextInput(),
            'email': forms.EmailInput(),
            'password1': forms.PasswordInput(),
            'password2': forms.PasswordInput()
        }

    def clean_email(self):
        """
        Reject e-mail addresses that are already registered on the site.
        :return: the validated email value
        """
        email = self.cleaned_data['email']
        if User.objects.filter(email=email).exists():
            raise ValidationError("Этот email ранее уже указывался на этом сайте")
        return email
71019826217 | import snippets
import tensorflow as tf
import os
# print("Select the training material of the model:")
# path = snippets.fileexplorer(True, "file")[0]
# Text corpus the model/vocabulary was built from.
path='./Datasets/fam-final-christina.txt'
model = snippets.model_of_spec(path)
# print("Select directory that holds the checkpoints:")
# checkpoint_path = snippets.fileexplorer(True, "directory")[0]
checkpoint_path = './checkpoints/family-gc/run_mum/'
# Resolve the directory to its latest checkpoint file, then restore weights.
checkpoint_path = snippets.ckpt(checkpoint_path)
model.load_weights(checkpoint_path)
loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',loss=loss)
# New run directory; weights are written after every epoch.
checkpoint_dir = ["./checkpoints/family-gc/run_test"]
checkpoint_prefix = os.path.join(checkpoint_dir[0], "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)
# NOTE(review): `dataset` is never defined in this script — running it as-is
# raises NameError. Presumably the training dataset is built elsewhere
# (e.g. a companion snippets helper); confirm before use.
history = model.fit(dataset, epochs=4000, callbacks=[checkpoint_callback]) # specify number of epochs here
| TrainFanatic/WhatsGPT | self/model-resume-training.py | model-resume-training.py | py | 956 | python | en | code | 0 | github-code | 90 |
18249481689 | from collections import Counter
from operator import mul
from functools import reduce
import sys
input = sys.stdin.readline
def combinations_count(n, r):
    """Return the binomial coefficient C(n, r).

    Bug fix / robustness: for out-of-range arguments (r < 0 or r > n) both
    reduce() products below are empty and evaluate to 1, so the original
    silently returned 1; the correct combinatorial value is 0.
    """
    if r < 0 or r > n:
        return 0
    # Use the smaller of r and n - r so both products stay short.
    r = min(r, n - r)
    numer = reduce(mul, range(n, n - r, -1), 1)
    denom = reduce(mul, range(1, r + 1), 1)
    return numer // denom
def main():
    """For each index i, print the number of pairs (j, k), j < k, with
    A[j] == A[k], after removing element i from the list.

    Strategy: s = total number of equal pairs over the whole list; removing
    one occurrence of a value v with multiplicity m replaces C(m, 2) of those
    pairs by C(m - 1, 2).
    """
    N = int(input())
    A = list(map(int, input().split()))
    ans = [0] * N
    cnt = Counter(A)
    # Total equal pairs in the full list.
    s = 0
    for key, value in cnt.items():
        if value < 2:
            continue
        s += combinations_count(value, 2)
    for i in range(N):
        # cnt[A[i]] - 1 is the multiplicity of A[i] once element i is removed.
        if cnt[A[i]]-1 == 0:
            ans[i] = s
        elif cnt[A[i]]-1 == 1:
            ans[i] = s - combinations_count(cnt[A[i]], 2)
        else:
            ans[i] = s - combinations_count(cnt[A[i]], 2) + combinations_count(cnt[A[i]]-1, 2)
    print(*ans, sep="\n")
main() | Aasthaengg/IBMdataset | Python_codes/p02732/s596149484.py | s596149484.py | py | 855 | python | en | code | 0 | github-code | 90 |
69896645418 | import cv2
import os
import tkinter as tk
import tkinter.ttk as ttk
import numpy as np
from PIL import Image, ImageTk
class Application():
def __init__(self):
height = 1000
width = 1500
images = []
tkImages = []
self.selected = 0
self.param1_val = 50
self.param2_val = 100
self.minRadius_val = 0
self.maxRadius_val = 0
self.cannyNum_val = 100
self.centerDistance_val = 50
root = tk.Tk()
canvas_frame = tk.Frame(root, height=height, width=width)
canvas_frame.pack()
canvas = tk.Canvas(canvas_frame, height=461, width=614)
canvas.pack()
input_frame = tk.Frame(canvas_frame, height=height/2, width=width)
input_frame.pack()
for file in os.listdir("Test_images/"):
#images.append(cv2.imread("Test_images/" + file))
tkImages.append(Image.open("Test_images/" + file))
for i in range(len(tkImages)):
tkImages[i] = tkImages[i].resize((614, 461), Image.ANTIALIAS)
images.append(np.array(tkImages[i]))
tkImages[i] = ImageTk.PhotoImage(tkImages[i])
canvas.create_image(0, 0, anchor=tk.NW, image=tkImages[self.selected])
def set_param1(v):
self.param1_val = int(float(v))
param1_var.set(self.param1_val)
Hough_circles(images[self.selected])
def set_param2(v):
self.param2_val = int(float(v))
param2_var.set(self.param2_val)
Hough_circles(images[self.selected])
def set_minRadius(v):
self.minRadius_val = int(float(v))
minRadius_var.set(self.minRadius_val)
Hough_circles(images[self.selected])
def set_maxRadius(v):
self.maxRadius_val = int(float(v))
maxRadius_var.set(self.maxRadius_val)
Hough_circles(images[self.selected])
def set_cannyNum(v):
self.cannyNum_val = int(float(v))
cannyNum_var.set(self.cannyNum_val)
Hough_circles(images[self.selected])
def set_centerDistance(v):
self.centerDistance_val = int(float(v))
centerDistance_var.set(self.centerDistance_val)
Hough_circles(images[self.selected])
param1 = ttk.Scale(input_frame, from_=0, to=1000, command = set_param1)
param1.set(self.param1_val)
param1_var = tk.StringVar(root)
param1_var.set(self.param1_val)
param1_entry = ttk.Entry(input_frame, textvariable=param1_var, width=5)
param2 = ttk.Scale(input_frame, from_=0, to=1000, command=set_param2)
param2.set(self.param2_val)
param2_var = tk.StringVar(root)
param2_var.set(self.param2_val)
param2_entry = ttk.Entry(input_frame, textvariable=param2_var, width=5)
minRadius = ttk.Scale(input_frame, from_=0, to=1000, command=set_minRadius)
minRadius.set(self.minRadius_val)
minRadius_var = tk.StringVar(root)
minRadius_var.set(self.minRadius_val)
minRadius_entry = ttk.Entry(input_frame, textvariable=minRadius_var, width=5)
maxRadius = ttk.Scale(input_frame, from_=0, to=1000, command=set_maxRadius)
maxRadius.set(self.maxRadius_val)
maxRadius_var = tk.StringVar(root)
maxRadius_var.set(self.maxRadius_val)
maxRadius_entry = ttk.Entry(input_frame, textvariable=maxRadius_var, width=5)
cannyNum = ttk.Scale(input_frame, from_=0, to=1000, command = set_cannyNum)
cannyNum.set(self.cannyNum_val)
cannyNum_var = tk.StringVar(root)
cannyNum_var.set(self.cannyNum_val)
cannyNum_entry = ttk.Entry(input_frame, textvariable=cannyNum_var, width=5)
centerDistance = ttk.Scale(input_frame, from_=0, to=1000, command = set_centerDistance)
centerDistance.set(self.centerDistance_val)
centerDistance_var = tk.StringVar(root)
centerDistance_var.set(self.centerDistance_val)
centerDistance_entry = ttk.Entry(input_frame, textvariable=centerDistance_var, width=5)
param1.grid(row=0, column=1)
tk.Label(input_frame, text="Canny Upper Threshold (Param1)").grid(row=0, column=0)
param1_entry.grid(row=0, column=2)
param2.grid(row=1, column=1)
tk.Label(input_frame, text="Accumulator Threshold (Param2)").grid(row=1, column=0)
param2_entry.grid(row=1, column=2)
minRadius.grid(row=2, column=1)
tk.Label(input_frame, text="Minimum Radius").grid(row=2, column=0)
minRadius_entry.grid(row=2, column=2)
maxRadius.grid(row=3, column=1)
tk.Label(input_frame, text="Maximum Radius").grid(row=3, column=0)
maxRadius_entry.grid(row=3, column=2)
cannyNum.grid(row=4, column=1)
tk.Label(input_frame, text="Canny Number").grid(row=4, column=0)
cannyNum_entry.grid(row=4, column=2)
centerDistance.grid(row=5, column=1)
tk.Label(input_frame, text="Distance between centers").grid(row=5, column=0)
centerDistance_entry.grid(row=5, column=2)
def enter_param1(event):
value = param1_var.get()
v = int(float(value))
self.param1_val = v
param1.set(v)
Hough_circles(images[self.selected])
param1_entry.bind("<Return>", enter_param1)
def enter_param2(event):
value = param2_var.get()
v = int(float(value))
self.param2_val = v
param2.set(v)
Hough_circles(images[self.selected])
param2_entry.bind("<Return>", enter_param2)
def enter_minRadius(event):
value = minRadius_var.get()
v = int(float(value))
self.minRadius_val = v
minRadius.set(v)
Hough_circles(images[self.selected])
minRadius_entry.bind("<Return>", enter_minRadius)
def enter_maxRadius(event):
value = maxRadius_var.get()
v = int(float(value))
self.maxRadius_val = v
maxRadius.set(v)
Hough_circles(images[self.selected])
maxRadius_entry.bind("<Return>", enter_maxRadius)
def enter_cannyNum(event):
value = cannyNum_var.get()
v = int(float(value))
self.cannyNum_val = v
cannyNum.set(v)
Hough_circles(images[self.selected])
cannyNum_entry.bind("<Return>", enter_cannyNum)
def enter_centerDistance(event):
value = centerDistance_var.get()
v = int(float(value))
self.centerDistance_val = v
centerDistance.set(v)
Hough_circles(images[self.selected])
centerDistance_entry.bind("<Return>", enter_centerDistance)
def draw_circles(circles):
canvas.delete("all")
canvas.create_image(0, 0, anchor=tk.NW, image=tkImages[self.selected])
circles = np.round(circles[0, :]).astype("int")
circle_num = 0
for(x,y,r) in circles:
circle_num += 1
canvas.create_oval(x-r, y-r, x+r, y+r, outline="#ee0000")
print(circle_num)
def reset():
canvas.delete("all")
canvas.create_image(0, 0, anchor=tk.NW, image=tkImages[self.selected])
def Hough_circles(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, self.centerDistance_val, param1=self.param1_val,
param2=self.param2_val, minRadius=self.minRadius_val, maxRadius=self.maxRadius_val)
if circles is not None:
if len(circles) > 100:
print("too many")
return
draw_circles(circles)
else:
reset()
def iterate_right():
if self.selected == len(images)-1:
self.selected = 0
else:
self.selected += 1
Hough_circles(images[self.selected])
def iterate_left():
if self.selected == 0:
self.selected = len(images)-1
else:
self.selected -= 1
Hough_circles(images[self.selected])
right_button = tk.Button(input_frame, text="Right", command=iterate_right)
left_button = tk.Button(input_frame, text="Left", command=iterate_left)
right_button.grid(row=2, column=5)
left_button.grid(row=2, column=4)
root.mainloop()
if __name__ == '__main__':
app = Application() | dakota0064/Fluorescent_Robotic_Imager | hough_circles_parameter_search.py | hough_circles_parameter_search.py | py | 8,881 | python | en | code | 0 | github-code | 90 |
17177339146 | import pprint
# Advent of Code 2021 day 6 (part 2): lanternfish population after 256 days.
# Track the number of fish per internal-timer value (0..8) so each simulated
# day is O(1) regardless of population size.
with open("input", "r") as f:
    data = [x.rstrip("\n") for x in f if x.rstrip("\n") != ""]
data = [int(i) for i in data[0].split(",")]

# Count fish per timer in a single pass (the original scanned the list nine
# times, once per timer value; the sort it did first was unnecessary).
lanternfish = {t: 0 for t in range(9)}
for timer in data:
    lanternfish[timer] += 1
print(lanternfish)

for _ in range(256):
    # Fish at timer 0 reset to 6 and each spawns a new fish at timer 8;
    # every other timer simply shifts down by one.
    spawned = lanternfish[0]
    for t in range(8):
        lanternfish[t] = lanternfish[t + 1]
    lanternfish[6] += spawned
    lanternfish[8] = spawned
print(lanternfish)
print(sum([i for i in lanternfish.values()])) | feiming/adventofcode2021 | day6/part2.py | part2.py | py | 997 | python | en | code | 0 | github-code | 90 |
18110545879 | from collections import deque
# Process n doubly-linked-list commands (ALDS1-style) on a deque and print
# the final contents, space-separated.
d = deque()
n = int(input())
for i in range(n):
    s = input()
    if s == "deleteFirst":
        d.popleft()
    elif s == "deleteLast":
        d.pop()
    elif s[:6] == "insert":
        # "insert X": prepend X.
        d.appendleft(int(s[7:]))
    else:
        # "delete X": remove the first occurrence of X, if any.
        # EAFP: a single scan via remove() instead of the original
        # `in` membership test followed by remove() (two O(n) scans).
        delkey = int(s[7:])
        try:
            d.remove(delkey)
        except ValueError:
            pass
print(" ".join(map(str,d)))
| Aasthaengg/IBMdataset | Python_codes/p02265/s362502911.py | s362502911.py | py | 366 | python | en | code | 0 | github-code | 90 |
15238354787 | import abc
import copy
class Product(abc.ABC):
    """Prototype-pattern interface: a usable object that can clone itself."""
    @abc.abstractmethod
    def use(self, s):
        """Render the string *s* in this product's style (prints to stdout)."""
        pass
    @abc.abstractmethod
    def create_clone(self):
        """Return an independent copy of this product."""
        pass
class Manager(object):
    """Registry of named prototype products, cloned on demand (Prototype pattern)."""

    def __init__(self):
        # Maps a display name to its registered prototype instance.
        self._show_case = {}

    def register(self, name, product):
        """Store *product* in the showcase under *name*."""
        self._show_case[name] = product

    def create(self, product_name):
        """Return a fresh clone of the prototype registered as *product_name*."""
        prototype = self._show_case[product_name]
        return prototype.create_clone()
class MessageBox(Product):
    """Prints a string framed on all sides by a single decoration character."""
    def __init__(self, decochar):
        # Character used to draw the frame.
        self._decochar = decochar
    def use(self, s):
        """Print *s* inside a full frame of the decoration character."""
        # NOTE(review): list comprehensions used purely for the print side
        # effect; a plain loop (or one print of decochar * n) would be clearer.
        [print(self._decochar, flush=True, end="") for _ in range(len(s) + 4)]
        print("")
        print("{decochar} {s} {decochar}".format(s=s, decochar=self._decochar))
        [print(self._decochar, flush=True, end="") for _ in range(len(s) + 4)]
        print("")
    def create_clone(self):
        """Return a deep copy of this box (Prototype pattern)."""
        p = copy.deepcopy(self)
        return p
class UnderlinePen(Product):
    """Prints a quoted string with an underline drawn beneath it."""
    def __init__(self, ulchar):
        # Character used to draw the underline.
        self._ulchar = ulchar
    def use(self, s):
        """Print *s* in double quotes, underlined with the pen character."""
        print("\"{}\"".format(s))
        # Leading space aligns the underline with the text after the quote.
        print(" ", flush=True, end="")
        [print(self._ulchar, flush=True, end="") for _ in range(len(s))]
        print(" ")
    def create_clone(self):
        """Return a deep copy of this pen (Prototype pattern)."""
        p = copy.deepcopy(self)
        return p
if __name__ == "__main__":
    # Demo: register three prototypes, then create clones and use them.
    manager = Manager()
    upen = UnderlinePen("~")
    mbox = MessageBox("*")
    sbox = MessageBox("/")
    manager.register("strong message", upen)
    manager.register("warning box", mbox)
    manager.register("splash box", sbox)
    p1 = manager.create("strong message")
    p1.use("Hello, world")
    p2 = manager.create("warning box")
    p2.use("Hello, world")
    p3 = manager.create("splash box")
    p3.use("Hello, world")
    p4 = manager.create("strong message")
    # Different ids confirm each create() returns a distinct clone.
    print(id(p1))
    print(id(p4))
| ElvinKim/python_master | oop_design_pattern/design_pattern_beginning/prototype_pattern/text_style_example.py | text_style_example.py | py | 1,815 | python | en | code | 2 | github-code | 90 |
18314380629 | import sys
sys.setrecursionlimit(10**9)
# Color the edges of a rooted tree so that no two edges sharing a vertex get
# the same color; prints the number of colors used, then one color per edge
# in input order (ABC 146 E-style problem).
n = int(input())
graph = [[] for _ in range(n)]
# ans[i] holds the color assigned to the i-th input edge.
ans = [0] * (n-1)
for i in range(n-1):
    a, b = map(int, input().split())
    a, b = a-1, b-1
    # Store child vertex together with the edge's input index.
    graph[a].append([b, i])
# coloring
def dfs(now, color):
    """Assign colors 1,2,... to edges below `now`, skipping `color`
    (the color of the edge that led into `now`)."""
    cnt = 1
    for to, num in graph[now]:
        if cnt == color:
            cnt += 1
        ans[num] = cnt
        dfs(to, cnt)
        cnt += 1
dfs(0, 0)
print(max(ans))
for i in ans:
print(i) | Aasthaengg/IBMdataset | Python_codes/p02850/s982712674.py | s982712674.py | py | 455 | python | en | code | 0 | github-code | 90 |
71327916778 | import json
from applications.flow.models import ProcessRun, NodeRun, Process, Node, SubProcessRun, SubNodeRun
from applications.task.models import Task
from applications.utils.dag_helper import PipelineBuilder, instance_dag, instance_gateways
def build_and_create_process(task_id):
    """Build the pipeline for a task and create its runtime records.

    Returns the built pipeline dict; as a side effect creates a ProcessRun
    (plus NodeRun rows, and SubProcessRun trees for subprocess nodes) and
    points the Task at the new run.
    """
    task = Task.objects.filter(id=task_id).first()
    p_builder = PipelineBuilder(task_id)
    pipeline = p_builder.build()
    process = p_builder.process
    node_map = p_builder.node_map
    process_run_uuid = p_builder.instance
    # Snapshot of the process definition to persist on the run record.
    process_run_data = process.clone_data
    # Node uids are regenerated at build time, so map the run uids back onto
    # the stored DAG and gateway structures.
    process_run_data["dag"] = instance_dag(process_run_data["dag"], process_run_uuid)
    process_run_data["gateways"] = instance_gateways(process_run_data["gateways"], process_run_uuid)
    # Periodic tasks with log convergence enabled keep only the latest run.
    if task.log_converge and task.run_type in ["time", "cycle", "cron"]:
        ProcessRun.objects.filter(task_id=task_id).delete()
    process_run = ProcessRun.objects.create(process_id=process.id,
                                            root_id=pipeline["id"],
                                            task_id=task_id,
                                            **process_run_data)
    task.process_run_id = process_run.id
    task.save()
    node_run_bulk = []
    for pipeline_id, node in node_map.items():
        # Copy only the Node fields that exist on NodeRun.
        _node = {k: v for k, v in node.__dict__.items() if k in NodeRun.field_names()}
        _node["uuid"] = process_run_uuid[pipeline_id].id
        if node.node_type == Node.SUB_PROCESS_NODE:
            # Subprocess nodes get their own SubProcessRun tree; node.content
            # appears to hold the subprocess's Process id -- TODO confirm.
            subprocess_run_id = create_subprocess(node.content, process_run.id, process_run_uuid, pipeline["id"])
            node_run_bulk.append(NodeRun(process_run=process_run, subprocess_runtime_id=subprocess_run_id, **_node))
        else:
            node_run_bulk.append(NodeRun(process_run=process_run, **_node))
    NodeRun.objects.bulk_create(node_run_bulk, batch_size=500)
    return pipeline
def create_subprocess(process_id, process_run_id, process_run_uuid, root_id):
    """Create the runtime records for a subprocess (recursively).

    process_id: id of the subprocess's Process definition
    process_run_id: id of the top-level ProcessRun this subprocess belongs to
    process_run_uuid: map from build-time pipeline ids to run uids
    root_id: pipeline id of the top-level root

    Returns the id of the created SubProcessRun.
    """
    process = Process.objects.filter(id=process_id).first()
    process_run_data = process.clone_data
    # Map run-time uids back onto the stored DAG (see build_and_create_process).
    process_run_data["dag"] = instance_dag(process_run_data["dag"], process_run_uuid)
    process_run = SubProcessRun.objects.create(process_id=process_id, process_run_id=process_run_id, root_id=root_id,
                                               **process_run_data)
    subprocess_node_map = Node.objects.filter(process_id=process_id).in_bulk(field_name="uuid")
    node_run_bulk = []
    for pipeline_id, node in subprocess_node_map.items():
        # Copy only the Node fields that exist on NodeRun.
        _node = {k: v for k, v in node.__dict__.items() if k in NodeRun.field_names()}
        _node["uuid"] = process_run_uuid[pipeline_id].id
        if node.node_type == Node.SUB_PROCESS_NODE:
            # Nested subprocess: recurse, sharing the same top-level run/root.
            subprocess_run_id = create_subprocess(node.content, process_run_id, process_run_uuid, root_id)
            node_run_bulk.append(
                SubNodeRun(subprocess_run=process_run, subprocess_runtime_id=subprocess_run_id, **_node))
        else:
            node_run_bulk.append(SubNodeRun(subprocess_run=process_run, **_node))
    SubNodeRun.objects.bulk_create(node_run_bulk, batch_size=500)
    return process_run.id
| xhongc/streamflow | applications/flow/utils.py | utils.py | py | 3,437 | python | en | code | 81 | github-code | 90 |
23815764187 | from __future__ import annotations
import sys
import uuid
from globus_cli.login_manager import LoginManager
from globus_cli.parsing import command, endpoint_id_arg
from globus_cli.termio import TextMode, display
from ._common import server_id_arg, server_update_opts
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
@command(
"update",
short_help="Update an endpoint server",
adoc_examples="""Change an existing server's scheme to use ftp:
[source,bash]
----
$ ep_id=ddb59aef-6d04-11e5-ba46-22000b92c6ec
$ server_id=294682
$ globus endpoint server update $ep_id $server_id --scheme ftp
----
""",
)
@server_update_opts
@endpoint_id_arg
@server_id_arg
@LoginManager.requires_login("transfer")
def server_update(
login_manager: LoginManager,
*,
endpoint_id: uuid.UUID,
server_id: str,
subject: str | None,
port: int | None,
scheme: Literal["gsiftp", "ftp"] | None,
hostname: str | None,
incoming_data_ports: tuple[int | None, int | None] | None,
outgoing_data_ports: tuple[int | None, int | None] | None,
) -> None:
"""
Update the attributes of a server on an endpoint.
At least one field must be updated.
"""
from globus_cli.services.transfer import assemble_generic_doc
transfer_client = login_manager.get_transfer_client()
server_doc = assemble_generic_doc(
"server", subject=subject, port=port, scheme=scheme, hostname=hostname
)
# n.b. must be done after assemble_generic_doc(), as that function filters
# out `None`s, which we need to be able to set for `'unspecified'`
if incoming_data_ports:
server_doc.update(
incoming_data_port_start=incoming_data_ports[0],
incoming_data_port_end=incoming_data_ports[1],
)
if outgoing_data_ports:
server_doc.update(
outgoing_data_port_start=outgoing_data_ports[0],
outgoing_data_port_end=outgoing_data_ports[1],
)
res = transfer_client.update_endpoint_server(endpoint_id, server_id, server_doc)
display(res, text_mode=TextMode.text_raw, response_key="message")
| globus/globus-cli | src/globus_cli/commands/endpoint/server/update.py | update.py | py | 2,171 | python | en | code | 67 | github-code | 90 |
9884141212 | from time import time
start_time = time()
with open("14_input.txt") as f:
lines = f.readlines()
def get_all_values(data):
    """Expand every floating bit 'X' in *data* into both '0' and '1'.

    Returns the list of all fully-resolved bit strings, substituting '0'
    before '1' at each leftmost 'X'.
    """
    if "X" not in data:
        return [data]
    prefix, suffix = data.split("X", 1)
    resolved = []
    for bit in ("0", "1"):
        resolved.extend(get_all_values(prefix + bit + suffix))
    return resolved
data = {}
mask = None
for line in lines:
if line.startswith("mask = "):
mask = line[7:-1]
else:
a, v = line.split(" = ")
addr = int(a[4:-1])
value = int(v)
base2 = "{0:b}".format(addr)
out = ""
for c in range(len(mask)):
vb = base2[len(base2) - c - 1] if c < len(base2) else "0"
m = mask[len(mask) - c - 1]
out = (vb if m == "0" else ("1" if m == "1" else "X")) + out
for a in get_all_values(out):
data[a] = value
o = 0
for d in data.values():
o = o + d
print(data)
print(o)
end_time = time()
print((end_time - start_time))
| luk2302/aoc | 2020/14_2.py | 14_2.py | py | 947 | python | en | code | 0 | github-code | 90 |
17978512609 | import sys
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
# AGC-style Fennec vs Snuke game: count vertices closer to vertex 1 (Fennec)
# than to vertex N (Snuke) on a tree given as N followed by edge pairs.
read = sys.stdin.read
N, *ab = map(int, read().split())
# Split the flat edge list into the two endpoint tuples a and b.
a, b = zip(*zip(*[iter(ab)] * 2))
# Unweighted adjacency as a sparse matrix; vertices are 1-indexed, hence N+1.
graph = csr_matrix(([1] * (N - 1), (a, b)), shape=(N + 1, N + 1))
# Shortest distances from vertex 1 and from vertex N to every vertex.
distance = dijkstra(graph, directed=False, indices=[1, N])
d1 = distance[0]
dn = distance[1]
# Start at -1 to discount the dummy 0-th vertex (always d1 <= dn there).
Fennec = -1
for i, j in zip(d1, dn):
    if i <= j:
        Fennec += 1
if Fennec > N - Fennec:
print('Fennec')
else:
print('Snuke') | Aasthaengg/IBMdataset | Python_codes/p03660/s920027729.py | s920027729.py | py | 482 | python | en | code | 0 | github-code | 90 |
18435446769 | A, B = map(int, input().split())
def f(X):
    """Return XOR(0 ^ 1 ^ ... ^ X) as a binary string (LSB last).

    Bit k of the result is the parity of how many integers in [0, X]
    have bit k set.
    """
    bits = []
    quotient = 1
    block = 2
    while quotient > 0:
        quotient, rem = divmod(X + 1, block)
        if block == 2:
            # Count of odd numbers <= X is exactly the quotient.
            bits.append(quotient % 2)
        else:
            # Complete blocks contribute an even count; only the partial
            # block's upper half can make the parity odd.
            bits.append(max(rem - (block // 2), 0) % 2)
        block *= 2
    bits.reverse()
    return "".join(str(bit) for bit in bits)
print(int(f(max(0, A-1)), 2)^int(f(B), 2)) | Aasthaengg/IBMdataset | Python_codes/p03104/s991588444.py | s991588444.py | py | 307 | python | en | code | 0 | github-code | 90 |
480573625 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 19:44:31 2018
@author: maozhang
"""
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
path = os.getcwd()
number = 0
for root, dirname, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename)[1] == '.txt':
number += 1
temp = int(filename[9:12])
steps = number * 100
for step in range(0,steps,100):
estrain = step * 0.01 * 0.042
txt_name = 'ZrCuO_NG_%iK_%i.txt'%(temp,step)
df00 = pd.DataFrame(pd.read_csv(txt_name, sep='\t'))
ShearStrainStd = df00.ShearStrain.std()
ShearStrainMean = df00.ShearStrain.mean()
print(estrain, ShearStrainStd, ShearStrainMean)
with open('ZrCuO_NG_%iK_ShearStrainLocalization.txt'%temp,'a+') as f:
var = 'estrain' +'\t' + 'ShearStrainStd' +'\t' + 'ShearStrainMean' +'\t' +'\n'
value = '%.3f\t%.5f\t%.5f\n'%(estrain,ShearStrainStd,ShearStrainMean)
if step == 0:
f.write(var)
f.write(value)
else:
f.write(value) | zhangmaohust/md_scripts | molecular_dynamics_analyses/ZrCuO_NG_ShearStrainLocalization_20181207.py | ZrCuO_NG_ShearStrainLocalization_20181207.py | py | 1,128 | python | en | code | 0 | github-code | 90 |
12938496173 | '''
Here, we first find the number the xth digit is part of. A number whose largest
power of 10 is n contributes n + 1 digits. The number of digits consumed per decade
is 9n(n + 1). Once the correcth decade is identified, the remainder is used
to indentify which number it belongs to, and the specific digit.
'''
import math
def find_digit(x):
    """Return the x-th digit (1-indexed) of Champernowne's constant 0.123456789101112...

    Walks decades: numbers with exp+1 digits contribute 9*(exp+1)*10**exp
    digits; s0 is the count of digits consumed before the current decade.

    Bug fix: the original used math.pow(), so `inc` was a float and
    str(inc) could contain '.0' or switch to scientific notation for
    large x, yielding wrong digits; integer arithmetic avoids both.
    """
    if x < 10:
        return x
    s0, exp = 9, 1
    while True:
        # Total digits consumed once the (exp+1)-digit decade is included.
        s1 = s0 + 9 * (exp + 1) * 10 ** exp
        if s1 > x:
            break
        s0 = s1
        exp += 1
    # The number containing digit x, then the digit's offset within it.
    inc = 10 ** exp + (x - s0 - 1) // (exp + 1)
    return int(str(inc)[(x - s0 - 1) % (exp + 1)])
ans = find_digit(1) * find_digit(10) * find_digit(100) * find_digit(1000) * \
find_digit(10000) * find_digit(100000) * find_digit(1000000)
print('product of the following digits: {0}'.format(ans)) | zemanntru/Project-Euler | p40-champernownes-constant.py | p40-champernownes-constant.py | py | 834 | python | en | code | 0 | github-code | 90 |
18381119529 | N = int(input())
L = [list(map(int,input().split())) for _ in range(N)]
L.sort(key=lambda x: x[1])
w = 0
for x,y in L:
w += x
if w > y:
print('No')
exit()
print('Yes') | Aasthaengg/IBMdataset | Python_codes/p02996/s651003168.py | s651003168.py | py | 191 | python | en | code | 0 | github-code | 90 |
18470405931 | from django.shortcuts import render
import requests
# Create your views here.
def contact(request):
if request.method=="POST":
data={'name':request.POST['name']
,'email':request.POST['email']
,'number':request.POST['number']
,'message':request.POST['message']}
requests.post("https://script.google.com/macros/s/AKfycbzC3DBg05YYkklLc6njLhMywHkWrfYDl3RoKOE9EmjUemwRlW-FJ53M/exec", str(data))
return render(request,'contact.html') | Aexki/AexBot | Contact/views.py | views.py | py | 485 | python | en | code | 0 | github-code | 90 |
18381431569 | import sys
sys.setrecursionlimit(10**7)
input = sys.stdin.readline
def main():
n, k = list(map(int, input().split()))
a = (n-1)*(n-2)//2
if k > a:
print('-1')
else:
print(n-1+a-k)
#頂点1を中心とするスターグラフ作成
for i in range(2, 1+n):
print(1, i)
cnt = 0
for i in range(2, n):
for j in range(i+1, n+1):
if cnt >= a-k:
return
print(i, j)
cnt += 1
main()
| Aasthaengg/IBMdataset | Python_codes/p02997/s035541089.py | s035541089.py | py | 533 | python | ja | code | 0 | github-code | 90 |
3527681817 | import numpy as np
import math
import pyhdust.beatlas as bat
from operator import is_not
from functools import partial
import os
import pyfits
from utils import bin_data, find_nearest
from scipy.interpolate import griddata
import atpy
# ==============================================================================
def read_stars(stars_table):
    """Read the target-star table from 'tables/' + *stars_table*.

    The file is tab-separated with '#' comments. Columns: star name,
    parallax, parallax error, vsini, vsini error, prior E(B-V),
    inclination, bump flag, wavelength-range string.
    (Parallax is presumably in mas, since downstream code converts it
    to pc via 1e3/plx -- TODO confirm against the tables.)

    Returns the nine columns as separate arrays, with star names
    converted from byte strings to str.
    """
    folder_tables = 'tables/'
    typ = (0, 1, 2, 3, 4, 5, 6, 7, 8)
    file_data = folder_tables + stars_table
    a = np.genfromtxt(file_data, usecols=typ, unpack=True,
                      delimiter='\t', comments='#',
                      dtype={'names': ('star', 'plx', 'sig_plx', 'vsini',
                                       'sig_vsini', 'pre_ebmv', 'incl',
                                       'bump', 'lbd_range'),
                             'formats': ('S9', 'f2', 'f2', 'f4',
                                         'f4', 'f4', 'f4', 'S5',
                                         'S24')})
    stars, list_plx, list_sig_plx, list_vsini_obs, list_sig_vsin_obs,\
        list_pre_ebmv, incl0, bump0, lbd_range =\
        a['star'], a['plx'], a['sig_plx'], a['vsini'], a['sig_vsini'],\
        a['pre_ebmv'], a['incl'], a['bump'], a['lbd_range']
    # genfromtxt returns byte strings ('S' dtypes): decode to str, handling
    # the scalar (single star) and array (many stars) cases separately.
    if np.size(stars) == 1:
        stars = stars.astype('str')
    else:
        for i in range(len(stars)):
            stars[i] = stars[i].astype('str')
    return stars, list_plx, list_sig_plx, list_vsini_obs, list_sig_vsin_obs,\
        list_pre_ebmv, incl0, bump0, lbd_range
# ==============================================================================
def read_befavor_xdr_complete():
    """Read the full 'aara_sed.xdr' model-SED grid and clean it up.

    Steps: load the grid via bat.readBAsed; zero-clamp negative parameter
    values; interpolate over non-positive fluxes; convert the n0 column to
    log10; discard models exceeding empirical flux ceilings at three
    wavelengths; drop the columns of fixed parameters.

    Returns (ctrlarr, minfo, models, lbdarr, listpar, dims, isig), where
    NaNs in ctrlarr mark free parameters, dims maps parameter name to
    original column index, and isig is the index of 'sig0'.
    """
    folder_models = 'models/'
    dims = ['M', 'ob', 'Hfrac', 'sig0', 'Rd', 'mr', 'cosi']
    dims = dict(zip(dims, range(len(dims))))
    isig = dims["sig0"]
    ctrlarr = [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN]
    # NOTE(review): this loop only counts NaNs into `cont`, which is never
    # used afterwards -- dead code.
    tmp = 0
    cont = 0
    while tmp < len(ctrlarr):
        if math.isnan(ctrlarr[tmp]) is True:
            cont = cont + 1
            tmp = tmp + 1
        else:
            tmp = tmp + 1
    # Read the grid models, with the interval of parameters.
    xdrPL = folder_models + 'aara_sed.xdr' # 'PL.xdr'
    # xdrPL = folder_models + 'aara_final.xdr' # 'PL.xdr'
    # xdrPL = folder_models + 'aara_acs.xdr' # 'PL.xdr'
    # xdrPL = folder_models + 'disk_flx.xdr' # 'PL.xdr'
    listpar, lbdarr, minfo, models = bat.readBAsed(xdrPL, quiet=False)
    # F(lbd)] = 10^-4 erg/s/cm2/Ang
    # Zero-clamp spurious negative parameter values.
    for i in range(np.shape(minfo)[0]):
        for j in range(np.shape(minfo)[1]):
            if minfo[i][j] < 0:
                minfo[i][j] = 0.
    # Replace non-positive fluxes by the mean of their neighbours.
    for i in range(np.shape(models)[0]):
        for j in range(np.shape(models)[1]):
            if models[i][j] < 0. or models[i][j] == 0.:
                models[i][j] = (models[i][j + 1] + models[i][j - 1]) / 2.
    # n0 to logn0
    listpar[4] = np.log10(listpar[4])
    listpar[4].sort()
    minfo[:, 4] = np.log10(minfo[:, 4])
    if True:
        # Reject models brighter than empirical flux ceilings at three
        # reference wavelengths (presumably unphysical SEDs -- the
        # thresholds look hand-tuned; confirm their origin).
        mask = []
        tmp, idx = find_nearest(lbdarr, 1000)
        for i in range(len(models)):
            if models[i][idx] > 2.21834e-10:
                mask.append(i)
                # print(i)
                # plt.plot(lbdarr, models[i], alpha=0.1)
        tmp, idx = find_nearest(lbdarr, 80)
        for i in range(len(models)):
            if models[i][idx] > 2e-8:
                mask.append(i)
                # print(i)
                # plt.plot(lbdarr, models[i], alpha=0.1)
        tmp, idx = find_nearest(lbdarr, 850)
        for i in range(len(models)):
            if models[i][idx] > 7e-11:
                mask.append(i)
                # print(i)
                # plt.plot(lbdarr, models[i], alpha=0.1)
        # plt.yscale('log')
        # plt.xscale('log')
        # plt.show()
        new_models = np.delete(models, mask, axis=0)
        new_minfo = np.delete(minfo, mask, axis=0)
        models = np.copy(new_models)
        minfo = np.copy(new_minfo)
    # delete columns of fixed par
    cols2keep = [0, 1, 3, 4, 5, 7, 8]
    cols2delete = [2, 6]
    listpar = [listpar[i] for i in cols2keep]
    minfo = np.delete(minfo, cols2delete, axis=1)
    listpar[3].sort()
    # for i in range(len(models)):
    #     plt.plot(lbdarr, models[i], alpha=0.1)
    # plt.yscale('log')
    # plt.xscale('log')
    # plt.show()
    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
# ==============================================================================
def read_befavor_xdr():
    """Read the BeFaVOr model-SED grid ('models/BeFaVOr.xdr').

    Returns (ctrlarr, minfo, models, lbdarr, listpar, dims, isig): ctrlarr
    marks free parameters with NaN and fixed ones with their adopted value;
    minfo/listpar have the fixed-parameter columns removed; dims maps
    parameter name to original column index; isig is the 'sig0' index.
    """
    folder_models = 'models/'
    dims = ['M', 'ob', 'Hfrac', 'sig0', 'Rd', 'mr', 'cosi']
    dims = dict(zip(dims, range(len(dims))))
    isig = dims["sig0"]
    ctrlarr = [np.NaN, np.NaN, 0.014, np.NaN, 0.0, 50.0, 60.0, 3.5, np.NaN]
    # Read the grid models, with the interval of parameters.
    xdrPL = folder_models + 'BeFaVOr.xdr'
    listpar, lbdarr, minfo, models = bat.readBAsed(xdrPL, quiet=False)
    # [models] = [F(lbd)]] = 10^-4 erg/s/cm2/Ang
    # Zero-clamp spurious negative parameter values.
    for i in range(np.shape(minfo)[0]):
        for j in range(np.shape(minfo)[1]):
            if minfo[i][j] < 0:
                minfo[i][j] = 0.
    # Replace negative interior fluxes by the mean of their neighbours.
    # Bug fix: the original condition `j != 0 or j != len(models[i][j]) - 1`
    # was always true and called len() on a float scalar, so edge points
    # could wrap around (j-1 == -1) or raise; only interior points are
    # smoothed now.
    nlbd = np.shape(models)[1]
    for i in range(np.shape(models)[0]):
        for j in range(1, nlbd - 1):
            if models[i][j] < 0:
                models[i][j] = (models[i][j - 1] + models[i][j + 1]) / 2.
    # delete columns of fixed par
    cols2keep = [0, 1, 3, 8]
    cols2delete = [2, 4, 5, 6, 7]
    listpar = [listpar[i] for i in cols2keep]
    minfo = np.delete(minfo, cols2delete, axis=1)
    listpar[3].sort()
    listpar[3][0] = 0.
    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
# ==============================================================================
def read_beatlas_xdr():
    """Read the BeAtlas model-SED grid ('models/disk_flx.xdr').

    All five parameters (M, ob, sig0, mr, cosi) are free (ctrlarr all NaN).
    Negative parameter values are zero-clamped and non-positive fluxes are
    replaced by the mean of their neighbours.

    Returns (ctrlarr, minfo, models, lbdarr, listpar, dims, isig).
    """
    dims = ['M', 'ob', 'sig0', 'mr', 'cosi']
    dims = dict(zip(dims, range(len(dims))))
    isig = dims["sig0"]
    ctrlarr = [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN]
    # NOTE(review): this loop only counts NaNs into `cont`, which is never
    # used afterwards -- dead code.
    tmp = 0
    cont = 0
    while tmp < len(ctrlarr):
        if math.isnan(ctrlarr[tmp]) is True:
            cont = cont + 1
            tmp = tmp + 1
        else:
            tmp = tmp + 1
    folder_models = 'models/'
    xdrPL = folder_models + 'disk_flx.xdr' # 'PL.xdr'
    listpar, lbdarr, minfo, models = bat.readBAsed(xdrPL, quiet=False)
    # F(lbd)] = 10^-4 erg/s/cm2/Ang
    # Zero-clamp spurious negative parameter values.
    for i in range(np.shape(minfo)[0]):
        for j in range(np.shape(minfo)[1]):
            if minfo[i][j] < 0:
                minfo[i][j] = 0.
    # Replace non-positive fluxes by the mean of their neighbours.
    for i in range(np.shape(models)[0]):
        for j in range(np.shape(models)[1]):
            if models[i][j] < 0. or models[i][j] == 0.:
                models[i][j] = (models[i][j + 1] + models[i][j - 1]) / 2.
    # Force the lowest cosi grid value to exactly zero.
    listpar[-1][0] = 0.
    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
# ==============================================================================
def read_acol_xdr():
    """Read the ACol model-SED grid ('models/acol.xdr').

    Cleans negative parameter/grid values, removes the fixed columns
    (indices 2 and 6) from minfo, and converts the density column to
    log10 in both minfo and listpar.

    Returns (ctrlarr, minfo, models, lbdarr, listpar, dims, isig).
    """
    # print(params_tmp)
    dims = ['M', 'ob', 'Hfrac', 'sig0', 'Rd', 'mr', 'cosi']
    dims = dict(zip(dims, range(len(dims))))
    isig = dims["sig0"]
    ctrlarr = [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN]
    # NOTE(review): this loop only counts NaNs into `cont`, which is never
    # used afterwards -- dead code.
    tmp = 0
    cont = 0
    while tmp < len(ctrlarr):
        if math.isnan(ctrlarr[tmp]) is True:
            cont = cont + 1
            tmp = tmp + 1
        else:
            tmp = tmp + 1
    # Read the grid models, with the interval of parameters.
    folder_models = 'models/'
    xdrPL = folder_models + 'acol.xdr'
    listpar, lbdarr, minfo, models = bat.readBAsed(xdrPL, quiet=False)
    # Filter (removing bad models)
    for i in range(np.shape(minfo)[0]):
        for j in range(np.shape(minfo)[1]):
            if minfo[i][j] < 0:
                minfo[i][j] = 0.
    for i in range(np.shape(listpar)[0]):
        for j in range(len(listpar[i])):
            if listpar[i][j] < 0:
                listpar[i][j] = 0.
    # Drop the fixed-parameter columns (2 and 6) from each model's row.
    mask = np.ones(len(minfo[0]), dtype=bool)
    mask[[2, 6]] = False
    result = []
    for i in range(len(minfo)):
        result.append(minfo[i][mask])
    minfo = np.copy(result)
    # Column 3 of the reduced minfo (the density) -> log10.
    for i in range(np.shape(minfo)[0]):
        minfo[i][3] = np.log10(minfo[i][3])
    listpar[4] = np.log10(listpar[4])
    listpar[4].sort()
    # Keep only the free-parameter grids, in minfo column order.
    listpar = list([listpar[0], listpar[1], listpar[3], listpar[4], listpar[5],
                    listpar[7], listpar[8]])
    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
# ==============================================================================
def read_star_info(stars, list_plx, list_sig_plx, list_vsini_obs,
                   list_sig_vsin_obs, list_pre_ebmv, lbd_range,
                   listpar, Nsigma_dis, include_rv, model):
    """Assemble the fitting ranges and observables for one star.

    Converts parallax to distance (dist_pc = 1e3/plx, so plx is presumably
    in mas -- confirm), widens the distance interval by Nsigma_dis sigmas,
    and builds the `ranges` array of [min, max] per free parameter for the
    chosen grid ('befavor', 'aara', 'beatlas', 'acol'/'bcmi').

    Returns (ranges, dist_pc, sig_dist_pc, vsin_obs, sig_vsin_obs, Ndim, band).

    NOTE(review): if `model` matches none of the known names, `ranges` is
    never bound and len(ranges) raises UnboundLocalError.
    """
    print(75 * '=')
    star_r = stars.item()
    # star_r = star_r.decode('UTF-8')
    print('\nRunning star: %s\n' % star_r)
    print(75 * '=')
    # star_params = {'parallax': list_plx,
    #                'sigma_parallax': list_sig_plx,
    #                'folder_ines': star_r + '/'}
    plx = np.copy(list_plx)
    dplx = np.copy(list_sig_plx)
    vsin_obs = np.copy(list_vsini_obs)
    band = np.copy(lbd_range)
    # ------------------------------------------------------------------------------
    # Reading known stellar parameters
    dist_pc = 1e3 / plx # pc
    # Error propagation of d = 1e3/plx.
    sig_dist_pc = (1e3 * dplx / plx**2)
    sig_vsin_obs = np.copy(list_sig_vsin_obs)
    # ------------------------------------------------------------------------------
    # Constrains additional parameters
    if include_rv is True:
        ebmv, rv = [[0.0, 0.1], [2.2, 5.8]]
    else:
        # NOTE(review): this assignment is dead -- rv is immediately
        # overwritten by None on the next line.
        rv = 3.1
        ebmv, rv = [[0.0, 0.1], None]
    # ------------------------------------------------------------------------------
    # To add new parameters
    dist_min = dist_pc - Nsigma_dis * sig_dist_pc
    dist_max = dist_pc + Nsigma_dis * sig_dist_pc
    # Guard against unphysical negative distances.
    if dist_min < 0:
        dist_min = 1
    # NOTE(review): addlistpar is built but never used afterwards.
    addlistpar = [ebmv, [dist_min, dist_max], rv]
    addlistpar = list(filter(partial(is_not, None), addlistpar))
    # Parameter ranges: one [min, max] row per free grid parameter, then
    # distance and E(B-V) (and rv when include_rv, see commented block below).
    if model == 'befavor':
        ranges = np.array([[listpar[0][0], listpar[0][-1]],
                           [listpar[1][0], listpar[1][-1]],
                           [listpar[2][0], listpar[2][-1]],
                           [listpar[3][0], listpar[3][-1]],
                           [dist_min, dist_max],
                           [ebmv[0], ebmv[-1]]])
    if model == 'aara':
        ranges = np.array([[listpar[0][0], listpar[0][-1]],
                           [listpar[1][0], listpar[1][-1]],
                           [listpar[2][0], listpar[2][-1]],
                           [listpar[3][0], listpar[3][-1]],
                           [listpar[4][0], listpar[4][-1]],
                           [listpar[5][0], listpar[5][-1]],
                           [listpar[6][0], listpar[6][-1]],
                           [dist_min, dist_max],
                           [ebmv[0], ebmv[-1]]])
    if model == 'beatlas':
        ranges = np.array([[listpar[0][0], listpar[0][-1]],
                           [listpar[1][0], listpar[1][-1]],
                           [listpar[2][0], listpar[2][-1]],
                           [listpar[3][0], listpar[3][-1]],
                           [listpar[4][0], listpar[4][-1]],
                           [dist_min, dist_max],
                           [ebmv[0], ebmv[-1]]])
    if model == 'acol' or model == 'bcmi':
        ranges = np.array([[listpar[0][0], listpar[0][-1]],
                           [listpar[1][0], listpar[1][-1]],
                           [listpar[2][0], listpar[2][-1]],
                           [listpar[3][0], listpar[3][-1]],
                           [listpar[4][0], listpar[4][-1]],
                           [listpar[5][0], listpar[5][-1]],
                           [listpar[6][0], listpar[6][-1]],
                           [dist_min, dist_max],
                           [ebmv[0], ebmv[-1]]])
    # print(ranges)
    # if include_rv is True:
    #     ranges = np.array([[listpar[0][0], listpar[0][-1]],
    #                        [listpar[1][0], listpar[1][-1]],
    #                        [listpar[2][0], listpar[2][-1]],
    #                        [listpar[3][0], listpar[3][-1]],
    #                        [dist_min, dist_max],
    #                        [ebmv[0], ebmv[-1]],
    #                        [rv[0], rv[-1]]])
    Ndim = len(ranges)
    return ranges, dist_pc, sig_dist_pc, vsin_obs,\
        sig_vsin_obs, Ndim, band
# ==============================================================================
def read_iue(models, lbdarr, wave0, flux0, sigma0, folder_data,
             folder_fig, star, cut_iue_regions, model):
    """Read, clean, rebin and merge the IUE spectra of *star*.

    Every ``*.FITS`` file under ``folder_data/star`` is loaded, only samples
    with QUALITY == 0 are kept, the merged spectrum is optionally restricted
    to fixed UV windows, rebinned into 200 bins, and the model grid is
    interpolated onto the observed wavelengths.

    Returns ``(logF, dlogF, logF_grid, wave)``: observed fluxes in log10,
    linearised uncertainties sigma/flux, the log10 model grid, and the final
    wavelength array (microns).
    """
    table = folder_data + str(star) + '/' + 'list_iue.txt'
    # os.chdir(folder_data + str(star) + '/')
    # NOTE(review): this condition is always True (isfile returns either
    # False or True), so the file list is regenerated on every call.
    if os.path.isfile(table) is False or os.path.isfile(table) is True:
        os.system('ls ' + folder_data + str(star) +
                  '/*.FITS | xargs -n1 basename >' +
                  folder_data + str(star) + '/' + 'list_iue.txt')
    iue_list = np.genfromtxt(table, comments='#', dtype='str')
    file_name = np.copy(iue_list)
    # Accumulate all spectra from all FITS files into flat arrays.
    fluxes, waves, errors = [], [], []
    for k in range(len(file_name)):
        file_iue = str(folder_data) + str(star) + '/' + str(file_name[k])
        hdulist = pyfits.open(file_iue)
        tbdata = hdulist[1].data
        wave = tbdata.field('WAVELENGTH') * 1e-4  # mum
        flux = tbdata.field('FLUX') * 1e4  # erg/cm2/s/A -> erg/cm2/s/mum
        sigma = tbdata.field('SIGMA') * 1e4  # erg/cm2/s/A -> erg/cm2/s/mum
        # Filter of bad data: keep only samples flagged good (QUALITY == 0)
        qualy = tbdata.field('QUALITY')
        idx = np.where((qualy == 0))
        wave = wave[idx]
        sigma = sigma[idx]
        flux = flux[idx]
        fluxes = np.concatenate((fluxes, flux), axis=0)
        waves = np.concatenate((waves, wave), axis=0)
        errors = np.concatenate((errors, sigma), axis=0)
    if os.path.isdir(folder_fig + str(star)) is False:
        os.mkdir(folder_fig + str(star))
    # ------------------------------------------------------------------------------
    # Would you like to cut the spectrum?
    if cut_iue_regions is True:
        # Keep two fixed windows: the far-UV range and the 2200 A bump region.
        wave_lim_min_iue = 0.135
        wave_lim_max_iue = 0.180
        # Do you want to select a range to middle UV? (2200 bump region)
        wave_lim_min_bump_iue = 0.20  # 0.200 #0.195 #0.210 / 0.185
        wave_lim_max_bump_iue = 0.30  # 0.300 #0.230 #0.300 / 0.335
        indx = np.where(((waves >= wave_lim_min_iue) &
                         (waves <= wave_lim_max_iue)))
        indx2 = np.where(((waves >= wave_lim_min_bump_iue) &
                          (waves <= wave_lim_max_bump_iue)))
        indx3 = np.concatenate((indx, indx2), axis=1)[0]
        waves, fluxes, errors = waves[indx3], fluxes[indx3], errors[indx3]
    else:
        # Single window: everything up to 0.300 microns.
        wave_lim_min_iue = min(waves)
        wave_lim_max_iue = 0.300
        indx = np.where(((waves >= wave_lim_min_iue) &
                         (waves <= wave_lim_max_iue)))
        waves, fluxes, errors = waves[indx], fluxes[indx], errors[indx]
    # Sort by wavelength before rebinning.
    new_wave, new_flux, new_sigma = \
        zip(*sorted(zip(waves, fluxes, errors)))
    nbins = 200
    xbin, ybin, dybin = bin_data(new_wave, new_flux, nbins,
                                 exclude_empty=True)
    ordem = xbin.argsort()
    wave = xbin[ordem]
    flux = ybin[ordem]
    sigma = dybin[ordem]
    # Merge in the previously accumulated data (wave0/flux0/sigma0) for every
    # model except 'befavor', then re-sort by wavelength.
    if model != 'befavor':
        wave = np.hstack([wave0, wave])
        flux = np.hstack([flux0, flux])
        sigma = np.hstack([sigma0, sigma])
        ordem = wave.argsort()
        wave = wave[ordem]
        flux = flux[ordem]
        sigma = sigma[ordem]
    # ------------------------------------------------------------------------------
    # select lbdarr to coincide with lbd: clip the observations to the model
    # grid coverage for 'beatlas' and 'aara'.
    models_new = np.zeros([len(models), len(wave)])
    if model == 'beatlas' or model == 'aara':
        idx = np.where((wave >= np.min(lbdarr)) & (wave <= np.max(lbdarr)))
        wave = wave[idx]
        flux = flux[idx]
        sigma = sigma[idx]
        models_new = np.zeros([len(models), len(wave)])
    # Interpolate each model spectrum (in log-log space) onto the observed
    # wavelength array.
    for i in range(len(models)):
        models_new[i, :] = 10.**griddata(np.log(lbdarr),
                                         np.log10(models[i]),
                                         np.log(wave), method='linear')
    # to log space
    logF = np.log10(flux)
    dlogF = sigma / flux
    logF_grid = np.log10(models_new)
    return logF, dlogF, logF_grid, wave
# ==============================================================================
def read_votable(folder_data, star):
    """Read a VOTable (``*.xml``) SED for *star* and return it sorted.

    The first VOTable found under ``folder_data/star`` is parsed with atpy.
    Zero uncertainties are replaced by 0.2% of the flux so later fits never
    divide by zero.

    Returns ``(wave, flux, sigma)`` with wavelengths converted from Angstrom
    to microns (1e-4) and fluxes/sigmas from erg/cm2/s/A to erg/cm2/s/mum
    (1e4).
    """
    table = folder_data + str(star) + '/' + 'list.txt'
    # os.chdir(folder_data + str(star) + '/')
    # Regenerate the file list on every call (the original guarded this with
    # a tautological `isfile(...) is False or isfile(...) is True`).
    os.system('ls ' + folder_data + str(star) +
              '/*.xml | xargs -n1 basename >' +
              folder_data + str(star) + '/' + 'list.txt')
    vo_list = np.genfromtxt(table, comments='#', dtype='str')
    table_name = np.copy(vo_list)
    vo_file = folder_data + str(star) + '/' + str(table_name)
    try:
        # Standard column layout.
        t1 = atpy.Table(vo_file)
        wave = t1['Wavelength'][:]  # Angstrom
        flux = t1['Flux'][:]  # erg/cm2/s/A
        sigma = t1['Error'][:]  # erg/cm2/s/A
    except Exception:
        # Alternate layout (second table id, no error column).
        t1 = atpy.Table(vo_file, tid=1)
        wave = t1['SpectralAxis0'][:]  # Angstrom
        flux = t1['Flux0'][:]  # erg/cm2/s/A
        sigma = [0.] * len(flux)  # erg/cm2/s/A
    # Sort the three columns together by wavelength.
    new_wave, new_flux, new_sigma = zip(*sorted(zip(wave, flux, sigma)))
    new_flux = list(new_flux)
    # Filtering null sigmas: assume a 0.2% relative error where none is given.
    new_sigma = [0.002 * f if s == 0. else s
                 for s, f in zip(new_sigma, new_flux)]
    # Unit conversions (Angstrom -> mum, per-Angstrom -> per-mum).
    wave = np.copy(new_wave) * 1e-4
    flux = np.copy(new_flux) * 1e4
    sigma = np.copy(new_sigma) * 1e4
    return wave, flux, sigma
# ==============================================================================
def read_models(model):
    """Load the XDR grid matching *model*.

    Known models: 'befavor', 'aara', 'beatlas', 'acol', 'bcmi'.  An unknown
    model name leaves the result unbound and raises UnboundLocalError, just
    like the original if-chain did.
    """
    if model == 'befavor':
        grid = read_befavor_xdr()
    elif model == 'aara':
        grid = read_befavor_xdr_complete()
    elif model == 'beatlas':
        grid = read_beatlas_xdr()
    elif model in ('acol', 'bcmi'):
        grid = read_acol_xdr()
    ctrlarr, minfo, models, lbdarr, listpar, dims, isig = grid
    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
| tangodaum/bemcee | reading_routines.py | reading_routines.py | py | 18,952 | python | en | code | 1 | github-code | 90 |
35089669775 | import mat4py
import numpy as np
import gzip
import os
import urllib.request
import sys
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import jax
from utils import dense_to_one_hot
class ImageDataSet(object):
    """In-memory image dataset with shuffled mini-batch sampling.

    Images arrive as an (N, C, H, W) array; with
    ``input_reshape == 'fully-connected'`` each example is flattened to one
    row of a float32 (N, C*H*W) matrix.  With ``if_autoencoder`` the labels
    are the (reshaped) images themselves.
    """

    def __init__(self, images, labels, if_autoencoder, input_reshape):
        self._num_examples = len(images)
        if len(images) > 0:
            if input_reshape == 'fully-connected':
                # (N, C, H, W) -> (N, W, C, H) before flattening.
                # NOTE(review): assumed intended row layout — confirm against
                # the original data pipeline.
                images = np.swapaxes(images, 2, 3)
                images = np.swapaxes(images, 1, 2)
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2] * images.shape[3])
            images = images.astype(np.float32)
        if if_autoencoder:
            # Autoencoder target is the input itself.
            labels = images
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
        # Bug fix: the batch_size property read self._batch_size, which was
        # never assigned anywhere and raised AttributeError on first access.
        # No caller ever supplies a batch size, so it is explicitly None.
        self._batch_size = None

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def sample(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data (same permutation for images and labels)
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]

    @property
    def batch_size(self):
        # Always None: no code path ever sets a batch size on the instance.
        return self._batch_size

    @property
    def length(self):
        return self._num_examples

    @property
    def data(self):
        return self._images
def extract_images(filename):
    """Extract MNIST-format images into a 4D uint8 array [index, y, x, depth]."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        magic = _read32(stream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        count = _read32(stream)
        height = _read32(stream)
        width = _read32(stream)
        raw = stream.read(height * width * count)
        pixels = np.frombuffer(raw, dtype=np.uint8)
        return pixels.reshape(count, height, width, 1)
def extract_labels(filename, one_hot=False):
    """Extract MNIST-format labels into a 1D uint8 array [index].

    With ``one_hot`` the labels are expanded to a one-hot matrix via
    ``dense_to_one_hot``.
    """
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        magic = _read32(stream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' %
                (magic, filename))
        count = _read32(stream)
        labels = np.frombuffer(stream.read(count), dtype=np.uint8)
        return dense_to_one_hot(labels) if one_hot else labels
def maybe_download(SOURCE_URL, filename, work_directory):
    """Download the data from Yann's website, unless it's already here."""
    if not os.path.exists(work_directory):
        os.makedirs(work_directory)
    target = os.path.join(work_directory, filename)
    if not os.path.exists(target):
        # Fetch only when the local copy is missing.
        target, _ = urllib.request.urlretrieve(SOURCE_URL + filename, target)
        info = os.stat(target)
        print('Succesfully downloaded', filename, info.st_size, 'bytes.')
    return target
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def read_data_sets(name_dataset, home_path, if_autoencoder = True):
    """A helper utility that returns an ImageDataSet container.

    If the data are not present under ``home_path`` they are downloaded
    from the appropriate site.

    *Input*
    name_dataset: MNIST, FACES or CURVES
    home_path: The root folder to look for or download the dataset.
    if_autoencoder: when True, labels are set to the images themselves.

    *Returns*:
    An object with ``train``, ``validation`` and ``test`` ImageDataSet
    attributes (each implements ``sample()``).
    """
    class DataSets(object):
        pass
    data_sets = DataSets()
    VALIDATION_SIZE = 0  # no validation split by default
    train_dir = os.path.join(home_path, 'data', name_dataset + '_data')
    print(f'Begin loading data for {name_dataset}')
    if name_dataset == 'MNIST':
        if_autoencoder = if_autoencoder  # NOTE(review): no-op self-assignment
        SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
        TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
        TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
        TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
        TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
        local_file = maybe_download(SOURCE_URL, TRAIN_IMAGES, train_dir)
        print(f'Data read from {local_file}')
        train_images = extract_images(local_file)
        local_file = maybe_download(SOURCE_URL, TEST_IMAGES, train_dir)
        test_images = extract_images(local_file)
        local_file = maybe_download(SOURCE_URL, TRAIN_LABELS, train_dir)
        print(f'Data read from {local_file}')
        train_labels = extract_labels(local_file,one_hot=True)
        local_file = maybe_download(SOURCE_URL, TEST_LABELS, train_dir)
        test_labels = extract_labels(local_file,one_hot=True)
        # see "Reducing the Dimensionality of Data with Neural Networks"
        # Pixel values are rescaled to [0, 1].
        train_images = np.multiply(train_images, 1.0 / 255.0)
        test_images = np.multiply(test_images, 1.0 / 255.0)
    elif name_dataset == 'FACES':
        if_autoencoder = if_autoencoder  # NOTE(review): no-op self-assignment
        SOURCE_URL = 'http://www.cs.toronto.edu/~jmartens/'
        TRAIN_IMAGES = 'newfaces_rot_single.mat'
        local_file = maybe_download(SOURCE_URL, TRAIN_IMAGES, train_dir)
        print(f'Data read from {local_file}')
        import mat4py
        images_ = mat4py.loadmat(local_file)
        images_ = np.asarray(images_['newfaces_single'])
        images_ = np.transpose(images_)
        # Fixed train/test split used by the original FACES benchmark.
        train_images = images_[:103500]
        test_images = images_[-41400:]
        # Expand to (N, D, 1, 1) so ImageDataSet's reshape path applies.
        train_images = train_images[:, :, np.newaxis, np.newaxis]
        test_images = test_images[:, :, np.newaxis, np.newaxis]
        train_labels = train_images
        test_labels = test_images
    elif name_dataset == 'CURVES':
        if_autoencoder = if_autoencoder  # NOTE(review): no-op self-assignment
        SOURCE_URL = 'http://www.cs.toronto.edu/~jmartens/'
        TRAIN_IMAGES = 'digs3pts_1.mat'
        local_file = maybe_download(SOURCE_URL, TRAIN_IMAGES, train_dir)
        print(f'Data read from {local_file}')
        import mat4py
        images_ = mat4py.loadmat(local_file)
        train_images = np.asarray(images_['bdata'])
        test_images = np.asarray(images_['bdatatest'])
        train_images = train_images[:, :, np.newaxis, np.newaxis]
        test_images = test_images[:, :, np.newaxis, np.newaxis]
        train_labels = train_images
        test_labels = test_images
    else:
        print('error: Dataset not supported.')
        sys.exit()
    # Carve off an (empty by default) validation split from the front.
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    input_reshape = 'fully-connected'
    data_sets.train = ImageDataSet(train_images, train_labels, if_autoencoder, input_reshape)
    data_sets.validation = ImageDataSet(validation_images, validation_labels, if_autoencoder, input_reshape)
    data_sets.test = ImageDataSet(test_images, test_labels, if_autoencoder, input_reshape)
    print(f'Succesfull loaded {name_dataset} dataset.')
    return data_sets
| someauthors/fishleg | jax/image_datasets.py | image_datasets.py | py | 8,263 | python | en | code | 0 | github-code | 90 |
from functools import lru_cache


@lru_cache(maxsize=None)
def fib(n):
    """Return the n-th term of the 1-indexed Fibonacci variant (fib(0) == fib(1) == 1)."""
    if n == 0 or n == 1:
        return 1
    return fib(n - 1) + fib(n - 2)


def main():
    # Read n from stdin and print the corresponding term.
    n = int(input())
    print(fib(n))


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p02233/s579323796.py | s579323796.py | py | 198 | python | en | code | 0 | github-code | 90 |
def main():
    # Jump puzzle: cells 0..n, s[i] == 1 marks a blocked cell, a jump covers
    # at most m cells.  dp[i] = minimum jumps needed to go from cell i to
    # cell n (presumably; inferred from the recurrence — verify against the
    # original problem statement).
    from collections import deque
    INF = float('inf')
    n, m = map(int, input().split())
    s = list(map(int, input()))
    dp = [INF] * (n + 1)
    dp[n] = 0
    # queue holds dp values of the up-to-m cells ahead of i; the front is the
    # farthest cell in the window.
    queue = deque([0])
    i = n - 1
    while i >= 0:
        while True:
            if not queue:
                # No reachable cell within m steps ahead -> impossible.
                print(-1)
                return
            if queue[0] != INF and len(queue) <= m:
                break
            # Drop the front while it is unreachable (INF) or the window
            # exceeds m cells.
            queue.popleft()
        if s[i] == 0:
            # Greedy: jump to the farthest reachable cell ahead.
            dp[i] = queue[0] + 1
        queue.append(dp[i])
        i -= 1
    # Runs of equal dp values correspond to the cells covered by one jump;
    # their lengths are the jump sizes, emitted left to right.
    ans = []
    v = dp[0]
    num = 0
    for i in dp:
        if i != v and i != INF:
            ans.append(num)
            v = i
            num = 1
        else:
            num += 1
    print(*ans, sep=' ')

if __name__ == '__main__':
    main()
from __future__ import print_function
import gym
import tensorflow as tf
import tensorlayer as tl
from rlflow.core import tf_utils
from rlflow.policies.f_approx import Network
from rlflow.algos.grad import PolicyGradient
from rlflow.core.input import InputStreamDownsamplerProcessor, InputStreamSequentialProcessor, InputStreamProcessor
if __name__ == "__main__":
    # Train a policy-gradient agent on Atari Pong using a small conv net.
    env = gym.make("Pong-v0")
    w_init = tf.truncated_normal_initializer(stddev=0.05)
    b_init = tf.constant_initializer(value=0.0)
    name_scope = 'network'
    with tf.name_scope(name_scope) as scope:
        # Input: 4 stacked grayscale 84x84 frames (see the processors below).
        input_tensor = tf.placeholder(tf.float32, shape=[None, 84, 84, 4], name='policy_input_'+name_scope)
        net = tl.layers.InputLayer(input_tensor, name='input1_'+name_scope)
        net = tl.layers.Conv2d(net, 16, (8, 8), (4, 4), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_'+name_scope)
        net = tl.layers.Conv2d(net, 32, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_'+name_scope)
        net = tl.layers.FlattenLayer(net, name='flatten1_'+name_scope)
        net = tl.layers.DenseLayer(net, 1024, act=tf.nn.sigmoid, name='dense1_'+name_scope)
        # Output: softmax over the environment's discrete actions.
        net = tl.layers.DenseLayer(net, env.action_space.n, act=tf.nn.softmax, name='dense2_'+name_scope)
    # Observation pipeline: downsample to 84x84 grayscale, then stack the
    # last 4 observations.
    downsampler = InputStreamDownsamplerProcessor((84, 84), gray=True)
    sequential = InputStreamSequentialProcessor(observations=4)
    input_processor = InputStreamProcessor(processor_list=[downsampler, sequential])
    # initialize policy with network
    policy = Network([input_tensor],
                     net,
                     Network.TYPE_PG)
    # initialize algorithm with env, policy, session and other params
    pg = PolicyGradient(env,
                        policy,
                        episode_len=1000,
                        discount=True,
                        input_processor=input_processor,
                        optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
    # start the training process
    pg.train(max_episodes=5000)
    rewards = pg.test(episodes=10)
    print ("Average: ", float(sum(rewards)) / len(rewards))
| tpbarron/rlflow | examples/nnet_pong_pg.py | nnet_pong_pg.py | py | 2,188 | python | en | code | 20 | github-code | 90 |
from PIL import Image
import requests
import streamlit as st
from streamlit_option_menu import option_menu
from streamlit_lottie import st_lottie
# Certificate images shown on this page.
# NOTE(review): absolute Windows paths — the app breaks on any other machine;
# consider paths relative to the project root.
img_1 = Image.open("C:\\Users\\sneha\\OneDrive\\Desktop\\website\\images\\img1.png")
img_2 = Image.open("C:\\Users\\sneha\\OneDrive\\Desktop\\website\\images\\img2.png")
img_3 = Image.open("C:\\Users\\sneha\\OneDrive\\Desktop\\website\\images\\img3.png")
img_4 = Image.open("C:\\Users\\sneha\\OneDrive\\Desktop\\website\\images\\img4.png")
# Each container renders one certificate: image on the left, text on the right.
with st.container():
    st.header("Certificates and Accomplishments")
    st.write("---")
    st.write("##")
    image_column, text_column = st.columns((1, 2))
    with image_column:
        st.image(img_1)
    with text_column:
        st.subheader("Participated in State level Debate Competition")
        st.write(
            """It was a pleasure participating in State level Debate competition held on February 2023, where
            the topic was "uniform civil code"."""
        )
with st.container():
    image_column, text_column = st.columns((1, 2))
    with image_column:
        st.image(img_2)
    with text_column:
        st.subheader("Participated in Antaragini event, as Anchor in college")
        st.write("""I was the Anchor for the anual Antaragini event held in college.""")
with st.container():
    image_column, text_column = st.columns((1, 2))
    with image_column:
        st.image(img_3)
    with text_column:
        st.subheader("Participated and won in Tabletopics Speech contest on Club level")
        st.write(
            """"I won the first place in Impromptu speech contest, where the topic was: "We have built more walls than bridges".
            Here I practiced thinking and speaking on your feet. """
        )
with st.container():
    image_column, text_column = st.columns((1, 2))
    with image_column:
        st.image(img_4)
    with text_column:
        st.subheader("Participated in Intercollege Commerce fest: FINATEX 23 ")
        st.write(
            """"This was a commerce event held on 24th-25th March 2023, in (Christ Deemed to be University) Lavasa, Pune. """
        )
# Contact
| Sneha12123/Python-projects | 4_Certificates.py | 4_Certificates.py | py | 2,188 | python | en | code | 0 | github-code | 90 |
import os
import time
import rospy
import numpy as np
from datetime import datetime
from learning_fc import model_path, datefmt
from learning_fc.robot import RobotInterface
from learning_fc.models import ForcePI
from learning_fc.training import make_eval_env_model
N_TRIALS = 30  # maximum number of grasp trials per session
N_SECS = 6.0   # how long each grasp is held before releasing
# load policy and env
# policy_trial, indb = "2023-09-14_10-53-25__gripper_tactile__ppo__k-3__lr-0.0006_M2_inb", True
policy_trial, indb = "2023-09-14_11-24-22__gripper_tactile__ppo__k-3__lr-0.0006_M2_noinb", False
# policy_trial, indb = "2023-09-15_08-22-36__gripper_tactile__ppo__k-3__lr-0.0006_M2_nor", False
env, model, _, params = make_eval_env_model(f"{model_path}/{policy_trial}" , with_vis=False, checkpoint="best")
# Frame-stack depth k defaults to 1 when the training config did not stack.
k = 1 if "frame_stack" not in params["make_env"] else params["make_env"]["frame_stack"]
env.set_attr("fth", 0.02)
# load Force Controller (even though we don't use the policy model, we need the env)
# model, indb = ForcePI(env), False
ri = RobotInterface(
    model,
    env,
    fth=env.fth,
    k=k,
    goal=0.0,
    freq=25,
    with_indb=True
)
ri.reset()
r = rospy.Rate(51)
# open gripper
ri.reset()
ri.actuate([0.045, 0.045])
time.sleep(0.5)
for _ in range(N_TRIALS):
    # time for object rearrangement / decision to stop evaluation
    inp = input("goal?\n")
    if inp == "q": break
    else:
        try:
            goal = float(inp)
            assert goal >= 0, "goal >= 0"
            ri.set_goal(goal)
            print(f"new goal: {goal}")
        # NOTE(review): if float(inp) fails on the very first iteration,
        # `goal` is still unbound and this handler itself raises NameError.
        except Exception as e:
            print(f"can't convert {goal} to a number:\n{e}")
            continue
    # grasp object
    if isinstance(model, ForcePI): model.reset()
    ri.reset()
    ri.set_goal(goal)
    ri.run()
    # hold the grasp for N_SECS, sleeping at the ROS rate
    start = time.time()
    while time.time() - start < N_SECS: r.sleep()
    ri.stop()
    # release and re-open the gripper for the next trial
    ri.reset()
    ri.actuate([0.045, 0.045])
    time.sleep(0.5)
ri.reset()
ri.actuate([0.045, 0.045])
ri.shutdown()
exit()
10876127047 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from os.path import join
from Attention_Classification.WordAttn import WordAttn
from Attention_Classification.SentenceAttn import SentenceAttn
class HierarchicalAttention(nn.Module):
    """Hierarchical attention network: word-level attention per sentence,
    then sentence-level attention per document, then a linear classifier.

    If a checkpoint exists at ``config['model_dir']/config['model_name']``
    its weights are loaded in ``__init__``.
    """
    def __init__(self, config):
        super(HierarchicalAttention, self).__init__()
        self.batch_size = config['batch_size']
        self.n_layers = config['n_layers']
        self.max_sents = config['max_sents']
        self.hidden_size = config['hidden_size']
        self.num_classes = config['num_classes']
        self.device = config['device']
        self.word_attn = WordAttn(config)
        self.sent_attn = SentenceAttn(config)
        self.fc = nn.Linear(self.hidden_size, self.num_classes)
        path_to_model = join(config['model_dir'], config['model_name'])
        if os.path.exists(path_to_model):
            # map_location keeps CPU-saved checkpoints loadable anywhere
            self.load_state_dict(torch.load(path_to_model, map_location=lambda storage, loc: storage))
            print('Model loaded from disk !!!! {}'.format(path_to_model))
    def init_hidden_state(self, input_ids):
        # Fresh zero hidden states sized to the current batch (bz).  The
        # leading 2*n_layers suggests a bidirectional RNN inside the
        # attention modules — confirm against WordAttn/SentenceAttn.
        bz = input_ids.size(0)
        self.word_hidden_state = torch.zeros(2*self.n_layers, self.max_sents*bz, self.hidden_size).to(self.device)
        self.sent_hidden_state = torch.zeros(2*self.n_layers, bz, self.hidden_size).to(self.device)
    def forward(self, input, word_lengths, sent_lengths):
        """Return (logits, word_attn_scores, sent_attn_scores)."""
        # word_lengths => B, S
        # sent_lengths => B
        # input => Batch, Max_Sent, Max_Words
        # word_lengths => Batch, Max_Sent
        self.init_hidden_state(input)
        bs, ms, mw = input.size()
        # Fold sentences into the batch dimension for word-level attention.
        input = input.view(ms*bs, mw)
        word_lengths = word_lengths.view(ms*bs)
        # word_lengths => Batch* Max_Sent
        output, self.word_hidden_state, word_attn_scores = self.word_attn(input, self.word_hidden_state, word_lengths)
        # output => S*B, 2*H
        output = output.view(bs, ms, -1)
        # output => B, S, 2*H
        word_attn_scores = word_attn_scores.view(bs, ms, -1)
        output, self.sent_hidden_state, sent_attn_scores = self.sent_attn(output, self.sent_hidden_state, sent_lengths)
        # output => B, 2*H
        logits = self.fc(output)
        # logits => B, C
        return logits, word_attn_scores, sent_attn_scores
| raja-1996/Pytorch_TextClassification | Attention_Classification/HierarchicalAttention.py | HierarchicalAttention.py | py | 2,327 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 17:26:39 2017
@author: LFU
"""
#%% Import packages
from eo_exp_functions_trackpy import *
import platform
# ==========================================================================
## Main ##
def analysis_trackpy(dirname, z_list, voltage, fit_range):
    """Run the trackpy pipeline for every z series in *z_list* inside *dirname*.

    For each z: locate features, link trajectories, compute drift, band-pass
    filter, fit the oscillation amplitude, and collect the results into a
    per-run DataFrame which is also written to CSV.

    NOTE(review): results are stored via ``vars()[name] = ...`` inside a
    function — mutating the locals() mapping is CPython-implementation
    behaviour and fragile; a plain dict would be safer.
    """
    os.chdir(dirname)
    # Back up previous outputs instead of overwriting them.
    if os.path.isfile('result.json'):
        os.rename('result.json','result_bk.json')
    if os.path.isfile('amplitude_z_2.png'):
        os.rename('amplitude_z_2.png','amplitude_z_2_bk.png')
    size_feature = 23 # The size must be an odd integer, and it is better to err on the large side
    min_mass = 2e5 # There are many ways to distinguish real particles from spurrious ones. The most important way is to look at total brightness ("mass")
    max_mass = 6e5 # used for link
    min_size = 5 # used for link
    pixel_size = 303.03e-3 # in µm
    result_filename=job+"_df_"+str(min(z_list))+'_'+str(max(z_list))
    vars()[result_filename] = pd.DataFrame(columns = ['z','Amplitude_x','Amplitude_y','Amplitude'])
    logging.disable(10000)
    for z_id in z_list:
        # NOTE(review): the bare except below silently skips any z that
        # fails at any stage — consider at least logging the exception.
        try:
            z_str = "%03d" % z_id
            frames_name = 'f_'+z_str
            all_located_frames_name = 'f_all_located_'+z_str
            trajectory_name = 't_'+z_str
            drift_name = 'd_'+z_str
            residue_name = 'r_'+z_str
            print('================ \n We are analyzing the Series of z'+str(z_id))
            vars()[frames_name] = importImage(z_id, dirname)
            locateCheck(vars()[frames_name],size_feature, min_mass)
            vars()[all_located_frames_name] = locateAllFrames(vars()[frames_name], size_feature, min_mass)
            vars()[trajectory_name] = link(z_str, vars()[all_located_frames_name], vars()[frames_name], max_mass, min_size)
            vars()[trajectory_name].to_csv(trajectory_name + '.csv')
            vars()[drift_name] = driftComputation(z_str, vars()[trajectory_name])
            vars()[drift_name]['time']=vars()[drift_name].index*0.04 ## in s
            vars()[drift_name].to_csv(drift_name + '.csv')
            vars()[residue_name] = butterBandpassFilter(vars()[drift_name], z_str)
            #%
            # Convert the series index into a physical z position (µm);
            # job2 scans in the opposite direction.
            if job_direction == "job1":
                z=(z_id-1)*25.98 # in µm
            elif job_direction == "job2":
                z=78*25.98-(z_id-1)*25.98
            else:
                raise NameError('Wrong job direction')
            vars()[residue_name]=vars()[residue_name].query('24 < index < 103')
            # eliminate the first and the last period
            A = fitResidue(vars()[residue_name], z_str)
            vars()[residue_name].to_csv(residue_name+'.csv')
            print('Amplitude for z=',z,'is',A)
            result_dict = {'z':z}
            result_dict.update(A)
            vars()[result_filename].loc[z_id+1]=result_dict
        except:
            pass
    # for ftype in ['csv', 'eps', 'tif']:
    #     try:
    #         os.mkdir(ftype)
    #         os.system('find ./ -name "*.%s" -exec mv {} %s \;' %(ftype, ftype))
    #     except:
    #         pass
    # Convert pixel amplitudes to µm and add absolute-value columns.
    for title in ['Amplitude', 'Amplitude_x', 'Amplitude_y']:
        vars()[result_filename][title]=vars()[result_filename][title]*pixel_size
    vars()[result_filename]['Amplitude_x'+'_abs']=abs(vars()[result_filename]['Amplitude_x'])
    vars()[result_filename]['Amplitude_y'+'_abs']=abs(vars()[result_filename]['Amplitude_y'])
    vars()[result_filename].to_csv(result_filename+'.csv', index=False)
    # fitted_result_1=fitAmplitude(vars()[result_filename], fit_range, voltage=20.14)
    # fitted_result_2=fitAmplitude_2(vars()[result_filename], fit_range, voltage=voltage, column = 'Amplitude')
    return {'df':vars()[result_filename]}#, 'fitting':fitted_result_2}
    # NOTE(review): unreachable — this line follows the return statement.
    os.chdir('..')
#%%
if __name__ == '__main__':
    ## ==========================================================================
    ## Step 0: definition of variables
    job_direction = "job1"
    job_number = "_024"
    job = job_direction+job_number
    date = '05-03-2018'
    # mat = 'Carboxylate-modified'
    # mat = 'Amine-modified'
    # mat = 'MgO'
    # mat = 'Verre'
    mat = ''
    voltage = 6.65
    # voltage = 13.3
    z_list = list(range(3,5))
    fit_range = list(range(3,4))
    # Pick the data directory for the current OS.
    if platform.system() == 'Darwin':
        dirname = '/Users/lfu/Documents/Nectar_EO/'+date+'/'+ mat +'/'+job ## Mac
    elif platform.system() == 'Windows':
        dirname = r'J:/200_EK_Optic/EO/'+date+'/MgO/'+job ## Windows
    elif platform.system() == 'Linux':
        dirname = '/data1/lfu/200_EK_Optic/EO/'+date+'/MgO/'+job ## Linux
        # NOTE(review): this immediately overwrites the line above with a
        # hard-coded path — the date/job variables are ignored on Linux.
        dirname = '/data1/lfu/200_EK_Optic/EO/05-03-2018/job1_024'
    else:
        raise NameError('Wrong path or file name')
    result = analysis_trackpy(dirname, z_list, voltage, fit_range)
    #%% if we only want to fit the data, run the next script. we will read the .csv file
    #
    # os.chdir(dirname)
    # fit_range=list(range(1,69))
    # csv_name = job+"_df_"+str(min(z_list))+'_'+str(max(z_list))+'.csv'
    # df_to_fit = pd.read_csv(csv_name)
    # fitted_result_2=fitAmplitude_2(df_to_fit, fit_range, voltage=voltage, fig_name = 'amplitude', column = 'Amplitude', U_i_o=-25, U_p_o=25, phi_o=2.4, z_0_o = 416)
    #%%
## Reecepbcups - December 10th, 2018.
## Discord: Reecepbcups#3370
# A python app to scan google dorks and gather network cameras to homes, businesses, and the Government
# Ex. http://camera.buffalotrace.com/view/view.shtml?id=92509&imagePath=/mjpg/video.mjpg&size=1
# ---------------------------------------------------------------------------------
# THIS SOFTWARE HAS LITTLE TESTING, BUT IS MORE OPTOMIZED. USE "Camera_Finder.py"
# in the main area to run the less efficent code, but more reliable
# ---------------------------------------------------------------------------------
try:
from googlesearch import search
import requests
print('Modules Imported successfully\n -= You can run getCams() to start =-')
except:
print('!!Install google and requests modules!!')
print('Open CMD >> pip install -r requirements.txt')
ips = [] # blank list for the ips to go into
def getCams():
    """Run each Google dork and accumulate the resulting camera URLs.

    Appends every hit to the module-level ``ips`` list and returns it.
    Bug fix: the original rebound ``ips`` inside the loop, so each dork's
    results overwrote the previous dork's — only the last search survived.
    """
    global ips # makes sure "ips" variable can be used elsewhere
    dorks = [
        "inurl:indexFrame.shtml Axis",
        "inurl:view/view.shtml?videos",
        "inurl:”CgiStart?page=”",
        "inurl:/view.shtml",
        "inurl:ViewerFrame?M0de=",
        "inurliaxis-cgi/jpg",
        "intitle:”live view” intitle:axis",
        "intitle:”Live NetSnap Cam-Server feed”",
        "intitle:”Live View/ — AXIS 210?",
        "inurl:/mjpg/video.mjpg",
        "inurl:/view/view.shtml",
        "inurl:/view/view.shtml"
    ]
    for camera in dorks: # loops though the above list and gets ips/domains of network cameras.
        try:
            # for links in search results, using google.com
            # extend (not replace) so earlier results are kept
            ips.extend(search(camera, tld="com", num=100, stop=1, pause=1))
        except Exception:
            print('Failed on: ' + camera)
            print('HTTP Error 503: Google has blocked you from more searches.\nTry using https://repl.it/languages/python3 OR a VPN\n')
    return ips
def output():
junkLinks = ['alibaba', 'amazon', 'ebay', 'shop'] # just selling cameras, put junk here
for item in ips:
if item not in junkLinks:
with open('IP_Cameras.txt', 'a') as f:
f.write(item + "\n\n")
f.close()
if 'gov' in item:
with open('Government_Cameras.txt', 'a') as f:
f.write(item + "\n\n")
f.close()
if 'edu' in item:
with open('EDU_Cameras.txt', 'a') as f:
f.write(item + "\n\n")
f.close()
if 'com' in item:
with open('Comercial_Cameras.txt', 'a') as f:
f.write(item + "\n\n")
f.close()
| readloud/dorkgen | DorkCameraFinder/CameraFinderBeta.py | CameraFinderBeta.py | py | 2,572 | python | en | code | 10 | github-code | 90 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main():
L, R, d = map(int, input().split())
l = list(range(L, R+1))
cnt =0
for i in l:
if i%d==0:
cnt += 1
print(cnt)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02606/s587958852.py | s587958852.py | py | 244 | python | en | code | 0 | github-code | 90 |
import glob
import os
import shutil
import pytest
from cobbler import tftpgen
from cobbler.items.distro import Distro
def test_copy_bootloaders(tmpdir, cobbler_api):
    """
    Tests copying the bootloaders from the bootloaders_dir (setting specified in /etc/cobbler/settings.yaml) to the
    tftpboot directory.

    NOTE(review): writes to absolute system paths (/var/lib/cobbler,
    /srv/tftpboot) — requires the cobbler test container environment.
    """
    # Instantiate TFTPGen class with collection_mgr parameter
    generator = tftpgen.TFTPGen(cobbler_api)
    # Arrange
    # Create temporary bootloader files using tmpdir fixture
    file_contents = "I am a bootloader"
    sub_path = tmpdir.mkdir("loaders")
    sub_path.join("bootloader1").write(file_contents)
    sub_path.join("bootloader2").write(file_contents)
    # Copy temporary bootloader files from tmpdir to expected source directory
    for file in glob.glob(str(sub_path + "/*")):
        bootloader_src = "/var/lib/cobbler/loaders/"
        shutil.copy(file, bootloader_src + file.split("/")[-1])
    # Act
    generator.copy_bootloaders("/srv/tftpboot")
    # Assert
    assert os.path.isfile("/srv/tftpboot/bootloader1")
    assert os.path.isfile("/srv/tftpboot/bootloader2")
def test_copy_single_distro_file(cobbler_api):
    """
    Tests copy_single_distro_file() method using a sample initrd file pulled from CentOS 8

    NOTE(review): depends on the checked-in test fixture at
    /code/tests/test_data and the writable /srv/tftpboot tree.
    """
    # Instantiate TFTPGen class with collection_mgr parameter
    generator = tftpgen.TFTPGen(cobbler_api)
    # Arrange
    distro_file = "/code/tests/test_data/dummy_initramfs"
    distro_dir = "/srv/tftpboot/images/"
    symlink_ok = True
    initramfs_dst_path = "/srv/tftpboot/images/dummy_initramfs"
    # Act
    generator.copy_single_distro_file(distro_file, distro_dir, symlink_ok)
    # Assert
    assert os.path.isfile(initramfs_dst_path)
@pytest.fixture(autouse=True)
def cleanup_copy_single_distro_files(cobbler_api):
    # Autouse teardown: remove the distro registered by
    # test_copy_single_distro_files after every test in this module.
    yield
    cobbler_api.remove_distro("test_copy_single_distro_files")
def test_copy_single_distro_files(create_kernel_initrd, fk_initrd, fk_kernel, cobbler_api, cleanup_copy_single_distro_files):
    """Verify copy_single_distro_files() mirrors a distro's kernel and initrd
    into ``<directory>/images/<distro name>/``."""
    # Arrange
    # Create fake files
    directory = create_kernel_initrd(fk_kernel, fk_initrd)
    # Create a test Distro
    test_distro = Distro(cobbler_api)
    test_distro.name = "test_copy_single_distro_files"
    test_distro.kernel = str(os.path.join(directory, fk_kernel))
    test_distro.initrd = str(os.path.join(directory, fk_initrd))
    # Add test distro to the API
    cobbler_api.add_distro(test_distro)
    # Create class under test
    test_gen = tftpgen.TFTPGen(cobbler_api)
    # Act
    test_gen.copy_single_distro_files(test_distro, directory, False)
    # Assert that path created by function under test is actually there
    result_kernel = os.path.join(directory, "images", test_distro.name, fk_kernel)
    result_initrd = os.path.join(directory, "images", test_distro.name, fk_initrd)
    assert os.path.exists(result_kernel)
    assert os.path.exists(result_initrd)
| SolitaryGarrison/cobbler-t | tests/tftpgen_test.py | tftpgen_test.py | py | 2,913 | python | en | code | 0 | github-code | 90 |
import sys
input=sys.stdin.readline
input_ = list(input().strip())
sign = []
num_li = []
num = ""
minus_index = []
j=0
for i in range(len(input_)):
if input_[i] == '+':
j+=1
sign.append(input_[i])
if num != '':
num_li.append(int(num))
num=''
elif input_[i]=='-':
minus_index.append(j)
j+=1
sign.append(input_[i])
if num != '':
num_li.append(int(num))
num=''
else:
num+=input_[i]
if i==len(input_)-1:
num_li.append(int(num))
if len(sign) != 0:
length = len(sign)
used = [False for i in range(length+1)]
result = []
tmp = 0
for i in range(len(minus_index)-1,-1,-1):
for j in range(minus_index[i],length):
tmp += num_li[j+1]
used[j+1] = True
result.append(-tmp)
tmp = 0
length -= length-minus_index[i]
for i in range(length+1):
if not used[i]:
result.append(num_li[i])
print(sum(result))
else:
print(num_li[0])
##1등 코드
# e = [sum(map(int, x.split('+'))) for x in input().split('-')]
# print(e[0]-sum(e[1:])) | YeongHyeon-Kim/BaekJoon_study | 0627/1541_잃어버린괄호.py | 1541_잃어버린괄호.py | py | 1,173 | python | en | code | 1 | github-code | 90 |
18446553779 | p = []
for i in range(3):
a, b = input().split()
p.append(a)
p.append(b)
if sorted(p) == ['1','2','2','3','3','4']:
print("YES")
else:
print("NO") | Aasthaengg/IBMdataset | Python_codes/p03130/s737031972.py | s737031972.py | py | 157 | python | en | code | 0 | github-code | 90 |
import random
class Winner:
    """Announces the winner with a randomly chosen motivational message."""

    def __init__(self):
        # Fixed pool of encouragements shown after a win.
        self.winning_messages = [
            "You can do it!",
            "Believe in yourself!",
            "Go get 'em tiger!",
            "Success is just around the corner!",
            "You are doing great!",
            "Awesome! Keep it up!",
            "You're a Rockstar!",
        ]

    def display_message(self):
        """Pick one message at random from the pool and print it."""
        chosen = random.choice(self.winning_messages)
        print(chosen)
| shib1111111/Rock-Paper-Scissors-Game | winner.py | winner.py | py | 435 | python | en | code | 0 | github-code | 90 |
def solution(n, money):
    """Count the ways to make change for `n` using coin values in `money`.

    Classic unbounded coin-change DP: dp[i] holds the number of distinct
    coin combinations summing to i.  Returns the count mod 1,000,000,007.

    Args:
        n (int): target amount (>= 0).
        money (list[int]): available coin denominations.

    Returns:
        int: number of combinations modulo 1000000007.
    """
    dp = [0] * (n + 1)
    dp[0] = 1  # one way to make 0: use no coins
    for coin in money:
        # Starting the range at `coin` makes the i - coin index always valid,
        # replacing the original per-iteration bounds check.
        for i in range(coin, n + 1):
            dp[i] += dp[i - coin]
    # Fixed: removed leftover debug `print(dp, m)` that polluted stdout,
    # and the dead initial `answer = 0` binding.
    return dp[n] % 1000000007
| nbalance97/Programmers | Lv 3/거스름돈.py | 거스름돈.py | py | 272 | python | en | code | 0 | github-code | 90 |
72207928938 | # -*- coding: utf-8 -*-
# @Time : 2019/8/8 0008 14:06
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: Move Zeroes.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
"""
from typing import List
def moveZeroes(nums: List[int]) -> None:
    """Move every non-zero element of `nums` to the front, in place,
    keeping their relative order; all zeros end up at the back."""
    write = 0  # index of the next slot reserved for a non-zero value
    for read, value in enumerate(nums):
        if value != 0:
            nums[write], nums[read] = value, nums[write]
            write += 1
if __name__ == '__main__':
    # Quick demo: moveZeroes mutates the list in place and returns None,
    # so print the list itself.  Removed the unused `k` variable and the
    # pointless `result = moveZeroes(...)` binding (always None).
    nums = [0, 1, 0, 3, 12]
    moveZeroes(nums)
    print(nums)
15167295671 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
import folium
import requests
from streamlit_folium import folium_static
import requests
import calendar
import json
sns.set(style='whitegrid')
bStates = pd.read_csv("output/final_dataframe.csv")
bSellers = pd.read_csv("output/product_stats_dataframe.csv")
bTopSellers = pd.read_csv("output/top_sellers.csv")
bProducts = pd.read_csv("output/seller_aggregated_dataframe.csv")
bCities = pd.read_csv("output/cities_by_state.csv")
bTopSeasonalSales = pd.read_csv("output/top_seasonal_sales.csv")
bBottomSeasonalSales = pd.read_csv("output/bottom_seasonal_sales.csv")
brazilian_states = ['AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS', 'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC', 'SP', 'SE', 'TO']
def plot_popular_product_by_state(df):
    """Render a bar chart of each state's most popular product category
    (colour-coded by category) into the Streamlit page."""
    figure, axis = plt.subplots(figsize=(14, 8))
    ranked = df.sort_values('Product Sold Count', ascending=False)
    sns.barplot(
        data=ranked,
        x='State',
        y='Product Sold Count',
        hue='Product Category',
        dodge=False,
        ax=axis,
    )
    axis.set_title('Most Popular Product Categories by State')
    axis.set_xticklabels(axis.get_xticklabels(), rotation=90)
    axis.legend(title='Product Category', loc='upper right')
    plt.tight_layout()
    st.pyplot(figure)
def plot_customers_and_revenue_by_state(df):
    """Dual-axis chart: customer counts as bars (left axis) and total
    revenue as a line (right axis), one point per state."""
    ordered = df.sort_values('Total Customer', ascending=False)
    figure, customer_axis = plt.subplots(figsize=(14, 8))
    bar_color = 'tab:blue'
    customer_axis.set_xlabel('State')
    customer_axis.set_ylabel('Total Customers', color=bar_color)
    sns.barplot(x='State', y='Total Customer', data=ordered, color=bar_color, alpha=0.6, ax=customer_axis)
    customer_axis.tick_params(axis='y', labelcolor=bar_color)
    revenue_axis = customer_axis.twinx()  # share the x axis, separate y scale
    line_color = 'tab:red'
    revenue_axis.set_ylabel('Total Spent', color=line_color)
    sns.lineplot(x='State', y='Total Spent', data=ordered, color=line_color, marker='o', ax=revenue_axis)
    revenue_axis.tick_params(axis='y', labelcolor=line_color)
    figure.tight_layout()  # To ensure the tight layout
    plt.title('Total Customers and Total Revenue by State (Sorted by Total Customers)')
    st.pyplot(figure)
def plot_top_sellers_sales_and_reviews(df):
    """Plot total sales (bars) and average review score (line) for the
    top-20 sellers on a shared x axis with two y scales."""
    figure, sales_axis = plt.subplots(figsize=(15, 10))
    sns.barplot(data=df, x='seller_id', y='total_sales', color='lightblue', label='Total Sales', ax=sales_axis)
    review_axis = sales_axis.twinx()
    sns.lineplot(data=df, x='seller_id', y='average_review_score', marker='o', color='red', label='Average Review Score', ax=review_axis)
    sales_axis.set_title('Top 20 Sellers: Total Sales and Average Review Score')
    # Seller ids are long hashes, so rotate them for readability.
    sales_axis.set_xticklabels(sales_axis.get_xticklabels(), rotation=90)
    sales_axis.set_xlabel('Seller ID')
    sales_axis.set_ylabel('Total Sales')
    review_axis.set_ylabel('Average Review Score')
    sales_axis.legend(loc='upper left')
    review_axis.legend(loc='upper right')
    plt.tight_layout()
    st.pyplot(figure)
def _plot_seasonal(df, value_column, title, ylabel):
    """Shared renderer for the four seasonal bar charts below.

    The originals were four copies of the same plotting code differing only
    in the y column, title, and y-axis label; consolidated here.
    """
    season_order = ['Summer', 'Autumn', 'Winter', 'Spring']
    fig, ax = plt.subplots(figsize=(14, 7))
    sns.barplot(data=df, x='product_category_name_english', y=value_column,
                hue='season', hue_order=season_order, ax=ax)
    ax.set_title(title)
    ax.set_xlabel('Product Category')
    ax.set_ylabel(ylabel)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    ax.legend(title='Season')
    plt.tight_layout()
    st.pyplot(fig)


def plot_top_seasonal_sales(df):
    """Seasonal total sales for the top product categories."""
    _plot_seasonal(df, 'total_sales',
                   'Seasonal Total Sales for Top Product Categories',
                   'Total Sales')


def plot_top_seasonal_order_count(df):
    """Seasonal order counts for the top product categories."""
    _plot_seasonal(df, 'order_count',
                   'Seasonal Order Count for Top Product Categories',
                   'Order Count')


def plot_bottom_seasonal_sales(df):
    """Seasonal total sales for the bottom product categories."""
    _plot_seasonal(df, 'total_sales',
                   'Seasonal Total Sales for Bottom Product Categories',
                   'Total Sales')


def plot_bottom_seasonal_order_count(df):
    """Seasonal order counts for the bottom product categories."""
    _plot_seasonal(df, 'order_count',
                   'Seasonal Order Count for Bottom Product Categories',
                   'Order Count')
def get_state_geojson(state_code):
    """Download the municipal GeoJSON for one Brazilian state.

    Returns the parsed GeoJSON dict, or None (after showing a Streamlit
    error) when the request does not succeed.
    """
    geojson_url = f"https://raw.githubusercontent.com/luizpedone/municipal-brazilian-geodata/master/data/{state_code}.json"
    response = requests.get(geojson_url)
    if response.status_code != 200:
        st.error(f"Failed to load GeoJSON for state {state_code}")
        return None
    return response.json()
def aggregate_data_by_city(df, state_code):
    """
    Aggregates data by city for a given state.

    :param df: The merged dataframe containing all the information.
    :param state_code: The two-letter code for a Brazilian state.
    :return: A dataframe aggregated at the city level with columns
        total_orders, total_sales, average_review_score, total_freight
        and total_products_sold.
    """
    within_state = df[df['customer_state'] == state_code]
    # Named aggregation assigns the output column names directly, replacing
    # the original agg-dict-plus-rename two-step.
    summary = (
        within_state
        .groupby('customer_city')
        .agg(
            total_orders=('order_id', 'nunique'),
            total_sales=('payment_value', 'sum'),
            average_review_score=('review_score', 'mean'),
            total_freight=('freight_value', 'sum'),
            total_products_sold=('product_id', 'nunique'),
        )
        .reset_index()
    )
    return summary
def create_state_map(state_code, df):
    """Build a folium map of one state with per-city order/sales tooltips.

    Returns None if the state's GeoJSON could not be downloaded.
    """
    state_geojson = get_state_geojson(state_code)
    if state_geojson is None:
        return None
    aggregated_data = aggregate_data_by_city(df, state_code)
    # Index the per-city metrics by city name for O(1) lookup below.
    city_data_dict = aggregated_data.set_index('customer_city').to_dict(orient='index')
    for feature in state_geojson['features']:
        # GeoJSON city names are matched case-insensitively; assumes the
        # dataframe's customer_city values are already lower-case — TODO confirm.
        city_name = feature['properties']['NOME'].lower()
        if city_name in city_data_dict:
            feature['properties'].update(city_data_dict[city_name])
        else:
            # Cities with no recorded orders still need all tooltip fields.
            feature['properties'].update({
                'total_orders': 0,
                'total_sales': 0.0,
                'average_review_score': None,
                'total_freight': 0.0,
                'total_products_sold': 0
            })
    # NOTE(review): centres the map on the first vertex of the first feature's
    # polygon, not a true centroid; assumes a Polygon coordinate layout and
    # may misbehave for MultiPolygon features — verify against the data.
    state_center = [state_geojson['features'][0]['geometry']['coordinates'][0][0][1], # latitude
                    state_geojson['features'][0]['geometry']['coordinates'][0][0][0]] # longitude
    state_map = folium.Map(location=state_center, zoom_start=6)
    def style_function(feature):
        # Green fill for cities with at least one order, grey otherwise.
        return {
            'fillColor': 'green' if feature['properties']['total_orders'] > 0 else 'gray',
            'color': 'black',
            'weight': 0.5,
            'dashArray': '5, 5',
            'fillOpacity': 0.6
        }
    folium.GeoJson(
        data=state_geojson,
        style_function=style_function,
        tooltip=folium.GeoJsonTooltip(
            fields=['NOME', 'total_orders', 'total_sales', 'average_review_score', 'total_freight', 'total_products_sold'],
            aliases=['City:', 'Total Orders:', 'Total Sales (BRL):', 'Average Review Score:', 'Total Freight (BRL):', 'Total Products Sold:'],
            localize=True
        )
    ).add_to(state_map)
    return state_map
# --- Streamlit page body -------------------------------------------------
# Sidebar: logo plus the mode switch that selects which dashboard to render.
with st.sidebar:
    st.image("olist.png")
    mode = st.radio(
        "Choose Dashboard Mode",
        ["Geoanalysis", "All Over Brazil"]
    )
if mode == "Geoanalysis":
    st.title('Geoanalysis Dashboard')
    # Re-read the state-level CSVs (shadows the module-level frames of the
    # same names loaded at start-up).
    bStates = pd.read_csv('output/final_dataframe.csv')
    bProducts = pd.read_csv('output/seller_aggregated_dataframe.csv')
    seller_state_counts = bProducts['Origin State'].value_counts()
    # One dict of metrics per state code, keyed by 'State'.
    state_info = bStates.set_index('State').T.to_dict('dict')
    # Country-level state boundaries for the choropleth.
    response = requests.get("https://raw.githubusercontent.com/codeforamerica/click_that_hood/master/public/data/brazil-states.geojson")
    brazil_geojson = response.json()
    # Attach seller counts and per-state metrics to each feature so the
    # tooltip below can display them.
    for feature in brazil_geojson["features"]:
        state_code = feature["properties"]["sigla"]
        feature["properties"]["seller_count"] = int(seller_state_counts.get(state_code, 0))
        if state_code in state_info:
            for key, value in state_info[state_code].items():
                feature["properties"][key] = value
    # Map centred roughly on Brasília.
    m = folium.Map(location=[-15.78, -47.93], zoom_start=4, tiles="cartodb positron")
    def style_function(feature):
        # Base style for state polygons.
        return {
            'fillOpacity': 0.5,
            'color': 'black',
            'weight': 1
        }
    def highlight_function(feature):
        # Style applied on hover.
        return {
            'fillColor': '#2aabd2',
            'color': 'green',
            'weight': 3,
            'dashArray': '1',
            'fillOpacity': 0.7
        }
    tooltip = folium.GeoJsonTooltip(
        fields=["sigla", "seller_count", "Popular Product", "Product Category", "Product Sold Count", "Popular Seller", "Total Customer", "Total Spent", "Average review score"],
        aliases=["State:", "Seller Count:", "Popular Product:", "Product Category:", "Product Sold Count:", "Popular Seller:", "Total Customer:", "Total Spent:", "Average Review Score:"],
        localize=True
    )
    # Colour states by their number of sellers.
    folium.Choropleth(
        geo_data=brazil_geojson,
        data=seller_state_counts,
        columns=('Origin State', 'seller_count'),
        key_on='feature.properties.sigla',
        fill_color='YlGn',
        fill_opacity=0.7,
        line_opacity=0.2,
        threshold_scale=[1, 50, 200, 400, 675, 950, 1350, 1734],
        nan_fill_color="white",
        legend_name="Number of Sellers by State"
    ).add_to(m)
    # Transparent overlay carrying the hover tooltip.
    geojson_layer = folium.GeoJson(
        data=brazil_geojson,
        style_function=style_function,
        highlight_function=highlight_function,
        tooltip=tooltip
    ).add_to(m)
    # Label each state with its two-letter code at an approximate centroid
    # (mean of the first ring's vertices — not a true geometric centroid).
    for feature in brazil_geojson['features']:
        if 'seller_count' in feature['properties']:
            coords = feature['geometry']['coordinates'][0][0]
            x_coords = [coord[0] for coord in coords]
            y_coords = [coord[1] for coord in coords]
            centroid = (sum(y_coords) / len(coords), sum(x_coords) / len(coords))
            label = feature['properties']['sigla']
            folium.Marker(
                location=centroid,
                icon=folium.DivIcon(html=f"<div style='text-align:center;'>{label}</div>"),
                draggable=False,
                keyboard=False,
                disable_3d=True
            ).add_to(m)
    folium_static(m)
    # NOTE(review): this template appears unused — get_state_geojson builds
    # its own URL; candidate for removal.
    geojson_url_template = "https://raw.githubusercontent.com/luizpedone/municipal-brazilian-geodata/master/data/{state_code}.json"
    # Drill-down: per-city map for the selected state.
    option = st.selectbox(
        'Choose State',
        tuple(brazilian_states))
    state_map = create_state_map(option, bCities)
    folium_static(state_map)
else:
    # "All Over Brazil" mode: static seaborn charts.
    st.title('Sales Dashboard')
    st.header('Most Popular Product Categories by State')
    plot_popular_product_by_state(bStates)
    st.header('Total Customers and Total Revenue by State')
    plot_customers_and_revenue_by_state(bStates)
    st.header('Seasonal Sales Analysis for Top Product Categories')
    st.subheader('Total Sales')
    plot_top_seasonal_sales(bTopSeasonalSales)
    st.subheader('Order Count')
    plot_top_seasonal_order_count(bTopSeasonalSales)
    st.header('Seasonal Sales Analysis for Bottom Product Categories')
    st.subheader('Total Sales')
    plot_bottom_seasonal_sales(bBottomSeasonalSales)
    st.subheader('Order Count')
    plot_bottom_seasonal_order_count(bBottomSeasonalSales)
| khalidbagus/olist-ecom | dashboard/dashboard.py | dashboard.py | py | 12,376 | python | en | code | 0 | github-code | 90 |
3961675807 | import os
import sqlite3
from collections import Counter
def parse_decompositions(decomposition_file_path, database_path):
    """Parse a cjdecomp decomposition file into a sqlite `decompositions` table.

    Each input line has the form ``<character>:<type>(<components>)``.
    The table is dropped and recreated, so repeated runs are idempotent.

    Args:
        decomposition_file_path: Path to the raw cjdecomp text file.
        database_path: Path to the sqlite database to (re)populate.

    Raises:
        Exception: If the decomposition file does not exist.
    """
    if not os.path.isfile(decomposition_file_path):
        raise Exception("Couldn't find {}!".format(decomposition_file_path))
    with open(decomposition_file_path) as f_decomposition:
        print("Parsing character decompositions.")
        # Fixed: `conn` is now bound before the try block, so the `finally`
        # clause cannot raise UnboundLocalError when connect() itself fails;
        # the redundant second close() inside the try block is gone too.
        conn = None
        try:
            conn = sqlite3.connect(database_path)
            c = conn.cursor()
            c.execute("DROP TABLE IF EXISTS decompositions")
            c.execute("VACUUM")
            c.execute('''CREATE TABLE decompositions (
                                id INTEGER PRIMARY KEY,
                                character TEXT NOT NULL,
                                decomposition_type TEXT NOT NULL,
                                components TEXT NOT NULL
                            );''')
            for line in f_decomposition:
                # e.g. "X:a(Y,Z)" -> character="X", type="a", components="Y,Z"
                character, decomposition = line.strip().split(':')
                decomposition_type, components = decomposition.split('(')
                components = components.replace(')', '')
                c.execute("""INSERT INTO decompositions (character, decomposition_type, components)
                                VALUES (?, ?, ?)""",
                          (character, decomposition_type, components))
            conn.commit()
            print("Succesfully parsed decomposition data.")
        except sqlite3.Error as error:
            print("Failed to insert data into sqlite table:", error)
        finally:
            if conn:
                conn.close()
if __name__ == "__main__":
    # Default input/output locations, relative to this script's directory.
    cjdecomp_raw_file_path = "../data/cjdecomp.txt"
    database_path = "../output/data.db"
    parse_decompositions(cjdecomp_raw_file_path, database_path)
| Mr-Pepe/pengyou-data-generator | src/cjdecomp_parser.py | cjdecomp_parser.py | py | 1,829 | python | en | code | 0 | github-code | 90 |
39019517757 | #!/usr/bin/env python
"""
Create TimeSeries Model Data
"""
import numpy as np
import pandas as pd
import logging
from ep_clustering._utils import (
Map, fix_docs, convert_matrix_to_df, convert_df_to_matrix
)
from ep_clustering.data._gibbs_data import (
GibbsData, _categorical_sample
)
# Author Information
__author__ = "Christopher Aicher"
# Modify the root logger
logger = logging.getLogger(name=__name__)
# TimeSeries Model Data
@fix_docs
class TimeSeriesData(GibbsData):
    """ Data for TimeSeries GibbsSampler

    Additional Attributes:
        df (pd.DataFrame): data frame with data with columns
            (observation, dimension, ...)
        observation_name (string): name of observation column in df

    Additional Methods:
        get_matrix(column_name)
        subset(indices)
    """
    def __init__(self, df, *args, **kwargs):
        # Keep the (observation, dimension) MultiIndex sorted so that the
        # .loc-based filtering in subset() stays efficient.
        df = df.sort_index()
        super(TimeSeriesData, self).__init__(df=df, *args, **kwargs)
        return

    def _validate_data(self):
        """Check that df, observation_name and required index levels exist."""
        super(TimeSeriesData, self)._validate_data()
        if "df" not in self:
            raise ValueError("`df` must be defined for TimeSeriesData")
        if "observation_name" not in self:
            raise ValueError(
                "`observation_name` must be defined for TimeSeriesData")
        if "observation" not in self.df.index.names:
            raise ValueError("row_index 'observation' not in df index")
        if "dimension" not in self.df.index.names:
            raise ValueError("col_index 'dimension' not in df index")
        if self.observation_name not in self.df.columns:
            # Fixed: previously formatted the bare name `observation_name`,
            # which raised NameError instead of the intended ValueError.
            raise ValueError("observation_name {0} not in df".format(
                self.observation_name))

    def get_matrix(self, column_name=None):
        """ Return mean and count matrix (observation x dim) of column_name"""
        if column_name is None:
            column_name = self.observation_name
        if column_name not in self.df.columns:
            raise ValueError("column_name {0} not in df".format(column_name))
        return convert_df_to_matrix(self.df, value_name=column_name,
                row_index="observation", col_index="dimension")

    def subset(self, indices):
        """Return a copy of this data restricted to the given observation ids."""
        if isinstance(indices, np.ndarray):
            indices = indices.tolist()
        # The original had a special case for len(indices) == 1 (a pandas
        # quirk workaround) whose body was identical to the general path,
        # so the branch is collapsed to a single expression.
        subset_df = self.df.loc[
            self.df.index.get_level_values('observation').isin(indices)
        ]
        subset_data = type(self)(**self.copy())  # Copy Self
        subset_data.df = subset_df
        subset_data.num_obs = \
            subset_df.index.get_level_values('observation').max() + 1
        subset_data.num_dims = \
            subset_df.index.get_level_values('dimension').max() + 1
        subset_data._validate_data()
        return subset_data
# TimeSeries Model Data Generation
class TimeSeriesDataGenerator(object):
    """ TimeSeries Model Data Generator

    Args:
        num_obs (int): number of observations
        num_dim (int): number of dimensions
        K (int): number clusters
        **kwargs (dict):
            `Cluster Proportion Probabilities`
            cluster_proportions (ndarray): cluster proportion probabilities
            or
            proportion_prior (ndarray): parameter for Dirichlet prior
            `Cluster Parameter`
            sigma2_x (double): latent process noise variance (default 1.0)
            `Series-Specific Parameters`
            A (ndarray): AR coefficients (default 0.99 * np.ones(N))
            sigma2_y (ndarray): obs noise variance (default np.ones(N))
            lambduh (ndarray): latent factor loadings (default np.ones(N))
            x0 (ndarray): latent process initialization
            `Options`
            missing_obs (double or ndarray): probability of missing obs
            regression (boolean): whether to include dummy covariates
            covariate_coeff (ndarray, optional): regression covariates
                must by num_dim by num_coeff
    Methods:
        generate_cluster_proportions(proportion_prior): cluster_proportions
        generate_data(): returns data
    """
    def __init__(self, num_obs, num_dim, K, **kwargs):
        self.num_obs = num_obs
        self.num_dim = num_dim
        self.K = K
        self._parse_param(**kwargs)
        # Optional regression covariates: default is a zero coefficient
        # matrix with two dummy covariates per observation.
        if kwargs.get('regression', False):
            self.param.covariate_coeff = kwargs.get('covariate_coeff',
                    np.zeros((self.num_obs, 2)))
        return
    def _parse_param(self, **kwargs):
        # Defines self.param: merge recognised kwargs over these defaults
        # (unrecognised keys are silently ignored).
        default = {
            'sigma2_x': 1.0,
            'A': None,
            'sigma2_y': None,
            'sigma2_theta': 1.0,
            'lambduh': None,
            'missing_obs': 0.0,
            'x_0': None,
        }
        for key, value in kwargs.items():
            if key in default.keys():
                default[key] = value
        param = Map(default)
        # Handle variable arg defaults
        if param.A is None:
            param.A = 0.99 * np.ones(self.num_obs)
        if param.lambduh is None:
            param.lambduh = np.ones(self.num_obs)
        if param.sigma2_y is None:
            param.sigma2_y = np.ones(self.num_obs)
        if param.x_0 is None:
            # Draw x_0 from the AR(1) stationary distribution,
            # variance sigma2_x / (1 - A^2).  Assumes |A| < 1 — TODO confirm.
            var_0 = param.sigma2_x * (1.0/(1.0 - param.A**2))
            param.x_0 = np.random.normal(0,1,self.num_obs)*np.sqrt(var_0)
        self.param = param
        return
    def generate_cluster_proportions(self, proportion_prior=None):
        # Draw cluster proportions from a Dirichlet; an explicit prior
        # argument overrides any previously stored one.
        if proportion_prior is not None:
            self.param.proportion_prior = proportion_prior
        if 'proportion_prior' not in self.param:
            self.param.proportion_prior = 100 * np.ones(self.K)
        cluster_proportions = np.random.dirichlet(
                alpha = self.param.proportion_prior, size=1)
        return cluster_proportions
    def generate_data(self):
        # Get Proportions
        if 'cluster_proportions' not in self.param:
            self.param.cluster_proportions = self.generate_cluster_proportions()
        # Generate Data: first sample a cluster label per observation.
        z = np.array(
                [ _categorical_sample(probs=self.param.cluster_proportions)
                    for i in range(0,self.num_obs)],
                dtype=int)
        x = np.zeros((self.num_dim, self.num_obs))
        y = np.zeros((self.num_dim, self.num_obs))
        theta = np.zeros((self.num_dim, self.K))
        x_t = self.param.x_0
        # Evolve the latent AR(1) process over dimensions (treated as time),
        # adding the cluster factor lambduh * theta[z] at each step.
        for t in range(0,self.num_dim):
            theta_t = np.random.normal(0,1,self.K)
            theta[t,:] = theta_t
            x_t = self.param.A * x_t
            x_t += (np.random.normal(0,1,self.num_obs) *
                    np.sqrt(self.param.sigma2_x))
            x_t += (self.param.lambduh *
                    _one_hot(z, self.K).dot(theta_t))
            x[t,:] = x_t
            # Observations are the latent state plus Gaussian noise.
            y[t,:] = x_t + (np.random.normal(0,1,self.num_obs) *
                    np.sqrt(self.param.sigma2_y))
            if self.param.missing_obs > 0.0:
                # Drop observations at random to mimic missing data.
                missing = np.random.rand(self.num_obs) < self.param.missing_obs
                y[t,missing] = np.nan
        df = convert_matrix_to_df(y.T, observation_name = "y")
        # Add Regression + Covariates: y = y_resid + sum_i cov_i * coeff[obs, i]
        if 'covariate_coeff' in self.param:
            # TODO: REFACTOR THIS
            covariate_coeff = self.param.covariate_coeff
            num_coeff = covariate_coeff.shape[1]
            for ii in range(num_coeff):
                df['cov_{0}'.format(ii)] = np.random.normal(size=df.shape[0])
            df['y_resid'] = df['y'] + 0.0
            y_new = df.reset_index().apply(lambda row: row['y_resid'] +
                    np.sum([
                        row['cov_{0}'.format(ii)] *
                        covariate_coeff[int(row['observation']), ii]
                        for ii in range(num_coeff)
                        ]), axis=1)
            df['y'] = y_new.values
        # Format Output: stash the latent path and wrap everything in
        # a TimeSeriesData container.
        self.param['x'] = x.T
        data = TimeSeriesData(
                df = df,
                observation_name = "y",
                theta = theta.T,
                z = z,
                num_obs = self.num_obs,
                num_dim = self.num_dim,
                K = self.K,
                parameters = self.param,
                )
        return data
def _one_hot(z, K):
""" Convert z into a one-hot bit vector representation """
z_one_hot = np.zeros((z.size, K))
z_one_hot[np.arange(z.size), z] = 1
return z_one_hot
# Example Script
if __name__ == "__main__":
    # Smoke-test: generate a small synthetic dataset (50 series, 100
    # dimensions, 3 clusters) with low latent process noise.
    print("Example Create TimeSeries Model Data")
    data_generator = TimeSeriesDataGenerator(
            num_obs = 50,
            num_dim = 100,
            K = 3,
            sigma2_x = 0.01)
    my_data = data_generator.generate_data()
#EOF
| aicherc/EP_Collapsed_Gibbs | ep_clustering/data/_timeseries_data.py | _timeseries_data.py | py | 9,049 | python | en | code | 1 | github-code | 90 |
11393586133 | from django.urls import path
from django.conf.urls import include
from . import views
# URL routes for the landing-page / CRM app.  Route names are referenced by
# templates and redirects, so they must stay stable.
urlpatterns = [
    path('', views.index, name='index'),
    path('dashboard/', views.dashboard, name='dashboard'),
    # Same view mapped again so the post-login redirect path also resolves.
    path('accounts/login/dashboard/',views.dashboard,name='dashboard'),
    # Attendance (atendimento) and scheduling (agendamento) CRUD routes.
    path('atendimento/<int:pk>/', views.atendimento, name='atendimento'),
    path('criar_agendamento/', views.criar_agendamento, name='criar_agendamento'),
    path('criar_atendimento/', views.criar_atendimento, name='criar_atendimento'),
    # Lead management: open, qualify and delete leads by primary key.
    path('abrirleads/<int:pk>/', views.abrirleads, name='abrirleads'),
    path('cadastrar_clientes/', views.cadastrar_clientes, name='cadastrar_clientes'),
    path('cadastro_cliente/', views.cadastro_cliente, name='cadastro_cliente'),
    path('qualificar_leads/<int:pk>/', views.qualificar_leads, name='qualificar_leads'),
    path('leads_excluir/<int:pk>/', views.excluir_leads, name='excluir_leads'),
    path('editar_agendamento/<int:pk>/', views.editar_agendamento, name='editar_agendamento'),
    path('deletar_agendamento/<int:pk>/', views.deletar_agendamento, name='deletar_agendamento'),
    # User settings and profile/analytics configuration endpoints.
    path('configuracao/<str:user>/', views.configuracao, name='configuracao'),
    path('add_img_perfil/', views.add_img_perfil, name='add_img_perfil'),
    path('editar_img_perfil/', views.editar_img_perfil, name='editar_img_perfil'),
    path('add_meta_tag/', views.add_meta_tag, name='add_meta_tag'),
    path('add_tag_google/', views.add_tag_google, name='add_tag_google'),
]
| Andressa-Anthero7/LP-ANTHERUS-V1 | lp/urls.py | urls.py | py | 1,495 | python | pt | code | 0 | github-code | 90 |
14064949971 | from typing import List
import iris
import numpy as np
import pandas as pd
import pytest
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from improver.calibration.dz_rescaling import ApplyDzRescaling
from improver.constants import SECONDS_IN_HOUR
from improver.metadata.constants.time_types import TIME_COORDS
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
# Two dummy spot sites sharing the same location (0, 0) at zero altitude,
# distinguished only by their WMO identifiers.
altitude = np.zeros(2)
latitude = np.zeros(2)
longitude = np.zeros(2)
wmo_id = ["00001", "00002"]
def _create_forecasts(
    forecast_reference_time: str,
    validity_time: str,
    forecast_period: float,
    forecast_percs: List[float],
) -> Cube:
    """Create site forecast cube for testing.

    Args:
        forecast_reference_time: Timestamp e.g. "20170101T0000Z".
        validity_time: Timestamp e.g. "20170101T0600Z".
        forecast_period: Forecast period in hours.
        forecast_percs: Forecast wind speed at 10th, 50th and 90th percentile.

    Returns:
        Forecast cube containing three percentiles and two sites.
    """
    # Duplicate each percentile value so both sites share identical data.
    data = np.array(forecast_percs).repeat(2).reshape(3, 2)
    perc_coord = DimCoord(
        np.array([10, 50, 90], dtype=np.float32), long_name="percentile", units="%",
    )
    # Time coordinates are cast to the canonical IMPROVER dtypes/units so the
    # cube matches what the plugin under test expects.
    fp_coord = AuxCoord(
        np.array(
            forecast_period * SECONDS_IN_HOUR,
            dtype=TIME_COORDS["forecast_period"].dtype,
        ),
        "forecast_period",
        units=TIME_COORDS["forecast_period"].units,
    )
    time_coord = AuxCoord(
        np.array(
            pd.Timestamp(validity_time).timestamp(), dtype=TIME_COORDS["time"].dtype,
        ),
        "time",
        units=TIME_COORDS["time"].units,
    )
    frt_coord = AuxCoord(
        np.array(
            pd.Timestamp(forecast_reference_time).timestamp(),
            dtype=TIME_COORDS["forecast_reference_time"].dtype,
        ),
        "forecast_reference_time",
        units=TIME_COORDS["forecast_reference_time"].units,
    )
    cube = build_spotdata_cube(
        data,
        "wind_speed_at_10m",
        "m s-1",
        altitude,
        latitude,
        longitude,
        wmo_id,
        scalar_coords=[fp_coord, time_coord, frt_coord],
        additional_dims=[perc_coord],
    )
    return cube
def _create_scaling_factor_cube(
    frt_hour: int, forecast_period_hour: int, scaling_factor: float
) -> Cube:
    """Create a scaling factor cube containing forecast_reference_time_hours of 3 and 12 and
    forecast_period_hours of 6, 12, 18 and 24 and two sites.

    All scaling factors are 1 except at the specified [frt_hour, forecast_period_hour], where
    scaling_factor is used for the first site only.

    Returns:
        Scaling factor cube.
    """
    cubelist = iris.cube.CubeList()
    # Build one cube per (reference hour, forecast period) combination, then
    # merge them into a single multidimensional cube.
    for ref_hour in [3, 12]:
        for forecast_period in [6, 12, 18, 24]:
            if ref_hour == frt_hour and forecast_period == forecast_period_hour:
                # Only the first site carries the non-unity scaling factor.
                data = np.array((scaling_factor, 1), dtype=np.float32)
            else:
                data = np.ones(2, dtype=np.float32)
            fp_coord = AuxCoord(
                np.array(
                    forecast_period * SECONDS_IN_HOUR,
                    dtype=TIME_COORDS["forecast_period"].dtype,
                ),
                "forecast_period",
                units=TIME_COORDS["forecast_period"].units,
            )
            # The reference-time-hour coord deliberately borrows the
            # forecast_period dtype/units: both are durations in seconds.
            frth_coord = AuxCoord(
                np.array(
                    ref_hour * SECONDS_IN_HOUR,
                    dtype=TIME_COORDS["forecast_period"].dtype,
                ),
                long_name="forecast_reference_time_hour",
                units=TIME_COORDS["forecast_period"].units,
            )
            cube = build_spotdata_cube(
                data,
                "scaled_vertical_displacement",
                "1",
                altitude,
                latitude,
                longitude,
                wmo_id,
                scalar_coords=[fp_coord, frth_coord],
            )
            cubelist.append(cube)
    return cubelist.merge_cube()
@pytest.mark.parametrize("wmo_id", [True, False])
@pytest.mark.parametrize("forecast_period", [6, 18])
@pytest.mark.parametrize("frt_hour", [3, 12])
@pytest.mark.parametrize("scaling_factor", [0.99, 1.01])
@pytest.mark.parametrize("forecast_period_offset", [0, -1, -5])
@pytest.mark.parametrize("frt_hour_offset", [0, 1, 4])
def test_apply_dz_rescaling(
    wmo_id,
    forecast_period,
    frt_hour,
    forecast_period_offset,
    scaling_factor,
    frt_hour_offset,
):
    """Test the ApplyDzRescaling plugin.

    wmo_id checks that the plugin site_id_coord behaves correctly.
    forecast_period and frt_hour (hours) control which element of scaling_factor cube
    contains the scaling_factor value.
    forecast_period_offset (hours) adjusts the forecast period coord on the forecast
    cube to ensure the plugin always snaps to the next largest forecast_time when the
    precise point is not available.
    frt_hour_offset (hours) alters the forecast reference time hour within the forecast
    whilst the forececast reference time hour of the scaling factor remains the same.
    This checks that the a mismatch in the forecast reference time hour can still
    result in a match, if a leniency is specified.
    """
    # The forecast's reference hour is shifted by the offset; modulo keeps it
    # a valid hour of day.
    forecast_reference_time = f"20170101T{(frt_hour-frt_hour_offset) % 24:02d}00Z"
    forecast = [10.0, 20.0, 30.0]
    # Only the first site is expected to be rescaled (see the helper).
    expected_data = np.array(forecast).repeat(2).reshape(3, 2)
    expected_data[:, 0] *= scaling_factor
    validity_time = (
        pd.Timestamp(forecast_reference_time)
        + pd.Timedelta(hours=forecast_period + forecast_period_offset)
    ).strftime("%Y%m%dT%H%MZ")
    forecast = _create_forecasts(
        forecast_reference_time,
        validity_time,
        forecast_period + forecast_period_offset,
        forecast,
    )
    scaling_factor = _create_scaling_factor_cube(
        frt_hour, forecast_period, scaling_factor
    )
    kwargs = {}
    if not wmo_id:
        # Exercise a non-default site id coordinate name.
        forecast.coord("wmo_id").rename("station_id")
        scaling_factor.coord("wmo_id").rename("station_id")
        kwargs["site_id_coord"] = "station_id"
    kwargs["frt_hour_leniency"] = abs(frt_hour_offset)
    plugin = ApplyDzRescaling(**kwargs)
    result = plugin(forecast, scaling_factor)
    assert isinstance(result, Cube)
    np.testing.assert_allclose(result.data, expected_data, atol=1e-4, rtol=1e-4)
def test_use_correct_time():
    """Test the ApplyDzRescaling plugin uses the exact forecast reference time
    if it is available, rather than selecting another time within the leniency
    range.

    In this test a large leniency is used that could select the 03Z FRT, but
    the 12Z FRT should be used. The scaling factors for the two FRTs are
    different, so the data test ensures that the 12Z scaling factor has been
    used.
    """
    forecast_reference_time = "20170101T1200Z"
    forecast_period = 6
    forecast = [10.0, 20.0, 30.0]
    scaling_factor = 0.99
    # Expectation uses the 12Z scaling factor applied to the first site only.
    expected_data = np.array(forecast).repeat(2).reshape(3, 2)
    expected_data[:, 0] *= scaling_factor
    validity_time = (
        pd.Timestamp(forecast_reference_time) + pd.Timedelta(hours=forecast_period)
    ).strftime("%Y%m%dT%H%MZ")
    forecast = _create_forecasts(
        forecast_reference_time, validity_time, forecast_period, forecast,
    )
    scaling_factor = _create_scaling_factor_cube(12, forecast_period, scaling_factor)
    # Perturb the 03Z entry so that picking it (wrongly) would change the data
    # and fail the allclose comparison below.
    scaling_factor.data[0, 0, 0] = scaling_factor.data[0, 0, 0].copy() + 0.01
    kwargs = {}
    kwargs["frt_hour_leniency"] = abs(9)
    plugin = ApplyDzRescaling(**kwargs)
    result = plugin(forecast, scaling_factor)
    assert isinstance(result, Cube)
    np.testing.assert_allclose(result.data, expected_data, atol=1e-4, rtol=1e-4)
def test_mismatching_sites():
    """Check a ValueError is raised when forecast and scaling-factor sites differ."""
    lead_time_hours = 6
    frt = "20170101T0300Z"
    vt = (pd.Timestamp(frt) + pd.Timedelta(hours=lead_time_hours)).strftime(
        "%Y%m%dT%H%MZ"
    )
    forecast = _create_forecasts(frt, vt, lead_time_hours, [10, 20, 30])
    scaling_factor = _create_scaling_factor_cube(3, lead_time_hours, 1.0)
    # Slice the second site off the scaling factors so the site lists disagree.
    with pytest.raises(ValueError, match="The mismatched sites are: {'00002'}"):
        ApplyDzRescaling()(forecast, scaling_factor[..., :1])
@pytest.mark.parametrize(
    "forecast_period,frt_hour,exception",
    [
        # Forecast period beyond any available in the scaling factor cube.
        (25, 3, "forecast period greater than or equal to 25"),
        # Reference hour outside the leniency of the available 3Z/12Z hours.
        (7, 1, "forecast reference time hour equal to 1"),
    ],
)
def test_no_appropriate_scaled_dz(forecast_period, frt_hour, exception):
    """Test an exception is raised if no appropriate scaled version of the difference
    in altitude is available."""
    forecast_reference_time = f"20170101T{frt_hour:02}00Z"
    validity_time = (
        pd.Timestamp(forecast_reference_time) + pd.Timedelta(hours=forecast_period)
    ).strftime("%Y%m%dT%H%MZ")
    forecast = _create_forecasts(
        forecast_reference_time, validity_time, forecast_period, [10, 20, 30]
    )
    scaling_factor = _create_scaling_factor_cube(3, forecast_period, 1.0)
    with pytest.raises(ValueError, match=exception):
        ApplyDzRescaling()(forecast, scaling_factor)
| metoppv/improver | improver_tests/calibration/dz_rescaling/test_apply_dz_rescaling.py | test_apply_dz_rescaling.py | py | 9,295 | python | en | code | 95 | github-code | 90 |
13622530898 | #! /usr/bin/python
from subprocess import Popen, PIPE
from PSRpy.tempo import read_resid2
import numpy as np
import sys
def write_TOAs_to_file(
    toas,
    toa_uncertainties,
    frequency_channels,
    n_epochs,
    n_channels_per_epoch,
    observatory_code = "@",
    output_file="simulated.tim"
):
    """
    Writes simulated TOA data to an ASCII file, assuming Parkes TOA format.

    Args:
        toas: per-epoch arrival times (MJD), indexed by epoch.
        toa_uncertainties: per-epoch TOA uncertainties (microseconds).
        frequency_channels: channel frequencies (MHz), indexed by channel.
        n_epochs: number of epochs to write.
        n_channels_per_epoch: number of channels written per epoch.
        observatory_code: TEMPO observatory site code (default "@", barycentre).
        output_file: destination path (default "simulated.tim").

    Returns:
        0 on success (kept for backwards compatibility with callers).
    """
    # `with` guarantees the file is closed even if a write fails; the
    # original left the handle open on any exception.
    with open(output_file, "w") as fout:
        fout.write("MODE 1\n\n")
        for ii in range(n_epochs):
            for jj in range(n_channels_per_epoch):
                # Same TOA/uncertainty for every channel of an epoch; only
                # the frequency column varies across channels.
                line = " {0:24s} {1:6.4f} {2:20.13f} {3:7.2f} {4:7.2f} {5:>8s}\n".format(
                    "fake_data.fits",
                    frequency_channels[jj],
                    toas[ii],
                    0.,
                    toa_uncertainties[ii],
                    observatory_code
                )
                fout.write(line)
    return 0
def simulate_TOAs(
    parfile,
    bandwidth = 400.,
    central_frequency = 600.,
    epoch_start = 58800.,
    epoch_finish = 58900.,
    jitter_epoch = 5,
    mask_fraction_frequency = 0.1,
    mean_toa_uncertainty = 10.,
    n_epochs = 2,
    n_channels_per_epoch = 1024,
    n_pulses_per_epoch = 1,
    observatory_code = "@",
    output_file = "simulated_toas.tim",
    rms_residual=5.,
    time_range = 365.25,
    use_tempo=True,
    use_tempo2=False
):
    """
    Uses an input parameter file to generate TOAs given a variety of configurable inputs.

    Parameters
    ----------
    parfile : str
        Name of parfile in TEMPO/TEMPO2 format.
    bandwidth : float
        Bandwidth of desired receiver (MHz).
    central_frequency : float
        Central frequency of desired receiver (MHz).
    epoch_start, epoch_finish : float
        Starting / ending MJD for the simulation.
    jitter_epoch : int
        Maximum number of days by which each simulated epoch is randomly shifted.
    mean_toa_uncertainty : float
        Mean value of TOA uncertainty, in microseconds.
    n_epochs : int
        Number of observing epochs across the specified time range.
    n_channels_per_epoch : int
        Number of frequency channels across the desired band.
    n_pulses_per_epoch : int
        Number of pulses per epoch.
    observatory_code : str
        Observatory site code written to the .tim files.
    output_file : str
        Destination of the final simulated .tim file.
    rms_residual : float
        RMS of the white noise added to the final TOAs, in microseconds.

    Returns
    -------
    int
        0 on success; the simulated TOAs are written to ``output_file``.

    Notes
    -----
    Requires the ``tempo`` executable on the PATH; intermediate files
    ``simulated_toas_orig.tim`` / ``simulated_toas_corrected.tim`` and
    ``resid2.tmp`` are created in the working directory.
    ``mask_fraction_frequency``, ``time_range``, ``use_tempo`` and
    ``use_tempo2`` are currently unused.
    """
    n_toas_total = n_epochs * n_pulses_per_epoch * n_channels_per_epoch

    print("Simulating a total of {0} TOAs...".format(n_toas_total))
    print("... number of epochs: {0}".format(n_epochs))
    print("... number of channels per epoch: {0}".format(n_channels_per_epoch))
    # Bug fix: this line previously printed n_channels_per_epoch.
    print("... number of pulses per epoch: {0}".format(n_pulses_per_epoch))

    # first, simulate rough per-epoch timestamps based on configuration parameters.
    pulse_mjds = np.linspace(epoch_start, epoch_finish, num=n_epochs)
    pulse_mjds += np.random.uniform(-jitter_epoch, jitter_epoch, n_epochs)
    toa_uncertainties = np.fabs(np.random.normal(0., 1., n_epochs)) * rms_residual + mean_toa_uncertainty

    # next, generate the array of frequency channels based on configuration parameters.
    frequency_lower = central_frequency - bandwidth / 2 * (1 - 1 / n_channels_per_epoch)
    frequency_upper = central_frequency + bandwidth / 2 * (1 - 1 / n_channels_per_epoch)
    frequency_channels = np.linspace(frequency_lower, frequency_upper, n_channels_per_epoch)

    # write original, pre-correction TOAs to a file.
    write_TOAs_to_file(pulse_mjds, toa_uncertainties, frequency_channels, n_epochs,
                       n_channels_per_epoch, observatory_code=observatory_code,
                       output_file="simulated_toas_orig.tim")

    # now, run tempo on these data.
    cmd = ['tempo', '-f', parfile, "simulated_toas_orig.tim"]
    cmd_call = Popen(cmd, stdout=PIPE)
    output, error = cmd_call.communicate()

    # Iteratively absorb the timing-model structure: subtract the post-fit
    # residuals from the TOAs and refit (3 passes).
    # NOTE(review): read_resid2 presumably returns one residual per TOA row;
    # with n_channels_per_epoch > 1 that would not align with the per-epoch
    # pulse_mjds array — confirm intended usage is one channel per epoch.
    for kk in range(3):
        # load in output data from the previous tempo run.
        toa_data, _ = read_resid2("resid2.tmp")
        corrections = toa_data["residuals"] / 86400.

        # now use the post-fit residuals as corrections, and write a new .tim file.
        pulse_mjds -= corrections
        write_TOAs_to_file(pulse_mjds, toa_uncertainties, frequency_channels, n_epochs,
                           n_channels_per_epoch, observatory_code=observatory_code,
                           output_file="simulated_toas_corrected.tim")

        # now, run tempo on these data.
        cmd = ['tempo', '-f', parfile, "simulated_toas_corrected.tim"]
        cmd_call = Popen(cmd, stdout=PIPE)
        output, error = cmd_call.communicate()

    # now, add white noise to the corrected data and write the final file.
    # Bug fix: the noise vector must match pulse_mjds (one entry per epoch);
    # the original drew n_toas_total samples, raising a broadcasting error
    # whenever n_pulses_per_epoch * n_channels_per_epoch > 1.
    pulse_mjds += np.random.normal(0., 1., n_epochs) * rms_residual * 1e-6 / 86400.
    write_TOAs_to_file(pulse_mjds, toa_uncertainties, frequency_channels, n_epochs,
                       n_channels_per_epoch, observatory_code=observatory_code,
                       output_file=output_file)

    # clean up intermediate files.
    cmd = ['rm', 'simulated_toas_orig.tim', 'simulated_toas_corrected.tim']
    cmd_call = Popen(cmd, stdout=PIPE)
    output, error = cmd_call.communicate()

    return 0
| emmanuelfonseca/PSRpy | PSRpy/simulate/simulate_toas.py | simulate_toas.py | py | 5,643 | python | en | code | 2 | github-code | 90 |
20971826245 | from typing import List
from boto3 import client
def list_s3_contents(bucket_name: str, prefix: str) -> List[str]:
    """Return the keys of every object in *bucket_name* under *prefix*.

    Follows list_objects_v2 pagination (AWS truncates each response at 1000
    keys) so the complete listing is returned.  Assumes AWS credentials are
    configured externally (boto config / environment / instance role).

    Bug fixes vs. the original: the continuation requests no longer add a
    ``Delimiter="/"`` that the first request lacked (which changed the result
    set mid-pagination), pages without a ``Contents`` key no longer raise,
    and the stray debug prints of whole responses were removed.
    """
    s3_conn = client('s3')
    file_list: List[str] = []
    s3_result = s3_conn.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
    while True:
        for entry in s3_result.get('Contents', []):
            file_list.append(entry['Key'])
        # IsTruncated means AWS withheld further keys; keep paging.
        if not s3_result.get('IsTruncated'):
            break
        s3_result = s3_conn.list_objects_v2(
            Bucket=bucket_name,
            Prefix=prefix,
            ContinuationToken=s3_result['NextContinuationToken'],
        )
    print(f"List count = {len(file_list)}")
    return file_list
| 0x2539/simpleCI | src/screenshots_s3/s3_utils.py | s3_utils.py | py | 1,021 | python | en | code | 1 | github-code | 90 |
12093058834 | # -*- coding: utf-8 -*-
"""
File script_note.py
@author:ZhengYuwei
"""
import tensorflow as tf
def visual_meta_with_tensorboard():
    """Inspect the network structure stored in TensorFlow checkpoint/meta
    files by restoring the graph and exporting it as a TensorBoard event
    file under ``logs/`` (view with ``tensorboard --logdir logs``).

    NOTE(review): uses the TF1-style API (``tf.Session``,
    ``tf.summary.FileWriter``); assumes tensorflow 1.x is installed.
    """
    sess = tf.Session()
    saver = tf.train.import_meta_graph('model.ckpt.meta')  # load meta
    saver.restore(sess, 'model.ckpt')  # load ckpt
    writer = tf.summary.FileWriter(logdir='logs', graph=tf.get_default_graph())  # write to event
    writer.flush()
    return
| zheng-yuwei/YOLOv3-tensorflow | utils/script_note.py | script_note.py | py | 483 | python | en | code | 5 | github-code | 90 |
18148803059 | n = int(input())
p_taro = 0
p_hanako = 0
for i in range(n):
taro, hanako = map(str, input().split())
cards = tuple(sorted((taro, hanako)))
#print(cards)
if taro == hanako:
p_taro += 1
p_hanako += 1
else:
if cards == (taro, hanako):
p_hanako += 3
#print("hanako win")
else:
p_taro += 3
#print("taro win")
print(p_taro, p_hanako) | Aasthaengg/IBMdataset | Python_codes/p02421/s827158192.py | s827158192.py | py | 354 | python | en | code | 0 | github-code | 90 |
306016764 | """Module containing the tests for the default scenario."""
# Standard Python Libraries
import os
# Third-Party Libraries
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
# Distribution (apt) packages the role is expected to install on the host.
@pytest.mark.parametrize(
    "pkg",
    [
        "crackmapexec",
        "dnsutils",
        "exploitdb",
        "eyewitness",
        "flameshot",
        "kerberoast",
        "gobuster",
        "libreoffice",
        "mimikatz",
        "mono-complete",
        "nikto",
        "powershell-empire",
        "powersploit",
        "responder",
        "seclists",
        "sqlmap",
        "sublist3r",
        "veil",
    ],
)
def test_packages(host, pkg):
    """Test that appropriate packages were installed."""
    assert host.package(pkg).is_installed
# Python packages the role installs via pip3 (checked against pip's listing).
@pytest.mark.parametrize(
    "pkg",
    ["mitm6"],
)
def test_pip_packages(host, pkg):
    """Test that appropriate pip packages were installed."""
    assert pkg in host.pip_package.get_packages(pip_path="pip3")
# Tool checkouts expected under /tools.  The parameter was renamed from
# ``dir`` to ``tool_dir`` because ``dir`` shadows the Python builtin.
@pytest.mark.parametrize(
    "tool_dir",
    [
        "aquatone",
        "CACTUSTORCH",
        "checkpwnedemails",
        "datapipe",
        "demiguise",
        "dirsearch",
        "dns-profile-randomizer",
        "DomainTrustExplorer",
        "Egress-Assess",
        "ftpenum",
        "GhostPack/Lockless",
        "GhostPack/Rubeus",
        "GhostPack/SafetyKatz",
        "GhostPack/Seatbelt",
        "GhostPack/SharpDPAPI",
        "GhostPack/SharpDump",
        "GhostPack/SharpRoast",
        "GhostPack/SharpUp",
        "GhostPack/SharpWMI",
        "gnmap-parser",
        "Hasher",
        "ImpDump",
        "Internal-Monologue",
        "KeeThief",
        "mikto",
        "Misc",
        "morphHTA",
        "MS17-010",
        "nlzr",
        "PowerTools",
        "PowerUpSQL",
        "RandomPS-Scripts",
        "SessionGopher",
        "SharpShooter",
        "shellshocker-pocs",
        "SimplyEmail",
        "SimplyTemplate",
        "sshenum",
        "TikiTorch",
        "ysoserial",
    ],
)
def test_directories(host, tool_dir):
    """Test that appropriate directories were created and are non-empty."""
    dir_full_path = f"/tools/{tool_dir}"
    directory = host.file(dir_full_path)
    assert directory.exists
    assert directory.is_directory
    # Make sure that the directory is not empty
    assert host.run_expect([0], f'[ -n "$(ls -A {dir_full_path})" ]')
def test_bsp_installed(host):
    """Verify the Burp Suite Pro install directory exists and is populated."""
    install_path = "/usr/local/BurpSuitePro"
    install_dir = host.file(install_path)
    assert install_dir.exists
    assert install_dir.is_directory
    # An empty directory would mean the installer ran but deployed nothing.
    assert host.run_expect([0], f'[ -n "$(ls -A {install_path})" ]')
| cisagov/ansible-role-kali | molecule/default/tests/test_default.py | test_default.py | py | 2,811 | python | en | code | 9 | github-code | 90 |
25293959382 | # Função para verificar notas
def notas(*num, sit=False):
    """Summarise a student's grades.

    Args:
        num: one or more grades (at least one is required).
        sit: when True, also include the pass/fail status under the
            ``'situação'`` key.

    Returns:
        dict with 'quantidade' (count), 'maior' (highest), 'menor' (lowest),
        'media' (mean) and, when ``sit`` is True, 'situação'
        ('Reprovado' if mean < 5, 'Recuperação' if mean < 7, else 'Aprovado').

    Raises:
        ValueError: if no grades are given (via ``max``/``min`` on an
            empty sequence).
    """
    notageral = {
        'quantidade': len(num),
        'maior': max(num),
        'menor': min(num),
        'media': sum(num) / len(num),
    }
    if sit:
        # Classify only when requested; thresholds: < 5 fail, < 7 make-up.
        if notageral['media'] < 5:
            notageral['situação'] = 'Reprovado'
        elif notageral['media'] < 7:
            notageral['situação'] = "Recuperação"
        else:
            notageral['situação'] = 'Aprovado'
    return notageral
# Demo run: print a bold, starred banner, then the summary for six grades.
print(f"\033[;1m{'Desafio 105 - Função lê notas':*^70}\033[m")
print(notas(3.5,9,6.5,9,7,7))
36843673446 | from rest_framework.test import APIClient
from django.urls import reverse
def test_workspace_membership_permission_by_slack_api_call(worker_user_mock, mocker):
    # A POST presenting Slack's bot user agent must pass the
    # workspace-membership permission check on a protected endpoint.
    random_protected_url = reverse("choose-actions")
    client = APIClient()
    client.credentials(HTTP_USER_AGENT="Slackbot 1.0")
    # Stub outbound HTTP so the permission check never hits Slack's API.
    mocker.patch("requests.get")
    response = client.post(
        random_protected_url,
        {
            "user_name": worker_user_mock["username"],
            "channel_id": "D03MK2ADT29",
        },
    )
    assert response.status_code == 200
| COXIT-CO/lannister_bot | frontend/tests/slack/test_external_api_calls.py | test_external_api_calls.py | py | 548 | python | en | code | 0 | github-code | 90 |
26194617115 | from django.conf import settings
from django.db import models
import uuid
class Project(models.Model):
    # A software project created and owned by ``author``.
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.CASCADE,
                               related_name='user_projects')
    name = models.CharField(max_length=128)
    description = models.TextField(max_length=2048, blank=True)
    # Target platform; the lowercase slug is what gets stored.
    type = models.CharField(max_length=15,
                            choices=[('back-end', 'back-end'),
                                     ('front-end', 'front-end'),
                                     ('ios', 'iOS'),
                                     ('android', 'Android')
                                     ])
    # Audit timestamps: set on creation / every save respectively.
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
class Contributor(models.Model):
    # Through-model linking a user to a project they contribute to.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE,
                             related_name='user_contribution_projects')
    project = models.ForeignKey(Project,
                                on_delete=models.CASCADE,
                                related_name='project_contributors',)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    class Meta:
        # A user can be registered on a given project only once.
        unique_together = ('user', 'project')
class Issue(models.Model):
    # A tracked work item (bug / feature / task) belonging to one project.
    project = models.ForeignKey(Project,
                                on_delete=models.CASCADE,
                                related_name='project_issues')
    name = models.CharField(max_length=128)
    description = models.TextField(max_length=2048,
                                   blank=True)
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.CASCADE,
                               related_name='user_issues')
    # Workflow state; new issues default to the backlog ('to-do').
    status = models.CharField(max_length=15,
                              choices=[('to-do', 'To Do'),
                                       ('in-progress', 'In Progress'),
                                       ('finished', 'Finished')],
                              default='to-do',
                              blank=True)
    priority = models.CharField(max_length=15,
                                choices=[('low', 'LOW'),
                                         ('medium', 'MEDIUM'),
                                         ('high', 'HIGH')],
                                blank=True)
    # Optional assignee, independent of the issue's author.
    assigned_to = models.ForeignKey(settings.AUTH_USER_MODEL,
                                    on_delete=models.CASCADE,
                                    related_name='user_assigned_issues',
                                    blank=True,
                                    null=True)
    tag = models.CharField(max_length=15,
                           choices=[('bug', 'BUG'),
                                    ('feature', 'FEATURE'),
                                    ('task', 'TASK')],
                           blank=True)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
class Comment(models.Model):
    # Discussion entry attached to an issue; uses a non-sequential UUID
    # primary key instead of an auto-increment id.
    uuid = models.UUIDField(primary_key=True,
                            default=uuid.uuid4,
                            editable=False)
    issue = models.ForeignKey(Issue,
                              on_delete=models.CASCADE,
                              related_name='issue_comments')
    description = models.TextField(max_length=2048)
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.CASCADE,
                               related_name='user_comments')
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.description
| chpancrate/ocrpy_project10 | support/models.py | models.py | py | 3,970 | python | en | code | 0 | github-code | 90 |
12078344534 | import pytest
import torch
from bayes_dip.data import get_ray_trafo, get_kmnist_testset, SimulatedDataset
from bayes_dip.dip import DeepImagePriorReconstructor
from bayes_dip.probabilistic_models import get_default_unet_gaussian_prior_dicts, ParameterCov, NeuralBasisExpansion, MatmulNeuralBasisExpansion, ImageCov, MatmulObservationCov, ObservationCov
@pytest.fixture(scope='session')
def observation_cov_and_matmul_observation_cov():
    # Build a small KMNIST CT setup once per session.  Both covariance
    # objects are derived from the same network, parameter covariance and
    # ray transform, so the test below can compare them directly.
    dtype = torch.float32
    device = 'cpu'
    kwargs = {
        'angular_sub_sampling': 1, 'im_shape': (28, 28), 'num_angles': 10, 'impl': 'astra_cpu'}
    ray_trafo = get_ray_trafo('kmnist', kwargs=kwargs)
    ray_trafo.to(dtype=dtype, device=device)
    # Simulate one noisy observation (fixed seed) and use its FBP as the
    # network input.
    image_dataset = get_kmnist_testset()
    dataset = SimulatedDataset(
        image_dataset, ray_trafo,
        white_noise_rel_stddev=0.05,
        use_fixed_seeds_starting_from=1,
        device=device)
    _, _, filtbackproj = dataset[0]
    filtbackproj = filtbackproj[None]  # add batch dim
    # Small U-Net DIP model; torch_manual_seed pins the weight init.
    net_kwargs = {
        'scales': 3,
        'channels': [8, 8, 8],
        'skip_channels': [0, 1, 1],
        'use_norm': False,
        'use_sigmoid': True,
        'sigmoid_saturation_thresh': 15}
    reconstructor = DeepImagePriorReconstructor(
        ray_trafo, torch_manual_seed=1,
        device=device, net_kwargs=net_kwargs)
    prior_assignment_dict, hyperparams_init_dict = get_default_unet_gaussian_prior_dicts(
        reconstructor.nn_model)
    parameter_cov = ParameterCov(
        reconstructor.nn_model,
        prior_assignment_dict,
        hyperparams_init_dict,
        device=device
    )
    # Covariance chain built on NeuralBasisExpansion.
    neural_basis_expansion = NeuralBasisExpansion(
        nn_model=reconstructor.nn_model,
        nn_input=filtbackproj,
        ordered_nn_params=parameter_cov.ordered_nn_params,
        nn_out_shape=filtbackproj.shape,
    )
    image_cov = ImageCov(
        parameter_cov=parameter_cov,
        neural_basis_expansion=neural_basis_expansion
    )
    observation_cov = ObservationCov(
        trafo=ray_trafo,
        image_cov=image_cov,
        device=device
    )
    # Parallel chain using the Matmul* variants, sharing parameter_cov.
    matmul_neural_basis_expansion = MatmulNeuralBasisExpansion(
        nn_model=reconstructor.nn_model,
        nn_input=filtbackproj,
        ordered_nn_params=parameter_cov.ordered_nn_params,
        nn_out_shape=filtbackproj.shape,
    )
    matmul_image_cov = ImageCov(
        parameter_cov=parameter_cov,
        neural_basis_expansion=matmul_neural_basis_expansion
    )
    matmul_observation_cov = MatmulObservationCov(
        trafo=ray_trafo,
        image_cov=matmul_image_cov,
        device=device
    )
    return observation_cov, matmul_observation_cov
def test_observation_cov_vs_matmul_observation_cov(observation_cov_and_matmul_observation_cov):
    # The matrix assembled from ObservationCov must agree elementwise with
    # the one produced by MatmulObservationCov for the same model.
    observation_cov, matmul_observation_cov = observation_cov_and_matmul_observation_cov
    observation_cov_assembled = observation_cov.assemble_observation_cov()
    matmul_observation_cov_assembled = matmul_observation_cov.get_matrix(
        apply_make_choleskable=True)
    assert torch.allclose(observation_cov_assembled, matmul_observation_cov_assembled)
| educating-dip/bayes_dip | tests/test_observation_cov.py | test_observation_cov.py | py | 3,284 | python | en | code | 2 | github-code | 90 |
18168477719 | #ABC 175 C
x, k, d = map(int, input().split())
x = abs(x)
syou = x // d
amari = x % d
if k <= syou:
ans = x - (d * k)
else:
if (k - syou) % 2 == 0: #残りの動ける数が偶数
ans = amari
else:#残りの動ける数が奇数
ans = abs(amari - d)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02584/s180422158.py | s180422158.py | py | 293 | python | ja | code | 0 | github-code | 90 |
6087254473 | import reverse_mapping as revm
import mbuild as mb
import mdtraj as md
import time
import sys
sys.setrecursionlimit(10000)
def from_traj(compound, traj):
    """Populate *compound* with one sub-Compound per residue of *traj*.

    Copies each atom's name and position, recreates the bond graph, and
    stores the trajectory's box lengths as the compound's periodicity.

    NOTE(review): positions are taken from ``traj.xyz[-1]`` (the last
    frame) — confirm that is intended for multi-frame trajectories.
    """
    # Maps each mdtraj atom to the mb.Particle created for it, so bonds can
    # be re-established afterwards.
    atom_mapping = dict()
    for residue in traj.top.residues:
        res_compound = mb.compound.Compound()
        for atom in residue.atoms:
            new_atom = mb.Particle(name=str(atom.name),
                                   pos=traj.xyz[-1, atom.index])
            res_compound.add(new_atom)
            atom_mapping[atom] = new_atom
        res_compound.name = '{0}'.format(residue.name)
        compound.add(res_compound)
    # Recreate every bond using the particle mapping built above.
    for mdtraj_atom1, mdtraj_atom2 in traj.topology.bonds:
        atom1 = atom_mapping[mdtraj_atom1]
        atom2 = atom_mapping[mdtraj_atom2]
        compound.add_bond((atom1, atom2))
    compound.periodicity = traj.unitcell_lengths[0]
    return compound
# --- Driver script: reverse-map a coarse-grained frame to atomistic. ---
# Load in your CG system (only the last frame of the trajectory is used).
traj = md.load('cg-traj.xtc', top='cg-traj.gro')[-1]
print('Loaded CG frame')
"""
# CG length conversion
traj.xyz *= .6
traj.unitcell_lengths *= .6
"""
# get rid of waters
#traj = traj.atom_slice(traj.top.select('name water'))
# Now select the residues/atoms i want to keep
atoms_i_want = []
# Put everything in the box
#anchor = traj.top.find_molecules()
traj = traj.center_coordinates()
#traj.image_molecules(inplace=True, anchor_molecules=anchor)
"""
# get the mhead beads
heads = traj.top.select('name " mhead2"')
waters = traj.top.select('name " water"')
# if the mhead is within the first quadrant we add it to the mix
for res, head in enumerate(heads):
    pos = traj.xyz[0,head,:]
    if (pos[0] < traj.unitcell_lengths[0,0]/16) and (pos[1] < traj.unitcell_lengths[0,1]/16):
        atoms_i_want += list(traj.top.select('residue {}'.format(res)))
for res, water in enumerate(waters):
    pos = traj.xyz[0,water,:]
    if (pos[0] < traj.unitcell_lengths[0,0]/16) and (pos[1] < traj.unitcell_lengths[0,1]/16):
        atoms_i_want += list(traj.top.select('index {}'.format(water)))
"""
print('Collected atoms')
"""
# cut out only the atoms i want to keep
traj = traj.atom_slice(atoms_i_want)
for ind in range(traj.top.n_atoms):
    traj.top.atom(ind).name = traj.top.atom(ind).name[0]
# re-center the molecules
traj = traj.center_coordinates()
#traj = traj.image_molecules(traj.top.find_molecules())
"""
# make our cg system into a mBuild compound
cg = mb.compound.Compound()
cg = from_traj(cg, traj)
cg.translate_to(pos=[0,0,0])
# Give names to the subcomponents
# (heuristic: residues with more than 2 beads become 'ucer3', others 'water')
for index, subcompound in enumerate(cg.children):
    if subcompound.n_particles > 2:
        cg.children[index].name = 'ucer3'
    else:
        cg.children[index].name = 'water'
# Save original cg structure (residue labels: 'c' for ucer3, 'w' for water)
resnames = []
for child in cg.children:
    if child.name == 'ucer3':
        resnames += ['c']
    if child.name == 'water':
        resnames += ['w']
cg.save('my_cg.gro', box=cg.boundingbox, residues=resnames)
# Load in atomistic target structures:
# 1. UCER3
ucer3_md = md.load('ucer3.gro')
ucer3 = mb.Compound()
ucer3.from_trajectory(ucer3_md)
# Bond heavy atoms (C/N/O/P/S) to all particles within 0.16 (presumably nm).
for i in ucer3:
    if i.name[0] in {'C', 'N', 'O', 'P', 'S'}:
        pir = ucer3.particles_in_range(i, .16)
        for j in pir[1:]:
            ucer3.add_bond((i, j))
# 2. Water
water_md = md.load('water.gro')
water = mb.Compound()
water.from_trajectory(water_md)
for i in water:
    if i.name[0] in {'C', 'N', 'O', 'P', 'S'}:
        pir = water.particles_in_range(i, .16)
        for j in pir[1:]:
            water.add_bond((i, j))
# put the atomistic target structures into the dictionary
target = dict()
target['ucer3'] = ucer3
target['water'] = water
print('Loaded target structures')
# get the mapping moieties for these molecule types:
# (each inner list holds the atomistic atom indices that collapse into one
# CG bead, listed in bead order)
mapping_moieties = dict()
# 1. UCER3
mapping_moieties['ucer3'] = [[67, 68, 69, 70, 71, 72, 73],
                             [58, 59, 60, 61, 62, 63, 64, 65, 66],
                             [49, 50, 51, 52, 53, 54, 55, 56, 57],
                             [40, 41, 42, 43, 44, 45, 46, 47, 48],
                             [31, 32, 33, 34, 35, 36, 37, 38, 39],
                             [22, 23, 24, 25, 26, 27, 28, 29, 30],
                             [13, 14, 15, 16, 17, 18, 19, 20, 21],
                             [4, 5, 6, 7, 8, 9, 10, 11, 12],
                             [0, 1, 2, 3],
                             [74, 75, 76, 77, 78, 81, 82],
                             [85, 86, 89, 90, 91, 92, 93, 94],
                             [95, 96, 97, 98, 99, 100, 101, 102, 103],
                             [104, 105, 106, 107, 108, 109, 110, 111, 112],
                             [113, 114, 115, 116, 117, 118, 119, 120, 121],
                             [122, 123, 124, 125, 126, 127, 128, 129, 130, 131],
                             [79, 80],
                             [83, 84],
                             [87, 88]]
# 2. Water
mapping_moieties['water'] = [[0, 1, 2]]
print('Starting reverse mapping on {} residues'.format(len(cg.children)))
# run reverse mapping and time it
start = time.time()
reverse_mapped = revm.reverse_map(coarse_grained=cg, mapping_moieties=mapping_moieties, target=target, solvent_name='water',
                                  sol_per_bead=4, sol_cutoff=2.0, parallel=True)
end = time.time()
# print and save.
print("reverse mapping took {} min or {} per residue.".format((end-start)/60, (end-start)/len(cg.children)))
reverse_mapped.translate_to(reverse_mapped.boundingbox.lengths / 2)
resnames = [child.name for child in reverse_mapped.children]
reverse_mapped.save('my_reverse_mapped.gro', box=reverse_mapped.boundingbox, residues=resnames)
| uppittu11/reverse_mapping | reverse_mapping/rigorous/test.py | test.py | py | 5,552 | python | en | code | 0 | github-code | 90 |
10021487072 | """
Anaflow subpackage providing miscellaneous tools.
Subpackages
^^^^^^^^^^^
.. currentmodule:: anaflow.tools
.. autosummary::
:toctree:
laplace
mean
special
coarse_graining
Functions
^^^^^^^^^
Annular mean
~~~~~~~~~~~~
.. currentmodule:: anaflow.tools.mean
Functions to calculate dimension dependent annular means of a function.
.. autosummary::
annular_fmean
annular_amean
annular_gmean
annular_hmean
annular_pmean
Coarse Graining solutions
~~~~~~~~~~~~~~~~~~~~~~~~~
.. currentmodule:: anaflow.tools.coarse_graining
Effective Coarse Graining conductivity/transmissivity solutions.
.. autosummary::
T_CG
K_CG
TPL_CG
Special
~~~~~~~
.. currentmodule:: anaflow.tools.special
Special functions.
.. autosummary::
step_f
specialrange
specialrange_cut
neuman2004_trans
aniso
Laplace
~~~~~~~
.. currentmodule:: anaflow.tools.laplace
Helping functions related to the laplace-transformation
.. autosummary::
get_lap
get_lap_inv
"""
from anaflow.tools.coarse_graining import K_CG, T_CG, TPL_CG
from anaflow.tools.laplace import get_lap, get_lap_inv
from anaflow.tools.mean import (
annular_amean,
annular_fmean,
annular_gmean,
annular_hmean,
annular_pmean,
)
from anaflow.tools.special import (
aniso,
neuman2004_trans,
specialrange,
specialrange_cut,
step_f,
)
__all__ = [
"get_lap",
"get_lap_inv",
"annular_fmean",
"annular_amean",
"annular_gmean",
"annular_hmean",
"annular_pmean",
"step_f",
"specialrange",
"specialrange_cut",
"neuman2004_trans",
"aniso",
"T_CG",
"K_CG",
"TPL_CG",
]
| GeoStat-Framework/AnaFlow | src/anaflow/tools/__init__.py | __init__.py | py | 1,662 | python | en | code | 33 | github-code | 90 |
18326598609 | n = int(input())
furui = [i for i in range(10**6+2)]
ans = 9999999999999
yakusuu = []
for i in range(1,int(n**0.5)+1+1):
if n%i == 0:
yakusuu.append(i)
for i in yakusuu:
ans = min(i+n//i,ans)
# print(i,ans)
print(ans-2)
| Aasthaengg/IBMdataset | Python_codes/p02881/s109314325.py | s109314325.py | py | 283 | python | en | code | 0 | github-code | 90 |
19255791705 | from sys import stdin
from collections import deque
def main():
    """BOJ 9205: for each test case, decide whether the destination is
    reachable from home when every leg (home -> store -> ... -> goal) may
    span at most 1000 in Manhattan distance; prints 'happy' or 'sad'."""
    # Deliberately shadows the imported sys.stdin: input is read from a
    # local test-case file instead of standard input.
    stdin = open('./test_case.txt', 'r')
    test_case = int(stdin.readline())
    for _ in range(test_case):
        queue = deque()
        positions = []
        num_of_stores = int(stdin.readline())
        home_pos = list(map(int, stdin.readline().split()))
        queue.append(home_pos)
        for _ in range(num_of_stores):
            store_pos = list(map(int, stdin.readline().split()))
            positions.append(store_pos)
        # The destination is treated as one more node in the search.
        destination = list(map(int, stdin.readline().split()))
        positions.append(destination)
        # BFS over the nodes reachable within 1000 per hop.
        while len(queue) != 0:
            x_pos, y_pos = queue.popleft()
            if x_pos == destination[0] and y_pos == destination[1]:
                print("happy")
                break
            # explore the not-yet-visited neighbouring nodes
            for idx, next_pos in enumerate(positions):
                if next_pos != -1:  # -1 marks an already-enqueued node
                    next_x_pos, next_y_pos = next_pos
                    distance = abs(next_x_pos - x_pos) + abs(next_y_pos - y_pos)
                    if abs(distance) <= 1000:
                        queue.append([next_x_pos, next_y_pos])
                        positions[idx] = -1
        # while-else: runs only when the loop ended without break,
        # i.e. the destination was never reached.
        else:
            print('sad')
if __name__ == '__main__':
    main()
9513504940 | import numpy as np
import matplotlib.pyplot as plt
class LSTM:
    """Single-cell LSTM trained with plain SGD.

    The model owns the weight vectors (one entry per input feature) and the
    accumulated gradients; per-timestep state lives in ``LSTM_instance``
    objects created lazily by :meth:`forward`.

    Bug fixes vs. the original:
    * ``optimize`` paired the candidate-input weights with the output-gate
      gradients (and vice versa); each weight now uses its own gradient.
    * gradient accumulators are zeroed at the start of each ``backward``
      pass instead of growing forever across epochs.
    * ``calc_loss`` now also accepts the legacy two-argument call used by
      callers, and sets ``self.loss`` so the progress prints work.
    * leftover debug prints in ``backward``/``optimize`` were removed.
    """

    def __init__(self, n_inputs):
        self.n_inputs = n_inputs
        # Candidate ("hidden") input weights for X and the previous output.
        self.weights_input_X = .1 * np.random.randn(n_inputs)
        self.weights_input_y = .1 * np.random.randn(n_inputs)
        self.bias_input = 0
        # Input-gate parameters.
        self.weights_input_gate_X = .1 * np.random.randn(n_inputs)
        self.weights_input_gate_y = .1 * np.random.randn(n_inputs)
        self.bias_input_gate = 0
        # Forget-gate parameters.
        self.weights_forget_gate_X = .1 * np.random.randn(n_inputs)
        self.weights_forget_gate_y = .1 * np.random.randn(n_inputs)
        self.bias_forget_gate = 0
        # Output-gate parameters.
        self.weights_output_gate_X = .1 * np.random.randn(n_inputs)
        self.weights_output_gate_y = .1 * np.random.randn(n_inputs)
        self.bias_output_gate = 0
        self.cell_state = np.zeros(n_inputs)
        self._zero_grads()
        self.instances = []

    def _zero_grads(self):
        """Reset every accumulated gradient to fresh zero arrays."""
        self.dvalues_weights_output_gate_X = np.zeros(self.n_inputs)
        self.dvalues_weights_output_gate_y = np.zeros(self.n_inputs)
        self.dvalues_bias_output_gate = np.zeros(self.n_inputs)
        self.dvalues_weights_forget_gate_X = np.zeros(self.n_inputs)
        self.dvalues_weights_forget_gate_y = np.zeros(self.n_inputs)
        self.dvalues_bias_forget_gate = np.zeros(self.n_inputs)
        self.dvalues_weights_input_gate_X = np.zeros(self.n_inputs)
        self.dvalues_weights_input_gate_y = np.zeros(self.n_inputs)
        self.dvalues_bias_input_gate = np.zeros(self.n_inputs)
        self.dvalues_weights_input_X = np.zeros(self.n_inputs)
        self.dvalues_weights_input_y = np.zeros(self.n_inputs)
        self.dvalues_bias_input = np.zeros(self.n_inputs)

    def tanh(self, inputs):
        return np.tanh(inputs)

    def tanh_derivative(self, inputs):
        return 1 - np.tanh(inputs) ** 2

    def sigmoid(self, inputs):
        return 1 / (1 + np.exp(-inputs))

    def sigmoid_derivative(self, inputs):
        return self.sigmoid(inputs) * (1 - self.sigmoid(inputs))

    def backward(self, y):
        """Accumulate gradients over all stored timesteps, then drop them."""
        self._zero_grads()
        for instance, y_true in zip(self.instances, y):
            dvalues_loss = self.loss_derivative(instance.output, y_true)
            # Output-gate path: loss -> output = gate_output * tanh(cell).
            self.dvalues_weights_output_gate_X += dvalues_loss * self.tanh(instance.cell_state) * self.sigmoid_derivative(instance.network_output) * instance.input
            self.dvalues_weights_output_gate_y += dvalues_loss * self.tanh(instance.cell_state) * self.sigmoid_derivative(instance.network_output) * instance.last_output
            self.dvalues_bias_output_gate += dvalues_loss * self.tanh(instance.cell_state) * self.sigmoid_derivative(instance.network_output)
            # The cell state feeds the forget gate, input gate and candidate.
            dvalues_cell_state = dvalues_loss * instance.gate_output * self.tanh_derivative(instance.cell_state)
            self.dvalues_weights_forget_gate_X += dvalues_cell_state * instance.last_cell_state * self.sigmoid_derivative(instance.network_forget) * instance.input
            self.dvalues_weights_forget_gate_y += dvalues_cell_state * instance.last_cell_state * self.sigmoid_derivative(instance.network_forget) * instance.last_output
            self.dvalues_bias_forget_gate += dvalues_cell_state * instance.last_cell_state * self.sigmoid_derivative(instance.network_forget)
            self.dvalues_weights_input_gate_X += dvalues_cell_state * instance.hidden_state * self.sigmoid_derivative(instance.network_input) * instance.input
            self.dvalues_weights_input_gate_y += dvalues_cell_state * instance.hidden_state * self.sigmoid_derivative(instance.network_input) * instance.last_output
            self.dvalues_bias_input_gate += dvalues_cell_state * instance.hidden_state * self.sigmoid_derivative(instance.network_input)
            self.dvalues_weights_input_X += dvalues_cell_state * instance.gate_input * self.tanh_derivative(instance.network_hidden) * instance.input
            self.dvalues_weights_input_y += dvalues_cell_state * instance.gate_input * self.tanh_derivative(instance.network_hidden) * instance.last_output
            self.dvalues_bias_input += dvalues_cell_state * instance.gate_input * self.tanh_derivative(instance.network_hidden)
        self.clear_instances()

    def optimize(self, learning_rate):
        """Apply one SGD step; every weight uses its own gradient."""
        self.weights_input_X -= learning_rate * self.dvalues_weights_input_X
        self.weights_input_y -= learning_rate * self.dvalues_weights_input_y
        self.bias_input -= learning_rate * self.dvalues_bias_input
        self.weights_input_gate_X -= learning_rate * self.dvalues_weights_input_gate_X
        self.weights_input_gate_y -= learning_rate * self.dvalues_weights_input_gate_y
        self.bias_input_gate -= learning_rate * self.dvalues_bias_input_gate
        self.weights_forget_gate_X -= learning_rate * self.dvalues_weights_forget_gate_X
        self.weights_forget_gate_y -= learning_rate * self.dvalues_weights_forget_gate_y
        self.bias_forget_gate -= learning_rate * self.dvalues_bias_forget_gate
        self.weights_output_gate_X -= learning_rate * self.dvalues_weights_output_gate_X
        self.weights_output_gate_y -= learning_rate * self.dvalues_weights_output_gate_y
        self.bias_output_gate -= learning_rate * self.dvalues_bias_output_gate

    def forward(self, X, y):
        """Run the sequence X through the cell, one LSTM_instance per step.

        ``y`` seeds the "previous output" chain: when fewer targets than
        inputs are supplied, each step's own output is appended and fed to
        the next step.

        NOTE(review): when instances already exist, len(X) MORE instances
        are appended (not just the shortfall); callers are expected to let
        backward()/clear_instances() empty the list between sequences.
        """
        if len(self.instances) < len(X):
            for _ in X:
                self.instances.append(LSTM_instance(self, self.n_inputs))
        self.outputs = []
        if len(y) == 1:
            # A single seed value (e.g. [0]) is wrapped so zip() feeds it
            # only to the first step.
            y = [y]
        for x_step, instance, prev_y in zip(X, self.instances, y):
            output = instance.forward(x_step, prev_y)
            if len(y) < len(X):
                y.append(output)
            self.outputs.append(output)
        return self.outputs

    def calc_loss(self, outputs_or_y_true, y_true=None):
        """Sum of per-timestep MSE losses over the stored instances.

        Backward compatible with both call styles: ``calc_loss(y_true)``
        and the legacy ``calc_loss(outputs, y_true)`` (the outputs argument
        is ignored — the instances already hold their outputs).
        """
        if y_true is None:
            y_true = outputs_or_y_true
        self.total_loss = 0
        for instance, true in zip(self.instances, y_true):
            self.total_loss += instance.calc_loss(true)
        self.loss = self.total_loss  # alias read by train()'s prints
        return self.total_loss

    def loss_derivative(self, y_pred, y_true):
        """Gradient of the squared error w.r.t. the prediction.

        NOTE(review): omits the 1/n factor that instance.calc_loss's
        np.mean applies — confirm that scaling is intentional.
        """
        self.dvalues_loss = -2 * (y_true - y_pred)
        return self.dvalues_loss

    def train(self, X, y, epochs, learning_rate):
        """Fit with full-batch SGD for ``epochs`` iterations, then print a
        final forward pass and its loss."""
        for epoch in range(epochs):
            print(epoch)
            self.forward(X, y)
            self.calc_loss(y)
            self.backward(y)
            print(self.loss, self.dvalues_weights_output_gate_X)
            self.optimize(learning_rate)
        output = self.forward(X, np.zeros_like(X))
        print(output)
        self.calc_loss(y)
        print(self.loss)
        self.clear_instances()

    def clear_instances(self):
        """Drop the per-timestep state built up by forward()."""
        self.instances = []
class LSTM_instance(LSTM):
def __init__(self, model, n_inputs):
super().__init__(n_inputs)
self.weights_input_X = model.weights_input_X
self.weights_input_y = model.weights_input_y
self.bias_input = model.bias_input
self.weights_input_gate_X = model.weights_input_gate_X
self.weights_input_gate_y = model.weights_input_gate_y
self.bias_input_gate = model.bias_input_gate
self.weights_forget_gate_X = model.weights_forget_gate_X
self.weights_forget_gate_y = model.weights_forget_gate_y
self.bias_forget_gate = model.bias_forget_gate
self.weights_output_gate_X = model.weights_output_gate_X
self.weights_output_gate_y = model.weights_output_gate_y
self.bias_output_gate = model.bias_output_gate
def forward(self, X, last_y):
self.input = X
self.last_output = last_y
self.network_input = self.input * self.weights_input_gate_X + self.last_output * self.weights_input_gate_y + self.bias_input_gate
self.gate_input = self.sigmoid(self.network_input)
self.network_forget = self.input * self.weights_forget_gate_X + self.last_output * self.weights_forget_gate_y + self.bias_forget_gate
self.gate_forget = self.sigmoid(self.network_forget)
self.network_output = self.input * self.weights_output_gate_X + self.last_output * self.weights_output_gate_y + self.bias_output_gate
self.gate_output = self.sigmoid(self.network_output)
self.network_hidden = self.input * self.weights_input_X + self.last_output * self.weights_input_y + self.bias_input
self.hidden_state = self.tanh(self.network_hidden)
self.last_cell_state = self.cell_state
self.cell_state = self.last_cell_state * self.gate_forget + self.hidden_state * self.gate_input
self.output = self.gate_output * self.tanh(self.cell_state)
self.dvalues_weights_output_gate_X *= 0
self.dvalues_weights_output_gate_y *= 0
self.dvalues_bias_output_gate *= 0
self.dvalues_weights_forget_gate_X *= 0
self.dvalues_weights_forget_gate_y *= 0
self.dvalues_bias_forget_gate *= 0
self.dvalues_weights_input_gate_X *= 0
self.dvalues_weights_input_gate_y *= 0
self.dvalues_bias_input_gate *= 0
self.dvalues_weights_input_X *= 0
self.dvalues_weights_input_y *= 0
self.dvalues_bias_input *= 0
return self.output
    def calc_loss(self, y_true):
        """Return (and cache in self.loss) the mean squared error between
        *y_true* and the most recent forward() output."""
        self.loss = np.mean((y_true - self.output) ** 2)
        return self.loss
# Toy dataset: X = [0.00, 0.01, ..., 0.99]; y is X shifted one step ahead.
X = []
for x in range(100):
    X.append([x/100])
y = []
for y_val in range(100):
    y.append([(y_val+1)/100])
lstm = LSTM(1)
output = lstm.forward(X, [0])
# Show the target sequence against the untrained network output.
plt.plot(range(100), X)
plt.plot(range(100), output)
plt.show()
# Crude training loop: 25 rounds of loss / backward / optimize.
for i in range(25):
    # NOTE(review): calc_loss above takes a single y_true argument; this
    # two-argument call only works if LSTM defines its own calc_loss —
    # confirm which class this script actually drives.
    lstm.calc_loss(output, y)
    lstm.backward(y)
    lstm.optimize(.1)
    output = lstm.forward(X, [0])
print(lstm.weights_forget_gate_X, lstm.instances[0].weights_forget_gate_X)
print(len(lstm.instances))
plt.plot(range(100), X)
plt.plot(range(100), output)
plt.show() | CANTSOAR/SimpleNeuralNet | lstmfromscratch.py | lstmfromscratch.py | py | 10,093 | python | en | code | 0 | github-code | 90 |
18672328169 | from django.urls import path
from .views import *
from rest_framework.routers import DefaultRouter
# URL namespace used in {% url 'customer:...' %} / reverse() lookups.
app_name = "customer"
urlpatterns = [
    # Registration endpoints.
    path('customer/', CustomerRegistrationView.as_view(), name='customer_registration'),
    path('surety/', SuretyRegistrationView.as_view(), name='surety_registration'),
    path('add_supplier' , add_supplier , name = "add_requested_supplier_for_current_customer"),
    # File download endpoints; <str:type> selects which document to serve.
    path('<int:c_id>/file/<str:type>' , customer_file , name = "customer_file"),
    path('all/<int:c_id>/file/<str:type>' , all_doc , name = "user_doc"),
    path('help' , Help , name ='customer_help_for_contract'),
    # Bulk customer import from an uploaded Excel sheet.
    path('add_excel', addcustomers ,name='add_excel'),
    path('calculator', calculator ,name='calculator'),
]
# REST routes for contracts (list/detail) appended below via the router.
router = DefaultRouter()
router.register('contracts', ContractViewSet, basename='contract')
urlpatterns += router.urls | khoji2001/Django-project | customer/urls.py | urls.py | py | 831 | python | en | code | 0 | github-code | 90 |
21735877711 | # 在开发时想要预判到所有的错误,还是有一定的难度
# Anticipating every possible error up front is hard during development,
# so unknown errors get a catch-all handler at the end.
try:
    # 1. Prompt the user for an integer.
    num = int(input("输入一个整数:"))
    # 2. Divide 8 by the user's input and print the result.
    result = 8 / num
    print(result)
# except ZeroDivisionError:  # error type 1
#     print("除0错误")  # handling code specific to error type 1
except ValueError:
    print("请输入正确整数")
except Exception as result:
    print("未知错误 %s" % result)
| niushufeng/Python_202006 | 算法代码/面向对象/异常/捕获未知错误.py | 捕获未知错误.py | py | 499 | python | zh | code | 3 | github-code | 90 |
27616785214 | import unittest
class PaymentTest(unittest.TestCase):
    """Smoke tests for the two payment flows (currently placeholders).

    Each test only prints which payment path it represents and asserts a
    constant True, so both always pass.
    """
    def test_paymentDolar(self):
        print("This is test payment by dolar")
        self.assertTrue(True)  # placeholder — no real payment logic yet
    def test_paymentTk(self):
        print("This is test payment by TK")
        self.assertTrue(True)  # placeholder — no real payment logic yet
if __name__=="__main__":
unittest.main() | nazmul-cse48/PYTHON_CODE_ALL | All_test_Suites/Package2/TC_paymentTest.py | TC_paymentTest.py | py | 329 | python | en | code | 0 | github-code | 90 |
13733372500 | # -*- coding: ISO-8859-1 # Encoding declaration -*-
# file: ctp_performance.py
#
# description
"""\n\n
grep abs msecs, ctp no, msecs this ctp out of given logfile
"""
import sys
import re
def grep_data(filename):
    """Scan *filename* for calcCtp trace lines and print CSV to stdout.

    Every matching line is emitted as "ctp_no;msecs_ctp;msecs_absolute".

    :param filename: path of the logfile to scan
    """
    rgx = re.compile(r'abs_msecs\: (\d+) trace.*end calcCtp Nr=(\d+).*elapsed msecs: (\d+)')
    # 'with' guarantees the file handle is closed; the original iterated an
    # anonymous open() and leaked the handle.
    with open(filename) as logfile:
        for line in logfile:
            hit = rgx.search(line)
            if hit:
                msecs_absolute, ctp_no, msecs_ctp = hit.groups()
                print("%s;%s;%s" % (ctp_no, msecs_ctp, msecs_absolute))
def main():
    """Entry point: grep the logfile named as the first CLI argument."""
    filename = sys.argv[1]
    grep_data(filename)
if __name__ == "__main__":
    try:
        main()
    except:
        # Bare except on purpose: print a marker, then re-raise so the
        # original traceback and non-zero exit status are preserved.
        print('Script failed')
        raise
| bbbkl/python | id_grabber/ctp_performance.py | ctp_performance.py | py | 798 | python | en | code | 0 | github-code | 90 |
34838778934 | from bs4 import BeautifulSoup
import urllib.request as urllib2
import random
import os
import sys
import requests
import time
# Two-letter prefixes AA..ZZ; each prefix has its own index pages on
# allacronyms.com and its own set of bookkeeping files below.
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
double_alphabet = []
for char_1 in alphabet:
    for char_2 in alphabet:
        double_alphabet.append(char_1+char_2)
temp_double_alphabet = [double_alphabet[0]] # AA
abbr_sense = {} # dictionary that stores abbrs-senses
# Prefixes already fully scraped by a previous run (resume support).
seen_alphabet = []
try:
    l = open("alphabet_covered.txt", 'r')
    for line in l:
        seen_alphabet.append(line[:-1])
    l.close()
except FileNotFoundError:
    pass
print(seen_alphabet)
# Re-open for appending: a prefix is logged here once it is finished.
l = open("alphabet_covered.txt", 'a')
# Main scrape loop: for each unseen two-letter prefix, walk its index
# pages, collect abbreviation-category links, then visit each category
# and harvest abbreviation/sense pairs into abbr_sense and the files.
for i in double_alphabet:
    if i not in seen_alphabet:
        #-----abbreviation pages we have already visited-----#
        seen_abbrs = []
        try:
            g = open("trackedabbrs_" + i +".txt", 'r')
            for line in g:
                seen_abbrs.append(line[:-1])
            g.close()
        except FileNotFoundError:
            pass
        #----------------------------------------------------#
        #----abbreviation category pages we have visited-----#
        seen_pages = []
        try:
            q = open("trackedpages_" + i +".txt", 'r')
            for line in q:
                seen_pages.append(line[:-1])
            q.close()
        except FileNotFoundError:
            pass
        #----------------------------------------------------#
        # Per-prefix output / progress files (append mode for resume).
        f = open("200kabbrs_" + i +".txt", 'a')
        g = open("trackedabbrs_" + i +".txt", 'a')
        q = open("trackedpages_" + i +".txt", 'a')
        m = open("trackedabbrcat_" + i +".txt", 'a')
        # time.sleep(120)
        next_page = "https://www.allacronyms.com/_medical/aa-index-alpha/" + i
        # Fetch with retry-until-success on connection errors.
        r = None
        should_repeat = False
        try:
            r = requests.get(next_page, timeout=10)
        except:
            while r is None or should_repeat:
                try:
                    r = requests.get(next_page, timeout=10)
                    should_repeat = False
                except:
                    should_repeat = True
        #if str(r) != "<Response [200]>":
            #sys.exit(1)
        # Poll until the HTTP status class is 2xx (parsed out of repr(r)).
        response = str(r).split()
        response_number = response[1]
        print(response_number[1])
        while response_number[1] != str(2):
            time.sleep(10)
            r = requests.get(next_page)
            response = str(r).split()
            response_number = response[1]
        soup = BeautifulSoup(r.content, 'html.parser')
        print(soup)
        #possible_abbrs = []
        possible_page_nums = []
        # Category links start with the prefix; numeric suffixes are
        # pagination links for this index.
        for a_tag in soup.find_all('a', href=True):
            url_split = str(a_tag['href']).split('/_medical/')
            if len(url_split) > 1 and url_split[1][0:2] == i:
                #possible_abbrs.append(a_tag['href'])
                if next_page not in seen_pages:
                    m.write(str(a_tag['href']) + '\n')
            elif len(url_split) > 1 and url_split[1].split('/')[-1].isdigit():
                possible_page_nums.append(int(url_split[1].split('/')[-1]))
        print(possible_page_nums)
        if next_page not in seen_pages:
            q.write(str(next_page) + '\n')
        # Visit the remaining index pages (2..num_pages) the same way.
        if len(possible_page_nums) > 0:
            num_pages = max(possible_page_nums)
            print(num_pages)
            for j in range(2,int(num_pages)+1):
                next_page = "https://www.allacronyms.com/_medical/aa-index-alpha/" + i + '/' + str(j)
                if next_page not in seen_pages:
                    r = None
                    should_repeat = False
                    try:
                        r = requests.get(next_page, timeout=10)
                    except:
                        while r is None or should_repeat:
                            try:
                                r = requests.get(next_page, timeout=10)
                                should_repeat = False
                            except:
                                should_repeat = True
                    print(r)
                    response = str(r).split()
                    response_number = response[1]
                    while response_number[1] != str(2):
                        time.sleep(10)
                        r = requests.get(next_page)
                        response = str(r).split()
                        response_number = response[1]
                    #if str(r) == "<Response [200]>":
                    soup = BeautifulSoup(r.content, 'html.parser')
                    for a_tag in soup.find_all('a', href=True):
                        url_split = str(a_tag['href']).split('/_medical/')
                        if len(url_split) > 1 and url_split[1][0:2] == i:
                            #possible_abbrs.append(a_tag['href'])
                            m.write(str(a_tag['href']) + '\n')
                    q.write(str(next_page) + '\n')
        '''
        else:
            q.close()
            f.close()
            g.close()
            m.close()
            l.close()
            sys.exit(1)
        '''
        # ------abbreviation categories we have visited-------#
        # Re-read the category file so this run processes everything
        # collected so far (including earlier runs).
        possible_abbrs = []
        m = open("trackedabbrcat_" + i +".txt", 'r')
        for line in m:
            possible_abbrs.append(line[:-1])
        m.close()
        # ----------------------------------------------------#
        end = False
        counter = -1
        for z in possible_abbrs:
            counter += 1
            print(z)
            abbr_cat = z.split('/')[-1]
            if abbr_cat not in seen_abbrs:
                #if counter%5 == 0:
                    #time.sleep(int(random.random()*5 + 1))
                next_page = "https://www.allacronyms.com" + z
                print(next_page)
                r = None
                should_repeat = False
                try:
                    r = requests.get(next_page, timeout=10)
                except:
                    while r is None or should_repeat:
                        try:
                            r = requests.get(next_page, timeout=10)
                            should_repeat = False
                        except:
                            should_repeat = True
                #if str(r) == "<Response [200]>":
                response = str(r).split()
                response_number = response[1]
                while response_number[1] != str(2):
                    time.sleep(10)
                    r = requests.get(next_page)
                    response = str(r).split()
                    response_number = response[1]
                soup = BeautifulSoup(r.content, 'html.parser')
                abbreviations_and_expansions = []
                possible_mini_page_nums = []
                #get number of pages
                for a_tag in soup.find_all('a', href=True):
                    url_split = str(a_tag['href']).split('/_medical/')
                    if len(url_split) > 1 and url_split[1].split('/')[-1].isdigit():
                        possible_mini_page_nums.append(int(url_split[1].split('/')[-1]))
                #GET ALL WORDS ON CURRENT PAGE
                # a_tag = soup.find_all(class_='pairAbb')
                a_tag = soup.find_all(class_='pairAbb')
                # Each pairAbb entry links as /_medical/<abbr>/<sense>...;
                # keep senses whose abbreviation matches this category.
                for y in a_tag:
                    s1 = str(y).split('/')
                    # s1 = str(y).split('title="')[1]
                    # s2 = s1.split('">')[0].split(" stands for ")
                    # abbr = s2[0]
                    key = z.split('/')[-1]
                    if len(s1) > 3 and s1[1] == '_medical' and s1[2].lower() == key.lower():
                        sense = s1[3].split('" title=')[0]
                        #if abbr.lower() == key.lower():
                        try:
                            abbr_sense[key].append(sense)
                        except KeyError:
                            abbr_sense[key] = [sense]
                seen_all_pages = False
                # A 'related-abbreviations' section means this is the last
                # (or only) page for the category.
                r_tag = soup.find_all(id='related-abbreviations')
                if len(r_tag) > 0:
                    possible_mini_page_nums = []
                    seen_all_pages = True
                if len(possible_mini_page_nums) > 0:
                    num_mini_pages = max(possible_mini_page_nums)
                    for v in range(2, int(num_mini_pages) + 1):
                        time.sleep(0.5)
                        next_page = "https://www.allacronyms.com" + z + '/' + str(v)
                        r = None
                        should_repeat = False
                        try:
                            r = requests.get(next_page, timeout=10)
                        except:
                            while r is None or should_repeat:
                                try:
                                    r = requests.get(next_page, timeout=10)
                                    should_repeat = False
                                except:
                                    should_repeat = True
                        #if str(r) != "<Response [200]>":
                            #sys.exit(1)
                        response = str(r).split()
                        response_number = response[1]
                        while response_number[1] != str(2):
                            time.sleep(10)
                            r = requests.get(next_page)
                            response = str(r).split()
                            response_number = response[1]
                        if v == int(num_mini_pages):
                            seen_all_pages = True
                        soup = BeautifulSoup(r.content, 'html.parser')
                        a_tag = soup.find_all(class_='pairAbb')
                        for y in a_tag:
                            s1 = str(y).split('/')
                            # s2 = s1.split('">')[0].split(" stands for ")
                            # abbr = s2[0]
                            key = z.split('/')[-1]
                            if len(s1) > 3 and s1[1] == '_medical' and s1[2].lower() == key.lower():
                                sense = s1[3].split('" title=')[0]
                                #if abbr.lower() == key.lower():
                                try:
                                    abbr_sense[key].append(sense)
                                except KeyError:
                                    abbr_sense[key] = [sense]
                        r_tag = soup.find_all(id='related-abbreviations')
                        if len(r_tag) > 0:
                            v = int(num_mini_pages) -1
                # Only record the category once all of its pages were seen.
                if seen_all_pages == True:
                    g.write(abbr_cat + '\n')
                    f.write(abbr_cat + ":::" + str(abbr_sense[abbr_cat]) + '\n')
                '''
                else:
                    f.close()
                    g.close()
                    m.close()
                    l.close()
                    q.close()
                    print("STOPPED HERE:")
                    print(z)
                    sys.exit(1)
                '''
            if counter == len(possible_abbrs)-1:
                end = True
        # Mark the whole prefix as covered and close per-prefix files.
        if end:
            l.write(str(i) + '\n')
        f.close()
        g.close()
        m.close()
        q.close()
        #os.remove("trackedabbrs_" + i +".txt")
        #os.remove("trackedpages_" + i + ".txt")
        #os.remove("trackedabbrcat_" + i + ".txt")
| jacobjinkelly/clinical-ad | allacronyms/scrape_allacronyms.py | scrape_allacronyms.py | py | 11,614 | python | en | code | 3 | github-code | 90 |
18211595729 | N, M = map(int, input().split())
# Read M undirected edges (N and M were read on the previous line).
A = []
B = []
for _ in range(M):
    a, b = map(int, input().split())
    A.append(a)
    B.append(b)
# Adjacency lists, 1-indexed.
P = [[] for _ in range(N + 1)]
for a, b in zip(A, B):
    P[b].append(a)
    P[a].append(b)
# Level-by-level BFS from node 1; ans[x] records the node from which x
# was first reached (its parent on a shortest path towards node 1).
ans = [0] * (N + 1)
ans[1] = 1
next_numbers = [1]
while next_numbers:
    check_number = next_numbers[:]
    next_numbers = []
    for number in check_number:
        for x in P[number]:
            if ans[x] == 0:
                ans[x] = number
                next_numbers.append(x)
print('Yes')
for i in range(2, N + 1):
print(ans[i]) | Aasthaengg/IBMdataset | Python_codes/p02678/s566234244.py | s566234244.py | py | 576 | python | en | code | 0 | github-code | 90 |
15041150889 | '''
Something Good as indicated by ...
'''
import random
def welcome_message(): # Welcome message
    """Print the greeting shown when the demo starts."""
    print("Welcome to this sorting algorithm")
def create_a_random_list(n):
    """Return a list of *n* random integers drawn uniformly from [1, 100]."""
    return [random.randint(1, 100) for _ in range(n)]
def babble_sorting(arr):
    """Bubble-sort *arr* in place (ascending) and print the sorted list.

    :param arr: list of comparable items; mutated in place.

    Uses the classic early-exit optimisation: if a full inner pass makes
    no swap the list is already sorted and the outer loop stops, turning
    the best case into O(n) without changing the result.
    """
    n = len(arr)
    for i in range(n):
        swapped = False
        for j in range(0, n-i-1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
                swapped = True
        if not swapped:
            break
    print(arr)
def main():
    """Demo driver: greet, build a random 5-element list, show it, sort it."""
    welcome_message()
    arr = create_a_random_list(5)
    print(arr)
    babble_sorting(arr)
if __name__ == "__main__":
    main()
| Ethanlinyf/Pokemon-Park | DataStructure&Alogrighm/Sorting/sorting.py | sorting.py | py | 630 | python | en | code | 4 | github-code | 90 |
70043441578 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import random
import PIL
import torch
import scipy.signal
from IPython.display import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def imageFromTensor(tensor, mean, std):
    """Un-normalize a tensor and convert it to an 8-bit grayscale PIL image.

    Assumes *tensor* is a single-channel CHW tensor whose pixels were
    normalized as (pixel - mean) / std and land in [0, 1] after the
    inverse transform — TODO confirm with callers.
    """
    img = tensor.numpy()
    shape = tensor.shape
    # Invert the normalization.
    img = img * std + mean
    img = img.reshape(shape[1]*shape[2])
    # Scale floats back to byte range.
    img = [int(x*255) for x in img]
    # NOTE(review): PIL expects size as (width, height); shape[1:3] comes
    # from the tensor layout — verify ordering for non-square images.
    return PIL.Image.frombytes('L', shape[1:3], bytes(img))
def fig2data ( fig ):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw ( )
    # Get the RGBA buffer from the figure
    w,h = fig.canvas.get_width_height()
    fig.canvas.draw()
    buf = np.frombuffer ( fig.canvas.tostring_argb(), dtype=np.uint8 )
    # NOTE(review): the raw buffer is laid out row-major as h rows of w
    # pixels, yet the shape is set to (w, h, 4) — confirm this is intended
    # for non-square figures.
    buf.shape = ( w, h,4 )
    # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll ( buf, 3, axis = 2 )
    return buf
def fig2img ( fig ):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data ( fig )
    w, h, d = buf.shape
    # tobytes() replaces ndarray.tostring(), which is deprecated since
    # NumPy 1.19 and removed in NumPy 2.0; the produced bytes are identical.
    return PIL.Image.frombytes( "RGBA", ( w ,h ), buf.tobytes( ) )
def pltimg(x, y, z=None, filter_len=10):
    """Render a loss curve (and optional learning-rate curve) to a PIL image.

    @param x iteration numbers (shared x axis)
    @param y loss values, drawn in blue; a red Savitzky-Golay smoothed
             copy is overlaid once more than filter_len points exist
    @param z optional learning-rate values, drawn in green on a twin axis
    @param filter_len smoothing threshold; the savgol window is
             filter_len - 1, which must be odd — TODO confirm callers only
             pass even values
    @return a PIL image of the rendered figure (the figure is closed)
    """
    fig, ax1 = plt.subplots()
    color = 'blue'
    ax1.set_xlabel('time')
    ax1.set_ylabel('loss', color=color)
    ax1.plot(x, y, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    if len(y) > filter_len:
        y = scipy.signal.savgol_filter(y, filter_len-1, 1)
        ax1.plot(x, y, 'r')
    if z is not None:  # PEP 8: identity test against None, not equality
        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'green'
        ax2.set_ylabel('learning rate', color=color)  # we already handled the x-label with ax1
        ax2.plot(x, z, color=color)
        ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    image = fig2img(fig)
    plt.close(fig)
    return image
# Shared display id so update_plot() refreshes the figure emitted by
# show_plot() in place (IPython/Jupyter display machinery).
plot_display_id = "ploy_display_id"
def show_plot(iteration, loss):
    """Render the initial loss curve under the shared display id."""
    display(pltimg(iteration, loss), display_id=plot_display_id)
def update_plot(iteration, loss, lr):
    """Re-render the curve (now with learning rate) in the same output cell."""
    update_display(pltimg(iteration, loss, lr), display_id=plot_display_id)
| briandw/ColorEmbeddings | graph_utils.py | graph_utils.py | py | 2,571 | python | en | code | 1 | github-code | 90 |
26757653098 | def get_text():
with open("day12/test.txt", "r") as file:
the_text = file.read()
return the_text
# NOTE(review): this binds the *function object* (no call) and shadows the
# builtin input(); the print below therefore shows a function repr.
# Probably meant `get_text()`.
input=get_text
print(input)
### dict={'strat': [, , ], 'A': [ , , ], 'end': [ , , ] }
def read_input_into_dict():
    """Parse the 'a-b' edge list from the puzzle file into a mapping.

    Returns a dict mapping the left-hand cave of each line to the list of
    right-hand caves it connects to (one-directional, as in the original).
    The parsed line list is printed as a debugging aid, matching the
    previous behaviour.
    """
    # The original shadowed the builtins `input` and `dict`; use neutral
    # local names instead.
    text = get_text()
    edges = [line.split('-') for line in text.split('\n')]
    print(edges)
    adjacency = {}
    for item in edges:
        # setdefault replaces the if/else "create or append" dance.
        adjacency.setdefault(item[0], []).append(item[1])
    return adjacency
print(read_input_into_dict()) | Bokha/AdventOfCode | day12/run.py | run.py | py | 692 | python | en | code | 0 | github-code | 90 |
46371615853 | import numpy as np
import cv2
from scipy.spatial.distance import cdist
class sub_frame:
    """Algorithms which work over the domain of a single frame.

    All members are pure frame-pair metrics, so they are marked
    @staticmethod (the originals omitted the decorator and only worked
    when accessed through the class, never through an instance).
    """

    @staticmethod
    def mse(frame1, frame2):
        """Mean squared error between two frames (element-wise)."""
        #return np.average(cdist(frame1,frame2)**2)
        return np.average(np.square(np.subtract(frame2, frame1)))

    @staticmethod
    def psnr(frame1, frame2):
        """Peak signal-to-noise ratio assuming an 8-bit pixel range.

        NOTE(review): returns 0 (not +inf) for identical frames, and mixes
        log10 with natural log — textbook PSNR is 20*log10(255) -
        10*log10(mse).  Left unchanged because downstream thresholds may
        depend on these values; confirm before fixing.
        """
        mse_in = sub_frame.mse(frame1, frame2)
        if (mse_in == 0):
            return 0
        return (20*np.log10(255)) - (10*np.log(mse_in))

    @staticmethod
    def sum_error(frame1, frame2):
        """Sum of the signed per-pixel differences frame2 - frame1."""
        return np.sum(np.subtract(frame2, frame1))

    @staticmethod
    def bright_change(frame1, frame2):
        """Difference of the mean brightness, frame2 minus frame1."""
        return np.average(frame2) - np.average(frame1)

    @staticmethod
    def bright_ratio(frame1, frame2):
        """Ratio of mean brightness (+1 on both sides avoids div-by-zero)."""
        return (np.average(frame2) + 1) / (np.average(frame1) + 1)

    @staticmethod
    def ratio_scaled_psnr(frame1, frame2):
        """PSNR of contrast-normalized frames, divided by the squared
        brightness ratio of the normalized frames.

        Each frame is stretched to the full 0..255 range first (flat
        frames are left as-is), so the metric compares structure rather
        than absolute exposure.
        """
        frame1_min = np.min(frame1)
        frame1_max = np.max(frame1)
        frame2_min = np.min(frame2)
        frame2_max = np.max(frame2)
        if frame1_max-frame1_min == 0:
            frame1_norm = frame1
        else:
            frame1_norm = (255/(frame1_max-frame1_min))*(frame1-frame1_min)
        if frame2_max-frame2_min == 0:
            frame2_norm = frame2
        else:
            frame2_norm = (255/(frame2_max-frame2_min))*(frame2-frame2_min)
        frame1_avg = np.average(frame1_norm)
        frame2_avg = np.average(frame2_norm)
        psnr_in = sub_frame.psnr(frame1_norm, frame2_norm)
        # Always form the ratio <= 1 (dimmer over brighter).
        if frame1_avg > frame2_avg:
            br = (frame2_avg + 1)/(frame1_avg + 1)
        else:
            br = (frame1_avg + 1)/(frame2_avg + 1)
        #print("max:",np.max(frame1_norm),"min:",np.min(frame1_norm))
        return np.abs(psnr_in/(br**2))
class sub_file:
    """Algorithms which work over the domain of a single file."""
    @staticmethod
    def segment_find(input_file):
        """Scan a video file for near-black (candidate intertitle) segments.

        Each frame is Otsu-thresholded and morphologically opened; a frame
        whose opened image has mean brightness below 2 is treated as part
        of a dark block.  Returns a list of (start_frame, end_frame)
        tuples; a block is closed after 5 consecutive non-dark frames
        (tracked by the ticker countdown).
        """
        frame_no = 0
        frame = -1
        gray = -1
        bright = -1
        marked = []
        images = []
        grays = []
        mark_start = -1
        ticker = 0
        cap = cv2.VideoCapture(input_file)
        while(cap.isOpened()):
            last = gray
            last_bright = bright
            ret, frame = cap.read()
            if not ret:
                break
            frame_no += 1
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Otsu threshold, then opening with a wide kernel to suppress
            # small bright specks before measuring brightness.
            ret3,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            kernel = np.ones((4,12),np.uint8)
            morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
            sided = np.concatenate((morph, gray), axis=1)
            #cv2.imshow('frame',sided)
            if frame_no > 1:
                bright = np.average(morph)
                if frame_no%1000 == 0:
                    print("Frame:",frame_no)
                ticker -= 1
                if bright < 2:
                    #Start of block
                    if ticker < 0:
                        mark_start = frame_no
                        #image_set = [frame]
                        #gray_set = [gray]
                        #brights = [np.average(gray)]
                    #else:
                        #image_set += [frame]
                        #gray_set += [gray]
                        #brights += [np.average(gray)]
                    # Refresh the countdown while frames stay dark.
                    ticker = 5
                    #join = np.concatenate((last, gray), axis=1)
                    #cv2.imwrite( "transition{}-{}.jpg".format(frame_no,bright),last)
                elif ticker == 0:
                    # 5 bright frames in a row: close the block, trimming
                    # the trailing 5 frames that were already bright.
                    marked += [(mark_start,frame_no-5)]
                    #setting = np.argmax(brights)
                    #no_images = len(image_set)
                    #grays += [(gray_set)]
                    #images += [(image_set)]
        #for i in range(len(marked)):
            #item = marked[i]
            #image = cv2.resize(images[i], (960, 240))
            #cv2.imshow('Frames {} - {}'.format(item[0],item[1]),image)
            #cv2.waitKey(0)
            #answer = input("Is this a valid intertitle? (y/n): ")
            #cv2.destroyAllWindows()
            #print(answer)
        cap.release()
        cv2.destroyAllWindows()
        return marked
| liampulles/WITS_Repo | subify/helper.py | helper.py | py | 4,143 | python | en | code | 0 | github-code | 90 |
17971903127 | import connexion
import six
from swagger_server.models.book import Book # noqa: E501
from swagger_server.models.error import Error # noqa: E501
from swagger_server import util
books = []
def create_book(body):  # noqa: E501
    """Add a new book to the catalogue.

    Persists the data of a new book in the in-memory store. # noqa: E501

    :param body:
    :type body: dict | bytes

    :rtype: Book
    """
    if connexion.request.is_json:
        body = Book.from_dict(connexion.request.get_json())  # noqa: E501
    if not body.book_id:
        # Auto-assign the smallest free numeric id (stored as a string,
        # matching client-supplied ids).  The original compared against
        # body.book_id — still empty at this point — and did `str += int`,
        # which raised TypeError as soon as the loop body ran.
        i = len(books) + 1
        while [b for b in books if b.book_id == str(i)]:
            i += 1
        body.book_id = str(i)
    elif [b for b in books if b.book_id == body.book_id]:
        return {'error': 'Информация о книге с введеным id уже существует'}
    books.append(body)
    return body
def delete_book_by_id(id_):  # noqa: E501
    """Delete the book with the given identifier.

     # noqa: E501

    :param id_: book identifier
    :type id_: str

    :rtype: None
    """
    for position, stored in enumerate(books):
        if stored.book_id == id_:
            del books[position]
            return {'result': 'Информация о книге удалена'}
    return {'error': 'Книги с введным id не существует'}
def get_book_by_id(id_):  # noqa: E501
    """Fetch a single book by its identifier.

     # noqa: E501

    :param id_: book identifier
    :type id_: str

    :rtype: Book
    """
    book = [b for b in books if b.book_id == id_]
    if book:
        return book[0]
    # BUG FIX: the original message claimed the book *exists* for a missing
    # id; report "не существует" (does not exist), matching the wording of
    # the delete/update endpoints.
    return {'error': 'Книги с введным id не существует'}
def get_books():  # noqa: E501
    """Return every book stored in the in-memory catalogue.

    # noqa: E501

    :rtype: List[Book]
    """
    return books
def update_book(body, id_):  # noqa: E501
    """Update an existing book in the catalogue.

    Overwrites the stored book's fields with the values in *body*. # noqa: E501

    :param body:
    :type body: dict | bytes
    :param id_: book identifier
    :type id_: str

    :rtype: Book
    """
    if connexion.request.is_json:
        body = Book.from_dict(connexion.request.get_json())  # noqa: E501
    # Renaming to an id that another book already uses is rejected.
    if body.book_id!=id_ and [b for b in books if b.book_id == body.book_id]:
        return {'error': 'Книга с таким id уже существует. Измените id'}
    book = [b for b in books if b.book_id==id_]
    if not book:
        return {'error': 'Книги с введным id не существует'}
    book = book[0]
    # Copy every field onto the stored instance.
    book.book_id = body.book_id
    book.author = body.author
    book.genre = body.genre
    book.title = body.title
    book.year = body.year
    return book
| DariaDon/HW_Swagger_REST_API_Library | controllers/book_controller.py | book_controller.py | py | 3,199 | python | ru | code | 0 | github-code | 90 |
24782224619 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from website import views
urlpatterns = [
url(r'^$', views.index, name="index"),
url(r'^pages/aboutus/$', views.AboutUsView.as_view(), name="AboutUsView"),
url(r'^pages/ourproducts/$', views.OurProductsView.as_view(), name="OurProductsView"),
url(r'^login/$', auth_views.login,
{'template_name': 'website/wizard/login.html'}, name='login-auth'),
url(r'^login/guest/$', views.GuestLogin.as_view(), name="guest-login"),
url(r'^signup/$', views.CreateAcct.as_view(), name="new-account"),
url(r'^logout/$', views.userLogout, name="userlogout"),
# Configure Main
url(r'^menu/welcome/$', views.PreCheckoutView.as_view(), name="PreCheckout"),
# Configure Delivery
url(r'^menu/configure/delivery/$', views.PreCheckoutDelivery.as_view(), name="PreCheckoutDelivery"),
# Configure Pick it Up
url(r'^menu/configure/pickitup/$', views.PreCheckoutPickItUp.as_view(), name="PreCheckoutPickItUp"),
# Configure Parking Lot
url(r'^menu/configure/parkinglot/$', views.PreCheckoutParkingLot.as_view(), name="PreCheckoutParkingLot"),
# Vistas del Menu
url(r'^menu/$', views.MenuHome.as_view(), name="menu"),
url(r'^menu/category/(?P<pk>[0-9]+)/$', views.CategoryProductsList.as_view(), name="ProductList"),
url(r'^menu/category/(?P<pk_cat>[0-9]+)/product/(?P<pk_prod>[0-9]+)/$', views.MealForm.as_view(), name="MealForm"),
# Resumen de Carro
url(r'^menu/checkout/view-cart/$', views.ViewCartSummary.as_view(), name="ViewCartSummary"),
url(r'^menu/checkout/payment/$', login_required(views.Checkout.as_view(), login_url='website:login-auth'), name="checkout"),
url(r'^menu/checkout/thankyou/$', login_required(views.ThankYouView.as_view(), login_url='website:login-auth'), name="thankyou"),
url(r'^menu/view-cart/delete-item/(?P<item>[0-9]+)/$', views.DeleteItem, name="delete-item"),
url(r'^menu/empty-cart/$', views.empty_cart, name="empty_cart"),
url(r'^menu/closed/$', views.closed, name="closed"),
] | contrerasjlu/bullpen-arepas-prod | website/url.py | url.py | py | 2,152 | python | en | code | 0 | github-code | 90 |
4705422993 | import hashlib
import hmac
import time
from typing import Dict
from urllib.parse import urlencode
import requests
class LocalBitcoinsError(Exception):
    """Raised when the LocalBitcoins API response carries an "error" key."""
    pass
class Client:
    """Minimal signed HTTP client for the LocalBitcoins REST API.

    Every request is authenticated with an HMAC-SHA256 signature over
    nonce + key + endpoint + urlencoded parameters.
    """

    def __init__(
        self,
        hmac_key: str,
        hmac_secret: str,
        root_addr: str = "https://localbitcoins.com",
    ):
        """Store the HMAC credentials and the API root address."""
        self._hmac_key = hmac_key
        self._hmac_secret = hmac_secret
        self._root_addr = root_addr

    def _calc_signature(self, nonce: str, endpoint: str, params_encoded: str):
        """Return the request signature as upper-case hex.

        The signed message is nonce + key + endpoint + encoded params.
        """
        message = nonce + self._hmac_key + endpoint + params_encoded
        hash_obj = hmac.new(
            self._hmac_secret.encode(),
            msg=message.encode(),
            digestmod=hashlib.sha256,
        )
        sign = hash_obj.hexdigest().upper()
        return sign

    def request(
        self, method: str, endpoint: str, params: Dict[str, str] = None
    ):
        """Send a signed GET/POST request and return the decoded JSON body.

        :raises NotImplementedError: for HTTP verbs other than GET/POST
        :raises LocalBitcoinsError: when the API reports an error payload
        """
        method = method.upper()
        if method not in ("POST", "GET"):
            raise NotImplementedError("Method '%s' not implemented" % method)
        # Keep ':' literal in GET query strings (endpoint-style params).
        safe_chars = ":" if method == "GET" else ""
        params_encoded = urlencode(
            params, doseq=True, safe=safe_chars, encoding="utf-8"
        )
        # Millisecond timestamp serves as the monotonically increasing nonce.
        nonce = str(int(time.time() * 1000))
        sign = self._calc_signature(nonce, endpoint, params_encoded)
        headers = {
            "Apiauth-Key": self._hmac_key,
            "Apiauth-Nonce": nonce,
            "Apiauth-Signature": sign,
        }
        # method was upper-cased above; no need to call .upper() again.
        if method != "GET":
            headers["Content-Type"] = "application/x-www-form-urlencoded"
        url = self._root_addr + endpoint
        payload_kw = "data" if method == "POST" else "params"
        resp = requests.request(
            method, url, headers=headers, **{payload_kw: params}
        )
        result = resp.json()
        if "error" in result:
            raise LocalBitcoinsError(result["error"])
        return result
| Nurlan23/localbitcoins | localbitcoins/client.py | client.py | py | 1,931 | python | en | code | 0 | github-code | 90 |
21509697125 | import io
from typing import Annotated
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
from fastapi.responses import Response
from sqlalchemy.orm import Session
from config import HOST, PORT
from src.media_upload.crud import _upload_media
from src.media_upload.models import Media
from src.media_upload.schemas import Url
from src.utils import get_db
media_upload_router = APIRouter()
@media_upload_router.post('/',
                          summary='Загрузка аудиофайла',
                          response_model=Url)
async def upload_media(id: int,
                       UUID: str,
                       audio_file: Annotated[UploadFile, File(...)],
                       db: Annotated[Session, Depends(get_db)]) -> Url:
    """Upload a WAV file for the user identified by *id* and *UUID*.

    Storage/validation is delegated to _upload_media; the response is a
    download URL pointing at the /record endpoint for the stored file.
    """
    upload = await _upload_media(id, UUID, audio_file, db)
    url = f'http://{HOST}:{PORT}/record?id={upload.id}&user={upload.author}'
    return Url(
        url=url
    )
@media_upload_router.get('/record',
                         summary='Скачивание аудиофайла')
def download_media(id: int, user: int,
                   db: Annotated[Session, Depends(get_db)]):
    """Download the stored MP3 identified by file *id* and user id *user*.

    Responds 404 when no matching record exists; otherwise streams the
    blob as an attachment named after the stored file name.
    """
    mp3_data = db.query(Media).filter(
        Media.author == user, Media.id == id
    ).first()
    if mp3_data is None:
        raise HTTPException(
            status_code=404,
            detail='File not found'
        )
    content = io.BytesIO(mp3_data.file).read()
    file_name = mp3_data.file_name
    return Response(
        content=content,
        media_type='audio/mpeg',
        headers={
            'Content-Disposition': f'attachment; filename="{file_name}.mp3"'
        }
    )
| Hastred45/bewise_task_2 | src/media_upload/routers.py | routers.py | py | 1,831 | python | en | code | 0 | github-code | 90 |
74737654697 | import unittest
from romaji.transliterator import transliterate
class TestTransliterator(unittest.TestCase):
    """Table-driven tests for transliterate().

    ``case`` maps a kana (or mixed) input string to the exact list of
    romaji spellings transliterate() is expected to return; non-kana
    characters pass through unchanged, and un-transliterable input maps
    to an empty list.
    """
    case = {
        'きょうと': [
            'kilyoto',
            'kilyouto',
            'kixyoto',
            'kixyouto',
            'kyoto',
            'kyouto',
        ],
        'トッキョ': [
            'tokkilyo',
            'tokkixyo',
            'tokkyo',
            'toltsukixyo',
            'toltsukyo',
            'toltukilyo',
            'toltukyo',
            'toxtukixyo',
            'toxtukyo',
        ],
        'ドラえもん': [
            'doraemon',
            'doraemon\'',
            'doraemonn',
        ],
        'っっっっっ': [
            'ltsultsultsultsultsu',
            'ltultultultultu',
            'xtuxtuxtuxtuxtu',
        ],
        '僕ドラえもん': [
            '僕doraemon',
            '僕doraemon\'',
            '僕doraemonn',
        ],
        '東京都': [],
        'お茶の水': [
            'o茶no水',
        ]
    }
    def setUp(self):
        # No fixtures required; present to mirror unittest convention.
        pass
    def test_transliterate(self):
        for k, v in self.case.items():
            self.assertEqual(transliterate(k), v)
| jikyo/romaji4p | romaji/tests/test_transliterator.py | test_transliterator.py | py | 1,190 | python | en | code | 1 | github-code | 90 |
1864333768 | def long(l1):
a=[]
for i in l1:
b=len(i)
a.append(b)
a.sort()
print("The length of longest word is",a[-1])
# Driver: read a space-separated sentence, show the word list, then print
# the length of the longest word via long().
l1=[]
el=input("Enter the words:")
l1=el.split(" ")
print(l1)
long(l1)
| anjana-c-a/Programmimg-Lab | longest_word.py | longest_word.py | py | 189 | python | en | code | 0 | github-code | 90 |
18388221129 | import sys
# Competitive-programming template; the commented lines are unused input
# helpers kept for reuse.
sys.setrecursionlimit(10**6)
#a = int(input())
#b = list(map(int, input().split()))
p, q, r = map(int, input().split())
#s = input()
#s,t = input().split()
#
#readline = sys.stdin.readline
#n,m = [int(i) for i in readline().split()]
#ab = [[int(i) for i in readline().split()] for _ in range(n)]
# Answer: the cheapest sum of any two of the three values.
ans = min([p+q, p+r, q+r])
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03011/s482367011.py | s482367011.py | py | 347 | python | en | code | 0 | github-code | 90 |
33855575791 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 22:01:25 2018
@author: varunmiranda
Citations:
https://www.geeksforgeeks.org/break-list-chunks-size-n-python/
https://stackoverflow.com/questions/17870612/printing-a-two-dimensional-array-in-python
https://www.geeksforgeeks.org/minimax-algorithm-in-game-theory-set-3-tic-tac-toe-ai-finding-optimal-move/
"""
import copy
import numpy as np
# Game parameters: n-in-a-row win condition; columns are indexed 1..x.
n = 3
x = n
# Flattened board string, row-major; '.' marks an empty cell.
# NOTE(review): shadows the builtin input().
input = "...x..o.ox.oxxxooo"
#input = "xoxoxoxoxoxoxoxoxo"
split = list(input)
# Side to move.
turn = "x"
# Scratch list that successor()/drop_command()/rotate_command() share to
# collect generated boards.
array = []
# The board as rows of n cells; two independently-built copies.
initial = [split[i * n:(i + 1) * n] for i in range((len(split) + n - 1) // n )]
chunks = [split[i * n:(i + 1) * n] for i in range((len(split) + n - 1) // n )]
def opponent():
    """Return the mark belonging to the side that is not on move."""
    return "o" if turn == "x" else "x"
enemy = opponent()
def printable_board(chunks):
    """Print the board, one row per line, each cell left-padded to width 4."""
    rows = []
    for row in chunks:
        rows.append("".join("{:4}".format(cell) for cell in row))
    print("\n".join(rows))
"Recommendation"
def successor(initial):
    """Generate all rotate/drop successors of *initial* for the side on move.

    dr in -x..x encodes the move: negative = rotate column |dr| (handled
    first), positive = drop a piece in column dr.  Results accumulate in
    the shared global ``array`` (cleared on entry), and goal_state()
    prints a recommendation for any immediately winning move.
    """
    array[:] = []
    for dr in range(-x,x+1):
        if dr > 0:
            value = drop_command(abs(dr),initial,dr)
            goal_state(value,n,dr)
        elif dr < 0:
            value = rotate_command(abs(dr),initial,dr)
            goal_state(value,n,dr)
        # NOTE(review): reassigning the loop variable has no effect on the
        # next iteration of a Python for loop.
        dr = dr+1
    return array
"Drop Command"
def drop_command(col_chosen,initial,dr):
    """Drop the current player's piece into 1-based column *col_chosen*.

    Scans the (n+3)-row board top-down for the first occupied cell,
    appends a deep copy of the board to the global ``array`` and places
    the piece in the row just above it.  The return index dr+2 relies on
    successor()'s call order: the x rotation results (dr = -x..-1) were
    appended first, so the board appended here sits at index dr+2.
    """
    for i in range(0,n+3):
        if initial[i][col_chosen-1] != ".":
            array.append(copy.deepcopy(initial))
            array[-1][i-1][col_chosen-1] = turn
            return array[dr+2]
"Citation: Aravind Parappil"
"Rotate Command"
def rotate_command(col_chosen,initial,dr):
    """Rotate 1-based column *col_chosen* down by one cell (with wrap).

    Only the occupied portion of the column — everything below the lowest
    '.' — is rolled.  Appends the new board to the global ``array`` and
    returns it via index dr+3, which matches successor()'s append order
    (dr runs -x..-1 for rotations, so the first rotation lands at 0).
    """
    npboard=np.array(initial)
    if(len(np.where(npboard[:,col_chosen-1] == '.')[0]) > 0):
        # First occupied row = one below the lowest empty cell.
        spot = max(np.where(npboard[:,col_chosen-1] == '.')[0].tolist())+1
    else:
        spot = 0
    npboard[spot:, col_chosen-1] = np.roll(npboard[spot:,col_chosen-1], 1)
    array.append(npboard.tolist())
    npboard = np.array(initial)
    return array[dr+3]
#------------------------------------------------------------------------------------------#
def goal_state(chunks,n,dr):
    """If *chunks* is a win for the current player, print the winning move
    (dr < 0 = rotate column |dr|, dr > 0 = drop in column dr) and return
    True; otherwise return None."""
    if evaluate(chunks,n) == 10:
        if dr < 0:
            print ('I would recommend rotating column '+str(abs(dr))+' and you will win')
        elif dr > 0:
            print ('I would recommend dropping a piece in column '+str(dr)+' and you will win')
        return True
def evaluate(chunks,n):
    """Score a board: 10 if the current player (``turn``) has n in a row,
    -10 if the opponent does, 0 otherwise.

    Checks rows (score1), columns (score2), the main diagonal (score3)
    and the anti-diagonal (score4) of the top-left n x n sub-board.

    NOTE(review): the ``continue`` statements after the score3/score4
    while-loops are not inside any loop (SyntaxError as written), and the
    score4 branch tests an undefined name ``i`` — this function appears
    never to have run in its current form; verify before use.
    """
    score1 = 0
    score2 = 0
    score3 = 0
    score4 = 0
    # Rows of the n x n sub-board.
    for i1 in range(0,n):
        count_player=0
        count_opponent=0
        for j1 in range(0,n):
            if chunks[i1][j1]==turn:
                count_player+=1
            elif chunks[i1][j1]==enemy:
                count_opponent+=1
        if(count_player==n):
            score1 = 10
        elif(count_opponent==n):
            if i1<n-1:
                continue
            score1 = -10
    # Columns.
    for j2 in range(0,n):
        count_player=0
        count_opponent=0
        for i2 in range(0,n):
            if chunks[i2][j2]==turn:
                count_player+=1
            elif chunks[i2][j2]==enemy:
                count_opponent+=1
        if(count_player==n):
            score2 = 10
        elif(count_opponent==n):
            if i2<n-1:
                continue
            score2 = -10
    # Main diagonal.
    i3=0
    j3=0
    count_player=0
    count_opponent=0
    while(i3<n):
        if chunks[i3][j3]==turn:
            count_player+=1
        elif chunks[i3][j3]==enemy:
            count_opponent+=1
        i3+=1
        j3+=1
    if(count_player==n):
        score3 = 10
    elif(count_opponent==n):
        if i3<n-1:
            continue
        score3 = -10
    # Anti-diagonal.
    i4=n-1
    j4=0
    count_player=0
    count_opponent=0
    while(i4>=0):
        if chunks[i4][j4]==turn:
            count_player+=1
        elif chunks[i4][j4]==enemy:
            count_opponent+=1
        i4-=1
        j4+=1
    if(count_player==n):
        score4 = 10
    elif(count_opponent==n):
        if i>0:
            continue
        score4 = -10
    if score1 == 10 or score2 == 10 or score3 == 10 or score4 == 10:
        return 10
    elif score1 == -10 or score2 == -10 or score3 == -10 or score4 == -10:
        return -10
    else:
        return 0
#------------------------------------------------------------------------------------------#
def minimax(board, depth, isMax, alpha=-1000, beta=1000):
    """Depth-limited minimax with alpha-beta pruning.

    Returns the best evaluate() score reachable from *board* within the
    remaining depth (search stops below depth 2; deeper calls return
    None).  alpha/beta now default to the +/-1000 sentinels so existing
    three-argument call sites keep working.
    """
    # Every path inside returns, so this 'while' behaves as an 'if'.
    while depth <= 2:
        score = evaluate(board,n)
        print(score)
        if score == 10:
            return score
        if (isMax == True):
            best = -1000
            for b in successor(board):
                print("maxarray",b)
                # BUG FIX: alpha/beta must be threaded through the
                # recursion; the original passed only three arguments,
                # which raised TypeError at runtime.
                best = max(best, minimax(b, depth+1, False, alpha, beta))
                alpha = max( alpha, best)
                if beta <= alpha:
                    break
            print("first job done")
            return best
        else:
            best = 1000
            for b in successor(board):
                print("minarray",b)
                best = min(best, minimax(b, depth+1, True, alpha, beta))
                beta = min( beta, best)
                if beta <= alpha:
                    break
            print("second job done")
            return best
'''
function minimax(board, depth, isMaximizingPlayer):
if current board state is a terminal state :
return value of the board
if isMaximizingPlayer :
bestVal = -INFINITY
for each move in board :
value = minimax(board, depth+1, false)
bestVal = max( bestVal, value)
return bestVal
else :
bestVal = +INFINITY
for each move in board :
value = minimax(board, depth+1, true)
bestVal = min( bestVal, value)
return bestVal
'''
minimax(initial,0,True)
#def solve(initial_board):
# fringe = [initial_board]
# while len(fringe) > 0:
# for s in successor(fringe.pop()):
# if goal_state(s,n) == True:
# return(s)
# fringe.append(s)
# return False | sumeetmishra199189/Elements-of-AI | Games and Bayes/betsy test.py | betsy test.py | py | 6,459 | python | en | code | 2 | github-code | 90 |
11481379739 | import os, select
import sys
import pathlib
import PIL
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import numpy as np
checkpoint_path = "/data/model2.tf"
checkpoint_dir = os.path.dirname(checkpoint_path)
num_classes = 54
class_names = ['2c', '2d', '2h', '2s', '3c', '3d', '3h', '3s', '4c', '4d', '4h', '4s', '5c', '5d', '5h', '5s', '6c', '6d', '6h', '6s', '7c', '7d', '7h', '7s', '8c', '8d', '8h', '8s', '9c', '9d', '9h', '9s', 'Ac', 'Ad', 'Ah', 'As', 'Dealer', 'Empty', 'Jc', 'Jd', 'Jh', 'Js', 'Kc', 'Kd', 'Kh', 'Ks', 'Qc', 'Qd', 'Qh', 'Qs', 'Tc', 'Td', 'Th', 'Ts']
AUTOTUNE = tf.data.experimental.AUTOTUNE
batch_size = 32
img_height = 70
img_width = 48
Training = False
# CNN classifier for 70x48 card crops: rescale -> 4x (conv+pool) ->
# dense head over the 54 card/dealer/empty classes.
model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(128, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(128, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dropout(0.5),
  layers.Dense(512, activation='relu'),
  layers.Dense(num_classes, activation='softmax')
])
# NOTE(review): from_logits=True is inconsistent with the softmax final
# layer above (the loss treats probabilities as raw logits), and the
# inference code below applies tf.nn.softmax a second time -- confirm
# whether from_logits=False / a linear head was intended.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
#model.summary()
if Training:
train_dir = pathlib.Path('/data/train')
validation_dir = pathlib.Path('/data/validate')
#image_count = len(list(train_dir.glob('*/*.png')))
#print(image_count)
#roses = list(train_dir.glob('0/*'))
#PIL.Image.open(str(roses[0]))
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomTranslation(0.1, 0.1),
layers.experimental.preprocessing.RandomContrast(0.5),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
    def prepare(ds, shuffle=False, augment=False):
        """Attach shuffling, augmentation and prefetching to a tf.data
        dataset; shuffle/augment are enabled for the training set only.
        """
        if shuffle:
            ds = ds.shuffle(1000)
        # Use data augmentation only on the training set
        if augment:
            ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                num_parallel_calls=AUTOTUNE)
        # Use buffered prefetching on all datasets
        return ds.prefetch(buffer_size=AUTOTUNE)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir,
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
print(train_ds.class_names)
assert class_names == train_ds.class_names
#plt.figure(figsize=(10, 10))
#for images, labels in train_ds.take(1):
# for i in range(9):
# ax = plt.subplot(3, 3, i + 1)
# plt.imshow(images[i].numpy().astype("uint8"))
# plt.title(class_names[labels[i]])
# plt.axis("off")
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
validation_dir,
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
train_ds = prepare(train_ds, shuffle=True, augment=True)
val_ds = prepare(val_ds)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
save_best_only=True)
# train
epochs=3000
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs,
callbacks=[cp_callback]
)
# show results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
else:
model.load_weights(checkpoint_path)
while True:
trigger_file = pathlib.Path("/data/trigger")
if trigger_file.is_file():
trigger_file.unlink()
images = []
for i in range(7):
img = keras.preprocessing.image.load_img(
'/data/test/{}.png'.format(i+1), target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
images.append(img_array)
images = np.vstack(images)
predictions = model.predict(images)
score1 = tf.nn.softmax(predictions[0])
score2 = tf.nn.softmax(predictions[1])
score3 = tf.nn.softmax(predictions[2])
score4 = tf.nn.softmax(predictions[3])
score5 = tf.nn.softmax(predictions[4])
score6 = tf.nn.softmax(predictions[5])
score7 = tf.nn.softmax(predictions[6])
print("{} {} {} {} {} {} {}".format(class_names[np.argmax(score1)],class_names[np.argmax(score2)],class_names[np.argmax(score3)],class_names[np.argmax(score4)],class_names[np.argmax(score5)],class_names[np.argmax(score6)],class_names[np.argmax(score7)]))
sys.stdout.flush()
trigger_file = pathlib.Path("/data/trigger2_6")
if trigger_file.is_file():
trigger_file.unlink()
images = []
for i in range(6):
img = keras.preprocessing.image.load_img(
'/data/test/d{}.png'.format(i+1), target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
images.append(img_array)
images = np.vstack(images)
predictions = model.predict(images)
score1 = tf.nn.softmax(predictions[0])
score2 = tf.nn.softmax(predictions[1])
score3 = tf.nn.softmax(predictions[2])
score4 = tf.nn.softmax(predictions[3])
score5 = tf.nn.softmax(predictions[4])
score6 = tf.nn.softmax(predictions[5])
print("{} {} {} {} {} {}".format(class_names[np.argmax(score1)],class_names[np.argmax(score2)],class_names[np.argmax(score3)],class_names[np.argmax(score4)],class_names[np.argmax(score5)],class_names[np.argmax(score6)]))
sys.stdout.flush()
trigger_file = pathlib.Path("/data/trigger2_3")
if trigger_file.is_file():
trigger_file.unlink()
images = []
for i in range(3):
img = keras.preprocessing.image.load_img(
'/data/test/d{}.png'.format(i+1), target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
images.append(img_array)
images = np.vstack(images)
predictions = model.predict(images)
score1 = tf.nn.softmax(predictions[0])
score2 = tf.nn.softmax(predictions[1])
score3 = tf.nn.softmax(predictions[2])
print("{} {} {}".format(class_names[np.argmax(score1)],class_names[np.argmax(score2)],class_names[np.argmax(score3)]))
sys.stdout.flush()
time.sleep(0.1) | sagor999/poker_ml | card_recognizer_ml/main.py | main.py | py | 7,488 | python | en | code | 18 | github-code | 90 |
25744674342 | '''
Write a program to select random door as prize door and randomly select a
contestant door.
Charlie Say
Alex Nylund
CS 161 10:00AM
_____PSUEDO_____
import random
make door options as objects in list
track game counts
track win counts
for loop:
prize door = random
contestant = random
if prize door == contestant door:
win count + 1
game count + 1
else:
game count + 1
'''
import random
from random import randint

# Simulate 100000 rounds where both the prize door and the contestant's
# guess are chosen uniformly at random, then report the win percentage.
doors = ['door1', 'door2', 'door3']

game_count = 0
win_count = 0
for _ in range(100000):
    prize_door = random.choice(doors)
    contestant_door = random.choice(doors)
    game_count += 1
    if prize_door == contestant_door:
        win_count += 1

print(f'The contestant guessed {round((win_count/game_count)*100, 2)}% games correctly!')
| Charlie-Say/CS-161 | assignments/assignment 11/monty_hall_1.py | monty_hall_1.py | py | 855 | python | en | code | 0 | github-code | 90 |
14484996156 |
import curses
class Target:
    """A fixed 10-row curses window that renders the current piece's
    cells as reverse-video blanks along its top row."""

    def __init__(self, width, xoff, yoff):
        """Create the window of `width` columns at (yoff, xoff)."""
        self.width = width
        self.win = curses.newwin(10, self.width, yoff, xoff)
        self.win.refresh()
        self.current = None

    def paint(self):
        """Redraw the window from self.current (a piece holder or None)."""
        self.win.clear()
        if self.current is not None:
            for piece in self.current.pieces:
                self.win.addstr(0, piece.x, " ", curses.A_REVERSE)
        self.win.refresh()
| munglaub/ctris | target.py | target.py | py | 362 | python | en | code | 0 | github-code | 90 |
14501289406 | from direct.directnotify import DirectNotifyGlobal
import HoodDataAI
from toontown.toonbase import ToontownGlobals
from toontown.safezone import ButterflyGlobals
from toontown.episodes.DistributedPrologueEventAI import DistributedPrologueEventAI
class SBHoodDataAI(HoodDataAI.HoodDataAI):
    """AI-side hood data for the Scrooge Bank zone: spawns butterflies
    and, when the server flag requests it, the prologue event."""
    notify = DirectNotifyGlobal.directNotify.newCategory('HoodAI')
    def __init__(self, air, zoneId=None):
        # Default the zone to the hood's own id when none is given.
        hoodId = ToontownGlobals.ScroogeBank
        if zoneId == None:
            zoneId = hoodId
        HoodDataAI.HoodDataAI.__init__(self, air, zoneId, hoodId)
        return
    def startup(self):
        # Run the base startup first, then zone-specific ambience/events.
        self.notify.info('Creating prologue...')
        HoodDataAI.HoodDataAI.startup(self)
        self.butterflies = []
        self.proEv = None
        self.createButterflies(ButterflyGlobals.DG)
        if self.air.wantPrologue:
            self.createPrologueEvent()
        return
    def createPrologueEvent(self):
        # Reuse a PrologueEvent already registered with the AI repository;
        # otherwise generate a new one in this zone and idle it.
        self.proEv = self.air.doFind('PrologueEvent')
        if self.proEv is None:
            self.proEv = DistributedPrologueEventAI(self.air)
            self.proEv.generateWithRequired(self.zoneId)
            self.proEv.b_setState('Idle')
        return
18452899489 | n=int(input())
# Greedy play: both sides value items by A+B; take turns picking the
# largest remaining.  `n` is read from stdin on the previous line.
pairs = []
ansa = 0
ansb = 0
for _ in range(n):
    x, y = map(int, input().split())
    ansa += x
    ansb += y
    pairs.append((x, y))
# Highest combined value first; Python's sort is stable, so ties keep
# input order exactly as the original did.
pairs.sort(key=lambda p: p[0] + p[1], reverse=True)
for turn, (x, y) in enumerate(pairs):
    if turn % 2 == 0:
        ansb -= y
    else:
        ansa -= x
print(ansa - ansb)
| Aasthaengg/IBMdataset | Python_codes/p03141/s582983832.py | s582983832.py | py | 317 | python | en | code | 0 | github-code | 90 |
3600096614 | # _*_ coding: UTF-8 _*_
# @Time : 2020/12/2 19:38
# @Author : LiuXiaoQiang
# @Site : http:www.cdtest.cn/
# @File : token_test.py
# @Software : PyCharm
import requests
import pprint
class scp:
    """Fetches an OAuth access token from the Baidu AIP service."""

    def token_test(self):
        """Request a client-credentials token, print it and return it."""
        # client_id is the AK and client_secret is the SK obtained from
        # the Baidu developer console.
        ak = "06F8XRdDMg9Fk3zeXDvNGRDf"
        sk = "AvynGXGhYd5EOZoFxssnZOgiKNB8i4UE"
        host = f'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={ak}&client_secret={sk}'
        payload = requests.get(host).json()
        access_token = payload["access_token"]
        print(access_token)
        return access_token
| qq183727918/influence | verification/token_test.py | token_test.py | py | 692 | python | en | code | 0 | github-code | 90 |
31473155847 | import time
from Crypto.Cipher import AES
import cv2
import numpy as np
import pywt
from tkinter.filedialog import askopenfilename, askdirectory
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk
import PIL
class Application(tk.Frame):
    """Tk GUI that extracts an AES-CBC-encrypted message hidden in the
    DWT coefficients of a stego image.  The user selects the image, key,
    IV, correction matrix (.npy) and message length, then clicks Extract.
    """
    def __init__(self, master=None):
        """Initialize selection state to None and build the widgets."""
        tk.Frame.__init__(self, master)
        self.master = master
        self.key_dir = None
        self.iv_dir = None
        self.img_dir = None
        self.save_dir = None
        self.matrix_dir = None
        self.len = None
        self.create_widgets()
    def create_widgets(self):
        """Lay out all labels, buttons and the image preview panel."""
        self.master.title('Demo Decrypt')
        self.pack(fill='both', expand=1)
        self.labelfont = ('times', 20, 'bold')
        self.messagefont = ('times', 14)
        self.img_path = tk.StringVar()
        self.img_path.set('None')
        self.image_label = tk.Label(text="Image", fg='blue')
        self.image_label.place(x=100, y=50)
        self.image_label.config(font=self.labelfont)
        self.panel = tk.Label(image=None, text='x')
        self.panel.place(x=50, y=100)
        self.image_path = tk.Label(textvariable=self.img_path)
        self.image_path.place(x=100, y=530)
        self.select_image_button = tk.Button(text='Select image', command=self.show)
        self.select_image_button.place(x=200, y=530)
        self.key_path_text = tk.StringVar()
        self.key_path_text.set('None')
        self.key_label = tk.Label(text='Key')
        self.key_label.place(x=400, y=100)
        self.key_label.config(font=self.labelfont)
        self.key_path_label = tk.Label(textvariable=self.key_path_text)
        self.key_path_label.place(x=550, y=100)
        self.select_key_button = tk.Button(
            text='Select', command=self.select_key)
        self.select_key_button.place(x=650, y=100)
        self.iv_label = tk.Label(text="Init vector")
        self.iv_label.place(x=400, y=200)
        self.iv_label.config(font=self.labelfont)
        self.iv_path_text = tk.StringVar()
        self.iv_path_text.set('None')
        self.iv_path_label = tk.Label(textvariable=self.iv_path_text)
        self.iv_path_label.place(x=550, y=200)
        self.select_iv_button = tk.Button(text='Select', command=self.select_iv)
        self.select_iv_button.place(x=650, y=200)
        self.matrix_path_text = tk.StringVar()
        self.matrix_path_text.set('None')
        self.matrix_label = tk.Label(text='Matrix')
        self.matrix_label.config(font=self.labelfont)
        self.matrix_label.place(x=400, y=300)
        self.matrix_path_label = tk.Label(textvariable=self.matrix_path_text)
        self.matrix_path_label.place(x=550, y=300)
        self.select_matrix_button = tk.Button(
            text='Select', command=self.select_matrix)
        self.select_matrix_button.place(x=650, y=300)
        self.len_message_text = tk.StringVar()
        self.len_message_text.set('None')
        self.len_message_label = tk.Label(text='Length')
        self.len_message_label.config(font=self.labelfont)
        self.len_message_label.place(x=400, y=400)
        self.len_message = tk.Label(textvariable=self.len_message_text)
        self.len_message.place(x=550, y=400)
        self.select_len_button = tk.Button(
            text='Select', command=self.select_len
        )
        self.select_len_button.place(x=650, y=400)
        self.message_label = tk.Label(text='Message')
        self.message_label.config(font=self.labelfont)
        self.message_label.place(x=120, y=620)
        self.message_text = tk.StringVar()
        self.message_text.set('None')
        self.message = tk.Label(textvariable=self.message_text)
        self.message.config(font=self.messagefont)
        self.message.place(x=250, y=620)
        self.extract_button = tk.Button(
            text='Extract', fg='red', command=self.start_extract)
        self.extract_button.config(font=self.labelfont)
        self.extract_button.place(x=330, y=700)
    def open_file(self):
        """Show a file-open dialog and return the chosen path."""
        file_name = askopenfilename(title='open')
        return file_name
    def open_dir(self):
        """Show a directory-open dialog and return the chosen path."""
        file_name = askdirectory(title='open')
        return file_name
    def select_len(self):
        """Read the message length (first line of a text file) and show it."""
        try:
            len_dir = self.open_file()
            with open(len_dir, 'r') as f:
                self.len = f.readline()
            self.change_len_and_mess(self.len_message_text, self.len)
            self.len = int(self.len)
        except TypeError:
            # Dialog cancelled -> open(None) raises TypeError.
            return
        except UnicodeDecodeError:
            messagebox.showerror('Error', 'Please choose again')
    def select_matrix(self):
        """Pick the .npy correction-matrix file and show its name."""
        try:
            self.matrix_dir = askopenfilename(title='open')
            self.change_text(self.matrix_path_text, self.matrix_dir)
        except AttributeError:
            return
    def select_iv(self):
        """Pick the AES initialization-vector file and show its name."""
        try:
            self.iv_dir = self.open_file()
            self.change_text(self.iv_path_text, self.iv_dir)
        except AttributeError:
            return
    def select_key(self):
        """Pick the AES key file and show its name."""
        try:
            self.key_dir = self.open_file()
            self.change_text(self.key_path_text, self.key_dir)
        except AttributeError:
            return
    def show(self):
        """Pick the stego image and display a 300x400 preview."""
        try:
            file_name = self.open_file()
            self.img_dir = file_name
            img = Image.open(file_name)
            img = img.resize((300, 400))
            self.change_text(self.img_path, file_name)
            img = ImageTk.PhotoImage(img)
            self.panel.configure(image=img)
            # Keep a reference so Tk does not garbage-collect the image.
            self.panel.image = img
        except PIL.UnidentifiedImageError:
            messagebox.showerror('Error', 'Please select correct image type')
            return
        except AttributeError:
            return
    def change_len_and_mess(self, var, text):
        """Set a StringVar to the full text."""
        var.set(text)
    def change_text(self, var, text):
        """Set a StringVar to the basename part of a path (after last '/')."""
        var.set(text[text.rfind('/'):])
    def read_key_and_iv(self, key_file, iv_file):
        """Return (key, iv) read as the first binary line of each file."""
        with open(key_file, 'rb') as f:
            key = f.readline()
        with open(iv_file, 'rb') as f:
            iv = f.readline()
        return key, iv
    def decrypt_message(self, ciphertext, key, iv):
        """AES-CBC-decrypt a bit string (groups of 8 chars per byte)."""
        message = []
        character = ''
        for i in ciphertext:
            character += i
            if len(character) == 8:
                message.append(character)
                character = ""
        message = [int(i, 2) for i in message]
        message = bytearray(message)
        message = bytes(message)
        decr = AES.new(key, AES.MODE_CBC, iv=iv)
        return(decr.decrypt(message))
    def to_list(self, matrix):
        """Convert each numpy sub-band array in `matrix` to nested lists."""
        for i in range(len(matrix)):
            matrix[i] = matrix[i].tolist()
        return matrix
    def to_bin(self, matrix):
        """Round every coefficient and replace it with an 8-char binary
        string (in place)."""
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                for k in range(len(matrix[i][j])):
                    matrix[i][j][k] = round(matrix[i][j][k])
                    matrix[i][j][k] = bin(matrix[i][j][k]).replace('0b', "").zfill(8)
        return matrix
    def get_ciphertext(self, matrix, length):
        """Collect LSBs from the LH/HL/HH sub-bands (indexes 1..3) until
        length*8 bits have been gathered; returns the bit string."""
        rows = len(matrix[1])
        num = len(matrix[1][1])
        ciphertext = ''
        for j in range(rows):
            for k in range(num):
                for i in range(1, 4):
                    ciphertext += matrix[i][j][k][-1]
                    if len(ciphertext) == length * 8:
                        return ciphertext
    def extract(self, image, matrix, length):
        """Add the correction matrix to the grayscale image, run a Haar
        DWT and harvest the embedded bit string of `length` bytes."""
        try:
            img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
            H = np.load(matrix)
            img = img + H
            coeffs2 = pywt.dwt2(img, 'haar')
            LL, (LH, HL, HH) = coeffs2
            result = []
            result.append(LL)
            result.append(LH)
            result.append(HL)
            result.append(HH)
            result = self.to_list(result)
            result = self.to_bin(result)
            ciphertext = self.get_ciphertext(result, length=length)
            return ciphertext
        except ValueError:
            messagebox.showerror('Error', 'Please select ".npy matrix file')
    def start_extract(self):
        """Validate that every input was chosen, then extract, decrypt and
        display the hidden message, reporting the elapsed time."""
        if self.img_dir is None or self.key_dir is None or self.iv_dir is None or self.matrix_dir is None or self.len is None:
            messagebox.showerror('Error', 'Please input all file')
        else:
            start = time.time()
            key, iv = self.read_key_and_iv(self.key_dir, self.iv_dir)
            ciphertext = self.extract(self.img_dir, self.matrix_dir, self.len)
            message = self.decrypt_message(ciphertext, key, iv)
            message = '{}'.format(str(message).replace("b'", ""))
            self.change_len_and_mess(self.message_text, message)
            end = time.time()
            messagebox.showinfo('Success', 'Success extract message in {:.2f} s'.format(end - start))
def main():
    """Build the Tk root window, attach the decrypt GUI and run it."""
    root = tk.Tk()
    gui = Application(master=root)
    root.geometry('800x800')
    root.resizable(False, False)
    gui.mainloop()
if __name__ == '__main__':
main()
| SonThanhNguyen13/stegano | GUI_extract.py | GUI_extract.py | py | 9,048 | python | en | code | 0 | github-code | 90 |
44531349193 | from flask import Flask, redirect, session, request, jsonify, send_from_directory
from flask_restful import Api, Resource, reqparse
#from flask_cors import CORS #comment this on deployment
from validate_email_address import validate_email
from flask_cors import CORS, cross_origin
from flask_session import Session
import datetime
import dynamodb_handler
from flask_mail import *
from random import *
# from app import app
from validate_email_address import validate_email
from flask_socketio import SocketIO, emit
from flask_login import LoginManager, logout_user
app = Flask(__name__, static_url_path='', static_folder='frontend/build')
app.config['MAIL_SERVER']='smtp.mailtrap.io'
app.config['MAIL_PORT'] = 2525
app.config['MAIL_USERNAME'] = 'bdf508ef969ff3'
app.config['MAIL_PASSWORD'] = 'be45ecdec2e16e'
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
app.config['CORS_HEADERS'] = 'Content-Type'
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
#CORS(app) #comment this on deployment
api = Api(app)
mail = Mail(app)
server_session = Session(app)
socketio = SocketIO(app)
import re
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
def check(email):
    """Return True when `email` fully matches the module-level address
    regex, False otherwise."""
    return re.fullmatch(regex, email) is not None
@socketio.on('disconnect')
def disconnect_user():
    """Socket.IO disconnect hook: log the user out and drop their session.

    pop() is given a default so a client that disconnects before ever
    authenticating does not raise KeyError (the original crashed here
    when 'email'/'role' were absent from the session).
    """
    logout_user()
    session.pop('email', None)
    session.pop('role', None)
@app.route("/clear", methods=['GET'])
def serve():
session.clear()
return {'message':'cleared'}
@app.route("/dashboard",methods=['GET'])
def get_current_user():
email = session.get("email")
role = session.get("role")
if not email:
return jsonify({"error": "Unauthorized"}), 401
return jsonify({
"email": email,
"role": role
})
@app.route('/index', methods=['POST'])
def index():
    """Two-step OTP login: a request with 'email' sends an OTP by mail;
    a request with 'otp' verifies it and establishes the session."""
    json = request.json
    if 'otp' in json:
        realOtp = session.get("otp")
        userOtp = json['otp']
        # NOTE(review): these prints leak the OTP to server logs.
        print(userOtp)
        print(realOtp)
        # NOTE(review): int() raises ValueError on a non-numeric OTP.
        if int(userOtp) == realOtp:
            now = datetime.datetime.now()
            date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
            dynamodb_handler.updatelog(json['email'], date_time)
            res = dynamodb_handler.GetUser(json['email'])
            print(res)
            if 'Item' in res and 'email' in res['Item']:
                session["email"] = json['email']
                session["role"] = res['Item']['role']
                return jsonify({
                    'status': 'authenticated'
                })
            # NOTE(review): correct OTP but unknown user falls through to
            # the 'email' branch below and silently re-sends an OTP --
            # confirm that is intended.
        else:
            return jsonify({
                'status': 'wrong_otp'
            }), 500
    if 'email' in json:
        userEmail = json['email']
        print (check(userEmail))
        if check(userEmail):
            # Store the OTP in the session and mail it to the user.
            session['otp'] = randint(100000,999999)
            msg = Message('OTP',sender = '176ca7a4c9-97c23c+1@inbox.mailtrap.io', recipients = [userEmail])
            msg.body = str(session.get("otp"))
            mail.send(msg)
            return jsonify({
                'status': 'requested_otp'
            })
        else:
            return jsonify({
                'status': 'invalid_email'
            })
    else:
        return jsonify({
            'status': 'requested_email'
        })
@app.route("/get_users",methods=['GET'])
def get_users():
res = dynamodb_handler.scan_user()
return jsonify(res['Items'])
@app.route("/get_assets",methods=['GET'])
def get_assets():
res = dynamodb_handler.scan_machine_data()
return jsonify(res['Items'])
@app.route("/add_user",methods=['POST'])
def add_user():
json = request.json
user = json['email']
role = json['role']
#aws stuff
dynamodb_handler.addUser(user,'caterpillar', '',role)
return jsonify({
'status':'updated',
'message':(user + " updated to have role: " + role)
})
@app.route("/update_user",methods=['POST'])
def update_user():
json = request.json
user = json['email']
role = json['role']
print(user)
print(role)
#aws stuff
dynamodb_handler.UpdateUserRole(user,role)
return jsonify({
'status':'updated',
'message':(user + " updated to have role: " + role)
})
@app.route("/delete_user",methods=['POST'])
def delete_user():
json = request.json
user = json['email']
#aws stuff
dynamodb_handler.DeleteUser(user)
return jsonify({
'status':'deleted',
'message':(user + " deleted ")
})
# @app.route('/dash', methods=['POST'])s
# def dashboard():
# return jsonify({})
@app.route("/test", defaults={'path':''}, methods = ['POST'])
def test(path):
return {
'resultStatus': 'SUCCESS',
'message': "test"
}
# api.add_resource(HelloApiHandler, '/flask/hello')
# api.add_resource(SignInHandler, '/index')
| Aaryanmukherjee/CatHack2022 | app.py | app.py | py | 4,926 | python | en | code | 0 | github-code | 90 |
5223285727 | #!/usr/bin/env python3
Infinite = 1000000
RATE_DEATH = 100
# Dangers
DANGER_RATE_ZOMBIE = RATE_DEATH #death
DANGER_RATE_NEAR_ZOMBIE_FACE = RATE_DEATH #int(RATE_DEATH*0.9)
DANGER_RATE_NEAR_ZOMBIE_BACK = (RATE_DEATH*2)//3
DANGER_RATE_ZOMBIE_EXIT = RATE_DEATH//2
DANGER_RATE_NEAR_PLAYER_BACK = (RATE_DEATH*2)//3 #can fire - high risk
DANGER_RATE_NEAR_PLAYER_FACE = (RATE_DEATH*8)//10 #can fire - high risk
DANGER_RATE_NEAR_PLAYER_DIAG = RATE_DEATH//2
DANGER_RATE_PLAYER = RATE_DEATH//2 #mrisky to jump - can move and fire
DANGER_DIST_RADIUS = 5
DANGER_DIST_DECAY = 10
DANGERZ_DIST_RADIUS = 3
DANGERZ_DIST_DECAY = 3
RATE_MOVE_STEP = [0, 1, 2.1]
RATE_MOVE_GOLD_DEFAULT = -5
#RATE_MOVE_GOLD = RATE_MOVE_GOLD_DEFAULT
RATE_MOVE_PERK = -5
RATE_MOVE_TARGET = -10
MOVE_RATE_RISKY = (RATE_DEATH*2)//3
RATE_FIRE_100 = 100
RATE_FIRE_0 = 0
RATE_FIRE_NEIGHBOUR = RATE_FIRE_100
RATE_FIRE_DUELER = 50
RATE_FIRE_PLAYER = 10
RATE_FIRE_PLAYERN = 6
RATE_FIRE_ZOMBIE = 10
RATE_FIRE_ZOMBIEN = 8
RATE_FIRE_STARTS = 1
RATE_FIRE_MAKE_SHOT = 5
RATE_FIRE_SCALES = [1, 1, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
CUBE_HISTORY_LEN = 5
CUBE_LONG_HISTORY_LEN = 10
ATTACK_STARTS_RANGE_MIN = 2
ATTACK_STARTS_RANGE_MAX = 2
# BERSERK_STARTS_RANGE_MIN = 1
# BERSERK_STARTS_RANGE_MAX = 2
BERSERK_STARTS_RANGE_MIN = 1
BERSERK_STARTS_RANGE_MAX = 1
BERSERK_ALLOW_DUEL = True
#BERSERRK_AUTOSTART_NO_EXITS = 10
VISIBILITY_DURATION = 100
| BlackVS/Bots | EPAM/2020/Zombie/current/game_rates.py | game_rates.py | py | 1,460 | python | en | code | 1 | github-code | 90 |
71775290536 | __author__ = 'Fabian Gebhart'
# This file "AO_reset.py" resets the Adaptive Optics Model, to
# start all over again. If, for any reason, the main program
# should be confused or # messed up. Just quit it and run this
# file. It iterates through all # steppers and assigns the
# found (moving) laser points. For more info see:
# https://github.com/fgebhart/adaptive-optics-model
# import the necessary packages
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import RPi.GPIO as GPIO
import os
# make sure the close.log file is existing in order to
# successfully run this file - see tk_ao.py
os.system('sudo touch /home/pi/close.log')
# allow camera to wake up
# time.sleep(2)
# enable Pi-Camera and set resolution
camera = PiCamera()
camera.resolution = (256, 256)
rawCapture = PiRGBArray(camera, size=(256, 256))
# Time delay for stepper motors 0.0008 is smallest working delay
# looks like 0.001 works better... stepper moving more smooth
delay = 0.001
# Movement pattern for "half-stepping" method, counter clockwise
# [1, 0, 0, 0], # 0
# [1, 1, 0, 0], # 1
# [0, 1, 0, 0], # 2
# [0, 1, 1, 0], # 3
# [0, 0, 1, 0], # 4
# [0, 0, 1, 1], # 5
# [0, 0, 0, 1], # 6
# [1, 0, 0, 1]] # 7
# same movement pattern, but only editing the different bits,
# leads to better performance (= smaller delay)
MOVE_PATTERN = [
(1, GPIO.HIGH), # to 1
(0, GPIO.LOW), # to 2
(2, GPIO.HIGH), # ...
(1, GPIO.LOW),
(3, GPIO.HIGH),
(2, GPIO.LOW),
(0, GPIO.HIGH),
(3, GPIO.LOW) # to 0
]
# Set "GPIO-Mode" to BCM = Board Setup
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
number_of_steppers = 5
stepperPins = [
# ___0___1___2___3
[6, 13, 19, 26], # stepper 1
[12, 16, 20, 21], # stepper 2
[14, 15, 18, 23], # stepper 3
[7, 8, 25, 24], # stepper 4
[22, 27, 17, 4]] # stepper 5
# define the pins of the steppers as outputs
GPIO.setup(stepperPins[0], GPIO.OUT)
GPIO.setup(stepperPins[1], GPIO.OUT)
GPIO.setup(stepperPins[2], GPIO.OUT)
GPIO.setup(stepperPins[3], GPIO.OUT)
GPIO.setup(stepperPins[4], GPIO.OUT)
# initialize steppers to INIT_PATTERN, that is, the first part
# of the sequence
for stepper in stepperPins:
GPIO.output(stepper[0], 1)
GPIO.output(stepper[1], 0)
GPIO.output(stepper[2], 0)
GPIO.output(stepper[3], 0)
# Current position of the steppers in the move-sequence: relates
# to MOVE_PATTERN
stepperPositions = [0, 0, 0, 0, 0]
def get_laser_points(image):
    """Return up to five centers of laser points found in `image`,
    sorted left-to-right, as (x, y) coordinate tuples."""
    # Color boundaries for the red laser (appears white on screen),
    # in the camera's BGR channel order.  These work even in bright
    # rooms; dimly lit rooms may need lower bounds around (190,190,190).
    whiteLower = (150, 150, 180)
    whiteUpper = (255, 255, 255)
    mask = cv2.inRange(image, whiteLower, whiteUpper)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    centroids = []
    for contour in contours:
        moments = cv2.moments(contour)
        # Skip degenerate contours to avoid division by zero.
        if moments['m00'] != 0:
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            centroids.append((cx, cy))
    # Tuple sort orders by x first, i.e. left to right on screen.
    return sorted(centroids)[:5]
def move_stepper(stepper, steps_to_perform):
    """Move a single stepper (index 0..4) by `steps_to_perform`
    half-steps; negative turns clockwise, positive counter-clockwise."""
    pins = stepperPins[stepper]
    backwards = steps_to_perform < 0
    for _ in range(abs(steps_to_perform)):
        if backwards:  # CLOCK-WISE
            # Step the position counter back first, then invert the pin
            # level from the forward half-step pattern.
            stepperPositions[stepper] -= 1
            pin_index, level = MOVE_PATTERN[stepperPositions[stepper]
                                            % len(MOVE_PATTERN)]
            level = not level
        else:  # COUNTER-CLOCK-WISE
            # Apply the pattern at the current position, then advance.
            pin_index, level = MOVE_PATTERN[stepperPositions[stepper]
                                            % len(MOVE_PATTERN)]
            stepperPositions[stepper] += 1
        GPIO.output(pins[pin_index], level)
        time.sleep(delay)
def move_steppers(steps_to_perform_per_stepper):
    """Moves all steppers in parallel for the given movement
    parameters. steps_to_perform_per_stepper is a list like:
    [2, 400, 0, -20, -200]

    Interleaves one half-step per stepper per outer iteration so all
    five motors appear to move simultaneously; each stepper stops once
    its own step budget is exhausted.
    """
    absolute_list = [0, 0, 0, 0, 0]
    for i in range(number_of_steppers):
        absolute_list[i] = abs(steps_to_perform_per_stepper[i])
    # The longest requested movement dictates the number of rounds.
    max_steps = max(absolute_list)
    for step in range(max_steps):
        for stepper in range(number_of_steppers):
            if abs(steps_to_perform_per_stepper[stepper])\
                    > step:
                if steps_to_perform_per_stepper[stepper] < 0:
                    # CLOCK-WISE: decrement position, invert pin level.
                    stepperPositions[stepper] -= 1
                    move =\
                        MOVE_PATTERN[stepperPositions[stepper]
                                     % len(MOVE_PATTERN)]
                    move = (move[0], not move[1])
                else: # COUNTER-CLOCK-WISE
                    move =\
                        MOVE_PATTERN[stepperPositions[stepper]
                                     % len(MOVE_PATTERN)]
                    stepperPositions[stepper] += 1
                pins = stepperPins[stepper]
                GPIO.output(pins[move[0]], move[1])
                time.sleep(delay)
def log(*args):
    """Debug logging hook: a no-op by default.  Uncomment the print
    below to echo diagnostic messages to stdout."""
    # print(args)
    return None
def find_movement_on_screen(last_laser_points,
                            current_laser_points):
    """Compare two frames' (x, y) laser coordinates and return the
    first current point whose x moved more than 3 px since the last
    frame, or None when nothing moved that far."""
    threshold = 3
    # x-axis deltas between the previous and the current frame,
    # paired positionally (left-to-right ordering).
    x_deltas = [prev[0] - cur[0]
                for prev, cur in zip(last_laser_points,
                                     current_laser_points)]
    log("difference_list:", x_deltas)
    for index, delta in enumerate(x_deltas):
        if abs(delta) > threshold:
            return current_laser_points[index]
    return None
def match_laser_to_stepper(matched_list):
    """moves the current stepper in order to find a movement on
    the screen. If movement is found, the current stepper is
    assigned to found coordinates of the laser

    Reads the module globals `last_laser_points` and `laser_points`
    (updated by the main camera loop).  Returns None while matching is
    still in progress, or `matched_list` once all entries are filled.
    """
    step_size = 2
    current_stepper = 0
    # find out which laser is not yet matched to determine the
    # stepper to move
    for i in range(0, number_of_steppers):
        if matched_list[i] == (0, 0):
            current_stepper = i
            break
    # check if last_laser_points is already fetched (here it
    # needs to buffer at least one frame to avoid finding
    # "movement in the first frame")
    if last_laser_points is not None:
        if len(last_laser_points) > len(laser_points):
            # if laser left screen, we got to move it even more
            # (8) backwards to enter screen again
            move_stepper(current_stepper, (-1) * step_size * 20)
        else:
            # check out the value of the matched_list and find
            # the relating lasers where value == 0
            if matched_list[current_stepper] == (0, 0):
                # if there is no movement on the screen
                # -> keep turning the current stepper
                if find_movement_on_screen(last_laser_points,
                                           laser_points) is None:
                    move_stepper(current_stepper,
                                 step_size * 16)
                # else: Movement is found, store it in the
                # matched_list at index "current_stepper"
                else:
                    matched_list[current_stepper]\
                        = find_movement_on_screen(last_laser_points,
                                                  laser_points)
                    log("inserted coordinates in matched_list,"
                        "switching to next stepper")
                    log("matched list:", matched_list)
                    current_stepper += 1
                return None
            else:
                log("All lasers are matched to the steppers")
                log("matched list:", matched_list)
                return matched_list
def get_laser_on_position(matched_list):
    """Move lasers to their starting (goal) position

    Converts each laser's pixel offset from `goal_position` into step
    counts (sign encodes direction) and drives all steppers at once.
    """
    # initialize lists
    way_to_go_in_steps = [0, 0, 0, 0, 0]
    log("goal position:", goal_position)
    # calculating the way from current position (matched_list)
    # to start_position
    for i in range(0, len(matched_list)):
        way_to_go_in_steps[i] = int((matched_list[i][0]
                                     - goal_position[i])* pixel_to_steps_coefficient)
    # determine direction, whether laser is left or right of
    # the starting position
    log("way to go in steps:", way_to_go_in_steps)
    log("Attention... Moving Steppers")
    # Brief pause so the operator can clear the rig before motion.
    time.sleep(2)
    move_steppers(way_to_go_in_steps)
def stabilize_laser(laser_points):
    """Nudge each laser back toward its goal position (P-controller).

    The pixel error of every detected laser is scaled by the module-level
    ``pixel_to_steps_coefficient`` and damped by ``gain_factor`` before
    being sent to ``move_steppers``.  The list is padded to five entries
    so the stepper interface always receives a full set.
    """
    way_to_correct = [0, 0, 0, 0, 0]
    for idx, point in enumerate(laser_points):
        # Proportional correction: pixel error -> steps, damped by gain.
        offset_px = point[0] - goal_position[idx]
        way_to_correct[idx] = int(offset_px * pixel_to_steps_coefficient * gain_factor)
    log("way to correct:", way_to_correct)
    move_steppers(way_to_correct)
# initialize the variables of the match -> position -> stabilize state machine:
lasers_matched = False
laser_positions_initialized = False  # NOTE(review): never read below — apparently dead
laser_positions_reached = False
last_laser_points = None
# one (x, y) slot per stepper; (0, 0) means "not yet matched"
matched_list = [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0)]
# target x-coordinates (pixels) for the five lasers
goal_position = [70, 99, 128, 157, 186]
# pixel_to_steps_coefficient = 0.55
pixel_to_steps_coefficient = 0.55  # conversion factor: pixel offset -> stepper steps
# good results with 0.9
gain_factor = 0.3  # proportional gain for the stabilisation loop
# counter for letting it run 5 more images to stabilize
# the lasers before the program ends
counter = 0
# another counter for letting the camera warmup
# in order to avoid missing the first movement
# NOTE(review): name is a typo ("cocunter") and it is never used below
warm_up_cocunter = 0
# While loop for loading, interpreting and showing frames
while True:
    camera.capture(rawCapture, format="bgr",
                   use_video_port=True)
    # grab the raw NumPy array representing the image
    image = rawCapture.array
    # find laser dot centers in the accumulated image
    laser_points = get_laser_points(image)
    # limit number of found centers to number of steppers
    laser_points = laser_points[:number_of_steppers]
    # if all lasers reached their goal position, stabilize them on
    # every subsequent frame
    if laser_positions_reached is True:
        stabilize_laser(laser_points)
        # allow 5 more frames of stabilisation, then end the program
        if counter < 5:
            counter += 1
        else:
            break
    # phase 1: associate each detected laser with its stepper
    if not lasers_matched:
        matched_lasers = match_laser_to_stepper(matched_list)
        # match_laser_to_stepper returns the list (instead of None)
        # once every stepper has a coordinate
        if matched_lasers is not None:
            lasers_matched = True
    # phase 2: once matched, drive the lasers to their goals (runs once)
    if lasers_matched and not laser_positions_reached:
        get_laser_on_position(matched_list)
        laser_positions_reached = True
    # remember this frame's points so the next iteration can detect movement
    last_laser_points = laser_points
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # external shutdown signal: deleting close.log stops the loop
    if os.path.isfile('/home/pi/close.log') is False:
        break
    # if the `q` key was pressed, break from the loop
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
# release GPIO pins and remove the sentinel file on the way out
GPIO.cleanup()
os.remove('/home/pi/close.log')
| fgebhart/adaptive-optics-model | code/AO_reset_old.py | AO_reset_old.py | py | 12,786 | python | en | code | 5 | github-code | 90 |
30298551050 | from kivy.app import App
from kivy.uix.screenmanager import Screen
from kivy.factory import Factory
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
import os
class Editor(Screen):
    # NOTE(review): this class is shadowed by the `Editor(App)` definition
    # further down, so this name never refers to the Screen subclass at
    # runtime — confirm whether it is still needed (e.g. from a .kv rule)
    # and rename one of the two if so.
    pass
class LoadDialog(FloatLayout):
    """Content widget for the file-chooser popup (layout presumably
    defined in the .kv file)."""
    # callback fired with (path, selection) when the user confirms a load
    load = ObjectProperty(None)
    # callback fired to close the dialog without loading
    cancel = ObjectProperty(None)
class SaveDialog(FloatLayout):
    """Content widget for a save-file popup.

    NOTE(review): nothing in this file opens or wires up this dialog —
    confirm it is referenced from the .kv file or remove it.
    """
    # callback fired when the user confirms a save
    save = ObjectProperty(None)
    # text widget holding the content to save — presumably; verify in .kv
    text_input = ObjectProperty(None)
    # callback fired to close the dialog without saving
    cancel = ObjectProperty(None)
class Root(FloatLayout):
    """Root widget: hosts the text area, the load-dialog plumbing and a
    small two-value sum calculator."""

    # Kivy properties, bound from the .kv rule
    loadfile = ObjectProperty(None)
    savefile = ObjectProperty(None)  # NOTE(review): unused in this file — confirm
    text_input = ObjectProperty(None)

    def dismiss_popup(self):
        """Close the currently open popup."""
        self._popup.dismiss()

    # Responds to the LOAD button click
    def show_load(self):
        """Open a popup containing the file-chooser LoadDialog."""
        content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
        self._popup = Popup(title="Load file", content=content,
                            size_hint=(0.4, 1.0))
        self._popup.open()

    def load(self, path, filename):
        """Read the first selected file into the text input, then close
        the dialog."""
        with open(os.path.join(path, filename[0])) as stream:
            self.ids.text_input.text = stream.read()
        self.dismiss_popup()

    def calc(self):
        """Sum the two card values and display the result.

        NOTE(review): float() raises ValueError on empty or non-numeric
        input — consider validating before converting.
        """
        c1 = float(self.ids.cartao1.text)
        c2 = float(self.ids.cartao2.text)
        self.ids.text_input.text = str(c1 + c2)
class Editor(App):
    """Kivy application entry point.

    By Kivy's naming convention this loads ``editor.kv`` automatically.
    NOTE(review): this definition shadows the ``Editor(Screen)`` class
    declared earlier in the file.
    """
    pass
# Register the widget classes with Kivy's Factory so .kv rules can
# instantiate them by name.
Factory.register('Root', cls=Root)
Factory.register('LoadDialog', cls=LoadDialog)
Factory.register('SaveDialog', cls=SaveDialog)

if __name__ == '__main__':
    Editor().run()
18814510657 | import pytz
import lxml
import dateutil.parser
import datetime
import re
from utils import LXMLMixin
from openstates.scrape import Scraper, Event
from openstates.exceptions import EmptyScrape
class MAEventScraper(Scraper, LXMLMixin):
    """Scrape committee hearings and special events from malegislature.gov."""

    _TZ = pytz.timezone("US/Eastern")
    # date format expected by the FilterEventResults endpoint
    date_format = "%m/%d/%Y"
    # NOTE(review): presumably disables TLS verification in the base
    # Scraper; self.post below also passes verify=False explicitly.
    verify = False
    # number of non-Session events emitted; used to detect an empty scrape
    non_session_count = 0

    def scrape(self, chamber=None, start=None, end=None):
        """Yield Event objects for every non-Session event in the window.

        ``start``/``end`` are "YYYY-MM-DD" strings; each defaults to
        30 days before/after today.  Raises EmptyScrape when no events
        were found.
        """
        dtdelta = datetime.timedelta(days=30)
        # default to 30 days back if no start
        if start is None:
            start_date = datetime.datetime.now() - dtdelta
        else:
            start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
        start_date = start_date.strftime(self.date_format)

        # default to 30 days if no end
        if end is None:
            end_date = datetime.datetime.now() + dtdelta
        else:
            end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
        end_date = end_date.strftime(self.date_format)

        url = "https://malegislature.gov/Events/FilterEventResults"

        params = {
            "EventType": "",
            "Branch": "",
            "EventRangeType": "",
            "StartDate": start_date,
            "EndDate": end_date,
            "X-Requested-With": "XMLHttpRequest",
        }

        page = self.post(url, params, verify=False)
        page = lxml.html.fromstring(page.content)
        page.make_links_absolute("https://malegislature.gov/")

        rows = page.xpath("//table[contains(@class,'eventTable')]/tbody/tr")
        for row in rows:
            # Some rows have an additional TD at the start,
            # so index em all as offsets from the row's own cell count
            td_ct = len(row.xpath("td"))

            # Skip meetings of the chamber
            event_type = row.xpath("string(td[{}])".format(td_ct - 3))
            if event_type == "Session":
                continue

            url = row.xpath("td[{}]/a/@href".format(td_ct - 2))[0]
            yield from self.scrape_event_page(url, event_type)

        if self.non_session_count == 0:
            raise EmptyScrape

    def scrape_event_page(self, url, event_type):
        """Yield a single Event scraped from an event detail page."""
        page = self.lxmlize(url)
        page.make_links_absolute("https://malegislature.gov/")

        title = page.xpath('string(//div[contains(@class,"followable")]/h1)')
        title = title.replace("Hearing Details", "").strip()
        title = title.replace("Special Event Details", "")

        # Event information list: dd[2] = date, dd[3] = time,
        # dd[4] = location, dd[5] = description.
        start_day = page.xpath(
            '//dl[contains(@class,"eventInformation")]/dd[2]/text()[last()]'
        )[0].strip()
        start_time = page.xpath(
            'string(//dl[contains(@class,"eventInformation")]/dd[3])'
        ).strip()

        # If an event gets moved, ignore the original time
        start_time = re.sub(
            r"Original Start Time(.*)New Start Time(\n*)",
            "",
            start_time,
            flags=re.IGNORECASE | re.MULTILINE | re.DOTALL,
        )

        # Prefer the linked location; fall back to the cell's plain text.
        location = page.xpath(
            'string(//dl[contains(@class,"eventInformation")]/dd[4]//a)'
        ).strip()
        if location == "":
            location = page.xpath(
                'string(//dl[contains(@class,"eventInformation")]/dd[4])'
            ).strip()

        description = page.xpath(
            'string(//dl[contains(@class,"eventInformation")]/dd[5])'
        ).strip()

        start_date = self._TZ.localize(
            dateutil.parser.parse("{} {}".format(start_day, start_time))
        )

        event = Event(
            start_date=start_date,
            name=title,
            location_name=location,
            description=description,
        )
        event.add_source(url)

        agenda_rows = page.xpath(
            '//div[contains(@class,"col-sm-8") and .//h2[contains(@class,"agendaHeader")]]'
            '/div/div/div[contains(@class,"panel-default")]'
        )

        for row in agenda_rows:
            # only select the text node, not the spans
            agenda_title = row.xpath(
                "string(.//h4/a/text()[normalize-space()])"
            ).strip()

            if agenda_title == "":
                agenda_title = row.xpath(
                    "string(.//h4/text()[normalize-space()])"
                ).strip()

            agenda = event.add_agenda_item(description=agenda_title)

            bills = row.xpath(".//tbody/tr/td[1]/a/text()")
            for bill in bills:
                # normalize bill ids: dots become spaces (e.g. "H.110" -> "H 110")
                bill = bill.strip().replace(".", " ")
                agenda.add_bill(bill)

        if event_type == "Hearing":
            event.add_participant(title, type="committee", note="host")

        video_srcs = page.xpath("//video/source")
        if video_srcs:
            for video_src in video_srcs:
                video_url = video_src.xpath("@src")[0].strip()
                video_mime = video_src.xpath("@type")[0]
                event.add_media_link("Hearing Video", video_url, video_mime)

        self.non_session_count += 1
        yield event
| openstates/openstates-scrapers | scrapers/ma/events.py | events.py | py | 4,929 | python | en | code | 820 | github-code | 90 |
18362545079 | from heapq import heapify, heappush, heappop
def divisor(n):
    """Return all positive divisors of n in ascending order.

    Trial-divides up to sqrt(n), collecting each small divisor i and its
    cofactor n // i.  Uses integer division for the perfect-square test
    (the original compared against the float n / i, which loses
    precision for very large n) and avoids the final sort by merging the
    two naturally ordered halves.
    """
    small = []  # divisors <= sqrt(n), produced in increasing order
    large = []  # matching cofactors > sqrt(n), produced in decreasing order
    i = 1
    while i * i <= n:
        if n % i == 0:
            small.append(i)
            if i != n // i:  # skip the duplicate root of a perfect square
                large.append(n // i)
        i += 1
    return small + large[::-1]
# Read N, K and the array A.  The answer must divide sum(A), because the
# allowed operation (+1 to one element, -1 to another) keeps the sum fixed.
N, K = map(int, input().split())
A = list(map(int, input().split()))
divisors = divisor(sum(A))
# Try candidate divisors from largest to smallest; print the first
# feasible one.
for d in divisors[::-1]:
    heap = []
    s = 0
    for a in A:
        # x = -(a mod d), i.e. the (negated) remainder; always in (-d, 0].
        x = a // d * d - a
        heap.append(x)
        s += -x  # s accumulates the total remainder
    heapify(heap)
    n = 0
    # Move s // d elements (largest remainders first, since the heap is a
    # min-heap of negated remainders) into the "round up" group by adding d.
    for _ in range(s // d):
        x = heappop(heap)
        if x + d > K:
            # a single upward adjustment already exceeds the budget
            break
        else:
            if x + d > 0:
                n += x + d
            heappush(heap, x + d)
    else:
        # sum of |adjustments| counts both the +1 and the -1 side of each
        # operation, hence the comparison against 2 * K
        if sum(abs(x) for x in heap) <= 2 * K:
            print(d)
            exit()
exit()
| Aasthaengg/IBMdataset | Python_codes/p02955/s700446557.py | s700446557.py | py | 837 | python | en | code | 0 | github-code | 90 |
72096022057 | import logging
import json

import requests
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------------------#
def create_component_inventory_item(baseURL, projectID, componentId, componentVersionId, licenseId, authToken, inventoryItemName):
    """Create a COMPONENT-type inventory item for a project.

    Builds the JSON payload for a component inventory item and posts it
    via create_inventory_item.

    Returns the parsed API response on success, or a dict with an
    "error" key on failure (see create_inventory_item).
    """
    logger.debug("Entering create_component_inventory_item")

    # Build the payload with json.dumps instead of string concatenation
    # so names containing quotes or backslashes are escaped correctly.
    component_body = json.dumps({
        "projectId": str(projectID),
        "inventoryModel": {
            "name": inventoryItemName,
            "inventoryType": "COMPONENT",
            "component": {
                "id": str(componentId),
                "versionId": str(componentVersionId),
                "licenseId": str(licenseId),
            },
        },
    })

    response = create_inventory_item(baseURL, authToken, component_body)
    return response
#------------------------------------------------------------------------------------------#
def create_work_in_progress_inventory_item(baseURL, projectID, authToken, inventoryItemName):
    """Create a WORK_IN_PROGRESS-type inventory item for a project.

    Returns the parsed API response on success, or a dict with an
    "error" key on failure (see create_inventory_item).
    """
    logger.debug("Entering create_work_in_progress_inventory_item")

    # json.dumps escapes special characters in the item name, which the
    # previous string-concatenated payload did not.
    WIP_body = json.dumps({
        "projectId": projectID,
        "inventoryModel": {
            "name": inventoryItemName,
            "inventoryType": "WORK_IN_PROGRESS",
        },
    })

    response = create_inventory_item(baseURL, authToken, WIP_body)
    return response
#------------------------------------------------------------------------------------------#
def create_inventory_item(baseURL, authToken, inventoryItemBody):
    """POST a prebuilt inventory payload to the Code Insight REST API.

    baseURL           -- root URL of the Code Insight server
    authToken         -- bearer token for the Authorization header
    inventoryItemBody -- JSON string describing the inventory item

    Returns the parsed JSON response on HTTP 201 (created); otherwise
    returns a dict of the form {"error": <details>}.
    """
    # debug (not info) to match the sibling create_* helpers
    logger.debug("Entering create_inventory_item")

    RESTAPI_BASEURL = baseURL + "/codeinsight/api/"
    ENDPOINT_URL = RESTAPI_BASEURL + "inventories/"
    RESTAPI_URL = ENDPOINT_URL
    logger.debug("    RESTAPI_URL: %s" % RESTAPI_URL)

    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + authToken}

    # Make the REST API call with the inventory data
    try:
        response = requests.post(RESTAPI_URL, headers=headers, data=inventoryItemBody)
    except requests.exceptions.RequestException as error:  # network-level failure
        logger.error(error)
        return {"error": error}

    # A response came back from Code Insight; 201 means the item was created
    if response.status_code == 201:
        return response.json()
    else:
        logger.error("Response code %s - %s" % (response.status_code, response.text))
        return {"error": response.text}
30615893043 | import pytest
from sodic.drawables import Rectangle
from sodic.drawables.annotations import BoundingBox, Segmentation
@pytest.mark.parametrize(
    "rectangle,expected_segmentation",
    [
        (Rectangle(10, 10, 60, 60), Segmentation([10, 10, 60, 10, 60, 60, 10, 60])),
        (
            Rectangle(10.5, 10.5, 20, 20),
            Segmentation([10.5, 10.5, 20, 10.5, 20, 20, 10.5, 20]),
        ),
    ],
)
def test_segmentation_calculation(
    rectangle: Rectangle, expected_segmentation: Segmentation
):
    """A rectangle's segmentation lists its corner coordinates."""
    assert rectangle.segmentation == expected_segmentation
@pytest.mark.parametrize(
    "rectangle,expected_area",
    [(Rectangle(10, 10, 60, 60), 2500), (Rectangle(10.5, 10.5, 20, 20), 90.25)],
)
def test_area_calculation(rectangle: Rectangle, expected_area: float):
    """A rectangle's area matches the expected value."""
    assert rectangle.area == expected_area
@pytest.mark.parametrize(
    "rectangle,expected_bounding_box",
    [
        (Rectangle(10, 10, 60, 60), BoundingBox(10, 10, 50, 50)),
        (Rectangle(10.5, 10.5, 20, 20), BoundingBox(10.5, 10.5, 9.5, 9.5)),
    ],
)
def test_bbox_calculation(rectangle: Rectangle, expected_bounding_box: BoundingBox):
    """A rectangle's bounding box matches the expected value."""
    assert rectangle.bbox == expected_bounding_box
| Xalanot/sodic | tests/drawables/rectangle_test.py | rectangle_test.py | py | 1,276 | python | en | code | 0 | github-code | 90 |
38736489250 | import torch
import math
import torch.nn as nn
import torch.nn.functional as F
#---bam---
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension.

    (B, C, H, W) -> (B, C*H*W).  Uses torch.flatten, which also handles
    non-contiguous inputs where ``x.view(x.size(0), -1)`` would raise.
    """
    def forward(self, x):
        return torch.flatten(x, 1)
class ChannelGate(nn.Module):
    """Channel attention branch of BAM.

    Global-average-pools the spatial dimensions, then runs the pooled
    vector through a small MLP (Linear -> BatchNorm1d -> ReLU blocks,
    ``num_layers`` deep) that bottlenecks the channel count by
    ``reduction_ratio``.  The result is broadcast back to the input's
    shape so it can be combined with the spatial attention map.
    """
    def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
        super(ChannelGate, self).__init__()
        self.gate_c = nn.Sequential()
        self.gate_c.add_module('flatten', Flatten())
        # MLP channel sizes: C -> C/r (num_layers times) -> C
        gate_channels = [gate_channel]
        gate_channels += [gate_channel // reduction_ratio] * num_layers
        gate_channels += [gate_channel]
        for i in range(len(gate_channels) - 2):
            # NOTE: the bn/relu indices start at i+1 while fc starts at i;
            # kept as-is so existing checkpoints still load by name.
            self.gate_c.add_module('gate_c_fc_%d' % i,
                                   nn.Linear(gate_channels[i], gate_channels[i + 1]))
            self.gate_c.add_module('gate_c_bn_%d' % (i + 1),
                                   nn.BatchNorm1d(gate_channels[i + 1]))
            self.gate_c.add_module('gate_c_relu_%d' % (i + 1), nn.ReLU())
        self.gate_c.add_module('gate_c_fc_final',
                               nn.Linear(gate_channels[-2], gate_channels[-1]))

    def forward(self, in_tensor):
        # Global average pool; assumes square spatial input (kernel = H).
        avg_pool = F.avg_pool2d(in_tensor, in_tensor.size(2), stride=in_tensor.size(2))
        return self.gate_c(avg_pool).unsqueeze(2).unsqueeze(3).expand_as(in_tensor)

    def init_weight(self):
        # NOTE(review): self.children() yields only the gate_c Sequential
        # (never a Conv2d), so this is currently a no-op — confirm whether
        # self.modules() (recursive) was intended.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None:
                    nn.init.constant_(ly.bias, 0)

    def get_params(self):
        """Split parameters into weight-decay (conv/linear weights) and
        no-weight-decay (biases, batch-norm) groups."""
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
                # Fix: this gate uses BatchNorm1d, which the original
                # BatchNorm2d-only check silently skipped, leaving the BN
                # affine parameters out of both optimizer groups.
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
class SpatialGate(nn.Module):
    """Spatial attention branch of BAM.

    Reduces channels by ``reduction_ratio`` via a 1x1 conv, applies
    ``dilation_conv_num`` dilated 3x3 convolutions (dilation
    ``dilation_val``) to enlarge the receptive field, then collapses to a
    single-channel map that is expanded back to the input's shape.
    """
    def __init__(self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4):
        super(SpatialGate, self).__init__()
        self.gate_s = nn.Sequential()
        # 1x1 bottleneck: C -> C / reduction_ratio
        self.gate_s.add_module('gate_s_conv_reduce0',
                               nn.Conv2d(gate_channel, gate_channel // reduction_ratio, kernel_size=1))
        self.gate_s.add_module('gate_s_bn_reduce0', nn.BatchNorm2d(gate_channel // reduction_ratio))
        self.gate_s.add_module('gate_s_relu_reduce0', nn.ReLU())
        for i in range(dilation_conv_num):
            # padding == dilation keeps the spatial size unchanged
            self.gate_s.add_module('gate_s_conv_di_%d' % i,
                                   nn.Conv2d(gate_channel // reduction_ratio,
                                             gate_channel // reduction_ratio,
                                             kernel_size=3,
                                             padding=dilation_val, dilation=dilation_val))
            self.gate_s.add_module('gate_s_bn_di_%d' % i, nn.BatchNorm2d(gate_channel // reduction_ratio))
            self.gate_s.add_module('gate_s_relu_di_%d' % i, nn.ReLU())
        # collapse to a single-channel attention map
        self.gate_s.add_module('gate_s_conv_final',
                               nn.Conv2d(gate_channel // reduction_ratio, 1, kernel_size=1))

    def forward(self, in_tensor):
        # Broadcast the 1-channel map across all input channels.
        return self.gate_s(in_tensor).expand_as(in_tensor)

    def init_weight(self):
        # NOTE(review): self.children() yields only the gate_s Sequential
        # (never a Conv2d), so this is currently a no-op — confirm whether
        # self.modules() (recursive) was intended.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None:
                    nn.init.constant_(ly.bias, 0)

    def get_params(self):
        """Split parameters into weight-decay (conv/linear weights) and
        no-weight-decay (biases, batch-norm) groups."""
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
class BAM(nn.Module):
    """Bottleneck Attention Module: refines a feature map with a gate
    built from a channel-attention and a spatial-attention branch.

    forward(x) returns (1 + sigmoid(channel_att(x) * spatial_att(x))) * x.

    NOTE(review): the original BAM paper combines the two branches
    additively; this implementation multiplies them — confirm intentional.
    """
    def __init__(self, gate_channel):
        super(BAM, self).__init__()
        self.channel_att = ChannelGate(gate_channel)
        self.spatial_att = SpatialGate(gate_channel)

    def forward(self, in_tensor):
        # Per-branch attention maps, both broadcast to in_tensor's shape.
        f_ch_att = self.channel_att(in_tensor)
        f_spar_att = self.spatial_att(in_tensor)
        # Residual-style gate in (1, 2): features are never fully suppressed.
        f_att = 1 + torch.sigmoid(f_ch_att * f_spar_att)
        output_refined_feature = f_att * in_tensor
        return output_refined_feature

    def init_weight(self):
        # NOTE(review): self.children() yields the two gate modules, never
        # a raw Conv2d, so this is currently a no-op — confirm whether
        # self.modules() (recursive) was intended.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None:
                    nn.init.constant_(ly.bias, 0)

    def get_params(self):
        """Split all parameters (including submodules') into weight-decay
        and no-weight-decay groups."""
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
                # Fix: ChannelGate contains BatchNorm1d layers, which the
                # original BatchNorm2d-only check silently left out of
                # both parameter groups.
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
23111029918 |
from src.pipe.recommend import RecommenderPipeline
import logging
from memory_profiler import profile as mem_profile
import warnings
warnings.filterwarnings("ignore")
def recommend_pipeline(key_skills_query):
    """Run the recommender pipeline for the given skills query.

    Returns the recommendation results, or None when the pipeline raised
    (the error is printed and logged).
    """
    try:
        return RecommenderPipeline().get_recommendations(key_skills_query)
    except Exception as err:
        print(err)
        logging.exception(err)
@mem_profile
def main():
    """Memory-profile a single recommendation run for a fixed query."""
    query = "data science python sql"
    try:
        recommend_pipeline(query)
    except Exception as err:
        print(err)
        logging.exception(err)
# Run the profiled entry point only when executed as a script.
if __name__ == "__main__":
    main()
| bsb4018/job_rec_ss_bsb | src/profile/predict_memory_profile.py | predict_memory_profile.py | py | 677 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.