seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3527241237 | # -*- coding: utf-8 -*-
import pytest
import requests
from jsonschema import validate
url_parts = ["", "/8094", "/search?query=Brewing",
"/autocomplete?query=dog"]
@pytest.mark.parametrize("params", url_parts)
def test_status_code(base_url, params):
"""Проверка кода состояния HTTP"""
target = base_url + params
response = requests.get(target)
assert response.status_code == 200
def test_api_json_schema(base_url):
"""Проверка схемы JSON для id = 9180"""
resp = requests.get(base_url + "/9180")
schema = {
"type": "object",
"properties": {
"id": {"type": "number"},
"obdb_id": {"type": "string"},
"name": {"type": "string"},
"brewery_type": {"type": "string"},
"street": {"type": "string"},
"address_2": {"type": "null"},
"address_3": {"type": "null"},
"city": {"type": "string"},
"state": {"type": "string"},
"country_province": {"type": "null"},
"postal_code": {"type": "string"},
"country": {"type": "string"},
"longitude": {"type": "string"},
"latitude": {"type": "string"},
"phone": {"type": "null"},
"website_url": {"type": "null"},
"updated_at": {"type": "string"},
"created_at": {"type": "string"}
},
"required": ["id", "obdb_id", "name", "brewery_type",
"city", "state", "country", "updated_at", "created_at"]
}
validate(instance=resp.json(), schema=schema)
# Типы пивоварен, передаваемые в качестве параметра в GET-запросе
types = ['micro', 'nano', 'regional', 'brewpub', 'large',
'planting', 'bar', 'contract', 'proprietor', 'closed']
@pytest.mark.parametrize("brewery_type", types)
def test_sorting_by_type(base_url, brewery_type):
"""Проверка фильтра по значению параметра brewery_type"""
target = base_url + f"?by_type={types}"
resp = requests.get(target)
for brewery in resp.json():
assert brewery["brewery_type"] == types
def test_country_is_USA(base_url):
"""Проверяем, что страной является United States"""
resp = requests.get(base_url)
for brewery in resp.json():
assert brewery["country"] == "United States"
@pytest.mark.skip
@pytest.mark.parametrize("amount",range(1, 50))
def test_parameter_per_page(base_url, amount):
"""Позитивный тест параметра per_page"""
target = base_url + f"?per_page={amount}"
resp = requests.get(target)
assert len(resp.json()) == amount, "Количество пивоварен на странице "\
"не соответствует переданному в GET-запросе параметру"
def test_border_conditions_per_page(base_url):
"""Проверка граничных условий параметра per_page"""
res_1 = requests.get(base_url + "?per_page=0")
res_2 = requests.get(base_url + "?per_page=51")
assert res_1.json() == []
assert len(res_2.json()) == 50, "Max per_page is not 50!" | Eliseev-Max/API_testing | open_brewery_db/test_open_brewery.py | test_open_brewery.py | py | 3,271 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
... |
9788724565 | from app import app
from flask import render_template, redirect, url_for, current_app
from app import db
from app.models import Car
from app.forms import AddForm, SearchForm
import sqlalchemy as sa
import sys
@app.route('/init')
def initialize_db():
connection_uri = app.config['SQLALCHEMY_DATABASE_URI']
engine = sa.create_engine(connection_uri)
insp = sa.inspect(engine)
# Create table only if it already doesn't exist
if not insp.has_table("cars"):
print('cars table does not exist. Creating it.', file=sys.stdout)
# Create the DB table
db.create_all()
# Iterate through the data file and add records to DB
with current_app.open_resource('static/mpg.csv') as file:
for row in file:
# convert from bytes to string before manipulating
toks = row.decode("utf-8").split(',')
c = Car(name = toks[-1].strip(),
year = toks[-3],
origin = toks[-2],
mpg = toks[1])
db.session.add(c)
# Commit all the changes
db.session.commit()
return "Initialized DB"
@app.route('/wipe')
def wipe():
db.drop_all()
return "Wiped data."
@app.route('/')
def hello():
return render_template('homepage.html')
@app.route('/view_all')
def view():
all = db.session.query(Car).all()
return render_template('view_cars.html', cars=all)
@app.route('/add', methods=['GET', 'POST'])
def add_record():
form = AddForm()
if form.validate_on_submit():
# Extract values from form
name_c = form.name.data
year_c = form.year.data
origin_c = form.origin.data
mpg_c = form.mpg.data
# Create a city record to store in the DB
c = Car(name =name_c, year=year_c,origin=origin_c,mpg=mpg_c)
# add record to table and commit changes
db.session.add(c)
db.session.commit()
form.name.data = ''
form.year.data = ''
form.origin.data = ''
form.mpg.data =''
return redirect(url_for('add_record'))
return render_template('add.html', form=form)
@app.route('/search_by_model', methods=['GET', 'POST'])
def search_by_name():
form = SearchForm()
if form.validate_on_submit():
# Query DB table for matching name
record = db.session.query(Car).filter(Car.name.contains(form.name.data))
if record:
return render_template('view_cars.html', cars=record)
else:
return render_template('not_found.html')
return render_template('search.html', form=form)
@app.route('/sort_model')
def sort_model():
all = db.session.query(Car).order_by(Car.name).all()
return render_template('view_cars.html', cars=all) | NormanBenedict/hwk2 | hw2-cse-321/app/routes.py | routes.py | py | 2,798 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.app.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "app.app",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.inspe... |
72301275554 | #!/usr/bin/python3
from lxml import html
import requests
import hashlib
import base64
import re
session = "7uvlvl1ci3tho8pdl2pgrp7ag0"
# NOT GETTING THE RIGHT CHALLENGE
def process(data):
print("="*35 + " Input " + "="*35)
print(data)
# can_decode = True
# i = 0
# while can_decode:
# i += 1
# try:
# temp = base64.b64decode(data)
# print(str(temp))
# print("")
# data = temp
#
# if "\\" in str(data):
# can_decode = False
# print("Finished after " + str(i) + " decodes")
# except:
# can_decode = False
# print("Finished after " + str(i) + " decodes")
with open("file.txt", "w") as f:
f.write(data)
exit()
ret = data
print("-"*80)
print(ret)
return ret
def main():
cookie = {'PHPSESSID': session}
page = requests.get('https://ringzer0team.com/challenges/15', cookies=cookie)
text = page.text
if "Do not brute" in text:
print("Login failed")
exit()
tree = html.fromstring(page.content)
message = tree.xpath('/html/body/div[2]/div/div[2]/div/text()[2]').pop().strip()
result = process(message)
answerUrl = 'https://ringzer0team.com/challenges/15/' + result
data = requests.get(answerUrl, cookies=cookie).content
data=data.decode().split('<div class="alert alert-info">')
print("")
try:
flag = re.findall(r"FLAG-\w+",data[1])
print(flag)
except:
print("No flag")
main()
| 0xchase/ctfs | ring0/coding/7-littlelf/solve.py | solve.py | py | 1,345 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_... |
21173035808 | from typing import Dict, List
from app.utils import preprocessor_slot_description_to_value
import torch
from transformers import AutoTokenizer
from app.model.pretrained_models import get_pretrained_model, get_tokenizer
class FlanT5Sacc:
def __init__(self, size: str, device) -> None:
self.name = f"flan-t5-{size}"
self.device = device
model_path = f"google/flan-t5-{size}"
self.model = get_pretrained_model(model_type="t5", model_path=model_path).eval().to(self.device)
self.tokenizer: AutoTokenizer = get_tokenizer(tokenizer_type="t5", tokenizer_path=model_path)
self.generation_config = {
"max_new_tokens": 16,
}
def calculate(self, utterance: str, metadata: Dict) -> Dict[str, bool]:
with torch.no_grad():
parts = metadata["parts"]
outputs = self.are_parts_correct_for_utterance(utterance, parts=parts)
return {
part["slot"]: output
for part, output in zip(parts, outputs)
}
def are_parts_correct_for_utterance(self, utterance: str, parts: List[Dict]) -> List[bool]:
utterance = self.preprocess_utterance(utterance)
prompts = [
self.get_prompt(utterance=utterance, part=part)
for part in parts
]
tokenized = self.tokenizer(
prompts,
max_length=256,
padding="max_length",
truncation=True,
return_tensors="pt",
).to(self.device)
input_ids = tokenized["input_ids"]
attention_mask = tokenized["attention_mask"]
output_tensor = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **self.generation_config)
outputs = self.tokenizer.batch_decode(sequences=output_tensor, skip_special_tokens=True)
assert len(outputs) == len(parts)
outputs = [self.model_output_to_bool(output) for output in outputs]
return outputs
def get_prompt(self, utterance: str, part: Dict) -> str:
template = self.preprocess_template(part["template"])
if part["act"] in ["INFORM", "OFFER", "CONFIRM"]:
return f'{utterance}\nCan we infer the following?\n{template}\n\n["yes", "no"]'
elif part["act"] == "REQUEST":
return f'{utterance}\nDo we ask the following question?\n"{template}"\n\["yes", "no"]'
else:
raise RuntimeError(f"Unknown act {part['act']}")
def preprocess_utterance(self, utterance: str) -> str:
return utterance
def preprocess_template(self, template: str) -> str:
return preprocessor_slot_description_to_value(template)
def model_output_to_bool(self, output: str) -> bool:
if output == "yes":
return True
elif output == "no":
return False
else:
print(f"flan model returned '{output}' as a response")
return False
| janpawlowskiof/template-based-response-generation-in-tod | app/rankers/flan_t5_ranker.py | flan_t5_ranker.py | py | 2,949 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.model.pretrained_models.get_pretrained_model",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "app.model.pretrained_models.get_tokenizer",
"line_number": 16,
... |
72921016995 | import os
from fabric.api import cd, run, env, task, get
from fabric.contrib.files import exists
from ..repositories import get_repo_name
from fabric.context_managers import prefix
@task
def pip_download_cache(keep_dir=None):
"""Downloads pip packages into deployment pip dir.
"""
if not exists(env.deployment_pip_dir):
# run('rm -rf {deployment_pip_dir}'.format(
# deployment_pip_dir=env.deployment_pip_dir))
run('mkdir -p {deployment_pip_dir}'.format(
deployment_pip_dir=env.deployment_pip_dir))
with cd(env.project_repo_root):
# can't use
# run('pip download --python-version 3 --only-binary=:all: '
# as not all packages have a wheel (arrow, etc)
pip_download('pip')
pip_download('setuptools')
pip_download('ipython')
pip_download('wheel')
pip_download('gunicorn')
run('pip3 download '
'-d {deployment_pip_dir} -r {requirements}'.format(
deployment_pip_dir=env.deployment_pip_dir,
requirements=env.requirements_file), warn_only=True)
def pip_download(package_name):
"""pip downloads a package to the deployment_pip_dir.
"""
run('pip3 download '
'-d {deployment_pip_dir} {package_name}'.format(
deployment_pip_dir=env.deployment_pip_dir,
package_name=package_name), warn_only=True)
def pip_install_requirements_from_cache(venv_name=None):
"""pip installs required packages from pip_cache_dir into the venv.
"""
package_names = get_required_package_names()
for package_name in package_names:
pip_install_from_cache(package_name=package_name, venv_name=venv_name)
def pip_install_from_cache(package_name=None, pip_cache_dir=None, venv_name=None):
"""pip install a package from pip_cache_dir into the venv.
"""
pip_cache_dir = pip_cache_dir or env.deployment_pip_dir
venv_name = venv_name or env.venv_name
with cd(pip_cache_dir):
# with prefix('workon {venv_name}'.format(venv_name=venv_name)):
run('workon {venv_name} && pip3 install --no-cache-dir --no-index --find-links=. {package_name}'.format(
venv_name=venv_name,
package_name=package_name))
def get_required_package_names():
package_names = []
with cd(env.project_repo_root):
data = run('cat {requirements}'.format(
requirements=env.requirements_file))
data = data.split('\n')
for line in data:
if 'botswana-harvard' in line or 'erikvw' in line:
repo_url = line.split('@')[0].replace('git+', '')
package_names.append(get_repo_name(repo_url))
return package_names
def get_pip_list():
local_path = os.path.expanduser(
f'~/fabric/download/pip.{env.host}.{env.project_release}.txt')
if os.path.exists(local_path):
os.remove(local_path)
remote_path = f'~/pip.freeze.{env.project_release}.txt'
run(f'rm {remote_path}', warn_only=True)
with prefix(f'source {env.venv_dir}/{env.project_appname}/bin/activate'):
run(f'pip freeze > {remote_path}')
get(remote_path=remote_path, local_path=local_path)
| botswana-harvard/edc-fabric | edc_fabric/fabfile/pip/tasks.py | tasks.py | py | 3,218 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fabric.contrib.files.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fabric.api.env.deployment_pip_dir",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "fabric.api.env",
"line_number": 14,
"usage_type": "name"
},
{
... |
4489182542 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import ResNet101
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Flatten, Dense
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
x_train = np.load('../data/LPD_competition/npy/train_data.npy')
y_train = np.load('../data/LPD_competition/npy/label_data.npy')
print(x_train.shape)
print(y_train.shape)
y_train = to_categorical(y_train)
x_test = np.load('../data/LPD_competition/npy/test_data.npy')
print(x_test.shape)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
train_generator = ImageDataGenerator(rescale=1./255, width_shift_range=0.1, height_shift_range=0.1).flow(x_train, y_train, batch_size=32)
val_generator = ImageDataGenerator(rescale=1./255, width_shift_range=0.1, height_shift_range=0.1).flow(x_val, y_val, batch_size=32)
test_generator = ImageDataGenerator(rescale=1./255).flow(x_test, shuffle=False)
resnet = ResNet101(weights='imagenet', include_top=False, input_shape=(128,128,3))
# resnet.summary()
resnet.trainable = False
input_tensor = Input(shape=(128,128,3))
layer = resnet(input_tensor)
layer = GlobalAveragePooling2D()(layer)
layer = Flatten()(layer)
layer = Dense(2048, activation='relu')(layer)
layer = Dense(1024, activation='relu')(layer)
output_tensor = Dense(1000, activation='softmax')(layer)
model = Model(inputs=input_tensor, outputs=output_tensor)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
path = './Lotte/resnet101_model.hdf5'
es = EarlyStopping(monitor='val_accuracy', patience=30)
cp = ModelCheckpoint(path, monitor='val_accuracy', save_best_only=True)
lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, patience=10)
model.fit(train_generator, epochs=2000, batch_size=32, validation_data=val_generator, callbacks=[es,cp,lr])
pred = model.predict(test_generator)
print(np.argmax(pred,1))
answer = pd.read_csv('./Lotte/sample.csv', header=0)
print(answer.shape)
answer.iloc[:,1] = np.argmax(pred,1)
print(answer)
answer.to_csv('./Lotte/submission.csv', index=False)
# 20.424 | Taerimmm/ML | Lotte/06_1_ResNet101.py | 06_1_ResNet101.py | py | 2,392 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.loa... |
21447706332 | import torch
import torchvision
import torchvision.transforms as transforms
DATA_PATH = './data'
def get_transform():
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
return transform
def get_dataset(transform, download=True):
train_set = torchvision.datasets.MNIST(root=DATA_PATH, train=True, download=download, transform=transform)
test_set = torchvision.datasets.MNIST(root=DATA_PATH, train=False, download=download, transform=transform)
return train_set, test_set
def get_dataloader(train_set, test_set, bs):
trainloader = torch.utils.data.DataLoader(train_set, batch_size=bs, shuffle=True)
testloader = torch.utils.data.DataLoader(test_set, batch_size=bs, shuffle=False)
return trainloader, testloader
| nmd-2000/torchserve-demo | model/utils.py | utils.py | py | 809 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 9,
"usage_type": "call"
},
{
... |
30473794377 | from django import forms
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from .fields import TaxiField, TaxiSingleField
from .models import TermTaxonomy, TermTaxonomyItem
class TaxiModelMixin(forms.ModelForm):
"""
Mixin used on model forms where a TaxiField is set.
The mixin is responsible to create and delete
TermTaxonomyItems for a given instance based on form data.
"""
taxi_fields = None
def __init__(self, *args, **kwargs):
self.taxi_fields = {}
for field_name, field in self.__class__.base_fields.items():
if type(field) in [TaxiField, TaxiSingleField]:
self.taxi_fields[field_name] = field.taxonomy_slug
# Set initial
instance = kwargs.get("instance")
if instance:
kwargs["initial"] = kwargs.get("initial", {})
for field in self.taxi_fields:
kwargs["initial"].update(
{
field: list(
instance.terms.values_list("term_taxonomy__term", flat=True)
)
}
)
super().__init__(*args, **kwargs)
# Set choices
for field in self.taxi_fields:
self.fields[field].choices = self.get_choices(field).values_list(
"id", "term__name"
)
def get_choices(self, field):
return TermTaxonomy.objects.filter(taxonomy__slug=self.taxi_fields[field])
def get_existing_term_taxonomy_ids(self, field, instance):
return list(
instance.terms.filter(
term_taxonomy__taxonomy__slug=self.taxi_fields[field]
).values_list("term_taxonomy", flat=True)
)
def _save_taxi(self):
instance = self.instance
content_type = ContentType.objects.get_for_model(instance)
with transaction.atomic():
for field, term_taxonomy in self.taxi_fields.items():
existing = self.get_existing_term_taxonomy_ids(field, instance)
choices = self.get_choices(field)
data = self.cleaned_data[field]
data = [int(data)] if type(data) == str else [int(i) for i in data]
for choice in choices:
if choice.pk in data and choice.pk in existing:
# Do nothing, selected and already exists.
pass
elif choice.pk in data and choice.pk not in existing:
# Create the new choice that does not already exist.
TermTaxonomyItem(
content_type=content_type,
object_id=instance.pk,
term_taxonomy=choice,
).save()
elif choice.pk not in data and choice.pk in existing:
# Remove the unselected choice that already exists.
TermTaxonomyItem.objects.filter(
term_taxonomy=choice,
content_type=content_type,
object_id=instance.pk,
).delete()
elif choice.pk not in data and choice.pk not in existing:
# Final case, choice not selected and do not exist.
pass
def _save_m2m_taxi(self):
self._save_m2m()
self._save_taxi()
def save(self, commit=True):
instance = super().save(commit=commit)
if commit:
self._save_taxi()
else:
self.save_m2m = self._save_m2m_taxi # noqa
return instance
| nibon/django-taxi | django_taxi/mixins.py | mixins.py | py | 3,745 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "fields.TaxiField",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "fields.TaxiSi... |
6003789556 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def docclass_test():
cl = fisherclassifier(getwords)
cl.setdb('test1.db')
sampletrain(cl)
cl2 = naivebayes(getwords)
cl2.setdb('test1.db')
cl2.classify('quick money')
if __name__ == '__main__':
import nose
nose.main()
| cametan001/document_filtering | docclass_test.py | docclass_test.py | py | 300 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nose.main",
"line_number": 14,
"usage_type": "call"
}
] |
71904121953 | import serial
import time
import numpy as np
class DummyHead():
def __init__(self, port):
"""Will work in deg in this class"""
self.ino = serial.Serial('/dev/cu.usbmodem'+str(port), 115200, timeout=1)
time.sleep(2)
self.theta = 0
#need big range here
self.max_left = 30
self.max_right = -30
#Starting moving left
self.left = True
self.right = False
def look_around(self):
"""Move the dummy hehad so it looks around randomly"""
# go back and forth
if self.left:
self.theta += 1
if self.theta >= self.max_left:
self.left = False
self.right = True
elif self.right:
self.theta -= 1
if self.theta <= self.max_right: #past -90 deg
self.left = True
self.right = False
# self.theta = 20
self.move(self.theta)
def get_rotation(self):
return np.deg2rad(self.theta)
def close(self):
self.ino.close()
def move(self, theta):
"""Moves dummy head to location"""
q1 = theta
q2 = 0
msg = self.formatAngle(q1,q2)
# print(msg)
self.ino.write(msg)
def formatAngle(self, q1,q2):
q1, q2 = self.calibrate(q1, q2)
data = str(q1) + "," + str(q2) + '\n'
data = str(q1) + "," + str(q2) + '\n'
return data.encode()
def calibrate(self, q1_angle, q2_angle):
#make this so
q1_offset = 90 - 30
q2_offset = 0
# q1_angle *= -1
return q1_angle + q1_offset, q2_angle + q2_offset
def read(self):
data = self.ino.readline()
if data:
data = data.decode('ascii')
data = data.strip('\n')
data = data.strip('\r')
print(data)
# #Example Usage
# head = DummyHead(14141)
# angle = 0
# while True:
# time.sleep(0.5)
# head.move(90)
# # head.move(90)
# # angle += np.deg2rad(1)#hhead has 1 deg of res
# head.read()
# head.close()
| zacharyyamaoka/DE3-Audio | dummy_head.py | dummy_head.py | py | 2,099 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 43,
"usage_type": "call"
}
] |
71904125153 |
#Imports
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
import librosa
import torch
class AudioLocationDataset(Dataset):
def __init__(self, root="./../data_clip/", csv="./data_clip_label/label.csv", transform=None, use_subset=None, num_bin = 2):
self.root = root
self.root = "/Users/zachyamaoka/Dropbox/DE3/AudioExperinceDesign/de3_audio_data/data_clip/"
self.csv = pd.read_csv(csv)
if use_subset is not None:
self.filenames = self.csv['Filename'].tolist()[:use_subset]
self.labels = self.csv['Label'].tolist()[:use_subset]
else:
self.filenames = self.csv['Filename'].tolist()
self.labels = self.csv['Label'].tolist()
self.transform = transform
self.num_bin = num_bin
self.bins = get_bins(self.num_bin)
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
#audio, label, rates = load_data_file(n=idx, audio_n_offset=0, label_rate=10, file_stem="real_rec_", data_label_path = "./data_real_label/", data_wav_path = "./../data_real_wav/")
fname = self.filenames[idx]
label = self.labels[idx]
path = self.root + fname
audio, sample_rate = librosa.core.load(path, sr=96000, mono=False)
#print(audio[:, :5])
#print(audio.shape)
#print([label])
#label = label[:, :2]
#label = np.expand_dims(label, 1)
#cut so they are all the same length
# audio = audio[:, :192512] #26146890 for synthetic
# Take random 0.1 sample
rate = 96000
dur = 0.005
chunk = int(rate*dur)
max_rand_ind = 192512 - chunk - 1
min_rand_ind = 0
start = int(np.random.uniform(min_rand_ind,max_rand_ind))
# print(start, start+chunk)
audio = audio[:, start:(start+chunk)]
#center data
mean = np.mean(audio)
audio -= mean
#normalize
max = np.max(np.abs(audio))
audio /= max
#label = label[:5995, :] #59291 for synthetic
# if label<np.pi:
# label=[0]
# else:
# label=[1]
label = segment_data(label,self.bins) # convert from theta to bin
label = [label]
if self.transform:
audio, label = self.transform((audio, label))
return audio, label
def toPolar(xy):
x = xy[0]
y = xy[1]
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def toCartesian(rhophi):
rho = rhophi[0]
phi = rhophi[1]
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def train_test_val_split(csv="./data_clip_label/label.csv", save_loc='./data_clip_label/'):
csv = pd.read_csv(csv)
filenames = csv['Filename'].tolist()
labels = csv['Label'].tolist()
all_i = np.arange(len(filenames))
all_i = np.random.choice(all_i, len(all_i), replace=False)
train=csv.sample(frac=0.8)
testval=csv.drop(train.index)
val=testval.sample(frac=0.5)
test=testval.drop(val.index)
print(type(train), '\n', train.head(), '\n len', len(train))
print(type(val), '\n', val.head(), '\n len', len(val))
print(type(test), '\n', test.head(), '\n len', len(test))
train.to_csv(save_loc+"label_train.csv", index=False)
test.to_csv(save_loc+"label_val.csv", index=False)
test.to_csv(save_loc+"label_test.csv", index=False)
class ToTensor():
def __init__(self):
pass
def __call__(self, sample):
a, l = sample
return torch.Tensor(a), torch.LongTensor(l)
def abs_radial_loss(h,y):
global batch_size
x = torch.abs(h.sub(y))
x = torch.abs(x - np.pi)
x = np.pi - x
# print(x)
# showind = np.random.randint(x.shape[0])
# label = y.detach().numpy()[showind, 0]
# pred = h.detach().numpy()[showind, 0]
# x_ = x.detach().numpy()[showind, 0]
# print("label: ", np.rad2deg(label), "pred: ", np.rad2deg(pred), "diff: ", np.rad2deg(x_))
# time.sleep(3)
# x = x * x #square difference
x = torch.abs(x) # must be positive
x = torch.sum(x)
x = x/batch_size
return x
def round_down(num, divisor):
return num - (num%divisor)
def radial_loss(h, y):
x = torch.abs(h.sub(y))
x = torch.remainder(x, np.pi)
x = torch.mean(x)
return x
def get_bins(n):
""" Splits heading into a discrete number of bins"""
assert n % 2==0 # Onlys works for symetric bin numbers. Only symetric sub divsions are meaningful for the dummy headself.
bin_size = (2 * np.pi)/n
# Not tested, changes so bin go along middle.
# how front how back......, saying front or back is easier.
if n == 2:
bin_offset = 0 # left right case
else:
bin_offset = bin_size/2
start = 0
theta = start + bin_offset
bins = []
for i in range(n):
bins.append(theta)
theta += bin_size
return bins
def get_theta_quad(pred, n): #Rounds to center of quadrant
"""Coverts a pred label to theta value based on the number of quadrants"""
if n == 2: #special case
if pred == 0:
return np.pi/2
else:
return 1.5 * np.pi
else:
step = 2 * np.pi/ n
return (pred+1)* step
#floors theta based on the number of bins
def segment_data(theta,bins):
"""segments polar data into respective bin and return ind, here 0 corresponds to the first bin. Assums data is between 0 and 2pi"""
#bins of the from end
n = len(bins)
#find smallest starting value.
for i in np.arange(n):
j = i + 1
if j > n - 1:
return n - 1 #edge case
if (theta >= bins[i]) and (theta <= bins[j]):
return i #return the ind
# def abs_radial_loss(h,y):
# global batch_size
# # h = torch.remainder(h, np.pi) #
# x = torch.abs(h.sub(y))
# x = torch.abs(x - np.pi)
# x = np.pi - x
# x = x * x #square value
#
# x = torch.sum(x) # average over batch
# x = x / batch_size
#return x
| zacharyyamaoka/DE3-Audio | nn_utils/nn_util.py | nn_util.py | py | 6,075 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "librosa.core.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "librosa.co... |
17447453062 | from typing import Callable, Dict
from starlette import status
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import RedirectResponse, Response
from starlette.types import ASGIApp, Receive, Scope, Send
class LegacyRedirectMiddleware(BaseHTTPMiddleware):
def __init__(
self,
app: ASGIApp,
*,
url_mapping: Dict[str, str],
) -> None:
super().__init__(app)
self.url_mapping = url_mapping
async def dispatch(self, request: Request, call_next: Callable) -> Response:
path = request.url.path
if path not in self.url_mapping:
response = await call_next(request)
# User may have requested `/xyz/` when only `/xyz` is
# in the URL mapping, resulting in a 404 false-positive.
# Attempt mapping from `/xyz`.
if response.status_code != 404 or not path.endswith("/"):
return response
path = path.rstrip("/")
if path not in self.url_mapping:
return response
redirect_path = self.url_mapping[path]
return RedirectResponse(
request.url.replace(path=redirect_path),
status_code=status.HTTP_301_MOVED_PERMANENTLY,
)
class DomainRedirect:
def __init__(
self,
domain: str,
status_code: int = status.HTTP_301_MOVED_PERMANENTLY,
root_path: str | None = None,
) -> None:
self.domain = domain
self.status_code = status_code
self.root_path = root_path
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "http"
request = Request(scope)
url = request.url.replace(hostname=self.domain)
if self.root_path:
url = url.replace(path=f"{self.root_path}{request.url.path}")
response = RedirectResponse(url, status_code=self.status_code)
await response(scope, receive, send)
| florimondmanca/www | server/web/legacy.py | legacy.py | py | 2,045 | python | en | code | 31 | github-code | 1 | [
{
"api_name": "starlette.middleware.base.BaseHTTPMiddleware",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "starlette.types.ASGIApp",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"... |
24602385004 | from pydoc import text
from flask import Flask, jsonify
import socket
import flask
import netifaces
import subprocess
from flask.globals import request
from uuid import getnode as get_mac
from threading import Thread
import os, sys, json, struct, socket, fcntl, time
import subprocess
from os import listdir
from os.path import islink, realpath, join
import re
import multiprocessing
import ipaddress
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
if "OBB_HOME" in os.environ:
obb_home = os.environ["OBB_HOME"]
reset_cmd = "make delete && make validate && sleep 5 && make deploy"
else:
obb_home = "/opt/jci-obb"
reset_cmd = "make delete && make validate && sleep 5 && make factory-deploy"
app = Flask(__name__)
@app.route('/')
def index():
return '', 200
@app.route("/ping")
def ping():
return jsonify({"status": "OK"}), 200
@app.post("/api/systemRestart")
def applyRestart():
status = request.args.get('apply')
if status == "true":
def post_request_systemRestart():
reboot_cmd = "sudo reboot"
#reboot_cmd = "echo 'System Reboot'"
time.sleep(10)
subprocess.run([reboot_cmd],shell=True)
threadRestart = Thread(target=post_request_systemRestart)
threadRestart.start()
return jsonify({'status': 202, 'message': "System Reboot Initiated Successfully"}), 202
else:
return jsonify({'status': 400, 'message': "Bad Request"}), 400
@app.post("/api/factoryReset")
def applyFactoryReset():
status = request.args.get('apply')
if status == "true":
def post_request_factoryReset():
factoryReset_cmd = "cd {} && {}".format(obb_home,reset_cmd)
#factoryReset_cmd = "echo 'Factory Reset'"
time.sleep(10)
subprocess.run([factoryReset_cmd],shell=True)
threadReset = Thread(target=post_request_factoryReset)
threadReset.start()
return jsonify({'status': 202, 'message': "Factory Reset Initiated Successfully"}), 202
else:
return jsonify({'status': 400, 'message': "Bad Request"}), 400
@app.route("/changeSettings",methods=['POST'])
def setStaticIp():
request_data = request.get_json()
valid_payload = isValidPayload(request_data)
if not valid_payload :
return jsonify({'status': 400, 'message': "Bad Request. Payload is not Valid!"}), 400
dhcp_value=str(request_data.get("dhcpEnabled"))
if dhcp_value=='False':
dhc_false_add_addr="sudo netplan set ethernets.{interface}.addresses=[{ipaddr}/{mask}]".format(interface=request_data['name'],ipaddr=request_data['ipAddress'],mask=request_data['subnetMask'])
subprocess.run(dhc_false_add_addr, capture_output=True, shell=True)
autoDns = request_data["dns"]["auto"]
dhc_false_dns_list=request_data["dns"]['nameservers']
if autoDns == False:
dhc_false_nameserver_add="sudo netplan set ethernets.{interface}.nameservers.addresses=[{nameserver}]".format(interface=request_data['name'],nameserver=",".join(dhc_false_dns_list))
subprocess.run(dhc_false_nameserver_add, capture_output=True, shell=True)
else:
auto_dns_command="sudo netplan set ethernets.{interface}.nameservers.addresses={nameserver}".format(interface=request_data['name'],nameserver="null")
subprocess.run(auto_dns_command, capture_output=True, shell=True)
dhc_false_gateway_add="sudo netplan set ethernets.{interface}.gateway4={gatewayaddr}".format(interface=request_data['name'],gatewayaddr=request_data['defaultGateway'])
subprocess.run(dhc_false_gateway_add, capture_output=True, shell=True)
dhc_false_disable_cmd="sudo netplan set ethernets.{interface}.dhcp4={status}".format(interface=request_data['name'],status="no")
subprocess.run(dhc_false_disable_cmd, capture_output=True, shell=True)
pool = multiprocessing.Pool(processes=1)
pool.apply_async(chconfig)
elif dhcp_value=='True':
dhc_true_command="sudo netplan set ethernets.{interface}.dhcp4={status}".format(interface=request_data['name'],status="yes")
dhc_true_null_addr="sudo netplan set ethernets.{interface}.addresses={state}".format(interface=request_data['name'],state="null")
dhc_tru_null_nameserver="sudo netplan set ethernets.{interface}.nameservers.addresses={state}".format(interface=request_data['name'],state="null")
dhc_tru_null_gateway="sudo netplan set ethernets.{interface}.gateway4={state}".format(interface=request_data['name'],state="null")
subprocess.run(dhc_true_command, capture_output=True, shell=True)
subprocess.run(dhc_true_null_addr, capture_output=True, shell=True)
subprocess.run(dhc_tru_null_nameserver, capture_output=True, shell=True)
subprocess.run(dhc_tru_null_gateway, capture_output=True, shell=True)
pool = multiprocessing.Pool(processes=1)
pool.apply_async(chconfig)
return "Response from API", 200
@app.route("/api/getstaticip", methods=['GET'])
def getStaticIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
staticip = s.getsockname()[0]
s.close()
return flask.jsonify(StaticIp=staticip)
@app.route("/api/getinterfaces", methods=['GET'])
def getinterfaces():
if_res_main = []
if_list = interdiscover()
for if_name in if_list:
if_data = {}
if_data['name'] = if_name
if_data['macAddress'] = getHwAddr(if_name)
status = interfacestatus(if_name)
if_data['status'] = status
if status == 'Online':
if_data['subnetMask'] = netifaces.ifaddresses(if_name)[netifaces.AF_INET][0]['netmask']
if_data['ipAddress'] = netifaces.ifaddresses(if_name)[netifaces.AF_INET][0]['addr']
gws=netifaces.gateways()
if_data['defaultGateway'] = gws['default'][netifaces.AF_INET][0]
else:
if_data['subnetMask'] = "None"
if_data['ipAddress'] = "None"
if_data['defaultGateway'] = "None"
if_data['dhcpEnabled'] = dhcpstatus(if_name)
if_data['dns']= get_dns_settings(if_name)
if_res_main.append(if_data)
return flask.jsonify(if_res_main)
def interdiscover():
    """Return the names of the physical network interfaces.

    Every entry under /sys/class/net is a symlink; links resolving into
    /sys/devices/virtual or /sys/devices/vif are virtual devices and are
    filtered out.
    """
    sysfs = "/sys/class/net"
    virtual_roots = ("/sys/devices/virtual", "/sys/devices/vif")
    physical = []
    for entry in listdir(sysfs):
        link = join(sysfs, entry)
        if not islink(link):
            continue
        if realpath(link).startswith(virtual_roots):
            continue
        physical.append(entry)
    return physical
def dhcpstatus(intname):
    """Return True unless netplan reports ``dhcp4: false`` for *intname*.

    Shells out to ``sudo netplan get`` and scans stdout; any failure to
    produce that marker (including command errors) reads as DHCP enabled.
    """
    probe = "sudo netplan get ethernets.{}".format(intname)
    completed = subprocess.run(probe, capture_output=True, shell=True)
    report = completed.stdout.decode()
    return "dhcp4: false" not in report
def interfacestatus(ifname):
    """Classify *ifname* as 'Online'/'Offline' from ethtool's link report."""
    probe = "sudo ethtool {}".format(ifname)
    completed = subprocess.run(probe, capture_output=True, shell=True)
    report = completed.stdout.decode()
    return "Online" if "Link detected: yes" in report else "Offline"
def getHwAddr(ifname):
    # Read the interface MAC via the SIOCGIFHWADDR ioctl (0x8927) on a
    # throwaway UDP socket.  The name is packed into a 256-byte ifreq buffer
    # (truncated to 15 chars, the kernel IFNAMSIZ limit); bytes 18..24 of
    # the reply carry the 6-byte hardware address.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))
    return ':'.join('%02x' % b for b in info[18:24])
def get_dns_settings(ifname) -> dict:
    """Read the statically configured nameservers for *ifname* from netplan.

    Returns:
        dict with 'nameservers' (dotted-quad strings scraped from the
        netplan output) and 'auto' (True when no static entry is present,
        i.e. DNS is expected to come from DHCP).
    """
    command = "sudo netplan get ethernets.{}.nameservers.addresses".format(ifname)
    ret = subprocess.run(command, capture_output=True, shell=True)
    # Fix: removed the dead ``x = []`` initialisation that was immediately
    # overwritten by the decoded stdout.
    output = ret.stdout.decode()
    servers = re.findall(r'[0-9]+(?:\.[0-9]+){3}', output)
    return {'nameservers': servers, 'auto': len(servers) == 0}
def chconfig():
    # Apply the staged netplan changes and bounce dependent services.
    # Executed out-of-process (via multiprocessing) because "netplan apply"
    # may drop the connection that carried the HTTP request.
    apply_command="sudo netplan apply"
    subprocess.run(apply_command, capture_output=True, shell=True)
    subprocess.call(['sh', 'service-restart.sh'])
    return "shell executed successfully"
def isValidPayload(request_data):
    """Validate the /changeSettings JSON payload.

    Checks that dhcpEnabled and dns.auto stringify to "True"/"False", that
    the interface name exists on this host, that the subnet mask is a
    decimal CIDR prefix, and that ipAddress, defaultGateway and every DNS
    nameserver parse as IP addresses.

    Returns:
        True when the payload is usable, False otherwise.
    """
    bool_array = ["True" , "False"]
    dhcpEnabled = str(request_data.get("dhcpEnabled"))
    autoDns = str(request_data["dns"]["auto"])
    interface_list = interdiscover()
    if(dhcpEnabled not in bool_array):
        return False
    if(autoDns not in bool_array):
        return False
    if(not str(request_data.get("name")) in interface_list):
        return False
    # Bug fix: ``.isdecimal`` was never *called*; the bound method object is
    # always truthy, so this check could never reject a bad mask.
    if(not str(request_data.get("subnetMask")).isdecimal()):
        return False
    try:
        ipaddress.ip_address(str(request_data.get("ipAddress")))
        ipaddress.ip_address(str(request_data.get("defaultGateway")))
        dns_nameservers_list=request_data["dns"]["nameservers"]
        for nameserver in dns_nameservers_list:
            ipaddress.ip_address(nameserver)
    except ValueError:
        return False
    return True
if __name__ =='__main__':
    # Bind on all interfaces so the API is reachable over the LAN.
    app.run(debug = False, host='0.0.0.0', port=8099)
| khayalghosh/data | app.py | app.py | py | 8,797 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.modules",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"l... |
20613432599 | import maya.cmds as cmds
from math import *
# Build a NURBS sphere plus a polygon cube and cone, then wire up a
# closestPointOnSurface node so the cube always sits at the point on the
# sphere's surface nearest the cone: the cone's translate drives
# inPosition, the sphere's worldSpace feeds inputSurface, and the computed
# position drives the cube's translate.
psphere = cmds.sphere(r=5)
pcube = cmds.polyCube()
pcone = cmds.polyCone()
closestToSurface = cmds.createNode("closestPointOnSurface")
cmds.connectAttr(closestToSurface+'.position', pcube[0]+'.translate')
cmds.connectAttr(pcone[0]+'.translate', closestToSurface+'.inPosition')
cmds.connectAttr(psphere[0]+'.worldSpace[0]',closestToSurface+'.inputSurface')
| ameliacode/BestTextbookforTechnicalArtists | Chapter 2. Procedure/object_movement.py | object_movement.py | py | 401 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "maya.cmds.sphere",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "maya.cmds.polyCube",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_numb... |
8007887981 | # App de Medição de Indice de Descarte
# Imports
import pickle
import numpy as np
import pandas as pd
import logging, io, os, sys
from sklearn.ensemble import GradientBoostingClassifier
from flask import Flask, render_template, flash, request, jsonify
# pip install flask_httpauth
from flask_httpauth import HTTPBasicAuth
# Create the app
app = Flask(__name__)
# Initialise HTTP basic authentication
auth = HTTPBasicAuth()
# The classifier is loaded lazily by startup(); None until then
modelo_linhagem = None
# Input feature names, in the column order expected by the model
atributos = ['temperatura',
             'amonia',
             'idade',
             'peso',
             'sexo',
             'fotoperiodo',
             'ruido',
             'luz',
             'umidade',
             'infeccoes',
             'animais_por_gaiola',
             'linhagem']
# Credentials accepted by basic auth (NOTE(review): hard-coded secrets)
usuarios = {"ictb": "ictb", "lab": "lab"}
# Basic-auth callback resolving a username to its password.
@auth.get_password
def get_pw(username):
    """Return the password for *username*, or None when unknown."""
    # dict.get already yields None for missing keys, matching the original
    # explicit membership test.
    return usuarios.get(username)
# Resolve the static image shown for a lineage / discard-level pair.
def mostra_imagem(linhagem, nivel_descarte):
    """Return the image path for the given lineage and discard level.

    Lineage code 0 maps to the 'black' strain; any other code maps to
    'swiss'.
    """
    linhagem_nome = 'swiss'
    if linhagem == 0:
        linhagem_nome = 'black'
    return '/static/imagens/' + linhagem_nome + '_' + str(nivel_descarte) + '.jpg'
# Load the model once, when the app starts serving.
@app.before_first_request
def startup():
    """Unpickle the lineage classifier into the module-level global."""
    global modelo_linhagem
    # Load the trained model from disk.
    # NOTE(review): the file handle passed to pickle.load is never closed.
    modelo_linhagem = pickle.load(open("static/modelo/modelo_linhagens.p",'rb'))
# Format the HTTP 500 error response.
@app.errorhandler(500)
def server_error(e):
    """Log the unhandled exception and return a minimal HTML 500 page."""
    logging.exception('some error')
    # Bug fix: the user-facing message previously read "And internal error".
    return """
    An internal error <pre>{}</pre>
    """.format(e), 500
# Endpoint that collects the input parameters and runs the model.
@app.route('/background_process', methods = ['POST', 'GET'])
def background_process():
    """Read the 12 input features from the query string, score them with
    the loaded model and return the predicted discard level plus the path
    of the matching image.
    """
    # NOTE(review): missing or non-numeric query parameters raise here
    # (TypeError/ValueError) and surface as HTTP 500.
    temperatura = float(request.args.get('temperatura'))
    amonia = float(request.args.get('amonia'))
    idade = float(request.args.get('idade'))
    peso = float(request.args.get('peso'))
    sexo = float(request.args.get('sexo'))
    fotoperiodo = float(request.args.get('fotoperiodo'))
    ruido = float(request.args.get('ruido'))
    luz = float(request.args.get('luz'))
    umidade = float(request.args.get('umidade'))
    infeccoes = float(request.args.get('infeccoes'))
    animais_por_gaiola = float(request.args.get('animais_por_gaiola'))
    linhagem = int(request.args.get('linhagem'))
    # Build a one-row dataframe with the expected column order
    novos_dados = pd.DataFrame([[temperatura,
                                 amonia,
                                 idade,
                                 peso,
                                 sexo,
                                 fotoperiodo,
                                 ruido,
                                 luz,
                                 umidade,
                                 infeccoes,
                                 animais_por_gaiola,
                                 linhagem]],
                                columns = atributos)
    # Predict class probabilities
    previsoes = modelo_linhagem.predict_proba(novos_dados[atributos])
    # Keep the most likely class (the classes are the discard levels 3/6/9)
    melhor_previsao = [3,6,9][np.argmax(previsoes[0])]
    # Return the predicted level and the image that corresponds to it
    return jsonify({'qualidade_prevista':melhor_previsao, 'image_name': mostra_imagem(linhagem, melhor_previsao)})
# Serve the main page and render the placeholder image (requires basic auth).
@app.route("/", methods = ['POST', 'GET'])
@auth.login_required
def index():
    """Render the landing page with placeholder prediction values."""
    logging.warning("index!")
    return render_template('index.html', qualidade_prevista = 1, image_name = '/static/imagens/imagem.jpg')
# Run the app
if __name__ == '__main__':
    # Debug mode is intended for local development only.
    app.debug = True
    app.run()
| machadodecastro/animal_death_risk_prediction | app/app.py | app.py | py | 3,775 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_httpauth.HTTPBasicAuth",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "logging.exceptio... |
22735484746 | import cv2
import sys
import os
# Parse a GPFPD log file into a list of comma-split records.
def parse_gpfpd(gpfpdfile):
    """Read *gpfpdfile* and return its data rows as lists of fields.

    The returned list starts with a default IMU row kept for index
    alignment; after that, only every other raw line (the 1st, 3rd, 5th,
    ... line of the file) is kept, which skips the blank separator lines
    present in some logs.
    """
    raw = [-1]
    with open(gpfpdfile, 'r') as handle:
        for record in handle.readlines():
            # Strip the trailing newline, then split on commas.
            raw.append(record.strip('\n').split(','))
    parsed = [[0, 0, 0, 0, -0.202, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    for idx, fields in enumerate(raw):
        if idx % 2 == 1:
            parsed.append(fields)
    return parsed
# if __name__ == "__main__":
def rotate():
    """Rotate each warped frame to compensate for the recorded IMU heading.

    Reads yaw angles from TXT/imu1_0826.txt (field 3 of each GPFPD record),
    rotates frames 1..7365 about their centre by (360 - yaw) degrees and
    writes the results into PICTURE/photos1_0826_rotated/.
    """
    start_index = 1
    end_index = 7366
    gpfpd = parse_gpfpd("TXT/imu1_0826.txt")
    print(gpfpd[1][1])
    print(gpfpd[2][1])
    img_dir = "PICTURE/photos1_0826_warped/"
    rotated_dir = "PICTURE/photos1_0826_rotated/"
    if not os.path.exists(rotated_dir):
        os.makedirs(rotated_dir)
    imgName_tail = ".jpg"
    # Read the images starting from start_index and rotate each one.
    for i in range(start_index, end_index):
        print(i)
        img = cv2.imread(img_dir + str(i) + imgName_tail)
        rows, cols, chanel = img.shape
        # print(rows, cols)
        # rotate_angle = float(gpfpd[start_index][0])-float(gpfpd[2*i-1][0])
        # rotate_angle = float(357.71 - float(gpfpd[2 * i - 1][0]))
        rotate_angle = float(360 - float(gpfpd[i][3]))
        print(rotate_angle)
        # Rotate about the image centre, scale factor 1.
        M = cv2.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), rotate_angle, 1)
        dst = cv2.warpAffine(img, M, (cols, rows))
        cv2.imwrite(rotated_dir + str(i) + imgName_tail, dst)
    return 1
# 将图片以第一张图片为基准转正
# for i in range(start_index, end_index):
# img = cv2.imread(rotated_dir + str(i) + imgName_tail)
# rows, cols, chanel = img.shape
# print(rows, cols)
# rotate_angle = 2
# print(rotate_angle)
# M = cv2.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), rotate_angle, 1)
# dst = cv2.warpAffine(img, M, (cols, rows))
# cv2.imwrite("photo7_rotated_vertical/" + str(i) + imgName_tail, dst) | guoxxiong/Lane-Image-Stitching-Based-on-Integrated-Inertial-Navigation | rotate.py | rotate.py | py | 2,299 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number... |
15621341184 | import urllib
from bs4 import BeautifulSoup
import re
import csv
url = "http://stats.footballpredictions.net/england/premier/1995-1996/results.html"
htmlfile = urllib.urlopen(url)
soup = BeautifulSoup(htmlfile)
#home team names
hometeam = soup.find_all("td", class_="hometeam")
ht = []
for element in hometeam:
ht.append(element.a.get_text())
#away team names
awayteam = soup.find_all("td", class_="awayteam")
wt = []
for element in awayteam:
wt.append(element.a.get_text())
#scores
scores = soup.find_all("td", class_="score")
sc = []
for element in scores:
#sc.append(element.get_text())
a = element.get_text()
a = re.sub(r"\s+", "", a, flags=re.UNICODE)
a = a.encode('ascii')
sc.append(a.split('-'))
f = open("results.csv", "wt")
try:
writer = csv.writer(f, delimiter=',')
writer.writerow(("hometeam", "awayteam", "homegoal", "awaygoal"))
for i in range(len(ht)):
writer.writerow((ht[i], wt[i], sc[i][0], sc[i][1]))
finally:
f.close()
f = open("results2.csv", "wt")
try:
writer = csv.writer(f, delimiter=',')
writer.writerow(("team", "opponent", "goal", "home"))
for i in range(len(ht)):
writer.writerow((ht[i], wt[i], sc[i][0], 1))
writer.writerow((wt[i], ht[i], sc[i][1], 0))
finally:
f.close()
| dviera/replications | Lee/get_data.py | get_data.py | py | 1,233 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.urlopen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number":... |
69827179554 | import pandas as pd
from tqdm import tqdm
labels = ["Prevention", "Treatment", "Diagnosis", "Mechanism", "Case Report", "Transmission", "Forecasting", "General"]
df1 = pd.read_csv("org_proc.csv")
df2 = pd.read_csv("covid_dataset_shuffled.csv")
df2 = df2.sample(frac=1).reset_index(drop=True)
print(len(df2))
print(df1["processed_text"])
result = []
print(df2.columns)
cols = dict()
cols["processed_text"] = []
cols["ad_creative_body"] = []
for label in labels:
cols[label] = []
for i, row_i in tqdm(df2.iterrows()):
proc1 = row_i["processed_text"]
result = df1.loc[df1["processed_text"] == proc1.strip()]
if result.empty:
continue
cols["processed_text"].append(result["processed_text"].values[0].strip())
cols["ad_creative_body"].append(result["ad_creative_body"].values[0])
for label in labels:
cols[label].append(row_i[label])
df3 = pd.DataFrame(cols)
print(df3)
print(len(df3))
df3.to_csv("sbp_dataset.csv")
| ujeong1/SBP22_DiscourseNet_experiment | creator/matched_csv.py | matched_csv.py | py | 955 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_... |
36571798337 | from flask import jsonify, request
from flask_restful import Resource
from Model import db, VistorChainsTotal, VistorLevel, Vistor, States
from Model import VisitorChainTotalSchema, VistorLevelSchema, VisitorSchema, QuerySchema, StatesSchema, Names
from webargs import fields, validate
from webargs.flaskparser import use_args, use_kwargs, parser, abort
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import Column, Date, Integer, Text, create_engine, inspect, and_, or_
import datetime
currentDT = datetime.datetime.now()
# Marshmallow schema instances (many=True) used to serialise query results.
# NOTE(review): each "X = XSchema" class assignment below is immediately
# shadowed by the instantiation on the following line, so the first line of
# each pair is dead.
visitor_chain_schema = VisitorChainTotalSchema
visitor_chain_schema = VisitorChainTotalSchema(many=True)
visitor_schema = VisitorSchema
visitor_schema = VisitorSchema(many=True)
visitor_level_schema = VistorLevelSchema
visitor_level_schema = VistorLevelSchema(many=True)
query_schema = QuerySchema
query_schema = QuerySchema(many=True)
schema_states = StatesSchema()
schema_states = StatesSchema(many=True)
class QueryResource(Resource):
    """REST resource answering visitor queries filtered by level, location,
    audience states and visit-frequency classes."""

    # webargs request-argument schema shared by the GET handlers.
    queryArgs = {
        "Level_1": fields.Str(),
        "Level_2": fields.Str(),
        "name": fields.Str(),
        "location": fields.Str(),
        "audience": fields.Str(),
        "frequency": fields.List(fields.Str()),
        "states": fields.List(fields.Str())
    }

    def _get(self, code):
        """Look up state rows by two-letter abbreviation *code*."""
        states = States.query.filter_by(abv=code).all()
        states = schema_states.dump(states).data
        return states

    def anayzedata(self, response):
        """Annotate each row with the number of rows sharing its (ID, state)."""
        for x in response:
            y = x['ID']
            q = x['state']
            count = 0
            for z in response:
                if z['ID'] == y and z['state'] == q:
                    count += 1
            x['count'] = count
        return response

    @use_args(queryArgs)
    def get(self, args):
        """Run the visitor query described by *args*; return rows + metadata."""
        name = ""
        Level_1 = ""
        Level_2 = ""
        if 'name'in args:
            name = args['name']
        if 'Level_1'in args:
            level_1 = args['Level_1']
        if 'Level_2'in args:
            level_2 = args['Level_2']
        # Most specific filter wins: location, then Level_2, then Level_1.
        if 'location' in args and not args['location'] == "":
            res = Vistor.query.filter_by(LOCATION=args['location']).all()
            result = visitor_schema.dump(res).data
        elif 'Level_2' in args and not args['Level_2'] == "":
            res = Vistor.query.filter_by(Level_2=args['Level_2']).all()
            result = visitor_schema.dump(res).data
        elif 'Level_1' in args and not args['Level_1'] == "":
            res = Vistor.query.filter_by(Level_1=args['Level_1']).all()
            result = visitor_schema.dump(res).data
        else:
            res = Vistor.query.all()
            result = visitor_schema.dump(res).data
        audience = ""
        frequency = []
        states = []
        _response = []
        if 'audience' in args:
            audience = args['audience']
        if 'states' in args:
            states = args['states']
        if 'frequency' in args:
            frequency = args['frequency']
        # Restrict to the requested states when the audience is state-based.
        if audience == 'states' and len(states) >= 1 and len(res) >= 1:
            for index, _rep in enumerate(result):
                if _rep['state'] in states:
                    _response.append(_rep)
        else:
            _response = result
        # Apply the frequency mask (False means "all three" = no filtering).
        if len(frequency) >= 1:
            frequencyMap = self.parseFrequency(frequency)
            if frequencyMap:
                if frequencyMap == [1, 0, 0]:
                    _response = [i for i in _response if (i['Mild'] == 1)]
                elif frequencyMap == [0, 1, 0]:
                    _response = [i for i in _response if (i['Moderate'] == 1)]
                elif frequencyMap == [0, 0, 1]:
                    _response = [i for i in _response if (i['Frequent'] == 1)]
                elif frequencyMap == [1, 1, 0]:
                    _response = [i for i in _response if (
                        i['Mild'] == 1 and i['Moderate'] == 1)]
                elif frequencyMap == [1, 0, 1]:
                    _response = [i for i in _response if (
                        i['Mild'] == 1 and i['Frequent'] == 1)]
                elif frequencyMap == [0, 1, 1]:
                    _response = [i for i in _response if (
                        i['Moderate'] == 1 and i['Frequent'] == 1)]
        # Attach the state record to every row that survived filtering.
        if len(_response) >= 1:
            for x, y in enumerate(_response):
                y['_map'] = self._get(y['state'])
        ananyzlized = self.anayzedata(_response)
        l_frengency = [i for i in range(0, len(frequency))]
        metadata = {
            'queryname': name,
            'queries': {
                'audience': audience,
                'Level_1': Level_1,
                'Level_2': Level_2,
                'frequecy': dict(zip(l_frengency, frequency)),
                'states': dict(zip([i for i in range(0, len(states))], states))
            },
            'time': str(datetime.datetime.now().timestamp())
        }
        return {"message": "Success", 'data': ananyzlized, 'meta': metadata}, 200

    @use_args(queryArgs)
    def get1(self, args):
        """SQL-side variant of get(); unfinished and unused (never returns)."""
        # _query = self.parseQuery(args)
        total = Vistor.query
        _level_1 = None
        _level_2 = None
        if not args["Level_1"] == "":
            _level_1 = args['Level_1']
        if not args['Level_2'] == "":
            _level_2 = args['Level_2']
        if type(_level_1) == str and type(_level_2) == str:
            total = total.filter_by(Level_1=_level_1, Level_2=_level_2)
        else:
            if type(_level_1) == str:
                total = total.filter_by(Level_1=_level_1)
            if type(_level_2) == str:
                total = total.filter_by(Level_2=_level_2)
        if 'location' in args and not args['location'] == "":
            _location = args['location']
            total = total.filter_by(LOCATION=_location)
        if 'frequency' in args:
            if(len(args['frequency'])) == 3:
                total = total.filter(
                    or_(Vistor.Frequent == 1, Vistor.Mild == 1, Vistor.Moderate == 1))
            elif 'Frequent' in args['frequency']:
                total = total.filter_by(Frequent=1)
            elif 'Moderate' in args['frequency']:
                total = total.filter_by(Moderate=1)
            elif 'Mild' in args['frequency']:
                total = total.filter_by(Mild=1)
        if "states" in args and args['audience'] == "states":
            if len(args['states']) >= 1:
                pass
                # total = total.filter(and_(Vistor.state.in_([args['states']])))

    def parseFrequency(self, frequency):
        """Convert the selected frequency names into a [Mild, Moderate,
        Frequent] 0/1 mask; return False when all three are selected (no
        filtering needed).

        Bug fix: the original exclusive if/elif chain bound only one of the
        three locals, so the return line raised NameError for any real
        selection.  Now mirrors AudienceResource.parseFrequency.
        """
        if len(frequency) == 3:
            return False
        _mild = 0
        _frequent = 0
        _moderate = 0
        if 'Frequent' in frequency:
            _frequent = 1
        if 'Moderate' in frequency:
            _moderate = 1
        if 'Mild' in frequency:
            _mild = 1
        return [_mild, _moderate, _frequent]

    def object_as_dict(self, obj):
        """Convert a SQLAlchemy model instance into a plain dict."""
        return {c.key: getattr(obj, c.key)
                for c in inspect(obj).mapper.column_attrs}

    def _generateMaping(self, stateCode):
        """Dump all state rows (*stateCode* is currently ignored)."""
        _query = States.query.all()
        _query = StatesSchema.dump(_query).data
        return _query
class AudienceResource(Resource):
    """REST resource aggregating audience totals per chain / level / state."""

    # webargs request-argument schema for the GET handler.
    queryArgs = {
        "Level_1": fields.Str(),
        "Level_2": fields.Str(),
        "name": fields.Str(),
        "location": fields.Str(),
        "audience": fields.Str(),
        "frequency": fields.List(fields.Str()),
        "states": fields.List(fields.Str())
    }

    def anayzedata(self, response, fmap):
        """Annotate rows with per-state AudienceTotal sums when exactly two
        frequency classes are selected.

        Bug fix: the original condition ``fmap == [1, 1, 0] or [1,0,1] or
        [0,1,1]`` was always true because a non-empty list literal is
        truthy; it now tests membership, so single-frequency (or empty)
        masks skip the aggregation as the previously-dead else branch
        intended.
        """
        if fmap in ([1, 1, 0], [1, 0, 1], [0, 1, 1]):
            for x in response:
                #y = x['ChainID']
                q = x['state']
                count = 0
                for z in response:
                    if z['state'] == q:
                        count += z['AudienceTotal']
                x['count'] = count
            return response
        else:
            return response

    def _get(self, code):
        """Return the state row matching abbreviation *code*, or {}."""
        states = States.query.filter_by(abv=code).all()
        states = schema_states.dump(states).data
        if len(states) >=1:
            return states[0]
        else:
            return {}

    def parseFrequency(self, frequency):
        """Convert the selected frequency names into a [Mild, Moderate,
        Frequent] 0/1 mask; return False when all three are selected."""
        _mild = 0
        _frequent = 0
        _moderate = 0
        if len(frequency) == 3:
            return False
        if 'Frequent' in frequency:
            _frequent = 1
        if 'Moderate' in frequency:
            _moderate = 1
        if 'Mild' in frequency:
            _mild = 1
        return [_mild, _moderate, _frequent]

    @use_args(queryArgs)
    def get(self, args):
        """Run the audience query described by *args*; return rows + metadata."""
        name = ""
        Level_1 = ""
        Level_2 = ""
        if 'name'in args:
            name = args['name']
        if 'Level_1'in args:
            level_1 = args['Level_1']
        if 'Level_2'in args:
            level_2 = args['Level_2']
        # Most specific filter wins: location (chain totals), then levels.
        if 'location' in args and not args['location'] == "":
            res = VistorChainsTotal.query.filter_by(LOCATION=args['location']).all()
            result = visitor_chain_schema.dump(res).data
        elif 'Level_2' in args and not args['Level_2'] == "":
            res = VistorLevel.query.filter_by(Level_2=args['Level_2']).all()
            result = visitor_level_schema.dump(res).data
        elif 'Level_1' in args and not args['Level_1'] == "":
            res = VistorLevel.query.filter_by(Level_1=args['Level_1']).all()
            result = visitor_level_schema.dump(res).data
        else:
            res = VistorChainsTotal.query.all()
            result = visitor_chain_schema.dump(res).data
        audience = ""
        frequency = []
        states = []
        _response = []
        if 'audience' in args:
            audience = args['audience']
        if 'states' in args:
            states = args['states']
        if 'frequency' in args:
            frequency = args['frequency']
        # Restrict to the requested states when the audience is state-based.
        if audience == 'states' and len(states) >= 1 and len(res) >= 1:
            for index, _rep in enumerate(result):
                if _rep['state'] in states:
                    _response.append(_rep)
        else:
            _response = result
        frequencyMap=[]
        # Apply the frequency mask (False means "all three" = no filtering).
        if len(frequency) >= 1:
            frequencyMap = self.parseFrequency(frequency)
            if frequencyMap:
                if frequencyMap == [1, 0, 0]:
                    _response = [i for i in _response if (i['Mild'] == 1)]
                elif frequencyMap == [0, 1, 0]:
                    _response = [i for i in _response if (i['Moderate'] == 1)]
                elif frequencyMap == [0, 0, 1]:
                    _response = [i for i in _response if (i['Frequent'] == 1)]
                elif frequencyMap == [1, 1, 0]:
                    _response = [i for i in _response if (
                        i['Mild'] == 1 and i['Moderate'] == 1)]
                elif frequencyMap == [1, 0, 1]:
                    _response = [i for i in _response if (
                        i['Mild'] == 1 and i['Frequent'] == 1)]
                elif frequencyMap == [0, 1, 1]:
                    _response = [i for i in _response if (
                        i['Moderate'] == 1 and i['Frequent'] == 1)]
        # Attach the state record to every row that survived filtering.
        if len(_response) >= 1:
            for x, y in enumerate(_response):
                y['_map'] = self._get(y['state'])
        ananyzlized = self.anayzedata(_response,frequencyMap)
        metadata = {
            'queryname': name,
            'queries': {
                'audience': audience,
                'Level_1': Level_1,
                'Level_2': Level_2,
                'frequecy': dict(zip([i for i in range(0,len(frequency))], frequency)),
                'states': dict(zip([i for i in range(0, len(states))], states))
            },
            'time': str(datetime.datetime.now().timestamp())
        }
        return {"message": "Success", 'args':args ,'data': ananyzlized, 'meta':metadata}, 200
| donc310/WidgetApi | resources/Query.py | Query.py | py | 9,773 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "Model.VisitorChainTotalSchema",
"line_number": 17,
"usage_type": "name"
},
{
"api_na... |
43518145068 | from optimize import snopta, SNOPT_options
import numpy as np
from scipy.spatial import Delaunay
import scipy.io as io
import os
import inspect
from dogs import Utils
from dogs import interpolation
from dogs import constant_snopt_min
'''
adaptiveK_snopt.py file contains functions used for DeltaDOGS(Lambda) algorithm.
Using the package optimize (SNOPT) provided by Prof. Philip Gill and Dr. Elizabeth Wong, UCSD.
This is a script of DeltaDOGS(Lambda) dealing with linear constraints problem which is solved using SNOPT.
Notice that this scripy inplements the snopta function. (Beginner friendly)
The adaptive-K continuous search function has the form:
Sc(x) = (P(x) - y0) / K*e(x):
Sc(x): constant-K continuous search function;
P(x): Interpolation function:
For AlphaDOGS: regressionparameterization because the function evaluation contains noise;
For DeltaDOGS: interpolationparameterization;
e(x): The uncertainty function constructed based on Delaunay triangulation.
Function contained:
tringulation_search_bound: Search for the minimizer of continuous search function over all the Delaunay simplices
over the entire domain.
Adoptive_K_search: Search over a specific simplex.
AdaptiveK_search_cost: Calculate the value of continuous search function.
LOG Dec. 4, 2018: Function snopta still violates the constraints!
LOG Dec. 4, 2018: Put the actual constant into function bounds Flow and Fupp, do not include constant inside
function evaluation F(x).
LOG Dec. 15, 2018: The linear derivative A can not be all zero elements. Will cause error.
LOG Dec. 16, 2018: The 1D bounds of x should be defined by xlow and xupp,
do not include them in F and linear derivative A.
LOG Dec. 18, 2018: The 2D active subspace - DeltaDOGS with SNOPT shows error message:
SNOPTA EXIT 10 -- the problem appears to be infeasible
SNOPTA INFO 14 -- linear infeasibilities minimized
Fixed by introducing new bounds on x variable based on Delaunay simplex.
'''
################################## adaptive K search SNOPT ###################################
def triangulation_search_bound_snopt(inter_par, xi, y0, ind_min, y_safe, L_safe, finest_mesh):
    """Search every Delaunay simplex for the minimizer of the adaptive-K
    continuous search function Sc(x) = (P(x) - y0) / e(x).

    Args:
        inter_par  : interpolation parameterization; inter_par.xi holds the
                     evaluated points xE.
        xi         : n-by-N matrix of support points for the triangulation.
        y0         : target value of the truth function.
        ind_min    : index of the current best evaluated point (used to pick
                     the "local" candidate simplex).
        y_safe     : safety-function evaluations at xE.
        L_safe     : Lipschitz constant of the safety functions.
        finest_mesh: flag forwarded to the per-simplex SNOPT search.

    Returns:
        (xmin, ymin, result, safe_estimate): minimizer, its value, a tag for
        which candidate won ('sc global' / 'sc safe' / 'sc local', or the
        interpolation fallback), and the Lipschitz safety bound at xmin.
    """
    # reddir is a vector
    inf = 1e+20
    n = xi.shape[0]
    xE = inter_par.xi
    # 0: Build up the Delaunay triangulation based on reduced subspace.
    if n == 1:
        # 1-D case: the "triangulation" is just consecutive pairs of the
        # sorted points.
        sx = sorted(range(xi.shape[1]), key=lambda x: xi[:, x])
        tri = np.zeros((xi.shape[1] - 1, 2))
        tri[:, 0] = sx[:xi.shape[1] - 1]
        tri[:, 1] = sx[1:]
        tri = tri.astype(np.int32)
    else:
        options = 'Qt Qbb Qc' if n <= 3 else 'Qt Qbb Qc Qx'
        tri = Delaunay(xi.T, qhull_options=options).simplices
        keep = np.ones(len(tri), dtype=bool)
        for i, t in enumerate(tri):
            if abs(np.linalg.det(np.hstack((xi.T[t], np.ones([1, n + 1]).T)))) < 1E-15:
                keep[i] = False # Point is coplanar, we don't want to keep it
        tri = tri[keep]
    # Sc contains the continuous search function value of the center of each Delaunay simplex
    # 1: Identify the minimizer of adaptive K continuous search function
    Sc = np.zeros([np.shape(tri)[0]])
    Scl = np.zeros([np.shape(tri)[0]])
    Sc_safe = np.zeros(tri.shape[0])
    Rmax, max_dis = constant_snopt_min.max_circumradius_delauany_simplex(xi, xE, tri)
    # Determine the parameters b and c for exterior uncertainty function.
    b, c, status = constant_snopt_min.uncertainty_parameter_solver(Rmax, max_dis)
    for ii in range(np.shape(tri)[0]):
        R2, xc = Utils.circhyp(xi[:, tri[ii, :]], n)
        if R2 < inf:
            # initialize with body center of each simplex
            x = np.dot(xi[:, tri[ii, :]], np.ones([n + 1, 1]) / (n + 1))
            exist = constant_snopt_min.unevaluated_vertices_identification(xi[:, tri[ii, :]], xE)[0]
            if exist == 0: # The Delauany simplex considered is safe
                e = (R2 - np.linalg.norm(x - xc) ** 2)
            else:
                # Exterior uncertainty function for simplices touching
                # unevaluated vertices.
                e = constant_snopt_min.discrete_min_uncertainty(x, xE, b, c)[0]
            Sc[ii] = (interpolation.interpolate_val(x, inter_par) - y0) / e
            Sc_safe[ii] = (Sc[ii] if exist == 0 else inf)
            if np.sum(ind_min == tri[ii, :]):
                Scl[ii] = np.copy(Sc[ii])
            else:
                Scl[ii] = inf
        else:
            # Degenerate simplex (unbounded circumradius): exclude it.
            Scl[ii] = inf
            Sc[ii] = inf
    if np.min(Sc) < 0: # minimize p(x) subject to safe estimate
        # Negative Sc: fall back to minimizing the interpolant itself over
        # the simplex with the smallest body-center interpolation value.
        Scp = np.zeros(tri.shape[0])
        for ii in range(tri.shape[0]):
            x = np.dot(xi[:, tri[ii, :]], np.ones([n + 1, 1]) / (n + 1))
            Scp[ii] = interpolation.interpolate_val(x, inter_par)
        ind = np.argmin(Scp)
        delta = 1.0e-5
        tol = 1.0e-4
        xmin, ymin, result = interpolation_min_safe_solver(xi[:, tri[ind, :]], inter_par, y0, y_safe, L_safe, delta, tol)
    else:
        # 2: Determine the minimizer of continuous search function at those 3 Delaunay simplices.
        # First index : Global one, the simplex that has minimum value of Sc at circumcenters.
        # Second index: Global one within the safe region.
        # Third index : Local one with the lowest interpolation value.
        index = np.array([np.argmin(Sc), np.argmin(Sc_safe), np.argmin(Scl)])
        xm = np.zeros((n, 3))
        ym = np.zeros(3)
        for i in range(3):
            temp_x, ym[i] = adaptivek_search_snopt_min(xi[:, tri[index[i], :]], inter_par, y0, y_safe, L_safe, b, c, finest_mesh)
            xm[:, i] = temp_x.T[0]
        ymin = np.min(ym)
        xmin = xm[:, np.argmin(ym)].reshape(-1, 1)
        if np.argmin(ym) == 0:
            result = 'sc global'
        elif np.argmin(ym) == 1:
            result = 'sc safe'
        else:
            result = 'sc local'
    # Safety estimate at the winner: Lipschitz bound from its nearest
    # evaluated neighbour.
    val, idx, x_nn = Utils.mindis(xmin, xE)
    safe_estimate = y_safe[:, idx] - L_safe * val
    return xmin, ymin, result, safe_estimate
# ===================================== Continuous search function Minimization ==================================
def adaptivek_search_snopt_min(simplex, inter_par, y0, y_safe, L_safe, b, c, finest_mesh):
'''
Find the minimizer of the search fucntion in a simplex using SNOPT package.
The function F is composed as: 1st - objective
2nd to nth - simplex bounds
n+1 th .. - safe constraints
:param simplex : Delauany simplex of interest, n by n+1 matrix.
:param inter_par: Interpolation info.
:param y0 : Target value of truth function
:param y_safe : Safe function evaluation.
:param L_safe : Lipschitz constant of safety functions.
:param b : The parameters for exterior uncertainty function. It is determined once Delaunay-tri is fixed.
:param c : The parameters for exterior uncertainty function. It is determined once Delaunay-tri is fixed.
:return: The minimizer of adaptive K continuous search function within the given Delauany simplex.
'''
inf = 1.0e+20
xE = inter_par.xi
n = xE.shape[0]
M = y_safe.shape[0]
# ------- ADD THE FOLLOWING LINE WHEN DEBUGGING --------
# simplex = xi[:, tri[ind, :]]
# ------- ADD THE FOLLOWING LINE WHEN DEBUGGING --------
# Determine if the boundary corner exists in simplex, if boundary corner detected:
# e(x) = (|| x - x' || + c )^b - c^b, x' in S^k
# else, e(x) is the regular uncertainty function.
exist, eval_indicators = constant_snopt_min.unevaluated_vertices_identification(simplex, xE)
R2, xc = Utils.circhyp(simplex, n)
x = np.dot(simplex, np.ones([n + 1, 1]) / (n + 1))
# First find minimizer xr on reduced model, then find the 2D point corresponding to xr. Constrained optm.
A_simplex, b_simplex = Utils.search_simplex_bounds(simplex)
lb_simplex = np.min(simplex, axis=1)
ub_simplex = np.max(simplex, axis=1)
m = n + 1 # The number of constraints which is determined by the number of simplex boundaries.
assert m == A_simplex.shape[0], 'The No. of simplex constraints is wrong'
# nF: The number of problem functions in F(x),
# including the objective function, linear and nonlinear constraints.
# ObjRow indicates the numer of objective row in F(x).
ObjRow = 1
if n > 1:
# The first function in F(x) is the objective function, the rest are m simplex constraints.
# The last part of functions in F(x) is the safe constraints.
# In high dimension, A_simplex make sure that linear_derivative_A won't be all zero.
nF = 1 + m + M # the last 1 is the safe constraint.
# Since adaptiveK using ( p(x) - f0 ) / e(x), the objective function is nonlinear.
# The constraints are generated by simplex bounds, all linear.
Flow = np.hstack((-inf, b_simplex.T[0], np.zeros(M) ))
Fupp = inf * np.ones(nF)
# The lower and upper bounds of variables x.
xlow = np.copy(lb_simplex) + finest_mesh/2
xupp = np.copy(ub_simplex) - finest_mesh/2
# For the nonlinear components, enter any nonzero value in G to indicate the location
# of the nonlinear derivatives (in this case, 2).
# A must be properly defined with the correct derivative values.
linear_derivative_A = np.vstack((np.zeros((1, n)), A_simplex, np.zeros((M, n)) ))
nonlinear_derivative_G = np.vstack((2 * np.ones((1, n)), np.zeros((m, n)), 2 * np.ones((M, n)) ))
else:
# For 1D problem, only have 1 objective function, the simplex constraint is defined by x bounds.
# nF = 1 + M, 1 obj + M safe cons.
nF = 1 + M + 1
Flow = np.hstack((-inf, np.zeros(M), -inf ))
Fupp = inf * np.ones(nF)
xlow = np.copy(lb_simplex) + finest_mesh/2
xupp = np.copy(ub_simplex) - finest_mesh/2
linear_derivative_A = np.vstack(( np.zeros((1, n)), np.zeros((M, n)), np.ones((1, n)) ))
nonlinear_derivative_G = np.vstack(( 2 * np.ones((1 + M, n)), np.zeros((1, n)) ))
x0 = x.T[0]
save_opt_for_snopt_ak(n, nF, inter_par, xc, R2, y0, A_simplex, y_safe, L_safe, exist, b, c)
options = SNOPT_options()
options.setOption('Infinite bound', inf)
options.setOption('Verify level', 3)
options.setOption('Verbose', False)
options.setOption('Print level', -1)
options.setOption('Scale option', 2)
options.setOption('Print frequency', -1)
options.setOption('Scale option', 2)
options.setOption('Feasibility tolerance', 1e-5)
options.setOption('Summary', 'No')
sol = snopta(dogsobj, n, nF, x0=x0, name='DeltaDOGS_snopt', xlow=xlow, xupp=xupp, Flow=Flow, Fupp=Fupp,
ObjRow=ObjRow, A=linear_derivative_A, G=nonlinear_derivative_G, options=options)
x = sol.x
y = sol.objective
return x.reshape(-1, 1), y
def save_opt_folder_path():
current_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
folder = current_path[:-5] # -5 comes from the length of '/dogs'
return folder
def save_opt_for_snopt_ak(n, nF, inter_par, xc, R2, y0, A_simplex, y_safe, L_safe, exist, b, c):
var_opt = {}
folder = save_opt_folder_path()
if inter_par.method == "NPS":
var_opt['inter_par_method'] = inter_par.method
var_opt['inter_par_w'] = inter_par.w
var_opt['inter_par_v'] = inter_par.v
var_opt['inter_par_xi'] = inter_par.xi
var_opt['n'] = n
var_opt['nF'] = nF
var_opt['xc'] = xc
var_opt['R2'] = R2
var_opt['y0'] = y0
var_opt['A'] = A_simplex
var_opt['y_safe'] = y_safe
var_opt['L_safe'] = L_safe
var_opt['exist'] = exist
var_opt['b'] = b
var_opt['c'] = c
io.savemat(folder + "/opt_info_ak.mat", var_opt)
return
def adaptivek_search_cost_snopt(x):
x = x.reshape(-1, 1)
folder = save_opt_folder_path()
var_opt = io.loadmat(folder + "/opt_info_ak.mat")
n = var_opt['n'][0, 0]
xc = var_opt['xc']
R2 = var_opt['R2'][0, 0]
y0 = var_opt['y0'][0, 0]
nF = var_opt['nF'][0, 0]
A = var_opt['A']
b = var_opt['b'][0, 0]
c = var_opt['c'][0, 0]
y_safe = var_opt['y_safe']
L_safe = var_opt['L_safe'][0, 0]
exist = var_opt['exist'][0, 0]
# Initialize the output F.
F = np.zeros(nF)
M = y_safe.shape[0]
method = var_opt['inter_par_method'][0]
inter_par = interpolation.Inter_par(method=method)
inter_par.w = var_opt['inter_par_w']
inter_par.v = var_opt['inter_par_v']
inter_par.xi = var_opt['inter_par_xi']
p = interpolation.interpolate_val(x, inter_par)
gp = interpolation.interpolate_grad(x, inter_par)
if exist == 0:
e = R2 - np.linalg.norm(x - xc) ** 2
ge = - 2 * (x - xc)
else:
e, ge, gge = constant_snopt_min.discrete_min_uncertainty(x, inter_par.xi, b, c)
# denominator = (1e-10 if abs(p-y0) < 1e-10 else p - y0)
# F[0] = - e / denominator
# DM = - ge / denominator + e * gp / denominator ** 2
F[0] = (p - y0) / e
DM = gp / e - ge * (p - y0) / e ** 2
# G1: The gradient of the objective function, the continuous search function.
G1 = DM.flatten()
val, idx, x_nn = Utils.mindis(x, inter_par.xi)
norm2_difference = np.sqrt(np.dot((x - x_nn).T, x - x_nn))
norm2_difference = ( 1e-15 if norm2_difference < 1e-15 else norm2_difference )
# G2: Safety function constraint gradient, flattened version, trick is size = M.
G2 = np.tile((- L_safe * (x - x_nn) / norm2_difference).T[0], M)
if n > 1:
# nD data has n+1 simplex bounds.
F[1 : 1 + (n + 1)] = (np.dot(A, x)).T[0]
F[1 + (n + 1) : 1 + (n + 1) + M] = y_safe[:, idx] - L_safe * norm2_difference * np.ones(M)
else:
F[1 : 1 + M] = y_safe[:, idx] - L_safe * norm2_difference * np.ones(M)
F[-1] = np.sum(x)
G = np.hstack((G1, G2))
return F, G
def dogsobj(status, x, needF, F, needG, G):
# G is the nonlinear part of the Jacobian
F, G = adaptivek_search_cost_snopt(x)
return status, F, G
# ===================================== Interpolant Minimization ==================================
def interpolation_min_safe_solver(simplex, inter_par, y0, y_safe, L_safe, delta, tol):
xE = inter_par.xi
n = xE.shape[0]
x = np.dot(simplex, np.ones([n + 1, 1]) / (n + 1))
# First: Determine the estimated safety function values at x ( p(x) < y0 ).
val, idx, x_nn = Utils.mindis(x, xE)
safe_estimate_x = y_safe[:, idx] - L_safe * val
if (safe_estimate_x > 0 ).all():
# Safety is guaranteed, root finding p(x) along x and x_nn
f = lambda x: interpolation.interpolate_val(x, inter_par) - y0
result = 'minimize p(x)'
else:
# Safety is not guaranteed, root finding safety function along x and x_nn
f = lambda x: y_safe[:, idx] - L_safe * np.linalg.norm(x - x_nn)
result = 'minimize hat_psi(x)'
xmin, status = bisection_root_finding(f, x, x_nn, delta, tol)
ymin = f(xmin)
return xmin, ymin, result
def bisection_root_finding(f, a, b, delta, tol):
'''
Finding the root for the objective function f, could be slightly positive solution due to safety functions.
Thought about using false position method at the first time, but it seems that falsi method only works for
1D function.
:param f : Objective function, could be multi-dimensional.
:param a : Center of the circumcircle.
:param b : The closest evaluated data point to a.
:param delta: Tolerance of distance of your interval on variable x.
:param tol : Tolerance of objective function values.
:return : The root of f(must be >= 0).
'''
num_iter = 1000
# The hard thing is, you dont know the evaluated data is upper bound or the lower bound.
if np.linalg.norm(a) < np.linalg.norm(b):
low_bnd_b = 0
low_bnd = a
upp_bnd = b
else:
low_bnd_b = 1
low_bnd = b
upp_bnd = a
flow = f(low_bnd)
fupp = f(upp_bnd)
eps = delta
e = np.copy(upp_bnd - low_bnd)
if ((np.sign(flow) > 0).all() and (np.sign(fupp) > 0).all()) or ((np.sign(flow) < 0).all() and (np.sign(fupp) < 0).all()):
x = np.copy(b)
status = 'Error message: Boundaries have the same sign, should not happen in this case.'
else:
status = 'Maximum iteration reached.'
for i in range(num_iter):
e /= 2
m = low_bnd + e
fm = f(m)
if np.max(e) < delta:
status = 'Required error of subinterval bound achieved'
break
elif np.max(np.abs(fm)) < tol:
status = 'The tolerance on the objective function value is reached.'
break
else:
if (np.sign(fm) > 0 + tol).all():
# mid point is all positive, set the bound close to the evaluated data to be the mid point
if low_bnd_b == 1:
low_bnd = np.copy(m)
flow = np.copy(fm)
else:
upp_bnd = np.copy(m)
fupp = np.copy(fm)
else:
# mid point has negative part, set bound close to the circumcenter to be the mid point
if low_bnd_b == 1:
upp_bnd = np.copy(m)
fupp = np.copy(fm)
else:
low_bnd = np.copy(m)
fupp = np.copy(fm)
if (np.sign(fm) > 0 + tol).all(): # mid point satisfy the safety functions
x = np.copy(m)
else: # mid point does not satisfy the safety functions, using the bound close to the evaluated data points.
if low_bnd_b == 1:
x = np.copy(low_bnd)
else:
x = np.copy(upp_bnd)
return x, status
| kimukook/SDOGS | dogs/adaptive_snopt_min.py | adaptive_snopt_min.py | py | 18,534 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial.Delaunay",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
... |
41034040354 | from __future__ import annotations
from typing import Any
from typing import Optional
from . import ext
from .._typing import _OnConflictConstraintT
from .._typing import _OnConflictIndexElementsT
from .._typing import _OnConflictIndexWhereT
from .._typing import _OnConflictSetT
from .._typing import _OnConflictWhereT
from ... import util
from ...sql import coercions
from ...sql import roles
from ...sql import schema
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import KeyedColumnElement
from ...sql.expression import alias
from ...util.typing import Self
__all__ = ("Insert", "insert")
def insert(table: _DMLTableArgument) -> Insert:
"""Construct a PostgreSQL-specific variant :class:`_postgresql.Insert`
construct.
.. container:: inherited_member
The :func:`sqlalchemy.dialects.postgresql.insert` function creates
a :class:`sqlalchemy.dialects.postgresql.Insert`. This class is based
on the dialect-agnostic :class:`_sql.Insert` construct which may
be constructed using the :func:`_sql.insert` function in
SQLAlchemy Core.
The :class:`_postgresql.Insert` construct includes additional methods
:meth:`_postgresql.Insert.on_conflict_do_update`,
:meth:`_postgresql.Insert.on_conflict_do_nothing`.
"""
return Insert(table)
class Insert(StandardInsert):
"""PostgreSQL-specific implementation of INSERT.
Adds methods for PG-specific syntaxes such as ON CONFLICT.
The :class:`_postgresql.Insert` object is created using the
:func:`sqlalchemy.dialects.postgresql.insert` function.
"""
stringify_dialect = "postgresql"
inherit_cache = False
@util.memoized_property
def excluded(
self,
) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
"""Provide the ``excluded`` namespace for an ON CONFLICT statement
PG's ON CONFLICT clause allows reference to the row that would
be inserted, known as ``excluded``. This attribute provides
all columns in this row to be referenceable.
.. tip:: The :attr:`_postgresql.Insert.excluded` attribute is an
instance of :class:`_expression.ColumnCollection`, which provides
an interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.excluded.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.excluded["column name"]`` or
``stmt.excluded["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
.. seealso::
:ref:`postgresql_insert_on_conflict` - example of how
to use :attr:`_expression.Insert.excluded`
"""
return alias(self.table, name="excluded").columns
_on_conflict_exclusive = _exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already has "
"an ON CONFLICT clause established"
},
)
@_generative
@_on_conflict_exclusive
def on_conflict_do_update(
self,
constraint: _OnConflictConstraintT = None,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
) -> Self:
r"""
Specifies a DO UPDATE SET action for ON CONFLICT clause.
Either the ``constraint`` or ``index_elements`` argument is
required, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
:param set\_:
A dictionary or other mapping object
where the keys are either names of columns in the target table,
or :class:`_schema.Column` objects or other ORM-mapped columns
matching that of the target table, and expressions or literals
as values, specifying the ``SET`` actions to take.
.. versionadded:: 1.4 The
:paramref:`_postgresql.Insert.on_conflict_do_update.set_`
parameter supports :class:`_schema.Column` objects from the target
:class:`_schema.Table` as keys.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of
UPDATE, unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. If present, can be a literal SQL
string or an acceptable expression for a ``WHERE`` clause
that restricts the rows affected by ``DO UPDATE SET``. Rows
not meeting the ``WHERE`` condition will not be updated
(effectively a ``DO NOTHING`` for those rows).
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoUpdate(
constraint, index_elements, index_where, set_, where
)
return self
@_generative
@_on_conflict_exclusive
def on_conflict_do_nothing(
self,
constraint: _OnConflictConstraintT = None,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
) -> Self:
"""
Specifies a DO NOTHING action for ON CONFLICT clause.
The ``constraint`` and ``index_elements`` arguments
are optional, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoNothing(
constraint, index_elements, index_where
)
return self
class OnConflictClause(ClauseElement):
stringify_dialect = "postgresql"
constraint_target: Optional[str]
inferred_target_elements: _OnConflictIndexElementsT
inferred_target_whereclause: _OnConflictIndexWhereT
def __init__(
self,
constraint: _OnConflictConstraintT = None,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
):
if constraint is not None:
if not isinstance(constraint, str) and isinstance(
constraint,
(schema.Constraint, ext.ExcludeConstraint),
):
constraint = getattr(constraint, "name") or constraint
if constraint is not None:
if index_elements is not None:
raise ValueError(
"'constraint' and 'index_elements' are mutually exclusive"
)
if isinstance(constraint, str):
self.constraint_target = constraint
self.inferred_target_elements = None
self.inferred_target_whereclause = None
elif isinstance(constraint, schema.Index):
index_elements = constraint.expressions
index_where = constraint.dialect_options["postgresql"].get(
"where"
)
elif isinstance(constraint, ext.ExcludeConstraint):
index_elements = constraint.columns
index_where = constraint.where
else:
index_elements = constraint.columns
index_where = constraint.dialect_options["postgresql"].get(
"where"
)
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
elif constraint is None:
self.constraint_target = (
self.inferred_target_elements
) = self.inferred_target_whereclause = None
class OnConflictDoNothing(OnConflictClause):
__visit_name__ = "on_conflict_do_nothing"
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
def __init__(
self,
constraint: _OnConflictConstraintT = None,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
):
super().__init__(
constraint=constraint,
index_elements=index_elements,
index_where=index_where,
)
if (
self.inferred_target_elements is None
and self.constraint_target is None
):
raise ValueError(
"Either constraint or index_elements, "
"but not both, must be specified unless DO NOTHING"
)
if isinstance(set_, dict):
if not set_:
raise ValueError("set parameter dictionary must not be empty")
elif isinstance(set_, ColumnCollection):
set_ = dict(set_)
else:
raise ValueError(
"set parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update_values_to_set = [
(coercions.expect(roles.DMLColumnRole, key), value)
for key, value in set_.items()
]
self.update_whereclause = where
| sqlalchemy/sqlalchemy | lib/sqlalchemy/dialects/postgresql/dml.py | dml.py | py | 10,965 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "sql._typing._DMLTableArgument",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "sql.dml.Insert",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "sql.expression.alias",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "uti... |
4327026652 | import ipaddress
import re
import glob
def Return_IP_adr(str):
if re.match("^ ip address ([0-9.]+) ([0-9.]+)$", str):
r = re.match("^ ip address ([0-9.]+) ([0-9.]+)$", str)
return ipaddress.IPv4Network((r.group(1), r.group(2)),strict = False)
else:
return "None"
list_files = glob.glob("C:\\Users\\nv.solomennikova\\Documents\\pythonProject\\p4ne\\Lab1.5\\config_files\\*.txt")
list_result = []
for fl in list_files:
with open (fl) as f:
for s in f:
res = Return_IP_adr(s)
if res != "None":
list_result.append(res)
print((list(set(list_result))))
'''
r = re.match("^ ip address ([0-9.]+) ([0-9.]+)$", " ip address 192.168.1.1 255.255.255.0")
print(r.group(2))
res = ipaddress.IPv4Network((r.group(1), r.group(2)),strict = False)
''' | shpinatashan/p4ne | Lab1.6/Lab.py | Lab.py | py | 827 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.match",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "ipaddress.IPv4Network",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 1... |
27429545506 | #REQUIREMENTS
# ffmpeg
# vosk
# youtube-dl
##
import vosk
import os
import sys
import getopt
from traceback import print_exc
from subprocess import Popen, PIPE
import shlex
import json
from vosk import Model, KaldiRecognizer, SetLogLevel
def main(*argv):
try:
#argv = argv[0]
argv = sys.argv[1:]
model_path = "./model"
audio_filename = ""
youtube_link = ""
try:
opts, _ = getopt.getopt(argv, "l:f:m:",
["link =","file_name =",
"model_path ="])
#print(opts)
#print(args)
except Exception as err:
print(repr(err))
raise Exception("Option Error")
for opt, arg in opts:
if opt in ['-f', '--audio_filename']:
audio_filename = arg
elif opt in ['-m', '--model_path']:
model_path = arg
elif opt in ['-l', '--youtube_Link']:
youtube_link = arg
if not os.path.exists(model_path):
print ("Please download the model from https://alphacephei.com/vosk/models and unpack as 'model' in the current folder.")
raise Exception("Model Cannot Found")
if(youtube_link != ""):
audio_filename = get_youtube_audio(youtube_link)
#TODO
assert(youtube_link != "" or audio_filename != "") ("Input Error")
print( "LINK:",youtube_link,"FILE: ", audio_filename, " MODEL: ",model_path)
SetLogLevel(-1)
sample_rate=16000
model = Model(model_path)
rec = KaldiRecognizer(model, sample_rate)
process = Popen(['ffmpeg', '-loglevel', 'quiet', '-i',
audio_filename,
'-ar', str(sample_rate) , '-ac', '1', '-f', 's16le', '-'],
stdout=PIPE)
result = ""
while True:
data = process.stdout.read(4000)
if len(data) == 0:
break
if rec.AcceptWaveform(data):
data = json.loads(rec.Result())
result += data['text']
#print(result)
data = json.loads(rec.FinalResult())
result += data['text']
print("\n")
print(result)
except Exception as error:
print("ERROR: {}".format(error))
print_exc()
def get_youtube_audio(link):
extension = ".m4a"
filename = ""
try:
command = "youtube-dl --get-filename -o '%(title)s by %(uploader)s on %(upload_date)s.%(ext)s' " + link
args = shlex.split(command)
with Popen(args, stdout=PIPE, stderr=PIPE) as process:
process.wait()
for line in process.stdout.readlines():
temp = line.decode().strip()
filename = temp[:temp.index('.')]
filename = filename + extension
print(filename)
command = "youtube-dl -x --audio-format m4a " + link + " -o '%(title)s by %(uploader)s on %(upload_date)s.%(ext)s'"
args = shlex.split(command)
with Popen(args, stdout=PIPE, stderr=PIPE) as process:
process.wait()
error_check = True
for line in process.stdout.readlines():
print(line.decode().strip())
for line in process.stdout.readlines():
error_check = False
print(line)
assert(error_check)
assert(process.returncode == 0)
return filename
except:
raise Exception("youtube-dl error")
return filename
if __name__ == '__main__':
#args = ['-l', 'https://www.youtube.com/watch?v=gt027PfguDQ']
main() | giraycoskun/vosk-ASR-app | app/commandline_tool.py | commandline_tool.py | py | 3,878 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
33559442185 | from django.contrib.auth.models import User
from django.db import models
from django.db.models import Index
class EquipmentType(models.Model):
"""
Тип оборудования
"""
name = models.CharField(max_length=255, verbose_name='Type name')
serial_number_mask = models.CharField(max_length=50, verbose_name='Mask of serial numbers')
def __str__(self):
return self.name
class Meta:
verbose_name = 'Type of equipment'
verbose_name_plural = 'Type of equipment'
ordering = ['name']
indexes = (
Index(fields=['name'], name='equ_typ__name__idx'),
)
constraints = [
models.UniqueConstraint(
fields=['name'],
name='equ_typ__name__unq'
),
]
class Equipment(models.Model):
"""
Оборудование
"""
equipment_type = models.ForeignKey('EquipmentType', on_delete=models.PROTECT, related_name='equipments',
verbose_name='Type of equipment')
serial_number = models.CharField(max_length=50, verbose_name='Serial number of equipment')
description = models.TextField(verbose_name='Description of equipment', null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True, verbose_name='Created at')
updated_at = models.DateTimeField(auto_now=True, verbose_name='Updated at')
is_archived = models.BooleanField(default=False, verbose_name='Is archived')
created_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name='created_by',
null=True, blank=True, verbose_name='Created by')
updated_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name='updated_by',
null=True, blank=True, verbose_name='Updated by')
class Meta:
verbose_name = 'Equipment'
verbose_name_plural = 'Equipment'
ordering = ['id']
indexes = (
Index(
fields=['is_archived', 'equipment_type', 'serial_number'],
name='equ__type_serial_number__idx'
),
)
constraints = [
models.UniqueConstraint(
fields=['equipment_type', 'serial_number'],
name='equ__type_serial_number__unq'
),
]
| KotelnikovKP/equipment | backend/models.py | models.py | py | 2,379 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
20485270061 | import requests
from bs4 import BeautifulSoup
import pandas as pd
def extract(page):
url = f'https://in.indeed.com/jobs?q=python+developer&l=India&start={page}'
res = requests.get(url)
# return res.status_code
soup = BeautifulSoup(res.content, 'html.parser')
return soup
job_list = []
def transfrom(soup):
divs = soup.find_all('div', class_='jobsearch-SerpJobCard')
for i in divs:
title = i.find('a').text.strip()
print(title)
company = i.find('span', class_='company').text.strip()
print(company)
summary = i.find(
'div', class_='summary').text.strip().replace('\n', '')
print(summary)
job = {
'title': title,
"company": company,
"summary": summary
}
job_list.append(job)
for i in range(0, 50, 10):
print(f'Getting Page,{i}')
c = extract(0)
transfrom(c)
# print(len(job_list))
df = pd.DataFrame(job_list)
print(df.head())
df.to_csv('indeed.csv')
| ashish-ash303/Indeed-Scraping | indeed.py | indeed.py | py | 1,067 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 45,
"usage_type": "call"
}
] |
18286950948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import random
import datetime
import os
current_file_path = os.path.realpath(__file__)
current_directory_path = os.path.dirname(current_file_path)
resources_directory_path = os.path.join(current_directory_path, '..', 'resources')
db_directory_path = os.path.join(current_directory_path, '..', 'db')
connection = sqlite3.connect(os.path.join(db_directory_path, 'BAS.db'), 300)
cursor = connection.cursor()
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
import PIL
from PIL import Image
from PIL import ImageTk
from tkcalendar import Calendar, DateEntry
global cart, session_type
cart = []
class Table:
def __init__(
self,
root,
lst,
w=16,
foreg='black',
backg='white',
):
for i in range(len(lst)):
for j in range(len(lst[0])):
self.e = Entry(root, width=w, fg=foreg, bg=backg,
font=('Arial', 16))
self.e.grid(row=i, column=j)
self.e.insert(END, lst[i][j])
def confirm_update_inventory():
global confirm_update_inventory_window
confirm_update_inventory_window = Tk()
confirm_update_inventory_window.geometry('500x150+300+120')
confirm_update_inventory_window.title('Confirm Book Update')
isbn = str(ei1.get())
name = str(ei2.get())
auth = str(ei3.get())
price = str(ei4.get())
cat = str(ei5.get())
pub = str(ei6.get())
number_of_copies = str(ei7.get())
threshold_number_of_books = str(ei8.get())
average_days_of_procuring = str(ei9.get())
if isbn != '' and name != '' and auth != '' and price != '' and cat \
!= '' and pub != '' and number_of_copies != '' \
and threshold_number_of_books != '' \
and average_days_of_procuring != '':
l1 = Label(confirm_update_inventory_window,
text='Are you sure you want to update this book?',
font=10)
b1 = Button(confirm_update_inventory_window, text='Yes',
width=10, height=3, command=update_book_in_db)
b2 = Button(confirm_update_inventory_window, text='No',
width=10, height=3, command=confbdest)
l1.place(x=60, y=20)
b1.place(x=80, y=80)
b2.place(x=320, y=80)
else:
l10 = Label(inv, text='Please enter all details', fg='red',
font=10)
l10.place(x=150, y=440)
confirm_update_inventory_window.destroy()
l10.after(3000, lambda : l10.place_forget())
def confbkadd():
global confirmb
confirmb = Tk()
confirmb.geometry('500x150+300+120')
confirmb.title('Confirm Book')
isbn = str(ei1.get())
name = str(ei2.get())
auth = str(ei3.get())
price = str(ei4.get())
cat = str(ei5.get())
pub = str(ei6.get())
number_of_copies = str(ei7.get())
threshold_number_of_books = str(ei8.get())
average_days_of_procuring = str(ei9.get())
if isbn != '' and name != '' and auth != '' and price != '' and cat \
!= '' and pub != '' and number_of_copies != '' \
and threshold_number_of_books != '' \
and average_days_of_procuring != '':
l1 = Label(confirmb,
text='Are you sure you want to add this book?',
font=10)
b1 = Button(confirmb, text='Yes', width=10, height=3,
command=bookadd)
b2 = Button(confirmb, text='No', width=10, height=3,
command=confbdest)
l1.place(x=60, y=20)
b1.place(x=80, y=80)
b2.place(x=320, y=80)
else:
l10 = Label(inv, text='Please enter all details', fg='red',
font=10)
l10.place(x=150, y=440)
confirmb.destroy()
l10.after(3000, lambda : l10.place_forget())
def confbdest():
confirmb.destroy()
def update_book_in_db():
confirm_update_inventory_window.destroy()
isbn = str(ei1.get())
name = str(ei2.get())
auth = str(ei3.get())
price = float(ei4.get())
cat = str(ei5.get())
pub = str(ei6.get())
number_of_copies = int(ei7.get())
threshold_number_of_books = int(ei8.get())
average_days_of_procuring = int(ei9.get())
vis = Label(update_inventory_window,
text='Book updated successfully!', font=8)
vis.place(x=150, y=460)
vis.after(3000, lambda : vis.place_forget())
cursor.execute('UPDATE Book SET Book_Name=?, Author=?, Price=?, Category=?, Publisher=?, number_of_copies=?, threshold=?, average_number_of_days_to_procure=?, rack_number=? WHERE Book_ISBN=?'
, (
name,
auth,
price,
cat,
pub,
number_of_copies,
threshold_number_of_books,
average_days_of_procuring,
1,
isbn,
))
connection.commit()
return
def bookadd():
confirmb.destroy()
isbn = str(ei1.get())
name = str(ei2.get())
auth = str(ei3.get())
price = float(ei4.get())
cat = str(ei5.get())
pub = str(ei6.get())
number_of_copies = int(ei7.get())
threshold_number_of_books = int(ei8.get())
average_days_of_procuring = int(ei9.get())
vis = Label(inv, text='Book Added successfully!', font=8)
vis.place(x=150, y=460)
vis.after(3000, lambda : vis.place_forget())
cursor.execute('INSERT INTO Book VALUES (?,?,?,?,?,?,?,?,?,?)', (
isbn,
name,
auth,
price,
cat,
pub,
number_of_copies,
threshold_number_of_books,
average_days_of_procuring,
1,
))
connection.commit()
return
def vsrec():
    """Open a window listing every row of the Sales table."""
    cursor.execute('SELECT * FROM Sales')
    records = cursor.fetchall()
    win = Tk()
    win.geometry('960x640+100+120')
    win.title('Sales Records')
    heading = Label(win, text='Sales Records:', font=('Sales Records:', 13),
                    bg='Moccasin', fg='black')
    heading.place(x=80, y=40)
    # (column title, font family) pairs; columns sit 175px apart.
    header_specs = (('Sales ID', 'Sales ID'), ('Customer ID', 'Customer'),
                    ('Invoice No', 'Invoice No'), ('Sales Date', 'Sales Date'),
                    ('Bill Amount', 'Bill Amount'))
    for idx, (title, face) in enumerate(header_specs):
        Label(win, text=title, font=(face, 10)).place(x=60 + 175 * idx, y=80)
    y_pos = 100
    for record in records:
        x_pos = 60
        for value in record:
            cell = Label(win, text=value, font=(str(value), 8))
            cell.place(x=x_pos, y=y_pos)
            x_pos += 175
        y_pos += 15
    return
def update_inventory():
    """Build the 'Update Book' form window: one entry per Book column
    plus an Update button that opens the confirmation dialog.

    Side effects: rebinds globals update_inventory_window and ei1..ei9.
    NOTE(review): ei1..ei9 are the same globals addinv() uses for its own
    form, so only one of the two forms can be live at a time.
    """
    global update_inventory_window
    update_inventory_window = Tk()
    update_inventory_window.geometry('720x600+500+120')
    update_inventory_window.title('Book Inventory')
    wel = Label(update_inventory_window, text='Update Book', fg='black'
                , bg='Moccasin', font=16)
    global ei1, ei2, ei3, ei4, ei5, ei6, ei7, ei8, ei9
    l1 = Label(update_inventory_window, text='Book ISBN:', font=10)
    l2 = Label(update_inventory_window, text='Book Name:', font=10)
    l3 = Label(update_inventory_window, text='Author:', font=10)
    l4 = Label(update_inventory_window, text='Price:', font=10)
    l5 = Label(update_inventory_window, text='Category:', font=10)
    l6 = Label(update_inventory_window, text='Publisher:', font=10)
    l7 = Label(update_inventory_window, text='Number of copies:',
               font=10)
    l8 = Label(update_inventory_window,
               text='Threshold number of copies:', font=10)
    l9 = Label(update_inventory_window,
               text='Average number of days to procure:', font=10)
    ei1 = Entry(update_inventory_window, font=10, bd=5)
    ei2 = Entry(update_inventory_window, font=10, bd=5)
    ei3 = Entry(update_inventory_window, font=10, bd=5)
    ei4 = Entry(update_inventory_window, font=10, bd=5)
    ei5 = Entry(update_inventory_window, font=10, bd=5)
    ei6 = Entry(update_inventory_window, font=10, bd=5)
    ei7 = Entry(update_inventory_window, font=10, bd=5)
    ei8 = Entry(update_inventory_window, font=10, bd=5)
    ei9 = Entry(update_inventory_window, font=10, bd=5)
    b1 = Button(update_inventory_window, text='Update Book', width=20,
                height=3, command=confirm_update_inventory)
    # Labels at x=70, entries at x=420; rows 40px apart from y=80.
    wel.place(x=150, y=40)
    l1.place(x=70, y=80)
    ei1.place(x=420, y=80)
    l2.place(x=70, y=120)
    ei2.place(x=420, y=120)
    l3.place(x=70, y=160)
    ei3.place(x=420, y=160)
    l4.place(x=70, y=200)
    ei4.place(x=420, y=200)
    l5.place(x=70, y=240)
    ei5.place(x=420, y=240)
    l6.place(x=70, y=280)
    ei6.place(x=420, y=280)
    l7.place(x=70, y=320)
    ei7.place(x=420, y=320)
    l8.place(x=70, y=360)
    ei8.place(x=420, y=360)
    l9.place(x=70, y=400)
    ei9.place(x=420, y=400)
    b1.place(x=150, y=500)
    return
def addinv():
    """Build the 'Add New Book' form window: one entry per Book column
    plus an Add button that opens the confirmation dialog (confbkadd).

    Side effects: rebinds globals inv and ei1..ei9.  NOTE(review):
    ei1..ei9 are shared with update_inventory()'s form, so only one of
    the two forms can be live at a time.
    """
    global inv
    inv = Tk()
    inv.geometry('720x600+500+120')
    inv.title('Book Inventory')
    wel = Label(inv, text='Add New Book', fg='black', bg='Moccasin',
                font=16)
    global ei1, ei2, ei3, ei4, ei5, ei6, ei7, ei8, ei9
    l1 = Label(inv, text='Book ISBN:', font=10)
    l2 = Label(inv, text='Book Name:', font=10)
    l3 = Label(inv, text='Author:', font=10)
    l4 = Label(inv, text='Price:', font=10)
    l5 = Label(inv, text='Category:', font=10)
    l6 = Label(inv, text='Publisher:', font=10)
    l7 = Label(inv, text='Number of copies:', font=10)
    l8 = Label(inv, text='Threshold number of copies:', font=10)
    l9 = Label(inv, text='Average number of days to procure:', font=10)
    ei1 = Entry(inv, font=10, bd=5)
    ei2 = Entry(inv, font=10, bd=5)
    ei3 = Entry(inv, font=10, bd=5)
    ei4 = Entry(inv, font=10, bd=5)
    ei5 = Entry(inv, font=10, bd=5)
    ei6 = Entry(inv, font=10, bd=5)
    ei7 = Entry(inv, font=10, bd=5)
    ei8 = Entry(inv, font=10, bd=5)
    ei9 = Entry(inv, font=10, bd=5)
    b1 = Button(inv, text='Add Book', width=20, height=3,
                command=confbkadd)
    # Labels at x=70, entries at x=420; rows 40px apart from y=80.
    wel.place(x=150, y=40)
    l1.place(x=70, y=80)
    ei1.place(x=420, y=80)
    l2.place(x=70, y=120)
    ei2.place(x=420, y=120)
    l3.place(x=70, y=160)
    ei3.place(x=420, y=160)
    l4.place(x=70, y=200)
    ei4.place(x=420, y=200)
    l5.place(x=70, y=240)
    ei5.place(x=420, y=240)
    l6.place(x=70, y=280)
    ei6.place(x=420, y=280)
    l7.place(x=70, y=320)
    ei7.place(x=420, y=320)
    l8.place(x=70, y=360)
    ei8.place(x=420, y=360)
    l9.place(x=70, y=400)
    ei9.place(x=420, y=400)
    b1.place(x=150, y=500)
    return
def viewpur():
    """Open a window listing every row of the Purchase table."""
    cursor.execute('SELECT * FROM Purchase')
    records = cursor.fetchall()
    win = Tk()
    win.geometry('840x640+100+120')
    win.title('Purchase Records')
    heading = Label(win, text='Purchase Records:',
                    font=('Purchase Records:', 13), bg='Moccasin',
                    fg='black')
    heading.place(x=80, y=40)
    # (column title, font family) pairs; columns sit 200px apart.
    header_specs = (('Purchase ID', 'Sales ID'), ('Supplier ID', 'Customer'),
                    ('Purchase Date', 'Invoice No'),
                    ('Amount Paid', 'Sales Date'))
    for idx, (title, face) in enumerate(header_specs):
        Label(win, text=title, font=(face, 10)).place(x=40 + 200 * idx, y=80)
    y_pos = 100
    for record in records:
        x_pos = 40
        for value in record:
            cell = Label(win, text=value, font=(str(value), 8))
            cell.place(x=x_pos, y=y_pos)
            x_pos += 200
        y_pos += 15
    return
def puradd():
    """Save the confirmed purchase (random 'pur<N>' id, today's date,
    supplier id and price from the form) into Purchase and commit."""
    confirmp.destroy()
    purchase_id = str('pur' + str(random.randint(150, 1000000)))
    supplier_id = str(ep2.get())
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    amount = float(ep6.get())
    notice = Label(pur, text='Purchase Added successfully!', font=8)
    notice.place(x=150, y=460)
    notice.after(3000, lambda : notice.place_forget())
    cursor.execute('INSERT INTO Purchase VALUES (?,?,?,?)',
                   (purchase_id, supplier_id, today, amount))
    connection.commit()
    return
def confpur():
    """Pop up a yes/no dialog before saving the purchase; reject an
    empty supplier id or price with a transient warning instead."""
    global confirmp
    confirmp = Tk()
    confirmp.geometry('500x150+300+120')
    confirmp.title('Confirm Purchase')
    supid = str(ep2.get())
    price = str(ep6.get())
    if supid == '' or price == '':
        warn = Label(pur, text='Please enter all details', font=10,
                     fg='red')
        warn.place(x=200, y=430)
        warn.after(3000, lambda : warn.place_forget())
        confdest()
        return
    prompt = Label(confirmp,
                   text='Are you sure you want to add this purchase?',
                   font=10)
    yes = Button(confirmp, text='Yes', width=10, height=3,
                 command=puradd)
    no = Button(confirmp, text='No', width=10, height=3,
                command=confdest)
    prompt.place(x=40, y=20)
    yes.place(x=80, y=80)
    no.place(x=320, y=80)
def confdest():
    """Close the purchase-confirmation dialog without saving."""
    confirmp.destroy()
    return
def addpur():
    """Build the 'New Book Purchase from Supplier' form: supplier id and
    price entries, plus confirm-purchase and find-supplier buttons.

    Side effects: rebinds globals pur and ep1..ep6.  NOTE(review): only
    ep2 and ep6 are actually created here; ep1/ep3/ep4/ep5 are declared
    global but never assigned.
    """
    global pur
    pur = Tk()
    pur.geometry('640x400+500+120')
    pur.title('Book Purchase')
    wel = Label(pur, text='New Book Purchase from Suplier', fg='black',
                font=16)
    global ep1, ep2, ep3, ep4, ep5, ep6
    l2 = Label(pur, text='Supplier ID:', font=10)
    l7 = Label(pur, text='Price:', font=10)
    ep2 = Entry(pur, font=10, bd=5)
    ep6 = Entry(pur, font=10, bd=5)
    b1 = Button(
        pur,
        text='Add Purchase Record',
        width=30,
        height=3,
        bg='lightgreen',
        command=confpur,
        )
    b2 = Button(
        pur,
        text='Find supplier ID',
        width=30,
        height=3,
        bg='lightblue',
        command=search_supplier,
        )
    wel.place(x=150, y=40)
    l2.place(x=70, y=100)
    ep2.place(x=230, y=100)
    l7.place(x=70, y=190)
    ep6.place(x=230, y=190)
    b2.place(x=60, y=250)
    b1.place(x=360, y=250)
    return
def reqadd():
    """Store the requested title/author in the Request table, commit,
    and show a transient confirmation in the request window."""
    confirmr.destroy()
    title = str(cb1.get())
    author = str(ca1.get())
    notice = Label(request, text='Request made successfully!', font=8)
    notice.place(x=150, y=460)
    notice.after(3000, lambda : notice.place_forget())
    cursor.execute('INSERT INTO Request VALUES (?,?)', (title, author))
    connection.commit()
    return
def confreqdest():
    """Close the request-confirmation dialog without saving."""
    confirmr.destroy()
    return
def confreq():
    """Open a yes/no dialog confirming the customer's book request."""
    global confirmr
    confirmr = Tk()
    confirmr.geometry('500x150+300+120')
    confirmr.title('Confirm Request')
    prompt = Label(confirmr,
                   text='Are you sure you want to request this book?',
                   font=10)
    yes = Button(confirmr, text='Yes', width=10, height=3,
                 command=reqadd)
    no = Button(confirmr, text='No', width=10, height=3,
                command=confreqdest)
    prompt.place(x=40, y=20)
    yes.place(x=80, y=80)
    no.place(x=320, y=80)
def cusreq():
    """Build the 'Request Book' form (title + author entries) with a
    button that opens the confirmation dialog.

    Side effects: rebinds globals cb1, ca1 and request.  NOTE(review):
    ``request = Tk()`` clobbers the module-level function ``request()``
    (the admin requests viewer); buttons created before this call keep
    the original function object, but any later lookup of the name gets
    this window — rename one of the two.
    """
    global cb1, ca1
    global request
    request = Tk()
    request.geometry('640x480+700+120')
    request.title('Request Book')
    disp = Label(request, text='Request your book here:', fg='black',
                 bg='Moccasin', font=12)
    disp.place(x=200, y=70)
    sl1 = Label(request, text='Book Title:', font=('Book Title:', 10))
    cb1 = Entry(request, font=10, bd=5)
    sl2 = Label(request, text='Author:', font=('Author:', 10))
    ca1 = Entry(request, font=10, bd=5)
    sl1.place(x=100, y=140)
    cb1.place(x=210, y=140)
    sl2.place(x=100, y=220)
    ca1.place(x=210, y=220)
    sb = Button(request, text='Request Book', width=20, height=2,
                command=confreq)
    sb.place(x=210, y=300)
    return
def logoutadm():
    """Close the administration portal window.

    The original also assigned ``session_type = 0``; that only bound a
    function-local name (no ``global`` declaration) and affected nothing,
    so the dead assignment is removed.
    """
    admpg.destroy()
    return
def getbooksunderthreshold():
    """List every book whose stock (number_of_copies) is at or below its
    reorder threshold, in a new window."""
    threshold = Tk()
    threshold.geometry('1000x600+700+120')
    threshold.title('Book threshold')
    cursor.execute('SELECT Book_ISBN, Book_Name, number_of_copies, Price, Publisher FROM Book b WHERE b.number_of_copies <= b.threshold'
                   )
    rows = cursor.fetchall()
    srl = Label(threshold, text='Books below or at threshold level',
                font=('Customer Book threshold:', 12), bg='Moccasin',
                fg='black')
    srl.place(x=80, y=40)
    l1 = Label(threshold, text='ISBN', font=('Sales ID', 10))
    l2 = Label(threshold, text='Name', font=('Customer', 10))
    l3 = Label(threshold, text='Number of copies', font=('Customer',
               10))
    l4 = Label(threshold, text='Price', font=('Customer', 10))
    l5 = Label(threshold, text='Publisher', font=('Customer', 10))
    l1.place(x=60, y=80)
    l2.place(x=210, y=80)
    l3.place(x=430, y=80)
    l4.place(x=580, y=80)
    l5.place(x=730, y=80)
    col = 100
    ix = 0
    for r in rows:
        row = 60
        ix = 0
        for x in r:
            # Narrow 150px step for columns 0, 2, 3 (ISBN, copies,
            # price); wide 220px step for name and publisher.
            extend = 220
            if not ix or ix == 3 or ix == 2:
                extend = 150
            templabel = Label(threshold, text=x, font=(str(x), 8))
            templabel.place(x=row, y=col)
            row = row + extend
            ix += 1
        col = col + 15
    return
def show_sales_statistics():
    """Open the date-range picker for sales statistics.

    Side effects: rebinds globals calfrom/calto (DateEntry widgets),
    which sales_statistics() reads when the button is pressed.
    NOTE(review): both pickers default to fixed dates in April 2022 —
    presumably leftover test defaults; confirm before release.
    """
    sales_statistics_selection_window = Tk()
    sales_statistics_selection_window.geometry('800x640+500+200')
    sales_statistics_selection_window.title('Sales statistics selection'
            )
    global calfrom, calto
    calfrom = DateEntry(sales_statistics_selection_window,
                        selectmode='day', year=2022, month=4, day=12)
    calto = DateEntry(sales_statistics_selection_window,
                      selectmode='day', year=2022, month=4, day=14)
    calfrom.pack(pady=10)
    calto.pack(pady=40)
    l1 = Label(sales_statistics_selection_window, text='From date',
               font=('Times New Roman', 10))
    l2 = Label(sales_statistics_selection_window, text='To date',
               font=('Times New Roman', 10))
    l1.place(x=80, y=20)
    l2.place(x=80, y=80)
    Button(sales_statistics_selection_window,
           text='Get statistics for this period',
           command=sales_statistics).pack(pady=10)
    return
def parse_date(d):
    """Parse a 'day/month/year' string into a (day, month, year) tuple
    of ints, e.g. '12/4/2022' -> (12, 4, 2022).

    The original hand-rolled scanner was broken: it never refreshed
    ``ch`` after skipping a '/', so the month loop never ran (month
    always parsed as 0), and the year loop expected a trailing '/' and
    ran off the end of the string (IndexError) on any real input.

    Raises ValueError if the string does not contain exactly three
    '/'-separated integer fields.
    """
    day, month, year = d.split('/')
    return (int(day), int(month), int(year))
def sales_statistics():
    """Show per-book sales totals for the calfrom..calto date range,
    aggregated by ISBN into a Table (copies sold and total revenue).

    Fix vs. the original: ``dict.has_key()`` was removed in Python 3 and
    raised AttributeError on the first sale row; replaced with the
    ``in`` operator.
    """
    sales_statistics_window = Tk()
    sales_statistics_window.geometry('1280x700+500+200')
    sales_statistics_window.title('Sales statistics')
    table_rows = [(
        'ISBN',
        'Name',
        'Price',
        'Publisher',
        'Copies sold',
        'Total revenue',
        )]
    from_date = calfrom.get_date()
    to_date = calto.get_date()
    cursor.execute('SELECT isbn, quantity from book_sales WHERE time_of_sale >= ? AND time_of_sale <= ?'
                   , (from_date.strftime('%Y-%m-%d'),
                   to_date.strftime('%Y-%m-%d')))
    sales_rows = cursor.fetchall()
    # Aggregate quantities per ISBN, looking up book details once per row.
    stats = {}
    for isbn, quantity in sales_rows:
        cursor.execute('SELECT Book_Name, Price, Publisher FROM Book b WHERE Book_ISBN = ?'
                       , (isbn, ))
        book_rows = cursor.fetchall()
        if len(book_rows) > 0:
            if isbn not in stats:  # has_key() removed in Python 3
                stats[isbn] = {
                    'isbn': isbn,
                    'name': book_rows[0][0],
                    'price': book_rows[0][1],
                    'quantity': quantity,
                    'publisher': book_rows[0][2],
                    }
            else:
                stats[isbn]['quantity'] += quantity
    for s in stats:
        table_rows.append((
            s,
            stats[s]['name'],
            str(stats[s]['price']),
            stats[s]['publisher'],
            str(stats[s]['quantity']),
            str(stats[s]['quantity'] * stats[s]['price']),
            ))
    tbl = Table(sales_statistics_window, table_rows)
    return
def show_not_in_stock_books():
    """List every ISBN recorded in not_in_stock (books customers viewed
    while out of stock) with its details and query count."""
    window = Tk()
    window.geometry('1280x700+500+200')
    window.title('Not in stock books')
    table_rows = [('ISBN', 'Name', 'Price', 'Publisher', 'Query count')]
    cursor.execute('SELECT * FROM not_in_stock')
    for entry in cursor.fetchall():
        cursor.execute('SELECT Book_Name, Price, Publisher FROM Book b WHERE Book_ISBN = ?'
                       , (entry[0], ))
        book_rows = cursor.fetchall()
        if book_rows:
            table_rows.append((entry[0], book_rows[0][0], book_rows[0][1],
                               book_rows[0][2], entry[1]))
    tbl = Table(window, table_rows)
    return
def adminpage():
    """Log an administrator in (credentials from ea1/ea2) and build the
    admin portal, showing buttons gated by the admin's role.

    Role gating (Admin.position column, held locally in session_type):
    1/2/3 get add/update inventory and create-order; 2/3 additionally
    get sales/purchase records, add-purchase and customer requests; 3
    alone gets threshold, statistics and not-in-stock views.  The
    search-books button is shown to every logged-in admin.

    NOTE(review): passwords are compared in plain text against the
    Admin table — no hashing.
    """
    global admpg
    user = str(ea1.get())
    pasw = str(ea2.get())
    cursor.execute('SELECT Login_ID,Password,Admin_ID,position FROM Admin'
                   )
    rows = cursor.fetchall()
    # flag records whether any credential row matched.
    flag = 0
    for r in rows:
        if user == r[0] and pasw == r[1]:
            session_type = r[3]
            var = Label(adm, text='Login Successful',
                        font=('Login Successful', 18), fg='green')
            var.place(x=150, y=400)
            adm.destroy()
            admpg = Tk()
            admpg.geometry('840x640+500+120')
            admpg.title('Administration Portal')
            var = 'Welcome ' + r[2] + '!'
            l1 = Label(admpg, text=var, fg='black', bg='Moccasin',
                       font=(var, 16))
            l1.place(x=200, y=50)
            b = Button(
                admpg,
                text='Log Out',
                fg='blue',
                width=5,
                height=1,
                command=logoutadm,
                )
            b.place(x=600, y=50)
            if session_type == 1 or session_type == 2 or session_type \
                == 3:
                b2 = Button(admpg, text='Add to inventory', width=20,
                            height=3, command=addinv)
                b2.place(x=100, y=100)
            if session_type == 2 or session_type == 3 or session_type \
                == 1:
                b7 = Button(admpg, text='Update inventory', width=20,
                            height=3, command=update_inventory)
                b7.place(x=340, y=100)
            if session_type == 2 or session_type == 1 or session_type \
                == 3:
                b8 = Button(admpg, text='Create Order', width=20,
                            height=3, command=porder)
                b8.place(x=100, y=250)
            if session_type == 2 or session_type == 3:
                b1 = Button(admpg, text='View Sales Record', width=20,
                            height=3, command=vsrec)
                b1.place(x=340, y=250)
            if session_type == 2 or session_type == 3:
                b3 = Button(admpg, text='View Purchase Record',
                            width=25, height=3, command=viewpur)
                b3.place(x=100, y=400)
            if session_type == 2 or session_type == 3:
                b4 = Button(admpg, text='Add Purchase Record',
                            width=25, height=3, command=addpur)
                b4.place(x=340, y=400)
            if session_type == 2 or session_type == 3:
                # command=request: the requests *viewer* function, not
                # the global Tk window cusreq() later binds to that name.
                b5 = Button(admpg, text='Customer Requests', width=20,
                            height=3, command=request)
                b5.place(x=100, y=550)
            if session_type == 3:
                b6 = Button(admpg, text='Books under threshold',
                            width=25, height=3,
                            command=getbooksunderthreshold)
                b6.place(x=340, y=550)
            if session_type == 3:
                b9 = Button(admpg, text='Sales Statistics', width=25,
                            height=3, command=show_sales_statistics)
                b9.place(x=580, y=400)
            if session_type == 3:
                b10 = Button(admpg, text='Not in stock books',
                             width=25, height=3,
                             command=show_not_in_stock_books)
                b10.place(x=580, y=250)
            b11 = Button(admpg, text='Search books', width=25,
                         height=3, command=sbook)
            b11.place(x=580, y=100)
            flag = 1
    if flag == 0:
        var = Label(adm, text='Incorrect Username/Password',
                    font=('Incorrect Username/Password', 10), fg='black'
                    )
        var.place(x=150, y=420)
    return
def spacesearch(x):
    """Horizontal step (px) after search-result column *x*; None for
    any index past the last known column."""
    widths = {0: 90, 1: 276, 2: 216, 3: 90, 4: 168, 5: 0}
    return widths.get(x)
def spacesearch2(x):
    """Horizontal step (px) after supplier-result column *x*; None for
    any index past the last known column."""
    widths = {0: 110, 1: 275, 2: 210, 3: 300, 4: 180}
    return widths.get(x)
def sbookfun():
    """Search the Book table by case-insensitive substring match on
    title, author and category (from entries se1/se2/se3), show the hits
    in a scrollable listbox, and cache them in the global ``books`` list
    for view_book_window_fun().

    books[0] is a header row, so listbox index i maps to books[i + 1].
    """
    var1 = str(se1.get())
    bname = '%' + var1 + '%'
    bname = bname.lower()
    var2 = str(se2.get())
    auth = '%' + var2 + '%'
    auth = auth.lower()
    var3 = str(se3.get())
    cat = '%' + var3 + '%'
    cat = cat.lower()
    cursor.execute('SELECT * FROM Book WHERE LOWER(Book_Name) LIKE ? AND LOWER(Author) LIKE ? AND LOWER(Category) LIKE ?'
                   , (bname, auth, cat))
    rows = cursor.fetchall()
    searchres = Tk()
    searchres.geometry('1280x640+100+120')
    searchres.title('Search Results')
    srl = Label(searchres, text='Search Results:',
                font=('Search Results:', 14), bg='Moccasin', fg='black')
    srl.place(x=80, y=40)
    global search_results_list, books
    scroll_bar = Scrollbar(searchres)
    scroll_bar.pack(side=RIGHT, fill=Y)
    search_results_list = Listbox(searchres,
                                  yscrollcommand=scroll_bar.set,
                                  width=60, selectmode=SINGLE, height=5)
    for r in rows:
        search_results_list.insert(END, str(r[1]) + ' by ' + str(r[2])
                                   + ' at INR ' + str(r[3]) + ' ISBN: '
                                   + str(r[0]))
    search_results_list.pack(side=LEFT, fill=BOTH)
    scroll_bar.config(command=search_results_list.yview)
    books = []
    books.append((
        'ISBN',
        'Name',
        'Author',
        'Price',
        'Category',
        'Publisher',
        'Number of Copies',
        'Rack Number',
        ))
    for r in rows:
        books.append((
            r[0],
            r[1],
            r[2],
            r[3],
            r[4],
            r[5],
            r[6],
            r[9] or 1,  # fall back to rack 1 when rack_number is NULL/0
            ))
    if len(rows) > 0:
        sb = Button(searchres, text='View selected book', width=25,
                    height=2, command=view_book_window_fun)
        sb.place(x=800, y=350)
    return
def view_book_window_fun():
    """Show details for the book selected in the search-results listbox.

    ``curselection()[0] + 1`` skips the header row stored at books[0].
    When the selected book is out of stock (copies column <= 0) the view
    is tallied in not_in_stock: increment the existing row or insert a
    new one with count 1.
    """
    global view_book_window
    view_book_window = Tk()
    view_book_window.geometry('1280x640+500+120')
    view_book_window.title('View Book')
    book = books[search_results_list.curselection()[0] + 1]
    isbn = book[0]
    # Build a two-row table: header + the selected book's tuple.
    lst = []
    lst.append(books[0])
    for b in books:
        if b[0] == isbn:
            lst.append(b)
            break
    if book[6] <= 0:
        cursor.execute('SELECT count(*) from `not_in_stock` WHERE ISBN = ?'
                       , (isbn, ))
        count = cursor.fetchall()
        if count[0][0] > 0:
            cursor.execute('UPDATE `not_in_stock` SET count = ((SELECT count FROM `not_in_stock` WHERE ISBN = ?) + 1) WHERE ISBN = ?'
                           , (isbn, isbn))
            connection.commit()
        else:
            cursor.execute('INSERT INTO `not_in_stock` VALUES (?,?)',
                           (isbn, 1))
            connection.commit()
    tbl = Table(view_book_window, lst, w=14)
def sbook():
    """Build the book-search form (title/author/genre keyword entries),
    with a search button and a shortcut to the customer request form.

    Side effects: rebinds globals se1/se2/se3 read by sbookfun().
    """
    global se1, se2, se3
    searchbk = Tk()
    searchbk.geometry('640x480+700+120')
    searchbk.title('Search Book')
    disp = Label(searchbk, text='Enter keywords for search', fg='black'
                 , bg='Moccasin', font=12)
    disp.place(x=200, y=70)
    sl1 = Label(searchbk, text='Book Title:', font=('Book Title:', 10))
    se1 = Entry(searchbk, font=10, bd=5)
    sl2 = Label(searchbk, text='Author:', font=('Author:', 10))
    se2 = Entry(searchbk, font=10, bd=5)
    sl3 = Label(searchbk, text='Genre:', font=('Category:', 10))
    se3 = Entry(searchbk, font=10, bd=5)
    sl1.place(x=100, y=140)
    se1.place(x=210, y=140)
    sl2.place(x=100, y=220)
    se2.place(x=210, y=220)
    sl3.place(x=100, y=300)
    se3.place(x=210, y=300)
    sb = Button(searchbk, text='Search Available Books', width=25,
                height=2, command=sbookfun)
    sb.place(x=210, y=350)
    sl4 = Label(searchbk, text="Can't find the book you're looking for?"
                , font=("Can't find the book you're looking for?", 10),
                fg='blue')
    sb1 = Button(
        searchbk,
        text='Click Here',
        fg='blue',
        width=10,
        height=2,
        command=cusreq,
        )
    sl4.place(x=100, y=420)
    sb1.place(x=390, y=410)
    return
def add_supplier_to_db():
    """Insert the supplier from the Add Supplier form (name, publication,
    email, phone — entries supei1..supei4) with a random 'sup<N>' id,
    then commit."""
    confirmsup.destroy()
    sup_name = str(supei1.get())
    publication = str(supei2.get())
    email = str(supei3.get())
    phone = str(supei4.get())
    notice = Label(add_supplier_window, text='Supplier added successfully!'
                   , font=8)
    notice.place(x=150, y=460)
    notice.after(3000, lambda : notice.place_forget())
    cursor.execute('INSERT INTO Supplier VALUES (?,?,?,?,?)',
                   ('sup' + str(random.randint(300, 10000)), sup_name,
                    publication, email, phone))
    connection.commit()
    return
def destroy_confirm_add_supplier():
    """Close the supplier-confirmation dialog without saving."""
    confirmsup.destroy()
    return
def confirm_add_supplier():
    """Pop up a yes/no dialog before saving the supplier; if any form
    field is empty, show a transient warning instead."""
    global confirmsup
    confirmsup = Tk()
    confirmsup.geometry('500x150+300+120')
    confirmsup.title('Confirm Supplier')
    fields = [str(supei1.get()), str(supei2.get()), str(supei3.get()),
              str(supei4.get())]
    if all(field != '' for field in fields):
        prompt = Label(confirmsup,
                       text='Are you sure you want to add this book?',
                       font=10)
        yes = Button(confirmsup, text='Yes', width=10, height=3,
                     command=add_supplier_to_db)
        no = Button(confirmsup, text='No', width=10, height=3,
                    command=destroy_confirm_add_supplier)
        prompt.place(x=60, y=20)
        yes.place(x=80, y=80)
        no.place(x=320, y=80)
    else:
        warn = Label(add_supplier_window, text='Please enter all details'
                     , fg='red', font=10)
        warn.place(x=150, y=440)
        confirmsup.destroy()
        warn.after(3000, lambda : warn.place_forget())
def add_supplier():
    """Build the 'Add New Supplier' form: name, publication, email and
    phone entries plus an Add button that opens the confirmation dialog.

    Side effects: rebinds globals add_supplier_window and supei1..supei4.
    """
    global add_supplier_window
    add_supplier_window = Tk()
    add_supplier_window.geometry('720x600+500+120')
    add_supplier_window.title('Book Inventory')
    wel = Label(add_supplier_window, text='Add New Supplier', fg='black'
                , bg='Moccasin', font=16)
    global supei1, supei2, supei3, supei4
    supl1 = Label(add_supplier_window, text='Supplier Name:', font=10)
    supl2 = Label(add_supplier_window, text='Publication:', font=10)
    supl3 = Label(add_supplier_window, text='Email:', font=10)
    supl4 = Label(add_supplier_window, text='Phone:', font=10)
    supei1 = Entry(add_supplier_window, font=10, bd=5)
    supei2 = Entry(add_supplier_window, font=10, bd=5)
    supei3 = Entry(add_supplier_window, font=10, bd=5)
    supei4 = Entry(add_supplier_window, font=10, bd=5)
    b1 = Button(add_supplier_window, text='Add Supplier', width=20,
                height=3, command=confirm_add_supplier)
    wel.place(x=150, y=40)
    supl1.place(x=70, y=80)
    supei1.place(x=420, y=80)
    supl2.place(x=70, y=120)
    supei2.place(x=420, y=120)
    supl3.place(x=70, y=160)
    supei3.place(x=420, y=160)
    supl4.place(x=70, y=200)
    supei4.place(x=420, y=200)
    b1.place(x=150, y=500)
    return
def search_supplier_access():
    """Substring-search suppliers (case-insensitive) by name, publication
    or email and list the matches in a new window."""
    pattern = '%' + str(sse2.get()) + '%'
    pattern = pattern.lower()
    cursor.execute('SELECT * FROM Supplier WHERE LOWER(Supplier_Name) LIKE ? OR LOWER(Publication) LIKE ? OR LOWER(Email_ID) LIKE ?'
                   , (pattern, pattern, pattern))
    matches = cursor.fetchall()
    win = Tk()
    win.geometry('1280x640+100+120')
    win.title('Search Results')
    heading = Label(win, text='Search Results:',
                    font=('Search Results:', 13), bg='Moccasin', fg='black')
    heading.place(x=80, y=40)
    # (column title, font family, x position) for the header row.
    header_specs = (('Supplier_ID', 'ISBN', 65),
                    ('Supplier_Name', 'Name', 175),
                    ('Publication', 'Author', 450),
                    ('Email_ID', 'Price', 660),
                    ('Phone_No', 'Category', 960))
    for title, face, x_pos in header_specs:
        Label(win, text=title, font=(face, 12)).place(x=x_pos, y=80)
    y_pos = 100
    for record in matches:
        x_pos = 65
        for idx in range(len(record)):
            cell = Label(win, text=record[idx], font=(str(record[idx]),
                         10))
            cell.place(x=x_pos, y=y_pos)
            x_pos = x_pos + spacesearch2(idx)
        y_pos = y_pos + 40
    return
def search_supplier():
    """Build the supplier-search form: one keyword entry (global sse2,
    read by search_supplier_access), a search button, and a shortcut to
    the add-supplier form."""
    global sse2
    search_supplier_window = Tk()
    search_supplier_window.geometry('640x480+700+120')
    search_supplier_window.title('Search Suppliers')
    disp = Label(search_supplier_window,
                 text='Enter name or address of supplier', fg='black',
                 bg='Moccasin', font=12)
    disp.place(x=200, y=70)
    sl2 = Label(search_supplier_window, text='Supplier: ',
                font=('Book Title:', 10))
    sse2 = Entry(search_supplier_window, font=10, bd=5)
    sl2.place(x=100, y=220)
    sse2.place(x=210, y=220)
    sb = Button(search_supplier_window,
                text='Search Available Suppliers', width=25, height=2,
                command=search_supplier_access)
    sb.place(x=210, y=350)
    sl4 = Label(search_supplier_window,
                text="Can't find the supplier you're looking for?",
                font=("Can't find the book you're looking for?", 10),
                fg='blue')
    sb1 = Button(
        search_supplier_window,
        text='Add one',
        fg='blue',
        width=10,
        height=2,
        command=add_supplier,
        )
    sl4.place(x=100, y=420)
    sb1.place(x=390, y=410)
    return
def salord():
    """Finalize checkout: insert the Sales row, decrement stock for each
    cart entry, log per-ISBN quantities in book_sales, close the bill
    windows, and show the receipt.

    Fixes vs. the original:
    * ``dict.has_key()`` was removed in Python 3 (raised AttributeError)
      — replaced with the ``in`` operator.
    * the book_sales insert loop iterated ``new_cart`` but always wrote
      the ISBN typed in the order form (epo1) and that ISBN's quantity,
      one duplicate row per cart entry; it now writes each item's own
      ISBN and quantity.
    """
    salid = 'sal' + str(random.randint(1, 10000))
    cusid = str(epo2.get())
    invo = random.randint(800, 1000000)
    now = datetime.datetime.now()
    date = now.strftime('%Y-%m-%d')
    cursor.execute('INSERT INTO Sales VALUES(?,?,?,?,?)', (salid,
                   cusid, invo, date, pri))
    connection.commit()
    for book in cart:
        cursor.execute('UPDATE Book SET number_of_copies=((SELECT number_of_copies FROM Book WHERE Book_ISBN=?)-1) WHERE Book_ISBN=?'
                       , (book, book))
        connection.commit()
    # Collapse the cart into {isbn: {...,'quantity': n}}.  cart_books
    # (filled by viewcart) holds one fetchall() list per cart entry;
    # its first row is the book's (ISBN, Name, Price).
    new_cart = {}
    for j in cart_books:
        i = j[0]
        if i[0] not in new_cart:
            new_cart[i[0]] = {'isbn': i[0], 'name': i[1],
                              'price': i[2], 'quantity': 1}
        else:
            new_cart[i[0]]['quantity'] += 1
    for item in new_cart:
        cursor.execute('INSERT INTO book_sales (customer_id, isbn, sales_id, quantity, time_of_sale) VALUES(?,?,?,?,?)'
                       , (cusid, item, salid, new_cart[item]['quantity'
                       ], date))
        connection.commit()
    check.destroy()
    confirmch.destroy()
    show = Label(pord, text='Order placed successfully', font=8)
    show.place(x=100, y=320)
    show.after(3000, lambda : show.place_forget())
    show_sale_receipt(salid, new_cart)
    return
def show_sale_receipt(sale_id, new_cart):
    """Render an itemized receipt table for a completed sale.

    new_cart maps ISBN -> {'name', 'price', 'quantity', ...} as built
    by salord().
    """
    window = Tk()
    window.geometry('1024x700+500+200')
    window.title('Receipt')
    receipt = [('Sr No.', 'Book', 'Price', 'Quantity', 'Total Amount')]
    for serial, isbn in enumerate(new_cart, start=1):
        entry = new_cart[isbn]
        receipt.append((serial, entry['name'], entry['price'],
                        str(entry['quantity']),
                        str(entry['quantity'] * entry['price'])))
    tbl = Table(window, receipt)
    return
def delbook(pri, book, p):
    """Remove the first cart entry matching *book* (an ISBN) and rebuild
    the bill window.

    NOTE(review): ``pri = pri - p`` only rebinds the local parameter —
    the module-level ``pri`` is untouched.  Harmless in practice because
    viewcart() recomputes the total from scratch, but the parameters are
    effectively dead; confirm before reusing elsewhere.
    """
    cart.remove(book)
    pri = pri - p
    check.destroy()
    viewcart()
    return
def cartdest():
    """Close the bill window without checking out."""
    check.destroy()
    return
def space(x):
    """Horizontal step (px) after bill column *x*: ISBN -> 100,
    Title -> 400, anything else -> 50."""
    return {0: 100, 1: 400}.get(x, 50)
def confchdest():
    """Close the checkout-confirmation dialog without checking out."""
    confirmch.destroy()
    return
def confcheck():
    """Ask the user to confirm checkout before recording the sale."""
    global confirmch
    confirmch = Tk()
    confirmch.geometry('500x150+500+120')
    confirmch.title('Confirm Checkout')
    prompt = Label(confirmch, text='Are you sure you want to checkout?',
                   font=10)
    yes = Button(confirmch, text='Yes', width=10, height=3,
                 command=salord)
    no = Button(confirmch, text='No', width=10, height=3,
                command=confchdest)
    prompt.place(x=60, y=20)
    yes.place(x=80, y=80)
    no.place(x=320, y=80)
def viewcart():
    """Render the current bill: one row per cart ISBN with a Remove
    button, the running total, and a Checkout button.  Recomputes the
    global ``pri`` total from scratch and rebuilds ``cart_books`` for
    salord().

    Fix vs. the original: the Remove button's lambda captured ``book``
    and ``r`` by reference (late binding), so every button removed the
    last book listed.  They are now bound as default arguments.
    """
    global pri
    pri = 0
    global check
    check = Tk()
    check.geometry('840x700+500+200')
    check.title('View Bill')
    disp = Label(check, text='This Bill:', fg='black', bg='Moccasin',
                 font=('Times New Roman', 20))
    disp.place(x=130, y=70)
    backButton = Button(check, text='<-- Back', width=6, height=2,
                        command=cartdest)
    backButton.place(x=20, y=40)
    l1 = Label(check, text='ISBN', font=('ISBN', 15))
    l2 = Label(check, text='Title', font=('Title', 15))
    l3 = Label(check, text='Price', font=('Price', 15))
    l1.place(x=100, y=120)
    l2.place(x=200, y=120)
    l3.place(x=600, y=120)
    col = 170
    global cart_books
    cart_books = []
    for book in cart:
        cursor.execute('SELECT Book_ISBN,Book_Name,Price from Book where Book_ISBN=?'
                       , (book, ))
        rows = cursor.fetchall()
        for r in rows:
            row = 100
            # salord() reads cart_books[i][0]; each element is the whole
            # fetchall() list for one cart entry.
            cart_books.append(rows)
            for x in range(len(r)):
                templabel = Label(check, text=r[x], font=(str(r[x]),
                                  13))
                templabel.place(x=row, y=col)
                row = row + space(x)
            pri = pri + float(r[2])
            col = col + 40
            # Default args bind this row's book/price now, so each
            # button removes its own row (late-binding closure fix).
            tempButton = Button(check, text='Remove', width=6, height=1,
                                command=lambda b=book, p=r[2]:
                                delbook(pri, b, p))
            tempButton.place(x=row + 40, y=col - 40)
    l4 = Label(check, text='Bill Amount : ', font=('Bill Amount : ',
               12))
    l5 = Label(check, text=str(pri), font=(str(pri), 12))
    l4.place(x=450, y=500)
    l5.place(x=600, y=500)
    checkButton = Button(check, text='Checkout', width=6, height=1,
                         command=confcheck)
    checkButton.place(x=400, y=600)
    return
def addtocart():
    """Validate the order form (ISBN + customer username) and append the
    ISBN to the global cart if the book exists and is in stock."""
    global cart
    isbn = str(epo1.get())
    customer_name = str(epo2.get())
    if isbn == '' or customer_name == '':
        note = Label(pord, text='Please enter the ISBN', font=8)
        note.place(x=100, y=320)
        note.after(3000, lambda : note.place_forget())
        return
    cursor.execute('SELECT * FROM Book where Book_ISBN=?', (isbn, ))
    matches = cursor.fetchall()
    if not matches or matches[0][6] <= 0:
        note = Label(pord, text='Book not in stock!', font=8)
        note.place(x=130, y=320)
        note.after(3000, lambda : note.place_forget())
    else:
        cart.append(isbn)
        note = Label(pord, text='Added to Bill!', font=8)
        note.place(x=140, y=320)
        note.after(3000, lambda : note.place_forget())
    return
def porder():
    """Build the order form: ISBN and customer-username entries, an
    add-to-bill button, a view-bill button, and a shortcut to the book
    search.

    Side effects: rebinds globals pord, epo1, epo2 (read by addtocart
    and salord).  Assumes the global ``cart`` list already exists —
    TODO confirm where it is initialized.
    """
    global pord
    pord = Tk()
    pord.geometry('640x360+500+200')
    pord.title('Order')
    disp = Label(pord, text='Book Order', fg='black', bg='Moccasin',
                 font=12)
    disp.place(x=130, y=70)
    l1 = Label(pord, text='ISBN:', font=10)
    l3 = Label(pord, text='Customer username:', font=10)
    l2 = Label(pord, text='Want to know ISBN of book?',
               font=('Want to know ISBN of Book?', 10))
    global epo1, epo2
    epo1 = Entry(pord, font=10, bd=5, width=10)
    epo2 = Entry(pord, font=10, bd=5, width=10)
    b1 = Button(
        pord,
        text='Click here',
        fg='blue',
        width=6,
        height=1,
        command=sbook,
        )
    b2 = Button(pord, text='Add To Bill', width=14, height=2,
                command=addtocart)
    b3 = Button(pord, text='View Items in Bill', width=14, height=2,
                command=viewcart)
    l1.place(x=40, y=120)
    epo1.place(x=250, y=120)
    l3.place(x=40, y=180)
    epo2.place(x=250, y=180)
    l2.place(x=40, y=240)
    b1.place(x=230, y=240)
    b2.place(x=40, y=290)
    b3.place(x=250, y=290)
    return
def viewcomporder():
    """List the logged-in customer's (global userc) completed orders
    from the Sales table."""
    cursor.execute('SELECT Sales_ID,Invoice_no,S_Date,Bill_Amount FROM Sales WHERE Customer_ID=?'
                   , (userc, ))
    orders = cursor.fetchall()
    win = Tk()
    win.geometry('840x640+100+120')
    win.title('Completed Orders')
    heading = Label(win, text='Completed Orders:',
                    font=('Completed Orders:', 13), bg='Moccasin',
                    fg='black')
    heading.place(x=80, y=40)
    # Column headers sit 200px apart starting at x=40.
    titles = ('Sales ID', 'Invoice Number', 'Sales Date', 'Bill Amount')
    for idx, title in enumerate(titles):
        Label(win, text=title, font=(title, 10)).place(x=40 + 200 * idx,
                                                       y=80)
    y_pos = 100
    for order in orders:
        x_pos = 40
        for value in order:
            cell = Label(win, text=value, font=(str(value), 8))
            cell.place(x=x_pos, y=y_pos)
            x_pos += 200
        y_pos += 15
    return
def request():
    """Show every customer book request stored in the Request table."""
    win = Tk()
    win.geometry('640x480+700+120')
    win.title('Book Requests')
    cursor.execute('SELECT * FROM Request')
    entries = cursor.fetchall()
    heading = Label(win, text='Customer Book Requests:',
                    font=('Customer Book Requests:', 13), bg='Moccasin',
                    fg='black')
    heading.place(x=80, y=40)
    Label(win, text='Book Name', font=('Sales ID', 10)).place(x=60, y=80)
    Label(win, text='Author', font=('Customer', 10)).place(x=235, y=80)
    y_pos = 100
    for entry in entries:
        x_pos = 60
        for value in entry:
            cell = Label(win, text=value, font=(str(value), 8))
            cell.place(x=x_pos, y=y_pos)
            x_pos += 175
        y_pos += 15
    return
def logoutcust():
    """Close the customer portal window."""
    cuspg.destroy()
    return
def custpage():
    """Log a customer in (credentials from ec1/ec2) and build the
    customer portal (search, completed orders, request buttons).

    Side effects: stores the customer id in global userc (read by
    viewcomporder) and the portal window in global cuspg.
    NOTE(review): passwords are compared in plain text against the
    Customer table — no hashing.
    """
    global userc
    global cuspg
    userc = str(ec1.get())
    paswc = str(ec2.get())
    cursor.execute('SELECT Customer_ID,Password,Customer_Name FROM Customer'
                   )
    rows = cursor.fetchall()
    # flag records whether any credential row matched.
    flag = 0
    for r in rows:
        if userc == r[0] and paswc == r[1]:
            var = Label(cust, text='Login Successful',
                        font=('Login Successful', 18), fg='green')
            var.place(x=150, y=400)
            cust.destroy()
            cuspg = Tk()
            cuspg.geometry('640x480+500+120')
            cuspg.title('CustPage')
            var = 'WELCOME ' + r[2] + '!'
            l1 = Label(cuspg, text=var, fg='black', bg='Moccasin',
                       font=(var, 16))
            l1.place(x=160, y=50)
            b = Button(
                cuspg,
                text='Log Out',
                fg='blue',
                width=5,
                height=1,
                command=logoutcust,
                )
            b.place(x=540, y=50)
            b1 = Button(cuspg, text='Search Books', width=20, height=3,
                        command=sbook)
            b1.place(x=100, y=150)
            b3 = Button(cuspg, text='View Completed Orders', width=20,
                        height=3, command=viewcomporder)
            b3.place(x=100, y=300)
            b4 = Button(cuspg, text='Request', width=20, height=3,
                        command=cusreq)
            b4.place(x=400, y=150)
            flag = 1
    if flag == 0:
        var = Label(cust, text='Incorrect Username/Password',
                    font=('Incorrect Username/Password', 10), fg='black'
                    )
        var.place(x=150, y=400)
def admin():
    """Build the admin login window (id + masked password entries).

    Side effects: rebinds globals adm, ea1, ea2 (read by adminpage).
    """
    global adm
    adm = Tk()
    adm.geometry('480x480+500+120')
    adm.title('Admin')
    disa1 = Label(adm, text='Admin ID', font=('Admin ID', 10))
    disa2 = Label(adm, text='Password', font=('Password', 10))
    ba1 = Button(adm, text='Login', width=5, height=2,
                 command=adminpage)
    disa3 = Label(adm, text='Welcome Admin', fg='black', bg='Moccasin',
                  font=('Welcome Admin', 16))
    global ea1
    ea1 = Entry(adm, font=10, bd=5)
    global ea2
    # show='*' masks the password as it is typed.
    ea2 = Entry(adm, font=10, bd=5, show='*')
    disa3.place(x=150, y=100)
    disa1.place(x=70, y=230)
    disa2.place(x=70, y=290)
    ba1.place(x=200, y=370)
    ea1.place(x=150, y=230)
    ea2.place(x=150, y=290)
    return
def customer():
    """Build the customer login window (id + masked password entries).

    Side effects: rebinds globals cust, ec1, ec2 (read by custpage).
    """
    global cust
    cust = Tk()
    cust.geometry('480x480+500+120')
    cust.title('Customer')
    global ec1
    global ec2
    disc1 = Label(cust, text='Customer ID', font=('Customer ID', 10))
    disc2 = Label(cust, text='Password', font=('Password', 10))
    bc1 = Button(cust, text='Login', width=5, height=2,
                 command=custpage)
    disc3 = Label(cust, text='Welcome Customer', fg='black',
                  bg='Moccasin', font=('Welcome Admin', 16))
    ec1 = Entry(cust, font=10, bd=5)
    # show='*' masks the password as it is typed.
    ec2 = Entry(cust, font=10, bd=5, show='*')
    disc3.place(x=150, y=100)
    disc1.place(x=70, y=230)
    disc2.place(x=70, y=290)
    bc1.place(x=200, y=370)
    ec1.place(x=180, y=230)
    ec2.place(x=180, y=290)
    return
def sinpg():
    """Validate the customer sign-up form and insert the new account."""

    def _flash(message, **label_opts):
        # Show a transient status label for 3 seconds.
        note = Label(sig, text=message, font=8, **label_opts)
        note.place(x=150, y=460)
        note.after(3000, lambda: note.place_forget())

    # Read every form field from the global Entry widgets.
    cusid = str(es1.get())
    pasw1 = str(es2.get())
    pasw2 = str(es21.get())
    cname = str(es3.get())
    city = str(es4.get())
    stre = str(es5.get())
    stat = str(es6.get())
    phno = str(es7.get())
    if pasw1 != pasw2:
        _flash(' Passwords dont match')
    elif '' in (cusid, pasw1, pasw2, cname, city, stre, stat, phno):
        _flash('Please fill all details', fg='red')
    else:
        _flash('Account Created successfully')
        # Parameterized insert; password is stored as entered.
        cursor.execute('INSERT INTO Customer VALUES (?,?,?,?,?,?,?)',
                       (cusid, pasw1, cname, city, stre, stat, phno))
        connection.commit()
    return
def signup():
    """Open the customer sign-up window with one labelled entry per field."""
    global sig
    sig = Tk()
    sig.geometry('480x480+500+120')
    sig.title('Customer')
    wel = Label(sig, text='Customer SignUp', fg='black', bg='Moccasin',
                font=16)
    global es1, es2, es21, es3, es4, es5, es6, es7
    # (caption, extra Entry options) for every form row, top to bottom.
    field_specs = [
        ('Customer ID', {}),
        ('Password', {'show': '*'}),
        ('Re-enter Password', {'show': '*'}),
        ('Name', {}),
        ('City', {}),
        ('Street', {}),
        ('State', {}),
        ('Phone No', {}),
    ]
    boxes = []
    row_y = 80
    for caption, extra in field_specs:
        # Rows are 40 px apart, labels at x=70, entries at x=230.
        Label(sig, text=caption, font=10).place(x=70, y=row_y)
        box = Entry(sig, font=10, bd=5, **extra)
        box.place(x=230, y=row_y)
        boxes.append(box)
        row_y += 40
    # Keep the historical global names used by sinpg().
    es1, es2, es21, es3, es4, es5, es6, es7 = boxes
    b1 = Button(sig, text='Sign Up', width=20, height=3, command=sinpg)
    wel.place(x=150, y=40)
    b1.place(x=150, y=400)
    return
# --- Main window -----------------------------------------------------------
global gui  # no-op at module level; kept for symmetry with the functions above
gui = Tk()
gui.geometry('1080x700+200+0')
gui.title('BOOKSHOP AUTOMATION SYSTEM')
# Background image; photo_image must stay referenced or Tk drops the image.
image = Image.open(os.path.join(resources_directory_path, 'academic-complex.jpg'))
photo_image = ImageTk.PhotoImage(image)
label = Label(gui, image=photo_image)
label.pack()
deli = 120  # NOTE(review): appears unused in this section — confirm before removing.
svar = StringVar()
# NOTE(review): labl is created but never placed/packed — confirm it is needed.
labl = Label(
    gui,
    textvariable=svar,
    height=3,
    width=200,
    bg='Moccasin',
    fg='Black',
    font=('Times New Roman', 15),
)
# Title banner across the top of the window.
dis = Label(
    gui,
    text='BOOKSHOP AUTOMATION SYSTEM',
    height=3,
    width=92,
    bg='Moccasin',
    fg='Black',
    font=('Times New Roman', 18),
)
disp = Label(gui, text='New User?', bg='Moccasin',
             font=('Times New Roman', 15))
# The three entry buttons route to the login/signup windows defined above.
b1 = Button(
    gui,
    text='Admin Login',
    width=25,
    height=3,
    command=admin,
    font=('Times New Roman', 18),
    bg='lightgreen',
    borderwidth=0,
)
b2 = Button(
    gui,
    text='Customer Login',
    width=25,
    height=3,
    command=customer,
    font=('Times New Roman', 18),
    bg='lightgreen',
    borderwidth=0,
)
b3 = Button(
    gui,
    text='Customer Signup',
    width=25,
    height=3,
    command=signup,
    font=('Times New Roman', 18),
    bg='lightgreen',
    borderwidth=0,
)
dis.place(x=0, y=140)
b1.place(x=100, y=530)
b2.place(x=630, y=530)
b3.place(x=360, y=360)
disp.place(x=483, y=325)
# Blocks here until the main window is closed.
gui.mainloop()
| PlytonRexus/vigilant-carnival | src/main.py | main.py | py | 50,771 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.realpath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
751666782 | import argparse
import re
import os
import pickle
# -------------------- Parsing arguments --------------------
parser = argparse.ArgumentParser(description="Этот код создаёт модель, обученную на текстах песен")
parser.add_argument(
    '--input_dir',
    type=str,
    help="Путь к папке с песнями"
)
parser.add_argument(
    '--model',
    type=str,
    default="model.pkl",
    help="Путь к файлу, в который сохраняется модель(с расширением pkl)"
)
args = parser.parse_args()
# `is None` (identity) is the idiomatic test; `== None` was non-idiomatic.
if args.input_dir is None:
    args.input_dir = input("Введите путь к папке с текстами: ")

# -------------------- Generating model --------------------
# Maps a unigram or a "word1 word2" bigram to the list of observed next words.
model = {}
for entry in os.listdir(path=args.input_dir):
    # Read the whole lyric file at once (the original accumulated it
    # character by character and shadowed the loop variable with the handle).
    with open(os.path.join(args.input_dir, entry), "r", encoding="utf-8") as song_file:
        text = song_file.read()
    # Normalize: strip non-word characters, lowercase, tokenize.
    text = re.sub(r'\W+', ' ', text)
    words = text.lower().strip().split()
    for i in range(len(words) - 1):
        if i > 0:
            bigram = words[i - 1] + " " + words[i]
            model.setdefault(bigram, []).append(words[i + 1])
        model.setdefault(words[i], []).append(words[i + 1])

# -------------------- Saving model --------------------
with open(args.model, "wb") as f:
    pickle.dump(model, f)
| DommeUse/Text-Generator | train.py | train.py | py | 1,508 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_numb... |
2299118867 | import os
import xml.etree.ElementTree as ET
import json
def append_barcode(results, barcode):
    """Convert one <Barcode> element into a dict and append it to results.

    The dict carries the barcode's attributes plus the text/attributes of
    its <Value> child (the last one wins if several are present).
    """
    entry = {"attrib": barcode.attrib}
    for value_node in get_elements(barcode, "Value"):
        entry["text"] = value_node.text
        entry["value_attrib"] = value_node.attrib
    results.append(entry)


def get_elements(root, name):
    """Return the direct children of *root* whose tag equals *name*."""
    return [child for child in root if child.tag == name]
# Convert every XML markup file into a JSON ground-truth .txt under ./Image.
for filename in os.listdir("./Markup"):
    # Skip anything that is not an XML markup file
    # (original used the `== False` anti-idiom).
    if not filename.endswith(".xml"):
        continue
    tree = ET.parse(os.path.join("./Markup", filename))
    root = tree.getroot()
    # First child of the root holds the list of pages.
    pages = root[0]
    results = []
    for page in pages:
        barcodes_root = get_elements(page, "Barcodes")[0]
        for barcode in get_elements(barcodes_root, "Barcode"):
            append_barcode(results, barcode)
    name = os.path.splitext(filename)[0]
    txt_path = os.path.join("./Image", name + ".txt")
    # Context manager guarantees the handle is closed even if the write
    # fails (original used bare open()/close()).
    with open(txt_path, "w") as f:
        f.write(json.dumps(results))
| xulihang/Barcode-Reading-Performance-Test | utils/create_ground_truth_from_xml.py | create_ground_truth_from_xml.py | py | 1,176 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.path.... |
21237918702 | import os
import queue
import json
from jsonschema import validate
import collections
class TestCase():
    """Attributes of a single test.

    Args:
        test_data (dict): data for a single test, parsed from JSON
            test declaration.
    """

    __test__ = False  #: Ignored by Pytest

    def __init__(self, test_data):
        self.tag = test_data["tag"]
        #: list of couples of decl: list of IO declarations.
        self.io_decl = self._make_io_decl(test_data)
        #: int: ticks to pass before next input application.
        self.ticks_before_next_input = test_data["ticks_before_next_input"]
        #: int: ticks to pass before checking outputs.
        self.ticks_before_checking = test_data["ticks_before_checking"]

    def _make_io_decl(self, test_data):
        """Build (inputs, outputs) couples from the raw rounds data.

        Binary-string values ("101") are parsed base-2; ints pass through.
        """
        def as_values(decl):
            return {
                signame: int(raw, 2) if isinstance(raw, str) else raw
                for signame, raw in decl.items()
            }

        return [(as_values(r["inputs"]), as_values(r["outputs"]))
                for r in test_data["rounds"]]

    def get_io_queues(self):
        """Generate input and expected declaration queues.

        Returns:
            (queue.Queue(), queue.Queue()): input and expected
            declarations queues, padded with None ticks.
        """
        input_queue = queue.Queue()
        expected_queue = queue.Queue()
        first_round = True
        for in_values, expected_values in self.io_decl:
            # Inputs: the declaration, then idle ticks until the next one.
            input_queue.put(in_values)
            for _ in range(self.ticks_before_next_input - 1):
                input_queue.put(None)
            # Expectations: wait ticks_before_checking on the first round,
            # then one tick less than the input period on later rounds.
            padding = (self.ticks_before_checking if first_round
                       else self.ticks_before_next_input - 1)
            for _ in range(padding):
                expected_queue.put(None)
            expected_queue.put(expected_values)
            first_round = False
        return input_queue, expected_queue
class TestsDeclaration():
    """Contains test declarations data.

    Args:
        filename (str): name of the test declaration JSON file.
    """

    __test__ = False  #: Ignored by Pytest

    def __init__(self, filename):
        #: dict: data parsed from the JSON tests declaration.
        self._data = self._parse_testfile(filename)
        #: str: name of the testsuite.
        self.name = self._data["name"]
        #: list of TestCase(): one per declared test.
        self.testcases = self._make_testcases()

    def _parse_testfile(self, filename):
        """Parse and schema-validate a JSON tests declaration file.

        Args:
            filename (str): name of the JSON file to parse.

        Returns:
            dict: data parsed from the JSON file, key order preserved.
        """
        with open(filename, 'r') as handle:
            parsed = json.loads(handle.read(),
                                object_pairs_hook=collections.OrderedDict)
        self._validate_testfile(parsed)
        return parsed

    def _validate_testfile(self, data):
        """Validate raw data against the bundled JSON schema.

        Args:
            data (dict): data parsed from the JSON file.
        """
        # Schema lives next to this module on disk.
        schema_dir = os.path.dirname(os.path.realpath(__file__))
        with open(schema_dir + "/testdecl_scheme.json", "r") as handle:
            schema = json.loads(handle.read())
        validate(data, schema)

    def _make_testcases(self):
        """Wrap every declared test in a TestCase().

        Returns:
            list: a list of TestCase().
        """
        return [TestCase(raw) for raw in self._data["tests"]]
| MaxenceCaronLasne/unitbench | unitbench/testdeclaration.py | testdeclaration.py | py | 4,307 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "queue.Queue",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"li... |
1093253833 | import sys
import os
sys.path.insert(0, os.getcwd() + '/../keggimporter')

import logging
import logging.handlers

from Config import *
from Importer import *

# Load configuration and resolve the log file destination.
config = Config()
config.loadConfiguration()
conf = config.getConfigurations()
logFile = conf.get('log', 'info')

# Root logger writes DEBUG and above to both stdout and a log file.
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
# Renamed from `format`, which shadowed the builtin.
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
log.addHandler(ch)

# maxBytes=0 disables size-based rollover, so this behaves like a plain file.
fh = logging.handlers.RotatingFileHandler(logFile, maxBytes=0, backupCount=0)
fh.setFormatter(formatter)
log.addHandler(fh)

log.info('keggimporter: START')

imp = Importer()
imp.startImporter()

# Every export step follows the same START / run / DONE pattern, so drive
# them from a list instead of repeating the boilerplate nine times.
for step in (
    'writeTaxonomies',
    'writePathways',
    'writeEcs',
    'writeOrganisms',
    'writeProteins',
    'writeProteinRelations',
    'writeOrganismTaxonomies',
    'writeProteinAccessions',
    'writeEcMaps',
):
    log.info('%s: START', step)
    getattr(imp, step)()
    log.info('%s: DONE', step)

log.info('keggimporter: DONE')
| alexanderfranca/keggimporter | bin/execute-importer.py | execute-importer.py | py | 1,573 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_nu... |
7946558670 | import warnings
import argparse
import mlflow
from mlflow.tracking.client import MlflowClient
warnings.filterwarnings("ignore", category=FutureWarning)
from custom_preproc_classes.config.core import config
def parse_bool(to_production: str):
    """Map the CLI flag "T"/"F" to a real boolean.

    Raises:
        ValueError: for any value other than "T" or "F".
    """
    if to_production == "T":
        return True
    if to_production == "F":
        return False
    raise ValueError("to_production must be T or F")
def register_mlflow_model(run_id: str, to_production: str, experiment_name: str):
    """
    Register the model from a specific experiment run.

    Args:
        run_id: id of the MLflow run that produced the model.
        to_production: "T"/"F" flag; when "T" the newly registered
            version is transitioned to the 'Production' stage.
        experiment_name: registry name under which the model is stored.
    """
    # ---- Register model ---- #
    model_uri = f"runs:/{run_id}/model"
    mlflow.set_tracking_uri("sqlite:///mlruns.db")
    model_details = mlflow.register_model(model_uri=model_uri, name=experiment_name)

    # ---- Change stage of registered model to production ---- #
    if parse_bool(to_production):
        MlflowClient().transition_model_version_stage(
            name=model_details.name,
            version=model_details.version,
            stage="Production",
        )
if __name__ == "__main__":
    # CLI entry point: forward run id, promotion flag and experiment name.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--runid", required=True, type=str)
    parser.add_argument("-to_p", "--to_production", required=True, type=str)
    parser.add_argument("-exn", "--experiment_name", required=True, type=str)
    args = parser.parse_args()

    print(args.runid)
    print(args.to_production)
    register_mlflow_model(run_id=args.runid,
                          to_production=args.to_production,
                          experiment_name=args.experiment_name)
| afanzott/Data_Science_Best_Practice | register_mlflow_model.py | register_mlflow_model.py | py | 1,955 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mlflow.set_tracking_uri",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mlflow.register_model",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "... |
72551570275 | from enum import IntEnum, auto
from typing import Tuple, Sequence
from unsserv.common.utils import parse_node
from unsserv.common.structs import Node
from unsserv.common.rpc.structs import Message
from unsserv.common.rpc.protocol import AProtocol, ITranscoder, Command, Data, Handler
from unsserv.extreme.searching.structs import WalkResult, Walk
FIELD_COMMAND = "kwalker-command"
FIELD_WALK_ID = "kwalker-walk-id"
FIELD_ORIGIN_NODE = "kwalker-origin-node"
FIELD_TTL = "kwalker-ttl"
FIELD_DATA_ID = "kwalker-data-id"
FIELD_WALK_RESULT = "kwalker-walk-result"
class KWalkerCommand(IntEnum):
    """Message opcodes for the KWalker search protocol."""

    # Explicit values match what auto() assigned in the original.
    WALK = 1
    WALK_RESULT = 2
class KWalkerTranscoder(ITranscoder):
    """Serializes KWalker commands into wire Messages and back."""

    def encode(self, command: Command, *data: Data) -> Message:
        """Pack a Walk or WalkResult payload into a Message."""
        if command == KWalkerCommand.WALK:
            walk: Walk = data[0]
            payload = {
                FIELD_COMMAND: KWalkerCommand.WALK,
                FIELD_WALK_ID: walk.id,
                FIELD_DATA_ID: walk.data_id,
                FIELD_ORIGIN_NODE: walk.origin_node,
                FIELD_TTL: walk.ttl,
            }
        elif command == KWalkerCommand.WALK_RESULT:
            walk_result: WalkResult = data[0]
            payload = {
                FIELD_COMMAND: KWalkerCommand.WALK_RESULT,
                FIELD_WALK_ID: walk_result.walk_id,
                FIELD_WALK_RESULT: walk_result.result,
            }
        else:
            raise ValueError("Invalid Command")
        return Message(self.my_node, self.service_id, payload)

    def decode(self, message: Message) -> Tuple[Command, Sequence[Data]]:
        """Unpack a Message into its (command, [payload object]) pair."""
        fields = message.data
        command = fields[FIELD_COMMAND]
        if command == KWalkerCommand.WALK:
            walk = Walk(
                id=fields[FIELD_WALK_ID],
                data_id=fields[FIELD_DATA_ID],
                origin_node=parse_node(fields[FIELD_ORIGIN_NODE]),
                ttl=fields[FIELD_TTL],
            )
            return KWalkerCommand.WALK, [walk]
        if command == KWalkerCommand.WALK_RESULT:
            walk_result = WalkResult(
                walk_id=fields[FIELD_WALK_ID],
                result=fields[FIELD_WALK_RESULT],
            )
            return KWalkerCommand.WALK_RESULT, [walk_result]
        raise ValueError("Invalid Command")
class KWalkerProtocol(AProtocol):
    """RPC facade for KWalker: send walks/results and register their handlers."""

    def _get_new_transcoder(self):
        # One transcoder per protocol instance, bound to this node/service.
        return KWalkerTranscoder(self.my_node, self.service_id)

    async def walk(self, destination: Node, walk: Walk):
        """Forward a random walk to `destination` over RPC."""
        message = self._transcoder.encode(KWalkerCommand.WALK, walk)
        return await self._rpc.call_send_message(destination, message)

    async def walk_result(self, destination: Node, walk_result: WalkResult):
        """Send the result of a finished walk back to `destination`."""
        message = self._transcoder.encode(KWalkerCommand.WALK_RESULT, walk_result)
        return await self._rpc.call_send_message(destination, message)

    def set_handler_walk(self, handler: Handler):
        # Callback invoked when a WALK message arrives.
        self._handlers[KWalkerCommand.WALK] = handler

    def set_handler_walk_result(self, handler: Handler):
        # Callback invoked when a WALK_RESULT message arrives.
        self._handlers[KWalkerCommand.WALK_RESULT] = handler
| aratz-lasa/py-unsserv | unsserv/extreme/searching/protocol.py | protocol.py | py | 3,158 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "enum.IntEnum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "enum.auto",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "unsserv.common.rpc.protocol.ITransco... |
8620148120 | from django.core.management.base import BaseCommand
from home2_app.models import Client
class Command(BaseCommand):
    """Management command that renames an existing Client."""

    help = "edit client name "

    def add_arguments(self, parser):
        # Positional arguments: current client name and its replacement.
        parser.add_argument('name', type=str, help="Client_name")
        parser.add_argument('new_name', type=str, help="New_Client_name")

    def handle(self, *args, **kwargs):
        # Rename the first Client matching `name`, if one exists.
        name = kwargs.get('name')
        new_name = kwargs.get('new_name')
        client = Client.objects.filter(name=name).first()
        if client:
            client.name = new_name
            client.save()
            self.stdout.write(f'edit client{client}')
        # NOTE(review): this message prints unconditionally, even after a
        # successful rename — presumably it belongs in an else branch; confirm.
        self.stdout.write(f' clients {name} in base note found') | vit21513/django_homework | home_project/home2_app/management/commands/edit_client_name.py | edit_client_name.py | py | 698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "home2_app.models.Client.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "home2_app.models.Client.objects",
"line_number": 15,
"usage... |
15345500601 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
print("Hello World!")
# In[2]:
import librosa
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import librosa.display
from IPython.display import Audio
from scipy import stats
# In[3]:
y, sr = librosa.load("yours.mp3")
# In[4]:
# loading the audio file
# sphinx_gallery_thumbnail_path = '_static/playback-thumbnail.png'
# We'll need IPython.display's Audio widget
# In[5]:
Audio(data=y, rate=sr)
# In[6]:
print(y)
# In[7]:
# estimating the tempo of the song
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
tempo
# In[8]:
# class that uses the librosa library to analyze the key that an mp3 is in
# arguments:
# waveform: an mp3 file loaded by librosa, ideally separated out from any percussive sources
# sr: sampling rate of the mp3, which can be obtained when the file is read with librosa
# tstart and tend: the range in seconds of the file to be analyzed; default to the beginning and end of file if not specified
class Tonal_Fragment(object):
    """Krumhansl-Schmuckler key analysis of a (segment of a) waveform.

    Args:
        waveform: audio samples loaded by librosa (ideally the harmonic
            part only, with percussion removed).
        sr: sampling rate reported by librosa when the file was read.
        tstart, tend: optional segment bounds in seconds; default to the
            beginning/end of the clip.
    """

    def __init__(self, waveform, sr, tstart=None, tend=None):
        self.waveform = waveform
        self.sr = sr
        self.tstart = tstart
        self.tend = tend
        # Convert the second-based bounds into sample indices for slicing.
        if self.tstart is not None:
            self.tstart = librosa.time_to_samples(self.tstart, sr=self.sr)
        if self.tend is not None:
            self.tend = librosa.time_to_samples(self.tend, sr=self.sr)
        self.y_segment = self.waveform[self.tstart:self.tend]
        # Constant-Q chromagram: one row per pitch class over time frames.
        self.chromograph = librosa.feature.chroma_cqt(y=self.y_segment, sr=self.sr, bins_per_octave=24)

        # chroma_vals is the amount of each pitch class present in this time interval
        self.chroma_vals = []
        for i in range(12):
            self.chroma_vals.append(np.sum(self.chromograph[i]))
        pitches = ['C','C#','D','D#','E','F','F#','G','G#','A','A#','B']
        # dictionary relating pitch names to the associated intensity in the song
        self.keyfreqs = {pitches[i]: self.chroma_vals[i] for i in range(12)}

        keys = [pitches[i] + ' major' for i in range(12)] + [pitches[i] + ' minor' for i in range(12)]

        # use of the Krumhansl-Schmuckler key-finding algorithm, which compares the chroma
        # data above to typical profiles of major and minor keys:
        maj_profile = [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88]
        min_profile = [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]

        # finds correlations between the amount of each pitch class in the time interval and the above profiles,
        # starting on each of the 12 pitches. then creates dict of the musical keys (major/minor) to the correlation
        self.min_key_corrs = []
        self.maj_key_corrs = []
        for i in range(12):
            # Rotate the measured intensities so candidate tonic i comes first.
            key_test = [self.keyfreqs.get(pitches[(i + m)%12]) for m in range(12)]
            # correlation coefficients (strengths of correlation for each key)
            self.maj_key_corrs.append(round(np.corrcoef(maj_profile, key_test)[1,0], 3))
            self.min_key_corrs.append(round(np.corrcoef(min_profile, key_test)[1,0], 3))

        # names of all major and minor keys mapped to their correlations
        self.key_dict = {**{keys[i]: self.maj_key_corrs[i] for i in range(12)},
                         **{keys[i+12]: self.min_key_corrs[i] for i in range(12)}}

        # this attribute represents the key determined by the algorithm
        self.key = max(self.key_dict, key=self.key_dict.get)
        self.bestcorr = max(self.key_dict.values())

        # this attribute represents the second-best key determined by the algorithm,
        # if the correlation is close to that of the actual key determined
        self.altkey = None
        self.altbestcorr = None

        for key, corr in self.key_dict.items():
            if corr > self.bestcorr*0.9 and corr != self.bestcorr:
                self.altkey = key
                self.altbestcorr = corr

    # prints the relative prominence of each pitch class
    def print_chroma(self):
        self.chroma_max = max(self.chroma_vals)
        for key, chrom in self.keyfreqs.items():
            print(key, '\t', f'{chrom/self.chroma_max:5.3f}')

    # prints the correlation coefficients associated with each major/minor key
    def corr_table(self):
        for key, corr in self.key_dict.items():
            print(key, '\t', f'{corr:6.3f}')

    def correlation_coefficients(self):
        # Expose the raw (key name -> correlation) items for downstream code.
        return self.key_dict.items()

    # printout of the key determined by the algorithm; if another key is close, that key is mentioned
    def print_key(self):
        print("likely key: ", max(self.key_dict, key=self.key_dict.get), ", correlation: ", self.bestcorr, sep='')
        if self.altkey is not None:
            print("also possible: ", self.altkey, ", correlation: ", self.altbestcorr, sep='')

    # prints a chromagram of the file, showing the intensity of each pitch class over time
    def chromagram(self, title=None):
        # NOTE(review): this uses the module-level global `sr`, not `self.sr` —
        # looks wrong if the instance was built with a different rate; confirm.
        C = librosa.feature.chroma_cqt(y=self.waveform, sr=sr, bins_per_octave=24)
        plt.figure(figsize=(12,4))
        librosa.display.specshow(C, sr=sr, x_axis='time', y_axis='chroma', vmin=0, vmax=1)
        if title is None:
            plt.title('Chromagram')
        else:
            plt.title(title)
        plt.colorbar()
        plt.tight_layout()
        plt.show()
# In[9]:
# this audio takes a long time to load because it has a very high sampling rate; be patient.
# the load function generates a tuple consisting of an audio object y and its sampling rate sr
# this function filters out the harmonic part of the sound file from the percussive part, allowing for
# more accurate harmonic analysis
y_harmonic, y_percussive = librosa.effects.hpss(y)
# In[10]:
# this block instantiates the Tonal_Fragment class with the first 22 seconds of the above harmonic part of une barque.
# the three methods called will print the determined key of the song, the correlation coefficients for all keys,
# and a chromogram, which shows the intensity of frequencies associated with each of the 12 pitch classes over time.
unebarque_fsharp_min = Tonal_Fragment(y_harmonic, sr, tend=22)
unebarque_fsharp_min.print_chroma()
# In[11]:
unebarque_fsharp_min.print_key()
unebarque_fsharp_min.corr_table()
unebarque_fsharp_min.chromagram("Une Barque sur l\'Ocean")
# In[12]:
# parts of the song that are more tonally ambiguous will show two keys with print_key(),
# if they are similarly well-correlated.
# this section of une barque is in E minor, though the algorithm suggests that it is in D major, a closely related key,
# though E minor is also listed since their correlation coefficients are very close.
unebarque_e_min = Tonal_Fragment(y_harmonic, sr, tstart=0)
unebarque_e_min.print_key()
unebarque_e_min.corr_table()
# In[13]:
# in the case of une barque sur l'ocean (and other songs), predictions become less reliable over short time frames
# the below block prints the predicted key of every 3-second-long cut of the piece.
bin_size = 3
for i in range(24):
fragment = Tonal_Fragment(y_harmonic, sr, tstart = bin_size*i, tend=bin_size*(i+1))
print(bin_size*i,"sec:",fragment.key)
if fragment.altkey is not None:
print("\t or:", fragment.altkey)
# In[14]:
# getting volume graph
rms = librosa.feature.rms(y=y)
plt.plot(rms[0])
plt.show()
# In[15]:
print(rms)
print(np.mean(rms[0]))
print(np.max(rms[0]))
# In[16]:
import librosa
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import librosa.display
from IPython.display import Audio
from scipy import stats
# Python program to explain os.listdir() method
# importing os module
import os
# Get the path of current working directory
path = os.getcwd()
path += "//Good Songs"
# Get the list of all files and directories
# in current working directory
songs_list = os.listdir(path)
#move the non-song item to end of list so that we can avoid using it for the data
songs_list.sort(key = 'librosapractice.ipynb'.__eq__)
# print the list
print(songs_list)
for song in songs_list:
if song[-3:] == 'mp3':
print(song)
# load audio
y, sr = librosa.load(path+"//"+song)
Audio(data=y, rate=sr)
print(y)
# tempo
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
print(tempo)
# key
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
tempo
y_harmonic, y_percussive = librosa.effects.hpss(y)
song_tonal_analysis = Tonal_Fragment(y_harmonic, sr, tend=22)
song_tonal_analysis.print_chroma()
song_tonal_analysis.print_key()
song_tonal_analysis.corr_table()
song_tonal_analysis.chromagram("song")
unebarque_e_min = Tonal_Fragment(y_harmonic, sr, tstart=0)
unebarque_e_min.print_key()
unebarque_e_min.corr_table()
# In[17]:
# for extracting likely key/s for each song + spectogram
for song in songs_list:
if song[-3:] == 'mp3':
print(song)
y, sr = librosa.load(song)
Audio(data=y, rate=sr)
print(y)
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
tempo
y_harmonic, y_percussive = librosa.effects.hpss(y)
song_tonal_analysis = Tonal_Fragment(y_harmonic, sr, tend=22)
song_tonal_analysis.print_chroma()
song_tonal_analysis.print_key()
song_tonal_analysis.corr_table()
song_tonal_analysis.chromagram("song")
unebarque_e_min = Tonal_Fragment(y_harmonic, sr, tstart=0)
unebarque_e_min.print_key()
unebarque_e_min.corr_table()
# In[18]:
# getting volume graph for each song in the list
def extract_volume(song):
    """Return (mean, max) of the RMS energy frames for an audio file."""
    samples, rate = librosa.load(song)
    Audio(data=samples, rate=rate)
    rms_frames = librosa.feature.rms(y=samples)[0]
    return np.mean(rms_frames), np.max(rms_frames)
# In[19]:
# tempo function
def extract_tempo(song):
    """Estimate the tempo (BPM array) of an audio file from onset strength."""
    samples, rate = librosa.load(song)
    Audio(data=samples, rate=rate)
    strength = librosa.onset.onset_strength(y=samples, sr=rate)
    return librosa.beat.tempo(onset_envelope=strength, sr=rate)
# In[20]:
def extract_key(song):
    """Return the (key name -> correlation) pairs for a song.

    Loads the file, keeps only the harmonic component, and runs the
    Krumhansl-Schmuckler key analysis over the whole track.
    """
    y, sr = librosa.load(song)
    Audio(data=y, rate=sr)
    # Removed the onset-strength/tempo computation the original did here:
    # its result was never used and it is expensive.
    # Percussive content only adds noise to pitch-class analysis.
    y_harmonic, y_percussive = librosa.effects.hpss(y)
    keyanalysis = Tonal_Fragment(y_harmonic, sr, tstart=0)
    return keyanalysis.correlation_coefficients()
# In[21]:
# Build one feature vector per song:
# [tempo, mean volume, max volume, 24 key-correlation values].
# NOTE(review): list_of_emotions is never filled in this cell (a separate
# emotions_list is built later from the CSV) — confirm it is still needed.
list_of_emotions = []
list_of_all_songs = []
for song in songs_list:
    if song[-3:] == 'mp3':
        #print(song)
        # NOTE(review): unlike the earlier loop, these helpers receive the
        # bare filename without the "Good Songs" directory prefix — this
        # presumably only works if the files are in the CWD; confirm.
        tempo = extract_tempo(song)
        volume = extract_volume(song)
        key = extract_key(song)
        # Keep only the correlation values, dropping the key names.
        key_probabilities = [x[1] for x in list(key)]
        combined_list = [tempo[0]] + [volume[0], volume[1]] + key_probabilities
        #print(combined_list)
        list_of_all_songs += [combined_list]
    else:
        # Non-mp3 entries (e.g. the notebook file) are just reported.
        print(song)
# I think I need to add the below code here???
print(list_of_all_songs)
# In[22]:
graph_colors = ['red', 'cyan', 'hotpink', 'lightslategrey']
from sklearn.decomposition import PCA
import csv
emotions_list = []
# opening the CSV file
#with open('final_music_dataset - Sheet1 (1).csv', mode ='r')as file:
with open('final_music_dataset - Sheet1 (1).csv', mode ='r')as file:
# reading the CSV file
csvFile = csv.reader(file)
# displaying the contents of the CSV file
n = 0
for line in csvFile:
# print(lines)
if n >= 1:
if line[3] == 'calm/neutral':
emotions_list += [graph_colors[1]]
elif line[3] == 'happy/joyous':
emotions_list += [graph_colors[2]]
elif line[3] == 'sad/melancholy':
emotions_list += [graph_colors[3]]
elif line[3] == 'angry/restlessness':
emotions_list += [graph_colors[0]]
else:
print(line[3])
n += 1
# print(line[3] == "calm/neutral")
pca = PCA(n_components=2)
list_of_songs_numpy = np.array(list_of_all_songs)
print(list_of_songs_numpy)
two_d_output = pca.fit_transform(list_of_songs_numpy)
print(two_d_output)
print(emotions_list)
# In[23]:
import matplotlib.pyplot as plt
plt.scatter([v[0] for v in two_d_output], [v[1] for v in two_d_output], c = emotions_list)
plt.show()
# In[24]:
#import csv
# opening the CSV file
#with open('Music Dataset - Sheet1.csv', mode ='r')as file:
# reading the CSV file
# csvFile = csv.reader(file)
# displaying the contents of the CSV file
#for lines in csvFile:
# print(lines)
#print(lines[3])
# In[ ]:
# In[ ]:
| kirtisubs06/AI-Music-Research-Code | MusicAIJupyterNotebook.py | MusicAIJupyterNotebook.py | py | 13,649 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "librosa.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "IPython.display.Audio",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "librosa.onset.onset_strength",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "libro... |
75257613153 | # goal
# 실패율이 높은 스테이지부터 내림차순으로 스테이지의 번호가 담겨있는 배열을 return 하도록 solution 함수
# description
# 실패율 - 스테이지에 도달했으나 아직 클리어하지 못한 플레이어의 수 / 스테이지에 도달한 플레이어 수
# 전체 스테이지의 개수 N, 게임을 이용하는 사용자가 현재 멈춰있는 스테이지의 번호가 담긴 배열 stages가 매개변수
# condition
# 스테이지의 개수 N은 1 이상 500 이하의 자연수이다.
# stages의 길이는 1 이상 200,000 이하이다.
# stages에는 1 이상 N + 1 이하의 자연수가 담겨있다.
# 각 자연수는 사용자가 현재 도전 중인 스테이지의 번호를 나타낸다.
# 단, N + 1 은 마지막 스테이지(N 번째 스테이지) 까지 클리어 한 사용자를 나타낸다.
# 만약 실패율이 같은 스테이지가 있다면 작은 번호의 스테이지가 먼저 오도록 하면 된다.
# 스테이지에 도달한 유저가 없는 경우 해당 스테이지의 실패율은 0 으로 정의한다.
import collections
def solution(N, stages):
    """Return stage numbers ordered by failure rate, highest first.

    Failure rate of stage i = (players currently stuck on i) /
    (players who ever reached i). Python's stable sort keeps the
    smaller stage number first on ties, as the problem requires.
    """
    stuck = collections.Counter(stages)
    reached = len(stages)
    failure = []
    for stage in range(1, N + 1):
        # No one reached this stage (or no one is stuck) -> rate 0.
        rate = stuck[stage] / reached if reached else 0
        failure.append((stage, rate))
        # Players stuck here never reached any later stage.
        reached -= stuck[stage]
    failure.sort(key=lambda item: item[1], reverse=True)
    return [stage for stage, _ in failure]
| jum0/ProblemSolvingPython | Programmers/42889.py | 42889.py | py | 1,713 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 23,
"usage_type": "call"
}
] |
74633942113 | """
Django command to wait for DB to be available
"""
import time
# Shows error but the psycopg2 is successfully installed on docker
from psycopg2 import OperationalError as Psycopg2Error
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that blocks until the default database is reachable.

    Run as ``python manage.py wait_for_db`` (typically at container start-up)
    so the app process does not race the database service.
    """
    def handle(self, *args, **options):
        # Poll Django's system check once per second until the DB responds.
        self.stdout.write('Waiting for the Database to start....')
        db_up = False
        while db_up is False:
            try:
                # Raises if the 'default' connection cannot be used yet.
                self.check(databases=['default'])
                db_up = True
            except (Psycopg2Error, OperationalError):
                # Driver-level or Django-level connection error: wait and retry.
                self.stdout.write("DB is unavailable right now. \
                Waiting for 1 sec to restart the database")
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('DB is UP '))
| Uchiha-Itachi0/django-recipe-api | app/core/management/commands/wait_for_db.py | wait_for_db.py | py | 898 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "psycopg2.OperationalError",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.utils.OperationalError",
"line_number": 29,
"usage_type": "na... |
11420247228 | """
A script that compares image histograms quantitively. The user must specify either a single image or a directory (jpg/png).
"""
# system tools
import os
import argparse
import sys
# image and data tools
import cv2
import numpy as np
import glob
import pandas as pd
# plotting tools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# function that specifies the required arguments
def parse_args():
    """Parse command line arguments and return them as a dict."""
    parser = argparse.ArgumentParser()
    # -f is mandatory: either a single image file or a directory of images.
    parser.add_argument("-f", "--file_input", required = True, help = "The filename or directory we want to work with")
    return vars(parser.parse_args())
# function to clean a file path name
def clean_name(name):
    """Strip directory components and everything after the first dot,
    leaving the bare file stem."""
    basename = name.split("/")[-1]
    return basename.split(".")[0]
# function to create the histogram
def create_histogram(image):
    """Build a 3D colour histogram (8 bins per channel) for *image*,
    min-max normalised to the 0-255 range."""
    histogram = cv2.calcHist([image], [0,1,2], None, [8,8,8], [0,256, 0,256, 0,256])
    # cv2.normalize writes into and returns its dst argument.
    return cv2.normalize(histogram, histogram, 0,255, cv2.NORM_MINMAX)
# function to calculate similarity scores
# function to calculate similarity scores
def calc_sim_scores(filename):
    """Compare *filename*'s histogram against every other image in the same
    directory and return the three most similar (lowest chi-square distance).

    NOTE(review): re-reads the -f command line argument to locate the
    directory, so this only works when invoked through this script's CLI.
    """
    # parse argument
    args = parse_args()
    input_name = args['file_input']
    # if the path provided is a file
    isFile = os.path.isfile(input_name)
    if isFile == True:
        input_name = input_name.split("/")[:-1]
        input_name = '/'.join(input_name) # remove the filename to get the directory
    # get all full path names (glob '*g' matches both .jpg and .png)
    file_list = glob.glob(os.path.join(input_name, '*g'))
    file_list = sorted(file_list)
    target_image = mpimg.imread(filename) # loading target image
    target_hist = create_histogram(target_image)
    # initiating lists for results
    sim_value = []
    img_name = []
    for img in file_list:
        if img != filename: # excluding the target image
            image = mpimg.imread(img)
            hist = create_histogram(image)
            # chi-square distance: lower score means more similar
            score = round(cv2.compareHist(target_hist, hist, cv2.HISTCMP_CHISQR), 2) # comparing each histogram to the target image
            img_name.append(img) # appending results to the lists
            sim_value.append(score)
    # zipping the lists together, sorting it, and selecting the top 3 results.
    scores = sorted(zip(sim_value, img_name))
    scores = scores[0:3]
    return scores
# function to create the plot and dataframe of the results
# function to create the plot and dataframe of the results
def hist_result(file, scores):
    """Save a side-by-side plot and a one-row CSV of the target image and its
    three best matches, into the ``output`` directory.

    :param file: path of the target image
    :param scores: list of (similarity_score, image_path) tuples, best first
    """
    target_image = mpimg.imread(file) # loading target image
    file_name = clean_name(file)
    # initiating a list for the output dataframe
    output_data = []
    output_data.append(file_name)
    # initiating plot
    plt.subplot(1, 4, 1) #the figure has 1 row, 4 columns, and this is the first plot
    plt.imshow(target_image, extent=[-4, 4, -1, 1], aspect=4) # aligning the size of the images
    plt.axis('off')
    plt.title('Main image')
    plot_number = 1 # creating counter for the subplots
    for value, image in scores:
        value = round(value) # rounding the similarity score for a cleaner output
        image_name = clean_name(image)
        output_data.append(image_name) # saving the results of each image in the output list
        output_data.append(value)
        # plotting
        plot_number += 1
        image_sub = mpimg.imread(image)
        # creating the subplots
        plt.subplot(1, 4, plot_number)
        plt.imshow(image_sub, extent=[-4, 4, -1, 1], aspect=4)
        plt.axis('off')
        plt.title(f"Score: {value}")
    # Saving the plot
    plt.suptitle("Most similar images")
    plt.savefig(f'output/hist_{file_name}.jpg')
    # creating the dataframe and transposing it to get columns
    df = pd.DataFrame(output_data, index = ["main_image", "1_image", "1_image_score", "2_image", "2_image_score", "3_image", "3_image_score"])
    df = df.transpose()
    outpath = os.path.join('output', f'hist_{file_name}.csv')
    df.to_csv(outpath, index = False)
    return
# function to create a dataframe of the similarity scores for multiple input files
# function to create a dataframe of the similarity scores for multiple input files
def hist_all_results(input_name):
    """For every image in *input_name*, compute its three most similar images
    and write all results to ``output/hist_all_files.csv`` (one row per image).

    :param input_name: directory containing the images
    """
    # get all full path names (glob '*g' matches both .jpg and .png)
    file_list = glob.glob(os.path.join(input_name, '*g'))
    file_list = sorted(file_list)
    # initiate list for dataframe
    out_list = []
    for file in file_list:
        scores = calc_sim_scores(file)
        file_name = clean_name(file)
        output_data = [] # initiating output data for each main image
        output_data.append(file_name)
        # cleaning the values and names, and appending them to the output data
        for value, image in scores:
            value = round(value)
            image_name = clean_name(image)
            output_data.append(image_name)
            output_data.append(value)
        out_list.append(output_data) # appending the output data to the out_list for the dataframe
    # creating the dataframe
    df = pd.DataFrame(out_list, columns = ["main_image", "1_image", "1_image_score", "2_image", "2_image_score", "3_image", "3_image_score"])
    outpath = os.path.join('output', 'hist_all_files.csv')
    df.to_csv(outpath)
    return
def main():
    """Dispatch on the -f argument: single-file comparison for a file path,
    all-vs-all comparison for a directory path."""
    args = parse_args()
    path = args['file_input']
    # Single target image: compare it against the rest of its directory.
    if os.path.isfile(path):
        hist_result(path, calc_sim_scores(path))
        print('Input is a file. Script success.')
    # Directory: compare every image against every other image.
    if os.path.isdir(path):
        hist_all_results(path)
        print('Input is a directory. Script success.')
    return
if __name__ == '__main__':
main() | sarah-hvid/Vis_assignment1 | src/hist_comparison.py | hist_comparison.py | py | 5,910 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.calcHist",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_MINMAX",
... |
17381459463 | import pyautogui as pag
from collections import namedtuple,Counter
import random
"""
https://asyncfor.com/posts/doc-pyautogui.html
screenWidth, screenHeight = pyautogui.size()
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.moveTo(100, 150)
pyautogui.click()
# 鼠标向下移动10像素
pyautogui.moveRel(None, 10)
pyautogui.doubleClick()
# 用缓动/渐变函数让鼠标2秒后移动到(500,500)位置
# use tweening/easing function to move mouse over 2 seconds.
pyautogui.moveTo(1800, 500, duration=2, tween=pyautogui.easeInOutQuad)
# 在每次输入之间暂停0.25秒
pyautogui.typewrite('Hello world!', interval=0.25)
pyautogui.press('esc')
pyautogui.keyDown('shift')
pyautogui.press(['left', 'left', 'left', 'left', 'left', 'left'])
pyautogui.keyUp('shift')
pyautogui.hotkey('ctrl', 'c')
"""
#("mouse","click",(10,100)) ,("mouse","move",(10,100)),("key","press","a")
#status 默认为 None
# Command record: device ("mouse"/"keyboard"), action name, code (coordinates
# or key), and an optional status tag (defaults to None).
CMD = namedtuple("CMD",["device","action","code","status"],defaults=[None])
#status_device_action_map
# Maps a status tag to the [device, action] pair that handles it; the string
# key "None" falls back to a plain mouse move.
# (presumably "bc" = button click, "kd" = key down — TODO confirm)
sda_map = {"bc":["mouse","click"],
"kd":["keyboard","keyDown"],
"None":["mouse","move"]}
#status:target name
# Disable pyautogui's move-to-corner abort so scripted moves can reach screen edges.
pag.FAILSAFE = False
# Binds abstract device/action names to concrete pyautogui calls on PC.
PC_bind = {
"mouse":{"move":pag.moveTo,"click":pag.click},
"keyboard":{}
}
Andriod_bind = {} | dkluffy/Gamescripts | liverbot/devicebind.py | devicebind.py | py | 1,283 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyautogui.FAILSAFE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pyautogui.moveTo",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "... |
42472504231 | import json
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.views.generic import FormView
from django.views.generic import TemplateView
from django.contrib import messages
from .webapp.forms import ContactForm
from .webapp.models import Contact
from .user_login.models import UserPhoto
from .user_login.forms import ProfilePictureForm
from .user_login.models import UserProfile
from .user_login.forms import UserProfileForm
from .user_login.forms import UserPhotoForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.urls import reverse
from django.core.mail import send_mail
import os
from django.shortcuts import render
from django.core.mail import send_mail
from .forms import ContactForm
from .webapp.models import Contact # Make sure you import the Contact model
# import logging
# logger = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# views.py
class Custom500View(TemplateView):
    """Renders the custom 500 error page with the correct HTTP status code."""
    template_name = 'pioner_gallery/errors/500.html'
    def get(self, request, *args, **kwargs):
        # TemplateView defaults to 200; override so clients see a real 500.
        response = super().get(request, *args, **kwargs)
        response.status_code = 500
        return response
class Custom404View(TemplateView):
    """Renders the custom 404 error page with the correct HTTP status code."""
    template_name = 'pioner_gallery/errors/404.html'
    def get(self, request, *args, **kwargs):
        # TemplateView defaults to 200; override so clients see a real 404.
        response = super().get(request, *args, **kwargs)
        response.status_code = 404
        return response
class UserGalleryView(View):
    """Public gallery page: lists every photo uploaded by ``username``."""
    def get(self, request, username):
        # 404 when the requested user does not exist.
        viewed_user = get_object_or_404(User, username=username)
        user_photos = UserPhoto.objects.filter(user=viewed_user)
        context = {
            'user_photos': user_photos,
            'viewed_user': viewed_user,
        }
        return render(request, 'pioner_gallery/user_gallery.html', context)
class EditGalleryView(UserGalleryView):
    """Gallery page in edit mode: lists photos and handles deletion via POST.

    NOTE(review): ``get`` resolves ``username`` from the URL but always lists
    ``request.user``'s photos — confirm whether visiting another user's edit
    URL should be rejected instead of silently showing your own gallery.
    """
    def get(self, request, username):
        # Reuse the logic from the parent class to fetch photos
        user = get_object_or_404(User, username=username)
        user_photos = UserPhoto.objects.filter(user=request.user)
        context = {
            'user_photos': user_photos,
            'user': user,
            'edit_mode': True # Set the edit_mode flag here
        }
        return render(request, 'pioner_gallery/user_gallery.html', context)

    def post(self, request, username):
        # Get the ID of the photo to delete from the POST data
        photo_id_to_delete = request.POST.get('photo_id')
        # Fetch the photo that belongs to the user and has the specified ID
        # (scoping by user=request.user prevents deleting other users' photos).
        try:
            photo_to_delete = UserPhoto.objects.get(user=request.user, id=photo_id_to_delete)
            photo_to_delete.delete()
            messages.success(request, "Photo was deleted successfully!")
        except UserPhoto.DoesNotExist:
            messages.error(request, "Photo not found.")
        # Redirect back to the edit gallery page
        return redirect(reverse('edit_gallery', args=[request.user.username]))
class UploadPhotoView(LoginRequiredMixin, FormView):
    """Form page for uploading a new photo, owned by the logged-in user."""
    template_name = 'pioner_gallery/upload_photo.html'
    form_class = UserPhotoForm
    def form_valid(self, form):
        # Attach the current user as owner before persisting the photo.
        photo = form.save(commit=False)
        photo.user = self.request.user
        photo.save()
        return redirect(reverse('user_gallery', args=[self.request.user.username]))
class EditUploadedPhoto(UploadPhotoView):
    """Edit an existing photo identified by the ``photo_id`` URL kwarg.

    NOTE(review): the photo lookup is by id only, not scoped to
    ``request.user`` — confirm whether users can edit others' photos.
    """
    def get_form_kwargs(self):
        # Bind the form to the existing photo so form.save() updates it.
        kwargs = super().get_form_kwargs()
        photo_id = self.kwargs.get('photo_id')
        photo = UserPhoto.objects.get(id=photo_id)
        kwargs['instance'] = photo
        return kwargs
    def form_valid(self, form):
        # The form will handle updating the instance
        form.save()
        return redirect(reverse('user_gallery', args=[self.request.user.username]))
class UploadProfilePictureView(LoginRequiredMixin, FormView):
    """Form page to upload/replace the logged-in user's profile picture."""
    template_name = 'pioner_gallery/edit_user_profile.html'
    form_class = ProfilePictureForm
    def get_form_kwargs(self):
        # Bind the form to the user's existing profile so saving updates it.
        kwargs = super().get_form_kwargs()
        kwargs['instance'] = UserProfile.objects.get(user=self.request.user)
        return kwargs
    def form_valid(self, form):
        form.save()
        return redirect('user_profile')
class EditUserProfileDetailsView(LoginRequiredMixin, FormView):
    """Form page to edit the logged-in user's profile details."""
    template_name = 'pioner_gallery/edit_user_profile.html'
    form_class = UserProfileForm
    def get_form_kwargs(self):
        # Bind the form to the user's existing profile so saving updates it.
        kwargs = super().get_form_kwargs()
        kwargs['instance'] = UserProfile.objects.get(user=self.request.user)
        return kwargs
    def form_valid(self, form):
        form.save()
        return redirect('user_profile')
class UserLogoutView(View):
    """Logs the current user out and sends them to the cover page."""
    def get(self, request):
        logout(request)
        return redirect('cover_page')
def newsfeed(request):
    """Render the newsfeed listing all watermarked gallery images.

    Filenames are sorted in reverse lexicographic order, which puts the
    latest-named uploads first (assumes sortable, e.g. timestamped, names
    — TODO confirm naming scheme).
    """
    photo_directory = os.path.join(settings.BASE_DIR, "staticfiles", "images", "gallery", "watermarked")
    photos = os.listdir(photo_directory)
    photos_list = sorted(photos, reverse=True)  # Sort the photos list in descending order
    context = {
        'photo_urls': photos_list
    }
    return render(request, 'pioner_gallery/newsfeed.html', context)
def gallery(request):
    """Render the gallery page with every watermarked gallery image filename."""
    photo_directory = os.path.join(settings.BASE_DIR, "staticfiles", "images", "gallery", "watermarked")
    # Copy the directory listing into the context (unsorted, as listed by the OS).
    photos_list = list(os.listdir(photo_directory))
    context = {
        'photo_urls': photos_list
    }
    return render(request, 'pioner_gallery/gallery.html', context)
def page_list(request):
    # Static page: renders the page list template.
    return render(request, 'pioner_gallery/page/list.html')
def blog(request):
    # Static page: renders the blog template.
    return render(request, 'pioner_gallery/blog.html')
# from pioner_gallery.webapp.models import Article, Category
from .webapp.models import Article
def home_page(request):
    """Render the home page with the six most recent articles."""
    # Fetch the latest 6 articles, newest first.
    articles = Article.objects.all().order_by('-created_at')[:6]
    context = {
        'articles': articles,
    }
    return render(request, "pioner_gallery/home.html", context)
@login_required
def marketplace(request):
    """Render the marketplace dashboard (login required)."""
    # return render(request, "pioner_gallery/marketplace_dashboard.html")
    return render(request, "pioner_gallery/marketplace_dashboard.html")
def cover_page(request):
    # Static page: renders the site cover template.
    return render(request, 'pioner_gallery/cover.html')
def contact_page(request):
    """Contact form: persists the submission and e-mails a copy to the owner.

    GET or an invalid POST re-renders the form; a valid POST saves a Contact
    row, sends the notification mail, and renders the success page.
    """
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            # Create a new Contact instance
            contact = Contact()
            # Assign the form data to the Contact fields
            contact.first_name = form.cleaned_data['first_name']
            contact.last_name = form.cleaned_data['last_name']
            contact.email = form.cleaned_data['email']
            contact.message_subject = form.cleaned_data['message_subject']
            contact.message_body = form.cleaned_data['message_body']
            # Save the contact to the database
            contact.save()
            # Section for E-mail sending:
            subject = 'New Contact Submission from ' + form.cleaned_data['first_name']
            message = 'You have received a new contact form submission from ' + form.cleaned_data['first_name'] + ' ' + \
                      form.cleaned_data['last_name'] + '. Email: ' + form.cleaned_data[
                          'email'] + '. Message Subject: ' + \
                      form.cleaned_data['message_subject'] + '. Message Body: ' + form.cleaned_data['message_body']
            # NOTE(review): sender/recipient addresses are hard-coded — consider
            # moving them to settings.
            send_mail(subject, message, 'iv.svetlin@outlook.com', ['isvetllin@gmail.com'])
            # End of E-mail sending section.
            # Redirect or show a success message
            return render(request, 'pioner_gallery/contact_success.html')
    else:
        form = ContactForm()
    return render(request, 'pioner_gallery/contact.html', {'form': form})
def user_login(request):
    """Log a user in by e-mail + password.

    NOTE(review): a successful login renders ``registration/register.html``
    instead of redirecting — confirm this is intended. The debug print should
    probably be removed or replaced with logging.
    """
    context = {
        'page_title': 'User Login Page',
    }
    if request.method == 'POST':
        # Field names match the Bootstrap floating-label inputs in the template.
        email = request.POST.get('floatingInput')
        password = request.POST.get('floatingPassword')
        try:
            # Get the user by email
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            user = None
        if user is not None and user.check_password(password):
            login(request, user)
            print('Logged in now')
            context['login_success'] = 'Successfully logged in!'
            return render(request, 'registration/register.html', context) # Redirect as needed
        else:
            context['login_error'] = 'Invalid credentials. Please try again.'
    return render(request, 'pioner_gallery/user_login.html', context)
def register_view(request):
    """Register a new user, create their UserProfile, and log them in.

    NOTE(review): ``subscribe`` is read but never used, and the debug prints
    should be replaced with logging.
    """
    if request.method == 'POST':
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        email = request.POST.get('email')
        username = request.POST.get('username')
        password = request.POST.get('password')
        subscribe = request.POST.get('subscribe', False)
        # Check if the email is unique before creating a new user
        if not User.objects.filter(email=email).exists():
            # Create a new user in the database (create_user hashes the password)
            user = User.objects.create_user(username=username, email=email, password=password, first_name=first_name,
                                            last_name=last_name)
            print(f"Created user: {user}")
            # Create a UserProfile instance for the user and link it to the user
            user_profile = UserProfile.objects.create(user=user, email=email, username=username)
            messages.success(request, f'User profile created: {username}')
            print(f"Created UserProfile: {user_profile}")
            # Authenticate the user (log them in immediately after registration)
            user = authenticate(request, username=username, password=password)
            if user is not None:
                login(request, user)
                # Redirect to a success page or desired URL after successful registration
                # messages.success(request, 'Registration successful! You are now logged in.')
                return redirect('user_login')
            else:
                # Handle registration failure (e.g., username already taken)
                messages.error(request, 'Registration failed. Please try again.')
                print("Authentication failed.")
        else:
            # Handle registration failure (e.g., email already taken)
            messages.warning(request, 'Email already exists. Please choose another one.')
            print("Email already exists.")
    return render(request, 'registration/index.html')
@login_required
def user_profile(request):
    """Show the logged-in user's profile; a POST updates the profile picture.

    NOTE(review): after a valid POST there is no redirect (no POST-redirect-GET),
    and the debug print should be removed.
    """
    user_profile = get_object_or_404(UserProfile, user=request.user)
    print(user_profile)
    if request.method == 'POST':
        form = ProfilePictureForm(request.POST, request.FILES, instance=user_profile)
        if form.is_valid():
            form.save()
    # Create a new form for the profile picture upload
    form = ProfilePictureForm(instance=user_profile)
    return render(request, 'pioner_gallery/profile.html', {
        'user_profile': user_profile,
        'form': form,
    })
@login_required
def delete_user_profile(request):
    """Delete the logged-in user's profile and account.

    GET shows the confirmation page; POST performs the deletion and redirects
    to the login page regardless of outcome (errors reported via messages).
    """
    if request.method == "POST":
        try:
            profile = UserProfile.objects.get(user=request.user)
            user = profile.user
            profile.delete()  # This will delete the profile
            user.delete()  # This will delete the user
            messages.success(request, "Your profile has been deleted!")
        except UserProfile.DoesNotExist:
            messages.error(request, "User profile not found.")
        except Exception as e:
            # Broad catch so the user still gets redirected with an error message.
            messages.error(request, f"An error occurred: {str(e)}")
        return redirect('user_login')
    else:
        # Handle the GET request
        return render(request, 'pioner_gallery/delete_confirmation.html')
@login_required
def delete_confirmation(request):
    # Static page: asks the user to confirm account deletion.
    return render(request, 'pioner_gallery/delete_confirmation.html')
def privacy(request):
    # Static page: privacy policy.
    return render(request, 'pioner_gallery/privacy.html')
def terms(request):
    # Static page: terms of service.
    return render(request, 'pioner_gallery/terms.html')
@require_POST
def cookie_consent(request):
    """Store the user's cookie-consent decision (JSON body: {"consent": ...}).

    Authenticated users persist it on their profile; anonymous users keep it
    in the session. NOTE(review): a malformed JSON body raises and returns a
    500 — consider returning a 400 instead.
    """
    data = json.loads(request.body)
    consent = data.get('consent')
    # Check if user is authenticated
    if request.user.is_authenticated:
        # Save decision to the database
        profile = request.user.userprofile
        profile.cookie_consent = consent
        profile.save()
    else:
        # Save to session for anonymous users
        request.session['cookie_consent'] = consent
    return JsonResponse({"status": "success"})
| ivn-svn/pionergallery | pionergallery/pioner_gallery/views.py | views.py | py | 13,406 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.views.generic.Tem... |
34666592380 | """
Useful functions for processing SMBL Data
"""
import requests
import sys
import re
import libsbml
import xmltodict
import json
def delete_doubles(arr):
    """Return a new list with duplicates removed, keeping first occurrences.

    Membership is tested with ``==`` (not hashing), so unhashable elements
    such as lists are supported, at O(n^2) worst-case cost.

    :param arr: list()
    :return: Given list, without duplicated entries
    """
    unique = []
    for element in arr:
        # idiomatic membership test instead of calling __contains__ directly
        if element not in unique:
            unique.append(element)
    return unique
def compare_formulas(formulas, charges=[]):
    """
    compares formulas
    :param formulas: list of formulas
    :param charges: charges of molecules as list of integers; when given,
        differences in H counts between consecutive formulas are accepted if
        they match the corresponding charge difference
    :return: True, if all formulas have the same components with the same amount
    <formula> must be string: Upper case character marks new Element (Mg12Ag2 = [Mg12, Ag2] & MG12AG2 = [M,G12,A,G2])

    NOTE: the mutable default ``charges=[]`` is safe here because it is only
    read, never mutated.
    """
    # Separate Components of Formula
    formulas_split = []
    for formula in formulas:
        formula_split = []
        # separates the atoms
        for char in formula:
            if char.isupper():
                formula_split.append(char)
            else:
                # lowercase letters / digits extend the current element token
                formula_split[len(formula_split) - 1] = formula_split[len(formula_split) - 1] + char
        # adds "1" to formula, if no number is given at the end
        for i in range(len(formula_split)):
            if re.search("[0-9]", formula_split[i]) is None:
                formula_split[i] = formula_split[i] + "1"
        # adds separated formulas to a list
        formulas_split.append(formula_split)
    # Iterates through all formulas, comparing each to its successor
    for j in range(len(formulas_split) - 1):
        for component in formulas_split[j]:
            # accounts for hydrogen - charge relationship:
            # adjust the H count of formula j by the charge difference before comparing
            # (regex matches a lone "H" token, not e.g. "He" — TODO confirm pattern intent)
            if charges and not re.search("^H(?![a-z])+([0-9])*", component) is None:
                component = int(component.split("H")[1])
                component = component + (charges[j + 1] - charges[j])
                component = "H" + str(component)
            # Check next element for current element
            if component not in formulas_split[j + 1]:
                return False
        # Check whether all components were in formula
        if len(formulas_split[j]) != len(formulas_split[j + 1]):
            return False
    return True
# ++ Get Metabolite Data from BiGG Database ++
def bigg_request(_id: str, search_type: str = "metabolites"):
"""
Requests an entry from the BIGG Database
:param _id: str e.g. "nh3"
:param search_type: str e.g. "metabolites"
:return: decoded .json into dictionary
"""
custom_request = "http://bigg.ucsd.edu/api/v2/universal/" + search_type + "/" + _id
req = requests.get(custom_request, headers={"Content-Type": "application/json"})
if not req.ok:
req.raise_for_status()
sys.exit()
decoded_req = req.json()
return decoded_req
# ++ Get Metabolite Data from Biocyc Database ++
def biocyc_request(id_org: str, db: str, id_db: str):
"""
Requests an entry from the BioCyc DB
:param db: Database e.g. BIGG, SEED,..
:param id_db: ID from Database e.g. atp, cpd0001
:param id_org: ID of organism e.g. GCF_000010185
:return: decoded .json into dictionary
"""
custom_request = f"https://websvc.biocyc.org/{id_org}/foreignid?ids={db}:{id_db}&fmt=json"
req = requests.get(custom_request, headers={"Content-Type": "application/json"})
if not req.ok:
req.raise_for_status()
sys.exit()
try:
decoded_req = req.json()
except json.decoder.JSONDecodeError:
assert id_org != "meta"
decoded_req = biocyc_request("meta", db, id_db)
return decoded_req
# KEGG Request Function
def kegg_get(org_id: str, kegg_id: str):
request_url = f"http://rest.kegg.jp/get/{org_id}:{kegg_id}"
req = requests.get(request_url).text.split("\n")
return req
# ++ Get Metabolite Data from BioCyc Database ++
def biocyc_get(id_org: str, id_biocyc: str, detail: str = "full"):
"""
Requests an entry from the BioCyc DB
:param detail: either none, low or full, defaults to full
:param id_biocyc: ID of object e.g. ATP
:param id_org: ID of organism e.g. GCF_000010185
:return: decoded .xml into dictionary
"""
custom_request = f"https://websvc.biocyc.org/getxml?id={id_org}:{id_biocyc}&detail={detail}"
req = requests.get(custom_request)
if not req.ok:
req.raise_for_status()
sys.exit()
decoded_req = xmltodict.parse(req.content)
return decoded_req
def biocyc_get_from_formula(id_org: str, formula: str):
    """
    Requests compounds matching a chemical formula from the BioCyc DB
    :param id_org: ID of organism e.g. GCF_000010185
    :param formula: chemical formula, e.g. "C10H16N5O13P3"
    :return: decoded .json into dictionary
    """
    custom_request = f"https://websvc.biocyc.org/{id_org}/CF?cfs={formula}&fmt=json"
    req = requests.get(custom_request)
    if not req.ok:
        req.raise_for_status()
        sys.exit()
    try:
        decoded_req = req.json()
    except json.decoder.JSONDecodeError:
        # No JSON for this organism: retry once against the generic "meta" DB.
        # The assert prevents infinite recursion.
        assert id_org != "meta"
        decoded_req = biocyc_get_from_formula("meta", formula)
    return decoded_req
def make_cv_term(link: str, qual_type=libsbml.BQB_IS):
    """
    :param qual_type: libsbml biological qualifier type, defaults to BQB_IS
    :param link: string that is added to CV-Term
    :return: libsbml.CVTerm
    This method is not generic, but only creates species and reaction standard CV Terms.
    """
    c = libsbml.CVTerm()
    # Always a biological (not model) qualifier, with the requested subtype.
    c.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
    c.setBiologicalQualifierType(qual_type)
    c.addResource(link)
    return c
def add_link_annotation_species(model, lnk, qual_type, s_id):
    """Attach a CV-Term link annotation to species ``s_id`` of ``model``.

    :param qual_type: libsbml.QUALIFIER
    :param model: libsbml.model
    :param lnk: string (resource URI)
    :param s_id: string (species id)
    :return: libsbml.model
    """
    cv_term = make_cv_term(lnk, qual_type)
    # eliminate duplicates
    # NOTE(review): dedup relies on CVTerm equality semantics; if libsbml
    # compares by identity this will never detect logical duplicates — confirm.
    list_cv = []
    for i in range(model.getSpecies(s_id).getNumCVTerms()):
        list_cv.append(model.getSpecies(s_id).getCVTerm(i))
    if cv_term not in list_cv:
        model.getSpecies(s_id).addCVTerm(cv_term)
    return model
def add_link_annotation_reaction(model, lnk, qual_type, s_id):
    """Attach a CV-Term link annotation to reaction ``s_id`` of ``model``.

    :param qual_type: libsbml.QUALIFIER
    :param model: libsbml.model
    :param lnk: string (resource URI)
    :param s_id: string (reaction id)
    :return: libsbml.model
    """
    cv_term = make_cv_term(lnk, qual_type)
    # eliminate duplicates
    # NOTE(review): dedup relies on CVTerm equality semantics; if libsbml
    # compares by identity this will never detect logical duplicates — confirm.
    list_cv = []
    for k in range(model.getReaction(s_id).getNumCVTerms()):
        list_cv.append(model.getReaction(s_id).getCVTerm(k))
    if cv_term not in list_cv:
        model.getReaction(s_id).addCVTerm(cv_term)
    return model
def add_note_species(model, note: str, fbc_id):
    """Append an XHTML note paragraph to species ``fbc_id``, skipping exact
    duplicates of already-present note text.

    :param fbc_id: str (species id)
    :param model: libsbml.model
    :param note: str
    :return: libsbml.model
    """
    str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
    if not model.getSpecies(fbc_id).isSetNotes():
        model.getSpecies(fbc_id).setNotes(str_note)
    else:
        # Only append when the note text is not already present.
        notes_curent = model.getSpecies(fbc_id).getNotes().toXMLString()
        if note not in notes_curent:
            model.getSpecies(fbc_id).appendNotes(str_note)
    return model
def add_note_gene_product(model, note: str, fbc_id):
    """Append an XHTML note paragraph to fbc gene product ``fbc_id``, skipping
    exact duplicates of already-present note text.

    :param fbc_id: str (gene product id)
    :param model: libsbml.model
    :param note: str
    :return: libsbml.model
    """
    str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
    if not model.getPlugin('fbc').getGeneProduct(fbc_id).isSetNotes():
        model.getPlugin('fbc').getGeneProduct(fbc_id).setNotes(str_note)
    else:
        # Only append when the note text is not already present.
        notes_curent = model.getPlugin('fbc').getGeneProduct(fbc_id).getNotes().toXMLString()
        if note not in notes_curent:
            model.getPlugin('fbc').getGeneProduct(fbc_id).appendNotes(str_note)
    return model
def add_note_reaction(model, note: str, fbc_id):
    """Append an XHTML note paragraph to reaction ``fbc_id``, skipping exact
    duplicates of already-present note text.

    :param model: libsbml.model
    :param note: str
    :param fbc_id: str (reaction id)
    :return: libsbml.model
    """
    str_note = f"<body xmlns=\"http://www.w3.org/1999/xhtml\">\n <p>{note}</p>\n </body>"
    if not model.getReaction(fbc_id).isSetNotes():
        model.getReaction(fbc_id).setNotes(str_note)
    else:
        # Only append when the note text is not already present.
        notes_curent = model.getReaction(fbc_id).getNotes().toXMLString()
        if note not in notes_curent:
            model.getReaction(fbc_id).appendNotes(str_note)
    return model
def dict_add_overlap_to_list(orig_dict, extend_dict):
    """Merge *extend_dict* into *orig_dict* in place and return it.

    New keys are copied over unchanged. For keys present in both dicts the
    two values are turned into sets (scalars and strings become one-element
    sets; other iterables are expanded), unioned, and stored as a list.
    Note: list order after the union is unspecified.
    """
    def _as_set(value):
        # Strings count as scalars, not as iterables of characters.
        if hasattr(value, '__iter__') and not isinstance(value, str):
            return set(value)
        return {value}

    for key, new_value in extend_dict.items():
        if key in orig_dict:
            orig_dict[key] = list(_as_set(orig_dict[key]) | _as_set(new_value))
        else:
            orig_dict[key] = new_value
    return orig_dict
| JosuaCarl/Script_Assisted_Modeling | helper_functions.py | helper_functions.py | py | 9,195 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 87,
... |
44186623516 | import tensorflow as tf
import pickle
from metrics import PSNRMean, SSIMMean
from losses import ltm_loss
import utils
from models.tone_curve_net import ToneCurveNetConv
from models.residual_net import LTMNetResConv
import os
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
def vgg_layers(layer_names):
    """ Creates a vgg model that returns a list of intermediate output values.

    :param layer_names: list of VGG19 layer names whose outputs to expose
    :return: frozen tf.keras.Model mapping an image batch to those outputs
    """
    # Load pretrained VGG, trained on imagenet data
    vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    # Freeze the backbone — used only as a fixed feature extractor.
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]
    model = tf.keras.Model([vgg.input], outputs)
    return model
def get_model(args):
    """Build the LTM model selected by ``args.model_architecture``.

    Supported values: 'conv' (ToneCurveNetConv) and 'res_conv'
    (LTMNetResConv); anything else raises.
    """
    model_args = {
        'grid_size': args.grid_size,
        'control_points': args.control_points,
        'input_size': args.resize_to,
        'curves': args.curves,
        'pix_weight': args.pix_weight,
    }
    # Residual-specific hyperparameters only apply to the 'res*' architectures.
    if 'res' in args.model_architecture:
        model_args['residual_layers'] = args.residual_layers
        model_args['residual_filters'] = args.residual_filters
        model_args['residual_weight'] = args.residual_weight
    # VGG feature extractor for the perceptual loss term.
    # NOTE(review): the VGG backbone is constructed even when vgg_weight == 0
    # and then discarded — could be skipped to save start-up time.
    feat_layer = ['block1_conv1', 'block2_conv1']
    vgg = vgg_layers(feat_layer)
    model_args['vgg'] = vgg if args.vgg_weight != 0.0 else None
    model_args['vgg_weight'] = args.vgg_weight
    model_args['l1_weight'] = args.l1_weight
    if args.model_architecture == 'conv':
        model = ToneCurveNetConv(**model_args)
    elif args.model_architecture == 'res_conv':
        model = LTMNetResConv(**model_args)
    else:
        raise Exception('Model architecture not supported.')
    return model
def train(optimizer, train_ds, test_ds, args):
    """Build, compile and fit the model; saves checkpoints, TensorBoard logs,
    the model summary, and the training history as configured in ``args``.

    :param optimizer: tf.keras optimizer instance
    :param train_ds: training dataset yielding (input, target) batches
    :param test_ds: validation dataset
    :param args: parsed CLI/config namespace with paths and hyperparameters
    """
    # Checkpoint: keep only the best model according to the monitored metric.
    ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=args.ckpt_dir,
        save_freq='epoch',
        monitor=args.ckpt_monitor,
        mode=args.ckpt_monitor_mode,
        save_best_only=True)
    # Tensorboard
    tb_dir = args.tb_log_dir
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=tb_dir,
        update_freq='batch',
        profile_batch=0)
    terminates_on_nan_callback = utils.CustomTerminateOnNaN()
    # Metrics
    metrics = [PSNRMean(), SSIMMean()]
    model = get_model(args)
    loss_fn = ltm_loss
    model.build(input_shape=(None, args.resize_to[0], args.resize_to[1], 3))
    model.compile(optimizer, loss_fn, metrics)
    # Save model architecture
    # NOTE(review): plain open/close — a `with` block would be safer if
    # summary() raises.
    f = open(args.model_summary_fn, 'w')
    model.model().summary(print_fn=lambda arg: f.write(arg + '\n'))
    f.close()
    print('Starting model.fit()...')
    model.fit(train_ds,
              validation_data=test_ds,
              epochs=args.epochs,
              verbose=1,
              workers=2,
              use_multiprocessing=True,
              callbacks=[ckpt_callback, tensorboard_callback, terminates_on_nan_callback])
    # Save model history
    pickle.dump(model.history.history, open(args.train_hist_fn, 'wb'))
    # Save residual model architecture (appended after the main summary)
    if 'res' in args.model_architecture:
        f = open(args.model_summary_fn, 'a')
        model.residual_net.summary(print_fn=lambda arg: f.write(arg + '\n'))
        f.close()
| Atakhan2000/ltmnet | train.py | train.py | py | 3,206 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.applications.VGG19",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name... |
30151381555 | import numpy as np
import cv2
import matplotlib.pyplot as plt
def getScoreImg():
    """Slice the score-font sprite sheet ("numbers.png") into one image per
    digit and save them as 0.png ... 9.png."""
    nums = cv2.imread("numbers.png")
    # Invert: glyphs become white-on-black so the zero-padding rows blend in.
    ret, nums = cv2.threshold(nums,127,255,cv2.THRESH_BINARY_INV)
    nums = cv2.cvtColor(nums, cv2.COLOR_BGR2GRAY)
    # Pad blank rows above/below, then resize to height 40 keeping the
    # aspect ratio (presumably the sheet is 530x109 px -- TODO confirm).
    top = np.zeros((1,530))
    nums = np.concatenate((top,top,top,top, nums, top,top,top,top,top), axis=0)
    nums = cv2.resize(nums, (530*40//109, 40))
    # Re-threshold after interpolation, flipping back to the original polarity.
    ret, nums = cv2.threshold(nums,127,255,cv2.THRESH_BINARY_INV)
    # Hand-measured column ranges for each digit in the resized sheet.
    # NOTE(review): some ranges overlap by one pixel (76/77, 96/97, ...);
    # presumably intentional to keep full glyphs -- verify against the image.
    zero = nums[:, 2:20]
    one = nums[:, 21:38]
    two = nums[:, 39: 57]
    three = nums[:, 58: 77]
    four = nums[:, 76:97]
    five = nums[:, 96:115]
    six = nums[:, 115:134]
    seven = nums[:, 134:153]
    eight = nums[:, 152:172]
    nine = nums[:, 171:191]
    cv2.imwrite("0.png", zero)
    cv2.imwrite("1.png", one)
    cv2.imwrite("2.png", two)
    cv2.imwrite("3.png", three)
    cv2.imwrite("4.png", four)
    cv2.imwrite("5.png", five)
    cv2.imwrite("6.png", six)
    cv2.imwrite("7.png", seven)
    cv2.imwrite("8.png", eight)
    cv2.imwrite("9.png", nine)
    # Preview one digit to sanity-check the slicing.
    plt.imshow(one, cmap='gray')
    plt.show()
def getTimeImg():
    """Slice the clock-font sprite sheet ("time.png") into one image per
    digit and save them as time_0.png ... time_9.png."""
    nums = cv2.imread("time.png")
    ret, nums = cv2.threshold(nums,127,255,cv2.THRESH_BINARY_INV)
    nums = cv2.cvtColor(nums, cv2.COLOR_BGR2GRAY)
    print(nums.shape)
    # Padding experiment left disabled:
    # top = np.zeros((1,530))
    # nums = np.concatenate((top,top,top,top,top,top,top,top, nums,top,top,top,top,top,top,top,top,top), axis=0)
    # Resize to height 40 keeping the aspect ratio (presumably the sheet is
    # 480x172 px -- TODO confirm against time.png).
    nums = cv2.resize(nums, (480*40//172, 40))
    ret, nums = cv2.threshold(nums,127,255,cv2.THRESH_BINARY_INV)
    # Hand-measured column ranges; some overlap by a pixel (see getScoreImg).
    zero = nums[:, 0:12]
    one = nums[:, 12:21]
    two = nums[:, 22: 33]
    three = nums[:, 33: 44]
    four = nums[:, 43:56]
    five = nums[:, 55:66]
    six = nums[:, 66:78]
    seven = nums[:, 77:88]
    eight = nums[:, 88:100]
    nine = nums[:,99:110]
    cv2.imwrite("time_0.png", zero)
    cv2.imwrite("time_1.png", one)
    cv2.imwrite("time_2.png", two)
    cv2.imwrite("time_3.png", three)
    cv2.imwrite("time_4.png", four)
    cv2.imwrite("time_5.png", five)
    cv2.imwrite("time_6.png", six)
    cv2.imwrite("time_7.png", seven)
    cv2.imwrite("time_8.png", eight)
    cv2.imwrite("time_9.png", nine)
    # Preview one digit to sanity-check the slicing.
    plt.imshow(nine, cmap='gray')
    plt.show()
if __name__ == "__main__":
    # Regenerate the score-digit templates; uncomment getTimeImg() to also
    # rebuild the clock-digit templates.
    getScoreImg()
    # getTimeImg()
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_INV",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"... |
28462169332 | import pygame
import Config
import tile_map
import light_handling
# Main loop: render the tile map and the dynamic lights every frame.
win = pygame.display.set_mode(Config.WINDOW_SIZE)
clock = pygame.time.Clock()
# Renamed from `map`, which shadowed the Python builtin of the same name.
tile_grid = tile_map.Tile_map()
light = light_handling.light_handling(tile_grid.walls, tile_grid.points)

running = True
while running:
    clock.tick(Config.FPS)  # cap the frame rate
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    mouse_pos = pygame.mouse.get_pos()
    mouse_pressed = pygame.mouse.get_pressed()

    # Update state from mouse input, then redraw lights beneath the map.
    tile_grid.update_variables(mouse_pos, mouse_pressed, light)
    light.update_variables(mouse_pos, mouse_pressed)

    win.fill((0, 0, 0))
    light.draw(win)
    tile_grid.draw(win)
    pygame.display.update()
{
"api_name": "pygame.display.set_mode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "Config.WINDOW_SIZE",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
10110647533 | import hashlib
import imp
import tarfile
from typing import Iterable
import warnings
import zipfile
from pathlib import Path
import shutil
from urllib.parse import urlparse
from urllib.request import Request, urlopen
import warnings
import openmc.data
_BLOCK_SIZE = 16384
def state_download_size(download_size, uncompressed_size, units):
    """Warn the user how much disk space downloading and extracting the
    data may consume, in the given units."""
    template = ("WARNING: This script will download up to {0} {2} of data. "
                "Extracting and processing the data may require as much "
                "as {1} {2} of additional free disk space.")
    warnings.warn(template.format(download_size, uncompressed_size, units))
def process_neutron(path, output_dir, libver, temperatures=None):
    """Process ENDF neutron sublibrary file into HDF5 and write into a
    specified output directory.

    Parameters
    ----------
    path : ENDF neutron sublibrary file to convert (fed to NJOY).
    output_dir : directory the ``<nuclide>.h5`` file is written to; must
        support the ``/`` operator (pathlib.Path).
    libver : HDF5 library-version string forwarded to ``export_to_hdf5``.
    temperatures : optional temperatures forwarded to ``from_njoy``.
    """
    print(f'Converting: {path}')
    try:
        # NJOY conversion emits many benign UserWarnings; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            data = openmc.data.IncidentNeutron.from_njoy(
                path, temperatures=temperatures
            )
    except Exception as e:
        # Report which input failed before re-raising, so batch/parallel
        # runs can attribute the traceback to a file.
        print(path, e)
        raise
    h5_file = output_dir / f'{data.name}.h5'
    print(f'Writing {h5_file} ...')
    data.export_to_hdf5(h5_file, 'w', libver=libver)
def process_thermal(path_neutron, path_thermal, output_dir, libver):
    """Process ENDF thermal scattering sublibrary file into HDF5 and write
    into a specified output directory.

    Parameters
    ----------
    path_neutron : companion ENDF neutron sublibrary file required by NJOY.
    path_thermal : ENDF thermal scattering sublibrary file to convert.
    output_dir : directory the ``<name>.h5`` file is written to (pathlib.Path).
    libver : HDF5 library-version string forwarded to ``export_to_hdf5``.
    """
    print(f'Converting: {path_thermal}')
    try:
        # NJOY conversion emits many benign UserWarnings; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            data = openmc.data.ThermalScattering.from_njoy(
                path_neutron, path_thermal
            )
    except Exception as e:
        # Report which inputs failed before re-raising.
        print(path_neutron, path_thermal, e)
        raise
    h5_file = output_dir / f'{data.name}.h5'
    print(f'Writing {h5_file} ...')
    data.export_to_hdf5(h5_file, 'w', libver=libver)
def extract(
    compressed_files,
    extraction_dir,
    del_compressed_file=False,
    verbose=True,
):
    """Extracts zip, tar.gz or tgz compressed files

    Parameters
    ----------
    compressed_files : os.PathLike, str, or iterable of them
        The file, or an iterable of files, to extract.
    extraction_dir : str or os.PathLike
        The directory to extract the files to.
    del_compressed_file : bool
        Whether the compressed file(s) should be deleted (True) or not (False)
    verbose : bool
        Controls the printing to terminal, if True filenames of the extracted
        files will be printed.
    """
    # Accept plain strings for the output directory (the original called
    # Path.mkdir unbound, which failed for str input).
    extraction_dir = Path(extraction_dir)
    extraction_dir.mkdir(parents=True, exist_ok=True)

    # str is Iterable, so a single path given as a string must be wrapped
    # explicitly or it would be iterated character by character.
    if isinstance(compressed_files, (str, Path)) or not isinstance(
            compressed_files, Iterable):
        compressed_files = [compressed_files]

    for f in compressed_files:
        name = str(f)
        if name.endswith('.zip'):
            if verbose:
                print(f'Extracting {f} to {extraction_dir}')
            with zipfile.ZipFile(f, 'r') as zipf:
                zipf.extractall(path=extraction_dir)
        elif name.endswith(('.tar.gz', '.tgz', '.tar.bz2', '.tar.xz', '.xz')):
            if verbose:
                print(f'Extracting {f} to {extraction_dir}')
            with tarfile.open(f, 'r') as tgz:
                tgz.extractall(path=extraction_dir)
        elif name.endswith('.asc'):
            # Plain-text data file: just copy it alongside the extractions.
            shutil.copy(f, extraction_dir)
        else:
            raise ValueError('File type not currently supported by extraction '
                             f'function {name}')

    if del_compressed_file:
        # The original passed the whole list to shutil.rmtree, which
        # silently did nothing (ignore_errors swallowed the TypeError);
        # delete each archive file individually instead.
        for f in compressed_files:
            Path(f).unlink(missing_ok=True)
def download(
    url: str,
    checksum=None,
    as_browser: bool = False,
    output_path=None,
    output_filename=None,
    *,
    block_size: int = 16384,
    **kwargs
):
    """Download file from a URL

    Parameters
    ----------
    url : str
        URL from which to download
    checksum : str or None
        MD5 checksum to check against
    as_browser : bool
        Change User-Agent header to appear as a browser
    output_path : str or Path
        Specifies the directory location to save the downloaded file
    output_filename : str or Path
        Specifies the filename save the downloaded file. If left as None the
        filename of the download file is obtained from the url filename
    block_size : int
        Chunk size in bytes used while streaming to disk (default matches
        the module-level _BLOCK_SIZE of 16384).
    kwargs : dict
        Keyword arguments passed to :func:`urllib.request.urlopen`

    Returns
    -------
    local_path : pathlib.Path
        Name of file written locally

    Raises
    ------
    OSError
        If `checksum` is given and the downloaded file does not match it.
    """
    if as_browser:
        page = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    else:
        page = url

    with urlopen(page, **kwargs) as response:
        # Content length may be absent: non-HTTP handlers (e.g. file://)
        # have no .length attribute and HTTP servers may not report one.
        # The original crashed with AttributeError in that case.
        file_size = getattr(response, 'length', None)

        if output_filename is None:
            output_filename = Path(Path(urlparse(url).path).name)
            print(f'Using default output_filename {output_filename}')

        if output_path is None:
            local_path = Path(output_filename)
        else:
            Path(output_path).mkdir(parents=True, exist_ok=True)
            local_path = Path(output_path) / output_filename

        # Skip if already fully downloaded (only checkable with a known size).
        if file_size is not None and local_path.is_file():
            if local_path.stat().st_size == file_size:
                print(f'Skipping {local_path}, already downloaded')
                return local_path

        # Copy file to disk in chunks
        print(f'Downloading URL {url} to {local_path}')
        downloaded = 0
        with open(local_path, 'wb') as fh:
            while True:
                chunk = response.read(block_size)
                if not chunk:
                    break
                fh.write(chunk)
                downloaded += len(chunk)
                if file_size:
                    # Progress display only when the total size is known
                    # (avoids division by None/zero).
                    status = '{:10} [{:3.2f}%]'.format(
                        downloaded, downloaded * 100. / file_size)
                    print(status + '\b'*len(status), end='', flush=True)
        print('')

    if checksum is not None:
        # read_bytes() closes the file (the original open(...).read() leaked
        # the handle).
        downloadsum = hashlib.md5(local_path.read_bytes()).hexdigest()
        if downloadsum != checksum:
            raise OSError("MD5 checksum for {} does not match. If this is "
                          "your first time receiving this message, please "
                          "re-run the script. Otherwise, please contact "
                          "OpenMC developers by emailing "
                          "openmc-users@googlegroups.com.".format(local_path))

    return local_path
| openmc-data-storage/openmc_data | src/openmc_data/utils.py | utils.py | py | 6,546 | python | en | code | null | github-code | 1 | [
{
"api_name": "warnings.warn",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "openmc.da... |
20085148878 | """
Test the rendering mechanism to see if inquirer works
"""
from unittest.mock import Mock, create_autospec, patch
import inquirer
from pytest import fixture
from hacenada import render, session
@fixture
def renderer():
    """The InquirerRender instance under test."""
    rr = render.InquirerRender()
    return rr
def test_inquirer_type(renderer):
    """
    Do I look up inquirer question type by hacenada typename?
    """
    # "description" questions should map onto inquirer's free-text prompt.
    assert renderer._inquirer_dispatch("description") is inquirer.text
@fixture
def seshie():
    """
    A mock session
    """
    # autospec keeps the mock's surface identical to session.Session, so
    # calls to nonexistent attributes fail loudly.
    sesh = create_autospec(session.Session)
    sesh.storage.description = "DESCRIPTION"
    return sesh
def test_render(renderer, steppie, seshie):
    """
    Do I render questions to the screen?
    """
    # `steppie` comes from a fixture outside this file -- presumably a step
    # named "q1" with label "oh noo" (see the expected prompt text below).
    prompt = Mock(return_value="here we go")
    with patch.object(
        render.InquirerRender, "_inquirer_dispatch", autospec=True, return_value=prompt
    ) as m_prompt:
        ret = renderer.render(steppie, seshie)
    # Title is built from the session storage description + question name.
    m_prompt.return_value.assert_called_once_with("DESCRIPTION : q1\noh noo\n>>")
    assert ret == {"q1": "here we go"}
def test_render_no_description(renderer, steppie, seshie):
    """
    Do I display the right title when there's no description property set?
    """
    # With no storage description, the script preamble's name is used.
    seshie.storage.description = None
    seshie.script.preamble = {"name": "SCRIPT NAME"}
    prompt = Mock(return_value="here we go")
    with patch.object(
        render.InquirerRender, "_inquirer_dispatch", autospec=True, return_value=prompt
    ) as m_prompt:
        renderer.render(steppie, seshie)
    m_prompt.return_value.assert_called_once_with("SCRIPT NAME : q1\noh noo\n>>")
| corydodt/Hacenada | src/hacenada/test/test_render.py | test_render.py | py | 1,612 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "hacenada.render.InquirerRender",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "hacenada.render",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "inquire... |
73205778914 | import sys
from collections import deque
# BOJ 13335: trucks crossing a bridge. The bridge is modelled as a deque of
# length w; each popleft/append pair advances every truck by one unit.
n, w, L = map(int, input().split())
weight = deque(map(int, sys.stdin.readline().split()))  # waiting trucks' weights
bridge = deque()  # trucks currently on the bridge (0 = empty slot)
for i in range(w-1):  # fill the first w-1 slots with empty space
    bridge.append(0)
bridge.append(weight[0])  # drive the first truck onto the end of the bridge
weight.popleft()  # it left the waiting queue; one time unit has elapsed
time = 1
while True:
    bridge.popleft()
    if weight and sum(bridge) + weight[0] <= L:
        # A truck is waiting and fits under the max load: drive it on.
        bridge.append(weight.popleft())
    else:
        bridge.append(0)  # over the load limit: append an empty slot so trucks only advance
    time += 1  # each advance (popleft + append) costs one time unit
    if not weight and sum(bridge) == 0:  # nothing waiting and bridge empty: all crossed
        break
print(time)
###########################
# wait = deque()
# truck_sum = 0
# time = 0
# for i in weight:
# wait.append((i, w))
# # print(wait)
# while bridge or wait:
# # if len(bridge) < w and wait:
# if wait:
# truck_wgt, bri_time = wait.popleft()
# if len(bridge) < w and truck_sum + truck_wgt <= L:
# bridge.append((truck_wgt, bri_time))
# truck_sum += truck_wgt
# bri_time -= 1
# if bri_time == 0:
# bridge.popleft()
# truck_sum - truck_wgt
# time += 1
# print(time)
| eunjng5474/Study | week04/B_13335.py | B_13335.py | py | 1,902 | python | ko | code | 2 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
... |
44190566884 | from flask import Flask
from flask import redirect
from flask import render_template
from flask import url_for
from flask.ext.script import Manager
from flask.ext.sqlalchemy import SQLAlchemy
from getpass import getuser
from json import dumps
from logging import basicConfig
from logging import ERROR
from logging import getLogger
from operator import itemgetter
from os import getenv
from re import IGNORECASE
from re import sub
from sqlalchemy.sql import text
from subprocess import PIPE
from subprocess import Popen
##################################################################
######################## SETUP ###################################
##################################################################
# Root logger: errors only, terse single-line format.
basicConfig(level=ERROR,
            format='%(levelname)s: %(message)s')
logger = getLogger(__name__)

# Choose the database URI by developer username, falling back to environment
# variables (e.g. in CI / containers).
# NOTE(review): credentials are hard-coded in source -- these should move to
# environment variables or a config file.
if getuser() == 'marklindberg': # pragma: no cover
    SQLALCHEMY_DATABASE_URI = 'mysql://root:aoeuidhtns@127.0.0.1/db_name?charset=utf8'
elif getuser() == 'javier' : # pragma: no cover
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:@127.0.0.1/guestbook?charset=utf8'
elif getuser() == 'pscamman': # pragma: no cover
    SQLALCHEMY_DATABASE_URI = 'mysql://root:asdf@127.0.0.1/db?charset=utf8'
else: # pragma: no cover
    SQLALCHEMY_DATABASE_URI = \
        '{engine}://{username}:{password}@{hostname}/{database}?charset=utf8'.format(
            engine='mysql+pymysql',
            username=getenv('MYSQL_USER', 'root'),
            password=getenv('MYSQL_PASSWORD', ''),
            hostname=getenv('MYSQL_HOST', '127.0.0.1'),
            database=getenv('MYSQL_DATABASE', 'guestbook'))

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True   # commit at request teardown
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # silence overhead warning
manager = Manager(app)
db = SQLAlchemy(app)
##################################################################
######################## MODELS ##################################
##################################################################
class Artist(db.Model):
    """A card artist; linked one-to-many to the Editions they illustrated."""
    __tablename__ = 'artist'
    artist_id = db.Column(db.String(191), primary_key=True)
    name = db.Column(db.String(256), nullable=False)
    # Editions illustrated by this artist (lazy query, not a list).
    multiverse_ids = db.relationship('Edition', backref='artist',
                                     lazy='dynamic')

    def __init__(self, artist_id, name):
        self.artist_id = artist_id
        self.name = name

    def __repr__(self):
        return "[Artist: artist_id={}, name={}]".format(self.artist_id,
                                                        self.name)

    @property
    def serialize_part(self):
        # Shallow dict (no related editions) -- used by list endpoints.
        return dict(artist_id=self.artist_id, name=self.name)

    @property
    def serialize_full(self):
        # Shallow fields plus every related edition -- used by detail endpoints.
        d = self.serialize_part
        d['multiverse_ids'] = self.serialize_multiverse_ids
        return d

    @property
    def serialize_multiverse_ids(self):
        return [item.serialize for item in self.multiverse_ids]
class Set(db.Model):
    """A card set (expansion); linked one-to-many to its printed Editions."""
    __tablename__ = 'set'
    set_id = db.Column(db.String(191), primary_key=True)
    name = db.Column(db.String(256), nullable=False)
    # Editions printed in this set (lazy query, not a list).
    multiverse_ids = db.relationship('Edition', backref ='set', lazy='dynamic')

    def __init__(self, set_id, name):
        self.set_id = set_id
        self.name = name

    def __repr__(self):
        return "[Set: set_id={}, name={}]".format(self.set_id, self.name)

    @property
    def serialize_part(self):
        # Shallow dict (no related editions) -- used by list endpoints.
        return dict(set_id=self.set_id, name=self.name)

    @property
    def serialize_full(self):
        # Shallow fields plus every related edition -- used by detail endpoints.
        d = self.serialize_part
        d['multiverse_ids'] = self.serialize_multiverse_ids
        return d

    @property
    def serialize_multiverse_ids(self):
        return [item.serialize for item in self.multiverse_ids]
class Card(db.Model):
    """A card's printing-independent data (name, cost, rules text, ...).

    Per-printing data (image, flavor, rarity) lives on Edition.
    """
    __tablename__ = 'card'
    card_id = db.Column(db.String(191), primary_key=True)
    name = db.Column(db.String(256), nullable=False)
    colors = db.Column(db.String(256), nullable=False)
    cost = db.Column(db.String(256), nullable=False)
    cmc = db.Column(db.Integer, nullable=False)          # converted mana cost
    text = db.Column(db.String(1024), nullable=True)     # rules text
    types = db.Column(db.String(256), nullable=False)
    formats = db.Column(db.String(256), nullable=False)  # format legality
    subtypes = db.Column(db.String(256), nullable=True)
    # power/toughness are strings: values like '*' are not numeric.
    power = db.Column(db.String(256), nullable=True)
    toughness = db.Column(db.String(256), nullable=True)
    # Printings of this card (lazy query, not a list).
    multiverse_ids = db.relationship('Edition', backref ='card',
                                     lazy='dynamic')

    def __init__(self, cost, cmc, text, types, name, card_id, formats,
                 subtypes, colors, power, toughness):
        self.card_id = card_id
        self.name = name
        self.colors = colors
        self.cost = cost
        self.cmc = cmc
        self.text = text
        self.types = types
        self.formats = formats
        self.subtypes = subtypes
        self.power = power
        self.toughness = toughness

    def __repr__(self):
        return """[Card: card_id={}, name={}, colors={}, cost={}, cmc={},
                   text={}, types={}, formats={}, subtypes={}, power={},
                   toughness={}]""".format(
            self.card_id, self.name, self.colors, self.cost, self.cmc,
            self.text, self.types, self.formats, self.subtypes,
            self.power, self.toughness)

    @property
    def serialize_part(self):
        # Shallow dict (no related editions) -- used by list endpoints.
        return dict(card_id=self.card_id, name=self.name,
                    colors=self.colors,
                    cost=self.cost, cmc=self.cmc, text=self.text,
                    types=self.types, formats=self.formats,
                    subtypes=self.subtypes, power=self.power,
                    toughness=self.toughness)

    @property
    def serialize_full(self):
        # Shallow fields plus every related edition -- used by detail endpoints.
        d = self.serialize_part
        d['multiverse_ids'] = self.serialize_multiverse_ids
        return d

    @property
    def serialize_multiverse_ids(self):
        return [item.serialize for item in self.multiverse_ids]
class Edition(db.Model):
    """A single printing of a card, keyed by its multiverse id.

    Joins Card, Artist and Set and holds per-printing presentation data
    (image, flavor text, rarity, collector number, layout).
    """
    __tablename__ = 'edition'
    multiverse_id = db.Column(db.String(191), primary_key=True)
    artist_id = db.Column(db.String(191),
                          db.ForeignKey('artist.artist_id'))
    set_id = db.Column(db.String(191), db.ForeignKey('set.set_id'))
    card_id = db.Column(db.String(191), db.ForeignKey('card.card_id'))
    image_url = db.Column(db.String(256), nullable=False)
    flavor = db.Column(db.String(512), nullable=True)
    rarity = db.Column(db.String(256), nullable=False)
    number = db.Column(db.String(256), nullable=False)  # collector number
    layout = db.Column(db.String(256), nullable=False)

    def __init__(self, multiverse_id, artist_id, set_id, card_id, image_url,
                 flavor, rarity, number, layout):
        self.multiverse_id = multiverse_id
        self.artist_id = artist_id
        self.set_id = set_id
        self.card_id = card_id
        self.image_url = image_url
        self.flavor = flavor
        self.rarity = rarity
        self.number = number
        self.layout = layout

    def __repr__(self):
        return """[Edition: multiverse_id={}, artist_id={}, set_id={},
                   card_id={}, image_url={}, flavor={}, rarity={},
                   number={}, layout={}]""".format(self.multiverse_id,
            self.artist_id, self.set_id, self.card_id, self.image_url,
            self.flavor, self.rarity, self.number, self.layout)

    @property
    def serialize(self):
        # Pulls related names through the card/artist/set backrefs.
        return dict(multiverse_id=self.multiverse_id, artist_id=self.artist_id,
                    set_id=self.set_id, card_id=self.card_id,
                    image_url=self.image_url, flavor=self.flavor,
                    rarity=self.rarity, number=self.number, layout=self.layout,
                    card_name=self.card.name, artist_name=self.artist.name,
                    set_name=self.set.name)
def serialize_card_table_data_paginated(page_num, sort_col):
    """Return one page (25 rows) of the aggregated card table.

    page_num -- zero-based page index; non-numeric or negative input is
                clamped to 0.
    sort_col -- index into the ORDER BY whitelist below; out-of-range or
                non-integer values fall back to the first entry.
    """
    # Whitelisted ORDER BY clauses: sort_col can only select from this list,
    # so formatting the chosen entry into the SQL below is safe.
    col_id_dict = ["e.card_id asc",
                   "a.artist_id asc",
                   "s.set_id asc",
                   "editions asc",
                   "rarity asc",
                   "e.card_id desc",
                   "a.artist_id desc",
                   "s.set_id desc",
                   "editions desc",
                   "rarity desc"]
    try:
        page_num = int(page_num)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors are not hidden.
        page_num = 0
    if page_num < 0:
        page_num = 0
    try:
        col_id = col_id_dict[sort_col]
    except (TypeError, IndexError):
        # Narrowed from a bare `except:`.
        col_id = col_id_dict[0]
    cards_per_page = 25
    firstrow = cards_per_page * page_num
    sql = '''select c.name,
                    c.card_id,
                    c.cost,
                    GROUP_CONCAT(DISTINCT e.multiverse_id SEPARATOR '|!|') AS editions,
                    GROUP_CONCAT(DISTINCT e.rarity SEPARATOR '|!|') AS rarities,
                    GROUP_CONCAT(DISTINCT a.name SEPARATOR '|!|') AS artists,
                    GROUP_CONCAT(DISTINCT a.artist_id SEPARATOR '|!|') AS artist_ids,
                    GROUP_CONCAT(DISTINCT s.name SEPARATOR '|!|') AS sets,
                    GROUP_CONCAT(DISTINCT s.set_id SEPARATOR '|!|') AS set_ids,
                    count(*) AS num_editions
             from card AS c
             left join edition AS e ON e.card_id = c.card_id
             left join artist AS a ON a.artist_id = e.artist_id
             left join `set` AS s ON s.set_id = e.set_id
             group by c.name
             order by {}
             limit 25
             offset {}
          '''.format(col_id, firstrow)
    # Unpack the '|!|'-joined artist/set aggregates into lists of dicts.
    ret = []
    for table_data in db.engine.execute(sql).fetchall():
        artists = []
        dbArtists = table_data['artists'].split('|!|')
        artist_ids = table_data['artist_ids'].split('|!|')
        for index, artist_name in enumerate(dbArtists):
            artists.append({'artist_id': artist_ids[index], 'name': artist_name})
        sets = []
        dbSets = table_data['sets'].split('|!|')
        set_ids = table_data['set_ids'].split('|!|')
        for index, set_name in enumerate(dbSets):
            sets.append({'set_id': set_ids[index], 'name': set_name})
        ret.append({'name': table_data['name'],
                    'card_id': table_data['card_id'],
                    'cost': table_data['cost'],
                    'editions': table_data['editions'],
                    'rarities': table_data['rarities'],
                    'artists': artists,
                    'sets': sets,
                    'num_editions': table_data['num_editions']
                    })
    return ret
def serialize_artist_table_data_paginated(page_num, sort_col):
    """Return one page (25 rows) of per-artist edition counts by rarity.

    page_num -- zero-based page index; non-numeric or negative input is
                clamped to 0.
    sort_col -- index into the ORDER BY whitelist below; out-of-range or
                non-integer values fall back to the first entry.
    """
    # Whitelisted ORDER BY clauses: sort_col can only select from this list,
    # so formatting the chosen entry into the SQL below is safe.
    col_id_dict = ["a.artist_id asc",
                   "total asc",
                   "commons asc",
                   "uncommons asc",
                   "rares asc",
                   "mythics asc",
                   "a.artist_id desc",
                   "total desc",
                   "commons desc",
                   "uncommons desc",
                   "rares desc",
                   "mythics desc"]
    try:
        page_num = int(page_num)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors are not hidden.
        page_num = 0
    if page_num < 0:
        page_num = 0
    try:
        col_id = col_id_dict[sort_col]
    except (TypeError, IndexError):
        # Narrowed from a bare `except:`.
        col_id = col_id_dict[0]
    cards_per_page = 25
    firstrow = cards_per_page * page_num
    sql = '''select a.name,
                    a.artist_id,
                    count(*) as total,
                    cast(sum(case when e.rarity='common' then 1 else 0 end) as signed) as commons,
                    cast(sum(case when e.rarity='uncommon' then 1 else 0 end) as signed) as uncommons,
                    cast(sum(case when e.rarity='rare' then 1 else 0 end) as signed) as rares,
                    cast(sum(case when e.rarity='mythic' then 1 else 0 end) as signed) as mythics
             from artist as a
             inner join edition as e
             on a.artist_id=e.artist_id
             group by a.artist_id
             order by {}
             limit 25
             offset {}
          '''.format(col_id, firstrow)
    # Convert the result rows into plain dicts for JSON serialization.
    ret = []
    for table_data in db.engine.execute(sql).fetchall():
        ret.append({'name': table_data['name'],
                    'artist_id': table_data['artist_id'],
                    'total': table_data['total'],
                    'commons': table_data['commons'],
                    'uncommons': table_data['uncommons'],
                    'rares': table_data['rares'],
                    'mythics': table_data['mythics']
                    })
    return ret
def serialize_set_table_data_paginated(page_num, sort_col):
    """Return one page (25 rows) of per-set edition counts by rarity.

    page_num -- zero-based page index; non-numeric or negative input is
                clamped to 0.
    sort_col -- index into the ORDER BY whitelist below; out-of-range or
                non-integer values fall back to the first entry.
    """
    # Whitelisted ORDER BY clauses: sort_col can only select from this list,
    # so formatting the chosen entry into the SQL below is safe.
    col_id_dict = ["s.set_id asc",
                   "commons asc",
                   "uncommons asc",
                   "rares asc",
                   "mythics asc",
                   "total asc",
                   "s.set_id desc",
                   "commons desc",
                   "uncommons desc",
                   "rares desc",
                   "mythics desc",
                   "total desc"]
    try:
        page_num = int(page_num)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors are not hidden.
        page_num = 0
    if page_num < 0:
        page_num = 0
    try:
        col_id = col_id_dict[sort_col]
    except (TypeError, IndexError):
        # Narrowed from a bare `except:`.
        col_id = col_id_dict[0]
    cards_per_page = 25
    firstrow = cards_per_page * page_num
    sql = '''select s.name,
                    s.set_id,
                    count(*) as total,
                    cast(sum(case when e.rarity='common' then 1 else 0 end) as signed) as commons,
                    cast(sum(case when e.rarity='uncommon' then 1 else 0 end) as signed) as uncommons,
                    cast(sum(case when e.rarity='rare' then 1 else 0 end) as signed) as rares,
                    cast(sum(case when e.rarity='mythic' then 1 else 0 end) as signed) as mythics
             from `set` as s
             inner join edition as e
             on s.set_id=e.set_id
             group by s.set_id
             order by {}
             limit 25
             offset {}
          '''.format(col_id, firstrow)
    # Convert the result rows into plain dicts for JSON serialization.
    ret = []
    for table_data in db.engine.execute(sql).fetchall():
        ret.append({'name': table_data['name'],
                    'set_id': table_data['set_id'],
                    'total': table_data['total'],
                    'commons': table_data['commons'],
                    'uncommons': table_data['uncommons'],
                    'rares': table_data['rares'],
                    'mythics': table_data['mythics']
                    })
    return ret
def gensql(num_terms, page_num):
    """Build the search SELECT and its matching COUNT query.

    Each of the `num_terms` search terms becomes one OR-group of LIKE
    clauses over every column in `cols`, using bind placeholders
    :term0, :term1, ... (values are bound later, so no injection here).
    Returns (select_sql, count_sql); `page_num` only affects the
    LIMIT/OFFSET of the select statement.
    """
    # Columns each search term is matched against.
    cols = ['c.name', 'c.text', 'c.types', 'c.cost', 'e.image_url', 'c.subtypes', 'c.formats',
            'c.colors', 'e.flavor', 'e.rarity', 'e.layout', 'a.name',
            's.set_id', 's.name']
    sqlBase = '''
        select c.card_id,
               a.artist_id,
               s.set_id,
               c.cost,
               e.image_url,
               c.name,
               c.text,
               c.types,
               c.subtypes,
               c.formats,
               c.colors,
               e.flavor,
               e.rarity,
               e.layout,
               a.name,
               s.set_id,
               s.name
        from card as c
        left join edition as e
        on e.card_id = c.card_id
        left join artist as a
        on a.artist_id = e.artist_id
        left join `set` as s
        on s.set_id = e.set_id
        where'''
    countBase = '''
        select count(*)
        from card as c
        left join edition as e
        on e.card_id = c.card_id
        left join artist as a
        on a.artist_id = e.artist_id
        left join `set` as s
        on s.set_id = e.set_id
        where'''
    # One parenthesised OR-group per term, joined with ' or '.
    for i in range(0, num_terms):
        firstcol = True
        if i != 0:
            sqlBase += " " + ' or '
            countBase += " " + ' or '
        sqlBase += " ("
        countBase += " ("
        for col in cols:
            if firstcol:
                firstcol = False
            else:
                sqlBase += "or "
                countBase += "or "
            # Placeholder name carries the term index, e.g. :term0.
            sqlBase += " {} like :term".format(col) + str(i) + "\n"
            countBase += " {} like :term".format(col) + str(i) + "\n"
        sqlBase += " ) \n"
        countBase += " ) \n"
    # Pagination only applies to the select, never the count.
    sqlBase += " limit 25 offset {}".format(max(page_num * 25,0))
    return sqlBase, countBase
def count_matches(targets, source):
    """Return how many strings in *targets* occur, case-insensitively,
    within the string form of *source*."""
    haystack = str(source).lower()
    return sum(1 for needle in targets if needle.lower() in haystack)
def bold_search_terms(data, terms):
    """Wrap every case-insensitive occurrence of each term in *data*'s
    fields with blue <b> markup, returning the fields as strings.

    The terms come from user search input, so they are regex-escaped before
    being embedded in the pattern -- the original treated input like 'a+'
    or '(' as regex syntax, mangling highlights or raising re.error.
    """
    from re import escape  # local import keeps this fix self-contained

    result_data = []
    for field in data:
        for term in terms:
            field = sub(r'({})'.format(escape(term)),
                        r'<font color="DeepSkyBlue"><b>\1</b></font>',
                        str(field), flags=IGNORECASE)
        result_data.append(field)
    return list(result_data)
def search_card_names(term_str, page_num):
    """Search cards/editions/artists/sets for whitespace-separated terms.

    LIKE-matches every term against the columns listed in gensql() and
    returns a list of result dicts: rows matching ALL terms first (tagged
    'AND'), then partial matches (tagged 'OR'). Each dict also carries the
    total page count so the client can paginate.
    """
    try:
        page_num = int(page_num)
    except:
        # NOTE(review): bare except -- should be (TypeError, ValueError).
        page_num = 0
    if page_num < 0:
        page_num = 0
    # The first 5 selected columns are ids (kept verbatim); the remaining
    # display columns get the matched terms bolded.
    num_id_cols = 5
    terms = [term for term in term_str.split(" ") if term != '']
    # Bind parameters :term0, :term1, ... wrapped in SQL wildcards.
    parameters = {'term{}'.format(i): '%%' + term + '%%' for i, term in enumerate(terms)}
    sql, sql_count = gensql(len(terms), page_num)
    # Total page count: 25 rows per page, rounded up.
    total = (db.engine.execute(text(sql_count), parameters).fetchone()[0] + 24)//25
    if page_num > (total - 1):
        # Clamp to the last page and regenerate the paginated query.
        page_num = total - 1
        sql, sql_count = gensql(len(terms), page_num)
    results = db.engine.execute(text(sql), parameters).fetchall()
    # Rank each row by the largest number of terms any single column matched.
    result_dict = {i: max(count_matches(terms, col) for col in result) for i, result in enumerate(results)}
    sorted_result_dict = list(reversed(sorted(result_dict.items(), key=itemgetter(1))))
    # Rows matching every term become 'AND' results; the rest become 'OR'.
    sorted_and_ids = [list(results[index][:num_id_cols]) for index, num in sorted_result_dict if num == len(terms)]
    sorted_and_results = [result + [total, 'AND'] for result in [bold_search_terms(results[index][num_id_cols:], terms) for index, num in sorted_result_dict if num == len(terms)]]
    sorted_or_ids = [list(results[index][:num_id_cols]) for index, num in sorted_result_dict if num < len(terms)]
    sorted_or_results = [result + [total, 'OR'] for result in [bold_search_terms(results[index][num_id_cols:], terms) for index, num in sorted_result_dict if num < len(terms)]]
    # 5 id columns + 12 display columns + num_pages + result_type = 19 keys,
    # matching the column order of gensql()'s SELECT.
    keys = ['card_id', 'artist_id', 'set_id', 'cost', 'image_url', 'name', 'text',
            'types', 'subtypes', 'formats', 'colors', 'flavor',
            'rarity', 'layout', 'artist_name', 'setid',
            'set_name', 'num_pages', 'result_type']
    full_and_results = [result_id + result for result_id, result in zip(sorted_and_ids, sorted_and_results)]
    full_or_results = [result_id + result for result_id, result in zip(sorted_or_ids, sorted_or_results)]
    dict_and_results = [{k: r for k, r in zip(keys, result)} for result in full_and_results]
    dict_or_results = [{k: r for k, r in zip(keys, result)} for result in full_or_results]
    return dict_and_results + dict_or_results
##################################################################
###################### VIEWS/CONTROLLERS #########################
##################################################################
######################
# Routes for Home page (NOTE:INDEX IS THE ONLY TEMPLATE SERVED BY SERVER, ALL OTHERS ARE LOADED BY ANGULAR!)
######################
@app.route('/index.html', methods=['GET'])
def indexHTML(): # pragma: no cover
    """Legacy /index.html path: redirect to the canonical root route."""
    return redirect(url_for('index'))
@app.route('/', methods=['GET'])
def index(): # pragma: no cover
    """Serve the single-page app shell (all other templates load via Angular)."""
    logger.debug("index")
    return render_template('index.html', cards=Card.query.all())
######################
# Routes for JSON API REST Endpoints
######################
def json_resp(data):
    """Serialize *data* to pretty-printed JSON and wrap it in a Flask
    response with the application/json mimetype."""
    ret = dumps(data, sort_keys=True,
                indent=4, separators=(',', ': '))
    ret = app.make_response(ret)
    ret.mimetype = 'application/json'
    return ret
@app.route('/api/search/<path:search_query>/<int:page>', methods=['GET'])
def searchAPI(search_query, page): # pragma: no cover
    """JSON search results for `search_query`, paginated by `page`."""
    logger.debug('search')
    return json_resp(search_card_names(search_query, page))
@app.route('/api/artists/page/<int:page>/<int:sort_col>', methods=['GET'])
def artistsAPI(page, sort_col): # pragma: no cover
    """Sorted, paginated artist table rows (rarity counts per artist)."""
    logger.debug("artists")
    artists = serialize_artist_table_data_paginated(page, sort_col)
    return json_resp(artists)
@app.route('/api/artists/<int:page>', methods=['GET'])
def artistsPublicAPI(page): # pragma: no cover
    """Plain paginated artist list (shallow serialization, 25 per page)."""
    logger.debug("artists")
    LIM = 25 # page length
    artists = [artist.serialize_part for artist in Artist.query.limit(LIM).offset(LIM*(page)).all()]
    return json_resp(artists)
@app.route('/api/artists/<path:artist_id>', methods=['GET'])
def artistAPI(artist_id): # pragma: no cover
    """Single artist with all related editions (full serialization)."""
    logger.debug("artist")
    artist = [Artist.query.get(artist_id).serialize_full]
    return json_resp(artist)
@app.route('/api/sets/page/<int:page>/<int:sort_col>', methods=['GET'])
def setsAPI(page, sort_col): # pragma: no cover
    """Sorted, paginated set table rows (rarity counts per set)."""
    logger.debug("sets")
    sets = serialize_set_table_data_paginated(page, sort_col)
    return json_resp(sets)
@app.route('/api/sets/<int:page>', methods=['GET'])
def setsPublicAPI(page): # pragma: no cover
    """Plain paginated set list (shallow serialization, 25 per page)."""
    logger.debug("sets")
    LIM = 25 # page length
    sets = [card_set.serialize_part for card_set in Set.query.limit(LIM).offset(LIM*(page)).all()]
    return json_resp(sets)
@app.route('/api/sets/<path:set_id>', methods=['GET'])
def setAPI(set_id): # pragma: no cover
    """Single set with all related editions (full serialization)."""
    logger.debug("card_set")
    card_set = [Set.query.get(set_id).serialize_full]
    return json_resp(card_set)
@app.route('/api/cards/page/<int:page>/<int:sort_col>', methods=['GET'])
def cardsAPI(page, sort_col): # pragma: no cover
    """Sorted, paginated card table rows (aggregated editions per card)."""
    logger.debug("cards")
    cards = serialize_card_table_data_paginated(page, sort_col)
    return json_resp(cards)
@app.route('/api/cards/<int:page>', methods=['GET'])
def cardsPublicAPI(page): # pragma: no cover
    """Paginated card list with related editions (full serialization)."""
    logger.debug("cards")
    LIM = 25 # page length
    cards = [card.serialize_full for card in Card.query.limit(LIM).offset(LIM*(page)).all()]
    return json_resp(cards)
@app.route('/api/cards/<path:card_id>', methods=['GET'])
def cardAPI(card_id): # pragma: no cover
    """Single card with all related editions (full serialization)."""
    logger.debug("card")
    card = [Card.query.get(card_id).serialize_full]
    return json_resp(card)
@app.route('/api/editions/<path:multiverse_id>', methods=['GET'])
def editionAPI(multiverse_id): # pragma: no cover
    """Single edition (printing) including its card/artist/set names."""
    logger.debug("edition")
    edition = [Edition.query.get(multiverse_id).serialize]
    return json_resp(edition)
##################################################################
###################### FLASK MANAGER COMMANDS ####################
##################################################################
def addCard(card_args):
    """Insert and commit a Card built from `card_args` (dict of column values)."""
    card = Card(**card_args)
    db.session.add(card)
    db.session.commit()
def addArtist(artist_args):
    """Insert and commit an Artist built from `artist_args` (dict of column values)."""
    artist = Artist(**artist_args)
    db.session.add(artist)
    db.session.commit()
def addEdition(edition_args):
    """Insert and commit an Edition built from `edition_args` (dict of column values)."""
    edition = Edition(**edition_args)
    db.session.add(edition)
    db.session.commit()
def addSet(set_args):
    """Insert and commit a Set built from `set_args` (dict of column values)."""
    the_set = Set(**set_args)
    db.session.add(the_set)
    db.session.commit()
@manager.command
def create_db():
    """flask-script command: create all tables defined by the models."""
    logger.debug("create_db")
    app.config['SQLALCHEMY_ECHO'] = False  # keep SQL output quiet
    db.create_all()
@manager.command
def drop_db(): # pragma: no cover
    """flask-script command: drop all tables (destructive!)."""
    logger.debug("drop_db")
    app.config['SQLALCHEMY_ECHO'] = False  # keep SQL output quiet
    db.drop_all()
##################################################################
###################### PYTHON MAIN ###############################
##################################################################
logger.debug("Welcome to Data: The Gathering!")
logger.debug("The log statement below is for educational purposes only. Do *not* log credentials.")
# NOTE(review): this logs the full DB URI including the password -- remove
# outside of coursework.
logger.debug("%s", SQLALCHEMY_DATABASE_URI)

if __name__ == '__main__': # pragma: no cover
    # flask-script entry point (run/shell/create_db/drop_db commands).
    manager.run()
##################################################################
###################### PYTHON TESTS ##############################
##################################################################
@app.route('/tests/runtests')
def tests(): # pragma: no cover
p = Popen(["python3", "tests.py"],
stdout = PIPE,
stderr = PIPE,
stdin = PIPE)
out, err = p.communicate()
return render_template('tests.html', output = (err + out).decode()) | dark-ritual/cs373-idb | app/app.py | app.py | py | 24,898 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "getpass.getuser",... |
3617920960 | from re import X
import torch
import torch.nn as nn
from torchvision.transforms import functional as F
from PIL import Image
from models.MIMOUNet import build_net
from models.unet import DeblurUNet
from models.face_model.face_gan import FaceGAN
from skimage.metrics import peak_signal_noise_ratio
import cv2
import os
import numpy as np
import torch.nn.functional as F1
from tqdm import tqdm
test_folder = '/data/juicefs_hz_cv_v3/11145199/datas/test_data/20220218/medium_faces_noglass'
base = ['MIMO-UNet', '/data/juicefs_hz_cv_v3/11145199/pretrained/model_lite_face_512.pkl']
mimo_3gan = ['MIMO-UNet', '/data/juicefs_hz_cv_v2/11145199/deblur/results/GANs/0114/MIMO-UNet/weights/model_500.pkl']
mimo_pgan = ['MIMO-UNet', '/data/juicefs_hz_cv_v2/11145199/deblur/results/GANs/0114/patch/MIMO-UNet/weights/model_500.pkl']
grad_gan = ['GradientDeblurGAN', '/data/juicefs_hz_cv_v2/11145199/deblur/finetune/0207/GradientDeblurGAN/weights/model_440.pkl']
mimo_s4unet = ['MS4UNet', '/data/juicefs_hz_cv_v2/11145199/deblur/results/GANs/0126/MS4UNet/weights/model_500.pkl']
normal_unet = ['DeblurUNet', '/data/juicefs_hz_cv_v2/11145199/deblur/results/DeblurUnet/weights/model_320.pkl']
def load_facegan():
    """Build a GPEN-BFR-512 FaceGAN face restorer on CUDA from the pretrained weights dir."""
    base_dir = '/data/juicefs_hz_cv_v3/11145199/work/GPEN'
    size = 512
    model = {'name': 'GPEN-BFR-512', 'size': 512, 'channel_multiplier': 2, 'narrow': 1}
    facegan = FaceGAN(base_dir, size, model['name'], model['channel_multiplier'], model['narrow'], device='cuda')
    return facegan
def load_model(model_info):
    """Build the network named in *model_info* = (name, checkpoint_path), wrap it
    in DataParallel over all visible GPUs, restore its weights and set eval mode."""
    name, ckpt_path = model_info
    net = DeblurUNet() if name == 'DeblurUNet' else build_net(name)
    net.cuda()
    net = nn.DataParallel(net, device_ids=list(range(torch.cuda.device_count())))
    net.load_state_dict(torch.load(ckpt_path)['model'])
    net.eval()
    return net
@torch.no_grad()
def generate(sample_folder, save_folder):
    """Run every candidate deblurring model (plus GPEN face restoration) on each
    .jpg in *sample_folder* and save a 4x2 side-by-side comparison grid per image
    into *save_folder*.

    Grid layout: slot (0,0) is the blurry input, slots 1..6 (row-major) are the
    six models in model_infos order, and slot (3,1) is the FaceGAN result.
    """
    model_infos = [base, mimo_3gan, mimo_pgan, grad_gan, mimo_s4unet, normal_unet]
    models = list()
    for model_info in model_infos:
        net = load_model(model_info)
        models.append([model_info[0], net])
    facegan = load_facegan()
    os.makedirs(save_folder, exist_ok=True)
    files = os.listdir(sample_folder)
    print('Num: ', len(files))
    from tqdm import tqdm
    for file_name in tqdm(files):
        if not file_name.endswith('.jpg'):
            continue
        blur_file = os.path.join(sample_folder, file_name)
        blur = Image.open(blur_file)
        # x1: normalized NCHW tensor for the torch models; x2: BGR ndarray for FaceGAN (OpenCV order)
        x1 = F.to_tensor(blur).unsqueeze(0).cuda()
        x2 = np.array(blur)
        x2 = x2[:,:,::-1]
        w, h = blur.size
        print('==> [%s]-[%d][%d]'%(file_name, w, h))
        merge = Image.new('RGB', (w * 4, h * 2))
        merge.paste(blur, (0, 0, w, h))
        for i, model in enumerate(models):
            name, net = model
            pred = net(x1)
            # MS4UNet's output list holds its final scale at index 3; the others at 2 — TODO confirm
            if name == 'MS4UNet':
                output = pred[3]
            else:
                output = pred[2]
            output = torch.clamp(output, 0, 1).cpu()
            output += 0.5 / 255  # half-LSB offset so the 8-bit conversion rounds instead of truncating
            pred_image = F.to_pil_image(output.squeeze(0), 'RGB')
            # Grid slot for model i: slots 1..6 in row-major order over the 4x2 canvas.
            k1 = int((i+1) / 4)
            k2 = (i+1) % 4
            merge.paste(pred_image, (k2*w, k1*h, (k2+1)*w, (k1+1)*h))
        y2 = facegan.process(x2)
        y1 = Image.fromarray(y2[:,:,::-1])  # BGR -> RGB before pasting
        merge.paste(y1, (3*w, h, 4*w, 2*h))
        merge.save(os.path.join(save_folder, file_name))
    print('done ...')
def gpen_test():
    """Smoke test: run FaceGAN on one hard-coded image and dump the result to x.png."""
    facegan = load_facegan()
    file = '/data/juicefs_hz_cv_v3/11145199/datas/test_data/20220218/medium_faces_noglass/IMG_20220119_171001.jpg'
    x = cv2.imread(file, cv2.IMREAD_COLOR)
    y = facegan.process(x)
    print(x.shape)
    print(y.shape)
    cv2.imwrite('x.png', y)  # NOTE(review): writes the restored image y, despite the 'x.png' name
if __name__ == '__main__':
# gpen_test()
sample_folder = test_folder
save_folder ='/data/juicefs_hz_cv_v3/11145199/datas/test_data/20220218/medium_faces_noglass_gen'
# sample_folder = '/data/juicefs_hz_cv_v3/11145199/datas/test_data/20220217/faces'
# save_folder = '/data/juicefs_hz_cv_v3/11145199/datas/test_data/20220217/faces_gen'
generate(sample_folder, save_folder)
| ckirchhoff2021/ImageSynthesis | MMU-DDP/gen2.py | gen2.py | py | 4,212 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "models.face_model.face_gan.FaceGAN",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.unet.DeblurUNet",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.MIMOUNet.build_net",
"line_number": 44,
"usage_type": "call"
},
{
... |
34121568699 | import time
import PySimpleGUI as sg
from classes.logger import Logger
from classes.microscope_mover import MicroscopeMover, mover
from classes.scanner import Scanner
from classes.solis import Automatization
from gui.helpers import disable_element, enable_element, get_load_path, str_to_int
from gui.scanner_gui import AutomatizationGUI
PADDING = 4
logger = Logger(__name__).get_logger()
paused = False
stopped = False
current_point_nr = 1
def construct_number_with_padding(point_number: int, line_number: int):
    """Build a point label like 'P0003x0001': both counters left-padded with
    zeros to PADDING characters (longer numbers are kept unpadded)."""
    point_part = str(point_number).rjust(PADDING, "0")
    line_part = str(line_number).rjust(PADDING, "0")
    return f"P{point_part}x{line_part}"
def start_scanning(scanner: Scanner, mover: MicroscopeMover, solis: Automatization, integr_time: int):
    """Drive the microscope through every loaded scan point, capturing a spectrum at each.

    Honors the module-level ``paused``/``stopped`` flags (toggled from the GUI
    thread) and reports progress through ``current_point_nr``.  Capture files are
    named P<point>x<line>; a new line starts whenever x wraps against the scan
    direction inferred from the first two points.
    """
    global paused, stopped, current_point_nr
    logger.info("Started scanning sequence")
    line_number = 0
    point_number = 1
    previous_x = 0
    one_point = True
    if len(scanner.all_scanner_points) >= 2:
        # Direction of travel along x decides how a wrap (new scan line) is detected.
        step = scanner.all_scanner_points[1] - scanner.all_scanner_points[0]
        one_point = False
        previous_x = 99999999 if step.x > 0 else -99999999
    for i, point in enumerate(scanner.all_scanner_points):
        while paused:
            time.sleep(0.5)
        if stopped:
            return
        current_point_nr = i + 1
        if not one_point:
            # x moved opposite to the scan direction -> wrapped to the next line.
            if step.x < 0 and point.x > previous_x:
                line_number += 1
                point_number = 1
            elif step.x > 0 and point.x < previous_x:
                line_number += 1
                point_number = 1
            previous_x = point.x
        point_filename = construct_number_with_padding(point_number, line_number)
        scanner.next_scan()
        mover.set_coordinates(point)
        solis.capture_and_save(filename=point_filename, integr_time=integr_time, first_time=i == 0)
        point_number += 1
    logger.info("Successfully ended scanning sequence")
    stopped, paused = False, False
def main():
    """GUI event loop: wires SOLIS automation, point loading and the scan worker thread."""
    global paused, stopped, current_point_nr
    solis = Automatization("Andor SOLIS for Spectroscopy: *")
    if not solis.success:
        return
    gui = AutomatizationGUI()
    scanner = Scanner()
    window = gui.window
    started = False
    while 1:
        event, values = window.read(timeout=1000)
        # Refresh the progress meter once the scan thread has advanced past point 1.
        if started and current_point_nr > 1 and not paused:
            sg.one_line_progress_meter(
                "Progress bar",
                current_point_nr,
                len(scanner.all_scanner_points),
                orientation="h",
                keep_on_top=True,
                no_button=True,
            )
        if event == sg.WIN_CLOSED:
            break
        if event == "-LOADSCANPOINTS-":
            points_load_path = get_load_path()
            if not points_load_path:
                continue
            if not scanner.load_coordinates(points_load_path):
                continue
            window["-POINTCOUNT-"].update(scanner.all_point_count)
            stopped, paused = False, False
        if event == "-GOFIRSTPOINT-":
            if not scanner.all_scanner_points:
                logger.error("No scanning points loaded")
                continue
            mover.set_coordinates(scanner.all_scanner_points[0])
        if event == "-STARTSCAN-":
            stopped = False
            if not scanner.all_scanner_points:
                logger.error("No scanning points loaded")
                continue
            integration_time = str_to_int(values["-INTEGRATIONTIME-"])
            if integration_time <= 0:
                logger.error("Negative total integration time")
                continue
            started = True
            # Run the scan off the GUI thread; "-ENDSCAN-" is posted when it finishes.
            window.perform_long_operation(lambda: start_scanning(scanner, mover, solis, integration_time), "-ENDSCAN-")
            disable_element(window, "-STARTSCAN-")
            enable_element(window, "-STOP-")
            enable_element(window, "-PAUSE-")
        if event == "-ENDSCAN-":
            started = False
            message = "Scanning stopped by user" if stopped else "Scanning successfully ended"
            color = "red" if stopped else "green"
            sg.one_line_progress_meter_cancel()
            sg.popup_ok(message, keep_on_top=True, background_color=color)
            enable_element(window, "-STARTSCAN-")
            disable_element(window, "-STOP-")
            disable_element(window, "-PAUSE-")
        if event == "-PAUSE-":
            paused = True if not paused else False
            logger.warning("Paused Scanning" if paused else "Continue scanning")
            window["-PAUSE-"].update("Unpause" if paused else "Pause")
            window["-STOP-"].update(disabled=paused)
        if event == "-STOP-":
            stopped = True
if __name__ == "__main__":
main()
| LZP-2020-1-0200/Solis-XY | scanner.py | scanner.py | py | 4,903 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "classes.logger.Logger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "classes.scanner.Scanner",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "classes.microscope_mover.MicroscopeMover",
"line_number": 27,
"usage_type": "name"
},
{... |
27781889678 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 20:19:55 2019
@author: mjkiqce3
"""
import numpy as np
import multiprocessing
from multiprocessing import Pool
def computeerr(n,regressor,cc,inputtest):
    """Active-learning loop: for each of *n* datasets in cc, run 10 query/teach
    rounds on the regressor, then predict on inputtest.

    Assumes cc is 3-D, sliced as cc[:, :, i], with 600 rows, the first 10
    columns as features and the last column as the target -- TODO confirm.

    NOTE(review): y_pred_final is overwritten every dataset, so only the LAST
    dataset's prediction is returned; with n == 0 the return below raises
    UnboundLocalError.  Accumulating predictions per i was likely intended.
    """
    for i in range(n): # We can parallelise here
        datause=cc[:,:,i]
        X=datause[:,0:10]
        y=datause[:,-1]
        y=np.reshape(y,(600,1))
        print ('Now in data set :', i)
        n_queries = 10
        for idx in range(n_queries): # We cant parallelise here
            query_idx, query_instance = regressor.query(X)
            regressor.teach(X[query_idx].reshape(1, -1), y[query_idx].reshape(1, -1))
        y_pred_final= regressor.predict(inputtest)
    return y_pred_final,regressor
if __name__ == "__main__":
number_of_realisations = range(100)
p = Pool(multiprocessing.cpu_count())
p.map(computeerr,number_of_realisations) | clementetienam/Machine-Learning-for-Model-Reduction-to-Fustion-Simulation-data_2 | Clement_Codes/clementpara.py | clementpara.py | py | 899 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.reshape",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 27,
"usage_type": "call"
}
] |
19874279600 | import cv2
import numpy as np
#Fill the screen with digits
def fill_digits_motion(num_array , coor_array , indexes , tos):
    """Webcam AR loop: track the sudoku sheet via frame differencing and overlay
    the solved digits on the live feed; ESC (key 27) exits.

    Args (inferred from usage -- TODO confirm against caller):
        num_array:  solved digit values, indexed by *indexes*.
        coor_array: reference (x, y) pixel coordinates per digit cell.
        indexes:    positions of the originally-empty cells to draw.
        tos:        status text drawn in the top-left corner.
    """
    cap=cv2.VideoCapture(0)
    if cap.isOpened() : ret,frame = cap.read()
    else:
        ret = False
    ret,frame1 = cap.read()
    ret,frame2 = cap.read()
    diff = 0
    while ret:
        ret,frame = cap.read()
        # Motion mask = thresholded, dilated difference of two consecutive frames.
        d=cv2.absdiff(frame1,frame2)
        grey=cv2.cvtColor(d,cv2.COLOR_BGR2GRAY)
        blur =cv2.GaussianBlur(grey,(5,5),0)
        ret,th=cv2.threshold(blur,40,255,cv2.THRESH_BINARY)
        dilated=cv2.dilate(th,np.ones((3,3),np.uint8),iterations = 3)
        c,h=cv2.findContours(dilated,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        for cnt in c :
            area = cv2.contourArea(cnt)
            if area > 27000 : #This value can be adjusted
                approx = cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True)
                if len(approx) == 4 :
                    # A large quadrilateral is taken as the sheet; its horizontal
                    # offset from the reference grid shifts every drawn digit.
                    x , y , w , h = cv2.boundingRect(cnt)
                    diff = x - coor_array[0][0]
        font = cv2.FONT_HERSHEY_SIMPLEX
        frame1 = cv2.putText(frame1 , tos , (15 , 35) , font , 0.5 , (255 , 255 , 0) , 1 , cv2.LINE_AA)
        for i in indexes :
            digit = str(num_array[i])
            position = coor_array[i]
            frame1 = cv2.putText(frame1 , digit , (position[0] + diff + 18 , position[1] + 10) , font , 0.5 , (255 , 0 , 255) , 1 ,cv2.LINE_AA)
        cv2.imshow("Sudoku Solved",frame1)
        if cv2.waitKey(40) == 27:
            break
        frame1 = frame2
        ret,frame2= cap.read()
    cv2.destroyAllWindows()
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.absdiff",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"li... |
72243973154 | from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import urlfetch
from django.utils import simplejson as json
base = 'https://github.com/login/oauth/access_token'
client_id = '?client_id='
redirect_url = '&redirect_uri=https://githubanywhere.appspot.com/callback.html'
client_secret = '&client_secret='
class MainHandler(webapp.RequestHandler):
    """Handler for '/': replies with a plain-text greeting (health check)."""
    def get(self):
        resp = self.response
        resp.headers['Content-Type'] = 'text/plain'
        resp.out.write('Hello world!')
class APIv0LoginOauthAccessTokenHandler(webapp.RequestHandler):
    """Exchanges a GitHub OAuth ``code`` for an access token.

    Proxies the POST to github.com/login/oauth/access_token with this app's
    client id/secret and relays GitHub's response body verbatim.
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain'
        # Removed an unused `user = GetUserFromPath(...)` lookup — its result was never read.
        code = self.request.get('code')
        url = base + client_id + redirect_url + client_secret + '&code=' + code
        result = urlfetch.fetch(url, None, 'POST')
        self.response.out.write(result.content)
class APIv0Handler(webapp.RequestHandler):
    """Catch-all proxy for the GitHub v2 JSON API.

    Mirrors the request path onto github.com/api/v2/json, forwarding an
    optional ``access_token`` query parameter, and relays the JSON response.
    GET and POST share one implementation (they previously duplicated it).
    """
    def _proxy(self, method):
        """Forward the current request to GitHub using *method* ('GET' or 'POST')."""
        self.response.headers['Content-Type'] = 'application/json'
        url = 'https://github.com/api/v2/json' + self.request.path
        access_token = self.request.get('access_token')
        if access_token:
            url += '?access_token=' + access_token
        result = urlfetch.fetch(url, None, method)
        self.response.out.write(result.content)

    def get(self):
        self._proxy('GET')

    def post(self):
        self._proxy('POST')
def GetUserFromPath(path, element = 3):
    """Return the path segment at position *element* (default 3) of a '/'-separated path."""
    segments = path.split('/')
    return segments[element]
def main():
    """Route table + WSGI entry point.  Order matters: '/.*' is the catch-all proxy."""
    application = webapp.WSGIApplication([
        ('/', MainHandler),
        ('/login/oauth/access_token', APIv0LoginOauthAccessTokenHandler),
        ('/.*', APIv0Handler),
        ],
        debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main() | abraham/github-anywhere | appengine/main.py | main.py | py | 2,176 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.webapp",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.webapp.RequestHandler",
"line_number": 16,
... |
4644928169 | import base64
import glob
import os
import os.path as op
import posixpath as pp
from urllib.parse import urlencode, urljoin
import pandas as pd
import requests
class EncodeClient:
    """Cached download client for released ENCODE experiment files.

    Downloads the project-wide metadata table once, restricts it to a single
    genome assembly, and fetches individual files by accession into *cachedir*.
    """

    BASE_URL = "http://www.encodeproject.org/"
    # 2020-05-15 compatible with ENCODE Metadata at:
    METADATA_URL = "https://www.encodeproject.org/metadata/type=Experiment&status=released/metadata.tsv"
    KNOWN_ASSEMBLIES = (
        "GRCh38",
        "GRCh38-minimal",
        "ce10",
        "ce11",
        "dm3",
        "dm6",
        "hg19",
        "mm10",
        "mm10-minimal",
        "mm9",
    )

    def __init__(self, cachedir, assembly, metadata=None):
        """Set up the per-assembly cache directory and metadata table.

        Parameters
        ----------
        cachedir : str
            Root cache directory; files are stored under ``cachedir/<assembly>/``.
        assembly : str
            One of KNOWN_ASSEMBLIES.
        metadata : pandas.DataFrame, optional
            Pre-filtered metadata table; downloaded and filtered when omitted.
        """
        if assembly not in self.KNOWN_ASSEMBLIES:
            raise ValueError("assembly must be in:", self.KNOWN_ASSEMBLIES)

        self.cachedir = op.join(cachedir, assembly)
        if not op.isdir(self.cachedir):
            os.makedirs(self.cachedir, exist_ok=True)

        if metadata is None:
            metadata_path = op.join(cachedir, "metadata.tsv")
            if not op.exists(metadata_path):
                print(
                    "getting metadata from ENCODE, please wait while "
                    "(~240Mb) file downloads"
                )
                # Stream the large table to disk in chunks to bound memory use.
                with requests.get(self.METADATA_URL, stream=True) as r:
                    r.raise_for_status()
                    with open(metadata_path, "wb") as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            f.write(chunk)
            self._meta = pd.read_table(metadata_path, low_memory=False)
            table_assemblies = sorted(
                self._meta["File assembly"].dropna().unique().tolist()
            )
            if not set(table_assemblies).issubset(set(self.KNOWN_ASSEMBLIES)):
                raise ValueError(
                    "Table assemblies do not match known assemblies, "
                    "check ENCODE metadata version"
                )
            self._meta = self._meta[self._meta["File assembly"] == assembly].copy()
            self._meta = self._meta.set_index("File accession")
        else:
            self._meta = metadata

    def _batch_download(self, args):
        """GET /batch_download/<urlencoded args>; return the Response (raises on HTTP error)."""
        params = urlencode(args)
        url = pp.join("batch_download", params)
        url = urljoin(self.BASE_URL, url)
        r = requests.get(url)
        r.raise_for_status()
        return r

    def _metadata(self, args):
        """GET /metadata/<urlencoded args>/metadata.tsv; return the Response (raises on HTTP error)."""
        params = urlencode(args)
        url = pp.join("metadata", params, "metadata.tsv")
        url = urljoin(self.BASE_URL, url)
        r = requests.get(url)
        r.raise_for_status()
        return r

    @property
    def meta(self):
        """A defensive copy of the assembly-filtered metadata table."""
        return self._meta.copy()

    def info(self, accession, width=850, height=450):
        """Return an IPython HTML iframe showing the ENCODE experiment page."""
        from IPython.display import HTML

        url = urljoin(self.BASE_URL, pp.join("experiments", accession))
        return HTML(
            f'<iframe width="{width}px" height="{height}px" src={url}></iframe>'
        )

    def fetch(self, accession):
        """Download the file for *accession* into the cache (if absent); return its local path."""
        url = self.meta.loc[accession, "File download URL"]
        # sig = self.meta.loc[accession, 'md5sum']
        filename = op.split(url)[1]
        path = op.join(self.cachedir, filename)
        if op.exists(path):
            pass
            # print('File "{}" available'.format(filename))
        else:
            # Fixed: the f-string here had no placeholder, so the filename was never shown.
            print(f'Downloading "{filename}"')
            r = requests.get(url)
            r.raise_for_status()
            with open(path, "wb") as f:
                f.write(r.content)
        return path

    def fetch_all(self, accessions):
        """Fetch every accession; return the list of local paths."""
        return list(map(self.fetch, accessions))
class FDNClient:
    """Cached download client for 4D Nucleome (4DN) data portal files."""

    BASE_URL = "https://data.4dnucleome.org/"

    def __init__(self, cachedir, assembly, metadata=None, key_id=None, key_secret=None):
        """Point the client at an existing per-assembly cache and metadata table.

        *key_id*/*key_secret*, when given, are folded into an HTTP Basic auth
        token used for authorised downloads.
        """
        self.cachedir = op.join(cachedir, assembly)
        if not op.isdir(self.cachedir):
            raise OSError(f"Directory doesn't exist: '{cachedir}'")
        if metadata is None:
            # Use the lexicographically newest metadata*.tsv snapshot in the cache root.
            metadata_paths = sorted(glob.glob(op.join(cachedir, "metadata*.tsv")))
            metadata_path = metadata_paths[-1]
            self._meta = pd.read_table(metadata_path, low_memory=False, comment="#")
            if assembly == "GRCh38":
                self._meta = self._meta[self._meta["Organism"] == "human"].copy()
            self._meta = self._meta.set_index("File Accession")
        else:
            self._meta = metadata
        if key_id is not None:
            credential = (key_id + ":" + key_secret).encode("utf-8")
            self._token = base64.b64encode(credential)
        else:
            self._token = None

    @property
    def meta(self):
        """A defensive copy of the metadata table."""
        return self._meta.copy()

    def info(self, accession, width=850, height=450):
        """Return an IPython HTML iframe showing the 4DN experiment page."""
        from IPython.display import HTML

        url = urljoin(self.BASE_URL, pp.join("experiments", accession))
        return HTML(
            f'<iframe width="{width}px" height="{height}px" src={url}></iframe>'
        )

    def fetch(self, accession):
        """Download the file for *accession* into the cache (if absent); return its local path."""
        url = self.meta.loc[accession, "File Download URL"]
        # sig = self.meta.loc[accession, 'md5sum']
        filename = op.split(url)[1]
        path = op.join(self.cachedir, filename)
        if op.exists(path):
            pass
            # print('File "{}" available'.format(filename))
        else:
            # Fixed: the f-string here had no placeholder, so the filename was never shown.
            print(f'Downloading "{filename}"')
            if self._token:
                headers = {"Authorization": b"Basic " + self._token}
            else:
                headers = None
            r = requests.get(url, headers=headers)
            r.raise_for_status()
            with open(path, "wb") as f:
                f.write(r.content)
        return path

    def fetch_all(self, accessions):
        """Fetch every accession; return the list of local paths."""
        return list(map(self.fetch, accessions))
| open2c/bioframe | bioframe/sandbox/clients.py | clients.py | py | 5,776 | python | en | code | 127 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
... |
74473377953 | from django import forms
from .models import *
class StockCreateForm(forms.ModelForm):
    """Form for creating a stock record; rejects blank fields and duplicate categories."""
    class Meta:
        model = Stock
        fields = ['category', 'item_name', 'quantity']

    # prevent saving form with blank details
    def clean_category(self):
        """Require a category and enforce uniqueness with a single DB query."""
        category = self.cleaned_data.get('category')
        if not category:
            raise forms.ValidationError('This field is required')
        # One EXISTS query instead of iterating every Stock row in Python.
        if Stock.objects.filter(category=category).exists():
            raise forms.ValidationError(f'{category} is already created')
        return category

    def clean_item_name(self):
        """Require a non-blank item name."""
        item_name = self.cleaned_data.get('item_name')
        if not item_name:
            raise forms.ValidationError('This field is required')
        return item_name
class StockSearchForm(forms.ModelForm):
    """Search form over stock by category/item name, with optional CSV export flag."""
    export_to_CSV = forms.BooleanField(required=False)
    class Meta:
        model = Stock
        fields = ['category', 'item_name']
class StockUpdateForm(forms.ModelForm):
    """Edit form for an existing stock record's core fields."""
    class Meta:
        model = Stock
        fields = ['category', 'item_name', 'quantity']
class IssueForm(forms.ModelForm):
    """Form for issuing stock: quantity issued and the recipient."""
    class Meta:
        model = Stock
        fields = ['issue_quantity', 'issue_to']
class ReceiveForm(forms.ModelForm):
    """Form for receiving stock: quantity received and who received it."""
    class Meta:
        model = Stock
        fields = ['receive_quantity', 'receive_by']
class ReorderLevelForm(forms.ModelForm):
    """Form for setting a stock item's reorder threshold."""
    class Meta:
        model = Stock
        fields = ['reorder_level']
class StockHistorySearchForm(forms.ModelForm):
    """Search form over stock history with an optional date range and CSV export flag."""
    export_to_CSV = forms.BooleanField(required=False)
    start_date = forms.DateTimeField(required=False)
    end_date = forms.DateTimeField(required=False)
    class Meta:
        model = StockHistory
        fields = ['category', 'item_name', 'start_date', 'end_date']
class CategorysForm(forms.ModelForm):
    """Form for creating/renaming a stock category."""
    class Meta:
        model = Category
        fields = ['name']
class CustomerDetailsForm(forms.ModelForm):
    """Customer registration form; all identifying fields are required and
    national id / email / phone number must be unique."""
    class Meta:
        model = CustomerDetails
        fields = ['customer_name', 'national_id', 'email', 'phone_number', 'home_address']

    # preventing null and duplicate entries
    def clean_customer_name(self):
        customer_name = self.cleaned_data.get('customer_name')
        if not customer_name:
            raise forms.ValidationError('Customers name is required')
        return customer_name

    def clean_national_id(self):
        national_id = self.cleaned_data.get('national_id')
        if not national_id:
            raise forms.ValidationError("Customer's National ID is required")
        # Single EXISTS query instead of scanning all rows in Python.
        if CustomerDetails.objects.filter(national_id=national_id).exists():
            raise forms.ValidationError('Customer with national ID ' + national_id + ' already exists')
        return national_id

    def clean_email(self):
        email = self.cleaned_data.get('email')
        if not email:
            raise forms.ValidationError('This field is required')
        if CustomerDetails.objects.filter(email=email).exists():
            raise forms.ValidationError('A customer with this email address already exists')
        return email

    def clean_phone_number(self):
        phone_number = self.cleaned_data.get('phone_number')
        if not phone_number:
            # Fixed copy-pasted message: this validates the phone number, not the name.
            raise forms.ValidationError("Customer's phone number is required")
        if CustomerDetails.objects.filter(phone_number=phone_number).exists():
            raise forms.ValidationError('A customer with this phone number already exists')
        return phone_number
class CustomerSearchForm(forms.ModelForm):
    """Search form over customers by name/national id, with optional CSV export flag."""
    export_to_CSV=forms.BooleanField(required=False)
    class Meta:
        model=CustomerDetails
        fields=['customer_name','national_id']
class CustomerUpdateForm(forms.ModelForm):
    """Edit form for an existing customer's details."""
    class Meta:
        model=CustomerDetails
        fields=['customer_name','national_id','email','phone_number','home_address']
{
"api_name": "django.forms.ModelForm",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.forms.ValidationError",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "d... |
74826946272 | from setuptools import setup, find_packages
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='mycroft-ekylibre-utils',
version='0.9',
packages=find_packages(),
url='http://github.com/ekylibre',
author='Ekylibre',
author_email='rdechazelles@ekylibre.com',
description='Ekylibre set of tools for MycroftAI skills',
long_description=long_description,
long_description_content_type='text/x-rst',
install_requires=["requests", 'urllib3'],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
| ekylibre/mycroft-ekylibre-utils | setup.py | setup.py | py | 732 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 9,
"usage_type": "call"
}
] |
29461251296 | from __future__ import division
import numpy
import matplotlib.cm as cm
import matplotlib.pyplot as plt
#import imp
import os,sys
import numpy as np
#importlibutil
#from scipy.optimize import minimize
from scipy.optimize import basinhopping
import random
import math
micron=1e-6
sys.path.append("/opt/lumerical/v212/api/python")
import lumapi
#lumapi=imp.load_source("lumapi", "C:\\Program Files\\Lumerical\\v212\\api\\python\\lumapi.py")
base_file = "base_rotator.fsp"
#--- COST FUNCTION ------------------------------------------------------------+
# function we are attempting to optimize (minimize)
def func1(x):
    """Objective for the basin-hopping search: build and run the FDTD simulation
    for parameter vector *x* and return the NEGATIVE TE power, so minimising
    func1 maximises the coupled power.

    x = [xmax, ymin_bot, w_bot, half_width_top], all expressed in microns.
    """
    if os.path.exists(base_file):
        with lumapi.FDTD(filename=base_file, hide='TRUE') as fdtd: #hide='TRUE'
            xmax=x[0]*micron
            ymin_bot=x[1]*micron
            w_bot=x[2]*micron
            ymin_top=-x[3]*micron
            ymax_top=x[3]*micron
            print('xmax:', x[0], ', ymin_bot:', x[1], ', w_bot:', x[2], ', ymin_top:', -x[3])
            fdtd.switchtolayout()
            # Stretch the bottom taper's end vertices to the new geometry.
            fdtd.select("Bottom")
            Vbot=fdtd.get("vertices")
            Vbot[0, 0]=xmax
            Vbot[0, 1]=ymin_bot
            Vbot[1, 0]=xmax
            Vbot[1, 1]=ymin_bot+w_bot
            fdtd.set("vertices", Vbot)
            # Same for the top taper (symmetric about y = 0).
            fdtd.select("Top")
            Vtop=fdtd.get("vertices")
            Vtop[0, 0]=xmax
            Vtop[0, 1]=ymin_top
            Vtop[1, 0]=xmax
            Vtop[1, 1]=ymax_top
            fdtd.set("vertices", Vtop)
            # Keep the simulation region and output monitor tracking the device end.
            fdtd.select("FDTD")
            fdtd.set("x max", xmax-0.5e-6)
            fdtd.select("DFTmonitor_output")
            fdtd.set("x", xmax-0.6e-6)
            fdtd.set("y min", ymin_bot-0.2e-6)
            fdtd.set("y max", ymin_bot+w_bot+0.2e-6)
            fdtd.save()
            fdtd.run()
            bot_monitor=fdtd.getresult("monitor_bottom", "E")
            E_abs=abs(bot_monitor["E"])
            P=E_abs**2
            i1,i2,i3,i4,i5=np.shape(P)
            line_pow=P[i1-5, :, 0, 0, 1] #P[xindex, yindex all, 0, 0, Y polarization]
            p_target=np.max(line_pow)
            print('P_TE:', p_target)
    else:
        print("base file doesn't exist...")
        # NOTE(review): p_target is never assigned on this branch, so the return
        # below raises UnboundLocalError when base_file is missing.
    return -p_target
#--- RUN ----------------------------------------------------------------------+
initial=[50.39, 0.91, 0.65, 0.73] # initial starting location [x1,x2...]
bounds=[(45, 55), (0.8, 1), (0.55, 0.75), (0.6,0.9)] # input bounds [(x1_min,x1_max),(x2_min,x2_max)...]
res=basinhopping(func=func1, x0=initial, niter=5, T=1.0, stepsize=0.5, minimizer_kwargs={'method': 'L-BFGS-B', 'bounds': bounds}, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=3, seed=None, target_accept_rate=0.5, stepwise_factor=0.9)
#minimize(fun=func1, x0=initial, method='L-BFGS-B', bounds=bounds, options={'eps': 1e-01, 'maxiter': 5})
#initial_optimized=[50.39, 0.91, 0.65, 0.73]
#res=func1(initial_optimized) #18.9%
print(res)
| jobayer07/integrated_photonics_design_optimization | optimize_polarization_rotator_step2.py | optimize_polarization_rotator_step2.py | py | 3,159 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
72361816355 | import json
import re
def is_empty_line(line):
    """Return True if *line* is empty or contains only whitespace."""
    # str.strip covers the same character class as the old r'^\s*$' regex,
    # without compiling/matching a pattern on every call.
    return not line.strip()
def is_digit_line(line):
    """Return True if *line* holds a single run of digits, optionally surrounded
    by whitespace.  (The previous comment wrongly described this as a
    whitespace-only check — copy-pasted from is_empty_line.)"""
    return re.fullmatch(r'\s*\d+\s*', line) is not None
if __name__ == '__main__':
items = []
with open('./openssl_tls_error', 'r') as f:
lines = f.readlines()
i = 0
while i < len(lines):
line = lines[i]
if is_empty_line(line):
i += 1
continue
if is_digit_line(line):
item = {
'errno': line.strip(),
'title': lines[i + 2].strip(),
'description': lines[i + 4].strip(),
'icon': 'icons/tls.png'
}
i += 5
items.append(item)
else:
i += 1
with open('../openssl_tls_errno.json', 'w', encoding='utf-8') as out:
json.dump(items, out, ensure_ascii=False, indent=4) | zhougy0717/utools_errno | util/parse_openssl_tls_errno.py | parse_openssl_tls_errno.py | py | 1,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.match",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 38,
"usage_type": "call"
}
] |
72185227553 | from flask import Flask;from flask_ipban import IpBan;from flask_limiter import Limiter;from flask_limiter.util import get_remote_address
from blueprint.main import main
from socket import gethostname
from os import getcwd, path
from yaml import safe_load
import logging, secrets
def loadConfig():
    """Load ./config.yaml from the current working directory.

    Returns the parsed YAML mapping, or None (with an error printed) when the
    file does not exist.
    """
    PATH = (getcwd()+'/config.yaml')
    if path.isfile(PATH):
        # Open read-only: the previous 'r+' needlessly demanded write access,
        # failing on read-only config files.
        with open(PATH, 'r') as config:
            return safe_load(config)
    else:
        print('[err] config.yaml not found!')
        return None
def GenerateRandomToken(length=30):
    """Return a URL-safe random token derived from *length* random bytes."""
    token = secrets.token_urlsafe(length)
    return token
config = loadConfig()
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['HOST_NAME'] = config['HOST_NAME']
app.config['SESSION_COOKIE_DOMAIN'] = False
app.secret_key = GenerateRandomToken()
app.register_blueprint(main)
logging.basicConfig(filename='logs.log',
level=logging.DEBUG,
format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
ip_ban = IpBan(ban_seconds=(config['IpBan']))
ip_ban.init_app(app)
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=config['default_limits']
)
if __name__ == '__main__':
if 'liveconsole' not in gethostname():
app.run(debug=True) | JawadPy/flask-tokyo | app.py | app.py | py | 1,348 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 1... |
73088426594 | import asyncio
import logging
import signal
import session
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class MyClientSession(session.ClientSession):
    """Demo client session: subscribes to 'test.topic' and sends test commands forever."""
    async def on_connected(self):
        # Subscribe as soon as the transport is up, then start the command loop task.
        self.update_sock.subscribe('test.topic')
        self.client_state = session.ClientSessionState.SUBSCRIBED
        self.chat_task = asyncio.Task(self.r())
        asyncio.ensure_future(self.chat_task)
    async def on_stopped(self):
        # Tear down the command loop if it is still running.
        if not self.chat_task.cancelled():
            self.chat_task.cancel()
    async def r(self):
        """Send 'test_cmd <n>' once per second forever, logging timeouts."""
        count = 0
        while True:
            count += 1
            # print('Sending command ...')
            await asyncio.sleep(1)
            try:
                reply = await self.cmd(f'test_cmd {count}')
            except TimeoutError:
                log.warning(f"Timeout waiting for cmd.")
            except:
                # NOTE(review): bare except also traps asyncio.CancelledError,
                # which may prevent this task from ever being cancelled cleanly.
                log.exception('', exc_info=True)
            # if (count % 1000) == 0:
            print(f'cmd count {count}')
c = MyClientSession()
c.install()
c.start()
async def update(msg):
    """Topic callback: print only every 100000th message counter."""
    if msg % 100000:
        return
    print(f'update: {msg}')
c.register('test.topic', update)
l = asyncio.get_event_loop()
# async def main():
# results = await asyncio.gather(
# r(), c.run(), return_exceptions=True)
# for e in results:
# if isinstance(e, Exception) and not isinstance(e, asyncio.CancelledError):
# logging.error("Exception thrown during shutdown", exc_info=(type(e), e, e.__traceback__))
def stop(msg):
    """Log *msg* and cancel every pending task (shutdown helper).

    NOTE: must be invoked from within the running event loop —
    asyncio.all_tasks() raises RuntimeError otherwise.
    """
    log.debug(msg)
    # asyncio.Task.all_tasks() was removed in Python 3.9; use the module-level API.
    tasks = asyncio.all_tasks()
    for task in tasks:
        task.cancel()
l.add_signal_handler(signal.SIGINT, lambda msg: c.exit(), "Halting on SIGINT")
try:
l.run_until_complete(c.run())
except asyncio.CancelledError:
log.debug('CancelledError')
finally:
l.close() | hippysurfer/pyqtzmq | test_client.py | test_client.py | py | 1,856 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "session.ClientS... |
22386365690 | import random
import torch
import cv2
import json
import os
from typing import List, Tuple
import asset
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from asset.utils import dmsg,getIfAugmentData
import imgaug.augmenters as iaa
import numpy as np
from tqdm import tqdm
import random as rand
from torchvision import datasets, transforms
class MovingFGeSOnBGDataset(Dataset):
    """Moving foreground dataset on background.

    Synthesises short clips by pasting up to two MNIST digits (foreground)
    onto an STL10 image (background).  Each item returns the composite
    frames plus ground-truth foreground, alpha and background layers, for
    supervising motion-segmentation / layer-decomposition models.
    """
    def __init__(self, infrencePhase, seqLength, shapeX, shapeY,digitCount = 1, scale=2, foregroundScale=0.7, blurIt=True,
                 minResultSpeed=0, maxResultSpeed=2,color=False):
        # Frames are rendered at `scale` times the requested resolution and
        # downsampled in _scaleBlur; infrencePhase picks the test splits.
        # NOTE(review): super(...) without a second argument is a no-op here.
        super(MovingFGeSOnBGDataset).__init__()
        if digitCount>2:
            raise BaseException("Too much FG requested!")
        self.shapeXOrig = shapeX
        self.shapeYOrig = shapeY
        self.seqLength = seqLength
        self.blurIt = blurIt
        self.minResultSpeed = minResultSpeed
        self.maxResultSpeed = maxResultSpeed
        self.foregroundScale = foregroundScale
        self.digitCount = digitCount
        self.scale = int(scale)
        # Working (super-sampled) canvas size.
        self.shapeX = int(shapeX * scale)
        self.shapeY = int(shapeY * scale)
        self.color = color
        self.MNIST = datasets.MNIST('data', train=not infrencePhase, download=True)
        self.STL10 = datasets.STL10('data', split='train' if not infrencePhase else 'test', download=True,
                                    transform=transforms.Compose(
                                        ([transforms.Grayscale(1)] if not self.color else [])+
                                        [ transforms.Resize([self.shapeY, self.shapeX])]))
        self.STL10Size = len(self.STL10)

    def _scaleBlur(self, arry):
        # Optionally blur at working resolution, then downsample back to the
        # caller-requested resolution and clamp to [0, 1].
        if (self.blurIt):
            arry = cv2.blur(arry, (self.scale, self.scale))
        if self.scale != 1:
            arry = cv2.resize(arry, (self.shapeYOrig, self.shapeXOrig), interpolation=cv2.INTER_NEAREST)# cv2.resize wants [shape[1],shape[0]]
            if not self.color:
                arry = arry[:,:,np.newaxis] # cv2.resize return with no channel!
        return np.clip(arry, a_min=0, a_max=1)

    def _cImg(self, image, scale, original=False):
        # Scale a foreground sprite and frame it with a thin multi-tone
        # border (black, then 0.5 / 0.75 / 0.5 grey rings).
        if original == True:
            return image
        res = cv2.resize(image, (int(image.shape[1] * scale), int(image.shape[0] * scale)),
                         interpolation=cv2.INTER_AREA)  # cv2.resize wants [shape[1],shape[0]]
        if not self.color:
            res = res[:,:,np.newaxis] # cv2.resize return with no channel!
        res = np.pad(res, ((4, 4), (4, 4),(0, 0)), "constant", constant_values=0)
        res = np.pad(res, ((1, 1), (1, 1),(0, 0)), "constant", constant_values=0.5)
        res = np.pad(res, ((1, 1), (1, 1),(0, 0)), "constant", constant_values=0.75)
        res = np.pad(res, ((1, 1), (1, 1),(0, 0)), "constant", constant_values=0.5)
        return res

    def __len__(self):
        return len(self.STL10)

    def __getitem__(self, idx):
        # Deterministic per-index sampling: the same idx always produces the
        # same digits, background, start corners and velocities.
        foreground_objs = []
        random.seed(idx)
        np.random.seed(idx)
        for _ in range(self.digitCount):
            mnistdig=self.MNIST.__getitem__(random.randint(0, len(self.MNIST) - 1))[0]
            mnistdig = np.array(mnistdig)[ :, :,np.newaxis]
            if self.color:
                # Replicate to RGB and tint one random channel.
                mnistdig = np.repeat(mnistdig, 3, axis=2)
                randC = random.randint(0,2)
                mnistdig[:,:,randC] = random.randint(0,100)
            mnistdig = self._cImg(np.moveaxis(mnistdig, 0, 1) / 255., self.scale * self.foregroundScale, False)
            foreground_objs.append(mnistdig)
        shapeX2 = self.shapeX // 2
        shapeY2 = self.shapeY // 2
        MINPOS = 2
        MAXPOSX = self.shapeX - MINPOS - foreground_objs[0].shape[1]
        MAXPOSY = self.shapeY - MINPOS - foreground_objs[0].shape[0]
        # Start corners: top-left, down-left, top-right, down-right.
        possiblePos = [
            {"p": np.array([MINPOS, MINPOS]), "corner": 'tl'},
            {"p": np.array([MINPOS, MAXPOSY]), "corner": 'dl'},
            {"p": np.array([MAXPOSX, MINPOS]), "corner": 'tr'},
            {"p": np.array([MAXPOSX, MAXPOSY]), "corner": 'dr'},
        ]
        # Two corners/velocities are always drawn, even when digitCount == 1.
        positions = random.sample(possiblePos, 2)
        velocities = np.random.randint(low=int(self.minResultSpeed * self.scale),
                                       high=(self.maxResultSpeed * self.scale) + 1, size=(2, 2))
        # Point the first digit's velocity into the canvas from its corner.
        if positions[0]["corner"] == 'dl':
            velocities[0][1] *= -1
        elif positions[0]["corner"] == 'tr':
            velocities[0][0] *= -1
        elif positions[0]["corner"] == 'dr':
            velocities[0][1] *= -1
            velocities[0][0] *= -1
        # Constrain the second digit's velocity relative to the first so the
        # two trajectories do not collide; the exact branch depends on
        # whether the corners share an x, share a y, or are diagonal.
        if positions[0]['p'][0] == positions[1]['p'][0]:
            velocities[1][0] = 0
            velocities[1][0] *= -1 if positions[1]['p'][0] > shapeX2 else 1
            velocities[1][1] = min(velocities[1][1], abs(velocities[0][0]) - 1)
            velocities[1][1] *= -1 if positions[1]['p'][1] > shapeY2 else 1
        elif positions[0]['p'][1] == positions[1]['p'][1]:
            velocities[1][1] = 0
            velocities[1][1] *= -1 if positions[1]['p'][1] > shapeY2 else 1
            velocities[1][0] = min(velocities[1][0], abs(velocities[0][1]) - 1)
            velocities[1][0] *= -1 if positions[1]['p'][0] > shapeX2 else 1
        else:
            axis = 0 if abs(velocities[0][0]) <= abs(velocities[0][1]) else 1  # random.randint(0,1)
            naxis = (axis + 1) % 2
            velocities[1][axis] = 0
            # NOTE(review): `positions[axis]` here (not positions[1]) looks
            # suspicious -- confirm against the intended geometry.
            velocities[1][axis] *= -1 if positions[axis]['p'][1] > (shapeX2 if axis == 0 else shapeY2) else 1
            velocities[1][naxis] *= np.sign(positions[0]['p'][naxis] - positions[1]['p'][naxis])
        stl = self.STL10.__getitem__(idx)
        if not self.color:
            stl = np.array(stl[0])[ :, :,np.newaxis]
        else:
            stl = np.array(stl[0])
        # Inverted background in [0, 1].
        bg = 1 - (np.moveaxis(stl, 0, 1)/ 255.)
        # bg *= 0
        channel = 3 if self.color else 1
        ResFrame = np.empty((self.seqLength, self.shapeXOrig, self.shapeYOrig,channel), dtype=np.float32)
        ResFrameFG = np.empty((self.seqLength, self.shapeXOrig, self.shapeYOrig,channel), dtype=np.float32)
        ResFrameAlpha = np.empty((self.seqLength, self.shapeXOrig, self.shapeYOrig,channel), dtype=np.float32)
        ResFrameBG = np.empty((self.seqLength, self.shapeXOrig, self.shapeYOrig,channel), dtype=np.float32)
        for frame_idx in range(self.seqLength):
            frame = np.zeros((self.shapeX, self.shapeY,channel), dtype=np.float32)
            frameFG = np.zeros((self.shapeX, self.shapeY,channel), dtype=np.float32)
            frameAlpha = np.zeros((self.shapeX, self.shapeY,channel), dtype=np.float32)
            frameBG = np.zeros((self.shapeX, self.shapeY,channel), dtype=np.float32)
            frameBG = bg
            frame += bg
            for ax in range(self.digitCount):
                # Advance, then paste the sprite over composite/FG/alpha.
                positions[ax]['p'] += velocities[ax]
                IN = [positions[ax]['p'][0],
                      positions[ax]['p'][0]
                      + foreground_objs[ax].shape[0],
                      positions[ax]['p'][1],
                      positions[ax]['p'][1]
                      + foreground_objs[ax].shape[1]]
                frame[IN[0]:IN[1], IN[2]:IN[3],:] = foreground_objs[ax]
                frameFG[IN[0]:IN[1], IN[2]:IN[3],:] = foreground_objs[ax]
                frameAlpha[IN[0]:IN[1], IN[2]:IN[3],:] = np.ones_like(foreground_objs[ax])
            ResFrame[frame_idx] = self._scaleBlur(frame)
            ResFrameFG[frame_idx] = self._scaleBlur(frameFG)
            ResFrameAlpha[frame_idx] = self._scaleBlur(frameAlpha)
            ResFrameBG[frame_idx] = self._scaleBlur(frameBG)
        del frame, frameFG, frameAlpha, frameBG
        # Reorder (T, X, Y, C) -> (T, C, Y, X).
        ResFrame = np.moveaxis(ResFrame, [0,1,2,3] , [0,3,2,1])
        ResFrameAlpha = np.moveaxis(ResFrameAlpha, [0,1,2,3] , [0,3,2,1])
        ResFrameBG = np.moveaxis(ResFrameBG, [0,1,2,3] , [0,3,2,1])
        ResFrameFG = np.moveaxis(ResFrameFG, [0,1,2,3] , [0,3,2,1])
        # [batch * depth(# of frames) * channel(# of channels of each image) * height * width]
        result = {'GT': ResFrame, 'A': ResFrameAlpha, 'BG': ResFrameBG, 'FG': ResFrameFG,
                  "velocity": velocities / self.scale}
        # result = ResFrame
        return result
# Maps a short dataset key to the pickled sequence file (under
# data/pickled_ds/) that sequenceData loads for it.
sequenceFileDict = {
    'lin_3': 'canvas_down_sample_just_translation_3_18_06.pt',
    'rot_lin_2': 'canvas_down_sample_just_rotation_15_06.pt',
    'lin_1': 'canvas_down_sample_just_translation_15_06.pt',
    'challenge': 'canvas_down_sample_extreme_16_06.pt',
    'cars': 'car_sequence_257.pt',
    'small_cars': 'car_sequence_downsampled.pt',
    'more_cars': 'car_sequence_downsampled_long.pt',
    'stationary_rotation': 'canvas_down_sample_inplace_rotation_2_22_06.pt',
    'lin_2': 'canvas_down_sample_just_translation_2_21_06.pt',
    'acc_1': 'canvas_down_sample_with_acc_16_06.pt',
    'rot_lin_scale_2': 'canvas_down_sample_everything_2_24_06.pt',
    'all_cars': 'carData_inv_01_07.pt',
    'random_cars': 'carData_inv_permuted_01_07.pt',
    'augmented_cars': 'augmented_cars.pt',
    'rot_lin_2_NOSCALE': 'canvas_down_sample_no_scale_12_07.pt',
    'high_res_test': 'canvas_07_08.pt',
    'high_res_test_3': 'canvas_3_07_08.pt',
    'circle_sanity_1_px': 'canvas_circle_int_up.pt',
    'circle_sanity_2_px': 'canvas_circle_int_up_2.pt'
}
class sequenceData(Dataset):
    """Dataset over a pre-rendered sequence tensor loaded from disk.

    Each item is one sequence, resized to `size` and truncated to
    `sequence_length` frames, with the channel count adapted to `color`.
    """
    def __init__(self, config,key='rot_lin_scale_2', device=torch.device('cpu'), size=(65, 65), sequence_length=10,color=False):
        # Known keys are looked up in sequenceFileDict; anything else is
        # treated as a raw file stem under data/pickled_ds/.
        self.key = key
        self.dict = sequenceFileDict
        if key in self.dict:
            self.filePath = 'data/pickled_ds/' + self.dict[key]
        else:
            self.filePath = 'data/pickled_ds/' + key+'.pt'
        self.data = torch.load(self.filePath).float()
        self.len = self.data.shape[0]
        self.dev = device
        # Callers pass (W, H); interpolate expects (H, W).
        self.size = size[::-1]
        self.sequence_length = sequence_length
        self.color = color
        self.config =config

    def __getitem__(self, ind):
        # 4-dim data: add a per-frame channel axis (assumes (N, T, H, W) --
        # TODO confirm against the pickled files).
        if len(self.data.shape)==4:
            data = self.data[ind].unsqueeze(1).to(self.dev)
        else:
            data = self.data[ind].to(self.dev)
        res = torch.nn.functional.interpolate(data, size=self.size,
                                              mode='bilinear', align_corners=False)[:self.sequence_length]
        # Channel adaptation: grey -> RGB by expand, RGB -> grey by max.
        if res.shape[1]==1 and self.color:
            res = res.expand(res.shape[0],3,res.shape[2],res.shape[3])
        elif res.shape[1]==3 and not self.color:
            res,_ = res.max(dim=1)
            res=res.unsqueeze(1)
        return res

    def __len__(self):
        return self.len
class savedData(Dataset):
    """Dataset wrapper around already-materialised samples (a tensor of
    sequences, or an indexable collection of sample dicts) that resizes the
    trailing two spatial dims of every sample to a fixed size.
    """
    def __init__(self,data,device=torch.device('cpu'), size=(65, 65),limit=1):
        self.data = data
        # Fraction of the data exposed through __len__ (0 < limit <= 1).
        self.limit=limit
        self.dev = device
        # Callers pass (W, H); torch interpolate expects (H, W).
        self.size = size[::-1]

    def reshape(self,inp):
        """Bilinearly resize `inp`'s last two dims to self.size.

        >4-dim tensors have their leading dims folded into the batch axis and
        restored afterwards; <=3-dim inputs are returned unchanged.
        """
        shp=inp.shape
        if len(shp)>4:
            return torch.nn.functional.interpolate(inp.reshape(-1,1,shp[-2],shp[-1]), size=self.size,
                                                   mode='bilinear', align_corners=False).reshape(shp[:-2]+self.size)
        elif len(shp)>3:
            return torch.nn.functional.interpolate(inp, size=self.size,
                                                   mode='bilinear', align_corners=False)
        # Fix: previously fell off the end and returned None for <=3-dim
        # inputs; pass them through untouched instead.
        return inp

    def __getitem__(self, ind):
        res = self.data[ind]
        # Fix: was `res is dict`, an identity comparison against the type
        # object itself (always False), so dict samples were fed to
        # interpolate directly instead of being resized field by field.
        if isinstance(res, dict):
            for it in res.keys():
                res[it] = self.reshape(res[it])
            return res
        else:
            return self.reshape(res)

    def __len__(self):
        # Length is truncated by `limit`.
        return int(len(self.data)*self.limit)
def worker_init_fn(worker_id):
    """Seed numpy distinctly (but reproducibly) in each DataLoader worker.

    The base seed is taken from the parent process's current RNG state, so
    runs that seed numpy before creating the DataLoader stay deterministic
    while workers still diverge from one another.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def get_data_loaders(config, key='rot_lin_scale_2', size=(65, 65), ratio=[0.15,0.15], batch_size=1,test_batch_size=1, num_workers=1,
                     device=torch.device('cpu'), limit=1, sequence_length=10):
    """Build (train, valid, test) DataLoaders for the dataset named by `key`.

    ratio = [valid_fraction, test_fraction]; `limit` in (0, 1] truncates each
    split.  NOTE(review): `ratio` is a mutable default argument -- it is never
    mutated here, but replacing it with None-plus-default would be safer.
    """
    if key == 'MotionSegmentation':
        # Synthetic MNIST-on-STL10 clips: train/valid come from the training
        # split, test from a separately constructed inference-phase dataset.
        dataset = MovingFGeSOnBGDataset(infrencePhase=False, seqLength=sequence_length, shapeX=size[0], shapeY=size[1]
                                        ,digitCount = config.digitCount, scale=2, foregroundScale=0.7, blurIt=True,
                                        minResultSpeed=2, maxResultSpeed=4,color=config.color)
        dlen = len(dataset)
        splitTr = int((1-(ratio[0]+ratio[1]))*dlen)
        tr_ds,val_ds=torch.utils.data.random_split(dataset,
                                                   [splitTr, dlen-(splitTr)])
        tr_ds = torch.utils.data.Subset(tr_ds, range(0,int(len(tr_ds)*limit)))
        val_ds = torch.utils.data.Subset(val_ds, range(0,int(len(val_ds)*limit)))
        train_loader = DataLoader(tr_ds, batch_size=batch_size, num_workers=num_workers, shuffle=True,worker_init_fn=worker_init_fn)
        valid_loader = DataLoader(val_ds, batch_size=test_batch_size, num_workers=num_workers, shuffle=False,worker_init_fn=worker_init_fn)
        dataset = MovingFGeSOnBGDataset(infrencePhase=True, seqLength=sequence_length, shapeX=size[0], shapeY=size[1]
                                        ,digitCount = config.digitCount, scale=2, foregroundScale=0.7, blurIt=True,
                                        minResultSpeed=2, maxResultSpeed=4,color=config.color)
        dlen = len(dataset)
        limitedDlen = int(dlen * limit)
        te_ds,_=torch.utils.data.random_split(dataset,
                                              [limitedDlen, dlen-(limitedDlen)])
        test_loader = DataLoader(te_ds, batch_size=test_batch_size, num_workers=num_workers, shuffle=False,worker_init_fn=worker_init_fn)
    elif key == 'HugeNGSIM':
        # Large NGSIM setup: train on all but the last four videos, validate
        # and test on two videos each.
        fileNames = [
            'data/NGSIM/peachtree-camera5-1245pm-0100pm.avi',
            'data/NGSIM/peachtree-camera2-1245pm-0100pm.avi',
            'data/NGSIM/nb-camera7-0400pm-0415pm.avi',
            'data/NGSIM/lankershim-camera4-0830am-0845am.avi',
            'data/NGSIM/lankershim-camera5-0830am-0845am.avi',
            'data/NGSIM/peachtree-camera1-1245pm-0100pm.avi',
            'data/NGSIM/peachtree-camera3-1245pm-0100pm.avi',
            'data/NGSIM/peachtree-camera4-1245pm-0100pm.avi',
            'data/NGSIM/nb-camera5-0400pm-0415pm.avi',
            'data/NGSIM/nb-camera6-0400pm-0415pm.avi'
        ]
        dataset = VideoLoader(fileLoc=fileNames[:-4], fCount=sequence_length,
                              sampleRate=(1, 1), size=size,color=config.color)
        dlen = len(dataset)
        limitedDlen = int(dlen * limit)
        tr_ds,_=torch.utils.data.random_split(dataset,
                                              [limitedDlen, dlen-(limitedDlen)])
        train_loader = DataLoader(tr_ds, batch_size=batch_size, num_workers=num_workers, shuffle=True,worker_init_fn=worker_init_fn)
        dataset = VideoLoader(fileLoc=fileNames[-4:-2], fCount=sequence_length,
                              sampleRate=(1, 1), size=size,color=config.color)
        dlen = len(dataset)
        limitedDlen = int(dlen * limit)
        va_ds,_=torch.utils.data.random_split(dataset,
                                              [limitedDlen, dlen-(limitedDlen)])
        valid_loader = DataLoader(va_ds, batch_size=test_batch_size, num_workers=num_workers, shuffle=False,worker_init_fn=worker_init_fn)
        dataset = VideoLoader(fileLoc=fileNames[-2:], fCount=sequence_length,
                              sampleRate=(1, 1), size=size,color=config.color)
        dlen = len(dataset)
        limitedDlen = int(dlen * limit)
        te_ds,_=torch.utils.data.random_split(dataset,
                                              [limitedDlen, dlen-(limitedDlen)])
        test_loader = DataLoader(te_ds, batch_size=test_batch_size, num_workers=num_workers, shuffle=False,worker_init_fn=worker_init_fn)
    elif 'NGSIM' in key:
        # Small NGSIM setup: one dataset split three ways with the project's
        # own random_split helper (asset.utils).
        fileNames = [
            'data/NGSIM/nb-camera5-0400pm-0415pm.avi',
            'data/NGSIM/nb-camera6-0400pm-0415pm.avi'
        ]
        dataset = VideoLoader(fileLoc=fileNames, fCount=sequence_length,
                              sampleRate=(1, 1), size=size,color=config.color)
        dlen = len(dataset)
        splitTr = int((1-(ratio[0]+ratio[1]))*dlen)
        splitVa = int(ratio[0]*dlen)
        tr_ds,val_ds,test_ds=asset.utils.random_split(dataset,
                                                      [splitTr,splitVa, dlen-(splitTr+splitVa)])
        tr_ds = torch.utils.data.Subset(tr_ds, range(0,int(len(tr_ds)*limit)))
        val_ds = torch.utils.data.Subset(val_ds, range(0,int(len(val_ds)*limit)))
        test_ds = torch.utils.data.Subset(test_ds, range(0,int(len(test_ds)*limit)))
        train_loader = DataLoader(tr_ds, batch_size=batch_size, num_workers=num_workers,
                                  pin_memory=True, shuffle=True,worker_init_fn=worker_init_fn)
        valid_loader = DataLoader(val_ds, batch_size=test_batch_size, num_workers=num_workers,
                                  pin_memory=False, shuffle=False,worker_init_fn=worker_init_fn)
        test_loader = DataLoader(test_ds, batch_size=test_batch_size, num_workers=num_workers,
                                 pin_memory=False, shuffle=False,worker_init_fn=worker_init_fn)
    else:
        # Default: a pre-rendered sequence file, split train/valid/test.
        dataset = sequenceData(config,key=key, device=device, size=size, sequence_length=sequence_length,color=config.color)
        dlen = len(dataset)
        splitTr = int((1-(ratio[0]+ratio[1]))*dlen)
        splitVa = int(ratio[0]*dlen)
        tr_ds,val_ds,test_ds=torch.utils.data.random_split(dataset,
                                                           [splitTr,splitVa, dlen-(splitTr+splitVa)])
        tr_ds = torch.utils.data.Subset(tr_ds, range(0,int(len(tr_ds)*limit)))
        val_ds = torch.utils.data.Subset(val_ds, range(0,int(len(val_ds)*limit)))
        test_ds = torch.utils.data.Subset(test_ds, range(0,int(len(test_ds)*limit)))
        train_loader = DataLoader(tr_ds, batch_size=batch_size, num_workers=num_workers,
                                  pin_memory=True, shuffle=True,worker_init_fn=worker_init_fn)
        valid_loader = DataLoader(val_ds, batch_size=test_batch_size, num_workers=num_workers,
                                  pin_memory=False, shuffle=False,worker_init_fn=worker_init_fn)
        test_loader = DataLoader(test_ds, batch_size=test_batch_size, num_workers=num_workers,
                                 pin_memory=False, shuffle=False,worker_init_fn=worker_init_fn)
    return train_loader, valid_loader, test_loader
class VideoLoader(Dataset):
    """Dataset of `fCount`-frame clips read on demand from a list of videos.

    Frames are centre-cropped (when the target is square), blurred, resized
    to `size`, optionally augmented, converted to grayscale or RGB, scaled
    to [0, 1] and inverted.  `sampleRate` is a (min, max) frame stride; the
    actual stride is drawn per clip.
    """
    def __init__(self, fileLoc: List[str], fCount: int = 10,
                 sampleRate: Tuple[int, int] = (1, 1),
                 size: Tuple[int, int] = (160, 120),color=False):
        if fCount < 2:
            raise Exception("Input arg is not correct!")
        self.fileLoc = fileLoc
        self.fCount = fCount
        self.size = size
        self.sampleRate = sampleRate
        self.length = 0
        # Per video: [first global index, last global index, start frame].
        self.frameVideo = []
        # Captures are opened lazily in __getitem__ so the dataset can be
        # pickled into DataLoader worker processes.
        self.caps = None
        self.color = color
        for ifl, fl in enumerate(self.fileLoc):
            self.curVCI = ifl
            cap = cv2.VideoCapture(fl)
            vidLen = (int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1)
            curStart = 0
            curFinish = vidLen
            # Number of valid clip start positions in this video.
            curLen = ((curFinish - curStart) // self.sampleRate[1]) - self.fCount
            self.frameVideo.append([self.length, self.length + curLen, curStart])
            self.length += curLen
            try:
                cap.release()
            except Exception:
                # Best-effort cleanup; a failed release is not fatal here.
                pass
            del cap
        print("VideoLoader initiated!")

    def __del__(self):
        # Release any lazily opened captures.
        if self.caps is not None:
            # Fix: was `for c in enumerate(self.caps)`, which made `c` an
            # (index, capture) tuple, so c.release() raised AttributeError
            # (silently swallowed) and no capture was ever released.
            for c in self.caps:
                try:
                    c.release()
                except Exception:
                    pass

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        if self.caps is None:
            self.caps = [cv2.VideoCapture(vp) for vp in self.fileLoc]
        # Find which video the global index falls into.
        foundC = False
        for ifv, fv in enumerate(self.frameVideo):
            if idx >= fv[0] and idx < fv[1]:
                foundC = True
                if self.curVCI != ifv:
                    self.curVCI = ifv
                break
        if not foundC:
            raise IndexError()
        cap = self.caps[self.curVCI]
        # Translate to a frame index local to the selected video.
        idx -= self.frameVideo[self.curVCI][0]
        frames = torch.Tensor(self.fCount, 3 if self.color else 1, self.size[1], self.size[0])
        vidFrameIdx = (idx) * self.sampleRate[1] + self.frameVideo[self.curVCI][2]
        cap.set(cv2.CAP_PROP_POS_FRAMES, vidFrameIdx)
        # Per-clip stride within the configured (min, max) range.
        srate = random.randint(self.sampleRate[0], self.sampleRate[1])
        if getIfAugmentData():
            # One deterministic augmenter per clip so every frame of the
            # clip receives the same transform.
            augFull = iaa.Sequential([
                iaa.Affine(scale={"x": (0.97, 1), "y": (0.97, 1.)}, rotate=(-0.4, 0.4),
                           translate_px={"x": (-3, 3), "y": (-3, 3)}, shear=(-0.3, 0.3)),
                # iaa.MultiplyAndAddToBrightness(mul=(0.9, 1.1), add=(-10, 10)),
                iaa.AddToHueAndSaturation((-20, 20), per_channel=False),
                iaa.Fliplr(0.5),
                # iaa.Flipud(0.5),
            ])
            augFull_det = augFull.to_deterministic()
        for i in range(self.fCount):
            # Skip srate-1 frames, keeping the last read.
            for _ in range(srate):
                ret, frame = cap.read()
            if not ret:
                raise Exception("Cannot read the file %s at index %d" % (self.fileLoc[self.curVCI], idx))
            if self.size[0] == self.size[1]:
                # Square target: crop to the largest centred-at-origin square.
                res_min = min(frame.shape[0], frame.shape[1])
                frame = frame[:res_min, :res_min]
            frame = cv2.GaussianBlur(frame, (5, 5), 0)
            frame = cv2.resize(frame, self.size, interpolation=cv2.INTER_LINEAR)
            if getIfAugmentData():
                frame = augFull_det(image=frame)
            if not self.color:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]
            else:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = torch.from_numpy(frame / 255.)
            frame = frame.permute(2,0,1)
            # Invert, matching the synthetic datasets above.
            frames[i] = 1 - frame
        return frames
| AIS-Bonn/Local_Freq_Transformer_Net | lfdtn/dataloaders.py | dataloaders.py | py | 22,110 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 37,
"usage_type": "name"
},
{
"api_name... |
72861968355 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import sys
import re
import codecs
import chardet
def convert(filename, target_encoding="UTF-8"):
    """Detect the encoding of `filename` and rewrite it in `target_encoding`.

    Prints the re-detected encoding of the rewritten file; I/O problems are
    reported rather than raised.  (Python 2: the file is read as bytes,
    decoded with the chardet-detected codec, then written back through a
    codecs writer.)
    """
    try:
        # `with` blocks fix the original's leaked file handles.
        with codecs.open(filename, 'r') as f:
            content = f.read()
        source_encoding = chardet.detect(content)['encoding']
        # Fix: was `content is not ''` -- an identity comparison whose result
        # is implementation-defined for strings; compare by value.
        if content != '' and source_encoding is not None:
            content = content.decode(source_encoding)
            with codecs.open(filename, 'w', encoding=target_encoding) as f:
                f.write(content)
            with codecs.open(filename, 'r') as f:
                newcontent = f.read()
            new_encoding = chardet.detect(newcontent)['encoding']
            print (new_encoding)
    except IOError as err:
        print("I/O error:{0}".format(err))
def explore(dir):
    """Walk `dir` and convert every .cpp / .h source file in place.

    Hidden files and hidden directories (leading '.') are skipped.
    """
    for root, dirs, files in os.walk(dir):
        files = [f for f in files if not f[0] == '.']
        dirs[:] = [d for d in dirs if not d[0] == '.']
        for file in files:
            # Fix: re.match('.cpp|.h', ext) treated '.' as "any character"
            # and only matched a prefix, so extensions like '.html', '.hpp'
            # or '.acpp' were converted too.  Match the extension exactly.
            if os.path.splitext(file)[1] in ('.cpp', '.h'):
                print(file)
                path = os.path.join(root, file)
                convert(path)
if __name__ == "__main__":
    # CLI: one argument -- a file (convert just it) or a directory
    # (convert every .cpp/.h file beneath it).
    if len(sys.argv) >= 2:
        arg1 = sys.argv[1]
        print (arg1)
        if os.path.isfile(arg1):
            convert(arg1)
        elif os.path.exists(arg1):
            explore(arg1)
    print ("finished!")
{
"api_name": "codecs.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "chardet.detect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number... |
6044731535 | """
Michael Neilson <github: nichael-meilson>
2022-06-30
"""
import pytest
from httpx import AsyncClient
from fastapi import FastAPI
from starlette.status import (
HTTP_201_CREATED,
HTTP_404_NOT_FOUND,
HTTP_422_UNPROCESSABLE_ENTITY,
HTTP_200_OK,
)
from app.models.articles import CreateArticle, ArticleInDB, GetArticle
# decorate all tests with @pytest.mark.asyncio
pytestmark = pytest.mark.asyncio
@pytest.fixture
def new_article():
    """A fully-populated, valid CreateArticle payload shared by the route tests."""
    payload = dict(
        id=1,
        name="string",
        species="string",
        goal="Study Evolution(ary Processes)",
        num_lines=0,
        setup="string",
        recipient="string",
        medium="string",
        selective_condition="string",
        cripple_mutant=True,
        how_cripple_mutant="string",
        changing_env=True,
        how_changing_env="string",
        env_type="string",
        how_type_env="string",
        complex_stressors=True,
        how_complex_stressors="string",
        num_generations=0,
        abs_time=0,
        contamination="string",
        fitness=True,
        fitness_param="string",
        competition="string",
        mutation_tracking="string",
        wgs=True,
        wgs_tech="string",
        wgs_platform="string",
        num_clones=0,
        num_populations=0,
        cov_clones=0,
        cov_populations=0,
        detection_lim=0,
        ngs_data_link="string",
        max_pop_size=0,
        min_pop_size=0,
        major_outcome="string",
        remarks="string",
        complete_mutation_data=True,
        reference_id="string",
        pmid="string",
        paper_url="string",
    )
    return CreateArticle(**payload)
class TestCleaningsRoutes:
    # NOTE(review): the class name says "Cleanings" but the routes exercised
    # are article routes -- presumably copied from a template project.

    @pytest.mark.asyncio
    async def test_routes_exist(self, app: FastAPI, client: AsyncClient) -> None:
        """The create-article route is registered (any status except 404)."""
        res = await client.post(app.url_path_for("articles:create-article"), json={})
        assert res.status_code != HTTP_404_NOT_FOUND

    @pytest.mark.asyncio
    async def test_invalid_input_raises_error(
        self, app: FastAPI, client: AsyncClient
    ) -> None:
        """An empty body is rejected by request validation with 422."""
        res = await client.post(app.url_path_for("articles:create-article"), json={})
        assert res.status_code == HTTP_422_UNPROCESSABLE_ENTITY
class TestCreateArticle:
    async def test_valid_input_creates_article(
        self, app: FastAPI, client: AsyncClient, new_article: CreateArticle
    ) -> None:
        """A well-formed payload is persisted and echoed back unchanged."""
        res = await client.post(
            app.url_path_for("articles:create-article"),
            json={"new_article": new_article.dict()},
        )
        assert res.status_code == HTTP_201_CREATED
        created_article = ArticleInDB(**res.json())
        new_article_response = ArticleInDB(**new_article.dict())
        assert created_article == new_article_response

    @pytest.mark.parametrize(
        "invalid_payload, status_code",
        (
            (None, 422),
            ({}, 422),
            ({"name": "test_name"}, 422),
            ({"price": 10.00}, 422),
            ({"name": "test_name", "description": "test"}, 422),
        ),
    )
    async def test_invalid_input_raises_error(
        self, app: FastAPI, client: AsyncClient, invalid_payload: dict, status_code: int
    ) -> None:
        """Incomplete or malformed payloads fail validation with 422."""
        res = await client.post(
            app.url_path_for("articles:create-article"),
            json={"new_article": invalid_payload},
        )
        assert res.status_code == status_code
class TestGetArticle:
    async def test_get_article_by_author(
        self, app: FastAPI, client: AsyncClient
    ) -> None:
        """Fetching by id returns 200 and an article with that id."""
        res = await client.get(
            app.url_path_for("article:get-article"), params={"id": 1}
        )
        assert res.status_code == HTTP_200_OK
        article = GetArticle(**res.json())
        assert article.id == 1
class TestGetAllArticles:
    async def test_get_all_articles(self, app: FastAPI, client: AsyncClient) -> None:
        """Listing all articles returns 200 and a non-empty parsable list."""
        res = await client.get(app.url_path_for("articles:get-article"))
        assert res.status_code == HTTP_200_OK
        cleaning = [GetArticle(**article) for article in res.json()]
        # TODO think of another test here
        assert len(cleaning) > 0
| nichael-meilson/camel2 | src/tests/test_articles.py | test_articles.py | py | 4,126 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytest.mark",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "app.models.articles.CreateArticle",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name":... |
23207730387 | import dgl
import unittest
import backend as F
from dgl.dataloading import AsyncTransferer
@unittest.skipIf(F._default_context_str == 'cpu',
                 reason="CPU transfer not allowed")
def test_async_transferer_to_other():
    """CPU -> device async copy preserves values and lands on F.ctx()."""
    cpu_ones = F.ones([100,75,25], dtype=F.int32, ctx=F.cpu())
    tran = AsyncTransferer(F.ctx())
    t = tran.async_copy(cpu_ones, F.ctx())
    other_ones = t.wait()
    assert F.context(other_ones) == F.ctx()
    # Copy back to CPU so the element-wise comparison is context-safe.
    assert F.array_equal(F.copy_to(other_ones, ctx=F.cpu()), cpu_ones)
def test_async_transferer_from_other():
    """Device -> CPU async copy is unsupported and must raise ValueError."""
    other_ones = F.ones([100,75,25], dtype=F.int32, ctx=F.ctx())
    tran = AsyncTransferer(F.ctx())
    try:
        t = tran.async_copy(other_ones, F.cpu())
    except ValueError:
        # correctly threw an error
        pass
    else:
        # should have thrown an error
        assert False
if __name__ == '__main__':
    # Allow running this module directly, outside a test harness.
    test_async_transferer_to_other()
    test_async_transferer_from_other()
| taotianli/gin_model.py | tests/compute/_test_async_transferer.py | _test_async_transferer.py | py | 975 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "backend.ones",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "backend.int32",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "backend.cpu",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dgl.dataloading.AsyncTran... |
30478138727 | #!/usr/bin/env python
from __future__ import division
__author__ = "Sam Way"
__copyright__ = "Copyright 2014, The Clauset Lab"
__license__ = "BSD"
__maintainer__ = "Sam Way"
__email__ = "samfway@gmail.com"
__status__ = "Development"
import warnings
from numpy import array, asarray, unique, bincount, min, floor, zeros
from numpy.random import shuffle, permutation
from sklearn.cross_validation import StratifiedKFold
def BalancedKFold(y, n_folds=3, n_iter=1, indices=None, shuffle=False, random_state=None):
    """ Return class-balanced cross validation folds.

    Runs stratified K-fold `n_iter` times and, within each fold, subsamples
    every class down to the depth of the rarest class, so train and test
    sets are exactly class-balanced.  Returns a list of
    (train_indices, test_indices) pairs.

    NOTE(review): `indices`, `shuffle` and `random_state` are accepted but
    never used; `shuffle` also shadows the numpy.random.shuffle import.
    This function uses Python-2 `xrange` and the long-removed
    sklearn.cross_validation module -- it will not run on modern stacks.
    """
    y = asarray(y)
    n_samples = y.shape[0]  # NOTE(review): computed but unused
    unique_labels, y_inv = unique(y, return_inverse=True)
    n_classes = len(unique_labels)
    label_counts = bincount(y_inv)
    min_labels = min(label_counts)  # numpy.min (shadows the builtin here)
    # Per-class quotas derived from the rarest class.
    test_per_fold = floor(min_labels/n_folds)
    total_test = test_per_fold * n_classes
    train_per_fold = test_per_fold * (n_folds-1)
    total_train = train_per_fold * n_classes
    if train_per_fold < 1:
        raise ValueError("The least populated class has too few samples (%d) to "
                         "use %d-fold cross validation!" % (min_labels, n_folds))
    # Peform regular, stratified cross validation, but subsample all class
    # labels to even depth
    folds = []
    for t in xrange(n_iter):
        for (training, testing) in StratifiedKFold(y_inv, n_folds):
            train = []
            test = []
            # Shuffle so the per-class subsample is random each iteration.
            training = permutation(training)
            testing = permutation(testing)
            saved = 0
            counts = zeros(n_classes)
            for i in training:
                if counts[y_inv[i]] < train_per_fold:
                    train.append(i)
                    counts[y_inv[i]] += 1
                    saved += 1
                if saved >= total_train:
                    break
            saved = 0
            counts = zeros(n_classes)
            for i in testing:
                if counts[y_inv[i]] < test_per_fold:
                    test.append(i)
                    counts[y_inv[i]] += 1
                    saved += 1
                if saved >= total_test:
                    break
            folds.append((asarray(train), asarray(test)))
    return folds

# Dead remnant of a generator-based variant; kept for reference.
'''
    yield (asarray(train), asarray(test))
'''
| samfway/biotm | misc/util.py | util.py | py | 2,307 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.asarray",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.bincount",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_numbe... |
10880342788 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.template.loader import get_template
from polls.models import Food_Place_ID_Yelp,Food_Place_ID_Zomato, Recipe, User_Detail
from django.contrib.auth.models import User
import httplib
import json
import urllib
from urllib import urlencode
import requests
#the following setups the search page with a simple search box to get user input
## template contains an input forum for the user to type in the Restaurant Name
def find_restaurants(request):
    """Render the restaurant search page (a single query input form)."""
    search_page = get_template('find_restaurants.html')
    return HttpResponse(search_page.render({}, request))
#the following function gets the proper restaraunt information based on what the user passed into the the input_box
def restaurants_results(request):
    """Handle a restaurant search: exact name, city or zip code.

    Exact-name hits are enriched with reviews from the Yelp API (and, when
    the place is also in the Zomato table, Zomato reviews, cuisine, menu url
    and a recipe-recommendation link).  City/zip queries return a plain list.
    NOTE(review): both API keys are hard-coded below -- move them to
    settings/environment.  `httplib` is Python-2 only (http.client in 3).
    """
    both=False
    #get user input and store it into a variable
    input_box= request.GET['query']
    #check if user typed in the exact name of the restaraunt in the database
    search_results= Food_Place_ID_Yelp.objects.filter(name__startswith=input_box)
    lis=[]
    #name of restaurant
    name=None
    #image of restaurant pulled from Yelp API
    image=None
    # Category type of the Restaurant according to Yelp
    category=None
    # Restaurant Rating
    rating=None
    # phone number of the restaurant
    phone_num=None
    # address of the restaurant
    address=None
    # zipcode of the restaurant
    zip_code=None
    # Cuisine information which is pulled from zomato api
    cusine=None
    # menu url that links to zomato's page for this restaurant
    menu_url=None
    average_cost_for_two=None
    # link the user clicks to get recommended recipes for this restaurant
    recipe=None
    #If condition checks to see if the user input was an exact name of a restaurant in the database
    if(search_results):
        # Exact name match: load cached details, then fetch up to 3 reviews
        # from the Yelp API using the stored restaurant id.
        search_results1=search_results[0].restaraunt_id
        name=search_results[0].name
        image=search_results[0].image_url
        category=search_results[0].category
        rating=search_results[0].rating
        phone_num=search_results[0].phone_num
        address=search_results[0].address
        zip_code=search_results[0].zip_code
        #food place contains the restaurants Id which is used in the Yelp API call
        food_place_name=search_results1
        headers = {
            'authorization': "Bearer 1_-tP4IlMNVpRBOj68A5aZJ4FwHdMGCps6xN9PFV0q1AmreUfNclD1Hw0bqQuCSfjthDFl4JQGtfTmvI321ffJ6LcPZ0O2XDYfa5OedFipN4Riw7iibTBCvUR6fVWnYx",
            'cache-control': "no-cache",
        }
        make_connection = httplib.HTTPSConnection("api.yelp.com")
        res_id= food_place_name
        make_connection.request("GET", "https://api.yelp.com/v3/businesses/"+res_id+"/reviews", headers=headers)
        result = make_connection.getresponse()
        json_data = result.read()
        json_data = json.loads(json_data.decode("utf-8"))
        reviews=json_data['reviews']
        for review in reviews:
            lis.append(review['user']['name'] + ':'+ ' '+ (review['text']))
        #check if the restaurant name is in the zomato databse
        search_results= Food_Place_ID_Zomato.objects.filter(name__startswith=input_box)
        if(search_results):
            both=True
            # Zomato also knows this place: record the cuisine in the user's
            # search history and fetch up to 5 Zomato reviews.
            search_results1=search_results[0].restaraunt_id
            cusine=search_results[0].cusine
            entry=User_Detail(user_ID= str(request.user.username), user_name= cusine)
            entry.save()
            recipe="http://127.0.0.1:8000/database/recipes_results?query=" + str(cusine)
            menu_url=search_results[0].menu_url
            average_cost_for_two=search_results[0].average_cost_for_two
            header = {
                "User-agent": "curl/7.43.0",
                'X-Zomato-API-Key': '4903b587901a5352f403a3a97da3543a'
            }
            url='https://developers.zomato.com/api/v2.1/reviews?res_id='
            res_id=str(search_results1)
            url=url+res_id
            response = requests.get(url, headers=header)
            reviews=response.json()
            ## parse Zomato API response and collect all reviews in a list
            reviews=reviews['user_reviews']
            for review in reviews:
                lis.append(review['review']['user']['name']+ ': '+ review['review']['review_text'])
    #if the query was not an exact name, treat it as a city name
    elif(Food_Place_ID_Yelp.objects.filter(city=input_box)):
        lis=[]
        lis.append(" You Search Results for the given City: ")
        food_places=Food_Place_ID_Yelp.objects.filter(city=input_box)
        for place in food_places:
            lis.append(place.name+' Type: '+place.category+' Rating: '+str(place.rating)+ ' Number: ' + place.phone_num)
    # otherwise treat the query as a zip code
    elif(Food_Place_ID_Yelp.objects.filter(zip_code=input_box)):
        lis=[]
        lis.append(" You Search Results for the given Zip Code: ")
        food_places=Food_Place_ID_Yelp.objects.filter(zip_code=input_box)
        for place in food_places:
            lis.append(place.name+' Type: '+place.category+' Rating: '+str(place.rating)+ ' Number: ' + place.phone_num)
    # Rich template only when the place was found in both Yelp and Zomato.
    if(both == False):
        template= get_template('restaurants_results.html')
    else:
        template= get_template('restaurants_both_results.html')
    context={'search_results':lis, 'name':name, 'image':image, 'category':category, 'rating':rating, 'phone_num':phone_num, 'address':address, 'zip_code':zip_code, 'cusine':cusine, 'menu_url':menu_url, 'average_cost_for_two':average_cost_for_two, 'recipe':recipe }
    return HttpResponse(template.render(context,request))
#setup simple search function for recipes
def find_recipes(request):
    """Render the recipe-search landing page with an empty context."""
    template = get_template('find_recipes.html')
    empty_context = {}
    return HttpResponse(template.render(empty_context, request))
#find recipes either based on title or cusine based of the userinput given from the search recipe page
def recipes_results(request):
    """Search recipes by exact title, exact cuisine, or a cuisine substring.

    The special query value ``'user'`` instead builds a recommendation from
    the current user's most recent recorded search.

    Bug fix: the original branch chain had ``elif(True):`` *before* the
    exact-cuisine check, which made ``Recipe.objects.filter(cusine=input_box)``
    unreachable dead code.  The exact-cuisine match is now tried before
    falling back to the substring search.
    """
    template = get_template('recipes_results.html')
    input_box = request.GET['query']
    search_results = Recipe.objects.filter(title=input_box)
    lis = []
    if input_box == 'user':
        # Recommendation mode: take a fragment of the user's most recent
        # search-history entry and look up matching recipes and restaurants.
        history = User_Detail.objects.filter(user_ID=request.user.username)
        search = history[len(history) - 1].user_name[1:5]
        for search_result in Recipe.objects.filter(cusine__icontains=search):
            lis.append(' Cuisine: '+ search_result.cusine +' recipe title: '+ search_result.title + ' recipe ready time: ' + str(search_result.readyInMinutes) )
        for place in Food_Place_ID_Zomato.objects.filter(cusine__icontains=search):
            lis.append(place.name+' Type: '+place.cusine+' Address: '+str(place.address)+ ' City: ' + place.city)
    elif search_results:
        # Exact title match: show the first matching recipe's details.
        lis.append("General Information ")
        lis.append(' Cuisine: '+ search_results[0].cusine +' recipe title: '+ search_results[0].title + ' recipe ready time: ' + str(search_results[0].readyInMinutes) )
    elif Recipe.objects.filter(cusine=input_box):
        # Exact cuisine match (previously unreachable behind ``elif(True)``).
        lis.append("General Information ")
        for search_result in Recipe.objects.filter(cusine=input_box):
            lis.append('recipe id: '+ str(search_result.recipe_id) + ' Cuisine: '+ search_result.cusine +' recipe title: '+ search_result.title + ' recipe ready time: ' + str(search_result.readyInMinutes) )
    else:
        # Fallback: substring match on a fragment of the query string.
        check_cusine = input_box[1:5]
        for search_result in Recipe.objects.filter(cusine__icontains=check_cusine):
            lis.append('recipe id: '+ str(search_result.recipe_id) + ' Cuisine: '+ search_result.cusine +' recipe title: '+ search_result.title + ' recipe ready time: ' + str(search_result.readyInMinutes) )
    context = {'search_results': lis}
    return HttpResponse(template.render(context, request))
| aquddus95/API-Integration | API-Integration/finalproject/polls/views.py | views.py | py | 8,599 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.template.loader.get_template",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "polls.models.Food_Place_ID_Yelp.objects.filter",
"line_number": 33,
"usage_t... |
7695455337 | from .interpretpicklist import Interpretpicklist
from . import dateutils
from datetime import datetime
from . import xmlutilities
from synthesis.exceptions import DataFormatError#, SoftwareCompatibilityError
from . import logger
#from sys import version
from . import dbobjects
from .writer import Writer
from zope.interface import implementer
from sqlalchemy import or_, and_, between
from .conf import settings
from lxml import etree as ET
def buildWorkhistoryAttributes(element):
    """Stamp a work-history XML element with the current timestamp.

    Sets both the 'date_added' and 'date_effective' attributes to the
    current local time in ISO-8601 form.
    """
    for attr_name in ('date_added', 'date_effective'):
        element.attrib[attr_name] = datetime.now().isoformat()
@implementer(Writer)
class SvcPointXML5Writer():
    """Export Person/EntryExit data as a ServicePoint 5 XML document.

    Selects Person rows (plus related historical, race and site-service
    participation records) from the database, builds an lxml tree in the
    SvcPoint 5 schema and writes the result to ``poutDirectory``.

    Fixes applied relative to the original:
      * ``debugMessages`` is now stored on the instance (``fixSSN`` used it
        but ``__init__`` never saved it, crashing under settings.DEBUG);
      * ``calcHourlyWage`` no longer raises UnboundLocalError on "";
      * ``fixMiddleInitial`` strips whitespace *before* taking the initial;
      * ``updateReported`` catches ``Exception`` instead of a bare except.
    """
    # Writer Interface
    hmis_namespace = "http://www.hmis.info/schema/2_8/HUD_HMIS_2_8.xsd"
    airs_namespace = "http://www.hmis.info/schema/2_8/AIRS_3_0_draft5_mod.xsd"
    nsmap = {"hmis" : hmis_namespace, "airs" : airs_namespace}
    svcpt_version = '5.00'

    def __init__(self, poutDirectory, processingOptions, debugMessages=None):
        """Set up logging, the picklist translator and the DB connection.

        poutDirectory     -- directory the XML file will be written to
        processingOptions -- options object (reported/unreported flags,
                             alldates, startDate, endDate)
        debugMessages     -- optional debug logger used by fixSSN() when
                             settings.DEBUG is on
        """
        if settings.DEBUG:
            print("XML File to be dumped to: %s" % poutDirectory)
        self.log = logger.Logger(configFile=settings.LOGGING_INI, loglevel=40)
        self.outDirectory = poutDirectory
        self.pickList = Interpretpicklist()
        self.options = processingOptions
        # Bug fix: previously accepted but never stored, which made
        # fixSSN() fail with AttributeError whenever settings.DEBUG was on.
        self.debugMessages = debugMessages
        # Buffer for error messages to be displayed at the end of the run.
        self.errorMsgs = []
        self.db = dbobjects.DB()
        self.db.Base.metadata.create_all()

    def write(self):
        """Writer-interface entry point: build and dump the XML document."""
        self.startTransaction()
        self.processXML()
        self.prettify()
        print('==== Self:', self)
        xmlutilities.writeOutXML(self, xml_declaration=True, encoding="UTF-8")
        return True

    def updateReported(self, currentObject):
        """Mark *currentObject* as reported and commit the session.

        Failures are printed and deliberately swallowed (best-effort) so a
        single bad record does not abort the whole export.
        """
        try:
            if settings.DEBUG:
                print('Updating reporting for object: %s' % currentObject.__class__)
            currentObject.reported = True
            self.commitTransaction()
        except Exception:
            # Narrowed from a bare except; swallowing is intentional.
            print("Exception occurred during update the 'reported' flag")

    def prettify(self):
        """Indent the XML tree in place for human-readable output."""
        xmlutilities.indent(self.root_element)

    def dumpErrors(self):
        """Print every buffered error message with its index."""
        print("Error Reporting")
        print("-" * 80)
        for row, msg in enumerate(self.errorMsgs):
            print("%s %s" % (row, msg))

    def setSysID(self, pSysID):
        """Record the system id used while building client elements."""
        self.sysID = pSysID

    def commitTransaction(self):
        """Commit the current SQLAlchemy session."""
        self.session.commit()

    def startTransaction(self):
        """Open a new SQLAlchemy session."""
        self.session = self.db.Session()

    def pullConfiguration(self, pExportID):
        """Load the SystemConfiguration row for export id *pExportID*.

        Resolves Export -> SourceExportLink -> Source, then matches the
        configuration on both source id and the processing mode (test or
        production).  Stores the row on ``self.configurationRec``.
        NOTE(review): each ``.one()`` raises if the row is missing or
        duplicated -- confirm the database guarantees exactly one.
        """
        export = self.session.query(dbobjects.Export).filter(dbobjects.Export.export_id == pExportID).one()
        if settings.DEBUG:
            print("trying to do pullConfiguration")
        selink = self.session.query(dbobjects.SourceExportLink).filter(dbobjects.SourceExportLink.export_index_id == export.id).one()
        source = self.session.query(dbobjects.Source).filter(dbobjects.Source.id == selink.source_index_id).one()
        self.configurationRec = self.session.query(dbobjects.SystemConfiguration).filter(and_(dbobjects.SystemConfiguration.source_id == source.source_id, dbobjects.SystemConfiguration.processing_mode == settings.MODE)).one()

    def processXML(self):
        """Build the full document tree under ``self.root_element``.

        Selects Person rows according to the reported/unreported option and
        the optional date window, then emits one <Client> element (with
        assessment data and child entry/exits) per person.
        """
        if settings.DEBUG:
            print("processXML: Appending XML to Base Record")
        self.root_element = self.createDoc()
        clients = self.createClients(self.root_element)  # <clientRecords> container
        print('==== clientRecords created')
        # ``reported`` is tri-state (True / False / None), so the explicit
        # comparisons below are intentional and must not be made truthy.
        if self.options.reported == True:
            Persons = self.session.query(dbobjects.Person).filter(dbobjects.Person.reported == True)
        elif self.options.unreported == True:
            Persons = self.session.query(dbobjects.Person).filter(or_(dbobjects.Person.reported == False, dbobjects.Person.reported == None))
        elif self.options.reported == None:
            Persons = self.session.query(dbobjects.Person)
        # Apply the date window to the result set.
        if self.options.alldates == None:
            Persons = Persons.filter(between(dbobjects.Person.person_id_date_collected, self.options.startDate, self.options.endDate))
        pulledConfigID = 0  # re-pull the configuration only when the export changes
        for self.person in Persons:
            # One export per person (Person carries a FK to Export).
            export = self.person.fk_person_to_export
            if pulledConfigID != export.id:
                self.pullConfiguration(export.export_id)
                pulledConfigID = export.id
            self.ph = self.person.fk_person_to_person_historical  # list of rows
            self.race = self.person.fk_person_to_races
            self.site_service_part = self.person.site_service_participations
            # Reuse the client number already provided by the legacy system.
            self.sysID = self.person.id
            if self.person:
                self.client = self.createClient(clients)
                self.customizeClient(self.client)
                self.customizeClientPersonalIdentifiers(self.client, self.person)
                self.assessment_data = self.createAssessmentData(self.client)
                self.customizeAssessmentData(self.assessment_data)
                if self.site_service_part:
                    self.child_entry_exit = self.createChildEntryExit(self.client)
                    for ssp in self.site_service_part:
                        self.createEntryExit(self.child_entry_exit, ssp)
                # Flag the person so a later "unreported" run skips it.
                self.updateReported(self.person)

    def createDoc(self):
        """Create the <records> root element with its schema attributes.

        Target shape (per the sp5 sample instance):
            <records xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                     xsi:noNamespaceSchemaLocation="sp5.xsd"
                     odb_identifier="qwo7Wsoi" import_identifier="v7:1bl.e">
        """
        self.mymap = {"xsi" : "http://www.w3.org/2001/XMLSchema-instance"}
        root_element = ET.Element("records", nsmap=self.mymap)
        root_element.attrib["{"+self.mymap["xsi"]+"}noNamespaceSchemaLocation"] = "sp5.xsd"
        root_element.attrib["odb_identifier"] = "qwo7Wsoi"
        root_element.attrib["import_identifier"] = "v7:1bl.e"
        return root_element

    def createClients(self, root_element):
        """Create the <clientRecords> container element."""
        clients = ET.SubElement(root_element, "clientRecords")
        return clients

    def createClient(self, clients):
        """Create one <Client> element (capital 'C' in the sp5 schema)."""
        client = ET.SubElement(clients, "Client")
        return client

    def createChildEntryExit(self, client):
        """Create the <childEntryExit> container under a <Client>."""
        child_entry_exit = ET.SubElement(client, "childEntryExit")
        return child_entry_exit

    def createEntryExits(self, root_element):
        """Create a top-level <entryExitRecords> container (unused path)."""
        entry_exits = ET.SubElement(root_element, "entryExitRecords")
        return entry_exits

    def customizeClient(self, client):
        """Attach record/system ids and timestamps to a <Client> element."""
        client.attrib["record_id"] = "CL-" + str(self.person.id)
        # NOTE(review): schema meaning of system_id vs external_id is
        # unconfirmed; the legacy id-num is used here as in the original.
        client.attrib["system_id"] = self.person.person_id_id_num
        client.attrib["date_added"] = dateutils.fixDate(datetime.now())
        client.attrib["date_updated"] = dateutils.fixDate(datetime.now())

    def customizeClientPersonalIdentifiers(self, client, recordset):
        """Add name and SSN sub-elements for *recordset* (a Person row).

        Elements are only emitted when source data is present; the SSN is
        reformatted into the dashed form required by the schema.
        """
        if recordset.person_legal_first_name_unhashed != "" and recordset.person_legal_first_name_unhashed != None:
            first_name = ET.SubElement(client, "firstName")
            first_name.text = recordset.person_legal_first_name_unhashed
        if recordset.person_legal_last_name_unhashed != "" and recordset.person_legal_last_name_unhashed != None:
            last_name = ET.SubElement(client, "lastName")
            last_name.text = recordset.person_legal_last_name_unhashed
        if recordset.person_legal_middle_name_unhashed != "" and recordset.person_legal_middle_name_unhashed != None:
            mi_initial = ET.SubElement(client, "mi_initial")
            mi_initial.text = self.fixMiddleInitial(recordset.person_legal_middle_name_unhashed)
        # Incoming SSNs arrive as 123456789 and must become 123-45-6789.
        fixedSSN = self.fixSSN(recordset.person_social_security_number_unhashed)
        if fixedSSN != "" and fixedSSN != None:  # omit SSN when blank
            soc_sec_no = ET.SubElement(client, "socSecNoDashed")
            soc_sec_no.text = fixedSSN
            ssn_data_quality = ET.SubElement(client, "ssnDataQualityValue")
            ssn_data_quality.text = "full ssn reported (hud)"

    def createEntryExit(self, entry_exits, EE):
        """Create one <EntryExit> element for participation row *EE*."""
        entry_exit = ET.SubElement(entry_exits, "EntryExit")
        entry_exit.attrib["record_id"] = "EE-"+str(EE.id)
        # The participation id-num alone is not unique in sample input, so
        # the client id-num is appended to form the system id.
        entry_exit.attrib["system_id"] = EE.site_service_participation_idid_num+"-"+EE.person.person_id_id_num
        entry_exit.attrib["date_added"] = dateutils.fixDate(datetime.now())
        entry_exit.attrib["date_updated"] = dateutils.fixDate(datetime.now())
        self.customizeEntryExit(entry_exit, EE)
        return entry_exit

    def customizeEntryExit(self, entry_exit, EE):
        """Add type, provider and entry/exit dates to an <EntryExit>.

        The input data carries no entry/exit type, so "basic" is emitted to
        satisfy the schema's closed value list (basic, hud, path, quick
        call, standard, ...).
        """
        type1 = ET.SubElement(entry_exit, "typeEntryExit")
        type1.text = "basic"
        provider_id = ET.SubElement(entry_exit, "provider")
        provider_id.text = '%s' % self.configurationRec.providerid
        if EE.participation_dates_start_date != "" and EE.participation_dates_start_date != None:
            entry_date = ET.SubElement(entry_exit, "entryDate")
            entry_date.text = dateutils.fixDate(EE.participation_dates_start_date)
        if EE.participation_dates_end_date != "" and EE.participation_dates_end_date != None:
            exit_date = ET.SubElement(entry_exit, "exitDate")
            exit_date.text = dateutils.fixDate(EE.participation_dates_end_date)
        return

    def createAssessmentData(self, client):
        """Create the <assessmentData> element (dynamic content type)."""
        assessment_data = ET.SubElement(client, "assessmentData")
        return assessment_data

    def customizeAssessmentData(self, assessment_data):
        """Fill <assessmentData> with demographic and history sub-elements.

        Emits gender/DOB/ethnicity/race from the Person row, then one set
        of housing/foster/length-of-stay/prior-residence/disability/veteran
        answers per person-historical row.
        NOTE(review): the ``.one()`` queries below raise when a historical
        row lacks the matching record -- confirm the input data always
        provides exactly one of each.
        """
        if self.person.person_gender_unhashed != "" and self.person.person_gender_unhashed != None:
            persGender = ET.SubElement(assessment_data, "svpprofgender")
            persGender.attrib["date_added"] = dateutils.fixDate(self.person.person_gender_unhashed_date_collected)
            persGender.attrib["date_effective"] = dateutils.fixDate(self.person.person_gender_unhashed_date_effective)
            persGender.text = str(self.person.person_gender_unhashed)
        # Date of birth.
        if self.person.person_date_of_birth_unhashed != "" and self.person.person_date_of_birth_unhashed != None:
            dob = ET.SubElement(assessment_data, "svpprofdob")
            dob.attrib["date_added"] = dateutils.fixDate(self.person.person_date_of_birth_unhashed_date_collected)
            dob.attrib["date_effective"] = dateutils.fixDate(datetime.now())  # Person has no effective date
            dob.text = dateutils.fixDate(self.person.person_date_of_birth_unhashed)
        # Ethnicity: our picklist has essentially 2 options; the schema has 23.
        if self.person.person_ethnicity_unhashed != "" and self.person.person_ethnicity_unhashed != None:
            ethText = self.pickList.getValue("EthnicityPick", str(self.person.person_ethnicity_unhashed))
            eth = ET.SubElement(assessment_data, "svpprofeth")
            eth.attrib["date_added"] = dateutils.fixDate(self.person.person_ethnicity_unhashed_date_collected)
            eth.attrib["date_effective"] = dateutils.fixDate(datetime.now())  # Person has no effective date
            eth.text = ethText
        # Races (possibly several per person).
        for race in self.race:
            raceText = self.pickList.getValue("RacePick", str(race.race_unhashed))
            if raceText != None:
                raceNode = ET.SubElement(assessment_data, "svpprofrace")
                raceNode.attrib["date_added"] = dateutils.fixDate(race.race_date_collected)
                raceNode.attrib["date_effective"] = dateutils.fixDate(race.race_date_effective)
                raceNode.text = raceText
        for ph in self.ph:
            # Housing status.
            hs = self.session.query(dbobjects.HousingStatus).filter(dbobjects.HousingStatus.person_historical_index_id == ph.id).one()
            hsText = self.pickList.getValue("HOUSINGSTATUSPickOption", hs.housing_status)
            if hsText != None:
                housingStatus = ET.SubElement(assessment_data, "svp_hud_housingstatus")
                housingStatus.attrib["date_added"] = dateutils.fixDate(hs.housing_status_date_collected)
                housingStatus.attrib["date_effective"] = dateutils.fixDate(hs.housing_status_date_effective)
                housingStatus.text = hsText
            # Ever a foster child.
            foster = self.session.query(dbobjects.FosterChildEver).filter(dbobjects.FosterChildEver.person_historical_index_id == ph.id).one()
            fosterText = self.pickList.getValue("ENHANCEDYESNOPickOption", str(foster.foster_child_ever))
            if fosterText != None:
                fosterEver = ET.SubElement(assessment_data, "x20wereyoueverafoster")
                fosterEver.attrib["date_added"] = dateutils.fixDate(foster.foster_child_ever_date_collected)
                fosterEver.attrib["date_effective"] = dateutils.fixDate(foster.foster_child_ever_date_effective)
                fosterEver.text = fosterText
            # Length of stay at prior residence.
            losapr = self.session.query(dbobjects.LengthOfStayAtPriorResidence).filter(dbobjects.LengthOfStayAtPriorResidence.person_historical_index_id == ph.id).one()
            losaprText = self.pickList.getValue("LENGTHOFTHESTAYPickOption", losapr.length_of_stay_at_prior_residence)
            if losaprText != None:
                lengthOfStay = ET.SubElement(assessment_data, "hud_lengthofstay")
                lengthOfStay.attrib["date_added"] = dateutils.fixDate(losapr.length_of_stay_at_prior_residence_date_collected)
                lengthOfStay.attrib["date_effective"] = dateutils.fixDate(losapr.length_of_stay_at_prior_residence_date_effective)
                lengthOfStay.text = losaprText
            # "Prior residence" maps to the schema's "typeoflivingsituation".
            tols = self.session.query(dbobjects.PriorResidence).filter(dbobjects.PriorResidence.person_historical_index_id == ph.id).one()
            tolsText = self.pickList.getValue("LIVINGSITTYPESPickOption", tols.prior_residence_code)
            if tolsText != None:
                priorLiving = ET.SubElement(assessment_data, "typeoflivingsituation")
                priorLiving.attrib["date_added"] = dateutils.fixDate(tols.prior_residence_code_date_collected)
                priorLiving.attrib["date_effective"] = dateutils.fixDate(tols.prior_residence_code_date_effective)
                priorLiving.text = tolsText
            # Physical disability (text flag straight from the DB column).
            pdyn = self.session.query(dbobjects.PhysicalDisability).filter(dbobjects.PhysicalDisability.person_historical_index_id == ph.id).one()
            pdynText = pdyn.has_physical_disability
            if pdynText != None:
                physDisabYN = ET.SubElement(assessment_data, "svpphysicaldisabilit")
                physDisabYN.attrib["date_added"] = dateutils.fixDate(pdyn.has_physical_disability_date_collected)
                # Required by the schema even though the input is often blank.
                physDisabYN.attrib["date_effective"] = dateutils.fixDate(pdyn.has_physical_disability_date_effective)
                physDisabYN.text = pdynText
            # Veteran status.
            vvs = self.session.query(dbobjects.VeteranVeteranStatus).filter(dbobjects.VeteranVeteranStatus.person_historical_index_id == ph.id).one()
            vvsText = vvs.veteran_status
            if vvsText != None:
                vetStat = ET.SubElement(assessment_data, "veteran")
                vetStat.attrib["date_added"] = dateutils.fixDate(vvs.veteran_status_date_collected)
                vetStat.attrib["date_effective"] = dateutils.fixDate(vvs.veteran_status_date_effective)
                vetStat.text = vvsText

    def current_picture(self, node):
        """Internal debugging aid: dump *node* as XML when DEBUG is on."""
        if settings.DEBUG:
            print("Current XML Picture is")
            print("======================\n" * 2)
            ET.dump(node)
            print("======================\n" * 2)

    def calcHourlyWage(self, monthlyWage):
        """Convert a wage string to an hourly-wage string.

        Digit strings above 5000 are treated as monthly totals and divided
        by 160 working hours; smaller digit strings pass through unchanged.
        Empty or non-numeric input yields "0.0".
        Bug fix: the original left ``hourlyWage`` unbound for "" input,
        raising UnboundLocalError at the return.
        """
        hourlyWage = 0.00
        if monthlyWage != "":
            if monthlyWage.strip().isdigit():
                if float(monthlyWage) > 5000.00:
                    hourlyWage = float(monthlyWage) / 160.00
                else:
                    hourlyWage = float(monthlyWage)
        return str(round(hourlyWage, 2))

    def fixMiddleInitial(self, middle_initial):
        """Return the upper-cased first letter of *middle_initial*.

        Bug fix: the original up-cased the first character and *then*
        called lstrip() on it, which returned "" whenever the name began
        with a space.  Strip first, then take the initial; all-whitespace
        input yields "".
        """
        return middle_initial.strip()[:1].upper()

    def fixSSN(self, incomingSSN):
        """Normalize an SSN to the dashed 123-45-6789 form.

        Empty/None input is returned unchanged; a correctly dashed SSN
        passes through; fewer than nine digits raises DataFormatError.
        """
        originalSSN = incomingSSN
        if incomingSSN == "" or incomingSSN == None:
            return incomingSSN
        dashCount = incomingSSN.count('-')
        if dashCount > 0:
            if dashCount == 2:
                # Already carries both dashes: pass through unchanged.
                if settings.DEBUG and self.debugMessages:
                    self.debugMessages.log("incoming SSN is correctly formatted: %s\n" % (incomingSSN))
                return incomingSSN
            else:
                # One dash only is malformed: strip dashes and rebuild below.
                incomingSSN = incomingSSN.replace('-', '')
        if len(incomingSSN) < 9:
            theError = (1020, 'Data format error discovered in trying to cleanup incoming SSN: %s, original SSN: %s' % (incomingSSN, originalSSN))
            if settings.DEBUG and self.debugMessages:
                self.debugMessages.log(">>>> Incoming SSN is INcorrectly formatted. Original SSN from input file is: %s and Attempted cleaned up SSN is: %s\n" % (originalSSN, incomingSSN))
            raise DataFormatError(theError)
        # Nine-plus bare digits: insert the dashes.
        return '%s-%s-%s' % (incomingSSN[0:3], incomingSSN[3:5], incomingSSN[5:10])
#if __name__ == "__main__":
# vld = SVCPOINTXMLWriter(".")
# vld.write()
| 211tbc/synthesis | src/svcpointxml5writer.py | svcpointxml5writer.py | py | 26,257 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetim... |
1914061684 | """
NOTE:
You will have to install the Haskell program find-clumpiness on your machine before running this script.
For more info, see: https://github.com/GregorySchwartz/find-clumpiness
Also, this script calls the find-clumpiness program using the terminal via linux commands. The commands may not work if you are on Windows.
Lastly, this script requires you to make a temp folder, where you will not care if all of the files in that folder get deleted.
The script uses the temp folder to store the JSON strings of the lineages and clumpiness results, which are deleted after each set of clones is done.
The script then feeds a list of clones into a function that reformats their trees into the format expected by find-clumpiness' "JSON" parser. The end
result of this step is a file for each clone's tree that can now be used with find-clumpiness.
Next, find-clumpiness is run on the files that were just made, outputting a .txt file for each corresponding file with the results.
Finally, the script builds a dictionary that collects all of the find-clumpiness results by iterating through all of the .txt result files.
Some of the clones produce empty results or do not produce results for the selected criteria, such as clones with only a single node or where the
tissues being compared are only found in a single node. The script then removes all of these empty values from the dictionary so that the end result
has a dictionary with each key (clone) having meaningful results that can be used for plotting. This dictionary is then saved as a .JSON file.
The original find-clumpiness results are in the following format:
property1,property2,value
PBMC,PBMC,1.0
PBMC,MLN,1.0
MLN,PBMC,1.0
MLN,MLN,1.0
and this script turns it into:
results[94] : {'PBMC,PBMC': 1.0, 'PBMC,MLN': 1.0, 'MLN,PBMC': 1.0, 'MLN,MLN': 1.0}
with 94 being the 95th (starts from 0) clumpiness result file. There is 1 file for each clone.
"""
import json
import pickle
import pandas as pd
from pathlib import Path, PureWindowsPath
import os
import re
import subprocess
# Maps the numeric subject_id used in the samples table to the patient
# label used in file names and pickles throughout this script.
subjects_dict = {
    2: 'Pt19_R',
    3: 'Pt21_R',
    4: 'Pt23_R',
    8: 'Pt14_R',
    9: 'Pt20_R',
    10: 'Pt17_R',
    17: 'Pt25_R',
}
def get_clumpiness_by_POD():
# =============================================================================
# Load tables
# =============================================================================
    def load_data(patient_ID, patient_name):
        """Load and clean all per-patient tables needed for clumpiness.

        Reads the shared samples/metadata pickles plus the patient's
        clones, sequences and clone-stats tables, normalizes tissue/POD
        labels, drops samples with rejection, and keeps only functional
        clones.

        Returns (df_of_trees, seq_table, tissues, node_min_clone_dict,
        rejection_samples).

        NOTE(review): if patient_name is not in subjects_dict, none of the
        locals are ever assigned and the return raises NameError -- confirm
        callers only pass known patients.
        """
        if patient_name in subjects_dict.values():
            samples_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/samples.pkl")
            metadata_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/sample_metadata.pkl")
            # Normalize inconsistent tissue/POD labels in the metadata.
            metadata_table['value'] = metadata_table['value'].replace("Colon ", "Colon")
            metadata_table['value'] = metadata_table['value'].replace("MLN_allograft", "AxLN")
            metadata_table['value'] = metadata_table['value'].replace("POD37 44", "POD37")
            ###Change Ileum to Ileum_allograft for Pt21_R POD1145
            # NOTE(review): chained .loc assignment below triggers pandas
            # SettingWithCopy warnings -- verify it writes through.
            POD1145_sample_ids = metadata_table['sample_id'].loc[metadata_table['value'] == 'POD1145']
            metadata_table['value'].loc[metadata_table['sample_id'].isin(POD1145_sample_ids)] = metadata_table['value'].loc[metadata_table['sample_id'].isin(POD1145_sample_ids)].replace("Ileum", "Ileum_allograft")
            df_of_trees = pd.read_pickle(r"/path_to_sykes_children_data_folder/clones_table/{}_clones_table.pkl".format(patient_ID))
            seq_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/sequences_table/{}_sequences_table.pkl".format(patient_ID))
            # Keep only sequences assigned to a clone.
            seq_table.dropna(subset=["clone_id"], inplace=True)
            seq_table['clone_id'] = seq_table['clone_id'].astype(int)
    # =============================================================================
    #         Load rejection samples and remove these samples from data
    # =============================================================================
            path = Path(r"/path_to_sykes_children_data_folder/{}_samples_with_rejection.pkl".format(patient_name))
            pickle_in = open(path,"rb")
            rejection_samples = pickle.load(pickle_in)
            pickle_in.close()
            seq_table = seq_table.loc[~seq_table['sample_id'].isin(rejection_samples)]
            metadata_table = metadata_table.loc[~metadata_table['sample_id'].isin(rejection_samples)]
            samples_table = samples_table.loc[~samples_table['id'].isin(rejection_samples)]
            # Restrict sequences to functional clones only.
            clone_stats = pd.read_pickle(r"/path_to_sykes_children_data_folder/clone_stats/{}_clone_stats.pkl".format(patient_ID))
            functional_clones = clone_stats['clone_id'].loc[clone_stats['functional'] == 1].unique().tolist()
            seq_table = seq_table.loc[seq_table['clone_id'].isin(functional_clones)]
            # Tissues present for this subject (sample_origin metadata).
            subject_samples = samples_table['id'].loc[samples_table['subject_id'] == patient_ID].unique()
            subject_metadata_table = metadata_table.loc[metadata_table['sample_id'].isin(subject_samples)]
            tissues = subject_metadata_table['value'].loc[subject_metadata_table['key'] == 'sample_origin'].unique()
            node_min_clone_dict = pd.read_pickle(r"/path_to_sykes_children_data_folder/{}_clones_per_node_min_using_tree.pkl".format(patient_name))
        return df_of_trees, seq_table, tissues, node_min_clone_dict, rejection_samples
# =============================================================================
# POD grouping function to group PODs together that are within 2 days of each other
# Input is name of patient, such as "Pt19_R"
# Output is list of lists, with each sublist containing the grouped PODs (or single POD if there are no PODs that are grouped with it)
# And a second output in the same format, except each sublist is a list of sample_ids that belong to the corresponding POD
# Example: [[0], [20, 22], [100], [110, 112, 114]], [[sample_ids that belong to 0], [sample_ids that belong to 20, 22], [etc], [etc]]
# =============================================================================
def group_PODs(patient_ID, patient_name):
    """Group post-operative days (PODs) that fall within 2 days of each other.

    Parameters:
        patient_ID   -- subject id used to filter samples.pkl (e.g. a numeric id)
        patient_name -- subject label such as "Pt19_R", used to locate the
                        "<name>_samples_with_rejection.pkl" file
    Returns (result, result_samples):
        result         -- list of lists of grouped PODs, e.g. [[0], [20, 22], [100]]
        result_samples -- parallel list; each sublist holds the sample_ids
                          belonging to the corresponding POD group
    """
    samples_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/samples.pkl")
    metadata_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/sample_metadata.pkl")
    # Normalize known data-entry inconsistencies. (The original applied the
    # "POD37 44" replacement twice; once is enough.)
    metadata_table['value'] = metadata_table['value'].replace(
        {"POD37 44": "POD37", "Colon ": "Colon", "MLN_allograft": "AxLN"})
    # Pt21_R POD1145: its Ileum samples are allograft tissue.
    POD1145_sample_ids = metadata_table['sample_id'].loc[metadata_table['value'] == 'POD1145']
    pod1145_mask = metadata_table['sample_id'].isin(POD1145_sample_ids)
    metadata_table.loc[pod1145_mask, 'value'] = \
        metadata_table.loc[pod1145_mask, 'value'].replace("Ileum", "Ileum_allograft")
    # Load rejection samples and remove these samples from the data.
    path = Path(r"/path_to_sykes_children_data_folder/{}_samples_with_rejection.pkl".format(patient_name))
    with open(path, "rb") as pickle_in:
        rejection_samples = pickle.load(pickle_in)
    metadata_table = metadata_table.loc[~metadata_table['sample_id'].isin(rejection_samples)]
    samples_table = samples_table.loc[~samples_table['id'].isin(rejection_samples)]
    # Keep only this subject's samples.
    subject_samples = samples_table['id'].loc[samples_table['subject_id'] == patient_ID].unique()
    metadata_table = metadata_table.loc[metadata_table['sample_id'].isin(subject_samples)]
    pod_list_strings = metadata_table['value'].loc[metadata_table['key'] == 'pod'].unique()
    pod_list = [int(''.join(i for i in x if i.isdigit())) for x in pod_list_strings]
    # Sort both lists using the numeric PODs as the guide.
    pod_list, pod_list_strings = (list(t) for t in zip(*sorted(zip(pod_list, pod_list_strings))))

    def _samples_for(pod):
        # All sample_ids recorded under this POD's original (string) label.
        return metadata_table['sample_id'].loc[
            metadata_table['value'] == pod_list_strings[pod_list.index(pod)]].unique().tolist()

    result = []
    result_samples = []
    current_pod = pod_list[0]
    current_samples = _samples_for(current_pod)
    temp = []
    temp_samples = []
    for next_pod in pod_list[1:]:
        next_samples = _samples_for(next_pod)
        if len(temp) == 0:
            temp.append(current_pod)
            temp_samples.extend(current_samples)
        if next_pod - current_pod <= 2:
            temp.append(next_pod)
            temp_samples.extend(next_samples)
        else:
            result.append(temp)
            result_samples.append(temp_samples)
            temp = []
            temp_samples = []
        current_pod = next_pod
        current_samples = next_samples
    # Flush the final group. The original only re-appended pod_list[-1] on its
    # own, which dropped any earlier PODs grouped with it (e.g. [20, 22] lost
    # POD 20 and its samples) and raised IndexError when there was one POD.
    if temp:
        result.append(temp)
        result_samples.append(temp_samples)
    else:
        result.append([current_pod])
        result_samples.append(current_samples)
    return result, result_samples
# =============================================================================
# This function returns two dicts:
# samples_per_tissue: dict[tissue] : [list of sample_ids in that tissue]
# tissues_per_sample: dict[sample_id] : [tissue in that sample_id] ###this will be a list with a single element (the tissue)
# =============================================================================
def get_tissue_sample_ids(patient_ID, patient_name):
    """Build the two tissue lookup dicts for one subject.

    Returns (tissues_per_sample, samples_per_tissue):
        tissues_per_sample -- sample_id -> tissue name (the 'sample_origin' value)
        samples_per_tissue -- tissue name -> list of sample_ids in that tissue
    Both are empty when patient_name is not in subjects_dict.
    """
    tissues_per_sample = {}
    samples_per_tissue = {}
    if patient_name in subjects_dict.values():
        samples_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/samples.pkl")
        metadata_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/sample_metadata.pkl")
        # Restrict metadata to this subject's samples.
        subject_samples = samples_table['id'].loc[samples_table['subject_id'] == patient_ID].unique()
        metadata_table = metadata_table.loc[metadata_table['sample_id'].isin(subject_samples)]
        # Normalize known data-entry inconsistencies.
        metadata_table['value'] = metadata_table['value'].replace(
            {"Colon ": "Colon", "MLN_allograft": "AxLN", "POD37 44": "POD37"})
        # Pt21_R POD1145: its Ileum samples are allograft tissue.
        pod1145_ids = metadata_table['sample_id'].loc[metadata_table['value'] == 'POD1145']
        pod1145_mask = metadata_table['sample_id'].isin(pod1145_ids)
        metadata_table.loc[pod1145_mask, 'value'] = \
            metadata_table.loc[pod1145_mask, 'value'].replace("Ileum", "Ileum_allograft")
        # Load rejection samples and drop them from the data.
        rejection_path = Path(r"/path_to_sykes_children_data_folder/{}_samples_with_rejection.pkl".format(patient_name))
        with open(rejection_path, "rb") as handle:
            rejection_samples = pickle.load(handle)
        metadata_table = metadata_table.loc[~metadata_table['sample_id'].isin(rejection_samples)]
        samples_table = samples_table.loc[~samples_table['id'].isin(rejection_samples)]
        # Build the two lookup tables.
        tissue_names = metadata_table['value'].loc[metadata_table['key'] == 'sample_origin'].unique()
        for sample_id in metadata_table['sample_id'].unique():
            origin = metadata_table['value'].loc[
                (metadata_table['key'] == 'sample_origin') & (metadata_table['sample_id'] == sample_id)]
            tissues_per_sample[sample_id] = list(origin)[0]
        for tissue in tissue_names:
            samples_per_tissue[tissue] = list(
                metadata_table['sample_id'].loc[metadata_table['value'] == tissue].unique())
    return tissues_per_sample, samples_per_tissue
def get_timepoint_sample_ids(patient_ID, patient_name):
    """Build the two POD (timepoint) lookup dicts for one subject.

    Returns (timepoints_per_sample, samples_per_timepoint):
        timepoints_per_sample -- sample_id -> numeric POD (int)
        samples_per_timepoint -- str(numeric POD) -> list of sample_ids
    Both are empty when patient_name is not in subjects_dict.
    """
    timepoints_per_sample = {}
    samples_per_timepoint = {}
    if patient_name in subjects_dict.values():
        samples_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/samples.pkl")
        metadata_table = pd.read_pickle(r"/path_to_sykes_children_data_folder/sample_metadata.pkl")
        # Restrict metadata to this subject's samples.
        subject_samples = samples_table['id'].loc[samples_table['subject_id'] == patient_ID].unique()
        metadata_table = metadata_table.loc[metadata_table['sample_id'].isin(subject_samples)]
        # Normalize known data-entry inconsistencies.
        metadata_table['value'] = metadata_table['value'].replace(
            {"Colon ": "Colon", "MLN_allograft": "AxLN", "POD37 44": "POD37"})
        # Pt21_R POD1145: its Ileum samples are allograft tissue.
        pod1145_ids = metadata_table['sample_id'].loc[metadata_table['value'] == 'POD1145']
        pod1145_mask = metadata_table['sample_id'].isin(pod1145_ids)
        metadata_table.loc[pod1145_mask, 'value'] = \
            metadata_table.loc[pod1145_mask, 'value'].replace("Ileum", "Ileum_allograft")
        # Load rejection samples and drop them from the data.
        rejection_path = Path(r"/path_to_sykes_children_data_folder/{}_samples_with_rejection.pkl".format(patient_name))
        with open(rejection_path, "rb") as handle:
            rejection_samples = pickle.load(handle)
        metadata_table = metadata_table.loc[~metadata_table['sample_id'].isin(rejection_samples)]
        samples_table = samples_table.loc[~samples_table['id'].isin(rejection_samples)]

        def _pod_number(label):
            # "POD37" -> 37 (digits only; POD labels are strings like "POD37").
            return int(''.join(ch for ch in label if ch.isdigit()))

        pod_labels = metadata_table['value'].loc[metadata_table['key'] == 'pod'].unique().tolist()
        # Sort (numeric POD, original label) pairs by the numeric value.
        sorted_pairs = sorted((_pod_number(label), label) for label in pod_labels)
        for sample_id in metadata_table['sample_id'].unique():
            label = list(metadata_table['value'].loc[
                (metadata_table['key'] == 'pod') & (metadata_table['sample_id'] == sample_id)])[0]
            timepoints_per_sample[sample_id] = _pod_number(label)
        for pod, label in sorted_pairs:
            samples_per_timepoint[str(pod)] = list(
                metadata_table['sample_id'].loc[metadata_table['value'] == label].unique())
    return timepoints_per_sample, samples_per_timepoint
# =============================================================================
# Body of script starts here
# =============================================================================
# Main driver: for every subject, build per-POD-group, per-tissue-pair clone
# lists, export each clone's lineage tree as JSON, score it with the external
# `find-clumpiness` tool, collect the scores, and pickle the results.
# NOTE(review): the trailing call on the last line suggests this loop lives
# inside `def get_clumpiness_by_POD():` defined earlier in the file — confirm.
for patient_ID, patient_name in subjects_dict.items():
"""
###IMPORTANT: BELOW IS THE PATH TO THE TEMP FOLDER WHERE ALL FILES WILL BE DELETED
"""
###change directory to the directly where find-clumpiness results will be saved
temp_folder_where_files_will_be_stored_and_deleted = "/path_to_sykes_children_data_folder/temp"
os.chdir(temp_folder_where_files_will_be_stored_and_deleted)
# Clear any leftovers from a previous run (deletes EVERY file in the temp dir).
subprocess.run(r'find . -type f -delete', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
print("Making list of clones for: " + patient_name)
df_of_trees, seq_table, tissues, node_min_clone_dict, rejection_samples = load_data(patient_ID, patient_name)
clones_per_tissue_pairs = pd.read_pickle(r"/path_to_sykes_children_data_folder/Clumpiness/{}_clones_per_tissue_pairs_new_1.pkl".format(patient_name))
timepoints_per_sample, samples_per_timepoint = get_timepoint_sample_ids(patient_ID, patient_name)
tissues_per_sample, samples_per_tissue = get_tissue_sample_ids(patient_ID, patient_name)
pod_groups, _ = group_PODs(patient_ID, patient_name)
results_1 = {}
node_min = 3 #No reason to get clumpiness for clones with fewer than 3 nodes because their lineages are not complex enough.
df_node_min = seq_table.loc[seq_table['clone_id'].isin(node_min_clone_dict[node_min])]
# Annotate every sequence row with its sample's numeric POD and tissue.
df_node_min['pod'] = df_node_min['sample_id'].apply(lambda x: timepoints_per_sample[x])
df_node_min['tissue'] = df_node_min['sample_id'].apply(lambda x: tissues_per_sample[x])
for pod_group in pod_groups:
print("Currently analyzing: " + str(pod_group) + " for " + subjects_dict[patient_ID])
clones_in_pod_group = set(df_node_min['clone_id'].loc[df_node_min['pod'].isin(pod_group)].unique().astype(int))
tissues_per_pod_group = df_node_min['tissue'].loc[(df_node_min['pod'].isin(pod_group))].unique()
df_by_pod_and_node_min = df_node_min.loc[df_node_min['clone_id'].isin(clones_in_pod_group)]
results_1["POD" + str(pod_group)] = {}
###Make list of clones_ids to be analyzed
# Iterate over every unordered pair of tissues seen for this subject.
for tissue1 in range(0, len(tissues) - 1):
for tissue2 in range(tissue1 + 1, len(tissues)):
clones_in_tissue_pair = set(clones_per_tissue_pairs[tissues[tissue1] + "+" + tissues[tissue2]])
clones_per_tissue_and_pod_group = clones_in_tissue_pair.intersection(clones_in_pod_group)
###Only add clones that have both tissues at this pod_group
list_of_clones_result = []
if tissues[tissue1] in tissues_per_pod_group and tissues[tissue2] in tissues_per_pod_group:
for clone in clones_per_tissue_and_pod_group:
tissues_per_clone_and_pod = df_by_pod_and_node_min['tissue'].loc[(df_by_pod_and_node_min['clone_id'] == clone)].unique()
if tissues[tissue1] in tissues_per_clone_and_pod and tissues[tissue2] in tissues_per_clone_and_pod:
list_of_clones_result.append(clone)
if len(list_of_clones_result) > 0:
print("Finished making list of clones. Now making files: " + str(len(list_of_clones_result)) + " total files.")
print("POD group: " + str(pod_group) + " Tissues: " + tissues[tissue1] + "+" + tissues[tissue2])
results_1["POD" + str(pod_group)][tissues[tissue1] + "+" + tissues[tissue2]] = {}
# =============================================================================
# Iterate through each tree, reformatting it and making files for find-clumpiness' "JSON" parser
# =============================================================================
df_of_trees_1 = df_of_trees.loc[df_of_trees['id'].isin(list_of_clones_result)]
for clone, clone_id in zip(df_of_trees_1['tree'], df_of_trees_1['id']):
# NOTE(review): parse/parse_data are re-defined on every iteration because
# they close over the loop's `pod_group`; hoisting them would change scoping.
def parse(input):
result = []
for key, value in input.items():
if key == "data":
# test1 = value
result.append(parse_data(value))
if key == "children":
if len(value) > 0:
childrenResult = []
for child in value:
childrenResult.append(parse(child))
result.append(childrenResult)
else:
result.append([])
return result
def parse_data(data):
# Build a find-clumpiness node record: one tissue label per sequence in the
# node, restricted to sequences from the current pod_group.
result = {"nodeID" : "ID", "nodeLabels" : []}
seq_ids = data['seq_ids']
for key, seq_id in seq_ids.items():
metadata = seq_id['metadata']
if len(metadata) > 0:
metadata_pod = int(''.join(i for i in metadata['pod'] if i.isdigit()))
###Only count nodes that are in the current pod_group
if metadata_pod in pod_group:
# Apply the same tissue-name normalizations used elsewhere in this script.
if 'Colon ' in metadata['sample_origin']:
metadata['sample_origin'] = metadata['sample_origin'].replace("Colon ", "Colon")
if 'Ileum' in metadata['sample_origin'] and metadata['pod'] == 'POD1145':
metadata['sample_origin'] = metadata['sample_origin'].replace("Ileum", "Ileum_allograft")
result['nodeLabels'].append(metadata['sample_origin'])
return result
json_data = json.loads(clone)
result = parse(json_data["tree"])
# One <clone_id>.JSON file per clone for find-clumpiness to consume.
path = Path(r"{}/{}.JSON".format(temp_folder_where_files_will_be_stored_and_deleted, clone_id))
with open(path, 'w') as outfile:
json.dump(result, outfile)
print("Finished making files. Now running find-clumpiness.")
# =============================================================================
# This command runs find-clumpiness from the terminal on the files produced above
# =============================================================================
subprocess.run(r'for file in *.JSON; do cat ${file%%.*}.JSON | find-clumpiness --format "JSON" -e Majority -o ${file%%.*}.txt; done', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
print("find-clumpiness finished. Now collecting results as a dictionary.")
###this directory contains the find-clumpiness results files
directory = temp_folder_where_files_will_be_stored_and_deleted
# =============================================================================
# Collect the results from the find-clumpiness output files as a dictionary
# =============================================================================
file_results = {}
for entry in os.scandir(directory):
if entry.path.endswith(".txt") and entry.is_file():
with open(entry) as reader:
file_results[entry.name.split(".")[0]] = {}
line_count = 1
for line in reader.readlines():
line_count += 1
# Each CSV data line is "<tissue1>,<tissue2>,<float score>"; strip the
# trailing ",<score>" to get the key, parse the score as the value.
# NOTE(review): "\d+\.\d+" should be a raw string r"\d+\.\d+" to avoid
# invalid-escape warnings on newer Pythons; matching is unchanged today.
if 'property1,property2,value' not in line:
key_value = line.replace("," + str(re.findall("\d+\.\d+", line)[0]) + '\n', "")
file_results[entry.name.split(".")[0]][key_value] = float(re.findall("\d+\.\d+", line)[0])
###remove empty dictionaries, which can be due to trees with only a single node not returning any values from find-clumpiness or the labels all being in a single node.
final_results = file_results.copy()
node_removed = 0
current_tissues = str(tissues[tissue1] + "," + tissues[tissue2])
for key in file_results:
if current_tissues not in file_results[key]:
del final_results[key]
node_removed += 1
print("Finished with " + tissues[tissue1] + "+" + tissues[tissue2] + ". Removed " + str(node_removed) + " results where there were no values.")
results_1["POD" + str(pod_group)][tissues[tissue1] + "+" + tissues[tissue2]] = final_results
print("Now deleting find-clumpiness files to prepare for next iteration.")
# =============================================================================
# Delete all of the files that were created this iteration via the terminal
# =============================================================================
subprocess.run(r'find . -type f -delete', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
# =============================================================================
# Save results
# =============================================================================
path = Path(r"/path_to_sykes_children_data_folder/{}_clumpiness_POD_Specific.pkl".format(patient_name))
pickle_out = open(path,"wb")
pickle.dump(results_1, pickle_out)
pickle_out.close()
print("Finished!")
get_clumpiness_by_POD()
| DrexelSystemsImmunologyLab/Pediatric_gut_homeostatsis_paper | Supplemental/preprocessing/get_clumpiness_by_POD.py | get_clumpiness_by_POD.py | py | 27,497 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_pickle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.read_... |
8500981124 | #!/usr/bin/env python
# coding: utf-8
# # COEN 140 Final Project - Music Genre Classifer
# In[241]:
import os
import json
import numpy as np
import scipy
import pandas as pd
import librosa as lb
import warnings
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
# ## Import functions
# In[3]:
def import_features(n=None):
    """Load the first *n* rows of the FMA features table (all rows when None)."""
    return pd.read_csv('fma_metadata/features.csv', header=[0, 1, 2], index_col=0, nrows=n)


def import_tracks(n=None, col=':'):
    """Load the first *n* rows of the FMA tracks table.

    NOTE(review): *col* is accepted but never used — kept only for call
    compatibility; confirm before removing.
    """
    return pd.read_csv('fma_metadata/tracks.csv', header=[0, 1], index_col=0, nrows=n)


def import_genres(n=None):
    """Load the first *n* rows of the FMA genres table (all rows when None)."""
    return pd.read_csv('fma_metadata/genres.csv', header=0, index_col=0, nrows=n)
# ## Feature extraction function
# In[3]:
def get_song_features(name):
    """Load ./test_songs/<name> and extract its librosa feature matrices.

    Returns a dict mapping feature name -> numpy array as produced by the
    corresponding librosa.feature extractor.
    """
    audio_path = os.path.join(os.getcwd(), "test_songs", name)
    # Load into waveform `y` at sampling rate `sr`.
    y, sr = lb.load(audio_path)
    print('> \'{}\' successfully loaded'.format(name))
    features = {}
    # Extractors that need both the waveform and the sampling rate,
    # in the same insertion order as the dataset's feature columns.
    sr_extractors = [
        ("chroma_stft", lb.feature.chroma_stft),
        ("chroma_cqt", lb.feature.chroma_cqt),
        ("chroma_cens", lb.feature.chroma_cens),
        ("tonnetz", lb.feature.tonnetz),
        ("mfcc", lb.feature.mfcc),
        ("spectral_centroid", lb.feature.spectral_centroid),
        ("spectral_bandwidth", lb.feature.spectral_bandwidth),
        ("spectral_contrast", lb.feature.spectral_contrast),
        ("spectral_rolloff", lb.feature.spectral_rolloff),
    ]
    for feature_name, extractor in sr_extractors:
        features[feature_name] = extractor(y=y, sr=sr)
    # These two only need the waveform.
    features["rmse"] = lb.feature.rms(y=y)
    features["zcr"] = lb.feature.zero_crossing_rate(y=y)
    print('> Successfully extracted into dict')
    return features
# In[4]:
# =============================================================================
# Format the new song's raw features into the dataset's statistics layout
# =============================================================================
# Extract the raw librosa feature matrices for the query song.
feat = get_song_features("one_summers_day.mp3")
# Build an empty frame with the dataset's (feature, statistic, index) columns,
# then add a single all-NaN row to hold this song's summary statistics.
# (DataFrame.append was deprecated and removed in pandas 2.0; reindex to one
# row produces the same single-NaN-row frame.)
df = import_features(0).reindex(range(1))
# Summary statistics, applied across each feature's frame axis (axis=1).
stats = ['kurtosis', 'max', 'mean', 'median', 'min', 'skew', 'std']
funcs = [scipy.stats.kurtosis, np.amax, np.mean, np.median, np.amin, scipy.stats.skew, np.std]
for ft in df.columns.unique(0):
    for st, fn in zip(stats, funcs):
        df.loc[:, (ft, st)] = fn(feat[ft], axis=1)
print('> Successfully applied statistics')
# ## Classifier Helper Functions
# In[4]:
def format_test_valid(X, y, drop_unique=False):
    """Parse genre strings, drop empty-genre rows, and split train/validation.

    Returns (Xt, Xv, yt, yv). When drop_unique is True, training rows whose
    genre list occurs only once are removed from the training split.
    """
    # Each y cell is a JSON-encoded list of genre ids.
    y = y.apply(json.loads)
    # Remove rows with no genres at all.
    empty_rows = y.index[y.map(lambda genres: len(genres) == 0)]
    X = X.drop(empty_rows)
    y = y.drop(empty_rows)
    # Deterministic 80/20 split.
    Xt, Xv, yt, yv = train_test_split(X, y, test_size=0.2, random_state=1)
    if drop_unique:
        singletons = yt.drop_duplicates(False).index
        Xt = Xt.drop(singletons)
        yt = yt.drop(singletons)
    return Xt, Xv, yt, yv
def format_track_data(data, cols=None):
    """Select the given 'track'-level columns and JSON-decode every cell.

    *data* has MultiIndex columns; *cols* names the second-level columns under
    'track' to keep. Each selected cell is a JSON-encoded list of genre ids.
    """
    selected = data.loc[:, ('track', cols)]
    selected.columns = cols
    return selected.applymap(json.loads)
def score(a, b):
    """Fraction of predictions in *a* that match the truth in *b*.

    A truth entry that is an int must equal the prediction exactly; a
    list-like truth entry counts as a hit when the prediction is a member.
    Returns 0.0 for empty input (the original divided by zero there).
    """
    assert len(a) == len(b), 'Arrays are not the same size'
    if len(a) == 0:
        return 0.0
    hits = 0
    for predicted, truth in zip(a, b):
        if isinstance(truth, (int, np.integer)):
            hits += int(predicted == truth)
        else:
            hits += int(predicted in truth)
    return hits / len(a)
def print_scores(ts, vs, name='New'):
    """Print labeled training/validation scores to three decimal places."""
    print(f'> {name} Scores:')
    print(f'Training score: {ts:.3f}\nValidation score: {vs:.3f}\n')
# ## Classifier Implementations
# ### LDA Function
# In[264]:
def m1_LDA(X, y, score_method='default', verbose=False):
    """Fit LDA on the first genre of each track; return the validation score."""
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
    # LDA needs a single class label per row: use each track's first genre.
    primary_genre = y_train.apply(lambda genres: genres[0])
    model = LinearDiscriminantAnalysis()
    model.fit(X_train, primary_genre)
    # Score against the full genre lists (a prediction counts if it appears).
    train_score = score(model.predict(X_train), y_train)
    valid_score = score(model.predict(X_valid), y_valid)
    if verbose:
        print_scores(train_score, valid_score, 'LDA')
    return valid_score
# ### QDA Function
# In[227]:
def m2_QDA(X, y, score_method='default', verbose=False):
    """Fit QDA on the first genre of each track; return the validation score."""
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
    # QDA needs a single class label per row: use each track's first genre.
    primary_genre = y_train.apply(lambda genres: genres[0])
    # QDA cannot estimate a covariance from a single sample, so drop classes
    # that appear only once in the training split.
    singletons = primary_genre.drop_duplicates(False).index
    X_train = X_train.drop(singletons)
    y_train = y_train.drop(singletons)
    primary_genre = primary_genre.drop(singletons)
    model = QuadraticDiscriminantAnalysis(tol=10**-10)
    model.fit(X_train, primary_genre)
    train_score = score(model.predict(X_train), y_train)
    valid_score = score(model.predict(X_valid), y_valid)
    if verbose:
        print_scores(train_score, valid_score, 'QDA')
    return valid_score
# ### KMC Function
# In[198]:
def m3_KMC(X, y, init_method='k-means++', verbose=False):
    """Cluster tracks with KMeans, relabel clusters by their modal genre,
    and return the validation score.

    init_method may be 'k-means++', 'random', or 'centroids' (seed each
    cluster at the first training track of each distinct primary genre).
    """
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
    primary_genre = y_train.apply(lambda genres: genres[0])
    # One cluster per distinct primary genre in the training split.
    centroids = primary_genre.drop_duplicates()
    if init_method == 'centroids':
        init_method = X_train.loc[centroids.index]
    model = KMeans(n_clusters=len(centroids), init=init_method, n_init=10)
    model.fit(X_train)

    def _relabel(pred, truth):
        # Replace each raw cluster id with the most common genre among the
        # (flattened) genre lists of the cluster's members.
        for cluster in pred.unique():
            members = pred[pred == cluster].index.values
            mode_genre, _ = scipy.stats.mode(np.concatenate(truth[members].values))
            pred.loc[members] = mode_genre[0]
        return pred

    train_pred = _relabel(pd.Series(model.predict(X_train), index=X_train.index), y_train)
    train_score = score(train_pred, y_train)
    valid_pred = _relabel(pd.Series(model.predict(X_valid), index=X_valid.index), y_valid)
    valid_score = score(valid_pred, y_valid)
    if verbose:
        print_scores(train_score, valid_score, 'KMeans')
    return valid_score
# ## Testing
# ### Initial Testing
# In[256]:
# Import data
# NOTE(review): this cell repeats the genre parsing done by format_test_valid /
# format_track_data inline — presumably predating those helpers; confirm.
n = None # None = 106,574
X = import_features(n)
y = import_tracks(n)
g = import_genres(n)
# format genres properly
cols = ['genres','genres_all']
y = y.loc[:,('track',cols)]
y.columns = cols
y = y.applymap(lambda t: json.loads(t))
# Remove entries with empty genres
eg = y.index[y['genres'].map(lambda t: len(t)==0)]
X = X.drop(eg)
y = y.drop(eg)
# Add another column holding parent genres
# Look up each track's first genre in the genres table to get its root genre.
y['top_level'] = y['genres'].apply(lambda t: [g.loc[t[0]]['top_level']])
print('> Data imported successfully')
# In[ ]:
# Baseline runs: each classifier against the three genre labelings.
lda = m1_LDA(X, y['genres'], verbose=True)
lda = m1_LDA(X, y['genres_all'], verbose=True)
lda = m1_LDA(X, y['top_level'], verbose=True)
# In[ ]:
warnings.filterwarnings('default')
qda = m2_QDA(X, y['genres'], verbose=True)
qda = m2_QDA(X, y['genres_all'], verbose=True)
qda = m2_QDA(X, y['top_level'], verbose=True)
# In[ ]:
# KMeans with each supported initialization strategy.
for km in ['k-means++', 'random', 'centroids']:
print('> Testing method: ', km)
m3_KMC(X, y['genres'], km, verbose=True)
m3_KMC(X, y['genres_all'], km, verbose=True)
m3_KMC(X, y['top_level'], km, verbose=True)
# ### Per Feature/Statistic Testing
# In[ ]:
yc = 'top_level'
# NOTE(review): `cols` is reused here for result-table columns, shadowing the
# genre-column list defined above — harmless but worth renaming.
cols = ['lda','qda','kmc-km++','kmc-rand','kmc-cent']
warnings.filterwarnings('ignore')
## test each feature
# Score every classifier on each audio feature (level 0 of the column index).
res_feat = pd.DataFrame(columns=cols)
for ft in X.columns.unique(level=0):
print('> Testing feature: ', ft)
res_feat.loc[ft, 'lda'] = m1_LDA(X.loc[:,pd.IndexSlice[ft, :, :]], y[yc])
res_feat.loc[ft, 'qda'] = m2_QDA(X.loc[:,pd.IndexSlice[ft, :, :]], y[yc])
res_feat.loc[ft, 'kmc-km++'] = m3_KMC(X.loc[:,pd.IndexSlice[ft, :, :]], y[yc], 'k-means++')
res_feat.loc[ft, 'kmc-rand'] = m3_KMC(X.loc[:,pd.IndexSlice[ft, :, :]], y[yc], 'random')
res_feat.loc[ft, 'kmc-cent'] = m3_KMC(X.loc[:,pd.IndexSlice[ft, :, :]], y[yc], 'centroids')
## test each statistic
# Score every classifier on each summary statistic (level 1 of the columns).
res_stat = pd.DataFrame(columns=cols)
for st in X.columns.unique(level=1):
print('> Testing statistic: ', st)
res_stat.loc[st, 'lda'] = m1_LDA(X.loc[:,pd.IndexSlice[:, st, :]], y[yc])
res_stat.loc[st, 'qda'] = m2_QDA(X.loc[:,pd.IndexSlice[:, st, :]], y[yc])
res_stat.loc[st, 'kmc-km++'] = m3_KMC(X.loc[:,pd.IndexSlice[:, st, :]], y[yc], 'k-means++')
res_stat.loc[st, 'kmc-rand'] = m3_KMC(X.loc[:,pd.IndexSlice[:, st, :]], y[yc], 'random')
res_stat.loc[st, 'kmc-cent'] = m3_KMC(X.loc[:,pd.IndexSlice[:, st, :]], y[yc], 'centroids')
## print results
print('Results by feature:\n', res_feat)
print('Results by statistic:\n', res_stat)
## export results
res_feat.to_csv('res_feat_{}.csv'.format(yc))
res_stat.to_csv('res_stat_{}.csv'.format(yc))
# ### Final Testing
# In[296]:
# Hand-picked feature/statistic subsets based on the per-feature results.
m1_LDA(X.loc[:,pd.IndexSlice[:, :, :]], y['top_level'], verbose=True)
m1_LDA(X.loc[:,pd.IndexSlice[['chroma_cens','mfcc','spectral_contrast'], :, :]], y['top_level'], verbose=True)
qft = ['mfcc', 'spectral_bandwidth', 'spectral_centroid', 'spectral_contrast', 'spectral_rolloff', 'zcr']
m2_QDA(X.loc[:,pd.IndexSlice[:, ['std'], :]], y['top_level'], verbose=True)
m2_QDA(X.loc[:,pd.IndexSlice[qft, ['std'], :]], y['top_level'], verbose=True)
m2_QDA(X.loc[:,pd.IndexSlice[qft, ['mean','median','std'], :]], y['top_level'], verbose=True)
kft = ['mfcc', 'spectral_bandwidth', 'spectral_centroid', 'spectral_contrast', 'spectral_rolloff']
m3_KMC(X.loc[:,pd.IndexSlice[['mfcc'], ['skew'], :]], y['genres_all'], verbose=True)
m3_KMC(X.loc[:,pd.IndexSlice[['mfcc'], ['mean', 'median', 'skew'], :]], y['genres_all'], verbose=True)
m3_KMC(X.loc[:,pd.IndexSlice[['mfcc'], :, :]], y['genres_all'], verbose=True)
m3_KMC(X.loc[:,pd.IndexSlice[kft, ['skew'], :]], y['genres_all'], 'random', verbose=True)
m3_KMC(X.loc[:,pd.IndexSlice[kft, ['mean', 'median', 'skew'], :]], y['genres_all'], 'random', verbose=True)
m3_KMC(X.loc[:,pd.IndexSlice[kft, :, :]], y['genres_all'], 'random', verbose=True)
print('> Testing complete')
| amiller5233/COEN140-genre-classifier | final_project.py | final_project.py | py | 10,097 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"l... |
15918005232 | from .MetadataEnhancer import MetadataEnhancer
from utils import _try_for_key
class VariableEnhancer(MetadataEnhancer):
    """Enhancer that attaches controlled-vocabulary URIs to dataset variables."""

    def __init__(self, metadata: dict, enrichment_table: dict):
        super().__init__(metadata, enrichment_table)

    def enhance_metadata(self):
        """Look up every variable in the enrichment table and attach matches.

        Reads the 'odisseiVariable' fields from the variableInformation
        metadata block; for each one, the variable's name is looked up in the
        enrichment table and, when a URI is found, it is written back into
        that variable's compound field.
        """
        variables = self.get_value_from_metadata('odisseiVariable',
                                                 'variableInformation')
        for variable_dict in variables:
            variable_name = _try_for_key(variable_dict,
                                         'odisseiVariableName.value')
            matched_uri = self.query_enrichment_table(variable_name)
            if matched_uri:
                self.add_enhancements_to_metadata(matched_uri, variable_dict)

    def add_enhancements_to_metadata(self, variable_uri: str,
                                     variable_dict: dict):
        """Write *variable_uri* into the variable's compound metadata field.

        :param variable_uri: the matched vocabulary URI to attach.
        :param variable_dict: the variable field receiving the enhancement.
        """
        self.add_enhancement_to_compound_metadata_field(
            variable_dict, 'odisseiVariableVocabularyURI', variable_uri)
| odissei-data/metadata-enhancer | src/enhancers/VariableEnhancer.py | VariableEnhancer.py | py | 1,895 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "MetadataEnhancer.MetadataEnhancer",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "utils._try_for_key",
"line_number": 21,
"usage_type": "call"
}
] |
25772535003 | #!/usr/bin/python
from __future__ import print_function
import atexit
from bcc import BPF
import os
from datetime import datetime
# Load and attach the BPF program: trace user-space page-cache writes and
# block-layer bio submissions via kprobes defined in kwtracer.c.
b= BPF(src_file="kwtracer.c")
b.attach_kprobe(event="iov_iter_copy_from_user_atomic",fn_name="trace_do_user_space_write")
b.attach_kprobe(event="submit_bio", fn_name="trace_submit_bio")
# Archive the previous trace under ./old_trace before starting a new one.
date = datetime.today().strftime("%Y-%m-%d.%H:%M:%S")
old_trace = "./old_trace/tracelog_" + date + ".log"
print(old_trace)
# NOTE(review): shells out with sudo; subprocess.run with a list (shell=False)
# would be safer than os.system string concatenation — confirm before changing.
os.system("sudo cp ./trace.log " + old_trace)
# Fresh trace output file; closed by the KeyboardInterrupt handler below.
trace_file = open('./trace.log', 'w')
def print_event(cpu, data, size):
    """Perf-buffer callback: append one "pid,comm" line per event to trace.log."""
    event = b["events"].event(data)
    trace_file.write("%s,%s\n" % (event.pid, event.comm))
# Stream events from the kernel perf buffer until interrupted with Ctrl-C.
b["events"].open_perf_buffer(print_event)
while True:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        # Flush/close the trace file before exiting cleanly.
        trace_file.close()
        exit()
| BoKyoungHan/kworker_tracer | kwtracer.py | kwtracer.py | py | 884 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bcc.BPF",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.system",
"li... |
32774847100 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 12:08:01 2018
@author: Jim
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
"""
read file
"""
csvfile = "Concrete_Data.csv"
data = pd.read_csv(csvfile)
(row,column)=data.shape
X_train=data['Age (day)'] #input
X_trainsecond=np.zeros([row,2])
for i in range(row):
X_trainsecond[i][0]=X_train[i]
X_trainsecond[i][1]=0
Y_train=data['Concrete compressive strength(MPa, megapascals) '] #output
#print(X_trainsecond.shape,Y_train.shape)
"""
lineaer regression
"""
regr = linear_model.LinearRegression()
regr.fit(X_trainsecond,Y_train)
print('weight: ',regr.coef_)
print('bias: ',regr.intercept_)
print('accuracy(r2_score): ',regr.score(X_trainsecond,Y_train))
"""
visualization, scatter plot and linear regression expresiion
"""
x=([0,0],[0,0])
x[0][0]=min(X_train)
x[1][0]=max(X_train)
y=([0,0])
y[0] = regr.coef_*x[0] + regr.intercept_
y[1] = regr.coef_*x[1] + regr.intercept_
plt.scatter(X_train,Y_train)
plt.plot(x,y)
plt.show()
| startearjimmy/4.Machine-learning | MLHW3/HW3_1.py | HW3_1.py | py | 1,090 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "skl... |
4703069650 | import requests
import json
from dotenv import load_dotenv
import os
import ipdb
import traceback
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
# Pull the Notion credentials from a local .env file into the environment.
load_dotenv()
notion_token = os.environ.get("NOTION_TOKEN")
database_id = os.environ.get("NOTION_DB_ID")
# Standard Notion API headers; the version header pins the API behavior.
# NOTE(review): if NOTION_TOKEN is unset, notion_token is None and the
# concatenation below raises TypeError — consider failing fast with a message.
headers = {
"Authorization": "Bearer " + notion_token,
"Content-Type": "application/json",
"Notion-Version": "2021-05-13"
}
def get_lap_time(result, key):
    """Return the mm:ss lap string stored under *key*, or '00:00' when empty.

    *result* is one page object from a Notion database query; lap times live
    in rich_text properties.
    """
    rich_text = result['properties'][key]['rich_text']
    if not rich_text:
        return '00:00'
    return rich_text[0]['plain_text']
def convert_db_to_dict(db_json):
    """Flatten a Notion database query payload into a list of workout dicts.

    Each dict holds: date (title text), lap1..lap3 and sprint (mm:ss strings,
    '00:00' when absent), and set1..set3 (pushup counts, 0 when the property
    is missing). Rows that fail to parse are skipped with a traceback rather
    than aborting the whole conversion. The result is sorted chronologically
    by the 'date' field (format %d-%b-%Y).
    """
    db_dict = []
    for result in db_json['results']:
        try:
            db_dict.append({
                'date': result['properties']['Date']['title'][0]['plain_text'],
                'lap1': get_lap_time(result, 'Lap 1'),
                'lap2': get_lap_time(result, 'Lap 2'),
                'lap3': get_lap_time(result, 'Lap 3'),
                'sprint': get_lap_time(result, 'Sprint'),
                'set1': result['properties'].get('Set 1', {}).get('number', 0),
                'set2': result['properties'].get('Set 2', {}).get('number', 0),
                'set3': result['properties'].get('Set 3', {}).get('number', 0),
            })
        except Exception:
            # Narrowed from a bare `except:`; the original also wrapped
            # traceback.print_exc() in print(), which printed "None".
            traceback.print_exc()
    db_dict.sort(key=lambda row: datetime.strptime(row['date'], '%d-%b-%Y'))
    return db_dict
def dict_to_df(db_dict):
    """Build a DataFrame from workout dicts and add the two total columns.

    'total running' is the sum of lap1..lap3 + sprint rendered as a
    zero-padded "MM:SS" string; 'total pushups' is set1 + set2 + set3.
    """
    frame = pd.DataFrame(db_dict)
    # Each lap is an "MM:SS" string; prefix "00:" so pandas parses H:M:S.
    durations = [pd.to_timedelta('00:' + frame[col]) for col in ('lap1', 'lap2', 'lap3', 'sprint')]
    total_seconds = (durations[0] + durations[1] + durations[2] + durations[3]).dt.seconds
    minutes = total_seconds.div(60).astype(int).astype(str).str.zfill(2)
    seconds = total_seconds.mod(60).astype(str).str.zfill(2)
    frame['total running'] = minutes + ':' + seconds
    frame['total pushups'] = frame['set1'] + frame['set2'] + frame['set3']
    return frame
# https://prettystatic.com/notion-api-python/
def read_db(database_id, headers):
    """POST a query for the given Notion database and return the parsed JSON body."""
    url = f"https://api.notion.com/v1/databases/{database_id}/query"
    response = requests.post(url, headers=headers)
    return response.json()
def plot_data(df):
    """Plot per-date total running time (bars) against total push-ups (line).

    NOTE: mutates *df* in place by adding the *_sec, total_running_sec and
    total_pushups columns.  Blocks until the matplotlib window is closed.
    """
    # Convert lap times to seconds ('MM:SS' prefixed with '00:' to form a
    # valid 'HH:MM:SS' timedelta string).
    df['lap1_sec'] = pd.to_timedelta('00:' + df['lap1']).dt.total_seconds()
    df['lap2_sec'] = pd.to_timedelta('00:' + df['lap2']).dt.total_seconds()
    df['lap3_sec'] = pd.to_timedelta('00:' + df['lap3']).dt.total_seconds()
    df['sprint_sec'] = pd.to_timedelta('00:' + df['sprint']).dt.total_seconds()
    # Calculate total running time and total pushups
    df['total_running_sec'] = df['lap1_sec'] + df['lap2_sec'] + df['lap3_sec'] + df['sprint_sec']
    df['total_pushups'] = df['set1'] + df['set2'] + df['set3']
    # Plot the data: bars on the left axis, push-ups on a secondary axis.
    fig, ax = plt.subplots()
    df.plot(x='date', y='total_running_sec', kind='bar', ax=ax)
    df.plot(x='date', y='total_pushups', kind='line', ax=ax, secondary_y=True)
    ax.set_xlabel('Date')
    ax.set_ylabel('Total Running Time (seconds)')
    ax.right_ax.set_ylabel('Total Pushups')
    plt.show()
# Pipeline: fetch the raw database, normalise the rows, tabulate, then plot.
data = read_db(database_id, headers)
db_dict = convert_db_to_dict(data)
db_df = dict_to_df(db_dict)
plot_data(db_df)
# print(db_df)
| Retro-Devils-Media/coleco | main.py | main.py | py | 3,544 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
2673451372 | # This file is executed on every boot (including wake-boot from deepsleep)
import esp
# Silence vendor OS debug output on the UART.
esp.osdebug(None)
#import webrepl
# webrepl.start()
from lib.wifiManager.wifiManager import WifiManager

# Try to join the configured WiFi network; if that fails, expose the board
# as an access point so it stays reachable for configuration.
wifiM = WifiManager()
if wifiM.connect():
    print("********WIFI is Connected**********")
else:
    wifiM.createAP()
    print("********WIFI as AccessPoint********")
| juanpc13/uPython-WebServer | boot.py | boot.py | py | 360 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "esp.osdebug",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "lib.wifiManager.wifiManager.WifiManager",
"line_number": 9,
"usage_type": "call"
}
] |
41054306636 | import boto3
from botocore.exceptions import ClientError
import pytest
from framework_from_conformance_pack import ConformancePack
@pytest.mark.parametrize(
    "in_name, error_code",
    [("test-name", None), ("garbage", None), ("test-name", "TestException")],
)
def test_get_conformance_pack(make_stubber, monkeypatch, in_name, error_code):
    """Cover the found, not-found, and AWS-error paths of get_conformance_pack."""
    config_client = boto3.client("config")
    config_stubber = make_stubber(config_client)
    pack = ConformancePack(config_client, None)
    cpack_name = "test-name"
    # get_conformance_pack prompts for a pack name; feed it the
    # parametrized answer.
    monkeypatch.setattr("builtins.input", lambda x: in_name)
    config_stubber.stub_describe_conformance_packs([cpack_name], error_code=error_code)
    if error_code is None:
        if in_name != "garbage":
            got_cpack_name = pack.get_conformance_pack()
            assert got_cpack_name == cpack_name
        else:
            # A name absent from the stubbed listing should raise.
            with pytest.raises(Exception):
                pack.get_conformance_pack()
    else:
        with pytest.raises(ClientError) as exc_info:
            pack.get_conformance_pack()
        assert exc_info.value.response["Error"]["Code"] == error_code
def test_create_custom_controls(make_stubber):
    """Verify one Audit Manager control is created per conformance-pack rule."""
    config_client = boto3.client("config")
    config_stubber = make_stubber(config_client)
    auditmanager_client = boto3.client("auditmanager")
    auditmanager_stubber = make_stubber(auditmanager_client)
    pack = ConformancePack(config_client, auditmanager_client)
    pack_name = "test-pack_name"
    rule_names = ["rule-1", "rule-2"]
    source_ids = ["src-1", "src-2"]
    control_ids = [f'ctl-{"1"*36}', f'ctl-{"2"*36}']
    config_stubber.stub_describe_conformance_pack_compliance(pack_name, rule_names)
    # Stub, in call order, one rule lookup followed by one control creation
    # (named 'Config-<rule>') for each rule in the pack.
    for rule_name, source_id, control_id in zip(rule_names, source_ids, control_ids):
        config_stubber.stub_describe_config_rules([rule_name], source_ids=[source_id])
        auditmanager_stubber.stub_create_control(
            f"Config-{rule_name}", source_id, control_id
        )
    got_control_ids = pack.create_custom_controls(pack_name)
    assert [got["id"] for got in got_control_ids] == control_ids
@pytest.mark.parametrize("error_code", [None, "TestException"])
def test_create_custom_framework(make_stubber, error_code):
    """Verify framework creation succeeds, and AWS errors surface as ClientError."""
    auditmanager_client = boto3.client("auditmanager")
    auditmanager_stubber = make_stubber(auditmanager_client)
    pack = ConformancePack(None, auditmanager_client)
    pack_name = "test-pack_name"
    control_ids = [{"id": f'ctl-{"1"*36}'}, {"id": f'ctl-{"2"*36}'}]
    framework = {"name": f"Config-Conformance-pack-{pack_name}", "id": f'fw-{"1"*36}'}
    auditmanager_stubber.stub_create_assessment_framework(
        framework["name"],
        [{"name": pack_name, "controls": control_ids}],
        framework["id"],
        error_code=error_code,
    )
    if error_code is None:
        pack.create_custom_framework(pack_name, control_ids)
    else:
        with pytest.raises(ClientError) as exc_info:
            pack.create_custom_framework(pack_name, control_ids)
        assert exc_info.value.response["Error"]["Code"] == error_code
| awsdocs/aws-doc-sdk-examples | python/example_code/auditmanager/test/test_framework_from_conformance_pack.py | test_framework_from_conformance_pack.py | py | 3,065 | python | en | code | 8,378 | github-code | 1 | [
{
"api_name": "boto3.client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "framework_from_conformance_pack.ConformancePack",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 27,
"usage_type": "call"
},
{
"api_na... |
25504175395 | import logging
import sys
from telemetry.value import histogram
from telemetry.value import histogram_util
from telemetry.value import scalar
from metrics import Metric
_HISTOGRAMS = [
{
'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent',
'display_name': 'V8_MemoryExternalFragmentationTotal',
'type': histogram_util.RENDERER_HISTOGRAM,
'description': 'Total external memory fragmentation after each GC in '
'percent.',
},
{
'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb',
'display_name': 'V8_MemoryHeapSampleTotalCommitted',
'type': histogram_util.RENDERER_HISTOGRAM,
'description': 'The total size of committed memory used by V8 after '
'each GC in KB.'
},
{
'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb',
'display_name': 'V8_MemoryHeapSampleTotalUsed',
'type': histogram_util.RENDERER_HISTOGRAM,
'description': 'The total size of live memory used by V8 after each '
'GC in KB.',
},
{
'name': 'V8.MemoryHeapSampleMaximumCommitted', 'units': 'kb',
'display_name': 'V8_MemoryHeapSampleMaximumCommitted',
'type': histogram_util.RENDERER_HISTOGRAM
},
{
'name': 'Memory.RendererUsed', 'units': 'kb',
'display_name': 'Memory_RendererUsed',
'type': histogram_util.RENDERER_HISTOGRAM
},
{
'name': 'Memory.BrowserUsed', 'units': 'kb',
'display_name': 'Memory_BrowserUsed',
'type': histogram_util.BROWSER_HISTOGRAM
},
]
class MemoryMetric(Metric):
  """MemoryMetric gathers memory statistics from the browser object.

  This includes both per-page histogram stats, most about javascript
  memory usage, and overall memory stats from the system for the whole
  test run."""

  def __init__(self, browser):
    super(MemoryMetric, self).__init__()
    self._browser = browser
    start_memory_stats = self._browser.memory_stats
    self._start_commit_charge = None
    # Commit charge is not reported by every platform; keep a baseline
    # (when available) so AddResults() can report the per-run delta.
    if 'SystemCommitCharge' in start_memory_stats:
      self._start_commit_charge = start_memory_stats['SystemCommitCharge']
    self._memory_stats = None
    # Histogram snapshots keyed by histogram name: values captured at
    # Start(), and the Start()->Stop() deltas computed at Stop().
    self._histogram_start = dict()
    self._histogram_delta = dict()
    self._started = False

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    """Add the browser flags this metric needs before the browser launches."""
    options.AppendExtraBrowserArgs([
        '--enable-stats-collection-bindings',
        # For a hard-coded set of Google pages (such as GMail), we produce
        # custom memory histograms (V8.Something_gmail) instead of the generic
        # histograms (V8.Something), if we detect that a renderer is only
        # rendering this page and no other pages. For this test, we need to
        # disable histogram customizing, so that we get the same generic
        # histograms produced for all pages.
        '--disable-histogram-customizer'
    ])

  def Start(self, page, tab):
    """Start the per-page preparation for this metric.

    Here, this consists of recording the start value of all the histograms.
    """
    if not self._browser.supports_memory_metrics:
      logging.warning('Memory metrics not supported.')
      return
    self._started = True
    for h in _HISTOGRAMS:
      histogram_data = histogram_util.GetHistogram(
          h['type'], h['name'], tab)
      # Histogram data may not be available
      if not histogram_data:
        continue
      self._histogram_start[h['name']] = histogram_data

  def Stop(self, page, tab):
    """Prepare the results for this page.

    The results are the differences between the current histogram values
    and the values when Start() was called.
    """
    if not self._browser.supports_memory_metrics:
      return
    assert self._started, 'Must call Start() first'
    for h in _HISTOGRAMS:
      # Histogram data may not be available
      if h['name'] not in self._histogram_start:
        continue
      histogram_data = histogram_util.GetHistogram(
          h['type'], h['name'], tab)
      self._histogram_delta[h['name']] = histogram_util.SubtractHistogram(
          histogram_data, self._histogram_start[h['name']])

  # Optional argument trace_name is not in base class Metric.
  # pylint: disable=arguments-differ
  def AddResults(self, tab, results, trace_name=None):
    """Add results for this page to the results object."""
    if not self._browser.supports_memory_metrics:
      return
    assert self._histogram_delta, 'Must call Stop() first'
    for h in _HISTOGRAMS:
      # Histogram data may not be available
      if h['name'] not in self._histogram_start:
        continue
      results.AddValue(histogram.HistogramValue(
          results.current_page, h['display_name'], h['units'],
          raw_value_json=self._histogram_delta[h['name']], important=False,
          description=h.get('description')))
    self._memory_stats = self._browser.memory_stats
    # Without browser-process stats there is nothing further to report.
    if not self._memory_stats['Browser']:
      return
    AddResultsForProcesses(results, self._memory_stats,
                           metric_trace_name=trace_name)
    if self._start_commit_charge:
      end_commit_charge = self._memory_stats['SystemCommitCharge']
      commit_charge_difference = end_commit_charge - self._start_commit_charge
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          'commit_charge.' + (trace_name or 'commit_charge'),
          'kb', commit_charge_difference, important=False,
          description='System commit charge (committed memory pages).'))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'processes.' + (trace_name or 'processes'),
        'count', self._memory_stats['ProcessCount'], important=False,
        description='Number of processes used by Chrome.'))
def AddResultsForProcesses(results, memory_stats, chart_trace_name='final',
                           metric_trace_name=None,
                           exclude_metrics=None):
  """Adds memory stats for browser, renderer and gpu processes.

  Args:
    results: A telemetry.results.PageTestResults object.
    memory_stats: System memory stats collected.
    chart_trace_name: Trace to identify memory metrics. Default is 'final'.
    metric_trace_name: Trace to identify the metric results per test page.
    exclude_metrics: List of memory metrics to exclude from results,
        e.g. VM, WorkingSetSize, etc.
  """
  # Windows reports working-set sizes; other platforms report resident-set
  # sizes. This name only affects the output trace/chart labels.
  metric = 'resident_set_size'
  if sys.platform == 'win32':
    metric = 'working_set'
  exclude_metrics = exclude_metrics or {}

  def AddResultsForProcessTypes(process_types_memory, process_type_trace):
    """Add all results for a given set of process types.

    Args:
      process_types_memory: A list of process types, e.g. Browser, 'Renderer'.
      process_type_trace: The name of this set of process types in the output.
    """
    def AddResult(value_name_memory, value_name_trace, description):
      """Add a result for a given statistic.

      Args:
        value_name_memory: Name of some statistic, e.g. VM, WorkingSetSize.
        value_name_trace: Name of this statistic to be used in the output.
      """
      if value_name_memory in exclude_metrics:
        return
      # Skip '*Peak' stats for combined groups — presumably because
      # per-process peaks do not sum into a meaningful aggregate.
      if len(process_types_memory) > 1 and value_name_memory.endswith('Peak'):
        return
      values = []
      for process_type_memory in process_types_memory:
        stats = memory_stats[process_type_memory]
        if value_name_memory in stats:
          values.append(stats[value_name_memory])
      if values:
        # With a per-page trace name, group all stats under the stat's
        # chart; otherwise each stat/process pair gets its own chart.
        if metric_trace_name:
          current_trace = '%s_%s' % (metric_trace_name, process_type_trace)
          chart_name = value_name_trace
        else:
          current_trace = '%s_%s' % (value_name_trace, process_type_trace)
          chart_name = current_trace
        # Values appear to be in bytes (divided by 1024 for the 'kb'
        # unit) — presumed from the unit label; verify against the
        # memory_stats producer.
        results.AddValue(scalar.ScalarValue(
            results.current_page, '%s.%s' % (chart_name, current_trace), 'kb',
            sum(values) / 1024, important=False, description=description))

    AddResult('VM', 'vm_%s_size' % chart_trace_name,
              'Virtual Memory Size (address space allocated).')
    AddResult('WorkingSetSize', 'vm_%s_%s_size' % (metric, chart_trace_name),
              'Working Set Size (Windows) or Resident Set Size (other '
              'platforms).')
    AddResult('PrivateDirty', 'vm_private_dirty_%s' % chart_trace_name,
              'Private Dirty is basically the amount of RAM inside the '
              'process that can not be paged to disk (it is not backed by the '
              'same data on disk), and is not shared with any other '
              'processes. Another way to look at this is the RAM that will '
              'become available to the system when that process goes away '
              '(and probably quickly subsumed into caches and other uses of '
              'it).')
    AddResult('ProportionalSetSize',
              'vm_proportional_set_size_%s' % chart_trace_name,
              'The Proportional Set Size (PSS) number is a metric the kernel '
              'computes that takes into account memory sharing -- basically '
              'each page of RAM in a process is scaled by a ratio of the '
              'number of other processes also using that page. This way you '
              'can (in theory) add up the PSS across all processes to see '
              'the total RAM they are using, and compare PSS between '
              'processes to get a rough idea of their relative weight.')
    AddResult('SharedDirty', 'vm_shared_dirty_%s' % chart_trace_name,
              'Shared Dirty is the amount of RAM outside the process that can '
              'not be paged to disk, and is shared with other processes.')
    AddResult('VMPeak', 'vm_peak_size',
              'The peak Virtual Memory Size (address space allocated) usage '
              'achieved by the * process.')
    AddResult('WorkingSetSizePeak', '%s_peak_size' % metric,
              'Peak Working Set Size.')

  AddResultsForProcessTypes(['Browser'], 'browser')
  AddResultsForProcessTypes(['Renderer'], 'renderer')
  AddResultsForProcessTypes(['Gpu'], 'gpu')
  AddResultsForProcessTypes(['Browser', 'Renderer', 'Gpu'], 'total')
| hanpfei/chromium-net | tools/perf/metrics/memory.py | memory.py | py | 10,223 | python | en | code | 289 | github-code | 1 | [
{
"api_name": "telemetry.value.histogram_util.RENDERER_HISTOGRAM",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "telemetry.value.histogram_util",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "telemetry.value.histogram_util.RENDERER_HISTOGRAM",
"li... |
21358137398 | import neo4j.exceptions
from reporting import user_watcher
def test_watch_users(mocker):
    """The watch loop bootstraps once and survives a Neo4j ServiceUnavailable."""
    runner = user_watcher.app.test_cli_runner()
    # Shutdown flag is polled four times; the first two False values keep
    # the loop alive, then True values stop it.
    mocker.patch(
        "reporting.user_watcher._is_shutdown",
        side_effect=[False, False, True, True],
    )
    bootstrap_mock = mocker.patch("reporting.user_watcher._bootstrap")
    # First deletion attempt raises ServiceUnavailable; a second (None)
    # value is stubbed, but only one call is expected before shutdown.
    delete_mock = mocker.patch(
        "reporting.services.reporting_neo4j.delete_expired_users",
        side_effect=[neo4j.exceptions.ServiceUnavailable, None],
    )
    sleep_mock = mocker.patch("time.sleep")
    runner.invoke(user_watcher.watch_users)
    assert bootstrap_mock.call_count == 1
    assert sleep_mock.call_count == 1
    assert delete_mock.call_count == 1
| paypay/seizu | tests/unit/reporting/user_watcher_test.py | user_watcher_test.py | py | 707 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "reporting.user_watcher.app.test_cli_runner",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "reporting.user_watcher.app",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "reporting.user_watcher",
"line_number": 7,
"usage_type": "name"
... |
71420523233 | from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.MainListView.as_view(), name='index'),
path('about/', views.AboutUsView.as_view(), name='about'),
path('feedback/', views.FeedBackFormView.as_view(), name='feedback'),
path('feedback/success/', views.FeedBackSuccessView.as_view(), name='feedback_success'),
path('terms_conditions/', views.TermsConditionsView.as_view(), name='terms_conditions'),
path('add_post/', views.AddPostView.as_view(), name='add_post'),
path('search/', views.SearchListView.as_view(), name='search'),
path('<slug:slug>', views.PostDetailView.as_view(), name='post_detail'),
path('<slug:slug>/edit/', views.EditPostView.as_view(), name='edit_post'),
path('tag/<slug:slug>', views.TagListView.as_view(), name='tag')
] | axkiss/FirstBlog | blog_app/urls.py | urls.py | py | 829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
28537185288 | # -*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = True
import os
import torch
from core.config import Config
from core import Test
# Checkpoint directory from a previous training run; its config.yaml is
# reloaded below with the VAR_DICT overrides applied.
PATH = "/.../.../...-bgl_time-tcniniNet-2-5-Feb-27-2023-18-07-28"
# Settings merged on top of the saved config (see Config below).
VAR_DICT = {
    "test_epoch": 5,
    "device_ids": "0",
    "inner_train_iter": 100,
    "n_gpu": 1,
    "test_episode": 100,
    "batch_size": 64,
    "episode_size": 1,
    # "query_num": 20,
    "test_shot": 5,
    # "shot_num": 20,
}
def main(rank, config):
    """Run the test loop for one process/device rank using the saved checkpoint."""
    test = Test(rank, config, PATH)
    test.test_loop()
if __name__ == "__main__":
    # Load the run's saved config and apply the VAR_DICT overrides.
    config = Config(os.path.join(PATH, "config.yaml"), VAR_DICT).get_config_dict()
    if config["n_gpu"] > 1:
        # Multi-GPU: restrict the visible devices, then spawn one worker
        # process per GPU (each receives its rank as the first argument).
        os.environ["CUDA_VISIBLE_DEVICES"] = config["device_ids"]
        torch.multiprocessing.spawn(main, nprocs=config["n_gpu"], args=(config,))
    else:
        # Single-device run on rank 0.
        main(0, config)
| Aquariuaa/FSLog | Fine_Tuning_Test.py | Fine_Tuning_Test.py | py | 852 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.dont_write_bytecode",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "core.Test",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "core.config.Config",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join... |
32232435595 | #!/usr/bin/env python
# coding: utf-8
# # Raster data analysis
#
# Raster data represent a matrix of cells (or pixels) organized into rows and columns (or a grid). Grid cells can represent data that changes **continuously** across a landscape (surface) such as elevation, air temperature, or reflectance data from satellite imaging platforms. Grid cells can also represent **discrete** data such as vegetation type or land cover.
#
# ```{image} images/rainier_adams.jpg
# :alt: rainier_adams
# :width: 600px
# :align: center
# ```
#
#
# We recommend three libraries for accessing and analyzing raster data in Python. The first is called `rasterio` which builds on the popular **Geographic Raster Abstraction Library** or `GDAL`. It supports read/write access for over 160 raster formats (e.g. GeoTIFF, NetCDF4) and includes methods for finding dataset information, reprojections, resampling, format conversion, and mosaicking. Once we have imported, resampled the data etc., we can apply fast matrix operations using `NumPy`. Finally, we may also use `xarray` which introduces labels in the form of **dimensions**, **coordinates** and **attributes** on top of raw NumPy-like arrays, a bit like `Pandas`.
#
# ```{image} images/raster_matrix.png
# :alt: raster matrix
# :width: 500px
# :align: center
# ```
# ## Review of raster data
#
# * Raster data represent a matrix of cells (or pixels) organized into rows and columns (or a grid)
#
# ```{image} images/raster_concept.png
# :alt: raster_concept
# :width: 500px
# :align: center
# ```
# ## Examples: surface maps
#
# * Grid cells can represent data that changes **continuously** across a landscape (surface) such as elevation or air temperature.
#
# ```{image} images/elevation.gif
# :alt: elevation
# :width: 400px
# :align: center
# ```
# ## Examples: satellite imagery
#
# * Grid cells can represent data collected from satellite imaging platforms, such as reflectance.
#
# ```{image} images/satellite.gif
# :alt: satellite
# :width: 400px
# :align: center
# ```
# ### Examples: classification maps
#
# * Grid cells can also represent **discrete** data (e.g. vegetation type or land cover).
#
# ```{image} images/classification.gif
# :alt: elevation
# :width: 600px
# :align: center
# ```
# In this demo, we will be working with elevation data, also known as a Digital Elevation Model (DEM), of the Cascades Mountain Range that includes Mt. Rainier and Mt. Adams.
#
# The data is formatted as a `GeoTIFF` and we will open it using `rasterio` function, `open()`. This function take a **path string** and returns a **dataset object**.
# In[11]:
import rasterio
import numpy as np
src = rasterio.open('data/N46W122.tif')
src
# ````{margin}
# ```{note}
# `src` stands for **source**
# ```
# ````
# ## Dataset attributes
#
# The **dataset object** contains a number of **attributes** which can be explored using the following methods. Remember that a raster **band** is an array of values representing **a single** variable in 2D space. All bands of a dataset have the **same** number of rows and columns.
# In[12]:
print(f"Number of bands: {src.count}")
print(f"Width: {src.width}")
print(f"Height: {src.height}")
# ## Georeferencing
#
# Like vector data, pixels in raster data can be mapped to regions on the Earth's surface. Like `GeoPandas`, we can display the **coordinate reference system** of our data using the `crs` method.
# In[13]:
src.crs
# Now that we know our data has a WGS84 geographic projection (i.e. longitudes and latitudes), we can display the **extent** of our dataset using the `bounds` method.
# In[14]:
src.bounds
# Finally, we can display the dataset's geospatial transform using the `transform` method. This function displays similar information to `bounds` but also contains the **spatial resolution** of the dataset (i.e. the dimensions that each pixel of our dataset represents on the ground). Since our dataset has a **WGS84 geographic projection** (i.e. `EPSG:4326`), the units of spatial resolution are in **degrees**.
# In[15]:
src.transform
# ## Reading raster data
#
# Now that we have some basic information about our data, we can go ahead and import it using the `read()` function. Data from a raster band can be accessed by the band's index number. Note that bands are indexed from 1 due to a GDAL convention.
# In[16]:
srtm = src.read(1)
# ````{margin}
# ```{note}
# `srtm` stands for the [**Shuttle Radar Topography Mission**](https://www2.jpl.nasa.gov/srtm/) which collected this elevation data.
# ```
# ````
# The read() method returns a numpy N-D array.
# In[17]:
srtm
# In[18]:
type(srtm)
# We can have a look at the data using `matplotlib`.
# In[19]:
import matplotlib.pyplot as plt
# Plot data
fig, ax = plt.subplots(figsize=(8,8))
im = ax.imshow(srtm)
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
# ## Indexing
#
# Many GIS tasks require us to read raster values at given locations. Rasterio dataset objects have an `index()` method for deriving the **array indices** corresponding to points in **georeferenced space**.
#
# Let's demonstrate with an example... what is the elevation of the summit of Mt Rainier? (`-121.760424, 46.852947`)
# In[20]:
# Define latitude and longitude of summit
rainier_summit = [-121.760424, 46.852947]
# Find row/column in corresponding raster dataset
loc_idx = src.index(rainier_summit[0], rainier_summit[1])
print(f"Grid cell index: {loc_idx}")
# We can use **matrix indexing** to find the value of the raster data at that location (see [Week 2 demo](../02a-demo.ipynb#Matrix-indexing-and-slicing) for reminder).
# In[21]:
elevation = srtm[loc_idx]
print(f"The summit of Mt Rainier is at {int(elevation)} m or {int(elevation * 3.281)} feet")
# In[22]:
fig, ax = plt.subplots(figsize=(8,8))
im = ax.imshow(srtm)
# Plot a point on grid
ax.scatter(loc_idx[1], loc_idx[0], s=50, color='red')
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
# ## More indexing methods
#
# How would we find the index of the **lowest elevation** in this raster dataset? The `NumPy` [`argmin()`](https://numpy.org/doc/stable/reference/generated/numpy.argmin.html) function returns the indices of the minimum values of an array.
# In[23]:
min_idx_value = srtm.argmin()
print(min_idx_value)
# Wait... I thought this dataset has two dimensions... Yes but by default, `argmin()` returns the index as a flattened (1D) array. Fortunately, converting from 1D back to 2D is simple using `np.unravel_index`.
# In[24]:
low_idx = np.unravel_index(min_idx_value, srtm.shape)
print(low_idx)
# In[25]:
elevation = srtm[low_idx]
print(f"The lowest elevation is {elevation} m")
# In[26]:
fig, ax = plt.subplots(figsize=(7,7))
im = ax.imshow(srtm)
# Plot a point on grid
ax.scatter(loc_idx[1], loc_idx[0], s=50, color='red')
ax.scatter(low_idx[1], low_idx[0], s=50, color='orange')
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
# ## Converting between indices and coordinates
#
# Since `Rasterio` knows the spatial reference system of our data, we can convert between indices (i.e. rows/columns) and coordinates (i.e. lats/longs) very conveniently. To find the latitude and longitude of the lowest elevation grid cell we can use the [`transform`](https://rasterio.readthedocs.io/en/latest/api/rasterio.transform.html) module.
#
# `transform.xy`, for example, takes the **dataset transform** along with the **row and column index** and converts them to x and y values in the dataset's coordinate reference system.
# In[31]:
rasterio.transform.xy(src.transform, low_idx[0], low_idx[1])
# Inversely, `transform.rowcol` takes the **dataset transform** along with the **x and y values in the coordinate reference system** and converts them to a row and column indices.
# In[32]:
rasterio.transform.rowcol(src.transform, rainier_summit[0], rainier_summit[1])
# ```{note}
# Note that `transform.rowcol` provides the same output as `src.index` used earlier.
# ```
# ## Reprojecting
#
# We could use `Rasterio` to reproject raster data... but it’s quite tricky!
#
# ```{image} images/rasterio_reproject.png
# :alt: rasterio reproject
# :width: 700px
# :align: center
# ```
#
# Instead we recommend using [**GDAL utilities**](https://gdal.org/programs/index.html#raster-programs). We can execute these commands in our jupyter notebook cells using the `!` sign.
#
# To reproject our data, we can use [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#gdalwarp). All we need to do is set a **target spatial reference** using the `-t_srs` flag followed by a space, the **input dataset**, and the **output dataset**. Below we set the target spatial reference to [**UTM Zone 10N**](https://epsg.io/32610) (or EPSG:32610) which is the UTM Zone for the Pacific Northwest.
#
# ```{image} images/utm_zones.png
# :alt: utm zones
# :width: 300px
# :align: center
# ```
# In[17]:
get_ipython().system('gdalwarp -t_srs EPSG:32610 data/N46W122.tif data/N46W122_utm.tif')
# If we navigate to our `data` folder we should see a new file called `N46W122_utm.tif`. Let's `open` this new GeoTIFF and check that is has a new projection.
# In[18]:
src = rasterio.open('data/N46W122_utm.tif')
src.crs
# In[19]:
srtm = src.read(1)
fig, ax = plt.subplots(figsize=(7,7))
im = ax.imshow(srtm)
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
# Why does the data look so strange now? Well, since we reprojected it, our data no longer represents a rectangle/square. Since all arrays have to be rectangles/squares, our reprojection introduced some **NoData values** at the edges. If we have a look at our array, we see that these NoData values are indicated by the integer `-32768` which is the smallest possible value that can be represented by the `int16` data type (i.e. -32,768 to 32,767).
# In[20]:
srtm
# We can mask these NoData values by using NumPy's [**masked array**](https://numpy.org/doc/stable/reference/maskedarray.generic.html) module that makes it easier to deal with arrays that have missing or invalid entries.
# In[21]:
srtm_masked = np.ma.masked_array(srtm, mask=(srtm == -32768))
# Now when we plot the data, the NoData values are not assigned a color.
# In[22]:
fig, ax = plt.subplots(figsize=(7,7))
im = ax.imshow(srtm_masked)
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
# ## Resampling
#
# GDAL utilites make it straightforward to change the **spatial resolution** of our raster dataset. To reduce the pixel size of our dataset from around 30 m to 1,000 m can be carried out using `gdalwarp`. This time, however, we specify the `-tr` flag, which stands for **target resolution**, followed by the pixel size we want.
# In[25]:
get_ipython().system('gdalwarp -tr 1000 -1000 data/N46W122_utm.tif data/N46W122_utm_1000.tif')
# Now when we `open`, `read`, `mask`, and `plot` our data, we will see that it looks a lot coarser/pixelated because each grid cell represents 1 km on the ground.
# In[26]:
# Open new raster dataset
src = rasterio.open('data/N46W122_utm_1000.tif')
# Read new raster dataset
srtm_1000 = src.read(1)
# Mask data
srtm_1000_masked = np.ma.masked_array(srtm_1000, mask=(srtm_1000 == -32768))
# Plot
fig, ax = plt.subplots(figsize=(8,8))
im = ax.imshow(srtm_1000_masked)
ax.set_title("Mt Rainier and Mt Adams", fontsize=14)
cbar = fig.colorbar(im, orientation='vertical')
cbar.ax.set_ylabel('Elevation', rotation=270, fontsize=14)
cbar.ax.get_yaxis().labelpad = 20
| owel-lab/programming-for-sds-site | book/_build/jupyter_execute/demos/09a-demo.py | 09a-demo.py | py | 12,182 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rasterio.open",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotl... |
14573968338 | # import library and abbreviate pyplot for easier use
import matplotlib.pyplot as plt

# Data to plot: each input value and its square.
inputValues = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]

# Use a built-in style.
# NOTE: this must be set BEFORE subplots() and plot() so it takes effect.
plt.style.use('dark_background')

# fig - the entire figure (the collection of plots generated)
# ax  - a single plot (the object used for most drawing calls)
fig, ax = plt.subplots()

# plot() draws the data in a meaningful way;
# linewidth=3 controls the thickness of the plotted line.
ax.plot(inputValues, squares, linewidth=3)

# Set chart title and label axes.
ax.set_title("Square Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Square of Value", fontsize=14)

# Set the size of tick labels on both axes.
ax.tick_params(axis='both', labelsize=14)
plt.show() | jamesastephenson/python-2022 | matplotlib 1 (8-13-22)/mpl_squares.py | mpl_squares.py | py | 1,027 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
17956866528 | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from urllib.parse import quote
import sys
from pyquery import PyQuery as pq
import re
from selenium.webdriver.support.select import Select
import time
import random
import config
import save
from lxml import etree
def get_match_info(year, league, stage, text):
    """Build a match record dict from the raw cell texts of one fixture row.

    `text` is the list of <td> texts; lstrip('21') drops a leading '1'/'2'
    ranking digit from the team cells and split('[') discards any trailing
    bracketed annotation.
    """
    home = text[2].lstrip('21').split('[')[0]
    away = text[4].lstrip('21').split('[')[0]
    return {
        'year': year,
        'stage': stage,
        'league': league,
        'date': text[1].replace("\n", " ", 1),
        'home_name': home,
        'away_name': away,
        'score': text[3],
        'half_score': text[10],
    }
def get_odds(url):
    """Scrape opening ('start') and closing ('end') 1x2 odds for one match page.

    Opens a fresh webdriver, switches the odds table to decimal display,
    and reads row '#oddstr_281' plus the following row. On a page-load
    timeout the whole fetch is retried recursively.

    Returns a dict {'start': [3 strings], 'end': [3 strings]}.
    """
    odds_browser = config.get_webdriver()
    odds_wait = WebDriverWait(odds_browser, 10)
    try:
        odds_browser.get(url)
        odds_wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#sel_showType')))
        select = Select(odds_browser.find_element_by_id("sel_showType"))
        select.select_by_value('1')
        odds_wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#dataList')))
        html = etree.HTML(odds_browser.page_source)
        doc = pq(html)
        td = doc('#oddstr_281 td')
        text = []
        odds = {}
        for item in td.items():
            text.append(item.text())
        odds['start'] = text[2:5]
        end_text = []
        # renamed from `next` to avoid shadowing the builtin
        next_row = td.parent().next()
        for item in next_row.items():
            end_text.append(item.text())
        text = end_text[0].split()
        odds['end'] = text[0:3]
    except TimeoutException:
        # BUGFIX: the retry's result used to be discarded, after which the
        # function fell through to `return odds` with `odds` unbound
        # (UnboundLocalError). Close this browser and return the retry.
        odds_browser.close()
        return get_odds(url)
    time.sleep(random.randint(3, 5))
    odds_browser.close()
    return odds
def get_over_down(url):
    """Scrape Bet365 over/under odds for one match page.

    Finds the table row containing 'Bet365' inside '#odds' and reads its
    cells; exits the process if the page layout is unexpected. On a
    page-load timeout the whole fetch is retried recursively.

    Returns a dict {'start': [3 strings], 'end': [3 strings]}.
    """
    odds_browser = config.get_webdriver()
    odds_wait = WebDriverWait(odds_browser, 10)
    try:
        odds_browser.get(url)
        odds_wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#odds')))
        html = etree.HTML(odds_browser.page_source)
        doc = pq(html)
        tr = doc('#odds tbody tr')
        text = []
        odds = {}
        for item in tr.items():
            for td in item.find('td').items():
                result = re.search('Bet365', td.text())
                if result:
                    bet365 = td.parent()
                    for bet365_td in bet365.find('td').items():
                        text.append(bet365_td.text())
                    break
        if len(text) < 10:
            # message means "over/under odds page error"
            print("大小球赔率页面错误")
            sys.exit()
        odds['start'] = text[2:5]
        odds['end'] = text[8:11]
    except TimeoutException:
        # BUGFIX: as in get_odds, the retry's result was discarded and
        # `odds` could be unbound on return; close and return the retry.
        odds_browser.close()
        return get_over_down(url)
    time.sleep(random.randint(3, 5))
    odds_browser.close()
    return odds
def get_match(page, year, league, stage):
    """Parse all fixture rows from the current results page and persist them.

    For each 11-cell row with a score, builds the match record, skips rows
    already saved, follows the odds/over-under links, and saves the match.
    Returns 'no_start' when an unplayed row is found, otherwise the last
    match dict processed.
    """
    html = etree.HTML(page.page_source)
    doc = pq(html)
    has_odds = False
    has_OD = False
    # rows are located via their anchor cells: a -> td -> tr
    items = doc('#Table3 tr a').parent().parent()
    for item in items.items():
        text = []
        tds = item.find('td')
        a = tds.find('a')
        for td in tds.items():
            text.append(td.text())
        if len(text) == 11:
            # text[3] is the score cell: empty means the match has not started
            if text[3]:
                match = get_match_info(year, league, stage, text)
                is_exist = save.get_exist_state(match)
                if is_exist == 'existed':
                    continue
            else:
                return 'no_start'
            for item in a.items():
                url = item.attr('href')
                result = re.search('oddslist', url)
                if result:
                    has_odds = True
                    odds = []
                    odds = get_odds(url)
                    match['odds'] = odds
                result = re.search('OverDown', url)
                if result:
                    has_OD = True
                    over_down = get_over_down(url)
                    match['over_down'] = over_down
            if match:
                print(match)
                save.save_match(match)
        elif len(text) != 0:
            # message means "match data error"; unexpected column count
            print("比赛数据错误")
            sys.exit()
    # NOTE(review): if no 11-cell row was seen, `match` is unbound here and
    # this raises UnboundLocalError — confirm callers guarantee >=1 row.
    return match
| jtyao/jtyao_python | match.py | match.py | py | 4,416 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.get_webdriver",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
... |
6307151716 | import torch
import numpy as np
import torch.nn as nn
from itertools import product, permutations
try:
from clarity.enhancer.compressor import CompressorTorch
from clarity.enhancer.nalr import NALRTorch
LIB_CLARITY = True
except ModuleNotFoundError:
print("There's no clarity library")
LIB_CLARITY = False
def loss_sisdr(inputs, targets):
    """Negative SI-SNR of (inputs, targets), usable directly as a loss."""
    score = si_snr(inputs, targets)
    return -score
def l2_norm(s1, s2):
    """Inner product of s1 and s2 along the last axis (dim kept)."""
    product = s1 * s2
    return torch.sum(product, dim=-1, keepdim=True)
def si_snr(s1, s2, eps=1e-8):
    """Scale-invariant SNR in dB between estimate s1 and reference s2.

    Projects s1 onto s2 to get the scaled target component, then measures
    the energy ratio of target to residual; eps guards the divisions/log.
    Returns the mean over all leading dims.
    """
    dot = torch.sum(s1 * s2, -1, keepdim=True)
    ref_energy = torch.sum(s2 * s2, -1, keepdim=True)
    s_target = dot / (ref_energy + eps) * s2
    e_noise = s1 - s_target
    target_energy = torch.sum(s_target * s_target, -1, keepdim=True)
    noise_energy = torch.sum(e_noise * e_noise, -1, keepdim=True)
    snr_db = 10 * torch.log10(target_energy / (noise_energy + eps) + eps)
    return torch.mean(snr_db)
def loss_phase_sensitive_spectral_approximation(enhance, target, mixture):
    """Phase-sensitive spectral approximation (PSA) loss.

    Inputs are complex spectrograms stored as [..., 2] (real, imag) tensors.
    The loss compares the enhanced magnitude against the target magnitude
    scaled by the cosine of the target/mixture phase difference, then takes
    the MSE.

    - Erdogan, Hakan, et al. "Phase-sensitive and recognition-boosted speech
      separation using deep recurrent neural networks." ICASSP 2015.

    NOTE(review): `torch.tanh(imag/real)` below looks like it should be
    `torch.atan` (arctangent) to recover a phase angle from the imag/real
    ratio — confirm against the PSA paper before changing, as it alters
    trained-model behavior.
    """
    # eps keeps the imag/real division finite when the real part is ~0
    eps = nn.Parameter(data=torch.ones((1, ), dtype=torch.float32)*1e-9, requires_grad=False).to(enhance.device)
    angle_mixture = torch.tanh(mixture[..., 1] / (mixture[..., 0]+eps))
    angle_target = torch.tanh(target[..., 1] / (target[..., 0]+eps))
    amplitude_enhance = torch.sqrt(enhance[..., 1]**2 + enhance[..., 0]**2)
    amplitude_target = torch.sqrt(target[..., 1]**2 + target[..., 0]**2)
    loss = amplitude_enhance - amplitude_target*torch.cos(angle_target-angle_mixture)
    loss = torch.mean(loss**2) # mse
    return loss
def UtterenceBaasedPermutationInvariantTraining(enhance, target: list, loss_function, mixture=None, return_comb=False):
# O(S^2), S= the number of speaker
assert enhance.shape == target.shape, f"enhance and target shape did not match...{enhance.shape}, {target.shape}"
nspk_enhance = enhance.shape[1]
nspk_target = target.shape[1]
id_spks_enhance = list(range(nspk_enhance))
id_spks_target = list(range(nspk_target))
product_id_spks = product(id_spks_enhance, id_spks_target )
loss_id_spks = torch.zeros(size=(nspk_enhance, nspk_target), dtype=torch.float32, requires_grad=False)
with torch.no_grad():
for i, (ienhance, itarget) in enumerate(product_id_spks):
loss_id_spks[ienhance, itarget] = loss_function(enhance[:, ienhance, ...], target[:, itarget, ...]) if mixture is None else loss_function(enhance[:, ienhance, ...], target[:, itarget, ...], mixture)
combinations_id_spks_enhance = permutations(id_spks_enhance)
combination = None
loss_min = 1e9
for ienhance in combinations_id_spks_enhance:
loss = 0
comb = []
for itarget in id_spks_target:
loss += loss_id_spks[ienhance[itarget], itarget]
comb.append((ienhance[itarget], itarget))
if loss_min > loss:
combination = comb
loss_min = loss
loss = torch.Tensor(torch.zeros(size=(1, )))
loss = loss.to(target.device)
for ienhance, itarget in combination:
if mixture is None:
loss += loss_function(enhance[:, ienhance, ...], target[:, itarget, ...])
else:
loss += loss_function(enhance[:, ienhance, ...], target[:, itarget, ...])
loss /= nspk_enhance
if not return_comb:
return loss
else:
return loss, combination | ooshyun/Speech-Enhancement-Pytorch | src/loss.py | loss.py | py | 4,227 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "torch.sum",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.log10",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_numbe... |
25723956292 | '''
Comparing single layer MLP with deep MLP (using TensorFlow)
'''
import numpy as np
import pickle
from math import sqrt
from scipy.optimize import minimize
# Do not change this
def initializeWeights(n_in,n_out):
    """
    # initializeWeights return the random weights for Neural Network given the
    # number of node in the input layer and output layer

    # Input:
    # n_in: number of nodes of the input layer
    # n_out: number of nodes of the output layer

    # Output:
    # W: matrix of random initial weights with size (n_out x (n_in + 1))"""
    # Glorot-style range: uniform in [-epsilon, epsilon]; +1 column is the bias
    epsilon = sqrt(6) / sqrt(n_in + n_out + 1);
    W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;
    return W
# Replace this with your sigmoid implementation
def sigmoid(z):
    """Element-wise logistic activation: 1 / (1 + e^(-z))."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
# Replace this with your nnObjFunction implementation
def nnObjFunction(params, *args):
    """Cross-entropy objective (with L2 regularization) and its gradient
    for a 1-hidden-layer sigmoid network; `params` is the flattened (w1, w2).
    Returns (obj_val, obj_grad) as expected by scipy.optimize.minimize."""
    n_input, n_hidden, n_class, training_data, training_label, lambdaval = args
    # unflatten the parameter vector back into the two weight matrices
    w1 = params[0:n_hidden * (n_input + 1)].reshape((n_hidden, (n_input + 1)))
    w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))
    obj_val = 0
    # Your code here
    trainingData_size = training_data.shape[0]
    # one-hot encode the integer labels
    training_output = np.zeros((trainingData_size,n_class))
    for i in range(len(training_label)):
        training_output[i][int(training_label[i])] = 1
    nnOutputValues, hidden_output_withBias, hidden_output, input_matrix_withBias = nnOutput(w1,w2,training_data)
    # negative log-likelihood: -(1/N) * sum(y*log(o) + (1-y)*log(1-o))
    t1 = np.multiply(training_output, np.log(nnOutputValues))
    t2 = np.multiply(np.subtract(1,training_output),np.log(np.subtract(1,nnOutputValues)))
    t3 = np.add(t1,t2)
    obj_val = np.divide(np.sum(t3),-1*trainingData_size)
    reg = np.sum(np.power(w1,2)) + np.sum(np.power(w2,2))
    # Value of error function (with L2 penalty lambda/(2N) * ||W||^2)
    obj_val = obj_val + np.divide(np.multiply(reg,lambdaval),2*trainingData_size)
    # Gradient matrix calculation (backpropagation)
    delta_l = np.subtract(nnOutputValues,training_output)
    Z_j = hidden_output_withBias
    # delta_Jmatrix is a (output layer neurons * hidden layer neurons) size matrix
    delta_Jmatrix = np.dot(delta_l.T,Z_j)
    temp = np.add(delta_Jmatrix,np.multiply(lambdaval,w2))
    grad_w2 = np.divide(temp,trainingData_size)
    # We remove the last column of the hidden layer since we are moving the opposite direction, i.e, back propagation
    w2_mod = np.delete(w2,n_hidden,1)
    # Same reason as above
    Z_j = hidden_output
    # hidden-layer delta: (1-z)*z * (delta_l . w2)
    t4 = np.subtract(1,Z_j)
    t5 = np.multiply(t4,Z_j)
    t6 = np.dot(delta_l,w2_mod)
    t7 = np.multiply(t5,t6)
    delta_Jmatrix = np.dot(t7.T,input_matrix_withBias)
    grad_w1 = np.divide(np.add(delta_Jmatrix,np.multiply(lambdaval,w1)),trainingData_size)
    # flatten both gradients into the single vector scipy expects
    obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)
    print(obj_val)
    return (obj_val, obj_grad)
# Replace this with your nnPredict implementation
def nnPredict(w1, w2, data):
    """Forward-propagate `data` through the two-layer network and return
    the predicted class index (argmax over output units) per example."""
    bias = np.ones((data.shape[0], 1))
    layer_in = np.concatenate((data, bias), 1)
    hidden = sigmoid(np.dot(layer_in, w1.T))
    hidden_b = np.concatenate((hidden, bias), 1)
    out = sigmoid(np.dot(hidden_b, w2.T))
    return np.argmax(out, axis=1)
def nnOutput(w1, w2, data):
    """Forward pass returning every intermediate needed by backprop:
    (output activations, hidden+bias, hidden, input+bias)."""
    bias = np.ones((data.shape[0], 1))
    layer_in = np.concatenate((data, bias), 1)
    hidden = sigmoid(np.dot(layer_in, w1.T))
    hidden_b = np.concatenate((hidden, bias), 1)
    out = sigmoid(np.dot(hidden_b, w2.T))
    return out, hidden_b, hidden, layer_in
# Do not change this
def preprocess():
    """Load 'face_all.pickle', normalize pixels to [0, 1], and split into
    train / validation / test sets (21100 / 2665 / remainder examples)."""
    pickle_obj = pickle.load(file=open('face_all.pickle', 'rb'))
    features = pickle_obj['Features']
    labels = pickle_obj['Labels']
    train_x = features[0:21100] / 255
    valid_x = features[21100:23765] / 255
    test_x = features[23765:] / 255
    # labels come wrapped in an outer array; unwrap before slicing
    labels = labels[0]
    train_y = labels[0:21100]
    valid_y = labels[21100:23765]
    test_y = labels[23765:]
    return train_x, train_y, valid_x, valid_y, test_x, test_y
"""**************Neural Network Script Starts here********************************"""
train_data, train_label, validation_data, validation_label, test_data, test_label = preprocess()
# Train Neural Network
# set the number of nodes in input unit (not including bias unit)
n_input = train_data.shape[1]
# set the number of nodes in hidden unit (not including bias unit)
n_hidden = 256
# set the number of nodes in output unit (binary classification)
n_class = 2
# initialize the weights into some random matrices
initial_w1 = initializeWeights(n_input, n_hidden);
initial_w2 = initializeWeights(n_hidden, n_class);
# unroll 2 weight matrices into single column vector
initialWeights = np.concatenate((initial_w1.flatten(), initial_w2.flatten()),0)
# set the regularization hyper-parameter
lambdaval = 10;
args = (n_input, n_hidden, n_class, train_data, train_label, lambdaval)
#Train Neural Network using fmin_cg or minimize from scipy,optimize module. Check documentation for a working example
opts = {'maxiter' :50} # Preferred value.
nn_params = minimize(nnObjFunction, initialWeights, jac=True, args=args,method='CG', options=opts)
params = nn_params.get('x')
#Reshape nnParams from 1D vector into w1 and w2 matrices
w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))
w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))
#Test the computed parameters
predicted_label = nnPredict(w1,w2,train_data)
#find the accuracy on Training Dataset
print('\n Training set Accuracy:' + str(100*np.mean((predicted_label == train_label).astype(float))) + '%')
predicted_label = nnPredict(w1,w2,validation_data)
#find the accuracy on Validation Dataset
print('\n Validation set Accuracy:' + str(100*np.mean((predicted_label == validation_label).astype(float))) + '%')
predicted_label = nnPredict(w1,w2,test_data)
#find the accuracy on Test Dataset
print('\n Test set Accuracy:' + str(100*np.mean((predicted_label == test_label).astype(float))) + '%')
| neeradsomanchi/HandWrittenDigitsClassification | facennScript.py | facennScript.py | py | 6,487 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_n... |
29341417351 | import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load grayscale, Otsu-threshold and invert so foreground objects are white.
I = cv2.imread('/home/kanish/Desktop/image.png', cv2.IMREAD_GRAYSCALE)
_, It = cv2.threshold(I, 0., 255, cv2.THRESH_OTSU)
It = cv2.bitwise_not(It)
# BUGFIX: label the thresholded binary image It, not the raw grayscale I
# (connectedComponents treats any nonzero pixel as foreground, so labeling
# the grayscale gives meaningless components and left It entirely unused).
_, labels = cv2.connectedComponents(It)
result = np.zeros((I.shape[0], I.shape[1], 3), np.uint8)
for i in range(labels.min(), labels.max() + 1):
    mask = cv2.compare(labels, i, cv2.CMP_EQ)
    # findContours returns (contours, hierarchy) on OpenCV >= 4 but
    # (image, contours, hierarchy) on 3.x; [-2] is the contours either way.
    ctrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
    result = cv2.drawContours(result, ctrs, -1, (0xFF, 0, 0))
cv2.imwrite('new_image.png', result) | kanishmathew777/image_processing | backend/image_processing_backend/pathfinder/join.py | join.py | py | 591 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_OTSU",
... |
41028200186 | from fastapi import status, FastAPI, Request
from fastapi.exceptions import RequestValidationError
import os
from common.enum import MessageEnum
from common.constant import const
import logging
from .response_wrapper import resp_err
import traceback
logger = logging.getLogger(const.LOGGER_API)
def biz_exception(app: FastAPI):
    """Register the application-wide exception handlers on `app`."""
    # customize request validation error
    @app.exception_handler(RequestValidationError)
    async def val_exception_handler(req: Request, rve: RequestValidationError, code: int = status.HTTP_400_BAD_REQUEST):
        lst = []
        for error in rve.errors():
            # BUGFIX: error['loc'] can contain ints (e.g. list indices in the
            # request body), and str.join raises TypeError on non-str items,
            # so stringify each location part before joining.
            loc = '.'.join(str(part) for part in error['loc'])
            lst.append('{}=>{}'.format(loc, error['msg']))
        return resp_err(code, ' , '.join(lst))

    # customize business error
    @app.exception_handler(BizException)
    async def biz_exception_handler(req: Request, exc: BizException):
        return resp_err(exc.code, exc.message, exc.ref)

    # system error
    @app.exception_handler(Exception)
    async def exception_handler(req: Request, exc: Exception):
        # BizException is handled by the dedicated handler above
        if isinstance(exc, BizException):
            return
        error_msg = traceback.format_exc()
        # outside dev mode fold the traceback onto one log line
        if os.getenv(const.MODE) != const.MODE_DEV:
            error_msg = error_msg.replace("\n", "\r")
        logger.error(error_msg)
        return resp_err(MessageEnum.BIZ_UNKNOWN_ERR.get_code(),
                        MessageEnum.BIZ_UNKNOWN_ERR.get_msg())
class BizException(Exception):
    """Business-level error carrying an API code, message and optional refs."""
    def __init__(self,
                 code: int = MessageEnum.BIZ_DEFAULT_ERR.get_code(),
                 message: str = MessageEnum.BIZ_DEFAULT_ERR.get_msg(),
                 ref: list = None):
        # defaults are evaluated once at class-definition time; fine here
        # because the enum code/message are constants
        self.code = code
        self.message = message
        self.ref = ref
| awslabs/stable-diffusion-aws-extension | middleware_api/lambda/inference/common/exception_handler.py | exception_handler.py | py | 1,722 | python | en | code | 111 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "common.constant.const.LOGGER_API",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "common.constant.const",
"line_number": 11,
"usage_type": "name"
},
{
"api... |
25043375529 | import cv2
import numpy as np
import os
import time
import pickle
from face_detection import RetinaFace
path = '../data/29--Students_Schoolkids/'
# model = 'resnet50'
model = 'mobilenet0.25'
scale = '1'
name = 'retinaFace'
count = 0
CONFIDENCE = 0.1  # minimum detection score written to the output files

if __name__ == "__main__":
    # PERF/BUGFIX: build the detector once instead of re-loading the model
    # for every image in the directory.
    detector = RetinaFace()
    for fn in os.listdir(path):
        filename = fn
        raw_img = cv2.imread(os.path.join(path, filename))
        out_file = '../data'
        # strip the extension; NOTE(review): the 'jpg'->'txt' replace is a
        # no-op after stripping — the '.txt' suffix is appended below anyway
        name = fn.split('.')[0]
        out_file = os.path.join(out_file, name.replace('jpg', 'txt'))
        t0 = time.time()
        print('start')
        faces = detector(raw_img)
        t1 = time.time()
        print(f'took {round(t1 - t0, 3)} to get {len(faces)} faces')
        for box, landmarks, score in faces:
            # BUGFIX: np.int is a deprecated alias removed in NumPy 1.24;
            # the builtin int is the documented replacement.
            box = box.astype(int)
            if score > CONFIDENCE:
                # append one "face <score> <x> <y> <w> <h>" line per detection
                with open(out_file + '.txt', 'a') as f:
                    f.write("%s %g %d %d %d %d\n" % (str('face'), score, box[0], box[1], box[2] - box[0], box[3] - box[1]))
| thisKK/Real-time-multi-face-recognition-base-on-Retinaface | faceDetection/WRILD_FACE.py | WRILD_FACE.py | py | 1,545 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
... |
33195953151 | import unittest
from mock import ANY, Mock, patch
from captainhook import pre_commit
class TestMain(unittest.TestCase):
    """Tests for captainhook.pre_commit.main with all collaborators patched.

    BUGFIX throughout: assertEquals is a deprecated alias removed in
    Python 3.12 — replaced with assertEqual.
    """

    def setUp(self):
        # pretend exactly one file is staged for commit
        self.get_files_patch = patch('captainhook.pre_commit.get_files')
        get_files = self.get_files_patch.start()
        get_files.return_value = ['file_one']

        # every check is enabled and takes no extra arguments by default
        self.hook_config_patch = patch('captainhook.pre_commit.HookConfig')
        self.HookConfig = self.hook_config_patch.start()
        self.HookConfig().is_enabled.return_value = True
        self.HookConfig().arguments.return_value = ''

        # a single fake check module whose run() reports no problems
        self.testmod = Mock(spec=['run'])
        self.testmod.run.return_value = None
        self.checks_patch = patch('captainhook.pre_commit.checks')
        checks = self.checks_patch.start()
        checks.return_value = [("testmod", self.testmod)]

        # fixed temp dir and no-op cleanup so paths are predictable
        self.mkdtemp_patch = patch('captainhook.pre_commit.tempfile.mkdtemp')
        self.mkdtemp_patch.start().return_value = '/tmp/dir'
        self.shutil_rm_patch = patch('captainhook.pre_commit.shutil.rmtree')
        self.shutil_rm_patch.start()

    def tearDown(self):
        self.checks_patch.stop()
        self.hook_config_patch.stop()
        self.get_files_patch.stop()
        self.mkdtemp_patch.stop()
        self.shutil_rm_patch.stop()

    def test_calling_run_without_args(self):
        result = pre_commit.main()

        self.assertEqual(result, 0)
        self.testmod.run.assert_called_with(['/tmp/dir/file_one'], '/tmp/dir')

    def test_calling_run_with_args(self):
        self.HookConfig().arguments.return_value = 'yep'
        result = pre_commit.main()

        self.assertEqual(result, 0)
        self.testmod.run.assert_called_with(
            ['/tmp/dir/file_one'], '/tmp/dir', 'yep'
        )

    @patch('captainhook.pre_commit.os.path.isfile')
    @patch('captainhook.pre_commit.shutil.copy')
    def test_required_files(self, copy, isfile):
        self.testmod.REQUIRED_FILES = ['should_be_copied']
        isfile.return_value = True

        pre_commit.main()

        copy.assert_called_with('should_be_copied', ANY)

    @patch('captainhook.pre_commit.os.path.isfile')
    @patch('captainhook.pre_commit.shutil.copy')
    def test_required_files_only_copied_if_exist(self, copy, isfile):
        self.testmod.REQUIRED_FILES = ['should_be_copied']
        isfile.return_value = False

        pre_commit.main()

        self.assertEqual(0, copy.call_count)
| alexcouper/captainhook | test/test_pre_commit.py | test_pre_commit.py | py | 2,423 | python | en | code | 54 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_num... |
16165826774 | # coding: utf-8
from mock import mock
from django.contrib.auth.models import User
from django.conf import settings
from rest_framework import status
class AuthHelperMixin(object):
    """Test mixin that patches the auth service HTTP client (requests) and
    provides helpers to assert and fake permission checks."""

    def setUp(self):
        super(AuthHelperMixin, self).setUp()
        self.requests_patcher = mock.patch('sw_rest_auth.permissions.requests')
        self.requests_mock = self.requests_patcher.start()

    def tearDown(self):
        super(AuthHelperMixin, self).tearDown()
        # BUGFIX: this used to call self.requests_mock.stop(), which is just
        # an auto-created Mock attribute (a silent no-op), so the patch was
        # never actually stopped and leaked into subsequent tests.
        self.requests_patcher.stop()

    def assertPermChecked(self, user, perm):
        """Assert the auth service was queried for (user, perm)."""
        self.assertTrue(self.requests_mock.get.called)
        call_args, call_kwargs = self.requests_mock.get.call_args
        self.assertEqual(settings.AUTH_SERVICE_CHECK_PERM_URL, call_args[0])
        self.assertEqual(user.username, call_kwargs['params']['user'])
        self.assertEqual(perm, call_kwargs['params']['perm'])

    def force_permission(self, user=None, perm=None, perm_list=None):
        """Make the mocked auth service grant `perm`/`perm_list` to `user`;
        called with no arguments it clears any previously forced grant."""
        if not perm_list and perm:
            perm_list = [perm]

        def side_effect(url, headers, params, verify):
            # 200 only for the granted (user, perm) pairs, 400 otherwise
            response_mock = mock.Mock()
            if params['user'] == user.username and params['perm'] in perm_list:
                response_mock.status_code = status.HTTP_200_OK
            else:
                response_mock.status_code = status.HTTP_400_BAD_REQUEST
            return response_mock

        if user and perm_list:
            self.requests_mock.get.side_effect = side_effect
        else:
            self.requests_mock.get.side_effect = None
class AuthTestCaseMixin(AuthHelperMixin):
    """Reusable auth test-case mixin: set `url` and either `perm` (with
    `method_name`) or override get_perm_map() to map HTTP methods to
    required permission codes; the generic tests below then cover 401/403
    and the granted-permission path."""
    url = None
    perm = None
    method_name = 'post'

    def get_method_names(self):
        # methods come from the perm map when defined, else the single default
        if self.get_perm_map():
            return self.get_perm_map().keys()
        else:
            return [self.method_name]

    def get_perm_map(self):
        # mapping of HTTP method name -> permission code, or None if no perms
        if self.perm:
            return {self.method_name: self.perm}
        else:
            return None

    def setUp(self):
        super(AuthTestCaseMixin, self).setUp()
        self.client.force_authenticate(self.get_user())
        if self.get_perm_map():
            self.force_permission(self.get_user(), perm_list=self.get_perm_map().values())

    def test_not_auth(self):
        # anonymous requests must get 401 for every mapped method
        self.client.force_authenticate()  # unset authentication
        for method_name in self.get_method_names():
            response = getattr(self.client, method_name)(self.url)
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_forbidden(self):
        # authenticated but without the permission -> 403
        if not self.get_perm_map():
            return
        self.client.force_authenticate(user=self.get_user())
        self.force_permission()  # unset permission
        for method_name, perm_code in self.get_perm_map().items():
            response = getattr(self.client, method_name)(self.url)
            self.assertPermChecked(self.get_user(), perm_code)
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_perm(self):
        # with the permission granted, neither 401 nor 403 may be returned
        if not self.get_perm_map():
            return
        self.client.force_authenticate(self.get_user())
        for method_name, perm_code in self.get_perm_map().items():
            self.force_permission(self.get_user(), perm=perm_code)
            response = getattr(self.client, method_name)(self.url)
            self.assertPermChecked(self.get_user(), perm_code)
            self.assertNotEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
            self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def get_user(self):
        # lazily create and cache a single test user
        if not hasattr(self, 'user') or not self.user:
            self.user = User.objects.create(username='tester')
        return self.user
| telminov/sw-django-rest-auth | sw_rest_auth/tests/helpers.py | helpers.py | py | 3,698 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "mock.mock.patch",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mock.mock",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.AUTH_SERVICE_CHECK_PERM_URL",
"line_number": 23,
"usage_type": "attribute"
},
{
"a... |
29851347028 | from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import argparse
from msedge.selenium_tools import Edge, EdgeOptions
import pandas as pd
import platform
import datetime
import pandas as pd
import multiprocessing as mp
from functools import partial
import sys
from glob import glob
class Tweet:
    """Wrapper class to contain scraped tweets data"""
    def __init__(self, username, timestamp, text, likes, retweets, replies, url):
        # NOTE: counts come straight from the DOM, so they may be strings
        # (e.g. "1.2K") or 0 when the element was missing
        self.username = username
        self.timestamp = timestamp
        self.text = text
        self.likes = likes
        self.retweets = retweets
        self.url = url
        self.replies = replies
def get_data(card):
    """Extract a Tweet from one tweet-card WebElement; returns None when the
    card lacks a handle/date/url or is a promoted tweet."""
    # handle is the "@name" span; without it the card is not a real tweet
    try:
        handle = card.find_element_by_xpath(
            './/span[contains(text(), "@")]').text
    except:
        return
    try:
        postdate = card.find_element_by_xpath(
            './/time').get_attribute('datetime')
    except:
        return
    # tweet body = main text div plus the optional "responding to" div
    try:
        comment = card.find_element_by_xpath('.//div[2]/div[2]/div[1]').text
    except:
        comment = ""
    try:
        responding = card.find_element_by_xpath('.//div[2]/div[2]/div[2]').text
    except:
        responding = ""
    text = comment + responding
    # engagement counters default to 0 when the element is absent
    try:
        reply_cnt = card.find_element_by_xpath(
            './/div[@data-testid="reply"]').text
    except:
        reply_cnt = 0
    try:
        retweet_cnt = card.find_element_by_xpath(
            './/div[@data-testid="retweet"]').text
    except:
        retweet_cnt = 0
    try:
        like_cnt = card.find_element_by_xpath(
            './/div[@data-testid="like"]').text
    except:
        like_cnt = 0
    # handle promoted tweets (skipped entirely)
    try:
        promoted = card.find_element_by_xpath(
            './/div[2]/div[2]/[last()]//span').text == "Promoted"
    except:
        promoted = False
    if promoted:
        return
    # tweet url (the permalink containing "/status/")
    try:
        element = card.find_element_by_xpath(
            './/a[contains(@href, "/status/")]')
        tweet_url = element.get_attribute('href')
    except:
        return
    # normalize empty-string counters to 0
    like_cnt = 0 if not like_cnt else like_cnt
    retweet_cnt = 0 if not retweet_cnt else retweet_cnt
    reply_cnt = 0 if not reply_cnt else reply_cnt
    return Tweet(
        handle, postdate, text, like_cnt, retweet_cnt, reply_cnt, tweet_url
    )
def log_search_page(driver, start_date, end_date, lang, display_type, words):
    """Build a Twitter search URL (since:/until:/lang: operators), open it in
    `driver`, and click the 'Top'/'Latest' tab given by display_type."""
    # multiple search terms arrive as "a//b" and are URL-joined with %20
    if words != None:
        words = str(words).split("//")
        words = "%20".join(words) + "%20"
    else:
        words = ""
    if lang != None:
        lang = 'lang%3A'+lang
    else:
        lang = ""
    # until:/since: operators, pre-URL-encoded (%3A is ':', %20 is space)
    end_date = "until%3A"+end_date+"%20"
    start_date = "since%3A"+start_date+"%20"
    query = f"https://twitter.com/search?q={words}"\
        f"{end_date}{start_date}{lang}&src=typed_query"
    driver.get(query)
    sleep(1)
    # navigate to historical 'Top' or 'Latest' tab
    try:
        driver.find_element_by_link_text(display_type).click()
    except:
        print("Latest Button doesnt exist.")
def init_driver(navig, headless, proxy):
    """Create a WebDriver for `navig` ("chrome" or "edge") with optional
    headless mode and proxy; picks the bundled driver binary per OS.

    NOTE(review): an unknown `navig` value falls through both branches and
    returns None — callers must pass one of the two supported names.
    """
    # create instance of web driver
    # path to the chromdrive.exe
    if navig == "chrome":
        browser_path = ''
        if platform.system() == 'Windows':
            print('Detected OS : Windows')
            browser_path = './drivers/chromedriver_win.exe'
        elif platform.system() == 'Linux':
            print('Detected OS : Linux')
            browser_path = './drivers/chromedriver_linux'
        elif platform.system() == 'Darwin':
            print('Detected OS : Mac')
            browser_path = './drivers/chromedriver_mac'
        else:
            raise OSError('Unknown OS Type')
        options = Options()
        if headless == True:
            options.headless = True
        else:
            options.headless = False
        options.add_argument('--disable-gpu')
        options.add_argument('log-level=3')
        if proxy != None:
            options.add_argument('--proxy-server=%s' % proxy)
        # skip image loading to speed up scraping
        prefs = {"profile.managed_default_content_settings.images": 2}
        options.add_experimental_option("prefs", prefs)
        driver = webdriver.Chrome(
            options=options, executable_path=browser_path)
        driver.set_page_load_timeout(100)
        return driver
    elif navig == "edge":
        browser_path = 'drivers/msedgedriver.exe'
        options = EdgeOptions()
        if proxy != None:
            options.add_argument('--proxy-server=%s' % proxy)
        if headless == True:
            options.headless = True
            options.use_chromium = False
        else:
            options.headless = False
            options.use_chromium = True
        options.add_argument('log-level=3')
        driver = Edge(options=options, executable_path=browser_path)
        return driver
def keep_scroling(driver):
    """Keeps scrolling the Twitter feed and records tweets.

    Scrolls until two consecutive scrolls produce no page movement (feed
    exhausted); returns the list of Tweet objects collected.
    """
    data = []
    scrolling = True
    last_position = driver.execute_script("return window.pageYOffset;")
    while scrolling:
        # get the card of tweets currently rendered in the feed
        page_cards = driver.find_elements_by_xpath(
            '//div[@data-testid="tweet"]'
        )
        for card in page_cards:
            tweet = get_data(card)
            if tweet:
                data.append(tweet)
                last_date = tweet.timestamp
                print("Tweet made at: " + str(last_date)+" is found.")
        scroll_attempt = 0
        while True:
            driver.execute_script(
                'window.scrollTo(0, document.body.scrollHeight);')
            sleep(1)
            curr_position = driver.execute_script("return window.pageYOffset;")
            if last_position == curr_position:
                scroll_attempt += 1
                # end of scroll region after a second unmoved attempt
                if scroll_attempt > 1:
                    scrolling = False
                    break
                else:
                    sleep(1) # attempt another scroll
            else:
                last_position = curr_position
                break
    return data
def make_output_path(words, init_date, max_date):
    """Return the CSV path for a scrape: first query term plus date range.

    Dates may be datetimes or strings; only the part before the first
    space (i.e. the date, not the time) is kept.
    """
    term = words.split("//")[0]
    start, end = (str(d).split(" ")[0] for d in (init_date, max_date))
    return f"tweeter_output/{term}_{start}_{end}.csv"
def scrap(
    data_tuple,
    words,
    days_between,
    lang,
    display_type,
    driver,
    headless,
    proxy
):
    """
    Scrape tweets posted between the two dates in ``data_tuple``.

    data_tuple  : (start_date, end_date) datetimes for this worker's window
    words       : search query terms, "//"-separated
    days_between: interval size (unused here; kept for caller compatibility)
    lang        : tweet language filter
    display_type: "Latest" or "Top" twitter tab
    driver      : navigator NAME ("chrome"/"edge") used to build a webdriver
    headless    : run the browser headless
    proxy       : optional proxy server

    return:
        data : list of tweets scraped with the associated features.
    """
    start_date, end_date = data_tuple
    # BUG FIX: the original rebound `driver` to the webdriver instance and
    # then passed that instance back into init_driver() on retry, which
    # expects the navigator name; keep the name in its own variable.
    navig = driver
    # initiate the driver
    driver = init_driver(navig, headless, proxy)
    # log search page between start_date and end_date
    log_search_page(driver=driver, words=words,
                    start_date=datetime.datetime.strftime(start_date, '%Y-%m-%d'),
                    end_date=datetime.datetime.strftime(end_date, '%Y-%m-%d'),
                    lang=lang, display_type=display_type)
    print(f"looking for tweets between {start_date} and {end_date}...")
    count_failed = 0
    data = keep_scroling(driver)
    # retry in case of timeouts or disconnections (up to 2 retries)
    while len(data) == 0 and count_failed < 2:
        driver.close()
        driver = init_driver(navig, headless, proxy)
        log_search_page(driver=driver, words=words,
                        start_date=datetime.datetime.strftime(start_date, '%Y-%m-%d'),
                        end_date=datetime.datetime.strftime(end_date, '%Y-%m-%d'),
                        lang=lang, display_type=display_type)
        data = keep_scroling(driver)
        count_failed += 1
    driver.close()
    return data
if __name__ == '__main__':
    # CLI entry point: parse arguments, fan the date range out over a
    # process pool (one worker per sub-interval), then merge and save.
    parser = argparse.ArgumentParser(description='Scrap tweets.')
    parser.add_argument('--words', type=str,
                        help='Queries. they should be devided by "//" : Cat//Dog.', default=None)
    parser.add_argument('--max_date', type=str,
                        help='Max date for search query. example : %%Y-%%m-%%d.', required=True)
    parser.add_argument('--start_date', type=str,
                        help='Start date for search query. example : %%Y-%%m-%%d.', required=True)
    parser.add_argument('--interval', type=int,
                        help='Interval days between each start date and end date for search queries. example : 5.', default=1)
    parser.add_argument('--navig', type=str,
                        help='Navigator to use : chrome or edge.', default="chrome")
    parser.add_argument('--lang', type=str,
                        help='Tweets language. example : "en" for english and "fr" for french.', default="en")
    # BUG FIX: argparse `type=bool` treats ANY non-empty string as True, so
    # `--headless False` used to enable headless mode anyway; parse the
    # string explicitly instead.
    parser.add_argument('--headless',
                        type=lambda s: s.strip().lower() in ('true', '1', 'yes'),
                        help='Headless webdrives or not. True or False', default=True)
    parser.add_argument('--display_type', type=str,
                        help='Display type of twitter page : Latest or Top', default="Top")
    parser.add_argument('--proxy', type=str,
                        help='Proxy server', default=None)
    parser.add_argument('--custom_column_name', type=str,
                        help='A column to include for the words query', default="airline")
    args = parser.parse_args()
    words = args.words
    max_date = args.max_date
    start_date = args.start_date
    interval = args.interval
    navig = args.navig
    lang = args.lang
    driver = args.navig  # navigator name; scrap() turns it into a webdriver
    headless = args.headless
    display_type = args.display_type
    proxy = args.proxy
    custom_column_name = args.custom_column_name
    # monitor time
    start = datetime.datetime.now()
    # build the [start, start+interval, ...] boundaries covering the range
    start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_date = datetime.datetime.strptime(max_date, '%Y-%m-%d')
    start_dates = [start_date]
    while start_dates[-1] + datetime.timedelta(days=interval) < end_date:
        start_dates.append(start_dates[-1] +
                           datetime.timedelta(days=interval))
    end_dates = [start_dates[i]
                 for i in range(1, len(start_dates))] + [end_date]
    # launching one worker process per interval of time to scrape
    pool = mp.Pool(mp.cpu_count())
    try:
        all_tweets = pool.map(
            partial(
                scrap,
                words=words,
                days_between=interval,
                lang=lang,
                display_type=display_type,
                driver=driver,
                headless=headless,
                proxy=proxy,
            ),
            zip(start_dates, end_dates)
        )
        # flatten the per-interval tweet lists
        merged_tweets = [
            t for interval_tweets in all_tweets for t in interval_tweets
        ]
        # save file, tagging every row with the joined query words
        df = pd.DataFrame([
            {**vars(tweet), **{custom_column_name: "_".join(words.split("//"))}}
            for tweet in merged_tweets
        ])
        df.to_csv(make_output_path(words, start_date, end_date), index=False)
        end = datetime.datetime.now()
        print(f"Time for current execution: {end - start}")
        print(f"Found {len(df.index)} tweets.")
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        sys.exit()
| alexZajac/airlines_performance | explanations_professors/tweeter_data.py | tweeter_data.py | py | 11,622 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "platform.system",
"... |
41012383162 | import json
import time
import pymongo
import threading
from .TwitchWebsocket.TwitchWebsocket import TwitchWebsocket
from .FlushPrint import ptf
ws = None
statsDict = {}
statsLock = None
statsThread = None
colRewards = None
# True if user is a mod or the broadcaster
def CheckPrivMod(tags):
    """Return True when the tagged user is a moderator or the broadcaster."""
    is_mod = tags["mod"] == "1"
    is_broadcaster = tags["user-id"] == tags["room-id"]
    return is_mod or is_broadcaster
# True if user is a sub, mod, or the broadcaster
def CheckPrivSub(tags):
    """Return True when the tagged user is a subscriber, mod, or broadcaster."""
    if CheckPrivMod(tags):
        return True
    return tags["subscriber"] == "1"
def GetSyntax(user, syntax):
    """Format a reply telling `user` the correct syntax for a command."""
    return "[{}]: The syntax for that command is: {}".format(user, syntax)
# Record that the given user has redeemed the given reward
def RedeemReward(user, rewardId):
    """Increment the persisted redemption count of rewardId for user.

    Counts are stored per user as a JSON-encoded {rewardId: count} string in
    the `rewards` field of the mongo document.
    """
    doc = colRewards.find_one({ "user" : user })
    if doc == None:
        # first redemption for this user: create a fresh document
        counts = {rewardId: 1}
        colRewards.insert_one({
            "user" : user,
            "rewards" : json.dumps(counts)
        })
    else:
        counts = json.loads(doc["rewards"])
        counts[rewardId] = counts.get(rewardId, 0) + 1
        colRewards.update_one(
            { "user" : user },
            { "$set" : { "rewards" : json.dumps(counts) } }
        )
# Return true if the given user has redeemed the given reward and decrement
def CheckRemoveReward(user, rewardId):
    """Consume one redemption of rewardId for user.

    Returns True when a redemption existed (and was decremented/removed),
    False otherwise.  The user's document is deleted once no redemptions of
    any kind remain.
    """
    doc = colRewards.find_one({ "user" : user })
    if doc == None:
        return False
    counts = json.loads(doc["rewards"])
    if rewardId not in counts:
        return False
    if counts[rewardId] == 1:
        del counts[rewardId]
    else:
        counts[rewardId] -= 1
    if len(counts) == 0:
        # nothing left at all: drop the whole document
        colRewards.delete_one({ "user" : user })
    else:
        colRewards.update_one(
            { "user" : user },
            { "$set" : { "rewards" : json.dumps(counts) } }
        )
    return True
# Log info for an incoming message
def LogReceived(type, user, message, tags, recordUsage=False):
    """Log an incoming chat event and, optionally, record command usage."""
    ptf(f"Received [{type}] from [{user}]: {message}", time=True)
    ptf(f"With tags: {tags}")
    if not recordUsage:
        return
    # the first whitespace token, lower-cased, is treated as the command
    command = message.lower().split(" ")[0]
    RecordUsage(command, user)
# Send a message to twitch chat and log
def SendMessage(response, type="PRIVMSG", user=None):
    """Deliver `response` over the shared websocket and log the outcome.

    WHISPER messages go to the named user; anything else is posted to chat.
    A None response is only logged, never sent.
    """
    global ws
    userStr = f" to [{user}]" if user is not None else ""
    if response is None:
        ptf(f"No [{type}] message sent{userStr}\n", time=True)
        return
    if type == "WHISPER":
        ws.send_whisper(user, response)
    else:
        ws.send_message(response)
    ptf(f"Sent [{type}]{userStr}: {response}\n", time=True)
# Increment command usage of non-dev users
def RecordUsage(command, user):
    """Bump the in-memory usage counter for `command` (thread-safe).

    NOTE(review): `user` is currently unused — confirm whether per-user
    stats were intended.
    """
    global statsDict
    global statsLock
    with statsLock:
        statsDict[command] = statsDict.get(command, 0) + 1
# Dump current usage stats to file
def StoreUsageAsync():
    """Background loop: every 10 minutes merge the in-memory usage counters
    into UsageStats.json and reset them.

    Runs forever; started as a thread by InitializeUtils().
    """
    global statsDict
    global statsLock
    while True:
        # Every 10 minutes
        time.sleep(60 * 10)
        # No stats to dump
        if not statsDict:
            continue
        with open('UsageStats.json', 'r') as file:
            statsJson = json.load(file)
        with open('UsageStats.json', 'w') as file:
            # hold the lock while merging and writing so concurrent
            # RecordUsage calls cannot be lost between merge and reset
            with statsLock:
                for key in statsDict:
                    if key in statsJson:
                        statsJson[key] += statsDict[key]
                    else:
                        statsJson[key] = statsDict[key]
                # reset the accumulator for the next interval
                statsDict = {}
                json.dump(statsJson, file, indent=4, sort_keys=True)
# Initialize util fields
def InitializeUtils(socket, chan, mongoClient):
    """Wire up the module globals used by the other helpers.

    socket      : TwitchWebsocket used by SendMessage()
    chan        : channel name; its rewards live in purebotdb[chan+"Rewards"]
    mongoClient : pymongo client

    Also makes sure UsageStats.json exists and holds valid JSON, then starts
    the StoreUsageAsync background thread.
    """
    global ws
    global statsLock
    global statsThread
    global colRewards
    ws = socket
    colRewards = mongoClient.purebotdb[chan + "Rewards"]
    colRewards.create_index([("user", pymongo.ASCENDING)])
    with open('UsageStats.json', 'a+') as file:
        try:
            file.seek(0)
            json.load(file)
        except ValueError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. json.JSONDecodeError is a
            # ValueError subclass, so this still resets any corrupt file.
            file.truncate(0)
            file.write("{}\n")
    statsLock = threading.Lock()
    statsThread = threading.Thread(target=StoreUsageAsync)
    statsThread.start()
| ThomasCulotta/PureBot | Utilities/TwitchUtils.py | TwitchUtils.py | py | 4,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 60,
... |
32411124306 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class DataVisualization(object):
    """Plotting helpers (seaborn/matplotlib) bound to a single DataFrame.

    User-facing labels and messages are intentionally kept in Spanish,
    matching the original application text.
    """

    def __init__(self, data_frame):
        # the DataFrame every plot below reads from
        self.data_frame = data_frame

    def view_histogram_by_column(self, column, title):
        """Show a histogram with KDE for `column`; print a message instead
        of raising when the column does not exist (KeyError)."""
        try:
            sns.set(style='whitegrid')
            f, ax = plt.subplots(1, 1, figsize=(6, 4))
            ax = sns.histplot(self.data_frame[column], kde=True, color='c')
            plt.title(title, fontsize=18, fontweight="bold")
            plt.xlabel(column.capitalize(), fontsize=14)
            plt.ylabel("Densidad", fontsize=14)
            plt.show();
        except KeyError:
            print("Problema para visualizar gráfico")

    def count_by_column(self, column):
        """Return the row count per distinct value of `column` (a Series)."""
        return self.data_frame.groupby(column).size()

    def view_distribution(self, hue, features):
        """Show pairwise scatter plots of `features` colored by the
        categorical column `hue`, to inspect class distributions."""
        sns.pairplot(self.data_frame, hue=hue, height=4, vars=features, kind='scatter');

    def view_correlation(self):
        """Show a heatmap of the DataFrame's feature correlations."""
        plt.figure(figsize=(10, 10))
        sns.heatmap(self.data_frame.corr(), annot=True, cmap='Blues_r')
        plt.title("Correlación de características", fontsize=18, fontweight="bold")
        plt.show()

    def view_outliers(self, x, y):
        """Show a boxplot of column `y` grouped by column `x` (both column
        names are looked up lower-cased) to inspect outliers."""
        plt.figure(figsize=(5, 5))
        sns.boxplot(x=x.lower(), y=y.lower(), data=self.data_frame)
        plt.title("Outliers {}".format(y.capitalize()), fontsize=18, fontweight="bold")
        plt.xlabel(x.capitalize(), fontsize=14)
        plt.ylabel(y.capitalize(), fontsize=14)
        plt.show()
{
"api_name": "seaborn.set",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "seaborn.hist... |
8682854808 | import numpy as np
import matplotlib.pyplot as plt
def f(x, y):
    """Right-hand side of the ODE dy/dx = x - y."""
    slope = x - y
    return slope
def euler_method(f, x0, y0, h, num_steps):
    """Integrate y' = f(x, y) with the explicit (forward) Euler scheme.

    Starting from (x0, y0), take `num_steps` steps of size `h` and return
    the visited (x_values, y_values), each of length num_steps + 1.
    """
    xs = [x0]
    ys = [y0]
    for _ in range(num_steps):
        x_prev, y_prev = xs[-1], ys[-1]
        ys.append(y_prev + h * f(x_prev, y_prev))
        xs.append(x_prev + h)
    return xs, ys
# Problem setup: integrate dy/dx = x - y starting from (1, 1)
x0 = 1
y0 = 1
h = 0.1          # step size
num_steps = 100  # number of Euler steps

x_values, y_values = euler_method(f, x0, y0, h, num_steps)

# Plot the approximate solution
plt.figure(figsize=(8, 6))
plt.plot(x_values, y_values, label="Euler's Method")
plt.xlabel('x')
plt.ylabel('y')
plt.title("Euler's Method for dy/dx = x - y")
plt.legend()
plt.grid(True)
plt.show()
| danielkatz19/ODE-s-Runge-Kutta | Correct_Math 312_Group Delta/Commented Code/example_code 2_commented .py | example_code 2_commented .py | py | 711 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mat... |
13008619672 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from web_news.misc.spiderredis import SpiderRedis
from web_news.items import SpiderItem
from scrapy.loader import ItemLoader
class BjdSpider(SpiderRedis):
    """Crawl spider for www.bjd.com.cn (Beijing Daily).

    Article pages are matched by the t<digits>_<digits> URL pattern and
    handed to parse_item(); all other on-site links are followed for
    discovery only.
    """
    name = 'bjd'
    allowed_domains = ['www.bjd.com.cn']
    start_urls = ['http://www.bjd.com.cn/']
    website = u'京报网'
    rules = (
        # article pages: parse, do not follow further
        Rule(LinkExtractor(allow=r't(\d+)_(\d+)'), callback='parse_item', follow=False),
        # everything else on the domain: crawl for more links
        Rule(LinkExtractor(allow=r'bjd.com.cn'), follow=True),
    )

    def parse_item(self, response):
        """Extract title/date/source/content into a SpiderItem.

        On any extraction error a fresh loader is built with empty defaults
        so the pipeline still receives a well-formed record; url, collection
        name and website are always attached in the finally block.
        """
        l = ItemLoader(item=SpiderItem(), response=response)
        try:
            # the info <span> elements hold [date, source, ...] in order
            date_source_author = response.xpath('//div[@class="info"]/span/text()').extract()
            l.add_value('title', response.xpath('//title/text()').extract_first() or '')
            l.add_value('date', date_source_author[0] if len(date_source_author)>0 else '1970-01-01 00:00:00')
            l.add_value('source', date_source_author[1] if len(date_source_author)>1 else '')
            l.add_value('content', ''.join(response.xpath('//div[@class="TRS_Editor"]/descendant-or-self::text()').extract()))
        except Exception as e:
            self.logger.error('error url: %s error msg: %s' % (response.url, e))
            # start over with a fresh loader so partial values are discarded
            l = ItemLoader(item=SpiderItem(), response=response)
            l.add_value('title', '')
            l.add_value('date', '1970-01-01 00:00:00')
            l.add_value('source', '')
            l.add_value('content', '')
            pass
        finally:
            l.add_value('url', response.url)
            l.add_value('collection_name', self.name)
            l.add_value('website', self.website)
        return l.load_item()
| qiangber/web_news | web_news/spiders/bjd.py | bjd.py | py | 1,814 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "web_news.misc.spiderredis.SpiderRedis",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 16,
"usage_type": "call"
... |
22123114317 | import unittest
from typing import List
'''
build on top of leetcode 84
cite from https://leetcode.com/problems/maximal-rectangle/discuss/122456/Easiest-solution-build-on-top-of-leetcode84
'''
class Solution:
    """LeetCode 85 – Maximal Rectangle, built on top of the LeetCode 84
    histogram solver (largest rectangle in a histogram)."""

    def maximalRectangle(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest rectangle of '1's in `matrix`.

        Each row is folded into a histogram of consecutive-'1' column
        heights, and the histogram problem is solved per row with a
        monotonic stack; overall O(rows * cols).
        """
        if len(matrix) == 0 or len(matrix[0]) == 0:
            return 0

        def largestRectangleArea(heights):
            # Monotonic-stack histogram solver (LeetCode 84).  A temporary 0
            # sentinel is appended so every bar gets popped, then removed
            # again.  BUG FIX: the original left the sentinel in (the pop was
            # commented out), so the shared `heights` list grew by one
            # trailing 0 per row processed.
            heights.append(0)
            stack, res = [-1], 0
            for i in range(len(heights)):
                while heights[i] < heights[stack[-1]]:
                    height = heights[stack.pop()]
                    width = i - stack[-1] - 1
                    res = max(res, height * width)
                stack.append(i)
            heights.pop()  # restore the caller's list
            return res

        heights = [0] * len(matrix[0])
        maxArea = 0
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                # a histogram column grows on '1' and resets on '0'
                if matrix[i][j] == '0':
                    heights[j] = 0
                else:
                    heights[j] += 1
            maxArea = max(maxArea, largestRectangleArea(heights))
        return maxArea
| AllieChen02/LeetcodeExercise | Stack/P85MaximalRectangle/Maximal Rectangle.py | Maximal Rectangle.py | py | 1,302 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
}
] |
36972627167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import browser
import argparse
import sys
from mutagen import File
from mutagen.mp3 import HeaderNotFoundError
import os
DEFAULT_FOLDER = './'
def update_progress(progress, total):
    """Render an in-place 'progress/total (pct%)' counter on stdout."""
    percent = int(progress / total * 100)
    message = "\r%d/%d (%d%%)" % (progress, total, percent)
    sys.stdout.write(message)
    sys.stdout.flush()
def init_command_parser():
    """Build and parse the CLI arguments for the audit tool.

    BUG FIX: the original constructed an ArgumentParser twice, immediately
    discarding the first one; only one is created now.
    """
    parser = argparse.ArgumentParser(description='Audit a music library')
    parser.add_argument('--folder', '-f', help='Music library folder',
                        default=DEFAULT_FOLDER, action=browser.ReadableDir)
    return parser.parse_args()
def main():
    """Walk the music library, normalize tags on every mp3, and report
    any files mutagen could not open.

    For each file: mirror the artist into the `performer` tag when missing
    or out of sync, and delete a fixed set of unwanted tags; the file is
    saved only when something actually changed.
    """
    arguments = init_command_parser()
    print("Looking for files...")
    fileBrowser = browser.Browser(arguments.folder)
    files = list(fileBrowser.find(['mp3']))
    number_of_files = len(files)
    print("Working on %d files" % number_of_files)
    # tags we never want to keep
    tags_to_delete = ["media", "comment", "copyright", "encodedby", "organization"]
    failed_to_open = []  # paths mutagen failed to parse (bad mp3 header)
    for i, f in enumerate(files):
        update_progress(i + 1, number_of_files)
        needs_saving = False
        # f is presumably a (filename, directory) pair — joined below as
        # directory/filename; confirm against browser.Browser.find
        try:
            music_file = File(os.path.join(f[1], f[0]), easy=True)
        except HeaderNotFoundError:
            failed_to_open.append(os.path.join(f[1], f[0]))
            continue
        # mirror the artist into the performer tag when absent or stale
        if "performer" not in music_file or (music_file["performer"] != music_file['artist']):
            music_file["performer"] = music_file['artist']
            needs_saving = True
        for tag in tags_to_delete:
            if tag in music_file:
                del music_file[tag]
                needs_saving = True
        if needs_saving is True:
            music_file.tags.save()
    print("")
    print("Done")
    print("")
    # list every file that could not be opened
    for f in failed_to_open:
        print(f)
    print("")
if __name__ == '__main__':
    # Allow a clean Ctrl-C exit instead of dumping a traceback.
    try:
        main()
    except KeyboardInterrupt:
        print("Bye !")
        sys.exit(0)
| alexandre-p/music-audit | lib/tags_clean_up.py | tags_clean_up.py | py | 2,007 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdout.write",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"l... |
599630674 | from __future__ import print_function
import pysb.bng
import numpy
import sympy
import re
import ctypes
import csv
import scipy.interpolate
import sys
from pysundials import cvode
# These functions set up the system for annealing runs
# and provide the runner function as input to annealing
def spinner(i):
    """Print an in-place spinner frame plus the iteration counter."""
    frames = ("|", "/", "-", "\\")
    print("\r[%s] %d" % (frames[i % 4], i), end=' ')
    sys.stdout.flush()
# reltol of 1.0e-3, relative error of ~1%. abstol of 1.0e-3, enough for values
# that oscillate in the hundreds to thousands
def odeinit(model, reltol=1.0e-3, abstol=1.0e-3, nsteps = 1000, itermaxstep = None):
    '''
    Set up the CVODE integration environment for a PySB model.

    Must be run once before odesolve(); returns (envlist, paramarray) where
    envlist packs everything odesolve() needs (rhs callback, compiled rhs
    expressions, state vector, CVODE memory, output buffers, tolerances) and
    paramarray holds the model's nominal parameter values.

    model         : PySB model object
    reltol/abstol : CVODE relative/absolute tolerances
    nsteps        : number of output points allocated per integration
    itermaxstep   : optional CVODE maximum internal step size
    '''
    # Generate equations
    pysb.bng.generate_equations(model)
    # Get the size of the ODE array
    odesize = len(model.odes)
    # init the arrays we need
    yzero = numpy.zeros(odesize)  # initial values for yzero
    # assign the initial conditions
    for cplxptrn, ic_param in model.initial_conditions:
        speci = model.get_species_index(cplxptrn)
        yzero[speci] = ic_param.value
    # initialize y with the yzero values
    y = cvode.NVector(yzero)
    # Build one compiled expression per ODE.  The sympy species symbols "sN"
    # are rewritten to y[N] and every parameter name to p[j], so the
    # expressions can be eval'd against the live state/parameter arrays.
    rhs_exprs = []
    for i in range(0, odesize):
        # first get the function string from sympy, replace the "sN" with y[N]
        tempstring = re.sub(r's(\d+)', lambda m: 'y[%s]'%(int(m.group(1))), str(model.odes[i]))
        # now replace the constants with 'p' array names; cycle through the whole list
        for j, parameter in enumerate(model.parameters):
            tempstring = re.sub('(?<![A-Za-z0-9_])%s(?![A-Za-z0-9_])' % parameter.name, 'p[%d]' % j, tempstring)
        # compile each rhs expression so the integrator can run it quickly
        # (second arg is the "filename", useful for exception/debug output)
        rhs_exprs.append(compile(tempstring, '<ydot[%s]>' % i, 'eval'))
    # Create the ctypes structure that carries the parameter array into the
    # CVODE rhs callback (a generic "p" array).
    numparams = len(model.parameters)

    class UserData(ctypes.Structure):
        _fields_ = [('p', cvode.realtype*numparams)]  # parameters
    PUserData = ctypes.POINTER(UserData)
    data = UserData()
    data.p[:] = [p.value for p in model.parameters]
    paramarray = numpy.array([p.value for p in model.parameters])

    # rhs callback evaluated by CVODE; f_data carries the "p" parameter array
    def f(t, y, ydot, f_data):
        data = ctypes.cast(f_data, PUserData).contents
        rhs_locals = {'y': y, 'p': data.p}
        for i in range(0, len(model.odes)):
            ydot[i] = eval(rhs_exprs[i], rhs_locals)
        return 0
    # initialize the cvode memory object, use BDF and Newton for stiff systems
    cvode_mem = cvode.CVodeCreate(cvode.CV_BDF, cvode.CV_NEWTON)
    # allocate the cvode memory as needed, pass the function and the init ys
    cvode.CVodeMalloc(cvode_mem, f, 0.0, y, cvode.CV_SS, reltol, abstol)
    # point the parameters to the correct array;
    # if the params are changed later this does not need to be reassigned (???)
    cvode.CVodeSetFdata(cvode_mem, ctypes.pointer(data))
    # link integrator with linear solver
    cvode.CVDense(cvode_mem, odesize)
    # optional cap on the internal step size
    if itermaxstep != None:
        cvode.CVodeSetMaxStep(cvode_mem, itermaxstep)
    # output buffers reused by every odesolve() call
    xout = numpy.zeros(nsteps)
    yout = numpy.zeros([nsteps, odesize])
    # initialize the arrays
    xout[0] = 0.0  # CHANGE IF NEEDED
    # first step in yout
    for i in range(0, odesize):
        yout[0][i] = y[i]
    return [f, rhs_exprs, y, odesize, data, xout, yout, nsteps, cvode_mem, yzero, reltol, abstol], paramarray
def odesolve(model, tfinal, envlist, params, useparams=None, tinit = 0.0, ic=True):
    '''
    ODE solver tailored to work with the annealing algorithm.

    model    : the model object
    tfinal   : final integration time
    envlist  : the list returned from odeinit()
    params   : the parameter values being optimized with annealing
    useparams: indices into model.parameters to which params[i] correspond
               (None means params covers all parameters, in order)
    tinit    : initial time
    ic       : reinitialize initial conditions from the model before the run

    Returns (xyobs, xout, yout, yobs): observables stacked under the time
    row, the time grid, the raw species trajectories, and the observables.
    '''
    (f, rhs_exprs, y, odesize, data, xout, yout, nsteps, cvode_mem, yzero, reltol, abstol) = envlist
    # set the initial values and params in each run;
    # all parameters are used in annealing. initial conditions are not, here
    if useparams is None:
        for i in range(len(params)):
            data.p[i] = params[i]
    else:
        # only a subset of parameters are used for annealing
        for i in range(len(useparams)):
            data.p[useparams[i]] = params[i]
    # FIXME:
    # update yzero if initial conditions are being modified as part of the parameters
    # did it this way b/c yzero and data.p may not always be modified at the same time
    # the params list should NOT contain the initial conditions if they are not
    # to be used in the annealing... so this is a hack based on the fact that the
    # initial conditions are contained as part of the model.parameters list.
    if ic is True:
        for cplxptrn, ic_param in model.initial_conditions:
            speci = model.get_species_index(cplxptrn)
            yzero[speci] = ic_param.value
    # reset initial concentrations (rebuild the state vector from yzero;
    # when ic is False the yzero left over from odeinit is reused)
    y = cvode.NVector(yzero)
    # Reinitialize the memory allocations, DOES NOT REALLOCATE
    cvode.CVodeReInit(cvode_mem, f, 0.0, y, cvode.CV_SS, reltol, abstol)
    tadd = tfinal/nsteps
    t = cvode.realtype(tinit)
    tout = tinit + tadd
    # march the integrator over the fixed output grid
    for step in range(1, nsteps):
        ret = cvode.CVode(cvode_mem, tout, y, ctypes.byref(t), cvode.CV_NORMAL)
        if ret != 0:
            print("CVODE ERROR %i"%(ret))
            break
        xout[step] = tout
        for i in range(0, odesize):
            yout[step][i] = y[i]
        # increase the time counter
        tout += tadd
    # now deal with observables: each is a weighted sum of species columns
    yobs = numpy.zeros([len(model.observables), nsteps])
    for i, obs in enumerate(model.observables):
        coeffs = obs.coefficients
        specs = obs.species
        yobs[i] = (yout[:, specs] * coeffs).sum(1)
    # merge the x and y arrays for easy analysis
    xyobs = numpy.vstack((xout, yobs))
    return (xyobs, xout, yout, yobs)
def compare_data(xparray, simarray, xspairlist, vardata=False):
    """Score simulated trajectories against experimental data (chi-square).

    Both arrays have the form array([time, measurement1, measurement2, ...]).
    For each (xp_axis, sim_axis) pair in xspairlist the simulated trajectory
    is re-gridded onto the experimental time points with a cubic B-spline and

                        1
        obj(t) = ---------------- (S_sim(t) - S_exp(t))^2
                 2 * sigma_exp^2

    is summed over the experimental time points.

    xparray   : experimental data; when vardata is True, the column after
                each measurement column holds its variance
    simarray  : simulation data; its time domain is assumed to cover
                xparray's (FIXME: the overlap is not checked)
    xspairlist: list of 2-tuples (experimental axis, simulation axis)
    vardata   : use measured variances instead of the default 10% CV

    Returns a numpy array with one objective value per pair.
    """
    # divisions below can hit zero variances; the resulting inf/nan entries
    # are patched afterwards, so silence the warnings up front
    numpy.seterr(divide='ignore')
    objout = []
    # (unpacking also validates that each entry is a 2-element pair,
    # replacing the asserts the original used for input validation)
    for xparrayaxis, simarrayaxis in xspairlist:
        # spline the simulation and evaluate it at the experimental times
        tck = scipy.interpolate.splrep(simarray[0], simarray[simarrayaxis])
        ipsimarray = scipy.interpolate.splev(xparray[0], tck)
        diffarray = ipsimarray - xparray[xparrayaxis]
        diffsqarray = diffarray * diffarray
        if vardata is True:
            # variance data provided in the column after the measurement.
            # NOTE(review): this branch does NOT apply the 2x factor of the
            # formula above (the default branch does) — preserved as-is,
            # confirm whether the stored values are already 2*sigma^2.
            xparrayvar = xparray[xparrayaxis+1]
        else:
            # assume a default variance from a 10% coefficient of variation
            xparrayvar = xparray[xparrayaxis]*.1
            xparrayvar = xparrayvar * xparrayvar
            xparrayvar = xparrayvar*2.0
        objarray = diffsqarray / xparrayvar
        # inf/nan creep in when xparrayvar has (near-)zero entries
        for k in range(len(objarray)):
            if numpy.isinf(objarray[k]) or numpy.isnan(objarray[k]):
                objarray[k] = 1e-100  # zero enough
        objout.append(objarray.sum())
    return numpy.asarray(objout)
def getlog(sobolarr, params, omag=1, useparams=[], usemag=None):
    """Map [0,1) quasi-random samples onto log-spaced parameter ranges.

    Each parameter is sampled over +/- `omag` orders of magnitude around its
    nominal value (or +/- `usemag` for the indices listed in `useparams`).
    Returns an array shaped like sobolarr, suitable for Sobol analysis.
    """
    count = len(params)
    ub = numpy.zeros(count)
    lb = numpy.zeros(count)
    # per-parameter upper/lower bounds in linear space
    for idx in range(count):
        mag = usemag if idx in useparams else omag
        ub[idx] = params[idx] * pow(10, mag)
        lb[idx] = params[idx] / pow(10, mag)
    # exponential-family mapping of [0..1] onto [lb, ub] in log space
    # (see http://en.wikipedia.org/wiki/Exponential_family)
    return lb * (ub / lb) ** sobolarr
def getlin(sobolarr, params, CV =.25, useparams=[], useCV=None):
    """Map [0,1) quasi-random samples linearly onto mean +/- CV*mean ranges.

    sobolarr : 1-D or 2-D array of [0,1) samples (rows are sample vectors)
    params   : nominal parameter values (the range midpoints)
    CV       : default coefficient of variation, sigma/mean
    useparams/useCV: indices that get the alternative CV instead

    Returns an array shaped like sobolarr with each sample mapped onto
    [mean - CV*mean, mean + CV*mean].
    """
    count = len(params)
    ub = numpy.zeros(count)
    lb = numpy.zeros(count)
    # per-parameter linear bounds around the nominal value
    for idx in range(count):
        cv = useCV if idx in useparams else CV
        ub[idx] = params[idx] + params[idx] * cv
        lb[idx] = params[idx] - params[idx] * cv
    sobprmarr = numpy.zeros_like(sobolarr)
    if len(sobprmarr.shape) == 1:
        sobprmarr = (sobolarr * (ub - lb)) + lb
    elif len(sobprmarr.shape) == 2:
        # map each sample row independently
        for row in range(sobprmarr.shape[0]):
            sobprmarr[row] = (sobolarr[row] * (ub - lb)) + lb
    else:
        print("array shape not allowed... ")
    return sobprmarr
def genCmtx(sobmtxA, sobmtxB):
    """Build the Saltelli C matrices from quasi-random A and B matrices.

    Returns an (nparams, nsamples, nparams) array: nparams stacked copies of
    B, where copy i has its i-th column replaced by the i-th column of A.
    See Saltelli et al., "Global Sensitivity Analysis".
    """
    nparams = sobmtxA.shape[1]  # axis 1 holds the parameters
    # start from nparams stacked copies of B ...
    sobmtxC = numpy.repeat(sobmtxB[numpy.newaxis, :, :], nparams, axis=0)
    # ... then swap in A's i-th column for copy i
    for col in range(nparams):
        sobmtxC[col, :, col] = sobmtxA[:, col]
    return sobmtxC
def parmeval(model, sobmtxA, sobmtxB, sobmtxC, time, envlist, xpdata, xspairlist, ic=True, norm=True, vardata=False, useparams = None, fileobj=None):
    ''' Calculate the yA, yB, and yC_i objective arrays needed for
    variance-based global sensitivity analysis as prescribed by Saltelli
    and derived from the work by Sobol.

    Each row of the quasi-random A, B and C_n matrices is integrated with
    odesolve() and scored against the experimental data `xpdata` via
    compare_data(); the resulting objective vectors fill yA, yB and yC.
    With norm=True every simulated trajectory is min-max normalized
    (time row restored afterwards) before scoring.

    Returns (yA, yB, yC).
    '''
    # allocate the output arrays: one objective value per observable.
    # NOTE(review): model.observable_patterns is used for sizing here while
    # odesolve() iterates model.observables — confirm both exist on this
    # PySB version and have the same length.
    yA = numpy.zeros([sobmtxA.shape[0]] + [len(model.observable_patterns)])
    yB = numpy.zeros([sobmtxB.shape[0]] + [len(model.observable_patterns)])
    yC = numpy.zeros(list(sobmtxC.shape[:2]) + [len(model.observable_patterns)])  # shape (nparam, nsamples, nobs)
    if norm is True:
        # First process the A and B matrices
        print("processing matrix A, %d iterations:", sobmtxA.shape[0])
        for i in range(sobmtxA.shape[0]):
            outlist = odesolve(model, time, envlist, sobmtxA[i], useparams, ic)
            datamax = numpy.max(outlist[0], axis = 1)
            datamin = numpy.min(outlist[0], axis = 1)
            outlistnorm = ((outlist[0].T - datamin)/(datamax-datamin)).T
            outlistnorm[0] = outlist[0][0].copy()  # restore the (un-normalized) time row
            yA[i] = compare_data(xpdata, outlistnorm, xspairlist, vardata)
            spinner(i)
        print("\nprocessing matrix B, %d iterations:", sobmtxB.shape[0])
        for i in range(sobmtxB.shape[0]):
            outlist = odesolve(model, time, envlist, sobmtxB[i], useparams, ic)
            datamax = numpy.max(outlist[0], axis = 1)
            datamin = numpy.min(outlist[0], axis = 1)
            outlistnorm = ((outlist[0].T - datamin)/(datamax-datamin)).T
            outlistnorm[0] = outlist[0][0].copy()  # restore the time row
            yB[i] = compare_data(xpdata, outlistnorm, xspairlist, vardata)
            spinner(i)
        # now the C matrix, a bit more complicated b/c it is of size params x samples
        print("\nprocessing matrix C_n, %d parameters:"%(sobmtxC.shape[0]))
        for i in range(sobmtxC.shape[0]):
            print("\nprocessing processing parameter %d, %d iterations"%(i,sobmtxC.shape[1]))
            for j in range(sobmtxC.shape[1]):
                outlist = odesolve(model, time, envlist, sobmtxC[i][j], useparams, ic)
                datamax = numpy.max(outlist[0], axis = 1)
                datamin = numpy.min(outlist[0], axis = 1)
                outlistnorm = ((outlist[0].T - datamin)/(datamax-datamin)).T
                outlistnorm[0] = outlist[0][0].copy()  # restore the time row
                yC[i][j] = compare_data(xpdata, outlistnorm, xspairlist, vardata)
                spinner(j)
    else:
        # un-normalized path: score the raw trajectories
        print("processing matrix A:")
        for i in range(sobmtxA.shape[0]):
            outlist = odesolve(model, time, envlist, sobmtxA[i], useparams, ic)
            yA[i] = compare_data(xpdata, outlist[0], xspairlist, vardata)
            spinner(i)
        print("processing matrix B:")
        for i in range(sobmtxB.shape[0]):
            outlist = odesolve(model, time, envlist, sobmtxB[i], useparams, ic)
            # BUG FIX: the original scored the undefined name `outlistnorm`
            # here (NameError in the norm=False path); use the raw output,
            # matching the matrix-A branch above.
            yB[i] = compare_data(xpdata, outlist[0], xspairlist, vardata)
            spinner(i)
        print("processing matrix C_n")
        for i in range(sobmtxC.shape[0]):
            print("processing processing parameter %d"%i)
            for j in range(sobmtxC.shape[1]):
                outlist = odesolve(model, time, envlist, sobmtxC[i][j], useparams, ic)
                # BUG FIX: same undefined-name fix as for matrix B above
                yC[i][j] = compare_data(xpdata, outlist[0], xspairlist, vardata)
                spinner(j)
    # NOTE(review): `params` and `objout` are not defined in this scope, so
    # this branch would raise NameError if fileobj were ever provided —
    # preserved as-is pending clarification of the intended record.
    if fileobj:
        if norm:
            writetofile(fileobj, params, outlistnorm, objout)
        else:
            writetofile(fileobj, params, outlist, objout)
    return yA, yB, yC
def getvarsens(yA, yB, yC):
"""Calculate the array of S_i and ST_i for each parameter given yA, yB, yC matrices
from the multi-sampling runs. Calculate S_i and ST_i as follows:
Parameter sensitivity:
----------------------
U_j - E^2
S_j = ------------
V(y)
U_j = 1/n \sum yA * yC_j
E^2 = 1/n \sum yA * 1/n \sum yB
Total effect sensitivity (i.e. non additive part):
--------------------------------------------------
U_-j - E^2
ST_j = 1 - -------------
V(y)
U_-j = 1/n \sum yB * yC_j
E^2 = { 1/n \sum yB * yB }^2
In both cases, calculate V(y) from yA and yB
"""
nparms = yC.shape[0] # should be the number of parameters
nsamples = yC.shape[1] # should be the number of samples from the original matrix
nobs = yC.shape[-1] # the number of observables (this is linked to BNG usage, generalize?)
#first get V(y) from yA and yB
varyA = numpy.var(yA, axis=0, ddof=1)
varyB = numpy.var(yB, axis=0, ddof=1)
# now get the E^2 values for the S and ST calculations
E_s = numpy.average((yA * yB), axis=0)
E_st = numpy.average(yB, axis=0) ** 2
#allocate the S_i and ST_i arrays
Sens = numpy.zeros((nparms,nobs))
SensT = numpy.zeros((nparms,nobs))
# now get the U_j and U_-j values and store them
for i in range(nparms):
Sens[i] = (((yA * yC[i]).sum(axis=0)/(nsamples-1.)) - E_s ) / varyA
SensT[i] = 1.0 - ((((yB * yC[i]).sum(axis=0)/(nsamples-1.)) - E_st) / varyB)
return Sens, SensT
def writetofile(fout, simparms, simdata, temperature):
imax, jmax = simdata.shape
nparms = len(simparms)
fout.write('# TEMPERATURE\n{}\n'.format(temperature))
fout.write('# PARAMETERS ({})\n'.format(len(simparms)))
for i in range(nparms):
fout.write('{}'.format(simparms[i]))
if (i !=0 and i%5 == 0) or (i == nparms-1):
fout.write('\n')
else:
fout.write(', ')
fout.write('# SIMDATA ({},{})\n'.format(imax, jmax))
for i in range(imax):
fout.write('# {}\n'.format(i))
for j in range(jmax):
fout.write('{}'.format(simdata[i][j]))
if (j != 0 and j%10 == 0) or (j == jmax-1):
fout.write('\n')
else:
fout.write(', ')
fout.write('#-------------------------------------------------------------------------------------------------\n')
return
| pysb/pysb | pysb/deprecated/varsens_sundials.py | varsens_sundials.py | py | 20,542 | python | en | code | 152 | github-code | 1 | [
{
"api_name": "sys.stdout.flush",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pysb.bng.bng.generate_equations",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pysb... |
14284120751 | # yourapp/views.py
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .scripts import main as script
from .scripts import validate
from .scripts.visualization import visualize
import os
import pandas as pd
data_processed = False
image_directory = os.path.join(os.getcwd(), 'yourapp/static/graphs')
def home(request):
global data_processed
if request.method == "POST":
minority_class = request.POST.get("minority_class")
minority_class_column = request.POST.get("minority_class_column")
algorithm = request.POST.get("algorithm")
csv_file = request.FILES.get("csv_file")
if csv_file is None or not csv_file.name.endswith(".csv"):
return redirect('error', error_message="No CSV file uploaded")
input_df = pd.read_csv(csv_file)
validation_code, message = validate.validate_input(input_df, minority_class, minority_class_column)
if validation_code == 0:
return redirect('error', error_message=message)
status_code = script.function(input_df, minority_class, minority_class_column, algorithm)
if status_code == 1:
data_processed = True
return redirect('download')
else:
return redirect('error', error_message="Internal Error, Please try again.")
return render(request, "templates/index.html")
def error(request):
error_message = request.GET.get("error_message", "Internal Error, Please try again.")
return render(request, "yourapp/error.html", {"message": error_message})
def custom_autoencoder(request):
global data_processed
if request.method == "POST":
minority_class = request.POST.get("minority_class")
minority_class_column = request.POST.get("minority_class_column")
csv_file = request.FILES.get("csv_file")
encoder_dense_layers = request.POST.get("encoder_dense_layers")
bottle_neck = request.POST.get("bottle_neck")
decoder_dense_layers = request.POST.get("decoder_dense_layers")
epochs = request.POST.get("epochs")
decoder_activation = request.POST.get("decoder_activation")
if csv_file is None or not csv_file.name.endswith(".csv"):
return redirect('error', error_message="No CSV file uploaded")
input_df = pd.read_csv(csv_file)
validation_code, message = validate.validate_input(
input_df, minority_class, minority_class_column,
custom=True, algorithm="autoencoder",
encoder_dense_layers=encoder_dense_layers,
bottle_neck=bottle_neck,
decoder_dense_layers=decoder_dense_layers,
epochs=epochs
)
if validation_code == 0:
return redirect('error', error_message=message)
try:
status_code = script.function(
input_df, minority_class, minority_class_column,
algorithm="custom_autoencoder",
encoder_dense_layers=encoder_dense_layers,
bottle_neck=bottle_neck,
decoder_dense_layers=decoder_dense_layers,
epochs=epochs, decoder_activation=decoder_activation
)
except Exception as e:
print(e)
return redirect('error', error_message="Internal Error, Please try again.")
if status_code == 1:
data_processed = True
return redirect('download')
return render(request, "yourapp/custom_autoencoder.html")
def download(request):
global data_processed
if data_processed:
return render(request, "yourapp/download.html")
else:
return redirect('home')
def visualization(request):
if request.method == "POST":
minority_class = request.POST.get("minority_class")
minority_class_column = request.POST.get("minority_class_column")
original_file = request.FILES.get("original_file")
synthetic_file = request.FILES.get("synthetic_file")
pure_files_checkbox = request.POST.get("pure_files")
pure_files_checkbox = True if pure_files_checkbox else False
if original_file is None or not original_file.name.endswith(".csv") or synthetic_file is None or not synthetic_file.name.endswith(".csv"):
return redirect('error', error_message="No CSV file uploaded")
original_df = pd.read_csv(original_file)
synthetic_df = pd.read_csv(synthetic_file)
validation_code_1, message_1 = validate.validate_input(original_df, minority_class, minority_class_column)
validation_code_2, message_2 = validate.validate_input(synthetic_df, minority_class, minority_class_column)
if validation_code_1 == 0:
return redirect('error', error_message=message_1)
if validation_code_2 == 0:
return redirect('error', error_message=message_2)
try:
status_code = visualize(original_df, synthetic_df, minority_class, minority_class_column, pure_files_checkbox)
except Exception as e:
print(e)
return redirect('error', error_message="Internal Error, Please try again.")
if status_code == 1:
image_filenames = ['density_plot.png', 'heatmap_plot.png', 'scatter_plot.png']
return render(request, 'yourapp/visualization_plot.html', {'image_filenames': image_filenames})
return render(request, "yourapp/visualization.html")
def download_synthetic_data(request):
if data_processed:
synthetic_csv_path = "synthetic_data.csv"
return HttpResponse(synthetic_csv_path, content_type='text/csv')
else:
return redirect('home')
def serve_image(request, filename):
return redirect(os.path.join(image_directory, filename))
| SartajBhuvaji/Data-Science-Research-FlaskApp | djangoapp/yourapp/views.py | views.py | py | 5,774 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"... |
29028858099 | from datetime import datetime
from classes.field import Field
class Birthday(Field):
@Field.value.setter
def value(self, value=None):
if value and type(value) == str:
value = value.replace('.', '-')
try:
value = datetime.strptime(value, '%d-%m-%Y').date()
except:
raise ValueError('Invalid date format. Must be dd-mm-yyyy or dd.mm.yyyy')
elif value != None:
raise ValueError('Invalid date format. Must be string dd-mm-yyyy or dd.mm.yyyy')
self._value = value
def __repr__(self):
return datetime.strftime(self.value, '%d-%m-%Y') | IrinaShushkevych/classes_bot_helper | classes/birthday.py | birthday.py | py | 653 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "classes.field.Field",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "class... |
36170445031 | import logging
from typing import List
from volatility3.framework import constants, exceptions, renderers, interfaces
from volatility3.framework.configuration import requirements
from volatility3.framework.objects import utility
from volatility3.plugins.windows import pslist
vollog = logging.getLogger(__name__)
class CmdLine(interfaces.plugins.PluginInterface):
"""Lists process command line arguments."""
_required_framework_version = (2, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
# Since we're calling the plugin, make sure we have the plugin's requirements
return [
requirements.ModuleRequirement(
name="kernel",
description="Windows kernel",
architectures=["Intel32", "Intel64"],
),
requirements.PluginRequirement(
name="pslist", plugin=pslist.PsList, version=(2, 0, 0)
),
requirements.ListRequirement(
name="pid",
element_type=int,
description="Process IDs to include (all other processes are excluded)",
optional=True,
),
]
@classmethod
def get_cmdline(
cls, context: interfaces.context.ContextInterface, kernel_table_name: str, proc
):
"""Extracts the cmdline from PEB
Args:
context: the context to operate upon
kernel_table_name: the name for the symbol table containing the kernel's symbols
proc: the process object
Returns:
A string with the command line
"""
proc_layer_name = proc.add_process_layer()
peb = context.object(
kernel_table_name + constants.BANG + "_PEB",
layer_name=proc_layer_name,
offset=proc.Peb,
)
result_text = peb.ProcessParameters.CommandLine.get_string()
return result_text
def _generator(self, procs):
kernel = self.context.modules[self.config["kernel"]]
for proc in procs:
process_name = utility.array_to_string(proc.ImageFileName)
proc_id = "Unknown"
try:
proc_id = proc.UniqueProcessId
result_text = self.get_cmdline(
self.context, kernel.symbol_table_name, proc
)
except exceptions.SwappedInvalidAddressException as exp:
result_text = f"Required memory at {exp.invalid_address:#x} is inaccessible (swapped)"
except exceptions.PagedInvalidAddressException as exp:
result_text = f"Required memory at {exp.invalid_address:#x} is not valid (process exited?)"
except exceptions.InvalidAddressException as exp:
result_text = "Process {}: Required memory at {:#x} is not valid (incomplete layer {}?)".format(
proc_id, exp.invalid_address, exp.layer_name
)
yield (0, (proc.UniqueProcessId, process_name, result_text))
def run(self):
kernel = self.context.modules[self.config["kernel"]]
filter_func = pslist.PsList.create_pid_filter(self.config.get("pid", None))
return renderers.TreeGrid(
[("PID", int), ("Process", str), ("Args", str)],
self._generator(
pslist.PsList.list_processes(
context=self.context,
layer_name=kernel.layer_name,
symbol_table=kernel.symbol_table_name,
filter_func=filter_func,
)
),
)
| volatilityfoundation/volatility3 | volatility3/framework/plugins/windows/cmdline.py | cmdline.py | py | 3,700 | python | en | code | 1,879 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "volatility3.framework.interfaces.plugins",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "volatility3.framework.interfaces",
"line_number": 12,
"usage_type": "name"... |
690977689 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('behavior_subjects', '0007_auto_20151119_1117'),
]
operations = [
migrations.AddField(
model_name='session',
name='exh_inh_delay',
field=models.IntegerField(blank=True, verbose_name='Exh-inh delay', default=-1),
),
]
| c-wilson/behavior_monitor | behavior_subjects/migrations/0008_session_exh_inh_delay.py | 0008_session_exh_inh_delay.py | py | 460 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.