seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16638837739 | # !/usr/bin/python
# -*- coding: utf-8 -*-
"""
__author__ = 'qing.li'
"""
from django import template
from django.conf import settings
import re
from collections import OrderedDict
from django.conf import settings
register = template.Library()
@register.inclusion_tag('rbac/menu.html')
def menu(request):
    """Render the sidebar menu (rbac/menu.html).

    Reads the permission menu stored in the session, orders the first-level
    entries by descending ``weight``, and marks the entry containing the
    currently selected item so the template can expand and highlight it.

    Debug ``print`` calls and the commented-out experiments of the original
    implementation have been removed.
    """
    menu_order = OrderedDict()
    # Menu structure is placed in the session at login time.
    menu_list = request.session.get(settings.MENU_SESSION_KEY)
    # Higher weight comes first.
    for key in sorted(menu_list, key=lambda x: menu_list[x]['weight'], reverse=True):
        menu_order[key] = menu_list[key]
        # Collapse every top-level entry by default ...
        menu_order[key]['class'] = 'hide'
        for child in menu_order[key]['children']:
            # ... except the one owning the active item
            # (request.current_menu_id is presumably set by rbac middleware
            # -- TODO confirm against the middleware implementation).
            if child['id'] == request.current_menu_id:
                menu_order[key]['class'] = ''
            # Highlight the child whose URL matches the current path exactly.
            if re.match('^{}$'.format(child['url']), request.path_info):
                child['class'] = 'active'
    return {'menu_list': menu_order}
@register.inclusion_tag('rbac/breadcrumb.html')
def breadcrumb(request):
    """Render the breadcrumb bar from the trail collected on the request."""
    context = {'breadcrumb_list': request.breadcrumb_list}
    return context
@register.filter
def has_permission(request, permission):
    """Template filter: does the session permission dict contain *permission*?

    The permission dict is stored in the session under
    ``settings.PERMISSION_SESSION_KEY``; membership is tested against its
    keys.  Always returns an explicit bool (the original returned None for
    the negative case) and no longer prints debug output.
    """
    permissions = request.session.get(settings.PERMISSION_SESSION_KEY)
    return str(permission) in permissions
@register.simple_tag
def gen_role_url(request, rid):
    """Build the current query string with ``rid`` replaced or added.

    Used to generate links that keep the existing GET parameters while
    switching the selected role id.  The debug ``print`` was removed.
    """
    params = request.GET.copy()
    # QueryDict.copy() already returns a mutable copy; keep the explicit
    # flag for clarity / very old Django versions.
    params._mutable = True
    params['rid'] = rid
    return params.urlencode()
| QingqinLi/nb_crm | rbac/templatetags/rabc.py | rabc.py | py | 2,027 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.template.Library",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "djang... |
10420483101 | """
.. moduleauthor:: Martí Congost <marti.congost@whads.com>
"""
from typing import Any, Optional, Set, Tuple
from httplib2 import Http
from base64 import urlsafe_b64encode
from json import loads, dumps
from cocktail.modeling import overrides
from .exceptions import CacheKeyError
from .cachekey import CacheKey
from .cachestorage import CacheStorage
from .cacheserializer import CacheSerializer
from .picklecacheserializer import Base64PickleCacheSerializer
from .scope import whole_cache, Scope
ENCODING = "utf-8"
class RESTCacheStorage(CacheStorage):
    """Cache storage backend that delegates every operation to a REST service.

    Keys are addressed as ``<address>/keys/<urlsafe-b64(key)>`` (with optional
    raw sub-paths such as ``/value`` or ``/expiration``); cached values are
    encoded with the configured `CacheSerializer` and exchanged as JSON.
    """

    def __init__(
            self,
            address: str,
            serializer: Optional[CacheSerializer] = None):
        """
        :param address: Base URL of the cache service; trailing slashes are
            stripped so paths can be appended safely.
        :param serializer: Value serializer; defaults to
            `Base64PickleCacheSerializer`.
        """
        self.__address = address.rstrip("/")
        if serializer is None:
            serializer = Base64PickleCacheSerializer()
        self.__serializer = serializer

    @property
    def address(self) -> str:
        """Base URL of the cache service."""
        return self.__address

    @property
    def serializer(self) -> CacheSerializer:
        """Serializer used to encode / decode cached values."""
        return self.__serializer

    def _key_request(self, key: str, *args, **kwargs) -> Any:
        """Perform an HTTP request against the resource for *key*.

        ``*args`` / ``**kwargs`` are forwarded to ``httplib2.Http.request``
        (method, headers, body...).  The keyword ``extra_path`` appends a raw
        sub-path after the base64-encoded key.  Raises `CacheKeyError` for
        any 4xx status; JSON bodies are decoded before being returned.
        """
        url = (
            self.__address
            + "/keys/"
            + urlsafe_b64encode(key.encode(ENCODING)).decode(ENCODING)
        )
        extra_path = kwargs.pop("extra_path", None)
        if extra_path:
            url += "/" + extra_path
        http = Http()
        response, content = http.request(url, *args, **kwargs)
        if 400 <= response.status < 500:
            raise CacheKeyError(key)
        if content and response.get("content-type") == "application/json":
            content = loads(content.decode(ENCODING))
        return content

    @overrides(CacheStorage.exists)
    def exists(self, key: CacheKey) -> bool:
        """True if *key* exists (HEAD: no value transfer)."""
        try:
            self._key_request(key, "HEAD")
        except CacheKeyError:
            return False
        else:
            return True

    @overrides(CacheStorage.retrieve)
    def retrieve(self, key: CacheKey) -> Any:
        """Fetch and deserialize the value for *key*.

        Raises `CacheKeyError` if the key is missing.
        """
        value = self._key_request(key, "GET", extra_path="value")
        return self.serializer.unserialize(value)

    @overrides(CacheStorage.retrieve_with_metadata)
    def retrieve_with_metadata(
            self,
            key: CacheKey) -> Tuple[Any, int, Set[str]]:
        """Return ``(value, expiration, tags)`` for *key* in one request."""
        data = self._key_request(key, "GET")
        return (
            self.serializer.unserialize(data["value"].encode(ENCODING)),
            data["expiration"],
            data["tags"]
        )

    @overrides(CacheStorage.store)
    def store(
            self,
            key: CacheKey,
            value: Any,
            expiration: Optional[int] = None,
            tags: Optional[Set[str]] = None):
        """Serialize and store *value* with optional expiration and tags."""
        self._key_request(
            key,
            "POST",
            headers={
                "Content-Type": "application/json"
            },
            body=dumps({
                "value": self.__serializer.serialize(value).decode(ENCODING),
                "expiration": expiration,
                # Sets are not JSON serializable; send a list (or null).
                "tags": None if tags is None else list(tags)
            })
        )

    @overrides(CacheStorage.get_expiration)
    def get_expiration(self, key: CacheKey) -> Optional[int]:
        """Return the expiration for *key* (None = no expiration)."""
        return self._key_request(key, "GET", extra_path="expiration")

    @overrides(CacheStorage.set_expiration)
    def set_expiration(self, key: CacheKey, expiration: Optional[int]):
        """Set (or clear, with None) the expiration for *key*.

        Fixed: the sub-path is now passed via ``extra_path`` so that only
        the key itself is base64-encoded, mirroring ``get_expiration``.
        Previously the whole ``"<key>/expiration"`` string was encoded into
        the URL, which does not match the route shape used by every other
        method of this class.
        """
        self._key_request(
            key,
            "POST",
            extra_path="expiration",
            headers={
                "Content-Type": "application/json"
            },
            body=dumps(expiration)
        )

    @overrides(CacheStorage.discard)
    def discard(self, key: CacheKey) -> bool:
        """Delete *key*; True if it existed, False if already absent."""
        try:
            self._key_request(key, "DELETE")
        except CacheKeyError:
            return False
        else:
            return True

    @overrides(CacheStorage.clear)
    def clear(self, scope: Scope = whole_cache):
        """Clear the whole cache, or only the keys/tags in *scope*."""
        url = self.__address + "/clear"
        http = Http()
        response, content = http.request(
            url,
            "POST",
            headers={
                "Content-Type": "application/json"
            },
            body=dumps(
                None if scope is whole_cache
                else list(scope)
            )
        )
| marticongost/cocktail | cocktail/caching/restcachestorage.py | restcachestorage.py | py | 4,258 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cachestorage.CacheStorage",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cacheserializer.CacheSerializer",
"line_number": 26,
"usage_type": "name"
},
{
"api_nam... |
7795341803 | import json
from random import randint
import ideas.utils as utils
IDEA_PATH = './ideas/ideas.json'
class Generator:
    """Random idea generator backed by a JSON dictionary of idea lists.

    The JSON file maps category keys to lists of idea strings; ideas are
    drawn at random per category, with a single re-roll that reduces (but
    does not eliminate) immediate repeats.
    """

    def __init__(self, context='general', keys=None):
        """Load the idea file.

        :param context: Label passed through to ``utils.print_result``.
        :param keys: Optional subset of category keys to draw from; defaults
            to every key present in the file.  (Was a mutable default
            argument ``[]``; fixed to the ``None`` idiom.)
        """
        self.idea_path = IDEA_PATH
        self.context = context
        self.rand_items = []
        with open(self.idea_path, 'r') as json_file:
            self.lst = json.load(json_file)
        if not keys:
            self.keys = self.lst.keys()
        else:
            self.keys = keys

    def get_keys(self):
        """Return the active category keys as a list."""
        return list(self.keys)

    def export_lst(self):
        """Write the (possibly modified) idea dictionary back to disk."""
        # Context manager replaces the leaky open/write/close triple.
        with open(self.idea_path, 'w+') as f:
            f.write(json.dumps(self.lst, indent=4))

    def get_rand_item(self, key):
        """Pick a random idea for *key*, re-rolling once on a repeat.

        Note: a single re-roll only lowers the chance of a duplicate; it
        does not guarantee uniqueness.
        """
        rand_num = randint(0, (len(self.lst[key]) - 1))
        if self.lst[key][rand_num] in self.rand_items:
            rand_num = randint(0, (len(self.lst[key]) - 1))
        self.rand_items.append(self.lst[key][rand_num])
        return self.lst[key][rand_num]

    def generate(self):
        """Draw one idea per active key; print and return them."""
        generated_msg = ''
        for key in self.keys:
            generated_msg += f'{self.get_rand_item(key)} \n'
        self.print_result()
        return generated_msg

    def generate_multiple(self, how_many=1, key='items'):
        """Draw *how_many* ideas from one category and print them.

        (The previous default of ``None`` crashed in ``range()``; 1 is a
        safe backward-compatible default.)
        """
        for _ in range(how_many):
            self.get_rand_item(key)
        self.print_result()

    def print_result(self):
        """Delegate pretty-printing of the drawn items to utils."""
        utils.print_result(self.rand_items, self.context)

    @staticmethod
    def convert_file_to_array(file_location):
        """Read a text file and return its lines stripped of whitespace."""
        with open(file_location, "r") as file_to_convert:
            file_array = file_to_convert.readlines()
        return [item.strip() for item in file_array]
| kjbyleni/My_Art_tools | ideas/generator.py | generator.py | py | 1,757 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number... |
779154686 | from typing import Annotated
from fastapi import APIRouter, Depends, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic_core import ValidationError
from readconnect.shared.domain.dtos.error_response_dto import ErrorResponse
from readconnect.shared.domain.exceptions.exceptions import (
NotFoundError,
InvalidsCredentialsError,
)
from ...application.login_user.login_user_use_case import LoginUserUseCase
from ...application.signup_user.signup_user_use_case import SignupUserUseCase
from ...domain.dtos.login_request_dto import LoginRequestDTO
from ...domain.dtos.login_response_dto import LoginResponseDTO
from ...domain.dtos.signup_request_dto import SignupRequestDTO
auth_router = APIRouter(prefix="/auth")
@auth_router.post(
    path="/login",
    responses={
        200: {"model": LoginResponseDTO},
        502: {"model": ErrorResponse},
        422: {"model": ErrorResponse},
        404: {"model": ErrorResponse},
    },
)
async def login(
    body: LoginRequestDTO,
    login_use_case: Annotated[LoginUserUseCase, Depends(LoginUserUseCase)],
):
    """Authenticate a user; domain errors are mapped to JSON error payloads
    and any unexpected failure surfaces as 502."""

    def _error(status_code, detail_text):
        # Single place that shapes the error payload for every branch below.
        return JSONResponse(
            status_code=status_code,
            content=jsonable_encoder(ErrorResponse(details=detail_text)),
        )

    try:
        return await login_use_case.execute(body)
    except ValidationError as exc:
        return _error(
            status.HTTP_422_UNPROCESSABLE_ENTITY,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.__str__()}",
        )
    except InvalidsCredentialsError as exc:
        return _error(
            exc.status_code,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.details}",
        )
    except NotFoundError as exc:
        return _error(
            exc.status_code,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.details}",
        )
    except Exception as exc:
        return _error(
            status.HTTP_502_BAD_GATEWAY,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.__str__()}",
        )
@auth_router.post(
    path="/signup",
    responses={
        200: {"model": SignupRequestDTO},
        502: {"model": ErrorResponse},
        422: {"model": ErrorResponse},
        404: {"model": ErrorResponse},
        409: {"model": ErrorResponse},
    },
)
async def signup(
    body: SignupRequestDTO,
    signup_use_case: Annotated[SignupUserUseCase, Depends(SignupUserUseCase)],
):
    """Register a new user; domain errors are mapped to JSON error payloads
    and any unexpected failure surfaces as 502."""

    def _error(status_code, detail_text):
        # Single place that shapes the error payload for every branch below.
        return JSONResponse(
            status_code=status_code,
            content=jsonable_encoder(ErrorResponse(details=detail_text)),
        )

    try:
        return await signup_use_case.execute(body)
    except ValidationError as exc:
        return _error(
            status.HTTP_422_UNPROCESSABLE_ENTITY,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.__str__()}",
        )
    except InvalidsCredentialsError as exc:
        return _error(
            exc.status_code,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.details}",
        )
    except NotFoundError as exc:
        return _error(
            exc.status_code,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.details}",
        )
    except Exception as exc:
        return _error(
            status.HTTP_502_BAD_GATEWAY,
            f"Ocurrió un problema al realizar su petición. Detalle: {exc.__str__()}",
        )
| YeisonKirax/readconnect-back | src/readconnect/auth/infrastructure/routes/auth_routes.py | auth_routes.py | py | 3,939 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "domain.dtos.login_request_dto.LoginRequestDTO",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Annotated",
"line_number": 33,
"usage_type": "name"
},
{
"... |
29918439800 | #Tarea 20, para lenguajes de programacion
import matplotlib.pyplot as plt
import numpy as np
# Interactive least-squares fit: reads n (x, y) pairs from the console,
# prints the slope (m), intercept (b) and correlation coefficient (r),
# then plots the data points and the fitted line.
print ("Minimos cuadrados")
print ("Este programa calcula la pendiente (m), la intercepcion(b) y el coeficiente de relacion(r) de una regresion lineal")
print ("¿Cuantos datos (en pares (x1,y1) se considera 1) desea evaluar? (minimo 3)")
n=int(input())
# A meaningful fit needs at least 3 points.
while n<3:
    print ("Ingresa un valor mayor a 2: ")
    n=int(input())
x=np.arange(n, dtype=float)
y=np.arange(n, dtype=float)
acc=np.arange(n, dtype=float)  # working copy of x, sorted below for plotting
sx=0
sy=0
sxy=0
sx2=0
sy2=0
for i in range(n):
    print ("Ingresa el dato x("+str(i+1)+"):")
    x[i]=float(input())
    print ("Ingresa el dato y("+str(i+1)+"):")
    y[i]=float(input())
    sx=sx+x[i]  # running sum of x
    sy=sy+y[i]  # running sum of y
    sxy=sxy+x[i]*y[i]  # running sum of x*y
    sx2=sx2+x[i]**2  # running sum of x**2
    sy2=sy2+y[i]**2  # running sum of y**2
print ("TABLA DE DATOS")
print ("#dato, x , y")
for i in range(n):
    print (str(i+1)+".-",x[i]," ,",y[i])
for i in range(n):
    acc[i]=x[i]
# Bubble-sort `acc` so its first/last entries give the smallest and largest
# x values, used as the endpoints when plotting the fitted line.
for i in range(0,n):
    for j in range(0,n-1):
        if acc[j]>=acc[j+1]:  # if the current value is not below the next one
            yac=acc[j]  # swap: stash the current value
            acc[j]=acc[j+1]  # the next value moves into the current slot
            acc[j+1]=yac  # and the stashed value goes into the next slot
# yp = mean of y, xp = mean of x, r = correlation coefficient
# Σx=sx, Σy=sy, Σxy=sxy, Σx²=sx2
# The slope is m = (Σxy - (Σx)*yp) / (Σx² - (Σx)*xp)
yp=sy/n
xp=sx/n
m=(sxy-sx*yp)/(sx2-sx*xp)
r=(n*sxy-sx*sy)/(((n*sx2-sx**2)*(n*sy2-sy**2))**.5)
b=yp-m*xp
print ("Σx=",sx,", Σy=",sy,", Σxy=",sxy,", Σx²=",sx2, ", Σy²=",sy2)
print ("m=",m)
print ("r=",r)
print ("b=",b)
xs=(x[n-1]-x[0])/(n-1)  # NOTE(review): computed but never used below
x1=[acc[0],acc[n-1]]  # line endpoints: min and max of x
y1=[m*x1[0]+b,m*x1[1]+b]  # a straight line only needs its two endpoints
# Data points are drawn as stars so they stand out from the fitted line.
plt.plot(x,y,"b*", label="Datos")
plt.plot(x1,y1,"m-",label="Linializado")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.title("Minimos cuadrados")
| EdmundoD3/Ejercicios-de-Python | programas python parte 2/T20_Minimos_cuadrados.py | T20_Minimos_cuadrados.py | py | 2,290 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"... |
32129407183 | import json
import bitarray
from find_recipes_simple import process_recipe
import time
import sys
PROCESS_SIZE = 50000000
def execute_find_recipe(item_str, recipe_data):
    """Cook *item_str* and pack (price, hp) into one 16-bit word.

    Returns ``(packed_value, crit_flag)``, where *crit_flag* records whether
    the critical-hit result differs from the normal one.  An empty recipe
    string yields ``(0, 0)``.
    """
    if not item_str:
        return 0, 0
    hp_crit, hp, price = process_recipe(recipe_data, item_str)
    # A crit adds a fixed 12 hp, so storing "is it different" is enough.
    packed = ((price << 7) + hp) & 0xFFFF
    return packed, hp_crit != hp
NUM_INGR = 5
def array2d(first_order, second_order):
    """Return a first_order x second_order matrix of zeros (list of lists,
    each row an independent list)."""
    return [[0] * second_order for _ in range(first_order)]
class RecipeIterator:
    """Iterate recipe strings for the integer range [start, end).

    Each integer indexes one multiset of NUM_INGR ingredient slots drawn
    from ``id_data`` (combinations with repetition, non-decreasing item
    indices); item index 0 acts as the "empty slot" and is dropped from the
    emitted comma-separated recipe string.
    """
    def __init__(self, id_data, start, end):
        self.current = start
        self.end = end
        self.id_data = id_data
        self.num_items = len(id_data)
        data = array2d(NUM_INGR+1, self.num_items+1)
        bino = array2d(self.num_items+NUM_INGR, NUM_INGR+1)
        # binomial(n, k), k<=NUM_INGR is bino[n][k]
        # Compute binomial with dynamic programming
        for n in range(self.num_items+NUM_INGR):
            bino[n][0] = 1
        for k in range(NUM_INGR+1):
            bino[k][k] = 1
        for n in range(1,self.num_items+NUM_INGR):
            for k in range(1, NUM_INGR+1):
                bino[n][k] = bino[n-1][k-1] + bino[n-1][k]
        # data[i][m] is size of choosing i ingredients from m, so bino[i+m-1][i]
        for m in range(self.num_items+1):
            data[0][m] = 1
        for i in range(1, NUM_INGR+1):
            for m in range(self.num_items+1):
                data[i][m] = bino[i+m-1][i]
        self.data = data
        # Total number of recipes over the full item list.
        self.total = data[NUM_INGR][self.num_items]
    def get_total(self):
        return self.total
    def __iter__(self):
        return self
    def __next__(self):
        if self.current >= self.end:
            raise StopIteration
        # NOTE: `input` shadows the builtin; here it is the recipe's
        # integer index being decoded.
        input = self.current
        self.current += 1
        rest_items = self.num_items
        items = []
        good = False
        # Decode slot by slot: for each of the NUM_INGR positions, walk the
        # candidate item ids (never below the previous slot's choice) and
        # pick the first one whose sub-range contains the remaining index.
        for item in range(NUM_INGR):
            index = 0
            for m in range(self.num_items-rest_items+1, self.num_items+1):
                if index + self.data[NUM_INGR-1-item][self.num_items-m+1] > input:
                    items.append(m-1)
                    good = True
                    break
                index += self.data[NUM_INGR-1-item][self.num_items-m+1]
            if not good:
                break
            rest_items=self.num_items-items[item]
            input -= index
        if good:
            # Item index 0 is the "no ingredient" placeholder -- skip it.
            items = [self.id_data[i] for i in items if i != 0]
            return ",".join(items)
        else:
            raise StopIteration
sample = "[08]========================= 100% "
def run_dump(part, is_multi=True):
    """Compute one PROCESS_SIZE-sized slice of the recipe database.

    Writes ``parts/main<part>.db`` (2 big-endian bytes per recipe, the value
    packed by execute_find_recipe) and ``parts/crit<part>.db`` (1 bit per
    recipe: crit result differs), updating a textual progress bar.

    :param part: zero-based slice index into the full recipe space.
    :param is_multi: when True, draw the progress bar at a fixed screen
        position derived from `part` via ANSI cursor save/restore escapes
        (suitable for many concurrent workers); when False, redraw a single
        line in place.
    """
    # Screen coordinates for this worker's progress line (4 per row).
    screen_x = (part % 4) * 38 + 1
    screen_y = int(part/4) + 1
    part_str = f"[0{part}]" if part < 10 else f"[{part}]"
    def update_progress(permillage):
        # permillage: progress in tenths of a percent (0..1000).
        percentage = int(permillage/10)
        if percentage >= 100:
            progress_bar = "="*25
        else:
            progress_bar = "="*int(percentage/4)+">"
        if is_multi:
            # \x1b7 / \x1b8 save and restore the cursor; \x1b[y;xf moves it.
            sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (screen_y, screen_x, f"{part_str}{progress_bar} {permillage/10}%"))
        else:
            print(f"\r{part_str}{progress_bar} {permillage/10}%", end="")
        sys.stdout.flush()
    # Load the items
    with open("../ids.json", "r", encoding="utf-8") as ids_file:
        id_data_dict = json.load(ids_file)
    id_data = []
    for k in id_data_dict:
        id_data.append(id_data_dict[k])
    with open("recipeData.json", "r", encoding="utf-8") as recipe_file:
        recipe_data = json.load(recipe_file)
    # Iterate only this part's slice of the full recipe space.
    recipes = RecipeIterator(id_data, part*PROCESS_SIZE,(part+1)*PROCESS_SIZE)
    crit_buffer = bitarray.bitarray(endian='little')
    progress = 0
    permillage = 0
    update_progress(0)
    with open(f"parts/main{part}.db", "wb") as main_db:
        for recipe in recipes:
            main_data, crit_flag = execute_find_recipe(recipe, recipe_data)
            crit_buffer.append(crit_flag)
            main_db.write(bytearray(main_data.to_bytes(2, "big")))
            progress += 1
            new_permillage = int(progress*1000/PROCESS_SIZE)
            # Only repaint when the displayed permillage actually changes.
            if new_permillage != permillage:
                update_progress(new_permillage)
                permillage = new_permillage
    update_progress(1000)
    with open(f"parts/crit{part}.db", "wb") as crit_db:
        crit_db.write(crit_buffer.tobytes())
    if not is_multi:
        print()
# CLI entry: `python dump.py <part>` computes one slice in single-line
# (non-multi) progress mode.
if __name__ == "__main__":
    run_dump(int(sys.argv[1]), False)
| iTNTPiston/botw-recipe | dump/dump.py | dump.py | py | 4,729 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "find_recipes_simple.process_recipe",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": ... |
29510369443 | import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
# Load the NER dataset (latin1: the file is not UTF-8 encoded).
data = pd.read_csv('./ner_dataset.csv', encoding='latin1')
# Forward-fill sparse cells ("ffill" copies the previous value down,
# "bfill" would copy the next value up).
# NOTE(review): fillna(method=...) is deprecated in recent pandas; the
# modern spelling is data.ffill() -- confirm the installed version.
data = data.fillna(method="ffill")
print(data.tail(10))
words = list(set(data["Word"].values))  # vocabulary: unique words
n_words = len(words)  # vocabulary size
class MajorityVotingTagger(BaseEstimator, TransformerMixin):
    """Baseline tagger that memorises the most frequent tag per word.

    Words never seen during fit fall back to the tag 'o'.
    """

    def fit(self, X, y):
        """Count (word, tag) pairs and remember each word's majority tag.

        :param X: list of words
        :param y: list of tags (aligned with X)
        """
        counts = {}
        self.tags = []
        for word, tag in zip(X, y):
            if tag not in self.tags:
                self.tags.append(tag)
            per_word = counts.setdefault(word, {})
            per_word[tag] = per_word.get(tag, 0) + 1
        # For every seen word keep the tag with the highest count.
        self.majority = {
            word: max(tag_counts, key=tag_counts.get)
            for word, tag_counts in counts.items()
        }

    def predict(self, X, y=None):
        """Predict the the tag from memory, If word is unknown, predict 'o'"""
        return [self.majority.get(word, 'o') for word in X]
# Flatten the corpus into parallel word / tag lists.
words = data["Word"].values.tolist()
tags = data["Tag"].values.tolist()
# 5-fold out-of-fold predictions give an unbiased evaluation of the baseline.
pred = cross_val_predict(estimator=MajorityVotingTagger(), X=words, y=tags, cv=5)
report = classification_report(y_pred=pred, y_true=tags)
print(report)
| jiangq195/tanxin | starter_code1/NER/majority_voting.py | majority_voting.py | py | 1,532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name"... |
648044037 | import time
import h5py
from affogato.segmentation import InteractiveMWS
def debug():
    """Exercise InteractiveMWS on one 2d slice: segment without seeds, feed
    in precomputed seeds, segment again, timing each step."""
    z = 0
    path = '/home/pape/Work/data/ilastik/mulastik/data/data.h5'
    with h5py.File(path, 'r') as f:
        # raw = f['raw'][z]
        affs = f['prediction'][:, z]

    strides = [4, 4]
    offsets = [[-1, 0], [0, -1], [-3, 0], [0, -3],
               [-9, 0], [0, -9], [-27, 0], [0, -27]]

    with h5py.File('./seeds.h5') as f:
        seeds = f['data'][:]
    assert seeds.shape == affs.shape[1:]

    imws = InteractiveMWS(affs, offsets, n_attractive_channels=2,
                          strides=strides, randomize_strides=True)

    print("Compute segmentation without seeds ...")
    start = time.time()
    seg_plain = imws()
    print("... done in %f s" % (time.time() - start))

    print("Add seeds ...")
    start = time.time()
    imws.update_seeds(seeds)
    print("... done in %f s" % (time.time() - start))

    print("Compute segmentation with seeds ...")
    start = time.time()
    seg_seeded = imws()
    print("... done in %f s" % (time.time() - start))

    assert seg_plain.shape == seg_seeded.shape == seeds.shape
# Run the debug routine when invoked as a script.
if __name__ == '__main__':
    debug()
| constantinpape/affogato | example/interactive/debug.py | debug.py | py | 1,168 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "h5py.File",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "affogato.segmentation.InteractiveMWS",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
37370928668 | import torch
import torchvision
# post-processing
def handle_preds(preds, device, conf_thresh=0.25, nms_thresh=0.45):
    """Decode raw detector feature maps into NMS-filtered detections.

    :param preds: tensor of shape (N, C, H, W); channel 0 is objectness,
        channels 1-4 the box regression, channels 5+ the class scores.
    :param device: device used for the grid offsets and NMS inputs.
    :param conf_thresh: minimum combined confidence to keep a box.
    :param nms_thresh: IoU threshold for torchvision's batched NMS.
    :return: list (one tensor per image) with rows [x1, y1, x2, y2, score, class].
    """
    collected, results = [], []

    # Convert the feature map into per-cell box attributes.
    batch, _, height, width = preds.shape
    boxes = torch.zeros((batch, height, width, 6))
    grid = preds.permute(0, 2, 3, 1)
    # Objectness branch.
    obj = grid[:, :, :, 0].unsqueeze(dim=-1)
    # Box-regression branch.
    reg = grid[:, :, :, 1:5]
    # Per-class score branch.
    cls = grid[:, :, :, 5:]

    # Combined confidence and the predicted class per cell.
    boxes[..., 4] = (obj.squeeze(-1) ** 0.6) * (cls.max(dim=-1)[0] ** 0.4)
    boxes[..., 5] = cls.argmax(dim=-1)

    # Recover normalised corner coordinates from the cell grid.
    gy, gx = torch.meshgrid([torch.arange(height), torch.arange(width)])
    bw, bh = reg[..., 2].sigmoid(), reg[..., 3].sigmoid()
    cx = (reg[..., 0].tanh() + gx.to(device)) / width
    cy = (reg[..., 1].tanh() + gy.to(device)) / height
    # cx,cy,w,h => x1,y1,x2,y2
    boxes[..., 0], boxes[..., 1] = cx - 0.5 * bw, cy - 0.5 * bh
    boxes[..., 2], boxes[..., 3] = cx + 0.5 * bw, cy + 0.5 * bh

    collected.append(boxes.reshape(batch, height * width, 6))
    batch_boxes = torch.cat(collected, 1)

    # Per-image confidence filtering followed by class-aware NMS.
    for image_boxes in batch_boxes:
        kept_rows, candidates = [], []
        coords, scores, classes = [], [], []
        mask = image_boxes[:, 4] > conf_thresh
        for row in image_boxes[mask]:
            score, category = row[4], row[5]
            x1, y1 = row[0], row[1]
            x2, y2 = row[2], row[3]
            scores.append([score])
            classes.append([category])
            coords.append([x1, y1, x2, y2])
            candidates.append([x1, y1, x2, y2, score, category])
        if len(coords) > 0:
            coords_t = torch.Tensor(coords).to(device)
            classes_t = torch.Tensor(classes).squeeze(1).to(device)
            scores_t = torch.Tensor(scores).squeeze(1).to(device)
            keep = torchvision.ops.batched_nms(coords_t, scores_t, classes_t, nms_thresh)
            for idx in keep:
                kept_rows.append(candidates[idx])
        results.append(torch.Tensor(kept_rows))
    return results
| Zhefan-Xu/onboard_detector | scripts/yolo_detector/utils/tool.py | tool.py | py | 2,179 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "torch.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.meshgrid",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number"... |
31629715534 | from flask import Flask, render_template, redirect, request
from flask import Blueprint
from models.visit import Visit
import repositories.visit_repository as visit_repository
import repositories.country_repository as country_repository
import repositories.user_repository as user_repository
visits_blueprint = Blueprint("visits", __name__)
@visits_blueprint.route("/users/<user_id>")
def visited_countries(user_id):
    """Show the countries a user has visited alongside the full country list."""
    user = user_repository.select_by_id(user_id)
    visits = visit_repository.show_all(user_id)
    countries = country_repository.select_all()
    return render_template(
        "visits/index.html",
        all_visits=visits,
        all_countries=countries,
        user=user,
    )
@visits_blueprint.route("/visits/<user_id>", methods=['POST'])
def add_visited_country(user_id):
    """Create a visit linking the posted country to the given user."""
    chosen_country_id = request.form['select_country']
    user = user_repository.select_by_id(user_id)
    country = country_repository.select_by_id(chosen_country_id)
    visit_repository.save(Visit(user, country, True))
    return redirect('/users/' + user_id)
# Remove a visit record, then return to the user's country list.
# NOTE(review): this mutates state on a GET request; a POST (or DELETE)
# route would be safer -- crawlers and link prefetchers can trigger GETs.
@visits_blueprint.route("/visits/<visit_id>/<user_id>/delete", methods= ['GET'])
def delete_visit(visit_id, user_id):
    visit_repository.delete(visit_id)
    return redirect('/users/' + user_id)
# @visits_blueprint.route("/countries", methods= ['POST'])
# def update_country(name, continent,flag):
# name = request.form['name']
# continent = request.form['continent']
# flag = request.form['flag']
# visit_repository.update()
# return render_template('/countries') | paolaguerralibrero/bucket_list_python_project_w5 | controllers/visit_controller.py | visit_controller.py | py | 1,562 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "repositories.visit_repository.show_all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "repositories.visit_repository",
"line_number": 14,
"usage_type": "name"
},
{
... |
13543436023 | import pandas as pd
import numpy as np
import scipy.stats as stats
import pylab as pl
import re
import seaborn as sns
import matplotlib.pyplot as plt
import random
sns.set(font_scale = 1.5)
pd.set_option('display.max_columns', 15)
pd.set_option('display.max_rows', 40)
filepath = '\\Coding\\DataAnalystInterview\\MarketValue\\ResidentialHouse2019Data.csv'
filepath1 = '\\Coding\\DataAnalystInterview\\MarketValue\\ResidentialCondo2019Data.csv'
DataHouse = pd.read_csv(filepath, header = 0, sep = ',')
DataCondo = pd.read_csv(filepath1,header=0,sep=',')
filepath2 = '\\Coding\\DataAnalystInterview\\Neighbourhoods.csv'
Neighbourhoods = pd.read_csv(filepath2, header = None, sep = ',')
Interquartile = Neighbourhoods[Neighbourhoods[1] > 1.5*(10**8)]
Interquartile = Interquartile[Interquartile[1] < 6*(10**8)]
Interquartile = Interquartile[0].tolist()
Interquartilesample = random.choices(Interquartile, k=5)
print (Interquartilesample)
#Lotsize vs assesed value without removing outliers. Determined Condo v. House using "unit" in legal description
plt.figure()
#sns.scatterplot(x='Lot_Size',y='Assessed_Value',data=DataHouse)
plt.figure()
#sns.scatterplot(x='Lot_Size',y='Assessed_Value',data=DataCondo)
'''Removing lot size outliers/Year Built Outliers'''
DataHouse = pd.read_csv(filepath, header = 0, sep = ',')
DistributionHouse = (DataHouse['Lot_Size'].quantile([0.1, 0.25, 0.75, 1]))
(tophouse,bottomhouse) = 623 +((623-394) * 1.5), 394 - ((623-394) * 1.5)
test = (DataHouse['Assessed_Value'].quantile([0.1, 0.25, 0.75, 1]))
print(test)
DataHouse = DataHouse[DataHouse['Lot_Size'] > bottomhouse]
DataHouse = DataHouse[DataHouse['Lot_Size'] < tophouse]
DataHouse = DataHouse[DataHouse['Actual_Year_Built'] > 1600]
DataHouseNeighbourhood = DataHouse[DataHouse['Neighbourhood'].isin(Interquartilesample)]
'''HOUSES Lot Size vs. Assessed Value'''
plt.figure()
sns.lmplot(x='Lot_Size',y='Assessed_Value', hue = 'Neighbourhood',data=DataHouseNeighbourhood, height = 10)
plt.ylim(0,)
plt.xlim(0,)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Lot_Size'],DataHouseNeighbourhood['Assessed_Value'])
print ('DataNeighborhood : lotsize v. assessed value', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Lot_Size',y='Assessed_Value',data=DataHouse, height = 10)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Lot_Size'],DataHouse['Assessed_Value'])
print ('DataHouse: lotsize v. assessed value', slope, intercept, r_value, p_value, std_err)
'''Economies of Scale, Lot Size vs. PPSF'''
plt.figure()
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Lot_Size'],DataHouseNeighbourhood['PricePerSquareMeter'])
print ('DataNeighborhood: Economies of Scale', slope, intercept, r_value, p_value, std_err)
sns.lmplot(x='Lot_Size',y='PricePerSquareMeter', hue = 'Neighbourhood', height = 10, data=DataHouseNeighbourhood)
plt.figure()
sns.lmplot(x='Lot_Size',y='PricePerSquareMeter',data=DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(0,)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Lot_Size'],DataHouse['PricePerSquareMeter'])
print ('DataHouse: Economies of Scale', slope, intercept, r_value, p_value, std_err)
''' Year Built '''
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Assessed_Value',hue = 'Neighbourhood', height = 10, data=DataHouseNeighbourhood)
plt.ylim(0,)
plt.xlim(1940,2020)
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouseNeighbourhood['Actual_Year_Built'],DataHouseNeighbourhood['Assessed_Value'])
print ('DataNeighborhood: Actual Year Built', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Assessed_Value', data = DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(1940,2020)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Actual_Year_Built'],DataHouse['Assessed_Value'])
print ('DataHouse: Actual Year Built', slope, intercept, r_value, p_value, std_err)
plt.figure()
sns.lmplot(x='Actual_Year_Built',y='Lot_Size', data = DataHouse, height = 10)
plt.ylim(0,)
plt.xlim(1940,2020)
#P-Value is the test that the hypothesis is Null (slope = 0) R-Value is the correlation. This gives a weak R and a strong P
slope, intercept, r_value, p_value, std_err = stats.linregress(DataHouse['Actual_Year_Built'],DataHouse['Lot_Size'])
print ('DataHouse: Actual Year Built', slope, intercept, r_value, p_value, std_err)
'''Neighbourhood Group'''
DataHouseNeighbourhood.boxplot('Assessed_Value','Neighbourhood',figsize=(27,8))
'''
dummy = pd.get_dummies(DataHouse['Neighbourhood'])
print(dummy.head())
dummy.to_csv(r'C:\\Users\\aviel\\Desktop\\Coding\\Data Analyst Interview\\MarketValue\\test.csv', index = False)
'''
| avielchow/Property-Assessment-Analysis | Analysis.py | Analysis.py | py | 5,400 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "seaborn.set",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
71740452349 | #/usr/bin/env python
import yaml
import os, sys, re
import subprocess
import argparse
## Arguments
parser = argparse.ArgumentParser(description='Create a shared cuda.yml for docker-compose')
# --gpu/-g may be repeated; each occurrence appends one GPU id to args.gpus.
parser.add_argument('--gpu', '-g',
                    action='append',
                    dest='gpus',
                    default=[])
parser.add_argument('--verbose',
                    action='store_true',
                    help='Verbose logging')
parser.add_argument('--out', '-o',
                    dest='save_directory',
                    default='shared/',
                    help='Directory to write the shared docker-compose')
# Parsed once at import time; the rest of the script reads `args`.
args = parser.parse_args()
# nvidia-docker style constants: CUDA version label, device nodes, and the
# driver binaries / libraries to expose inside containers.
cuda_version = '7.0'
nv_device = '/dev/nvidia'
uvm_device = '{0}-uvm'.format(nv_device)  # /dev/nvidia-uvm
ctl_device = '{0}ctl'.format(nv_device)  # /dev/nvidiactl
cuda_version_label = 'com.nvidia.cuda.version'
nv_bins_volume = '/usr/local/bin'
# Driver utilities to bind-mount into the container.
nv_bins = ['nvidia-cuda-mps-control',
           'nvidia-cuda-mps-server',
           'nvidia-debugdump',
           'nvidia-persistenced',
           'nvidia-smi'
           ]
nv_libs_volume = '/usr/local/nvidia'
# Driver-side CUDA libraries (lib<name>.so) resolved via ldconfig.
nv_libs_cuda = ['cuda', 'nvcuvid', 'nvidia-compiler', 'nvidia-encode', 'nvidia-ml']
def log(msg, **kwargs):
    """Print a DEBUG-prefixed message; if a truthy `body` kwarg is given, print it too."""
    print('DEBUG: {0}'.format(msg))
    body = kwargs.get('body')
    if body:
        print(body)
def no_error(cmds):
    """Run each whitespace-separated command in *cmds*, discarding all output.

    Returns True if every command launches and exits with status 0,
    False otherwise.

    Fixes two defects of the original: `subprocess.Popen` never raises
    `CalledProcessError` (that exception comes from `run(check=True)` /
    `check_call`), so the old except clause was dead code and failures went
    unnoticed; and the function returned None on success, so callers could
    not distinguish outcomes.
    """
    try:
        for cmd in cmds.split():
            # run() waits for completion and, with check=True, raises
            # CalledProcessError on a non-zero exit status.
            subprocess.run([cmd], stdout=subprocess.DEVNULL,
                           stderr=subprocess.STDOUT, check=True)
    except (subprocess.CalledProcessError, OSError):
        # OSError covers a missing executable (FileNotFoundError).
        return False
    return True
def grep(cmd, grp):
    """Run *cmd* and pipe its stdout through `grep grp`.

    Returns the matching lines decoded as ASCII, or None when nothing matched
    (grep produced no output).

    NOTE(review): grep.communicate() is called before orig.wait(); this
    assumes the producer's output fits in the pipe buffers while grep drains
    them — confirm for very large outputs.
    """
    grep = subprocess.Popen(['grep', grp], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    orig = subprocess.Popen(cmd, stdout=grep.stdin)
    output, errs = grep.communicate()
    orig.wait()
    if output:
        return output.decode('ascii')
def query_nvsmi(section, gpu_id=False):
    """Return the last whitespace-separated token of the `nvidia-smi -q`
    output line matching *section* (e.g. 'Driver Version'), optionally
    restricted to one GPU via `-i <gpu_id>`.

    Returns None when no line matches. The original called `.split()` on the
    result of grep() unconditionally, raising AttributeError whenever the
    section was absent (e.g. no NVIDIA driver installed).
    """
    cmd = ['nvidia-smi', '-q']
    if gpu_id:
        cmd.extend(['-i', gpu_id])
    res = grep(cmd, section)
    if res:
        return res.split()[-1]
    return None
def library_path(lib):
    """Resolve lib<lib>.so to its on-disk path using `ldconfig -p`.

    Returns the path string, or None (after printing a notice) when the
    library is not in the linker cache.
    """
    entry = grep(['ldconfig', '-p'], 'lib{0}.so'.format(lib))
    if not entry:
        print('Could not find library: {0}'.format(lib))
        return None
    return entry.split('=>')[-1].strip(' \t\n\r')
def library_arch(lib):
    """Return the word size ('32' or '64') of a shared library, parsed from
    `file -L` output, or None if it cannot be determined.

    Fixes a NameError in the original error message, which referenced an
    undefined name `b` instead of `lib`.
    """
    proc = subprocess.Popen(['file', '-L', lib], stdout=subprocess.PIPE)
    out, errs = proc.communicate()
    if errs:
        # NOTE(review): stderr is not piped, so errs is always None here;
        # kept for parity with the sibling which() helper.
        print('There was an error with `file -L {0}`: {1}'.format(lib, errs))
    elif out:
        # e.g. "...: ELF 64-bit LSB shared object..." -> token '64-bit' -> '64'
        return re.sub('-bit', '', out.decode('ascii').split()[2])
def which(b):
    """Locate executable *b* on PATH via the `which` command.

    Returns the stripped path string, or None when not found / on error.
    """
    finder = subprocess.Popen(['which', b], stdout=subprocess.PIPE)
    stdout, stderr = finder.communicate()
    if stderr:
        print('There was an error with `which {0}`: {1}'.format(b, stderr))
        return None
    if not stdout:
        return None
    return stdout.decode('ascii').strip(' \n\t\r')
def format_mount(a, b=None):
    """Return a docker-style 'host:container' mount string.

    With a single argument, the path is mounted onto itself ('a:a').
    """
    return '{0}:{1}'.format(a, b if b else a)
# Query the installed driver version and sanity-run the NVIDIA tools.
driver_version = query_nvsmi('Driver Version')
no_error('nvidia-smi')
#no_error('nvidia-smi nvidia-modprobe')
# Skeleton of the docker-compose service definition being built.
d = {
    'devices': [],
    'volumes': []
}
## Add devices
devices = [ctl_device, uvm_device]
d['devices'] = [format_mount(dev) for dev in devices]
# One /dev/nvidia<minor> device per requested GPU.
if args.gpus:
    for gpu in args.gpus:
        gpu_minor_version = query_nvsmi('Minor Number', gpu)
        if gpu_minor_version:
            d['devices'].append(format_mount('{0}{1}'.format(nv_device, gpu_minor_version)))
        else:
            print('Could not find minor version for gpu: {0}'.format(gpu))
# Mount each located driver library, split into lib/ vs lib64/ by word size.
library_paths = [library_path(lib) for lib in nv_libs_cuda]
for lib in library_paths:
    if lib:
        basename = os.path.basename(lib)
        arch = library_arch(lib)
        if arch:
            mount = None
            if arch == '32':
                mount = format_mount(lib, '{0}/lib/{1}'.format(nv_libs_volume, basename))
            if arch == '64':
                mount = format_mount(lib, '{0}/lib64/{1}'.format(nv_libs_volume, basename))
            if mount:
                d['volumes'].append(mount)
# Mount the NVIDIA helper binaries found on the host PATH.
for binary in nv_bins:
    b = which(binary)
    if b:
        d['volumes'].append(format_mount(b, '{0}/{1}'.format(nv_bins_volume, binary)))
# Mount every file from the local CUDA toolkit lib64 directory.
cuda_dir = '/usr/local/cuda-{0}/lib64'.format(cuda_version)
files = [x for x in os.listdir(cuda_dir) if os.path.isfile(cuda_dir+os.sep+x)]
for lib in files:
    local_file = os.path.join(cuda_dir, lib)
    remote_volume = '{0}/{1}'.format(nv_libs_volume, lib)
    d['volumes'].append(format_mount(local_file, remote_volume))
# Extend the container's LD_LIBRARY_PATH so the mounted libs are found.
d['environment'] = {}
d['environment'].update({'LD_LIBRARY_PATH': '$LD_LIBRARY_PATH:{0}:{1}'.format(cuda_dir, nv_libs_volume)})
# Serialise the service as YAML and write it to <save_directory>/cuda.yml.
out = yaml.safe_dump({'cuda_base': d},
                     indent=4,
                     allow_unicode=True,
                     default_flow_style=False)
log('Writing cuda file', body=out)
with open('{0}/cuda.yml'.format(args.save_directory), 'w') as outfile:
    outfile.write(out)
| auser/docker-tensorflow-ipython-celery-rest-server | shared/create_cuda.py | create_cuda.py | py | 4,297 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.devnull",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "subprocess.ST... |
42913447777 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import json
import sys
from helpers.helpers_visualisation import get_colors
from scipy.misc import imread
import matplotlib.image as mpimg
class Animation():
    """Load one evaluated trajectory sample for a scene and render it to a GIF.

    All paths and settings are resolved from the chain of JSON parameter
    files referenced by *parameters_path*.
    """
    def __init__(self,parameters_path):
        parameters_project = json.load(open(parameters_path))
        processed_parameters = json.load(open(parameters_project["data_processed_parameters"]))
        evaluation_parameters = json.load(open(parameters_project["evaluation_parameters"]))
        raw_parameters = json.load(open(parameters_project["data_raw_parameters"]))
        visualization_parameters = json.load(open(parameters_project["visualization_parameters"]))
        self.scene = visualization_parameters["scene"]
        self.sample_id = visualization_parameters["sample_id"]
        # Per-scene pixel/meter ratio; trajectories are stored in meters and
        # converted to pixels for drawing over the raw scene image.
        self.pixel_meter_ratios = raw_parameters["pixel_meter_ratios"]
        self.meter2pixel_ratio = 1.0/ self.pixel_meter_ratios[self.scene]
        # report_name = evaluation_parameters["report_name"]
        report_name = visualization_parameters["report_name"]
        sub_dir_name = parameters_project["evaluation_reports"] + "{}/scene_reports/".format(report_name)
        self.scene_samples = sub_dir_name + "{}_samples.json".format(self.scene)
        self.gif_name = parameters_project["animations_reports"] + "{}_{}_{}.gif".format(self.scene,self.sample_id,report_name)
        self.image = parameters_project["raw_images"] + "{}.jpg".format(self.scene)
        # Maps numeric type ids back to their string labels (e.g. "pedestrian").
        self.rev_dict_types = processed_parameters["types_dic_rev"]
    def animate_sample(self):
        """Read the stored sample, convert coordinates to pixels and animate it."""
        file_ = json.load(open(self.scene_samples))
        sample = file_[str(self.sample_id)]
        inputs = np.array(sample["inputs"])
        labels = np.array(sample["labels"])
        outputs = np.array(sample["outputs"])
        types = np.array(sample["types"])
        print(types)
        types = [ self.rev_dict_types[str(int(type_))] for type_ in types]
        img = mpimg.imread(self.image)
        # Full tracks = observed inputs followed by predicted / groundtruth points.
        prediction = np.concatenate([inputs,outputs], axis = 1)
        gt = np.concatenate([inputs,labels], axis = 1)
        prediction = prediction * self.meter2pixel_ratio
        gt = gt * self.meter2pixel_ratio
        nb_colors = gt.shape[0]
        colors = get_colors(nb_colors)
        animator = Animate(prediction,gt,colors,img,types,self.gif_name)
        animator.animate()
class Animate():
    """Side-by-side animation of predicted vs groundtruth agent trajectories,
    drawn over the raw scene image and optionally saved as a GIF.

    data_pred / data_gt: arrays of shape (agents, frames, 2) in pixel units.
    """
    def __init__(self,data_pred,data_gt,colors,img,types,gif_name = "test.gif", plot_ = False, save = True):
        self.img = img
        self.xs_pred = data_pred[:,:,0]
        self.ys_pred = data_pred[:,:,1]
        self.xs_gt = data_gt[:,:,0]
        self.ys_gt = data_gt[:,:,1]
        self.types = types
        self.nb_agents = self.xs_pred.shape[0]
        self.margin = 1
        self.nb_frames = self.xs_pred.shape[1]
        self.gif_name = gif_name
        self.plot_ = plot_
        self.save = save
        self.fps = 1
        self.colors = colors
        # Each agent gets a random shade (in the 0.6-0.8 band) of the
        # colormap matching its type.
        self.lin_size = 100
        lin = np.linspace(0.6, 0.8, self.lin_size)
        self.color_dict = {
            "bicycle":cm.Blues(lin),
            "pedestrian":cm.Reds(lin),
            "car":cm.Greens(lin),
            "skate":cm.Greys(lin),
            "cart":cm.Purples(lin),
            "bus":cm.Oranges(lin)
        }
        # NOTE: this deliberately overrides the `colors` argument above.
        self.colors = [self.color_dict[type_][np.random.randint(self.lin_size)] for type_ in self.types]
        # Number of past positions kept visible at each animation step.
        self.history = 4
        self.get_plots()
    def get_plots(self):
        """Create the two axes (predictions | groundtruth) with one line artist
        per agent on each, plus a shared type-colour legend."""
        self.fig, self.ax = plt.subplots(1,2,squeeze= False)
        red_patch = mpatches.Patch(color='red', label='Pedestrians')
        blue_patch = mpatches.Patch(color='b', label='Bycicles')
        green_patch = mpatches.Patch(color='green', label='Cars')
        grey_patch = mpatches.Patch(color='grey', label='Skates')
        purple_patch = mpatches.Patch(color='purple', label='Carts')
        orange_patch = mpatches.Patch(color='orange', label='Buses')
        plt.legend(handles=[red_patch,blue_patch,green_patch,grey_patch,purple_patch,orange_patch],loc='best',fontsize = 3.5)
        self.ax[0][0].imshow(self.img,origin = "upper")
        self.ax[0][1].imshow(self.img,origin = "upper")
        self.plots1 = []
        self.plots2 = []
        for i in range(self.nb_agents):
            # Agent 0 is highlighted with a '^' marker on both panels.
            tup = self.ax[0][0].plot([], [], color = self.colors[i],marker = 'o',markersize = 2,linewidth = 0.5)[0]
            if i == 0:
                tup = self.ax[0][0].plot([], [], color = self.colors[i],marker = '^',markersize = 2,linewidth = 0.5)[0]
            self.plots1.append(tup)
            tup = self.ax[0][1].plot([], [], color = self.colors[i],marker = 'o',markersize = 2,linewidth = 0.5)[0]
            if i == 0:
                tup = self.ax[0][1].plot([], [], color = self.colors[i],marker = '^',markersize = 2,linewidth = 0.5)[0]
            self.plots2.append(tup)
    def animate(self):
        """Build the FuncAnimation; show it and/or save it depending on flags."""
        self.ax[0][1].set_title("Groundtruth",loc = "left", fontsize=8)
        self.ax[0][0].set_title("Predictions",loc = "left", fontsize=8)
        plt.tight_layout()
        ani = matplotlib.animation.FuncAnimation(self.fig, self.update, frames=self.nb_frames,repeat=True)
        if self.plot_:
            plt.show()
        if self.save:
            ani.save(self.gif_name, writer='imagemagick', fps=self.fps,dpi = 200)
    def update(self,frame):
        """FuncAnimation callback: draw the last `history` positions up to
        `frame` on both panels.

        Refactor: the identical per-panel loop (leading-zero trimming + data
        update + marker switch) was duplicated in the original; it now lives
        once in _draw_tracks().
        """
        frame = int(frame)
        end = frame + 1
        start = max(0,end-self.history)
        if end < 9:
            self.fig.suptitle("Timestep: {}, observation time".format(frame+1), fontsize=8)
        else:
            self.fig.suptitle("Timestep: {}, prediction time".format(frame+1), fontsize=8)
        self._draw_tracks(self.plots1, self.xs_pred, self.ys_pred, start, end, frame)
        self._draw_tracks(self.plots2, self.xs_gt, self.ys_gt, start, end, frame)
    def _draw_tracks(self, plots, all_xs, all_ys, start, end, frame):
        """Update one panel's line artists; shared by prediction and GT panels."""
        for i,p in enumerate(plots):
            xs, ys = self._strip_leading_padding(all_xs[i], all_ys[i])
            p.set_data(xs[start:end], ys[start:end])
            if frame > 7 :
                # Switch to a '+' marker once prediction time begins.
                p.set_marker("+")
                p.set_markersize(3)
    @staticmethod
    def _strip_leading_padding(xs, ys):
        """Drop the leading (0, 0) pairs used to pad the start of a track."""
        c = 0
        for x,y in zip(xs,ys):
            if x == 0 and y == 0:
                c += 1
            else:
                break
        return xs[c:], ys[c:]
if __name__ == "__main__":
main() | elbuco1/AttentionMechanismsTrajectoryPrediction | src/visualization/classes/animation.py | animation.py | py | 7,151 | python | en | code | 49 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 23,
... |
26041286196 | from __future__ import annotations
import itertools
import logging
import os
from typing import Callable, Iterable, cast
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.goals.lockfile import synthetic_lockfile_target_name
from pants.backend.python.macros.common_fields import (
ModuleMappingField,
TypeStubsModuleMappingField,
)
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
PythonRequirementModulesField,
PythonRequirementResolveField,
PythonRequirementsField,
PythonRequirementTarget,
PythonRequirementTypeStubModulesField,
)
from pants.core.target_types import (
TargetGeneratorSourcesHelperSourcesField,
TargetGeneratorSourcesHelperTarget,
)
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs
from pants.engine.internals.target_adaptor import TargetAdaptor, TargetAdaptorRequest
from pants.engine.rules import Get
from pants.engine.target import (
Dependencies,
GenerateTargetsRequest,
InvalidFieldException,
SingleSourceField,
)
from pants.engine.unions import UnionMembership
from pants.util.pip_requirement import PipRequirement
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
# Callback signature: (requirements file bytes, file path) -> parsed PipRequirements.
ParseRequirementsCallback = Callable[[bytes, str], Iterable[PipRequirement]]
async def _generate_requirements(
    request: GenerateTargetsRequest,
    union_membership: UnionMembership,
    python_setup: PythonSetup,
    parse_requirements_callback: ParseRequirementsCallback,
) -> Iterable[PythonRequirementTarget]:
    """Generate one `python_requirement` target per project listed in a
    requirements file, plus a helper target for the file itself.

    The generated targets depend on the file target and, when synthetic
    lockfiles are enabled, on the resolve's lockfile target. Raises
    InvalidFieldException for `overrides` keys that match no requirement.
    """
    generator = request.generator
    requirements_rel_path = generator[SingleSourceField].value
    requirements_full_path = generator[SingleSourceField].file_path
    # Overrides are keyed by canonicalized (PEP 503) project name.
    overrides = {
        canonicalize_project_name(k): v
        for k, v in request.require_unparametrized_overrides().items()
    }
    # Pretend this is just another generated target, for typing purposes.
    file_tgt = cast(
        "PythonRequirementTarget",
        TargetGeneratorSourcesHelperTarget(
            {TargetGeneratorSourcesHelperSourcesField.alias: requirements_rel_path},
            Address(
                request.template_address.spec_path,
                target_name=request.template_address.target_name,
                relative_file_path=requirements_rel_path,
            ),
            union_membership,
        ),
    )
    req_deps = [file_tgt.address.spec]
    resolve = request.template.get(
        PythonRequirementResolveField.alias, python_setup.default_resolve
    )
    lockfile = (
        python_setup.resolves.get(resolve) if python_setup.enable_synthetic_lockfiles else None
    )
    if lockfile:
        # Depend on the synthetic lockfile target unless a real target shadows it.
        lockfile_address = Address(
            os.path.dirname(lockfile),
            target_name=synthetic_lockfile_target_name(resolve),
        )
        target_adaptor = await Get(
            TargetAdaptor,
            TargetAdaptorRequest(
                description_of_origin=f"{generator.alias} lockfile dep for the {resolve} resolve",
                address=lockfile_address,
            ),
        )
        if target_adaptor.type_alias == "_lockfiles":
            req_deps.append(f"{lockfile}:{synthetic_lockfile_target_name(resolve)}")
        else:
            logger.warning(
                softwrap(
                    f"""
                    The synthetic lockfile target for {lockfile} is being shadowed by the
                    {target_adaptor.type_alias} target {lockfile_address}.
                    There will not be any dependency to the lockfile.
                    Resolve by either renaming the shadowing target, the resolve {resolve!r} or
                    moving the target or the lockfile to another directory.
                    """
                )
            )
    # Read the requirements file's bytes through the engine.
    digest_contents = await Get(
        DigestContents,
        PathGlobs(
            [requirements_full_path],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin=f"{generator}'s field `{SingleSourceField.alias}`",
        ),
    )
    module_mapping = generator[ModuleMappingField].value
    stubs_mapping = generator[TypeStubsModuleMappingField].value
    def generate_tgt(
        project_name: str, parsed_reqs: Iterable[PipRequirement]
    ) -> PythonRequirementTarget:
        # Build one target for all requirement lines of a single project;
        # pops the matching override so leftovers can be reported below.
        normalized_proj_name = canonicalize_project_name(project_name)
        tgt_overrides = overrides.pop(normalized_proj_name, {})
        if Dependencies.alias in tgt_overrides:
            tgt_overrides[Dependencies.alias] = list(tgt_overrides[Dependencies.alias]) + req_deps
        return PythonRequirementTarget(
            {
                **request.template,
                PythonRequirementsField.alias: list(parsed_reqs),
                PythonRequirementModulesField.alias: module_mapping.get(normalized_proj_name),
                PythonRequirementTypeStubModulesField.alias: stubs_mapping.get(
                    normalized_proj_name
                ),
                # This may get overridden by `tgt_overrides`, which will have already added in
                # the file tgt.
                Dependencies.alias: req_deps,
                **tgt_overrides,
            },
            request.template_address.create_generated(project_name),
            union_membership,
        )
    requirements = parse_requirements_callback(digest_contents[0].content, requirements_full_path)
    # groupby relies on the parser yielding a project's lines contiguously.
    grouped_requirements = itertools.groupby(
        requirements, lambda parsed_req: parsed_req.project_name
    )
    result = tuple(
        generate_tgt(project_name, parsed_reqs_)
        for project_name, parsed_reqs_ in grouped_requirements
    ) + (file_tgt,)
    if overrides:
        raise InvalidFieldException(
            softwrap(
                f"""
                Unused key in the `overrides` field for {request.template_address}:
                {sorted(overrides)}
                """
            )
        )
    return result
| pantsbuild/pants | src/python/pants/backend/python/macros/common_requirements_rule.py | common_requirements_rule.py | py | 6,084 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "pants.util.pip_requ... |
6015616240 | #!/usr/bin/env python
#coding: utf-8
import asyncio
import yaml
import sys
from NetDevices import DeviceHandler
from git import Repo
import time
FILEPATH = "/root/device_cfg/"
async def get_config(device):
    """Connect to *device*, fetch its configuration and save it to
    FILEPATH/<hostname>.

    device: dict with at least a "hostname" key, passed to DeviceHandler.
    """
    hostname = device.get("hostname")
    conn = DeviceHandler(device)
    conn.connect()
    await conn.login()
    r = await conn.get_config()
    file_name = FILEPATH + hostname
    # `with` guarantees the handle is flushed and closed; the original
    # open(...).write(...) leaked the file handle.
    with open(file_name, "w") as f:
        f.write(r[1])
    print("%s is saved" %file_name)
deviceinfos = {}
# The YAML inventory file is the single required CLI argument.
try:
    yaml_cfg = sys.argv[1]
except IndexError:
    print("please give yaml configure file")
    sys.exit(1)
# safe_load avoids arbitrary Python object construction from the YAML file
# and supplies the Loader that newer PyYAML versions require; the file is
# also now closed deterministically.
with open(yaml_cfg) as f:
    deviceinfos = yaml.safe_load(f.read())
# Fetch all device configs concurrently.
loop = asyncio.get_event_loop()
tasks = []
for device in deviceinfos.get("devices"):
    tasks.append(loop.create_task(get_config(device)))
loop.run_until_complete(asyncio.wait(tasks))
# Commit the refreshed configs to the local git repository.
localtime = time.asctime(time.localtime(time.time()))
repo = Repo(FILEPATH)
repo.index.add("*")
repo.index.commit("Configs auto saving at %s" %localtime)
| netdevops-engineer/newbie_book | Chapter13/Chapter13/Device.asyncio/Device6.py | Device6.py | py | 989 | python | en | code | 36 | github-code | 6 | [
{
"api_name": "NetDevices.DeviceHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line... |
34075662644 | import random
import sys
from UI import Ui_MainWindow
from PyQt5.QtCore import QPoint, QRect
from PyQt5.QtGui import QPainter, QColor
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow
MAX_X = MAX_Y = 400
class MyWidget(QMainWindow, Ui_MainWindow):
    """Main window that adds a randomly sized/placed/coloured circle on each
    button press and repaints them all."""

    def __init__(self):
        super().__init__()
        self.circles = []
        self.setupUi(self)
        self.pushButton.clicked.connect(self.add_circle)

    def add_circle(self):
        """Append one random circle (bounding rect + colour) and request a repaint."""
        # RNG call order (size, right, bottom, r, g, b) is significant and preserved.
        diameter = random.randint(0, min(MAX_X, MAX_Y))
        right = random.randint(diameter, MAX_X)
        bottom = random.randint(diameter, MAX_Y)
        bounds = QRect(QPoint(right - diameter, bottom - diameter),
                       QPoint(right, bottom))
        colour = QColor(random.randrange(256),
                        random.randrange(256),
                        random.randrange(256))
        self.circles.append((bounds, colour))
        self.update()

    def paintEvent(self, event):
        """Qt paint hook: delegate all drawing to draw()."""
        painter = QPainter()
        painter.begin(self)
        self.draw(painter)
        painter.end()

    def draw(self, painter):
        """Outline every stored circle in its own colour."""
        for bounds, colour in self.circles:
            painter.setPen(colour)
            painter.drawEllipse(bounds)
# Standard Qt bootstrap: create the application, show the main window and
# run the event loop until the window closes, forwarding its exit code.
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_())
| Orisphera/random-circles | main.py | main.py | py | 1,188 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "UI.Ui_MainWindow",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.ra... |
4756037188 | """This module provides the CLI for the wrangle-ukds-trade-directories app."""
from . import __app_name__
import argparse
from pathlib import Path
def typecast_args(args):
    """Coerce the string `input`/`output` attributes of *args* into
    pathlib.Path objects, mutating and returning the same namespace."""
    args.input, args.output = Path(args.input), Path(args.output)
    return args
def test_args(args):
if not Path(args.input).is_dir():
raise RuntimeError("The path specified does not exist")
Path(args.output).mkdir(parents=True, exist_ok=True)
if not Path(args.output).is_dir():
raise RuntimeError("The output path specified does not exist")
return True
def get_args():
    """Build the CLI parser, parse sys.argv, convert the paths and validate
    them, then return the resulting namespace."""
    parser = argparse.ArgumentParser(
        prog=__app_name__,
        description="Wrangle the UKDS Trade Directories data folder.",
    )
    parser.add_argument(
        "input",
        metavar="input",
        type=str,
        help="The input path where the UKDS trade directories can be found.",
    )
    parser.add_argument(
        "output",
        metavar="output",
        type=str,
        help="The output path where the consolidated UKDS trade directories should be located.",
    )
    parsed = parser.parse_args()
    parsed = typecast_args(parsed)  # str -> pathlib.Path
    test_args(parsed)  # validates input, creates output dir
    return parsed
| Living-with-machines/wrangle-ukds-trade-directories | wrangle_ukds_trade_directories/argparse.py | argparse.py | py | 1,285 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number... |
15169976693 | from typing import List
from fastapi_utils.inferring_router import InferringRouter
from sqlalchemy.ext.asyncio import AsyncSession
from fastapi import Depends, Request
from admins.models import Category
from crud_handler import BaseHandler
from database import get_async_session
from fastapi_utils.cbv import cbv
from sqlalchemy import select
from permissions import manage_helpdesk
from staff.models import Group, User
from staff.schemas import GroupSchemaReturn, GroupSchemaCreate, UserSchemaReturn, UserSchemaCreate
group_router = InferringRouter(tags=["Group"])
ROUTE = "/api/groups"
user_router = InferringRouter(tags=["User"])
ROUTE_USER = "/api/users"
@cbv(group_router)
class GroupView(BaseHandler):
    """CRUD endpoints for Group objects; every route first requires
    helpdesk-management permission via manage_helpdesk()."""
    # Per-request async DB session injected by FastAPI.
    session: AsyncSession = Depends(get_async_session)
    def __init__(self):
        super().__init__(Group)
    @group_router.post(f"{ROUTE}/", response_model=GroupSchemaReturn, status_code=201)
    async def create_item(self, group_object: GroupSchemaCreate, request: Request):
        """Create a new group."""
        await manage_helpdesk(request)
        return await self.create(self.session, group_object.dict(), object_name="Group")
    @group_router.get(f"{ROUTE}/", response_model=List[GroupSchemaReturn], status_code=200)
    async def read_groups(self,
                          request: Request,
                          offset: int = 0,
                          limit: int = 5):
        """Return a paginated list of groups."""
        await manage_helpdesk(request)
        query = select(self.model)
        return await self.list(query=query,
                               session=self.session,
                               limit=limit,
                               offset=offset)
    @group_router.get(f"{ROUTE}/" + "{group_id}", response_model=GroupSchemaReturn, status_code=200)
    async def read_group(self, group_id: int, request: Request):
        """Retrieve a single group by id."""
        await manage_helpdesk(request)
        query = select(self.model)
        return await self.retrieve(query, self.session, group_id)
    @group_router.delete(f"{ROUTE}/" + "{group_id}", status_code=204)
    async def delete_group(self, group_id: int, request: Request):
        """Delete a group by id."""
        await manage_helpdesk(request)
        return await self.delete(self.session, group_id)
    @group_router.put(f"{ROUTE}/" + "{group_id}", response_model=GroupSchemaReturn, status_code=200)
    async def update_group(self, group_id: int, group: GroupSchemaReturn, request: Request):
        """Replace a group's fields and commit the change."""
        await manage_helpdesk(request)
        group_obj = await self.update(self.session, group_id, group.dict())
        await self.session.commit()
        return group_obj
@cbv(user_router)
class UserView(BaseHandler):
    """User endpoints (only creation is exposed here); requires helpdesk
    permission."""
    # Per-request async DB session injected by FastAPI.
    session: AsyncSession = Depends(get_async_session)
    def __init__(self):
        super().__init__(User)
    @user_router.post(f"{ROUTE_USER}/", response_model=UserSchemaReturn, status_code=201)
    async def create_item(self, user_object: UserSchemaCreate, request: Request):
        """Create a user, resolving the nested group/category ids from the
        payload into ORM objects before insertion."""
        await manage_helpdesk(request)
        user_dict = user_object.dict()
        group_obj = await self.get_obj(select(Group), self.session, {"id": user_dict.get("group").get("id")})
        category_object = await self.get_obj(select(Category), self.session, {"id": user_dict.get("category").get("id")})
        user_dict["group"] = group_obj
        user_dict["category"] = category_object
        return await self.create(self.session, user_dict, object_name="User")
| AlexeyShakov/helpdesk_fast_api | src/staff/endpoints.py | endpoints.py | py | 3,364 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi_utils.inferring_router.InferringRouter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fastapi_utils.inferring_router.InferringRouter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "crud_handler.BaseHandler",
"line_number": 25,
... |
7318961586 | from django.conf.urls import url
from . import views
# URL routes for the portal app. The generic CRM module views take the module
# name (and usually a record id) from the URL; the remaining routes cover
# layout/role/user administration and case handling.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^modules/$', views.modules, name='modules'),
    # Generic per-module CRUD.
    url(r'^module/(?P<module>\w+)/list$', views.module_list, name='module_list'),
    url(r'^module/(?P<module>\w+)/detail/(?P<id>[\w-]+)/$',
        views.module_detail, name='module_detail'),
    url(r'^module/(?P<module>\w+)/edit/(?P<id>[\w-]+)/$',
        views.module_edit, name='module_edit'),
    url(r'^module/(?P<module>\w+)/create/$',
        views.module_create, name='module_create'),
    url(r'^module/(?P<module>\w+)/remove/$',
        views.module_remove_record, name='module_remove_record'),
    # Layout administration (one editor per layout type).
    url(r'^layouts/$', views.edit_layouts, name='edit_layouts'),
    url(r'^user_records/(?P<module>\w+)/$',
        views.user_records, name='user_records'),
    url(r'^layout/list/(?P<module>\w+)/$',
        views.edit_list_layout, name='edit_list_layout'),
    url(r'^layout/filter/(?P<module>\w+)/$',
        views.edit_filter_layout, name='edit_filter_layout'),
    url(r'^layout/detail/(?P<module>\w+)/$',
        views.edit_detail_layout, name='edit_detail_layout'),
    url(r'^layout/edit/(?P<module>\w+)/$',
        views.edit_edit_layout, name='edit_edit_layout'),
    url(r'^layout/create/(?P<module>\w+)/$',
        views.edit_create_layout, name='edit_create_layout'),
    # Role administration.
    url(r'^roles/$', views.edit_roles, name='edit_roles'),
    url(r'^role/(?P<role>\w+)$', views.edit_role, name='edit_role'),
    url(r'^roles/delete$', views.delete_role, name='delete_role'),
    url(r'^roles/create$', views.create_role, name='create_role'),
    url(r'^note_attachment/(?P<id>[\w-]+)/$',
        views.note_attachment, name='note_attachment'),
    # Case handling.
    url(r'^add_case_update/$', views.add_case_update, name='add_case_update'),
    url(r'^close_case/$', views.close_case, name='close_case'),
    url(r'^reopen_case/$', views.reopen_case, name='reopen_case'),
    # User administration and misc.
    url(r'^users/$', views.edit_users, name='edit_users'),
    url(r'^user/(?P<user_id>\d+)$', views.edit_user, name='edit_user'),
    url(r'^user_profile/$', views.user_profile, name='user_profile'),
    url(r'^cache/$', views.cache, name='cache'),
    url(r'^pdf_templates/$', views.pdf_templates, name='pdf_templates'),
    url(r'^get_pdf/(?P<module>\w+)/(?P<id>[\w-]+)/$',
        views.get_pdf, name='get_pdf'),
    url(r'^index.php$', views.crm_entry_point, name='crm_entry_point'),
]
| sanchezfauste/bPortal | portal/urls.py | urls.py | py | 2,424 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
12237739567 | import tensorflow as tf
import numpy as np
# Run TF 1.x in eager mode (no explicit sessions/graphs).
tf.enable_eager_execution()
# Dataset
import tensorflow_datasets as tfds
# Constants to eventually parameterise
LOGDIR = './logs/autoencoder_gg/'
# Activation function to use for layers
act_func = tf.nn.tanh
# Enable or disable GPU
SESS_CONFIG = tf.ConfigProto(device_count = {'GPU': 1})
class Model:
    """Convolutional autoencoder (the original docstring said "Image
    Classification", but this class encodes to a bottleneck and decodes back
    to the input shape).

    Down-sampling: `num_layers` Conv2D layers, the last outputting
    `bottleneck_chans` channels; up-sampling mirrors them with
    Conv2DTranspose back to the input channel count.
    """
    def __init__(self, input_shape, num_layers=4, activation=tf.nn.relu, layer_width=64, bottleneck_chans = 4, learn_rate=1e-4):
        """Initializes model parameters and optimizer.

        input_shape: [height, width, channels] of one image (no batch dim).
        """
        # Stores model params
        self.vars = []
        self.layers = []
        self.input_shape = input_shape
        # Tracks the activation shape after each layer (for the summary print).
        self.shape_list = []
        self.shape_list.append(input_shape)
        # Down-sampling Layers
        for l in range(num_layers):
            # First layer
            if(l == 0):
                in_chans = input_shape[2]
                out_chans = layer_width
                cur_shape = [1,] + input_shape
            # Last Layer
            elif(l == num_layers-1):
                in_chans = out_chans
                out_chans = bottleneck_chans
            # Middle layers
            else:
                in_chans = out_chans
                out_chans = layer_width
            f_height = 5
            f_width = 5
            layer = tf.layers.Conv2D(out_chans, (f_height, f_width), strides=[1,1], padding='valid', activation=activation, kernel_initializer=tf.initializers.random_normal, bias_initializer=tf.initializers.random_normal, name='Conv'+str(l))
            layer.build(cur_shape)
            cur_shape = layer.compute_output_shape(cur_shape)
            self.shape_list.append(cur_shape)
            self.layers.append(layer)
        # Up-sampling Layers
        for l in range(num_layers):
            # First layer
            if(l == 0):
                in_chans = bottleneck_chans
                out_chans = layer_width
            # Last Layer
            elif(l == num_layers-1):
                in_chans = out_chans
                out_chans = input_shape[2]
            # Middle layers
            else:
                in_chans = out_chans
                out_chans = layer_width
            f_height = 5
            f_width = 5
            layer = tf.layers.Conv2DTranspose(out_chans, (f_height, f_width), strides=[1,1], padding='valid', activation=activation, kernel_initializer=tf.initializers.random_normal, bias_initializer=tf.initializers.random_normal, name='ConvTP'+str(l))
            layer.build(cur_shape)
            cur_shape = layer.compute_output_shape(cur_shape)
            self.shape_list.append(cur_shape)
            self.layers.append(layer)
        # Our Optimizer
        self.optimizer = tf.train.AdamOptimizer(learn_rate)
        # Grab all variables
        for l in self.layers:
            self.vars.extend(l.weights)
        for idx,shape in enumerate(self.shape_list):
            if(idx == 0):
                out_shape = None
            else:
                out_shape = self.layers[idx-1].weights[0].shape
            print('Layer: ', str(idx), shape, 'Weights: ', out_shape)
    def crunch(self, x_input):
        """Forward pass: run x_input through every layer and return the
        reconstruction, logging each layer's activations as image summaries."""
        with tf.name_scope('MainGraph'):
            for l in range(len(self.layers)):
                if(l == 0):
                    h = self.layers[0](x_input)
                    tf.contrib.summary.image(self.layers[l].name, h[:,:,:,:3], max_images=1)
                else:
                    h = self.layers[l](h)
                    tf.contrib.summary.image(self.layers[l].name, h[:,:,:,:3], max_images=1)
            #x_hat = tf.sigmoid(h)
            #x_hat = h
            # Standardize then squash to (0, 1) for image-like output.
            x_hat = tf.sigmoid(tf.image.per_image_standardization(h))
        return x_hat
    def learn(self, x_input):
        """One optimization step on a batch; returns (reconstruction, MSE loss)."""
        # Track gradients
        with tf.GradientTape() as tape:
            tape.watch(x_input)
            output = self.crunch(x_input)
            tf.contrib.summary.image('Reconstructed Image', output, max_images=3)
            with tf.name_scope('Generation_Loss'):
                reconstruction_loss = tf.losses.mean_squared_error(labels=x_input, predictions=output)
                tf.contrib.summary.scalar('Recon Loss', reconstruction_loss)
        grads = tape.gradient(reconstruction_loss, self.vars)
        self.optimizer.apply_gradients(zip(grads, self.vars))
        #self.optimizer.apply_gradients(zip(grads, self.layers[0].weights))
        global_step.assign_add(1)
        return output, reconstruction_loss
    def validate(self, x_input):
        """Run a validation batch and log input/output pairs (stacked
        vertically) plus weight/bias histograms; no gradient step."""
        output = self.crunch(x_input)
        output_rs = tf.reshape(output, [-1, self.input_shape[0]*self.input_shape[1], self.input_shape[2]])
        x_input_rs = tf.reshape(x_input, [-1, self.input_shape[0]*self.input_shape[1], self.input_shape[2]])
        # Get last three of each
        concat = tf.concat([x_input_rs, output_rs], axis=1)
        concat_img = tf.reshape(concat, [-1, self.input_shape[0]*2, self.input_shape[1], self.input_shape[2]])
        tf.contrib.summary.image('Validation Pair', concat_img, max_images=3)
        for l in self.layers:
            tf.contrib.summary.histogram('Weights_'+l.name, l.weights[0])
            tf.contrib.summary.histogram('Biases_'+l.name, l.weights[1])
# Get Data
# Construct a tf.data.Dataset
#ds_name = 'mnist'
#ds_name = 'cifar10'
#ds_name = 'cifar100'
#ds_name = 'omniglot'
ds_name = 'celeb_a'
#ds_name = 'fashion_mnist'
(ds_train, ds_test), ds_info = tfds.load(name=ds_name, split=['train', 'test'], with_info=True)
img_shape = tf.TensorShape(ds_info.features['image'].shape)
print('DS Shape: ')
print(img_shape)
# TensorBoard summary writer; all tf.contrib.summary calls above log here.
summary_writer = tf.contrib.summary.create_file_writer(LOGDIR+ds_name, flush_millis=100)
summary_writer.set_as_default()
global_step = tf.train.get_or_create_global_step()
# Creates a classifier model
model = Model(img_shape.as_list())
# Preparing datasets (training and validation)
# Batch size of 1024 the repeats when iterated through
ds_train = ds_train.batch(64).repeat()
ds_test = ds_test.batch(64).repeat()
# Converts validation set into an iterator so we can iterate through it
ds_test_iter = iter(ds_test)
# Perform the training loop (forever)
for idx,batch in enumerate(ds_train):
    # Prepare training inputs: uint8 pixels scaled to [0, 1] floats.
    x_inputs = tf.math.divide(tf.cast(batch['image'], tf.float32), tf.constant(255.0, dtype=tf.float32))
    # Prepare validation inputs
    val_batch = next(ds_test_iter)
    val_x_inputs = tf.math.divide(tf.cast(val_batch['image'], tf.float32), tf.constant(255.0, dtype=tf.float32))
    # Train and validate
    with tf.contrib.summary.record_summaries_every_n_global_steps(10):
        preds, loss = model.learn(x_inputs)
        print('idx: ', idx, 'Loss: ', loss.numpy())
        model.validate(val_x_inputs)
| bfakhri/TensorflowEager | autoencoder.py | autoencoder.py | py | 7,158 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "tensorflow.enable_eager_execution",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 16,
"usage_type": "call"
},
{
"api_na... |
6484230311 | import os
import shutil
import subprocess
from pathlib import Path
import numpy as np
from PIL import Image, ImageOps
from lib import BruteForce, database
from .utils import timeit
# Bounds on the match ratio used to decide whether a frame is static.
# NOTE(review): the trailing numbers look like previously-tried values.
MAX_RATIO = 0.90  # 0.60
MIN_RATIO = 0
# PARAM FOR THE RADIOMETRIC APPROACH
# Try to normalize respect mean and std to reject static frames
RESIZE_SCALE_FACTOR = 1  # It can be usefull for reduce computation time
INNOVATION_THRESH = 0.001  # 1.5
def RootSift(img_name, desc_folder, N_kpts):
    """Load precomputed keypoints and descriptors for an image.

    Reads <desc_folder>/<img_name>.kpt.npy and .dsc.npy.
    N_kpts is accepted for interface compatibility but is not used here.
    Returns (keypoints, descriptors, number_of_keypoints).
    """
    kpt_file = desc_folder / Path("{}.kpt.npy".format(img_name))
    dsc_file = desc_folder / Path("{}.dsc.npy".format(img_name))
    keypoints = np.load(kpt_file)
    descriptors = np.load(dsc_file)
    return keypoints, descriptors, keypoints.shape[0]
def NextImg(last_img):
    """Return the next image index as a zero-padded 6-digit string.

    Replaces the original if/elif padding ladder with a single format spec.
    Also fixes a latent bug: for last_img + 1 >= 1000000 the ladder left
    next_img unbound (UnboundLocalError); now the number is returned
    unpadded, since it already has more than six digits.
    """
    return "{:06d}".format(last_img + 1)
# @timeit
def StaticRejection(
    STATIC_IMG_REJECTION_METHOD,
    img1,
    img2,
    IMGS_FROM_SERVER,
    CURRENT_DIR,
    KEYFRAMES_DIR,
    COLMAP_EXE_PATH,
    MAX_N_FEATURES,
    ref_matches,
    DEBUG,
    newer_imgs,
    last_img,
    img_dict,
    img_batch,
    pointer,
    colmap_exe,
):
    """Decide whether img2 contains enough new content to become a keyframe.

    Two strategies, selected by STATIC_IMG_REJECTION_METHOD:

    * "root_sift": extract and sequentially match SIFT features for the
      (img1, img2) pair with COLMAP; img2 is kept when the fraction of its
      matches already present in the reference frame lies strictly between
      MIN_RATIO and MAX_RATIO (i.e. the view changed enough).
    * "radiometric": compare the mean of the two normalized grayscale
      images; img2 is kept when the absolute difference exceeds
      INNOVATION_THRESH.

    Accepted keyframes are copied into KEYFRAMES_DIR under sequential
    zero-padded names (see NextImg) and recorded in img_dict/img_batch.

    Returns the updated tuple
    (ref_matches, newer_imgs, last_img, img_dict, img_batch, pointer).
    """
    # ROOTSIFT APPROACH
    if STATIC_IMG_REJECTION_METHOD == "root_sift":
        # Work in a clean temp folder holding only the current image pair.
        TEMP_DIR = CURRENT_DIR / "temp"
        shutil.rmtree(TEMP_DIR / "pair")
        os.makedirs(TEMP_DIR / "pair")
        shutil.copy(
            IMGS_FROM_SERVER / "{}".format(img1), TEMP_DIR / "pair" / "{}".format(img1)
        )
        shutil.copy(
            IMGS_FROM_SERVER / "{}".format(img2), TEMP_DIR / "pair" / "{}".format(img2)
        )
        # Fresh COLMAP database for this pair only.
        subprocess.run(
            [
                COLMAP_EXE_PATH / f"{colmap_exe}",
                "database_creator",
                "--database_path",
                TEMP_DIR / "db.db",
            ],
            stdout=subprocess.DEVNULL,
        )
        # NOTE(review): "SiftExtraction.max_num_features" is probably meant to
        # be the option "--SiftExtraction.max_num_features"; as written it is
        # passed as a positional argument — confirm against the COLMAP CLI.
        subprocess.run(
            [
                COLMAP_EXE_PATH / f"{colmap_exe}",
                "feature_extractor",
                "--database_path",
                TEMP_DIR / "db.db",
                "--image_path",
                TEMP_DIR / "pair",
                "SiftExtraction.max_num_features",
                str(MAX_N_FEATURES),
            ],
            stdout=subprocess.DEVNULL,
        )
        subprocess.run(
            [
                COLMAP_EXE_PATH / f"{colmap_exe}",
                "sequential_matcher",
                "--database_path",
                TEMP_DIR / "db.db",
                "--SequentialMatching.overlap",
                "1",
            ],
            stdout=subprocess.DEVNULL,
        )
        # Read the raw matches for the pair back out of the COLMAP database,
        # then delete it so the next call starts clean.
        db_p = TEMP_DIR / "db.db"
        matches = database.dbReturnMatches(db_p.as_posix(), 15)
        os.remove(TEMP_DIR / "db.db")
        if len(matches.keys()) != 0:
            key = list(matches.keys())[0]
            matches_matrix = matches[key]
            # NOTE(review): once ref_matches holds a matches matrix this
            # "== []" comparison may misbehave for array types — verify the
            # type returned by database.dbReturnMatches.
            if ref_matches == []:
                # First accepted pair: both images become keyframes.
                ref_matches = matches_matrix
                shutil.copy(
                    IMGS_FROM_SERVER / "{}".format(img1),
                    KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
                )
                shutil.copy(
                    IMGS_FROM_SERVER / "{}".format(img2),
                    KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img) + 1)),
                )
                img_dict["{}".format(img1)] = "{}.jpg".format(NextImg(int(last_img)))
                img_dict["{}".format(img2)] = "{}.jpg".format(
                    NextImg(int(last_img) + 1)
                )
                pointer += 1
                return (
                    ref_matches,
                    newer_imgs,
                    NextImg(int(last_img) + 1),
                    img_dict,
                    img_batch,
                    pointer,
                )
            else:
                # Fraction of the current matches already present in the
                # reference frame: a high overlap means a static scene.
                vec_ref = ref_matches[:, 1]
                vec = matches_matrix[:, 0]
                vec_ref = vec_ref.tolist()
                vec = vec.tolist()
                vec_ref = [int(v) for v in vec_ref]
                vec = [int(v) for v in vec]
                intersection = [el for el in vec if el in vec_ref]
                control_ratio = len(intersection) / len(vec_ref)
                print("control_ratio", control_ratio)
                if (
                    control_ratio < MAX_RATIO and control_ratio > MIN_RATIO
                ):
                    # Enough innovation: img2 becomes the new reference keyframe.
                    shutil.copy(
                        IMGS_FROM_SERVER / "{}".format(img2),
                        KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
                    )
                    img_dict["{}".format(img2)] = "{}.jpg".format(
                        NextImg(int(last_img))
                    )
                    print("\n.. added img\n")
                    ref_matches = matches_matrix
                    pointer += 1
                    newer_imgs = True
                    img_batch.append(img2)
                    return (
                        ref_matches,
                        newer_imgs,
                        NextImg(int(last_img)),
                        img_dict,
                        img_batch,
                        pointer,
                    )
                else:
                    # Frame rejected: too similar (or too different) to the reference.
                    print("\n.. NO\n")
                    return (
                        ref_matches,
                        newer_imgs,
                        last_img,
                        img_dict,
                        img_batch,
                        pointer,
                    )
        elif len(matches.keys()) == 0:
            # No matches at all between the pair: reject and keep waiting.
            print("\n.. NO .. len(matches.keys()) == 0\n")
            return (
                ref_matches,
                newer_imgs,
                last_img,
                img_dict,
                img_batch,
                pointer,
            )
    # RADIOMETRIC APPROACH
    elif STATIC_IMG_REJECTION_METHOD == "radiometric":
        # The try is necessary because the main loop keeps copying images in
        # from other folders, so the newest file may still be truncated or
        # incomplete when opened here.
        try:
            im1 = Image.open(IMGS_FROM_SERVER / img1)
            im2 = Image.open(IMGS_FROM_SERVER / img2)
            # NOTE(review): Image.resize returns a new image; these results are
            # discarded, so RESIZE_SCALE_FACTOR != 1 would change nothing —
            # confirm intent.
            im1.resize(
                (
                    round(im1.size[0] * RESIZE_SCALE_FACTOR),
                    round(im1.size[1] * RESIZE_SCALE_FACTOR),
                )
            )
            im2.resize(
                (
                    round(im2.size[0] * RESIZE_SCALE_FACTOR),
                    round(im2.size[1] * RESIZE_SCALE_FACTOR),
                )
            )
            im1_gray = ImageOps.grayscale(im1)
            im2_gray = ImageOps.grayscale(im2)
            # Normalization (shift by min, divide by max).
            im1_array = np.array(im1_gray)
            im1_array = (im1_array - np.min(im1_array)) / np.max(im1_array)
            im2_array = np.array(im2_gray)
            im2_array = (im2_array - np.min(im2_array)) / np.max(im2_array)
            mean1 = np.mean(im1_array)
            mean2 = np.mean(im2_array)
            # Innovation = absolute difference of the normalized mean levels.
            innovation = np.absolute(mean2 - mean1)
            if innovation > INNOVATION_THRESH:
                if ref_matches == []:
                    # Placeholder kept for compatibility with the rejection
                    # approaches that carry a real matches matrix.
                    ref_matches = [
                        "-"
                    ]
                    # First accepted pair: both images become keyframes.
                    shutil.copy(
                        IMGS_FROM_SERVER / "{}".format(img1),
                        KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
                    )
                    shutil.copy(
                        IMGS_FROM_SERVER / "{}".format(img2),
                        KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img) + 1)),
                    )
                    img_dict["{}".format(img1)] = "{}.jpg".format(
                        NextImg(int(last_img))
                    )
                    img_dict["{}".format(img2)] = "{}.jpg".format(
                        NextImg(int(last_img) + 1)
                    )
                    pointer += 1
                    return (
                        ref_matches,
                        newer_imgs,
                        NextImg(int(last_img) + 1),
                        img_dict,
                        img_batch,
                        pointer,
                    )
                elif ref_matches == ["-"]:
                    shutil.copy(
                        IMGS_FROM_SERVER / "{}".format(img2),
                        KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
                    )
                    img_dict["{}".format(img2)] = "{}.jpg".format(
                        NextImg(int(last_img))
                    )
                    pointer += 1
                    newer_imgs = True
                    img_batch.append(img2)
                    return (
                        ref_matches,
                        newer_imgs,
                        NextImg(int(last_img)),
                        img_dict,
                        img_batch,
                        pointer,
                    )
            else:
                print("!! Frame rejeccted. innovation < INNOVATION_THRESH !!", end="\r")
                return ref_matches, newer_imgs, last_img, img_dict, img_batch, pointer
        # NOTE(review): the bare except also hides real errors (missing files,
        # bad paths); consider narrowing to the PIL/OSError family.
        except:
            print("!! Frame truncated !!")
            return ref_matches, newer_imgs, last_img, img_dict, img_batch, pointer
    else:
        print("Choose 'radiometric' or 'root_sift' as STATIC_IMG_REJECTION_METHOD")
        quit()
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": ... |
3081253994 | # -*- coding: utf-8 -*
"""Input
.. module:: input
:synopsis: Module for processing input
"""
import resoncalc.output as output
import resoncalc.detection as detection
from argparse import ArgumentParser
from os import path
from json import load, JSONDecodeError
def process_command():
    """Parse the command line and run the requested action.

    usage: resoncalc [-h] [-o OUTPUT] [-v] [-s] [-g] [-t TITLE] input

    Calculates eigenstates for a potential described by the input file.
    Options: -o/--output output directory, -v/--verbose verbose mode,
    -s/--silent silent mode, -g/--generate generate graphs from data,
    -t/--title output title (used with --generate).

    Returns 0 on success, -1 on error.
    """
    parser = ArgumentParser(description='Calculate eigenstates for potential')
    parser.add_argument('string', metavar='input', help='input file')
    parser.add_argument('-o', '--output', help='output directory')
    parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
    parser.add_argument('-s', '--silent', action='store_true', help='silent mode')
    parser.add_argument('-g', '--generate', action='store_true', help='generate graphs from data')
    parser.add_argument('-t', '--title', help='output title, used for generate')
    args = parser.parse_args()

    # Logging verbosity flags are applied module-wide on output.
    if args.verbose:
        output.log_level = 2
    if args.silent:
        output.stdout = False

    # Output directory must already exist.
    if args.output is not None:
        if not path.exists(args.output):
            print('Output directory {0} not found'.format(args.output))
            return -1
        output.outdir = args.output

    # Input file must exist.
    infile = args.string
    if not path.exists(infile):
        print('Input file {0} not found'.format(infile))
        return -1

    # Graph generation mode short-circuits the test run.
    if args.generate:
        output.generate_graphs(infile, args.title)
        return 0

    tests = load_input(infile)
    if tests is None:
        return None

    try:
        process_tests(tests)
        return 0
    except KeyboardInterrupt as ex:
        print('Program terminated by user')
        return -1
def load_input(fname):
    """Load the JSON test configuration from *fname*.

    A single top-level test object is wrapped in a one-element list so
    callers always receive a list of tests. Returns None (after printing
    a message) when the file is not valid JSON.
    """
    try:
        with open(fname, 'r') as handle:
            parsed = load(handle)
    except JSONDecodeError as ex:
        print('Failed to parse input file {0}: {1}'.format(fname, ex))
        return None
    if type(parsed) is not list:
        parsed = [parsed]
    return parsed
def process_tests(tests):
    """Run the detection loop for every test configuration in *tests*."""
    for cfg in tests:
        detection.perform_detection_loop(cfg)
| hydratk/resoncalc | src/resoncalc/input.py | input.py | py | 3,159 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "resoncalc.output.log_level",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "resoncalc.output",
"line_number": 49,
"usage_type": "name"
},
{
"api_name... |
27535933658 | import torch
from torch import nn
import torch.nn.functional as F
from timm.models.layers import to_2tuple, DropPath, trunc_normal_
import math
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear,
    with dropout after each linear layer. Hidden/output widths default
    to the input width."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class h_sigmoid(nn.Module):
    """Hard sigmoid: ReLU6(x + 3) / 6, a piecewise-linear sigmoid approximation."""

    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        shifted = x + 3
        return self.relu(shifted) / 6
class h_swish(nn.Module):
    """Hard swish: x * hard_sigmoid(x) (MobileNetV3-style activation)."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate
class ECALayer(nn.Module):
    """Efficient Channel Attention: re-weight channels with a 1-D conv over
    globally average-pooled channel statistics. The kernel size is derived
    from the channel count (rounded up to odd), per the ECA heuristic."""

    def __init__(self, channel, gamma=2, b=1, sigmoid=True):
        super(ECALayer, self).__init__()
        t = int(abs((math.log(channel, 2) + b) / gamma))
        k = t if t % 2 else t + 1
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=k // 2, bias=False)
        self.sigmoid = nn.Sigmoid() if sigmoid else h_sigmoid()

    def forward(self, x):
        pooled = self.avg_pool(x)
        weights = self.conv(pooled.squeeze(-1).transpose(-1, -2))
        weights = self.sigmoid(weights.transpose(-1, -2).unsqueeze(-1))
        return x * weights.expand_as(x)
class SELayer(nn.Module):
    """Squeeze-and-Excitation: channel re-weighting computed from globally
    average-pooled features via a bottleneck MLP with hard sigmoid gate."""

    def __init__(self, channel, reduction=4):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            h_sigmoid()
        )

    def forward(self, x):
        batch, channels, _, _ = x.size()
        squeezed = self.avg_pool(x).view(batch, channels)
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * weights
class LocalityFeedForward(nn.Module):
    """Inverted-residual feed-forward block (LocalViT style).

    1x1 expansion conv -> (optional) depthwise 3x3 conv -> (optional)
    SE/ECA channel attention -> 1x1 projection conv, with a residual
    connection around the whole stack. `act` selects the activation
    ('hs' = hard swish, else ReLU6) and, after '+', the attention type
    ('se', 'eca', 'eca_sigmoid-variant').
    """

    def __init__(self, in_dim, out_dim, stride, expand_ratio=4., act='hs+se', reduction=4,
                 wo_dp_conv=False, dp_first=False):
        super(LocalityFeedForward, self).__init__()
        hidden_dim = int(in_dim * expand_ratio)
        kernel_size = 3
        layers = []
        # 1x1 expansion followed by the chosen activation.
        layers.extend([
            nn.Conv2d(in_dim, hidden_dim, kernel_size=1, stride=1, padding=0, bias=False),
            h_swish() if act.find('hs') >= 0 else nn.ReLU6(inplace=True)])
        if not wo_dp_conv:
            # Depthwise conv; dp_first puts it before the expansion conv.
            dp = [
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, kernel_size // 2, groups=hidden_dim, bias=False),
                h_swish() if act.find('hs') >= 0 else nn.ReLU6(inplace=True)
            ]
            if dp_first:
                layers = dp + layers
            else:
                layers.extend(dp)
        if act.find('+') >= 0:
            # Channel attention selected by the suffix after '+'.
            attn = act.split('+')[1]
            if attn == 'se':
                layers.append(SELayer(hidden_dim, reduction=reduction))
            elif attn.find('eca') >= 0:
                layers.append(ECALayer(hidden_dim, sigmoid=attn == 'eca'))
            else:
                raise NotImplementedError('Activation type {} is not implemented'.format(act))
        # 1x1 projection back to out_dim (no activation).
        layers.extend([
            nn.Conv2d(hidden_dim, out_dim, 1, 1, 0, bias=False)
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection around the conv stack.
        x = x + self.conv(x)
        return x
def window_partition(x, window_size):
    """Split a (B, H, W, C) tensor into non-overlapping square windows.

    Returns (num_windows * B, window_size, window_size, C), with windows
    ordered row-major within each image.
    """
    batch, height, width, channels = x.shape
    grid = x.view(batch, height // window_size, window_size,
                  width // window_size, window_size, channels)
    windows = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return windows.view(-1, window_size, window_size, channels)
def window_reverse(windows, window_size, H, W):
    """Inverse of window_partition: reassemble windows into (B, H, W, C).

    windows: (num_windows * B, window_size, window_size, C) in the same
    row-major window order produced by window_partition.
    """
    batch = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(batch, H // window_size, W // window_size,
                        window_size, window_size, -1)
    image = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return image.view(batch, H, W, -1)
class WindowAttention(nn.Module):
    """Window-based multi-head self-attention with relative position bias.

    Operates on flattened window_size[0]*window_size[1] token groups
    produced by window_partition; supports an optional additive attention
    mask for shifted windows.
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # (Wh, Ww) tuple
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # One learnable bias per (relative dh, relative dw) offset and head.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))
        # Precompute, for every ordered token pair inside a window, the index
        # of its relative offset into the bias table.
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        # Shift offsets to start at zero, then linearize (dh, dw) pairs.
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """x: (num_windows*B, N, C); mask: (num_windows, N, N) additive bias or None."""
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # Add the learned relative position bias to every attention map.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # Broadcast the per-window shift mask over batch and heads.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # FLOPs for one window of N tokens: qkv projection, two matmuls
        # with the attention map, and the output projection.
        flops = 0
        flops += N * self.dim * 3 * self.dim
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    """Swin Transformer block: (shifted) window attention followed by either
    a standard MLP or a LocalityFeedForward convolution (when is_local).

    shift_size == 0 gives regular window partitioning; 0 < shift_size <
    window_size gives the shifted-window variant, using an attention mask
    so tokens that wrap across image borders cannot attend to each other.
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, is_local=True):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.is_local = is_local
        # If the window would not fit, fall back to one full-size window.
        if min(self.input_resolution) <= self.window_size:
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        if is_local:
            # NOTE: norm2 is created but not applied on this path (the conv
            # replaces the MLP branch).
            self.conv = LocalityFeedForward(dim, dim, 1, mlp_ratio, act='hs+se', reduction=dim // 4)
        else:
            mlp_hidden_dim = int(dim * mlp_ratio)
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # Build the shifted-window attention mask: label each image region
            # created by the cyclic shift, then forbid attention between
            # tokens from different regions (additive bias of -100).
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        """x: (B, H*W, C) token sequence at this block's resolution."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # Cyclically shift, partition into windows, attend, then undo.
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        x_windows = window_partition(shifted_x, self.window_size)
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
        attn_windows = self.attn(x_windows, mask=self.attn_mask)
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        x = shortcut + self.drop_path(x)
        # Feed-forward: residual MLP, or depthwise-conv local FFN on the grid.
        if not self.is_local:
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            batch_size, num, embed_dim = x.shape
            x = x.transpose(1, 2).view(batch_size, embed_dim, H, W)
            x = self.conv(x).flatten(2).transpose(1, 2)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        # Rough FLOP count: norms + window attention + MLP branch.
        flops = 0
        H, W = self.input_resolution
        flops += self.dim * H * W
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        flops += self.dim * H * W
        return flops
class BasicLayer(nn.Module):
    """A stack of SwinTransformerBlock layers at one resolution, alternating
    regular and shifted windows, with an optional positional encoding module
    (e.g. PosCNN) applied once, after the first block."""

    def __init__(self, dim, input_resolution, depth, num_heads, window_size, pos_embed,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, is_local=True):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.pos_embed = pos_embed
        stages = []
        for i in range(depth):
            # Even blocks use regular windows, odd blocks shifted windows.
            stages.append(SwinTransformerBlock(
                dim=dim, input_resolution=input_resolution,
                num_heads=num_heads, window_size=window_size,
                shift_size=0 if i % 2 == 0 else window_size // 2,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop, attn_drop=attn_drop,
                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                norm_layer=norm_layer,
                is_local=is_local,
            ))
        self.blocks = nn.ModuleList(stages)

    def forward(self, x):
        for idx, block in enumerate(self.blocks):
            x = block(x)
            # Positional encoding is injected right after the first block.
            if idx == 0 and self.pos_embed is not None:
                x = self.pos_embed(x, self.input_resolution[0], self.input_resolution[1])
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        return sum(block.flops() for block in self.blocks)
def bicubic_upsample(x, H, W):
    """Double the spatial side of a token sequence by bicubic interpolation.

    x: (B, H*W, C) tokens. Returns (tokens of shape (B, 4*H*W, C), 2*H, 2*W);
    the channel count is unchanged.
    """
    batch, tokens, channels = x.size()
    assert tokens == H * W
    feat = x.permute(0, 2, 1).view(-1, channels, H, W)
    feat = nn.functional.interpolate(feat, scale_factor=2, mode='bicubic', align_corners=True)
    batch, channels, H, W = feat.size()
    tokens_out = feat.view(-1, channels, H * W).permute(0, 2, 1)
    return tokens_out, H, W
def pixel_upsample(x, H, W):
    """Double the spatial side of a token sequence via pixel shuffle.

    x: (B, H*W, C) tokens with C divisible by 4. Returns
    (tokens of shape (B, 4*H*W, C/4), 2*H, 2*W) — channels drop by 4x.
    """
    batch, tokens, channels = x.size()
    assert tokens == H * W
    feat = x.permute(0, 2, 1).view(-1, channels, H, W)
    feat = nn.PixelShuffle(2)(feat)
    batch, channels, H, W = feat.size()
    tokens_out = feat.view(-1, channels, H * W).permute(0, 2, 1)
    return tokens_out, H, W
class matmul(nn.Module):
    """Module wrapper around the @ operator so matrix products appear as
    layers (e.g. for profiling or FLOP counting)."""

    def __init__(self):
        super().__init__()

    def forward(self, x1, x2):
        return x1 @ x2
class Attention(nn.Module):
    """Standard (global) multi-head self-attention over a token sequence."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Scale queries by 1/sqrt(head_dim) unless an explicit scale is given.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.mat = matmul()

    def forward(self, x):
        batch, tokens, channels = x.shape
        qkv = (
            self.qkv(x)
            .reshape(batch, tokens, 3, self.num_heads, channels // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)
        scores = self.mat(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = self.mat(weights, v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Transformer block with global self-attention; the feed-forward part is
    either a standard MLP or a LocalityFeedForward convolution (is_local).
    Token layout is (B, 1 + H*W, C): a class token followed by grid tokens.
    """

    def __init__(
            self,
            input_resolution,
            dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
            drop_path=0., norm_layer=nn.LayerNorm,
            is_local=True
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.is_local = is_local
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        if not is_local:
            mlp_hidden_dim = int(dim * mlp_ratio)
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, drop=drop)
        else:
            # NOTE: norm2 is created but not applied on this path.
            self.conv = LocalityFeedForward(dim, dim, 1, mlp_ratio, act='hs+se', reduction=dim // 4)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        if not self.is_local:
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            # The local conv path operates on the spatial grid, so the class
            # token is detached first and re-attached after the convolution.
            batch_size, num, embed_dim = x.shape
            cls_token, x = torch.split(x, [1, num - 1], dim=1)
            x = x.transpose(1, 2).view(batch_size, embed_dim, self.input_resolution[0], self.input_resolution[1])
            x = self.conv(x).flatten(2).transpose(1, 2)
            x = torch.cat([cls_token, x], dim=1)
        return x
class PosCNN(nn.Module):
    """Conditional positional encoding (CPVT): a depthwise 3x3 conv applied
    over the token grid. With stride 1 the conv output is added residually
    to the input tokens."""

    def __init__(self, in_chans, embed_dim=768, s=1):
        super(PosCNN, self).__init__()
        self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, s, 1, bias=True, groups=embed_dim), )
        self.s = s

    def forward(self, x, H, W):
        batch, tokens, channels = x.shape
        grid = x.transpose(1, 2).view(batch, channels, H, W)
        if self.s == 1:
            encoded = self.proj(grid) + grid
        else:
            encoded = self.proj(grid)
        return encoded.flatten(2).transpose(1, 2)

    def no_weight_decay(self):
        # Exclude the conv weights from weight decay (by parameter name).
        return ['proj.%d.weight' % i for i in range(4)]
class SwinTransGenerator(nn.Module):
    """Swin-Transformer GAN generator.

    Maps a 256-dim noise vector to a single-channel image through four
    window-attention stages, doubling the spatial side at each step
    (bottom 8x8 -> 64x64 by default): bicubic upsampling keeps the channel
    count, and each of the two pixel-shuffle steps divides it by 4.
    is_peg selects conv positional encoding (PosCNN) over learned absolute
    position embeddings.
    """

    def __init__(self, embed_dim=256, bottom_width=8, bottom_height=8, window_size=4, depth=None,
                 is_local=True, is_peg=True):
        super(SwinTransGenerator, self).__init__()
        self.bottom_width = bottom_width
        self.bottom_height = bottom_height
        self.is_local = is_local
        self.is_peg = is_peg
        self.embed_dim = embed_dim
        if depth is None:
            depth = [4, 2, 2, 2]
        # NOTE(review): the stages below use the *argument* window_size; this
        # attribute is assigned but not read again in this class.
        self.window_size = 8
        # Project the latent to a bottom_h x bottom_w token grid.
        self.l1 = nn.Linear(256, (self.bottom_height * self.bottom_width) * self.embed_dim)
        # Stage 1: bottom resolution, full embedding width.
        self.layer1 = BasicLayer(
            dim=embed_dim,
            input_resolution=[self.bottom_height, self.bottom_width],
            depth=depth[0], num_heads=4, window_size=window_size,
            pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
            is_local=is_local
        )
        # Stage 2: 2x resolution (bicubic upsampling keeps channels).
        self.layer2 = BasicLayer(
            dim=embed_dim,
            input_resolution=[self.bottom_height * 2, self.bottom_width * 2],
            depth=depth[1], num_heads=4, window_size=window_size,
            pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
            is_local=is_local
        )
        # Stages 3-4: pixel shuffle doubles resolution, divides channels by 4.
        self.layer3 = BasicLayer(
            dim=embed_dim // 4,
            input_resolution=[self.bottom_height * 4, self.bottom_width * 4],
            depth=depth[2], num_heads=4, window_size=window_size,
            pos_embed=PosCNN(embed_dim // 4, embed_dim // 4) if is_peg else None,
            is_local=is_local
        )
        self.layer4 = BasicLayer(
            dim=embed_dim // 16,
            input_resolution=[self.bottom_height * 8, self.bottom_width * 8],
            depth=depth[3], num_heads=4, window_size=window_size,
            pos_embed=PosCNN(embed_dim // 16, embed_dim // 16) if is_peg else None,
            is_local=is_local
        )
        # 1x1 conv to a single output channel, squashed to [0, 1].
        self.deconv = nn.Sequential(
            nn.Conv2d(self.embed_dim // 16, 1, 1, 1, 0)
        )
        self.sigmoid = nn.Sigmoid()
        if not is_peg:
            # Learned absolute position embeddings, one per stage.
            self.pos_embed_1 = nn.Parameter(
                torch.zeros(1, self.bottom_height * self.bottom_width, embed_dim)
            )
            self.pos_embed_2 = nn.Parameter(
                torch.zeros(1, (self.bottom_height * 2) * (self.bottom_width * 2), embed_dim)
            )
            self.pos_embed_3 = nn.Parameter(
                torch.zeros(1, (self.bottom_height * 4) * (self.bottom_width * 4), embed_dim // 4)
            )
            self.pos_embed_4 = nn.Parameter(
                torch.zeros(1, (self.bottom_height * 8) * (self.bottom_width * 8), embed_dim // 16)
            )
            trunc_normal_(self.pos_embed_1, std=.02)
            trunc_normal_(self.pos_embed_2, std=.02)
            trunc_normal_(self.pos_embed_3, std=.02)
            trunc_normal_(self.pos_embed_4, std=.02)

    def forward(self, noise):
        """noise: (B, 256) latents -> (B, 1, 8*bottom_h, 8*bottom_w) images in [0, 1]."""
        x = self.l1(noise)
        x = x.reshape(-1, self.bottom_width * self.bottom_height, self.embed_dim)
        if not self.is_peg:
            x = x + self.pos_embed_1
        H, W = self.bottom_height, self.bottom_width
        x = self.layer1(x)
        x, H, W = bicubic_upsample(x, H, W)
        if not self.is_peg:
            x = x + self.pos_embed_2
        x = self.layer2(x)
        x, H, W = pixel_upsample(x, H, W)
        if not self.is_peg:
            x = x + self.pos_embed_3
        x = self.layer3(x)
        x, H, W = pixel_upsample(x, H, W)
        if not self.is_peg:
            x = x + self.pos_embed_4
        B, _, C = x.size()
        x = self.layer4(x)
        # Tokens back to an image grid, then to a single sigmoid channel.
        x = x.reshape(B, H, W, C).permute(0, 3, 1, 2)
        x = self.deconv(x)
        x = self.sigmoid(x)
        return x
class SwinTransDiscriminator(nn.Module):
    """Swin-Transformer GAN discriminator with a multi-scale patch pyramid.

    The input image is patch-embedded at three scales (full, 1/2, 1/4
    resolution). After each Swin stage the feature map is average-pooled
    and concatenated channel-wise with the next-scale embedding. A class
    token plus one global-attention Block and a linear head produce a
    single real/fake score per image.
    """

    def __init__(self,
                 img_height=64, img_width=64, patch_size=4, in_channel=1,
                 embed_dim=512, depth: list = None,
                 num_heads=4, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm,
                 is_local=True, is_peg=True):
        super(SwinTransDiscriminator, self).__init__()
        self.img_height = img_height
        self.img_width = img_width
        self.patch_size = patch_size
        self.window_size = patch_size
        self.is_local = is_local
        self.is_peg = is_peg
        if depth is None:
            depth = [4, 2, 2, 2]
        # Patch embeddings at three input scales; channel widths are chosen so
        # the concatenations below sum to embed_dim // 2 and then embed_dim.
        self.PatchEmbed_1 = nn.Conv2d(in_channel, embed_dim // 4, kernel_size=patch_size, stride=patch_size, padding=0)
        self.PatchEmbed_2 = nn.Conv2d(in_channel, embed_dim // 4, kernel_size=patch_size, stride=patch_size, padding=0)
        self.PatchEmbed_3 = nn.Conv2d(in_channel, embed_dim // 2, kernel_size=patch_size, stride=patch_size, padding=0)
        self.initial_height = img_height // patch_size
        self.initial_width = img_width // patch_size
        if not is_peg:
            # Learned absolute position embeddings, one per scale.
            num_patches_1 = (img_height // patch_size) * (img_width // patch_size)
            num_patches_2 = (img_height // (2 * patch_size)) * (img_width // (2 * patch_size))
            num_patches_3 = (img_height // (4 * patch_size)) * (img_width // (4 * patch_size))
            self.pos_embed_1 = nn.Parameter(torch.zeros(1, num_patches_1, embed_dim // 4))
            self.pos_embed_2 = nn.Parameter(torch.zeros(1, num_patches_2, embed_dim // 2))
            self.pos_embed_3 = nn.Parameter(torch.zeros(1, num_patches_3, embed_dim))
            trunc_normal_(self.pos_embed_1, std=.02)
            trunc_normal_(self.pos_embed_2, std=.02)
            trunc_normal_(self.pos_embed_3, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Three Swin stages at decreasing resolution / increasing width.
        self.blocks_1 = BasicLayer(
            dim=embed_dim // 4,
            input_resolution=[self.initial_height, self.initial_width],
            depth=depth[0], num_heads=4,
            window_size=self.window_size,
            pos_embed=PosCNN(embed_dim // 4, embed_dim // 4) if is_peg else None,
            is_local=is_local
        )
        self.blocks_2 = BasicLayer(
            dim=embed_dim // 2,
            input_resolution=[self.initial_height // 2, self.initial_width // 2],
            depth=depth[1], num_heads=4,
            window_size=self.window_size,
            pos_embed=PosCNN(embed_dim // 2, embed_dim // 2) if is_peg else None,
            is_local=is_local
        )
        self.blocks_3 = BasicLayer(
            dim=embed_dim,
            input_resolution=[self.initial_height // 4, self.initial_width // 4],
            depth=depth[2], num_heads=4,
            window_size=self.window_size,
            pos_embed=PosCNN(embed_dim, embed_dim) if is_peg else None,
            is_local=is_local
        )
        # Class token + one global-attention block for the final score.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth[3])]
        self.last_block = nn.Sequential(
            Block(
                input_resolution=[self.initial_height // 4, self.initial_width // 4],
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer,
                is_local=is_local
            )
        )
        self.norm = norm_layer(embed_dim)
        self.out = nn.Linear(embed_dim, 1)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, standard LayerNorm init.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """x: (B, in_channel, img_height, img_width) -> (B, 1) scores."""
        # Patch-embed the image at three scales and flatten to token form.
        x_1 = self.PatchEmbed_1(x).flatten(2).permute(0, 2, 1)
        x_2 = self.PatchEmbed_2(nn.AvgPool2d(2)(x)).flatten(2).permute(0, 2, 1)
        x_3 = self.PatchEmbed_3(nn.AvgPool2d(4)(x)).flatten(2).permute(0, 2, 1)
        if not self.is_peg:
            x_1 = x_1 + self.pos_embed_1
        x = self.pos_drop(x_1)
        B, _, C = x.size()
        x = self.blocks_1(x)
        # Downsample 2x and concatenate with the half-scale embedding.
        x = x.permute(0, 2, 1).reshape(B, C, self.initial_height, self.initial_width)
        x = nn.AvgPool2d(2)(x)
        _, _, H, W = x.shape
        x = x.flatten(2)
        x = x.permute(0, 2, 1)
        x = torch.cat([x, x_2], dim=-1)
        if not self.is_peg:
            x = x + self.pos_embed_2
        x = self.blocks_2(x)
        # Downsample again and concatenate with the quarter-scale embedding.
        _, _, C = x.shape
        x = x.permute(0, 2, 1).view(B, C, H, W)
        x = nn.AvgPool2d(2)(x)
        _, _, H, W = x.shape
        x = x.flatten(2).permute(0, 2, 1)
        x = torch.cat([x, x_3], dim=-1)
        if not self.is_peg:
            x = x + self.pos_embed_3
        x = self.blocks_3(x)
        # Prepend the class token and read the score from its embedding.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = self.last_block(x)
        x = self.norm(x)
        x = self.out(x[:, 0])
        return x
def test_dis():
    """Smoke test: push one random batch through the discriminator and print the output shape."""
    batch = torch.randn((16, 1, 64, 64))
    critic = SwinTransDiscriminator()
    print(critic(batch).shape)
def test_gen():
    """Smoke test: push one random latent batch through the generator and print the output shape."""
    latents = torch.randn((16, 256))
    generator = SwinTransGenerator(embed_dim=256)
    print(generator(latents).shape)
if __name__ == '__main__':
    # Quick smoke test of both networks when the module is run as a script.
    test_gen()
    test_dis()
| fym1057726877/Defense | TransGAN/TransGanModel.py | TransGanModel.py | py | 26,999 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.GELU",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_nu... |
17168418038 | import json
import re
from urllib import request
import requests
from panopto_oauth2 import PanoptoOAuth2
# Panopto hostname and OAuth2 client credentials used by the API calls below.
# WARNING(review): client_id and client_secret are hard-coded credentials
# committed to source control; they should be loaded from environment
# variables or a secrets store, and the exposed secret rotated.
server = "sph.hosted.panopto.com"
client_id = "29bd20b2-fd78-4bdd-9c40-af7a0133c139"
client_secret = "oZVXzyYlRQun/+xrxaItsdSDm1n7Np6rNqlmjHjgcyQ="
def read_coursera_to_time_sentence(input_path, video_id):
    """Parse an SRT-style caption file into parallel timestamp/sentence lists.

    The file layout per cue is: an index line ("1", "2", ...), a timestamp
    line ("HH:MM:SS,mmm --> HH:MM:SS,mmm"), then one or more text lines.

    Parameters
    ----------
    input_path : str
        Path to the caption file.
    video_id : str
        Video identifier, used only in the empty-file warning message.

    Returns
    -------
    tuple[list[str], list[str]]
        (time_list, sen_list): raw "start --> end" timestamp strings and the
        corresponding caption sentences with newlines collapsed to spaces.
    """
    with open(input_path) as f:
        lines = f.readlines()
    if not lines:
        print("{} has caption url but doesn't have caption".format(video_id))
    index = 1
    # Fix: removed the unused `start` flag from the original implementation.
    timestamp, sen, sen_list, time_list = False, "", [], []
    for line in lines:
        if line == "{}\n".format(index):
            # New cue begins: flush the sentence accumulated for the previous
            # cue (there is nothing to flush before the first index line).
            if index > 1:
                sen_list.append(sen.replace("\n", " ").strip())
            index += 1
            sen = ""
            timestamp = True
        elif timestamp:
            # The line immediately after an index line is the timestamp line.
            time_list.append(line.replace("\n", ""))
            timestamp = False
        else:
            sen += line
    # Flush the final cue's sentence.
    sen_list.append(sen.replace("\n", " ").strip())
    return time_list, sen_list
def convert_timestamp_to_sec(timestamp):
    """Convert an SRT timestamp "HH:MM:SS,mmm" into seconds as a float."""
    hours, minutes, seconds = timestamp.split(":")
    # SRT uses a comma as the decimal separator for milliseconds.
    return int(hours) * 3600 + int(minutes) * 60 + float(seconds.replace(",", "."))
def convert_time_list_to_seconds(time_list):
    """Convert "start --> end" timestamp strings into [start_sec, end_sec] pairs."""
    pairs = []
    for entry in time_list:
        begin, finish = entry.split(" --> ")
        pairs.append([convert_timestamp_to_sec(begin),
                      convert_timestamp_to_sec(finish)])
    return pairs
def generate_word_list(time_list_second, sen_list):
    """Assign per-word timestamps by linear interpolation over each sentence.

    Each sentence's time span is spread uniformly across its characters; a
    word's start/end times are derived from the character offsets it occupies.

    Parameters
    ----------
    time_list_second : list[list[float]]
        [start_sec, end_sec] pairs, one per sentence.
    sen_list : list[str]
        Sentences aligned with ``time_list_second``.

    Returns
    -------
    list[dict]
        Dicts with 'word', 'start_time' and 'end_time' keys, in word order.
    """
    # Fix: removed the original dead statement
    #   sen_split = re.sub(r'[^\w\s]', '', sen_list[0])
    # whose result was immediately overwritten (and which wrongly indexed
    # sentence 0 instead of the current sentence).
    word_list = []
    for i in range(len(time_list_second)):
        start_time, end_time = time_list_second[i]
        sen = sen_list[i]
        sen_split = sen.split(" ")
        # Scan the sentence character by character; whenever the upcoming
        # characters spell the next word, time it by its character span.
        w_index, c_index = 0, 0
        while c_index < len(sen):
            if sen[c_index: (c_index + len(sen_split[w_index]))] == sen_split[w_index]:
                time_for_each_word = (end_time - start_time) / len(sen)
                word_start = round(c_index * time_for_each_word + start_time, 2)
                word_end = round(word_start + len(sen_split[w_index]) * time_for_each_word, 2)
                word_list.append({
                    'word': sen_split[w_index],
                    'start_time': word_start,
                    'end_time': word_end,
                })
                c_index += len(sen_split[w_index])
                w_index += 1
            else:
                c_index += 1
    return word_list
def generate_output_dictionary(sen_list, word_list):
    """Bundle per-word timings and the full transcript into the output schema.

    Parameters
    ----------
    sen_list : list[str]
        Caption sentences in order.
    word_list : list[dict]
        Word-level timing dicts (stored as-is under 'timedtext').

    Returns
    -------
    dict
        {'timedtext': word_list, 'full_transcript': sentences joined by spaces}.
    """
    # str.join avoids the quadratic string concatenation of the original loop;
    # the trailing strip() matches the original's behavior for empty/leading
    # sentences.
    output_dict = dict()
    output_dict['timedtext'] = word_list
    output_dict['full_transcript'] = " ".join(sen_list).strip()
    return output_dict
def output_json(output_dict):
    """Pretty-print output_dict to output_with_caption/output.json (UTF-8)."""
    with open("output_with_caption/output.json", 'w', encoding="utf-8") as sink:
        json.dump(output_dict, sink, indent=2)
def main():
    """Attach parsed captions to the video metadata and write the result.

    Reads output_with_caption_url/output.json, and for each video that has a
    caption download URL, parses the previously downloaded caption file from
    caption/<Id>.txt into word-level timings plus a full transcript.
    """
    with open("output_with_caption_url/output.json") as json_file:
        videos = json.load(json_file)
    video_list = []
    count = 1
    for i in range(len(videos['Results'])):
        video = videos['Results'][i]
        url = video['Urls']['CaptionDownloadUrl']
        if url is not None:
            print("================={}=================".format(i))
            count += 1
            # Copy so the loaded metadata is not mutated in place.
            video_dict = video.copy()
            time_list, sen_list = read_coursera_to_time_sentence("caption/" + video['Id'] + ".txt", video['Id'])
            time_list_second = convert_time_list_to_seconds(time_list)
            word_list = generate_word_list(time_list_second, sen_list)
            caption_dict = generate_output_dictionary(sen_list, word_list)
            video_dict['caption'] = caption_dict
            video_list.append(video_dict)
            # Just convert one caption
            # To convert all captions, please comment the following if statement
            if count == 2:
                break
    print("================")
    print(len(video_list))
    output_dict = {"Results": video_list}
    output_json(output_dict)
if __name__ == '__main__':
main() | Zhou-Xun/panopto_video_extraction | convert_caption_url.py | convert_caption_url.py | py | 4,773 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "re.sub",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 115,
"usage_type": "call"
}
] |
4592746162 | __all__ = [
'Canceled',
'DuplicateDestinationError',
'Record',
'get_download_manager',
]
import os
import dbus
import logging
from collections import namedtuple
from io import StringIO
from pprint import pformat
try:
import pycurl
except ImportError: # pragma: no cover
pycurl = None
log = logging.getLogger('systemimage')
# Control-flow exception: raised by the download managers when a queued or
# in-progress download has been canceled (see DownloadManagerBase.get_files).
class Canceled(Exception):
    """Raised when the download was canceled."""
class DuplicateDestinationError(Exception):
    """Raised when two files are downloaded to the same destination."""

    def __init__(self, duplicates):
        super().__init__()
        # Keep the offending (destination, records) entries so callers and
        # tests can inspect exactly which downloads collided.
        self.duplicates = duplicates

    def __str__(self):
        # Leading newline so the pretty-printed listing starts on its own line.
        details = pformat(self.duplicates, indent=4, width=79)
        return '\n' + details
# A namedtuple is convenient here since we want to access items by their
# attribute names. However, we also want to allow for the checksum to default
# to the empty string. We do this by creating a prototypical record type and
# using _replace() to replace non-default values. See the namedtuple
# documentation for details.
_Record = namedtuple('Record', 'url destination checksum')('', '', '')
_RecordType = type(_Record)
def Record(url, destination, checksum=''):
return _Record._replace(
url=url, destination=destination, checksum=checksum)
class DownloadManagerBase:
    u"""Base class for all download managers."""

    def __init__(self):
        """
        :param callback: If given, a function that is called every so often
            during downloading.
        :type callback: A function that takes two arguments, the number
            of bytes received so far, and the total amount of bytes to be
            downloaded.
        """
        # This is a list of functions that are called every so often during
        # downloading.  Functions in this list take two arguments, the number
        # of bytes received so far, and the total amount of bytes to be
        # downloaded.
        self.callbacks = []
        # Running byte counts reported to the progress callbacks.
        self.total = 0
        self.received = 0
        # Set by cancel() before a download starts; checked in get_files().
        self._queued_cancel = False

    def __repr__(self):  # pragma: no cover
        return '<{} at 0x{:x}>'.format(self.__class__.__name__, id(self))

    def _get_download_records(self, downloads):
        """Convert the downloads items to download records."""
        records = [item if isinstance(item, _RecordType) else Record(*item)
                   for item in downloads]
        destinations = set(record.destination for record in records)
        # Check for duplicate destinations, specifically for a local file path
        # coming from two different sources.  It's okay if there are duplicate
        # destination records in the download request, but each of those must
        # be specified by the same source url and have the same checksum.
        #
        # An easy quick check just asks if the set of destinations is smaller
        # than the total number of requested downloads.  It can't be larger.
        # If it *is* smaller, then there are some duplicates, however the
        # duplicates may be legitimate, so look at the details.
        #
        # Note though that we cannot pass duplicates destinations to udm, so we
        # have to filter out legitimate duplicates.  That's fine since they
        # really are pointing to the same file, and will end up in the
        # destination location.
        if len(destinations) < len(downloads):
            by_destination = dict()
            unique_downloads = set()
            for record in records:
                by_destination.setdefault(record.destination, set()).add(
                    record)
                unique_downloads.add(record)
            duplicates = []
            for dst, seen in by_destination.items():
                if len(seen) > 1:
                    # Tuples will look better in the pretty-printed output.
                    duplicates.append(
                        (dst, sorted(tuple(dup) for dup in seen)))
            if len(duplicates) > 0:
                raise DuplicateDestinationError(sorted(duplicates))
            # Uniquify the downloads.
            records = list(unique_downloads)
        return records

    def _do_callback(self):
        # Be defensive, so yes, use a bare except.  If an exception occurs in
        # the callback, log it, but continue onward.
        # NOTE(review): the bare except is deliberate per the comment above,
        # but it also swallows KeyboardInterrupt/SystemExit raised inside a
        # callback — consider `except Exception:` if that matters.
        for callback in self.callbacks:
            try:
                callback(self.received, self.total)
            except:
                log.exception('Exception in progress callback')

    def cancel(self):
        """Cancel any current downloads."""
        self._queued_cancel = True

    def pause(self):
        """Pause the download, but only if one is in progress."""
        pass  # pragma: no cover

    def resume(self):
        """Resume the download, but only if one is in progress."""
        pass  # pragma: no cover

    def _get_files(self, records, pausable, signal_started):
        # Subclasses (curl/udm backends) implement the actual transfer.
        raise NotImplementedError  # pragma: no cover

    def get_files(self, downloads, *, pausable=False, signal_started=False):
        """Download a bunch of files concurrently.

        Occasionally, the callback is called to report on progress.
        This function blocks until all files have been downloaded or an
        exception occurs.  In the latter case, the download directory
        will be cleared of the files that succeeded and the exception
        will be re-raised.

        This means that 1) the function blocks until all files are
        downloaded, but at least we do that concurrently; 2) this is an
        all-or-nothing function.  Either you get all the requested files
        or none of them.

        :params downloads: A list of `download records`, each of which may
            either be a 2-tuple where the first item is the url to download,
            and the second item is the destination file, or an instance of a
            `Record` namedtuple with attributes `url`, `destination`, and
            `checksum`.  The checksum may be the empty string.
        :type downloads: List of 2-tuples or `Record`s.
        :param pausable: A flag specifying whether this download can be paused
            or not.  In general, data file downloads are pausable, but
            preliminary downloads are not.
        :type pausable: bool
        :param signal_started: A flag indicating whether the D-Bus
            DownloadStarted signal should be sent once the download has
            started.  Normally this is False, but it should be set to True
            when the update files are being downloaded (i.e. not for the
            metadata files).
        :type signal_started: bool
        :raises: FileNotFoundError if any download error occurred.  In
            this case, all download files are deleted.
        :raises: DuplicateDestinationError if more than one source url is
            downloaded to the same destination file.
        """
        if self._queued_cancel:
            # A cancel is queued, so don't actually download anything.
            raise Canceled
        if len(downloads) == 0:
            # Nothing to download.  See LP: #1245597.
            return
        records = self._get_download_records(downloads)
        # Better logging of the requested downloads.  However, we want the
        # entire block of multiline log output to appear under a single
        # timestamp.
        fp = StringIO()
        print('[0x{:x}] Requesting group download:'.format(id(self)), file=fp)
        for record in records:
            if record.checksum == '':
                print('\t{} -> {}'.format(*record[:2]), file=fp)
            else:
                print('\t{} [{}] -> {}'.format(*record), file=fp)
        log.info('{}'.format(fp.getvalue()))
        self._get_files(records, pausable, signal_started)

    @staticmethod
    def allow_gsm():
        """Allow downloads on GSM.

        This is a temporary override for the `auto_download` setting.
        If a download was attempted on wifi-only and not started because
        the device is on GSM, calling this issues a temporary override
        to allow downloads while on GSM, for download managers that
        support this (currently only UDM).
        """
        pass  # pragma: no cover
def get_download_manager(*args):
    """Return an instantiated download manager appropriate for this system.

    Chooses between the ubuntu-download-manager (UDM, via D-Bus) backend and
    the PyCURL backend.  The SYSTEMIMAGE_PYCURL environment variable, when
    set, forces the choice; otherwise UDM is preferred when its D-Bus service
    is reachable, falling back to PyCURL.

    :param args: Positional arguments forwarded to the chosen manager class.
    :raises ImportError: if PyCURL is required but not installed.
    """
    # We have to avoid circular imports since both download managers import
    # various things from this module.
    from systemimage.curl import CurlDownloadManager
    from systemimage.udm import DOWNLOADER_INTERFACE, UDMDownloadManager
    # Detect if we have ubuntu-download-manager.
    #
    # Use PyCURL based downloader if no udm is found, or if the environment
    # variable is set.  However, if we're told to use PyCURL and it's
    # unavailable, throw an exception.
    cls = None
    use_pycurl = os.environ.get('SYSTEMIMAGE_PYCURL')
    if use_pycurl is None:
        # Auto-detect.  For backward compatibility, use udm if it's available,
        # otherwise use PyCURL.
        try:
            bus = dbus.SystemBus()
            bus.get_object(DOWNLOADER_INTERFACE, '/')
            udm_available = True
        except dbus.exceptions.DBusException:
            udm_available = False
        if udm_available:
            cls = UDMDownloadManager
        elif pycurl is None:
            # pycurl is None when the module failed to import at file load.
            raise ImportError('No module named {}'.format('pycurl'))
        else:
            cls = CurlDownloadManager
    else:
        # Explicit override: truthy values select PyCURL, anything else UDM.
        cls = (CurlDownloadManager
               if use_pycurl.lower() in ('1', 'yes', 'true')
               else UDMDownloadManager)
    return cls(*args)
| ubports/system-image | systemimage/download.py | download.py | py | 9,745 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "io.StringIO",... |
43987707276 | # test CurlypivSetup
"""
Notes about program
"""
# 1.0 import modules
import numpy as np
# plotting
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import colors
import matplotlib.image as mgimg
from matplotlib import animation
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
matplotlib.rcParams['figure.figsize'] = (7, 6)
import matplotlib.font_manager as fm
fontprops = fm.FontProperties(size=16, weight='bold')
font = {'family' : 'sans-serif',
'weight' : 'light',
'size' : 16}
matplotlib.rc('font', **font)
matplotlib.rcParams['font.sans-serif'] = ['Helvetica']
# OpenPIV
# ----- imports for OpenPIV -----
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/Users/mackenzie/PythonProjects/openpiv')
import openpiv.piv
from openpiv import windef
from openpiv.windef import Settings
# 2.0 define class
class CurlypivPIVSetup(object):
    """Container for all OpenPIV processing settings derived from a test setup.

    NOTE(review): attribute semantics are inferred from usage here; confirm
    against the CurlypivTestSetup definitions (optics, chip, channel).
    """

    def __init__(self, name, save_text, save_plot, testCollection, testSetup,
                 win1=128, win2=64, show_plot=False, save_plot_path=None, save_text_path=None, vectors_on_image=True,
                 calculate_zeta=False, replace_Nans_with_zeros=True, save_u_mean_plot=False,
                 u_min=-40, u_max=40, v_min=-2.5, v_max=2.5):
        """Build the OpenPIV ``Settings`` object and derived experiment scalars.

        win1/win2 are the interrogation window sizes (pixels) for the two PIV
        passes; u/v_min/max are velocity validation bounds in microns/second.
        """
        # setup
        self._name = name
        self.save_text = save_text
        self.save_plot = save_plot
        self.save_u_mean_plot = save_u_mean_plot
        self.save_text_path = save_text_path
        self.save_plot_path = save_plot_path
        self.show_plot = show_plot
        self.calculate_zeta = calculate_zeta
        # OpenPIV
        self.settings = Settings()
        # plotting
        self.vectors_on_image = vectors_on_image
        self.settings.scale_plot = 1
        self.colorMap = 'plasma'
        self.colorNorm = colors.Normalize(vmin=0, vmax=75)
        self.alpha = 0.65
        self.scalebar_microns = int(2500 / testSetup.optics.microscope.objective.magnification)  # units are microns
        self.dpi = 200
        # camera
        self.img_acq = testSetup.optics.microscope.ccd.img_acq_rate
        self.dtf = 1/self.img_acq  # inter-frame time (seconds)
        self.pixels_to_microns = testSetup.optics.microscope.objective.pixel_to_micron
        self.pix_per_um = 1/self.pixels_to_microns
        # experimental
        self.E_max = 10e3  # maximum applied electric field (presumably V/m — TODO confirm)
        self.particle_diameter = testSetup.optics.fluorescent_particles.diameter
        self.est_zeta = testSetup.chip.channel.material_bottom_wall_surface.zeta
        # scientific
        self.epsr = 80  # relative permittivity of the fluid (water)
        self.eps = self.epsr*8.854e-12  # absolute permittivity (F/m)
        self.mu = testSetup.chip.channel.material_fluid.viscosity
        # outputs
        # Helmholtz–Smoluchowski estimate of the EOF velocity (um/s) and its
        # characteristic pixel displacement per frame.
        self.est_u_eof = self.eps*self.est_zeta*self.E_max/self.mu*1e6
        self.char_u_eof = -self.est_u_eof*self.pix_per_um*self.dtf
        self.char_u = int(np.round(self.char_u_eof))
        # more OpenPIV
        self.settings.correlation_method = 'linear'
        self.settings.normalized_correlation = True
        self.settings.deformation_method = 'symmetric'  # 'symmetric' or 'second image'
        self.settings.windowsizes = (win1, win2)  # sizex//4, sizex//8 suggestion is these are power of 2 of each other
        self.settings.overlap = (win1//2, win2//2)  # should be 50%-75% of window size (Raffel)
        self.settings.num_iterations = len(self.settings.windowsizes)
        self.settings.subpixel_method = 'gaussian'  # subpixel interpolation: 'gaussian','centroid','parabolic'
        self.settings.interpolation_order = 3  # interpolation order for the window deformation (suggested: 3-5)
        self.settings.scaling_factor = self.pix_per_um  # scaling factor pixel/meter
        self.settings.dt = self.dtf  # time between to frames (in seconds)
        self.settings.ROI = ('full')
        # snr parameters
        self.mask_first_pass = True  # Mask first pass
        self.mask_multi_pass = True
        #self.settings.extract_sig2noise = True # Compute SNR for last pass / if False: SNR set to NaN in output txt.
        self.settings.image_mask = True  # Do image masking
        self.settings.sig2noise_method = 'peak2peak'  # Method to compute SNR: 'peak2peak' or 'peak2mean'
        self.settings.sig2noise_mask = 3  # (2 - 5) exclusion distance between highest peak and second highest peak in correlation map
        # min/max velocity vectors for validation
        self.u_min = u_min  # microns / second
        self.u_max = u_max
        self.v_min = v_min  # microns / second
        self.v_max = v_max
        # Convert um/s bounds to pixels/frame for OpenPIV validation.
        self.settings.MinMax_U_disp = (self.u_min * self.pix_per_um * self.dtf, self.u_max * self.pix_per_um * self.dtf)  # filter u (units: pixels/frame)
        self.settings.MinMax_V_disp = (self.v_min * self.pix_per_um * self.dtf, self.v_max * self.pix_per_um * self.dtf)  # filter v (units: pixels/frame)
        # vector validation
        self.settings.validation_first_pass = True  # Vector validation of first pass
        self.u_uncertainty = 10  # if std(u)*2 < uncertainty: don't apply global std threshold
        self.v_uncertainty = 10  # if std(v)*2 < uncertainty: don't apply global std threshold
        self.settings.std_threshold = 2.75  # global std validation threshold: global mean +/- stdev * std_threshold
        self.settings.median_threshold = 2.75  # median validation threshold
        self.settings.median_size = 2  # defines the size of the local median kernel
        self.settings.sig2noise_validate = True  # Enables validation by SNR ratio
        self.settings.sig2noise_threshold = 1.2  # [1.2-1.5] Sets snr threshold for removing vectors (R. D. Keane and R. J. Adrian, Measurement Science & Technology, 1990)
        # outlier replacement and smoothing
        self.settings.replace_vectors = True  # Outlier replacement for last pass
        self.replace_Nans_with_zeros = replace_Nans_with_zeros  # Outlier replacement where all Nans = 0
        self.settings.smoothn = False  # Enables Garcia smoothing function of velocity fields
        self.settings.smoothn_p = [0.01]  # [0.5] Smoothing parameter or auto-calculated using generalized cross-validation (GCV) method
        self.settings.filter_method = 'distance'  # Replace outlier vector method: localmean [square] or disk (unweighted circle), distance (weighted circle)
        self.settings.max_filter_iteration = 3  # maximum iterations performed to replace the outliers (max 10)
        self.settings.filter_kernel_size = 2  # kernel size for replacing outlier vectors (default
        self.settings._freeze()
        # print PIV settings
        print('Min/Max U-displacement: ', self.settings.MinMax_U_disp, ' (pixels/frame)')
        print('Min/Max U-displacement: ', np.array([self.settings.MinMax_U_disp[0], self.settings.MinMax_U_disp[1]], dtype=int)*self.pixels_to_microns/self.dtf, ' (um/s)')
        print('Min/Max V-displacement: ', self.settings.MinMax_V_disp, ' (pixels/frame)')
print('Min/Max V-displacement: ', np.array([self.settings.MinMax_V_disp[0],self.settings.MinMax_V_disp[1]], dtype=int)*self.pixels_to_microns/self.dtf, ' (um/s)') | sean-mackenzie/curlypiv | curlypiv/CurlypivPIVSetup.py | CurlypivPIVSetup.py | py | 7,120 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 18,
"usage_type": "name"
},
... |
29047649100 | import atexit
import os
import re
import shutil
import subprocess
import sys
import tempfile
def usage():
    """Write command-line usage help to stderr and terminate with status 1."""
    lines = (
        'Usage: {} [-h] -d <directory> [-b <bitrate>]\n'.format(sys.argv[0]),
        ' -h display help\n',
        ' -b bitrate [32-320, default 192]\n',
        ' -d the target directory\n',
    )
    for line in lines:
        sys.stderr.write(line)
    sys.exit(1)
def get_track_file(path_prefix, track):
    """Return the mp3 path for *track*: '<path_prefix>/NN Title.mp3'."""
    number = int(track['track'])
    title = sanitize(track['title'])
    return "%s/%02d %s.mp3" % (path_prefix, number, title)
def sanitize(s):
    """Make *s* safe as a file-name component ('/' -> '-', ':' -> ' -')."""
    return s.translate(str.maketrans({"/": "-", ":": " -"}))
# Script entry point: rip an audio CD with cdda2wav (including CDDB lookup)
# into a temporary directory, then encode each track to mp3 with lame, tagged
# from the CDDB metadata, into <target_dir>/<artist>/<album>/.
if __name__ == "__main__":
    target_dir = None
    bitrate = 192
    argc = 1
    # Minimal hand-rolled option parsing: -h, -d <dir>, -b <bitrate>.
    while argc + 1 < len(sys.argv):
        if sys.argv[argc] == "-h":
            usage()
        elif sys.argv[argc] == "-d":
            target_dir = os.path.abspath(sys.argv[argc + 1])
            argc += 2
        elif sys.argv[argc] == "-b":
            bitrate = int(sys.argv[argc + 1])
            argc += 2
        else:
            break
    if target_dir is None or argc < len(sys.argv):
        usage()
    if not os.path.isdir(target_dir):
        sys.stderr.write("Directory '{}' doesn't exist\n".format(target_dir))
        exit(1)
    artist = None
    title = None
    year = None
    genre = None
    tracks = list()
    # Rip into a throwaway working directory that is cleaned up at exit.
    work_dir = tempfile.mkdtemp()
    os.chdir(work_dir)
    atexit.register(shutil.rmtree, work_dir)
    subprocess.check_call(["cdda2wav", "-alltracks", "-cddb", "1"])
    # cdda2wav writes one .inf metadata file (ISO-8859-1) per ripped track.
    for file in os.listdir(work_dir):
        if file.endswith(".inf"):
            path = os.path.join(work_dir, file)
            with open(path, mode="r", encoding="iso8859-1") as f:
                track = dict()
                track['file'] = path[:-3] + "wav"
                for line in f:
                    parts = re.split(r"\s*=\s*", line.rstrip(), 1)
                    if parts[0] == "Albumperformer" and artist is None:
                        artist = parts[1].rstrip("'").lstrip("'")
                    elif parts[0] == "Albumtitle" and title is None:
                        title = parts[1].rstrip("'").lstrip("'")
                    elif parts[0] == "Tracknumber":
                        track['track'] = parts[1]
                    elif parts[0] == "Tracktitle":
                        track['title'] = parts[1].rstrip("'").lstrip("'")
                tracks.append(track)
    # Year and genre come from the CDDB response file, if present.
    cddb_file = os.path.join(work_dir, "audio.cddb")
    if os.path.exists(cddb_file):
        with open(cddb_file, mode="r", encoding="iso8859-1") as f:
            for line in f:
                parts = re.split(r"\s*=\s*", line.rstrip(), 1)
                if parts[0] == "DYEAR":
                    year = parts[1]
                elif parts[0] == "DGENRE":
                    genre = parts[1]
    track_count = len(tracks)
    if track_count == 0:
        # Keep the working directory around so the user can recover the rips.
        sys.stderr.write("No CDDB information available. Please process the files in {} manually\n".format(work_dir))
        atexit.unregister(shutil.rmtree)
        exit(1)
    path_prefix = os.path.join(target_dir, sanitize(artist), sanitize(title))
    os.makedirs(path_prefix, exist_ok=True)
    album_args = ["--ta", artist, "--tl", title, "--ty", year, "--tg", genre]
    for track in tracks:
        subprocess.check_call(["lame", "-b", str(bitrate), "-B", str(bitrate), "--tt", track['title'], "--tn",
                               "{}/{}".format(track['track'], track_count)] + album_args +
                              [track['file'], get_track_file(path_prefix, track)])
| eskaton/py-ripcd | ripcd.py | ripcd.py | py | 3,527 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.stderr.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
... |
43633170723 |
from __future__ import absolute_import
import logging
import math
#typing
import torch
import torch.nn.functional as F
#overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, SpanPruner
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import MentionRecall, ConllCorefScores
try:
from itertools import izip
except:
izip = zip
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class CoreferenceResolver(Model):
u"""
This ``Model`` implements the coreference resolution model described "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
             vocab,
             text_field_embedder,
             context_layer,
             mention_feedforward,
             antecedent_feedforward,
             feature_size,
             max_span_width,
             spans_per_word,
             max_antecedents,
             lexical_dropout=0.2,
             initializer=InitializerApplicator(),
             regularizer=None):
    # Parameter semantics are documented on the class docstring above.
    super(CoreferenceResolver, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._context_layer = context_layer
    self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
    # Scores each span's "mention-ness"; used by the span pruner below.
    feedforward_scorer = torch.nn.Sequential(
        TimeDistributed(mention_feedforward),
        TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)))
    self._mention_pruner = SpanPruner(feedforward_scorer)
    self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
    # Endpoint extractor concatenates contextualized start/end token
    # representations ("x,y") plus a learned span-width embedding.
    self._endpoint_span_extractor = EndpointSpanExtractor(context_layer.get_output_dim(),
                                                          combination=u"x,y",
                                                          num_width_embeddings=max_span_width,
                                                          span_width_embedding_dim=feature_size,
                                                          bucket_widths=False)
    self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=text_field_embedder.get_output_dim())
    # 10 possible distance buckets.
    self._num_distance_buckets = 10
    self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
    self._max_span_width = max_span_width
    self._spans_per_word = spans_per_word
    self._max_antecedents = max_antecedents
    self._mention_recall = MentionRecall()
    self._conll_coref_scores = ConllCorefScores()
    # Identity function when dropout is disabled, so forward() can apply
    # self._lexical_dropout unconditionally.
    if lexical_dropout > 0:
        self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
    else:
        self._lexical_dropout = lambda x: x
    initializer(self)
#overrides
def forward(self,  # type: ignore
            text,
            spans,
            span_labels=None,
            metadata=None):
    # pylint: disable=arguments-differ
    u"""
    Parameters
    ----------
    text : ``Dict[str, torch.LongTensor]``, required.
        The output of a ``TextField`` representing the text of
        the document.
    spans : ``torch.IntTensor``, required.
        A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
        indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
        indices into the text of the document.
    span_labels : ``torch.IntTensor``, optional (default = None)
        A tensor of shape (batch_size, num_spans), representing the cluster ids
        of each span, or -1 for those which do not appear in any clusters.

    Returns
    -------
    An output dictionary consisting of:
    top_spans : ``torch.IntTensor``
        A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
        the start and end word indices of the top spans that survived the pruning stage.
    antecedent_indices : ``torch.IntTensor``
        A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
        the index (with respect to top_spans) of the possible antecedents the model considered.
    predicted_antecedents : ``torch.IntTensor``
        A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
        index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
        was no predicted link.
    loss : ``torch.FloatTensor``, optional
        A scalar loss to be optimised.
    """
    # Shape: (batch_size, document_length, embedding_size)
    text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
    document_length = text_embeddings.size(1)
    num_spans = spans.size(1)
    # Shape: (batch_size, document_length)
    text_mask = util.get_text_field_mask(text).float()
    # Shape: (batch_size, num_spans)
    span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
    # SpanFields return -1 when they are used as padding. As we do
    # some comparisons based on span widths when we attend over the
    # span representations that we generate from these indices, we
    # need them to be <= 0. This is only relevant in edge cases where
    # the number of spans we consider after the pruning stage is >= the
    # total number of spans, because in this case, it is possible we might
    # consider a masked span.
    # Shape: (batch_size, num_spans, 2)
    spans = F.relu(spans.float()).long()
    # Shape: (batch_size, document_length, encoding_dim)
    contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
    # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
    endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans)
    # Shape: (batch_size, num_spans, emebedding_size)
    attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
    # Shape: (batch_size, num_spans, emebedding_size + 2 * encoding_dim + feature_size)
    span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
    # Prune based on mention scores.
    num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))
    (top_span_embeddings, top_span_mask,
     top_span_indices, top_span_mention_scores) = self._mention_pruner(span_embeddings,
                                                                       span_mask,
                                                                       num_spans_to_keep)
    top_span_mask = top_span_mask.unsqueeze(-1)
    # Shape: (batch_size * num_spans_to_keep)
    # torch.index_select only accepts 1D indices, but here
    # we need to select spans for each element in the batch.
    # This reformats the indices to take into account their
    # index into the batch. We precompute this here to make
    # the multiple calls to util.batched_index_select below more efficient.
    flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)
    # Compute final predictions for which spans to consider as mentions.
    # Shape: (batch_size, num_spans_to_keep, 2)
    top_spans = util.batched_index_select(spans,
                                          top_span_indices,
                                          flat_top_span_indices)
    # Compute indices for antecedent spans to consider.
    max_antecedents = min(self._max_antecedents, num_spans_to_keep)
    # Now that we have our variables in terms of num_spans_to_keep, we need to
    # compare span pairs to decide each span's antecedent. Each span can only
    # have prior spans as antecedents, and we only consider up to max_antecedents
    # prior spans. So the first thing we do is construct a matrix mapping a span's
    # index to the indices of its allowed antecedents. Note that this is independent
    # of the batch dimension - it's just a function of the span's position in
    # top_spans. The spans are in document order, so we can just use the relative
    # index of the spans to know which other spans are allowed antecedents.
    # Once we have this matrix, we reformat our variables again to get embeddings
    # for all valid antecedents for each span. This gives us variables with shapes
    # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
    # we can use to make coreference decisions between valid span pairs.
    # Shapes:
    # (num_spans_to_keep, max_antecedents),
    # (1, max_antecedents),
    # (1, num_spans_to_keep, max_antecedents)
    valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask =\
        self._generate_valid_antecedents(num_spans_to_keep, max_antecedents, util.get_device_of(text_mask))
    # Select tensors relating to the antecedent spans.
    # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    candidate_antecedent_embeddings = util.flattened_index_select(top_span_embeddings,
                                                                 valid_antecedent_indices)
    # Shape: (batch_size, num_spans_to_keep, max_antecedents)
    candidate_antecedent_mention_scores = util.flattened_index_select(top_span_mention_scores,
                                                                     valid_antecedent_indices).squeeze(-1)
    # Compute antecedent scores.
    # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    span_pair_embeddings = self._compute_span_pair_embeddings(top_span_embeddings,
                                                              candidate_antecedent_embeddings,
                                                              valid_antecedent_offsets)
    # Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
    coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
                                                          top_span_mention_scores,
                                                          candidate_antecedent_mention_scores,
                                                          valid_antecedent_log_mask)
    # We now have, for each span which survived the pruning stage,
    # a predicted antecedent. This implies a clustering if we group
    # mentions which refer to each other in a chain.
    # Shape: (batch_size, num_spans_to_keep)
    _, predicted_antecedents = coreference_scores.max(2)
    # Subtract one here because index 0 is the "no antecedent" class,
    # so this makes the indices line up with actual spans if the prediction
    # is greater than -1.
    predicted_antecedents -= 1
    output_dict = {u"top_spans": top_spans,
                   u"antecedent_indices": valid_antecedent_indices,
                   u"predicted_antecedents": predicted_antecedents}
    if span_labels is not None:
        # Find the gold labels for the spans which we kept.
        pruned_gold_labels = util.batched_index_select(span_labels.unsqueeze(-1),
                                                       top_span_indices,
                                                       flat_top_span_indices)
        antecedent_labels = util.flattened_index_select(pruned_gold_labels,
                                                        valid_antecedent_indices).squeeze(-1)
        antecedent_labels += valid_antecedent_log_mask.long()
        # Compute labels.
        # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
        gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
                                                                     antecedent_labels)
        # Now, compute the loss using the negative marginal log-likelihood.
        # This is equal to the log of the sum of the probabilities of all antecedent predictions
        # that would be consistent with the data, in the sense that we are minimising, for a
        # given span, the negative marginal log likelihood of all antecedents which are in the
        # same gold cluster as the span we are currently considering. Each span i predicts a
        # single antecedent j, but there might be several prior mentions k in the same
        # coreference cluster that would be valid antecedents. Our loss is the sum of the
        # probability assigned to all valid antecedents. This is a valid objective for
        # clustering as we don't mind which antecedent is predicted, so long as they are in
        # the same coreference cluster.
        coreference_log_probs = util.last_dim_log_softmax(coreference_scores, top_span_mask)
        correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
        negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
        # Update metrics alongside the loss (only possible with gold labels).
        self._mention_recall(top_spans, metadata)
        self._conll_coref_scores(top_spans, valid_antecedent_indices, predicted_antecedents, metadata)
        output_dict[u"loss"] = negative_marginal_log_likelihood
    if metadata is not None:
        output_dict[u"document"] = [x[u"original_text"] for x in metadata]
    return output_dict
#overrides
def decode(self, output_dict):
    u"""
    Converts the list of spans and predicted antecedent indices into clusters
    of spans for each element in the batch.
    Parameters
    ----------
    output_dict : ``Dict[str, torch.Tensor]``, required.
        The result of calling :func:`forward` on an instance or batch of instances.
    Returns
    -------
    The same output dictionary, but with an additional ``clusters`` key:
    clusters : ``List[List[List[Tuple[int, int]]]]``
        A nested list, representing, for each instance in the batch, the list of clusters,
        which are in turn comprised of a list of (start, end) inclusive spans into the
        original document.
    """
    # A tensor of shape (batch_size, num_spans_to_keep, 2), representing
    # the start and end indices of each span.
    batch_top_spans = output_dict[u"top_spans"].detach().cpu()
    # A tensor of shape (batch_size, num_spans_to_keep) representing, for each span,
    # the index into ``antecedent_indices`` which specifies the antecedent span. Additionally,
    # the index can be -1, specifying that the span has no predicted antecedent.
    batch_predicted_antecedents = output_dict[u"predicted_antecedents"].detach().cpu()
    # A tensor of shape (num_spans_to_keep, max_antecedents), representing the indices
    # of the predicted antecedents with respect to the 2nd dimension of ``batch_top_spans``
    # for each antecedent we considered.
    antecedent_indices = output_dict[u"antecedent_indices"].detach().cpu()
    batch_clusters = []
    # Calling zip() on two tensors results in an iterator over their
    # first dimension. This is iterating over instances in the batch.
    # NOTE: ``izip`` is this file's py2-compat alias for itertools.izip.
    for top_spans, predicted_antecedents in izip(batch_top_spans, batch_predicted_antecedents):
        spans_to_cluster_ids = {}  # maps an (start, end) span to its cluster id
        clusters = []
        for i, (span, predicted_antecedent) in enumerate(izip(top_spans, predicted_antecedents)):
            if predicted_antecedent < 0:
                # We don't care about spans which are
                # not co-referent with anything.
                continue
            # Find the right cluster to update with this span.
            # To do this, we find the row in ``antecedent_indices``
            # corresponding to this span we are considering.
            # The predicted antecedent is then an index into this list
            # of indices, denoting the span from ``top_spans`` which is the
            # most likely antecedent.
            predicted_index = antecedent_indices[i, predicted_antecedent]
            antecedent_span = (top_spans[predicted_index, 0].item(),
                               top_spans[predicted_index, 1].item())
            # Check if we've seen the span before.
            if antecedent_span in spans_to_cluster_ids:
                predicted_cluster_id = spans_to_cluster_ids[antecedent_span]
            else:
                # We start a new cluster.
                predicted_cluster_id = len(clusters)
                # Append a new cluster containing only this span.
                clusters.append([antecedent_span])
                # Record the new id of this span.
                spans_to_cluster_ids[antecedent_span] = predicted_cluster_id
            # Now add the span we are currently considering.
            span_start, span_end = span[0].item(), span[1].item()
            clusters[predicted_cluster_id].append((span_start, span_end))
            spans_to_cluster_ids[(span_start, span_end)] = predicted_cluster_id
        batch_clusters.append(clusters)
    output_dict[u"clusters"] = batch_clusters
    return output_dict
#overrides
def get_metrics(self, reset=False):
    u"""
    Return the model's evaluation metrics: the three CoNLL coreference
    scores plus mention recall.  Passing ``reset=True`` is forwarded to
    the underlying metric objects so accumulated counts are cleared.
    """
    precision, recall, f1 = self._conll_coref_scores.get_metric(reset)
    metrics = {
        u"coref_precision": precision,
        u"coref_recall": recall,
        u"coref_f1": f1,
        u"mention_recall": self._mention_recall.get_metric(reset),
    }
    return metrics
@staticmethod
def _generate_valid_antecedents(num_spans_to_keep: int,
                                max_antecedents: int,
                                device: int):
    u"""
    This method generates possible antecedents per span which survived the pruning
    stage. This procedure is `generic across the batch`. The reason this is the case is
    that each span in a batch can be coreferent with any previous span, but here we
    are computing the possible `indices` of these spans. So, regardless of the batch,
    the 1st span _cannot_ have any antecedents, because there are none to select from.
    Similarly, each element can only predict previous spans, so this returns a matrix
    of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
    (i - 1) - j if j <= i, or zero otherwise.
    Parameters
    ----------
    num_spans_to_keep : ``int``, required.
        The number of spans that were kept while pruning.
    max_antecedents : ``int``, required.
        The maximum number of antecedent spans to consider for every span.
    device: ``int``, required.
        The CUDA device to use.
    Returns
    -------
    valid_antecedent_indices : ``torch.IntTensor``
        The indices of every antecedent to consider with respect to the top k spans.
        Has shape ``(num_spans_to_keep, max_antecedents)``.
    valid_antecedent_offsets : ``torch.IntTensor``
        The distance between the span and each of its antecedents in terms of the number
        of considered spans (i.e not the word distance between the spans).
        Has shape ``(1, max_antecedents)``.
    valid_antecedent_log_mask : ``torch.FloatTensor``
        The logged mask representing whether each antecedent span is valid. Required since
        different spans have different numbers of valid antecedents. For example, the first
        span in the document should have no valid antecedents.
        Has shape ``(1, num_spans_to_keep, max_antecedents)``.
    """
    # Shape: (num_spans_to_keep, 1)
    target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
    # Offsets 1..max_antecedents, i.e. "how many kept spans back".
    # Shape: (1, max_antecedents)
    valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
    # This is a broadcasted subtraction.
    # Shape: (num_spans_to_keep, max_antecedents)
    raw_antecedent_indices = target_indices - valid_antecedent_offsets
    # In our matrix of indices, the upper triangular part will be negative
    # because the offsets will be > the target indices. We want to mask these,
    # because these are exactly the indices which we don't want to predict, per span.
    # We're generating a logspace mask here because we will eventually create a
    # distribution over these indices, so we need the 0 elements of the mask to be -inf
    # in order to not mess up the normalisation of the distribution.
    # Shape: (1, num_spans_to_keep, max_antecedents)
    valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()
    # relu clamps the (masked-out) negative indices to 0 so they stay safe to gather with.
    # Shape: (num_spans_to_keep, max_antecedents)
    valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
    return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
def _compute_span_pair_embeddings(self,
                                  top_span_embeddings,
                                  antecedent_embeddings,
                                  antecedent_offsets):
    u"""
    Computes an embedding representation of pairs of spans for the pairwise scoring function
    to consider. This includes both the original span representations, the element-wise
    similarity of the span representations, and an embedding representation of the distance
    between the two spans.
    Parameters
    ----------
    top_span_embeddings : ``torch.FloatTensor``, required.
        Embedding representations of the top spans. Has shape
        (batch_size, num_spans_to_keep, embedding_size).
    antecedent_embeddings : ``torch.FloatTensor``, required.
        Embedding representations of the antecedent spans we are considering
        for each top span. Has shape
        (batch_size, num_spans_to_keep, max_antecedents, embedding_size).
    antecedent_offsets : ``torch.IntTensor``, required.
        The offsets between each top span and its antecedent spans in terms
        of spans we are considering. Has shape (1, max_antecedents).
    Returns
    -------
    span_pair_embeddings : ``torch.FloatTensor``
        Embedding representation of the pair of spans to consider. Has shape
        (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    """
    # Broadcast each top span's embedding over its candidate antecedents.
    # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
    # Bucket the raw offsets so distant antecedents share a coarse distance feature.
    # Shape: (1, max_antecedents, embedding_size)
    antecedent_distance_embeddings = self._distance_embedding(
        util.bucket_values(antecedent_offsets,
                           num_total_buckets=self._num_distance_buckets))
    # Shape: (1, 1, max_antecedents, embedding_size)
    antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
    expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
                                          antecedent_embeddings.size(1),
                                          antecedent_embeddings.size(2),
                                          antecedent_distance_embeddings.size(-1))
    # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
    # Concatenate: [span; antecedent; span * antecedent; distance] along the feature dim.
    # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
    span_pair_embeddings = torch.cat([target_embeddings,
                                      antecedent_embeddings,
                                      antecedent_embeddings * target_embeddings,
                                      antecedent_distance_embeddings], -1)
    return span_pair_embeddings
@staticmethod
def _compute_antecedent_gold_labels(top_span_labels ,
antecedent_labels ):
u"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
Parameters
----------
top_span_labels : ``torch.IntTensor``, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : ``torch.IntTensor``, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
Returns
-------
pairwise_labels_with_dummy_label : ``torch.FloatTensor``
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
# Shape: (batch_size, num_spans_to_keep, 1)
dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([dummy_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(self,
pairwise_embeddings ,
top_span_mention_scores ,
antecedent_mention_scores ,
antecedent_log_mask ) :
u"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
Parameters
----------
pairwise_embeddings: ``torch.FloatTensor``, required.
Embedding representations of pairs of spans. Has shape
(batch_size, num_spans_to_keep, max_antecedents, encoding_dim)
top_span_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every span. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every antecedent. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_log_mask: ``torch.FloatTensor``, required.
The log of the mask for valid antecedents.
Returns
-------
coreference_scores: ``torch.FloatTensor``
A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),
representing the unormalised score for each (span, antecedent) pair
we considered.
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
antecedent_scores += top_span_mention_scores + antecedent_mention_scores
antecedent_scores += antecedent_log_mask
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
CoreferenceResolver = Model.register(u"coref")(CoreferenceResolver)
| plasticityai/magnitude | pymagnitude/third_party/allennlp/models/coreference_resolution/coref.py | coref.py | py | 32,507 | python | en | code | 1,607 | github-code | 6 | [
{
"api_name": "itertools.izip",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "allennlp.models.model.Model",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "allennlp... |
73928144189 | import django_rq
import logging
from datetime import datetime, timedelta
import time
from django.core.management.base import BaseCommand
from django_rq import job
from reviews.models import Review
#@job
def get_count_reviews():
    """Count the rows in the Review table and report the total to the
    'counter' logger; log an error if the table is empty."""
    log = logging.getLogger('counter')
    total = Review.objects.count()
    # NOTE(review): the sleep looks like it simulates a slow job — confirm intent.
    time.sleep(4)
    if not total:
        log.error('Something went wrong!')
        return
    log.info(f'Всего отзывов в системе: {total}')
class Command(BaseCommand):
    help = "Выводит на экран и в лог количество записей в таблице Review"

    def handle(self, *args, **options):
        """Schedule ``get_count_reviews`` on the default rq scheduler:
        first run immediately, then every 10 seconds, 4 runs in total."""
        django_rq.get_scheduler('default').schedule(
            scheduled_time=datetime.now(),
            func=get_count_reviews,
            interval=10,
            repeat=4,
        )
| idesu/review_moderation_lite | reviews/management/commands/log.py | log.py | py | 881 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "reviews.models.Review.objects.count",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "reviews.models.Review.objects",
"line_number": 14,
"usage_type": "attribute"
},
... |
20001859716 | from nltk.stem import WordNetLemmatizer
import re
from nltk.tokenize import word_tokenize, sent_tokenize
def myTokenizer(readFileDir, saveFileDir, stopwords, lim):
    """Tokenize a text file sentence by sentence, drop stopwords and tokens of
    length <= ``lim``, lowercase and lemmatize the survivors, and write one
    cleaned sentence per line to ``saveFileDir``.

    Parameters
    ----------
    readFileDir : path of the UTF-8 input file.
    saveFileDir : path of the UTF-8 output file (overwritten).
    stopwords : container of raw (pre-lowercasing) tokens to discard.
    lim : tokens with len <= lim are discarded.

    Returns
    -------
    list of token lists, one per sentence, after filtering/lemmatization.
    """
    readFile = open(readFileDir, "r", encoding="UTF8")
    saveFile = open(saveFileDir, "w", encoding="UTF8")
    preprocessed = []
    lemmatizer = WordNetLemmatizer()
    try:
        for line in readFile:
            for sent in sent_tokenize(line):
                # Keep only ASCII letters; everything else becomes a space.
                sent = re.sub("[^a-zA-Z]", " ", sent)
                wordTokenized = word_tokenize(sent)
                i = 0
                while i < len(wordTokenized):
                    if len(wordTokenized[i]) <= lim or wordTokenized[i] in stopwords:
                        # BUGFIX: the original used ``wordTokenized.remove(...)``,
                        # which deletes the first *equal* element and could strike
                        # an already-processed duplicate earlier in the list;
                        # delete by index instead.
                        del wordTokenized[i]
                    else:
                        wordTokenized[i] = lemmatizer.lemmatize(wordTokenized[i].lower())
                        saveFile.write(wordTokenized[i])
                        # Space separator between tokens (quirk preserved: later
                        # removals can still leave a trailing space).
                        if i < len(wordTokenized) - 1:
                            saveFile.write(" ")
                        i += 1
                saveFile.write("\n")
                preprocessed.append(wordTokenized)
        print("File read finished")
    finally:
        # BUGFIX: close both handles even if tokenization raises.
        readFile.close()
        saveFile.close()
    return preprocessed
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.to... |
27082652563 | import time
from datetime import timedelta
import requests
from celery import Celery
from celery.utils.log import get_task_logger
# Celery application for the parsing worker; the broker is the RabbitMQ
# service reachable on the compose network as ``rabbitmq``.
app = Celery('parsing')
app.conf.update(broker_url='amqp://guest:guest@rabbitmq:5672', broker_connection_retry_on_startup=True)
# Beat schedule: fire the task registered as 'parsing' every 15 seconds.
# NOTE(review): the entry name says "monday-morning" but the timedelta runs
# every 15 s — the key is only a label, yet it is misleading; consider renaming.
app.conf.beat_schedule = {
    'add-every-monday-morning': {
        'task': 'parsing',
        'schedule': timedelta(seconds=15)
    },
}
# crontab(seconds='*/15')
celery_logger = get_task_logger(__name__)
@app.task(name='parsing')
def parse_excel_task():
    """Celery task: ask the API service to (re)parse the excel source.

    Registered under the explicit name 'parsing' so the beat schedule can
    reference it by that name.
    """
    # NOTE(review): purpose of the sleep is unclear — presumably waits for the
    # API container to be ready; confirm before removing.
    time.sleep(5)
    celery_logger.info('parsing')
    # BUGFIX: removed a stray debug print, and added a timeout — ``requests``
    # has no default timeout, so a dead API container would hang the worker
    # forever on this call.
    requests.post('http://api:8000/api/v1/parser/parse-excel', timeout=30)
| puplishe/testproject | fastapi1/celery/celery.py | celery.py | py | 654 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "celery.Celery",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "celery.utils.log.get_task_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time... |
26495796671 | from django.contrib import admin,messages
from .models import *
from inline_actions.admin import InlineActionsModelAdminMixin
from commun.util import Util
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Uploaded files live under Django's MEDIA_ROOT.
media_root = settings.MEDIA_ROOT
# Branding for the Django admin site.
admin.site.site_header = "Administration ERP"
admin.site.site_title = "ERP"
admin.site.index_title = "Bienvenu à l'ERP"
# Admin for senders (Expediteur).
@admin.register(Expediteur)
class ExpediteurAdmin(admin.ModelAdmin):
    list_display = ("name", "structure", "employe")
    # search_fields also makes this admin usable as an autocomplete target
    # (CourierAdmin references ``expediteur`` via autocomplete_fields).
    search_fields = ("name",)
# Admin for courier types; list shows only the name.
@admin.register(TypeCourier)
class TypeCourierAdmin(admin.ModelAdmin):
    list_display = ("name",)
# Admin for classifications; list shows only the name.
@admin.register(Classification)
class ClassificationAdmin(admin.ModelAdmin):
    list_display = ("name",)
# Admin for courier statuses; list shows only the name.
@admin.register(Status)
class StatusAdmin(admin.ModelAdmin):
    list_display = ("name",)
# Admin for attachments; list shows the name and the underlying file.
@admin.register(Attachment)
class AttachmentAdmin(admin.ModelAdmin):
    list_display = ("name", "file",)
##############
# Inline attachment rows used on the Courier change form (one blank extra row).
class AttachmentTabular(admin.TabularInline):
    model = Attachment
    extra = 1
@admin.register(Courier)
class CourierAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
    """Admin for couriers: inline attachments plus per-row actions to email
    a courier to its recipients or soft-delete it."""
    inlines = [AttachmentTabular]
    autocomplete_fields = ("expediteur", "destinataires", "visible_a")
    inline_actions = ('send_by_mail', 'delete')

    def get_queryset(self, request):
        # Regular staff never see soft-deleted couriers; superusers see everything.
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(deleted=False)

    def get_list_display(self, request):
        # BUGFIX(style): renamed the local from ``list`` — it shadowed the builtin.
        columns = ("objet", "expediteur", "date_expedition", "date_arrivee",
                   "type", "classification", "status")
        # Keep the last column contributed by the parent (the inline-actions column).
        parent_last = (super().get_list_display(request).pop(),)
        if request.user.is_superuser:
            # Superusers additionally see the soft-delete flag.
            return columns + ('deleted',) + parent_last
        return columns + parent_last

    def send_by_mail(self, request, obj: Courier, parent_obj):
        """Send the courier by email to every user it is visible to,
        attaching its files; report success/failure via admin messages."""
        # BUGFIX: the original iterated ``visible_a`` twice and built two unused
        # locals (``arr`` and the ignored return value ``res``); build each list once.
        to = [dist.getExpediteur().email for dist in obj.visible_a.all()]
        source = obj.expediteur.getExpediteur().email
        files = [attachment.file.name for attachment in obj.attachments.all()]
        try:
            Util.send_email(subject=obj.objet, message=obj.objet, source=source,
                            to=to, cc=[], attachments=files)
            messages.success(request, _("Email Envoyé"))
        except Exception:
            messages.error(request, _("Erreur d'envoie"))
    send_by_mail.short_description = 'envoyer email'

    def delete(self, request, obj: Courier, parent_obj):
        """Soft-delete: set the flag instead of removing the row."""
        obj.deleted = True
        obj.save()
    delete.short_description = 'supprimer'
| bofilio/erp-backend | couriers/admin.py | admin.py | py | 2,740 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 9,
"usage_type": "attribute"
},
{
... |
20571972816 | from ply.lex import LexToken
from windyquery.validator import ValidationError
from ._base import Base
TOKEN = 'SCHEMA'


class SchemaToken(LexToken):
    """A synthesized ply token of type SCHEMA carrying a validated value."""

    def __init__(self, value):
        self.type = TOKEN
        self.value = value
        # Synthesized tokens have no real source location.
        self.lineno = self.lexpos = 0
class Schema(Base):
    """Collector piece that validates a schema string and appends it as a token."""

    def schema(self, s: str):
        """Validate ``s`` via the collector's validator and append a SchemaToken.

        Raises ``UserWarning`` (wrapping the validator's message) on invalid input.
        """
        try:
            validated = self.validator.validate_schema(s)
        except ValidationError as err:
            # Surface a user-facing warning without chaining the internal traceback.
            raise UserWarning(f'invalid schema: {err}') from None
        self.append(SchemaToken(validated))
| bluerelay/windyquery | windyquery/collector/schema.py | schema.py | py | 540 | python | en | code | 68 | github-code | 6 | [
{
"api_name": "ply.lex.LexToken",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "_base.Base",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "windyquery.validator.ValidationError",
"line_number": 21,
"usage_type": "name"
}
] |
15018836415 | import tkinter as tk
import customtkinter as ctk
from customtkinter import ThemeManager
from View.GUI.CustomWidgets.NotebookCloseableTabs import NotebookCloseableTabs
from View.GUI.Windows.ResultWindow.ComputationStatusBoard import ComputationStatusBoard
from View.Observer import Observer
class ComputationNotebook(NotebookCloseableTabs, Observer):
    """Closable-tab notebook showing a computation's status board and, as they
    arrive, one read-only text tab per finished result."""

    def __init__(self, parent, computation, controller):
        fg_color = ThemeManager.theme["color_scale"]["inner"]
        super().__init__(parent, color_notebook_bg=fg_color)
        self.add_widget(ComputationStatusBoard(self, computation, controller), "Status Board")
        # Finished results waiting to be rendered into tabs by the poll loop.
        self.results = []
        # ``after`` callback id, kept so destroy() can cancel the poll loop.
        self.handler_id = None
        # Unfinished results we observe; tracked so destroy() can unsubscribe.
        self.subscribed_results = []
        Observer.__init__(self, computation)
        for result in computation.results:
            if result.is_finished:
                self.results.append(result)
            else:
                result.subscribe(self)
                self.subscribed_results.append(result)
        # Render anything already finished and start the polling cycle.
        self.handle_tasks()

    def update_(self, updated_component):
        # Observer callback: updated_component is (result, event_name);
        # queue results that just finished for the next handle_tasks pass.
        if updated_component[1] == "finished_result":
            result = updated_component[0]
            self.results.append(result)

    def start_task_handler(self):
        """
        Starts the task handler.
        """
        # Re-poll every 2 seconds on the Tk event loop.
        self.handler_id = self.after(2000, self.handle_tasks)

    def handle_tasks(self):
        """
        Handles received results.
        """
        # Iterate over a copy so we can remove while iterating.
        for result in list(self.results):
            self.add_result(result)
            self.results.remove(result)
        self.start_task_handler()

    def destroy(self):
        # Stop the poll loop and detach from all observed subjects before
        # tearing down the widget, so no callbacks fire on a dead widget.
        self.after_cancel(self.handler_id)
        self.observed_subject.unsubscribe(self)
        self.observed_subject = None
        for result in self.subscribed_results:
            result.unsubscribe(self)
        self.subscribed_results.clear()
        super().destroy()

    def add_result(self, result):
        """
        Adds results to the notebook.

        :param result: result object
        """
        fg_color = ThemeManager.theme["color_scale"]["inner"]
        textbox = ctk.CTkTextbox(self, fg_color=fg_color, bg_color=fg_color, wrap="word")
        textbox.insert(tk.END, result.result_text)
        self.add_widget(textbox, result.configuration_name)
| Moni5656/npba | View/GUI/Windows/ResultWindow/ComputationNotebook.py | ComputationNotebook.py | py | 2,306 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "View.GUI.CustomWidgets.NotebookCloseableTabs.NotebookCloseableTabs",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "View.Observer.Observer",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "customtkinter.ThemeManager.theme",
"line_number": 1... |
18187153319 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Sample the unit sphere on a uniform (theta, phi) grid. Note this is uniform
# in the parameters, not in surface area, so points cluster near the poles.
t = np.linspace(0, 2*np.pi, 20)   # azimuth samples
p = np.linspace(0, np.pi, 10)     # polar samples
theta, phi = np.meshgrid(t, p)
# Spherical -> Cartesian on the unit sphere.
x = np.cos(theta)*np.sin(phi)
y = np.sin(theta)*np.sin(phi)
z = np.cos(phi)
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_subplot(121)                    # left: parameter-plane scatter
ax2 = fig.add_subplot(122, projection='3d')   # right: resulting sphere surface
ax1.plot(theta.flatten(), phi.flatten(), 'o')
ax1.set_xlabel("$\\theta$")
ax1.set_ylabel("$\\phi$")
ax2.plot_surface(x, y, z, edgecolors='0.2')
plt.show()
| notmatthancock/notmatthancock.github.io | code/py/sphere-sampling/sphere-uniform-theta-phi.py | sphere-uniform-theta-phi.py | py | 524 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number"... |
28130211082 | ## import that shit babyyy
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QPushButton,QStackedWidget,QScrollArea, QProgressBar, QHBoxLayout, QLineEdit
from PyQt5.QtCore import QObject, QThread, pyqtSignal,Qt
# from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
from os.path import exists,join
from os import mkdir, remove
import spotipy
from datetime import datetime
from spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials, CacheFileHandler
from shutil import rmtree
# import matplotlib.pyplot as plt, matplotlib.dates as mdates
import csv
## gui
class graphWindow(QWidget):
    """Window plotting the cumulative number of saved songs over time.

    Reads the module-level globals ``data``, ``username`` and ``songData``
    — NOTE(review): these are presumably populated elsewhere in this file;
    confirm they are initialised before this window is created.
    """

    def __init__(self):
        super().__init__()
        self.layout = QVBoxLayout()
        self.stats = QLabel(data.totalSongs())
        self.layout.addWidget(self.stats)
        self.graph = pg.PlotWidget()
        # Render the x axis (unix timestamps) as dates.
        axis = pg.DateAxisItem()
        self.graph.setAxisItems({'bottom': axis})
        self.loadGraph()
        self.layout.addWidget(self.graph)
        self.move(0, 0)
        self.setLayout(self.layout)

    def loadGraph(self):
        """Recompute songs-added-per-day from ``songData`` and redraw."""
        self.setWindowTitle(username)
        self.graph.clear()
        # graph.plot((lambda date : [datetime.datetime.strptime(i,'%Y-%m-%d').timestamp() for i in date])(date), numSongs)
        # Count songs added per date; songData[i][0] is a 'YYYY-MM-DD' string.
        # NOTE(review): the counter is reset whenever the date changes, so this
        # assumes songData is grouped/ordered by date — a date reappearing later
        # would zero its count; verify the ordering upstream.
        date_num = {}
        lastDate = ''
        for i in songData:
            date = songData[i][0]
            if date != lastDate:
                lastDate = date
                dateTime = datetime.strptime(date, '%Y-%m-%d').timestamp()
                date_num[dateTime] = 0
            date_num[dateTime] += 1
        # Sorted timestamps and the per-day counts in that order.
        y = sorted(date_num)
        x = [date_num[i] for i in y]
        length = len(date_num)
        # Build cumulative totals by walking backwards from the grand total.
        cumulative_x = [len(songData)]
        for i in range(length - 1, 0, -1):  # working backwards from total songs subtracting songs added per day
            elem = cumulative_x[0] - x[i]
            cumulative_x.insert(0, elem)
        perDay = ''
        if len(y) > 1:
            # Average rate over the whole span, rounded to 2 decimals.
            perDay = ' Songs per day: %s' % round((cumulative_x[-1] - cumulative_x[0]) / (datetime.fromtimestamp(y[-1]) - datetime.fromtimestamp(y[0])).days, 2)
        self.stats.setText(data.totalSongs() + perDay)
        self.graph.plot(y, cumulative_x)
        print('Graph Loaded')
class MainWindow(QWidget):
def __init__(self):
super(MainWindow, self).__init__()
QApplication.font()
self.graph= graphWindow() # new instance of graph window so to call the functions(of this specific graph) use self.graph.classfunc() <-- ignoring self
self.resize(150,150)
self.loadedUser= my_id
# create pages(stacks)
self.home = QWidget()
self.changeUser = QWidget()
self.main= QWidget()
self.missingPage = QWidget()
self.duplicatePage = QWidget()
self.followArtists = QWidget()
self.searchPage= QWidget()
self.log= QWidget()
self.addUser= QWidget()
# create stack and add all pages
self.Stack = QStackedWidget (self)
self.Stack.addWidget (self.home)
self.Stack.addWidget (self.changeUser)
self.Stack.addWidget (self.main)
self.Stack.addWidget (self.missingPage)
self.Stack.addWidget (self.duplicatePage)
self.Stack.addWidget (self.followArtists)
self.Stack.addWidget (self.searchPage)
self.Stack.addWidget (self.log)
self.Stack.addWidget (self.addUser)
# developing the pages
self.create_home()
self.create_changeUser()
self.create_main()
self.create_missingPage()
self.create_duplicatePage()
self.create_followArtists()
self.create_searchPage()
self.create_logPage()
self.create_addUserPage()
#placing stack in window (class)
layout= QVBoxLayout()
layout.addWidget(self.Stack)
self.setLayout(layout)
self.setWindowTitle("Home")
self.show()
# Home page
def create_home(self):
layout= QVBoxLayout()
layout.setAlignment(Qt.AlignCenter)
hLayout1= QHBoxLayout()
hLayout2= QHBoxLayout()
hLayout3= QHBoxLayout()
self.currentUserLabel= QLabel("Current User: %s" % username)
self.currentUserLabel.setAlignment(Qt.AlignCenter)
layout.addWidget(self.currentUserLabel)
button1= QPushButton("Change User")
button1.clicked.connect(self.showChangeUser)
layout.addWidget(button1)
button2= QPushButton("Run")
button2.clicked.connect(self.run)
hLayout1.addWidget(button2)
button3= QPushButton("Graph")
button3.clicked.connect(self.showGraph)
hLayout1.addWidget(button3)
layout.addLayout(hLayout1)
button4= QPushButton("Missing")
button4.clicked.connect(self.showMissingPage)
hLayout2.addWidget(button4)
button5= QPushButton("Duplicate")
button5.clicked.connect(self.showDuplicatePage)
hLayout2.addWidget(button5)
layout.addLayout(hLayout2)
button6= QPushButton("Follow artists")
button6.clicked.connect(self.showFollowArtists)
hLayout3.addWidget(button6)
button7= QPushButton("Search")
button7.clicked.connect(self.showSearchPage)
hLayout3.addWidget(button7)
layout.addLayout(hLayout3)
button8= QPushButton('Log')
button8.clicked.connect(self.showLogPage)
layout.addWidget(button8)
self.home.setLayout(layout)
#Change user page
def create_changeUser(self):
layout= QVBoxLayout()
scroll, scrollContent, self.userScrollLayout= self.scrollBox()
scroll.setWidget(scrollContent)
layout.addWidget(scroll)
hLayout= QHBoxLayout()
checkUser= QPushButton('Add')
checkUser.clicked.connect(lambda event : self.showAddUser())
hLayout.addWidget(checkUser)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.changeUser.setLayout(layout)
def updateChangeUser(self):
data.get_id_user()
self.deleteLayoutItems(self.userScrollLayout)
for i in id_user:
button= QPushButton(id_user[i])
button.clicked.connect(lambda event, x=i: data.changeActiveUser(x)) # clicked.connect passes a bool to the lambda func so event takes that who knwos why x=i to save the variable as i doesnt stay??????
button.clicked.connect(lambda event : self.showHome()) # go(0)
button.clicked.connect(lambda event : self.graph.loadGraph())
self.userScrollLayout.addWidget(button)
print('Updated Change User')
# missing page
def create_missingPage(self): # this wont update after run
layout= QVBoxLayout()
hLayout= QHBoxLayout()
self.missingChange= QPushButton()
self.missingChange.clicked.connect(self.showAllMissing)
self.missingScroll, self.missingScrollContent, self.missingScrollLayout= self.scrollBox()
layout.addWidget(self.missingScroll)
self.missingScroll.setWidget(self.missingScrollContent)
hLayout.addWidget(self.missingChange)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.missingPage.setLayout(layout)
def showAllMissing(self):
self.setWindowTitle("Missing - All")
self.changeScrollContent(data.missing(), func= 0, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Deleted')
self.changeConnection(self.missingChange.clicked, self.showDeleted)
def showDeleted(self):
self.setWindowTitle("Missing - Deleted")
self.changeScrollContent(data.deleted(data.missing()), func= 1, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Missing')
self.changeConnection(self.missingChange.clicked, self.showMissing)
def showMissing(self):
self.setWindowTitle("Missing - Missing")
self.changeScrollContent(data.remDel(data.missing()), func= 2, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show Unconf')
self.changeConnection(self.missingChange.clicked, self.showUnConfMissing)
def showUnConfMissing(self):
self.setWindowTitle("Missing - Unconfirmed")
self.changeScrollContent(data.remConf(data.missing()), func= 3, scrollLayout= self.missingScrollLayout, connectionFunction=self.missingUserConf)
self.missingChange.setText('Show All')
self.changeConnection(self.missingChange.clicked, self.showAllMissing)
# duplicate page
def create_duplicatePage(self):
layout= QVBoxLayout()
hLayout= QHBoxLayout()
self.duplicateChange= QPushButton()
self.duplicateChange.clicked.connect(self.showAllDuplicate)
self.duplicateScroll, self.duplicateScrollContent, self.duplicateScrollLayout= self.scrollBox()
layout.addWidget(self.duplicateScroll)
self.duplicateScroll.setWidget(self.duplicateScrollContent)
hLayout.addWidget(self.duplicateChange)
hLayout.addWidget(self.homeButton())
layout.addLayout(hLayout)
self.duplicatePage.setLayout(layout)
def showAllDuplicate(self):
self.setWindowTitle("Duplicates - All")
self.changeScrollContent(data.duplicates(), func= 0, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show Allowed')
self.changeConnection(self.duplicateChange.clicked, self.showAllowedDuplicate)
def showIllegalDuplicate(self):
self.setWindowTitle("Duplicates - Illegal")
self.changeScrollContent(data.remAllowedDuplicates(data.duplicates()), func= 1, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show All')
self.changeConnection(self.duplicateChange.clicked, self.showAllDuplicate)
def showAllowedDuplicate(self):
self.setWindowTitle("Duplicates - Allowed")
self.changeScrollContent(list(allowedDup.keys()), func= 2, scrollLayout= self.duplicateScrollLayout, connectionFunction= self.duplicateUserConf)
self.duplicateChange.setText('Show illegal')
self.changeConnection(self.duplicateChange.clicked, self.showIllegalDuplicate)
# main(run) page
def create_main(self):
    """Build the run page: a status label and a progress bar."""
    layout= QVBoxLayout()
    self.mainLabel= QLabel("change with window.mainLabel.setText(str)")
    layout.addWidget(self.mainLabel)
    self.progress = QProgressBar()
    layout.addWidget(self.progress)
    self.main.setLayout(layout)
# follow artists page
def create_followArtists(self):
    """Build the follow-artists page: playlist buttons, status label, progress bar."""
    layout= QVBoxLayout()
    scroll, scrollContent, self.followScrollLayout= self.scrollBox()
    scroll.setWidget(scrollContent)
    layout.addWidget(scroll)
    self.followLabel= QLabel()
    layout.addWidget(self.followLabel)
    self.followProgress= QProgressBar()
    self.followProgress.setAlignment(Qt.AlignCenter)
    layout.addWidget(self.followProgress)
    layout.addWidget(self.homeButton())
    self.followArtists.setLayout(layout)
def updateFollowArtists(self):
    """Rebuild the follow page with one button per monitored playlist."""
    self.deleteLayoutItems(self.followScrollLayout)
    for playlistId in ids_playlists:
        button= QPushButton(ids_playlists[playlistId])
        # playlistId bound as a default arg to avoid the late-binding closure bug
        button.clicked.connect(lambda event , playlistId= playlistId: self.create_followWorker(playlistId))
        self.followScrollLayout.addWidget(button)
    print('Updated follow playlists')
def create_followWorker(self, playlistId): # creates worker to follow artists which updates follow artists page
    """Start a background Worker that follows all artists in *playlistId*.

    Standard Qt worker/thread wiring: start -> run, finished -> quit,
    progress/label signals feed the follow page, deleteLater for cleanup.
    """
    self.followWorker = Worker(caller= 'follow', playlistId= playlistId)
    self.followThread = QThread()
    self.followWorker.moveToThread(self.followThread)
    self.followThread.started.connect(self.followWorker.run)
    self.followWorker.finished.connect(self.followThread.quit)
    self.followWorker.progress.connect(self.update_followProgress)
    self.followWorker.mainLab.connect(self.update_followLabel)
    self.followWorker.finished.connect(self.followWorker.deleteLater)
    self.followThread.finished.connect(self.followThread.deleteLater)
    self.followThread.start()
# search page
def create_searchPage(self):
    """Build the search page: live search bar above a results scroll area."""
    layout= QVBoxLayout()
    # one long-lived thread reused by every search() call
    self.searchThread= QThread()
    scroll, scrollContent, self.searchScrollLayout= self.scrollBox()
    self.searchBar= QLineEdit()
    # search-as-you-type: every keystroke triggers a new search
    self.searchBar.textChanged.connect(lambda event : self.search())
    layout.addWidget(self.searchBar)
    # search bar enter connect or button
    layout.addWidget(scroll)
    scroll.setWidget(scrollContent)
    layout.addWidget(self.homeButton())
    self.searchPage.setLayout(layout)
def search(self):
    """Run the current search-bar text through a background search Worker.

    An in-flight search is cancelled by quitting the shared thread before
    starting a new worker; an empty query just clears the result list.
    """
    # stop previous search if ongoing(close thread opended in show search)
    self.searchThread.quit()
    toSearch= self.searchBar.text()
    self.searchWorker= Worker(caller= 'search')
    self.searchWorker.moveToThread(self.searchThread)
    self.searchThread.started.connect(self.searchWorker.run)
    self.searchWorker.finished.connect(self.searchThread.quit)
    self.searchWorker.finished.connect(self.searchWorker.deleteLater)
    self.searchWorker.searchResults.connect(self.addResults)
    if toSearch != '':
        self.searchThread.start()
    else:
        self.setWindowTitle('Search')
        self.deleteLayoutItems(self.searchScrollLayout)
def clearSearch(self):
    """Empty the search bar (textChanged then triggers search(), clearing results)."""
    print('Cleared search')
    self.searchBar.setText('')
    # self.deleteLayoutItems(self.searchScrollLayout)
def addResults(self,trackIds):
    """Display search results: one row (name, artists, playlists) per track id."""
    resultLayout= QVBoxLayout()
    resultLayout.setAlignment(Qt.AlignTop)
    self.setWindowTitle('Search - %s' % len(trackIds))
    for trackId in trackIds[:100]: # lagg if too many
        hLayout= QHBoxLayout()
        self.addSong(trackId,hLayout)
        resultLayout.addLayout(hLayout)
    self.deleteLayoutItems(self.searchScrollLayout) # using another layout and moving delete layout here removes flicker
    self.searchScrollLayout.addLayout(resultLayout)
# log page
def create_logPage(self):
    """Build the log page: scrollable log text with Clear and Home buttons."""
    layout= QVBoxLayout()
    scroll, scrollContent, self.logScrollLayout= self.scrollBox()
    layout.addWidget(scroll)
    scroll.setWidget(scrollContent)
    hLayout= QHBoxLayout()
    clear= QPushButton('Clear')
    clear.clicked.connect(lambda event : self.clearLog())
    hLayout.addWidget(clear)
    hLayout.addWidget(self.homeButton())
    layout.addLayout(hLayout)
    self.log.setLayout(layout)
def updateLog(self): #refreshes scroll area with string from log file
    """Replace the log scroll contents with the current log-file text."""
    label= QLabel(data.get_log())
    self.deleteLayoutItems(self.logScrollLayout)
    self.logScrollLayout.addWidget(label)
def clearLog(self): # clears log then refreshes log scroll area
    """Truncate the log file and refresh the on-screen view."""
    data.clear_log()
    self.updateLog()
# create user page
def create_addUserPage(self):
    """Build the add-user page: a dynamic form area plus Next/Back buttons.

    The Next button initially shows a warning because no account id has
    been entered yet; later steps rebind it via changeConnection().
    """
    layout= QVBoxLayout()
    self.createThread= QThread()
    self.addUserLayout= QVBoxLayout()  # dynamic area rebuilt as the wizard advances
    layout.addLayout(self.addUserLayout)
    hLayout= QHBoxLayout()
    self.createButton= QPushButton('Next')
    # bugfix: updateWarning(string) requires an argument; the original lambda
    # called self.updateWarning() with none, raising TypeError on click.
    self.createButton.clicked.connect(lambda event, string= 'Id has not been input' : self.updateWarning(string))
    hLayout.addWidget(self.createButton)
    self.addUserBack= QPushButton('Back')
    self.addUserBack.clicked.connect(lambda event : self.showChangeUser())
    hLayout.addWidget(self.addUserBack)
    layout.addLayout(hLayout)
    self.addUser.setLayout(layout)
def create_addUserLayout(self):
    """Populate the add-user form: URL input, username display, warning label."""
    label= QLabel()
    label.setText('Spotify Account Url:')
    self.addUserLayout.addWidget(label)
    self.Url= QLineEdit()
    # every edit re-validates the id via a background worker
    self.Url.textChanged.connect(lambda event : self.checkUser())
    self.addUserLayout.addWidget(self.Url)
    label1= QLabel()
    label1.setText('Username:')
    self.addUserLayout.addWidget(label1)
    self.Username= QLabel()
    self.addUserLayout.addWidget(self.Username)
    self.warning= QLabel()
    self.warning.setStyleSheet('color: red')
    self.addUserLayout.addWidget(self.warning)
def checkUser(self): # creates worker to check if if is viable need to change this so if no last user it works lol
    """Start a background Worker that validates the pasted account URL/id."""
    self.Url.text()
    # seems like workers arent being deleted
    self.create= Worker(caller= 'check')
    self.create.moveToThread(self.createThread)
    self.createThread.started.connect(self.create.run)
    self.create.finished.connect(self.createThread.quit)
    self.create.finished.connect(self.create.deleteLater)
    self.create.warning.connect(self.updateWarning)
    self.create.searchResults.connect(self.updateUsername) # username has been found
    # self.create.progress.connect(self.changeCreateConnection) # when progress is changed(auth conf) mainlab then changes username
    self.createThread.start()
def updateWarning(self,string): # changes the warning label on the change user page if warning emitted means bad username
    """Show *string* as a red warning and reset Next to re-check the id."""
    self.warning.setText(string)
    self.Username.setText('Your Username will appear here')
    self.changeConnection(self.createButton.clicked, lambda event : self.checkUser())
def updateUsername(self,newUserInfo): # updates username variable; when this func is called it means username is found so it changes state of button to allow progress
    """Record the verified [id, username] pair and let Next proceed to auth."""
    self.newUsername= newUserInfo[1]
    self.warning.setText('')
    self.newId= newUserInfo[0]
    self.Username.setText(self.newUsername)
    self.changeConnection(self.createButton.clicked, lambda event : self.getVerification()) # button changes to allow progressaw
def getVerification(self): # uses self.newId as user can still change the text box
    """Swap the form for a redirect-URL prompt and run the OAuth check.

    A checkAuth worker opens the auth URL in a thread; its sp signal
    delivers the Spotify client (or None on failure) to confAuth().
    Next is rebound to set self.answered, which checkAuth polls for.
    """
    self.setAnweredState()
    self.deleteLayoutItems(self.addUserLayout)
    label= QLabel()
    print('align these pleaseeeeee')
    label.setText('Redirect Url:')
    self.addUserLayout.addWidget(label)
    self.redirect= QLineEdit()
    self.addUserLayout.addWidget(self.redirect)
    self.getAuthor= QThread()
    self.getFirstSp= checkAuth()
    self.getFirstSp.moveToThread(self.getAuthor)
    self.getAuthor.started.connect(self.getFirstSp.run)
    self.getFirstSp.finished.connect(self.getAuthor.quit)
    self.getFirstSp.finished.connect(self.getAuthor.deleteLater)
    self.getFirstSp.finished.connect(self.getFirstSp.deleteLater)
    self.getFirstSp.sp.connect(lambda sp : self.confAuth(sp)) # sp is given if None it has failed so need to retry
    self.getAuthor.start()
    self.changeConnection(self.createButton.clicked, lambda event, state= True : self.setAnweredState(state)) # button changes to allow progress
    # if auth worked
    # self.addConfUser()
def setAnweredState(self, state= False):
    """Set the flag checkAuth polls to know the redirect URL was entered.

    NOTE(review): name keeps the original 'Anwered' typo - callers use it.
    """
    self.answered= state
def confAuth(self, sp): # if auth worked/ didnt
    """Handle the OAuth result.

    sp is None when authentication failed (the form is reset); otherwise
    the new user's playlists are listed with Y/N buttons so the user can
    pick which ones to monitor, and Next becomes the final create step.
    """
    if sp is None: self.updateAddUser() # idiom fix: identity test for None; go back
    else: # set upd saved ids playlists
        self.deleteLayoutItems(self.addUserLayout)
        scroll, scrollContent, scrollLayout= self.scrollBox()
        scroll.setWidget(scrollContent)
        self.addUserLayout.addWidget(scroll)
        self.playlistsToAdd= []
        for playlistInfo in spotify.find_userPlaylists(sp, self.newId): #returns [ [id,name] ,..]
            background= QWidget()
            hLayout= QHBoxLayout()
            print('if buttons align wrong change here')
            hLayout.setAlignment(Qt.AlignLeft)
            button1= QPushButton('Y')
            # defaults bind the loop variables to avoid late-binding closures
            button1.clicked.connect(lambda event, state= True, playlistInfo= playlistInfo, background= background : self.setPlaylistState(state, playlistInfo, background))
            hLayout.addWidget(button1)
            button2= QPushButton('N')
            button2.clicked.connect(lambda event, state= False, playlistInfo= playlistInfo, background= background : self.setPlaylistState(state, playlistInfo, background))
            hLayout.addWidget(button2)
            label= QLabel()
            label.setText(playlistInfo[1])
            hLayout.addWidget(label)
            background.setLayout(hLayout)
            scrollLayout.addWidget(background)
        self.changeConnection(self.createButton.clicked, self.addConfUser) # creates user saved playlist ids then goes home if only user sets user to made one
def setPlaylistState(self, state, playlistInfo, background):
    """Mark a playlist as monitored (state=True) or not, and colour its row.

    Membership in self.playlistsToAdd is kept idempotent: repeated Y or N
    presses do not duplicate or over-remove entries.
    """
    already_chosen = playlistInfo in self.playlistsToAdd
    if state and not already_chosen:
        self.playlistsToAdd.append(playlistInfo)
    elif not state and already_chosen:
        self.playlistsToAdd.remove(playlistInfo)
    # row colour always reflects the latest button pressed
    background.setStyleSheet('color: green' if state else 'color: red')
def addConfUser(self): # if create on add user pasge is pressed a user with gathered id and user name is created
    """Start a Worker that writes the new user's files, then return home."""
    self.create= Worker(caller= 'create')
    self.create.moveToThread(self.createThread)
    self.createThread.started.connect(self.create.run)
    self.create.finished.connect(self.createThread.quit)
    self.create.finished.connect(self.create.deleteLater)
    self.create.finished.connect(self.createThread.deleteLater)
    self.create.finished.connect(self.showHome)
    self.createThread.start()
def updateAddUser(self): # resets add user page to before user id has been checked or just sets it up
    """Reset the add-user page back to its initial URL-entry form."""
    self.deleteLayoutItems(self.addUserLayout)
    self.create_addUserLayout()
    # self.Url.setText('')
# useful code
def homeButton(self): # creates home button widget
    """Return a new 'Home' button wired to showHome()."""
    button1= QPushButton("Home")
    button1.clicked.connect(self.showHome)
    return button1
def changeConnection(self, signal, newConnection): # changes connection of signal event eg button.clicked
    """Rebind *signal* so *newConnection* is its only slot.

    Qt's signal.disconnect() raises TypeError when nothing is connected;
    that case is tolerated so this is safe to call on a fresh signal.
    """
    try:
        signal.disconnect()
    except TypeError:
        pass  # no slot was connected yet - nothing to remove
    signal.connect(newConnection)
def scrollBox(self): # creates scroll widget
    """Return (scroll, content, layout): a resizable top-aligned scroll area."""
    scroll= QScrollArea()
    scroll.setWidgetResizable(True)
    scrollContent = QWidget(scroll)
    scrollLayout = QVBoxLayout(scrollContent)
    scrollLayout.setAlignment(Qt.AlignTop)
    return scroll, scrollContent, scrollLayout
def addSong(self, trackId, layout): # adds hlayout (song name , artist, playlists) to layout
    """Append name / artists / playlist labels for *trackId* to *layout*.

    Reads the global songData record: [date, name, [artists],
    [[playlist, date], ...], missing, deleted].
    """
    song= songData[trackId]
    songName= QLabel(song[1])
    songName.setFixedWidth(70)
    layout.addWidget(songName)
    songArtists= QLabel(', '.join(song[2]))
    songArtists.setFixedWidth(70)
    layout.addWidget(songArtists)
    songPlaylists= QLabel(', '.join([ids_playlists[playlist[0]] for playlist in song[3]]))
    layout.addWidget(songPlaylists)
def changeScrollContent(self, trackIds, func, scrollLayout, connectionFunction): # refreshes provided scrollLayout and adds all songs in provided list must give function(object) with 2 states(bool) for yes/no buttons
    """Refill *scrollLayout* with one row per track id.

    Each row gets Y/N buttons wired to connectionFunction(trackId, state,
    func, rowLayout); *func* tells the handler which view is showing so it
    knows whether to hide the row after a press.
    """
    self.deleteLayoutItems(scrollLayout)
    for trackId in trackIds:
        hScrollLayout= QHBoxLayout()
        hButtonsLayout= QHBoxLayout()
        hButtonsLayout.setSpacing(0)
        hButtonsLayout.setContentsMargins(0,0,0,0) # trying to get the buttons closer together
        button1= QPushButton('Y')
        button2= QPushButton('N')
        # defaults bind per-row values to avoid the late-binding closure bug
        button1.clicked.connect(lambda event, Id= trackId, state= True, func= func, layout= hScrollLayout : connectionFunction(Id,state,func,layout))
        button2.clicked.connect(lambda event, Id= trackId, state= False, func= func, layout= hScrollLayout : connectionFunction(Id,state,func,layout))
        button1.setFixedWidth(30)
        button1.setContentsMargins(0,0,0,0)
        hButtonsLayout.addWidget(button1)
        button2.setFixedWidth(30)
        button2.setContentsMargins(0,0,0,0)
        hButtonsLayout.addWidget(button2)
        hScrollLayout.addLayout(hButtonsLayout)
        self.addSong(trackId,hScrollLayout)
        scrollLayout.addLayout(hScrollLayout)
def deleteLayoutItems(self, layout): # deletes items in layout but it might only forget them lol
    """Recursively detach every widget/sub-layout from *layout*.

    Widgets are unparented (Qt will garbage-collect them); nested layouts
    are emptied recursively.
    """
    if layout is not None:
        while layout.count():
            item = layout.takeAt(0)
            widget = item.widget()
            if widget is not None:
                widget.setParent(None)
            else:
                self.deleteLayoutItems(item.layout())
def missingUserConf(self, trackId, state, func, layout):
    """Handle a Y/N press on a missing-song row.

    Persists the deleted state, and hides the row when the current view
    would no longer include it: func 1 (deleted view) drops 'N' rows,
    func 2 (kept view) drops 'Y' rows, func 3 (unconfirmed) drops both;
    func 0 (show all) never hides.
    """
    should_hide = (
        func == 3
        or (func == 1 and not state)
        or (func == 2 and state)
    )
    if should_hide:
        self.deleteLayoutItems(layout)  # remove the row from the current view
        layout.deleteLater()
    data.setDeletedState(trackId, state)
def duplicateUserConf(self, trackId, state, func, layout):
    """Handle a Y/N press on a duplicate-song row.

    'Y' adds the track (with all its current playlists) to the allowed
    duplicates, 'N' removes it.  The row is hidden when it leaves the
    current view: func 1 (illegal view) drops allowed rows, func 2
    (allowed view) drops disallowed rows; func 0 (show all) never hides.
    """
    should_hide = (func == 1 and state) or (func == 2 and not state)
    if should_hide:  ## this could be turned into a func
        self.deleteLayoutItems(layout)  # remove the row from the current view
        layout.deleteLater()
    if state:
        # allow this duplicate across every playlist it currently lives in
        data.add_allowedDup(trackId, [playlistData[0] for playlistData in songData[trackId][3]])
    else:
        data.rem_fromAllowedDup(trackId)
## button commands
def showGraph(self):
    """Toggle visibility of the graph window."""
    if self.graph.isVisible(): self.graph.hide()
    else: self.graph.show()
def waitHome(self):
    """Return to the home page after a one-second pause.

    Uses a single-shot timer instead of time.sleep(1) so the Qt event
    loop (and the whole UI) is not frozen while waiting.
    """
    from PyQt5.QtCore import QTimer  # local import: module import block unchanged
    QTimer.singleShot(1000, self.showHome)
def showHome(self): # the go funcs could be changed into func with passed variable for index and list of names with same index
    """Switch the stack to the home page and shrink the window."""
    self.currentUserLabel.setText("Current User: %s" % username)
    self.setWindowTitle("Home")
    self.Stack.setCurrentIndex(0)
    self.resize(150, 150)
def showChangeUser(self):
    """Refresh the user list and switch the stack to the change-user page."""
    self.updateChangeUser()
    self.setWindowTitle("Change User")
    self.Stack.setCurrentIndex(1)
def update_mainLabel(self,elem): # changes label on main page
    """Set the run-page status label to *elem*."""
    self.mainLabel.setText(elem)
def run(self):
    """Kick off the main song-update job on a background thread.

    Shows the run page, then wires a 'main' Worker: progress/label feed
    the UI, and on completion the graph reloads, the missing/duplicate
    views refresh, and the app returns home after a short delay.
    """
    self.setWindowTitle("Sponitor")
    self.Stack.setCurrentIndex(2)
    self.update_mainLabel('Starting')
    self.update_progress(0)
    self.thread = QThread()
    self.worker = Worker(caller= 'main')
    self.worker.moveToThread(self.thread)
    self.thread.started.connect(self.worker.run)
    self.worker.progress.connect(self.update_progress)
    self.worker.mainLab.connect(self.update_mainLabel)
    self.worker.finished.connect(self.thread.quit)
    self.worker.finished.connect(self.worker.deleteLater)
    self.thread.finished.connect(self.thread.deleteLater)
    self.thread.finished.connect(self.waitHome)
    self.thread.finished.connect(lambda event=None : self.graph.loadGraph())
    self.thread.finished.connect(lambda event=None : self.updateMD())
    self.thread.start()
def showMissingPage(self):
    """Refresh the data views and switch the stack to the missing page."""
    self.updateMD()
    self.showUnConfMissing()
    self.Stack.setCurrentIndex(3)
    self.resize(430,300)
def showDuplicatePage(self):
    """Refresh the data views and switch the stack to the duplicates page."""
    self.updateMD()
    self.showIllegalDuplicate()
    self.Stack.setCurrentIndex(4)
    self.resize(430,300)
def showFollowArtists(self):
    """Rebuild the playlist buttons and show the follow-artists page."""
    self.updateFollowArtists()
    self.setWindowTitle("Follow Artists")
    self.Stack.setCurrentIndex(5)
def showSearchPage(self):
    """Clear any previous query and show the search page."""
    self.clearSearch()
    self.setWindowTitle("Search")
    self.Stack.setCurrentIndex(6)
def showLogPage(self):
    """Reload the log text and show the log page."""
    self.updateLog()
    self.setWindowTitle("Log")
    self.Stack.setCurrentIndex(7)
def showAddUser(self):
    """Reset the add-user wizard and show the create-user page."""
    self.updateAddUser()
    self.setWindowTitle("Create User")
    self.Stack.setCurrentIndex(8)
def update_progress(self, progress): # updates progress bar on main page
    """Set the run-page progress bar to *progress* (percent)."""
    self.progress.setValue(progress)
def updateMD(self): # refreshes missing and duplicate scrollareas
    """Rebuild both the missing (unconfirmed) and duplicates (illegal) views."""
    self.showUnConfMissing()
    self.showIllegalDuplicate()
    print('Updated Missing, Duplicates')
def update_followLabel(self, text): ## could shorten this and update prgo with a lambda func that ypu give the self var to
    """Set the follow-artists status label to *text*."""
    self.followLabel.setText(text) # changes label on follow artists page
def update_followProgress(self, pos): # changes progress bar on folllow artists page
    """Set the follow-artists progress bar to *pos* (percent)."""
    self.followProgress.setValue(pos)
class checkAuth(QObject):
    """Worker that runs the (blocking) Spotify OAuth check off the GUI thread.

    Emits sp with the authenticated client, or None when authentication
    failed, then emits finished.
    """
    finished = pyqtSignal()   # emitted once the check is done
    sp = pyqtSignal(object)   # the Spotify client, or None on failure
    def __init__(self):
        super(checkAuth, self).__init__()
    def run(self):
        """Fetch a client for window.newId and emit the result."""
        print('check Auth')
        sp= spotify.getSp(window.newId,window)
        # getSp returns the literal False on failure; normalise that to None
        # (identity test instead of the original '== False')
        self.sp.emit(None if sp is False else sp)
        self.finished.emit()
class Worker(QObject):
    """General-purpose background job; *caller* selects which job run() does.

    Jobs: 'main' (refresh song data), 'follow' (follow artists in a
    playlist), 'search' (query song data), 'check' (validate a new user
    id), 'create' (write a new user's files).  Plain callbacks from the
    worker functions are bridged onto Qt signals by the update_* methods.
    """
    finished = pyqtSignal()          # job complete
    progress = pyqtSignal(int)       # percent complete
    mainLab= pyqtSignal(str)         # status text for a label
    warning= pyqtSignal(str)         # validation warning text
    searchResults= pyqtSignal(list)  # list of track ids / result payload
    def __init__(self, caller= '', playlistId= ''):
        super(Worker, self).__init__()
        self.caller= caller          # which job run() dispatches to
        self.playlistId= playlistId  # only used by the 'follow' job
    def run(self):
        """Dispatch to the job named by self.caller, then emit finished."""
        # Here we pass the update_progress (uncalled!)
        # function to the long_running_function:
        if self.caller == 'main':
            spotify.updateSongs(self.update_label, self.update_progress)
        elif self.caller == 'follow':
            spotify.followArtistsInPlaylists(self.update_label, self.update_progress, self.playlistId)
        elif self.caller == 'search':
            self.searchResults.emit(spotify.search(window.searchBar.text()))
        elif self.caller == 'check':
            data.check_new_user(window.Url.text(), self.update_warning, self.update_label,self.update_results)
        elif self.caller == 'create':
            data.create_new_user(window.newId, window.newUsername, window.playlistsToAdd)
        self.finished.emit()
    def update_results(self,results):
        # callback -> signal bridge for the 'check' job
        self.searchResults.emit(results)
    def update_warning(self, string):
        self.warning.emit(string)
    def update_progress(self, percent):
        self.progress.emit(percent)
    def update_label(self, string):
        self.mainLab.emit(string)
## spotify monitor
class data():
    """File-backed persistence layer: users, monitored playlists, song data,
    logs and the allowed-duplicates list.

    Methods are namespace-style (no self) and operate on the module
    globals my_id, username, id_user, ids_playlists, songData and
    allowedDup.  songData record layout:
    'track_id': ['date first added', 'name', ['artist', ...],
                 [['playlist', 'date added'], ...], 'missing', 'deleted']
    """
    def checkFile(loc):
        """Create *loc* as an empty file if it does not exist yet."""
        if not exists(loc):
            data.createFile(loc)
    def create_saved_ids_playlists(Id,playlistInfo):
        """Write the playlists to monitor for user *Id* ('id##name' per line)."""
        toAdd= []
        for playlist in playlistInfo:
            toAdd.append("##".join(playlist))
        toAdd= '\n'.join(toAdd)
        loc= join(Id, 'saved_ids_playlists.txt')
        with open(loc, 'w+', encoding= 'UTF-8') as file:
            file.write(toAdd)
    def get_saved_ids_playlists(): # returns dict of id:playlists that need to be saved
        """Load the current user's monitored playlists into global ids_playlists."""
        global ids_playlists
        ids_playlists={}
        loc= join(my_id, 'saved_ids_playlists.txt')
        if not exists(loc):
            data.add_log(loc+' does not exist for '+ username)
            # robustness fix: create the file so the open() below cannot
            # raise FileNotFoundError on a fresh user
            data.createFile(loc)
        with open(loc,'r',encoding='utf-8') as file:
            for i in file.readlines():
                i= i.replace('\n','')
                i= i.split('##')
                ids_playlists[i[0]]= i[1]
        if len(ids_playlists) == 0:
            print('create_ids_playlists(code meee)')
    def createFile(file_loc, string= ''):
        """Create *file_loc*, optionally seeding it with *string*."""
        with open(file_loc,'w+',encoding='utf-8') as last:
            if string != '':
                last.write(string)
        print("Created %s." % file_loc)
    def get_id_user():# returns dict{id:user}(str)
        """Load the global id_user mapping from id_user.txt."""
        global id_user
        id_user={}
        idUser_loc= 'id_user.txt'
        data.checkFile(idUser_loc)
        with open(idUser_loc,'r',encoding='utf-8') as ids:
            for line in ids.readlines():
                temp= line.split('##')
                id_user[temp[0]]= temp[1].replace('\n','')
    def get_log():
        """Return the current user's log text ('No log entries' if empty)."""
        loc= join(my_id, username+ '_log.txt')
        data.checkFile(loc)
        with open(loc, 'r', encoding= 'UTF-8') as file:
            log= file.read()
        if log == '':
            log= 'No log entries'
        return log
    def add_log(string):
        """Append *string* on a new line of the current user's log file."""
        loc= join(my_id, username+ '_log.txt')
        data.checkFile(loc)
        with open(loc, 'a', encoding= 'UTF-8') as file:
            file.write('\n'+ string)
    def clear_log():
        """Truncate the current user's log file (opening with w+ truncates)."""
        loc= join(my_id, username+ '_log.txt')
        with open(loc, 'w+', encoding= 'UTF-8') as file:
            print('Cleared log')
    def check_new_user(Id, update_warning, update_label, update_results): # adds id and username to file returns user id
        """Validate a pasted account URL/id and report via the callbacks.

        On success update_results receives [Id, username]; on failure the
        warning callback gets an error message.
        """
        if 'user/' in Id: Id= Id.split('user/')[1][:25]  # extract the 25-char id from a full profile URL
        tempUsername= spotify.verifyUsername(Id)
        if tempUsername is False:  # verifyUsername returns the literal False on failure
            spotify.update_ui(text= 'Cannot fetch username', update_label= update_warning)
            return
        else:
            spotify.update_ui(text= tempUsername, update_label= update_label)
            update_results([Id,tempUsername])
    def create_new_user(Id,temp_username, playlistInfo):
        """Register a new user: folder, id_user.txt entry, monitored playlists."""
        data.get_id_user()
        length= len(id_user)
        mkdir(Id)
        with open('id_user.txt','a+',encoding='utf-8') as ids:
            to_write= Id+ '##'+ temp_username
            if length > 0: to_write= '\n'+ to_write  # keep one entry per line
            ids.write(to_write)
        data.get_id_user()
        data.create_saved_ids_playlists(Id,playlistInfo)
        data.add_log('Created user %s - %s' % (temp_username, Id))
    ## update with gui
    def remove_user(): # removes user from id_user and deletes their files
        """Console-only legacy flow: delete a selected user and their files."""
        print('Remove user')
        user_id= data.select_user()
        if user_id == my_id:
            print('this would result in no current user')
            #id last user user to be removed then change it (select new user)
            # what if removing all users? return to home (only oprion is create new usedr
            # homepage()
        username_to_delete= data.user(user_id)
        password= input('Input password to confirm deletion of %s\n' % username_to_delete)
        if password == 'delete':
            if exists(username_to_delete):
                rmtree(username_to_delete) # cant remove folders with nhabitants
            else:
                print("Folder already deleted?")
            with open('id_user.txt','r',encoding='utf-8') as file:
                temp= file.read()
            # strip the entry whichever side its newline is on
            temp= temp.replace(my_id+'##'+username_to_delete+'\n','') # either or
            temp= temp.replace('\n'+my_id+'##'+username_to_delete,'')
            remove('id_user.txt')
            with open('id_user.txt','w+',encoding='utf-8') as file:
                file.write(temp)
            # remove from id_user
        else:print('Incorrect password')
    ## update with gui
    def select_user(): # returns selected id but does not change last user
        """Console prompt: list users and return the chosen id."""
        data.get_id_user()
        for i,item in enumerate(list(id_user.keys())):
            print(str(i+1)+') '+ id_user[item] )
        while True:
            temp= input('Select user(num): ')
            try:
                temp= int(temp)
                break
            # narrowed from a bare except: only non-numeric input is expected here
            except ValueError:print('Invalid input')
        selected_id= list(id_user.keys())[temp-1]
        print('User selected:', id_user[selected_id])
        return selected_id
    def update_last_user_id(my_id): # updates user id in file
        """Persist *my_id* as the user to auto-load next launch."""
        with open('last_id.txt','w+',encoding='utf-8') as last:
            last.write(my_id)
    def get_last_user_id():# returns last user to load in with
        """Return the id stored by update_last_user_id (empty on first run)."""
        last_idLoc= 'last_id.txt'
        data.checkFile(last_idLoc)
        with open(last_idLoc,'r',encoding='utf-8') as last:
            return last.read()
    def changeActiveUser(Id):
        """Switch the active user and reload all of their data files."""
        print(Id)
        global ids_playlists,my_id,username
        my_id= Id
        data.update_last_user_id(my_id)
        username= id_user[my_id]
        data.get_saved_ids_playlists()
        data.load_songData()
        print('Active user changed to', username)
    def save_songData():
        """Write the global songData dict to the user's CSV file."""
        columns= ['Track Id','Date First Added','Name','Artists','Current Playlists/Date Addded','Missing','Deleted']
        with open(join(my_id,username+'_songData.csv'), 'w', newline='', encoding= 'UTF-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=columns)
            writer.writeheader()
            for trackId in songData:
                song= songData[trackId]
                artists=seperator.join(song[2])
                playlists_dates=[]
                for playlist_date in song[3]:
                    playlists_dates.append(seperator.join(playlist_date))
                playlists_dates= seperator2.join(playlists_dates)
                row= dict(zip(columns, [trackId,song[0],song[1], artists, playlists_dates,song[4],song[5]]))
                writer.writerow(row)
        print('Saved songData')
    def load_songData():
        """Load the user's CSV into the global songData dict (inverse of save)."""
        global songData
        songData= {}
        loc= join(my_id,username+'_songData.csv')
        data.checkFile(loc)
        with open(loc, 'r', newline='', encoding= 'UTF-8') as csvfile:
            morp= csv.reader(csvfile)
            for pos,row in enumerate(morp):
                if pos != 0:  # skip the header row
                    artists= row[3].split(seperator)
                    playlists_dates= []
                    for elem in row[4].split(seperator2):
                        playlists_dates.append(elem.split(seperator))
                    songData[row[0]]= [row[1], row[2], artists, playlists_dates, row[-2], row[-1]]
        print('Loaded songData')
    #new
    def r_nestedElem(pos,nestedList): # returns indexed val of nested list
        """Return the *pos*-th element of every inner list."""
        return [eggList[pos] for eggList in nestedList]
    def get_allowedDup():
        """Load the global allowedDup dict {trackId: [playlistIds]} from file."""
        global allowedDup
        path= join(my_id,username+'_allowedDuplicates.txt')
        data.checkFile(path)
        with open(path, 'r', encoding= 'UTF-8') as file:
            temp= file.readlines()
        allowedDup= {}
        if temp != ['']:
            for i in temp:
                i= i.replace('\n','')
                i= i.split(seperator)
                allowedDup[i[0]]= i[1:]
        print('Loaded allowed duplicates')
    def save_allowedDup():
        """Write the global allowedDup dict back to its file."""
        path= join(my_id,username+'_allowedDuplicates.txt')
        temp= '\n'.join([i+ seperator+ seperator.join(allowedDup[i]) for i in allowedDup])
        with open(path, 'w+', encoding= 'UTF-8') as file:
            file.write(temp)
    def add_allowedDup(trackId, playlists):
        """Allow *trackId* to be duplicated across *playlists* (id or list)."""
        if not isinstance(playlists, list): playlists= [playlists]  # accept a single id
        if trackId in allowedDup:
            allowedDup[trackId].extend(playlists) ## adds new playlists to end of allowed playlist list
        else:
            allowedDup[trackId]= playlists
        data.save_allowedDup()
        # return allowedDup
    def rem_fromAllowedDup(trackId): ## removes track from allowed duplicates file
        """Revoke *trackId*'s allowed-duplicate status and persist."""
        allowedDup.pop(trackId)
        data.save_allowedDup()
        # return allowedDup
    def remAllowedDuplicates(trackIds= None): #removes duplicates that are not allowed returns list of ids
        """Drop allowed duplicates from {trackId: [playlistIds]}; return ids left.

        A track found in a playlist outside its allowed list loses its
        allowed status so the user must re-confirm it.
        """
        if trackIds is None: trackIds= {}  # fix: avoid shared mutable default argument
        # iterate a snapshot: rem_fromAllowedDup mutates allowedDup mid-loop
        for trackId in list(allowedDup):
            if trackId in trackIds: # if it is an allowed duplicate
                allowedPlaylistIds= allowedDup[trackId]
                rem= True
                for playlistId in trackIds[trackId]:
                    if playlistId not in allowedPlaylistIds:
                        rem= False
                if rem: # if allowed to be duplicate remove
                    del trackIds[trackId]
                else:# it has been added to another playlist so user has to re authenticate it as allowed
                    data.rem_fromAllowedDup(trackId)
        return list(trackIds.keys())
    def duplicates():# returns all duplicates except songs that have been user deleted
        """Return {trackId: [playlistIds]} for songs in >1 playlist, not deleted."""
        duplicates= {}
        for trackId in songData:
            song= songData[trackId]
            if len(song[3]) > 1 and not song[5] == 'true': ## if duplicate(in multiple playlists) and not deleted
                duplicates[trackId]= [playlistData[0] for playlistData in song[3]]
        return duplicates
        # NOTE(review): a legacy console-interaction block that sat (unreachably)
        # after this return has been removed.
    #new
    #add gui
    def setDeletedState(trackId, state):
        """Record the user's kept/deleted verdict for *trackId* and persist."""
        if state:
            changeState= 'true' # delete tag updated
        else:
            changeState= 'false' # delete tag updated
        song= songData[trackId]
        if changeState != song[5]: #only updates songData if state changed
            song[5]= changeState
            data.add_log('%s deleted state set to %s' % (song[1], changeState))  # typo 'delteted' fixed
            songData[trackId]= song
            data.save_songData()
    def missing():# returns list of missing trackIds
        """Return track ids flagged as missing in songData."""
        missingList= []
        for trackId in songData:
            if songData[trackId][4] == 'true': # if missing
                missingList.append(trackId)
        return missingList
    def remConf(trackIds):
        """Remove user-confirmed tracks (deleted field set either way) in place."""
        toRem=[]
        for trackId in trackIds:
            if songData[trackId][5] in ['true','false']: # if it has been user confirmed remove it
                toRem.append(trackId)
        for trackId in toRem:
            trackIds.remove(trackId)
        return trackIds
    def remDel(trackIds):
        """Remove tracks the user confirmed as deleted in place."""
        toRem=[]
        for trackId in trackIds:
            if songData[trackId][5] =='true': # if it has been user confirmed remove it
                toRem.append(trackId)
        for trackId in toRem:
            trackIds.remove(trackId)
        return trackIds
    def deleted(missingList): # returns deleted(user confirmed) songs from list of misssing trackId
        """Return the subset of *missingList* the user confirmed as deleted."""
        deletedList= []
        for trackId in missingList:
            if songData[trackId][5] == 'true':
                deletedList.append(trackId)
        return deletedList
    def totalSongs():
        """Return a summary string of total / duplicate / missing song counts."""
        missingList= data.missing()
        deletedList= data.deleted(missingList)
        delSongs= len(deletedList)
        missSongs= len(missingList)- delSongs
        duplicates= data.duplicates() #dictionary
        dupSongs= len(duplicates)
        total= len(songData)-delSongs
        return 'Total songs: %s Duplicate songs: %s Missing songs: %s' % (total,dupSongs, missSongs)
class spotify():
# new and not sure if working
def getAuth(Id,window=None):
    """Build a SpotifyOAuth for *Id*, completing the auth-code flow if needed.

    Uses a per-user token cache; if a cached token exists it is validated
    and refreshed.  Otherwise the auth URL is opened and the redirect URL
    is collected from the GUI (window.redirect once window.answered is
    set) or from stdin, then exchanged for a token directly against the
    token endpoint.  Relies on spotipy internals (_open_auth_url,
    _save_token_info, _make_authorization_headers, _session).
    """
    print('getting Auth', window)
    as_dict= True
    cid, secret= spotify.get_keys()
    scope = ['user-library-read', 'playlist-read-private', 'playlist-read-collaborative', 'user-follow-read', 'user-follow-modify']
    # sp= getAuth(cid, secret, scope, Id).sp
    handler= CacheFileHandler(username= Id)
    auth= SpotifyOAuth(scope=scope,client_id=cid, client_secret=secret,redirect_uri= 'https://i.dailymail.co.uk/i/pix/2012/06/04/article-2154283-136ED7F3000005DC-412_634x412.jpg', cache_handler=handler, show_dialog=True)#, username= my_id
    def getCode(window):
        # Obtain the authorization code: open the auth URL, then read the
        # redirect URL either from the console or the GUI field.
        print('get code', window)
        auth._open_auth_url()
        if window == None: # if no window open
            redirect= input('Redirect Url: ')
        else:
            # busy-wait until the user presses Next on the add-user page
            while window.answered == False:
                pass
            redirect= window.redirect.text()
        state, code= auth.parse_auth_response_url(redirect)
        return code
    # fast path: a cached token exists - validate/refresh it and return
    token_info = auth.validate_token(auth.cache_handler.get_cached_token())
    if token_info is not None:
        if auth.is_token_expired(token_info):
            token_info = auth.refresh_access_token(
                token_info["refresh_token"]
            )
        auth._save_token_info(token_info if as_dict else token_info["access_token"])
        return auth
    # slow path: exchange a fresh authorization code for a token
    payload = {
        "redirect_uri": auth.redirect_uri,
        "code": getCode(window),
        "grant_type": "authorization_code",
    }
    if auth.scope:
        payload["scope"] = auth.scope
    if auth.state:
        payload["state"] = auth.state
    headers = auth._make_authorization_headers()
    response = auth._session.post( # token info needed
        auth.OAUTH_TOKEN_URL,
        data=payload,
        headers=headers,
        verify=True,
        proxies=auth.proxies,
        timeout=auth.requests_timeout,
    )
    token_info = response.json()
    token_info = auth._add_custom_values_to_token_info(token_info)
    auth.cache_handler.save_token_to_cache(token_info)
    auth._save_token_info(token_info if as_dict else token_info["access_token"])
    return auth
#new
def getSp(Id, window= None):
    """Return an authenticated spotipy client for *Id*, or False on failure.

    A trivial API call is made to prove the credentials work before the
    client is handed back.
    """
    print('getting Sp')
    try:
        sp = spotipy.Spotify(client_credentials_manager=spotify.getAuth(Id, window))
        test= sp.current_user_playlists(limit=1)  # smoke-test the credentials
        print('got authentication')
    except Exception:  # narrowed from a bare except: (no longer eats SystemExit/KeyboardInterrupt)
        data.add_log('Authentication failed for %s' % username)
        return False
    return sp
def verifyUsername(Id):
    """Return the display name for account *Id*, or False if it can't be fetched.

    Uses the client-credentials flow, so no user login is required.
    """
    cid, secret= spotify.get_keys()
    auth= SpotifyClientCredentials(client_id= cid, client_secret= secret)
    tempSp = spotipy.Spotify(client_credentials_manager= auth)
    try:
        newUsername= tempSp.user(Id)['display_name']
        return newUsername
    except Exception:  # narrowed from a bare except: bad id / network error -> False
        return False
def find_userPlaylists(sp,Id): # generates all user playlists user to create ids_playlists
    """Return [[uri, name], ...] for the playlists owned by account *Id*.

    Only the first page (50) of the current user's playlists is examined.
    """
    owned = []
    for playlist in sp.current_user_playlists(limit=50)['items']:
        if playlist['owner']['id'] == Id:
            owned.append([playlist['uri'], playlist['name']])
    return owned
def update_ui(text= None, percent= None, update_label= None, update_progress= None):
    """Forward status text and/or a progress percentage to optional callbacks.

    Text is always echoed to the console; the label callback (if given)
    receives it as string=..., and the progress callback as percent=....
    """
    if text is not None:
        print('text:',text)
        if update_label is not None:
            update_label(string=text)
    if percent is not None and update_progress is not None:
        update_progress(percent=percent)
#new
#add gui
## update with gui ( parse self then call gui.setMainLabel(self,string)
def updateSongs(update_label= None, update_progress= None): # does not get active user songs only jamies because of spotipy things
    """Synchronise the module-global songData with the user's Spotify playlists.

    Downloads every tracked playlist, merges duplicate tracks across
    playlists, flags songs that disappeared, confirms renames interactively,
    appends brand-new tracks, and finally persists via data.save_songData().
    update_label / update_progress are optional GUI progress callbacks.
    """
    global songData
    # NOTE(review): __name__ == 'Main' is never true for a script run as
    # '__main__'; this likely always yields 'Manual' — confirm intent.
    state= 'Auto' if __name__ == 'Main' else 'Manual'
    # NOTE(review): 'username' is assumed to be a module-global — confirm it
    # is always defined before this runs.
    data.add_log('\n%s: (%s) Updating songs for %s:' % (state , datetime.now().strftime("%d-%m-%Y %H:%M:%S"), username) )
    sp= spotify.getSp(my_id)
    playlistIds= [playlist['uri'] for playlist in sp.current_user_playlists(limit=50)['items']] # only the first 50 playlists are fetched
    # songData= [['spotify:track:2dje3ZBu1j1r0QfR7mtS0l', 'spotify:playlist:1JTU5zqtgA1zzqb90papUO', '2021-08-16'], ['spotify:track:5H3swhQ72PiGd5PYz4P61P', 'spotify:playlist:1JTU5zqtgA1zzqb90papUO', '2021-08-16']]
    loadedSongs=[]# [[id, [ [playlist,date added] ]],...next]
    playlistsForDown= list(ids_playlists.keys())
    num=0
    # Phase 1: download all tracks from every playlist selected for download.
    for playlist_id in playlistIds:
        if playlist_id in playlistsForDown:
            spotify.update_ui(text= 'Loading %s...' % ids_playlists[playlist_id], update_label= update_label)
            start= 0
            while True:# the limit is 100 songs so it must be iterated to get all songs
                total=0
                for items in sp.playlist_tracks(playlist_id, offset=start)["items"]:
                    artists=[]
                    for artist in items['track']['artists']:
                        artists.append(artist['name'])
                    # [:-10] strips the time portion of the ISO timestamp, keeping YYYY-MM-DD.
                    loadedSongs.append([items['track']['uri'],[[playlist_id, items['added_at'][:-10]]],items['track']['name'],artists])
                    total+=1
                start+=100 # if playlist is exactly a mutiple of 100 this still works
                if total != 100:
                    break
            num+=1
            spotify.update_ui(percent= round((num/len(playlistsForDown))*100), update_progress= update_progress)
    if loadedSongs == []:
        spotify.update_ui(text= 'No songs found', update_label= update_label)
    else:
        # Phase 2: collapse duplicates of the same track across playlists.
        spotify.update_ui(text= 'Begin compilation...', update_label= update_label)
        loaded_songData={}
        total= len(loadedSongs)
        pos=0
        while loadedSongs != []:
            song= loadedSongs.pop(0)
            # song= loadedSongs.pop(0) # removes first song and sets song equal to it
            # song= [track_id,[ [current_playlist,dateAdded] ],name,[artists]]
            trackId= song[0]
            while True:
                all_trackIds= data.r_nestedElem(0,loadedSongs) # run everytime to update (0 refers to id)
                if trackId in all_trackIds:# if duplicate exists
                    temp= loadedSongs.pop(all_trackIds.index(trackId)) # removes duplictate song and sets temp equal to it
                    # combine duplicated song data
                    song[1].append(temp[1][0])# song[1]= [[current_playlist_a,dateAdded_a],[current_playlist_b,dateAdded_b]]
                    song[1]= sorted(song[1], key= lambda playDate: datetime.strptime(playDate[1],'%Y-%m-%d').timestamp()) # sorts list of current playlists by date added
                else:break
            loaded_songData[trackId]= song[1:] # [ [ [cur play,date] ],name,artist]
            pos+=1
            # print('%s/%s' % (pos, total), end= '\r')
            spotify.update_ui(percent= round((pos/total)*100), update_progress= update_progress)
        # loaded_songData should be { id: [ [curPlaylist,dateAdded] ]],id: [ [curPlaylistA,dateAddedA],[curPlaylistB,dateAddedB] ] }
        # when value in loaded_songData has more than one elem it is duplicated
        #songData format
        # 'track_id':['date first added', 'name', ['artist'], (current playlist/s)[['current playlist','date added'], ...], 'missing', 'deleted]
        # Phase 3: reconcile the freshly downloaded data with the saved songData.
        data.load_songData()
        # if update_ui != None: update_ui(percent=50)
        # for saved tracks
        text= 'total songs: %s' % total
        for trackId in songData:
            song= songData[trackId]
            if trackId in loaded_songData:
                song[4]= 'false' # set missing value
                song[5]= 'notConf' # set deleted value to not Confirmed so if missing user has to set deleted to either true or false
                # loaded song= [ [curPlaylist,dateAdded],name ,[aritists,..] ]
                loadedSong= loaded_songData[trackId]
                if song[3] != loadedSong[0]:# if current playlists have changed update songData
                    tempSong= loadedSong[0]
                    for playlist in song[3]:
                        # playlist= [playlist,date added]
                        if playlist not in loadedSong[0]:
                            temp= '%s removed from %s' % (song[1], ids_playlists[playlist[0]])
                            data.add_log(temp)
                            text+=temp
                            print(temp) ## throwing key error if duplicate in same playlist removed?
                        else:
                            tempSong.remove(playlist) # remove playlists that are present in both leaving only new playlists
                    if tempSong != []: # if new playlist ^^ added
                        temp= '%s added to %s'% (song[1], ids_playlists[tempSong[0][0]])
                        data.add_log(temp)
                        text+=temp
                        print(temp) ## throwing key error if duplicate in same playlist removed?
                    song[3]= loadedSong[0] # current playlists updated
                if song[1] != loadedSong[1] or song[2] != loadedSong[2]:# if name or artist changed then update
                    temp= 'Name or artists changed from\n%s %s to %s %s' %(song[1], ','.join(song[2]), loadedSong[1], ','.join(loadedSong[2]))
                    data.add_log(temp)
                    print(temp)
                    if input('Confirm rename? y/n(add to gui somehow)') == 'y':
                        song[1]= loadedSong[1]
                        song[2]= loadedSong[2]
                # remove song from loaded_songData to leave only new songs
                del loaded_songData[trackId]
            else:
                # song is missing/deleted
                if song[4] == 'false': # first time recorded as missing
                    data.add_log('%s - %s is missing' % (song[1], ','.join(song[2])))
                    song[4]= 'true' # missing tag updated
            songData[trackId]= song # songData updated with new values
        spotify.update_ui(text= text, update_label= update_label)
        # if update_ui != None: update_ui(percent=75)
        # new songs
        # only new songs left in loaded data
        # Phase 4: anything still in loaded_songData is a brand-new track.
        if loaded_songData != {}: # if new songs exist
            numNew= len(loaded_songData)
            temp= '\nAdding %s new song(s)' % numNew
            data.add_log(temp)
            print(temp)
            for pos,newTrackId in enumerate(loaded_songData):
                print('%s/%s' % (pos, numNew), end= '\r')
                song= loaded_songData[newTrackId]# [ [ [cur playlist, date added ], []... ], name, [artists]]
                playlist_date= song[0]
                dateFirstAdded= playlist_date[0][1] # first date recorded as loaded song data is sorted
                # name, artist= spotify.get_nameArtist(sp, newTrackId) # if track worked i would have used this but i have to add names from search through playlist now :(
                name= song[1]
                artist= song[2]
                songData[newTrackId]= [dateFirstAdded,name,artist,playlist_date,'false','false'] # not missing or deleted # could be added to multiple new playlists?
                temp= '%s, %s added to %s' % (name, artist[0], ids_playlists[playlist_date[0][0]])
                data.add_log(temp)
                print(temp)
    data.save_songData()
    data.totalSongs()
    spotify.update_ui(text= 'Done', update_label= update_label)
## update with gui
def get_keys(): # returns client id, client secret
    """Return (client_id, client_secret), prompting and saving on first run.

    Credentials are cached in a plain-text file next to the script.
    """
    accessLoc= 'spotify access.txt'
    if not exists(accessLoc):
        # First run: ask the user for the credentials and persist them.
        cid=input('File %s does not exist\nInput client id: ' % accessLoc)
        secret= input('Input client secret: ')
        data.createFile(accessLoc, string= cid+'\n'+secret)
    else:
        with open(accessLoc,'r',encoding= 'utf-8') as keys:
            keys= keys.readlines()
            cid= keys[0].replace('\n','')
            # NOTE(review): unlike cid, the secret keeps a trailing newline if
            # the file ends with one — confirm downstream tolerates that.
            secret= keys[1]
    return cid , secret
## update with gui
# def user_playlists(sp,saved_ids_playlists={}):
# # creates dict of found(within saved ids) user made playlists (id; name) for downloading
# # DO NOT PARSE SAVED PLAY IDS IF FIRST TIME SETUP
# ids_playlists={}
# results = sp.current_user_playlists(limit=50)# if you have more than 50 playlists i dont like you :)
# pos=0
# for i in results['items']:
# if i['owner']['id'] == my_id:
# ids_playlists[results['items'][pos]['uri']]= results['items'][pos]['name']
# if saved_ids_playlists != {}: # remove the ones not needed useful option for first set up to find all playlists if needed
# for play_id in list(ids_playlists.keys()):
# if play_id not in list(saved_ids_playlists.keys()): del ids_playlists[play_id]
# pos+=1
# if saved_ids_playlists == {}:
# print('Found %s user playlists:' % len(ids_playlists))
# for i,item in enumerate(ids_playlists.keys()):
# print(i+1,ids_playlists[item]+ ' ---> '+ item) #newest name used (but saved with oldest name) incase user changes playlist id
# del_list= []
# for item in ids_playlists.keys():
# if input('save %s?[y]' % ids_playlists[item]) != 'y':
# del_list.append(item)
# print('deleted')
# for item in del_list:
# del ids_playlists[item]
# else:
# print('Found %s user playlists for download:\n' % (str(len(ids_playlists))+'/'+ str(len(saved_ids_playlists))))
# for i in ids_playlists.keys():
# print(ids_playlists[i]) #newest name used (but saved with oldest name) incase user changes playlist idi actually resaved with new name
# print()
# print('Loading...',end='\r')
# return ids_playlists
## major change needed ? move to data
def update_saved_ids_playlists(saved_ids_playlists, update_dict):  # replaces old playlist names with new ones
    """Merge *update_dict* into *saved_ids_playlists* in place and return it."""
    saved_ids_playlists.update(update_dict)
    return saved_ids_playlists
## gui
def search(searchString):
    """Case-insensitive search over the global songData by track or artist name.

    Songs flagged as deleted (entry[5] == 'true') are skipped.  Returns the
    matching track ids in songData order.
    """
    searchString = searchString.lower()
    matches = []
    # BUG FIX: the loop variable was previously named `data`, shadowing the
    # project-wide `data` module; renamed to avoid confusion.
    for pos, song in enumerate(songData.values()):
        if song[5] != 'true':  # skip songs marked deleted
            if searchString in song[1].lower():  # song[1] is the track name
                matches.append(pos)
            for artistName in song[2]:  # song[2] is the artist list
                if searchString in artistName.lower():
                    if pos not in matches:
                        matches.append(pos)  # may already match by track name
    # Build the key list once (previously rebuilt per comprehension pass) and
    # translate positions back into track ids — dicts preserve insert order.
    ids = list(songData.keys())
    return [ids[pos] for pos in matches]
def followArtistsInPlaylists(update_label, update_progress, playlistId): # follows artists that have more than one song in the playlist
    """Follow every artist with two or more songs in playlist *playlistId*.

    Collects the playlist's tracks from the global songData, resolves each
    track's primary artist, and follows any artist that appears more than
    once and is not already followed.  Progress is mirrored to the GUI via
    the two callbacks.
    """
    tempArtists= []
    toFollow= []
    playlistSongs= []
    sp= spotify.getSp(my_id)
    length= len(songData)
    spotify.update_ui(percent= 0, update_progress= update_progress)
    spotify.update_ui(text= 'Collecting songs from playlist...', update_label= update_label)
    for pos,Id in enumerate(songData):
        # BUG FIX: this local used to be named `data`, shadowing the `data`
        # module — the data.add_log() call further down then failed with an
        # AttributeError once the loop rebound the name to a song list.
        songEntry= songData[Id]
        if songEntry[3][0][0] == playlistId: # current playlist id
            playlistSongs.append(Id)
        spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
    spotify.update_ui(text= 'Converting track ids to artist ids...', update_label= update_label)
    spotify.update_ui(percent= 0, update_progress= update_progress)
    length= len(playlistSongs)
    if length > 50:
        # sp.tracks accepts at most 50 ids per call, so page through.
        pos= 50
        while pos <= length+ 49:
            tempArtists.extend([ song['artists'][0]['id'] for song in sp.tracks(playlistSongs[pos-50:pos])['tracks']])
            spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
            pos+=50
    else:
        tempArtists= [ song['artists'][0]['id'] for song in sp.tracks(playlistSongs)['tracks']]
        spotify.update_ui(percent= 100, update_progress= update_progress)
    while tempArtists != []:
        artistId= tempArtists.pop(0)
        if artistId in tempArtists: # if multiple songs by artists exist in playlist
            # Drop every remaining duplicate (replaces the old try/remove loop
            # that relied on a bare except to terminate).
            while artistId in tempArtists:
                tempArtists.remove(artistId)
            toFollow.append(artistId)
    following= []
    pos= 50
    spotify.update_ui(text= 'Finding followed artists...', update_label= update_label)
    while pos <= len(toFollow)+ 49:
        following.extend(sp.current_user_following_artists(toFollow[pos-50:pos])) # has a limit even though docs do not mention it
        pos+=50
    total= 0
    for i in following:
        if not i: total+=1
    print(total)
    if total == 0:
        spotify.update_ui(text= 'No artists to follow', update_label= update_label)
        return
    # self.sp.user_follow_artists(artists) # can do entire list of artists at once(probs max 50 at a time)
    length= len(toFollow)
    for pos, artistId in enumerate(toFollow):
        if not following[pos]: # if not following artist
            name= sp.artist(artistId)['name']
            temp= 'Followed %s' % name
            data.add_log(temp)
            spotify.update_ui(text= temp, update_label= update_label)
            spotify.update_ui(percent= round((pos/length)*100), update_progress= update_progress)
            sp.user_follow_artists([artistId])
    spotify.update_ui(percent= 100, update_progress= update_progress)
    spotify.update_ui(text= 'Finished', update_label= update_label)
#on start
# Module start-up: resolve the active user, load config, then either launch
# the GUI (interactive run) or run a headless song update (imported run).
print('newly missing songs do not end up in unonfirmed area after running once deleteing then running again also happens when just deleted???')
seperator= "%$%"
seperator2= "$%$"
# if auto run just close if id and stuff is missing and dont run gui
my_id= data.get_last_user_id() # might do weird shit cus i changed this from user_id
if my_id == '': # if no last user means this is first open
    if __name__ != '__main__':
        # Headless run with no configured user: nothing sensible to do.
        data.add_log('!! NO LAST USER PROGRAM ENDED !!')
        quit()
    else: print('create a user')
data.get_id_user()
data.changeActiveUser(my_id) #updates user to latest
data.get_allowedDup()
if __name__ == '__main__':
    # Interactive run: hand control to the Qt GUI.
    # from followArtists import followArtists as fA
    # fA('spotify:playlist:33JwDwoh3u3HjKix4i995j' ,songData, spotify.getSp())
    # input('this is an input')
    #gui
    # spotify.updateSongs()
    # data.totalSongs()
    # while True:
    #     results= spotify.search(input())
    #     if results != []:
    #         for i in results:
    #             print('%s:%s' % (i,songData[i][1]))
    #     else: print('no songs')
    app = QApplication([])
    window = MainWindow()
    window.show()
    # run should also add search
    # spotify.updateSongs()
    # then have button
    # print(data.totalSongs())
    # then have button for accept or not
    app.exec()
else:
    # Imported (scheduled/automatic) run: just sync the library.
    spotify.updateSongs()
| BadCodeswJamie/Sponitor | sponitor.py | sponitor.py | py | 68,332 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 21,
"usage_type": "call"
},
{
"api_na... |
38858291652 | """
Place to register all blueprints
"""
def customize_social_auth():
    """Build and return a customised social-auth Flask Blueprint.

    Wraps social_flask's stock login so that a successful OAuth login also
    marks the user's email as confirmed (the provider already verified it),
    and re-declares the auth/complete/disconnect routes on a local blueprint.
    """
    from datetime import datetime
    from flask import Blueprint, current_app, g
    from flask_user import login_required, signals
    from social_core.actions import do_auth, do_complete, do_disconnect
    from social_flask.routes import do_login as base_do_login
    from social_flask.utils import psa
    social_auth = Blueprint("social", "social_flask")
    def do_login(backend, user, social_user):
        # Delegate the real login, then piggy-back email confirmation on it.
        ret = base_do_login(backend=backend, user=user, social_user=social_user)
        # Set email_confirmed_at if not already set, is assuming that a
        # user only have one email, that is, the User is the UserMailClass
        if ret and not user.email_confirmed_at:
            user.email_confirmed_at = datetime.utcnow()
            user.save()
            # Send confirmed_email signal
            signals.user_confirmed_email.send(
                current_app._get_current_object(), user=user
            )
        return ret
    @social_auth.route("/sign-in/<string:backend>", methods=("GET", "POST"))
    @psa("social.complete")
    def auth(backend):
        # Kick off the OAuth dance with the chosen provider.
        return do_auth(g.backend)
    @social_auth.route("/complete/<string:backend>", methods=("GET", "POST"))
    @psa("social.complete")
    def complete(backend, *args, **kwargs):
        """Overrided view to auto confirm emails due to being confirmed by
        auth provider inside login"""
        return do_complete(g.backend, login=do_login, user=g.user, *args, **kwargs)
    @social_auth.route("/disconnect/<string:backend>", methods=("POST",))
    @social_auth.route(
        "/disconnect/<string:backend>/<int:association_id>", methods=("POST",)
    )
    @social_auth.route(
        "/disconnect/<string:backend>/<string:association_id>", methods=("POST",)
    )
    @login_required
    @psa()
    def disconnect(backend, association_id=None):
        """Disconnects given backend from current logged in user."""
        return do_disconnect(g.backend, g.user, association_id)
    return social_auth
def register_blueprint(app):
    """
    Given an app register all blueprints for the application
    """
    from .apps import api, chat, media, posts, user
    # Register Blueprints
    # BUG FIX: Flask's keyword is ``url_prefix`` — the previous
    # ``uri_prefix="oauth"`` was silently ignored, mounting the oauth routes
    # at the site root instead of under /oauth.
    app.register_blueprint(customize_social_auth(), url_prefix="/oauth")
    app.register_blueprint(api.bp, url_prefix="/api")
    app.register_blueprint(chat.bp, url_prefix="/chat")
    app.register_blueprint(media.bp, url_prefix="/media")
    app.register_blueprint(posts.bp, url_prefix="/post")
    app.register_blueprint(user.bp, url_prefix="/user")
    if app.config["DEBUG"]:
        # Showroom is a development-only blueprint.
        from .apps import showroom
        app.register_blueprint(showroom.bp, url_prefix="/showroom")
| Vixx-X/ati-project | src/backend/blueprints.py | blueprints.py | py | 2,801 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "social_flask.routes.do_login",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
42295145901 | import geopy
bairros_list = [
"Boa Vista",
"Bom Pastor",
"Centro",
"Granbery",
"Jardim Glória",
"Santa Helena",
"São Mateus",
"Teixeiras",
"Bairu",
"Bonfim",
"Botanágua",
"Centenário",
"Cesário Alvim",
"Grajaú",
"Linhares",
"Manoel Honório",
"Marumbi",
"Nossa Senhora Aparecida",
"Progresso",
"Santa Rita",
"Santa Cândida",
"São Benedito",
"São Bernardo",
"Vitorino Braga",
"Eldorado",
"Granjas Betânea",
"Jardim Bom Clima",
"Mariano Procópio",
"Grama",
"Jardim Emaús",
"Parque Independência",
"Santa Therezinha",
"Filgueiras",
"Vale dos Bandeirantes",
"Barbosa Lage",
"Barreira do Triunfo",
"Benfica",
"Milho Branco",
"Carlos Chagas",
"Cerâmica",
"Esplanada",
"Francisco Bernardino",
"Industrial",
"Jardim Natal",
"Jóquei Clube",
"Nova Era",
"Paula Lima",
"Remonta",
"Represa",
"Santa Cruz",
"São Dimas",
"Vila Esperança",
"Aeroporto",
"Borboleta",
"Cruzeiro Santo Antônio",
"Martelos",
"Morro do Imperador",
"Nova Califórnia",
"Novo Horizonte",
"São Pedro",
"Serro Azul",
"Barão do Retiro",
"Floresta",
"Nossa Senhora de Lourdes",
"Santo Antônio",
"Vila Furtado de Menezes",
"Vila Olavo Costa",
"Niterói",
"Costa Carvalho",
"Bomba de Fogo",
"Cascatinha",
"Graminha",
"Ipiranga",
"Jardim Laranjeiras",
"Sagrado Coração de Jesus",
"Salvaterra",
"Santa Efigênia",
"Santa Luzia",
"São Geraldo",
]
location_dict = {}
# Create the geocoder once: building a new Nominatim client per bairro wasted
# connections and risks rate limiting by the service.
geolocator = geopy.geocoders.Nominatim(user_agent="geolocalização")
for bairro in bairros_list:
    location = geolocator.geocode(bairro + ', Juiz de Fora - MG')
    try:
        lat = location.latitude
        lon = location.longitude
        location_dict[bairro] = {
            'latitude': lat,
            # BUG FIX: key was misspelled 'logitude'.
            'longitude': lon,
        }
        print(bairro + ', Juiz de Fora - MG.')
    except AttributeError:
        # geocode() returned None: address not found (was a bare except:).
        print(bairro + ', não funciona.')
print(location_dict) | igortitoneli/Api_Vitrine | bairro_location.py | bairro_location.py | py | 2,455 | python | is | code | 0 | github-code | 6 | [
{
"api_name": "geopy.geocoders.Nominatim",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "geopy.geocoders",
"line_number": 88,
"usage_type": "attribute"
}
] |
14974723036 | import torch
import torch.nn as nn
from torch_geometric.loader import DataLoader
from torch_geometric.data import Data, Batch
from torch.utils.data import Dataset
import torch_geometric.transforms as T
from torch_geometric.nn import GATConv
import torch.nn.functional as F
class GATNet(torch.nn.Module):
    """Two-branch GAT model predicting per-node (width, height) values.

    One GAT stack embeds the room graph, another embeds the boundary graph;
    the pooled boundary embedding is broadcast to every graph node, after
    which two small MLP heads regress width and height per node.  Residual
    ("step") connections concatenate the raw input features back in after
    every conv layer.
    """
    def __init__(self, num_graph_node_features, num_boundary_node_features):
        super(GATNet, self).__init__()
        # Graph branch: each layer's input is the previous output plus the
        # raw node features re-concatenated (hence the running size math).
        self.graph_conv1 = GATConv(num_graph_node_features, 32, heads=4)
        input_of_conv2 = num_graph_node_features + 32*4
        self.graph_conv2 = GATConv(input_of_conv2, 32, heads=8)
        input_of_conv3 = num_graph_node_features + 32*8
        self.graph_conv3 = GATConv(input_of_conv3, 64, heads=8)
        input_of_conv4 = num_graph_node_features + 64*8
        self.graph_conv4 = GATConv(input_of_conv4, 128, heads=8)
        shape_of_graphs_befor_concatination = num_graph_node_features + 128*8
        # Boundary branch: shallower stack with the same residual-concat idea.
        self.boundary_conv1 = GATConv(num_boundary_node_features, 32, heads=4)
        input_of_boundary_conv2 = 32*4 + num_boundary_node_features
        self.boundary_conv2 = GATConv(input_of_boundary_conv2, 32, heads=8)
        shape_of_boundary_befor_concatination = num_boundary_node_features + 32 * 8
        # Output of graph_conv8 + output of boundary_conv5 + 2 step connection from real nodes and boundary nodes
        inputs_concatination = shape_of_graphs_befor_concatination + shape_of_boundary_befor_concatination
        self.Concatination1 = GATConv(inputs_concatination, 128, heads=8)
        # Regression heads: one hidden layer each, scalar output per node.
        self.width_layer1 = nn.Linear(128*8, 128)
        self.height_layer1 = nn.Linear(128*8, 128)
        self.width_output = nn.Linear(128, 1)
        self.height_output = nn.Linear(128, 1)
        self.dropout = torch.nn.Dropout(0.2)
    def forward(self, graph, boundary):
        """Return (width, height) tensors, one value per graph node."""
        x_graph, g_edge_index, g_edge_attr, g_batch = graph.x.to(torch.float32), graph.edge_index, graph.edge_attr, graph.batch
        x_boundary, b_edge_indexy, b_edge_attr, b_batch = boundary.x.to(torch.float32), boundary.edge_index, boundary.edge_attr, boundary.batch
        NUM_OF_NODES = x_graph.shape[0]
        # During testing, as we input only one graph.
        if g_batch == None:
            g_batch = torch.zeros(x_graph.shape[0], dtype=torch.long)
        if b_batch == None:
            b_batch = torch.zeros(x_boundary.shape[0], dtype=torch.long)
        x_graph_res = x_graph
        x_boundary_res = x_boundary
        # Passing the graph throught a message passing to embed its features
        x_graph = F.leaky_relu(self.graph_conv1(x_graph, g_edge_index, g_edge_attr))
        x_graph = self.dropout(x_graph) # Concatinate with step connection from real values.
        x_graph = torch.cat([x_graph, x_graph_res], dim=1)
        x_graph = F.leaky_relu(self.graph_conv2(x_graph, g_edge_index, g_edge_attr))
        x_graph = self.dropout(x_graph)
        x_graph = torch.cat([x_graph, x_graph_res], dim=1)
        x_graph = F.leaky_relu(self.graph_conv3(x_graph, g_edge_index))
        x_graph = self.dropout(x_graph)
        x_graph = torch.cat([x_graph, x_graph_res], dim=1)
        x_graph = F.leaky_relu(self.graph_conv4(x_graph, g_edge_index))
        x_graph = self.dropout(x_graph)
        x_graph = torch.cat([x_graph, x_graph_res], dim=1)
        # Passing the boundary throught a message passing to embed its features
        x_boundary = F.leaky_relu(self.boundary_conv1(x_boundary, b_edge_indexy, b_edge_attr))
        x_boundary = self.dropout(x_boundary)
        x_boundary = torch.cat([x_boundary, x_boundary_res], dim=1)
        x_boundary = F.leaky_relu(self.boundary_conv2(x_boundary, b_edge_indexy, b_edge_attr))
        x_boundary = self.dropout(x_boundary)
        x_boundary = torch.cat([x_boundary, x_boundary_res], dim=1)
        # Pooling the bounadry to 1D vector by getting max value in each feature for all nodes.
        x_boundary_pooled = F.max_pool1d(x_boundary.transpose(0, 1), kernel_size=x_boundary.shape[0]).view(1, -1)
        # Concatinating the graph & the boundary
        x = torch.cat([x_graph, x_boundary_pooled.repeat(NUM_OF_NODES, 1)], dim=1)
        x = F.leaky_relu(self.Concatination1(x, g_edge_index))
        x = self.dropout(x)
        width = F.leaky_relu(self.width_layer1(x))
        width = self.dropout(width)
        width = self.width_output(width)
        height = F.leaky_relu(self.height_layer1(x))
        height = self.dropout(height)
        height = self.height_output(height)
        return width.squeeze(), height.squeeze()
def load_model(checkpoint_path, device):
    """Restore a GATNet (9 graph / 3 boundary features) from *checkpoint_path*.

    The checkpoint is expected to be a dict containing 'model_state_dict'.
    The model is moved to *device* and switched to eval mode.
    """
    model = GATNet(9, 3)
    model = model.to(device)
    # map_location keeps CPU-only machines working when the checkpoint was
    # saved from a GPU run (previously this raised on such machines).
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    return model
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch_geometric.nn.GATConv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch_geometric.nn.GATConv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "t... |
25457066690 | import telegram
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException
import threading
class MessageSender:
    """Fan a text message out to Telegram and/or Twilio SMS.

    Each channel is optional: it is enabled only when its section of
    *config* is not None.  send_message is serialised with a lock so the
    sender can be shared across threads.
    """
    def __init__(self, config):
        self.lock = threading.Lock()
        self.telegram_bot = None
        if config["telegram"] is not None:
            self.telegram_chat_id = config["telegram"]["chat_id"]
            self.telegram_bot = telegram.Bot(token=config["telegram"]["token"])
        self.twilio_client = None
        if config["twilio"] is not None:
            self.toNumber = config["twilio"]["toNumber"]
            self.fromNumber = config["twilio"]["fromNumber"]
            self.accountSid = config["twilio"]["accountSid"]
            self.authToken = config["twilio"]["authToken"]
            self.twilio_client = Client(self.accountSid, self.authToken)
    def send_message(self, content, sender=None):
        """Send *content* on every configured channel (best effort for SMS)."""
        with self.lock:
            if sender is not None:
                content = "Sender: " + sender + ": " + content
            if self.telegram_bot is not None:
                self.telegram_bot.send_message(chat_id=self.telegram_chat_id, text=content)
            if self.twilio_client is not None:
                try:
                    self.twilio_client.messages.create(to=self.toNumber, from_=self.fromNumber,
                                                       body=content)
                # Deliberate best-effort: SMS failures must not break callers.
                except (NameError, TwilioRestException):
                    pass
| wanmeihuali/Agressive-Store-Bots | MessageSender.py | MessageSender.py | py | 1,434 | python | en | code | null | github-code | 6 | [
{
"api_name": "threading.Lock",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "telegram.Bot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "twilio.rest.Client",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "twilio.base.exceptions.... |
16539466565 | # -*- coding: utf-8 -*-
"""Главный и единственный модуль в игре.
Игра специально написана в минималистичном стиле, мне
хотелось воплотить текстовый движок настолько лаконично,
насколько это вообще возможно.
Подкладывая этому скрипту различные json с метаданными, можно
запускать произвольные текстовые истории.
"""
import json
import os
import textwrap
from collections import defaultdict
from typing import Dict, Any, NewType
Context = NewType('Context', Dict[str, Any])
Location = NewType('Location', Dict[str, Any])
Locations = Dict[str, Location]
BASE_PATH = os.path.join('game', 'locations')
def clear_screen_windows() -> None:
    """Clear the terminal contents on Windows (via 'cls')."""
    os.system('cls')
def clear_screen_nix() -> None:
    """Clear the terminal contents on *nix (via 'clear')."""
    os.system('clear')
def get_locations() -> Locations:
    """Load every location JSON under BASE_PATH into one dictionary.

    Example result:
    {
        'start': {'title': 'Стартовая локация', 'options': ...},
        ...
    }
    """
    found: Locations = {}
    for directory, _subdirs, names in os.walk(BASE_PATH):
        for name in names:
            file_path = os.path.join(directory, name)
            with open(file_path, mode='r', encoding='utf-8') as handle:
                payload = json.load(handle)
            for loc_name, loc_body in payload.items():
                found[loc_name] = Location(loc_body)
    return found
def get_context() -> Context:
    """Build the initial set of game variables.

    Any default values for a play-through belong here.
    """
    initial_values = {'times_visited': defaultdict(int)}
    return Context(initial_values)
def get_header(position: str, location: Location, context: Context) -> str:
    """Pick the header text for a location.

    A never-visited location may show its special 'initial_header' instead
    of the regular one.
    """
    never_visited = context['times_visited'][position] == 0
    if never_visited and 'initial_header' in location:
        return location['initial_header']
    return location['header']
def is_visible(option: dict, context: Context) -> bool:
    """Return True if this option should be shown to the player."""
    condition = option.get('condition')
    if condition is None:
        return True
    # SECURITY: eval() executes arbitrary code from the location JSON files —
    # acceptable only because game data is authored locally, never untrusted.
    return bool(eval(condition, {}, context))
def get_input_from_user(variants: dict) -> str:
    """Prompt until the user enters one of *variants*' keys; return that key.

    BUG FIX: the previous version tested membership on the stripped,
    lower-cased input but returned the raw string, so ' 1 ' passed the check
    here and then raised KeyError in the caller.  The normalised value is
    now returned.
    """
    while True:
        choice = input('>').strip().lower()
        if choice in variants:
            return choice
        print('Выберите один из предоставленных вариантов')
def apply_side_effect(chosen_option: dict, context: Context) -> None:
    """Apply the side effect of the chosen option.

    Mutates context variables to drive the game logic; the decisions
    themselves are authored in the JSON data, not in this script.
    """
    side_effect = chosen_option.get('side_effect')
    # SECURITY: exec() runs strings from the location JSON — safe only for
    # locally authored game data, never untrusted input.
    if side_effect:
        exec(side_effect, {}, context)
def enter_location(position: str, locations: Locations,
                   context: Context) -> Location:
    """Perform the entry steps for a new location and return its instance."""
    clear_screen()
    location: Location = locations[position]
    output_location(position, location, context)
    # Counted after rendering so get_header can detect a first visit.
    context['times_visited'][position] += 1
    return location
def ask_user(location: Location, context: Context) -> dict:
    """Print the visible options, read the player's choice, return the option.

    Example result:
    {
        'condition': 'True',
        'label': 'Вариант 2',
        'goto': 'end',
        ...
    }

    A separate `number` counter is required because hidden options would
    otherwise leave gaps in the on-screen numbering.
    """
    visible_choices = {}
    number = 0
    for option in location['options']:
        if is_visible(option, context):
            number += 1
            visible_choices[str(number)] = option
            print(f'[{number}] {option["label"]}')
    user_choice_number = get_input_from_user(visible_choices)
    chosen_option = visible_choices[user_choice_number]
    return chosen_option
def output_location(position: str, location: Location, context: Context,
                    terminal_width: int = 80) -> None:
    """Print the location's header, word-wrapped and framed by rules."""
    ruler = '-' * terminal_width
    print(ruler)
    header_text = get_header(position, location, context)
    for paragraph in header_text.split('\n'):
        if not paragraph:
            continue
        for wrapped_line in textwrap.wrap(text=paragraph,
                                          width=terminal_width):
            print(wrapped_line)
    print(ruler)
def main():
    """Main event loop of the game.

    Cycles through the JSON-defined locations until the player presses
    Ctrl+C or reaches the location named 'end'.
    """
    locations = get_locations()
    context = get_context()
    position = 'start'
    while position != 'end':
        location = enter_location(position, locations, context)
        option = ask_user(location, context)
        apply_side_effect(option, context)
        position = option['goto']
    print('Спасибо за игру!')
if __name__ == '__main__':
    # Bind the platform-appropriate screen-clearing implementation once.
    if os.name == 'nt':
        clear_screen = clear_screen_windows
    else:
        clear_screen = clear_screen_nix
    try:
        main()
    except KeyboardInterrupt:
        # Graceful exit on Ctrl+C.
        print('Выход по команде с клавиатуры')
| IgorZyktin/BGCGame | game/__main__.py | __main__.py | py | 6,720 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "typing.NewType",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.NewType",
"line_numb... |
9499182379 | import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
# 공개 데이터셋에서 학습 데이터를 내려받습니다.
training_data = datasets.FashionMNIST(
root="Fashion_MNIST_Data",
train=True,
download=True,
transform=ToTensor(),
)
# 공개 데이터셋에서 테스트 데이터를 내려받습니다.
test_data = datasets.FashionMNIST(
root="Fashion_MNIST_Data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
# 데이터로더를 생성합니다.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print(f"Shape of X [N, C, H, W]: {X.shape}")
print(f"Shape of y: {y.shape} {y.dtype}")
break
# 학습에 사용할 CPU나 GPU, MPS 장치를 얻습니다.
device = (
"cuda"
if torch.cuda.is_available()
else "mps"
if torch.backends.mps.is_available()
else "cpu"
)
print(f"Using {device} device")
# 모델을 정의합니다.
class NeuralNetwork(nn.Module):
    """Simple MLP classifier for flattened 28x28 images (10 output logits)."""
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        hidden = 512
        layers = [
            nn.Linear(28 * 28, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 10),
        ]
        self.linear_relu_stack = nn.Sequential(*layers)

    def forward(self, x):
        flat = self.flatten(x)
        return self.linear_relu_stack(flat)
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch; prints loss every 100 batches.

    Relies on the module-global `device` for tensor placement.
    NOTE(review): model.train() is never called here even though test()
    calls model.eval() — confirm this is intended.
    """
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
    """Evaluate the model over *dataloader*; print accuracy and mean loss.

    Uses the module-global `device`; gradients are disabled for speed.
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    # Average loss over batches, accuracy over samples.
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 5
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model, loss_fn)
print("Done!")
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
| hotsoycandy/learn-pytorch | train.py | train.py | py | 2,907 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torchvision.datasets.FashionMNIST",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 12,
"usage_type": "call"
},
{
... |
5544798991 | from common import execute_sh
from retry import retry
import os
import logging
import json
"""
Issue kubectl commands on the running linode cluster
"""
WEATHER_API_TOKEN = os.environ.get("WEATHER_API_TOKEN")
KUBERNETES_NODE_COUNT = "2"
@retry(tries=60, delay=30)
def get_nodes():
    """Verify kubectl reaches the cluster and the expected node count is up.

    Retried for up to 30 minutes while nodes join the cluster.
    """
    output = execute_sh(["kubectl", "--output=json", "get", "nodes"])
    nodes = json.loads(output)["items"]
    found = len(nodes)
    if found != int(KUBERNETES_NODE_COUNT):
        raise Exception(f"kubectl expected {KUBERNETES_NODE_COUNT} nodes but found {found}")
    logging.info(f"kubectl OK: Retrieved node count: {found}")
    return
@retry(tries=5, delay=10)
def apply_deployment():
    """Apply the deployment manifest, retrying transient kubectl failures."""
    cmd = ["kubectl", "--output=json", "apply", "-f", "resources/deployment.yaml"]
    try:
        output = execute_sh(cmd)
    except Exception as e:
        # Chain the original failure so the retry log keeps the root cause
        # (the previous version discarded it).
        raise Exception(f"retrying {cmd}") from e
    json_object = json.loads(output)
    logging.debug(f"json ==> {json_object}")
    logging.info("kubectl deployment applied OK")
    return
def apply_service():
    """Apply the service manifest and log the parsed kubectl response."""
    manifest = "resources/service.yaml"
    output = execute_sh(["kubectl", "--output=json", "apply", "-f", manifest])
    json_object = json.loads(output)
    logging.debug(f"json ==> {json_object}")
    logging.info(f"kubectl service applied OK")
    return
@retry(tries=5, delay=10)
def delete_service():
    """Delete the 'gwa' Kubernetes service via kubectl (retried on failure)."""
    cmd = ["kubectl", "delete", "svc", "gwa"]
    execute_sh(cmd)
def create_secrets():
    """Create k8s secret for the api key etc"""
    # The token comes from the WEATHER_API_TOKEN environment variable
    # read at module import time.
    token_literal = f"--from-literal=WEATHER_API_TOKEN={WEATHER_API_TOKEN}"
    execute_sh([
        "kubectl",
        "create",
        "secret",
        "generic",
        "gws-secret",
        token_literal,
    ])
@retry(tries=20, delay=10)
def get_ingress_ip():
    """Return the load-balancer ingress IP of the 'gwa' service.

    Retried because the cloud provider assigns the IP asynchronously.
    """
    output = execute_sh(["kubectl", "--output=json", "get", "svc", "gwa"])
    svc = json.loads(output)
    logging.debug(f"json ==> {svc}")
    ingress_ip = svc["status"]["loadBalancer"]["ingress"][0]["ip"]
    if not ingress_ip:
        raise Exception(f"Ingress IP is empty in the returned json")
    logging.info(f"Load Balance Ingress is: {ingress_ip}")
    return ingress_ip
def apply_argocd():
    """Prepare the cluster for Argo CD by creating its namespace."""
    # NOTE(review): 'ls -al' looks like leftover debugging — confirm it can
    # be removed before cleaning up.
    cmd = ["ls", "-al"]
    output = execute_sh(cmd)
    # Create the namespace the Argo CD resources will be installed into.
    cmd = ["kubectl", "create", "namespace", "argocd"]
    output = execute_sh(cmd)
    # cmd = ["kubectl", "apply", "--namespace=argocd", "--dry-run=server", "-k", "."]
    # output = execute_sh(cmd, "./resources")
    return
| HarrisKirk/blue-green-dreams | gwa-deploy/kubectl.py | kubectl.py | py | 2,558 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "common.execute_sh",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"li... |
10899943389 | import cv2
import numpy as np
from keras.models import load_model
# Load the test image as grayscale (flag 0) and scale to the 48x48 input size.
image = cv2 .imread("Bore2.jpg",0)
image = cv2.resize(image, (48, 48))
# Append a trailing channel axis -> shape (48, 48, 1).
image = image[..., np.newaxis]
# dst = np.expand_dims(image, axis=1)
print(image.shape)
# exit()
model = load_model("model_v6_23.hdf5")
# NOTE(review): predict() receives a single (48, 48, 1) array with no batch
# axis; Keras models typically expect (1, 48, 48, 1) — confirm the model's
# input shape (an np.expand_dims(image, axis=0) may be missing here).
predicted_class = np.argmax(model.predict(image))
print(predicted_class)
exit()
#
# image = cv2.imread("1.jpg")
# cv2.imshow("asli", image)
# cv2.waitKey()
# print(image.shape)
# # image_size=(48,e48)
# width, height = 48, 48
# # face = np.asarray(image).reshape(width, height)
# frame = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
# # face = cv2.resize(image.astype('uint8'),image_size)
# print(frame.shape)
# cv2.imshow("", frame)
# cv2.waitKey()
# from collections import Counter
#
#
# def most_frequent(List):
# occurence_count = Counter(List)
# print(occurence_count)
# print(type(occurence_count))
# charater = occurence_count.most_common(1)[0][0]
# print(charater)
#
# exit()
# # chracter = occurence_count.most_common(1)[0][0]
# # repr(chracter)
#
#
# List =['Cat', 'Cat', 'Dog']
# print(most_frequent(List))
| hassanahmed95/My_True_Face | ML_training/test_file.py | test_file.py | py | 1,134 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "keras.models.load_model",
"... |
655272067 | import os
import napari
import z5py
def view_result(sample, checkpoint_name):
    """Open napari on a central crop of ``sample`` showing raw data, labels
    and, when present, the predictions/segmentations of ``checkpoint_name``.

    Parameters
    ----------
    sample : str
        Name of the n5 container under ./data (without extension).
    checkpoint_name : str
        Checkpoint whose prediction/segmentation datasets are displayed.
    """
    # Half-extent of the crop around the volume center (z, y, x).
    halo = [25, 512, 512]
    path = f'./data/{sample}.n5'
    with z5py.File(path, 'r') as f:
        ds = f['raw']
        # Central bounding box, clipped to the dataset shape.
        bb = tuple(slice(max(sh // 2 - ha, 0),
                         min(sh // 2 + ha, sh))
                   for sh, ha in zip(ds.shape, halo))
        raw = ds[bb]
        labels = f['labels'][bb]

        def _load(key, roi=bb):
            # Read the cropped dataset if it exists, otherwise return None.
            return f[key][roi] if key in f else None

        prefix = f'predictions/{checkpoint_name}/'
        fg = _load(prefix + 'foreground')
        # Prefer plain boundaries; fall back to affinities, which carry an
        # extra leading channel axis.
        boundaries = _load(prefix + 'boundaries')
        if boundaries is None:
            boundaries = _load(prefix + 'affinities', (slice(None),) + bb)

        prefix = f'segmentation/{checkpoint_name}'
        # Watershed display is currently disabled.
        ws = None
        mc = _load(prefix + '/multicut_postprocessed')
        mws = _load(prefix + '/mutex_watershed_postprocessed')
    with napari.gui_qt():
        viewer = napari.Viewer()
        viewer.add_image(raw)
        if fg is not None:
            viewer.add_image(fg)
        if boundaries is not None:
            viewer.add_image(boundaries)
        if ws is not None:
            viewer.add_labels(ws)
        if mc is not None:
            viewer.add_labels(mc)
        if mws is not None:
            viewer.add_labels(mws)
        viewer.add_labels(labels)
def view_results(samples, checkpoint):
    """Show the stored results of ``checkpoint`` for every sample in ``samples``."""
    # Only the final path component names the checkpoint.
    checkpoint_name = os.path.basename(checkpoint)
    for sample in samples:
        view_result(sample, checkpoint_name)
if __name__ == '__main__':
    # Inspect a single small sample with the default affinity-model checkpoint.
    view_result('human_small', 'affinity_model_default_human_rat')
| constantinpape/torch-em | experiments/unet-segmentation/mitochondria-segmentation/mito-em/challenge/check_result.py | check_result.py | py | 2,239 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "z5py.File",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "napari.gui_qt",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "napari.Viewer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_numbe... |
25968013406 | import argparse
import time
from threading import Thread
import requests
class RequestThread(Thread):
    """Thread that issues a single GET request to ``url`` when started."""

    def __init__(self, url):
        self.url = url
        # Run make_request() in this thread once start() is called.
        super().__init__(target=self.make_request)

    def make_request(self):
        """Perform the GET request (response is intentionally discarded)."""
        requests.get(self.url)
class Worker(object):
    """Holds at most one running thread and reports whether it is occupied."""

    def __init__(self):
        self.thread = None

    @property
    def busy(self):
        """True while the assigned thread is still running.

        A finished thread is dropped as a side effect, freeing the worker.
        """
        finished = self.thread is not None and not self.thread.is_alive()
        if finished:
            self.thread = None
        return self.thread is not None

    def run_thread(self, thread):
        """Assign ``thread`` to this worker and start it."""
        self.thread = thread
        thread.start()

    def join(self):
        """Block until the assigned thread completes."""
        self.thread.join()
class WorkerGroup(object):
    """Fixed-size pool of Workers with blocking acquisition."""

    def __init__(self, num_workers):
        self.workers = [self._generate_worker() for i in range(num_workers)]

    def get_available_worker(self):
        """Block until some worker is idle and return it.

        Polls every 0.5 s in a loop.  The previous version recursed on each
        retry, which could exhaust the stack during a long wait.
        """
        while True:
            for worker in self.workers:
                if not worker.busy:
                    return worker
            time.sleep(0.5)

    def _generate_worker(self):
        # Factory hook; override to supply a different Worker type.
        return Worker()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('url')
    parser.add_argument('workers', type=int)
    parser.add_argument('total_requests', type=int)
    args = parser.parse_args()
    # Pre-build one thread per request; workers start them as they free up.
    threads = [RequestThread(args.url) for i in range(args.total_requests)]
    worker_group = WorkerGroup(args.workers)
    while threads:
        # Blocks until a worker is idle, then hands it the next request.
        worker = worker_group.get_available_worker()
        worker.run_thread(threads.pop())
    # Wait for the in-flight requests to finish.
    for worker in worker_group.workers:
        worker.join()
| wgaggioli/elk-example | threaded_requests.py | threaded_requests.py | py | 1,602 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "threading.Thread",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
... |
6771398570 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class ProxyPoolCrawlerPipeline(object):
    """Default Scrapy pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        # No transformation is applied; the item is passed straight through.
        return item
# Store the crawled proxy into the MySQL database.
class ProxyPoolCrawler2mysql(object):
    """Scrapy pipeline that inserts each crawled proxy address into MySQL."""

    def process_item(self, item, spider):
        """Insert item['address'] into proxypool.proxy, then hand the item on.

        A connection is opened per item and always closed in ``finally``.
        """
        address = item['address']
        # NOTE(review): hard-coded localhost/root credentials with an empty
        # password — move these to Scrapy settings before deploying.
        connection = pymysql.connect(
            host='localhost',          # local database server
            user='root',               # MySQL user name
            passwd='',                 # password
            db='proxypool',            # database name
            charset='utf8mb4',         # default encoding
            cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                sql = """INSERT INTO proxy(address)
                            VALUES (%s)"""
                # Parameters must be a sequence: (address,) is a one-tuple;
                # the previous (address) was merely a parenthesized string.
                cursor.execute(
                    sql, (address,))
                # Commit this insert.
                connection.commit()
        finally:
            # Always release the connection.
            connection.close()
return item | ShawnRong/proxy-pool-crawler | proxy_pool_crawler/pipelines.py | pipelines.py | py | 2,136 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymysql.connect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 48,
"usage_type": "attribute"
}
] |
22257701787 | """Filters module with a class to manage filters/algorithms for polydata datasets."""
import collections.abc
import logging
import numpy as np
import pyvista
from pyvista import (
abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array
)
from pyvista.core.errors import NotAllTrianglesError
from pyvista.core.filters import _get_output, _update_alg
from pyvista.core.filters.data_set import DataSetFilters
@abstract_class
class PolyDataFilters(DataSetFilters):
"""An internal class to manage filters/algorithms for polydata datasets."""
    def edge_mask(poly_data, angle):
        """Return a boolean point mask marking feature-edge points.

        Parameters
        ----------
        angle : float
            Angle to consider an edge.

        Returns
        -------
        numpy.ndarray
            Boolean array with one entry per point; ``True`` for points
            lying on a feature edge sharper than ``angle``.
        """
        if not isinstance(poly_data, pyvista.PolyData): # pragma: no cover
            poly_data = pyvista.PolyData(poly_data)
        # Tag each point with its index so edge points can be mapped back
        # to the original mesh after filtering.
        poly_data.point_arrays['point_ind'] = np.arange(poly_data.n_points)
        featureEdges = _vtk.vtkFeatureEdges()
        featureEdges.SetInputData(poly_data)
        # Only feature edges are of interest; disable the other edge types.
        featureEdges.FeatureEdgesOn()
        featureEdges.BoundaryEdgesOff()
        featureEdges.NonManifoldEdgesOff()
        featureEdges.ManifoldEdgesOff()
        featureEdges.SetFeatureAngle(angle)
        featureEdges.Update()
        edges = _get_output(featureEdges)
        orig_id = pyvista.point_array(edges, 'point_ind')
        # Membership test against the indices that survived edge extraction.
        return np.in1d(poly_data.point_arrays['point_ind'], orig_id,
                       assume_unique=True)
    def boolean_cut(poly_data, cut, tolerance=1E-5, inplace=False):
        """Perform a Boolean cut using another mesh.

        Parameters
        ----------
        cut : pyvista.PolyData
            Mesh making the cut
        tolerance : float, optional
            Tolerance passed to the VTK boolean-operation filter.
        inplace : bool, optional
            Updates mesh in-place.

        Returns
        -------
        mesh : pyvista.PolyData
            The cut mesh.
        """
        if not isinstance(cut, pyvista.PolyData):
            raise TypeError("Input mesh must be PolyData.")
        # The VTK filter only handles fully triangulated inputs.
        if not poly_data.is_all_triangles() or not cut.is_all_triangles():
            raise NotAllTrianglesError("Make sure both the input and output are triangulated.")
        bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
        # NOTE(review): despite the name "cut", this runs the *intersection*
        # operation (the difference variant is commented out) — confirm intended.
        bfilter.SetOperationToIntersection()
        # bfilter.SetOperationToDifference()
        bfilter.SetInputData(1, cut)
        bfilter.SetInputData(0, poly_data)
        bfilter.ReorientDifferenceCellsOff()
        bfilter.SetTolerance(tolerance)
        bfilter.Update()
        mesh = _get_output(bfilter)
        if inplace:
            poly_data.overwrite(mesh)
            return poly_data
        else:
            return mesh
def boolean_add(poly_data, mesh, inplace=False):
"""Add a mesh to the current mesh.
Does not attempt to "join" the meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to add.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
joinedmesh : pyvista.PolyData
The joined mesh.
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
vtkappend = _vtk.vtkAppendPolyData()
vtkappend.AddInputData(poly_data)
vtkappend.AddInputData(mesh)
vtkappend.Update()
mesh = _get_output(vtkappend)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def __add__(poly_data, mesh):
"""Merge these two meshes."""
if not isinstance(mesh, _vtk.vtkPolyData):
return DataSetFilters.__add__(poly_data, mesh)
return PolyDataFilters.boolean_add(poly_data, mesh)
def boolean_union(poly_data, mesh, inplace=False):
"""Combine two meshes and attempts to create a manifold mesh.
Parameters
----------
mesh : pyvista.PolyData
The mesh to perform a union against.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
union : pyvista.PolyData
The union mesh.
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToUnion()
bfilter.SetInputData(1, mesh)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
    def boolean_difference(poly_data, mesh, inplace=False):
        """Subtract ``mesh`` from this mesh with a Boolean difference.

        Runs VTK's boolean-operation filter in difference mode, keeping the
        volume of this mesh that is not shared with ``mesh``.

        Parameters
        ----------
        mesh : pyvista.PolyData
            The mesh to subtract from this one.
        inplace : bool, optional
            Updates mesh in-place.

        Returns
        -------
        difference : pyvista.PolyData
            The difference mesh.
        """
        if not isinstance(mesh, pyvista.PolyData):
            raise TypeError("Input mesh must be PolyData.")
        bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
        bfilter.SetOperationToDifference()
        # Port 0 is the minuend (this mesh), port 1 the subtrahend.
        bfilter.SetInputData(1, mesh)
        bfilter.SetInputData(0, poly_data)
        bfilter.ReorientDifferenceCellsOff()
        bfilter.Update()
        mesh = _get_output(bfilter)
        if inplace:
            poly_data.overwrite(mesh)
            return poly_data
        else:
            return mesh
def intersection(poly_data, mesh, split_first=True, split_second=True):
"""Compute the intersection between two meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to intersect with.
split_first : bool, optional
If `True`, return the first input mesh split by the intersection with the
second input mesh.
split_second : bool, optional
If `True`, return the second input mesh split by the intersection with the
first input mesh.
Returns
-------
intersection: pyvista.PolyData
The intersection line.
first_split: pyvista.PolyData
The first mesh split along the intersection. Returns the original first mesh
if `split_first` is False.
second_split: pyvista.PolyData
The second mesh split along the intersection. Returns the original second mesh
if `split_second` is False.
Examples
--------
Intersect two spheres, returning the intersection and both spheres
which have new points/cells along the intersection line.
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> intersection, s1_split, s2_split = s1.intersection(s2)
The mesh splitting takes additional time and can be turned
off for either mesh individually.
>>> intersection, _, s2_split = s1.intersection(s2, \
split_first=False, \
split_second=True)
"""
intfilter = _vtk.vtkIntersectionPolyDataFilter()
intfilter.SetInputDataObject(0, poly_data)
intfilter.SetInputDataObject(1, mesh)
intfilter.SetComputeIntersectionPointArray(True)
intfilter.SetSplitFirstOutput(split_first)
intfilter.SetSplitSecondOutput(split_second)
intfilter.Update()
intersection = _get_output(intfilter, oport=0)
first = _get_output(intfilter, oport=1)
second = _get_output(intfilter, oport=2)
return intersection, first, second
    def curvature(poly_data, curv_type='mean'):
        """Return the pointwise curvature of a mesh.

        Parameters
        ----------
        curv_type : str, optional
            Curvature type, case-insensitive.  One of ``'mean'``,
            ``'gaussian'``, ``'maximum'`` or ``'minimum'``.

        Returns
        -------
        curvature : np.ndarray
            Curvature values, one per point.

        Raises
        ------
        ValueError
            If ``curv_type`` is not one of the recognized names.
        """
        curv_type = curv_type.lower()
        # Create curve filter and compute curvature
        curvefilter = _vtk.vtkCurvatures()
        curvefilter.SetInputData(poly_data)
        if curv_type == 'mean':
            curvefilter.SetCurvatureTypeToMean()
        elif curv_type == 'gaussian':
            curvefilter.SetCurvatureTypeToGaussian()
        elif curv_type == 'maximum':
            curvefilter.SetCurvatureTypeToMaximum()
        elif curv_type == 'minimum':
            curvefilter.SetCurvatureTypeToMinimum()
        else:
            raise ValueError('Curv_Type must be either "Mean", '
                             '"Gaussian", "Maximum", or "Minimum"')
        curvefilter.Update()
        # Compute and return curvature
        curv = _get_output(curvefilter)
        return _vtk.vtk_to_numpy(curv.GetPointData().GetScalars())
def plot_curvature(poly_data, curv_type='mean', **kwargs):
"""Plot the curvature.
Parameters
----------
curvtype : str, optional
One of the following strings indicating curvature type:
* ``'Mean'``
* ``'Gaussian'``
* ``'Maximum'``
* ``'Minimum'``
**kwargs : optional
See :func:`pyvista.plot`
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Examples
--------
Plot the mean curvature of an example mesh.
>>> from pyvista import examples
>>> hills = examples.load_random_hills()
>>> cpos = hills.plot_curvature(smooth_shading=True)
"""
kwargs.setdefault('scalar_bar_args',
{'title': f'{curv_type.capitalize()} Curvature'})
return poly_data.plot(scalars=poly_data.curvature(curv_type),
**kwargs)
    def triangulate(poly_data, inplace=False):
        """Return an all triangle mesh.

        More complex polygons will be broken down into triangles (a surface
        filter produces triangles, not tetrahedra).

        Parameters
        ----------
        inplace : bool, optional
            Updates mesh in-place.

        Returns
        -------
        mesh : pyvista.PolyData
            Mesh containing only triangles.
        """
        trifilter = _vtk.vtkTriangleFilter()
        trifilter.SetInputData(poly_data)
        # Drop stray vertex and line cells so only polygons are triangulated.
        trifilter.PassVertsOff()
        trifilter.PassLinesOff()
        trifilter.Update()
        mesh = _get_output(trifilter)
        if inplace:
            poly_data.overwrite(mesh)
            return poly_data
        else:
            return mesh
def smooth(poly_data, n_iter=20, relaxation_factor=0.01, convergence=0.0,
edge_angle=15, feature_angle=45,
boundary_smoothing=True, feature_smoothing=False, inplace=False):
"""Adjust point coordinates using Laplacian smoothing.
The effect is to "relax" the mesh, making the cells better shaped and
the vertices more evenly distributed.
Parameters
----------
n_iter : int
Number of iterations for Laplacian smoothing.
relaxation_factor : float, optional
Relaxation factor controls the amount of displacement in a single
iteration. Generally a lower relaxation factor and higher number of
iterations is numerically more stable.
convergence : float, optional
Convergence criterion for the iteration process. Smaller numbers
result in more smoothing iterations. Range from (0 to 1).
edge_angle : float, optional
Edge angle to control smoothing along edges (either interior or boundary).
feature_angle : float, optional
Feature angle for sharp edge identification.
boundary_smoothing : bool, optional
Boolean flag to control smoothing of boundary edges.
feature_smoothing : bool, optional
Boolean flag to control smoothing of feature edges.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Smoothed mesh.
Examples
--------
Smooth the edges of an all triangular cube
>>> import pyvista as pv
>>> cube = pv.Cube().triangulate().subdivide(5).clean()
>>> smooth_cube = cube.smooth(1000, feature_smoothing=False)
>>> n_edge_cells = cube.extract_feature_edges().n_cells
>>> n_smooth_cells = smooth_cube.extract_feature_edges().n_cells
>>> print(f'Sharp Edges on Cube: {n_edge_cells}')
Sharp Edges on Cube: 384
>>> print(f'Sharp Edges on Smooth Cube: {n_smooth_cells}')
Sharp Edges on Smooth Cube: 12
"""
alg = _vtk.vtkSmoothPolyDataFilter()
alg.SetInputData(poly_data)
alg.SetNumberOfIterations(n_iter)
alg.SetConvergence(convergence)
alg.SetFeatureEdgeSmoothing(feature_smoothing)
alg.SetFeatureAngle(feature_angle)
alg.SetEdgeAngle(edge_angle)
alg.SetBoundarySmoothing(boundary_smoothing)
alg.SetRelaxationFactor(relaxation_factor)
alg.Update()
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def decimate_pro(poly_data, reduction, feature_angle=45.0, split_angle=75.0, splitting=True,
pre_split_mesh=False, preserve_topology=False, inplace=False):
"""Reduce the number of triangles in a triangular mesh.
It forms a good approximation to the original geometry. Based on the algorithm
originally described in "Decimation of Triangle Meshes", Proc Siggraph 92.
Parameters
----------
reduction : float
Reduction factor. A value of 0.9 will leave 10 % of the original number
of vertices.
feature_angle : float, optional
Angle used to define what an edge is (i.e., if the surface normal between
two adjacent triangles is >= feature_angle, an edge exists).
split_angle : float, optional
Angle used to control the splitting of the mesh. A split line exists
when the surface normals between two edge connected triangles are >= split_angle.
splitting : bool, optional
Controls the splitting of the mesh at corners, along edges, at non-manifold
points, or anywhere else a split is required. Turning splitting off
will better preserve the original topology of the mesh, but may not
necessarily give the exact requested decimation.
pre_split_mesh : bool, optional
Separates the mesh into semi-planar patches, which are disconnected
from each other. This can give superior results in some cases. If pre_split_mesh
is set to True, the mesh is split with the specified split_angle. Otherwise
mesh splitting is deferred as long as possible.
preserve_topology : bool, optional
Controls topology preservation. If on, mesh splitting and hole elimination
will not occur. This may limit the maximum reduction that may be achieved.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Decimated mesh.
"""
alg = _vtk.vtkDecimatePro()
alg.SetInputData(poly_data)
alg.SetTargetReduction(reduction)
alg.SetPreserveTopology(preserve_topology)
alg.SetFeatureAngle(feature_angle)
alg.SetSplitting(splitting)
alg.SetSplitAngle(split_angle)
alg.SetPreSplitMesh(pre_split_mesh)
alg.Update()
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def tube(poly_data, radius=None, scalars=None, capping=True, n_sides=20,
radius_factor=10, preference='point', inplace=False):
"""Generate a tube around each input line.
The radius of the tube can be set to linearly vary with a scalar value.
Parameters
----------
radius : float
Minimum tube radius (minimum because the tube radius may vary).
scalars : str, optional
scalars array by which the radius varies
capping : bool, optional
Turn on/off whether to cap the ends with polygons. Default ``True``.
n_sides : int, optional
Set the number of sides for the tube. Minimum of 3.
radius_factor : float, optional
Maximum tube radius in terms of a multiple of the minimum radius.
preference : str, optional
The field preference when searching for the scalars array by name.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Tube-filtered mesh.
Examples
--------
Convert a single line to a tube
>>> import pyvista as pv
>>> line = pv.Line()
>>> tube = line.tube(radius=0.02)
>>> print('Line Cells:', line.n_cells)
Line Cells: 1
>>> print('Tube Cells:', tube.n_cells)
Tube Cells: 22
"""
if not isinstance(poly_data, pyvista.PolyData):
poly_data = pyvista.PolyData(poly_data)
if n_sides < 3:
n_sides = 3
tube = _vtk.vtkTubeFilter()
tube.SetInputDataObject(poly_data)
# User Defined Parameters
tube.SetCapping(capping)
if radius is not None:
tube.SetRadius(radius)
tube.SetNumberOfSides(n_sides)
tube.SetRadiusFactor(radius_factor)
# Check if scalars array given
if scalars is not None:
if not isinstance(scalars, str):
raise TypeError('scalars array must be given as a string name')
_, field = poly_data.get_array(scalars, preference=preference, info=True)
# args: (idx, port, connection, field, name)
tube.SetInputArrayToProcess(0, 0, 0, field.value, scalars)
tube.SetVaryRadiusToVaryRadiusByScalar()
# Apply the filter
tube.Update()
mesh = _get_output(tube)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def subdivide(poly_data, nsub, subfilter='linear', inplace=False):
"""Increase the number of triangles in a single, connected triangular mesh.
Uses one of the following vtk subdivision filters to subdivide a mesh.
vtkButterflySubdivisionFilter
vtkLoopSubdivisionFilter
vtkLinearSubdivisionFilter
Linear subdivision results in the fastest mesh subdivision,
but it does not smooth mesh edges, but rather splits each
triangle into 4 smaller triangles.
Butterfly and loop subdivision perform smoothing when
dividing, and may introduce artifacts into the mesh when
dividing.
Subdivision filter appears to fail for multiple part meshes.
Should be one single mesh.
Parameters
----------
nsub : int
Number of subdivisions. Each subdivision creates 4 new
triangles, so the number of resulting triangles is
``nface*4**nsub`` where ``nface`` is the current number of
faces.
subfilter : string, optional
Can be one of the following: 'butterfly', 'loop', 'linear'.
inplace : bool, optional
Updates mesh in-place. Default ``False``.
Returns
-------
mesh : Polydata object
``pyvista`` polydata object.
Examples
--------
>>> from pyvista import examples
>>> import pyvista
>>> mesh = pyvista.PolyData(examples.planefile)
>>> submesh = mesh.subdivide(1, 'loop')
Alternatively, update the mesh in-place.
>>> submesh = mesh.subdivide(1, 'loop', inplace=True)
"""
subfilter = subfilter.lower()
if subfilter == 'linear':
sfilter = _vtk.vtkLinearSubdivisionFilter()
elif subfilter == 'butterfly':
sfilter = _vtk.vtkButterflySubdivisionFilter()
elif subfilter == 'loop':
sfilter = _vtk.vtkLoopSubdivisionFilter()
else:
raise ValueError("Subdivision filter must be one of the following: "
"'butterfly', 'loop', or 'linear'")
# Subdivide
sfilter.SetNumberOfSubdivisions(nsub)
sfilter.SetInputData(poly_data)
sfilter.Update()
submesh = _get_output(sfilter)
if inplace:
poly_data.overwrite(submesh)
return poly_data
else:
return submesh
def subdivide_adaptive(poly_data, max_edge_len=None, max_tri_area=None,
max_n_tris=None, max_n_passes=None, inplace=False):
"""Increase the number of triangles in a triangular mesh based on edge and/or area metrics.
This filter uses a simple case-based, multi-pass approach to
repeatedly subdivide the input triangle mesh to meet the area
and/or edge length criteria. New points may be inserted only
on edges; depending on the number of edges to be subdivided a
different number of triangles are inserted ranging from two
(i.e., two triangles replace the original one) to four.
Point and cell data is treated as follows: The cell data from
a parent triangle is assigned to its subdivided
children. Point data is interpolated along edges as the edges
are subdivided.
This filter retains mesh watertightness if the mesh was
originally watertight; and the area and max triangles criteria
are not used.
Parameters
----------
max_edge_len : float, optional
The maximum edge length that a triangle may have. Edges
longer than this value are split in half and the
associated triangles are modified accordingly.
max_tri_area : float, optional
The maximum area that a triangle may have. Triangles
larger than this value are subdivided to meet this
threshold. Note that if this criterion is used it may
produce non-watertight meshes as a result.
max_n_tris : int, optional
The maximum number of triangles that can be created. If
the limit is hit, it may result in premature termination
of the algorithm and the results may be less than
satisfactory (for example non-watertight meshes may be
created). By default, the limit is set to a very large
number (i.e., no effective limit).
max_n_passes : int, optional
The maximum number of passes (i.e., levels of
subdivision). If the limit is hit, then the subdivision
process stops and additional passes (needed to meet other
criteria) are aborted. The default limit is set to a very
large number (i.e., no effective limit).
inplace : bool, optional
Updates mesh in-place.
Returns
-------
:class:`pyvista.PolyData`
Subdivided mesh
Examples
--------
>>> from pyvista import examples
>>> import pyvista
>>> mesh = pyvista.PolyData(examples.planefile)
>>> submesh = mesh.subdivide_adaptive(max_n_passes=2)
Alternatively, update the mesh in-place.
>>> submesh = mesh.subdivide_adaptive(max_n_passes=2, inplace=True)
"""
sfilter = _vtk.vtkAdaptiveSubdivisionFilter()
if max_edge_len:
sfilter.SetMaximumEdgeLength(max_edge_len)
if max_tri_area:
sfilter.SetMaximumTriangleArea(max_tri_area)
if max_n_tris:
sfilter.SetMaximumNumberOfTriangles(max_n_tris)
if max_n_passes:
sfilter.SetMaximumNumberOfPasses(max_n_passes)
sfilter.SetInputData(poly_data)
sfilter.Update()
submesh = _get_output(sfilter)
if inplace:
poly_data.overwrite(submesh)
return poly_data
else:
return submesh
def decimate(poly_data, target_reduction, volume_preservation=False,
attribute_error=False, scalars=True, vectors=True,
normals=False, tcoords=True, tensors=True, scalars_weight=0.1,
vectors_weight=0.1, normals_weight=0.1, tcoords_weight=0.1,
tensors_weight=0.1, inplace=False, progress_bar=False):
"""Reduce the number of triangles in a triangular mesh using vtkQuadricDecimation.
Parameters
----------
mesh : vtk.PolyData
Mesh to decimate
target_reduction : float
Fraction of the original mesh to remove.
TargetReduction is set to 0.9, this filter will try to reduce
the data set to 10% of its original size and will remove 90%
of the input triangles.
volume_preservation : bool, optional
Decide whether to activate volume preservation which greatly reduces
errors in triangle normal direction. If off, volume preservation is
disabled and if AttributeErrorMetric is active, these errors can be
large. Defaults to False.
attribute_error : bool, optional
Decide whether to include data attributes in the error metric. If
off, then only geometric error is used to control the decimation.
Defaults to False.
scalars : bool, optional
If attribute errors are to be included in the metric (i.e.,
AttributeErrorMetric is on), then the following flags control which
attributes are to be included in the error calculation. Defaults to
True.
vectors : bool, optional
See scalars parameter. Defaults to True.
normals : bool, optional
See scalars parameter. Defaults to False.
tcoords : bool, optional
See scalars parameter. Defaults to True.
tensors : bool, optional
See scalars parameter. Defaults to True.
scalars_weight : float, optional
The scaling weight contribution of the scalar attribute. These
values are used to weight the contribution of the attributes towards
the error metric. Defaults to 0.1.
vectors_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
normals_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
tcoords_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
tensors_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
inplace : bool, optional
Updates mesh in-place.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
outmesh : pyvista.PolyData
Decimated mesh.
Examples
--------
Decimate a sphere while preserving its volume
>>> import pyvista as pv
>>> sphere = pv.Sphere(theta_resolution=90, phi_resolution=90)
>>> print(sphere.n_cells)
15840
>>> dec_sphere = sphere.decimate(0.9, volume_preservation=True)
>>> print(dec_sphere.n_cells)
1584
Notes
-----
If you encounter a segmentation fault or other error, consider
using ``clean`` to remove any invalid cells before using this
filter.
"""
# create decimation filter
alg = _vtk.vtkQuadricDecimation() # vtkDecimatePro as well
alg.SetVolumePreservation(volume_preservation)
alg.SetAttributeErrorMetric(attribute_error)
alg.SetScalarsAttribute(scalars)
alg.SetVectorsAttribute(vectors)
alg.SetNormalsAttribute(normals)
alg.SetTCoordsAttribute(tcoords)
alg.SetTensorsAttribute(tensors)
alg.SetScalarsWeight(scalars_weight)
alg.SetVectorsWeight(vectors_weight)
alg.SetNormalsWeight(normals_weight)
alg.SetTCoordsWeight(tcoords_weight)
alg.SetTensorsWeight(tensors_weight)
alg.SetTargetReduction(target_reduction)
alg.SetInputData(poly_data)
_update_alg(alg, progress_bar, 'Decimating')
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def compute_normals(poly_data, cell_normals=True,
                    point_normals=True, split_vertices=False,
                    flip_normals=False, consistent_normals=True,
                    auto_orient_normals=False,
                    non_manifold_traversal=True,
                    feature_angle=30.0, inplace=False):
    """Compute cell and/or point normals of a polygonal mesh.

    Wraps ``vtkPolyDataNormals``: a normal is computed per polygon and
    averaged at shared points.  Edges sharper than ``feature_angle``
    may be split (with duplicated points) to keep rendered edges
    crisp, polygon ordering can be made consistent, and the normal
    orientation can be flipped globally.

    Parameters
    ----------
    cell_normals : bool, optional
        Compute normals for each cell.  Defaults to ``True``.
    point_normals : bool, optional
        Compute normals for each point.  Defaults to ``True``.
    split_vertices : bool, optional
        Split sharp edges.  Defaults to ``False``.
    flip_normals : bool, optional
        Globally flip normal orientation; this changes both the
        normal direction and each cell's point ordering.  Defaults
        to ``False``.
    consistent_normals : bool, optional
        Enforce consistent polygon ordering.  Defaults to ``True``.
    auto_orient_normals : bool, optional
        Automatically determine the correct normal orientation.
        Assumes a completely closed surface with no non-manifold
        edges; if those constraints do not hold, all bets are off.
        When combined with ``flip_normals``, all output normals point
        "inward".  Defaults to ``False``.
    non_manifold_traversal : bool, optional
        Allow traversal across non-manifold edges; changing this may
        avoid corrupted polygon ordering caused by topological loops.
        Defaults to ``True``.
    feature_angle : float, optional
        Angle (degrees) above which a shared edge counts as "sharp".
        Defaults to ``30.0``.
    inplace : bool, optional
        Overwrite ``poly_data`` with the result.  Defaults to
        ``False``.

    Returns
    -------
    pyvista.PolyData
        Mesh carrying the requested ``'Normals'`` arrays.  Any
        pre-existing arrays named ``"Normals"`` are overwritten.
        Normals are produced only for polygons and triangle strips
        (strips are broken into triangles); lines and vertices get
        none.
    """
    alg = _vtk.vtkPolyDataNormals()
    alg.SetComputeCellNormals(cell_normals)
    alg.SetComputePointNormals(point_normals)
    alg.SetSplitting(split_vertices)
    alg.SetFlipNormals(flip_normals)
    alg.SetConsistency(consistent_normals)
    alg.SetAutoOrientNormals(auto_orient_normals)
    alg.SetNonManifoldTraversal(non_manifold_traversal)
    alg.SetFeatureAngle(feature_angle)
    alg.SetInputData(poly_data)
    alg.Update()

    mesh = _get_output(alg)
    # Make the freshly computed arrays the active normals.
    if point_normals:
        mesh.GetPointData().SetActiveNormals('Normals')
    if cell_normals:
        mesh.GetCellData().SetActiveNormals('Normals')

    if not inplace:
        return mesh
    poly_data.overwrite(mesh)
    return poly_data
def clip_closed_surface(poly_data, normal='x', origin=None,
                        tolerance=1e-06, inplace=False):
    """Clip a closed polydata surface with a plane, keeping it closed.

    New polygonal faces are created along the cut so the result stays
    a closed surface.  Currently a single plane is supported.  The
    input must be manifold: no open edges, no edge shared by more
    than two faces, and no self-intersections.

    Parameters
    ----------
    normal : str or sequence, optional
        Normal of the clipping plane, which is centered at
        ``origin``.  Either a 3-member sequence (e.g. ``[0, 0, 1]``)
        or one of ``'x'``, ``'y'``, ``'z'``, ``'-x'``, ``'-y'``,
        ``'-z'``.
    origin : sequence, optional
        Point on the clipping plane (e.g. ``[1, 0, 0]``).  Defaults
        to the center of the mesh.
    tolerance : float, optional
        Tolerance for creating new points while clipping; values that
        are too small may produce degenerate triangles.
    inplace : bool, optional
        Overwrite ``poly_data`` with the clipped mesh.  Defaults to
        ``False``.

    Returns
    -------
    pyvista.PolyData
        The clipped, closed mesh.

    Raises
    ------
    ValueError
        If the surface has open edges (appears non-manifold).
    """
    # A surface with open edges cannot yield a closed clip result.
    if poly_data.n_open_edges > 0:
        raise ValueError("This surface appears to be non-manifold.")

    if isinstance(normal, str):
        normal = NORMALS[normal.lower()]
    # default the plane origin to the mesh center
    if origin is None:
        origin = poly_data.center

    # single plane wrapped in a collection, as the filter expects
    planes = _vtk.vtkPlaneCollection()
    planes.AddItem(generate_plane(normal, origin))

    clipper = _vtk.vtkClipClosedSurface()
    clipper.SetGenerateFaces(True)
    clipper.SetInputDataObject(poly_data)
    clipper.SetTolerance(tolerance)
    clipper.SetClippingPlanes(planes)
    clipper.Update()  # Perform the Cut

    result = _get_output(clipper)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def fill_holes(poly_data, hole_size, inplace=False, progress_bar=False):  # pragma: no cover
    """Fill holes in a ``pyvista.PolyData`` or ``vtk.vtkPolyData``.

    Boundary edges are located, linked into loops, and the resulting
    loops are triangulated.  ``hole_size`` sets an approximate upper
    bound on the size of holes that are filled.

    Parameters
    ----------
    hole_size : float
        Maximum hole size to fill, expressed as the radius of the
        bounding circumsphere containing the hole.  This is an
        approximation: the actual area cannot be computed without
        first triangulating the hole.
    inplace : bool, optional
        Overwrite the input mesh instead of returning a new one.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Returns
    -------
    pyvista.PolyData
        Mesh with qualifying holes filled.
    """
    # vtkFillHolesFilter has known stability issues; warn loudly.
    logging.warning('pyvista.PolyData.fill_holes is known to segfault. '
                    'Use at your own risk')

    alg = _vtk.vtkFillHolesFilter()
    alg.SetHoleSize(hole_size)
    alg.SetInputData(poly_data)
    _update_alg(alg, progress_bar, 'Filling Holes')

    mesh = _get_output(alg)
    if not inplace:
        return mesh
    poly_data.overwrite(mesh)
    return poly_data
def clean(poly_data, point_merging=True, tolerance=None, lines_to_points=True,
          polys_to_lines=True, strips_to_polys=True, inplace=False,
          absolute=True, progress_bar=False, **kwargs):
    """Merge duplicate points, drop unused points and degenerate cells.

    Parameters
    ----------
    point_merging : bool, optional
        Enable point merging.  On by default.
    tolerance : float, optional
        Merging tolerance.  Interpreted as an absolute distance when
        ``absolute=True``, otherwise as a fraction of the bounding
        box length.  The alias ``merge_tol`` is also accepted.
    lines_to_points : bool, optional
        Convert degenerate lines to points.  Enabled by default.
    polys_to_lines : bool, optional
        Convert degenerate polys to lines.  Enabled by default.
    strips_to_polys : bool, optional
        Convert degenerate strips to polys.
    inplace : bool, optional
        Overwrite ``poly_data`` in place.  Default ``False``.
    absolute : bool, optional
        Whether ``tolerance`` is an absolute distance or a fraction.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Returns
    -------
    pyvista.PolyData
        The cleaned mesh.

    Raises
    ------
    ValueError
        If cleaning removes every point (tolerance too high).
    """
    if tolerance is None:
        # ``merge_tol`` is an accepted alias for ``tolerance``
        tolerance = kwargs.pop('merge_tol', None)
    assert_empty_kwargs(**kwargs)

    alg = _vtk.vtkCleanPolyData()
    alg.SetPointMerging(point_merging)
    alg.SetConvertLinesToPoints(lines_to_points)
    alg.SetConvertPolysToLines(polys_to_lines)
    alg.SetConvertStripsToPolys(strips_to_polys)
    if isinstance(tolerance, (int, float)):
        if absolute:
            alg.ToleranceIsAbsoluteOn()
            alg.SetAbsoluteTolerance(tolerance)
        else:
            alg.SetTolerance(tolerance)
    alg.SetInputData(poly_data)
    _update_alg(alg, progress_bar, 'Cleaning')

    output = _get_output(alg)
    # Guard against an over-aggressive tolerance emptying the mesh,
    # which would otherwise segfault downstream.
    if output.n_points < 1:
        raise ValueError('Clean tolerance is too high. Empty mesh returned.')

    if not inplace:
        return output
    poly_data.overwrite(output)
    return poly_data
def geodesic(poly_data, start_vertex, end_vertex, inplace=False,
             keep_order=True):
    """Geodesic path between two vertices via Dijkstra's algorithm.

    A point array named ``'vtkOriginalPointIds'`` holding the input
    mesh's point ids is added to the output.  The underlying
    ``vtkDijkstraGraphGeodesicPath`` filter returns the path
    reversed; PyVista restores start-to-end order by default.

    Parameters
    ----------
    start_vertex : int
        Index of the start point of the geodesic segment.
    end_vertex : int
        Index of the end point of the geodesic segment.
    inplace : bool, optional
        Also replace the input mesh with the path.  The geodesic path
        is always returned.
    keep_order : bool, optional
        When ``True``, the returned points are guaranteed to start
        with the start vertex (as opposed to the end vertex).

        .. versionadded:: 0.32.0

    Returns
    -------
    pyvista.PolyData
        Line segment between the two vertices; the same object as the
        input mesh when ``inplace`` is ``True``.

    Raises
    ------
    IndexError
        If either vertex index is out of range.
    NotAllTrianglesError
        If the mesh is not all triangles.
    """
    n_points = poly_data.n_points
    in_range = (0 <= start_vertex < n_points) and (0 <= end_vertex < n_points)
    if not in_range:
        raise IndexError('Invalid point indices.')
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError("Input mesh for geodesic path must be all triangles.")

    dijkstra = _vtk.vtkDijkstraGraphGeodesicPath()
    dijkstra.SetInputData(poly_data)
    dijkstra.SetStartVertex(start_vertex)
    dijkstra.SetEndVertex(end_vertex)
    dijkstra.Update()

    original_ids = vtk_id_list_to_array(dijkstra.GetIdList())
    output = _get_output(dijkstra)
    output["vtkOriginalPointIds"] = original_ids
    # Textures of the input are meaningless on a path.
    output.clear_textures()

    # The filter emits the path end-to-start; flip points and the id
    # array when the caller asked for start-to-end ordering.
    if keep_order and original_ids[0] == end_vertex:
        output.points[...] = output.points[::-1, :]
        output["vtkOriginalPointIds"] = output["vtkOriginalPointIds"][::-1]

    if not inplace:
        return output
    poly_data.overwrite(output)
    return poly_data
def geodesic_distance(poly_data, start_vertex, end_vertex):
    """Length of the Dijkstra geodesic between two vertices.

    Parameters
    ----------
    start_vertex : int
        Index of the start point of the geodesic segment.
    end_vertex : int
        Index of the end point of the geodesic segment.

    Returns
    -------
    float
        Length of the geodesic segment.
    """
    segment = poly_data.geodesic(start_vertex, end_vertex)
    cell_sizes = segment.compute_cell_sizes(length=True, area=False,
                                            volume=False)
    total = np.sum(cell_sizes['Length'])
    # release the intermediate meshes promptly
    del segment, cell_sizes
    return total
def ray_trace(poly_data, origin, end_point, first_point=False, plot=False,
              off_screen=False):
    """Intersect a single line segment with the mesh.

    Parameters
    ----------
    origin : np.ndarray or list
        Start of the line segment.
    end_point : np.ndarray or list
        End of the line segment.
    first_point : bool, optional
        Return the first intersection point only.
    plot : bool, optional
        Plot the mesh, the ray segment, and the intersection points.
    off_screen : bool, optional
        Render off screen when ``plot=True``.  Used for unit testing.

    Returns
    -------
    intersection_points : np.ndarray
        Location of the intersection points; empty when the segment
        misses the mesh.
    intersection_cells : np.ndarray
        Indices of the intersected cells; empty when the segment
        misses the mesh.
    """
    points = _vtk.vtkPoints()
    cell_ids = _vtk.vtkIdList()
    poly_data.obbTree.IntersectWithLine(np.array(origin),
                                        np.array(end_point),
                                        points, cell_ids)

    intersection_points = _vtk.vtk_to_numpy(points.GetData())
    if first_point and intersection_points.shape[0] >= 1:
        intersection_points = intersection_points[0]

    # NOTE(review): ``.any()`` treats a hit exactly at the coordinate
    # origin as "no hit" — preserved from the original behavior.
    intersection_cells = []
    if intersection_points.any():
        ncells = 1 if first_point else cell_ids.GetNumberOfIds()
        intersection_cells = [cell_ids.GetId(idx) for idx in range(ncells)]
    intersection_cells = np.array(intersection_cells)

    if plot:
        plotter = pyvista.Plotter(off_screen=off_screen)
        plotter.add_mesh(poly_data, label='Test Mesh')
        segment = np.array([origin, end_point])
        plotter.add_lines(segment, 'b', label='Ray Segment')
        plotter.add_mesh(intersection_points, 'r', point_size=10,
                         label='Intersection Points')
        plotter.add_legend()
        plotter.add_axes()
        plotter.show()

    return intersection_points, intersection_cells
def multi_ray_trace(poly_data, origins, directions, first_point=False, retry=False):
    """Perform multiple ray trace calculations.

    This requires a mesh with only triangular faces, an array of
    origin points and an equal sized array of direction vectors to
    trace along.

    The embree library used for vectorisation of the ray traces is
    known to occasionally return no intersections where the VTK
    implementation would return an intersection.  If the result
    appears to be missing some intersection points, set
    ``retry=True`` to run a second pass over rays that returned no
    intersections, using the VTK ``ray_trace`` implementation.

    Parameters
    ----------
    origins : np.ndarray or list
        Starting point for each trace.
    directions : np.ndarray or list
        Direction vector for each trace.
    first_point : bool, optional
        Returns intersection of first point only.
    retry : bool, optional
        Will retry rays that return no intersections using the
        ``ray_trace``.

    Returns
    -------
    intersection_points : np.ndarray
        Location of the intersection points.  Empty array if no
        intersections.
    intersection_rays : np.ndarray
        Indices of the ray for each intersection point.  Empty array
        if no intersections.
    intersection_cells : np.ndarray
        Indices of the intersection cells.  Empty array if no
        intersections.

    Examples
    --------
    Compute the intersection between rays from the origin in
    directions ``[1, 0, 0]``, ``[0, 1, 0]`` and ``[0, 0, 1]``, and
    a sphere with radius 0.5 centered at the origin

    >>> import pyvista as pv # doctest: +SKIP
    ... sphere = pv.Sphere()
    ... points, rays, cells = sphere.multi_ray_trace([[0, 0, 0]]*3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]], first_point=True)
    ... string = ", ".join([f"({point[0]:.3f}, {point[1]:.3f}, {point[2]:.3f})" for point in points])
    ... print(f'Rays intersected at {string}')
    Rays intersected at (0.499, 0.000, 0.000), (0.000, 0.497, 0.000), (0.000, 0.000, 0.500)
    """
    # The trimesh/embree backend only supports triangulated surfaces.
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError
    # Optional third-party backends; rtree and pyembree are imported
    # only to verify availability.
    try:
        import trimesh, rtree, pyembree
    except (ModuleNotFoundError, ImportError):
        raise ImportError(
            "To use multi_ray_trace please install trimesh, rtree and pyembree with:\n"
            "\tconda install trimesh rtree pyembree"
        )
    origins = np.asarray(origins)
    directions = np.asarray(directions)
    # Drop the leading per-face vertex-count column (always 3 here).
    faces_as_array = poly_data.faces.reshape((poly_data.n_faces, 4))[:, 1:]
    tmesh = trimesh.Trimesh(poly_data.points, faces_as_array)
    # multiple_hits=False makes embree stop at the first hit per ray.
    locations, index_ray, index_tri = tmesh.ray.intersects_location(
        origins, directions, multiple_hits=not first_point
    )
    if retry:
        # gather intersecting rays in lists
        loc_lst, ray_lst, tri_lst = [arr.tolist() for arr in [locations, index_ray, index_tri]]

        # find indices that trimesh failed on
        all_ray_indices = np.arange(len(origins))
        retry_ray_indices = np.setdiff1d(all_ray_indices, index_ray, assume_unique=True)

        # compute ray points for all failed rays at once
        origins_retry = origins[retry_ray_indices, :]  # shape (n_retry, 3)
        directions_retry = directions[retry_ray_indices, :]
        unit_directions = directions_retry / np.linalg.norm(directions_retry,
                                                           axis=1, keepdims=True)
        # Extend each retried ray by the full mesh length so the VTK
        # segment trace cannot stop short of the surface.
        second_points = origins_retry + unit_directions * poly_data.length  # shape (n_retry, 3)

        # Fall back to the (slower) per-ray VTK implementation for
        # each ray embree reported as a miss.
        for id_r, origin, second_point in zip(retry_ray_indices, origins_retry, second_points):
            locs, indices = poly_data.ray_trace(origin, second_point, first_point=first_point)
            if locs.any():
                if first_point:
                    locs = locs.reshape([1, 3])
                ray_lst.extend([id_r] * indices.size)
                tri_lst.extend(indices)
                loc_lst.extend(locs)

        # sort result arrays by ray index
        index_ray = np.array(ray_lst)
        sorting_inds = index_ray.argsort()
        index_ray = index_ray[sorting_inds]
        index_tri = np.array(tri_lst)[sorting_inds]
        locations = np.array(loc_lst)[sorting_inds]

    return locations, index_ray, index_tri
def plot_boundaries(poly_data, edge_color="red", **kwargs):
    """Plot the feature edges of a mesh on top of the mesh itself.

    Parameters
    ----------
    edge_color : str, optional
        Color of the edges when added to the plotter.
    kwargs : optional
        Additional keyword arguments forwarded to
        :func:`pyvista.BasePlotter.add_mesh`.
    """
    boundaries = DataSetFilters.extract_feature_edges(poly_data)

    off_screen = kwargs.pop('off_screen', None)
    notebook = kwargs.pop('notebook', None)
    plotter = pyvista.Plotter(off_screen=off_screen, notebook=notebook)
    plotter.add_mesh(boundaries, color=edge_color, style='wireframe', label='Edges')
    plotter.add_mesh(poly_data, label='Mesh', **kwargs)
    plotter.add_legend()
    return plotter.show()
def plot_normals(poly_data, show_mesh=True, mag=1.0, flip=False,
                 use_every=1, **kwargs):
    """Plot the point normals of a mesh as arrows.

    Parameters
    ----------
    show_mesh : bool, optional
        Also plot the mesh itself.  Defaults to ``True``.
    mag : float, optional
        Size magnitude of the normal arrows.  Defaults to 1.0.
    flip : bool, optional
        Reverse the normal direction when ``True``.  Default
        ``False``.
    use_every : int, optional
        Display every nth normal; e.g. set to 10 to draw every 10th
        normal.  By default every normal is displayed.

    Examples
    --------
    Plot the normals of a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> cpos = sphere.plot_normals(mag=0.1)
    """
    off_screen = kwargs.pop('off_screen', None)
    notebook = kwargs.pop('notebook', None)
    plotter = pyvista.Plotter(off_screen=off_screen, notebook=notebook)
    if show_mesh:
        plotter.add_mesh(poly_data, **kwargs)

    normals = poly_data.point_normals
    if flip:
        # negate in place, matching the original implementation
        normals *= -1
    plotter.add_arrows(poly_data.points[::use_every],
                       normals[::use_every], mag=mag, show_scalar_bar=False)
    return plotter.show()
def remove_points(poly_data, remove, mode='any', keep_scalars=True, inplace=False):
    """Rebuild an all-triangle mesh with the given points removed.

    Parameters
    ----------
    remove : np.ndarray
        If a bool array, points that are ``True`` are removed.
        Otherwise, it is treated as a list of point indices.
    mode : str, optional
        When ``'all'``, only faces whose points are all flagged for
        removal are dropped; otherwise any flagged point drops the
        face.  Default ``'any'``.
    keep_scalars : bool, optional
        When ``True``, point and cell scalars are passed on to the
        new mesh.
    inplace : bool, optional
        Updates the mesh in-place.  Defaults to ``False``.

    Returns
    -------
    mesh : pyvista.PolyData
        Mesh without the points flagged for removal.
    ridx : np.ndarray
        Indices of the surviving points relative to the original mesh.

    Raises
    ------
    TypeError
        If ``remove`` is neither a boolean mask nor integer
        array-like.
    ValueError
        If a boolean mask does not match ``n_points``.
    NotAllTrianglesError
        If the mesh contains non-triangle faces.

    Examples
    --------
    Remove the first 100 points from a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> reduced_sphere, ridx = sphere.remove_points(range(100))
    """
    remove = np.asarray(remove)

    # np.asarray will eat anything, so we have to weed out bogus inputs
    if not issubclass(remove.dtype.type, (np.bool_, np.integer)):
        raise TypeError('Remove must be either a mask or an integer array-like')

    if remove.dtype == np.bool_:
        if remove.size != poly_data.n_points:
            raise ValueError('Mask different size than n_points')
        remove_mask = remove
    else:
        # convert the index list into a boolean removal mask
        remove_mask = np.zeros(poly_data.n_points, np.bool_)
        remove_mask[remove] = True

    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError

    # faces as (n_faces, 3) point-index triplets (drop the size column)
    f = poly_data.faces.reshape(-1, 4)[:, 1:]
    vmask = remove_mask.take(f)
    if mode == 'all':
        fmask = ~(vmask).all(1)
    else:
        fmask = ~(vmask).any(1)

    # Regenerate face and point arrays
    uni = np.unique(f.compress(fmask, 0), return_inverse=True)
    new_points = poly_data.points.take(uni[0], 0)

    nfaces = fmask.sum()
    faces = np.empty((nfaces, 4), dtype=pyvista.ID_TYPE)
    faces[:, 0] = 3
    faces[:, 1:] = np.reshape(uni[1], (nfaces, 3))

    newmesh = pyvista.PolyData(new_points, faces, deep=True)
    ridx = uni[0]

    # Add scalars back to mesh if requested
    if keep_scalars:
        for key in poly_data.point_arrays:
            newmesh.point_arrays[key] = poly_data.point_arrays[key][ridx]

        for key in poly_data.cell_arrays:
            try:
                newmesh.cell_arrays[key] = poly_data.cell_arrays[key][fmask]
            except Exception:
                # FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; best-effort copy kept.
                logging.warning(f'Unable to pass cell key {key} onto reduced mesh')

    # Return vtk surface and reverse indexing array
    if inplace:
        poly_data.overwrite(newmesh)
        return poly_data, ridx
    else:
        return newmesh, ridx
def flip_normals(poly_data):
    """Flip normals of a triangular mesh by reversing the point ordering.

    Operates in place on ``poly_data`` and returns nothing.

    Raises
    ------
    NotAllTrianglesError
        If the mesh contains non-triangle faces.

    Examples
    --------
    Flip the normals of a sphere and plot the normals before and
    after the flip.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> cpos = sphere.plot_normals(mag=0.1)
    >>> sphere.flip_normals()
    >>> cpos = sphere.plot_normals(mag=0.1)
    """
    # FIX: ``is_all_triangles`` is a method in this module (see the
    # other call sites); testing the bound method itself was always
    # truthy, so the guard could never raise.  Call it instead.
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError('Can only flip normals on an all triangle mesh')

    f = poly_data.faces.reshape((-1, 4))
    # reverse each triangle's winding order, which flips its normal
    f[:, 1:] = f[:, 1:][:, ::-1]
    poly_data.faces = f
def delaunay_2d(poly_data, tol=1e-05, alpha=0.0, offset=1.0, bound=False,
                inplace=False, edge_source=None, progress_bar=False):
    """Triangulate the points along their best fitting plane.

    Parameters
    ----------
    tol : float, optional
        Tolerance controlling the discarding of closely spaced
        points, as a fraction of the diagonal of the points'
        bounding box.  Defaults to ``1e-05``.
    alpha : float, optional
        Alpha (distance) value controlling the output.  With a
        non-zero alpha, only edges or triangles contained within a
        sphere centered at mesh vertices are output; otherwise only
        triangles are output.  Defaults to ``0.0``.
    offset : float, optional
        Multiplier controlling the size of the initial bounding
        Delaunay triangulation.  Defaults to ``1.0``.
    bound : bool, optional
        Include the bounding triangulation points and triangles in
        the output.  These seed the triangulation process and are
        handy for debugging.  Default ``False``.
    inplace : bool, optional
        Overwrite this mesh with the triangulated mesh.  Default
        ``False``.
    edge_source : pyvista.PolyData, optional
        Source object specifying constrained edges and loops.  If
        set and lines/polygons are defined, a constrained
        triangulation is created; point ids are assumed identical
        between the source and the input point set.
    progress_bar : bool, optional
        Display a progress bar.  Default ``False``.

    Returns
    -------
    pyvista.PolyData
        The triangulated surface.
    """
    alg = _vtk.vtkDelaunay2D()
    alg.SetProjectionPlaneMode(_vtk.VTK_BEST_FITTING_PLANE)
    alg.SetInputDataObject(poly_data)
    alg.SetTolerance(tol)
    alg.SetAlpha(alpha)
    alg.SetOffset(offset)
    alg.SetBoundingTriangulation(bound)
    if edge_source is not None:
        alg.SetSourceData(edge_source)
    _update_alg(alg, progress_bar, 'Computing 2D Triangulation')

    # The output sometimes contains stray lines; `.triangulate()`
    # cleans those out.
    mesh = _get_output(alg).triangulate()
    if not inplace:
        return mesh
    poly_data.overwrite(mesh)
    return poly_data
def compute_arc_length(poly_data):
    """Append cumulative arc length along each polyline of the mesh.

    A new point-data array named ``"arc_length"`` is added with the
    running arc length for each polyline in the input; for all other
    cell types the arc length is set to 0.

    Returns
    -------
    pyvista.PolyData
        Output mesh carrying the ``"arc_length"`` point array.
    """
    arc_filter = _vtk.vtkAppendArcLength()
    arc_filter.SetInputData(poly_data)
    arc_filter.Update()
    return _get_output(arc_filter)
def project_points_to_plane(poly_data, origin=None, normal=(0, 0, 1),
                            inplace=False):
    """Project every point of this mesh onto a plane.

    Parameters
    ----------
    origin : np.ndarray or collections.abc.Sequence, optional
        Plane origin.  Defaults to the approximate center of the
        input mesh shifted by half the mesh length against the
        normal direction.
    normal : np.ndarray or collections.abc.Sequence, optional
        Plane normal.  Defaults to +Z, ``[0, 0, 1]``.
    inplace : bool, optional
        Overwrite the original mesh's points with the projection.

    Returns
    -------
    pyvista.PolyData
        Mesh whose points lie on the plane (the input mesh itself
        when ``inplace=True``).

    Raises
    ------
    TypeError
        If ``normal`` is not a length-three vector.
    """
    is_sequence = isinstance(normal, (np.ndarray, collections.abc.Sequence))
    if not is_sequence or len(normal) != 3:
        raise TypeError('Normal must be a length three vector')
    if origin is None:
        origin = np.array(poly_data.center) - np.array(normal)*poly_data.length/2.

    # operate on a copy unless the caller asked for in-place work
    mesh = poly_data if inplace else poly_data.copy()

    plane = generate_plane(normal, origin)

    # ProjectPoint writes the projection back into its second arg, so
    # applying it row-wise mutates the mesh points directly
    def _project(point):
        return plane.ProjectPoint(point, point)

    np.apply_along_axis(_project, 1, mesh.points)
    return mesh
def ribbon(poly_data, width=None, scalars=None, angle=0.0, factor=2.0,
           normal=None, tcoords=False, preference='points'):
    """Create a ribbon from the lines in this dataset.

    Parameters
    ----------
    width : float, optional
        The "half" width of the ribbon; when the width varies, this
        is the minimum width.  Defaults to 10% of the mesh length.
    scalars : str, optional
        Name of a scalars array used to vary the ribbon width.
    angle : float, optional
        Offset angle, in degrees, of the ribbon from the line
        normal.  Default 0.0.
    factor : float, optional
        Maximum ribbon width as a multiple of the minimum width.
        Default 2.0.
    normal : tuple(float), optional
        Normal to use as default.
    tcoords : bool or str, optional
        Generate texture coordinates along the ribbon when truthy;
        pass ``'length'`` or ``'normalized'`` to select the
        parameterization.
    preference : str, optional
        Where to look up ``scalars`` on the mesh.  Default
        ``'points'``.

    Returns
    -------
    pyvista.PolyData
        The ribbon; an empty mesh when the input contains no lines.
    """
    field = None
    if scalars is not None:
        _, field = get_array(poly_data, scalars, preference=preference, info=True)
    if width is None:
        width = poly_data.length * 0.1

    alg = _vtk.vtkRibbonFilter()
    alg.SetInputDataObject(poly_data)
    alg.SetWidth(width)
    if normal is not None:
        alg.SetUseDefaultNormal(True)
        alg.SetDefaultNormal(normal)
    alg.SetAngle(angle)

    if scalars is None:
        alg.SetVaryWidth(False)
    else:
        alg.SetVaryWidth(True)
        # args: (idx, port, connection, field, name)
        alg.SetInputArrayToProcess(0, 0, 0, field.value, scalars)
        alg.SetWidthFactor(factor)

    if not tcoords:
        alg.SetGenerateTCoordsToOff()
    else:
        alg.SetGenerateTCoords(True)
        if isinstance(tcoords, str):
            # any string other than 'normalized' falls back to length
            if tcoords.lower() == 'normalized':
                alg.SetGenerateTCoordsToNormalizedLength()
            else:
                alg.SetGenerateTCoordsToUseLength()

    alg.Update()
    return _get_output(alg)
def extrude(poly_data, vector, inplace=False, progress_bar=False):
    """Sweep polygonal data, creating a "skirt" from free edges.

    The input polygonal dataset is swept along ``vector``; every free
    edge (or line) generates a surface and every vertex generates a
    line.  Sweeping a line therefore yields a quadrilateral and
    sweeping a triangle yields a "wedge".  Useful for 3D fonts,
    irregular bar charts, or modelling 2.5D objects such as punched
    plates.

    Parameters
    ----------
    poly_data : pyvista.PolyData
        Mesh to extrude.
    vector : np.ndarray or list
        Direction and length to extrude the mesh in.
    inplace : bool, optional
        Overwrites the original mesh in-place.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Examples
    --------
    Extrude a half arc circle.

    >>> import pyvista
    >>> arc = pyvista.CircularArc([-1, 0, 0], [1, 0, 0], [0, 0, 0])
    >>> mesh = arc.extrude([0, 0, 1])
    >>> cpos = mesh.plot()
    """
    extruder = _vtk.vtkLinearExtrusionFilter()
    extruder.SetInputData(poly_data)
    extruder.SetExtrusionTypeToVectorExtrusion()
    extruder.SetVector(*vector)
    _update_alg(extruder, progress_bar, 'Extruding')
    result = pyvista.wrap(extruder.GetOutput())
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def extrude_rotate(poly_data, resolution=30, inplace=False,
                   translation=0.0, dradius=0.0, angle=360.0, progress_bar=False):
    """Sweep polygonal data around the z-axis, creating a "skirt".

    The input dataset is rotated about the z-axis to create new
    polygonal primitives: sweeping a line produces a cylindrical
    shell, sweeping a circle produces a torus.  Translation along the
    z-axis can be combined with the rotation (useful for springs), and
    the generating radius may change during the sweep.  Handy for
    axisymmetric objects (cylinders, bottles, wine glasses) or
    rotational-symmetric ones (springs, corkscrews).

    Parameters
    ----------
    resolution : int, optional
        Number of pieces to divide line into.  Must be positive.
    inplace : bool, optional
        Overwrites the original mesh inplace.
    translation : float, optional
        Total amount of translation along the z-axis.
    dradius : float, optional
        Change in radius during sweep process.
    angle : float, optional
        The angle of rotation.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Raises
    ------
    ValueError
        If ``resolution`` is not positive.

    Examples
    --------
    >>> import pyvista
    >>> line = pyvista.Line(pointa=(0, 0, 0), pointb=(1, 0, 0))
    >>> mesh = line.extrude_rotate(resolution = 4)
    >>> cpos = mesh.plot()
    """
    if resolution <= 0:
        raise ValueError('`resolution` should be positive')
    sweeper = _vtk.vtkRotationalExtrusionFilter()
    sweeper.SetInputData(poly_data)
    sweeper.SetAngle(angle)
    sweeper.SetDeltaRadius(dradius)
    sweeper.SetTranslation(translation)
    sweeper.SetResolution(resolution)
    _update_alg(sweeper, progress_bar, 'Extruding')
    result = pyvista.wrap(sweeper.GetOutput())
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def strip(poly_data, join=False, max_length=1000, pass_cell_data=False,
          pass_cell_ids=False, pass_point_ids=False):
    """Strip poly data cells.

    Generate triangle strips and/or poly-lines from input polygons,
    triangle strips, and lines.  Only triangles are assembled into
    strips; other polygon types pass through unstripped (run a
    ``triangulate`` filter first if everything must be stripped).
    Vertices, existing triangle strips and polylines in the input are
    passed through unchanged.

    Parameters
    ----------
    join : bool, optional
        If ``True``, join contiguous output polygonal segments
        (useful after slicing a surface).  Default ``False``.
    max_length : int, optional
        Maximum number of triangles per strip and/or lines per
        poly-line.
    pass_cell_data : bool, optional
        Pass the input CellData to the output as FieldData (the field
        data is transformed).  Default ``False``.
    pass_cell_ids : bool, optional
        Add a celldata array holding the original 3D cell index for
        each output cell (useful for picking).  Default ``False``.
    pass_point_ids : bool, optional
        Add a pointdata array holding the original vertex index for
        each output vertex (useful for picking).  Default ``False``.

    Examples
    --------
    >>> from pyvista import examples
    >>> mesh = examples.load_airplane()
    >>> slc = mesh.slice(normal='z', origin=(0,0,-10))
    >>> stripped = slc.strip()
    >>> stripped.n_cells
    1
    """
    stripper = _vtk.vtkStripper()
    stripper.SetInputDataObject(poly_data)
    stripper.SetMaximumLength(max_length)
    stripper.SetJoinContiguousSegments(join)
    stripper.SetPassThroughCellIds(pass_cell_ids)
    stripper.SetPassThroughPointIds(pass_point_ids)
    stripper.SetPassCellDataAsFieldData(pass_cell_data)
    stripper.Update()
    return _get_output(stripper)
| rohankumardubey/pyvista | pyvista/core/filters/poly_data.py | poly_data.py | py | 77,050 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyvista.core.filters.data_set.DataSetFilters",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pyvista.PolyData",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pyvista.PolyData",
"line_number": 30,
"usage_type": "call"
},
{
... |
26829757478 | #!/usr/bin/python
import json
import utils
import logging
import os
import subprocess
import smt_encoding
import pysmt.shortcuts
import re
import requests
import core_data
import hyportage_pattern
import hyportage_db
from pysmt.smtlib.parser import SmtLib20Parser
import cStringIO
"""
This file contains all the functions related to solving the constraints generated from a set of spls,
in order to compute a new configuration of the system
"""
__author__ = "Michael Lienhardt and Jacopo Mauro"
__copyright__ = "Copyright 2017, Michael Lienhardt and Jacopo Mauro"
__license__ = "GPL3"
__version__ = "0.5"
__maintainer__ = "Michael Lienhardt and Jacopo Mauro"
__email__ = "michael lienhardt@laposte.net & mauro.jacopo@gmail.com"
__status__ = "Prototype"
##########################################################################
# UTILITIES TO CALL THE HYVAR-REC SOLVER
##########################################################################
def run_local_hyvar(json_data, explain_modality, cmd, par_cores):
    """
    Run hyvar-rec locally through the command line given in `cmd`.

    :param json_data: solver input, serialized to a temporary json file
    :param explain_modality: if True, ask the solver for an explanation (--explain)
    :param cmd: base command line (list of strings) invoking hyvar-rec; not modified
    :param par_cores: number of parallel cores for the solver (passed with -p when > 1)
    :return: the solver answer parsed from its stdout, or None if it exited with an error
    """
    file_name = utils.get_new_temp_file(".json")
    with open(file_name, "w") as f:
        json.dump(json_data, f)
    # BUG FIX: work on a copy -- the original extended the caller's list in
    # place, so repeated calls kept accumulating options in `cmd`.
    cmd = list(cmd)
    cmd.extend(["--constraints-minimization", "--features-as-boolean", "--no-default-preferences"])
    if explain_modality: cmd.append("--explain")
    if par_cores > 1: cmd.extend(["-p", unicode(par_cores)])
    cmd.append(file_name)
    # executing the solver
    utils.phase_start("Running: " + unicode(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != 0:
        logging.error("command ended with an error code: " + str(process.returncode))
        return None
    logging.debug("Stderr of the command")
    logging.debug(err)
    utils.phase_end("Execution ended")
    res = json.loads(out)
    return res
def run_remote_hyvar(json_data, explain_modality, url):
    """
    Run hyvar-rec remotely by posting the json input to its web service.

    :param json_data: solver input, sent as the json request body
    :param explain_modality: if True, call the /explain endpoint instead of /process
    :param url: base url of the hyvar-rec service
    :return: the parsed json answer, or None on any error reported by the service
    """
    endpoint = url + ("/explain" if explain_modality else "/process")
    utils.phase_start("Invoking url: " + endpoint)
    response = requests.post(
        endpoint, data=json.dumps(json_data), headers={'content-type': 'application/json'})
    utils.phase_end("Execution ended")
    if response.status_code != requests.codes.ok:
        logging.error("server answered with an error code: " + str(response.status_code))
        return None
    res = response.json()
    if 'error' in res:
        logging.error("server answered with an error message: " + json.dumps(res))
        return None
    return res
run_hyvar = None
##########################################################################
# 1. INITIALIZE THE DATA (COMPUTE REQUEST AND UPDATE THE DATABASE)
##########################################################################
def process_request(pattern_repository, id_repository, config, atoms):
    """
    Translate the user request and his USE environment into solver data.

    :param pattern_repository: the pattern repository of hyportage
    :param id_repository: the id repository of hyportage
    :param config: the config object (updated with the USE selection from the environment)
    :param atoms: the user request (list of atom strings)
    :return: the root spls involved in the request (extended with the ones in the
        config) together with the corresponding SMT constraint.  As a side effect
        the pattern repository may be extended with new patterns.
    """
    patterns = {hyportage_pattern.pattern_create_from_atom(atom) for atom in atoms}
    patterns.update(config.pattern_required_flat)
    config.set_use_manipulation_env(os.environ.get("USE", "").split())
    return smt_encoding.convert_patterns(pattern_repository, id_repository, patterns)
##########################################################################
# 2. SOLVER WRAPPER
##########################################################################
def get_preferences_core(id_repository, mspl, installed_spls, spl_name_set):
    """
    Build the core preference equation.  Priorities:
    - keep as many installed, non-deprecated packages as possible (their sum is maximized),
    - install as few new packages as possible (their sum is subtracted).

    :param id_repository: the id repository of hyportage
    :param mspl: the mspl of hyportage
    :param installed_spls: the currently installed spls with their configuration
    :param spl_name_set: the names of the spls considered in the reconfiguration process
    :return: a singleton list holding the preference expression
    """
    kept = {
        name for name in installed_spls.iterkeys()
        if (name in spl_name_set) and (not mspl[name].is_deprecated)}
    fresh = spl_name_set - kept
    expression = '0'
    if kept:
        expression = " + ".join(
            [smt_encoding.get_spl_hyvarrec(id_repository, name) for name in kept])
    if fresh:
        fresh_sum = " + ".join(
            [smt_encoding.get_spl_hyvarrec(id_repository, name) for name in fresh])
        expression = expression + " - (" + fresh_sum + ")"
    return [expression]
def get_preferences_use_flags(id_repository, mspl, spl_names):
    """
    Translate the use flag default selection of the spls into a preference.

    TODO: not implemented yet -- it requires an API change in the SPL class to
    do it efficiently.  Until then this contributes a neutral preference.

    :param id_repository: the id repository of hyportage (currently unused)
    :param mspl: the mspl of hyportage (currently unused)
    :param spl_names: the names of the spls to include (currently unused)
    :return: 0, a neutral preference placeholder
    """
    # The previous version also built two empty, never-read sets here; they
    # were dead locals and have been removed.
    return 0
def installed_spls_to_solver(id_repository, installed_spls, spl_names):
    """
    Encode, in hyvarrec form, the installed spls that belong to *spl_names*.

    :param id_repository: the id repository of hyportage
    :param installed_spls: mapping from installed spl names to their use selection
    :param spl_names: the spl names to keep
    :return: list of hyvarrec identifiers for the retained spls
    """
    return [
        smt_encoding.get_spl_hyvarrec(id_repository, spl_name)
        for spl_name in installed_spls.iterkeys()
        if spl_name in spl_names]
def get_better_constraint_visualization(id_repository, mspl, constraints):
    """
    Rewrite the constraints into a more readable form for analysis.
    Useful for debugging or error reporting.

    :param id_repository: the id repository of hyportage (maps pNN/uNN ids back to names)
    :param mspl: the mspl of hyportage
    :param constraints: the smt-lib constraint strings to manipulate
    :return: the list of readable constraint strings, each prefixed with the
        name of the spl that declared it (or "user-required: ")
    """
    ls = []
    parser = SmtLib20Parser()
    for i in constraints:
        f = cStringIO.StringIO(i)
        script = parser.get_script(f)
        f.close()
        formula = script.get_last_formula()
        formula = pysmt.shortcuts.to_smtlib(formula, daggify=False)
        # translate package ids into package names
        where_declared = "user-required: "
        spl_ids = set(re.findall(r'\b(p[0-9]+)\b', formula))
        for spl_id in spl_ids:
            name = id_repository.ids[spl_id][1]
            # BUG FIX: word boundaries are required -- a plain substitution of
            # e.g. "p1" would also rewrite the prefix of "p10", "p12", ...
            formula = re.sub(r'\b' + spl_id + r'\b', name, formula)
            if i in mspl[name].smt:
                where_declared = name + ": "
        # translate use flag ids (boundaries also prevent matching digit runs
        # inside the package names substituted above)
        use_ids = set(re.findall(r'\b(u[0-9]+)\b', formula))
        for use_id in use_ids:
            formula = re.sub(
                r'\b' + use_id + r'\b',
                id_repository.ids[use_id][2] + "#" + id_repository.ids[use_id][1], formula)
        ls.append(where_declared + formula)
    return ls
def generate_to_install_spls(id_repository, mspl, feature_list):
    """
    translate the output of the solver into a system to install (mapping from spl names to use flag selection)
    :param id_repository: the id repository of hyportage
    :param mspl: the mspl of hyportage
    :param feature_list: the solution found by the solver (list of feature ids)
    :return: a dictionary spl_name -> use flag selection
    """
    # 1. translate the computed solution into a configuration
    res_core = core_data.dictSet()
    use_flags = []
    for feature in feature_list:
        el = id_repository.data_from_id(feature)
        if el[0] == "package":  # el = ("package", spl_name)
            res_core.add_key(el[1])
        else:  # el = ("use", use, spl_name)
            use_flags.append((el[1], el[2]))
    # attach the selected use flags to their spl; flags of spls that are not
    # selected for installation are silently dropped
    for use_flag, spl_name in use_flags:
        if spl_name in res_core:
            res_core.add(spl_name, use_flag)
    # 2. compares the computed solution to the actual spl use flag configuration, and generate the final configuration
    res = core_data.dictSet()
    for spl_name, use_selection_core in res_core.iteritems():
        spl = mspl[spl_name]
        if spl.use_selection_core == use_selection_core:
            # solver kept the current core selection: reuse the full selection unchanged
            res[spl_name] = spl.use_selection_full
        else:
            # keep the flags outside the core set, override the core ones with
            # the solver's choice
            annex_use_flags = spl.use_selection_full - spl.iuses_core
            res[spl_name] = use_selection_core | annex_use_flags
    return res
def solve_spls(
        id_repository, config, mspl, spl_groups,
        spls, annex_constraint, exploration_use, exploration_mask, exploration_keywords, explain_modality=False):
    """
    Solves the spls in input locally assuming that there is a command hyvar-rec
    :param id_repository: the id repository of hyportage
    :param config: the config of hyportage
    :param mspl: the mspl of hyportage
    :param spl_groups: the spl groups of hyportage
    :param spls: the spls to solve
    :param annex_constraint: the additional constraint to add in the solver input
    :param exploration_use: boolean saying if the solver can change the use flag default selection
    :param exploration_mask: boolean saying if the solver can change the use mask status of the packages
    :param exploration_keywords: boolean saying if the solver can change the keywords of the packages
    :param explain_modality: boolean saying if a problem should be explained (by default: False)
    :return: the solution found by the solver, if it exists
    """
    # 1. construct the input data for the solver
    # 1.1. construct the constraint
    constraint = annex_constraint[:]
    spl_group_names = core_data.dictSet()
    #tmp = 0
    for spl in spls:
        spl_group_names.add(core_data.spl_core_get_spl_group_name(spl.core), spl)
        # an spl can only be installed if it is unmasked and keyword-accepted,
        # unless the corresponding exploration flag lets the solver change that
        included = (spl.unmasked or exploration_mask) and (spl.unmasked_keyword or exploration_keywords)
        if included:
            #tmp = tmp + 1
            constraint.extend(spl.smt)
            if exploration_use:
                constraint.extend(spl.smt_use_exploration)
            else:
                constraint.extend(spl.smt_use_selection)
        else:
            #logging.info("spl \"" + spl.name + "\" is not scheduled for possible installation")
            # excluded spls are pinned to false so the solver cannot pick them
            constraint.extend(spl.smt_false)
    # forbid every version of a touched spl group that is not in the considered spl set
    for spl_group_name, spls_tmp in spl_group_names.iteritems():
        spl_group = spl_groups[spl_group_name]
        constraint.extend(spl_group.smt)
        for spl in spl_group:
            if spl not in spls_tmp: constraint.append(smt_encoding.smt_to_string(smt_encoding.get_spl_smt_not(id_repository, spl.name)))
    #logging.info("included spl: " + str(tmp))
    logging.debug("number of constraints to solve: " + str(len(constraint)))
    # 1.2. construct the preferences
    spl_names = {spl.name for spl in spls}
    preferences = get_preferences_core(id_repository, mspl, config.installed_packages, spl_names)
    # 1.3. construct the current system
    current_system = []  #installed_spls_to_solver(id_repository, installed_spls, spl_names)
    data_configuration = {"selectedFeatures": current_system, "attribute_values": [], "context_values": []}  # current configuration
    data_smt_constraints = {"formulas": constraint, "features": [], "other_int_symbols": []}
    data = {
        "attributes": [],  # attributes of the features (empty in our case)
        "contexts": [],  # contexts to consider (empty in our case)
        "configuration": data_configuration,
        "constraints": [],  # constraints to fill in hyvarrec format (empty in our case for efficiency)
        "preferences": preferences,  # preferences in hyvarrec format
        "smt_constraints": data_smt_constraints,
        "hyvar_options": ["--features-as-boolean", "--constraints-minimization", "--no-default-preferences"]
    }
    # 2. run hyvar-rec (run_hyvar is the module-level dispatch hook)
    res = run_hyvar(data)
    if res is None: return None
    logging.debug("HyVarRec output: " + json.dumps(res))
    # 4. managing the solver output
    if res["result"] != "sat":
        if explain_modality:
            # todo handle explain modality when the answer is unsat
            # try to print a better explanation of the constraints
            constraints = get_better_constraint_visualization(id_repository, mspl, res["constraints"])
            logging.error("Conflict detected. Explanation:\n" + "\n".join(constraints) + '\n')
        return None
    return generate_to_install_spls(id_repository, mspl, res['features'])
def generate_installation_files(
        mspl,
        path_emerge_script, path_use_flag_configuration, path_mask_configuration, path_keywords_configuration,
        old_installation, new_installation):
    """
    This function generates the files needed to apply the computed configuration:
    1. the script file to execute to install and uninstall the spls found by the solver
    2. the spl use flag configuration file (usually package.use)
    3. the unmask file and the accept_keywords file
    :param mspl: the mspl of hyportage
    :param path_emerge_script: path to the script file to generate
    :param path_use_flag_configuration: path to the use flag configuration file to generate
    :param path_mask_configuration: the path to the unmask file to generate
    :param path_keywords_configuration: the path to the accept_keywords file to generate
    :param old_installation: the currently installed spls
    :param new_installation: the spls to install, found by the solver
    :return: None (but the four files have been generated)
    """
    header = "# File auto-generated by the hyportage tool\n" \
        "# Do not update, any modification on this file will be overwritten by the tool\n\n"
    # the spls to emerge are the ones that are not present in the old installation,
    # or whose use flag configuration changed
    added_spl_names = []
    for spl_name, product in new_installation.iteritems():
        if (spl_name not in old_installation) or (old_installation[spl_name] != product):
            added_spl_names.append(spl_name)
    # the spls to remove are the ones that are not in the new configuration and
    # that are not replaced by a new version in the same slot
    removed_spl_names = []
    new_spl_goups_info = core_data.dictSet()
    for spl_name in new_installation.iterkeys():
        spl = mspl[spl_name]
        new_spl_goups_info.add(core_data.spl_core_get_spl_group_name(spl.core), spl)
    for spl_name in old_installation.iterkeys():
        spl = mspl[spl_name]
        new_versions = new_spl_goups_info.get(core_data.spl_core_get_spl_group_name(spl.core))
        if new_versions is None:
            removed_spl_names.append(spl_name)
        else:
            replaced = False
            for new_spl in new_versions:
                if (spl.slot == new_spl.slot) and (spl.name != new_spl.name):
                    replaced = True
                    break
            if replaced: removed_spl_names.append(spl_name)
    # write the files
    added_spl_names.sort()
    with open(path_emerge_script, 'w') as f:
        f.write("#!/bin/bash\n")
        f.write("\n")
        f.write(header)
        if added_spl_names:
            f.write("emerge -p --newuse " + " ".join(["=" + spl_name for spl_name in added_spl_names]) + "\n")
        if removed_spl_names:
            f.write("emerge -p --unmerge " + " ".join(["=" + spl_name for spl_name in removed_spl_names]) + "\n")
        f.write("\n")
    with open(path_use_flag_configuration, 'w') as f:
        f.write(header)
        for spl_name in added_spl_names:
            use_selection = new_installation[spl_name]
            line = "=" + spl_name + " " + " ".join(use_selection)
            use_unselection = mspl[spl_name].iuses_full - use_selection
            if use_unselection:
                line = line + " -" + " -".join(use_unselection)
            # BUG FIX: the newline used to be written only when use_unselection
            # was non-empty, gluing consecutive entries onto one line
            f.write(line + "\n")
        f.write("\n")
    with open(path_mask_configuration, 'w') as f:
        f.write(header)
        for spl_name in added_spl_names:
            f.write("=" + spl_name + "\n")
        f.write("\n")
    with open(path_keywords_configuration, 'w') as f:
        f.write(header)
        for spl_name in added_spl_names:
            # accept any keyword ("**") for the packages we install
            f.write("=" + spl_name + " **\n")
        f.write("\n")
##########################################################################
# 3. SPL SET COMPUTATION
##########################################################################
def next_spls(pattern_repository, spl):
    """Return the set of spls matched by any dependency pattern of *spl*."""
    return {
        matched
        for pattern in spl.dependencies.iterkeys()
        for matched in pattern_repository.get_with_default(pattern).matched_spls}
def get_dependency_transitive_closure(pattern_repository, mspl, spls):
    """
    Compute the set of all spls reachable from *spls* through their dependency
    patterns (breadth-first traversal using a per-spl `visited` flag, which is
    reset on every call).
    :param pattern_repository: the pattern repository of hyportage
    :param mspl: the mspl of hyportage (used to reset the visited flags)
    :param spls: the starting set of spls
    :return: the set of reachable spls, including the starting ones
    """
    for spl in mspl.itervalues():
        spl.visited = False
    nexts = spls
    res = set()
    while len(nexts) > 0:
        accu = set()
        for spl in nexts:
            spl.visited = True
            res.add(spl)
            accu.update(next_spls(pattern_repository, spl))
        # python 2: filter returns a list, so len() works on the next iteration
        nexts = filter(lambda spl: not spl.visited, accu)
    return res
| HyVar/gentoo_to_mspl | host/scripts/reconfigure.py | reconfigure.py | py | 16,982 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "utils.get_new_temp_file",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "utils.phase_start",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen"... |
31228319050 | #coding=utf-8
from thuproxy.alipay_api import *
from thuproxy.proxy_account_views import *
import datetime
import uuid
import urllib.request
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, RequestContext
# Discount rate: users pay RATE * the nominal price (and the nominal fee is
# recovered as paid_fee / RATE in the callback).
RATE = 0.6
# Choose the order-creation strategy according to the payment type.
@login_required(login_url="/login/")
def alipay_apply(request, pay_type):
    """
    Show the order-creation page matching *pay_type* ('first', 'upgrade',
    'downgrade' or 'continue').  Redirects to /homepage with an error key in
    the session when the request is not applicable (unpaid order pending,
    already initialized account, maximal/minimal plan, expired account).
    NOTE: the templates read their context from locals(), so the local
    variable names below must not be renamed.
    """
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    # status 'U' = unpaid: the user must settle or cancel it before a new order
    pay_list = Pay.objects.filter(user_id=user.id, status='U')
    if len(pay_list) != 0:
        request.session["error"] = "need_repay"
        return HttpResponseRedirect('/homepage')
    # dispatch on the payment type
    if pay_type == 'first':
        # a non-None expired_date means the account was already paid for once
        if proxy_account.expired_date is not None:
            request.session["error"] = "first_pay"
            return HttpResponseRedirect('/homepage')
        return render_to_response('alipay_create_order_first.html', locals(), context_instance=RequestContext(request))
    elif pay_type == 'upgrade':
        # type 50 is the largest plan handled here -- nothing to upgrade to
        if proxy_account.type == 50:
            request.session["error"] = "upgrade"
            return HttpResponseRedirect('/homepage')
        # NOTE(review): expired_date may be None for a never-paid account,
        # which would make this comparison fail on python 3 -- confirm callers
        if datetime.datetime.now().date() < proxy_account.expired_date:
            remain_time = proxy_account.expired_date - datetime.datetime.now().date()
            proxy_account.remain_time = int(remain_time.days)
        elif datetime.datetime.now().date() >= proxy_account.expired_date:
            request.session["error"] = "date"
            return HttpResponseRedirect('/homepage')
        return render_to_response('alipay_create_order_upgrade.html', locals(),
                                  context_instance=RequestContext(request))
    elif pay_type == 'downgrade':
        # type 1 is the smallest plan -- nothing to downgrade to
        if proxy_account.type == 1:
            request.session["error"] = "downgrade"
            return HttpResponseRedirect('/homepage')
        remain_time = proxy_account.expired_date - datetime.datetime.now().date()
        proxy_account.remain_time = int(remain_time.days)
        return render_to_response('alipay_create_order_downgrade.html', locals(),
                                  context_instance=RequestContext(request))
    elif pay_type == 'continue':
        return render_to_response('alipay_create_order_continue.html', locals(),
                                  context_instance=RequestContext(request))
    else:
        return HttpResponse('充值请求错误')
# Create the order.
@login_required(login_url="/login/")
def alipay_create_orders(request):
    """
    Create a Pay record (status 'U' = unpaid) from the POSTed form, build the
    signed Alipay parameters and render the order page.
    NOTE: the template reads its context from locals(); do not rename locals.
    """
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    m = request.POST['money']
    # the user pays the discounted price
    money = float(m) * RATE
    money = round(money, 2)
    pay_type = int(request.POST['pay_type'])
    today = timezone.now()
    try:
        # for upgrades (type 3), record the remaining days of the account in
        # the `month` field instead of the number of months being paid for
        if pay_type == 3:
            day = request.POST['day']
            pay = Pay(out_trade_no=uuid.uuid1().hex, user=user, total_fee=money, type=int(pay_type),
                      month=int(day), status='U', create_date=today)
        else:
            month = request.POST['month']
            pay = Pay(out_trade_no=uuid.uuid1().hex, user=user, total_fee=money, type=int(pay_type),
                      month=int(month), status='U', create_date=today)
        pay.save()
        params = {'out_trade_no': pay.out_trade_no, 'subject': u'清云加速',
                  'body': u'流量购买费用', 'total_fee': str(money)}
        total_fee = pay.total_fee
        alipay = Alipay(notifyurl="http://scholar.thucloud.com/alipay/callback",
                        returnurl="http://scholar.thucloud.com/alipay/success",
                        showurl="http://scholar.thucloud.com/alipay/success")
        params.update(alipay.conf)
        sign = alipay.buildSign(params)
        return render_to_response('alipay_show_order.html', locals())
    except Exception as e:
        print(e)
        return HttpResponse('生成订单错误')
# Re-display an existing unpaid order so the user can pay it again.
@login_required(login_url="/login/")
def alipay_repay_orders(request, pay_no):
    """
    Look up the order *pay_no* and render the signed Alipay order page for it.
    Redirects to /homepage with session error "repay" when the order cannot
    be uniquely found.
    NOTE: the template reads its context from locals(); do not rename locals.
    """
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    try:
        pay_list = Pay.objects.filter(out_trade_no=pay_no)
        if len(pay_list) != 1:
            request.session["error"] = "repay"
            return HttpResponseRedirect('/homepage')
        else:
            pay = pay_list[0]
            params = {'out_trade_no': pay.out_trade_no, 'subject': u'清云加速',
                      'body': u'流量购买费用', 'total_fee': str(pay.total_fee)}
            total_fee = pay.total_fee
            alipay = Alipay(notifyurl="http://scholar.thucloud.com/alipay/callback",
                            returnurl="http://scholar.thucloud.com/alipay/success",
                            showurl="http://scholar.thucloud.com/alipay/success")
            params.update(alipay.conf)
            sign = alipay.buildSign(params)
            money = pay.total_fee
            return render_to_response('alipay_show_order.html', locals())
    except Exception as e:
        print(e)
        return HttpResponse('显示订单错误')
@csrf_exempt
def alipay_callback(request):
    """
    Alipay asynchronous notification endpoint.

    Verifies the notification signature, re-checks the notify_id against
    alipay.com, then applies the paid order to the user's proxy account:
    type 1 = first payment (account initialization), type 2 = renewal,
    type 3 = plan upgrade.  Answers "success" so Alipay stops retrying, or
    "fail"/an error message otherwise.
    """
    try:
        print(datetime.datetime.now())
        print("call back start")
        params = request.POST.dict()
        if not isinstance(params, dict):
            print('error params not dict')
        alipay = Alipay()
        # verify the signature so only genuine Alipay callbacks are processed
        sign = None
        if 'sign' in params:
            sign = params['sign']
        loc_sign = alipay.buildSign(params)
        if sign is None or loc_sign != sign:
            return HttpResponse("fail")
        print("sign is ok")
        # only handle finished/successful trades, to avoid double-processing
        if params['trade_status'] != 'TRADE_FINISHED' and params['trade_status'] != 'TRADE_SUCCESS':
            print('trade status error')
            return HttpResponse("fail")
        else:
            print("trade status ok")
        print("url: ")
        # BUG FIX: the query fragment had been mangled by an HTML entity
        # ("&not" rendered as a negation sign); restore "&notify_id".
        url = verifyURL['https'] + "&partner=%s&notify_id=%s" % (alipay.conf['partner'], params['notify_id'])
        print(url)
        response = urllib.request.urlopen(url)
        html = response.read()
        print("aliypay.com return: %s" % html)
        # alipay.com confirmed the notification is authentic
        if html == b'true':
            print('result is true')
            try:
                out_trade_no = params['out_trade_no']
                print('out trade no ', out_trade_no)
                trade_no = params['trade_no']
                print('trade no ', trade_no)
                total_fee = params['total_fee']
                pay = Pay.objects.get(out_trade_no=out_trade_no)
                # todo: handle other error status
                if pay is None:
                    return HttpResponse("无此订单,请重新下单")
                if pay.status == 'S':
                    return HttpResponse("已经成功支付了")
                print('user', pay.user)
                proxy_account = ProxyAccount.objects.get(user=pay.user)
                print('proxy_account', proxy_account)
                print('pay total fee', pay.total_fee)
                month = pay.month
                pay_type = pay.type
                # recover the nominal (undiscounted) fee
                real_fee = float(total_fee) / RATE
                print('month', month)
                print('pay type', pay_type)
                print('real fee', real_fee)
                if pay_type == 1:  # first payment: initialize the account
                    account_type = int(real_fee)/int(month)
                    print("accounttype", account_type)
                    if account_type not in {1, 5, 10, 20, 50}:
                        return HttpResponse("accout_type_error")
                    else:
                        print("success:", account_type, " month", month)
                        proxy_account.type = account_type
                        today = datetime.datetime.now()
                        if proxy_account.expired_date is not None:
                            return HttpResponse("not init")
                        else:
                            print("init date")
                            expired_date = today + datetime.timedelta(30*int(month))
                            if proxy_account.paydate is None:
                                create_pac(proxy_account)
                                print("create_pac done")
                                open_listen_port(proxy_account.port, proxy_account.type)
                                print("open_listen_port done")
                            proxy_account.paydate = today
                            proxy_account.expired_date = expired_date
                elif pay_type == 2:  # renewal: extend the expiration date
                    account_type = int(real_fee)/int(month)
                    print("accounttype", account_type)
                    if account_type != proxy_account.type or proxy_account.expired_date is None:
                        return HttpResponse("accout_type_error")
                    else:
                        print("success:", account_type, " month", month)
                        today = datetime.date.today()
                        print("add month")
                        if proxy_account.expired_date < today:  # account already expired
                            expired_date = today + datetime.timedelta(30*int(month))
                            reopen_port(proxy_account.port)
                        else:
                            expired_date = proxy_account.expired_date + datetime.timedelta(30*int(month))
                        proxy_account.expired_date = expired_date
                elif pay_type == 3:  # upgrade: raise the plan for the remaining days
                    today = datetime.date.today()
                    if proxy_account.expired_date < today:  # expired accounts cannot upgrade
                        return HttpResponse("fail")
                    # `month` holds the remaining days here; +0.1 guards float rounding
                    upgrade_delta = (real_fee/month)*30
                    upgrade_delta = int(upgrade_delta+0.1)
                    print(upgrade_delta)
                    proxy_account.type += upgrade_delta
                    if proxy_account.type not in {1, 5, 10, 20, 50}:
                        return HttpResponse("accout_type_error")
                    if ACCOUNT_TRAFFIC_LIMIT[int(proxy_account.type)] > proxy_account.traffic:
                        reopen_port(proxy_account.port)
                    # adjust bandwidth and traffic limits for the new plan
                    upgrade_port(proxy_account.port, proxy_account.type)
                else:
                    pay.status = 'F'
                    pay.save()
                    return HttpResponse("fail")
                print("sava pay")
                pay.status = 'S'
                pay.trade_no = trade_no
                pay.total_fee = real_fee
                pay.save()
                print("sava proxy_account")
                proxy_account.save()
                return HttpResponse("success")
            except Exception as e:
                print(e)
                return HttpResponse("fail")
        # BUG FIX: when alipay.com does not confirm the notification the view
        # previously fell through and returned None; answer "fail" instead.
        return HttpResponse("fail")
    except Exception as e:
        # BUG FIX: the original returned before printing, so the error was
        # never logged (unreachable statement after `return`).
        print(e)
        return HttpResponse("fail")
@login_required(login_url="/login/")
def alipay_success(request):
    # Landing page after a successful payment (Alipay return/show url target).
    # NOTE: the template reads its context from locals(), so these local
    # variable names must not be renamed.
    is_user_login = request.user.is_authenticated()
    proxy_account = ProxyAccount.objects.get(user=request.user)
    return render_to_response('alipay_success.html', locals(), context_instance=RequestContext(request))
@login_required(login_url="/login/")
def alipay_cancel(request, pay_no):
    """Cancel a pending payment identified by its out_trade_no.

    Marks the matching Pay record as cancelled ('C') and redirects the
    user to the homepage.  If zero or multiple records match, an error
    flag is stored in the session instead of touching any record.
    """
    pays = Pay.objects.filter(out_trade_no=pay_no)
    if len(pays) != 1:
        # Unknown or ambiguous order number: flag the error for the UI.
        request.session["error"] = "cancel"
    else:
        # Bind the instance once: repeated `pays[0]` indexing can re-query
        # the database and return a *fresh* object, which would silently
        # drop the status mutation before save().
        pay = pays[0]
        pay.status = 'C'
        pay.save()
    return HttpResponseRedirect('/homepage')
def alipay_test(request):
    """Manual test endpoint that replays the Alipay payment logic.

    pay_type 1 = first activation, 2 = renewal, 3 = plan upgrade.
    Mirrors the production notify handler but skips the Pay record
    bookkeeping and signature verification.
    """
    pay_type = int(request.POST['pay_type'])
    month = int(request.POST['month'])
    total_fee = float(request.POST['money'])
    total_fee *= RATE
    proxy_account = ProxyAccount.objects.get(user=request.user)
    # real_fee undoes the RATE scaling applied above, so it equals the
    # posted 'money' value; kept to match the production code path.
    real_fee = float(total_fee/RATE)
    print('realfee', real_fee)
    if pay_type == 1:
        # Activation: fee per month encodes the plan tier (1/5/10/20/50).
        account_type = int(real_fee)/int(month)
        print("accounttype", account_type)
        if account_type not in {1, 5, 10, 20, 50}:
            return HttpResponse("accout_type_error")
        else:
            print("success:", account_type, " month", month)
            proxy_account.type = account_type
            today = datetime.datetime.now()
            if proxy_account.expired_date is not None:
                # Already initialized: activation is a one-time operation.
                print("add month")
                return HttpResponse("not init")
            else:
                print("init month")
                # 30 days are billed per purchased month.
                expired_date = today + datetime.timedelta(30*int(month))
                if proxy_account.paydate is None:
                    # First ever payment: provision the account resources.
                    print("init paydate")
                    create_pac(proxy_account)
                    print("create_pac done")
                    open_listen_port(proxy_account.port, proxy_account.type)
                    print("open_listen_port done")
                    proxy_account.paydate = today
                proxy_account.expired_date = expired_date
    elif pay_type == 2:
        # Renewal: the tier implied by the fee must match the current plan.
        account_type = int(real_fee)/int(month)
        print("accounttype", account_type)
        if account_type != proxy_account.type or proxy_account.expired_date is None:
            return HttpResponse("accout_type_error")
        else:
            print("success:", account_type, " month", month)
            today = datetime.date.today()
            print("add month")
            if proxy_account.expired_date < today:
                # Account lapsed: the new period starts today.
                expired_date = today + datetime.timedelta(30*int(month))
            else:
                # Still active: extend from the current expiry date.
                expired_date = proxy_account.expired_date + datetime.timedelta(30*int(month))
            proxy_account.expired_date = expired_date
    elif pay_type == 3:
        # Upgrade: pro-rate the fee over the month to a tier delta.
        upgrade_delta = (real_fee/month)*30
        upgrade_delta = int(upgrade_delta+0.1)
        print(upgrade_delta)
        proxy_account.type += upgrade_delta
        if proxy_account.type not in {1, 5, 10, 20, 50}:
            return HttpResponse("accout_type_error")
        # NOTE(review): unlike the production handler, this test branch
        # neither checks expiry before upgrading nor compares the traffic
        # limit before reopening the port — confirm this is intentional.
        reopen_port(proxy_account.port)
    else:
        return HttpResponse("fail")
    print("sava proxy_account")
    proxy_account.save()
    return HttpResponseRedirect('/homepage')
| flyz1360/scholarcloud | thuproxy/pay_views.py | pay_views.py | py | 14,990 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 31,
"usage_type": ... |
11047387601 | from astropy.wcs.utils import wcs_to_celestial_frame as wcs
from astropy.coordinates import SkyCoord
import astropy.units as u
from scipy import ndimage
import numpy as np
import math as m
__author__ = "Norbert Gyenge"
__email__ = "n.g.gyenge@sheffield.ac.uk"
def Sunspot_coord(photosphere_full, dx, dy, spot):
    '''Sunspot coordinate estimation.

    Converts the centre of mass of a masked sunspot submap into solar
    coordinates: helioprojective (arcsec), polar, and Carrington.

    Parameters
    ----------
    photosphere_full - Fits image (a map object providing pixel_to_world(),
        .wcs and .meta['crln_obs'] -- assumed sunpy-style; TODO confirm).
    dx, dy - Region of interest box coordinate (pixel offsets of the
        submap corner within the full-disk image; only index [0] is used).
    spot - Masked submap (non-zero pixels mark the spot).

    Returns
    -------
    array[0] - x (arcsec)
    array[1] - y (arcsec)
    array[2] - r (polar)
    array[3] - theta (polar)
    array[4] - b (Carrington)
    array[5] - l (Carrington)
    array[6] - lcm (Carrington)

    References
    ----------
    Thompson (2006), A&A, 449, 791'''
    # The origo of the coordinate system is the left bottom corner.
    # ndimage returns (row, col), i.e. (y, x), in submap pixels.
    y_on_cut, x_on_cut = ndimage.measurements.center_of_mass(spot)
    # Restore the region of interest box corner coordinate in pixels
    #x_on_im = dx[0] / photosphere_full.scale[0]
    #y_on_im = dy[0] / photosphere_full.scale[1]
    x_on_im = dx[0]
    y_on_im = dy[0]
    # Estimate the spot's coordinate in pixels
    x, y = (x_on_cut * u.pix) + x_on_im, (y_on_cut * u.pix) + y_on_im
    # Convert the spot's coordinate in arcsecs
    c = photosphere_full.pixel_to_world(x, y)
    Solar_X, Solar_Y = c.Tx, c.Ty
    # Polar coordinates: radial distance from disk centre and position
    # angle measured from the +X axis (atan2 handles all quadrants).
    r = np.sqrt(pow(Solar_X.value, 2) + pow(Solar_Y.value, 2)) * u.arcsec
    theta = m.atan2(Solar_Y.value, Solar_X.value) * (180 / np.pi) * u.deg
    # Use SkyCoord for further conversion
    c = SkyCoord(Solar_X, Solar_Y, frame=wcs(photosphere_full.wcs))
    # Convert to heliographic stonyhurst
    d = c.heliographic_stonyhurst
    # Extract the latitude and LCM (longitude from central meridian)
    latitude = d.lat
    lcm = d.lon
    # Convert to heliographic Carrington for longitude: Stonyhurst
    # longitude plus the Carrington longitude of the observed centre.
    longitude = lcm + (photosphere_full.meta['crln_obs'] * u.deg)
    return [Solar_X, Solar_Y, r, theta, latitude, longitude, lcm]
| gyengen/SheffieldSolarCatalog | engine/ssc/sunspot/coordinates.py | coordinates.py | py | 2,040 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scipy.ndimage.measurements.center_of_mass",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.measurements",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage",
"line_number": 36,
"usage_type": "name"
},
... |
12168811050 | # import requests module
import requests
import logging
import time
# Setting up Logging
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger()
# URL
url = "https://google.com"
# Make request method
def make_request(url):
    """Fetch *url* once and log the HTTP status code and elapsed time.

    On any request failure (DNS, connection, timeout, ...) the error is
    logged as critical and the process exits via SystemExit.
    """
    # Use the module logger consistently (the original mixed the root
    # `logging` module with `logger`), with lazy %-style arguments.
    logger.info("Fetching URL %s", url)
    try:
        response = requests.get(url)
        # Log the numeric status code directly instead of slicing the
        # repr string ("<Response [200]>"), which left stray brackets.
        logger.info("Response from URL: %s", response.status_code)
        logger.info("Elapsed time: %s", response.elapsed.total_seconds())
    except requests.exceptions.RequestException as e:
        # Typo fixed ("fecth" -> "fetch"); a hard failure stops monitoring.
        logger.critical("Unable to fetch URL: %s", url)
        raise SystemExit(e)
def main():
    """Entry point: poll the configured URL forever, once per minute."""
    logging.info("Starting monitoring application")
    while True:
        # Fetch once; make_request exits the process on a hard failure.
        make_request(url)
        # Poll interval: 60 seconds between requests.
        time.sleep(60)
if __name__ == "__main__":
main() | tolkiger/terraform-ecs-fargate-cicd-pipeline | monitoring.py | monitoring.py | py | 891 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.info",
... |
32644440027 | """Shifters Rig Main class."""
import datetime
import getpass
import os.path
import sys
import json
# Maya
import pymel.core as pm
from pymel.core import datatypes
from pymel import versions
# mgear
import mgear
import mgear.core.utils
from . import guide, component
from mgear.core import primitive, attribute, skin, dag, icon, node
from mgear import shifter_classic_components
from mgear import shifter_epic_components
from mgear.shifter import naming
import importlib
from mgear.core import utils
PY2 = sys.version_info[0] == 2
# check if we have loaded the necessary plugins
if not pm.pluginInfo("mgear_solvers", q=True, loaded=True):
try:
pm.loadPlugin("mgear_solvers")
except RuntimeError:
pm.displayError("You need the mgear_solvers plugin!")
if not pm.pluginInfo("matrixNodes", q=True, loaded=True):
pm.loadPlugin("matrixNodes")
COMPONENT_PATH = os.path.join(os.path.dirname(__file__), "component")
TEMPLATE_PATH = os.path.join(COMPONENT_PATH, "templates")
SYNOPTIC_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, "synoptic", "tabs")
)
SHIFTER_COMPONENT_ENV_KEY = "MGEAR_SHIFTER_COMPONENT_PATH"
def log_window():
    """Open (or clear and re-raise) the Shifter build-log window in Maya.

    Only acts when both ``mgear.logMode`` and ``mgear.use_log_window``
    are enabled; finishes by flushing the queued log lines via
    ``mgear.logInfos()``.
    """
    if mgear.logMode and mgear.use_log_window:
        log_window_name = "mgear_shifter_build_log_window"
        log_window_field_reporter = "mgear_shifter_log_field_reporter"
        # call pm.window(log_window_name, exists=True) 2 times to avoid
        # false check in Maya 2024
        pm.window(log_window_name, exists=True)
        if not pm.window(log_window_name, exists=True):
            # First use: build the window with a scroll-field reporter
            # filling the layout and a Close button pinned at the bottom.
            log_win = pm.window(
                log_window_name,
                title="Shifter Build Log",
                iconName="Shifter Log",
                width=800,
                height=500,
            )
            form = pm.formLayout()
            reporter = pm.cmdScrollFieldReporter(
                log_window_field_reporter, width=400, height=200, clr=True
            )
            btn_close = pm.button(
                label="Close",
                command=lambda *args: pm.deleteUI(log_win, window=True),
            )
            margin_v = 5
            margin_h = 5
            pm.formLayout(
                form,
                e=True,
                attachForm=[
                    (reporter, "top", margin_v),
                    (reporter, "right", margin_h),
                    (reporter, "left", margin_h),
                    (btn_close, "bottom", margin_v),
                    (btn_close, "right", margin_h),
                    (btn_close, "left", margin_h),
                ],
                attachControl=[
                    (reporter, "bottom", margin_v, btn_close),
                ],
            )
            pm.setParent("..")
            pm.showWindow(log_win)
        else:
            # Window already exists: clear the previous build's log and
            # bring the window back to the front.
            pm.cmdScrollFieldReporter(log_window_field_reporter, e=True, clr=True)
            pm.showWindow(log_window_name)
        mgear.logInfos()
def getComponentDirectories():
    """Return the component search directories.

    Combines the default classic and EPIC component packages with any
    extra paths declared in the MGEAR_SHIFTER_COMPONENT_PATH env var.
    """
    # TODO: ready to support multiple default directories
    return mgear.core.utils.gatherCustomModuleDirectories(
        SHIFTER_COMPONENT_ENV_KEY,
        [
            os.path.join(os.path.dirname(shifter_classic_components.__file__)),
            os.path.join(os.path.dirname(shifter_epic_components.__file__)),
        ],
    )
    # return mgear.core.utils.gatherCustomModuleDirectories(
    #     SHIFTER_COMPONENT_ENV_KEY,
    #     os.path.join(os.path.dirname(shifter_classic_components.__file__)))
def importComponentGuide(comp_type):
    """Import and return the guide module for the given component type."""
    search_dirs = getComponentDirectories()
    default_fmt = "mgear.core.shifter.component.{}.guide"
    custom_fmt = "{}.guide"
    return mgear.core.utils.importFromStandardOrCustomDirectories(
        search_dirs, default_fmt, custom_fmt, comp_type
    )
def importComponent(comp_type):
    """Import and return the implementation module for the given component type."""
    search_dirs = getComponentDirectories()
    default_fmt = "mgear.core.shifter.component.{}"
    custom_fmt = "{}"
    return mgear.core.utils.importFromStandardOrCustomDirectories(
        search_dirs, default_fmt, custom_fmt, comp_type
    )
def reloadComponents(*args):
    """Reload all components (implementation and guide modules).

    Args:
        *args: Dummy (allows direct use as a Maya menu/button callback).
    """
    compDir = getComponentDirectories()
    for x in compDir:
        for com in compDir[x]:
            try:
                # Python 2 has the builtin reload(); Python 3 moved it
                # to importlib.reload().
                if PY2:
                    reload(importComponent(com))
                    reload(importComponentGuide(com))
                else:
                    importlib.reload(importComponent(com))
                    importlib.reload(importComponentGuide(com))
                print("reload : {}.{}".format(os.path.basename(x), com))
            except ImportError:
                # Broken or half-installed components are skipped silently.
                pass
class Rig(object):
"""The main rig class.
Attributes:
guide: guide.Rig() initialization.
groups (dic): Rig groups (Maya sets)
components (dic): Dictionary for the rig components.
Keys are the component fullname (ie. 'arm_L0')
componentsIndex (list): Components index list.
"""
    def __init__(self):
        # Guide (serialized rig description) this build reads from.
        self.guide = guide.Rig()
        # Group name -> member objects for the Maya sets made at finalize().
        self.groups = {}
        # Parent group name -> list of sub-set names.
        self.subGroups = {}
        # Component fullname (e.g. 'arm_L0') -> built component instance.
        self.components = {}
        # Build order of the component fullnames.
        self.componentsIndex = []
        # Shared context handed to custom steps ('mgearRun' -> this Rig).
        self.customStepDic = {}
        # Post-build data collected from every component.
        self.build_data = {}
        # True once the components' 'Finalize' step has run.
        self.component_finalize = False
def buildFromDict(self, conf_dict):
log_window()
startTime = datetime.datetime.now()
mgear.log("\n" + "= SHIFTER RIG SYSTEM " + "=" * 46)
self.stopBuild = False
self.guide.set_from_dict(conf_dict)
endTime = datetime.datetime.now()
finalTime = endTime - startTime
mgear.log(
"\n"
+ "= SHIFTER FILE READ {} [ {} ] {}".format("=" * 16, finalTime, "=" * 7)
)
# Build
mgear.log("\n" + "= BUILDING RIG " + "=" * 46)
self.from_dict_custom_step(conf_dict, pre=True)
self.build()
self.from_dict_custom_step(conf_dict, pre=False)
# Collect post-build data
build_data = self.collect_build_data()
endTime = datetime.datetime.now()
finalTime = endTime - startTime
pm.flushUndo()
pm.displayInfo(
"Undo history have been flushed to avoid "
"possible crash after rig is build. \n"
"More info: "
"https://github.com/miquelcampos/mgear/issues/72"
)
mgear.log(
"\n"
+ "= SHIFTER BUILD RIG DONE {} [ {} ] {}".format(
"=" * 16, finalTime, "=" * 7
)
)
return build_data
def buildFromSelection(self):
"""Build the rig from selected guides."""
startTime = datetime.datetime.now()
mgear.log("\n" + "= SHIFTER RIG SYSTEM " + "=" * 46)
self.stopBuild = False
selection = pm.ls(selection=True)
if not selection:
selection = pm.ls("guide")
if not selection:
mgear.log(
"Not guide found or selected.\n"
+ "Select one or more guide root or a guide model",
mgear.sev_error,
)
return
# check if is partial build or full guide build
ismodel = False
if selection[0].hasAttr("ismodel"):
self.preCustomStep(selection)
ismodel = True
if not self.stopBuild:
mgear.log("\n" + "= GUIDE VALIDATION " + "=" * 46)
# Check guide is valid
self.guide.setFromSelection()
if not self.guide.valid:
return
# Build
mgear.log("\n" + "= BUILDING RIG " + "=" * 46)
self.build()
if ismodel:
self.postCustomStep()
# Collect post-build data
build_data = self.collect_build_data()
endTime = datetime.datetime.now()
finalTime = endTime - startTime
pm.flushUndo()
pm.displayInfo(
"Undo history have been flushed to avoid "
"possible crash after rig is build. \n"
"More info: "
"https://github.com/miquelcampos/mgear/issues/72"
)
mgear.log(
"\n"
+ "= SHIFTER BUILD RIG DONE {} [ {} ] {}".format(
"=" * 16, finalTime, "=" * 7
)
)
return build_data
    def build(self):
        """Build the rig.

        Returns:
            dagNode: The top rig model node.
        """
        self.options = self.guide.values
        self.guides = self.guide.components
        # Expose this run to custom steps under the 'mgearRun' key.
        self.customStepDic["mgearRun"] = self
        self.initialHierarchy()
        self.processComponents()
        self.finalize()
        return self.model
def stepsList(self, checker, attr):
if self.options[checker] and self.options[attr]:
return self.options[attr].split(",")
else:
return None
    def from_dict_custom_step(self, conf_dict, pre=True):
        # Run the pre- or post-build custom steps declared in a guide
        # configuration dictionary (used by buildFromDict).
        if pre:
            pre_post = "doPreCustomStep"
            pre_post_path = "preCustomStep"
        else:
            pre_post = "doPostCustomStep"
            pre_post_path = "postCustomStep"
        p_val = conf_dict["guide_root"]["param_values"]
        if p_val[pre_post]:
            # Steps are stored as a single comma-separated string.
            customSteps = p_val[pre_post_path]
            self.customStep(customSteps.split(","))
    def customStep(self, customSteps=None):
        # Run each custom step in order until one fails: runStep() returns
        # truthy on failure, which flips self.stopBuild and aborts the rest.
        if customSteps:
            for step in customSteps:
                if not self.stopBuild:
                    # Entries prefixed with "*" are skipped.
                    if step.startswith("*"):
                        continue
                    # Entries look like "label | path"; [1:] strips the
                    # character after the separator (presumably the space)
                    # — TODO confirm against the guide settings format.
                    self.stopBuild = guide.helperSlots.runStep(
                        step.split("|")[-1][1:], self.customStepDic
                    )
                else:
                    pm.displayWarning("Build Stopped")
                    break
    def preCustomStep(self, selection):
        # Run the guide's pre-build custom steps when the selected guide
        # root is a guide model and has the option enabled.
        if (
            selection[0].hasAttr("ismodel")
            and selection[0].attr("doPreCustomStep").get()
        ):
            customSteps = selection[0].attr("preCustomStep").get()
            if customSteps:
                mgear.log("\n" + "= PRE CUSTOM STEPS " + "=" * 46)
                # use forward slash for OS compatibility
                if sys.platform.startswith("darwin"):
                    customSteps = [
                        cs.replace("\\", "/") for cs in customSteps.split(",")
                    ]
                    self.customStep(customSteps)
                else:
                    self.customStep(customSteps.split(","))
    def postCustomStep(self):
        # Run the guide's post-build custom steps, if enabled in options.
        customSteps = self.stepsList("doPostCustomStep", "postCustomStep")
        if customSteps:
            mgear.log("\n" + "= POST CUSTOM STEPS " + "=" * 46)
            # use forward slash for OS compatibility
            if sys.platform.startswith("darwin"):
                customSteps = [cs.replace("\\", "/") for cs in customSteps]
            self.customStep(customSteps)
# @utils.timeFunc
def get_guide_data(self):
"""Get the guide data
Returns:
str: The guide data
"""
if self.guide.guide_template_dict:
return json.dumps(self.guide.guide_template_dict)
else:
return json.dumps(self.guide.get_guide_template_dict())
def initialHierarchy(self):
"""Build the initial hierarchy of the rig.
Create the rig model, the main properties,
and a couple of base organisation nulls.
Get the global size of the rig.
"""
mgear.log("Initial Hierarchy")
# --------------------------------------------------
# Model
self.model = primitive.addTransformFromPos(None, self.options["rig_name"])
attribute.lockAttribute(self.model)
# --------------------------------------------------
# INFOS
self.isRig_att = attribute.addAttribute(self.model, "is_rig", "bool", True)
self.rigName_att = attribute.addAttribute(
self.model, "rig_name", "string", self.options["rig_name"]
)
self.user_att = attribute.addAttribute(
self.model, "user", "string", getpass.getuser()
)
self.isWip_att = attribute.addAttribute(
self.model, "wip", "bool", self.options["mode"] != 0
)
self.date_att = attribute.addAttribute(
self.model, "date", "string", str(datetime.datetime.now())
)
self.mayaVersion_att = attribute.addAttribute(
self.model,
"maya_version",
"string",
str(pm.mel.eval("getApplicationVersionAsFloat")),
)
self.gearVersion_att = attribute.addAttribute(
self.model, "gear_version", "string", mgear.getVersion()
)
self.synoptic_att = attribute.addAttribute(
self.model, "synoptic", "string", str(self.options["synoptic"])
)
self.comments_att = attribute.addAttribute(
self.model, "comments", "string", str(self.options["comments"])
)
self.ctlVis_att = attribute.addAttribute(self.model, "ctl_vis", "bool", True)
if versions.current() >= 201650:
self.ctlVisPlayback_att = attribute.addAttribute(
self.model, "ctl_vis_on_playback", "bool", True
)
self.jntVis_att = attribute.addAttribute(self.model, "jnt_vis", "bool", True)
# adding the always draw shapes on top to global attribute
if versions.current() >= 20220000:
self.ctlXRay_att = attribute.addAttribute(
self.model, "ctl_x_ray", "bool", False
)
self.qsA_att = attribute.addAttribute(self.model, "quickselA", "string", "")
self.qsB_att = attribute.addAttribute(self.model, "quickselB", "string", "")
self.qsC_att = attribute.addAttribute(self.model, "quickselC", "string", "")
self.qsD_att = attribute.addAttribute(self.model, "quickselD", "string", "")
self.qsE_att = attribute.addAttribute(self.model, "quickselE", "string", "")
self.qsF_att = attribute.addAttribute(self.model, "quickselF", "string", "")
self.rigGroups = self.model.addAttr("rigGroups", at="message", m=1)
self.rigPoses = self.model.addAttr("rigPoses", at="message", m=1)
self.rigCtlTags = self.model.addAttr("rigCtlTags", at="message", m=1)
self.rigScriptNodes = self.model.addAttr("rigScriptNodes", at="message", m=1)
self.guide_data_att = attribute.addAttribute(
self.model, "guide_data", "string", self.get_guide_data()
)
# ------------------------- -------------------------
# Global Ctl
if self.options["worldCtl"]:
if self.options["world_ctl_name"]:
name = self.options["world_ctl_name"]
else:
name = "world_ctl"
icon_shape = "circle"
else:
name = "global_C0_ctl"
icon_shape = "crossarrow"
self.global_ctl = self.addCtl(
self.model,
name,
datatypes.Matrix(),
self.options["C_color_fk"],
icon_shape,
w=10,
)
attribute.setRotOrder(self.global_ctl, "ZXY")
# Connect global visibility
pm.connectAttr(self.ctlVis_att, self.global_ctl.attr("visibility"))
if versions.current() >= 201650:
pm.connectAttr(
self.ctlVisPlayback_att, self.global_ctl.attr("hideOnPlayback")
)
attribute.lockAttribute(self.global_ctl, ["v"])
# --------------------------------------------------
# Setup in world Space
self.setupWS = primitive.addTransformFromPos(self.model, "setup")
attribute.lockAttribute(self.setupWS)
# --------------------------------------------------
# Basic set of null
if self.options["joint_rig"]:
self.jnt_org = primitive.addTransformFromPos(self.model, "jnt_org")
if self.options["force_SSC"]:
self.global_ctl.s >> self.jnt_org.s
pm.connectAttr(self.jntVis_att, self.jnt_org.attr("visibility"))
    def processComponents(self):
        """
        Process the components of the rig, following the creation steps.
        """
        # Init: instantiate every component class declared by the guide.
        self.components_infos = {}
        for comp in self.guide.componentsIndex:
            guide_ = self.guides[comp]
            mgear.log("Init : " + guide_.fullName + " (" + guide_.type + ")")
            module = importComponent(guide_.type)
            Component = getattr(module, "Component")
            comp = Component(self, guide_)
            if comp.fullName not in self.componentsIndex:
                self.components[comp.fullName] = comp
                self.componentsIndex.append(comp.fullName)
                self.components_infos[comp.fullName] = [
                    guide_.compType,
                    guide_.getVersion(),
                    guide_.author,
                ]
        # Creation steps: run each build step across all components before
        # advancing to the next step (so components can rely on siblings
        # having completed the previous step).
        self.steps = component.Main.steps
        for i, name in enumerate(self.steps):
            # for count, compName in enumerate(self.componentsIndex):
            for compName in self.componentsIndex:
                comp = self.components[compName]
                mgear.log(name + " : " + comp.fullName + " (" + comp.type + ")")
                comp.stepMethods[i]()
            if name == "Finalize":
                self.component_finalize = True
            # Debug option: stop after the configured step index (0 = all).
            if self.options["step"] >= 1 and i >= self.options["step"] - 1:
                break
def finalize(self):
"""Finalize the rig."""
groupIdx = 0
# Properties --------------------------------------
mgear.log("Finalize")
# clean jnt_org --------------------------------------
if self.options["joint_rig"]:
mgear.log("Cleaning jnt org")
jnt_org_child = dag.findChildrenPartial(self.jnt_org, "org")
if jnt_org_child:
for jOrg in jnt_org_child:
if not jOrg.listRelatives(c=True):
pm.delete(jOrg)
# Groups ------------------------------------------
mgear.log("Creating groups")
# Retrieve group content from components
for name in self.componentsIndex:
component_ = self.components[name]
for name, objects in component_.groups.items():
self.addToGroup(objects, name)
for name, objects in component_.subGroups.items():
self.addToSubGroup(objects, name)
# Create master set to group all the groups
masterSet = pm.sets(n=self.model.name() + "_sets_grp", em=True)
pm.connectAttr(masterSet.message, self.model.rigGroups[groupIdx])
groupIdx += 1
# Creating all groups
pm.select(cl=True)
for name, objects in self.groups.items():
s = pm.sets(n=self.model.name() + "_" + name + "_grp")
s.union(objects)
pm.connectAttr(s.message, self.model.rigGroups[groupIdx])
groupIdx += 1
masterSet.add(s)
for parentGroup, subgroups in self.subGroups.items():
pg = pm.PyNode(self.model.name() + "_" + parentGroup + "_grp")
for sg in subgroups:
sub = pm.PyNode(self.model.name() + "_" + sg + "_grp")
if sub in masterSet.members():
masterSet.remove(sub)
pg.add(sub)
# create geo group
geoSet = pm.sets(n=self.model.name() + "_geo_grp", em=True)
pm.connectAttr(geoSet.message, self.model.rigGroups[groupIdx])
masterSet.add(geoSet)
groupIdx += 1
# Bind pose ---------------------------------------
# controls_grp = self.groups["controllers"]
# pprint(controls_grp, stream=None, indent=1, width=100)
ctl_master_grp = pm.PyNode(self.model.name() + "_controllers_grp")
pm.select(ctl_master_grp, replace=True)
dag_node = pm.dagPose(save=True, selection=True)
pm.connectAttr(dag_node.message, self.model.rigPoses[0])
print(dag_node)
# hide all DG nodes inputs in channel box -----------------------
# only hides if components_finalize or All steps are done
if self.component_finalize:
for c in self.model.listHistory(ac=True, f=True):
if c.type() != "transform":
c.isHistoricallyInteresting.set(False)
# Bind skin re-apply
if self.options["importSkin"]:
try:
pm.displayInfo("Importing Skin")
skin.importSkin(self.options["skin"])
except RuntimeError:
pm.displayWarning(
"Skin doesn't exist or is not correct. "
+ self.options["skin"]
+ " Skipped!"
)
    def collect_build_data(self):
        """Collect post build data.

        By default the data is stored in the root joint.

        Returns:
            dict: The collected data
        """
        self.build_data["Components"] = []
        for c, comp in self.customStepDic["mgearRun"].components.items():
            self.build_data["Components"].append(comp.build_data)
        # Optionally embed the data in a joint and/or dump it to disk,
        # depending on the guide options.
        if self.options["data_collector_embedded"]:
            root_jnt = self.get_root_jnt_embbeded()
            self.add_collected_data_to_root_jnt(root_jnt=root_jnt)
        if self.options["data_collector"]:
            self.data_collector_output(self.options["data_collector_path"])
        return self.build_data
    def data_collector_output(self, file_path=None):
        """Output collected data to a Json file.

        Args:
            file_path (str, optional): Output path for the Json file.
                When omitted, a Maya file dialog asks the user for one.
        """
        if not file_path:
            ext_filter = "Shifter Collected data (*{})".format(guide.DATA_COLLECTOR_EXT)
            file_path = pm.fileDialog2(fileMode=0, fileFilter=ext_filter)[0]
        with open(file_path, "w") as f:
            f.write(json.dumps(self.build_data, indent=4))
        # NOTE(review): dead rebind — the local goes out of scope right
        # after; presumably a leftover from an earlier loop.
        file_path = None
    def add_collected_data_to_root_jnt(self, root_jnt=None):
        """Add collected data to root joint.

        Root joint is the first joint generated in the rig.

        Args:
            root_jnt (pyNode, optional): Explicit joint to embed the data
                in; when None, the first joint of the first component that
                has joints is used.
        """
        if not root_jnt:
            for c in self.componentsIndex:
                comp = self.customStepDic["mgearRun"].components[c]
                if not root_jnt and comp.jointList:
                    root_jnt = comp.jointList[0]
                    break
        if root_jnt:
            # Serialize the build data into a string attribute on the joint.
            attribute.addAttribute(
                root_jnt,
                "collected_data",
                "string",
                str(json.dumps(self.build_data)),
            )
    def get_root_jnt_embbeded(self):
        """Get the root joint to embed the collected data in.

        Returns:
            pyNode: Joint, or None when no custom joint name is configured
            or the configured name cannot be resolved to a unique node.
        """
        j_name = self.options["data_collector_embedded_custom_joint"]
        if j_name:
            try:
                return pm.PyNode(j_name)
            except pm.MayaNodeError:
                # Missing or ambiguous name: report it; the caller falls
                # back to the default first-joint lookup.
                pm.displayError("{} doesn't exist or is not unique".format(j_name))
def addCtl(self, parent, name, m, color, iconShape, **kwargs):
"""Create the control and apply the shape, if this is alrealdy stored
in the guide controllers grp.
Args:
parent (dagNode): The control parent
name (str): The control name.
m (matrix): The transfromation matrix for the control.
color (int or list of float): The color for the control in index
or RGB.
iconShape (str): The controls default shape.
kwargs (variant): Other arguments for the iconShape type variations
Returns:
dagNode: The Control.
"""
if "degree" not in kwargs.keys():
kwargs["degree"] = 1
bufferName = name + "_controlBuffer"
if bufferName in self.guide.controllers.keys():
ctl_ref = self.guide.controllers[bufferName]
ctl = primitive.addTransform(parent, name, m)
for shape in ctl_ref.getShapes():
ctl.addChild(shape, shape=True, add=True)
pm.rename(shape, name + "Shape")
else:
ctl = icon.create(parent, name, m, color, iconShape, **kwargs)
self.addToGroup(ctl, "controllers")
# Set the control shapes isHistoricallyInteresting
for oShape in ctl.getShapes():
oShape.isHistoricallyInteresting.set(False)
# connecting the always draw shapes on top to global attribute
if versions.current() >= 20220000:
pm.connectAttr(self.ctlXRay_att, oShape.attr("alwaysDrawOnTop"))
# set controller tag
if versions.current() >= 201650:
pm.controller(ctl)
self.add_controller_tag(ctl, None)
attribute.addAttribute(ctl, "isCtl", "bool", keyable=False)
attribute.addAttribute(
ctl, "ctl_role", "string", keyable=False, value="world_ctl"
)
return ctl
def addToGroup(self, objects, names=["hidden"]):
"""Add the object in a collection for later group creation.
Args:
objects (dagNode or list of dagNode): Object to put in the group.
names (str or list of str): Names of the groups to create.
"""
if not isinstance(names, list):
names = [names]
if not isinstance(objects, list):
objects = [objects]
for name in names:
if name not in self.groups.keys():
self.groups[name] = []
self.groups[name].extend(objects)
def addToSubGroup(self, subGroups, parentGroups=["hidden"]):
"""Add the object in a collection for later SubGroup creation.
Args:
subGroups (dagNode or list of dagNode): Groups (core set) to add
as a Subgroup.
namparentGroupses (str or list of str): Names of the parent groups
to create.
"""
if not isinstance(parentGroups, list):
parentGroups = [parentGroups]
if not isinstance(subGroups, list):
subGroups = [subGroups]
for pg in parentGroups:
if pg not in self.subGroups.keys():
self.subGroups[pg] = []
self.subGroups[pg].extend(subGroups)
    def add_controller_tag(self, ctl, tagParent):
        # Create the Maya controller tag for ctl (parented under tagParent's
        # tag) and wire its message into the next free index of
        # model.rigCtlTags so every tagged controller can be enumerated.
        ctt = node.add_controller_tag(ctl, tagParent)
        if ctt:
            ni = attribute.get_next_available_index(self.model.rigCtlTags)
            pm.connectAttr(
                ctt.message, self.model.attr("rigCtlTags[{}]".format(str(ni)))
            )
def getLocalName(self, guideName):
"""This function return the local name, cutting the Maya fullname
and taking the latest part.
ie. "parentA|parentB|arm_C0_root" will return "arm_C0_root"
Args:
guideName (str): The guide name.
Returns:
str: The local Name
"""
if guideName is None:
return None
localName = guideName.split("|")[-1]
return localName
    def getComponentName(self, guideName, local=True):
        """
        This function returns the component name.

        ie. "arm_C0_root" returns "arm_C0"

        Args:
            guideName (str): The guide name.
            local (bool): When True, strip any Maya path prefix with
                getLocalName() first. Defaults to True.

        Returns:
            str: The component name, or None when guideName is None or the
            name does not split into component/relative parts.
        """
        if guideName is None:
            return None
        if local:
            guideName = self.getLocalName(guideName)
        names = naming.get_component_and_relative_name(guideName)
        if names:
            return names[0]
    def getRelativeName(self, guideName):
        """This function returns the name of the relative in the guide.

        ie. "arm_C0_root" returns "root"

        Args:
            guideName (str): The guide name.

        Returns:
            str: The relative name, or None when guideName is None or the
            name does not split into component/relative parts.
        """
        if guideName is None:
            return None
        localName = self.getLocalName(guideName)
        names = naming.get_component_and_relative_name(localName)
        if names:
            return names[1]
def findRelative(self, guideName, relatives_map={}):
"""Return the objects in the rig matching the guide object.
Args:
guideName (str): Name of the guide object.
relatives_map (dict, optional): Custom relative mapping to
point any object in a component. For example used to point
Auto in upvector reference.
Returns:
transform: The relative object
"""
if guideName is None:
return self.global_ctl
if guideName in relatives_map.keys():
return relatives_map[guideName]
comp_name = self.getComponentName(guideName)
relative_name = self.getRelativeName(guideName)
if comp_name not in self.components.keys():
return self.global_ctl
return self.components[comp_name].getRelation(relative_name)
    def findControlRelative(self, guideName):
        """Return the control objects in the rig matching the guide object.

        Args:
            guideName (str): Name of the guide object.

        Returns:
            transform: The relative control object; falls back to the
            global control when guideName is None or the component is not
            part of this build.
        """
        if guideName is None:
            return self.global_ctl
        # localName = self.getLocalName(guideName)
        comp_name = self.getComponentName(guideName)
        relative_name = self.getRelativeName(guideName)
        if comp_name not in self.components.keys():
            return self.global_ctl
        return self.components[comp_name].getControlRelation(relative_name)
# TODO: update findComponent and other find methods with new funtions like
# comp_name and others. Better composability
    def findComponent(self, guideName):
        """Return the component from a guide Name.

        Args:
            guideName (str): Name of the guide object (used as-is, without
                stripping a Maya path prefix).

        Returns:
            component or None: The built component, or None when the name
            is None or no matching component exists.
        """
        if guideName is None:
            return None
        comp_name = self.getComponentName(guideName, False)
        # comp_name = "_".join(guideName.split("_")[:2])
        if comp_name not in self.components.keys():
            return None
        return self.components[comp_name]
    def findUIHost(self, guideName):
        """Return the UI host of the component.

        Args:
            guideName (str): Name of the guide object.

        Returns:
            transform: The UI host; falls back to the rig's own UI host
            when guideName is None or the component is unknown.
        """
        # NOTE(review): self.ui is not set in __init__ — presumably it is
        # assigned during the build; confirm before relying on the
        # fallback paths below.
        if guideName is None:
            return self.ui
        comp_name = self.getComponentName(guideName, False)
        # comp_name = "_".join(guideName.split("_")[:2])
        if comp_name not in self.components.keys():
            return self.ui
        # Lazily create the component's UI host on first request.
        if self.components[comp_name].ui is None:
            self.components[comp_name].ui = pm.UIHost(self.components[comp_name].root)
        return self.components[comp_name].ui
| mgear-dev/mgear4 | release/scripts/mgear/shifter/__init__.py | __init__.py | py | 31,325 | python | en | code | 209 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pymel.core.pluginInfo",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pymel.core.loa... |
43627424884 | from typing import List
class Solution:
    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:
        """Return the minutes needed to inform all employees (LeetCode 1376).

        BFS from the head of the company: each employee's arrival time is
        its manager's arrival time plus the manager's informTime.  The
        answer is the maximum arrival time over all employees.
        """
        from collections import defaultdict, deque

        # Build manager -> direct reports adjacency; manager[headID] is the
        # -1 sentinel, so the head itself is skipped.
        subordinates = defaultdict(list)
        for emp, mgr in enumerate(manager):
            if emp != headID:
                subordinates[mgr].append(emp)

        # Use a deque: list.pop(0) in the original was O(n) per pop,
        # making the BFS quadratic on wide trees.
        queue = deque([(headID, informTime[headID])])
        res = 0
        while queue:
            node, time = queue.popleft()
            for emp in subordinates[node]:
                emp_time = time + informTime[emp]
                res = max(res, emp_time)
                queue.append((emp, emp_time))
        return res

    def test(self):
        """Run the bundled sample cases and print each result."""
        test_cases = [
            [1, 0, [-1], [0]],
            [6, 2, [2, 2, -1, 2, 2, 2], [0, 0, 1, 0, 0, 0]],
            [7, 6, [1, 2, 3, 4, 5, 6, -1], [0, 6, 5, 4, 3, 2, 1]],
            [4, 2, [3, 3, -1, 2], [0, 0, 162, 914]],
            [15, 0, [-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6],
             [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]],
            [11, 4, [5, 9, 6, 10, -1, 8, 9, 1, 9, 3, 4],
             [0, 213, 0, 253, 686, 170, 975, 0, 261, 309, 337]],
        ]
        for n, headID, manager, informTime in test_cases:
            res = self.numOfMinutes(n, headID, manager, informTime)
            print('res: %s' % res)
            print('-=' * 30 + '-')
if __name__ == '__main__':
Solution().test()
| MichaelTQ/LeetcodePythonProject | solutions/leetcode_1351_1400/LeetCode1376_TimeNeededToInformAllEmployees.py | LeetCode1376_TimeNeededToInformAllEmployees.py | py | 1,290 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
31994054521 | #coding: utf-8
import json, random, os
import hashlib
import requests
from flask_babel import _
from webapp import app
# Text translation backed by the Baidu Translate API.
def translate(text, source_language, dest_language):
    """Translate *text* with the Baidu Translate API.

    Args:
        text (str): Text to translate.
        source_language (str): Source language code; empty/None means
            auto-detect.
        dest_language (str): Babel destination language code.

    Returns:
        str: The translated text, or a localized error message when the
        service is unconfigured or the request fails.
    """
    if 'BD_TRANSLATOR_KEY' not in app.config or not app.config['BD_TRANSLATOR_KEY']:
        return _('Error: the translation service is not configured.')
    url = "http://api.fanyi.baidu.com/api/trans/vip/translate"
    appid = '20200321000402156'
    salt = random.randint(32768, 65536)  # random salt required by the API
    # Signature = MD5(appid + query + salt + secret key).
    sign = appid + text + str(salt) + app.config['BD_TRANSLATOR_KEY']
    msign = hashlib.md5(sign.encode(encoding='utf-8')).hexdigest()
    if dest_language == 'es':  # pybabel and Baidu disagree on the Spanish code
        dest_language = 'spa'
    data = {
        'q': text,
        'from': source_language or 'auto',
        'to': dest_language,
        'appid': appid,
        'salt': salt,
        'sign': msign
    }
    try:
        # A timeout keeps a stalled API call from hanging the request.
        r = requests.get(url, params=data, timeout=10)
    except requests.RequestException:
        return _('Error: the translation service failed.')
    if r.status_code != 200:
        return _('Error: the translation service failed.')
    payload = json.loads(r.content.decode('utf-8'))
    # Baidu reports API-level errors (bad key, quota, ...) in a 200
    # response carrying 'error_code' instead of 'trans_result'; the
    # original code raised KeyError in that case.
    if 'trans_result' not in payload:
        return _('Error: the translation service failed.')
    return payload['trans_result'][0]['dst']
if __name__ == '__main__':
result = translate('我命由我不由天','', 'spa')
print(result)
print(type(result)) | huawenjin1995/Microblog | webapp/translate.py | translate.py | py | 1,421 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "webapp.app.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "webapp.app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask_babel._",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.randint",
... |
36093046858 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import mock
from napixd.exceptions import InternalRequestFailed
from napixd.application import Napixd
from napixd.services.contexts import NapixdContext
from napixd.loader.loader import Loader, Load
from napixd.http.router.router import Router
from napixd.http.request import Request
class MyService(object):
def __init__(self, mgr, alias, conf=None):
self.alias = alias
self.url = self.alias
def setup_bottle(self, app):
app.route('/' + self.url, self.keep)
def keep(self):
pass
def __eq__(self, other):
return self.__class__ == other.__class__ and self.alias == other.alias
class TestReload(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.patch_service = mock.patch('napixd.application.Service', MyService)
def setUp(self):
self.Service = self.patch_service.start()
loader = mock.Mock(spec=Loader)
self.load = load = loader.load.return_value = mock.Mock(spec=Load)
self.m1 = m1 = mock.Mock(alias='m1')
self.m2 = m2 = mock.Mock(alias='m2')
load.managers = [m1, m2]
load.new_managers = []
load.old_managers = []
load.error_managers = []
self.server = server = mock.MagicMock(spec=Router)
self.napixd = Napixd(loader, server)
load.managers = []
def tearDown(self):
self.patch_service.stop()
def test_add_filter(self):
self.server.add_filter.assert_called_once_with(self.napixd)
def test_as_plugin(self):
cb = mock.Mock()
req = mock.Mock()
r = self.napixd(cb, req)
self.assertEqual(r, cb.return_value)
cb.assert_called_once_with(NapixdContext(self.napixd, req))
def test_find_service(self):
s = self.napixd.find_service('m1')
self.assertEqual(s, MyService(None, 'm1'))
def test_find_not_service(self):
self.assertRaises(InternalRequestFailed, self.napixd.find_service, 'm3')
def test_zero(self):
assert not self.server.route.assert_has_calls([
mock.call('/', self.napixd.slash),
mock.call('/m1', mock.ANY),
mock.call('/m2', mock.ANY),
])
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2'])
def test_reload_new(self):
assert not self.server.route.reset_mock()
m3 = mock.Mock(alias='m3')
self.load.new_managers = [m3]
self.napixd.reload()
self.server.route.assert_called_once_with('/m3', mock.ANY)
self.assertEqual(self.server.unroute.call_count, 0)
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2', '/m3'])
def test_reload_old(self):
self.server.route.reset_mock()
self.load.old_managers = [mock.Mock(alias='m2')]
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.assertEqual(self.server.route.call_count, 0)
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1'])
def test_reload_error(self):
self.server.route.reset_mock()
error = mock.Mock(alias='m2')
self.load.old_managers = [mock.Mock(alias='m2')]
self.load.error_managers = [error]
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.server.route.assert_has_calls([
mock.call('/m2', mock.ANY),
mock.call('/m2/', mock.ANY, catchall=True),
])
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2'])
def test_reload_error_and_error(self):
self.load.old_managers = [mock.Mock(alias='m2')]
self.load.error_managers = [mock.Mock(alias='m2')]
self.napixd.reload()
self.server.reset_mock()
error = mock.Mock(alias='m2')
self.load.old_managers = []
self.load.error_managers = [error]
self.server.__contains__.return_value = True
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.server.route.assert_has_calls([
mock.call('/m2', mock.ANY),
mock.call('/m2/', mock.ANY, catchall=True),
])
| napix/NapixServer | tests/test_application.py | test_application.py | py | 4,444 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "napixd.loader.loader.Load... |
3708389787 | import re
import redis
import pickle
import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse
import errors
red = redis.StrictRedis(host='redis', port=6379, db=0)
try:
red.get('test')
except ConnectionError:
red = None
_POST_COMMENT_URL = \
'https://telopeapark.managebac.com/groups/{}/messages/{}/comments'
_CACHE_EXPIRE = 5 * 60 # 5min
class Messages(list):
'''
Represents the :class:`Message` s for a given class on managebac
Gets the messages as :class:`LazyMessage` s
Raises:
BadToken, ManageBacCommunicationException
'''
def __init__(self, url, token):
r = requests.get(url + '/archive', cookies=token)
if r.ok and r.status_code == 200:
soup = BeautifulSoup(r.text)
class_name = soup.h1.div.next_sibling.next_sibling.text
for topic in soup.findAll(class_='topic'):
url = topic.a['href']
self.append(LazyMessage(
id_=int(re.search('/messages/([0-9]+)', url).group(1)),
class_id=int(re.search(
'/[classesgroups]+/([0-9]+)/', url).group(1)),
class_name=class_name,
by=re.search('by\n(.+)', topic.label.text).group(1),
title=topic.a.text
))
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
class Message():
'''
Represents a message that you post on managebac
The constructor downloads a message and fill the object
Args:
* `url` (str) - url of the message
* `token` - returned by :func:`managebac.login`
Sets Values:
* `id_` (int)
* `title` (string)
* `by` (string)
* `text` (string) - just a string, no HTML
* `time` (:class:`datetime.datetime`)
* `avatar` (string): a image URL
* `comments` (list of :class:`Comment`)
* `class_name` (string)
* `class_id` (int)
Raises:
BadToken, ManageBacCommunicationException
'''
def __init__(self, url, token):
r = requests.get(url, cookies=token)
if r.ok and r.status_code == 200:
self.id_ = int(re.search('/messages/([0-9]+)', url).group(1))
self.class_id = int(re.search(
'/[classesgroups]+/([0-9]+)/', url).group(1))
soup = BeautifulSoup(r.text)
self.class_name = soup.h1.div.next_sibling.next_sibling.text
message = soup.find(class_='reply_target')
self.avatar = message.img['src']
self.time = parse(message.find(class_='time').text)
self.by = message.strong.text.strip()
self.title = message.a.text
self.text = message.find(class_='content').text
self.comments = []
for el in message.find_next_siblings(class_='topic'):
self.comments.append(Comment(
avatar=el.img['src'],
time=parse(el.find(class_='time').text),
by=el.strong.text.strip(),
text=el.find(class_='content').text
))
self.loaded = True
if red:
cache_id = 'cache:message:{}'.format(self.id_)
red.set(cache_id, pickle.dumps(self))
red.expire(cache_id, _CACHE_EXPIRE)
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
def post_comment(self, text, token):
'''
Post a comment below the message on managebac.
Args:
* `text` (str) - plain text to post
* `token` - the users login from :func:`managebac.login`
'''
r = requests.post(_POST_COMMENT_URL.format(self.class_id, self.id_),
cookies=token, data={'post[body]': text})
if r.ok and r.status_code == 200:
return
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
def __unicode__(self):
return u'Message({} said "{}":"{}" ({}), at {} in {} ({}), {})'.format(
self.by, self.title, self.text, self.id_, self.time,
self.class_name, self.class_id, map(unicode, self.comments))
class LazyMessage(Message):
'''
A lazy loaded message class
By default, it only includes the following attributes:
* `id_` (int)
* `title` (string)
* `by` (string)
* `class_name` (str)
* `class_id` (int)
It also introduces the `loaded` (bool) attribute
'''
def __init__(self, **kwargs):
self.loaded = False
for k, v in kwargs.iteritems():
setattr(self, k, v)
if red:
old = red.get('cache:message:{}'.format(self.id_))
if old:
self = pickle.loads(old)
self.loaded = True
def load(self, token):
'''
Same as :class:`Message`, but with the URL autogenerated
'''
Message.__init__(self, 'https://telopeapark.managebac.com/groups/{}'
'/messages/{}'.format(self.class_id, self.id_),
token)
def __unicode__(self):
if self.loaded:
return Message.__unicode__(self)
return u'LazyMessage({} said {} ({}), in {} ({}))'.format(
self.by, self.title, self.id_, self.class_name, self.class_id)
class Comment():
'''
A (dumb) object that represents a comment on a :class:`Message`
The constructor makes a new Comment from the kwargs. Expects the
same args as a :class:`Message`, but without the `id_`,
`title` or `class_*`
'''
def __init__(self, **kwargs):
'''
'''
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __unicode__(self):
return u'Comment({} said "{}", at {})'.format(
self.by, self.text, self.time)
| samdroid-apps/ManageBacToTheFuture | lib/message.py | message.py | py | 6,140 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "redis.StrictRedis",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.search",
"lin... |
26423979164 | import numpy as np
import torch
import time
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import collections
import torch.utils.data as data
class Model(torch.nn.Module):
def __init__(self, input_dim=784, output_dim=10):
super(Model, self).__init__()
self.linear = torch.nn.Linear(input_dim, output_dim)
self.input_dim = input_dim
self.output_dim = output_dim
def forward(self, x):
outputs = self.linear(x)
return outputs
class MyDataset(data.Dataset):
def __init__(self, images, labels):
self.images = images
self.labels = labels
def __getitem__(self, index): # return tensor type
img, target = self.images[index], self.labels[index]
return img, target
def __len__(self):
return len(self.images)
def get_mnist_train_list():
# build-in mnist dataset
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=False)
img_list = [ x for x, y in train_dataset ]
lab_list = [ y for x, y in train_dataset ]
return img_list, lab_list
def exp_data():
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=False)
for (x, y) in train_dataset:
print( type(x), type(y) )
#exp_data()
def transform_train_list(images_list, labels_list, bt_size):
train_images = [ img for img in images_list ]
train_labels = [ lab for lab in labels_list ]
#print("type", type(train_images[0]))
new_dataset = MyDataset(train_images, train_labels)
train_loader = torch.utils.data.DataLoader(dataset=new_dataset, batch_size=bt_size, shuffle=False)
return train_loader
def evaluate(model_name, test_images, test_labels, l_rate=0.001):
test_model = load_model(model_name)
test_loader = transform_train_list(test_images, test_labels, 1)
# init
correct = 0
total = 0
loss = 0
#optimizer = torch.optim.SGD(test_model.parameters(), lr=l_rate)
criterion = torch.nn.CrossEntropyLoss()
# get acc & loss
for i, (img, lab) in enumerate(test_loader):
img = Variable(img.view(-1, 28 * 28))
lab = Variable(lab)
#optimizer.zero_grad()
outputs = test_model(img)
loss += criterion(outputs, lab)
#optimizer.step()
_, predicted = torch.max(outputs.data, 1)
total+= lab.size(0)
correct+= (predicted == lab).sum()
# get average
loss /= len(test_labels)
accuracy = int(correct)/total
return loss, accuracy
#训练函数
def train(train_model, train_raw_img, train_raw_lab, E, bt_size=100, epochs=1, lr_rate=0.001): # E means iteration
# get train loader
train_loader = transform_train_list(train_raw_img, train_raw_lab, bt_size)
# 计算 softmax 分布之上的交叉熵损失
criterion = torch.nn.CrossEntropyLoss()
#SGD
optimizer = torch.optim.SGD(train_model.parameters(), lr=lr_rate)
# train
tms = []
#tic = time.time()
for epoch in range(epochs):
print('epoch {}:'.format(epoch + 1))
for i in range(E):
print("--\titeration {}".format(i+1))
img, lab = next(iter(train_loader))
img = Variable(img.view(-1, 28 * 28))
lab = Variable(lab)
optimizer.zero_grad()
tic = time.time()
outputs = train_model(img)
#print(lab)
loss = criterion(outputs, lab)
loss.backward()
optimizer.step()
toc = time.time()
tms.append(toc-tic)
#print(loss)
#toc = time.time()
return np.sum(tms)
def save_model(model,path):
# torch.save(model, path, _use_new_zipfile_serialization=False)
torch.save(model, path)
def load_model(model_name):
model = torch.load(model_name)
return model
def aggregate(client_select, client_set, model_name):
models_list = []
for i in client_select:
client = client_set[i]
name = 'models/model{}.pkl'.format(client.ID)
model = torch.load(name)
models_list.append(model)
models_dict = [i.state_dict() for i in models_list]
weight_keys = list(models_dict[0].keys())
server_model_dict = collections.OrderedDict()
for key in weight_keys:
key_sum = 0
sumation = 0
for i in range(len(models_list)):
client = client_set[ client_select[i] ]
key_sum += models_dict[i][key] * client.num_traindata
sumation += client.num_traindata
server_model_dict[key] = key_sum / sumation
server_model = torch.load(model_name)
server_model.load_state_dict(server_model_dict)
torch.save(server_model, model_name, _use_new_zipfile_serialization=False)
#print('aggregation done!')
def predict(model_name, img):
img = Variable(img.view(-1, 28 * 28))
model = torch.load(model_name)
lab = model(img)
x = -100
p = 0
#print(lab[0])
print("hh", lab)
for i in range(len(lab[0])):
if lab[0][i].double() > x:
print(lab[0][i].double(), i)
x = lab[0][i].double()
p = i
#print("hhhhhh", lab, p)
return p
| WENLIXIAO-CS/FL-IoT-Demo | Fed-IoT-demo-lightly/Pytorch_Model.py | Pytorch_Model.py | py | 5,470 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset"... |
39312594943 | import os
import requests
from bs4 import BeautifulSoup, Tag
from collections import Counter
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.corpus import words
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tempfile
import gradio as gr
import openai
from googlesearch import search
from pytrends.request import TrendReq
from sklearn.manifold import MDS, TSNE
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from IPython.display import HTML
import numpy as np
import matplotlib.cm as cm
from urllib.parse import urlparse, urljoin
import os
from celery import Celery
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('words')
# Set your OpenAI API key here
openai.api_key = os.environ['OPENAI_API_KEY']
#@title Define functions
def get_image_html(fig):
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
return '<img src="data:image/png;base64,{}"/>'.format(base64.b64encode(buf.getvalue()).decode('ascii'))
def search_top_competitors(keywords, num_results=10):
competitors = set()
for keyword in keywords:
for url in search(keyword, num_results=num_results):
competitors.add(url)
return list(competitors)
def get_page_content(url):
response = requests.get(url)
return BeautifulSoup(response.text, 'html.parser')
def get_meta_tags(soup):
meta_tags = soup.find_all('meta')
return {tag.get('name'): tag.get('content') for tag in meta_tags if tag.get('name')}
def get_heading_tags(soup):
headings = {}
for tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
headings[tag] = [heading.text for heading in soup.find_all(tag)]
return headings
def analyze_keywords(keywords_counter, top_n=10):
return keywords_counter.most_common(top_n)
def visualize_keywords(keywords_counter, top_n=10):
common_keywords = analyze_keywords(keywords_counter, top_n)
df = pd.DataFrame(common_keywords, columns=['Keyword', 'Count'])
df.set_index('Keyword', inplace=True)
df.plot(kind='bar', figsize=(12, 6))
plt.title('Top Keywords')
plt.xlabel('Keywords')
plt.ylabel('Frequency')
fig = plt.gcf() # Get the current figure
plt.tight_layout()
temp_image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
plt.savefig(temp_image_file.name, format='png')
plt.close()
return temp_image_file.name
def plot_trends(keywords):
pytrends = TrendReq(hl='en-US', tz=360, retries=3)
pytrends.build_payload(keywords, cat=0, timeframe='today 12-m', geo='', gprop='')
trends_data = pytrends.interest_over_time()
return trends_data
def preprocess_text(text, min_word_length=3):
stop_words = set(stopwords.words('english'))
words = word_tokenize(text.lower())
words = [word for word in words if word.isalnum()]
words = [word for word in words if len(word) >= min_word_length and word not in stop_words]
return words
def visualize_clusters(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
mds = MDS(n_components=2, dissimilarity='precomputed', random_state=42)
distance_matrix = 1 - cosine_similarity(matrix)
coords = mds.fit_transform(distance_matrix)
x, y = coords[:, 0], coords[:, 1]
for i, word in enumerate(words):
plt.scatter(x[i], y[i], alpha=0.5)
plt.text(x[i], y[i], word, fontsize=10)
plt.title('Word Clusters based on Thematic Relatedness')
plt.show()
def create_cluster_table(words, model, clusters):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
# Create a dictionary to store words per cluster
cluster_dict = {}
for i, word in enumerate(words):
cluster_id = clusters[i]
if cluster_id not in cluster_dict:
cluster_dict[cluster_id] = []
cluster_dict[cluster_id].append(word)
# Create a DataFrame from the dictionary
max_words = max(len(cluster_words) for cluster_words in cluster_dict.values())
num_clusters = len(cluster_dict)
data = {f"Cluster {i}": cluster_dict.get(i, []) + [None] * (max_words - len(cluster_dict.get(i, [])))
for i in range(num_clusters)}
df = pd.DataFrame(data)
return df
def clean_text(text):
# Separate words that are meant to be separated
text = re.sub(r'([a-z])([A-Z])', r'\1 \2', text)
# Tokenize the text
tokens = nltk.word_tokenize(text)
# Remove nonsensical words
try:
english_words = set(words)
except:
english_words = set(words.words())
clean_tokens = [token for token in tokens if token.lower() in english_words or token.istitle()]
# Join tokens back into a string
clean_text = ' '.join(clean_tokens)
return clean_text
def visualize_clusters_og(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
n_clusters = 5
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
clusters = kmeans.fit_predict(matrix)
tsne = TSNE(n_components=2, random_state=42)
coords = tsne.fit_transform(matrix)
x, y = coords[:, 0], coords[:, 1]
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
plt.figure(figsize=(8, 8))
for i, word in enumerate(words):
plt.scatter(x[i], y[i], c=[colors[clusters[i]]], alpha=0.7)
plt.text(x[i], y[i], word, fontsize=10)
plt.xticks([])
plt.yticks([])
plt.title('Word Clusters based on Thematic Relatedness')
plt.show()
def visualize_clusters_plot(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
n_clusters = 4
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
clusters = kmeans.fit_predict(matrix)
try:
tsne = TSNE(n_components=2, random_state=42)
coords = tsne.fit_transform(matrix)
except ValueError:
max_perplexity = len(words) - 1
tsne = TSNE(n_components=2, random_state=42, perplexity=max_perplexity)
coords = tsne.fit_transform(matrix)
x, y = coords[:, 0], coords[:, 1]
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
fig, axs = plt.subplots(2, 2, figsize=(8, 8), gridspec_kw={'width_ratios': [sum(clusters == 0) + sum(clusters == 1), sum(clusters == 2) + sum(clusters == 3)], 'height_ratios': [sum(clusters == 0) + sum(clusters == 2), sum(clusters == 1) + sum(clusters == 3)]})
fig.subplots_adjust(wspace=0, hspace=0)
for ax in axs.ravel():
ax.axis('off')
for i, word in enumerate(words):
cluster_idx = clusters[i]
ax = axs[cluster_idx // 2, cluster_idx % 2]
ax.scatter(x[i], y[i], c=[colors[cluster_idx]], alpha=0.7)
ax.text(x[i], y[i], word, fontsize=10)
plt.legend(loc="best", fontsize=13)
plt.tight_layout()
temp_image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
plt.savefig(temp_image_file.name, format='png')
plt.close()
return temp_image_file.name, clusters
def sanitize_url(url):
if not re.match('^(http|https)://', url):
url = 'http://' + url
if not re.match('^(http|https)://www\.', url):
url = re.sub('^(http|https)://', r'\g<0>www.', url)
return url
# Configure the Celery app
app = Celery("tasks", broker=os.environ['REDIS_URL'], backend=os.environ['REDIS_URL'])
# Define the inputs and outputs
competitor_url_input = gr.inputs.Textbox(label="Competitor URL", placeholder="Enter a competitor URL")
full_site_scrape_checkbox = gr.inputs.Checkbox(label="Tick for full site scrape (otherwise landing page only)")
meta_tags_output = gr.outputs.Textbox(label="Meta Tags")
heading_tags_output = gr.outputs.Textbox(label="Heading Tags")
top10keywords_output = gr.outputs.Textbox(label="Top 10 Keywords")
cluster_table_output = gr.outputs.HTML(label="Cluster Table")
cluster_plot_output = gr.outputs.Image(type='filepath', label="Cluster Plot")
keyword_plot_output = gr.outputs.Image(type='filepath', label="Keyword Plot")
seo_analysis_output = gr.outputs.Textbox(label="SEO Analysis")
def append_unique_elements(source, target):
for element in source:
if isinstance(element, Tag) and element not in target:
target.append(element)
def get_internal_links(url: str):
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
internal_links = set()
for link in soup.find_all("a"):
href = link.get("href")
if href:
joined_url = urljoin(url, href)
parsed_url = urlparse(joined_url)
if parsed_url.netloc == urlparse(url).netloc:
internal_links.add(joined_url)
return internal_links
def analyze_single_page(competitor_url: str):
sanitized_url = sanitize_url(competitor_url)
soup = get_page_content(sanitized_url)
# Scrape and analyze meta tags
meta_tags = get_meta_tags(soup)
topmetatags = ""
for name, content in meta_tags.items():
if "description" in name.lower():
topmetatags += (f"{name}: {content}\n")
# Scrape and analyze heading tags
heading_tags = get_heading_tags(soup)
topheadingtags = ""
for tag, headings in heading_tags.items():
filtered_headings = [heading for heading in headings if len(heading) > 2]
if filtered_headings:
topheadingtags += (f"{tag}: {', '.join(filtered_headings)}\n")
# Scrape, analyze, and visualize keywords from page content
page_text = soup.get_text()
page_text_cleaned = clean_text(page_text)
preprocessed_text = preprocess_text(page_text_cleaned)
keywords_counter = Counter(preprocessed_text)
top10keywords = ""
for keyword, count in analyze_keywords(keywords_counter, top_n=10):
top10keywords += (f"{keyword}: {count}\n")
# Semantic clustering and visualization
sentences = [preprocessed_text[i:i+10] for i in range(0, len(preprocessed_text), 10)]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
words = [word for word, _ in analyze_keywords(keywords_counter, top_n=50)]
clusters = [model.wv.doesnt_match(words)] * len(words)
cluster_plot,clusters = visualize_clusters_plot(words, model)
cluster_table = create_cluster_table(words, model, clusters)
keyword_plot = visualize_keywords(keywords_counter, top_n=10)
table_string = cluster_table.to_string(index=False)
SEO_prompt = f"""The following information is given about a company's website:
Meta Tags:
{{meta_tags}}
Heading Tags:
{{heading_tags}}
Top 10 Keywords:
{{top10keywords}}
The following table represents clusters of thematically related words identified using NLP and clustering techniques. Each column represents a different cluster, and the words in each column are thematically related.
{table_string}
Please analyze the provided information and perform the following tasks:
1. Predict what the website is all about (the market sector).
2. Based on the market sector of the company, give a name to each cluster based on the theme it represents. The name needs to be the best summary of all the words in the cluster.
3. Perform a SWOT analysis (Strengths, Weaknesses, Opportunities, and Threats) from an SEO perspective for the company as a whole, taking into account the meta tags, heading tags, top 10 keywords, and the clusters.
Please provide your analysis in a clear and concise manner.
4. Lastly, suggest a list of 5 single words and 5 phrases (no longer than 3 words each) that the company should be using to improve their SEO
""".format(meta_tags=meta_tags, heading_tags=heading_tags, top10keywords=top10keywords, table_string=table_string)
def analyse_SEO(SEO_prompt):
response = openai.Completion.create(
model="text-davinci-003",
prompt = SEO_prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
gpt3_response = response.get('choices')[0].text
return gpt3_response,response
seo_analysis = analyse_SEO(SEO_prompt)
return topmetatags, topheadingtags, top10keywords, cluster_table.to_html(), cluster_plot, keyword_plot, seo_analysis[0]
# Wrap the analyze_website function with the Celery app.task decorator
@app.task
def analyze_website_task(competitor_url: str, full_site_scrape: bool = False):
if not full_site_scrape:
topmetatags, topheadingtags, top10keywords, cluster_table, cluster_plot, keyword_plot, seo_analysis = analyze_single_page(competitor_url)
return topmetatags, topheadingtags, top10keywords, cluster_table, cluster_plot, keyword_plot, seo_analysis
sanitized_url = sanitize_url(competitor_url)
internal_links = get_internal_links(sanitized_url)
soup_collection = BeautifulSoup("<html><head></head><body></body></html>", "html.parser")
for link in internal_links:
try:
soup = get_page_content(link)
append_unique_elements(soup.head, soup_collection.head)
append_unique_elements(soup.body, soup_collection.body)
except Exception as e:
print(f"Failed to analyze link: {link}. Error: {e}")
print('got all the links')
# Scrape and analyze meta tags
meta_tags = get_meta_tags(soup_collection)
topmetatags = ""
for name, content in meta_tags.items():
if "description" in name.lower():
topmetatags += (f"{name}: {content}\n")
print('fetched metatags')
# Scrape and analyze heading tags
heading_tags = get_heading_tags(soup_collection)
topheadingtags = ""
for tag, headings in heading_tags.items():
filtered_headings = [heading for heading in headings if len(heading) > 2]
if filtered_headings:
topheadingtags += (f"{tag}: {', '.join(filtered_headings)}\n")
print("fetched heading tags")
# Scrape, analyze, and visualize keywords from page content
page_text = soup_collection.get_text()
page_text_cleaned = clean_text(page_text)
preprocessed_text = preprocess_text(page_text_cleaned)
keywords_counter = Counter(preprocessed_text)
top10keywords = ""
for keyword, count in analyze_keywords(keywords_counter, top_n=10):
top10keywords += (f"{keyword}: {count}\n")
print("fetched keywords")
# Semantic clustering and visualization
sentences = [preprocessed_text[i:i+10] for i in range(0, len(preprocessed_text), 10)]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
words = [word for word, _ in analyze_keywords(keywords_counter, top_n=50)]
clusters = [model.wv.doesnt_match(words)] * len(words)
print("calculated clusters")
cluster_plot,clusters = visualize_clusters_plot(words, model)
cluster_table = create_cluster_table(words, model, clusters)
keyword_plot = visualize_keywords(keywords_counter, top_n=10)
print("plotted figures")
table_string = cluster_table.to_string(index=False)
print("created table string")
heading_tags_compressed = {}
for key, values in heading_tags.items():
count = Counter(values)
sorted_values = sorted(count.keys(), key=lambda x: count[x], reverse=True)
filtered_values = [value for value in sorted_values if value.strip() != ""]
heading_tags_compressed[key] = filtered_values[:10]
heading_tags_clean = {}
for key, values in heading_tags.items():
count = Counter(values)
sorted_values_clean = sorted(count.keys(), key=lambda x: count[x], reverse=True)
heading_tags_clean = [value for value in sorted_values_clean if value.strip() != ""]
print("cleaned up heading tags")
SEO_prompt = f"""The following information is given about a company's website:
Meta Tags:
{{meta_tags}}
Heading Tags:
{{heading_tags_compressed}}
Top 10 Keywords:
{{top10keywords}}
The following table represents clusters of thematically related words identified using NLP and clustering techniques. Each column represents a different cluster, and the words in each column are thematically related.
{table_string}
Please analyze the provided information and perform the following tasks:
1. Predict what the website is all about (the market sector).
2. Based on the market sector of the company, give a name to each cluster based on the theme it represents. The name needs to be the best summary of all the words in the cluster.
3. Perform a SWOT analysis (Strengths, Weaknesses, Opportunities, and Threats) from an SEO perspective for the company as a whole, taking into account the meta tags, heading tags, top 10 keywords, and the clusters.
Please provide your analysis in a clear and concise manner.
4. Lastly, suggest a list of 10 words and 10 phrases that the company should be using to improve their SEO
""".format(meta_tags=meta_tags, heading_tags_compressed=heading_tags_compressed, top10keywords=top10keywords, table_string=table_string)
print("defined SEO prompt")
def analyse_SEO(SEO_prompt):
response = openai.Completion.create(
model="text-davinci-003",
prompt = SEO_prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
gpt3_response = response.get('choices')[0].text
return gpt3_response,response
seo_analysis = analyse_SEO(SEO_prompt)
print("ran seo analysis")
print(topmetatags, heading_tags_clean,top10keywords,cluster_table.to_html(), cluster_plot, keyword_plot,seo_analysis[0])
return topmetatags, heading_tags_clean, top10keywords, cluster_table.to_html(), cluster_plot, keyword_plot, seo_analysis[0]
gr.Interface(
fn=analyze_website_task,
inputs=[competitor_url_input, full_site_scrape_checkbox],
outputs=[
meta_tags_output,
heading_tags_output,
top10keywords_output,
cluster_table_output,
cluster_plot_output,
keyword_plot_output,
seo_analysis_output,
],
title="SEO Analysis Tool",
description="Enter a competitor URL to perform a SEO analysis (some javascript pages will deny full scrape).",
layout="vertical"
).launch(share=True,debug=True) | PhiloSolares/seo_analysis | seo_analysis_tool.py | seo_analysis_tool.py | py | 18,812 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_... |
70945127228 | # remote DB 연동
import MySQLdb
import pandas as pd
import numpy as np
import ast
import csv
"""
config = {
'host':'127.0.0.1',
'user':'root',
'password':'123',
'database':'test',
'port':3306,
'charset':'utf8',
'use_unicode':True
}
"""
try: # db 읽는 방법
with open("mariadb_connect.txt", "r") as fr:
config = fr.read() # String 형태
except Exception as e:
print('read err : ' + str(e))
print(config)
config = ast.literal_eval(config)
print(type(config)) # dict type으로 들어옴
#############################################
# sql 명령
try:
conn = MySQLdb.connect(**config) # 연결을 dict type으로 (String type은 err)
cursor = conn.cursor()
sql = """
select jikwon_no, jikwon_name, buser_name, jikwon_jik, jikwon_gen, jikwon_pay
from jikwon inner join buser
on buser.buser_no = jikwon.buser_num
"""
cursor.execute(sql)
# for (a,b,c,d,e,f) in cursor:
# print(a,b,c,d,e,f)
with open("jikwon_datas.csv", "w", encoding="utf-8") as fw:
writer = csv.writer(fw)
for row in cursor:
writer.writerow(row)
print('저장 성공')
# 읽기 1 : csv
df = pd.read_csv("jikwon_datas.csv", header=None, names=('번호','이름','부서','직급','성별','연봉'))
print(df[:3])
# 읽기 2 : sql
df2 = pd.read_sql(sql, conn)
#df2.columns = '번호','이름','부서','직급','성별','연봉' # 튜플타입
df2.columns = ('번호','이름','부서','직급','성별','연봉') # 튜플타입
print(df2.head(3))
print(len(df2))
print('\n *직급 : \n',df2['직급'].value_counts())
print('\n *부서 : \n',df2['부서'].value_counts())
print('\n *연봉 합계 : \n',df2.loc[:,'연봉'].sum() / len(df2))
print('\n *연봉 평균 : \n',df2.loc[:,'연봉'].mean())
print('\n *연봉 상세 : \n',df2.loc[:,['연봉']].describe())
print('\n *연봉 5000 이상 : \n',df2.loc[df2.loc[:,'연봉']>=5000])
print('\n *연봉 5000 이상 , 부서는 영업부: \n',df2.loc[(df2.loc[:,'연봉']>=5000) & (df2['부서']=='영업부')])
print('\n 교차표----------- \n')
ctab = pd.crosstab(df2['성별'], df2['직급'], margins=True)
print('\n 교차표 : \n',ctab)
import matplotlib.pyplot as plt
plt.rc('font', family='malgun gothic')
# 직급별 연봉 평균
jik_ypay = df2.groupby(['직급'])['연봉'].mean()
print('\n 직급별 연봉 평균 : \n',jik_ypay)
print('\n 직급별 연봉 평균 : \n',jik_ypay.index)
print('\n 직급별 연봉 평균 : \n',jik_ypay.values)
plt.pie(jik_ypay,
labels=jik_ypay.index,
labeldistance=0.5,
counterclock=False, # 시계반대방향
shadow=True,
explode=(0.2,0,0,0.2,0))
plt.show()
#############################################
except Exception as e:
print('err : ', e)
finally:
cursor.close()
conn.close()
############################################# | kangmihee/EX_python | py_pandas_db/pack/pandas_db2.py | pandas_db2.py | py | 3,238 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ast.literal_eval",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"li... |
36802074369 | #! /usr/bin/python3
import sqlite3
import pandas as pd
pd.set_option('display.max_columns', 500)
path = '/home/mayijun/CITI2017/'
# Calculate station days
conn = sqlite3.connect(path + 'CITI2017.sqlite3')
sql = """SELECT DISTINCT startstationid AS stationid,startdate AS date FROM trip WHERE startweekday NOT IN ('Saturday','Sunday')"""
start = pd.read_sql(sql, conn)
sql = """SELECT DISTINCT endstationid AS stationid,enddate AS date FROM trip WHERE endweekday NOT IN ('Saturday','Sunday')"""
end = pd.read_sql(sql, conn)
wkday = pd.concat([start, end])
wkday = wkday.drop_duplicates()
wkday = wkday.groupby(['stationid'], as_index=False)['date'].count()
wkday.columns = ['stationid', 'weekdays']
sql = """SELECT DISTINCT startstationid AS stationid,startdate AS date FROM trip WHERE startweekday IN ('Saturday','Sunday')"""
start = pd.read_sql(sql, conn)
sql = """SELECT DISTINCT endstationid AS stationid,enddate AS date FROM trip WHERE endweekday IN ('Saturday','Sunday')"""
end = pd.read_sql(sql, conn)
wkend = pd.concat([start, end])
wkend = wkend.drop_duplicates()
wkend = wkend.groupby(['stationid'], as_index=False)['date'].count()
wkend.columns = ['stationid', 'weekends']
df = pd.merge(wkday, wkend, how='outer', on='stationid')
df.to_csv(path + 'stationdays.csv', index=False, na_rep=0)
conn.close()
# Merge station list and station days
conn = sqlite3.connect(path + 'CITI2017.sqlite3')
station = pd.read_csv(path + 'station.csv')
wkd = pd.read_csv(path + 'stationdays.csv')
station=station[['stationid','stationname','stationlat','stationlong']]
df = pd.merge(station, wkd, how='outer', on='stationid').sort_values('stationid')
df.to_sql('station', conn, if_exists='replace', index=False)
conn.close() | NYCPlanning/td-citibike | 2017/stationdays.py | stationdays.py | py | 1,726 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.set_option",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
... |
31215041211 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('siteScrape', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='teacher',
name='averageGrade',
field=models.CharField(default=b'', max_length=3),
),
migrations.AddField(
model_name='teacher',
name='responseRate',
field=models.DecimalField(default=0, max_digits=3, decimal_places=2),
),
migrations.AlterField(
model_name='teacher',
name='quarters',
field=models.IntegerField(default=0),
),
]
| anikan/Classify | migrations/0002_auto_20150910_1439.py | 0002_auto_20150910_1439.py | py | 755 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
13956703300 | from django.db.models import Field
from . import forms
from . import validators
from .ipv6cidr import clean_ipv6_cidr
from django.utils.translation import gettext_lazy as _, ngettext_lazy
class GenericIPNetworkField(Field):
"""
Support CIDR input
ipv4 0.0.0.0/0
ipv6 ::::/0
"""
empty_strings_allowed = False
description = _("GenericIPNetworkField")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
*args, **kwargs):
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_network_validators(protocol)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 43 # ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPNetworkField cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 43:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPNetworkField"
def to_python(self, value):
import ipdb;ipdb.set_trace()
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ':' in value:
return clean_ipv6_network(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_cidr(value)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPNetworkField,
}
defaults.update(kwargs)
return super().formfield(**defaults)
| MilkBotttle/BFP | fields/cidr.py | cidr.py | py | 2,978 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Field",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ipdb.set_trace",
"line_number": 55,
"usage_type": "call"
},
{
"api_na... |
26191759535 | import requests
import json
import re
import time
import csv
import MySQLdb as mdb
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from helpers import *
"""
This script scrapes and stores the fantasy points achieved by each player for the 2013 season.
The data is stored in a .csv file.
"""
def main():
#open .csv file to store data and writes col names
wfile = open("espn-actual2.csv", "wb")
field_names = ['game_id', 'plyr_id', 'tot_pts','week']
writer = csv.writer(wfile)
writer.writerow(field_names)
#for each week in the 2013 season, the player id, game id, and points scored are scraped and stored in the .csv file
for w in range(1,3):
for pg in range(0, 300, 50):
pts_url = "http://games.espn.go.com/ffl/leaders?&scoringPeriodId=%s&seasonId=2013&startIndex=%s" % (w, pg)
pts_res = requests.get(pts_url)
soup = BeautifulSoup(pts_res.content)
for tr in soup.find_all('tr', class_="pncPlayerRow"):
id_match = re.search(r'plyr(\d+)',tr['id'])
id = int(id_match.group(1))
td_pts = tr.find('td', class_="playertableStat appliedPoints appliedPointsProGameFinal")
projpts = td_pts.contents[0].encode('ascii', 'ignore')
td_game = tr.find('td', class_="gameStatusDiv")
href = td_game.find('a')
href_str = str(href)
game_match = re.search(r'gameId=(\d+)', href_str)
game_id = game_match.group(1)
if projpts == '--':
projpts = 0
data = [game_id, id, projpts, w]
writer.writerow(data)
wfile.close()
if __name__ == '__main__':
main() | kwheeler27/insight_datasci | data/actual_fantasy_pts.py | actual_fantasy_pts.py | py | 1,604 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "csv.writer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_numbe... |
14839168624 | from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.conf import settings
from .forms import PostForm, CommentForm
from .models import Post, Group, User, Follow
User = get_user_model()
def paginator(request, object_list, per_page):
paginate = Paginator(object_list, per_page)
page_number = request.GET.get('page')
page_obj = paginate.get_page(page_number)
return page_obj
def index(request):
posts = Post.objects.select_related('author', 'group')
page_obj = paginator(request, posts, settings.SORT_POSTS)
template = 'posts/index.html'
context = {
'posts': posts,
'page_obj': page_obj,
}
return render(request, template, context)
def group_posts(request, slug):
group = get_object_or_404(Group, slug=slug)
posts = group.groups.select_related('author')
page_obj = paginator(request, posts, settings.SORT_POSTS)
template = 'posts/group_list.html'
context = {
'group': group,
'posts': posts,
'page_obj': page_obj,
}
return render(request, template, context)
def profile(request, username):
author = get_object_or_404(User, username=username)
posts = author.posts.select_related('group')
count = posts.count()
following = request.user.is_authenticated and Follow.objects.filter(
user=request.user,
author=author
)
page_obj = paginator(request, posts, settings.SORT_POSTS)
context = {
'page_obj': page_obj,
'count': count,
'author': author,
'following': following,
}
template = 'posts/profile.html'
return render(request, template, context)
def post_detail(request, post_id):
post = get_object_or_404(Post, pk=post_id)
post_title = post.text[:30]
form = CommentForm(request.POST or None)
author = post.author
author_posts = author.posts.all().count()
comments = post.comments.select_related('author')
context = {
'post': post,
'post_title': post_title,
'author': author,
'author_posts': author_posts,
'form': form,
'comments': comments,
}
template = 'posts/post_detail.html'
return render(request, template, context)
@login_required
def post_create(request):
form = PostForm(request.POST or None, files=request.FILES or None)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('posts:profile', request.user)
template = 'posts/create_post.html'
context = {'form': form}
return render(request, template, context)
@login_required
def post_edit(request, post_id):
post = get_object_or_404(Post, pk=post_id)
form = PostForm(
request.POST or None,
files=request.FILES or None,
instance=post
)
if post.author != request.user:
return redirect('posts:post_detail', post_id)
if form.is_valid():
edited_post = form.save()
edited_post.save()
return redirect('posts:post_detail', post_id)
context = {
'form': form,
'is_edit': True,
'post': post,
}
template = 'posts/create_post.html'
return render(request, template, context)
@login_required
def add_comment(request, post_id):
post = get_object_or_404(Post, id=post_id)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = post
comment.save()
return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
post_list = Post.objects.filter(author__following__user=request.user)
page_obj = paginator(request, post_list, settings.SORT_POSTS)
context = {
'page_obj': page_obj,
}
return render(request, 'posts/follow.html', context)
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if author != request.user:
Follow.objects.get_or_create(user=request.user, author=author)
return redirect('posts:profile', username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
Follow.objects.filter(user=request.user, author=author).delete()
return redirect('posts:profile', username)
| Medbrat4669/yatube_project | yatube/posts/views.py | views.py | py | 4,552 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.User",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
26682571553 | import sys
import os
import re
import logging
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QComboBox, QPushButton, QLabel, QFileDialog, QMainWindow, QMessageBox, QCheckBox
from gui import Ui_MainWindow
from function import *
project_file = ".project"
cproject_file = ".cproject"
recovery_file_sufix = "_old"
HAL_project = "STM32Cube HAL Driver"
Base_project = "Base CMSIS Driver"
EDF_PATH_VAR = "EDF_PATH"
device_cortex_series = {
'STM32F0': 'M0',
'STM32C0': 'M0+',
'STM32L0': 'M0+',
'STM32G0': 'M0+',
'STM32F1': 'M3',
'STM32F2': 'M3',
'STM32L1': 'M3',
'STM32F3': 'M4',
'STM32F4': 'M4',
'STM32G4': 'M4',
'STM32L4': 'M4',
'STM32F7': 'M7',
'STM32h7': 'M7',
}
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s: %(message)s')
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
class installer_app(Ui_MainWindow, QWidget):
def __init__(self, window, app):
self._window = window
self._app = app
super().__init__()
self.setupUi(self._window)
logging.info("Setup tools statting up.")
# Add widget event handler.
self.Btn_Browser.clicked.connect(self.onButtonBrowserClicked)
self.Btn_Setup.clicked.connect(self.onButtonSetupClicked)
self.Btn_Restore.clicked.connect(self.onButtonRestoreClicked)
self.Btn_Quit.clicked.connect(self.onButtonQuitClicked)
self.Enable_Widget(False)
# Get EDF_PATH environment variable, exit if EDF_PATH is not define.
self._edf_path = get_EDF_PATH()
if self._edf_path == None:
sys.exit()
# Check argument.
if len(sys.argv) > 1:
arg_project_dir = sys.argv[1]
if arg_project_dir:
self.onButtonBrowserClicked(arg_project_dir)
# Show installer application.
self._window.show()
sys.exit(self._app.exec_())
# Enable/disable widget function.
def Enable_Widget(self, en):
self.Btn_Setup.setDisabled(not en)
self.Btn_Restore.setDisabled(not en)
self.Box_Optimize.setDisabled(not en)
self.CB_Printf_Float.setDisabled(not en)
self.CB_Scanf_Float.setDisabled(not en)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Quit button clickec handler.
def onButtonQuitClicked(self):
sys.exit()
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Browser button clicked handler.
def onButtonBrowserClicked(self, arg):
# Get project directory in line edit.
if arg:
self._project_dir = arg
else:
self._project_dir = QFileDialog.getExistingDirectory(self, "Project browser", os.path.expanduser("~"), QFileDialog.ShowDirsOnly)
if self._project_dir:
self.LE_Project_Dir.setText(self._project_dir) # Set show directory.
self._project_file_dir = self._project_dir + "/" + project_file
self._cproject_file_dir = self._project_dir + "/" + cproject_file
# Get project background.
self._project_bgr = get_project_background(self._cproject_file_dir)
self.Btn_Project_Bgr.setText(self._project_bgr)
# Get project name.
self._project_name = get_project_name(self._project_file_dir)
if self._project_name != None:
# Get .ioc file name.
if self._project_bgr == HAL_project:
self._ioc_file_dir = self._project_dir + "/" + self._project_name + ".ioc"
else:
logging.error("Can't get project name.")
make_message_box(QMessageBox.Critical, "Error", "Can't get project name.")
return
# Get device full name in .cproject file.
self._device_full_name = get_file_target_name(self._cproject_file_dir)
# Get device series.
self._device_family_series = self._device_full_name[:7]
# Get device cortex series.
self._device_cortex_series = device_cortex_series[self._device_family_series]
# Get project install state.
if get_install_state(self._cproject_file_dir) != True:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
if self._device_full_name == None:
self.Btn_Device_Full_Name.setText("STM32xxxxxxxxx")
self.Btn_Device_Series.setText("Unknown")
make_message_box(QMessageBox.Critical, "Error", "Unknown STM32 device name.")
logging.error("Unknown device name in project.")
self.Enable_Widget(False)
return
else:
self.Btn_Device_Full_Name.setText(self._device_full_name)
self.Btn_Device_Series.setText(self._device_full_name[:7] + f'(Cortex-{self._device_cortex_series})')
self.Enable_Widget(True)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Setup button handler.
def onButtonSetupClicked(self):
logging.info(f"Project Information:")
logging.info(f"\tDirectory: {self._project_dir}")
logging.info(f"\tName: {self._project_name}")
logging.info(f"\tDevice: {self._device_full_name}")
logging.info(f"\tSeries: {self._device_family_series}")
logging.info(f"\Core: {self._device_cortex_series}")
# Create recovery file.
copy_file(self._cproject_file_dir, self._cproject_file_dir + recovery_file_sufix, False)
copy_file(self._project_file_dir, self._project_file_dir + recovery_file_sufix, False)
if self._project_bgr == HAL_project:
copy_file(self._ioc_file_dir, self._ioc_file_dir + recovery_file_sufix, False)
# Get build option optimize.
sel_opt = self.Box_Optimize.currentText()
start_index = sel_opt.find("(") + 1
end_index = sel_opt.find(")", start_index)
self.build_optimize_level = sel_opt[start_index:end_index]
logging.info(f"\tOptimize level: {self.build_optimize_level}")
# Get option printf float.
if self.CB_Printf_Float.checkState() == 0:
self.printf_float = "false"
else:
self.printf_float = "true"
logging.info(f"\tPrintf float: {self.printf_float}")
# Get option scanf float.
if self.CB_Scanf_Float.checkState() == 0:
self.scanf_float = "false"
else:
self.scanf_float = "true"
logging.info(f"\tScanf float: {self.scanf_float}")
# Config .cproject file.
c_symbols_replace = "<listOptionValue builtIn=\"false\" value=\"DEBUG\"/>\n"
symbols_insert = ( "\n\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"DEBUG\"/>\n"
"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"_GNU_SOURCE\"/>\n"
"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"__STM32__\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"{self._device_family_series}\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"{self._device_full_name}\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"DEVICE_NAME="{self._device_full_name}"\"/>")
symbols_insert_HAL = ("\n\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"USE_HAL_DRIVER\"/>")
source_folder_replace = "<sourceEntries>"
source_folder_insert = ("\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_core\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_rtos\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_middlewares\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"main\"/>")
c_include_path_replace = "name=\"Include paths (-I)\" superClass=\"com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.includepaths\" useByScannerDiscovery=\"false\" valueType=\"includePath\">"
cpp_include_path_replace = "name=\"Include paths (-I)\" superClass=\"com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.includepaths\" useByScannerDiscovery=\"false\" valueType=\"includePath\">"
include_path_insert = ( "\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"../buildconfig\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\""${{{EDF_PATH_VAR}}}/components/core/include"\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\""${{{EDF_PATH_VAR}}}/components/freertos/os_c{self._device_cortex_series.lower()}"\"/>\n")
c_optimize_level_replace = 'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false"/>'
cpp_optimize_level_replace = 'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false"/>'
c_optimize_level_pattern = r'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false" value="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.tool\.c\.compiler\.option\.optimization\.level\.value\..*?\" valueType="enumerated"/>'
cpp_optimize_level_pattern = r'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false" value="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.tool\.cpp\.compiler\.option\.optimization\.level\.value\..*?\" valueType="enumerated"/>'
c_optimize_level_insert = f'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false" value="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level.value.{self.build_optimize_level}\" valueType="enumerated"/>'
cpp_optimize_level_insert = f'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false" value="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level.value.{self.build_optimize_level}\" valueType="enumerated"/>'
printf_float_pattern = r'<option id="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoprintffloat\.\d+" name="Use float with printf from newlib-nano \(-u _printf_float\)" superClass="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoprintffloat" useByScannerDiscovery="false" value=".+" valueType="boolean"/>'
printf_float_replace = f'<option id="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoprintffloat.1816458521" name="Use float with printf from newlib-nano (-u _printf_float)" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoprintffloat" useByScannerDiscovery="false" value="{self.printf_float}" valueType="boolean"/>'
scanf_float_pattern = r'<option id="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoscanffloat\.\d+" superClass="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoscanffloat" value=".+" valueType="boolean"/>'
scanf_float_replace = f'<option id="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoscanffloat.653624109" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoscanffloat" value="{self.scanf_float}" valueType="boolean"/>'
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
new_file_content = file_content
# Xóa hết các symbols hiện có.
# pattern = re.compile(c_symbols_pattern, re.DOTALL)
# new_file_content = re.sub(pattern, r'\1\n\t\t\t\t\t\t\t\t\2', file_content)
# pattern = re.compile(cpp_symbols_pattern, re.DOTALL)
# new_file_content = re.sub(pattern, r'\1\n\t\t\t\t\t\t\t\t\2', new_file_content)
# Insert symbols
if self._project_bgr == HAL_project:
new_file_content = new_file_content.replace(c_symbols_replace, c_symbols_replace + symbols_insert + symbols_insert_HAL)
# new_file_content = new_file_content.replace(cpp_symbols_replace, cpp_symbols_replace + symbols_insert + symbols_insert_HAL)
else:
new_file_content = new_file_content.replace(c_symbols_replace, c_symbols_replace + symbols_insert)
# new_file_content = new_file_content.replace(cpp_symbols_replace, cpp_symbols_replace + symbols_insert)
# Insert c include path
new_file_content = new_file_content.replace(c_include_path_replace, c_include_path_replace + "\n" + include_path_insert)
# Insert cpp include path
new_file_content = new_file_content.replace(cpp_include_path_replace, cpp_include_path_replace + "\n" + include_path_insert)
# Insert source folder directory
new_file_content = new_file_content.replace(source_folder_replace, source_folder_replace + "\n" + source_folder_insert)
# Insert c optimize level
if new_file_content.find(c_optimize_level_replace) != -1:
new_file_content = new_file_content.replace(c_optimize_level_replace, c_optimize_level_insert)
else:
new_file_content = re.sub(c_optimize_level_pattern, c_optimize_level_insert, new_file_content, count=1)
# Insert cpp optimize level
if new_file_content.find(cpp_optimize_level_replace) != -1:
new_file_content = new_file_content.replace(cpp_optimize_level_replace, cpp_optimize_level_insert)
else:
new_file_content = re.sub(cpp_optimize_level_pattern, cpp_optimize_level_insert, new_file_content, count=1)
# Change printf float option.
new_file_content = find_and_replace_printf_or_scanf(new_file_content, printf_float_pattern, printf_float_replace)
# Change scanf float option.
new_file_content = find_and_replace_printf_or_scanf(new_file_content, scanf_float_pattern, scanf_float_replace)
with open(self._cproject_file_dir, 'w') as f:
f.write(new_file_content)
logging.info(f"Config .cproject file successful")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{self._cproject_file_dir}: No such file in directory.")
logging.error(f"{self._cproject_file_dir} -> No such file in directory.")
return
# Config .project file.
project_desciption_replace = "</projectDescription>"
project_desciption_insert = ("\t<linkedResources>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_core</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/core/source</location>\n"
"\t\t</link>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_rtos</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/freertos/os_c{self._device_cortex_series.lower()}</location>\n"
"\t\t</link>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_middlewares</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/middlewares</location>\n"
"\t\t</link>\n"
"\t</linkedResources>\n"
"</projectDescription>")
linked_source_check = "<name>edf_core</name>"
try:
with open(self._project_file_dir, 'r') as f:
file_content = f.read()
if not (linked_source_check in file_content):
new_file_content = file_content.replace(project_desciption_replace, project_desciption_insert)
with open(self._project_file_dir, 'w') as file:
file.write(new_file_content)
logging.info(f"Config .project file successful")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{self._project_file_dir}: No such file in directory.")
logging.error(f"{self._project_file_dir} -> No such file in directory.")
return
# Config .ioc file.
if self._project_bgr == HAL_project:
with open(self._ioc_file_dir, 'r') as f:
file_content = f.read()
busfault_handler_replace = "NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
debugmon_handler_replace = "NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
hardfault_handler_replace = "NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
memmanage_handler_replace = "NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
nmi_handler_replace = "NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
pendsv_handler_replace = "NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
svcall_handler_replace = "NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
systick_handler_replace = "NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:true\:false\:true\:false"
usagefault_handler_replace = "NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
busfault_handler_insert = "NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
debugmon_handler_insert = "NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
hardfault_handler_insert = "NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
memmanage_handler_insert = "NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
nmi_handler_insert = "NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
pendsv_handler_insert = "NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
svcall_handler_insert = "NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
systick_handler_insert = "NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:false\:false\:false\:false"
usagefault_handler_insert = "NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
if busfault_handler_replace in file_content:
new_file_content = file_content.replace(busfault_handler_replace, busfault_handler_insert)
if debugmon_handler_replace in file_content:
new_file_content = new_file_content.replace(debugmon_handler_replace, debugmon_handler_insert)
if hardfault_handler_replace in file_content:
new_file_content = new_file_content.replace(hardfault_handler_replace, hardfault_handler_insert)
if memmanage_handler_replace in file_content:
new_file_content = new_file_content.replace(memmanage_handler_replace, memmanage_handler_insert)
if nmi_handler_replace in file_content:
new_file_content = new_file_content.replace(nmi_handler_replace, nmi_handler_insert)
if pendsv_handler_replace in file_content:
new_file_content = new_file_content.replace(pendsv_handler_replace, pendsv_handler_insert)
if svcall_handler_replace in file_content:
new_file_content = new_file_content.replace(svcall_handler_replace, svcall_handler_insert)
if systick_handler_replace in file_content:
new_file_content = new_file_content.replace(systick_handler_replace, systick_handler_insert)
if usagefault_handler_replace in file_content:
new_file_content = new_file_content.replace(usagefault_handler_replace, usagefault_handler_insert)
with open(self._ioc_file_dir, 'w') as f:
f.write(new_file_content)
logging.info(f"Config .ioc file successful")
# Add edf_main_application into file main.c.
if self._project_bgr == HAL_project:
main_file_path = os.path.join(self._project_dir, "Core/Src/main.c")
try:
with open(main_file_path, 'r') as f:
file_content = f.read()
# Lấy các hàm init của STM32 HAL Driver.
pattern = re.compile(r'/\*\s*Initialize all configured peripherals\s*\*/\n\s*(.*?)\n\s*/\*\s*USER CODE BEGIN 2\s*\*/', re.DOTALL)
match = pattern.search(file_content)
if match:
HAL_init_func = match.group(1)
if file_content.find("/* USER CODE BEGIN 0 */") != -1:
new_file_content = file_content.replace("/* USER CODE BEGIN 0 */",
"/* USER CODE BEGIN 0 */\n"
"void HAL_driver_init(void){\n"
f"\t{HAL_init_func}"
"\n}"
)
if file_content.find("main_application") == -1:
if file_content.find("/* USER CODE BEGIN 1 */") != -1:
new_file_content = new_file_content.replace("/* USER CODE BEGIN 1 */",
"/* USER CODE BEGIN 1 */\n"
"\textern int edf_main_application(void);\n"
"\treturn edf_main_application();"
)
with open(main_file_path, 'w') as file:
file.write(new_file_content)
# Hien message box neu khong mo duoc file.
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
logging.error(f"/Core/main.c -> No such file in directory.")
return
# Remove exception_interrupt_handler from file stm32fxxx_it.c.
if self._project_bgr == HAL_project:
it_file = "Core/Src/" + self._device_family_series.lower() + "xx_it.c"
it_file_path = os.path.join(self._project_dir, it_file)
start_marker = ("/**\n"
" * @brief This function handles Non maskable interrupt.\n"
" */\n"
"void NMI_Handler(void)")
end_marker = "/* USER CODE END SysTick_IRQn 1 */\n}"
try:
with open(it_file_path, "r") as input_file:
content = input_file.read()
start_index = content.find(start_marker)
end_index = content.find(end_marker, start_index)
if start_index != -1 and end_index != -1:
output_content = content[:start_index] + content[end_index + len(end_marker):]
with open(it_file_path, "w") as output_file:
output_file.write(output_content)
else:
logging.error(f"{it_file_path}: Error during edit file.")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{it_file_path}: No such file in directory.")
logging.error(f"{it_file_path}: No such file in directory.")
return
# Create folder main.
try:
os.system(f"mkdir {self._project_dir}/main")
os.system(f"cp {self._edf_path}/components/templates/source/app_main.cpp {self._project_dir}/main/")
# Create folder config.
os.system(f"mkdir {self._project_dir}/buildconfig")
os.system(f"cp {self._edf_path}/components/templates/header/* {self._project_dir}/buildconfig")
os.system(f"cp {self._edf_path}/components/templates/kconfig/* {self._project_dir}/buildconfig")
except Exception as e:
logging.error(f"Error creating folder: {e}")
# Get project state.
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
if file_content.find("STM_EDF_VERSION") != -1:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", ".cproject: No such file in directory.")
logging.error(f"{self._cproject_file_dir} -> No such file in directory.")
return
make_message_box(QMessageBox.Information, "Progress", "Setup successful.")
logging.info("Setup successful.")
os.system(f"{self._edf_path}/tools/kconfig/dist/kconfig {self._project_dir}")
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Restore button handler.
def onButtonRestoreClicked(self):
logging.info(f"Uninstall STM32 RTOSSDK form {self._project_dir}")
# Trả về cấu hình ban đầu cho .cproject và .project.
if (not copy_file(self._cproject_file_dir + recovery_file_sufix, self._cproject_file_dir, True)) or \
not copy_file(self._project_file_dir + recovery_file_sufix, self._project_file_dir, True) or \
not copy_file(self._ioc_file_dir + recovery_file_sufix, self._ioc_file_dir, True) :
make_message_box(QMessageBox.Critical, "Error", "Can't Restore from project.")
logging.error("Can't uninstall from project.")
# Remove edf function main.c and stm32xxxxx_it.c
if self._project_bgr == HAL_project:
# main.c
main_file_path = os.path.join(self._project_dir, "Core/Src/main.c")
try:
with open(main_file_path, 'r') as f:
file_content = f.read()
pattern = re.compile(r'\/\*\s*USER CODE BEGIN 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END 0\s*\*\/', re.DOTALL)
main_c_replace = '/* USER CODE BEGIN 0 */\n\n/* USER CODE END 0 */'
new_file_content = re.sub(pattern, main_c_replace, file_content)
pattern = re.compile(r'\/\*\s*USER CODE BEGIN 1\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END 1\s*\*\/', re.DOTALL)
main_c_replace = '/* USER CODE BEGIN 1 */\n\n /* USER CODE END 1 */'
new_file_content = re.sub(pattern, main_c_replace, new_file_content)
with open(main_file_path, 'w') as file:
file.write(new_file_content)
# Hien message box neu khong mo duoc file.
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
logging.error("main.c: No such file in directory.")
return
# Get project state.
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
if file_content.find("STM_EDF_VERSION") != -1:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", ".cproject: No such file in directory.")
make_message_box(QMessageBox.Information, "Progress", "Restore successful.")
logging.info("Uninstall successful.")
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
app = QApplication(sys.argv)
window = QMainWindow()
inst_gui = installer_app(window, app)
# extern int main_application(void);
# return main_application();
# <linkedResources>
# <link>
# <name>rtossdk</name>
# <type>2</type>
# <location>/home/anh/Projects/CODE/STM32/RTOSSDK/rtossdk</location>
# </link>
# </linkedResources>
# if self.projectbgr == HAL_project:
# it_file = "Core/Src/" + self.device_series.lower() + "xx_it.c";
# it_file_path = os.path.join(self.projectdir, it_file)
# try:
# with open(it_file_path, 'r') as f:
# file_content = f.read()
# # Include libary and declare variable
# if file_content.find("/* USER CODE BEGIN Includes */") != -1:
# new_file_content = file_content.replace("/* USER CODE BEGIN Includes */",
# "/* USER CODE BEGIN Includes */\n"
# "#include \"freertos_port/app_port/freertos_port.h\""
# )
# if file_content.find("/* USER CODE BEGIN EV */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN EV */",
# "/* USER CODE BEGIN EV */\n"
# "extern void exception_interrupt_handler(const char *tag, char *message);\n"
# "static const char *Excep_TAG = \"EXCEPTION\";"
# )
# # Fault notify.
# if file_content.find("/* USER CODE BEGIN HardFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN HardFault_IRQn 0 */",
# "/* USER CODE BEGIN HardFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Hard fault exception was unhandled(call HardFault_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN MemoryManagement_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN MemoryManagement_IRQn 0 */",
# "/* USER CODE BEGIN MemoryManagement_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Memory management interrupt was unhandled(call MemManage_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN BusFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN BusFault_IRQn 0 */",
# "/* USER CODE BEGIN BusFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Bus fault exception was unhandled(call BusFault_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN UsageFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN UsageFault_IRQn 0 */",
# "/* USER CODE BEGIN UsageFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Usage fault exception was unhandled(call UsageFault_Handler)...\");"
# )
# # Port freertos handler.
# if file_content.find("/* USER CODE BEGIN SVCall_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN SVCall_IRQn 0 */",
# "/* USER CODE BEGIN SVCall_IRQn 0 */\n"
# "\tfreertos_svc_handler();"
# )
# if file_content.find("/* USER CODE BEGIN PendSV_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN PendSV_IRQn 0 */",
# "/* USER CODE BEGIN PendSV_IRQn 0 */\n"
# "\tfreertos_pendsv_handler();"
# )
# if file_content.find("/* USER CODE BEGIN SysTick_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN SysTick_IRQn 0 */",
# "/* USER CODE BEGIN SysTick_IRQn 0 */\n"
# "\textern void systick_app_systick_process(void);\n"
# "\tsystick_app_systick_process();\n"
# "\tfreertos_tick_handler();"
# )
# with open(it_file_path, 'w') as file:
# file.write(new_file_content)
# # Hien message box neu khong mo duoc file.
# except FileNotFoundError:
# make_message_box(QMessageBox.Critical, "Error", f"{it_file}: No such file in directory.")
# return
# # stm32xxxxx_it.c
# it_file = "Core/Src/" + self.device_series.lower() + "xx_it.c";
# it_file_path = os.path.join(self.projectdir, it_file)
# try:
# with open(it_file_path, 'r') as f:
# file_content = f.read()
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN EV\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END EV\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN EV */\n\n /* USER CODE END EV */'
# new_file_content = re.sub(pattern, main_c_replace, file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN HardFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END HardFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN HardFault_IRQn 0 */\n\n /* USER CODE END HardFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN MemoryManagement_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END MemoryManagement_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN MemoryManagement_IRQn 0 */\n\n /* USER CODE END MemoryManagement_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN BusFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END BusFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN BusFault_IRQn 0 */\n\n /* USER CODE END BusFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN UsageFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END UsageFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN UsageFault_IRQn 0 */\n\n /* USER CODE END UsageFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN SysTick_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END SysTick_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN SysTick_IRQn 0 */\n\n /* USER CODE END SysTick_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# with open(it_file_path, 'w') as file:
# file.write(new_file_content)
# # Hien message box neu khong mo duoc file.
# except FileNotFoundError:
# make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
# return
| maivananh111/stm-edf | tools/setup/setup.py | setup.py | py | 38,714 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "gui.Ui_MainWindow",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidge... |
27213329955 | import sys
from collections import deque
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
INF = 1e9
N, M = map(int, sys.stdin.readline().rstrip().split())
maps = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(N)]
hospital_comb = []
answer = INF
def dfs(hospital_list, pick_list, idx):
if idx == len(hospital_list):
if len(pick_list) == M:
hospital_comb.append(pick_list[:])
return
pick_list.append(hospital_list[idx])
dfs(hospital_list, pick_list, idx + 1)
pick_list.pop()
dfs(hospital_list, pick_list, idx + 1)
def bfs(hospital_list):
global answer
q = deque([])
visited = [[False] * N for _ in range(N)]
time_maps = [[0] * N for _ in range(N)]
for h in hospital_list:
q.append((h[0], h[1], 0))
visited[h[0]][h[1]] = True
while q:
x, y, cnt = q.popleft()
for i in range(4):
nx, ny = x + dx[i], y + dy[i]
if 0 <= nx < N and 0 <= ny < N and not visited[nx][ny]:
if maps[nx][ny] == 0 or maps[nx][ny] == -2:
q.append((nx, ny, cnt + 1))
visited[nx][ny] = True
time_maps[nx][ny] = cnt + 1
time = 0
for i in range(N):
for j in range(N):
if maps[i][j] == 0 and time_maps[i][j] == 0:
return
if maps[i][j] == 0:
time = max(time, time_maps[i][j])
answer = min(answer, time)
hospital = []
for i in range(N):
for j in range(N):
if maps[i][j] == 2:
hospital.append((i, j))
maps[i][j] = -2
if maps[i][j] == 1:
maps[i][j] = -1
dfs(hospital, [], 0)
for i in range(len(hospital_comb)):
bfs(hospital_comb[i])
print(-1) if answer == INF else print(answer)
| hammii/Algorithm | CodeTree_python/바이러스_백신.py | 바이러스_백신.py | py | 1,804 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.stdin.readline",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"li... |
12960752319 | import warnings
warnings.filterwarnings('ignore')
from popsycle import synthetic
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import h5py
def test_h5_output(ebf_file, reference_h5_file, extra_col= False):
""""
Parameters
----------
ebf_file : str
Name of the ebf file used to generate the reference h5 file
reference_h5_file : str
Name of the file to compare new output to (should be run with seed=42 on the ebf_file)
extra_col : boolean, defaults to False
Tells the code whether or not the new h5 file will have additional columns (ie does the new version of
popsycle give more information than before
"""
#create the new h5 file by running popsycle
synthetic.perform_pop_syn(ebf_file = ebf_file,
output_root = 'test',
iso_dir = '/u/casey/scratch/work/microlens/popsycle_test/isochrones/',
bin_edges_number = None, overwrite = True, seed=42);
#read in the data from the reference h5 file
hfr = h5py.File(reference_h5_file, 'r')
ref_dset = np.concatenate((hfr['l0b0'], hfr['l0b1'], hfr['l1b0'], hfr['l1b1']),
axis=1)
hfr.close()
#read in the data from the test h5 file created by popsycle
hft = h5py.File('test.h5', 'r')
test_dset = np.concatenate((hft['l0b0'], hft['l0b1'], hft['l1b0'], hft['l1b1']),
axis=1)
hft.close()
#see if we have the right number of columns
if test_dset.shape[0] != ref_dset.shape[0]:
if not extra_col:
print("the h5 files are not the same size. Run again with extra_col=True if you have added columns")
#test to see whether the files are the same
matched_col=0 #initialize matched_col counter
for i in range(0, ref_dset.shape[0]):
test_col = test_dset[i,:]
ref_col = ref_dset[i, :]
if test_col.all() == ref_col.all():
matched_col = matched_col+1
#check to see if disagreements are because of nans
else:
bad_idxs = np.where(ref_col != test_col)
ref_nan_idx = np.where(ref_col == np.nan)
test_nan_idx = np.where(test_col == np.nan)
if test_nan_idx.all() == ref_nan_idx.all() and bad_idxs.all() == ref_nan_idx.all():
matched_col = matched_col+1
else:
matched_col= matched_col
print('Test failed in column', i)
if matched_col == ref_dset.shape[0]:
print("The new test h5 file matched the reference file!")
else:
print("The new test h5 file does not match the reference file")
return
synthetic.calc_events(hdf5_file = 'test.h5',
output_root2 = 'test',
radius_cut = 2,
obs_time = 1000,
n_obs = 11,
theta_frac = 2,
blend_rad = 0.65,
overwrite = True,
n_proc = 1)
synthetic.refine_events(input_root = 'test',
filter_name = 'I',
photometric_system = 'ubv',
red_law = 'Damineli16',
overwrite = True,
output_file = 'default')
hfr = h5py.File('trial_1.h5', 'r')
print((list(hfr.keys())))
dsetr = hfr['l0b0']
print(dsetr)
hfr.close()
hft = h5py.File('test.h5', 'r+')
print(list(hft.keys()))
dsett = hft['l0b0']
print(dsett)
dsett.resize((27, 176660))
print(dsett)
print('test==trial', dsetr == dsett)
hft.close()
tabr = Table.read('trial_1_refined_events_i_Damineli16.fits')
tabt = Table.read('test_refined_events_i_Damineli16.fits')
print('The col names of the trial 1 fits are:', tabr.colnames)
print('The col names of the test fits are:', tabt.colnames)
tabt.remove_columns(['teff_S', 'grav_S', 'lum_S', 'teff_L', 'grav_L', 'lum_L'])
print('Check if tables differ:', tabr == tabt)
| jluastro/PopSyCLE | popsycle/tests/output_test_synthetic.py | output_test_synthetic.py | py | 4,102 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "popsycle.synthetic.perform_pop_syn",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "popsycle.synthetic",
"line_number": 26,
"usage_type": "name"
},
{
"api_... |
35721608965 | import os
import json
import numpy as np
import preprocessing as preprocessing
from tensorflow import keras
def init():
global model
global vocab
global max_len
model = keras.models.load_model(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model'), compile=False)
with open(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model', 'vocab.json')) as json_file:
vocab = json.load(json_file)
with open(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model', 'params.json')) as json_file:
params = json.load(json_file)
max_len = params['max_len']
def run(raw_data):
tweets = np.array(json.loads(raw_data)["data"])
processed_tweets = preprocessing.process_tweets(tweets, vocab, max_len)
result = model.predict(processed_tweets).ravel()
return result.tolist() | luisespriella9/disastersLocator | src/scoring.py | scoring.py | py | 809 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 11,
"usage_type": "name"
},
{
"... |
44632376656 | # coding:utf-8
from bs4 import BeautifulSoup
import urllib.request as req
import sqlite3
from contextlib import closing
url="http://su-gi-rx.com/2017/07/16/python_4/"
dbname='database.db'
conn=sqlite3.connect(dbname)
c=conn.cursor()
table_name = 'test'
def get_html():
#urlopen()でデータ取得
res=req.urlopen(url)
#BeautifulSoup()で解析
soup=BeautifulSoup(res,'html.parser')
#任意のデータを抽出
title1=soup.find("h1").string
#print("title=",title1)
p_list=soup.find_all("p")
#print("text=",p_list)
return [(str(title1),str(p_list))]
def create_table(tname):
#executeメソッドでSOL文を実行する
create_table='''create table if NOT EXISTS {0} (title varchar(64),p_list varchar(32))'''.format(tname)
c.execute(create_table)
def insert_data(tname,data):
insert_sql='insert into {0} (title,p_list) values(?,?)'.format(tname)
c.executemany(insert_sql,test)
conn.commit()
if __name__=='__main__':
create_table(table_name)
test=get_html()
insert_data(table_name,test)
select_sql = 'select * from {0}'.format(table_name)
for row in c.execute(select_sql):
print(row)
conn.close()
| riku-nagisa/python1 | html_ren.py | html_ren.py | py | 1,261 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSou... |
2721248561 | import copy, random, datetime
suppliesDataSets = [
[
{
"labels": [], #food
"data": []
},
{
"labels": [], #drink
"data": []
},
{
"labels": [], #medicine
"data": []
}
]
]
now = datetime.datetime.now()
for i in range(3):
for j in range(42):
t_ = (now + datetime.timedelta(days=-j)).strftime('%Y-%m-%d')
suppliesDataSets[0][i]["labels"].append(t_)
k = random.randint(0, 200)
suppliesDataSets[0][i]["data"].append(k)
for i in range(8):
cloned_ = copy.deepcopy(suppliesDataSets[0])
suppliesDataSets.append(cloned_) | e1833-tomohiro/Kadai | backend/store.py | store.py | py | 680 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rando... |
29520352035 | import requests
from bs4 import BeautifulSoup as bs
import csv
from itertools import chain
def get_urls():
"""
Skilar lista sem inniheldur slóðir á allar undirsíður
með kosningaúrslitum
"""
main_page = requests.get("http://www.kosningastofnun.in/")
page_soup = bs(main_page.content, "html.parser")
urls = [title.a.get("href") for title in page_soup.find_all(class_="blog-post-title")]
return urls
def scrape(urls):
"""
Tekur inn lista af slóðum og skrapar fyrstu töfluna sem finnst á hverri slóð.
Skilar lista af dicts þar sem hvert dict er með töfluhausa sem lykla og
töfluhólf sem gildi
"""
all_dicts = []
for url in urls:
elections_page = requests.get(url)
elections_soup = bs(elections_page.content, "html.parser")
# Finnum alla töfluhausa og setjum í lista:
headers = [header.string.strip() for header in elections_soup.find_all("th")]
# Förum svo í gegnum hverja röð og setjum gildin í lista:
for row in elections_soup.find_all("tr"):
results = [cell.string.strip() for cell in row.find_all("td")]
# Athugum hvort þetta er gild niðurstöðulína, annars gerum við ekkert:
if results:
# Nú getum við sett þessar niðurstöður í dict með hausunum:
elections = dict(zip(headers, results))
# Ef dagsetning var ekki með í þessum niðurstöðum þurfum við
# að skrapa hana úr titlinum og setja inn í dictið:
if "Dagsetning" not in elections:
full_title = elections_soup.h2.a.string
# Splittum titlinum á fyrsta bili og tökum seinni hlutann:
elections["Dagsetning"] = full_title.split(" ", 1)[1]
# Og setjum svo dictið í stóra listann:
all_dicts.append(elections)
return all_dicts
def save_csv(list_of_dicts):
"""
Tekur inn lista af dicts og skrifar út í CSV-skrána "kosningar.csv" í möppunni
sem skriftan er keyrð úr
"""
with open("kosningar.csv", "w") as csv_file:
fieldnames = set([key for key in chain(*list_of_dicts)])
# Röðum dálkaheitunum, fyrst í stafrófsröð og svo aftur til að fá
# dagsetningu fremst (ekki nauðsynlegt):
fieldnames = sorted(fieldnames)
fieldnames = sorted(fieldnames, key=lambda x: x == "Dagsetning", reverse=True)
writer = csv.DictWriter(csv_file, fieldnames)
writer.writeheader()
writer.writerows(list_of_dicts)
# Keyrum heila klabbið:
if __name__ == "__main__":
save_csv(scrape(get_urls()))
| flother/data-acq-viz | 2018/kosningaskrapari-lausn.py | kosningaskrapari-lausn.py | py | 2,713 | python | is | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
73264632828 | from game_objects.projectile import Projectile
from game_objects.player import Player
from pyglet import clock
from widgets.event_window import EventWindow
import pyglet
import cv2 as cv
import time
window = EventWindow(fullscreen=True)
# soul_image = pyglet.image.load('soul.png')
# soul = pyglet.sprite.Sprite(soul_image)
soul = Player(src='soul.png')
print(soul.width, soul.height)
print(window.width / 2, window.height / 2)
soul.move(window.width / 2 - soul.width, window.height / 2 - soul.height)
soul.scale = 1.3
print('Soul: ', soul.x, soul.y, soul.width, soul.height)
soul_np = cv.imread('soul.png')
projectile = Projectile(src='projectiles/dull_knife.png', speed=10, x=window.width * 0.7, y=window.height)
# projectile.x = window.width / 3
# projectile.rectangle.x = projectile.x
# projectile.y = window.height / 2
# projectile.rectangle.y = projectile.y
projectile.point(soul.x, soul.y)
@window.event
def on_draw():
window.clear()
projectile.draw()
soul.draw()
def move_forward(dt):
# projectile.forward()
# projectile.rotate(pi/180)
if soul.check_for_collision(projectile):
print('hit!',
projectile.get_left_bound(),
projectile.get_right_bound(),
soul.get_left_bound(),
)
clock.unschedule(move_forward)
start = time.time()
img1 = cv.imread('test_images/dull_knife.png')
img1 = projectile.np
# img2 = cv.imread('test_images/heart_not_overlapping_3.png')
img2 = cv.imread('test_images/heart_overlapping_2.png')
img1gray = cv.threshold(img1, 1, 255, cv.THRESH_BINARY)
if __name__ == '__main__':
pyglet.clock.schedule_interval(move_forward, 1 / 120)
pyglet.app.run()
| KimPalao/Headshot | collision_test.py | collision_test.py | py | 1,686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "widgets.event_window.EventWindow",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "game_objects.player.Player",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": ... |
72588123388 | from numpy.testing import *
import numpy
import numpy.random
from adolc import *
from adolc.cgraph import *
from adolc.tangent import *
class TangentOperationsTests(TestCase):
def test_constructor(self):
t1 = Tangent(1,2)
t2 = Tangent(adouble(1),2)
def test_float_tangent_float_tangent(self):
tx = Tangent(2.,3.)
ty = Tangent(5.,7.)
tz = tx + ty
assert_array_almost_equal([tz.x,tz.xdot], [2+5, 3+7])
tz = tx - ty
assert_array_almost_equal([tz.x,tz.xdot], [2-5, 3-7])
tz = tx * ty
assert_array_almost_equal([tz.x,tz.xdot], [2*5, 3*5 + 2*7])
tz = tx / ty
assert_array_almost_equal([tz.x,tz.xdot], [2./5., (3*5 - 2*7.)/5**2])
def test_double_tangent_adouble(self):
tx = Tangent(2,3)
ay = adouble(5)
tz = tx + ay
assert_array_almost_equal([tz.x.val,tz.xdot], [2+5, 3])
tz = tx * ay
assert_array_almost_equal([tz.x.val,tz.xdot.val], [2*5, 3*5])
def test_adouble_tangent_adouble_addition(self):
tx = Tangent(adouble(2), 1)
ty = Tangent(adouble(3), 0)
tz = tx + ty
assert_array_almost_equal([tz.x.val,tz.xdot], [5, 1])
class SemiImplicitOdeLhsTest(TestCase):
"""
This is a test example taken from PYSOLVIND
In chemical engineering, semi-implicit ODEs of the type::
d/dt g(t,y(t)) = f(t,y(t))
y(0) = y_0
have to be solved. PYSOLVIND requires a function afcn that computes::
d/dy g(t,y) d/dt y
where d/dt y = xdd
y = xd
"""
def test_differentiation_of_gfcn(self):
def gfcn(a):
print('called gfcn')
ty = [Tangent(a.xd[0], a.xdd[0]),Tangent(a.xd[1], a.xdd[1]), Tangent(a.xd[2], a.xdd[2])]
tlhs = [ty[0] * ty[2], ty[1] * ty[2], ty[2]]
a.lhs[0] = tlhs[0].xdot
a.lhs[1] = tlhs[1].xdot
a.lhs[2] = tlhs[2].xdot
def afcn(a):
a.lhs[0] = a.xd[2] * a.xdd[0] + a.xd[0] * a.xdd[2]
a.lhs[1] = a.xd[2] * a.xdd[1] + a.xd[1] * a.xdd[2]
a.lhs[2] = a.xdd[2]
class Args:
def __init__(self):
self.xd = numpy.random.rand(3)
self.xdd = numpy.random.rand(3)
self.lhs = numpy.zeros(3)
args = Args()
gfcn(args)
result1 = args.lhs.copy()
afcn(args)
result2 = args.lhs.copy()
assert_array_almost_equal(result1, result2)
# class FunctionExampleTests(TestCase):
# def test_utps_on_jacobian(self):
# def f(x,p):
# print p
# print p[0] + p[1]
# return (p[0] + p[1]) * x**2
# AP = AdolcProgram()
# AP.trace_on(1)
# ax = adouble(3.)
# ap = adouble([5.,7.])
# AP.independent(ax)
# AP.independent(ap)
# tp = [Tangent(ap[0],1),Tangent(ap[1],0)]
# tf = f(ax,tp)
# aJ = tf.xdot
# print aJ
# AP.dependent(aJ)
# AP.trace_off()
# g = gradient(1, [1,2,3])
# print g
if __name__ == '__main__':
try:
import nose
except:
print('Please install nose for unit testing')
nose.runmodule()
| b45ch1/pyadolc | adolc/tests/test_tangent.py | test_tangent.py | py | 3,503 | python | en | code | 43 | github-code | 6 | [
{
"api_name": "numpy.random.rand",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
27009702338 | import numpy as np
import run as r
'''
[id]
115
[name]
BayesianRidge
[input]
x_train 训练集 训练集标签数据集 二维数组 必须 定数
y_train 测试集 测试集数据集 二维数组 必须 定数
x_test 训练集标签 训练集标签标签 一维数组 必须 定数
y_test 测试集标签 测试集标签 一维数组 必须 定数
n_iter n_iter 默认为300,最大迭代次数。应该大于或等于1,可选整数 整数 不必须 定数
tol tol 默认为1e-3,如果w收敛,则停止算法,可选浮点数 浮点数 不必须 定数
alpha_1 alpha_1 默认为1e-6,Hyper-parameter:shape参数,用于先于Alpha参数的Gamma分布,可选浮点数 浮点数 不必须 定数
alpha_2 alpha_2 默认为1e-6,超参数:Gamma分布优先于alpha参数的反比例参数(速率参数),可选浮点数 浮点数 不必须 定数
lambda_1 lambda_1 默认为1e-6,Hyper-parameter:shape参数,用于先于lambda参数的Gamma分布,可选浮点数 浮点数 不必须 定数
lambda_2 lambda_2 默认为1e-6,超参数:Gamma分布先于lambda参数的反比例参数(速率参数),可选浮点数 浮点数 不必须 定数
alpha_init alpha_init 默认为None,alpha的初始值(噪声的精度)。如果未设置,则alpha_init为1/Var(y),可选浮点数 浮点数 不必须 定数
lambda_init lambda_init 默认为None,Lambda的初始值(权重的精度)。如果未设置,则lambda_init为1。..版本添加::0.22,可选浮点数 浮点数 不必须 定数
compute_score compute_score 默认为False,如果为True,则在每次优化迭代时计算对数边际可能性,可选布尔值 布尔值 不必须 定数
fit_intercept 计算截距 默认为True,是否计算此模型的截距。截距不被视为概率参数,因此没有关联的方差。如果将其设置为False,则在计算中将不使用截距(即,数据应居中),可选整数,布尔值 字符串 不必须 定数
normalize 归一化 默认为False,当fit_intercept设置为False时,将忽略此参数。如果为True,则将在回归之前通过减去均值并除以l2-范数来对回归变量X进行归一化,可选布尔值 布尔值 不必须 定数
copy_X 是否复制 默认为True,如果为True,将复制X;否则为X。否则,它可能会被覆盖,可选布尔值 布尔值 不必须 定数
verbose 详细程度 默认为False,拟合模型时为详细模式,可选布尔值 布尔值 不必须 定数
[output]
train_predict 预测 训练集预测结果 一维数组(数值)
test_predict 预测 测试集预测结果 一维数组(数值)
train_score 正确率 训练集预测结果的正确率 数字
test_score 正确率 测试集预测结果的正确率 数字
coef_ 参数向量 回归模型的系数(均值) 一维数组
intercept_ 截距 决策特征中的独立术语。如果fit_intercept=False,则设置为0.0 整数
alpha_ alpha 估计的噪声精度 浮点数
lambda_ lambda_ 估计重量的精度 浮点数
sigma_ sigma_ 权重的估计方差-协方差矩阵 二维数组
scores_ scores_ 如果calculated_score为True,则在每次优化迭代时对数边际似然值(要最大化)。该数组以从alpha和lambda的初始值获得的对数边际似然值开始,以以估计的alpha和lambda的值结束 一维数组
n_iter_ 迭代次数 达到停止标准的实际迭代次数 整数
[outline]
贝叶斯岭回归。
[describe]
贝叶斯岭回归。
拟合贝叶斯岭模型。
有关详细信息,请参见注释部分。
正则化参数的实现和优化lambda(权重的精度)和alpha(噪声的精度)
'''
def main(x_train, y_train, x_test, y_test,
         n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
         lambda_init=None, compute_score=False, fit_intercept=True, normalize=False, copy_X=True, verbose=False
         ):
    """Decode string-encoded arguments and delegate to run.run (BayesianRidge).

    The calling framework may pass every argument either as its native
    Python value or as its textual representation; string values are
    evaluated back into Python objects before being forwarded.

    Returns whatever run.run returns (predictions, scores and fitted-model
    attributes, per the parameter block documented above).
    """
    def _decode(value):
        # NOTE(review): eval() on caller-supplied text is dangerous if the
        # input can ever be untrusted; kept for compatibility with the
        # surrounding framework, but ast.literal_eval would be safer for
        # plain data values.
        return eval(value) if type(value) is str else value

    (x_train, y_train, x_test, y_test, n_iter, tol,
     alpha_1, alpha_2, lambda_1, lambda_2, alpha_init, lambda_init,
     compute_score, fit_intercept, normalize, copy_X, verbose) = map(
        _decode,
        (x_train, y_train, x_test, y_test, n_iter, tol,
         alpha_1, alpha_2, lambda_1, lambda_2, alpha_init, lambda_init,
         compute_score, fit_intercept, normalize, copy_X, verbose))
    return r.run(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, n_iter=n_iter,
                 tol=tol,
                 alpha_1=alpha_1,
                 alpha_2=alpha_2,
                 lambda_1=lambda_1,
                 lambda_2=lambda_2,
                 alpha_init=alpha_init,
                 lambda_init=lambda_init,
                 compute_score=compute_score,
                 fit_intercept=fit_intercept,
                 normalize=normalize,
                 copy_X=copy_X,
                 verbose=verbose)
if __name__ == '__main__':
    # Smoke test: load a local CSV (last column is the label), keep the
    # first 20 rows and run with train set == test set.
    import numpy as np
    import json
    array = np.loadtxt('D:\\123_2.csv', delimiter=',')  # hardcoded Windows path
    array = array[0:20, :]
    y = array[:, -1].tolist()
    x = np.delete(array, -1, axis=1).tolist()
    array = array.tolist()
    back = main(x, y, x, y)
    print(back)
    for i in back:
        print(i + ":" + str(back[i]))
    # NOTE(review): result of json.dumps is discarded -- presumably only
    # checks that the output is JSON-serializable.
    json.dumps(back)
| lisunshine1234/mlp-algorithm-python | machine_learning/regression/linear_models/BayesianRidge/main.py | main.py | py | 5,830 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "run.run",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": ... |
38852384412 | from django.core import validators
from rest_framework import serializers
from django.utils.translation import gettext_lazy as _
from degvabank.apps.account.models import Account
from degvabank.apps.card.models import CreditCard
from degvabank.apps.transaction.utils import is_our_number
from .models import Transaction
class TransactionSerializer(serializers.ModelSerializer):
    """Plain serializer exposing every field of the Transaction model."""

    class Meta:
        model = Transaction
        fields = "__all__"
class UserTransactionSerializer(serializers.ModelSerializer):
    """Serializer for transactions created by an authenticated user.

    The validate_* hooks cache resolved objects on the serializer instance
    so later field validators can cross-check source/target/document id.
    """

    # Scratch state filled in during validation:
    #   acc / card   - the requesting user's own account or credit card (source)
    #   dst          - resolved target object, or the raw number when external
    #   dst_not_our  - True when the target number does not belong to this bank
    acc = card = dst = dst_not_our = None
    document_id = serializers.CharField(
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )

    def validate_source(self, value):
        # The source must be an active account or card owned by the requester.
        user = self.context["request"].user
        self.acc = user.accounts.filter(id=value, is_active=True).first()
        self.card = user.credit_cards.filter(number=value, is_active=True).first()
        if not (self.acc or self.card):
            raise serializers.ValidationError(_("Invalid source account or card"))
        return value

    def validate_target(self, value):
        # TODO: if value not ours: return value
        # External numbers are accepted as-is; no local lookup is possible.
        if not is_our_number(value):
            self.dst = value
            self.dst_not_our = True
            return value
        dst_acc = Account.objects.filter(id=value, is_active=True).first()
        dst_card = CreditCard.objects.filter(number=value, is_active=True).first()
        if not (dst_acc or dst_card):
            raise serializers.ValidationError(
                _("Target account or card does not exists")
            )
        self.dst = dst_card or dst_acc
        return value

    def validate_document_id(self, value):
        # Only checked for in-bank targets (dst resolved to a local object).
        if not self.dst_not_our and self.dst and self.dst.user.document_id.lower() != str(value).lower():
            raise serializers.ValidationError(
                _("Target account or card is not associated with that document id")
            )
        return value

    def validate_amount(self, value):
        # Reject amounts exceeding the source account balance / card credit.
        if self.acc and self.acc.balance < value:
            raise serializers.ValidationError(_("Insufficent balance"))
        if self.card and self.card.credit < value:
            raise serializers.ValidationError(_("Insufficent balance"))
        return value

    class Meta:
        model = Transaction
        fields = [
            "id",
            "source",
            "target",
            "document_id",
            "amount",
            "type",
            "status",
            "reason",
            "date",
        ]
        read_only_fields = ("type", "status", "date", "id")

    def create(self, validated_data):
        # Keep only keys that are real model fields (drops document_id),
        # then hand off to the manager, which owns transaction creation.
        field_names = [field.name for field in self.Meta.model._meta.get_fields()]
        data = {a: b for a, b in validated_data.items() if a in field_names}
        kwargs = {
            "amount": data["amount"],
            "reason": data["reason"],
            "source": {
                "number": data["source"]
            },
            "target": {
                "number": data["target"],
                "document_id": validated_data["document_id"],
            }
        }
        return self.Meta.model.objects.create_any_transaction(**kwargs)
class TransactionCardSerializer(serializers.Serializer):
    """Nested payload describing a credit card side of a foreign transaction."""

    number = serializers.CharField()
    security_code = serializers.CharField()
    expiration_date = serializers.DateTimeField()
    # Optional owner document id (Venezuelan-style prefix letter + digits).
    document_id = serializers.CharField(
        required=False,
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )
class TransactionAccountSerializer(serializers.Serializer):
    """Nested payload describing a bank account side of a foreign transaction."""

    number = serializers.CharField()
    # Owner document id is mandatory for accounts (unlike cards above).
    document_id = serializers.CharField(
        required=True,
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )
class ForeignTransactionSerializer(serializers.ModelSerializer):
    """Serializer for transactions initiated by another bank.

    Exactly one of acc_src/card_src and one of acc_dst/card_dst is expected;
    account payloads take precedence over card payloads in create().
    """

    acc_src = TransactionAccountSerializer(required=False)
    acc_dst = TransactionAccountSerializer(required=False)
    card_src = TransactionCardSerializer(required=False)
    card_dst = TransactionCardSerializer(required=False)

    class Meta:
        model = Transaction
        fields = [
            "id",
            "acc_src",
            "acc_dst",
            "card_src",
            "card_dst",
            "amount",
            "type",
            "status",
            "reason",
            "date",
        ]
        read_only_fields = ("type", "status", "date", "id")

    def create(self, validated_data):
        # Keep only keys that are real model fields, then delegate creation
        # to the manager with the foreign-origin flag set.
        field_names = [field.name for field in self.Meta.model._meta.get_fields()]
        data = {a: b for a, b in validated_data.items() if a in field_names}
        kwargs = {
            "amount": data["amount"],
            "reason": data["reason"],
            "source": validated_data.get("acc_src") or validated_data.get("card_src"),
            "target": validated_data.get("acc_dst") or validated_data.get("card_dst")
        }
        return self.Meta.model.objects.create_any_transaction(from_foreign=True, **kwargs)
| Vixx-X/DEGVABank-backend | degvabank/degvabank/apps/transaction/serializers.py | serializers.py | py | 5,507 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Transaction",
"line_number": 14,
"usage_type": "name"
... |
38696958854 | # coding=utf-8
import requests
class Airbnb(object):
    """Interface to get data from airbnb api. You can use :
    api_instance = Airbnb()
    api_instance.get_logement("Paris")
    api_instance.get_review(logement_id)
    api_instance.get_logement_details(logement_id)"""

    # Public client keys used by the official mobile apps; individual
    # endpoints expect one or the other.
    _KEY_V1 = "3092nxybyb0otqw18e8nh5nty"
    _KEY_V2 = "d306zoyjsyarp7ifhu67rjxn52tv0t20"

    def _get(self, url, params, report_errors=False):
        """Shared GET helper for every endpoint.

        Returns the raw response body on HTTP 200, otherwise the numeric
        status code (printed first when report_errors is True).
        """
        response = requests.get(url, params=params)
        if response.status_code != 200:
            if report_errors:
                print("response status code : %s" % response.status_code)
            return response.status_code
        return response.content

    def get_user_infos(self, user_id):
        """Fetch the public profile of a user by id."""
        url = "https://api.airbnb.com/v2/users/%s?" % user_id
        params = {
            "client_id": self._KEY_V1,
            "_format": "v1_legacy_show",
            "locale": "en-Us",
            "currency": "USD"
        }
        return self._get(url, params)

    def get_logement_details(self, logement_id):
        """Fetch full details for one listing (accepts a str or int id)."""
        # str() handles both string and integer ids (the original caught
        # TypeError from string concatenation to the same effect).
        url = "https://api.airbnb.com/v2/listings/" + str(logement_id)
        params = {
            "client_id": self._KEY_V1,  # compulsory : API KEY
            "_format": "v1_legacy_for_p3",  # compulsory
            "locale": "en-US",  # optionnal from here.
            "currency": "USD",
            "_source": "mobile_p3",
            "number_of_guests": "1"
        }
        return self._get(url, params)

    def get_review(self, logement_id, offset):
        """Fetch up to 50 reviews for a listing, starting at *offset*."""
        url = "https://api.airbnb.com/v2/reviews"
        params = {
            "client_id": self._KEY_V2,
            "locale": "en-US",
            "currency": "USD",
            "_format": "for_mobile_client",
            "_limit": "50",
            "_offset": offset,
            "_order": "language",
            "listing_id": logement_id,
            "role": "all"
        }
        # This endpoint is the only one that also reports failures on stdout.
        return self._get(url, params, report_errors=True)

    def get_logement(self, city, checkin, checkout, offset):
        """With this function you can get lots of infos (especially the housing
        ID), then get data about reviews or details of it.
        The method take a city name (string) as input and return a
        utf-8 encoded json string you can easily parse with json.loads() or
        a HTTP status code if an error occured."""
        url = "https://api.airbnb.com/v2/search_results"
        params = {
            "client_id": self._KEY_V2,
            "locale": "en-US",
            "currency": "USD",
            "_limit": "50",
            "_format": "for_search_results_with_minimal_pricing",
            "_offset": offset,
            "fetch_facets": "true",
            "guests": "1",
            "ib": "false",
            "ib_add_photo_flow": "true",
            "location": city,
            "min_bathrooms": "0",
            "min_bedrooms": "0",
            "min_beds": "1",
            "min_num_pic_urls": "0",
            "price_max": "5000",
            "price_min": "0",
            "checkin": checkin,
            "checkout": checkout,
            "sort": "1",
            "user_lat": "37.3398634",
            "user_lng": "-122.0455164"
        }
        return self._get(url, params)

    def get_available(self, logement_id, month, year, count):
        """Endpoint to get all availability for a precise listing.
        We neede as input the listting id, month and year to begin from
        and count as number of months to get result from.
        It returns utf-8 encoded json string to parse with json.loads() or
        an HTTP status code if the request failed."""
        url = "https://www.airbnb.fr/api/v2/calendar_months"
        params = {
            "key": self._KEY_V2,
            "currency": "EUR",
            "locale": "fr",
            "listing_id": logement_id,
            "month": month,
            "year": year,
            "count": count,
            "_format": "with_conditions"
        }
        return self._get(url, params)

    def get_logement_by_gps(self, ne_lat, ne_lng, sw_lat, sw_lng, zoom, page_number, checkin=None, checkout=None):
        """Search listings inside a GPS bounding box, one result page at a time
        (page_number is 1-based; translated to a 0-based section_offset)."""
        url = "https://www.airbnb.fr/api/v2/explore_tabs"
        params = {
            "items_per_grid": "18",
            "key": self._KEY_V2,
            "ne_lat": ne_lat,
            "ne_lng": ne_lng,
            "sw_lat": sw_lat,
            "sw_lng": sw_lng,
            "zoom": zoom,
            "location": "paris",
            "search_by_map": "true",
            "_format": "for_explore_search_web",
            "experiences_per_grid": "20",
            # NOTE(review): looks like a typo for "guidebooks_per_grid": "20";
            # kept byte-identical since the API accepted it as-is.
            "guidebooks_per_gri": "=20",
            "fetch_filters": "true",
            "supports_for_you_v3": "true",
            "screen_size": "large",
            "timezone_offset": "120",
            "auto_ib": "true",
            "tab_id": "home_tab",
            "federated_search_session_id": "87339300-cc93-4d01-b366-dc3896f7788b",
            "_intents": "p1",
            "currency": "EUR",
            "locale": "fr",
            "section_offset": page_number - 1
        }
        if checkin and checkout:
            params['checkin'] = checkin
            params['checkout'] = checkout
        return self._get(url, params)
if __name__ == '__main__':
    # Ad-hoc manual test: search a small bounding box in Paris, page 2.
    # get_review("17834617")
    airbnb = Airbnb()
    print(airbnb.get_logement_by_gps(48.8632953507299, 2.3455012817150873, 48.86068875819463, 2.3429478187329096, 18, 2))
    # print(airbnb.get_available(17834617, 5, 2017, 4))
    # print(airbnb.get_logement_details(17834617))
    # airbnb.get_logement("Bordeaux", 1, 2)
| pablo-a/airbnb | airbnb_api.py | airbnb_api.py | py | 6,552 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numbe... |
70321394427 | import multiprocessing as mp
from skopt import Optimizer
from skopt.space import Real, Integer
import subprocess
import time
import pickle
from ID_CNN_V01 import setup_thread_environment
from _utils.ID_utils import get_convolutions, Colors, check_available_gpus
# Number of ask/tell optimization cycles to run (passed to bayesian_optimize).
n_calls = 7
# skopt search space: log-uniform learning rate plus two integer dimensions.
dim_learning_rate = Real(low=1e-7, high=3e-2, prior='log-uniform', name='learning_rate')
dim_n_convolutions = Integer(low=1, high=3, name='n_convolutions')
dim_dense_nodes = Integer(low=128, high=200, name='n_dense_nodes')
class ParameterBatch:
    """
    Container object holding all hyperparameters needed to run the network.
    """

    def __init__(self,
                 learning_rate=0.0005,
                 input_shape=None,
                 batch_size=16,
                 convolutions=None,
                 gpu_id=2,
                 n_dense_nodes=128,
                 n_max_epochs=30,
                 n_runs=1,
                 training=True,
                 train_csv_file="_data/hetzell_shearlet_training_data.csv",
                 eval_csv_file="_data/hetzell_shearlet_evaluation_data.csv",
                 test_csv_file="_data/hetzell_shearlet_testing_data.csv"
                 ):
        # input_shape / convolutions used to be mutable default arguments,
        # so every instance built with the defaults shared (and could
        # silently mutate) the same list object. None is a sentinel that is
        # replaced with a fresh copy of the previous default value.
        self.learning_rate = learning_rate
        self.input_shape = [480, 640, 1] if input_shape is None else input_shape
        self.batch_size = batch_size
        self.convolutions = [(64, 7, 7), (128, 5, 5)] if convolutions is None else convolutions
        self.gpu_id = gpu_id  # GPU this batch of work is pinned to
        self.n_dense_nodes = n_dense_nodes
        self.n_max_epochs = n_max_epochs
        self.n_runs = n_runs
        self.training = training  # False switches the pipeline to testing
        self.train_csv_file = train_csv_file
        self.test_csv_file = test_csv_file
        self.eval_csv_file = eval_csv_file
def map_val_to_param_batch(vals, gpu_id):
    """
    Maps the values given by an Optimizer into a ParameterBatch object.
    :param vals: list of values from Optimizer
    :param gpu_id: the gpu_id passed to the ParameterBatch
    :return: ParameterBatch object
    """
    # vals is ordered like the dimensions list handed to Optimizer:
    # [learning_rate, n_convolutions, n_dense_nodes].
    params = ParameterBatch(learning_rate=vals[0],
                            convolutions=get_convolutions(vals[1]),
                            n_dense_nodes=vals[2],
                            gpu_id=gpu_id)
    return params
def bayesian_optimize(n_calls=12):
    """
    Apply bayesian optimization to a network. Access global variable reserved_gpus, Ask Optimizer for one point for each GPU,
    train and evaluate at that point in parallellized threads, repeat n_calls times. Then train and test the best setup.
    :return: ---
    """
    start_time = time.time()
    # One worker process per reserved GPU; each evaluates one sampled point.
    p = mp.Pool(len(reserved_gpus))
    optimizer = Optimizer(dimensions=[dim_learning_rate, dim_n_convolutions, dim_dense_nodes],
                          random_state=1)
    for i in range(1, n_calls + 1):
        gpus = list(reserved_gpus)
        # Ask for as many candidate points as there are GPUs, evaluate them
        # in parallel, then feed the losses back (ask/tell interface).
        vals = optimizer.ask(n_points=len(reserved_gpus))
        points = []
        for point in vals:
            param_batch = map_val_to_param_batch(point, gpus.pop(0))
            points.append(param_batch)
        loss = p.map(setup_thread_environment, points)
        optimizer.tell(vals, loss)
        print("#" * 100)
        print(Colors.OKBLUE, "Optimization cylce", i, "done.", Colors.ENDC)
        print("#" * 100)
    print("Best setup found:")
    p.close()
    print(min(optimizer.yi))  # print the best objective found
    # Pair every observed loss with its point and sort ascending by loss.
    sorted_sets = sorted(list(zip(optimizer.yi, optimizer.Xi)), key=lambda tup: tup[0])
    print("BEST SET:", sorted_sets[0])
    print("#" * 100)
    print(Colors.OKBLUE, "Starting Testing of Best Set.", Colors.ENDC)
    print("#" * 100)
    gpus = list(reserved_gpus)
    # Re-train the best configuration in test mode (training=False).
    test_args = map_val_to_param_batch(sorted_sets[0][1], gpus.pop(0))
    test_args.training = False
    avg_test_accuracy = setup_thread_environment(test_args)
    print("Test accuracy:", avg_test_accuracy)
    end_time = time.time()
    print("It took:", str(end_time - start_time), "seconds")
    # NOTE(review): file handle from open() is never closed explicitly here.
    pickle.dump(sorted_sets, open("_logs/optimizer_points.pkl", "wb"))
    try:
        # NOTE(review): if open() itself raises, label_file is unbound and the
        # finally clause raises NameError -- consider a with-statement.
        file_path = "_logs/dl_optimizer_result.txt"
        label_file = open(file_path, "w")
        label_file.write("Best setup found:\n")
        label_file.write(str(sorted_sets[0]))
        label_file.write("\nTime to process: ")
        label_file.write(str(end_time - start_time))
        label_file.write("\nTest Accuracy: ")
        label_file.write(str(avg_test_accuracy))
    finally:
        label_file.close()
def main():
    """
    clears the logdir, finds the reserved_gpus and starts the bayesian optimization.
    :return: ---
    """
    global reserved_gpus
    # Remove previous logs; shell=True with a fixed string (Unix-only rm).
    command_str = "(rm -r _logs)"
    subprocess.run(command_str, shell=True)
    reserved_gpus = check_available_gpus()
    print("GPUs", reserved_gpus, "are available.")
    # n_calls comes from the module-level constant, not the function default.
    bayesian_optimize(n_calls=n_calls)
if __name__ == "__main__":
    # Script entry point: clean logs, detect GPUs, run the optimization.
    main()
| lorenz-h/DataRepresentationLearning | Old Experiments/ImitationDuckie_V1/_old_versions/ID_Optimizer.py | ID_Optimizer.py | py | 4,792 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "skopt.space.Real",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "skopt.space.Integer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "skopt.space.Integer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "_utils.ID_ut... |
28173684280 | # importing all libraries
import cadquery as cq
from cadquery import exporters
import pyaudio
import json
from vosk import Model, KaldiRecognizer
import pyttsx3
from tkinter import *
from PIL import ImageTk, Image
import tkinter.font as TkFont
import os
from word2number import w2n
# Load the vosk speech-recognition model and wrap it in a 16 kHz recognizer.
model = Model(
    r"path\to\any\vosk\voice recognition model"
)
model = KaldiRecognizer(model, 16000)
# Open a mono 16-bit, 16 kHz microphone input stream.
mic = pyaudio.PyAudio()
stream = mic.open(format=pyaudio.paInt16,
                  channels=1,
                  rate=16000,
                  input=True,
                  frames_per_buffer=8192)
# initialize Text-to-speech engine
engine = pyttsx3.init()
voices = engine.getProperty('voices') #getting details of current voice
engine.setProperty('voice', voices[1].id)  # default voice: index 1
engine.setProperty('rate', 150)  # speaking rate (words per minute)
def say(text):
    """Speak *text* through the module-level pyttsx3 engine (blocks until done)."""
    engine.say(text)
    engine.runAndWait()
stream.start_stream()
#default name (wake word) of the assistant
name = "any_default_name"
# coldstart: True until the user has addressed the assistant by name once.
coldstart=True
def record_audio():
    """Block until vosk recognizes a non-empty utterance; return its text."""
    while True:
        # NOTE(review): reads 44100 frames per call although the stream was
        # opened with rate=16000 and frames_per_buffer=8192 -- confirm intended.
        data = stream.read(44100, exception_on_overflow=False)
        if model.AcceptWaveform(data):
            text = json.loads(model.Result())["text"]
            try:
                # Only return when at least one word was recognized.
                if len(text.split()) > 0:
                    return text
            except:
                # Bare except: keep listening on any failure.
                continue
def introduce():
    """Voice the welcome message (wired to the 'Introduction' button)."""
    say("Welcome everyone to the interactive interface. I hope you are doing well"
        )
def voice_set():
    """Switch the TTS voice based on the dropdown selection."""
    z = clicked.get()
    # Dispatch on the LAST character of the option label:
    # "Voice Male" -> 'e', "Voice Female 1" -> '1', "Voice Female 2" -> '2'.
    if z[-1] == "e":
        engine.setProperty('voice', voices[0].id)
    elif z[-1] == "1":
        engine.setProperty('voice', voices[1].id)
    else:
        engine.setProperty('voice', voices[2].id)
    say("Voice changed successfully")
def entry_box_update(text):
    """Replace the contents of the terminal Entry widget and refresh the GUI."""
    e.delete(0, END)
    e.insert(0, text)
    root.update()
def end_instance():
    """Flip the main button to its 'ended' (red) state and refresh the GUI."""
    b.configure(text="Instance ended. Click to start again.",
                bg='red',
                fg='white')
    root.update()
def finalize_design(result):
    """Export the cadquery result to output.step and open it.

    NOTE(review): os.startfile is Windows-only, so this script assumes Windows.
    """
    exporters.export(result,"output.step")
    # end_instance()
    os.startfile("output.step")
def start():
    """Main voice loop.

    Phase 1 (while coldstart): wait until the user says the assistant's
    name (or 'stop'/'end' to abort). Phase 2: listen for commands --
    'repeat', 'end'/'stop', 'name' (rename + restart), or a shape keyword
    ('cube', 'cylinder', 'cuboid', 'column', 'box', 'cone', 'spring') that
    triggers a cadquery model export via finalize_design().
    """
    #initialize name to change
    global name
    global coldstart
    stop = False
    e.delete(0, END)
    b.configure(text="Instance started", bg='green', fg='white')
    root.update()
    temp = "New Instance started successfully."
    if coldstart:
        temp+=" You can take my name, " + str(
            name) + " to start interacting."
    entry_box_update(temp)
    say(temp)
    #wait till name is taken (wake-word phase)
    while coldstart:
        text = record_audio()
        entry_box_update(text)
        # Pad with spaces so " stop " / " end " match whole words only.
        text = " " + text + " "
        if name in text:
            temp = "Welcome, my name is " + str(name) + ". How may I help you?"
            entry_box_update(temp)
            say(temp)
            coldstart=False
            break
        if " stop " in text or " end " in text:
            stop = True
            say("Ok, ending the instance")
            end_instance()
            break
        if "repeat" in text:
            temp = "New Instance started successfully. You can take my name, " + str(
                name) + " to start interacting."
            entry_box_update(temp)
            say(temp)
    # Command phase: one utterance per iteration.
    while not stop:
        text = record_audio()
        entry_box_update(text)
        text = " " + text + " "
        if "repeat" in text:
            temp = "Welcome, my name is " + str(name) + ". How may I help you?"
            entry_box_update(temp)
            say(temp)
        if " end " in text or " stop " in text:
            say("Ok, ending the instance")
            end_instance()
            break
        if " name " in text:
            # Rename the assistant, then restart the whole interaction.
            say("Ok, tell me my new name")
            temp = record_audio()
            name = temp
            n2.delete(0, END)
            n2.insert(0, temp)
            say("ok my name is " + str(temp))
            coldstart=True
            # NOTE(review): recursive restart -- repeated renames grow the
            # call stack; confirm acceptable for this UI.
            start()
            end_instance()
            break
        #shapes start here
        if "cube" in text:
            say("OK, designing a cube")
            result = cq.Workplane("XY").box(1, 1, 1)
            finalize_design(result)
            # break
        if "cylinder" in text:
            say("OK, designing a cylinder")
            result = cq.Workplane("XY").circle(10).extrude(50)
            finalize_design(result)
            # break
        if "cuboid" in text:
            say("OK, designing a cuboid")
            result = cq.Workplane("XY").box(5, 10, 20)
            finalize_design(result)
            # break
        if "column" in text:
            # I-beam-like profile: polyline half-profile mirrored about Y.
            say("OK, designing a column")
            (L, H, W, t) = (100.0, 20.0, 20.0, 1.0)
            pts = [
                (0, H / 2.0),
                (W / 2.0, H / 2.0),
                (W / 2.0, (H / 2.0 - t)),
                (t / 2.0, (H / 2.0 - t)),
                (t / 2.0, (t - H / 2.0)),
                (W / 2.0, (t - H / 2.0)),
                (W / 2.0, H / -2.0),
                (0, H / -2.0),
            ]
            result = cq.Workplane("front").polyline(pts).mirrorY().extrude(L)
            finalize_design(result)
            # break
        if "box" in text:
            say("OK, designing a box")
            result = cq.Workplane("front").box(2, 2, 2).faces("+Z").shell(0.05)
            finalize_design(result)
            # break
        if "cone" in text:
            say("Ok, designing a cone")
            result = (cq.Workplane("front").box(4.0, 4.0, 0.25).faces(">Z").circle(1.5).workplane(offset=3.0).rect(0.75, 0.5).loft(combine=True))
            finalize_design(result)
            # break
        if "spring" in text:
            say("Ok, designing a spring")
            r = 0.5 # Radius of the helix
            p = 0.4 # Pitch of the helix - vertical distance between loops
            h = 2.4 # Height of the helix - total height
            # Helix
            wire = cq.Wire.makeHelix(pitch=p, height=h, radius=r)
            helix = cq.Workplane(obj=wire)
            # Final result: A 2D shape swept along a helix.
            result = (
                cq.Workplane("XZ")  # helix is moving up the Z axis
                .center(r, 0)  # offset isosceles trapezoid
                .polyline(((-0.15, 0.1), (0.0, 0.05), (0, 0.35), (-0.15, 0.3)))
                .close()  # make edges a wire
                .sweep(helix, isFrenet=True)  # Frenet keeps orientation as expected
            )
            finalize_design(result)
            # break
def delet(dummy):
    """Clear the name Entry when clicked (bound to <1>; *dummy* is the event)."""
    n2.delete(0, END)
def change_name():
    """Rename the assistant from the GUI name Entry and confirm aloud.

    Resets the coldstart flag so the next voice session waits for the new
    wake word again.
    """
    # BUG FIX: coldstart was assigned without a global declaration, creating
    # a dead local variable instead of resetting the module-level flag.
    global name, coldstart
    coldstart = True
    name = n2.get()
    say("Changed name to " + str(name) + " successfully")
# Build the Tk main window and all widgets (grid layout, rows 1-7).
root = Tk()
# root.geometry("1280x720")
#structures
root.title("Automated Engine Design using Machine Learning")
root.iconbitmap(r"path\to\ico\file")
#logo
myimg = ImageTk.PhotoImage(Image.open("logo\path"))
DMCE_logo = Label(image=myimg, bg="white")
DMCE_logo.grid(row=1, column=1, rowspan=2)
#title label
title_label = Label(
    root,
    text=
    "'Automated Design using Voice Recognition'",
    font=TkFont.Font(family="Times New Roman", size=24, weight="bold"),
).grid(row=1, column=2, columnspan=5, padx=10, pady=10, sticky=W + E)
#subtitle label
subtitle_label = Label(
    root,
    text="Python Project AY:2022-2023",
    font=TkFont.Font(family="Times New Roman", size=15),
    bd=1
).grid(
    row=2,
    column=2,
    # padx=10,
    # pady=10,
    columnspan=5,
    sticky=W + E)
# description label
desc_label = Label(
    root,
    text=
    "\tThis application has been developed as an interface for 'Automated Design using Voice Recognition'.",
    font=TkFont.Font(family="Times New Roman", size=12),
    bd=1,
    anchor=E,
    justify="left").grid(row=3, column=2, columnspan=5, sticky=W + E)
#buttons below description
it = Button(root, text="Introduction", command=introduce)
it.grid(row=4, column=2, pady=10)
# voice options dropdown (labels matter: voice_set dispatches on last char)
options = ["Voice Male", "Voice Female 1", "Voice Female 2"]
clicked = StringVar()
clicked.set("Voice Female 1")
#option dropdown
nm = OptionMenu(root, clicked, *options)
nm.grid(row=4, column=3, pady=10)
#setting voices
n1 = Button(root, text="Set voice", command=voice_set)
n1.grid(row=4, column=4, pady=10)
# name entry (cleared on first click via delet)
n2 = Entry(root, bg="lightgrey")
n2.insert(0, "Name: " + name)
n2.bind("<1>", delet)
n2.grid(row=4, column=5, pady=10)
#name button
n3 = Button(root, text="Set Name", command=change_name)
n3.grid(row=4, column=6, pady=10)
#credits label
name_label = Label(
    root,
    text=
    "Developed By:\n\nParas Raorane",
    font=TkFont.Font(family="Times New Roman", size=12),
    bd=1,
    anchor=W,
    pady=10,
    padx=10).grid(row=6, column=1, rowspan=2)
#label before terminal
Label(
    root,
    text="Interactive Terminal",
    font=TkFont.Font(family="Times New Roman", size=12, weight="bold"),
).grid(row=5, column=2, columnspan=5, sticky=W + E)
# main terminal entry: shows recognized text (see entry_box_update)
e = Entry(root, bg="lightgrey", width=100) #, borderwidth=10)
e.grid(row=6, column=2, columnspan=5, sticky=W + E)
# #inserting text into the box
e.insert(
    0,
    "Detected text will be displayed here. You can make changes as required.")
# main start button: launches the voice loop (start)
b = Button(root, text="Initialize", command=start)
b.grid(row=7, column=2, columnspan=5, pady=10)
root.mainloop() | N3dal/YourFirstContribution | Python/interactivedesign.py | interactivedesign.py | py | 9,411 | python | en | code | null | github-code | 6 | [
{
"api_name": "vosk.Model",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "vosk.KaldiRecognizer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyaudio.paInt16",
... |
72764469628 | from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
import numpy as np
def get_wordcloud(data, stop_words):
    """Build a word cloud of bigram frequencies from a corpus.

    :param data: iterable of documents (strings) fed to the vectorizer
    :param stop_words: stop words passed both to the vectorizer and WordCloud
    :return: a generated wordcloud.WordCloud instance (top 100 bigrams)
    """
    # use_idf=False makes this a plain (L2-normalized) term-frequency matrix.
    vectorizer = TfidfVectorizer(
        use_idf=False, stop_words=stop_words, ngram_range=(2, 2))
    vectors = vectorizer.fit_transform(data)
    # Column sums = total weight of each bigram across the whole corpus.
    counts = np.array(vectors.sum(axis=0))[0]
    # Map each bigram to its total weight (replaces the manual index loop).
    frequencies = dict(zip(vectorizer.get_feature_names_out(), counts))
    return WordCloud(background_color='white', stopwords=stop_words,
                     max_words=100).generate_from_frequencies(frequencies)
| Amayas29/review-analysis | src/iads/nlp/visualisation.py | visualisation.py | py | 607 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 20,
"usage_type": "call"
}
] |
26531291291 | from pyhpecfm import system
from lib.actions import HpecfmBaseAction
class eventLookup(HpecfmBaseAction):
    """StackStorm action that extracts EVENT records from the CFM audit log."""

    def run(self):
        audit_records = system.get_audit_logs(self.client)
        # Anything other than a list means the fetch failed; pass it through.
        if not isinstance(audit_records, list):
            return (False, audit_records)
        # Keep only EVENT records, flattened into the u_* field layout.
        events = [
            {
                'u_eventType': record['data']['event_type'],
                'u_typex': record['record_type'],
                'u_sev': record['severity'],
                'u_uuid': record['uuid'],
                'u_desc': record['description'],
                'u_name': record['data']['object_name'],
                'u_typeo': record['data']['object_type'],
            }
            for record in audit_records
            if record['record_type'] == 'EVENT'
        ]
        return (True, events)
| HewlettPackard/stackstorm-hpe-cfm | actions/get_events.py | get_events.py | py | 1,125 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "lib.actions.HpecfmBaseAction",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pyhpecfm.system.get_audit_logs",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyhpecfm.system",
"line_number": 6,
"usage_type": "name"
}
] |
34540300851 | import configparser
import argparse
import json
import boto3
import utility
from collections import OrderedDict
# NOTE(review): a module-level `global` statement is a no-op in Python;
# these names are module globals by virtue of being assigned here.
global emr_configuration, emr_applications, cluster_config, optional_instance_config
# Default config file (overridable via --config on the command line).
emr_configuration = "emr_cluster.config"
emr_applications = ["Hadoop", "Spark", "Ganglia"]
cluster_config = "source/cluster_creator/cluster_config.json"
# Maps optional [EMR_nodes] config keys to their boto3 Instances keys.
optional_instance_config = {"vpc_subnet": "Ec2SubnetId",
                            "master_security_group": "EmrManagedMasterSecurityGroup",
                            "slave_security_group": "EmrManagedSlaveSecurityGroup",
                            "service_access_security_group": "ServiceAccessSecurityGroup"}
def check_configuration(config):
    """Validate the parsed EMR config; return True when usable.

    Checks required keys in [EMR] and [EMR_nodes], the bootstrap-upload
    settings, and that a custom AMI is only used with EMR release >= 5.7.
    """
    if not utility.check_config(config, "EMR", ["release_label", "software_installer_location",
                                                "genome_folder_location"]):
        return False
    if not utility.check_upload_config(config["EMR"], "upload_bootstrap_scripts", "bootstrap_scripts",
                                       "bootstrap_scripts_local_location", "bootstrap_scripts_s3_location"):
        return False
    if not utility.check_config(config, "EMR_nodes", ["key_name", "service_role", "instance_profile",
                                                      "master_instance_type", "master_instance_count",
                                                      "core_instance_type", "core_instance_count"]):
        return False
    # release_label looks like "emr-5.7.0"; take major/minor after the dash.
    release_version = config["EMR"]["release_label"].split("-")[-1].split(".")
    major_release_version = int(release_version[0])
    minor_release_version = int(release_version[1])
    if config["EMR_nodes"].get("custom_ami_id", "").strip() != "" \
            and not (major_release_version >= 5 and minor_release_version >= 7):
        print("\033[31mERROR: \033[0mCustom AMI can only be used with EMR release >= 5.7")
        return False
    return True
def build_command(config):
    """Translate the parsed config into the kwargs dict for boto3's
    EMR run_job_flow call.

    :param config: a configparser.ConfigParser with [EMR] and [EMR_nodes]
    :return: OrderedDict of run_job_flow keyword arguments
    """
    global emr_applications, cluster_config
    emr_arguments = OrderedDict()
    # EMR configs
    if config["EMR"]["name"]:
        emr_arguments["Name"] = config["EMR"]["name"]
    if config["EMR"]["log_uri"]:
        emr_arguments["LogUri"] = config["EMR"]["log_uri"]
    emr_arguments["ReleaseLabel"] = config["EMR"]["release_label"]
    # Instances config: one instance group per node type with count > 0.
    emr_arguments["Instances"] = OrderedDict()
    instance_groups = []
    for node_type in ["master", "core"]:
        instance_specification = {}
        if int(config["EMR_nodes"][node_type + "_instance_count"]) == 0:
            continue
        instance_specification['Name'] = node_type + "_node"
        instance_specification['InstanceRole'] = node_type.upper()
        # Spot instances need a bid price; otherwise fall back to on-demand.
        if config["EMR_nodes"].getboolean(node_type + "_instance_spot"):
            instance_specification['Market'] = "SPOT"
            instance_specification['BidPrice'] = config["EMR_nodes"][node_type + "_instance_bid_price"]
        else:
            instance_specification['Market'] = "ON_DEMAND"
        instance_specification['InstanceType'] = config["EMR_nodes"][node_type + "_instance_type"]
        instance_specification['InstanceCount'] = int(config["EMR_nodes"][node_type + "_instance_count"])
        instance_groups.append(instance_specification)
    emr_arguments["Instances"]["InstanceGroups"] = instance_groups
    if config["EMR_nodes"]["key_name"]:
        emr_arguments["Instances"]["Ec2KeyName"] = config["EMR_nodes"]["key_name"]
    emr_arguments["Instances"]["KeepJobFlowAliveWhenNoSteps"] = True
    # Optional VPC/security-group settings, mapped via optional_instance_config.
    for instance_config in optional_instance_config:
        if instance_config in config["EMR_nodes"] and config["EMR_nodes"][instance_config].strip() != "":
            emr_arguments["Instances"][optional_instance_config[instance_config]] = config["EMR_nodes"][instance_config]
    # Single fixed step that enables Hadoop debugging on the cluster.
    emr_arguments["Steps"] = [
        {
            "Name": "Setup Hadoop Debugging",
            "ActionOnFailure": "TERMINATE_CLUSTER",
            "HadoopJarStep": {
                "Jar": "/var/lib/aws/emr/step-runner/hadoop-jars/command-runner.jar",
                "MainClass": "state-pusher-script"
            }
        }
    ]
    if "bootstrap_scripts" in config["EMR"]:
        bootstrap_actions = []
        for bootstrap_script in config["EMR"]["bootstrap_scripts"].split(","):
            bootstrap_script = bootstrap_script.strip()
            bootstrap_action_args = []
            # Two known scripts receive their S3 data locations as arguments.
            if bootstrap_script == "install_software.sh":
                bootstrap_action_args = [config["EMR"]["software_installer_location"]]
            elif bootstrap_script == "copy_reference.sh":
                bootstrap_action_args = [config["EMR"]["genome_folder_location"]]
            bootstrap_actions.append({
                "Name": bootstrap_script,
                "ScriptBootstrapAction": {
                    "Path": config["EMR"]["bootstrap_scripts_s3_location"].rstrip("/") + "/" + bootstrap_script,
                    "Args": bootstrap_action_args
                }
            })
        emr_arguments["BootstrapActions"] = bootstrap_actions
    emr_arguments["Applications"] = [{'Name': app} for app in emr_applications]
    # Cluster-wide software configuration is read from the JSON file, if set.
    emr_arguments["Configurations"] = json.loads(open(cluster_config).read()) if cluster_config else []
    emr_arguments["VisibleToAllUsers"] = True
    emr_arguments["JobFlowRole"] = config["EMR_nodes"]["instance_profile"]
    emr_arguments["ServiceRole"] = config["EMR_nodes"]["service_role"]
    if "custom_ami_id" in config["EMR_nodes"]:
        emr_arguments["CustomAmiId"] = config["EMR_nodes"]["custom_ami_id"]
    if "ebs_root_volume_size" in config["EMR_nodes"]:
        emr_arguments["EbsRootVolumeSize"] = config["EMR_nodes"]["ebs_root_volume_size"]
    return emr_arguments
if __name__ == "__main__":
    # CLI: optional --config overrides the default config path; --dry-run
    # prints the run_job_flow arguments instead of launching a cluster.
    parser = argparse.ArgumentParser(description='Cluster launcher for spark-based RNA-seq Pipeline')
    parser.add_argument('--config', '-c', action="store", dest="emr_config", help="EMR configuration file")
    parser.add_argument('--dry-run', '-d', action="store_true", dest="dry_run",
                        help="Produce the configurations for the cluster to be created")
    parser_result = parser.parse_args()
    if parser_result.emr_config and parser_result.emr_config.strip() != "":
        emr_configuration = parser_result.emr_config
    config = configparser.ConfigParser()
    config.read(emr_configuration)
    if check_configuration(config):
        # Optionally push the bootstrap scripts to S3 before launching.
        if config["EMR"].get("upload_bootstrap_scripts", "False") == "True":
            utility.upload_files_to_s3(
                [(bootstrap_script.strip(), config["EMR"]["bootstrap_scripts_local_location"],
                  config["EMR"]["bootstrap_scripts_s3_location"])
                 for bootstrap_script in config["EMR"]["bootstrap_scripts"].split(",")],
                parser_result.dry_run)
        emr_argument = build_command(config)
        if not parser_result.dry_run:
            emr_client = boto3.client("emr")
            cluster_launch = emr_client.run_job_flow(**emr_argument)
            print("Cluster has been launched with ID", cluster_launch["JobFlowId"])
        else:
            print("\n".join(["{} = {}".format(*emr_arg) for emr_arg in list(emr_argument.items())]))
| VCCRI/Falco | launch_cluster.py | launch_cluster.py | py | 7,234 | python | en | code | 37 | github-code | 6 | [
{
"api_name": "utility.check_config",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utility.check_upload_config",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utility.check_config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": ... |
42535314476 | # OKANCAN COSAR
# 12253018
import Helper
import Constant
import Step
import sys
populasyon = []
def Calculate(populasyonlar):
    """Run one GA generation over *populasyonlar* and return the survivors.

    Pipeline: parent selection -> recombination + mutation -> survivor
    selection over offspring plus the current population.
    """
    # Pick the parents for this generation (PARENT SELECT)
    parents = Step.parentSelect(populasyonlar)
    # Cross the parents (RECOMBINE) and mutate the offspring (MUTATE)
    offspring = Step.recombineAndMutate(parents)
    # Score everything, sort, and keep the fittest individuals
    return Step.survivalSelect(offspring + populasyonlar)
def main():
    """Evolve the global population for Constant.ITERASYONSAYISI generations."""
    global populasyon
    for generation in range(Constant.ITERASYONSAYISI):
        if generation == 0:
            # First generation: build a random starting population (INITIALISE)
            populasyon = Step.Initialise()
        # Advance one generation (repeated until the stop condition is met)
        populasyon = Calculate(populasyon)
from datetime import datetime

if __name__ == '__main__':
    # Bug fix: the original measured elapsed time as the difference of
    # datetime.now().microsecond values, which wraps around every second and
    # ignores whole seconds entirely, so the printed duration was wrong for
    # any run longer than (or spanning) a second boundary.
    start = datetime.now()
    main()
    elapsed = datetime.now() - start
    # Elapsed wall-clock time in milliseconds (same unit as before).
    print(elapsed.total_seconds() * 1000)
| OkancanCosar/01-Knapsack-with-GA | python/index.py | index.py | py | 1,827 | python | tr | code | 2 | github-code | 6 | [
{
"api_name": "Step.parentSelect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "Step.recombineAndMutate",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Step.survivalSelect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "Constan... |
41095556533 | import sqlite3
conn = sqlite3.connect("tickets5.db")
cur = conn.cursor()
def displayAllTickets():
    """Fetch every row from the tickets table and print it as a table."""
    cur.execute("SELECT * FROM tickets")
    rows = cur.fetchall()
    if not rows:
        print("No data found")
        print()
        return
    printStuff(rows)
def addTicket():
    """Prompt for ticket details and insert a new row (id auto-assigned)."""
    row = (
        None,  # let SQLite assign the primary key
        int(input("Enter actual speed: ")),
        int(input("Enter posted speed: ")),
        int(input("Enter age of offender: ")),
        str(input("Enter sex of offender: ")),
    )
    cur.execute("INSERT INTO tickets VALUES (?, ?, ?, ?, ?)", row)
    conn.commit()
def displayTicketsByOffender():
    """Prompt for an offender sex and print only the matching tickets."""
    sex = input("Enter sex of offender: ")
    cur.execute("SELECT * FROM tickets WHERE violator_sex = ?", (sex,))
    rows = cur.fetchall()
    if rows:
        printStuff(rows)
    else:
        print("Name not found")
        print()
def printStuff(data):
    """Print ticket rows as an aligned table, deriving the MPH-over column.

    Each row is (id, actual_speed, posted_speed, age, sex).
    """
    # NOTE(review): row[1] is the actual speed but is printed under the
    # "Posted MPH" heading — confirm the intended column labels.
    print(f"{'ticketID':<10} {'Posted MPH':<12} {'MPH Over':<10} {'Age':<5} {'Violator Sex':<12} ")
    for ticket_id, actual, posted, age, sex, *_ in data:
        over = actual - posted
        print(f" {ticket_id:<10} {actual:<12} {over:<10} {age:<5} {sex:<12} ")
    print()
def main():
    """Menu loop: display, add, or filter tickets; option 4 closes and exits."""
    while True:
        print("""
        Menu options. Choose 1, 2, 3, or 4:
          1. Display all Tickets
          2. Add a Ticket
          3. Filter by Offender Sex
          4. Save & Exit
        """)
        opt = input("Enter your choice, 1, 2, 3, or 4: ")
        if opt == "1":
            displayAllTickets()
        elif opt == "2":
            addTicket()
        elif opt == "3":
            displayTicketsByOffender()
        elif opt == "4":
            print()
            print("Goodbye")
            if conn:
                # Bug fix: 'conn.close' (no parentheses) only referenced the
                # method and never called it, so the connection was never
                # actually closed.
                conn.close()
            break
        else:
            print("Invalid entry, please re-enter your choice")
            print()
main() | LilGotit/brain-drizzle | TicketsDatabase/ticketDatabase.py | ticketDatabase.py | py | 2,130 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
29875084962 | ## ~~~~~~~~~~~~~~~~~~
# Deep Willy Network
## ~~~~~~~~~~~~~~~~~~
import numpy as np, json, sys, os
sys.path.append(os.path.dirname(__file__))
from willies import *
class DeepWilly(object):
    """A sequential neural network assembled from 'willy' layers.

    Supports forward/backward propagation with momentum and regularisation,
    mini-batch training, prediction, accuracy scoring and JSON save/load.
    """

    # Maps the type names written by save() back to the layer classes,
    # used when reconstructing a network in load().
    _willy_classes = {'connected': ConnectedWilly,
                      'dropout': DropoutWilly,
                      'convolutional': ConvolutionalWilly,
                      'pooling': PoolingWilly,
                      'stacking': StackingWilly}

    def __init__(self, cost_func = 'cross entropy'):
        """Create an empty network using *cost_func* ('cross entropy' or
        'quadratic'); raises Exception for anything else."""
        self.willies = []
        self.n_willies = 0
        self.cost_func = cost_func

        if cost_func == 'cross entropy':
            # The (a == 0)/(a == 1) terms guard log() against 0 arguments;
            # the 1e-99 term guards the derivative against division by zero.
            self.cost = lambda a, y: -np.mean(y * np.log(a + (a == 0)) + (1-y) * np.log(1-a + (a == 1)))
            self.dcost = lambda a, y: (1/a.shape[0]) * (a - y) / (a * (1 - a) + 1e-99)
        elif cost_func == 'quadratic':
            self.cost = lambda a, y: 0.5 * np.mean((a - y)**2)
            self.dcost = lambda a, y: (1/a.shape[0]) * (a - y)
        else:
            raise Exception('Unsupported cost function: ' + cost_func)

    def add(self, willy):
        ''' Add a willy to the network and set it up based on the previous willy's output (if not the first one). '''

        if len(self.willies) == 0:
            if not willy.is_set_up:
                raise Exception('Input shape or number must be provided to first Willy.')
        else:
            willy.set_up(self.willies[-1].out_shape)

        self.willies.append(willy)
        self.n_willies += 1

    def forward_prop(self, X):
        ''' Forward propagates X through the willies and returns the final activation. '''
        for willy in self.willies:
            X = willy.forward_prop(X)
        return X

    def backward_prop(self, X, y,
                      learn_rate, mom_rate, reg_rate):
        ''' Backward propagates errors while simultaneously updating weights in each layer. '''

        # Output layer dC/da
        batch_size = X.shape[0]
        dA = self.dcost(self.willies[-1].A, y)

        # Each layer's input is the previous layer's activation (X for the
        # first layer); reverse the list to walk the net back-to-front.
        XA = [X] + [willy.A for willy in self.willies[:-1]]
        XA = XA[::-1]

        for w, willy in enumerate(reversed(self.willies)):
            willy.update_weights(dA, XA[w],
                                 learn_rate, mom_rate, reg_rate)
            dA = willy.backward_prop()

    @staticmethod
    def get_batches(X, y, batch_size):
        ''' Shuffles training data and splits it into batches of the desired size.
        batch_size == -1 (or >= len(X)) means a single batch with everything. '''

        if batch_size == -1 or batch_size >= X.shape[0]:
            return [X], [y]

        # Shuffle data (same permutation for X and y)
        shuffled_indices = np.random.permutation(len(X))
        shuffled_X = X[shuffled_indices]
        shuffled_y = y[shuffled_indices]

        # Consecutive batches; any remainder smaller than batch_size is dropped.
        X_batches = []
        y_batches = []
        for i in range(X.shape[0]//batch_size):
            X_batches.append(shuffled_X[int(batch_size*i):int(batch_size*(i+1))])
            y_batches.append(shuffled_y[int(batch_size*i):int(batch_size*(i+1))])

        return X_batches, y_batches

    def train(self, X, y,
              num_iterations, batch_size,
              learn_rate, reg_rate, mom_rate = 0,
              verbose = False):
        ''' Trains the network on (X, y) with the given hyper-parameters.
        When verbose is a positive int, prints the last batch's cost every
        `verbose` iterations. '''

        # Change dtypes
        X = X.astype(np.float32)
        y = y.astype(np.float32)

        # Initialise momenta to 0
        for willy in self.willies:
            willy.reset_momenta()

        # Train network
        for iteration in range(num_iterations):

            # Get batches:
            X_batches, y_batches = self.get_batches(X, y, batch_size)

            for batchX, batchy in zip(X_batches, y_batches):

                # Forward propagate
                self.forward_prop(batchX)

                # Backward propagate & update weights
                self.backward_prop(batchX, batchy,
                                   learn_rate, mom_rate, reg_rate)

            # Print progress
            if verbose:
                if iteration % verbose == 0:
                    print("Training cost on last batch: ", self.cost(self.willies[-1].A, batchy))

    def predict(self, X, pred_type = 'as is'):
        ''' Forward propagates X and post-processes the output:
        'as is' (raw), 'binary' (threshold at 0.5) or 'argmax' (class index column). '''

        self.yhat = self.forward_prop(X)

        if pred_type == 'binary':
            self.yhat = 1 * (self.yhat > 0.5)
        elif pred_type == 'argmax':
            self.yhat = np.argmax(self.yhat, axis = 1).reshape(-1, 1)
        else:
            assert pred_type == 'as is', \
                "Provided argument pred_type (" + pred_type + ") not supported."

        return self.yhat

    def accuracy(self, X, y, pred_type = 'as is'):
        ''' Gets accuracy of predictions against labels y. '''
        return np.mean(self.predict(X, pred_type) == y)

    def save(self, filename):
        ''' Saves deep willy to `filename` as JSON. '''

        willy_data = []
        for willy in self.willies:
            willy_data.append(willy.save())

        data = {'cost': self.cost_func,
                'willies': willy_data}

        # Context manager guarantees the file is closed even on error.
        with open(filename, "w") as file:
            json.dump(data, file)

    @classmethod
    def load(cls, filename):
        ''' Loads deep willy from `filename` (a file written by save()). '''

        with open(filename, "r") as file:
            data = json.load(file)

        # Bug fix: save() stores the cost function under the key 'cost',
        # not 'cost_func'; reading the wrong key made load() raise KeyError
        # on every file produced by save().
        deep_willy = cls(data['cost'])
        for willy in data['willies']:
            willy_class = DeepWilly._willy_classes[willy['willy']]
            deep_willy.add(willy_class.load(willy))

        return deep_willy

    def copy(self):
        ''' Replicates this deep willy using its attributes. '''

        copy = DeepWilly(cost_func = self.cost_func)
        for willy in self.willies:
            copy.willies.append(willy.copy())
            copy.n_willies += 1

        return copy
| gavarela/willyai | willyai/deepWilly.py | deepWilly.py | py | 6,171 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
30906490211 | import requests, re
def scrape_images(link):
    """Fetch a Rightmove listing's media gallery and return its unique image URLs."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36"
    }
    # Swap the listing query string for the media-gallery one
    gallery_link = link.replace(
        '?channel=RES_BUY',
        'media?id=media0&ref=photoCollage&channel=RES_BUY',
    )
    response = requests.get(gallery_link, headers=headers)
    response.raise_for_status()
    # Image URLs look like .../IMG_NN_NNNN.<ext>
    pattern = r'((?:https:)(?:[/|.|\w|\s|-])*(?:IMG_\d{2}_\d{4})\.(?:jpg|gif|png|jpeg))'
    found = re.findall(pattern, response.text)
    # dict.fromkeys de-duplicates while preserving first-seen order
    return list(dict.fromkeys(found))
| GregorMonsonFD/holmly_sourcing_legacy | scripts/python/pdfGen/rightmove_image_extract.py | rightmove_image_extract.py | py | 717 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 19,
"usage_type": "call"
}
] |
70829089789 | import logging.config
DEFAULT_LEVEL = logging.WARNING
DEFAULT_FMT = '%(asctime)s | %(levelname)-8s | %(message)s'
def install(level=DEFAULT_LEVEL, fmt=DEFAULT_FMT):
    """Configure root logging; use colourized output when colorlog is available.

    Falls back silently to the plain basicConfig setup when the optional
    colorlog package is not installed.
    """
    logging.basicConfig(level=level, format=fmt)
    try:
        import sys
        import colorlog
        formatter = colorlog.ColoredFormatter(
            fmt='%(log_color)s' + fmt + '%(reset)s',
            log_colors={
                'DEBUG': 'blue',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'white,bg_red',
            }
        )
        handler = colorlog.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        # Replace any existing root handlers with the colourized one.
        logging.root.handlers.clear()
        logging.root.addHandler(handler)
    except ImportError:
        # Bug fix: the original bare 'except:' swallowed every exception,
        # including KeyboardInterrupt/SystemExit. Only the expected failure
        # (colorlog missing) should be ignored.
        pass
| Arcensoth/pymcutil | pymcutil/logging/__init__.py | __init__.py | py | 799 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.config.WARNING",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "logging.config",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "logging.config.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "lo... |
43634658293 |
from __future__ import division
from __future__ import absolute_import
#typing
#overrides
from allennlp.common import squad_eval
from allennlp.training.metrics.metric import Metric
class SquadEmAndF1(Metric):
    u"""
    This :class:`Metric` takes the best span string computed by a model, along with the answer
    strings labeled in the data, and computes exact match and F1 score using the official SQuAD
    evaluation script.
    """
    def __init__(self) :
        # Running sums over all observed examples, plus the example count.
        self._total_em = 0.0
        self._total_f1 = 0.0
        self._count = 0

    #overrides
    def __call__(self, best_span_string, answer_strings):
        u"""
        Accumulate exact-match and F1 for one prediction, taking the best
        score over all ground-truth answer strings (official SQuAD rule).
        """
        exact_match = squad_eval.metric_max_over_ground_truths(
                squad_eval.exact_match_score,
                best_span_string,
                answer_strings)
        f1_score = squad_eval.metric_max_over_ground_truths(
                squad_eval.f1_score,
                best_span_string,
                answer_strings)
        self._total_em += exact_match
        self._total_f1 += f1_score
        self._count += 1

    #overrides
    def get_metric(self, reset = False) :
        u"""
        Returns
        -------
        Average exact match and F1 score (in that order) as computed by the official SQuAD script
        over all inputs.
        """
        exact_match = self._total_em / self._count if self._count > 0 else 0
        f1_score = self._total_f1 / self._count if self._count > 0 else 0
        if reset:
            self.reset()
        return exact_match, f1_score

    #overrides
    def reset(self):
        self._total_em = 0.0
        self._total_f1 = 0.0
        self._count = 0

    def __str__(self):
        # Bug fix: the original returned a plain string containing literal
        # "{self._total_em}" placeholders (an f-string whose 'f' prefix was
        # stripped by the py2 conversion); format the values explicitly.
        return "SquadEmAndF1(em={}, f1={})".format(self._total_em, self._total_f1)

SquadEmAndF1 = Metric.register(u"squad")(SquadEmAndF1)
| plasticityai/magnitude | pymagnitude/third_party/allennlp/training/metrics/squad_em_and_f1.py | squad_em_and_f1.py | py | 1,963 | python | en | code | 1,607 | github-code | 6 | [
{
"api_name": "allennlp.training.metrics.metric.Metric",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "allennlp.common.squad_eval.metric_max_over_ground_truths",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "allennlp.common.squad_eval",
"line_number": ... |
19181221365 | """Add agreed to TOS int field
Revision ID: 51398a87b2ef
Revises: 95c58503e9c0
Create Date: 2020-12-02 09:43:04.949189
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "51398a87b2ef"
down_revision = "95c58503e9c0"
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-null ``accepted_tos`` integer column (server default 0) to ``users``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("users", sa.Column("accepted_tos", sa.Integer(), nullable=False, server_default="0"))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the ``accepted_tos`` column from ``users``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("users", "accepted_tos")
    # ### end Alembic commands ###
| Almenon/couchers | app/backend/src/couchers/migrations/versions/51398a87b2ef_add_agreed_to_tos_int_field.py | 51398a87b2ef_add_agreed_to_tos_int_field.py | py | 696 | python | en | code | null | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
16760366561 | from django.shortcuts import render,redirect,get_object_or_404
# CSRF
from django.views.decorators.csrf import csrf_exempt
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from django.contrib import messages
import pdb
def home(request):
    """Render the landing page."""
    return render(request, 'index.html')
# def contact(request):
# return render(request, 'contact.html')
@csrf_exempt
def sendemail(request):
    """Send the posted contact-form content to the site inbox via send_mail.

    Redirects to 'home' on success; returns an error response for an invalid
    mail header or missing fields.
    """
    username = request.POST.get('username')
    # NOTE(review): if 'username' is absent, username is None and the string
    # concatenation below raises TypeError before any validation runs —
    # confirm the form always posts a username.
    subject = "돌직구 사용자"+ username+ "님이 보내신 문의 메일입니다."
    message = request.POST.get('message')
    useremail = request.POST.get("useremail")
    # NOTE(review): subject is always non-empty at this point, so this check
    # effectively only validates message and useremail.
    if subject and message and useremail:
        try:
            send_mail(subject, message, useremail, ["rockjiggu16@gmail.com"])
        except BadHeaderError:
            return HttpResponse('Invalid header found.')
        return redirect('home')
    else:
        return HttpResponse("정확하게 입력해주세요.")
@csrf_exempt
def contact(request):
    """Render the contact page; on POST, email the inquiry to the site inbox.

    Flashes a success or failure message and redirects back to 'contact'.
    """
    if request.method == "POST":
        username = request.POST["username"]
        subject = "돌직구 사용자"+ username+ "님이 보내신 문의 메일입니다."
        message = request.POST["message"]
        useremail = request.POST["useremail"]
        # Build the HTML body from the email template.
        emailContent = render_to_string('email.html', {
            "subject": subject,
            "useremail": useremail,
            "message":message,
        })
        emailAddress = "rockjiggu16@gmail.com"
        emailObject = EmailMessage(subject, emailContent, to=[emailAddress])
        emailObject.content_subtype = "html"
        # send() returns the number of messages successfully delivered.
        result = emailObject.send()
        if result == 1:
            messages.info(request, "성공적으로 문의가 돌직구에 전달되었습니다.")
        else:
            # Bug fix: 'messgaes' was a typo for 'messages' and raised a
            # NameError whenever sending failed.
            messages.info(request, "문의에 실패하였습니다.")
        return redirect('contact')
    else:
        return render(request, 'contact.html')
| suna-ji/RockJiggu | RockJiggu/views.py | views.py | py | 2,535 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.core.mail.BadHeaderError",
"line_number": 36,
"usage_type": "name"
},
{
... |
648818691 | from argparse import ArgumentParser
from inference import Infer
# Command-line front end: <modelname> <imagepath> [--use_gpu]
parser = ArgumentParser()
parser.add_argument("modelname", help="name of model to use")
parser.add_argument("imagepath", help="relative path to image")
# NOTE(review): argparse's type=bool treats any non-empty string (including
# "False") as True; only the bare flag form '--use_gpu' behaves as expected.
parser.add_argument("--use_gpu", help="use gpu or not", nargs="?", default=False, const=True, type = bool)
args = parser.parse_args()

infer = Infer(args.use_gpu)
try:
    infer.infer(args.imagepath, args.modelname)
except Exception as exc:
    # Bug fix: the original bare 'except:' silently trapped every exception,
    # including KeyboardInterrupt/SystemExit, and hid the actual failure.
    print("Something BAD happened!!!")
    print(exc)
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "inference.Infer",
"line_number": 12,
"usage_type": "call"
}
] |
40176582534 | #import gevent.monkey
#gevent.monkey.patch_all()
import os
import sys
import time
import pprint
import logging
import requests
import grequests
import threading
import urllib.parse
from bs4 import BeautifulSoup
import db
import parse
logger = logging.getLogger('scraper')
logger.setLevel(logging.DEBUG)
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
LOG_DIR = os.path.join(SRC_DIR, "..", "log")
LOG_FILENAME = "scraper.log"
LOG_FILEPATH = os.path.join(LOG_DIR, LOG_FILENAME)
fh = logging.FileHandler(LOG_FILEPATH, mode='w')
fh.setLevel(logging.ERROR)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# Set low so server isn't bombarded and begins to refuse.
MAX_SESSIONS = 1
domain = 'https://www.beeradvocate.com/'
places_rel_url = 'place/list/'
places_url = urllib.parse.urljoin(domain, places_rel_url)
places_params = {'start': 0, 'brewery': 'Y', 'sort': 'name'}
progress = {'breweries': 0, 'beers': 0, 'errors': 0}
def exception_handler(r, e):
    """grequests error callback: count the failed request and log the exception."""
    progress['errors'] += 1
    logger.error("REQUEST URL: {} EXCEPTION: {}".format(r.url, e))
    logger.error("{} ERRORS HAVE OCCURRED".format(progress['errors']))
def get_last_page_start():
    """Fetch the first places page and return the 'start' offset of its 'last' link.

    The listing paginates via a ?start=N query parameter; the value on the
    "last" anchor tells us how far get_breweries() has to iterate.
    """
    response = requests.get(url=places_url, params=places_params)
    soup = BeautifulSoup(response.text, features='lxml')
    last_page_tag = soup.find('a', text="last")
    last_link = last_page_tag['href']
    parsed = urllib.parse.urlparse(last_link)
    last_start_str = urllib.parse.parse_qs(parsed.query)['start'][0]
    last_start = int(last_start_str)
    # Consistency fix: use this module's configured 'logger' (file + console
    # handlers) instead of the root logger via logging.debug, matching every
    # other function in this file.
    logger.debug("get_last_page_start: last_start: {}".format(last_start))
    return last_start
def get_breweries():
    """Queue one request per brewery-listing page and fetch them concurrently."""
    # Listing pages advance 20 entries per page.
    STEP = 20
    last_page_start = get_last_page_start()
    reqs = []
    for start in range(0, last_page_start, STEP):
        params = places_params.copy()
        params['start'] = start
        reqs.append(grequests.get(places_url, params=params, callback=get_breweries_handler))
    logger.info("STARTING THREADS to fetch brewery details.")
    # MAX_SESSIONS limits concurrency so the server isn't bombarded.
    res = grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_breweries_handler(response, *args, **kwargs):
    """Response callback: parse one listing page and queue its breweries for the DB writer."""
    soup = BeautifulSoup(response.text, features='lxml')
    this_page_breweries = parse.places.breweries(soup)
    logger.info("this_page_breweries: {}".format(pprint.pformat(this_page_breweries)))
    logger.info("response time (s): {}".format(response.elapsed))
    # Hand the parsed rows to the consumer thread via the shared deque.
    db.breweries.extendleft(this_page_breweries)
    progress['breweries'] += len(this_page_breweries)
    logger.info("FETCHED: {} breweries.".format(progress['breweries']))
def get_brewery_details(paths):
    """Concurrently fetch each brewery profile page (with its full beer list)."""
    # view=beers&show=all makes the profile page include every beer.
    params = {'view': 'beers', 'show': 'all'}
    reqs = []
    for p in paths:
        url = urllib.parse.urljoin(domain, p)
        reqs.append(grequests.get(url, params=params,
                                  callback=get_brewery_details_handler))
    logger.info("STARTING THREADS to fetch brewery details.")
    res = grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_brewery_details_handler(response, *args, **kwargs):
    """Response callback: parse one brewery profile, queue the brewery and its beers."""
    logger.info("RESPONSE received from {}".format(response.url))
    soup = BeautifulSoup(response.text, features='lxml')
    #############################################################################
    # This is possibly redundant as all this information can be gathered in the
    # previous operation when the links are fetched from places list.
    brewery = {}
    brewery['id'] = parse.url.brewery_id(response.url)
    brewery['name'] = parse.brewery.name(soup)
    db.breweries.appendleft(brewery)
    logger.info("ADDED brewery {} to write queue.".format(pprint.pformat(brewery)))
    #############################################################################
    this_brewery_beers = parse.brewery.beers(soup)
    db.beers.extendleft(this_brewery_beers)
    logger.info("ADDED {} beers to write queue.".format(len(this_brewery_beers)))
    progress['breweries'] += 1
    progress['beers'] += len(this_brewery_beers)
    logger.info("FETCHED: {} breweries and {} beers.".format(progress['breweries'], progress['beers']))
    # Presumably a politeness delay between responses — confirm it's intended
    # here rather than in the request scheduler.
    time.sleep(1)
def get_beer_details(paths):
    """Concurrently fetch individual beer profile pages."""
    # This function is redundant when first populating tha database as all info
    # can be extracted from the brewery profile page (except ranking which can be
    # calculated from scores stored in the database. It is useful to update the
    # info for beers already in the database but even when updating the previous
    # operation of fetching the brewery has most likely been performed anyway.
    reqs = []
    for p in paths:
        url = urllib.parse.urljoin(domain, p)
        reqs.append(grequests.get(url, allow_redirects=True, callback=get_beer_details_handler))
    logger.info("STARTING THREADS to fetch beer details.")
    res = grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_beer_details_handler(response, *args, **kwargs):
    """Response callback: parse one beer profile page and queue the beer row."""
    # NOTE(review): the bare print() calls below look like leftover debugging —
    # consider routing them through 'logger' like the rest of the module.
    print(response.status_code)
    print(response.url)
    soup = BeautifulSoup(response.text, features='lxml')
    print(soup)
    beer = {}
    # IDs are embedded in the profile URL; everything else comes from the page.
    beer['id'] = parse.url.beer_id(response.url)
    beer['brewery_id'] = parse.url.brewery_id(response.url)
    beer['name'] = parse.beer.name(soup)
    logger.info("name: {}".format(beer['name']))
    beer['score'] = parse.beer.score(soup)
    logger.info("score: {}".format(beer['score']))
    beer['ratings'] = parse.beer.ratings(soup)
    logger.info("ratings: {}".format(beer['ratings']))
    beer['ranking'] = parse.beer.ranking(soup)
    logger.info("ranking: {}".format(beer['ranking']))
    beer['style'] = parse.beer.style(soup)
    logger.info("style: {}".format(beer['style']))
    beer['abv'] = parse.beer.abv(soup)
    logger.info("abv: {}".format(beer['abv']))
    db.beers.appendleft(beer)
    logger.info("ADDED beer with ID = {} to write queue.".format(beer['id']))
def breweries():
    """Fetch the full brewery list while a DB consumer thread drains the write queue."""
    consumer_thread = threading.Thread(target=db.consumer)
    consumer_thread.start()
    get_breweries()
    # Signal the consumer that no more rows are coming, then wait for it.
    db.fetching_breweries = False
    consumer_thread.join()
def brewery_details():
    """Fetch profile pages for every brewery ID already stored in the DB."""
    to_fetch = db.read_brewery_ids()
    logger.info("{} breweries to fetch".format(len(to_fetch)))
    paths = ["/beer/profile/{}/".format(b) for b in to_fetch]
    consumer_thread = threading.Thread(target=db.consumer)
    consumer_thread.start()
    get_brewery_details(paths)
    # Signal the consumer that fetching is done, then wait for it to flush.
    db.fetching_breweries = False
    consumer_thread.join()
def beer_details():
    """Fetch profile pages for beers already stored in the DB."""
    to_fetch = db.read_beer_ids()
    logger.info("{} beers to fetch".format(len(to_fetch)))
    paths = ["/beer/profile/{}/{}".format(b[0], b[1]) for b in to_fetch]
    consumer_thread = threading.Thread(target=db.consumer)
    consumer_thread.start()
    # NOTE(review): paths[0:1] only fetches the FIRST beer — looks like a
    # leftover test limit; confirm whether the full list should be passed.
    get_beer_details(paths[0:1])
    # NOTE(review): this sets the breweries flag while fetching beers —
    # verify against db.consumer that this is the intended shutdown signal.
    db.fetching_breweries = False
    consumer_thread.join()
def print_usage():
    """Show the command-line usage summary."""
    print("USAGE: python3 scraper.py {breweries|brewery_details|beer_details}")
if __name__ == "__main__":
    # Dispatch on the single sub-command argument; anything else shows usage.
    if len(sys.argv) < 2:
        print_usage()
    elif len(sys.argv) == 2 and sys.argv[1] == "brewery_details":
        brewery_details()
    elif len(sys.argv) == 2 and sys.argv[1] == "breweries":
        breweries()
    elif len(sys.argv) == 2 and sys.argv[1] == "beer_details":
        beer_details()
    else:
        print_usage()
| JohnMcAninley/beer-goggles | scraper/src/scraper.py | scraper.py | py | 7,113 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
# Stemming demo: reduce inflected words to a common stem with NLTK.
# e.g. stemming will convert ["python","pythoner","pythoning","pythoned","pythonly"] to python
# e.g. stemming will convert ["interesting","interested"] to interest
# Note: stemming may create some words that do not exist.
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
ps = PorterStemmer()
example_words = ["python","pythoner","pythoning","pythoned","pythonly"]
# Stem each standalone word
for w in example_words:
    print(ps.stem(w))
new_text = ("It is very import to be pythonly while you are pythoning with python. "
            "All pythoners have have pythoned poorly at least once.")
# Tokenize the sentence, then stem each token
words = word_tokenize(new_text)
for w in words:
    print(ps.stem(w))
{
"api_name": "nltk.stem.PorterStemmer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 16,
"usage_type": "call"
}
] |
2026784639 | import msvcrt
import zipfile
import threading
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor,wait, FIRST_COMPLETED, ALL_COMPLETED
resultList = []
unfoundList = []
alltask = []
def main():
    """Scan an .aab for *.version entries and look up each dependency's
    latest release on mvnrepository.com in parallel."""
    file = 'C:\\Users\\niu\Desktop\\上传\CartoonCustomSticker.aab'
    threadList = []
    if zipfile.is_zipfile(file):
        print()
        z = zipfile.ZipFile(file, 'r')
        namelist = z.namelist()
        for zFile in namelist:
            if zFile.endswith('.version'):
                # Entry names end in "<group>_<artifact>.version".
                pair = zFile.split('/').pop()[:-8].split('_')
                t = threading.Thread(target=getLatestVersion, args=(pair[0], pair[1]))
                t.start()
                threadList.append(t)
            print('查询中...')
        # Bug fix: the lookup threads were never joined, so the result lists
        # were printed while worker threads could still be appending to them.
        for t in threadList:
            t.join()
        print('结果如下:')
        for item in resultList:
            print(item)
        print('\n未查询到的依赖如下:')
        print(unfoundList)
        msvcrt.getch()
def getLatestVersion(group_id, artifact_id):
    """Scrape mvnrepository.com for the latest release of group:artifact.

    Appends '"group:artifact":"version",' to resultList on success, or
    'group:artifact' to unfoundList when no release button is found.
    """
    global resultList
    global unfoundList
    url = f'https://mvnrepository.com/artifact/{group_id}/{artifact_id}'
    # url = 'https://mvnrepository.com/artifact/androidx.appcompat/appcompat'  # example: the Spring Boot-style artifact URL
    # Headless Edge with a desktop user agent so the page renders normally.
    options = Options()
    options.use_chromium = True
    options.add_argument("headless")
    options.add_argument("disable-gpu")
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3")
    driver = webdriver.Edge(options=options)  # uses the Edge browser
    driver.get(url)
    html = driver.page_source
    # print(f'groupId:{group_id}, artifact:{artifact_id}')
    # print(html)
    soup = BeautifulSoup(html, 'html.parser')
    # The "release" button carries the latest version string.
    version_element = soup.find('a', {'class': 'vbtn release'})
    if not version_element is None:
        latest_version = version_element.text.strip()
        print(f'"{group_id}:{artifact_id}":"{latest_version}",')
        resultList.append(f'"{group_id}:{artifact_id}":"{latest_version}",')
    else:
        print(f'{group_id}_{artifact_id}')
        unfoundList.append(f'{group_id}:{artifact_id}')
    driver.quit()
main()
| Nienter/mypy | personal/getNewestVersion.py | getNewestVersion.py | py | 3,123 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "zipfile.is_zipfile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name... |
71885865788 | #! /usr/bin/env python
import sys
from collections import defaultdict
from intcode import IntCode
lines = []
for line in sys.stdin:
lines.append(line.rstrip('\n'))
class Robot():
    """Hull-painting robot driven by an IntCode program (AoC 2019 day 11)."""
    # Class-level defaults; the real values are assigned per instance.
    program=None      # IntCode machine driving the robot
    direction=(0,1)   # unit step (dx, dy); (0, 1) is the starting heading
    position=(0,0)    # current panel coordinate
    panels=None       # (x, y) -> last painted color; unpainted panels read 0

    def __init__(self, line) -> None:
        # The program source is one comma-separated line of opcodes.
        self.program = IntCode(line.split(','), inputs=[])
        # defaultdict(int) so never-painted panels read as color 0
        self.panels = defaultdict(int)

    def move(self, val):
        # Rotate: val==1 maps (dx,dy)->(dy,-dx), otherwise (dx,dy)->(-dy,dx)
        # (the two opposite 90° turns), then step one panel forward.
        dx,dy=self.direction
        if val==1:
            self.direction=dy,-dx
        else: # val==0:
            self.direction=-dy,dx
        self.position = tuple(self.position[i]+self.direction[i] for i in (0,1,))

    def run(self):
        # Drive the program until it halts: feed the current panel's color in,
        # read a (color, turn) pair out, paint the panel, then move.
        while not self.program.finished:
            self.program.inputs.append(self.panels[self.position])
            self.program.run()
            if not self.program.finished:
                color, new_dir = self.program.outputs
                self.program.outputs.clear()
                self.panels[self.position] = color
                self.move(new_dir)
# Part 1: count how many panels get painted at least once.
print("-- Part 1 --")
robot = Robot(lines[0])
robot.run()
dim = len(robot.panels.keys())
print(dim)

# Part 2: repeat, but starting on a white panel; the painted panels spell
# out the registration identifier.
print("-- Part 2 --")
robot = Robot(lines[0])
robot.panels[0,0] = 1 # we start on a white panel instead
robot.run()

# Bounding box of all painted panels (seeded with +/-dim so any real
# coordinate replaces the initial extremes).
min_x, min_y, max_x, max_y = dim,dim,-dim,-dim
for (x,y) in robot.panels.keys():
    min_x = min(x,min_x)
    max_x = max(x,max_x)
    min_y = min(y,min_y)
    max_y = max(y,max_y)

# Render top-to-bottom: '@' for white panels, space for black.
for y in reversed(range(min_y, max_y+1)):
    print(''.join( '@' if robot.panels[x,y]==1 else ' ' for x in range(min_x, max_x+1)))
{
"api_name": "sys.stdin",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "intcode.IntCode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
}
] |
16731268894 | from abc import ABC, ABCMeta, abstractmethod
from datetime import datetime
from pprint import pprint
from typing import Dict
try:
from dialogflow_v2 import SessionsClient
from dialogflow_v2.proto.session_pb2 import (
DetectIntentResponse,
QueryInput,
QueryResult,
TextInput,
)
except:
SessionsClient = None
DetectIntentResponse = None
QueryInput = None
QueryResult = None
TextInput = None
from haps import SINGLETON_SCOPE, base, egg, scope
from haps.config import Config
from botkit.builtin_services.nlu.dialogflowconfig import DialogflowConfig
from botkit.builtin_services.nlu.messageunderstanding import MessageUnderstanding
@scope(SINGLETON_SCOPE)
@base
class INLUService(ABC):
@abstractmethod
def detect_intents(self, chat_id: int, message: str, language_code: str = None):
pass
@egg
class DialogflowService(INLUService):
    """NLU service backed by Google Dialogflow v2 sessions.

    Configuration (project id, credentials path) is injected via haps'
    ``Config`` from the ``DialogflowConfig.KEY`` entry.
    """

    config: DialogflowConfig = Config(DialogflowConfig.KEY)

    def __init__(self):
        self.session_client = SessionsClient.from_service_account_file(
            self.config.json_credentials_file
        )

    def detect_intents(
        self, chat_id: int, message: str, language_code: str = "en"
    ) -> MessageUnderstanding:
        """Send ``message`` to Dialogflow and map the response to a MessageUnderstanding.

        The chat id doubles as the Dialogflow session id so conversational
        context is kept per chat.
        """
        # NOTE(review): the Dialogflow v2 client exposes ``session_path``;
        # ``session_file`` looks suspicious — confirm against the installed
        # dialogflow_v2 version.
        session = self.session_client.session_file(self.config.project_id, chat_id)
        text_input = TextInput(text=message, language_code=language_code)
        query_input = QueryInput(text=text_input)
        response: DetectIntentResponse = self.session_client.detect_intent(
            session=session, query_input=query_input
        )
        result: QueryResult = response.query_result
        # Ignored result fields:
        # - all_required_params_present
        # - fulfillment_text
        # - fulfillment_messages
        # - webhook_source
        # - webhook_payload
        # - output_contexts
        # - diagnostic_info
        return MessageUnderstanding(
            text=result.query_text,
            language_code=result.language_code,
            action=result.action,
            intent=result.intent.display_name,
            parameters=self._normalize_parameters(result.parameters),
            contexts=result.output_contexts,
            confidence=result.speech_recognition_confidence or result.intent_detection_confidence,
            date=datetime.now(),
        )

    @staticmethod
    def _parse_datetime(value: str) -> datetime:
        """Parse an RFC 3339 / ISO 8601 timestamp as emitted by Dialogflow.

        Bug fix: the original called ``dateutil.parser.parse`` but never
        imported ``dateutil``, so date parameters raised NameError at runtime.
        ``datetime.fromisoformat`` rejects a trailing 'Z' before Python 3.11,
        hence the normalization.
        """
        return datetime.fromisoformat(value.replace("Z", "+00:00"))

    def _normalize_parameters(self, params: Dict):
        """Return ``params`` with any date-like values converted to ``datetime``."""
        result = {}
        for key, value in params.items():
            if "date" in key and value:
                # Dialogflow may nest the timestamp in a struct under "date_time".
                if hasattr(value, "keys") and "date_time" in value:
                    raw = value["date_time"]
                else:
                    raw = value
                result[key] = self._parse_datetime(raw)
            else:
                result[key] = value
        return result
if __name__ == "__main__":
    # Ad-hoc manual smoke test; requires local Dialogflow credentials on disk.
    conf = DialogflowConfig(
        project_id="userbot-9994a",
        json_credentials_file="C:/projects/userbot/dialogflow-credentials.json",
    )
    # NOTE(review): DialogflowService.__init__ takes no arguments (its config
    # is injected via haps' Config), so passing ``conf`` here raises a
    # TypeError — confirm how this demo is meant to be wired up.
    c = DialogflowService(conf)
    nlu = c.detect_intents(123, "!remind @tWiTfAcE to buy milk tomorrow at 6", "en")
    # print(nlu)
    pprint(nlu)
| autogram/Botkit | botkit/builtin_services/nlu/nluservice.py | nluservice.py | py | 3,231 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "dialogflow_v2.SessionsClient",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "dialogflow_v2.proto.session_pb2.DetectIntentResponse",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dialogflow_v2.proto.session_pb2.QueryInput",
"line_number":... |
10120432429 | """
Library and Wrapper for DHT11 and DHT22 sensors.
Based on https://github.com/JurassicPork/DHT_PyCom/tree/pulses_get
Extensions: Renamed module filename to dht (from dth.py) and added wrapper function
For hardware connection: YELLOW/WHITE: PIN1 VCC through GPIO, PIN2: DATA through GPIO, PIN3: NC, PIN4: GDN. Use also a 4.7k PULL-UP for DATA
"""
import utime
import pycom
import sensors
from machine import Pin
import logging
class DTHResult:
    """DHT sensor result returned by the DTH.read() method."""

    # Error codes reported through ``error_code``.
    ERR_NO_ERROR = 0
    ERR_MISSING_DATA = 1
    ERR_CRC = 2

    # Class-level defaults, kept for backwards compatibility with callers
    # that inspect the attributes on the class itself.
    error_code = ERR_NO_ERROR
    temperature = -1
    humidity = -1

    def __init__(self, error_code, temperature, humidity):
        """Capture the outcome of a single sensor read."""
        self.error_code, self.temperature, self.humidity = (
            error_code,
            temperature,
            humidity,
        )

    def is_valid(self):
        """Return True when the read completed without a data or CRC error."""
        return self.error_code == DTHResult.ERR_NO_ERROR
class DTH:
    """DHT sensor (dht11, dht21, dht22) reader class for Pycom boards."""

    # 0 = DHT11 (integer readings); anything else = DHT21/DHT22 (0.1 resolution).
    __dhttype = 0

    def __init__(self, pin, sensor=0):
        """Open ``pin`` as an open-drain data line; ``sensor`` selects the model."""
        self.__pin = Pin(pin, mode=Pin.OPEN_DRAIN)
        self.__dhttype = sensor
        self.__pin(1)
        # Give the sensor time to settle after power-up.
        utime.sleep(1.0)

    def read(self):
        """Trigger one measurement and return a DTHResult."""
        # Start signal: pull the data line low for ~19 ms.
        self.__send_and_sleep(0, 0.019)
        data = pycom.pulses_get(self.__pin, 100)
        self.__pin.init(Pin.OPEN_DRAIN)
        self.__pin(1)
        bits = []
        # A high pulse of ~18-28 us encodes a 0, ~65-75 us encodes a 1.
        for level, duration in data:
            if level == 1 and 18 <= duration <= 28:
                bits.append(0)
            if level == 1 and 65 <= duration <= 75:
                bits.append(1)
        # A full frame is exactly 40 bits (5 bytes).
        if len(bits) != 40:
            return DTHResult(DTHResult.ERR_MISSING_DATA, 0, 0)
        the_bytes = self.__bits_to_bytes(bits)
        if the_bytes[4] != self.__calculate_checksum(the_bytes):
            return DTHResult(DTHResult.ERR_CRC, 0, 0)
        # Valid frame: humidity high/low byte, temperature high/low byte, checksum.
        [int_rh, dec_rh, int_t, dec_t, csum] = the_bytes
        if self.__dhttype == 0:  # dht11: integer values only
            rh = int_rh  # dht11 20% ~ 90%
            t = int_t  # dht11 0..50 C
        else:  # dht21/dht22: tenths; sign bit lives in the temperature high byte
            rh = ((int_rh * 256) + dec_rh) / 10
            t = (((int_t & 0x7F) * 256) + dec_t) / 10
            if (int_t & 0x80) > 0:
                t *= -1
        return DTHResult(DTHResult.ERR_NO_ERROR, t, rh)

    def __send_and_sleep(self, output, mysleep):
        """Drive the data line to ``output`` and hold it for ``mysleep`` seconds."""
        self.__pin(output)
        utime.sleep(mysleep)

    def __bits_to_bytes(self, bits):
        """Pack a flat list of bits (MSB first) into bytes."""
        the_bytes = []
        byte = 0
        for i, bit in enumerate(bits):
            byte = (byte << 1) | bit
            if (i + 1) % 8 == 0:
                the_bytes.append(byte)
                byte = 0
        return the_bytes

    def __calculate_checksum(self, the_bytes):
        """Return the checksum: low byte of the sum of the first four data bytes.

        Bug fix: the original expression ``b0 + b1 + b2 + b3 & 255`` binds
        ``&`` tighter than ``+``, i.e. it computed ``b0 + b1 + b2 + (b3 & 255)``
        and never masked the sum — so valid frames whose byte sum exceeded 255
        were rejected as CRC errors.
        """
        return (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 255
def get_reading(data_pin, dht_model, vcc_pin=None):
    """ Returns temperature & humidity reading, for given VCC and DATA pins """
    sensors.set_sensor_power_on(vcc_pin)
    # Map the model string to the driver's sensor-type flag; unknown models
    # yield no driver and therefore a (None, None) reading.
    sensor_ids = {"DHT11": 0, "DHT22": 1}
    th = DTH(data_pin, sensor_ids[dht_model]) if dht_model in sensor_ids else None
    temp = None
    hum = None
    if th:
        result = th.read()
        if result.is_valid():
            temp = result.temperature
            hum = result.humidity
        else:
            logging.error("DHT model [" + dht_model + "]: invalid result.")
    # Power the sensor down again regardless of the outcome.
    sensors.set_sensor_power_off(vcc_pin)
    return (temp, hum)
| insighio/insighioNode | insighioNode/lib/sensors/dht.py | dht.py | py | 3,993 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "machine.Pin",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "machine.Pin.OPEN_DRAIN",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "utime.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pycom.pulses_get"... |
10251411217 | """
Configuration reader for the population_gravity model
@author Chris R. Vernon
@email: chris.vernon@pnnl.gov
License: BSD 2-Clause, see LICENSE and DISCLAIMER files
"""
import datetime
import os
import simplejson
import rasterio
import yaml
import pandas as pd
import population_gravity.downscale_utilities as utils
class ReadConfig:
    """Read configuration data either provided in the configuration YAML file or as passed in via arguments.

    :param config_file:                       string. Full path to configuration YAML file with file name and
                                              extension. If not provided by the user, the code will default to
                                              the expectation of alternate arguments.
    :param grid_coordinates_file:             string. Full path to the CSV file containing the coordinates for
                                              each 1 km grid cell within the target state. Header fields:
                                              XCoord (float, meters), YCoord (float, meters), FID (int, unique id).
    :param historical_suitability_raster:     string. Full path to the suitability raster containing values from
                                              0.0 to 1.0 for each 1 km grid cell of the target state.
    :param base_rural_pop_raster:             string. Raster of rural population counts per 1 km grid cell for
                                              the historical base time step.
    :param base_urban_pop_raster:             string. Raster of urban population counts per 1 km grid cell for
                                              the historical base time step.
    :param projected_population_file:         string. CSV file of population projections per year separated into
                                              urban and rural categories. Required fields: Year (int, four digit),
                                              UrbanPop (float), RuralPop (float), Scenario (str).
    :param one_dimension_indices_file:        string. Text file structured as a Python list (e.g. [0, 1]) holding
                                              the index of each target-state grid cell in the flattened raster.
    :param output_directory:                  string. Directory where outputs and the log file will be written.
    :param alpha_urban:                       float. Degree to which the population size of surrounding cells
                                              translates into the suitability of a focal cell (urban).
                                              Acceptable range: -2.0 to 2.0
    :param beta_urban:                        float. Significance of distance to surrounding cells on the
                                              suitability of a focal cell (urban). Acceptable range: -0.5 to 2.0
    :param alpha_rural:                       float. As ``alpha_urban`` but for the rural land class.
    :param beta_rural:                        float. As ``beta_urban`` but for the rural land class.
    :param scenario:                          string. Scenario name with no spaces; must match the value in
                                              `projected_population_file` when projections come from a file.
    :param state_name:                        string. Target state name with no spaces separated by an underscore.
    :param historic_base_year:                int. Four digit historic base year.
    :param projection_year:                   int. Four digit year to process for the projection.
    :param rural_pop_proj_n:                  float. Rural population projection count for the projected year;
                                              alternatively read from `projected_population_file`.
    :param urban_pop_proj_n:                  float. Urban population projection count for the projected year;
                                              alternatively read from `projected_population_file`.
    :param calibration_urban_year_one_raster: string. Calibration only: urban raster for calibration year one.
    :param calibration_urban_year_two_raster: string. Calibration only: urban raster for calibration year two.
    :param calibration_rural_year_one_raster: string. Calibration only: rural raster for calibration year one.
    :param calibration_rural_year_two_raster: string. Calibration only: rural raster for calibration year two.
    :param kernel_distance_meters:            float. Distance kernel in meters; default 100,000 meters.
    :param write_raster:                      boolean. Optionally export raster output; Default True
    :param write_csv:                         boolean. Optionally export raster as a CSV file without nodata values
    :param write_array2d:                     boolean. Optionally export a NumPy 2D array for each output in the
                                              shape of the template raster
    :param write_array1d:                     boolean. Optionally export a NumPy 1D flattened array of only grid
                                              cells within the target state
    :param run_number:                        int. Add on for the file name when running sensitivity analysis
    :param write_logfile:                     boolean. Optionally write log to file; Default True
    :param compress_csv:                      boolean. Optionally compress CSV file to GZIP if outputting in CSV
    :param output_total:                      boolean. Choice to output total (urban + rural) dataset; Default True
    :param write_suitability:                 boolean. Optionally write suitability grids to file.

    The ``pass_one_*`` / ``pass_two_*`` bounds and ``brute_n_alphas`` /
    ``brute_n_betas`` control the parameter search space of the two-pass
    brute-force calibration.
    """

    # format for datetime string
    DATETIME_FORMAT = '%Y-%m-%d_%Hh%Mm%Ss'

    # key names from YAML config file
    OUT_DIR_KEY = 'output_directory'
    START_STEP_KEY = 'start_step'
    THROUGH_STEP_KEY = 'through_step'
    ALPHA_KEY = 'alpha_param'
    BETA_KEY = 'beta_param'

    # definition of acceptable range of values for parameters
    MAX_PARAM_VALUE = 10.0
    MIN_PARAM_VALUE = -10.0

    def __init__(self, config_file=None, grid_coordinates_file=None, historical_suitability_raster=None,
                 base_rural_pop_raster=None, base_urban_pop_raster=None, projected_population_file=None,
                 one_dimension_indices_file=None, output_directory=None, alpha_urban=None, beta_urban=None,
                 alpha_rural=None, beta_rural=None, scenario=None, state_name=None, historic_base_year=None,
                 projection_year=None, rural_pop_proj_n=None,
                 urban_pop_proj_n=None, calibration_urban_year_one_raster=None, calibration_urban_year_two_raster=None,
                 calibration_rural_year_one_raster=None, calibration_rural_year_two_raster=None,
                 kernel_distance_meters=None, write_raster=True, write_csv=False, write_array1d=False,
                 write_array2d=False, run_number='', write_logfile=True, compress_csv=True, output_total=True,
                 write_suitability=False, pass_one_alpha_upper=1.0, pass_one_alpha_lower=-1.0,
                 pass_one_beta_upper=1.0, pass_one_beta_lower=0.0, pass_two_alpha_upper=2.0, pass_two_alpha_lower=-2.0,
                 pass_two_beta_upper=2.0, pass_two_beta_lower=-0.5, brute_n_alphas=10, brute_n_betas=5):

        self._config_file = config_file
        self._alpha_urban = alpha_urban
        self._alpha_rural = alpha_rural
        self._beta_urban = beta_urban
        self._beta_rural = beta_rural
        self._kernel_distance_meters = kernel_distance_meters
        self._output_directory = output_directory

        self.grid_coordinates_file = self.validate_file(grid_coordinates_file)
        self.grid_coordinates_array = self.get_grid_coordinates_array()

        # Full path with file name and extension to the suitability raster containing values from 0.0 to 1.0
        # for each 1 km grid cell representing suitability depending on topographic and land use and land cover
        # characteristics within the target state.
        self.historical_suitability_raster = self.validate_file(historical_suitability_raster)

        self.base_rural_pop_raster = self.validate_file(base_rural_pop_raster)
        self.base_urban_pop_raster = self.validate_file(base_urban_pop_raster)
        self._projected_population_file = projected_population_file
        self._one_dimension_indices_file = one_dimension_indices_file

        # Target scenario name.  NOTE: assumes ``scenario`` and ``state_name``
        # are always provided (a None here raises AttributeError).
        self.scenario = scenario.lower()

        # Target state name
        self.state_name = state_name.lower()

        # Four digit historic base year
        self.historic_base_year = self.validate_step(historic_base_year, 'historic_base_year')

        # Four digit first year to process for the projection
        self.projection_year = self.validate_step(projection_year, 'projection_year')

        self._rural_pop_proj_n = rural_pop_proj_n
        self._urban_pop_proj_n = urban_pop_proj_n

        # Optionally save outputs to a raster
        self.write_raster = write_raster

        # Optionally export raster as a CSV file without nodata values; option set to compress CSV using gzip.
        # Exports values for non-NODATA grid cells as field name `value`
        self.write_csv = write_csv

        # Optionally save outputs to a 1D array for cells within the target state
        self.write_array1d = write_array1d

        # Optionally save outputs to a 2D array in the shape of the template raster
        self.write_array2d = write_array2d

        # An integer add on for the file name when running sensitivity analysis
        self.run_number = run_number

        # Optionally write log outputs to a file
        self.write_logfile = write_logfile

        # Compress CSV to GZIP option
        self.compress_csv = compress_csv

        # Choice to output total dataset (urban + rural)
        self.output_total = output_total

        self.write_suitability = write_suitability

        # specific to calibration run
        self.calibration_urban_year_one_raster = calibration_urban_year_one_raster
        self.calibration_urban_year_two_raster = calibration_urban_year_two_raster
        self.calibration_rural_year_one_raster = calibration_rural_year_one_raster
        self.calibration_rural_year_two_raster = calibration_rural_year_two_raster
        self.pass_one_alpha_upper = pass_one_alpha_upper
        self.pass_one_alpha_lower = pass_one_alpha_lower
        self.pass_one_beta_upper = pass_one_beta_upper
        self.pass_one_beta_lower = pass_one_beta_lower
        self.pass_two_alpha_upper = pass_two_alpha_upper
        self.pass_two_alpha_lower = pass_two_alpha_lower
        self.pass_two_beta_upper = pass_two_beta_upper
        self.pass_two_beta_lower = pass_two_beta_lower
        self.brute_n_alphas = brute_n_alphas
        self.brute_n_betas = brute_n_betas

        # get a copy of the raster metadata from a states input raster
        self.template_raster_object, self.metadata = utils.get_raster_with_metadata(self.historical_suitability_raster)

        # import population projection file if exists
        self.df_projected = self.process_df_projected()

        # Get a bounding box from the historical raster
        self.bbox = utils.create_bbox(self.template_raster_object)

        # Get a current time in a string matching the specified datetime format
        self.date_time_string = datetime.datetime.now().strftime(self.DATETIME_FORMAT)

        # Convenience wrapper for the DATETIME_FORMAT class attribute
        self.datetime_format = self.DATETIME_FORMAT

        # Validate output directory
        self.output_directory = self.set_output_directory()

        # Full path with file name and extension to the logfile
        self.logfile = os.path.join(self.output_directory, f'logfile_{self.scenario}_{self.state_name}_{self.date_time_string}.log')

    @property
    def alpha_urban(self):
        """Alpha urban parameter for model."""
        return self.validate_parameter(self._alpha_urban, 'alpha_urban')

    @alpha_urban.setter
    def alpha_urban(self, value):
        """Setter for alpha urban parameter."""
        self._alpha_urban = self.validate_parameter(value, 'alpha_urban')

    @property
    def alpha_rural(self):
        """Alpha rural parameter for model."""
        return self.validate_parameter(self._alpha_rural, 'alpha_rural')

    @alpha_rural.setter
    def alpha_rural(self, value):
        """Setter for alpha rural parameter."""
        self._alpha_rural = self.validate_parameter(value, 'alpha_rural')

    @property
    def beta_urban(self):
        """Beta urban parameter for model."""
        return self.validate_parameter(self._beta_urban, 'beta_urban')

    @beta_urban.setter
    def beta_urban(self, value):
        """Setter for beta urban parameter."""
        self._beta_urban = self.validate_parameter(value, 'beta_urban')

    @property
    def beta_rural(self):
        """Beta rural parameter for model."""
        return self.validate_parameter(self._beta_rural, 'beta_rural')

    @beta_rural.setter
    def beta_rural(self, value):
        """Setter for beta rural parameter."""
        self._beta_rural = self.validate_parameter(value, 'beta_rural')

    @property
    def kernel_distance_meters(self):
        """Distance kernel in meters; default 100,000 meters."""
        return self.validate_float(self._kernel_distance_meters)

    @kernel_distance_meters.setter
    def kernel_distance_meters(self, value):
        """Setter for kernel_distance_meters."""
        self._kernel_distance_meters = value

    def process_df_projected(self):
        """Load the population projection file when per-year counts were not
        passed directly; returns None otherwise."""
        if (self.urban_pop_proj_n is None) and (self.rural_pop_proj_n is None):
            df = pd.read_csv(self.projected_population_file)

            # make header lower case
            df.columns = [i.lower() for i in df.columns]

            # make scenario column lower case
            df['scenario'] = df['scenario'].str.lower()
            return df
        else:
            return None

    def set_output_directory(self):
        """Validate output directory from either the argument or the YAML key."""
        if self.config is None:
            return self.validate_directory(self._output_directory)
        else:
            key = self.validate_key(self.config, self.OUT_DIR_KEY)
            return self.validate_directory(key)

    @property
    def historical_suitability_2darray(self):
        """Read in historical suitability mask as an array"""
        return utils.raster_to_array(self.historical_suitability_raster)

    @property
    def historical_suitability_array(self):
        """Flatten historical suitability array."""
        return self.historical_suitability_2darray.flatten()

    @property
    def df_indicies(self):
        """Build data frame in the shape of the raster array."""
        return utils.all_index_retriever(self.historical_suitability_2darray, ["row", "column"])

    @property
    def one_dimension_indices_file(self):
        """File that describes grid indices of points that fall within the state boundary."""
        return self.validate_file(self._one_dimension_indices_file)

    @property
    def one_dimension_indices(self):
        """Grid indices for the state to an array."""
        with open(self.one_dimension_indices_file, 'r') as r:
            return simplejson.load(r)

    def get_grid_coordinates_array(self):
        """Grid coordinates to array, sorted and indexed by FID."""
        df = pd.read_csv(self.grid_coordinates_file)
        df.sort_values('FID', inplace=True)
        df.set_index('FID', drop=False, inplace=True)
        df.index.name = None
        df = df[['XCoord', 'YCoord', 'FID']].copy()
        return df.values

    @property
    def urban_pop_proj_n(self):
        """Urban population projection count for the projected year being calculated. These can be read from
        the `projected_population_file` instead.
        """
        return self.validate_float(self._urban_pop_proj_n)

    @property
    def rural_pop_proj_n(self):
        """Rural population projection count for the projected year being calculated. These can be read from
        the `projected_population_file` instead.
        """
        return self.validate_float(self._rural_pop_proj_n)

    @property
    def projected_population_file(self):
        """Full path with file name and extension to a CSV file containing population projections per year
        separated into urban and rural categories.
        """
        return self.validate_file(self._projected_population_file)

    @property
    def config(self):
        """Read the YAML config file object"""
        if self._config_file is None:
            return None
        else:
            with open(self._config_file, 'r') as yml:
                # safe_load: config files contain plain data only; the previous
                # Loader-less yaml.load is deprecated and can execute arbitrary tags.
                return yaml.safe_load(yml)

    @property
    def template_raster(self):
        """Generate template raster specifications.

        :return:            [0] 2D array of template raster values
                            [1] 1D flattened array
                            [2] row count
                            [3] column count
                            [4] profile
        """
        with rasterio.open(self.historical_suitability_raster) as src_raster:
            profile = src_raster.profile
            array2d = src_raster.read(1)
            row_count = array2d.shape[0]
            col_count = array2d.shape[1]
            array1d = array2d.flatten()

        return array2d, array1d, row_count, col_count, profile

    def validate_parameter(self, param, key):
        """Validate parameter existence and range.

        :param param:       Parameter value
        :type param:        float

        :param key:         Configuration key from YAML file
        :type key:          str

        :return:            float; parameter
        """
        if self.config is None:
            is_float = self.validate_float(param)
            return self.validate_range(is_float)
        else:
            is_key = self.validate_key(self.config, key)
            is_float = self.validate_float(is_key)
            return self.validate_range(is_float)

    def validate_range(self, value):
        """Ensure value falls within an acceptable range."""
        if (value >= self.MIN_PARAM_VALUE) and (value <= self.MAX_PARAM_VALUE):
            return value
        else:
            raise ValueError(f"Parameter value '{value}' is not within the valid range of {self.MIN_PARAM_VALUE} - {self.MAX_PARAM_VALUE}.")

    @staticmethod
    def validate_float(val):
        """Ensure parameter value is castable to float; None passes through."""
        if val is not None:
            try:
                return float(val)
            # float("abc") raises ValueError, not TypeError; the original only
            # caught TypeError so bad strings escaped with an unhelpful error.
            except (TypeError, ValueError):
                raise TypeError(f"Parameter value '{val}' is not a float.")
        else:
            return None

    @staticmethod
    def validate_directory(directory):
        """Validate directory to ensure it exists.

        :param directory:   Full path to the target directory.
        :type directory:    str

        :return:            Full path of a valid directory; None when no
                            directory was provided.
        """
        if (directory is not None) and (os.path.isdir(directory)):
            return directory
        # Bug fix: the original tested ``os.path.isdir(directory is False)``
        # which is always False, so nonexistent directories silently fell
        # through to ``return None`` instead of raising.
        elif (directory is not None) and (os.path.isdir(directory) is False):
            raise NotADirectoryError(f"Directory: {directory} does not exist.")
        else:
            return None

    @staticmethod
    def validate_file(file):
        """Validate file to ensure it exists.

        :param file:        Full path to the target file.
        :type file:         str

        :return:            Full path of a valid file; None when no file was provided.
        """
        if (file is not None) and (os.path.isfile(file)):
            return file
        elif (file is not None) and (os.path.isfile(file) is False):
            raise FileNotFoundError(f"File: {file} does not exist.")
        else:
            return None

    def validate_step(self, step, key):
        """Validate step existence and value.

        :param step:        Time step value
        :type step:         int

        :param key:         Configuration key from YAML file
        :type key:          str

        :return:            int; time step
        """
        if self.config is None:
            return self.validate_int(step)
        else:
            is_key = self.validate_key(self.config, key)
            return self.validate_int(is_key)

    @staticmethod
    def validate_int(n):
        """Ensure value is castable to int; None passes through."""
        if n is not None:
            try:
                return int(n)
            # int("abc") raises ValueError, not TypeError (see validate_float).
            except (TypeError, ValueError):
                raise TypeError(f"Value '{n}' is not an integer.")
        else:
            return None

    @staticmethod
    def validate_key(yaml_object, key):
        """Check to see if key is in YAML file, if not return None.

        :param yaml_object: YAML object for the configuration file

        :param key:         Target key name from the configuration file.
        :type key:          str

        :return:            Value from configuration file matching the key. If no key present,
                            return None.
        """
        try:
            return yaml_object[key]
        except KeyError:
            return None

    @staticmethod
    def get_yaml(config_file):
        """Read the YAML config file

        :param config_file: Full path with file name and extension to the input config.yml file

        :return:            YAML config object
        """
        with open(config_file, 'r') as yml:
            # safe_load: see the ``config`` property.
            return yaml.safe_load(yml)
| IMMM-SFA/population_gravity | population_gravity/read_config.py | read_config.py | py | 26,485 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "population_gravity.downscale_utilities.get_raster_with_metadata",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "population_gravity.downscale_utilities",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "population_gravity.downscale_utilities.c... |
import numpy as np


def binarize_mask(gray):
    """Convert a grayscale area image into a 0/1 mask.

    Pixels brighter than 200 (outside the area of interest) become 0 and all
    others become 1, matching the original per-pixel loop — but vectorized,
    and generalized to any image shape instead of the hard-coded 825x200 grid.
    """
    return np.where(gray > 200, 0, 1).astype(gray.dtype)


def build_ice_mask(radar, mask, threshold=0.53):
    """Threshold radar frames into binary ice maps and apply the area mask.

    :param radar:       array of shape (frames, H, W) of radar values
    :param mask:        (H, W) array of 0/1 area weights (see binarize_mask)
    :param threshold:   value above which a cell counts as ice (0.53, as in
                        the original script)
    :return:            array of the same shape/dtype as ``radar`` with 0/1 values
    """
    # Replaces the original triple Python loop (O(frames * H * W) interpreted
    # iterations) with two vectorized NumPy operations.
    ice = (radar > threshold).astype(radar.dtype)
    return ice * mask


if __name__ == '__main__':
    # PIL is only needed when running as a script, so import it lazily here.
    from PIL import Image

    radar = np.load('./origin_data/radars_2020-11-01_2022-12-31.npy')
    color_image = Image.open('./cool_data/mask.png')
    mask = binarize_mask(np.array(color_image.convert('L')))
    ice = build_ice_mask(np.array(radar), mask)
    np.save('./cool_data/ice_mask.npy', ice)
| Ronningen/DDIN1 | ice_mask_generation.py | ice_mask_generation.py | py | 720 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 7,
... |
43627572834 | from typing import List
class Solution:
    def maxBoxesInWarehouse(self, boxes: List[int], warehouse: List[int]) -> int:
        """Greedy two-pointer placement.

        Try the largest remaining box at whichever open end of the warehouse
        it fits under; smaller boxes always fit wherever a larger one did, so
        processing boxes in decreasing size is optimal.
        """
        boxes.sort()
        left, right = 0, len(warehouse) - 1
        placed = 0
        for box in reversed(boxes):
            if box <= warehouse[left]:
                left += 1
                placed += 1
            elif box <= warehouse[right]:
                right -= 1
                placed += 1
            if left > right:
                break
        return placed

    def test(self):
        """Run the sample cases and print each result."""
        cases = [
            ([1, 2, 2, 3, 4], [3, 4, 1, 2]),
            ([3, 5, 5, 2], [2, 1, 3, 4, 5]),
            ([1, 2, 3], [1, 2, 3, 4]),
            ([4, 5, 6], [3, 3, 3, 3, 3]),
        ]
        for boxes, warehouse in cases:
            res = self.maxBoxesInWarehouse(boxes, warehouse)
            print('res: %s' % res)
            print('-=' * 30 + '-')
if __name__ == '__main__':
    # Run the sample cases when executed directly.
    Solution().test()
| MichaelTQ/LeetcodePythonProject | solutions/leetcode_1551_1600/LeetCode1580_PutBoxesIntoTheWarehouseII.py | LeetCode1580_PutBoxesIntoTheWarehouseII.py | py | 918 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.