seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9388340974 | """Functions and constants used in several modules of the gtphipsi package.
This module exports the following functions:
- get_name_from_badge (badge)
- get_all_big_bro_choices ()
- create_user_and_profile (form_data)
- log_page_view (request, name)
This module exports the following constant definitions:
- REFERRER
"""
import logging
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from gtphipsi.brothers.bootstrap import INITIAL_BROTHER_LIST
from gtphipsi.brothers.models import User, UserProfile, VisibilitySettings
log = logging.getLogger('django.request')
# The literal name of the HTTP Referrer header. The typo below in 'referrer' is intentional.
REFERRER = 'HTTP_REFERER'
def get_name_from_badge(badge):
    """Return a brother's first and last name given his badge number, assuming he doesn't have an account."""
    # Badge 0 is not a valid entry, and anything past the bootstrap list is unknown.
    if 0 < badge < len(INITIAL_BROTHER_LIST):
        return INITIAL_BROTHER_LIST[badge][1]
    return None
def get_all_big_bro_choices():
    """Return a list of tuples (in the format (badge, name)) of all possible big brothers."""
    # Copy the bootstrap data: the original code aliased INITIAL_BROTHER_LIST
    # (under the name 'list', shadowing the builtin) and appended to it in
    # place, permanently growing the shared module-level list on every call.
    choices = list(INITIAL_BROTHER_LIST)
    # Profiles with badges beyond the bootstrap list are brothers who joined
    # after the initial import; add them in badge order.
    for profile in UserProfile.objects.filter(badge__gte=len(INITIAL_BROTHER_LIST)).order_by('badge'):
        tup = (profile.badge, profile.common_name())
        if tup not in choices:
            choices.append(tup)
    return choices
def create_user_and_profile(form_data):
    """Create and save a new User and UserProfile from the cleaned_data dictionary of a UserForm instance."""
    status = form_data['status']
    # create and save the User instance
    user = User.objects.create_user(form_data['username'], form_data['email'], form_data['password'])
    user.first_name = form_data['first_name']
    user.last_name = form_data['last_name']
    # 'A' presumably marks alumni, so any other status is treated as an
    # undergraduate for permission purposes -- TODO confirm against the form.
    _create_user_permissions(user, status != 'A', form_data['make_admin'])
    user.save()
    # create and save the UserProfile instance
    public, chapter = _create_visibility_settings()
    # NOTE(review): objects.create() already saves the row, so the
    # profile.save() below is a redundant second write.
    profile = UserProfile.objects.create(user=user, middle_name=form_data['middle_name'], suffix=form_data['suffix'],
                                         nickname=form_data['nickname'], badge=form_data['badge'], status=status,
                                         big_brother=int(form_data['big_brother']), major=form_data['major'],
                                         hometown=form_data['hometown'], current_city=form_data['current_city'],
                                         phone=form_data['phone'], initiation=form_data['initiation'],
                                         graduation=form_data['graduation'], dob=form_data['dob'],
                                         public_visibility=public, chapter_visibility=chapter)
    profile.save()
def log_page_view(request, name):
    """Log a view to the specified page (view), including information about the client viewing the page."""
    method = request.method
    path = request.path
    # Fields whose values must never appear in the log output.
    sensitive = ('csrfmiddlewaretoken', 'password', 'confirm', 'old_pass', 'secret_key', 'admin_password')
    if method == 'POST':
        pieces = [', POST Data: { ']
        for key, value in request.POST.iteritems():
            if key not in sensitive:
                pieces.append('%s: \'%s\', ' % (key, unicode(value)))
        pieces.append('}')
        post = ''.join(pieces)
    else:
        post = ''
    if request.user.is_authenticated():
        profile = request.user.get_profile()
        client_string = ' User: %s (%s ... %d),' % (request.user.username, profile.common_name(), profile.badge)
    else:
        client_string = ''
    user_agent = request.META['HTTP_USER_AGENT'] if 'HTTP_USER_AGENT' in request.META else '<not supplied>'
    log.debug('[%s]%s Request: %s %s%s, User Agent: %s' % (name, client_string, method, path, post, user_agent))
## ============================================= ##
## ##
## Private Functions ##
## ##
## ============================================= ##
def _create_user_permissions(user, undergrad, admin):
    """Add a new user to the appropriate permissions group(s).

    ``undergrad`` chooses between the 'Undergraduates' and 'Alumni' groups;
    ``admin`` additionally adds the user to the 'Administrators' group.
    Each group is created lazily, receiving its permission set (from the
    corresponding settings constant) only the first time it is created.
    """
    def _add_to_group(name, codenames):
        # One place for the get-or-create-then-join dance the original
        # repeated three times verbatim.
        group, created = Group.objects.get_or_create(name=name)
        if created:
            group.permissions = Permission.objects.filter(codename__in=codenames)
            group.save()
        user.groups.add(group)

    if undergrad:
        _add_to_group('Undergraduates', settings.UNDERGRADUATE_PERMISSIONS)
    else:
        _add_to_group('Alumni', settings.ALUMNI_PERMISSIONS)
    if admin:
        _add_to_group('Administrators', settings.ADMINISTRATOR_PERMISSIONS)
def _create_visibility_settings():
    """Create default public and chapter visibility settings for a new user profile."""
    fields = ('full_name', 'big_brother', 'major', 'hometown', 'current_city',
              'initiation', 'graduation', 'dob', 'phone', 'email')
    # Defaults: everything hidden from the public, everything visible to the chapter.
    public_visibility = VisibilitySettings.objects.create(**dict((f, False) for f in fields))
    public_visibility.save()
    chapter_visibility = VisibilitySettings.objects.create(**dict((f, True) for f in fields))
    chapter_visibility.save()
    return public_visibility, chapter_visibility
| will2dye4/gtphipsi | common.py | common.py | py | 5,867 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gtphipsi.brothers.bootstrap.INITIAL_BROTHER_LIST",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "gtphipsi.brothers.bootstrap.INITIAL_BROTHER_LIST",
"line_number": 36,
... |
27391300473 | # flake8: NOQA;
import os
import sys
from collections.abc import Generator
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
# Resolve the project root (the parent of this tests/ directory) and put its
# src/ folder on sys.path so the application modules below can be imported.
current: str = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(current, "src"))
from database import Database
from main import create_app
@pytest.fixture(scope="session")
def database() -> Generator:
    """Session-scoped fixture yielding a Database bound to TEST_DATABASE_URL."""
    test_db = Database(database_url=os.getenv("TEST_DATABASE_URL"))
    yield test_db
@pytest.fixture
def app(database) -> Generator:
    """Yield a FastAPI app wired to the test database; the schema is created before the test and dropped afterwards."""
    application: FastAPI = create_app()
    application.container.db.override(database)
    database.create_database()
    yield application
    database.drop_database()
@pytest.fixture
def client(app) -> Generator:
    """Yield a TestClient for the app; the with-block runs the app's lifespan events."""
    with TestClient(app) as test_client:
        yield test_client
@pytest.fixture
def db_session(database):
    # Expose the Database's session attribute directly; presumably a session
    # factory or scoped session -- TODO confirm against the Database class.
    return database.session
| ebysofyan/dcentric-health-hometest | chatroom-backend/tests/conftest.py | conftest.py | py | 868 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"... |
73111312187 | from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter, NLTKTextSplitter
import glob
import os
from transformers import AutoModel, AutoTokenizer
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
load_dotenv()

# Configuration comes from the environment (.env): model paths and where the
# Chroma vector store is persisted. llm_model_name_or_path is read but unused here.
llm_model_name_or_path = os.environ.get("LLM_MODEL_NAME_OR_PATH")
embedding_model_name_or_path = os.environ.get("EMBEDDING_MODEL_NAME_OR_PATH")
vectorstore_persist_directory = os.environ.get("VECTORSTORE_PERSIST_DIRECTORY")
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
embedding = HuggingFaceEmbeddings(model_name=embedding_model_name_or_path)
text_splitter = CharacterTextSplitter()
# Ingest every .txt file under source_documents/, recursively.
file_paths = glob.glob("./source_documents/**/*.txt", recursive=True)
documents = []
for file_path in file_paths:
    print(f"{file_path}: Loading")
    loader = TextLoader(file_path, autodetect_encoding=True)
    docs = loader.load()
    print(f"{file_path}: Splitting")
    # text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(tokenizer=tokenizer)
    # text_splitter = NLTKTextSplitter()
    docs = text_splitter.split_documents(docs)
    documents.extend(docs)
# page_contents = []
# page_metadatas = []
# for document in texts:
#     page_contents.append(document.page_content)
#     page_metadatas.append(document.metadata)
# vectors = embedding.embed_documents(texts=page_contents)
# Embed every chunk and persist the vector store to disk.
print(f"(ALL): Embedding and saving")
db = Chroma(persist_directory=vectorstore_persist_directory, embedding_function=embedding)
db.add_documents(documents=documents)
db.persist()
print(f"(ALL): Done") | shaunxu/try-langchain | injest.py | injest.py | py | 1,703 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
73798071227 | from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import HttpResponse, HttpResponseRedirect, QueryDict
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View, TemplateView
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.core.mail import send_mail
from maracay.backEnd import backStart, profileBackend, filterProducts, adminSite
from django.shortcuts import render
from django.core.cache import cache
from django.conf import settings
from threading import Thread
from maracay.models import Tools, Profile as ProfileDB, PurchaseConfirmation, TokenPassword, PagosImagenes, purchaseHistory, Product, DolarBolivar
from maracay import get_client_ip, config, formatoBolivares
import json,random, string
from django.contrib import admin
import os
from maracay.sendinblue import sendinblue_send
from django.core.files.storage import FileSystemStorage
import base64
from datetime import datetime
import os,stat
from django.core.files.base import ContentFile
import xlrd
from maracay.task import help_form,forgot_pass
# Create your views here.
class GoogleVerificacion(TemplateView):
    """Serve the static Google site-verification page."""
    def get(self, request, *args, **kwargs):
        return render(request, 'market/googlebebc5688f09bbff0.html',{})
#Main Class
class Maracay(TemplateView):
    """Main storefront view: renders the index page with a paginated product list."""
    template_name = 'market/index.html'
    # index
    def get(self, request, *args, **kwargs):
        # Fetch all products through the backStart backend helper.
        _allproducts = backStart(request)
        _allproducts.get()
        if 'pagination' not in request.GET:
            data = _allproducts.response_data
            data['code'] = _allproducts.code
            contact_list = data['cantTotal']
            paginator = Paginator(contact_list, 12)  # show 12 products per page
            page = request.GET.get('page')
            contacts = paginator.get_page(page)
            formatoBolivares(contacts)  # format prices in bolivares
            direction = '/static/images/upload/imagesp/'
            return render(request, 'market/index.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
        # NOTE(review): when 'pagination' IS in request.GET this method falls
        # through and returns None (an implicit 500); the old AJAX branch
        # survives only as the dead string literal below.
        '''else:
            print ("22222")
            data = _allproducts.response_data
            data['code'] = _allproducts.code
            contact_list = data['cantTotal']
            paginator = Paginator(contact_list, 10) # Show 25 contacts per page
            page = request.GET.get('page')
            contacts = paginator.get_page(page)
            dataAll = {'contacts':contacts}
            return HttpResponse(json.dumps(dataAll, cls=DjangoJSONEncoder), content_type='application/json')'''
    # post (not implemented)
    def post(self, request, *args, **kwargs):
        pass
class Account(View):
    """Account page: profile data for authenticated users, registration form otherwise."""
    def get(self, request, *args, **kwargs):
        if str(request.user) != 'AnonymousUser':  # logged in: show the user's own data
            _accountData = profileBackend(request)
            _accountData.accountData()
            data = _accountData.response_data
            return render(request, 'market/account.html', {'data':data['data']})
        else:  # anonymous: registration
            return render(request, 'market/register.html', {})
class Login(View):
    """Handle login POSTs; a user with an active session cannot log in again."""
    def __init__(self):
        # Fields that must be present in every login POST.
        self.requireds = ['email', 'password', 'csrfmiddlewaretoken']
    def post(self, request, *args, **kwargs):
        # __ip = get_client_ip(request)
        for key in self.requireds:
            if not key in request.POST.keys():
                return HttpResponse(status=400, content_type='application/json')
        for session in Session.objects.filter(session_key=request.session.session_key):
            if session:
                # Cannot log in: this browser already has an active session.
                return HttpResponse(json.dumps({'code':400,'message':'Ya tiene una sesiòn activa'}, cls=DjangoJSONEncoder), content_type='application/json')
        # if cache.get('cache_ip__%s'%__ip):
        #     return HttpResponse(json.dumps({'code':400,'message':'Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
        user = authenticate(username=request.POST['email'], password=request.POST['password'])
        if user:
            # NOTE(review): clears the ENTIRE cache on every successful login,
            # presumably to reset the (disabled) throttling below -- verify intent.
            cache.clear()
            login(request, user)
            return HttpResponse(json.dumps({'code':200}, cls=DjangoJSONEncoder), content_type='application/json')
        else:
            return HttpResponse(json.dumps({'code':400,'message':'Intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
        # Disabled per-IP rate limiting kept for reference:
        # __cache_count_error = cache.get('cache_error__%s'%__ip)
        # __cache_exist = cache.get('cache_ip__%s'%__ip)
        # if __cache_exist:
        #     return HttpResponse(json.dumps({'code':400,'message':'Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
        # else:
        #     if __cache_count_error:
        #         if __cache_count_error == 1:
        #             cache.set('cache_error__%s'%__ip,1+1,60)
        #             return HttpResponse(json.dumps({'code':400,'message':'Segundo intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
        #         elif __cache_count_error == 2:
        #             cache.set('cache_ip__%s'%__ip,__ip,300)
        #             return HttpResponse(json.dumps({'code':400,'message':'Tercer intento fallido/Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
        #     else:
        #         cache.set('cache_error__%s'%__ip,1,60)
        #         return HttpResponse(json.dumps({'code':400,'message':'Primer intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
class Logout(View):
    """Log the user out, then re-render the storefront index (duplicates Maracay.get)."""
    def get(self, request, *args, **kwargs):
        logout(request)
        _allproducts = backStart(request)
        _allproducts.get()
        if 'pagination' not in request.GET:
            data = _allproducts.response_data
            data['code'] = _allproducts.code
            contact_list = data['cantTotal']
            paginator = Paginator(contact_list, 12)  # show 12 products per page
            page = request.GET.get('page')
            contacts = paginator.get_page(page)
            formatoBolivares(contacts)  # format prices in bolivares
            direction = '/static/images/upload/imagesp/'
            return render(request, 'market/index.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
        # NOTE(review): returns None (implicit 500) when 'pagination' is in request.GET.
class Profile(View):
    """Create (POST) and update (PUT) a user account and its profile."""
    def get(self, request, *args, **kwargs):
        # Placeholder: GET is not implemented and returns None.
        print ("Profile")
    # user creation; on success the new user is logged in immediately
    def post(self, request, *args, **kwargs):
        _newUser = profileBackend(request)
        _newUser.post()
        data = _newUser.response_data
        data['code'] = _newUser.code
        user = authenticate(username=request.POST['email'], password=request.POST['password'])
        if user:login(request, user)
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
    def put(self, request, *args, **kwargs):
        # Django does not populate POST for PUT requests; re-parse the body by hand.
        request.POST=QueryDict(request.read())
        try:
            data = {'code':200}
            # flagProfileonly == 'false' updates name/phone/rif on both User and
            # profile; otherwise only the delivery fields on the profile change.
            if request.POST['flagProfileonly'] == 'false':
                dataUser = User.objects.get(pk=int(request.POST['user']))
                dataUser.first_name=request.POST['name']
                dataUser.last_name=request.POST['lastname']
                dataProfile = ProfileDB.objects.get(user=dataUser.id)
                dataProfile.phone=request.POST['phone']
                dataProfile.rif=request.POST['rif']
                dataUser.save()
                dataProfile.save()
            else:
                dataProfile = ProfileDB.objects.get(user=User.objects.get(pk=int(request.POST['user'])))
                dataProfile.direction=request.POST['direction']
                dataProfile.localphone=request.POST['localphone']
                dataProfile.reference=request.POST['reference']
                dataProfile.save()
            return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
        except Exception as e:
            print ("Profile",e)
            data = {'code':500}
            return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
#Seccion de Administrador
def AllProductsAdminTable(request):
    """Return the complete admin product table as JSON."""
    # To restrict this to admins, add `and request.user.is_superuser==True`
    # to the commented check below.
    # if str(request.user) != 'AnonymousUser' :
    _allproductstable = adminSite(request)
    _allproductstable.allProductsTable()
    data = _allproductstable.response_data
    print("data",data)
    # data = {"a":"a"}
    return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
    # else:
    #     return render(request, 'market/adminIndex.html', {})
class ControlAdmin(View):
    """Admin area: management dashboard (GET) and Excel upload processing (POST)."""
    def get(self, request, *args, **kwargs):
        try:
            # Only superusers may see the management dashboard.
            if str(request.user) != 'AnonymousUser' and request.user.is_superuser==True:
                _allproductsfilter = adminSite(request)
                _allproductsfilter.dataProductUser()
                lista_template = ['productos','cotizacion','precios','inventario']
                data = _allproductsfilter.response_data
                data['code'] = _allproductsfilter.code
                contact_list = data['cantTotal']
                # paginator = Paginator(contact_list, 10) # Show 25 contacts per page
                # page = request.GET.get('page')
                # contacts = paginator.get_page(page)
                # dataAll = {'contacts':contacts}
                flag = False
                direction = '/static/images/upload/imagesp/'
                # Mark which dashboard tab was requested via the query string.
                for value in lista_template:
                    # print("value",value)
                    if value in request.GET:
                        flag=True
                        data[value]=True
                if not flag:
                    data['cotizacion']=True  # default tab when none requested
                # print("Data",data)
                return render(request, 'market/admintemplates/adminGestion.html', {'valores':data,'direction':direction,'data':data['data'],'flag':'all'})
                # (translated TODO) send products with name and dollar price plus
                # two empty fields (qty/total) to fill dynamically on the front
                # end; add a filter for filled rows and an export button.
            else:  # not an admin: show the admin login page
                return render(request, 'market/admintemplates/adminIndex.html', {})
        except Exception as e:
            print("ControlAdmin get",e)
    def post(self, request, *args, **kwargs):
        try:
            # The upload arrives as a base64 data URL plus a file name.
            archivo = request.POST.get('archivo')
            nombre_archivo = request.POST.get('nombre_archivo')
            format, imgstr = archivo.split(';base64,')
            ext = format.split('/')[-1]
            data = ContentFile(base64.b64decode(imgstr))
            localtion_save = settings.MEDIA_ROOT
            fs = FileSystemStorage(location=localtion_save)
            fs.save(nombre_archivo, data)
            # Open the uploaded Excel workbook.
            documento = xlrd.open_workbook(settings.MEDIA_ROOT+'/'+nombre_archivo)
            sheet_excel = documento.sheet_names()
            if request.POST.get('flag'):
                # Inventory mode: report products whose stock is critically low (<= 5).
                if 'INVENTARIO' in sheet_excel:
                    data = {"code":200,"mensaje":"Subido Correctamente"}
                    inventariocritico = []
                    inventariocritico_return = []
                    lista_productos_inventario = documento.sheet_by_index(sheet_excel.index('INVENTARIO'))
                    # print (lista_productos_inventario.row_values(3))
                    # print(lista_productos_inventario.nrows)
                    # Data rows start at row 3; only the first 100 rows are scanned.
                    for i in range(100): #
                        if i !=0 and i>=3:
                            fila = lista_productos_inventario.row(i) #
                            # Cell reprs look like "number:5.0"; parse the value out.
                            stock = int(float(str(fila[5]).split("number:")[1]))
                            if stock <=5:
                                inventariocritico.append([str(fila[1]).split("text:"),str(fila[2]).split("number:"),str(fila[5]).split("number:")])
                    for value in inventariocritico:
                        nombre_producto = value[0][1].replace("'","")
                        cantidad_en_stock_del_producto = round(float(value[2][1]),2)
                        inventariocritico_return.append({"producto":nombre_producto,"stockcritico":cantidad_en_stock_del_producto})
                    print("borrar excel del sistema ")
                    data = {"code":200,"mensaje":"Critico","data":inventariocritico_return}
                    os.remove(settings.MEDIA_ROOT+'/'+nombre_archivo)
                    return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
            else:
                # Price mode: update (or create) products from the CALCULADOR sheet.
                if 'CALCULADOR' in sheet_excel:
                    lista_productos_precios_venta = documento.sheet_by_index(sheet_excel.index('CALCULADOR'))
                    listafinal = []
                    listafinalreal = []
                    for i in range(lista_productos_precios_venta.nrows): #
                        if i !=0:
                            fila = lista_productos_precios_venta.row(i) #
                            listafinal.append([str(fila[1]).split("text:"),str(fila[2]).split("number:"),str(fila[5]).split("number:")])
                    for product_precio in listafinal:
                        nombre_producto = product_precio[0][1].replace("'","")
                        precio_producto = round(float(product_precio[1][1]),2)
                        categoria = round(float(product_precio[2][1]))
                        try:
                            # Existing product: refresh dollar and bolivar prices.
                            producto_para_actualizar = Product.objects.get(name=nombre_producto)
                            producto_para_actualizar.price = precio_producto
                            producto_para_actualizar.pricebs = round((float(precio_producto)*float(DolarBolivar.objects.get().bolivar)),2)
                            producto_para_actualizar.save()
                        except Exception as e:
                            if categoria != 0:
                                # Product does not exist yet: create it.
                                print("No existe y lo creo")
                                actualizado = Product.objects.create(
                                    name=nombre_producto,
                                    price=precio_producto,
                                    category=categoria,
                                    pricebs=round((float(precio_producto)*float(DolarBolivar.objects.get().bolivar)),2))
                                actualizado.save()
                            else:
                                # Skip: category 0 is not valid.
                                print("salta porque no es categoria valida")
                else:
                    data = {"code":500,"mensaje":"Error Verifique el archivo subido"}
                # Remove the uploaded Excel file from disk.
                print("borrar excel del sistema ")
                os.remove(settings.MEDIA_ROOT+'/'+nombre_archivo)
                data = {"code":200,"mensaje":"Subido Correctamente"}
                return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
        except Exception as e1:
            # Best-effort cleanup of the uploaded file before reporting the error.
            print("borrar excel del sistema error")
            try:
                os.remove(settings.MEDIA_ROOT+'/'+nombre_archivo)
                data = {"code":500,"error":"BackEnd "+str(e1)}
                return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
            except Exception as e:
                data = {"code":500,"error":"BackEnd "+str(e1)}
                return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
#Fin de la Seccion de Administrador
def Conditions(request):
    """Render the terms and conditions page."""
    return render(request, 'market/conditions.html', {})
def Help(request):
    """Render the help page."""
    return render(request, 'market/help.html', {})
def We(request):
    """Render the 'about us' page."""
    return render(request, 'market/we.html', {})
def Places(request):
    """Render the locations page."""
    return render(request, 'market/places.html', {})
def Payment(request):
    """Render the payment-information page."""
    return render(request, 'market/payment.html', {})
def Delivery(request):
    """Render the delivery-information page."""
    return render(request, 'market/delivery.html', {})
####CARRITO DE COMPRAS#####
def CartShopping(request):
    """Render the shopping-cart page, pre-filled with contact data when logged in."""
    if str(request.user) != 'AnonymousUser':  # logged in: include profile data
        try:
            dataUser = User.objects.get(email=request.user)
            return render(request, 'market/cartshopping.html', {
                'name':dataUser.first_name,
                'apellido':dataUser.last_name,
                'phone':dataUser.user_profile.phone,
                'direction':dataUser.user_profile.direction,
                'rif':dataUser.user_profile.rif,
                'localphone':dataUser.user_profile.localphone,
                'reference':dataUser.user_profile.reference,
                'code':200
            })
        except Exception as e:
            # Missing user/profile: fall back to the anonymous cart page.
            print ("CartShopping",e)
            return render(request, 'market/cartshopping.html', {})
    else:
        return render(request, 'market/cartshopping.html', {})
#Section Filters
def _render_product_filter(request, filter_method, template, flag_key=None):
    """Run one filterProducts backend method and render its paginated result.

    Consolidates the eight copy-pasted category views below. Behavior matches
    the originals exactly: page size 10, bolivar price formatting, identical
    template context (plus the optional per-category flag key set to 1).
    """
    backend = filterProducts(request)
    getattr(backend, filter_method)()
    data = backend.response_data
    data['code'] = backend.code
    paginator = Paginator(data['cantTotal'], 10)  # 10 products per page
    contacts = paginator.get_page(request.GET.get('page'))
    formatoBolivares(contacts)  # format prices in bolivares
    context = {'direction': '/static/images/upload/imagesp/',
               'contacts': contacts,
               'data': json.dumps(data['data'])}
    if flag_key is not None:
        context[flag_key] = 1
    return render(request, template, context)

def AllProducts(request):
    """List every product."""
    return _render_product_filter(request, 'allProductsFilter', 'market/allProducts.html', 'all')

def ViveresProducts(request):
    """List grocery ('viveres') products."""
    return _render_product_filter(request, 'viveresProductsFilter', 'market/viveresProducts.html', 'viveres')

def ChucheriasProducts(request):
    """List snack ('chucherias') products."""
    return _render_product_filter(request, 'chucheriasProductsFilter', 'market/chucheriaProducts.html', 'chucherias')

def FrigorificoProducts(request):
    """List refrigerated ('frigorifico') products."""
    return _render_product_filter(request, 'frigorificoProductsFilter', 'market/frigorificoProducts.html')

def EnlatadosProducts(request):
    """List canned ('enlatados') products."""
    return _render_product_filter(request, 'enlatadosProductsFilter', 'market/enlatadosProducts.html')

def CharcuteriaProducts(request):
    """List deli ('charcuteria') products."""
    return _render_product_filter(request, 'charcuteriaProductsFilter', 'market/charcuteriaProducts.html', 'charcuteria')

def CarnesProducts(request):
    """List meat ('carnes') products."""
    return _render_product_filter(request, 'carnesProductsFilter', 'market/carnesProducts.html', 'carne')

def PersonalesProducts(request):
    """List personal-care ('personales') products."""
    return _render_product_filter(request, 'personalesProductsFilter', 'market/personalesProducts.html', 'personales')
#Section Filter Prodcuts Admin
def _render_admin_filter(request, filter_method, flag):
    """Run one adminSite backend method and render the admin management page.

    Consolidates the four copy-pasted admin category views below. Anonymous
    users get the admin login page, matching the originals; authenticated
    users get 'market/adminGestion.html' with page size 10 and bolivar
    price formatting.
    """
    if str(request.user) == 'AnonymousUser':
        return render(request, 'market/adminIndex.html', {})
    backend = adminSite(request)
    getattr(backend, filter_method)()
    data = backend.response_data
    data['code'] = backend.code
    paginator = Paginator(data['cantTotal'], 10)  # 10 products per page
    contacts = paginator.get_page(request.GET.get('page'))
    formatoBolivares(contacts)  # format prices in bolivares
    return render(request, 'market/adminGestion.html',
                  {'direction': '/static/images/upload/imagesp/', 'data': contacts, 'flag': flag})

def AllProductsAdmin(request):
    """Admin listing of every product."""
    return _render_admin_filter(request, 'dataProductUser', 'all')

def ViveresProductsAdmin(request):
    """Admin listing of grocery ('viveres') products."""
    return _render_admin_filter(request, 'viveresProductsFilterAdmin', 'vive')

def FrigorificoProductsAdmin(request):
    """Admin listing of refrigerated ('frigorifico') products."""
    return _render_admin_filter(request, 'frigorificoProductsFilterAdmin', 'frigo')

def EnlatadosProductsAdmin(request):
    """Admin listing of canned ('enlatados') products."""
    return _render_admin_filter(request, 'enlatadosProductsFilterAdmin', 'enla')
#Caja
def CartOrder(request):
    """Checkout page: build the user's contact/delivery context; on a stale session, log out and fall back to the index."""
    data = {}
    if str(request.user) != 'AnonymousUser':  # logged in: include profile data
        try:
            dataUser = User.objects.get(email=request.user)
            data = {
                'user':dataUser.id,
                'name':dataUser.first_name,
                'email':dataUser.email,
                'apellido':dataUser.last_name,
                'phone':dataUser.user_profile.phone,
                'direction':dataUser.user_profile.direction,
                'rif':dataUser.user_profile.rif,
                'localphone':dataUser.user_profile.localphone,
                'reference':dataUser.user_profile.reference,
                'code':200
            }
        except Exception as e:
            # Profile lookup failed: drop the session and show the storefront instead.
            logout(request)
            _allproducts = backStart(request)
            _allproducts.get('all')
            data = _allproducts.response_data
            data['code'] = _allproducts.code
            return render(request, 'market/index.html',{'data':data['data'][0] if data['data'] else {} })
    return render(request, 'market/order.html',data)
#confirmacioncompra
def ConfimationOrder(request):
    """Purchase-confirmation view: show the user's most recent purchase,
    its line items and the recorded payment details.

    Anonymous users are sent to the register/login page. Bug fix: on any
    failure this view previously printed the error and fell through,
    returning None (Django then raises "view didn't return an
    HttpResponse"); it now renders the 404 page instead.
    """
    if str(request.user) == 'AnonymousUser':
        return render(request, 'market/registerLogin.html', {})
    try:
        dataUser = ProfileDB.objects.get(user__email=request.user)
        data = {
            'user':dataUser.user.id,
            'name':dataUser.user.first_name,
            'email':dataUser.user.email,
            'code':200,
            'costoenvio':dataUser.costoenvio,
            'compra':[],
            'tipoPago':'',
        }
        # Latest purchase of this user; every row sharing its code is one
        # line item of that purchase.
        compra = PurchaseConfirmation.objects.filter(user=dataUser.user).last()
        allProducts = PurchaseConfirmation.objects.filter(code=compra.code)
        totalGeneral=0
        for value in allProducts:
            data['code'] = value.code
            data['compra'].append({
                'name':value.product.name,
                'price':"$"+str(value.product.price)+' / '+str(value.cant_product),
                'image':'/static/images/upload/imagesp/'+value.product.name_image,
                'total':"$"+str(round(float(value.product.price)*int(value.cant_product),2)),
            })
            totalGeneral = totalGeneral+(float(value.product.price)*int(value.cant_product))
        # Payment metadata recorded for this purchase code.
        for value2 in purchaseHistory.objects.filter(code_purchase=compra.code):
            data['lugarpago'] = value2.lugarpago
            data['moneda'] = value2.moneda
            data['tipoPago'] = value2.payment_type
            data['totalenmodena'] = value2.total
        data['totalGeneral'] = round(totalGeneral,2)
        data['totalCompleto'] =round(data['totalGeneral']+data['costoenvio'],2)
        if data['moneda'] == 'Bs':
            # Re-format with '.' thousands separators and ',' decimals
            # (Venezuelan convention): 1234.5 -> "1.234,50".
            data['totalenmodena']="{:,.2f}".format(float(data['totalenmodena'])).replace(","," ")
            data['totalenmodena']=data['totalenmodena'].replace(".",",")
            data['totalenmodena']=data['totalenmodena'].replace(" ",".")
        return render(request, 'market/confirmationOrder.html',data)
    except Exception as e:
        print("ConfimationOrder",e)
        # Previously fell through and returned None; always answer a page.
        return render(request, 'market/error404.html', {})
#envio de formulario de ayuda
def HelpForm(request):
    """Handle the help/contact form: validate the optional purchase code,
    then queue the e-mail through the Celery ``help_form`` task.

    Always answers JSON: {'code': 200} unless the purchase code is invalid.
    """
    try:
        # Before dispatching to the worker, verify the purchase code exists.
        codigo = request.POST.get('codigo')
        if codigo:
            try:
                PagosImagenes.objects.get(codigo_compra=codigo)
            except Exception as e:
                print("codigo invalido",e)
                data = {'code':500,"error":"Código invalido"}
                return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
        kwargs_ = {
            "asunto": request.POST.get('asunto'),
            "email": request.POST.get('email'),
            "mensaje": request.POST.get('mensaje'),
            "imagen": request.POST.get('imagen'),
            "nombre_imagen": request.POST.get('nombre_imagen'),
            "codigo": request.POST.get('codigo'),
            "origin":request.headers['Origin'],
        }
        extension = request.POST.get('extension')
        if extension:
            # MIME type to file suffix: 'image/png' -> '.png'
            extension = '.'+extension.split("/")[1]
            kwargs_["extension"] = extension
        envio_email = help_form.delay(kwargs_)
    except Exception as e:
        # Best-effort: a failure to enqueue still answers 200 below.
        print("HelpForm",e)
    data = {'code':200}
    return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def CartOrderEntrega(request):
    """Persist the cart as a purchase (checkout/"entrega") and answer JSON.

    Anonymous users get the register/login page; otherwise backStart's
    ``guardaCompra()`` saves the purchase and the JSON answer mirrors its
    result code. Fix: dropped the intermediate ``data`` dict that was
    assigned and then immediately overwritten.
    """
    if str(request.user) == 'AnonymousUser':
        return render(request, 'market/registerLogin.html', {})
    _purchase = backStart(request)
    _purchase.guardaCompra()
    if _purchase.code != 500:
        data = {'code': 200}
    else:
        data = {'code': 500, 'message': 'Error al procesar su compra'}
    return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
#pagina de recuperacion de clave
def Restore(request):
    """Render the password-recovery request page."""
    return render(request, 'market/restore.html', {})
#envio de recuperacion de clave
def Forgot(request):
    """Password-recovery endpoint: generate a reset token for the posted
    e-mail, store it, and queue the recovery e-mail (``forgot_pass`` task).

    Answers {'code': 200} JSON on success, or {'code': 500} when the
    e-mail is unknown or anything else fails.
    """
    try:
        dataUser = User.objects.get(email=request.POST['email'])
        ######################## password-reset security code ##################
        def ran_gen(size, chars=string.ascii_uppercase + string.digits):
            # Random string of `size` characters drawn from `chars`.
            return ''.join(random.choice(chars) for x in range(size))
        tokenCode = ran_gen(30,"abcdefghijkLmnNopqrstuvwxyz0123456789*")
        ########################################################################
        try:
            # Reuse the user's existing token row if there is one...
            token = TokenPassword.objects.get(user=dataUser)
            token.token = tokenCode
        except Exception as e:
            # ...otherwise create a fresh one.
            dataToke = {'token':tokenCode,'user':dataUser}
            token = TokenPassword(**dataToke)
        token.save()
        kwargs_ = {
            "email":str(dataUser.email),
            "uriab":request.build_absolute_uri(),
            "token":token.token
        }
        envio_email_forgot = forgot_pass.delay(kwargs_)
        data = {'code':200}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
    except Exception as e:
        print (e)
        data = {'code':500,'message':'Email no existe'}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def ForgotMail(request):
    """Validate a password-reset token passed in the query string.

    Renders the final reset form when the token exists in TokenPassword;
    otherwise (missing or unknown token) renders the 404 page.
    """
    if 'token' not in request.GET:
        return render(request, 'market/error404.html', {})
    token_value = request.GET.get('token')
    try:
        TokenPassword.objects.get(token=token_value)
    except Exception:
        return render(request, 'market/error404.html', {})
    return render(request, 'market/forgotPasswordFinal.html', {'token': request.GET['token']})
def Detail(request):
    """Product-detail page; expects the product code in the 'code' query
    parameter, otherwise answers a JSON 500 error.
    """
    if 'code' in request.GET:
        _detailproducts = backStart(request)
        _detailproducts.detailProducts()
        data = _detailproducts.response_data
        direction = '/static/images/upload/imagesp/'
        return render(request, 'market/detailProduct.html', {'direction':direction,'data':data['data'],'data2':data['data2'][0]})
    else:
        data = {'code':500,'message':'Codigo invalido'}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def Register(request):
    """Render the account-registration page."""
    return render(request, 'market/register.html', {'flag':1})
def SendEmailClient(request):
    """Send the registration e-mail (via Sendinblue) to the user matching
    the posted 'email' field, answering {'code': 200} JSON on success.

    NOTE(review): when 'email' is missing or an exception occurs this view
    falls through and returns None — confirm callers tolerate that.
    """
    try:
        email = request.POST.get("email")
        if email:
            dataUser = User.objects.get(email=request.POST['email'])
            sendinblue_send('registro',dataUser.email,dataUser.first_name,dataUser.last_name,None)
            data = {'code':200,'message':''}
            return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
    except Exception as e:
        print("SendEmailClient",e)
| alfonsoolavarria/cm | maracay/views.py | views.py | py | 34,284 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.TemplateView",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 36,
"usage_type": "name"
}... |
6713641650 | """
Utilities for dictionaries of xy tuple values.
"""
from __future__ import print_function, division
import random
from collections import defaultdict
def center(pos, dimensions):
    """Translate every xy position in-place so that the bounding box of all
    points is centered inside the (width, height) ``dimensions``.
    """
    xs = [point[0] for point in pos.values()]
    ys = [point[1] for point in pos.values()]
    shift_x = dimensions[0] / 2. - (max(xs) + min(xs)) / 2
    shift_y = dimensions[1] / 2. - (max(ys) + min(ys)) / 2
    for key, point in pos.items():
        pos[key] = (point[0] + shift_x, point[1] + shift_y)
def scale_offset(pos, scale=1, dx=0, dy=0):
    """Scale then translate every position, in-place.

    Each value is replaced with the 2-item list ``[x*scale + dx, y*scale + dy]``.
    """
    for key, (px, py) in pos.items():
        pos[key] = [px * scale + dx, py * scale + dy]
def fix_overlapping(pos, r=10):
    """Jitter positions that fall in the same integer grid cell so they no
    longer coincide.

    Mutates ``pos`` in-place. The jitter is deterministic (fixed seed);
    each coordinate offset is uniform in [0, r).
    """
    random.seed(0xDABBAD00)
    cells = defaultdict(set)
    draw = random.random
    for key, point in pos.items():
        cells[(int(point[0]), int(point[1]))].add(key)
    for members in cells.values():
        if len(members) < 2:
            continue
        for key in members:
            pos[key] = (pos[key][0] + draw() * r, pos[key][1] + draw() * r)
def get_center(pos):
    """Return the mean (x, y) of all positions in ``pos``.

    Bug fix: the previous version referenced ``self.world.bodies`` inside a
    module-level function that has no ``self``, so every call raised
    NameError; it now averages the positions it is actually given.
    """
    cx = 0.0
    cy = 0.0
    for point in pos.values():
        cx += point[0]
        cy += point[1]
    n = len(pos)
    return cx / n, cy / n
def rotate(pos, angle):
    """Rotate every position by ``angle`` radians around the centroid of
    ``pos`` and return the result as a new dict (``pos`` is not modified).

    Bug fix: the previous version called an undefined ``rotate_point``
    helper (and never used ``angle``), so every call raised NameError; the
    rotation is now computed explicitly.
    """
    import math  # local import: the module otherwise has no math dependency
    count = len(pos)
    cx = sum(p[0] for p in pos.values()) / count
    cy = sum(p[1] for p in pos.values()) / count
    cos_a, sin_a = math.cos(angle), math.sin(angle)

    def _rotate_point(p):
        # Standard 2D rotation of p about the centroid (cx, cy).
        dx, dy = p[0] - cx, p[1] - cy
        return (cx + dx * cos_a - dy * sin_a, cy + dx * sin_a + dy * cos_a)

    return {ID: _rotate_point(p) for ID, p in pos.items()}
{
"api_name": "random.seed",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 30,
"usage_type": "attribute"
}
] |
7781848764 | import imutils
import cv2
import numpy as np
class DistanceCalculator:
    """Estimate camera-to-object distance with the pinhole-camera model,
    calibrated from a reference object of known width seen at a known
    distance.
    """
    def __init__(self, distance_ref, width_ref, pixels):
        # distance_ref: known distance to the reference object.
        # width_ref: real-world width of the reference object.
        # pixels: apparent width (px) of the reference at distance_ref;
        # the focal length is derived from these via triangle similarity.
        self.distance_ref = distance_ref
        self.width_ref = width_ref
        self.focal_ref = (pixels*distance_ref)/width_ref

    def find_object(self, original):
        """
        Find the object used to compute the camera-object distance.

        A region-of-interest mask could be applied here; instead only the
        largest contour of the edge image is kept. Returns a
        cv2.minAreaRect tuple ((cx, cy), (w, h), angle); when no contour is
        found, a dummy rect of the reference width is returned so the
        caller never divides by zero.
        """
        # convert the image to grayscale, blur it, and detect edges
        gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 35, 125)

        # find the contours in the edged image and keep the largest one
        cnts = cv2.findContours(
            edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cv2.drawContours(original, cnts, -1, (0, 0, 255))
        if len(cnts):
            c = max(cnts, key=cv2.contourArea)
            return cv2.minAreaRect(c)
        else:
            return (0, 0), (self.width_ref, 0), 0

    def _calc_distance(self, pixels):
        """Distance implied by an apparent width of ``pixels`` px
        (triangle similarity: D = W * F / P)."""
        return (self.width_ref*self.focal_ref)/pixels

    def calc_distance(self, original):
        """Annotate ``original`` in-place with the detected object's box
        and its estimated distance, printing the distance as well.

        NOTE(review): the on-image label assumes centimetres and a fixed
        text position (2, 506) — confirm for other image sizes/units.
        """
        # take the max-area rectangle from find_object and draw it
        (x, y), (width, height), angle = self.find_object(original=original)
        print("distance %d" % self._calc_distance(width))
        box = cv2.boxPoints(((x, y), (width, height), angle))
        box = np.int0(box)
        cv2.drawContours(original, [box], -1, (0, 255, 0), 2)
        cv2.putText(original, "%.2f cm" % (self._calc_distance(
            width)), (2, 506), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 2)
| tarekbrahmi/Open-cv-project | MyProjects/distance-calculator/example2/DistanceCalculator.py | DistanceCalculator.py | py | 1,870 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.cvtColor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
... |
7711698828 | from PyPDF2 import PdfReader
def get_pdf_text(pdfs):
    """Concatenate the extracted text of every page of every PDF.

    Parameters:
        pdfs: iterable of PDF file paths or file-like objects.

    Returns:
        str: the combined text of all pages, in order.
    """
    return "".join(
        page.extract_text()
        for pdf in pdfs
        for page in PdfReader(pdf).pages
    )
| arunavabasu-03/PDFAssist | src/helpers/getPdf.py | getPdf.py | py | 399 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyPDF2.PdfReader",
"line_number": 17,
"usage_type": "call"
}
] |
73016495228 | from tkinter import *
from tkinter import ttk
import sqlite3
import time
#--------------------------------------
# DEFININDO MODULO HORA E DATA
#--------------------------------------
time = time.localtime()
hour = ('{}:{}'.format(time[3], time[4]))
date = ('{}/{}/{}'.format(time[0], time[1], time[2]))
#--------------------------------------
# GESTOR DE BANCO DE DADOS
#--------------------------------------
con = sqlite3.connect('database.db')
c = con.cursor()
sql = 'SELECT * FROM Users WHERE user = ?'
c.execute('CREATE TABLE IF NOT EXISTS Users(user text, passw text, cargo text)')
c.execute("""
CREATE TABLE IF NOT EXISTS Clientes(data text, cargo text, user, name text, cpf text, tel text,email text)
""")
#--------------------------------------
# QUERRY DE LOGIN
#--------------------------------------
def login(user, passw):
    """Return True when ``user`` exists and ``passw`` matches the stored one.

    NOTE(review): credentials are compared in plain text — the Users table
    stores passwords unhashed.
    """
    c.execute(sql, (user,))
    row = c.fetchone()
    return row is not None and (user, passw) == (row[0], row[1])
#--------------------------------------
# QUERRY DE CARGO
#--------------------------------------
def cargo(user):
    """Look up ``user`` and return the role ('cargo') column, or None when
    the user does not exist (previously a miss raised TypeError on
    ``None[2]``).

    Side effect: stores the fetched row in the module-level ``auth``
    global, which cadastro() reads later.
    """
    global auth
    c.execute(sql, (user,))
    auth = c.fetchone()
    return auth[2] if auth is not None else None
#--------------------------------------
# CADASTRO DE CLIENTES
#--------------------------------------
def cadastro():
    """Open the customer-registration window (Tkinter) and persist new
    customers into the Clientes table."""
    #--------------------------------------
    # SAVE HANDLER
    #--------------------------------------
    def get():
        # Insert the form fields plus a timestamp and the logged-in
        # employee's name/role (read from the module-level `auth` row that
        # cargo() populated).
        clt = 'INSERT INTO Clientes(data, cargo, user, name, cpf, tel, email) VALUES (?,?,?,?,?,?,?)'
        data = "{} {}".format(date, hour)
        user = auth[0]
        cargo = auth[2]
        name = et_name.get()
        cpf = et_cpf.get()
        tel = et_tel.get()
        email = et_email.get()
        c.execute(clt,(data, cargo, user, name, cpf, tel, email),)
        con.commit()
    root = Tk()
    cad = LabelFrame(root, text='Cadastro')
    root.title("S4U® CADASTRO")
    Label(cad, text='Nome').grid(row=0, column=0)
    Label(cad, text='CPF').grid(row=1, column=0)
    Label(cad, text='Telefone').grid(row=2, column=0)
    Label(cad, text='E-Mail').grid(row=3, column=0)
    et_name = Entry(cad)
    et_cpf = Entry(cad)
    et_tel = Entry(cad)
    et_email = Entry(cad)
    et_name.grid(row=0, column=1)
    et_cpf.grid(row=1, column=1)
    et_tel.grid(row=2, column=1)
    et_email.grid(row=3, column=1)
    cad.grid(row=0, columnspan=4)
    Button(root, text='Salvar', command=get).grid(row=1, column=0, sticky=W+E)
    Button(root, text='Cadastrar Equipamento').grid(row=1, column=1, sticky=W+E)
    Button(root, text='Limpar').grid(row=1, column=2, sticky=W+E)
    Button(root, text='Sair').grid(row=1, column=3, sticky=W+E)
    root.mainloop()
#--------------------------------------
# GESTOR DE CONSULTA
#--------------------------------------
def consulta():
    """Open the customer-lookup window (Tkinter): a tree of customers plus
    a detail pane filled in when a row is selected."""
    #--------------------------------------
    # RELOAD THE CUSTOMER TREE
    #--------------------------------------
    def refresh():
        for clear in treeview.get_children():
            treeview.delete(clear)
        c.execute('SELECT * FROM Clientes')
        for sql_cliente in c.fetchall():
            treeview.insert('', 0, text=sql_cliente[3], values=(sql_cliente[5], sql_cliente[6]))
    def busca(event):
        # Selection handler: load the selected customer's full record into
        # the detail labels (lookup is by the name shown in the tree).
        for item in treeview.selection():
            item_text = treeview.item(item, "text")
            sql_busca = 'SELECT * FROM Clientes WHERE name = ?'
            for sql_consulta in c.execute(sql_busca, (item_text,)):
                lb_tempo['text'] = sql_consulta[0]
                lb_user['text'] = (sql_consulta[1].title(), sql_consulta[2].title())
                lb_name['text'] = sql_consulta[3].title()
                lb_cpf['text'] = sql_consulta[4]
                lb_tel['text'] = sql_consulta[5]
                lb_email['text'] = sql_consulta[6].title()
    root = Tk()
    root.title('S4U® CONSULTA')
    consult = LabelFrame(root, text='Consulta')
    Label(consult, text='Data: ').grid(row=0, column=0, sticky=E)
    Label(consult, text='Funcionario: ').grid(row=1, column=0, sticky=E)
    Label(consult, text='Nome: ').grid(row=2, column=0, sticky=E)
    Label(consult, text='CPF: ').grid(row=3, column=0, sticky=E)
    Label(consult, text='Telefone: ').grid(row=4, column=0, sticky=E)
    Label(consult, text='E-Mail: ').grid(row=5, column=0, sticky=E)
    #--------------------------------------
    # DETAIL LABELS (filled by busca)
    #--------------------------------------
    lb_tempo = Label(consult, text='')
    lb_user = Label(consult, text='')
    lb_name = Label(consult, text='')
    lb_cpf = Label(consult, text='')
    lb_tel = Label(consult, text='')
    lb_email = Label(consult, text='')
    lb_tempo.grid(row=0, column=1, sticky=W)
    lb_user.grid(row=1, column=1, sticky=W)
    lb_name.grid(row=2, column=1, sticky=W)
    lb_cpf.grid(row=3, column=1, sticky=W)
    lb_tel.grid(row=4, column=1, sticky=W)
    lb_email.grid(row=5, column=1, sticky=W)
    consult.grid(row=0, columnspan=4, sticky=W+E)
    #--------------------------------------
    # SEARCH WIDGETS
    #--------------------------------------
    Label(root, text='Pesquisar:').grid(row=1, column=0, sticky=E)
    Button(root, text='Pesquisar').grid(row=1, column=2, sticky=W+E)
    Button(root, text='Buscar', command=refresh).grid(row=1, column=3, sticky=W+E)
    et_busca = Entry(root)
    treeview = ttk.Treeview(root, columns=('#0', '#1'))
    treeview.heading('#0', text='Nome')
    treeview.heading('#1', text='Telefone')
    treeview.heading('#2', text='E-Mail')
    treeview.bind("<<TreeviewSelect>>", busca)
    et_busca.grid(row=1, column=1, sticky=W+E)
    treeview.grid(row=2, columnspan=4, sticky=W+E)
    refresh()
    root.mainloop()
| S4UDeveloper/MDI | DB/Database.py | Database.py | py | 5,792 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "time.localtime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Treeview",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
... |
25687922492 | import astroid
from hypothesis import assume, given, settings, HealthCheck
from .. import custom_hypothesis_support as cs
from typing import Any, Dict, List, Set, Tuple
# Register this project's Hypothesis settings profile.
settings.load_profile("pyta")
@given(cs.subscript_node())
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_index(node):
    """An astroid Index node's inferred type equals that of its value."""
    module, _ = cs._parse_text(node)
    for index_node in module.nodes_of_class(astroid.Index):
        assert index_node.inf_type.getValue() == index_node.value.inf_type.getValue()
@given(cs.expr_node())
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_expr(expr):
    """An astroid Expr node's inferred type equals that of its value."""
    module, _ = cs._parse_text(expr)
    for expr_node in module.nodes_of_class(astroid.Expr):
        assert expr_node.inf_type.getValue() == expr_node.value.inf_type.getValue()
| ihasan98/pyta | tests/test_type_inference/test_literals.py | test_literals.py | py | 772 | python | en | code | null | github-code | 6 | [
{
"api_name": "hypothesis.settings.load_profile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "hypothesis.settings",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "astroid.Index",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": ... |
34714688235 | import argparse
import torch
import torch.utils.data
import src.utils as utils
from src.utils import alphabet
from src.utils import strLabelConverterForAttention as converter
import src.dataset as dataset
import model
# Command-line options: test data, loader workers, input size, and model
# checkpoints to restore.
parser = argparse.ArgumentParser()
parser.add_argument('--testList', default='label/test_label.txt')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--cuda', action='store_true', help='enables cuda', default=True)
parser.add_argument('--gpuid', type=int, default=0, help='which GPU to use')
parser.add_argument('--height', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--width', type=int, default=208, help='the width of the input image to network')
parser.add_argument('--encoder', type=str, default='', help="path to encoder (to continue training)")
parser.add_argument('--decoder', type=str, default='', help='path to decoder (to continue training)')
parser.add_argument('--loadModelEpoch', type=int, default=0, help='load model from epoch n to continue training, override the previous two')
opt = parser.parse_args()

if opt.cuda:
    # Select the requested GPU for all subsequent .cuda() calls.
    torch.cuda.set_device(opt.gpuid)
def predict(encoder, decoder, criterion, batchsize, dataset, workers=2):
    """Run greedy attention decoding over ``dataset`` and print per-sample
    predictions plus the overall character accuracy (EOS included).

    The encoder/decoder are frozen and switched to eval mode; no gradients
    are needed. Fixes: ``iterator.next()`` (Python 2 only) replaced with the
    builtin ``next()``, and per-sample buffers are sized with the actual
    batch size ``b`` instead of ``batchsize`` so a smaller final batch no
    longer raises IndexError.
    """
    for e, d in zip(encoder.parameters(), decoder.parameters()):
        e.requires_grad = False
        d.requires_grad = False
    encoder.eval()
    decoder.eval()
    data_loader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=batchsize, num_workers=workers)
    iterator = iter(data_loader)

    n_correct = 0  # correct characters (including EOS)
    n_total = 0    # total characters (including EOS)
    n_current = 0  # running sample index, for display only
    loss_avg = utils.averager()
    EOS_TOKEN = 1  # end-of-sequence token id

    for _ in range(len(data_loader)):
        data = next(iterator)  # bug fix: was iterator.next() (Python 2)
        cpu_images, cpu_texts = data
        b = cpu_images.size(0)  # actual batch size (last batch may be smaller)
        image = torch.FloatTensor(batchsize, 3, 1, 1)
        image = image.cuda()
        utils.loadData(image, cpu_images)

        target_variable = converter(alphabet).encode(cpu_texts)
        target_variable = target_variable.cuda()
        encoder_outputs = encoder(image)           # CNN + BiLSTM feature extraction
        decoder_input = target_variable[0].cuda()  # decoding starts from the SOS token
        decoder_hidden = decoder.initHidden(b).cuda()
        loss = 0.0
        # Per-sample output buffers, sized by the real batch size b.
        decoded_words = [[] for _ in range(b)]
        decoded_labels = [[] for _ in range(b)]
        flag = [True] * b

        for di in range(1, target_variable.shape[0]):  # max target string length
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_variable[di])  # one character per step
            topv, topi = decoder_output.data.topk(1)
            ni = topi.squeeze()
            decoder_input = ni
            for count in range(b):
                if flag[count]:
                    if ni[count] == EOS_TOKEN:
                        decoded_words[count].append('<EOS>')
                        decoded_labels[count].append(EOS_TOKEN)
                        flag[count] = False
                    else:
                        decoded_words[count].append(converter(alphabet).decode(ni[count]))
                        decoded_labels[count].append(ni[count])
        loss_avg.add(loss)

        for count in range(b):
            n_total += len(cpu_texts[count]) + 1  # EOS included
            for pred, target in zip(decoded_labels[count], target_variable[1:, count]):
                if pred == target:
                    n_correct += 1
            texts = cpu_texts[count]
            print('%d Pred:%-20s, GT: %-20s' % (n_current, decoded_words[count], texts))
            n_current += 1

    accuracy = n_correct / float(n_total)
    print('Loss: %f, Accuracy: %f' % (loss_avg.val(), accuracy))
if __name__ == '__main__':
    # Build the test set, models and loss, optionally restoring checkpoints.
    test_dataset = dataset.listDataset(list_file=opt.testList, transform=dataset.resizeNormalize((opt.width, opt.height)))
    nclass = len(alphabet) + 3  # alphabet size plus 3 extra (special) tokens
    nc = 1  # single-channel (grayscale) input
    criterion = torch.nn.NLLLoss()
    encoder = model.encoder(opt.height, nc=nc, nh=256)
    decoder = model.decoder(nh=256, nclass=nclass, dropout_p=0.1)
    if opt.encoder:
        print('loading pretrained encoder model from %s' % opt.encoder)
        encoder.load_state_dict(torch.load(opt.encoder))
    if opt.decoder:
        print('loading pretrained decoder model from %s' % opt.decoder)
        decoder.load_state_dict(torch.load(opt.decoder))
    if opt.loadModelEpoch > 0:
        # --loadModelEpoch overrides the explicit --encoder/--decoder paths.
        encoder_path = 'model/encoder_%d.pth' % opt.loadModelEpoch
        print('loading pretrained encoder model from %s' % encoder_path)
        encoder.load_state_dict(torch.load(encoder_path))
        decoder_path = 'model/decoder_%d.pth' % opt.loadModelEpoch
        print('loading pretrained decoder model from %s' % decoder_path)
        decoder.load_state_dict(torch.load(decoder_path))
    if opt.cuda:
        encoder.cuda()
        decoder.cuda()
        criterion = criterion.cuda()
    print("Testing:")
    predict(encoder, decoder, criterion, opt.batchSize, dataset=test_dataset)
| WANGPeisheng1997/HandwrittenTextRecognition | cnn+lstm+attention/test.py | test.py | py | 5,492 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.set_device",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.u... |
12918395650 | #!/usr/bin/env python3
""" Import build-in and custom modules to check system utilization and connection"""
import shutil
import psutil
import network
# Site used for the external-connectivity check below.
site_name = "http://www.google.com"
# Verifies that there's enough free space on disk.
def check_disk_usage(disk):
    """Return True when more than 20% of ``disk``'s capacity is free."""
    usage = shutil.disk_usage(disk)
    percent_free = usage.free / usage.total * 100
    return percent_free > 20
# Verifies that there's enough unused CPU.
def check_cpu_usage():
    """Return True when CPU utilization, sampled over 1 second, is below 75%."""
    return psutil.cpu_percent(1) < 75
# If there's not enough free disk or too much CPU load, print an error;
# otherwise report the localhost and external-connectivity checks.
if not check_disk_usage('/') or not check_cpu_usage():
    print("ERROR!")
elif network.check_localhost() and network.check_connectivity(site_name):
    print("Everything ok")
else:
    print("Network checks failed")
| TyapinIA/Coursera_Google_IT_Automation_with_Python | psutil_shutil/health_check.py | health_check.py | py | 803 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "shutil.disk_usage",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "network.check_localhost",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "network.... |
35069305556 | from enum import unique
from flask_sqlalchemy import SQLAlchemy
from .utils import utcnow
# Module-wide SQLAlchemy database handle.
db = SQLAlchemy()
class Home(db.Model):
    """Content block shown on the landing page."""
    __tablename__ = "home"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120), unique=False, nullable=False)
    content = db.Column(db.String(250), unique=True, nullable=False)
    image_url = db.Column(db.String(250), unique=True, nullable=True)
    label = db.Column(db.String(120), unique=False, nullable=False)
    # Timestamps default to utcnow at insert time.
    created = db.Column(db.DateTime, default=utcnow)
    updated = db.Column(db.DateTime, default=utcnow)

    def __repr__(self):
        # Bug fix: previously referenced self.home_content, which is not a
        # column, so repr() raised AttributeError.
        return '<Home %r>' % self.title

    def serialize(self):
        """Return a JSON-serializable dict of all columns."""
        return {
            "id": self.id,
            "title": self.title,
            "content": self.content,
            "image_url": self.image_url,
            "label": self.label,
            "created": self.created,
            "updated": self.updated,
        }
class ContactForm(db.Model):
    """A message submitted through the public contact form."""
    __tablename__ = "contact_form"
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(120), unique=False, nullable=False)
    last_name = db.Column(db.String(120), unique=False, nullable=False)
    title = db.Column(db.String(120), unique=False, nullable=False)
    email = db.Column(db.String(250), unique=False, nullable=False)
    message = db.Column(db.Text, unique=False, nullable=False)
    # Submission timestamp, defaults to utcnow at insert time.
    created = db.Column(db.DateTime, default=utcnow)

    def __repr__(self):
        # Bug fix: previously referenced self.contact_form, which is not a
        # column, so repr() raised AttributeError.
        return '<ContactForm %r>' % self.email

    def serialize(self):
        """Return a JSON-serializable dict of all columns."""
        return {
            "id": self.id,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "title": self.title,
            "email": self.email,
            "message": self.message,
            "created": self.created,
        }
| jgustavoj/midwestern-project | src/api/models.py | models.py | py | 1,995 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "utils.utcnow",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "utils.utcnow",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "utils.utcnow",
... |
37663232255 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 02:55:11 2021
@author: Anato
"""
from pathlib import Path
# Resolve the project layout relative to this file: info/ sits next to the
# directory that contains this script's parent directory.
source_path = Path(__file__).resolve()
source_dir = source_path.parent
main_dir = str(source_dir.parent)
info_dir = main_dir + '/info/'
def open_info(file_name, mode):
    """Open ``<project root>/info/<file_name>.txt`` in the given mode."""
    return open(info_dir + file_name + '.txt', mode)
import scrapy
from urllib.parse import urljoin
class MySpider(scrapy.Spider):
    """Crawl a Codeforces contest standings table (URL read from
    info/s_url.txt) and append to info/result.txt every handle that also
    appears in info/to_check.txt."""
    name = "cfspider"
    allowed_domains = ["codeforces.com"]
    # NOTE(review): class-level mutable attributes are shared by all spider
    # instances; fine for a single crawl run.
    visited_urls = []
    d = {}

    def start_requests(self):
        # Handles to look for, kept as dict keys for O(1) membership tests.
        with open_info('to_check', 'r') as f:
            self.d = dict.fromkeys([el for el in f.read().split()], 1)
        # Truncate the result file before the crawl.
        fs = open_info('result', 'w')
        fs.close()
        url = ""
        with open_info('s_url', 'r') as f:
            url = f.read() + '/standings/page/1'
        #self.logger.info(url)
        yield scrapy.Request(url = url, callback = self.parse)

    def parse(self, response):
        # The second <td> of each participant row holds the handle.
        a = response.xpath('//tr[@participantid]/td[2]/a/text()').extract()
        #with open('debug.txt', 'a') as f:
        #    for el in a:
        #        f.write(el + '\n')
        with open_info('result', 'a') as f:
            for el in a:
                if el in self.d:
                    f.write(el + '\n')
        # Follow every pagination link exactly once.
        next_pages = response.xpath('//a[contains(@href,"standings/page")]/@href').extract()
        for next_page in next_pages:
            url = urljoin(response.url + '/', next_page)
            if url not in self.visited_urls:
                self.visited_urls.append(url)
                yield response.follow(url, callback = self.parse)
| Anatoly7/codeforces-spider | tutorial/spiders/codeforces_spider.py | codeforces_spider.py | py | 1,714 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scrapy.Spider",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urljoin",... |
26416473947 | # -*- coding: UTF-8 -*-
from flask import Flask
from flask import request
from flask import json
import requests
# Flask application object for this relay/provisioning service.
app = Flask(__name__)
# http://blog.luisrei.com/articles/flaskrest.html
@app.route('/oslh2b', methods=['POST'])
def oslh2b():
    """Relay endpoint: forward the posted JSON body (minus its
    'destination_url' key) to that destination URL, echoing back the
    response body and headers.

    NOTE(review): the reply is Python's str() of a dict, not JSON —
    confirm clients parse it that way.
    """
    if request.method == 'POST':
        json_headers = request.headers
        data = json.loads(request.data)
        destination_url = data["destination_url"]
        data.pop("destination_url", None)
        json_data = json.dumps(data)
        r = requests.post(destination_url, data=json_data, headers=json_headers)
        data = {}
        data["body"] = json.loads(r.text)
        data["headers"] = r.headers
        return str(data)
def config2dict(request_data):
    """Parse "KEY VALUE" lines into a dict.

    Lines that do not contain exactly two whitespace-separated tokens are
    silently ignored. Example input::

        OS_AUTH_URL http://openstack-vcenter:5000/v3
        OS_USERNAME admin
        name prueba

    Returns:
        dict: mapping of first token to second token for each valid line.
    """
    configuration = {}
    for line in request_data.splitlines():
        parts = line.split()  # fix: split once instead of three times per line
        if len(parts) == 2:
            configuration[parts[0]] = parts[1]
    return configuration
def get_auth_token(config):
    """Request a Keystone v3 token using password authentication.

    Parameters:
        config: dict with OS_* settings (as produced by config2dict).

    Returns:
        (token, token_id): the parsed token body and the X-Subject-Token
        header value.

    Fix: the payload is now built as a dict and serialized with
    json.dumps() instead of %-interpolating raw values into a JSON string
    template, which broke on quotes/backslashes in passwords and invited
    injection.
    """
    headers = {}
    headers["Content-Type"] = 'application/json'
    payload = {
        "auth": {
            "identity": {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": config["OS_USERNAME"],
                        "password": config["OS_PASSWORD"],
                        "domain": {"name": config["OS_USER_DOMAIN_NAME"]},
                    }
                },
            },
            "scope": {
                "project": {
                    "id": config["OS_PROJECT_ID"],
                    "domain": {"name": config["OS_USER_DOMAIN_NAME"]},
                }
            },
        }
    }
    r = requests.post(config["OS_AUTH_URL"] + "/auth/tokens",
                      data=json.dumps(payload), headers=headers)
    token = json.loads(r.text)
    token_id = r.headers["X-Subject-Token"]
    return (token, token_id)
def get_endpoint(token, endpoint_type, interface_type):
    """Look up a service endpoint URL in a Keystone token's service catalog.

    Scans every catalog entry of ``endpoint_type`` and returns the URL of
    the last endpoint whose interface matches ``interface_type``, or the
    empty string when none matches.
    """
    url = ""
    for service in token["token"]["catalog"]:
        if service["type"] != endpoint_type:
            continue
        for endpoint in service["endpoints"]:
            if endpoint["interface"] == interface_type:
                url = endpoint["url"]
    return url
def create_network(token, token_id, env_name):
    """Build the network environment for a new instance.

    Steps: remember the externally-routable (public) network, create a
    private network and subnet named after ``env_name``, create a router
    uplinked to the public network, and attach the private subnet to it.

    Returns a dict with keys 'public', 'private_net', 'private_subnet'
    and 'external_router'.
    """
    # Networking plan:
    # - keep a reference to the public (externally routable) network
    # - create a private network and subnet
    # - create a router with its gateway on the public network
    # - plug the private subnet into that router
    network_url = get_endpoint(token, "network", "public")
    headers = {}
    headers["Content-Type"] = 'application/json'
    headers["X-Auth-Token"] = token_id
    r = requests.get(network_url + "/v2.0/networks", headers=headers)
    #r = requests.post(network_url + "/v2.0/networks",headers=headers,data=data)
    #print r.text
    networks = json.loads(r.text)
    #print json.dumps(networks, indent=4)
    # Find the external (public) network; the last one wins if several.
    public_network = {}
    for network in networks["networks"]:
        if network["router:external"]:
            public_network = network
    print ((json.dumps(public_network, indent=4)))
    # public_network_id = pretty_response["network"]["id"]
    # Create the instance's private network.
    private_net_name = env_name + "_net"
    data = """
    {
        "network": {
            "name": "%s",
            "admin_state_up": true
        }
    }
    """ % private_net_name
    r = requests.post(network_url + "/v2.0/networks",
                      headers=headers, data=data)
    private_net = json.loads(r.text)
    print ((json.dumps(private_net, indent=4)))
    # Create the instance's subnet.
    # NOTE(review): the CIDR/gateway/DHCP pool are hard-coded to
    # 172.17.235.0/24 — confirm/parameterize before reuse.
    # subnetwork_url = "http://openstack.paradigmadigital.com:9696/v2.0/subnets"
    private_subnet_name = env_name + "_subnet"
    data = """
    {
        "subnet": {
            "name": "%s",
            "ip_version": 4,
            "network_id": "%s",
            "cidr": "172.17.235.0/24",
            "gateway_ip": "172.17.235.1",
            "allocation_pools": [
                {
                    "start": "172.17.235.10",
                    "end": "172.17.235.100"
                }
            ],
            "enable_dhcp": "true"
        }
    }
    """ % (private_subnet_name, private_net["network"]["id"])
    r = requests.post(network_url + "/v2.0/subnets", headers=headers, data=data)
    private_subnet = json.loads(r.text)
    print ((json.dumps(private_subnet, indent=4)))
    # Create a router to give the private network a route to the outside.
    # routers_url = "http://openstack.paradigmadigital.com:9696/v2.0/routers"
    router_name = env_name + "_router"
    data = """
    {
        "router": {
            "name": "%s",
            "external_gateway_info": {
                "network_id": "%s"
            }
        }
    }
    """ % (router_name, public_network["id"])
    r = requests.post(network_url + "/v2.0/routers", headers=headers, data=data)
    external_router = json.loads(r.text)
    print ((json.dumps(external_router, indent=4)))
    # Attach the private subnet to the public-facing router.
    # add_router_interface_url = routers_url + "/" + external_router_id +
    # "/add_router_interface"
    data = """
    {
        "subnet_id": "%s"
    }
    """ % private_subnet["subnet"]["id"]
    r = requests.put(network_url + "/v2.0/routers/" + external_router["router"]["id"] + "/add_router_interface",
                     headers=headers, data=data)
    external_router_connections = json.loads(r.text)
    print ((json.dumps(external_router_connections, indent=4)))
    network_env = {}
    network_env["public"] = public_network
    network_env["private_net"] = private_net["network"]
    network_env["private_subnet"] = private_subnet["subnet"]
    network_env["external_router"] = external_router["router"]
    return (network_env)
def create_server(token, token_id, env):
    """Create a Nova server attached to the environment's private network.

    Args:
        token: parsed Keystone token body (used to look up the compute endpoint).
        token_id: value for the X-Auth-Token header.
        env: dict with at least env["name"] and env["network"]["private_net"]["id"].

    Returns:
        The "server" dict from Nova's create-server response.
    """
    headers = {
        "Content-Type": 'application/json',
        "X-Auth-Token": token_id,
    }
    name = env["name"] + "_computer"
    # NOTE(review): image and flavor ids are hard-coded for one specific
    # deployment — confirm they still exist before relying on this.
    image = "78eb8e56-d6b5-424d-9a94-f92e02c498f7"
    flavor = "2"
    # Build the request body with json.dumps instead of %-interpolating a
    # template string: this stays valid JSON even if the server name ever
    # contains quotes, backslashes or percent signs.
    payload = {
        "server": {
            "name": name,
            "imageRef": image,
            "flavorRef": flavor,
            "availability_zone": "nova",
            "security_groups": [
                {"name": "default"}
            ],
            "networks": [
                {"uuid": env["network"]["private_net"]["id"]}
            ]
        }
    }
    compute_url = get_endpoint(token, "compute", "public")
    r = requests.post(compute_url + "/servers", headers=headers,
                      data=json.dumps(payload))
    server_env = json.loads(r.text)
    return (server_env["server"])
def dict2config(dictio):
    """Serialise a dict as plain-text "key value" lines, one entry per line."""
    lines = ["%s %s\n" % (key, value) for key, value in dictio.items()]
    return "".join(lines)
@app.route('/create_computer_mock', methods=['POST'])
def create_computer_mock():
    """Return a canned /create_computer response for client-side testing.

    The body mimics the "key value" line format produced by dict2config,
    including the resource ids a later /delete_computer call would consume.
    No OpenStack resources are touched.
    """
    data = """
router_id 647a4c8f-f055-461d-bd91-b7c224f4acd9
server_id 00633113-acfc-41fc-8b23-88d2e84c1a90
name prueba
OS_USERNAME admin
subnet_id ca2cfacb-de0a-4705-a334-9a4cbb709f41
OS_PROJECT_ID 9d7812704e104a208603c5d0481bd952
OS_REGION_NAME RegionOne
OS_USER_DOMAIN_NAME default
OS_AUTH_URL http://openstack-vcenter:5000/v3
OS_PROJECT_NAME admin
OS_PASSWORD admin
net_id bee1007e-1289-4c75-9dd5-dbe11a3fdba5
"""
    return (data)
@app.route('/create_computer', methods=['POST'])
def create_computer():
    """Provision a full environment (network plus server) for the posted config.

    Expects a plain-text body of "key value" lines (parsed by config2dict)
    with at least "name" and the OS_* credentials.  Returns the same
    key/value format extended with net_id, router_id, subnet_id and
    server_id, which /delete_computer consumes to tear the environment down.
    """
    if request.method != 'POST':
        return None  # unreachable in practice: the route only accepts POST
    config = config2dict(request.data)
    env = {"name": config["name"]}
    token, token_id = get_auth_token(config)
    # The network must exist first: the server attaches to its private net.
    env["network"] = create_network(token, token_id, env["name"])
    config["net_id"] = env["network"]["private_net"]["id"]
    config["router_id"] = env["network"]["external_router"]["id"]
    config["subnet_id"] = env["network"]["private_subnet"]["id"]
    env["server"] = create_server(token, token_id, env)
    config["server_id"] = env["server"]["id"]
    return dict2config(config)
def delete_server(token, token_id, server_id):
    """Delete a Nova server by id (fire-and-forget: the response is ignored)."""
    headers = {
        "Content-Type": 'application/json',
        "X-Auth-Token": token_id,
    }
    compute_url = get_endpoint(token, "compute", "public")
    requests.delete(compute_url + "/servers/" + server_id, headers=headers)
    return
def delete_network(token, token_id, net_id, subnet_id, router_id):
    """Tear down the network resources created by create_network.

    Fix: this function was an empty stub (every call was commented out), so
    routers, subnets and networks leaked on every delete.  The teardown order
    matters: a router with an attached interface cannot be deleted, and a
    subnet cannot be deleted while a router port uses it.

    Args:
        token: parsed Keystone token body (for endpoint lookup).
        token_id: value for the X-Auth-Token header.
        net_id/subnet_id/router_id: ids returned by /create_computer.
    """
    headers = {
        "Content-Type": 'application/json',
        "X-Auth-Token": token_id,
    }
    network_url = get_endpoint(token, "network", "public")
    # 1. Detach the private subnet from the external router.
    requests.put(network_url + "/v2.0/routers/" + router_id + "/remove_router_interface",
                 headers=headers, data=json.dumps({"subnet_id": subnet_id}))
    # 2. Delete the router now that it has no interfaces left.
    requests.delete(network_url + "/v2.0/routers/" + router_id, headers=headers)
    # 3. Delete the subnet, then its parent network.
    requests.delete(network_url + "/v2.0/subnets/" + subnet_id, headers=headers)
    requests.delete(network_url + "/v2.0/networks/" + net_id, headers=headers)
    return
@app.route('/delete_computer', methods=['POST'])
def delete_computer():
    """Delete a computer (server plus its network) and return 200 if ok.

    Expects the "key value" config produced by /create_computer, including
    server_id, net_id, subnet_id and router_id plus the OS_* credentials.
    """
    if request.method == 'POST':
        config = {}
        config = config2dict(request.data)
        token, token_id = get_auth_token(config)
        delete_server(token, token_id, config["server_id"])
        delete_network(token, token_id,
                       config["net_id"], config["subnet_id"],
                       config["router_id"])
        # Fix: the view previously fell off the end and returned None, which
        # Flask turns into a 500 ("view function did not return a response");
        # returning a body gives the documented 200.
        return 'deleted'
def get_console(token, token_id, server):
    """Request a noVNC console for *server* and return Nova's "console" dict.

    The returned dict contains the browser-ready console "url".  For SPICE
    deployments the action would be os-getSPICEConsole with type
    "spice-html5" instead.
    """
    headers = {
        "Content-Type": 'application/json',
        "X-Auth-Token": token_id,
    }
    data = """
    {
        "os-getVNCConsole": {
            "type": "novnc"
        }
    }
    """
    compute_url = get_endpoint(token, "compute", "public")
    response = requests.post(compute_url + "/servers/" + server["id"] + "/action",
                             headers=headers, data=data)
    print ((response.text))
    console_env = json.loads(response.text)
    print ((json.dumps(console_env, indent=4)))
    return (console_env["console"])
@app.route('/get_console_url', methods=['POST'])
def get_console_url():
    """Return "console_url <url>" for the server_id in the posted config."""
    if request.method != 'POST':
        return None  # unreachable in practice: the route only accepts POST
    config = config2dict(request.data)
    env = {
        "name": config["name"],
        "server": {"id": config["server_id"]},
    }
    token, token_id = get_auth_token(config)
    env["console"] = get_console(token, token_id, env["server"])
    return "console_url " + env["console"]["url"]
# Run Flask's built-in development server when executed directly
# (under a real WSGI server this module is imported instead).
if __name__ == '__main__':
    app.run()
| elmanytas/osl-computer | ansible-flask/roles/flaskapp/files/flaskapp/flaskapp/__init__.py | __init__.py | py | 12,045 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.request.head... |
24796364963 | from __future__ import division
import os
import re
import sys
import struct
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def load(fname):
    """Load a PFM image.

    Args:
        fname: path to a .pfm file ("PF" header = colour, "Pf" = greyscale).

    Returns:
        (image, scale): *image* is a float32 array of shape (H, W, 3) for
        colour or (H, W) for greyscale, flipped to top-down row order;
        *scale* is the (positive) scale factor from the header.

    Raises:
        Exception: if the magic, dimension line or scale line is malformed.
    """
    # Fix: open in binary mode.  The pixel payload is raw floats, and reading
    # it through a text-mode handle corrupts it (and breaks np.fromfile) on
    # Python 3; the ASCII header lines are decoded explicitly instead.
    with open(fname, 'rb') as f:
        header = f.readline().decode('ascii').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')

        dim_match = re.match(r'^(\d+)\s(\d+)\s$', f.readline().decode('ascii'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')

        scale = float(f.readline().decode('ascii').rstrip())
        if scale < 0:  # a negative scale marks little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(f, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    # PFM stores rows bottom-up; flip to the conventional top-down order.
    return np.flipud(np.reshape(data, shape)).astype(np.float32), scale
def save(fname, image, scale=1):
    """Save *image* as a PFM file.

    Args:
        fname: destination path.
        image: float32 array, shape (H, W, 3) for colour or (H, W) /
            (H, W, 1) for greyscale.
        scale: scale factor for the header; written negated when the data
            is little-endian, per the PFM convention.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')

    if len(image.shape) == 3 and image.shape[2] == 3:  # colour image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    # Fix: open in binary mode.  The header is ASCII but the payload is raw
    # floats, and a text-mode handle rejects/mangles the bytes on Python 3;
    # the header strings are encoded explicitly instead.
    with open(fname, 'wb') as f:
        f.write(('PF\n' if color else 'Pf\n').encode('ascii'))
        f.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode('ascii'))

        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        f.write(('%f\n' % scale).encode('ascii'))

        # PFM rows run bottom-up, so flip before dumping.
        np.flipud(image).tofile(f)
def show(img):
    """Display *img* as a greyscale matplotlib figure (blocks until closed)."""
    plt.imshow(img.astype(np.float32), cmap='gray')
    plt.show()
| kbatsos/CBMV | pylibs/pfmutil.py | pfmutil.py | py | 1,781 | python | en | code | 52 | github-code | 6 | [
{
"api_name": "re.match",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.flipud",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number... |
4669072111 | from Bio.Seq import Seq
def get_pattern_count(text, pattern):
    """Count occurrences of *pattern* in *text*, counting overlapping hits."""
    return Seq(text).count_overlap(pattern)
# Read the genome and the clump parameters (k-mer length, window length L,
# minimum count t) from the Rosalind input file.
with open('rosalind_ba1e.txt') as handle:
    genome = handle.readline().rstrip()
    k, l, t = (int(field) for field in handle.readline().rstrip().split(' '))

# Slide a window of length L across the genome; every k-mer that occurs at
# least t times (overlaps included) inside some window forms a clump.
clump = []
for start in range(len(genome) - l + 1):
    window = genome[start:start + l]
    for offset in range(len(window) - k + 1):
        kmer = window[offset:offset + k]
        if kmer not in clump and get_pattern_count(window, kmer) >= t:
            clump.append(kmer)
            print(kmer)

output = ' '.join(clump)
print(output)
with open('output.txt', 'w') as handle:
    handle.write(output)
| Partha-Sarker/Rosalind-Problems | Lab Assignment - 1/chapter 1/ba1e Find Patterns Forming Clumps in a String.py | ba1e Find Patterns Forming Clumps in a String.py | py | 802 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Bio.Seq.Seq",
"line_number": 5,
"usage_type": "call"
}
] |
3977389831 | import psycopg2
import csv
from db.create_connection import create_connection as create_connection
def import_menu_from_csv():
    """Load rows from menu.csv into the menu table.

    Expects a UTF-8 menu.csv with "name", "type" and "price" columns; each
    row is inserted with a parameterised query and the whole batch is
    committed in one transaction.
    """
    conn = create_connection()
    try:
        cursor = conn.cursor()
        try:
            with open("menu.csv", mode="r", encoding="utf-8") as csv_file:
                csv_reader = csv.DictReader(csv_file)
                for row in csv_reader:
                    cursor.execute("""
                INSERT INTO menu (name, type, price)
                VALUES (%s, %s, %s)
            """, (row["name"], row["type"], row["price"]))
            conn.commit()
        finally:
            # Fix: the cursor is now closed before its connection (it was the
            # other way round) ...
            cursor.close()
    finally:
        # ... and both are released even when an insert or the commit raises
        # (previously the connection leaked on any exception).
        conn.close()
def menu_cleaning():
try:
conn = create_connection()
cursor = conn.cursor()
cursor.execute("DELETE FROM menu;")
conn.commit()
deleted_rows = cursor.rowcount
return deleted_rows > 0
except psycopg2.Error as e:
conn.rollback()
raise e
finally:
if conn is not None:
conn.close()
cursor.close() | Tolik1923/restaurantordertaker | Back-end/db/exsport_menu.py | exsport_menu.py | py | 1,024 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "db.create_connection.create_connection",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "db.create_connection.create_connection",
"line_number": 23,
"usage_type": "call"
}... |
30950783677 | import numpy as np
import sys
import matplotlib.pyplot as plt
sys.path.append('../../analysis_scripts')
from dumpfile import DumpFile
from pickle_dump import save_obj, load_obj
from spatialcorrelations import calculate_items
if __name__ == "__main__":
    # Density label (first CLI argument) selecting which pickle files to plot.
    rho = sys.argv[1]
    # Propulsion-force values; all but fp=0 are currently disabled.
    fps = np.array([0])#,1,5,10,20,40,60,80,100])
    load_prefix = '../raw_data_processing/pickled_data/'
    for fp in fps:
        dc_name = load_prefix + f'ret_o_{fp}_{rho}'
        # NOTE(review): assumes the pickle holds 'sum_g' (accumulated pair
        # correlation samples, column 0 = r, column 1 = g) and 'g_cnt'
        # (number of samples) — confirm against the raw_data_processing scripts.
        ret_o = load_obj(dc_name)
        gmatrix = ret_o['sum_g']
        Nsamples = ret_o['g_cnt']
        # Average the accumulated sums over the number of samples.
        rs = gmatrix[:,0]/Nsamples
        gs = gmatrix[:,1]/Nsamples
        plt.plot(rs,gs)
    plt.show()
| samueljmcameron/ABPs_coarse_graining | experiments/2020_03_19/correlations/plot_correlations.py | plot_correlations.py | py | 686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_num... |
25692788695 | # !/urs/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: LAGOU Spider
@author: Troy
@email: ots239ltfok@gmail.com
"""
# 项目构架:
# p1: 依据搜索关键词 城市 职业, 爬取索引页, 解析并获取相关岗位url接连
# p2: 解析url链接, 获取数据
# p3: 存储到MongoDB
# 技术路径: requests urllib json re pq pymongo
import requests
from requests.exceptions import ConnectionError
from pyquery import PyQuery as pq
import urllib
import json
import pymongo
import numpy as np
import time
from config import *
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
proxy = None
def started_search_url(start_url, page):
    """POST one page of LaGou job-search results and return the job list.

    Args:
        start_url: base positionAjax URL; the query string is appended here.
        page: 1-based result page number, sent as form field "pn".

    Returns:
        The list of job dicts from the JSON response, or None on a non-200
        status or connection error.
    """
    # LaGou rejects requests without a browser-like header set and a valid
    # session cookie/referer (COOKIE and REFERER come from config.py).
    headers = {
        'Accept' : 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding' : 'gzip, deflate, br',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Cache-Control' : 'no-cache',
        'Connection' : 'keep-alive',
        'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie' : COOKIE,
        'Host' : 'www.lagou.com',
        'Origin' : 'https://www.lagou.com',
        'Pragma' : 'no-cache',
        'Referer' : REFERER,
        'User-Agent' : 'Mozilla/5.0 Chrome/58.0.3029.81 Safari/537.36',
    }
    # City / search keyword are configured in config.py.
    query_parameters = {
        'city' : CITY,
        'needAddtionalResult' : 'false',
        'isSchoolJob' : '0'
    }
    form_data = {
        'first' : 'false',
        'pn' : page,
        'kd' : KEYWORD
    }
    url = start_url + urllib.parse.urlencode(query_parameters)
    try:
        res = requests.post(url, headers=headers, data=form_data, allow_redirects=False)
        if res.status_code == 200:
            print('get succeed 200, page:', page)
            res.encoding = res.apparent_encoding
            res = json.loads(res.text)
            # Job postings live under content.positionResult.result.
            return res['content']['positionResult']['result']
        else:
            print('get failed, status code:', res.status_code)
            return None
    except ConnectionError as e:
        print('requests error:', e.args)
        return None
def get_base_data(data):
    """Flatten one job posting from the search API into a Mongo-ready dict.

    Also fetches the posting's detail page to extract the full description.

    Args:
        data: one element of the "result" list from started_search_url.

    Returns:
        A dict ready for save_to_mongoDB, or None when the input is
        malformed (indexing a non-dict raises TypeError).
    """
    try:
        companyId = data['companyId']
        companyFullName = data['companyFullName']
        companyShortName = data['companyShortName']
        companySize = data['companySize']
        positionAdvantage = data['positionAdvantage']
        city = data['city']
        latitude = data['latitude']
        longitude = data['longitude']
        stationname = data['stationname']
        subwayline = data['subwayline']
        financeStage = data['financeStage']
        positionName = data['positionName']
        firstType = data['firstType']
        secondType = data['secondType']
        workYear = data['workYear']
        education = data['education']
        district = data['district']
        salary = data['salary']
        positionLables = data['positionLables']
        positionId = data['positionId']
        # The full description only exists on the per-job detail page.
        html = request_index_search(positionId)
        position_description = parse_url_detail(html)
        result = {
            'companyId' : companyId,
            'companyFullName' : companyFullName,
            'companyShortName' : companyShortName,
            # Fix: companySize and city were extracted above but never stored.
            'companySize' : companySize,
            'city' : city,
            'positionAdvantage' : positionAdvantage,
            'latitude' : latitude,
            'longitude' : longitude,
            'stationname' : stationname,
            'subwayline' : subwayline,
            'financeStage' : financeStage,
            'positionName' : positionName,
            'firstType' : firstType,
            'secondType' : secondType,
            'workyear' : workYear,
            'education' : education,
            'district' : district,
            'salary' : salary,
            'positionLables' : positionLables,
            'positionId' : positionId,
            'position_description' : position_description
        }
        return result
    except TypeError :
        print('data get error')
        return None
def get_proxy():
    """Fetch a fresh proxy address from the proxy pool, or None on failure."""
    try:
        response = requests.get(PROXIES_URL)
    except ConnectionError:
        return None
    return response.text if response.status_code == 200 else None
def request_index_search(positionId):
    """GET a LaGou job-detail page, rotating proxies on a 302 redirect.

    LaGou answers 302 when it decides a client is a bot, so on 302 a new
    proxy is fetched into the module-global `proxy` and the request is
    retried recursively.  Returns the page HTML on success, otherwise None.
    """
    global proxy
    url = 'https://www.lagou.com/jobs/{}.html'.format(positionId)
    headers = {
        'Accept' : 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding' : 'gzip, deflate, br',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Cache-Control' : 'no-cache',
        'Connection' : 'keep-alive',
        'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie' : COOKIE,
        'Host' : 'www.lagou.com',
        'Pragma' : 'no-cache',
        'User-Agent' : 'Mozilla/5.0 Chrome/58.0.3029.81 Safari/537.36'
    }
    try:
        if proxy:
            res = requests.get(url, headers=headers,
                               proxies={'https' : 'https://' + proxy},
                               allow_redirects=False)
        else:
            res = requests.get(url, headers=headers, allow_redirects=False)
        print('Res.status_code:', res.status_code)
        if res.status_code == 200:
            print('get detail url succeed', url)
            res.encoding = res.apparent_encoding
            return res.text
        if res.status_code == 302:
            print('chunkError', res.status_code, url)
            proxy = get_proxy()
            if proxy:
                return request_index_search(positionId)
            print('proxy is fail')
            return None
    except ConnectionError as e:
        print('get url error:', e.args, url)
        return None
def parse_url_detail(html):
    """Extract the job-description text from a detail page's HTML."""
    return pq(html)('#job_detail > dd.job_bt > div').text()
def save_to_mongoDB(result):
    """Upsert one job dict into MongoDB, keyed by its positionId."""
    # Fix: Collection.update() was deprecated and removed in PyMongo 4;
    # update_one(..., upsert=True) gives the same insert-or-update behaviour.
    outcome = db[MONGO_TABLE].update_one(
        {'positionId' : result['positionId']}, {'$set' : result}, upsert=True)
    if outcome.acknowledged:
        print('save to mongoDB Succeed', result)
    else:
        print('save to mongoDB Failed', result)
def main(pn):
    """Scrape one search-result page and persist every job found on it."""
    # Fix: np.random.randint(0.1, 1) always produced 0 (integer bounds), so
    # there was effectively no politeness delay; uniform() gives the intended
    # random 0.1-1 second pause between pages.
    time.sleep(np.random.uniform(0.1, 1))
    datas = started_search_url(start_url=START_URL, page=pn)
    print(datas)
    if not datas:
        # started_search_url returns None on failure; the old code crashed
        # here with "'NoneType' object is not iterable".
        return
    for data in datas:
        result = get_base_data(data)
        if result is not None:  # get_base_data returns None on malformed rows
            save_to_mongoDB(result)
# Scrape result pages 1-19 sequentially when run as a script.
if __name__ == '__main__':
    for pn in range(1, 20):
        main(pn)
| Troysps/spider | lagou/spider.py | spider.py | py | 6,376 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "requests... |
17657067303 | from tkinter import *
import pygame
from tkinter import filedialog
import time
from mutagen.mp3 import MP3
import random
from AudioFile import AudioFile, Song, Podcast
from Exceptions import *
from Playlist import Playlist
from Artist import Artist
from User import User
from LastFmConnection import LastFmConnection
from GUI import GUI
from tkinter import simpledialog
from tkinter import StringVar
from tkinter.scrolledtext import ScrolledText
from urllib.request import urlopen
from suds.client import Client
from PIL import Image, ImageTk
import requests
from io import BytesIO
# --- Tk root window and audio mixer setup (runs at import time) ---
root = Tk()
root.title('Play Mode')
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
root.iconbitmap('D:\\computer\\cs@aut\\term2\\AP\\music player\\icons\\title-icon.ico')
root.geometry("500x350")
pygame.mixer.init()
# Display names of queued songs (no directory prefix or .mp3 extension).
songs_list = []
# NOTE(review): assigned here but never read anywhere in this file.
n_shuffle = None
class MusicPlayer():
    """Application controller: owns the audio library, playlists and GUI.

    NOTE(review): several methods below reference ``ET`` (ElementTree), but
    xml.etree.ElementTree is never imported in this module, so the XML
    import/export methods raise NameError as written.
    """

    # NOTE(review): class-level mutable attribute — shared by every instance.
    _audioList = []
    _masterPlaylistName = "Main Library"

    def __init__(self):
        """Seed demo songs, import a few via Last.fm, then launch the GUI."""
        self._playlists = []
        self.currentPlaylist = None
        self.currentSong = None
        self.currentUser = None
        # Demo library (Song signature comes from AudioFile — not visible here).
        self._audioList.append(Song(None, "Darude Sandstorm", rating=2))
        self._audioList.append(Song(None, "Baby Dont Hurt Me", rating=1))
        self._audioList.append(Song(None, "I Want To Break Free", rating=4))
        self.newPlaylist(self._masterPlaylistName, self._audioList)
        self.importSongWithREST("Sweet Mountain River", "Monster Truck")
        self.importSongWithREST("Aural Psynapse", "deadmau5")
        self.importSongWithREST("Piano Man", "Billy Joel")
        self.importSongWithREST("Best Of You", "Foo Fighters")
        self.importSongWithREST("One More Time", "Daft Punk")
        self.gui = GUI()
        self.gui.startGUI(self)

    def loadUserInformation(self):
        """Replace the library/playlists with currentUser's saved state."""
        self._audioList = self.currentUser.allAudioFiles
        self._playlists = self.currentUser.allPlaylists
        self.gui.updatePlaylistBox()

    def saveUserInformation(self):
        """Persist the library/playlists to currentUser, if one is loaded."""
        if (self.currentUser != None):
            self.currentUser.saveUser(self._audioList, self._playlists)
            self.gui.displayMessage("User saved as: " + self.currentUser.name)
        else:
            self.gui.displayMessage("You must first load or create a new user!")

    def newPlaylist(self, name:str = None, songs:list = None):
        """Create a playlist (optionally pre-filled) and register it."""
        # NOTE(review): local variable shadows the method name.
        newPlaylist = Playlist(name)
        if (songs != None):
            for s in songs:
                newPlaylist.addAudio(s)
        self._playlists.append(newPlaylist)
        print("DEBUG: playlist created:" + newPlaylist.name)

    def newSong(self, response):
        """Build a Song from a [name, artist, rating] list and add it.

        NOTE(review): returns None in every branch, yet loadPlaylistXML
        uses its return value as a song.
        """
        if (response != None):
            newSong = Song(None, response[0])
            if (response[1] != ''):
                newSong.artist = Artist(response[1])
            # Rating accepted only in the 1-5 range.
            if (response[2] != '' and int(response[2]) > 0 and int(response[2]) <= 5):
                newSong.rating = int(response[2])
            self.addAudioToMasterList(newSong)
            self.gui.focusMasterPlaylist()
        else:
            self.gui.displayMessage("Incorrect or Missing Song Information!")

    def getPlaylist(self, getN:str):
        """Return the playlist named *getN*; raise NotFoundException otherwise."""
        for p in self._playlists:
            if (p.name == getN):
                return p
        raise NotFoundException("Playlist not found.")

    def getAudio(self, sName:str, detail = None):
        """Find audio by name; *detail* disambiguates (artist or episode).

        Raises NotFoundException when nothing matches.
        """
        for s in self._audioList:
            if (s.name == sName):
                if (detail == None):
                    return s
                elif (type(s) is Song and s.artist.name == str(detail)):
                    return s
                elif (type(s) is Podcast and s.episode == int(detail)):
                    return s
        raise NotFoundException("Audio not found.")

    def deleteAudio(self, audio:AudioFile):
        """Remove *audio* from every playlist and from the master list."""
        for p in self._playlists:
            for s in p.songList:
                if (s == audio):
                    p.songList.remove(s)
        self._audioList.remove(audio)
        self.gui.displayMessage("Song Deleted!")

    def addAudioToMasterList(self, audio:AudioFile):
        """Append *audio* to the library and to the master playlist."""
        self._audioList.append(audio)
        self.getPlaylist(self._masterPlaylistName).addAudio(audio)

    def savePlaylistXML(self):
        """Export the current playlist to <name>.xml (requires ET import)."""
        root = ET.Element("root")
        for song in self.currentPlaylist.songList:
            song.addXML(root)
        print(ET.tostring(root, encoding='utf8').decode('utf8'))
        tree = ET.ElementTree(root)
        tree.write((self.currentPlaylist.name + ".xml"))
        self.gui.displayMessage("Playlist successfully exported!")

    def loadPlaylistXML(self, name):
        """Import <name>.xml as a new playlist, creating missing songs."""
        try:
            self.getPlaylist(name)
            self.gui.displayMessage("Playlist already created with that name.")
        except NotFoundException:
            playlistTree = ET.parse(name + ".xml")
            root = playlistTree.getroot()
            newPlaylist = Playlist(name)
            for child in root:
                try:
                    song = self.getAudio(child[0].text, child[2].text)
                    newPlaylist.addAudio(song)
                except NotFoundException:
                    # NOTE(review): newSong() returns None, so this appends
                    # None to the master list before re-fetching the song.
                    song = self.newSong([child[0].text, child[2].text, child[1].text])
                    self.addAudioToMasterList(song)
                    newPlaylist.addAudio(self.getAudio(child[0].text, child[2].text))
            self._playlists.append(newPlaylist)
            print("DEBUG: playlist created:" + newPlaylist.name)
            self.gui.updatePlaylistBox()
            self.gui.displayMessage("Playlist " + name + " successfully imported!")

    def importSongWithREST(self, songTitle, songArtist):
        """Look a song up on Last.fm and add it; return a status string."""
        try:
            c = LastFmConnection()
            details = c.getSongDetails(songTitle, songArtist)
        except LastFMException as e:
            return ( "Error: LastFM error code " + str(e.code) )
        except GenericConnectionException:
            return ("Error: Unable to establish connection..")
        newSong = Song(details[0], details[1], Artist(details[2]))
        self.addAudioToMasterList(newSong)
        return ("Song successfully imported!")

    @property
    def playlists(self):
        return self._playlists

    @playlists.setter
    def playlists(self, playlists:str):
        self._playlists = playlists

    @property
    def audioList(self):
        # NOTE(review): returns self._playlists, not self._audioList —
        # almost certainly a copy-paste bug.
        return self._playlists

    @audioList.setter
    def audioList(self, audioList:str):
        self._audioList = audioList

    @property
    def masterPlaylistName(self):
        return self._masterPlaylistName

    @masterPlaylistName.setter
    def masterPlaylistName(self, masterPlaylistName:str):
        self._masterPlaylistName = masterPlaylistName
mp = MusicPlayer()
class Song(object):
    """Simple record of a track's metadata.

    NOTE(review): this definition shadows the ``Song`` class imported from
    ``AudioFile`` at the top of the module.
    """

    def __init__(self, title, artist, genre):
        self.title, self.artist, self.genre = title, artist, genre

    # Accessor methods kept for compatibility with existing callers.
    def get_title(self):
        return self.title

    def get_artist(self):
        return self.artist

    def get_genre(self):
        return self.genre
def choose_directory():
    """Prompt for the directory holding the mp3 files; stored module-wide
    in ``folder_selected`` (other functions fail until this has been run)."""
    global folder_selected
    folder_selected = filedialog.askdirectory()
def add_song():
    """Let the user pick one audio file and append it to the queue/listbox."""
    path = filedialog.askopenfilename(initialdir='audio/', title="Choose A Song", filetypes=(("mp3 Files", "*.mp3"), ("WAV Files","*.WAV"),))
    # Reduce the absolute path to a bare display name.
    name = path.replace(folder_selected, "").replace("/", "").replace(".mp3", "")
    song_box.insert(END, name)
    songs_list.append(name)
def add_many_songs():
    """Let the user pick several mp3 files and queue them all."""
    paths = filedialog.askopenfilenames(initialdir='audio/', title="Choose A Song", filetypes=(("mp3 Files", "*.mp3"), ))
    for path in paths:
        # Reduce each absolute path to a bare display name.
        name = path.replace(folder_selected, "").replace("/", "").replace(".mp3", "")
        songs_list.append(name)
        song_box.insert(END, name)
def play_time():
    """Refresh the status bar with elapsed/total time, re-arming every second."""
    current_time = pygame.mixer.music.get_pos() / 1000
    converted_current_time = time.strftime('%M:%S', time.gmtime(current_time))
    song = song_box.get(ACTIVE)
    # Fix: the path separator was missing, producing e.g. "C:/musicsong.mp3".
    song = folder_selected + '/' + song + '.mp3'
    song_mut = MP3(song)
    # Fix: mutagen exposes the duration as .length (lowercase); ".Length"
    # raised AttributeError.
    song_length = song_mut.info.length
    converted_song_length = time.strftime('%M:%S', time.gmtime(song_length))
    status_bar.config(text= f'Time Elapsed: {converted_current_time} of {converted_song_length} ')
    status_bar.after(1000, play_time)
# Fix: play_time() was called here at import time, before song_box and
# status_bar are created further down the file, which raised NameError.
# The refresh loop should instead be started once playback begins.
def play():
    """Load the highlighted queue entry and start playback from the top."""
    track = folder_selected + '/' + song_box.get(ACTIVE) + '.mp3'
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(loops=0)
def play_my_song(song):
    """Load and play an already-resolved audio file path once."""
    pygame.mixer.music.load(song)
    pygame.mixer.music.play(loops=0)
def stop():
    """Halt playback, clear the listbox selection and blank the status bar."""
    pygame.mixer.music.stop()
    song_box.selection_clear(ACTIVE)
    status_bar.config(text='')
def next_song():
    """Advance to the entry after the current selection and play it.

    NOTE(review): raises IndexError when nothing is selected or the last
    entry is playing (no wrap-around) — unchanged from the original.
    """
    index = song_box.curselection()[0] + 1
    track = folder_selected + '/' + song_box.get(index) + '.mp3'
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(loops=0)
    # Move the listbox highlight onto the new entry.
    song_box.selection_clear(0, END)
    song_box.activate(index)
    song_box.selection_set(index, last=None)
def previous_song():
    """Step back to the entry before the current selection and play it.

    NOTE(review): at the first entry this computes index -1; the resulting
    Tk listbox behaviour is unverified — confirm before shipping.
    """
    index = song_box.curselection()[0] - 1
    track = folder_selected + '/' + song_box.get(index) + '.mp3'
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(loops=0)
    # Move the listbox highlight onto the new entry.
    song_box.selection_clear(0, END)
    song_box.activate(index)
    song_box.selection_set(index, last=None)
# Playback-pause flag toggled by pause().  NOTE(review): a module-level
# "global" statement is a no-op — it only has meaning inside a function.
global paused
paused = False
def delete_song():
    """Remove the anchored (clicked) entry from the listbox and stop playback.

    NOTE(review): songs_list is not updated here, so shuffle() can still
    pick an entry that was deleted from the listbox.
    """
    song_box.delete(ANCHOR)
    pygame.mixer.music.stop()
def delete_all_songs():
    """Clear the whole listbox and stop playback (songs_list is not cleared)."""
    song_box.delete(0, END)
    pygame.mixer.music.stop()
def pause(is_paused):
    """Toggle playback: unpause when currently paused, otherwise pause.

    The caller passes the current value of the module-level ``paused`` flag,
    and this function records the new state back into it.
    """
    global paused
    if is_paused:
        pygame.mixer.music.unpause()
        paused = False
    else:
        pygame.mixer.music.pause()
        paused = True
def shuffle():
    """Play one randomly chosen song from the queue immediately.

    Fixes over the original:
    - random.randint's upper bound is inclusive, so the old
      randint(0, len(songs_list) + 1) could index two past the end of the
      list and raise IndexError; random.choice cannot go out of range.
    - the old loop re-loaded a new random track len(songs_list) times
      back-to-back, so only the final pick was ever audible.
    """
    if not songs_list:
        return  # nothing queued yet
    track = folder_selected + '/' + random.choice(songs_list) + '.mp3'
    play_my_song(track)
def repeat():
    """Play the highlighted song on an endless loop (-1 = repeat forever)."""
    song = song_box.get(ACTIVE)
    song = folder_selected + '/' + song + '.mp3'
    pygame.mixer.music.load(song)
    pygame.mixer.music.play(-1)
def division_by_artist():
    """Placeholder: group the queue by artist (not implemented).

    NOTE(review): this name is rebound to a Menu widget near the bottom of
    the file, so this stub becomes unreachable after import.
    """
    pass
    # TODO: implement the artist grouping the original (Persian) note asks for.
def your_playlist():
    """Placeholder for a favourites-playlist feature (not implemented).

    NOTE(review): this name is also rebound to a Menu widget near the bottom
    of the file, so this stub becomes unreachable after import.
    """
    pass
# --- Widget construction and layout (runs at import time) ---
song_box = Listbox(root, bg="black", fg="red", width=60, selectbackground="red", selectforeground="black")
song_box.pack(pady=20)
# NOTE(review): hard-coded absolute Windows paths for every icon.
back_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/back.png')
stop_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/stop.png')
play_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/play.png')
pause_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/pause.png')
next_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/next.png')
shuffle_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/shuffle.png')
repeat_img = PhotoImage(file='D:/computer/cs@aut/term2/AP/music player/icons/repeat.png')
# Transport controls row.
controls_frame = Frame(root,pady=40)
controls_frame.pack()
back_button = Button(controls_frame, image=back_img, borderwidth=0, command= previous_song)
next_button = Button(controls_frame, image=next_img, borderwidth=0, command=next_song)
play_button = Button(controls_frame, image=play_img, borderwidth=0, command=play)
# Lambda defers reading the mutable module-level `paused` flag to click time.
pause_button = Button(controls_frame, image=pause_img, borderwidth=0, command=lambda: pause(paused))
stop_button = Button(controls_frame, image=stop_img, borderwidth=0, command=stop)
shuffle_button = Button(controls_frame, image=shuffle_img, borderwidth=0, command=shuffle)
repeat_button = Button(controls_frame, image=repeat_img, borderwidth=0, command=repeat)
back_button.grid(row=0, column=0, padx=10, pady=10)
stop_button.grid(row=0, column=1, padx=10, pady=10)
play_button.grid(row=0, column=2, padx=10, pady=10)
pause_button.grid(row=0, column=3, padx=10, pady=10)
next_button.grid(row=0, column=4, padx=10, pady=10)
shuffle_button.grid(row=0, column=5, padx=10, pady=10)
repeat_button.grid(row=0, column=6, padx=10, pady=10)
#menu part
my_menu = Menu(root)
root.config(menu=my_menu)
#choose directory for menu
choose_directory_menu = Menu(my_menu)
my_menu.add_cascade(label="Directory", menu=choose_directory_menu)
choose_directory_menu.add_command(label="Choose Directory", command=choose_directory)
#add song for menu
add_song_menu = Menu(my_menu)
my_menu.add_cascade(label="Add Songs", menu=add_song_menu)
add_song_menu.add_command(label="Add A Song To Queue", command=add_song)
add_song_menu.add_command(label="Add Many Songs To Queue", command=add_many_songs)
#remove song for menu
remove_song_menu = Menu(my_menu)
my_menu.add_cascade(label="remove Songs", menu=remove_song_menu)
remove_song_menu.add_command(label="delete A Song from Queue", command=delete_song)
remove_song_menu.add_command(label="delete All Songs from Queue", command=delete_all_songs)
status_bar = Label(root, text='', bd=1, relief=GROOVE,anchor=E )
status_bar.pack(fill=X, side=BOTTOM, ipady=2)
# NOTE(review): the next two rebindings shadow the division_by_artist and
# your_playlist functions defined above, making those stubs unreachable.
division_by_artist = Menu(my_menu)
# NOTE(review): this cascade attaches add_song_menu, not division_by_artist —
# likely a copy-paste mistake.
my_menu.add_cascade(label="Artists",menu=add_song_menu)
# NOTE(review): command= Song passes the class itself; clicking would call
# Song() with no arguments and raise TypeError.
division_by_artist.add_command(label="Artists", command= Song)
your_playlist = Menu(my_menu)
# NOTE(review): again attaches add_song_menu instead of your_playlist.
my_menu.add_cascade(label="fav music",menu=add_song_menu)
# NOTE(review): command = MusicPlayer would construct a whole second player.
your_playlist.add_command(label="select you're fav music", command = MusicPlayer)
root.mainloop() | ydamirkol/music-player | play mode3.py | play mode3.py | py | 13,889 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.mixer.init",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "AudioFile.Song",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "AudioFile.Song",
... |
69900225789 | import enum
from PySide2 import QtCore
from PySide2.QtCore import QPoint
from PySide2.QtGui import QColor, QFont, QFontDatabase
from PySide2.QtWidgets import QGraphicsSceneMouseEvent, QGraphicsItem
class NodeState(enum.Enum):
    """Visual state of a Node; mapped to a fill colour in Node.paint()."""
    normal = 0     # light green
    used = 1       # yellow
    highlight = 2  # cyan
class Node(QGraphicsItem):
    """Square graph node drawn with its (short) name centred inside it.

    The fill colour tracks ``self.state`` (a NodeState); callers are expected
    to set ``state`` and trigger a repaint — presumably from the owning graph
    widget, which is not visible here.
    """

    # Custom QGraphicsItem type id, per the Qt convention of offsetting
    # from UserType.
    Type = QGraphicsItem.UserType + 1

    def __init__(self, graphWidget, name: str, group_name: str, size=22):
        """Create a node of side *size* px labelled *name*.

        NOTE(review): *graphWidget* is accepted but never stored or used.
        """
        QGraphicsItem.__init__(self)
        self.state = NodeState.normal
        self.size = size
        self.fixedFont = QFont("Monospace")
        self.fixedFont.setStyleHint(QFont.TypeWriter)
        self.group_name = group_name
        self.name = name
        # Combined identifier, e.g. for lookups/tooltips by callers.
        self.tag = group_name + " " + self.name
        self.color = QColor('light green')
        # self.setFlag(QGraphicsItem.ItemIsMovable)
        # self.setFlag(QGraphicsItem.ItemIsSelectable)
        self.setCacheMode(self.DeviceCoordinateCache)
        self.setZValue(-1)

    def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
        """Debug-only handler: prints the state; the highlight logic is
        commented out, and the event is not forwarded to the base class."""
        # self.state=NodeState.highlight
        print(self.state)
        # self.color = QColor('cyan')
        # self.adjust()
        # self.update()
    #
    # def adjust(self):
    #     if self.state != NodeState.highlight:
    #         print("sd")
    #         if self.state == NodeState.normal:
    #             self.color = QColor('light green')
    #         elif self.state == NodeState.used:
    #             self.color = QColor('yellow')
    #         elif self.state == NodeState.highlight:
    #             self.color = QColor('cyan')
    #     print(("Ss"))

    def type(self):
        """Return the custom item type id (Qt item-type protocol)."""
        return Node.Type

    def boundingRect(self):
        """Square of side ``size`` centred on the item origin."""
        return QtCore.QRectF((self.size // 2) * -1, (self.size // 2) * -1, self.size, self.size)

    def paint(self, painter, option, widget):
        """Draw the filled square and the node name.

        The colour is recomputed from ``self.state`` on every repaint, so
        changing the state and calling update() is enough to recolour.
        """
        if self.state == NodeState.normal:
            self.color = QColor('light green')
        elif self.state == NodeState.used:
            self.color = QColor('yellow')
        elif self.state == NodeState.highlight:
            self.color = QColor('cyan')
        painter.setPen(QColor("black"))
        painter.setBrush(self.color)
        painter.drawRect((self.size // 2) * -1, (self.size // 2) * -1, self.size, self.size)
        painter.setPen(QColor("black"))
        painter.setFont(self.fixedFont)
        # Hand-tuned horizontal offsets to roughly centre 1-3 character
        # labels in the default 22 px box.
        if len(self.name) >= 3:
            textpoint = QPoint(-11, 3)
        elif len(self.name) >= 2:
            textpoint = QPoint(-7, 3)
        else:
            textpoint = QPoint(-4, 3)
        painter.drawText(textpoint, self.name)
| JIuH4/KB_V2 | ui_elements/graph_items/node.py | node.py | py | 2,560 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets.QGraphicsItem",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QGraphicsItem.UserType",
"line_number": 16,
"usage_type": "attribute"
}... |
8773605987 | import streamlit as st
from utils import get_modelpaths
from Scripts.video_processor import webcam_input
def main():
    """Render the model picker and start the webcam conversion loop."""
    st.title("Real-time Anime to Anime Converter")
    choices = ["AnimeGANv2_Hayao", "AnimeGANv2_Shinka", "AnimeGANv2_Paprika"]
    selection = st.selectbox("Select model name", choices)
    webcam_input(get_modelpaths(selection))
# Standard script entry point (streamlit also executes this on `streamlit run`).
if __name__ == "__main__":
    main() | avhishekpandey/RealTime_video-to-anime | app.py | app.py | py | 427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.selectbox",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.get_modelpaths",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Scripts.video... |
21325562870 | import pytest
from pysyncgateway import Database, Query
@pytest.fixture
def database(admin_client):
    """
    Returns:
        Database: 'db' database written to Sync Gateway.
    """
    db = Database(admin_client, 'db')
    db.create()
    return db
@pytest.fixture
def query(database):
    """
    Returns:
        Query: Not written to Sync Gateway.
    """
    # Construction alone does not hit Sync Gateway; tests must call
    # create_update() themselves if they need the design doc persisted.
    return Query(database, 'all_lists')
@pytest.fixture
def slow_view(database):
    """
    A view that returns all documents, but slowly. This uses a horrible
    sleep-like function that locks up Walrus for 1.5s per document. Fixture
    populates the database with a document to ensure that calling the
    view takes at least 1 second in total.
    NOTE: On Circle, it looks like processing the view might be done in
    parallel because it is able to return a view containing 2 documents in just
    over the time in the delay function.
    Returns:
        Query: Called 'slow_lists', written to Sync Gateway, with a single view
        called 'all' that takes 1.5 second per document in the database.
    """
    # Seed one document so the per-document delay is actually observable.
    database.get_document('a').create_update()
    query = Query(database, 'slow_lists')
    # The map function busy-waits ~1.5 s per document before emitting it.
    query.data = {
        'views': {
            'all': {
                'map':
                    """
                    function(doc, meta) {
                        function pausecomp(millis){
                            var date = new Date();
                            var curDate = null;
                            do { curDate = new Date(); }
                            while(curDate-date < millis);
                        }

                        pausecomp(1500);
                        emit(meta.id,doc);
                    }
                    """,
            },
        },
    }
    query.create_update()
    return query
@pytest.fixture
def food_query(database):
    """
    Writes a handful of documents (foods plus some non-food noise) to Sync
    Gateway, then creates a query over them. The view needs no warm-up pass
    because every document already exists when it is created.

    Returns:
        Query: With an 'all' view keyed on the first letter of each food's
        name, so a key search matches foods by their initial.
    """
    seed_documents = {
        'lightbulb': {'type': 'fixture', 'name': 'Lightbulb'},
        'apple': {'type': 'food', 'name': 'apple'},
        'banana': {'type': 'food', 'name': 'banana'},
        'apricot': {'type': 'food', 'name': 'apricot'},
        'walrus': {'type': 'animal', 'name': 'I AM THE WALRUS'},
        'almond': {'type': 'food', 'name': 'almond'},
        'pumpkin': {'type': 'food', 'name': 'pumpkin'},
    }
    for doc_id, payload in seed_documents.items():
        document = database.get_document(doc_id)
        document.data = payload
        document.create_update()
    query = Query(database, 'food_index')
    # Only documents whose type is "food" are indexed; the emitted key is the
    # first character of the food's name.
    query.data = {
        'views': {
            'all': {
                'map':
                    """
                    function(doc, meta) {
                        if(doc.type == "food" && doc.name) {
                            emit(doc.name[0], doc)
                        }
                    }
                    """,
            },
        },
    }
    query.create_update()
    return query
| constructpm/pysyncgateway | tests/query/conftest.py | conftest.py | py | 3,136 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pysyncgateway.Database",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pysyncgateway.Query",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pytest.... |
44701138323 | import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
def plot(samples):
    """Lay out a batch of generated images on a 4x4 matplotlib grid.

    Args:
        samples: array of shape (batch, H, W, C); assumes square images
            (H == W) and at most 16 samples -- TODO confirm at call sites.

    Returns:
        The matplotlib Figure holding the grid.
    """
    side = samples.shape[1]
    channels = samples.shape[3]
    is_rgb = channels == 3
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, image in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        if is_rgb:
            plt.imshow(image.reshape(side, side, channels))
        else:
            # Single-channel samples are rendered in reversed greyscale.
            plt.imshow(image.reshape(side, side), cmap='Greys_r')
    return fig
def generateSamples(out_dir, z_dim=100):
    """Restore every saved checkpoint under out_dir/model/ and save a 4x4 grid
    of generated samples per checkpoint into out_dir/generated/.

    Uses TF1 graph/session APIs; each .meta file is treated as one checkpoint.
    A fixed numpy seed makes the latent batch identical across checkpoints so
    the generated grids are directly comparable.
    """
    # fileNames=[]
    if not os.path.exists(out_dir+'/generated/'):
        os.makedirs(out_dir+'/generated/')
    # Walk the model directory; every .meta file marks a restorable checkpoint.
    for root, dirs, files in os.walk(out_dir+"/model/"):
        for filename in sorted(files):
            if os.path.splitext(filename)[1].lower() =='.meta':
                # Checkpoint prefix (path without the .meta extension).
                model=root+os.path.splitext(filename)[0]
                imageName=os.path.splitext(filename)[0]
                print(model)
                # fileNames.append(root+os.path.splitext(filename)[0])
                # Clear any graph left over from the previous checkpoint.
                tf.reset_default_graph()
                with tf.Session() as sess:
                    # z = tf.placeholder(tf.float32, shape=[None, z_dim])
                    # saver = tf.train.Saver()
                    saver=tf.train.import_meta_graph(model+'.meta')
                    saver.restore(sess, model)
                    graph=tf.get_default_graph()
                    # Look up the latent input and generator output tensors by
                    # the names they were saved under ('z', 'generator/final_gen').
                    tName1=graph.get_operation_by_name('z').name+':0'
                    z=graph.get_tensor_by_name(tName1)
                    tName2=graph.get_operation_by_name('generator/final_gen').name+':0'
                    gen=graph.get_tensor_by_name(tName2)
                    # Fixed seed -> identical latent batch for every checkpoint.
                    np.random.seed(42)
                    batch_z = np.random.normal(-1.0, 1.0, size=[16, z_dim]).astype(np.float32)
                    samples = sess.run(gen, feed_dict={z: batch_z})
                    fig = plot(samples)
                    plt.savefig(out_dir+'/generated/{}.png'
                                .format(imageName), bbox_inches='tight')
                    plt.show()
                    plt.close()
| adityagarg/improvedWGANs | utils.py | utils.py | py | 2,488 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 13,
"usage_type": "call"
},
{
"api_name"... |
74055844987 | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from collections import namedtuple
from .set2set import Set2Vec
# Configuration bundle consumed by every Readout variant:
#   hidden_dim         - width of the per-node hidden state fed into the readout
#   readout_hidden_dim - width of the readout MLP's intermediate layer
#   mode               - 'clf' marks a classification head (see Readout.__init__)
#   target_dim         - dimensionality of the prediction target
ReadoutConfig = namedtuple(
    'ReadoutConfig', [
        'hidden_dim',
        'readout_hidden_dim',
        'mode',
        'target_dim',
    ]
)
class Readout(nn.Module):
    """Base class for graph readout heads.

    Copies the relevant fields off the supplied ReadoutConfig-style object;
    concrete subclasses implement forward() to map node states to predictions.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        # 'clf' marks a classification head; anything else is treated as regression.
        self.classify = config.mode == 'clf'
        self.hidden_dim = config.hidden_dim
        self.readout_hidden_dim = config.readout_hidden_dim
        self.target_dim = config.target_dim
        # Stored as a class (not an instance) so subclasses instantiate it per layer.
        self.activation = nn.LeakyReLU
    def forward(self, G):
        # Intentionally a no-op; concrete readouts override this.
        pass
class DTNNReadout(Readout):
    """DTNN-style readout: a shared MLP per node followed by a sum over the graph."""
    def __init__(self, config):
        super().__init__(config)
        self.net = nn.Sequential(
            nn.Linear(self.hidden_dim, self.readout_hidden_dim),
            self.activation(),
            nn.BatchNorm1d(self.readout_hidden_dim),
            nn.Linear(self.readout_hidden_dim, self.target_dim),
        )
    def forward(self, h):
        # h: (batch, graph_size, hidden) -> flatten nodes so BatchNorm1d sees 2-D input.
        batch, graph_size, hidden = h.size()
        per_node = self.net(h.view(-1, hidden))
        # Restore the graph axis, then pool node outputs by summation.
        return per_node.view(batch, graph_size, -1).sum(1)
class FullyConnectedReadout(Readout):
    """Readout that mean-pools node states first, then applies an MLP."""
    def __init__(self, config):
        super().__init__(config)
        self.net = nn.Sequential(
            nn.Linear(self.hidden_dim, self.readout_hidden_dim),
            self.activation(),
            nn.BatchNorm1d(self.readout_hidden_dim),
            nn.Linear(self.readout_hidden_dim, self.target_dim),
        )
    def forward(self, h):
        # Average over the node axis before projecting to the target space.
        pooled = torch.mean(h, 1)
        return self.net(pooled)
class SetReadout(Readout):
    """Readout backed by a Set2Vec aggregator (order-invariant set pooling)."""
    def __init__(self, config):
        super().__init__(config)
        self.set2vec = Set2Vec(self.hidden_dim, self.target_dim, config.readout_hidden_dim)
    def forward(self, h):
        # h holds the batch of node states; Set2Vec aggregates them per graph.
        x = self.set2vec(h)
        return x
class VCNReadout(Readout):
    """Per-target readout over a networkx-style graph.

    NOTE(review): relies on ``self.target_names``, which is never set by
    ``Readout.__init__`` (the config carries only dims/mode) -- as written this
    raises AttributeError unless a subclass or caller injects the attribute;
    confirm intent. It also uses the pre-2.0 networkx ``G.node`` accessor --
    verify the networkx version in use.
    """
    def __init__(self, config):
        super().__init__(config)
        self.module_list = nn.ModuleList()
        # One linear head per named target -- target.dim/target.name schema
        # is assumed from usage here; TODO confirm where targets are defined.
        for target in self.target_names:
            self.module_list.append(nn.Linear(self.hidden_dim, target.dim))
    def forward(self, G):
        # Gather each vertex's hidden state off the graph's node attributes.
        h_dict = {v: G.node[v]['hidden'] for v in G.nodes()}
        out = {}
        for i, target in enumerate(self.target_names):
            out[target.name] = self.module_list[i](h_dict[target.name])
        return out
class VertexReadout(Readout):
    """Per-vertex readout: applies a shared MLP to every node independently.

    forward() takes h of shape (batch, graph_size, hidden_dim) and returns a
    tensor of shape (batch, graph_size, target_dim) -- one prediction per node,
    with no pooling over the graph.
    """
    def __init__(self, config):
        super().__init__(config)
        # BUG FIX: the net receives flattened 2-D activations (N, features), so
        # the normalization layer must be BatchNorm1d. BatchNorm2d requires 4-D
        # (N, C, H, W) input and made forward() raise; BatchNorm1d also matches
        # DTNNReadout, which uses the identical view(-1, dd) shape flow.
        net = nn.Sequential(
            nn.Linear(self.hidden_dim, self.readout_hidden_dim),
            self.activation(),
            nn.BatchNorm1d(self.readout_hidden_dim),
            nn.Linear(self.readout_hidden_dim, self.target_dim),
        )
        self.net = net
    def forward(self, h):
        bs, gd, dd = (s for s in h.size())
        # Fold the graph axis into the batch axis so every node passes through the MLP.
        x = h.view(-1, dd)
        x = self.net(x)
        # Unfold back to (batch, graph_size, target_dim).
        x = x.view(bs, gd, -1)
        return x
def make_readout(readout_config):
    """Factory: build the Readout subclass named by readout_config.function.

    Raises:
        ValueError: if the function name is not one of the known readouts.
    """
    builders = {
        'fully_connected': FullyConnectedReadout,
        'dtnn': DTNNReadout,
        'vcn': VCNReadout,
        'vertex': VertexReadout,
        'set': SetReadout,
    }
    fn = readout_config.function
    if fn not in builders:
        raise ValueError("Unsupported readout function! ({})".format(fn))
    return builders[fn](readout_config.config)
| isaachenrion/gcn | models/mpnn/readout/readout.py | readout.py | py | 3,763 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReL... |
22755470032 | from collections import namedtuple
import time
from .utils import (
client_array_operation,
make_valid_data,
create_host_urn,
create_resource_arn,
create_hash,
set_required_access_v2,
transformation,
ipaddress_to_urn
)
from .registry import RegisteredResourceCollector
from schematics import Model
from schematics.types import StringType, ModelType, ListType, BooleanType
InstanceData = namedtuple("InstanceData", ["instance", "instance_type"])
class Tag(Model):
    """Schema for a single AWS resource tag (required Key/Value string pair)."""
    Key = StringType(required=True)
    Value = StringType(required=True)
class Subnet(Model):
    """Schema for an EC2 subnet as returned by describe_subnets."""
    SubnetId = StringType(required=True)
    Tags = ListType(ModelType(Tag), default=[])
    AvailabilityZone = StringType(required=True)
    VpcId = StringType(required=True)
class Vpc(Model):
    """Schema for an EC2 VPC as returned by describe_vpcs."""
    VpcId = StringType(required=True)
    # True for the account's default VPC; used to name it "default" downstream.
    IsDefault = BooleanType(default=False)
    Tags = ListType(ModelType(Tag), default=[])
class SecurityGroup(Model):
    """Schema for an EC2 security group as returned by describe_security_groups."""
    # NOTE(review): "UKNOWN" looks like a typo for "UNKNOWN"; left as-is because
    # the value may already be persisted/compared downstream -- confirm first.
    GroupName = StringType(default="UKNOWN")
    GroupId = StringType(required=True)
    VpcId = StringType()
class VpnGateway(Model):
    """Schema for an EC2 VPN gateway, including its VPC attachments."""
    class VpnGatewayVpcAttachment(Model):
        VpcId = StringType(required=True)
        # Only attachments with State == "attached" produce relations downstream.
        State = StringType(default="UNKNOWN")
    VpnGatewayId = StringType(required=True)
    VpcAttachments = ListType(ModelType(VpnGatewayVpcAttachment), default=[])
class InstanceType(Model):
    """Schema for describe_instance_types output; Hypervisor drives the isNitro flag."""
    InstanceType = StringType(required=True)
    Hypervisor = StringType(default="")
class Instance(Model):
    """Schema for one EC2 instance from describe_instances reservations."""
    class InstanceState(Model):
        # e.g. "running", "stopped"; emitted as an event tag downstream.
        Name = StringType(required=True)
    class SecurityGroup(Model):
        GroupId = StringType(required=True)
    InstanceId = StringType(required=True)
    InstanceType = StringType(required=True)
    State = ModelType(InstanceState)
    Tags = ListType(ModelType(Tag), default=[])
    PrivateIpAddress = StringType()
    PublicDnsName = StringType()
    PublicIpAddress = StringType()
    SubnetId = StringType()
    VpcId = StringType()
    SecurityGroups = ListType(ModelType(SecurityGroup), default=[])
class RunInstances(Model):
    """Schema for a CloudTrail RunInstances (and similar) event payload,
    mirroring the nested responseElements.instancesSet.items structure."""
    class ResponseElements(Model):
        class InstancesSet(Model):
            class RunInstance(Model):
                instanceId = StringType(required=True)
            items = ListType(ModelType(RunInstance), required=True)
        instancesSet = ModelType(InstancesSet, required=True)
    responseElements = ModelType(ResponseElements, required=True)
class Ec2InstanceCollector(RegisteredResourceCollector):
API = "ec2"
API_TYPE = "regional"
COMPONENT_TYPE = "aws.ec2"
def __init__(self, location_info, client, agent):
RegisteredResourceCollector.__init__(self, location_info, client, agent)
self.instance_types = {}
def process_all(self, filter=None):
if not filter or "instances" in filter:
self.process_instances()
if not filter or "security_groups" in filter:
self.process_security_groups()
if not filter or "vpcs" in filter:
self.process_vpcs()
if not filter or "subnets" in filter:
self.process_subnets()
if not filter or "vpn_gateways" in filter:
self.process_vpn_gateways()
@set_required_access_v2("ec2:DescribeInstanceTypes")
def collect_instance_type(self, instance_type):
# Items never change, only added, safe to hold in memory
if instance_type not in self.instance_types:
instance_type_data = self.client.describe_instance_types(InstanceTypes=[instance_type]).get(
"InstanceTypes", []
)
if instance_type_data:
self.instance_types[instance_type] = instance_type_data[0]
return self.instance_types.get(instance_type, {})
def collect_instance(self, instance_data):
instance_type = instance_data.get("InstanceType", "")
instance_type_data = self.collect_instance_type(instance_type) or {}
return InstanceData(instance=instance_data, instance_type=instance_type_data)
def collect_instances(self, **kwargs):
for reservation in client_array_operation(
self.client,
"describe_instances",
"Reservations",
Filters=[
{
"Name": "instance-state-code", # Don't return terminated instances
"Values": [
"0", # pending
"16", # running
"32", # shutting-down
"64", # stopping
"80", # stopped
],
}
],
**kwargs
):
for instance_data in reservation.get("Instances", []):
yield self.collect_instance(instance_data)
@set_required_access_v2("ec2:DescribeInstances")
def process_instances(self, **kwargs):
for data in self.collect_instances(**kwargs):
self.process_instance(data)
def process_some_instances(self, ids):
self.process_instances(InstanceIds=ids)
@transformation()
def process_instance_type(self, data):
instance_type = InstanceType(data, strict=False)
instance_type.validate()
return instance_type
@transformation()
def process_instance(self, data):
instance = Instance(data.instance, strict=False)
instance.validate()
self.agent.event(
{
"timestamp": int(time.time()),
"event_type": "ec2_state",
"msg_title": "EC2 instance state",
"msg_text": instance.State.Name,
"host": instance.InstanceId,
"tags": ["state:" + instance.State.Name],
}
)
output = make_valid_data(data.instance)
urns = [
create_host_urn(instance.InstanceId),
create_resource_arn(
"ec2",
self.location_info.Location.AwsRegion,
self.location_info.Location.AwsAccount,
"instance",
instance.InstanceId,
),
]
if not instance.Tags:
output["Tags"] = []
output["Tags"].append({"Key": "host", "Value": instance.InstanceId})
output["Tags"].append({"Key": "instance-id", "Value": instance.InstanceId})
if instance.PrivateIpAddress:
urns.append(ipaddress_to_urn(instance.PrivateIpAddress, instance.VpcId))
output["Tags"].append({"Key": "private-ip", "Value": instance.PrivateIpAddress})
if instance.PublicDnsName:
urns.append(create_host_urn(instance.PublicDnsName))
output["Tags"].append({"Key": "fqdn", "Value": instance.PublicDnsName})
if instance.PublicIpAddress:
urns.append(create_host_urn(instance.PublicIpAddress))
output["Tags"].append({"Key": "public-ip", "Value": instance.PublicIpAddress})
output["URN"] = urns
if data.instance_type: # Don't run if instance type not found
instance_type = self.process_instance_type(data.instance_type)
output["isNitro"] = instance_type.Hypervisor == "nitro"
# Map the subnet and if not available then map the VPC
if instance.SubnetId:
self.emit_relation(instance.InstanceId, instance.SubnetId, "uses-service", {})
elif instance.VpcId: # pragma: no cover
self.emit_relation(instance.InstanceId, instance.VpcId, "uses-service", {})
for security_group in instance.SecurityGroups:
self.emit_relation(instance.InstanceId, security_group.GroupId, "uses-service", {})
self.emit_component(instance.InstanceId, "instance", output)
def collect_security_groups(self, **kwargs):
for security_group in client_array_operation(self.client,
"describe_security_groups",
"SecurityGroups",
**kwargs):
yield security_group
@set_required_access_v2("ec2:DescribeSecurityGroups")
def process_security_groups(self, **kwargs):
for security_group_data in self.collect_security_groups(**kwargs):
self.process_security_group(security_group_data)
@transformation()
def process_security_group(self, data):
security_group = SecurityGroup(data, strict=False)
security_group.validate()
output = make_valid_data(data)
output["Version"] = create_hash(output)
output["Name"] = security_group.GroupName
output["URN"] = [
create_resource_arn(
"ec2",
self.location_info.Location.AwsRegion,
self.location_info.Location.AwsAccount,
"security-group",
security_group.GroupId,
)
]
if security_group.VpcId: # pragma: no cover
self.emit_relation(security_group.VpcId, security_group.GroupId, "has-resource", {})
self.emit_component(security_group.GroupId, "security-group", output)
def collect_vpcs(self):
for vpc in client_array_operation(self.client, "describe_vpcs", "Vpcs"):
yield vpc
@set_required_access_v2("ec2:DescribeVpcs")
def process_vpcs(self):
for vpc_data in self.collect_vpcs():
self.process_vpc(vpc_data)
@transformation()
def process_vpc(self, data):
vpc = Vpc(data, strict=False)
vpc.validate()
output = make_valid_data(data)
# construct a name
vpc_name = vpc.VpcId
name_tag = [tag for tag in vpc.Tags if tag.Key == "Name"]
if vpc.IsDefault:
vpc_name = "default"
elif len(name_tag) > 0:
vpc_name = name_tag[0].Value
output["Name"] = vpc_name
# add a URN
output["URN"] = [
create_resource_arn(
"ec2", self.location_info.Location.AwsRegion, self.location_info.Location.AwsAccount, "vpc", vpc.VpcId
)
]
self.emit_component(vpc.VpcId, "vpc", output)
def collect_subnets(self):
for subnet in client_array_operation(self.client, "describe_subnets", "Subnets"):
yield subnet
@set_required_access_v2("ec2:DescribeSubnets")
def process_subnets(self):
for subnet_data in self.collect_subnets():
self.process_subnet(subnet_data)
@transformation()
def process_subnet(self, data):
subnet = Subnet(data, strict=False)
subnet.validate()
output = make_valid_data(data)
# construct a name
subnet_name = subnet.SubnetId
name_tag = [tag for tag in subnet.Tags if tag.Key == "Name"]
if len(name_tag) > 0:
subnet_name = name_tag[0].Value
if subnet.AvailabilityZone: # pragma: no cover
subnet_name = "{}-{}".format(subnet_name, subnet.AvailabilityZone)
output["Name"] = subnet_name
# add a URN
output["URN"] = [
create_resource_arn(
"ec2",
self.location_info.Location.AwsRegion,
self.location_info.Location.AwsAccount,
"subnet",
subnet.SubnetId,
)
]
self.emit_component(subnet.SubnetId, "subnet", output)
self.emit_relation(subnet.SubnetId, subnet.VpcId, "uses-service", {})
def collect_vpn_gateways(self):
for vpn_gateway in client_array_operation(self.client,
"describe_vpn_gateways",
"VpnGateways",
Filters=[{"Name": "state", "Values": ["pending", "available"]}]
):
yield vpn_gateway
@set_required_access_v2("ec2:DescribeVpnGateways")
def process_vpn_gateways(self):
for vpn_gateway_data in self.collect_vpn_gateways():
self.process_vpn_gateway(vpn_gateway_data)
@transformation()
def process_vpn_gateway(self, data):
vpn_gateway = VpnGateway(data, strict=False)
vpn_gateway.validate()
output = make_valid_data(data)
output["Name"] = vpn_gateway.VpnGatewayId
self.emit_component(vpn_gateway.VpnGatewayId, "vpn-gateway", output)
for vpn_attachment in vpn_gateway.VpcAttachments:
if vpn_attachment.State == "attached":
self.emit_relation(vpn_gateway.VpnGatewayId, vpn_attachment.VpcId, "uses-service", {})
@transformation()
def process_batch_instances(self, event, seen):
data = RunInstances(event, strict=False)
data.validate()
instance_ids = [
instance.instanceId
for instance in data.responseElements.instancesSet.items
if instance.instanceId not in seen
]
self.process_instances(InstanceIds=instance_ids)
seen.update(set(instance_ids))
def process_state_notification(self, event, seen):
instance_id = event.get("instance-id", "")
if instance_id not in seen:
seen.add(instance_id)
if event.get("state") == "terminated":
self.agent.delete(instance_id)
else:
self.process_instances(InstanceIds=[instance_id])
def process_one_instance(self, instance_id):
self.process_instances(InstanceIds=[instance_id])
def process_one_security_group(self, security_group_id):
self.process_security_groups(GroupIds=[security_group_id])
EVENT_SOURCE = "ec2.amazonaws.com"
CLOUDTRAIL_EVENTS = [
{"event_name": "RunInstances", "processor": process_batch_instances},
{"event_name": "StartInstances", "processor": process_batch_instances},
{"event_name": "StopInstances", "processor": process_batch_instances},
{"event_name": "TerminateInstances", "processor": process_batch_instances},
{"event_name": "InstanceStateChangeNotification", "processor": process_state_notification},
{"event_name": "AttachVolume", "path": "responseElements.instanceId", "processor": process_one_instance},
{"event_name": "DetachVolume", "path": "responseElements.instanceId", "processor": process_one_instance},
{
"event_name": "ModifyInstanceAttribute",
"path": "requestParameters.instanceId",
"processor": process_one_instance,
},
{
"event_name": "RevokeSecurityGroupIngress",
"path": "requestParameters.groupId",
"processor": process_one_security_group,
},
{
"event_name": "AuthorizeSecurityGroupIngress",
"path": "requestParameters.groupId",
"processor": process_one_security_group,
},
]
| StackVista/stackstate-agent-integrations | aws_topology/stackstate_checks/aws_topology/resources/ec2.py | ec2.py | py | 14,997 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "schematics.Model",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "schematics.types.StringType",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "s... |
28194386524 | from __future__ import print_function, division
import os
import time
import random
import numpy as np
from base import BaseModel
from replay_memory import ReplayMemory
from utils import save_pkl, load_pkl
import tensorflow as tf
import matplotlib.pyplot as plt
class Agent(BaseModel):
def __init__(self, config, environment, sess):
self.sess = sess
self.weight_dir = 'weight'
self.record_dir = 'record'
self.recordfile_name = ''
self.now_time = time.strftime("%m-%d-%H-%M",time.localtime(time.time()))
self.env = environment
model_dir = './Model/a.model'
self.memory = ReplayMemory(model_dir)
self.max_step = 100000
# The number of RB, The number of vehicle
self.RB_number = 20
self.num_vehicle = len(self.env.vehicles)
# The following two variables are used to store the transmission power
# and channel selection of each V2V link
# The one is used for testing, and the other is used for training
self.action_all_with_power = np.zeros([self.num_vehicle, 3, 2],
dtype='int32') # this is actions that taken by V2V links with power
self.action_all_with_power_training = np.zeros([self.num_vehicle, 3, 2],
dtype='int32')
self.reward = []
# Settings related to learning rate
self.learning_rate = 0.01 # 0.01
self.learning_rate_minimum = 0.0001
self.learning_rate_decay = 0.96
self.learning_rate_decay_step = 500000
# each 100 steps update the target_q network
self.target_q_update_step = 100 # 100
#Discount factor
self.discount = 0.5
self.double_q = True
self.build_dqn()
# The number of V2V links.
self.V2V_number = 3 * len(self.env.vehicles) # every vehicle need to communicate with 3 neighbors
self.training = True
# This function is used to store the transmit power and channel selected by each V2V link
# Store in an <"action"> matrix
def merge_action(self, idx, action):
self.action_all_with_power[idx[0], idx[1], 0] = action % self.RB_number
self.action_all_with_power[idx[0], idx[1], 1] = int(np.floor(action / self.RB_number))
def get_state(self, idx):
# ===============================
# Get State from the environment
# ===============================
vehicle_number = len(self.env.vehicles)
V2V_channel = (self.env.V2V_channels_with_fastfading[idx[0], self.env.vehicles[idx[0]].destinations[idx[1]],
:] - 80) / 60
V2I_channel = (self.env.V2I_channels_with_fastfading[idx[0], :] - 80) / 60
Eve_channel_I = (self.env.Eve_channels_with_fastfading_I[idx[0], :] - 80) / 60
Eve_channel_V = (self.env.Eve_channels_with_fastfading_V[idx[0], self.env.vehicles[idx[0]].destinations[idx[1]],
:] - 80) / 60
V2V_interference = (-self.env.V2V_Interference_all[idx[0], idx[1], :] - 60) / 60
# The <"NeiSelection"> representative RB occupation
NeiSelection = np.zeros(self.RB_number)
for i in range(3):
for j in range(3):
if self.training:
NeiSelection[self.action_all_with_power_training[self.env.vehicles[idx[0]].neighbors[i], j, 0]] = 1
else:
NeiSelection[self.action_all_with_power[self.env.vehicles[idx[0]].neighbors[i], j, 0]] = 1
for i in range(3):
if i == idx[1]:
continue
if self.training:
if self.action_all_with_power_training[idx[0], i, 0] >= 0:
NeiSelection[self.action_all_with_power_training[idx[0], i, 0]] = 1
else:
if self.action_all_with_power[idx[0], i, 0] >= 0:
NeiSelection[self.action_all_with_power[idx[0], i, 0]] = 1
# Status include V2I_channel, V2V_interference, V2V_channel, Eve_channel_I, Eve_channel_V, NeiSelection
return np.concatenate((V2I_channel, V2V_interference, V2V_channel, Eve_channel_I, Eve_channel_V, NeiSelection))
def predict(self, s_t, step, test_ep=False):
# ==========================
# Select actions
# ==========================
ep = 1 / (step / 1000000 + 1)
# Random selection or training selection
if random.random() < ep and test_ep is False: # epsion to balance the exporation and exploition
# Each number from 0 ~ 60 represents a choice
action = np.random.randint(60) # 20RBs X 3 power level
else:
action = self.q_action.eval({self.s_t: [s_t]})[0]
return action
# This function used for collcet data for training, and training a mini batch
def observe(self, prestate, state, reward, action):
# -----------
# Collect Data for Training and Experience replay
# ---------
self.memory.add(prestate, state, reward, action) # add the state and the action and the reward to the memory
# print(self.step)
if self.step > 0:
if self.step % 50 == 0:
# print('Training')
self.q_learning_mini_batch() # training a mini batch
# self.save_weight_to_pkl()
if self.step % self.target_q_update_step == self.target_q_update_step - 1:
# print("Update Target Q network:")
self.update_target_q_network() # update the Target-Q network parameter
def save_record(self, record_content):
if not os.path.exists(self.record_dir):
os.makedirs(self.record_dir)
if(self.recordfile_name == ''):
if(self.double_q == True and self.dueling_q == True):
self.recordfile_name = "double_q&dueling_q"
else:
if(self.double_q == True):
self.recordfile_name = "double_q"
else:
if(self.dueling_q == True):
self.recordfile_name = "dueling_q"
else:
self.recordfile_name = "normal_q"
with open(os.path.join(self.record_dir, "V-num-%d_%s-%s.txt" % \
(self.num_vehicle, self.now_time, self.recordfile_name)), 'a') as f:
f.write(record_content)
# The network training and testing funtion
def train(self):
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
max_avg_ep_reward = 0
ep_reward, actions = [], []
mean_big = 0
number_big = 0
mean_not_big = 0
number_not_big = 0
print(self.num_vehicle)
#!Step1: Start a new simulation environment
self.env.new_random_game(self.num_vehicle) # episode
for self.step in (range(0, 40000)): # need more configuration
#!Step2: Begin training, the tutal steps is 40000
# initialize set some varibles
if self.step == 0:
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
ep_reward, actions = [], []
# Restart a new simulation environment
if (self.step % 2000 == 1):
self.env.new_random_game(self.num_vehicle)
print(self.step)
state_old = self.get_state([0, 0])
# print("state", state_old)
self.training = True
for k in range(1):
for i in range(len(self.env.vehicles)):
for j in range(3):
#!Step3: Get training data for each pair of V2V links and training
# Include <"state_old, state_new, reward_train, action">
# Besides: The training a batch in <"observe"> function
state_old = self.get_state([i, j])
action = self.predict(state_old, self.step)
# self.merge_action([i,j], action)
self.action_all_with_power_training[i, j, 0] = action % self.RB_number
self.action_all_with_power_training[i, j, 1] = int(np.floor(action / self.RB_number))
reward_train = self.env.act_for_training(self.action_all_with_power_training, [i, j])
state_new = self.get_state([i, j])
self.observe(state_old, state_new, reward_train, action)
if (self.step % 2000 == 0) and (self.step > 0):
#!Step4: Testing
self.training = False
number_of_game = 10
if (self.step % 10000 == 0) and (self.step > 0):
number_of_game = 50
if (self.step == 38000):
number_of_game = 100
V2V_Eifficency_list = np.zeros(number_of_game)
V2I_Eifficency_list = np.zeros(number_of_game)
V2V_security_rate_list = np.zeros(number_of_game)
for game_idx in range(number_of_game):
self.env.new_random_game(self.num_vehicle)
test_sample = 200
Eifficency_V2V = []
Eifficency_V2I = []
Security_rate = []
print('test game idx:', game_idx)
for k in range(test_sample):
action_temp = self.action_all_with_power.copy()
for i in range(len(self.env.vehicles)):
self.action_all_with_power[i, :, 0] = -1
sorted_idx = np.argsort(self.env.individual_time_limit[i, :])
for j in sorted_idx:
state_old = self.get_state([i, j])
action = self.predict(state_old, self.step, True)
self.merge_action([i, j], action)
if i % (len(self.env.vehicles) / 10) == 1: # add 10
action_temp = self.action_all_with_power.copy()
V2V_reward, V2I_reward, V2V_security_rate = self.env.act_asyn(action_temp)
Eifficency_V2V.append(np.sum(V2V_reward))
Eifficency_V2I.append(np.sum(V2I_reward))
Security_rate.append(np.sum(V2V_security_rate))
# print("actions", self.action_all_with_power)
V2V_Eifficency_list[game_idx] = np.mean(np.asarray(Eifficency_V2V))
V2I_Eifficency_list[game_idx] = np.mean(np.asarray(Eifficency_V2I))
V2V_security_rate_list[game_idx] = np.mean(np.asarray(Security_rate))
# print("action is", self.action_all_with_power)
# print('failure probability is, ', percent)
# print('action is that', action_temp[0,:])
#!Step5: Save weight parameters
self.save_weight_to_pkl()
print('The number of vehicle is ', len(self.env.vehicles))
print('Mean of the V2V Eifficency is that ', np.mean(V2V_Eifficency_list))
print('Mean of the V2I Eifficency is that ', np.mean(V2I_Eifficency_list))
print('Mean of V2V Security Rate is that ', np.mean(V2V_security_rate_list))
self.save_record("V2V Efficiency: %f \tV2I Efficiency: %f\tSecurity Rate: %f\tCompound Efficiency: %f\tStep : %d\n" % \
(np.mean(V2V_Eifficency_list),np.mean(V2I_Eifficency_list),\
np.mean(V2V_security_rate_list)/self.num_vehicle,\
0.1 * np.mean(V2I_Eifficency_list) + 0.9 * np.mean(V2V_Eifficency_list), self.step))
# print('Test Reward is ', np.mean(test_result))
def q_learning_mini_batch(self):
# Training the DQN model
s_t, s_t_plus_1, action, reward = self.memory.sample()
t = time.time()
if self.double_q: # double Q learning
pred_action = self.q_action.eval({self.s_t: s_t_plus_1})
q_t_plus_1_with_pred_action = self.target_q_with_idx.eval({self.target_s_t: s_t_plus_1,
self.target_q_idx: [[idx, pred_a] for idx, pred_a
in enumerate(pred_action)]})
target_q_t = self.discount * q_t_plus_1_with_pred_action + reward
else:
q_t_plus_1 = self.target_q.eval({self.target_s_t: s_t_plus_1})
max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)
target_q_t = self.discount * max_q_t_plus_1 + reward
_, q_t, loss, w = self.sess.run([self.optim, self.q, self.loss, self.w],
{self.target_q_t: target_q_t, self.action: action, self.s_t: s_t,
self.learning_rate_step: self.step}) # training the network
print('loss is ', loss)
self.total_loss += loss
self.total_q += q_t.mean()
self.update_count += 1
def build_dqn(self):
# --- Building the DQN -------
self.w = {}
self.t_w = {}
initializer = tf.truncated_normal_initializer(0, 0.02)
activation_fn = tf.nn.relu
n_hidden_1 = 500
n_hidden_2 = 250
n_hidden_3 = 120
n_input = 120
n_output = 60
# The DQN network weights and biases
def encoder(x):
weights = {
'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1)),
'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev=0.1)),
'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_output], stddev=0.1)),
'encoder_b1': tf.Variable(tf.truncated_normal([n_hidden_1], stddev=0.1)),
'encoder_b2': tf.Variable(tf.truncated_normal([n_hidden_2], stddev=0.1)),
'encoder_b3': tf.Variable(tf.truncated_normal([n_hidden_3], stddev=0.1)),
'encoder_b4': tf.Variable(tf.truncated_normal([n_output], stddev=0.1)),
}
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['encoder_h1']), weights['encoder_b1']))
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['encoder_h2']), weights['encoder_b2']))
layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, weights['encoder_h3']), weights['encoder_b3']))
layer_4 = tf.nn.relu(tf.add(tf.matmul(layer_3, weights['encoder_h4']), weights['encoder_b4']))
return layer_4, weights
# Used for prediction
with tf.variable_scope('prediction'):
self.s_t = tf.placeholder('float32', [None, n_input])
self.q, self.w = encoder(self.s_t)
self.q_action = tf.argmax(self.q, dimension=1)
# Used for get target-Q
with tf.variable_scope('target'):
self.target_s_t = tf.placeholder('float32', [None, n_input])
self.target_q, self.target_w = encoder(self.target_s_t)
self.target_q_idx = tf.placeholder('int32', [None, None], 'output_idx')
self.target_q_with_idx = tf.gather_nd(self.target_q, self.target_q_idx)
# Used for update the target-Q network parameters
with tf.variable_scope('pred_to_target'):
self.t_w_input = {}
self.t_w_assign_op = {}
for name in self.w.keys():
print('name in self w keys', name)
self.t_w_input[name] = tf.placeholder('float32', self.target_w[name].get_shape().as_list(), name=name)
self.t_w_assign_op[name] = self.target_w[name].assign(self.t_w_input[name])
def clipped_error(x):
    """Huber loss: quadratic for |x| < 1, linear beyond (clips gradients).

    NOTE(review): this helper is not referenced by the optimizer section
    below, which uses a plain squared error for the loss instead.
    """
    try:
        # tf.select existed before TF 1.0; it was renamed to tf.where.
        return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
    except AttributeError:
        # Narrowed from a bare `except:` so genuine graph-construction errors
        # (shape or dtype mismatches) are no longer silently retried/masked.
        return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
# Used for Optimizer
with tf.variable_scope('optimizer'):
self.target_q_t = tf.placeholder('float32', None, name='target_q_t')
self.action = tf.placeholder('int32', None, name='action')
action_one_hot = tf.one_hot(self.action, n_output, 1.0, 0.0, name='action_one_hot')
q_acted = tf.reduce_sum(self.q * action_one_hot, reduction_indices=1, name='q_acted')
self.delta = self.target_q_t - q_acted
self.global_step = tf.Variable(0, trainable=False)
self.loss = tf.reduce_mean(tf.square(self.delta), name='loss')
self.learning_rate_step = tf.placeholder('int64', None, name='learning_rate_step')
self.learning_rate_op = tf.maximum(self.learning_rate_minimum,
tf.train.exponential_decay(self.learning_rate, self.learning_rate_step,
self.learning_rate_decay_step,
self.learning_rate_decay, staircase=True))
self.optim = tf.train.RMSPropOptimizer(self.learning_rate_op, momentum=0.95, epsilon=0.01).minimize(
self.loss)
tf.initialize_all_variables().run()
self.update_target_q_network()
def update_target_q_network(self):
    """Copy every prediction-network weight into the target network."""
    for name, weight in self.w.items():
        feed = {self.t_w_input[name]: weight.eval()}
        self.t_w_assign_op[name].eval(feed)
# These two functions are used to save and load weight parameters
def save_weight_to_pkl(self):
    """Persist every prediction-network weight as `<weight_dir>/<name>.pkl`."""
    if not os.path.exists(self.weight_dir):
        os.makedirs(self.weight_dir)
    for name, tensor in self.w.items():
        target = os.path.join(self.weight_dir, "%s.pkl" % name)
        save_pkl(tensor.eval(), target)
def load_weight_from_pkl(self):
    """Restore prediction-network weights from pickles, then sync the target net."""
    with tf.variable_scope('load_pred_from_pkl'):
        self.w_input = {}
        self.w_assign_op = {}
        # Build one placeholder + assign op per weight first, so the graph is
        # only extended once before any op is executed.
        for name in self.w.keys():
            self.w_input[name] = tf.placeholder('float32')
            self.w_assign_op[name] = self.w[name].assign(self.w_input[name])
    # Feed each pickled array into its assign op.
    for name in self.w.keys():
        self.w_assign_op[name].eval({self.w_input[name]: load_pkl(os.path.join(self.weight_dir, "%s.pkl" % name))})
    self.update_target_q_network()
| BandaidZ/OptimizationofSEandEEBasedonDRL | agent.py | agent.py | py | 18,757 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "base.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_num... |
5272336888 | import gradio as gr
import pytesseract
from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from pdf2image import convert_from_path
# Question template: instructs the model to answer in Japanese.
template = """
あなたは親切なアシスタントです。下記の質問に日本語で回答してください。
質問:{question}
回答:
"""
prompt = PromptTemplate(
    input_variables=["question"],
    template=template,
)
def pdf_to_text_ocr(pdf_file):
    """OCR every page of a PDF (Japanese + English) and return the joined text."""
    pages = convert_from_path(pdf_file)
    return "".join(
        pytesseract.image_to_string(page, lang="jpn+eng") for page in pages
    )
def process_input(pdf_file, input_text):
    """OCR the uploaded PDF, index its text, and answer `input_text` about it."""
    # Read the PDF via OCR (works for scanned documents too).
    pdf_text = pdf_to_text_ocr(pdf_file.name)

    # Split the text into ~1000-character chunks with no overlap.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.create_documents([pdf_text])

    # Embed the chunks into an in-memory Chroma vector store.
    embeddings = OpenAIEmbeddings()
    vectordb = Chroma.from_documents(texts, embeddings)

    # Build the RetrievalQA chain ("stuff" = retrieved chunks pasted into the prompt).
    qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(model_name="gpt-3.5-turbo"), chain_type="stuff",
                                     retriever=vectordb.as_retriever())

    # Format the question with the Japanese template and return the answer.
    question = input_text
    query = prompt.format(question=question)
    response = qa.run(query)

    return response
# Build the UI components.
# NOTE(review): `gr.inputs.*` is Gradio's legacy namespace — confirm it still
# exists in the installed Gradio version.
pdf_upload = gr.inputs.File(type="file", label="PDFファイルをアップロード")
textarea = gr.inputs.Textbox(lines=15, placeholder="GPTの応答がここに表示されます...", label="GPT")
input_box = gr.inputs.Textbox(lines=1, placeholder="ここに質問を入力してください", label="")

iface = gr.Interface(
    fn=process_input,
    inputs=[pdf_upload, input_box],
    outputs=textarea,
    layout="vertical",
    css=".gr-input {width: 80%;}",
    allow_flagging='never'
)

iface.launch()
| motomk/pdf_gpt | main.py | main.py | py | 2,156 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "langchain.PromptTemplate",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pdf2image.convert_from_path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 29,
"usage_type": "call"
},
{
"... |
24143273312 | from selenium.webdriver import Chrome,ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import xlsxwriter
# Keep the Chrome window open after the script ends ("detach").
opts = ChromeOptions()
opts.add_experimental_option("detach", True)
# NOTE(review): `chrome_options=` is the legacy keyword in newer Selenium
# releases (use `options=`); confirm against the installed version.
driver = Chrome(chrome_options=opts)
driver.get("https://google.com")
driver.maximize_window()

# Type the search term into Google's query box and submit it.
searchBox = driver.find_element(By.CLASS_NAME,"gLFyf")
searchBox.send_keys("IBTECH")
searchBox.send_keys(Keys.ENTER)
def purifyExtensions(i, j):
    """Split newLinks[i][j] on its domain extension and append to newRefs.

    Checks '.net', '.io', '.gov', '.org' and '.dev' in that order and falls
    back to '.com' when none of them occurs in the string.  Appends the
    resulting split list (e.g. ['example', '/page']) to the module-level
    `newRefs` list.  Replaces the original copy-pasted if/elif chain with a
    table-driven loop; the priority order is unchanged.
    """
    text = newLinks[i][j]
    for ext in ('.net', '.io', '.gov', '.org', '.dev'):
        if ext in text:
            newRefs.append(text.split(ext))
            return
    newRefs.append(text.split('.com'))
# newLinks holds each result's text split on 'https://'; newRefs holds the
# domain part split on its extension (filled by purifyExtensions).
newLinks = []
newRefs = []
# Walk the first three Google result pages, nine results per page.
for i in range(3):
    links = driver.find_elements(By.CLASS_NAME,"yuRUbf")
    driver.implicitly_wait(3)
    for k in range(1):  # NOTE(review): single-iteration loop — likely leftover scaffolding
        for j in range(9):
            newLinks.append(links[j].text.split('https://'))
    if i == 0:
        for j in range(1):
            for k in range(9):
                purifyExtensions(k,1)
                #newRefs.append(newLinks[k][1].split('.com'))
        # Advance to the next results page.
        driver.find_element(By.XPATH, '//*[@id="pnnext"]/span[2]').click()
    elif i == 1:
        for j in range(1):
            for k in range(9):
                if k ==4:
                    # NOTE(review): hard-coded special case — this entry is
                    # indexed at 0 instead of 1, implying its text had no
                    # 'https://' prefix; confirm this still holds.
                    purifyExtensions(k+9,0)
                    #newRefs.append(newLinks[k + 9][0].split('.com'))
                else:
                    purifyExtensions(k + 9, 1)
                    #newRefs.append(newLinks[k + 9][1].split('.com'))
        driver.find_element(By.XPATH, '//*[@id="pnnext"]/span[2]').click()
    elif i == 2:
        for j in range(1):
            for k in range(9):
                purifyExtensions(k+18,1)
                #newRefs.append(newLinks[k+18][1].split('.com'))
driver.close()
# Write the results to a spreadsheet: column A holds the raw result text,
# column B the normalised 'https://<name>.com' URL.
workbook = xlsxwriter.Workbook('import_file.xlsx')
worksheet = workbook.add_worksheet()
# Column widths derived from the number of rows, as in the original script.
worksheet.set_column('A:A', len(newLinks))
worksheet.set_column('B:B', len(newLinks))
# Use the numeric (row, col) API instead of formatting A1-style references.
# The old 'A{n:.2f}' templates produced float cell names like 'A1.00', which
# only worked because xlsxwriter's cell parser ignored the trailing '.00'.
for row, (link_parts, ref_parts) in enumerate(zip(newLinks, newRefs)):
    worksheet.write(row, 0, link_parts[0])
    worksheet.write(row, 1, 'https://' + ref_parts[0] + '.com')
workbook.close()
| keremguzel/selenium-excel-import | main.py | main.py | py | 2,595 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 15,
"usage_type": "a... |
17034031092 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.types import Header, Menu, Panel, PropertyGroup
from fd_datablocks import enums, const
import os
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
BoolVectorProperty,
PointerProperty,
CollectionProperty,
EnumProperty)
def find_node(material, nodetype):
    """Return the first node of `nodetype` in `material`'s node tree, or None."""
    if not (material and material.node_tree):
        return None
    for candidate in material.node_tree.nodes:
        if getattr(candidate, "type", None) == nodetype:
            return candidate
    return None
def find_node_input(node, name):
    """Return the input socket of `node` whose name matches `name`, or None."""
    matches = (sock for sock in node.inputs if sock.name == name)
    return next(matches, None)
def panel_node_draw(layout, id_data, output_type, input_name):
    """Draw input `input_name` of `id_data`'s `output_type` node into `layout`.

    Returns False (after drawing a "use shading nodes" operator) when node
    shading is disabled on `id_data`; True otherwise.
    """
    if not id_data.use_nodes:
        layout.operator("cycles.use_shading_nodes", icon='NODETREE')
        return False

    ntree = id_data.node_tree

    node = find_node(id_data, output_type)
    if not node:
        layout.label(text="No output node")
    else:
        input = find_node_input(node, input_name)
        layout.template_node_view(ntree, node, input)

    return True
class PANEL_scenes(Panel):
    """3D-View tool-shelf panel ("Scenes" tab): scene, camera and unit settings."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Scenes"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}
    #bl_idname = "mvProject.part_properties"

    def draw_header(self,context):
        # Panel title with icon (the default header is hidden via bl_options).
        layout = self.layout
        row = layout.row(align=True)
        row.label("Scenes: ",icon='SCENE_DATA')

    @classmethod
    def poll(cls, context):
        # Always visible.
        return True

    def draw(self, context):
        unit = context.scene.unit_settings
        scene = context.scene
        layout = self.layout
        space = context.space_data

        col = layout.column(align=True)
        box = col.box()
        row = box.row(align=True)
        # Scene selector with custom create/delete operators.
        row.template_ID(context.screen, "scene", new="fd_scene.create_scene", unlink="scene.delete")
        box = col.box()
        row = box.row()
        row.prop(scene, "camera",text="Active Camera")
        row = box.row()
        row.label("Main Units:")
        row.row().prop(unit, "system", expand=True)
        row = box.row()
        row.label("Angle Units:")
        row.row().prop(unit, "system_rotation", expand=True)
        # Grid spacing is only meaningful when no unit system is active.
        if space.type == 'VIEW_3D' and scene.unit_settings.system == 'NONE':
            row = box.row()
            row.label("Grid Spacing:")
            row.row().prop(space, "grid_scale", expand=True)
        box = col.box()
        # Scene-level prompt page (project-specific property group).
        scene.mv.PromptPage.draw_prompt_page(box,scene)
class PANEL_worlds(Panel):
    """3D-View tool-shelf panel ("Worlds" tab): world selection and surface shader."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Worlds"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}
    #bl_idname = "mvProject.part_properties"

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("World Management: ",icon=const.icon_world)

    @classmethod
    def poll(cls, context):
        # Always visible.
        return True

    def draw(self, context):
        scene = context.scene
        world = context.scene.world
        layout = self.layout
        col = layout.column(align=True)
        box = col.box()
        row = box.row(align=True)
        row.template_ID(context.scene, "world", new="world.new")
        box = col.box()
        # Fall back to the plain horizon color when node shading is off.
        if not panel_node_draw(box, world, 'OUTPUT_WORLD', 'Surface'):
            box.prop(world, "horizon_color", text="Color")
        box = col.box()
        # World-level prompt page (project-specific property group).
        world.mv.PromptPage.draw_prompt_page(box,world)
class PANEL_materials(Panel):
    """3D-View tool-shelf panel ("Materials" tab): material list and operators."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Materials"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("Material Management: ",icon=const.icon_material)

    @classmethod
    def poll(cls, context):
        # Always visible.
        return True

    def draw(self, context):
        layout = self.layout
        box = layout.box()
        row = box.row()
        # Project operators for assigning and pruning materials.
        row.operator("fd_material.apply_materials_from_pointers",text="Assign Materials",icon=const.icon_material)
        row.operator("fd_material.clear_unused_materials_from_file",text="Clear Unused",icon='ZOOMOUT')
        row.operator("fd_material.clear_all_materials_from_file",text="Clear All",icon='PANEL_CLOSE')
        box.template_list("MATERIAL_UL_matslots", "", bpy.data, "materials", context.scene.mv, "active_material_index", rows=5)
        # Show the active material's properties, if any materials exist.
        if len(bpy.data.materials) > 0:
            box = layout.box()
            material = bpy.data.materials[context.scene.mv.active_material_index]
            material.mv.draw_properties(box,material)
class PANEL_libraries(Panel):
    """3D-View tool-shelf panel ("Libraries" tab): library path and spec groups."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Libraries"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("Library Management: ",icon=const.icon_library)

    @classmethod
    def poll(cls, context):
        # Always visible.
        return True

    def draw(self, context):
        dm = context.scene.mv.dm
        layout = self.layout
        col = layout.column(align=True)
        box = col.box()
        # Check icon when the library path exists, error icon otherwise.
        if os.path.exists(dm.Libraries.path):
            Libraries = context.scene.mv.dm.Libraries
            row = box.row(align=True)
            row.prop(dm.Libraries,"path",text="",icon='FILE_TICK')
#             box = col.box()
#             row = box.row(align=True)
#             Libraries.draw_active_pointer_library_menus(row)
        else:
            row = box.row(align=True)
            row.prop(dm.Libraries,"path",text="",icon='ERROR')
        dm.Specgroups.draw_spec_groups(box)
#------REGISTER
# Panel classes registered with Blender when the add-on loads.
classes = [
    PANEL_scenes,
    PANEL_worlds,
    PANEL_materials,
    PANEL_libraries
]

def register():
    """Register every panel class with Blender."""
    for c in classes:
        bpy.utils.register_class(c)

def unregister():
    """Unregister every panel class (mirror of register())."""
    for c in classes:
        bpy.utils.unregister_class(c)

if __name__ == "__main__":
    register()
| satishgoda/fluid-designer-scripts | scripts/startup/fluid_ui/space_fluid_view3d_tools.py | space_fluid_view3d_tools.py | py | 7,224 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "bpy.types.Panel",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "bpy.types.Panel",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "fd_datablocks.const.icon_world",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": ... |
34373278865 | import os
from unittest import TestCase
import jinja2
from apply.issue.issure_js_auto_code.db_util import res_to_dict
from config.db_conf import localhost_oa_engine
from util.str_util import to_lower_camel, to_snake, to_upper_camel
class Form:
    """Helpers that read MySQL table metadata and render Jinja2 templates."""

    @staticmethod
    def get_tables(db):
        """Return the name of every table in schema `db`."""
        query = "select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = %(db)s"
        result = localhost_oa_engine.execute(query, {"db": db})
        return res_to_dict(result)

    @staticmethod
    def get_table_info(table_name, db=None):
        """Return column metadata (name, type, comment) for one table.

        Column names are rewritten to lowerCamelCase in place.
        """
        query = """SELECT COL.COLUMN_NAME, COL.COLUMN_TYPE, COL.COLUMN_COMMENT, COL.DATA_TYPE
        FROM INFORMATION_SCHEMA.COLUMNS COL
        Where COL.table_schema = %(db)s AND COL.TABLE_NAME = %(table_name)s"""
        params = {"db": db, "table_name": table_name}
        rows = res_to_dict(localhost_oa_engine.execute(query, params))
        for row in rows:
            row["COLUMN_NAME"] = to_lower_camel(row["COLUMN_NAME"])
        return rows

    @staticmethod
    def to_file(data_dic, path, resource_dir, template_file):
        """Render `template_file` (under `resource_dir`) with `data_dic` into `path`."""
        loader = jinja2.FileSystemLoader(searchpath=resource_dir)
        environment = jinja2.Environment(loader=loader)
        rendered = environment.get_template(template_file).render(data_dic)
        with open(path, "w", encoding="utf-8") as out:
            out.write(rendered)
class TestAutoCode(TestCase):
    """Developer-run generators that render code templates from DB metadata.

    NOTE(review): paths under 'tmp/' must already exist — Form.to_file opens
    them directly without creating directories; confirm before running.
    """

    def test_run(self):
        # Render a single-table Vue component next to this file.
        table = "organization"
        rows = Form.get_table_info(table, "oa")
        data = {
            "list": rows
        }
        Form.to_file(data, f"{table}.vue", os.path.dirname(__file__), "templates/vue.template")

    def test_run_vue_js(self):
        # Render one Vue component per table in the 'oa' schema.
        tables = Form.get_tables("oa")
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_upper_caml = to_upper_camel(table_name)
            rows = Form.get_table_info(table_name, "oa")
            data = {
                "list": rows,
                "tableUpperCaml": table_upper_caml,
                "tableConst": to_snake(table_name).upper(),
            }
            Form.to_file(data, f"tmp/{table_upper_caml}.vue", os.path.dirname(__file__), "templates/vue.template")

    def test_run_index_js(self):
        # Render router entries covering every table.
        tables = Form.get_tables("oa")
        table_infos = []
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_upper_caml = to_upper_camel(table_name)
            table_infos.append({
                "tableUpperCaml": table_upper_caml,
                "tableLowerCaml": to_lower_camel(table_name),
                "tableConst": to_snake(table_name).upper(),
            })
        data = {
            "list": table_infos
        }
        Form.to_file(data, "tmp/routes.js", os.path.dirname(__file__), "templates/routes.template")

    def test_run_config_js(self):
        # Render config constants covering every table.
        tables = Form.get_tables("oa")
        table_infos = []
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_infos.append({
                "tableLowerCaml": to_lower_camel(table_name),
                "tableConst": to_snake(table_name).upper(),
            })
        data = {
            "list": table_infos
        }
        Form.to_file(data, "tmp/config.js", os.path.dirname(__file__), "templates/config.template")
| QQ1134614268/PythonTemplate | src/apply/issue/issure_js_auto_code/js_auto_code_v0.py | js_auto_code_v0.py | py | 3,508 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "config.db_conf.localhost_oa_engine.execute",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "config.db_conf.localhost_oa_engine",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "apply.issue.issure_js_auto_code.db_util.res_to_dict",
"line_num... |
32585270834 | import cv2
import numpy as np
from .base import BaseTask
class BlurAndPHash(BaseTask):
    """Frame filter that drops blurry frames and near-duplicate frames.

    Sharpness is estimated with the variance of the Laplacian; similarity is
    estimated by comparing perceptual hashes (pHash) via Hamming distance.
    """

    def __init__(self):
        super().__init__(taskID=4, taskName='BlurAndPHash')
        # Minimum Laplacian variance for a frame to count as sharp.
        self.thresholdLaplacian = 120
        # Hash distance >= this (vs last kept frame) means "too different": skip.
        self.thresholdDiffStop = 120
        # Hash distance <= this (vs previous frame) means duplicate: skip.
        self.thresholdDiffPre = 25
        # Side length (pixels) of the square image the hash is computed from.
        self.hashLen = 32
        self.preStopPHash = None  # hash of the last frame that was kept
        self.prePHash = None      # hash of the immediately preceding frame
        self.n = 0                # consecutive surviving candidates seen so far

    def exec(self, inputData):
        """Return (frame, isLastFrame) when the frame should be kept, else None.

        `inputData` is a (frame, isLastFrame) pair.  Blurry frames, near
        duplicates, and abrupt outliers are filtered; only every fourth
        surviving candidate is actually emitted.
        """
        frame, isLastFrame = inputData
        if isLastFrame:
            # Propagate the end-of-stream marker without a frame.
            return None, isLastFrame
        currPHash = self.getPHash(frame)
        if currPHash is None:
            # Frame was too blurry to hash.
            return None
        if self.preStopPHash is None:
            # First usable frame: always keep it and seed both references.
            self.preStopPHash = currPHash
            self.prePHash = currPHash
            return frame, isLastFrame
        diffStop = self.hamDistance(self.preStopPHash, currPHash)
        diffPre = self.hamDistance(self.prePHash, currPHash)
        self.prePHash = currPHash

        if diffStop >= self.thresholdDiffStop \
                or diffPre <= self.thresholdDiffPre:
            return None
        self.n += 1
        if self.n <= 3:
            # Require several distinct candidates in a row before keeping one.
            return None
        self.n = 0
        self.preStopPHash = currPHash
        return frame, isLastFrame

    def getPHash(self, img):
        """Return the perceptual hash of `img` as an int, or None if blurry.

        The hash is built from a double DCT of the grayscale-resized image;
        a bit is set where a coefficient is at or above the coefficient mean.
        (RGB channel order is assumed — cv2.COLOR_RGB2GRAY; confirm callers.)
        """
        pHash = None
        laplacian = cv2.Laplacian(img, cv2.CV_64F).var()
        if laplacian <= self.thresholdLaplacian:
            return pHash
        imgGray = cv2.resize(
            cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),
            (self.hashLen, self.hashLen),
            cv2.INTER_AREA)
        height, width = imgGray.shape[:2]
        matrixOriginal = np.zeros(
            (height, width),
            np.float32)
        matrixOriginal[:height, :width] = imgGray

        matrix = cv2.dct(cv2.dct(matrixOriginal))
        matrix.resize(self.hashLen, self.hashLen)
        matrixFlatten = matrix.flatten()
        # NOTE(review): despite the name, this is the arithmetic mean of the
        # DCT coefficients, not the median.
        medianValue = sum(matrixFlatten) * 1. / len(matrixFlatten)

        pHash = 0
        for i in matrixFlatten:
            pHash <<= 1
            if i >= medianValue:
                pHash += 1
        return pHash

    @staticmethod
    def hamDistance(x, y):
        """Return the Hamming distance (number of differing bits) of two hashes."""
        tmp = x ^ y
        distance = 0
        while tmp > 0:
            distance += tmp & 1
            tmp >>= 1
        return distance
| Cloudslab/FogBus2 | containers/taskExecutor/sources/utils/taskExecutor/tasks/blurAndPHash.py | blurAndPHash.py | py | 2,292 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "base.BaseTask",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "cv2.Laplacian",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_num... |
58242642 | try:
from zohocrmsdk.src.com.zoho.crm.api.exception import SDKException
from zohocrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Backup(object):
    """Represents a scheduled CRM data backup and tracks field modifications."""

    def __init__(self):
        """Creates an instance of Backup"""
        self.__rrule = None
        self.__id = None
        self.__start_date = None
        self.__scheduled_date = None
        self.__status = None
        self.__requester = None
        self.__key_modified = dict()

    @staticmethod
    def __ensure_type(value, expected_type, key, type_name):
        """Raise SDKException unless `value` is None or of `expected_type`.

        Centralizes the validation that every setter previously repeated;
        the error message format matches the original per-setter raises.
        """
        if value is not None and not isinstance(value, expected_type):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: %s EXPECTED TYPE: %s' % (key, type_name), None, None)

    def get_rrule(self):
        """Return the recurrence rule string (or None)."""
        return self.__rrule

    def set_rrule(self, rrule):
        """Set the recurrence rule.

        Parameters:
            rrule (string) : A string representing the rrule
        """
        self.__ensure_type(rrule, str, 'rrule', 'str')
        self.__rrule = rrule
        self.__key_modified['rrule'] = 1

    def get_id(self):
        """Return the backup id (or None)."""
        return self.__id

    def set_id(self, id):
        """Set the backup id.

        Parameters:
            id (int) : An int representing the id
        """
        self.__ensure_type(id, int, 'id', 'int')
        self.__id = id
        self.__key_modified['id'] = 1

    def get_start_date(self):
        """Return the start date as a datetime (or None)."""
        return self.__start_date

    def set_start_date(self, start_date):
        """Set the start date.

        Parameters:
            start_date (datetime) : An instance of datetime
        """
        from datetime import datetime
        self.__ensure_type(start_date, datetime, 'start_date', 'datetime')
        self.__start_date = start_date
        self.__key_modified['start_date'] = 1

    def get_scheduled_date(self):
        """Return the scheduled date as a datetime (or None)."""
        return self.__scheduled_date

    def set_scheduled_date(self, scheduled_date):
        """Set the scheduled date.

        Parameters:
            scheduled_date (datetime) : An instance of datetime
        """
        from datetime import datetime
        self.__ensure_type(scheduled_date, datetime, 'scheduled_date', 'datetime')
        self.__scheduled_date = scheduled_date
        self.__key_modified['scheduled_date'] = 1

    def get_status(self):
        """Return the backup status string (or None)."""
        return self.__status

    def set_status(self, status):
        """Set the backup status.

        Parameters:
            status (string) : A string representing the status
        """
        self.__ensure_type(status, str, 'status', 'str')
        self.__status = status
        self.__key_modified['status'] = 1

    def get_requester(self):
        """Return the Requester instance (or None)."""
        return self.__requester

    def set_requester(self, requester):
        """Set the requester.

        Parameters:
            requester (Requester) : An instance of Requester
        """
        try:
            from zohocrmsdk.src.com.zoho.crm.api.backup.requester import Requester
        except Exception:
            from .requester import Requester

        self.__ensure_type(requester, Requester, 'requester', 'Requester')
        self.__requester = requester
        self.__key_modified['requester'] = 1

    def is_key_modified(self, key):
        """Return the modification flag for `key`, or None if never modified.

        Parameters:
            key (string) : A string representing the key
        """
        self.__ensure_type(key, str, 'key', 'str')
        if key in self.__key_modified:
            return self.__key_modified.get(key)

        return None

    def set_key_modified(self, key, modification):
        """Mark `key` with the given modification flag.

        Parameters:
            key (string) : A string representing the key
            modification (int) : An int representing the modification
        """
        self.__ensure_type(key, str, 'key', 'str')
        self.__ensure_type(modification, int, 'modification', 'int')
        self.__key_modified[key] = modification
| zoho/zohocrm-python-sdk-5.0 | zohocrmsdk/src/com/zoho/crm/api/backup/backup.py | backup.py | py | 4,949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "exception.SDKException",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "util.Constants.DATA_TYPE_ERROR",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "util.Constants",
"line_number": 40,
"usage_type": "name"
},
{
"api_nam... |
3831338977 | # encoding=utf-8
import logging
import logging.config
import os
import sys
import time
import traceback
import datetime
def init_log(name='root'):
    """Configure logging from `logger.conf` and return the named logger.

    The config template's `$path` placeholder is replaced with a dated log
    file under `<project root>/zz_logs`, the substituted copy is written to
    `logger.conf_bak`, and that copy is handed to logging.config.fileConfig.

    Raises:
        Exception: if `logger.conf` is missing next to this module.
    """
    here = os.path.dirname(__file__)
    config_file = os.path.join(here, 'logger.conf')

    log_dir = os.path.join(os.path.abspath(__file__ + ('/..' * 3)), 'zz_logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    log_file = os.path.join(log_dir, str(datetime.datetime.now().date()) + '.log')

    if not os.path.isfile(config_file):
        raise Exception("Config file {} not found".format(config_file))

    # Substitute the $path placeholder line by line.
    with open(config_file, 'r') as src:
        datalines = [line.replace('$path', log_file) for line in src]

    # Context manager (the original leaked the handle if writelines failed).
    with open(config_file + '_bak', 'w') as dst:
        dst.writelines(datalines)

    logging.config.fileConfig(config_file + '_bak')
    # os.remove(config_file + '_bak')
    return logging.getLogger(name)
# decorator print log
def addlog(name=''):
    """Decorator factory: log start/finish (with timings) of the wrapped call.

    `name` overrides the auto-derived "<module path>.<function>" description.
    Note that `begin` is captured at decoration time, so "Total Time"
    measures time since the decorator was applied, not since the first call.
    """
    begin = time.time()

    def _addlog(func):
        def wapper(*args, **kwargs):
            data = None
            begin1 = time.time()
            try:
                # Derive "<path relative to project root>.<func name>" from
                # the caller's stack frame for the log prefix.
                s = traceback.extract_stack()
                file = s[-2][0]
                __project_name = os.path.abspath(__file__ + ('/..' * 3))
                file_name = file[file.find(__project_name) + len(__project_name) + 1:file.rfind(r'.')]
                func_descrip = (file_name + '.' + func.__name__) if name == '' else name
                log.info('Start Execute:%s ...' % func_descrip)
                data = func(*args, **kwargs)
                inner_secs = time.time() - begin1
                log.info('Complete:%s , Time Consume: %s, Total Time: %s ' % (func_descrip,
                                                                              time_str(inner_secs), time_str(time.time() - begin)))
            except Exception as e:
                # traceback.print_exc()
                log.exception('Failure Calling Time Consume:%s, Total Time:%s, Err Message:%s', time_str(time.time() - begin1),
                              time_str(time.time() - begin), e)
                # traceback.print_exc(file=open(log_file, 'a'))
                # NOTE(review): exits with status 0 even though the wrapped
                # call failed, so shells/CI cannot detect the error — confirm
                # whether a non-zero exit code was intended.
                sys.exit(0)
            return data
        return wapper
    return _addlog
def time_str(second):
    """Format a duration: 'X.XX sec' under a minute, otherwise 'X.XX min'."""
    if second < 60:
        return '%.2f sec' % second
    return '%.2f min' % (second / 60.0)
# Module-level logger, configured on import.
log = init_log()


# example
@addlog()
def log_test1():
    """Example: sleeps one second so the decorator logs a measurable duration."""
    time.sleep(1)


@addlog(name='test2')
def log_test2():
    """Example: nests a decorated call, then raises to demo failure logging."""
    time.sleep(1)
    log_test1()
    time.sleep(2)
    raise ValueError('A very specific bad thing happened.')
if __name__ == "__main__":
    # Quick manual demo exercising the different log levels.
    col = 'aaaa'
    missing_rate = 0.26587
    log.info('%s has missing rate as %f' % (col, missing_rate))
    # log_test2()
    #
    # __project_name = os.path.abspath(__file__ + ('/..' * 3))
    # print(__project_name)

    log.debug('debug')
    log.info('test - debug')
    log.warning('warining')
| charliedream1/ai_quant_trade | tools/log/log_util.py | log_util.py | py | 3,053 | python | en | code | 710 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_num... |
45413329386 | import os
import pathlib
import pandas as pd
import keyring
import dropbox
from dropbox.exceptions import AuthError
# Directory containing this module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

dropbox_home = "https://www.dropbox.com/home/"
dropbox_app = "MAD_WahooToGarmin"
dropbox_app_dir = "/Apps/WahooFitness/"
# Token is looked up from the OS keychain under service name 'dropbox'.
DROPBOX_ACCESS_TOKEN = keyring.get_password("dropbox", dropbox_app)
# https://practicaldatascience.co.uk/data-science/how-to-use-the-dropbox-api-with-python
def dropbox_connect():
    """Create and return a Dropbox API client.

    Raises:
        dropbox.exceptions.AuthError: if the stored access token is rejected.
    """
    try:
        dbx = dropbox.Dropbox(DROPBOX_ACCESS_TOKEN)
    except AuthError as e:
        print("Error connecting to Dropbox with access token: " + str(e))
        # Re-raise instead of falling through: the original returned the
        # unbound name `dbx` here, which raised a confusing NameError.
        raise
    return dbx
def dropbox_list_files(path):
    """Return a DataFrame of files in a Dropbox folder, newest first.

    Columns: filename, path_display, client_modified, server_modified.
    Returns None (after printing the error) when the API call fails.
    """
    dbx = dropbox_connect()

    try:
        entries = dbx.files_list_folder(path).entries
        files_list = [
            {
                "filename": entry.name,
                "path_display": entry.path_display,
                "client_modified": pd.Timestamp(entry.client_modified).isoformat(),
                "server_modified": pd.Timestamp(entry.server_modified).isoformat(),
            }
            for entry in entries
            if isinstance(entry, dropbox.files.FileMetadata)
        ]

        columns = ["filename", "path_display", "client_modified", "server_modified"]
        # Passing explicit columns keeps sort_values from raising KeyError on
        # an empty folder (the original swallowed it and returned None).
        df = pd.DataFrame.from_records(files_list, columns=columns)
        return df.sort_values(by="server_modified", ascending=False)

    except Exception as e:
        print("Error getting list of files from Dropbox: " + str(e))
def dropbox_download_file(dropbox_file_path, local_file_path):
    """Download a file from Dropbox to the local machine."""
    try:
        client = dropbox_connect()
        with open(local_file_path, "wb") as out:
            metadata, response = client.files_download(path=dropbox_file_path)
            out.write(response.content)
    except Exception as e:
        print("Error downloading file from Dropbox: " + str(e))
def dropbox_upload_file(local_path, local_file, dropbox_file_path):
    """Upload a local file into the Dropbox app directory.

    Args:
        local_path (str): Directory containing the file.
        local_file (str): Name of the file.
        dropbox_file_path (str): Destination path in the Dropbox app directory.

    Example:
        dropbox_upload_file('.', 'test.csv', '/stuff/test.csv')

    Returns:
        The Dropbox file metadata, or None if the upload failed.
    """
    try:
        client = dropbox_connect()
        source = pathlib.Path(local_path) / local_file
        with source.open("rb") as handle:
            payload = handle.read()
        return client.files_upload(
            payload, dropbox_file_path, mode=dropbox.files.WriteMode("overwrite")
        )
    except Exception as e:
        print("Error uploading file to Dropbox: " + str(e))
if __name__ == "__main__":
    # Smoke check that the module imports (and the keyring lookup ran).
    print("here")
| michaeladavis10/WahooToGarmin | dropbox_utils.py | dropbox_utils.py | py | 2,994 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keyring.get_password",
... |
29685980647 | import re
import pep8
import six
"""
Guidelines for writing new hacking checks
- Use only for Octavia specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range O3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the O3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
octavia/tests/unit/test_hacking.py
"""
# Matches untranslated LOG.<level>(...) calls that start with a bare quote.
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
# Matches author tags, which are banned in source headers.
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"),
                 re.compile("^\.\.\s+moduleauthor::"))

# Every recognized i18n hint marker.
_all_hints = set(['_', '_LI', '_LE', '_LW', '_LC'])

# Maps each log level to the hint marker it is required to use.
_all_log_levels = {
    # NOTE(yamamoto): Following nova which uses _() for audit.
    'audit': '_',
    'error': '_LE',
    'info': '_LI',
    'warn': '_LW',
    'warning': '_LW',
    'critical': '_LC',
    'exception': '_LE',
}
# One compiled pattern per level, matching calls that use the WRONG hint
# (or none at all) for that level.
log_translation_hints = []
for level, hint in six.iteritems(_all_log_levels):
    r = "(.)*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
        'level': level,
        'wrong_hints': '|'.join(_all_hints - set([hint])),
    }
    log_translation_hints.append(re.compile(r))

# Patterns backing the individual O3xx checks below.
assert_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    "(\w|\.|\'|\"|\[|\])+\)\)")
assert_equal_in_end_with_true_or_false_re = re.compile(
    r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
assert_equal_in_start_with_true_or_false_re = re.compile(
    r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
assert_equal_with_true_re = re.compile(
    r"assertEqual\(True,")
assert_equal_with_false_re = re.compile(
    r"assertEqual\(False,")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
assert_equal_end_with_none_re = re.compile(r"(.)*assertEqual\(.+, None\)")
assert_equal_start_with_none_re = re.compile(r".*assertEqual\(None, .+\)")
assert_not_equal_end_with_none_re = re.compile(
    r"(.)*assertNotEqual\(.+, None\)")
assert_not_equal_start_with_none_re = re.compile(
    r"(.)*assertNotEqual\(None, .+\)")
assert_no_xrange_re = re.compile(
    r"\s*xrange\s*\(")
def _directory_to_check_translation(filename):
    """Return True if `filename` is subject to translation checks.

    Currently a placeholder: every file is checked.
    """
    return True
def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    O316
    """
    if assert_trueinst_re.match(logical_line) is not None:
        yield (0, "O316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_or_not_none(logical_line):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences,
assertNotEqual(A, None) or assertNotEqual(None, A) sentences
O318
"""
msg = ("O318: assertEqual/assertNotEqual(A, None) or "
"assertEqual/assertNotEqual(None, A) sentences not allowed")
res = (assert_equal_start_with_none_re.match(logical_line) or
assert_equal_end_with_none_re.match(logical_line) or
assert_not_equal_start_with_none_re.match(logical_line) or
assert_not_equal_end_with_none_re.match(logical_line))
if res:
yield (0, msg)
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
O319
"""
if _directory_to_check_translation(filename) and logical_line.startswith(
"LOG.debug(_("):
yield(0, "O319 Don't translate debug level logs")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
if "octavia/tests" in filename:
return
if pep8.noqa(physical_line):
return
msg = "O320: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
if _directory_to_check_translation(filename):
msg = "O320: Log messages require translation hints!"
for log_translation_hint in log_translation_hints:
if log_translation_hint.match(logical_line):
yield (0, msg)
def use_jsonutils(logical_line, filename):
msg = "O321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
# Some files in the tree are not meant to be run from inside Octavia
# itself, so we should not complain about them not using jsonutils
json_check_skipped_patterns = [
]
for pattern in json_check_skipped_patterns:
if pattern in filename:
return
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
def no_author_tags(physical_line):
for regex in author_tag_re:
if regex.match(physical_line):
physical_line = physical_line.lower()
pos = physical_line.find('moduleauthor')
if pos < 0:
pos = physical_line.find('author')
return pos, "O322: Don't use author tags"
def assert_equal_true_or_false(logical_line):
"""Check for assertEqual(True, A) or assertEqual(False, A) sentences
O323
"""
res = (assert_equal_with_true_re.search(logical_line) or
assert_equal_with_false_re.search(logical_line))
if res:
yield (0, "O323: assertEqual(True, A) or assertEqual(False, A) "
"sentences not allowed")
def no_mutable_default_args(logical_line):
msg = "O324: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def assert_equal_in(logical_line):
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
O338
"""
res = (assert_equal_in_start_with_true_or_false_re.search(logical_line) or
assert_equal_in_end_with_true_or_false_re.search(logical_line))
if res:
yield (0, "O338: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in B, True/False) when checking collection "
"contents.")
def no_log_warn(logical_line):
"""Disallow 'LOG.warn('
O339
"""
if logical_line.startswith('LOG.warn('):
yield(0, "O339:Use LOG.warning() rather than LOG.warn()")
def no_xrange(logical_line):
"""Disallow 'xrange()'
O340
"""
if assert_no_xrange_re.match(logical_line):
yield(0, "O340: Do not use xrange().")
def factory(register):
register(assert_true_instance)
register(assert_equal_or_not_none)
register(no_translate_debug_logs)
register(validate_log_translations)
register(use_jsonutils)
register(no_author_tags)
register(assert_equal_true_or_false)
register(no_mutable_default_args)
register(assert_equal_in)
register(no_log_warn)
register(no_xrange)
| BeaconFramework/Distributor | octavia/hacking/checks.py | checks.py | py | 7,161 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 3... |
42399945606 | """empty message
Revision ID: a5cfe890710d
Revises: 7352c721e0a4
Create Date: 2023-05-28 16:47:42.177222
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a5cfe890710d'
down_revision = '7352c721e0a4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('images', schema=None) as batch_op:
batch_op.drop_column('url_small')
batch_op.drop_column('url_full')
batch_op.drop_column('url_regular')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('images', schema=None) as batch_op:
batch_op.add_column(sa.Column('url_regular', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
batch_op.add_column(sa.Column('url_full', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
batch_op.add_column(sa.Column('url_small', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
# ### end Alembic commands ### | RBird111/capstone-yelp-clone | migrations/versions/20230528_164742_.py | 20230528_164742_.py | py | 1,132 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "... |
32166211761 | import requests
from bs4 import BeautifulSoup
import json
def get_description(url):
response = requests.get(url)
if response is not None:
soup = BeautifulSoup(response.text, 'html.parser')
description = {}
l1 = []
l2 = []
for item in soup.find_all("span", class_="adPage__content__features__key"):
if item is not None:
l1.append(item.text)
for item in soup.find_all("span", class_="adPage__content__features__value"):
if item is not None:
l2.append(item.text)
for index in range(len(l2)):
description[l1[index]] = l2[index]
extra = []
for index in range(len(l2)+1,len(l1)):
extra.append(index)
description['Extra_features'] = extra
if soup.find('h1') is not None:
car_model = soup.find('h1').text
des = soup.find('div', class_=("adPage__content__description grid_18"))
if des is not None:
description["description"] = des.text
print(description)
desc = {}
desc[car_model] = description
file_name = "description"
with open(file_name,"a") as json_file:
json.dump(desc, json_file)
| Drkiller325/PR_Lab2 | homework.py | homework.py | py | 1,157 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 45,
"usage_type": "call"
}
] |
7437025622 | """Module containing class `UntagClipsCommand`."""
import logging
import random
import time
from django.db import transaction
from vesper.command.clip_set_command import ClipSetCommand
from vesper.django.app.models import Job, Tag, TagEdit, TagInfo
import vesper.command.command_utils as command_utils
import vesper.django.app.model_utils as model_utils
import vesper.util.archive_lock as archive_lock
import vesper.util.text_utils as text_utils
import vesper.util.time_utils as time_utils
_logger = logging.getLogger()
class TagClipsCommand(ClipSetCommand):
extension_name = 'tag_clips'
def __init__(self, args):
super().__init__(args, True)
get_opt = command_utils.get_optional_arg
self._clip_count = get_opt('clip_count', args)
def execute(self, job_info):
self._job_info = job_info
clip_indices = self._get_tag_clip_indices()
self._tag_clips(clip_indices)
return True
def _get_tag_clip_indices(self):
if self._clip_count is None:
# tag all clips
return None
clip_count = self._count_clips()
if clip_count <= self._clip_count:
# tag all clips
return None
# If we get here, a clip count is specified and it is less than
# the number of untagged clips.
_logger.info('Getting indices of clips to tag...')
indices = random.sample(range(clip_count), self._clip_count)
return frozenset(indices)
def _count_clips(self):
value_tuples = self._create_clip_query_values_iterator()
count = 0
for station, mic_output, date, detector in value_tuples:
clips = model_utils.get_clips(
station=station,
mic_output=mic_output,
date=date,
detector=detector,
annotation_name=self._annotation_name,
annotation_value=self._annotation_value,
tag_name=self._tag_name,
tag_excluded=True,
order=False)
count += clips.count()
return count
def _tag_clips(self, clip_indices):
start_time = time.time()
value_tuples = self._create_clip_query_values_iterator()
clip_index = 0
total_clip_count = 0
total_tagged_count = 0
for station, mic_output, date, detector in value_tuples:
# Get clip for this station, mic_output, date, and detector.
clips = model_utils.get_clips(
station=station,
mic_output=mic_output,
date=date,
detector=detector,
annotation_name=self._annotation_name,
annotation_value=self._annotation_value,
tag_name=self._tag_name,
tag_excluded=True,
order=False)
# Get list of clip IDs.
clip_ids = clips.values_list('pk', flat=True)
# Get IDs of clips to tag.
tag_clip_ids = \
self._get_tag_clip_ids(clip_ids, clip_index, clip_indices)
clip_count = len(clip_ids)
tagged_count = len(tag_clip_ids)
clip_index += clip_count
# Tag clips.
try:
self._tag_clip_batch(tag_clip_ids)
except Exception as e:
batch_text = \
_get_batch_text(station, mic_output, date, detector)
command_utils.log_and_reraise_fatal_exception(
e, f'Tagging of clips for {batch_text}')
# Log clip counts.
if tagged_count == clip_count:
prefix = 'Tagged'
else:
untagged_count = clip_count - tagged_count
prefix = (
f'Tagged {tagged_count} and left untagged '
f'{untagged_count} of')
count_text = text_utils.create_count_text(clip_count, 'clip')
batch_text = _get_batch_text(station, mic_output, date, detector)
_logger.info(f'{prefix} {count_text} for {batch_text}.')
total_clip_count += clip_count
total_tagged_count += tagged_count
# Log total clip counts and tagging rate.
if total_tagged_count == total_clip_count:
prefix = 'Tagged'
else:
total_untagged_count = total_clip_count - total_tagged_count
prefix = (
f'Tagged {total_tagged_count} and left untagged '
f'{total_untagged_count} of')
count_text = text_utils.create_count_text(total_clip_count, 'clip')
elapsed_time = time.time() - start_time
timing_text = command_utils.get_timing_text(
elapsed_time, total_clip_count, 'clips')
_logger.info(f'{prefix} a total of {count_text}{timing_text}.')
def _get_tag_clip_ids(self, clip_ids, start_clip_index, clip_indices):
if clip_indices is None:
# tagging all clips
return clip_ids
else:
# not tagging all clips
clip_index = start_clip_index
tag_clip_ids = []
for clip_id in clip_ids:
if clip_index in clip_indices:
tag_clip_ids.append(clip_id)
clip_index += 1
return tag_clip_ids
def _tag_clip_batch(self, clip_ids):
with archive_lock.atomic():
with transaction.atomic():
# See note in untag_clips_command.py about maximum
# chunk size. I'm not certain we have to do the same
# thing here, but it seems likely that we do, for a
# similar reason.
max_chunk_size = 900
tag_info = TagInfo.objects.get(name=self._tag_name)
action = TagEdit.ACTION_SET
creation_time = time_utils.get_utc_now()
creating_job = Job.objects.get(id=self._job_info.job_id)
for i in range(0, len(clip_ids), max_chunk_size):
chunk = clip_ids[i:i + max_chunk_size]
# Create tags.
Tag.objects.bulk_create([
Tag(
clip_id=clip_id,
info=tag_info,
creation_time=creation_time,
creating_user=None,
creating_job=creating_job,
creating_processor=None)
for clip_id in chunk])
# Create tag edits.
TagEdit.objects.bulk_create([
TagEdit(
clip_id=clip_id,
info=tag_info,
action=action,
creation_time=creation_time,
creating_user=None,
creating_job=creating_job,
creating_processor=None)
for clip_id in chunk])
def _get_batch_text(station, mic_output, date, detector):
return (
f'station "{station.name}", mic output "{mic_output.name}", '
f'date {date}, and detector "{detector.name}"')
| HaroldMills/Vesper | vesper/command/tag_clips_command.py | tag_clips_command.py | py | 7,835 | python | en | code | 47 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "vesper.command.clip_set_command.ClipSetCommand",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "vesper.command.command_utils.get_optional_arg",
"line_number": 32,
"usag... |
38368937564 | import string, itertools
ascii_lowercases = list(string.ascii_lowercase)
MAX_WORD_LENGTH = 5
for i in range(1, MAX_WORD_LENGTH + 1):
charlist = [[x for x in ascii_lowercases]] * i
for combinations in itertools.product(*charlist):
combinations = "".join(combinations)
with open("../wordlist.txt", "a") as file:
file.write(combinations + "\n")
print("finished!", i) | 1LCB/hash-cracker | complement/wordlist generator.py | wordlist generator.py | py | 407 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "string.ascii_lowercase",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "itertools.product",
"line_number": 10,
"usage_type": "call"
}
] |
30804267516 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
# 将csv数据读取为pandas对象
fund = pd.read_csv('./csv/001112.csv', dtype={'fcode': str})
# 转化时间字符串为时间
fund['fdate'] = pd.to_datetime(fund['fdate'])
# 设置时间列为索引,并升序排列
fund = fund.set_index('fdate').sort_index(ascending=False)
# x轴为数据2017后的索引 y为2017后的NAV
x = fund.loc['2017'].index
y = fund.loc['2017']['NAV']
# 将xy数据转化为矩阵:
# 将时间转化为时间戳
x = x.astype(np.int64)
# 将时间戳转化为1列多行的二维数组
x = x.values.reshape(-1, 1)
y = y.values.reshape(-1, 1)
# 放入数据开始训练
lr = LinearRegression()
lr.fit(x, y)
# 构建一个时间戳,来预测y轴的值
test_x = pd.to_datetime(np.array(['2017-9-30', '2017-10-1'])).astype(np.int64).values.reshape(-1, 1)
# 预测到Y轴的值 [[1.41483561]
# [1.41626252]]
new_y = lr.predict(test_x)
# 把拟合线画出来:如果y为训练预测出的值,则线条为直线拟合线
x_date = fund.loc['2017'].index
# 走势点图
plt.scatter(x_date, fund.loc['2017']['NAV'])
plt.plot(x_date, lr.predict(x), 'r')
plt.show()
print(new_y)
| bobchi/learn_py | 23.py | 23.py | py | 1,416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sklearn.linear_mod... |
43447079150 |
import sys, re
from argparse import ArgumentParser #import the library
parser = ArgumentParser(description = 'Classify a sequence as DNA or RNA') #create one ArgumentParser
parser.add_argument("-s", "--seq", type = str, required = True, help = "Input sequence") #add the first argument
parser.add_argument("-m", "--motif", type = str, required = False, help = "Motif") #add the second argument
if len(sys.argv) == 1: #print the help message only if no arguments are supplied on the command line
parser.print_help()
sys.exit(1)
args = parser.parse_args() #parser the argument
args.seq = args.seq.upper() #convert the sequence in upper case
if 'U' in args.seq and 'T' in args.seq: #if it finds U and T in the sequence return a message that the sequence have a mutagenic bases
print ('The sequence have a mutagenic bases') # if it finds this condition it does not execute the others command line
sys.exit ()
if re.search('^[ACGTU]+$', args.seq): #search in the sequence the pattern within the string
if re.search('T', args.seq): #if it finds T in the sequence return a message that the sequence is DNA
print ('The sequence is DNA')
elif re.search('U', args.seq):
print ('The sequence is RNA') #if it finds U in the sequence return a message that the sequence is RNA
else:
print ('The sequence can be DNA or RNA') #if it finds T and U in the sequence return a message that the sequence can be DNA or RNA
else:
print ('The sequence is not DNA') #else the sequence is not DNA
if args.motif:
args.motif = args.motif.upper() #converte the motif in upper case
print(f'Motif search enabled: looking for motif in sequence') #to find simple motifs in the sequence, besides printing the type of molecule (DNA or RNA)
if re.search(args.motif, args.seq):
print("FOUND")
else:
print("NOT FOUND")
| stepsnap/git_HandsOn | seqClass.py | seqClass.py | py | 1,881 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_num... |
23682024390 | import datetime
def rest_sec_of_day():
"""
:return: 截止到目前当日剩余时间
"""
today = datetime.datetime.strptime(str(datetime.date.today()), "%Y-%m-%d")
tomorrow = today + datetime.timedelta(days=1)
nowTime = datetime.datetime.now()
return (tomorrow - nowTime).seconds # 获取秒
| peacefulyin/gh | BackEnd/util/common.py | common.py | py | 339 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "da... |
8670124375 | import pandas as pd
import pickle
df=pd.read_csv(r'C:/Users/SAIDHANUSH/spam-ham.csv')
df['Category'].replace('spam',0,inplace=True)
df['Category'].replace('ham',1,inplace=True)
x=df['Message']
y=df['Category']
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer()
x=cv.fit_transform(x)
pickle.dump(cv,open('transform.pkl','wb'))
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=4,test_size=0.2)
from sklearn.tree import DecisionTreeClassifier
clf=DecisionTreeClassifier()
clf.fit(x_train,y_train)
pr=clf.predict(x_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,pr))
pickle.dump(clf,open('nlp_model1.pkl','wb')) | dhanush77777/spam-messages-classification-app | nlp_model.py | nlp_model.py | py | 770 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 16,
"usage_type": "call"
},
{
"api_na... |
40411312041 | #!/usr/bin/env python3
"""
Name: bgp_neighbor_prefix_received.py
Description: NXAPI: display bgp neighbor summary info
"""
our_version = 109
script_name = "bgp_neighbor_prefix_received"
# standard libraries
import argparse
from concurrent.futures import ThreadPoolExecutor
# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_bgp_unicast_summary import (
NxapiBgpUnicastSummaryIpv4,
NxapiBgpUnicastSummaryIpv6,
)
def get_parser():
help_afi = "address family to query. one of ipv4 or ipv6."
help_nonzero = (
"if specified, only display neighbors with non-zero prefixes received"
)
ex_prefix = "Example: "
ex_afi = "{} --afi ipv6".format(ex_prefix)
ex_nonzero = "{} --nonzero".format(ex_prefix)
parser = argparse.ArgumentParser(
description="DESCRIPTION: display bgp unicast summary info via NXAPI",
parents=[ArgsCookie, ArgsNxapiTools],
)
default = parser.add_argument_group(title="DEFAULT SCRIPT ARGS")
mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
default.add_argument(
"--afi",
dest="afi",
required=False,
choices=["ipv4", "ipv6"],
default="ipv4",
help="{} {}".format(help_afi, ex_afi),
)
default.add_argument(
"--nonzero",
dest="nonzero",
required=False,
default=False,
action="store_true",
help="{} {}".format(help_nonzero, ex_nonzero),
)
parser.add_argument(
"--version", action="version", version="{} v{}".format("%(prog)s", our_version)
)
return parser.parse_args()
def get_device_list():
try:
return cfg.devices.split(",")
except:
log.error(
"exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
cfg.devices
)
)
exit(1)
def print_header():
print(fmt.format("ip", "hostname", "neighbor", "prefix_rx"))
def print_output(futures):
for future in futures:
output = future.result()
if output == None:
continue
for line in output:
print(line)
def collect_prefix_rx(ip, bgp):
lines = list()
for neighbor in bgp.neighbor_info:
bgp.neighbor = neighbor
try:
prefixreceived = int(bgp.prefixreceived)
except:
log.warning(
"collect_prefix_rx. {} skipping neighbor {}. cannot convert bgp.prefixreceived {} to int()".format(
bgp.hostname, bgp.neighbor, bgp.prefixreceived
)
)
continue
if prefixreceived == 0 and cfg.nonzero == True:
continue
lines.append(fmt.format(ip, bgp.hostname, bgp.neighbor, bgp.prefixreceived))
lines.append("")
return lines
def get_instance(ip, vault):
"""
return a list of NxapiBgpUnicastSummary*() instances based on cfg.afi
"""
if cfg.afi == "ipv4":
return NxapiBgpUnicastSummaryIpv4(
vault.nxos_username, vault.nxos_password, ip, log
)
elif cfg.afi == "ipv6":
return NxapiBgpUnicastSummaryIpv6(
vault.nxos_username, vault.nxos_password, ip, log
)
else:
log.error("exiting. Unknown afi {}".format(cfg.afi))
exit(1)
def worker(device, vault):
ip = get_device_mgmt_ip(nb, device)
instance = get_instance(ip, vault)
instance.nxapi_init(cfg)
instance.vrf = cfg.vrf
instance.refresh()
return collect_prefix_rx(ip, instance)
def get_fmt():
fmt_ipv6 = "{:<15} {:<18} {:<40} {:>9}"
fmt_ipv4 = "{:<15} {:<18} {:<15} {:>9}"
if cfg.afi == "ipv4":
return fmt_ipv4
else:
return fmt_ipv6
cfg = get_parser()
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()
fmt = get_fmt()
print_header()
executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
args = [device, vault]
futures.append(executor.submit(worker, *args))
print_output(futures)
| allenrobel/nxapi-netbox | scripts/bgp_neighbor_prefix_received.py | bgp_neighbor_prefix_received.py | py | 4,416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.args.args_cookie.ArgsCookie",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "nxapi_netbox.args.args_nxapi_tools.ArgsNxapiTools",
"line_number": 35,
"... |
26189029070 | import datetime
import table
import restaurant
class Restaurant:
def __init__(self):
self.tables = []
self.name = "Restaurant Dingo"
for i in range(8):
self.tables.append(table.Table(i))
def get_tables(self):
return self.tables
def print_tables(self):
for i in range(8):
print("Table " + str(i))
def loop_opening_hours(action):
dt = datetime.datetime.now()
newdate = dt.replace(hour=12, minute=0)
for i in range(12, 20):
newdate = dt.replace(hour=i, minute=0)
action(newdate) | jemmajh/Reservation_system_Y2 | restaurant.py | restaurant.py | py | 560 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "table.Table",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "attribute"
}
] |
40033463881 | import json
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from PIL import Image
from panopticapi.utils import rgb2id
from utils.utils import masks_to_boxes
from dataset.utils import make_coco_transforms
city2int = {
"aachen": 0,
"bremen": 1,
"darmstadt": 2,
"erfurt": 3,
"hanover": 4,
"krefeld": 5,
"strasbourg": 6,
"tubingen": 7,
"weimar": 8,
"bochum": 9,
"cologne": 10,
"dusseldorf": 11,
"hamburg": 12,
"jena": 13,
"monchengladbach": 14,
"stuttgart": 15,
"ulm": 16,
"zurich": 17,
"frankfurt": 18,
"lindau": 19,
"munster": 20,
"berlin": 21,
"bielefeld": 22,
"bonn": 23,
"leverkusen": 24,
"mainz": 25,
"munich": 26,
}
int2city = {v: k for k, v in city2int.items()}
def imgid2int(id):
city, f, s = id.split("_")
return int(int(s) + int(f) * 1e6 + city2int[city] * 1e12)
def int2imgid(num):
cityn = num // int(1e12)
f = (num - int(cityn * 1e12)) // int(1e6)
s = num % int(1e6)
return int2city[cityn] + "_" + str(f).zfill(6) + "_" + str(s).zfill(6)
class CityscapesPanoptic:
def __init__(
self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True
):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
city = ann_info["image_id"].split("_")[0]
img_path = (
Path(self.img_folder) / city / (ann_info["image_id"] + "_leftImg8bit.png")
)
ann_path = Path(self.ann_folder) / ann_info["file_name"]
img = Image.open(img_path).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
labels = torch.tensor(
[ann["category_id"] for ann in ann_info["segments_info"]],
dtype=torch.int64,
)
target = {}
target["image_id"] = torch.tensor(
[
imgid2int(
ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]
)
]
)
if self.return_masks:
target["masks"] = masks
target["labels"] = labels
target["boxes"] = masks_to_boxes(masks)
target["size"] = torch.as_tensor([int(h), int(w)])
target["orig_size"] = torch.as_tensor([int(h), int(w)])
if "segments_info" in ann_info:
for name in ["iscrowd", "area"]:
target[name] = torch.tensor(
[ann[name] for ann in ann_info["segments_info"]]
)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.coco["images"])
def get_height_and_width(self, idx):
img_info = self.coco["images"][idx]
height = img_info["height"]
width = img_info["width"]
return height, width
def build_cityscapes_panoptic(image_set, args):
img_folder_root = Path(args.coco_path)
ann_folder_root = Path(args.coco_panoptic_path)
assert img_folder_root.exists(), f"provided path {img_folder_root} does not exist"
assert ann_folder_root.exists(), f"provided path {ann_folder_root} does not exist"
ann_file = {
"train": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_train.json",
"val": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_val.json",
}
img_folder_path = {
"train": "/content/drive/MyDrive/cityscapes/leftImg8bit/train",
"val": "/content/drive/MyDrive/cityscapes/leftImg8bit/val",
}
ann_folder = {
"train": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_train",
"val": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_val",
}
dataset = CityscapesPanoptic(
img_folder_path[image_set],
ann_folder[image_set],
ann_file[image_set],
transforms=make_coco_transforms(image_set),
return_masks=args.masks,
)
return dataset
def build_dataset(image_set, args):
if args.dataset_file == "coco_panoptic":
# to avoid making panopticapi required for coco
return build_cityscapes_panoptic(image_set, args)
raise ValueError(f"dataset {args.dataset_file} not supported")
| adilsammar/detr-fine | archived/dataset/cts_dataset.py | cts_dataset.py | py | 5,196 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number... |
17466316782 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: cifar-convnet.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import argparse
import numpy as np
import os
from tensorpack import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_nr_gpu
"""
A small convnet model for Cifar10 or Cifar100 dataset.
Cifar10:
91% accuracy after 50k step.
19.3 step/s on Tesla M40
Not a good model for Cifar100, just for demonstration.
"""
class Model(ModelDesc):
def __init__(self, cifar_classnum):
super(Model, self).__init__()
self.cifar_classnum = cifar_classnum
def _get_input_vars(self):
return [InputVar(tf.float32, [None, 30, 30, 3], 'input'),
InputVar(tf.int32, [None], 'label')
]
def _build_graph(self, input_vars):
image, label = input_vars
is_training = get_current_tower_context().is_training
keep_prob = tf.constant(0.5 if is_training else 1.0)
if is_training:
tf.image_summary("train_image", image, 10)
image = image / 4.0 # just to make range smaller
with argscope(Conv2D, nl=BNReLU, use_bias=False, kernel_shape=3):
logits = LinearWrap(image) \
.Conv2D('conv1.1', out_channel=64) \
.Conv2D('conv1.2', out_channel=64) \
.MaxPooling('pool1', 3, stride=2, padding='SAME') \
.Conv2D('conv2.1', out_channel=128) \
.Conv2D('conv2.2', out_channel=128) \
.MaxPooling('pool2', 3, stride=2, padding='SAME') \
.Conv2D('conv3.1', out_channel=128, padding='VALID') \
.Conv2D('conv3.2', out_channel=128, padding='VALID') \
.FullyConnected('fc0', 1024 + 512, nl=tf.nn.relu) \
.tf.nn.dropout(keep_prob) \
.FullyConnected('fc1', 512, nl=tf.nn.relu) \
.FullyConnected('linear', out_dim=self.cifar_classnum, nl=tf.identity)()
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = symbf.prediction_incorrect(logits, label)
# monitor training error
add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
# weight decay on all W of fc layers
wd_cost = tf.mul(0.0004,
regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
add_moving_summary(cost, wd_cost)
add_param_summary([('.*/W', ['histogram'])]) # monitor W
self.cost = tf.add_n([cost, wd_cost], name='cost')
def get_data(train_or_test, cifar_classnum):
isTrain = train_or_test == 'train'
if cifar_classnum == 10:
ds = dataset.Cifar10(train_or_test)
else:
ds = dataset.Cifar100(train_or_test)
if isTrain:
augmentors = [
imgaug.RandomCrop((30, 30)),
imgaug.Flip(horiz=True),
imgaug.Brightness(63),
imgaug.Contrast((0.2,1.8)),
imgaug.GaussianDeform(
[(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],
(30,30), 0.2, 3),
imgaug.MeanVarianceNormalize(all_channel=True)
]
else:
augmentors = [
imgaug.CenterCrop((30, 30)),
imgaug.MeanVarianceNormalize(all_channel=True)
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, 128, remainder=not isTrain)
if isTrain:
ds = PrefetchData(ds, 3, 2)
return ds
def get_config(cifar_classnum):
    """Assemble the tensorpack TrainConfig for CIFAR-10/100 training."""
    logger.auto_set_dir()
    # prepare dataset
    dataset_train = get_data('train', cifar_classnum)
    step_per_epoch = dataset_train.size()
    dataset_test = get_data('test', cifar_classnum)
    # Cap TensorFlow's GPU memory fraction at 0.5.
    sess_config = get_default_sess_config(0.5)
    lr = symbf.get_scalar_var('learning_rate', 1e-2, summary=True)
    def lr_func(lr):
        # Decay schedule driven by validation error: multiply by ~0.31 each
        # trigger, and stop training entirely once the LR falls below 3e-5.
        if lr < 3e-5:
            raise StopTraining()
        return lr * 0.31
    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),
        callbacks=Callbacks([
            StatPrinter(), ModelSaver(),
            # Evaluate classification error on the test split every epoch.
            InferenceRunner(dataset_test, ClassificationError()),
            # Lower the LR when 'val_error' stops improving (last 10 stats).
            StatMonitorParamSetter('learning_rate', 'val_error', lr_func,
                                   threshold=0.001, last_k=10),
        ]),
        session_config=sess_config,
        model=Model(cifar_classnum),
        step_per_epoch=step_per_epoch,
        max_epoch=150,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
                        type=int, default=10)
    args = parser.parse_args()
    # Restrict TensorFlow to the requested GPU(s); default to GPU 0.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    with tf.Graph().as_default():
        config = get_config(args.classnum)
        if args.load:
            # Resume from a saved checkpoint.
            config.session_init = SaverRestore(args.load)
        if args.gpu:
            config.nr_tower = len(args.gpu.split(','))
        # Choose single- vs. multi-GPU trainer from the visible GPU count.
        nr_gpu = get_nr_gpu()
        if nr_gpu == 1:
            QueueInputTrainer(config).train()
        else:
            SyncMultiGPUTrainer(config).train()
| jxwufan/NLOR_A3C | tensorpack/examples/cifar-convnet.py | cifar-convnet.py | py | 5,549 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "tensorflow.float32",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.int32",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.constant",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "ten... |
import cv2
import math
import monta
import numpy as np
import matcompat
from scipy import signal
import matplotlib.pyplot as plt

# --- Gabor filter-bank parameters ("lammbda" avoids the reserved word) ---
lammbda = 6
pi = math.pi
# 8 orientations evenly spaced over [0, pi).
theta = np.arange(0, (np.pi-np.pi/8)+(np.pi/8), np.pi/8)
psi = 0
# 4 aspect ratios of the Gaussian envelope: 0.4, 0.6, 0.8, 1.0.
# (The original also assigned np.linspace(.4, 1, 4) first; that value was
# immediately overwritten, so the dead assignment is removed.)
gamma = np.arange(.4, 1.2, .2)
b = 4
# Envelope standard deviation from bandwidth b and wavelength (standard Gabor relation).
sigma = (1/pi)*math.sqrt((math.log(2)/2))*((2**b+1)/(2**b-1))*lammbda
l = int(12/2)  # half-size of the 13x13 kernel support
gt = 0

# --- Load image and build 4 opponent-color channels ---
imagen0 = np.float32(cv2.imread('images/NegroyYo4.jpg'))
imagen0 = cv2.cvtColor(imagen0,cv2.COLOR_BGR2RGB)
imagen0 = cv2.resize(imagen0, (320, 240))
# Normalize pixel values to roughly [-1, 1].
imagen1 = (imagen0-128)/127
imagen = np.zeros((240,320,4))
imagen[:,:,0]=(imagen1[:,:,0]-imagen1[:,:,1])/2          # R-G opponency
imagen[:,:,1]=(imagen1[:,:,0]+imagen1[:,:,1]-2*imagen1[:,:,2])/4  # Y-B opponency
imagen[:,:,2]=(imagen1[:,:,0]+imagen1[:,:,1]+imagen1[:,:,2])/3    # intensity
s = matcompat.size(imagen0)
# Channel 3: half the per-pixel range across the RGB channels (saturation-like).
for i in np.arange(1., (s[0])+1):
    for j in np.arange(1., (s[1])+1):
        imagen[int(i)-1,int(j)-1,3] = ((imagen1[int(i)-1,int(j)-1,:]).max()-(imagen1[int(i)-1,int(j)-1,:]).min())/2

# --- Convolve each channel with the 32-kernel Gabor bank (8 thetas x 4 gammas) ---
contador = 0
g = np.zeros((13,13))
imagenSalida = np.zeros((240,320,4,32))
for i in range(len(theta)):
    for f in range(len(gamma)):
        for j in range(-l,l+1):
            for k in range(-l,l+1):
                x = j*math.cos(theta[i])+k*math.sin(theta[i])
                y = k*math.cos(theta[i])-j*math.sin(theta[i])
                g[j+l,k+l]=math.exp(-(x**2 + (gamma[f]**2)*(y**2))/(2*sigma**2))*math.cos((2*pi*x/lammbda)+psi)
        imagenSalida[:,:,0,contador] = signal.convolve2d(imagen[:,:,0], g, boundary='symm', mode='same')
        imagenSalida[:,:,1,contador] = signal.convolve2d(imagen[:,:,1], g, boundary='symm', mode='same')
        imagenSalida[:,:,2,contador] = signal.convolve2d(imagen[:,:,2], g, boundary='symm', mode='same')
        imagenSalida[:,:,3,contador] = signal.convolve2d(imagen[:,:,3], g, boundary='symm', mode='same')
        contador = contador + 1

# --- Threshold each filtered map at 60% of its maximum to get binary feature maps ---
s = matcompat.size(imagenSalida)
FM = np.zeros(s)
area = []
# BUGFIX: the original wrote "for i in range(s[3]):" but the whole body indexed
# the filter dimension with the stale loop variable "k" left over from the
# kernel-construction loops, so only one (fixed) filter was ever processed.
for k in range(s[3]):
    alpha = .6
    m1 = alpha*imagenSalida[:,:,0,k].max().max()
    m2 = alpha*imagenSalida[:,:,1,k].max().max()
    m3 = alpha*imagenSalida[:,:,2,k].max().max()
    m4 = alpha*imagenSalida[:,:,3,k].max().max()
    for i in range(s[0]):
        for j in range(s[1]):
            if imagenSalida[i,j,0,k]>m1:
                FM[i,j,0,k] = 1
            if imagenSalida[i,j,1,k]>m2:
                FM[i,j,1,k]=1
            if imagenSalida[i,j,2,k]>m3:
                FM[i,j,2,k]=1
            if imagenSalida[i,j,3,k]>m4:
                FM[i,j,3,k]=1
    # Connected-component analysis of the first channel's feature map.
    [area,num] = monta.monta(FM[:,:,0,k])

# NOTE(review): "imagen" is a float 4-channel array; cv2.imshow will only
# interpret the first 3 channels -- confirm this is the intended preview.
cv2.imshow("input", imagen)
cv2.waitKey(0)
cv2.destroyAllWindows()
#theta = [round(float(i)/10000000,4) for i in range(0,int((pi-pi/8)*10000000),int((pi/8)*10000000))]
#gamma = [float(i)/10 for i in range(4,11,2)]
{
"api_name": "math.pi",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_nu... |
36030730386 | """Timezones lookup."""
import concurrent.futures
import os
import shutil
import subprocess
import sys
import time
import traceback
from datetime import datetime
from multiprocessing import cpu_count
from pathlib import Path
import pytz
import requests
import tzlocal
from fuzzywuzzy import process
import pycountry
import albert as v0
__title__ = "Timezones lookup"
__version__ = "0.4.0"
__triggers__ = "tz "
__authors__ = "Nikos Koukis"
__homepage__ = (
"https://github.com/bergercookie/awesome-albert-plugins/blob/master/plugins/timezones"
)
__py_deps__ = ["pycountry", "fuzzywuzzy",
"tzlocal", "requests", "traceback", "pytz"]
icon_path = str(Path(__file__).parent / "timezones")
cache_path = Path(v0.cacheLocation()) / "timezones"
config_path = Path(v0.configLocation()) / "timezones"
data_path = Path(v0.dataLocation()) / "timezones"
country_logos_path = data_path / "logos"
dev_mode = False
# country code -> cities
code_to_cities = dict({k: v for k, v in pytz.country_timezones.items()})
codes = list(code_to_cities.keys())
city_to_code = {vi: k for k, v in pytz.country_timezones.items() for vi in v}
cities = list(city_to_code.keys())
country_to_code = {
c.name: c.alpha_2 for c in pycountry.countries if c.alpha_2 in codes}
country_to_cities = {
country: [code_to_cities[code]] for country, code in country_to_code.items()
}
countries = list(country_to_code.keys())
local_tz_str = tzlocal.get_localzone().zone
def download_logo_for_code(code: str) -> bytes:
    """
    Download the flag/logo image for the given 2-letter country code and
    return the raw response bytes (returned even for a failed response).
    """
    # NOTE(review): requests does not support the file:// scheme, so the call
    # below raises InvalidSchema as written; the original HTTP endpoint is the
    # commented-out line -- confirm which source is intended.
    # ret = requests.get(f"https://www.countryflags.io/{code}/flat/64.png")
    ret = requests.get(f"file:///64/{code}.png")
    if not ret.ok:
        # Failure is only logged; .content of the failed response is still returned.
        print(f"[E] Couldn't download logo for code {code}")
    return ret.content
def get_logo_path_for_code(code: str) -> Path:
    """Build the filesystem path of the cached flag image for *code*."""
    return country_logos_path.joinpath(f"{code}.png")
def save_logo_for_code(code: str, data: bytes):
    """Write the raw image bytes for *code* into its cache location."""
    get_logo_path_for_code(code).write_bytes(data)
def download_and_save_logo_for_code(code):
    """Fetch the flag image for *code* and write it into the local logo cache."""
    save_logo_for_code(code, download_logo_for_code(code))
def download_all_logos():
    """Download the flag images for all known country codes in parallel.

    Uses one worker thread per CPU; a failed download is only logged, so one
    bad code does not abort the rest.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Map each in-flight future back to its country code for error reporting.
        future_to_code = {
            executor.submit(download_and_save_logo_for_code, code): code for code in codes
        }
        for future in concurrent.futures.as_completed(future_to_code):
            code = future_to_code[future]
            try:
                future.result()
            except Exception as exc:
                print(
                    f"[W] Fetching logo for {code} generated an exception: {exc}")
# plugin main functions -----------------------------------------------------------------------
def initialize():
    """Called when the extension is loaded (ticked in the settings) - blocking."""
    # create plugin locations
    for p in (cache_path, config_path, data_path):
        p.mkdir(parents=False, exist_ok=True)
    # fetch all logos at startup, but only when the cache directory is empty
    country_logos_path.mkdir(exist_ok=True)
    if not list(country_logos_path.iterdir()):
        print("Downloading country logos")
        t = time.time()
        download_all_logos()
        print(f"Downloaded country logos - Took {time.time() - t} seconds")
def finalize():
    """Called when the extension is unloaded; nothing to clean up."""
    pass
def get_uniq_elements(seq):
    """Return the elements of *seq* with duplicates removed, keeping first-seen order."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def handleQuery(query) -> list:
    """Hook that is called by albert with *every new keypress*."""  # noqa
    results = []
    if query.isTriggered:
        try:
            query.disableSort()
            # Give a setup step a chance to render its own items first.
            results_setup = setup(query)
            if results_setup:
                return results_setup
            query_str = query.string.strip()
            # Fuzzy-match the query against both timezone names and country names.
            matched = [
                elem[0] for elem in process.extract(query_str, [*cities, *countries], limit=8)
            ]
            matched2 = []
            # replace country names with its cities
            for m in matched:
                if m in countries:
                    matched2.extend(*country_to_cities[m])
                else:
                    matched2.append(m)
            matched2 = get_uniq_elements(matched2)
            # add own timezone: move the local timezone to the top of the list
            if local_tz_str in matched2:
                matched2.remove(local_tz_str)
                matched2.insert(0, local_tz_str)
            results.extend([get_as_item(m) for m in matched2])
        except Exception:  # user to report error
            if dev_mode:  # let exceptions fly!
                print(traceback.format_exc())
                raise
            # In production, surface a single item that copies the traceback
            # so the user can report it.
            results.insert(
                0,
                v0.Item(
                    id=__title__,
                    icon=icon_path,
                    text="Something went wrong! Press [ENTER] to copy error and report it",
                    actions=[
                        v0.ClipAction(
                            f"Copy error - report it to {__homepage__[8:]}",
                            f"{traceback.format_exc()}",
                        )
                    ],
                ),
            )
    return results
# supplementary functions ---------------------------------------------------------------------
def get_as_item(city: str):
    """Return an item - ready to be appended to the items list and be rendered by Albert.

    *city* is an IANA timezone name (e.g. "America/Chicago"); the item shows
    the current wall-clock time in that zone and links to zeitverschiebung.net.
    """
    code = city_to_code[city]
    icon = str(get_logo_path_for_code(code))
    # Convert "now" (UTC) into the target timezone.
    utc_dt = pytz.utc.localize(datetime.utcnow())
    dst_tz = pytz.timezone(city)
    dst_dt = utc_dt.astimezone(dst_tz)
    text = f"{str(dst_dt)}"
    subtext = f"[{code}] | {city}"
    return v0.Item(
        id=__title__,
        icon=icon,
        text=text,
        subtext=subtext,
        completion=city,
        actions=[
            v0.UrlAction(
                "Open in zeitverschiebung.net",
                # The site encodes "Region/City" as "region--city".
                f'https://www.zeitverschiebung.net/en/timezone/{city.replace("/", "--").lower()}',
            ),
        ],
    )
def sanitize_string(s: str) -> str:
    """Escape '<' so the string cannot open an HTML/rich-text tag when rendered.

    BUGFIX: the original called s.replace("<", "<"), a no-op; the intended
    replacement is the HTML entity "&lt;".
    """
    return s.replace("<", "&lt;")
def get_as_subtext_field(field, field_title=None) -> str:
    """Format one variable for the subtext line, optionally prefixed by a title.

    Returns "" for a falsy *field*, otherwise "field | " or "title: field | ".
    """
    if not field:
        return ""
    prefix = f"{field_title}: " if field_title else ""
    return f"{prefix}{field} | "
def save_data(data: str, data_name: str):
    """Persist *data* under *data_name* inside the plugin's config directory."""
    (config_path / data_name).write_text(data)
def load_data(data_name) -> str:
    """Read *data_name* from the config directory.

    Returns the first whitespace-delimited token of the file's first line.
    """
    with (config_path / data_name).open("r") as handle:
        return handle.readline().strip().split()[0]
def setup(query):
"""Setup is successful if an empty list is returned.
Use this function if you need the user to provide you data
"""
results = []
return results
| ppablocruzcobas/Dotfiles | albert/timezones/__init__.py | __init__.py | py | 7,290 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "albert.cacheLocation",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"li... |
# Django
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404

# Third-party: django-import-export / tablib / reportlab
from import_export import resources
from import_export.formats import base_formats
from import_export.resources import ModelResource, modelresource_factory
from reportlab.pdfgen import canvas
from tablib import Dataset

# Local application
from .forms import *
from .forms import ProductUpdateForm
from .models import *
from .models import Product, Category
from .resources import ProductResources
#===============================================================code for category========================================
#====== list and add category======================
def category_list(request):
    """List every category and handle the 'add category' form on the same page.

    GET renders the list with an empty form; POST validates/saves a new
    category and redirects back here (invalid forms re-render with errors).
    """
    queryset = Category.objects.all()
    if request.method == "POST": # Add Categories
        formbb = CategoryForm(request.POST or None)
        if formbb.is_valid():
            formbb.save()
            return redirect('category_list')
    else:
        formbb = CategoryForm()
    context = {
        "queryset":queryset,
        "formbb": formbb,
    }
    return render(request, "category_list.html", context)
#============= delete category===================
def delete_categorys(request, pk):
    """Delete the category with primary key *pk* (POST only), then return to the list."""
    category = get_object_or_404(Category, pk=pk)
    if request.method == 'POST':
        category.delete()
    # Non-POST requests fall through without deleting anything.
    return redirect('category_list')
#===========================================================the for Products page==========================================
#=========== list and Add product ===========================
def product_list(request):
    """List all products ordered by name and handle the 'add product' form.

    GET renders the list plus an empty ProductForm; POST validates and saves
    a new product, then redirects back to this view.
    """
    if request.method == 'POST': # Add Products
        formcc = ProductForm(request.POST)
        if formcc.is_valid():
            formcc.save()
            return redirect('product_list')
    else:
        formcc = ProductForm()
    # The original ran Product.objects.all() twice and discarded the first,
    # unordered queryset; only the ordered query is kept.
    queryset = Product.objects.all().order_by('product_name')
    context ={
        "queryset":queryset,
        "formcc":formcc,
    }
    return render(request, "product_list.html", context)
#====================================== Add product to main Store=======================================
def receive_products(request, code):
    """Record a delivery from the factory into the main store for one product.

    Adds the submitted receive_main_quantity to the main-store stock and to
    the all-time received total, then redirects to the product list.
    """
    queryset = Product.objects.get(code=code)
    formjj= ProductAmendForm(request.POST or None, instance=queryset)
    if formjj.is_valid():
        instance= formjj.save(commit=False)
        instance.shop_send_quantity = 0 # set the value of the "issue to shop" =0
        instance.factory_quantity += instance.receive_main_quantity # add the received quantity from factory to the quantity in the store
        instance.first_add_main_quantity = instance.receive_main_quantity+instance.first_add_main_quantity # recording the products that are stored in main store from the first record of received itme until now
        instance.save()
        return redirect ("product_list")
    context = {
        "instance":queryset,
        "formjj":formjj,
    }
    return render(request, 'receive_products.html',context)
#===================== issue products from main store to shop store================================
def issue_products(request, code):
    """Move stock of one product from the main store to the shop store.

    Subtracts shop_send_quantity from the main store and adds it to both the
    shop's received total and remaining stock, then redirects to the list.
    """
    queryset = Product.objects.get(code=code)
    formuu= ProductIssueForm(request.POST or None, instance=queryset)
    if formuu.is_valid():
        instance= formuu.save(commit=False)
        instance.receive_main_quantity=0  # reset the "receive from factory" field
        instance.factory_quantity -= instance.shop_send_quantity
        instance.shop_receive_quantity = instance.shop_receive_quantity+ instance.shop_send_quantity
        instance.shop_remain_quantity +=instance.shop_send_quantity
        instance.save()
        return redirect ("product_list")
    context = {
        "instance":queryset,
        "formuu":formuu,
    }
    return render(request, 'issue_products.html',context)
#====================== delete products from the list====================
def delete_product(request, code):
    """Delete the product identified by *code* (POST only), then return to the product list."""
    product = get_object_or_404(Product, code=code)
    if request.method == 'POST':
        product.delete()
    # Non-POST requests just bounce back to the list.
    return redirect('product_list')
#======================= Update the Products =================================
def update_products(request, code):
    """Edit an existing product identified by *code*.

    GET shows the form pre-filled from the instance; POST validates, saves,
    and redirects to the product list (invalid POSTs re-render with errors).
    """
    queryset= Product.objects.get(code=code)
    formvv= ProductUpdateForm(instance = queryset)
    if request.method == 'POST':
        formvv = ProductUpdateForm(request.POST,instance=queryset)
        if formvv.is_valid():
            formvv.save()
            return redirect('product_list')
    context= {
        'formvv' : formvv
    }
    return render(request, 'update_products.html', context)
#=================================================================for the shop store =============================================
def shop_sell(request):
    """Render the shop selling page (template only, no context)."""
    return render(request, 'shop_sell.html')
def product_shop_list(request):
    """Render the shop-store product list, ordered by product name."""
    # The original ran an extra unordered Product.objects.all() query whose
    # result was immediately discarded; only the ordered query is kept.
    queryset = Product.objects.all().order_by('product_name')
    context ={
        "queryset":queryset,
    }
    return render(request, "product_shop_list.html", context)
#==================== code for the import and Export=========================================
class ProductResource(ModelResource):
    """django-import-export resource describing how Product rows are (de)serialized."""
    class Meta:
        # Export/import every field of the Product model.
        model = Product
def export_pdf(request):
    """Export all products as a PDF attachment, one page per product.

    Each page draws the name and description at fixed coordinates; showPage()
    starts a fresh page for the next product.
    """
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="products.pdf"'
    p = canvas.Canvas(response)
    product = Product.objects.all()
    for item in product:
        p.drawString(100, 700, f'Name: {item.product_name}')
        p.drawString(100, 680, f'Description: {item.description}')
        p.showPage()
    p.save()
    return response
def export_excel(request):
    """Export all products as an Excel (.xls) attachment via the import-export resource."""
    product = ProductResource().export()
    response = HttpResponse(product.xls, content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="product.xls"'
    return response
def import_excel(request):
    """Import products from an uploaded .xls file using django-import-export.

    POST: validates the upload's extension, loads it into a tablib Dataset,
    dry-runs the import to check for errors, and only then imports for real.
    GET (or any fall-through) re-renders the import page.

    BUGFIX: the original referenced `messages` without importing it, raising
    NameError at runtime; the import is added at the top of the module.
    """
    if request.method == 'POST':
        dataset = Dataset()
        new_data = request.FILES['myfile']
        if not new_data.name.endswith('xls'):
            messages.info(request, 'Wrong format')
            return render(request, 'import_data.html')
        # Dataset.load fills `dataset` in place (the original also bound the
        # return value to an unused local).
        dataset.load(new_data.read(), 'xls')
        result = ProductResource().import_data(dataset, dry_run=True)  # Check if the data is valid
        if not result.has_errors():
            ProductResource().import_data(dataset, dry_run=False)  # Import the actual data
            messages.success(request, 'Data imported successfully')
    return render(request, 'import_data.html')
def export_import(request):
    """Landing view for the import/export actions; simply re-renders the product list."""
    return render(request, 'product_list.html')
{
"api_name": "models.Category.objects.all",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 25,
"usage_type": "name"
},
{
"api_name... |
34228406110 | from pymongo.collection import Collection
from bson.objectid import ObjectId
def insert_object(obj: dict, collection: Collection):
    """Insert the object into the collection and return the new document's id.

    NOTE(review): this mutates the caller's dict in place -- obj['fields'] is
    converted from a dict to a list of (key, value) pairs before insertion,
    and pymongo additionally adds an '_id' key to obj. Callers may rely on
    either side effect, so it is documented rather than removed.
    """
    obj['fields'] = list(obj['fields'].items())
    return collection.insert_one(obj).inserted_id
def delete_object(object_id: str, collection: Collection):
"""Удаление объекта из коллекции"""
collection.delete_one({"_id": ObjectId(object_id)})
def get_object(object_id: str, collection: Collection):
"""Получение объекта из коллекции по id"""
obj = collection.find_one({"_id": ObjectId(object_id)})
if obj is not None:
obj['fields'] = dict(obj['fields'])
return obj
def get_objects(
page_size: int,
page_number: int,
collection: Collection
) -> list[dict]:
"""
Получение объектов из коллекции
:param page_size: Размер страницы
:param page_number: Номер страницы
:param collection: Коллекция MongoDB
:return: Список объектов
"""
result = []
for obj in collection.find({}).limit(page_size).skip((page_number - 1) * page_size):
obj['fields'] = dict(obj['fields'])
result.append(obj)
return result
| AKovalyuk/test-task | app/db/crud.py | crud.py | py | 1,341 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.collection.Collection",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pymongo.collection.Collection",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 13,
"usage_type": "call"
},
{
... |
7985866436 | import numpy as np
import cv2
import time
def my_padding(src, filter):
(h, w) = src.shape
if isinstance(filter, tuple):
(h_pad, w_pad) = filter
else:
(h_pad, w_pad) = filter.shape
h_pad = h_pad // 2
w_pad = w_pad // 2
padding_img = np.zeros((h+h_pad*2, w+w_pad*2))
padding_img[h_pad:h+h_pad, w_pad:w+w_pad] = src
# repetition padding
# up
padding_img[:h_pad, w_pad:w_pad + w] = src[0, :]
# down
padding_img[h_pad + h:, w_pad:w_pad + w] = src[h - 1, :]
# left
padding_img[:, :w_pad] = padding_img[:, w_pad:w_pad + 1]
# right
padding_img[:, w_pad + w:] = padding_img[:, w_pad + w - 1:w_pad + w]
return padding_img
def my_filtering(src, filter):
(h, w) = src.shape
(f_h, f_w) = filter.shape
#filter 확인
#print('<filter>')
#print(filter)
# 직접 구현한 my_padding 함수를 이용
pad_img = my_padding(src, filter)
dst = np.zeros((h, w))
for row in range(h):
for col in range(w):
dst[row, col] = np.sum(pad_img[row:row + f_h, col:col + f_w] * filter)
return dst
def get_my_sobel():
sobel_x = np.dot(np.array([[1], [2], [1]]), np.array([[-1, 0, 1]]))
sobel_y = np.dot(np.array([[-1], [0], [1]]), np.array([[1, 2, 1]]))
return sobel_x, sobel_y
def calc_derivatives(src):
# calculate Ix, Iy
sobel_x, sobel_y = get_my_sobel()
Ix = my_filtering(src, sobel_x)
Iy = my_filtering(src, sobel_y)
return Ix, Iy
def find_local_maxima(src, ksize):
(h, w) = src.shape
pad_img = np.zeros((h+ksize, w+ksize))
pad_img[ksize//2:h+ksize//2, ksize//2:w+ksize//2] = src
dst = np.zeros((h, w))
for row in range(h):
for col in range(w):
max_val = np.max(pad_img[row : row+ksize, col:col+ksize])
if max_val == 0:
continue
if src[row, col] == max_val:
dst[row, col] = src[row, col]
return dst
def get_integral_image(src):
assert len(src.shape) == 2
h, w = src.shape
dst = np.zeros(src.shape)
##############################
# ToDo
# dst는 integral image
# dst 알아서 채우기
##############################
integral_image = dst
for row in range(0, h):
summation = 0
for col in range(0, w):
summation += src[row][col]
integral_image[row][col] = summation
if row > 0:
integral_image[row][col] += integral_image[row - 1][col]
# dst = integral_image
# return dst
dst2 = np.zeros(src.shape)
for y in range(h):
for x in range(w):
min_row, max_row = max(0, y - 1), min(h - 1, y + 1)
min_col, max_col = max(0, x - 1), min(w - 1, x + 1)
dst2[y][x] = integral_image[max_row][max_col]
if min_row > 0:
dst2[y][x] -= integral_image[min_row - 1][max_col]
if min_col > 0:
dst2[y][x] -= integral_image[max_row][min_col - 1]
if min_col > 0 and min_row > 0:
dst2[y][x] += integral_image[min_row - 1][min_col - 1]
return dst2
def calc_M_harris(IxIx, IxIy, IyIy, fsize = 5):
assert IxIx.shape == IxIy.shape and IxIx.shape == IyIy.shape
h, w = IxIx.shape
M = np.zeros((h, w, 2, 2))
IxIx_pad = my_padding(IxIx, (fsize, fsize))
IxIy_pad = my_padding(IxIy, (fsize, fsize))
IyIy_pad = my_padding(IyIy, (fsize, fsize))
# for row in range(h):
# for col in range(w):
# M[row, col, 0, 0] = np.sum(IxIx_pad[row:row+fsize, col:col+fsize])
# M[row, col, 0, 1] = np.sum(IxIy_pad[row:row+fsize, col:col+fsize])
# M[row, col, 1, 0] = M[row, col, 0, 1]
# M[row, col, 1, 1] = np.sum(IyIy_pad[row:row+fsize, col:col+fsize])
for row in range(h):
for col in range(w):
ixix = 0
ixiy = 0
iyiy = 0
for f_row in range(fsize):
for f_col in range(fsize):
ixix = ixix + IxIx_pad[row + f_row][col + f_col]
ixiy = ixiy + IxIy_pad[row + f_row][col + f_col]
iyiy = iyiy + IyIy_pad[row + f_row][col + f_col]
M[row, col, 0, 0] = ixix
M[row, col, 0, 1] = ixiy
M[row, col, 1, 0] = ixiy
M[row, col, 1, 1] = iyiy
return M
def harris_detector(src, k = 0.04, threshold_rate = 0.01, fsize=5):
harris_img = src.copy()
h, w, c = src.shape
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) / 255.
# calculate Ix, Iy
Ix, Iy = calc_derivatives(gray)
# Square of derivatives
IxIx = Ix**2
IyIy = Iy**2
IxIy = Ix * Iy
start = time.perf_counter() # 시간 측정 시작
M_harris = calc_M_harris(IxIx, IxIy, IyIy, fsize)
end = time.perf_counter() # 시간 측정 끝
print('M_harris time : ', end-start)
R = np.zeros((h, w))
for row in range(h):
for col in range(w):
##########################################################################
# ToDo
# det_M 계산
# trace_M 계산
# R 계산 Harris & Stephens (1988), Nobel (1998) 어떤걸로 구현해도 상관없음
##########################################################################
det_M = M_harris[row, col, 0, 0] * M_harris[row, col, 1, 1] - (M_harris[row, col, 0, 1] * M_harris[row, col, 1, 0])
trace_M = M_harris[row, col, 0, 0] + M_harris[row, col, 1, 1]
R[row, col] = det_M - k*trace_M*trace_M
# thresholding
R[R < threshold_rate * np.max(R)] = 0
R = find_local_maxima(R, 21)
R = cv2.dilate(R, None)
harris_img[R != 0]=[0, 0, 255]
return harris_img
def harris_detector_integral(src, k = 0.04, threshold_rate = 0.01, fsize=5):
harris_img = src.copy()
h, w, c = src.shape
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) / 255.
# calculate Ix, Iy
Ix, Iy = calc_derivatives(gray)
# Square of derivatives
IxIx = Ix**2
IyIy = Iy**2
IxIy = Ix * Iy
start = time.perf_counter() # 시간 측정 시작
IxIx_integral = get_integral_image(IxIx)
IxIy_integral = get_integral_image(IxIy)
IyIy_integral = get_integral_image(IyIy)
end = time.perf_counter() # 시간 측정 끝
print('make integral image time : ', end-start)
start = time.perf_counter() # 시간 측정 시작
##############################
# ToDo
# M_integral 완성시키기
##############################
M_integral = calc_M_harris(IxIx_integral, IxIy_integral, IyIy_integral, fsize)
end = time.perf_counter() # 시간 측정 끝
print('M_harris integral time : ', end-start)
R = np.zeros((h, w))
for row in range(h):
for col in range(w):
##########################################################################
# ToDo
# det_M 계산
# trace_M 계산
# R 계산 Harris & Stephens (1988), Nobel (1998) 어떤걸로 구현해도 상관없음
##########################################################################
det_M = M_integral[row, col, 0, 0] * M_integral[row, col, 1, 1] - (M_integral[row, col, 0, 1] * M_integral[row, col, 1, 0])
trace_M = M_integral[row, col, 0, 0] + M_integral[row, col, 1, 1]
R[row, col] = det_M - k * trace_M * trace_M
# thresholding
R[R < threshold_rate * np.max(R)] = 0
R = find_local_maxima(R, 21)
R = cv2.dilate(R, None)
harris_img[R != 0]=[0, 0, 255]
return harris_img
def main():
src = cv2.imread('zebra.png') # shape : (552, 435, 3)
print('start!')
cv2.imshow('original ', src)
harris_img = harris_detector(src)
cv2.imshow('harris_img ' + '201402414', harris_img)
harris_integral_img = harris_detector_integral(src)
cv2.imshow('harris_integral_img ' + '201402414' , harris_integral_img)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
main() | 201402414/CG | [CG]201402414_장수훈_5주차_과제/[CG]201402414_장수훈_5주차_과제/integral_image_report.py | integral_image_report.py | py | 8,317 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 47,
... |
37055851732 | from unittest.runner import TextTestRunner
import urllib.request
import unittest
from typing import TypeVar, Callable, List
T = TypeVar('T')
S = TypeVar('S')
#################################################################################
# EXERCISE 1
#################################################################################
def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:
    """
    Sort *lst* in place (and return it) using the three-way *compare* function:
    compare(a, b) is -1 when a < b, 1 when a > b, and 0 when equal.

    Implemented as a stable bubble sort: adjacent out-of-order pairs are
    swapped until a full pass makes no swap.
    """
    swapped = True
    while swapped:
        swapped = False
        for idx in range(1, len(lst)):
            if compare(lst[idx - 1], lst[idx]) == 1:
                lst[idx - 1], lst[idx] = lst[idx], lst[idx - 1]
                swapped = True
    return lst
def mybinsearch(lst: "List[T]", elem: "S", compare: "Callable[[T, S], int]") -> int:
    """
    Search for *elem* in sorted *lst* using binary search.

    Elements are compared with compare(item, elem): -1 when item < elem,
    1 when item > elem, 0 on a match. Returns the position of the first
    (leftmost) match for elem in lst, or -1 if elem does not exist.

    BUGFIX: the original returned whichever matching index the bisection
    happened to land on; with duplicates that is not necessarily the leftmost
    match the docstring promises. This version keeps bisecting left after a
    match to find the first occurrence.
    """
    lo, hi = 0, len(lst) - 1
    found = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        c = compare(lst[mid], elem)
        if c == 0:
            found = mid
            hi = mid - 1  # a match: keep looking left for an earlier one
        elif c == 1:
            hi = mid - 1
        else:
            lo = mid + 1
    return found
class Student():
    """Custom class to test generic sorting and searching.

    Equality is by *name* only, which is what the exercise tests rely on.
    (As with the original, defining __eq__ leaves the class unhashable.)
    """
    def __init__(self, name: str, gpa: float):
        self.name = name
        self.gpa = gpa

    def __eq__(self, other):
        return self.name == other.name

    def __repr__(self):
        # Added so failed assertEqual calls on lists of students are readable.
        return f"Student({self.name!r}, {self.gpa!r})"
# 30 Points (total)
def test1():
    """Tests for generic sorting and binary search."""
    print(80 * "#" + "\nTests for generic sorting and binary search.")
    test1_1()
    test1_2()
    test1_3()
    test1_4()
    test1_5()

# 6 Points
def test1_1():
    """Sort ints."""
    print("\t-sort ints")
    # A standalone TestCase instance is used purely for its assert helpers.
    tc = unittest.TestCase()
    ints = [ 4, 3, 7, 10, 9, 2 ]
    intcmp = lambda x,y: 0 if x == y else (-1 if x < y else 1)
    sortedints = mysort(ints, intcmp)
    tc.assertEqual(sortedints, [2, 3, 4, 7, 9, 10])

# 6 Points
def test1_2():
    """Sort strings based on their last character."""
    print("\t-sort strings on their last character")
    tc = unittest.TestCase()
    strs = [ 'abcd', 'aacz', 'zasa' ]
    # Compare only the final character of each string.
    suffixcmp = lambda x,y: 0 if x[-1] == y[-1] else (-1 if x[-1] < y[-1] else 1)
    sortedstrs = mysort(strs,suffixcmp)
    tc.assertEqual(sortedstrs, [ 'zasa', 'abcd', 'aacz' ])

# 6 Points
def test1_3():
    """Sort students based on their GPA."""
    print("\t-sort students on their GPA.")
    tc = unittest.TestCase()
    students = [ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ]
    sortedstudents = mysort(students, lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1))
    # Student.__eq__ compares names only, so this checks the final ordering.
    expected = [ Student('Angela', 2.5), Student('Josh', 3.0), Student('Jia', 3.5), Student('Vinesh', 3.8) ]
    tc.assertEqual(sortedstudents, expected)

# 6 Points
def test1_4():
    """Binary search for ints."""
    print("\t-binsearch ints")
    tc = unittest.TestCase()
    ints = [ 4, 3, 7, 10, 9, 2 ]
    intcmp = lambda x,y: 0 if x == y else (-1 if x < y else 1)
    sortedints = mysort(ints, intcmp)
    tc.assertEqual(mybinsearch(sortedints, 3, intcmp), 1)
    tc.assertEqual(mybinsearch(sortedints, 10, intcmp), 5)
    tc.assertEqual(mybinsearch(sortedints, 11, intcmp), -1)

# 6 Points
def test1_5():
    """Binary search for students by gpa."""
    print("\t-binsearch students")
    tc = unittest.TestCase()
    students = [ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ]
    # stcmp compares two students; stbincmp compares a student against a bare GPA.
    stcmp = lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1)
    stbincmp = lambda x,y: 0 if x.gpa == y else (-1 if x.gpa < y else 1)
    sortedstudents = mysort(students, stcmp)
    tc.assertEqual(mybinsearch(sortedstudents, 3.5, stbincmp), 2)
    tc.assertEqual(mybinsearch(sortedstudents, 3.7, stbincmp), -1)
#################################################################################
# EXERCISE 2
#################################################################################
class PrefixSearcher():
    """Index of *document* supporting substring queries of length up to *k*.

    Fixes two defects of the original version: the window starting at the
    document's last character was skipped (range(0, len(document) - 1)), so a
    length-1 query for the final character failed when k == 1; and search()
    never raised for queries longer than k although its contract requires it.
    """

    def __init__(self, document, k):
        """
        Initializes a prefix searcher using a document and a maximum
        search string length k.
        """
        self.k = k
        # Any occurrence of a query q (len(q) <= k) starting at position i is
        # a prefix of the k-character window starting at i, so the set of
        # windows is a sufficient index for the whole document.
        self.windows = {document[i:i + k] for i in range(len(document))}

    def search(self, q):
        """
        Return true if the document contains search string q (of
        length up to k). If q is longer than k, then raise an
        Exception.
        """
        if len(q) > self.k:
            raise Exception(f"search string longer than maximum length {self.k}")
        return any(window.startswith(q) for window in self.windows)
# 30 Points
def test2():
    """Run the PrefixSearcher tests (substring search up to length k)."""
    print("#" * 80 + "\nSearch for substrings up to length n")
    test2_1()
    test2_2()

# 15Points
def test2_1():
    """Search a short literal document with window sizes 1 and 2."""
    print("\t-search in hello world")
    tc = unittest.TestCase()
    p = PrefixSearcher("Hello World!", 1)
    tc.assertTrue(p.search("l"))
    tc.assertTrue(p.search("e"))
    # Search is case-sensitive: the document only contains capital 'H'.
    tc.assertFalse(p.search("h"))
    tc.assertFalse(p.search("Z"))
    tc.assertFalse(p.search("Y"))
    p = PrefixSearcher("Hello World!", 2)
    tc.assertTrue(p.search("l"))
    tc.assertTrue(p.search("ll"))
    tc.assertFalse(p.search("lW"))

# 20 Points
def test2_2():
    """Search the opening of Moby Dick fetched over the network (requires internet)."""
    print("\t-search in Moby Dick")
    tc = unittest.TestCase()
    md_url = 'https://www.gutenberg.org/files/2701/2701-0.txt'
    md_text = urllib.request.urlopen(md_url).read().decode()
    p = PrefixSearcher(md_text[0:1000],4)
    tc.assertTrue(p.search("Moby"))
    tc.assertTrue(p.search("Dick"))
#################################################################################
# EXERCISE 3
#################################################################################
class SuffixArray():
    """Sorted array of all suffixes of a document, for substring queries.

    NOTE(review): despite its docstring, positions() returns indices into the
    *sorted suffix array*, not character offsets in the document -- test3_2's
    expected {355, 356} matches this behavior, so it is documented rather than
    changed. contains() returns None (falsy) instead of False when the string
    is absent, and scans all suffixes in O(n*m) instead of binary-searching.
    """
    def __init__(self, document: str):
        """
        Creates a suffix array for document (a string).
        """
        # Lexicographic three-way comparison for mysort.
        comp = lambda x,y: 0 if x == y else (-1 if x < y else 1)
        self.sa = mysort([document[i:] for i in range(len(document))], comp)
        pass
    def positions(self, searchstr: str):
        """
        Returns the indices (into the sorted suffix array) of all suffixes
        that start with searchstr.
        """
        out = []
        for x in range(0, len(self.sa)):
            sub = self.sa[x]
            # Prefix match: the suffix begins with searchstr.
            if searchstr == sub[0:len(searchstr)]:
                out.append(x)
        return out
        pass
    def contains(self, searchstr: str):
        """
        Returns True if searchstr is contained in the document
        (and implicitly None otherwise).
        """
        for x in self.sa:
            if searchstr in x:
                return True
        pass
# 40 Points
def test3():
    """Test suffix arrays (40 points total)."""
    print(80 * "#" + "\nTest suffix arrays.")
    for subtest in (test3_1, test3_2):
        subtest()
# 20 Points
def test3_1():
    """Suffix-array membership checks on a short literal string (20 points)."""
    print("\t-suffixarray on Hello World!")
    checker = unittest.TestCase()
    sa = SuffixArray("Hello World!")
    for present in ("l", "e"):
        checker.assertTrue(sa.contains(present))
    for absent in ("h", "Z", "Y"):
        checker.assertFalse(sa.contains(absent))
    checker.assertTrue(sa.contains("ello Wo"))
# 20 Points
def test3_2():
    """Suffix-array checks against the opening of Moby Dick (20 points)."""
    print("\t-suffixarray on Moby Dick!")
    checker = unittest.TestCase()
    md_url = 'https://www.gutenberg.org/files/2701/2701-0.txt'
    md_text = urllib.request.urlopen(md_url).read().decode()
    sa = SuffixArray(md_text[0:1000])
    for phrase in ("Moby-Dick", "Herman Melville"):
        checker.assertTrue(sa.contains(phrase))
    checker.assertEqual(set(sa.positions("Moby-Dick")), {355, 356})
#################################################################################
# TEST CASES
#################################################################################
def main():
    """Run every exercise's test suite in order."""
    for suite in (test1, test2, test3):
        suite()


if __name__ == '__main__':
    main()
| saronson/cs331-s21-jmallett2 | lab03/lab03.py | lab03.py | py | 8,672 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "typing.TypeVar",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_n... |
39441402911 | from mlearn import base
from functools import reduce
from datetime import datetime
from mlearn.data.dataset import GeneralDataset
from mlearn.data.batching import Batch, BatchExtractor
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def process_and_batch(dataset: GeneralDataset, data: base.DataType, batch_size: int, onehot: bool = True,
                      shuffle: bool = False, **kwargs):
    """
    Process a dataset and data.

    :dataset (GeneralDataset): The dataset object to use for processing.
    :data (base.DataType): The data to be batched and processed.
    :batch_size (int): Size of batches to create.
    :onehot (bool, default = True): Forwarded to BatchExtractor.
    :shuffle (bool, default = False): Shuffle batches before returning.
    :returns: Batched data.
    """
    # Encode the labels on the raw data first.
    dataset.process_labels(data)

    # Group the processed documents into fixed-size batches.
    raw_batches = Batch(batch_size, data)
    raw_batches.create_batches()
    extracted = BatchExtractor('label', raw_batches, dataset, onehot)

    if shuffle:
        extracted.shuffle()
    return extracted
def get_deep_dict_value(source: dict, keys: str, default = None):
    """
    Get values from deeply nested dicts.

    :source (dict): Dictionary to get data from.
    :keys (str): Keys split by '|'. E.g. outerkey|middlekey|innerkey.
    :default: Default return value.
    """
    current = source
    for key in keys.split("|"):
        # Descend one level; fall back to the default as soon as the
        # current value is no longer a dictionary.
        if isinstance(current, dict):
            current = current.get(key, default)
        else:
            current = default
    return current
def select_vectorizer(vectorizer: str = 'dict') -> base.VectType:
    """
    Identify vectorizer used and return it to be used.

    :vectorizer (str, default = 'dict'): Vectorizer to use ('dict', 'tfidf' or 'count').
    :return v: Vectorizer object, tagged with a readable `name` and a `fitted` flag.
    :raises ValueError: If the requested vectorizer is not recognised.
    """
    vect = vectorizer.lower()
    if 'dict' in vect:
        v = DictVectorizer()
        setattr(v, 'name', 'DictVectorizer')
    elif 'tfidf' in vect:
        v = TfidfVectorizer()
        setattr(v, 'name', 'TFIDF-Vectorizer')
    elif 'count' in vect:
        v = CountVectorizer()
        setattr(v, 'name', 'CountVectorizer')
    else:
        # Bug fix: the original fell through with `v` unbound and crashed
        # with a confusing UnboundLocalError for unknown vectorizer names.
        raise ValueError("Unknown vectorizer: {!r}".format(vectorizer))
    setattr(v, 'fitted', False)
    return v
def _get_datestr():
return datetime.now().strftime('%Y.%m.%d.%H.%M.%S')
def hyperparam_space(search_space: base.List[dict], hyper_parameters: base.List[base.Tuple]
                     ) -> base.List[dict]:
    """
    Create all hyper-parameter combinations to run.

    :search_space (base.List[dict]): Partial combinations built so far.
    :hyper_parameters (base.List[base.Tuple]): (name, values) pairs to expand over.
    :returns search_space (base.List[dict]): All combination dictionaries.
    """
    # Cross every existing combination with every value of each parameter.
    for param_name, param_values in hyper_parameters:
        search_space = [{**combination, param_name: value}
                        for combination in search_space
                        for value in param_values]
    return search_space
| zeeraktalat/mlearn | mlearn/utils/pipeline.py | pipeline.py | py | 2,898 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "mlearn.data.dataset.GeneralDataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "mlearn.base.DataType",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mlearn.base",
"line_number": 10,
"usage_type": "name"
},
{
"api_name... |
34196938558 | #!/user/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py
#一个HDF5文件就是一个容器,用于储存两类对象:datasets,类似于数组的数据集合;groups,类似于文件夹的容器,可以储存datasets和其它groups。
from lr_utils import load_dataset
train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = load_dataset()
# index = 30
# print(train_set_x_orig[index])
# plt.imshow(train_set_x_orig[index])
#打印出当前的训练标签值
#使用np.squeeze的目的是压缩维度,【未压缩】train_set_y[:,index]的值为[1] , 【压缩后】np.squeeze(train_set_y[:,index])的值为1
#print("【使用np.squeeze:" + str(np.squeeze(train_set_y[:,index])) + ",不使用np.squeeze: " + str(train_set_y[:,index]) + "】")
#只有压缩后的值才能进行解码操作
# print("train_set_y=" +str(train_set_y[:,index]))
# print(classes[np.squeeze(train_set_y[:,index])])
# plt.show()
#image.shape[0],image.shape[1],image.shape[2]表示图像长,宽,通道数 image.shape表示图片的维度
# Dataset dimensions: shape[0] of an image is its height/width, shape[1]
# its width, shape[2] the number of channels; image.shape is the full dims.
m_train=train_set_y.shape[1]
m_test=test_set_y.shape[1]
num_px=train_set_x_orig.shape[1]
print("训练集的数量:m_train="+str(m_train))
print("测试集的数量:m_test="+str(m_test))
print("每张图片的高和宽:num_px="+str(num_px))
print("每张图片的大小:("+str(num_px)+","+str(num_px)+",3)")
print("训练集图片的维度:"+str(train_set_x_orig.shape))
print("训练集标签的维度:"+str(train_set_y.shape))
print("测试集图片的维度:"+str(test_set_x_orig.shape))
print("测试集标签的维度:"+str(test_set_y.shape))
#X_flatten = X.reshape(X.shape [0],-1).T  # X.T is the transpose of X
# Flatten the training set and transpose it: reshape(m, -1) keeps m rows and
# lets numpy work out the number of columns (-1 means "unspecified"), so each
# image becomes one column after the transpose.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
# Flatten and transpose the test set the same way.
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
#
# print ("训练集降维最后的维度: " + str(train_set_x_flatten.shape))
# print ("训练集_标签的维数 : " + str(train_set_y.shape))
# print ("测试集降维之后的维度: " + str(test_set_x_flatten.shape))
# print ("测试集_标签的维数 : " + str(test_set_y.shape))
# Normalize pixel values from [0, 255] to [0, 1].
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
def sigmoid(z):
    """
    Compute the logistic sigmoid 1 / (1 + e^-z), elementwise.

    :param z: scalar or numpy array of any size
    :return: sigmoid of z, same shape as the input
    """
    return 1 / (1 + np.exp(-z))
def initialize_with_zero(dim):
    """
    Create a zero weight vector of shape (dim, 1) and a zero scalar bias.

    :param dim: desired length of the weight vector w
    :return: (w, b) where w is a (dim, 1) numpy array of zeros and b == 0
    """
    w = np.zeros((dim, 1))
    b = 0
    # Sanity checks on the shape of w and the numeric type of b.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))
    return (w, b)
def propagate(w, b, X, Y):
    """
    One forward/backward pass of logistic regression.

    :param w: weights, numpy array of shape (num_px * num_px * 3, 1)
    :param b: bias, a scalar
    :param X: data matrix of shape (num_px * num_px * 3, number of examples)
    :param Y: true 0/1 label vector of shape (1, number of examples)
    :return: (grads, cost) where grads holds dw (same shape as w) and db
             (scalar), and cost is the negative log-likelihood cost
    """
    m = X.shape[1]  # number of training examples (columns of X)

    # Forward pass: activations and cross-entropy cost.
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1 / m) * (np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)))

    # Backward pass: gradients of the cost with respect to w and b.
    dw = (1 / m) * (np.dot(X, (A - Y).T))
    db = (1 / m) * (np.sum(A - Y))

    # Sanity checks on gradient shapes and types.
    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)  # drop singleton dimensions -> plain scalar
    assert cost.shape == ()

    return ({"dw": dw, "db": db}, cost)
# #测试一下propagate
# print("====================测试propagate====================")
# #初始化一些参数
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1,2], [3,4]]), np.array([[1, 0]])
# grads, cost = propagate(w, b, X, Y)
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
# print ("cost = " + str(cost))
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """
    Optimize w and b by running gradient descent.

    :param w: weights, numpy array of shape (num_px * num_px * 3, 1)
    :param b: bias, a scalar
    :param X: data matrix of shape (num_px * num_px * 3, number of examples)
    :param Y: true 0/1 label vector of shape (1, number of examples)
    :param num_iterations: number of gradient-descent iterations
    :param learning_rate: step size for the parameter updates
    :param print_cost: if True, print the cost every 100 iterations
    :return: (params, grads, costs) — final parameters, final gradients,
             and the cost recorded every 100 iterations (for plotting)
    """
    costs = []
    for iteration in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]

        # Gradient-descent parameter update.
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally print) the cost every 100 steps.
        if iteration % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("迭代的次数: %i , 误差值: %f" % (iteration, cost))

    params = {
        "w": w,
        "b": b}
    grads = {
        "dw": dw,
        "db": db}
    return (params, grads, costs)
# #测试optimize
# print("====================测试optimize====================")
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1,2], [3,4]]), np.array([[1, 0]])
# params , grads , costs = optimize(w , b , X , Y , num_iterations=100 , learning_rate = 0.009 , print_cost = False)
# print ("w = " + str(params["w"]))
# print ("b = " + str(params["b"]))
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
def predict(w, b, X):
    """
    Predict 0/1 labels using learned logistic-regression parameters.

    :param w: weights, numpy array of shape (num_px * num_px * 3, 1)
    :param b: bias, a scalar
    :param X: data matrix of shape (num_px * num_px * 3, number of examples)
    :return: Y_prediction — numpy array of shape (1, m) with 0/1 predictions
    """
    m = X.shape[1]  # number of examples to classify
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)  # ensure w is a column vector

    # Probability that each example belongs to the positive class.
    A = sigmoid(np.dot(w.T, X) + b)

    # Threshold the probabilities at 0.5 to get hard labels.
    for col in range(A.shape[1]):
        Y_prediction[0, col] = 1 if A[0, col] > 0.5 else 0

    assert Y_prediction.shape == (1, m)
    return Y_prediction
#
# #测试predict
# print("------测试predict------")
# w,b,X,Y = np.array([[1],[2]]),2,np.array([[1,2],[3,4]]),np.array([[1,0]])
# print("predictions = " + str(predict(w,b,X)))
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Build the logistic-regression model by wiring together the helpers
    implemented above (initialize_with_zero, optimize, predict).

    :param X_train: numpy array of shape (num_px * num_px * 3, m_train)
    :param Y_train: numpy array of shape (1, m_train) with training labels
    :param X_test: numpy array of shape (num_px * num_px * 3, m_test)
    :param Y_test: numpy array of shape (1, m_test) with test labels
    :param num_iterations: number of optimization iterations
    :param learning_rate: learning rate passed to optimize()
    :param print_cost: if True, print the cost every 100 iterations
    :return: d — dictionary with learned parameters, predictions and costs
    """
    w, b = initialize_with_zero(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve the learned parameters.
    w, b = parameters["w"], parameters["b"]

    # Predictions on both splits.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Report accuracies: 100% minus mean absolute prediction error.
    print("训练集准确性:", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), "%")
    print("测试集准确性:", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), "%")

    return {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations}
print("------测试model------")
# Train on the real data loaded above.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True)
# Plot the learning curve.
costs = np.squeeze(d['costs'])
"""
Why squeeze() here: the recorded costs can come back as a nested array
(extra [[ ]] brackets); plotting such an array may render nothing.
squeeze() collapses it to a rank-1 array so matplotlib plots it normally.
"""
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations(per hundreds)')
plt.title("Learning rate = " + str(d["learning_rate"]))
plt.show()
# Compare several learning rates on the same data.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print("learning rate is:" + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=i,
                           print_cost=False)
    print('\n' + "--------------" + '\n')
# Overlay the cost curves of all runs.
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)  # shadow: draw a shadow behind the legend box
# Set the legend background colour.
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
| CheQiXiao/cfair | fc_net.py | fc_net.py | py | 11,968 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "lr_utils.load_dataset",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_numb... |
74377247228 | '''
@Author: Never
@Date: 2020-06-13 11:02:05
@Description:
@LastEditTime: 2020-07-14 15:20:19
@LastEditors: Never
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/27 19:47
# @Author : Shark
# @Site :
# @File : lepin1.py
# @Software: PyCharm
import csv
import requests
import json
import random
import time
# Stress-test script: places repeated activity orders against a local shop
# server for every user listed in a CSV file and reports the win rate.
start =time.time()
print('程序开始时间:%s'%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(start))))  # log start time
WinStatuslist=[]  # collected WinStatus flags, one per successfully placed order
i=0  # number of CSV rows (users) processed
# Each CSV row holds a member id and an address id for one test user.
with open('C:\\Users\\lhx\\Desktop\\user1.csv','rt') as myfile:
    lines=csv.reader(myfile)
    for memberid,addid in lines:
        # Publish the current member id where the shop server reads it from.
        with open(r'\\192.168.0.200\shop.h5\MemberId.json', 'w') as m:
            m.write(memberid)
        t=0
        # Place 20 orders for this user.
        while t<20:
            date={
                "productid":216869,
                "lpTimes":1,
                "addressid":addid,
                "isPay":'true',
                "useBalance":10.9,
            }
            url = "http://192.168.0.200:818/order/ActivityOrderConfirm"
            response = requests.post(url,data=date)
            text=response.text
            jsonobj=json.loads(text)
            if jsonobj['success']==200:
                # Order confirmed — immediately play it with a random digit.
                totext=jsonobj['data']['OrderIdList']
                url="http://192.168.0.200:818/HappyOrder/ForthWithOrder"
                data={"happyOrderId":totext,
                      "chooseNumber":random.randint(0,9)}
                response=requests.post(url,data=data)
                text=response.text
                jsonobj=json.loads(text)
                totext=jsonobj['data']['WinStatus']
                WinStatuslist.append(totext)
            else:
                # Confirmation failed: dump the response and stop this user.
                print(jsonobj)
                break
            t+=1
            # time.sleep(1)
        i+=1
        print(i)
# Tally the results: a WinStatus of 1 counts as a win.
m=0
print(WinStatuslist)
for j in WinStatuslist:
    if j==1:
        m+=1
i=20*i  # total orders attempted (20 per processed user)
print("订单数:%s"%i)
print("中奖次数:%s"%m)
s=m/i*100
print('中奖概率:{:.2f}%'.format(s))
end =time.time()
print('程序结束时间:%s'%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(end))))
print("循环运行时间:%.2f秒"%(end-start))
| gitxzq/py | lepin1.py | lepin1.py | py | 2,131 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number"... |
41211987806 | import matplotlib.pyplot as plt
import librosa
import librosa.display
import os
import torch
from torch.distributions.beta import Beta
import numpy as np
from pytorch_lightning.callbacks import Callback
import torch.nn as nn
from einops import rearrange
from tqdm import tqdm
from helpers import nessi
image_folder = "images"
os.makedirs(image_folder, exist_ok=True)
class MyStaticPostQuantizationCallback(Callback):
    """Lightning callback that statically quantizes pl_module.net before testing.

    Runs the eager-mode post-training quantization workflow (fuse ->
    prepare -> calibrate -> convert) using a fixed number of calibration
    batches, and reports model size / complexity before and after.
    """

    def __init__(self, get_calibration_loader, calibration_batches=100):
        # get_calibration_loader: zero-argument callable returning the
        # DataLoader used to collect activation statistics.
        self.calibration_loader = get_calibration_loader()
        self.calibration_batches = calibration_batches

    def quantize_model(self, pl_module):
        """Quantize pl_module.net in place and return size/complexity stats."""
        print("*********** Before Quantization: ***********")
        if hasattr(pl_module, 'mel'):
            pl_module.mel.cpu()
        # get the shape of spectrograms (1/10th of a sample is enough)
        sample = next(iter(self.calibration_loader))[0][0].unsqueeze(0)
        sample = sample[:, :, :sample.size(2) // 10]
        shape = pl_module.mel_forward(sample).size()
        # get original macs and params
        macc_orig, n_params_orig = nessi.get_model_size(pl_module.net, input_size=(1, shape[1], shape[2], shape[3]))
        print("macc_orig: ", macc_orig)
        print("n_params_orig: ", n_params_orig)
        # print size of model before quantization
        print_size_of_model(pl_module.net)
        # Fuse conv/bn/relu-style modules; must happen before prepare().
        pl_module.net.fuse_model()
        # get macs and params after fusing model
        macc, n_params = nessi.get_model_size(
            pl_module.net, input_size=(1, shape[1], shape[2], shape[3]))
        print("macc after fuse : ", macc)
        print("n_params after fuse: ", n_params)
        # Insert observers, then run calibration data through the network.
        pl_module.net.qconfig = torch.quantization.get_default_qconfig('fbgemm')
        torch.quantization.prepare(pl_module.net, inplace=True)
        pl_module.net.cpu()
        if hasattr(pl_module, 'mel'):
            pl_module.mel.cpu()
        for i, batch in enumerate(tqdm(self.calibration_loader, total=self.calibration_batches)):
            x, files, y, device_indices, cities, indices = batch
            # split to 1-second pieces
            x = rearrange(x, 'b c (slices t) -> (b slices) c t', slices=10)
            x = x.cpu()
            if hasattr(pl_module, 'mel'):
                x = pl_module.mel_forward(x)
            with torch.no_grad():
                pl_module.net(x)
            # stop after a certain number of calibration samples
            if i == self.calibration_batches:
                break
        # Replace observed modules with their quantized counterparts.
        torch.quantization.convert(pl_module.net, inplace=True)
        print("*********** After Quantization: ***********")
        return dict(macc_orig=macc_orig, n_params_orig=n_params_orig,
                    macc_fuse=macc, n_params_fuse=n_params, model_size_bytes=print_size_of_model(pl_module.net))

    def on_test_start(self, trainer, pl_module):
        # Quantize right before the test loop begins.
        self.quantize_model(pl_module)
def mixstyle(x, p=0.5, alpha=0.1, eps=1e-6):
    """
    Frequency-wise MixStyle: with probability p, normalize x by its
    frequency statistics (dims 1 and 3) and denormalize with statistics
    mixed across a shuffled batch.

    :param x: input batch tensor
    :param p: probability of applying the augmentation
    :param alpha: Beta-distribution parameter for the mixing weights
    :param eps: numerical-stability constant for the std computation
    :return: augmented (or untouched) batch
    """
    if np.random.rand() > p:
        return x  # skip augmentation for this batch

    n = x.size(0)
    # Frequency-wise mean/std (dims 1 and 3), detached so gradients do
    # not flow through the statistics.
    mu = x.mean(dim=[1, 3], keepdim=True)
    sig = (x.var(dim=[1, 3], keepdim=True) + eps).sqrt()
    mu, sig = mu.detach(), sig.detach()
    normed = (x - mu) / sig

    # Instance-wise convex mixing weights and a random batch shuffle.
    lam = Beta(alpha, alpha).sample((n, 1, 1, 1)).to(x.device)
    idx = torch.randperm(n).to(x.device)

    mu_mix = mu * lam + mu[idx] * (1 - lam)
    sig_mix = sig * lam + sig[idx] * (1 - lam)
    return normed * sig_mix + mu_mix
def print_size_of_model(model):
    """Serialize the model's state dict to a temp file, print and return its size in bytes."""
    tmp_path = "temp.p"
    torch.save(model.state_dict(), tmp_path)
    size_bytes = os.path.getsize(tmp_path)
    print('Size (MB):', size_bytes / 1e6)
    os.remove(tmp_path)
    return size_bytes
def mixup(size, alpha):
    """
    Draw mixup shuffling indices and per-sample convex weights.

    :param size: batch size
    :param alpha: Beta-distribution parameter
    :return: (perm, lam) — a random permutation of the batch and a
             FloatTensor of mixing weights, each >= 0.5 by construction
    """
    perm = torch.randperm(size)
    weights = np.random.beta(alpha, alpha, size).astype(np.float32)
    # Keep the larger of (lambda, 1 - lambda) so the primary sample dominates.
    weights = np.concatenate([weights[:, None], 1 - weights[:, None]], 1).max(1)
    lam = torch.FloatTensor(weights)
    # Callers mix as: data * lam + data[perm] * (1 - lam), same for targets.
    return perm, lam
def spawn_get(seedseq, n_entropy, dtype):
    """
    Spawn one child of a numpy SeedSequence and return its entropy state.

    :param seedseq: numpy SeedSequence to spawn from
    :param n_entropy: number of 32-bit words of state to generate
    :param dtype: np.ndarray for the raw uint32 state array, int for the
                  words combined into a single integer
    :raises ValueError: for any other dtype
    """
    child = seedseq.spawn(1)[0]
    state = child.generate_state(n_entropy, dtype=np.uint32)
    if dtype == np.ndarray:
        return state
    if dtype == int:
        # Combine the 32-bit words little-endian-style into one integer.
        state_as_int = 0
        for shift, s in enumerate(state):
            state_as_int = state_as_int + int((2 ** (32 * shift) * s))
        return state_as_int
    raise ValueError(f'not a valid dtype "{dtype}"')
| CPJKU/cpjku_dcase22 | helpers/utils.py | utils.py | py | 4,903 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "os.makedirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.callbacks.Callback",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "helpers.nessi.get_model_size",
"line_number": 35,
"usage_type": "call"
},
{
"api... |
21672470765 | #!/usr/bin/python
#coding:utf-8
"""
Author: Andy Tian
Contact: tianjunning@126.com
Software: PyCharm
Filename: get_heatMap_html.py
Time: 2019/2/21 10:51
"""
import requests
import re
def get_html():
    '''
    Download the source of the Baidu heat-map demo page.
    :return: the page HTML with all newlines and tabs removed
    '''
    url = "http://lbsyun.baidu.com/jsdemo/demo/c1_15.htm"
    header = {
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2864.400",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding":"gzip, deflate",
        "Accept-Language":"zh-CN,zh;q=0.9"
    }
    raw = requests.get(url, headers=header)._content.decode()
    return raw.replace("\n", '').replace("\t", '')
def modify_html(htmlstr):
    '''
    Adapt the demo page to this project's needs:
    1) heat-map container height 500px -> 80% of the page
    2) heat display radius 20 -> 10
    3) colour saturation cap max:100 -> max:120000 (values above max share one colour)
    4) replace the demo's sample coordinates with our scraped lon/lat data
    :param htmlstr: demo HTML to modify
    :return: the modified HTML
    '''
    # Bug fix: the original opened the coordinate file without ever closing
    # it; a context manager guarantees the handle is released.
    with open("G:\Python\Project\Spider\scrapyProject\lianjia\lon_lat.json") as data:
        datastr = data.read()
    htmlstr = htmlstr.replace("height:500px","height:80%").replace('{"radius":20}','{"radius":10}').replace("max:100","max:120000")
    # Collect the demo's inline data points and swap them for our own.
    be_replaced_data = ",\n".join(re.findall(r'{"lng":.*"count":\d*}',htmlstr))
    htmlstr_modified = htmlstr.replace(be_replaced_data,datastr)
    return htmlstr_modified
def rewrite_html(str):
    '''
    Persist the HTML markup to 'heat.html' (UTF-8).
    :param str: HTML content to write
    :return: None (creates/overwrites heat.html in the working directory)
    '''
    with open("heat.html", mode="w", encoding="utf-8") as outfile:
        outfile.write(str)
f.write(str)
if __name__ == "__main__":
    # Fetch the demo page, adapt it, and write the result to disk.
    htmlstr = get_html()
    htmlstr_modified = modify_html(htmlstr)
    # Bug fix: the original called the undefined name `write_html`, which
    # raised NameError; the function defined above is `rewrite_html`.
    rewrite_html(htmlstr_modified)
| tianzheyiran/HeatMap | get_heatMap_html.py | get_heatMap_html.py | py | 2,229 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 47,
"usage_type": "call"
}
] |
# Interactive script: append new student records (name, hobbies, achievement)
# to mahasiswa.json.
import json

# Load the existing student records.
with open('mahasiswa.json', 'r') as file:
    a = json.load(file)

b = dict()  # new records to merge into the existing data
c = int(input("Masukkan Jumkah Mahasiswa baru : "))  # number of new students
for i in range(c):
    nm = input("Masukkan nama anda: ")  # student name, used as the record key
    hb = []  # this student's hobbies
    untuk_hobi = int(input("Masukkan jumlah hobi: "))
    for j in range(untuk_hobi):
        hb1 = input("Masukkan hobi ke-{} : ".format(j+1))
        hb.append(hb1)
    per = input("Masukkan prestasi anda: ")
    print("====Data Berhasil ditambahkan===")
    print()
    b [nm] = [{"Biodata": {"Hobi": hb, "Prestasti" : per}}]

# Merge the new records and write everything back to the same file.
a.update(b)
with open('mahasiswa.json', 'w') as file:
    json.dump(a,file)
{
"api_name": "json.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 27,
"usage_type": "call"
}
] |
20914243110 | """added columns to Places
Revision ID: cba44d27f422
Revises: 061ea741f852
Create Date: 2023-06-28 15:56:11.475592
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cba44d27f422'
down_revision = '061ea741f852'
branch_labels = None
depends_on = None
def upgrade():
    """Add website, photo, price_level, user_ratings_total and rating columns to `places`."""
    # ### commands auto generated by Alembic - please adjust! ###
    new_columns = (
        sa.Column('website', sa.String(), nullable=True),
        sa.Column('photo', sa.String(), nullable=True),
        sa.Column('price_level', sa.Integer(), nullable=True),
        sa.Column('user_ratings_total', sa.Integer(), nullable=True),
        sa.Column('rating', sa.Float(), nullable=True),
    )
    with op.batch_alter_table('places', schema=None) as batch_op:
        for column in new_columns:
            batch_op.add_column(column)
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added in upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('places', schema=None) as batch_op:
        for column_name in ('rating', 'user_ratings_total', 'price_level', 'photo', 'website'):
            batch_op.drop_column(column_name)
    # ### end Alembic commands ###
| choihalim/halfway | server/migrations/versions/cba44d27f422_added_columns_to_places.py | cba44d27f422_added_columns_to_places.py | py | 1,294 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.... |
74693961467 | import torch
import time
import torch.nn.functional as F
def train(model, device, train_loader, optimizer, epoch):
    """
    Train the model for one epoch.

    :param model: network taking a list [x1, x2, x3]; element 0 of its
                  returned tuple is the class logits
    :param device: torch device to move each batch to
    :param train_loader: DataLoader yielding (x1, x2, x3, y) batches
    :param optimizer: optimizer updating the model parameters
    :param epoch: current epoch number (used only for progress logging)
    """
    model.train()
    # Cleanup: removed the unused `best_acc` accumulator and the unused
    # per-batch `start_time` timestamp from the original.
    for batch_idx, (x1, x2, x3, y) in enumerate(train_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        out = model([x1, x2, x3])  # forward pass
        y_pred = out[0]
        model.zero_grad()  # clear gradients before backprop
        loss = F.cross_entropy(y_pred, y.squeeze())
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 100 == 0:  # periodic progress logging
            print('Train Epoch: {} [{}/{} ({:.2f}%)]\t\tLoss: {:.6f}'.format(epoch, (batch_idx+1) * len(x1),
                                                                             len(train_loader.dataset),
                                                                             100. * (batch_idx+1) / len(train_loader),
                                                                             loss.item()))  # .item() for a Python float
def test(model, device, test_loader):
    """
    Evaluate the model on the test set.

    Prints average loss and accuracy, then returns the accuracy as a
    fraction of the dataset size.
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    for batch_idx, (x1, x2, x3, y) in enumerate(test_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        with torch.no_grad():
            logits = model([x1, x2, x3])[0]
        total_loss += F.cross_entropy(logits, y.squeeze())
        predicted = logits.max(-1, keepdim=True)[1]  # index of the max logit
        correct += predicted.eq(y.view_as(predicted)).sum().item()
    total_loss /= len(test_loader)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        total_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct / len(test_loader.dataset)
def test_lem(model, device, test_loader):  # evaluate the model and collect embeddings
    """
    Evaluate the model on the test set and collect embeddings.

    Returns (accuracy, input_embeddings, label_embeddings, labels) where
    input_embeddings is a list of per-batch input-embedding tensors (V)
    and labels is a list of per-batch predicted-label tensors.
    """
    model.eval()
    input_embeddings, label_embeddings, labels = [], [], []
    test_loss = 0.0
    acc = 0
    for batch_idx, (x1, x2, x3, y) in enumerate(test_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        with torch.no_grad():
            out = model([x1, x2, x3])
            y_, V, C = out[0], out[1], out[2]
            input_embeddings.append(V.cpu())
            # NOTE(review): label_embeddings is initialised as a list above
            # but overwritten with a tensor here on every batch, so only the
            # final batch's C survives — confirm this is intentional.
            label_embeddings = C.cpu()
        test_loss += F.cross_entropy(y_, y.squeeze())
        pred = y_.max(-1, keepdim=True)[1]  # .max() returns (values, indices); take indices
        labels.append(pred)
        acc += pred.eq(y.view_as(pred)).sum().item()  # .item() for a Python int
    test_loss /= len(test_loader)
    return acc / len(test_loader.dataset), input_embeddings, label_embeddings, labels
| Huasheng-hou/r2-nlp | src/utils.py | utils.py | py | 2,866 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch... |
19274830613 | #!/usr/bin/env python
'''
Created on Jun 28, 2016
@author: isvoboda
'''
from __future__ import print_function
import sys
import multiprocessing
import logging
import yaml
import argparse
from collections import OrderedDict
import cnn_image_processing as ci
import signal
signal.signal(signal.SIGINT, lambda x, y: sys.exit(1))
LOGGER = logging.getLogger("cnn_image_processing")
def parse_phase(conf):
    """
    Build the data-feeding modules for one net phase (Train or Test).

    Creates the provider and sampler described by `conf` (either may be
    absent) and wires them together with two multiprocessing queues.

    :param conf: phase configuration dictionary
    :return: dict with keys 'pque', 'sque', 'provider', 'sampler'
    """
    creator = ci.Creator

    # Queue capacities, with defaults when not configured.
    provider_qsize = conf['provider_queue_size'] if 'provider_queue_size' in conf else 5
    sample_qsize = conf['sample_queue_size'] if 'sample_queue_size' in conf else 512

    dmodules = {
        'pque': multiprocessing.Queue(provider_qsize),
        'sque': multiprocessing.Queue(sample_qsize),
    }

    if 'Provider' in conf:
        provider = creator.create_provider(conf['Provider'])
        provider.out_queue = dmodules['pque']
    else:
        provider = None
    dmodules['provider'] = provider

    # train_provider.file_list = train_list
    if 'Sampler' in conf:
        sampler = creator.create_sampler(conf['Sampler'])
        sampler.in_queue = dmodules['pque']
        sampler.out_queue = dmodules['sque']
    else:
        sampler = None
    dmodules['sampler'] = sampler

    return dmodules
def parse_config(conf=None):
    """
    Parse the train_cnn application configuration.

    Builds the Train phase, every Test phase (falling back to the Train
    provider/sampler configuration when a Test phase omits them) and the
    trainer, and wires all queues together.

    :param conf: parsed YAML configuration dictionary
    :return: app dict with 'Train', optional 'Test', and 'Trainer' entries
    """
    creator = ci.Creator
    app = {}
    app['Train'] = parse_phase(conf['Train'])
    app['Train']['provider'].out_queue = app['Train']['pque']
    app['Train']['sampler'].in_queue = app['Train']['pque']
    app['Train']['sampler'].out_queue = app['Train']['sque']
    in_ques = []
    if 'Test' in conf:
        test_nets = OrderedDict()
        # list(...) keeps this working on Python 3 as well
        # (dict.keys()[0] is Python-2 only).
        test_net_list = [list(test_net.keys())[0] for test_net in conf['Test']]
        test_net_list.sort()
        for i_key, net_key in enumerate(test_net_list):
            test_nets[net_key] = parse_phase(conf['Test'][i_key][net_key])
            if test_nets[net_key]['provider'] is None:
                # Fall back to the Train provider configuration.
                tprovider = creator.create_provider(conf['Train']['Provider'])
                tprovider.out_queue = test_nets[net_key]['pque']
                test_nets[net_key]['provider'] = tprovider
            if test_nets[net_key]['sampler'] is None:
                # Bug fix: the original indexed a bare list literal
                # (['Train']['Sampler']), raising TypeError; it must read
                # the Train sampler configuration from `conf`.
                tsampler = creator.create_sampler(conf['Train']['Sampler'])
                tsampler.in_queue = test_nets[net_key]['pque']
                tsampler.out_queue = test_nets[net_key]['sque']
                test_nets[net_key]['sampler'] = tsampler
            in_ques.append(test_nets[net_key]['sque'])
        app['Test'] = test_nets
    app['Trainer'] = creator.create_trainer(conf['Trainer'])
    app['Trainer'].train_in_queue = app['Train']['sque']
    app['Trainer'].test_in_queue = in_ques
    return app
def main():
    '''
    Entry point: parse command-line arguments, load the YAML
    configuration, start providers/samplers and run the trainer.
    '''
    parser = argparse.ArgumentParser(description="Train the cnn")
    parser.add_argument("-c", "--conf-file", action='store', type=str,
                        choices=None, required=True, help="Configuration file",
                        metavar=None, dest='conf_file')
    parser.add_argument("-s", "--solver-file", action='store', type=str,
                        choices=None, required=True, help="Solver file",
                        metavar=None, dest='solver_file')
    parser.add_argument("-v", "--verbose", action="store_true", required=False,
                        help="Set the verbose mode.", dest='verbose')
    parser.add_argument("-tr", "--train-list", action='store', type=str,
                        help="Training file list", required=True,
                        dest='train_list')
    parser.add_argument("-te", "--test-lists", action='store',
                        nargs='*', type=str, default=None,
                        required=False, dest='test_lists',
                        help="Test file lists")  # bug fix: help text said "Training"
    args = parser.parse_args()

    # Print the arguments.
    # Bug fix: .iteritems() is Python-2 only; .items() works on both.
    for key, val in vars(args).items():
        print("{}: {}".format(key, val))

    # Initialize logging.
    if args.verbose:
        LOGGER.setLevel(logging.DEBUG)
    else:
        LOGGER.setLevel(logging.INFO)
    logging.basicConfig()

    config_file = args.conf_file
    solver_file = args.solver_file
    train_list = args.train_list
    test_lists = args.test_lists

    # Open, parse and print the configuration file.
    with open(config_file) as cf_file:
        conf = yaml.safe_load(cf_file)
        print (yaml.dump(conf))

    app = parse_config(conf)

    # Start the training data pipeline.
    app['Train']['provider'].file_list = train_list
    app['Train']['provider'].start()
    app['Train']['sampler'].start()

    # Start one pipeline per test list, if any were given.
    if test_lists is not None:
        assert len(test_lists) == len(app['Test'])
        for i_test, test_k in enumerate(app['Test']):
            app['Test'][test_k]['provider'].file_list = test_lists[i_test]
            app['Test'][test_k]['provider'].start()
            app['Test'][test_k]['sampler'].start()

    app['Trainer'].solver_file = solver_file
    app['Trainer'].start()
    app['Trainer'].join()
if __name__ == "__main__":
    # Script entry point: parse CLI args, start the pipeline, train.
    main()
| DCGM/cnn-image-processing | bin/train_cnn.py | train_cnn.py | py | 5,210 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "signal.signal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"li... |
36014041676 | import torch.nn as nn
import tqdm
import torch
class ANN(nn.Module):
    """Small fully-connected network: input -> 128 -> 8 -> 4, ReLU after each layer."""

    def __init__(self, input=4):
        super().__init__()
        # Attribute names (liner1..3) are kept for state_dict compatibility.
        self.liner1 = nn.Linear(input, 128)
        self.relu = nn.ReLU()
        self.liner2 = nn.Linear(128, 8)
        self.liner3 = nn.Linear(8, 4)

    def forward(self, x):
        """Apply the three linear layers, each followed by ReLU."""
        hidden = x
        for layer in (self.liner1, self.liner2, self.liner3):
            hidden = self.relu(layer(hidden))
        return hidden
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
70103613629 | #!/usr/bin/env python3
"""
Example for Implied Volatility using the NAG Library for Python
Finds implied volatilities of the Black Scholes equation using specfun.opt_imp_vol
Data needs to be downloaded from:
http://www.cboe.com/delayedquote/QuoteTableDownload.aspx
Make sure to download data during CBOE Trading Hours.
Updated for NAG Library for Python Mark 27.1
"""
# pylint: disable=invalid-name,too-many-branches,too-many-locals,too-many-statements
try:
import sys
import pandas
import numpy as np
import matplotlib.pylab as plt
import warnings
from naginterfaces.library import specfun, fit
from naginterfaces.base import utils
from matplotlib import cm
except ImportError as e:
print(
"Could not import the following module. "
"Do you have a working installation of the NAG Library for Python?"
)
print(e)
sys.exit(1)
__author__ = "Edvin Hopkins, John Morrissey and Brian Spector"
__copyright__ = "Copyright 2021, The Numerical Algorithms Group Inc"
__email__ = "support@nag.co.uk"
# Set to hold expiration dates
dates = []
cumulative_month = {'Jan': 31, 'Feb': 57, 'Mar': 90,
'Apr': 120, 'May': 151, 'Jun': 181,
'Jul': 212, 'Aug': 243, 'Sep': 273,
'Oct': 304, 'Nov': 334, 'Dec': 365}
def main(): # pylint: disable=missing-function-docstring
try:
if len(sys.argv)>1:
QuoteData = sys.argv[1]
else:
QuoteData = 'QuoteData.dat'
qd = open(QuoteData, 'r')
qd_head = []
qd_head.append(qd.readline())
qd_head.append(qd.readline())
qd.close()
except: # pylint: disable=bare-except
sys.stderr.write("Usage: implied_volatility.py QuoteData.dat\n")
sys.stderr.write("Couldn't read QuoteData\n")
sys.exit(1)
print("Implied Volatility for %s %s" % (qd_head[0].strip(), qd_head[1]))
# Parse the header information in QuotaData
first = qd_head[0].split(',')
second = qd_head[1].split()
qd_date = qd_head[1].split(',')[0]
company = first[0]
underlyingprice = float(first[1])
month, day = second[:2]
today = cumulative_month[month] + int(day) - 30
current_year = int(second[2])
def getExpiration(x):
monthday = x.split()
adate = monthday[0] + ' ' + monthday[1]
if adate not in dates:
dates.append(adate)
return (int(monthday[0]) - (current_year % 2000)) * 365 + cumulative_month[monthday[1]]
def getStrike(x):
monthday = x.split()
return float(monthday[2])
data = pandas.io.parsers.read_csv(QuoteData, sep=',', header=2, na_values=' ')
# Need to fill the NA values in dataframe
data = data.fillna(0.0)
# Let's look at data where there was a recent sale
data = data[(data['Last Sale'] > 0) | (data['Last Sale.1'] > 0)]
# Get the Options Expiration Date
exp = data.Calls.apply(getExpiration)
exp.name = 'Expiration'
# Get the Strike Prices
strike = data.Calls.apply(getStrike)
strike.name = 'Strike'
data = data.join(exp).join(strike)
print("Number of data points found: {}\n".format(len(data.index)))
print('Calculating Implied Vol of Calls...')
r = np.zeros(len(data.index))
t = (data.Expiration - today)/365.0
s0 = np.full(len(data.index),underlyingprice)
pCall= (data.Bid + data.Ask) / 2
# A lot of the data is incomplete or extreme so we tell the NAG routine
# not to worry about warning us about data points it can't work with
warnings.simplefilter('ignore',utils.NagAlgorithmicWarning)
sigmaCall = specfun.opt_imp_vol('C',pCall,data.Strike, s0,t,r,mode = 1).sigma
impvolcall = pandas.Series(sigmaCall,index=data.index, name='impvolCall')
data = data.join(impvolcall)
print('Calculating Implied Vol of Puts...')
pPut= (data['Bid.1'] + data['Ask.1']) / 2
sigmaPut = specfun.opt_imp_vol('P',pPut,data.Strike, s0,t,r,mode = 1).sigma
impvolput = pandas.Series(sigmaPut,index=data.index, name='impvolPut')
data = data.join(impvolput)
fig = plt.figure(1)
fig.subplots_adjust(hspace=.4, wspace=.3)
# Plot the Volatility Curves
# Encode graph layout: 3 rows, 3 columns, 1 is first graph.
num = 331
max_xticks = 4
for date in dates:
# add each subplot to the figure
plot_year, plot_month = date.split()
plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
plot_call = data[(data.impvolCall > .01) &
(data.Expiration == plot_date) &
(data['Last Sale'] > 0)]
plot_put = data[(data.impvolPut > .01) &
(data.Expiration == plot_date) &
(data['Last Sale.1'] > 0)]
myfig = fig.add_subplot(num)
xloc = plt.MaxNLocator(max_xticks)
myfig.xaxis.set_major_locator(xloc)
myfig.set_title('Expiry: %s 20%s' % (plot_month, plot_year))
myfig.plot(plot_call.Strike, plot_call.impvolCall, 'pr', label='call',markersize=0.5)
myfig.plot(plot_put.Strike, plot_put.impvolPut, 'p', label='put',markersize=0.5)
myfig.legend(loc=1, numpoints=1, prop={'size': 10})
myfig.set_ylim([0,1])
myfig.set_xlabel('Strike Price')
myfig.set_ylabel('Implied Volatility')
num += 1
plt.suptitle('Implied Volatility for %s Current Price: %s Date: %s' %
(company, underlyingprice, qd_date))
print("\nPlotting Volatility Curves/Surface")
# The code below will plot the Volatility Surface
# It uses fit.dim2_cheb_lines to fit with a polynomial and
# fit.dim2_cheb_eval to evaluate at intermediate points
m = np.empty(len(dates), dtype=np.int32)
y = np.empty(len(dates), dtype=np.double)
xmin = np.empty(len(dates), dtype=np.double)
xmax = np.empty(len(dates), dtype=np.double)
data = data.sort_values(by=['Strike']) # Need to sort for NAG Algorithm
k = 3 # this is the degree of polynomial for x-axis (Strike Price)
l = 3 # this is the degree of polynomial for y-axis (Expiration Date)
i = 0
for date in dates:
plot_year, plot_month = date.split()
plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
call_data = data[(data.Expiration == plot_date) &
(data.impvolPut > .01) &
(data.impvolPut < 1) &
(data['Last Sale.1'] > 0)]
exp_sizes = call_data.Expiration.size
if exp_sizes > 0:
m[i] = exp_sizes
if i == 0:
x = np.array(call_data.Strike)
call = np.array(call_data.impvolPut)
xmin[0] = x.min()
xmax[0] = x.max()
else:
x2 = np.array(call_data.Strike)
x = np.append(x,x2)
call2 = np.array(call_data.impvolPut)
call = np.append(call,call2)
xmin[i] = x2.min()
xmax[i] = x2.max()
y[i] = plot_date-today
i+=1
nux = np.zeros(1,dtype=np.double)
nuy = np.zeros(1,dtype=np.double)
if len(dates) != i:
print(
"Error with data: the CBOE may not be open for trading "
"or one expiration date has null data"
)
return 0
weight = np.ones(call.size, dtype=np.double)
#Call the NAG Chebyshev fitting function
output_coef = fit.dim2_cheb_lines(m,k,l,x,y,call,weight,(k + 1) * (l + 1),xmin,xmax,nux,nuy)
# Now that we have fit the function,
# we use fit.dim2_cheb_eval to evaluate at different strikes/expirations
nStrikes = 100 # number of Strikes to evaluate
spacing = 20 # number of Expirations to evaluate
for i in range(spacing):
mfirst = 1
xmin = data.Strike.min()
xmax = data.Strike.max()
x = np.linspace(xmin, xmax, nStrikes)
ymin = data.Expiration.min() - today
ymax = data.Expiration.max() - today
y = (ymin) + i * np.floor((ymax - ymin) / spacing)
fx=np.empty(nStrikes)
fx=fit.dim2_cheb_eval(mfirst,k,l,x,xmin,xmax,y,ymin,ymax,output_coef)
if 'xaxis' in locals():
xaxis = np.append(xaxis, x)
temp = np.empty(len(x))
temp.fill(y)
yaxis = np.append(yaxis, temp)
for j in range(len(x)):
zaxis.append(fx[j])
else:
xaxis = x
yaxis = np.empty(len(x), dtype=np.double)
yaxis.fill(y)
zaxis = []
for j in range(len(x)):
zaxis.append(fx[j])
fig = plt.figure(2)
ax = fig.add_subplot(111, projection='3d')
# A try-except block for Matplotlib
try:
ax.plot_trisurf(xaxis, yaxis, zaxis, cmap=cm.jet)
except AttributeError:
print ("Your version of Matplotlib does not support plot_trisurf")
print ("...plotting wireframe instead")
ax.plot(xaxis, yaxis, zaxis)
ax.set_xlabel('Strike Price')
ax.set_ylabel('Days to Expiration')
ax.set_zlabel('Implied Volatility for Put Options')
plt.suptitle('Implied Volatility Surface for %s Current Price: %s Date: %s' %
(company, underlyingprice, qd_date))
plt.show()
if __name__ == "__main__":
main()
| cthadeufaria/passport | investing/impliedVolatility.py | impliedVolatility.py | py | 9,398 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.exit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_num... |
2778228066 | import types
from imp import reload
def print_status(module):
print(f'reloading {module.__name__}')
def try_reload(module):
try:
reload(module)
except Exception as e:
print(f'FAILED {e.__repr__()} : {module}')
def transitive_reload(module, visited):
if not module in visited:
print_status(module)
try_reload(module)
visited[module] = True
for attrobj in module.__dict__.values():
if type(attrobj) == types.ModuleType:
transitive_reload(attrobj, visited)
def reload_all(*args):
visited = {}
for arg in args:
if type(arg) == types.ModuleType:
transitive_reload(arg, visited)
if __name__ == '__main__':
def tester(reloader, modname):
import importlib, sys
if len(sys.argv) > 1:
modname = sys.argv[1]
module = importlib.import_module(modname)
reloader(module)
tester(reload_all, 'reloadall')
| Quessou/quessoutils | qssmodules/reloadall.py | reloadall.py | py | 967 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "imp.reload",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "types.ModuleType",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "types.ModuleType",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
... |
35717342742 | import torch
import torch.nn as nn
from utils.resnet_infomin import model_dict
import torch.nn.functional as F
from collections import OrderedDict
class RGBSingleHead(nn.Module):
"""RGB model with a single linear/mlp projection head"""
def __init__(self, name='resnet50', head='linear', feat_dim=128):
super(RGBSingleHead, self).__init__()
name, width = self._parse_width(name)
dim_in = int(2048 * width)
self.width = width
self.encoder = model_dict[name](width=width)
if head == 'linear':
self.head = nn.Sequential(
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
elif head == 'mlp':
self.head = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
else:
raise NotImplementedError(
'head not supported: {}'.format(head))
@staticmethod
def _parse_width(name):
if name.endswith('x4'):
return name[:-2], 4
elif name.endswith('x2'):
return name[:-2], 2
else:
return name, 1
def forward(self, x, mode=0):
# mode --
# 0: normal encoder,
# 1: momentum encoder,
# 2: testing mode
feat = self.encoder(x)
if mode == 0 or mode == 1:
feat = self.head(feat)
return feat
class RGBMultiHeads(RGBSingleHead):
"""RGB model with Multiple linear/mlp projection heads"""
def __init__(self, name='resnet50', head='linear', feat_dim=128):
super(RGBMultiHeads, self).__init__(name, head, feat_dim)
self.head_jig = JigsawHead(dim_in=int(2048*self.width),
dim_out=feat_dim,
head=head)
def forward(self, x, x_jig=None, mode=0):
# mode --
# 0: normal encoder,
# 1: momentum encoder,
# 2: testing mode
if mode == 0:
feat = self.head(self.encoder(x))
feat_jig = self.head_jig(self.encoder(x_jig))
return feat, feat_jig
elif mode == 1:
feat = self.head(self.encoder(x))
return feat
else:
feat = self.encoder(x)
return feat
class CMCSingleHead(nn.Module):
"""CMC model with a single linear/mlp projection head"""
def __init__(self, name='resnet50', head='linear', feat_dim=128):
super(CMCSingleHead, self).__init__()
name, width = self._parse_width(name)
dim_in = int(2048 * width)
self.width = width
self.encoder1 = model_dict[name](width=width, in_channel=1)
self.encoder2 = model_dict[name](width=width, in_channel=2)
if head == 'linear':
self.head1 = nn.Sequential(
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
self.head2 = nn.Sequential(
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
elif head == 'mlp':
self.head1 = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
self.head2 = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, feat_dim),
Normalize(2)
)
else:
raise NotImplementedError(
'head not supported: {}'.format(head))
@staticmethod
def _parse_width(name):
if name.endswith('x4'):
return name[:-2], 2
elif name.endswith('x2'):
return name[:-2], 1
else:
return name, 0.5
def forward(self, x, mode=0):
# mode --
# 0: normal encoder,
# 1: momentum encoder,
# 2: testing mode
x1, x2 = torch.split(x, [1, 2], dim=1)
feat1 = self.encoder1(x1)
feat2 = self.encoder2(x2)
if mode == 0 or mode == 1:
feat1 = self.head1(feat1)
feat2 = self.head2(feat2)
return torch.cat((feat1, feat2), dim=1)
class CMCMultiHeads(CMCSingleHead):
"""CMC model with Multiple linear/mlp projection heads"""
def __init__(self, name='resnet50', head='linear', feat_dim=128):
super(CMCMultiHeads, self).__init__(name, head, feat_dim)
self.head1_jig = JigsawHead(dim_in=int(2048*self.width),
dim_out=feat_dim,
head=head)
self.head2_jig = JigsawHead(dim_in=int(2048*self.width),
dim_out=feat_dim,
head=head)
def forward(self, x, x_jig=None, mode=0):
# mode --
# 0: normal encoder,
# 1: momentum encoder,
# 2: testing mode
x1, x2 = torch.split(x, [1, 2], dim=1)
feat1 = self.encoder1(x1)
feat2 = self.encoder2(x2)
if mode == 0:
x1_jig, x2_jig = torch.split(x_jig, [1, 2], dim=1)
feat1_jig = self.encoder1(x1_jig)
feat2_jig = self.encoder2(x2_jig)
feat1, feat2 = self.head1(feat1), self.head2(feat2)
feat1_jig = self.head1_jig(feat1_jig)
feat2_jig = self.head2_jig(feat2_jig)
feat = torch.cat((feat1, feat2), dim=1)
feat_jig = torch.cat((feat1_jig, feat2_jig), dim=1)
return feat, feat_jig
elif mode == 1:
feat1, feat2 = self.head1(feat1), self.head2(feat2)
return torch.cat((feat1, feat2), dim=1)
else:
return torch.cat((feat1, feat2), dim=1)
class Normalize(nn.Module):
def __init__(self, p=2):
super(Normalize, self).__init__()
self.p = p
def forward(self, x):
return F.normalize(x, p=self.p, dim=1)
class JigsawHead(nn.Module):
"""Jigswa + linear + l2norm"""
def __init__(self, dim_in, dim_out, k=9, head='linear'):
super(JigsawHead, self).__init__()
if head == 'linear':
self.fc1 = nn.Linear(dim_in, dim_out)
elif head == 'mlp':
self.fc1 = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, dim_out),
)
else:
raise NotImplementedError('JigSaw head not supported: {}'.format(head))
self.fc2 = nn.Linear(dim_out * k, dim_out)
self.l2norm = Normalize(2)
self.k = k
def forward(self, x):
bsz = x.shape[0]
x = self.fc1(x)
# ==== shuffle ====
# this step can be moved to data processing step
shuffle_ids = self.get_shuffle_ids(bsz)
x = x[shuffle_ids]
# ==== shuffle ====
n_img = int(bsz / self.k)
x = x.view(n_img, -1)
x = self.fc2(x)
x = self.l2norm(x)
return x
def get_shuffle_ids(self, bsz):
n_img = int(bsz / self.k)
rnd_ids = [torch.randperm(self.k) for i in range(n_img)]
rnd_ids = torch.cat(rnd_ids, dim=0)
base_ids = torch.arange(bsz)
base_ids = torch.div(base_ids, self.k).long()
base_ids = base_ids * self.k
shuffle_ids = rnd_ids + base_ids
return shuffle_ids
#default settings taken from https://github.com/HobbitLong/PyContrast/tree/master/pycontrast
OPT = {'method': 'InfoMin',
'modal': 'RGB',
'jigsaw': True,
'mem': 'moco',
'arch': 'resnet50',
'feat_dim': 128,
'head': 'mlp',
'ckpt': '/experimentos/pesos/infomin/InfoMin_800.pth', #custom path
'aug_linear': 'NULL',
'n_class': 1000,
'aug': 'D'}
NAME_TO_FUNC = {
'RGBSin': RGBSingleHead,
'RGBMul': RGBMultiHeads,
'CMCSin': CMCSingleHead,
'CMCMul': CMCMultiHeads,
}
def load_encoder_weights(model):
"""load pre-trained weights for encoder
Args:
model: pretrained encoder, should be frozen
"""
msg = "Empty Message"
if OPT['ckpt']:
ckpt = torch.load(OPT['ckpt'], map_location='cpu')
state_dict = ckpt['model']
if OPT['modal'] == 'RGB':
# Unimodal (RGB) case
encoder_state_dict = OrderedDict()
for k, v in state_dict.items():
k = k.replace('module.', '')
if 'encoder' in k:
k = k.replace('encoder.', '')
encoder_state_dict[k] = v
msg = model.encoder.load_state_dict(encoder_state_dict)
else:
# Multimodal (CMC) case
encoder1_state_dict = OrderedDict()
encoder2_state_dict = OrderedDict()
for k, v in state_dict.items():
k = k.replace('module.', '')
if 'encoder1' in k:
k = k.replace('encoder1.', '')
encoder1_state_dict[k] = v
if 'encoder2' in k:
k = k.replace('encoder2.', '')
encoder2_state_dict[k] = v
msg = model.encoder1.load_state_dict(encoder1_state_dict)
msg += " " + model.encoder2.load_state_dict(encoder2_state_dict)
print('Pre-trained weights loaded!', msg)
else:
print('==============================')
print('warning: no pre-trained model!')
print('==============================')
msg = "warning: no pre-trained model!"
return model, msg
def build_model():
# specify modal key
branch = 'Mul' if OPT['jigsaw'] else 'Sin'
model_key = OPT['modal'] + branch
model = NAME_TO_FUNC[model_key](OPT['arch'], OPT['head'], OPT['feat_dim'])
if OPT['mem'] == 'moco':
model_ema = NAME_TO_FUNC[model_key](OPT['arch'], OPT['head'], OPT['feat_dim'])
else:
model_ema = None
return model, model_ema
if __name__ == '__main__':
model, _ = build_model()
model, msg = load_encoder_weights(model)
print(msg)
| VirtualSpaceman/ssl-skin-lesions | utils/build_backbone_infomin.py | build_backbone_infomin.py | py | 10,323 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "utils.resnet_infomin.model_dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.... |
8267132836 | import logging
import os
import pytest
import yaml
from cekit.config import Config
from cekit.descriptor import Image, Overrides
from cekit.descriptor.resource import create_resource
from cekit.errors import CekitError
try:
from unittest.mock import call
except ImportError:
from mock import call
config = Config()
def setup_function(function):
config.cfg["common"] = {"work_dir": "/tmp"}
if os.path.exists("file"):
os.remove("file")
def test_repository_dir_is_constructed_properly(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{"git": {"url": "http://host.com/url/repo.git", "ref": "ref"}}
)
assert res.copy("dir") == "dir/repo"
def test_repository_dir_uses_name_if_defined(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{
"name": "some-id",
"git": {"url": "http://host.com/url/repo.git", "ref": "ref"},
}
)
assert res.copy("dir") == "dir/some-id"
def test_repository_dir_uses_target_if_defined(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{
"target": "some-name",
"git": {"url": "http://host.com/url/repo.git", "ref": "ref"},
}
)
assert res.copy("dir") == "dir/some-name"
def test_git_clone(mocker):
mock = mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{"git": {"url": "http://host.com/url/path.git", "ref": "ref"}}
)
res.copy("dir")
mock.assert_has_calls(
[
call(
["git", "clone", "http://host.com/url/path.git", "dir/path"],
stdout=None,
stderr=None,
check=True,
universal_newlines=True,
),
call(
["git", "checkout", "ref"],
stdout=None,
stderr=None,
check=True,
universal_newlines=True,
),
],
any_order=True,
)
def get_res(mocker):
res = mocker.Mock()
res.status_code = 200
res.iter_content = lambda chunk_size: [b"test"]
return res
def get_ctx(mocker):
ctx = mocker.Mock()
ctx.check_hostname = True
ctx.verify_mode = 1
return ctx
def get_mock_urlopen(mocker):
return mocker.patch("cekit.tools.urlopen", return_value=get_res(mocker))
def get_mock_ssl(mocker, ctx):
return mocker.patch("cekit.tools.ssl.create_default_context", return_value=ctx)
def test_fetching_with_ssl_verify(mocker):
config.cfg["common"]["ssl_verify"] = True
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
mock_urlopen = get_mock_urlopen(mocker)
res = create_resource({"name": "file", "url": "https:///dummy"})
try:
res.copy()
except Exception:
pass
mock_urlopen.assert_called_with("https:///dummy", context=ctx)
assert ctx.check_hostname is True
assert ctx.verify_mode == 1
def test_fetching_disable_ssl_verify(mocker):
config.cfg["common"]["ssl_verify"] = False
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
res = create_resource({"name": "file", "url": "https:///dummy"})
try:
res.copy()
except Exception:
pass
mock_urlopen.assert_called_with("https:///dummy", context=ctx)
assert ctx.check_hostname is False
assert ctx.verify_mode == 0
def test_fetching_bad_status_code():
res = create_resource({"name": "file", "url": "http:///dummy"})
with pytest.raises(CekitError):
res.copy()
def test_fetching_file_exists_but_used_as_is(mocker):
"""
It should not download the file, because we didn't
specify any hash algorithm, so integrity checking is
implicitly disabled here.
"""
with open("file", "w") as f: # noqa: F841
pass
mock_urlopen = get_mock_urlopen(mocker)
res = create_resource(
{
"name": "file",
"url": "http:///dummy",
"md5": "d41d8cd98f00b204e9800998ecf8427e",
}
)
res.copy()
mock_urlopen.assert_not_called()
def test_fetching_file_exists_fetched_again(mocker):
"""
It should download the file again, because available
file locally doesn't match checksum.
"""
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "file", "url": "http:///dummy", "md5": "123456"})
with pytest.raises(CekitError):
# Checksum will fail, because the "downloaded" file
# will not have md5 equal to 123456. We need investigate
# mocking of requests get calls to do it properly
res.copy()
mock_urlopen.assert_called_with("http:///dummy", context=ctx)
def test_fetching_file_exists_no_hash_fetched_again(mocker):
"""
It should download the file again, because available
file locally doesn't match checksum.
"""
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "file", "url": "http:///dummy"})
with pytest.raises(CekitError):
# url is not valid so we get error, but we are not interested
# in it. We just need to check that we attempted to downlad.
res.copy()
mock_urlopen.assert_called_with("http:///dummy", context=ctx)
def test_generated_url_without_cacher():
res = create_resource({"url": "url"})
assert res._Resource__substitute_cache_url("url") == "url"
def test_resource_verify(mocker):
mock = mocker.patch("cekit.descriptor.resource.check_sum")
res = create_resource({"url": "dummy", "sha256": "justamocksum"})
res._Resource__verify("dummy")
mock.assert_called_with("dummy", "sha256", "justamocksum")
def test_generated_url_with_cacher():
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
res = create_resource({"url": "dummy", "sha256": "justamocksum"})
res.name = "file"
assert res._Resource__substitute_cache_url("file") == "file,sha256,justamocksum"
def test_path_resource_absolute():
res = create_resource({"name": "foo", "path": "/bar"}, directory="/foo")
assert res.path == "/bar"
def test_path_resource_relative():
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
assert res.path == "/foo/bar"
def test_path_local_existing_resource_no_cacher_use(mocker):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
mocker.patch("os.path.exists", return_value=True)
shutil_mock = mocker.patch("shutil.copy2")
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
mocker.spy(res, "_download_file")
res.guarded_copy("target")
shutil_mock.assert_called_with("/foo/bar", "target")
assert res._download_file.call_count == 0
def test_path_local_non_existing_resource_with_cacher_use(mocker):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
mocker.patch("os.path.exists", return_value=False)
mocker.patch("os.makedirs")
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
mocker.spy(res, "_download_file")
download_file_mock = mocker.patch.object(res, "_download_file")
res.guarded_copy("target")
download_file_mock.assert_called_with("/foo/bar", "target")
def test_url_resource_download_cleanup_after_failure(mocker, tmpdir, caplog):
caplog.set_level(logging.DEBUG, logger="cekit")
mocker.patch("os.path.exists", return_value=False)
mocker.patch("os.makedirs")
os_remove_mock = mocker.patch("os.remove")
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
urlopen_mock = urlopen_class_mock.return_value
urlopen_mock.getcode.return_value = 200
urlopen_mock.read.side_effect = Exception
res = create_resource({"url": "http://server.org/dummy", "sha256": "justamocksum"})
targetfile = os.path.join(str(tmpdir), "targetfile")
with pytest.raises(CekitError) as excinfo:
res.guarded_copy(targetfile)
assert "Error copying resource: 'dummy'. See logs for more info" in str(
excinfo.value
)
assert (
"Removing incompletely downloaded '{}' file".format(targetfile) in caplog.text
)
urlopen_class_mock.assert_called_with("http://server.org/dummy", context=mocker.ANY)
os_remove_mock.assert_called_with(targetfile)
def test_copy_plain_resource_with_cacher(mocker, tmpdir):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
config.cfg["common"]["work_dir"] = str(tmpdir)
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
mock_urlopen = urlopen_class_mock.return_value
mock_urlopen.getcode.return_value = 200
mock_urlopen.read.side_effect = [b"one", b"two", None]
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "foo", "md5": "5b9164ad6f496d9dee12ec7634ce253f"})
substitute_cache_url_mock = mocker.patch.object(
res, "_Resource__substitute_cache_url", return_value="http://cache/abc"
)
res.copy(str(tmpdir))
substitute_cache_url_mock.assert_called_once_with(None)
urlopen_class_mock.assert_called_with("http://cache/abc", context=ctx)
def test_copy_plain_resource_from_brew(mocker, tmpdir):
config.cfg["common"]["work_dir"] = str(tmpdir)
config.cfg["common"]["redhat"] = True
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
mock_urlopen = urlopen_class_mock.return_value
mock_urlopen.getcode.return_value = 200
mock_urlopen.read.side_effect = [b"one", b"two", None]
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "foo", "md5": "5b9164ad6f496d9dee12ec7634ce253f"})
mocker.spy(res, "_Resource__substitute_cache_url")
mock_get_brew_url = mocker.patch(
"cekit.descriptor.resource.get_brew_url", return_value="http://cache/abc"
)
res.copy(str(tmpdir))
mock_get_brew_url.assert_called_once_with("5b9164ad6f496d9dee12ec7634ce253f")
assert res._Resource__substitute_cache_url.call_count == 0
urlopen_class_mock.assert_called_with("http://cache/abc", context=ctx)
def test_override_resource_remove_chksum():
image = Image(
yaml.safe_load(
"""
from: foo
name: test/foo
version: 1.9
artifacts:
- name: abs
path: /tmp/abs
md5: 'foo'
sha1: 'foo'
sha256: 'foo'
sha512: 'foo'
"""
),
"foo",
)
overrides = Overrides(
yaml.safe_load(
"""
artifacts:
- name: abs
path: /tmp/over
"""
),
"foo",
)
overrides.merge(image)
assert overrides["from"] == "foo"
assert overrides["artifacts"][0]["path"] == "/tmp/over"
assert "md5" not in overrides["artifacts"][0]
assert "sha1" not in overrides["artifacts"][0]
assert "sha256" not in overrides["artifacts"][0]
assert "sha512" not in overrides["artifacts"][0]
| cekit/cekit | tests/test_unit_resource.py | test_unit_resource.py | py | 11,760 | python | en | code | 70 | github-code | 6 | [
{
"api_name": "cekit.config.Config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line... |
14159066621 | import tkinter as tk
from tkinter import ttk
import pyautogui
import pygetwindow
# The app was developed by Tom Girshovksi.
class CenterWindowGUI:
def __init__(self, master):
self.master = master
master.title("Center Window")
# Create the frame
self.frame = ttk.Frame(master, padding=20)
self.frame.pack()
# Configure columns to have equal weight
self.frame.columnconfigure(0, weight=1)
self.frame.columnconfigure(1, weight=1)
self.frame.columnconfigure(2, weight=1)
# Create the label
self.label = ttk.Label(self.frame, text="Choose a window to center:")
self.label.grid(row=0, column=0, columnspan=3, pady=10)
# Create the listbox to display the windows
self.listbox = tk.Listbox(self.frame, width=50, height=10, selectmode=tk.SINGLE)
self.listbox.grid(row=1, column=0, columnspan=3, padx=10, pady=10)
self.update_windows()
# Center Button
self.center_button = ttk.Button(self.frame, text="Center Window", command=self.center_window)
self.center_button.grid(row=2, column=0, pady=10)
# Scale Function Button
self.scale_button = ttk.Button(self.frame, text="Scale Window", command=self.scale_window)
self.scale_button.grid(row=2, column=1, pady=10)
# Update List Button
self.update_button = ttk.Button(self.frame, text="Update List", command=self.update_windows)
self.update_button.grid(row=2, column=2, pady=10)
def center_window(self):
# Get the index of the selected item in the list box
index = self.listbox.curselection()[0]
# Get the selected window
window = self.windows[index]
# Get the size of the screen
screen_width, screen_height = pyautogui.size()
# Get the size of the window
window_width, window_height = window.size
# Calculate the new position to center the window
new_left = (screen_width - window_width) // 2
new_top = (screen_height - window_height) // 2
# Move the window to the new position
window.moveTo(new_left, new_top)
def update_windows(self):
# Clear the list box
self.listbox.delete(0, tk.END)
# Get a list of all windows that are currently open
self.windows = pyautogui.getAllWindows()
# Add the window titles to the list box
for window in self.windows:
self.listbox.insert(tk.END, window.title)
def scale_window(self):
# Get the index of the selected item in the list box
index = self.listbox.curselection()[0]
# Get the selected window
window = self.windows[index]
# Get the size of the screen
screen_width, screen_height = pyautogui.size()
# Get the size of the window
window_width, window_height = window.size
if window_width == screen_width and window_height == screen_height:
# If the window is already full screen, center it instead
self.center_window()
else:
# Resize the window to full screen
window.resizeTo(screen_width // 2 + 500, screen_height // 2 +300)
# Create the root window
root = tk.Tk()
root.resizable(False, False)
# Set the style of the GUI
style = ttk.Style(root)
gui = CenterWindowGUI(root)
root.mainloop()
| R1veltm/WindowCenterizer | main.py | main.py | py | 3,398 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tkinter.ttk.Frame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"li... |
28924320598 | import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import random
from sqlalchemy import func
from models import setup_db, Question, Category
QUESTIONS_PER_PAGE = 10
# Create APP and settings cors headers
def create_app(test_config=None):
    """Application factory for the trivia API.

    Sets up the database, CORS for the /api/* routes, all question and
    category endpoints, and the JSON error handlers, then returns the
    configured Flask app.
    """
    app = Flask(__name__)
    setup_db(app)
    # Allow any origin on the API routes.
    cors = CORS(app, resources={r"/api/*": {"origins": "*"}})

    @app.after_request
    def after_request(response):
        # Advertise the accepted headers and verbs on every response.
        response.headers.add('Access-Control-Allow-Headers',
                             'Content-Type,Authorization,true')
        response.headers.add('Access-Control-Allow-Methods',
                             'GET,PATCH,POST,DELETE,OPTIONS')
        return response

    # Paginate method
    def paginate_questions(request, questions):
        """Format the questions and return the slice for the requested page."""
        page = request.args.get('page', 1, type=int)
        start = (page - 1) * QUESTIONS_PER_PAGE
        end = start + QUESTIONS_PER_PAGE
        questions = [question.format() for question in questions]
        return questions[start:end]

    def distinct_categories(formatted_questions):
        """Return the distinct category ids in first-seen order.

        Shared helper for the three endpoints that previously duplicated
        this loop inline.
        """
        current_categories = []
        for question in formatted_questions:
            category = question['category']
            if category not in current_categories:
                current_categories.append(category)
        return current_categories

    # Questions API with pagination
    @app.route('/api/questions', methods=['GET'])
    def get_questions_with_pagination():
        """Return one page of questions plus every category."""
        error_code = 422
        try:
            categories = Category.query.all()
            questions = Question.query.all()
            formatted_questions = paginate_questions(request, questions)
            formatted_categories = [category.format()
                                    for category in categories]
            if len(formatted_categories) == 0 or len(formatted_questions) == 0:
                error_code = 404
                abort(error_code)
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'total_questions': len(questions),
                'current_category': distinct_categories(formatted_questions),
                'categories': formatted_categories
            })
        except Exception:
            # abort() above raises an HTTPException that lands here too, so
            # re-abort with whichever status was recorded before raising.
            # (A bare "except:" would additionally swallow SystemExit and
            # KeyboardInterrupt, hence the narrower clause.)
            abort(error_code)

    # Categories API
    @app.route('/api/categories', methods=['GET'])
    def get_categories():
        """Return every category."""
        error_code = 422
        try:
            categories = Category.query.all()
            formatted_categories = [category.format()
                                    for category in categories]
            if len(formatted_categories) == 0:
                # BUG FIX: the bare "except:" used to intercept this 404 and
                # re-raise it as a 422; record the code so 404 is preserved.
                error_code = 404
                abort(error_code)
            return jsonify({
                'success': True,
                'categories': formatted_categories,
                'total_categories': len(formatted_categories)
            })
        except Exception:
            abort(error_code)

    # Delete Question API
    @app.route('/api/questions/<int:question_id>', methods=['DELETE'])
    def delete_question(question_id):
        """Delete the question with the given id."""
        question = Question.query.filter_by(id=question_id).first()
        if question is None:
            abort(404)
        try:
            question.delete()
            return jsonify({
                'success': True,
                'question': question_id
            })
        except Exception:
            # NOTE(review): 405 is an odd status for a failed delete (422 is
            # more conventional); kept for backward compatibility.
            abort(405)

    # Create Question API
    @app.route('/api/questions/create', methods=['POST'])
    def new_question():
        """Insert a new question built from the posted JSON body."""
        try:
            body = request.get_json()
            # The intermediate locals that shadowed this view function's
            # name were removed; fields are passed straight through.
            question = Question(
                question=body.get('question', None),
                answer=body.get('answer', None),
                category=body.get('category', None),
                difficulty=body.get('difficulty', None))
            question.insert()
            return jsonify({
                'success': True,
                'created': question.id
            })
        except Exception:
            abort(422)

    # Get Questions by Category API
    @app.route(
        '/api/category/<int:question_category>/questions',
        methods=['GET']
    )
    def get_questions_by_categories(question_category):
        """Return one page of questions belonging to the given category."""
        error_code = 422
        try:
            questions = Question.query.filter(
                Question.category == question_category).all()
            formatted_questions = paginate_questions(request, questions)
            if len(formatted_questions) == 0:
                error_code = 404
                abort(error_code)
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'total_questions': len(formatted_questions),
                'current_categories': distinct_categories(formatted_questions),
            })
        except Exception:
            abort(error_code)

    # Get Question by Search Term API
    @app.route('/api/questions/search', methods=['POST'])
    def search_questions():
        """Return every question whose text matches the posted searchTerm."""
        body = request.get_json()
        search_term = body.get('searchTerm', None)
        if search_term is None:
            # BUG FIX: a missing searchTerm used to crash with a 500
            # (AttributeError on None); reject it explicitly instead.
            abort(400)
        # Raw string avoids the invalid "\ " escape-sequence warning while
        # producing the same backslash-escaped LIKE pattern as before.
        search = "%{}%".format(search_term.replace(" ", r"\ "))
        data = Question.query.filter(Question.question.ilike(search)).all()
        formatted_questions = [question.format() for question in data]
        if len(formatted_questions) == 0:
            abort(404)
        try:
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'totalQuestions': len(formatted_questions),
                'current_categories': distinct_categories(formatted_questions),
                'search': search_term
            })
        except Exception:
            abort(422)

    # Get Question to Play Quiz API
    @app.route('/api/quizzes', methods=['POST'])
    def post_quiz_questions():
        """Return one random not-yet-seen question for the quiz, if any remain."""
        code = 422
        try:
            request_quiz = request.get_json()
            previous_questions = request_quiz.get('previous_questions')
            # NOTE(review): quiz_category is compared directly against
            # Question.category below — confirm the client sends a plain id.
            quiz_category = request_quiz.get('quiz_category')
            question = Question.query
            question = question.filter(~Question.id.in_(previous_questions))
            if quiz_category != 0:
                question = question.filter(Question.category == quiz_category)
            questions_random = question.order_by(func.random()).first()
            if not questions_random:
                # No questions left: signal success with no question payload.
                return jsonify({
                    'success': True,
                    'previous_question': len(previous_questions)
                })
            return jsonify({
                'success': True,
                'question': questions_random.format(),
                'previous_question': previous_questions
            })
        except Exception:
            abort(code)

    @app.errorhandler(404)
    def not_found(error):
        return jsonify({
            "success": False,
            "error": 404,
            "message": "resource not found"
        }), 404

    @app.errorhandler(422)
    def unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "unprocessable"
        }), 422

    @app.errorhandler(400)
    def bad_request(error):
        return jsonify({
            "success": False,
            "error": 400,
            "message": "bad request"
        }), 400

    @app.errorhandler(405)
    def method_not_allowed(error):
        return jsonify({
            "success": False,
            "error": 405,
            "message": "Method Not Allowed"
        }), 405

    return app
| steffaru/udacity-trivia-api-project | starter/backend/flaskr/__init__.py | __init__.py | py | 8,097 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.setup_db",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
... |
14471351413 | '''
Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.
Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.
After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.
Example 1:
Input:
accounts = [["John", "johnsmith@mail.com", "john00@mail.com"], ["John", "johnnybravo@mail.com"], ["John", "johnsmith@mail.com", "john_newyork@mail.com"], ["Mary", "mary@mail.com"]]
Output: [["John", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
Explanation:
The first and third John's are the same person as they have the common email "johnsmith@mail.com".
The second John and Mary are different people as none of their email addresses are used by other accounts.
We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],
['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.
Note:
The length of accounts will be in the range [1, 1000].
The length of accounts[i] will be in the range [1, 10].
The length of accounts[i][j] will be in the range [1, 30].
'''
from collections import defaultdict
class Solution:
    def accountsMerge(self, accounts: list[list[str]]) -> list[list[str]]:
        """Merge accounts that share at least one email address.

        Links every email in an account to the account's first email in an
        undirected graph, then emits one merged account per connected
        component with its emails sorted.

        BUG FIX: the original annotation used ``List`` from ``typing``
        without importing it, which raises NameError when the module is
        imported; builtin generics (PEP 585) avoid the import entirely.
        """
        email_to_name = {}
        graph = defaultdict(set)
        for account in accounts:
            # Problem allows an account of length 1 (name only); such an
            # account has no emails and cannot link to anything, so skip it
            # instead of crashing on account[1].
            if len(account) < 2:
                continue
            name = account[0]
            first_email = account[1]
            email_to_name[first_email] = name
            for email in account[1:]:
                graph[first_email].add(email)
                graph[email].add(first_email)
                email_to_name[email] = name
        seen = set()
        ans = []
        # Iterative DFS over each not-yet-visited connected component.
        for email in graph:
            if email not in seen:
                seen.add(email)
                stack = [email]
                component = []
                while stack:
                    node = stack.pop()
                    component.append(node)
                    for nei in graph[node]:
                        if nei not in seen:
                            seen.add(nei)
                            stack.append(nei)
                ans.append([email_to_name[email]] + sorted(component))
        return ans
| loganyu/leetcode | problems/721_accounts_merge.py | 721_accounts_merge.py | py | 2,893 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 29,
"usage_type": "call"
}
] |
42891510827 | #PYTHON CAMERA MODEL
import cv2
import numpy as np
i=0
def capturing(event,x,y,flags,param):
    """Mouse callback: on left-button release, save the current camera frame
    to photo_<i>.png and preview it in a popup window for about one second.

    Reads the module-level ``frame`` (updated by the capture loop below) and
    increments the module-level photo counter ``i``.
    """
    global i
    if event==cv2.EVENT_LBUTTONUP:
        # Number each capture so earlier photos are not overwritten.
        name="photo_"+str(i)+".png"
        wname="CAPTURED IMAGE"
        # Persist the most recent frame grabbed by the main loop.
        cv2.imwrite(name,frame)
        h=cv2.imread(name)
        cv2.namedWindow(wname)
        cv2.imshow(wname,h)
        cv2.moveWindow(wname,700,50)
        i+=1
        # Keep the preview visible for ~1 second, then close it.
        cv2.waitKey(1000)
        cv2.destroyWindow(wname)
# Open the default webcam (device 0).
cap=cv2.VideoCapture(0)
while True:
    ret,frame = cap.read()
    win="CAPTURE"
    # Live camera feed window.
    cv2.imshow("CAMERA",frame)
    cv2.moveWindow("CAMERA",50,50)
    cv2.namedWindow(win)
    # Draw a black "CLICK HERE" button image that acts as the shutter.
    img=np.zeros((150,150,3))
    cv2.putText(img,"CLICK",(35,65),cv2.FONT_HERSHEY_SIMPLEX,0.85,(255,255,255),2,cv2.LINE_AA)
    cv2.putText(img,"HERE",(35,90),cv2.FONT_HERSHEY_SIMPLEX,0.85,(255,255,255),2,cv2.LINE_AA)
    cv2.imshow(win,img)
    cv2.moveWindow(win,250,560)
    # Clicking inside the button window fires the capturing() callback above.
    cv2.setMouseCallback(win,capturing)
    # Enter (keycode 13) exits the program.
    if cv2.waitKey(1)==13:
        break
cap.release()
cv2.destroyAllWindows()
| NamrithaGirish/LiveCam | cam.py | cam.py | py | 1,003 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.EVENT_LBUTTONUP",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
... |
import nltk
from collections import Counter
# Fetch the VADER lexicon required by SentimentIntensityAnalyzer.
nltk.download('vader_lexicon')
from nltk.sentiment import SentimentIntensityAnalyzer
# Read the reviews file supplied with the assignment.
filename = "data.csv"
with open(filename, 'r') as f:
    reviews = f.readlines()
# Initialise the sentiment analyser (VADER).
sia = SentimentIntensityAnalyzer()
# Overall sentiment = mean compound score across all reviews.
compound_scores = [sia.polarity_scores(review)['compound'] for review in reviews]
overall_sentiment = sum(compound_scores) / len(compound_scores)
# Classify reviews as positive / negative / neutral by compound score.
# (The VADER path scores every line; the commented star-based variant below
# skipped rows without a numeric value in the "Stars" column.)
positive_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] > 0]
negative_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] < 0]
neutral_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] == 0]
#positive_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) >= 4]
#negative_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) <= 2]
#neutral_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) == 3]
# Count how often each word occurs across all reviews.
word_count = Counter(word for review in reviews for word in review.split())
most_common_words = word_count.most_common(5)
num_positive = len(positive_reviews)
num_negative = len(negative_reviews)
num_neutral = len(neutral_reviews)
# Write the sentiment summary report (Ukrainian labels are the intended output).
with open('report.txt', 'w') as file:
    file.write('\n Аналіз відгуків:\n')
    file.write(f"Загальний настрій відгуків: ({overall_sentiment}):\n")
    file.write(f"Позитивні: ({len(positive_reviews)}):\n")
    file.write(f"Негативні: ({len(negative_reviews)}):\n")
    file.write(f"Нейтральні: ({len(neutral_reviews)}):\n")
# Write the word-frequency report.
with open('repeating words.txt', 'w') as file:
    file.write("\n П'ять найбільш вживаних слів: \n")
    for word, count in most_common_words:
        file.write(f"{word}: {count}\n")
    file.write("Кількість повторюваних слів: \n")
    for word, count in word_count.items():
        file.write(f"{word}: {count}\n")
# Print a summary to stdout for a quick sanity check.
print("Аналіз настроїв:")
print("Загальний настрій відгуків: {:.2f}".format(overall_sentiment))
print("")
print("Аналіз негативних, позитивних і природних відгуків:")
print("Кількість позитивних відгуків: {}".format(num_positive))
print("Кількість негативних відгуків: {}".format(num_negative))
print("Кількість нейтральних відгуків: {}".format(num_neutral))
print("")
{
"api_name": "nltk.download",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "nltk.sentiment.SentimentIntensityAnalyzer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 33,
"usage_type": "call"
}
] |
36347951264 | import random
import numpy as np
from scipy.optimize import fsolve
# velocity upper bound from Wu et al (https://flow-project.github.io/papers/wu17a.pdf )
# This is an approximation
def v_eq_max_function(v, *args):
    """Residual of the equilibrium-gap equation at velocity ``v``.

    ``args`` is ``(num_vehicles, length)``. The root of this function is the
    equilibrium velocity upper bound for a ring of that size (approximation
    from Wu et al.).
    """
    num_vehicles, length = args
    # Largest achievable equilibrium spacing with one RL vehicle present,
    # assuming a 5 m vehicle length.
    s_eq_max = (length - num_vehicles * 5) / (num_vehicles - 1)
    # IDM-style parameters: desired speed, minimum gap, time headway, exponent.
    v0, s0, tau, gamma = 30, 2, 1, 4
    desired_gap = (s0 + v * tau) * (1 - (v / v0) ** gamma) ** -0.5
    return s_eq_max - desired_gap
def get_velocity_upper_bound(num_vehicles, length):
    """Solve for the equilibrium-velocity upper bound of the ring."""
    # Seed the root finder near the expected magnitude of the solution.
    initial_guess = np.array(4)
    return fsolve(v_eq_max_function, initial_guess, args=(num_vehicles, length))[0]
def get_desired_velocity(num_vehicles, length, method_name = None):
    """Return the uniform-flow equilibrium velocity for the ring.

    Known ring lengths use hard-coded values; any other length falls back to
    a scaled solve of the equilibrium equation. Only some controllers need
    this value.
    """
    if length == 220:
        # FS uses a slightly lower set point on the 220 m ring.
        return 2.7 if method_name == "fs" else 3.0
    if length == 230:
        return 3.45
    if length == 260:
        # Value from the LORR paper and other sources (found by trial).
        return 4.82
    if length == 270:
        return 5.2
    # Unknown length: take 93% of the analytic upper bound.
    scaler = 0.93
    print("Scaler: ", scaler)
    return get_velocity_upper_bound(num_vehicles, length) * scaler
# Shock
# Define shock models
def get_shock_model(identifier, length = None, network_scaler=1, bidirectional=False, high_speed = False):
    """Return shock parameters for the requested model.

    identifier 2 draws random accel/decel shocks and returns
    (intensity array, duration array, frequency); identifier -1 returns the
    velocity-dip settings (vel_set, duration, 1) used for stability tests.
    """
    # Network scaler 6 used in the bottleneck
    # Accel/ Decel value, duration, frequency (in the interval between shock start and shock end)
    # Duration: In seconds, for which each shock is applied
    # Frequency: In the interval, how many shocks are applied
    # if identifier == 1:
    #     return (-1.4, 2, 10)
    if identifier == 2:
        # These ranges are obtained from data.
        # Sample the shock frequency for the interval.
        frequency = network_scaler*np.random.randint(5, 20) # value of 10 means once shock every 3000/10 = 300 steps, 5 = 600 steps, 15 = 200 steps
        intensity_collect = []
        duration_collect = []
        if high_speed:
            intensity_abs_min = 1.5
            intensity_abs_max = 4.0
        else:
            intensity_abs_min = 1
            intensity_abs_max = 3.0
        print("Frequency:", frequency)
        for i in range(frequency):
            if bidirectional:
                # between (-abs_max to -abs_min) and (abs_min to abs_max) but not between (-abs_min to abs_min)
                intensity = random.uniform(-intensity_abs_max, intensity_abs_max)
                while intensity > -intensity_abs_min and intensity < intensity_abs_min:
                    intensity = random.uniform(-intensity_abs_max, intensity_abs_max)
            else:
                # Deceleration-only shocks.
                intensity = random.uniform(-intensity_abs_max, -intensity_abs_min)
            print("Intensity:", intensity)
            durations = np.linspace(0.1, 2.5, 20) # In seconds
            # Durations are sampled with a triangular-shaped probability that
            # peaks at the bucket matching this shock's absolute intensity.
            abs_intensity = abs(intensity)
            intensity_bucket = np.linspace(intensity_abs_min, intensity_abs_max,len(durations))
            loc = np.searchsorted(intensity_bucket, abs_intensity)
            left = loc
            right = len(durations) - loc
            probabilities_left = np.linspace(0.0, 10, left)
            # print("Probabilities left:", probabilities_left, probabilities_left.sum())
            probabilities_right = np.linspace(10, 0.0, right)
            # print("Probabilities right:", probabilities_right, probabilities_right.sum())
            probabilities = np.concatenate((probabilities_left, probabilities_right))
            probabilities /= probabilities.sum()
            #print("Probabilities:", probabilities, probabilities.sum())
            duration = round(np.random.choice(durations, 1, p=probabilities)[0], 1)
            print("Duration:", duration)
            intensity_collect.append(intensity)
            duration_collect.append(duration)
        # return intensity, durations (second), frequency
        return (np.asarray(intensity_collect), np.asarray(duration_collect), frequency)
    # Stability test
    elif identifier == -1:
        # velocity, duration, frequency
        # Stability tests have velocity manipulation, so the first param here is speed at the velocity dip
        # Duration and frequency are also used
        # Applying the dip once is enough.
        if length ==220:
            vel_set = 2.0
            duration = 1
        elif length == 270:
            vel_set = 3.0
            duration = 2
        elif length == 260:
            vel_set = 3.0
            duration = 2
        else:
            vel_set = 5.0
            duration = 2
        print("\n\nVelocity set: ", vel_set)
        return (vel_set, duration, 1)
        #return (2, 10, 10)
    else:
        raise ValueError("Shock model identifier not recognized")
## Shock utils
def get_time_steps_stability(duration, frequency, shock_start_time, shock_end_time):
    """Return a (frequency, 2) array of [start, end] env steps per shock.

    ``duration`` is in seconds and converted to environment steps (10 steps
    per second); starts and ends are spaced evenly across the shock window.
    """
    steps = duration * 10  # seconds -> env steps
    starts = np.linspace(shock_start_time, shock_end_time - steps, frequency, dtype=int)
    ends = np.linspace(shock_start_time + steps, shock_end_time, frequency, dtype=int)
    schedule = np.stack((starts, ends), axis=1)
    print("Start times: ", starts)
    print("End times: ", ends)
    print("Shock times: \n", schedule)
    # TODO: warn when consecutive shock intervals overlap.
    return schedule
def get_time_steps(durations, frequency, shock_start_time, shock_end_time):
    """Return per-shock [start, end] env steps for variable-length shocks.

    ``durations`` is an array of per-shock durations in seconds (10 env
    steps per second). Starts are spread evenly, leaving room for the last
    duration; each end time is its start plus its own duration.
    """
    durations = durations * 10  # seconds -> env steps
    print("Durations: ", durations)
    starts = np.linspace(shock_start_time, shock_end_time - durations[-1], frequency, dtype=int)
    ends = [starts[k] + durations[k] for k in range(frequency)]
    schedule = np.stack((starts, ends), axis=1)
    print("Start times: ", starts)
    print("End times: ", ends)
    print("Shock times: \n", schedule)
    # TODO: warn when consecutive shock intervals overlap.
    return schedule
# use
# sm = shock_model(2)
# get_time_steps(durations, frequency, 8000, 10000)
#print(sm[0][1]) | poudel-bibek/Beyond-Simulated-Drivers | flow/density_aware_util.py | density_aware_util.py | py | 7,049 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.optimize.fsolve",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.random",... |
20182818588 | # File: utils.py
# Name: Sergio Ley Languren
"""Utility for wordle program"""
from WordleDictionary import FIVE_LETTER_WORDS
from WordleGraphics import CORRECT_COLOR, PRESENT_COLOR, MISSING_COLOR, UNKNOWN_COLOR, N_COLS, N_ROWS, WordleGWindow
from random import choice
from typing import Type, Union, Optional
from copy import deepcopy
from tempfile import NamedTemporaryFile
from os import getcwd, unlink
__all__ = [
"choose_word",
"validate_responce",
"ScoreFileParser"
]
# CONSTANT
MINUS_COL = 5
t = None
# Functions
def choose_word() -> str:
"""Chooses the answer from a list of words of five characters"""
a = choice(FIVE_LETTER_WORDS)
return a
# -----------------------------------------------
def _set_key_color_or_not(gw, key_colored, k, c, override_check=False):
if not key_colored or override_check:
gw.set_key_color(k.capitalize(), c)
def _add_color(gw, column, keycolored, character, color, ac, oc: Optional[bool] = None):
gw.set_square_color(gw.get_current_row(), column, color)
if oc:
_set_key_color_or_not(gw, keycolored, character, color, oc)
else:
_set_key_color_or_not(gw, keycolored, character, color)
a_copy = ac.replace(character, "", 1)
return a_copy
def add_tempfile() -> NamedTemporaryFile:
"""creates score file"""
global t
if not t:
t = NamedTemporaryFile("w+", encoding="utf-8", prefix="wordle_", dir=getcwd(), delete=False)
return t
def validate_responce(gw: Type[WordleGWindow], res: str, a: str) -> Union[bool, bool, NamedTemporaryFile]:
"""Validates user response
:param gw: Main Wordle window class
:param res: User responce
:param a: answer to the wordle
Returns:
validity | word-validation | score tempfile
"""
global MINUS_COL
a_copy = deepcopy(a)
correct_counter = 0
temp = add_tempfile()
# checks if word is not in the word list
if res not in FIVE_LETTER_WORDS:
gw.show_message(f"{res} is not a word!!!")
return False, True, temp
for c in a:
col = N_COLS - MINUS_COL
ch = gw.get_square_letter(gw.get_current_row(), col).lower()
key_colored = gw.get_key_color(c.capitalize()) != UNKNOWN_COLOR
if ch == c:
a_copy = _add_color(gw, col, key_colored, ch, CORRECT_COLOR, a_copy, True)
correct_counter += 1
elif ch in a_copy:
a_copy = _add_color(gw, col, key_colored, ch, PRESENT_COLOR, a_copy)
else:
a_copy = _add_color(gw, col, key_colored, ch, MISSING_COLOR, a_copy)
MINUS_COL -= 1
line = f"{gw.get_current_row()}|{correct_counter}\n"
temp.write(line)
temp.flush()
MINUS_COL = 5
if correct_counter == 5:
return True, False, temp
return False, False, temp
class ScoreFileParser:
"""
Parses and adds score to wordle grid based on the scorefile
"""
cleared = False
def __init__(self, gw: Type[WordleGWindow], tmp: Type[NamedTemporaryFile]):
self.gw = gw
self.tmpfile = tmp
def parse(self):
"""Main function to parse the score file"""
self.tmpfile.seek(0)
lines = self.tmpfile.readlines()
if not self.cleared:
self.clear_grid()
self.parse()
for l in lines:
row = l.split("|")[0]
correct_points = l.split("|")[1].replace("\n", "")
self.gw.set_square_letter(int(row), 0, str(int(row) + 1))
self.gw.set_square_letter(int(row), 4, correct_points)
self.gw.set_square_color(int(row), 0, PRESENT_COLOR)
if int(correct_points) == 5:
self.gw.set_square_color(int(row), 4, CORRECT_COLOR)
else:
self.gw.set_square_color(int(row), 4, MISSING_COLOR)
self.gw.show_message("rows points", "limegreen")
def clear_grid(self):
"""Clear wordle grid"""
for i in range(N_ROWS):
self.gw.set_current_row(i)
for j in range(N_COLS):
self.gw.set_square_letter(i, j, "")
self.cleared = True
def close(self):
"""closes the score file"""
self.tmpfile.close()
path = self.tmpfile.name
print(path)
unlink(path) | SLey3/Project-1 | utils.py | utils.py | py | 4,316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.choice",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "WordleDictionary.FIVE_LETTER_WORDS",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "typing.Optional",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "... |
35968448866 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import ui
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from collections import defaultdict
import time
import datetime
import csv
import unicodedata
import re
import hashlib
import os
from selenium.common.exceptions import ElementNotVisibleException
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Chrome(chrome_options=options)
actions = ActionChains(driver)
today =datetime.date.today()
def check_exists_by_xpath(xpath):
try:
while (driver.find_element_by_xpath("%s"%(xpath,))) :
driver.find_element_by_xpath("%s"%(xpath,)).click()
time.sleep(5)
except ElementNotVisibleException:
print ("element not found")
wait = ui.WebDriverWait(driver, 10)
driver.get('http://www.cwtv.com/shows/')
print(driver.current_url)
time.sleep(8)
(driver.page_source).encode('ascii','ignore')
shows_count =driver.find_elements_by_xpath(".//*[@id='cw-main-footer-1']/div[1]/ul/li/a")
print ("Shows count :[%s]"%(len(shows_count)),)
launch_id =[]
service_videos = {}
href =[]
release_year=0
multiples =1
for s in range (len(shows_count)):
href.append(shows_count[s].get_attribute('href'))
print (href)
for h in range (len(href)):
try:
print (h)
driver.get (href[h])
episodes=driver.find_elements_by_xpath(".//*[@id='list_1']/div//li//a")
multiples= len(episodes)/5
print (multiples)
for m in range (multiples) :
for e in range (len(episodes)):
print (len(episodes), e+1, m+1)
if e+1==(5*(m+1)) :
driver.find_element_by_xpath(".//*[contains(@id,'touchcarousel_1')]/button[2]").click()
time.sleep (3)
epi_href =episodes[e].get_attribute('href')
video_id =epi_href.split("=")[-1].encode('ascii', 'ignore')
epi_details =driver.find_element_by_xpath("(.//*[@id='list_1']/div//li//a//div[contains(@class,'videodetails')]/p[1])[%s]"%(e+1)).text.encode('ascii', 'ignore')
epi_title =epi_details.split("Ep.")[0].split("(")[0].strip()
epi_sea_num =epi_details.split("Ep.")[1].split(")")[0]
print (epi_details, epi_title, epi_sea_num)
if (len (epi_sea_num) == 3) :
epi_num=epi_details.split("Ep.")[1].split(")")[0][-2:]
season_num =epi_details.split("Ep.")[1].split(")")[0][0]
elif (len (epi_sea_num) == 4) :
epi_num=epi_details.split("Ep.")[1].split(")")[0][-2:]
season_num =epi_details.split("Ep.")[1].split(")")[0][0:2]
series_title =driver.find_element_by_xpath(".//*[@id='show-logo']/a").get_attribute('title').encode('ascii', 'ignore')
launch_id.append(video_id)
service_videos ["cwtv"] =launch_id
res=[today, "CWTV Shows", series_title, season_num, epi_num, epi_title, service_videos]
print (res)
with open(os.getcwd()+'/'+"cwtv_shows_output"+ '.csv', 'ab+') as mycsvfile:
thedatawriter =csv.writer(mycsvfile)
thedatawriter.writerow(res)
launch_id =[]
service_videos = {}
except Exception as e:
print(e)
continue | surbhikhandelwal/Python-Projects | CWTV/cwtv.py | cwtv.py | py | 3,267 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 22,
"usage_type": "call"
},
{
"api... |
7998902064 | import os
from bson.json_util import dumps
from dotenv import load_dotenv
# from flask import jsonify
import pymongo
load_dotenv() # use dotenv to hide sensitive credential as environment variables
DATABASE_URL = f'mongodb+srv://{os.environ.get("user")}:{os.environ.get("passwort")}' \
'@flask-mongodb-atlas.wicsm.mongodb.net/' \
'flaura?retryWrites=true&w=majority' # get connection url from environment
client = pymongo.MongoClient(DATABASE_URL) # establish connection with database
# plants.config['MONGO_DBNAME'] = 'restdb'
# plants.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
# mongo = PyMongo(plants)
mydb = client.flaura
mycol = mydb.plants
def getPlantsByName(name):
cursor = mycol.find({"name": {"$regex": '.*'+name+'.*', "$options": 'i'}})
list_cur = list(cursor)
plants = dumps(list_cur)
return plants
def getAllPlants():
cursor = mycol.find()
list_cur = list(cursor)
plantList = dumps(list_cur)
return plantList
def setNewPlant(name, waterAmount, critMoist, sleepTime):
newPlant = {"name": name, "waterAmountML": waterAmount, "criticalMoisture": critMoist, "sleepTime": sleepTime}
mycol.insert_one(newPlant)
# function Get List of Plants that contain <name>
# function Get All Plants??
# function Add new Plant to DB
| rosemaxio/flauraBackend | plants/db.py | db.py | py | 1,326 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
... |
37182795454 | import os
import re
from typing import Tuple
from transformers import pipeline # type: ignore
MODEL_PATH = os.environ.get("MODEL_PATH", "./distilbert-base-cased-distilled-squad")
class CardSourceGeneratorMock:
def __call__(self, text: str, question: str) -> Tuple[int, int]:
return 0, len(text) // 2
class CardSourceGenerator:
def __init__(self) -> None:
self._qa_model = pipeline(
"question-answering", model=MODEL_PATH, tokenizer=MODEL_PATH
)
def __call__(self, text: str, question: str) -> Tuple[int, int]:
answer = self._qa_model(question=question, context=text) # type: ignore
start, end = self._find_sentence_indices(text, answer["start"], answer["end"])
return start, end
def _find_sentence_indices(
self, text: str, substring_start: int, substring_end: int
) -> Tuple[int, int]:
"""
Finds the starting and ending indices of the sentence that contains the substring.
"""
sentences = re.split(r"\n|(?<=[.!?])\s+", text)
substring = text[substring_start:substring_end]
for sentence in sentences:
index = sentence.lower().find(substring.lower())
if index != -1:
start = text.index(sentence)
end = start + len(sentence)
return start, end
return substring_start, substring_end
| MoShrank/card-generation-service | text/CardSourceGenerator.py | CardSourceGenerator.py | py | 1,408 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "transformers.pipeline",
... |
9264192052 | import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_lcmv, apply_lcmv, apply_lcmv_cov
from scipy.stats import pearsonr
import config
from config import fname, lcmv_settings
from time_series import simulate_raw, create_epochs
# Don't be verbose
mne.set_log_level(False)
fn_stc_signal = fname.stc_signal(vertex=config.vertex)
fn_simulated_raw = fname.simulated_raw(vertex=config.vertex)
fn_simulated_epochs = fname.simulated_epochs(vertex=config.vertex)
# fn_report_h5 = fname.report(vertex=config.vertex)
fn_report_h5 = None # Don't produce a report
###############################################################################
# Simulate raw data and create epochs
###############################################################################
print('simulate data')
info = mne.io.read_info(fname.sample_raw)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False))
fwd_disc_true = mne.read_forward_solution(fname.fwd_discrete_true)
fwd_disc_true = mne.pick_types_forward(fwd_disc_true, meg=True, eeg=False)
er_raw = mne.io.read_raw_fif(fname.ernoise, preload=True)
raw, stc_signal = simulate_raw(info=info, fwd_disc_true=fwd_disc_true, signal_vertex=config.vertex,
signal_freq=config.signal_freq, n_trials=config.n_trials,
noise_multiplier=config.noise, random_state=config.random,
n_noise_dipoles=config.n_noise_dipoles_vol, er_raw=er_raw)
true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]
# del info, fwd_disc_true, er_raw
epochs = create_epochs(raw)
###############################################################################
# Sensor-level analysis
###############################################################################
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make cov matrices
cov = mne.compute_covariance(epochs, tmin=-1, tmax=1, method='empirical')
signal_cov = mne.compute_covariance(epochs, tmin=0, tmax=1, method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=-1, tmax=0, method='empirical')
# Compute evokeds
evoked_grad = epochs_grad.average()
evoked_mag = epochs_mag.average()
evoked_joint = epochs_joint.average()
###############################################################################
# Compute LCMV beamformer results
###############################################################################
# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)
dists = []
focs = []
corrs = []
ori_errors = []
for setting in lcmv_settings:
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, use_noise_cov, reduce_rank, project_pca = setting
try:
if sensor_type == 'grad':
evoked = evoked_grad
elif sensor_type == 'mag':
evoked = evoked_mag
elif sensor_type == 'joint':
evoked = evoked_joint
else:
raise ValueError('Invalid sensor type: %s', sensor_type)
if project_pca and pick_ori != 'vector':
raise NotImplementedError('project_pca=True only makes sense when pick_ori="vector"')
filters = make_lcmv(evoked.info, fwd_disc_man,
cov if use_noise_cov else signal_cov,
reg=reg,
pick_ori=pick_ori, weight_norm=weight_norm,
inversion=inversion,
depth=1. if normalize_fwd else None,
noise_cov=noise_cov if use_noise_cov else None,
reduce_rank=reduce_rank)
stc_est = apply_lcmv(evoked, filters).crop(0.001, 1)
if pick_ori == 'vector':
# Combine vector time source
if project_pca:
stc_proj, _ = stc_est.project('pca', fwd_disc_man['src'])
else:
stc_proj = stc_est.magnitude()
stc_est_power = (stc_proj ** 2).sum()
peak_vertex, peak_time = stc_est_power.get_peak(vert_as_index=True, time_as_index=True)
estimated_time_course = np.abs(stc_proj.data[peak_vertex])
else:
stc_est_power = (stc_est ** 2).sum()
peak_vertex, peak_time = stc_est_power.get_peak(vert_as_index=True, time_as_index=True)
estimated_time_course = np.abs(stc_est.data[peak_vertex])
# Compute distance between true and estimated source locations
pos_est = fwd_disc_man['source_rr'][peak_vertex]
pos_true = fwd_disc_man['source_rr'][config.vertex]
dist = np.linalg.norm(pos_est - pos_true)
# Ratio between estimated peak activity and all estimated activity.
focality_score = stc_est_power.data[peak_vertex, 0] / stc_est_power.data.sum()
# Correlation between true and reconstructed timecourse
true_time_course = stc_signal.copy().crop(0, 1).data[0]
corr = pearsonr(np.abs(true_time_course), estimated_time_course)[0]
# Angle between estimated and true source orientation
if pick_ori == 'max-power':
estimated_ori = filters['max_power_ori'][config.vertex]
ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
if ori_error > 90:
ori_error = 180 - ori_error
elif pick_ori == 'vector':
estimated_ori = stc_est.data[peak_vertex, :, peak_time]
estimated_ori /= np.linalg.norm(estimated_ori)
ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
if ori_error > 90:
ori_error = 180 - ori_error
else:
ori_error = np.nan
except Exception as e:
print(e)
dist = np.nan
focality_score = np.nan
corr = np.nan
ori_error = np.nan
print(setting, dist, focality_score, corr, ori_error)
dists.append(dist)
focs.append(focality_score)
corrs.append(corr)
ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(lcmv_settings,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'use_noise_cov',
'reduce_rank', 'project_pca'])
df['dist'] = dists
df['focality'] = focs
df['corr'] = corrs
df['ori_error'] = ori_errors
df.to_csv(fname.lcmv_results(vertex=config.vertex, noise=config.noise))
print('OK!')
| wmvanvliet/beamformer_simulation | lcmv.py | lcmv.py | py | 6,703 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "mne.set_log_level",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.fname.stc_signal",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "config.fname",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.vertex"... |
6193427862 | """
Main script: Autonomous Driving on Udacity Simulator
@author : nelsoonc
Undergraduate Thesis
Nelson Changgraini - Bandung Institute of Technology, Indonesia
"""
# Throttle 0 - 1 will produce speed 0 - 30 mph
# Steering -1 - 1 will produce angle -25 - 25 degrees
import os
import numpy as np
import socketio
import eventlet
from flask import Flask
import tensorflow as tf
from tensorflow.keras.models import load_model
import base64
from io import BytesIO
from PIL import Image
from train import rmse, get_lr_metric
from utils import preprocess
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# DIRECTORY PATH
MODEL_PATH = 'models/simulation_model.h5'
# VARIABLE
MAX_SPEED = 25
# FOR REAL TIME COMMUNICATION BETWEEN CLIENT AND SERVER
sio = socketio.Server()
# FLASK IS A MICRO WEB FRAMEWORK WRITTEN IN PYTHON
app = Flask(__name__) # '__main__'
# Executing in graph mode
@tf.function
def predict(input_tensor, model):
    """Run `model` on `input_tensor`; tf.function compiles this call into a graph."""
    return model(input_tensor)
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle a telemetry frame from the simulator: predict steering and reply."""
    speed = float(data['speed'])
    # The camera frame arrives as a base64-encoded image string.
    image = Image.open(BytesIO(base64.b64decode(data['image'])))
    image = np.asarray(image)
    image = preprocess(image)
    # Add a batch dimension for the model.
    image = np.array([image])
    steering = float(predict(image, model))
    # Simple controller: back off the throttle with steering effort and speed.
    throttle = 1.0 - abs(steering) - speed / MAX_SPEED
    print('{}, {}, {}'.format(steering, throttle, speed))
    sendControl(steering, throttle)
@sio.on('connect')
def connect(sid, environ):
    """On simulator connect, reset steering and throttle to neutral."""
    print('Connected', sid)
    sendControl(0, 0)
@sio.on('disconnect')
def disconnect(sid):
    """Log when the simulator client disconnects."""
    print('Disconnect', sid)
def sendControl(steering, throttle):
    """Emit a 'steer' event with the control values serialized as strings.

    The simulator protocol expects string payloads, hence the str() calls
    (idiomatic replacement for the previous x.__str__() form).
    """
    sio.emit('steer', data={
        'steering_angle': str(steering),
        'throttle': str(throttle)
    }, skip_sid=True)
if __name__ == '__main__':
    print('Setting up..')
    # NOTE(review): 'lr' is mapped to the get_lr_metric factory itself — its
    # name suggests it normally takes an optimizer and returns a metric
    # function; confirm this matches how the model was saved.
    model = load_model(MODEL_PATH, custom_objects={'rmse': rmse, 'lr': get_lr_metric})
    if model:
        print('Model loaded')
    # Wrap the Flask app so socket.io traffic is handled alongside HTTP.
    app = socketio.Middleware(sio, app)
    # LISTEN TO PORT 4567
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| zhouzheny1/Conditional_Imitation_Learning | simulation/main.py | main.py | py | 2,123 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "socketio.Server",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.function",
... |
7965704838 | from pathlib import Path
from promtail_ops_manager import PromtailOpsManager
# The promtail release file.
resource = "./promtail.zip"
manager = PromtailOpsManager()
# manager.install(resource)
# Setup for local tests such that installation of binaries etc.
# will not mess up your local client.
# Redirect all install targets under /tmp so a local test run never touches
# a real promtail installation.
manager.promtail_home = Path('/tmp/promtail')
manager.promtail = Path('/tmp/promtail/promtail-linux-amd64')
manager.promtail_cfg = manager.promtail_home.joinpath('promtail-local-config.yaml')
manager.promtail_unitfile = Path('/tmp/promtail.service')
# Run tests.
# NOTE(review): these call the manager's private methods step by step instead
# of manager.install(); keep them in sync with the install() sequence.
manager._prepareOS()
manager._install_from_resource(resource)
manager._install_config()
manager._install_systemd_unitfile()
if manager.verify_config():
    print("Config OK")
else:
    print("Config is error")
print("Version:", manager.promtail_version() )
# manager._purge()
| erik78se/promtail-vm-operator | tests/testlib.py | testlib.py | py | 839 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "promtail_ops_manager.PromtailOpsManager",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "path... |
29401120526 | import json
import os
from googleapiclient.discovery import build
class Channel:
    """A YouTube channel whose details are fetched through the YouTube Data API."""

    def __init__(self, channel_id: str) -> None:
        """Initialize from a channel id; every other field is pulled via the API."""
        self.__channel_id = channel_id
        api_key: str = os.getenv('API_KEY')
        youtube = build('youtube', 'v3', developerKey=api_key)
        channel = youtube.channels().list(id=self.__channel_id, part='snippet,statistics').execute()
        snippet = channel['items'][0]['snippet']
        statistics = channel['items'][0]['statistics']
        self.title = snippet['title']
        self.description = snippet['description']
        self.url = 'https://www.youtube.com/channel/' + self.__channel_id
        self.subscribers = statistics['subscriberCount']
        self.video_count = statistics['videoCount']
        self.views = statistics['viewCount']

    def __str__(self):
        """Human-readable form: "<title> (<url>)"."""
        return "{} ({})".format(self.title, self.url)

    def __add__(self, other):
        """Sum of the two channels' subscriber counts."""
        return int(self.subscribers) + int(other.subscribers)

    def __sub__(self, other):
        """Difference of the two channels' subscriber counts."""
        return int(self.subscribers) - int(other.subscribers)

    def __lt__(self, other):
        """True when this channel has fewer subscribers than `other`."""
        return int(self.subscribers) < int(other.subscribers)

    def __le__(self, other):
        """True when this channel has at most as many subscribers as `other`."""
        return int(self.subscribers) <= int(other.subscribers)

    def __gt__(self, other):
        """True when this channel has more subscribers than `other`."""
        return int(self.subscribers) > int(other.subscribers)

    def __ge__(self, other):
        """True when this channel has at least as many subscribers as `other`."""
        return int(self.subscribers) >= int(other.subscribers)

    def __eq__(self, other):
        """Channels compare equal when their subscriber counts match."""
        return int(self.subscribers) == int(other.subscribers)

    @property
    def channel_id(self):
        """The immutable channel id this instance was created with."""
        return self.__channel_id

    def print_info(self) -> None:
        """Print the raw API response for this channel to the console."""
        youtube = self.get_service()
        channel = youtube.channels().list(id=self.__channel_id, part='snippet,statistics').execute()
        print(json.dumps(channel, indent=2, ensure_ascii=False))

    @classmethod
    def get_service(cls):
        """Return a client object for talking to the YouTube Data API."""
        api_key: str = os.getenv('API_KEY')
        return build('youtube', 'v3', developerKey=api_key)

    def to_json(self, name_json):
        """Dump this instance's attribute values into the file `name_json`."""
        attribute_dict = {
            'channel_id': self.__channel_id,
            'title': self.title,
            'description': self.description,
            'url': self.url,
            'subscribers': self.subscribers,
            'video_count': self.video_count,
            'views': self.views,
        }
        with open(name_json, "w", encoding="utf-8") as file:
            file.write(json.dumps(attribute_dict))
| AnastasiaLykova/youtube-analytics-project | src/channel.py | channel.py | py | 4,052 | python | ru | code | null | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "googleapiclient.discovery.build",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "googleapiclient.d... |
10996457940 | import time
import pyrealsense2 as rs
import numpy as np
import cv2
import os
import open3d as o3d
intrinsics = np.array([
[605.7855224609375, 0., 324.2651672363281, 0.0],
[0., 605.4981689453125, 238.91090393066406, 0.0],
[0., 0., 1., 0.0],
[0., 0., 0., 1.],])
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def launch_realsense(pixel_width, pixel_high, fps, found_rgb=False):
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
config = rs.config()
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
for s in device.sensors:
if s.get_info(rs.camera_info.name) == 'RGB Camera':
found_rgb = True
break
if not found_rgb:
print("Can't launch rgb camera")
exit(0)
config.enable_stream(rs.stream.depth, pixel_width, pixel_high, rs.format.z16, fps)
config.enable_stream(rs.stream.color, pixel_width, pixel_high, rs.format.bgr8, fps)
align_to = rs.stream.color
alignedFs = rs.align(align_to)
# Start streaming
pipeline.start(config)
# Create folders by date
save_path = os.path.join(os.getcwd(), "out_data",
time.strftime("%Y_%m_%d_%H_%M_%S",
time.localtime()))
os.makedirs(save_path)
os.makedirs(os.path.join(save_path, "rgb"))
os.makedirs(os.path.join(save_path, "depth"))
os.makedirs(os.path.join(save_path, "depth_colormap"))
# cv2.namedWindow("camera in real time", cv2.WINDOW_AUTOSIZE)
# saved_color_image = None
# saved_depth_mapped_image = None
try:
flag = 0
while True:
if flag == 0:
time.sleep(2)
flag = 1
continue
# Wait for a coherent pair of frames: rgb and depth
frames = pipeline.wait_for_frames()
align_frames = alignedFs.process(frames)
depth_frame = align_frames.get_depth_frame()
color_frame = align_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
color_profile = color_frame.get_profile()
cvsprofile = rs.video_stream_profile(color_profile)
color_intrin = cvsprofile.get_intrinsics()
color_intrin_part = [color_intrin.ppx, color_intrin.ppy,
color_intrin.fx, color_intrin.fy]
print('**color_intrin_part**:',color_intrin_part)
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.1), cv2.COLORMAP_JET)
# depth_colormap_dim = depth_colormap.shape
# color_colormap_dim = color_image.shape
#
# if depth_colormap_dim != color_colormap_dim:
# resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
# interpolation=cv2.INTER_AREA)
# images = np.hstack((resized_color_image, depth_colormap))
# else:
# images = np.hstack((color_image, depth_colormap))
# # Show images
# cv2.imshow("camera in real time", images)
# key = cv2.waitKey(1)
# Save the image
# if key & 0xFF == ord('s'):
saved_count = 0
for filename in os.listdir(os.path.join((save_path), "rgb")):
if filename.endswith('.png'):
saved_count += 1
print('save data:',saved_count)
saved_color_image = color_image
saved_depth_image = depth_image
saved_depth_mapped_image = depth_colormap
# save rgb png
cv2.imwrite(os.path.join((save_path), "rgb",
"rgb_{}.png".format(saved_count)),saved_color_image)
# save depth_colormap png
cv2.imwrite(os.path.join((save_path), "depth_colormap",
"depth_colormap_{}.png".format(saved_count)),
saved_depth_mapped_image)
# save depth png
cv2.imwrite(os.path.join((save_path), "depth",
"depth_{}.png".format(saved_count)),
saved_depth_image)
# save depth npy
np.save(os.path.join((save_path), "depth",
"depth_{}.npy".format(saved_count)), saved_depth_image)
depth_path = os.path.join((save_path), "depth", "depth_{}.npy".format(saved_count))
color_path = os.path.join((save_path), "rgb", "rgb_{}.png".format(saved_count))
return depth_path, color_path
finally:
# Stop streaming
pipeline.stop()
def loadRGB(color_file):
    """Read an image file and return it in RGB channel order."""
    bgr_image = cv2.imread(color_file)  # OpenCV loads images as BGR
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
def loadDepth(depth_file):
    """Read a depth image preserving its original bit depth (no conversion)."""
    depth_image = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED)
    return depth_image
def save_points(depth_path, color_path):
    """Convert a saved RGB-D frame into a point cloud, display it, and save
    the points/colors as .npy files two levels above the color image.

    Returns the (points_file, colors_file) paths.
    """
    colors = loadRGB(color_path).astype(np.float32) / 255.0
    depths = np.load(depth_path) # loadDepth(depth_path)
    # convert RGB-D to point cloud
    # NOTE(review): uses the hard-coded module-level `intrinsics`, not the
    # values the camera reports in launch_realsense — confirm they match.
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    # depth factor
    s = 1000.0
    # Pixel-coordinate grids matching the image shape.
    xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
    xmap, ymap = np.meshgrid(xmap, ymap)
    # Back-project every pixel with the pinhole camera model.
    points_z = depths / s
    points_x = (xmap - cx) / fx * points_z
    points_y = (ymap - cy) / fy * points_z
    points = np.stack([points_x, points_y, points_z], axis=-1)
    points = points.reshape((-1, 3))
    colors = colors.reshape((-1, 3))
    # Keep only points with depth below 1 (meters, given the 1000.0 factor).
    mask = np.where(points[:, 2] < 1)
    points = points[mask]
    colors = colors[mask]
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(points)
    cloud.colors = o3d.utility.Vector3dVector(colors)
    coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])
    # Blocks until the visualization window is closed.
    o3d.visualization.draw_geometries([cloud, coord])
    base_dir = os.path.dirname(os.path.dirname(color_path))
    points_file = os.path.join(base_dir, 'points.npy')
    colors_file = os.path.join(base_dir, 'colors.npy')
    np.save(points_file, points)
    np.save(colors_file, colors)
    return points_file, colors_file
if __name__ == '__main__':
depth_path, color_path = launch_realsense(pixel_width=640, pixel_high=480, fps=30)
save_points(depth_path, color_path)
| midea-ai/CMG-Net | utils/get_points.py | get_points.py | py | 6,789 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_n... |
18155298342 | import customtkinter as ctk
from PIL import Image
# Main application window.
root = ctk.CTk()
root.title("IRIS")
root.geometry("1080x720")
root._set_appearance_mode("dark")
# Top frame: assistant avatar plus the AI response box.
frame = ctk.CTkFrame(master=root)
frame.pack(pady=20)
# NOTE(review): absolute path only exists on the original author's machine —
# consider a path relative to this file.
logo = ctk.CTkImage(Image.open(
    "/home/nabendu/Documents/MCA/projects/python-speechRecongition-desktop-AI-project/main/img/walle.png"), size=(200, 180))
label = ctk.CTkLabel(frame, image=logo, text="")
label.grid(row=0, column=0, pady=0, padx=0)
aiTextBox = ctk.CTkTextbox(master=frame, height=100, width=500)
aiTextBox.grid(row=0, column=1, pady=10, padx=50)
# Bottom frame: user input box and the command button.
frame2 = ctk.CTkFrame(master=root)
frame2.pack(pady=10)
userTextBox = ctk.CTkTextbox(master=frame2, height=50, width=500)
userTextBox.grid(row=0, column=0, padx=30, pady=10)
# NOTE(review): the button has no command= callback wired up yet.
command = ctk.CTkButton(master=frame2, text="Enter Command",
                        height=50)
command.grid(row=0, column=1, padx=50, pady=10)
root.mainloop()
| Nandy1002/python-speechRecongition-desktop-AI-project | main/gui.py | gui.py | py | 906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "customtkinter.CTk",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkImage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Ima... |
42488414261 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 12:34:08 2018
@author: michal
"""
import networkx as nx
from networkx.algorithms.isomorphism import GraphMatcher
from networkx.readwrite.json_graph import node_link_data
from os.path import isdir, join, isfile
from os import mkdir
import json
from glob import glob
import shutil
class anionMatcher(GraphMatcher):
    """Graph matcher that checks anion-template node attributes on top of
    plain (sub)graph isomorphism.

    G1 is the molecule graph under test; G2 is the anion template graph.
    """
    def semantic_feasibility(self, G1_node, G2_node):
        """Return True when molecule node G1_node may match template node G2_node."""
        # Charge flags must agree when the molecule node carries one; an
        # unannotated molecule node can never match a charged template node.
        if "charged" in self.G1.node[G1_node]:
            if self.G1.node[G1_node]["charged"] != self.G2.node[G2_node]["charged"]:
                return False
        elif self.G2.node[G2_node]["charged"]:
            return False
        # "Terminating" template nodes must match the neighbour count exactly.
        if self.G2.node[G2_node]["terminating"]:
            if len(list(self.G2.neighbors(G2_node))) != len(list(self.G1.neighbors(G1_node))):
                return False
        # "X" acts as a wildcard element, restricted only by notAliases.
        if ( self.G2.node[G2_node]["element"] == "X" or "X" in self.G2.node[G2_node]["aliases"] ) and not self.G1.node[G1_node]["element"] in self.G2.node[G2_node]["notAliases"] :
            return True
        # Otherwise the elements must match directly or via the alias list.
        return self.G1.node[G1_node]["element"] == self.G2.node[G2_node]["element"] or self.G1.node[G1_node]["element"] in self.G2.node[G2_node]["aliases"]
def addAtribute( graph, nodes, key ):
    """Set `key` to True on the given node id, or on each id in a list of them."""
    node_ids = nodes if isinstance(nodes, list) else [nodes]
    for node_id in node_ids:
        graph.node[node_id][key] = True
def saveAnion(atoms, bonds, charged, name, priority, terminating=None, aliases=None,
              notAliases=None, geometry=None, fullIsomorphism=False, nameMapping=None,
              nonUniqueCharge=None, properties2measure=None):
    """Build an anion template graph and save it as JSON, one file per charge site.

    atoms              -- list of element symbols; node ids are their indices
    bonds              -- list of (i, j) node-index pairs
    charged            -- node id (or list of node ids) carrying the charge
    name, priority     -- template name and matching priority (used in the file name)
    terminating        -- node id(s) whose neighbour count must match exactly
    aliases/notAliases -- per-node element substitution (black)lists
    geometry           -- geometry restriction tag; empty means "no restrictions"
    fullIsomorphism    -- require a full (not sub-) graph isomorphism
    nameMapping        -- node id -> placeholder used to build element-specific names
    nonUniqueCharge    -- node ids that are equivalent alternative charge sites
    properties2measure -- geometric properties to measure for matches
    """
    # None stands in for fresh containers: the previous mutable default
    # arguments would be shared between calls (saveAnionJson pops entries
    # from nameMapping, for example).
    terminating = [] if terminating is None else terminating
    aliases = {} if aliases is None else aliases
    notAliases = {} if notAliases is None else notAliases
    geometry = {} if geometry is None else geometry
    nameMapping = {} if nameMapping is None else nameMapping
    nonUniqueCharge = [] if nonUniqueCharge is None else nonUniqueCharge
    properties2measure = [] if properties2measure is None else properties2measure
    graph = nx.Graph()
    nonUniqueCharge = set(nonUniqueCharge)
    for i, el in enumerate(atoms):
        graph.add_node(i, element = el, terminating = False, bonded = False, aliases = [], charged = False )
    graph.add_edges_from(bonds)
    addAtribute( graph, terminating, "terminating")
    for nodeId in aliases:
        graph.node[nodeId]["aliases"] = aliases[nodeId]
    for nodeId in notAliases:
        graph.node[nodeId]["notAliases"] = notAliases[nodeId]
    if not geometry:
        graph.graph["geometry"]= "no restrictions"
    else:
        graph.graph["geometry"]= geometry
    graph.graph["fullIsomorphism"] = fullIsomorphism
    graph.graph["name"] = name
    graph.graph["nameMapping"] = nameMapping
    graph.graph["priority"] = priority
    graph.graph["properties2measure"] = properties2measure
    fileName = str(priority)+"_"+name
    if isinstance( charged , list ) :
        # Several equivalent charge sites: save one template per site, listing
        # the remaining sites as "other" charges.
        uniqueCharges = set(charged)
        for nodeId in charged:
            nuc = uniqueCharges | nonUniqueCharge
            nuc.remove(nodeId)
            saveAnionJson(graph, fileName, nodeId, nuc)
    else:
        saveAnionJson(graph, fileName, charged, nonUniqueCharge)
def saveAnionJson( graph, fileName, charged, nonUniqueCharges = []):
    """Save one JSON template file per element the charged node may stand for.

    Temporarily marks `charged` in the graph, expands its aliases into
    concrete elements, and writes anion_templates/<element>/<fileName>.json
    for each of them. `nonUniqueCharges` is only read, never mutated.
    """
    mainElement = graph.node[charged]["element"]
    elements = [ mainElement ]
    if "aliases" in graph.node[charged]:
        elements += graph.node[charged]["aliases"]
        graph.node[charged]["aliases"] = []
    graph.node[charged]["charged"] = True
    graph.graph["charged"] = charged
    graph.graph["otherCharges"] = list(nonUniqueCharges)
    oldName = ""
    nameMapping = False
    if "X" in graph.graph["name"] and charged in graph.graph["nameMapping"]:
        oldName = graph.graph["name"]
        nameMapping = graph.graph["nameMapping"][charged]
        graph.graph["nameMapping"].pop(charged)
    for element in elements:
        graph.node[charged]["element"] = element
        if nameMapping:
            # Substitute the concrete element into the placeholder name.
            graph.graph["name"] = oldName.replace( nameMapping , element)
        dir_path = join("anion_templates", element)
        if not isdir( dir_path ):
            mkdir( dir_path )
        path2save = getUniquePath( dir_path , fileName)
        # Context manager closes the file even if serialization fails
        # (the previous open()/close() pair leaked the handle on error).
        with open(path2save, 'w') as output:
            json.dump(node_link_data(graph), output)
    graph.node[charged]["charged"] = False
def getUniquePath(dirPath, fileName):
    """Return a non-clashing path under dirPath for fileName.json.

    Uses fileName.json when free, otherwise appends "_<n>" with the next
    counter after the highest one already present.
    """
    candidate = join(dirPath, fileName + ".json")
    if not isfile(candidate):
        return candidate
    numbered = glob(join(dirPath, fileName) + "_*.json")
    if not numbered:
        return join(dirPath, fileName + "_0.json")
    # Strip ".json" and take the trailing "_<n>" counter of each file.
    highest = max(int(path[:-5].split("_")[-1]) for path in numbered)
    return join(dirPath, fileName + "_" + str(highest + 1) + ".json")
def clearAnionTemplates():
    """Remove the anion_templates directory (if present) and recreate it empty."""
    templates_dir = "anion_templates"
    if isdir(templates_dir):
        shutil.rmtree(templates_dir)
    mkdir(templates_dir)
if __name__ == "__main__":
clearAnionTemplates()
# atoms, bonds, charged, name, priority, terminating = [], aliases = {}, notAliases = {}, geometry = {}, fullIsomorphism = False
#OXYGEN
# #RCOOH
saveAnion( [ "C" , "C", "O", "O" ], [ (0,1), (1,2), (1,3) ],
2, "RCOO", 0, terminating = [1, 2, 3],
geometry = "planar", nonUniqueCharge = [3],
properties2measure= [ { "kind" : "plane", "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "atom" : 1 }, { "center" : [ 2, 3] } ] } ] )
#ClO, BrO, IO,
saveAnion([ "CL", "O" ], [(0, 1)],
1, "XO", 5, fullIsomorphism = True,
aliases = { 0 : [ "BR", "I" ] }, nameMapping = { 0 : "X"}, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 1 ] } ] )
#NO2, ClO2, BRO2,
saveAnion([ "N", "O" , "O" ], [(0, 1), (0,2)],
1, "XO2", 10, fullIsomorphism = True, aliases = { 0 : ["CL", "BR"]}, nameMapping = { 0 : "X" },
nonUniqueCharge=[2], properties2measure= [ { "kind" : "plane" , "atoms" : [ 0, 1, 2 ],
"directionalVector" : [ { "atom" : 0 }, { "center" : [ 1, 2] } ] } ])
#NO3, CO3, PO3, SO3, AsO3, BO3, ClO3, BRO3
saveAnion( ["N", "O", "O", "O"], [(0,1), (0,2), (0,3)],
1, "XO3", 15, fullIsomorphism = True,
aliases = { 0 : [ "C", "P", "B", "S", "AS", "CL", "BR", "I" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge= [2, 3],
properties2measure= [ { "kind" : "plane", "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "closest" : [1, 2, 3] }, { "center" : [ 1, 2, 3] } ]} ])
#PO4, SO4, AsO4, ClO4, BRO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "XO4", 20, fullIsomorphism = True,
aliases = { 0 : [ "S", "AS", "CL", "BR", "I" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2, 3, 4])
# Ph-OH
# saveAnion( [ "C" , "C" , "C" , "C" , "C", "C" , "O" ], [(0,1),(1,2), (2,3), (3,4),( 4, 5), (5, 0), (5,6)],
# 6, "PhOH", 25, terminating = [6], geometry = "planarWithSubstituents")
# #RBOOH
saveAnion( [ "X" , "B", "O", "O" ], [ (0,1), (1,2), (1,3) ],
2, "RBOO", 30, terminating = [2, 3],
notAliases = {0 : [ "O" ] },
nonUniqueCharge=[3],
properties2measure= [ { "kind" : "plane" , "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "atom" : 1 }, { "center" : [ 2, 3] } ]} ])
#COO
saveAnion( [ "C", "O", "O" ], [ (0,1), (0,2) ],
1, "COO", 35, terminating = [1, 2], nonUniqueCharge=[2],
properties2measure= [ { "kind" : "plane", "atoms" : [ 0, 1, 2 ], "directionalVector" : [ { "atom" : 0 }, { "center" : [ 1, 2] } ] } ] )
#R-PO4, R-SO4, R-AsO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "R-XO4", 45, terminating = [ 1, 2, 3 ] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2,3])
#R2-PO4, R2-SO4, R2-AsO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "R2-XO4", 47, terminating = [ 1, 2 ] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2])
#R3-PO4, R3-SO4, R3-AsO4
# saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
# 1, "R2-XO4", 48, terminating = [ 1 ] ,
# aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" } )
#RAsO3, RPO3, RSO3
saveAnion( ["P", "O", "O", "O", "C"], [(0,1), (0,2), (0,3), (0, 4)],
1, "RXO3", 50, terminating = [1, 2, 3] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2,3])
#R2AsO2, R2PO2, RRSO2
# saveAnion( ["P", "O", "O", "C", "C"], [(0,1), (0,2), (0,3), (0, 4)],
# 1, "R2XO2", 55, terminating = [1, 2],
# aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" } )
#F, CL, BR, I, S
saveAnion( [ "F" ], [], 0, "X", 55, aliases = { 0 : [ "CL", "BR", "I", "S"] },
fullIsomorphism = True, nameMapping = { 0 : "X"})
#SCN
saveAnion([ "S", "C" , "N" ], [(0, 1), (0,2)],
[0,1,2], "SCN", 62, fullIsomorphism = True, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 2 ] } ])
# #RSH
# saveAnion( [ "X" , "S" ], [ (0,1)],
# 1, "RSH", 60, terminating = [1],
# notAliases = {0 : [ "O" ] } )
#
#N3
saveAnion([ "N", "N" , "N" ], [(0, 1), (0,2)],
[0,1], "N3", 70, fullIsomorphism = True, nonUniqueCharge=[2], properties2measure= [ { "kind" : "lineSymmetric", "atoms" : [ 0, 2 ] } ])
#CN
saveAnion([ "C" , "N" ], [(0, 1)],
[0,1], "CN", 75, fullIsomorphism = True, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 1 ] } ])
# #RSSR
# saveAnion( [ "X" , "S", "S" ], [ (0,1), (1,2)],
# 1, "RSS", 80 ,
# notAliases = {0 : [ "O" ] } )
| chemiczny/PDB_supramolecular_search | anionTemplateCreator.py | anionTemplateCreator.py | py | 10,065 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "networkx.algorithms.isomorphism.GraphMatcher",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "networkx.Graph",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 112,
"usage_type": "call"
},
{
"api_nam... |
3777146121 | from django.shortcuts import render
from cowsay_app.models import Input
from cowsay_app.forms import InputForm
import subprocess
# I mainly used this source to figure out subprocess:
# https://linuxhint.com/execute_shell_python_subprocess_run_method/
# I also used Stackoverflow and Python docs
# Also found some useful stuff on Stackoverflow for doing the history:
# https://stackoverflow.com/questions/47428403/how-to-get-the-last-10-item-data-in-django
def index(request):
    """Render the cowsay form; on valid POST, store the input and show the cow.

    An invalid POST falls through and re-renders a fresh, unbound form.
    """
    if request.method == "POST":
        new_input = InputForm()
        form = InputForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            Input.objects.create(
                input=data.get('input')
            )
            # List-argument subprocess.run (shell=False) keeps user input from
            # being interpreted by a shell.
            cow = subprocess.run(
                ['cowsay', data['input']], capture_output=True
            ).stdout.decode("utf-8")
            return render(request, "index.html", {'form': new_input, 'cow': cow})
    form = InputForm()
    return render(request, "index.html", {"title": "Welcome to Cowsay!", "form": form})
def history(request):
    """Render the ten most recent cowsay inputs, newest first."""
    recent_inputs = Input.objects.order_by('-id')[:10]
    return render(request, 'history.html', {'cowsay_history': recent_inputs})
| pokeyjess/cowsay | cowsay_app/views.py | views.py | py | 1,247 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cowsay_app.forms.InputForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cowsay_app.forms.InputForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cowsay_app.models.Input.objects.create",
"line_number": 18,
"usage_type": "call"
... |
25993011459 | import urllib
from flask import Blueprint, request, render_template, flash, redirect, url_for
from orders_tracker.blueprints.clients.service import add_client, update_client, remove_client, search_clients, \
get_form_fields, get_path_args, \
get_clients_count, render_empty, get_pagination_metadata, paginate_clients
from orders_tracker.forms import NewClientForm, DeleteConfirmForm
from orders_tracker.models import Client, Device
from orders_tracker.tables import ClientsTable
clients_blueprint = Blueprint('clients_bp', __name__, template_folder="templates")
@clients_blueprint.route('/clients/new', methods=['GET', 'POST'])
def new_client():
    """Show the new-client form; create the client and redirect on valid POST."""
    form = NewClientForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            created_client = Client(form.name.data, form.phone.data, form.address.data, form.notes.data)
            add_client(created_client)
            return redirect(url_for('clients_bp.clients'))
        else:
            # Invalid POST: flash a warning and re-render with the bound form.
            flash('Перевірте введені значення.', category='warning')
    return render_template('new_client.html', form=form)
@clients_blueprint.route('/clients', methods=['GET', 'POST'])
def clients():
    """List clients with search and pagination.

    POST redirects to a GET carrying the search query, so results are
    bookmarkable; GET renders the filtered, paginated table.
    """
    if request.method == 'POST':
        search_field = get_form_fields()
        return redirect(url_for('clients_bp.clients',
                                search_query=search_field))
    search_arg, page_arg = get_path_args()
    # stats: total client count and the count remaining after filtering.
    stats = {'total': get_clients_count(), 'filter': -1}
    clients_query = search_clients(search_arg)
    stats['filter'] = clients_query.count()
    if stats['filter'] == 0:
        return render_empty(stats, search_arg)
    pagination_metadata = get_pagination_metadata(page_arg, clients_query)
    clients_list = paginate_clients(pagination_metadata, clients_query)
    table = ClientsTable(clients_list)
    return render_template('clients.html',
                           table=table,
                           stats=stats,
                           search_field_value=search_arg,
                           pagination_data=pagination_metadata)
@clients_blueprint.route('/clients/<client_id>', methods=['GET', 'POST'])
def client(client_id):
    """Show one client's details, their devices, and a Google Maps link."""
    address_link = None
    selected_client = Client.query.filter_by(id=client_id).first_or_404()
    if selected_client.address:
        # URL-encode the address so it is safe inside the Maps query string.
        address_link = "https://www.google.com/maps/search/?api=1&query=" + \
                       urllib.parse.quote_plus(selected_client.address)
    devices = Device.query.filter_by(client_id=client_id).all()
    return render_template('client.html',
                           client=selected_client,
                           devices=devices,
                           address_link=address_link)
@clients_blueprint.route('/clients/<client_id>/edit', methods=['GET', 'POST'])
def edit_client(client_id):
    """Edit an existing client; responds with 404 on unknown ids."""
    # first_or_404 avoids the AttributeError the original hit on an
    # invalid id (.first() silently returned None).
    edited_client = Client.query.filter_by(id=client_id).first_or_404()
    modal_form = NewClientForm()
    if request.method == 'POST':
        if modal_form.validate_on_submit():
            edited_client.name = modal_form.name.data
            edited_client.phone = modal_form.phone.data
            edited_client.address = modal_form.address.data
            edited_client.notes = modal_form.notes.data
            update_client(edited_client)
            return redirect(url_for('clients_bp.client', client_id=edited_client.id))
        else:
            flash('Дані про клієнта не оновлено.', category='warning')
    # Pre-populate the form from the model. FlaskForm's first positional
    # argument is raw formdata, so the instance must be passed as obj=
    # (the original passed it positionally, which does not fill the fields).
    modal_form = NewClientForm(obj=edited_client)
    return render_template('edit_client.html',
                           form=modal_form,
                           message_title="Редагування інформації про клієнта",
                           client_id=edited_client.id,
                           color="is-link")
@clients_blueprint.route('/clients/<client_id>/delete', methods=['GET', 'POST'])
def delete_client(client_id):
    """Confirm and perform deletion of a client; responds with 404 on unknown ids."""
    # first_or_404 prevents an AttributeError on deleted_client.id/.name below
    # when the id does not exist (matches the `client` view's behavior).
    deleted_client = Client.query.filter_by(id=client_id).first_or_404()
    form = DeleteConfirmForm()
    if request.method == 'POST' and form.validate_on_submit():
        remove_client(deleted_client)
        return redirect(url_for('clients_bp.clients'))
    return render_template('delete_confirm.html',
                           form=form,
                           client_id=deleted_client.id,
                           message_title="Видалення клієнта",
                           message="Ви дійсно бажаєте видалити клієнта " + deleted_client.name + "?")
| 1Lorde/orders-tracker | orders_tracker/blueprints/clients/routes.py | routes.py | py | 4,565 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "orders_tracker.forms.NewClientForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_... |
38899572282 | import pygame
import time
import random
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30)
screen = pygame.display.set_mode((1280,720))
done = False
p1_x=30
p1_y= screen.get_height()-60
#make player
class Player:
    """The player-controlled catcher: a 60x60 white square clamped to the window."""

    STEP = 2  # horizontal speed in pixels per frame

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def moveLeft(self):
        # Stop at the left edge of the window.
        if self.x > 0:
            self.x -= self.STEP

    def moveRight(self):
        # Stop one sprite-width (60px) short of the right edge.
        if self.x < screen.get_width() - 60:
            self.x += self.STEP

    def draw(self):
        pygame.draw.rect(screen, (255, 255, 255),
                         pygame.Rect(self.x, self.y, 60, 60))
class Egg:
    """A falling egg that accelerates each frame and respawns at the top on landing."""

    def __init__(self):
        # Random horizontal spawn across the window; fall speed resets to 1.
        self.x = random.randint(0, screen.get_width() - 30)
        self.y = 0
        self.incr = 1

    def update(self):
        # BUGFIX: the original tested `self.y == screen.get_height()-30`.
        # Because incr grows by 10% per frame, y quickly takes non-integer
        # values and never equals the floor exactly, so the egg fell forever.
        # >= respawns the egg once it reaches or passes the floor.
        if self.y >= screen.get_height() - 30:
            self.__init__()
        self.y += self.incr
        self.incr *= 1.1

    def draw(self):
        pygame.draw.rect(screen, (255, 255, 255),
                         pygame.Rect(self.x, self.y, 30, 30))
p1 = Player(p1_x,p1_y)
egg1=Egg()
# Main loop: handle quit events, move the player with A/D, redraw the frame.
# NOTE(review): there is no Clock.tick here, so the loop runs uncapped and
# game speed depends on the host CPU.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    pressed=pygame.key.get_pressed()
    # movement: held keys repeat every frame
    if pressed[pygame.K_a] : p1.moveLeft()
    if pressed[pygame.K_d] : p1.moveRight()
    screen.fill((0,0,0))
    #screen.blit(score, ((screen.get_width()/2)-20,0))
    p1.draw()
    egg1.draw()
    egg1.update()
    pygame.display.flip()
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.font.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
... |
6815148797 | import pygame
import numpy as np
import pickle
import datetime
import os
from snake import Snake
from map import Map
from agent import Agent
# Version 1.1
# Model / data persistence locations.
MODEL_DIR = "models"
MODEL_NAME = "model_1v7" # Name of the pickle file in which we store our model.
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_NAME)
# MODEL_NAME = "models/Best_model" # Name of the pickle file in which we store our model.
GATHER_DATA = True  # also log (state, Q-value) pairs for later analysis
DATA_DIR = r"..\data"
DATA_PATH = os.path.join(DATA_DIR, f"data_{MODEL_NAME}_dis")
# Toggle between training (no rendering, many generations) and evaluation.
learn = 1
if learn:
    VISUAL = False
    GENERATIONS = 50
    save = False
    epsilon_dec = 0.000_03
else:
    VISUAL = True
    GENERATIONS = 30
    save = False
    epsilon_dec = 0.1
MAX_ITERATIONS = 7_000 # max iterations in game # Dropped to 5_000!!!
MIN_EPSILON = 0.0001
# NOTE(review): this unconditionally overwrites the epsilon_dec chosen in the
# learn/eval branch above, so the branch value is dead — confirm intent.
epsilon_dec = 0.1
GAMMA = 0.4           # discount factor in the Bellman update
LEARNING_RATE = 0.2
# NOTE(review): MIN_LEARNING_RATE > LEARNING_RATE; only used in commented-out
# decay code inside main() — verify.
MIN_LEARNING_RATE = 0.3
def redraw_window(win: pygame.display.set_mode, snake: Snake, playground: Map):
    """Clear the window, draw the playground and the snake, then present the frame."""
    background = (25, 119, 207)
    win.fill(background)
    # Draw order matters: the snake is painted on top of the playground.
    playground.draw(win)
    snake.draw(win, playground)
    pygame.display.update()
def main(visual: bool = True):
    """Train (or visually run) the Q-learning snake agent.

    Loads or initializes the Q-table and gameplay log, runs GENERATIONS
    episodes of at most MAX_ITERATIONS steps each, applies the Bellman update
    after every step, and periodically persists model/data when `save` is on.

    Args:
        visual (bool): render with pygame and slow the loop to playable speed.
    """
    start = datetime.datetime.now()
    st2 = datetime.datetime.now()  # timer for the per-100-generation progress print
    best_score = 0
    best_time = 0
    # MODEL: resume from disk if present, otherwise start a fresh Q-table
    # (2^11 binary states x 3 actions).
    if os.path.isfile(MODEL_PATH):
        with open(MODEL_PATH, 'rb') as f:
            q_table, generation = pickle.load(f)
    else:
        if not os.path.isdir(MODEL_DIR):
            os.mkdir(MODEL_DIR)
        q_table = np.zeros((2 ** 11, 3))
        generation = 0
    # Gameplay log: appended to across runs when GATHER_DATA is enabled.
    if os.path.isfile(DATA_PATH):
        with open(DATA_PATH, 'rb') as f:
            gameplay_data = pickle.load(f)
    else:
        if not os.path.isdir(DATA_DIR):
            os.mkdir(DATA_DIR)
        gameplay_data = []
    # Classes
    agent = Agent()
    playground = Map()
    snake = Snake()
    playground.random_snack_pos(snake)
    # PyGame
    if visual:
        win = pygame.display.set_mode((playground.map_size, playground.map_size))
        clock = pygame.time.Clock()
        pygame.display.set_caption("Snake Game, Generation: 0")
    generations_rewards = []
    generation_time = []
    for gen in range(GENERATIONS):
        generation += 1
        current_state = agent.get_state(snake, playground)
        current_binary_state = agent.make_binary(current_state)
        # It should work as proper reset, but who knows...
        snake.reset()
        playground.reset()
        # game_over = False
        generation_reward = 0
        iteration = 0
        # Epsilon-greedy exploration rate decays with the (persistent)
        # generation counter, floored at MIN_EPSILON.
        # epsilon = max(MIN_EPSILON, 0.9 - generation * 0.0008)
        epsilon = max(MIN_EPSILON, 0.9 - generation * epsilon_dec)
        # LEARNING_RATE = max(0.95 - generation * 0.000_000_004, MIN_LEARNING_RATE)
        if visual:
            pygame.display.set_caption(f"Snake Game, Generation: {generation}")
        for iteration in range(MAX_ITERATIONS):
            if visual:
                clock.tick(30)
                pygame.time.delay(20)
                redraw_window(win, snake, playground)
            # Action ==> 0 - straight, 1 - left, 2 - right
            # Explore with probability epsilon, otherwise exploit the Q-table.
            if np.random.uniform(0, 1) < epsilon:
                action = np.random.randint(3)
            else:
                action = np.argmax(q_table[int(current_binary_state, 2), :])
                probability = max(q_table[int(current_binary_state, 2), :])
            # NOTE(review): `probability` is only assigned on the exploit
            # branch; if the very first step explores, the append below raises
            # NameError — confirm whether this is relied on not to happen.
            if GATHER_DATA:
                gameplay_data.append([current_state, probability])
            snake.move_action(action, visual)
            playground.random_snack_pos(snake)
            # Observe the next state and the step reward.
            next_state = agent.get_state(snake, playground)
            next_binary_state = agent.make_binary(next_state)
            game_over, reward = snake.collision(playground, add_snack=True)
            # Standard Q-learning (Bellman) update for (state, action).
            bellman_equation = (1 - LEARNING_RATE) * q_table[int(current_binary_state, 2), action] + LEARNING_RATE *\
                               (reward + GAMMA * max(q_table[int(next_binary_state, 2), :]))
            # bellman_equation = max(q_table[int(next_binary_state, 2), :]) + LEARNING_RATE * (reward + GAMMA + (
            #     max(q_table[int(next_binary_state, 2), :]) - q_table[int(current_binary_state, 2), action]))
            q_table[int(current_binary_state, 2), action] = bellman_equation
            generation_reward += reward
            if game_over:
                # Track bests; optionally checkpoint a record-setting model.
                if playground.score > best_score:
                    best_score = playground.score
                    if best_score > 10 and save:
                        with open(f"models/Best_model", "wb") as f:
                            data = (q_table, generation)
                            pickle.dump(data, f)
                if iteration > best_time:
                    best_time = iteration
                break
            # current_state = next_state
            current_binary_state = next_binary_state
        if visual:
            print(f"SCORE: {playground.score}")
            print(f"Reward: {reward}, time: {iteration} iterations")
        generations_rewards.append(generation_reward)
        generation_time.append(iteration)
        # print(f"Rewards : {generations_rewards}")
        # print(f"Time : {generation_time}")
        # Periodic progress report and checkpoint every 100 generations.
        if generation % 100 == 0:
            print(generation, datetime.datetime.now() - st2, best_score, best_time)
            if save:
                with open(MODEL_PATH, "wb") as f:
                    data = (q_table, generation)
                    pickle.dump(data, f)
                if GATHER_DATA:
                    with open(DATA_PATH, "wb") as f:
                        pickle.dump(gameplay_data, f)
            st2 = datetime.datetime.now()
    print(f"\nTime of leaning last: {datetime.datetime.now() - start}, for {GENERATIONS} generations.")
    print(f"Best score was: {best_score} and best time was {best_time}.")
    print(f"Age: {generation} generations.")
    # Final persistence of model and gathered gameplay data.
    if save:
        with open(MODEL_PATH, "wb") as f:
            data = (q_table, generation)
            pickle.dump(data, f)
    if GATHER_DATA:
        with open(DATA_PATH, "wb") as f:
            pickle.dump(gameplay_data, f)
if __name__ == "__main__":
main(VISUAL)
| Dawir7/Reinforcement-Learing-Bot-to-play-Snake-game | Reinforcement_learninig/main_learning.py | main_learning.py | py | 6,247 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
7804756691 | from jinja2 import Environment, FileSystemLoader
import yaml
import os.path
# Jinja2 environment rooted at the current working directory.
ENV = Environment(loader=FileSystemLoader('./'))

script_path = 'SCRIPTS/'
script = os.path.join(script_path, 'script.txt')

# safe_load avoids arbitrary-object construction from the YAML file:
# plain yaml.load() without an explicit Loader is unsafe and has been
# deprecated since PyYAML 5.1.
with open("config.yaml") as config_file:
    yaml_dict = yaml.safe_load(config_file)

# Render the template with the parsed config and write the result.
template = ENV.get_template("template.text")
with open(script, 'w') as outfile:
    outfile.write(template.render(config=yaml_dict))
| dancwilliams/Prefix_List_Script | EXTRA_SCRIPTS/MANUAL_CREATE/generate_config.py | generate_config.py | py | 416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jinja2.Environment",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.pat... |
811133362 | import pygame
from pygame.locals import *
from entities import User, Enemy
from fonctions import *
from stage import *
from hud import *
import random
import time
import zmq
import threading
from stage import *
from tkinter import *
from playsound import playsound
def choix1():
    """Select character 1 and render button 1 as the pressed one."""
    global perso
    perso = 1
    for widget, relief in ((button1, SUNKEN), (button2, RAISED), (button3, RAISED)):
        widget.configure(relief=relief)
def choix2():
    """Select character 2 and render button 2 as the pressed one."""
    global perso
    perso = 2
    for widget, relief in ((button1, RAISED), (button2, SUNKEN), (button3, RAISED)):
        widget.configure(relief=relief)
def choix3():
    """Select character 3 and render button 3 as the pressed one."""
    global perso
    perso = 3
    for widget, relief in ((button1, RAISED), (button2, RAISED), (button3, SUNKEN)):
        widget.configure(relief=relief)
# Character-selection menu (tkinter), shown before the game starts.
perso=1
fen=Tk()
fen.geometry("250x300+200+0")
fen.configure(bg = "white")
user1=PhotoImage(file='images/user1.gif')
user2=PhotoImage(file='images/user2.gif')
user3=PhotoImage(file='images/user3.gif')
fen.title("LE JEU")
Label(fen,text=" ",bg="white").grid(row=1,column=0)
Label(fen,text="LE JEU \n\n ",bg="white").grid(row=0,column=2)
Button(fen,text="Jouer ",bg="white",command=fen.destroy).grid(row=1,column=2)
Label(fen,text="\n"*3,bg="white").grid(row=2,column=1)
button1=Button(fen, image=user1,bg="white",command=choix1, relief=SUNKEN)
button1.grid(row=3,column=1)
button2=Button(fen, image=user2,bg="white",command=choix2)
button2.grid(row=3,column=2)
button3=Button(fen, image=user3,bg="white",command=choix3)
button3.grid(row=3,column=3)
Label(fen,text="\n"*3,bg="white").grid(row=4,column=1)
Button(fen,text="Quitter",command=exit).grid(row=5,column=2)
jeu=0
playsound('musique_menu.mp3',block = False)
fen.mainloop()  # blocks until "Jouer" destroys the window
# Game window and entity setup.
gameOver = False
pygame.init()
screen = pygame.display.set_mode((620,480))
pygame.display.set_caption('User 1')
screen.fill((50,60,50))
pygame.display.update()
user = User(screen,1,perso)   # local player, chosen sprite
coop = User(screen,2,3)       # co-op peer, mirrored from the network
hud = HUD(screen)
# ZeroMQ PAIR socket for the two-player co-op link.
context = zmq.Context()
usersChan = context.socket(zmq.PAIR)
# NOTE(review): .format(coop.id) is a no-op — the string has no placeholder.
usersChan.bind("tcp://127.0.0.1:1111".format(coop.id))
murs, enemies, potions, portes, eaus = classic(screen)
def recv(usersChan):
    """Background thread: mirror the co-op peer's state onto local objects.

    Applies the peer's player stats, drops local potions/enemies that no
    longer exist on the peer's side, then redraws the screen. Exits the
    process once the game is over.
    """
    global coop, gameOver, points
    while True:
        if gameOver:
            if points == 16:
                print("WIN !")
            else:
                print("Game Over ! Vous avez {} points".format(points))
            exit()
            return
        try:
            data = usersChan.recv_pyobj(flags=zmq.NOBLOCK)
            coop.pos = data["user"]["pos"]
            coop.vie = data["user"]["vie"]
            coop.attaque = data["user"]["attaque"]
            coop.defense = data["user"]["defense"]
            coop.level = data["user"]["level"]
            coop.xp = data["user"]["xp"]
            # BUGFIX: the original called list.remove() while iterating the
            # same list, which skips the element following each removal and
            # can leave stale potions/enemies alive. Rebuild the lists in
            # place instead (slice assignment keeps other references valid).
            peer_potion_positions = [p["pos"] for p in data["potions"]]
            potions[:] = [potion for potion in potions
                          if potion.pos in peer_potion_positions]
            kept_enemies = []
            for enemy in enemies:
                alive = False
                for e in data["enemies"]:
                    if enemy.pos == e["pos"]:
                        enemy.vie = e["vie"]
                        alive = True
                if alive:
                    kept_enemies.append(enemy)
            enemies[:] = kept_enemies
            refresh()
        except zmq.ZMQError as err:
            # No message pending (NOBLOCK): spin and try again.
            pass
def refresh():
    """Redraw the full frame: background, HUD, map objects, then both players.

    The original drew the players twice (before and after the map objects)
    and presented the frame with both display.flip() and display.update();
    one draw per sprite and one present are sufficient and equivalent on
    screen, since the final draw order is preserved.
    """
    screen.fill((50,60,50))
    hud.show(user, coop)
    for enemy in enemies:
        enemy.show()
    for mur in murs:
        mur.show()
    for potion in potions:
        potion.show()
    for porte in portes:
        porte.show()
    for eau in eaus:
        eau.show()
    # Players last so they render on top of everything else.
    user.show()
    coop.show()
    pygame.display.update()
# Send the initial game state to the co-op peer.
usersChan.send_pyobj(setData(user,coop,murs,potions,portes,eaus,enemies,True))
points = 0
# Background thread that receives the peer's updates.
# NOTE(review): not a daemon thread — shutdown relies on recv() calling exit().
threadRecv = threading.Thread(target=recv, args=(usersChan,))
threadRecv.start()
# Main game loop: handle input, resolve combat/terrain/items, sync the peer.
while not gameOver:
    changement = False
    if user.vie <= 0:
        gameOver = True
    if coop.vie <= 0:
        gameOver = True
    for event in pygame.event.get():
        # Window closed (Alt+F4 / close button).
        if event.type == QUIT:
            gameOver = True
        # Arrow keys move the player one tile.
        if event.type == KEYDOWN:
            action = 1
            if event.key == K_UP:
                coord = (0,-1)
            elif event.key == K_DOWN:
                coord = (0,1)
            elif event.key == K_LEFT:
                coord = (-1,0)
            elif event.key == K_RIGHT:
                coord = (1,0)
            else:
                action = 0
            if action != 0:
                user.mouvement(coord)
                # The two players cannot share a tile: undo the move.
                if user.pos == coop.pos:
                    user.mouvement((-coord[0],-coord[1]))
                # Iterate over a copy: the body may remove defeated enemies.
                for enemy in list(enemies):
                    if enemy.pos == user.pos:
                        # Combat: exchange damage with the enemy on this tile.
                        enemy.vie -= user.attaque + user.arme
                        user.vie -= enemy.defense
                        if user.vie <= 0:
                            user.vie = 0
                            # BUGFIX: was `gameOver == True`, a no-op comparison.
                            gameOver = True
                        if enemy.vie <= 0:
                            user.xp += enemy.level
                            enemies.remove(enemy)
                        # Enemy survived: step back off its tile.
                        else:
                            user.mouvement((-coord[0],-coord[1]))
                if user.xp >= user.level * 2:
                    user.levelUP()
                # Terrain effects: lava hurts, walls block, pont/levier pass.
                for mur in murs:
                    if mur.pos == user.pos :
                        if mur.genre == "lave":
                            user.vie -= 15
                        elif mur.genre == "pont":
                            pass
                        elif mur.genre == "levier":
                            pass
                        else:
                            user.mouvement((-coord[0],-coord[1]))
                for eau in eaus:
                    if eau.pos == user.pos :
                        user.mouvement((-coord[0],-coord[1]))
                # Pick up potions. Iterate over a copy: the body removes the
                # consumed potion from the live list.
                for potion in list(potions):
                    if user.pos == potion.pos:
                        if potion.type == "heal":
                            user.heal()
                        elif potion.type == "atk":
                            user.atk()
                        elif potion.type == "atkboss":
                            for i in range (20):
                                user.atk()
                        elif potion.type == "xp":
                            user.levelUP()
                        potions.remove(potion)
                # Reaching a door loads the next map and scores a point.
                for porte in portes:
                    if porte.pos == user.pos or porte.pos == coop.pos:
                        print("Changement de map")
                        points += 1
                        user.pos = [32,160]
                        coop.pos = [32,192]
                        if points == 1:
                            murs, enemies, potions, portes, eaus = deux(screen)
                        elif points == 2:
                            murs, enemies, potions, portes, eaus = troix(screen)
                        elif points == 15:
                            murs, enemies, potions, portes, eaus = six(screen)
                        elif points == 16:
                            gameOver = True
                        else:
                            murs, enemies, potions, portes, eaus = random.choice([quatre(screen), cinq(screen)])
                        changement = True
    # Push our state to the peer every frame.
    try:
        message = setData(user,coop,murs,potions,portes,eaus,enemies,changement)
        usersChan.send_pyobj(message)
    except zmq.ZMQError as err:
        # BUGFIX: the original concatenated the dict `message` to a str,
        # which raised TypeError inside the handler itself.
        print("Error while trying to send the value {} : {}".format(message, err))
    refresh()
    pygame.display.flip()
    pygame.display.update()
    pygame.time.wait(10)
{
"api_name": "playsound.playsound",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygame.displa... |
43011396057 | """Calculate various statistics for the CEA playerbase, and stores in a spreadsheet.
Attributes:
counts (Counter): counting number of games
EXTRA_GAMES_FILE (str): File to be used if we need to input extra games
K (int): K-value used for elo ratings.
"""
import csv
import json
import os
import re
import string
import sys
import traceback
from datetime import datetime
from datetime import timedelta
from collections import Counter, deque
import mpyq
import sc2reader
import trueskill
import glicko2
import cea_team_name_parser
import xlsxwriter
import pandas as pd
from sc2reader.engine.plugins import APMTracker, SelectionTracker # unused
from consts import SEASONS, STARTING_DATE, WEEKS
from setup_replays import find_team, replay_directory, teams_file
from zeroNumber import zeroNumber
from elo import EloRating
# Register the APM tracker so replays expose per-player APM data.
sc2reader.engine.register_plugin(APMTracker())
# Placeholder team name for players whose team cannot be resolved.
UNKNOWN_TEAM = "TEAM_NOT_KNOWN"
EXTRA_GAMES_FILE = "extra_games.csv"
# K is used for elo.
K=80
# NOTE(review): `counts` is not referenced in the visible code — confirm use.
counts = Counter()
class PlayerObject:
    """Aggregated stats and ratings (Elo, Glicko-2, TrueSkill) for one player."""

    def __init__(self, name, season, team):
        self.name = name
        self.aliases = set()            # alternate account names
        self.wins = 0
        self.rating = 1000              # Elo rating
        self.glicko = glicko2.Player()  # per-game Glicko-2 rating
        # long term glicko rating, updated once per season batch
        self.glicko_longterm = glicko2.Player()
        self.trueskill = trueskill.Rating()
        self.peak_rating = 1000
        self.games = []                 # list of GameObject
        self.teams = {season : team}    # season index (0 = most recent) -> team
        self.zeroNumber = sys.maxsize   # degrees-of-separation metric; maxsize = unset

    losses = property(fget=lambda self: len(self.games) - self.wins)
    # default=0 keeps mmr from raising ValueError for a player with no
    # recorded games (the original crashed on the empty generator).
    mmr = property(fget=lambda self: max((game.mmr for game in self.games), default=0))

    def setRating(self, rating):
        """Set the Elo rating, tracking the all-time peak."""
        self.rating = rating
        if rating > self.peak_rating:
            self.peak_rating = rating

    def isActive(self):
        """A player is active if they have a team in the current season (0)."""
        return 0 in self.teams

    def addTeam(self, season, team):
        self.teams[season] = team

    @property
    def race(self):
        """Most frequently played race; empty string when no games are recorded."""
        race_counter = Counter([game.race for game in self.games])
        if not race_counter:
            # Guard: most_common(1)[0] would raise IndexError on no games.
            return ""
        return race_counter.most_common(1)[0][0]

    @property
    def opponents_beaten(self):
        return [game.opponent for game in self.games if game.win]

    @property
    def opponents_lost_to(self):
        return [game.opponent for game in self.games if not game.win]

    @property
    def mostRecentTeam(self):
        # Seasons are indexed with 0 = most recent, so the smallest key wins.
        return self.teams[sorted(list(self.teams.keys()))[0]]

    def addGames(self, game):
        """Record a finished game and update the win counter."""
        self.games.append(game)
        if game.win:
            self.wins += 1
class GameObject:
    """Record of a single game from one player's perspective.

    Attributes:
        opponent (str): name of the opponent
        race (str): selected race
        opponent_race (str): opponent's selected race
        map (str): map name
        mmr: ladder MMR reported for the player
        win (bool): whether this player won
        duration: length of the game in seconds
        season (int): season index, 0 = most recent
        glicko_rating / glicko_rd: long-term Glicko snapshot at game time
        opp_glicko_rating / opp_glicko_rd: opponent's snapshot at game time
    """

    def __init__(self, opponent, race, opponent_race, map_name, mmr, win, duration,
                 season, glicko_longterm, opp_glicko_longterm):
        self.opponent = opponent
        self.race = race
        self.opponent_race = opponent_race
        self.map = map_name
        self.mmr = mmr
        self.win = win
        self.duration = duration
        self.season = season
        # Snapshot the ratings now — the rating objects mutate as later
        # games are processed.
        self.glicko_rating = glicko_longterm.getRating()
        self.glicko_rd = glicko_longterm.getRd()
        self.opp_glicko_rating = opp_glicko_longterm.getRating()
        self.opp_glicko_rd = opp_glicko_longterm.getRd()
# Add in extra games
# Games is 2d array: each one has [date, player1, player2, win]
def input_extra_elo(players, games, current_date, season):
    """Consume manually-entered games dated before `current_date`.

    Args:
        players (dict[str, PlayerObject]): registry keyed by lowercase name;
            new players are added in place.
        games (deque): each entry is [date "m/d/Y", player1, player2, winner];
            processed entries are popped from the left.
        current_date (datetime): only games strictly before this are applied.
        season (int): season index, 0 is most recent.

    Note: relies on the module-level `teams` global for team lookup.
    """
    while games and games[0][0] and current_date > datetime.strptime(games[0][0], "%m/%d/%Y"):
        # ISSUE: doesn't resolve aliases, doesn't work if player has not already been processed.
        player_names = [games[0][1].lower(), games[0][2].lower()]
        for index, player in enumerate(player_names):
            # Register unknown players on the fly.
            if player not in players:
                players[player] = PlayerObject(player,
                                               season, find_team(teams, player))
        for index, player in enumerate(player_names):
            # Manual games carry no replay metadata: empty races/map, mmr 0.
            gameObject = GameObject(opponent=player_names[1-index], race="", opponent_race="", map_name="", mmr=0,
                                    win=games[0][3].lower() == player,
                                    duration=0, season=season,
                                    glicko_longterm = players[player].glicko_longterm,
                                    opp_glicko_longterm = players[player_names[1 - index]].glicko_longterm)
            players[player].addGames(gameObject)
        winner = games[0][3].lower() == player_names[0]
        update_rating(players[player_names[0]], players[player_names[1]], winner)
        games.popleft()
def update_rating(player1, player2, win):
    """Update Elo, Glicko-2 and TrueSkill ratings of both players after a game.

    Args:
        player1 (PlayerObject):
        player2 (PlayerObject):
        win (bool): whether player 1 won
    """
    # Update Elo rating
    A, B = EloRating(player1.rating, player2.rating, K, win)
    player1.rating = A
    player2.rating = B
    # Update Glicko-2 rating.
    # BUGFIX: snapshot both ratings before updating — the original updated
    # player1 first and then fed player1's *already-updated* rating into
    # player2's update, biasing player2's new rating.
    r1, rd1 = player1.glicko.getRating(), player1.glicko.getRd()
    r2, rd2 = player2.glicko.getRating(), player2.glicko.getRd()
    player1.glicko.update_player([r2], [rd2], [win])
    player2.glicko.update_player([r1], [rd1], [not win])
    # Update Trueskill rating: rate_1vs1 expects (winner, loser).
    winner, loser = trueskill.rate_1vs1(player1.trueskill, player2.trueskill) if win else trueskill.rate_1vs1(player2.trueskill, player1.trueskill)
    player1.trueskill = winner if win else loser
    player2.trueskill = loser if win else winner
def update_glicko_longterm(players):
    """Batch-update every player's long-term Glicko rating, season by season.

    For each season (oldest to newest), a player's rating is updated once
    against all opponent-rating snapshots recorded in that season's games;
    players with no games that season are marked as not competing (which
    inflates their rating deviation).

    Args:
        players (dict[str, PlayerObject]): registry keyed by lowercase name.
    """
    # Iterate through seasons in reverse order (oldest to newest)
    for season in reversed(range(len(SEASONS))):
        for player in players.values():
            # Gather the opponent rating snapshots stored on each game.
            opp_ratings = []
            opp_rds = []
            win = []
            for game in player.games:
                if game.season == season:
                    opp_ratings.append(game.opp_glicko_rating)
                    opp_rds.append(game.opp_glicko_rd)
                    win.append(game.win)
            if not opp_ratings:
                player.glicko_longterm.did_not_compete()
            else:
                player.glicko_longterm.update_player(opp_ratings, opp_rds, win)
def load_value(replay_filename, value):
    """Read a per-player metadata field (e.g. 'MMR') from an SC2 replay.

    Args:
        replay_filename (str): path to the .SC2Replay file.
        value (str): key to read from each player's metadata dict.

    Returns:
        list: the field for players 0 and 1, defaulting to 0 when absent.
    """
    archive = mpyq.MPQArchive(replay_filename)
    jsondata = archive.read_file("replay.gamemetadata.json").decode("utf-8")
    obj = json.loads(jsondata)
    # dict.get replaces the original membership-test-then-index dance.
    return [obj['Players'][i].get(value, 0) for i in (0, 1)]
def calculate_elo(directory, players, teams, aliases, season, games):
    """Process every replay in `directory` chronologically, updating ratings.

    Each replay adds a GameObject to both participants (registering new
    players as needed) and feeds the result through update_rating. Extra
    manually-recorded games dated before a replay are applied first.

    Args:
        directory (str): folder containing .SC2Replay files for one season.
        players (dict[str, PlayerObject]): registry, mutated in place.
        teams: team lookup structure from cea_team_name_parser.
        aliases (dict): lowercase alias -> canonical name.
        season (int): season index, 0 is most recent.
        games (deque): extra games consumed by input_extra_elo.
    """
    # Collect replays and sort them by in-game date so ratings evolve in order.
    matcher = re.compile(r'\.SC2Replay$', re.IGNORECASE)
    def myFunc(replay):
        replay_file = sc2reader.load_replay(os.path.join(directory, replay), load_level=2)
        return replay_file.date
    replays = [file for file in os.listdir(directory) if matcher.search(file)]
    replays.sort(key=myFunc)
    print("Found %d replays to scan" % len(replays))
    for replay in replays:
        try:
            replay_filename = os.path.join(directory, replay)
            replay_file = sc2reader.load_replay(replay_filename, load_level=2)
            player_list = replay_file.players
            player_names = [player_list[0].name,
                            player_list[1].name]
            player_mmrs = load_value(replay_filename, 'MMR')
            # Apply any manual games that happened before this replay.
            input_extra_elo(players, games, replay_file.date, season)
            # ignore 2v2
            if len(replay_file.players) > 2:
                print(replay)
                continue
            # resolve aliases for players who play under several accounts
            for i in range(len(player_names)):
                if player_names[i].lower() in aliases:
                    player_names[i] = aliases[player_names[i].lower()].lower()
                else:
                    player_names[i] = player_names[i].lower()
            # Replay with no recorded winner: skip it.
            if replay_file.winner is None:
                print(replay)
                continue
            # Add them to the player list if they're not there
            for index, player in enumerate(player_list):
                player_name = player_names[index]
                if player_name not in players:
                    players[player_name] = PlayerObject(player.name,
                                                        season, find_team(teams, player.name))
                else:
                    players[player_name].addTeam(season, find_team(teams, player.name))
            # Loop again to add the games
            for index, player in enumerate(player_list):
                player_name = player_names[index]
                gameObject = GameObject(opponent=player_names[1-index],
                                        race = player.pick_race,
                                        opponent_race=player_list[1-index].pick_race,
                                        map_name = replay_file.map_name,
                                        mmr = player_mmrs[index],
                                        win=replay_file.winner.players[0] == player,
                                        duration=replay_file.real_length,
                                        season=season,
                                        glicko_longterm=players[player_name].glicko_longterm,
                                        opp_glicko_longterm=players[player_names[1 - index]].glicko_longterm)
                players[player_name].addGames(gameObject)
            winner = replay_file.winner.players[0] == player_list[0]
            update_rating(players[player_names[0]], players[player_names[1]], winner)
        # NOTE(review): bare except deliberately keeps one corrupt replay
        # from aborting the whole scan; it also swallows KeyboardInterrupt.
        except:
            print("Error processing replay: %s" % replay)
            traceback.print_exc()
def writeProfile(value, workbook, player_dictionary):
    """Write one per-player worksheet (teams, game log, head-to-head records).

    Args:
        value (PlayerObject): the player being written.
        workbook: open xlsxwriter Workbook.
        player_dictionary (dict[str, PlayerObject]): lookup for opponents.

    Returns:
        str: the worksheet name created (used for internal hyperlinks).
    """
    # Worksheet names must be unique; append ' 1' on a collision.
    if value.name not in workbook.sheetnames:
        sheet_name = value.name
    else:
        sheet_name = value.name + ' 1'
    playerWorksheet = workbook.add_worksheet(sheet_name)
    main_sheet = "Main"
    playerWorksheet.write_url(0, 0, f"internal:'{main_sheet}'!A1", string='Back to Main Sheet')
    # Header row and column widths.
    playerWorksheet.write(0, 1, 'Player Name')
    playerWorksheet.write(1, 1, value.name)
    playerWorksheet.set_column(1, 1, max(len('Player Name'), len(value.name))+1)
    playerWorksheet.write(0, 2, 'Teams')
    playerWorksheet.set_column(2, 2, 20)
    playerWorksheet.set_column(3, 4, 12)
    playerWorksheet.write(0, 4, 'Games')
    playerWorksheet.write(0, 5, 'Opponent Team')
    playerWorksheet.set_column(5, 5, 15)
    playerWorksheet.write(0, 6, 'Opponent')
    playerWorksheet.set_column(6, 6, 15)
    playerWorksheet.write(0, 7, 'Player Race')
    playerWorksheet.set_column(7, 7, 8)
    playerWorksheet.write(0, 8, 'Opponent Race')
    playerWorksheet.set_column(8, 8, 8)
    playerWorksheet.write(0, 9, 'Match Result')
    playerWorksheet.set_column(9, 9, 6)
    playerWorksheet.write(0, 10, 'Map')
    playerWorksheet.set_column(10, 10, 20)
    playerWorksheet.write(0, 12, 'Records')
    playerWorksheet.set_column(11, 11, 25)
    # Team history: one row per (season, team).
    index = 1
    for season, team in value.teams.items():
        startIndex = 2
        playerWorksheet.write(index, startIndex, team)
        playerWorksheet.write(index, startIndex + 1, SEASONS[season])
        index += 1
    # Game log, tallying wins/losses per opponent race along the way.
    indexGame = 1
    raceWinCounter = Counter()
    raceLossCounter = Counter()
    for game in value.games:
        win = "Win" if game.win else "Loss"
        if game.opponent_race:
            if game.win:
                raceWinCounter[game.opponent_race] += 1
            else:
                raceLossCounter[game.opponent_race] += 1
        startIndex = 4
        playerWorksheet.write(indexGame, startIndex, SEASONS[game.season])
        if game.season in player_dictionary[game.opponent].teams:
            oppTeam = player_dictionary[game.opponent].teams[game.season]
        else:
            # NOTE(review): "UNKOWN_TEAM" is misspelled and differs from the
            # module-level UNKNOWN_TEAM constant — confirm which is intended.
            oppTeam = "UNKOWN_TEAM"
        playerWorksheet.write(indexGame, startIndex + 1, oppTeam)
        playerWorksheet.write(indexGame, startIndex + 2, player_dictionary[game.opponent].name)
        playerWorksheet.write(indexGame, startIndex + 3, game.race)
        playerWorksheet.write(indexGame, startIndex + 4, game.opponent_race)
        playerWorksheet.write(indexGame, startIndex + 5, win)
        playerWorksheet.write(indexGame, startIndex + 6, game.map)
        indexGame += 1
    # Head-to-head records: only opponents met at least twice are listed.
    opponentsBeaten = Counter(value.opponents_beaten)
    opponentsLostTo = Counter(value.opponents_lost_to)
    indexRecord = 1
    for opponent in set(value.opponents_beaten + value.opponents_lost_to):
        count = 0
        startIndex = 11
        if opponent in opponentsBeaten:
            count += opponentsBeaten[opponent]
        if opponent in opponentsLostTo:
            count += opponentsLostTo[opponent]
        if count >= 2:
            playerWorksheet.write(indexRecord, startIndex, player_dictionary[opponent].name)
            playerWorksheet.write(indexRecord, startIndex+1, "{0}:{1}".format(opponentsBeaten[opponent], opponentsLostTo[opponent]))
            indexRecord += 1
    indexRecord += 1
    # Per-race win:loss summary.
    # NOTE(review): startIndex here comes from the loop above and is unbound
    # when a player has zero opponents — confirm this cannot occur.
    for race in ['Terran', 'Zerg', 'Protoss', 'Random']:
        playerWorksheet.write(indexRecord, startIndex, "vs " + race)
        playerWorksheet.write(indexRecord, startIndex + 1, "{0}:{1}".format(raceWinCounter[race], raceLossCounter[race]))
        indexRecord += 1
    return sheet_name
def write_profiles(player_dictionary):
    """Write one worksheet per player into cea_season_stats.xlsx.

    Args:
        player_dictionary (dict[str, PlayerObject]): registry keyed by name.
    """
    workbook = xlsxwriter.Workbook('cea_season_stats.xlsx')
    # The original maintained an `index` counter here that was never read.
    for value in player_dictionary.values():
        writeProfile(value, workbook, player_dictionary)
    workbook.close()
def make_csv(player_dictionary):
    """Write the main stats spreadsheet (cea_season_stats.xlsx).

    Produces a "Main" sheet with one row per player (team, record, ratings,
    opponents) and, for active/frequent players, a hyperlinked per-player
    profile sheet via writeProfile.

    Args:
        player_dictionary (dict[str, PlayerObject]): registry keyed by name.
    """
    # calculate zero number (degrees of separation from the reference player)
    maxPlayer = zeroNumber(player_dictionary)
    headers_arr = ["Team Name", "Name", "Wins", "Losses", "Elo (avg=1000)", "Trueskill Rating (avg=25)", "Peak MMR", maxPlayer + " Number", "Active", "Race",
                   "Players Defeated", "Players Lost To"]
    workbook = xlsxwriter.Workbook('cea_season_stats.xlsx')
    worksheet1 = workbook.add_worksheet("Main")
    worksheet1.write_row(0, 0, headers_arr)
    worksheet1.freeze_panes(1, 0)
    # worksheet1.autofilter('A1:L500')
    index = 0
    for key, value in player_dictionary.items():
        new_entry = []
        # Team / Name
        new_entry.append(value.mostRecentTeam)
        new_entry.append(value.name)
        # Wins
        new_entry.append(int(value.wins))
        # Losses
        new_entry.append(int(value.losses))
        # Elo
        new_entry.append(int(value.rating))
        # Glicko-2
        # new_entry.append("{} ± {}".format(int(value.glicko.getRating()), int(value.glicko.getRd())) )
        # Trueskill Rating
        new_entry.append("{:.2f} ± {:.1f}".format(value.trueskill.mu, value.trueskill.sigma))
        # MMR
        new_entry.append(int(value.mmr))
        # Zero number: sys.maxsize means "unset", shown as an empty cell.
        zeroNum = int(value.zeroNumber) if value.zeroNumber < sys.maxsize else ''
        new_entry.append(zeroNum)
        new_entry.append("Yes" if value.isActive() else "No")
        # Race
        new_entry.append(value.race)
        # APM
        # new_entry.append(int(value.apm))
        # Opponent names sorted by current Elo (beaten: strongest first,
        # lost to: weakest first).
        def opponent_func(opponents_list, descending):
            new_opponents_list = [opp_nickname for opp_nickname in opponents_list]
            new_opponents_list = sorted(new_opponents_list, key=lambda item: (
                player_dictionary[item].rating), reverse=descending)
            new_opponents_list = [player_dictionary[opponent].name for opponent in new_opponents_list]
            return new_opponents_list
        opponents_beaten = opponent_func(value.opponents_beaten, True)
        opponents_lost_to = opponent_func(value.opponents_lost_to, False)
        # Opponents beaten / lost to
        new_entry.append(" ; ".join(opponents_beaten))
        new_entry.append(" ; ".join(opponents_lost_to))
        worksheet1.write_row(index + 1, 0, new_entry)
        # Only active players (or last-season players with >= 5 games) get a
        # dedicated profile sheet and a hyperlink from their name cell.
        if 0 in value.teams or (1 in value.teams and len(value.games) >= 5):
            playerSheet = writeProfile(value, workbook, player_dictionary)
            worksheet1.write_url(index + 1, 1, f"internal:'{playerSheet}'!A1", string=value.name)
        index += 1
    # Color-scale the Elo column for quick visual comparison.
    worksheet1.conditional_format('E2:E500', {'type': '3_color_scale'})
    print("Done creating CSV")
    workbook.close()
if __name__ == "__main__":
    players = {}
    extra_games = cea_team_name_parser.init_extra_games(EXTRA_GAMES_FILE)
    # Configure TrueSkill: SC2 1v1 games cannot draw.
    trueskill.setup(draw_probability=0)
    # Process seasons oldest-to-newest so ratings accumulate chronologically.
    for season in reversed(range(len(SEASONS))):
    #for season in [3]:
        teams, aliases = cea_team_name_parser.init_dictionary(teams_file(season))
        calculate_elo(replay_directory(season), players,
                      teams, aliases, season, extra_games)
    # Flush any remaining manual games dated up to today into season 0.
    input_extra_elo(players, extra_games, datetime.today(), 0)
    make_csv(players)
    #write_profiles(players)
| carsonhu/cea-elo | calculate_elo.py | calculate_elo.py | py | 16,986 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "sc2reader.engine.register_plugin",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sc2reader.engine",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sc2reader.engine.plugins.APMTracker",
"line_number": 31,
"usage_type": "call"
},... |
16816563467 | import json
import requests
from django.http import JsonResponse
from django.shortcuts import render
import numpy as np
# Create your views here.
from django.template.defaultfilters import upper
from django.template.loader import render_to_string
from apps.utils.cases import get_scenario_on_day
from apps.utils.date_adjustment import date_adjustment
def home(request):
    """Render the country list, optionally filtered by the ?q= substring.

    AJAX requests receive a JSON payload with the rendered results partial
    so the list can refresh without a full page load.
    """
    countries = requests.get('https://corona.lmao.ninja/countries').json()
    url_parameter = request.GET.get("q")
    # Idiomatic None test (original used `!= None`). Case-insensitive match.
    if url_parameter is not None:
        countries = [ct for ct in countries if upper(url_parameter) in upper(ct['country'])]
    if request.is_ajax():
        html = render_to_string(
            template_name="countries-results-partial.html",
            context={"dados": countries}
        )
        return JsonResponse(data={"html_from_view": html}, safe=False)
    return render(request, 'home.html', {'dados': countries})
def historico(request):
    """Country history view: AJAX returns a rendered partial plus a chart config as JSON."""
    if request.is_ajax():  # NOTE(review): is_ajax() was removed in Django 4 - verify Django version
        context = {}
        selected_country = request.GET.get('sortBy')
        historic = requests.get(f'https://corona.lmao.ninja/v2/historical/{selected_country}').json()
        # timeline maps date-string -> cumulative count
        context['dates'] = historic['timeline']['cases']
        context['cases'] = list(context['dates'].values())
        context['casesOnDay'] = get_scenario_on_day(context['cases'])
        context['deaths'] = list(historic['timeline']['deaths'].values())
        context['deathsOnDay'] = get_scenario_on_day(context['deaths'])
        context['historic'] = historic
        context['adjusted_dates'] = [date_adjustment(date) for date in historic['timeline']['cases'].keys()]
        html = render_to_string(
            template_name="countries-historical-partial.html", context=context
        )
        data_dict = {"html_from_view": html}
        # One chart point per date: per-day death value labelled with the adjusted date.
        valores = [{'name': context['adjusted_dates'][i], 'y': context['deathsOnDay'][i]}
                   for i in range(len(context['dates']))]
        chart = {
            'chart': {'type': 'column'},
            'title': {'text': 'Impacto de Mortes por Corona'},
            'series': [{
                'name': 'Número de vítimas',
                'data': valores
            }],
            'xAxis': {
                'categories': context['adjusted_dates']
            }
        }
        data_dict['html_to_chart'] = chart
        return JsonResponse(data=data_dict, safe=False)
    # Fetch the country list only for the non-AJAX page render; the original
    # fetched it unconditionally, wasting a network round-trip on every AJAX call.
    countries = requests.get('https://corona.lmao.ninja/countries').json()
    return render(request, 'historic.html', {'countries': countries})
| Akijunior/corona-relatorio | src/apps/core/views.py | views.py | py | 2,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.template.defaultfilters.upper",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 28,
"usage_type": "call"
},
... |
16106099445 | #import logging
class stopwatch:
    """Context manager that logs a start message on entry and the elapsed time on exit.

    usage:
      swgen = stopwatch.template("[INTEGRATION]")
      ...
      with swgen("Running xxx") as _:
          run_stuff()
      with swgen("Finalizing xxx") as _:
          finish_stuff()
    """
    def __init__(self, message, logger):
        self.logger = logger
        self.pre_message = message
        # Lower-case the first letter for the "Finished ..." message;
        # single-character messages are kept as-is.
        if len(message) > 1:
            self.post_message = message[0].lower() + message[1:]
        else:
            self.post_message = message
    def __enter__(self):
        from time import time
        self.logger.info(self.pre_message)
        self.timer = time()
        return self
    def tqdm_range(self, item_list, **kwargs):
        """Wrap item_list in a tqdm progress bar labelled with the start message."""
        from tqdm.auto import tqdm
        return tqdm(item_list, desc=self.pre_message, **kwargs)
    def tqdm(self, **kwargs):
        """Return a bare tqdm progress bar labelled with the start message."""
        # BUG FIX: the original called `tqdm.tqdm(...)` without any `tqdm` in
        # scope (the import in tqdm_range is function-local), raising NameError
        # at call time. Import locally, matching tqdm_range.
        from tqdm.auto import tqdm
        return tqdm(desc=self.pre_message, **kwargs)
    def __exit__(self, exc_type, exc_val, exc_tb):
        from time import time
        delta = time() - self.timer
        self.logger.info("Finished %s in %.2f seconds" % (self.post_message, delta))
    def template(logname : str = "benj", level=None):
        """Return a factory `msg -> stopwatch` that shares one configured logger.

        Deliberately has no `self`: it is called as `stopwatch.template(...)`.
        """
        import logging
        logger = logging.getLogger(logname)
        if level is not None:
            logging.basicConfig(level=level)
        else:
            logging.basicConfig(level=logging.INFO)
        return lambda msg: stopwatch(msg, logger=logger)
| KellisLab/benj | benj/timer.py | timer.py | py | 1,382 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm.tqdm",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"li... |
21397154599 | import os
import backoff
import pytest
from racetrack_commons.dir import project_root
from racetrack_client.client.deploy import send_deploy_request
from racetrack_client.client_config.auth import set_user_auth
from racetrack_client.client_config.client_config import ClientConfig
from racetrack_client.utils.request import Requests, ResponseError
from racetrack_client.utils.auth import RT_AUTH_HEADER, is_auth_required
from racetrack_commons.entities.dto import EscDto
from racetrack_commons.entities.esc_client import EscRegistryClient
from racetrack_commons.entities.job_client import JobRegistryClient
from e2e.utils import ADMIN_AUTH_TOKEN, INTERNAL_AUTH_TOKEN, PYTHON_PLUGIN_VERSION, _configure_env, _create_esc, _delete_workload, _wait_for_components, _install_plugin
# Suite selection: the tests below are skipped unless TEST_SUITE is 'auth' or 'full'.
TEST_SUITE = os.getenv('TEST_SUITE')
suite_auth = pytest.mark.skipif(
    TEST_SUITE != 'auth' and TEST_SUITE != 'full', reason='TEST_SUITE value != auth,full'
)
@suite_auth
def test_deploy_job_chain():
    """End-to-end: deploy 'adder', then a chain job that calls it; verify ESC-scoped access."""
    _configure_env()
    _wait_for_components()
    _install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
    esc = _create_esc()
    _delete_workload('adder')
    _deploy_and_verify('sample/python-class', 'adder', esc)
    # The admin token must also be able to call the job directly.
    _verify_deployed_job_adder_response('adder', ADMIN_AUTH_TOKEN)
    _delete_workload('python-chain')
    _deploy_and_verify('sample/python-chain', 'python-chain', esc)
    # Finally, bad/missing credentials must be rejected (when auth is enforced).
    _make_wrongly_authenticated_request('adder')
@suite_auth
def test_deploy_unauthenticated():
    """Deploying with an invalid token must be rejected whenever auth is enforced."""
    _configure_env()
    _wait_for_components()
    _install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
    url = os.environ['LIFECYCLE_URL']
    sample = 'sample/python-class'
    print(f'Deploying unauthenticated {sample} job...')
    job_dir = str(project_root() / sample)
    cfg = ClientConfig()
    set_user_auth(cfg, url, 'invalid')
    if not is_auth_required(url):
        # Auth disabled: the deployment should simply succeed.
        send_deploy_request(job_dir, lifecycle_url=url, client_config=cfg, force=True)
        return
    # Auth enabled: the server must reject the bogus credentials.
    with pytest.raises(ResponseError):
        send_deploy_request(job_dir, lifecycle_url=url, client_config=cfg, force=True)
@suite_auth
def test_deploy_wrong_authentication():
    """Deploying with a well-formed but unknown token must fail when auth is enforced."""
    _configure_env()
    _wait_for_components()
    _install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
    url = os.environ['LIFECYCLE_URL']
    sample = 'sample/python-class'
    print(f'Deploying with wrong authentication {sample} job...')
    job_dir = str(project_root() / sample)
    cfg = ClientConfig()
    # Syntactically valid token that no user actually owns.
    bogus_token = "eyJ1c2VybmFtZSI6ICJmb28iLCAidG9rZW4iOiAiOGJjMDkzMGEtNTA2Mi00MWFiLWE4MWQtNDVhNjg0OWIyYjg4In1="
    set_user_auth(cfg, url, bogus_token)
    if not is_auth_required(url):
        send_deploy_request(job_dir, lifecycle_url=url, client_config=cfg, force=True)
        return
    with pytest.raises(ResponseError):
        send_deploy_request(job_dir, lifecycle_url=url, client_config=cfg, force=True)
def _deploy(sample_path: str):
    """Deploy the sample job at *sample_path* using the admin credentials."""
    url = os.environ['LIFECYCLE_URL']
    cfg = ClientConfig()
    set_user_auth(cfg, url, ADMIN_AUTH_TOKEN)
    print(f'Deploying {sample_path} job...')
    job_dir = str(project_root() / sample_path)
    send_deploy_request(job_dir, lifecycle_url=url, client_config=cfg, force=True)
def _deploy_and_verify(sample_path: str, job_name: str, esc: EscDto):
    """Deploy a sample job, grant the ESC access to it, and verify its response and logs."""
    _deploy(sample_path)
    print(f'Allowing a job {job_name} to ESC...')
    erc = EscRegistryClient(auth_token=INTERNAL_AUTH_TOKEN)
    erc.esc_allow_job(esc_id=esc.id, job_name=job_name)
    esc_token = erc.get_esc_auth_token(esc.id)
    if job_name == 'adder':
        _verify_deployed_job_adder_response(job_name, esc_token)
    elif job_name == 'python-chain':
        # The chain job calls 'adder' internally, so allow job-to-job access first.
        frc = JobRegistryClient(auth_token=INTERNAL_AUTH_TOKEN)
        frc.job_allow_job('python-chain', 'adder')
        _verify_deployed_job_chain_adder_reponse(job_name, esc_token)
    _verify_job_logs(job_name, ADMIN_AUTH_TOKEN)
@backoff.on_exception(backoff.fibo, AssertionError, max_value=3, max_time=60, jitter=None)
def _verify_deployed_job_adder_response(job_name: str, auth_token: str):
    """Call the adder job through PUB; [40, 2] must yield 42 (retries on AssertionError)."""
    print(f'Verifying {job_name} job response...')
    pub_url = os.environ['PUB_URL']
    url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
    headers = {RT_AUTH_HEADER: auth_token}
    r = Requests.post(url, json={'numbers': [40, 2]}, headers=headers)
    assert r.ok, f'Job response: {r.status_code} {r.status_reason} for url {r.url}, content: {str(r.content)}'
    output = r.json()
    assert output == 42, 'Unexpected output returned by Job'
@backoff.on_exception(backoff.fibo, AssertionError, max_value=3, max_time=30, jitter=None)
def _verify_deployed_job_chain_adder_reponse(job_name: str, auth_token: str):
    """Call the chain job through PUB; [40, 2.7] is expected to yield 43.

    NOTE: the name keeps the original 'reponse' typo - callers reference it.
    """
    print(f'Verifying {job_name} job response...')
    pub_url = os.environ['PUB_URL']
    url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
    r = Requests.post(url, json={'numbers': [40, 2.7]}, headers={RT_AUTH_HEADER: auth_token})
    assert r.ok, f'Job response: {r.status_code} {r.status_reason} for url {r.url}, content: {str(r.content)}'
    output = r.json()
    assert output == 43, 'Unexpected output returned by Job'
@backoff.on_exception(backoff.fibo, ResponseError, max_value=3, max_time=60, jitter=None)
def _verify_job_logs(job_name: str, user_auth: str):
    """Fetch the job's runtime logs and assert they are non-trivial (retries on ResponseError)."""
    print(f'Verifying {job_name} logs...')
    frc = JobRegistryClient(auth_token=user_auth)
    logs = frc.get_runtime_logs(job_name, 'latest')
    assert len(logs) > 1, 'Unexpected short log from Job'
def _make_wrongly_authenticated_request(job_name: str):
    """Call the job with a bad token and with no token; expect 401 iff auth is enforced."""
    print(f'Verifying requests without authentication to {job_name}...')
    pub_url = os.environ['PUB_URL']
    url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
    lifecycle_url = os.environ['LIFECYCLE_URL']
    expected = 401 if is_auth_required(lifecycle_url) else 200
    # First a bogus token value, then no auth header at all.
    for headers in ({RT_AUTH_HEADER: 'MrNobody'}, {}):
        r = Requests.post(url, json={'numbers': [40, 2]}, headers=headers)
        assert r.status_code == expected
| TheRacetrack/racetrack | tests/e2e/test_auth.py | test_auth.py | py | 6,570 | python | en | code | 27 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.mark.skipif",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "e2e.utils._configure_en... |
12836912861 | import sys
from typing import Optional
import PySide6
from PySide6 import QtWidgets
from qt_material import QtStyleTools, list_themes
from safebox.gui.widgets import cycle_generator, CreatorWidget
class MainWindow(QtWidgets.QMainWindow, QtStyleTools):
    """Main window: applies a qt-material theme and hosts the CreatorWidget."""
    def __init__(self, parent: Optional[PySide6.QtWidgets.QWidget] = ...,
                 flags: PySide6.QtCore.Qt.WindowFlags = ...) -> None:
        # NOTE(review): the Ellipsis defaults look copied from a type stub, and
        # parent/flags are never forwarded to super() - confirm this is intended.
        super().__init__()
        self.themes = cycle_generator(list_themes())  # endless iterator over available themes
        self.apply_stylesheet(self, "dark_teal.xml")  # initial theme
        self.setCentralWidget(CreatorWidget(parent=self))
    def change_theme(self):
        """Advance to the next qt-material theme."""
        self.apply_stylesheet(self, next(self.themes))
if __name__ == "__main__":
    # Standard Qt bootstrap: one QApplication, one main window, run the event loop.
    app = QtWidgets.QApplication(sys.argv)
    main_window= MainWindow()
    main_window.show()
    sys.exit(app.exec())
{
"api_name": "PySide6.QtWidgets.QMainWindow",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtWidgets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "qt_material.QtStyleTools",
"line_number": 9,
"usage_type": "name"
},
{
"api_na... |
36388156115 | from typing import Union
import psutil
def get_cpu_temp() -> Union[float, None]:
    """Read the CPU temperature (degrees Celsius) from sysfs, or None if unavailable.

    The kernel exposes millidegrees Celsius as a plain integer string in
    thermal_zone0; divide by 1000 to get degrees.
    """
    temperature_file_path = "/sys/class/thermal/thermal_zone0/temp"
    try:
        # Removed the dead `raw_temp = None` pre-assignment from the original.
        with open(temperature_file_path) as f:
            raw_temp = f.readline().strip("\n")
        return float(raw_temp) / 1000
    except (FileNotFoundError, TypeError, ValueError) as e:
        # Best-effort: on machines without this sysfs node we just report None.
        print(e)
        print("Could not read CPU temperature")
        return None
def get_cpu_count() -> int:
    """Return the number of logical CPUs reported by psutil."""
    return psutil.cpu_count()
def get_cpu_percent(interval: Union[float, None]) -> list[float]:
    """Return per-CPU utilisation percentages.

    With percpu=True psutil returns a list of floats (one per logical CPU),
    so the original `-> float` annotation was inaccurate.
    """
    return psutil.cpu_percent(interval=interval, percpu=True)
def get_cpu_usage(interval: Union[float, None]) -> dict:
    """Collect CPU count, per-CPU utilisation and temperature into one dict."""
    usage = {}
    usage["count"] = get_cpu_count()
    usage["percent"] = get_cpu_percent(interval)
    usage["temp"] = get_cpu_temp()
    return usage
def get_mem_usage() -> dict:
    """Report system virtual-memory usage (bytes, plus a percent figure)."""
    vm = psutil.virtual_memory()
    # Attribute names happen to match the desired keys exactly.
    return {key: getattr(vm, key) for key in ("total", "used", "available", "percent")}
def get_disk_usage() -> dict:
    """Report root-filesystem usage; the 'available' key maps to psutil's .free."""
    usage = psutil.disk_usage("/")
    return dict(
        total=usage.total,
        used=usage.used,
        available=usage.free,
        percent=usage.percent,
    )
def get_pids() -> list[int]:
    """Return the list of currently running process IDs."""
    return psutil.pids()
| noahtigner/homelab | api/diagnostics/retrieval.py | retrieval.py | py | 1,365 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "psutil.cpu_count",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "psutil.cpu_percent",
"l... |
74525063866 | import argparse
from datetime import datetime
import os
import sys
import time
import random
from Classifier_3d_v1 import Classifier
import tensorflow as tf
from util import Visualizer
import numpy as np
from dataset_classifier import LungDataset
import torch
from ops import load,save,pixelwise_cross_entropy
import torchnet as tnt
from torch.utils.data import DataLoader
#restore_from='./models'
restore_from=None  # checkpoint dir to resume from, or None to train from scratch
models_path='./models'  # where checkpoints are written
logs='./logs'  # TensorBoard summary directory
luna="/home/x/dcsb/data/TianChi/"
luna_data="/home/x/data/datasets/tianchi/train/"
batch_size = 1
max_run = 1000  # number of training epochs
epoch_print = 100
iters=0  # global step counter, updated inside main() via `global iters`
vis = Visualizer()  # plotting helper, instantiated at import time
def main():
    """Train the 3D nodule classifier.

    Builds a TF1 graph (momentum SGD, clipped gradients), then loops over
    epochs feeding batches from a PyTorch DataLoader and plotting progress.
    """
    vis.vis.texts=''
    dice_loss_meter =tnt.meter.AverageValueMeter()  # running average of the loss
    # 48x48x48 single-channel 3D patches; one-hot labels over 2 classes.
    image_batch=tf.placeholder(tf.float32, shape=[None, 48, 48,48, 1])
    label_batch=tf.placeholder(tf.float32, shape=[None,2])
    net=Classifier({'data': image_batch},batch_size=batch_size)
    prob = net.layers['result']
    logits=net.layers['logits']
    dataset=LungDataset("/home/x/dcsb/data/TianChi",augument=True)
    all_trainable =tf.trainable_variables()
    restore_var = tf.global_variables()
    cross_loss = tf.losses.softmax_cross_entropy(label_batch,logits)
    global iters
    cross_loss_sum=tf.summary.scalar("crossloss",cross_loss)
    # accuracy=tf.metrics.accuracy(label_batch,prob)
    # Momentum SGD with gradients clipped to global norm 1.
    optimiser = tf.train.MomentumOptimizer(0.01,0.99)
    gradients = tf.gradients(cross_loss, all_trainable)
    clipped_gradients, norm = tf.clip_by_global_norm(gradients,1.)
    train_op = optimiser.apply_gradients(zip(clipped_gradients, all_trainable))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    all_sum=tf.summary.merge([cross_loss_sum])
    summary_writer = tf.summary.FileWriter(logs,graph=tf.get_default_graph())
    saver = tf.train.Saver(var_list=restore_var, max_to_keep=40)
    # Load variables if the checkpoint is provided.
    if restore_from is not None:
        loader = tf.train.Saver(var_list=restore_var)
        load(loader, sess, restore_from,"classifier_v2")
    for i in range(max_run):
        dice_loss_meter.reset()
        start_time = time.time()
        # Seed labels/pred with one dummy row so np.concatenate works below.
        # NOTE(review): the dummy row is never removed, slightly skewing the
        # accuracy figures plotted/printed later.
        labels=np.array([1,0])
        labels=labels[np.newaxis,:]
        pred=np.array([1,0])
        pred=pred[np.newaxis,:]
        train_loader = DataLoader(dataset,batch_size = batch_size,shuffle = True,num_workers = 1,pin_memory=True,drop_last=True)
        for batch_idx, (img_, label_,_) in enumerate(train_loader):
            iters+=1
            img=img_.numpy()
            label=label_.numpy()
            labels=np.concatenate([labels,label],axis=0)
            # Move the channel axis last to match the [N,48,48,48,1] placeholder.
            img=img.transpose([0,2,3,4,1])
            feed_dict={image_batch:img,label_batch:label}
            _,cross_loss_,probs,summary=sess.run([train_op,cross_loss,prob,all_sum],feed_dict=feed_dict)
            summary_writer.add_summary(summary, iters)
            pred=np.concatenate([pred,probs],axis=0)
            # print "prob+:",probs[:,0]
            vis.plot('accuracy',np.mean(np.argmax(labels,axis=1)==np.argmax(pred,axis=1)))
            dice_loss_meter.add(cross_loss_)
            if batch_idx>10:
                try:
                    vis.plot('cross_loss',dice_loss_meter.value()[0])
                except:
                    pass
            vis.img('input',img_[0,0,24,:,:].cpu().float())  # middle slice of the first patch
            if iters%50==0:
                pred_=np.argmax(pred,axis=1)
                label_=np.argmax(labels,axis=1)
                acc=np.mean(label_==pred_)
                # Extra forward pass solely to report the current loss value.
                cross=cross_loss.eval(feed_dict,session=sess)
                print("Epoch: [%2d] [%4d] ,time: %4.4f,cross_loss:%.8f,accuracy:%.8f"% \
                (i,batch_idx,time.time() - start_time,cross,acc))
        if i%2==0:
            save(saver,sess,models_path,iters,"classifier_v2",train_tag="nodule_predict")
# Training starts immediately on import/run (no `if __name__ == "__main__"` guard).
main()
| jimmyyfeng/Tianchi-1 | Tianchi_tensorflow/train_classifier.py | train_classifier.py | py | 3,980 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "util.Visualizer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchnet.meter.AverageValueMeter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchnet.meter",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "... |
44757415813 | from telegram.ext import *
from telegram import *
import openai
openai.api_key = "YOUR OPENAI API KEY" # Enter your OpenAI Secret Key.
telegram_token = "YOUR TELEGRAM BOT TOKEN" # Enter your Telegram Bot Token.
conversation=[{"role": "system", "content": "You are a helpful assistant."}] # Defined the assistant role.
def main():
    """Build the Telegram application, register the handlers, and start polling."""
    app = Application.builder().token(telegram_token).build() # Created a Telegram app.
    app.add_handler(CommandHandler('start', start_command)) # Added start_command function.
    app.add_handler(CommandHandler('restart', restart_command)) # Added restart_command function.
    app.add_handler(MessageHandler(filters.TEXT, handle_message)) # Added handle_message function.
    app.add_error_handler(error) # Added error_handle function.
    app.run_polling() # Started the app.
def reply(lastMessage): # ChatGPT conversation function
    """Append the user's message, query the OpenAI chat API, and return the answer.

    The history is capped by popping the oldest non-system entry both before
    the request (when already at the limit) and after appending the reply.
    """
    if(len(conversation)>=7): # The conversation has a limit. Only assistant role, last 3 messages and last 3 replies are saved. Other messages and replies are deleted.
        conversation.pop(1)
    conversation.append({"role": "user", "content": lastMessage}) # Added last request.
    completion = openai.ChatCompletion.create( # Sent completion request and received ChatGPT message.
        model="gpt-3.5-turbo", # Used "gpt-3.5-turbo" model. "gpt-4" can also be used.
        messages=conversation, # Sent all conversation.
        max_tokens=1000 # Defined as max 1000 tokens. Changeable value.
    )
    if(len(conversation)>7): # The conversation has a limit. Only assistant role, last 3 messages and last 3 replies are saved. Other messages and replies are deleted.
        conversation.pop(1)
    lastReply = completion.choices[0].message['content'] # Read last reply from completion.
    conversation.append({"role": "assistant", "content": lastReply}) # Added last reply.
    return lastReply # Returned last reply.
def replyStartRestart():
    """Reset the conversation to just the system prompt and return the greeting."""
    global conversation
    # Rebinding the global is sufficient; the original also called
    # conversation.clear() first, which was redundant since no other
    # reference to the old list exists in this module.
    conversation = [{"role": "system", "content": "You are a helpful assistant."}] # Defined the assistant role.
    return 'Hello! How can I help you?'
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Forward a user's text message to ChatGPT and reply with the answer."""
    text: str = update.message.text # Read last Telegram message from user.
    await update.message.reply_text(reply(text)) # Sent ChatGPT message to Telegram user.
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /start: reset the conversation and greet the user."""
    await update.message.reply_text(replyStartRestart()) # Replied to Telegram user.
async def restart_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /restart: identical to /start - reset the conversation and greet."""
    await update.message.reply_text(replyStartRestart()) # Replied to Telegram user.
async def error(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Global error handler: log the error and ask the user to retry."""
    print(f'Error: {context.error}') # Printed error log
    await update.message.reply_text('Please wait! If I don\'t respond within a few minutes, try again') # Replied to Telegram user
if __name__ == "__main__":
    main() # Start polling only when run as a script.
{
"api_name": "openai.api_key",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion",
"line_number": 21,
"usage_type": "attribute"
}
] |
24890875535 | #!/bin/env python
# -*- coding: UTF-8 -*-
import wx
import os
import sys
import shutil
import re
import math
from bqList import MyBibleList
from exhtml import exHtmlWindow
class MyApp(wx.App):
    """Application object: resolves the install directory and opens the main frame."""
    path = None  # install directory, set in OnInit
    def __init__(self, *args, **kwds):
        wx.App.__init__ (self, *args, **kwds)
    def OnInit(self):
        """wx entry point: create and show the main frame."""
        # Directory containing this script; module/icon paths are relative to it.
        self.path = os.path.realpath(os.path.dirname(sys.argv[0]))
        #self.path = '/home/noah/Files/Soft-Win/BibleQuote'
        self.SetAppName('BQTlite')
        self.SetClassName('BQT reader lite')
        frame = MyFrame("BQT reader lite", (150, 72), (667, 740))
        frame.Show()
        self.SetTopWindow(frame)
        return True
class MyFrame(wx.Frame):
    """Main window of the BQT lite reader: toolbar, HTML content view, search panel."""
    path = ''              # install directory, set in __init__
    strongs = False        # legacy flag; runtime toggling uses strongsOn below
    page = None            # exHtmlWindow rendering the current content
    findPanel = None       # search bar, shown only in 'search' mode
    sizer = None
    bibles = None          # MyBibleList: available modules plus history
    activeModule = None    # currently opened module
    compareModule = None   # optional second module for side-by-side display
    buttonz = {}           # name -> toolbar button (mutable class attribute, shared across instances)
    searchField = None
    strongsOn = False      # whether Strong numbers are rendered as links
    currentBook = -1
    currentChapter = -1
    fullScreen = False
    def buttonData(self):
        """Toolbar spec: (label, handler, key in self.buttonz, width, tooltip, bitmap file)."""
        return (("RST", self.OnModule, 'module', 130, 'Module', ''),
                ("Genesis", self.OnBook, 'book', 200, 'Book', ''),
                ("<", self.PrevChapter, None, 20, 'Previous chapter', ''),
                ("1", self.OnChapter, 'chapter', 40, 'Chapter', ''),
                (">", self.NextChapter, None, 20, 'Next chapter', ''),
                ("#", self.ToggleStrongs, 'strongs', 30, 'Toggle Strong numbers', ''),
                ("H", self.OnHistory, 'history', 30, 'History', 'HistoryButton.bmp'),
                ("S", self.OnFind, 'find', 30, 'Search', 'SearchButton.bmp'),
                ("-Compare-", self.OnCompare, 'compare', 130, 'Compare translation with...', ''),
                ('F', self.ToggleFullScreen, None, 30, 'Toggle fullscreen', 'FullScreen.bmp'))
    def createButtonBar(self, panel, yPos = 0):
        """Lay out the toolbar buttons left-to-right; return the tallest button height."""
        xPos = 0
        height = 0
        for eachLabel, eachHandler, eachName, eachWidth, eachHint, eachPic in self.buttonData():
            pos = (xPos, yPos)
            button = self.buildOneButton(panel, eachLabel, eachHandler, pos, eachHint, eachPic, height)
            if(eachName):
                self.buttonz[eachName] = button  # keep a handle for later relabelling
            if(eachWidth):
                button.SetSize((eachWidth, -1))
            xPos += button.GetSize().width
            if(button.GetSize().height>height):
                height=button.GetSize().height
        return height
def buildOneButton(self, parent, label, handler, position=(0,0), hint='', img='', height=0):
if(img and os.path.exists(self.path + '/GLYPHS/' + img)):
image1 = wx.Image(self.path + '/GLYPHS/' + img,\
wx.BITMAP_TYPE_ANY).ConvertToBitmap()
button = wx.BitmapButton(parent, id=-1, bitmap=image1,
pos=position, size = (height, height))
elif(img and os.path.exists(self.path + '/help/buttons/' + img)):
image1 = wx.Image(self.path + '/help/buttons/' + img,\
wx.BITMAP_TYPE_ANY).ConvertToBitmap()
button = wx.BitmapButton(parent, id=-1, bitmap=image1,
pos=position, size = (height, height))
else:
button = wx.Button(parent, -1, label, position)
self.Bind(wx.EVT_BUTTON, handler, button)
if(hint):
button.SetToolTip(wx.ToolTip(hint))
return button
    def createTabs(self):
        """Experimental notebook layout (its call in __init__ is commented out)."""
        # NOTE(review): accesses wx.html as an attribute of wx; confirm
        # `import wx.html` happens somewhere, since `import wx` alone may not load it.
        # create notebook
        notebook = wx.Notebook( self, -1, (0,40), (500,500))
        # create pages
        ctrl = wx.Panel( notebook, -1 )
        # add pages
        notebook.AddPage( wx.TextCtrl( notebook, -1 ), "Page 1", False, -1 )
        notebook.AddPage( ctrl, "Page 2 Will be Selected", True, -1 )
        self.page = wx.html.HtmlWindow(ctrl, -1, (0,0), (200, 200))
        return notebook
    def __init__(self, title, pos, size):
        """Build the whole UI: toolbar, HTML view, search panel, status bar.

        Then restores the last history entry (or opens the module chooser).
        """
        self.path = os.path.realpath(os.path.dirname(sys.argv[0]))
        #self.path = '/home/noah/Files/Soft-Win/BibleQuote'
        self.bibles = MyBibleList()
        wx.Frame.__init__(self, None, -1, title, pos, size)
        #self.createMenuBar()
        self.panel = wx.Panel(self, -1)
        self.panel.SetBackgroundColour("Yellow")
        height = self.createButtonBar(self.panel)
        #notebook = self.createTabs()
        self.page = exHtmlWindow(self, -1, (0,0), (100,100))
        self.page.SetLinkClicked(self.OnLinkClicked)
        self.findPanel = wx.Panel(self, -1)
        self.searchField = wx.TextCtrl(self.findPanel, -1, '', (0,0))
        self.findButton = wx.Button(self.findPanel, -1, 'Find', (100,0))
        self.Bind(wx.EVT_BUTTON, self.OnSearchStart, self.findButton)
        self.findPanel.Hide()
        self.CreateStatusBar()
        self.SetStatusText("Ready")
        self.strongs = False
        self.bibles.loadList(self.path)
        self.__do_layout()
        # Reopen the most recent location, if any history was saved.
        if(len(self.bibles.history)>0):
            history0 = self.bibles.history[0]
            self.bibleGo(history0['command'][3:])
        else:
            self.OnModule(None)
        self.Bind (wx.EVT_CLOSE, self.OnClose)
        favicon = wx.Icon(self.path + '/favicon.ico', wx.BITMAP_TYPE_ICO, 16, 16)
        self.SetIcon(favicon)
        # NOTE(review): legacy PyGTK-era imports below; they fail on Python 3 /
        # GTK3 systems - confirm the target environment still provides them.
        import gobject
        gobject.threads_init()
        import pygtk
        pygtk.require('2.0')
        import gtk, gtk.gdk
        self.taskBarIcon = favicon
    def __do_layout(self):
        """Arrange toolbar / search bar / content vertically; content row stretches."""
        self.sizer = wx.FlexGridSizer(3, 1, 0, 0)
        self.sizer.Add(self.panel, 1, flag = wx.EXPAND)
        self.sizer.Add(self.findPanel, 2, flag = wx.EXPAND)
        self.sizer.Add(self.page, 3, flag = wx.EXPAND)
        self.sizer.AddGrowableRow(2)  # the HTML page takes all spare height
        self.sizer.AddGrowableCol(0)
        self.SetSizer(self.sizer)
        searchSizer = wx.FlexGridSizer(1, 2, 0, 0)
        searchSizer.Add(self.searchField, 1, flag = wx.EXPAND)
        searchSizer.Add(self.findButton, 2, flag = wx.EXPAND)
        searchSizer.AddGrowableCol(0)  # text field grows, button keeps its size
        self.findPanel.SetSizer(searchSizer)
        self.Layout()
    def arrangeControls(self):
        """Sync toolbar labels, search-panel visibility and the status bar with current state."""
        if(self.page.getMode()=='search'):
            self.findPanel.Show()
        else:
            self.findPanel.Hide()
        # Strong numbers are only meaningful for Bible modules that carry them.
        if(not self.activeModule or not self.activeModule.Bible \
           or not self.activeModule.StrongNumbers):
            self.strongsOn = False
        if(self.strongsOn):
            self.buttonz['strongs'].SetForegroundColour('Green')
        else:
            self.buttonz['strongs'].SetForegroundColour('Black')
        if(self.activeModule):
            self.buttonz['module'].SetLabel(self.activeModule.BibleShortName)
            self.buttonz['book'].SetLabel(self.activeModule.FullName[self.currentBook])
            self.buttonz['chapter'].SetLabel(str(self.currentChapter))
            statusText = self.activeModule.BibleName
        else:
            self.buttonz['module'].SetLabel('')
            self.buttonz['book'].SetLabel('')
            self.buttonz['chapter'].SetLabel('')
            statusText = 'Select a module'
        if(self.compareModule):
            statusText = statusText + ' | ' + self.compareModule.BibleName
            self.buttonz['compare'].SetLabel(self.compareModule.BibleShortName)
        else:
            self.buttonz['compare'].SetLabel('-Compare-')
        statusText = statusText + ' | ' + 'Mode: ' +self.page.getMode()
        if(self.page.ctrlDown):
            statusText = statusText + ' [Ctrl]'
        self.SetStatusText(statusText)
        self.Layout()
    def OnCopy(self, event):
        """Delegate clipboard copy to the HTML view, then let the event propagate."""
        self.page.OnCopy(event)
        event.Skip()
def OnOptions(self, event): pass
    def OnQuit(self, event):
        """Quit handler: closing the frame fires EVT_CLOSE, handled in OnClose."""
        self.Close()
def OnClose(self, event):
try:
self.bibles.saveHistory()
except:
pass
self.Destroy()
    def OnAbout(self, event):
        """Show the about dialog."""
        wx.MessageBox("BQT reader light (very light)\nWritten by Noah for the sake of learning Python.",
                      "BQT reader light", wx.OK | wx.ICON_INFORMATION, self)
    def OnLinkClicked(self, link):
        """Dispatch an internal pseudo-URL ('scheme:payload') clicked in the HTML view.

        Schemes: module, book, chapter, strong, go, searchpage, compare.
        """
        tmpRe = re.search('^([^:]+):(.*)$', link.GetHref())
        if(tmpRe):
            if(tmpRe.groups()[0]=='module'):
                path = tmpRe.groups()[1]
                if(self.activeModule and self.activeModule.path == path):
                    # Re-clicking the current module just redisplays the chapter.
                    self.ShowChapter(self.currentChapter)
                else:
                    oldModule = self.activeModule
                    self.activeModule = self.bibles.getModule(path)
                    self.activeModule.loadModule()
                    # When switching between two Bible modules, try to stay on
                    # the same (absolute) book and chapter.
                    if(oldModule and oldModule.Bible and self.activeModule.Bible):
                        #wx.MessageBox('[0]', "Module", wx.ICON_ERROR | wx.OK)
                        newBookInd = self.activeModule.getOrderNumber(oldModule.getAbsoluteIndex(self.currentBook))
                        #wx.MessageBox('[1]', "Module", wx.ICON_ERROR | wx.OK)
                        if(newBookInd>=0):
                            #wx.MessageBox('[2] book:'+str(newBookInd), "Module", wx.ICON_ERROR | wx.OK)
                            if(self.activeModule.loadBook(newBookInd)):
                                self.currentBook = newBookInd
                                self.ShowChapter(self.currentChapter)
                                #wx.MessageBox('[3]', "Module", wx.ICON_ERROR | wx.OK)
                        else:
                            pass
                            #wx.MessageBox('Could not find the book', "Module", wx.ICON_ERROR | wx.OK)
                    else:
                        self.ChooseBook(path)
            elif(tmpRe.groups()[0]=='book'):
                book = int(tmpRe.groups()[1])
                self.activeModule.loadBook(book)
                self.buttonz['book'].SetLabel(self.activeModule.FullName[book])
                self.ChooseChapter(book)
            elif(tmpRe.groups()[0]=='chapter'):
                chapter = int(tmpRe.groups()[1])
                self.ShowChapter(chapter)
                self.buttonz['chapter'].SetLabel(str(chapter))
            elif(tmpRe.groups()[0]=='strong'):
                number = tmpRe.groups()[1]
                self.ShowStrong(number)
            elif(tmpRe.groups()[0]=='go'):
                self.bibleGo(tmpRe.groups()[1])
            elif(tmpRe.groups()[0]=='searchpage'):
                page = int(tmpRe.groups()[1])
                self.ShowSearchPage(page)
            elif(tmpRe.groups()[0]=='compare'):
                path = tmpRe.groups()[1]
                # Selecting the active module as comparison clears the comparison.
                if(self.activeModule.path == path):
                    path = ''
                self.OnCompareChoise(path)
            else:
                self.page.OutputHTML('Unknown command:', link.GetHref(), 'error')
        self.arrangeControls()
    def OnModule(self, event):
        """Toolbar handler: show the module chooser (Bibles, commentaries, other books)."""
        title = 'Choose a module:'
        return self.ShowModuleList(title, 'module', True, False)
    def ShowModuleList(self, title, mode, showOthers, showNothing):
        """Render the list of available modules as HTML links using scheme *mode*.

        showOthers adds non-Bible books; showNothing adds an 'Unselect' link.
        Clicking the toolbar button again (same mode) returns to the text.
        """
        if(self.page.getMode()==mode):
            self.ShowChapter(self.currentChapter)
            return
        self.page.saveScrollPos()
        modList = self.bibles.getBibleList()
        content = ''
        if(showNothing):
            content = content + '<a href="' + mode + ':">Unselect</a>'
        if(len(modList)):
            content = content + '<h2>Bibles:</h2>' + self.ProcessList(modList, mode)
        modList = self.bibles.getCommentaryList()
        if(len(modList)):
            content = content + '<h2>Commentaries:</h2>' + self.ProcessList(modList, mode)
        if(showOthers):
            modList = self.bibles.getOtherList()
            if(len(modList)):
                content = content + '<h2>Other books:</h2>' + self.ProcessList(modList, mode)
        if(content == ''):
            title = 'Could not find modules'
        self.page.OutputHTML(title, content, mode)
        self.arrangeControls()
def ProcessList(self, modList, mode):
content = '<ul>'
for mod in modList:
label = mod.BibleName
link = mode+ ':' + mod.path
content = content + '<li> <a href="' + link + '">' + label + '</a>'
content = content + '</ul>'
return content
    def ChooseBook(self, path):
        """Show the book chooser for the active module (3-column table of links).

        Single-book modules skip straight to the chapter chooser.
        """
        if(not self.activeModule): return
        self.page.setPath(path)
        if(self.activeModule.BookQty>1):
            content = '<table><tr><td valign=top><ul>'
            # Books per column: split the list into three roughly equal parts.
            cnt = int((len(self.activeModule.FullName)-1)/3)+1
            for i in range(len(self.activeModule.FullName)):
                content = content + '<li><a href="book:' + str(i) + '">' + \
                          self.activeModule.FullName[i] + '</a>'
                if(i+1==cnt or i+1==cnt+cnt):
                    content = content + '</ul></td><td valign=top><ul>'
            content = content + '</ul></td></tr></table>'
            self.page.OutputHTML('', content, 'book')
            self.arrangeControls()
        else:
            self.activeModule.loadBook(0)
            self.currentBook = 0
            self.ChooseChapter(0)
        return
    def ChooseChapter(self, book):
        """Show the chapter chooser for *book*; single-chapter books open directly."""
        if(not self.activeModule): return
        self.currentBook = int(book)
        content = ''
        chRange = self.activeModule.getChapterRange(book)
        if(len(chRange)>1):
            for i in chRange:
                content = content + '<a href="chapter:' + str(i) + \
                          '"><font size="7"> ' + str(i) + \
                          ' </font></a> '
            self.page.OutputHTML('', content, 'chapter')
            self.arrangeControls()
        else:
            self.ShowChapter(chRange[0])
        return
    def transformContent(self, text, strongPrfx, module):
        """Post-process chapter HTML: link or strip Strong numbers, flatten paragraphs.

        strongPrfx ('0' for OT books per ShowChapter) is prepended to the
        matched digits to form the strong: link target.
        """
        if(module.StrongNumbers):
            if(self.strongsOn):
                # Turn each ' 12345' after a word into a small superscript-style link.
                text = re.sub(' ([0-9]{1,5})', ' <a href="strong:' + strongPrfx +\
                              '\\1"><small>\\1</small></a>', text)
            else:
                text = re.sub(' [0-9]{1,5}', '', text)
        #text = text.replace('<','<br>[[').replace('>',']]<br>')
        # Drop opening <p ...> tags and turn closings into line breaks.
        text = re.sub('<p( [^>]*)?>', '', text)
        text = text.replace('</p>','<br>')
        return text
    def ShowChapter(self, chapter):
        """Render a chapter (optionally side-by-side with the compare module) and push history."""
        if(not self.activeModule): return
        self.currentChapter = int(chapter)
        content = self.activeModule.getChapter(chapter)
        # Strong-number prefix: '0' marks Old Testament numbers (Hebrew).
        prfx = ''
        if(self.activeModule.isOT(self.currentBook)): prfx = '0'
        content = self.transformContent(content, prfx, self.activeModule)
        absInd = self.activeModule.getAbsoluteIndex(self.currentBook)
        newBookInd = self.activeModule.getOrderNumber(absInd)
        title = ''
        if(self.compareModule):
            self.compareModule.loadModule()
            newBookInd = self.compareModule.getOrderNumber(absInd)
            if(newBookInd>=0 and self.compareModule.loadBook(newBookInd)):
                content2 = self.compareModule.getChapter(chapter)
                if(content2):
                    content2 = self.transformContent(content2, prfx, self.compareModule)
                    # Split the table proportionally to the two texts' lengths.
                    prc = int(len(content)*100./(len(content)+len(content2)))
                    content = '<table><tr><td width='+str(prc)+'% valign=top>' + content + '</td>' +\
                              '<td width='+str(100-prc)+'% valign=top>' + content2 + '</td></tr></table>'
        self.page.OutputHTML('', content, 'text')
        self.page.restoreScrollPos()
        # History command uses 1-based book numbers; ChapterZero shifts chapter numbering.
        chzero = 0
        if(self.activeModule.ChapterZero): chzero = 1
        command = os.path.basename(self.activeModule.path).lower()\
                  + ' ' + str(self.currentBook + 1)\
                  + ' ' + str(self.currentChapter + chzero)
        title = self.activeModule.BibleShortName\
                + ' ' + self.activeModule.ShortName[self.currentBook][0]\
                + ' ' + str(self.currentChapter)
        self.bibles.pushHistory(command, title)
        self.arrangeControls()
        self.page.SetFocus()
    def OnBook(self, event):
        """Toolbar book button: toggle the book chooser."""
        if(not self.activeModule): return
        if(self.page.getMode()=='book'):
            self.ShowChapter(self.currentChapter)
            return
        self.ChooseBook(self.activeModule.path)
    def OnChapter(self, event):
        """Toolbar chapter button: toggle the chapter chooser."""
        if(not self.activeModule): return
        if(self.page.getMode()=='chapter'):
            self.ShowChapter(self.currentChapter)
            return
        self.page.setMode('chapter')
        self.ChooseChapter(self.currentBook)
def PrevChapter(self, event):
    """Step back one chapter, crossing into the previous book if necessary."""
    if not self.activeModule or self.page.getMode() != 'text':
        return
    # getPrevChapter yields a (book, chapter) pair, or a falsy value at
    # the very start of the module.
    target = self.activeModule.getPrevChapter(self.currentBook, self.currentChapter)
    if not target:
        return
    book, chapter = target
    self.activeModule.loadBook(book)
    self.currentBook = book
    self.ShowChapter(chapter)
    self.page.clearScrollPos()
    self.arrangeControls()
def NextChapter(self, event):
    """Advance one chapter, moving into the next book when needed."""
    if not self.activeModule or self.page.getMode() != 'text':
        return
    # getNextChapter yields a (book, chapter) pair, or a falsy value at
    # the very end of the module.
    target = self.activeModule.getNextChapter(self.currentBook, self.currentChapter)
    if not target:
        return
    book, chapter = target
    self.activeModule.loadBook(book)
    self.currentBook = book
    self.ShowChapter(chapter)
    self.page.clearScrollPos()
    self.arrangeControls()
def ToggleStrongs(self, event):
    """Toggle rendering of Strong's numbers and redraw the current chapter.

    Fix: the original wrapped the toggle in ``if(1 or
    self.page.getMode()!='strong'):`` -- the ``1 or`` makes the condition
    always true, so the mode guard was dead code; it is removed here
    without changing behavior.  The manual if/else flip of ``strongsOn``
    is replaced by the idiomatic ``not``.
    """
    if not self.activeModule:
        return
    self.strongsOn = not self.strongsOn
    self.ShowChapter(self.currentChapter)
    self.arrangeControls()
def ShowStrong(self, number):
    """Look up a Strong's number and display its dictionary entry.

    A leading '0' marks a Hebrew number; up to two leading zeros are
    stripped before the numeric conversion (int() ignores any leading
    zeros anyway, so only the Hebrew flag depends on the prefix).
    """
    isHeb = number.startswith('0')
    if isHeb:
        number = number[1:]
    if number.startswith('0'):
        number = number[1:]
    entry = self.bibles.getStrongText(int(number), isHeb)
    # entry is (title, content) -- render it in 'strong' mode.
    self.page.OutputHTML(entry[0], entry[1], 'strong')
    self.arrangeControls()
def OnHistory(self, event):
    """Show the navigation history as a list of links, or toggle back."""
    if self.page.getMode() == 'history':
        self.ShowChapter(self.currentChapter)
        return
    # NOTE(review): the first three characters of each stored command are
    # dropped here -- looks intentional, but verify against pushHistory's
    # command format.
    links = ['<a href="go:' + entry['command'][3:] + '">' + entry['title'] + '</a><br>'
             for entry in self.bibles.history]
    self.page.OutputHTML('History:', ''.join(links), 'history')
    self.arrangeControls()
def OnFind(self, event):
    """Toggle the search view: show results if the find panel is hidden,
    otherwise return to the chapter text."""
    if not self.activeModule:
        return
    self.page.setMode('search')
    if self.findPanel.IsShown():
        self.ShowChapter(self.currentChapter)
    else:
        self.ShowSearchPage(1)
    self.arrangeControls()
def OnSearchStart(self, event):
    """Run a search for the query in the search field, then show page 1."""
    if not self.activeModule:
        return
    query = self.searchField.GetValue()
    if query == '':
        return
    self.activeModule.search(query, [])
    self.ShowSearchPage(1)
def ShowSearchPage(self, page):
    """Render one page of search results plus a pager of page links.

    Fix: the page count is now ``ceil(searchCount / pageSize)`` clamped
    to a minimum of 1.  The old ``int(searchCount / pageSize) + 1``
    produced a trailing link to an empty page whenever the result count
    was an exact multiple of the page size.
    """
    pageSize = 20
    searchCount = self.activeModule.searchCount()
    found = self.activeModule.getSearchPage(page, pageSize)
    title = str(searchCount) + ' results'
    parts = []
    # One <hr>-separated hit per row: a "go:" link plus the hit preview.
    for entry in found:
        parts.append('<hr><a href="go:- ' +
                     str(entry[0]) + ' ' +
                     str(entry[1]) + ' ' +
                     str(entry[2]) + '">' +
                     entry[3] + '</a> ' +
                     self.transformContent(entry[4], '', self.activeModule))
    # Ceiling division; keep at least one page so the pager always renders.
    pageCount = max(1, (searchCount + pageSize - 1) // pageSize)
    parts.append('<hr>')
    for i in range(1, pageCount + 1):
        if i == page:
            # Current page: enlarged, not a link.
            parts.append(' <FONT size="+2">' + str(i) + '</FONT> ')
        else:
            parts.append(' <a href="searchpage:' + str(i) +
                         '"><FONT size="+2">' + str(i) + '</FONT></a> ')
    self.page.OutputHTML(title, ''.join(parts), 'search')
def bibleGo(self, command):
    """Navigate to a ``module book chapter`` location given as a command.

    The first token is a module short path; the special value '-' keeps
    the current module.  The book token is 1-based; the chapter token is
    shifted down by one for modules that have a chapter zero.
    """
    parts = command.split(' ')
    if not self.activeModule or parts[0] != '-':
        module = self.bibles.getModuleByShortPath(parts[0])
        if not module:
            wx.MessageBox("Could not open the module: \n" + command, "Error", wx.ICON_ERROR | wx.OK)
            return
        self.activeModule = module
        self.activeModule.loadModule()
        self.buttonz['module'].SetLabel(self.activeModule.BibleShortName)
    book = int(parts[1]) - 1
    self.activeModule.loadBook(book)
    self.currentBook = book
    self.buttonz['book'].SetLabel(self.activeModule.FullName[book])
    chapter = int(parts[2])
    if self.activeModule.ChapterZero:
        chapter -= 1
    self.currentChapter = chapter
    self.buttonz['chapter'].SetLabel(str(chapter))
    self.ShowChapter(chapter)
    self.arrangeControls()
def OnCompare(self, event):
    """Open the module chooser for selecting a compare module."""
    return self.ShowModuleList('Choose a module to compare:', 'compare', False, True)
def OnCompareChoise(self, path):
    """Chooser callback: set (or clear, on empty path) the compare module
    and redraw the current chapter."""
    self.compareModule = self.bibles.getModule(path) if path else None
    self.ShowChapter(self.currentChapter)
def ToggleFullScreen(self, event):
    """Flip between windowed and full-screen display, then refocus the
    text pane so keyboard navigation keeps working."""
    self.fullScreen = not self.fullScreen
    self.ShowFullScreen(self.fullScreen, style=wx.FULLSCREEN_ALL)
    self.page.SetFocus()
# Script entry point: create the application object and run the GUI
# event loop until the main window is closed.
if __name__ == '__main__':
    app = MyApp(False)
    app.MainLoop()
| noah-ubf/BQTLite | pybq.py | pybq.py | py | 19,493 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "wx.App",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "wx.App.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "wx.App",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.