id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1677647 | errorlog = '-'
accesslog = '-'
access_log_format = '{"remote_ip": "%(h)s", "uri": "%(r)s", ' \
'"response_code": %(s)s, "request_time": %(L)s}'
| StarcoderdataPython |
1666760 | <filename>cache-preload.py
#!/usr/bin/env python3
import asyncio
import multiprocessing
import time
import xml.etree.ElementTree as ET
from functools import partial
from pathlib import Path
from random import randint
import aiohttp
import async_timeout
import click
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from slugify import slugify
mobile_useragent = 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
mobile_window_size = [411, 731]
desktop_useragent = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
desktop_window_size = [1920, 1080]
async def load_sitemaps(url):
    """Open one HTTP session for the whole crawl and collect all sitemap URLs."""
    async with aiohttp.ClientSession() as client:
        return await get_urls(client, url)
async def get_urls(client, url, max_depth=32, current_depth=1):
    """Recursively collect page URLs from a sitemap or sitemap index.

    Follows nested <sitemap> entries up to ``max_depth`` levels and gathers
    the <loc> of every <url> entry.  Returns a sorted, de-duplicated list.
    On error or non-200 status the sitemap is skipped and an empty list is
    returned, so callers can always do ``urls += await get_urls(...)``.
    (Fixes: the original fell off the except branch returning None, which
    made the recursive ``+=`` raise TypeError.)
    """
    urls = []
    try:
        async with async_timeout.timeout(10):
            async with client.get(url) as r:
                # only use status 200
                if r.status == 200:
                    root = ET.fromstring(await r.text())
                    for item in root:
                        if item.tag.endswith('sitemap'):
                            # nested sitemap index entry
                            if current_depth > max_depth:
                                # report the configured limit, not the depth counter
                                print('==> warning: maximum depth of {:d} reached - will not continue to search for site maps'.format(max_depth))
                                break
                            for prop in item:
                                if prop.tag.endswith('loc'):
                                    urls += await get_urls(client, prop.text, max_depth, current_depth + 1)
                        elif item.tag.endswith('url'):
                            # plain url-list entry
                            for prop in item:
                                if prop.tag.endswith('loc'):
                                    urls.append(prop.text)
                else:
                    print('==> "{:s}" returned status {:d}, skipping'.format(url, r.status))
        return sorted(set(urls))
    except Exception as ex:
        # Exception, not BaseException: never swallow CancelledError/KeyboardInterrupt.
        print(ex)
        print('==> "{:s}" failed with error, skipping'.format(url))
        return []
def do_test(browser, browser_meta, name, geckodriver_path, screenshot_dir, log_path, url):
    """Visit one URL with the given browser, optionally saving top/bottom screenshots."""
    print('=> visiting "{:s}" with browser "{:s}" ...'.format(url, browser_meta['name']))
    shots = None
    if screenshot_dir:
        slug = slugify(url)
        shots = [
            str(Path(screenshot_dir, '{:s}-{:s}-{:s}.png'.format(slug, name, pos)))
            for pos in ('top', 'bottom')
        ]
    browser.get(url)
    # short random pause so the request pattern looks less robotic
    time.sleep(randint(500, 1000) / 1000)
    if shots:
        scroll_targets = ('0', 'document.body.scrollHeight')
        for target, shot_path in zip(scroll_targets, shots):
            browser.execute_script('window.scrollTo(0, {})'.format(target))
            time.sleep(randint(50, 100) / 1000)
            browser.get_screenshot_as_file(shot_path)
    print('==> "{:s}" with browser "{:s}" done'.format(url, browser_meta['name']))
def get_browser(user_agent, window_size, geckodriver_path, log_path):
    """Start a headless Firefox with the given UA override and window size.

    ``log_path`` may be None (no log file); ``geckodriver_path`` points at
    the geckodriver binary.  Returns the selenium WebDriver instance.
    """
    browser_options = Options()
    browser_options.add_argument('--headless')
    if user_agent:
        # override the UA so sites serve the desktop/mobile variant we want
        browser_options.set_preference('general.useragent.override', user_agent)
    # NOTE(review): the log_path/firefox_options/executable_path kwargs were
    # deprecated in newer selenium releases - confirm the pinned version.
    browser = webdriver.Firefox(log_path=str(log_path) if log_path else None, firefox_options=browser_options, executable_path=str(geckodriver_path))
    browser.set_window_size(window_size[0], window_size[1])
    return browser
@click.command(context_settings={'help_option_names': ['--help', '-h'], 'max_content_width': 256})
@click.argument('url', type=click.STRING)
@click.option('--desktop/--no-desktop', '-d/-nd', is_flag=True, default=True, show_default=True, help='enable desktop browser')
@click.option('--mobile/--no-mobile', '-m/-nm', is_flag=True, default=False, show_default=True, help='enable mobile browser')
@click.option(
    '--geckodriver-path',
    '-gp',
    type=click.Path(file_okay=True, dir_okay=False),
    default='/usr/bin/geckodriver',
    show_default=True,
    help='path to geckodriver binary'
)
@click.option('--screenshot-dir', '-sd', type=click.Path(exists=True, file_okay=False, dir_okay=True), help='save screenshots to directory')
@click.option('--log-dir', '-ld', type=click.Path(exists=True, file_okay=False, dir_okay=True), help='save logs to directory')
def main(mobile, desktop, url, geckodriver_path, screenshot_dir, log_dir):
    """Warm a site's cache by visiting every sitemap URL with headless Firefox."""
    browsers = []
    if desktop:
        browsers.append({'name': 'desktop', 'window_size': desktop_window_size, 'user_agent': desktop_useragent})
    if mobile:
        browsers.append({'name': 'mobile', 'window_size': mobile_window_size, 'user_agent': mobile_useragent})
    if len(browsers) <= 0:
        print('=> error: no browsers enabled')
        exit(1)
    print('=> active browsers: "{:s}"'.format(', '.join(list(map(lambda x: x['name'], browsers)))))
    geckodriver_path = Path(geckodriver_path).resolve()
    if log_dir:
        log_dir = Path(log_dir).resolve()
        log_dir.mkdir(parents=True, exist_ok=True)
        print('=> will save logs to "{}"'.format(str(log_dir)))
    else:
        log_dir = None
    if screenshot_dir:
        # timestamped subdirectory so repeated runs don't overwrite each other
        screenshot_dir = Path(screenshot_dir).resolve().joinpath(str(int(time.time())))
        screenshot_dir.mkdir(parents=True, exist_ok=True)
        print('=> will save screenshots to "{}"'.format(str(screenshot_dir)))
    else:
        screenshot_dir = None
    print('=> fetching all urls from sitemap on "{:s}" and its children...'.format(url))
    loop = asyncio.get_event_loop()
    urls = loop.run_until_complete(asyncio.gather(asyncio.ensure_future(load_sitemaps(url))))
    #urls = [['http://orf.at', 'https://duernberg.at', 'https://felixklein.net']]
    loop.close()
    if not urls or not urls[0]:
        print('=> error: No urls found, exiting')
        exit(1)
    if len(urls) <= 0 or len(urls[0]) <= 0:
        print('=> error: No urls found, exiting')
        exit(1)
    # asyncio.gather wraps the result list in another list - unwrap it
    urls = urls[0]
    print('==> found {:d} urls'.format(len(urls)))
    # one worker process; each browser configuration is processed sequentially
    pool = multiprocessing.Pool(processes=1)
    print('=> initializing firefox...')
    pool.map(partial(browser_run, urls, geckodriver_path, screenshot_dir, log_dir), browsers, 1)
    pool.close()
    pool.join()
def browser_run(urls, geckodriver_path, screenshot_dir, log_dir, browser):
    """Worker entry point: start one Firefox for ``browser`` and visit every URL."""
    log_path = log_dir.joinpath('{:s}.log'.format(browser['name'])) if log_dir else None
    driver = get_browser(browser['user_agent'], browser['window_size'], geckodriver_path, log_path)
    for target_url in urls:
        do_test(driver, browser, browser['name'], geckodriver_path, screenshot_dir, log_path, target_url)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # removed the unused `as e` binding; exit non-zero on Ctrl-C
        exit(1)
| StarcoderdataPython |
3268112 | <gh_stars>0
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = ['*']
# Real SMTP delivery; a console backend may be preferable for local dev.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Optional machine-local overrides; absence is deliberately not an error.
try:
    from .local import *
except ImportError:
    pass
| StarcoderdataPython |
3347008 | <filename>l10n_br_eletronic_document/models/account_move.py
import re
from datetime import datetime
from random import SystemRandom
from odoo import api, fields, models
from odoo.exceptions import UserError
# Maps the Odoo move type to the Brazilian e-doc operation direction.
TYPE2EDOC = {
    'out_invoice': 'saida',  # Customer Invoice
    'in_invoice': 'entrada',  # Vendor Bill
    'out_refund': 'entrada',  # Customer Refund
    'in_refund': 'saida',  # Vendor Refund
}
class AccountMove(models.Model):
    """Extends account.move with Brazilian electronic-document (NFe/NFSe) emission."""
    _inherit = 'account.move'
def _compute_total_edocs(self):
    """Count the eletronic.document records linked to each move."""
    EletronicDocument = self.env['eletronic.document']
    for move in self:
        move.total_edocs = EletronicDocument.search_count(
            [('move_id', '=', move.id)])
def _get_default_policy(self):
    """Default emission policy derived from the move type in context."""
    move_type = self.env.context.get('default_type', '')
    if move_type == 'out_invoice':
        return 'directly'
    if move_type == 'in_invoice':
        return 'manually'
# Number of linked electronic documents (used by the stat button / smart view).
total_edocs = fields.Integer(string="Total NFe", compute=_compute_total_edocs)
# When the electronic invoice should be emitted for this move.
l10n_br_edoc_policy = fields.Selection(
    [('directly', 'Emitir agora'),
     ('after_payment', 'Emitir após pagamento'),
     ('manually', 'Manualmente')], string="Nota Eletrônica", default=_get_default_policy)
@api.model
def _autopost_draft_entries(self):
    """Cron entry point: post every due draft move flagged ``auto_post``."""
    records = self.search([
        ('state', '=', 'draft'),
        ('date', '<=', fields.Date.today()),
        ('auto_post', '=', True),
    ])
    for item in records:
        item.action_post()
        # commit after each move so one failure does not roll back prior postings
        self.env.cr.commit()
def _validate_for_eletronic_document(self):
    """Check all data required to emit an electronic document (NFe/NFSe).

    Walks company, products, city-specific NFSe settings and the customer,
    accumulating every problem found, then raises a single UserError with
    the complete list so the user can fix everything in one pass.
    """
    errors = []
    for move in self:
        # --- company / certificate ---
        if not move.company_id.l10n_br_certificate:
            errors.append('Cadastro da Empresa - Certificado Digital')
        if not move.company_id.l10n_br_cert_password:
            errors.append('Cadastro da Empresa - Senha do Certificado Digital')
        if not move.company_id.partner_id.l10n_br_legal_name:
            errors.append('Cadastro da Empresa - Razão Social')
        if not move.company_id.partner_id.l10n_br_cnpj_cpf:
            errors.append('Cadastro da Empresa - CNPJ/CPF')
        if not move.company_id.partner_id.street:
            errors.append('Cadastro da Empresa / Endereço - Logradouro')
        if not move.company_id.partner_id.l10n_br_number:
            errors.append('Cadastro da Empresa / Endereço - Número')
        # BUG FIX: previously read `self.company_id...zip` inside the loop,
        # which is wrong on multi-record sets; validate the current move.
        if not move.company_id.partner_id.zip or len(
                re.sub(r"\D", "", move.company_id.partner_id.zip)) != 8:
            errors.append('Cadastro da Empresa / Endereço - CEP')
        if not move.company_id.partner_id.state_id:
            errors.append('Cadastro da Empresa / Endereço - Estado')
        else:
            if not move.company_id.partner_id.state_id.l10n_br_ibge_code:
                errors.append('Cadastro da Empresa / Endereço - Cód. do IBGE do estado')
            if not move.company_id.partner_id.state_id.name:
                errors.append('Cadastro da Empresa / Endereço - Nome do estado')
        if not move.company_id.partner_id.city_id:
            errors.append('Cadastro da Empresa / Endereço - município')
        else:
            if not move.company_id.partner_id.city_id.name:
                errors.append('Cadastro da Empresa / Endereço - Nome do município')
            if not move.company_id.partner_id.city_id.l10n_br_ibge_code:
                errors.append('Cadastro da Empresa/Endereço - Cód. do IBGE do município')
        if not move.company_id.partner_id.country_id:
            errors.append('Cadastro da Empresa / Endereço - país')
        else:
            if not move.company_id.partner_id.country_id.name:
                errors.append('Cadastro da Empresa / Endereço - Nome do país')
            if not move.company_id.partner_id.country_id.l10n_br_ibge_code:
                errors.append('Cadastro da Empresa / Endereço - Código do BC do país')
        # --- technical responsible contact ---
        responsavel_tecnico = move.company_id.l10n_br_responsavel_tecnico_id
        if responsavel_tecnico:
            if not responsavel_tecnico.l10n_br_cnpj_cpf:
                errors.append("Configure o CNPJ do responsável técnico")
            if not responsavel_tecnico.email:
                errors.append("Configure o Email do responsável técnico")
            if not responsavel_tecnico.phone:
                errors.append("Configure o Telefone do responsável técnico")
            if len(responsavel_tecnico.child_ids) == 0:
                errors.append("Adicione um contato para o responsável técnico!")
        # --- products / services on the invoice lines ---
        has_products = has_services = False
        for eletr in move.invoice_line_ids:
            if eletr.product_id.type == 'service':
                has_services = True
            if eletr.product_id.type in ('consu', 'product'):
                has_products = True
            prod = "Produto: %s - %s" % (eletr.product_id.default_code,
                                         eletr.product_id.name)
            if not eletr.product_id.default_code:
                errors.append(
                    'Prod: %s - Código do produto' % (
                        eletr.product_id.name))
            if has_products and not eletr.product_id.l10n_br_ncm_id:
                errors.append('%s - NCM do produto' % prod)
        if not move.fiscal_position_id:
            errors.append('Configure a posição fiscal')
        if move.company_id.l10n_br_accountant_id and not \
                move.company_id.l10n_br_accountant_id.l10n_br_cnpj_cpf:
            errors.append('Cadastro da Empresa / CNPJ do escritório contabilidade')
        if has_products and not move.company_id.l10n_br_nfe_sequence:
            errors.append('Configure a sequência para numeração de NFe')
        if has_services and not move.company_id.l10n_br_nfe_service_sequence:
            errors.append('Configure a sequência para numeração de NFe de serviço')
        # --- city-specific requirements for NFSe (service invoices) ---
        if has_services:
            cod_municipio = '%s%s' % (
                move.company_id.state_id.l10n_br_ibge_code,
                move.company_id.city_id.l10n_br_ibge_code,
            )
            if cod_municipio == '4205407':
                # Florianópolis uses its own municipal API credentials
                if not all([
                    move.company_id.l10n_br_aedf,
                    move.company_id.l10n_br_client_id,
                    move.company_id.l10n_br_client_secret,
                    move.company_id.l10n_br_user_password
                ]):
                    errors.append('Campos de validação para a API de Florianópolis não estão preenchidos')
            elif cod_municipio in ['3550308', '3106200']:
                # these municipalities require per-product service codes
                for line in move.invoice_line_ids:
                    if line.product_id.type == 'service':
                        if not line.product_id.service_type_id:
                            errors.append('Produto %s não possui Tipo de Serviço.' % line.product_id.name)
                        if not line.product_id.service_code:
                            errors.append('Produto %s não possui Código do Município.' % line.product_id.name)
            else:
                # everything else goes through the Focus NFe gateway
                if not move.company_id.l10n_br_nfse_token_acess:
                    errors.append('Token da Focus não está preenchida!\nPor favor, preencha-a no cadastro da empresa.')
        # --- customer (recipient) ---
        partner = move.partner_id.commercial_partner_id
        company = move.company_id
        if partner.is_company and not partner.l10n_br_legal_name:
            errors.append('Cliente - Razão Social')
        if partner.country_id.id == company.partner_id.country_id.id:
            if not partner.l10n_br_cnpj_cpf:
                errors.append('Cliente - CNPJ/CPF')
        if not partner.street:
            errors.append('Cliente / Endereço - Logradouro')
        if not partner.l10n_br_number:
            errors.append('Cliente / Endereço - Número')
        if partner.country_id.id == company.partner_id.country_id.id:
            if not partner.zip or len(
                    re.sub(r"\D", "", partner.zip)) != 8:
                errors.append('Cliente / Endereço - CEP')
        if partner.country_id.id == company.partner_id.country_id.id:
            if not partner.state_id:
                errors.append('Cliente / Endereço - Estado')
            else:
                if not partner.state_id.l10n_br_ibge_code:
                    errors.append('Cliente / Endereço - Código do IBGE do estado')
                if not partner.state_id.name:
                    errors.append('Cliente / Endereço - Nome do estado')
        if partner.country_id.id == company.partner_id.country_id.id:
            if not partner.city_id:
                errors.append('Cliente / Endereço - Município')
            else:
                if not partner.city_id.name:
                    errors.append('Cliente / Endereço - Nome do município')
                if not partner.city_id.l10n_br_ibge_code:
                    errors.append('Cliente / Endereço - Código do IBGE do município')
        if not partner.country_id:
            errors.append('Cliente / Endereço - País')
        else:
            if not partner.country_id.name:
                errors.append('Cliente / Endereço - Nome do país')
            if not partner.country_id.l10n_br_ibge_code:
                errors.append('Cliente / Endereço - Cód. do BC do país')
    if len(errors) > 0:
        msg = "\n".join(
            ["Por favor corrija os erros antes de prosseguir"] + errors)
        raise UserError(msg)
def _prepare_eletronic_line_vals(self, invoice_lines):
    """Build (0, 0, vals) ORM create-commands for every given invoice line."""
    return [(0, 0, line.get_eletronic_line_vals()) for line in invoice_lines]
def _prepare_eletronic_doc_vals(self, invoice_lines):
    """Build the create() vals for an eletronic.document from this invoice.

    Derives fiscal indicators (destination, recipient IE, final consumer),
    sequence numbers, installments and totals from the invoice and its
    fiscal position.  ``invoice_lines`` are the lines composing the e-doc.
    """
    invoice = self
    # 8-digit random control number for the emission
    # NOTE(review): randrange(9) yields digits 0-8 only; randrange(10) may
    # have been intended - confirm.
    num_controle = int(''.join([str(SystemRandom().randrange(9))
                                for i in range(8)]))
    numero_nfe = numero_rps = 0
    if self.company_id.l10n_br_nfe_sequence:
        numero_nfe = self.company_id.l10n_br_nfe_sequence.next_by_id()
    if self.company_id.l10n_br_nfe_service_sequence:
        numero_rps = self.company_id.l10n_br_nfe_service_sequence.next_by_id()
    vals = {
        'name': invoice.name,
        'move_id': invoice.id,
        'company_id': invoice.company_id.id,
        'schedule_user_id': self.env.user.id,
        'state': 'draft',
        'tipo_operacao': TYPE2EDOC[invoice.type],
        'numero_controle': num_controle,
        'data_emissao': datetime.now(),
        'data_agendada': invoice.invoice_date,
        'finalidade_emissao': '1',
        'ambiente': invoice.company_id.l10n_br_tipo_ambiente,
        'partner_id': invoice.partner_id.id,
        'payment_term_id': invoice.invoice_payment_term_id.id,
        'fiscal_position_id': invoice.fiscal_position_id.id,
        'natureza_operacao': invoice.fiscal_position_id.name,
        'ind_pres': invoice.fiscal_position_id.ind_pres,
        'informacoes_complementares': invoice.narration,
        'numero_fatura': invoice.name,
        'fatura_bruto': invoice.amount_total,
        'fatura_desconto': 0.0,
        'fatura_liquido': invoice.amount_total,
        'pedido_compra': invoice.ref,
        'serie_documento': invoice.fiscal_position_id.serie_nota_fiscal,
        'numero': numero_nfe,
        'numero_rps': numero_rps,
        'valor_frete': invoice.l10n_br_delivery_amount,
        'valor_seguro': invoice.l10n_br_insurance_amount,
        'valor_despesas': invoice.l10n_br_expense_amount,
    }
    vals['cod_regime_tributario'] = '1' if invoice.company_id.l10n_br_tax_regime == 'simples' else '3'
    # Destination indicator: '1' same state, '2' interstate, '3' abroad
    vals['ind_dest'] = '1'
    if invoice.company_id.state_id != invoice.commercial_partner_id.state_id:
        vals['ind_dest'] = '2'
    if invoice.company_id.country_id != invoice.commercial_partner_id.country_id:
        vals['ind_dest'] = '3'
    # Recipient state-registration (IE) indicator
    ind_ie_dest = False
    if invoice.commercial_partner_id.is_company:
        if invoice.commercial_partner_id.l10n_br_inscr_est:
            ind_ie_dest = '1'
        elif invoice.commercial_partner_id.state_id.code in ('AM', 'BA', 'CE',
                                                             'GO', 'MG', 'MS',
                                                             'MT', 'PE', 'RN',
                                                             'SP'):
            ind_ie_dest = '9'
        elif invoice.commercial_partner_id.country_id.code != 'BR':
            ind_ie_dest = '9'
        else:
            ind_ie_dest = '2'
    else:
        ind_ie_dest = '9'
    # an explicit setting on the partner wins over the derived value
    if invoice.commercial_partner_id.l10n_br_indicador_ie_dest:
        ind_ie_dest = invoice.commercial_partner_id.l10n_br_indicador_ie_dest
    vals['ind_ie_dest'] = ind_ie_dest
    # Final-consumer indicator
    if invoice.commercial_partner_id.is_company:
        if vals['ind_ie_dest'] == '9':
            vals['ind_final'] = '1'
        else:
            vals['ind_final'] = '0'
    else:
        vals['ind_final'] = '1'
    if invoice.fiscal_position_id.ind_final:
        vals['ind_final'] = invoice.fiscal_position_id.ind_final
    # IEST registration matching the recipient's state, when configured
    iest_id = invoice.company_id.l10n_br_iest_ids.filtered(
        lambda x: x.state_id == invoice.commercial_partner_id.state_id)
    if iest_id:
        vals['iest'] = iest_id.name
    # Installments (duplicatas): one per receivable line, ordered by due date
    duplicatas = []
    count = 1
    for parcela in invoice.receivable_move_line_ids.sorted(lambda x: x.date_maturity):
        duplicatas.append((0, None, {
            'numero_duplicata': "%03d" % count,
            'data_vencimento': parcela.date_maturity,
            'valor': parcela.credit or parcela.debit,
        }))
        count += 1
    vals['duplicata_ids'] = duplicatas
    # Totals, split between product and service lines
    total_produtos = total_servicos = 0.0
    bruto_produtos = bruto_servicos = 0.0
    total_desconto = 0
    for inv_line in invoice_lines:
        total_desconto += round(inv_line.price_unit * inv_line.quantity * inv_line.discount / 100, 2)
        if inv_line.product_id.type == 'service':
            total_servicos += inv_line.price_total
            bruto_servicos += round(inv_line.quantity * inv_line.price_unit, 2)
        else:
            total_produtos += inv_line.price_total
            bruto_produtos += round(inv_line.quantity * inv_line.price_unit, 2)
    vals.update({
        'valor_bruto': bruto_produtos + bruto_servicos,
        'valor_servicos': total_servicos,
        'valor_produtos': total_produtos,
        'valor_desconto': total_desconto,
        'valor_final': total_produtos + total_servicos,
    })
    return vals
def sum_line_taxes(self, vals):
    """Aggregate per-line tax amounts from the (0, 0, vals) commands in
    ``vals['document_line_ids']`` into document-level totals."""
    lines = vals.get("document_line_ids")
    # document-level key -> per-line source key
    field_map = {
        'valor_icms': 'icms_valor',
        'valor_icmsst': 'icms_st_valor',
        'valor_icms_uf_dest': 'icms_uf_dest',
        'valor_icms_uf_remet': 'icms_uf_remet',
        'valor_icms_fcp_uf_dest': 'icms_fcp_uf_dest',
        'valor_ipi': 'ipi_valor',
        'pis_valor': 'pis_valor',
        'cofins_valor': 'cofins_valor',
        'valor_ii': 'ii_valor',
        'valor_bc_icms': 'icms_base_calculo',
        'valor_bc_icmsst': 'icms_st_base_calculo',
        'pis_valor_retencao': 'pis_valor_retencao',
        'cofins_valor_retencao': 'cofins_valor_retencao',
        'irrf_base_calculo': 'irrf_base_calculo',
        'irrf_valor_retencao': 'irrf_valor_retencao',
        'csll_base_calculo': 'csll_base_calculo',
        'csll_valor_retencao': 'csll_valor_retencao',
        'inss_base_calculo': 'inss_base_calculo',
        'inss_valor_retencao': 'inss_valor_retencao',
    }
    return {
        target: sum(command[2].get(source, 0) for command in lines)
        for target, source in field_map.items()
    }
def action_create_eletronic_document(self):
    """Create the NFSe and/or NFe electronic documents for each move."""
    for move in self:
        # freight/insurance/expense lines are carried as document totals,
        # not as e-doc lines
        edoc_lines = move.invoice_line_ids.filtered(
            lambda l: not l.is_delivery_expense_or_insurance())
        service_lines = edoc_lines.filtered(
            lambda l: l.product_id.type == 'service')
        if service_lines:
            self._create_service_eletronic_document(move, service_lines)
        product_lines = edoc_lines.filtered(
            lambda l: l.product_id.type != 'service')
        if product_lines:
            self._create_product_eletronic_document(move, product_lines)
def _create_service_eletronic_document(self, move, services):
    """Create the NFSe (service) electronic document for ``move``."""
    vals = move._prepare_eletronic_doc_vals(services)
    vals.update(
        model='nfse',
        document_line_ids=move._prepare_eletronic_line_vals(services),
    )
    vals.update(self.sum_line_taxes(vals))
    self.env['eletronic.document'].create(vals)
def _create_product_eletronic_document(self, move, products):
    """Create the NFe (product) electronic document for ``move``.

    Customer refunds get a link to the original NFe so the devolution
    references its access key.
    """
    vals = move._prepare_eletronic_doc_vals(products)
    vals['model'] = 'nfe'
    # BUG FIX: operate on `move` (the record being processed), not `self`,
    # which may be a multi-record set here and would raise on `.type`.
    if move.type == 'out_refund':
        vals['related_document_ids'] = move._create_related_doc(vals)
    vals['document_line_ids'] = move._prepare_eletronic_line_vals(products)
    vals.update(self.sum_line_taxes(vals))
    self.env['eletronic.document'].create(vals)
def _create_related_doc(self, vals):
    """Link the refund to the posted e-doc of the move it reverses.

    Returns the created nfe.related.document record, or None when the
    reversed move has no posted e-doc of the same model.
    """
    # NOTE(review): domain uses 'in' with a scalar id - presumably relies on
    # Odoo's domain normalization; ('reversal_move_id', '=', self.id) would
    # be clearer. Confirm intent.
    related_move_id = self.env['account.move'].search([
        ('reversal_move_id', 'in', self.id)], limit=1)
    doc = self.env['eletronic.document'].search([
        ('move_id', '=', related_move_id.id),
        ('model', '=', vals['model']),
        ('state', '=', 'done')
    ], limit=1, order='id desc')
    if doc:
        related_doc = self.env['nfe.related.document'].create({
            'move_related_id': related_move_id.id,
            'document_type': 'nfe',
            'access_key': doc.chave_nfe,
        })
        # NOTE(review): a bare record is returned and stored by the caller in
        # 'related_document_ids' - verify an ORM command list isn't expected.
        return related_doc
def action_post(self):
    """Post the moves; validate and emit e-docs for 'directly'-policy invoices."""
    to_emit = self.filtered(
        lambda m: m.l10n_br_edoc_policy == 'directly' and m.type != 'entry')
    to_emit._validate_for_eletronic_document()
    res = super(AccountMove, self).action_post()
    to_emit.action_create_eletronic_document()
    return res
def action_view_edocs(self):
    """Open the linked eletronic.document(s): form view when there is exactly
    one, a filtered list view otherwise."""
    if self.total_edocs == 1:
        dummy, act_id = self.env['ir.model.data'].get_object_reference(
            'l10n_br_eletronic_document', 'action_view_eletronic_document')
        dummy, view_id = self.env['ir.model.data'].get_object_reference(
            'l10n_br_eletronic_document', 'view_eletronic_document_form')
        vals = self.env['ir.actions.act_window'].browse(act_id).read()[0]
        vals['view_id'] = (view_id, 'sped.eletronic.doc.form')
        # move the form view to the front so the single record opens directly
        vals['views'][1] = (view_id, 'form')
        vals['views'] = [vals['views'][1], vals['views'][0]]
        edoc = self.env['eletronic.document'].search(
            [('move_id', '=', self.id)], limit=1)
        vals['res_id'] = edoc.id
        return vals
    else:
        dummy, act_id = self.env['ir.model.data'].get_object_reference(
            'l10n_br_eletronic_document', 'action_view_eletronic_document')
        vals = self.env['ir.actions.act_window'].browse(act_id).read()[0]
        vals['domain'] = [('move_id', '=', self.id)]
        return vals
class AccountMoveLine(models.Model):
    """Extends account.move.line with per-line vals used to build e-doc lines."""
    _inherit = "account.move.line"

    def get_eletronic_line_vals(self):
        """Build the electronic-document line vals dict for this invoice line.

        Tax amounts are recomputed by applying the move's tax-line rates
        (matched by Brazilian tax domain) over this line's totals; the CFOP
        first digit is derived from direction and same-state/interstate.
        """
        # tax lines of the whole move, grouped by Brazilian tax domain
        pis = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'pis')
        cofins = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'cofins')
        iss = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'iss')
        csll = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'csll')
        irpj = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'irpj')
        inss = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'inss')
        ipi = self.move_id.line_ids.filtered(lambda x: x.tax_line_id.domain == 'ipi')
        fiscal_pos = self.move_id.fiscal_position_id
        vals = {
            'name': self.name,
            'product_id': self.product_id.id,
            # NOTE(review): assigns this move line's id to
            # 'eletronic_document_id' - looks suspicious; confirm the field
            # really expects the line id and not the parent document.
            'eletronic_document_id': self.id,
            'company_id': self.company_id.id,
            'tipo_produto': 'service' if self.product_id.type == 'service' else 'product',
            # 'cfop': self.cfop_id.code,
            'uom_id': self.product_uom_id.id,
            'quantidade': self.quantity,
            'preco_unitario': self.price_unit,
            'valor_bruto': round(self.quantity * self.price_unit, 2),
            'desconto': round(self.quantity * self.price_unit, 2) - self.price_subtotal,
            'valor_liquido': self.price_total,
            'origem': self.product_id.l10n_br_origin,
            # 'tributos_estimados': self.tributos_estimados,
            'ncm': self.product_id.l10n_br_ncm_id.code,
            'cest': self.product_id.l10n_br_cest,
            'extipi': self.product_id.l10n_br_extipi,
            'codigo_beneficio': self.product_id.l10n_br_fiscal_benefit,
            'pedido_compra': self.ref,
            # 'item_pedido_compra': self.item_pedido_compra,
            # - ICMS -
            'icms_cst': fiscal_pos.csosn_icms,
            # 'icms_aliquota': self.icms_aliquota,
            # 'icms_tipo_base': self.icms_tipo_base,
            # 'icms_aliquota_reducao_base': self.icms_aliquota_reducao_base,
            # 'icms_base_calculo': self.icms_base_calculo,
            # 'icms_valor': self.icms_valor,
            # - ICMS ST -
            # 'icms_st_aliquota': self.icms_st_aliquota,
            # 'icms_st_aliquota_mva': self.icms_st_aliquota_mva,
            # 'icms_st_aliquota_reducao_base': self.\
            # icms_st_aliquota_reducao_base,
            # 'icms_st_base_calculo': self.icms_st_base_calculo,
            # 'icms_st_valor': self.icms_st_valor,
            # # - Simples Nacional -
            'icms_aliquota_credito': fiscal_pos.icms_aliquota_credito,
            'icms_valor_credito': round(self.price_total * fiscal_pos.icms_aliquota_credito / 100, 2),
            # - IPI -
            'ipi_cst': '99',
            'ipi_aliquota': ipi.tax_line_id.amount or 0,
            'ipi_base_calculo': self.price_total or 0,
            'ipi_valor': round(self.price_total * ipi.tax_line_id.amount / 100, 2),
            # 'ipi_reducao_bc': self.ipi_reducao_bc,
            # - II -
            # 'ii_base_calculo': self.ii_base_calculo,
            # 'ii_valor_despesas': self.ii_valor_despesas,
            # 'ii_valor': self.ii_valor,
            # 'ii_valor_iof': self.ii_valor_iof,
            # - PIS -
            'pis_cst': '49',
            'pis_aliquota': pis.tax_line_id.amount or 0,
            'pis_base_calculo': self.price_total or 0,
            'pis_valor': round(self.price_total * pis.tax_line_id.amount / 100, 2),
            # 'pis_valor_retencao':
            # abs(self.pis_valor) if self.pis_valor < 0 else 0,
            # - COFINS -
            'cofins_cst': '49',
            'cofins_aliquota': cofins.tax_line_id.amount or 0,
            'cofins_base_calculo': self.price_total or 0,
            'cofins_valor': round(self.price_total * cofins.tax_line_id.amount / 100, 2),
            # 'cofins_valor_retencao':
            # abs(self.cofins_valor) if self.cofins_valor < 0 else 0,
            # - ISS -
            'item_lista_servico': self.product_id.service_type_id.code,
            'codigo_servico_municipio': self.product_id.service_code,
            'iss_aliquota': iss.tax_line_id.amount or 0,
            'iss_base_calculo': self.price_subtotal or 0,
            'iss_valor': round(self.price_subtotal * iss.tax_line_id.amount / 100, 2),
            # 'iss_valor_retencao':
            # abs(self.iss_valor) if self.iss_valor < 0 else 0,
            # - Withholdings (retenções) -
            'csll_aliquota': csll.tax_line_id.amount or 0,
            'csll_base_calculo': self.price_total or 0,
            'csll_valor': round(self.price_total * csll.tax_line_id.amount / 100, 2),
            # abs(self.csll_valor) if self.csll_valor < 0 else 0,
            'irpj_aliquota': irpj.tax_line_id.amount or 0,
            'irpj_base_calculo': self.price_total or 0,
            'irpj_valor': round(self.price_total * irpj.tax_line_id.amount / 100, 2),
            # 'irrf_base_calculo': self.irrf_base_calculo,
            # 'irrf_aliquota': abs(self.irrf_aliquota),
            # 'irrf_valor_retencao':
            # abs(self.irrf_valor) if self.irrf_valor < 0 else 0,
            'inss_base_calculo': self.price_subtotal or 0,
            'inss_aliquota': abs(inss.tax_line_id.amount or 0),
            'inss_valor_retencao': abs(
                round(self.price_subtotal * inss.tax_line_id.amount / 100, 2)
            ),
            'frete': self.l10n_br_delivery_amount,
            'seguro': self.l10n_br_insurance_amount,
            'outras_despesas': self.l10n_br_expense_amount,
        }
        # CFOP first digit: 1/2 incoming (same state / interstate),
        # 5/6 outgoing (same state / interstate)
        cfop = fiscal_pos.l10n_br_cfop_id.code or '5101'
        if self.move_id.type in ['in_invoice', 'out_refund']:
            if self.move_id.company_id.state_id == self.move_id.commercial_partner_id.state_id:
                cfop = '1' + cfop[1:]
            else:
                cfop = '2' + cfop[1:]
        elif self.move_id.type in ['out_invoice', 'in_refund']:
            if self.move_id.company_id.state_id == self.move_id.commercial_partner_id.state_id:
                cfop = '5' + cfop[1:]
            else:
                cfop = '6' + cfop[1:]
        vals['cfop'] = cfop
        return vals
| StarcoderdataPython |
1724238 | import eel
import os
import glob
import shutil
import codecs
from OpenGL.GL import *
from OpenGL.WGL import *
from ctypes import *
import numpy
import pyaudio
import array
import re
class FileSystem():
    """Stores GLSL shader sources as files grouped into category directories."""

    def __init__(self):
        # All categories live under ./category/<category>/<shader>.glsl
        self.categoryDir = './category'
        self.extension = '.glsl'
        # Source given to newly created shaders/categories.
        self.defaultSrc = '''vec2 mainSound(int samp, float time){
return vec2(sin(6.2831*440.0*time)*exp(-3.0*time));
}
'''
        # Bootstrap the storage tree: ensure the root and at least one category.
        if not os.path.exists(self.categoryDir):
            os.mkdir(self.categoryDir)
        if len(self.listCategory()) == 0:
            self.newCategory('default')
def load(self, name):
src = ''
if os.path.exists(name):
f = codecs.open(name, 'r', 'utf-8')
src = f.read()
f.close()
return src
def save(self, name, src):
f = codecs.open(name, 'w', 'utf-8')
f.write(src)
f.close()
def pathCategory(self, category):
return self.categoryDir + '/' + category
def filenameShader(self, category, shader):
return self.pathCategory(category) + '/' + shader + self.extension
def listCategory(self):
files = glob.glob(self.pathCategory("*"))
files.sort(key=os.path.getatime, reverse=True)
return ",".join(list(map(lambda a:os.path.basename(a),files)))
def listShaders(self, category):
files = glob.glob(self.filenameShader(category, '*'))
files.sort(key=os.path.getmtime, reverse=True)
return ','.join(list(map(lambda a:os.path.basename(a).split('.')[0],files)))
def newShader(self, category):
name = self.uniqShader(category)
self.saveShader(category, name, self.defaultSrc)
return name
def newCategory(self, category):
if len(category)==0: return 0
name = self.pathCategory(category)
if not os.path.exists(name):
os.mkdir(name)
self.save(self.filenameShader(category, category), self.defaultSrc)
return 1
return 0
def forkShader(self, category, shader):
    # NOTE(review): dead code - shadowed by the second `forkShader`
    # definition later in this class; only the later binding survives.
    name = self.uniqShader(category, shader)
    self.saveShader(category, name, self.loadShader(category, shader))
    return name
def delShader(self, category, shader):
    """Delete a shader; drop the category too if it becomes empty.

    Returns 1 when the category directory was removed as well, else 0.
    Always keeps at least one category alive by recreating 'default'.
    """
    ret = 0
    name = self.filenameShader(category, shader)
    if os.path.exists(name):
        os.remove(name)
    if len(self.listShaders(category)) == 0:
        ret = 1
        os.rmdir(self.pathCategory(category))
    if len(self.listCategory()) == 0:
        self.newCategory('default')
    return ret
def renameCategory(self, old, new):
if len(new) == 0: return 0
if os.path.exists(self.pathCategory(new)):
return 0
os.rename(self.pathCategory(old), self.pathCategory(new))
return 1
def renameShader(self, category, old, new):
if len(new) == 0: return 0
if os.path.exists(self.filenameShader(category, new)):
return 0
else:
os.rename(self.filenameShader(category, old), self.filenameShader(category, new))
return 1
def uniqShader(self, category, shader = '', fork = True):
if (len(shader) is 0):
num = 0
while os.path.exists(self.filenameShader(category, category + "_" + str(num))):
num += 1
return category + "_" + str(num)
else:
num = 0
s = "_fork" if fork else "-"
shader = re.sub(r'_.*[0-9]*', '', shader)
while os.path.exists(self.filenameShader(category, shader + s + str(num))):
num += 1
return shader + s + str(num)
def shiftShader(self, old, new, shader):
name = self.uniqShader(new, shader, False)
shutil.move(
self.filenameShader(old, shader),
self.filenameShader(new, name))
if len(self.listShaders(old))==0:
os.rmdir(self.pathCategory(old))
return name
def forkShader(self, category, shader):
srcFilename = self.filenameShader(category, shader)
name = self.uniqShader(category, shader)
dstFilename = self.filenameShader(category, name)
self.save(dstFilename, self.load(srcFilename))
return name
def loadShader(self, category, shader):
filename = self.filenameShader(category, shader)
return self.load(filename)
def saveShader(self, category, shader, src):
filename = self.filenameShader(category, shader)
self.save(filename, src)
class SoundShader():
    """Renders audio on the GPU: a vertex shader writes one stereo sample per
    vertex into a transform-feedback buffer.  Windows-only (raw WGL/user32)."""

    def __init__(self, chunk, rate):
        # chunk: samples per render pass; rate: sample rate in Hz; stereo output
        self.chunk = chunk
        self.rate = rate
        self.channels = 2
        # Prelude prepended to user source: output varying + timing uniforms.
        self.head ='''
#version 430
out vec2 gain;
uniform float iFrameCount;
const float iChunk = {0:.1f};
const float iSampleRate = {1:.1f};
'''.format(self.chunk ,self.rate)
        # Epilogue: calls the user's mainSound() once per vertex (= per sample).
        self.foot ='''
void main() {
float time = (iChunk * iFrameCount + float(gl_VertexID)) / iSampleRate;
int samp = int(iFrameCount);
gain = clamp(mainSound(samp, time), -1.0, 1.0);
}
'''
        # OpenGL Context: an invisible window exists only to own the GL context.
        # NOTE(review): 0xC018 is passed as a predefined window-class atom -
        # confirm which class this resolves to.
        self.hWnd = windll.user32.CreateWindowExA(0,0xC018,0,0,0,0,0,0,0,0,0,0)
        self.hDC = windll.user32.GetDC(self.hWnd)
        pfd = PIXELFORMATDESCRIPTOR(0,1,0,32,0,0,0,0,0,0,0,0,0,0,0,0,0,32,0,0,0,0,0,0,0)
        SetPixelFormat(self.hDC,ChoosePixelFormat(self.hDC, pfd), pfd)
        self.hGLrc = wglCreateContext(self.hDC)
        wglMakeCurrent(self.hDC, self.hGLrc)
        # Buffer: VBO bound as transform-feedback target, chunk*channels floats.
        self.samples = (c_float * self.chunk * self.channels)()
        vbo = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, vbo)
        glBufferData(GL_ARRAY_BUFFER, sizeof(self.samples), None, GL_STATIC_DRAW)
        glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, vbo)
        # alive: whether a compiled program should render audio;
        # success: result of the last compile attempt.
        self.alive = False
        self.success = False
        self.program = glCreateProgram()
def audioData(self, frame_count):
if self.alive:
glUniform1f(glGetUniformLocation(self.program, "iFrameCount"), frame_count)
glEnable(GL_RASTERIZER_DISCARD)
glBeginTransformFeedback(GL_POINTS)
glDrawArrays(GL_POINTS, 0, self.chunk)
glEndTransformFeedback()
glDisable(GL_RASTERIZER_DISCARD)
glGetBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(self.samples), byref(self.samples))
return numpy.frombuffer(self.samples, dtype=numpy.float32)
def compile(self, src):
shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(shader, self.head + src + self.foot)
glCompileShader(shader)
if glGetShaderiv(shader, GL_COMPILE_STATUS) != GL_TRUE:
self.success = False
return (glGetShaderInfoLog(shader).decode())
p = self.program
self.program = glCreateProgram()
glAttachShader(self.program, shader)
glDeleteShader(shader)
outs = cast((c_char_p*1)(b"gain"), POINTER(POINTER(c_char)))
glTransformFeedbackVaryings(self.program, 1, outs, GL_INTERLEAVED_ATTRIBS)
glLinkProgram(self.program)
glUseProgram(self.program)
glDeleteProgram(p)
self.success = True
return ("Success")
def close(self):
wglMakeCurrent(0, 0);
wglDeleteContext(self.hGLrc);
windll.user32.ReleaseDC(self.hWnd, self.hDC);
windll.user32.PostQuitMessage(0);
def trimSize(self, src):
src = re.compile(r'/\*.*?\*/', re.DOTALL).sub("", src)
src = re.sub(r"//.*", "", src)
src = re.sub(r"\t", " ", src)
src = re.sub(r" +", " ", src)
src = re.sub(r" *\n *", "\n", src)
src = re.sub(r"\n+", "\n", src)
src = re.sub(r"^\n", "", src)
L = src.split("\n")
for i in range(len(L)):
s = L[i]
if re.search("#", s) != None:
L[i] = "\n" + L[i] + "\n"
else:
s = re.sub(r" *\+ *" ,"+", s)
s = re.sub(r" *\- *" ,"-", s)
s = re.sub(r" *\* *" ,"*", s)
s = re.sub(r" */ *" ,"/", s)
s = re.sub(r" *= *" ,"=", s)
s = re.sub(r" *< *" ,"<", s)
s = re.sub(r" *> *" ,">", s)
s = re.sub(r" *& *" ,"&", s)
s = re.sub(r" *\| *" ,"|", s)
s = re.sub(r" *\( *" ,"(", s)
s = re.sub(r" *\) *" ,")", s)
s = re.sub(r" *\[ *" ,"[", s)
s = re.sub(r" *\] *" ,"]", s)
s = re.sub(r" *{ *" ,"{", s)
s = re.sub(r" *} *" ,"}", s)
s = re.sub(r" *; *" ,";", s)
s = re.sub(r" *, *" ,",", s)
L[i] = s
src = "".join(L)
src = re.sub(r"\n+", "\n", src)
src = re.sub(r"^\n", "", src)
return len(src)
class Tick():
    """Chunk-based playback clock with a loop region [startN, endN].

    Positions are counted in chunks of `chunk` samples at `rate` Hz.
    """
    def __init__(self, chunk, rate):
        self.chunk = chunk
        self.rate = rate
        self.n = 0            # current chunk index
        self.startN = 0       # loop start, in chunks
        self.endN = 1800      # loop end, in chunks
    def clucN(self, sec):
        """Convert a time in seconds to a chunk index."""
        return sec * self.rate / self.chunk
    def clucTime(self, n):
        """Convert a chunk index to a time in seconds."""
        return n * self.chunk / self.rate
    def startTime(self, sec):
        """Set the loop start; clamp the play head forward if needed."""
        self.startN = self.clucN(sec)
        if self.n < self.startN:
            self.n = self.startN
    def endTime(self, sec):
        """Set the loop end; clamp the play head back if needed."""
        self.endN = self.clucN(sec)
        if self.endN < self.n:
            self.n = self.endN
    def reset(self):
        """Rewind the play head to the loop start."""
        self.n = self.startN
    def time(self):
        """Current play-head position in seconds."""
        return self.clucTime(self.n)
    def tick(self):
        """Advance one chunk, wrapping to the loop start past the end."""
        self.n += 1
        if self.n > self.endN:
            self.n = self.startN
        return self.n
@eel.expose
def charSize(src):
    """RPC: minified character count of the given shader source."""
    return s.trimSize(src)

@eel.expose
def compile(src):
    """RPC: recompile the sound shader; playback pauses during the swap.

    NOTE: intentionally shadows the builtin `compile` -- the JS side
    calls it by this name.
    """
    s.alive = False
    ret = s.compile(src)
    s.alive = True
    return ret

@eel.expose
def success():
    """RPC: whether the most recent compile succeeded."""
    return s.success
@eel.expose
def listCategory():
    """RPC: names of all shader categories."""
    return f.listCategory()

@eel.expose
def listShaders(category):
    """RPC: shader names inside a category."""
    return f.listShaders(category)

@eel.expose
def newCategory(category):
    """RPC: create a new category."""
    return f.newCategory(category)

@eel.expose
def newShader(category):
    """RPC: create a new shader in a category."""
    return f.newShader(category)

@eel.expose
def forkShader(category, shader):
    """RPC: duplicate a shader under a fresh name."""
    return f.forkShader(category, shader)

@eel.expose
def delShader(category, shader):
    """RPC: delete a shader."""
    return f.delShader(category, shader)

@eel.expose
def renameCategory(old, new):
    """RPC: rename a category."""
    return f.renameCategory(old, new)

@eel.expose
def renameShader(category, old, new):
    """RPC: rename a shader within a category."""
    return f.renameShader(category, old, new)

@eel.expose
def shiftShader(old, new, shader):
    """RPC: move a shader between categories."""
    return f.shiftShader(old, new, shader)

@eel.expose
def loadShader(category, shader):
    """RPC: read a shader's source text."""
    return f.loadShader(category, shader)

@eel.expose
def saveShader(category, shader, src):
    """RPC: write a shader's source text."""
    return f.saveShader(category, shader, src)
@eel.expose
def reset():
    """RPC: rewind the playback clock to the loop start."""
    t.reset()

@eel.expose
def startTime(x):
    """RPC: set the loop start time (seconds)."""
    t.startTime(x)

@eel.expose
def endTime(x):
    """RPC: set the loop end time (seconds)."""
    t.endTime(x)

@eel.expose
def play():
    """RPC: start playback if the last compile succeeded; return the state."""
    s.alive = s.success
    return s.alive

@eel.expose
def stop():
    """RPC: pause playback."""
    s.alive = False

@eel.expose
def close():
    """RPC: ask the main loop to shut down."""
    global alive  # rebinds the module-level run flag
    alive = False
##+++ main +++++++++++++++++
# Serve the UI without blocking so this thread can run the audio loop.
eel.init("web")
eel.start("index.html",port=8002, block=False)
chunk = 2048
rate = 44100
s = SoundShader(chunk, rate)
t = Tick(chunk, rate)
f = FileSystem()
alive = True
# Prime the UI scope with one (initially silent) chunk.
eel.data(s.audioData(0).tolist())
# Open a stereo float32 output stream matching the shader's chunk size.
# NOTE(review): stream.close() / p.terminate() are never called on exit.
p = pyaudio.PyAudio()
stream = p.open(
    format = pyaudio.paFloat32,
    channels = 2,
    rate = s.rate,
    frames_per_buffer = s.chunk,
    output=True,
    input=False
)
stream.start_stream()
while alive:
    eel.sleep(0.01)  # yield to eel so browser RPC calls get serviced
    if s.alive:
        # Render the next chunk, feed the sound card, and mirror the data
        # (split into left/right halves) plus the current time to the UI.
        data = s.audioData(t.tick())
        stream.write(array.array('f', data).tobytes())
        eel.data(numpy.hstack((data[::2], data[1::2])).tolist())
        eel.time(t.time())
stream.stop_stream()
s.close()
| StarcoderdataPython |
1700733 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ProductPortfolioAssociationArgs', 'ProductPortfolioAssociation']
# NOTE: this class is tfgen-generated (see file header); avoid hand edits --
# they will be lost on regeneration.
@pulumi.input_type
class ProductPortfolioAssociationArgs:
    def __init__(__self__, *,
                 portfolio_id: pulumi.Input[str],
                 product_id: pulumi.Input[str],
                 accept_language: Optional[pulumi.Input[str]] = None,
                 source_portfolio_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ProductPortfolioAssociation resource.
        :param pulumi.Input[str] portfolio_id: Portfolio identifier.
        :param pulumi.Input[str] product_id: Product identifier.
        :param pulumi.Input[str] accept_language: Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        :param pulumi.Input[str] source_portfolio_id: Identifier of the source portfolio.
        """
        pulumi.set(__self__, "portfolio_id", portfolio_id)
        pulumi.set(__self__, "product_id", product_id)
        # Optional inputs are only recorded when supplied so the provider
        # default applies otherwise.
        if accept_language is not None:
            pulumi.set(__self__, "accept_language", accept_language)
        if source_portfolio_id is not None:
            pulumi.set(__self__, "source_portfolio_id", source_portfolio_id)

    @property
    @pulumi.getter(name="portfolioId")
    def portfolio_id(self) -> pulumi.Input[str]:
        """
        Portfolio identifier.
        """
        return pulumi.get(self, "portfolio_id")

    @portfolio_id.setter
    def portfolio_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "portfolio_id", value)

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> pulumi.Input[str]:
        """
        Product identifier.
        """
        return pulumi.get(self, "product_id")

    @product_id.setter
    def product_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "product_id", value)

    @property
    @pulumi.getter(name="acceptLanguage")
    def accept_language(self) -> Optional[pulumi.Input[str]]:
        """
        Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        """
        return pulumi.get(self, "accept_language")

    @accept_language.setter
    def accept_language(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accept_language", value)

    @property
    @pulumi.getter(name="sourcePortfolioId")
    def source_portfolio_id(self) -> Optional[pulumi.Input[str]]:
        """
        Identifier of the source portfolio.
        """
        return pulumi.get(self, "source_portfolio_id")

    @source_portfolio_id.setter
    def source_portfolio_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_portfolio_id", value)
# NOTE: tfgen-generated lookup-state twin of the Args class above; all fields
# are optional because state may be partial. Do not hand-edit.
@pulumi.input_type
class _ProductPortfolioAssociationState:
    def __init__(__self__, *,
                 accept_language: Optional[pulumi.Input[str]] = None,
                 portfolio_id: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 source_portfolio_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ProductPortfolioAssociation resources.
        :param pulumi.Input[str] accept_language: Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        :param pulumi.Input[str] portfolio_id: Portfolio identifier.
        :param pulumi.Input[str] product_id: Product identifier.
        :param pulumi.Input[str] source_portfolio_id: Identifier of the source portfolio.
        """
        if accept_language is not None:
            pulumi.set(__self__, "accept_language", accept_language)
        if portfolio_id is not None:
            pulumi.set(__self__, "portfolio_id", portfolio_id)
        if product_id is not None:
            pulumi.set(__self__, "product_id", product_id)
        if source_portfolio_id is not None:
            pulumi.set(__self__, "source_portfolio_id", source_portfolio_id)

    @property
    @pulumi.getter(name="acceptLanguage")
    def accept_language(self) -> Optional[pulumi.Input[str]]:
        """
        Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        """
        return pulumi.get(self, "accept_language")

    @accept_language.setter
    def accept_language(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accept_language", value)

    @property
    @pulumi.getter(name="portfolioId")
    def portfolio_id(self) -> Optional[pulumi.Input[str]]:
        """
        Portfolio identifier.
        """
        return pulumi.get(self, "portfolio_id")

    @portfolio_id.setter
    def portfolio_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "portfolio_id", value)

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> Optional[pulumi.Input[str]]:
        """
        Product identifier.
        """
        return pulumi.get(self, "product_id")

    @product_id.setter
    def product_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "product_id", value)

    @property
    @pulumi.getter(name="sourcePortfolioId")
    def source_portfolio_id(self) -> Optional[pulumi.Input[str]]:
        """
        Identifier of the source portfolio.
        """
        return pulumi.get(self, "source_portfolio_id")

    @source_portfolio_id.setter
    def source_portfolio_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_portfolio_id", value)
# NOTE: tfgen-generated resource class; do not hand-edit.
class ProductPortfolioAssociation(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accept_language: Optional[pulumi.Input[str]] = None,
                 portfolio_id: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 source_portfolio_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Service Catalog Product Portfolio Association.
        ## Example Usage
        ### Basic Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.servicecatalog.ProductPortfolioAssociation("example",
            portfolio_id="port-68656c6c6f",
            product_id="prod-dnigbtea24ste")
        ```
        ## Import
        `aws_servicecatalog_product_portfolio_association` can be imported using the accept language, portfolio ID, and product ID, e.g.,
        ```sh
         $ pulumi import aws:servicecatalog/productPortfolioAssociation:ProductPortfolioAssociation example en:port-68656c6c6f:prod-dnigbtea24ste
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accept_language: Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        :param pulumi.Input[str] portfolio_id: Portfolio identifier.
        :param pulumi.Input[str] product_id: Product identifier.
        :param pulumi.Input[str] source_portfolio_id: Identifier of the source portfolio.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ProductPortfolioAssociationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Service Catalog Product Portfolio Association.
        ## Example Usage
        ### Basic Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.servicecatalog.ProductPortfolioAssociation("example",
            portfolio_id="port-68656c6c6f",
            product_id="prod-dnigbtea24ste")
        ```
        ## Import
        `aws_servicecatalog_product_portfolio_association` can be imported using the accept language, portfolio ID, and product ID, e.g.,
        ```sh
         $ pulumi import aws:servicecatalog/productPortfolioAssociation:ProductPortfolioAssociation example en:port-68656c6c6f:prod-dnigbtea24ste
        ```
        :param str resource_name: The name of the resource.
        :param ProductPortfolioAssociationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword) overloads above.
        resource_args, opts = _utilities.get_resource_args_opts(ProductPortfolioAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accept_language: Optional[pulumi.Input[str]] = None,
                 portfolio_id: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 source_portfolio_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build props.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ProductPortfolioAssociationArgs.__new__(ProductPortfolioAssociationArgs)

            __props__.__dict__["accept_language"] = accept_language
            if portfolio_id is None and not opts.urn:
                raise TypeError("Missing required property 'portfolio_id'")
            __props__.__dict__["portfolio_id"] = portfolio_id
            if product_id is None and not opts.urn:
                raise TypeError("Missing required property 'product_id'")
            __props__.__dict__["product_id"] = product_id
            __props__.__dict__["source_portfolio_id"] = source_portfolio_id
        super(ProductPortfolioAssociation, __self__).__init__(
            'aws:servicecatalog/productPortfolioAssociation:ProductPortfolioAssociation',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            accept_language: Optional[pulumi.Input[str]] = None,
            portfolio_id: Optional[pulumi.Input[str]] = None,
            product_id: Optional[pulumi.Input[str]] = None,
            source_portfolio_id: Optional[pulumi.Input[str]] = None) -> 'ProductPortfolioAssociation':
        """
        Get an existing ProductPortfolioAssociation resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accept_language: Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        :param pulumi.Input[str] portfolio_id: Portfolio identifier.
        :param pulumi.Input[str] product_id: Product identifier.
        :param pulumi.Input[str] source_portfolio_id: Identifier of the source portfolio.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ProductPortfolioAssociationState.__new__(_ProductPortfolioAssociationState)

        __props__.__dict__["accept_language"] = accept_language
        __props__.__dict__["portfolio_id"] = portfolio_id
        __props__.__dict__["product_id"] = product_id
        __props__.__dict__["source_portfolio_id"] = source_portfolio_id
        return ProductPortfolioAssociation(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="acceptLanguage")
    def accept_language(self) -> pulumi.Output[Optional[str]]:
        """
        Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`.
        """
        return pulumi.get(self, "accept_language")

    @property
    @pulumi.getter(name="portfolioId")
    def portfolio_id(self) -> pulumi.Output[str]:
        """
        Portfolio identifier.
        """
        return pulumi.get(self, "portfolio_id")

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> pulumi.Output[str]:
        """
        Product identifier.
        """
        return pulumi.get(self, "product_id")

    @property
    @pulumi.getter(name="sourcePortfolioId")
    def source_portfolio_id(self) -> pulumi.Output[Optional[str]]:
        """
        Identifier of the source portfolio.
        """
        return pulumi.get(self, "source_portfolio_id")
| StarcoderdataPython |
131872 | #!/usr/bin/python
"""
Test XML character decoding against a range of encodings, valid and not."""
__author__ = "<NAME> <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004, 2006 <NAME>"
import os, sys
import codecs
import re
# Make the source directory importable so `feedvalidator` resolves when the
# tests are run straight from the checkout.
curdir = os.path.abspath(os.path.dirname(__file__))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
    sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]

# Names of generated test cases that could not be built (missing codecs).
skippedNames = []

import unittest, glob, re

from feedvalidator import xmlEncoding
class EncodingTestCase(unittest.TestCase):
    """One generated XML byte string checked against xmlEncoding.detect().

    The generator code below sets `filename`, `bytes` and (for valid
    cases) `expectedEncoding` on each instance before adding it to the
    suite.
    """
    def testEncodingMatches(self):
        # Valid document: detect() must succeed and name the expected encoding.
        try:
            enc = xmlEncoding.detect(self.bytes)
        except UnicodeError as u:
            self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
        self.assert_(enc, 'An encoding must be returned for all valid files ('
            + self.filename + ')')
        self.assertEqual(enc, self.expectedEncoding, 'Encoding for '
            + self.filename + ' should be ' + self.expectedEncoding + ', but was ' + enc)
    def testEncodingFails(self):
        # Invalid document: detect() must return a falsy encoding and
        # record at least one reason in the event log.
        eventLog = []
        try:
            encoding = xmlEncoding.detect(self.bytes, eventLog)
        except UnicodeError as u:
            self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
        if encoding:
            self.fail("'" + self.filename + "' should not parse successfully (as " + encoding + ")")
        if not(eventLog):
            self.fail("'" + self.filename + "' should give a reason for parse failure")
# Byte-order marks as raw byte strings (this module is Python 2, so plain
# string literals are byte strings).
bom8='\xEF\xBB\xBF'      # UTF-8 BOM
bom16BE='\xFE\xFF'       # UTF-16 big-endian BOM
bom16LE='\xFF\xFE'       # UTF-16 little-endian BOM
bom32BE='\x00\x00\xFE\xFF'  # UTF-32 big-endian BOM
bom32LE='\xFF\xFE\x00\x00'  # UTF-32 little-endian BOM
# Some fairly typical Unicode text. It should survive XML roundtripping.
docText=u'<x>\u201c"This\uFEFF" is\na\r\u00A3t\u20Acst\u201D</x>'
# Pattern for encoding names that are legal in an XML declaration.
validDecl = re.compile('[A-Za-z][-A-Za-z0-9._]*')

def makeDecl(enc=None):
    """Return an XML declaration, naming `enc` when one is supplied."""
    if not enc:
        return "<?xml version='1.0'?>"
    assert validDecl.match(enc), "'" + enc + "' is not a valid encoding name"
    return "<?xml version='1.0' encoding='" + enc + "'?>"
def encoded(enc, txt=docText):
    """Encode `txt` with the named codec, char-ref-escaping what won't fit."""
    encode = codecs.getencoder(enc)
    return encode(txt, 'xmlcharrefreplace')[0]
def genValidXmlTestCases():
    """Yield (expectedEncoding, tags, bytes) triples for well-formed documents.

    `tags` records which signals the bytes carry (BOM, declaration,
    endianness, 'noenc' for a declaration with no encoding attribute, ...)
    and becomes part of the generated test name.  Cases whose codec is
    unavailable in this Python build are skipped with a console note.
    """
    someFailed = False
    # Required
    yield('UTF-8', ['BOM', 'declaration'],
          bom8 + makeDecl('UTF-8') + encoded('UTF-8'))
    yield('UTF-8', [],
          encoded('UTF-8'))
    yield('UTF-8', ['noenc'],
          makeDecl() + encoded('UTF-8'))
    yield('UTF-8', ['declaration'],
          makeDecl('UTF-8') + encoded('UTF-8'))
    yield('UTF-8', ['BOM'],
          bom8 + encoded('UTF-8'))
    # BUG FIX: this 'noenc' case used makeDecl('UTF-8'), which both
    # contradicts the 'noenc' tag and duplicated the ['BOM', 'declaration']
    # case above; every other 'noenc' case uses makeDecl().
    yield('UTF-8', ['BOM', 'noenc'],
          bom8 + makeDecl() + encoded('UTF-8'))
    yield('UTF-16', ['BOM', 'declaration', 'BE'],
          bom16BE + encoded('UTF-16BE', makeDecl('UTF-16') + docText))
    yield('UTF-16', ['BOM', 'declaration', 'LE'],
          bom16LE + encoded('UTF-16LE', makeDecl('UTF-16') + docText))
    yield('UTF-16', ['BOM', 'BE'],
          bom16BE + encoded('UTF-16BE'))
    yield('UTF-16', ['BOM', 'BE', 'noenc'],
          bom16BE + encoded('UTF-16BE', makeDecl() + docText))
    yield('UTF-16', ['BOM', 'LE'],
          bom16LE + encoded('UTF-16LE'))
    yield('UTF-16', ['BOM', 'LE', 'noenc'],
          bom16LE + encoded('UTF-16LE', makeDecl() + docText))
    yield('UTF-16', ['declaration', 'BE'],
          encoded('UTF-16BE', makeDecl('UTF-16') + docText))
    yield('UTF-16', ['declaration', 'LE'],
          encoded('UTF-16LE', makeDecl('UTF-16') + docText))
    # Standard wide encodings
    try:
        yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'BE'],
              bom16BE + encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
        yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'LE'],
              bom16LE + encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
        yield('UTF-32', ['BOM', 'declaration', 'BE'],
              bom32BE + encoded('UTF-32BE', makeDecl('UTF-32') + docText))
        yield('UTF-32', ['BOM', 'declaration', 'LE'],
              bom32LE + encoded('UTF-32LE', makeDecl('UTF-32') + docText))
        yield('UTF-32', ['declaration', 'BE'],
              encoded('UTF-32BE', makeDecl('UTF-32') + docText))
        yield('UTF-32', ['declaration', 'LE'],
              encoded('UTF-32LE', makeDecl('UTF-32') + docText))
        yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'BE'],
              bom32BE + encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
        yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'LE'],
              bom32LE + encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
    except LookupError as e:
        print(e)
        someFailed = True
    # Encodings that don't have BOMs, and require declarations
    withDeclarations = [
        # Common ASCII-compatible encodings
        'US-ASCII', 'ISO-8859-1', 'ISO-8859-15', 'WINDOWS-1252',
        # EBCDIC
        'IBM037', 'IBM038',
        # Encodings with explicit endianness
        'UTF-16BE', 'UTF-16LE',
        'UTF-32BE', 'UTF-32LE',
        # (UCS doesn't seem to define endian'd encodings)
    ]
    for enc in withDeclarations:
        try:
            yield(enc, ['declaration'], encoded(enc, makeDecl(enc) + docText))
        except LookupError as e:
            print(e)
            someFailed = True
    # 10646-UCS encodings, with no BOM but with a declaration
    try:
        yield('ISO-10646-UCS-2', ['declaration', 'BE'],
              encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
        yield('ISO-10646-UCS-2', ['declaration', 'LE'],
              encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
        yield('ISO-10646-UCS-4', ['declaration', 'BE'],
              encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
        # BUG FIX: this case prepended bom32LE although it is tagged
        # 'declaration' only, contradicting the section comment above and
        # the BE twin; the BOM is now omitted.
        yield('ISO-10646-UCS-4', ['declaration', 'LE'],
              encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
    except LookupError as e:
        print(e)
        someFailed = True
    # Files with aliases for declarations. The declared alias should be
    # reported back, rather than the canonical form.
    try:
        yield('csUnicode', ['alias', 'BOM', 'BE'],
              bom16BE + encoded('UCS-2BE', makeDecl('csUnicode') + docText))
        yield('csUnicode', ['alias', 'LE'],
              encoded('UCS-2LE', makeDecl('csUnicode') + docText))
        yield('csucs4', ['alias', 'BE'],
              encoded('csucs4', makeDecl('csucs4') + docText))
    except LookupError as e:
        print(e)
        someFailed = True
    if someFailed:
        print("Unable to generate some tests; see README for details")
def genInvalidXmlTestCases():
    """Yield (encoding, tags, bytes) for documents whose signals conflict.

    Each case pairs contradictory BOM/declaration information, so
    xmlEncoding.detect() is expected to reject it.
    """
    # Invalid files
    someFailed = False
    # UTF-32 with a non-four-byte declaration
    # try:
    #     yield('UTF-32', ['BOM', 'BE', 'declaration'],
    #           encoded('UTF-32', makeDecl('US-ASCII') + docText))
    # except LookupError as e:
    #     print e
    #     someFailed = True
    # UTF-16 with a non-two-byte declaration
    yield('UTF-16', ['BOM', 'BE', 'declaration'],
          encoded('UTF-16', makeDecl('UTF-8') + docText))
    # UTF-16BE, with a BOM
    yield('UTF-16BE', ['BOM', 'declaration'],
          bom16BE + encoded('UTF-16BE', makeDecl('UTF-16BE') + docText))
    # UTF-8, with a BOM, declaring US-ASCII
    yield('UTF-8', ['BOM', 'declaration'],
          bom8 + encoded('UTF-8', makeDecl('US-ASCII') + docText))
    try:
        # UTF-32, with a BOM, beginning without a declaration
        yield('UTF-32', ['BOM', 'BE'],
              bom32BE + encoded('UTF-32BE'))
        # UTF-32, with a BOM, and a declaration with no encoding
        yield('UTF-32', ['BOM', 'BE', 'noenc'],
              bom32BE + encoded('UTF-32BE', makeDecl() + docText))
    except LookupError as e:
        print e
        someFailed = True
    # UTF-16, no BOM, no declaration
    # yield('UTF-16', ['BE'], encoded('UTF-16BE'))
    # This case falls through, and is identified as UTF-8; leave it out
    # until we're doing decoding as well as detection.
    if someFailed:
        print "Unable to generate some tests; see README for details"
def genXmlTestCases():
    """Yield (encoding, tags, bytes, isValid) for every generated case."""
    for case in genValidXmlTestCases():
        yield case + (True,)
    for case in genInvalidXmlTestCases():
        yield case + (False,)
def buildTestSuite():
    """Assemble a unittest suite from the generated encoding cases.

    Cases whose codec cannot be looked up are skipped and recorded in the
    module-level `skippedNames` list.
    """
    import codecs
    suite = unittest.TestSuite()
    for (enc, t, x, valid) in genXmlTestCases():
        t.sort()
        if valid: pfx = 'valid_'
        else: pfx = 'invalid_'
        # Synthetic file name, used only for test identification/messages.
        name = pfx + '_'.join([enc] + t) + '.xml'
        # name, x is content
        try:
            # Strip the 'ISO-10646-' prefix so codecs.lookup sees e.g. 'UCS-2'.
            alias = enc
            if enc.startswith('ISO-10646-'):
                alias = enc[10:]
            c = codecs.lookup(alias)
            if valid:
                t = EncodingTestCase('testEncodingMatches')
                t.expectedEncoding = enc
            else:
                t = EncodingTestCase('testEncodingFails')
            t.filename = name
            t.bytes = x
            suite.addTest(t)
        except LookupError as e:
            print "Skipping " + name + ": " + str(e)
            skippedNames.append(name)
    return suite
if __name__ == "__main__":
    # Run the generated suite directly; report any skipped cases afterwards.
    s = buildTestSuite()
    unittest.TextTestRunner().run(s)
    if skippedNames:
        print "Tests skipped:",len(skippedNames)
        print "Please see README for details"
| StarcoderdataPython |
176963 | <gh_stars>1-10
# missing last quote, escaped
a = 1 # make below not a doc comment
<error descr="Missing closing quote [']">'abc\'</error>
| StarcoderdataPython |
191584 | <filename>cellpack/mgl_tools/upy/examples/BasicGeom.py
"""
Copyright (C) <2010> <NAME>
This file git_upy/examples/BasicGeom.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 29 13:09:18 2010
@author: <NAME>
"""
import math
import sys, os
from random import random
try:
    import upy
except ImportError:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should trigger the fallback.
    # upy has to be on the python path; if the import fails, add a
    # hard-coded checkout location and retry.
    # pathtoupy = "/Users/ludo/DEV/upy/trunk/"
    pathtoupy = "C:\\Users\\ludov\\OneDrive\\Documents\\"
    sys.path.insert(0, pathtoupy)
    import upy
from upy import colors as col

# get the helper class for the current host (Blender, Maya, C4D, ...)
helperClass = upy.getHelperClass()
helper = helperClass()
print(helper._usenumpy)

sc = helper.getCurrentScene()

# camera and light
center = [0.0, -12.0, 40.0]
# to create a camera provide a name, a Type ("ortho" or "persp"),
# a focal angle, a center, the current scene (optional)
cam = helper.addCameraToScene("cam1", "persp", 30.0, center, sc)
# to create a light provide a name, a Type ("Area" or "Sun" or "Spot"),
# a color, a distance of influence, an intensity, a level of shadowness,
# whether it produces shadow, a center, the current scene (optional)
light = helper.addLampToScene(
    "light1",
    Type="Sun",
    rgb=[1.0, 1.0, 1.0],
    dist=20.0,
    energy=1.0,
    soft=1.0,
    shadow=True,
    center=center,
    sc=sc,
)

# We will create the title of the scene as a Text at the top of the scene.
# The text will be extruded; it returns the text object and the extruder,
# if any. Depending on the host, the extruder can be reused for other texts.
t, extruder = helper.Text(
    "text", string="upy", size=5.0, pos=[0.0, 4.0, 0.0], extrude=True
)
# helper.rotateObj(t,(0.,0.,math.pi/2.))
Y = 0.0
# the previous extruder can be reused for the next text
# NOTE(review): Text() returned a (object, extruder) pair above; here the
# result is bound to one name -- verify helper.Text's return value when
# `extrude` is an existing extruder object.
simple = helper.Text(
    "basicobjectLabel",
    string="BasicObject",
    size=2.0,
    pos=[-18.0, 0.0, 0.0],
    extrude=extruder,
)
# helper.rotateObj(simple,(0.,math.pi/2.,0.))
# create a null/empty object for organization purposes
basic = helper.newEmpty("BasicObject", location=[0.0, Y, 0.0])
# We will now create all the basic/primitive objects:
# a sphere of radius 2, quality 12, parented under the null object above.
s, ms = helper.Sphere("sphere", radius=2.0, res=12, pos=[4.0, Y, 0.0], parent=basic)
helper.changeColor(s, col.red)
# helper.rotateObj(s,(math.pi/2.,0.,0.))
c, mc = helper.Cube("cube", center=[8.0, 0.0, 0.0], size=[2.0, 2.0, 2.0], parent=basic)
helper.changeColor(c, col.blue)
helper.rotateObj(c, (math.pi / 2.0, 0.0, 0.0))
#
cy, mcy = helper.Cylinder(
    "cylinder", radius=2.0, length=4.0, res=12, pos=[-4.0, Y, 0.0], parent=basic
)
helper.changeColor(cy, col.green)
helper.rotateObj(cy, (math.pi / 2.0, 0.0, 0.0))
#
cone, mcone = helper.Cone(
    "cone", radius=1.5, length=3.0, res=9, pos=[0.0, Y, 0.0], parent=basic
)
helper.changeColor(cone, col.yellow)
helper.rotateObj(cone, (math.pi / 2.0, 0.0, 0.0))
p, mpl = helper.plane("plane", center=[-9.0, Y, 0.0], size=[5.0, 5.0], parent=basic)
# apply a texture to the plane:
# first get the image, then create the material that will host the image
filename = upy.__path__[0] + os.sep + "examples" + os.sep + "marble.jpg"
mat = helper.createTexturedMaterial("planeMat", filename)
# assign the material to the plane
helper.assignMaterial(p, mat, texture=True)
#
Y = -6.0
# NOTE(review): `complex` shadows the Python builtin of the same name.
complex = helper.Text(
    "lineobjectLabel",
    string="LineObject",
    size=2.0,
    pos=[-18.0, Y, 0.0],
    extrude=extruder,
)
# we will now create some line objects using the following coordinates
# curve pts
listPts = (
    (5.598, 5.767, 11.082),
    (8.496, 4.609, 8.837),
    (6.500, 1.584, 7.565),
    (3.545, 3.935, 6.751),
    (5.929, 6.358, 5.055),
    (7.331, 3.607, 2.791),
    (3.782, 2.599, 1.742),
    (2.890, 6.285, 1.126),
)
line = helper.newEmpty("LineObject", location=[0.0, 0.0, 0.0])
# spline
Y = -7.0
spline, mspline = helper.spline("spline", listPts, close=0, type=1, parent=line)
helper.rotateObj(spline, (0.0, math.pi / 2.0, 0.0))
helper.setTranslation(spline, [-8.377, Y, 2.556])
helper.scaleObj(spline, [0.5, 0.5, 0.5])
# we can extrude the spline using a 2D circle shape
# loft spline extrusion
extruder_spline, shape, spline_clone = helper.extrudeSpline(
    spline, shape="circle", clone=True
)
# or instance
helper.rotateObj(extruder_spline, (0.0, math.pi / 2.0, 0.0))
helper.setTranslation(extruder_spline, [-1.7, Y, 2.556])
helper.scaleObj(extruder_spline, [0.5, 0.5, 0.5])
print(extruder_spline, line)
# reParent is not supported by every host, so failure is treated as
# non-fatal. BUG FIX: the bare `except:` also swallowed SystemExit and
# KeyboardInterrupt; narrowed to Exception while keeping the best-effort
# behavior.
try:
    helper.reParent(extruder_spline, line)
except Exception:
    pass
# armature: create a bone for each curve point
armature, bones = helper.armature("armature", listPts, scn=sc, root=line)
helper.rotateObj(armature, (0.0, math.pi / 2.0, 0.0))
helper.setTranslation(armature, [4.0, Y, 2.556])
helper.scaleObj(armature, [0.5, 0.5, 0.5])
Y = -12.0
# points objects: point cloud, metaballs, particles
# Note: there is no polygon point object in Maya.
# In Maya, creating a mesh requires vertex and face information, so uPy
# creates a particle system instead.
pointsLabel = helper.Text(
    "pointsobjectLabel",
    string="PointsObject",
    size=2.0,
    pos=[-18.0, Y, 0.0],
    extrude=extruder,
)
points = helper.newEmpty("PointsObject", location=[0.0, 0.0, 0.0])
# point cloud
pointscloud, mesh_pts = helper.PointCloudObject(
    "pts_cloud", vertices=listPts, parent=points
)
helper.rotateObj(pointscloud, (0.0, math.pi / 2.0, 0.0))
helper.setTranslation(pointscloud, [-8.377, Y, 2.556])
helper.scaleObj(pointscloud, [0.5, 0.5, 0.5])
# we can extract the new coordinates after the transformation
f, modifiedVertex, n = helper.DecomposeMesh(
    pointscloud, edit=True, copy=True, tri=True, transform=True
)
# and use them to create a new particle system
# NOTE(review): `p` previously held the plane object; it is rebound here.
p = helper.particle("particle", modifiedVertex)
helper.setTranslation(pointscloud, [-2.38, Y, 2.556])
# surface metaball
# metaballs in Maya are a particle system
metab, cloud = helper.metaballs("metaballs", listPts, None, scn=sc, root=points)
helper.rotateObj(metab, (0.0, math.pi / 2.0, 0.0))
helper.setTranslation(metab, [4.0, Y, 2.556])
helper.scaleObj(metab, [0.5, 0.5, 0.5])
# Platonic-solid meshes
Y = -18.0
platonicLabel = helper.Text(
    "platonicLabel",
    string="PlatonicObject",
    size=2.0,
    pos=[-18.0, Y, 0.0],
    extrude=extruder,
)
platonic = helper.newEmpty("PlatonicObject", location=[0.0, 0.0, 0.0])
# one of each platonic solid, laid out along X
tetra, mtetra = helper.Platonic("Tetra", "tetra", 2.0, parent=platonic)
helper.setTranslation(tetra, [-8.0, Y, 0.0])
hexa, mhexa = helper.Platonic("Hexa", "hexa", 2.0, parent=platonic)
helper.setTranslation(hexa, [-4.5, Y, 0.0])
octa, mocta = helper.Platonic("Octa", "octa", 2.0, parent=platonic)
helper.setTranslation(octa, [-0.8, Y, 0.0])
dodeca, mdodeca = helper.Platonic("Dodeca", "dodeca", 2.0, parent=platonic)
helper.setTranslation(dodeca, [3.15, Y, 0.0])
icosa, micosa = helper.Platonic("Icosa", "icosa", 2.0, parent=platonic)
helper.setTranslation(icosa, [7.81, Y, 0.0])
# instances
##compute instance
##one instance
##matrice instance
##object vertice instance
Y = -24.0
instancelabel = helper.Text(
    "instanceLabel",
    string="InstanceObject",
    size=2.0,
    pos=[-18.0, Y, 0.0],
    extrude=extruder,
)
instance = helper.newEmpty("InstanceObject", location=[0.0, 0.0, 0.0])
# one instance
inst = helper.newInstance(
    "instanceOfIco", icosa, location=[-8.0, Y, 0.0], parent=instance
)
# list of instances from an object's vertices
isph = helper.newEmpty("InstanceOfSpheres", location=[0.0, 0.0, 0.0], parent=instance)
f, verts, n = helper.DecomposeMesh(inst, edit=True, copy=True, tri=True, transform=True)
print(f, verts, n)
for i, v in enumerate(verts):
    instsph = helper.newInstance("instanceOfSph" + str(i), s, location=v, parent=isph)
    helper.scaleObj(instsph, [0.1, 0.1, 0.1])
##list of instances from a list of matrices (random rotation per point)
itetra = helper.newEmpty("InstanceOfTetra", location=[0.0, 0.0, 0.0], parent=instance)
listM = []
for i in range(len(listPts)):
    m = helper.rotation_matrix(
        random() * math.pi, [random(), random(), random()], trans=modifiedVertex[i]
    )
    listM.append(m)
ipoly = helper.instancePolygon("instOfTetra", matrices=listM, mesh=tetra, parent=itetra)
helper.setTranslation(itetra, [6.0, -14, 0.0])  # ?
##execfile("/Users/ludo/DEV/upy/trunk/upy/examples/BasicGeom.py")
# Blender Text Run Python Script
# maya open and run in the console OR execfile("pathto/pyubic/examples/Cube_Sphere.py")
# dejavu mgtloos/bin/pythonsh -i BasicGeom.py
| StarcoderdataPython |
111513 | import copy
import pickle
from pathlib import Path
from typing import Set
from networkx.classes.digraph import DiGraph
from networkx.classes.function import (
set_edge_attributes,
set_node_attributes,
non_edges,
)
from src.data.scripts.utils import apx2nxgraph, nxgraph2apx
from src.data.solvers.AcceptanceSolver import AcceptanceSolver
class ArgumentationFramework:
    """Argumentation framework over a directed attack graph.

    Wraps a networkx ``DiGraph`` whose nodes are arguments (assumed to be the
    contiguous ids ``0 .. num_arguments - 1`` — see :attr:`arguments`) and
    whose edges are attacks.  Provides extension bookkeeping per semantics,
    credulous/sceptical acceptance queries, verification of candidate
    extensions, and cached graph representations for learning models.
    """

    # The attack graph; nodes are argument ids, edge (u, v) means u attacks v.
    graph: DiGraph

    @classmethod
    def from_pkl(cls, pkl_path: Path):
        """Load a framework from a pickled state dict at *pkl_path*.

        NOTE(review): ``pickle.load`` can execute arbitrary code from the
        file — only call this on trusted paths.
        """
        # Use a context manager so the file handle is closed deterministically
        # (the original left the handle to the garbage collector).
        with open(pkl_path, "rb") as f:
            state_dict = pickle.load(f)
        # Legacy pickles stored the identifier under "_id".
        if "_id" in state_dict:
            state_dict["id"] = state_dict.pop("_id")
        return cls(**state_dict)

    @classmethod
    def from_apx(cls, apx: str, id=None):
        """Initialize an AF object from the text of an apx file."""
        graph = apx2nxgraph(apx)
        return cls(id, graph)

    def __init__(self, id, graph, extensions=None, **kwargs):
        # extensions: mapping semantics name -> collection of extensions,
        # each extension a frozenset of argument ids.
        self.extensions = extensions if extensions is not None else {}
        self.graph = graph
        self.representations = {}  # cache used by get_representation
        self.id = id

    def to_apx(self):
        """Serialize the attack graph back to apx text."""
        return nxgraph2apx(self.graph)

    def edge_hamming_distance(self, AF: "ArgumentationFramework"):
        """Return the number of attacks present in exactly one framework."""
        edges1 = set(self.graph.edges)
        edges2 = set(AF.graph.edges)
        return len(edges1.symmetric_difference(edges2))

    def get_extensions_containing_s(self, semantic, S: set) -> Set[frozenset]:
        """Return the extensions under *semantic* that include every argument in S."""
        return {
            extension
            for extension in self.extensions[semantic]
            if S.issubset(extension)
        }

    def get_cred_accepted_args(self, semantic, S: frozenset = None) -> frozenset:
        """Credulously accepted arguments: the union of the extensions.

        When *S* is given, only extensions containing S are considered.
        Returns the empty frozenset when no extension qualifies.
        """
        extensions = (
            self.extensions[semantic]
            if S is None
            else self.get_extensions_containing_s(semantic, S)
        )
        if len(extensions) > 0:
            return frozenset.union(*extensions)
        return frozenset()

    def get_scept_accepted_args(self, semantic, S: frozenset = None) -> frozenset:
        """Sceptically accepted arguments: the intersection of the extensions.

        When *S* is given, only extensions containing S are considered.
        Returns the empty frozenset when no extension qualifies.
        """
        extensions = (
            self.extensions[semantic]
            if S is None
            else self.get_extensions_containing_s(semantic, S)
        )
        if len(extensions) > 0:
            return frozenset.intersection(*extensions)
        return frozenset()

    @property
    def state_dict(self) -> dict:
        """Shallow copy of the instance attributes (round-trips via from_pkl)."""
        return self.__dict__.copy()

    @property
    def num_arguments(self) -> int:
        return len(self.graph.nodes)

    @property
    def num_attacks(self) -> int:
        return len(self.graph.edges)

    @property
    def arguments(self) -> set:
        # Arguments are assumed to be the contiguous ids 0..num_arguments-1.
        return set(range(self.num_arguments))

    def get_representation(self, type) -> DiGraph:
        """Return (and cache) the graph representation named *type*."""
        assert type in ["base", "AGNN", "enforcement", "FM2", "GCN"]
        if type not in self.representations:
            self.representations[type] = getattr(self, f"get_{type}_representation")()
        return self.representations[type]

    def get_base_representation(self) -> DiGraph:
        """Deep copy of the attack graph with a zeroed 'node_input' attribute."""
        graph = copy.deepcopy(self.graph)
        set_node_attributes(graph, 0, "node_input")
        return graph

    def get_AGNN_representation(self) -> DiGraph:
        """Base representation plus a zeroed 'node_y' target attribute."""
        graph = self.get_base_representation()
        set_node_attributes(graph, 0, "node_y")
        return graph

    def get_GCN_representation(self) -> DiGraph:
        """AGNN representation plus a constant 'node_x' feature of 1.0."""
        graph = self.get_AGNN_representation()
        set_node_attributes(graph, float(1), "node_x")
        return graph

    def get_FM2_representation(self) -> DiGraph:
        """AGNN representation with in/out-degree node features."""
        graph = self.get_AGNN_representation()
        for node in graph.nodes:
            graph.nodes[node]["node_x_in"] = float(graph.in_degree(node))
            graph.nodes[node]["node_x_out"] = float(graph.out_degree(node))
        return graph

    def get_enforcement_representation(self) -> DiGraph:
        """Fully-connected representation encoding edge presence.

        edge_input: 1 = existing attack, 0 = absent attack,
        3 = existing self-attack, 2 = absent self-attack.
        """
        graph = self.get_base_representation()
        set_edge_attributes(graph, 1, "edge_input")
        for u, v in non_edges(graph):
            graph.add_edge(u, v, edge_input=0)
        # Self attacks get their own codes (3 existing / 2 absent).
        for n in graph.nodes:
            if graph.has_edge(n, n):
                graph.edges[n, n]["edge_input"] = 3
            else:
                graph.add_edge(n, n, edge_input=2)
        set_edge_attributes(graph, 0, "edge_y")
        return graph

    def verify(self, S: frozenset, semantics, solver=None):
        """Check whether S is an extension under *semantics*.

        GR/PR require a *solver*; ST/CO are decided directly.
        Raises on unknown semantics.
        """
        if semantics == "ST":
            return self.verify_stable(S)
        elif semantics == "CO":
            return self.verify_complete(S)
        elif semantics in ["GR", "PR"]:
            return self.verify_solver(S, semantics, solver)
        else:
            raise Exception("Semantics not known")

    def verify_stable(self, S: frozenset):
        # "the set of arguments which are not attacked by S and then testing
        # if this set is equal to S"
        not_attacked_by_S = self.arguments - self.attacked_by(S)
        return S == frozenset(not_attacked_by_S)

    def verify_complete(self, S: frozenset):
        # "Compute the set of arguments defended by S, the set of arguments
        # not attacked by S and then to test if their intersection is equal
        # to S."
        attacked_by_S = self.attacked_by(S)
        defended_by_S = set()
        for arg in self.arguments:
            attackers = set(self.graph.predecessors(arg))
            # arg is defended if every attacker is counter-attacked by S
            if attackers.issubset(attacked_by_S):
                defended_by_S.add(arg)
        not_attacked_by_S = self.arguments - attacked_by_S
        intersection = defended_by_S.intersection(not_attacked_by_S)
        return S == frozenset(intersection)

    def verify_solver(self, S: frozenset, semantics, solver: AcceptanceSolver):
        """Delegate verification to an external solver enumeration."""
        return S in solver.solve(self, semantics)

    def attacked_by(self, S: frozenset):
        """Return the set of arguments attacked by at least one member of S."""
        return {
            attacked_arg
            for arg in S
            for attacked_arg in self.graph.successors(arg)
        }
| StarcoderdataPython |
4805441 | <gh_stars>0
#! /usr/bin/python
# Copyright (c) 2007-13 <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from cgi import parse_qs
from urllib import urlencode, unquote_plus
from roundup.cgi.TranslationService import get_translation
from roundup.exceptions import Reject
import common
def fix_url_and_template (new_values, url) :
    """Extract the template name from a query URL and normalise the URL.

    Looks for ``@template``/``:template`` parameters in *url*; the first one
    found (or an explicit ``new_values ['tmplate']``) becomes the template
    name.  If any such parameter was removed, the remaining parameters are
    re-encoded and written back to ``new_values ['url']`` (side effect).
    Returns the template name, defaulting to ``'index'``.

    NOTE(review): relies on ``dict.iteritems`` and ``urllib.urlencode`` —
    this module targets Python 2.
    """
    tmplate = new_values.get ('tmplate')
    deleted = False
    #print "url before:", url
    if url :
        urldict = parse_qs (url)
        #print urldict
        # the template parameter may be spelled "@template" or ":template"
        for k in '@:' :
            key = k + 'template'
            if key in urldict :
                # an explicitly supplied tmplate wins over the URL parameter
                tmplate = tmplate or urldict [key][0]
                del urldict [key]
                deleted = True
        if deleted :
            # parse_qs yields lists of values; flatten before re-encoding
            for k, v in urldict.iteritems () :
                urldict [k] = ','.join (v)
            new_values ['url'] = unquote_plus (urlencode (urldict))
    #print "url after:", new_values ['url']
    #print "tmplate:", tmplate or 'index'
    return tmplate or 'index'
# end def fix_url_and_template
def check_klass (db, cl, nodeid, new_values) :
    """Auditor: require a 'klass' attribute and reject any value that is
       not the name of a class of this tracker.
    """
    common.require_attributes (_, cl, nodeid, new_values, 'klass')
    klass = new_values.get ('klass')
    if klass and klass not in db.classes :
        raise Reject (_ ("Invalid Class: %(klass)s") % locals ())
# end def check_klass
def new_query (db, cl, nodeid, new_values) :
    """Auditor for query creation: derive the template name from the URL
       (cleaning the URL as a side effect) and store it in the new values.
    """
    new_values ['tmplate'] = fix_url_and_template \
        (new_values, new_values.get ('url'))
# end def new_query
def check_query (db, cl, nodeid, new_values) :
    """Auditor for query updates: re-derive the template name from the
       (possibly updated) URL and fill in an explicitly emptied tmplate.
    """
    url = new_values.get ('url', cl.get (nodeid, 'url'))
    # fix_url_and_template may rewrite new_values ['url'] as a side effect,
    # so it must run unconditionally.
    tmplate = fix_url_and_template (new_values, url)
    # only replace a tmplate that was explicitly set to an empty value
    if not new_values.get ('tmplate', True) :
        new_values ['tmplate'] = tmplate
# end def check_query
def init (db) :
    """Set up the tracker translation and register the query auditors."""
    global _
    _ = get_translation \
        (db.config.TRACKER_LANGUAGE, db.config.TRACKER_HOME).gettext
    # registration order matters: new_query/check_query fix the URL before
    # check_klass validates the class name
    for event, auditor in \
        ( ("create", new_query)
        , ("set",    check_query)
        , ("create", check_klass)
        , ("set",    check_klass)
        ) :
        db.query.audit (event, auditor)
# end def init
| StarcoderdataPython |
4805882 | import requests
class HttpApi(object):
    """A minimal wrapper around the LeanKitKanban HTTP API.

    Every public method issues a GET against one LeanKit endpoint and
    returns the decoded JSON payload.
    """

    def __init__(self, account_name, username, password, _service_base=None):
        self.account_name = account_name
        self._service_base = _service_base
        self.session = requests.session(auth=(username, password))

    def _get_endpoint(self, name, board_id=''):
        """Build the full URL for endpoint *name*.

        All LeanKit endpoints embed the account name.  For the ``Boards``
        endpoint the (optional) board id follows the endpoint name; for any
        other endpoint a non-empty id is inserted *before* the name as
        ``Board/<id>``.
        """
        base = "http://%(account)s.leankitkanban.com/Kanban/Api/" % {
            'account': self.account_name,
        }
        if name == 'Boards':
            parts = [name]
            if board_id != '':
                parts.append(str(board_id))
        else:
            parts = []
            if board_id != '':
                parts.append('Board/%s' % str(board_id))
            parts.append(name)
        return base + '/'.join(parts)

    def _get_json(self, endpoint):
        """GET *endpoint* through the authenticated session, return JSON."""
        response = self.session.get(endpoint)
        return response.json

    def get_boards(self):
        """Wraps LeanKit's GetBoards.
        See http://support.leankitkanban.com/entries/20264797-get-boards
        """
        return self._get_json(self._get_endpoint('Boards'))

    def get_board_identifiers(self, board_id):
        """Wraps LeanKit's GetBoardIdentifiers.
        See http://support.leankitkanban.com/entries/20267921-getboardidentifiers
        """
        return self._get_json(self._get_endpoint('GetBoardIdentifiers', board_id))

    def get_board(self, board_id):
        """Wraps LeanKit's GetBoard.
        See http://support.leankitkanban.com/entries/20267956-get-board
        """
        return self._get_json(self._get_endpoint('Boards', board_id))

    def get_newer_if_exists(self, board_id, board_version):
        """Wraps LeanKit's GetNewerIfExists.
        See http://support.leankitkanban.com/entries/20267966-getnewerifexists
        """
        endpoint = '/'.join((
            self._get_endpoint('BoardVersion', board_id),
            str(board_version),
            'GetNewerIfExists'))
        return self._get_json(endpoint)

    def get_board_history_since(self, board_id, board_version):
        """Wraps LeanKit's GetBoardHistorySince.
        See http://support.leankitkanban.com/entries/20267971-getboardhistorysince
        """
        endpoint = '/'.join((
            self._get_endpoint('BoardVersion', board_id),
            str(board_version),
            'GetBoardHistorySince'))
        return self._get_json(endpoint)

    def get_card(self, board_id, card_id):
        """Wraps LeanKit's GetCard.
        See http://support.leankitkanban.com/entries/20267991-getcard
        """
        endpoint = '/'.join((self._get_endpoint('GetCard', board_id),
                             str(card_id)))
        return self._get_json(endpoint)

    def get_card_by_external_id(self, board_id, extneral_id):
        """Wraps LeanKit's GetCardByExternalId.
        See http://support.leankitkanban.com/entries/20268001-getcardbyexternalid

        (The misspelled parameter name is kept from the original since
        callers may pass it by keyword.)
        """
        endpoint = '/'.join((self._get_endpoint('GetCardByExternalId', board_id),
                             str(extneral_id)))
        return self._get_json(endpoint)
| StarcoderdataPython |
1600723 | #!/usr/bin/env python3
from pyteal import *
# 45 parts out of 10000 from each swap goes to liquidity providers
# 5 parts out of 10000 from each swap goes to the developers
# --- Escrow local-state keys (read/written via App.localGet/localPut) ---
KEY_TOTAL_TOKEN1_BALANCE = Bytes("B1")  # pool reserve of token 1
KEY_TOTAL_TOKEN2_BALANCE = Bytes("B2")  # pool reserve of token 2
KEY_TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED = Bytes("LD")  # LP tokens in circulation
KEY_PROTOCOL_UNUSED_TOKEN1 = Bytes("P1")  # accumulated protocol fees, token 1
KEY_PROTOCOL_UNUSED_TOKEN2 = Bytes("P2")  # accumulated protocol fees, token 2
KEY_TOKEN1 = Bytes("T1")  # asset id of token 1 (set at opt-in)
KEY_TOKEN2 = Bytes("T2")  # asset id of token 2 (set at opt-in)
KEY_LIQUIDITY_TOKEN = Bytes("LT")  # asset id of the pair's liquidity token
# --- User local-state key prefixes; each is concatenated with an address
#     (Txn.accounts[1] — presumably the escrow address; verify against caller)
KEY_USER_UNUSED_TOKEN1 = Bytes("U1")
KEY_USER_UNUSED_TOKEN2 = Bytes("U2")
KEY_USER_UNUSED_LIQUIDITY = Bytes("UL")
# --- Values of application_args[0] selecting the operation ---
TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN1_TO_TOKEN2 = Bytes("s1")
TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN2_TO_TOKEN1 = Bytes("s2")
TRANSACTION_TYPE_ADD_LIQUIDITY_DEPOSIT = Bytes("a")
TRANSACTION_TYPE_WITHDRAW_LIQUIDITY = Bytes("w")
TRANSACTION_TYPE_REFUND = Bytes("r")
TRANSACTION_TYPE_WITHDRAW_PROTOCOL_FEES = Bytes("p")
def approval_program():
    """
    This smart contract implements the Manager part of the AlgoSwap DEX.
    It maintains the global and local storage for users and escrow contracts
    that are opted into the AlgoSwap protocol for every possible atomic
    transaction group that AlgoSwap supports (Swap Token 1 for Token 2,
    Swap Token 2 for Token 1, Add Liquidity, Withdraw Liquidity, Withdraw
    Protocol Fees, and Refund).
    Any atomic transaction group MUST have a transaction to the manager
    smart contract as the second transaction of the group to proceed.
    Commands:
    s1 Swap Token 1 for Token 2 in a liquidity pair
    s2 Swap Token 2 for Token 1 in a liquidity pair
    a Add liquidity to a liquidity pool
    w Withdraw liquidity from a liquidity pool
    r Get a refund of unused tokens
    p Withdraw protocol fees (Developer only)
    """
    # Read from additional account (Txn.accounts[1] — the escrow's local state)
    read_key_token1 = App.localGet(Int(1), KEY_TOKEN1)
    read_key_token2 = App.localGet(Int(1), KEY_TOKEN2)
    read_key_liquidity_token = App.localGet(Int(1), KEY_LIQUIDITY_TOKEN)
    read_key_total_token1_bal = App.localGet(Int(1), KEY_TOTAL_TOKEN1_BALANCE)
    read_key_total_token2_bal = App.localGet(Int(1), KEY_TOTAL_TOKEN2_BALANCE)
    read_key_total_liquidity_token_distributed = App.localGet(Int(1), KEY_TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED)
    read_protocol_unused_token1 = App.localGet(Int(1), KEY_PROTOCOL_UNUSED_TOKEN1)
    read_protocol_unused_token2 = App.localGet(Int(1), KEY_PROTOCOL_UNUSED_TOKEN2)

    # Write to additional account
    def write_key_total_token1_bal(bal: Int): return App.localPut(Int(1), KEY_TOTAL_TOKEN1_BALANCE, bal)
    def write_key_total_token2_bal(bal: Int): return App.localPut(Int(1), KEY_TOTAL_TOKEN2_BALANCE, bal)
    def write_key_total_liquidity_token_distributed(bal: Int): return App.localPut(Int(1), KEY_TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED, bal)
    def write_protocol_unused_token1(amount: Int): return App.localPut(Int(1), KEY_PROTOCOL_UNUSED_TOKEN1, amount)
    def write_protocol_unused_token2(amount: Int): return App.localPut(Int(1), KEY_PROTOCOL_UNUSED_TOKEN2, amount)

    # Read from sender account (keys are prefixed with an address argument)
    def read_user_unused_token1(address: Bytes): return App.localGet(Int(0), Concat(KEY_USER_UNUSED_TOKEN1, address))
    def read_user_unused_token2(address: Bytes): return App.localGet(Int(0), Concat(KEY_USER_UNUSED_TOKEN2, address))
    def read_user_unused_liquidity(address: Bytes): return App.localGet(Int(0), Concat(KEY_USER_UNUSED_LIQUIDITY, address))

    # Write to sender account
    def write_user_unused_token1(address: Bytes, amount: Int): return App.localPut(Int(0), Concat(KEY_USER_UNUSED_TOKEN1, address), amount)
    def write_user_unused_token2(address: Bytes, amount: Int): return App.localPut(Int(0), Concat(KEY_USER_UNUSED_TOKEN2, address), amount)
    def write_user_unused_liquidity(address: Bytes, amount: Int): return App.localPut(Int(0), Concat(KEY_USER_UNUSED_LIQUIDITY, address), amount)

    # Scratch Vars
    scratchvar_token1_used = ScratchVar(TealType.uint64)
    scratchvar_token2_used = ScratchVar(TealType.uint64)
    scratchvar_total_token1_bal = ScratchVar(TealType.uint64)
    scratchvar_total_token2_bal = ScratchVar(TealType.uint64)
    scratchvar_total_liquidity_token_distributed = ScratchVar(TealType.uint64)
    scratchvar_swap_token2_output = ScratchVar(TealType.uint64)
    scratchvar_swap_token1_output = ScratchVar(TealType.uint64)
    scratchvar_token1_available = ScratchVar(TealType.uint64)
    scratchvar_token2_available = ScratchVar(TealType.uint64)
    scratchvar_new_liquidity = ScratchVar(TealType.uint64)
    scratchvar_temp = ScratchVar(TealType.uint64)

    # App creation and close-out are approved unconditionally.
    on_create = Int(1)
    on_closeout = Int(1)

    # Opt-in with 3 args initializes the sender's local state as an escrow
    # (liquidity token, token 1 and token 2 asset ids); a plain opt-in
    # (fewer args) is simply approved.
    on_opt_in = If(Txn.application_args.length() == Int(3),
        Seq([
            # initialize sender's local state as an escrow
            App.localPut(Int(0), KEY_LIQUIDITY_TOKEN, Btoi(Txn.application_args[0])),
            App.localPut(Int(0), KEY_TOKEN1, Btoi(Txn.application_args[1])),
            App.localPut(Int(0), KEY_TOKEN2, Btoi(Txn.application_args[2])),
            Int(1),
        ]),
        Int(1)
    )

    # 199/200 = 9950/10000 = 0.45% (45/10000) swap fee and 0.05% (5/10000) protocol fee
    def swap_token_input_minus_fees(asset_amount: Int): return (asset_amount * Int(199)) / Int(200)

    # Constant-product output: y - x*y / (x + dx)
    def swap_token2_output(token1_input_minus_fees: Int):
        return read_key_total_token2_bal - (read_key_total_token1_bal * read_key_total_token2_bal) / (read_key_total_token1_bal + token1_input_minus_fees)

    # "s1": swap token 1 (deposited in Gtxn[2]) for token 2.
    on_swap_deposit = Seq([
        scratchvar_swap_token2_output.store(swap_token2_output(swap_token_input_minus_fees(Gtxn[2].asset_amount()))),
        # Add protocol fee to the protocol fees account
        # PROTOCOL_UNUSED_TOKEN1 = PROTOCOL_UNUSED_TOKEN1 + token1_input * protocol_fee
        write_protocol_unused_token1(
            read_protocol_unused_token1 + (Gtxn[2].asset_amount() / Int(2000))
        ),
        # Assert token2_output >= min_token2_received_from_algoswap
        Assert(
            scratchvar_swap_token2_output.load() >= Btoi(Txn.application_args[1])
        ),
        # USER_UNUSED_TOKEN2 = USER_UNUSED_TOKEN2 + token2_output
        write_user_unused_token2(
            Txn.accounts[1],
            read_user_unused_token2(Txn.accounts[1]) + scratchvar_swap_token2_output.load()
        ),
        # Update total balance
        # TOTAL_TOKEN1_BALANCE = TOTAL_TOKEN1_BALANCE + (token1_input * swap_fee) + token1_input_minus_fees
        write_key_total_token1_bal(
            read_key_total_token1_bal + ((Gtxn[2].asset_amount() * Int(9)) / Int(2000)) + swap_token_input_minus_fees(Gtxn[2].asset_amount())
        ),
        # TOTAL_TOKEN2_BALANCE = TOTAL_TOKEN2_BALANCE - token2_output
        write_key_total_token2_bal(
            read_key_total_token2_bal - scratchvar_swap_token2_output.load()
        ),
        # successful approval
        Int(1)
    ])

    # Constant-product output in the other direction: x - x*y / (y + dy)
    def swap_token1_output(token2_input_minus_fees: Int):
        return read_key_total_token1_bal - (read_key_total_token1_bal * read_key_total_token2_bal) / (read_key_total_token2_bal + token2_input_minus_fees)

    # "s2": swap token 2 (deposited in Gtxn[2]) for token 1 — mirror of "s1".
    on_swap_deposit_2 = Seq([
        scratchvar_swap_token1_output.store(swap_token1_output(swap_token_input_minus_fees(Gtxn[2].asset_amount()))),
        # add protocol fee to protocol fees account
        write_protocol_unused_token2(
            read_protocol_unused_token2 + (Gtxn[2].asset_amount() / Int(2000))
        ),
        # assert token 1 output >= min_token1_received_from_algoswap
        Assert(
            scratchvar_swap_token1_output.load() >= Btoi(Txn.application_args[1])
        ),
        # set user unused token1 += token1_output
        write_user_unused_token1(
            Txn.accounts[1],
            read_user_unused_token1(Txn.accounts[1]) + scratchvar_swap_token1_output.load()
        ),
        # update total token2 balance = total_token2_balance + swap fees + swap_token_input_minus_fees
        write_key_total_token2_bal(read_key_total_token2_bal + ((Gtxn[2].asset_amount() * Int(9)) / Int(2000)) + swap_token_input_minus_fees(Gtxn[2].asset_amount())),
        # update total token1 balance
        write_key_total_token1_bal(read_key_total_token1_bal - scratchvar_swap_token1_output.load()),
        # successful approval
        Int(1),
    ])

    # "a": add liquidity; token 1 arrives in Gtxn[2], token 2 in Gtxn[3].
    on_add_liquidity_deposit = Seq([
        scratchvar_total_token1_bal.store(read_key_total_token1_bal),
        scratchvar_total_token2_bal.store(read_key_total_token2_bal),
        scratchvar_total_liquidity_token_distributed.store(read_key_total_liquidity_token_distributed),
        If(
            # If TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED = 0
            scratchvar_total_liquidity_token_distributed.load() == Int(0),
            # Then, token1_used = token1_deposit
            # token2_used = token2_deposit
            # new_liquidity = token1_deposit
            Seq([
                scratchvar_token1_used.store(Gtxn[2].asset_amount()),
                scratchvar_token2_used.store(Gtxn[3].asset_amount()),
                scratchvar_new_liquidity.store(Gtxn[2].asset_amount()),
            ]),
            # Else, token1_used = min(token1_deposit, (token2_deposit * TOTAL_TOKEN1_BALANCE / TOTAL_TOKEN2_BALANCE))
            # token2_used = min(token2_deposit, token1_deposit * TOTAL_TOKEN2_BALANCE / TOTAL_TOKEN1_BALANCE)
            Seq([
                scratchvar_temp.store(Gtxn[3].asset_amount() * scratchvar_total_token1_bal.load() / scratchvar_total_token2_bal.load()),
                If(
                    # If token1_deposit is min
                    Gtxn[2].asset_amount() < scratchvar_temp.load(),
                    # token1_used = token1_deposit
                    scratchvar_token1_used.store(Gtxn[2].asset_amount()),
                    # Else, token1_used = (token2_deposit * TOTAL_TOKEN1_BALANCE / TOTAL_TOKEN2_BALANCE))
                    scratchvar_token1_used.store(scratchvar_temp.load()),
                ),
                scratchvar_temp.store(Gtxn[2].asset_amount() * scratchvar_total_token2_bal.load() / scratchvar_total_token1_bal.load()),
                If(
                    # If token2_deposit is min
                    Gtxn[3].asset_amount() < scratchvar_temp.load(),
                    # token2_used = token2_deposit
                    scratchvar_token2_used.store(Gtxn[3].asset_amount()),
                    # Else, token2_used = token1_deposit * TOTAL_TOKEN2_BALANCE / TOTAL_TOKEN1_BALANCE)
                    scratchvar_token2_used.store(scratchvar_temp.load()),
                ),
                scratchvar_new_liquidity.store(scratchvar_total_liquidity_token_distributed.load() * Gtxn[2].asset_amount() / scratchvar_total_token1_bal.load()),
            ])
        ),
        # Assert new_liquidity >= min_liquidity_received_from_algoswap
        Assert(
            scratchvar_new_liquidity.load() >= Btoi(Txn.application_args[1]),
        ),
        # USER_UNUSED_TOKEN1 = USER_UNUSED_TOKEN1 + token1_deposit - token1_used
        # USER_UNUSED_TOKEN1 = USER_UNUSED_TOKEN1 + token1_deposit - (token2_deposit * TOTAL_TOKEN1_BALANCE / TOTAL_TOKEN2_BALANCE)
        write_user_unused_token1(
            Txn.accounts[1],
            read_user_unused_token1(Txn.accounts[1]) + Gtxn[2].asset_amount() - scratchvar_token1_used.load()
        ),
        # token2_used = min(token2_deposit, (token1_deposit * TOTAL_TOKEN2_BALANCE / TOTAL_TOKEN1_BALANCE))
        # If token2_deposit > (token1_deposit * TOTAL_TOKEN2_BALANCE / TOTAL_TOKEN1_BALANCE)
        # USER_UNUSED_TOKEN2 = USER_UNUSED_TOKEN2 + token2_deposit - token2_used
        # USER_UNUSED_TOKEN2 = USER_UNUSED_TOKEN2 + token2_deposit - (token1_deposit * TOTAL_TOKEN2_BALANCE / TOTAL_TOKEN1_BALANCE)
        write_user_unused_token2(
            Txn.accounts[1],
            read_user_unused_token2(Txn.accounts[1]) + Gtxn[3].asset_amount() - scratchvar_token2_used.load()
        ),
        # total_liquidity = Total Supply (LIQUIDITY_TOKEN(ESCROW(TOKEN1, TOKEN2))) - Balance (RESERVE_ADDR(LIQUIDITY_TOKEN(ESCROW(TOKEN1, TOKEN2))))
        # USER_UNUSED_LIQUIDITY = USER_UNUSED_LIQUIDITY + total_liquidity * token1_deposit / TOTAL_TOKEN1_BALANCE
        write_user_unused_liquidity(
            Txn.accounts[1],
            read_user_unused_liquidity(Txn.accounts[1]) + scratchvar_new_liquidity.load()
        ),
        # TOTAL_TOKEN1_BALANCE = TOTAL_TOKEN1_BALANCE + token1_used
        write_key_total_token1_bal(scratchvar_total_token1_bal.load() + scratchvar_token1_used.load()),
        # TOTAL_TOKEN2_BALANCE = TOTAL_TOKEN2_BALANCE + token2_used
        write_key_total_token2_bal(scratchvar_total_token2_bal.load() + scratchvar_token2_used.load()),
        # TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED += new_liquidity
        write_key_total_liquidity_token_distributed(scratchvar_total_liquidity_token_distributed.load() + scratchvar_new_liquidity.load()),
        Int(1)
    ])

    # "w": burn LP tokens (Gtxn[2]) for a pro-rata share of both reserves.
    on_withdraw_liquidity = Seq([
        # total_liquidity = TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED
        # user_liquidity = Asset Amount (Txn)
        # token1_available = TOTAL_TOKEN1_BALANCE * user_liquidity / total_liquidity
        scratchvar_token1_available.store(read_key_total_token1_bal * Gtxn[2].asset_amount() / read_key_total_liquidity_token_distributed),
        # USER_UNUSED_TOKEN1 = USER_UNUSED_TOKEN1 + token1_available
        write_user_unused_token1(
            Txn.accounts[1],
            read_user_unused_token1(Txn.accounts[1]) + scratchvar_token1_available.load()
        ),
        # token2_available = TOTAL_TOKEN2_BALANCE * user_liquidity / total_liquidity
        scratchvar_token2_available.store(read_key_total_token2_bal * Gtxn[2].asset_amount() / read_key_total_liquidity_token_distributed),
        # USER_UNUSED_TOKEN2 = USER_UNUSED_TOKEN2 + token2_available
        write_user_unused_token2(
            Txn.accounts[1],
            read_user_unused_token2(Txn.accounts[1]) + scratchvar_token2_available.load()
        ),
        # Assert token1_available >= min_token1_received_from_algoswap
        # Assert token2_available >= min_token2_received_from_algoswap
        Assert(
            And(
                scratchvar_token1_available.load() >= Btoi(Txn.application_args[1]),
                scratchvar_token2_available.load() >= Btoi(Txn.application_args[2]),
            )
        ),
        # TOTAL_TOKEN1_BALANCE = TOTAL_TOKEN1_BALANCE - token1_available
        write_key_total_token1_bal(
            read_key_total_token1_bal - scratchvar_token1_available.load()
        ),
        # TOTAL_TOKEN2_BALANCE = TOTAL_TOKEN2_BALANCE - token2_available
        write_key_total_token2_bal(
            read_key_total_token2_bal - scratchvar_token2_available.load()
        ),
        # TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED = TOTAL_LIQUIDITY_TOKEN_DISTRIBUTED - user_liquidity
        write_key_total_liquidity_token_distributed(
            read_key_total_liquidity_token_distributed - Gtxn[2].asset_amount()
        ),
        Int(1)
    ])

    # "r": the asset transferred in Gtxn[2] determines which unused balance
    # is being refunded; the matching user counter is decremented.
    on_refund = Seq([
        Cond([
            # this AssetTransfer is for an available amount of TOKEN1
            And(
                Gtxn[2].xfer_asset() == read_key_token1,
                Gtxn[2].asset_amount() <= read_user_unused_token1(Txn.accounts[1])
            ),
            # unused_token1 = Gtxn[2].asset_amount()
            # USER_UNUSED_TOKEN1 = USER_UNUSED_TOKEN1 - unused_token1
            write_user_unused_token1(
                Txn.accounts[1],
                read_user_unused_token1(Txn.accounts[1]) - Gtxn[2].asset_amount()
            ),
        ], [
            # this AssetTransfer is for an available amount of TOKEN2
            And(
                Gtxn[2].xfer_asset() == read_key_token2,
                Gtxn[2].asset_amount() <= read_user_unused_token2(Txn.accounts[1])
            ),
            # unused_token2 = Gtxn[2].asset_amount()
            # USER_UNUSED_TOKEN2 = USER_UNUSED_TOKEN2 - unused_token2
            write_user_unused_token2(
                Txn.accounts[1],
                read_user_unused_token2(Txn.accounts[1]) - Gtxn[2].asset_amount()
            ),
        ], [
            # this AssetTransfer is for an available amount of LIQUIDITY_TOKEN
            And(
                Gtxn[2].xfer_asset() == read_key_liquidity_token,
                Gtxn[2].asset_amount() <= read_user_unused_liquidity(Txn.accounts[1])
            ),
            # unused_liquidity = Gtxn[2].asset_amount()
            # USER_UNUSED_LIQUIDITY = USER_UNUSED_LIQUIDITY - unused_liquidity
            write_user_unused_liquidity(
                Txn.accounts[1],
                read_user_unused_liquidity(Txn.accounts[1]) - Gtxn[2].asset_amount()
            ),
        ]),
        Int(1),
    ])

    # "p": developer withdraws accumulated protocol fees
    # (token 1 in Gtxn[2], token 2 in Gtxn[3]).
    on_withdraw_protocol_fees = Seq([
        Assert(And(
            # this TOKEN1 AssetTransfer is for an available amount of TOKEN1
            Gtxn[2].asset_amount() <= read_protocol_unused_token1,
            # this TOKEN2 AssetTransfer is for an available amount of TOKEN2
            Gtxn[3].asset_amount() <= read_protocol_unused_token2,
        )),
        # withdraw_token1 = Gtxn[2].asset_amount()
        # PROTOCOL_UNUSED_TOKEN1 = PROTOCOL_UNUSED_TOKEN1 - withdraw_token1
        write_protocol_unused_token1(
            read_protocol_unused_token1 - Gtxn[2].asset_amount()
        ),
        # withdraw_token2 = Gtxn[2].asset_amount()
        # PROTOCOL_UNUSED_TOKEN2 = PROTOCOL_UNUSED_TOKEN2 - withdraw_token2
        write_protocol_unused_token2(
            read_protocol_unused_token2 - Gtxn[3].asset_amount()
        ),
        Int(1),
    ])

    # Dispatch on completion type / application_args[0] command byte.
    program = Cond(
        [Txn.application_id() == Int(0),
         on_create],
        [Txn.on_completion() == OnComplete.CloseOut,
         on_closeout],
        [Txn.on_completion() == OnComplete.OptIn,
         on_opt_in],
        [Txn.application_args[0] == TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN1_TO_TOKEN2,
         on_swap_deposit],
        [Txn.application_args[0] == TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN2_TO_TOKEN1,
         on_swap_deposit_2],
        [Txn.application_args[0] == TRANSACTION_TYPE_ADD_LIQUIDITY_DEPOSIT,
         on_add_liquidity_deposit],
        [Txn.application_args[0] == TRANSACTION_TYPE_WITHDRAW_LIQUIDITY,
         on_withdraw_liquidity],
        [Txn.application_args[0] == TRANSACTION_TYPE_REFUND,
         on_refund],
        [Txn.application_args[0] == TRANSACTION_TYPE_WITHDRAW_PROTOCOL_FEES,
         on_withdraw_protocol_fees],
    )
    return program
def clear_program():
    # Clear-state program: always approves (constant 1).
    # NOTE(review): the trailing "| StarcoderdataPython" token is
    # dataset-concatenation residue fused onto this line, not part of the
    # original source; it makes the expression a NameError at runtime.
    return Int(1) | StarcoderdataPython
1758309 | # Generated by Django 3.0.6 on 2020-06-04 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE: auto-generated migration (Django 3.0.6 — see file header).
    # Kept byte-identical apart from comments: Django's migration machinery
    # compares these files structurally.

    dependencies = [
        ('crudApp', '0006_auto_20200604_0903'),
    ]

    operations = [
        # Allow project_body to be left blank in forms/validation.
        migrations.AlterField(
            model_name='project',
            name='project_body',
            field=models.TextField(blank=True),
        ),
    ]
| StarcoderdataPython |
93803 | <reponame>LegenDarius116/pox<filename>run.py
import os
from argparse import ArgumentParser
def main():
    """Spin up a POX controller with a chosen load-balancing module.

    Reads ``-n`` (number of backend servers) and ``-lb`` (load-balancing
    module name) from the command line, then launches ``pox.py`` pointing at
    servers 10.0.0.1 .. 10.0.0.N with controller IP 10.0.1.1.
    """
    ip = '10.0.1.1'  # address the load-balancing module binds to
    parser = ArgumentParser(description='Command line tool for quickly spinning up POX Controller')
    parser.add_argument("-n", type=int, help="number of servers", required=True)
    parser.add_argument("-lb", type=str, help="name of load balancing module", required=True)
    args = parser.parse_args()

    # Comma-separated backend list: "10.0.0.1,...,10.0.0.N"
    servers = ','.join('10.0.0.{}'.format(i + 1) for i in range(args.n))
    lb_alg = "misc.loadbalancing.{}".format(args.lb)

    command = "sudo python pox.py log.level --DEBUG {lb} --ip={ip} --servers={servers}".format(
        lb=lb_alg,
        ip=ip,
        servers=servers
    )
    print("Running command: {}".format(command))
    # NOTE(review): args.lb is interpolated into a shell command line; a
    # crafted value could inject shell syntax.  Prefer
    # subprocess.run([...], shell=False) if inputs can be untrusted.
    os.system(command)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1613747 | <reponame>molnarjani/data
from jsonschema import validate
import os
from pathlib import Path
import json
def _load_json(path):
    """Load a UTF-8 JSON file, closing the handle deterministically.

    (The original used ``json.load(open(...))``, leaking the file handle
    until garbage collection.)
    """
    with open(path, encoding='utf-8') as f:
        return json.load(f)


def _check_records(directory, schema, kind):
    """Validate every JSON record under *directory* against *schema*.

    Checks that each file name matches the record's ``slug`` and that the
    record validates against the JSON schema.  On any failure, print the
    offending path and re-raise so CI fails loudly.
    """
    # see https://stackoverflow.com/a/10378012
    for path in Path(directory).glob('**/*.json'):
        try:
            record = _load_json(path)
            if record['slug'] + '.json' != os.path.basename(path):
                # NOTE: the original supervisory-authorities branch had the
                # second format argument outside the ``%`` tuple, raising
                # "not enough arguments for format string" instead of the
                # intended ValueError message.
                raise ValueError('%s filename "%s" does not match slug "%s".'
                                 % (kind, os.path.basename(path), record['slug']))
            validate(record, schema)
        except Exception:
            print(path)
            raise


schema_companies = _load_json('schema.json')
schema_authorities = _load_json('schema-supervisory-authorities.json')

_check_records('companies', schema_companies, 'Company')
_check_records('supervisory-authorities', schema_authorities, 'Supervisory authority')
| StarcoderdataPython |
1607040 | from django.test import TestCase
from rest_framework.reverse import reverse
class MonstersTestCase(TestCase):
    """API v1 monster endpoint tests: list, detail payload, and 404."""

    fixtures = [
        'monsters.json',
        'series.json',
    ]

    def test_monster_list(self):
        """GET monster-list returns 200 with a JSON array."""
        resp = self.client.get(reverse('api:v1:monster-list'))
        self.assertEqual(resp.status_code, 200)
        self.assertIsInstance(resp.data, list)

    def test_monster_detail(self):
        """GET monster-detail for pk=2 returns the fixture's 'Bones' record."""
        resp = self.client.get(reverse('api:v1:monster-detail', kwargs=dict(pk=2)))
        self.assertEqual(resp.status_code, 200)
        self.assertIsInstance(resp.data, dict)
        self.assertIn('id', resp.data)
        self.assertEqual(resp.data['id'], 2)
        self.assertIn('name', resp.data)
        self.assertEqual(resp.data['name'], 'Bones')
        self.assertIn('description', resp.data)
        # assertRegexpMatches was a deprecated alias removed in Python 3.12;
        # assertRegex is the supported spelling (same behavior).
        self.assertRegex(resp.data['description'], '^Bones is a skeleton monster created by Finster')

    def test_monster_not_found(self):
        """GET monster-detail for an unknown pk returns 404."""
        resp = self.client.get(reverse('api:v1:monster-detail', kwargs=dict(pk=9999)))
        self.assertEqual(resp.status_code, 404)
| StarcoderdataPython |
1720036 | frase = 'Curso em Vídeo Python'
# Practice snippet exercising common str methods on `frase`.
# The original kept several commented-out experiments here (upper/count,
# triple-quoted printing, len/strip, replace, `in` membership, find,
# lower().find, and split-indexing); only the word-count experiment below
# is live.  Two dead triple-quoted string expressions (no-op statements)
# were dropped.
dividido = frase.split()
# Character count of the phrase with all whitespace removed.
print(len(''.join(dividido)))
print('Curso' in frase) | StarcoderdataPython |
74512 | """
"""
from config import config
class Factory(object):
def __init__(self, session_name, **context):
self.context = context
self.session = sessions[session_name]()
self.factory = RepositoryFactory(self.session)
def __enter__(self):
self.session.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.session.__exit__(exc_value, exc_value, traceback)
class RepositoryFactory(object):
def __init__(self, session):
self.session = session
def mock_sessions():
from wheezy.core.db import NullSession
return {"ro": NullSession, "rw": NullSession}
# region: configuration details
mode = config.get("runtime", "mode")
if mode == "mock":
sessions = mock_sessions()
else:
raise NotImplementedError(mode)
del mode, config
| StarcoderdataPython |
3207994 | from typing import Dict, Callable, Union
import random
from ..calc.combat_data import AttackData
from ..calc import stats
def _(_: AttackData) -> None:
pass
def fe7_silencer(atk: AttackData) -> None:
"""
With a crit/2% chance to activate, deals damage equal to the opponent's remaining HP.
Prevents other skills from activating on this attack.
Actually simply runs the hit calculation using 2RN, runs the crit calculation, and if Silencer
would activate, sets them both to 100%
"""
if atk.skillable:
avg_roll = (random.randint(0, 99) + random.randint(0, 99)) // 2
if avg_roll < atk.hit - atk.avo and random.randint(0, 99) < (atk.crit - atk.ddg) / 2:
# silencer activates
atk.dmg = atk.against.current_hp
atk.hit = 999
atk.crit = 999
atk.tags.append("silencer")
atk.skillable = False
return None
def fe7_devil(atk: AttackData) -> None:
    """
    Rolls a random number [0-99], and if the number is less than (31 - Unit's Luck),
    then sets the attacker as the defender for their own attack. Does not change any
    other numbers (Hit, Crit, Prt, etc.) remain the same).
    Also, does not return any particular message.
    """
    # Higher luck shrinks the backfire window; at 31+ luck (31 - luk <= 0)
    # the weapon can never backfire, since the roll is always >= 0.
    luk = stats.calc_luk(atk.by)
    if random.randint(0, 99) < (31 - luk):
        # Backfire: the attack now targets its own user.
        atk.against = atk.by
    return None
def brave(atk: AttackData) -> None:
    """
    Adds another attack after this one, identical to it.

    The copy is tagged "brave" so the duplicated attack cannot itself
    trigger another duplication (guarded by the check below).
    """
    if "brave" not in atk.tags:
        # NOTE(review): relies on AttackData supporting ``append`` to queue
        # a follow-up attack -- confirm against the AttackData definition.
        atk.append(AttackData(
            by=atk.by,
            against=atk.against,
            with_weapon=atk.with_weapon,
            against_weapon=atk.against_weapon,
            atk=atk.atk,
            prt_rsl=atk.prt_rsl,
            hit=atk.hit,
            avo=atk.avo,
            crit=atk.crit,
            ddg=atk.ddg,
            skillable=atk.skillable,
            counterattack=atk.counterattack,
            followup=atk.followup,
            tags=["brave"] + atk.tags[:]  # copy the tag list, don't share it
        ))
# Registry of skills that run before an attack resolves, keyed by skill
# name; ``None`` maps to the no-op handler for attacks with no skill.
before_attack: Dict[Union[str, None], Callable[[AttackData], Union[Dict, None]]] = {
    'brave': brave,
    'fe7_devil': fe7_devil,
    'fe7_silencer': fe7_silencer,
    None: _,
}
| StarcoderdataPython |
3319262 | <gh_stars>1-10
import torch
from torch.nn import Module
from cnns.nnlib.utils.general_utils import next_power2
from torch.nn.functional import pad as torch_pad
import numpy as np
from cnns.nnlib.utils.shift_DC_component import shift_DC
class FFTBandFunction2D(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward
    passes which operate on Tensors.

    Low-pass band filter: zeroes out high-frequency FFT coefficients of a
    square image batch, keeping roughly (1 - compress_rate) of the energy
    layout. NOTE(review): uses the legacy ``torch.rfft``/``torch.irfft``
    API (removed in recent PyTorch); pin the torch version or port to
    ``torch.fft`` -- confirm which torch release this targets.
    """
    # 2-D transform over the last two spatial dimensions.
    signal_ndim = 2

    @staticmethod
    def forward(ctx, input, args, onesided=True, is_test=False):
        """
        In the forward pass we receive a Tensor containing the input
        and return a Tensor containing the output. ctx is a context
        object that can be used to stash information for backward
        computation. You can cache arbitrary objects for use in the
        backward pass using the ctx.save_for_backward method.
        :param input: the input image
        :param args: for compress rate and next_power2.
        :param onesided: FFT convolution leverages the conjugate symmetry and
        returns only roughly half of the FFT map, otherwise the full map is
        returned
        :param is_test: test if the number of zero-ed out coefficients is
        correct
        """
        # ctx.save_for_backward(input)
        # print("round forward")
        # The input tensor is modified in place (padding aside), so tell
        # autograd its buffer is dirty.
        FFTBandFunction2D.mark_dirty(input)
        N, C, H, W = input.size()
        if H != W:
            raise Exception(f"We support only squared input but the width: {W}"
                            f" is differnt from height: {H}")
        if args.next_power2:
            # Pad spatial dims up to the next power of two for FFT speed.
            H_fft = next_power2(H)
            W_fft = next_power2(W)
            pad_H = H_fft - H
            pad_W = W_fft - W
            input = torch_pad(input, (0, pad_W, 0, pad_H), 'constant', 0)
        else:
            H_fft = H
            W_fft = W
        # Legacy API: returns a real tensor with a trailing dim of size 2
        # holding (real, imag) parts.
        xfft = torch.rfft(input, signal_ndim=FFTBandFunction2D.signal_ndim,
                          onesided=onesided)
        del input
        _, _, H_xfft, W_xfft, _ = xfft.size()
        # r - is the side of the retained square in one of the quadrants
        # 4 * r ** 2 / (H * W) = (1 - c)
        # r = np.sqrt((1 - c) * (H * W) / 4)
        compress_rate = args.compress_rate / 100
        # One-sided maps already omit half the coefficients, hence the
        # smaller divisor.
        if onesided:
            divisor = 2
        else:
            divisor = 4
        # r - is the length of the side that we retain after compression.
        r = np.sqrt((1 - compress_rate) * H_xfft * W_xfft / divisor)
        # r = np.floor(r)
        r = np.ceil(r)
        r = int(r)
        # zero out high energy coefficients
        if is_test:
            # We divide by 2 to not count zeros complex number twice.
            zero1 = torch.sum(xfft == 0.0).item() / 2
            # print(zero1)
        # Zero the middle rows (high vertical frequencies) ...
        xfft[..., r:H_fft - r, :, :] = 0.0
        # ... and the high horizontal frequencies.
        if onesided:
            xfft[..., :, r:, :] = 0.0
        else:
            xfft[..., :, r:W_fft - r, :] = 0.0
        if ctx is not None:
            # Stash the (optionally DC-shifted) spectrum for inspection.
            ctx.xfft = xfft
            if args.is_DC_shift is True:
                ctx.xfft = shift_DC(xfft, onesided=onesided)
        if is_test:
            zero2 = torch.sum(xfft == 0.0).item() / 2
            # print(zero2)
            total_size = C * H_xfft * W_xfft
            # print("total size: ", total_size)
            fraction_zeroed = (zero2 - zero1) / total_size
            ctx.fraction_zeroed = fraction_zeroed
            # print("compress rate: ", compress_rate, " fraction of zeroed out: ", fraction_zeroed)
            # Allow some slack: r is rounded up, so the zeroed fraction
            # only approximates the requested compression rate.
            error = 0.08
            if fraction_zeroed > compress_rate + error or (
                    fraction_zeroed < compress_rate - error):
                raise Exception(f"The compression is wrong, for compression "
                                f"rate {compress_rate}, the number of fraction "
                                f"of zeroed out coefficients "
                                f"is: {fraction_zeroed}")
        # N, C, H_fft, W_fft = xfft
        out = torch.irfft(input=xfft,
                          signal_ndim=FFTBandFunction2D.signal_ndim,
                          signal_sizes=(H_fft, W_fft),
                          onesided=onesided)
        # Crop any FFT padding back to the original spatial size.
        out = out[..., :H, :W]
        return out

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of
        the loss with respect to the output, and we need to compute the
        gradient of the loss with respect to the input.

        See: https://arxiv.org/pdf/1706.04701.pdf appendix A. We do NOT
        want to zero out the gradient here: defenses that mask a network's
        gradients (e.g. by quantizing inputs) defeat gradient-based
        adversarial-example generation, because small input changes produce
        zero gradients. Passing the output gradient straight through
        approximates the network without the band-reduction step.
        """
        # print("round backward")
        # Identity gradient for the input; None for the non-tensor args
        # (args, onesided, is_test) plus ctx's implicit slot.
        return grad_output.clone(), None, None, None, None
class FFTBand2D(Module):
    """FFT band-limiting layer: removes high-frequency coefficients.

    The backward pass is computed manually by ``FFTBandFunction2D`` rather
    than traced by PyTorch autograd.
    """

    def __init__(self, args):
        super(FFTBand2D, self).__init__()
        # ``args`` carries compress_rate / next_power2 / is_DC_shift flags
        # consumed by FFTBandFunction2D.forward.
        self.args = args

    def forward(self, input):
        """Apply the FFT band filter to ``input`` (e.g. an image batch)."""
        return FFTBandFunction2D.apply(input, self.args)
if __name__ == "__main__":
    # Ad-hoc sanity check of the band-compression indexing on a small
    # real-valued integer grid (no torch FFT involved).
    compress_rate = 0.6
    H_xfft = 8
    W_xfft = 5
    divisor = 2
    is_test = True
    xfft = np.arange(H_xfft*W_xfft).reshape(H_xfft, W_xfft)
    xfft = torch.tensor(xfft)
    print("initial xfft:\n", xfft)
    # Side length of the retained low-frequency square.
    r = int(np.sqrt((1 - compress_rate) * H_xfft * W_xfft / divisor))
    print("r: ", r)
    # zero out high energy coefficients
    if is_test:
        # We divide by 2 to not count zeros complex number twice.
        # NOTE(review): ``xfft`` here is real-valued, so the /2 applied to
        # zero1 (but not to zero2 below) looks inconsistent -- confirm
        # the intent before relying on fraction_zeroed.
        zero1 = torch.sum(xfft == 0.0).item() / 2
        # print(zero1)
    xfft[r:H_xfft - r, :] = 0.0
    xfft[:, r:] = 0.0
    print("compressed xfft:\n", xfft)
    zero2 = torch.sum(xfft == 0.0).item()
    # print(zero2)
    total_size = H_xfft * W_xfft
    # print("total size: ", total_size)
    fraction_zeroed = (zero2 - zero1) / total_size
    print("fraction_zeroed: ", fraction_zeroed)
1787049 | <gh_stars>10-100
"""Serializers for the sequencers app."""
from rest_framework import serializers
from ..models import BarcodeSet, BarcodeSetEntry
class BarcodeSetEntrySerializer(serializers.ModelSerializer):
    """Serializer for ``BarcodeSetEntry`` records.

    The owning barcode set is never taken from the request payload; it is
    injected from the serializer context on both create and update.
    """

    # Expose the parent barcode set by its UUID, read-only.
    barcode_set = serializers.ReadOnlyField(source="barcode_set.sodar_uuid")

    def create(self, validated_data):
        validated_data["barcode_set"] = self.context["barcode_set"]
        return super().create(validated_data)

    def update(self, instance, validated_data):
        validated_data["barcode_set"] = self.context["barcode_set"]
        return super().update(instance, validated_data)

    class Meta:
        model = BarcodeSetEntry
        fields = ("sodar_uuid", "aliases", "barcode_set", "name", "sequence")
        read_only_fields = ("sodar_uuid", "barcode_set")
class BarcodeSetSerializer(serializers.ModelSerializer):
    """Serializer for ``BarcodeSet`` records, with nested read-only entries.

    The owning project is injected from the serializer context rather than
    taken from the request payload.
    """

    # Nested list of the set's entries; managed via their own endpoint.
    entries = BarcodeSetEntrySerializer(many=True, read_only=True)

    def update(self, instance, validated_data):
        validated_data["project"] = self.context["project"]
        return super().update(instance, validated_data)

    def create(self, validated_data):
        validated_data["project"] = self.context["project"]
        return super().create(validated_data)

    class Meta:
        model = BarcodeSet
        fields = ("sodar_uuid", "name", "short_name", "description", "set_type", "entries")
        read_only_fields = ("sodar_uuid", "entries")
| StarcoderdataPython |
188188 | <filename>leetcode/python/1043_partition_array_for_maximum_sum.py
from typing import List
class Solution:
    def maxSumAfterPartitioning(self, A: List[int], K: int) -> int:
        """Partition A into contiguous runs of length at most K; each
        element of a run counts as the run's maximum. Return the largest
        achievable total sum (LeetCode 1043). O(n*K) time, O(n) space.
        """
        n = len(A)
        # dp[i] = best total for the prefix A[:i + 1]
        dp = [0] * n
        for i in range(n):
            window_max = 0
            # Try every run length that could end at index i.
            for size in range(1, min(K, i + 1) + 1):
                start = i - size + 1
                window_max = max(window_max, A[start])
                prev = dp[start - 1] if start > 0 else 0
                dp[i] = max(dp[i], prev + window_max * size)
        return dp[n - 1]
| StarcoderdataPython |
3252153 | <filename>src/tests/ftest/daos_test/daos_core_test_dfs.py
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import os
from daos_core_base import DaosCoreBase
class DaosCoreTestDfs(DaosCoreBase):
    # pylint: disable=too-many-ancestors
    """Runs DAOS file system (DFS) tests.
    :avocado: recursive
    """
    # NOTE: the ":avocado:" lines in the docstrings below are parsed by the
    # avocado test runner for test selection -- do not edit them casually.

    def __init__(self, *args, **kwargs):
        """Initialize the DaosCoreBase object."""
        super().__init__(*args, **kwargs)
        # Clients get no slots entry in the generated hostfile.
        self.hostfile_clients_slots = None

    def test_daos_dfs_unit(self):
        """Jira ID: DAOS-5409.
        Test Description:
            Run daos_test -u
        Use cases:
            Daos File system tests
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,large
        :avocado: tags=daos_test,daos_core_test_dfs,test_daos_dfs_unit
        """
        # Point the base class at the dfs_test binary, then run the
        # subtest selected via the test yaml.
        self.daos_test = os.path.join(self.bin, 'dfs_test')
        self.run_subtest()

    def test_daos_dfs_parallel(self):
        """Jira ID: DAOS-5409.
        Test Description:
            Run daos_test -p
        Use cases:
            Daos File system tests
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,large
        :avocado: tags=daos_test,daos_core_test_dfs,test_daos_dfs_parallel
        """
        # Same binary as the unit variant; the yaml selects the parallel
        # subtest flags.
        self.daos_test = os.path.join(self.bin, 'dfs_test')
        self.run_subtest()
| StarcoderdataPython |
74543 | import click
from train_anomaly_detection import main_func
import numpy as np
import os
# Define base parameters.
# Dataset/network selection and on-disk layout for all runs.
dataset_name = 'selfsupervised'
net_name = 'StackConvNet'
xp_path_base = 'log'
data_path = 'data/full'
train_folder = 'train'
val_pos_folder = 'val/wangen_sun_3_pos'
val_neg_folder = 'val/wangen_sun_3_neg'
load_config = None
load_model = None
# nu: anomaly fraction hyper-parameter for the one-class objectives.
nu = 0.1
device = 'cuda'
seed = -1
# Main-model optimizer settings.
optimizer_name = 'adam'
lr = 0.0001
n_epochs = 150
lr_milestone = (100,)
batch_size = 200
weight_decay = 0.5e-6
# Autoencoder pretraining optimizer settings.
ae_optimizer_name = 'adam'
ae_lr = 0.0001
ae_n_epochs = 350
ae_lr_milestone = (250,)
ae_batch_size = 200
ae_weight_decay = 0.5e-6
n_jobs_dataloader = 0
normal_class = 1
batchnorm = False
dropout = False
augment = False
# Grid axis 1: training objective / pretraining / encoder-freezing combos.
objectives = [
    {'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': True},  # 0
    {'objective': 'soft-boundary', 'pretrain': True, 'fix_encoder': False},  # 1
    {'objective': 'one-class', 'pretrain': True, 'fix_encoder': False},  # 2
    {'objective': 'real-nvp', 'pretrain': False, 'fix_encoder': False},  # 3
    {'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': False},  # 4
    {'objective': 'one-class', 'pretrain': False, 'fix_encoder': False},  # 5
    {'objective': 'soft-boundary', 'pretrain': False, 'fix_encoder': False}  # 6
]
# Grid axis 2: input-modality combinations (camera, IR, depth, geometry).
modalities = [
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': False, 'ir': True , 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': False, 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
    {'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
    {'rgb': True , 'ir': True , 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
    {'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': True , 'normal_angle': False},
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': False},
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': True },
    {'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
    {'rgb': False, 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
    {'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True },
    {'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': True , 'normal_angle': False},
    {'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True }
]
# Number of repetitions of the full grid (for mean/std statistics).
N_ITER = 10
auc_mat = np.zeros((N_ITER, len(objectives)+1, len(modalities)))  # +1 for Autoencoder
for it in range(N_ITER):
    # Each repetition logs into its own subdirectory.
    xp_path = os.path.join(xp_path_base, str(it))
    for i, obj in enumerate(objectives):
        for j, mod in enumerate(modalities):
            train_obj = main_func(dataset_name, net_name, xp_path, data_path, train_folder,
                                  val_pos_folder, val_neg_folder, load_config, load_model, obj['objective'], nu,
                                  device, seed, optimizer_name, lr, n_epochs, lr_milestone, batch_size,
                                  weight_decay, obj['pretrain'], ae_optimizer_name, ae_lr, ae_n_epochs,
                                  ae_lr_milestone, ae_batch_size, ae_weight_decay, n_jobs_dataloader, normal_class,
                                  mod['rgb'], mod['ir'], mod['depth'], mod['depth_3d'], mod['normals'],
                                  mod['normal_angle'], batchnorm, dropout, augment, obj['fix_encoder'])
            auc = train_obj.results['test_auc']
            auc_ae = train_obj.results['test_auc_ae']
            auc_mat[it, i,j] = auc
            # The autoencoder baseline (when pretraining ran) goes in the
            # extra last row of the objective axis.
            if auc_ae is not None:
                auc_mat[it, -1,j] = auc_ae
            # Checkpoint partial results after every run.
            np.save(os.path.join(xp_path, 'auc.npy'), auc_mat)
np.save(os.path.join(xp_path_base, 'auc.npy'), auc_mat)
print('avg')
print(np.mean(auc_mat, axis=0))
print('std')
print(np.std(auc_mat, axis=0))
3297759 | #!/usr/bin/python2.4
#
#
# Copyright 2008, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines common exception classes for this package."""
class MsgException(Exception):
    """Generic exception with an optional string msg."""

    def __init__(self, msg=""):
        # BUG FIX: forward the message to the Exception base class so that
        # str(e) and tracebacks show it; the original stored it only on
        # .msg, leaving str(e) empty.
        Exception.__init__(self, msg)
        self.msg = msg
class WaitForResponseTimedOutError(Exception):
    """We sent a command and had to wait too long for response."""


class DeviceUnresponsiveError(Exception):
    """Device is unresponsive to command."""


class InstrumentationError(Exception):
    """Failed to run instrumentation."""


class AbortError(MsgException):
    """Generic exception that indicates a fatal error has occurred and program
    execution should be aborted."""


class ParseError(MsgException):
    """Raised when xml data to parse has unrecognized format."""
3250523 | <filename>maya/packages/oop_maya/core/maya_fbx.py<gh_stars>10-100
import maya.cmds as cmds
import maya.mel as mel
class FBX_Exporter():
    """Thin wrapper around Maya's MEL FBX commands for mesh, skin and
    animation export.

    NOTE(review): ``maya_message`` is referenced below but never imported
    in this module -- every export call would end with a NameError after a
    successful export. Confirm and add the missing import.
    """

    def convert_to_joints( self ):
        # Placeholder: not implemented yet.
        pass

    def merge_mesh( self ):
        # Placeholder: not implemented yet.
        pass

    def export_mesh( self, file_path ):
        """Export the current selection as a binary FBX mesh to file_path."""
        # MEL expects forward slashes even on Windows.
        file_path = file_path.replace( '\\', '/' )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|SmoothingGroups -v true;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|expHardEdges -v false;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|TangentsandBinormals -v true;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|SmoothMesh -v true;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|SelectionSet -v false;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|BlindData -v false;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|AnimationOnly -v false;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|GeometryNurbsSurfaceAs -v NURBS;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|Instances -v false;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|ContainerObjects -v true;" )
        mel.eval( "FBXProperty Export|IncludeGrp|Geometry|Triangulate -v false;" )
        mel.eval( "FBXExportInputConnections -v false;" )
        # Binary FBX (not ASCII); -s exports the selection only.
        mel.eval( "FBXExportInAscii -v false;" )
        mel.eval( 'FBXExport -f "{0}" -s;'.format( file_path ) )
        maya_message.message( 'Export finished!' )

    def export_skin( self, file_path ):
        """Export the current selection with skin weights to file_path."""
        file_path = file_path.replace( '\\', '/' )
        mel.eval( "FBXExportAnimationOnly -v false;" )
        mel.eval( "FBXExportSkins -v true;" )
        mel.eval( "FBXExportScaleFactor 1.0" )
        mel.eval( "FBXExportInputConnections -v false;" )
        mel.eval( "FBXExportInAscii -v false;" )
        mel.eval( 'FBXExport -f "{0}" -s;'.format( file_path ) )
        maya_message.message( 'Export finished!' )

    def export_animation( self, file_path ):
        """Export baked animation over the current playback range."""
        file_path = file_path.replace( '\\', '/' )
        # Bake the full timeline range so constraints/IK survive export.
        start = str( cmds.playbackOptions( ast = True, q = True ) )
        end = str( cmds.playbackOptions( aet = True, q = True ) )
        mel.eval( "FBXExportBakeComplexAnimation -v true;" )
        mel.eval( 'FBXExportBakeComplexStart -v ' + start + ';' )
        mel.eval( 'FBXExportBakeComplexEnd -v ' + end + ';' )
        mel.eval( "FBXExportInputConnections -v false;" )
        mel.eval( "FBXExportInAscii -v false;" )
        mel.eval( 'FBXExport -f "{0}" -s;'.format( file_path ) )
        maya_message.message( 'Export finished!' )
| StarcoderdataPython |
1623850 | <filename>tests/test_flask.py
import sqlite3
import unittest
from flask import Flask
import pytest
from sqlalchemy import Column, Integer, String
from alchemical.flask import Alchemical
# Module-level Alchemical database handle shared by all tests below.
db = Alchemical()


class User(db.Model):
    # Lives in the main (default) database.
    id = Column(Integer, primary_key=True)
    name = Column(String(128))


class User1(db.Model):
    # Stored in the secondary database registered under bind key "one",
    # with an explicit table name.
    __tablename__ = 'users1'
    __bind_key__ = 'one'
    id = Column(Integer, primary_key=True)
    name = Column(String(128))


class User2(db.Model):
    # Stored in the secondary database registered under bind key "two".
    __bind_key__ = 'two'
    id = Column(Integer, primary_key=True)
    name = Column(String(128))
class TestFlask(unittest.TestCase):
    """End-to-end tests of the Flask integration of Alchemical against
    in-memory SQLite databases."""

    def test_read_write(self):
        # CRUD round-trip through the default database: insert, select,
        # update, delete, then verify drop_all/create_all resets state.
        app = Flask(__name__)
        app.config['ALCHEMICAL_DATABASE_URL'] = 'sqlite://'
        db.init_app(app)
        db.drop_all()
        db.create_all()
        with db.begin() as session:
            for name in ['mary', 'joe', 'susan']:
                session.add(User(name=name))
        with db.Session() as session:
            all = session.execute(User.select()).scalars().all()
            assert len(all) == 3
        with db.Session() as session:
            session.execute(User.update().where(User.name == 'joe').values(
                name='john'))
            names = [u.name for u in session.execute(
                User.select()).scalars().all()]
            assert 'joe' not in names
            assert 'john' in names
        with db.Session() as session:
            session.execute(User.delete().where(User.name == 'mary'))
            names = [u.name for u in session.execute(
                User.select()).scalars().all()]
            assert len(names) == 2
            assert 'mary' not in names
        db.drop_all()
        db.create_all()
        with db.Session() as session:
            all = session.execute(User.select()).scalars().all()
            assert len(all) == 0

    def test_binds(self):
        # Each model lands in the engine matching its __bind_key__; verify
        # via raw connections, then confirm drop_all removes every table.
        app = Flask(__name__)
        app.config['ALCHEMICAL_DATABASE_URL'] = 'sqlite://'
        app.config['ALCHEMICAL_BINDS'] = \
            {'one': 'sqlite://', 'two': 'sqlite://'}
        db.init_app(app)
        db.drop_all()
        db.create_all()
        assert db.bind_names() == ['one', 'two']
        with db.begin() as session:
            user = User(name='main')
            user1 = User1(name='one')
            user2 = User2(name='two')
            session.add_all([user, user1, user2])
        # Inspect each database through its engine's raw DB-API connection.
        conn = db.get_engine().pool.connect()
        cur = conn.cursor()
        cur.execute('select * from user;')
        assert cur.fetchall() == [(1, 'main')]
        conn.close()
        conn = db.get_engine(bind='one').pool.connect()
        cur = conn.cursor()
        cur.execute('select * from users1;')
        assert cur.fetchall() == [(1, 'one')]
        conn.close()
        conn = db.get_engine(bind='two').pool.connect()
        cur = conn.cursor()
        cur.execute('select * from user2;')
        assert cur.fetchall() == [(1, 'two')]
        conn.close()
        db.drop_all()
        # After drop_all, selecting from any of the tables must fail.
        conn = db.get_engine().pool.connect()
        cur = conn.cursor()
        with pytest.raises(sqlite3.OperationalError):
            cur.execute('select * from user;')
        conn.close()
        conn = db.get_engine(bind='one').pool.connect()
        cur = conn.cursor()
        with pytest.raises(sqlite3.OperationalError):
            cur.execute('select * from users1;')
        conn.close()
        conn = db.get_engine(bind='two').pool.connect()
        cur = conn.cursor()
        with pytest.raises(sqlite3.OperationalError):
            cur.execute('select * from user2;')
        conn.close()

    def test_db_session(self):
        # db.session only exists inside a Flask application context and
        # must be explicitly committed by default.
        app = Flask(__name__)
        app.config['ALCHEMICAL_DATABASE_URL'] = 'sqlite://'
        db.init_app(app)
        db.drop_all()
        db.create_all()
        with pytest.raises(RuntimeError):
            db.session
        with app.app_context():
            pass  # ensure teardown does not error when there is no session
        with app.app_context():
            for name in ['mary', 'joe', 'susan']:
                db.session.add(User(name=name))
            db.session.commit()
        with db.Session() as session:
            all = session.execute(User.select()).scalars().all()
            assert len(all) == 3

    def test_db_session_autocommit(self):
        # With ALCHEMICAL_AUTOCOMMIT, leaving the app context commits.
        app = Flask(__name__)
        app.config['ALCHEMICAL_DATABASE_URL'] = 'sqlite://'
        app.config['ALCHEMICAL_AUTOCOMMIT'] = True
        db.init_app(app)
        db.drop_all()
        db.create_all()
        with app.app_context():
            for name in ['mary', 'joe', 'susan']:
                db.session.add(User(name=name))
        with db.Session() as session:
            all = session.execute(User.select()).scalars().all()
            assert len(all) == 3

    def test_bad_config(self):
        # Missing database URL must raise at init time.
        app = Flask(__name__)
        with pytest.raises(ValueError):
            db.init_app(app)

    def test_alternate_config(self):
        # The legacy "..._URI" spelling of the config key is also accepted.
        app = Flask(__name__)
        app.config['ALCHEMICAL_DATABASE_URI'] = 'sqlite://'
        db.init_app(app)  # should not raise
| StarcoderdataPython |
1703357 | from main.bike import testBike
from main.main_test import test
if __name__ == "__main__":
    # Script entry point: run the project's test routine.
    test()
1607858 | import os
import GenFiles
import pandas as pd
from collections import defaultdict
# Root of the genotype data tree; all work happens inside it.
workdir = "/home/jana/Genotipi/Genotipi_DATA/Rjava_TEMP/"
os.chdir("/home/jana/Genotipi/Genotipi_DATA/Rjava_TEMP/")
# List the per-batch genotype directories into Dirs.txt and read it back.
os.system("ls -d Genotipi*/ > Dirs.txt")
dirs = list(pd.read_table("Dirs.txt", header=None).loc[:,0])
print(dirs)
# Map SNP counts (from the .map file) to genotyping chip names.
chips = {19720: "GGPv02",
         26145: "GGPv03",
         26151: "GGPv03",
         30105: "GGPv04",
         30106: "GGPv04",
         76883: "HD" ,
         138892: "HDv02",
         139376: "HDv02",
         54001:"50Kv01" ,
         54609: "50Kv02",
         51274: "IDBv03",
         52445: "IDBv03"
         }
print(chips.values())
# Collect (original ped, chip-suffixed ped) path pairs for comparison.
pedPars = []
for dir in dirs:
    # All raw .ped files in this batch (skip already-cleaned ones).
    peds = [x.strip(".ped") for x in os.listdir(workdir + "/" + dir + "/") if x.endswith(".ped") and "Clean" not in x]
    for ped in peds:
        try:
            # Identify the chip from the matching .map file's SNP count.
            chip = GenFiles.mapFile(workdir + "/" + dir + "/" + ped + ".map").chip
            if chip in chips.values() and os.path.isfile(workdir + "/" + dir + "/" + ped + "_" + chip + ".ped"):
                print(workdir + "/" + dir + "/" + ped + "_" + chip + ".ped")
                pedPars.append((workdir + "/" + dir + "/" + ped,
                                workdir + "/" + dir + "/" + ped + "_" + chip))
        except:
            # Unreadable / unknown map file: skip this ped.
            pass
print(pedPars)
# Compare each original/converted .ped pair with plink and collect the
# per-file genotype concordance into a summary table.
compareDF = pd.DataFrame(columns=["File", "Concordance", "Format"])
comparedir = "/home/jana/Genotipi/Genotipi_DATA/Compare/"
os.chdir(comparedir)
for ped1, ped2 in pedPars:
    # Field 1500 of the .ped holds a genotype call; its distinct values
    # reveal whether the file is A/B-coded or top-allele coded.
    os.system("cut " + ped1 + '.ped -f1500 -d" " | sort | uniq > Alleles.txt')
    alleles = open("Alleles.txt").read().strip().split("\n")
    # BUG FIX: ``format`` could be left over from the previous file (or be
    # unbound on the very first one) when neither 'G' nor 'B' occurs; start
    # each file from an explicit unknown value. Renamed to avoid shadowing
    # the ``format`` builtin.
    geno_format = "Unknown"
    if 'G' in alleles:
        geno_format = "Top"
    if 'B' in alleles:
        geno_format = "AB"
    if alleles == ['A']:
        # BUG FIX: the original concatenated the (ped1, ped2) tuple with a
        # string here, which raised TypeError whenever this branch ran.
        print(ped1 + ": only A in alleles.")
        # NOTE(review): the original fell through here ('pass') instead of
        # skipping the file -- behavior kept; confirm intent.
    print("Format: " + geno_format)
    try:
        # Merge-mode 7 reports genotype concordance between the two files.
        os.system("plink --file " + ped1 + " --merge " +
                  ped2 + ".ped " + ped2 + ".map --merge-mode 7 --cow --recode --out DIFF > DIFFtmp.txt")
        a = open("DIFFtmp.txt").read().strip().split("\n")
        # plink prints '... concordance rate is 0.9987.'; grab the number.
        c = [x for x in a if "concordance rate" in x][0].split(" ")[-1].strip(".")
        compareDF = compareDF.append(pd.DataFrame(
            {"File": [ped1.split("/")[-1]], "Concordance": [c], "Format": [geno_format]}))
    except Exception:
        # Merge output missing or unparsable; skip this pair.
        pass
compareDF.to_csv(comparedir + "ComparePlink1PLink2DF.csv")
1765079 | <reponame>kssteven418/nums-1<gh_stars>0
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import socket
import itertools
from types import FunctionType
from typing import Tuple
import inspect
import ray
import numpy as np
from nums.core.systems.interfaces import ComputeInterface, ComputeImp
from nums.core.systems.utils import check_implementation, extract_functions
class RayScheduler(ComputeInterface):
    # pylint: disable=abstract-method
    """Abstract Ray-backed scheduler API: subclasses decide how remote
    functions are registered, placed on nodes, and invoked."""

    def init(self):
        raise NotImplementedError()

    def put(self, value, weakref=False):
        raise NotImplementedError()

    def get(self, object_ids, timeout=None):
        raise NotImplementedError()

    def remote(self, function: FunctionType, remote_params: dict):
        raise NotImplementedError()

    def register(self, name: str, func: callable, remote_params: dict = None):
        raise NotImplementedError("Implements a way to register new remote functions.")

    def call(self, name: str, *args, **kwargs):
        raise NotImplementedError()

    def call_with_options(self, name, args, kwargs, options):
        raise NotImplementedError()

    def nodes(self):
        raise NotImplementedError()
class TaskScheduler(RayScheduler):
    # pylint: disable=abstract-method
    """
    Basic task-based scheduler. This scheduler relies on underlying
    system's scheduler in distributed memory configurations.
    Simply takes as input a compute module and StoreConfiguration.
    """

    def __init__(self,
                 compute_module,
                 use_head=False):
        """Wrap ``compute_module.ComputeCls`` and prepare empty node and
        remote-function registries; ``use_head`` allows scheduling work on
        the head node as well."""
        self.compute_imp: ComputeImp = compute_module.ComputeCls
        check_implementation(ComputeInterface, self.compute_imp)
        self.remote_functions = {}
        self.available_nodes = []
        self.use_head = use_head
        self.head_node = None

    def get_private_ip(self):
        """Return this host's private IP address.

        Connecting a UDP socket to a public address selects the outbound
        interface without actually sending any packets.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            # BUG FIX: the original never closed the socket, leaking a
            # file descriptor on every call.
            s.close()

    def init(self):
        """Discover usable cluster nodes and register every compute
        function from the wrapped compute module as a Ray remote."""
        # Compute available nodes, based on CPU resource.
        local_ip = self.get_private_ip()
        for node in ray.nodes():
            # Each Ray node advertises a custom "node:<ip>" resource key.
            node_key = list(filter(lambda key: "node" in key, node["Resources"].keys()))
            assert len(node_key) == 1
            node_ip = node_key[0].split(":")[1]
            has_cpu_resources = "CPU" in node["Resources"] and node["Resources"]["CPU"] >= 1.0
            if local_ip == node_ip:
                print("head node", node_ip)
                self.head_node = node
                # Only schedule on the head node when explicitly enabled.
                if self.use_head and has_cpu_resources:
                    self.available_nodes.append(node)
            elif has_cpu_resources:
                print("worker node", node_ip)
                self.available_nodes.append(node)
        # Collect compute functions.
        module_functions = extract_functions(self.compute_imp)
        function_signatures: dict = {}
        required_methods = inspect.getmembers(ComputeInterface(), predicate=inspect.ismethod)
        for name, func in required_methods:
            function_signatures[name] = func
        for name, func in module_functions.items():
            func_sig = function_signatures[name]
            # Interface methods may carry per-function Ray parameters.
            try:
                remote_params = func_sig.remote_params
            except Exception as _:
                remote_params = {}
            self.remote_functions[name] = self.remote(func, remote_params)

    def put(self, value, weakref=False):
        """Store ``value`` in the Ray object store."""
        return ray.put(value, weakref=weakref)

    def get(self, object_ids, timeout=None):
        """Resolve one or more Ray object refs to their values."""
        return ray.get(object_ids, timeout=timeout)

    def remote(self, function: FunctionType, remote_params: dict):
        """Wrap ``function`` as a single-CPU Ray remote function."""
        r = ray.remote(num_cpus=1, **remote_params)
        return r(function)

    def register(self, name: str, func: callable, remote_params: dict = None):
        """Register ``func`` under ``name`` unless already registered."""
        if name in self.remote_functions:
            return
        self.remote_functions[name] = self.remote(func, remote_params)

    def call_with_options(self, name, args, kwargs, options):
        """Invoke a registered remote function with explicit Ray options."""
        return self.remote_functions[name].options(**options).remote(*args, **kwargs)

    def call(self, name: str, *args, **kwargs):
        """Invoke a registered remote function.

        An optional ``syskwargs`` entry may carry Ray ``options``; it is
        stripped from the kwargs before the actual call.
        """
        if "syskwargs" in kwargs:
            kwargs = kwargs.copy()
            syskwargs = kwargs["syskwargs"]
            del kwargs["syskwargs"]
            if "options" in syskwargs:
                options = syskwargs["options"]
                return self.call_with_options(name, args, kwargs, options)
        return self.remote_functions[name].remote(*args, **kwargs)

    def nodes(self):
        """Return the nodes eligible for scheduling."""
        return self.available_nodes
class BlockCyclicScheduler(TaskScheduler):
    # pylint: disable=abstract-method
    """
    Operations with dimensions larger than the grid axis
    to which they are mapped wrap along that axis.
    Replication not implemented, but would include the following property:
    Operations with 1 dim along any axis are replicated for each dimension along that axis.
    """

    def __init__(self, compute_module, cluster_shape: Tuple, use_head=False, verbose=False):
        super(BlockCyclicScheduler, self).__init__(compute_module, use_head)
        self.verbose = verbose
        # Logical grid of nodes onto which array blocks are mapped.
        self.cluster_shape: Tuple = cluster_shape
        self.cluster_grid: np.ndarray = np.empty(shape=self.cluster_shape, dtype=np.object)

    def init(self):
        # Discover nodes via the base class, then lay them out over the
        # logical cluster grid (one node per grid cell).
        super().init()
        err_str = "Not enough nodes %d for cluster shape %s." % (len(self.available_nodes),
                                                                 str(self.cluster_shape))
        assert len(self.available_nodes) >= np.prod(self.cluster_shape), err_str
        for i, cluster_entry in enumerate(self.get_cluster_entry_iterator()):
            self.cluster_grid[cluster_entry] = self.available_nodes[i]
            print("cluster_grid", cluster_entry, self.get_node_key(cluster_entry))
        print("cluster_shape", self.cluster_shape)

    def get_cluster_entry_iterator(self):
        # All coordinates of the cluster grid, in row-major order.
        return itertools.product(*map(range, self.cluster_shape))

    def get_cluster_entry(self, grid_entry):
        # Map an array-grid coordinate to a cluster-grid coordinate by
        # wrapping (mod) each axis -- the "block-cyclic" placement.
        cluster_entry = []
        num_grid_entry_axes = len(grid_entry)
        num_cluster_axes = len(self.cluster_shape)
        if num_grid_entry_axes <= num_cluster_axes:
            # When array has fewer or equal # of axes than cluster.
            for cluster_axis in range(num_cluster_axes):
                if cluster_axis < num_grid_entry_axes:
                    cluster_dim = self.cluster_shape[cluster_axis]
                    grid_entry_dim = grid_entry[cluster_axis]
                    cluster_entry.append(grid_entry_dim % cluster_dim)
                else:
                    cluster_entry.append(0)
        elif num_grid_entry_axes > num_cluster_axes:
            # When array has more axes then cluster.
            for cluster_axis in range(num_cluster_axes):
                cluster_dim = self.cluster_shape[cluster_axis]
                grid_entry_dim = grid_entry[cluster_axis]
                cluster_entry.append(grid_entry_dim % cluster_dim)
            # Ignore trailing axes, as these are "cycled" to 0 by assuming
            # the dimension of those cluster axes is 1.
        return tuple(cluster_entry)

    def get_node_key(self, cluster_entry):
        # Return the Ray "node:<ip>" resource key of the node assigned to
        # the given cluster-grid cell.
        node = self.cluster_grid[cluster_entry]
        node_key = list(filter(lambda key: "node" in key, node["Resources"].keys()))
        assert len(node_key) == 1
        node_key = node_key[0]
        return node_key

    def call(self, name: str, *args, **kwargs):
        # Pin the task to the node chosen by block-cyclic placement of the
        # caller-provided grid_entry, via a fractional custom resource.
        assert "syskwargs" in kwargs
        syskwargs = kwargs["syskwargs"]
        grid_entry = syskwargs["grid_entry"]
        grid_shape = syskwargs["grid_shape"]
        if "options" in syskwargs:
            options = syskwargs["options"].copy()
            if "resources" in options:
                resources = options["resources"].copy()
            else:
                resources = {}
        else:
            options = {}
            resources = {}
        # Make sure no node ip addresses are already in resources.
        for key, _ in resources.items():
            assert "node" not in key
        cluster_entry: tuple = self.get_cluster_entry(grid_entry)
        node_key = self.get_node_key(cluster_entry)
        # TODO (hme): This will be problematic. Only able to assign 10k tasks-per-node.
        resources[node_key] = 1.0/10**4
        options["resources"] = resources
        kwargs = kwargs.copy()
        del kwargs["syskwargs"]
        if self.verbose:
            # Trace where each op lands on the cluster grid.
            if name == "bop":
                fname = args[0]
                print_str = "BCS: bop_name=%s, " \
                            "grid_entry=%s, grid_shape=%s " \
                            "on cluster_grid[%s] == %s"
                print(print_str % (fname, str(grid_entry),
                                   str(grid_shape), str(cluster_entry),
                                   node_key))
            else:
                print_str = "BCS: remote_name=%s, " \
                            "grid_entry=%s, grid_shape=%s " \
                            "on cluster_grid[%s] == %s"
                print(print_str % (name, str(grid_entry),
                                   str(grid_shape), str(cluster_entry),
                                   node_key))
        return self.call_with_options(name, args, kwargs, options)
| StarcoderdataPython |
1695284 | import json
import oandapyV20
import oandapyV20.endpoints.accounts as accounts
import oandapyV20.endpoints.instruments as instruments
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.positions as positions
import oandapyV20.endpoints.pricing as pricing
import oandapyV20.endpoints.trades as trades
import oandapyV20.endpoints.transactions as transactions
from oandapyV20.contrib.requests import (LimitOrderRequest, MarketOrderRequest,
MITOrderRequest, StopLossDetails,
StopLossOrderRequest,
StopOrderRequest, TakeProfitDetails,
TakeProfitOrderRequest,
TrailingStopLossDetails,
TrailingStopLossOrderRequest)
from oandapyV20.exceptions import StreamTerminated
from retry import retry
class OandaAPI(object):
    """Convenience wrapper around the oandapyV20 REST client.

    Every endpoint call is retried up to 20 times with a short delay,
    because the OANDA API intermittently rejects requests.
    """

    def __init__(self, accountID, access_token):
        self.access_token = access_token
        self.accountID = accountID
        self.client = oandapyV20.API(access_token=access_token)

    ######################### Account #########################
    @retry(tries=20, delay=0.1)
    def get_accountID(self, access_token):
        # ``access_token`` is unused; parameter kept for interface compatibility.
        return self.accountID

    @retry(tries=20, delay=0.1)
    def get_AccountDetails(self):
        """Return the full details of the configured account."""
        r = accounts.AccountDetails(accountID=self.accountID)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_AccountSummary(self):
        """Return the summary of the configured account."""
        r = accounts.AccountSummary(accountID=self.accountID)
        return self.client.request(r)

    ######################### Order #########################
    @retry(tries=20, delay=0.1)
    def get_OrderList(self, ticker):
        """Return the pending orders attached to a specific ticker."""
        r = orders.OrderList(accountID=self.accountID,
                             params={"instrument": ticker})
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_OrdersPending(self):
        """Return all pending orders of the account."""
        r = orders.OrdersPending(accountID=self.accountID)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def OrderCreate_mkt(self, ticker, size, takeprofit=None, stoploss=None,
                        trailingstop=None):
        """Create a market order (MarketOrder), optionally attaching
        take-profit / stop-loss / trailing-stop details on fill."""
        d = dict(instrument=ticker, units=size)
        if takeprofit:
            d['takeProfitOnFill'] = TakeProfitDetails(price=takeprofit).data
        if stoploss:
            d['stopLossOnFill'] = StopLossDetails(price=stoploss).data
        if trailingstop:
            d['trailingStopLossOnFill'] = TrailingStopLossDetails(
                distance=trailingstop).data
        Order = MarketOrderRequest(**d).data
        r = orders.OrderCreate(accountID=self.accountID, data=Order)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def OrderCreate_pending(self, ticker, size, price, takeprofit=None,
                            stoploss=None, trailingstop=None,
                            requesttype='MarketIfTouchedOrder'):
        """Create a pending order.

        requesttype:
            LimitOrder, StopOrder, MarketIfTouchedOrder
        """
        d = dict(instrument=ticker, units=size, price=price)
        if takeprofit:
            d['takeProfitOnFill'] = TakeProfitDetails(price=takeprofit).data
        if stoploss:
            d['stopLossOnFill'] = StopLossDetails(price=stoploss).data
        if trailingstop:
            d['trailingStopLossOnFill'] = TrailingStopLossDetails(
                distance=trailingstop).data
        # Bug fix: compare strings with ``==`` -- the original ``is`` only
        # worked by accident of CPython string interning.
        if requesttype == 'MarketIfTouchedOrder':
            Order = MITOrderRequest(**d).data
        elif requesttype == 'LimitOrder':
            Order = LimitOrderRequest(**d).data
        elif requesttype == 'StopOrder':
            Order = StopOrderRequest(**d).data
        else:
            # Previously an unknown type fell through to a NameError on Order.
            raise ValueError("unknown requesttype: {}".format(requesttype))
        r = orders.OrderCreate(accountID=self.accountID, data=Order)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def cancel_all_OrdersPending(self, ordertype, long_short=None):
        """Cancel all matching pending orders.

        ordertype: LIMIT, STOP, MARKET_IF_TOUCHED
        long_short: LONG, SHORT, or None to cancel both sides
        """
        rv = self.get_OrdersPending()
        rv = [dict(id=i.get('id'), units=i.get('units'))
              for i in rv['orders'] if i['type'] in ordertype and i.get('units')]
        if long_short == 'LONG':
            idsToCancel = [order.get('id') for order in rv
                           if float(order['units']) > 0]
        elif long_short == 'SHORT':
            idsToCancel = [order.get('id') for order in rv
                           if float(order['units']) < 0]
        elif long_short is None:
            idsToCancel = [order.get('id') for order in rv]
        else:
            # Previously an unknown value fell through to a NameError.
            raise ValueError("long_short must be 'LONG', 'SHORT' or None")
        for orderID in idsToCancel:
            r = orders.OrderCancel(accountID=self.accountID, orderID=orderID)
            rv = self.client.request(r)

    @retry(tries=20, delay=0.1)
    def cancel_all_TSTOrder(self, ticker, ordertype):
        """Cancel all take-profit / stop-loss / trailing-stop orders.

        ordertype: TAKE_PROFIT, STOP_LOSS, TRAILING_STOP_LOSS
        """
        rv = self.get_OrderList(ticker)
        idsToCancel = [order.get('id')
                       for order in rv['orders'] if order['type'] in ordertype]
        for orderID in idsToCancel:
            r = orders.OrderCancel(accountID=self.accountID, orderID=orderID)
            rv = self.client.request(r)

    # def OrderCreate_TakeProfit(self, ticker, long_short, price):
    #     """
    #     Add a take-profit to every open trade; errors out if a take-profit
    #     already exists on a trade, so this helper is currently unused.
    #     long_short: LONG, SHORT
    #     """
    #     rv = self.get_tradeslist(ticker)
    #
    #     if long_short == 'LONG':
    #         idsToCreate = [trade.get('id') for trade in rv['trades']
    #                        if float(trade['currentUnits']) > 0]
    #     elif long_short == 'SHORT':
    #         idsToCreate = [trade.get('id') for trade in rv['trades']
    #                        if float(trade['currentUnits']) < 0]
    #     for tradeID in idsToCreate:
    #         Order = TakeProfitOrderRequest(tradeID=tradeID, price=price).data
    #         r = orders.OrderCreate(accountID=self.accountID, data=Order)
    #         rv = self.client.request(r)
    #
    ######################### Trades #########################
    @retry(tries=20, delay=0.1)
    def get_all_open_trades(self):
        """Return every open trade of the account."""
        r = trades.OpenTrades(accountID=self.accountID)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_tradeslist(self, ticker):
        """Return the open trades for a specific ticker."""
        r = trades.TradesList(accountID=self.accountID, params={
            'instrument': ticker})
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_trade_details(self, tradeID):
        """Return the details of a single trade."""
        r = trades.TradeDetails(accountID=self.accountID, tradeID=tradeID)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def Exitall_trades(self):
        """Close every open trade of the account."""
        rv = self.get_all_open_trades()
        idsToClose = [trade.get('id') for trade in rv['trades']]
        for tradeID in idsToClose:
            r = trades.TradeClose(accountID=self.accountID, tradeID=tradeID)
            self.client.request(r)

    ######################### Positions #########################
    @retry(tries=20, delay=0.1)
    def close_all_position(self, ticker, closetype='long'):
        """Close the whole long or short position for *ticker*.

        closetype: long, short
        """
        if closetype == 'long':
            d = dict(longUnits='ALL')
        elif closetype == 'short':
            d = dict(shortUnits='ALL')
        else:
            # Previously an unknown type fell through to a NameError on d.
            raise ValueError("closetype must be 'long' or 'short'")
        r = positions.PositionClose(accountID=self.accountID,
                                    instrument=ticker,
                                    data=d)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_positions(self):
        """Return all open positions of the account."""
        r = positions.OpenPositions(accountID=self.accountID)
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_tickstream(self, ticker):
        """Print the live pricing stream for *ticker* as it arrives."""
        r = pricing.PricingStream(accountID=self.accountID, params={
            "instruments": ticker})
        n = 0
        # Effectively stream forever; lower this bound to stop earlier.
        stopAfter = 999999999999999999999999999
        try:
            # the stream request returns a generator so we can do ...
            for tick in self.client.request(r):
                print(json.dumps(tick, indent=2))
                if n >= stopAfter:
                    r.terminate()
                n += 1
        except StreamTerminated:
            print(
                "Stream processing ended because we made it stop after {} ticks".format(n))

    ######################### Transactions #########################
    @retry(tries=20, delay=0.1)
    def get_TransactionsSinceID(self, transactionID):
        """TransactionsSinceID.

        Get a range of Transactions for an Account starting at (but not including)
        a provided Transaction ID.
        """
        r = transactions.TransactionsSinceID(accountID=self.accountID,
                                             params={'id': transactionID})
        return self.client.request(r)

    @retry(tries=20, delay=0.1)
    def get_TransactionDetails(self, transactionID):
        """Return the details of a single transaction."""
        r = transactions.TransactionDetails(accountID=self.accountID,
                                            transactionID=transactionID)
        return self.client.request(r)

    ######################### ticker #########################
    @retry(tries=20, delay=0.1)
    def get_candlestick_list(self, ticker, granularity, count=50,
                             fromdate=None, todate=None, price='M',
                             smooth=False, includeFirst=None):
        """
        See http://developer.oanda.com/rest-live-v20/instrument-ep/
        date: 'YYYY-MM-DDTHH-mm:ssZ'
        instrument: Name of the Instrument [required]
        price: str, 'M' or 'B' or 'A'
        granularity: (S5, S10, S30, M1, M2, M4, M5) <- BAD Interval
                     M10, M15, M30
                     H1, H2, H3, H4, H6, H8, H12,
                     D, W, M
        count: number of candles, default=50, maximum=5000
        fromdate: format '2017-01-01'
        todate: format '2017-01-01'
        smooth: A smoothed candlestick uses the previous candle's close
                price as its open price, while an unsmoothed candlestick
                uses the first price from its time range as its open price.
        includeFirst: A flag that controls whether the candlestick that
                      is covered by the from time should be included
                      in the results.
        """
        params = dict(granularity=granularity,
                      count=count,
                      price=price,
                      smooth=smooth,
                      includeFirst=includeFirst)
        if fromdate:
            params.update({'from': fromdate})
        if todate:
            params.update({'to': todate})
        r = instruments.InstrumentsCandles(instrument=ticker,
                                           params=params)
        return self.client.request(r)

    # Misc
    @retry(tries=20, delay=0.01)
    def get_pricinginfo(self, ticker):
        """Return the current pricing snapshot for *ticker*."""
        r = pricing.PricingInfo(accountID=self.accountID,
                                params={"instruments": ticker})
        return self.client.request(r)
if __name__ == "__main__":
    # Ad-hoc manual test bed for the wrapper: most scenarios are left
    # commented out so a single one can be enabled at a time.
    from oandakey import access_token, accountID
    from OnePy.utils.awesome_func import run_multithreading
    oanda = OandaAPI(accountID, access_token)
    instrument = "EUR_USD"
    #
    n = 0
    # for i in range(200):
    # n += 1
    # print(n)
    # data = oanda.OrderCreate_mkt('EUR_USD', 100)
    def submit(a):
        # Helper for the multithreaded submission stress test below.
        data = oanda.OrderCreate_mkt('EUR_USD', 100)
    # run_multithreading(submit, [i for i in range(100)], 100)
    # data = oanda.close_all_position('EUR_USD', 'long')
    # data = oanda.Exitall_trades()
    # data = oanda.OrderCreate_mkt('EUR_USD', -100, takeprofit=1.4)
    # NOTE(review): this places a real 100-unit market order when run.
    data = oanda.OrderCreate_mkt('EUR_USD', 100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD',100)
    # data = oanda.OrderCreate_mkt('EUR_USD', 10, trailingstop=0.002)
    #
    # data = oanda.OrderCreate_mkt('EUR_GBP',-10)
    # data = oanda.OrderCreate_mkt('USD_JPY', 10)
    # data = oanda.OrderCreate_pending('EUR_USD',200,1.0,requesttype='LimitOrder')
    # data = oanda.OrderCreate_pending(
    # 'EUR_USD', 200, 1.2, trailingstop=1, takeprofit=1.3, requesttype='StopOrder')
    # data = oanda.cancel_all_OrdersPending('MARKET_IF_TOUCHED', 'LONG')
    # data = oanda.cancel_all_OrdersPending('MARKET_IF_TOUCHED', 'SHORT')
    # data = oanda.cancel_all_OrdersPending('LIMIT', 'LONG')
    # data = oanda.cancel_all_OrdersPending('LIMIT', 'SHORT')
    # data = oanda.cancel_all_OrdersPending('STOP', 'SHORT')
    # data = oanda.cancel_all_OrdersPending('STOP', 'LONG')
    # data = oanda.cancel_all_TSTOrder('EUR_USD', 'TAKE_PROFIT')
    # data = oanda.get_OrdersPending()
    # oanda.get_tickstream([instrument])
    # data = oanda.get_candlestick_list(
    # 'EUR_USD', 'S5', count=5, fromdate="2015-01-08T07:00:00Z")
    # data = oanda.get_tradeslist(instrument)
    # data = oanda.get_AccountDetails()
    # print(data)
    # data = oanda.get_AccountSummary()
    # data = oanda.close_all_position('EUR_USD','long')
    # data= oanda.get_TransactionsSinceID(1)
    # data = oanda.get_positions()
    # print(json.dumps(data['candles'], indent=2))
    # print(len(data['positions'][0]['long']['tradeIDs'])) # number of orders
    # Check interval test
    # from datetime import timedelta
    # def check_candles_interval(data, interval):
    # bar_list = []
    # for i in range(len(data['candles'])):
    # gg = arrow.get(data['candles'][i+1]['time']) - \
    # arrow.get(data['candles'][i]['time'])
    # if gg != timedelta(interval/60/60/24) and gg != timedelta(interval/60/60/24 + 2):
    # print(gg)
    # print(data['candles'][i+1]['time'])
    # print(arrow.get(data['candles'][i]['time']))
    # # check_candles_interval(data,30)
    # Dump the response of whichever scenario ran.
    print(json.dumps(data, indent=2))
| StarcoderdataPython |
3264198 | """
For each point cloud, generate a CSV that includes:
keep_ratio, class_id, true_positive, true_negative, false_positive, false_negative
Configurations
scannet_dir: root data folder of ScanNet
ply_dirname: folder of point cloud in PLY. [Ply_partial, Train_ply, Ply]
K: number of nearest neighbors for label recovering of removed points
n_scene: only effective if ply_dir == "Ply_partial". Copy n_scene point clouds only
if ply_dirname == "Train_ply":
point clouds in {scannet_dir}/{ply_dirname} => {scannet_dir}/{ply_dirname}_label/{K}/*.csv
if ply_dirname == "Ply":
point clouds in {scannet_dir}/{ply_dirname} => {scannet_dir}/{ply_dirname}_label/{K}/*.csv
if ply_dirname == "Ply_partial":
first n_scene point clouds in {scannet_dir}/Ply => {scannet_dir}/{ply_dirname}_label/{K}/*.csv
"""
import glob
import os
import subprocess
import sys
import time
from shutil import copyfile
# ------ Configuration ------
scannet_dir = "/home/dtc/Backup/Data/ScanNet"
# Ply_partial, Train_ply, Ply
ply_dirname = "Ply"
# only effective if ply_dir == "Ply_partial"
n_scene = 50
# number of nearest neighbors
K = 1
# --- end of configuration ---

ply_dir = os.path.join(scannet_dir, ply_dirname)
if ply_dirname == "Ply_partial":
    # Partial mode: materialize the first n_scene clouds from the full Ply set.
    if not os.path.exists(ply_dir):
        os.makedirs(ply_dir)
    ply_files = glob.glob(os.path.join(scannet_dir, "Ply", "*.ply"))[:n_scene]
    for ply_file in ply_files:
        copyfile(ply_file, os.path.join(ply_dir, os.path.basename(ply_file)))

# Output folder: {scannet_dir}/{ply_dirname}_label/{K}/*.csv
save_dir = os.path.join(scannet_dir, ply_dirname + "_label", str(K))
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

ply_files = glob.glob(os.path.join(ply_dir, "*.ply"))
index = 1
start_time = time.time()
for ply_file in ply_files:
    if (index % 10) == 0:
        print("------ {:0.0f} (s) Processing {}/{}".format(time.time() - start_time, index, len(ply_files)))
    save_file = os.path.join(save_dir, os.path.basename(ply_file)[:-4] + ".csv")
    # Invoke the C++ recovery tool with an argv list instead of os.system:
    # paths containing spaces or shell metacharacters can no longer break
    # (or inject into) the command line.
    subprocess.run(["../Cpp/recover_full/build/recover_full",
                    ply_file, str(K), save_file])
    index += 1
| StarcoderdataPython |
115248 | <reponame>jorgedroguett/cajonera
# Cajones is an application that computes the parts list for a chest of
# drawers (cajonera).
# All measurements are in millimetres.
# constants
hol_lado = 13      # side clearance (unused below -- TODO confirm intent)
# variables (cabinet/drawer dimensions; 'hol_*' = clearance, 'esp_*' = panel thickness)
h = 900            # cabinet height
a = 600            # cabinet width
prof_c = 400       # drawer depth
h_c = 120          # drawer height (recomputed later)
a_c = 200          # drawer width (recomputed later)
hol_sup = 20       # top clearance
hol_inf = 10       # bottom clearance
hol_int = 40       # clearance between drawers
hol_lateral = 2    # lateral clearance per side
esp_lado = 18      # side panel thickness
esp_sup = 18       # top panel thickness
esp_inf = 18       # bottom panel thickness
esp_c = 15         # drawer wall thickness
cubre_der_total = True   # right side panel fully covers the drawer front
cubre_iz_total = True    # left side panel fully covers the drawer front
def calcular_lado_cajon(prof_c, esp_c):
    """Return the drawer side length (mm): depth minus both wall thicknesses."""
    return prof_c - 2 * esp_c
def calcular_a_c(cubre_iz_total, cubre_der_total, esp_lado, hol_lado, hol_lateral, a):
    """Return the drawer-front width (mm) left between the two side panels.

    A side panel that fully covers the drawer consumes its whole thickness;
    otherwise only half of it minus the lateral clearance.
    Note: ``hol_lado`` is accepted for interface compatibility but unused.
    """
    margen_derecho = esp_lado if cubre_der_total else esp_lado / 2 - hol_lateral
    margen_izquierdo = esp_lado if cubre_iz_total else esp_lado / 2 - hol_lateral
    return a - margen_izquierdo - margen_derecho - 2 * hol_lateral
def calcular_h_c(h, esp_sup, esp_inf, hol_sup, hol_int, hol_inf):
    """Return the height (mm) of each of the three equal drawers.

    The usable space is the cabinet height minus top/bottom panel
    thicknesses and all vertical clearances, split in three.
    """
    espacio_util = h - (esp_sup + esp_inf) - (hol_sup + hol_int + hol_inf)
    return espacio_util / 3
# Derive the three drawer dimensions from the configured cabinet geometry.
h_c = calcular_h_c(h, esp_sup, esp_inf, hol_sup, hol_int, hol_inf)
a_c = calcular_a_c(cubre_iz_total, cubre_der_total, esp_lado, hol_lado, hol_lateral, a)
l_c = calcular_lado_cajon(prof_c, esp_c)
# Report the cut list (width X height, mm): drawer front and drawer side.
print("frente cajon: ", a_c, " X ", round(h_c))
print("lado cajon: ", l_c, " X ", round(h_c))
| StarcoderdataPython |
4808931 | import unittest
from datetime import datetime
from xbrr.edinet.client.document_list_client import MetaDataClient
from xbrr.edinet.client.document_list_client import DocumentListClient
from tests.utils import delay
class TestDocumentListClient(unittest.TestCase):
    """Smoke tests against the live EDINET document-list API.

    Each test is wrapped in @delay to throttle requests to the service.
    """

    @delay
    def test_metadata(self):
        metadata = MetaDataClient().get("2019-01-31")
        self.assertGreater(metadata.count, 0)

    @delay
    def test_metadata_by_datetime(self):
        # The client also accepts a datetime instead of a date string.
        metadata = MetaDataClient().get(datetime(2019, 3, 1))
        self.assertGreater(metadata.count, 0)

    @delay
    def test_document_list(self):
        docs = DocumentListClient().get("2019-01-31")
        self.assertEqual(docs.metadata.count, len(docs.list))

    @delay
    def test_document_list_by_datetime(self):
        docs = DocumentListClient().get(datetime(2019, 3, 1))
        self.assertEqual(docs.metadata.count, len(docs.list))
| StarcoderdataPython |
1786332 | from .decorator import *
from .process_cn_word import *
name="cn_sort" | StarcoderdataPython |
3329319 | <reponame>justHorsingAround/DevaintArt_bot
from bs4 import BeautifulSoup
import requests
import json
import os
def write_file(download, file_name):
    """Stream a requests response to *file_name* in 4 KiB chunks."""
    with open(file_name, 'wb') as out:
        for piece in download.iter_content(chunk_size=4096):
            out.write(piece)
def write_log(user_name, download_link):
    """Append a ``user;link`` record to the shared download log file."""
    with open("download_log.txt", 'a') as log:
        log.write(user_name + ';' + download_link + '\n')
    print("Download logged!\n")
def fetch_html(page_url):
    """Download *page_url* and return it parsed with BeautifulSoup."""
    response = requests.get(page_url)
    return BeautifulSoup(response.text, 'html.parser')
# deprecated, json_request will do the job
def search_pictures(url, class_tag, class_name_one, class_name_two, tag_in_list):
    """Collect attribute values from thumbnails inside the torpedo container.

    Note: ``class_name_one`` is accepted for interface compatibility but unused.
    """
    soup = fetch_html(url)
    container = soup.find("div", class_="torpedo-container")
    matches = container.find_all(class_tag, class_=class_name_two)
    return [item[tag_in_list] for item in matches]
def fetch_csrf(url):
    """Scrape the CSRF token from the inline <script> blocks of a DA page.

    Returns a dict like ``{'csrf': '<token>'}`` built from the last
    ``csrf`` key/value pair found, or ``""`` when no script mentions csrf
    (mirroring the original fallback).
    """
    soup = fetch_html(url)
    csrf = ""
    for script in soup.head.find_all('script'):
        text = script.string
        # Fix: use ``is None`` instead of ``== None``; also drop the
        # redundant int() around str.find's integer result.
        if text is None or 'csrf' not in text:
            continue
        for fragment in text.split(","):
            if 'csrf' in fragment:
                # Strip quotes from both key and value, then build the
                # single-pair dict directly.
                key_value = [part.replace('"', '') for part in fragment.split(":")]
                csrf = dict([key_value])
    return csrf
def fetch_href(page_url, json_request, HEADER):
    """Page through the gallery endpoint, collecting every thumbnail href.

    Advances ``json_request['offset']`` by 24 per page until a response
    yields no more thumbnail anchors.
    """
    page_size = 24
    offset = 0
    page_number = 1
    collected = set()
    while True:
        response = requests.post(page_url, data=json_request, headers=HEADER)
        print("RESPONSE GET ------------------------ No. ", page_number)
        print("STATUS CODE -- ", response)
        soup = BeautifulSoup(response.text, 'html.parser')
        hrefs = [anchor['href']
                 for anchor in soup.find_all('a', class_='torpedo-thumb-link')]
        if not hrefs:
            print("RESPONSE GOT WITH NO VALUABLE DATA! REQUESTING FINISHED")
            return collected
        collected.update(hrefs)
        offset += page_size
        json_request["offset"] = str(offset)
        page_number += 1
def fetch_src(links, user_name, already_downloaded):
    """Resolve each deviation page in *links* to its full-size image,
    download it into ./<user_name>/, and append each saved URL to the
    shared download log. URLs already in *already_downloaded* are skipped.
    """
    INDEX_OF_HI_RES = 0
    INDEX_OF_NAME = -1
    saved_file_counter = 1
    all_links = len(links)
    with open("download_log.txt", 'a') as af:
        for link in links:
            print("NUMBER :: {} PROGRESS :: {}%".format(saved_file_counter, round((saved_file_counter/all_links)*100, 1)))
            saved_file_counter += 1
            print("FETCHED LINK: ", link)
            soup_for_img_serach = fetch_html(link)
            out_div = soup_for_img_serach.find("div", class_='dev-view-deviation')
            # Full-resolution images carry the 'dev-content-full' class;
            # pages without one are non-image or age-gated content.
            res = [i for i in out_div.find_all("img", class_='dev-content-full')]
            if not res:
                print("This is not a picture or NSFW content\n")
                continue
            res = [j['src'] for j in res]
            print("DOWNLOADING ------------------- ", res)
            if res[INDEX_OF_HI_RES] in already_downloaded:
                print("This file has already been downloaded!\n")
                continue
            # File name is the last path segment of the image URL.
            split_name = res[INDEX_OF_HI_RES].split('/')
            filepath = "./" + user_name + "/" + split_name[INDEX_OF_NAME]
            if not os.path.exists(user_name):
                os.makedirs(user_name)
            download_req = requests.get(res[INDEX_OF_HI_RES])
            status_code = download_req.status_code
            if status_code != 200:
                print("Failed to download: Error {}".format(status_code))
                continue
            write_file(download_req, filepath)
            print("SAVED AS: {}".format(filepath))
            # Log inline rather than via write_log() to reuse the open handle.
            af.write(user_name)
            af.write(';')
            af.write(res[INDEX_OF_HI_RES])
            af.write('\n')
            print("Download logged!\n")
def make_url(user_name):
    """Build the [main gallery, scraps gallery] URL pair for a DA user."""
    gallery = 'https://' + user_name.lower() + '.deviantart.com/gallery/?catpath=/'
    # The scraps URL is the gallery URL with the trailing '/' swapped for 'scraps'.
    scraps = gallery[:-1] + 'scraps'
    return [gallery, scraps]
def read_log():
    """Return the URLs previously recorded in download_log.txt (may be empty)."""
    downloaded = []
    if os.path.isfile("download_log.txt"):
        with open("download_log.txt", 'r') as log:
            for line in log:
                # Each record is "user;url"; keep only the URL part.
                downloaded.append(line.strip('\n').split(';')[1])
    return downloaded
user_name = ""
while user_name == "":
user_name = input("\nPlease enter the user's name (make sure it's correct): ")
page_url = make_url(user_name)
json_request= {
"username" : "",
"offset" : "0",
"limit" : "24",
"_csrf" : "",
"dapiIid" : "0"}
USER_AGEN = "Mozilla/5.0 (Windows NT 10.0;...) Gecko/20100101 Firefox/57.0"
HEADER = {"user_agen" : USER_AGEN}
INDEX_OF_MAIN_GALLERY = 0
INDEX_OF_SCRAP_GALLERY = 1
csrf = fetch_csrf(page_url[INDEX_OF_MAIN_GALLERY])
print(csrf)
json_request["username"] = user_name
json_request["_csrf"] = csrf['csrf']
print(json_request)
href_set = fetch_href(page_url[INDEX_OF_MAIN_GALLERY], json_request, HEADER)
csrf = fetch_csrf(page_url[INDEX_OF_SCRAP_GALLERY])
print(csrf)
json_request["username"] = user_name
json_request["offset"] = "0"
json_request["catpath"] = "scraps"
json_request["_csrf"] = csrf['csrf']
print(json_request)
scrap_href_set = fetch_href(page_url[INDEX_OF_SCRAP_GALLERY], json_request, HEADER)
href_set.update(scrap_href_set)
print(href_set)
print("\nNUMBER OF LINKS FOUND ---------------- {}\n".format(len(href_set)))
already_downloaded = read_log()
fetch_src(href_set, user_name, already_downloaded) # write file with this temporary
print("-" * 50)
print("OK")
| StarcoderdataPython |
1678380 | """Wrapper for sci-kit learn classifiers.
"""
import os
import joblib
import numpy as np
from wafamole.models import Model
from wafamole.exceptions.models_exceptions import (
NotPyTorchModelError,
PyTorchInternalError,
ModelNotLoadedError,
)
import torch
from wafamole.utils.check import type_check, file_exists
class PyTorchModelWrapper(Model):
"""Sci-kit learn classifier wrapper class"""
def __init__(self, pytorch_classifier=None):
"""Constructs a wrapper around an scikit-learn classifier, or equivalent.
It must implement predict_proba function.
Arguments:
pytorch_classifier : pytorch-learn classifier or equivalent
Raises:
NotPyTorchModelError: not implement predict_proba
NotPyTorchModelError: not implement fit
"""
if pytorch_classifier is None:
self._pytorch_classifier = None
else:
# if getattr(pytorch_classifier, "predict_proba", None) is None:
# raise NotPyTorchModelError(
# "object does not implement predict_proba function"
# )
self._pytorch_classifier = pytorch_classifier
def classify(self, value):
"""It returns the probability of belonging to a particular class.
It calls the extract_features function on the input value to produce a feature vector.
Arguments:
value (numpy ndarray) : an input belonging to the input space of the model
Raises:
ModelNotLoadedError: calling function without having loaded or passed model as arg
Returns:
numpy ndarray : the confidence for each class of the problem.
"""
if self._pytorch_classifier is None:
raise ModelNotLoadedError()
feature_vector = self.extract_features(value)
try:
y_pred = self._pytorch_classifier([feature_vector])
return y_pred
except Exception as e:
raise PyTorchInternalError("Internal PyTorch error.") from e
def load_model(self, filepath, ModelClass):
"""Loads a PyTorch classifier stored in filepath.
Arguments:
filepath (string) : The path of the PyTorch classifier.
Raises:
TypeError: filepath is not string.
FileNotFoundError: filepath not pointing to any file.
NotPyTorchModelError: model can not be loaded.
Returns:
self
"""
type_check(filepath, str, "filepath")
file_exists(filepath)
ModelClass.load_state_dict(torch.load(filepath))
ModelClass.eval()
self._pytorch_classifier = ModelClass
return self
def extract_features(self, value: np.ndarray):
"""It returns the input. To modify this behaviour, extend this class and re-define this method.
Arguments:
value (numpy ndarray) : a sample that belongs to the input space of the model
Returns:
the input.
"""
if type(value) != np.ndarray:
raise TypeError(f"{type(value)} not an nd array")
return value
| StarcoderdataPython |
140029 | <filename>pandas_ta/volatility/rvi.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from pandas_ta.overlap import ma
from pandas_ta.statistics import stdev
from pandas_ta.utils import get_drift, get_offset
from pandas_ta.utils import unsigned_differences, verify_series
def rvi(close, high=None, low=None, length=None, scalar=None, refined=None, thirds=None, mamode=None, drift=None, offset=None, **kwargs):
    """Indicator: Relative Volatility Index (RVI)"""
    # Validate arguments
    length = int(length) if length and length > 0 else 14
    scalar = float(scalar) if scalar and scalar > 0 else 100
    refined = False if refined is None else refined
    thirds = False if thirds is None else thirds
    mamode = mamode if isinstance(mamode, str) else "ema"
    close = verify_series(close, length)
    drift = get_drift(drift)
    offset = get_offset(offset)
    if close is None:
        return
    if refined or thirds:
        high = verify_series(high)
        low = verify_series(low)

    def _directional_rvi(series):
        """RVI of one series: MA-smoothed stdev, split by price direction."""
        std = stdev(series, length)
        pos, neg = unsigned_differences(series, amount=drift)
        pos_avg = ma(mamode, pos * std, length=length)
        neg_avg = ma(mamode, neg * std, length=length)
        return scalar * pos_avg / (pos_avg + neg_avg)

    # Calculate Result
    if refined:
        # 'refined': average of RVI(high) and RVI(low).
        _mode = "r"
        rvi = 0.5 * (_directional_rvi(high) + _directional_rvi(low))
    elif thirds:
        # 'thirds': average of RVI over high, low and close.
        _mode = "t"
        rvi = (_directional_rvi(high) + _directional_rvi(low) + _directional_rvi(close)) / 3.0
    else:
        _mode = ""
        rvi = _directional_rvi(close)

    # Offset
    if offset != 0:
        rvi = rvi.shift(offset)

    # Handle fills
    if "fillna" in kwargs:
        rvi.fillna(kwargs["fillna"], inplace=True)
    if "fill_method" in kwargs:
        rvi.fillna(method=kwargs["fill_method"], inplace=True)

    # Name and Categorize it
    rvi.name = f"RVI{_mode}_{length}"
    rvi.category = "volatility"
    return rvi
# Attach the public documentation to rvi at import time (pandas_ta convention).
rvi.__doc__ = \
"""Relative Volatility Index (RVI)

The Relative Volatility Index (RVI) was created in 1993 and revised in 1995.
Instead of adding up price changes like RSI based on price direction, the RVI
adds up standard deviations based on price direction.

Sources:
    https://www.tradingview.com/support/solutions/43000594684-relative-volatility-index/

Calculation:
    Default Inputs:
        length=14, scalar=100, refined=None, thirds=None
    EMA = Exponential Moving Average
    STDEV = Standard Deviation

    UP = STDEV(src, length) IF src.diff() > 0 ELSE 0
    DOWN = STDEV(src, length) IF src.diff() <= 0 ELSE 0
    UPSUM = EMA(UP, length)
    DOWNSUM = EMA(DOWN, length)
    RVI = scalar * (UPSUM / (UPSUM + DOWNSUM))

Args:
    high (pd.Series): Series of 'high's
    low (pd.Series): Series of 'low's
    close (pd.Series): Series of 'close's
    length (int): The short period. Default: 14
    scalar (float): A positive float to scale the bands. Default: 100
    mamode (str): Moving average mode, e.g. 'sma' or 'ema'. Default: 'ema'
    refined (bool): Use 'refined' calculation which is the average of
        RVI(high) and RVI(low) instead of RVI(close). Default: False
    thirds (bool): Average of high, low and close. Default: False
    offset (int): How many periods to offset the result. Default: 0

Kwargs:
    fillna (value, optional): pd.DataFrame.fillna(value)
    fill_method (value, optional): Type of fill method

Returns:
    pd.Series: New feature generated.
"""
1630530 | import numpy as np
def mortality_lognormal(r, s):
'''Calculate mortality from cumulative log-normal distribution
Keyword arguments:
:param r: ratio of body burdens to cbr, summed (dimensionless)
:param s: dose-response slope (dimensionless)
:returns: mortality fraction (fraction)
'''
if r>0:
mean = 0.0
x = (np.log10(r) - mean) / (s * np.sqrt(2))
return 0.5 * (1 + erf(x))
else:
return 0.0
def mortality_loglogistic(conc, alpha, beta):
    '''Calculate mortality from a cumulative log-logistic distribution.

    Keyword arguments:
    :param conc: internal concentration ()
    :param alpha: threshold level ()
    :param beta: shape parameter ()
    :returns: F, cumulative log-logistic value; 0.0 for non-positive conc
    '''
    # Standard log-logistic CDF: 1 / (1 + (conc/alpha)^-beta).
    return 1.0 / (1 + (conc / alpha) ** -beta) if conc > 0 else 0.0
3264019 | #!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import pandas as pd
from . import autolabeling, formatting, utils
def evaluate(predictions, gt_path, lookup_path, count_analyzed, count_total, file_header='FileID'):
    """Join model predictions against an Excel ground-truth sheet.

    For every prediction, each field is expanded into one subfield per
    mapped ground-truth column, pairing extracted text with the expected
    cell value. Returns a list of per-file evaluation dicts (empty when
    either input is empty or an error occurs).
    """
    gt_df = utils.load_excel(gt_path)
    evaluation = []
    if not(gt_df.empty):
        logging.info("Ground truth loaded.")
    if len(gt_df) > 0 and len(predictions) > 0:
        try:
            for p in predictions:
                ev = {}
                ev['file_id'] = p['file_id']
                ev['count_analyzed'] = count_analyzed
                ev['count_total'] = count_total
                ev['fields'] = []
                # First ground-truth row whose file id matches this prediction.
                row_document = gt_df.loc[gt_df[file_header] == p['file_id']].iloc[0]
                for f in p['fields']:
                    f['subfields'] = []
                    columns = autolabeling.map_columns(f['label'], lookup_path)
                    for col in columns:
                        subfield = {}
                        subfield['name'] = col
                        if len(columns) > 1:
                            # Multi-column labels: carve the matching piece
                            # out of the extracted text per compare method.
                            compare_method = autolabeling.lookup_compare(col, lookup_path)
                            sub_text = formatting.find_subtext(f['text'], compare_method)
                            subfield['text'] = sub_text
                        else:
                            subfield['text'] = f['text'].replace('"','')
                        # Expected value: strip quotes, keep first line only.
                        expected_value = str(row_document[col]).replace('"','')
                        expected_value = expected_value.split('\n')[0]
                        subfield['expected'] = expected_value
                        f['subfields'].append(subfield)
                    ev['fields'].append(f)
                evaluation.append(ev)
            logging.info(evaluation)
        except Exception as e:
            logging.error(f"Error during evaluation: {e}")
    else:
        # NOTE(review): also reached when predictions is empty, in which
        # case this message is misleading -- confirm intended wording.
        logging.error(f"Could not load ground truth.")
    return evaluation
def compare(a, b, field_name, lookup_path):
    """Normalize both values with the field's compare method, then test equality.

    Logs a warning (with the normalized values) whenever they differ.
    """
    method = autolabeling.lookup_compare(field_name, lookup_path)
    norm_a = formatting.normalize(a, method)
    norm_b = formatting.normalize(b, method)
    if norm_a != norm_b:
        logging.warning(f"Different: {norm_a}, {norm_b}")
    return norm_a == norm_b
def create_eval_file(evaluation, model_id, lookup_path):
    """Aggregate per-field accuracy/precision from an evaluation list.

    Accuracy divides correct matches by the number of labelled documents;
    precision divides by the number of labelled extractions. Returns the
    summary dict plus a list of mismatches for manual review.
    """
    output = {}
    output['modelId'] = model_id
    output['accuracy'] = {}
    output['precision'] = {}
    output['unlabelled'] = {}
    output['avgAccuracy'] = 0
    output['avgPrecision'] = 0
    fields = {}
    unlabelled = {}
    mismatches = []
    try:
        # Seed the field map from the result that has the most fields,
        # so every label seen anywhere gets an entry.
        index_max_fields = 0
        len_max_fields = 0
        for i in range(len(evaluation)):
            if len(evaluation[i]['fields']) >= len_max_fields:
                len_max_fields = len(evaluation[i]['fields'])
                index_max_fields = i
        df = pd.DataFrame(evaluation[index_max_fields]['fields'])
        labels = df['label'].tolist()
        for label in labels:
            fields[label] = []
            unlabelled[label] = 0
        for result in evaluation:
            for field in result['fields']:
                match = True
                unlabelled_subfield = False
                for subfield in field['subfields']:
                    # A field counts as unlabelled if any expected value is invalid.
                    if not(utils.is_valid(subfield['expected'])):
                        unlabelled_subfield = True
                    sub_match = compare(subfield['text'], subfield['expected'], subfield['name'], lookup_path)
                    # Recording mismatches for later review
                    if sub_match == False:
                        mismatch = {}
                        mismatch['fileId'] = result['file_id']
                        mismatch['labelName'] = subfield['name']
                        mismatch['textExtracted'] = subfield['text']
                        mismatch['textExpected'] = subfield['expected']
                        mismatches.append(mismatch)
                        match = False
                if unlabelled_subfield == True:
                    # Bare except guards labels missing from the seed map.
                    try:
                        unlabelled[field['label']] += 1
                    except:
                        unlabelled[field['label']] = 1
                try:
                    fields[field['label']].append(match)
                except:
                    fields[field['label']] = [match]
                    unlabelled[field['label']] = 0
        accuracies = []
        precisions = []
        for key in fields.keys():
            field = fields[key]
            # NOTE(review): divides by zero when every document for a label
            # is unlabelled -- the outer except then logs and aborts.
            accuracy = field.count(True)/(len(evaluation) - unlabelled[key])
            precision = field.count(True)/(len(field) - unlabelled[key])
            output['accuracy'][key] = accuracy
            output['precision'][key] = precision
            output['unlabelled'][key] = unlabelled[key]
            accuracies.append(accuracy)
            precisions.append(precision)
        if len(accuracies) > 0:
            avg_accuracy = sum(accuracies) / len(accuracies)
            output['avgAccuracy'] = avg_accuracy
        if len(precisions) > 0:
            avg_precision = sum(precisions) / len(precisions)
            output['avgPrecision'] = avg_precision
    except Exception as e:
        logging.error(f"Error creating evaluation file: {e}")
    logging.info(output)
    return output, mismatches
| StarcoderdataPython |
4830808 | from gunicorn.http.errors import LimitRequestLine
# Module-level alias for gunicorn's LimitRequestLine exception; presumably
# importers reference it via the short name ``request`` -- confirm against callers.
request = LimitRequestLine
| StarcoderdataPython |
4834512 | import time
import board
from digitalio import DigitalInOut, Direction
"""
dir(board)
['A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'SCK', 'MOSI', 'MISO', 'D0', 'RX', 'D1', 'TX', 'SDA', 'SCL',
'D5', 'D6', 'D9', 'D10', 'D11', 'D12', 'D13', 'I2C', 'SPI', 'UART']
"""
# On-board LED on pin D13, driven as a digital output.
LED = DigitalInOut(board.D13)
LED.direction = Direction.OUTPUT
import busio
# requires lib adafruit_ht16k33, adafruit_bus_device
# How to search for source of library https://github.com/search?q=org%3Aadafruit+adafruit_ht16k33
from adafruit_ht16k33 import matrix
# 8x8 HT16K33 LED matrix on the default I2C bus at address 0x70;
# auto_write=False means pixels are only pushed on explicit matrix.show() calls.
matrix = matrix.Matrix8x8(busio.I2C(board.SCL, board.SDA), address=0x70, auto_write=False)
matrix.brightness = 15
def matrixGo():
    """Sweep the 8x8 matrix on, one pixel at a time.

    Clears the display, then lights the pixels in row-major order,
    refreshing the matrix and pausing briefly after each pixel so the
    sweep is visible.
    """
    matrix.fill(0)
    for index in range(8 * 8):
        row, col = divmod(index, 8)
        matrix[row, col] = 1
        matrix.show()
        time.sleep(0.03)
class Program:
    """Top-level demo: run the matrix sweep once, then blink the LED forever."""
    def __init__(self):
        print('__init__')
    def run(self):
        """Light the matrix, then toggle the on-board LED once per second (never returns)."""
        matrixGo()
        while True:
            LED.value = not LED.value
            print("Led %s" % (LED.value))
            time.sleep(1)
# Main
p = Program()
p.run()
| StarcoderdataPython |
3204114 | #!/usr/bin/env python
# coding: utf-8
"""
unit tests for semiclassical propagators
"""
import unittest
import numpy as np
from numpy import fft
import scipy.linalg as sla
from scipy.interpolate import interp1d
import logging
# # Logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="[testing] %(message)s", level=logging.INFO)
import torch
torch.set_default_dtype(torch.float64)
if torch.cuda.is_available():
logger.info("CUDA available")
# If there are several GPU's available, we use the last one,
# i.e. "cuda:1" on a workstation with 2 GPUs.
device = torch.device("cuda:%d" % (torch.cuda.device_count()-1))
else:
device = torch.device('cpu')
# # Local Imports
from semiclassical.propagators import _sym_sqrtm, _is_symmetric_non_negative
from semiclassical.propagators import HermanKlukPropagator, WaltonManolopoulosPropagator
from semiclassical.potentials import NonHarmonicPotential, MorsePotential
from semiclassical.propagators import CoherentStatesOverlap
from semiclassical import units
from semiclassical.units import hbar
# make random numbers reproducible
torch.manual_seed(0)
class TestLinearAlgebra(unittest.TestCase):
    """Checks for the small linear-algebra helpers in semiclassical.propagators."""

    def test_sym_sqrtm(self):
        """_sym_sqrtm should agree with scipy's sqrtm on a random symmetric matrix."""
        dim = 5
        # random symmetric dim x dim matrix with entries in [-5, 5]
        mat = 10.0 * (torch.rand(dim, dim) - 0.5)
        mat = mat + mat.T
        # scipy reference: matrix square root and its inverse
        ref_sqrt = sla.sqrtm(mat.numpy())
        ref_inv_sqrt = sla.inv(sla.sqrtm(mat.numpy()))
        # implementation under test, built from pure torch functions
        my_sqrt, my_inv_sqrt = (t.numpy() for t in _sym_sqrtm(mat))
        self.assertTrue(np.isclose(my_sqrt, ref_sqrt).all())
        self.assertTrue(np.isclose(my_inv_sqrt, ref_inv_sqrt).all())

    def test_is_symmetric_positive(self):
        """_is_symmetric_non_negative accepts a random symmetric PSD matrix and rejects a non-symmetric one."""
        dim = 5
        # positive eigenvalues make V diag(e) V^T positive semi-definite
        eigvals = torch.rand(dim) + 0.1
        eigvecs = 10.0 * (torch.rand(dim, dim) - 0.5)
        mat = eigvecs @ torch.diag(eigvals) @ eigvecs.T
        self.assertTrue(_is_symmetric_non_negative(mat))
        # breaking the symmetry must make the check fail
        mat[0, 1] = mat[0, 1] + 0.5
        self.assertFalse(_is_symmetric_non_negative(mat))
class TestCoherentStates(unittest.TestCase):
    """
    check overlap integrals between multidimension coherent states
    """
    def test_normalization(self):
        """A coherent state must have unit overlap with itself, <q,p,G|q,p,G> = 1."""
        # make random numbers reproducible
        torch.manual_seed(0)
        n = 5
        # draw random numbers for positive definite, symmetric n x n matrix of width parameters
        Gi = 5.0 * 2.0*(torch.rand(n,n) - 0.5)
        # symmetrize
        Gi = 0.5*(Gi + Gi.T)
        # random numbers for position and momentum
        qi,pi = torch.rand(n,1), torch.rand(n,1)
        # check <qi,pi,Gi|qi,pi,Gi> = 1
        cso = CoherentStatesOverlap(Gi,Gi)
        olap = cso(qi,pi, qi,pi)
        self.assertTrue( abs(olap.squeeze().item()-1.0) < 1.0e-5 )
    def test_zero_modes(self):
        """overlaps when width parameter matrix Gamma is singular"""
        # make random numbers reproducible
        torch.manual_seed(0)
        n = 5
        # draw random numbers for positive definite, symmetric n x n matrix of width parameters
        Gi = 5.0 * 2.0*(torch.rand(n,n) - 0.5)
        # symmetrize
        Gi = 0.5*(Gi + Gi.T)
        # random numbers for position and momentum
        qi,pi = torch.rand(n,1), torch.rand(n,1)
        qj,pj = qi,pi #torch.rand(n,1), torch.rand(n,1)
        # reference overlap in the non-singular n-dimensional space
        cso = CoherentStatesOverlap(Gi,Gi)
        olap = cso(qi,pi, qj,pj)
        # turn Gi into a singular matrix by embedding it into a larger space
        Gi_ = torch.zeros((n+1,n+1))
        Gi_[:n,:n] = Gi
        qi_, pi_, qj_, pj_ = (torch.cat((x, torch.zeros(1,1)), 0) for x in (qi,pi,qj,pj))
        # The zero dimension should have no effect on the overlaps
        cso_ = CoherentStatesOverlap(Gi_,Gi_)
        olap_ = cso_(qi_,pi_, qj_,pj_)
        self.assertEqual(olap.squeeze().item(), olap_.squeeze().item())
class TestSemiclassicalPropagators1D(unittest.TestCase):
    """
    run dynamics on anharmonic 1D potential described in Herman & Kluk (1986)
    and compare with exact QM dynamics
    """
    def setUp(self):
        """Compute the exact QM reference (split-operator propagation on a grid)
        and store the resulting autocorrelation and IC correlation functions,
        together with the parameters needed by the semiclassical propagators."""
        # # Grids for Time Propagation
        # time grid
        nt = 4000 // 40
        # In the HK paper time appears to be measured in units of oscillation periods
        tau_max = 12.0 / 40
        # frequency of oscillator
        omega = 1.0
        t_max = tau_max * 2.0*np.pi/omega
        times = np.linspace(0.0, t_max, nt)
        dt = times[1]-times[0]
        dtau = dt * omega/(2.0*np.pi)
        # spatial grid
        nx = 10000
        x_ = np.linspace(-10.0, 40.0, nx)
        dx = x_[1]-x_[0]
        # reshape spatial grid as (1,nx)
        x = torch.tensor(x_).unsqueeze(0)
        # # Propagation with Split Operator Method on a Grid
        mass = 1.0
        eps = 0.975
        b = (12.0)**(-0.5)
        # potential, see eqn. (7) in HK paper
        v = eps/(2*b**2) * (1.0 - np.exp(-b*x_))**2 + (1.0-eps)*0.5*omega*x_**2
        # I believe in the paper time is measured in units of oscillations, tau=t/T
        # NOTE(review): tau_max and dtau are re-assigned here with different values;
        # only dt computed above is actually used for the propagation -- confirm intent.
        tau_max = 12.0
        dtau = dt * omega/(2.0*np.pi)
        logger.info(f"time step dt= {dt} dtau= {dtau} T")
        # number of propagation steps per time step
        m = 20
        k = 2.0*np.pi*fft.fftfreq(nx,d=dx)
        # exponential of kinetic energy operator in momentum space
        #  -1/(2*m) d^2/dx^2 exp(i*k*x) --> k^2/(2*m) exp(i*k*x)
        expT = np.exp(-(1.0j/hbar) * (hbar*k)**2/(2.0*mass) * (dt/m) )
        # exponential of potential energy in coordinate space
        expV = np.exp(-(1.0j/hbar) * v * (dt/m))
        # initial Gaussian wavefunction, see eqn. (8)
        # I think there is a mistake in Kluk & Herman's paper, the exponent of the Gaussian
        # should be alpha=1/2 so that the wavefunction is the HO ground state wavefunction
        # for the potential V(x) = 1/2 x^2
        alpha = 0.5*omega #1.0
        x0 = 7.3
        p0 = 0.0
        psi0 = (2*alpha/np.pi)**0.25 * np.exp(-alpha*(x_-x0)**2 + 1.0j*p0*(x_-x0)/hbar)
        # propagate wavefunction for t=0.0,...,12.0 using the split operator propagator
        psi = psi0
        # autocorrelation function, overlap between initial wavefunction and wavefunction at time t
        #  Cauto(t) = <phi(0)|phi(t)>
        autocorrelation_qm = np.zeros((nt,), dtype=complex)
        logger.info("running split operator propagator (exact QM):")
        logger.info("... propagate wavepacket psi(0) in time")
        for t in range(0, nt):
            autocorrelation_qm[t] = np.sum(psi0.conjugate() * psi *dx)
            # m sub-steps per reported time step for accuracy
            for i in range(0, m):
                # split operator step
                #  |psi(t+dt)> = U(dt) |psi(t)>
                #              = exp(-i/hbar (V+T) dt) |psi(t)>
                #              ~ exp(-i/hbar V dt) exp(-i/hbar T dt) |psi(t)>
                psi = expV * fft.ifft( expT * fft.fft(psi) )
            # periodic sanity log: norm and energy should be conserved
            if t % (nt//10+1) == 0:
                # kinetic energy T|psi(t)>
                Tpsi = fft.ifft(k**2/(2*mass) * fft.fft(psi))
                # potential energy V|psi(t)>
                Vpsi = v*psi
                # energy expectation value <psi(t)|T+V|psi(t)>
                energy = np.sum(psi.conjugate() * (Tpsi+Vpsi) * dx)
                logger.info("time= %12.5f  |psi(t)|= %e  <E>= %e" % (times[t], np.sqrt(np.sum(abs(psi)**2*dx)), energy.real))
        # # IC Correlation function
        # # Propagate Interaction-Prepared Wavefunction
        # To compute the exact IC correlation function we propagator the interaction-prepared wavefunction
        # $\vert \psi(0) \rangle = \hat{V}^{\dagger} \vert \phi(0) \rangle$
        # in time
        # This is the vibrational ground state <x|phi(0)> on the excited state potential.
        # NOTE(review): phi0 is built on the torch grid `x` (shape (1,nx)) rather than the
        # numpy grid x_ -- downstream np.fft calls convert it implicitly; confirm intended.
        phi0 = (2*alpha/np.pi)**0.25 * np.exp(-alpha*(x-x0)**2 + 1.0j*p0*(x-x0)/hbar)
        # The non-adiabatic coupling vector is assumed to be constant
        def nac(x):
            return 1.0
        # The interaction operator is
        #   Veg = hbar^2/m * nac * d/dx
        # The interaction prepared wavefunction becomes
        #   |psi(0)> = V^+|phi(0)>
        psi0 = hbar**2/mass * nac(x) * fft.ifft((1.0j*k) * fft.fft(phi0))
        # propagate wavefunction for t=0.0,...,12.0 using the split operator propagator
        psi = psi0
        # save the correlation function corr(t) = <psi(0)|psi(t)> = <phi(0)|V e^{-i*t*H} V^+|phi(0)>
        ic_correlation_qm = np.zeros(nt, dtype=complex)
        # zero-point energy of the excited state potential
        en0 = hbar/2.0 * omega
        logger.info("... propagate interaction-prepared wavepacket V^+\phi(0) in time")
        for t in range(0, nt):
            ic_correlation_qm[t] = 1.0/hbar**2 * np.exp(1j*times[t]*en0) * np.sum(psi0.conjugate() * psi * dx)
            for i in range(0, m):
                # split operator step
                #  |psi(t+dt)> = U(dt) |psi(t)>
                #              = exp(-i/hbar (V+T) dt) |psi(t)>
                #              ~ exp(-i/hbar V dt) exp(-i/hbar T dt) |psi(t)>
                psi = expV * fft.ifft( expT * fft.fft(psi) )
            if t % (nt//10+1) == 0:
                # kinetic energy T|psi(t)>
                Tpsi = fft.ifft(k**2/(2*mass) * fft.fft(psi))
                # potential energy V|psi(t)>
                Vpsi = v*psi
                # energy expectation value <psi(t)|T+V|psi(t)>
                energy = np.sum(psi.conjugate() * (Tpsi+Vpsi) * dx)
                logger.info("time= %12.5f  |psi(t)|= %e  <E>= %e" % (times[t], np.sqrt(np.sum(abs(psi)**2*dx)), energy.real))
        #
        self.times = times
        self.nt = nt
        self.dt = dt
        self.omega = omega
        # save autocorrelation functions and IC correlation functions
        # from exact QM propagation on a grid
        self.autocorrelation_qm = autocorrelation_qm
        self.ic_correlation_qm = ic_correlation_qm
        # potential and initial conditions for semiclassical propagators
        self.potential = NonHarmonicPotential()
        # center of initial wavepacket
        self.q0 = torch.tensor([7.3])
        # initial momentum
        self.p0 = torch.tensor([0.0])
        self.Gamma_i = torch.tensor([[2*2.5]])
        self.Gamma_t = self.Gamma_i
        self.Gamma_0 = torch.tensor([[self.omega]])
        # zero-point energy of the excited state potential
        self.en0 = hbar/2.0 * omega
    def test_HermanKlukPropagator(self):
        """HK correlation functions must match the exact QM reference within 5%."""
        # create HK propagator
        propagator = HermanKlukPropagator(self.Gamma_i, self.Gamma_t, device=device)
        propagator.initial_conditions(self.q0, self.p0, self.Gamma_0, ntraj=50000)
        # save autocorrelation functions for each time step
        autocorrelation = np.zeros((self.nt,), dtype=complex)
        ic_correlation = np.zeros((self.nt,), dtype=complex)
        logger.info("running Herman-Kluk propagator:")
        for t in range(0, self.nt):
            autocorrelation[t] = propagator.autocorrelation()
            ic_correlation[t] = propagator.ic_correlation(self.potential, energy0_es=self.en0)
            if t % (self.nt//10+1) == 0:
                logger.info("time= %8.5f   time/T= %5.3f" % (self.times[t], self.times[t]*self.omega/(2*np.pi)))
            propagator.step(self.potential, self.dt)
        # compare semiclassical correlation functions with QM results
        self.assertTrue(np.allclose(autocorrelation, self.autocorrelation_qm, rtol=0.05, atol=0.05))
        self.assertTrue(np.allclose(ic_correlation, self.ic_correlation_qm, rtol=0.05, atol=0.05))
        # check norm of wavefunction is ~ 1 at last time step
        norm = propagator.norm()
        self.assertAlmostEqual(norm, 1, delta=0.05)
    def test_WaltonManolopoulosPropagator(self):
        """WM correlation functions must match the exact QM reference."""
        # create WM propagator
        alpha = 100.0
        beta = 100.0
        propagator = WaltonManolopoulosPropagator(self.Gamma_i, self.Gamma_t, alpha, beta,
                                                  device=device)
        propagator.initial_conditions(self.q0, self.p0, self.Gamma_0, ntraj=50000)
        # save autocorrelation functions for each time step
        autocorrelation = np.zeros((self.nt,), dtype=complex)
        ic_correlation = np.zeros((self.nt,), dtype=complex)
        logger.info("running Walton-Manolopoulos propagator:")
        for t in range(0, self.nt):
            autocorrelation[t] = propagator.autocorrelation()
            ic_correlation[t] = propagator.ic_correlation(self.potential, energy0_es=self.en0)
            if t % (self.nt//10+1) == 0:
                logger.info("time= %8.5f   time/T= %5.3f" % (self.times[t], self.times[t]*self.omega/(2*np.pi)))
            propagator.step(self.potential, self.dt)
        # compare semiclassical correlation functions with QM results
        self.assertTrue(np.isclose(autocorrelation, self.autocorrelation_qm, rtol=0.05).all())
        self.assertTrue(np.isclose(ic_correlation, self.ic_correlation_qm, rtol=0.1).all())
        # check norm of wavefunction is ~ 1 at last time step
        norm = propagator.norm()
        self.assertAlmostEqual(norm, 1, delta=0.05)
class TestAdiabaticShiftModel(unittest.TestCase):
    """
    run dynamics on harmonic and anharmonic adiabatic shift (AS) model
    and compare correlation functions with the exact QM counterparts
    The anharmonic AS model is described in  DOI:10.1063/1.5143212
    """
    def _load_AS_model(self, num_modes=5, anharmonicity=0.0):
        """Load a precomputed AS model (frequencies, Huang-Rhys factors, NACs)
        and the exact QM IC correlation function for the given anharmonicity."""
        # load precalculated AS model and exact QM correlation functions for internal conversion
        data_dir = f"DATA/AnharmonicAS/{num_modes}modes"
        model_file = f"{data_dir}/AS_model_chi{anharmonicity:.2f}.dat"
        ic_corr_file = f"{data_dir}/ic_correlation_chi{anharmonicity:.2f}_T0.001.dat"
        # # Adiabatic Shift Model
        # load frequencies, Huang-Rhys factors and NACs
        data = torch.from_numpy(np.loadtxt(model_file))
        if len(data.shape) == 1:
            # When only a single mode is read, data has the wrong shape
            #  (ncol,) -> (1,ncol)
            # NOTE(review): np.reshape on a torch tensor returns an ndarray --
            # confirm the torch ops below accept the result in this branch.
            data = np.reshape(data, (1, len(data)))
        # number of nuclear degrees of freedom
        self.dim = data.shape[1]
        # frequencies in Hartree
        omega = data[:,0] / units.hartree_to_wavenumbers
        # Huang-Rhys factors
        S = data[:,1]
        # NACs
        nac = data[:,2]
        # anharmonicity
        self.anharmonicity = anharmonicity
        chi = data[:,3]
        assert (chi == anharmonicity).all(), f"Anharmonicities in the model file should be equal to {self.anharmonicity}."
        # The horizontal shift dQ of the excited state PES is related to the Huang-Rhys factor
        #  S = 1/2 dQ^2 omega
        #  dQ = sqrt(2*|S|/omega) * sign(S)
        dQ = torch.sqrt(2.0*abs(S)/omega) * torch.sign(S)
        # The sign of S is not needed anymore, Huang-Rhys factors are always positive
        S = abs(S)
        # # Grid for Time Propagation
        # time grid
        self.nt = 4000 // 40
        # propagate for 150 fs
        t_max = 150.0 / units.autime_to_fs / 40.0
        self.times = torch.linspace(0.0, t_max, self.nt)
        self.dt = self.times[1]-self.times[0]
        logger.info(f"time step dt= {self.dt*units.autime_to_fs} fs")
        # save frequencies
        self.omega = omega
        # center of initial wavepacket
        self.q0 = dQ
        # momentum of initial wavepacket
        self.p0 = 0.0*self.q0
        # zero-point energy of the excited state potential
        self.en0 = torch.sum(hbar/2.0 * omega).item()
        # ground state potential energy surface
        self.potential = MorsePotential(self.omega, chi, nac)
        if anharmonicity == 0.0:
            # # Harmonic Approximation
            # In the harmonic approximation the correlation function $\tilde{k}_{ic}(t)$ can be computed exactly.
            # The definition of the variables follows eqn. (15)-(27)
            # in the supporting information of http://www.rsc.org/suppdata/c9/sc/c9sc05012d/c9sc05012d1.pdf
            self.ic_correlation_qm = np.zeros(self.nt, dtype=complex)
            # If the relative displacements dQ of the ground and excited state minima can be
            # negative, we have to multiply A and B with the sign of the displacement.
            A = nac * torch.sqrt(omega/(2*S)) * torch.sign(dQ)
            B = -nac * torch.sqrt((omega*S)/2) * torch.sign(dQ)
            for t in range(0, self.nt):
                Xt = S * torch.exp(-1j*omega*self.times[t])
                self.ic_correlation_qm[t] = ( 1.0/hbar**2
                                              * torch.prod(torch.exp(-S+Xt))
                                              * (torch.sum(A*Xt+B)**2 + torch.sum(A**2*Xt)) )
        else:
            # QM correlation function for anharmonicity > 0 has been precalculated.
            ic_data = np.loadtxt(ic_corr_file)
            times = ic_data[:,0] / units.autime_to_fs
            real, imag = ic_data[:,1], ic_data[:,2]
            # interpolate precalculated correlation function on the time grid
            self.ic_correlation_qm = ( interp1d(times, real)(self.times)
                                       + 1j*interp1d(times, imag)(self.times) )
        # save IC correlation function k_ic(t)
        ic_corr_file = "/tmp/ic_correlation_chi%s_qm.dat" % self.anharmonicity
        data = np.vstack( (self.times*units.autime_to_fs, self.ic_correlation_qm.real, self.ic_correlation_qm.imag) ).transpose()
        with open(ic_corr_file, "w") as f:
            f.write("# Time / fs            Re[k_IC(t)]          Im[k_IC(t)]\n")
            np.savetxt(f, data)
        logger.info(f"wrote table with correlation function to '{ic_corr_file}'")
    def _run_semiclassical_dynamics(self, propagator_name="HK", ntraj=50000):
        """Propagate the loaded AS model semiclassically ('HK' or 'WM') and
        assert the IC correlation function matches the QM reference."""
        # width of initial wavepacket psi(x,t=0) on the excited state
        Gamma_0 = torch.diag(self.omega)
        # Choose width parameters of the frozen Gaussians equal to the normal mode frequencies
        Gamma_i = torch.diag(self.omega)
        Gamma_t = Gamma_i
        # make random numbers reproducible
        torch.manual_seed(0)
        if propagator_name == "WM":
            logger.info("propagator: Walton-Manolopoulos")
            # Choose cell dimensions (volume proportional to 1/(a*b)^(dim/2))
            alpha = 500
            beta = 500
            logger.info(f"alpha= {alpha}  beta = {beta}")
            logger.info(f"volume of phase space cell  V= {(1.0/(2*np.sqrt(alpha*beta)))**self.dim}")
            propagator = WaltonManolopoulosPropagator(Gamma_i, Gamma_t, alpha, beta, device=device)
        else:
            logger.info("propagator: Herman-Kluk")
            propagator = HermanKlukPropagator(Gamma_i, Gamma_t, device=device)
        # initial conditions
        propagator.initial_conditions(self.q0, self.p0, Gamma_0, ntraj=ntraj)
        # save autocorrelation function for each time step
        autocorrelation = np.zeros((self.nt,), dtype=complex)
        # correlation function for internal conversion
        ic_correlation = np.zeros((self.nt,), dtype=complex)
        for t in range(0, self.nt):
            autocorrelation[t] = propagator.autocorrelation()
            ic_correlation[t] = propagator.ic_correlation(self.potential, energy0_es=self.en0)
            if t % 1 == 0:
                logger.info(f"{t+1:6}/{self.nt:6}  time= {self.times[t]:10.4f}  time/fs= {self.times[t]*units.autime_to_fs:10.4f}")
                #norm = propagator.norm()
                #print(f"|psi|= {norm}")
            propagator.step(self.potential, self.dt)
        # save IC correlation function k_ic(t)
        ic_corr_file = "/tmp/ic_correlation_chi%s_%s.dat" % (self.anharmonicity, propagator_name)
        data = np.vstack( (self.times*units.autime_to_fs, ic_correlation.real, ic_correlation.imag) ).transpose()
        with open(ic_corr_file, "w") as f:
            f.write("# Time / fs            Re[k_IC(t)]          Im[k_IC(t)]\n")
            np.savetxt(f, data)
        logger.info(f"wrote table with correlation function to '{ic_corr_file}'")
        # compare semiclassical correlation functions with QM results
        self.assertTrue(np.isclose(ic_correlation, self.ic_correlation_qm, rtol=0.1).all())
        # Check norm of wavefunction is ~ 1 at last time step, this test takes very long
        # and is therefore commented out.
        """
        logger.info("computing wavefunction norm (scales like Ntraj^2)")
        norm = propagator.norm()
        self.assertAlmostEqual(norm, 1, delta=0.05)
        """
    def test_HermanKlukPropagator(self):
        """HK dynamics on the harmonic and anharmonic 5-mode AS models."""
        # test harmonic AS model with 5 modes
        self._load_AS_model(num_modes=5, anharmonicity=0.0)
        self._run_semiclassical_dynamics(propagator_name="HK", ntraj=50000)
        # test anharmonic AS model with 5 modes
        self._load_AS_model(num_modes=5, anharmonicity=0.02)
        self._run_semiclassical_dynamics(propagator_name="HK", ntraj=50000)
    def test_WaltonManolopoulosPropagator(self):
        """WM dynamics on the harmonic and anharmonic 5-mode AS models."""
        # test harmonic AS model with 5 modes
        self._load_AS_model(num_modes=5, anharmonicity=0.0)
        self._run_semiclassical_dynamics(propagator_name="WM", ntraj=50000)
        # test anharmonic AS model with 5 modes
        self._load_AS_model(num_modes=5, anharmonicity=0.02)
        self._run_semiclassical_dynamics(propagator_name="WM", ntraj=50000)
# Allow running the test suite directly with `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1657669 | import adv.adv_test
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
    """Return the adventurer class of this module (entry point used by adv.adv_test)."""
    return Wedding_Elisanne
class Wedding_Elisanne(Adv):
    """Simulator module for the adventurer Wedding Elisanne ('2in1' build).

    a1: skill-prep style bonus ('sp', 8%); a3: 'bc' bonus (13%).
    """
    comment = '2in1'
    a1 = ('sp',0.08)
    a3 = ('bc',0.13)
    conf = {}
    conf['acl'] = """
        `s1,fsc and s2.charged<s2.sp-749
        `s2
        `s3,fsc and not self.s2debuff.get()
        `fs,seq=2 and cancel and ((s1.charged>=909 and not self.s2debuff.get()) or s3.charged>=s3.sp)
        `fs,seq=3 and cancel
        """
    conf['slot.a'] = TSO()+JotS()
    def d_slots(self):
        # With a bow co-ability present, swap the second amulet to FRH.
        if 'bow' in self.ex:
            self.conf.slot.a = TSO()+FRH()
    def prerun(self):
        # s2 applies a 15% defense debuff for 10s (single proc).
        self.s2debuff = Debuff('s2defdown',0.15,10,1)
        if self.condition('s2 defdown for 10s'):
            self.s2defdown = 1
        else:
            # BUG FIX: both branches previously set 1, so declining the
            # condition had no effect; disable the debuff when declined.
            self.s2defdown = 0
    def s2_proc(self, e):
        # On each s2 cast, (re)apply the defense debuff when enabled.
        if self.s2defdown:
            self.s2debuff.on()
if __name__ == '__main__':
    # Run the DPS simulation for this adventurer with default test configuration.
    conf = {}
    adv.adv_test.test(module(), conf)
| StarcoderdataPython |
# Minimal facex demo: run OpenCV-based face detection on a local image and show it.
# (cartoonify and blur_bg are imported but not used in this snippet.)
from facex import cartoonify, face_detection, blur_bg
import cv2
image = face_detection('face.jpg', method='opencv')
# NOTE(review): the window title says "cartoon" but the image shown is the
# face-detection output -- confirm intent.
cv2.imshow("cartoon", image)
cv2.waitKey()
1788645 | <filename>examples/shared_pandas.py
from os import getpid
from numpy.random import randn
from numpush.shmem import SDataFrame
from pandas import DataFrame
from multiprocessing import Process
# Build a regular DataFrame and its shared-memory counterpart.
N, M = 10, 10
df = DataFrame(randn(N,M))
sdf = SDataFrame(df)
# BUG FIX: `assert df == sdf` raises ValueError -- elementwise comparison of
# two DataFrames yields a DataFrame, whose truth value is ambiguous.  Reduce
# the elementwise comparison explicitly instead (assumes SDataFrame compares
# like a DataFrame, which the original assert already relied on).
assert (df == sdf).all().all()
assert df is not sdf
def modify(shared_df, start):
    """Stamp every other column of shared_df (start, start+2, ...) with this
    worker process's pid (Python 2 example code)."""
    # Modify the column of the dataframe with the pid of the process
    # operating on it. It's stupid but it illustrates that the DataFrame
    # is truly shared memory instead of copy-on-write.
    for i in xrange(start, N, 2):
        shared_df[i] = getpid()
# Shared Memory
# =============
# Both children mutate the same underlying buffer, so the parent observes
# the pids written by both workers.
p1 = Process(target=modify, args=(sdf,0))
p2 = Process(target=modify, args=(sdf,1))
p1.start()
p2.start()
p1.join()
p2.join()
print sdf.to_string()
# Copy on Write
# =============
# Each child gets a copy-on-write view of the plain DataFrame, so the
# parent's df is left unchanged after both workers exit.
p1 = Process(target=modify, args=(df,0))
p2 = Process(target=modify, args=(df,1))
p1.start()
p2.start()
p1.join()
p2.join()
print df.to_string()
| StarcoderdataPython |
68337 | <gh_stars>0
import yaml
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from project.models import Namespace, Project
class Command(BaseCommand):
    help = 'Reload sync source config.'

    def handle(self, *args, **options):
        """Load the YAML sync config from settings.TARGET_CONFIG_FILE and
        (re)create the configured Project and Namespace records.

        Raises CommandError when the file is unreadable, the YAML is
        malformed, a required key is missing, or a namespace registry host
        is unsupported.
        """
        config_path = settings.TARGET_CONFIG_FILE
        try:
            with open(config_path, "r") as f:
                payload = f.read()
            # BUG FIX: use safe_load -- yaml.load without an explicit Loader is
            # deprecated and can construct arbitrary Python objects; the config
            # is plain data.
            content = yaml.safe_load(payload)
        except OSError as e:
            raise CommandError(
                'Can not get config file: {}.'.format(e))
        # BUG FIX: PyYAML raises yaml.YAMLError (not ValueError) on malformed
        # input, so the previous `except ValueError` never fired for parse errors.
        except yaml.YAMLError as e:
            raise CommandError(
                'Load config file error: {}.'.format(e))
        namespaces = content.get("namespaces", {})
        projects = content.get("projects", {})
        try:
            for p_name, project in projects.items():
                Project.objects.create_project(
                    name=p_name,
                    project_name=project['project_name'],
                    registry_host=project['registry_host'],
                    registry_namespace=project.get("registry_namespace", ""),
                    registry_username=project.get("registry_username", ""),
                    registry_password=project.get("registry_password", ""),
                )
            for n_name, namespace in namespaces.items():
                # only Google Container Registry namespaces are supported
                if namespace['registry_host'] != "https://gcr.io":
                    raise CommandError("Do not support others.")
                Namespace.objects.create_namespace(
                    name=n_name,
                    registry_host=namespace['registry_host'],
                    registry_username=namespace.get("registry_username", ""),
                    registry_password=namespace.get("registry_password", ""),
                )
        except KeyError as e:
            raise CommandError("Config error, miss key: {}".format(e))
        self.stdout.write(self.style.SUCCESS('Load config successfully.'))
| StarcoderdataPython |
3299764 | <filename>users/tests.py
import json
from math import ceil
from random import randrange
from typing import List, Dict
from django.contrib.auth.models import AnonymousUser, User
from django.test import TestCase, Client
from django.urls import reverse
from django.utils.timezone import now
from utils.tests import tester_signup, tester_login, UserTestCase
from utils.unittest import *
from users.models import UserProfile
from utils import Pagination
# URL helpers resolved from the project's named routes.
user_list_url = reverse('user_list')
user_detail_url = lambda user_id: reverse('user_detail', args=[user_id])
change_password_url = lambda user_id: reverse('change_password', args=[user_id])
whoami_url = reverse('users_whoami')
class UserListTest(TestCase):
    """Tests for the user list endpoint: ordering, search, pagination."""
    def check_order(self, results: List[Dict]):
        """
        Assert that ``results`` is sorted by experience in descending order.
        """
        exp = list(map(lambda u: int(u["experience"]), results))
        self.assertEqual(sorted(exp, reverse=True), exp)
    def setUp(self):
        # one registered user exists before every test
        tester_signup()
    def test_get_user_list(self):
        tester_signup('<EMAIL>', 'qwerty', 'account', '1234567')
        user = User.objects.filter(username='<EMAIL>')[0]
        user.userprofile.experience = 1
        user.userprofile.save()
        tester_signup('<EMAIL>', 'qwerty', 'account1', '12345678')
        user = User.objects.create(username="<EMAIL>", first_name="user")
        UserProfile.objects.create(user=user, student_id='233', about="你好,世界!", experience="5")
        self.assertEqual(User.objects.count(), 4)
        client = Client()
        response = client.get(user_list_url)
        self.assertEqual(response.status_code, 200)  # anonymous GET still returns 200
        tester_login(client=client)
        response = client.get(user_list_url)
        self.assertEqual(response.status_code, 200)
        json_content = response.json()
        results = json_content['results']
        self.check_order(results)
        self.assertEqual([5, 1, 0, 0], list(map(lambda u: u['experience'], results)))  # results are sorted by experience
    def test_search(self):
        # helper that validates a returned result set against the expected matches
        def check_search_queryset(_results: List[Dict], keywords: str):
            self.check_order(_results)
            results_id = list(map(lambda _u: str(_u['id']), _results))
            for u in User.objects.all():  # each user must appear iff the keyword occurs in its username/first_name/student_id
                self.assertEqual(str(u.id) in results_id,
                                 keywords in (u.username + u.first_name + u.userprofile.student_id),
                                 "keywords=%s\t"
                                 "username=%s\t"
                                 "first_name=%s\t"
                                 "student_id=%s" % (keywords, u.username, u.first_name, u.userprofile.student_id))
        for i in range(1, 40):
            user = User.objects.create(username="<EMAIL>" % i, first_name="user%d" % (i + 78))
            UserProfile.objects.create(user=user, student_id=str(i + 55), about="你好,世界!", experience=randrange(1, 1000))
        self.assertEqual(User.objects.count(), 40)
        client = Client()
        tester_login(client=client)
        test_keywords = list('1234567890') + ['hello', 'user', '1@example', '@', '12', '23']  # keywords exercised below
        for keyword in test_keywords:
            # large page_size so pagination does not hide results
            # (original note claimed page_size=0 disables paging -- TODO confirm against the API)
            response = client.get("%s?search=%s&page_size=100" % (user_list_url, keyword))
            self.assertEqual(response.status_code, 200)
            json_content = response.json()
            results = json_content['results']
            check_search_queryset(results, keyword)
        r1 = client.get("%s?search=&page_size=100" % user_list_url)
        r2 = client.get("%s?page_size=100" % user_list_url)
        self.assertEqual(json.loads(r1.content), json.loads(r2.content))  # empty search parameter must not filter
        r1 = client.get("%s?search=qwertyuiop&page_size=100" % user_list_url)
        self.assertEqual(r1.status_code, 200)  # no matches: still 200
        self.assertEqual(json.loads(r1.content)["results"], [])  # no matches: results == []
    def test_pagination(self):
        total_users = 56
        for i in range(1, total_users):
            user = User.objects.create(username="<EMAIL>" % i, first_name="user")
            UserProfile.objects.create(user=user, student_id=str(i), experience=randrange(0, 1000))
        self.assertEqual(User.objects.count(), total_users)
        client = Client()
        tester_login(client=client)
        for page_size in [1, 2, 3, 5, 8, 13, 21, 34]:  # compute the total number of pages for this page size
            total_pages = ceil(total_users / page_size)
            for page in range(1, total_pages):
                r1 = client.get("%s?search=&page_size=%s&page=%s" % (user_list_url, page_size, page))
                results1 = json.loads(r1.content)['results']
                count = json.loads(r1.content)['count']
                self.assertEqual(len(results1), page_size)  # page size is respected
                self.assertEqual(count, total_users)  # total count is correct
                self.check_order(results1)  # ordering is correct
                r2 = client.get("%s?page=%s&page_size=%s" % (user_list_url, page, page_size))
                results2 = json.loads(r2.content)['results']
                self.assertEqual(results1, results2)  # query-parameter order must not matter
            # an invalid page_size must behave like the default page size
            if page_size == Pagination.page_size:
                for invalid_page_size in [-1, total_users, 0, 'qwerty']:
                    r3 = client.get("%s?&page_size=%s&" % (user_list_url, invalid_page_size))
                    self.assertEqual(r1.status_code, 200)
                    self.assertEqual(json.loads(r1.content), json.loads(r3.content))
            # check the last page
            # NOTE(review): "&=%s" looks like it should be "&page=%s"; as written
            # the page number is ignored and page 1 is fetched -- confirm.
            r1 = client.get("%s?&page_size=%s&=%s" % (user_list_url, page_size, total_pages))
            results1 = json.loads(r1.content)['results']
            self.assertEqual(len(results1), page_size)  # page size is respected
            self.check_order(results1)  # ordering is correct
        # invalid page numbers must yield 404
        for page in [-1, total_pages + 1, 0, 'qwerty']:
            r1 = client.get("%s?&page_size=%s&page=%s" % (user_list_url, page_size, page))
            self.assertEqual(r1.status_code, 404, f"page={page}, page_size={page_size}")
    def test_user_privacy_protect(self):
        # TODO: not implemented yet
        pass
class UserDetailTest(UserTestCase):
def setUp(self):
tester_signup("<EMAIL>", "adminadmin", 'admin', "20210101", )
u = User.objects.get(first_name='admin')
u.is_staff = True
u.save()
tester_signup('<EMAIL>', 'qwerty', 'superuser', '1234567')
u = User.objects.get(first_name='superuser')
u.userprofile.experience = 1
u.is_superuser = True
u.userprofile.save()
u.save()
tester_signup('<EMAIL>', 'qwerty', 'user', '12345679')
u = User(username="<EMAIL>",
first_name="another_user",
last_name="clever",
last_login=now(),
date_joined=now())
up = UserProfile(user=u,
student_id='233',
about="你好,世界!",
experience=5)
u.save()
up.save()
self.assertEqual(User.objects.count(), 4)
def test_get(self):
client = Client()
first_id = User.objects.first().id
response = client.get(user_detail_url(first_id))
self.assertEqual(response.status_code, 200) # 未登录用户访问,返回 200
tester_login(client=client)
for u in User.objects.all():
response = client.get(user_detail_url(u.id))
self.assertUserDetailEqual(response.content, u)
# 只测试 patch
def test_patch_unauthorized(self):
# 设置用户的权限
superuser = User.objects.get(first_name="superuser")
admin = User.objects.get(first_name="admin")
modify_user = User.objects.get(first_name="user")
another_user = User.objects.get(first_name="another_user")
user_permissions = [ # 以五种用户身份去遍历
[AnonymousUser, False],
[superuser, True],
[admin, True],
[modify_user, True],
[another_user, False]
]
for user, permission in user_permissions:
modify_user = User.objects.get(first_name="user")
new_value = modify_user.last_name + '1'
client = Client()
if user != AnonymousUser:
client.force_login(user)
response = client.patch(user_detail_url(modify_user.pk),
data={"last_name": new_value},
content_type='application/json')
self.assertEqual(response.status_code == 200, permission,
f"user={user}, status_code={response.status_code}")
modify_user = User.objects.get(first_name="user")
self.assertEqual(new_value == modify_user.last_name, permission,
f"user={user}, new_value={new_value}, current={modify_user.last_name}")
def test_patch_readonly_field(self):
readonly_field_and_example = {
"id": 233,
"username": "<EMAIL>",
"experience": 233,
"avatar_url": "https://uestc-msc.github.io/",
"last_login": now(),
"date_joined": now(),
"is_staff": True,
"is_superuser": True
}
user = User.objects.get(first_name="user")
client = Client()
client.force_login(user)
for field, example in readonly_field_and_example.items():
response = client.patch(user_detail_url(user.id),
data={field: example},
content_type='application/json')
self.assertEqual(response.status_code, 200)
response = client.get(user_detail_url(user.id))
json_response = response.json()
self.assertNotEqual(json_response[field], example)
def test_patch_correctly(self):
field_and_example = {
"first_name": "string",
"last_name": "string",
"student_id": "4231423",
"about": "hello everyone!",
"subscribe_email": True
}
u = User.objects.get(first_name="user")
id = u.id
client = Client()
client.force_login(u)
response = client.get(user_detail_url(id))
last_json = response.json()
for field, example in field_and_example.items():
response = client.patch(user_detail_url(id),
data={field: example},
content_type='application/json')
self.assertEqual(response.status_code, 200)
response = client.get(user_detail_url(id))
current_json = response.json()
last_json[field] = example
self.assertEqual(current_json, last_json) # 比较更新后 JSON 和预期 JSON
def test_patch_with_invalid_value(self):
field_and_wrong_example = [
["first_name", ""],
["student_id", ""],
["subscribe_email", "I don't want to"]
]
user = User.objects.get(first_name="user")
user.userprofile.student_id = "23333"
user.userprofile.about = "233"
user.userprofile.subscribe_email = True
user.userprofile.save()
client = Client()
client.force_login(user)
for field, example in field_and_wrong_example:
response = client.patch(user_detail_url(user.id),
data={field: example},
content_type='application/json')
self.assertEqual(response.status_code, 400, field + example)
response = client.get(user_detail_url(user.id))
json_response = response.json()
self.assertNotEqual(json_response[field], example)
def test_patch_one_field_in_userprofile_does_not_affect_others(self):
user_before = User.objects.get(first_name="user")
user_before.userprofile.student_id = '2333'
user_before.userprofile.save()
client = Client()
client.force_login(user_before)
response = client.patch(user_detail_url(user_before.id),
data={'about': 'hahaha'},
content_type='application/json')
self.assertEqual(response.status_code, 200)
user_after = User.objects.get(first_name="user")
self.assertEqual(user_after.userprofile.student_id, '2333')
self.assertEqual(user_after.userprofile.about, 'hahaha')
def test_get_patch_user_with_no_userprofile(self):
    """GET must work for a user lacking a profile; PATCH must create one lazily."""
    bare = User(id=23333)
    bare.save()
    self.assertEqual(hasattr(bare, 'userprofile'), False)
    c = Client()
    tester_login(client=c)
    # The detail endpoint is reachable even without a UserProfile row.
    self.assertEqual(c.get(user_detail_url(23333)).status_code, 200)
    resp = c.patch(user_detail_url(23333),
                   data={"about": "I'm 23333"},
                   content_type='application/json')
    self.assertEqual(resp.status_code, 200)
    reloaded = User.objects.get(id=23333)
    # The PATCH is expected to create the missing UserProfile on the fly.
    self.assertEqual(hasattr(reloaded, 'userprofile'), True)
    self.assertEqual(reloaded.userprofile.about, "I'm 23333")
class WhoAmITest(TestCase):
    """Tests for the /whoami endpoint."""

    def setUp(self):
        tester_signup()
        tester_signup("<EMAIL>", "supersuper", "superuser", "1297391")
        tester_signup("<EMAIL>", "useruser", "user", "1297392")
        tester_signup("<EMAIL>", "anotheruser", "anotheruser", "1297393")
        self.admin = User.objects.filter(first_name="admin")[0]

    def test_get_whoami(self):
        """whoami must return exactly the detail payload of the logged-in user."""
        c = Client()
        tester_login(client=c)
        whoami_resp = c.get(whoami_url)
        self.assertEqual(whoami_resp.status_code, 200)
        detail_resp = c.get(user_detail_url(self.admin.id))
        self.assertEqual(whoami_resp.content, detail_resp.content)

    def test_get_unauthorized(self):
        """An anonymous request must be rejected with 401."""
        self.assertEqual(Client().get(whoami_url).status_code, 401)
# 更改密码相关测试
# Tests for the change-password endpoint: a successful change must invalidate
# every active session, the old password must stop working, and invalid
# requests must not alter any credentials.
# NOTE(review): the bare `<PASSWORD>` / `<EMAIL>` tokens below look like
# dataset redaction placeholders (a bare `<PASSWORD>` is not valid Python) —
# confirm against the original source.
class ChangePasswordTest(TestCase):
email = "<EMAIL>"
password = "<PASSWORD>"
def setUp(self):
# Register one account and keep a reference to the created user.
tester_signup(self.email, self.password, 'Admin', '20210101')
self.user = User.objects.filter(username=self.email).first()
def test_change_password(self):
# Log two clients in, change the password via the second one, and check
# that both sessions are invalidated and only the new password works.
c1 = Client()
response = tester_login(self.email, self.password, c1)
self.assertEqual(response.wsgi_request.user, self.user)
response = c1.get('/')
self.assertEqual(response.wsgi_request.user, self.user)
c2 = Client()
tester_login(self.email, self.password, c2)
r2 = c2.patch(change_password_url(self.user.id),
{"old_password": <PASSWORD>,
"new_password": "<PASSWORD>"},
content_type='application/json')
self.assertEqual(r2.status_code, 204)
response = c1.get('/')
self.assertEqual(response.wsgi_request.user.is_authenticated, False) # the first client should be logged out now
response = c2.get('/')
self.assertEqual(response.wsgi_request.user.is_authenticated, False) # the second client should be logged out now
response = tester_login(self.email, self.password, c1)
self.assertEqual(response.status_code, 401) # logging in with the old password must fail
response = tester_login(self.email, 'ADMINADMIN', c1)
self.assertEqual(response.status_code, 200) # logging in with the new password must succeed
r2 = tester_login(self.email, self.password, c2)
self.assertEqual(r2.status_code, 401) # the old password is rejected for the second client too
response = c1.patch(change_password_url(self.user.id),
{"old_password": "<PASSWORD>",
"new_password": <PASSWORD>},
content_type='application/json')
self.assertEqual(response.status_code, 204) # change the password back
response = tester_login(self.email, self.password, c1)
self.assertEqual(response.status_code, 200)
def test_change_email_and_password(self):
# Changing e-mail and password together: only the new pair may log in.
c1 = Client()
response = tester_login(self.email, self.password, c1)
self.assertEqual(response.wsgi_request.user, self.user)
response = c1.patch(change_password_url(self.user.id),
{"old_password": <PASSWORD>,
"new_password": "<PASSWORD>",
"new_email": "<EMAIL>"},
content_type='application/json')
self.assertEqual(response.status_code, 204)
response = tester_login(self.email, self.password)
self.assertEqual(response.status_code, 401) # the old credentials can no longer log in
response = tester_login("<EMAIL>", "ADMINADMIN")
self.assertEqual(response.status_code, 200) # the new credentials work
def test_reset_password_with_invalid_field(self):
# Each invalid request variant must be rejected with the proper status
# code and must leave both the password and the e-mail unchanged.
tester_signup("<EMAIL>", "anotheruser", "another", "20201231")
# not logged in -> 401
client = Client()
response = client.patch(change_password_url(self.user.id),
{"old_password": <PASSWORD>,
"new_password": "<PASSWORD>"},
content_type='application/json')
self.assertEqual(response.status_code, 401)
response = tester_login("<EMAIL>", "anotheruser", client)
self.assertEqual(response.status_code, 200)
# missing fields -> 403
response = tester_login(self.email, self.password, client)
self.assertEqual(response.status_code, 200)
response = client.patch(change_password_url(self.user.id),
{"new_password": "<PASSWORD>"},
content_type='application/json')
self.assertEqual(response.status_code, 403)
response = client.patch(change_password_url(self.user.id),
{"password": "<PASSWORD>",
"new_password": "<PASSWORD>"},
content_type='application/json')
self.assertEqual(response.status_code, 403)
# wrong old password -> 403
response = client.patch(change_password_url(self.user.id),
{"old_password": "password",
"new_email": '<EMAIL>',
"new_password": "<PASSWORD>", },
content_type='application/json')
self.assertEqual(response.status_code, 403)
response = tester_login("<EMAIL>", "anotheruser", client)
self.assertEqual(response.status_code, 200) # the e-mail was not changed
# new password too weak -> 400
response = client.patch(change_password_url(self.user.id),
{"old_password": "<PASSWORD>",
"new_password": "<PASSWORD>"},
content_type='application/json')
self.assertEqual(response.status_code, 400)
# malformed e-mail address -> 400
response = client.patch(change_password_url(self.user.id),
{"old_password": "<PASSWORD>",
"new_email": "qwerty"},
content_type='application/json')
self.assertEqual(response.status_code, 400)
# e-mail already taken -> 400
response = client.patch(change_password_url(self.user.id),
{"old_password": "<PASSWORD>",
"new_email": '<EMAIL>',
'new_password': "<PASSWORD>"},
content_type='application/json')
self.assertEqual(response.status_code, 400)
response = tester_login("<EMAIL>", "anotheruser", client)
self.assertEqual(response.status_code, 200) # the password was not changed
# TODO: admin editing ordinary users/admins/superusers; superuser editing
# other users' information.
def admin_change_email_and_password(self):
pass
| StarcoderdataPython |
197261 | from os import getcwd, stat, remove
import json
from pyinfraboxutils.token import encode_job_token
from pyinfraboxutils.storage import storage
from temp_tools import TestClient, TestUtils
from test_template import ApiTestTemplate
# End-to-end tests for the /api/job endpoints: job metadata, source/cache/
# output transfer, state transitions and result/markup/badge uploads.
class JobApiTest(ApiTestTemplate):
url_ns = 'api/job'
def test_job(self):
# GET /job must echo the project and job ids of the token's job.
filename = 'temp_file.json'
filesize = 100
TestClient.execute("""INSERT INTO source_upload (id, project_id, filename, filesize)
VALUES (%s, %s, %s, %s)
""", [self.source_upload_id, self.project_id, filename, filesize])
r = TestClient.get(self.url_ns + '/job', self.job_headers)
self.assertEqual(r['project']['id'], self.project_id)
self.assertEqual(r['job']['id'], self.job_id)
def test_source(self):
# Upload a source archive to storage; GET /source must stream back a
# payload of the same size.
data = { "data": "dummy_data" }
file_name = "test_source.tmp_test_file"
with open(file_name, "w") as source_data_file:
json.dump(data, source_data_file)
file_size = stat(file_name).st_size
TestClient.execute("""INSERT INTO source_upload (id, project_id, filename, filesize)
VALUES (%s, %s, %s, %s)
""", [self.source_upload_id, self.project_id, file_name, file_size])
TestClient.execute("""UPDATE build SET source_upload_id = %s
WHERE id = %s""", [self.source_upload_id, self.build_id])
with open(file_name, 'r') as source_data:
storage.upload_project(source_data, file_name)
remove(file_name)
response = TestClient.get(self.url_ns + '/source', self.job_headers)
response_size = TestUtils.get_stream_file_size(response.data)
self.assertEqual(response_size, file_size)
def test_cache(self):
# POST then GET /cache must round-trip the archive unchanged in size.
filename = 'cache.tar.gz'
file_path = getcwd() + '/' + filename
test_data = open(file_path, 'rb')
files = { 'cache.tar.gz': test_data }
r = TestClient.post(self.url_ns + '/cache', data=files, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
r = TestClient.get(self.url_ns + '/cache', self.job_headers)
actual_cache_size = stat(file_path).st_size
received_cache_size = TestUtils.get_stream_file_size(r.data)
# Ensure downloaded and uploaded file sizes are equal
self.assertEqual(received_cache_size, actual_cache_size)
def test_output(self):
# Uploading a job output archive must succeed with an empty response.
filename = 'output.tar.gz'
file_path = getcwd() + '/' + filename
test_data = open(file_path, 'rb')
files = { 'output.tar.gz': test_data }
r = TestClient.post(self.url_ns + '/output', data=files, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
def test_setrunning(self):
# /setrunning must flip the job state to 'running' in the database.
r = TestClient.post(self.url_ns + '/setrunning', {}, self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT state FROM job
WHERE id = %s""", [self.job_id])
job_state = r["state"]
self.assertEqual(job_state, 'running')
def test_create_jobs(self):
# A created child job gets its name prefixed with the parent job's name,
# and the generic "docker" type is mapped to "run_project_container".
job_id = "6544af82-1c4f-5bb5-b1da-a54a0ced5e6f"
data = { "jobs": [{
"id": job_id,
"type": "docker",
"name": "test_job1",
"docker_file": "",
"build_only": False,
"resources": { "limits": { "cpu": 1, "memory": 512 }}
}]}
r = TestClient.post(self.url_ns + '/create_jobs', data, self.job_headers)
self.assertEqual(r, 'Successfully create jobs')
jobs = TestClient.execute_many("""SELECT id, name, type FROM job
WHERE id = %s""", [job_id])
self.assertEqual(jobs[0][0], data["jobs"][0]["id"])
self.assertEqual(jobs[0][1], self.job_name + "/" + data["jobs"][0]["name"])
# If type was equal to "docker" then it should replace it with "run_project_container" type
self.assertEqual(jobs[0][2], "run_project_container")
num_jobs = len(jobs)
self.assertEqual(num_jobs, 1)
def test_consoleupdate(self):
# Console output posted by the job must land in the console table.
data = { "output": "some test output" }
r = TestClient.post(self.url_ns + '/consoleupdate', data=data, headers=self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT output FROM console
WHERE job_id = %s""", [self.job_id])
self.assertEqual(r["output"], data["output"])
def test_stats(self):
# Stats are stored JSON-encoded, hence the surrounding quotes below.
data = { "stats": "finished" }
r = TestClient.post(self.url_ns + '/stats', data=data, headers=self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT stats FROM job
WHERE id = %s""", [self.job_id])
self.assertEqual(r["stats"], "\"%s\"" % data["stats"])
def test_markup(self):
# A markup file upload must be stored under the posted form-field name
# with its JSON payload intact.
markup_data = {
"version": 1,
"title": "dummy_title",
"elements": [ {
"type": "text",
"text": "dummy_text"
}
]
}
file_name = "test_markup.tmp_test_file.json"
with open(file_name, 'w') as markup_data_file:
json.dump(markup_data, markup_data_file)
with open(file_name, 'r') as markup_data_file:
markup_data_file.seek(0)
data = { "file1": markup_data_file }
r = TestClient.post(self.url_ns + '/markup', data=data, headers=self.job_headers,
content_type='multipart/form-data')
remove(file_name)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT job_id, project_id, name, data FROM job_markup
WHERE job_id = %s""", [self.job_id])
print(r)
# check job_id
self.assertEqual(r[0], self.job_id)
# check project_id
self.assertEqual(r[1], self.project_id)
# check name
self.assertEqual(r[2], "file1")
received_data = json.loads(r[3])
# check data (title)
self.assertEqual(received_data["title"], markup_data["title"])
# check data (elements)
self.assertEqual(received_data["elements"], markup_data["elements"])
def test_badge(self):
# A badge upload must persist subject/status/color for the job.
job_data = {
"version": 1,
"subject": "subject_val",
"status": "status_val1",
"color": "green"
}
file_name = "test_badge.tmp_test_file.json"
with open(file_name, 'w') as job_data_file:
# Write data into json file
json.dump(job_data, job_data_file)
with open(file_name, 'r') as job_data_file:
data = { "file1": job_data_file }
result = TestClient.post(self.url_ns + '/badge', data=data, headers=self.job_headers,
content_type='multipart/form-data')
remove(file_name)
self.assertEqual(result, {})
r = TestClient.execute_one("""SELECT * from job_badge
WHERE job_id = %s""", [self.job_id])
self.assertEqual(r["job_id"], self.job_id)
self.assertEqual(r["project_id"], self.project_id)
self.assertEqual(r["subject"], job_data["subject"])
self.assertEqual(r["status"], job_data["status"])
self.assertEqual(r["color"], job_data["color"])
def test_testresult(self):
# /testresult validation: missing data, then a disallowed file extension,
# then a valid JSON report whose rows must appear in test_run.
#test empty data
data = {"data": {}}
result = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers)
self.assertEqual(result['message'], 'data not set')
# test wrong file format
test_filename = 'dummy_results.xml'
with open(test_filename, 'w') as test_file:
# just create file, there's no need to write anything into file
pass
with open(test_filename, 'r') as test_file:
data = { "data": test_file }
r = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r['message'], 'file ending not allowed')
remove(test_filename)
# test data
testresult_data = {
"version": 1,
"tests": [
{
"suite":"api_test_suite",
"name": "test_name1",
"status": "ok",
"duration": 5,
"message": "test_message1",
"stack":"stack1",
"measurements":[]
}, {
"suite":"api_test_suite",
"name": "test_name2",
"status": "failure",
"duration": 21,
"message": "test_message2",
"stack":"stack2",
"measurements":[]
}]
}
test_filename = 'dummy_test_result.json'
with open(test_filename, 'w') as test_file:
json.dump(testresult_data, test_file)
TestClient.execute("""TRUNCATE test_run""")
with open(test_filename, 'r') as test_file:
data = { "data": test_file }
r = TestClient.post(self.url_ns + '/testresult', data=data, headers=self.job_headers,
content_type='multipart/form-data')
self.assertEqual(r, {})
remove(test_filename)
r = TestClient.execute_many("""SELECT state, duration, message, stack FROM test_run
WHERE job_id = %s""", [self.job_id])
# We receive doubles from the SQL query results so we need to convert values manually to be able to test it
for test in testresult_data["tests"]:
test["duration"] = float(test["duration"])
keys = ['status', 'duration', 'message', 'stack']
for i, received_row in enumerate(r):
# create dictionary from the list to compare it easier
row_dictionary = dict(zip(keys, received_row))
self.assertTrue(all(item in testresult_data["tests"][i].items()
for item in row_dictionary.items()))
def test_setfinished(self):
# /setfinished must persist the final state/message and clear the console.
data = {
"state": "finished",
"message": "Job successfully finished"
}
r = TestClient.post(self.url_ns + '/setfinished', data, self.job_headers)
self.assertEqual(r, {})
r = TestClient.execute_one("""SELECT state, message, console FROM job
WHERE id = %s""", [self.job_id])
self.assertEqual(r["state"], data["state"])
self.assertEqual(r["message"], data["message"])
self.assertEqual(r["console"], "")
| StarcoderdataPython |
1760972 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 02:00:18 2019
@author: coby_
Practica 10: uso de librerias
"""
import datetime
# Small demo of the datetime standard-library module: today's date, its
# year and weekday, and date arithmetic with timedelta.
today = datetime.date.today()
print(today)
print("El año es ", today.year)
print(today.weekday())
five_days = datetime.timedelta(days=5)
today_plus_five = today + five_days
print(today_plus_five)
| StarcoderdataPython |
1725025 | <filename>scraping_utils/scraper.py
# Twitter Scraper for Hate Speech detection
import tweepy
import pandas as pd
import time
import configparser
# Folder Locations
OUTPUT_DIR = "data"
# Load Configs
# Twitter API credentials are read from config.ini (section "dev");
# the file must exist in the working directory.
config = configparser.ConfigParser()
config.read('config.ini')
consumer_key = config.get('dev', 'consumer_key')
consumer_secret = config.get('dev', 'consumer_secret')
access_token = config.get('dev', 'access_token')
access_token_secret = config.get('dev', 'access_token_secret')
# Grant Twitter dev account access to Tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
# Function to scrape twitter based on user
# Function to scrape twitter based on user
def get_tweets_by_user(username, count, include_rts=False, write_to_file=False, filename=None):
    """Fetch up to `count` tweets from `username`'s timeline.

    Returns a DataFrame with Datetime/Tweet Id/Text columns, unless
    write_to_file is True, in which case the tweets are written to a CSV
    under OUTPUT_DIR (named after `filename`, defaulting to the username)
    and nothing is returned.
    """
    try:
        # BUG FIX: the include_rts parameter was previously ignored (False was
        # hard-coded in the Cursor call), making the argument useless.
        tweets = tweepy.Cursor(api.user_timeline, screen_name=username,
                               include_rts=include_rts,
                               tweet_mode='extended').items(count)
        # Pulling information from the tweets iterable object.
        tweets_list = [[tweet.created_at, tweet.id, tweet.full_text] for tweet in tweets]
        # Add or remove columns here as tweet fields are added/removed above.
        tweets_df = pd.DataFrame(tweets_list, columns=['Datetime', 'Tweet Id', 'Text'])
        if write_to_file:
            # Set default filename
            if filename is None:
                filename = f"file_{username.replace(' ', '_')}"
            # BUG FIX: the output path previously ignored `filename` entirely.
            tweets_df.to_csv(f'{OUTPUT_DIR}/{filename}.csv', sep=',', index=False)
        else:
            return tweets_df
    except BaseException as e:
        # NOTE(review): catching BaseException also swallows KeyboardInterrupt;
        # kept for parity with the original error handling.
        print('failed on_status,', str(e))
        time.sleep(3)
# Function to scrape twitter based on query
# Function to scrape twitter based on query
def get_tweets_by_query(text_query, count, lang='en', result_type='mixed', filename=None, filter_rt=True):
    """Search tweets matching `text_query` and save them as CSV under OUTPUT_DIR.

    When filter_rt is True, retweets are excluded via the search operator.
    The default filename is derived from the query with spaces replaced.
    """
    try:
        # Remember the caller's raw query before the retweet filter is
        # appended, so the default filename reflects what was asked for.
        base_query = text_query
        # Filter retweets
        if filter_rt:
            text_query = text_query + " -filter:retweets"
        tweets = tweepy.Cursor(api.search_tweets, q=text_query, lang=lang,
                               result_type=result_type,
                               tweet_mode='extended').items(count)
        # Pulling information from the tweets iterable object.
        tweets_list = [[tweet.created_at, tweet.id, tweet.full_text] for tweet in tweets]
        # Add or remove columns here as tweet fields are added/removed above.
        tweets_df = pd.DataFrame(tweets_list, columns=['Datetime', 'Tweet Id', 'Text'])
        if filename is None:
            # BUG FIX: the default filename was previously built from the
            # mutated query, so it could contain " -filter:retweets".
            filename = base_query.replace(' ', '_')
        # BUG FIX: the output path previously ignored `filename` entirely.
        tweets_df.to_csv(f'{OUTPUT_DIR}/{filename}.csv', sep=',', index=False)
    except BaseException as e:
        print('failed on_status,', str(e))
        time.sleep(3)
| StarcoderdataPython |
1730559 | import struct
import os
import time
import sys
import pymysql
from pypinyin import lazy_pinyin
from utils.tools import UtilLogger
from queue import Queue
from threading import Thread
# Shared queue between the parser thread (producer) and the DB-writer
# threads (consumers); holds fully parsed keyword records.
res_queue = Queue()
class ExtSougouScel():
    """Parser for Sogou pinyin input-method lexicon (.scel) files."""
    def __init__(self):
        # File offset of the pinyin table.
        self.startPy = 0x1540
        # File offset of the Chinese word table.
        self.startChinese = 0x2628
        # Global pinyin table: index -> pinyin syllable string.
        self.GPy_Table = {}
        # Parsed Chinese words, in file order.
        self.GTable = []
    def byte2str(self, data):
        """Decode UTF-16-LE bytes to str, mapping CR to LF and dropping spaces."""
        i = 0
        length = len(data)
        ret = ''
        while i < length:
            x = data[i:i + 2]
            t = chr(struct.unpack('H', x)[0])
            if t == '\r':
                ret += '\n'
            elif t != ' ':
                ret += t
            i += 2
        return ret
    def getPyTable(self, data):
        """Populate GPy_Table from the raw pinyin-table bytes (skips 4-byte header)."""
        data = data[4:]
        pos = 0
        length = len(data)
        while pos < length:
            # Each entry: uint16 index, uint16 byte length, UTF-16-LE syllable.
            index = struct.unpack('H', data[pos:pos + 2])[0]
            pos += 2
            l = struct.unpack('H', data[pos:pos + 2])[0]
            pos += 2
            py = self.byte2str(data[pos:pos + l])
            self.GPy_Table[index] = py
            pos += l
    def getWordPy(self, data):
        """Return the pinyin string for one word's pinyin-index table."""
        pos = 0
        length = len(data)
        ret = u''
        while pos < length:
            # BUG FIX: indexing a bytes object yields ints in Python 3, so the
            # original `data[pos] + data[pos + 1]` summed two integers and made
            # struct.unpack raise TypeError; unpack needs a 2-byte slice.
            index = struct.unpack('H', data[pos:pos + 2])[0]
            ret += self.GPy_Table[index]
            pos += 2
        return ret
    def getWord(self, data):
        # NOTE(review): despite its name this is the same algorithm as
        # getWordPy and returns pinyin (it reads GPy_Table), not the Chinese
        # word.  It is unused by deal(); behaviour kept, with the same
        # bytes-slicing fix applied.
        pos = 0
        length = len(data)
        ret = u''
        while pos < length:
            index = struct.unpack('H', data[pos:pos + 2])[0]
            ret += self.GPy_Table[index]
            pos += 2
        return ret
    def getChinese(self, data):
        """Walk the Chinese word table and append every word to GTable."""
        pos = 0
        length = len(data)
        while pos < length:
            # Number of words sharing this pinyin.
            same = struct.unpack('H', data[pos:pos + 2])[0]
            pos += 2
            # Length of the pinyin index table, which is then skipped.
            py_table_len = struct.unpack('H', data[pos:pos + 2])[0]
            pos += 2
            pos += py_table_len
            for i in range(same):
                # uint16 word byte length, then the UTF-16-LE word itself.
                c_len = struct.unpack('H', data[pos:pos + 2])[0]
                pos += 2
                word = self.byte2str(data[pos: pos + c_len])
                pos += c_len
                # uint16 extension length; the extension starts with the word
                # frequency (read but unused).
                ext_len = struct.unpack('H', data[pos:pos + 2])[0]
                pos += 2
                count = struct.unpack('H', data[pos:pos + 2])[0]
                self.GTable.append(word)
                # Jump over the extension block to the next word.
                pos += ext_len
    def deal(self, file_name):
        """Parse one .scel file and return its unique words in file order.

        Exits the process if the file lacks the scel magic header.
        """
        with open(file_name, 'rb') as f:
            data = f.read()
        if data[0:12] != b'@\x15\x00\x00DCS\x01\x01\x00\x00\x00':
            print("确认你选择的是搜狗(.scel)词库? {}".format(file_name))
            sys.exit(0)
        # Metadata (name/type/description/example) lives at fixed offsets;
        # the original debugging dumps are kept here for reference:
        # print("词库名:", self.byte2str(data[0x130:0x338]))
        # print("词库类型:", self.byte2str(data[0x338:0x540]))
        # print("描述信息:", self.byte2str(data[0x540:0xd40]))
        # print("词库示例:", self.byte2str(data[0xd40:self.startPy]))
        # self.getPyTable(data[self.startPy:self.startChinese])
        self.getChinese(data[self.startChinese:])
        # Deduplicate while keeping first-occurrence order.
        return sorted(set(self.GTable), key=self.GTable.index)
# Thin MySQL helper for the SOGOUKEYWORD / SOGOUCATE tables.
# NOTE(review): the bare `<PASSWORD>` tokens below look like dataset
# redaction placeholders (not valid Python) — confirm the original values.
class MysqlUtil():
# Obtain a database connection.
def getConn(self, host, userName, pwd, dbName, port):
return pymysql.connect(host=host, user=userName, password=<PASSWORD>, database=dbName, port=port)
def save(self, item):
# Insert one keyword record into SOGOUKEYWORD.
# Unpack the record fields.
keyword = item['keyword']
pinyin = item['pinyin']
type1 = item['type1']
type2 = item['type2']
type3 = item['type3']
# NOTE(review): credentials are hard-coded here and in selectOne —
# consider moving them to configuration.
host = "127.0.0.1"
port = 3306
userName = "root"
pwd = "<PASSWORD>"
dbName = "test"
connection = self.getConn(host, userName, pwd, dbName, port)
try:
with connection.cursor() as cursor:
# Parameterized INSERT statement.
sql = """INSERT INTO SOGOUKEYWORD(keyword,pinyin,type1,type2,type3) VALUES (%s,%s,%s,%s,%s)"""
# execute()'s second argument fills the %s placeholders (a tuple).
num = cursor.execute(sql, (keyword, pinyin, type1, type2, type3))
print("插入执行成功条数:", num)
# Commit this insert.
connection.commit()
except Exception as e:
print("发生异常,异常信息:", e)
# Roll back on error.
connection.rollback()
finally:
if connection:
# Always close the connection.
connection.close()
def selectOne(self, fileName):
# Look up the (type1, type2) category pair for a lexicon file name.
host = "127.0.0.1"
port = 3306
userName = "root"
pwd = "<PASSWORD>"
dbName = "test"
connection = self.getConn(host, userName, pwd, dbName, port)
try:
with connection.cursor() as cursor:
sql = """select type1,type2 from SOGOUCATE where file_name = %s"""
cursor.execute(sql, fileName)
res = cursor.fetchone()
return res[0], res[1]
except Exception as e:
print("发生异常,异常信息:", e)
connection.rollback()
finally:
if connection:
connection.close()
def AddQueue():
    """Producer: parse every lexicon file under ./data and enqueue one record
    per keyword on res_queue (each record is also written to MySQL here)."""
    base_dir = os.getcwd()
    data_dir = base_dir + '\\data\\'
    db = MysqlUtil()
    for scel_name in os.listdir(data_dir):
        # Parse all keywords out of this .scel file.
        words = ExtSougouScel().deal(data_dir + scel_name)
        # Strip the ".scel" extension from the file name.
        scel_name = scel_name[0:len(scel_name) - 5]
        # Throttle: pause reading new files while the queue is too full.
        pending = res_queue.qsize()
        while pending > 40000:
            print("sleep for a while ")
            time.sleep(20)
            pending = res_queue.qsize()
            print('new size is {}'.format(pending))
        for word in words:
            # Build one record per keyword, enqueue it and persist it.
            pinyin_text = " ".join(lazy_pinyin(word))
            cat1, cat2 = db.selectOne(scel_name)
            record = {
                'keyword': word,
                'pinyin': pinyin_text,
                'type1': cat1,
                'type2': cat2,
                'type3': scel_name,
            }
            res_queue.put_nowait(record)
            # NOTE: records are saved here as well as by the saveToDb workers.
            db.save(record)
    print('all file finshed')
print('all file finshed')
def saveToDb():
    """Worker loop: drain res_queue and persist each record to MySQL."""
    db = MysqlUtil()
    log = UtilLogger('crack',
                     os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log_crack.log'))
    while True:
        try:
            started = time.time()
            record = res_queue.get_nowait()
            # Warn when pulling from the queue took suspiciously long.
            elapsed = int(time.time() - started)
            if elapsed > 5:
                print("res_queue", elapsed)
            db.save(record)
            log.info('词库文件保存成功 {}'.format(record))
        except Exception as e:
            # Queue empty (or save failed): back off briefly and retry.
            print("queue is empty wait for a while {}".format(e))
            time.sleep(2)
def start():
    """Spin up one producer thread and ten database-writer threads."""
    workers = [Thread(target=AddQueue)]
    workers.extend(Thread(target=saveToDb) for _ in range(10))
    for worker in workers:
        worker.start()


if __name__ == '__main__':
    start()
| StarcoderdataPython |
81327 | ''' Controller para fornecer dados da CEE '''
from flask_restful_swagger_2 import swagger
from flask import request
from resources.base import BaseResource
from model.municipio import Municipio
class MunicipiosResource(BaseResource):
''' Resource serving collections of municipalities. '''
CUSTOM_SWAGGER_PARAMS = [
{"name": "categorias", "required": True, "type": 'string', "in": "query",
"description": "Informações que devem ser trazidas no dataset. \
Campos disponíveis: cd_municipio_ibge, cd_municipio_ibge_dv, \
st_situacao, cd_municipio_sinpas, cd_municipio_siafi, \
nm_municipio, nm_municipio_sem_acento, ds_observacao, \
cd_municipio_sinonimos, cd_municipio_sinonimos_dv, \
st_amazonia, st_fronteira, st_capital, cd_uf, ano_instalacao, \
ano_extincao, cd_municipio_sucessor, latitude, longitude, \
area, nm_uf, sg_uf, nm_municipio_uf, cd_unidade, cd_prt, \
nm_prt, nm_unidade, tp_unidade, sg_unidade, cd_mesorregiao, \
nm_mesorregiao, cd_microrregiao, nm_microrregiao, \
nu_portaria_mpt, tp_area, cd_geomunicipio_ibge, \
cd_municipio_rfb, cd_regiao e nm_regiao. " + BaseResource.CAT_DETAIL}
]
def __init__(self):
''' Constructor: eagerly builds the Municipio domain model. '''
self.domain = Municipio()
@swagger.doc({
'tags':['municipio'],
'description':'Obtém todos os Municípios, de acordo com os \
parâmetros informados',
'parameters': CUSTOM_SWAGGER_PARAMS + BaseResource.DEFAULT_SWAGGER_PARAMS,
'responses': {
'200': {'description': 'Municípios'}
}
})
def get(self):
''' Return municipality records matching the request query parameters. '''
options = self.build_options(request.args)
return self.get_domain().find_dataset(options)
def get_domain(self):
''' Lazily (re)create the domain model if it is missing. '''
# NOTE(review): __init__ already sets self.domain, so this None check
# only matters if the attribute is cleared elsewhere.
if self.domain is None:
self.domain = Municipio()
return self.domain
return self.domain
class MunicipioResource(BaseResource):
''' Resource serving a single municipality. '''
def __init__(self):
''' Constructor: eagerly builds the Municipio domain model. '''
self.domain = Municipio()
@swagger.doc({
'tags':['municipio'],
'description':'Obtém um único município de acordo com o código do IBGE',
'parameters':[
{
"name": "cd_municipio_ibge",
"description": "Código do IBGE do município consultado",
"required": False,
"type": 'string',
"in": "path"
}
],
'responses': {
'200': {
'description': 'Município'
}
}
})
def get(self, cd_municipio_ibge):
''' Return the municipality identified by the given IBGE code, as JSON. '''
return self.get_domain().find_by_cd_ibge(cd_municipio_ibge).to_json(orient='records')
def get_domain(self):
''' Lazily (re)create the domain model if it is missing. '''
# NOTE(review): __init__ already sets self.domain, so this None check
# only matters if the attribute is cleared elsewhere.
if self.domain is None:
self.domain = Municipio()
return self.domain
| StarcoderdataPython |
3315883 | import pygtk
pygtk.require('2.0')
import gtk
import pango
# Log the toolkit versions at startup (useful when debugging GTK issues).
print("Using pygtk " + str(gtk.pygtk_version))
print("Using gtk " + str(gtk.gtk_version))
from DesktopFileCreatorConfig import Info
from DesktopFileCreatorConfig import Config
from DesktopFileCreatorConfig import Categories
from DesktopFileCreatorState import State
class OptionWidget:
    """Bundles the label, text entry and optional helper button for one option key."""
    def __init__(self, key, helper):
        self.key = key
        self.helper = helper
        self.label = gtk.Label()
        self.label.set_text(key)
        self.entry = gtk.Entry()
        self.entry.set_text("")
        # Some helpers provide a factory that builds an extra button wired to
        # this widget's entry; without one there is no button.
        if helper.buttonFactory is not None:
            self.button = helper.buttonFactory(self.entry)
        else:
            self.button = None
def optionsToHelpers(options):
    """Ensure Config.helpers has an entry for every option key in `options`
    (a mapping action -> locale -> option -> value), then return it.

    Option keys without a dedicated helper fall back to Config.defaultHelper.
    """
    for actionKey in options.keys():
        action = options[actionKey]
        for localeKey in action.keys():
            # BUG FIX: the original read `group[localeKey]`, but no name
            # `group` exists in this scope (NameError at runtime); the loop
            # variable holding the current action dict is `action`.
            locale = action[localeKey]
            for optionKey in locale.keys():
                if optionKey not in Config.helpers:
                    Config.helpers[optionKey] = Config.defaultHelper
    return Config.helpers
def helpersToWidgets(helpers):
    """Create one OptionWidget per helper, keyed by option name."""
    return {key: OptionWidget(key, helpers[key]) for key in helpers.keys()}
def filterWidgets(widgets):
    """Group widgets by their helper's category; every known category gets a
    list (possibly empty)."""
    grouped = {category: list() for category in Categories.categories}
    for key in widgets.keys():
        widget = widgets[key]
        grouped[widget.helper.category].append(widget)
    return grouped
def optionsToWidgets(options, widgets):
    """Copy each option value into the matching widget's text entry."""
    for key, value in options.items():
        widgets[key].entry.set_text(value)
def widgetsToOptions(widgets, options):
    """Read every widget's entry back into `options`; blank entries map to None."""
    for key in widgets:
        text = widgets[key].entry.get_text().strip()
        options[key] = text if text else None
# Vertical list of option rows; each row packs label + entry (+ optional
# helper button) horizontally.
class OptionsGrid(gtk.VBox):
def __init__(self, filteredWidgets):
super(OptionsGrid, self).__init__()
self.set_spacing(4)
self.set_homogeneous(False)
for widget in filteredWidgets:
hbox = gtk.HBox()
hbox.set_spacing(4)
hbox.set_homogeneous(False)
hbox.pack_start(widget.label, False, False)
# Only the entry expands with the window.
hbox.pack_start(widget.entry, True, True)
if widget.button is not None: hbox.pack_start(widget.button, False, False)
self.pack_start(hbox, False, False)
# Scrollable wrapper around an OptionsGrid (one per category tab).
class OptionsFilter(gtk.ScrolledWindow):
def __init__(self, filteredWidgets):
super(OptionsFilter, self).__init__()
self.grid = OptionsGrid(filteredWidgets)
self.set_policy(gtk.POLICY_ALWAYS, gtk.POLICY_ALWAYS)
self.set_size_request(512, 256)
self.add_with_viewport(self.grid)
# Notebook with one scrollable tab per option category; owns the full
# key -> OptionWidget map in self.widgets.
class OptionsContainer(gtk.Notebook):
def __init__(self, options):
super(OptionsContainer, self).__init__()
self.widgets = helpersToWidgets(optionsToHelpers(options))
filteredWidgets = filterWidgets(self.widgets)
for category in Categories.categories:
self.append_page(OptionsFilter(filteredWidgets[category]), gtk.Label(category))
# Combo box listing group keys.
# NOTE(review): unlike ActionComboBox/LocaleComboBox below, the combo box is
# never pack_start()ed into this HBox, so it would not be shown — confirm
# whether this class is actually used or is dead code.
class GroupComboBox(gtk.HBox):
def __init__(self, groups):
super(GroupComboBox, self).__init__()
self.comboBox = gtk.combo_box_new_text()
for groupKey in groups.keys():
self.comboBox.append_text(groupKey)
self.comboBox.set_active(0)
def getSelectedGroupKey(self):
# Returns the text of the selected entry (a group key).
return self.comboBox.get_active_text()
# Labeled combo box selecting a desktop action; index 0 is the main
# "[Desktop Entry]" section.
class ActionComboBox(gtk.HBox):
def __init__(self, actions):
super(ActionComboBox, self).__init__()
self.actions = actions
self.set_spacing(4)
self.label = gtk.Label("Action")
self.comboBox = gtk.combo_box_new_text()
self.comboBox.append_text("[Desktop Entry]")
self.comboBox.set_active(0)
for action in actions:
self.comboBox.append_text("[Desktop Action " + action + "]")
self.pack_start(self.label, False, False)
self.pack_start(self.comboBox, True, True)
def getSelectedAction(self):
# "" denotes the main [Desktop Entry] section; otherwise the action name.
index = self.comboBox.get_active()
if index <= 0: return ""
return self.actions[index - 1]
# Labeled combo box selecting a locale; index 0 is the unlocalized default.
class LocaleComboBox(gtk.HBox):
def __init__(self, locales):
super(LocaleComboBox, self).__init__()
self.locales = locales
self.set_spacing(4)
self.label = gtk.Label("Locale")
self.comboBox = gtk.combo_box_new_text()
self.comboBox.append_text("<default>")
self.comboBox.set_active(0)
for locale in locales:
self.comboBox.append_text(locale)
self.pack_start(self.label, False, False)
self.pack_start(self.comboBox, True, True)
def getSelectedLocale(self):
# "" denotes the default (unlocalized) values; otherwise the locale code.
index = self.comboBox.get_active()
if index <= 0: return ""
return self.locales[index - 1]
# Top-level dialog: action/locale selectors, the tabbed option editor and
# Load/Save buttons for .desktop files.
class MainWindow(gtk.Window):
def __init__(self):
super(MainWindow, self).__init__()
self.data = State()
self.connect("destroy", gtk.main_quit)
self.set_title(Info.name + " " + Info.version)
self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
self.set_border_width(8)
self.actionComboBox = ActionComboBox(self.data.actions)
self.localeComboBox = LocaleComboBox(self.data.locales)
self.optionsContainer = OptionsContainer(self.data.options)
self.loadButton = gtk.Button()
self.loadButton.set_label("Load Desktop File")
self.loadButton.connect('button_press_event', self.load)
self.saveButton = gtk.Button()
self.saveButton.set_label("Save Desktop File")
self.saveButton.connect('button_press_event', self.save)
hbox = gtk.HBox()
hbox.pack_start(self.loadButton, False, False)
hbox.pack_end(self.saveButton, False, False)
vbox = gtk.VBox()
vbox.set_spacing(8)
vbox.pack_start(self.actionComboBox, False, False)
vbox.pack_start(self.localeComboBox, False, False)
vbox.pack_start(self.optionsContainer, True, True)
vbox.pack_start(hbox, False, False)
self.add(vbox)
def run(self):
# Show everything and enter the GTK main loop (blocks until quit).
self.show_all()
gtk.main()
def load(self, widget, e):
# Ask the user for a file, then parse it into the option widgets.
dialog = gtk.FileChooserDialog (title = "Open File", parent = self.get_toplevel(), action = gtk.FILE_CHOOSER_ACTION_OPEN, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK), backend = None)
result = dialog.run()
filename = dialog.get_filename()
dialog.destroy()
if result == gtk.RESPONSE_OK:
if filename is not None:
if len(filename) > 0:
self.doLoad(filename)
def doLoad(self, filename):
# Read and parse the file; any failure is shown in an error dialog.
try:
file = open(filename, "r")
content = file.read()
file.close()
self.data.options.fromString(content)
# Push the parsed default action/locale values into the UI.
optionsToWidgets(self.data.options[""][""], self.optionsContainer.widgets)
except Exception as e:
self.showErrorMessage(str(e))
def save(self, widget, e):
# Ask the user for a destination, then serialize the widgets to it.
dialog = gtk.FileChooserDialog (title = "Save File", parent = self.get_toplevel(), action = gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK), backend = None)
result = dialog.run()
filename = dialog.get_filename()
dialog.destroy()
if result == gtk.RESPONSE_OK:
if filename is not None:
if len(filename) > 0:
self.doSave(filename)
def doSave(self, filename):
try:
# Pull the UI values back into the default action/locale, then write.
widgetsToOptions(self.optionsContainer.widgets, self.data.options[""][""])
content = self.data.options.toString()
file = open(filename, "w")
file.write(content)
file.close()
except Exception as e:
self.showErrorMessage(str(e))
def showErrorMessage(self, message):
# Modal error popup used by doLoad/doSave.
dialog = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, message)
dialog.run()
dialog.destroy()
| StarcoderdataPython |
102305 | #!/usr/bin/env python2.7
from itertools import groupby
from operator import itemgetter
import sys
from datetime import datetime
from math import *
#reader
def read_mapper_output(file, separator='\t'):
    """Lazily split each line of *file* into at most two fields on *separator*."""
    return (line.rstrip().split(separator, 1) for line in file)
#discard trips with errors
def correctOutput(output_in):
    """Validate one joined trip+fare record and print it as CSV if it passes.

    Mutates *output_in* in place: field 8 (trip_time_in_secs) is recomputed
    from the pickup (field 1) and dropoff (field 6) timestamps.  Records
    failing any sanity check are silently discarded.

    Fixes vs. the original: the zero-GPS branch contained a bare ``None``
    no-op and fell through, so rows with 0.0 coordinates were still printed;
    they are now discarded.  ``print`` is called as a function for
    Python 2/3 compatibility.
    """
    output = output_in
    # recompute trip duration (seconds) from the two timestamps
    time_drop_off = datetime.strptime(output[6], "%Y-%m-%d %H:%M:%S")
    time_pick_up = datetime.strptime(output[1], "%Y-%m-%d %H:%M:%S")
    output[8] = str(abs((time_drop_off - time_pick_up).seconds))
    # if any GPS coordinate is 0: discard
    if (float(output[10]) == 0.0 or float(output[11]) == 0.0
            or float(output[12]) == 0.0 or float(output[13]) == 0.0):
        return
    # if total earning minus tip is negative: discard
    if float(output[20]) - float(output[18]) < 0:
        return
    # if trip too short or too long, or distance/speed unrealistic: discard
    # (the short-trip test short-circuits before the division, so a zero
    # duration never divides by zero — same ordering as the original)
    if (float(output[8]) < 20 or float(output[8]) > 10000
            or float(output[9]) > 40 or float(output[9]) / float(output[8]) > 0.025):
        return
    print(",".join(output))
def main(separator='\t'):
    """Reducer entry point: join each trip record with its fare record and
    emit validated rows as CSV on stdout.

    Input (stdin) is the mapper output: tab-separated ``key<TAB>csv_values``
    lines, pre-sorted so rows sharing a key are adjacent.  For every key we
    expect one 12-field trip row and one 9-field fare row (whose first two
    fields duplicate the key and are dropped).  ``print`` is called as a
    function for Python 2/3 compatibility.
    """
    header = ["medallion","pickup_datetime","hack_license","vendor_id","rate_code","store_and_fwd_flag","dropoff_datetime","passenger_count","trip_time_in_secs","trip_distance","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude","payment_type","fare_amount","surcharge","mta_tax","tip_amount","tolls_amount","total_amount"]
    print(",".join(header))
    rows = read_mapper_output(sys.stdin, separator=separator)
    # rows arriving with the same key are grouped together
    for key, group in groupby(rows, itemgetter(0)):
        try:
            output = key.rstrip().split(",")
            trip = None  # renamed from `data`, which shadowed the generator above
            fare = None
            # read each row of the group and pick out the trip / fare halves
            for _, value in group:
                elements = value.rstrip().split(",")
                if len(elements) == 12:
                    trip = elements
                elif len(elements) == 9:
                    fare = elements[2:]
            # only emit when both halves of the join are present
            if trip and fare:
                correctOutput(output + trip + fare)
        except Exception:
            # best-effort: malformed groups are dropped silently (was a bare
            # except; narrowed so KeyboardInterrupt/SystemExit propagate)
            pass


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3298295 | '''Module of common tool test class'''
import nose
import tempfile, shutil, os, itertools, re
def get_arg_flag(key):
    """Turn a snake_case identifier into its long CLI flag form: ``--snake-case``."""
    return "--{}".format(key.replace('_', '-'))
def make_args(outfile, **args):
    '''Makes a list of argument strings from a dictionary for command line tools to process.

    ``None`` values are skipped; booleans become bare ``--flag`` switches
    (regardless of their truth value, matching the original intent);
    everything else becomes ``--key=value``.  The output file is appended
    last as ``-o=<outfile>``.

    Fix vs. the original comprehension: the boolean branch emitted the raw
    un-prefixed key instead of ``get_arg_flag(key)`` (the commented-out
    loop it replaced got this right).  The dead commented-out code is
    removed.
    '''
    arglist = []
    for key, value in args.items():
        if value is None:
            continue
        if isinstance(value, bool):
            arglist.append('{}'.format(get_arg_flag(key)))
        else:
            arglist.append('{}={}'.format(get_arg_flag(key), value))
    arglist.append('-o={}'.format(outfile))
    return arglist
def cycle_args(arg_list):
    """Generate every combination of argument values, yielded as dicts keyed like *arg_list*."""
    keys = arg_list.keys()
    for combo in itertools.product(*arg_list.values()):
        yield dict(zip(keys, combo))
class CommonToolTest:
    """Mixin base for w_tools command-line tool tests.

    setUpClass creates (and chdirs into) a scratch directory; tearDownClass
    chdirs back under $WEST_ROOT and removes it.  Subclasses are expected to
    provide ``self.w`` (the tool instance — TODO confirm against subclasses)
    and to implement the three NotImplementedError hooks.
    """
    # human-readable tool name, set by subclasses; used in test descriptions
    test_name = None
    @classmethod
    def setUpClass(cls):
        cls.tempdir = tempfile.mkdtemp(prefix='w_tools_test')
        os.chdir(cls.tempdir)
    @classmethod
    def tearDownClass(cls):
        # requires WEST_ROOT in the environment; raises KeyError otherwise
        os.chdir('{}/lib/west_tools/tests'.format(os.environ['WEST_ROOT']))
        shutil.rmtree(cls.tempdir)
    def mktemp(self, suffix='.h5', prefix = '', dirname = None):
        '''Helper method to make a tempfile (to be written/read by tools)'''
        if dirname is None:
            dirname = self.tempdir
        return tempfile.mktemp(suffix, prefix, dir=dirname)
    def check_args(self, outcome, errmsg, args):
        # ``.description`` is presumably the nose-style display name for this
        # generated test case — TODO confirm against the test runner in use
        self.check_args.__func__.description = '{} Args: '.format(self.test_name) + (', '.join(args) or '[defaults]')
        assert outcome, str(errmsg)
    def check_runs_with_args(self, **kwargs):
        """Run the tool and validate its output; returns (1, None) on success, (0, exc) on failure."""
        try:
            self.w.go() #Run tool with given args
            self.check_output(**kwargs) #Check that output hdf5 file is set up correctly
        except Exception as e:
            return (0, e)
        else:
            return(1, None)
    def check_output(self, **kwargs):
        # subclass hook: validate the tool's output file
        raise NotImplementedError
    def check_args_processed(self, **kwargs):
        # subclass hook: validate that arguments were parsed as expected
        raise NotImplementedError
    def test_args(self, **kwargs):
        # subclass hook: drive the argument-combination tests
        raise NotImplementedError
| StarcoderdataPython |
129175 | <gh_stars>0
#! python3
# isPhoneNumber.py - Program without regular expressions to find a phone number in text
def isPhoneNumber(text):
    """Return True when *text* is exactly in the form ddd-ddd-dddd.

    Replaces the original per-character index loops with slice checks:
    ``str.isdecimal()`` on a non-empty slice tests every character at once,
    and the length check guarantees all three slices are non-empty.
    """
    return (len(text) == 12
            and text[:3].isdecimal()
            and text[3] == '-'
            and text[4:7].isdecimal()
            and text[7] == '-'
            and text[8:].isdecimal())
print('415-555-4242 is a phone number:')
print(isPhoneNumber('415-555-4242'))
print('<NAME> is a phone number:')
print(isPhoneNumber('<NAME>'))
# Scan the message by sliding a 12-character window across it and testing
# each window for the ddd-ddd-dddd shape.
message = 'Call me at 415-555-1011 tomorrow. 415-555-9999 is my office.'
for start in range(len(message)):
    window = message[start:start + 12]
    if isPhoneNumber(window):
        print('Phone number found: ' + window)
print('Done')
1673945 | <reponame>Esukhia/text_utils
import os
import re
from collections import defaultdict
from PyTib.common import open_file, write_csv
def generate_prefixes(syls):
    """Every proper leading sub-word of *syls*, tsek-joined, shortest first (never the whole word)."""
    return ['་'.join(syls[:end]) for end in range(1, len(syls))]
def generate_infixes(syls):
    """Leading sub-words of the interior syllables (first and last stripped), tsek-joined, shortest first."""
    interior = syls[1:-1]
    return ['་'.join(interior[:end]) for end in range(1, len(interior) + 1)]
def generate_postfixes(syls):
    """Every proper trailing sub-word of *syls*, tsek-joined, shortest first (never the whole word)."""
    return ['་'.join(syls[start:]) for start in range(len(syls) - 1, 0, -1)]
def generate_sub_sequences(syls):
    """Collect candidate prefixes, infixes and postfixes for one syllable list."""
    return {
        'prefixes': generate_prefixes(syls),
        'infixes': generate_infixes(syls),
        'postfixes': generate_postfixes(syls),
    }
def find_affixes(word_list):
    """Count candidate prefix/infix/postfix strings over every word in *word_list*.

    Returns three defaultdict(int) counters, in that order.
    """
    counters = {
        'prefixes': defaultdict(int),
        'infixes': defaultdict(int),
        'postfixes': defaultdict(int),
    }
    for word in word_list:
        syls = word.split('་')
        for kind, affixes in generate_sub_sequences(syls).items():
            bucket = counters[kind]
            for affix in affixes:
                bucket[affix] += 1
    return counters['prefixes'], counters['infixes'], counters['postfixes']
def sort_potentials(prefixes, infixes, postfixes):
    """Return each candidate-affix counter as a list of (affix, count) pairs, most frequent first."""
    def _by_count_desc(counter):
        # sorted() is stable, so equal counts keep their insertion order
        return sorted(counter.items(), key=lambda pair: pair[1], reverse=True)
    return _by_count_desc(prefixes), _by_count_desc(infixes), _by_count_desc(postfixes)
def pre_processing(string):
    """
    Normalise a raw Tibetan text into a list of space-separated tokens.

    Applies, in order: detach affixed particles (``-`` and U+2005 become
    `` -``), drop shads and newlines, collapse runs of whitespace, then
    strip word-final tseks before splitting on single spaces.
    """
    for old, new in (('-', ' -'), ('།', ''), ('\u2005', ' -'), ('\n', '')):
        string = string.replace(old, new)
    string = re.sub(r'\s+', ' ', string)
    string = string.replace('་ ', ' ')
    return string.split(' ')
def process(in_path):
    """Count candidate affixes and write the three ranked CSV reports under output/.

    Two modes: when *in_path* ends in ``.txt`` a fixed lexicon is used
    (NOTE(review): in that branch the *in_path* value itself is ignored —
    the hard-coded ``resources/uncompound_lexicon.txt`` is read instead);
    otherwise every file in the *in_path* directory is pre-processed and
    counted, with the per-file counts merged.
    """
    if in_path.endswith('.txt'):
        word_list = open_file('resources/uncompound_lexicon.txt').strip().split('\n')
        prefixes, infixes, postfixes = find_affixes(word_list)
        sorted_pre, sorted_in, sorted_post = sort_potentials(prefixes, infixes, postfixes)
    else:
        prefixes, infixes, postfixes = defaultdict(int), defaultdict(int), defaultdict(int)
        for f in os.listdir(in_path):
            word_list = pre_processing(open_file('{}/{}'.format(in_path, f)))
            new_prefixes, new_infixes, new_postfixes = find_affixes(word_list)
            # inject affixes in the defaultdicts
            for pre, freq in new_prefixes.items():
                prefixes[pre] += freq
            for inf, freq in new_infixes.items():
                infixes[inf] += freq
            for post, freq in new_postfixes.items():
                postfixes[post] += freq
        sorted_pre, sorted_in, sorted_post = sort_potentials(prefixes, infixes, postfixes)
    # write to csv files
    write_csv('output/potential_prefixes.csv', sorted_pre)
    write_csv('output/potential_infixes.csv', sorted_in)
    write_csv('output/potential_postfixes.csv', sorted_post)
def main():
    """Entry point: scan the ./input corpus and write the affix CSV reports."""
    in_path = 'input'
    # in_path = 'resources/uncompound_lexicon.txt'
    process(in_path)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
20517 | from unittest import TestCase
from unittest.mock import Mock, patch
from typeseam.app import (
load_initial_data,
)
class TestModels(TestCase):
    """Smoke tests for the typeseam.app bootstrap helpers."""

    @patch('typeseam.app.os.environ.get')
    def test_load_initial_data(self, env_get):
        """load_initial_data should run to completion against a mocked app."""
        # fake app.app_context() returning an object usable as a context manager
        fake_context = Mock(__enter__=Mock(), __exit__=Mock())
        fake_app = Mock(app_context=Mock(return_value=fake_context))
        load_initial_data(fake_app)
def f(t):
    # relation between f and t
    # NOTE(review): placeholder stub — `value` is never defined, so calling
    # this raises NameError until the actual flow-rate relation f(t) is
    # filled in.
    return value
def rxn1(C, t):
    # Right-hand side of a two-CSTR-in-series ODE system: inflow minus
    # outflow minus first-order reaction for each tank.
    # NOTE(review): C0 (feed concentration), v (volume), k (rate constant)
    # and np are not defined in this snippet — they must be supplied by the
    # enclosing scope before this is usable.
    return np.array([f(t)*C0/v-f(t)*C[0]/v-k*C[0], f(t)*C[0]/v-f(t)*C[1]/v-k*C[1]])
| StarcoderdataPython |
1737758 | <reponame>rcap107/holoclean
from .constraintfeat import ConstraintFeaturizer
from .featurized_dataset import FeaturizedDataset
from .featurizer import Featurizer
from .freqfeat import FreqFeaturizer
from .initattrfeat import InitAttrFeaturizer
from .initsimfeat import InitSimFeaturizer
from .langmodelfeat import LangModelFeaturizer
from .occurattrfeat import OccurAttrFeaturizer
# Public API of the featurizers package: what `from ... import *` exposes.
__all__ = ['ConstraintFeaturizer',
           'FeaturizedDataset',
           'Featurizer',
           'FreqFeaturizer',
           'InitAttrFeaturizer',
           'InitSimFeaturizer',
           'LangModelFeaturizer',
           'OccurAttrFeaturizer']
| StarcoderdataPython |
1742851 | <reponame>vinthedark/snet-marketplace-service<gh_stars>0
import json
from common.constant import StatusCode
from dapp_user.exceptions import BadRequestException
from common.logger import get_logger
from common.utils import validate_dict_list, handle_exception_with_slack_notification, generate_lambda_response
from dapp_user.config import SLACK_HOOK, NETWORK_ID
from dapp_user.domain.services.user_service import UserService
# Module-level logger shared by all lambda handlers in this module.
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def add_or_update_user_preference(event, context):
    """Lambda handler: upsert the calling user's notification preferences.

    The body must be a JSON list of dicts each carrying the four required
    keys; otherwise BadRequestException is raised (translated to an error
    response by the decorator).  The username is the email claim attached
    by the API Gateway authorizer.
    """
    payload = json.loads(event["body"])
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    required_keys = ["communication_type", "preference_type", "source", "status"]
    if not validate_dict_list(payload, required_keys):
        raise BadRequestException()
    preferences = UserService().add_or_update_user_preference(payload=payload, username=username)
    body = {"status": "success", "data": preferences, "error": {}}
    return generate_lambda_response(StatusCode.OK, body, cors_enabled=True)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def get_user_preference(event, context):
    """Lambda handler: return the calling user's stored preferences.

    The username is the email claim attached by the API Gateway authorizer.
    """
    claims = event["requestContext"]["authorizer"]["claims"]
    preferences = UserService().get_user_preference(username=claims["email"])
    body = {"status": "success", "data": preferences, "error": {}}
    return generate_lambda_response(StatusCode.OK, body, cors_enabled=True)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def delete_user(event, context):
    """Lambda handler: delete the calling user's account/data.

    The username is the email claim attached by the API Gateway authorizer.
    """
    claims = event["requestContext"]["authorizer"]["claims"]
    result = UserService().delete_user(username=claims["email"])
    body = {"status": "success", "data": result, "error": {}}
    return generate_lambda_response(StatusCode.OK, body, cors_enabled=True)
| StarcoderdataPython |
1660094 | <filename>vit_mutual/models/vision_transformers/deit.py
import torch
import torch.nn as nn
from .vit import ViT
class DeiT(ViT):
    """Data-efficient image Transformer: a ViT extended with a distillation
    token and a second classification head.

    Training returns both head outputs; inference averages them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        dim = self.transformer.embed_dim
        # learnable distillation token, initialised like the class token
        self.dist_token = nn.Parameter(torch.zeros(1, 1, dim))
        nn.init.normal_(self.dist_token, std=0.02)
        self.dist_head = nn.Linear(dim, self.num_classes)

    def forward(self, img: torch.Tensor):
        # tokens has shape [n, bs, dim] (sequence-first layout)
        tokens: torch.Tensor = self.patch_embed(img)
        batch = tokens.shape[1]
        # prepend the class and distillation tokens, then add positional
        # embeddings and run the transformer stack
        expanded_cls = self.cls_token.expand(-1, batch, -1)
        expanded_dist = self.dist_token.expand(-1, batch, -1)
        tokens = torch.cat((expanded_cls, expanded_dist, tokens), dim=0)
        tokens = self.transformer(self.pos_embed(tokens))
        cls_out = self.cls_head(tokens[0])
        dist_out = self.dist_head(tokens[1])
        if self.training:
            return {"pred": cls_out, "dist": dist_out}
        # during inference, return the average of both classifier predictions
        return (cls_out + dist_out) / 2
| StarcoderdataPython |
1752795 | import datetime
from django.db import models
from apps.accounts import models as accounts_models
from apps.commons import (
models as commons_models,
constants as commons_constants,
validators as commons_validators,
)
from apps.facility.models import Facility, TestingLab
from fernet_fields import EncryptedCharField, EncryptedIntegerField, EncryptedTextField
from partial_index import PQ, PartialIndex
from apps.patients import constants
from simple_history.models import HistoricalRecords
from libs.jsonfield import JSONField
class PatientGroup(commons_models.SoftDeleteTimeStampedModel):
    """
    A named cluster/group that patients can be assigned to (see Patient.cluster_group).
    """
    name = models.CharField(
        max_length=commons_constants.FIELDS_CHARACTER_LIMITS["NAME"], help_text="Name of the patient group",
    )
    description = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{self.name}<>{self.description}"
class Patient(commons_models.SoftDeleteTimeStampedModel, commons_models.AddressModel):
    """
    Core record for one patient: identity, contact, travel/exposure history,
    clinical status pointers and facility linkage.  Soft-deletable; address
    fields come from the AddressModel mixin; every change is versioned via
    simple_history.
    """
    BLOOD_GROUP_CHOICES = [
        ("A+", "A+"),
        ("A-", "A-"),
        ("B+", "B+"),
        ("B-", "B-"),
        ("AB+", "AB+"),
        ("AB-", "AB-"),
        ("O+", "O+"),
        ("O-", "O-"),
    ]
    # which upstream system this record originated from
    SOURCE_CHOICES = [
        (constants.SOURCE_CHOICES.CA, "CARE"),
        (constants.SOURCE_CHOICES.CT, "COVID_TRACKER"),
        (constants.SOURCE_CHOICES.ST, "STAY"),
    ]
    source = models.IntegerField(choices=SOURCE_CHOICES, default=constants.SOURCE_CHOICES.CA)
    nearest_facility = models.ForeignKey(
        Facility, on_delete=models.SET_NULL, null=True, related_name="nearest_facility"
    )
    # external identifiers (ICMR / government); unique but optional
    icmr_id = models.CharField(max_length=15, blank=True, null=True, unique=True)
    govt_id = models.CharField(max_length=15, blank=True, null=True, unique=True)
    name = models.CharField(max_length=200)
    month = models.PositiveIntegerField(null=True, blank=True)
    year = models.PositiveIntegerField(null=True, blank=True)
    gender = models.IntegerField(choices=commons_constants.GENDER_CHOICES, blank=False)
    phone_number = models.CharField(max_length=14, validators=[commons_validators.phone_number_regex])
    # the phone number may belong to a relative rather than the patient
    phone_number_belongs_to = models.IntegerField(
        choices=constants.PATIENT_RELATIVE_CHOICES, default=constants.PATIENT_RELATIVE_TYPE_CHOICES.SELF
    )
    date_of_birth = models.DateField(default=None, null=True)
    nationality = models.CharField(max_length=255, verbose_name="Nationality of Patient", default="indian")
    passport_no = models.CharField(
        max_length=255, verbose_name="Passport Number of Foreign Patients", unique=True, null=True, blank=True,
    )
    aadhar_no = models.CharField(
        max_length=255, verbose_name="Aadhar Number of Patient", unique=True, null=True, blank=True,
    )
    is_medical_worker = models.BooleanField(default=False, verbose_name="Is the Patient a Medical Worker")
    blood_group = models.CharField(choices=BLOOD_GROUP_CHOICES, max_length=4, verbose_name="Blood Group of Patient",)
    # exposure history
    contact_with_confirmed_carrier = models.BooleanField(
        default=False, verbose_name="Confirmed Contact with a Covid19 Carrier"
    )
    contact_with_suspected_carrier = models.BooleanField(
        default=False, verbose_name="Suspected Contact with a Covid19 Carrier"
    )
    estimated_contact_date = models.DateTimeField(null=True, blank=True)
    # travel history
    past_travel = models.BooleanField(
        default=False, verbose_name="Travelled to Any Foreign Countries in the last 28 Days",
    )
    # legacy free-text field, kept read-only; superseded by the JSON field below
    countries_travelled_old = models.TextField(
        null=True, blank=True, verbose_name="Countries Patient has Travelled to", editable=False,
    )
    countries_travelled = JSONField(null=True, blank=True, verbose_name="Countries Patient has Travelled to")
    date_of_return = models.DateTimeField(
        blank=True, null=True, verbose_name="Return Date from the Last Country if Travelled",
    )
    present_health = models.TextField(default="", blank=True, verbose_name="Patient's Current Health Details")
    ongoing_medication = models.TextField(default="", blank=True, verbose_name="Already pescribed medication if any")
    has_SARI = models.BooleanField(default=False, verbose_name="Does the Patient Suffer from SARI")
    local_body = models.ForeignKey(accounts_models.LocalBody, on_delete=models.SET_NULL, null=True, blank=True)
    number_of_aged_dependents = models.IntegerField(
        default=0, verbose_name="Number of people aged above 60 living with the patient", blank=True,
    )
    number_of_chronic_diseased_dependents = models.IntegerField(
        default=0, verbose_name="Number of people who have chronic diseases living with the patient", blank=True,
    )
    created_by = models.ForeignKey(accounts_models.User, on_delete=models.SET_NULL, null=True)
    is_active = models.BooleanField(
        default=True, help_text="Not active when discharged, or removed from the watchlist",
    )
    date_of_receipt_of_information = models.DateTimeField(
        null=True, blank=True, verbose_name="Patient's information received date"
    )
    cluster_group = models.ForeignKey(
        PatientGroup, on_delete=models.PROTECT, related_name="patients", null=True, blank=True,
    )
    clinical_status_updated_at = models.DateTimeField(null=True, blank=True)
    # portea tele-calling bookkeeping (see also PortieCallingDetail)
    portea_called_at = models.DateTimeField(null=True, blank=True)
    portea_able_to_connect = models.BooleanField(null=True, blank=True, verbose_name="Is the portea able to connect")
    symptoms = models.ManyToManyField("CovidSymptom", through="PatientSymptom")
    diseases = models.ManyToManyField("Disease", through="PatientDisease")
    covid_status = models.ForeignKey(
        "CovidStatus", null=True, blank=True, on_delete=models.CASCADE, related_name="covid_status",
    )
    clinical_status = models.ForeignKey(
        "ClinicalStatus", null=True, blank=True, on_delete=models.CASCADE, related_name="clinical_status",
    )
    current_facility = models.ForeignKey(
        "PatientFacility", null=True, blank=True, on_delete=models.CASCADE, related_name="current_facility",
    )
    patient_status = models.IntegerField(
        choices=constants.PATIENT_STATUS_CHOICES, default=constants.PATIENT_STATUS.FACILITY_STATUS
    )
    pincode = models.CharField(max_length=6, null=True, blank=True)
    native_state = models.ForeignKey(
        "accounts.State", on_delete=models.PROTECT, null=True, blank=True, related_name="native_state"
    )
    native_country = models.CharField(max_length=56, null=True, blank=True)
    history = HistoricalRecords()
    # presumably filters out soft-deleted rows — TODO confirm against commons.models
    objects = commons_models.ActiveObjectsManager()
    class Meta:
        # NOTE(review): this is a single three-column constraint, and all
        # three columns are nullable — rows containing NULLs bypass the
        # uniqueness check on most databases; confirm this is intended.
        unique_together = (
            "aadhar_no",
            "passport_no",
            "cluster_group",
        )
    def __str__(self):
        return "{} - {}".format(self.name, self.get_gender_display())
class PatientDisease(commons_models.SoftDeleteTimeStampedModel):
    """Through model linking a Patient to a pre-existing Disease; one row per pair."""
    patient = models.ForeignKey("Patient", on_delete=models.CASCADE)
    disease = models.ForeignKey("Disease", on_delete=models.CASCADE)
    class Meta:
        unique_together = ("patient", "disease")
class PatientSymptom(commons_models.SoftDeleteTimeStampedModel):
    """Through model linking a Patient to a CovidSymptom; one row per pair."""
    patient = models.ForeignKey("Patient", on_delete=models.CASCADE)
    symptom = models.ForeignKey("CovidSymptom", on_delete=models.CASCADE)
    class Meta:
        unique_together = ("patient", "symptom")
class CovidSymptom(models.Model):
    """Lookup table of COVID symptom names."""
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return f"{self.name}"
class Disease(models.Model):
    """Lookup table of pre-existing disease names."""
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return f"{self.name}"
class PatientStatus(models.Model):
    """Lookup table of patient status values (referenced by PatientFacility)."""
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return f"{self.name}"
class CovidStatus(models.Model):
    """Lookup table of COVID status values (referenced by Patient.covid_status)."""
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return f"{self.name}"
class ClinicalStatus(models.Model):
    """Lookup table of clinical status values (referenced by Patient.clinical_status)."""
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return f"{self.name}"
class PatientFacility(commons_models.SoftDeleteTimeStampedModel):
    """
    A patient's stay at a facility, with admission/discharge timestamps
    and the facility-local patient id.
    """
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    facility = models.ForeignKey(Facility, on_delete=models.CASCADE)
    patient_facility_id = models.CharField(max_length=15, null=True, blank=True)
    patient_status = models.ForeignKey("PatientStatus", on_delete=models.CASCADE)
    discharged_at = models.DateTimeField(null=True, blank=True)
    admitted_at = models.DateTimeField(null=True, blank=True)
    def __str__(self):
        return f"{self.facility.name}"
class PatientTimeLine(models.Model):
    """
    One dated free-text event in a patient's history/timeline.
    """
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    date = models.DateField(default=datetime.date.today)
    description = models.TextField()
    def __str__(self):
        return f"{self.patient.name} - {self.date}"
class PatientFamily(commons_models.SoftDeleteTimeStampedModel, commons_models.AddressModel):
    """A family member of a patient (name, relation, age, contact); address via the mixin."""
    patient = models.ForeignKey("Patient", on_delete=models.CASCADE)
    name = models.CharField(max_length=55)
    relation = models.CharField(max_length=55)
    age_month = models.PositiveIntegerField()
    age_year = models.PositiveIntegerField()
    phone_number = models.CharField(max_length=15, null=True, blank=True)
    gender = models.IntegerField(choices=commons_constants.GENDER_CHOICES, blank=False)
    def __str__(self):
        return f"{self.patient.name} {self.relation}'s {self.name}"
class PortieCallingDetail(commons_models.SoftDeleteTimeStampedModel):
    """One tele-calling attempt by a portie (caller) to a patient's contact number."""
    portie = models.ForeignKey(accounts_models.User, on_delete=models.CASCADE)
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE, null=True, blank=True)
    patient_number = models.CharField(max_length=14, validators=[commons_validators.phone_number_regex])
    # whose number was dialled relative to the patient (self / relative)
    relation = models.IntegerField(
        choices=constants.PATIENT_RELATIVE_CHOICES, default=constants.PATIENT_RELATIVE_TYPE_CHOICES.SELF
    )
    called_at = models.DateTimeField()
    able_to_connect = models.BooleanField(default=True)
    comments = models.TextField(blank=True)
    def __str__(self):
        # NOTE(review): patient is nullable, so self.patient.name raises
        # AttributeError when no patient is linked — confirm intended.
        return f"{self.portie.name} called {self.patient.name} at {self.called_at}"
class PatientSampleTest(commons_models.SoftDeleteTimeStampedModel):
    """
    One lab sample taken from a patient and its (eventual) test result.
    """
    SAMPLE_TEST_RESULT_CHOICES = [
        (constants.SAMPLE_TEST_RESULT_MAP.SS, "Sample Sent"),
        (constants.SAMPLE_TEST_RESULT_MAP.PO, "Positive"),
        (constants.SAMPLE_TEST_RESULT_MAP.NG, "Negative"),
        (constants.SAMPLE_TEST_RESULT_MAP.PP, "Presumptive Positive"),
        (constants.SAMPLE_TEST_RESULT_MAP.TI, "Test Inconclusive"),
    ]
    # NOTE(review): related_name="patients" reads backwards — from a Patient
    # it yields sample tests, not patients; confirm before relying on it.
    patient = models.ForeignKey(Patient, on_delete=models.PROTECT, related_name="patients")
    testing_lab = models.ForeignKey(TestingLab, on_delete=models.PROTECT, related_name="labs")
    doctor_name = models.CharField(max_length=255, null=True, blank=True)
    result = models.IntegerField(choices=SAMPLE_TEST_RESULT_CHOICES, default=constants.SAMPLE_TEST_RESULT_MAP.SS)
    date_of_sample = models.DateTimeField(verbose_name="date at which sample tested")
    date_of_result = models.DateTimeField(null=True, blank=True, verbose_name="date of result of sample")
    status_updated_at = models.DateTimeField(auto_now=True, verbose_name="date at which sample updated")
    def __str__(self):
        return f"{self.patient.name} at {self.date_of_sample}"
class PatientTransfer(commons_models.SoftDeleteTimeStampedModel):
    """
    A request to move a patient from their current facility stay to a new
    facility, with an approval status and audit timestamp.
    """
    from_patient_facility = models.ForeignKey(
        PatientFacility, on_delete=models.CASCADE, help_text="Current patient facility of a patient",
    )
    to_facility = models.ForeignKey(
        Facility, on_delete=models.CASCADE, help_text="New Facility in which the patient can be transferred",
    )
    status = models.PositiveSmallIntegerField(
        choices=constants.TRANSFER_STATUS_CHOICES, default=constants.TRANSFER_STATUS.PENDING,
    )
    # NOTE(review): "wihich" typo lives in user-visible help_text; fixing it
    # requires a migration, so it is only flagged here.
    status_updated_at = models.DateTimeField(
        null=True, blank=True, help_text="Date and time at wihich the status is updated"
    )
    comments = models.TextField(null=True, blank=True, help_text="comments related to patient transfer request")
    def __str__(self):
        return f"""
        Patient: {self.from_patient_facility.patient.name} - From: {self.from_patient_facility.facility.name}
        - To: {self.to_facility.name}
        """
| StarcoderdataPython |
112945 | from model.group import Group
from fixture.additional import MH
import os.path
import getopt
import sys
import jsonpickle
# Command-line generator of random Group test data:
#   -n <count>  number of groups to create (default 5)
#   -f <path>   output file, relative to the project root (default data/groups.json)
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    # NOTE(review): the stdlib getopt module has no usage() function, so bad
    # options raise AttributeError here instead of printing usage.  The long
    # option names also contain spaces, which no real flag can match.
    getopt.usage()
    sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
# build n groups with random name/header/footer strings
testdata = [
    Group(name=MH.random_string("name",10), header=MH.random_string("header",20),footer=MH.random_string("footer",20))
    for i in range(n)
]
# resolve the output path relative to this script's parent directory
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
| StarcoderdataPython |
1730089 | from subprocess import getoutput
from pathlib import Path
from transonic.util import timeit
# (module, function) -> the benchmark statement executed by timeit for that function.
statements = {
    ("cmorph", "_dilate"): "_dilate(image, selem, out, shift_x, shift_y)",
    (
        "_greyreconstruct",
        "reconstruction_loop",
    ): "reconstruction_loop(ranks, prev, next_, strides, current_idx, image_stride)",
}
# (module, function) -> import line used to time the upstream scikit-image build
# of the same function (only entries listed here get the skimage comparison).
import_from_skimage = {
    (
        "_greyreconstruct",
        "reconstruction_loop",
    ): "from skimage.morphology._greyreconstruct import reconstruction_loop"
}
def bench_one(name_module="cmorph", func=None, total_duration=2):
    """Benchmark one function of *name_module* across transonic backends.

    Prints timings for (1) the upstream scikit-image build when listed in
    ``import_from_skimage``, (2) the local Cython .pyx build (the "norm"
    all other times are reported relative to), and (3) each transonic
    backend, run in a subprocess via a generated tmp.py with
    TRANSONIC_BACKEND set.  Requires a matching setup file under
    setup_codes/.  *func* selection is not implemented yet.
    """
    if func is not None:
        raise NotImplementedError
    # pick the first function registered for this module in `statements`
    functions = [
        (mod, func_) for (mod, func_) in statements.keys() if mod == name_module
    ]
    if not functions:
        raise ValueError(f"bad name_module: {name_module}")
    name_function = functions[0][1]
    print(f"module: {name_module}")
    stmt = statements[(name_module, name_function)]
    print(stmt)
    path_setup = Path("setup_codes") / f"{name_module}_{name_function}.py"
    if not path_setup.exists():
        raise RuntimeError
    with open(path_setup) as file:
        setup = file.read()
    # absolute timing of the upstream skimage implementation, when available
    if (name_module, name_function) in import_from_skimage:
        setup_from_skimage = setup.replace(
            f"from future.{name_module} import {name_function}",
            import_from_skimage[(name_module, name_function)],
        )
        time = timeit(stmt, setup_from_skimage, total_duration=total_duration)
        print(f"{'from skimage':18s} {time:.2e} s")
    # the local Cython .pyx build serves as the reference ("norm") time
    setup_pyx = setup.replace(
        f"from future.{name_module} import", f"from pyx.{name_module} import"
    )
    code = f"""
from transonic.util import timeit
setup = '''{setup}'''
stmt = '''{stmt}'''
print(timeit(stmt, setup, total_duration={total_duration}))
"""
    time_old = timeit(stmt, setup_pyx, total_duration=total_duration)
    print(f"cython pyx skimage {time_old:.2e} s (= norm)")
    # each backend runs the generated script in a subprocess so transonic
    # picks up TRANSONIC_BACKEND from the environment; stdout is the time
    with open("tmp.py", "w") as file:
        file.write(code)
    for backend in ("cython", "pythran", "numba"):
        time = float(getoutput(f"TRANSONIC_BACKEND='{backend}' python tmp.py"))
        print(f"{backend:18s} {time:.2e} s (= {time/time_old:.2f} * norm)")
    # print(getoutput("TRANSONIC_NO_REPLACE=1 python tmp.py"))
    # re-time the skimage build, now expressed relative to the pyx norm
    if (name_module, name_function) not in import_from_skimage:
        return
    setup_from_skimage = setup.replace(
        f"from future.{name_module} import {name_function}",
        import_from_skimage[(name_module, name_function)],
    )
    time = timeit(stmt, setup_from_skimage, total_duration=total_duration)
    print(f"{'from skimage':18s} {time:.2e} s (= {time/time_old:.2f} * norm)")
| StarcoderdataPython |
1796832 | <gh_stars>1-10
from discord.ext import commands
from typing import Any
import discord
from datetime import datetime, timedelta, timezone
class MessageCount(commands.Cog):
    """Commands that count messages in the guild's text channels."""

    # channel ids summed by the countall command (犬/猫/亀/恐竜 text channels)
    COUNTALL_CHANNEL_IDS = (
        762575939623452682,  # 犬
        762576579507126273,  # 猫
        780611197350576200,  # 亀
        812312154371784704,  # 恐竜
    )

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    async def _count_messages(channel):
        """Return the number of messages in *channel* by walking its full history."""
        count = 0
        async for _ in channel.history(limit=None):
            count += 1
        return count

    @commands.command()
    async def count(self, ctx, channel: discord.TextChannel = None):
        """Report how many messages the given (or current) channel contains."""
        channel = channel or ctx.channel
        count = await self._count_messages(channel)
        embed = discord.Embed()
        JST = timezone(timedelta(hours=+9), "JST")
        embed.timestamp = datetime.now(JST)
        embed.color = discord.Color.greyple()
        embed.description = f"このチャンネルには現在 **{count:,}** 件のメッセージがあります。"
        await ctx.send(embed=embed)

    @commands.command()
    async def countall(self, ctx, channel: discord.TextChannel = None):
        """Report the combined message count of the four animal text channels.

        The four hand-rolled history loops of the original are replaced by a
        loop over COUNTALL_CHANNEL_IDS sharing _count_messages.
        """
        total = 0
        for channel_id in self.COUNTALL_CHANNEL_IDS:
            total += await self._count_messages(self.bot.get_channel(channel_id))
        embed = discord.Embed()
        JST = timezone(timedelta(hours=+9), "JST")
        embed.timestamp = datetime.now(JST)
        embed.color = discord.Color.blurple()
        embed.description = f"犬~恐竜_txtには現在合計 **{total:,}** 件のメッセージがあります。"
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the MessageCount cog on the bot."""
    bot.add_cog(MessageCount(bot))
3267039 | import unittest
import ipdb
import networkx as nx
import numpy as np
from fmge import fmge
import random
import matplotlib.pyplot as plt
class test_FMGE(unittest.TestCase):
    """Smoke tests for the fmge fuzzy multi-graph embedding implementation.

    NOTE(review): this file is Python 2 (print statements in test_train);
    it will not run under Python 3 without conversion.
    """
    # populated once per class by setUpClass
    graphs = []
    @classmethod
    def setUpClass(cls):
        cls.graphs = cls._make_rand_graphs(n=100)
        cls.fmge = fmge()
    @classmethod
    def tearUpClass(cls):
        # NOTE(review): misspelling of tearDownClass — unittest never calls
        # a method named tearUpClass, so this hook is dead code.
        pass
    @classmethod
    def _make_rand_graphs(cls, n=100):
        """
        Generates n attributes random graphs
        Returns:
            list(networkx.graph)
        NOTE(review): the parameter n is unused — the loop below always
        builds 100 graphs regardless of n.
        """
        graphs = []
        n_node_attributes = random.randint(1, 10)
        n_edge_attributes = random.randint(1, 10)
        for i in range(100):
            G = nx.gnm_random_graph(random.randint(2, 30), random.randint(4, 60))
            node_attr = cls._make_attributes(n_node_attributes, G.nodes())
            for attr in node_attr:
                # (values, name) argument order — presumably the networkx 1.x
                # signature; confirm against the pinned networkx version
                nx.set_node_attributes(G, attr, node_attr[attr])
            edge_attr = cls._make_attributes(n_edge_attributes, G.edges(), prefix="e_")
            for attr in edge_attr:
                nx.set_edge_attributes(G, attr, edge_attr[attr])
            graphs.append(G)
        return graphs
    @classmethod
    def _make_attributes(cls, n, keys, prefix=""):
        """
        Make n attributes: for each attribute i, every key gets a value drawn
        from gauss(i, i); returned as {prefix+str(i): {key: value}}.
        """
        attr = {}
        for i in range(n):
            node_attr = {}
            for key in keys:
                node_attr[key] = random.gauss(i, i)
            attr[prefix + str(i)] = node_attr
        return attr
    def test_augment_graph(self):
        """
        Checks that a new ressemblance attribute has
        been created for each node and edge attribute
        """
        # NOTE(review): only checks that _augment_graph does not raise; the
        # final assertTrue(True) is vacuous.
        augmented_graphs = []
        for graph in self.graphs:
            _ = self.fmge._augment_graph(graph)
            augmented_graphs.append(_)
        self.assertTrue(True)
    def test_extract_all_attributes(self):
        # smoke test: extraction over the whole graph list must not raise
        attr = self.fmge._extract_all_attributes(self.graphs)
        self.assertTrue(True)
    def test_get_attribute_intervals(self):
        # smoke test: fuzzy interval construction over a sample value range
        _ = self.fmge._get_fuzzy_attribute_intervals(np.arange(0.1, np.pi, 0.1), 20)
    def test_train(self):
        # trains with 8 intervals, then prints the learned intervals and
        # one embedding per graph (Python 2 print statements)
        _ = self.fmge.train(self.graphs, 8)
        for interval in self.fmge.intervals:
            print interval, self.fmge.intervals[interval]
        for G in self.graphs:
            print self.fmge.embed(G)
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1666041 | <reponame>vyomshm/Auction
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete
from auction.bid.models import *
@receiver(post_save, sender=Bid)
def update_auction_1(sender, instance, created, **kwargs):
    """post_save hook: on every newly created Bid, refresh the auction's
    bid count and (if beaten) its highest bid."""
    if not created:
        return
    auction = instance.auction
    auction.bid_count = Bid.BidManager.current(auction).count()
    if instance.bid_amount > auction.current_highest_bid:
        auction.current_highest_bid = instance.bid_amount
    auction.save()
@receiver(post_delete, sender=Bid)
def update_auction_2(sender, instance, **kwargs):
    """post_delete hook: refresh the auction's bid count, and rescan for the
    highest bid when the deleted bid was the current top one.

    The hand-rolled maximum loop is replaced with max(..., default=0),
    which also covers the no-remaining-bids case.
    """
    auction = instance.auction
    bids = Bid.BidManager.current(auction)
    auction.bid_count = bids.count()
    if instance.bid_amount == auction.current_highest_bid:
        # the deleted bid may have held the top spot: recompute from scratch
        auction.current_highest_bid = max(
            (bid.bid_amount for bid in bids), default=0)
    auction.save()
3218459 | <gh_stars>0
# Deliberately broken scoping demo: because ``eggs`` is assigned inside
# spam(), Python treats ``eggs`` as a local name for the WHOLE function
# body, so the print() on the first line raises UnboundLocalError at call
# time instead of reading the global.
def spam():
    print(eggs)  # ERROR! local 'eggs' referenced before assignment
    eggs = 'spam local'
eggs = 'global'
spam()
73363 | from collections import Counter
from pathlib import Path
from typing import List, Dict
def calculate_triangular_value(count: int) -> int:
    """Return the sum 0 + 1 + ... + (count - 1).

    Behaviorally identical to the original accumulation loop, but uses
    the closed-form triangular-number formula: O(1) instead of O(count).
    """
    return count * (count - 1) // 2
class CrabCollection:
    """A lineup of crab positions (Advent of Code 2021, day 7 part 2)."""

    def __init__(self, initial_lineup: List[int]):
        self.lineup = initial_lineup

    def calculate_most_efficient_adjustment(self) -> Dict[int, List[int]]:
        """Map each candidate target position to the per-crab fuel costs.

        Moving a distance d costs 1 + 2 + ... + d, i.e. the triangular
        number d * (d + 1) / 2. Computing that in closed form replaces
        the original helper call, which looped O(d) per crab.
        Requires a non-empty lineup (``max`` would raise otherwise).
        """
        def fuel(distance: int) -> int:
            # Triangular cost of covering `distance` steps.
            return distance * (distance + 1) // 2

        return {
            target: [fuel(abs(target - position)) for position in self.lineup]
            for target in range(max(self.lineup))
        }
def main(file_path: Path):
    """Read a comma-separated crab lineup from *file_path* and print the
    minimum total fuel cost over all candidate target positions."""
    with open(file_path) as handle:
        positions = [int(token) for token in handle.readline().split(',')]
        adjustments = CrabCollection(positions).calculate_most_efficient_adjustment()
        cheapest = min(sum(costs) for costs in adjustments.values())
        print(f'Most Efficient Adjustment: {cheapest}')
if __name__ == '__main__':
    # Run against the sample input first, then the real puzzle input.
    p = Path('../input/day7_test.txt')
    main(p)
    p = Path('../input/day7_1.txt')
    main(p)
| StarcoderdataPython |
1670087 | <filename>tests/test_sensfsp_solver.py<gh_stars>0
import unittest
import mpi4py.MPI as mpi
import numpy as np
import pypacmensl.sensitivity.multi_sinks as sensfsp
def tcoeff(t, out):
    """Time-varying propensity coefficients: constant 1 for all four
    reactions, regardless of time ``t``."""
    for reaction in range(4):
        out[reaction] = 1
def dtcoeff(parameter, t, out):
    """Derivative of the time coefficients w.r.t. model parameter i:
    writes 1.0 into slot ``parameter`` (for parameters 0..3) and leaves
    every other slot untouched."""
    if parameter in (0, 1, 2, 3):
        out[parameter] = 1.0
def propensity(reaction, states, outs):
    """State-dependent propensity factors for the two-species toggle model.

    Reactions 0/2 are repressed production (1 / (1 + other species));
    reactions 1/3 are linear degradation of species 0/1 respectively.
    """
    if reaction == 0:
        outs[:] = np.reciprocal(1 + states[:, 1])
    elif reaction == 1:
        outs[:] = states[:, 0]
    elif reaction == 2:
        outs[:] = np.reciprocal(1 + states[:, 0])
    elif reaction == 3:
        outs[:] = states[:, 1]
def simple_constr(X, out):
    """Identity FSP constraint functions: each species count is bounded
    directly (constraint i equals species i)."""
    out[:, :2] = X[:, :2]
init_bounds = np.array([10, 10])
class TestFspSolver(unittest.TestCase):
    """Exercise SensFspSolverMultiSinks construction, configuration,
    and a full serial solve of the 4-parameter toggle model."""

    def setUp(self):
        self.stoich_mat = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])

    def _configured_solver(self, comm):
        """Build a solver on *comm* with the shared 4-parameter model."""
        solver = sensfsp.SensFspSolverMultiSinks(comm)
        solver.SetModel(num_parameters=4,
                        stoich_matrix=self.stoich_mat,
                        propensity_t=tcoeff,
                        propensity_x=propensity,
                        tv_reactions=list(range(4)),
                        d_propensity_t=dtcoeff,
                        d_propensity_t_sp=[[i] for i in range(4)],
                        d_propensity_x=None
                        )
        return solver

    def test_serial_constructor(self):
        """Constructing a solver on COMM_SELF must not raise."""
        sensfsp.SensFspSolverMultiSinks(mpi.COMM_SELF)

    def test_set_model(self):
        """Setting the model on a COMM_WORLD solver must not raise."""
        self._configured_solver(mpi.COMM_WORLD)

    def test_set_initial_distribution(self):
        """Setting the initial distribution and sensitivities must not raise."""
        solver = self._configured_solver(mpi.COMM_WORLD)
        X0 = np.array([[0, 0]])
        p0 = np.array([1.0])
        s0 = np.array([0.0])
        solver.SetInitialDist(X0, p0, [s0] * 4)

    def test_set_shape(self):
        """Setting the FSP shape constraints must not raise."""
        solver = self._configured_solver(mpi.COMM_WORLD)
        solver.SetFspShape(simple_constr, init_bounds)

    def test_solve_serial(self):
        """Solve serially; probabilities sum to ~1, sensitivities to ~0."""
        solver = self._configured_solver(mpi.COMM_SELF)
        solver.SetFspShape(simple_constr, init_bounds)
        X0 = np.array([[0, 0]])
        p0 = np.array([1.0])
        s0 = np.array([0.0])
        solver.SetInitialDist(X0, p0, [s0] * 4)
        solution = solver.Solve(10.0, 1.0E-4)
        prob = np.asarray(solution.GetProbViewer())
        self.assertAlmostEqual(prob.sum(), 1.0, 4)
        for i in range(0, 4):
            svec = np.asarray(solution.GetSensViewer(i))
            self.assertAlmostEqual(sum(svec), 0.0, 2)
            solution.RestoreSensViewer(i, svec)
if __name__ == '__main__':
    # Run this module's test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
1747388 | """Define the litcorpt data models"""
# pylint: disable=no-name-in-module
# pylint: disable=no-self-argument
# pylint: disable=no-self-use
# pylint: disable=too-few-public-methods
from enum import Enum
from typing import List, Optional
from datetime import date
from pydantic import BaseModel, ValidationError
# class ISBN10FormatError(Exception):
# """Custom error that is raised when ISBN10 doesn't has the right format"""
#
# def __init__(self, value: str, message) -> None:
# self.value = value
# self.message = message
# super().__init__(message)
# class IdentifierMissingError(Exception):
# """Custom error that is raised when there is no identifier"""
#
# def __init__(self, title: str, message) -> None:
# self.title = title
# self.message = message
# super().__init__(message)
##############################################################
# Enumeration classes
##############################################################
class CreatorRolesEnum(str, Enum):
    """Enumeration of the roles a creator can play for a book entry."""
    AUTHOR: str = 'author'
    EDITOR: str = 'editor'
    TRANSLATOR: str = 'translator'
    ORGANIZER: str = 'organizer'
class OrtographyEnum(str, Enum):
    """Enumeration of Portuguese ortographic agreements (year + scope).

    https://pt.wikipedia.org/wiki/Ortografia_da_l%C3%ADngua_portuguesa
    """
    FN1200: str = '1200fonetico'
    ET1500: str = '1500etimologico'
    PT1885: str = '1885portugal'
    BR1907: str = '1907brasil'
    PT1911: str = '1911portugal'
    BR1915: str = '1915brasil'
    LS1931: str = '1931lusofono'
    BR1943: str = '1943brasil'
    LS1945: str = '1945lusofono'
    BR1971: str = '1971brasil'
    PT1973: str = '1973portugal'
    LS1975: str = '1975lusofono'
    BR1986: str = '1986brasil'
    LS1990: str = '1990lusofono'
class LicenseEnum(str, Enum):
    """Enumeration of the licensing status a book entry may have."""
    PUBLIC: str = 'Public Domain'
    PRIVATE: str = 'Copyrighted'
    CC: str = 'Creative Commons'
    ACADEMIC: str = 'Free for academic usage'
    SPECIAL: str = 'Special use'
    UNKNOWN: str = 'Unknown'
##############################################################
# Model classes
##############################################################
class Creator(BaseModel):
    """A single creator (author, editor, ...) attached to a book entry."""
    role: CreatorRolesEnum  # relationship of this person to the work
    name: Optional[str]
    birth: Optional[int]  # birth year
    death: Optional[int]  # death year
    place: Optional[str]  # birth place
class Identifiers(BaseModel):
    """External identifiers for a book entry (all optional)."""
    isbn_10: Optional[str]
    isbn_13: Optional[str]
    amazon_books: Optional[str]
    google_books: Optional[str]
    goodreads: Optional[str]
    doi: Optional[str]
class Book(BaseModel):
    """Book entry model
    This class defines the elements of a book entry.
    index: A unique string to internally identify the entry.
    title: A title associated to the entry.
    subtitle: Document subtitle (if exists)
    creator: A list of creators. Each creator contains:
    Role: Creator relationship with the book entry
    LastName: creator last name, often used in bibliography,
    FirstName: creator given name,
    Birth: Creator's birth year.
    Death: Creator's death year.
    Place: Creator's birth place.
    language: A list of ISO entry with language, pt_BR or pt are the
    most common here. A document can contain many languages.
    Most of time just one.
    published: Date of first publish. Multiple edition should use the
    date of first edition. Except when large changes happened
    in document, as change of translator, change of ortography.
    identifier: A unique global identifier, often a ISBN13 for books.
    original_language: Original language of document. Using ISO entry for language.
    subject: A list entry subjects. As example: Fantasy, Science-Fiction, Kids.
    Use lower caps always.
    genre: A list of literary genre: Novel, Poetry, Lyrics, Theather.
    Use lower caps always.
    ortography: Reference to which Portuguese ortography is being used.
    abstract: The book abstract/resume.
    notes: Any notes worth of note.
    contents: Book contents. The text itself.
    Caution: Date fields must contain a datetime.date or a string in format "YYYY-MM-DD"
    """
    index: str
    title: str
    subtitle: Optional[str]
    creator: List[Creator]
    language: List[str]
    published: Optional[date]
    identifier: Optional[List[str]]
    identifiers: Optional[List[Identifiers]]
    original_language: Optional[List[str]]
    subject: Optional[List[str]]
    genre: Optional[List[str]]
    ortography: Optional[OrtographyEnum]
    abstract: Optional[str]
    notes: Optional[List[str]]
    license: Optional[LicenseEnum] = LicenseEnum.UNKNOWN
    contents: Optional[str]
    # NOTE(review): the validators below were disabled (commented out) by
    # the original author and are kept for reference only.
    # @validator('subject')
    # def validate_list_lower_subject(cls, field: Optional[str]) -> None:
    #     """Every entry must be in lower case"""
    #     if field is not None:
    #         for entry in field:
    #             if entry != entry.lower():
    #                 raise ValueError(f"""Entry '{entry}' must be lower case""")
    # @validator('genre')
    # def validate_list_lower_genre(cls, field: Optional[str]) -> None:
    #     """Every entry must be in lower case"""
    #     if field is not None:
    #         for entry in field:
    #             if entry != entry.lower():
    #                 raise ValueError(f"""Entry '{entry}' must be lower case""")
    # @root_validator(pre=True)
    # @classmethod
    # def check_isbn10_or_isbn13(cls, values):
    #     """Makes sure that isbn10 or isbn13 are present"""
    #     if "isbn_10" not in values and "isbn_13" not in values:
    #         raise IdentifierMissingError(title=values['title'],
    #                                      message="Document should contain a identifier")
    #     return values
    # @validator('isbn_10')
    # @classmethod
    # def isbn_10_valid(cls, value):
    #     chars = [c for c in value if c in "0123456789Xx"]
    #     if len(chars != 10):
    #         raise ISBN10FormatError(value=value, message="ISBN10 should be 10 digits.")
    #     def char_to_int(char: str)-> int:
    #         if char in "Xx":
    #             return 10
    #         return int(char)
    #     weighted_sum = sum((10 -i ) * char_to_int(x) for u, x in enumerate(chars))
    #     if weighted_sum % 11 != 0:
    #         raise ISBN10FormatError(value=value, message="ISBN10 should be divisible by 11."
    #     return value
##############################################################
# Main test function
##############################################################
def test_book() -> Book:
    """Return a sample Machado de Assis Book instance for testing."""
    return Book(
        index='assismemorias1880',
        title='Memórias Póstumas de Brás Cubas',
        subtitle='Epitáfio de um Pequeno Vencedor',
        creator=[
            {'role': 'author',
             'name': '<NAME>',
             'birth': '1839',
             'death': '1908',
             }
        ],
        language=['pt_BR'],
        published='1881-01-01',
        original_language=['pt_BR'],
        subject=['realism', 'humor'],
        genre=['novel', 'prose'],
        ortography='1990lusofono',
    )
#%%
def test_book_contents() -> str:
    """Return sample book contents for testing (opening of Machado de
    Assis's *Memórias Póstumas de Brás Cubas*, in Portuguese)."""
    memorias = """
AO VERME
QUE
PRIMEIRO ROEU AS FRIAS CARNES
DO MEU CADÁVER
DEDICO
COMO SAUDOSA LEMBRANÇA
ESTAS
Memórias Póstumas
Prólogo da quarta edição
A PRIMEIRA edição destas Memórias póstumas de Brás Cubas foi feita aos
pedaços na Revista Brasileira, pelos anos de 1870. Postas mais tarde em
livro, corrigi o texto em vários logares. Agora que tive de o rever para
a terceira edição, emendei ainda alguma cousa e suprimi duas ou três
dúzias de linhas. Assim composto, sai novamente à luz esta obra que
alguma benevolência parece ter encontrado no público.
Capistrano de Abreu, noticiando a publicação do livro, perguntava:
« As Memórias Póstumas de Brás Cubas são um romance? » Macedo So-
ares, em carta que me escreveu por esse tempo, recordava amigamente
as Viagens na minha terra. Ao primeiro respondia já o defuncto Brás Cu-
bas (como o leitor viu e verá no prólogo dele que vai adeante) que sim
e que não, que era romance para uns e não o era para outros. Quanto
ao segundo, assim se explicou o finado: « Trata-se de uma obra difusa,
na qual eu, Brás Cubas, se adoptei a forma livre de um Sterne ou de
um Xavier de Maistre, não sei se lhe meti algumas rabugens de pessi-
mismo. » Toda essa gente viajou: <NAME> à roda do quarto,
Garrett na terra dele, Sterne na terra dos outros. De Brás Cubas se pode
talvez dizer que viajou à roda da vida.
O que faz do meu Brás Cubas um autor particular é o que ele chama
« rabugens de pessimismo » . Há na alma deste livro, por mais risonho
que pareça, um sentimento amargo e áspero, que está longe de vir dos
seus modelos. É taça que pode ter lavores de igual escola, mas leva outro
vinho. Não digo mais para não entrar na crítica de um defunto, que se
pintou a si e a outros, conforme lhe pareceu melhor e mais certo.
MACHADO DE A SSIS
Ao leitor
QUE STENDHAL confessasse haver escripto um de seus livros para cem
leitores, cousa é que admira e consterna. O que não admira, nem pro-
vavelmente consternará é se este outro livro não tiver os cem leitores de
Stendhal, nem cincoenta, nem vinte, e quando muito, dez. Dez? Talvez
cinco. Trata-se, na verdade, de uma obra difusa, na qual eu, Brás Cu-
bas, se adoptei a forma livre de um Sterne, ou de um Xavier de Maistre,
não sei se lhe meti algumas rabugens de pessimismo. Pode ser. Obra de
finado. Escrevi-a com a pena da galhofa e a tinta da melancolia, e não
é difícil antever o que poderá sair desse conúbio. Acresce que a gente
grave achará no livro umas aparências de puro romance, ao passo que a
gente frívola não achará nele o seu romance usual; ei-lo aí fica privado
da estima dos graves e do amor dos frívolos, que são as duas colunas
máximas da opinião.
Mas eu ainda espero angariar as simpatias da opinião, e o primeiro
remédio é fugir a um prólogo explícito e longo. O melhor prólogo é o que
contém menos cousas, ou o que as diz de um jeito obscuro e truncado.
Conseguintemente, evito contar o processo extraordinário que empre-
guei na composição destas Memórias, trabalhadas cá no outro mundo.
Seria curioso, mas nimiamente extenso, e aliás desnecessário ao enten-
dimento da obra. A obra em si mesma é tudo: se te agradar, fino leitor,
pago-me da tarefa; se te não agradar, pago-te com um piparote, e adeus.
BRÁS CUBAS
CAPÍTULO PRIMEIRO
Óbito do autor
ALGUM tempo hesitei se devia abrir estas memórias pelo princípio ou
pelo fim, isto é, se poria em primeiro logar o meu nascimento ou a
minha morte. Suposto o uso vulgar seja começar pelo nascimento, duas
considerações me levaram a adoptar diferente método: a primeira é que
eu não sou propriamente um autor defunto, mas um defunto autor, para
quem a campa foi outro berço; a segunda é que o escripto ficaria assim
mais galante e mais novo. Moisés, que também contou a sua morte, não
a pôs no intróito, mas no cabo: diferença radical entre este livro e o
Pentateuco.
Continua...
"""
    return memorias
def main() -> None:
    """Build the sample Book and print it, reporting validation failures."""
    try:
        book = test_book()
    except ValidationError as err:
        print(f"Error in book: {err}")
        return
    print(f"Python Object:\n {book}\n")
    print(f"Json Object:\n {book.json()}\n")
if __name__ == "__main__":
    # Run the basic model self-test when executed directly.
    main()
| StarcoderdataPython |
1792712 | <reponame>cschutijser/scion<filename>python/lib/packet/packet_base.py
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`packet_base` --- Packet base class
========================================
"""
# Stdlib
from abc import ABCMeta, abstractmethod
# External
import capnp
# SCION
from lib.errors import SCIONParseError, SCIONTypeError
from lib.util import hex_str
class Serializable(object, metaclass=ABCMeta):  # pragma: no cover
    """
    Base class for all objects which serialize into raw bytes.

    Subclasses implement ``_parse`` (decode from bytes), ``from_values``
    (construct from fields), ``pack`` (encode to bytes), ``__len__`` and
    ``__str__``.
    """
    def __init__(self, raw=None):
        # Parse immediately when raw bytes are supplied.
        if raw:
            self._parse(raw)

    @abstractmethod
    def _parse(self, raw):
        raise NotImplementedError

    @abstractmethod
    def from_values(self, *args, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def pack(self):
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        raise NotImplementedError

    @abstractmethod
    def __str__(self):
        raise NotImplementedError
class Cerealizable(object, metaclass=ABCMeta):
    """
    Base class for all objects which serialize to Cap'n Proto.

    Each subclass needs to specify a class attribute for the corresponding
    proto file (P) and the proto message name (P_CLS), e.g.,

    P = capnp.load("proto/foo.capnp")
    P_CLS = P.Foo
    """
    def __init__(self, p):
        assert not isinstance(p, bytes), type(p)
        self.p = p
        # Guard so pack() is only ever called once per instance.
        self._packed = False

    @classmethod
    def from_raw(cls, raw):
        """Parse a packed capnp byte string into an instance."""
        assert isinstance(raw, bytes), type(raw)
        try:
            return cls(cls.P_CLS.from_bytes_packed(raw).as_builder())
        except capnp.lib.capnp.KjException as e:
            raise SCIONParseError("Unable to parse %s capnp message: %s" %
                                  (cls, e)) from None

    @classmethod
    def from_raw_multiple(cls, raw):
        """Yield one instance per capnp message in a concatenated byte string."""
        assert isinstance(raw, bytes), type(raw)
        try:
            for p in cls.P_CLS.read_multiple_bytes_packed(raw):
                yield cls(p.as_builder())
        except capnp.lib.capnp.KjException as e:
            raise SCIONParseError("Unable to parse %s capnp message: %s" %
                                  (cls, e)) from None

    @abstractmethod
    def from_values(self, *args, **kwargs):
        raise NotImplementedError

    @classmethod
    def from_proto(cls, p):  # pragma: no cover
        return cls(p)

    def proto(self):
        # The underlying capnp message builder.
        return self.p

    @classmethod
    def from_dict(cls, d):
        return cls(cls.P_CLS.new_message(**d))

    def to_dict(self):
        return self.proto().to_dict()

    def pack(self, *args, **kwargs):
        assert not self._packed, "May only be packed once"
        self._packed = True
        return self._pack(*args, **kwargs)

    def _pack(self):
        return self.proto().to_bytes_packed()

    def __bool__(self):
        return True

    def __len__(self):
        # Size in bytes of the underlying capnp message.
        return self.proto().total_size.word_count * 8

    def copy(self):
        return type(self)(self.proto().copy())

    def __copy__(self):
        return type(self)(self.proto().copy())

    def __deepcopy__(self, memo):
        # http://stackoverflow.com/a/15774013
        inst = type(self)(self.p.copy())
        memo[id(self)] = inst
        return inst

    def __eq__(self, other):  # pragma: no cover
        # Equality is subclass-specific; subclasses must override.
        raise NotImplementedError

    def short_desc(self):
        return str(self.proto())

    def __str__(self):
        return "%s: %s" % (self.NAME, self.short_desc())
class L4HeaderBase(Serializable, metaclass=ABCMeta):  # pragma: no cover
    """
    Base class for L4 headers.
    """
    TYPE = None  # overridden by subclasses with the L4 protocol type

    def pack(self, payload, checksum=None):
        # Total length covers the fixed-size header plus the payload;
        # the checksum is computed from the payload unless supplied.
        self.total_len = self.LEN + len(payload)
        if checksum is None:
            checksum = self._calc_checksum(payload)
        return self._pack(checksum)

    @abstractmethod
    def validate(self, payload):
        raise NotImplementedError
class PacketBase(Serializable):  # pragma: no cover
    """
    Base class for packets.
    """
    def __init__(self, raw=None):
        """
        Initialize an instance of the class PacketBase.
        """
        # Payload defaults to empty bytes until parsed or set.
        self._payload = b""
        super().__init__(raw)

    def get_payload(self):
        return self._payload

    def set_payload(self, new_payload):
        # Payload must itself be serializable (raw-bytes or capnp based).
        assert isinstance(new_payload, (Serializable, CerealBox)), type(new_payload)
        self._payload = new_payload
class CerealBox(object, metaclass=ABCMeta):
    """
    CerealBox represents capnp structs that have a unnamed union. In the simplest case, a CerealBox
    object contains a Cerealizable object, but CerealBoxes can also be nested
    (e.g. CtrlPayload(PathMgmt(RevInfo.from_values(...)))).

    All child classes must define the NAME, P_CLS, and CLASS_FIELD_MAP attributes.
    """
    def __init__(self, union):
        # `union` is the wrapped python object (Cerealizable or nested CerealBox).
        self.union = union

    @classmethod
    def from_proto(cls, p):  # pragma: no cover
        """
        Internal constructor, used by sub-classes to create the corresponding python object from a
        capnp object. The appropriate python class is selected by looking up the union field name in
        CLASS_FIELD_MAP.
        """
        type_ = p.which()
        for cls_, field in cls.CLASS_FIELD_MAP.items():
            if type_ == field:
                return cls._from_union(p, cls_.from_proto(getattr(p, type_)))
        raise SCIONParseError("Unsupported %s proto type: %s" % (cls.NAME, type_))

    @classmethod
    def _from_union(cls, p, union):  # pragma: no cover
        """
        Internal constructor, overridden by sub-classes which have more fields than just a single
        unnamed union.
        p is passed in to be available to subclasses which override this.
        """
        return cls(union)

    def proto(self):
        """
        Return the corresponding capnp object.
        """
        return self.P_CLS.new_message(**{self.type(): self.union.proto()})

    def type(self):
        """
        Return the type of the union, represented by the union field name.
        """
        c = self.CLASS_FIELD_MAP.get(self.union.__class__)
        if c is not None:
            return c
        raise SCIONTypeError("Unsupported %s proto class %s (%s)" %
                             (self.NAME, self.union.__class__, type(self.union)))

    def inner_type(self):
        """
        Return the type of the innermost Cerealizable object, represented by the union field name in
        the innermost CerealBox object.
        """
        # Recurse through nested boxes until a plain Cerealizable is reached.
        if isinstance(self.union, CerealBox):
            return self.union.inner_type()
        return self.type()

    def pack(self):
        return self.proto().to_bytes_packed()

    def copy(self):
        return self.__class__(self.union.copy())

    def __len__(self):
        # Size in bytes of the serialized capnp message.
        return self.proto().total_size.word_count * 8

    def __str__(self):
        return "%s(%dB): %s" % (self.NAME, len(self), self.union)
class PayloadRaw(Serializable):  # pragma: no cover
    """Opaque raw-bytes payload."""
    NAME = "PayloadRaw"
    SNIPPET_LEN = 32  # max number of payload bytes shown by __str__

    def __init__(self, raw=None):
        self._raw = b""
        super().__init__(raw)

    def _parse(self, raw):
        self._raw = raw or b""

    @classmethod
    def from_values(cls, raw):
        # BUG FIX: this alternate constructor is invoked as
        # PayloadRaw.from_values(raw); without @classmethod the bytes
        # argument was bound to `cls` and the `cls()` call below failed.
        assert isinstance(raw, bytes), type(raw)
        inst = cls()
        inst._raw = raw
        return inst

    def pack(self):
        return self._raw

    def __eq__(self, other):
        return self._raw == other._raw

    def __len__(self):
        return len(self._raw)

    def __str__(self):
        # Show at most SNIPPET_LEN bytes of the payload, hex-encoded.
        s = "%s(%dB): %s" % (
            self.NAME, len(self._raw), hex_str(self._raw[:self.SNIPPET_LEN]))
        if len(self._raw) > self.SNIPPET_LEN:
            s += "[...]"
        return s
| StarcoderdataPython |
3396763 | <filename>setup.py<gh_stars>0
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# --- Package metadata -------------------------------------------------
NAME = 'weakCrypt'
DESCRIPTION = 'Fundamentally insecure crypto-explorations.'
URL = 'https://www.github.com/suddensleep/weakCrypt'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = None  # when None, the version is read from <NAME>/__version__.py
REQUIRED = [
    'numpy'
]

here = os.path.abspath(os.path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = '\n' + f.read()

# Load the package version: either pinned above or executed out of the
# package's __version__.py module.
about = {}
if not VERSION:
    with open(os.path.join(here, NAME, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION  # BUG FIX: was `about('__version__']` (SyntaxError)

setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=('tests',)),
    install_requires=REQUIRED,
    include_package_data=True,
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Topic :: Security',
        'Topic :: Security :: Cryptography',
        'Topic :: Scientific/Engineering :: Mathematics'
    ]
)
| StarcoderdataPython |
4811870 | <filename>code/tests/unit/conftest.py
from http import HTTPStatus
from unittest.mock import MagicMock
import jwt
from pytest import fixture
from app import app
from api.errors import INVALID_ARGUMENT
from tests.unit.payloads_for_tests import PRIVATE_KEY
@fixture(scope='session')
def client():
    # Configure the Flask app for testing before handing out a client:
    # install the test signing key and enable testing mode.
    app.rsa_private_key = PRIVATE_KEY
    app.testing = True
    with app.test_client() as client:
        yield client
@fixture(scope='session')
def valid_jwt(client):
    """Factory fixture producing RS256-signed JWTs with configurable claims."""
    def _make_jwt(
            jwks_host='visibility.amp.cisco.com',
            aud='http://localhost',
            kid='02B1174234C29F8EFB69911438F597FF3FFEE6B7',
            access_key='access_key',
            secret_access_key='secret_access_key',
            detector='detector',
            region='region',
            wrong_structure=False
    ):
        claims = {
            'jwks_host': jwks_host,
            'aud': aud,
            'AWS_REGION': region,
            'AWS_ACCESS_KEY_ID': access_key,
            'AWS_SECRET_ACCESS_KEY': secret_access_key,
            'AWS_GUARD_DUTY_DETECTOR_ID': detector
        }
        if wrong_structure:
            # Drop a required claim to simulate a malformed token.
            del claims['AWS_ACCESS_KEY_ID']
        return jwt.encode(
            claims, client.application.rsa_private_key, algorithm='RS256',
            headers={'kid': kid}
        )
    return _make_jwt
@fixture(scope='module')
def invalid_json_expected_payload():
    """Factory for the standard invalid-argument error envelope."""
    def _make_message(message):
        error = {
            'code': INVALID_ARGUMENT,
            'message': message,
            'type': 'fatal'
        }
        return {'errors': [error]}
    return _make_message
def mock_api_response(status_code=HTTPStatus.OK, payload=None):
    """Build a MagicMock mimicking a ``requests.Response``-style object."""
    response = MagicMock()
    response.status_code = status_code
    response.ok = status_code == HTTPStatus.OK
    response.json = lambda: payload
    return response
| StarcoderdataPython |
1687169 | #Crie um Script Python que leia o nome de uma pessoa e mostre uma mensagem
# Reads a person's name and prints a personalised welcome message.
nome = input('Olá, digite seu nome: ')
print('Olá, {} seja bem vindo!'.format(nome))
| StarcoderdataPython |
3205177 | # -*- coding: utf-8
""" Test the file recognizer capabilities.
"""
from __future__ import print_function, unicode_literals
import contextlib
import gzip
import os
import shutil
import socket
import sys
from io import open
from functools import partial
import nose
from grin import FileRecognizer, GZIP_MAGIC
# Convenience printer that writes to stderr.
printerr = partial(print, file=sys.stderr)
# Every possible byte value; used to create definitely-binary files.
ALL_BYTES = bytes(bytearray(range(256)))
def empty_file(filename, open=open):
    """Create *filename* (via the given opener) with no contents."""
    with open(filename, "a"):
        pass
def binary_file(filename, open=open):
    """Write every possible byte value to *filename* (definitely binary)."""
    with open(filename, "wb") as handle:
        handle.write(ALL_BYTES)
def text_file(filename, open=open):
    """Write a 401-line ASCII text file: 100 foo/bar pairs, one baz line,
    then 100 more foo/bar pairs."""
    block = [b"foo\n", b"bar\n"] * 100
    content = b"".join(block + [b"baz\n"] + block)
    with open(filename, "wb") as handle:
        handle.write(content)
def fake_gzip_file(filename, open=open):
    """ Write out a binary file that has the gzip magic header bytes, but is not
    a gzip file.
    """
    with open(filename, "wb") as handle:
        handle.write(GZIP_MAGIC + ALL_BYTES)
def binary_middle(filename, open=open):
    """ Write out a file that is text for the first 100 bytes, then 100 binary
    bytes, then 100 text bytes to test that the recognizer only reads some of
    the file.
    """
    text = b"a" * 100 + b"\0" * 100 + b"b" * 100
    # Use a context manager so the handle is closed even if write() fails
    # (the original open()/close() pair leaked the handle on error).
    with open(filename, "wb") as f:
        f.write(text)
def socket_file(filename):
    """Create a filesystem entry of socket type by binding a Unix socket.

    The socket object is closed immediately after binding: the bind already
    created the path entry (which is all the tests need), and closing fixes
    the original file-descriptor leak. Closing does NOT remove the path.
    """
    s = socket.socket(socket.AF_UNIX)
    s.bind(filename)
    s.close()
def unreadable_file(filename):
    """ Write a file that does not have read permissions.
    """
    text_file(filename)
    os.chmod(filename, 0o200)
    # Sanity-check that chmod actually revoked read access: on filesystems
    # that ignore permission bits the recognizer tests would silently
    # produce false negatives, so fail loudly here instead.
    try:
        with open(filename) as f:
            pass
    except IOError as e:
        # Any error other than the expected permission denial is a real bug.
        if "Permission denied" not in str(e):
            raise
    else:
        raise RuntimeError(
            "grin tests cannot run on a filesystem that doesn't support chmod(). "
            "You will encounter false negative"
        )
def unreadable_dir(filename):
    """Create a directory whose read bit is cleared (mode 0o300:
    write + execute only)."""
    os.mkdir(filename)
    os.chmod(filename, 0o300)
def unexecutable_dir(filename):
    """Create a directory whose execute bit is cleared (mode 0o600:
    read + write only)."""
    os.mkdir(filename)
    os.chmod(filename, 0o600)
def totally_unusable_dir(filename):
    """Create a directory with neither read nor write permission
    (mode 0o100: execute only)."""
    os.mkdir(filename)
    os.chmod(filename, 0o100)
def setup():
    """Create, in the current working directory, the fixture files and
    directory tree the recognizer tests run against."""
    # NOTE(review): fake_gzip_file and socket_test get str paths while
    # everything else uses bytes -- presumably both are accepted; confirm.
    # Make sure we don't have files remaining from previous tests
    teardown()
    # Make files to test individual recognizers.
    empty_file(b"empty")
    binary_file(b"binary")
    binary_middle(b"binary_middle")
    text_file(b"text")
    text_file(b"text~")
    text_file(b"text#")
    text_file(b"foo.bar.baz")
    os.mkdir(b"dir")
    binary_file(b".binary")
    text_file(b".text")
    empty_file(b"empty.gz", open=gzip.open)
    binary_file(b"binary.gz", open=gzip.open)
    text_file(b"text.gz", open=gzip.open)
    binary_file(b".binary.gz", open=gzip.open)
    text_file(b".text.gz", open=gzip.open)
    fake_gzip_file("fake.gz")
    os.mkdir(b".dir")
    os.symlink(b"binary", b"binary_link")
    os.symlink(b"text", b"text_link")
    os.symlink(b"dir", b"dir_link")
    os.symlink(b".binary", b".binary_link")
    os.symlink(b".text", b".text_link")
    os.symlink(b".dir", b".dir_link")
    unreadable_file(b"unreadable_file")
    unreadable_dir(b"unreadable_dir")
    unexecutable_dir(b"unexecutable_dir")
    totally_unusable_dir(b"totally_unusable_dir")
    os.symlink(b"unreadable_file", b"unreadable_file_link")
    os.symlink(b"unreadable_dir", b"unreadable_dir_link")
    os.symlink(b"unexecutable_dir", b"unexecutable_dir_link")
    os.symlink(b"totally_unusable_dir", b"totally_unusable_dir_link")
    text_file(b"text.skip_ext")
    os.mkdir(b"dir.skip_ext")
    text_file(b"text.dont_skip_ext")
    os.mkdir(b"skip_dir")
    text_file(b"fake_skip_dir")
    socket_file("socket_test")
    # Make a directory tree to test tree-walking.
    os.mkdir(b"tree")
    os.mkdir(b"tree/.hidden_dir")
    os.mkdir(b"tree/dir")
    os.mkdir(b"tree/dir/subdir")
    text_file(b"tree/dir/text")
    text_file(b"tree/dir/subdir/text")
    text_file(b"tree/text")
    text_file(b"tree/text.skip_ext")
    os.mkdir(b"tree/dir.skip_ext")
    text_file(b"tree/dir.skip_ext/text")
    text_file(b"tree/text.dont_skip_ext")
    binary_file(b"tree/binary")
    os.mkdir(b"tree/skip_dir")
    text_file(b"tree/skip_dir/text")
    os.mkdir(b"tree/.skip_hidden_dir")
    text_file(b"tree/.skip_hidden_file")
    # Directories with restricted permissions, populated BEFORE the chmod
    # so the files exist but cannot be listed/entered afterwards.
    os.mkdir(b"tree/unreadable_dir")
    text_file(b"tree/unreadable_dir/text")
    os.chmod("tree/unreadable_dir", 0o300)
    os.mkdir(b"tree/unexecutable_dir")
    text_file(b"tree/unexecutable_dir/text")
    os.chmod(b"tree/unexecutable_dir", 0o600)
    os.mkdir(b"tree/totally_unusable_dir")
    text_file(b"tree/totally_unusable_dir/text")
    os.chmod(b"tree/totally_unusable_dir", 0o100)
@contextlib.contextmanager
def catch_and_log_env_error(message=None, ignore="No such file or directory", args=()):
    """Catch EnvironmentError, print a message, optionally reraise.

    Errors whose text contains *ignore* are swallowed silently. For any
    other error: re-raise when *message* is None, otherwise log it to
    stderr with *args* interpolated into *message*.
    """
    try:
        yield
    except EnvironmentError as err:
        if ignore in str(err):
            return
        if message is None:
            raise err
        printerr(message % (tuple(args) + (err,)))
def teardown():
    """Remove every fixture file/directory created by setup(), restoring
    permissions first so the removals can succeed."""
    files_to_delete = [
        b"empty",
        b"binary",
        b"binary_middle",
        b"text",
        b"text~",
        b"empty.gz",
        b"binary.gz",
        b"text.gz",
        b"dir",
        b"binary_link",
        b"text_link",
        b"dir_link",
        b".binary",
        b".text",
        b".binary.gz",
        b".text.gz",
        b"fake.gz",
        b".dir",
        b".binary_link",
        b".text_link",
        b".dir_link",
        b"unreadable_file",
        b"unreadable_dir",
        b"unexecutable_dir",
        b"totally_unusable_dir",
        b"unreadable_file_link",
        b"unreadable_dir_link",
        b"unexecutable_dir_link",
        b"totally_unusable_dir_link",
        b"text.skip_ext",
        b"text.dont_skip_ext",
        b"dir.skip_ext",
        b"skip_dir",
        b"fake_skip_dir",
        b"text#",
        b"foo.bar.baz",
        b"tree",
        b"socket_test"
    ]
    for filename in files_to_delete:
        # Restore permissions first; a missing file is silently ignored.
        with catch_and_log_env_error():
            os.chmod(filename, 0o777)
        if os.path.isdir(filename):
            if not filename.startswith(b'/'):
                # Make sure we have permission to delete everything
                for dirname, dirs, files in os.walk(filename, followlinks=True):
                    paths = [os.path.join(dirname, p) for p in (dirs + files)]
                    os.chmod(dirname, 0o777)
                    for path in paths:
                        os.chmod(path, 0o777)
            with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
                shutil.rmtree(filename)
        else:
            with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
                os.unlink(filename)
def test_binary():
    """A file of raw bytes is classified as binary by every entry point."""
    recognizer = FileRecognizer()
    assert recognizer.is_binary(b"binary")
    for classify in (recognizer.recognize_file, recognizer.recognize):
        assert classify(b"binary") == "binary"
def test_text():
    """An ASCII text file is not binary and is classified as text."""
    recognizer = FileRecognizer()
    assert not recognizer.is_binary(b"text")
    for classify in (recognizer.recognize_file, recognizer.recognize):
        assert classify(b"text") == "text"
def test_gzipped():
    """Gzipped text reports 'gzip'; gzipped or fake-gzip binary reports
    'binary'; all three count as binary files."""
    recognizer = FileRecognizer()
    cases = [
        (b"text.gz", "gzip"),
        (b"binary.gz", "binary"),
        (b"fake.gz", "binary"),
    ]
    for name, expected in cases:
        assert recognizer.is_binary(name)
        assert recognizer.recognize_file(name) == expected
        assert recognizer.recognize(name) == expected
def test_binary_middle():
    """Classification depends on how many leading bytes are examined:
    the NULs start at byte 100, so 100 bytes look like text, 101 do not."""
    shallow = FileRecognizer(binary_bytes=100)
    assert not shallow.is_binary(b"binary_middle")
    assert shallow.recognize_file(b"binary_middle") == "text"
    assert shallow.recognize(b"binary_middle") == "text"
    deep = FileRecognizer(binary_bytes=101)
    assert deep.is_binary(b"binary_middle")
    assert deep.recognize_file(b"binary_middle") == "binary"
    assert deep.recognize(b"binary_middle") == "binary"
def test_socket():
    """Socket files are skipped entirely."""
    assert FileRecognizer().recognize(b"socket_test") == "skip"
def test_dir():
    """Plain directories are recognized as 'directory'."""
    recognizer = FileRecognizer()
    assert recognizer.recognize_directory(b"dir") == "directory"
    assert recognizer.recognize(b"dir") == "directory"
def test_skip_symlinks():
    """With symlink skipping enabled, every link classifies as 'link'."""
    recognizer = FileRecognizer(skip_symlink_files=True, skip_symlink_dirs=True)
    for name in (b"binary_link", b"text_link"):
        assert recognizer.recognize(name) == "link"
        assert recognizer.recognize_file(name) == "link"
    assert recognizer.recognize(b"dir_link") == "link"
    assert recognizer.recognize_directory(b"dir_link") == "link"
def test_do_not_skip_symlinks():
fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
assert fr.recognize(b"binary_link") == "binary"
assert fr.recognize_file(b"binary_link") == "binary"
assert fr.recognize(b"text_link") == "text"
assert fr.recognize_file(b"text_link") == "text"
assert fr.recognize(b"dir_link") == "directory"
assert fr.recognize_directory(b"dir_link") == "directory"
def test_skip_hidden():
fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True)
assert fr.recognize(b".binary") == "skip"
assert fr.recognize_file(b".binary") == "skip"
assert fr.recognize(b".text") == "skip"
assert fr.recognize_file(b".text") == "skip"
assert fr.recognize(b".dir") == "skip"
assert fr.recognize_directory(b".dir") == "skip"
assert fr.recognize(b".binary_link") == "skip"
assert fr.recognize_file(b".binary_link") == "skip"
assert fr.recognize(b".text_link") == "skip"
assert fr.recognize_file(b".text_link") == "skip"
assert fr.recognize(b".dir_link") == "skip"
assert fr.recognize_directory(b".dir_link") == "skip"
assert fr.recognize(b".text.gz") == "skip"
assert fr.recognize_file(b".text.gz") == "skip"
assert fr.recognize(b".binary.gz") == "skip"
assert fr.recognize_file(b".binary.gz") == "skip"
def test_skip_backup():
fr = FileRecognizer(skip_backup_files=True)
assert fr.recognize_file(b"text~") == "skip"
def test_do_not_skip_backup():
fr = FileRecognizer(skip_backup_files=False)
assert fr.recognize_file(b"text~") == "text"
def test_skip_weird_exts():
fr = FileRecognizer(skip_exts=set())
assert fr.recognize_file(b"text#") == "text"
assert fr.recognize_file(b"foo.bar.baz") == "text"
fr = FileRecognizer(skip_exts=set([b"#", b".bar.baz"]))
assert fr.recognize_file(b"text#") == "skip"
assert fr.recognize_file(b"foo.bar.baz") == "skip"
def test_do_not_skip_hidden_or_symlinks():
    # All skipping disabled: hidden entries classify exactly like visible ones.
    fr = FileRecognizer(
        skip_hidden_files=False,
        skip_hidden_dirs=False,
        skip_symlink_dirs=False,
        skip_symlink_files=False,
    )
    assert fr.recognize(b".binary") == "binary"
    assert fr.recognize_file(b".binary") == "binary"
    assert fr.recognize(b".text") == "text"
    assert fr.recognize_file(b".text") == "text"
    assert fr.recognize(b".dir") == "directory"
    assert fr.recognize_directory(b".dir") == "directory"
    assert fr.recognize(b".binary_link") == "binary"
    assert fr.recognize_file(b".binary_link") == "binary"
    assert fr.recognize(b".text_link") == "text"
    assert fr.recognize_file(b".text_link") == "text"
    assert fr.recognize(b".dir_link") == "directory"
    assert fr.recognize_directory(b".dir_link") == "directory"
    assert fr.recognize(b".text.gz") == "gzip"
    assert fr.recognize_file(b".text.gz") == "gzip"
    assert fr.recognize(b".binary.gz") == "binary"
    assert fr.recognize_file(b".binary.gz") == "binary"
def test_do_not_skip_hidden_but_skip_symlinks():
    # Hidden entries are classified, but any symlink is reported as "link".
    fr = FileRecognizer(
        skip_hidden_files=False,
        skip_hidden_dirs=False,
        skip_symlink_dirs=True,
        skip_symlink_files=True,
    )
    assert fr.recognize(b".binary") == "binary"
    assert fr.recognize_file(b".binary") == "binary"
    assert fr.recognize(b".text") == "text"
    assert fr.recognize_file(b".text") == "text"
    assert fr.recognize(b".dir") == "directory"
    assert fr.recognize_directory(b".dir") == "directory"
    assert fr.recognize(b".binary_link") == "link"
    assert fr.recognize_file(b".binary_link") == "link"
    assert fr.recognize(b".text_link") == "link"
    assert fr.recognize_file(b".text_link") == "link"
    assert fr.recognize(b".dir_link") == "link"
    assert fr.recognize_directory(b".dir_link") == "link"
    assert fr.recognize(b".text.gz") == "gzip"
    assert fr.recognize_file(b".text.gz") == "gzip"
    assert fr.recognize(b".binary.gz") == "binary"
    assert fr.recognize_file(b".binary.gz") == "binary"
def test_lack_of_permissions():
    # Files we cannot read are "unreadable"; directories stay "directory"
    # regardless of their permission bits.  Fixtures with stripped permissions
    # are presumably prepared by a setup routine elsewhere — TODO confirm.
    fr = FileRecognizer()
    assert fr.recognize(b"unreadable_file") == "unreadable"
    assert fr.recognize_file(b"unreadable_file") == "unreadable"
    assert fr.recognize(b"unreadable_dir") == "directory"
    assert fr.recognize_directory(b"unreadable_dir") == "directory"
    assert fr.recognize(b"unexecutable_dir") == "directory"
    assert fr.recognize_directory(b"unexecutable_dir") == "directory"
    assert fr.recognize(b"totally_unusable_dir") == "directory"
    assert fr.recognize_directory(b"totally_unusable_dir") == "directory"
def test_symlink_src_unreadable():
    # Followed symlinks inherit the classification of their unreadable targets.
    fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
    assert fr.recognize(b"unreadable_file_link") == "unreadable"
    assert fr.recognize_file(b"unreadable_file_link") == "unreadable"
    assert fr.recognize(b"unreadable_dir_link") == "directory"
    assert fr.recognize_directory(b"unreadable_dir_link") == "directory"
    assert fr.recognize(b"unexecutable_dir_link") == "directory"
    assert fr.recognize_directory(b"unexecutable_dir_link") == "directory"
    assert fr.recognize(b"totally_unusable_dir_link") == "directory"
    assert fr.recognize_directory(b"totally_unusable_dir_link") == "directory"
def test_skip_ext():
    # skip_exts applies to files only; a directory with the extension survives.
    fr = FileRecognizer(skip_exts=set([b".skip_ext"]))
    assert fr.recognize(b"text.skip_ext") == "skip"
    assert fr.recognize_file(b"text.skip_ext") == "skip"
    assert fr.recognize(b"text") == "text"
    assert fr.recognize_file(b"text") == "text"
    assert fr.recognize(b"text.dont_skip_ext") == "text"
    assert fr.recognize_file(b"text.dont_skip_ext") == "text"
    assert fr.recognize(b"dir.skip_ext") == "directory"
    assert fr.recognize_directory(b"dir.skip_ext") == "directory"
def test_skip_dir():
    # skip_dirs applies to directories only; a *file* with a listed name
    # (fake_skip_dir) is still classified normally.
    fr = FileRecognizer(skip_dirs=set([b"skip_dir", b"fake_skip_dir"]))
    assert fr.recognize(b"skip_dir") == "skip"
    assert fr.recognize_directory(b"skip_dir") == "skip"
    assert fr.recognize(b"fake_skip_dir") == "text"
    assert fr.recognize_file(b"fake_skip_dir") == "text"
def test_walking():
    # walk() yields (path, kind) pairs, honouring hidden/ext/dir skip rules;
    # sorted() makes the comparison order-independent.
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"tree/binary", "binary"),
        (b"tree/dir.skip_ext/text", "text"),
        (b"tree/dir/subdir/text", "text"),
        (b"tree/dir/text", "text"),
        (b"tree/text", "text"),
        (b"tree/text.dont_skip_ext", "text"),
    ]
    result = sorted(fr.walk(b"tree"))
    assert result == truth
# nose setup/teardown pair: run test_dot from inside the fixture tree.
def predot():
    os.chdir(b"tree")
def postdot():
    os.chdir(b"..")
@nose.with_setup(predot, postdot)
def test_dot():
    # Same walk as test_walking but rooted at b"." — paths keep the "./" prefix.
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"./binary", "binary"),
        (b"./dir.skip_ext/text", "text"),
        (b"./dir/subdir/text", "text"),
        (b"./dir/text", "text"),
        (b"./text", "text"),
        (b"./text.dont_skip_ext", "text"),
    ]
    result = sorted(fr.walk(b"."))
    assert result == truth
# nose setup/teardown pair: descend two levels so b".." points at the tree root.
def predotdot():
    os.chdir(b"tree")
    os.chdir(b"dir")
def postdotdot():
    os.chdir(b"..")
    os.chdir(b"..")
@nose.with_setup(predotdot, postdotdot)
def test_dot_dot():
    # Walking b".." preserves the "../" prefix on every yielded path.
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"../binary", "binary"),
        (b"../dir.skip_ext/text", "text"),
        (b"../dir/subdir/text", "text"),
        (b"../dir/text", "text"),
        (b"../text", "text"),
        (b"../text.dont_skip_ext", "text"),
    ]
    result = sorted(fr.walk(b".."))
    assert result == truth
| StarcoderdataPython |
# Copy the contents of sample.txt into sample_output.txt.
# Fixes of the original: dataset-extraction junk fused to the first line was
# removed, and the two file handles (previously never closed) are now managed
# by a ``with`` block so they are closed even if read/write raises.
with open('sample.txt', 'r') as src, open('sample_output.txt', 'w') as dst:
    dst.write(src.read())
| StarcoderdataPython |
37796 | <reponame>scottwedge/OpenStack-Stein<filename>murano-7.0.0/murano/policy/modify/actions/action_manager.py<gh_stars>10-100
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from stevedore import extension
import yaml
LOG = logging.getLogger(__name__)
class ModifyActionManager(object):
    """Manages modify actions.

    Encapsulates the plugin mechanism for loading modify actions and
    applies congress-provided action specifications to a given object
    model.  Loaded action classes are memoised per manager instance.
    """

    def __init__(self):
        # action name -> action class, populated lazily by load_action()
        self._cache = {}

    def load_action(self, name):
        """Return the action class registered under *name*.

        Results are cached.  Plugins are discovered through distutils
        entry points in the 'murano_policy_modify_actions' namespace.

        :param name: action name
        :return: the action class
        """
        try:
            return self._cache[name]
        except KeyError:
            action = self._load_action(name)
            self._cache[name] = action
            return action

    @staticmethod
    def _load_action(name):
        # Scan the entry-point namespace for the first extension whose
        # name matches, then import its target class.
        mgr = extension.ExtensionManager(
            namespace='murano_policy_modify_actions',
            invoke_on_load=False
        )
        matching = [e for e in mgr.extensions if e.name == name]
        if matching:
            target = matching[0].entry_point_target.replace(':', '.')
            return importutils.import_class(target)
        raise ValueError('No such action definition: {action_name}'
                         .format(action_name=name))

    def apply_action(self, obj, action_spec):
        """Apply the YAML *action_spec* to *obj*.

        The spec maps action names to keyword-argument dictionaries,
        e.g. ``remove-object: {object_id: abc123}``.

        :param obj: subject of modification
        :param action_spec: YAML action spec
        :raise ValueError: in case of malformed action spec
        """
        actions = yaml.safe_load(action_spec)
        if not isinstance(actions, dict):
            raise ValueError('Expected action spec format is '
                             '"action-name: {{p1: v1, ...}}" '
                             'but got "{action_spec}"'
                             .format(action_spec=action_spec))
        for name, kwargs in actions.items():
            LOG.debug('Executing action {name}, params {params}'
                      .format(name=name, params=kwargs))
            # load, instantiate with the spec's parameters, then apply
            action_cls = self.load_action(name)
            action_cls(**kwargs).modify(obj)
170370 | import requests
import kivy
kivy.require('1.9.2')
from kivy.app import App
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Label
api_key = 'get from wunderground.com'
class WU_GridLayout(BoxLayout):
    """Weather widget layout bound to Weather Underground current conditions."""
    # Kivy StringProperty fields; the .kv file presumably binds labels to
    # these names — TODO confirm against the kv definition.
    location = StringProperty("")
    icon_url = StringProperty("")
    condition = StringProperty("")
    temp_string = StringProperty("")
    wind_string = StringProperty("")
    pressure = StringProperty("Pressure:\n 0")
    visibility = StringProperty("Visibility:\n 0")
    clouds = StringProperty("Clouds:\n 0")
    dew_point = StringProperty("Dew Point:\n 0")
    humidity = StringProperty("Humidity:\n 0")
    rainfall = StringProperty("Rainfall:\n 0")
    wu_icon = StringProperty("")
    ob_time = StringProperty("")
    station_id = StringProperty("")
    def get_wu_data(self):
        # Fetch current conditions for the hard-coded Eagle River, AK query.
        # NOTE(review): no error handling — a network failure or non-JSON
        # response raises; api_key is a module-level placeholder.
        r = requests.get('http://api.wunderground.com/api/' + api_key + '/conditions/q/AK/Eagle_River.json')
        data = r.json()
        return data.get("current_observation")
    def update(self):
        # Copy the API's "current_observation" fields into the bound
        # properties.  Raises AttributeError if the API returned no
        # current_observation (json_data is then None).
        json_data = self.get_wu_data()
        self.location = json_data.get("display_location").get("city") + ",\n" + json_data.get("display_location").get("state_name")
        self.icon_url = json_data.get("icon_url")
        self.temp_string = json_data.get("temperature_string")
        self.condition = json_data.get("weather")
        self.wind_string = json_data.get("wind_string")
        self.pressure = "Pressure:\n " + json_data.get("pressure_in")
        self.visibility = "Visibility:\n " + json_data.get("visibility_mi")
        self.clouds = "Clouds:\n " + json_data.get("weather")
        self.dew_point = "Dew Point:\n " + json_data.get("dewpoint_string")
        self.humidity = "Humidity:\n " + json_data.get("relative_humidity")
        self.rainfall = "Rainfall:\n " + json_data.get("precip_today_string")
        self.ob_time = json_data.get("observation_time")
        self.station_id = json_data.get("station_id")
        self.wu_icon = json_data.get("image").get("url")
class WU_BoxLayout(BoxLayout):
    # Empty subclass; presumably styled entirely in the .kv file — TODO confirm.
    pass
class WU_WidgetApp(App):
    """Kivy application whose root widget is the weather grid."""
    def build(self):
        return WU_GridLayout()
# Application instance created at import time (original structure kept).
wuApp = WU_WidgetApp()

# Fixes of the original: dataset-extraction junk fused to the run() line was
# removed and the "__name__==" comparison got conventional spacing.
if __name__ == "__main__":
    wuApp.run()
3327687 | <reponame>UnopposedQuill/muedatos2<gh_stars>0
from django.apps import AppConfig
class EvaluacionConfig(AppConfig):
    """Django app configuration for the 'Evaluacion' application."""
    name = 'Evaluacion'
| StarcoderdataPython |
129768 | <reponame>retresco/Spyder<filename>src/spyder/processor/stripsessions.py<gh_stars>10-100
#
# Copyright (c) 2011 <NAME> <EMAIL>
#
# stripsessions.py 14-Apr-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
Processor to strip all session ids from the extracted URLs. It should be placed
at the very end of the scoper chain in order to process only those URLs that
are relevant for the crawl.
It basically searches for
sid=
jsessionid=
phpsessionid=
aspsessionid=
"""
from spyder.core.constants import CURI_EXTRACTED_URLS
class StripSessionIds(object):
    """Strip session-id query parameters from extracted URLs.

    Operates on the newline-separated URL list stored under
    CURI_EXTRACTED_URLS and removes jsessionid/phpsessid/aspsessionid/sid
    parameters, matching case-insensitively.
    """

    def __init__(self, settings):
        """Initialize me (the *settings* argument is accepted but unused)."""
        # Order matters: the longer parameter names are stripped before the
        # bare 'sid=' so it does not bite into them first.
        self._session_params = ['jsessionid=', 'phpsessid=',
                'aspsessionid=', 'sid=']

    def __call__(self, curi):
        """Strip session parameters from every extracted URL of *curi*."""
        if CURI_EXTRACTED_URLS not in curi.optional_vars:
            return curi
        extracted = curi.optional_vars[CURI_EXTRACTED_URLS].split('\n')
        cleaned = [self._remove_session_ids(u) for u in extracted]
        curi.optional_vars[CURI_EXTRACTED_URLS] = "\n".join(cleaned)
        return curi

    def _remove_session_ids(self, raw_url):
        """Return *raw_url* with every session parameter span cut out.

        Each occurrence is removed from the parameter name up to (but not
        including) the next '&', or to the end of the string.
        """
        for marker in self._session_params:
            lowered = raw_url.lower()
            start = lowered.find(marker)
            while start != -1:
                stop = lowered.find('&', start)
                if stop == -1:
                    raw_url = raw_url[:start]
                else:
                    raw_url = raw_url[:start] + raw_url[stop:]
                lowered = raw_url.lower()
                start = lowered.find(marker)
        return raw_url
| StarcoderdataPython |
46469 | from .breadth_first_traversal import breadth_first_traversal
from .bst import BST
def test_breadth_first_traversal():
    """Test breadth-first traversal."""
    # Level order of a 3-node BST: root, then its children left-to-right.
    tree = BST([2, 1, 3])
    assert breadth_first_traversal(tree) == [2, 1, 3]
def test_breadth_first_traversal_balanced(balanced_bst):
    """Test breadth-first traversal in balanced tree."""
    # balanced_bst is a pytest fixture, presumably defined in conftest.py —
    # TODO confirm its shape matches this expected level order.
    assert breadth_first_traversal(balanced_bst) == [10, 7, 16, 3, 8, 12, 20]
def test_breadth_first_traversal_right(right_heavy):
    """Test breadth-first traversal in right heavy tree."""
    # A right-only chain degenerates to insertion order.
    assert breadth_first_traversal(right_heavy) == [1, 3, 5, 7, 9]
| StarcoderdataPython |
3340703 | #!/usr/bin/python3
## author: <EMAIL>
## brief: find -kth node in uni-directional linked link
def inversely_kth(ll, k):
    """Return the k-th value from the end of *ll* (k=0 means the last one).

    The original was an empty stub; this implements the contract spelled
    out by SolutionTest below.  Works for any forward-only iterable (a
    singly linked list exposing __iter__, a plain list, a generator, ...)
    by keeping a sliding window of the last k+1 values, so the input is
    traversed exactly once.

    :param ll: iterable of node values
    :param k: 0-based index counted from the tail
    :return: the value, or None when k is negative or the sequence has
             fewer than k+1 items
    """
    from collections import deque  # local import keeps the module's top unchanged
    if k < 0:
        return None
    window = deque(ll, maxlen=k + 1)  # retains only the last k+1 items
    if len(window) <= k:
        return None  # sequence shorter than k+1
    return window[0]
import unittest
class SolutionTest(unittest.TestCase):
    """Spec for inversely_kth: k counts from the tail, k=0 is the last node."""
    def test_basics(self):
        self.assertInverselyKth([], 0, None)
        self.assertInverselyKth([], 3, None)
        self.assertInverselyKth([1], 0, 1)
        self.assertInverselyKth([1], 1, None)
        self.assertInverselyKth([1,2], 0, 2)
        self.assertInverselyKth([1,2], 1, 1)
        self.assertInverselyKth([1,2], 2, None)
        self.assertInverselyKth([1,2,3,4,5], 0, 5)
        self.assertInverselyKth([1,2,3,4,5], 1, 4)
        self.assertInverselyKth([1,2,3,4,5], 2, 3)
        self.assertInverselyKth([1,2,3,4,5], 3, 2)
        self.assertInverselyKth([1,2,3,4,5], 4, 1)
        self.assertInverselyKth([1,2,3,4,5], 5, None)
    def assertInverselyKth(self, ll, k, expected_val):
        # NOTE(review): stub — every case above passes vacuously.  Intended
        # body is presumably:
        #     self.assertEqual(inversely_kth(ll, k), expected_val)
        pass
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1746553 | from django.template import Library
# Template-tag registry for this Django app.
register = Library()
@register.inclusion_tag("tag_app/tag_list.html")
def show_tags_for(obj):
    """Render tag_list.html with the given object in context."""
    return {"obj": obj}
@register.inclusion_tag("tag_app/tag_count_list.html")
def show_tag_counts(tag_counts):
    """Render tag_count_list.html with the given tag counts in context."""
    # Fix: dataset-extraction junk ("| StarcoderdataPython |") was fused onto
    # the original return line; removed.
    return {"tag_counts": tag_counts}
3286720 | """
Contains logic of building GUI for the application.
"""
import os
from tkinter import Tk, ttk, messagebox
from tkinter.filedialog import askdirectory
from icon_apply import seticon
from icon_online_search import find_and_convert
class CreateGUI():
    """
    Creates the GUI using tkinter module.
    """
    # Last directory chosen by the user; None until get_directory() runs.
    dir_name = None
    def __init__(self, root_obj):
        """
        Initializes the Tk() root object for the class and calls the 'construct_dialog' method.
        """
        self.root_obj = root_obj
        self.construct_dialog()
    def bulk_icon_apply(self, directory):
        """
        Walks recursively through the directory and its subdirectories and sets icons
        to each respective folder fetched by the 'get_ico_file' method.
        """
        cnt = 0  # number of folders that actually received an icon
        # The chosen directory itself first...
        root, name = os.path.split(directory)
        main_directory_icon = self.get_ico_file(root, name)
        if main_directory_icon:
            seticon(directory, main_directory_icon, 0)
            cnt += 1
        # ...then every subdirectory found by the recursive walk.
        for root, dirs, _ in os.walk(directory):
            for name in dirs:
                subdir_icon = self.get_ico_file(root, name)
                if subdir_icon:
                    seticon(os.path.join(root, name), subdir_icon, 0)
                    cnt += 1
        messagebox.showinfo("Success", "Icons applied successfully to {} folders.".format(cnt))
    def get_directory(self):
        """
        Validates the entered directory name and calls the 'bulk_icon_apply' method.
        """
        self.dir_name = askdirectory(title="Choose directory")
        if self.dir_name:
            if os.path.isdir(self.dir_name):
                try:
                    self.bulk_icon_apply(self.dir_name)
                except IOError as err:
                    # Surface I/O failures to the user instead of crashing the GUI.
                    messagebox.showerror("Error", err)
            else:
                messagebox.showerror("Error", "Invalid directory.")
    @staticmethod
    def get_ico_file(root, name):
        """
        Fetches the icon to be applied to the folder.
        """
        # find_and_convert comes from icon_online_search (not visible here).
        # NOTE(review): if it can return None, os.path.isfile(None) raises
        # TypeError — confirm its contract.
        ico_file = find_and_convert(root, name)
        return ico_file if os.path.isfile(ico_file) else None
    def construct_dialog(self):
        """
        Constructs the GUI dialog form.
        """
        self.root_obj.title("Icon apply - advanced")
        self.root_obj.geometry('{}x{}'.format(250, 60))
        label = ttk.Label(self.root_obj, text="Search and apply icons", font=("Arial", 10))
        label.pack()
        button = ttk.Button(self.root_obj, text='Select directory', command=self.get_directory)
        button.pack()
def main():
    """
    Entry-point of the function.
    """
    # Build the Tk root, attach the dialog, and block in the event loop.
    root = Tk()
    CreateGUI(root)
    root.mainloop()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
30042 | from __future__ import print_function
import struct
import copy
#this class handles different protocol versions
class RobotStateRT(object):
    """Protocol-version dispatcher for real-time robot-state packets.

    The packet's leading length field identifies the protocol revision;
    unknown lengths fall back to an empty RobotStateRT instance.
    """

    @staticmethod
    def unpack(buf):
        fallback = RobotStateRT()
        plen, _ptype = struct.unpack_from("!IB", buf)
        if plen == 756:
            return RobotStateRT_V15.unpack(buf)
        if plen == 812:
            return RobotStateRT_V18.unpack(buf)
        if plen == 1044:
            return RobotStateRT_V30.unpack(buf)
        print("RobotStateRT has wrong length: " + str(plen))
        return fallback
#this parses RobotStateRT for versions = v1.5
#http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd
class RobotStateRT_V15(object):
    """Real-time robot state for protocol v1.5 (756-byte packets).

    All fields are floats / lists of floats decoded from the controller's
    big-endian stream.  The original unpack() unrolled every field read and
    piped each freshly built float list through copy.deepcopy (a no-op cost);
    the cursor helpers below keep the byte layout and results identical
    without the boilerplate.
    """
    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value']

    @staticmethod
    def unpack(buf):
        """Decode *buf* into a RobotStateRT_V15.

        :raise Exception: when the embedded length field differs from len(buf).
        """
        pos = [0]  # running byte offset (list so the closures can mutate it)

        def take(fmt, width):
            values = struct.unpack_from(fmt, buf, pos[0])
            pos[0] += width
            return values

        def doubles(n):
            # n consecutive big-endian doubles as a fresh list
            return list(take("!" + "d" * n, 8 * n))

        message_size = take("!i", 4)[0]
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V15()
        rs.time = doubles(1)[0]
        rs.q_target = doubles(6)
        rs.qd_target = doubles(6)
        rs.qdd_target = doubles(6)
        rs.i_target = doubles(6)
        rs.m_target = doubles(6)
        rs.q_actual = doubles(6)
        rs.qd_actual = doubles(6)
        rs.i_actual = doubles(6)
        rs.tool_acc_values = doubles(3)
        pos[0] += 120  # 15 unused doubles
        rs.unused = []
        rs.tcp_force = doubles(6)
        rs.tool_vector = doubles(6)
        rs.tcp_speed = doubles(6)
        rs.digital_input_bits = doubles(1)[0]
        rs.motor_temperatures = doubles(6)
        rs.controller_timer = doubles(1)[0]
        rs.test_value = doubles(1)[0]
        return rs
#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8)
class RobotStateRT_V18(object):
    """Real-time robot state for protocols v1.6-v1.8 (812-byte packets).

    Identical layout to v1.5 plus trailing robot_mode and joint_modes
    fields.  The original unrolled every read and deep-copied each freshly
    built float list; the cursor helpers keep behaviour identical without
    that boilerplate.
    """
    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value',
                 'robot_mode', 'joint_modes']

    @staticmethod
    def unpack(buf):
        """Decode *buf* into a RobotStateRT_V18.

        :raise Exception: when the embedded length field differs from len(buf).
        """
        pos = [0]  # running byte offset

        def take(fmt, width):
            values = struct.unpack_from(fmt, buf, pos[0])
            pos[0] += width
            return values

        def doubles(n):
            return list(take("!" + "d" * n, 8 * n))

        message_size = take("!i", 4)[0]
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V18()
        rs.time = doubles(1)[0]
        rs.q_target = doubles(6)
        rs.qd_target = doubles(6)
        rs.qdd_target = doubles(6)
        rs.i_target = doubles(6)
        rs.m_target = doubles(6)
        rs.q_actual = doubles(6)
        rs.qd_actual = doubles(6)
        rs.i_actual = doubles(6)
        rs.tool_acc_values = doubles(3)
        pos[0] += 120  # 15 unused doubles
        rs.unused = []
        rs.tcp_force = doubles(6)
        rs.tool_vector = doubles(6)
        rs.tcp_speed = doubles(6)
        rs.digital_input_bits = doubles(1)[0]
        rs.motor_temperatures = doubles(6)
        rs.controller_timer = doubles(1)[0]
        rs.test_value = doubles(1)[0]
        rs.robot_mode = doubles(1)[0]
        rs.joint_modes = doubles(6)
        return rs
#this parses RobotStateRT for versions >=3.0 (i.e. 3.0)
class RobotStateRT_V30(object):
    """Real-time robot state for protocol v3.0 (1044-byte packets).

    The v3.0 stream interleaves three reserved gaps (6, 6 and 2 unused
    doubles) that are skipped, not stored.  The original unrolled every
    field read and deep-copied each freshly built float list; the cursor
    helpers keep the byte layout and results identical without that
    boilerplate.
    """
    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'i_control',
                 'tool_vector_actual', 'tcp_speed_actual', 'tcp_force',
                 'tool_vector_target', 'tcp_speed_target',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value',
                 'robot_mode', 'joint_modes', 'safety_mode',
                 'tool_acc_values',
                 'speed_scaling', 'linear_momentum_norm',
                 'v_main', 'v_robot', 'i_robot', 'v_actual']

    @staticmethod
    def unpack(buf):
        """Decode *buf* into a RobotStateRT_V30.

        :raise Exception: when the embedded length field differs from len(buf).
        """
        pos = [0]  # running byte offset

        def take(fmt, width):
            values = struct.unpack_from(fmt, buf, pos[0])
            pos[0] += width
            return values

        def doubles(n):
            return list(take("!" + "d" * n, 8 * n))

        message_size = take("!i", 4)[0]
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V30()
        rs.time = doubles(1)[0]
        rs.q_target = doubles(6)
        rs.qd_target = doubles(6)
        rs.qdd_target = doubles(6)
        rs.i_target = doubles(6)
        rs.m_target = doubles(6)
        rs.q_actual = doubles(6)
        rs.qd_actual = doubles(6)
        rs.i_actual = doubles(6)
        rs.i_control = doubles(6)
        rs.tool_vector_actual = doubles(6)
        rs.tcp_speed_actual = doubles(6)
        rs.tcp_force = doubles(6)
        rs.tool_vector_target = doubles(6)
        rs.tcp_speed_target = doubles(6)
        rs.digital_input_bits = doubles(1)[0]
        rs.motor_temperatures = doubles(6)
        rs.controller_timer = doubles(1)[0]
        rs.test_value = doubles(1)[0]
        rs.robot_mode = doubles(1)[0]
        rs.joint_modes = doubles(6)
        rs.safety_mode = doubles(1)[0]
        pos[0] += 48  # 6 unused doubles
        rs.tool_acc_values = doubles(3)
        pos[0] += 48  # 6 unused doubles
        rs.speed_scaling = doubles(1)[0]
        rs.linear_momentum_norm = doubles(1)[0]
        pos[0] += 16  # 2 unused doubles
        rs.v_main = doubles(1)[0]
        rs.v_robot = doubles(1)[0]
        rs.i_robot = doubles(1)[0]
        rs.v_actual = doubles(6)
        return rs
| StarcoderdataPython |
1780709 | <gh_stars>100-1000
import os

# Runtime configuration; every value is overridable via environment variables.
# BUG FIX: the original read ``int(os.getenv('NODE_ID'), '0')`` — that passes
# '0' as int()'s *base* argument and crashes (int(None, '0')) whenever NODE_ID
# is unset.  The default string belongs inside os.getenv().
NODE_ID = int(os.getenv('NODE_ID', '0'))
MU_SUFFIX = os.getenv('MU_SUFFIX', 'zhaoj.in')
MU_REGEX = os.getenv('MU_REGEX', '%5m%id.%suffix')
SERVER_PUB_ADDR = os.getenv('SERVER_PUB_ADDR', '127.0.0.1')
API_INTERFACE = os.getenv('API_INTERFACE', 'modwebapi')
WEBAPI_URL = os.getenv('WEBAPI_URL', 'https://demo.sspanel.host')
WEBAPI_TOKEN = os.getenv('WEBAPI_TOKEN', '<PASSWORD>')
API_UPDATE_TIME = int(os.getenv('API_UPDATE_TIME', '60'))
"""
get port offset by node->name
HK 1 #9900
then offset is 9900
"""
GET_PORT_OFFSET_BY_NODE_NAME = os.getenv('GET_PORT_OFFSET_BY_NODE_NAME', 'true') == 'true'
| StarcoderdataPython |
1794977 | from .disc import enable
| StarcoderdataPython |
1626679 | import json
import numpy as np
class Turn:
    """One dialogue turn together with its dialogue-state annotations."""

    def __init__(self, turn_id, transcript, turn_label, belief_state, system_acts, system_transcript, asr=None, num=None):
        self.id = turn_id
        self.transcript = transcript
        self.turn_label = turn_label
        self.belief_state = belief_state
        self.system_acts = system_acts
        self.system_transcript = system_transcript
        # Falsy values collapse to fresh empty containers.
        self.asr = asr if asr else []
        self.num = num if num else {}

    def to_dict(self):
        # NOTE: 'asr' is deliberately not serialized (matches from_dict's default).
        return dict(turn_id=self.id,
                    transcript=self.transcript,
                    turn_label=self.turn_label,
                    belief_state=self.belief_state,
                    system_acts=self.system_acts,
                    system_transcript=self.system_transcript,
                    num=self.num)

    @classmethod
    def from_dict(cls, d):
        """Rebuild a Turn from a to_dict()-shaped mapping."""
        return cls(**d)
class Dialogue:
    """An identified sequence of Turn objects."""

    def __init__(self, dialogue_id, turns):
        self.id = dialogue_id
        self.turns = turns

    def __len__(self):
        # Length of a dialogue == number of turns.
        return len(self.turns)

    def to_dict(self):
        return {'dialogue_id': self.id,
                'turns': [turn.to_dict() for turn in self.turns]}

    @classmethod
    def from_dict(cls, d):
        """Rebuild a Dialogue (and its Turns) from a to_dict()-shaped mapping."""
        turns = [Turn.from_dict(item) for item in d['turns']]
        return cls(d['dialogue_id'], turns)
class Dataset:
    """A collection of dialogues with turn-level evaluation utilities."""
    def __init__(self, dialogues):
        self.dialogues = dialogues
    def __len__(self):
        return len(self.dialogues)
    def iter_turns(self):
        """Yield every turn of every dialogue, in order."""
        for d in self.dialogues:
            for t in d.turns:
                yield t
    def to_dict(self):
        """Serialize to a JSON-compatible dict."""
        return {'dialogues': [d.to_dict() for d in self.dialogues]}
    @classmethod
    def from_dict(cls, d):
        """Inverse of ``to_dict``."""
        return cls([Dialogue.from_dict(dd) for dd in d['dialogues']])
    def evaluate_preds(self, preds):
        """Score flat per-turn predictions against the gold annotations.

        ``preds`` must be aligned with ``iter_turns()`` order: one list of
        (slot, value) pairs per turn.  Returns turn-level inform/request
        accuracy and joint-goal accuracy (all as means over turns).
        """
        request = []
        inform = []
        joint_goal = []
        # Normalization map for known surface-form variants in the gold labels.
        fix = {'centre': 'center', 'areas': 'area', 'phone number': 'number'}
        i = 0
        for d in self.dialogues:
            # Accumulated belief state predicted so far in this dialogue.
            pred_state = {}
            for t in d.turns:
                gold_request = set([(s, v) for s, v in t.turn_label if s == 'request'])
                gold_inform = set([(s, v) for s, v in t.turn_label if s != 'request'])
                pred_request = set([(s, v) for s, v in preds[i] if s == 'request'])
                pred_inform = set([(s, v) for s, v in preds[i] if s != 'request'])
                request.append(gold_request == pred_request)
                inform.append(gold_inform == pred_inform)
                # Joint goal: compare the full recovered belief state, with
                # gold slot/value strings normalized through ``fix``.
                gold_recovered = set()
                pred_recovered = set()
                for s, v in pred_inform:
                    pred_state[s] = v
                for b in t.belief_state:
                    for s, v in b['slots']:
                        if b['act'] != 'request':
                            gold_recovered.add((b['act'], fix.get(s.strip(), s.strip()), fix.get(v.strip(), v.strip())))
                for s, v in pred_state.items():
                    pred_recovered.add(('inform', s, v))
                joint_goal.append(gold_recovered == pred_recovered)
                i += 1
        return {'turn_inform': np.mean(inform), 'turn_request': np.mean(request), 'joint_goal': np.mean(joint_goal)}
class Ontology:
    """Slot inventory, candidate values per slot, and numericalized lookups."""
    def __init__(self, slots=None, values=None, num=None):
        self.slots = slots if slots else []
        self.values = values if values else {}
        self.num = num if num else {}
    def to_dict(self):
        """Serialize to a JSON-compatible dict."""
        return dict(slots=self.slots, values=self.values, num=self.num)
    @classmethod
    def from_dict(cls, d):
        """Inverse of :meth:`to_dict`."""
        return cls(**d)
| StarcoderdataPython |
4820179 | <reponame>c4dt/mlbench-core<filename>mlbench_core/optim/pytorch/__init__.py
from .optim import *
| StarcoderdataPython |
1710434 | <filename>mini_ipmi_smartctl.py
#!/usr/bin/env python3
## Installation instructions: https://github.com/nobodysu/zabbix-mini-IPMI ##
# Only one out of three system-specific setting is used, PATH considered.
binPath_LINUX = r'smartctl'
binPath_WIN = r'C:\Program Files\smartmontools\bin\smartctl.exe'
binPath_OTHER = r'/usr/local/sbin/smartctl'
# path to zabbix agent configuration file
agentConf_LINUX = r'/etc/zabbix/zabbix_agentd.conf'
agentConf_WIN = r'C:\zabbix_agentd.conf'
agentConf_OTHER = r'/usr/local/etc/zabbix3/zabbix_agentd.conf'
senderPath_LINUX = r'zabbix_sender'
senderPath_WIN = r'C:\zabbix-agent\bin\win32\zabbix_sender.exe'
senderPath_OTHER = r'/usr/local/bin/zabbix_sender'
# path to second send script
senderPyPath_LINUX = r'/etc/zabbix/scripts/sender_wrapper.py'
senderPyPath_WIN = r'C:\zabbix-agent\scripts\sender_wrapper.py'
senderPyPath_OTHER = r'/usr/local/etc/zabbix/scripts/sender_wrapper.py'
## Advanced configuration ##
# 'True' or 'False'
isCheckNVMe = False # Additional overhead. Should be disabled if smartmontools is >= 7 or NVMe is absent.
isIgnoreDuplicates = True
# type, min, max, critical
thresholds = (
('hdd', 25, 45, 60),
)
isHeavyDebug = False
perDiskTimeout = 3 # Single disk query can not exceed this value. Python33 or above required.
timeout = '80' # How long the script must wait between LLD and sending, increase if data received late (does not affect windows).
# This setting MUST be lower than 'Update interval' in discovery rule.
# Manually provide disk list or RAID configuration if needed.
diskListManual = []
# like this:
#diskListManual = ['/dev/sda -d sat+megaraid,4', '/dev/sda -d sat+megaraid,5']
# more info: https://www.smartmontools.org/wiki/Supported_RAID-Controllers
# These models will not produce 'NOTEMP' warning. Pull requests are welcome.
noTemperatureSensorModels = (
'INTEL SSDSC2CW060A3',
'AXXROMBSASMR',
)
# re.IGNORECASE | re.MULTILINE
modelPatterns = (
'^Device Model:\s+(.+)$',
'^Device:\s+(.+)$',
'^Product:\s+(.+)$',
'^Model Number:\s+(.+)$',
)
# First match returned right away; re.IGNORECASE | re.MULTILINE
temperaturePatterns = (
'^(?:\s+)?\d+\s+Temperature_Celsius\s+[\w-]+\s+\d{3}\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+(\d+)',
'^(?:\s+)?Current\s+Drive\s+Temperature:\s+(\d+)\s+',
'^(?:\s+)?Temperature:\s+(\d+)\s+C',
'^(?:\s+)?\d+\s+Airflow_Temperature_Cel\s+[\w-]+\s+\d{3}\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+[\w-]+\s+(\d+)',
)
## End of configuration ##
import sys
import subprocess
import re
import shlex
from sender_wrapper import (fail_ifNot_Py3, sanitizeStr, clearDiskTypeStr, processData)
def scanDisks(mode):
    '''Run ``smartctl --scan`` and return (error_string, device_list).

    ``mode`` is 'NOTYPE' (plain scan) or 'NVME' (scan with ``-d nvme``);
    any other value terminates the script.  ``error_string`` is '' on
    success, otherwise a SCAN_* code reported upstream to Zabbix.
    '''
    if mode == 'NOTYPE':
        cmd = [binPath, '--scan']
    elif mode == 'NVME':
        cmd = [binPath, '--scan', '-d', 'nvme']
    else:
        print('Invalid type %s. Terminating.' % mode)
        sys.exit(1)
    try:
        p = subprocess.check_output(cmd, universal_newlines=True)
        error = ''
    except OSError as e:
        p = ''
        # errno 2 == ENOENT: the smartctl binary itself is missing.
        if e.args[0] == 2:
            error = 'SCAN_OS_NOCMD_%s' % mode
        else:
            error = 'SCAN_OS_ERROR_%s' % mode
    except Exception as e:
        # Best effort: keep whatever output the failed process produced.
        try:
            p = e.output
        except:
            p = ''
        error = 'SCAN_UNKNOWN_ERROR_%s' % mode
        # In verbose ('getverb') mode surface the real exception.
        if sys.argv[1] == 'getverb':
            raise
    # TESTING
    #if mode == 'NVME': p = '''/dev/nvme0 -d nvme # /dev/nvme0, NVMe device\n/dev/bus/0 -d megaraid,4 # /dev/bus/0 [megaraid_disk_04], SCSI device'''
    # Determine full device names and types
    disks = re.findall(r'^(/dev/[^#]+)', p, re.M)
    return error, disks
def moveCsmiToBegining(disks):
    """Return *disks* reordered so CSMI devices come first.

    Relative order within each group is preserved.
    """
    csmi_pattern = re.compile(r'\/csmi\d+\,\d+', re.I)
    csmi_disks = [disk for disk in disks if csmi_pattern.search(disk)]
    other_disks = [disk for disk in disks if not csmi_pattern.search(disk)]
    return csmi_disks + other_disks
def listDisks():
    """Return ([scan_error_notype, scan_error_nvme], ordered_disk_list).

    Uses ``diskListManual`` when provided, otherwise scans via smartctl.
    Duplicates are removed (order preserved) and CSMI devices moved first.
    """
    errors = []
    if not diskListManual:
        scanDisks_Out = scanDisks('NOTYPE')
        errors.append(scanDisks_Out[0]) # SCAN_OS_NOCMD_*, SCAN_OS_ERROR_*, SCAN_UNKNOWN_ERROR_*
        disks = scanDisks_Out[1]
        if isCheckNVMe:
            scanDisksNVMe_Out = scanDisks('NVME')
            errors.append(scanDisksNVMe_Out[0])
            disks.extend(scanDisksNVMe_Out[1])
        else:
            errors.append('')
    else:
        # NOTE(review): on this branch ``errors`` stays empty, but the main
        # block indexes errors[0] and errors[1] — using diskListManual would
        # raise IndexError there.  Consider appending two '' entries here.
        disks = diskListManual
    # Remove duplicates preserving order
    diskResult = []
    for i in disks:
        if i not in diskResult:
            diskResult.append(i)
    diskResult = moveCsmiToBegining(diskResult)
    return errors, diskResult
def findErrorsAndOuts(cD):
    """Query one disk with ``smartctl -A -i -n standby`` and classify failures.

    *cD* is the device string (may include ``-d type`` arguments, split via
    shlex).  Returns (error_code_or_None, smartctl_output).  Error codes are
    the status strings reported to Zabbix (STANDBY, SLEEP, TIMEOUT, ...).
    """
    err = None
    p = ''
    try:
        cmd = [binPath, '-A', '-i', '-n', 'standby'] + shlex.split(cD)
        # subprocess timeout= requires Python 3.3+; fall back without it.
        if (sys.version_info.major == 3 and
            sys.version_info.minor <= 2):
            p = subprocess.check_output(cmd, universal_newlines=True)
            err = 'OLD_PYTHON32_OR_LESS'
        else:
            p = subprocess.check_output(cmd, universal_newlines=True, timeout=perDiskTimeout)
    except OSError as e:
        # errno 2 == ENOENT: smartctl binary missing.
        if e.args[0] == 2:
            err = 'D_OS_NOCMD'
        else:
            err = 'D_OS_ERROR'
            if sys.argv[1] == 'getverb': raise
    except subprocess.CalledProcessError as e:
        # smartctl exits non-zero for many benign states; inspect its output.
        p = e.output
        if 'Device is in STANDBY (OS)' in p:
            err = 'STANDBY_OS'
        elif 'Device is in STANDBY' in p:
            err = 'STANDBY'
        elif 'Device is in SLEEP' in p:
            err = 'SLEEP'
        elif 'Unknown USB bridge' in p:
            err = 'UNK_USB_BRIDGE'
        elif r"Packet Interface Devices [this device: CD/DVD] don't support ATA SMART" in p:
            err = 'CD_DVD_DRIVE'
        elif (sys.version_info.major == 3 and
            sys.version_info.minor <= 1):
            err = 'UNK_OLD_PYTHON31_OR_LESS'
        elif e.args:
            # Fall back to reporting smartctl's raw exit status.
            err = 'ERR_CODE_%s' % str(e.args[0])
        else:
            err = 'UNKNOWN_RESPONSE'
    except subprocess.TimeoutExpired:
        err = 'TIMEOUT'
    except Exception as e:
        err = 'UNKNOWN_EXC_ERROR'
        if sys.argv[1] == 'getverb': raise
        try:
            p = e.output
        except:
            p = ''
    return (err, p)
def findDiskTemp(p):
    """Return the first temperature value (string) found in smartctl output *p*.

    Tries each regex in the module-level ``temperaturePatterns`` in order;
    returns None when nothing matches.
    """
    for pattern in temperaturePatterns:
        match = re.search(pattern, p, re.I | re.M)
        if match:
            return match.group(1)
    return None
def findSerial(p):
    """Extract the drive serial number from smartctl output *p*, or None."""
    match = re.search(r'^(?:\s+)?Serial Number:\s+(.+)', p, re.I | re.M)
    return match.group(1) if match else None
def chooseSystemSpecificPaths():
    """Pick the smartctl/agent/sender paths matching the current platform.

    Returns (binPath, agentConf, senderPath, senderPyPath) from the
    module-level *_LINUX / *_WIN / *_OTHER constants.
    """
    if sys.platform.startswith('linux'):
        binPath_ = binPath_LINUX
        agentConf_ = agentConf_LINUX
        senderPath_ = senderPath_LINUX
        senderPyPath_ = senderPyPath_LINUX
    elif sys.platform == 'win32':
        binPath_ = binPath_WIN
        agentConf_ = agentConf_WIN
        senderPath_ = senderPath_WIN
        senderPyPath_ = senderPyPath_WIN
    else:
        # BSD / macOS / anything else.
        binPath_ = binPath_OTHER
        agentConf_ = agentConf_OTHER
        senderPath_ = senderPath_OTHER
        senderPyPath_ = senderPyPath_OTHER
    if sys.argv[1] == 'getverb':
        print(' Path guess: %s\n' % sys.platform)
    return (binPath_, agentConf_, senderPath_, senderPyPath_)
def isModelWithoutSensor(p):
    """True if the device model in smartctl output *p* is known to lack a
    temperature sensor (listed in ``noTemperatureSensorModels``)."""
    for pattern in modelPatterns:
        match = re.search(pattern, p, re.I | re.M)
        if match:
            # Only the first pattern that matches is consulted.
            return match.group(1).strip() in noTemperatureSensorModels
    return False
def isDummyNVMe(p):
    """Detect placeholder NVMe entries in smartctl output *p*.

    An all-zero Subsystem ID together with an all-zero IEEE OUI identifier
    indicates a dummy device entry rather than real hardware.
    """
    subsystemRe = re.search(r'Subsystem ID:\s+0x0000', p, re.I)
    ouiRe = re.search(r'IEEE OUI Identifier:\s+0x000000', p, re.I)
    # Idiom fix: collapse the verbose if/else returning True/False.
    return bool(subsystemRe and ouiRe)
if __name__ == '__main__':
    # CLI: argv[1] is the action ('getverb' for verbose), argv[2] the Zabbix host.
    fail_ifNot_Py3()
    paths_Out = chooseSystemSpecificPaths()
    binPath = paths_Out[0]
    agentConf = paths_Out[1]
    senderPath = paths_Out[2]
    senderPyPath = paths_Out[3]
    host = sys.argv[2]
    # senderData: zabbix_sender items; jsonData: LLD discovery entries.
    senderData = []
    jsonData = []
    listDisks_Out = listDisks()
    scanErrors = listDisks_Out[0]
    diskList = listDisks_Out[1]
    scanErrorNotype = scanErrors[0]
    scanErrorNvme = scanErrors[1]
    sessionSerials = []
    allTemps = []
    diskError_NOCMD = False
    for d in diskList:
        clearedD = clearDiskTypeStr(d)
        sanitizedD = sanitizeStr(clearedD)
        jsonData.append({'{#DISK}':sanitizedD})
        disk_Out = findErrorsAndOuts(clearedD)
        diskError = disk_Out[0]
        diskPout = disk_Out[1]
        if diskError:
            # smartctl itself is unusable — no point querying further disks.
            if 'D_OS_' in diskError:
                diskError_NOCMD = diskError
                break # other disks json are discarded
        # Track serials so the same physical drive exposed twice (e.g. via
        # RAID passthrough) is flagged as a duplicate.
        isDuplicate = False
        serial = findSerial(diskPout)
        if serial in sessionSerials:
            isDuplicate = True
        elif serial:
            sessionSerials.append(serial)
        temp = findDiskTemp(diskPout)
        # Derive a single DriveStatus string, by decreasing priority.
        if isDuplicate:
            if isIgnoreDuplicates:
                driveStatus = 'DUPLICATE_IGNORE'
            else:
                driveStatus = 'DUPLICATE_MENTION'
        elif diskError:
            driveStatus = diskError
        elif isModelWithoutSensor(diskPout):
            driveStatus = 'NOSENSOR'
        elif isDummyNVMe(diskPout):
            driveStatus = 'DUMMY_NVME'
        elif not temp:
            driveStatus = 'NOTEMP'
        else:
            driveStatus = 'PROCESSED'
        senderData.append('"%s" mini.disk.info[%s,DriveStatus] "%s"' % (host, sanitizedD, driveStatus))
        if temp:
            senderData.append('"%s" mini.disk.temp[%s] "%s"' % (host, sanitizedD, temp))
            allTemps.append(temp)
            senderData.append('"%s" mini.disk.tempMin[%s] "%s"' % (host, sanitizedD, thresholds[0][1]))
            senderData.append('"%s" mini.disk.tempMax[%s] "%s"' % (host, sanitizedD, thresholds[0][2]))
            senderData.append('"%s" mini.disk.tempCrit[%s] "%s"' % (host, sanitizedD, thresholds[0][3]))
        if isHeavyDebug:
            # Ship escaped raw smartctl output for troubleshooting.
            heavyOut = repr(diskPout.strip())
            heavyOut = heavyOut.strip().strip('"').strip("'").strip()
            heavyOut = heavyOut.replace("'", r"\'").replace('"', r'\"')
            debugData = '"%s" mini.disk.HeavyDebug "%s"' % (host, heavyOut)
            if diskError:
                if 'ERR_CODE_' in diskError:
                    senderData.append(debugData)
            elif not temp:
                if not isModelWithoutSensor(diskPout):
                    senderData.append(debugData)
    # Overall configuration status, by decreasing severity.
    if scanErrorNotype:
        configStatus = scanErrorNotype
    elif diskError_NOCMD:
        configStatus = diskError_NOCMD
    elif not diskList:
        configStatus = 'NODISKS'
    elif not allTemps:
        configStatus = 'NODISKTEMPS'
    else:
        configStatus = 'CONFIGURED'
    senderData.append('"%s" mini.disk.info[ConfigStatus] "%s"' % (host, configStatus))
    if allTemps:
        # NOTE(review): allTemps holds *strings*, so max() compares
        # lexicographically ('9' > '45') — consider max(allTemps, key=int).
        senderData.append('"%s" mini.disk.temp[MAX] "%s"' % (host, str(max(allTemps))))
    link = r'https://github.com/nobodysu/zabbix-mini-IPMI/issues'
    sendStatusKey = 'mini.disk.info[SendStatus]'
    processData(senderData, jsonData, agentConf, senderPyPath, senderPath, timeout, host, link, sendStatusKey)
1760888 | <gh_stars>1-10
"""
Local templates for the plone PAS zopeskel project
"""
from zopeskel.localcommands import ZopeSkelLocalTemplate
class PlonePasSubTemplate(ZopeSkelLocalTemplate):
    """Base for all Plone PAS local-command templates.

    Subclasses only set ``_template_dir`` (the skeleton files to render)
    and ``summary`` (the one-line description shown in listings).
    """
    use_cheetah = True
    # Insertion marker in the generated plugin module.
    marker_name = 'implemented plugins'
    parent_templates = ['plone_pas']
class ExtractionPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS extraction plugin skeleton."""
    _template_dir = 'templates/plone_pas/extraction'
    summary = "A Plone PAS Extraction Plugin"
class AuthenticationPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS authentication plugin skeleton."""
    _template_dir = 'templates/plone_pas/authentication'
    summary = "A Plone PAS Authentication Plugin"
class ChallengePlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS challenge plugin skeleton."""
    _template_dir = 'templates/plone_pas/challenge'
    summary = "A Plone PAS Challenge Plugin"
class CredentialsResetPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS credentials-reset plugin skeleton."""
    _template_dir = 'templates/plone_pas/credentials_reset'
    summary = "A Plone PAS CredentialsReset Plugin"
class UserAdderPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS user-adder plugin skeleton."""
    _template_dir = 'templates/plone_pas/user_adder'
    summary = "A Plone PAS UserAdder Plugin"
class RoleAssignerPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS role-assigner plugin skeleton."""
    _template_dir = 'templates/plone_pas/role_assigner'
    summary = "A Plone PAS RoleAssigner Plugin"
class UserFactoryPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS user-factory plugin skeleton."""
    _template_dir = 'templates/plone_pas/user_factory'
    summary = "A Plone PAS UserFactory Plugin"
class AnonymousUserFactoryPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS anonymous-user-factory plugin skeleton."""
    _template_dir = 'templates/plone_pas/anonymous_user_factory'
    summary = "A Plone PAS AnonymousUserFactory Plugin"
class PropertiesPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS properties plugin skeleton."""
    _template_dir = 'templates/plone_pas/properties'
    summary = "A Plone PAS Properties Plugin"
class GroupsPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS groups plugin skeleton."""
    _template_dir = 'templates/plone_pas/groups'
    summary = "A Plone PAS Groups Plugin"
class RolesPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS roles plugin skeleton."""
    _template_dir = 'templates/plone_pas/roles'
    summary = "A Plone PAS Roles Plugin"
class UpdatePlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS update plugin skeleton."""
    _template_dir = 'templates/plone_pas/update'
    summary = "A Plone PAS Update Plugin"
class ValidationPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS validation plugin skeleton."""
    _template_dir = 'templates/plone_pas/validation'
    summary = "A Plone PAS Validation Plugin"
class UserEnumerationPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS user-enumeration plugin skeleton."""
    _template_dir = 'templates/plone_pas/user_enumeration'
    summary = "A Plone PAS UserEnumeration Plugin"
class GroupEnumerationPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS group-enumeration plugin skeleton."""
    _template_dir = 'templates/plone_pas/group_enumeration'
    summary = "A Plone PAS GroupEnumeration Plugin"
class RoleEnumerationPlugin(PlonePasSubTemplate):
    """Local command generating a Plone PAS role-enumeration plugin skeleton."""
    _template_dir = 'templates/plone_pas/role_enumeration'
    summary = "A Plone PAS RoleEnumeration Plugin"
| StarcoderdataPython |
87010 | <reponame>Skaleras/Advent_of_Code<filename>day12/part1.py
# N stands for North
# S stands for South
# E stands for East
# W stands for West
# (translated from Spanish) "Oeste" is West and "Este" is East
# NWSE: North and South are opposites, so are West and East
#          N
#          |
#       W---+--E
#          |
#          S
# L stands for Left
# R stands for Right
# F stands for Forward
# NOTE(review): hard-coded absolute Windows path is machine-specific, and
# ``input`` shadows the builtin of the same name.
with open('E:\code\AoC\day12\input.txt', 'r') as input:
    instructions = input.read().split('\n')
def degrees_to_position(action, degrees):
    """Translate a turn instruction into a signed count of 90-degree steps.

    *action* is 'L' or 'R'; *degrees* must be one of 90, 180, 270.  Left
    turns yield positive steps, right turns negative.  A 180-degree turn is
    always +2, since both directions reach the same heading.
    """
    if degrees == 180:
        return 2
    steps = 1 if degrees == 90 else 3
    return steps if action == 'L' else -steps
# Headings ordered counter-clockwise, so a left turn is +1 step in the string.
points = 'NWSE'
facing_direction = 'E'
# Displacements are kept as separate positive/negative accumulators per axis.
x_dist_positive, y_dist_positive = 0, 0
x_dist_negative, y_dist_negative = 0, 0
for instruction in instructions:
    boat_action = instruction[0]
    value_action = int(instruction[1:])
    if boat_action == 'N' or boat_action == 'S':
        if boat_action == 'N':
            y_dist_positive+= value_action
        else:
            y_dist_negative+= value_action
    elif boat_action == 'W' or boat_action == 'E':
        if boat_action == 'E':
            x_dist_positive+= value_action
        else:
            x_dist_negative+= value_action
    elif boat_action == 'L' or boat_action == 'R':
        # Rotate the current heading within 'NWSE' (modulo 4 wraps around).
        position_index = 0
        position_index = (degrees_to_position(boat_action, value_action) + \
        points.find(facing_direction))%4
        facing_direction = points[position_index]
    else:
        # 'F': move in whatever direction we currently face.
        if facing_direction == 'N':
            y_dist_positive+= value_action
        elif facing_direction == 'S':
            y_dist_negative+= value_action
        elif facing_direction == 'E':
            x_dist_positive+= value_action
        elif facing_direction== 'W':
            x_dist_negative+= value_action
# Manhattan distance from the origin.
print(abs(x_dist_positive-x_dist_negative)+abs(y_dist_positive-y_dist_negative))
| StarcoderdataPython |
1721249 | <filename>extractor/utils.py
# coding=utf-8
import os
import sys
import io
import gzip
import shutil
import base64
import traceback
import requests
from queue import Queue
from skimage import io
from bottle import HTTPResponse
from contextlib import closing
from concurrent.futures import ThreadPoolExecutor
class TemporaryDirectory(object):
    """Context manager: create a directory on entry, remove it recursively on exit.

    Unlike tempfile.TemporaryDirectory the name is fixed (default ``tmp``)
    and relative to the current working directory.
    """
    def __init__(self, name='tmp'):
        self.name = name
    def __enter__(self):
        # Tolerate a pre-existing directory, but do not hide other OS errors
        # (the original bare ``except`` also swallowed e.g. permission failures).
        try:
            os.mkdir(self.name)
        except FileExistsError:
            pass
        return self.name
    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.name)
class BatchBase(object):
    """Base class for batched video downloads over a thread pool.

    NOTE(review): ``self.files`` is never populated here — subclasses or
    callers presumably fill it before start(); verify against users.
    """
    def __init__(self, ):
        # Queue of in-flight futures; 10 worker threads for downloads.
        self.queue = Queue()
        self.pool = ThreadPoolExecutor(max_workers=10)
        self.files = []
    def start(self):
        """Submit a download task per entry in self.files and busy-wait until done."""
        for item in self.files:
            task = self.pool.submit(self.download, item)
            self.queue.put({"object": task})
        # Spin until all futures report done (re-queue unfinished ones).
        while not self.queue.empty():
            t = self.queue.get()
            if not t["object"].done():
                self.queue.put(t)
        # NOTE(review): returns the literal string 'file_name' — looks like a
        # placeholder; confirm intended return value.
        return 'file_name'
    def download(self, url):
        """Stream *url* to disk in 1 KiB chunks.

        NOTE(review): the URL itself is interpolated into the file path, so
        any '/' in it makes open() fail — probably meant a derived name.
        """
        res = requests.get(url)
        with open('data/{}.mp4'.format(url), 'wb')as f:
            for content in res.iter_content(1024):
                if content:
                    f.write(content)
    def video_downloader(self, video_url, video_name):
        """Download *video_url* to *video_name*, printing size and progress."""
        size = 0
        with closing(requests.get(video_url, headers={}, stream=True)) as res:
            chunk_size = 1024
            content_size = int(res.headers['content-length'])
            if res.status_code == 200:
                sys.stdout.write(' [文件大小]:%0.2f MB\n' % (content_size / chunk_size / 1024))
                with open(video_name, 'wb') as file:
                    for data in res.iter_content(chunk_size=chunk_size):
                        file.write(data)
                        size += len(data)
                        file.flush()
                        sys.stdout.write(' [下载进度]:%.2f%%' % float(size / content_size * 100) + '\r')
                        sys.stdout.flush()
def make_archive(base_dir, file_name):
    """Zip the contents of *base_dir* into *file_name* (deflate-compressed).

    Entries are stored relative to *base_dir*; dot-prefixed names are skipped.
    """
    import zipfile
    base_dir = os.path.normpath(base_dir)
    with zipfile.ZipFile(file_name, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
        for dir_path, dir_names, filenames in os.walk(base_dir):
            for entry in sorted(dir_names) + filenames:
                if entry.startswith('.'):
                    continue
                full_path = os.path.normpath(os.path.join(dir_path, entry))
                zf.write(full_path, full_path[len(base_dir) + 1:])
def response(file_name):
    """Return an HTTP 200 response streaming *file_name* as a gzip-encoded zip.

    The file is expected to already contain compressed data; it is sent
    verbatim with Content-Encoding/Content-Type headers.
    """
    # Dead-code fix: the original ``open(...) or gzip.open(...)`` never
    # evaluated its second operand — a freshly opened file object is always
    # truthy — so the behavior is simply a plain binary open.
    body = open(file_name, 'rb')
    headers = {
        'Content-Encoding': 'gzip',
        'Content-Type': 'application/zip'
    }
    return HTTPResponse(body=body, status=200, headers=headers)
def gzip_body(out, charset='utf-8'):
    """Gzip-compress the pieces in *out* (str pieces encoded with *charset*,
    bytes written as-is) and return the compressed bytes."""
    # BUG FIX: the module-level name ``io`` is shadowed by
    # ``from skimage import io`` above, so ``io.BytesIO`` raised
    # AttributeError here.  Import the stdlib class explicitly.
    from io import BytesIO
    with BytesIO() as buf:
        with gzip.GzipFile(fileobj=buf, mode='wb') as f:
            for data in out:
                f.write(data.encode(charset) if isinstance(data, str) else data)
        # getvalue() must run after GzipFile closes so the trailer is flushed.
        return buf.getvalue()
def get_exception_message():
    """Return the current exception's traceback as a string.

    Must be called from inside an ``except`` block.
    """
    # BUG FIX: the module-level ``io`` is shadowed by ``from skimage import io``
    # above, so ``io.StringIO`` was broken; use the stdlib class explicitly.
    from io import StringIO
    with StringIO() as fp:
        traceback.print_exc(file=fp)
        return fp.getvalue()
def file_to_base64(file_data: bytes):
    """Encode raw bytes as a base64 ASCII string."""
    encoded = base64.b64encode(file_data)
    return encoded.decode('utf-8')
def base64_to_data(content: bytes):
    """Decode a base64 payload back into raw bytes."""
    return base64.b64decode(content)
| StarcoderdataPython |
3342237 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import httplib2
import urllib
import logging
from urlparse import urlparse
import simplejson as json
from simplejson.decoder import JSONDecodeError
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPBadGateway
from c2cgeoportal.lib import caching
from c2cgeoportal.lib.functionality import get_functionality
log = logging.getLogger(__name__)
cache_region = caching.get_region()
class Printproxy(object): # pragma: no cover
    """Pyramid views proxying the MapFish print service (info/create/get)."""
    def __init__(self, request):
        self.request = request
        self.config = self.request.registry.settings
    @view_config(route_name='printproxy_info')
    def info(self):
        """ Get print capabilities, filtered by the user's allowed templates. """
        templates = get_functionality(
            'print_template', self.config, self.request)
        return self._info(templates)
    @cache_region.cache_on_arguments()
    def _info(self, templates):
        # Cached per (templates, query-string) by cache_on_arguments.
        # get query string
        params = dict(self.request.params)
        query_string = urllib.urlencode(params)
        # get URL
        _url = self.config['print_url'] + 'info.json' + '?' + query_string
        log.info("Get print capabilities from %s." % _url)
        # forward request to target (without Host Header)
        http = httplib2.Http()
        h = dict(self.request.headers)
        if urlparse(_url).hostname != 'localhost':
            h.pop('Host')
        try:
            resp, content = http.request(_url, method='GET', headers=h)
        except:
            # NOTE(review): bare except hides the real failure — narrow it
            # (e.g. socket.error / httplib2.HttpLib2Error) and log.
            return HTTPBadGateway()
        try:
            capabilities = json.loads(content)
        except JSONDecodeError:
            # log and raise
            log.error("Unable to parse capabilities.")
            log.info(content)
            return content
        # Keep only the layouts the current user is allowed to print.
        capabilities['layouts'] = list(
            layout for layout in capabilities['layouts'] if
            layout['name'] in templates)
        headers = dict(resp)
        # Length changed after filtering; let the framework recompute it.
        del headers['content-length']
        response = Response(
            json.dumps(capabilities, separators=(',', ':')),
            status=resp.status, headers=headers,
        )
        response.cache_control.public = True
        response.cache_control.max_age = \
            self.request.registry.settings["default_max_age"]
        return response
    @view_config(route_name='printproxy_create')
    def create(self):
        """ Create PDF: forward the POST body to the print service. """
        # get query string
        params = dict(self.request.params)
        query_string = urllib.urlencode(params)
        # get URL
        _url = self.config['print_url'] + 'create.json' + '?' + query_string
        log.info("Send print query to %s." % _url)
        content_length = int(self.request.environ['CONTENT_LENGTH'])
        body = self.request.environ['wsgi.input'].read(content_length)
        # forward request to target (without Host Header)
        http = httplib2.Http()
        h = dict(self.request.headers)
        if urlparse(_url).hostname != 'localhost':
            h.pop('Host')
        h['Content-Length'] = str(len(body))
        h["Cache-Control"] = "no-cache"
        try:
            resp, content = http.request(
                _url, method='POST', body=body, headers=h
            )
        except:
            # NOTE(review): bare except — narrow to the expected network errors.
            return HTTPBadGateway()
        return Response(
            content, status=resp.status, headers=dict(resp),
            cache_control="no-cache"
        )
    @view_config(route_name='printproxy_get')
    def get(self):
        """ Get created PDF identified by the 'file' route segment. """
        file = self.request.matchdict.get('file')
        # get URL
        _url = self.config['print_url'] + file + '.printout'
        log.info("Get print document from %s." % _url)
        # forward request to target (without Host Header)
        http = httplib2.Http()
        h = dict(self.request.headers)
        if urlparse(_url).hostname != 'localhost':
            h.pop('Host')
        h["Cache-Control"] = "no-cache"
        try:
            resp, content = http.request(_url, method='GET', headers=h)
        except:
            # NOTE(review): bare except — narrow to the expected network errors.
            return HTTPBadGateway()
        headers = {}
        headers['content-type'] = resp['content-type']
        headers['content-disposition'] = resp['content-disposition']
        # remove Pragma and Cache-Control headers because of ie bug:
        # http://support.microsoft.com/default.aspx?scid=KB;EN-US;q316431
        #del response.headers['Pragma']
        #del response.headers['Cache-Control']
        return Response(
            content, status=resp.status, headers=headers, cache_control="no-cache"
        )
1619245 | <gh_stars>0
""" DB
This module implements an interface to the bank_server database.
"""
import json
import os.path
class Admin_DB(object):
    """JSON-file-backed database for the bank server and admin interface."""
    def __init__(self, db_path="admin_access.json"):
        # Path of the JSON file backing this store.
        self.path = db_path
    def close(self):
        """Close the database connection (no-op for a file-backed store)."""
        pass
    def init_db(self):
        """Create/overwrite the backing file with the empty schema."""
        with open(self.path, 'w') as f:
            f.write(json.dumps({'cards': {}}))
    def exists(self):
        """Return True if the backing file exists."""
        return os.path.exists(self.path)
    def modify(self, table, k, subks, vs):
        """Set sub-keys *subks* to values *vs* under db[table][k].

        *subks* and *vs* are parallel sequences.  Returns True on success,
        False when *table* does not exist (nothing is written then).
        """
        with open(self.path, 'r') as f:
            db = json.loads(f.read())
        try:
            entry = db[table].setdefault(k, {})
            for subk, v in zip(subks, vs):
                entry[subk] = v
        except KeyError:
            # BUG FIX: the original caught KeyboardInterrupt here, which the
            # loop can never raise; a missing table raises KeyError.
            return False
        with open(self.path, 'w') as f:
            f.write(json.dumps(db))
        return True
    def read(self, table, k, subk):
        """Return db[table][k][subk], or None when any level is missing."""
        with open(self.path, 'r') as f:
            db = json.loads(f.read())
        try:
            return db[table][k][subk]
        except KeyError:
            return None
    # Private key accessors for the backup database (RSA)
    def get_outer_onion_private_key(self, card_id):
        # NOTE(review): reads the "inner_onion_private_key" field despite the
        # method name — preserved for compatibility with existing data.
        return self.read("cards", card_id, "inner_onion_private_key")
    def set_outer_onion_private_key(self, card_id, value):
        # BUG FIX: modify() zips sub-keys with values, so the original bare
        # string arguments paired individual *characters*; wrap in lists.
        return self.modify("cards", card_id, ["inner_onion_private_key"], [value])
| StarcoderdataPython |
1609790 | # -*- coding:utf-8 -*-
"""
@author: leonardo
@created time: 2020-11-03
@last modified time:2020-11-03
"""
from sqlalchemy import create_engine,event
import numpy as np
def get_connection(database_name):
    """Return a SQLAlchemy engine for *database_name* on the fixed MySQL host.

    :param database_name: name of the MySQL schema to connect to
    :return: sqlalchemy Engine (connections are opened lazily)
    """
    # NOTE(review): credentials and host are hard-coded in the DSN —
    # consider moving them to configuration/environment variables.
    return create_engine('mysql+pymysql://root:yangxh@192.168.127.12:3306/{}?charset=utf8'.format(database_name))
def add_own_encoders(conn, cursor, query, *args):
    """'before_cursor_execute' hook: register converters so PyMySQL can send
    numpy scalars (np.float64, np.int32) as plain Python numbers."""
    encoders = cursor.connection.encoders
    encoders[np.float64] = lambda value, encoders: float(value)
    encoders[np.int32] = lambda value, encoders: int(value)
def enable_np_encoder(engine):
    """
    Make PyMySQL accept numpy scalar parameters (np.float64, np.int32) by
    attaching the encoder hook before every cursor execute.
    :param engine: sqlalchemy Engine, e.g. from get_connection()
    :return: None
    """
    event.listen(engine, "before_cursor_execute", add_own_encoders)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.