Row schema (column name: type); values in each row below appear in this column order.

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64 | qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64 | qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64 | qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64 | qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64 | qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64 | qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64 | qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null | qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64 | qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64 | qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64 | qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64 | qsc_code_frac_lines_string_concat: null | qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64 | qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64
effective: string | hits: int64
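Each row below follows this schema: the scalar metadata fields come first, then the full file `content`, then the numeric quality-signal values in column order, closing with `effective` and `hits`. As a minimal sketch of how rows with these columns might be loaded and inspected, the snippet below reads a local Parquet copy and filters on two of the quality signals; the file name `sample.parquet` and the use of pandas are assumptions for illustration, not part of this dump.

```python
# Hedged sketch: assumes the rows shown here are available locally as
# "sample.parquet" (hypothetical name) and that pandas with Parquet support
# (pyarrow or fastparquet) is installed.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# Keep Python files with few duplicated lines and no autogeneration flag,
# using the quality-signal columns listed in the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.2)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)

for _, row in df[mask].iterrows():
    # hexsha, repository and path identify the file; "content" holds the source.
    print(row["hexsha"][:8], row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```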
hexsha: 9c048e3e8bdc9b4b95cc9e0528a68aa1fd3efcf5 | size: 10,365 | ext: py | lang: Python
max_stars:  path=address/models.py | name=PerchLive/django-address | head_hexsha=edab73847ba95d4f7a71993bcd55ea6bf300693e | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=address/models.py | name=PerchLive/django-address | head_hexsha=edab73847ba95d4f7a71993bcd55ea6bf300693e | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=address/models.py | name=PerchLive/django-address | head_hexsha=edab73847ba95d4f7a71993bcd55ea6bf300693e | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import logging
import sys
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.utils.encoding import python_2_unicode_compatible
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
logger = logging.getLogger(__name__)
if sys.version > '3':
long = int
basestring = (str, bytes)
unicode = str
__all__ = ['Country', 'State', 'Locality', 'Address', 'AddressField']
class InconsistentDictError(Exception):
pass
def _to_python(value):
raw = value.get('raw', '')
country = value.get('country', '')
country_code = value.get('country_code', '')
state = value.get('state', '')
state_code = value.get('state_code', '')
locality = value.get('locality', '')
sublocality = value.get('sublocality', '')
postal_code = value.get('postal_code', '')
street_number = value.get('street_number', '')
route = value.get('route', '')
formatted = value.get('formatted', '')
latitude = value.get('latitude', None)
longitude = value.get('longitude', None)
# If there is no value (empty raw) then return None.
if not raw:
return None
# Fix issue with NYC boroughs (https://code.google.com/p/gmaps-api-issues/issues/detail?id=635)
if not locality and sublocality:
locality = sublocality
# If we have an inconsistent set of value bail out now.
if (country or state or locality) and not (country and state and locality):
raise InconsistentDictError
# Handle the country.
try:
country_obj = Country.objects.get(name=country)
except Country.DoesNotExist:
if country:
if len(country_code) > Country._meta.get_field('code').max_length:
if country_code != country:
raise ValueError('Invalid country code (too long): %s' % country_code)
country_code = ''
country_obj = Country.objects.create(name=country, code=country_code)
else:
country_obj = None
# Handle the state.
try:
state_obj = State.objects.get(name=state, country=country_obj)
except State.DoesNotExist:
if state:
if len(state_code) > State._meta.get_field('code').max_length:
if state_code != state:
raise ValueError('Invalid state code (too long): %s' % state_code)
state_code = ''
state_obj = State.objects.create(name=state, code=state_code, country=country_obj)
else:
state_obj = None
# Handle the locality.
try:
locality_obj = Locality.objects.get(name=locality, postal_code=postal_code, state=state_obj)
except Locality.DoesNotExist:
if locality:
locality_obj = Locality.objects.create(name=locality, postal_code=postal_code, state=state_obj)
else:
locality_obj = None
# Handle the address.
try:
if not (street_number or route or locality):
address_obj = Address.objects.get(raw=raw)
else:
address_obj = Address.objects.get(
street_number=street_number,
route=route,
locality=locality_obj
)
except Address.DoesNotExist:
address_obj = Address(
street_number=street_number,
route=route,
raw=raw,
locality=locality_obj,
formatted=formatted,
latitude=latitude,
longitude=longitude,
)
# If "formatted" is empty try to construct it from other values.
if not address_obj.formatted:
address_obj.formatted = unicode(address_obj)
# Need to save.
address_obj.save()
# Done.
return address_obj
##
# Convert a dictionary to an address.
##
def to_python(value):
# Keep `None`s.
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
# If we have an integer, assume it is a model primary key. This is mostly for
# Django being a cunt.
elif isinstance(value, (int, long)):
return value
# A string is considered a raw value.
elif isinstance(value, basestring):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
# Attempt a conversion.
try:
return _to_python(value)
except InconsistentDictError:
return Address.objects.create(raw=value['raw'])
# Not in any of the formats I recognise.
raise ValidationError('Invalid address value.')
##
# A country.
##
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=40, unique=True, blank=True)
code = models.CharField(max_length=2, blank=True) # not unique as there are duplicates (IT)
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def __str__(self):
return '%s' % (self.name or self.code)
##
# A state. Google refers to this as `administration_level_1`.
##
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=165, blank=True)
code = models.CharField(max_length=3, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='states')
class Meta:
unique_together = ('name', 'country')
ordering = ('country', 'name')
def __str__(self):
txt = self.to_str()
country = '%s' % self.country
if country and txt:
txt += ', '
txt += country
return txt
def to_str(self):
return '%s' % (self.name or self.code)
##
# A locality (suburb).
##
@python_2_unicode_compatible
class Locality(models.Model):
name = models.CharField(max_length=165, blank=True)
postal_code = models.CharField(max_length=10, blank=True)
state = models.ForeignKey(State, on_delete=models.CASCADE, related_name='localities')
class Meta:
verbose_name_plural = 'Localities'
unique_together = ('name', 'postal_code', 'state')
ordering = ('state', 'name')
def __str__(self):
txt = '%s' % self.name
state = self.state.to_str() if self.state else ''
if txt and state:
txt += ', '
txt += state
if self.postal_code:
txt += ' %s' % self.postal_code
cntry = '%s' % (self.state.country if self.state and self.state.country else '')
if cntry:
txt += ', %s' % cntry
return txt
##
# An address. If for any reason we are unable to find a matching
# decomposed address we will store the raw address string in `raw`.
##
@python_2_unicode_compatible
class Address(models.Model):
street_number = models.CharField(max_length=20, blank=True)
route = models.CharField(max_length=100, blank=True)
locality = models.ForeignKey(Locality, on_delete=models.CASCADE, related_name='addresses', blank=True, null=True)
raw = models.CharField(max_length=200)
formatted = models.CharField(max_length=200, blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
verbose_name_plural = 'Addresses'
ordering = ('locality', 'route', 'street_number')
# unique_together = ('locality', 'route', 'street_number')
def __str__(self):
if self.formatted != '':
txt = '%s' % self.formatted
elif self.locality:
txt = ''
if self.street_number:
txt = '%s' % self.street_number
if self.route:
if txt:
txt += ' %s' % self.route
locality = '%s' % self.locality
if txt and locality:
txt += ', '
txt += locality
else:
txt = '%s' % self.raw
return txt
def clean(self):
if not self.raw:
raise ValidationError('Addresses may not have a blank `raw` field.')
def as_dict(self):
ad = dict(
street_number=self.street_number,
route=self.route,
raw=self.raw,
formatted=self.formatted,
latitude=self.latitude if self.latitude else '',
longitude=self.longitude if self.longitude else '',
)
if self.locality:
ad['locality'] = self.locality.name
ad['postal_code'] = self.locality.postal_code
if self.locality.state:
ad['state'] = self.locality.state.name
ad['state_code'] = self.locality.state.code
if self.locality.state.country:
ad['country'] = self.locality.state.country.name
ad['country_code'] = self.locality.state.country.code
return ad
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, to_python(value))
##
# A field for addresses in other models.
##
class AddressField(models.ForeignKey):
description = 'An address'
def __init__(self, *args, **kwargs):
kwargs['to'] = 'address.Address'
super(AddressField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
from address.compat import compat_contribute_to_class
compat_contribute_to_class(self, cls, name, virtual_only)
# super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, AddressDescriptor(self))
# def deconstruct(self):
# name, path, args, kwargs = super(AddressField, self).deconstruct()
# del kwargs['to']
# return name, path, args, kwargs
def formfield(self, **kwargs):
from .forms import AddressField as AddressFormField
defaults = dict(form_class=AddressFormField)
defaults.update(kwargs)
return super(AddressField, self).formfield(**defaults)
| 31.409091
| 117
| 0.625663
| 1,218
| 10,365
| 5.17734
| 0.17734
| 0.026641
| 0.028544
| 0.038059
| 0.19648
| 0.131462
| 0.090073
| 0.053283
| 0.040913
| 0.026641
| 0
| 0.004349
| 0.267921
| 10,365
| 329
| 118
| 31.504559
| 0.8267
| 0.122721
| 0
| 0.184685
| 0
| 0
| 0.060093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058559
| false
| 0.004505
| 0.04955
| 0.009009
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c07713f7b2c917072be6181205756050fc2c5cb | size: 7,715 | ext: py | lang: Python
max_stars:  path=addons/hr_payroll_account/models/hr_payroll_account.py | name=jjiege/odoo | head_hexsha=fd5b8ad387c1881f349d125cbd56433f4d49398f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=addons/hr_payroll_account/models/hr_payroll_account.py | name=jjiege/odoo | head_hexsha=fd5b8ad387c1881f349d125cbd56433f4d49398f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=addons/hr_payroll_account/models/hr_payroll_account.py | name=jjiege/odoo | head_hexsha=fd5b8ad387c1881f349d125cbd56433f4d49398f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
#-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class HrPayslipLine(models.Model):
_inherit = 'hr.payslip.line'
def _get_partner_id(self, credit_account):
"""
Get partner_id of slip line to use in account_move_line
"""
# use partner of salary rule or fallback on employee's address
register_partner_id = self.salary_rule_id.register_id.partner_id
partner_id = register_partner_id.id or self.slip_id.employee_id.address_home_id.id
if credit_account:
if register_partner_id or self.salary_rule_id.account_credit.internal_type in ('receivable', 'payable'):
return partner_id
else:
if register_partner_id or self.salary_rule_id.account_debit.internal_type in ('receivable', 'payable'):
return partner_id
return False
class HrPayslip(models.Model):
_inherit = 'hr.payslip'
date = fields.Date('Date Account', states={'draft': [('readonly', False)]}, readonly=True,
help="Keep empty to use the period of the validation(Payslip) date.")
journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,
states={'draft': [('readonly', False)]}, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
move_id = fields.Many2one('account.move', 'Accounting Entry', readonly=True, copy=False)
@api.model
def create(self, vals):
if 'journal_id' in self.env.context:
vals['journal_id'] = self.env.context.get('journal_id')
return super(HrPayslip, self).create(vals)
@api.onchange('contract_id')
def onchange_contract(self):
super(HrPayslip, self).onchange_contract()
self.journal_id = self.contract_id.journal_id.id or (not self.contract_id and self.default_get(['journal_id'])['journal_id'])
@api.multi
def action_payslip_cancel(self):
moves = self.mapped('move_id')
moves.filtered(lambda x: x.state == 'posted').button_cancel()
moves.unlink()
return super(HrPayslip, self).action_payslip_cancel()
@api.multi
def action_payslip_done(self):
res = super(HrPayslip, self).action_payslip_done()
for slip in self:
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
date = slip.date or slip.date_to
currency = slip.company_id.currency_id or slip.journal_id.company_id.currency_id
name = _('Payslip of %s') % (slip.employee_id.name)
move_dict = {
'narration': name,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'date': date,
}
for line in slip.details_by_salary_rule_category:
amount = currency.round(slip.credit_note and -line.total or line.total)
if currency.is_zero(amount):
continue
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=False),
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount > 0.0 and amount or 0.0,
'credit': amount < 0.0 and -amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=True),
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount < 0.0 and -amount or 0.0,
'credit': amount > 0.0 and amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if currency.compare_amounts(credit_sum, debit_sum) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': 0.0,
'credit': currency.round(debit_sum - credit_sum),
})
line_ids.append(adjust_credit)
elif currency.compare_amounts(debit_sum, credit_sum) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': currency.round(credit_sum - debit_sum),
'credit': 0.0,
})
line_ids.append(adjust_debit)
move_dict['line_ids'] = line_ids
move = self.env['account.move'].create(move_dict)
slip.write({'move_id': move.id, 'date': date})
move.post()
return res
class HrSalaryRule(models.Model):
_inherit = 'hr.salary.rule'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
account_tax_id = fields.Many2one('account.tax', 'Tax')
account_debit = fields.Many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)])
account_credit = fields.Many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)])
class HrContract(models.Model):
_inherit = 'hr.contract'
_description = 'Employee Contract'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
journal_id = fields.Many2one('account.journal', 'Salary Journal')
class HrPayslipRun(models.Model):
_inherit = 'hr.payslip.run'
journal_id = fields.Many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True,
required=True, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
| 46.757576
| 142
| 0.583279
| 910
| 7,715
| 4.697802
| 0.164835
| 0.052632
| 0.030409
| 0.037661
| 0.467135
| 0.41731
| 0.386433
| 0.371462
| 0.293801
| 0.293801
| 0
| 0.009285
| 0.302009
| 7,715
| 164
| 143
| 47.042683
| 0.784587
| 0.02722
| 0
| 0.251852
| 0
| 0
| 0.155903
| 0.006418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.022222
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c08054f6ca4be429f3f9506fd1f8ea55ac7ad8b | size: 3,048 | ext: py | lang: Python
max_stars:  path=ml_datasets/utils.py | name=abkoesdw/ml-datasets | head_hexsha=c8c7b85ba8ed9c0ea233b4092d499d5022952011 | licenses=["MIT"] | count=1 | event_min_datetime=2020-07-05T04:58:07.000Z | event_max_datetime=2020-07-05T04:58:07.000Z
max_issues: path=ml_datasets/utils.py | name=abkoesdw/ml-datasets | head_hexsha=c8c7b85ba8ed9c0ea233b4092d499d5022952011 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=ml_datasets/utils.py | name=abkoesdw/ml-datasets | head_hexsha=c8c7b85ba8ed9c0ea233b4092d499d5022952011 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import BoundaryNorm
def plot_images(
num_sample_perclass=10, x=None, y=None, labels=None, title=None, cmap=None
):
grid_x = num_sample_perclass + 1
grid_y = len(labels)
plt.figure(figsize=(grid_y, grid_x))
gs1 = gridspec.GridSpec(grid_y, grid_x)
gs1.update(wspace=0.025, hspace=0.05)
font = {"family": "serif", "weight": "bold"}
plt.suptitle(title)
j = 0
for i in range(grid_y):
idxs = [0] + list(np.where(y == list(labels.keys())[i])[0][: grid_x - 1])
label = labels[list(labels.keys())[i]]
for k, idx in enumerate(idxs):
ax1 = plt.subplot(gs1[j])
if k == 0:
ax1.text(0, 0.25, label, ha="right", wrap=True, fontdict=font)
else:
ax1.imshow(x[idx, ...], cmap=cmap)
plt.axis("off")
j += 1
plt.show()
def plot_2D(x, y, title, axis="off"):
BLUE, ORANGE = "#57B5E8", "#E69E00"
plt.figure(figsize=(8, 8))
plt.scatter(
x[:, 0],
x[:, 1],
s=18,
facecolors="none",
edgecolors=np.array([BLUE, ORANGE])[y],
)
if axis == "off":
plt.axis("off")
elif axis == "on":
plt.xlabel("x_1")
plt.ylabel("x_2")
else:
print("incorrect values for arg: axis (on or off only)")
sys.exit()
plt.title(title)
plt.show()
def plot_dna(df, label):
matrix = df.values
col_names = df.columns
rows = np.arange(matrix.shape[0])
cols = np.arange(matrix.shape[1])
np.random.seed(3)
np.random.shuffle(rows)
np.random.shuffle(cols)
matrix = matrix[:, cols[:100]].T
matrix = matrix[:, rows]
col_names = col_names[cols[:100]]
label = label[rows]
mat_min = np.min(matrix)
mat_max = np.max(matrix)
mat_min = -np.max([np.abs(mat_min), mat_max])
mat_max = np.max([np.abs(mat_min), mat_max])
matrix = np.ma.masked_where(np.abs(matrix) <= 0.3, matrix)
plt.figure(figsize=(6, 12))
cmap_list = ["red", "darkred", "green", "lime", "lightgreen"]
cmap = LinearSegmentedColormap.from_list("Custom cmap", cmap_list, len(cmap_list))
cmap.set_bad("black")
bounds = np.linspace(
mat_min + 6, mat_max - 6, 5
) # np.arange(mat_min + 6, mat_max - 6, 0.1)
idx = np.searchsorted(bounds, 0)
bounds = np.insert(bounds, idx, 0)
norm = BoundaryNorm(bounds, cmap.N)
plt.imshow(matrix, cmap=cmap, norm=norm)
plt.xticks(np.arange(len(label)))
plt.yticks(np.arange(len(col_names)))
ax = plt.gca()
ax.set_xticklabels(label, rotation=90)
ax.set_yticklabels(col_names)
ax.yaxis.tick_right()
ax.tick_params(axis=u"both", which=u"both", labelsize=5, length=0.0)
plt.tight_layout()
fig = plt.gcf()
# fig.set_size_inches((6, 12), forward=False)
# fig.savefig("img/dna.png", dpi=200)
plt.show()
| 27.709091
| 86
| 0.597113
| 451
| 3,048
| 3.931264
| 0.365854
| 0.020305
| 0.027073
| 0.029329
| 0.055274
| 0.040609
| 0.024817
| 0.024817
| 0
| 0
| 0
| 0.033247
| 0.240157
| 3,048
| 109
| 87
| 27.963303
| 0.732297
| 0.03937
| 0
| 0.08046
| 0
| 0
| 0.056088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.103448
| 0.011494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c09326eb5f4c01f6725f6f04b0ab3e2c2184c2f | size: 4,794 | ext: py | lang: Python
max_stars:  path=Simulator/simulator.py | name=MasterRadule/DefenceFirst | head_hexsha=d3c3a652357ac433213c38fa6134780e286f6cf2 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=Simulator/simulator.py | name=MasterRadule/DefenceFirst | head_hexsha=d3c3a652357ac433213c38fa6134780e286f6cf2 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=Simulator/simulator.py | name=MasterRadule/DefenceFirst | head_hexsha=d3c3a652357ac433213c38fa6134780e286f6cf2 | licenses=["MIT"] | count=2 | event_min_datetime=2020-08-02T10:47:17.000Z | event_max_datetime=2021-08-31T06:00:44.000Z
content:
import logging
import os
import random
from abc import ABC, abstractmethod
from random import randint
from time import sleep, strftime
HOSTNAME = ['defence-first.rs', 'defence-first.de', 'defence-first.ru']
HOSTIP = ['78.218.236.218', '87.236.11.212', '54.147.165.86']
SOURCEIP = ['163.189.141.53', '204.164.10.7', '213.166.160.236', '123.197.235.233', '77.28.21.14']
USERNAMES = ['user1', 'user2', 'user3', 'user4', 'user5']
FACILITY = ['KERN', 'USER', 'MAIL', 'DAEMON', 'AUTH', 'SYSLOG', 'LPR', 'NEWS',
'UUCP', 'CLOCK_DAEMON', 'AUTHPRIV', 'FTP', 'NTP', 'LOGAUDIT', 'LOGALERT',
'CRON', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4', 'LOCAL5', 'LOCAL6', 'LOCAL7']
SEVERITY = ['DEBUG', 'INFORMATIONAL', 'NOTICE', 'WARNING', 'ERROR', 'CRITICAL', 'ALERT', 'EMERGENCY']
FORMAT = '%(asctime)s %(hostname)s-Application-%(hostip)s-%(sourceip)s %(severity)s-%(facility)s %(' \
'message)s '
RESOURCES = ['index.html', 'document.xml', 'dashboard.html']
LOGS_PATH = 'logs'
class State(ABC):
@abstractmethod
def run(self, context):
return NotImplemented
class DoSAttack(State):
def run(self, context):
d = {'hostname': HOSTNAME[0], 'hostip': HOSTIP[0], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
http_response_code = '200'
for i in range(25):
if i >= 20:
http_response_code = '503'
d['severity'] = SEVERITY[5]
for sourceip in SOURCEIP:
d['sourceip'] = sourceip
context.logger.info('Requested resource index.html {}'.format(http_response_code), extra=d)
context.state = NormalState()
class NormalState(State):
def run(self, context):
normal = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
while True:
normal['sourceip'] = random.choice(SOURCEIP)
if random.random() < 0.3:
context.logger.info(
'Successful authorization on username "{}"'.format(USERNAMES[SOURCEIP.index(normal['sourceip'])]),
extra=normal)
else:
context.logger.info('Requested resource {} 200'.format(random.choice(RESOURCES)), extra=normal)
sleep(1)
if random.random() < 0.1:
rand = randint(1, 3)
if rand == 1:
context.state = DoSAttack()
elif rand == 2:
context.state = BruteForce()
elif rand == 3:
context.state = DatabaseError()
context.state.run(context)
class BruteForce(State):
def run(self, context):
attack = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'sourceip': SOURCEIP[0], 'severity': SEVERITY[2],
'facility': FACILITY[4]}
normal = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
for i in range(30):
if i > 5:
attack['severity'] = SEVERITY[3]
if random.random() < 0.45:
normal['sourceip'] = random.choice(SOURCEIP)
context.logger.info('Requested resource {} 200'.format(random.choice(RESOURCES)), extra=normal)
sleep(0.5)
context.logger.info('Failed authorization on username "user1"', extra=attack)
sleep(0.5)
context.state = NormalState()
class DatabaseError(State):
def run(self, context):
d = {'hostname': HOSTNAME[2], 'hostip': HOSTIP[2], 'sourceip': SOURCEIP[0], 'severity': SEVERITY[4],
'facility': FACILITY[3]}
context.logger.info('Database error', extra=d)
sleep(1)
context.state = NormalState()
class Context:
def __init__(self):
self.state = NormalState()
formatter = logging.Formatter(FORMAT, "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('simulator')
if not os.path.exists(LOGS_PATH):
os.mkdir(LOGS_PATH)
fileHandler = logging.FileHandler(
os.path.join(LOGS_PATH, 'application_log-{}.log'.format(strftime('%Y-%m-%d'))))
fileHandler.setFormatter(formatter)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
self.logger = logger
def run(self):
self.state.run(self)
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
if __name__ == '__main__':
sm = Context()
sm.run()
| 32.612245
| 118
| 0.569462
| 522
| 4,794
| 5.180077
| 0.33908
| 0.018121
| 0.022189
| 0.031435
| 0.244822
| 0.181953
| 0.139793
| 0.139793
| 0.110947
| 0.110947
| 0
| 0.045468
| 0.275136
| 4,794
| 146
| 119
| 32.835616
| 0.732662
| 0
| 0
| 0.196262
| 0
| 0.009346
| 0.205048
| 0.019816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084112
| false
| 0
| 0.056075
| 0.018692
| 0.214953
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0a0b0b086b2b7d8551997a7e4a8ba952ff7a5b | size: 793 | ext: py | lang: Python
max_stars:  path=pysteam/evaluator/vector_space_error_eval.py | name=utiasASRL/pysteam | head_hexsha=c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b | licenses=["BSD-3-Clause"] | count=5 | event_min_datetime=2021-10-23T00:35:20.000Z | event_max_datetime=2022-03-22T02:32:43.000Z
max_issues: path=pysteam/evaluator/vector_space_error_eval.py | name=utiasASRL/pysteam | head_hexsha=c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=pysteam/evaluator/vector_space_error_eval.py | name=utiasASRL/pysteam | head_hexsha=c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b | licenses=["BSD-3-Clause"] | count=1 | event_min_datetime=2022-02-04T21:49:48.000Z | event_max_datetime=2022-02-04T21:49:48.000Z
content:
from typing import Optional
import numpy as np
from . import Evaluator
from ..state import VectorSpaceStateVar
class VectorSpaceErrorEval(Evaluator):
"""Error evaluator for a measured vector space state variable"""
def __init__(self, meas: np.ndarray, state_vec: VectorSpaceStateVar) -> None:
super().__init__()
self._meas: np.ndarray = meas
self._state_vec: VectorSpaceStateVar = state_vec
def is_active(self):
return not self._state_vec.locked
def evaluate(self, lhs: Optional[np.ndarray] = None):
error = self._meas - self._state_vec.value
if lhs is None:
return error
assert lhs.shape[-1] == self._state_vec.perturb_dim
jacs = dict()
if not self._state_vec.locked:
jacs = {self._state_vec.key: -lhs}
return error, jacs
| 24.78125
| 79
| 0.711223
| 107
| 793
| 5.028037
| 0.411215
| 0.118959
| 0.133829
| 0.052045
| 0.156134
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001563
| 0.192938
| 793
| 32
| 80
| 24.78125
| 0.839063
| 0.07314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.15
| false
| 0
| 0.2
| 0.05
| 0.55
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0b572f391a67c770410e50b8bf0631101d5372 | size: 4,152 | ext: py | lang: Python
max_stars:  path=tests/test_autotuner.py | name=RajatRasal/devito | head_hexsha=162abb6b318e77eaa4e8f719047327c45782056f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=tests/test_autotuner.py | name=RajatRasal/devito | head_hexsha=162abb6b318e77eaa4e8f719047327c45782056f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=tests/test_autotuner.py | name=RajatRasal/devito | head_hexsha=162abb6b318e77eaa4e8f719047327c45782056f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from __future__ import absolute_import
from functools import reduce
from operator import mul
try:
from StringIO import StringIO
except ImportError:
# Python3 compatibility
from io import StringIO
import pytest
from conftest import skipif_yask
import numpy as np
from devito import Grid, Function, TimeFunction, Eq, Operator, configuration, silencio
from devito.logger import logger, logging
@silencio(log_level='DEBUG')
@skipif_yask
@pytest.mark.parametrize("shape,expected", [
((30, 30), 17),
((30, 30, 30), 21)
])
def test_at_is_actually_working(shape, expected):
"""
Check that autotuning is actually running when switched on,
in both 2D and 3D operators.
"""
grid = Grid(shape=shape)
buffer = StringIO()
temporary_handler = logging.StreamHandler(buffer)
logger.addHandler(temporary_handler)
infield = Function(name='infield', grid=grid)
infield.data[:] = np.arange(reduce(mul, shape), dtype=np.int32).reshape(shape)
outfield = Function(name='outfield', grid=grid)
stencil = Eq(outfield.indexify(), outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockinner': True, 'blockalways': True}))
# Expected 3 AT attempts for the given shape
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
# Now try the same with aggressive autotuning, which tries 9 more cases
configuration['autotuning'] = 'aggressive'
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == expected
configuration['autotuning'] = configuration._defaults['autotuning']
logger.removeHandler(temporary_handler)
temporary_handler.flush()
temporary_handler.close()
buffer.flush()
buffer.close()
@silencio(log_level='DEBUG')
@skipif_yask
def test_timesteps_per_at_run():
"""
Check that each autotuning run (ie with a given block shape) takes
``autotuning.core.options['at_squeezer']`` timesteps, for an operator
performing the increment ``a[t + timeorder, ...] = f(a[t, ...], ...)``.
"""
from devito.core.autotuning import options
buffer = StringIO()
temporary_handler = logging.StreamHandler(buffer)
logger.addHandler(temporary_handler)
shape = (30, 30, 30)
grid = Grid(shape=shape)
x, y, z = grid.dimensions
t = grid.stepping_dim
# Function
infield = Function(name='infield', grid=grid)
infield.data[:] = np.arange(reduce(mul, shape), dtype=np.int32).reshape(shape)
outfield = Function(name='outfield', grid=grid)
stencil = Eq(outfield.indexify(), outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockalways': True}))
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
assert all('in 1 timesteps' in i for i in out)
buffer.truncate(0)
# TimeFunction with increasing time order; increasing the time order
# shouldn't affect how many iterations the autotuner is gonna run
for to in [1, 2, 4]:
infield = TimeFunction(name='infield', grid=grid, time_order=to)
infield.data[:] = np.arange(reduce(mul, infield.shape),
dtype=np.int32).reshape(infield.shape)
outfield = TimeFunction(name='outfield', grid=grid, time_order=to)
stencil = Eq(outfield.indexed[t + to, x, y, z],
outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockalways': True}))
op(infield=infield, outfield=outfield, t=2, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
assert all('in %d timesteps' % options['at_squeezer'] in i for i in out)
buffer.truncate(0)
logger.removeHandler(temporary_handler)
temporary_handler.flush()
temporary_handler.close()
buffer.flush()
buffer.close()
| 35.793103
| 87
| 0.67317
| 535
| 4,152
| 5.160748
| 0.282243
| 0.05795
| 0.010866
| 0.015212
| 0.544006
| 0.52155
| 0.488953
| 0.488953
| 0.488953
| 0.469395
| 0
| 0.014101
| 0.197254
| 4,152
| 115
| 88
| 36.104348
| 0.814281
| 0.138006
| 0
| 0.55
| 0
| 0
| 0.067969
| 0
| 0
| 0
| 0
| 0
| 0.075
| 1
| 0.025
| false
| 0
| 0.15
| 0
| 0.175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0c673d58dcba5d4585b62a8e7fbc1916ed2edb | size: 2,683 | ext: py | lang: Python
max_stars:  path=projects/CharGrid/data/bizcard2coco.py | name=timctho/detectron2-chargrid | head_hexsha=547479c88ad7d1de2348377706167a84d024a622 | licenses=["Apache-2.0"] | count=3 | event_min_datetime=2020-03-15T18:33:21.000Z | event_max_datetime=2020-03-28T18:06:45.000Z
max_issues: path=projects/CharGrid/data/bizcard2coco.py | name=timctho/detectron2-chargrid | head_hexsha=547479c88ad7d1de2348377706167a84d024a622 | licenses=["Apache-2.0"] | count=2 | event_min_datetime=2021-09-08T01:46:39.000Z | event_max_datetime=2022-01-13T02:22:56.000Z
max_forks:  path=projects/CharGrid/data/bizcard2coco.py | name=timctho/detectron2-chargrid | head_hexsha=547479c88ad7d1de2348377706167a84d024a622 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from data.data_reader import BIZCARD_LABEL_MAP, BizcardDataParser
import argparse
from pathlib import Path
import os
import json
import cv2
import numpy as np
def convert_bizcard_to_coco_format(image_dir, json_dir, id_list, out_dir, out_name):
coco_json = {}
images = []
annotations = []
categories = []
for _, key in enumerate(BIZCARD_LABEL_MAP.keys()):
categories.append({
'id': BIZCARD_LABEL_MAP[key],
'name': key
})
with open(id_list) as fp:
ids = fp.readlines()
for idx, file_id in enumerate(ids):
file_id = Path(file_id.strip())
print(idx, file_id)
if not (image_dir / file_id).with_suffix('.jpg').exists():
file_id = file_id.with_suffix('.jpeg')
else:
file_id = file_id.with_suffix('.jpg')
height, width = cv2.imread(str(image_dir / file_id)).shape[:2]
images.append({
'file_name': str(file_id),
'id': idx,
'height': height,
'width': width
})
try:
gt = BizcardDataParser.parse_data(str((json_dir / file_id).with_suffix('.json')), str(image_dir / file_id))[
0]
for word in gt.words:
anno = {
'id': len(annotations),
'image_id': idx,
'bbox': [word.bbox.min_x, word.bbox.min_y, (word.bbox.max_x - word.bbox.min_x),
(word.bbox.max_y - word.bbox.min_y)],
'segmentation': [word.bbox.val],
'category_id': word.label,
'iscrowd': 0,
'area': cv2.contourArea(np.reshape(word.bbox.val, [-1, 2]).astype(np.float32))
}
annotations.append(anno)
except Exception as e:
print(e)
print(str(image_dir / file_id))
coco_json['images'] = images
coco_json['annotations'] = annotations
coco_json['categories'] = categories
with open(Path(out_dir, out_name), 'w') as f:
json.dump(coco_json, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_dir', type=str)
parser.add_argument('--gt_dir', type=str)
parser.add_argument('--data_list', type=str)
parser.add_argument('--out_dir', type=str)
parser.add_argument('--out_name', type=str)
args = parser.parse_args()
if not Path(args.out_dir).exists():
Path(args.out_dir).mkdir()
convert_bizcard_to_coco_format(
Path(args.img_dir),
Path(args.gt_dir),
args.data_list,
args.out_dir,
args.out_name)
| 31.197674
| 120
| 0.566157
| 338
| 2,683
| 4.233728
| 0.286982
| 0.0587
| 0.031447
| 0.039133
| 0.238994
| 0.136268
| 0
| 0
| 0
| 0
| 0
| 0.00535
| 0.303392
| 2,683
| 85
| 121
| 31.564706
| 0.7603
| 0
| 0
| 0.027778
| 0
| 0
| 0.065971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.097222
| 0
| 0.111111
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0cd3c1e4e41a88f41a644df51b7c36f341d915 | size: 643 | ext: py | lang: Python
max_stars:  path=deckz/cli/run.py | name=m09/deckz | head_hexsha=0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=deckz/cli/run.py | name=m09/deckz | head_hexsha=0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | licenses=["Apache-2.0"] | count=41 | event_min_datetime=2020-04-06T13:49:18.000Z | event_max_datetime=2020-12-24T11:14:47.000Z
max_forks:  path=deckz/cli/run.py | name=m09/deckz | head_hexsha=0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from pathlib import Path
from typing import List, Optional
from typer import Argument
from deckz.cli import app
from deckz.paths import Paths
from deckz.running import run as running_run
@app.command()
def run(
targets: Optional[List[str]] = Argument(None),
handout: bool = True,
presentation: bool = True,
print: bool = True,
deck_path: Path = Path("."),
) -> None:
"""Compile main targets."""
paths = Paths.from_defaults(deck_path)
running_run(
paths=paths,
build_handout=handout,
build_presentation=presentation,
build_print=print,
target_whitelist=targets,
)
| 22.964286
| 50
| 0.676516
| 80
| 643
| 5.325
| 0.4125
| 0.06338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227061
| 643
| 27
| 51
| 23.814815
| 0.857143
| 0.032659
| 0
| 0
| 0
| 0
| 0.001623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.318182
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0e950fb4a4ebdf55176f8dd2da092d38504b70 | size: 2,171 | ext: py | lang: Python
max_stars:  path=chat.py | name=rchampa/chat-server | head_hexsha=34b5897e90b580754ad95b36bf7f23ac9baf3175 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=chat.py | name=rchampa/chat-server | head_hexsha=34b5897e90b580754ad95b36bf7f23ac9baf3175 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=chat.py | name=rchampa/chat-server | head_hexsha=34b5897e90b580754ad95b36bf7f23ac9baf3175 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import asyncio
import contextvars
import aioredis
import uvloop
from aioredis import Redis
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.staticfiles import StaticFiles
from RLog import rprint
from routers import apirest, websockets
REDIS_HOST = 'redis'
REDIS_PORT = 6379
PORT = 9080
HOST = "0.0.0.0"
cvar_redis = contextvars.ContextVar('redis', default=None)
class CustomHeaderMiddleware(BaseHTTPMiddleware):
def __init__(self, app, header_value='Example'):
rprint('__init__')
super().__init__(app)
self.header_value = header_value
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers['Custom'] = self.header_value
return response
# uvloop is written in Cython and is built on top of libuv http://magic.io/blog/uvloop-blazing-fast-python-networking/
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
app.add_middleware(CustomHeaderMiddleware)
app.include_router(apirest.router)
app.include_router(websockets.router)
@app.on_event("startup")
async def handle_startup() -> None:
rprint("startup")
try:
pool = await aioredis.create_redis_pool((REDIS_HOST, REDIS_PORT), encoding='utf-8', maxsize=20)
cvar_redis.set(pool)
rprint("Connected to Redis on ", REDIS_HOST, REDIS_PORT)
except ConnectionRefusedError as e:
rprint('cannot connect to redis on:', REDIS_HOST, REDIS_PORT)
return
@app.on_event("shutdown")
async def handle_shutdown() -> None:
if cvar_redis.get() is not None:
redis: Redis = cvar_redis.get()
redis.close()
await redis.wait_closed()
rprint("closed connection Redis on ", REDIS_HOST, REDIS_PORT)
else:
rprint("ERROR: cvar_redis.get() devuelve NONE")
if __name__ == "__main__":
import uvicorn
rprint("Starting app")
rprint(dir(app))
rprint(app.url_path_for('websocket_endpoint'))
uvicorn.run('chat:app', host=HOST, port=PORT, log_level='info', reload=True)#, uds='uvicorn.sock')
| 31.463768
| 119
| 0.720405
| 281
| 2,171
| 5.359431
| 0.441281
| 0.02988
| 0.046481
| 0.047809
| 0.052457
| 0.052457
| 0.035857
| 0
| 0
| 0
| 0
| 0.008319
| 0.169507
| 2,171
| 68
| 120
| 31.926471
| 0.826955
| 0.063565
| 0
| 0
| 0
| 0
| 0.121675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.2
| 0
| 0.272727
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0eb1c40d85566bde4854bf69d4592341ad2835 | size: 1,009 | ext: py | lang: Python
max_stars:  path=cli.py | name=abel-bernabeu/facecompressor | head_hexsha=9322f4e3d3f2787dc9dec2fad6b3f1995d052077 | licenses=["BSD-3-Clause"] | count=2 | event_min_datetime=2020-10-20T09:35:56.000Z | event_max_datetime=2021-04-27T11:27:47.000Z
max_issues: path=cli.py | name=abel-bernabeu/facecompressor | head_hexsha=9322f4e3d3f2787dc9dec2fad6b3f1995d052077 | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=cli.py | name=abel-bernabeu/facecompressor | head_hexsha=9322f4e3d3f2787dc9dec2fad6b3f1995d052077 | licenses=["BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import argparse
import autoencoder
def addTrainablesArg(parser):
parser.add_argument('--model', dest='model', help='Trained model', default='model.pt')
def addExchangeArg(parser):
parser.add_argument('--exchange', dest='exchange', help='File with exchanged data', required=True)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
encode_parser = subparsers.add_parser('encode')
addTrainablesArg(encode_parser)
encode_parser.add_argument('--input', dest='input', help='Input image file name', required=True)
addExchangeArg(encode_parser)
decode_parser = subparsers.add_parser('decode')
addTrainablesArg(decode_parser)
addExchangeArg(decode_parser)
decode_parser.add_argument('--output', dest='output', help='Output image file name', required=True)
opts = parser.parse_args()
if opts.action == 'encode':
autoencoder.encode(opts.model, opts.input, opts.exchange)
elif opts.action == 'decode':
autoencoder.decode(opts.model, opts.exchange, opts.output)
| 31.53125
| 102
| 0.769078
| 124
| 1,009
| 6.129032
| 0.290323
| 0.059211
| 0.089474
| 0.060526
| 0.065789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09217
| 1,009
| 31
| 103
| 32.548387
| 0.829694
| 0
| 0
| 0
| 0
| 0
| 0.172448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c0f0c2835497cfafc1c97305175f1c3c60456a9 | size: 6,995 | ext: py | lang: Python
max_stars:  path=lib/bridgedb/email/request.py | name=liudonghua123/bridgedb | head_hexsha=94dd10673f9e6650e8a00e162f348e64f7a1ecab | licenses=["BSD-3-Clause-Clear"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=lib/bridgedb/email/request.py | name=liudonghua123/bridgedb | head_hexsha=94dd10673f9e6650e8a00e162f348e64f7a1ecab | licenses=["BSD-3-Clause-Clear"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=lib/bridgedb/email/request.py | name=liudonghua123/bridgedb | head_hexsha=94dd10673f9e6650e8a00e162f348e64f7a1ecab | licenses=["BSD-3-Clause-Clear"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
# -*- coding: utf-8; test-case-name: bridgedb.test.test_email_request; -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Nick Mathewson <nickm@torproject.org>
# Isis Lovecruft <isis@torproject.org> 0xA3ADB67A2CDB8B35
# Matthew Finkel <sysrqb@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2015, The Tor Project, Inc.
# (c) 2013-2015, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""
.. py:module:: bridgedb.email.request
:synopsis: Classes for parsing and storing information about requests for
bridges which are sent to the email distributor.
bridgedb.email.request
======================
Classes for parsing and storing information about requests for bridges
which are sent to the email distributor.
::
bridgedb.email.request
| |_ determineBridgeRequestOptions - Figure out which filters to apply, or
| offer help.
|_ EmailBridgeRequest - A request for bridges which was received through
the email distributor.
..
"""
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from bridgedb import bridgerequest
from bridgedb.Dist import EmailRequestedHelp
from bridgedb.Dist import EmailRequestedKey
#: A regular expression for matching the Pluggable Transport method TYPE in
#: emailed requests for Pluggable Transports.
TRANSPORT_REGEXP = ".*transport ([a-z][_a-z0-9]*)"
TRANSPORT_PATTERN = re.compile(TRANSPORT_REGEXP)
#: A regular expression that matches country codes in requests for unblocked
#: bridges.
UNBLOCKED_REGEXP = ".*unblocked ([a-z]{2,4})"
UNBLOCKED_PATTERN = re.compile(UNBLOCKED_REGEXP)
def determineBridgeRequestOptions(lines):
"""Figure out which :class:`Bridges.BridgeFilter`s to apply, or offer help.
.. note:: If any ``'transport TYPE'`` was requested, or bridges not
blocked in a specific CC (``'unblocked CC'``), then the ``TYPE``
and/or ``CC`` will *always* be stored as a *lowercase* string.
:param list lines: A list of lines from an email, including the headers.
:raises EmailRequestedHelp: if the client requested help.
:raises EmailRequestedKey: if the client requested our GnuPG key.
:rtype: :class:`EmailBridgeRequest`
:returns: A :class:`~bridgerequst.BridgeRequest` with all of the requested
parameters set. The returned ``BridgeRequest`` will have already had
its filters generated via :meth:`~EmailBridgeRequest.generateFilters`.
"""
request = EmailBridgeRequest()
skippedHeaders = False
for line in lines:
line = line.strip().lower()
# Ignore all lines before the first empty line:
if not line: skippedHeaders = True
if not skippedHeaders: continue
if ("help" in line) or ("halp" in line):
raise EmailRequestedHelp("Client requested help.")
if "get" in line:
request.isValid(True)
logging.debug("Email request was valid.")
if "key" in line:
request.wantsKey(True)
raise EmailRequestedKey("Email requested a copy of our GnuPG key.")
if "ipv6" in line:
request.withIPv6()
if "transport" in line:
request.withPluggableTransportType(line)
if "unblocked" in line:
request.withoutBlockInCountry(line)
logging.debug("Generating hashring filters for request.")
request.generateFilters()
return request
class EmailBridgeRequest(bridgerequest.BridgeRequestBase):
"""We received a request for bridges through the email distributor."""
def __init__(self):
"""Process a new bridge request received through the
:class:`~bridgedb.Dist.EmailBasedDistributor`.
"""
super(EmailBridgeRequest, self).__init__()
self._isValid = False
self._wantsKey = False
def isValid(self, valid=None):
"""Get or set the validity of this bridge request.
If called without parameters, this method will return the current
state, otherwise (if called with the **valid** parameter), it will set
the current state of validity for this request.
:param bool valid: If given, set the validity state of this
request. Otherwise, get the current state.
"""
if valid is not None:
self._isValid = bool(valid)
return self._isValid
def wantsKey(self, wantsKey=None):
"""Get or set whether this bridge request wanted our GnuPG key.
If called without parameters, this method will return the current
state, otherwise (if called with the **wantsKey** parameter set), it
will set the current state for whether or not this request wanted our
key.
:param bool wantsKey: If given, set the validity state of this
request. Otherwise, get the current state.
"""
if wantsKey is not None:
self._wantsKey = bool(wantsKey)
return self._wantsKey
def withoutBlockInCountry(self, line):
"""This request was for bridges not blocked in **country**.
Add any country code found in the **line** to the list of
``notBlockedIn``. Currently, a request for a transport is recognized
if the email line contains the ``'unblocked'`` command.
:param str country: The line from the email wherein the client
requested some type of Pluggable Transport.
"""
unblocked = None
logging.debug("Parsing 'unblocked' line: %r" % line)
try:
unblocked = UNBLOCKED_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if unblocked:
self.notBlockedIn.append(unblocked)
logging.info("Email requested bridges not blocked in: %r"
% unblocked)
def withPluggableTransportType(self, line):
"""This request included a specific Pluggable Transport identifier.
Add any Pluggable Transport method TYPE found in the **line** to the
list of ``transports``. Currently, a request for a transport is
recognized if the email line contains the ``'transport'`` command.
:param str line: The line from the email wherein the client
requested some type of Pluggable Transport.
"""
transport = None
logging.debug("Parsing 'transport' line: %r" % line)
try:
transport = TRANSPORT_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if transport:
self.transports.append(transport)
logging.info("Email requested transport type: %r" % transport)
| 37.406417
| 79
| 0.666905
| 802
| 6,995
| 5.578554
| 0.283042
| 0.014305
| 0.020116
| 0.01274
| 0.236477
| 0.228431
| 0.217702
| 0.217702
| 0.206527
| 0.206527
| 0
| 0.0063
| 0.251179
| 6,995
| 186
| 80
| 37.607527
| 0.847843
| 0.551966
| 0
| 0.086957
| 0
| 0
| 0.123576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0.028986
| 0.101449
| 0
| 0.246377
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c104172c7871ed658426c42c033c011c356f2f0 | size: 2,250 | ext: py | lang: Python
max_stars:  path=packages/pyre/schemata/Container.py | name=avalentino/pyre | head_hexsha=7e1f0287eb7eba1c6d1ef385e5160079283ac363 | licenses=["BSD-3-Clause"] | count=25 | event_min_datetime=2018-04-23T01:45:39.000Z | event_max_datetime=2021-12-10T06:01:23.000Z
max_issues: path=packages/pyre/schemata/Container.py | name=avalentino/pyre | head_hexsha=7e1f0287eb7eba1c6d1ef385e5160079283ac363 | licenses=["BSD-3-Clause"] | count=53 | event_min_datetime=2018-05-31T04:55:00.000Z | event_max_datetime=2021-10-07T21:41:32.000Z
max_forks:  path=packages/pyre/schemata/Container.py | name=avalentino/pyre | head_hexsha=7e1f0287eb7eba1c6d1ef385e5160079283ac363 | licenses=["BSD-3-Clause"] | count=12 | event_min_datetime=2018-04-23T22:50:40.000Z | event_max_datetime=2022-02-20T17:27:23.000Z
content:
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
# superclass
from .Schema import Schema
# declaration
class Container(Schema):
"""
The base class for type declarators that are sequences of other types
"""
# constants
typename = 'container' # the name of my type
isContainer = True
@property
def container(self):
"""
The default container represented by this schema
"""
# complain that the subclass is not constructed properly
raise NotImplementedError(
"class {.__name__} must define a {container} type".format(type(self)))
# interface
def coerce(self, value, **kwds):
"""
Convert {value} into an iterable
"""
# get the worker to build an iterable, cast it into my container type and return it
return self.container(self._coerce(value=value, **kwds))
def render(self, renderer, value, workload):
"""
Render {value} using {renderer}
"""
# get my schema
schema = self.schema
# render just my name
yield renderer.trait(name=self.name, value='')
# go through the items
for item in value:
# ask my schema to render each one
entry = ','.join(schema.render(renderer=renderer, value=item,
workload=workload, incognito=True))
# and put it on a separate line
yield renderer.value(value=f"{entry},")
# all done
return
# meta-methods
def __init__(self, default=object, schema=Schema(), **kwds):
# adjust the default; carefully, so we don't all end up using the same global container
# checking for {None} is not appropriate here; the user may want {None} as the default
# value; we need a way to know that {default} was not supplied: use a TYPE (in this
# case object) as the marker
default = self.container() if default is object else default
# chain up with my default
super().__init__(default=default, **kwds)
# save my schema
self.schema = schema
# all done
return
# end of file
| 28.481013
| 95
| 0.597333
| 275
| 2,250
| 4.84
| 0.483636
| 0.022539
| 0.024042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005825
| 0.313333
| 2,250
| 78
| 96
| 28.846154
| 0.855663
| 0.423556
| 0
| 0.086957
| 0
| 0
| 0.055696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.043478
| 0
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: 9c108597606416225c709da3b768b53eee32eb1f | size: 98,110 | ext: py | lang: Python
max_stars:  path=electronicparsers/exciting/parser.py | name=nomad-coe/electronic-parsers | head_hexsha=defb47be6ac22b2e48d4fb9204c85390a3c2f328 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: path=electronicparsers/exciting/parser.py | name=nomad-coe/electronic-parsers | head_hexsha=defb47be6ac22b2e48d4fb9204c85390a3c2f328 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  path=electronicparsers/exciting/parser.py | name=nomad-coe/electronic-parsers | head_hexsha=defb47be6ac22b2e48d4fb9204c85390a3c2f328 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
import re
import logging
from nomad.units import ureg
from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser
from nomad.datamodel.metainfo.simulation.run import Run, Program
from nomad.datamodel.metainfo.simulation.method import (
Method, DFT, Electronic, Smearing, XCFunctional, Functional,
GW as GWMethod, Scf, BasisSet
)
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms
)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges,
Forces, ForcesEntry, ScfIteration, BandGap
)
from nomad.datamodel.metainfo.workflow import Workflow, GeometryOptimization
from .metainfo.exciting import x_exciting_section_MT_charge_atom, x_exciting_section_MT_moment_atom,\
x_exciting_section_spin, x_exciting_section_fermi_surface,\
x_exciting_section_atoms_group
re_float = r'[-+]?\d+\.\d*(?:[Ee][-+]\d+)?'
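# Note (added, not in the original source): re_float is intended to match decimal reals
# with an optional signed exponent, e.g. '3.14' or '-1.50E+02' (the exponent sign is
# required by the pattern). A minimal, self-contained sanity check:
#
#   import re
#   assert re.fullmatch(re_float, '3.14')
#   assert re.fullmatch(re_float, '-1.50E+02')
#   assert re.fullmatch(re_float, '42') is None    # no decimal point -> no match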
class GWInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_frequency(val_in):
val = [v.split() for v in val_in.split('\n')]
val = np.transpose(np.array([v for v in val if len(v) == 3], float))
return dict(
number=np.array(val[0], dtype=int), values=val[1] * ureg.hartree,
weights=val[2])
# TODO Read also input parameters here if input_GW.xml does not exist
self._quantities.append(
Quantity(
'frequency_data', r'frequency list:\s*\<\s*#\s*freqs\s*weight\s*>\s*([\d\.Ee\s\-]+)',
str_operation=str_to_frequency, repeats=False)
)
self._quantities.append(
Quantity(
'fermi_energy', r'\-\s*G0W0.+\-\s*\-+\s*[\s\S]*?Fermi [Ee]nergy\s*[:=](\s*-?[\d\.]+)\s',
unit=ureg.hartree, repeats=False)
)
self._quantities.append(
Quantity(
'direct_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Direct BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'fundamental_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Fundamental BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'optical_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Optical BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
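# Illustrative note (added): the 'frequency_data' quantity above turns a block such as
#
#     1    5.299195E-03   1.357622E-02
#     2    2.771249E-02   3.112676E-02
#
# into a dict {'number': [1, 2], 'values': [...] * hartree, 'weights': [...]} via
# str_to_frequency (rows are split, rows without exactly three columns are dropped, and
# the columns are transposed). The numbers shown here are made up for illustration only.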
class ExcitingEvalqpParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_eigenvalue(val_in):
val = val_in.strip().split('\n')
kpts = np.array(val[0].split(), dtype=float)
keys = val[1].split()
eigs = np.transpose(np.array([v.split() for v in val[2:]], dtype=float))
eigs = {keys[i]: eigs[i] for i in range(len(keys))}
return [kpts, eigs]
self._quantities.append(
Quantity(
'kpoints_eigenvalues', r'\s*k\-point \#\s*\d+:\s*([\d\s\.\-]+)([ \w\(\)]+\n)([\s\d\.\-Ee]+)',
str_operation=str_to_eigenvalue, repeats=True))
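# Illustrative note (added): each 'kpoints_eigenvalues' match is converted by
# str_to_eigenvalue into a pair [kpts, eigs], where kpts holds the k-point line and
# eigs maps each column header of the EVALQP block (e.g. 'E_GW', 'Sx', 'Vxc') to a
# numpy array over states. The header names given here are examples taken from how
# _parse_evalqp later consumes this data.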
class BandstructureDatParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
        # TODO make a parent class for bandstructure dat and xml
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._vertices = None
self._distances = None
self._band_energies = None
self._band_k_points = None
@property
def band_energies(self):
if self._band_energies is None:
if self.data is None:
return
data = np.transpose(self.data)
n_kpoints = int(max(data[1]))
bands = data[6:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def band_k_points(self):
if self._band_k_points is None:
data = np.transpose(self.data)
self._band_k_points = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
self._band_k_points.append(
np.transpose(data[2:5])[start:end])
start = end
return self._band_k_points
@property
def distances(self):
if self._distances is None:
data = np.transpose(self.data)
self._distances = data[5][:int(max(data[1]))]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data))[0] - 6
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)
self._neigs_segment = int(max(data[0]))
return self._neigs_segment
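# Illustrative note (added): number_of_k_points_per_segment above detects segment
# boundaries by looking for repeated values in the distance column (the end point of
# one segment and the start of the next share the same distance). A minimal sketch of
# the same idea on a made-up distance array, assuming only numpy:
#
#   import numpy as np
#   distances = np.array([0.0, 0.1, 0.2, 0.2, 0.3, 0.4])
#   # -> two segments with 3 k-points each, mirroring the counting loop above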
class BandOutParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._distances = None
self._band_energies = None
self._neigs_segment = None
self._nkpts_segment = None
@property
def band_energies(self):
if self._band_energies is None:
data = np.transpose(self.data)
n_kpoints = np.where(data[0] == data[0][0])[0][1]
bands = data[1:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def distances(self):
if self._distances is None:
dist = np.transpose(self.data)[0]
n_k_points = np.where(dist == dist[0])[0][1]
self._distances = dist[:n_k_points]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data)[1:])[0]
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)[0]
self._neigs_segment = len(np.where(data == data[0])[0])
return self._neigs_segment
class BandstructureXMLParser(XMLParser):
def __init__(self, **kwargs):
# TODO make a parent class for dos and bandstructure
super().__init__(None)
self._distance_key = 'distance'
self._coord_key = 'coord'
self._energy_key = 'eval'
self._vertex_key = 'vertex'
self._band_key = 'band'
self._atom_key = 'atom'
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._bands = None
self._vertices = None
self._distances = None
self._species = None
@property
def distances(self):
if self._distances is None:
if not self.bands:
return
self._distances = [
point.attrib.get(self._distance_key) for point in self.bands[0][0]]
self._distances = np.array(self._distances, dtype=float)
return self._distances
@property
def bands(self):
if self._bands is None:
bands = self.root.findall('./%s' % self._band_key)
self._bands = []
if bands:
self._bands.append(bands)
# add atom-resolved
bands_atom = self.root.findall('./*/%s' % self._atom_key)
for band in bands_atom:
self._bands.append(band.findall('./%s' % self._band_key))
return self._bands
@property
def vertices(self):
if self._vertices is None:
self._vertices = self.root.findall('./%s' % self._vertex_key)
return self._vertices
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = 1
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
                    self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
self._neigs_segment = len(self.bands[0]) // self.number_of_spin_channels
return self._neigs_segment
def parse(self, key):
if self._results is None:
self._results = dict()
if not self.bands:
return
if key == 'band_energies':
# TODO I am not certain about the format for the spin polarized case
# I cannot find an example bandstructure file
            # atom-resolved bandstructures are added as separate section_k_band sections
res = []
for n in range(len(self.bands)):
res_n = []
start = 0
band_energies = np.zeros((
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues,
len(self.distances)), dtype=float)
for i in range(len(self.bands[n])):
band_energies[i % self.number_of_spin_channels][i] = np.array(
[e.attrib.get(self._energy_key) for e in self.bands[n][i]])
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([
np.transpose(energy)[start:end] for energy in band_energies])
if self._energy_unit is not None:
band_energy = band_energy * self._energy_unit
res_n.append(band_energy)
start = end
res.append(res_n)
elif key == 'band_k_points':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = np.array(
self.vertices[i].attrib.get(self._coord_key).split(), dtype=float)
end = np.array(
self.vertices[i + 1].attrib.get(self._coord_key).split(), dtype=float)
res.append(np.linspace(start, end, self.number_of_k_points_per_segment[i]))
elif key == 'band_segm_labels':
res = []
for i in range(len(self.vertices) - 1):
start = self.vertices[i].attrib.get('label')
end = self.vertices[i + 1].attrib.get('label')
res.append([
'\u0393' if start.lower() == 'gamma' else start,
'\u0393' if end.lower() == 'gamma' else end])
elif key == 'band_segm_start_end':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = self.vertices[i].attrib.get(self._coord_key).split()
end = self.vertices[i + 1].attrib.get(self._coord_key).split()
res.append([start, end])
else:
res = None
self._results[key] = res
class DOSXMLParser(XMLParser):
def __init__(self, **kwargs):
super().__init__(None)
self._nspin_key = 'nspin'
self._totaldos_key = 'totaldos'
self._partialdos_key = 'partialdos'
self._diagram_key = 'diagram'
self._l_key = 'l'
self._m_key = 'm'
self._energy_key = 'e'
self._dos_key = 'dos'
self._unit_key = 'unit'
self._energy_unit = kwargs.get('energy_unit', None)
self._units_mapping = dict(hartree=ureg.hartree)
def init_parameters(self):
self._ndos = None
self._natoms = None
self._nspin = None
self._nlm = None
self._energies = None
self._total_dos = None
self._partial_dos = None
@property
def energy_unit(self):
if self._energy_unit is None:
axis = self.root.find('./axis')
if axis is None:
return
self._energy_unit = self._units_mapping.get(axis.attrib.get(self._unit_key).lower(), 1)
return self._energy_unit
@property
def number_of_spin_channels(self):
if self._nspin is None:
if not self.total_dos:
return
self._nspin = len(self.total_dos)
return self._nspin
@property
def number_of_atoms(self):
if self._natoms is None:
partial_dos = self.root.findall('./%s' % self._partialdos_key)
self._natoms = len(partial_dos)
return self._natoms
@property
def number_of_dos(self):
if self._ndos is None:
total_dos = self.root.find('./%s/%s' % (self._totaldos_key, self._diagram_key))
self._ndos = len(total_dos)
return self._ndos
@property
def number_of_lm(self):
if self._nlm is None:
if self.partial_dos is None:
return
self._nlm = 0
l_list = set([int(e.attrib.get(self._l_key)) for e in self.partial_dos])
for li in l_list:
self._nlm += 2 * li + 1
return self._nlm
@property
def total_dos(self):
if self._total_dos is None:
self._total_dos = self.root.findall('./%s/%s' % (self._totaldos_key, self._diagram_key))
return self._total_dos
@property
def partial_dos(self):
if self._partial_dos is None:
self._partial_dos = self.root.findall('./%s/%s' % (self._partialdos_key, self._diagram_key))
return self._partial_dos
@property
def energies(self):
if self._energies is None:
if self.total_dos is None:
return
self._energies = np.array(
[float(point.attrib.get(self._energy_key)) for point in self.total_dos[0]])
if self.energy_unit is not None:
self._energies = self._energies * self.energy_unit
return self._energies
def _get_dos(self, diagram):
dos = np.array(
[point.attrib.get(self._dos_key) for point in diagram], dtype=float)
return dos
def parse(self, key):
if self._results is None:
self._results = dict()
if 'total' in key:
if not self.total_dos:
return
res = np.zeros((self.number_of_spin_channels, self.number_of_dos))
for i in range(len(self.total_dos)):
spin = self.total_dos[i].attrib.get(self._nspin_key, i)
res[i] = self._get_dos(self._total_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif 'partial' in key:
if not self.partial_dos:
return
res = np.zeros((
self.number_of_lm, self.number_of_spin_channels, self.number_of_atoms, self.number_of_dos))
for i in range(len(self.partial_dos)):
spin = self.partial_dos[i].attrib.get(self._nspin_key, None)
if spin is None:
spin = (i % (self.number_of_spin_channels * self.number_of_lm)) // self.number_of_lm
else:
spin = int(spin) - 1
val_l = self.partial_dos[i].attrib.get(self._l_key, None)
val_m = self.partial_dos[i].attrib.get(self._m_key, None)
if val_l is None or val_m is None:
lm = i % self.number_of_lm
else:
lm = int(val_l) ** 2 + int(val_m) + int(val_l)
atom = i // (self.number_of_lm * self.number_of_spin_channels)
res[lm][spin][atom] = self._get_dos(self.partial_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif key == 'energies':
return self.energies
else:
res = None
self._results[key] = res
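# Illustrative note (added): for the partial DOS, the combined (l, m) index used above is
# lm = l**2 + l + m, which enumerates m = -l ... +l contiguously for each l:
#
#   (l=0, m=0)  -> 0
#   (l=1, m=-1) -> 1,  (l=1, m=0) -> 2,  (l=1, m=+1) -> 3
#   (l=2, m=-2) -> 4,  ...            ,  (l=2, m=+2) -> 8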
class ExcitingFermiSurfaceBxsfParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'fermi_energy', r'Fermi Energy:\s*([\d\.]+)\s*', unit=ureg.hartree, repeats=False))
def str_to_band_parameters(val_in):
val = val_in.strip().split('\n')
nbands = int(val[0])
mesh = np.array(val[1].split(), dtype=int)
origin = np.array(val[2].split(), dtype=float)
vector = np.array([v.split() for v in val[3:6]], dtype=float)
return [nbands, mesh, origin, vector]
self._quantities.append(
Quantity(
'band_parameters', r'BANDGRID_3D_BANDS\s*([\d\.\-Ee\s]+)',
str_operation=str_to_band_parameters, repeats=False))
self._quantities.append(
Quantity(
'fermi_surface', r'BAND:\s*\d+\s*([\d\-\+\.Ee\s]+)\n *E*', unit=ureg.hartree,
repeats=True))
class ExcitingEigenvalueParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'k_points', r'\s*\d+\s*([\d\.Ee\- ]+):\s*k\-point', repeats=True))
def str_to_eigenvalues(val_in):
val = val_in[:val_in.rfind('\n \n')].strip()
val = np.array([v.split() for v in val.split('\n')], dtype=float)
val = np.transpose(val)
occs = val[-1]
eigs = val[-2]
nspin = 2 if occs[0] == 1. else 1
data = dict()
data['occupancies'] = np.reshape(occs, (nspin, len(occs) // nspin))
data['eigenvalues'] = np.reshape(eigs, (nspin, len(eigs) // nspin))
return data
self._quantities.append(
Quantity(
'eigenvalues_occupancies', r'\(state\, eigenvalue and occupancy below\)\s*([\d\.Ee\-\s]+?(?:\n *\n))',
str_operation=str_to_eigenvalues, repeats=True))
class ExcitingGWOutParser(TextParser):
def __init__(self, mainfile, logger):
super().__init__(mainfile, logger=logger)
def init_quantities(self):
self._quantities = []
class ExcitingInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
re_symbol = re.compile(r'([A-Z][a-z]?)')
def str_to_array(val_in):
val = [v.split(':')[-1].split() for v in val_in.strip().split('\n')]
val = val[0] if len(val) == 1 else val
return np.array(val, dtype=float)
def str_to_atom_properties_dict(val_in):
unit = None
if 'charge' in val_in:
unit = ureg.elementary_charge
elif 'moment' in val_in:
unit = ureg.elementary_charge * ureg.bohr
val = val_in.strip().split('\n')
properties = dict()
atom_resolved = []
species = None
for v in val:
v = v.strip().split(':')
if len(v) < 2:
continue
elif v[0].startswith('species'):
species = re.search(re_symbol, v[-1]).group(1)
elif v[0].startswith('atom'):
v[0] = v[0].split()
v[1] = [float(vi) for vi in v[1].split()]
v[1] = v[1][0] if len(v[1]) == 1 else v[1]
if species is None:
species = v[0][2]
                    atom_resolved.append((species, v[1] * unit))
else:
vi = [float(vii) for vii in v[1].split()]
vi = vi[0] if len(vi) == 1 else vi
properties[v[0].strip()] = vi * unit
properties['atom_resolved'] = atom_resolved
return properties
def str_to_quantity_tolerances(val_in):
return val_in.strip().replace('(', '').replace(')', '').split()
def str_to_energy_dict(val_in):
val = val_in.strip().split('\n')
energies = dict()
for v in val:
v = v.split(':')
if len(v) < 2:
continue
energies[v[0].strip()] = float(v[1]) * ureg.hartree
return energies
self._quantities = [Quantity(
'program_version', r'\s*EXCITING\s*([\w\-\(\)\. ]+)\s*started', repeats=False,
dtype=str, flatten=False)]
initialization_quantities = [
Quantity(
'lattice_vectors',
r'Lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=ureg.bohr, repeats=False, convert=False),
Quantity(
'lattice_vectors_reciprocal',
r'Reciprocal lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=1 / ureg.bohr, repeats=False, convert=False),
]
self._system_keys_mapping = {
'x_exciting_unit_cell_volume': ('Unit cell volume', ureg.bohr ** 3),
'x_exciting_brillouin_zone_volume': ('Brillouin zone volume', 1 / ureg.bohr ** 3),
'x_exciting_number_of_atoms': ('Total number of atoms per unit cell', None),
'x_exciting_spin_treatment': ('Spin treatment', None),
'x_exciting_number_of_bravais_lattice_symmetries': ('Number of Bravais lattice symmetries', None),
'x_exciting_number_of_crystal_symmetries': ('Number of crystal symmetries', None),
'x_exciting_kpoint_grid': (r'k\-point grid', None),
'x_exciting_kpoint_offset': (r'k\-point offset', None),
'x_exciting_number_kpoints': (r'Total number of k\-points', None),
'x_exciting_rgkmax': (r'R\^MT\_min \* \|G\+k\|\_max \(rgkmax\)', None),
'x_exciting_species_rtmin': (r'Species with R\^MT\_min', None),
'x_exciting_gkmax': (r'Maximum \|G\+k\| for APW functions', 1 / ureg.bohr),
'x_exciting_gmaxvr': (r'Maximum \|G\| for potential and density', 1 / ureg.bohr),
'x_exciting_gvector_size': (r'G\-vector grid sizes', None),
'x_exciting_gvector_total': (r'Total number of G\-vectors', None),
'x_exciting_lmaxapw': (r' APW functions', None),
'x_exciting_nuclear_charge': ('Total nuclear charge', ureg.elementary_charge),
'x_exciting_electronic_charge': ('Total electronic charge', ureg.elementary_charge),
'x_exciting_core_charge_initial': ('Total core charge', ureg.elementary_charge),
'x_exciting_valence_charge_initial': ('Total valence charge', ureg.elementary_charge),
'x_exciting_wigner_radius': (r'Effective Wigner radius, r\_s', ureg.bohr),
'x_exciting_empty_states': ('Number of empty states', None),
'x_exciting_valence_states': ('Total number of valence states', None),
'x_exciting_hamiltonian_size': ('Maximum Hamiltonian size', None),
'x_exciting_pw': (r'Maximum number of plane\-waves', None),
'x_exciting_lo': (r'Total number of local\-orbitals', None)}
self._method_keys_mapping = {
'smearing_kind': ('Smearing scheme', None),
'smearing_width': ('Smearing width', None)}
for name, key_unit in self._system_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
for name, key_unit in self._method_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
initialization_quantities.append(Quantity(
'species',
rf'(Species : *\d+ *\(\w+\)[\s\S]+?{re_float} *{re_float} *{re_float}\n\s*\n)',
repeats=True, sub_parser=TextParser(quantities=[
Quantity('number', r'Species : *(\d+)', dtype=np.int32),
Quantity('symbol', r'\((\w+)\)'),
Quantity('file', r'parameters loaded from *: *(.+)'),
Quantity('name', r'name *: *(.+)'),
Quantity('nuclear_charge', rf'nuclear charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('electronic_charge', rf'electronic charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('atomic_mass', rf'atomic mass *: *({re_float})', dtype=np.float64, unit=ureg.electron_mass),
Quantity('muffin_tin_radius', rf'muffin-tin radius *: *({re_float})', dtype=np.float64, unit=ureg.bohr),
Quantity('radial_points', rf'radial points in muffin-tin *: *({re_float})', dtype=np.int32),
Quantity('positions_format', r'atomic positions \((.+?)\)', flatten=False),
Quantity(
'positions',
rf'\d+ : *({re_float}) *({re_float}) *({re_float})',
repeats=True, dtype=np.dtype(np.float64))])))
initialization_quantities.append(Quantity(
'potential_mixing', r'Using ([\w ]+) potential mixing', repeats=False, flatten=False)
)
initialization_quantities.append(Quantity(
'xc_functional', r'(Exchange-correlation type[\s\S]+?\n *\n)',
sub_parser=TextParser(quantities=[
Quantity('type', r'Exchange-correlation type +: +(\S+)'),
Quantity(
'name_reference',
r'\n *(.+?,.+)',
str_operation=lambda x: [v.strip() for v in x.split(':')]),
Quantity(
'parameters',
r'\n *(.+?:.+)', repeats=True,
str_operation=lambda x: [v.strip() for v in x.split(':')])]))
)
self._quantities.append(Quantity(
'initialization',
r'(?:All units are atomic|Starting initialization)([\s\S]+?)(?:Using|Ending initialization)', repeats=False,
sub_parser=TextParser(quantities=initialization_quantities))
)
scf_quantities = [
Quantity(
'energy_total', r'[Tt]*otal energy\s*:\s*([\-\d\.Ee]+)', repeats=False,
dtype=float, unit=ureg.hartree),
Quantity(
'energy_contributions', r'(?:Energies|_)([\+\-\s\w\.\:]+?)\n *(?:DOS|Density)',
str_operation=str_to_energy_dict, repeats=False, convert=False),
Quantity(
'x_exciting_dos_fermi',
r'DOS at Fermi energy \(states\/Ha\/cell\)\s*:\s*([\-\d\.Ee]+)',
repeats=False, dtype=float, unit=1 / ureg.hartree),
Quantity(
'charge_contributions',
r'(?:Charges|Electron charges\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False),
Quantity(
'moment_contributions',
r'(?:Moments\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False)]
self._miscellaneous_keys_mapping = {
'x_exciting_gap': (r'Estimated fundamental gap', ureg.hartree),
'time': (r'Wall time \(seconds\)', ureg.s)}
for name, key_unit in self._miscellaneous_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\-\d\.Ee]+)' % key_unit[0], repeats=False,
unit=key_unit[1]))
self._convergence_keys_mapping = {
'x_exciting_effective_potential_convergence': (
r'RMS change in effective potential \(target\)', ureg.hartree),
'x_exciting_energy_convergence': (
r'Absolute change in total energy\s*\(target\)', ureg.hartree),
'x_exciting_charge_convergence': (
r'Charge distance\s*\(target\)', ureg.elementary_charge),
'x_exciting_IBS_force_convergence': (
r'Abs\. change in max\-nonIBS\-force\s*\(target\)', ureg.hartree / ureg.bohr)}
for name, key_unit in self._convergence_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\(\)\d\.\-\+Ee ]+)' % key_unit[0],
str_operation=str_to_quantity_tolerances, unit=key_unit[1], repeats=False))
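        # Illustrative note (added): the two loops above generate one Quantity per mapping
        # entry. For example, the 'x_exciting_charge_convergence' entry expands to the
        # pattern r'Charge distance\s*\(target\)\s*\:*\s*([\(\)\d\.\-\+Ee ]+)', whose match
        # is then split into (value, target) by str_to_quantity_tolerances.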
module_quantities = [
Quantity(
'scf_iteration', r'(?:I| i)teration number :([\s\S]+?)(?:\n *\n\+{10}|\+\-{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=True),
Quantity(
'final',
r'(?:Convergence targets achieved\. Performing final SCF iteration|Reached self-consistent loops maximum)([\s\S]+?)(\n *\n\+{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=False),
Quantity(
'atomic_positions',
r'(Atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces', r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr)
]
self._quantities.append(Quantity(
'groundstate',
r'(?:Self\-consistent loop started|Groundstate module started)([\s\S]+?)Groundstate module stopped',
sub_parser=TextParser(quantities=module_quantities), repeats=False))
optimization_quantities = [
Quantity(
'atomic_positions',
r'(Atomic positions at this step\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions at this step\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Time',
repeats=False, str_operation=str_to_array, convert=False, unit=ureg.hartree / ureg.bohr),
Quantity(
'step', r'Optimization step\s*(\d+)', repeats=False, dtype=int),
Quantity(
'method', r'method\s*=\s*(\w+)', repeats=False, dtype=str),
Quantity(
'n_scf_iterations',
r'Number of (?:total)* scf iterations\s*\:\s*(\d+)', repeats=False, dtype=int),
Quantity(
'force_convergence',
r'Maximum force magnitude\s*\(target\)\s*\:(\s*[\(\)\d\.\-\+Ee ]+)',
str_operation=str_to_quantity_tolerances, unit=ureg.hartree / ureg.bohr, repeats=False,
dtype=float),
Quantity(
'energy_total', r'Total energy at this optimization step\s*\:\s*([\-\d\.Ee]+)',
unit=ureg.hartree, repeats=False, dtype=float),
Quantity(
'time', r'Time spent in this optimization step\s*\:\s*([\-\d\.Ee]+)\s*seconds',
unit=ureg.s, repeats=False, dtype=float)
]
self._quantities.append(Quantity(
'structure_optimization',
r'Structure\-optimization module started([\s\S]+?)Structure\-optimization module stopped',
sub_parser=TextParser(quantities=[
Quantity(
'optimization_step',
r'(Optimization step\s*\d+[\s\S]+?(?:\n *\n\-{10}|Time spent in this optimization step\s*:\s*[\d\.]+ seconds))',
sub_parser=TextParser(quantities=optimization_quantities),
repeats=True),
Quantity(
'final',
r'Force convergence target achieved([\s\S]+?Opt)',
sub_parser=TextParser(quantities=scf_quantities),
repeats=False),
Quantity(
'atomic_positions',
r'(imized atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'imized atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr),
]), repeats=False))
self._quantities.append(Quantity(
'hybrids',
r'Hybrids module started([\s\S]+?)Hybrids module stopped',
sub_parser=TextParser(quantities=module_quantities)
))
def get_atom_labels(self, section):
labels = section.get('symbols')
if labels is None:
# we get it by concatenating species symbols
species = self.get('initialization', {}).get('species', [])
labels = []
for specie in species:
labels += [specie.get('symbol')] * len(specie.get('positions'))
return labels
def get_positions_format(self, section):
positions_format = section.get('positions_format')
if positions_format is None:
species = self.get_initialization_parameter('species', [])
for specie in species:
positions_format = specie.get('positions_format', None)
if positions_format is not None:
break
return positions_format
def get_atom_positions(self, section={}, positions=None, positions_format=None):
positions = positions if positions is not None else section.get('positions')
if positions is None:
species = self.get_initialization_parameter('species', [])
if species:
positions = np.vstack([s.get('positions') for s in species])
if positions is None:
return
positions = np.array(positions)
positions_format = positions_format if positions_format is not None else self.get_positions_format(section)
if positions_format == 'lattice':
cell = self.get_initialization_parameter('lattice_vectors')
if cell is None:
return
positions = np.dot(positions, cell.magnitude)
return positions * ureg.bohr
def get_scf_threshold(self, name):
reference = self.get('groundstate', self.get('hybrids', {}))
return reference.get('scf_iteration', [{}])[-1].get(
name, [None, None])[-1]
def get_scf_quantity(self, name):
n_scf = len(self.get('energy_total_scf_iteration', []))
quantity = self.get('%s_scf_iteration' % name)
if quantity is None:
return
        # this is really problematic if some scf steps don't have the quantity;
        # the only thing we can do is assume that the first steps are the ones
        # with the missing quantity
if len(quantity) < n_scf:
quantity = [None] * (n_scf - len(quantity)) + quantity
return quantity
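    # Illustrative note (added): the padding in get_scf_quantity assumes that missing
    # values belong to the earliest SCF steps. For example, with n_scf = 4 and a quantity
    # that only appeared in the last two iterations, [a, b] becomes [None, None, a, b].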
def get_xc_functional_name(self):
# TODO expand list to include other xcf
xc_functional_map = {
2: ['LDA_C_PZ', 'LDA_X_PZ'],
3: ['LDA_C_PW', 'LDA_X_PZ'],
4: ['LDA_C_XALPHA'],
5: ['LDA_C_VBH'],
20: ['GGA_C_PBE', 'GGA_X_PBE'],
21: ['GGA_C_PBE', 'GGA_X_PBE_R'],
22: ['GGA_C_PBE_SOL', 'GGA_X_PBE_SOL'],
26: ['GGA_C_PBE', 'GGA_X_WC'],
            30: ['GGA_C_AM05', 'GGA_X_AM05'],
300: ['GGA_C_BGCP', 'GGA_X_PBE'],
406: ['HYB_GGA_XC_PBEH'],
408: ['HYB_GGA_XC_HSE03']}
xc_functional = self.get('initialization', {}).get('xc_functional', None)
if xc_functional is None:
return []
name = xc_functional_map.get(xc_functional.type, [])
return name
@property
def n_optimization_steps(self):
return len(self.get('structure_optimization', {}).get('optimization_step', []))
def get_number_of_spin_channels(self):
spin_treatment = self.get('initialization', {}).get(
'x_exciting_spin_treatment', 'spin-unpolarised')
n_spin = 1 if spin_treatment.lower() == 'spin-unpolarised' else 2
return n_spin
def get_unit_cell_volume(self):
return self.get('initialization', {}).get('x_exciting_unit_cell_volume', 1.0 * ureg.bohr ** 3)
def get_initialization_parameter(self, key, default=None):
return self.get('initialization', {}).get(key, default)
class ExcitingParser:
def __init__(self):
self.info_parser = ExcitingInfoParser()
self.dos_parser = DOSXMLParser(energy_unit=ureg.hartree)
self.bandstructure_parser = BandstructureXMLParser(energy_unit=ureg.hartree)
self.eigval_parser = ExcitingEigenvalueParser()
self.fermisurf_parser = ExcitingFermiSurfaceBxsfParser()
self.evalqp_parser = ExcitingEvalqpParser()
self.dos_out_parser = DataTextParser()
self.bandstructure_dat_parser = BandstructureDatParser(energy_unit=ureg.hartree)
self.band_out_parser = BandOutParser(energy_unit=ureg.hartree)
self.info_gw_parser = GWInfoParser()
self.input_xml_parser = XMLParser()
self.data_xs_parser = DataTextParser()
self.data_clathrate_parser = DataTextParser(dtype=str)
# different names for different versions of exciting
self._energy_keys_mapping = {
'energy_total': ['Total energy', 'total energy'],
'x_exciting_fermi_energy': ['Fermi energy', 'Fermi'],
'energy_kinetic_electronic': ['Kinetic energy', 'electronic kinetic'],
'energy_coulomb': ['Coulomb energy', 'Coulomb'],
'x_exciting_coulomb_energy': ['Coulomb energy', 'Coulomb'],
'energy_exchange': ['Exchange energy', 'exchange'],
'x_exciting_exchange_energy': ['Exchange energy', 'exchange'],
'energy_correlation': ['Correlation energy', 'correlation'],
'x_exciting_correlation_energy': ['Correlation energy', 'correlation'],
'energy_sum_eigenvalues': ['Sum of eigenvalues', 'sum of eigenvalues'],
'x_exciting_effective_potential_energy': ['Effective potential energy'],
'x_exciting_coulomb_potential_energy': ['Coulomb potential energy', 'Coulomb potential'],
'energy_xc_potential': ['xc potential energy', 'xc potential'],
'energy_electrostatic': ['Hartree energy', 'Hartree'],
'x_exciting_hartree_energy': ['Hartree energy', 'Hartree'],
'x_exciting_electron_nuclear_energy': ['Electron-nuclear energy', 'electron-nuclear '],
'x_exciting_nuclear_nuclear_energy': ['Nuclear-nuclear energy', 'nuclear-nuclear'],
'x_exciting_madelung_energy': ['Madelung energy', 'Madelung'],
'x_exciting_core_electron_kinetic_energy': ['Core-electron kinetic energy', 'core electron kinetic'],
'x_exciting_dft_d2_dispersion_correction': ['DFT-D2 dispersion correction']
}
self._electron_charge_keys_mapping = {
'x_exciting_core_charge': ['core'],
'x_exciting_core_leakage': ['core leakage'],
'x_exciting_valence_charge': ['valence'],
'x_exciting_interstitial_charge': ['interstitial'],
'x_exciting_total_MT_charge': ['total charge in muffin-tins', 'total in muffin-tins'],
'charge_total': ['total charge'],
'x_exciting_section_MT_charge_atom': ['atom_resolved']
}
self._moment_keys_mapping = {
'x_exciting_interstitial_moment': ['interstitial'],
'x_exciting_total_MT_moment': ['total moment in muffin-tins'],
'x_exciting_total_moment': ['total moment'],
'x_exciting_section_MT_moment_atom': ['atom_resolved']
}
def get_exciting_files(self, default):
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = default.rsplit('.', 1)
filename = '%s%s' % (target[0], suffix)
if target[1:]:
filename = '%s.%s' % (filename, target[1])
filename = os.path.join(self.info_parser.maindir, filename)
if os.path.isfile(filename):
return [filename]
filename = os.path.join(self.info_parser.maindir, default)
if not os.path.isfile(filename):
file_ext = default.split('.')[-1]
mainfile_base = mainfile.rsplit('.', 1)[0].replace('INFO', '')
options = [
f for f in os.listdir(
self.info_parser.maindir) if target[0] in f and mainfile_base in f]
options = [f for f in options if f.endswith(file_ext)]
options.sort()
filenames = [os.path.join(self.info_parser.maindir, f) for f in options]
else:
filenames = [filename]
filenames = [f for f in filenames if os.access(f, os.F_OK)]
return filenames
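    # Illustrative note (added): get_exciting_files maps a default exciting file name onto
    # the naming used by the current run. Note that mainfile.strip('INFO.OUT') removes the
    # characters I, N, F, O, '.', U, T from both ends of the basename (str.strip works on a
    # character set, not a literal suffix); for a plain 'INFO.OUT' mainfile this yields an
    # empty suffix, so e.g. 'EVALQP.DAT' is looked up unchanged in the same directory.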
def file_exists(self, filename):
"""Checks if a the given filename exists and is accessible in the same
folder where the mainfile is stored.
"""
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = filename.rsplit('.', 1)
filepath = '%s%s' % (target[0], suffix)
if target[1:]:
filepath = '%s.%s' % (filepath, target[1])
filepath = os.path.join(self.info_parser.maindir, filepath)
if os.path.isfile(filepath) and os.access(filepath, os.F_OK):
return True
return False
def _parse_dos(self, sec_scc):
if self.dos_parser.get('totaldos', None) is None:
return
        # Get the Fermi energy: it is used to un-shift the DOS back to
        # the original scale, in which the other energies are also reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = self.dos_parser.number_of_dos
sec_dos.energies = self.dos_parser.energies + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
totaldos = self.dos_parser.get('totaldos') * volume.to('m**3').magnitude
for spin in range(len(totaldos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = totaldos[spin]
partialdos = self.dos_parser.get('partialdos')
if partialdos is None:
return
partialdos = partialdos.to('1/joule').magnitude
lm_values = np.column_stack((np.arange(len(partialdos)), np.zeros(len(partialdos), dtype=np.int32)))
for lm in range(len(partialdos)):
for spin in range(len(partialdos[lm])):
for atom in range(len(partialdos[lm][spin])):
sec_dos_values = sec_dos.m_create(DosValues, Dos.atom_projected)
sec_dos_values.m_kind = 'spherical'
sec_dos_values.lm = lm_values[lm]
sec_dos_values.spin = spin
sec_dos_values.atom_index = atom
sec_dos_values.value = partialdos[lm][spin][atom]
def _parse_bandstructure(self, sec_scc):
# we need to set nspin again as this is overwritten when setting mainfile
self.bandstructure_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_parser.get('band_energies', [])
for n in range(len(band_energies)):
            # Get the Fermi energy: it is used to un-shift the band structure back to
            # the original scale, in which the other energies are also reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
continue
energy_fermi = energy_fermi.to("hartree")
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_parser.get('band_k_points')
nkpts_segment = self.bandstructure_parser.number_of_k_points_per_segment
band_seg_labels = self.bandstructure_parser.get('band_segm_labels')
for nb in range(len(band_energies[n])):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.endpoints_labels = band_seg_labels[nb]
sec_k_band_segment.energies = band_energies[n][nb] + energy_fermi
def _parse_eigenvalues(self, sec_scc):
if self.eigval_parser.get('eigenvalues_occupancies', None) is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def get_data(key):
data = self.eigval_parser.get('eigenvalues_occupancies')
            # reshaping is not necessary as this is already done in the parser; however,
            # nspin is determined from the occupancies, which is sometimes problematic
res = np.hstack([np.reshape(v[key], (nspin, np.size(v[key]) // nspin)) for v in data])
res = res.reshape((len(res), len(data), len(res[0]) // len(data)))
if key == 'eigenvalues':
res = res * ureg.hartree
return res
sec_eigenvalues = sec_scc.m_create(BandEnergies)
sec_eigenvalues.kpoints = self.eigval_parser.get('k_points')
sec_eigenvalues.occupations = get_data('occupancies')
sec_eigenvalues.energies = get_data('eigenvalues')
def _parse_fermisurface(self, sec_scc):
fermi_surface = self.fermisurf_parser.get('fermi_surface', [None])[0]
if fermi_surface is None:
return
sec_fermisurface = sec_scc.m_create(x_exciting_section_fermi_surface)
band_parameters = self.fermisurf_parser.get('band_parameters', None)
if band_parameters is not None:
sec_fermisurface.x_exciting_number_of_bands_fermi_surface = band_parameters[0]
sec_fermisurface.x_exciting_number_of_mesh_points_fermi_surface = np.product(band_parameters[1])
sec_fermisurface.x_exciting_grid_fermi_surface = band_parameters[1]
sec_fermisurface.x_exciting_origin_fermi_surface = band_parameters[2]
sec_fermisurface.x_exciting_vectors_fermi_surface = band_parameters[3]
fermi_energy = self.fermisurf_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_fermisurface.x_exciting_fermi_energy_fermi_surface = fermi_energy
sec_fermisurface.x_exciting_values_fermi_surface = fermi_surface
def _parse_evalqp(self, sec_scc):
data = self.evalqp_parser.get('kpoints_eigenvalues')
if data is None:
return
def get_data(key):
if key == 'k_points':
return np.array([d[0][:3] for d in data])
elif key == 'Znk':
return np.array([d[1].get(key, None) for d in data])
else:
energy = np.array([d[1].get(key, None) for d in data])
if None in energy:
return energy
return np.array([d[1].get(key) for d in data]) * ureg.hartree
eigs_gw = get_data('E_GW')
if eigs_gw[0] is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def reshape(data):
if data[0] is None:
return
return np.reshape(data, (nspin, len(data) // nspin, len(data[0])))
sec_gw_eigenvalues = sec_scc.m_create(BandEnergies)
sec_gw_eigenvalues.qp_linearization_prefactor = reshape(get_data('Znk'))
sec_gw_eigenvalues.n_bands = len(eigs_gw[0])
sec_gw_eigenvalues.n_kpoints = len(eigs_gw)
sec_gw_eigenvalues.kpoints = get_data('k_points')
sec_gw_eigenvalues.energies = reshape(eigs_gw)
sec_gw_eigenvalues.value_exchange = reshape(get_data('Sx'))
eigs_gw_C = reshape(get_data('Sc'))
if eigs_gw_C is None:
eigs_gw_C = reshape(get_data('Re(Sc)'))
sec_gw_eigenvalues.value_correlation = eigs_gw_C
sec_gw_eigenvalues.value_xc_potential = reshape(get_data('Vxc'))
def _parse_dos_out(self, sec_scc):
data = self.dos_out_parser.data
if data is None:
return
        # Get the Fermi energy: it is used to un-shift the DOS back to
        # the original scale, in which the other energies are also reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
        # TODO I am not sure about the format for the spin-polarized case! I assume it is
        # energy dos_up dos_down
nspin = self.info_parser.get_number_of_spin_channels()
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = len(data) // nspin
data = np.reshape(data, (nspin, len(data) // nspin, 2))
data = np.transpose(data, axes=(2, 0, 1))
sec_dos.energies = data[0][0] * ureg.hartree + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
dos = data[1] * (1 / ureg.hartree) * volume.to('m**3').magnitude
for spin in range(len(dos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = dos[spin]
# TODO add PDOS
def _parse_bandstructure_dat(self, sec_scc):
self.bandstructure_dat_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_dat_parser.band_energies
if band_energies is None:
return
        # Get the Fermi energy: it is used to un-shift the band structure back to
        # the original scale, in which the other energies are also reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_dat_parser.band_k_points
nkpts_segment = self.bandstructure_dat_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.energies = band_energies[nb] + energy_fermi
def _parse_band_out(self, sec_scc):
self.band_out_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.band_out_parser.band_energies
if band_energies is None:
return
        # Get the Fermi energy: it is used to un-shift the band structure back to
        # the original scale, in which the other energies are also reported.
energy_fermi = 0.0 * ureg.hartree
if sec_scc.energy is not None:
energy_fermi = sec_scc.energy.fermi
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
nkpts_segment = self.band_out_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.value = band_energies[nb] + energy_fermi
def parse_file(self, name, section):
# TODO add support for info.xml, wannier.out
if name.startswith('dos') and name.endswith('xml'):
parser = self.dos_parser
parser_function = self._parse_dos
elif name.startswith('bandstructure') and name.endswith('xml'):
parser = self.bandstructure_parser
parser_function = self._parse_bandstructure
elif name.startswith('EIGVAL') and name.endswith('OUT'):
parser = self.eigval_parser
parser_function = self._parse_eigenvalues
elif (name.startswith('FERMISURF') or name.startswith('FS')) and name.endswith('bxsf'):
parser = self.fermisurf_parser
parser_function = self._parse_fermisurface
elif name.startswith('EVALQP') and (name.endswith('DAT') or name.endswith('TXT')):
parser = self.evalqp_parser
parser_function = self._parse_evalqp
elif name.startswith('TDOS') and name.endswith('OUT'):
parser = self.dos_out_parser
parser_function = self._parse_dos_out
elif name.startswith('bandstructure') and name.endswith('dat'):
parser = self.bandstructure_dat_parser
parser_function = self._parse_bandstructure_dat
elif name.startswith('BAND') and name.endswith('OUT'):
parser = self.band_out_parser
parser_function = self._parse_band_out
elif name.startswith('input') and name.endswith('xml'):
parser = self.input_xml_parser
if self._calculation_type == 'gw':
parser_function = self._parse_input_gw
elif self._calculation_type == 'xs':
parser_function = self._parse_input_xs
else:
# TODO implement reading of parameters from input.xml for normal calculations
# in addition to INFO.OUT
return
else:
return
files = self.get_exciting_files(name)
if len(files) > 1:
self.logger.warn('Found multiple files. Will read all!', data=dict(file=name))
for n in range(len(files)):
parser.mainfile = files[n]
parser_function(section)
# free up memory
parser.mainfile = None
def _parse_input_xs(self, sec_method):
xstype = self.input_xml_parser.get('xs/xstype', None)
if xstype is not None:
sec_method.x_exciting_xs_xstype = xstype
sec_method.x_exciting_electronic_structure_method = xstype
sec_method.x_exciting_xs_broadening = self.input_xml_parser.get(
'xs/broad', 0.01, 'hartree')
sec_method.x_exciting_xs_gqmax = self.input_xml_parser.get(
'xs/gqmax', 0.0, '1/bohr')
sec_method.x_exciting_xs_lmaxapw = self.input_xml_parser.get('xs/lmaxapw', 10)
sec_method.x_exciting_xs_number_of_empty_states = self.input_xml_parser.get(
'xs/nempty', 5)
sec_method.x_exciting_xs_ngridq = self.input_xml_parser.get('xs/ngridq', [1, 1, 1])
sec_method.x_exciting_xs_ngridk = self.input_xml_parser.get('xs/ngridk', [1, 1, 1])
rgkmax = self.input_xml_parser.get('xs/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_rgkmax = rgkmax
sec_method.x_exciting_xs_scissor = self.input_xml_parser.get('xs/scissor', 0.0)
sec_method.x_exciting_xs_vkloff = self.input_xml_parser.get('xs/vkloff', [0., 0., 0.])
# TODO I am not certain if screening/BSE are children of xs
if self.input_xml_parser.get('xs/screening') is not None:
sec_method.x_exciting_xs_screening_number_of_empty_states = self.input_xml_parser.get(
'xs/screening/nempty', 0)
sec_method.x_exciting_xs_screening_ngridk = self.input_xml_parser.get(
'xs/screening/ngridk', [0, 0, 0])
rgkmax = self.input_xml_parser.get('xs/screening/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_screening_rgkmax = rgkmax
sec_method.x_exciting_xs_screening_type = self.input_xml_parser.get(
'xs/screening/screentype', 'full')
if self.input_xml_parser.get('xs/BSE') is not None:
sec_method.x_exciting_xs_bse_antiresonant = self.input_xml_parser.get(
'xs/BSE/aresbse', True)
sec_method.x_exciting_xs_bse_angular_momentum_cutoff = self.input_xml_parser.get(
'xs/BSE/lmaxdielt', 14)
rgkmax = self.input_xml_parser.get('xs/BSE/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0)
sec_method.x_exciting_xs_bse_rgkmax = rgkmax
sec_method.x_exciting_xs_bse_sciavbd = self.input_xml_parser.get(
'xs/BSE/sciavbd', True)
sec_method.x_exciting_xs_bse_sciavqbd = self.input_xml_parser.get(
'xs/BSE/sciavqbd', False)
sec_method.x_exciting_xs_bse_sciavqhd = self.input_xml_parser.get(
'xs/BSE/sciavqhd', False)
sec_method.x_exciting_xs_bse_sciavqwg = self.input_xml_parser.get(
'xs/BSE/sciavqwg', False)
sec_method.x_exciting_xs_bse_sciavtype = self.input_xml_parser.get(
'xs/BSE/sciavtype', 'spherical')
sec_method.x_exciting_xs_bse_xas = self.input_xml_parser.get(
'xs/BSE/xas', False)
sec_method.x_exciting_xs_bse_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlbse', [0, 0, 0, 0])
if sec_method.x_exciting_xs_bse_xas:
sec_method.x_exciting_xs_bse_xasatom = self.input_xml_parser.get(
'xs/BSE/xasatom', 0)
sec_method.x_exciting_xs_bse_xasedge = self.input_xml_parser.get(
'xs/BSE/xasedge', 'K')
sec_method.x_exciting_xs_bse_xasspecies = self.input_xml_parser.get(
'xs/BSE/xasspecies', 0)
sec_method.x_exciting_xs_bse_xas_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlxas', [0, 0])
if self.input_xml_parser.get('xs/tddft') is not None:
sec_method.x_exciting_xs_tddft_analytic_continuation = self.input_xml_parser.get(
'xs/tddft/acont', False)
sec_method.x_exciting_xs_tddft_anomalous_Hall_conductivity = self.input_xml_parser.get(
'xs/tddft/ahc', False)
sec_method.x_exciting_xs_tddft_anti_resonant_dielectric = self.input_xml_parser.get(
'xs/tddft/aresdf', False)
sec_method.x_exciting_xs_tddft_anti_resonant_xc_kernel = self.input_xml_parser.get(
'xs/tddft/aresfxc', True)
sec_method.x_exciting_xs_tddft_drude = self.input_xml_parser.get(
'xs/tddft/drude', [0., 0.])
sec_method.x_exciting_xs_tddft_split_parameter = self.input_xml_parser.get(
'xs/tddft/fxcbsesplit', 0.00001, 'hartree')
sec_method.x_exciting_xs_tddft_xc_kernel = self.input_xml_parser.get(
'xs/tddft/fxctype', 'RPA')
sec_method.x_exciting_xs_tddft_finite_q_intraband_contribution = self.input_xml_parser.get(
'xs/tddft/intraband', False)
sec_method.x_exciting_xs_tddft_diagonal_xc_kernel = self.input_xml_parser.get(
'xs/tddft/kerndiag', False)
sec_method.x_exciting_xs_tddft_lmax_alda = self.input_xml_parser.get(
'xs/tddft/lmaxalda', 3)
sec_method.x_exciting_xs_tddft_macroscopic_dielectric_function_q_treatment = self.input_xml_parser.get(
'xs/tddft/mdfqtype', 0)
sec_method.x_exciting_xs_tddft_analytic_continuation_number_of_intervals = self.input_xml_parser.get(
'xs/tddft/nwacont', 0)
sec_method.x_exciting_xs_tetra = self.input_xml_parser.get(
'xs/tetra/tetradf', False)
def _parse_xs_bse(self):
sec_run = self.archive.run[-1]
# TODO read from xml file
def get_files(name):
bse_types = ['IP', 'singlet', 'triplet', 'RPA']
scr_types = ['full', 'diag', 'noinvdiag', 'longrange']
bse_files = []
for bse_type in bse_types:
for scr_type in scr_types:
files = self.get_exciting_files(
'%s_BSE%s_SCR%s.OUT' % (name, bse_type, scr_type))
bse_files.append(files)
return bse_files
def get_data(files):
data = []
for f in files:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data.append(self.data_xs_parser.data)
return data
def parse_exciton(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
sec_scc.x_exciting_xs_bse_number_of_components = n_components
n_excitons = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_excitons = n_excitons
sec_scc.x_exciting_xs_bse_exciton_energies = np.reshape(
data[1], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_binding_energies = np.reshape(
data[2], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_oscillator_strength = np.reshape(
data[3], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_re = np.reshape(
data[4], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_im = np.reshape(
data[5], (n_components, n_excitons))
def parse_epsilon(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_epsilon = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_energy_points = n_epsilon
sec_scc.x_exciting_xs_bse_epsilon_energies = np.reshape(
data[0], (n_components, n_epsilon)) * ureg.hartree
sec_scc.x_exciting_xs_bse_epsilon_re = np.reshape(
data[1], (n_components, n_epsilon))
sec_scc.x_exciting_xs_bse_epsilon_im = np.reshape(
data[2], (n_components, n_epsilon))
def parse_sigma(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_sigma = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_sigma_energies = np.reshape(
data[0], (n_components, n_sigma)) * ureg.hartree
sec_scc.x_exciting_xs_bse_sigma_re = np.reshape(
data[1], (n_components, n_sigma))
sec_scc.x_exciting_xs_bse_sigma_im = np.reshape(
data[2], (n_components, n_sigma))
def parse_loss(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_loss = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_loss_energies = np.reshape(
data[0], (n_components, n_loss)) * ureg.hartree
sec_scc.x_exciting_xs_bse_loss = np.reshape(
data[1], (n_components, n_loss))
        # TODO check whether the format of the files is really correct, i.e. whether the
        # columns are what they are assumed to be. What is the fourth column in epsilon,
        # which is not parsed?
sccs = []
for quantity in ['EXCITON', 'EPSILON', 'SIGMA', 'LOSS']:
files = get_files(quantity)
for i in range(len(files)):
data = get_data(files[i])
if not data:
sccs.append(None)
continue
if quantity == 'EXCITON':
sec_scc = sec_run.m_create(Calculation)
sccs.append(sec_scc)
else:
sec_scc = sccs[i]
if sec_scc is None:
# This is the case when there is a mismatch between files
self.logger.warn(
'Mismatch in EXCITON and file type', data=dict(file=quantity))
sec_scc = sec_run.m_create(Calculation)
if quantity == 'EXCITON':
parse_function = parse_exciton
elif quantity == 'EPSILON':
parse_function = parse_epsilon
elif quantity == 'SIGMA':
parse_function = parse_sigma
elif quantity == 'LOSS':
parse_function = parse_loss
else:
continue
try:
parse_function(data, sec_scc)
except Exception:
self.logger.error('Error setting xs data', data=dict(file=quantity))
def _parse_xs_tddft(self):
sec_run = self.archive.run[-1]
fxctype = self.input_xml_parser.get('xs/tddft/fxctype', 'RPA')
tetradf = self.input_xml_parser.get('xs/tetra/tetradf', None)
nwacont = self.input_xml_parser.get('xs/tddft/nwacont', None)
aresdf = self.input_xml_parser.get('xs/tddft/aresdf', True)
file_ext_list = [
'TET' if tetradf else None, 'AC' if nwacont else None, 'NAR' if not aresdf else None]
file_ext = '_'.join([e for e in file_ext_list if e])
# read q points
qpoints = self.input_xml_parser.get('xs/qpointset/qpoint')
def get_data(quantity, ext):
# all files related to quantity at all qpoints
files = self.get_exciting_files('%s_%s%s%s.OUT' % (quantity, file_ext, ext, fxctype))
data = [[], [], []]
for i in range(len(qpoints)):
data_q = []
files_q = [f for f in files if f.endswith('QMT%s.OUT' % str(i + 1).rjust(3, '0'))]
for f in files_q:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data_q.append(self.data_xs_parser.data)
if not data_q:
continue
data_q = np.transpose(data_q, axes=(2, 0, 1))
for j in range(len(data)):
data[j].append(data_q[j])
return data
for quantity in ['EPSILON', 'LOSS', 'SIGMA']:
for ext in ['FXC', 'NLF_FXC']:
data = get_data(quantity, ext)
if not data[0]:
continue
if quantity == 'EPSILON' and ext == 'FXC':
sec_scc = sec_run.m_create(Calculation)
sec_scc.x_exciting_xs_tddft_number_of_epsilon_values = len(data[0][0][0])
sec_scc.x_exciting_xs_tddft_epsilon_energies = data[0][0][0] * ureg.hartree
sec_scc.x_exciting_xs_tddft_dielectric_function_local_field = data[1:]
elif quantity == 'EPSILON' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_dielectric_function_no_local_field = data[1:3]
elif quantity == 'LOSS' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_loss_function_local_field = data[1]
elif quantity == 'LOSS' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_loss_function_no_local_field = data[1]
elif quantity == 'SIGMA' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_sigma_local_field = data[1:3]
elif quantity == 'SIGMA' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_sigma_no_local_field = data[1:3]
def parse_xs(self):
sec_run = self.archive.run[-1]
xs_info_files = self.get_exciting_files('INFOXS.OUT')
if not xs_info_files:
return
self._calculation_type = 'xs'
# inconsistency in the naming convention for xs input xml file
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
self.parse_file('input.xml', sec_method)
# parse properties
input_file = self.get_exciting_files('input.xml')
if not input_file:
return
self.input_xml_parser.mainfile = input_file[0]
xstype = self.input_xml_parser.get('xs/xstype', '')
if xstype.lower() == 'bse':
self._parse_xs_bse()
elif xstype.lower() == 'tddft':
self._parse_xs_tddft()
def _parse_input_gw(self, sec_method):
sec_gw = sec_method.m_create(GWMethod)
sec_gw.type = 'G0W0'
gmaxvr = self.info_parser.get_initialization_parameter('x_exciting_gmaxvr', 0)
sec_gw.core_treatment = self.input_xml_parser.get(
'gw/coreflag', 'all')
sec_gw.polarizability_number_of_empty_states = int(
self.input_xml_parser.get('gw/nempty', 0))
sec_gw.ngridq = self.input_xml_parser.get('gw/ngridq', [1, 1, 1])
sec_gw.basis_set = 'mixed'
sec_gw.qp_equation_treatment = 'linearization'
sec_gw.max_frequency = self.input_xml_parser.get(
'gw/freqgrid/freqmax', 1.0)
sec_gw.frequency_grid_type = self.input_xml_parser.get(
'gw/freqgrid/fgrid', 'gaule2')
sec_gw.number_of_frequencies = int(self.input_xml_parser.get(
'gw/freqgrid/nomeg', 16))
sec_gw.self_energy_c_number_of_poles = int(self.input_xml_parser.get(
'gw/selfenergy/npol', 0))
sec_gw.self_energy_c_number_of_empty_states = int(self.input_xml_parser.get(
'gw/selfenergy/nempty', 0))
sec_gw.self_energy_singularity_treatment = self.input_xml_parser.get(
'gw/selfenergy/singularity', 'mpd')
sec_gw.self_energy_c_analytical_continuation = self.input_xml_parser.get(
'gw/selfenergy/actype', 'pade')
sec_gw.mixed_basis_lmax = int(self.input_xml_parser.get(
'gw/mixbasis/lmaxmb', 3))
sec_gw.mixed_basis_tolerance = self.input_xml_parser.get(
'gw/mixbasis/epsmb', 0.0001)
gmb = self.input_xml_parser.get('gw/mixbasis/gmb', 1.0)
sec_gw.mixed_basis_gmax = gmb * gmaxvr
pwm = self.input_xml_parser.get('gw/barecoul/pwm', 2.0)
sec_gw.bare_coulomb_gmax = pwm * gmb * gmaxvr
sec_gw.bare_coulomb_cutofftype = self.input_xml_parser.get(
'gw/barecoul/cutofftype', 'none')
sec_gw.screened_coulomb_volume_average = self.input_xml_parser.get(
'gw/scrcoul/sciavtype', 'isotropic')
sec_gw.screened_Coulomb = self.input_xml_parser.get(
'gw/scrcoul/scrtype', 'rpa')
def parse_gw(self):
sec_run = self.archive.run[-1]
# two versions of gw info files
gw_info_files = ['GW_INFO.OUT', 'GWINFO.OUT']
for f in gw_info_files:
if self.get_exciting_files(f):
self._calculation_type = 'gw'
gw_info_file = f
break
if not self._calculation_type == 'gw':
return
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
# parse input xml file; there seem to be two versions, input_gw.xml and input-gw.xml
for f in ['input_gw.xml', 'input-gw.xml', 'input.xml']:
self.parse_file(f, sec_method)
xc_functional_name = ' '.join(self.info_parser.get_xc_functional_name())
sec_method.gw.starting_point = xc_functional_name
sec_scc = sec_run.m_create(Calculation)
sec_scc.method_ref = sec_method
if sec_run.system:
sec_scc.system_ref = sec_run.system[-1]
sec_scc_ref = sec_run.calculation[0]
sec_scc.starting_calculation_ref = sec_scc_ref
sec_scc.calculations_ref = [sec_scc_ref]
# parse properties
gw_info_files = self.get_exciting_files(gw_info_file)
if len(gw_info_files) > 1:
self.logger.warn('Found multiple GW info files, will read only first!')
self.info_gw_parser.mainfile = gw_info_files[0]
fermi_energy = self.info_gw_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_scc.energy = Energy(fermi=fermi_energy)
gw_files = ['EVALQP.DAT', 'EVALQP.TXT', 'TDOS-QP.OUT']
# Parse GW band structure from one of the files:
bs_files = ['bandstructure-qp.dat', 'BAND-QP.OUT']
for fname in bs_files:
if self.file_exists(fname):
gw_files.append(fname)
break
for f in gw_files:
self.parse_file(f, sec_scc)
frequency_data = self.info_gw_parser.get('frequency_data', None)
if frequency_data is not None:
number = frequency_data.get('number')
sec_method.gw.number_of_frequencies = len(number)
sec_method.gw.frequency_number = number
sec_method.gw.frequency_values = frequency_data.get('values')
sec_method.gw.frequency_weights = frequency_data.get('weights')
fundamental_band_gap = self.info_gw_parser.get('direct_band_gap', None)
if fundamental_band_gap is None:
fundamental_band_gap = self.info_gw_parser.get('fundamental_band_gap', None)
sec_gap = sec_scc.eigenvalues[-1].m_create(BandGap)
if fundamental_band_gap is not None:
sec_gap.value_fundamental = fundamental_band_gap
optical_band_gap = self.info_gw_parser.get('optical_band_gap', None)
if optical_band_gap is not None:
sec_gap.value_optical = optical_band_gap
def parse_miscellaneous(self):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'single_point'
structure_optimization = self.info_parser.get('structure_optimization')
if structure_optimization is not None:
sec_workflow.type = 'geometry_optimization'
sec_geometry_opt = sec_workflow.m_create(GeometryOptimization)
threshold_force = structure_optimization.get(
'optimization_step', [{}])[0].get('force_convergence', [0., 0.])[-1]
sec_geometry_opt.input_force_maximum_tolerance = threshold_force
def parse_method(self):
sec_run = self.archive.run[-1]
sec_method = sec_run.m_create(Method)
sec_method.basis_set.append(BasisSet(type='(L)APW+lo'))
sec_dft = sec_method.m_create(DFT)
sec_electronic = sec_method.m_create(Electronic)
sec_electronic.method = 'DFT'
smearing_kind_map = {
'Gaussian': 'gaussian', 'Methfessel-Paxton': 'methfessel-paxton',
'Fermi-Dirac': 'fermi', 'Extended': 'tetrahedra'}
sec_smearing = sec_electronic.m_create(Smearing)
smearing_kind = self.info_parser.get_initialization_parameter('smearing_kind')
if smearing_kind is not None:
if not isinstance(smearing_kind, str):
smearing_kind = smearing_kind[0]
smearing_kind = smearing_kind_map[smearing_kind]
sec_smearing.kind = smearing_kind
smearing_width = self.info_parser.get_initialization_parameter('smearing_width')
if smearing_width is not None:
smearing_width = (smearing_width * ureg.hartree).to('joule')
# TODO smearing width should have units of energy
sec_smearing.width = smearing_width.magnitude
for name in self.info_parser._convergence_keys_mapping.keys():
threshold = self.info_parser.get_scf_threshold(name)
if threshold is None:
continue
metainfo_name = 'x_exciting_scf_threshold_%s_change' % name.split('_')[-2]
setattr(sec_method, metainfo_name, threshold)
# additionally, set threshold to global metainfo. This is killing me!
if metainfo_name == 'x_exciting_scf_threshold_energy_change':
sec_method.scf = Scf(threshold_energy_change=threshold)
xc_functional_names = self.info_parser.get_xc_functional_name()
if not xc_functional_names:
# get it from input.xml
input_file = self.get_exciting_files('input.xml')
for f in input_file:
self.input_xml_parser.mainfile = f
correlation = self.input_xml_parser.get('libxc/correlation', None)
xc_functional_names.append(correlation)
exchange = self.input_xml_parser.get('libxc/exchange', None)
xc_functional_names.append(exchange)
sec_xc_functional = sec_dft.m_create(XCFunctional)
for name in xc_functional_names:
if name is None:
continue
if '_X_' in name:
sec_xc_functional.exchange.append(Functional(name=name))
elif '_C_' in name:
sec_xc_functional.correlation.append(Functional(name=name))
elif 'HYB' in name:
sec_xc_functional.hybrid.append(Functional(name=name))
else:
sec_xc_functional.contributions.append(Functional(name=name))
if not xc_functional_names:
# simply write parameters
xc_functional = self.info_parser.get('initialization', {}).get('xc_functional')
if xc_functional is not None:
sec_xc_functional.name = xc_functional.get('name_reference', [None, None])[0]
sec_xc_functional.reference = xc_functional.get('name_reference', [None, None])[1]
sec_electronic.n_spin_channels = self.info_parser.get_number_of_spin_channels()
if self._calculation_type == 'volume_optimization':
sec_method.x_exciting_volume_optimization = True
def parse_scc(self, section):
sec_run = self.archive.run[-1]
final = section if section.get('energy_total') is not None else section.get('final')
if final is None:
# get it from last scf_iteration or optimization_step
final = section.get('scf_iteration', [None])[-1]
final = section.get('optimization_step', [None])[-1] if final is None else final
if final is None:
return
sec_scc = sec_run.m_create(Calculation)
def parse_scf(iteration, msection):
energy_total = iteration.get('energy_total')
sec_energy = msection.m_create(Energy)
if energy_total is not None:
sec_energy.total = EnergyEntry(value=energy_total)
x_exciting_dos_fermi = iteration.get('x_exciting_dos_fermi')
if x_exciting_dos_fermi is not None:
setattr(msection, 'x_exciting_dos_fermi', x_exciting_dos_fermi)
# energy contributions
energy_contributions = iteration.get('energy_contributions', {})
for key, names in self._energy_keys_mapping.items():
val = None
for name in names:
val = energy_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key.startswith('energy_'):
sec_energy.m_add_sub_section(getattr(
Energy, key.replace('energy_', '')), EnergyEntry(value=val))
else:
setattr(msection, key, val)
if key == 'x_exciting_fermi_energy':
sec_energy.fermi = val
# charge contributions
charge_contributions = iteration.get('charge_contributions', {})
for key, names in self._electron_charge_keys_mapping.items():
val = None
for name in names:
val = charge_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_charge_atom':
for n in range(len(val)):
sec_mt_charge_atom = msection.m_create(x_exciting_section_MT_charge_atom)
sec_mt_charge_atom.x_exciting_MT_charge_atom_index = n + 1
sec_mt_charge_atom.x_exciting_MT_charge_atom_symbol = val[n][0]
sec_mt_charge_atom.x_exciting_MT_charge_atom_value = val[n][1]
sec_charges = msection.m_create(Charges)
sec_charges.value = [
val[n][1].magnitude for n in range(len(val))] * val[0][1].units
sec_charges.total = charge_contributions.get('total charge')
elif key == 'charge_total':
pass
else:
setattr(msection, key, val)
# moment contributions
moment_contributions = iteration.get('moment_contributions', {})
for key, names in self._moment_keys_mapping.items():
val = None
for name in names:
val = moment_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_moment_atom':
for n in range(len(val)):
sec_mt_moment_atom = msection.m_create(x_exciting_section_MT_moment_atom)
sec_mt_moment_atom.x_exciting_MT_moment_atom_index = n + 1
sec_mt_moment_atom.x_exciting_MT_moment_atom_symbol = val[n][0]
sec_mt_moment_atom.x_exciting_MT_moment_atom_value = val[n][1]
else:
setattr(msection, key, val)
# convergence values
for name in self.info_parser._convergence_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
setattr(msection, name, val)
# other metainfo
for name in self.info_parser._miscellaneous_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
if name == 'time':
msection.time_calculation = val
else:
setattr(msection, name, val)
# energy, moment, charge contributions
parse_scf(final, sec_scc)
# forces
forces = section.get('forces')
if forces is not None:
sec_forces = sec_scc.m_create(Forces)
sec_forces.total = ForcesEntry(value=forces)
# scf iterations
scf_iterations = section.get('scf_iteration', [])
for scf_iteration in scf_iterations:
sec_scf_iteration = sec_scc.m_create(ScfIteration)
parse_scf(scf_iteration, sec_scf_iteration)
return sec_scc
def parse_system(self, section):
sec_run = self.archive.run[-1]
positions = self.info_parser.get_atom_positions(section.get('atomic_positions', {}))
lattice_vectors = self.info_parser.get_initialization_parameter('lattice_vectors')
atom_labels = self.info_parser.get_atom_labels(section.get('atomic_positions', {}))
input_file = self.get_exciting_files('input.xml')
if positions is None:
# get it from input.xml
for f in input_file:
self.input_xml_parser.mainfile = f
positions = self.input_xml_parser.get('structure/species/atom/coord')
lattice_vectors = self.input_xml_parser.get(
'structure/crystal/basevect', np.eye(3))
species = self.input_xml_parser.get('structure/species/speciesfile')
if positions is None or lattice_vectors is None or species is None:
continue
lattice_vectors = np.array(lattice_vectors, dtype=float)
lattice_vectors *= self.input_xml_parser.get('structure/crystal/scale', 1.0)
positions = np.dot(positions, lattice_vectors) * ureg.bohr
lattice_vectors = lattice_vectors * ureg.bohr
atoms = self.input_xml_parser.get('structure/species/atom')
atom_labels = []
for n in range(len(atoms)):
atom_labels.extend([species[n].split('.')[0]] * len(atoms[n]))
if positions is None or atom_labels is None:
return
sec_system = sec_run.m_create(System)
sec_atoms = sec_system.m_create(Atoms)
sec_atoms.positions = positions
sec_atoms.labels = atom_labels
sec_atoms.periodic = [True] * 3
# TODO confirm no cell optimization in exciting
sec_atoms.lattice_vectors = lattice_vectors
lattice_vectors_reciprocal = self.info_parser.get_initialization_parameter(
'lattice_vectors_reciprocal')
sec_atoms.lattice_vectors_reciprocal = lattice_vectors_reciprocal
if len(sec_run.system) > 1:
return sec_system
for name in self.info_parser._system_keys_mapping.keys():
val = self.info_parser.get_initialization_parameter(name)
if val is None:
continue
if name == 'x_exciting_spin_treatment':
sub_sec = sec_system.m_create(x_exciting_section_spin)
sub_sec.x_exciting_spin_treatment = val
elif name == 'x_exciting_species_rtmin':
setattr(sec_system, name, ' '.join([str(v) for v in val]))
else:
try:
setattr(sec_system, name, val)
except Exception:
self.logger.warn('Error setting metainfo.')
# species
species = self.info_parser.get_initialization_parameter('species', [])
for specie in species:
sec_atoms_group = sec_system.m_create(x_exciting_section_atoms_group)
sec_atoms_group.x_exciting_geometry_atom_labels = specie.get('symbol')
sec_atoms_group.x_exciting_geometry_atom_number = str(specie.get('number'))
sec_atoms_group.x_exciting_muffin_tin_points = specie.get('radial_points')
sec_atoms_group.x_exciting_muffin_tin_radius = specie.get('muffin_tin_radius')
positions_format = specie.get('positions_format')
sec_atoms_group.x_exciting_atom_position_format = positions_format
positions = specie.get('positions')
positions = self.info_parser.get_atom_positions(
positions=positions, positions_format=positions_format).to('m')
sec_atoms_group.x_exciting_geometry_atom_positions = positions.magnitude
# clathrate info
clathrate_file = self.get_exciting_files('str.out')
if clathrate_file:
sec_system.x_exciting_clathrates = True
self.data_clathrate_parser.mainfile = clathrate_file[0]
if self.data_clathrate_parser.data:
data = np.transpose(self.data_clathrate_parser.data)
sec_system.x_exciting_clathrates_atom_coordinates = np.transpose(
np.array(data[:3], dtype=float))
sec_system.x_exciting_clathrates_atom_labels = list(data[3])
else:
sec_system.x_exciting_clathrates = False
potential_mixing = self.info_parser.get_initialization_parameter('potential_mixing')
if potential_mixing is not None:
sec_system.x_exciting_potential_mixing = potential_mixing
return sec_system
def parse_configurations(self):
sec_run = self.archive.run[-1]
def parse_configuration(section):
if not section:
return
sec_scc = self.parse_scc(section)
if sec_scc is None:
return
sec_system = self.parse_system(section)
if sec_system is not None:
sec_scc.system_ref = sec_system
sec_scc.method_ref = sec_run.method[-1]
return sec_scc
# groundstate and hybrids calculation
for module in ['groundstate', 'hybrids']:
sec_scc = parse_configuration(self.info_parser.get(module))
if sec_scc is None:
continue
# add data to scc
# TODO add support for more output files and properties
exciting_files = ['EIGVAL.OUT', 'FERMISURF.bxsf', 'FS.bxsf']
# Parse DFT DOS from one of the files
bs_files = ['dos.xml', 'TDOS.OUT']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
# Parse DFT band structure from one of the files
bs_files = ['bandstructure.xml', 'BAND.OUT', 'bandstructure.dat']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
for f in exciting_files:
self.parse_file(f, sec_scc)
# structure optimization
structure_optimization = self.info_parser.get('structure_optimization', {})
for optimization_step in structure_optimization.get('optimization_step', []):
sec_scc = parse_configuration(optimization_step)
if optimization_step.get('method') is not None:
sec_scc.x_exciting_geometry_optimization_method = optimization_step.get('method')
if optimization_step.get('step') is not None:
sec_scc.x_exciting_geometry_optimization_step = optimization_step.get('step')
force_convergence = optimization_step.get('force_convergence')
if force_convergence is not None:
sec_scc.x_exciting_maximum_force_magnitude = force_convergence[0]
sec_scc.x_exciting_geometry_optimization_threshold_force = force_convergence[1]
sec_scc = parse_configuration(structure_optimization)
if sec_scc is None:
return
# volume optimizations
volume_index = 1
while True:
info_volume = self.get_exciting_files('run_dir%s/INFO.OUT' % str(volume_index).rjust(2, '0'))
if not info_volume:
break
sec_scc.calculations_path.append(info_volume[0])
def init_parser(self):
self.info_parser.mainfile = self.filepath
self.info_parser.logger = self.logger
self.dos_parser.logger = self.logger
self.bandstructure_parser.logger = self.logger
self.eigval_parser.logger = self.logger
self.fermisurf_parser.logger = self.logger
self.evalqp_parser.logger = self.logger
self.dos_out_parser.logger = self.logger
self.bandstructure_dat_parser.logger = self.logger
self.band_out_parser.logger = self.logger
self.info_gw_parser.logger = self.logger
self.input_xml_parser.logger = self.logger
self.data_xs_parser.logger = self.logger
self.data_clathrate_parser.logger = self.logger
def reuse_parser(self, parser):
self.info_parser.quantities = parser.info_parser.quantities
self.eigval_parser.quantities = parser.eigval_parser.quantities
self.fermisurf_parser.quantities = parser.fermisurf_parser.quantities
self.evalqp_parser.quantities = parser.evalqp_parser.quantities
self.info_gw_parser.quantities = parser.info_gw_parser.quantities
def parse(self, filepath, archive, logger):
self.filepath = filepath
self.archive = archive
self.logger = logger if logger is not None else logging
self._calculation_type = None
self.init_parser()
sec_run = self.archive.m_create(Run)
sec_run.program = Program(
name='exciting', version=self.info_parser.get('program_version', '').strip())
# method goes first since reference needed for sec_scc
self.parse_method()
self.parse_configurations()
self.parse_gw()
self.parse_xs()
self.parse_miscellaneous()
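# Usage sketch (illustrative only): the enclosing parser class, archive object and logger
# are defined elsewhere in this module and by the NOMAD infrastructure, so the names used
# below are assumptions, not part of this file.
#   parser = ExcitingParser()
#   parser.parse('path/to/INFO.OUT', archive, logging.getLogger(__name__))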
| 42.768091
| 147
| 0.593742
| 11,985
| 98,110
| 4.577138
| 0.067668
| 0.029367
| 0.0175
| 0.02625
| 0.524509
| 0.442459
| 0.373243
| 0.286454
| 0.235266
| 0.204514
| 0
| 0.006007
| 0.297584
| 98,110
| 2,293
| 148
| 42.786742
| 0.790014
| 0.038966
| 0
| 0.311404
| 0
| 0.006579
| 0.126901
| 0.04121
| 0
| 0
| 0
| 0.000872
| 0
| 1
| 0.057018
| false
| 0.000548
| 0.006579
| 0.002193
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c13b63f316f27bf2445b7a4c746d0ccd26b4b27
| 2,644
|
py
|
Python
|
batch-tmp.py
|
texastribune/donations
|
45a75e528564b5fd502319ed7d512ca91bda7f37
|
[
"MIT"
] | 6
|
2019-11-16T23:23:11.000Z
|
2022-02-13T00:53:45.000Z
|
batch-tmp.py
|
texastribune/donations
|
45a75e528564b5fd502319ed7d512ca91bda7f37
|
[
"MIT"
] | 519
|
2018-11-20T22:22:16.000Z
|
2022-03-31T11:11:32.000Z
|
batch-tmp.py
|
texastribune/donations
|
45a75e528564b5fd502319ed7d512ca91bda7f37
|
[
"MIT"
] | 6
|
2019-02-13T05:25:56.000Z
|
2020-08-19T14:41:14.000Z
|
import logging
from config import ACCOUNTING_MAIL_RECIPIENT, LOG_LEVEL, REDIS_URL, TIMEZONE
from datetime import datetime, timedelta
from pytz import timezone
import celery
import redis
from charges import amount_to_charge, charge, ChargeException
from npsp import Opportunity
from util import send_email
zone = timezone(TIMEZONE)
log_level = logging.getLevelName(LOG_LEVEL)
root = logging.getLogger()
root.setLevel(log_level)
class Log(object):
"""
This encapsulates sending to the console/stdout and email all in one.
"""
def __init__(self):
self.log = list()
def it(self, string):
"""
Add something to the log.
"""
logging.debug(string)
self.log.append(string)
def send(self):
"""
Send the assembled log out as an email.
"""
body = "\n".join(self.log)
recipient = ACCOUNTING_MAIL_RECIPIENT
subject = "Batch run"
send_email(body=body, recipient=recipient, subject=subject)
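# Usage sketch for Log (illustrative only): accumulate lines during the run, then email
# the whole batch report in one message.
#   log = Log()
#   log.it("Processed 3 charges")
#   log.send()  # sends the joined lines to ACCOUNTING_MAIL_RECIPIENT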
class AlreadyExecuting(Exception):
"""
Raised when more than one job of the same type is already running.
"""
pass
class Lock(object):
"""
Claim an exclusive lock, backed by Redis.
"""
def __init__(self, key):
self.key = key
self.connection = redis.from_url(REDIS_URL)
def acquire(self):
if self.connection.get(self.key):
raise AlreadyExecuting
self.connection.setex(name=self.key, value="bar", time=1200)
def release(self):
self.connection.delete(self.key)
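# Usage sketch for Lock (illustrative only). The Redis key expires after 1200 seconds
# (the setex TTL), so a crashed worker cannot hold the lock forever; wrapping the work
# in try/finally guarantees release on ordinary errors.
#   lock = Lock(key="charge-cards-lock")
#   lock.acquire()  # raises AlreadyExecuting if another run holds the key
#   try:
#       ...  # batch work
#   finally:
#       lock.release()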
# TODO stop sending this email and just rely on Sentry and logs?
@celery.task()
def charge_cards():
lock = Lock(key="charge-cards-lock")
lock.acquire()
log = Log()
log.it("---Starting batch job...")
ten_days_ago = (datetime.now(tz=zone) - timedelta(days=10)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
opportunities = Opportunity.list(begin=ten_days_ago, end=today)
log.it("---Processing charges...")
log.it(f"Found {len(opportunities)} opportunities available to process.")
for opportunity in opportunities:
if not opportunity.stripe_customer:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- Charging ${amount} to {opportunity.stripe_customer} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("Batch charge error")
e.send_slack_notification()
log.send()
lock.release()
if __name__ == "__main__":
charge_cards()
| 22.793103
| 92
| 0.642209
| 326
| 2,644
| 5.076687
| 0.42638
| 0.021148
| 0.027795
| 0.022961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003011
| 0.246218
| 2,644
| 115
| 93
| 22.991304
| 0.827396
| 0.113843
| 0
| 0
| 0
| 0
| 0.115556
| 0.012889
| 0
| 0
| 0
| 0.008696
| 0
| 1
| 0.111111
| false
| 0.015873
| 0.142857
| 0
| 0.301587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c16016ffd51a0a5e8e9512b1d5a109ac8fa3665
| 2,405
|
py
|
Python
|
app/__init__.py
|
jimmybutton/moviedb
|
61028ac4db7f58a671ab3a1c2afd3bfb53372773
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
jimmybutton/moviedb
|
61028ac4db7f58a671ab3a1c2afd3bfb53372773
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
jimmybutton/moviedb
|
61028ac4db7f58a671ab3a1c2afd3bfb53372773
|
[
"MIT"
] | null | null | null |
from flask import Flask
from config import Config
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_moment import Moment
from flask_misaka import Misaka
from flask_bootstrap import Bootstrap
import os
import logging
from logging.handlers import RotatingFileHandler
from elasticsearch import Elasticsearch
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
db = SQLAlchemy(metadata=metadata)
migrate = Migrate()
login = LoginManager()
login.login_view = "auth.login"
moment = Moment()
md = Misaka()
bootstrap = Bootstrap()
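# Application factory pattern: the extensions above are created unbound at import time
# and attached to a concrete Flask app inside create_app(), which also registers the
# errors, auth, main and cli blueprints.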
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
with app.app_context():
if db.engine.url.drivername == 'sqlite':
migrate.init_app(app, db, render_as_batch=True)
else:
migrate.init_app(app, db)
# migrate.init_app(app, db)
login.init_app(app)
moment.init_app(app)
md.init_app(app)
bootstrap.init_app(app)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.cli import bp as cli_bp
app.register_blueprint(cli_bp)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
from app import models
if not app.debug and not app.testing:
if not os.path.exists("logs"):
os.mkdir("logs")
file_handler = RotatingFileHandler(
"logs/moviedb.log", maxBytes=10240, backupCount=10
)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
)
)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info("Moviedb startup")
return app
| 28.630952
| 85
| 0.690644
| 323
| 2,405
| 4.934985
| 0.291022
| 0.033877
| 0.050188
| 0.055207
| 0.098494
| 0.027604
| 0.027604
| 0
| 0
| 0
| 0
| 0.005227
| 0.204574
| 2,405
| 83
| 86
| 28.975904
| 0.828019
| 0.010395
| 0
| 0
| 0
| 0.014706
| 0.142977
| 0.074012
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.264706
| 0
| 0.294118
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c161b198ce5d788684f6856cc66f4bdfc78c217
| 7,252
|
py
|
Python
|
optimize.py
|
AranKomat/Sequential-Alpha-Zero
|
21f78dc95e70b68b5fd18eb33d1ea2d5b5a853d4
|
[
"Apache-2.0"
] | 7
|
2021-04-01T09:52:02.000Z
|
2021-06-09T11:57:55.000Z
|
optimize.py
|
AranKomat/Alpha-Transformer
|
21f78dc95e70b68b5fd18eb33d1ea2d5b5a853d4
|
[
"Apache-2.0"
] | null | null | null |
optimize.py
|
AranKomat/Alpha-Transformer
|
21f78dc95e70b68b5fd18eb33d1ea2d5b5a853d4
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import random
from time import time, sleep
import h5py
import torch
import torch.nn as nn
import torch.optim as optimizer
import glob
import os
#from scipy.stats import rankdata
from lstm import Model, initialize
from Optim import ScheduledOptim
# import _pickle as cPickle
# np.set_printoptions(threshold=np.nan)
def start(config):
model = Model(config)
model = model.to(config.device)
#optim = optimizer.SGD(model.parameters(), lr=2e-4, momentum=0.9, weight_decay=config.c)
#lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=200, gamma=0.1) # 20M iters
optim = ScheduledOptim(
optimizer.Adam(
filter(lambda p: p.requires_grad, model.parameters()), lr=config.lr,
betas=(0.9, 0.98), eps=1e-09),
config.hidden_dim, 2000)
list_of_files = glob.glob(config.model_path + '/*')
latest_file = None
if list_of_files:
latest_file = max(list_of_files, key=os.path.getctime)
model_ckpt = latest_file
# model_ckpt = config.model_path + '/model-454.pth'
print(model_ckpt)
if model_ckpt:
checkpoint = torch.load(model_ckpt)
model.load_state_dict(checkpoint['state_dict'])
optim.optimizer.load_state_dict(checkpoint['optimizer'])
start_iter = model_ckpt.split('-')[-1].split('.')[0]
start_iter = int(start_iter)
else:
model.apply(initialize)
start_iter = 0
count = 0
for iter in range(start_iter, config.total_iterations):
print('iteration: %s' % iter)
#if (iter + 1) % 100000 == 0:
# lr_scheduler.step()
start_time = time()
optim.update_learning_rate(iter)
# reads the randomly sampled (s,pi,z)'s from the buffer
# ~ 0.1s
# TODO: if error, set a lock
# translate, _ = cPickle.load(open('save/vocab_cotra.pkl', 'rb'))
with h5py.File("buffer", "r") as f:
cur_row = int(f['/cur_row'][0])
s_buffer = f['/s']
pi_buffer = f['/pi']
z_buffer = f['/z']
s_tmp = []
pi_tmp = []
z_tmp = []
df = cur_row - count
'''x = np.bincount(s_buffer[:,1].astype(int)) / 500000
for i in range(len(x)):
if x[i] > 0.01:
print(i, x[i], translate[i])
break'''
if count == 0:
count = cur_row
t_inf = time()
if count != 0 and df >= 1000:
print('time required for 32 self-play games: ', 32 * (time() - t_inf) / df)
t_inf = time()
count = cur_row
if cur_row >= config.buffer_size:
r = np.sort(
np.random.choice(list(range(0, config.buffer_size)), (config.batch_size // 2), replace=False))
else:
r = np.sort(
np.random.choice(list(range(0, cur_row)), (config.batch_size // 2), replace=False))
tmp = []
# randomly sample rows 8 times for a dramatic speedup.
num_segments = 8
for i in range(num_segments):
tmp.append(
r[(config.batch_size // 2) // num_segments * i:(config.batch_size // 2) // num_segments * (i + 1)])
for i in range(num_segments):
s_tmp.append(s_buffer[tmp[i], :config.max_length])
pi_tmp.append(pi_buffer[tmp[i], :config.max_length, ...])
z_tmp.append(z_buffer[tmp[i], ...])
s = np.concatenate(s_tmp, 0)
pi = np.concatenate(pi_tmp, 0)
z = np.concatenate(z_tmp, 0)
# print('io time: ',time() - start_time)
# decompresses sampled pi's
# takes about 0.005s
new_pi = np.zeros(((config.batch_size // 2), config.max_length, config.vocab_size))
for i in range((config.batch_size // 2)):
for j in range(config.max_length):
if pi[i, j, 0] == -1: # meaning the terminal state; pi=0
new_pi[i, j, :] = 0
elif pi[i, j, 0] == -2 or sum(pi[i, j, :]) == 0: # meaning the padding; place -1 padding
new_pi[i, j, :] = -1
else:
# Beware that np.bincount's bin is [0,1,...min_length-1]
new_pi[i, j, :] = np.bincount(pi[i, j, :].astype(int),
minlength=config.vocab_size) / config.simulation_num_per_move
pi = new_pi
# creating a mask for loss function and preparing a minibatch
def generate_mask(array):
new_array = np.zeros_like(array)
for i in range(len(array)):
for j in range(len(array[i])):
if j == len(array[i]) - 1:
new_array[i, :] = 1
elif array[i, j] == config.period_token:
new_array[i, :j + 1] = 1
break
elif array[i, j] == config.blank_token:
new_array[i, :j] = 1
break
return new_array
def pi_mask(array):
array = array[:, 1:]
array = np.pad(array, ((0, 0), (0, 1)), 'constant')
return generate_mask(array)
# pi_tmp isn't modified here, since the mask will be modified appropriately
pi_mask = pi_mask(s)
z_mask = generate_mask(s)
z_batch = np.concatenate(
[np.ones([(config.batch_size // 2), config.max_length]) * (-1),
np.ones([(config.batch_size // 2), config.max_length])])
def convert(x):
return torch.tensor(x.astype(np.float32), device=config.device)
t2 = time()
# gradient update
model.train()
cache = []
for i in range(config.depth // config.unit_depth):
cache += [torch.zeros(config.batch_size, config.hidden_dim,device=config.device),
torch.zeros(config.batch_size, config.hidden_dim,device=config.device)]
s_batch = convert(np.array(s)).long()
policy, v, cache = model(s_batch, tuple(cache))
def loss_policy(y_true, y_pred):
return torch.sum(-y_true * torch.log(y_pred + 1.0e-8), 2)
def loss_value(y_true, y_pred):
return (y_true - y_pred) ** 2
pi_mask = convert(pi_mask)
z_mask = convert(z_mask)
z = convert(z)
pi = convert(pi)
loss = torch.mean(torch.sum(loss_policy(pi, policy) * pi_mask +
loss_value(z, v) * z_mask
, 1) / torch.sum(z_mask, 1))
loss.backward()
gn = nn.utils.clip_grad_norm(model.parameters(), config.clip)
print(gn)
optim.step()
optim.zero_grad()
print("grad update: %s seconds" % (time() - t2))
print("iteration: %s seconds" % (time() - start_time))
checkpoint = {'state_dict': model.state_dict(),
'optimizer': optim.optimizer.state_dict()}
sleep(config.training_sleep_time)
torch.save(checkpoint, config.model_path + '/model' + '-' + str(iter + 1) + '.pth')
| 37.968586
| 119
| 0.535714
| 947
| 7,252
| 3.938754
| 0.252376
| 0.005898
| 0.040214
| 0.034316
| 0.173995
| 0.139678
| 0.090885
| 0.06756
| 0.06756
| 0.031099
| 0
| 0.024486
| 0.335494
| 7,252
| 191
| 120
| 37.968586
| 0.749533
| 0.129619
| 0
| 0.094203
| 0
| 0
| 0.02919
| 0
| 0
| 0
| 0
| 0.005236
| 0
| 1
| 0.043478
| false
| 0
| 0.07971
| 0.021739
| 0.15942
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c16fad5499d60d29e8503364a688806e916a7fc
| 2,031
|
py
|
Python
|
src/bin_expr.py
|
Command-Master/MCCC
|
a49440bfd8542002aee35d41bee093dc8b51d781
|
[
"MIT"
] | 6
|
2021-01-15T03:49:01.000Z
|
2021-11-02T10:43:22.000Z
|
src/bin_expr.py
|
Command-Master/MCCC
|
a49440bfd8542002aee35d41bee093dc8b51d781
|
[
"MIT"
] | null | null | null |
src/bin_expr.py
|
Command-Master/MCCC
|
a49440bfd8542002aee35d41bee093dc8b51d781
|
[
"MIT"
] | null | null | null |
from c_int import Int
from casting import cast
from globals_consts import NAMESPACE
from temps import used_temps, get_temp, get_temp_func
def binary_expression(copy_strings, expression, target, variables_name, vtypes):
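# Generate mcfunction code for a binary expression: evaluate both operands, cast them to a
# common result type, and for '&&'/'||' short-circuit by moving the right-hand side into a
# generated helper function that is only invoked when the left-hand result requires it.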
from expression import generate_expression
c1, t1, tt1 = generate_expression(None, expression.left, vtypes, variables_name, copy_strings, False)
c2, t2, tt2 = generate_expression(None, expression.right, vtypes, variables_name, copy_strings, False)
for ttt in tt1: used_temps.remove(ttt)
for ttt in tt2: used_temps.remove(ttt)
ot = cast(t1, t2)
rt = ot
if expression.op in ['<', '>', '<=', '>=', '==', '!=', '&&']:
rt = Int()
if target is None or target == []:
target = [get_temp() for _ in range(ot.size)]
used_temps.extend(target)
code = ''
if expression.op in ['&&', '||']:
if expression.op == '&&':
code += c1
code += t1.cast(ot, tt1, target)
f2 = get_temp_func()
f2h = open(f'{f2}.mcfunction', 'w')
f2h.write(c2)
f2h.write(t2.cast(ot, tt2, target))
f2h.close()
code += f'execute unless score {target[0]} {NAMESPACE} matches 0 run function {NAMESPACE}:{f2}\n'
elif expression.op == '||':
code += c1
code += t1.cast(ot, tt1, target)
f2 = get_temp_func()
f2h = open(f'{f2}.mcfunction', 'w')
f2h.write(c2)
f2h.write(t2.cast(ot, tt2, target))
f2h.close()
code += f'execute if score {target[0]} {NAMESPACE} matches 0 run function {NAMESPACE}:{f2}\n'
else:
if ot == t1:
code += c1
code += c2
code += t2.cast(ot, tt2, target)
code += ot.binary(expression.op, tt1, target, target)
else:
code += c1
code += t1.cast(ot, tt1, target)
code += c2
code += ot.binary(expression.op, target, tt2, target)
return code, rt, target
| 39.823529
| 109
| 0.557361
| 259
| 2,031
| 4.274131
| 0.258687
| 0.065041
| 0.036134
| 0.03252
| 0.471545
| 0.412827
| 0.349594
| 0.349594
| 0.325203
| 0.325203
| 0
| 0.036017
| 0.302807
| 2,031
| 51
| 110
| 39.823529
| 0.745763
| 0
| 0
| 0.428571
| 0
| 0.040816
| 0.108268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.102041
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c179ba2d16aa2d479920de1be09d5ac3e265384
| 1,186
|
py
|
Python
|
utest/x3270/test_screenshot.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | 3
|
2018-10-02T14:32:06.000Z
|
2018-10-02T14:33:32.000Z
|
utest/x3270/test_screenshot.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | null | null | null |
utest/x3270/test_screenshot.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | null | null | null |
import os
from pytest_mock import MockerFixture
from robot.api import logger
from Mainframe3270.x3270 import x3270
def test_set_screenshot_folder(under_test: x3270):
path = os.getcwd()
under_test.set_screenshot_folder(path)
assert under_test.imgfolder == os.getcwd()
def test_set_screenshot_folder_nonexistent(mocker: MockerFixture, under_test: x3270):
mocker.patch("robot.api.logger.error")
mocker.patch("robot.api.logger.warn")
path = os.path.join(os.getcwd(), "nonexistent")
under_test.set_screenshot_folder(path)
logger.error.assert_called_with('Given screenshots path "%s" does not exist' % path)
logger.warn.assert_called_with(
'Screenshots will be saved in "%s"' % under_test.imgfolder
)
def test_take_screenshot(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.save_screen")
mocker.patch("robot.api.logger.write")
mocker.patch("time.time", return_value=1.0)
under_test.take_screenshot(500, 500)
logger.write.assert_called_with(
'<iframe src="./screenshot_1000.html" height="500" width="500"></iframe>',
level="INFO",
html=True,
)
| 28.238095
| 88
| 0.725126
| 158
| 1,186
| 5.240506
| 0.379747
| 0.086957
| 0.082126
| 0.111111
| 0.323672
| 0.183575
| 0.10628
| 0
| 0
| 0
| 0
| 0.04995
| 0.155987
| 1,186
| 41
| 89
| 28.926829
| 0.777223
| 0
| 0
| 0.074074
| 0
| 0
| 0.232715
| 0.130691
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.111111
| false
| 0
| 0.148148
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c17a59c22e1b7744bde1f37891a9b3e7d5581e6
| 35,752
|
py
|
Python
|
splat/photometry.py
|
brackham/splat
|
5ee0da82f19017e900ee83af94609dbe9f8a0ea4
|
[
"MIT"
] | null | null | null |
splat/photometry.py
|
brackham/splat
|
5ee0da82f19017e900ee83af94609dbe9f8a0ea4
|
[
"MIT"
] | null | null | null |
splat/photometry.py
|
brackham/splat
|
5ee0da82f19017e900ee83af94609dbe9f8a0ea4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the spectrophotometry functions for SPLAT
"""
# imports - internal
import copy
import os
# imports - external
import numpy
from astropy import units as u # standard units
from astropy import constants as const # physical constants in SI units
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from scipy.integrate import trapz # for numerical integration
from scipy.interpolate import interp1d
# splat functions and constants
from .initialize import *
from .utilities import *
#####################################################
############### SPECTROPHOTOMETRY ###############
#####################################################
# this function has been obseleted
def checkFilter(filt,verbose=True):
output = False
f = copy.deepcopy(filt)
f = f.replace(' ','_').upper()
for k in list(FILTERS.keys()):
if f==k.upper() or f.lower() in FILTERS[k]['altnames']:
output = k
if output == False and verbose == True:
print('\nFilter '+filt+' not currently available for SPLAT; contact '+EMAIL+'\n')
filterInfo()
return output
def filterProfile(filt,**kwargs):
'''
:Purpose: Retrieve the filter profile for a SPLAT filter. Returns two arrays: the filter wavelength and filter transmission curve.
:param filter: String giving the name of one of the predefined filters listed in splat.FILTERS.keys() (required)
:param filterFolder: folder containing the filter transmission files (optional, default = splat.FILTER_FOLDER)
:Example:
>>> import splat
>>> import splat.photometry as spphot
>>> sp = splat.getSpectrum(shortname='1507-1627')[0]
>>> sp.fluxCalibrate('2MASS J',14.5)
>>> spphot.filterMag(sp,'MKO J')
(14.345894376898123, 0.027596454828421831)
'''
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
# check that requested filter is in list
f0 = checkFilterName(filt, verbose=True)
if f0 == False: raise ValueError
filt = f0
# read in filter
fwave,ftrans = numpy.genfromtxt(os.path.normpath(filterFolder+FILTERS[filt]['file']), comments='#', unpack=True, missing_values = ('NaN','nan'), filling_values = (numpy.nan))
# print(type(fwave),type(ftrans),isinstance(fwave,numpy.ndarray),isinstance(ftrans,numpy.ndarray),not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray))
if not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray):
raise ValueError('\nProblem reading in {}'.format(filterFolder+FILTERS[filt]['file']))
fwave = fwave[~numpy.isnan(ftrans)]*u.micron
ftrans = ftrans[~numpy.isnan(ftrans)]
return fwave,ftrans
def filterMag(sp,filt,*args,**kwargs):
'''
:Purpose:
Determine the photometric magnitude of a source based on its
spectrum. Spectral fluxes are convolved with the filter profile specified by
the ``filter`` input. By default this filter is also
convolved with a model of Vega to extract Vega magnitudes,
but the user can also specify AB magnitudes, photon flux or energy flux.
:Required Parameters:
**sp**: Spectrum class object, which should contain wave, flux and noise array elements.
**filter**: String giving name of filter, which can either be one of the predefined filters listed in splat.FILTERS.keys() or a custom filter name
:Optional Parameters:
**custom** = None: A 2 x N vector array specifying the wavelengths and transmissions for a custom filter
**notch** = None: A 2 element array that specifies the lower and upper wavelengths for a notch filter (100% transmission within, 0% transmission without)
**vega** = True: compute Vega magnitudes (may be set by filter)
**ab** = False: compute AB magnitudes (may be set by filter)
**energy** = False: compute energy flux
**photon** = False: compute photon flux
**filterFolder** = splat.FILTER_FOLDER: folder containing the filter transmission files
**vegaFile** = 'vega_kurucz.txt': name of file containing Vega flux file, must be within ``filterFolder``
**nsamples** = 100: number of samples to use in Monte Carlo error estimation
**info** = False: List the predefined filter names available
**verbose** = True: List the predefined filter names available
:Example:
>>> import splat
>>> import splat.photometry as spphot
>>> sp = splat.getSpectrum(shortname='1507-1627')[0]
>>> sp.fluxCalibrate('2MASS J',14.5)
>>> spphot.filterMag(sp,'MKO J')
(14.345894376898123, 0.027596454828421831)
'''
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
vegaFile = kwargs.get('vegaFile',VEGAFILE)
info = kwargs.get('info',False)
custom = kwargs.get('custom',False)
notch = kwargs.get('notch',False)
vega = kwargs.get('vega',True)
ab = kwargs.get('ab',not vega)
rsr = kwargs.get('rsr',False)
nsamples = kwargs.get('nsamples',100)
verbose = kwargs.get('verbose',False)
# check that requested filter is in list
if isinstance(custom,bool) and isinstance(notch,bool):
f0 = checkFilterName(filt,verbose=True)
if f0 == False:
return numpy.nan, numpy.nan
filt = f0
# reset filter calculation methods based on filter design
if 'ab' in FILTERS[filt]['method']:
ab = kwargs.get('ab',True)
vega = not ab
if 'vega' in FILTERS[filt]['method']:
vega = kwargs.get('vega',True)
ab = not vega
rsr = FILTERS[filt]['rsr']
# other possibilities
photons = kwargs.get('photons',False)
photons = kwargs.get('photon',photons)
energy = kwargs.get('energy',False)
energy = kwargs.get('flux',energy)
if (photons or energy):
vega = False
ab = False
if photons: energy = False
if energy: photons = False
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
# units
if isinstance(fwave,u.quantity.Quantity) == True:
fwave = fwave.to(u.micron)
else:
fwave = fwave*u.micron
# check that spectrum and filter cover the same wavelength ranges
if numpy.nanmax(fwave) < numpy.nanmin(sp.wave) or numpy.nanmin(fwave) > numpy.nanmax(sp.wave):
if verbose==True: print('\nWarning: no overlap between spectrum for {} and filter {}'.format(sp.name,filt))
return numpy.nan, numpy.nan
if numpy.nanmin(fwave) < numpy.nanmin(sp.wave) or numpy.nanmax(fwave) > numpy.nanmax(sp.wave):
if verbose==True: print('\nWarning: spectrum for {} does not span full filter profile for {}'.format(sp.name,filt))
# interpolate spectrum onto filter wavelength function
wgood = numpy.where(~numpy.isnan(sp.noise))
if len(sp.wave[wgood]) > 0:
d = interp1d(sp.wave[wgood].value,sp.flux[wgood].value,bounds_error=False,fill_value=0.)
n = interp1d(sp.wave[wgood].value,sp.noise[wgood].value,bounds_error=False,fill_value=0)
# catch for models
else:
if verbose==True: print('\nWarning: data values in range of filter {} have no uncertainties'.format(filt))
d = interp1d(sp.wave.value,sp.flux.value,bounds_error=False,fill_value=0.)
n = interp1d(sp.wave.value,sp.flux.value*1.e-9,bounds_error=False,fill_value=0.)
result = []
if (vega):
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
vflux.to(sp.flux_unit,equivalencies=u.spectral_density(vwave))
# interpolate Vega onto filter wavelength function
v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.)
if rsr:
val = -2.5*numpy.log10(trapz(ftrans*fwave.value*d(fwave.value),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value))
else:
val = -2.5*numpy.log10(trapz(ftrans*d(fwave.value),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value))
for i in numpy.arange(nsamples):
# result.append(-2.5*numpy.log10(trapz(ftrans*numpy.random.normal(d(fwave),n(fwave))*sp.flux_unit,fwave)/trapz(ftrans*v(fwave)*sp.flux_unit,fwave)))
if rsr:
result.append(-2.5*numpy.log10(trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value)))
else:
result.append(-2.5*numpy.log10(trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value)))
outunit = 1.
elif (ab):
nu = sp.wave.to('Hz',equivalencies=u.spectral())
fnu = sp.flux.to('Jy',equivalencies=u.spectral_density(sp.wave))
noisenu = sp.noise.to('Jy',equivalencies=u.spectral_density(sp.wave))
filtnu = fwave.to('Hz',equivalencies=u.spectral())
fconst = 3631*u.jansky
d = interp1d(nu.value,fnu.value,bounds_error=False,fill_value=0.)
n = interp1d(nu.value,noisenu.value,bounds_error=False,fill_value=0.)
b = trapz((ftrans/filtnu.value)*fconst.value,filtnu.value)
val = -2.5*numpy.log10(trapz(ftrans*d(filtnu.value)/filtnu.value,filtnu.value)/b)
for i in numpy.arange(nsamples):
a = trapz(ftrans*(d(filtnu.value)+numpy.random.normal(0,1)*n(filtnu.value))/filtnu.value,filtnu.value)
result.append(-2.5*numpy.log10(a/b))
outunit = 1.
elif (energy):
outunit = u.erg/u.s/u.cm**2
if rsr:
a = trapz(ftrans*fwave.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit
b = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit
c = trapz(ftrans*fwave.value*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit
val = (a/b * c/b).to(outunit).value
else:
a = trapz(ftrans*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit
b = trapz(ftrans,fwave.value)*sp.wave.unit
c = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit
val = (a/b * c/b).to(outunit).value
for i in numpy.arange(nsamples):
if rsr:
result.append((trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value)
else:
result.append((trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value)
elif (photons):
outunit = 1./u.s/u.cm**2
convert = const.h.to('erg s')*const.c.to('micron/s')
val = (trapz(ftrans*fwave.value*convert.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value
for i in numpy.arange(nsamples):
result.append((trapz(ftrans*fwave.value*convert.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value)
else:
raise NameError('\nfilterMag not given a correct physical quantity (vega, ab, energy, photons) to compute photometry\n\n')
# val = numpy.nanmean(result)*outunit
err = numpy.nanstd(result)
if len(sp.wave[wgood]) == 0:
err = 0.
return val*outunit,err*outunit
def vegaToAB(filt,vegafile=VEGAFILE,filterfolder=SPLAT_PATH+FILTER_FOLDER,custom=False,notch=False,rsr=False,**kwargs):
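# Compute the Vega-to-AB magnitude offset for a filter: integrate the Vega reference
# spectrum through the filter profile in frequency space and compare it against the
# 3631 Jy AB zero-point flux.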
# check that requested filter is in list
if isinstance(custom,bool) and isinstance(notch,bool):
f0 = checkFilterName(filt,verbose=True)
if f0 == False:
return numpy.nan, numpy.nan
filt = f0
rsr = FILTERS[filt]['rsr']
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterfolder+vegafile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
# trim spectrum
vflux = vflux[vwave>=numpy.nanmin(fwave)]
vwave = vwave[vwave>=numpy.nanmin(fwave)]
vflux = vflux[vwave<=numpy.nanmax(fwave)]
vwave = vwave[vwave<=numpy.nanmax(fwave)]
# convert to fnu
nu = vwave.to('Hz',equivalencies=u.spectral())
fnu = vflux.to('Jy',equivalencies=u.spectral_density(vwave))
filtnu = fwave.to('Hz',equivalencies=u.spectral())
fconst = 3631*u.jansky
d = interp1d(nu.value,fnu.value,bounds_error=False,fill_value=0.)
b = trapz((ftrans/filtnu.value)*fconst.value,filtnu.value)
return -2.5*numpy.log10(trapz(ftrans*d(filtnu.value)/filtnu.value,filtnu.value)/b)
def filterInfo(*args,**kwargs):
'''
:Purpose: Prints out the current list of filters in the SPLAT reference library.
'''
verbose = kwargs.get('verbose',True)
if len(args) > 0:
fname = list(args)
elif kwargs.get('filter',False) != False:
fname = kwargs['filter']
else:
fname = sorted(list(FILTERS.keys()))
if isinstance(fname,list) == False:
fname = [fname]
output = {}
for k in fname:
f = checkFilterName(k)
if f != False:
output[f] = {}
output[f]['description'] = FILTERS[f]['description']
output[f]['zeropoint'] = FILTERS[f]['zeropoint']
fwave,ftrans = filterProfile(f,**kwargs)
try:
fwave = fwave.to(u.micron)
except:
fwave = fwave*u.micron
fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))]
output[f]['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw)
output[f]['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw))
output[f]['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw))
output[f]['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05)
output[f]['lambda_min'] = numpy.min(fw)
output[f]['lambda_max'] = numpy.max(fw)
if verbose ==True:
print(f.replace('_',' ')+': '+output[f]['zeropoint'])
print('Zeropoint = {} Jy'.format(output[f]['zeropoint']))
print('Central wavelength: = {:.3f}'.format(output[f]['lambda_central']))
print('Mean wavelength: = {:.3f}'.format(output[f]['lambda_mean']))
print('Pivot point: = {:.3f}'.format(output[f]['lambda_pivot']))
print('FWHM = {:.3f}'.format(output[f]['lambda_fwhm']))
print('Wavelength range = {:.3f} to {:.3f}\n'.format(output[f]['lambda_min'],output[f]['lambda_max']))
else:
if verbose ==True: print(' Filter {} not in SPLAT filter list'.format(k))
kys = list(output.keys())
if len(kys) == 1: return output[kys[0]]
else: return output
def filterProperties(filt,**kwargs):
'''
:Purpose: Returns a dictionary containing key parameters for a particular filter.
:param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys()
:type filter: required
:param verbose: print out information about filter to screen
:type verbose: optional, default = True
:Example:
>>> import splat
>>> data = splat.filterProperties('2MASS J')
Filter 2MASS J: 2MASS J-band
Zeropoint = 1594.0 Jy
Pivot point: = 1.252 micron
FWHM = 0.323 micron
Wavelength range = 1.066 to 1.442 micron
>>> data = splat.filterProperties('2MASS X')
Filter 2MASS X not among the available filters:
2MASS H: 2MASS H-band
2MASS J: 2MASS J-band
2MASS KS: 2MASS Ks-band
BESSEL I: Bessel I-band
FOURSTAR H: FOURSTAR H-band
FOURSTAR H LONG: FOURSTAR H long
FOURSTAR H SHORT: FOURSTAR H short
...
'''
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
# check that requested filter is in list
filt = checkFilterName(filt)
if filt == False: return None
report = {}
report['name'] = filt
report['description'] = FILTERS[filt]['description']
report['zeropoint'] = FILTERS[filt]['zeropoint']
report['method'] = FILTERS[filt]['method']
report['rsr'] = FILTERS[filt]['rsr']
fwave,ftrans = filterProfile(filt,**kwargs)
try:
fwave = fwave.to(u.micron)
except:
fwave = fwave*u.micron
fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))]
# print(trapz(ft,fw))
# print(trapz(fw*ft,fw))
report['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw)
report['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw))
report['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw))
report['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05)
report['lambda_min'] = numpy.min(fw)
report['lambda_max'] = numpy.max(fw)
report['wave'] = fwave
report['transmission'] = ftrans
# report values out
if kwargs.get('verbose',False):
print('\nFilter '+filt+': '+report['description'])
print('Zeropoint = {} Jy'.format(report['zeropoint']))
print('Pivot point: = {:.3f}'.format(report['lambda_pivot']))
print('FWHM = {:.3f}'.format(report['lambda_fwhm']))
print('Wavelength range = {:.3f} to {:.3f}\n'.format(report['lambda_min'],report['lambda_max']))
return report
def magToFlux(mag,filt,**kwargs):
'''
:Purpose: Converts a magnitude into an energy, and vice versa.
:param mag: magnitude on whatever system is defined for the filter or provided (required)
:param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys() (required)
:param reverse: convert energy into magnitude instead (optional, default = False)
:param ab: magnitude is on the AB system (optional, default = filter preference)
:param vega: magnitude is on the Vega system (optional, default = filter preference)
:param rsr: magnitude is on the Vega system (optional, default = filter preference)
:param units: units for energy as an astropy.units variable; if this conversion does not work, the conversion is ignored (optional, default = erg/cm2/s)
:param verbose: print out information about filter to screen (optional, default = False)
WARNING: THIS CODE IS ONLY PARTIALLY COMPLETE
'''
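# Illustrative call (hypothetical values; the actual result depends on the filter and
# Vega reference files shipped with SPLAT):
#   flux, flux_unc = magToFlux(14.5, '2MASS J', e_mag=0.03)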
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
vegaFile = kwargs.get('vegaFile','vega_kurucz.txt')
vega = kwargs.get('vega',True)
ab = kwargs.get('ab',not vega)
rsr = kwargs.get('rsr',False)
nsamples = kwargs.get('nsamples',100)
custom = kwargs.get('custom',False)
notch = kwargs.get('notch',False)
base_unit = u.erg/(u.cm**2 * u.s)
return_unit = kwargs.get('unit',base_unit)
e_mag = kwargs.get('uncertainty',0.)
e_mag = kwargs.get('unc',e_mag)
e_mag = kwargs.get('e_mag',e_mag)
if not isinstance(mag,u.quantity.Quantity): mag=mag*u.s/u.s
if not isinstance(e_mag,u.quantity.Quantity): e_mag=e_mag*mag.unit
# check that requested filter is in list
filt = checkFilterName(filt)
if filt == False: return numpy.nan, numpy.nan
# reset filter calculation methods based on filter design
if 'ab' in FILTERS[filt]['method']:
ab = kwargs.get('ab',True)
vega = not ab
if 'vega' in FILTERS[filt]['method']:
vega = kwargs.get('vega',True)
ab = not vega
if 'rsr' in FILTERS[filt]['method']:
rsr = kwargs.get('rsr',True)
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)*u.micron
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
if isinstance(fwave,u.quantity.Quantity) == False: fwave=fwave*u.micron
if isinstance(ftrans,u.quantity.Quantity) == True: ftrans=ftrans.value
fwave = fwave[~numpy.isnan(ftrans)]
ftrans = ftrans[~numpy.isnan(ftrans)]
result = []
err = 0.
# magnitude -> energy
if kwargs.get('reverse',False) == False:
if vega == True:
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
# interpolate Vega onto filter wavelength function
v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.)
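# in-band flux: F = 10**(-0.4*mag) * integral of T(lam)*F_Vega(lam) dlam (lambda-weighted when the filter is an RSR)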
if rsr: fact = trapz(ftrans*fwave.value*v(fwave.value),fwave.value)
else: fact = trapz(ftrans*v(fwave.value),fwave.value)
val = 10.**(-0.4*mag.value)*fact*u.erg/(u.cm**2 * u.s)
# calculate uncertainty
if e_mag.value > 0.:
for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact)
err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s)
else: err = 0.*u.erg/(u.cm**2 * u.s)
elif ab == True:
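# AB case: convert the 3631 Jy zero point to F_lambda over the filter, then F = 10**(-0.4*mag) * integral of T(lam)*f_zp(lam) dlam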
fconst = 3631*u.jansky
ftrans = (ftrans*fconst).to(u.erg/(u.cm**2 * u.s * u.micron),equivalencies=u.spectral_density(fwave))
if rsr: fact = trapz(ftrans.value*fwave.value,fwave.value)
else: fact = trapz(ftrans.value,fwave.value)
val = (10.**(-0.4*mag.value)*fact)*u.erg/(u.cm**2 * u.s)
# calculate uncertainty
if e_mag.value > 0.:
for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact)
err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s)
else: err = 0.*u.erg/(u.cm**2 * u.s)
else:
raise ValueError('\nmagToFlux needs vega or ab method specified')
# convert to the desired energy flux units, falling back to erg/cm2/s if that fails
try:
val = val.to(return_unit)
err = err.to(return_unit)
except:
print('\nWarning: unit {} is not an energy flux unit'.format(return_unit))
try:
val = val.to(base_unit)
err = err.to(base_unit)
except:
print('\nWarning: cannot convert result to an energy flux unit {}'.format(base_unit))
return numpy.nan, numpy.nan
return val, err
# energy -> magnitude
# THIS NEEDS TO BE COMPLETED
else:
print('Warning: the flux -> magnitude (reverse) conversion is not yet implemented')
return numpy.nan, numpy.nan
# check that input is an energy flux
# try:
# mag.to(base_unit)
# e_mag.to(base_unit)
# except:
# raise ValueError('\nInput quantity unit {} is not a flux unit'.format(mag.unit))
def visualizeFilter(filters,verbose=True,xra=[],yra=[0,1.2],**kwargs):
'''
:Purpose: Plots a filter profile or set of filter profiles, optionally on top of a spectrum
WARNING: THIS CODE IS CURRENTLY UNDER DEVELOPMENT, BUGS MAY BE COMMON
'''
filt = copy.deepcopy(filters)
wave_unit = kwargs.get('wave_unit',DEFAULT_WAVE_UNIT)
# single filter name
if isinstance(filt,str):
filt = [filt]
if isinstance(filt,list):
# list of filter names
if isinstance(filt[0],str):
# build a cleaned list rather than modifying filt while iterating over it
checked = []
for f in filt:
fc = checkFilterName(f)
if fc == False:
if verbose==True: print('Removed filter {}: not included in SPLAT'.format(f))
else:
checked.append(fc)
filt = checked
if len(filt) == 0:
raise ValueError('Did not recognize any of the input filters {}'.format(filters))
# prep parameters
fwave,ftrans = filterProfile(filt[0],**kwargs)
if isUnit(fwave): wave_unit = kwargs.get('wave_unit',fwave.unit)
xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit))
yl = kwargs.get('ylabel','Transmission Curve')
legend = []
fig = plt.figure(figsize=kwargs.get('figsize',[5,4]))
for i,f in enumerate(filt):
fwave,ftrans = filterProfile(f,**kwargs)
if isUnit(fwave): fwave.to(wave_unit)
else: fwave = fwave*wave_unit
if kwargs.get('normalize',False): ftrans = ftrans/numpy.nanmax(ftrans)
plt.plot(fwave,ftrans)
if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)]
xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])]
yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])]
legend.append(FILTERS[f]['description'])
if FILTERS[f]['rsr'] == True: yl = kwargs.get('ylabel','Relative Spectral Response')
# single notch range passed as [lambda_min,lambda_max]
if isinstance(filt[0],int) or isinstance(filt[0],float):
filt = [filt]
# list of notch ranges
if isinstance(filt[0],list):
xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit))
yl = kwargs.get('ylabel','Transmission Curve')
legend = []
fig = plt.figure(figsize=kwargs.get('figsize',[5,4]))
for i,f in enumerate(filt):
fwave,ftrans = numpy.linspace(f[0],f[1],1000)*wave_unit,numpy.ones(1000)
plt.plot(fwave,ftrans)
if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)]
xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])]
yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])]
legend.append('Filter {}'.format(i+1))
else:
raise ValueError('Could not parse input {}'.format(filt))
# add a comparison spectrum
sp = kwargs.get('spectrum',None)
sp = kwargs.get('comparison',sp)
if isinstance(sp,splat.core.Spectrum) == True:
sp.normalize(xra)
sp.scale(numpy.nanmax(ftrans)*kwargs.get('comparison_scale',0.8))
plt.plot(sp.wave,sp.flux,color=kwargs.get('comparison_color','k'),alpha=kwargs.get('comparison_alpha',0.5))
legend.append(sp.name)
yra = [yra[0],yra[1]*1.1]
# finish up
plt.xlim(xra)
plt.ylim(yra)
plt.xlabel(xl)
plt.ylabel(yl)
plt.legend(legend)
# save if desired
file = kwargs.get('file','')
file = kwargs.get('filename',file)
file = kwargs.get('output',file)
if file != '': plt.savefig(file)
return fig
#########################################
######## SED FITTING TOOLS #########
### WARNING: THESE ARE EXPERIMENTAL!! ###
#########################################
# plan:
def modelMagnitudes(verbose=True):
'''
this will be a code that calculates a set of filter magnitudes for a model set's SED models,
saves them to a file that can be uploaded, and pre-saves some commonly used model magnitudes
'''
pass
def interpolateMagnitudes(verbose=True):
'''
produces an interpolated set of magnitudes from a grid of pre-computed model magnitudes
'''
pass
def compareMagnitudes(mags1,mags2,unc=None,unc2=None,ignore=[],verbose=True):
'''
this code compares two sets of magnitudes using a chi-square statistic, solving for the best-fit magnitude offset
'''
# default to unit uncertainties if none are provided
if unc is None: unc = dict(zip(list(mags1.keys()),numpy.ones(len(mags1))))
dm,em = [],[]
for f in list(mags1.keys()):
if f in list(mags2.keys()) and f in list(unc.keys()) and f not in ignore:
dm.append(mags1[f]-mags2[f])
em.append(unc[f])
# find the best magnitude offset: inverse-variance weighted mean of the differences
dm = numpy.array(dm)
em = numpy.array(em)
offset = numpy.sum(dm/em**2)/numpy.sum(1./em**2)
dmo = numpy.array([m-offset for m in dm])
return numpy.sum((dmo/em)**2), offset
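# Illustrative call (hypothetical magnitude and uncertainty dictionaries):
# chi2, offset = compareMagnitudes({'2MASS J': 14.2, '2MASS KS': 13.5},
#                                  {'2MASS J': 14.0, '2MASS KS': 13.4},
#                                  unc={'2MASS J': 0.05, '2MASS KS': 0.07})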
def SEDFitGrid(verbose=True):
'''
this code will compare a set of magnitudes to a grid of model magnitudes and choose the
closest match based on various statistics
'''
pass
def SEDFitMCMC(verbose=True):
'''
this code will conduct a comparison of a set of magnitudes to model magnitudes using an
MCMC wrapper, and choose best/average/distribution of parameters
'''
pass
def SEDFitAmoeba(verbose=True):
'''
this code will conduct a comparison of a set of magnitudes to model magnitudes using an
Amoeba (Nelder-Mead) wrapper, and choose the closest match
'''
pass
def SEDVisualize(verbose=True):
'''
Visualizes magnitudes on SED scale (flux = lam x F_lam), with option of also comparing to SED spectrum
'''
pass
#####################################################
############### MAGNITUDE CLASS ###############
#####################################################
class Magnitude(object):
'''
:Description:
This is a class data structure for a magnitude value
'''
def __init__(self, magnitude, filt, uncertainty=0., magtype='apparent', verbose=False,**kwargs):
self.magnitude = magnitude
self.uncertainty = uncertainty
self.type = magtype
# check filter and rename if necessary
self.knownfilter = True
fflag = checkFilterName(filt,verbose=verbose)
if fflag == False:
if verbose== True: print('filter {} is not a standard filter; some functions may not work'.format(filt))
self.knownfilter = False
else: filt = fflag
self.filter = filt
# some things that are based on presets
if self.knownfilter == True:
self.wave,self.transmission = filterProfile(self.filter)
info = filterProperties(self.filter)
for k in info.keys(): setattr(self,k,info[k])
def __copy__(self):
'''
:Purpose: Make a copy of a Magnitude object
'''
s = type(self)(self.magnitude,self.filter,uncertainty=self.uncertainty)
s.__dict__.update(self.__dict__)
return s
# backup version
def copy(self):
'''
:Purpose: Make a copy of a Magnitude object
'''
s = type(self)(self.magnitude,self.filter,uncertainty=self.uncertainty)
s.__dict__.update(self.__dict__)
return s
def __repr__(self):
'''
:Purpose: A simple representation of the Magnitude object
'''
if self.uncertainty != 0. and numpy.isfinite(self.uncertainty):
return '{} magnitude of {}+/-{}'.format(self.filter,self.magnitude,self.uncertainty)
else:
return '{} magnitude of {}'.format(self.filter,self.magnitude)
def __add__(self,other,samp=1000):
'''
:Purpose:
A representation of addition for Magnitude classes that takes into account uncertainties
:Output:
A new Magnitude object equal to the sum of values
'''
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
out.magnitude = self.magnitude+other.magnitude
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1+m2
out.uncertainty = numpy.nanstd(val)
# check filter agreement
if self.filter != other.filter:
out.filter = '{}+{}'.format(self.filter,other.filter)
return out
def __sub__(self,other,samp=1000):
'''
:Purpose:
A representation of subtraction for Magnitude classes that takes into account uncertainties
:Output:
A new Magnitude object equal to the difference of values
'''
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
out.magnitude = self.magnitude-other.magnitude
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1-m2
out.uncertainty = numpy.nanstd(val)
# check filter agreement
if self.filter != other.filter:
out.filter = '{}-{}'.format(self.filter,other.filter)
return out
def flux(self,type='fnu',samp=1000):
'''
:Purpose:
Report the equivalent flux density of a magnitude
:Output:
astropy quantity in flux density units (default = erg/cm2/s/micron)
NOTE: THIS METHOD IS NOT YET IMPLEMENTED
'''
pass
def addFlux(self,other,samp=1000):
'''
:Purpose:
A representation of addition for magnitudes (addition of fluxes)
:Output:
A new magnitude object equal to the equivalent sum of fluxes
'''
# check filter agreement
if self.filter != other.filter:
raise ValueError('magnitudes filters {} and {} are not the same'.format(self.filter,other.filter))
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
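# combined magnitude from summed fluxes: m = m1 - 2.5*log10(1 + 10**(-0.4*(m2 - m1)))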
out.magnitude = self.magnitude-2.5*numpy.log10(1.+10.**(-0.4*(other.magnitude-self.magnitude)))
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1-2.5*numpy.log10(1.+10.**(-0.4*(m2-m1)))
out.uncertainty = numpy.nanstd(val)
return out
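# Illustrative use of the Magnitude class (hypothetical filter and values):
# J1 = Magnitude(14.2, '2MASS J', uncertainty=0.03)
# J2 = Magnitude(15.0, '2MASS J', uncertainty=0.05)
# Jtot = J1.addFlux(J2)   # magnitude of the combined (summed-flux) source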
| 39.857302
| 197
| 0.623881
| 4,730
| 35,752
| 4.680973
| 0.11649
| 0.029357
| 0.015582
| 0.018969
| 0.5759
| 0.531774
| 0.490583
| 0.483628
| 0.450702
| 0.432952
| 0
| 0.017313
| 0.229386
| 35,752
| 896
| 198
| 39.901786
| 0.786324
| 0.241273
| 0
| 0.43787
| 0
| 0.001972
| 0.080124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045365
| false
| 0.017751
| 0.023669
| 0
| 0.108481
| 0.045365
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c18aa829131bc05a668cd4d7a72da450336ed4f
| 4,766
|
py
|
Python
|
example_scripts/profile_validation/plot_validation_gridded_data.py
|
British-Oceanographic-Data-Centre/NEMO-ENTRUST
|
41ed278e56428404ab8ec41d74a9a3a761e308ae
|
[
"MIT"
] | null | null | null |
example_scripts/profile_validation/plot_validation_gridded_data.py
|
British-Oceanographic-Data-Centre/NEMO-ENTRUST
|
41ed278e56428404ab8ec41d74a9a3a761e308ae
|
[
"MIT"
] | null | null | null |
example_scripts/profile_validation/plot_validation_gridded_data.py
|
British-Oceanographic-Data-Centre/NEMO-ENTRUST
|
41ed278e56428404ab8ec41d74a9a3a761e308ae
|
[
"MIT"
] | null | null | null |
"""
Plot up surface or bottom (or any fixed level) errors from a profile object
with no z_dim (vertical dimension). Provide an array of netcdf files and
mess with the options to get a figure you like.
You can define how many rows and columns the plot will have. This script will
plot the provided list of netcdf datasets from left to right and top to bottom.
A colorbar will be placed right of the figure.
"""
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append("/Users/dbyrne/code/COAsT")
import coast
import pandas as pd
#%% File settings
run_name = "test"
# List of analysis output files. Profiles from each will be plotted
# on each axis of the plot
fn_list = [
"~/transfer/test_grid.nc",
"~/transfer/test_grid.nc",
]
# Filename for the output
fn_out = "/Users/dbyrne/transfer/surface_gridded_errors_{0}.png".format(run_name)
#%% General Plot Settings
var_name = "abs_diff_temperature" # Variable name in analysis file to plot
# If you used a modified variable to make the gridded data,
# then this is where to select season etc.
save_plot = False
# Masking out grid cells that don't contain many points
min_points_in_average = 5
name_of_count_variable = "grid_N"
# Subplot axes settings
n_r = 2 # Number of subplot rows
n_c = 2 # Number of subplot columns
figsize = (10, 5) # Figure size
lonbounds = [-15, 9.5] # Longitude bounds
latbounds = [45, 64] # Latitude bounds
subplot_padding = 0.5 # Amount of vertical and horizontal padding between plots
fig_pad = (0.075, 0.075, 0.1, 0.1) # Figure padding (left, top, right, bottom)
# Leave some space on right for colorbar
# Scatter opts
marker_size = 3 # Marker size
cmap = "bwr" # Colormap for normal points
clim = (-1, 1) # Color limits for normal points
discrete_cmap = True # Discretize colormap
cmap_levels = 14
# Labels and Titles
fig_title = "SST Errors" # Whole figure title
title_fontsize = 13 # Fontsize of title
title_fontweight = "bold" # Fontweight to use for title
dataset_names = ["CO9p0", "CO9p0", "CO9p0"] # Names to use for labelling plots
subtitle_fontsize = 11 # Fontsize for dataset subtitles
subtitle_fontweight = "normal" # Fontweight for dataset subtitles
# PLOT SEASONS. Make sure n_r = 2 and n_c = 2
# If this option is true, only the first dataset will be plotted, with seasonal
# variables on each subplot. The season_suffixes will be added to var_name
# for each subplot panel.
plot_seasons = True
season_suffixes = ["DJF", "MAM", "JJA", "SON"]
#%% Read and plot data
# Read all datasets into list
ds_list = [xr.open_dataset(dd) for dd in fn_list]
n_ds = len(ds_list)
n_ax = n_r * n_c
# Create plot and flatten axis array
f, a = coast.plot_util.create_geo_subplots(lonbounds, latbounds, n_r, n_c, figsize=figsize)
a_flat = a.flatten()
# Discretize colormap if requested
if discrete_cmap:
cmap = plt.cm.get_cmap(cmap, cmap_levels)
# Determine if we will extend the colormap or not
extend_cbar = []
# Loop over subplot axes (one dataset, or one season of the first dataset, per axis)
for ii in range(n_ax):
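# Map the flattened axis index to its (row, column) position in the subplot grid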
ur_index = np.unravel_index(ii, (n_r, n_c))
# Select season if required
if plot_seasons:
ds = ds_list[0]
var_ii = var_name + "_{0}".format(season_suffixes[ii])
N_var = "{0}_{1}".format(name_of_count_variable, season_suffixes[ii])
a_flat[ii].text(0.05, 1.02, season_suffixes[ii], transform=a_flat[ii].transAxes, fontweight="bold")
else:
ds = ds_list[ii]
var_ii = var_name
a_flat[ii].set_title(dataset_names[ii], fontsize=subtitle_fontsize, fontweight=subtitle_fontweight)
N_var = name_of_count_variable
data = ds[var_ii].values
count_var = ds[N_var]
data[count_var < min_points_in_average] = np.nan
# Scatter and set title
pc = a_flat[ii].pcolormesh(
ds.longitude,
ds.latitude,
data,
cmap=cmap,
vmin=clim[0],
vmax=clim[1],
)
# Will we extend the colorbar for this dataset?
extend_cbar.append(coast.plot_util.determine_colorbar_extension(data, clim[0], clim[1]))
# Set Figure title
f.suptitle(fig_title, fontsize=title_fontsize, fontweight=title_fontweight)
# Set tight figure layout
f.tight_layout(w_pad=subplot_padding, h_pad=subplot_padding)
f.subplots_adjust(left=(fig_pad[0]), bottom=(fig_pad[1]), right=(1 - fig_pad[2]), top=(1 - fig_pad[3]))
# Handle colorbar -- will we extend it?
if "both" in extend_cbar:
extend = "both"
elif "max" in extend_cbar and "min" in extend_cbar:
extend = "both"
elif "max" in extend_cbar:
extend = "max"
elif "min" in extend_cbar:
extend = "min"
else:
extend = "neither"
cbar_ax = f.add_axes([(1 - fig_pad[2] + fig_pad[2] * 0.15), 0.15, 0.025, 0.7])
f.colorbar(pc, cax=cbar_ax, extend=extend)
# Save plot if requested
if save_plot:
f.savefig(fn_out)
| 31.562914
| 107
| 0.712547
| 777
| 4,766
| 4.207207
| 0.342342
| 0.012848
| 0.018354
| 0.022025
| 0.032426
| 0.025084
| 0.025084
| 0.025084
| 0.025084
| 0.025084
| 0
| 0.020914
| 0.187369
| 4,766
| 150
| 108
| 31.773333
| 0.823135
| 0.389845
| 0
| 0.069767
| 0
| 0
| 0.089348
| 0.043097
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069767
| 0
| 0.069767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c191035667faa5283a1d949656c67ee58df9705
| 500
|
py
|
Python
|
feature-engineering/samples/statistical_features.py
|
jeury301/text-classifier
|
d86f658ef3368e4a3f6fd74328fa862e2881ac3b
|
[
"MIT"
] | null | null | null |
feature-engineering/samples/statistical_features.py
|
jeury301/text-classifier
|
d86f658ef3368e4a3f6fd74328fa862e2881ac3b
|
[
"MIT"
] | null | null | null |
feature-engineering/samples/statistical_features.py
|
jeury301/text-classifier
|
d86f658ef3368e4a3f6fd74328fa862e2881ac3b
|
[
"MIT"
] | null | null | null |
from sklearn.feature_extraction.text import TfidfVectorizer
def compute_tf_idf(corpus):
"""Computing term frequency (tf) - inverse document frequency (idf).
:param corpus: List of documents.
:returns: tf-idf of corpus.
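The returned value is a sparse (n_documents, n_features) matrix of tf-idf scores.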
"""
return TfidfVectorizer().fit_transform(corpus)
if __name__ == '__main__':
sample_corpus = [
'This is sample document.',
'another random document.',
'third sample document text'
]
print(compute_tf_idf(sample_corpus))
| 26.315789
| 72
| 0.682
| 57
| 500
| 5.701754
| 0.614035
| 0.046154
| 0.073846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216
| 500
| 18
| 73
| 27.777778
| 0.829082
| 0.256
| 0
| 0
| 0
| 0
| 0.232295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c194afa3b23b40f44a756d8271ecc2b2b439fa6
| 18,197
|
py
|
Python
|
Gds/src/fprime_gds/executables/tcpserver.py
|
hunterpaulson/fprime
|
70560897b56dc3037dc966c99751b708b1cc8a05
|
[
"Apache-2.0"
] | null | null | null |
Gds/src/fprime_gds/executables/tcpserver.py
|
hunterpaulson/fprime
|
70560897b56dc3037dc966c99751b708b1cc8a05
|
[
"Apache-2.0"
] | null | null | null |
Gds/src/fprime_gds/executables/tcpserver.py
|
hunterpaulson/fprime
|
70560897b56dc3037dc966c99751b708b1cc8a05
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import socket
import threading
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import time
import os
import signal
import sys
import struct
import errno
from fprime.constants import DATA_ENCODING
from optparse import OptionParser
__version__ = 0.1
__date__ = "2015-04-03"
__updated__ = "2016-04-07"
# Universal server id global
SERVER = None
LOCK = None
shutdown_event = threading.Event()
FSW_clients = []
GUI_clients = []
FSW_ids = []
GUI_ids = []
def signal_handler(*_):
print("Ctrl-C received, server shutting down.")
shutdown_event.set()
def now():
return time.ctime(time.time())
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
for use in new FSW gse.py application.
TCP socket server for commands, log events, and telemetry data.
Later this will handle other things such as sequence files and parameters.
Handle is instanced in own thread for each client.
Registration is done by sending the string "Register <name>".
Sending a message to destination <name> is done as
"A5A5 <name> <data>" Note only <data> is sent.
Any client that sends a "List" command makes the server display all
registered clients.
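Illustrative exchange (hypothetical client): the client first sends
"Register GUI\n", then forwards packets that begin with the 9-byte header
"A5A5 FSW " followed by the framed command bytes, or sends "List\n" to have
the server report all registered clients.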
"""
socketserver.StreamRequestHandler.allow_reuse_address = True
socketserver.StreamRequestHandler.timeout = 1
def handle(self): # on each client connect
"""
The function that is invoked upon a new client. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.partial = b""
self.cmdQueue = []
self.registered = False
self.name = b""
self.id = 0
# print self.client_address, now() # show this client's address
# Read the data from the socket
data = self.recv(13)
# Connection was closed by the client
if not data:
print("Client exited.")
return
else:
# Process the data into the cmdQueue
self.getCmds(data)
# Process the cmdQueue
self.processQueue()
if self.registered:
print("Registration complete waiting for message.")
self.getNewMsg()
else:
print("Unable to register client.")
return
LOCK.acquire()
del SERVER.dest_obj[self.name]
if self.name in FSW_clients:
FSW_clients.remove(self.name)
FSW_ids.remove(self.id)
elif self.name in GUI_clients:
GUI_clients.remove(self.name)
GUI_ids.remove(self.id)
LOCK.release()
print("Closed %s connection." % self.name.decode(DATA_ENCODING))
self.registered = False
self.request.close()
def getCmds(self, inputString, end_of_command=b"\n"):
"""
Build a command from partial or full socket input
"""
commands = inputString.split(end_of_command)
if len(self.partial):
commands[0] = self.partial + commands[0]
self.partial = b""
if len(commands[-1]):
self.partial = commands[-1]
self.cmdQueue.extend(commands[:-1])
else:
self.cmdQueue.extend(commands[:-1])
def processQueue(self):
for cmd in self.cmdQueue:
self.processRegistration(cmd)
self.cmdQueue = []
def processRegistration(self, cmd):
params = cmd.split()
process_id = 0
if params[0] == b"Register":
LOCK.acquire()
name = params[1]
if b"FSW" in name:
if FSW_clients:
process_id = sorted(FSW_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
FSW_clients.append(name)
FSW_ids.append(process_id)
elif b"GUI" in name:
if GUI_clients:
process_id = sorted(GUI_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
GUI_clients.append(name)
GUI_ids.append(process_id)
SERVER.dest_obj[name] = DestObj(name, self.request)
LOCK.release()
self.registered = True
self.name = name
self.id = process_id
print("Registered client " + self.name.decode(DATA_ENCODING))
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Loop while the connected client has packets to send/receive
while not shutdown_event.is_set():
# Read the header data from the socket either A5A5 or List
header = self.readHeader()
# If the received header is an empty string, connection closed, exit loop
if not header:
break
elif header == b"Quit":
LOCK.acquire()
print("Quit received!")
SERVER.dest_obj[self.name].put(struct.pack(">I", 0xA5A5A5A5))
shutdown_event.set()
time.sleep(1)
print("Quit processed!")
SERVER.shutdown()
SERVER.server_close()
LOCK.release()
break
# Got the header data so read the data of the message here...
data = self.readData(header)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def recv(self, l):
"""
Read l bytes from socket.
"""
chunk = b""
msg = b""
n = 0
while l > n:
try:
chunk = self.request.recv(l - n)
if chunk == b"":
print("read data from socket is empty!")
return b""
msg = msg + chunk
n = len(msg)
except socket.timeout:
if shutdown_event.is_set():
print("socket timed out and shutdown is requested")
return b"Quit\n"
continue
except socket.error as err:
if err.errno == errno.ECONNRESET:
print(
"Socket error "
+ str(err.errno)
+ " (Connection reset by peer) occurred on recv()."
)
else:
print("Socket error " + str(err.errno) + " occurred on recv().")
return msg
def readHeader(self):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = self.recv(5)
if len(header) == 0:
print(
"Header information is empty, client "
+ self.name.decode(DATA_ENCODING)
+ " exiting."
)
return header
if header == b"List\n":
return b"List"
elif header == b"Quit\n":
return b"Quit"
elif header[:-1] == b"A5A5":
header2 = self.recv(4)
return header + header2
else:
return
def readData(self, header):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = b""
if header == b"List":
return b""
elif header == b"Quit":
return b""
dst = header.split(b" ")[1].strip(b" ")
if dst == b"FSW":
# Read variable length command data here...
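# FSW commands are framed as a 4-byte descriptor, a 4-byte big-endian length, then <length> bytes of payload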
desc = self.recv(4)
sizeb = self.recv(4)
size = struct.unpack(">I", sizeb)[0]
data = desc + sizeb + self.recv(size)
elif dst == b"GUI":
# Read telemetry data here...
tlm_packet_size = self.recv(4)
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + self.recv(size)
else:
raise RuntimeError("unrecognized client %s" % dst.decode(DATA_ENCODING))
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
Once the entire header string is processed, send it on the queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
if header == b"List":
print("List of registered clients: ")
LOCK.acquire()
for d in list(SERVER.dest_obj.keys()):
print("\t" + SERVER.dest_obj[d].name.decode(DATA_ENCODING))
reg_client_str = b"List " + SERVER.dest_obj[d].name
l = len(reg_client_str)
reg_client_str = struct.pack("i%ds" % l, l, reg_client_str)
self.request.send(reg_client_str)
LOCK.release()
return 0
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == b"":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
elif b"FSW" in dst:
dest_list = FSW_clients
for dest_elem in dest_list:
# print "Locking TCP"
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending TCP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Packet missing A5A5 header")
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
for use in new FSW gse.py application.
UDP socket server for commands, log events, and telemetry data.
Later this will handle other things such as sequence files and parameters.
Handle is instanced in own thread for each client.
Registration is done by sending the string "Register <name>".
Sending a message to destination <name> is done as
"A5A5 <name> <data>" Note only <data> is sent.
Any client that sends a "List" command makes the server display all
registered clients.
"""
socketserver.BaseRequestHandler.allow_reuse_address = True
def handle(self): # on each packet
"""
The function that is invoked when a packet is received. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.getNewMsg(self.request[0])
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self, packet):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Read the header data from the socket either A5A5 or List
(header, packet) = self.readHeader(packet)
# If the received header is an empty string, connection closed, exit loop
if not header:
return
# Got the header data so read the data of the message here...
data = self.readData(header, packet)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def readHeader(self, packet):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = packet[:4]
header2 = packet[4:9]
packet = packet[9:]
return (header + header2, packet)
def readData(self, header, packet):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = ""
dst = header.split(b" ")[1].strip(b" ")
# Read telemetry data here...
tlm_packet_size = packet[:4]
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + packet[4 : 4 + size]
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
Once the entire header string is processed send it on queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == b"":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
else:
print("dest? %s" % dst.decode(DATA_ENCODING))
for dest_elem in dest_list:
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending UDP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Telemetry missing A5A5 header")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""
TCP Socket server.
Keep a dictionary of destination objects containing queues and
socket ids for writing to destinations.
"""
dest_obj = dict()
lock_obj = threading.Lock()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""
UDP Socket server.
"""
class DestObj:
"""
Destination object for all clients registered.
"""
def __init__(self, name, request):
"""
Constructor
"""
self.name = name
self.socket = request
self.packet = b""
def put(self, msg):
"""
Write out the message to the destination socket
"""
try:
# print "about to send data to " + self.name
self.socket.send(msg)
except socket.error as err:
print("Socket error " + str(err.errno) + " occurred on send().")
def fileno(self):
"""
Return the destination socket object.
"""
return self.socket
def main(argv=None):
global SERVER, LOCK
program_name = os.path.basename(sys.argv[0])
program_license = "Copyright 2015 user_name (California Institute of Technology) \
ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged."
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = "%%prog %s (%s)" % (program_version, program_build_date)
program_longdesc = (
"""""" # optional - give further explanation about what the program does
)
if argv is None:
argv = sys.argv[1:]
try:
parser = OptionParser(
version=program_version_string,
epilog=program_longdesc,
description=program_license,
)
parser.add_option(
"-p",
"--port",
dest="port",
action="store",
type="int",
help="Set threaded tcp socket server port [default: %default]",
default=50007,
)
parser.add_option(
"-i",
"--host",
dest="host",
action="store",
type="string",
help="Set threaded tcp socket server ip [default: %default]",
default="127.0.0.1",
)
# process options
(opts, args) = parser.parse_args(argv)
HOST = opts.host
PORT = opts.port
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
# Hopefully this will allow address reuse and server to restart immediately
server.allow_reuse_address = True
SERVER = server
LOCK = server.lock_obj
ip, port = server.server_address
print("TCP Socket Server listening on host addr %s, port %s" % (HOST, PORT))
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
udp_server_thread = threading.Thread(target=udp_server.serve_forever)
signal.signal(signal.SIGINT, signal_handler)
server_thread.daemon = False
server_thread.start()
udp_server_thread.daemon = False
udp_server_thread.start()
while not shutdown_event.is_set():
server_thread.join(timeout=5.0)
udp_server_thread.join(timeout=5.0)
print("shutdown from main thread")
SERVER.shutdown()
SERVER.server_close()
udp_server.shutdown()
udp_server.server_close()
time.sleep(1)
except Exception as e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help\n")
return 2
if __name__ == "__main__":
sys.exit(main())
| 32.093474
| 129
| 0.555531
| 2,104
| 18,197
| 4.715779
| 0.18251
| 0.011288
| 0.013102
| 0.008869
| 0.453336
| 0.404858
| 0.367668
| 0.352953
| 0.345495
| 0.345495
| 0
| 0.010943
| 0.342144
| 18,197
| 566
| 130
| 32.150177
| 0.817893
| 0.250756
| 0
| 0.297619
| 0
| 0
| 0.079925
| 0
| 0
| 0
| 0.000781
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.041667
| 0.002976
| 0.181548
| 0.065476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1abecce40e385182d4e13996f277a150f1a3f4
| 453
|
py
|
Python
|
btb_manager_telegram/__init__.py
|
haivle/BTB-manager-telegram
|
c0f71c5a98a3d128ad03578930932737dc580ed1
|
[
"MIT"
] | 3
|
2021-09-24T10:49:23.000Z
|
2021-11-18T13:38:17.000Z
|
btb_manager_telegram/__init__.py
|
haivle/BTB-manager-telegram
|
c0f71c5a98a3d128ad03578930932737dc580ed1
|
[
"MIT"
] | 1
|
2021-09-01T14:40:35.000Z
|
2021-09-01T14:40:35.000Z
|
btb_manager_telegram/__init__.py
|
haivle/BTB-manager-telegram
|
c0f71c5a98a3d128ad03578930932737dc580ed1
|
[
"MIT"
] | 2
|
2021-11-03T17:57:07.000Z
|
2022-02-01T11:55:54.000Z
|
import logging
import sched
import time
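# Conversation/menu state identifiers (presumably used as states by the bot's Telegram handlers)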
(
MENU,
EDIT_COIN_LIST,
EDIT_USER_CONFIG,
DELETE_DB,
UPDATE_TG,
UPDATE_BTB,
PANIC_BUTTON,
CUSTOM_SCRIPT,
) = range(8)
BOUGHT, BUYING, SOLD, SELLING = range(4)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger("btb_manager_telegram_logger")
scheduler = sched.scheduler(time.time, time.sleep)
| 18.875
| 85
| 0.697572
| 59
| 453
| 5.152542
| 0.694915
| 0.052632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005376
| 0.178808
| 453
| 23
| 86
| 19.695652
| 0.811828
| 0
| 0
| 0
| 0
| 0
| 0.174393
| 0.059603
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1b6d5a187986e544d8284aa99d48f0a66a5c3a
| 10,390
|
py
|
Python
|
test.py
|
xiaohuaibaoguigui/EllSeg
|
ff56b255f8e650856aec9af23792e105897eba5c
|
[
"MIT"
] | 1
|
2021-05-26T05:45:42.000Z
|
2021-05-26T05:45:42.000Z
|
test.py
|
xiaohuaibaoguigui/EllSeg
|
ff56b255f8e650856aec9af23792e105897eba5c
|
[
"MIT"
] | null | null | null |
test.py
|
xiaohuaibaoguigui/EllSeg
|
ff56b255f8e650856aec9af23792e105897eba5c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tqdm
import torch
import pickle
import resource
import numpy as np
import matplotlib.pyplot as plt
from args import parse_args
from modelSummary import model_dict
from pytorchtools import load_from_file
from torch.utils.data import DataLoader
from helperfunctions import mypause, stackall_Dict
from loss import get_seg2ptLoss
from utils import get_nparams, get_predictions
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1]))
#%%
if __name__ == '__main__':
args = parse_args()
device=torch.device("cuda")
torch.cuda.manual_seed(12)
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
args.useMultiGPU = False
torch.backends.cudnn.deterministic=False
if args.model not in model_dict:
print("Model not found.")
print("valid models are: {}".format(list(model_dict.keys())))
exit(1)
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2op = os.path.join(os.getcwd(), 'op', str(args.curObj))
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
os.makedirs(path2op, exist_ok=True)
model = model_dict[args.model]
netDict = load_from_file([args.loadfile,
os.path.join(path2checkpoint, 'checkpoint.pt')])
startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0
if 'state_dict' in netDict.keys():
model.load_state_dict(netDict['state_dict'])
print('Parameters: {}'.format(get_nparams(model)))
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec)
f = open(os.path.join('curObjects',
'baseline',
'cond_'+str(args.curObj)+'.pkl'), 'rb')
_, _, testObj = pickle.load(f)
testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All')
testObj.augFlag = False
testloader = DataLoader(testObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=False)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
accLoss = 0.0
imCounter = 0
ious = []
dists_pupil_latent = []
dists_pupil_seg = []
dists_iris_latent = []
dists_iris_seg = []
model.eval()
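# opDict collects, per test image: identifiers and latent codes, the predicted pupil/iris
# centers (from the latent vector and from the segmentation output), IoU and center-distance
# scores, and the corresponding ground truth centers/masks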
opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [],
'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]},
'pred':{'pup_latent_c':[],
'pup_seg_c':[],
'iri_latent_c':[],
'iri_seg_c':[],
'mask':[]},
'gt':{'pup_c':[], 'mask':[]}}
with torch.no_grad():
for bt, batchdata in enumerate(tqdm.tqdm(testloader)):
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
out_tup = model(img.to(device).to(args.prec),
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long),
0.5)
output, elOut, latent, loss = out_tup
latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy()
latent_iris_center = elOut[:, 5:7].detach().cpu().numpy()
_, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4)
_, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4)
loss = loss if args.useMultiGPU else loss.mean()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
iou, iou_bySample = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1:]
latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
latent_pupil_center,
cond[:,0].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
seg_pupil_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
latent_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
seg_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
dists_pupil_latent.append(latent_pupil_dist)
dists_iris_latent.append(latent_iris_dist)
dists_pupil_seg.append(seg_pupil_dist)
dists_iris_seg.append(seg_iris_dist)
ious.append(iou)
pup_latent_c = unnormPts(latent_pupil_center,
img.shape[2:])
pup_seg_c = unnormPts(seg_pupil_center,
img.shape[2:])
iri_latent_c = unnormPts(latent_iris_center,
img.shape[2:])
iri_seg_c = unnormPts(seg_iris_center,
img.shape[2:])
dispI = generateImageGrid(img.numpy().squeeze(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_seg_c,
cond.numpy(),
override=True,
heatmaps=False)
for i in range(0, img.shape[0]):
archNum = testObj.imList[imCounter, 1]
opDict['id'].append(testObj.imList[imCounter, 0])
opDict['code'].append(latent[i,...].detach().cpu().numpy())
opDict['archNum'].append(archNum)
opDict['archName'].append(testObj.arch[archNum])
opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :])
opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :])
opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :])
opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :])
if args.test_save_op_masks:
opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8))
opDict['scores']['iou'].append(iou_bySample[i, ...])
opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...])
opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...])
opDict['gt']['pup_c'].append(pupil_center[i,...].numpy())
if args.test_save_op_masks:
opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8))
imCounter+=1
if args.disp:
if bt == 0:
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
opDict = stackall_Dict(opDict)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious))
print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent),
np.nanstd(dists_pupil_latent)))
print('Segmentation PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_seg),
np.nanstd(dists_pupil_seg)))
print('Latent space IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_latent),
np.nanstd(dists_iris_latent)))
print('Segmentation IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_seg),
np.nanstd(dists_iris_seg)))
print('--- Saving output dictionary ---')
f = open(os.path.join(path2op, 'opDict.pkl'), 'wb')
pickle.dump(opDict, f)
f.close()
| 42.933884
| 109
| 0.487777
| 1,048
| 10,390
| 4.63645
| 0.230916
| 0.027166
| 0.02058
| 0.020169
| 0.237292
| 0.124511
| 0.124511
| 0.113398
| 0.07553
| 0.032517
| 0
| 0.013249
| 0.389798
| 10,390
| 241
| 110
| 43.112033
| 0.753155
| 0.013763
| 0
| 0.123656
| 0
| 0
| 0.065045
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086022
| 0
| 0.086022
| 0.053763
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1c20ef193d7e2a2b62ae78d7b0e1c3d0bfeffb
| 21,072
|
py
|
Python
|
tests/test_util.py
|
meskio/tuf
|
09c3ceb993d40f7339bbbaf4eae617f95b972708
|
[
"MIT"
] | 1
|
2015-02-16T22:53:00.000Z
|
2015-02-16T22:53:00.000Z
|
tests/test_util.py
|
meskio/tuf
|
09c3ceb993d40f7339bbbaf4eae617f95b972708
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
meskio/tuf
|
09c3ceb993d40f7339bbbaf4eae617f95b972708
|
[
"MIT"
] | 1
|
2019-09-12T02:32:54.000Z
|
2019-09-12T02:32:54.000Z
|
#!/usr/bin/env python
"""
<Program Name>
test_util.py
<Author>
Konstantin Andrianov.
<Started>
February 1, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'util.py'
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import gzip
import shutil
import logging
import tempfile
import unittest
import tuf
import tuf.log
import tuf.hash
import tuf.util
import tuf.unittest_toolbox as unittest_toolbox
import tuf._vendor.six as six
logger = logging.getLogger('tuf.test_util')
class TestUtil(unittest_toolbox.Modified_TestCase):
def setUp(self):
unittest_toolbox.Modified_TestCase.setUp(self)
self.temp_fileobj = tuf.util.TempFile()
def tearDown(self):
unittest_toolbox.Modified_TestCase.tearDown(self)
self.temp_fileobj.close_temp_file()
def test_A1_tempfile_close_temp_file(self):
# Was the temporary file closed?
self.temp_fileobj.close_temp_file()
self.assertTrue(self.temp_fileobj.temporary_file.closed)
def _extract_tempfile_directory(self, config_temp_dir=None):
"""
Takes a directory (essentially specified in the conf.py as
'temporary_directory') and substitutes tempfile.TemporaryFile() with
tempfile.mkstemp() in order to extract the actual directory of the stored
tempfile. Returns the config's temporary directory (or the default temp
directory) and the actual directory.
"""
# Patching 'tuf.conf.temporary_directory'.
tuf.conf.temporary_directory = config_temp_dir
if config_temp_dir is None:
# 'config_temp_dir' needs to be set to default.
config_temp_dir = tempfile.gettempdir()
# Patching 'tempfile.TemporaryFile()' (by substituting
# tempfile.TemporaryFile() with tempfile.mkstemp()) in order to get the
# directory of the stored tempfile object.
saved_tempfile_TemporaryFile = tuf.util.tempfile.NamedTemporaryFile
tuf.util.tempfile.NamedTemporaryFile = tempfile.mkstemp
_temp_fileobj = tuf.util.TempFile()
tuf.util.tempfile.NamedTemporaryFile = saved_tempfile_TemporaryFile
junk, _tempfilepath = _temp_fileobj.temporary_file
_tempfile_dir = os.path.dirname(_tempfilepath)
# In the case when 'config_temp_dir' is None or some other discrepancy,
# '_temp_fileobj' needs to be closed manually since tempfile.mkstemp()
# was used.
if os.path.exists(_tempfilepath):
os.remove(_tempfilepath)
return config_temp_dir, _tempfile_dir
def test_A2_tempfile_init(self):
# Goal: Verify that temporary files are stored in the appropriate temp
# directory. The location of the temporary files is set in 'tuf.conf.py'.
# Test: Expected input verification.
# Assumed 'tuf.conf.temporary_directory' is 'None' initially.
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(tempfile.gettempdir(), temp_file_directory)
saved_temporary_directory = tuf.conf.temporary_directory
temp_directory = self.make_temp_directory()
tuf.conf.temporary_directory = temp_directory
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(temp_directory, temp_file_directory)
tuf.conf.temporary_directory = saved_temporary_directory
# Test: Unexpected input handling.
config_temp_dirs = [self.random_string(), 123, ['a'], {'a':1}]
for config_temp_dir in config_temp_dirs:
config_temp_dir, actual_dir = \
self._extract_tempfile_directory(config_temp_dir)
self.assertEqual(tempfile.gettempdir(), actual_dir)
def test_A3_tempfile_read(self):
filepath = self.make_temp_data_file(data = '1234567890')
fileobj = open(filepath, 'rb')
# Patching 'temp_fileobj.temporary_file'.
self.temp_fileobj.temporary_file = fileobj
# Test: Expected input.
self.assertEqual(self.temp_fileobj.read().decode('utf-8'), '1234567890')
self.assertEqual(self.temp_fileobj.read(4).decode('utf-8'), '1234')
# Test: Unexpected input.
for bogus_arg in ['abcd', ['abcd'], {'a':'a'}, -100]:
self.assertRaises(tuf.FormatError, self.temp_fileobj.read, bogus_arg)
def test_A4_tempfile_write(self):
data = self.random_string()
self.temp_fileobj.write(data.encode('utf-8'))
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
self.temp_fileobj.write(data.encode('utf-8'), auto_flush=False)
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
def test_A5_tempfile_move(self):
# Destination directory to save the temporary file in.
dest_temp_dir = self.make_temp_directory()
dest_path = os.path.join(dest_temp_dir, self.random_string())
self.temp_fileobj.write(self.random_string().encode('utf-8'))
self.temp_fileobj.move(dest_path)
self.assertTrue(dest_path)
def _compress_existing_file(self, filepath):
"""
[Helper] Compresses file 'filepath' and returns the file path of
the compressed file.
"""
# NOTE: DO NOT forget to remove the newly created compressed file!
if os.path.exists(filepath):
compressed_filepath = filepath+'.gz'
f_in = open(filepath, 'rb')
f_out = gzip.open(compressed_filepath, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
return compressed_filepath
else:
logger.error('Compression of '+repr(filepath)+' failed. Path does not exist.')
sys.exit(1)
def _decompress_file(self, compressed_filepath):
"""[Helper]"""
if os.path.exists(compressed_filepath):
f = gzip.open(compressed_filepath, 'rb')
file_content = f.read()
f.close()
return file_content
else:
logger.error('Decompression of '+repr(compressed_filepath)+' failed. '+\
'Path does not exist.')
sys.exit(1)
def test_A6_tempfile_decompress_temp_file_object(self):
# Setup: generate a temp file (self.make_temp_data_file()),
# compress it. Write it to self.temp_fileobj().
filepath = self.make_temp_data_file()
fileobj = open(filepath, 'rb')
compressed_filepath = self._compress_existing_file(filepath)
compressed_fileobj = open(compressed_filepath, 'rb')
self.temp_fileobj.write(compressed_fileobj.read())
os.remove(compressed_filepath)
# Try decompression using incorrect compression types, i.e. compressions
# other than 'gzip'. In short, feed it incorrect input.
bogus_args = ['zip', 1234, self.random_string()]
for arg in bogus_args:
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, arg)
self.temp_fileobj.decompress_temp_file_object('gzip')
self.assertEqual(self.temp_fileobj.read(), fileobj.read())
# Checking the content of the TempFile's '_orig_file' instance.
check_compressed_original = self.make_temp_file()
with open(check_compressed_original, 'wb') as file_object:
file_object.write(self.temp_fileobj._orig_file.read())
data_in_orig_file = self._decompress_file(check_compressed_original)
fileobj.seek(0)
self.assertEqual(data_in_orig_file, fileobj.read())
# Try decompressing once more.
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, 'gzip')
# Test decompression of invalid gzip file.
temp_file = tuf.util.TempFile()
fileobj.seek(0)
temp_file.write(fileobj.read())
temp_file.decompress_temp_file_object('gzip')
def test_B1_get_file_details(self):
# Goal: Verify proper output given certain expected/unexpected input.
# Making a temporary file.
filepath = self.make_temp_data_file()
# Computing the hash and length of the tempfile.
digest_object = tuf.hash.digest_filename(filepath, algorithm='sha256')
file_hash = {'sha256' : digest_object.hexdigest()}
file_length = os.path.getsize(filepath)
# Test: Expected input.
self.assertEqual(tuf.util.get_file_details(filepath), (file_length, file_hash))
# Test: Incorrect input.
bogus_inputs = [self.random_string(), 1234, [self.random_string()],
{'a': 'b'}, None]
for bogus_input in bogus_inputs:
if isinstance(bogus_input, six.string_types):
self.assertRaises(tuf.Error, tuf.util.get_file_details, bogus_input)
else:
self.assertRaises(tuf.FormatError, tuf.util.get_file_details, bogus_input)
def test_B2_ensure_parent_dir(self):
existing_parent_dir = self.make_temp_directory()
non_existing_parent_dir = os.path.join(existing_parent_dir, 'a', 'b')
for parent_dir in [existing_parent_dir, non_existing_parent_dir, 12, [3]]:
if isinstance(parent_dir, six.string_types):
tuf.util.ensure_parent_dir(os.path.join(parent_dir, 'a.txt'))
self.assertTrue(os.path.isdir(parent_dir))
else:
self.assertRaises(tuf.FormatError, tuf.util.ensure_parent_dir, parent_dir)
def test_B3_file_in_confined_directories(self):
# Goal: Provide invalid input for 'filepath' and 'confined_directories'.
# Include inputs like: '[1, 2, "a"]' and such...
# Reference to 'file_in_confined_directories()' to improve readability.
in_confined_directory = tuf.util.file_in_confined_directories
list_of_confined_directories = ['a', 12, {'a':'a'}, [1]]
list_of_filepaths = [12, ['a'], {'a':'a'}, 'a']
for bogus_confined_directory in list_of_confined_directories:
for filepath in list_of_filepaths:
self.assertRaises(tuf.FormatError, in_confined_directory,
filepath, bogus_confined_directory)
# Test: Inputs that evaluate to False.
confined_directories = ['a/b/', 'a/b/c/d/']
self.assertFalse(in_confined_directory('a/b/c/1.txt', confined_directories))
confined_directories = ['a/b/c/d/e/']
self.assertFalse(in_confined_directory('a', confined_directories))
self.assertFalse(in_confined_directory('a/b', confined_directories))
self.assertFalse(in_confined_directory('a/b/c', confined_directories))
self.assertFalse(in_confined_directory('a/b/c/d', confined_directories))
# Below, 'e' is a file in the 'a/b/c/d/' directory.
self.assertFalse(in_confined_directory('a/b/c/d/e', confined_directories))
# Test: Inputs that evaluate to True.
self.assertTrue(in_confined_directory('a/b/c.txt', ['']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['a/b/']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['x', '']))
self.assertTrue(in_confined_directory('a/b/c/..', ['a/']))
def test_B4_import_json(self):
self.assertTrue('json' in sys.modules)
def test_B5_load_json_string(self):
# Test normal case.
data = ['a', {'b': ['c', None, 30.3, 29]}]
json_string = tuf.util.json.dumps(data)
self.assertEqual(data, tuf.util.load_json_string(json_string))
# Test invalid arguments.
self.assertRaises(tuf.Error, tuf.util.load_json_string, 8)
invalid_json_string = {'a': tuf.FormatError}
self.assertRaises(tuf.Error, tuf.util.load_json_string, invalid_json_string)
def test_B6_load_json_file(self):
data = ['a', {'b': ['c', None, 30.3, 29]}]
filepath = self.make_temp_file()
fileobj = open(filepath, 'wt')
tuf.util.json.dump(data, fileobj)
fileobj.close()
self.assertEqual(data, tuf.util.load_json_file(filepath))
# Test a gzipped file.
compressed_filepath = self._compress_existing_file(filepath)
self.assertEqual(data, tuf.util.load_json_file(compressed_filepath))
Errors = (tuf.FormatError, IOError)
for bogus_arg in [b'a', 1, [b'a'], {'a':b'b'}]:
self.assertRaises(Errors, tuf.util.load_json_file, bogus_arg)
def test_C1_get_target_hash(self):
# Test normal case.
expected_target_hashes = {
'/file1.txt': 'e3a3d89eb3b70ce3fbce6017d7b8c12d4abd5635427a0e8a238f53157df85b3d',
'/README.txt': '8faee106f1bb69f34aaf1df1e3c2e87d763c4d878cb96b91db13495e32ceb0b0',
'/warehouse/file2.txt': 'd543a573a2cec67026eff06e75702303559e64e705eba06f65799baaf0424417'
}
for filepath, target_hash in six.iteritems(expected_target_hashes):
self.assertTrue(tuf.formats.RELPATH_SCHEMA.matches(filepath))
self.assertTrue(tuf.formats.HASH_SCHEMA.matches(target_hash))
self.assertEqual(tuf.util.get_target_hash(filepath), target_hash)
# Test for improperly formatted argument.
self.assertRaises(tuf.FormatError, tuf.util.get_target_hash, 8)
def test_C2_find_delegated_role(self):
# Test normal case. Create an expected role list, which is one of the
# required arguments to 'find_delegated_role()'.
role_list = [
{
"keyids": [
"a394c28384648328b16731f81440d72243c77bb44c07c040be99347f0df7d7bf"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 3
},
{
"keyids": [
"a394c28384648328b16731f81440d72243c77bb44c07c040be99347f0df7d7bf"
],
"name": "targets/tuf",
"paths": [
"/updater.py", "formats.py", '/tuf/'
],
"threshold": 4
}
]
self.assertTrue(tuf.formats.ROLELIST_SCHEMA.matches(role_list))
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/tuf'), 1)
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/warehouse'), 0)
# Test for non-existent role. 'find_delegated_role()' returns 'None'
# if the role is not found.
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/non-existent'),
None)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, role_list)
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, 'targets/tuf')
# Test duplicate roles.
role_list.append(role_list[1])
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/tuf')
# Test missing 'name' attribute (optional, but required by
# 'find_delegated_role()').
# Delete the duplicate role, and the remaining role's 'name' attribute.
del role_list[2]
del role_list[0]['name']
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/warehouse')
def test_C3_paths_are_consistent_with_hash_prefixes(self):
# Test normal case.
path_hash_prefixes = ['e3a3', '8fae', 'd543']
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
# Ensure the paths of 'list_of_targets' each have the expected path hash
# prefix listed in 'path_hash_prefixes'.
for filepath in list_of_targets:
self.assertTrue(tuf.util.get_target_hash(filepath)[0:4] in path_hash_prefixes)
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
extra_invalid_prefix = ['e3a3', '8fae', 'd543', '0000']
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
extra_invalid_prefix))
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes, 8,
path_hash_prefixes)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, 8)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, ['zza1'])
# Test invalid list of targets.
bad_target_path = '/file5.txt'
self.assertTrue(tuf.util.get_target_hash(bad_target_path)[0:4] not in
path_hash_prefixes)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes([bad_target_path],
path_hash_prefixes))
# Add invalid target path to 'list_of_targets'.
list_of_targets.append(bad_target_path)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
def test_C4_ensure_all_targets_allowed(self):
# Test normal case.
rolename = 'targets/warehouse'
self.assertTrue(tuf.formats.ROLENAME_SCHEMA.matches(rolename))
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
self.assertTrue(tuf.formats.RELPATHS_SCHEMA.matches(list_of_targets))
parent_delegations = {"keys": {
"a394c28384648328b16731f81440d72243c77bb44c07c040be99347f0df7d7bf": {
"keytype": "ed25519",
"keyval": {
"public": "3eb81026ded5af2c61fb3d4b272ac53cd1049a810ee88f4df1fc35cdaf918157"
}
}
},
"roles": [
{
"keyids": [
"a394c28384648328b16731f81440d72243c77bb44c07c040be99347f0df7d7bf"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 1
}
]
}
self.assertTrue(tuf.formats.DELEGATIONS_SCHEMA.matches(parent_delegations))
tuf.util.ensure_all_targets_allowed(rolename, list_of_targets,
parent_delegations)
# The target files of 'targets' are always allowed. 'list_of_targets' and
# 'parent_delegations' are not checked in this case.
tuf.util.ensure_all_targets_allowed('targets', list_of_targets,
parent_delegations)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
8, list_of_targets, parent_delegations)
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
rolename, 8, parent_delegations)
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
rolename, list_of_targets, 8)
# Test for invalid 'rolename', which has not been delegated by its parent,
# 'targets'.
self.assertRaises(tuf.RepositoryError, tuf.util.ensure_all_targets_allowed,
'targets/non-delegated_rolename', list_of_targets,
parent_delegations)
# Test for target file that is not allowed by the parent role.
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file8.txt'], parent_delegations)
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file1.txt', 'bad-README.txt'],
parent_delegations)
# Test for required attributes.
# Missing 'paths' attribute.
del parent_delegations['roles'][0]['paths']
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', list_of_targets, parent_delegations)
# Test 'path_hash_prefixes' attribute.
path_hash_prefixes = ['e3a3', '8fae', 'd543']
parent_delegations['roles'][0]['path_hash_prefixes'] = path_hash_prefixes
# Test normal case for 'path_hash_prefixes'.
tuf.util.ensure_all_targets_allowed('targets/warehouse', list_of_targets,
parent_delegations)
# Test target file with a path_hash_prefix that is not allowed in its
# parent role.
path_hash_prefix = tuf.util.get_target_hash('file5.txt')[0:4]
self.assertTrue(path_hash_prefix not in parent_delegations['roles'][0]
['path_hash_prefixes'])
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file5.txt'], parent_delegations)
def test_C5_unittest_toolbox_make_temp_directory(self):
# Verify that tearDown() does not fail when the temp directory generated by
# unittest_toolbox.make_temp_directory() has already been deleted here.
temp_directory = self.make_temp_directory()
os.rmdir(temp_directory)
def test_c6_get_compressed_length(self):
self.temp_fileobj.write(b'hello world')
self.assertTrue(self.temp_fileobj.get_compressed_length() == 11)
temp_file = tuf.util.TempFile()
# Run unit test.
if __name__ == '__main__':
unittest.main()
| 36.968421
| 96
| 0.689683
| 2,593
| 21,072
| 5.340532
| 0.15349
| 0.027296
| 0.034301
| 0.030329
| 0.417966
| 0.346693
| 0.300116
| 0.242779
| 0.221404
| 0.197935
| 0
| 0.030321
| 0.204916
| 21,072
| 569
| 97
| 37.033392
| 0.796228
| 0.19291
| 0
| 0.240854
| 0
| 0
| 0.097111
| 0.032153
| 0
| 0
| 0
| 0
| 0.216463
| 1
| 0.070122
| false
| 0
| 0.054878
| 0
| 0.137195
| 0.003049
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1e4d053b5156945879fda6f1eb646b81a07f71
| 8,282
|
py
|
Python
|
cams/propressing/data_rotate.py
|
boliqq07/cam3d
|
8b66681166a8ce0ef3304309385c1b899f1d2bb9
|
[
"BSD-3-Clause"
] | 1
|
2020-11-23T08:20:38.000Z
|
2020-11-23T08:20:38.000Z
|
cams/propressing/data_rotate.py
|
boliqq07/cam3d
|
8b66681166a8ce0ef3304309385c1b899f1d2bb9
|
[
"BSD-3-Clause"
] | null | null | null |
cams/propressing/data_rotate.py
|
boliqq07/cam3d
|
8b66681166a8ce0ef3304309385c1b899f1d2bb9
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import lru_cache
from math import cos, sin
import scipy
from scipy.ndimage import affine_transform
import numpy as np
@lru_cache(maxsize=10)
def get_matrix(angles=(90, 90, 90), inverse=False):
"""
Get the axis-rotation (shear + compress) matrix for the given angles.
Examples: z = 120
############################################################
---------------------- --------------------------------
-oooooooooooooooooooo- --------------------------------
-oooooooooooooooooooo- -oooooooooooooooooooo-----------
-oooooooooooooooooooo- ---oooooooooooooooooooo---------
-oooooooooooooooooooo- >>> -----oooooooooooooooooooo-------
-oooooooooooooooooooo- -------oooooooooooooooooooo-----
-oooooooooooooooooooo- ---------oooooooooooooooooooo---
-oooooooooooooooooooo- -----------oooooooooooooooooooo-
---------------------- --------------------------------
############################################################
1. The ``matrix`` is the transform matrix that rotates the data by the given angles. It is always in Cartesian coordinates.
2. The ``inverse matrix`` is the interpolation matrix used to obtain the true data matrix (Cartesian coordinates)
from the relative data matrix (non-Cartesian coordinates).
Parameters
----------
angles: tuple
the three angles of the x, y, z axes;
the z angle is the intersection angle of x, y,
the y angle is the intersection angle of x, z,
the x angle is the intersection angle of y, z.
inverse: bool
If True, compute the (multiplicative) inverse of the matrix.
"""
theta1, theta2, theta3 = [np.pi / 180 * angle for angle in angles]
matrix1 = np.array([[1, cos(theta3), 0],
[0, sin(theta3), 0],
[0, 0, 1]])
matrix2 = np.array([[1, 0, 0],
[0, 1, cos(theta1)],
[0, 0, sin(theta1)]])
matrix3 = np.array([[1, 0, cos(theta2)],
[0, 1, 0],
[0, 0, sin(theta2)]])
matrix = np.dot(matrix1, matrix2).dot(matrix3)
if inverse:
matrix = np.linalg.inv(matrix)
return matrix
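# A minimal sketch (an illustration, not part of the original module; assumes the
# get_matrix() defined above): the forward matrix and its inverse should multiply
# to the identity, which is what the interpolation step below relies on.
#
#   m = get_matrix(angles=(90, 90, 120))
#   m_inv = get_matrix(angles=(90, 90, 120), inverse=True)
#   assert np.allclose(np.dot(m, m_inv), np.eye(3))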
def rotation_axis_by_angle(data, angles=(90, 90, 90), times=(2, 2, 2)):
"""
Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates).
Parameters
----------
data: np.ndarray
data with shape (nx, ny, nz).
angles: tuple
the three angles of the x, y, z axes;
the z angle is the intersection angle of x, y,
the y angle is the intersection angle of x, z,
the x angle is the intersection angle of y, z.
times: tuple
expansion factor of the matrix along each axis.
"""
matrix = get_matrix(angles=angles, inverse=True)
return rotation_axis_by_matrix(data, matrix, times=times)
def rotation_axis_by_matrix(data, matrix, times=(2, 2, 2)):
"""
Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates).
Parameters
----------
data: np.ndarray
data with shape (nx, ny, nz).
matrix: np.ndarray
transform matrix; see also ``get_matrix``.
times: tuple
expansion factor of the matrix along each axis.
"""
dims_old = data.shape
dims = tuple([int(i * j) for i, j in zip(dims_old, times)])
n_data = np.zeros(dims)
d0s = int((dims[0] - dims_old[0]) / 2)
d1s = int((dims[1] - dims_old[1]) / 2)
d2s = int((dims[2] - dims_old[2]) / 2)
n_data[d0s:d0s + dims_old[0], d1s:d1s + dims_old[1], d2s:d2s + dims_old[2]] = data
coords = np.meshgrid(range(dims[0]), range(dims[1]), range(dims[2]), indexing="ij")
xy_coords = np.vstack([coords[0].reshape(-1), coords[1].reshape(-1), coords[2].reshape(-1)])
# apply the transformation matrix
# please note: the coordinates are not homogeneous.
# for the 3D case, I've added code for homogeneous coordinates, you might want to look at that
# please also note: rotation is always around the origin:
# since I want the origin to be in the image center, I had to subtract dim/2, rotate, then add it again
dims2 = np.array([i / 2 for i in dims])
dims2 = dims2.reshape(-1, 1)
xy_coords = np.dot(matrix, xy_coords - dims2) + dims2
#
# # undo the stacking and reshaping
x = xy_coords[0, :]
y = xy_coords[1, :]
z = xy_coords[2, :]
x = x.reshape(dims, order="A")
y = y.reshape(dims, order="A")
z = z.reshape(dims, order="A")
new_coords = [x, y, z]
# use map_coordinates to sample values for the new image
new_img = scipy.ndimage.map_coordinates(n_data, new_coords, order=2)
return new_img
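# A hedged usage sketch (assumes numpy/scipy and the functions above): expand an
# 8x8x8 relative-coordinate grid by `times` and resample it onto Cartesian axes.
#
#   data = np.random.rand(8, 8, 8)
#   true_data = rotation_axis_by_angle(data, angles=(90, 90, 120), times=(2, 2, 2))
#   true_data.shape   # -> (16, 16, 16), i.e. the original shape scaled by `times`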
def _coords(points, angles=(90, 90, 90), times=(2, 2, 2)):
"""
Parameters
----------
points: np.ndarray
key points with shape (n_sample, 3), given as percentages of the shape.
angles: tuple
the three angles of the x, y, z axes;
the z angle is the intersection angle of x, y,
the y angle is the intersection angle of x, z,
the x angle is the intersection angle of y, z.
times: tuple
expansion factor of the matrix along each axis.
"""
dims_old = [1, 1, 1]
matrix = get_matrix(angles=angles)
times = np.array(list(times))
times = times.reshape((-1, 1))
dims_old = np.array(dims_old)
dims_old = dims_old.reshape(-1, 1)
dims2 = dims_old / 2
points = points.T * dims_old
xy_coords = np.dot(matrix, points - dims2) + dims2
xy_coords = xy_coords + (times / 2 - 0.5)
return xy_coords
def rote_index(points, data, angles=(90, 90, 90), times=(2, 2, 2), data_init=True, return_type="float"):
"""
Parameters
----------
points: np.ndarray
key points with shape (n_sample, 3), given as percentages of the shape.
data: np.ndarray or tuple
data or data.shape.
data_init: bool
whether the data is the initial data (relative locations) or already in
Cartesian coordinates (see ``rotation_axis_by_angle``).
angles: tuple
the three angles of the x, y, z axes;
the z angle is the intersection angle of x, y,
the y angle is the intersection angle of x, z,
the x angle is the intersection angle of y, z.
times: tuple
expansion factor of the matrix along each axis.
return_type: str
one of "float", "int", "percent";
for "float" and "int", return the new index,
for "percent", return the new percentages.
"""
data_shape = data.shape if isinstance(data, np.ndarray) else data
# reshape to a column so the per-axis division below broadcasts for any number of points
if data_init:
times_np = np.array([1, 1, 1]).reshape((-1, 1))
else:
times_np = np.array(times).reshape((-1, 1))
dims = data_shape
dims = np.array(dims).reshape((-1, 1))
xy_coords = _coords(points, angles=angles, times=times)
if return_type == "percent":
return xy_coords
if return_type == "float":
return (dims * xy_coords/times_np).T
else:
return np.round((dims * xy_coords/times_np).T).astype(int) # for rounding off: .4 -, .5 +
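# A hedged sketch of rote_index() (illustrative values only; assumes numpy and the
# functions above): fractional points, given as percentages of the box, are mapped
# to indices on the rotated grid.
#
#   pts = np.array([[0.5, 0.5, 0.5]])        # one point at the box centre
#   rote_index(pts, (8, 8, 8), angles=(90, 90, 120), return_type="int")
#   # -> integer index array of shape (n_sample, 3)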
def rote_value(points, data, angles=(90, 90, 90), times=(2, 2, 2), method="in", data_type="td"):
"""
Parameters
----------
points: np.ndarray
key points with shape (n_sample, 3), given as percentages of the shape.
data: np.ndarray
data.
angles: tuple
the three angles of the x, y, z axes;
the z angle is the intersection angle of x, y,
the y angle is the intersection angle of x, z,
the x angle is the intersection angle of y, z.
times: tuple
expansion factor of the matrix along each axis.
data_type: str
if "init", the data is initial data (e.g. ELFCAR, CHGCAR); see ``rotation_axis_by_angle``.
if "td", the data is the true matrix data; see ``rotation_axis_by_angle``.
method: str
if "near", return the nearest site's value;
otherwise, return the interpolated value.
"""
if data_type == "td":
new_data = data
else:
new_data = rotation_axis_by_angle(data, angles=angles, times=times)
if method == "near":
ind = rote_index(points, data, angles=angles, times=times, return_type="int")
new_value = np.array([new_data[tuple(i)] for i in ind.T])
return new_value
else:
ind = rote_index(points, data, angles=angles, times=times, return_type="float")
new_value = scipy.ndimage.map_coordinates(new_data, ind, order=2)
return new_value
| 32.225681
| 108
| 0.584762
| 1,131
| 8,282
| 4.191866
| 0.161804
| 0.02953
| 0.025311
| 0.069606
| 0.453491
| 0.408564
| 0.390002
| 0.370808
| 0.366589
| 0.363215
| 0
| 0.027169
| 0.266723
| 8,282
| 256
| 109
| 32.351563
| 0.753499
| 0.484545
| 0
| 0.093023
| 0
| 0
| 0.010613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.05814
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1f7437cc31d152ad0f3a65db16fb0d7effff6e
| 596
|
py
|
Python
|
playground/conversions/parser/lola2dot.py
|
flange/esp
|
78925925daf876e4936ca7af046b4f884e8a4233
|
[
"MIT"
] | null | null | null |
playground/conversions/parser/lola2dot.py
|
flange/esp
|
78925925daf876e4936ca7af046b4f884e8a4233
|
[
"MIT"
] | null | null | null |
playground/conversions/parser/lola2dot.py
|
flange/esp
|
78925925daf876e4936ca7af046b4f884e8a4233
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
#lolafile = open("ex-small.graph", "r")
source = 0
target = 0
lowlink = 0
trans = "bla"
print("digraph {")
with open(sys.argv[1]) as lolafile:
for line in lolafile:
if len(line) == 1:
continue
linelist = line.split(" ")
if "STATE" in linelist:
source = linelist[1]
lowlink = linelist[3].rstrip()
if "->" in linelist:
trans = linelist[0]
target = linelist[2].rstrip()
print(''' {} -> {} [label="{}", lowlink="{}"];'''.format(source, target, trans, lowlink))
print("}")
| 17.028571
| 98
| 0.540268
| 71
| 596
| 4.535211
| 0.535211
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.275168
| 596
| 34
| 99
| 17.529412
| 0.724537
| 0.097315
| 0
| 0
| 0
| 0
| 0.110075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c1f830b50d1855accf6647797b5fa3fed845091
| 3,098
|
py
|
Python
|
engkor/views.py
|
takeshixx/dprkdict
|
7f436eb99a855ae8037b2219fc97944f5c000f68
|
[
"MIT"
] | 10
|
2017-09-25T09:30:02.000Z
|
2021-12-10T13:38:55.000Z
|
engkor/views.py
|
takeshixx/dprkdict
|
7f436eb99a855ae8037b2219fc97944f5c000f68
|
[
"MIT"
] | null | null | null |
engkor/views.py
|
takeshixx/dprkdict
|
7f436eb99a855ae8037b2219fc97944f5c000f68
|
[
"MIT"
] | null | null | null |
import re
import urllib.parse
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Definition
RE_HANGUL = re.compile(r'[(]*[\uAC00-\uD7AF]+[\uAC00-\uD7AF (),;]*', re.IGNORECASE)
def index(request):
definitions = Definition.objects.all()
limit = request.GET.get('limit')
try:
limit = int(limit)
except (ValueError, TypeError):
limit = 15
paginator = Paginator(definitions, limit)
page = request.GET.get('page')
try:
show_lines = paginator.page(page)
except PageNotAnInteger:
show_lines = paginator.page(1)
except EmptyPage:
show_lines = paginator.page(paginator.num_pages)
return render(request, 'index.html', {'definitions': definitions,
'lines': show_lines})
def fix_definition_format(definition):
definition = definition.replace('{I}', '<i>') \
.replace('{/I}', '</i>') \
.replace('{B}', '<b>') \
.replace('{/B}', '</b>') \
.replace('{Pr}', '[') \
.replace('{/Pr}', ']') \
.replace('{H}', '') \
.replace('{/H}', '') \
.replace('{E}', '') \
.replace('{/E}', '') \
.replace('{J}', '') \
.replace('{/J}', '') \
.replace('{S}', '') \
.replace('{/S}', '') \
.replace('{U}', '') \
.replace('{-}', '- ')
if definition.startswith('&'):
definition = definition[1:]
word, _definition = definition.split('\n', 1)
definition = '<h4>' + word + '</h4>\n'
definition += _definition
return definition
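# A small illustration (hypothetical input, shaped to the replacements above):
#
#   fix_definition_format('&word\n{I}example{/I} {Pr}pr{/Pr}')
#   # -> '<h4>word</h4>\n<i>example</i> [pr]'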
def generate_translate_tag(word):
out = '<a href="https://translate.google.de/#ko/en/{word_url}" '
out += 'title="Translate with Google Translate" target="'
out += '_blank">{word}</a>'
out = out.format(word_url=urllib.parse.quote_plus(word.group(0)),
word=word.group(0))
return out
def get_definitions(request):
if request.is_ajax():
q = request.GET.get('term', '')
definitions = Definition.objects.filter(word__icontains=q) \
.values_list('word', flat=True)[:25]
data = list(definitions)
else:
data = []
return JsonResponse(data, safe=False)
def get_definition(request, id):
definition = get_object_or_404(Definition, id=id)
data = fix_definition_format(definition.definition)
data = RE_HANGUL.sub(generate_translate_tag, data)
return HttpResponse(data)
def get_definition_word(request, word):
definition = get_object_or_404(Definition, word=word)
data = fix_definition_format(definition.definition)
data = RE_HANGUL.sub(generate_translate_tag, data)
return HttpResponse(data)
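# A hedged illustration (hypothetical text; assumes RE_HANGUL and
# generate_translate_tag above): each run of Hangul is wrapped in a Google
# Translate link.
#
#   RE_HANGUL.sub(generate_translate_tag, 'hello 안녕')
#   # -> 'hello <a href="https://translate.google.de/#ko/en/...">안녕</a>'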
| 35.204545
| 83
| 0.548741
| 310
| 3,098
| 5.345161
| 0.329032
| 0.08449
| 0.019916
| 0.025347
| 0.190103
| 0.166566
| 0.125528
| 0.125528
| 0.125528
| 0.125528
| 0
| 0.01191
| 0.295352
| 3,098
| 88
| 84
| 35.204545
| 0.747137
| 0
| 0
| 0.109589
| 0
| 0
| 0.095192
| 0.010971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.082192
| 0
| 0.246575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2000bb0df619412795ffbe35ee177921174a1f
| 1,867
|
py
|
Python
|
appdaemon/apps/toggle_switch/toggle_switch.py
|
Mithras/ha
|
d37f8673eed27a85f76c97ee3e924d2ddc033ee5
|
[
"MIT"
] | 3
|
2019-10-27T06:10:26.000Z
|
2020-07-21T01:27:11.000Z
|
appdaemon/apps/toggle_switch/toggle_switch.py
|
Mithras/ha
|
d37f8673eed27a85f76c97ee3e924d2ddc033ee5
|
[
"MIT"
] | null | null | null |
appdaemon/apps/toggle_switch/toggle_switch.py
|
Mithras/ha
|
d37f8673eed27a85f76c97ee3e924d2ddc033ee5
|
[
"MIT"
] | null | null | null |
import globals
class ToggleSwitch(globals.Hass):
async def initialize(self):
config = self.args["config"]
self._input = config["input"]
self._toggle_service = config["toggle_service"]
self._toggle_payload = config["toggle_payload"]
self._power = config["power"]
self._power_on_threshold = float(config["power_on_threshold"])
self._check_interval = float(config["check_interval"])
self.ensure_state_task = await self.create_task(
self._ensure_state_async(False))
await self.listen_state(self._input_callback_async,
entity=self._input)
async def terminate(self):
# self.log("Terminate")
self.ensure_state_task.cancel()
async def _input_callback_async(self, entity, attribute, old, new, kwargs):
if old == new:
return
# self.log(f"InputChange: old = {old}, new = {new}")
self.ensure_state_task.cancel()
self.ensure_state_task = await self.create_task(self._ensure_state_async())
async def _ensure_state_async(self, immediate=True):
# self.log(f"EnsureState: immediate = {immediate}")
if immediate:
await self._toggle_async()
while True:
await self.sleep(self._check_interval)
power = float(await self.get_state(self._power))
input = await self.get_state(self._input)
# self.log(
# f"EnsureState: input = {input}, power: {power}")
if input == "on" and power < self._power_on_threshold or input == "off" and power > self._power_on_threshold:
await self._toggle_async()
async def _toggle_async(self):
# self.log("Toggle")
await self.call_service(self._toggle_service,
**self._toggle_payload)
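# A hedged sketch of the config shape initialize() reads from self.args["config"]
# (keys taken from the code above; entity names and values are placeholders):
#
#   {
#       "input": "input_boolean.heater",
#       "toggle_service": "switch/toggle",
#       "toggle_payload": {"entity_id": "switch.heater_plug"},
#       "power": "sensor.heater_power",
#       "power_on_threshold": 5.0,
#       "check_interval": 30.0,
#   }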
| 38.895833
| 121
| 0.619175
| 217
| 1,867
| 5.023041
| 0.235023
| 0.074312
| 0.082569
| 0.069725
| 0.320183
| 0.157798
| 0.106422
| 0.106422
| 0.106422
| 0.106422
| 0
| 0
| 0.27263
| 1,867
| 47
| 122
| 39.723404
| 0.802651
| 0.109266
| 0
| 0.121212
| 0
| 0
| 0.048913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030303
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c20a7fb2067e672343540ce82ccf23d80253a6c
| 336
|
py
|
Python
|
templates_deepdive_app_bagofwords/udf/dd_extract_features.py
|
charlieccarey/rdoc
|
2e857f29e128f893706d042d583eec698c0bc56a
|
[
"CC-BY-4.0"
] | null | null | null |
templates_deepdive_app_bagofwords/udf/dd_extract_features.py
|
charlieccarey/rdoc
|
2e857f29e128f893706d042d583eec698c0bc56a
|
[
"CC-BY-4.0"
] | 5
|
2016-05-07T04:42:06.000Z
|
2018-04-19T01:08:38.000Z
|
templates_deepdive_app_bagofwords/udf/dd_extract_features.py
|
charlieccarey/rdoc
|
2e857f29e128f893706d042d583eec698c0bc56a
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
'''
1\taaaa~^~bbbb~^~cccc
2\tdddd~^~EEEE~^~ffff
'''
import sys
ARR_DELIM = '~^~'
for row in sys.stdin:
row = row.strip()
sent_id, lemmas = row.split('\t')
lemmas = lemmas.split(ARR_DELIM)
for lemma in lemmas:
print('{}\t{}'.format(sent_id, lemma))
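# A short illustration (input taken from the docstring above): for the line
#   1\taaaa~^~bbbb~^~cccc
# the loop prints one (sent_id, lemma) pair per lemma:
#   1\taaaa
#   1\tbbbb
#   1\tcccc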
| 17.684211
| 46
| 0.625
| 50
| 336
| 4.02
| 0.64
| 0.079602
| 0.109453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007326
| 0.1875
| 336
| 18
| 47
| 18.666667
| 0.728938
| 0.059524
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c232026bb42fe3062506f2b3ba59f07439cb07f
| 7,482
|
py
|
Python
|
ppos_dex_data.py
|
cusma/pposdex
|
31b834ffcb1a43958ccc57b444c7b9337a5623c9
|
[
"MIT"
] | 10
|
2021-01-06T20:09:17.000Z
|
2022-01-07T09:38:02.000Z
|
ppos_dex_data.py
|
cusma/pposdex
|
31b834ffcb1a43958ccc57b444c7b9337a5623c9
|
[
"MIT"
] | null | null | null |
ppos_dex_data.py
|
cusma/pposdex
|
31b834ffcb1a43958ccc57b444c7b9337a5623c9
|
[
"MIT"
] | 1
|
2021-07-17T09:47:18.000Z
|
2021-07-17T09:47:18.000Z
|
import time
import json
import base64
import msgpack
from schema import Schema, And, Optional
from datetime import datetime
from algosdk import mnemonic
from algosdk.account import address_from_private_key
from algosdk.error import *
from algosdk.future.transaction import PaymentTxn
from inequality_indexes import *
from algo_query import *
def wait_for_confirmation(algod_client, transaction_id, timeout):
"""Wait until the transaction is confirmed or rejected, or until 'timeout'
number of rounds have passed.
Args:
algod_client (AlgodClient): Algod Client
transaction_id (str): the transaction to wait for
timeout (int): maximum number of rounds to wait
Returns:
(dict): pending transaction information, or throws an error if the
transaction is not confirmed or rejected in the next timeout rounds
"""
start_round = algod_client.status()["last-round"] + 1
current_round = start_round
while current_round < start_round + timeout:
algod_client.status_after_block(current_round)
try:
pending_txn = algod_client.pending_transaction_info(transaction_id)
except Exception:
return
if pending_txn.get("confirmed-round", 0) > 0:
return pending_txn
elif pending_txn["pool-error"]:
raise Exception(
'pool error: {}'.format(pending_txn["pool-error"]))
current_round += 1
raise Exception(
'pending tx not found in timeout rounds, timeout value = : {}'.format(
timeout))
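# A hedged usage sketch (assumes an AlgodClient instance and a transaction id
# returned by send_transaction(); names are placeholders):
#
#   txid = algod_client.send_transaction(signed_txn)
#   pending = wait_for_confirmation(algod_client, txid, timeout=4)
#   confirmed_round = pending.get("confirmed-round", 0) if pending else 0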
def post_ppos_dex_data(algod_client, indexer_client, passphrase,
algo_threshold):
private_key = mnemonic.to_private_key(passphrase)
account = {'pk': address_from_private_key(private_key),
'sk': private_key}
CONNECTION_ATTEMPT_DELAY_SEC = 3
MAX_CONNECTION_ATTEMPTS = 10
MICROALGO_TO_ALGO = 1 / 10 ** 6
MICROALGO_TOTAL_SUPPLY = 10 ** 16
attempts = 1
params = None
ledger = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
params = algod_client.suggested_params()
ledger = algod_client.ledger_supply()
break
except AlgodHTTPError:
print(f"Algod Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Algod Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Algod Client.")
attempts = 1
algo_owners = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
algo_owners = get_algo_owners(indexer_client, algo_threshold)
break
except IndexerHTTPError:
print(f"Indexer Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Indexer Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Indexer Client.")
stakes = [account['amount'] * MICROALGO_TO_ALGO for
account in algo_owners]
algo_hhi = herfindahl_hirschman_index(stakes)
online_stakes = [account['amount'] * MICROALGO_TO_ALGO
for account in algo_owners
if account['status'] == 'Online']
algo_dynamics = ledger['total-money'] / MICROALGO_TOTAL_SUPPLY
ppos_online_stake = ledger['online-money'] / ledger['total-money']
ppos_online_accounts = len(online_stakes) / len(algo_owners)
ppos_gini = gini_index(online_stakes)
ppos_theil_l = theil_l_index(online_stakes)
ppos_theil_t = theil_t_index(online_stakes)
ppos_hhi = herfindahl_hirschman_index(online_stakes)
ppos_dex = (algo_dynamics
* ppos_online_stake
* ppos_online_accounts
* (1 - ppos_gini))
note = {'algo_threshold': algo_threshold,
'accounts': len(algo_owners),
'algo_hhi': algo_hhi,
'algo_dynamics': algo_dynamics,
'ppos_online_stake': ppos_online_stake,
'ppos_online_accounts': ppos_online_accounts,
'ppos_gini': ppos_gini,
'ppos_theil_l': ppos_theil_l,
'ppos_theil_t': ppos_theil_t,
'ppos_hhi': ppos_hhi,
'ppos_dex': ppos_dex,
'timestamp': str(datetime.now())}
bytes_note = msgpack.packb(note)
unsigned_txn = PaymentTxn(sender=account['pk'],
sp=params,
receiver=account['pk'],
amt=0,
note=bytes_note)
signed_txn = unsigned_txn.sign(account['sk'])
txid = algod_client.send_transaction(signed_txn)
print("Publishing Algorand PPoS Dex data in txID: {}".format(txid))
try:
confirmed_txn = wait_for_confirmation(algod_client, txid, 4)
except Exception as err:
print(err)
return
print("txID: {}".format(txid), " confirmed in round: {}\n".format(
confirmed_txn.get("confirmed-round", 0)))
print("Transaction information:\n{}".format(
json.dumps(confirmed_txn, indent=4)))
def get_ppos_dex_data(indexer_client, ppos_dex_address, algo_threshold,
start_block=11476070, end_block=None):
CONNECTION_ATTEMPT_DELAY_SEC = 3
MAX_CONNECTION_ATTEMPTS = 10
attempts = 1
ppos_dex_txns_note = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
ppos_dex_txns_note = get_address_txns_note(
indexer_client, ppos_dex_address, start_block, end_block)
break
except IndexerHTTPError:
print(f"Indexer Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Indexer Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Indexer Client.")
# TODO: make 'algo_hhi' and 'ppos_hhi' mandatory fields in the schema
schema = Schema({
'algo_threshold': int,
'accounts': And(int, lambda n: 0 <= n),
Optional('algo_hhi'): And(float, lambda n: 0 <= n <= 1),
'algo_dynamics': And(float, lambda n: 0 <= n),
'ppos_online_stake': And(float, lambda n: 0 <= n <= 1),
'ppos_online_accounts': And(float, lambda n: 0 <= n <= 1),
'ppos_gini': And(float, lambda n: 0 <= n <= 1),
'ppos_theil_l': And(float, lambda n: 0 <= n),
'ppos_theil_t': And(float, lambda n: 0 <= n),
Optional('ppos_hhi'): And(float, lambda n: 0 <= n <= 1),
'ppos_dex': And(float, lambda n: 0 <= n <= 1),
'timestamp': str
})
ppos_dex_data = []
for txn_note in ppos_dex_txns_note:
try:
data = schema.validate(
msgpack.unpackb(base64.b64decode(txn_note))
)
if data['algo_threshold'] == algo_threshold:
ppos_dex_data += [data]
except:
pass
if not ppos_dex_data:
quit(f"Impossible to find valid PPos Dex data published by "
f"{ppos_dex_address} starting from block {start_block}.")
return ppos_dex_data
| 36.320388
| 79
| 0.625234
| 889
| 7,482
| 4.993251
| 0.202475
| 0.028385
| 0.052039
| 0.020275
| 0.353007
| 0.293985
| 0.283172
| 0.225276
| 0.200045
| 0.177968
| 0
| 0.011007
| 0.283614
| 7,482
| 205
| 80
| 36.497561
| 0.817164
| 0.064288
| 0
| 0.277108
| 0
| 0
| 0.163576
| 0.015524
| 0
| 0
| 0
| 0.004878
| 0
| 1
| 0.018072
| false
| 0.018072
| 0.072289
| 0
| 0.114458
| 0.060241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2938d99163d6ef8085c36d2b63a4a8fe4a49b8
| 117,896
|
py
|
Python
|
hvm/chains/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
hvm/chains/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
hvm/chains/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import operator
from collections import deque
import functools
from abc import (
ABCMeta,
abstractmethod
)
import rlp_cython as rlp
import time
import math
from uuid import UUID
from typing import ( # noqa: F401
Any,
Optional,
Callable,
cast,
Dict,
Generator,
Iterator,
Tuple,
Type,
TYPE_CHECKING,
Union,
List,
Iterable,
)
import logging
from itertools import groupby
from hvm.rlp.receipts import Receipt
from hvm.types import Timestamp
from eth_typing import (
Address,
BlockNumber,
Hash32,
)
from eth_utils import (
to_tuple,
to_set,
)
from hvm.db.backends.base import BaseDB
from hvm.db.backends.memory import MemoryDB
from hvm.db.chain import (
BaseChainDB,
ChainDB,
)
from hvm.db.journal import (
JournalDB,
)
from hvm.db.read_only import ReadOnlyDB
from hvm.constants import (
BLOCK_GAS_LIMIT,
BLANK_ROOT_HASH,
NUMBER_OF_HEAD_HASH_TO_SAVE,
TIME_BETWEEN_HEAD_HASH_SAVE,
GENESIS_PARENT_HASH,
)
from hvm.db.trie import make_trie_root_and_nodes
from hvm import constants
from hvm.estimators import (
get_gas_estimator,
)
from hvm.exceptions import (
HeaderNotFound,
TransactionNotFound,
ValidationError,
VMNotFound,
BlockOnWrongChain,
CanonicalHeadNotFound,
CannotCalculateStake,
NotEnoughTimeBetweenBlocks,
ReceivableTransactionNotFound,
TriedImportingGenesisBlock,
JournalDbNotActivated,
ReplacingBlocksNotAllowed,
UnprocessedBlockNotAllowed,
AppendHistoricalRootHashTooOld,
HistoricalNetworkTPCMissing,
HistoricalMinGasPriceError,
UnprocessedBlockChildIsProcessed,
ParentNotFound,
NoChronologicalBlocks,
RewardProofSenderBlockMissing,
InvalidHeadRootTimestamp,
RewardAmountRoundsToZero, TriedDeletingGenesisBlock, NoGenesisBlockPresent)
from eth_keys.exceptions import (
BadSignature,
)
from hvm.utils.blocks import reorganize_chronological_block_list_for_correct_chronological_order_at_index
from hvm.validation import (
validate_block_number,
validate_uint256,
validate_word,
validate_vm_configuration,
validate_canonical_address,
validate_is_queue_block,
validate_centisecond_timestamp,
)
from hvm.rlp.blocks import (
BaseBlock,
BaseQueueBlock,
)
from hvm.rlp.headers import (
BlockHeader,
HeaderParams,
)
from hvm.rlp.transactions import (
BaseTransaction,
BaseReceiveTransaction
)
from hvm.utils.db import (
apply_state_dict,
)
from hvm.utils.datatypes import (
Configurable,
)
from hvm.utils.headers import (
compute_gas_limit_bounds,
)
from hvm.utils.hexadecimal import (
encode_hex,
decode_hex
)
from hvm.utils.rlp import (
ensure_imported_block_unchanged,
)
from hvm.db.chain_head import ChainHeadDB
from hvm.db.consensus import ConsensusDB
from eth_keys import keys
from eth_keys.datatypes import(
BaseKey,
PublicKey,
PrivateKey
)
from hvm.utils.numeric import (
effecient_diff,
are_items_in_list_equal,
)
from sortedcontainers import (
SortedList,
SortedDict,
)
from hvm.rlp.consensus import NodeStakingScore, PeerNodeHealth
from hvm.rlp.accounts import TransactionKey
if TYPE_CHECKING:
from hvm.vm.base import BaseVM # noqa: F401
from functools import partial
import asyncio
# Mapping from address to account state.
# 'balance', 'nonce' -> int
# 'code' -> bytes
# 'storage' -> Dict[int, int]
AccountState = Dict[Address, Dict[str, Union[int, bytes, Dict[int, int]]]]
class BaseChain(Configurable, metaclass=ABCMeta):
"""
The base class for all Chain objects
"""
chain_head_db: ChainHeadDB = None
chaindb: ChainDB = None
chaindb_class = None # type: Type[BaseChainDB]
vm_configuration = None # type: Tuple[Tuple[int, Type[BaseVM]], ...]
genesis_wallet_address: Address = None
genesis_block_timestamp: Timestamp = None
min_time_between_blocks: int = None
#
# Helpers
#
@classmethod
@abstractmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def enable_read_only_db(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Chain API
#
@classmethod
@abstractmethod
def from_genesis(cls,
base_db: BaseDB,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
@abstractmethod
def from_genesis_header(cls,
base_db: BaseDB,
genesis_header: BlockHeader) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_chain_at_block_parent(self, block: BaseBlock) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
#
# VM API
#
@classmethod
def get_vm_configuration(cls) -> Tuple[Tuple[int, Type['BaseVM']], ...]:
return cls.vm_configuration
@classmethod
def get_vm_class(cls, header: BlockHeader) -> Type['BaseVM']:
"""
Returns the VM instance for the given block number.
"""
return cls.get_vm_class_for_block_timestamp(header.timestamp)
@abstractmethod
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
def get_vm_class_for_block_timestamp(cls, timestamp: int = None) -> Type['BaseVM']:
"""
Returns the VM class for the given block number.
"""
if timestamp is None:
timestamp = int(time.time())
if cls.vm_configuration is None:
raise AttributeError("Chain classes must define the VMs in vm_configuration")
validate_uint256(timestamp)
for start_timestamp, vm_class in reversed(cls.vm_configuration):
if timestamp >= start_timestamp:
return vm_class
else:
raise VMNotFound("No vm available for timestamp #{0}".format(timestamp))
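# A brief illustration (hypothetical fork schedule, not part of this class): with
#   vm_configuration = ((0, FrontierVM), (1_500_000_000, HeliosVM))
# the reversed() scan above returns HeliosVM for any timestamp at or after
# 1_500_000_000 and FrontierVM otherwise; a timestamp below the earliest entry
# raises VMNotFound.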
#
# Header API
#
@abstractmethod
def create_header_from_parent(self,
parent_header: BlockHeader,
**header_params: HeaderParams) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_head(self):
raise NotImplementedError("Chain classes must implement this method")
#
# Block API
#
@abstractmethod
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_number(self, block_number: BlockNumber, wallet_address: Address = None) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain(self, start: int, end: int, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain(self, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_hash(self, block_number):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_chronological_blocks_for_window(self, window_timestamp: Timestamp) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader):
raise NotImplementedError("Chain classes must implement this method")
#
# Chronologically consistent blockchain db API
#
@abstractmethod
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
raise NotImplementedError("Chain classes must implement this method")
#
# Transaction API
#
@abstractmethod
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_queue_block_nonce(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
#
# Chronological Chain API
#
@abstractmethod
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Execution API
#
# @abstractmethod
# def apply_transaction(self, transaction):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_block(self, block: BaseBlock, perform_validation: bool=True) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp,
save_block_head_hash_timestamp: bool = True,
allow_unprocessed: bool = False) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Validation API
#
@abstractmethod
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block(self, block: BaseBlock) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_gaslimit(self, header: BlockHeader) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block_specification(self, block) -> bool:
raise NotImplementedError("Chain classes must implement this method")
#
# Stake API
#
@abstractmethod
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp, timestamp_for_stake):
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
raise NotImplementedError("Chain classes must implement this method")
#
# Min Block Gas API used for throttling the network
#
@abstractmethod
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def update_current_network_tpc_capability(self, current_network_tpc_cap: int,
update_min_gas_price: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_local_tpc_cap(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
#
# Consensus db passthrough with correct db corresponding to timestamp
#
@abstractmethod
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
raise NotImplementedError("Chain classes must implement this method")
class Chain(BaseChain):
"""
A Chain is a combination of one or more VM classes. Each VM is associated
with a range of blocks. The Chain class acts as a wrapper around these other
VM classes, delegating operations to the appropriate VM depending on the
current block number.
"""
raise_errors = False
logger = logging.getLogger("hvm.chain.chain.Chain")
header = None # type: BlockHeader
network_id = None # type: int
gas_estimator = None # type: Callable
_journaldb = None
num_journal_records_for_block_import = 0
chaindb_class = ChainDB # type: Type[BaseChainDB]
chain_head_db_class = ChainHeadDB
_queue_block: BaseQueueBlock = None
def __init__(self, base_db: BaseDB, wallet_address: Address, private_key: BaseKey=None) -> None:
if not self.vm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `vm_configuration`"
)
else:
validate_vm_configuration(self.vm_configuration)
validate_canonical_address(wallet_address, "Wallet Address")
self.db = base_db
self.private_key = private_key
self.wallet_address = wallet_address
self.chaindb = self.get_chaindb_class()(self.db)
self.chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(self.db)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
# this is a new block, let's make a genesis block
# self.logger.debug("Creating new genesis block on chain {}".format(self.wallet_address))
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
if self.gas_estimator is None:
self.gas_estimator = get_gas_estimator() # type: ignore
def reinitialize(self):
self.__init__(self.db, self.wallet_address, self.private_key)
def set_new_wallet_address(self, wallet_address: Address, private_key: BaseKey=None):
self.logger.debug('setting new wallet address')
self.wallet_address = wallet_address
self.private_key = private_key
self.reinitialize()
@property
def queue_block(self):
if self._queue_block is None:
self._queue_block = self.get_queue_block()
return self._queue_block
@queue_block.setter
def queue_block(self,val:BaseQueueBlock):
self._queue_block = val
@property
def min_time_between_blocks(self):
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return min_allowed_time_between_blocks
# @property
# def consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None):
# # gets the consensus db corresponding to the block timestamp
#
# return self.get_vm(header, timestamp).consensus_db
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
# gets the consensus db corresponding to the block timestamp
return self.get_vm(header, timestamp).consensus_db
#
# Global Record and discard API
#
def enable_read_only_db(self) -> None:
if not isinstance(self.db, ReadOnlyDB):
self.base_db = self.db
self.db = ReadOnlyDB(self.base_db)
self.reinitialize()
def enable_journal_db(self):
if self._journaldb is None:
self.base_db = self.db
self._journaldb = JournalDB(self.base_db)
#we keep the name self.db so that all of the functions still work, but at this point it is a journaldb.
self.db = self._journaldb
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def disable_journal_db(self):
if self._journaldb is not None:
self.db = self.base_db
self._journaldb = None
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def record_journal(self) -> UUID:
if self._journaldb is not None:
return (self._journaldb.record())
else:
raise JournalDbNotActivated()
def discard_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.discard(db_changeset)
else:
raise JournalDbNotActivated()
def commit_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.commit(db_changeset)
else:
raise JournalDbNotActivated()
def persist_journal(self) -> None:
if self._journaldb is not None:
self._journaldb.persist()
else:
raise JournalDbNotActivated()
#
# Helpers
#
@classmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
if cls.chaindb_class is None:
raise AttributeError("`chaindb_class` not set")
return cls.chaindb_class
@classmethod
def get_chain_head_db_class(cls) -> Type[ChainHeadDB]:
if cls.chain_head_db_class is None:
raise AttributeError("`chain_head_db class` not set")
return cls.chain_head_db_class
@classmethod
def get_genesis_wallet_address(cls) -> Address:
if cls.genesis_wallet_address is None:
raise AttributeError("`genesis_wallet_address` not set")
return cls.genesis_wallet_address
#
# Chain API
#
@classmethod
def create_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
private_key: BaseKey,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None,
) -> 'BaseChain':
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(base_db)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = wallet_address)
genesis_params['account_hash'] = account_db.get_account_hash(wallet_address)
genesis_header = BlockHeader(**genesis_params)
signed_genesis_header = genesis_header.get_signed(private_key, cls.network_id)
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(signed_genesis_header)
return signed_genesis_header
@classmethod
def from_genesis(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState,
private_key: BaseKey = None
) -> 'BaseChain':
"""
Initializes the Chain from a genesis state.
"""
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(
base_db
)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = cls.genesis_wallet_address)
genesis_header = BlockHeader(**genesis_params)
return cls.from_genesis_header(base_db, wallet_address = wallet_address, private_key = private_key, genesis_header = genesis_header)
@classmethod
def from_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_header: BlockHeader,
private_key: BaseKey,
) -> 'BaseChain':
"""
Initializes the chain from the genesis header.
"""
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(genesis_header)
chain_head_db = cls.get_chain_head_db_class()(base_db)
#window_for_this_block = math.ceil((genesis_header.timestamp+1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
window_for_this_block = int(genesis_header.timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
chain_head_db.set_chain_head_hash(cls.genesis_wallet_address, genesis_header.hash)
chain_head_db.initialize_historical_root_hashes(chain_head_db.root_hash, window_for_this_block)
chain_head_db.persist(save_current_root_hash = True)
#chain_head_db.add_block_hash_to_chronological_window(genesis_header.hash, genesis_header.timestamp)
return cls(base_db, wallet_address = wallet_address, private_key=private_key)
def get_chain_at_block_parent(self, block: BaseBlock) -> BaseChain:
"""
Returns a `Chain` instance with the given block's parent at the chain head.
"""
try:
parent_header = self.get_block_header_by_hash(block.header.parent_hash)
except HeaderNotFound:
raise ValidationError("Parent ({0}) of block {1} not found".format(
block.header.parent_hash,
block.header.hash
))
init_header = self.create_header_from_parent(parent_header)
return type(self)(self.chaindb.db, self.wallet_address, self.private_key, init_header)
#
# VM API
#
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
"""
Returns the VM instance for the given block timestamp. Or if timestamp is given, gets the vm for that timestamp
"""
if header is not None and timestamp is not None:
raise ValueError("Cannot specify header and timestamp for get_vm(). Only one is allowed.")
if header is None or header == self.header:
header = self.header
if timestamp is not None:
header = header.copy(timestamp = timestamp)
vm_class = self.get_vm_class_for_block_timestamp(header.timestamp)
return vm_class(header=header,
chaindb=self.chaindb,
network_id=self.network_id)
else:
vm_class = self.get_vm_class_for_block_timestamp(header.timestamp)
return vm_class(header=header,
chaindb=self.chaindb,
network_id=self.network_id)
#
# Header API
#
def create_header_from_parent(self, parent_header, **header_params):
"""
Passthrough helper to the VM class of the block descending from the
given header.
"""
return self.get_vm_class_for_block_timestamp().create_header_from_parent(parent_header, **header_params)
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
"""
Returns the requested block header as specified by block hash.
Raises BlockNotFound if there's no block header with the given hash in the db.
"""
validate_word(block_hash, title="Block Hash")
return self.chaindb.get_block_header_by_hash(block_hash)
def get_canonical_head(self, chain_address = None):
"""
Returns the block header at the canonical chain head.
Raises CanonicalHeadNotFound if there's no head defined for the canonical chain.
"""
if chain_address is not None:
return self.chaindb.get_canonical_head(chain_address)
else:
return self.chaindb.get_canonical_head(self.wallet_address)
#
# Block API
#
def get_genesis_block_hash(self) -> Hash32:
return self.chaindb.get_canonical_block_hash(block_number = BlockNumber(0),
chain_address= self.genesis_wallet_address)
@to_tuple
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
"""
Return `limit` number of ancestor blocks from the current canonical head.
"""
if header is None:
header = self.header
lower_limit = max(header.block_number - limit, 0)
for n in reversed(range(lower_limit, header.block_number)):
yield self.get_block_by_number(BlockNumber(n), header.chain_address)
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
block_header = self.get_block_header_by_hash(block_hash)
return self.get_block_by_header(block_header)
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
send_transactions = self.chaindb.get_block_transactions(block_header, block_class.transaction_class)
receive_transactions = self.chaindb.get_block_receive_transactions(block_header,block_class.receive_transaction_class)
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
output_block = block_class(block_header, send_transactions, receive_transactions, reward_bundle)
return output_block
def get_block_by_number(self, block_number: BlockNumber, chain_address: Address = None) -> BaseBlock:
if chain_address is None:
chain_address = self.wallet_address
block_hash = self.chaindb.get_canonical_block_hash(block_number, chain_address)
return self.get_block_by_hash(block_hash)
def get_blocks_on_chain(self, start: int, end: int, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
if end == 0:
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
end = head_block_number + 1
blocks = []
for block_number in range(start, end):
try:
new_block = self.get_block_by_number(BlockNumber(block_number), chain_address)
blocks.append(new_block)
except HeaderNotFound:
break
return blocks
def get_all_blocks_on_chain(self, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
return self.get_blocks_on_chain(0, head_block_number + 1, chain_address=chain_address)
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
chain_address = chain_head_header.chain_address
return self.get_all_blocks_on_chain(chain_address)
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
to_block_number = chain_head_header.block_number
if to_block_number > (start_block_number + limit):
to_block_number = (start_block_number + limit)
chain_address = chain_head_header.chain_address
return self.get_blocks_on_chain(start_block_number, to_block_number + 1, chain_address)
def get_block(self) -> BaseBlock:
"""
Returns the current TIP block.
"""
return self.get_vm().block
def get_queue_block(self) -> BaseBlock:
"""
Returns the current queue block.
"""
return self.get_vm().queue_block
# def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
# """
# Returns the requested block as specified by block hash.
# """
# validate_word(block_hash, title="Block Hash")
# block_header = self.get_block_header_by_hash(block_hash)
# return self.get_block_by_header(block_header)
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# """
# Returns the block with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# validate_uint256(block_number, title="Block Number")
# return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number))
#
# def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32:
# """
# Returns the block hash with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# return self.chaindb.get_canonical_block_hash(block_number)
#
# Blockchain Database API
#
def save_chain_head_hash_to_trie_for_time_period(self,block_header):
timestamp = block_header.timestamp
currently_saving_window = int(time.time()/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
if timestamp <= currently_saving_window:
#we have to go back and put it into the correct window, and update all windows after that
#lets only keep the past NUMBER_OF_HEAD_HASH_TO_SAVE block_head_root_hash
window_for_this_block = int(timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
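# Worked example of the window arithmetic above (illustrative numbers only, not the configured
# constant): with TIME_BETWEEN_HEAD_HASH_SAVE = 1000, a block timestamp of 123456 maps to
# int(123456/1000)*1000 + 1000 = 124000, i.e. the first save boundary strictly after the timestamp.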
#window_for_this_block = math.ceil((timestamp + 1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
# if propogate_to_present:
self.chain_head_db.add_block_hash_to_timestamp(block_header.chain_address, block_header.hash, window_for_this_block)
# else:
# self.chain_head_db.add_block_hash_to_timestamp_without_propogating_to_present(self.wallet_address, block_header.hash, window_for_this_block)
#
# Queueblock API
#
def add_transaction_to_queue_block(self, transaction) -> None:
validate_is_queue_block(self.queue_block, title='self.queue_block')
if isinstance(transaction, BaseTransaction):
if not self.queue_block.contains_transaction(transaction):
self.queue_block = self.queue_block.add_transaction(transaction)
else:
self.logger.debug("found transaction in queueblock already, not adding again")
else:
if not self.queue_block.contains_receive_transaction(transaction):
self.queue_block = self.queue_block.add_receive_transaction(transaction)
else:
self.logger.debug("found receive transaction in queueblock already, not adding again")
def add_transactions_to_queue_block(self, transactions) -> None:
if not isinstance(transactions, list):
self.add_transaction_to_queue_block(transactions)
#self.logger.debug("tx_nonce after adding transaction = {}".format(self.queue_block.current_tx_nonce))
else:
for tx in transactions:
self.add_transaction_to_queue_block(tx)
def sign_queue_block(self, *args: Any, **kwargs: Any) -> BaseQueueBlock:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_queue_block(*args, **kwargs)
def sign_header(self, *args: Any, **kwargs: Any) -> BlockHeader:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_header(*args, **kwargs)
#
# Transaction API
#
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
"""
Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain.
"""
(block_hash, index, is_receive) = self.chaindb.get_transaction_index(transaction_hash)
block_header = self.get_block_header_by_hash(block_hash)
VM = self.get_vm_class_for_block_timestamp(block_header.timestamp)
if not is_receive:
transaction = self.chaindb.get_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_transaction_class(),
)
else:
transaction = self.chaindb.get_receive_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_receive_transaction_class(),
)
if transaction.hash == transaction_hash:
return transaction
else:
raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format(
encode_hex(transaction.hash),
encode_hex(transaction_hash),
block_hash,
index,
))
@functools.lru_cache(maxsize=32)
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
num_send_transactions = self.chaindb.get_number_of_send_tx_in_block(block_hash)
header = self.chaindb.get_block_header_by_hash(block_hash)
vm = self.get_vm(header=header)
if transaction_index >= num_send_transactions:
# receive transaction
transaction_index = transaction_index - num_send_transactions
tx = self.chaindb.get_receive_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_receive_transaction_class())
else:
# send transaction
tx = self.chaindb.get_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_transaction_class())
return tx
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_transaction(*args, **kwargs)
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if self.private_key is None:
raise ValueError("Cannot sign transaction because private key not provided for chain instantiation")
transaction = self.create_transaction(*args, **kwargs)
signed_transaction = transaction.get_signed(self.private_key, self.network_id)
return signed_transaction
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if 'nonce' not in kwargs or kwargs['nonce'] is None:
kwargs['nonce'] = self.get_current_queue_block_nonce()
transaction = self.create_and_sign_transaction(*args, **kwargs)
self.add_transactions_to_queue_block(transaction)
return transaction
def get_current_queue_block_nonce(self) -> int:
if self.queue_block is None or self.queue_block.current_tx_nonce is None:
tx_nonce = self.get_vm().state.account_db.get_nonce(self.wallet_address)
else:
tx_nonce = self.queue_block.current_tx_nonce
return tx_nonce
def create_receive_transaction(self, *args: Any, **kwargs: Any) -> BaseReceiveTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_receive_transaction(*args, **kwargs)
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
#from hvm.rlp_templates.accounts import TransactionKey
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(address)
if len(tx_keys) == 0:
return [], []
transactions = []
for tx_key in tx_keys:
tx = self.get_canonical_transaction(tx_key.transaction_hash)
transactions.append(tx)
return transactions, tx_keys
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(self.wallet_address)
if len(tx_keys) == 0:
return []
receive_transactions = []
for tx_key in tx_keys:
#find out if it is a receive or a refund
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_key.transaction_hash)
re_tx = self.get_vm().create_receive_transaction(
sender_block_hash = tx_key.sender_block_hash,
send_transaction_hash=tx_key.transaction_hash,
is_refund=is_receive,
)
receive_transactions.append(re_tx)
return receive_transactions
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
receive_tx = self.create_receivable_transactions()
self.add_transactions_to_queue_block(receive_tx)
return receive_tx
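# Typical flow sketch (hypothetical usage; assumes `chain` was created with a private key):
#   chain.populate_queue_block_with_receive_tx()   # turn receivable txs into receive txs on the queue block
#   chain.import_current_queue_block()             # sign and import the queue block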
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
block_header = self.get_block_header_by_hash(block_hash)
vm = self.get_vm(header = block_header)
receive_transaction_class = vm.get_block_class().receive_transaction_class
receive_transactions = self.chaindb.get_block_receive_transactions(header = block_header, transaction_class = receive_transaction_class)
return receive_transactions
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_hash)
if is_receive:
raise ValidationError("The provided tx hash is not for a send transaction")
send_transaction = self.get_canonical_transaction(tx_hash)
block_children = self.chaindb.get_block_children(block_hash)
if block_children is not None:
block_children_on_correct_chain = [child_hash for child_hash in block_children
if self.chaindb.get_chain_wallet_address_for_block_hash(child_hash) == send_transaction.to]
for block_hash in block_children_on_correct_chain:
receive_transactions = self.get_block_receive_transactions_by_hash(block_hash)
for receive_tx in receive_transactions:
if receive_tx.send_transaction_hash == tx_hash:
return receive_tx
return None
def get_transaction_by_index_and_block_hash(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
header = self.chaindb.get_block_header_by_hash(block_hash)
vm = self.get_vm(header=header)
return self.chaindb.get_transaction_by_index_and_block_hash(
    block_hash,
    transaction_index,
    vm.get_transaction_class(),
)
#
# Chronological Chain api
#
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
try:
correct_chronological_block_window = self.get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(historical_root_hash_timestamp)
self.chain_head_db.save_chronological_block_window(correct_chronological_block_window, historical_root_hash_timestamp-TIME_BETWEEN_HEAD_HASH_SAVE)
except InvalidHeadRootTimestamp:
pass
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
'''
This is a time-consuming function that collects all of the blocks that are new in this root hash,
i.e. blocks that didn't exist at the base (previous) root hash. Returns None if the two root hashes are identical.
:param historical_root_hash_timestamp:
:return:
'''
block_window_start = historical_root_hash_timestamp - TIME_BETWEEN_HEAD_HASH_SAVE
base_root_hash = self.chain_head_db.get_historical_root_hash(block_window_start)
new_root_hash = self.chain_head_db.get_historical_root_hash(historical_root_hash_timestamp)
if base_root_hash == new_root_hash:
return None
if base_root_hash is None or new_root_hash is None:
raise InvalidHeadRootTimestamp(
"Could not load block hashes for this historical_root_hash_timestamp because we don't have a root hash for this window or the previous window.")
base_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(base_root_hash))
new_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(new_root_hash))
diff_head_block_hashes = new_head_block_hashes - base_head_block_hashes
chronological_block_hash_timestamps = []
# now we have to run down each chain until we get to a block that is older than block_window_start
for head_block_hash in diff_head_block_hashes:
header = self.chaindb.get_block_header_by_hash(head_block_hash)
chronological_block_hash_timestamps.append([header.timestamp, head_block_hash])
while True:
if header.parent_hash == GENESIS_PARENT_HASH:
break
try:
header = self.chaindb.get_block_header_by_hash(header.parent_hash)
except HeaderNotFound:
break
if header.timestamp < block_window_start:
break
chronological_block_hash_timestamps.append([header.timestamp, header.hash])
assert len(chronological_block_hash_timestamps) > 0
chronological_block_hash_timestamps.sort()
return chronological_block_hash_timestamps
# def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
# '''
# This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
# This function needs to be run from chain because it requires chain_head_db and chaindb.
# :return:
# '''
#
# self.chain_head_db.load_saved_root_hash()
# current_window = self.chain_head_db.current_window
# earliest_root_hash = self.chain_head_db.earliest_window
# #TIME_BETWEEN_HEAD_HASH_SAVE
#
# # 1) iterate down the root hash times
# # 2) create new chain_head_db with memorydb
# # 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# # 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# # 5) get the root hash
# # 6) set this root hash in the real chain_head_db at the correct timestamp.
#
# # A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# # A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
#
# # us a journaldb so that it doesnt write changes to the database.
# temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
# #temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
# for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
# self.logger.debug("Rebuilding chronological block window {}".format(current_timestamp))
# if current_timestamp < self.genesis_block_timestamp:
# break
#
# if current_timestamp == current_window:
# head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# else:
# head_block_hashes = temp_chain_head_db.get_head_block_hashes_list()
#
# # iterate over all chains
# for head_block_hash in head_block_hashes:
# current_block_hash = head_block_hash
# # now iterate over blocks in chain
# while True:
# current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
# if current_header.timestamp >= current_timestamp:
# # add it to chronological block window in the real chain head db
# self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
# else:
# # The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
# temp_chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
# break
# if current_header.parent_hash == GENESIS_PARENT_HASH:
# # we reached the end of the chain
# temp_chain_head_db.delete_chain_head_hash(current_header.chain_address)
# break
# # set the current block to the parent so we move down the chain
# current_block_hash = current_header.parent_hash
#
# # Now that we have gone through all chains, and removed any blocks newer than this timestamp, the root hash in the
# # temp chain head db is the correct one for this historical root hash timestamp.
# self.chain_head_db.save_single_historical_root_hash(temp_chain_head_db.root_hash, Timestamp(current_timestamp))
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
'''
This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
This function needs to be run from chain because it requires chain_head_db and chaindb.
:return:
'''
self.chain_head_db.load_saved_root_hash()
current_window = self.chain_head_db.current_window
earliest_root_hash = self.chain_head_db.earliest_window
# Starting from the saved root hash, work backwards through the root hash windows:
# 1) iterate down the root hash times
# 2) create new chain_head_db with memorydb
# 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# 5) get the root hash
# 6) set this root hash in the real chain_head_db at the correct timestamp.
# A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
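# Illustrative example of the two definitions above (hypothetical numbers): if
# TIME_BETWEEN_HEAD_HASH_SAVE were 1000, the chronological block window at timestamp 5000 would
# hold blocks with timestamps in [5000, 6000), while the historical root hash at timestamp 5000
# would cover every block with a timestamp earlier than 5000.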
self.logger.debug("Rebuilding chronological block windows")
# use a temporary chain_head_db backed by a MemoryDB so that it doesn't write changes to the real database.
temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
#temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
if current_timestamp < self.genesis_block_timestamp:
break
head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# iterate over all chains
for head_block_hash in head_block_hashes:
current_block_hash = head_block_hash
# now iterate over blocks in chain
while True:
current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
if current_header.timestamp >= current_timestamp:
# add it to chronological block window in the real chain head db
self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
else:
# The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
self.chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
break
if current_header.parent_hash == GENESIS_PARENT_HASH:
# we reached the end of the chain
self.chain_head_db.delete_chain_head_hash(current_header.chain_address)
break
# set the current block to the parent so we move down the chain
current_block_hash = current_header.parent_hash
# Now that we have gone through all chains and set each chain head to the newest block older than this timestamp,
# the root hash in the chain head db is the correct one for this historical root hash timestamp.
self.chain_head_db.save_single_historical_root_hash(self.chain_head_db.root_hash, Timestamp(current_timestamp))
self.chain_head_db.persist()
# finally, lets load the saved root hash again so we are up to date.
self.chain_head_db.load_saved_root_hash()
#
# Execution API
#
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
"""
Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header.
"""
if at_header is None:
at_header = self.get_canonical_head()
with self.get_vm(at_header).state_in_temp_block() as state:
return self.gas_estimator(state, transaction)
def validate_time_from_genesis_block(self,block):
if not block.is_genesis:
#first make sure enough time has passed since genesis. We need at least TIME_BETWEEN_HEAD_HASH_SAVE since genesis so that the
# genesis historical root hash only contains the genesis chain.
if block.header.timestamp < (self.genesis_block_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE):
raise NotEnoughTimeBetweenBlocks("Not enough time has passed since the genesis block. Must wait at least {} seconds after genesis block. "
"This block timestamp is {}, genesis block timestamp is {}.".format(TIME_BETWEEN_HEAD_HASH_SAVE, block.header.timestamp, self.genesis_block_timestamp))
return
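# Example of the check above (illustrative numbers): with a genesis timestamp of 10000 and
# TIME_BETWEEN_HEAD_HASH_SAVE of 1000, any non-genesis block with header.timestamp < 11000
# would raise NotEnoughTimeBetweenBlocks.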
#
# Reverting block functions
#
def delete_canonical_chain(self, wallet_address: Address, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
self.logger.debug("delete_canonical_chain. Chain address {}".format(encode_hex(wallet_address)))
self.chain_head_db.delete_chain(wallet_address, save_block_head_hash_timestamp)
self.chaindb.delete_canonical_chain(wallet_address)
vm.state.clear_account_keep_receivable_transactions_and_persist(wallet_address)
def set_parent_as_canonical_head(self, existing_block_header: BlockHeader, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
block_parent_header = self.chaindb.get_block_header_by_hash(existing_block_header.parent_hash)
self.logger.debug("Setting new block as canonical head after reverting blocks. Chain address {}, header hash {}".format(encode_hex(existing_block_header.chain_address), encode_hex(block_parent_header.hash)))
if save_block_head_hash_timestamp:
self.save_chain_head_hash_to_trie_for_time_period(block_parent_header)
self.chain_head_db.set_chain_head_hash(block_parent_header.chain_address, block_parent_header.hash)
self.chaindb._set_as_canonical_chain_head(block_parent_header)
vm.state.revert_account_to_hash_keep_receivable_transactions_and_persist(block_parent_header.account_hash, block_parent_header.chain_address)
def revert_block(self, descendant_block_hash: Hash32) -> None:
self.logger.debug('Reverting block with hash {}'.format(encode_hex(descendant_block_hash)))
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
vm = self.get_vm(descendant_block_header)
self.chain_head_db.delete_block_hash_from_chronological_window(descendant_block_hash, descendant_block_header.timestamp)
self.chaindb.remove_block_from_all_parent_child_lookups(descendant_block_header, vm.get_block_class().receive_transaction_class)
self.chaindb.delete_all_block_children_lookups(descendant_block_hash)
self.revert_block_chronological_consistency_lookups(descendant_block_hash)
#for every one, re-add pending receive transaction for all receive transactions only if sending block still exists
#make all blocks unprocessed so that receivable transactions are not saved that came from one of the non-canonical blocks.
vm.reverse_pending_transactions(descendant_block_header)
# remove the block from the canonical chain. This must be done last because reversing the pending transactions requires that it
# is still in the canonical chain to look up transactions
self.chaindb.delete_block_from_canonical_chain(descendant_block_hash)
#self.chaindb.save_unprocessed_block_lookup(descendant_block_hash)
vm.state.account_db.persist()
def revert_block_chronological_consistency_lookups(self, block_hash: Hash32) -> None:
# check to see if there are any reward type 2 proofs. Then loop through each one to revert inconsistency lookups
block_header = self.chaindb.get_block_header_by_hash(block_hash)
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.chaindb.delete_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32, save_block_head_hash_timestamp: bool = True) -> None:
genesis_block_hash = self.chaindb.get_canonical_block_hash(BlockNumber(0), self.genesis_wallet_address)
if block_hash_to_delete == genesis_block_hash:
raise TriedDeletingGenesisBlock("Attempted to delete genesis block. This is not allowed.")
block_header_to_delete = self.chaindb.get_block_header_by_hash(block_hash_to_delete)
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header_to_delete, save_block_head_hash_timestamp)
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader, save_block_head_hash_timestamp: bool = True) -> None:
# First make sure it is actually in the canonical chain. If not, then we don't have anything to do.
if self.chaindb.is_in_canonical_chain(existing_block_header.hash):
vm = self.get_vm()
if existing_block_header.block_number == 0:
self.delete_canonical_chain(existing_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
#set the parent block as the new canonical head, and handle all the data for that
self.set_parent_as_canonical_head(existing_block_header, vm, save_block_head_hash_timestamp)
#1) delete chronological transactions, delete everything from chronological root hashes, delete children lookups
all_descendant_block_hashes = self.chaindb.get_all_descendant_block_hashes(existing_block_header.hash)
#first set all of the new chain heads and all the data that goes along with them
if all_descendant_block_hashes is not None:
for descendant_block_hash in all_descendant_block_hashes:
if not self.chaindb.is_block_unprocessed(descendant_block_hash):
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
if descendant_block_header.parent_hash not in all_descendant_block_hashes:
#this is the new head of a chain. set it as the new head for chronological root hashes
#except for children in this chain, because it will be off by 1 block. we already set this earlier
if descendant_block_header.chain_address != existing_block_header.chain_address:
if descendant_block_header.block_number == 0:
self.delete_canonical_chain(descendant_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
self.set_parent_as_canonical_head(descendant_block_header, vm, save_block_head_hash_timestamp)
# Must persist now because revert_block creates new vm's for each block and could overwrite changes if we wait.
vm.state.account_db.persist()
#now we know what the new heads are, so we can deal with the rest of the descendants
for descendant_block_hash in all_descendant_block_hashes:
#here, since we are already going through all children, we don't need this function to purge children as well
if self.chaindb.is_block_unprocessed(descendant_block_hash):
self.purge_unprocessed_block(descendant_block_hash, purge_children_too = False)
else:
self.revert_block(descendant_block_hash)
self.revert_block(existing_block_header.hash)
#persist changes
self.chain_head_db.persist(True)
self.reinitialize()
def purge_unprocessed_block(self, block_hash, purge_children_too = True):
'''
Deletes all unprocessed block lookups, and unprocessed children lookups for this block and all children blocks.
Todo: delete saved block header, and saved transaction tries for each block as well
'''
self.logger.debug("purging unprocessed block")
if purge_children_too:
self.logger.debug("purging unprocessed children")
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED CHILDREN BLOCKS")
children_block_hashes = self.chaindb.get_block_children(block_hash)
if children_block_hashes is not None:
for child_block_hash in children_block_hashes:
#this includes the child in this actual chain as well as children from send transactions.
if not self.chaindb.is_block_unprocessed(child_block_hash):
raise UnprocessedBlockChildIsProcessed("In process of deleting children of unprocessed block, and found one that is processed. This should never happen")
else:
self.purge_unprocessed_block(child_block_hash)
try:
block = self.get_block_by_hash(block_hash)
chain = encode_hex(block.header.chain_address)
self.logger.debug("deleting unprocessed child block number {} on chain {}".format(block.number, chain))
self.chaindb.remove_block_from_unprocessed(block)
except HeaderNotFound:
pass
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp, save_block_head_hash_timestamp:bool = True, allow_unprocessed:bool =False) -> None:
validate_uint256(window_start_timestamp, title='timestamp')
if block_list is None or len(block_list) == 0:
return
#if we are given a block that is not one of the two allowed classes, try converting it.
if len(block_list) > 0 and not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
#first we delete any blocks we have in the same window that are not in the new block list
local_chronological_timestamp_block_window = self.chain_head_db.load_chronological_block_window(window_start_timestamp)
if local_chronological_timestamp_block_window is not None:
local_block_hash_list = [x[1] for x in local_chronological_timestamp_block_window]
new_block_hash_list = [block.hash for block in block_list]
block_hashes_to_delete = effecient_diff(new_block_hash_list, local_block_hash_list)
if len(block_hashes_to_delete) > 0:
self.logger.debug("deleting existing blocks in chronological window {}".format(block_hashes_to_delete))
for block_hash_to_delete in block_hashes_to_delete:
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(block_hash_to_delete)
if len(block_list) > 0:
self.logger.debug("starting block import for chronological block window")
#if block list is empty, load the local historical root hashes and delete them all
for i in range(len(block_list)):
# Reset this after each block imports
blocks_that_have_been_reorganized = set()
wallet_address = block_list[i].header.chain_address
while True:
try:
self.import_block(block_list[i], wallet_address = wallet_address, save_block_head_hash_timestamp = save_block_head_hash_timestamp, allow_unprocessed=allow_unprocessed)
break
except (UnprocessedBlockNotAllowed, ParentNotFound) as e:
# Because of the timestamps being in seconds, there may be multiple blocks that depend on each other
# with the same timestamp, and they could be out of order. So we attempt to reorganize the blocks
# and import again. If it fails again we will raise the exception.
if block_list[i].header.hash in blocks_that_have_been_reorganized:
self.logger.debug("Already tried reorganizing this block.")
raise e
self.logger.debug("Attempting to reorganize chronological window for import")
blocks_that_have_been_reorganized.add(block_list[i].header.hash)
block_list = reorganize_chronological_block_list_for_correct_chronological_order_at_index(block_list, i, self.logger)
else:
self.logger.debug("importing an empty chronological window. going to make sure we have a saved historical root hash")
historical_root_hashes = self.chain_head_db.get_historical_root_hashes()
if historical_root_hashes is not None:
#historical_root_hashes_dict = dict(historical_root_hashes)
#if it does exist, make sure it is the same as the last one. if not, then delete all newer
try:
self.chain_head_db.propogate_previous_historical_root_hash_to_timestamp(window_start_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
except AppendHistoricalRootHashTooOld:
self.logger.debug("Tried to propogate the previous historical root hash but there was none. This shouldn't happen")
#self.logger.debug("historical root hashes after chronological block import {}".format(self.chain_head_db.get_historical_root_hashes()))
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
if len(block_list) > 0:
self.logger.debug("importing chain")
#if we are given a block that is not one of the two allowed classes, try converting it.
if not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
wallet_address = block_list[0].header.chain_address
for block in block_list:
self.import_block(block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
wallet_address = wallet_address,
allow_replacement = allow_replacement)
# If we started with a longer chain, and all the imported blocks match ours, our chain will remain longer even after importing the new one.
# To fix this, we need to delete any of our blocks that extend beyond the length of the chain we are importing.
# First make sure the whole chain imported correctly. If not, then we don't need to do anything.
try:
local_canonical_head = self.chaindb.get_canonical_head(wallet_address)
imported_canonical_head = block_list[-1].header
#self.logger.debug("imported chain head hash {}. actual chain head hash {}".format(encode_hex(imported_canonical_head.hash), encode_hex(local_canonical_head.hash)))
if imported_canonical_head.block_number < local_canonical_head.block_number:
if self.chaindb.is_in_canonical_chain(imported_canonical_head.hash):
# Our chain is the same as the imported one, but we have some extra blocks on top. In this case, we would like to prune our chain
# to match the imported one.
# We only need to purge the next block after the imported chain. The vm will automatically purge all children
self.logger.debug("After importing a chain, our local chain is identical except with additional blocks on top. We will prune the top blocks to bring"
" our chain in line with the imported one.")
block_number_to_purge = imported_canonical_head.block_number + 1
hash_to_purge = self.chaindb.get_canonical_block_hash(BlockNumber(block_number_to_purge), wallet_address)
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(hash_to_purge, save_block_head_hash_timestamp)
except CanonicalHeadNotFound:
pass
from hvm.utils.profile import profile
@profile(sortby='cumulative')
def import_block_with_profiler(self, *args, **kwargs):
self.import_block(*args, **kwargs)
def import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
wallet_address = None,
allow_unprocessed = True,
allow_replacement = True,
ensure_block_unchanged:bool = True,
microblock_origin: bool = False) -> BaseBlock:
#we handle replacing blocks here
#this includes deleting any blocks that it might be replacing
#then we start the journal db
#then within _import_block, it can commit the journal
#but we won't persist until it gets out here again.
wallet_address = block.header.chain_address
# we need to re-initialize the chain for the new wallet address.
if wallet_address != self.wallet_address:
self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(wallet_address)))
self.set_new_wallet_address(wallet_address=wallet_address)
journal_enabled = False
#if we are given a block that is not one of the two allowed classes, try converting it.
#There is no reason why this should be a queueblock, because a queueblock would never come over the network; it
#is always generated locally, and should have the correct class.
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# Set the queue block timestamp to now, when it is being imported.
block = block.copy(header=block.header.copy(timestamp=int(time.time())))
else:
if block.header.chain_address == self.genesis_wallet_address and block.header.block_number == 0:
try:
our_genesis_hash = self.chaindb.get_canonical_block_header_by_number(BlockNumber(0), self.genesis_wallet_address).hash
except HeaderNotFound:
raise NoGenesisBlockPresent("Tried importing a block, but we have no genesis block loaded. Need to load a genesis block first.")
if block.header.hash == our_genesis_hash:
return block
else:
raise ValidationError("Tried to import a new genesis block on the genesis chain. This is not allowed.")
if len(block.transactions) == 0 and len(block.receive_transactions) == 0:
# if block.reward_bundle is None:
# raise ValidationError('The block must have at least 1 transaction, or a non-zero reward bundle. Reward bundle = None')
if (block.reward_bundle.reward_type_1.amount == 0 and block.reward_bundle.reward_type_2.amount == 0):
raise RewardAmountRoundsToZero('The block has no send or receive transactions, and the reward bundle has amount = 0 for all types of rewards. This is not allowed. If this is just a reward block this usually means more time needs to pass before creating reward bundle.')
#if we are adding to the top of the chain, or beyond, we need to check for unprocessed blocks
#handle deleting any unprocessed blocks that will be replaced.
if block.number >= self.header.block_number:
existing_unprocessed_block_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number)
if (existing_unprocessed_block_hash != block.hash) and (existing_unprocessed_block_hash is not None):
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace an unprocessed block.")
#check to make sure the parent matches the one we have
if block.number != 0:
# if block.number == self.header.block_number:
# existing_parent_hash = self.chaindb.get_canonical_head_hash(self.wallet_address)
# else:
existing_unprocessed_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
if existing_unprocessed_parent_hash is not None:
if block.header.parent_hash != existing_unprocessed_parent_hash:
raise ParentNotFound("Parent is unprocessed. Parent hash = {}, this hash = {}".format(
encode_hex(existing_unprocessed_parent_hash), encode_hex(block.header.parent_hash)))
else:
try:
existing_canonical_parent_hash = self.chaindb.get_canonical_block_header_by_number(block.header.block_number-1, block.header.chain_address).hash
if block.header.parent_hash != existing_canonical_parent_hash:
raise ParentNotFound("Parent is canonical. Parent hash = {}, this hash = {}".format(
encode_hex(existing_canonical_parent_hash), encode_hex(block.header.parent_hash)))
except HeaderNotFound:
pass
#lets delete the unprocessed block, and its children, then import
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_unprocessed_block(existing_unprocessed_block_hash)
#check to see if this is the same hash that was already saved as unprocessed
if block.number > self.header.block_number:
#check that the parent hash matches what we have.
existing_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
#we can allow this for unprocessed blocks as long as we have the parent in our database
if existing_parent_hash == block.header.parent_hash:
if block.hash == self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number):
#we already imported this one
return_block = block
else:
#save as unprocessed
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
if journal_enabled:
self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
else:
raise ParentNotFound('Parent is unprocessed 2')
#now, if it is the head of the chain, lets make sure the parent hash is correct.
if block.number == self.header.block_number and block.number != 0:
if block.header.parent_hash != self.chaindb.get_canonical_head_hash(chain_address= self.wallet_address):
raise ParentNotFound("Block is at the head of the chain")
if block.number < self.header.block_number:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace a canonical block")
self.logger.debug("went into block replacing mode")
self.logger.debug("block.number = {}, self.header.block_number = {}".format(block.number,self.header.block_number))
self.logger.debug("this chains wallet address = {}, this block's sender = {}".format(encode_hex(self.wallet_address), encode_hex(block.sender)))
#check to see if we can load the existing canonical block
existing_block_header = self.chaindb.get_canonical_block_header_by_number(block.number, self.wallet_address)
if existing_block_header.hash == block.header.hash:
self.logger.debug("tried to import a block that has a hash that matches the local block. no import required.")
return block
else:
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_block_and_all_children_and_set_parent_as_chain_head(existing_block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
#check to see if this block is chronologically inconsistent - usually due to reward block that used proof from this chain
block_hashes_leading_to_inconsistency = self.check_block_chronological_consistency(block)
if len(block_hashes_leading_to_inconsistency) > 0:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to import chronologically inconsistent block. Block hashes leading to inconsistency = {}.".format([encode_hex(x) for x in block_hashes_leading_to_inconsistency]))
else:
# revert all of the blocks leading to the inconsistency.
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
for block_hash in block_hashes_leading_to_inconsistency:
self.logger.debug("Purging block {} to preserve chronological consistency".format(encode_hex(block_hash)))
block_header = self.chaindb.get_block_header_by_hash(block_hash)
# This should be impossible, but lets double check that none of these blocks are on the same chain as this block
if block_header.chain_address == block.header.chain_address:
raise Exception("Tried to revert chronologically inconsistent block on this same chain. This should never happen...")
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
try:
return_block = self._import_block(block = block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = allow_unprocessed,
ensure_block_unchanged= ensure_block_unchanged,
microblock_origin = microblock_origin)
# handle importing unprocessed blocks here because doing it recursively results in maximum recursion depth exceeded error
if not self.chaindb.is_block_unprocessed(return_block.hash):
self.logger.debug("Checking to see if block has unprocessed children")
self.import_all_unprocessed_descendants(return_block.hash,
perform_validation= True,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = True)
except Exception as e:
if journal_enabled:
self.logger.debug('discarding journal')
self.discard_journal(journal_record)
self.disable_journal_db()
raise e
if journal_enabled:
self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
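# Minimal usage sketch (hypothetical; assumes `chain` is initialized and `block` was received from
# a peer or built locally):
#   imported = chain.import_block(block, perform_validation=True)
# A block whose receivable transactions or reward-proof sender blocks are missing, or whose parent
# is still unprocessed, is saved as unprocessed instead (unless allow_unprocessed=False, in which
# case UnprocessedBlockNotAllowed is raised).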
def _import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
allow_unprocessed = True,
ensure_block_unchanged: bool = True,
microblock_origin: bool = False) -> BaseBlock:
"""
Imports a complete block.
"""
self.logger.debug("importing block {} with number {}".format(block.__repr__(), block.number))
self.validate_time_from_genesis_block(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# If it was a queueblock, then the header will have changed after importing
perform_validation = False
ensure_block_unchanged = False
queue_block = True
else:
queue_block = False
if not self.chaindb.is_block_unprocessed(block.header.parent_hash):
#this part checks to make sure the parent exists
try:
vm = self.get_vm(timestamp = block.header.timestamp)
self.logger.debug("importing block with vm {}".format(vm.__repr__()))
if queue_block:
imported_block = vm.import_block(block, private_key = self.private_key)
else:
imported_block = vm.import_block(block)
# Validate the imported block.
if ensure_block_unchanged:
if microblock_origin:
# this started out as a microblock. So we only ensure the microblock fields are unchanged.
self.logger.debug('ensuring block unchanged. microblock correction')
corrected_micro_block = block.copy(header = block.header.copy(
receipt_root = imported_block.header.receipt_root,
bloom = imported_block.header.bloom,
gas_limit = imported_block.header.gas_limit,
gas_used = imported_block.header.gas_used,
account_hash = imported_block.header.account_hash,
account_balance = imported_block.header.account_balance,
))
ensure_imported_block_unchanged(imported_block, corrected_micro_block)
else:
self.logger.debug('ensuring block unchanged')
ensure_imported_block_unchanged(imported_block, block)
else:
self.logger.debug('Not checking block for changes.')
if perform_validation:
self.validate_block(imported_block)
#self.chain_head_db.set_chain_head_hash(self.wallet_address, imported_block.header.hash)
if save_block_head_hash_timestamp:
self.chain_head_db.add_block_hash_to_chronological_window(imported_block.header.hash, imported_block.header.timestamp)
self.save_chain_head_hash_to_trie_for_time_period(imported_block.header)
self.chain_head_db.set_chain_head_hash(imported_block.header.chain_address, imported_block.header.hash)
self.chain_head_db.persist(True)
self.chaindb.persist_block(imported_block)
vm.state.account_db.persist(save_account_hash = True, wallet_address = self.wallet_address)
#here we must delete the unprocessed lookup before importing children
#because the children cannot be imported if their chain parent is unprocessed.
#but we cannot delete the lookup for unprocessed children yet.
self.chaindb.remove_block_from_unprocessed(imported_block)
# Add chronological consistency lookups
self.save_block_chronological_consistency_lookups(imported_block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'IMPORTED_BLOCK: number %s | hash %s',
imported_block.number,
encode_hex(imported_block.hash),
)
# Make sure our wallet address hasn't magically changed
if self.wallet_address != imported_block.header.chain_address:
raise ValidationError("Attempted to import a block onto the wrong chain.")
return_block = imported_block
except ReceivableTransactionNotFound as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of ReceivableTransactionNotFound error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
if self.raise_errors:
raise e
except RewardProofSenderBlockMissing as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of RewardProofSenderBlockMissing error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
else:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
return return_block
def import_all_unprocessed_descendants(self, block_hash, *args, **kwargs):
# 1) get unprocessed children
# 2) loop through and import
# 3) if child imports, add their unprocessed children to list, and delete that block from unprocessed
# 4) if list of unprocessed children has 0 length, break
# need to step one level at a time. We use a queue to achieve this effect. It won't get to the next level
# until it finishes all of the blocks on this level. So it goes one level at a time.
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED BLOCKS")
# try to import all children
children_block_hashes = self.chaindb.get_block_children(block_hash)
if children_block_hashes is not None:
block_hashes_to_import = deque(children_block_hashes)
# iterate over children
while True:
# remove from right side
current_block_hash_to_import = block_hashes_to_import.pop()
if self.chaindb.is_block_unprocessed(current_block_hash_to_import):
self.logger.debug("importing child block")
try:
child_block = self.get_block_by_hash(current_block_hash_to_import)
if child_block.header.chain_address != self.wallet_address:
#self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(child_block.header.chain_address)))
self.set_new_wallet_address(wallet_address=child_block.header.chain_address)
self._import_block(child_block, *args, **kwargs)
#if the block imported, add its children to the deque
if not self.chaindb.is_block_unprocessed(current_block_hash_to_import):
# it imported successfully
if self.chaindb.has_unprocessed_children(current_block_hash_to_import):
children_block_hashes = self.chaindb.get_block_children(current_block_hash_to_import)
if children_block_hashes is not None:
block_hashes_to_import.extendleft(children_block_hashes)
# we have queued up its children to be imported. Assuming exceptions don't occur, we can remove this block from the unprocessed children lookup.
self.chaindb.delete_unprocessed_children_blocks_lookup(current_block_hash_to_import)
except Exception as e:
self.logger.error("Tried to import an unprocessed child block and got this error {}".format(e))
if len(block_hashes_to_import) == 0:
return
self.chaindb.delete_unprocessed_children_blocks_lookup(block_hash)
def save_block_chronological_consistency_lookups(self, block: BaseBlock) -> None:
'''
We need to require that the proof sender's chain doesn't add a block after their claimed chain_head_hash
and before the timestamp of the block being imported.
:param block:
:return:
'''
block_header = block.header
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.logger.debug("saving chronological consistency lookup for chain {}, block {}, timestamp {}".format(encode_hex(sender_chain_header.chain_address), block_number_with_restrictions, block_header.timestamp))
self.chaindb.add_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
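# Illustrative example (hypothetical numbers): if a reward proof claims the sender's chain head is
# block 7 and this block's timestamp is 5000, a consistency key [5000, <this block's hash>] is
# stored against block number 8 of the sender's chain; importing a block 8 there with a timestamp
# earlier than 5000 would then require reverting this block first (see
# check_block_chronological_consistency below).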
def save_block_as_unprocessed(self, block):
#if it is already saved as unprocessed, do nothing
if self.chaindb.is_block_unprocessed(block.hash):
return block
#before adding to unprocessed blocks, make sure the receive transactions are valid
# for receive_transaction in block.receive_transactions:
# #there must be at least 1 to get this far
# receive_transaction.validate()
#now we add it to unprocessed blocks
self.chaindb.save_block_as_unprocessed(block)
#save the transactions to db
vm = self.get_vm(timestamp = block.header.timestamp)
vm.save_items_to_db_as_trie(block.transactions, block.header.transaction_root)
vm.save_items_to_db_as_trie(block.receive_transactions, block.header.receive_transaction_root)
#we don't want to persist because that will add it to the canonical chain.
#We just want to save it to the database so we can process it later if need be.
self.chaindb.persist_non_canonical_block(block)
#self.chaindb.persist_block(block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'SAVED_BLOCK_AS_UNPROCESSED: number %s | hash %s',
block.number,
encode_hex(block.hash),
)
return block
def import_current_queue_block(self) -> BaseBlock:
return self.import_block(self.queue_block)
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
reward_bundle = self.get_consensus_db().create_reward_bundle_for_block(self.wallet_address, node_staking_score_list, at_timestamp=Timestamp(int(time.time())))
# #testing
# reward_bundle = reward_bundle.copy(reward_type_2 = reward_bundle.reward_type_2.copy(amount=0))
self.queue_block = self.queue_block.copy(reward_bundle = reward_bundle)
return self.import_current_queue_block()
def get_all_chronological_blocks_for_window(self, window_timestamp:Timestamp) -> List[BaseBlock]:
validate_uint256(window_timestamp, title='timestamp')
chronological_blocks = self.chain_head_db.load_chronological_block_window(window_timestamp)
if chronological_blocks is None:
return None
else:
list_of_blocks = []
for chronological_block in chronological_blocks:
block_hash = chronological_block[1]
new_block = self.get_block_by_hash(block_hash)
list_of_blocks.append(new_block)
return list_of_blocks
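# Usage sketch (hypothetical): window_timestamp should be the start of a chronological window,
# i.e. a multiple of TIME_BETWEEN_HEAD_HASH_SAVE.
#   blocks = chain.get_all_chronological_blocks_for_window(window_timestamp)
#   # returns None if no chronological window has been saved for that timestamp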
#
# Chronologically consistent blockchain db API
#
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
'''
Checks to see if the block breaks any chronological consistency. If it does, it will return a list of
hashes of the blocks that need to be reverted before this block can be imported.
:param block:
:return:
'''
consistency_keys = self.chaindb.get_block_chronological_consistency_keys(block.header.chain_address, block.header.block_number)
block_hashes_to_revert = list()
for consistency_key in consistency_keys:
if consistency_key[0] > block.header.timestamp:
block_hashes_to_revert.append(consistency_key[1])
return block_hashes_to_revert
#
# Validation API
#
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
if chain_address is None:
chain_address = self.wallet_address
try:
canonical_head = self.chaindb.get_canonical_head(chain_address=chain_address)
except CanonicalHeadNotFound:
return Timestamp(0)
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return Timestamp(canonical_head.timestamp + min_allowed_time_between_blocks)
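# Usage sketch (hypothetical): callers can compare the returned timestamp against the current time
# to decide whether a new block may be created on this chain yet:
#   can_make_block_now = int(time.time()) >= chain.get_allowed_time_of_next_block()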
def validate_block(self, block: BaseBlock) -> None:
"""
Performs validation on a block that is either being mined or imported.
Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
"""
self.validate_gaslimit(block.header)
def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
#parent_header = self.get_block_header_by_hash(header.parent_hash)
#low_bound, high_bound = compute_gas_limit_bounds(parent_header)
#if header.gas_limit < low_bound:
# raise ValidationError(
# "The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
# encode_hex(header.hash), header.gas_limit, low_bound))
if header.gas_limit > BLOCK_GAS_LIMIT:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, BLOCK_GAS_LIMIT))
def validate_block_specification(self, block) -> bool:
'''
This validates everything we can without looking at the blockchain database. It doesn't need to assume
that we have the block that sent the transactions.
Things that this can check:
block signature
send transaction signatures
receive transaction signatures - don't need to check this; it doesn't add any security
signatures of send transactions within receive transactions
send transaction root matches transactions
receive transaction root matches transactions
'''
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
block.header.check_signature_validity()
for transaction in block.transactions:
transaction.validate()
for transaction in block.receive_transactions:
transaction.validate()
send_tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
if block.header.transaction_root != send_tx_root_hash:
raise ValidationError("Block has invalid transaction root")
receive_tx_root_hash, _ = make_trie_root_and_nodes(block.receive_transactions)
if block.header.receive_transaction_root != receive_tx_root_hash:
raise ValidationError("Block has invalid receive transaction root")
return True
#
# Stake API
#
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
if wallet_address is None:
wallet_address = self.wallet_address
coin_mature_time_for_staking = self.get_vm(timestamp = Timestamp(int(time.time()))).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_mature_stake(wallet_address, coin_mature_time_for_staking, raise_canonical_head_not_found_error = raise_canonical_head_not_found_error)
# gets the stake for the timestamp corresponding to the chronological block window, so it is all blocks for the next 1000 seconds.
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp: Timestamp, timestamp_for_stake: Timestamp = None):
if timestamp_for_stake is not None and timestamp_for_stake < chronological_block_window_timestamp:
raise ValidationError("Cannot get chronological block window stake for a timestamp before the window")
if timestamp_for_stake is None:
timestamp_for_stake = int(time.time())
chronological_block_hash_timestamps = self.chain_head_db.load_chronological_block_window(chronological_block_window_timestamp)
chronological_block_hashes = [x[1] for x in chronological_block_hash_timestamps]
coin_mature_time_for_staking = self.get_vm(timestamp=timestamp_for_stake).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_total_block_stake_of_block_hashes(chronological_block_hashes, coin_mature_time_for_staking, timestamp_for_stake)
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
'''
returns one of the newest blocks we have seen.
:return:
'''
before_this_timestamp = int(time.time()) - 60 # ask the peer for a block that was received more than 1 minute ago
current_historical_window = int(time.time() / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
for timestamp in range(current_historical_window,
current_historical_window-NUMBER_OF_HEAD_HASH_TO_SAVE*TIME_BETWEEN_HEAD_HASH_SAVE,
-1* TIME_BETWEEN_HEAD_HASH_SAVE):
chronological_window = self.chain_head_db.load_chronological_block_window(timestamp)
if chronological_window is not None:
chronological_window.sort(key=lambda x: -1*x[0])
for timestamp_hash in chronological_window:
if timestamp_hash[0] < before_this_timestamp:
return timestamp_hash[1]
#if we get to here then we don't have any blocks within all chronological block windows...
raise NoChronologicalBlocks()
#
# Min Block Gas API used for throttling the network
#
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
'''
re-initializes the system with the last saved min gas price and network tpc capability
'''
hist_min_gas_price = self.chaindb.load_historical_minimum_gas_price()
hist_tpc_cap = self.chaindb.load_historical_network_tpc_capability()
hist_tx_per_centisecond = self.chaindb.load_historical_tx_per_centisecond()
if hist_min_gas_price is not None:
init_min_gas_price = hist_min_gas_price[-1][1]
else:
init_min_gas_price = 1
if hist_tpc_cap is not None:
init_tpc_cap = hist_tpc_cap[-1][1]
else:
init_tpc_cap = self.get_local_tpc_cap()
if hist_tx_per_centisecond is not None:
init_tpc = hist_tx_per_centisecond[-1][1]
else:
init_tpc = None
self.chaindb.initialize_historical_minimum_gas_price_at_genesis(init_min_gas_price, init_tpc_cap, init_tpc)
def update_current_network_tpc_capability(self, current_network_tpc_cap: int, update_min_gas_price:bool = True) -> None:
validate_uint256(current_network_tpc_cap, title="current_network_tpc_cap")
self.chaindb.save_current_historical_network_tpc_capability(current_network_tpc_cap)
if update_min_gas_price:
current_centisecond = int(time.time()/100) * 100
timestamp_min_gas_price_updated = self.update_tpc_from_chronological(update_min_gas_price = True)
if timestamp_min_gas_price_updated > current_centisecond:
self.chaindb._recalculate_historical_mimimum_gas_price(current_centisecond)
def update_tpc_from_chronological(self, update_min_gas_price: bool = True):
# start at the newest window; if the tpc is the same as what is in the database, stop, but if it is different keep going back
self.logger.debug("Updating tpc from chronological")
current_historical_window = int(time.time()/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
current_centisecond = int(time.time()/100) * 100
# load this once to find out if it's None. If it is None, then the node just started, so let's only go back 50 steps
#hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
end_outer = current_historical_window-20*TIME_BETWEEN_HEAD_HASH_SAVE
for historical_window_timestamp in range(current_historical_window,
end_outer,
-TIME_BETWEEN_HEAD_HASH_SAVE):
tpc_sum_dict = {}
chronological_block_window = self.chain_head_db.load_chronological_block_window(historical_window_timestamp)
self.logger.debug('loading chronological block window for timestamp {}'.format(historical_window_timestamp))
#zero the dictionary
if historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE < current_centisecond:
end = historical_window_timestamp +TIME_BETWEEN_HEAD_HASH_SAVE
else:
end = current_centisecond+100
for timestamp in range(historical_window_timestamp, end, 100):
tpc_sum_dict[timestamp] = 0
if chronological_block_window is not None:
for timestamp_block_hash in chronological_block_window:
#first count up the tx in the block
# if it is 0, then set it to 1 in case the block contains only receive transactions
num_tx_in_block = self.chaindb.get_number_of_total_tx_in_block(timestamp_block_hash[1])
if num_tx_in_block == 0:
num_tx_in_block = 1
#then add them to the dict
centisecond_window_for_block = int(timestamp_block_hash[0]/100) * 100
if centisecond_window_for_block <= end:
tpc_sum_dict[centisecond_window_for_block] += num_tx_in_block
same_as_database = self._update_tpc_from_chronological(tpc_sum_dict)
if same_as_database:
break
if update_min_gas_price:
self.chaindb._recalculate_historical_mimimum_gas_price(historical_window_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
return historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE
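# Shape of the dict handed to _update_tpc_from_chronological below (illustrative sketch, values
# invented): keys are the start timestamps of 100-second windows, values are the number of
# transactions counted in each window, e.g. {1600000000: 12, 1600000100: 7, 1600000200: 0}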
def _update_tpc_from_chronological(self, new_hist_tpc_dict):
'''
returns True if they are all the same as what we already had in the database, otherwise it returns False
'''
if not isinstance(new_hist_tpc_dict, dict):
raise ValidationError("Expected a dict. Didn't get a dict.")
hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
difference_found = False
if hist_tpc is None:
hist_tpc = list(new_hist_tpc_dict.items())
else:
hist_tpc_dict = dict(hist_tpc)
for timestamp, tpc in new_hist_tpc_dict.items():
if timestamp not in hist_tpc_dict or hist_tpc_dict[timestamp] != tpc:
#if tpc != 0:
difference_found = True
hist_tpc_dict[timestamp] = tpc
hist_tpc = list(hist_tpc_dict.items())
#print(hist_tpc)
#save it to db
self.chaindb.save_historical_tx_per_centisecond(hist_tpc, de_sparse = False)
return not difference_found
def get_local_tpc_cap(self) -> int:
#base it on the time it takes to import a block
from hvm.utils.profile import profile
from hvm.db.backends.memory import MemoryDB
from hvm import MainnetChain
from hvm.chains.mainnet import (
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY,
MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT,
)
from hvm.constants import random_private_keys
db = MemoryDB()
chain = MainnetChain.from_genesis(db,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY.public_key.to_canonical_address(),
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
private_key = TPC_CAP_TEST_GENESIS_PRIVATE_KEY)
block_to_import = chain.get_vm(timestamp = MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT['header']['timestamp']).get_block_class().from_dict(MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT)
chain.genesis_wallet_address = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['chain_address']
chain.genesis_block_timestamp = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['timestamp']
#@profile(sortby='cumulative')
def temp():
chain.import_block(block_to_import)
start_time = time.time()
temp()
duration = time.time()-start_time
#self.logger.debug('duration = {} seconds'.format(duration))
tx_per_centisecond = int(100/duration)
return tx_per_centisecond
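# Worked example (illustrative): if importing the TPC cap test block takes 0.05 seconds, this
# returns int(100 / 0.05) = 2000 transactions per centisecond as the local capability.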
#
# Consensus DB passthrough's that depend on block timestamp
#
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
# This function should always use the vm for the current timestamp, so we don't need to ask for a timestamp
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score(private_key,
network_id,
peer_wallet_address,
after_block_number)
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
network_id = self.network_id
# This always occurs at the current time, so we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score_string_private_key(private_key_string,
network_id,
peer_wallet_address,
after_block_number)
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
# This depends on when the staking score was created. So get the consensus db given by that timestamp
return self.get_consensus_db(timestamp = node_staking_score.timestamp).validate_node_staking_score(node_staking_score, since_block_number)
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
# This always occurs at the current time, so we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).save_health_request(peer_wallet_address,
response_time_in_micros)
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_current_peer_node_health(peer_wallet_address)
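# Usage sketch (illustrative; `chain` stands for a fully constructed chain instance and
# `peer_address` for a known peer wallet address, both hypothetical):
#   chain.save_health_request(peer_address, response_time_in_micros=1500)
#   health = chain.get_current_peer_node_health(peer_address)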
| 46.989239
| 285
| 0.672601
| 14,187
| 117,896
| 5.279693
| 0.05498
| 0.020547
| 0.012189
| 0.029318
| 0.614942
| 0.54899
| 0.499112
| 0.457271
| 0.420597
| 0.379971
| 0
| 0.002734
| 0.264852
| 117,896
| 2,508
| 286
| 47.007974
| 0.861488
| 0.187598
| 0
| 0.404427
| 0
| 0.003353
| 0.076882
| 0.003369
| 0
| 0
| 0
| 0.000399
| 0.000671
| 1
| 0.105298
| false
| 0.004024
| 0.088531
| 0.005366
| 0.259557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c29bc02bf13f97d4663f0060faece281922045c
| 3,113
|
py
|
Python
|
integreat_cms/api/v3/regions.py
|
Integreat/cms-django
|
ab0a89576ae901f4b30aa8e9c65ff43c44654a80
|
[
"Apache-2.0"
] | 21
|
2018-10-26T20:10:45.000Z
|
2020-10-22T09:41:46.000Z
|
integreat_cms/api/v3/regions.py
|
Integreat/cms-django
|
ab0a89576ae901f4b30aa8e9c65ff43c44654a80
|
[
"Apache-2.0"
] | 392
|
2018-10-25T08:34:07.000Z
|
2020-11-19T08:20:30.000Z
|
integreat_cms/api/v3/regions.py
|
digitalfabrik/integreat-cms
|
ab0a89576ae901f4b30aa8e9c65ff43c44654a80
|
[
"Apache-2.0"
] | 23
|
2019-03-06T17:11:35.000Z
|
2020-10-16T04:36:41.000Z
|
"""
This module includes functions related to the regions API endpoint.
"""
from django.http import JsonResponse
from ...cms.models import Region
from ...cms.constants import region_status
from ..decorators import json_response
def transform_region(region):
"""
Function to create a JSON from a single region object, including information on whether the region is live/active.
:param region: The region object which should be converted
:type region: ~integreat_cms.cms.models.regions.region.Region
:return: data necessary for API
:rtype: dict
"""
return {
"id": region.id,
"name": region.full_name,
"path": region.slug,
"live": region.status == region_status.ACTIVE,
"prefix": region.prefix,
"name_without_prefix": region.name,
"plz": region.postal_code,
"extras": region.offers.exists(),
"events": region.events_enabled,
"pois": region.locations_enabled,
"push_notifications": region.push_notifications_enabled,
"longitude": region.longitude,
"latitude": region.latitude,
"bounding_box": region.bounding_box.api_representation,
"aliases": region.aliases,
"tunews": region.tunews_enabled,
}
def transform_region_by_status(region):
"""
Function to create a JSON from a single "active" region object.
:param region: The region object which should be converted
:type region: ~integreat_cms.cms.models.regions.region.Region
:return: data necessary for API
:rtype: dict
"""
result = transform_region(region)
# Remove status
del result["live"]
return result
@json_response
def regions(_):
"""
List all regions that are not archived and transform result into JSON
:return: JSON object according to APIv3 regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(transform_region, Region.objects.exclude(status=region_status.ARCHIVED))
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
@json_response
def liveregions(_):
"""
List all regions that are active (live) and transform result into JSON
:return: JSON object according to APIv3 live regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(
transform_region_by_status,
Region.objects.filter(status=region_status.ACTIVE),
)
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
@json_response
def hiddenregions(_):
"""
List all regions that are hidden and transform result into JSON
:return: JSON object according to APIv3 hidden regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(
transform_region_by_status,
Region.objects.filter(status=region_status.HIDDEN),
)
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
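# Illustrative shape of one entry in the JSON returned by the views above (sketch; the values
# are invented, the keys come from transform_region):
# {
#     "id": 1,
#     "name": "City of Example",
#     "path": "example",
#     "live": true,
#     "prefix": "City of",
#     "name_without_prefix": "Example",
#     "plz": "12345",
#     "extras": false,
#     "events": true,
#     "pois": false,
#     "push_notifications": true,
#     "longitude": 10.0,
#     "latitude": 50.0,
#     "bounding_box": [[9.9, 49.9], [10.1, 50.1]],
#     "aliases": null,
#     "tunews": false
# }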
| 28.559633
| 106
| 0.673305
| 364
| 3,113
| 5.651099
| 0.288462
| 0.040836
| 0.029169
| 0.033544
| 0.582402
| 0.561011
| 0.561011
| 0.561011
| 0.561011
| 0.524064
| 0
| 0.001266
| 0.238677
| 3,113
| 108
| 107
| 28.824074
| 0.866667
| 0.401863
| 0
| 0.280702
| 0
| 0
| 0.07093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.070175
| 0
| 0.245614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2a4913dd37bcfdaee2efb5a4e62c145d6170b0
| 10,964
|
py
|
Python
|
cli/src/ansible/AnsibleVarsGenerator.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
cli/src/ansible/AnsibleVarsGenerator.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
cli/src/ansible/AnsibleVarsGenerator.py
|
romsok24/epiphany
|
f058984939561fc8d51288765976118ae12e6c32
|
[
"Apache-2.0"
] | null | null | null |
import copy
import os
from cli.src.Config import Config
from cli.src.helpers.build_io import (get_ansible_path,
get_ansible_path_for_build,
get_ansible_vault_path)
from cli.src.helpers.data_loader import (load_all_schema_objs_from_directory,
load_schema_obj, types)
from cli.src.helpers.doc_list_helpers import (ExpectedSingleResultException,
select_first, select_single)
from cli.src.helpers.naming_helpers import to_feature_name, to_role_name
from cli.src.helpers.ObjDict import ObjDict
from cli.src.helpers.yaml_helpers import dump
from cli.src.schema.DefaultMerger import DefaultMerger
from cli.src.Step import Step
from cli.version import VERSION
class AnsibleVarsGenerator(Step):
def __init__(self, inventory_creator=None, inventory_upgrade=None):
super().__init__(__name__)
self.inventory_creator = inventory_creator
self.inventory_upgrade = inventory_upgrade
self.roles_with_generated_vars = []
self.manifest_docs = []
if inventory_creator is not None and inventory_upgrade is None:
self.cluster_model = inventory_creator.cluster_model
self.config_docs = [self.cluster_model] + inventory_creator.config_docs
elif inventory_upgrade is not None and inventory_creator is None:
self.cluster_model = inventory_upgrade.cluster_model
self.config_docs = []
defaults = load_all_schema_objs_from_directory(types.DEFAULT, 'common', 'configuration')
for default in defaults:
config_doc = select_first(inventory_upgrade.config_docs, lambda x: x.kind == default.kind)
if config_doc is None:
self.config_docs.append(default)
else:
self.config_docs.append(config_doc)
self.manifest_docs = inventory_upgrade.manifest_docs
else:
raise Exception('Invalid AnsibleVarsGenerator configuration')
def __enter__(self):
super().__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def generate(self):
self.logger.info('Generate Ansible vars')
self.is_upgrade_run = self.inventory_creator is None
if self.is_upgrade_run:
ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
else:
ansible_dir = get_ansible_path(self.cluster_model.specification.name)
cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
clean_cluster_model = self.get_clean_cluster_model()
with open(cluster_config_file_path, 'w') as stream:
if 'name' in clean_cluster_model:
del clean_cluster_model['name'] # reserved word in ansible!
dump(clean_cluster_model, stream)
if self.is_upgrade_run:
# For upgrade we always need common, repository, image_registry, node_exporter and postgresql. Common is
# already provisioned from the cluster model constructed from the inventory. As PostgreSQL configuration
# is changed between versions (e.g. wal_keep_segments -> wal_keep_size) and sometimes previous parameters
# are not compatible with the new ones, defaults are used for template processing
roles_with_defaults = [
'haproxy', 'image_registry', 'jmx_exporter', 'kafka_exporter',
'node_exporter', 'postgres_exporter', 'postgresql', 'repository'
]
# now let's add any external configs we want to load
roles_with_defaults = [*roles_with_defaults, *self.inventory_upgrade.get_new_config_roles()]
# In special cases (like haproxy), where user specifies majority of the config, it's easier (and less
# awkward) to re-render config templates instead of modifying (for example with regular expressions)
# no-longer-compatible config files.
roles_with_manifest = ['filebeat', 'postgresql', 'repository']
else:
roles_with_defaults = self.inventory_creator.get_enabled_roles()
roles_with_manifest = [] # applies only to upgrades
for role in roles_with_defaults:
kind = 'configuration/' + to_feature_name(role)
document = select_first(self.config_docs, lambda x: x.kind == kind)
if document is None:
self.logger.warn('No config document for enabled role: ' + role)
continue
document.specification['provider'] = self.cluster_model.provider
self.write_role_vars(ansible_dir, role, document)
for role in roles_with_manifest:
kind = 'configuration/' + to_feature_name(role)
self.write_role_manifest_vars(ansible_dir, role, kind)
self.populate_group_vars(ansible_dir)
def write_role_vars(self, ansible_dir, role, document, vars_file_name='main.yml'):
vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
if not os.path.exists(vars_dir):
os.makedirs(vars_dir)
vars_file_path = os.path.join(vars_dir, vars_file_name)
with open(vars_file_path, 'w') as stream:
if 'name' in document:
del document['name'] # reserved word in ansible!
dump(document, stream)
if vars_file_name == 'main.yml':
self.roles_with_generated_vars.append(to_role_name(role))
def write_role_manifest_vars(self, ansible_dir, role, kind):
try:
cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
except ExpectedSingleResultException:
return # skip
document = select_first(self.manifest_docs, lambda x: x.kind == kind)
if document is None:
# If there is no document provided by the user, then fallback to defaults
document = load_schema_obj(types.DEFAULT, 'common', kind)
# Inject the required "version" attribute
document['version'] = VERSION
# Copy the "provider" value from the cluster model
document['provider'] = cluster_model['provider']
# Merge the document with defaults
with DefaultMerger([document]) as doc_merger:
document = doc_merger.run()[0]
self.write_role_vars(ansible_dir, role, document, vars_file_name='manifest.yml')
def populate_group_vars(self, ansible_dir):
main_vars = ObjDict()
main_vars['admin_user'] = self.cluster_model.specification.admin_user
main_vars['validate_certs'] = Config().validate_certs
main_vars['offline_requirements'] = Config().offline_requirements
main_vars['wait_for_pods'] = Config().wait_for_pods
main_vars['is_upgrade_run'] = self.is_upgrade_run
main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars)
main_vars['upgrade_components'] = Config().upgrade_components
main_vars['epiphany_version'] = VERSION
# Consider to move this to the provider level.
if self.cluster_model.provider != 'any':
main_vars['k8s_as_cloud_service'] = self.cluster_model.specification.cloud.k8s_as_cloud_service
else:
main_vars['k8s_as_cloud_service'] = False
if self.is_upgrade_run:
shared_config_doc = self.get_shared_config_from_manifest()
else:
shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config')
# Fallback if there is completely no trace of the shared-config doc
if shared_config_doc is None:
shared_config_doc = load_schema_obj(types.DEFAULT, 'common', 'configuration/shared-config')
self.set_vault_path(shared_config_doc)
main_vars.update(shared_config_doc.specification)
vars_dir = os.path.join(ansible_dir, 'group_vars')
if not os.path.exists(vars_dir):
os.makedirs(vars_dir)
vars_file_name = 'all.yml'
vars_file_path = os.path.join(vars_dir, vars_file_name)
with open(vars_file_path, 'a') as stream:
dump(main_vars, stream)
def set_vault_path(self, shared_config):
if shared_config.specification.vault_location == '':
shared_config.specification.vault_tmp_file_location = Config().vault_password_location
cluster_name = self.get_cluster_name()
shared_config.specification.vault_location = get_ansible_vault_path(cluster_name)
def get_cluster_name(self):
if 'name' in self.cluster_model.specification.keys():
return self.cluster_model.specification.name
elif self.inventory_upgrade is not None:
return os.path.basename(self.inventory_upgrade.build_dir)
return 'default'
def get_clean_cluster_model(self):
cluster_model = copy.copy(self.cluster_model)
self.clear_object(cluster_model, 'credentials')
return cluster_model
def get_shared_config_from_manifest(self):
# Reuse shared config from existing manifest
# Shared config contains the use_ha_control_plane flag which is required during upgrades
cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
try:
shared_config_doc = select_single(self.manifest_docs, lambda x: x.kind == 'configuration/shared-config')
shared_config_doc['provider'] = cluster_model['provider']
except ExpectedSingleResultException:
# If there is no shared-config doc inside the manifest file, this is probably a v0.3 cluster
# Returning None here (there is nothing to merge at this point) and
# hoping that the shared-config doc from defaults will be enough
return None
# Remove the unused supported_os list, if present, from the shared-config taken from the manifest, so we avoid namedlist merging errors.
# This was refactored in Epicli 1.0.x and is no longer needed at this stage.
if hasattr(shared_config_doc.specification, 'supported_os'):
del shared_config_doc.specification['supported_os']
# Merge the shared config doc with defaults
with DefaultMerger([shared_config_doc]) as doc_merger:
shared_config_doc = doc_merger.run()[0]
del shared_config_doc['provider']
return shared_config_doc
def clear_object(self, obj_to_clean, key_to_clean):
for key, val in obj_to_clean.items():
if key == key_to_clean:
obj_to_clean[key] = ''
continue
if isinstance(obj_to_clean[key], ObjDict):
self.clear_object(obj_to_clean[key], key_to_clean)
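# Usage sketch (illustrative; `creator` stands for an already initialised inventory creator,
# which is hypothetical here):
#   with AnsibleVarsGenerator(inventory_creator=creator) as generator:
#       generator.generate()
# Exactly one of inventory_creator / inventory_upgrade may be passed; otherwise __init__
# raises an Exception, as shown above.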
| 46.854701
| 124
| 0.671105
| 1,370
| 10,964
| 5.077372
| 0.20073
| 0.051754
| 0.038815
| 0.012076
| 0.301323
| 0.186314
| 0.115871
| 0.09977
| 0.075331
| 0.061242
| 0
| 0.001096
| 0.251368
| 10,964
| 233
| 125
| 47.055794
| 0.846369
| 0.150401
| 0
| 0.161677
| 0
| 0
| 0.08258
| 0.011413
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071856
| false
| 0.011976
| 0.071856
| 0
| 0.197605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2cb9849ca550c888fd888e8fc11648dd0f1d72
| 2,501
|
py
|
Python
|
plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
|
evernym/indy-plenum
|
dc390caa16c0b15dcc549d557ede6f64c0c1b842
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
|
evernym/indy-plenum
|
dc390caa16c0b15dcc549d557ede6f64c0c1b842
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
|
evernym/indy-plenum
|
dc390caa16c0b15dcc549d557ede6f64c0c1b842
|
[
"Apache-2.0"
] | 2
|
2017-12-13T21:14:54.000Z
|
2021-06-06T15:48:03.000Z
|
import pytest
from plenum.server.view_change.view_changer import ViewChanger
from stp_core.common.log import getlogger
from plenum.test.pool_transactions.helper import start_not_added_node, add_started_node
logger = getlogger()
@pytest.fixture(scope="module", autouse=True)
def tconf(tconf):
old_vc_timeout = tconf.VIEW_CHANGE_TIMEOUT
tconf.VIEW_CHANGE_TIMEOUT = 10
yield tconf
tconf.VIEW_CHANGE_TIMEOUT = old_vc_timeout
def test_no_instance_change_on_primary_disconnection_for_not_ready_node(
looper, txnPoolNodeSet, tdir, tconf,
allPluginsPath, sdk_pool_handle, sdk_wallet_steward):
"""
Test steps:
1. create a new node, but don't add it to the pool (so not send NODE txn), so that the node is not ready.
2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
3. make sure no InstanceChange sent by the new node
4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
6. make sure no InstanceChange sent by the new node
"""
# 1. create a new node, but don't add it to the pool (so not send NODE txn), so that the node is not ready.
sigseed, bls_key, new_node, node_ha, client_ha = \
start_not_added_node(looper,
tdir, tconf, allPluginsPath,
"TestTheta")
# 2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)
# 3. make sure no InstanceChange sent by the new node
assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
logger.info("Start added node {}".format(new_node))
# 4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
add_started_node(looper,
new_node,
node_ha,
client_ha,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_steward,
bls_key)
# 5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)
# 6. make sure no InstanceChange sent by the new node
assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
| 41
| 111
| 0.692123
| 362
| 2,501
| 4.574586
| 0.279006
| 0.046498
| 0.092391
| 0.066425
| 0.675121
| 0.644324
| 0.583937
| 0.583937
| 0.583937
| 0.583937
| 0
| 0.009564
| 0.247501
| 2,501
| 60
| 112
| 41.683333
| 0.870351
| 0.396641
| 0
| 0.129032
| 0
| 0
| 0.023224
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.064516
| false
| 0
| 0.129032
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2d9e0a79b8d15e42eda3577f2435526ea67e86
| 1,688
|
py
|
Python
|
searching/jump_search.py
|
magnusrodseth/data-structures-and-algorithms
|
45dfdc0859683d5c76b82b87f415e2c0cdbc15e8
|
[
"MIT"
] | null | null | null |
searching/jump_search.py
|
magnusrodseth/data-structures-and-algorithms
|
45dfdc0859683d5c76b82b87f415e2c0cdbc15e8
|
[
"MIT"
] | null | null | null |
searching/jump_search.py
|
magnusrodseth/data-structures-and-algorithms
|
45dfdc0859683d5c76b82b87f415e2c0cdbc15e8
|
[
"MIT"
] | null | null | null |
import math
from typing import List
def jump_search(array: List[int], value: int) -> int:
"""
Performs a jump search on a list of integers.
:param array: is the array to search.
:param value: is the value to search.
:return: the index of the value, or -1 if it doesn't exist.
"""
if len(array) == 0:
return -1
block_size = get_block_size(array)
# Pointers for traversing the array
start_pointer = 0
next_pointer = block_size
while (start_pointer < len(array)) and (array[next_pointer - 1] < value):
start_pointer = next_pointer
next_pointer += block_size
# Prevent next from going out of bounds
if next_pointer > len(array):
next_pointer = len(array)
# Linear search through the relevant block
for i in range(start_pointer, next_pointer):
if array[i] == value:
return i
return -1
def get_block_size(array: List[int]) -> int:
"""
Gets the block size of an array for jump search.
The block size is the square root of the length of the array.
We then calculate the absolute value of this block size, because we're using the value as
index pointer, and negative values do not make sense here.
This value is then floored to act as index pointer in the array.
:param array: is the array to search.
:return: the block size to be used in jump search.
"""
return math.floor(abs(math.sqrt(len(array))))
if __name__ == '__main__':
# Array must be sorted in order for jump search to work
array = [3, 5, 6, 9, 11, 18, 20, 21, 24, 30]
print(array)
index = jump_search(array, 31)
print(index)
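# Illustrative check (sketch): jump_search should agree with a plain linear lookup
# for values that are present and return -1 otherwise.
def _check_against_linear_search() -> None:
    data = [3, 5, 6, 9, 11, 18, 20, 21, 24, 30]
    for target in (3, 21, 30, 31):
        expected = data.index(target) if target in data else -1
        assert jump_search(data, target) == expected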
| 28.610169
| 93
| 0.650474
| 262
| 1,688
| 4.083969
| 0.377863
| 0.075701
| 0.042056
| 0.028037
| 0.052336
| 0.052336
| 0.052336
| 0
| 0
| 0
| 0
| 0.019402
| 0.26718
| 1,688
| 58
| 94
| 29.103448
| 0.845594
| 0.454976
| 0
| 0.083333
| 0
| 0
| 0.009412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c2faa49ef48fc93a9aff0f5610c889ba1ee0f3a
| 3,219
|
py
|
Python
|
demo/test_bug_3d.py
|
zhanwj/multi-task-pytorch
|
7d57645ec8be0ca0c258cfa99fb788e3cd37f106
|
[
"MIT"
] | 2
|
2019-06-11T16:16:11.000Z
|
2020-07-21T10:34:40.000Z
|
demo/test_bug_3d.py
|
zhanwj/multi-task-pytorch
|
7d57645ec8be0ca0c258cfa99fb788e3cd37f106
|
[
"MIT"
] | null | null | null |
demo/test_bug_3d.py
|
zhanwj/multi-task-pytorch
|
7d57645ec8be0ca0c258cfa99fb788e3cd37f106
|
[
"MIT"
] | 2
|
2019-05-21T11:07:29.000Z
|
2019-06-11T16:17:02.000Z
|
import torch
import lib.modeling.resnet as resnet
import lib.modeling.semseg_heads as snet
import torch.nn as nn
import torch.optim as optim
import utils.resnet_weights_helper as resnet_utils
from torch.autograd import Variable
from roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch, collate_minibatch_semseg
from datasets.roidb import combined_roidb_for_training, combined_roidb_for_training_semseg
import os
import numpy as np
import nn as mynn
import cv2
from modeling.model_builder_3DSD import Generalized_3DSD
from modeling.model_builder_PSP3D import DispSeg
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
#load net
class load_net(nn.Module):
def __init__(self):
super(load_net, self).__init__()
build=snet.ModelBuilder()
fc_dim = 2048
self.encoder = build.build_encoder(
arch= 'resnet50_dilated8',
fc_dim=fc_dim)
self.decoder = build.build_decoder(
arch = 'ppm_bilinear',
num_class=19,
fc_dim=fc_dim,
use_softmax=False)
def _init_modules(self):
resnet_utils.load_pretrained_imagenet_weights(self)
def forward(self, data):
pred=self.decoder(self.encoder(data, return_feature_maps=True))
pred = nn.functional.interpolate(
pred, size=[128,128],
mode='bilinear', align_corners=False)
pred = nn.functional.log_softmax(pred, dim=1)
return pred
def dataloader(bs, gpus):
inputs = {}
inputs['data'] = Variable(torch.randn(2*bs, 3, 128, 128)).to('cuda')
inputs['semseg_label_0'] = Variable(torch.LongTensor(
np.random.randint(0, 19, (bs, 128//8, 128//8), dtype=np.long))).to('cuda')
inputs['disp_label_0'] = Variable(torch.rand(bs, 128//8, 128//8)).to('cuda')
inputs['disp_scans'] = Variable(torch.arange(0,
cfg.DISP.MAX_DISPLACEMENT).float().view(1,cfg.DISP.MAX_DISPLACEMENT,1,1).repeat(bs,1,1,1)).to('cuda')
inputs['semseg_scans'] = Variable(torch.arange(0,
cfg.MODEL.NUM_CLASSES).float().view(1, cfg.MODEL.NUM_CLASSES, 1, 1).repeat(bs,1,1,1)).to('cuda')
return inputs
cfg_file = 'e2e_segdisp-R-50_3Dpool_1x.yaml'
cfg_from_file(cfg_file)
print (cfg.SEM)
print (cfg.DISP)
#cfg_from_list(cfg_file)
#assert_and_infer_cfg()
devices_ids=[5]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(ids) for ids in devices_ids])
torch.backends.cudnn.benchmark=True
#torch.cuda.set_device(3)
len_gpus = len(devices_ids)
batch_size = 2 * len_gpus
#net = mynn.DataParallel(load_net().to('cuda'), minibatch=True)
net = mynn.DataParallel(DispSeg().to('cuda'), minibatch=True)
optimizer = optim.SGD(net.parameters(), lr=0.000875, momentum=0.9)
criterion = nn.NLLLoss(ignore_index=255)
#dataloader= dataloader(batch_size, len_gpus)
for i in range(10):
#for i, inputs in zip(range(1000), dataloader):
inputs = dataloader(batch_size, len_gpus)
for key in inputs:
inputs[key] = torch.chunk(inputs[key], chunks=len_gpus, dim=0)
optimizer.zero_grad()
loss=net(**inputs)
optimizer.step()
for k in loss['losses'].keys():
print (loss['losses'][k].item())
| 38.321429
| 113
| 0.695247
| 471
| 3,219
| 4.543524
| 0.363057
| 0.019626
| 0.02243
| 0.02243
| 0.080374
| 0.071028
| 0.017757
| 0.017757
| 0.017757
| 0
| 0
| 0.033924
| 0.175831
| 3,219
| 83
| 114
| 38.783133
| 0.77271
| 0.07114
| 0
| 0
| 0
| 0
| 0.059356
| 0.010396
| 0
| 0
| 0
| 0
| 0.014286
| 1
| 0.057143
| false
| 0
| 0.228571
| 0
| 0.328571
| 0.042857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c304e47e988cc3ac6451c94e5e66110773b8469
| 2,909
|
py
|
Python
|
tests/components/evil_genius_labs/test_light.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/evil_genius_labs/test_light.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/evil_genius_labs/test_light.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Test Evil Genius Labs light."""
from unittest.mock import patch
import pytest
from homeassistant.components.light import (
ATTR_COLOR_MODE,
ATTR_SUPPORTED_COLOR_MODES,
ColorMode,
LightEntityFeature,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES
@pytest.mark.parametrize("platforms", [("light",)])
async def test_works(hass, setup_evil_genius_labs):
"""Test it works."""
state = hass.states.get("light.fibonacci256_23d4")
assert state is not None
assert state.state == "on"
assert state.attributes["brightness"] == 128
assert state.attributes[ATTR_COLOR_MODE] == ColorMode.RGB
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == [ColorMode.RGB]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == LightEntityFeature.EFFECT
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_color(hass, setup_evil_genius_labs):
"""Test turning on with a color."""
with patch(
"pyevilgenius.EvilGeniusDevice.set_path_value"
) as mock_set_path_value, patch(
"pyevilgenius.EvilGeniusDevice.set_rgb_color"
) as mock_set_rgb_color:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"brightness": 100,
"rgb_color": (10, 20, 30),
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
assert mock_set_path_value.mock_calls[0][1] == ("brightness", 100)
assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
assert len(mock_set_rgb_color.mock_calls) == 1
assert mock_set_rgb_color.mock_calls[0][1] == (10, 20, 30)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_effect(hass, setup_evil_genius_labs):
"""Test turning on with an effect."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"effect": "Pride Playground",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
assert mock_set_path_value.mock_calls[0][1] == ("pattern", 4)
assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_off(hass, setup_evil_genius_labs):
"""Test turning off."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_off",
{
"entity_id": "light.fibonacci256_23d4",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 1
assert mock_set_path_value.mock_calls[0][1] == ("power", 0)
| 33.056818
| 86
| 0.645583
| 357
| 2,909
| 4.955182
| 0.221289
| 0.055399
| 0.094969
| 0.099491
| 0.685698
| 0.644997
| 0.6026
| 0.530808
| 0.488977
| 0.414924
| 0
| 0.028992
| 0.229288
| 2,909
| 87
| 87
| 33.436782
| 0.760036
| 0.009625
| 0
| 0.352941
| 0
| 0
| 0.171014
| 0.096739
| 0
| 0
| 0
| 0
| 0.147059
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c30953c84b77a7d66d7d91568a3c0c17191380f
| 4,410
|
py
|
Python
|
python_on_whales/download_binaries.py
|
joshbode/python-on-whales
|
4d5b8b4c5c6dc3ac0af5713e4fe5a72788f44cda
|
[
"MIT"
] | null | null | null |
python_on_whales/download_binaries.py
|
joshbode/python-on-whales
|
4d5b8b4c5c6dc3ac0af5713e4fe5a72788f44cda
|
[
"MIT"
] | null | null | null |
python_on_whales/download_binaries.py
|
joshbode/python-on-whales
|
4d5b8b4c5c6dc3ac0af5713e4fe5a72788f44cda
|
[
"MIT"
] | null | null | null |
import platform
import shutil
import tempfile
import warnings
from pathlib import Path
import requests
from tqdm import tqdm
DOCKER_VERSION = "20.10.5"
BUILDX_VERSION = "0.5.1"
CACHE_DIR = Path.home() / ".cache" / "python-on-whales"
TEMPLATE_CLI = (
"https://download.docker.com/{os}/static/stable/{arch}/docker-{version}.tgz"
)
WINDOWS_CLI_URL = "https://github.com/StefanScherer/docker-cli-builder/releases/download/{version}/docker.exe"
def get_docker_binary_path_in_cache():
return CACHE_DIR / "docker-cli" / DOCKER_VERSION / "docker"
def get_docker_cli_url():
user_os = get_user_os()
if user_os == "windows":
return WINDOWS_CLI_URL.format(version=DOCKER_VERSION)
arch = get_arch_for_docker_cli_url()
return TEMPLATE_CLI.format(os=user_os, arch=arch, version=DOCKER_VERSION)
def download_docker_cli():
file_to_download = get_docker_cli_url()
extension = file_to_download.split(".")[-1]
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
downloaded_file_path = tmp_dir / f"docker.{extension}"
download_from_url(file_to_download, downloaded_file_path)
docker_binary_path = get_docker_binary_path_in_cache()
docker_binary_path.parent.mkdir(exist_ok=True, parents=True)
if extension == "tgz":
extract_dir = tmp_dir / "extracted"
shutil.unpack_archive(str(downloaded_file_path), str(extract_dir))
shutil.move(extract_dir / "docker" / "docker", docker_binary_path)
elif extension == "exe":
shutil.move(downloaded_file_path, docker_binary_path)
warnings.warn(
f"The docker client binary file {DOCKER_VERSION} was downloaded and put "
f"in `{docker_binary_path.absolute()}`. \n"
f"You can feel free to remove it if you wish, Python on whales will download "
f"it again if needed."
)
def download_from_url(url, dst):
try:
_download_from_url(url, dst)
except Exception as e:
raise ConnectionError(f"Error while downloading {url}") from e
def _download_from_url(url, dst):
# Streaming, so we can iterate over the response.
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1024
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(dst, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
raise ConnectionError(
f"Total size should be {total_size_in_bytes}, downloaded {progress_bar.n}"
)
def get_user_os():
user_os = platform.system()
if user_os == "Linux":
return "linux"
elif user_os == "Darwin":
return "mac"
elif user_os == "Windows":
return "windows"
else:
raise NotImplementedError(
f"Unknown OS: {user_os}, cannot determine which Docker CLI binary file to "
f"download. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues \n"
f"and in the meantime, install Docker manually to make python-on-whales "
f"work."
)
def get_arch_for_docker_cli_url():
arch = platform.architecture()[0]
# I don't know the exact list of possible architectures,
# so if a user reports a NotImplementedError, we can easily add
# his/her platform here.
arch_mapping = {
"NotImplementedError": "aarch64",
"NotImplementedError2": "armel",
"NotImplementedError3": "armhf",
"NotImplementedError4": "ppc64le",
"NotImplementedError5": "s390x",
"64bit": "x86_64",
}
try:
return arch_mapping[arch]
except KeyError:
raise NotImplementedError(
f"The architecture detected on your system is `{arch}`, the list of "
f"available architectures is {list(arch_mapping.values())}. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues "
f"and make sure to copy past this error message. \n"
f"In the meantime, install Docker manually on your system."
)
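# Illustrative helper (sketch, not part of the original module): make sure the Docker CLI
# binary is present in the cache before using it, downloading it on first use.
def ensure_docker_cli() -> Path:
    docker_binary = get_docker_binary_path_in_cache()
    if not docker_binary.exists():
        download_docker_cli()
    return docker_binary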
| 34.186047
| 110
| 0.664172
| 585
| 4,410
| 4.791453
| 0.331624
| 0.021406
| 0.039957
| 0.028541
| 0.179451
| 0.153407
| 0.053514
| 0.053514
| 0.053514
| 0.053514
| 0
| 0.009746
| 0.2322
| 4,410
| 128
| 111
| 34.453125
| 0.818074
| 0.042404
| 0
| 0.06
| 0
| 0.02
| 0.315078
| 0.020389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07
| false
| 0
| 0.07
| 0.01
| 0.21
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c336d5aaf0a0461822389d24ee86c2449c67183
| 4,415
|
py
|
Python
|
reinvent-2019/connected-photo-booth/lambda_code/Cerebro_GetQRCode.py
|
chriscoombs/aws-builders-fair-projects
|
eee405931030b833fa8c51e906c73d09ce051bcd
|
[
"Apache-2.0"
] | null | null | null |
reinvent-2019/connected-photo-booth/lambda_code/Cerebro_GetQRCode.py
|
chriscoombs/aws-builders-fair-projects
|
eee405931030b833fa8c51e906c73d09ce051bcd
|
[
"Apache-2.0"
] | null | null | null |
reinvent-2019/connected-photo-booth/lambda_code/Cerebro_GetQRCode.py
|
chriscoombs/aws-builders-fair-projects
|
eee405931030b833fa8c51e906c73d09ce051bcd
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import json
import os
import logging
from contextlib import closing
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
from random import shuffle
import time
import pyqrcode
import png
__BUCKET_NAME__ = "project-cerebro"
dynamo = boto3.client('dynamodb')
logger = None
print("In initialize fn ...")
logger = logging.getLogger()
if int(os.environ['DEBUG_MODE']):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.info("Initialize: Just a test")
logger.debug("Initialize: debug a test")
def create_presigned_url(bucket_name, object_name, expiration=3600):
"""Generate a presigned URL to share an S3 object
:param bucket_name: string
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
# Generate a presigned URL for the S3 object
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL
return response
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': err.message if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
}
# input parameters are:
# 1. image ID
# output parameters are:
# 1. generated QRCode
# workflow:
# 1. first get the image_id
# 2. confirm this exists in s3
# 3. generate a presigned URL with this s3 path
# 4. create a QR Code image with this url embedded
# 5. return the QR code stored in S3 temp.
def main(event, context):
logger.info("In main ...")
start_time = int(round(time.time() * 1000))
body_params = json.loads(event["body"])
logger.debug("Body params:")
logger.debug(body_params)
response_data = {}
# 1. get the image_id
if "image_id" in body_params:
image_id = body_params["image_id"]
# prefix and check for existence
s3_prefix = "production/%s" % image_id
# 2. check for the object in s3
s3 = boto3.resource('s3')
s3_object = s3.Object(__BUCKET_NAME__, s3_prefix)
s3_object.load() # fetches metadata for the object, but not data; raises ClientError if the object does not exist
obj_metadata = s3_object.metadata
logger.info("metadata found:")
logger.info(obj_metadata)
if obj_metadata is not None:
response_data["s3_image"] = s3_prefix
# 3. generate the presigned url
presigned_url = create_presigned_url(bucket_name = __BUCKET_NAME__, object_name=s3_prefix, expiration=5*60)
logger.info("generated the presigned URL:")
logger.info(presigned_url)
if presigned_url:
response_data["presigned_url"] = presigned_url
logger.info("assigned presigned url")
# 4. generate the qrcode, convert to png
url = pyqrcode.create(presigned_url)
url.png('/tmp/code.png', scale=5)
logger.info("Created a png file by now!")
# 5. save to s3
target_file='/tmp/code.png'
qrcode_key = "qrcodes/current_qrcode.png"
logger.info("Now trying to put s3 object ...")
# Create an S3 client
s3 = boto3.client('s3')
response = s3.put_object(
Body=open(target_file, 'rb'),
Bucket=__BUCKET_NAME__,
Key=qrcode_key)
logger.info("Now trying to put s3 object - completed!")
response_data["qrcode_key"] = qrcode_key
else:
response_data["result"] = "Failure"
return respond(None, response_data)
end_time = int(round(time.time() * 1000))
logger.info("Time Taken: %f" % (end_time - start_time))
logger.info("Done with main!")
response_data["result"] = "Success"
response_data["time_taken"] = str(end_time - start_time)
return respond(None, response_data)
def lambda_handler(event, context):
return main(event, context)
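# Local invocation sketch (illustrative; the image id is a placeholder and the call requires
# AWS credentials plus access to the project-cerebro bucket):
#   event = {"body": json.dumps({"image_id": "example.jpg"})}
#   print(lambda_handler(event, None))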
| 30.034014
| 115
| 0.622877
| 556
| 4,415
| 4.785971
| 0.30036
| 0.081172
| 0.022548
| 0.023675
| 0.08493
| 0.042089
| 0.024051
| 0.024051
| 0
| 0
| 0
| 0.02069
| 0.277463
| 4,415
| 146
| 116
| 30.239726
| 0.81348
| 0.186863
| 0
| 0.044944
| 0
| 0
| 0.162528
| 0.014955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.123596
| 0.022472
| 0.235955
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c36070009525ecb4d0b9ecb8aa020fd7b1f9bca
| 1,480
|
py
|
Python
|
src/cms/views/error_handler/error_handler.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 4
|
2019-12-05T16:45:17.000Z
|
2020-05-09T07:26:34.000Z
|
src/cms/views/error_handler/error_handler.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 56
|
2019-12-05T12:31:37.000Z
|
2021-01-07T15:47:45.000Z
|
src/cms/views/error_handler/error_handler.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 2
|
2019-12-11T09:52:26.000Z
|
2020-05-09T07:26:38.000Z
|
from django.shortcuts import render
from django.utils.translation import ugettext as _
# pylint: disable=unused-argument
def handler400(request, exception):
ctx = {'code': 400, 'title': _('Bad request'),
'message': _('There was an error in your request.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 400
return response
# pylint: disable=unused-argument
def handler403(request, exception):
ctx = {'code': 403, 'title': _('Forbidden'),
'message': _("You don't have the permission to access this page.")}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 403
return response
# pylint: disable=unused-argument
def handler404(request, exception):
ctx = {'code': 404, 'title': _('Page not found'),
'message': _('The page you requested could not be found.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 404
return response
# pylint: disable=unused-argument
def handler500(request):
ctx = {'code': 500, 'title': _('Internal Server Error'),
'message': _('An unexpected error has occurred.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 500
return response
# pylint: disable=unused-argument
def csrf_failure(request, reason):
return render(request, 'error_handler/csrf_failure.html')
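# Wiring sketch (illustrative; the dotted paths assume the module location shown above):
# the HTTP handlers are picked up by Django from the ROOT_URLCONF, e.g. in urls.py:
#   handler400 = 'cms.views.error_handler.error_handler.handler400'
#   handler403 = 'cms.views.error_handler.error_handler.handler403'
#   handler404 = 'cms.views.error_handler.error_handler.handler404'
#   handler500 = 'cms.views.error_handler.error_handler.handler500'
# while csrf_failure is referenced from settings.py via CSRF_FAILURE_VIEW.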
| 37
| 78
| 0.686486
| 179
| 1,480
| 5.541899
| 0.363128
| 0.065524
| 0.095766
| 0.136089
| 0.477823
| 0.447581
| 0.447581
| 0.270161
| 0.270161
| 0.270161
| 0
| 0.02995
| 0.187838
| 1,480
| 39
| 79
| 37.948718
| 0.795341
| 0.107432
| 0
| 0.285714
| 0
| 0
| 0.323954
| 0.111787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.071429
| 0.035714
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c36da1c5a18d69672ff02d87de44158f45e8811
| 738
|
py
|
Python
|
examples/ex3/app/models.py
|
trym-inc/django-msg
|
0b306524515a8fb4840d1a2ef8cf20901b64bc11
|
[
"MIT"
] | 7
|
2018-02-28T19:03:48.000Z
|
2020-12-21T01:15:34.000Z
|
examples/ex3/app/models.py
|
trym-inc/django-msg
|
0b306524515a8fb4840d1a2ef8cf20901b64bc11
|
[
"MIT"
] | null | null | null |
examples/ex3/app/models.py
|
trym-inc/django-msg
|
0b306524515a8fb4840d1a2ef8cf20901b64bc11
|
[
"MIT"
] | null | null | null |
from typing import NamedTuple
from django.contrib.auth.models import AbstractUser
from django.db import models
from msg.models import Msg
class User(AbstractUser):
phone_number: 'str' = models.CharField(max_length=255,
null=True, blank=True)
class HelloSMSMessage(NamedTuple):
phone_number: 'str'
username: 'str'
def send_hello_sms(self):
if not self.phone_number:
raise ValueError('User has to have a phone number '
'to send an SMS message.')
hello = self.HelloSMSMessage(
username=self.username,
phone_number=self.phone_number,
)
Msg.new(hello, dispatch_now=True)
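# Usage sketch (illustrative; assumes migrations are applied and a Msg handler is configured
# for HelloSMSMessage; the phone number is a placeholder):
#   user = User.objects.create_user("alice", phone_number="+15555550123")
#   user.send_hello_sms()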
| 28.384615
| 65
| 0.612466
| 85
| 738
| 5.211765
| 0.494118
| 0.148984
| 0.063205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005906
| 0.311653
| 738
| 25
| 66
| 29.52
| 0.866142
| 0
| 0
| 0
| 0
| 0
| 0.084011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3a00aad13ad525c3f1adcd91ff20ba8d288a5b
| 6,558
|
py
|
Python
|
tfx/examples/chicago_taxi_pipeline/serving/chicago_taxi_client.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | 1
|
2021-07-21T15:54:20.000Z
|
2021-07-21T15:54:20.000Z
|
tfx/examples/chicago_taxi_pipeline/serving/chicago_taxi_client.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | 1
|
2020-08-28T09:59:13.000Z
|
2020-08-28T09:59:13.000Z
|
tfx/examples/chicago_taxi_pipeline/serving/chicago_taxi_client.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | 1
|
2020-11-06T11:44:33.000Z
|
2020-11-06T11:44:33.000Z
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client for the chicago_taxi demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.utils import io_utils
_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0
_LABEL_KEY = 'tips'
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _make_proto_coder(schema):
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _make_csv_coder(schema, column_names):
"""Return a coder for tf.transform to read csv files."""
raw_feature_spec = _get_raw_feature_spec(schema)
parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.CsvCoder(column_names, parsing_schema)
def _read_schema(path):
"""Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None
"""
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result
def _do_local_inference(host, port, serialized_examples):
"""Performs inference on a model hosted by the host:port server."""
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
response = requests.post(
server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
def _do_aiplatform_inference(model, version, serialized_examples):
"""Performs inference on the model:version in AI Platform."""
working_dir = tempfile.mkdtemp()
instances_file = os.path.join(working_dir, 'test.json')
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the example in:
# https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
json_examples.append('{ "inputs": { "b64": "%s" } }' %
base64.b64encode(serialized_example).decode('utf-8'))
file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
gcloud_command = [
'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
version, '--json-instances', instances_file
]
print(subprocess.check_output(gcloud_command))
def _do_inference(model_handle, examples_file, num_examples, schema):
"""Sends requests to the model and prints the results.
Args:
model_handle: handle to the model. This can be either
"aiplatform:model:version" or "host:port"
examples_file: path to csv file containing examples, with the first line
assumed to have the column headers
num_examples: number of requests to send to the server
schema: a Schema describing the input data
Returns:
Response from model server
"""
filtered_features = [
feature for feature in schema.feature if feature.name != _LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
column_names = io_utils.load_csv_column_names(examples_file)
csv_coder = _make_csv_coder(schema, column_names)
proto_coder = _make_proto_coder(schema)
input_file = open(examples_file, 'r')
input_file.readline() # skip header line
serialized_examples = []
for _ in range(num_examples):
one_line = input_file.readline()
if not one_line:
print('End of example file reached')
break
one_example = csv_coder.decode(one_line)
serialized_example = proto_coder.encode(one_example)
serialized_examples.append(serialized_example)
parsed_model_handle = model_handle.split(':')
if parsed_model_handle[0] == 'aiplatform':
_do_aiplatform_inference(
model=parsed_model_handle[1],
version=parsed_model_handle[2],
serialized_examples=serialized_examples)
else:
_do_local_inference(
host=parsed_model_handle[0],
port=parsed_model_handle[1],
serialized_examples=serialized_examples)
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_examples',
help=('Number of examples to send to the server.'),
default=1,
type=int)
parser.add_argument(
'--server',
help=('Prediction service host:port or aiplatform:model:version'),
required=True)
parser.add_argument(
'--examples_file',
help=('Path to csv file containing examples.'),
required=True)
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
known_args, _ = parser.parse_known_args()
_do_inference(known_args.server, known_args.examples_file,
known_args.num_examples, _read_schema(known_args.schema_file))
if __name__ == '__main__':
app.run(main)
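# Editorial usage sketch (not part of the original file; script, host, model and
# file names are placeholders):
#   python chicago_taxi_client.py --server=localhost:8501 \
#       --examples_file=data/eval.csv --schema_file=schema.pbtxt
#   python chicago_taxi_client.py --server=aiplatform:chicago_taxi:v1 \
#       --examples_file=data/eval.csv --schema_file=schema.pbtxt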
| 33.630769
| 105
| 0.745502
| 888
| 6,558
| 5.242117
| 0.304054
| 0.025994
| 0.021053
| 0.010956
| 0.196992
| 0.156391
| 0.113856
| 0.095811
| 0.058432
| 0.058432
| 0
| 0.007093
| 0.161635
| 6,558
| 194
| 106
| 33.804124
| 0.839578
| 0.271729
| 0
| 0.122807
| 0
| 0
| 0.097187
| 0.011722
| 0.008772
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.157895
| 0.008772
| 0.263158
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3a1a0942bfa4b3696876f16d5ec82b36b6c9bd
| 23,156
|
py
|
Python
|
PyVideo/main.py
|
BlackIQ/Cute
|
5835e989d661f23b04b6e436589c6e844167522e
|
[
"Apache-2.0"
] | 5
|
2021-11-21T10:59:47.000Z
|
2022-01-16T11:57:14.000Z
|
PyVideo/main.py
|
BlackIQ/Cute
|
5835e989d661f23b04b6e436589c6e844167522e
|
[
"Apache-2.0"
] | null | null | null |
PyVideo/main.py
|
BlackIQ/Cute
|
5835e989d661f23b04b6e436589c6e844167522e
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Q_ARG, QAbstractItemModel,
QFileInfo, qFuzzyCompare, QMetaObject, QModelIndex, QObject, Qt,
QThread, QTime, QUrl)
from PyQt5.QtGui import QColor, qGray, QImage, QPainter, QPalette
from PyQt5.QtMultimedia import (QAbstractVideoBuffer, QMediaContent,
QMediaMetaData, QMediaPlayer, QMediaPlaylist, QVideoFrame, QVideoProbe)
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog, QFileDialog,
QFormLayout, QHBoxLayout, QLabel, QListView, QMessageBox, QPushButton,
QSizePolicy, QSlider, QStyle, QToolButton, QVBoxLayout, QWidget)
class VideoWidget(QVideoWidget):
def __init__(self, parent=None):
super(VideoWidget, self).__init__(parent)
self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
p = self.palette()
p.setColor(QPalette.Window, Qt.black)
self.setPalette(p)
self.setAttribute(Qt.WA_OpaquePaintEvent)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape and self.isFullScreen():
self.setFullScreen(False)
event.accept()
elif event.key() == Qt.Key_Enter and event.modifiers() & Qt.AltModifier:
self.setFullScreen(not self.isFullScreen())
event.accept()
else:
super(VideoWidget, self).keyPressEvent(event)
def mouseDoubleClickEvent(self, event):
self.setFullScreen(not self.isFullScreen())
event.accept()
class PlaylistModel(QAbstractItemModel):
Title, ColumnCount = range(2)
def __init__(self, parent=None):
super(PlaylistModel, self).__init__(parent)
self.m_playlist = None
def rowCount(self, parent=QModelIndex()):
return self.m_playlist.mediaCount() if self.m_playlist is not None and not parent.isValid() else 0
def columnCount(self, parent=QModelIndex()):
return self.ColumnCount if not parent.isValid() else 0
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column) if self.m_playlist is not None and not parent.isValid() and row >= 0 and row < self.m_playlist.mediaCount() and column >= 0 and column < self.ColumnCount else QModelIndex()
def parent(self, child):
return QModelIndex()
def data(self, index, role=Qt.DisplayRole):
if index.isValid() and role == Qt.DisplayRole:
if index.column() == self.Title:
location = self.m_playlist.media(index.row()).canonicalUrl()
return QFileInfo(location.path()).fileName()
return self.m_data[index]
return None
def playlist(self):
return self.m_playlist
def setPlaylist(self, playlist):
if self.m_playlist is not None:
self.m_playlist.mediaAboutToBeInserted.disconnect(
self.beginInsertItems)
self.m_playlist.mediaInserted.disconnect(self.endInsertItems)
self.m_playlist.mediaAboutToBeRemoved.disconnect(
self.beginRemoveItems)
self.m_playlist.mediaRemoved.disconnect(self.endRemoveItems)
self.m_playlist.mediaChanged.disconnect(self.changeItems)
self.beginResetModel()
self.m_playlist = playlist
if self.m_playlist is not None:
self.m_playlist.mediaAboutToBeInserted.connect(
self.beginInsertItems)
self.m_playlist.mediaInserted.connect(self.endInsertItems)
self.m_playlist.mediaAboutToBeRemoved.connect(
self.beginRemoveItems)
self.m_playlist.mediaRemoved.connect(self.endRemoveItems)
self.m_playlist.mediaChanged.connect(self.changeItems)
self.endResetModel()
def beginInsertItems(self, start, end):
self.beginInsertRows(QModelIndex(), start, end)
def endInsertItems(self):
self.endInsertRows()
def beginRemoveItems(self, start, end):
self.beginRemoveRows(QModelIndex(), start, end)
def endRemoveItems(self):
self.endRemoveRows()
def changeItems(self, start, end):
self.dataChanged.emit(self.index(start, 0),
self.index(end, self.ColumnCount))
class PlayerControls(QWidget):
play = pyqtSignal()
pause = pyqtSignal()
stop = pyqtSignal()
next = pyqtSignal()
previous = pyqtSignal()
changeVolume = pyqtSignal(int)
changeMuting = pyqtSignal(bool)
changeRate = pyqtSignal(float)
def __init__(self, parent=None):
super(PlayerControls, self).__init__(parent)
self.playerState = QMediaPlayer.StoppedState
self.playerMuted = False
self.playButton = QToolButton(clicked=self.playClicked)
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.stopButton = QToolButton(clicked=self.stop)
self.stopButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))
self.stopButton.setEnabled(False)
self.nextButton = QToolButton(clicked=self.next)
self.nextButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaSkipForward))
self.previousButton = QToolButton(clicked=self.previous)
self.previousButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaSkipBackward))
self.muteButton = QToolButton(clicked=self.muteClicked)
self.muteButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaVolume))
self.volumeSlider = QSlider(Qt.Horizontal,
sliderMoved=self.changeVolume)
self.volumeSlider.setRange(0, 100)
self.rateBox = QComboBox(activated=self.updateRate)
self.rateBox.addItem("0.5x", 0.5)
self.rateBox.addItem("1.0x", 1.0)
self.rateBox.addItem("2.0x", 2.0)
self.rateBox.setCurrentIndex(1)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.stopButton)
layout.addWidget(self.previousButton)
layout.addWidget(self.playButton)
layout.addWidget(self.nextButton)
layout.addWidget(self.muteButton)
layout.addWidget(self.volumeSlider)
layout.addWidget(self.rateBox)
self.setLayout(layout)
def state(self):
return self.playerState
def setState(self,state):
if state != self.playerState:
self.playerState = state
if state == QMediaPlayer.StoppedState:
self.stopButton.setEnabled(False)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
elif state == QMediaPlayer.PlayingState:
self.stopButton.setEnabled(True)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
elif state == QMediaPlayer.PausedState:
self.stopButton.setEnabled(True)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
def volume(self):
return self.volumeSlider.value()
def setVolume(self, volume):
self.volumeSlider.setValue(volume)
def isMuted(self):
return self.playerMuted
def setMuted(self, muted):
if muted != self.playerMuted:
self.playerMuted = muted
self.muteButton.setIcon(
self.style().standardIcon(
QStyle.SP_MediaVolumeMuted if muted else QStyle.SP_MediaVolume))
def playClicked(self):
if self.playerState in (QMediaPlayer.StoppedState, QMediaPlayer.PausedState):
self.play.emit()
elif self.playerState == QMediaPlayer.PlayingState:
self.pause.emit()
def muteClicked(self):
self.changeMuting.emit(not self.playerMuted)
def playbackRate(self):
return self.rateBox.itemData(self.rateBox.currentIndex())
def setPlaybackRate(self, rate):
for i in range(self.rateBox.count()):
if qFuzzyCompare(rate, self.rateBox.itemData(i)):
self.rateBox.setCurrentIndex(i)
return
self.rateBox.addItem("%dx" % rate, rate)
self.rateBox.setCurrentIndex(self.rateBox.count() - 1)
def updateRate(self):
self.changeRate.emit(self.playbackRate())
class FrameProcessor(QObject):
histogramReady = pyqtSignal(list)
@pyqtSlot(QVideoFrame, int)
def processFrame(self, frame, levels):
histogram = [0.0] * levels
if levels and frame.map(QAbstractVideoBuffer.ReadOnly):
pixelFormat = frame.pixelFormat()
if pixelFormat == QVideoFrame.Format_YUV420P or pixelFormat == QVideoFrame.Format_NV12:
# Process YUV data.
bits = frame.bits()
for idx in range(frame.height() * frame.width()):
histogram[(bits[idx] * levels) >> 8] += 1.0
else:
imageFormat = QVideoFrame.imageFormatFromPixelFormat(pixelFormat)
if imageFormat != QImage.Format_Invalid:
# Process RGB data.
image = QImage(frame.bits(), frame.width(), frame.height(), imageFormat)
for y in range(image.height()):
for x in range(image.width()):
pixel = image.pixel(x, y)
histogram[(qGray(pixel) * levels) >> 8] += 1.0
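# Editorial note: in both branches above, (value * levels) >> 8 maps an 8-bit
# intensity (0-255) into one of `levels` histogram buckets, i.e. an integer
# form of value * levels / 256.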
# Find the maximum value.
maxValue = 0.0
for value in histogram:
if value > maxValue:
maxValue = value
# Normalise the values between 0 and 1.
if maxValue > 0.0:
for i in range(len(histogram)):
histogram[i] /= maxValue
frame.unmap()
self.histogramReady.emit(histogram)
class HistogramWidget(QWidget):
def __init__(self, parent=None):
super(HistogramWidget, self).__init__(parent)
self.m_levels = 128
self.m_isBusy = False
self.m_histogram = []
self.m_processor = FrameProcessor()
self.m_processorThread = QThread()
self.m_processor.moveToThread(self.m_processorThread)
self.m_processor.histogramReady.connect(self.setHistogram)
def __del__(self):
self.m_processorThread.quit()
self.m_processorThread.wait(10000)
def setLevels(self, levels):
self.m_levels = levels
def processFrame(self, frame):
if self.m_isBusy:
return
self.m_isBusy = True
QMetaObject.invokeMethod(self.m_processor, 'processFrame',
Qt.QueuedConnection, Q_ARG(QVideoFrame, frame),
Q_ARG(int, self.m_levels))
@pyqtSlot(list)
def setHistogram(self, histogram):
self.m_isBusy = False
self.m_histogram = list(histogram)
self.update()
def paintEvent(self, event):
painter = QPainter(self)
if len(self.m_histogram) == 0:
painter.fillRect(0, 0, self.width(), self.height(),
QColor.fromRgb(0, 0, 0))
return
barWidth = self.width() / float(len(self.m_histogram))
for i, value in enumerate(self.m_histogram):
h = value * self.height()
# Draw the level.
painter.fillRect(barWidth * i, self.height() - h,
barWidth * (i + 1), self.height(), Qt.red)
# Clear the rest of the control.
painter.fillRect(barWidth * i, 0, barWidth * (i + 1),
self.height() - h, Qt.black)
class Player(QWidget):
fullScreenChanged = pyqtSignal(bool)
def __init__(self, playlist, parent=None):
super(Player, self).__init__(parent)
self.colorDialog = None
self.trackInfo = ""
self.statusInfo = ""
self.duration = 0
self.player = QMediaPlayer()
self.playlist = QMediaPlaylist()
self.player.setPlaylist(self.playlist)
self.player.durationChanged.connect(self.durationChanged)
self.player.positionChanged.connect(self.positionChanged)
self.player.metaDataChanged.connect(self.metaDataChanged)
self.playlist.currentIndexChanged.connect(self.playlistPositionChanged)
self.player.mediaStatusChanged.connect(self.statusChanged)
self.player.bufferStatusChanged.connect(self.bufferingProgress)
self.player.videoAvailableChanged.connect(self.videoAvailableChanged)
self.player.error.connect(self.displayErrorMessage)
self.videoWidget = VideoWidget()
self.player.setVideoOutput(self.videoWidget)
self.playlistModel = PlaylistModel()
self.playlistModel.setPlaylist(self.playlist)
self.playlistView = QListView()
self.playlistView.setModel(self.playlistModel)
self.playlistView.setCurrentIndex(
self.playlistModel.index(self.playlist.currentIndex(), 0))
self.playlistView.activated.connect(self.jump)
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(0, self.player.duration() // 1000)
self.labelDuration = QLabel()
self.slider.sliderMoved.connect(self.seek)
self.labelHistogram = QLabel()
self.labelHistogram.setText("Histogram:")
self.histogram = HistogramWidget()
histogramLayout = QHBoxLayout()
histogramLayout.addWidget(self.labelHistogram)
histogramLayout.addWidget(self.histogram, 1)
self.probe = QVideoProbe()
self.probe.videoFrameProbed.connect(self.histogram.processFrame)
self.probe.setSource(self.player)
openButton = QPushButton("Open", clicked=self.open)
controls = PlayerControls()
controls.setState(self.player.state())
controls.setVolume(self.player.volume())
controls.setMuted(controls.isMuted())
controls.play.connect(self.player.play)
controls.pause.connect(self.player.pause)
controls.stop.connect(self.player.stop)
controls.next.connect(self.playlist.next)
controls.previous.connect(self.previousClicked)
controls.changeVolume.connect(self.player.setVolume)
controls.changeMuting.connect(self.player.setMuted)
controls.changeRate.connect(self.player.setPlaybackRate)
controls.stop.connect(self.videoWidget.update)
self.player.stateChanged.connect(controls.setState)
self.player.volumeChanged.connect(controls.setVolume)
self.player.mutedChanged.connect(controls.setMuted)
self.fullScreenButton = QPushButton("FullScreen")
self.fullScreenButton.setCheckable(True)
self.colorButton = QPushButton("Color Options...")
self.colorButton.setEnabled(False)
self.colorButton.clicked.connect(self.showColorDialog)
displayLayout = QHBoxLayout()
displayLayout.addWidget(self.videoWidget, 2)
displayLayout.addWidget(self.playlistView)
controlLayout = QHBoxLayout()
controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.addWidget(openButton)
controlLayout.addStretch(1)
controlLayout.addWidget(controls)
controlLayout.addStretch(1)
controlLayout.addWidget(self.fullScreenButton)
controlLayout.addWidget(self.colorButton)
layout = QVBoxLayout()
layout.addLayout(displayLayout)
hLayout = QHBoxLayout()
hLayout.addWidget(self.slider)
hLayout.addWidget(self.labelDuration)
layout.addLayout(hLayout)
layout.addLayout(controlLayout)
layout.addLayout(histogramLayout)
self.setLayout(layout)
if not self.player.isAvailable():
QMessageBox.warning(self, "Service not available",
"The QMediaPlayer object does not have a valid service.\n"
"Please check the media service plugins are installed.")
controls.setEnabled(False)
self.playlistView.setEnabled(False)
openButton.setEnabled(False)
self.colorButton.setEnabled(False)
self.fullScreenButton.setEnabled(False)
self.metaDataChanged()
self.addToPlaylist(playlist)
def open(self):
fileNames, _ = QFileDialog.getOpenFileNames(self, "Open Files")
self.addToPlaylist(fileNames)
def addToPlaylist(self, fileNames):
for name in fileNames:
fileInfo = QFileInfo(name)
if fileInfo.exists():
url = QUrl.fromLocalFile(fileInfo.absoluteFilePath())
if fileInfo.suffix().lower() == 'm3u':
self.playlist.load(url)
else:
self.playlist.addMedia(QMediaContent(url))
else:
url = QUrl(name)
if url.isValid():
self.playlist.addMedia(QMediaContent(url))
def durationChanged(self, duration):
duration //= 1000
self.duration = duration
self.slider.setMaximum(duration)
def positionChanged(self, progress):
progress //= 1000
if not self.slider.isSliderDown():
self.slider.setValue(progress)
self.updateDurationInfo(progress)
def metaDataChanged(self):
if self.player.isMetaDataAvailable():
self.setTrackInfo("%s - %s" % (
self.player.metaData(QMediaMetaData.AlbumArtist),
self.player.metaData(QMediaMetaData.Title)))
def previousClicked(self):
# Go to the previous track if we are within the first 5 seconds of
# playback. Otherwise, seek to the beginning.
if self.player.position() <= 5000:
self.playlist.previous()
else:
self.player.setPosition(0)
def jump(self, index):
if index.isValid():
self.playlist.setCurrentIndex(index.row())
self.player.play()
def playlistPositionChanged(self, position):
self.playlistView.setCurrentIndex(
self.playlistModel.index(position, 0))
def seek(self, seconds):
self.player.setPosition(seconds * 1000)
def statusChanged(self, status):
self.handleCursor(status)
if status == QMediaPlayer.LoadingMedia:
self.setStatusInfo("Loading...")
elif status == QMediaPlayer.StalledMedia:
self.setStatusInfo("Media Stalled")
elif status == QMediaPlayer.EndOfMedia:
QApplication.alert(self)
elif status == QMediaPlayer.InvalidMedia:
self.displayErrorMessage()
else:
self.setStatusInfo("")
def handleCursor(self, status):
if status in (QMediaPlayer.LoadingMedia, QMediaPlayer.BufferingMedia, QMediaPlayer.StalledMedia):
self.setCursor(Qt.BusyCursor)
else:
self.unsetCursor()
def bufferingProgress(self, progress):
self.setStatusInfo("Buffering %d%" % progress)
def videoAvailableChanged(self, available):
if available:
self.fullScreenButton.clicked.connect(
self.videoWidget.setFullScreen)
self.videoWidget.fullScreenChanged.connect(
self.fullScreenButton.setChecked)
if self.fullScreenButton.isChecked():
self.videoWidget.setFullScreen(True)
else:
self.fullScreenButton.clicked.disconnect(
self.videoWidget.setFullScreen)
self.videoWidget.fullScreenChanged.disconnect(
self.fullScreenButton.setChecked)
self.videoWidget.setFullScreen(False)
self.colorButton.setEnabled(available)
def setTrackInfo(self, info):
self.trackInfo = info
if self.statusInfo != "":
self.setWindowTitle("%s | %s" % (self.trackInfo, self.statusInfo))
else:
self.setWindowTitle(self.trackInfo)
def setStatusInfo(self, info):
self.statusInfo = info
if self.statusInfo != "":
self.setWindowTitle("%s | %s" % (self.trackInfo, self.statusInfo))
else:
self.setWindowTitle(self.trackInfo)
def displayErrorMessage(self):
self.setStatusInfo(self.player.errorString())
def updateDurationInfo(self, currentInfo):
duration = self.duration
if currentInfo or duration:
currentTime = QTime((currentInfo // 3600) % 60, (currentInfo // 60) % 60,
currentInfo % 60, (currentInfo * 1000) % 1000)
totalTime = QTime((duration // 3600) % 60, (duration // 60) % 60,
duration % 60, (duration * 1000) % 1000)
format = 'hh:mm:ss' if duration > 3600 else 'mm:ss'
tStr = currentTime.toString(format) + " / " + totalTime.toString(format)
else:
tStr = ""
self.labelDuration.setText(tStr)
def showColorDialog(self):
if self.colorDialog is None:
brightnessSlider = QSlider(Qt.Horizontal)
brightnessSlider.setRange(-100, 100)
brightnessSlider.setValue(self.videoWidget.brightness())
brightnessSlider.sliderMoved.connect(
self.videoWidget.setBrightness)
self.videoWidget.brightnessChanged.connect(
brightnessSlider.setValue)
contrastSlider = QSlider(Qt.Horizontal)
contrastSlider.setRange(-100, 100)
contrastSlider.setValue(self.videoWidget.contrast())
contrastSlider.sliderMoved.connect(self.videoWidget.setContrast)
self.videoWidget.contrastChanged.connect(contrastSlider.setValue)
hueSlider = QSlider(Qt.Horizontal)
hueSlider.setRange(-100, 100)
hueSlider.setValue(self.videoWidget.hue())
hueSlider.sliderMoved.connect(self.videoWidget.setHue)
self.videoWidget.hueChanged.connect(hueSlider.setValue)
saturationSlider = QSlider(Qt.Horizontal)
saturationSlider.setRange(-100, 100)
saturationSlider.setValue(self.videoWidget.saturation())
saturationSlider.sliderMoved.connect(
self.videoWidget.setSaturation)
self.videoWidget.saturationChanged.connect(
saturationSlider.setValue)
layout = QFormLayout()
layout.addRow("Brightness", brightnessSlider)
layout.addRow("Contrast", contrastSlider)
layout.addRow("Hue", hueSlider)
layout.addRow("Saturation", saturationSlider)
button = QPushButton("Close")
layout.addRow(button)
self.colorDialog = QDialog(self)
self.colorDialog.setWindowTitle("Color Options")
self.colorDialog.setLayout(layout)
button.clicked.connect(self.colorDialog.close)
self.colorDialog.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
player = Player(sys.argv[1:])
player.show()
sys.exit(app.exec_())
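# Editorial usage sketch (file names are placeholders):
#   python main.py clip1.mp4 clip2.mkv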
| 35.734568
| 217
| 0.634738
| 2,198
| 23,156
| 6.634668
| 0.199272
| 0.014057
| 0.017829
| 0.01728
| 0.160118
| 0.124049
| 0.06631
| 0.05575
| 0.048893
| 0.040184
| 0
| 0.010181
| 0.266151
| 23,156
| 647
| 218
| 35.789799
| 0.847996
| 0.010969
| 0
| 0.127835
| 0
| 0
| 0.014852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11134
| false
| 0
| 0.012371
| 0.018557
| 0.187629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3a31af53788d8bf47df143a1f5099537838024
| 1,234
|
py
|
Python
|
tests/snapshot/periodic.py
|
Uornca/mirheo
|
162c722ffa27c02e1f5b0d1866816e44c2393f0f
|
[
"MIT"
] | 22
|
2019-07-17T13:06:41.000Z
|
2021-12-15T14:45:24.000Z
|
tests/snapshot/periodic.py
|
Uornca/mirheo
|
162c722ffa27c02e1f5b0d1866816e44c2393f0f
|
[
"MIT"
] | 63
|
2019-06-26T13:30:47.000Z
|
2021-02-23T10:13:10.000Z
|
tests/snapshot/periodic.py
|
Uornca/mirheo
|
162c722ffa27c02e1f5b0d1866816e44c2393f0f
|
[
"MIT"
] | 9
|
2019-10-11T07:32:19.000Z
|
2021-05-17T11:25:35.000Z
|
#!/usr/bin/env python
"""Test checkpoint-like periodic snapshots.
We test that the expected number of snapshot folders are created and that the currentStep value changes between them.
"""
import mirheo as mir
u = mir.Mirheo(nranks=(1, 1, 1), domain=(4, 6, 8), debug_level=3,
log_filename='log', no_splash=True,
checkpoint_every=10, checkpoint_mode='Incremental',
checkpoint_folder='periodic_snapshots/snapshot_', checkpoint_mechanism='Snapshot')
pv = mir.ParticleVectors.ParticleVector('pv', mass=1)
ic = mir.InitialConditions.Uniform(number_density=2)
u.registerParticleVector(pv, ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind='DPD', a=10.0, gamma=10.0, kBT=1.0, power=0.5)
lj = mir.Interactions.Pairwise('lj', rc=1.0, kind='LJ', epsilon=1.25, sigma=0.75)
u.registerInteraction(dpd)
u.registerInteraction(lj)
u.setInteraction(dpd, pv, pv)
minimize = mir.Integrators.Minimize('minimize', max_displacement=1. / 1024)
u.registerIntegrator(minimize)
u.run(45, dt=0.125)
# TEST: snapshot.periodic
# cd snapshot
# rm -rf periodic_snapshots/
# mir.run --runargs "-n 2" ./periodic.py
# ls periodic_snapshots | cat > snapshot.out.txt
# grep -rH --include=*.json currentStep periodic_snapshots/ | sort >> snapshot.out.txt
| 34.277778
| 98
| 0.71799
| 179
| 1,234
| 4.871508
| 0.553073
| 0.097477
| 0.052752
| 0.018349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040225
| 0.133712
| 1,234
| 35
| 99
| 35.257143
| 0.775491
| 0.300648
| 0
| 0
| 0
| 0
| 0.082353
| 0.032941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3aa677e610f9e2bf81b41d5bae0ca83fbbae6f
| 3,632
|
py
|
Python
|
tools/resource_prefetch_predictor/generate_database.py
|
xzhan96/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2021-01-07T18:51:03.000Z
|
2021-01-07T18:51:03.000Z
|
tools/resource_prefetch_predictor/generate_database.py
|
emilio/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/resource_prefetch_predictor/generate_database.py
|
emilio/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Loads a set of web pages several times on a device, and extracts the
predictor database.
"""
import argparse
import logging
import os
import sys
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
sys.path.append(os.path.join(_SRC_PATH, 'tools', 'android', 'loading'))
import controller
from options import OPTIONS
import page_track
_PAGE_LOAD_TIMEOUT = 20
def _CreateArgumentParser():
"""Creates and returns the argument parser."""
parser = argparse.ArgumentParser(
description=('Loads a set of web pages several times on a device, and '
'extracts the predictor database.'),
parents=[OPTIONS.GetParentParser()])
parser.add_argument('--device', help='Device ID')
parser.add_argument('--urls_filename', help='File containing a list of URLs '
'(one per line). URLs can be repeated.')
parser.add_argument('--output_filename',
help='File to store the database in.')
parser.add_argument('--url_repeat',
help=('Number of times each URL in the input '
'file is loaded.'),
default=3)
return parser
def _FindDevice(device_id):
"""Returns a device matching |device_id| or the first one if None, or None."""
devices = device_utils.DeviceUtils.HealthyDevices()
if device_id is None:
return devices[0]
matching_devices = [d for d in devices if str(d) == device_id]
if not matching_devices:
return None
return matching_devices[0]
def _Setup(device):
"""Sets up a device and returns an instance of RemoteChromeController."""
chrome_controller = controller.RemoteChromeController(device)
device.ForceStop(OPTIONS.ChromePackage().package)
chrome_controller.AddChromeArguments(
['--speculative-resource-prefetching=learning'])
chrome_controller.ResetBrowserState()
return chrome_controller
def _Go(chrome_controller, urls_filename, output_filename, repeats):
urls = []
with open(urls_filename) as f:
urls = [line.strip() for line in f.readlines()]
with chrome_controller.Open() as connection:
for repeat in range(repeats):
logging.info('Repeat #%d', repeat)
for url in urls:
logging.info('\tLoading %s', url)
page_track.PageTrack(connection) # Registers the listeners.
connection.MonitorUrl(url, timeout_seconds=_PAGE_LOAD_TIMEOUT,
stop_delay_multiplier=1.5)
device = chrome_controller.GetDevice()
device.ForceStop(OPTIONS.ChromePackage().package)
database_filename = (
'/data/user/0/%s/app_chrome/Default/Network Action Predictor' %
OPTIONS.ChromePackage().package)
device.PullFile(database_filename, output_filename)
def main():
logging.basicConfig(level=logging.INFO)
parser = _CreateArgumentParser()
args = parser.parse_args()
OPTIONS.SetParsedArgs(args)
devil_chromium.Initialize()
device = _FindDevice(args.device)
if device is None:
logging.error('Could not find device: %s.', args.device)
sys.exit(1)
chrome_controller = _Setup(device)
_Go(chrome_controller, args.urls_filename, args.output_filename,
int(args.url_repeat))
if __name__ == '__main__':
main()
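# Editorial usage sketch (values are placeholders; OPTIONS.GetParentParser()
# may add further required flags):
#   python generate_database.py --device <serial> --urls_filename urls.txt \
#       --output_filename predictor.db --url_repeat 3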
| 32.141593
| 80
| 0.704295
| 464
| 3,632
| 5.344828
| 0.37931
| 0.058065
| 0.016129
| 0.018145
| 0.127419
| 0.093548
| 0.093548
| 0.093548
| 0.057258
| 0.057258
| 0
| 0.004405
| 0.1875
| 3,632
| 112
| 81
| 32.428571
| 0.835988
| 0.128855
| 0
| 0.025641
| 0
| 0
| 0.163532
| 0.027096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064103
| false
| 0
| 0.115385
| 0
| 0.24359
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3ae120bfd666dab5412a24ae65101fc3c9e81d
| 9,947
|
py
|
Python
|
palm_wrapper/job_submission/domain.py
|
madeline-scyphers/palm
|
0ecf9eb49f66b86f284bac9506c9570159aba02b
|
[
"MIT"
] | null | null | null |
palm_wrapper/job_submission/domain.py
|
madeline-scyphers/palm
|
0ecf9eb49f66b86f284bac9506c9570159aba02b
|
[
"MIT"
] | 6
|
2021-12-07T15:59:42.000Z
|
2021-12-07T16:03:45.000Z
|
palm_wrapper/job_submission/domain.py
|
madeline-scyphers/palm
|
0ecf9eb49f66b86f284bac9506c9570159aba02b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Optional
from xml import dom
import numpy as np
import pandas as pd
from .utils import get_factors_rev
def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
f1 = sorted(get_factors_rev(domain_x))
f2 = sorted(get_factors_rev(domain_y))
plot_x, plot_y = None, None
for x in f1:
for y in f2:
if x * y - house_goal >= 0 and plot_goal - x * y >= 0:
if not plot_x and not plot_y:
plot_x, plot_y = x, y
if (plot_goal - x * y) < (plot_goal - plot_x * plot_y):
plot_x, plot_y = x, y
elif ((plot_goal - x * y) == (plot_goal - plot_x * plot_y)) and ((x - y) < (plot_x - plot_y)):
plot_x, plot_y = x, y
return plot_x, plot_y
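# Editorial example: with domain_x=10, domain_y=12, plot_goal=30 and
# house_goal=4, the scan reaches (5, 6) and later (10, 3), both with area
# exactly 30; the tie-break only replaces the current pair for a strictly
# smaller (x - y), so (5, 6) is returned.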
def calc_plot_sizes(
domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
x_spread = x_spread if x_spread is not None else (-round(domain_x / 15), 0)
y_spread = (
y_spread if y_spread is not None else (-round(domain_y / 20), min(full_domain - domain_y, round(domain_y / 10)))
)
goal = plot_footprint / (dx * dy)
house_goal = house_footprint / (dx * dy)
dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
plots = []
for d_x in dom_x:
for d_y in dom_y:
trimmed_d_y = int(d_y * plot_ratio)
plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
if plot_x is not None and plot_y is not None:
plots.append((plot_x, plot_y, d_x, d_y, trimmed_d_y))
return plots
def get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
goal = plot_footprint / (dx * dy)
tmp = pd.DataFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
tmp["plt_area"] = tmp["px"] * tmp["py"]
tmp["goal_diff"] = goal - tmp.plt_area
tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
tmp["full_domain"] = tmp["domx"] * tmp["domy"]
tmp["ratio_diff"] = abs((((tmp.trimmed_area + round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
normalized_goal_diff = (tmp.goal_diff + goal) / goal
tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
# tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
tmp = tmp.sort_values(
by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
ascending=[True, True, True, True, False],
)
# tmp = tmp.sort_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y
def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
goal = house_footprint / (dx * dy)
f1 = range(1, plot_x + 1)
f2 = range(1, plot_y + 1)
true_x, true_y = f1[0], f2[0]
for x in f1:
for y in f2:
padded_x, padded_y = x - 0, y - 0
nums = sorted([padded_x, padded_y])
if nums[0] * 2 < nums[1]:
continue
if abs(goal - padded_x * padded_y) < abs(goal - true_x * true_y):
true_x, true_y = padded_x, padded_y
elif (abs(goal - padded_x * padded_y) == abs(goal - true_x * true_y)) and (
abs(padded_x - padded_y) < abs(true_x - true_y)
):
true_x, true_y = padded_x, padded_y
return true_x, true_y
class BaseDomainArea(ABC):
subplot: Optional["BaseDomainArea"]
x: int
y: int
z: Optional[int]
matrix: np.ndarray
def __str__(self) -> str:
string = ""
for row in self.matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
@abstractmethod
def get_matrix(self) -> np.ndarray:
"""Get the numpy matrix representation of the domain area"""
def _validate_matrix_size(self, subplot):
for value in ["x", "y"]:
cell_val = getattr(self, value)
subplot_val = getattr(subplot, value)
if subplot_val and cell_val < subplot_val:
raise ValueError(
f"The {value} ({cell_val}) value of {self.__class__.__name__}"
f" must be larger than the house ({subplot_val}) going on it!"
)
def save_matrix(self, filename: str, matrix_name: str = None) -> None:
matrix = self.matrix if matrix_name is None else getattr(self, matrix_name)
np.savetxt(filename, matrix, delimiter=",")
class House(BaseDomainArea):
def __init__(self, x: int, y: int, z: int) -> None:
self.x = x
self.y = y
self.z = z
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
house = np.full((self.x, self.y), self.z)
return house
class Cell(BaseDomainArea):
def __init__(self, subplot: House, x: int, y: int) -> None:
self.subplot = subplot
self.x = x
self.y = y
self._validate_matrix_size(subplot=self.subplot)
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
left = (self.x - self.subplot.x) // 2
top = (self.y - self.subplot.y) // 2
plot = np.zeros((self.x, self.y), dtype=int)
plot[left : left + self.subplot.x, top : top + self.subplot.y] = self.subplot.matrix
return plot
class Domain(BaseDomainArea):
def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
self.subplot = subplot
self.temp_x = tdomain_x
self.temp_y = tdomain_y
self.full_x = full_x
self.full_y = full_y
self.trimmed_y = trimmed_y
self.plot_ratio = plot_ratio
self.stack_height = stack_height
# self._validate_matrix_size(subplot=self.subplot)
self.matrix, self.trees_matrix = self.get_matrix()
def print_tree_matrix(self) -> str:
string = ""
for row in self.trees_matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
def get_matrix(self) -> np.ndarray:
houses_row = np.tile(
self.subplot.matrix,
(
self.temp_x // self.subplot.x,
1,
),
)
number_of_house_rows = self.trimmed_y // self.subplot.y
number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
tree_row = np.full((self.temp_x, 1), -1)
mixed_row = np.array(
[-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
).reshape(self.temp_x, 1)
rows = [[houses_row.copy()] for _ in range(number_of_house_rows)]
trees = [tree_row.copy() for _ in range(number_of_full_tree_rows)]
trees.insert(number_of_house_rows // 2, mixed_row)
while trees:
for row in rows:
if not trees:
break
row.append(trees.pop())
domain_with_trees = np.concatenate([np.concatenate(row, axis=1) for row in rows], axis=1)
dwtx = domain_with_trees.shape[0]
dwty = domain_with_trees.shape[1]
xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceil((self.full_x - dwtx) / 2))
full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
mid_x = self.full_x // 2
full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height # stack for surface scalar to come out of
domain = np.where(full_domain != -1, full_domain, 0)
trees = np.where(full_domain == -1, full_domain, 0)
return domain.T, trees.T
@classmethod
def from_domain_config(cls, house, config):
cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
x = config["domain"]["x"]
y = config["domain"]["y"]
return cls(subplot=cell, x=x, y=y)
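# Editorial note: from_domain_config appears stale relative to the classes
# above -- Cell.__init__ takes no tree_domain_fraction argument and
# Domain.__init__ takes no x/y keywords; from_plot_size below matches the
# current signatures.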
@classmethod
def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
cell = Cell(house, x=tplot_x, y=tplot_y)
# x = config["domain"]["x"]
# y = config["domain"]["y"]
return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)
def setup_domain(cfg):
domain_x, domain_y = cfg["domain"]["x"], (round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"]))
plot_footprint, plot_ratio, dx, dy = (
cfg["plot"]["plot_footprint"],
cfg["plot"]["plot_ratio"],
cfg["domain"]["dx"],
cfg["domain"]["dy"],
)
plots = calc_plot_sizes(
domain_x,
domain_y,
plot_footprint,
cfg["house"]["footprint"],
plot_ratio,
dx,
dy,
cfg["domain"]["y"],
)
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy)
house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
house = House(house_x, house_y, cfg["house"]["height"])
return Domain.from_plot_size(house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, cfg["domain"]["stack_height"])
if __name__ == "__main__":
from .load_wrapper_config import get_wrapper_config
config = get_wrapper_config()
domain = setup_domain(config)
domain
| 38.405405
| 138
| 0.6037
| 1,477
| 9,947
| 3.787407
| 0.118483
| 0.023775
| 0.017698
| 0.019664
| 0.400965
| 0.295316
| 0.263497
| 0.207186
| 0.188059
| 0.158384
| 0
| 0.008761
| 0.265608
| 9,947
| 258
| 139
| 38.554264
| 0.757016
| 0.037901
| 0
| 0.158654
| 0
| 0
| 0.067162
| 0.007218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086538
| false
| 0
| 0.033654
| 0
| 0.221154
| 0.067308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3c80f9f134a4c7e10b07a2a070fce993cd44e3
| 373
|
py
|
Python
|
zad5.py
|
Alba126/Laba21
|
ce5735ca223d92287efa64bc3347f4356234b399
|
[
"MIT"
] | null | null | null |
zad5.py
|
Alba126/Laba21
|
ce5735ca223d92287efa64bc3347f4356234b399
|
[
"MIT"
] | null | null | null |
zad5.py
|
Alba126/Laba21
|
ce5735ca223d92287efa64bc3347f4356234b399
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from random import random
def on_click():
x = random()
y = random()
bt1.place(relx=x, rely=y)
root = Tk()
root['bg'] = 'white'
root.title('crown')
img = PhotoImage(file='crown.png')
bt1 = Button(image=img, command=on_click)
bt1.place(relx=0.5, rely=0.5, anchor=CENTER)
root.mainloop()
| 16.217391
| 44
| 0.643432
| 59
| 373
| 4.033898
| 0.644068
| 0.058824
| 0.10084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028939
| 0.16622
| 373
| 22
| 45
| 16.954545
| 0.736334
| 0.115282
| 0
| 0
| 0
| 0
| 0.064024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3e76ea8723f85b50595507d895179df16ec7b9
| 341
|
py
|
Python
|
tests/importer/utils/test_utils.py
|
HumanCellAtlas/ingest-common
|
6a230f9606f64cd787b67c143854db36e012a2b7
|
[
"Apache-2.0"
] | null | null | null |
tests/importer/utils/test_utils.py
|
HumanCellAtlas/ingest-common
|
6a230f9606f64cd787b67c143854db36e012a2b7
|
[
"Apache-2.0"
] | null | null | null |
tests/importer/utils/test_utils.py
|
HumanCellAtlas/ingest-common
|
6a230f9606f64cd787b67c143854db36e012a2b7
|
[
"Apache-2.0"
] | null | null | null |
from openpyxl import Workbook
def create_test_workbook(*worksheet_titles, include_default_sheet=False):
workbook = Workbook()
for title in worksheet_titles:
workbook.create_sheet(title)
if not include_default_sheet:
default_sheet = workbook['Sheet']
workbook.remove(default_sheet)
return workbook
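# Editorial usage sketch (not part of the original file):
#   wb = create_test_workbook('donor', 'specimen')
#   assert wb.sheetnames == ['donor', 'specimen']  # default 'Sheet' removed
#   wb = create_test_workbook('donor', include_default_sheet=True)
#   assert 'Sheet' in wb.sheetnames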
| 24.357143
| 73
| 0.73607
| 40
| 341
| 6
| 0.5
| 0.2
| 0.158333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199413
| 341
| 13
| 74
| 26.230769
| 0.879121
| 0
| 0
| 0
| 0
| 0
| 0.014663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3ee51c4543a5b2653184ca78a98a29af6b98cb
| 2,114
|
py
|
Python
|
test/test_import_stats.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 24
|
2020-11-02T21:25:12.000Z
|
2022-03-17T07:20:33.000Z
|
test/test_import_stats.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 1
|
2019-08-01T00:17:43.000Z
|
2019-09-12T01:31:53.000Z
|
test/test_import_stats.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 12
|
2020-11-06T05:00:37.000Z
|
2022-01-30T19:17:36.000Z
|
import subprocess
import sys
import unittest
import pathlib
from torch.testing._internal.common_utils import TestCase, run_tests, IS_LINUX, IS_IN_CI
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
try:
# Just in case PyTorch was not built in 'develop' mode
sys.path.append(str(REPO_ROOT))
from tools.stats.scribe import rds_write, register_rds_schema
except ImportError:
register_rds_schema = None
rds_write = None
# these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr("import torch")
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
"import torch; torch.cuda.device_count()",
)
@unittest.skipIf(not IS_LINUX, "Memory test is only implemented for Linux")
@unittest.skipIf(not IS_IN_CI, "Memory test only runs in CI")
def test_peak_memory(self):
def profile(module, name):
command = f"import {module}; import resource; print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)"
result = subprocess.run(
[sys.executable, "-c", command],
stdout=subprocess.PIPE,
)
max_rss = int(result.stdout.decode().strip())
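# Editorial note: resource.getrusage reports ru_maxrss in kilobytes on Linux
# and in bytes on macOS, so the units of max_rss are platform dependent; the
# surrounding test is gated to Linux CI.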
return {
"test_name": name,
"peak_memory_bytes": max_rss,
}
data = profile("torch", "pytorch")
baseline = profile("sys", "baseline")
rds_write(
"import_stats", [data, baseline]
)
if __name__ == "__main__":
if register_rds_schema and IS_IN_CI:
register_rds_schema(
"import_stats",
{
"test_name": "string",
"peak_memory_bytes": "int",
"time_ms": "int",
},
)
run_tests()
| 31.088235
| 116
| 0.637181
| 259
| 2,114
| 4.96139
| 0.474903
| 0.012451
| 0.052918
| 0.071595
| 0.079377
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000647
| 0.268685
| 2,114
| 67
| 117
| 31.552239
| 0.83053
| 0.145695
| 0
| 0
| 0
| 0
| 0.187778
| 0.045556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.285714
| 0
| 0.408163
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c3ffd59fa98b323892e6f69d6dc5851e106b046
| 1,365
|
py
|
Python
|
post_office/validators.py
|
fasih/django-post_office
|
e4086527a48bc0d1e5b8e0dfe9c27ab3a6260224
|
[
"MIT"
] | 661
|
2015-01-07T09:35:14.000Z
|
2022-03-24T11:45:33.000Z
|
post_office/validators.py
|
fasih/django-post_office
|
e4086527a48bc0d1e5b8e0dfe9c27ab3a6260224
|
[
"MIT"
] | 267
|
2015-01-10T22:45:08.000Z
|
2022-03-31T11:49:52.000Z
|
post_office/validators.py
|
fasih/django-post_office
|
e4086527a48bc0d1e5b8e0dfe9c27ab3a6260224
|
[
"MIT"
] | 238
|
2015-01-10T22:53:39.000Z
|
2022-03-24T12:56:16.000Z
|
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
"""
Validate email address.
Both "Recipient Name <email@example.com>" and "email@example.com" are valid.
"""
value = force_str(value)
recipient = value
if '<' in value and '>' in value:
start = value.find('<') + 1
end = value.find('>')
if start < end:
recipient = value[start:end]
validate_email(recipient)
def validate_comma_separated_emails(value):
"""
Validate every email address in a comma separated list of emails.
"""
if not isinstance(value, (tuple, list)):
raise ValidationError('Email list must be a list/tuple.')
for email in value:
try:
validate_email_with_name(email)
except ValidationError:
raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
"""
Basic Django Template syntax validation. This allows for robuster template
authoring.
"""
try:
Template(source)
except (TemplateSyntaxError, TemplateDoesNotExist) as err:
raise ValidationError(str(err))
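# Editorial usage sketch (not part of the original module):
#   validate_email_with_name('Recipient Name <email@example.com>')  # passes
#   validate_comma_separated_emails(['a@example.com', 'b@example.com'])  # passes
#   validate_comma_separated_emails('a@example.com, b@example.com')  # raises ValidationError (not a list/tuple)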
| 28.4375
| 80
| 0.677656
| 157
| 1,365
| 5.796178
| 0.401274
| 0.071429
| 0.030769
| 0.046154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000952
| 0.230769
| 1,365
| 47
| 81
| 29.042553
| 0.865714
| 0.185348
| 0
| 0.076923
| 0
| 0
| 0.056711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.153846
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c429be32392440a110878d04d24fb43356f3b77
| 1,144
|
py
|
Python
|
paperhub/input.py
|
GiuseppeBaldini/PaperHub
|
5efdee1a0374c995a6717a4baee2106df808af12
|
[
"MIT"
] | null | null | null |
paperhub/input.py
|
GiuseppeBaldini/PaperHub
|
5efdee1a0374c995a6717a4baee2106df808af12
|
[
"MIT"
] | 1
|
2020-03-27T12:05:14.000Z
|
2020-03-28T01:10:20.000Z
|
paperhub/input.py
|
GiuseppeBaldini/PaperHub
|
5efdee1a0374c995a6717a4baee2106df808af12
|
[
"MIT"
] | null | null | null |
# Input DOI / URL
import re
import sys
# Pyperclip is not built-in, check and download if needed
try:
import pyperclip
except (ImportError, ModuleNotFoundError):
print('Pyperclip module not found. Please download it.')
sys.exit(0)
# Regex for links
link_regex = re.compile(r'''(
http[s]?://
(?:[a-zA-Z]|
[0-9]|
[$-_@.&+]|
[!*\(\),]|
(?:%[0-9a-fA-F][0-9a-fA-F]))+
)''', re.IGNORECASE | re.VERBOSE)
# Get DOI / URL using different methods
# Method 1: argument
try:
input_link = sys.argv[1]
# Method 2: clipboard
except IndexError:
input_link = pyperclip.paste()
# Method 3: manual input
def regex_check(regex, link):
"""
Check using regex. If DOI/URL are not in the right format,
require manual input until correct or Enter to quit.
"""
while True:
match = re.match(regex, link)
if match == None:
link = str(input('''Enter valid DOI / URL or press Enter to quit: > '''))
if link == '':
exit()
else:
continue
else:
return link
url = regex_check(link_regex, input_link)
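# Editorial note: link_regex only matches http(s) URLs, e.g.
# 'https://doi.org/10.1000/xyz123' (placeholder DOI); a bare DOI such as
# '10.1000/xyz123' has no scheme, so regex_check would keep prompting for a
# valid link.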
| 23.346939
| 85
| 0.581294
| 152
| 1,144
| 4.322368
| 0.526316
| 0.03653
| 0.015221
| 0.018265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013366
| 0.280594
| 1,144
| 49
| 86
| 23.346939
| 0.784933
| 0.262238
| 0
| 0.129032
| 0
| 0
| 0.25641
| 0.035409
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.193548
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c42cb2bbf7ba2f9f5bbb8435dcd766270fb6340
| 6,338
|
py
|
Python
|
main.py
|
chillum1718/EffcientNetV2
|
4338652454185db648a6ea5df04528bcafb24ed2
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
chillum1718/EffcientNetV2
|
4338652454185db648a6ea5df04528bcafb24ed2
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
chillum1718/EffcientNetV2
|
4338652454185db648a6ea5df04528bcafb24ed2
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import csv
import os
import torch
import tqdm
from torch import distributed
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from nets import nn
from utils import util
data_dir = os.path.join('..', 'Dataset', 'IMAGENET')
def batch(images, target, model, criterion=None):
images = images.cuda()
target = target.cuda()
if criterion:
with torch.cuda.amp.autocast():
loss = criterion(model(images), target)
return loss
else:
return util.accuracy(model(images), target, top_k=(1, 5))
def train(args):
epochs = 350
batch_size = 288
util.set_seeds(args.rank)
model = nn.EfficientNet().cuda()
lr = batch_size * torch.cuda.device_count() * 0.256 / 4096
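# Editorial note: linear learning-rate scaling -- the 0.256 base rate (per 4096
# images, as in the original EfficientNet training recipe) is scaled by
# batch_size * torch.cuda.device_count() / 4096.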
optimizer = nn.RMSprop(util.add_weight_decay(model), lr, 0.9, 1e-3, momentum=0.9)
ema = nn.EMA(model)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
else:
model = torch.nn.DataParallel(model)
criterion = nn.CrossEntropyLoss().cuda()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'),
transforms.Compose([util.RandomResize(),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
util.RandomAugment(),
transforms.ToTensor(), normalize]))
if args.distributed:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = None
loader = data.DataLoader(dataset, batch_size, sampler=sampler, num_workers=8, pin_memory=True)
scheduler = nn.StepLR(optimizer)
amp_scale = torch.cuda.amp.GradScaler()
with open(f'weights/{scheduler}.csv', 'w') as f:
if args.local_rank == 0:
writer = csv.DictWriter(f, fieldnames=['epoch', 'acc@1', 'acc@5'])
writer.writeheader()
best_acc1 = 0
for epoch in range(0, epochs):
if args.distributed:
sampler.set_epoch(epoch)
if args.local_rank == 0:
print(('\n' + '%10s' * 2) % ('epoch', 'loss'))
bar = tqdm.tqdm(loader, total=len(loader))
else:
bar = loader
model.train()
for images, target in bar:
loss = batch(images, target, model, criterion)
optimizer.zero_grad()
amp_scale.scale(loss).backward()
amp_scale.step(optimizer)
amp_scale.update()
ema.update(model)
torch.cuda.synchronize()
if args.local_rank == 0:
bar.set_description(('%10s' + '%10.4g') % ('%g/%g' % (epoch + 1, epochs), loss))
scheduler.step(epoch + 1)
if args.local_rank == 0:
acc1, acc5 = test(ema.model.eval())
writer.writerow({'acc@1': str(f'{acc1:.3f}'),
'acc@5': str(f'{acc5:.3f}'),
'epoch': str(epoch + 1).zfill(3)})
util.save_checkpoint({'state_dict': ema.model.state_dict()}, acc1 > best_acc1)
best_acc1 = max(acc1, best_acc1)
if args.distributed:
torch.distributed.destroy_process_group()
torch.cuda.empty_cache()
def test(model=None):
if model is None:
model = nn.EfficientNet()
model.load_state_dict(torch.load('weights/best.pt', 'cpu')['state_dict'])
model = model.cuda()
model.eval()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'),
transforms.Compose([transforms.Resize(416),
transforms.CenterCrop(384),
transforms.ToTensor(), normalize]))
loader = data.DataLoader(dataset, 48, num_workers=os.cpu_count(), pin_memory=True)
top1 = util.AverageMeter()
top5 = util.AverageMeter()
with torch.no_grad():
for images, target in tqdm.tqdm(loader, ('%10s' * 2) % ('acc@1', 'acc@5')):
acc1, acc5 = batch(images, target, model)
torch.cuda.synchronize()
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
acc1, acc5 = top1.avg, top5.avg
print('%10.3g' * 2 % (acc1, acc5))
if model is None:
torch.cuda.empty_cache()
else:
return acc1, acc5
def print_parameters():
model = nn.EfficientNet().eval()
_ = model(torch.zeros(1, 3, 224, 224))
params = sum(p.numel() for p in model.parameters())
print(f'Number of parameters: {int(params)}')
def benchmark():
shape = (1, 3, 384, 384)
util.torch2onnx(nn.EfficientNet().export().eval(), shape)
util.onnx2caffe()
util.print_benchmark(shape)
def main():
# python -m torch.distributed.launch --nproc_per_node=3 main.py --train
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--benchmark', action='store_true')
parser.add_argument('--train', action='store_true')
parser.add_argument('--test', action='store_true')
args = parser.parse_args()
args.distributed = False
args.rank = 0
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.rank = torch.distributed.get_rank()
if args.local_rank == 0:
if not os.path.exists('weights'):
os.makedirs('weights')
if args.local_rank == 0:
print_parameters()
if args.benchmark:
benchmark()
if args.train:
train(args)
if args.test:
test()
if __name__ == '__main__':
main()
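# Editorial usage sketch (flags as defined in main() above; see the comment
# there for the multi-GPU distributed launch):
#   python main.py --train
#   python main.py --test
#   python main.py --benchmark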
| 36.011364
| 100
| 0.570369
| 746
| 6,338
| 4.737265
| 0.286863
| 0.023769
| 0.029428
| 0.025467
| 0.123373
| 0.087719
| 0.057725
| 0.057725
| 0.057725
| 0.057725
| 0
| 0.038065
| 0.295361
| 6,338
| 175
| 101
| 36.217143
| 0.753247
| 0.010887
| 0
| 0.176871
| 0
| 0
| 0.055848
| 0.005266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.07483
| 0
| 0.136054
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c42d1030d5bf12bec44656b0c6d8328e6f4647e
| 2,897
|
py
|
Python
|
cgbind/esp.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | 7
|
2020-06-08T16:18:56.000Z
|
2021-01-28T09:59:16.000Z
|
cgbind/esp.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | null | null | null |
cgbind/esp.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | 2
|
2020-11-16T04:52:43.000Z
|
2021-06-04T05:07:29.000Z
|
import numpy as np
from time import time
from cgbind.atoms import get_atomic_number
from cgbind.log import logger
from cgbind.constants import Constants
from cgbind.exceptions import CgbindCritical
def get_esp_cube_lines(charges, atoms):
"""
From a list of charges and a set of atoms, create the electrostatic potential
map on a uniform grid spanning from the most negative x, y, z coordinates
minus 5 Å to the largest x, y, z coordinates plus 5 Å
:param charges: (list(float))
:param atoms: (list(autode.atoms.Atom))
:return: (list(str)), (min ESP value, max ESP value)
"""
logger.info('Calculating the ESP and generating a .cube file')
start_time = time()
try:
from esp_gen import get_cube_lines
except ModuleNotFoundError:
raise CgbindCritical('esp_gen not available. cgbind must be '
'installed with the --esp_gen flag')
if charges is None:
logger.error('Could not generate an .cube file, charges were None')
return [], (None, None)
coords = np.array([atom.coord for atom in atoms])
charges = np.array(charges)
# Get the max and min points from the coordinates
max_cart_values = np.max(coords, axis=0)
min_cat_values = np.min(coords, axis=0)
# The grid needs to be slightly larger than the smallest/largest Cartesian
# coordinate
# NOTE: All distances from here are in Bohr (a0) i.e. atomic units
min_carts = Constants.ang2a0 * (min_cat_values - 5 * np.ones(3))
max_carts = Constants.ang2a0 * (max_cart_values + 5 * np.ones(3))
coords = np.array([Constants.ang2a0 * np.array(coord) for coord in coords])
# Number of voxels will be nx * ny * nz
nx, ny, nz = 50, 50, 50
vox_size = max_carts - min_carts
rx, ry, rz = vox_size[0] / nx, vox_size[1] / ny, vox_size[2] / nz
# Write the .cube file lines
cube_file_lines = ['Generated by cgbind\n', 'ESP\n']
n_atoms = len(coords)
min_x, min_y, min_z = min_carts
cube_file_lines.append(f'{n_atoms:>5d}{min_x:>12f}{min_y:>12f}{min_z:>12f}\n') # n_atoms origin(x y z)
cube_file_lines.append(f'{nx:>5d}{rx:>12f}{0.0:>12f}{0.0:>12f}\n') # Number of voxels and their size
cube_file_lines.append(f'{ny:>5d}{0.0:>12f}{ry:>12f}{0.0:>12f}\n')
cube_file_lines.append(f'{nz:>5d}{0.0:>12f}{0.0:>12f}{rz:>12f}\n')
for atom in atoms:
x, y, z = atom.coord
cube_file_lines.append(f'{get_atomic_number(atom):>5d}{0.0:>12f}'
f'{Constants.ang2a0*x:>12f}{Constants.ang2a0*y:>12f}{Constants.ang2a0*z:>12f}\n')
# Looping over x, y, z is slow in python so use Cython extension
cube_val_lines, min_val, max_val = get_cube_lines(nx, ny, nz, coords, min_carts, charges, vox_size)
cube_file_lines += cube_val_lines
logger.info(f'ESP generated in {time()-start_time:.3f} s')
return cube_file_lines, (min_val, max_val)
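# Editorial usage sketch (not part of the original module):
#   lines, (esp_min, esp_max) = get_esp_cube_lines(charges, atoms)
#   with open('esp.cube', 'w') as cube_file:
#       cube_file.writelines(lines)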
| 38.118421
| 112
| 0.661374
| 478
| 2,897
| 3.866109
| 0.307531
| 0.047619
| 0.063312
| 0.051407
| 0.103896
| 0.010823
| 0
| 0
| 0
| 0
| 0
| 0.036076
| 0.215395
| 2,897
| 75
| 113
| 38.626667
| 0.776947
| 0.233345
| 0
| 0
| 0
| 0.095238
| 0.23932
| 0.14102
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.166667
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c43dad16fef03fbc908a7aa39b6c4226fc2883c
| 6,051
|
py
|
Python
|
codes/test_specular.py
|
mcdenoising/AdvMCDenoise
|
4ba00098c2d0f50a7dfc1e345b5e50a20768d7e8
|
[
"MIT"
] | 35
|
2019-11-04T06:49:39.000Z
|
2022-01-13T07:53:37.000Z
|
codes/test_specular.py
|
qbhan/Adversarial_MCdenoising
|
a99bf312baf2430d750d70a79270aca0720532aa
|
[
"MIT"
] | 1
|
2019-11-28T22:33:11.000Z
|
2019-11-28T22:33:11.000Z
|
codes/test_specular.py
|
qbhan/Adversarial_MCdenoising
|
a99bf312baf2430d750d70a79270aca0720532aa
|
[
"MIT"
] | 8
|
2019-11-08T04:58:08.000Z
|
2020-11-03T07:49:58.000Z
|
import os
import sys
import logging
import time
import argparse
import numpy as np
from collections import OrderedDict
import scripts.options as option
import utils.util as util
from data.util import bgr2ycbcr
from data import create_dataset, create_dataloader
from models import create_model
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=False)
util.mkdirs((path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))
opt = option.dict_to_nonedict(opt)
util.setup_logger(None, opt['path']['log'], 'test.log', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
# Create test dataset and dataloader
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set = create_dataset(dataset_opt)
test_loader = create_dataloader(test_set, dataset_opt)
logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
test_loaders.append(test_loader)
# Create model
model = create_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info('\nTesting [{:s}]...'.format(test_set_name))
test_start_time = time.time()
dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
util.mkdir(dataset_dir)
test_results = OrderedDict()
test_results['psnr'] = []
test_results['ssim'] = []
test_results['psnr_y'] = []
test_results['ssim_y'] = []
for data in test_loader:
need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
# need_GT = True
model.feed_data_specular(data, need_GT=need_GT)
if opt["image_type"] == "exr":
y = data["x_offset"]
x = data["y_offset"]
img_path = data['NOISY_path'][0]
img_name = os.path.splitext(os.path.basename(img_path))[0]
start = time.time()
model.test() # test
end = time.time()
print("Time elapsed... %f "%(end - start))
visuals = model.get_current_visuals(need_GT=need_GT)
denoised_img = util.tensor2img(visuals['DENOISED']) # uint8
noisy_img = util.tensor2img(visuals['NOISY'])
gt_img = util.tensor2img(visuals['GT']) # uint8
# save images
suffix = opt['suffix']
if suffix is None:
suffix = ""
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.png')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.png')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.png')
# calculate PSNR and SSIM
if need_GT:
# gt_img = util.tensor2img(visuals['GT'])
gt_img = gt_img / 255.
denoised_img = denoised_img / 255.
crop_border = test_loader.dataset.opt['scale']
cropped_denoised_img = denoised_img#[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_gt_img = gt_img#[crop_border:-crop_border, crop_border:-crop_border, :]
psnr = util.calculate_psnr(cropped_denoised_img * 255, cropped_gt_img * 255)
ssim = util.calculate_ssim(cropped_denoised_img * 255, cropped_gt_img * 255)
test_results['psnr'].append(psnr)
test_results['ssim'].append(ssim)
if gt_img.shape[2] == 3: # RGB image
denoised_img_y = bgr2ycbcr(denoised_img, only_y=True)
gt_img_y = bgr2ycbcr(gt_img, only_y=True)
cropped_denoised_img_y = denoised_img_y[crop_border:-crop_border, crop_border:-crop_border]
cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]
psnr_y = util.calculate_psnr(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
ssim_y = util.calculate_ssim(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
test_results['psnr_y'].append(psnr_y)
test_results['ssim_y'].append(ssim_y)
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'\
.format(img_name, psnr, ssim, psnr_y, ssim_y))
else:
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
else:
logger.info(img_name)
if opt["image_type"] == "exr":
denoised_exr = util.tensor2exr(visuals['DENOISED']) # uint8
noisy_exr = util.tensor2exr(visuals['NOISY'])
gt_exr = util.tensor2exr(visuals['GT']) # uint8
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.exr')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.exr')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.exr')
util.saveEXRfromMatrix(save_DENOISED_img_path, denoised_exr, (x, y))
util.saveEXRfromMatrix(save_NOISY_img_path, noisy_exr, (x, y))
util.saveEXRfromMatrix(save_GT_img_path, gt_exr, (x, y))
if need_GT: # metrics
# Average PSNR/SSIM results
ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n'\
.format(test_set_name, ave_psnr, ave_ssim))
# if test_results['psnr_y'] and test_results['ssim_y']:
# ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
# ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
# logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n'\
# .format(ave_psnr_y, ave_ssim_y))
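`util.calculate_psnr` and `util.calculate_ssim` are project helpers that are not shown here. As a hedged sketch of what a PSNR helper of this kind typically computes (the helper's exact behaviour in this repository is an assumption; the formula itself is standard), given the 0–255 images passed above:

```python
import numpy as np

def calculate_psnr_sketch(img1, img2, max_val=255.0):
    """PSNR = 10 * log10(max_val^2 / MSE); returns +inf for identical images."""
    diff = np.asarray(img1, dtype=np.float64) - np.asarray(img2, dtype=np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10((max_val ** 2) / mse)
```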
| 44.822222
| 107
| 0.637085
| 830
| 6,051
| 4.350602
| 0.174699
| 0.057879
| 0.046525
| 0.066464
| 0.301855
| 0.28524
| 0.204376
| 0.204376
| 0.169482
| 0.154528
| 0
| 0.014781
| 0.228557
| 6,051
| 134
| 108
| 45.156716
| 0.758783
| 0.11056
| 0
| 0.06
| 0
| 0.02
| 0.110095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4403124da36e660f5e49831ef1324004e35d3f
| 5,403
|
py
|
Python
|
neuralNetwork/layer3/nerualNet.py
|
zzw0929/deeplearning
|
d96aadd71838fa60a4c031b13fe475d4839e8a33
|
[
"Apache-2.0"
] | 4
|
2017-09-04T07:54:33.000Z
|
2017-09-04T16:55:04.000Z
|
neuralNetwork/layer3/nerualNet.py
|
zzw0929/deeplearning
|
d96aadd71838fa60a4c031b13fe475d4839e8a33
|
[
"Apache-2.0"
] | null | null | null |
neuralNetwork/layer3/nerualNet.py
|
zzw0929/deeplearning
|
d96aadd71838fa60a4c031b13fe475d4839e8a33
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
import time
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
# plt.show()
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates
# the contour plot below.
def plot_decision_boundary(pred_func):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max,
h))
# Predict the function value for the whole grid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
#plt.show()
num_examples = len(X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation to calculate our predictions
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
#print(11111111)
#print(probs)
#time.sleep(10)
correct_logprobs = -np.log(probs[range(num_examples), y])
data_loss = np.sum(correct_logprobs)
# Add regularization term to loss (optional)
# L2 regularization
data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1./num_examples * data_loss
def predict(model, x):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
np.random.seed(0)
W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
b1 = np.zeros((1, nn_hdim))
W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
b2 = np.zeros((1, nn_output_dim))
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Backpropagation
delta3 = probs
delta3[range(num_examples), y] -= 1
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
# Gradient descent parameter update
W1 += -epsilon * dW1
b1 += -epsilon * db1
W2 += -epsilon * dW2
b2 += -epsilon * db2
# Assign new parameters to the model
model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" %(i, calculate_loss(model)))
return model
def test_1():
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
plt.show()
def test_2():
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer size %d' % nn_hdim)
model = build_model(nn_hdim)
plot_decision_boundary(lambda x: predict(model, x))
plt.show()
if __name__ == '__main__':
#print(y)
#print(12121)
#print(X)
test_1()
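The backpropagation in `build_model` uses the standard identity that, for a softmax output trained with cross-entropy, the output-layer error is `probs - one_hot(y)`; the in-place line `delta3[range(num_examples), y] -= 1` is exactly that. A small standalone check of the identity (not part of the original script):

```python
import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.3, 0.6]])
y = np.array([0, 2])

delta = probs.copy()
delta[range(len(y)), y] -= 1      # in-place form used in build_model()

one_hot = np.eye(3)[y]            # explicit one-hot targets
assert np.allclose(delta, probs - one_hot)
```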
| 34.634615
| 99
| 0.644827
| 851
| 5,403
| 3.985899
| 0.283196
| 0.02388
| 0.019458
| 0.015035
| 0.177182
| 0.163325
| 0.143868
| 0.143868
| 0.143868
| 0.118514
| 0
| 0.046478
| 0.227466
| 5,403
| 155
| 100
| 34.858065
| 0.766172
| 0.295021
| 0
| 0.231579
| 0
| 0
| 0.040648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0.021053
| 0.073684
| 0
| 0.168421
| 0.042105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4523a703ff0d45d61f298f70ea4dd4f700b946
| 1,188
|
py
|
Python
|
tljh_repo2docker/tests/utils.py
|
TimoRoth/tljh-repo2docker
|
35e7e940266de0490990acc780b64802afe973c1
|
[
"BSD-3-Clause"
] | 46
|
2020-05-04T19:32:39.000Z
|
2022-03-25T13:47:41.000Z
|
tljh_repo2docker/tests/utils.py
|
TimoRoth/tljh-repo2docker
|
35e7e940266de0490990acc780b64802afe973c1
|
[
"BSD-3-Clause"
] | 41
|
2020-04-29T09:58:34.000Z
|
2022-03-15T21:44:15.000Z
|
tljh_repo2docker/tests/utils.py
|
TimoRoth/tljh-repo2docker
|
35e7e940266de0490990acc780b64802afe973c1
|
[
"BSD-3-Clause"
] | 9
|
2020-04-29T08:42:12.000Z
|
2021-11-04T04:01:35.000Z
|
import asyncio
import json
from aiodocker import Docker, DockerError
from jupyterhub.tests.utils import api_request
async def add_environment(
app, *, repo, ref="master", name="", memory="", cpu=""
):
"""Use the POST endpoint to add a new environment"""
r = await api_request(
app,
"environments",
method="post",
data=json.dumps(
{"repo": repo, "ref": ref, "name": name, "memory": memory, "cpu": cpu,}
),
)
return r
async def wait_for_image(*, image_name):
"""wait until an image is built"""
count, retries = 0, 60 * 10
image = None
async with Docker() as docker:
while count < retries:
await asyncio.sleep(1)
try:
image = await docker.images.inspect(image_name)
except DockerError:
count += 1
continue
else:
break
return image
async def remove_environment(app, *, image_name):
"""Use the DELETE endpoint to remove an environment"""
r = await api_request(
app, "environments", method="delete", data=json.dumps({"name": image_name,}),
)
return r
| 25.826087
| 85
| 0.574074
| 139
| 1,188
| 4.827338
| 0.460432
| 0.053651
| 0.050671
| 0.059613
| 0.14307
| 0.14307
| 0.14307
| 0.14307
| 0
| 0
| 0
| 0.008547
| 0.310606
| 1,188
| 45
| 86
| 26.4
| 0.810745
| 0
| 0
| 0.114286
| 0
| 0
| 0.061069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c464b6985d41cae6c644e444882f725004b5bea
| 657
|
py
|
Python
|
05_ARIADNE_SUBSCRIPTIONS_GRAPHQL/api/resolvers/mutations/__init__.py
|
CrispenGari/python-flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | 2
|
2021-11-08T07:37:18.000Z
|
2021-11-13T09:23:46.000Z
|
05_ARIADNE_SUBSCRIPTIONS_GRAPHQL/api/resolvers/mutations/__init__.py
|
CrispenGari/Flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | null | null | null |
05_ARIADNE_SUBSCRIPTIONS_GRAPHQL/api/resolvers/mutations/__init__.py
|
CrispenGari/Flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | null | null | null |
from api import db
from uuid import uuid4
from ariadne import MutationType
from api.models import Post
from api.store import queues
mutation = MutationType()
@mutation.field("createPost")
async def create_post_resolver(obj, info, input):
try:
post = Post(postId=uuid4(), caption=input["caption"])
db.session.add(post)
db.session.commit()
for queue in queues:
queue.put(post)
return{
"error": None,
"post": post
}
except Exception as e:
return{
"error": {"message":str(e), "field": "unknown"},
"post": None
}
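The module pushes every new post into each queue in `api.store.queues`, which suggests a subscription resolver elsewhere in the package drains those queues. Below is a minimal sketch of what that counterpart could look like with Ariadne's `SubscriptionType`; the field name `postAdded`, the use of `asyncio.Queue`, and the per-client registration are assumptions about this project, and only the source/field decorator pattern is Ariadne's documented approach (note the mutation above calls `queue.put(post)` synchronously, so the real queue type may differ):

```python
import asyncio
from ariadne import SubscriptionType
from api.store import queues  # same module-level collection the mutation writes to

subscription = SubscriptionType()

@subscription.source("postAdded")              # hypothetical subscription field name
async def post_added_source(obj, info):
    queue = asyncio.Queue()                    # assumes asyncio-compatible queues
    queues.append(queue)
    try:
        while True:
            yield await queue.get()
    finally:
        queues.remove(queue)

@subscription.field("postAdded")
def post_added_resolver(post, info):
    return {"postId": str(post.postId), "caption": post.caption}
```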
| 24.333333
| 61
| 0.572298
| 75
| 657
| 4.986667
| 0.56
| 0.05615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004454
| 0.316591
| 657
| 27
| 62
| 24.333333
| 0.828508
| 0
| 0
| 0.086957
| 0
| 0
| 0.082192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c482fa55469ed7e8a8294ff4e637257f9060775
| 6,275
|
py
|
Python
|
source/tweet.py
|
jfilter/foia-bot
|
11a9e31116dddfcd7bbd17730be3bdb9cec65e27
|
[
"MIT"
] | null | null | null |
source/tweet.py
|
jfilter/foia-bot
|
11a9e31116dddfcd7bbd17730be3bdb9cec65e27
|
[
"MIT"
] | null | null | null |
source/tweet.py
|
jfilter/foia-bot
|
11a9e31116dddfcd7bbd17730be3bdb9cec65e27
|
[
"MIT"
] | null | null | null |
"""
tweet stuff in intervals
"""
import time
import datetime
import twitter
from markov_chains import german_text
from config import config_no, config_yes
MAX_TWEET_LENGTH = 280
greeting = ' Sehr geehrte/r Antragsteller/in.'
ending = ' MfG'
num_tweets = 3
class FoiaBot:
def __init__(self, config):
self.api = twitter.Api(consumer_key=config["consumer_key"],
consumer_secret=config["consumer_secret"],
access_token_key=config["access_token"],
access_token_secret=config["access_token_secret"], sleep_on_rate_limit=True)
self.screen_name = config["screen_name"]
self.model = german_text.setup_model(config["model_path"])
self.hour_to_tweet = config["hour_to_tweet"]
def get_favorites(self):
favorites = self.api.GetFavorites(
screen_name=self.screen_name, count=200)
print(favorites)
fav_set = set([f.id for f in favorites])
return fav_set
def get_status_to_work_on(self):
favorites = self.get_favorites()
status_list = self.api.GetMentions(count=200, trim_user=True,
contributor_details=False, include_entities=False)
for status in status_list:
print(status)
if status.id in favorites:
continue
if status.in_reply_to_status_id is not None:
continue
if not status.text.startswith('@' + self.screen_name):
continue
self.post_replies(status)
def post_replies(self, status):
tweets = self.create_tweets()
print(tweets)
success = True
reply_to_status_id = status.id
for tweet in tweets:
response = self.api.PostUpdate(tweet, in_reply_to_status_id=reply_to_status_id, auto_populate_reply_metadata=True,
exclude_reply_user_ids=False, trim_user=True, verify_status_length=False)
if response is None:
success = False
break
else:
reply_to_status_id = response.id
if success:
self.api.CreateFavorite(status=status)
def generate_sentence(self, tweet_text, chars_left, set_limit=False):
max_length = 150
if set_limit:
max_length = chars_left
new_sent = self.model.make_short_sentence(max_length, tries=100)
if new_sent is not None and len(new_sent) < chars_left:
tweet_text += ' ' + new_sent
return tweet_text
# https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
def get_date_from_twitter_string(self, created_at):
x = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
return datetime.datetime.fromtimestamp(time.mktime(x))
def tweet_once_a_day(self):
now = datetime.datetime.now()
print(now.hour)
if now.hour == self.hour_to_tweet:
last_status_list = self.api.GetUserTimeline(screen_name=self.screen_name, count=1,
include_rts=False, trim_user=True, exclude_replies=True)
print(last_status_list)
if last_status_list is None:
return
if len(last_status_list) == 0:
self.post_single_tweet()
if len(last_status_list) == 1:
last_status = last_status_list[0]
created_at_date = self.get_date_from_twitter_string(
last_status.created_at)
time_diff = now - created_at_date
print('time_diff', time_diff)
time_diff_hours = time_diff.seconds / 3600 + time_diff.days * 24
print(time_diff_hours)
if time_diff_hours > 20: # something is broken with the date but whatever
self.post_single_tweet()
def post_single_tweet(self):
tweet_text = self.generate_single_tweet_text()
response = self.api.PostUpdate(tweet_text, verify_status_length=False)
def generate_single_tweet_text(self):
tweet_text = ""
while True:
chars_left = MAX_TWEET_LENGTH - len(tweet_text)
chars_left -= 1 # for the space
if chars_left < 20:
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
return tweet_text
def create_tweets(self):
tweets = []
for i in range(num_tweets):
tweet_text = f'{i + 1}/{num_tweets}'
if i == 0:
tweet_text += greeting
while True:
chars_left = MAX_TWEET_LENGTH - \
len(tweet_text) - 1 # because of space
# ensure space for the ending
if i + 1 == num_tweets:
chars_left -= len(ending)
if chars_left < 20:
# at ending
if i + 1 == num_tweets:
tweet_text += ending
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
tweets.append(tweet_text)
return tweets
def run(self):
self.get_status_to_work_on()
def main():
print('main called')
no_bot = FoiaBot(config_no)
print('after setting up no bot')
yes_bot = FoiaBot(config_yes)
print('after setting up yes bot')
no_bot.run()
print('after running no bot')
yes_bot.run()
print('after running yes bot')
no_bot.tweet_once_a_day()
yes_bot.tweet_once_a_day()
print('after tweet once a day')
def lambda_handler(event, context):
print('handler called')
main()
print('handler about to finish')
# if __name__ == '__main__':
# main()
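`german_text.setup_model` is not shown; the `make_short_sentence(max_length, tries=100)` call strongly suggests it wraps a markovify text model. A hedged sketch of how such a model could be built (the function name and corpus path are placeholders, not this repository's actual code):

```python
import markovify

def setup_model_sketch(corpus_path, state_size=2):
    """Build a markovify text model from a plain-text corpus.

    make_short_sentence(n, tries=k) then returns sentences of at most n
    characters, which is the character-budget API generate_sentence() relies on.
    """
    with open(corpus_path, encoding="utf-8") as f:
        return markovify.Text(f.read(), state_size=state_size)

# model = setup_model_sketch("corpus_de.txt")
# model.make_short_sentence(140, tries=100)
```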
| 33.918919
| 126
| 0.579283
| 754
| 6,275
| 4.514589
| 0.236074
| 0.060811
| 0.024677
| 0.031727
| 0.210341
| 0.124559
| 0.096357
| 0.096357
| 0.096357
| 0.096357
| 0
| 0.012811
| 0.340717
| 6,275
| 184
| 127
| 34.103261
| 0.810007
| 0.043187
| 0
| 0.202797
| 0
| 0
| 0.057773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.034965
| 0
| 0.174825
| 0.104895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4a756656ca930b517891bc50444eed71522301
| 2,537
|
py
|
Python
|
atlas-outreach-data-tools-framework-1.1/Configurations/PlotConf_TTbarAnalysis.py
|
Harvard-Neutrino/phys145
|
c3dc5788128fa2a7db0af0c796cf3afd957bf0ed
|
[
"CC0-1.0"
] | null | null | null |
atlas-outreach-data-tools-framework-1.1/Configurations/PlotConf_TTbarAnalysis.py
|
Harvard-Neutrino/phys145
|
c3dc5788128fa2a7db0af0c796cf3afd957bf0ed
|
[
"CC0-1.0"
] | null | null | null |
atlas-outreach-data-tools-framework-1.1/Configurations/PlotConf_TTbarAnalysis.py
|
Harvard-Neutrino/phys145
|
c3dc5788128fa2a7db0af0c796cf3afd957bf0ed
|
[
"CC0-1.0"
] | 1
|
2021-11-30T02:08:12.000Z
|
2021-11-30T02:08:12.000Z
|
config = {
"Luminosity": 1000,
"InputDirectory": "results",
"Histograms" : {
"WtMass" : {},
"etmiss" : {},
"lep_n" : {},
"lep_pt" : {},
"lep_eta" : {},
"lep_E" : {},
"lep_phi" : {"y_margin" : 0.6},
"lep_charge" : {"y_margin" : 0.6},
"lep_type" : {"y_margin" : 0.5},
"lep_ptconerel30" : {},
"lep_etconerel20" : {},
"lep_d0" : {},
"lep_z0" : {},
"n_jets" : {},
"jet_pt" : {},
"jet_m" : {},
"jet_jvf" : {"y_margin" : 0.4},
"jet_eta" : {},
"jet_MV1" : {"y_margin" : 0.3},
"vxp_z" : {},
"pvxp_n" : {},
},
"Paintables": {
"Stack": {
"Order" : ["Diboson", "DrellYan", "W", "Z", "stop", "ttbar"],
"Processes" : {
"Diboson" : {
"Color" : "#fa7921",
"Contributions" : ["WW", "WZ", "ZZ"]},
"DrellYan": {
"Color" : "#5bc0eb",
"Contributions" : ["DYeeM08to15", "DYeeM15to40", "DYmumuM08to15", "DYmumuM15to40", "DYtautauM08to15", "DYtautauM15to40"]},
"W": {
"Color" : "#e55934",
"Contributions" : ["WenuJetsBVeto", "WenuWithB", "WenuNoJetsBVeto", "WmunuJetsBVeto", "WmunuWithB", "WmunuNoJetsBVeto", "WtaunuJetsBVeto", "WtaunuWithB", "WtaunuNoJetsBVeto"]},
"Z": {
"Color" : "#086788",
"Contributions" : ["Zee", "Zmumu", "Ztautau"]},
"stop": {
"Color" : "#fde74c",
"Contributions" : ["stop_tchan_top", "stop_tchan_antitop", "stop_schan", "stop_wtchan"]},
"ttbar": {
"Color" : "#9bc53d",
"Contributions" : ["ttbar_lep", "ttbar_had"]}
}
},
"data" : {
"Contributions": ["data_Egamma", "data_Muons"]}
},
"Depictions": {
"Order": ["Main", "Data/MC"],
"Definitions" : {
"Data/MC": {
"type" : "Agreement",
"Paintables" : ["data", "Stack"]
},
"Main": {
"type" : "Main",
"Paintables": ["Stack", "data"]
},
}
},
}
| 32.525641
| 192
| 0.358691
| 160
| 2,537
| 5.475
| 0.54375
| 0.039954
| 0.045662
| 0.020548
| 0.027397
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048551
| 0.45605
| 2,537
| 77
| 193
| 32.948052
| 0.586232
| 0
| 0
| 0
| 0
| 0
| 0.353961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4b16b905a82a27b27a39983a45cc2293e0e0ce
| 1,943
|
py
|
Python
|
modules/optimizations/dead_codes.py
|
OMGhozlan/deobshell
|
701c8a09f9258442255013605185ed0a7fbac704
|
[
"MIT"
] | null | null | null |
modules/optimizations/dead_codes.py
|
OMGhozlan/deobshell
|
701c8a09f9258442255013605185ed0a7fbac704
|
[
"MIT"
] | null | null | null |
modules/optimizations/dead_codes.py
|
OMGhozlan/deobshell
|
701c8a09f9258442255013605185ed0a7fbac704
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from ..logger import log_debug
from ..utils import parent_map, replace_node, is_prefixed_var, get_used_vars
def opt_unused_variable(ast):
parents = parent_map(ast)
used_vars = get_used_vars(ast)
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
if subnodes[0].attrib["VariablePath"].lower() not in used_vars:
if not is_prefixed_var(subnodes[0].attrib["VariablePath"]):
log_debug("Remove assignment of unused variable %s" % (subnodes[0].attrib["VariablePath"]))
parents[node].remove(node)
return True
return False
def opt_remove_uninitialised_variable_usage(ast):
assigned = set()
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
assigned.add(subnodes[0].attrib["VariablePath"].lower())
if node.tag in ["BinaryExpressionAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
variable = subnodes[0]
other = subnodes[1]
elif subnodes[1].tag == "VariableExpressionAst":
variable = subnodes[1]
other = subnodes[0]
else:
variable, other = None, None
if variable is not None and other is not None:
if variable.attrib["VariablePath"].lower() not in assigned:
if not is_prefixed_var(variable.attrib["VariablePath"]):
log_debug("Remove unassigned variable use '%s'" % (variable.attrib["VariablePath"]))
replace_node(ast, node, other)
return True
return False
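`parent_map`, `replace_node`, `is_prefixed_var` and `get_used_vars` come from the package's own utils module and are not shown. For orientation, `parent_map` is presumably the usual ElementTree child-to-parent index; the sketch below shows that common idiom, not this project's actual implementation:

```python
def parent_map_sketch(ast):
    """Map every node of an ElementTree to its parent element.

    ElementTree nodes keep no parent pointer, so removing a node (as
    opt_unused_variable does with parents[node].remove(node)) needs this index.
    """
    return {child: parent for parent in ast.iter() for child in parent}
```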
| 36.660377
| 116
| 0.574884
| 207
| 1,943
| 5.275362
| 0.289855
| 0.074176
| 0.040293
| 0.098901
| 0.410256
| 0.229853
| 0.229853
| 0.229853
| 0.18315
| 0.18315
| 0
| 0.009878
| 0.322697
| 1,943
| 52
| 117
| 37.365385
| 0.819909
| 0.006176
| 0
| 0.358974
| 0
| 0
| 0.158631
| 0.066356
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.051282
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4c40d49329ce6958ed3b498e11172edf73d231
| 1,433
|
py
|
Python
|
Convert Integer A to Integer B.py
|
RijuDasgupta9116/LintCode
|
4629a3857b2c57418b86a3b3a7180ecb15e763e3
|
[
"Apache-2.0"
] | 321
|
2015-01-04T04:01:44.000Z
|
2022-03-20T13:21:55.000Z
|
Convert Integer A to Integer B.py
|
leifoo/LintCode
|
2520762a1cfbd486081583136396a2b2cac6e4fb
|
[
"Apache-2.0"
] | 1
|
2016-01-11T04:29:37.000Z
|
2016-01-11T04:29:37.000Z
|
Convert Integer A to Integer B.py
|
leifoo/LintCode
|
2520762a1cfbd486081583136396a2b2cac6e4fb
|
[
"Apache-2.0"
] | 114
|
2015-01-27T06:08:17.000Z
|
2022-03-23T03:58:11.000Z
|
"""
Determine the number of bits required to convert integer A to integer B
Example
Given n = 31, m = 14, return 2
(31)10=(11111)2
(14)10=(01110)2
"""
__author__ = 'Danyang'
class Solution:
def bitSwapRequired(self, a, b):
"""
:param a:
:param b:
:return: int
"""
a = self.to_bin(a)
b = self.to_bin(b)
diff = len(a)-len(b)
ret = 0
if diff<0:
a, b = b, a
diff *= -1
b = "0"*diff+b
for i in xrange(len(b)):
if a[i]!=b[i]:
ret += 1
return ret
def to_bin(self, n):
"""
2's complement
32-bit
:param n:
:return:
"""
"""
:param n:
:return:
"""
a = abs(n)
lst = []
while a>0:
lst.append(a%2)
a /= 2
# 2's complement
if n>=0:
lst.extend([0]*(32-len(lst)))
else:
pivot = -1
for i in xrange(len(lst)):
if pivot==-1 and lst[i]==1:
pivot = i
continue
if pivot!=-1:
lst[i] ^= 1
lst.extend([1]*(32-len(lst)))
return "".join(map(str, reversed(lst)))
if __name__=="__main__":
assert Solution().bitSwapRequired(1, -1)==31
assert Solution().bitSwapRequired(31, 14)==2
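The solution above builds explicit 32-bit two's-complement digit lists (and relies on Python 2's `xrange` and integer division). The count it produces is just the Hamming distance between the two 32-bit patterns, so the same quantity can be cross-checked in Python 3 with XOR and a 32-bit mask; this is an independent sketch, not a rewrite of the class:

```python
def bit_swap_required(a, b):
    """Number of differing bits between the 32-bit two's-complement forms of a and b."""
    return bin((a ^ b) & 0xFFFFFFFF).count("1")

assert bit_swap_required(1, -1) == 31
assert bit_swap_required(31, 14) == 2
```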
| 19.630137
| 71
| 0.415213
| 182
| 1,433
| 3.186813
| 0.340659
| 0.010345
| 0.031034
| 0.041379
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072319
| 0.440335
| 1,433
| 72
| 72
| 19.902778
| 0.650873
| 0.163294
| 0
| 0
| 0
| 0
| 0.015195
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4d1d59e8d1a05ab55391042aa571be2ead1705
| 2,549
|
py
|
Python
|
macaddress/__init__.py
|
paradxum/django-macaddress
|
c223dc8c79555d2265789c4d13667036cfbd7bd8
|
[
"BSD-3-Clause"
] | 42
|
2015-11-23T09:40:36.000Z
|
2022-03-15T18:15:44.000Z
|
macaddress/__init__.py
|
paradxum/django-macaddress
|
c223dc8c79555d2265789c4d13667036cfbd7bd8
|
[
"BSD-3-Clause"
] | 19
|
2016-01-08T13:36:23.000Z
|
2021-05-13T23:57:39.000Z
|
macaddress/__init__.py
|
paradxum/django-macaddress
|
c223dc8c79555d2265789c4d13667036cfbd7bd8
|
[
"BSD-3-Clause"
] | 16
|
2016-02-04T09:43:12.000Z
|
2021-04-15T13:27:40.000Z
|
from django.conf import settings
from netaddr import mac_unix, mac_eui48
import importlib
import warnings
class mac_linux(mac_unix):
"""MAC format with zero-padded all upper-case hex and colon separated"""
word_fmt = '%.2X'
def default_dialect(eui_obj=None):
# Check whether a default dialect class has been specified in settings as a 'module.dialect_cls'
# string, and use importlib and getattr to retrieve the dialect class. 'module' is the module and
# 'dialect_cls' is the class name of the custom dialect. The dialect must either be defined or
# imported by the module's __init__.py if the module is a package.
from .fields import MACAddressField # Remove import at v1.4
if hasattr(settings, 'MACADDRESS_DEFAULT_DIALECT') and not MACAddressField.dialect:
module, dialect_cls = settings.MACADDRESS_DEFAULT_DIALECT.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls, mac_linux)
return dialect
else:
if MACAddressField.dialect: # Remove this "if" statement at v1.4
warnings.warn(
"The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect "
"utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the "
"project README for more information.",
DeprecationWarning,
)
return MACAddressField.dialect
if eui_obj:
return eui_obj.dialect
else:
return mac_linux
def format_mac(eui_obj, dialect):
# Format an EUI instance as a string using the supplied dialect class, allowing custom dialect classes
# to be passed directly or as a string, a la 'module.dialect_cls', where 'module' is the module and 'dialect_cls'
# is the class name of the custom dialect. The dialect must either be defined or imported by the module's __init__.py if
# the module is a package.
if not isinstance(dialect, mac_eui48):
if isinstance(dialect, str):
module, dialect_cls = dialect.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls)
eui_obj.dialect = dialect
return str(eui_obj)
from pkg_resources import get_distribution, DistributionNotFound
import os.path
try:
_dist = get_distribution('django-macaddress')
except DistributionNotFound:
__version__ = 'Please install this project with setup.py'
else:
__version__ = _dist.version
VERSION = __version__ # synonym
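For illustration, using the settings hook described above might look like the following; the app name and dialect class are made-up examples, while `word_sep`/`word_fmt` are the formatting attributes netaddr's MAC dialect classes expose. Note that `default_dialect` splits the setting on '.' into exactly two parts, so the class must be defined (or imported) in the named module itself:

```python
# myapp/__init__.py  (hypothetical app; the dialect must live, or be imported, here)
from netaddr import mac_eui48

class mac_hyphen_upper(mac_eui48):
    """AA-BB-CC-DD-EE-FF style formatting."""
    word_sep = '-'
    word_fmt = '%.2X'

# settings.py -- exactly one dot: '<module>.<dialect_cls>'
MACADDRESS_DEFAULT_DIALECT = 'myapp.mac_hyphen_upper'
```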
| 43.20339
| 124
| 0.710867
| 340
| 2,549
| 5.144118
| 0.352941
| 0.04574
| 0.054889
| 0.054889
| 0.232133
| 0.232133
| 0.232133
| 0.232133
| 0.232133
| 0.232133
| 0
| 0.004559
| 0.225579
| 2,549
| 58
| 125
| 43.948276
| 0.881459
| 0.326795
| 0
| 0.073171
| 0
| 0
| 0.191765
| 0.049412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.219512
| 0
| 0.439024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4ee3a1833fdef3d1343fa0ed07aabcf8faecca
| 2,422
|
py
|
Python
|
textmagic/test/message_status_tests.py
|
dfstrauss/textmagic-sms-api-python
|
9ab05b461861ac53da651588bef6b0b504653ecd
|
[
"BSD-3-Clause"
] | 2
|
2017-12-20T11:16:57.000Z
|
2022-02-22T06:46:19.000Z
|
textmagic/test/message_status_tests.py
|
dfstrauss/textmagic-sms-api-python
|
9ab05b461861ac53da651588bef6b0b504653ecd
|
[
"BSD-3-Clause"
] | 2
|
2015-06-14T16:06:33.000Z
|
2017-08-23T11:38:22.000Z
|
textmagic/test/message_status_tests.py
|
dfstrauss/textmagic-sms-api-python
|
9ab05b461861ac53da651588bef6b0b504653ecd
|
[
"BSD-3-Clause"
] | 5
|
2015-06-12T16:21:17.000Z
|
2022-02-22T06:46:23.000Z
|
import time
from textmagic.test import ONE_TEST_NUMBER
from textmagic.test import THREE_TEST_NUMBERS
from textmagic.test import TextMagicTestsBase
from textmagic.test import LiveUnsafeTests
class MessageStatusTestsBase(TextMagicTestsBase):
def sendAndCheckStatusTo(self, numbers):
message = 'sdfqwersdfgfdg'
response = self.client.send(message, numbers)
ids = response['message_id'].keys()
self.getStatus(ids, message)
return (ids, message)
def getStatus(self, ids, message):
response = self.client.message_status(ids)
self.assertKeysEqualExpectedKeys(response, ids)
statuses = []
for id in ids:
status = response[id]
expected_keys = ['status', 'text', 'reply_number', 'created_time']
if (len(status) == 4):
pass
elif (len(status) == 6):
expected_keys.append('completed_time')
expected_keys.append('credits_cost')
else:
self.fail("Unexpected number of return parameters: %s" % len(status))
self.assertKeysEqualExpectedKeys(status, expected_keys)
self.assertEquals(status['text'], message)
self.assertEquals(status['reply_number'], '447624800500')
self.assertTrue(isinstance(status['created_time'], time.struct_time))
if (len(status) == 6):
self.assertTrue(isinstance(status['completed_time'], time.struct_time))
self.assertTrue(isinstance(status['credits_cost'], float))
statuses.append(status['status'])
return statuses
class MessageStatusTests(MessageStatusTestsBase):
def testMessageStatusWhenSendingOneMessage(self):
self.sendAndCheckStatusTo(ONE_TEST_NUMBER)
def testMessageStatusWhenSendingThreeMessages(self):
self.sendAndCheckStatusTo(THREE_TEST_NUMBERS)
class LiveUnsafeMessageStatusTests(MessageStatusTestsBase, LiveUnsafeTests):
"""
This test is live-unsafe because it is intended to be sent to a real
telephone number. It keeps asking for message status until it receives
a "delivered" response.
"""
def testMessageStatusWhenPhoneIsSwitchedOff(self):
ids, message = self.sendAndCheckStatusTo(['27991114444'])
while True:
s, = self.getStatus(ids, message)
if (s == 'd'):
break
| 36.149254
| 87
| 0.660198
| 236
| 2,422
| 6.673729
| 0.360169
| 0.031746
| 0.043175
| 0.058413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014223
| 0.245252
| 2,422
| 66
| 88
| 36.69697
| 0.847374
| 0.0673
| 0
| 0
| 0
| 0
| 0.094002
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 1
| 0.106383
| false
| 0.021277
| 0.106383
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4ef34765e81a312523257e87f5ab76933d8997
| 2,245
|
py
|
Python
|
apps/orders/models.py
|
LinkanDawang/FreshMallDemo
|
5b8e2d2e8e137f609e8ac1e29ea013bb3ef34edb
|
[
"Apache-2.0"
] | null | null | null |
apps/orders/models.py
|
LinkanDawang/FreshMallDemo
|
5b8e2d2e8e137f609e8ac1e29ea013bb3ef34edb
|
[
"Apache-2.0"
] | 5
|
2020-06-05T18:27:41.000Z
|
2022-01-13T00:48:03.000Z
|
apps/orders/models.py
|
LinkanDawang/dailyfresh
|
4f0360d5e4eeda4737234942248715b77d9e3b12
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import GoodsSKU
# Create your models here.
class OrderInfo(BaseModel):
"""订单信息"""
PAY_METHOD = ['1', '2']
PAY_METHOD_CHOICES = (
(1, "货到付款"),
(2, "支付宝"),
)
ORDER_STATUS_CHOICES = (
(1, "待支付"),
(2, "待发货"),
(3, "待收货"),
(4, "待评价"),
(5, "已完成"),
)
"""---------订单信息------------------------"""
PAY_METHODS = {
1: "货到付款",
2: "支付宝",
}
ORDER_STATUS = {
1: "待支付",
2: "待发货",
3: "待收货",
4: "待评价",
5: "已完成",
}
PAY_METHODS_ENUM = {
"CASH": 1,
"ALIPAY": 2
}
ORDER_STATUS_ENUM = {
"UNPAID": 1,
"UNSEND": 2,
"UNRECEIVED": 3,
"UNCOMMENT": 4,
"FINISHED": 5
}
order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="下单用户")
address = models.ForeignKey(Address, on_delete=models.CASCADE, verbose_name="收获地址")
total_count = models.IntegerField(default=1, verbose_name="商品总数")
total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
trans_cost = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = "df_order_info"
class OrderGoods(BaseModel):
"""订单商品"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单")
sku = models.ForeignKey(GoodsSKU, on_delete=models.CASCADE, verbose_name="订单商品")
count = models.IntegerField(default=1, verbose_name="数量")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
comment = models.TextField(default="", verbose_name="评价信息")
class Meta:
db_table = "df_order_goods"
| 28.782051
| 104
| 0.620045
| 270
| 2,245
| 4.955556
| 0.366667
| 0.115097
| 0.041854
| 0.06278
| 0.45142
| 0.373692
| 0.213752
| 0.150972
| 0.150972
| 0.150972
| 0
| 0.023563
| 0.224944
| 2,245
| 77
| 105
| 29.155844
| 0.745402
| 0.01559
| 0
| 0.034483
| 0
| 0
| 0.078923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c4f72bb8eb3058809660eadcee54f1e16cab76f
| 18,201
|
py
|
Python
|
event/arguments/prepare/event_vocab.py
|
hunterhector/DDSemantics
|
883ef1015bd21d9b8575d8000faf3b506a09f21c
|
[
"Apache-2.0"
] | null | null | null |
event/arguments/prepare/event_vocab.py
|
hunterhector/DDSemantics
|
883ef1015bd21d9b8575d8000faf3b506a09f21c
|
[
"Apache-2.0"
] | null | null | null |
event/arguments/prepare/event_vocab.py
|
hunterhector/DDSemantics
|
883ef1015bd21d9b8575d8000faf3b506a09f21c
|
[
"Apache-2.0"
] | 2
|
2018-06-24T17:40:31.000Z
|
2020-07-30T19:19:55.000Z
|
from collections import defaultdict, Counter
import os
import gzip
import json
import pickle
from json.decoder import JSONDecodeError
import logging
from typing import Dict
import pdb
from event import util
from event.arguments.prepare.slot_processor import get_simple_dep, is_propbank_dep
logger = logging.getLogger(__name__)
class TypedEventVocab:
unk_predicate = "unk_predicate-pred"
unk_arg_word = "unk_argument"
unk_frame = "unk_frame"
unk_fe = "unk_fe"
unk_prep = "unk_preposition"
unk_dep = "unk_dep"
unobserved_fe = "__unobserved_fe__"
unobserved_arg = "__unobserved_arg__"
ghost = "__ghost_component__"
def __init__(self, vocab_dir, event_data=None):
self.lookups: Dict[str, Dict[str, int]] = {}
self.oovs: Dict[str, str] = {}
self.vocab_dir = vocab_dir
if not os.path.exists(os.path.join(vocab_dir, "predicate.vocab")):
if event_data is None:
logging.error(
"Vocabulary file does not exist and no data " "provided for counting."
)
logger.info("Counting vocabulary.")
vocab_counters = self.get_vocab_count(event_data)
for vocab_name, counter in vocab_counters.items():
raw_vocab_path = os.path.join(vocab_dir, vocab_name + ".vocab")
with open(raw_vocab_path, "w") as out:
for key, value in counter.most_common():
out.write("{}\t{}\n".format(key, value))
logger.info("Done vocabulary counting.")
# Now filter the vocabulary.
logger.info("Filtering vocabulary.")
filtered_vocab = self.filter_vocab(vocab_counters)
logger.info("Done filtering.")
logger.info("Writing filtered vocab to disk.")
for key, vocab in filtered_vocab.items():
with open(os.path.join(self.vocab_dir, key + ".vocab"), "w") as out:
for token, count in vocab:
out.write("{}\t{}\n".format(token, count))
self.pickle_counts()
logger.info("Done.")
else:
logger.info("Will not overwrite vocabulary, using existing.")
if not self.unpickle_counts():
logger.info("Reading counts from .vocab files.")
f_name: str
for f_name in os.listdir(vocab_dir):
if "_" in f_name and f_name.endswith(".vocab"):
vocab_type = f_name.split("_")[0]
else:
continue
self.lookups[vocab_type] = {}
self.oovs[vocab_type] = "unk_" + vocab_type
with open(os.path.join(vocab_dir, f_name)) as vocab_file:
index = 0
for line in vocab_file:
word, count = line.strip().split("\t")
self.lookups[vocab_type][word] = index
index += 1
logger.info(
"Loaded {} types for {}".format(
len(self.lookups[vocab_type]), vocab_type
)
)
self.pickle_counts()
def pickle_counts(self):
with open(os.path.join(self.vocab_dir, "lookups.pickle"), "wb") as out:
pickle.dump(self.lookups, out)
with open(os.path.join(self.vocab_dir, "oovs.pickle"), "wb") as out:
pickle.dump(self.oovs, out)
def unpickle_counts(self):
lookup_pickle = os.path.join(self.vocab_dir, "lookups.pickle")
oov_pickle = os.path.join(self.vocab_dir, "oovs.pickle")
if os.path.exists(lookup_pickle) and os.path.exists(oov_pickle):
logger.info("Directly loading pickled counts.")
with open(lookup_pickle, "rb") as lp:
self.lookups = pickle.load(lp)
with open(oov_pickle, "rb") as op:
self.oovs = pickle.load(op)
return True
else:
return False
def get_vocab_word(self, word, key):
if not word:
return self.oovs[key]
if word in self.lookups[key]:
return word
else:
return self.oovs[key]
@classmethod
def make_arg(cls, text, role):
if role == "NA":
return text + "-" + cls.unk_dep
else:
return text + "-" + role
@staticmethod
def make_predicate(text):
return text.lower() + "-pred"
@staticmethod
def make_fe(frame, fe):
# Do not use frame,fe format to alleviate sparsity.
return fe
def get_arg_entity_rep(self, arg, entity_text):
# If a specific entity text is provided.
rep = self.oovs["argument"]
if entity_text is not None:
# Use the argument's own text.
rep = self.get_vocab_word(entity_text, "argument")
if rep == self.oovs["argument"]:
# Use the text after hypen.
if "-" in entity_text:
rep = self.get_vocab_word(entity_text.split("-")[-1], "argument")
arg_text = arg["text"].lower()
if rep == self.oovs["argument"]:
# Fall back to use the argument's own text.
rep = self.get_vocab_word(arg_text, "argument")
if rep == self.oovs["argument"]:
if "-" in arg_text:
rep = self.get_vocab_word(arg_text.split("-")[-1], "argument")
if rep == self.oovs["argument"]:
# Fall back to NER tag.
if "ner" in arg:
rep = arg["ner"]
return rep
@classmethod
def get_unk_arg_rep(cls):
# This will create a full unknown argument, try to back off to
# a partial unknown argument if possible.
return cls.make_arg(cls.unk_arg_word, cls.unk_dep)
@classmethod
def get_unk_arg_with_dep(cls, dep):
"""Return a backoff version of the representation by using the
actual dep, but unk_arg
Args:
dep
"""
return cls.make_arg(cls.unk_arg_word, dep)
@classmethod
def get_arg_rep_no_dep(cls, entity_rep):
"""Return the backoff version of the argument representation by using
the unk_dep, but the actual entity.
Args:
entity_rep:
Returns:
"""
return cls.make_arg(entity_rep, cls.unk_dep)
def get_arg_rep(self, dep, entity_rep):
if dep.startswith("prep"):
dep = self.get_vocab_word(dep, "preposition")
arg_rep = self.make_arg(entity_rep, dep)
return arg_rep
def get_pred_rep(self, event):
"""
Take the predicates, and get the vocab index for it. This will first
use the predicate itself, if not found, it will try to use the verb
form.
:param event:
:return:
"""
pred = self.get_vocab_word(event["predicate"], "predicate")
if pred == self.oovs["predicate"]:
# Try to see if the verb form help.
if "verb_form" in event:
pred = self.get_vocab_word(event["verb_form"], "predicate")
return self.make_predicate(pred)
def get_fe_rep(self, frame_name, fe_role):
# return self.make_fe(frame_name, fe_role)
return self.get_vocab_word(self.make_fe(frame_name, fe_role), "fe")
@staticmethod
def filter_by_count(counter, min_count):
return [
(key, count) for key, count in counter.most_common() if count >= min_count
]
def filter_vocab(
self,
vocab_counters,
top_num_prep=150,
min_token_count=500,
min_fe_count=50,
min_frame_count=5,
):
filtered_vocab = {
"predicate_min_%d"
% min_token_count: self.filter_by_count(
vocab_counters["predicate"], min_token_count
),
"argument_min_%d"
% min_token_count: self.filter_by_count(
vocab_counters["argument"], min_token_count
),
"preposition_top_%d"
% top_num_prep: vocab_counters["preposition"].most_common(top_num_prep),
"fe_min_%d"
% min_fe_count: self.filter_by_count(vocab_counters["fe"], min_fe_count),
"frame_min_%d"
% min_frame_count: self.filter_by_count(
vocab_counters["frame"], min_frame_count
),
}
for key, counts in filtered_vocab.items():
# Use the base key name for the vocabulary, not including the
# cutoff, (i.e. predicate_min_50 -> predicate)
name = key.split("_")[0]
# Put the oov token as a regular token in the vocab file.
oov = "unk_" + name
counts.insert(0, (oov, 0))
self.lookups[name] = {}
self.oovs[name] = oov
index = 0
for term, _ in counts:
self.lookups[name][term] = index
index += 1
return filtered_vocab
def get_vocab_count(self, data_path):
vocab_counters = defaultdict(Counter)
doc_count = 0
event_count = 0
with gzip.open(data_path) as data:
for line in data:
doc_info = json.loads(line)
for event in doc_info["events"]:
event_count += 1
predicate = event["predicate"]
vocab_counters["predicate"][predicate] += 1
frame = event["frame"]
if not frame == "NA":
vocab_counters["frame"][frame] += 1
for arg in event["arguments"]:
fe_name = arg["feName"]
syn_role = arg["dep"]
arg_text = arg["text"].lower()
vocab_counters["argument"][arg_text] += 1
if not fe_name == "NA":
vocab_counters["fe"][
self.make_fe(event["frame"], fe_name)
] += 1
if syn_role.startswith("prep"):
vocab_counters["preposition"][syn_role] += 1
doc_count += 1
if doc_count % 1000 == 0:
print(
"\rCounted vocab for {} events in "
"{} docs.".format(event_count, doc_count),
end="",
)
return vocab_counters
class EmbbedingVocab:
def __init__(self, vocab_file, with_padding=False, extras=None):
self.vocab_file = vocab_file
self.vocab = {}
self.tf = []
self.extras = []
self.pad = "__PADDING__"
self.padded = False
if with_padding:
# Paddings should be at 0.
self.padded = True
self.vocab[self.pad] = 0
self.tf.append(0)
if extras:
for name in extras:
self.add_extra(name)
self.__read_vocab()
@staticmethod
def with_extras(vocab_file):
"""
Create an EmbbedingVocab with unknown-word slots and a padding slot.
Args:
vocab_file:
Returns:
"""
return EmbbedingVocab(
vocab_file,
True,
[
TypedEventVocab.unk_frame,
TypedEventVocab.unk_fe,
TypedEventVocab.get_unk_arg_rep(),
TypedEventVocab.unobserved_arg,
TypedEventVocab.unobserved_fe,
TypedEventVocab.ghost,
],
)
def get_index(self, token, unk):
try:
return self.vocab[token]
except KeyError:
if unk:
return self.vocab[unk]
else:
return -1
def extra_size(self):
return len(self.extras)
def add_extra(self, name):
"""Add extra dimensions into the embedding vocab, used for special
tokens.
Args:
name:
Returns:
"""
if name in self.extras:
logger.info(
f"Extra {name} already exist in vocabulary "
f"at index {self.vocab[name]}"
)
return self.vocab[name]
else:
self.extras.append(name)
extra_index = len(self.vocab)
self.vocab[name] = extra_index
self.tf.append(0)
logger.info(
f"Adding {name} as extra dimension {extra_index} "
f"to {self.vocab_file}"
)
return extra_index
def get_size(self):
return len(self.vocab)
def vocab_items(self):
return self.vocab.items()
def get_term_freq(self, token):
return self.tf[self.get_index(token, None)]
def __read_vocab(self):
with open(self.vocab_file) as din:
index = len(self.vocab)
for line in din:
word, count = line.split()
self.vocab[word] = index
self.tf.append(int(count))
index += 1
def create_sentences(
doc,
event_vocab,
output_path,
include_frame=False,
use_simple_dep=False,
prop_arg_only=False,
):
if include_frame:
print("Adding frames to sentences.")
doc_count = 0
event_count = 0
with gzip.open(doc) as data, gzip.open(output_path, "w") as out:
for line in data:
try:
doc_info = json.loads(line)
except JSONDecodeError:
continue
sentence = []
represent_by_id = {}
for entity in doc_info["entities"]:
eid = entity["entityId"]
represent = entity["representEntityHead"]
represent_by_id[eid] = represent
for event in doc_info["events"]:
event_count += 1
sentence.append(event_vocab.get_pred_rep(event))
if include_frame and not event["frame"] == "NA":
frame = event_vocab.get_vocab_word(event["frame"], "frame")
sentence.append(frame)
for arg in event["arguments"]:
dep = arg["dep"]
if (
arg["argStart"] == event["predicateStart"]
and arg["argEnd"] == event["predicateEnd"]
):
dep = "root"
if use_simple_dep:
dep = get_simple_dep(dep)
if prop_arg_only and not is_propbank_dep(dep):
continue
sentence.append(
event_vocab.get_arg_rep(
dep, event_vocab.get_arg_entity_rep(arg, None)
)
)
if include_frame and not arg["feName"] == "NA":
fe = event_vocab.get_fe_rep(frame, arg["feName"])
if not fe == event_vocab.oovs["fe"]:
sentence.append(fe)
if "NA" in sentence:
pdb.set_trace()
doc_count += 1
out.write(str.encode(" ".join(sentence) + "\n"))
if event_count % 1000 == 0:
print(
"\rCreated sentences for {} documents, "
"{} events.".format(doc_count, event_count),
end="",
)
print(
"\rCreated sentences for {} documents, "
"{} events.\n".format(doc_count, event_count),
end="",
)
def write_sentences(
sent_out, event_data, event_vocab, include_frame, simple_dep, prop_arg
):
if not os.path.exists(sent_out):
os.makedirs(sent_out)
fname = "sent_with_frames.gz" if include_frame else "sent_pred_only.gz"
out = os.path.join(sent_out, fname)
if not os.path.exists(out):
create_sentences(
event_data,
event_vocab,
out,
include_frame=include_frame,
use_simple_dep=simple_dep,
prop_arg_only=prop_arg,
)
else:
logger.info(f"Will not overwrite {out}")
def main(event_data, vocab_dir, sent_out, prop_arg):
if not os.path.exists(vocab_dir):
os.makedirs(vocab_dir)
event_vocab = TypedEventVocab(vocab_dir, event_data=event_data)
logger.info("Done loading vocabulary.")
# The 3 booleans are: include_frame, simple_dep, prop_arg
if prop_arg:
# For propbank style training.
logger.info("Creating event sentences in propbank style")
# Include frame or not version for propbank, but always use simple dep
# and propbank style arguments.
write_sentences(sent_out, event_data, event_vocab, False, True, True)
write_sentences(sent_out, event_data, event_vocab, True, True, True)
else:
# For framenet style training.
logger.info("Creating event sentences in FrameNet style")
# Include frame or not version for framenet, but always use complex dep
# and framenet style arguments.
write_sentences(sent_out, event_data, event_vocab, True, False, False)
write_sentences(sent_out, event_data, event_vocab, False, False, False)
if __name__ == "__main__":
parser = util.OptionPerLineParser(
description="Event Vocabulary.", fromfile_prefix_chars="@"
)
parser.add_argument("--vocab_dir", type=str, help="Vocabulary directory.")
parser.add_argument("--input_data", type=str, help="Input data.")
parser.add_argument("--sent_out", type=str, help="Sentence out dir.")
parser.add_argument(
"--prop_arg", action="store_true", help="Propbank arg only.", default=False
)
util.set_basic_log()
args = parser.parse_args()
main(args.input_data, args.vocab_dir, args.sent_out, args.prop_arg)
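A small standalone illustration of the cutoff behaviour of `TypedEventVocab.filter_by_count` (assuming the class above is importable; the counts are made up):

```python
from collections import Counter

counts = Counter({"say-pred": 900, "eat-pred": 40, "rare-pred": 3})
kept = TypedEventVocab.filter_by_count(counts, min_count=50)
assert kept == [("say-pred", 900)]
# Everything under the cutoff is dropped; filter_vocab() later prepends the
# "unk_<name>" token at index 0 of each filtered vocabulary.
```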
| 31.598958
| 86
| 0.538652
| 2,097
| 18,201
| 4.455889
| 0.141631
| 0.023116
| 0.012842
| 0.013699
| 0.227633
| 0.176156
| 0.15229
| 0.110017
| 0.055009
| 0.030394
| 0
| 0.004404
| 0.363716
| 18,201
| 575
| 87
| 31.653913
| 0.802435
| 0.082138
| 0
| 0.191067
| 0
| 0
| 0.101958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07196
| false
| 0
| 0.027295
| 0.022333
| 0.198511
| 0.009926
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c51e7ffa104c06ed45deeaa7e32faf7f56f41a1
| 4,570
|
py
|
Python
|
autovirt/equipment/domain/equipment.py
|
xlam/autovirt
|
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
|
[
"MIT"
] | null | null | null |
autovirt/equipment/domain/equipment.py
|
xlam/autovirt
|
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
|
[
"MIT"
] | null | null | null |
autovirt/equipment/domain/equipment.py
|
xlam/autovirt
|
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
|
[
"MIT"
] | null | null | null |
from enum import Enum
from functools import reduce
from math import ceil
from typing import Optional, Tuple
from autovirt import utils
from autovirt.exception import AutovirtError
from autovirt.structs import UnitEquipment, RepairOffer
logger = utils.get_logger()
# maximum allowed equipment price
PRICE_MAX = 100000
# value to add and sub from offer quality when filtering
QUALITY_DELTA = 3
class QualityType(Enum):
INSTALLED = "quality"
REQUIRED = "quality_required"
def quantity_to_repair(units: list[UnitEquipment]) -> int:
"""Calculate total quantity of equipment to repair on given units"""
return sum([unit.wear_quantity for unit in units])
def quantity_total(units: list[UnitEquipment]) -> int:
"""Calculate total equipment count on given units"""
return sum([unit.quantity for unit in units])
def filter_offers(
offers: list[RepairOffer], quality: float, quantity: int
) -> list[RepairOffer]:
# select offers in the range [quality-DELTA ... quality+DELTA] that have enough repair parts
filtered = list(filter(lambda x: x.quality > quality - QUALITY_DELTA, offers))
filtered = list(filter(lambda x: x.quality < quality + QUALITY_DELTA, filtered))
filtered = list(filter(lambda x: x.quantity > quantity, filtered))
filtered = list(filter(lambda x: x.price < PRICE_MAX, filtered))
return filtered
def expected_quality(
qual_rep: float, qual_inst: float, items_total: int, items_wear: int
) -> float:
return (
qual_inst * (items_total - items_wear) + qual_rep * items_wear
) / items_total
def select_offer(
offers: list[RepairOffer], units: list[UnitEquipment], quality: float = None
) -> RepairOffer:
if not quality:
quality = units[0].quality_required
qnt_rep = quantity_to_repair(units)
qnt_total = quantity_total(units)
qual_min = utils.get_min(units, QualityType.INSTALLED.value)
qual_exp = [
expected_quality(o.quality, qual_min, qnt_total, qnt_rep) for o in offers
]
qual_diff = [abs(qual - quality) for qual in qual_exp]
diff_norm = utils.normalize_array(qual_diff)
price_norm = utils.normalize_array([o.price for o in offers])
qp_dist = [p + q for (p, q) in zip(price_norm, diff_norm)]
summary: list = [
[o, price_norm[i], qual_exp[i], qual_diff[i], diff_norm[i], qp_dist[i]]
for i, o in enumerate(offers)
if qual_exp[i] >= quality
]
logger.info(f"listing filtered offers for quality of {quality}:")
for o in summary:
logger.info(
f"id: {o[0].id}, quality: {o[0].quality}, price: {o[0].price},"
f" quantity: {o[0].quantity}, qual_exp: {o[2]:.2f}, qp: {o[5]:.3f}"
)
minimum_qp_item = reduce(lambda x, y: x if x[5] < y[5] else y, summary)
return minimum_qp_item[0]
def select_offer_to_raise_quality(
unit: UnitEquipment, offers: list[RepairOffer], margin: float = 0
) -> Optional[Tuple[RepairOffer, int]]:
required = unit.quality_required + margin
quality_coeff = unit.quantity * (required - unit.quality)
offers = list(filter(lambda o: o.quality >= required, offers))
if not offers:
return None
offer = offers[0]
count_to_replace = ceil(quality_coeff / (offer.quality - unit.quality))
price = count_to_replace * offer.price
for offer_ in offers[1:]:
count = ceil(quality_coeff / (offer_.quality - unit.quality))
price_ = count * offer_.price
if price_ < price:
offer = offer_
count_to_replace = count
return offer, count_to_replace
def split_by_quality(
units: list[UnitEquipment], quality_type: QualityType = QualityType.REQUIRED
) -> dict[float, list[UnitEquipment]]:
"""Split units by quality (required or installed)"""
res: dict[float, list[UnitEquipment]] = {}
for unit in units:
quality = getattr(unit, quality_type.value)
if quality not in res.keys():
res[quality] = []
res[quality].append(unit)
return res
def split_mismatch_quality_units(
units: list[UnitEquipment],
) -> tuple[list[UnitEquipment], list[UnitEquipment]]:
"""Split units into 'normal' and 'mismatch' groups.
Mismatched units have installed equipment of lower quality than required.
We need to treat them in a different manner than normal units while repairing.
"""
normal = []
mismatch = []
for unit in units:
if unit.quality < unit.quality_required:
mismatch.append(unit)
else:
normal.append(unit)
return normal, mismatch
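`expected_quality` is simply the quantity-weighted mean of the installed quality and the repair offer's quality; a quick numeric check (assuming the function above is in scope):

```python
# 100 installed items at quality 30, 20 of them worn, repaired with quality-40 parts:
# (30 * (100 - 20) + 40 * 20) / 100 = (2400 + 800) / 100 = 32.0
assert expected_quality(qual_rep=40, qual_inst=30, items_total=100, items_wear=20) == 32.0
```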
| 34.104478
| 92
| 0.679212
| 611
| 4,570
| 4.93617
| 0.224223
| 0.050729
| 0.036472
| 0.018568
| 0.148541
| 0.148541
| 0.089523
| 0.066976
| 0.066976
| 0.034483
| 0
| 0.006128
| 0.214442
| 4,570
| 133
| 93
| 34.360902
| 0.833983
| 0.114661
| 0
| 0.020408
| 0
| 0.020408
| 0.048878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.071429
| 0.010204
| 0.27551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c520307c63d7fc118bc65c38c0ef12159f02949
| 594
|
py
|
Python
|
day09/part2.py
|
mtn/advent16
|
0df34237485ee1246532e9eda0ef643e6950d13e
|
[
"MIT"
] | null | null | null |
day09/part2.py
|
mtn/advent16
|
0df34237485ee1246532e9eda0ef643e6950d13e
|
[
"MIT"
] | null | null | null |
day09/part2.py
|
mtn/advent16
|
0df34237485ee1246532e9eda0ef643e6950d13e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import re
with open("input.txt") as f:
content = f.read().strip()
def ulen(content):
ans = 0
i = 0
while i < len(content):
if content[i] == "(":
end = content[i:].find(")") + i
instr = content[i+1:end]
chars, times = map(int, content[i+1:end].split("x"))
to_copy = content[end+1:end+1+chars]
to_copy_len = ulen(to_copy)
ans += times * to_copy_len
i = end + 1 + chars
else:
ans += 1
i += 1
return ans
print(ulen(content))
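# Sanity check (illustrative): for the string "X(8x2)(3x3)ABCY" the inner
# (3x3)ABC expands to 9 characters and the outer (8x2) doubles that, so
# ulen("X(8x2)(3x3)ABCY") == 1 + 2 * 9 + 1 == 20.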
| 21.214286
| 64
| 0.481481
| 83
| 594
| 3.373494
| 0.457831
| 0.114286
| 0.064286
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026596
| 0.367003
| 594
| 27
| 65
| 22
| 0.718085
| 0.035354
| 0
| 0
| 0
| 0
| 0.020979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c520e00d9b073d8aaafcc2b263b654b36c5fc45
| 17,397
|
py
|
Python
|
cirq-core/cirq/contrib/quimb/mps_simulator_test.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | null | null | null |
cirq-core/cirq/contrib/quimb/mps_simulator_test.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | 4
|
2022-01-16T14:12:15.000Z
|
2022-02-24T03:58:46.000Z
|
cirq-core/cirq/contrib/quimb/mps_simulator_test.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=wrong-or-nonexistent-copyright-notice
import itertools
import math
import numpy as np
import pytest
import sympy
import cirq
import cirq.contrib.quimb as ccq
import cirq.testing
from cirq import value
def assert_same_output_as_dense(circuit, qubit_order, initial_state=0, grouping=None):
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping=grouping)
ref_simulator = cirq.Simulator()
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
expected = ref_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_various_gates_1d():
gate_op_cls = [cirq.I, cirq.H, cirq.X, cirq.Y, cirq.Z, cirq.T]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1 = cirq.LineQubit.range(2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for cross_gate_op in cross_gate_op_cls:
circuit = cirq.Circuit(q0_gate_op(q0), q1_gate_op(q1), cross_gate_op(q0, q1))
for initial_state in range(2 * 2):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_various_gates_1d_flip():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q1), cirq.CNOT(q1, q0))
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1])
assert_same_output_as_dense(circuit=circuit, qubit_order=[q1, q0])
def test_various_gates_2d():
gate_op_cls = [cirq.I, cirq.H]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1, q2, q3, q4, q5 = cirq.GridQubit.rect(3, 2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for q2_gate_op in gate_op_cls:
for q3_gate_op in gate_op_cls:
for cross_gate_op1 in cross_gate_op_cls:
for cross_gate_op2 in cross_gate_op_cls:
circuit = cirq.Circuit(
q0_gate_op(q0),
q1_gate_op(q1),
cross_gate_op1(q0, q1),
q2_gate_op(q2),
q3_gate_op(q3),
cross_gate_op2(q3, q1),
)
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2, q3, q4, q5]
)
def test_grouping():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.X(q0) ** 0.1,
cirq.Y(q1) ** 0.2,
cirq.Z(q2) ** 0.3,
cirq.CNOT(q0, q1),
cirq.Y(q1) ** 0.4,
)
groupings = [
None,
{q0: 0, q1: 1, q2: 2},
{q0: 0, q1: 0, q2: 1},
{q0: 0, q1: 1, q2: 0},
{q0: 1, q1: 0, q2: 0},
{q0: 0, q1: 0, q2: 0},
]
for grouping in groupings:
for initial_state in range(2 * 2 * 2):
assert_same_output_as_dense(
circuit=circuit,
qubit_order=[q0, q1, q2],
initial_state=initial_state,
grouping=grouping,
)
def test_grouping_does_not_overlap():
q0, q1 = cirq.LineQubit.range(2)
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping={q0: 0})
with pytest.raises(ValueError, match="Grouping must cover exactly the qubits"):
mps_simulator.simulate(cirq.Circuit(), qubit_order={q0: 0, q1: 1})
def test_same_partial_trace():
qubit_order = cirq.LineQubit.range(2)
q0, q1 = qubit_order
mps_simulator = ccq.mps_simulator.MPSSimulator()
for _ in range(50):
for initial_state in range(4):
circuit = cirq.testing.random_circuit(qubit_order, 3, 0.9)
expected_density_matrix = cirq.final_density_matrix(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
expected_partial_trace = cirq.partial_trace(
expected_density_matrix.reshape(2, 2, 2, 2), keep_indices=[0]
)
final_state = mps_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
).final_state
actual_density_matrix = final_state.partial_trace([q0, q1])
actual_partial_trace = final_state.partial_trace([q0])
np.testing.assert_allclose(actual_density_matrix, expected_density_matrix, atol=1e-4)
np.testing.assert_allclose(actual_partial_trace, expected_partial_trace, atol=1e-4)
def test_probs_dont_sum_up_to_one():
q0 = cirq.NamedQid('q0', dimension=2)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator(
simulation_options=ccq.mps_simulator.MPSOptions(sum_prob_atol=-0.5)
)
with pytest.raises(ValueError, match="Sum of probabilities exceeds tolerance"):
simulator.run(circuit, repetitions=1)
def test_empty():
q0 = cirq.NamedQid('q0', dimension=2)
q1 = cirq.NamedQid('q1', dimension=3)
q2 = cirq.NamedQid('q2', dimension=5)
circuit = cirq.Circuit()
for initial_state in range(2 * 3 * 5):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2], initial_state=initial_state
)
def test_cnot():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q0, q1))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_cnot_flipped():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_simulation_state():
q0, q1 = qubit_order = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
mps_simulator = ccq.mps_simulator.MPSSimulator()
ref_simulator = cirq.Simulator()
for initial_state in range(4):
args = mps_simulator._create_simulation_state(initial_state=initial_state, qubits=(q0, q1))
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=args)
expected = ref_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_three_qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.CCX(q0, q1, q2))
with pytest.raises(ValueError, match="Can only handle 1 and 2 qubit operations"):
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1, q2])
def test_measurement_1qubit():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(q0), cirq.H(q1), cirq.measure(q1))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=100)
assert sum(result.measurements['q(1)'])[0] < 80
assert sum(result.measurements['q(1)'])[0] > 20
def test_reset():
q = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
c = cirq.Circuit(cirq.X(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
c = cirq.Circuit(cirq.H(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
c = cirq.Circuit(cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
def test_measurement_2qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1), cirq.H(q2), cirq.measure(q0, q2))
simulator = ccq.mps_simulator.MPSSimulator()
repetitions = 1024
measurement = simulator.run(circuit, repetitions=repetitions).measurements['q(0),q(2)']
result_counts = {'00': 0, '01': 0, '10': 0, '11': 0}
for i in range(repetitions):
key = str(measurement[i, 0]) + str(measurement[i, 1])
result_counts[key] += 1
for result_count in result_counts.values():
# Expected value is 1/4:
assert result_count > repetitions * 0.15
assert result_count < repetitions * 0.35
def test_measurement_str():
q0 = cirq.NamedQid('q0', dimension=3)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=7)
assert str(result) == "q0 (d=3)=0000000"
def test_trial_result_str():
q0 = cirq.LineQubit(0)
final_simulator_state = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
result = ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_simulator_state=final_simulator_state,
)
assert 'output state: TensorNetwork' in str(result)
def test_trial_result_repr_pretty():
q0 = cirq.LineQubit(0)
final_simulator_state = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
result = ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_simulator_state=final_simulator_state,
)
cirq.testing.assert_repr_pretty_contains(result, 'output state: TensorNetwork')
cirq.testing.assert_repr_pretty(result, "cirq.MPSTrialResult(...)", cycle=True)
def test_empty_step_result():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
assert 'TensorNetwork' in str(step_result)
def test_step_result_repr_pretty():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
cirq.testing.assert_repr_pretty_contains(step_result, 'TensorNetwork')
cirq.testing.assert_repr_pretty(step_result, "cirq.MPSSimulatorStepResult(...)", cycle=True)
def test_state_equal():
q0, q1 = cirq.LineQubit.range(2)
state0 = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1a = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1b = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1729.0, sum_prob_atol=1e-3),
)
assert state0 == state0
assert state0 != state1a
assert state1a != state1b
def test_random_circuits_equal_more_rows():
circuit = cirq.testing.random_circuit(
qubits=cirq.GridQubit.rect(3, 2), n_moments=6, op_density=1.0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_supremacy_equal_more_cols():
circuit = cirq.testing.random_circuit(
qubits=cirq.GridQubit.rect(2, 3), n_moments=6, op_density=1.0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_tensor_index_names():
qubits = cirq.LineQubit.range(12)
qubit_map = {qubit: i for i, qubit in enumerate(qubits)}
state = ccq.mps_simulator.MPSState(qubits=qubit_map, prng=value.parse_random_state(0))
assert state.i_str(0) == "i_00"
assert state.i_str(11) == "i_11"
assert state.mu_str(0, 3) == "mu_0_3"
assert state.mu_str(3, 0) == "mu_0_3"
def test_simulate_moment_steps_sample():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
simulator = ccq.mps_simulator.MPSSimulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
if i == 0:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
# There are two "Tensor()" copies in the string.
assert len(str(step).split('Tensor(')) == 3
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, False]) or np.array_equal(
sample, [False, False]
)
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
else:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 0.0, 1.0 / math.sqrt(2)]),
)
# There are two "Tensor()" copies in the string.
assert len(str(step).split('Tensor(')) == 3
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, True]) or np.array_equal(
sample, [False, False]
)
def test_sample_seed():
q = cirq.NamedQubit('q')
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(seed=1234)
result = simulator.run(circuit, repetitions=20)
measured = result.measurements['q']
result_string = ''.join(map(lambda x: str(int(x[0])), measured))
assert result_string == '01011001110111011011'
def test_run_no_repetitions():
q0 = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0))
result = simulator.run(circuit, repetitions=0)
assert len(result.measurements['q(0)']) == 0
def test_run_parameters_not_resolved():
a = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('a'))(a), cirq.measure(a))
with pytest.raises(ValueError, match='symbols were not specified'):
_ = simulator.run_sweep(circuit, cirq.ParamResolver({}))
def test_deterministic_gate_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator1 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result1 = simulator1.run(circuit, repetitions=10)
simulator2 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result2 = simulator2.run(circuit, repetitions=10)
assert result1 == result2
simulator3 = ccq.mps_simulator.MPSSimulator(noise=cirq.Z)
result3 = simulator3.run(circuit, repetitions=10)
assert result1 != result3
def test_nondeterministic_mixture_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(
noise=cirq.ConstantQubitNoiseModel(cirq.depolarize(0.5))
)
result1 = simulator.run(circuit, repetitions=50)
result2 = simulator.run(circuit, repetitions=50)
assert result1 != result2
def test_unsupported_noise_fails():
with pytest.raises(ValueError, match='noise must be unitary or mixture but was'):
ccq.mps_simulator.MPSSimulator(noise=cirq.amplitude_damp(0.5))
def test_state_copy():
sim = ccq.mps_simulator.MPSSimulator()
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.H(q))
state_Ms = []
for step in sim.simulate_moment_steps(circuit):
state_Ms.append(step.state.M)
for x, y in itertools.combinations(state_Ms, 2):
assert len(x) == len(y)
for i in range(len(x)):
assert not np.shares_memory(x[i], y[i])
def test_simulation_state_initializer():
s = ccq.mps_simulator.MPSState(
qubits=(cirq.LineQubit(0),),
prng=np.random.RandomState(0),
classical_data=cirq.ClassicalDataDictionaryStore(
_records={cirq.MeasurementKey('test'): [(4,)]}
),
)
assert s.qubits == (cirq.LineQubit(0),)
assert s.log_of_measurement_results == {'test': [4]}
def test_act_on_gate():
args = ccq.mps_simulator.MPSState(qubits=cirq.LineQubit.range(3), prng=np.random.RandomState(0))
cirq.act_on(cirq.X, args, [cirq.LineQubit(1)])
np.testing.assert_allclose(
args.state_vector().reshape((2, 2, 2)),
cirq.one_hot(index=(0, 1, 0), shape=(2, 2, 2), dtype=np.complex64),
)
def test_deprecated():
prng = np.random.RandomState(0)
with cirq.testing.assert_deprecated('log_of_measurement_results', deadline='0.16', count=2):
_ = ccq.mps_simulator.MPSState(
qubits=cirq.LineQubit.range(3), prng=prng, log_of_measurement_results={}
)
with cirq.testing.assert_deprecated('positional', deadline='0.16'):
_ = ccq.mps_simulator.MPSState(cirq.LineQubit.range(3), prng=prng)
| 34.724551
| 100
| 0.647755
| 2,336
| 17,397
| 4.609161
| 0.12286
| 0.053497
| 0.054333
| 0.052661
| 0.661094
| 0.584285
| 0.504783
| 0.453701
| 0.445621
| 0.424166
| 0
| 0.03833
| 0.227683
| 17,397
| 500
| 101
| 34.794
| 0.763025
| 0.009772
| 0
| 0.331579
| 0
| 0
| 0.0288
| 0.004761
| 0
| 0
| 0
| 0
| 0.152632
| 1
| 0.094737
| false
| 0
| 0.023684
| 0
| 0.118421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c53aaaab36a01f9660d76573d43ecd12a07d0cb
| 7,340
|
py
|
Python
|
src/greenbudget/app/subaccount/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
src/greenbudget/app/subaccount/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
src/greenbudget/app/subaccount/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.fields import ModelChoiceField
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.budget.models import BaseBudget
from greenbudget.app.common.serializers import (
EntitySerializer,
AbstractBulkUpdateSerializer,
create_bulk_create_serializer
)
from greenbudget.app.fringe.models import Fringe
from greenbudget.app.group.models import (
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
from .models import SubAccount, BudgetSubAccount, TemplateSubAccount
class SubAccountSimpleSerializer(EnhancedModelSerializer):
id = serializers.IntegerField(read_only=True)
type = serializers.CharField(read_only=True)
identifier = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
description = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
name = serializers.CharField(
required=False,
allow_blank=True,
allow_null=False,
trim_whitespace=False
)
class Meta:
model = SubAccount
fields = ('id', 'name', 'identifier', 'type', 'description')
class SubAccountSerializer(SubAccountSimpleSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
updated_by = serializers.PrimaryKeyRelatedField(read_only=True)
created_at = serializers.DateTimeField(read_only=True)
updated_at = serializers.DateTimeField(read_only=True)
quantity = serializers.IntegerField(
required=False,
allow_null=True
)
rate = serializers.FloatField(required=False, allow_null=True)
multiplier = serializers.FloatField(required=False, allow_null=True)
estimated = serializers.FloatField(read_only=True)
unit = ModelChoiceField(
required=False,
choices=SubAccount.UNITS,
allow_null=True
)
budget = serializers.PrimaryKeyRelatedField(read_only=True)
subaccounts = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
ancestors = EntitySerializer(many=True, read_only=True)
siblings = EntitySerializer(many=True, read_only=True)
account = serializers.IntegerField(read_only=True, source='account.pk')
object_id = serializers.IntegerField(read_only=True)
parent_type = serializers.ChoiceField(
choices=["account", "subaccount"],
read_only=True
)
fringes = serializers.PrimaryKeyRelatedField(
many=True,
required=False,
queryset=Fringe.objects.filter(budget__trash=False)
)
class Meta:
model = SubAccount
fields = SubAccountSimpleSerializer.Meta.fields + (
'identifier', 'name', 'created_by', 'updated_by', 'created_at',
'updated_at', 'quantity', 'rate', 'multiplier', 'unit', 'account',
'object_id', 'parent_type', 'ancestors', 'estimated', 'subaccounts',
'budget', 'siblings', 'fringes')
def validate(self, attrs):
if self.instance is not None and self.instance.subaccounts.count() != 0:
if any([field in attrs for field in self.instance.DERIVING_FIELDS]):
raise exceptions.ValidationError(
"Field can only be updated when the sub account is not "
"derived."
)
return super().validate(attrs)
class BudgetSubAccountSerializer(SubAccountSerializer):
actual = serializers.FloatField(read_only=True)
variance = serializers.FloatField(read_only=True)
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=BudgetSubAccountGroup.objects.all()
)
class Meta:
model = BudgetSubAccount
fields = SubAccountSerializer.Meta.fields + (
'actual', 'variance', 'group')
class TemplateSubAccountSerializer(SubAccountSerializer):
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=TemplateSubAccountGroup.objects.all()
)
class Meta:
model = TemplateSubAccount
fields = SubAccountSerializer.Meta.fields + ('group', )
def create_bulk_create_subaccounts_serializer(model_cls):
data_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
data_serializer = TemplateSubAccountSerializer
base_serializer = create_bulk_create_serializer(data_serializer)
class BulkCreateSubAccountsSerializer(base_serializer):
class Meta(base_serializer.Meta):
model = BaseBudget
def get_serializer_context(self, instance):
return {'parent': instance}
def perform_save(self, serializer, instance, validated_data):
# Note that the updated_by argument is the user updating the
# Account by adding new SubAccount(s), so the SubAccount(s)
# should be denoted as having been created by this user.
return serializer.save(
updated_by=validated_data['updated_by'],
created_by=validated_data['updated_by'],
object_id=instance.pk,
content_type=ContentType.objects.get_for_model(model_cls),
parent=instance,
budget=instance.budget
)
return BulkCreateSubAccountsSerializer
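# Hypothetical usage sketch (names and view wiring are assumptions, not taken
# from this module): a view owning a budget instance would build the class for
# the concrete subaccount model it manages, e.g.
#   serializer_cls = create_bulk_create_subaccounts_serializer(BudgetSubAccount)
#   serializer = serializer_cls(instance=budget, data=request.data)
# so that template budgets get TemplateSubAccountSerializer rows instead.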
def create_subaccount_bulk_change_serializer(model_cls):
base_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
base_serializer = TemplateSubAccountSerializer
class SubAccountBulkChangeSerializer(base_serializer):
id = serializers.PrimaryKeyRelatedField(
required=True,
queryset=model_cls.objects.all()
)
def validate_id(self, instance):
account = self.parent.parent.instance
if account != instance.parent:
raise exceptions.ValidationError(
"The sub-account %s does not belong to account %s."
% (instance.pk, account.pk)
)
return instance
return SubAccountBulkChangeSerializer
def create_bulk_update_subaccounts_serializer(model_cls):
class BulkUpdateSubAccountsSerializer(AbstractBulkUpdateSerializer):
data = create_subaccount_bulk_change_serializer(model_cls)(
many=True, nested=True)
class Meta:
model = BaseBudget
fields = ('data', )
def update(self, instance, validated_data):
for subaccount, change in validated_data['data']:
serializer = SubAccountSerializer(
instance=subaccount,
data=change,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save(
updated_by=validated_data['updated_by'],
suppress_budget_update=validated_data.get(
'suppress_budget_update', False)
)
return instance
return BulkUpdateSubAccountsSerializer
| 35.631068
| 80
| 0.674523
| 686
| 7,340
| 7.04519
| 0.223032
| 0.026485
| 0.039727
| 0.026071
| 0.311608
| 0.250155
| 0.146493
| 0.081523
| 0.062901
| 0.033106
| 0
| 0.000181
| 0.248229
| 7,340
| 205
| 81
| 35.804878
| 0.87568
| 0.023297
| 0
| 0.224852
| 0
| 0
| 0.05806
| 0.00307
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047337
| false
| 0
| 0.053254
| 0.011834
| 0.390533
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c554c033e4e1ae5351bb05f507b9e976ca41041
| 13,152
|
py
|
Python
|
modules/dbnd/src/dbnd/_core/tracking/managers/callable_tracking.py
|
busunkim96/dbnd
|
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
|
[
"Apache-2.0"
] | 224
|
2020-01-02T10:46:37.000Z
|
2022-03-02T13:54:08.000Z
|
modules/dbnd/src/dbnd/_core/tracking/managers/callable_tracking.py
|
busunkim96/dbnd
|
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
|
[
"Apache-2.0"
] | 16
|
2020-03-11T09:37:58.000Z
|
2022-01-26T10:22:08.000Z
|
modules/dbnd/src/dbnd/_core/tracking/managers/callable_tracking.py
|
busunkim96/dbnd
|
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
|
[
"Apache-2.0"
] | 24
|
2020-03-24T13:53:50.000Z
|
2022-03-22T11:55:18.000Z
|
import contextlib
import logging
import typing
from typing import Any, Dict, Tuple
import attr
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.constants import (
RESULT_PARAM,
DbndTargetOperationStatus,
DbndTargetOperationType,
TaskRunState,
)
from dbnd._core.current import (
current_task_run,
get_databand_run,
is_verbose,
try_get_current_task,
)
from dbnd._core.errors.errors_utils import log_exception
from dbnd._core.log.external_exception_logging import log_exception_to_server
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.parameter.parameter_value import ParameterFilters
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_context import try_get_current_task
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_results import FuncResultParameter
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.utils.callable_spec import args_to_kwargs
from dbnd._core.utils.timezone import utcnow
from targets import InMemoryTarget, Target
from targets.value_meta import ValueMetaConf
from targets.values import get_value_type_of_obj
if typing.TYPE_CHECKING:
from dbnd._core.task_build.task_decorator import TaskDecorator
logger = logging.getLogger(__name__)
@attr.s
class TrackedFuncCallWithResult(object):
call_args = attr.ib() # type: Tuple[Any]
call_kwargs = attr.ib() # type: Dict[str,Any]
callable = attr.ib()
result = attr.ib(default=None)
def set_result(self, value):
self.result = value
return value
def invoke(self):
func = self.callable
return func(*self.call_args, **self.call_kwargs)
class CallableTrackingManager(object):
def __init__(self, task_decorator):
# type: (CallableTrackingManager, TaskDecorator) -> None
self.task_decorator = task_decorator
self._tracking_task_definition = None
self._call_count = 0
self._call_as_func = False
self._max_call_count = get_dbnd_project_config().max_calls_per_run
@property
def callable(self):
return self.task_decorator.class_or_func
def get_tracking_task_definition(self):
if not self._tracking_task_definition:
self._tracking_task_definition = self._build_tracking_task_definition()
return self._tracking_task_definition
def _build_tracking_task_definition(self):
return TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)
def _call_count_limit_exceeded(self):
if not self._call_as_func:
self._call_count += 1
if self._call_count > self._max_call_count:
logger.info(
"Reached maximum tracking limit of {} tasks. Running function regularly.".format(
self._max_call_count
)
)
self._call_as_func = True
return self._call_as_func
@contextlib.contextmanager
def tracking_context(self, call_args, call_kwargs):
user_code_called = False  # whether we got as far as executing the user code
user_code_finished = False  # whether the user code finished executing
func_call = None
try:
# 1. check that we don't have too many calls
if self._call_count_limit_exceeded():
yield _do_nothing_decorator
return
# 2. Start or reuse existing "main tracking task" that is root for tracked tasks
if not try_get_current_task():
"""
try to get an existing task; if none exists, try to get/create an inplace_task_run
"""
from dbnd._core.tracking.script_tracking_manager import (
try_get_inplace_tracking_task_run,
)
inplace_tracking_task = try_get_inplace_tracking_task_run()
if not inplace_tracking_task:
# we didn't manage to start an inplace tracking task run, so we will not be able to track
yield _do_nothing_decorator
return
tracking_task_definition = self.get_tracking_task_definition()
callable_spec = tracking_task_definition.task_decorator.get_callable_spec()
func_call = TrackedFuncCallWithResult(
callable=self.callable,
call_args=tuple(call_args), # prevent original call_args modification
call_kwargs=dict(call_kwargs), # prevent original kwargs modification
)
# replace positional arguments with kwargs where possible
args, kwargs = args_to_kwargs(
callable_spec.args, func_call.call_args, func_call.call_kwargs,
)
# instantiate inline task
task = TrackingTask.for_func(tracking_task_definition, args, kwargs)
# update upstream/downstream relations - needed for correct tracking
# the task can be an upstream dependency, as it was executed already
parent_task = current_task_run().task
if not parent_task.task_dag.has_upstream(task):
parent_task.set_upstream(task)
# check whether any of the inputs are outputs of a previous task;
# if so, we can add that task as upstream.
dbnd_run = get_databand_run()
call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(kwargs)
for value_origin in call_kwargs_as_targets.values():
up_task = value_origin.origin_target.task
task.set_upstream(up_task)
# creating task_run as a task we found mid-run
task_run = dbnd_run.create_task_run_at_execution_time(
task, task_engine=current_task_run().task_engine
)
should_capture_log = TrackingConfig.current().capture_tracking_log
with task_run.runner.task_run_execution_context(
handle_sigterm=True, capture_log=should_capture_log
):
task_run.set_task_run_state(state=TaskRunState.RUNNING)
_log_inputs(task_run)
# if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called = True
try:
# tracking_context is context manager - user code will run on yield
yield func_call.set_result
# if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished = True
except Exception as ex:
task_run.finished_time = utcnow()
error = TaskRunError.build_from_ex(ex, task_run)
task_run.set_task_run_state(TaskRunState.FAILED, error=error)
raise
else:
task_run.finished_time = utcnow()
# func_call.result should contain result, log it
_log_result(task_run, func_call.result)
task_run.set_task_run_state(TaskRunState.SUCCESS)
except Exception:
if user_code_called and not user_code_finished:
# if we started to call the user code but never reached the user_code_finished
# line - it means the user code raised an exception - so just re-raise it
raise
# otherwise we either never reached the user code call, or already passed it -
# then it's some dbnd tracking error - just log it
if func_call:
_handle_tracking_error("tracking-init", func_call)
else:
log_exception_to_server()
# if we never reached the user_code_called=True line - there was an error during
# dbnd tracking initialization, so nothing was done - the user function wasn't called yet
if not user_code_called:
# tracking_context is context manager - user code will run on yield
yield _do_nothing_decorator
return
def _handle_tracking_error(msg, func_call=None):
log_exception_to_server()
location = " for %s" % func_call.callable if func_call else ""
msg = "Failed during dbnd %s for %s, ignoring, and continue without tracking" % (
msg,
location,
)
if is_verbose():
logger.warning(
msg, exc_info=True,
)
else:
logger.info(msg)
def _do_nothing_decorator(f):
return f
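# Minimal usage sketch (an assumption for illustration - the real decorator
# wiring lives elsewhere in dbnd): tracking_context yields either
# func_call.set_result, which stores and returns the value, or
# _do_nothing_decorator, which returns its argument unchanged, so a wrapper
# can funnel the function's return value through it in both cases.
def _example_tracked_wrapper(manager, func):
    def wrapper(*args, **kwargs):
        with manager.tracking_context(args, kwargs) as result_hook:
            return result_hook(func(*args, **kwargs))
    return wrapper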
def _log_inputs(task_run):
"""
For tracking mode. Logs InMemoryTarget inputs.
"""
try:
params = task_run.task._params
for param_value in params.get_param_values(ParameterFilters.INPUTS):
param, value = param_value.parameter, param_value.value
if isinstance(param_value, InMemoryTarget):
try:
param = param.modify(
value_meta_conf=ValueMetaConf(
log_preview=True, log_schema=True,
)
)
task_run.tracker.log_parameter_data(
parameter=param,
target=param_value,
value=value,
operation_type=DbndTargetOperationType.read,
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log input param to tracking store.",
ex=ex,
non_critical=True,
)
except Exception as ex:
log_exception(
"Failed to log input params to tracking store.", ex=ex, non_critical=True
)
def _log_result(task_run, result):
# type: (TaskRun, Any) -> None
"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""
try:
result_param = task_run.task.task_params.get_param_value(RESULT_PARAM)
if not result_param:
logger.debug(
"No result params to log for task {}".format(task_run.task_af_id)
)
return
# we know the parameter value is a target because this is an output param
# the target is created in the task creation
result_param_def, result_target = result_param.parameter, result_param.value
# spread result into relevant fields.
if isinstance(result_param_def, FuncResultParameter):
# assign all returned values to relevant band Outputs
if result is None:
return
for result_name, value in result_param_def.named_results(result):
# we know the parameter value is a target because this is an output param
# the target is created in the task creation
parameter_value = task_run.task.task_params.get_param_value(result_name)
_log_parameter_value(
task_run,
parameter_definition=parameter_value.parameter,
target=parameter_value.value,
value=value,
)
else:
_log_parameter_value(
task_run,
parameter_definition=result_param_def,
target=result_target,
value=result,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
def _log_parameter_value(task_run, parameter_definition, target, value):
# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
parameter_definition = parameter_definition.modify(
value_meta_conf=ValueMetaConf(log_preview=True, log_schema=True)
)
try:
# handles the case where the result is a Proxy
value_type = get_value_type_of_obj(value, parameter_definition.value_type)
task_run.run.target_origin.add(target, value, value_type)
except Exception as ex:
log_exception(
"Failed to register result to target tracking.", ex=ex, non_critical=True
)
try:
task_run.tracker.log_parameter_data(
parameter=parameter_definition, # was: task_run.task.task_definition.task_class.result,
target=target,
value=value,
operation_type=DbndTargetOperationType.write, # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
| 38.568915
| 113
| 0.633744
| 1,558
| 13,152
| 5.06611
| 0.186778
| 0.035474
| 0.027366
| 0.01419
| 0.25605
| 0.190929
| 0.169897
| 0.1215
| 0.112251
| 0.102116
| 0
| 0.00044
| 0.309231
| 13,152
| 340
| 114
| 38.682353
| 0.868354
| 0.1783
| 0
| 0.202479
| 0
| 0
| 0.038364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053719
| false
| 0
| 0.107438
| 0.012397
| 0.235537
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c56d5b6165d77a3d76bfb27f03c0f747558ff24
| 5,534
|
py
|
Python
|
api.py
|
Benardi/redis-basics
|
614a15afe47780886bb6088f4ae45c6a7cbc6e22
|
[
"MIT"
] | null | null | null |
api.py
|
Benardi/redis-basics
|
614a15afe47780886bb6088f4ae45c6a7cbc6e22
|
[
"MIT"
] | null | null | null |
api.py
|
Benardi/redis-basics
|
614a15afe47780886bb6088f4ae45c6a7cbc6e22
|
[
"MIT"
] | null | null | null |
import os
import logging
from json import loads, dumps
from datetime import timedelta
from argparse import ArgumentParser
from redis import Redis
from flask import Response, Flask, request
app = Flask(__name__)
log = logging.getLogger(__name__)
parser = ArgumentParser()
parser.add_argument("-a", "--address",
action="store", dest="address",
type=str, required=True,
help="Address for api")
parser.add_argument("-p", "--port",
action="store", dest="port",
type=str, required=True,
help="Port for api")
parser.add_argument("-c", "--crt",
action="store", dest="cert",
type=str, required=False,
help="Path to certificate for this API")
parser.add_argument("-k", "--key",
action="store", dest="key",
type=str, required=False,
help="Path to key of certificate used by this API")
parser.add_argument("-rp", "--redis-port",
action="store", dest="redis-port",
type=str, required=True,
help="Port for Redis client")
args = vars(parser.parse_args())
api_address = args["address"]
api_port = args["port"]
api_cert = args["cert"]
api_key = args["key"]
redis_port = args["redis-port"]
r = Redis(port=redis_port, charset="utf-8", decode_responses=True)
@app.route("/hash", methods=['POST'])
def create_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
response_body = {"success": success}
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
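# Example request body for POST /hash (illustrative values; the "expire" keys
# must be valid datetime.timedelta keyword arguments such as "seconds" or "minutes"):
# {"key": "user:1", "pairs": {"name": "Ada", "role": "admin"}, "expire": {"minutes": 5}}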
@app.route("/hash", methods=['PUT'])
def update_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
if data.get("newkey") is not None:
r.rename(data["key"], data["newkey"])
response_body = {"success": success}
if data.get("newkey") is not None:
response_body[data["newkey"]] = r.hgetall(data["newkey"])
else:
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/hash", methods=['GET'])
def get_redis_hash():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.hgetall(key)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/key", methods=['DELETE'])
def delete_redis_key():
status = 200
key = request.headers.get("key")
success = r.delete(key)
if not success:
status = 404
response_body = {"success": bool(success)}
return Response(dumps(response_body), status=status, mimetype="application/json")
@app.route("/list", methods=['POST'])
def create_redis_list():
data = loads(request.data)
strat = data.get("strategy")
if strat is not None and strat == "left":
length = r.lpush(data["key"], *data["values"])
else:
length = r.rpush(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = r.lrange(data["key"], 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list", methods=['GET'])
def get_entire_list():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.lrange(key, 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list/<idx>", methods=['GET'])
def get_list_at_idx(idx):
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = {}
response_body[key][str(idx)] = r.lindex(key, idx)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['POST'])
def create_add_set():
data = loads(request.data)
length = r.sadd(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = list(r.smembers(data["key"]))
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set/<n_items>", methods=['GET'])
def get_n_items_set(n_items):
response_body = {"success": True}
key = request.headers.get("key")
response_body = {key: list(r.srandmember(key, n_items))}
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['GET'])
def get_set():
response_body = {"success": True}
key = request.headers.get("key")
response_body = {key: list(r.smembers(key))}
return Response(dumps(response_body), status=200, mimetype="application/json")
def start_api(address, port, clnt_cert=None, clnt_key=None):
if clnt_cert is None or clnt_key is None:
app.run(host=address, port=port, debug=False)
else:
app.run(host=address, port=port,
ssl_context=(clnt_cert, clnt_key), debug=False)
if api_cert is None or api_key is None:
start_api(api_address, api_port)
else:
start_api(api_address, api_port, api_cert, api_key)
| 30.744444
| 85
| 0.632815
| 724
| 5,534
| 4.707182
| 0.16989
| 0.109155
| 0.055751
| 0.079225
| 0.594484
| 0.54284
| 0.502641
| 0.456573
| 0.43662
| 0.43662
| 0
| 0.008603
| 0.201843
| 5,534
| 179
| 86
| 30.916201
| 0.762961
| 0
| 0
| 0.376923
| 0
| 0
| 0.134285
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084615
| false
| 0
| 0.053846
| 0
| 0.215385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c5842107ba44f69dd4be13f1db7dd944439eb70
| 6,071
|
py
|
Python
|
zhihu_spider/ZhihuSpider/spiders/zhihu.py
|
Ki-Seki/gadgets
|
6e031e1f6536a15b48e3beb80ba8bf31d2a3db7a
|
[
"MIT"
] | 1
|
2022-02-24T12:48:47.000Z
|
2022-02-24T12:48:47.000Z
|
zhihu_spider/ZhihuSpider/spiders/zhihu.py
|
Ki-Seki/gadgets
|
6e031e1f6536a15b48e3beb80ba8bf31d2a3db7a
|
[
"MIT"
] | null | null | null |
zhihu_spider/ZhihuSpider/spiders/zhihu.py
|
Ki-Seki/gadgets
|
6e031e1f6536a15b48e3beb80ba8bf31d2a3db7a
|
[
"MIT"
] | 1
|
2022-02-24T12:51:20.000Z
|
2022-02-24T12:51:20.000Z
|
"""
启动此 spider 前需要手动启动 Chrome,cmd 命令如下:
cd 进入 Chrome 可执行文件 所在的目录
执行:chrome.exe --remote-debugging-port=9222
此时在浏览器窗口地址栏访问:http://127.0.0.1:9222/json,如果页面出现 json 数据,则表明手动启动成功
启动此 spider 后,注意与命令行交互!
在 settings 当中要做的:
# ROBOTSTXT_OBEY = False # 如果不关闭,parse 方法无法执行
# COOKIES_ENABLED = True # 以便 Request 值在传递时自动传递 cookies
# USER_AGENT = 一个合适的值
# DOWNLOADER_MIDDLEWARES 配置好以备 user agent 的自动变换
"""
import re
import json
import datetime
import scrapy
from scrapy.loader import ItemLoader
from urllib import parse
from ZhihuSpider.utils.browsezhihu import get_cookies
from ZhihuSpider import settings
from ZhihuSpider.items import ZhihuQuestionItem, ZhihuAnswerItem
class ZhihuSpider(scrapy.Spider):
name = 'zhihu'
allowed_domains = ['zhihu.com']
start_urls = ['http://zhihu.com/']
# Generic URL for requesting the first page of answers for a question
# 0: question id, 1: offset, 2: limit
start_answer_urls = 'https://www.zhihu.com/api/v4/questions/{0}/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D.url%3Bdata%5B*%5D.author.follower_count%2Cvip_info%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings.table_of_content.enabled&offset={1}&limit={2}&sort_by=default&platform=desktop'
headers = {
"HOST": "www.zhihu.com",
"Referer": "https://www.zhihu.com",
"User-Agent": settings.USER_AGENT
}
# Extract all URLs on the home page that point to questions
def parse(self, response, **kwargs):
# .extract() is a parsel.selection function that pulls the data field from each element in the selection
all_urls = response.css("a::attr(href)").extract()
# urllib.parse.urljoin can join two partial URLs
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
all_urls = filter(lambda x: x.startswith("https"), all_urls)
for url in all_urls:
# (/|$) matches either "/" or end-of-string
match_obj = re.match(r"(.*zhihu.com/question/(\d+))(/|$).*", url)
if match_obj: # the URL points to a question page
question_url = match_obj.group(1)
question_id = match_obj.group(2)
yield scrapy.Request(question_url, callback=self.parse_question, headers=self.headers
, meta={"question_id": question_id, "url": question_url}) # meta 可以向下传递
def parse_question(self, response):
"""
Extract the question item from a question page
"""
# When using ItemLoader, every field value is a list
item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
item_loader.add_value("question_id", response.meta.get("question_id", 0)) # 使用 meta 来加载
item_loader.add_css("topics", "head > meta[name=keywords]::attr(content)")
item_loader.add_value("url", response.meta.get("url", ''))
item_loader.add_css("title", "h1.QuestionHeader-title::text")
item_loader.add_css("content", ".QuestionRichText span:nth-child(1)::text")
item_loader.add_css("answer_num", ".List-headerText > span::text, .ViewAll:nth-child(1) > a::text")
item_loader.add_css("comments_num", ".QuestionHeader-Comment button::text")
item_loader.add_css("watch_user_num", ".NumberBoard-itemValue::attr(title)")
item_loader.add_css("click_num", ".NumberBoard-itemValue::attr(title)")
# On obtaining create_time and update_time:
# request the question's log URL, then pass the item_loader content above down as a meta dict,
# and finally let get_create_update_of_question assemble the question_item and yield it.
# The unfinished partial implementation is as follows:
# tmp = response.css(".QuestionHeader-menu > a").extract()[0]
# log_url = parse.urljoin(self.start_urls[0], tmp)
# yield scrapy.Request(log_url, callback=self.get_create_update_of_question, headers=self.headers, meta=......)
question_item = item_loader.load_item()
yield question_item
yield scrapy.Request(self.start_answer_urls.format(response.meta.get("question_id", ''), 0, 20)
, callback=self.parse_answer, headers=self.headers)
# def get_create_update_of_question(self, response):
# pass
def parse_answer(self, response):
"""
Extract answer items from the answers API response
"""
answer_json = json.loads(response.text)
is_end = answer_json["paging"]["is_end"]
next_url = answer_json["paging"]["next"]
for answer in answer_json["data"]:
answer_item = ZhihuAnswerItem()
answer_item["answer_id"] = answer["id"]
answer_item["url"] = answer["url"]
answer_item["question_id"] = answer["question"]["id"]
answer_item["author_id"] = answer["author"]["id"]
answer_item["content"] = answer["content"] if "content" in answer else None
answer_item["praise_num"] = answer["voteup_count"]
answer_item["comments_num"] = answer["comment_count"]
answer_item["create_time"] = answer["created_time"]
answer_item["update_time"] = answer["updated_time"]
answer_item["crawl_time"] = datetime.datetime.now()
yield answer_item
if not is_end:
yield scrapy.Request(next_url, callback=self.parse_answer, headers=self.headers)
def start_requests(self):
# Before using selenium, start Chrome with the following cmd commands:
# cd "C:\Program Files\Google\Chrome\Application"
# chrome.exe --remote-debugging-port=9222
# The Python line below cannot be used because the command waits for a return value, unless multithreading is used
# os.system('"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" --remote-debugging-port=9222')
cookies = get_cookies()
yield scrapy.Request(url=self.start_urls[0], dont_filter=True, cookies=cookies)
| 46.7
| 813
| 0.680119
| 773
| 6,071
| 5.139715
| 0.349288
| 0.032721
| 0.029449
| 0.02819
| 0.157312
| 0.096652
| 0.056381
| 0.056381
| 0.034231
| 0.034231
| 0
| 0.01842
| 0.19519
| 6,071
| 129
| 814
| 47.062016
| 0.79472
| 0.232416
| 0
| 0
| 0
| 0.029851
| 0.333042
| 0.051288
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.134328
| 0
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c5b6f7b1147d0bfa29ae31ca75143f0f85b1910
| 523
|
py
|
Python
|
main/handle_file.py
|
nucluster/us_states
|
26cca38990b9afb6a2b8cc4d1365409428793c6d
|
[
"MIT"
] | null | null | null |
main/handle_file.py
|
nucluster/us_states
|
26cca38990b9afb6a2b8cc4d1365409428793c6d
|
[
"MIT"
] | null | null | null |
main/handle_file.py
|
nucluster/us_states
|
26cca38990b9afb6a2b8cc4d1365409428793c6d
|
[
"MIT"
] | null | null | null |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# def handle_uploaded_file(f):
# with open('screenshot.png', 'wb') as destination:
# # for chunk in f.chunks():
# # destination.write(chunk)
# destination.write(f)
with open(
BASE_DIR/'media'/'Greater_coat_of_arms_of_the_United_States.png', 'rb'
) as file:
flag = file.read()
# handle_uploaded_file(flag)
print(type(flag))
print(len(flag))
# print(flag)
# for place in sys.path:
# print(place)
| 21.791667
| 74
| 0.659656
| 73
| 523
| 4.493151
| 0.561644
| 0.082317
| 0.109756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198853
| 523
| 24
| 75
| 21.791667
| 0.782816
| 0.500956
| 0
| 0
| 0
| 0
| 0.208
| 0.18
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c5ca9cec48517b47b0e018883a0875e922d1924
| 4,921
|
py
|
Python
|
2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp import web
import capstone
import functools
from gdbproc import GDBProcess
import socketio
import asyncio
import codecs
import os
enable_logging = False
premium = 'PREMIUM' in os.environ
if premium:
access_key = os.getenv('PREMIUM_KEY')
runnable = ['/home/user/printwebflag']
else:
access_key = os.getenv('TRIAL_KEY')
runnable = ['/bin/sleep', '20']
MAX_INSN_LEN = 15
capstone_md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
sio = socketio.AsyncServer()
app = web.Application()
sio.attach(app)
with open('index.html') as f:
index_html = f.read()
async def index(request):
if 'key' not in request.cookies:
return web.Response(status=401, text='permission denied (missing key)', content_type='text/html')
if request.cookies['key'] != access_key:
return web.Response(status=401, text='permission denied (invalid key)', content_type='text/html')
return web.Response(text=index_html, content_type='text/html')
app.add_routes([web.get('/', index),
web.get('/{name}', index)])
gdb_sessions = {}
stop_queue_readers = {}
async def on_shutdown(app):
await asyncio.gather(*(delete_gdb_process(sid) for sid in list(gdb_sessions)))
app.on_shutdown.append(on_shutdown)
def log(msg):
if enable_logging:
print('[*] {}'.format(msg))
@sio.on('connect')
def connect(sid, environ):
log('connected {}'.format(sid))
if 'key={}'.format(access_key) not in environ['HTTP_COOKIE']:
log('access_key not found {}'.format(environ['HTTP_COOKIE']))
return False
@sio.on('disconnect')
async def disconnect(sid):
log('disconnected {}'.format(sid))
await delete_gdb_process(sid)
async def stop_queue_reader(sid, queue):
while True:
pkt = await queue.get()
await update_all(sid)
async def create_gdb_process(sid):
stop_queue = asyncio.Queue()
gdb_sessions[sid] = await GDBProcess.create(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)
loop = asyncio.get_event_loop()
stop_queue_readers[sid] = loop.create_task(stop_queue_reader(sid, stop_queue))
async def delete_gdb_process(sid):
if sid in gdb_sessions:
stop_queue_readers[sid].cancel()
del stop_queue_readers[sid]
await gdb_sessions[sid].release()
del gdb_sessions[sid]
@sio.on('start')
async def start(sid):
await delete_gdb_process(sid)
await create_gdb_process(sid)
# Reading registers doesn't work on Ubuntu 18.04 for some reason.
# Step once as a workaround.
step(sid)
async def update_all(sid):
log('updating sid {}'.format(sid))
regs_task = getregs(sid)
maps_task = getmaps(sid)
asm_task = getasm(sid, {'addr': await gdb_sessions[sid].get_reg('rip'), 'count': 100})
await asyncio.gather(regs_task, maps_task, asm_task)
log('update done')
@sio.on('step')
def step(sid):
gdb_sessions[sid].step()
@sio.on('cont')
def cont(sid):
gdb_sessions[sid].cont()
@sio.on('stop')
def stop(sid):
gdb_sessions[sid].interrupt()
async def getregs(sid):
regs = await gdb_sessions[sid].get_regs()
await sio.emit('regs', regs, room=sid)
@sio.on('mem')
async def getmem(sid, msg):
addr = msg['addr']
count = msg['count']
data = gdb_sessions[sid].read_mem(addr, count)
await sio.emit('mem', {'addr': addr, 'data': data}, room=sid)
async def getmaps(sid):
maps = gdb_sessions[sid].maps()
await sio.emit('maps', maps, room=sid)
@sio.on('break')
async def setbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].set_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('unbreak')
async def rmbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].remove_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('search')
async def search(sid, data):
q = data['q']
qtype = data['type']
await sio.emit('search_result', gdb_sessions[sid].search(q.encode(), qtype), room=sid)
async def getasm(sid, data):
addr = data['addr']
count = data['count']
result = []
for _ in range(count):
data = gdb_sessions[sid].read_mem(addr, MAX_INSN_LEN)
try:
disasm = next(capstone_md.disasm_lite(data, addr))
except StopIteration:
break
result.append(disasm)
addr += disasm[1]
await sio.emit('asm', result, room=sid)
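# Note (added for clarity): capstone's disasm_lite yields lightweight
# (address, size, mnemonic, op_str) tuples, so the 'asm' event carries a list
# like [(0x401000, 1, 'push', 'rbp'), ...] - the example values are illustrative.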
if __name__ == '__main__':
web.run_app(app)
| 27.960227
| 104
| 0.710018
| 740
| 4,921
| 4.577027
| 0.308108
| 0.061707
| 0.066135
| 0.028048
| 0.178034
| 0.129318
| 0.113375
| 0.113375
| 0.066135
| 0.043696
| 0
| 0.00739
| 0.147531
| 4,921
| 175
| 105
| 28.12
| 0.8
| 0.134322
| 0
| 0.055118
| 0
| 0
| 0.106293
| 0.005421
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03937
| false
| 0
| 0.062992
| 0
| 0.133858
| 0.015748
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c5de31d5758cb655e6faea3c4a14331feb71111
| 4,960
|
py
|
Python
|
examples/multi_physics/piezo_elasticity.py
|
BubuLK/sfepy
|
3e8e2082c26d574dc334fe3a0e0eeb723f7a6657
|
[
"BSD-3-Clause"
] | null | null | null |
examples/multi_physics/piezo_elasticity.py
|
BubuLK/sfepy
|
3e8e2082c26d574dc334fe3a0e0eeb723f7a6657
|
[
"BSD-3-Clause"
] | null | null | null |
examples/multi_physics/piezo_elasticity.py
|
BubuLK/sfepy
|
3e8e2082c26d574dc334fe3a0e0eeb723f7a6657
|
[
"BSD-3-Clause"
] | null | null | null |
r"""
Piezo-elasticity problem - linear elastic material with piezoelectric
effects.
Find :math:`\ul{u}`, :math:`\phi` such that:
.. math::
- \omega^2 \int_{Y} \rho\ \ul{v} \cdot \ul{u}
+ \int_{Y} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{Y_2} g_{kij}\ e_{ij}(\ul{v}) \nabla_k \phi
= 0
\;, \quad \forall \ul{v} \;,
\int_{Y_2} g_{kij}\ e_{ij}(\ul{u}) \nabla_k \psi
+ \int_{Y} K_{ij} \nabla_i \psi \nabla_j \phi
= 0
\;, \quad \forall \psi \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
import os
import numpy as nm
from sfepy import data_dir
from sfepy.discrete.fem import MeshIO
from sfepy.mechanics.matcoefs import stiffness_from_lame
import six
def post_process(out, pb, state, extend=False):
"""
Calculate and output the strain and stresses for the given state.
"""
from sfepy.base.base import Struct
from sfepy.discrete.fem import extend_cell_data
ev = pb.evaluate
strain = ev('ev_cauchy_strain.i.Y(u)', mode='el_avg')
stress = ev('ev_cauchy_stress.i.Y(inclusion.D, u)', mode='el_avg')
piezo = -ev('ev_piezo_stress.i.Y2(inclusion.coupling, phi)',
mode='el_avg')
piezo = extend_cell_data(piezo, pb.domain, 'Y2', val=0.0)
piezo_strain = ev('ev_piezo_strain.i.Y(inclusion.coupling, u)',
mode='el_avg')
out['cauchy_strain'] = Struct(name='output_data', mode='cell',
data=strain, dofs=None)
out['elastic_stress'] = Struct(name='output_data', mode='cell',
data=stress, dofs=None)
out['piezo_stress'] = Struct(name='output_data', mode='cell',
data=piezo, dofs=None)
out['piezo_strain'] = Struct(name='output_data', mode='cell',
data=piezo_strain, dofs=None)
out['total_stress'] = Struct(name='output_data', mode='cell',
data=stress + piezo, dofs=None)
return out
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1
omega_squared = omega**2
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox, dim = io.read_bounding_box(ret_dim=True)
geom = {3 : '3_4', 2 : '2_3'}[dim]
x_left, x_right = bbox[:,0]
options = {
'post_process_hook' : 'post_process',
}
regions = {
'Y' : 'all',
'Y1' : 'cells of group 1',
'Y2' : 'cells of group 2',
'Y2_Surface': ('r.Y1 *v r.Y2', 'facet'),
'Left' : ('vertices in (x < %f)' % (x_left + 1e-3), 'facet'),
'Right' : ('vertices in (x > %f)' % (x_right - 1e-3), 'facet'),
}
fields = {
'displacement' : ('real', dim, 'Y', 1),
'potential' : ('real', 1, 'Y', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
}
ebcs = {
'u1' : ('Left', {'u.all' : 0.0}),
'u2' : ('Right', {'u.0' : 0.1}),
'phi' : ('Y2_Surface', {'phi.all' : 0.0}),
}
def get_inclusion_pars(ts, coor, mode=None, **kwargs):
"""TODO: implement proper 3D -> 2D transformation of constitutive
matrices."""
if mode == 'qp':
_, dim = coor.shape
sym = (dim + 1) * dim // 2
dielectric = nm.eye(dim, dtype=nm.float64)
# !!!
coupling = nm.ones((dim, sym), dtype=nm.float64)
# coupling[0,1] = 0.2
out = {
# Lame coefficients in 1e+10 Pa.
'D' : stiffness_from_lame(dim=2, lam=0.1798, mu=0.148),
# dielectric tensor
'dielectric' : dielectric,
# piezoelectric coupling
'coupling' : coupling,
'density' : nm.array([[0.1142]]), # in 1e4 kg/m3
}
for key, val in six.iteritems(out):
out[key] = val[None, ...]
return out
materials = {
'inclusion' : (None, 'get_inclusion_pars')
}
functions = {
'get_inclusion_pars' : (get_inclusion_pars,),
}
integrals = {
'i' : 2,
}
equations = {
'1' : """- %f * dw_volume_dot.i.Y(inclusion.density, v, u)
+ dw_lin_elastic.i.Y(inclusion.D, v, u)
- dw_piezo_coupling.i.Y2(inclusion.coupling, v, phi)
= 0""" % omega_squared,
'2' : """dw_piezo_coupling.i.Y2(inclusion.coupling, u, psi)
+ dw_diffusion.i.Y(inclusion.dielectric, psi, phi)
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton',
{'i_max' : 1,
'eps_a' : 1e-10,
}),
}
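# Editor's note: this file is a declarative sfepy "problem description": sfepy
# imports the module and reads the top-level dicts (regions, fields, variables,
# ebcs, materials, functions, integrals, equations, solvers) together with the
# post_process hook named in options. Such examples are normally executed via
# sfepy's driver (historically the simple.py script, later the sfepy-run entry
# point), not run directly.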
| 29.349112
| 78
| 0.563105
| 670
| 4,960
| 3.979104
| 0.319403
| 0.021005
| 0.02063
| 0.037509
| 0.208177
| 0.17892
| 0.168042
| 0.141785
| 0.069017
| 0.036009
| 0
| 0.024946
| 0.256452
| 4,960
| 168
| 79
| 29.52381
| 0.697939
| 0.216532
| 0
| 0.038462
| 0
| 0
| 0.289357
| 0.094718
| 0
| 0
| 0
| 0.005952
| 0
| 1
| 0.019231
| false
| 0
| 0.086538
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c614378ccffafbcb6378e7da9d99a24c5b8ad0b
| 1,848
|
py
|
Python
|
tests/sentry/api/endpoints/test_project_details.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_details.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_details.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.urlresolvers import reverse
from sentry.models import Project
from sentry.testutils import APITestCase
class ProjectDetailsTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
response = self.client.get(url)
assert response.status_code == 200
assert response.data['id'] == str(project.id)
class ProjectUpdateTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
resp = self.client.put(url, data={
'name': 'hello world',
'slug': 'foobar',
})
assert resp.status_code == 200, resp.content
project = Project.objects.get(id=project.id)
assert project.name == 'hello world'
assert project.slug == 'foobar'
class ProjectDeleteTest(APITestCase):
def test_simple(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=0):
response = self.client.delete(url)
assert response.status_code == 204
assert not Project.objects.filter(id=project.id).exists()
def test_internal_project(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=project.id):
response = self.client.delete(url)
assert response.status_code == 403
| 33
| 88
| 0.65368
| 225
| 1,848
| 5.275556
| 0.257778
| 0.090986
| 0.055602
| 0.050548
| 0.620051
| 0.574558
| 0.574558
| 0.554339
| 0.554339
| 0.468408
| 0
| 0.011872
| 0.225108
| 1,848
| 55
| 89
| 33.6
| 0.817039
| 0.015693
| 0
| 0.425
| 0
| 0
| 0.10793
| 0.061674
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.075
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c6258f2e73dfc4619740d301b9ae33bb12c5202
| 29,732
|
py
|
Python
|
tests/test_table.py
|
databook1/python-pptx
|
87ca6bf34f9ced17cc4f3c94cf141069429e7583
|
[
"MIT"
] | null | null | null |
tests/test_table.py
|
databook1/python-pptx
|
87ca6bf34f9ced17cc4f3c94cf141069429e7583
|
[
"MIT"
] | 12
|
2021-01-22T16:53:51.000Z
|
2022-02-23T13:57:43.000Z
|
tests/test_table.py
|
databook1/python-pptx
|
87ca6bf34f9ced17cc4f3c94cf141069429e7583
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""Unit-test suite for `pptx.table` module."""
import pytest
from pptx.dml.fill import FillFormat
from pptx.dml.border import BorderFormat
from pptx.enum.text import MSO_ANCHOR
from pptx.oxml.ns import qn
from pptx.oxml.table import CT_Table, CT_TableCell, TcRange
from pptx.shapes.graphfrm import GraphicFrame
from pptx.table import (
_Cell,
_CellCollection,
_Column,
_ColumnCollection,
_Row,
_RowCollection,
Table,
)
from pptx.text.text import TextFrame
from pptx.util import Inches, Length, Pt
from .unitutil.cxml import element, xml
from .unitutil.mock import call, class_mock, instance_mock, property_mock
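# Editor's note: the element()/xml() helpers imported above build lxml elements
# from the compact "cxml" shorthand used throughout these tests; for example
# "a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))" describes an <a:tbl> with two
# <a:tr> rows of two <a:tc> cells each, and a trailing "{firstRow=1}" sets an
# attribute on the element it follows.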
class DescribeTable(object):
"""Unit-test suite for `pptx.table.Table` objects."""
def it_provides_access_to_its_cells(self, tbl_, tc_, _Cell_, cell_):
row_idx, col_idx = 4, 2
tbl_.tc.return_value = tc_
_Cell_.return_value = cell_
table = Table(tbl_, None)
cell = table.cell(row_idx, col_idx)
tbl_.tc.assert_called_once_with(row_idx, col_idx)
_Cell_.assert_called_once_with(tc_, table)
assert cell is cell_
def it_provides_access_to_its_columns(self, request):
columns_ = instance_mock(request, _ColumnCollection)
_ColumnCollection_ = class_mock(
request, "pptx.table._ColumnCollection", return_value=columns_
)
tbl = element("a:tbl")
table = Table(tbl, None)
columns = table.columns
_ColumnCollection_.assert_called_once_with(tbl, table)
assert columns is columns_
def it_can_iterate_its_grid_cells(self, request, _Cell_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
expected_tcs = tbl.xpath(".//a:tc")
expected_cells = _Cell_.side_effect = [
instance_mock(request, _Cell, name="cell%d" % idx) for idx in range(4)
]
table = Table(tbl, None)
cells = list(table.iter_cells())
assert cells == expected_cells
assert _Cell_.call_args_list == [call(tc, table) for tc in expected_tcs]
def it_provides_access_to_its_rows(self, request):
rows_ = instance_mock(request, _RowCollection)
_RowCollection_ = class_mock(
request, "pptx.table._RowCollection", return_value=rows_
)
tbl = element("a:tbl")
table = Table(tbl, None)
rows = table.rows
_RowCollection_.assert_called_once_with(tbl, table)
assert rows is rows_
def it_updates_graphic_frame_width_on_width_change(self, dx_fixture):
table, expected_width = dx_fixture
table.notify_width_changed()
assert table._graphic_frame.width == expected_width
def it_updates_graphic_frame_height_on_height_change(self, dy_fixture):
table, expected_height = dy_fixture
table.notify_height_changed()
assert table._graphic_frame.height == expected_height
# fixtures -------------------------------------------------------
@pytest.fixture
def dx_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/a:tblGrid/(a:gridCol{w=111},a:gridCol{w=222})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_width = 333
return table, expected_width
@pytest.fixture
def dy_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/(a:tr{h=100},a:tr{h=200})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_height = 300
return table, expected_height
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
@pytest.fixture
def graphic_frame_(self, request):
return instance_mock(request, GraphicFrame)
@pytest.fixture
def tbl_(self, request):
return instance_mock(request, CT_Table)
@pytest.fixture
def tc_(self, request):
return instance_mock(request, CT_TableCell)
class DescribeTableBooleanProperties(object):
def it_knows_its_boolean_property_settings(self, boolprop_get_fixture):
table, boolprop_name, expected_value = boolprop_get_fixture
boolprop_value = getattr(table, boolprop_name)
assert boolprop_value is expected_value
def it_can_change_its_boolean_property_settings(self, boolprop_set_fixture):
table, boolprop_name, new_value, expected_xml = boolprop_set_fixture
setattr(table, boolprop_name, new_value)
assert table._tbl.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tbl", "first_row", False),
("a:tbl/a:tblPr", "first_row", False),
("a:tbl/a:tblPr{firstRow=1}", "first_row", True),
("a:tbl/a:tblPr{firstRow=0}", "first_row", False),
("a:tbl/a:tblPr{firstRow=true}", "first_row", True),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False),
("a:tbl/a:tblPr{firstCol=1}", "first_col", True),
("a:tbl/a:tblPr{lastRow=0}", "last_row", False),
("a:tbl/a:tblPr{lastCol=true}", "last_col", True),
("a:tbl/a:tblPr{bandRow=false}", "horz_banding", False),
("a:tbl/a:tblPr", "vert_banding", False),
]
)
def boolprop_get_fixture(self, request):
tbl_cxml, boolprop_name, expected_value = request.param
table = Table(element(tbl_cxml), None)
return table, boolprop_name, expected_value
@pytest.fixture(
params=[
("a:tbl", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl", "first_row", False, "a:tbl/a:tblPr"),
("a:tbl/a:tblPr", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl/a:tblPr", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{firstRow=true}",
"first_row",
True,
"a:tbl/a:tblPr{firstRow=1}",
),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{bandRow=1}",
"first_row",
True,
"a:tbl/a:tblPr{bandRow=1,firstRow=1}",
),
("a:tbl", "first_col", True, "a:tbl/a:tblPr{firstCol=1}"),
("a:tbl", "last_row", True, "a:tbl/a:tblPr{lastRow=1}"),
("a:tbl", "last_col", True, "a:tbl/a:tblPr{lastCol=1}"),
("a:tbl", "horz_banding", True, "a:tbl/a:tblPr{bandRow=1}"),
("a:tbl", "vert_banding", True, "a:tbl/a:tblPr{bandCol=1}"),
]
)
def boolprop_set_fixture(self, request):
tbl_cxml, boolprop_name, new_value, expected_tbl_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_tbl_cxml)
return table, boolprop_name, new_value, expected_xml
class Describe_Cell(object):
"""Unit-test suite for `pptx.table._Cell` object."""
def it_is_equal_to_other_instance_having_same_tc(self):
tc = element("a:tc")
other_tc = element("a:tc")
cell = _Cell(tc, None)
cell_with_same_tc = _Cell(tc, None)
cell_with_other_tc = _Cell(other_tc, None)
assert cell == cell_with_same_tc
assert cell != cell_with_other_tc
def it_has_a_fill(self, fill_fixture):
cell = fill_fixture
assert isinstance(cell.fill, FillFormat)
def it_knows_whether_it_is_merge_origin_cell(self, origin_fixture):
tc, expected_value = origin_fixture
cell = _Cell(tc, None)
is_merge_origin = cell.is_merge_origin
assert is_merge_origin is expected_value
def it_knows_whether_it_is_spanned(self, spanned_fixture):
tc, expected_value = spanned_fixture
cell = _Cell(tc, None)
is_spanned = cell.is_spanned
assert is_spanned is expected_value
def it_knows_its_margin_settings(self, margin_get_fixture):
cell, margin_prop_name, expected_value = margin_get_fixture
margin_value = getattr(cell, margin_prop_name)
assert margin_value == expected_value
def it_can_change_its_margin_settings(self, margin_set_fixture):
cell, margin_prop_name, new_value, expected_xml = margin_set_fixture
setattr(cell, margin_prop_name, new_value)
assert cell._tc.xml == expected_xml
def it_raises_on_margin_assigned_other_than_int_or_None(
self, margin_raises_fixture
):
cell, margin_attr_name, val_of_invalid_type = margin_raises_fixture
with pytest.raises(TypeError):
setattr(cell, margin_attr_name, val_of_invalid_type)
def it_can_merge_a_range_of_cells(self, TcRange_, tc_range_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
tc, other_tc = tbl.tc(0, 0), tbl.tc(1, 1)
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = False
tc_range_.dimensions = 2, 2
def tcs(*rowcols):
return (tbl.tc(*rowcol) for rowcol in rowcols)
tc_range_.iter_top_row_tcs.return_value = tcs((0, 0), (0, 1))
tc_range_.iter_left_col_tcs.return_value = tcs((0, 0), (1, 0))
tc_range_.iter_except_left_col_tcs.return_value = tcs((0, 1), (1, 1))
tc_range_.iter_except_top_row_tcs.return_value = tcs((1, 0), (1, 1))
expected_xml = xml(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{rowSpan=2,hMerge=1"
"}),a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))"
)
cell, other_cell = _Cell(tc, None), _Cell(other_tc, None)
cell.merge(other_cell)
TcRange_.assert_called_once_with(tc, other_tc)
tc_range_.move_content_to_origin.assert_called_once_with()
assert tbl.xml == expected_xml
def but_it_raises_when_cells_are_from_different_tables(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.in_same_table = False
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "different table" in str(e.value)
def and_it_raises_when_range_contains_merged_cell(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = True
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "contains one or more merged cells" in str(e.value)
def it_knows_how_many_rows_the_merge_spans(self, height_fixture):
tc, expected_value = height_fixture
cell = _Cell(tc, None)
span_height = cell.span_height
assert span_height == expected_value
def it_knows_how_many_columns_the_merge_spans(self, width_fixture):
tc, expected_value = width_fixture
cell = _Cell(tc, None)
span_width = cell.span_width
assert span_width == expected_value
def it_can_split_a_merged_cell(self, split_fixture):
origin_tc, range_tcs = split_fixture
cell = _Cell(origin_tc, None)
cell.split()
assert all(tc.gridSpan == 1 for tc in range_tcs)
assert all(tc.rowSpan == 1 for tc in range_tcs)
assert all(not tc.hMerge for tc in range_tcs)
assert all(not tc.vMerge for tc in range_tcs)
def but_it_raises_when_cell_to_be_split_is_not_merge_origin(self):
tc = element("a:tbl/a:tr/a:tc").xpath("//a:tc")[0]
cell = _Cell(tc, None)
with pytest.raises(ValueError) as e:
cell.split()
assert "not a merge-origin cell" in str(e.value)
def it_knows_what_text_it_contains(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
text_frame_.text = "foobar"
cell = _Cell(None, None)
text = cell.text
assert text == "foobar"
def it_can_change_its_text(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
cell = _Cell(None, None)
cell.text = "føøbår"
assert text_frame_.text == "føøbår"
def it_knows_its_vertical_anchor_setting(self, anchor_get_fixture):
cell, expected_value = anchor_get_fixture
assert cell.vertical_anchor == expected_value
def it_can_change_its_vertical_anchor(self, anchor_set_fixture):
cell, new_value, expected_xml = anchor_set_fixture
cell.vertical_anchor = new_value
assert cell._tc.xml == expected_xml
def it_knows_it_has_border_settings(self, border_fixture):
cell = border_fixture
assert isinstance(cell.border_left, BorderFormat)
assert isinstance(cell.border_right, BorderFormat)
assert isinstance(cell.border_top, BorderFormat)
assert isinstance(cell.border_bottom, BorderFormat)
assert isinstance(cell.border_tl_br, BorderFormat)
assert isinstance(cell.border_bl_tr, BorderFormat)
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tc", None),
("a:tc/a:tcPr", None),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.TOP),
("a:tc/a:tcPr{anchor=ctr}", MSO_ANCHOR.MIDDLE),
("a:tc/a:tcPr{anchor=b}", MSO_ANCHOR.BOTTOM),
]
)
def anchor_get_fixture(self, request):
tc_cxml, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, expected_value
@pytest.fixture(
params=[
("a:tc", None, "a:tc"),
("a:tc", MSO_ANCHOR.TOP, "a:tc/a:tcPr{anchor=t}"),
("a:tc", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc", MSO_ANCHOR.BOTTOM, "a:tc/a:tcPr{anchor=b}"),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc/a:tcPr{anchor=ctr}", None, "a:tc/a:tcPr"),
]
)
def anchor_set_fixture(self, request):
tc_cxml, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, new_value, expected_xml
@pytest.fixture
def fill_fixture(self, cell):
return cell
@pytest.fixture
def border_fixture(self, cell):
return cell
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{gridSpan=2}", 1), ("a:tc{rowSpan=42}", 42)]
)
def height_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc/a:tcPr{marL=82296}", "margin_left", Inches(0.09)),
("a:tc/a:tcPr{marR=73152}", "margin_right", Inches(0.08)),
("a:tc/a:tcPr{marT=64008}", "margin_top", Inches(0.07)),
("a:tc/a:tcPr{marB=54864}", "margin_bottom", Inches(0.06)),
("a:tc", "margin_left", Inches(0.1)),
("a:tc/a:tcPr", "margin_right", Inches(0.1)),
("a:tc", "margin_top", Inches(0.05)),
("a:tc/a:tcPr", "margin_bottom", Inches(0.05)),
]
)
def margin_get_fixture(self, request):
tc_cxml, margin_prop_name, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, margin_prop_name, expected_value
@pytest.fixture(
params=[
("a:tc", "margin_left", Inches(0.08), "a:tc/a:tcPr{marL=73152}"),
("a:tc", "margin_right", Inches(0.08), "a:tc/a:tcPr{marR=73152}"),
("a:tc", "margin_top", Inches(0.08), "a:tc/a:tcPr{marT=73152}"),
("a:tc", "margin_bottom", Inches(0.08), "a:tc/a:tcPr{marB=73152}"),
("a:tc", "margin_left", None, "a:tc"),
("a:tc/a:tcPr{marL=42}", "margin_left", None, "a:tc/a:tcPr"),
]
)
def margin_set_fixture(self, request):
tc_cxml, margin_prop_name, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, margin_prop_name, new_value, expected_xml
@pytest.fixture(
params=["margin_left", "margin_right", "margin_top", "margin_bottom"]
)
def margin_raises_fixture(self, request):
margin_prop_name = request.param
cell = _Cell(element("a:tc"), None)
val_of_invalid_type = "foobar"
return cell, margin_prop_name, val_of_invalid_type
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=1}", False),
("a:tc{hMerge=1}", False),
("a:tc{gridSpan=2,vMerge=1}", False),
("a:tc{gridSpan=2}", True),
("a:tc{rowSpan=2}", True),
("a:tc{gridSpan=2,rowSpan=3}", True),
]
)
def origin_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=2}", False),
("a:tc{hMerge=1}", True),
("a:tc{gridSpan=2,vMerge=1}", True),
("a:tc{rowSpan=2,hMerge=true}", True),
("a:tc{gridSpan=2,rowSpan=3}", False),
]
)
def spanned_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
(
"a:tbl/(a:tr/(a:tc{gridSpan=2},a:tc{hMerge=1}),a:tr/(a:tc,a:tc))",
0,
[0, 1],
),
(
"a:tbl/(a:tr/(a:tc{rowSpan=2},a:tc),a:tr/(a:tc{vMerge=1},a:tc))",
0,
[0, 2],
),
(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{hMerge=1,rowSpan=2}),"
"a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))",
0,
[0, 1, 2, 3],
),
]
)
def split_fixture(self, request):
tbl_cxml, origin_tc_idx, range_tc_idxs = request.param
tcs = element(tbl_cxml).xpath("//a:tc")
origin_tc = tcs[origin_tc_idx]
range_tcs = tuple(tcs[idx] for idx in range_tc_idxs)
return origin_tc, range_tcs
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{rowSpan=2}", 1), ("a:tc{gridSpan=24}", 24)]
)
def width_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def cell(self):
return _Cell(element("a:tc"), None)
@pytest.fixture
def TcRange_(self, request):
return class_mock(request, "pptx.table.TcRange")
@pytest.fixture
def tc_range_(self, request):
return instance_mock(request, TcRange)
@pytest.fixture
def text_frame_(self, request):
return instance_mock(request, TextFrame)
@pytest.fixture
def text_frame_prop_(self, request):
return property_mock(request, _Cell, "text_frame")
class Describe_CellCollection(object):
def it_knows_how_many_cells_it_contains(self, len_fixture):
cells, expected_count = len_fixture
assert len(cells) == expected_count
def it_can_iterate_over_the_cells_it_contains(self, iter_fixture):
cell_collection, _Cell_, calls, expected_cells = iter_fixture
cells = list(cell_collection)
assert _Cell_.call_args_list == calls
assert cells == expected_cells
def it_supports_indexed_access(self, _Cell_, cell_):
tr = element("a:tr/(a:tc, a:tc, a:tc)")
tcs = tr.xpath("//a:tc")
_Cell_.return_value = cell_
cell_collection = _CellCollection(tr, None)
cell = cell_collection[1]
_Cell_.assert_called_once_with(tcs[1], cell_collection)
assert cell is cell_
def it_raises_on_indexed_access_out_of_range(self):
cells = _CellCollection(element("a:tr/a:tc"), None)
with pytest.raises(IndexError):
cells[-1]
with pytest.raises(IndexError):
cells[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tr", "a:tr/a:tc", "a:tr/(a:tc, a:tc, a:tc)"])
def iter_fixture(self, request, _Cell_):
tr_cxml = request.param
tr = element(tr_cxml)
tcs = tr.xpath("//a:tc")
cell_collection = _CellCollection(tr, None)
expected_cells = [
instance_mock(request, _Cell, name="cell%d" % idx)
for idx in range(len(tcs))
]
_Cell_.side_effect = expected_cells
calls = [call(tc, cell_collection) for tc in tcs]
return cell_collection, _Cell_, calls, expected_cells
@pytest.fixture(params=[("a:tr", 0), ("a:tr/a:tc", 1), ("a:tr/(a:tc, a:tc)", 2)])
def len_fixture(self, request):
tr_cxml, expected_len = request.param
cells = _CellCollection(element(tr_cxml), None)
return cells, expected_len
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
class Describe_Column(object):
def it_knows_its_width(self, width_get_fixture):
column, expected_value = width_get_fixture
width = column.width
assert width == expected_value
assert isinstance(width, Length)
def it_can_change_its_width(self, width_set_fixture):
column, new_width, expected_xml, parent_ = width_set_fixture
column.width = new_width
assert column._gridCol.xml == expected_xml
parent_.notify_width_changed.assert_called_once_with()
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[("a:gridCol{w=914400}", Inches(1)), ("a:gridCol{w=10pt}", Pt(10))]
)
def width_get_fixture(self, request):
gridCol_cxml, expected_value = request.param
column = _Column(element(gridCol_cxml), None)
return column, expected_value
@pytest.fixture(
params=[
("a:gridCol{w=12pt}", Inches(1), "a:gridCol{w=914400}"),
("a:gridCol{w=1234}", Inches(1), "a:gridCol{w=914400}"),
]
)
def width_set_fixture(self, request, parent_):
gridCol_cxml, new_width, expected_gridCol_cxml = request.param
column = _Column(element(gridCol_cxml), parent_)
expected_xml = xml(expected_gridCol_cxml)
return column, new_width, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _ColumnCollection)
class Describe_ColumnCollection(object):
def it_knows_how_many_columns_it_contains(self, len_fixture):
columns, expected_count = len_fixture
assert len(columns) == expected_count
def it_can_iterate_over_the_columns_it_contains(self, iter_fixture):
columns, expected_gridCol_lst = iter_fixture
count = 0
for idx, column in enumerate(columns):
assert isinstance(column, _Column)
assert column._gridCol is expected_gridCol_lst[idx]
count += 1
assert count == len(expected_gridCol_lst)
def it_supports_indexed_access(self, getitem_fixture):
columns, expected_gridCol_lst = getitem_fixture
for idx, gridCol in enumerate(expected_gridCol_lst):
column = columns[idx]
assert isinstance(column, _Column)
assert column._gridCol is gridCol
def it_raises_on_indexed_access_out_of_range(self):
columns = _ColumnCollection(element("a:tbl/a:tblGrid/a:gridCol"), None)
with pytest.raises(IndexError):
columns[-1]
with pytest.raises(IndexError):
columns[9]
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
("a:tbl/a:tblGrid", 0),
("a:tbl/a:tblGrid/a:gridCol", 1),
("a:tbl/a:tblGrid/(a:gridCol,a:gridCol)", 2),
]
)
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
columns = _ColumnCollection(element(tbl_cxml), None)
return columns, expected_len
class Describe_Row(object):
def it_knows_its_height(self, height_get_fixture):
row, expected_value = height_get_fixture
height = row.height
assert height == expected_value
assert isinstance(height, Length)
def it_can_change_its_height(self, height_set_fixture):
row, new_height, expected_xml, parent_ = height_set_fixture
row.height = new_height
assert row._tr.xml == expected_xml
parent_.notify_height_changed.assert_called_once_with()
def it_provides_access_to_its_cells(self, cells_fixture):
row, _CellCollection_, cells_ = cells_fixture
cells = row.cells
_CellCollection_.assert_called_once_with(row._tr, row)
assert cells is cells_
# fixtures -------------------------------------------------------
@pytest.fixture
def cells_fixture(self, _CellCollection_, cells_):
row = _Row(element("a:tr"), None)
return row, _CellCollection_, cells_
@pytest.fixture(params=[("a:tr{h=914400}", Inches(1)), ("a:tr{h=10pt}", Pt(10))])
def height_get_fixture(self, request):
tr_cxml, expected_value = request.param
row = _Row(element(tr_cxml), None)
return row, expected_value
@pytest.fixture(
params=[
("a:tr{h=12pt}", Inches(1), "a:tr{h=914400}"),
("a:tr{h=1234}", Inches(1), "a:tr{h=914400}"),
]
)
def height_set_fixture(self, request, parent_):
tr_cxml, new_height, expected_tr_cxml = request.param
row = _Row(element(tr_cxml), parent_)
expected_xml = xml(expected_tr_cxml)
return row, new_height, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def _CellCollection_(self, request, cells_):
return class_mock(request, "pptx.table._CellCollection", return_value=cells_)
@pytest.fixture
def cells_(self, request):
return instance_mock(request, _CellCollection)
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _RowCollection)
class Describe_RowCollection(object):
def it_knows_how_many_rows_it_contains(self, len_fixture):
rows, expected_count = len_fixture
assert len(rows) == expected_count
def it_can_iterate_over_the_rows_it_contains(self, iter_fixture):
rows, expected_tr_lst = iter_fixture
count = 0
for idx, row in enumerate(rows):
assert isinstance(row, _Row)
assert row._tr is expected_tr_lst[idx]
count += 1
assert count == len(expected_tr_lst)
def it_supports_indexed_access(self, getitem_fixture):
rows, expected_tr_lst = getitem_fixture
for idx, tr in enumerate(expected_tr_lst):
row = rows[idx]
assert isinstance(row, _Row)
assert row._tr is tr
def it_raises_on_indexed_access_out_of_range(self):
rows = _RowCollection(element("a:tbl/a:tr"), None)
with pytest.raises(IndexError):
rows[-1]
with pytest.raises(IndexError):
rows[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=[("a:tbl", 0), ("a:tbl/a:tr", 1), ("a:tbl/(a:tr, a:tr)", 2)])
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
rows = _RowCollection(element(tbl_cxml), None)
return rows, expected_len
| 35.995157
| 88
| 0.61227
| 3,862
| 29,732
| 4.421284
| 0.067323
| 0.017745
| 0.016105
| 0.015813
| 0.640586
| 0.504246
| 0.387174
| 0.313148
| 0.231508
| 0.206149
| 0
| 0.012538
| 0.246233
| 29,732
| 825
| 89
| 36.038788
| 0.749364
| 0.033533
| 0
| 0.30303
| 0
| 0.013636
| 0.120706
| 0.064028
| 0
| 0
| 0
| 0
| 0.104545
| 1
| 0.136364
| false
| 0
| 0.018182
| 0.028788
| 0.236364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c6344071efa98707250768a8a8a6346ceb89a33
| 6,612
|
py
|
Python
|
bl60x_flash/main.py
|
v3l0c1r4pt0r/bl60x-flash
|
065770004629c3e5bf98057677e7a6ca566e9c4a
|
[
"MIT"
] | null | null | null |
bl60x_flash/main.py
|
v3l0c1r4pt0r/bl60x-flash
|
065770004629c3e5bf98057677e7a6ca566e9c4a
|
[
"MIT"
] | null | null | null |
bl60x_flash/main.py
|
v3l0c1r4pt0r/bl60x-flash
|
065770004629c3e5bf98057677e7a6ca566e9c4a
|
[
"MIT"
] | null | null | null |
from serial import Serial
from tqdm import tqdm
import binascii
import hashlib
import struct
import time
import sys
import os
def if_read(ser, data_len):
data = bytearray(0)
received = 0
while received < data_len:
tmp = ser.read(data_len - received)
if len(tmp) == 0:
break
else:
data += tmp
received += len(tmp)
if len(data) != data_len:
return (0, data)
return (1, data)
def reset(ser):
ser.setRTS(0)
time.sleep(0.2)
reset_cnt = 2
while reset_cnt > 0:
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.1)
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.005)
reset_cnt -= 1
def handshake(ser):
ser.setRTS(1)
time.sleep(0.2)
ser.setRTS(0)
time.sleep(0.05)
ser.setRTS(1)
ser.setDTR(1)
time.sleep(0.1)
ser.setDTR(0)
time.sleep(0.1)
def expect_ok(ser):
data = ser.read(2)
if data[0] != 0x4f or data[1] != 0x4b:
err = ser.read(2)
raise ValueError(binascii.hexlify(err))
def expect_data(ser):
expect_ok(ser)
len = ser.read(2)
len = struct.unpack('<h', len)[0]
data = ser.read(len)
return data
def cmd_load_seg_header(ser, file):
header = file.read(0x10)
ser.write(b'\x17\x00\x10\x00' + header)
data = expect_data(ser)
seg_addr, seg_len = struct.unpack('<II', data[0:8])
print(f'{seg_len} bytes @ {hex(seg_addr)}')
return seg_len
def cmd_load_seg_data(ser, data):
ser.write(b'\x18\x00' + struct.pack('<H', len(data)) + data)
expect_ok(ser)
def cmd_load_boot_header(ser, file):
header = file.read(0xb0)
ser.write(b'\x11\x00\xb0\x00' + header)
expect_ok(ser)
def cmd_check_image(ser):
ser.write(b'\x19\x00\x00\x00')
expect_ok(ser)
def cmd_run_image(ser):
ser.write(b'\x1a\x00\x00\x00')
expect_ok(ser)
def load_image(ser, file):
image = open(file, 'rb')
cmd_load_boot_header(ser, image)
total = cmd_load_seg_header(ser, image)
sent = 0
with tqdm(total=total, unit='byte', unit_scale=True) as pbar:
while sent != total:
chunk = image.read(min(total-sent, 4080))
cmd_load_seg_data(ser, chunk)
sent = sent + len(chunk)
pbar.update(len(chunk))
cmd_check_image(ser)
cmd_run_image(ser)
def empty_buffer(ser):
timeout = ser.timeout
ser.timeout = 0.1
if_read(ser, 10000)
ser.timeout = timeout
def send_sync(ser):
empty_buffer(ser)
ser.write(b'\x55' * int(0.006 * ser.baudrate / 10))
expect_ok(ser)
def efl_write_cmd(ser, id, payload = b''):
plen = len(payload)
plen_data = struct.pack('<h', plen)
checksum = struct.pack('<h', sum(plen_data + payload) & 0xff)[0:1]
data = bytes([id]) + checksum + plen_data + payload
ser.write(data)
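# Editor's note, derived from efl_write_cmd above: each eflash-loader command
# frame is [1-byte command id][1-byte checksum][2-byte little-endian payload
# length][payload], where the checksum is the low byte of the sum of the length
# bytes plus the payload bytes. For example efl_cmd_read_jid (id 0x36, empty
# payload) sends the 4-byte frame 36 00 00 00.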
def efl_cmd_read_memory(ser, addr):
# there is a length parameter here but it doesn't seem to work correctly
efl_write_cmd(ser, 0x51, struct.pack('<II', addr, 0x4))
return expect_data(ser)
def efl_cmd_write_memory(ser, addr, data):
efl_write_cmd(ser, 0x50, struct.pack('<I', len(data)) + data)
expect_ok(ser)
def efl_cmd_read_jid(ser):
efl_write_cmd(ser, 0x36)
return expect_data(ser)
def efl_cmd_flash_erase(ser, addr, len):
end_addr = addr + len - 1
efl_write_cmd(ser, 0x30, struct.pack('<II', addr, end_addr))
timeout = ser.timeout
ser.timeout = 10.0
expect_ok(ser)
ser.timeout = timeout
print(f'Erased {len} bytes @ {hex(addr)}')
def efl_cmd_flash_write(ser, addr, data):
efl_write_cmd(ser, 0x31, struct.pack('<I', addr) + data)
expect_ok(ser)
def efl_cmd_flash_write_check(ser):
efl_write_cmd(ser, 0x3a)
expect_ok(ser)
def efl_cmd_flash_xip_read_start(ser):
efl_write_cmd(ser, 0x60)
expect_ok(ser)
def efl_cmd_flash_xip_read_sha(ser, addr, len):
efl_write_cmd(ser, 0x3e, struct.pack('<II', addr, len))
return expect_data(ser)
def efl_cmd_flash_xip_read_finish(ser):
efl_write_cmd(ser, 0x61)
expect_ok(ser)
def efl_cmd_reset(ser):
efl_write_cmd(ser, 0x21)
expect_ok(ser)
def efl_program_img(ser, addr, data):
data_len = len(data)
efl_cmd_flash_erase(ser, addr, data_len)
print(f'Programming {data_len} bytes @ {hex(addr)}')
sent = 0
with tqdm(total=data_len, unit='byte', unit_scale=True) as pbar:
while sent != data_len:
buf_len = min(2048, data_len - sent)
buf = data[sent:sent + buf_len]
efl_cmd_flash_write(ser, addr + sent, buf)
sent = sent + buf_len
pbar.update(buf_len)
efl_cmd_flash_write_check(ser)
sha256sum = hashlib.sha256(data).digest()
efl_cmd_flash_xip_read_start(ser)
device_sum = efl_cmd_flash_xip_read_sha(ser, addr, data_len)
efl_cmd_flash_xip_read_finish(ser)
if device_sum != sha256sum:
print('Verification failed')
print('Host SHA256:', binascii.hexlify(sha256sum))
print('BL SHA256:', binascii.hexlify(device_sum))
return False
print('Verified by XIP SHA256 hash')
return True
def prepend_fw_header(img, header_file):
if img[0:4] == b'BFNP':
print('Image already has FW header')
return img
with open(header_file, 'rb') as f:
header = f.read()
img = header + (b'\xFF' * (4096-len(header))) + img
return img
def get_contrib_path(name):
sep = os.path.sep
return os.path.dirname(os.path.realpath(__file__)) + sep + 'contrib' + sep + name
def main():
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <serial port> <firmware bin>')
sys.exit(1)
ser = Serial(sys.argv[1], baudrate=500000, timeout=2)
handshake(ser)
reset(ser)
send_sync(ser)
time.sleep(0.1)
print('Loading helper binary')
load_image(ser, get_contrib_path('eflash_loader_40m.bin'))
time.sleep(0.2)
print()
# at this point, the eflash loader binary is running with efl_ commands
# (which seems to work with a higher baudrate)
ser.baudrate = 2000000
send_sync(ser)
with open(sys.argv[2], 'rb') as f:
data = f.read()
data = prepend_fw_header(data, get_contrib_path('bootheader.bin'))
efl_program_img(ser, 0x10000, data)
efl_cmd_reset(ser)
if __name__ == "__main__":
main()
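# Editor's summary of the flow in main() above: handshake() and reset() toggle
# RTS/DTR (typically wired to reset and the boot pin) to drop the chip into its
# UART bootloader, send_sync() sends a burst of 0x55 bytes so the ROM can lock
# onto the baud rate, load_image() uploads and starts the eflash_loader helper,
# and once that helper answers the efl_* commands the link is switched to
# 2 Mbaud and the firmware, with a boot header prepended, is flashed at offset
# 0x10000.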
| 28.25641
| 86
| 0.613581
| 997
| 6,612
| 3.865597
| 0.202608
| 0.026466
| 0.039958
| 0.039958
| 0.32356
| 0.22548
| 0.158796
| 0.084587
| 0.057602
| 0.022314
| 0
| 0.043682
| 0.259074
| 6,612
| 233
| 87
| 28.377682
| 0.743009
| 0.027979
| 0
| 0.22449
| 0
| 0
| 0.072052
| 0.003393
| 0
| 0
| 0.011309
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.040816
| 0
| 0.244898
| 0.056122
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c63d06a1b4ade87729c096ceb91bf4dea5b367b
| 467
|
py
|
Python
|
monte_py/__init__.py
|
domluna/fun_with_ffi
|
9fc197b11a3470395db517657d624f0a3aa06958
|
[
"MIT"
] | 1
|
2018-07-16T22:10:58.000Z
|
2018-07-16T22:10:58.000Z
|
monte_py/__init__.py
|
domluna/fun_with_ffi
|
9fc197b11a3470395db517657d624f0a3aa06958
|
[
"MIT"
] | null | null | null |
monte_py/__init__.py
|
domluna/fun_with_ffi
|
9fc197b11a3470395db517657d624f0a3aa06958
|
[
"MIT"
] | null | null | null |
import random
def estimate_pi(sims, needles):
trials = []
for _ in range(sims):
trials.append(simulate_pi(needles))
mean = sum(trials) / sims
return mean
# sample points uniformly in the square [-1, 1] x [-1, 1] and count how many
# land inside the inscribed unit circle
def simulate_pi(needles):
hits = 0 # how many points land inside the circle
for _ in range(needles):
x = random.uniform(-1., 1.)
y = random.uniform(-1, 1.)
if x*x + y*y <= 1.0:
hits += 1
return 4. * (hits / float(needles))
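# Why the factor of 4 (editor's note): points are drawn uniformly from the 2x2
# square [-1, 1] x [-1, 1] (area 4) and the inscribed unit circle has area pi,
# so hits / needles approximates pi / 4. A minimal usage sketch (hypothetical,
# not part of the original module):
if __name__ == "__main__":
    # 10 independent runs of 100000 samples each; expect a value near 3.14
    print(estimate_pi(10, 100000))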
| 23.35
| 46
| 0.573876
| 68
| 467
| 3.867647
| 0.514706
| 0.038023
| 0.08365
| 0.114068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.299786
| 467
| 19
| 47
| 24.578947
| 0.776758
| 0.104925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c6424690b87c4502fb44bc4e25fa64fa727a995
| 36,577
|
py
|
Python
|
tools/mpy_ld.py
|
UVA-DSI/circuitpython
|
35ee4add63a604320d2fbd4e30baef2b5675f9a7
|
[
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 1
|
2021-10-20T12:21:44.000Z
|
2021-10-20T12:21:44.000Z
|
tools/mpy_ld.py
|
UVA-DSI/circuitpython
|
35ee4add63a604320d2fbd4e30baef2b5675f9a7
|
[
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | null | null | null |
tools/mpy_ld.py
|
UVA-DSI/circuitpython
|
35ee4add63a604320d2fbd4e30baef2b5675f9a7
|
[
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Damien P. George
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Link .o files to .mpy
"""
import sys, os, struct, re
from elftools.elf import elffile
sys.path.append(os.path.dirname(__file__) + "/../py")
import makeqstrdata as qstrutil
# MicroPython constants
MPY_VERSION = 5
MP_NATIVE_ARCH_X86 = 1
MP_NATIVE_ARCH_X64 = 2
MP_NATIVE_ARCH_ARMV7M = 5
MP_NATIVE_ARCH_ARMV7EMSP = 7
MP_NATIVE_ARCH_ARMV7EMDP = 8
MP_NATIVE_ARCH_XTENSA = 9
MP_NATIVE_ARCH_XTENSAWIN = 10
MP_CODE_BYTECODE = 2
MP_CODE_NATIVE_VIPER = 4
MP_SCOPE_FLAG_VIPERRELOC = 0x20
MP_SCOPE_FLAG_VIPERRODATA = 0x40
MP_SCOPE_FLAG_VIPERBSS = 0x80
MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE = 1
MICROPY_PY_BUILTINS_STR_UNICODE = 2
MP_SMALL_INT_BITS = 31
QSTR_WINDOW_SIZE = 32
# ELF constants
R_386_32 = 1
R_X86_64_64 = 1
R_XTENSA_32 = 1
R_386_PC32 = 2
R_X86_64_PC32 = 2
R_ARM_ABS32 = 2
R_386_GOT32 = 3
R_ARM_REL32 = 3
R_386_PLT32 = 4
R_X86_64_PLT32 = 4
R_XTENSA_PLT = 6
R_386_GOTOFF = 9
R_386_GOTPC = 10
R_ARM_THM_CALL = 10
R_XTENSA_DIFF32 = 19
R_XTENSA_SLOT0_OP = 20
R_ARM_BASE_PREL = 25 # aka R_ARM_GOTPC
R_ARM_GOT_BREL = 26 # aka R_ARM_GOT32
R_ARM_THM_JUMP24 = 30
R_X86_64_REX_GOTPCRELX = 42
R_386_GOT32X = 43
################################################################################
# Architecture configuration
def asm_jump_x86(entry):
return struct.pack("<BI", 0xE9, entry - 5)
def asm_jump_arm(entry):
b_off = entry - 4
if b_off >> 11 == 0 or b_off >> 11 == -1:
# Signed value fits in 12 bits
b0 = 0xE000 | (b_off >> 1 & 0x07FF)
b1 = 0
else:
# Use large jump
b0 = 0xF000 | (b_off >> 12 & 0x07FF)
b1 = 0xB800 | (b_off >> 1 & 0x7FF)
return struct.pack("<HH", b0, b1)
def asm_jump_xtensa(entry):
jump_offset = entry - 4
jump_op = jump_offset << 6 | 6
return struct.pack("<BH", jump_op & 0xFF, jump_op >> 8)
class ArchData:
def __init__(self, name, mpy_feature, qstr_entry_size, word_size, arch_got, asm_jump):
self.name = name
self.mpy_feature = mpy_feature
self.qstr_entry_size = qstr_entry_size
self.word_size = word_size
self.arch_got = arch_got
self.asm_jump = asm_jump
self.separate_rodata = name == "EM_XTENSA" and qstr_entry_size == 4
ARCH_DATA = {
"x86": ArchData(
"EM_386",
MP_NATIVE_ARCH_X86 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
4,
(R_386_PC32, R_386_GOT32, R_386_GOT32X),
asm_jump_x86,
),
"x64": ArchData(
"EM_X86_64",
MP_NATIVE_ARCH_X64 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
8,
(R_X86_64_REX_GOTPCRELX,),
asm_jump_x86,
),
"armv7m": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7M << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emsp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMSP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emdp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMDP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"xtensa": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSA << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
"xtensawin": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSAWIN << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
4,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
}
################################################################################
# Helper functions
def align_to(value, align):
return (value + align - 1) & ~(align - 1)
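# Editor's note: the usual power-of-two rounding-up trick, e.g.
# align_to(5, 4) == 8 and align_to(8, 4) == 8; align must be a power of two
# for the ~(align - 1) mask to be valid.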
def unpack_u24le(data, offset):
return data[offset] | data[offset + 1] << 8 | data[offset + 2] << 16
def pack_u24le(data, offset, value):
data[offset] = value & 0xFF
data[offset + 1] = value >> 8 & 0xFF
data[offset + 2] = value >> 16 & 0xFF
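# Editor's note: these two helpers read and write a 24-bit little-endian value
# in place, e.g. unpack_u24le(b"\x01\x02\x03", 0) == 0x030201 and
# pack_u24le(bytearray(3), 0, 0x030201) stores b"\x01\x02\x03".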
def xxd(text):
for i in range(0, len(text), 16):
print("{:08x}:".format(i), end="")
for j in range(4):
off = i + j * 4
if off < len(text):
d = int.from_bytes(text[off : off + 4], "little")
print(" {:08x}".format(d), end="")
print()
# Smaller numbers are enabled first
LOG_LEVEL_1 = 1
LOG_LEVEL_2 = 2
LOG_LEVEL_3 = 3
log_level = LOG_LEVEL_1
def log(level, msg):
if level <= log_level:
print(msg)
################################################################################
# Qstr extraction
def extract_qstrs(source_files):
def read_qstrs(f):
with open(f) as f:
vals = set()
objs = set()
for line in f:
while line:
m = re.search(r"MP_OBJ_NEW_QSTR\((MP_QSTR_[A-Za-z0-9_]*)\)", line)
if m:
objs.add(m.group(1))
else:
m = re.search(r"MP_QSTR_[A-Za-z0-9_]*", line)
if m:
vals.add(m.group())
if m:
s = m.span()
line = line[: s[0]] + line[s[1] :]
else:
line = ""
return vals, objs
static_qstrs = ["MP_QSTR_" + qstrutil.qstr_escape(q) for q in qstrutil.static_qstr_list]
qstr_vals = set()
qstr_objs = set()
for f in source_files:
vals, objs = read_qstrs(f)
qstr_vals.update(vals)
qstr_objs.update(objs)
qstr_vals.difference_update(static_qstrs)
return static_qstrs, qstr_vals, qstr_objs
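# Editor's example of the extraction above: a source line such as
#   MP_OBJ_NEW_QSTR(MP_QSTR_foo) + MP_QSTR_bar
# adds MP_QSTR_foo to qstr_objs (qstrs wrapped as objects) and MP_QSTR_bar to
# qstr_vals (bare qstr values); qstrs already in the static table are then
# removed from qstr_vals by difference_update().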
################################################################################
# Linker
class LinkError(Exception):
pass
class Section:
def __init__(self, name, data, alignment, filename=None):
self.filename = filename
self.name = name
self.data = data
self.alignment = alignment
self.addr = 0
self.reloc = []
@staticmethod
def from_elfsec(elfsec, filename):
assert elfsec.header.sh_addr == 0
return Section(elfsec.name, elfsec.data(), elfsec.data_alignment, filename)
class GOTEntry:
def __init__(self, name, sym, link_addr=0):
self.name = name
self.sym = sym
self.offset = None
self.link_addr = link_addr
def isexternal(self):
return self.sec_name.startswith(".external")
def istext(self):
return self.sec_name.startswith(".text")
def isrodata(self):
return self.sec_name.startswith((".rodata", ".data.rel.ro"))
def isbss(self):
return self.sec_name.startswith(".bss")
class LiteralEntry:
def __init__(self, value, offset):
self.value = value
self.offset = offset
class LinkEnv:
def __init__(self, arch):
self.arch = ARCH_DATA[arch]
self.sections = [] # list of sections in order of output
self.literal_sections = [] # list of literal sections (xtensa only)
self.known_syms = {} # dict of symbols that are defined
self.unresolved_syms = [] # list of unresolved symbols
self.mpy_relocs = [] # list of relocations needed in the output .mpy file
def check_arch(self, arch_name):
if arch_name != self.arch.name:
raise LinkError("incompatible arch")
def print_sections(self):
log(LOG_LEVEL_2, "sections:")
for sec in self.sections:
log(LOG_LEVEL_2, " {:08x} {} size={}".format(sec.addr, sec.name, len(sec.data)))
def find_addr(self, name):
if name in self.known_syms:
s = self.known_syms[name]
return s.section.addr + s["st_value"]
raise LinkError("unknown symbol: {}".format(name))
def build_got_generic(env):
env.got_entries = {}
for sec in env.sections:
for r in sec.reloc:
s = r.sym
if not (
s.entry["st_info"]["bind"] == "STB_GLOBAL"
and r["r_info_type"] in env.arch.arch_got
):
continue
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT"), s_type
assert s.name
if s.name in env.got_entries:
continue
env.got_entries[s.name] = GOTEntry(s.name, s)
def build_got_xtensa(env):
env.got_entries = {}
env.lit_entries = {}
env.xt_literals = {}
# Extract the values from the literal table
for sec in env.literal_sections:
assert len(sec.data) % env.arch.word_size == 0
# Look through literal relocations to find any global pointers that should be GOT entries
for r in sec.reloc:
s = r.sym
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT", "STT_SECTION"), s_type
assert r["r_info_type"] in env.arch.arch_got
assert r["r_offset"] % env.arch.word_size == 0
# This entry is a global pointer
existing = struct.unpack_from("<I", sec.data, r["r_offset"])[0]
if s_type == "STT_SECTION":
assert r["r_addend"] == 0
name = "{}+0x{:x}".format(s.section.name, existing)
else:
assert existing == 0
name = s.name
if r["r_addend"] != 0:
name = "{}+0x{:x}".format(name, r["r_addend"])
idx = "{}+0x{:x}".format(sec.filename, r["r_offset"])
env.xt_literals[idx] = name
if name in env.got_entries:
# Deduplicate GOT entries
continue
env.got_entries[name] = GOTEntry(name, s, existing)
# Go through all literal entries finding those that aren't global pointers so must be actual literals
for i in range(0, len(sec.data), env.arch.word_size):
idx = "{}+0x{:x}".format(sec.filename, i)
if idx not in env.xt_literals:
# This entry is an actual literal
value = struct.unpack_from("<I", sec.data, i)[0]
env.xt_literals[idx] = value
if value in env.lit_entries:
# Deduplicate literals
continue
env.lit_entries[value] = LiteralEntry(
value, len(env.lit_entries) * env.arch.word_size
)
def populate_got(env):
# Compute GOT destination addresses
for got_entry in env.got_entries.values():
sym = got_entry.sym
if hasattr(sym, "resolved"):
sym = sym.resolved
sec = sym.section
addr = sym["st_value"]
got_entry.sec_name = sec.name
got_entry.link_addr += sec.addr + addr
# Sort the GOT entries by external, text, rodata, bss so relocations can be combined
got_list = sorted(
env.got_entries.values(),
key=lambda g: g.isexternal() + 2 * g.istext() + 3 * g.isrodata() + 4 * g.isbss(),
)
# Layout and populate the GOT
offset = 0
for got_entry in got_list:
got_entry.offset = offset
offset += env.arch.word_size
o = env.got_section.addr + got_entry.offset
env.full_text[o : o + env.arch.word_size] = got_entry.link_addr.to_bytes(
env.arch.word_size, "little"
)
# Create a relocation for each GOT entry
for got_entry in got_list:
if got_entry.name == "mp_fun_table":
dest = "mp_fun_table"
elif got_entry.name.startswith("mp_fun_table+0x"):
dest = int(got_entry.name.split("+")[1], 16) // env.arch.word_size
elif got_entry.sec_name.startswith(".text"):
dest = ".text"
elif got_entry.sec_name.startswith(".rodata"):
dest = ".rodata"
elif got_entry.sec_name.startswith(".data.rel.ro"):
dest = ".data.rel.ro"
elif got_entry.sec_name.startswith(".bss"):
dest = ".bss"
else:
assert 0, (got_entry.name, got_entry.sec_name)
env.mpy_relocs.append((".text", env.got_section.addr + got_entry.offset, dest))
# Print out the final GOT
log(LOG_LEVEL_2, "GOT: {:08x}".format(env.got_section.addr))
for g in got_list:
log(
LOG_LEVEL_2,
" {:08x} {} -> {}+{:08x}".format(g.offset, g.name, g.sec_name, g.link_addr),
)
def populate_lit(env):
log(LOG_LEVEL_2, "LIT: {:08x}".format(env.lit_section.addr))
for lit_entry in env.lit_entries.values():
value = lit_entry.value
log(LOG_LEVEL_2, " {:08x} = {:08x}".format(lit_entry.offset, value))
o = env.lit_section.addr + lit_entry.offset
env.full_text[o : o + env.arch.word_size] = value.to_bytes(env.arch.word_size, "little")
def do_relocation_text(env, text_addr, r):
# Extract relevant info about symbol that's being relocated
s = r.sym
s_bind = s.entry["st_info"]["bind"]
s_shndx = s.entry["st_shndx"]
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
# Default relocation type and name for logging
reloc_type = "le32"
log_name = None
if (
env.arch.name == "EM_386"
and r_info_type in (R_386_PC32, R_386_PLT32)
or env.arch.name == "EM_X86_64"
and r_info_type in (R_X86_64_PC32, R_X86_64_PLT32)
or env.arch.name == "EM_ARM"
and r_info_type in (R_ARM_REL32, R_ARM_THM_CALL, R_ARM_THM_JUMP24)
or s_bind == "STB_LOCAL"
and env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32 # not GOT
):
# Standard relocation to fixed location within text/rodata
if hasattr(s, "resolved"):
s = s.resolved
sec = s.section
if env.arch.separate_rodata and sec.name.startswith(".rodata"):
raise LinkError("fixed relocation to rodata with rodata referenced via GOT")
if sec.name.startswith(".bss"):
raise LinkError(
"{}: fixed relocation to bss (bss variables can't be static)".format(s.filename)
)
if sec.name.startswith(".external"):
raise LinkError(
"{}: fixed relocation to external symbol: {}".format(s.filename, s.name)
)
addr = sec.addr + s["st_value"]
reloc = addr - r_offset + r_addend
if r_info_type in (R_ARM_THM_CALL, R_ARM_THM_JUMP24):
# Both relocations have the same bit pattern to rewrite:
# R_ARM_THM_CALL: bl
# R_ARM_THM_JUMP24: b.w
reloc_type = "thumb_b"
elif (
env.arch.name == "EM_386"
and r_info_type == R_386_GOTPC
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_BASE_PREL
):
# Relocation to GOT address itself
assert s.name == "_GLOBAL_OFFSET_TABLE_"
addr = env.got_section.addr
reloc = addr - r_offset + r_addend
elif (
env.arch.name == "EM_386"
and r_info_type in (R_386_GOT32, R_386_GOT32X)
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_GOT_BREL
):
# Relocation pointing to GOT
reloc = addr = env.got_entries[s.name].offset
elif env.arch.name == "EM_X86_64" and r_info_type == R_X86_64_REX_GOTPCRELX:
# Relocation pointing to GOT
got_entry = env.got_entries[s.name]
addr = env.got_section.addr + got_entry.offset
reloc = addr - r_offset + r_addend
elif env.arch.name == "EM_386" and r_info_type == R_386_GOTOFF:
# Relocation relative to GOT
addr = s.section.addr + s["st_value"]
reloc = addr - env.got_section.addr + r_addend
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_SLOT0_OP:
# Relocation pointing to GOT, xtensa specific
sec = s.section
if sec.name.startswith(".text"):
# it looks like R_XTENSA_SLOT0_OP into .text is already correctly relocated
return
assert sec.name.startswith(".literal"), sec.name
lit_idx = "{}+0x{:x}".format(sec.filename, r_addend)
lit_ptr = env.xt_literals[lit_idx]
if isinstance(lit_ptr, str):
addr = env.got_section.addr + env.got_entries[lit_ptr].offset
log_name = "GOT {}".format(lit_ptr)
else:
addr = env.lit_section.addr + env.lit_entries[lit_ptr].offset
log_name = "LIT"
reloc = addr - r_offset
reloc_type = "xtensa_l32r"
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_DIFF32:
if s.section.name.startswith(".text"):
# it looks like R_XTENSA_DIFF32 into .text is already correctly relocated
return
assert 0
else:
# Unknown/unsupported relocation
assert 0, r_info_type
# Write relocation
if reloc_type == "le32":
(existing,) = struct.unpack_from("<I", env.full_text, r_offset)
struct.pack_into("<I", env.full_text, r_offset, (existing + reloc) & 0xFFFFFFFF)
elif reloc_type == "thumb_b":
b_h, b_l = struct.unpack_from("<HH", env.full_text, r_offset)
existing = (b_h & 0x7FF) << 12 | (b_l & 0x7FF) << 1
if existing >= 0x400000: # 2's complement
existing -= 0x800000
new = existing + reloc
b_h = (b_h & 0xF800) | (new >> 12) & 0x7FF
b_l = (b_l & 0xF800) | (new >> 1) & 0x7FF
struct.pack_into("<HH", env.full_text, r_offset, b_h, b_l)
elif reloc_type == "xtensa_l32r":
l32r = unpack_u24le(env.full_text, r_offset)
assert l32r & 0xF == 1 # RI16 encoded l32r
l32r_imm16 = l32r >> 8
l32r_imm16 = (l32r_imm16 + reloc >> 2) & 0xFFFF
l32r = l32r & 0xFF | l32r_imm16 << 8
pack_u24le(env.full_text, r_offset, l32r)
else:
assert 0, reloc_type
# Log information about relocation
if log_name is None:
if s_type == "STT_SECTION":
log_name = s.section.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} {} -> {:08x}".format(r_offset, log_name, addr))
def do_relocation_data(env, text_addr, r):
s = r.sym
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
if (
env.arch.name == "EM_386"
and r_info_type == R_386_32
or env.arch.name == "EM_X86_64"
and r_info_type == R_X86_64_64
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_ABS32
or env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32
):
# Relocation in data.rel.ro to internal/external symbol
if env.arch.word_size == 4:
struct_type = "<I"
elif env.arch.word_size == 8:
struct_type = "<Q"
sec = s.section
assert r_offset % env.arch.word_size == 0
addr = sec.addr + s["st_value"] + r_addend
if s_type == "STT_SECTION":
log_name = sec.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} -> {} {:08x}".format(r_offset, log_name, addr))
if env.arch.separate_rodata:
data = env.full_rodata
else:
data = env.full_text
(existing,) = struct.unpack_from(struct_type, data, r_offset)
if sec.name.startswith((".text", ".rodata", ".data.rel.ro", ".bss")):
struct.pack_into(struct_type, data, r_offset, existing + addr)
kind = sec.name
elif sec.name == ".external.mp_fun_table":
assert addr == 0
kind = s.mp_fun_table_offset
else:
assert 0, sec.name
if env.arch.separate_rodata:
base = ".rodata"
else:
base = ".text"
env.mpy_relocs.append((base, r_offset, kind))
else:
# Unknown/unsupported relocation
assert 0, r_info_type
def load_object_file(env, felf):
with open(felf, "rb") as f:
elf = elffile.ELFFile(f)
env.check_arch(elf["e_machine"])
# Get symbol table
symtab = list(elf.get_section_by_name(".symtab").iter_symbols())
# Load needed sections from ELF file
sections_shndx = {} # maps elf shndx to Section object
for idx, s in enumerate(elf.iter_sections()):
if s.header.sh_type in ("SHT_PROGBITS", "SHT_NOBITS"):
if s.data_size == 0:
# Ignore empty sections
pass
elif s.name.startswith((".literal", ".text", ".rodata", ".data.rel.ro", ".bss")):
sec = Section.from_elfsec(s, felf)
sections_shndx[idx] = sec
if s.name.startswith(".literal"):
env.literal_sections.append(sec)
else:
env.sections.append(sec)
elif s.name.startswith(".data"):
raise LinkError("{}: {} non-empty".format(felf, s.name))
else:
# Ignore section
pass
elif s.header.sh_type in ("SHT_REL", "SHT_RELA"):
shndx = s.header.sh_info
if shndx in sections_shndx:
sec = sections_shndx[shndx]
sec.reloc_name = s.name
sec.reloc = list(s.iter_relocations())
for r in sec.reloc:
r.sym = symtab[r["r_info_sym"]]
# Link symbols to their sections, and update known and unresolved symbols
for sym in symtab:
sym.filename = felf
shndx = sym.entry["st_shndx"]
if shndx in sections_shndx:
# Symbol with associated section
sym.section = sections_shndx[shndx]
if sym["st_info"]["bind"] == "STB_GLOBAL":
# Defined global symbol
if sym.name in env.known_syms and not sym.name.startswith(
"__x86.get_pc_thunk."
):
raise LinkError("duplicate symbol: {}".format(sym.name))
env.known_syms[sym.name] = sym
elif sym.entry["st_shndx"] == "SHN_UNDEF" and sym["st_info"]["bind"] == "STB_GLOBAL":
# Undefined global symbol, needs resolving
env.unresolved_syms.append(sym)
def link_objects(env, native_qstr_vals_len, native_qstr_objs_len):
# Build GOT information
if env.arch.name == "EM_XTENSA":
build_got_xtensa(env)
else:
build_got_generic(env)
# Create GOT section
got_size = len(env.got_entries) * env.arch.word_size
env.got_section = Section("GOT", bytearray(got_size), env.arch.word_size)
if env.arch.name == "EM_XTENSA":
env.sections.insert(0, env.got_section)
else:
env.sections.append(env.got_section)
# Create optional literal section
if env.arch.name == "EM_XTENSA":
lit_size = len(env.lit_entries) * env.arch.word_size
env.lit_section = Section("LIT", bytearray(lit_size), env.arch.word_size)
env.sections.insert(1, env.lit_section)
# Create section to contain mp_native_qstr_val_table
env.qstr_val_section = Section(
".text.QSTR_VAL",
bytearray(native_qstr_vals_len * env.arch.qstr_entry_size),
env.arch.qstr_entry_size,
)
env.sections.append(env.qstr_val_section)
# Create section to contain mp_native_qstr_obj_table
env.qstr_obj_section = Section(
".text.QSTR_OBJ", bytearray(native_qstr_objs_len * env.arch.word_size), env.arch.word_size
)
env.sections.append(env.qstr_obj_section)
# Resolve unknown symbols
mp_fun_table_sec = Section(".external.mp_fun_table", b"", 0)
fun_table = {
key: 68 + idx
for idx, key in enumerate(
[
"mp_type_type",
"mp_type_str",
"mp_type_list",
"mp_type_dict",
"mp_type_fun_builtin_0",
"mp_type_fun_builtin_1",
"mp_type_fun_builtin_2",
"mp_type_fun_builtin_3",
"mp_type_fun_builtin_var",
"mp_stream_read_obj",
"mp_stream_readinto_obj",
"mp_stream_unbuffered_readline_obj",
"mp_stream_write_obj",
]
)
}
for sym in env.unresolved_syms:
assert sym["st_value"] == 0
if sym.name == "_GLOBAL_OFFSET_TABLE_":
pass
elif sym.name == "mp_fun_table":
sym.section = Section(".external", b"", 0)
elif sym.name == "mp_native_qstr_val_table":
sym.section = env.qstr_val_section
elif sym.name == "mp_native_qstr_obj_table":
sym.section = env.qstr_obj_section
elif sym.name in env.known_syms:
sym.resolved = env.known_syms[sym.name]
else:
if sym.name in fun_table:
sym.section = mp_fun_table_sec
sym.mp_fun_table_offset = fun_table[sym.name]
else:
raise LinkError("{}: undefined symbol: {}".format(sym.filename, sym.name))
# Align sections, assign their addresses, and create full_text
env.full_text = bytearray(env.arch.asm_jump(8)) # dummy, to be filled in later
env.full_rodata = bytearray(0)
env.full_bss = bytearray(0)
for sec in env.sections:
if env.arch.separate_rodata and sec.name.startswith((".rodata", ".data.rel.ro")):
data = env.full_rodata
elif sec.name.startswith(".bss"):
data = env.full_bss
else:
data = env.full_text
sec.addr = align_to(len(data), sec.alignment)
data.extend(b"\x00" * (sec.addr - len(data)))
data.extend(sec.data)
env.print_sections()
populate_got(env)
if env.arch.name == "EM_XTENSA":
populate_lit(env)
# Fill in relocations
for sec in env.sections:
if not sec.reloc:
continue
log(
LOG_LEVEL_3,
"{}: {} relocations via {}:".format(sec.filename, sec.name, sec.reloc_name),
)
for r in sec.reloc:
if sec.name.startswith((".text", ".rodata")):
do_relocation_text(env, sec.addr, r)
elif sec.name.startswith(".data.rel.ro"):
do_relocation_data(env, sec.addr, r)
else:
assert 0, sec.name
################################################################################
# .mpy output
class MPYOutput:
def open(self, fname):
self.f = open(fname, "wb")
self.prev_base = -1
self.prev_offset = -1
def close(self):
self.f.close()
def write_bytes(self, buf):
self.f.write(buf)
def write_uint(self, val):
b = bytearray()
b.insert(0, val & 0x7F)
val >>= 7
while val:
b.insert(0, 0x80 | (val & 0x7F))
val >>= 7
self.write_bytes(b)
def write_qstr(self, s):
if s in qstrutil.static_qstr_list:
self.write_bytes(bytes([0, qstrutil.static_qstr_list.index(s) + 1]))
else:
s = bytes(s, "ascii")
self.write_uint(len(s) << 1)
self.write_bytes(s)
def write_reloc(self, base, offset, dest, n):
need_offset = not (base == self.prev_base and offset == self.prev_offset + 1)
self.prev_offset = offset + n - 1
if dest <= 2:
dest = (dest << 1) | (n > 1)
else:
assert 6 <= dest <= 127
assert n == 1
dest = dest << 1 | need_offset
assert 0 <= dest <= 0xFE, dest
self.write_bytes(bytes([dest]))
if need_offset:
if base == ".text":
base = 0
elif base == ".rodata":
base = 1
self.write_uint(offset << 1 | base)
if n > 1:
self.write_uint(n)
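# Illustrative sketch (not part of the original file): a decoder for the
# variable-length unsigned integers emitted by MPYOutput.write_uint above
# (7 data bits per byte, most-significant group first, continuation bit 0x80
# set on every byte except the last).  The helper name is hypothetical.
def _example_read_uint(buf, pos=0):
    val = 0
    while True:
        b = buf[pos]
        pos += 1
        val = (val << 7) | (b & 0x7F)
        if not (b & 0x80):
            return val, pos
# round-trip check: write_uint(300) produces the two bytes 0x82, 0x2C
assert _example_read_uint(bytes([0x82, 0x2C])) == (300, 2)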
def build_mpy(env, entry_offset, fmpy, native_qstr_vals, native_qstr_objs):
# Write jump instruction to start of text
jump = env.arch.asm_jump(entry_offset)
env.full_text[: len(jump)] = jump
log(LOG_LEVEL_1, "arch: {}".format(env.arch.name))
log(LOG_LEVEL_1, "text size: {}".format(len(env.full_text)))
if len(env.full_rodata):
log(LOG_LEVEL_1, "rodata size: {}".format(len(env.full_rodata)))
log(LOG_LEVEL_1, "bss size: {}".format(len(env.full_bss)))
log(LOG_LEVEL_1, "GOT entries: {}".format(len(env.got_entries)))
# xxd(env.full_text)
out = MPYOutput()
out.open(fmpy)
# MPY: header
out.write_bytes(
bytearray(
[
ord("C"),
MPY_VERSION,
env.arch.mpy_feature,
MP_SMALL_INT_BITS,
QSTR_WINDOW_SIZE,
]
)
)
# MPY: kind/len
out.write_uint(len(env.full_text) << 2 | (MP_CODE_NATIVE_VIPER - MP_CODE_BYTECODE))
# MPY: machine code
out.write_bytes(env.full_text)
# MPY: n_qstr_link (assumes little endian)
out.write_uint(len(native_qstr_vals) + len(native_qstr_objs))
for q in range(len(native_qstr_vals)):
off = env.qstr_val_section.addr + q * env.arch.qstr_entry_size
out.write_uint(off << 2)
out.write_qstr(native_qstr_vals[q])
for q in range(len(native_qstr_objs)):
off = env.qstr_obj_section.addr + q * env.arch.word_size
out.write_uint(off << 2 | 3)
out.write_qstr(native_qstr_objs[q])
# MPY: scope_flags
scope_flags = MP_SCOPE_FLAG_VIPERRELOC
if len(env.full_rodata):
scope_flags |= MP_SCOPE_FLAG_VIPERRODATA
if len(env.full_bss):
scope_flags |= MP_SCOPE_FLAG_VIPERBSS
out.write_uint(scope_flags)
# MPY: n_obj
out.write_uint(0)
# MPY: n_raw_code
out.write_uint(0)
# MPY: rodata and/or bss
if len(env.full_rodata):
rodata_const_table_idx = 1
out.write_uint(len(env.full_rodata))
out.write_bytes(env.full_rodata)
if len(env.full_bss):
bss_const_table_idx = bool(env.full_rodata) + 1
out.write_uint(len(env.full_bss))
# MPY: relocation information
prev_kind = None
for base, addr, kind in env.mpy_relocs:
if isinstance(kind, str) and kind.startswith(".text"):
kind = 0
elif kind in (".rodata", ".data.rel.ro"):
if env.arch.separate_rodata:
kind = rodata_const_table_idx
else:
kind = 0
elif isinstance(kind, str) and kind.startswith(".bss"):
kind = bss_const_table_idx
elif kind == "mp_fun_table":
kind = 6
else:
kind = 7 + kind
assert addr % env.arch.word_size == 0, addr
offset = addr // env.arch.word_size
if kind == prev_kind and base == prev_base and offset == prev_offset + 1:
prev_n += 1
prev_offset += 1
else:
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
prev_kind = kind
prev_base = base
prev_offset = offset
prev_n = 1
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
# MPY: sentinel for end of relocations
out.write_bytes(b"\xff")
out.close()
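# Illustrative sketch (not from the original source): the run-length merging of
# relocation entries performed in build_mpy above, extracted as a standalone
# function.  The name and the sample values are for illustration only.
def _example_merge_reloc_runs(relocs, word_size=4):
    runs = []
    prev = None  # (kind, base, offset, run_length)
    for base, addr, kind in relocs:
        assert addr % word_size == 0
        offset = addr // word_size
        if prev and kind == prev[0] and base == prev[1] and offset == prev[2] + 1:
            prev = (kind, base, offset, prev[3] + 1)
        else:
            if prev:
                runs.append((prev[1], prev[2] - prev[3] + 1, prev[0], prev[3]))
            prev = (kind, base, offset, 1)
    if prev:
        runs.append((prev[1], prev[2] - prev[3] + 1, prev[0], prev[3]))
    return runs
# three consecutive words relocated against the same target collapse to one run
assert _example_merge_reloc_runs(
    [(".text", 0x40, 6), (".text", 0x44, 6), (".text", 0x48, 6)]
) == [(".text", 16, 6, 3)]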
################################################################################
# main
def do_preprocess(args):
if args.output is None:
assert args.files[0].endswith(".c")
args.output = args.files[0][:-1] + "config.h"
static_qstrs, qstr_vals, qstr_objs = extract_qstrs(args.files)
with open(args.output, "w") as f:
print(
"#include <stdint.h>\n"
"typedef uintptr_t mp_uint_t;\n"
"typedef intptr_t mp_int_t;\n"
"typedef uintptr_t mp_off_t;",
file=f,
)
for i, q in enumerate(static_qstrs):
print("#define %s (%u)" % (q, i + 1), file=f)
for i, q in enumerate(sorted(qstr_vals)):
print("#define %s (mp_native_qstr_val_table[%d])" % (q, i), file=f)
for i, q in enumerate(sorted(qstr_objs)):
print(
"#define MP_OBJ_NEW_QSTR_%s ((mp_obj_t)mp_native_qstr_obj_table[%d])" % (q, i),
file=f,
)
if args.arch == "xtensawin":
qstr_type = "uint32_t" # esp32 can only read 32-bit values from IRAM
else:
qstr_type = "uint16_t"
print("extern const {} mp_native_qstr_val_table[];".format(qstr_type), file=f)
print("extern const mp_uint_t mp_native_qstr_obj_table[];", file=f)
def do_link(args):
if args.output is None:
assert args.files[0].endswith(".o")
args.output = args.files[0][:-1] + "mpy"
native_qstr_vals = []
native_qstr_objs = []
if args.qstrs is not None:
with open(args.qstrs) as f:
for l in f:
m = re.match(r"#define MP_QSTR_([A-Za-z0-9_]*) \(mp_native_", l)
if m:
native_qstr_vals.append(m.group(1))
else:
m = re.match(r"#define MP_OBJ_NEW_QSTR_MP_QSTR_([A-Za-z0-9_]*)", l)
if m:
native_qstr_objs.append(m.group(1))
log(LOG_LEVEL_2, "qstr vals: " + ", ".join(native_qstr_vals))
log(LOG_LEVEL_2, "qstr objs: " + ", ".join(native_qstr_objs))
env = LinkEnv(args.arch)
try:
for file in args.files:
load_object_file(env, file)
link_objects(env, len(native_qstr_vals), len(native_qstr_objs))
build_mpy(env, env.find_addr("mpy_init"), args.output, native_qstr_vals, native_qstr_objs)
except LinkError as er:
print("LinkError:", er.args[0])
sys.exit(1)
def main():
import argparse
cmd_parser = argparse.ArgumentParser(description="Run scripts on the pyboard.")
cmd_parser.add_argument(
"--verbose", "-v", action="count", default=1, help="increase verbosity"
)
cmd_parser.add_argument("--arch", default="x64", help="architecture")
cmd_parser.add_argument("--preprocess", action="store_true", help="preprocess source files")
cmd_parser.add_argument("--qstrs", default=None, help="file defining additional qstrs")
cmd_parser.add_argument(
"--output", "-o", default=None, help="output .mpy file (default to input with .o->.mpy)"
)
cmd_parser.add_argument("files", nargs="+", help="input files")
args = cmd_parser.parse_args()
global log_level
log_level = args.verbose
if args.preprocess:
do_preprocess(args)
else:
do_link(args)
if __name__ == "__main__":
main()
9c64d6e1ca9f65ffe83cf4a6cb96b5de160e7309 | 2,289 | py | Python | ui_mant_libros.py | edzzn/Manejo_Liberia | c735d35b32fc53839acfc48d4e088e69983edf16 | ["MIT"]
from PyQt4 import QtGui
from ui_mant_libros_new import NewLibrosWindow
from ui_mant_libros_edit import EditLibrosWindow
from ui_mant_libros_id_edit import GetIdEditWindow
# Debug only
import inspect
class MenuLibros(QtGui.QWidget):
"""
Menu window for editing books (Libros)
"""
def __init__(self):
super(MenuLibros, self).__init__()
self.createButtons()
self.setWindowTitle('Mantenimiento Libros')
self.setWindowIcon(QtGui.QIcon('images/user-plus.png'))
self.setGeometry(650, 300, 150, 100)
def createButtons(self):
btn_new_libros = QtGui.QPushButton('Nuevo')
btn_new_libros.clicked.connect(self.open_new_libros_window)
btn_edit_libros = QtGui.QPushButton('Editar')
btn_edit_libros.clicked.connect(self.open_edit_libros_window)
btn_list_libros = QtGui.QPushButton('Listar')
btn_list_libros.clicked.connect(self.close)
btn_delete_libros = QtGui.QPushButton('Eliminar')
btn_delete_libros.clicked.connect(self.close)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(btn_new_libros)
hbox.addWidget(btn_edit_libros)
hbox.addWidget(btn_list_libros)
hbox.addWidget(btn_delete_libros)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox)
self.setLayout(vbox)
def open_new_libros_window(self):
self.new_libros_view = NewLibrosWindow()
self.new_libros_view.show()
print(inspect.stack()[0][3])
self.close()
def open_edit_libros_window(self):
self.edit_libros_view = GetIdEditWindow()
self.edit_libros_view.show()
print(inspect.stack()[0][3])
self.close()
def open_list_reserva_window(self):
# self.new_reserva_view = NewReserva()
# self.new_reserva_view.show()
print(inspect.stack()[0][3])
self.close()
def open_delete_reserva_window(self):
# self.new_reserva_view = NewReserva()
# self.new_reserva_view.show()
print(inspect.stack()[0][3])
self.close()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
mainWin = MenuLibros()
mainWin.show()
sys.exit(app.exec_())
9c64e0be4c2600978945ef57f08d4ac03e9f96cf | 6,583 | py | Python | env/gym_poker_ai/envs/tests/holdem_calc/holdem_argparser.py | MrStonkus/PokerAi | 9c43c3a7a9c3ac01f4ee9e3f1f95f0786c35de99 | ["MIT"]
import argparse
import re
import holdem_calc.holdem_functions as holdem_functions
# Wrapper class which holds the arguments for library calls
# Mocks actual argparse object
class LibArgs:
def __init__(self, board, exact, num, input_file, hole_cards):
self.board = board
self.cards = hole_cards
self.n = num
self.input = input_file
self.exact = exact
# Parses arguments passed to holdem_calc as a library call
def parse_lib_args(args):
error_check_arguments(args)
# Parse hole cards and board
hole_cards, board = None, None
if not args.input:
hole_cards, board = parse_cards(args.cards, args.board)
return hole_cards, args.n, args.exact, board, args.input
# Parses command line arguments to holdem_calc
def parse_args():
# Define possible command line arguments
parser = argparse.ArgumentParser(
description="Find the odds that a Texas Hold'em hand will win. Note "
"that cards must be given in the following format: As, Jc, Td, 3h.")
parser.add_argument("cards", nargs="*", type=str, metavar="hole card",
help="Hole cards you want to find the odds for.")
parser.add_argument("-b", "--board", nargs="*", type=str, metavar="card",
help="Add board cards")
parser.add_argument("-e", "--exact", action="store_true",
help="Find exact odds by enumerating every possible "
"board")
parser.add_argument("-n", type=int, default=100000,
help="Run N Monte Carlo simulations")
parser.add_argument("-i", "--input", type=str,
help="Read hole cards and boards from an input file. "
"Commandline arguments for hole cards and board will "
"be ignored")
# Parse command line arguments and check for errors
args = parser.parse_args()
error_check_arguments(args)
# Parse hole cards and board
hole_cards, board = None, None
if not args.input:
hole_cards, board = parse_cards(args.cards, args.board)
return hole_cards, args.n, args.exact, board, args.input
# Parses a line taken from the input file and returns the hole cards and board
def parse_file_args(line):
if line is None or len(line) == 0:
print(line)
print("Invalid format")
exit()
values = line.split("|")
if len(values) > 2 or len(values) < 1:
print(line)
print("Invalid format")
exit()
hole_cards = values[0].split()
all_cards = list(hole_cards)
board = None
if len(values) == 2:
board = values[1].split()
all_cards.extend(board)
error_check_cards(all_cards)
return parse_cards(hole_cards, board)
# Parses hole cards and board
def parse_cards(cards, board):
hole_cards = create_hole_cards(cards)
if board:
board = parse_board(board)
return hole_cards, board
# Error check the command line arguments
def error_check_arguments(args):
# Check that the number of Monte Carlo simulations is a positive number
if args.n <= 0:
print("Number of Monte Carlo simulations must be positive.")
exit()
# Check that we can open the specified input file
if args.input:
file_name = args.input
try:
input_file = open(file_name, 'r')
input_file.close()
except IOError:
print("Error opening file " + file_name)
exit()
# Check to make sure all cards are of a valid format
all_cards = list(args.cards)
if args.board:
all_cards.extend(args.board)
error_check_cards(all_cards)
# Checking that the hole cards + board are formatted properly and unique
def error_check_cards(all_cards):
card_re = re.compile('[AKQJT98765432][scdh]')
for card in all_cards:
if card != "?" and not card_re.match(card):
print("Invalid card given.")
exit()
else:
if all_cards.count(card) != 1 and card != "?":
print("The cards given must be unique.")
exit()
# Returns tuple of two-tuple hole_cards: e.g. ((As, Ks), (Ad, Kd), (Jh, Th))
def create_hole_cards(raw_hole_cards):
# Checking that there are an even number of hole cards
if (raw_hole_cards is None or len(raw_hole_cards) < 2 or
len(raw_hole_cards) % 2):
print("You must provide a non-zero even number of hole cards")
exit()
# Create two-tuples out of hole cards
hole_cards, current_hole_cards = [], []
for hole_card in raw_hole_cards:
if hole_card != "?":
current_card = holdem_functions.Card(hole_card)
current_hole_cards.append(current_card)
else:
current_hole_cards.append(None)
if len(current_hole_cards) == 2:
if None in current_hole_cards:
if (current_hole_cards[0] is not None or
current_hole_cards[1] is not None):
print("Unknown hole cards must come in pairs")
exit()
hole_cards.append((current_hole_cards[0], current_hole_cards[1]))
current_hole_cards = []
if hole_cards.count((None, None)) > 1:
print("Can only have one set of unknown hole cards")
return tuple(hole_cards)
# Returns list of board cards: e.g. [As Ks Ad Kd]
def parse_board(board):
if len(board) > 5 or len(board) < 3:
print("Board must have a length of 3, 4, or 5.")
exit()
if "?" in board:
print("Board cannot have unknown cards")
exit()
return create_cards(board)
# Instantiates new cards from the arguments and returns them in a tuple
def create_cards(card_strings):
return [holdem_functions.Card(arg) for arg in card_strings]
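# Illustrative usage sketch (not part of the original module); assumes
# holdem_functions.Card accepts strings such as "As" or "Kd":
if __name__ == "__main__":
    hole, board = parse_cards(["As", "Kd", "?", "?"], ["Jh", "Th", "7c"])
    print(hole, board)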
9c651d14eff8b0f1392964eb0805b7871c20c731 | 8,318 | py | Python | qbay/controllers.py | KarlDorogy/Cisc-327-Course-Project-Group-20 | 0e2c003f78bbdd932381a7a8cbc3aa757da18b24 | ["MIT"]
from flask import render_template, request, session, redirect
from qbay.models import *
from datetime import date
from qbay import app
def authenticate(inner_function):
"""
:param inner_function: any python function that accepts a user object
Wrap any python function and check the current session to see if
the user has logged in. If so, it will call the inner_function
with the logged in user object.
To wrap a function, we can put a decoration on that function.
Example:
@authenticate
def home_page(user):
pass
"""
def wrapped_inner():
# check did we store the key in the session
if 'logged_in' in session:
email = session['logged_in']
try:
user = User.query.filter_by(email=email).one_or_none()
if user:
# if the user exists, call the inner_function
# with user as parameter
return inner_function(user)
except Exception:
return redirect('/login')
else:
# else, redirect to the login page
return redirect('/login')
# return the wrapped version of the inner_function:
return wrapped_inner
@app.route('/login', methods=['GET'])
def login_get():
return render_template('login.html', message='Please login')
@app.route('/login', methods=['POST'])
def login_post():
email = request.form.get('email')
password = request.form.get('password')
user = login(email, password)
if user:
session['logged_in'] = user.email
"""
The session is an object that holds information shared
between a user's browser and the server.
Typically it is packed and stored in the browser cookies.
It is passed along with every request the browser makes
to this service. Here we store the user object in the
session, so we can tell in later requests whether the
client has already logged in.
"""
# success! go back to the home page
# code 303 is to force a 'GET' request
return redirect('/', code=303)
else:
return render_template('login.html', message='login failed')
@app.route('/')
@authenticate
def home(user):
# gets a list of products that the logged in user owns
user_products = get_products(user.email)
# gets list of user purchased products
products = get_transaction(user.email)
return render_template('index.html', user=user,
owned_products=user_products, orders=products)
@app.route('/register', methods=['GET'])
def register_get():
# templates are stored in the templates folder
return render_template('register.html', message='')
@app.route('/register', methods=['POST'])
def register_post():
email = request.form.get('email')
name = request.form.get('name')
password = request.form.get('password')
password2 = request.form.get('password2')
error_message = None
if password != password2:
error_message = "The passwords do not match"
else:
# use backend api to register the user
success = register(name, email, password)
if not success:
error_message = "Registration Failed."
# if there are any error messages when registering the new user
# at the backend, go back to the register page.
if error_message:
return render_template('register.html', message=error_message)
else:
return redirect('/login')
@app.route('/updateuser', methods=['Get'])
def update_user_get():
return render_template('updateuser.html',
message='Please enter new info below:')
@app.route('/updateuser', methods=['POST'])
def update_user_post():
# retrieves current logged in user's email
user_email = session['logged_in']
name = request.form.get('name')
shipping_address = request.form.get('shippingaddress')
postal_code = request.form.get('postalcode')
error_message = None
# use backend api to update the user attributes
success = update_user(user_email, name, shipping_address, postal_code)
if not success:
error_message = "Updating of User Profile Failed."
# if there are any error messages when updating the user profile
# at the backend, go back to the update page.
if error_message:
return render_template('updateuser.html', message=error_message)
else:
return redirect('/', code=303)
@app.route('/updateproduct', methods=['Get'])
def update_product_get():
return render_template('updateproduct.html',
message="Please enter new product info below:",
pName=request.args.get('pName'))
@app.route('/updateproduct', methods=['POST'])
def update_product_post():
new_price = int(request.form.get('new_price'))
new_title = request.form.get('new_title')
new_description = request.form.get('new_description')
title = request.form.get('title')
# use backend api to update the user attributes
success = update_product(new_price, new_title, new_description, title)
error_message = None
if not success:
error_message = "Product Update Failed"
# if there are any error messages when updating the product
# at the backend, go back to the update product page.
if error_message:
return render_template('updateproduct.html', message=error_message,
pName=request.args.get('pName'))
else:
return redirect('/', code=303)
@app.route('/createproduct', methods=['Get'])
def create_product_get():
return render_template('createproduct.html',
message='Please enter product info below:')
@app.route('/createproduct', methods=['POST'])
def create_product_post():
# retrieves current logged in user's email
owner_email = session['logged_in']
today = date.today()
current_date = today.strftime("%d/%m/%Y")
last_modified_date = (current_date[6:10] +
"-" + current_date[3:5] + "-" + current_date[0:2])
price = int(request.form.get('price'))
title = request.form.get('title')
description = request.form.get('description')
error_message = None
# use backend api to update the user attributes
success = create_product(price, title, description,
last_modified_date, owner_email)
if not success:
error_message = "Product Creation Failed."
# if there are any error messages when creating the product
# at the backend, go back to the create product page.
if error_message:
return render_template('createproduct.html', message=error_message)
else:
return redirect('/', code=303)
@app.route('/listings', methods=['GET'])
def available_products_get():
# retrieves current logged in user's email
user_email = session['logged_in']
# gets other user products that are available to purchase
products = get_listings(user_email)
return render_template('available_products.html',
available_products=products)
@app.route('/placeorder', methods=['GET'])
def place_order_get():
return render_template('placeorder.html',
message="Please confirm the purchase below:",
pTitle=request.args.get('pTitle'),
pPrice=request.args.get('pPrice'))
@app.route('/placeorder', methods=['POST'])
def place_order_post():
new_owner = session['logged_in']
product_title = request.args.get('pTitle')
# use backend api to place the product order
success = place_order(new_owner, product_title)
error_message = None
if not success:
error_message = "Placing Order Failed"
# if there are any error messages when ordering the product
# at the backend, go back to the available product listings page.
if error_message:
return render_template('available_products.html',
message=error_message)
else:
return redirect('/', code=303)
@app.route('/logout')
def logout():
if 'logged_in' in session:
session.pop('logged_in', None)
return redirect('/')
9c6678445c5b8ffd9879e0f6a21e874c128e214d | 6,998 | py | Python | gbfs/serializers.py | stadtulm/cykel | b292d958330279654c49beafc3f95a0067274472 | ["MIT"]
from datetime import timedelta
from django.utils.timezone import now
from preferences import preferences
from rest_framework import fields, serializers
from bikesharing.models import Bike, Station, VehicleType
from cykel.serializers import EnumFieldSerializer
class TimestampSerializer(fields.CharField):
def to_representation(self, value):
return value.timestamp()
class GbfsFreeBikeStatusSerializer(serializers.HyperlinkedModelSerializer):
bike_id = serializers.CharField(source="non_static_bike_uuid", read_only=True)
vehicle_type_id = serializers.CharField(read_only=True)
last_reported = TimestampSerializer(read_only=True)
class Meta:
model = Bike
fields = (
"bike_id",
"vehicle_type_id",
"current_range_meters",
"last_reported",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
# defined by GBFS 2.1: the field is only required if the vehicle has a motor
if (
instance.vehicle_type is not None
and instance.vehicle_type.propulsion_type
== VehicleType.PropulsionType.HUMAN
):
representation.pop("current_range_meters")
# Default to False; TODO: maybe make this configurable later
representation["is_reserved"] = False
# Default to False; TODO: maybe make this configurable later
representation["is_disabled"] = False
public_geolocation = instance.public_geolocation()
if public_geolocation is not None:
pos = public_geolocation.geo
if pos and pos.x and pos.y:
representation["lat"] = pos.y
representation["lon"] = pos.x
return representation # only return bikes with public geolocation
class GbfsVehicleOnStationSerializer(GbfsFreeBikeStatusSerializer):
def to_representation(self, instance):
representation = super().to_representation(instance)
if representation is None:
return None
representation.pop("lat")
representation.pop("lon")
return representation
class GbfsStationInformationSerializer(serializers.HyperlinkedModelSerializer):
name = serializers.CharField(source="station_name", read_only=True)
capacity = serializers.IntegerField(source="max_bikes", read_only=True)
station_id = serializers.CharField(source="id", read_only=True)
class Meta:
model = Station
fields = (
"name",
"capacity",
"station_id",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
if (
instance.location is not None
and instance.location.x
and instance.location.y
):
representation["lat"] = instance.location.y
representation["lon"] = instance.location.x
return representation
class GbfsStationStatusSerializer(serializers.HyperlinkedModelSerializer):
station_id = serializers.CharField(source="id", read_only=True)
vehicles = serializers.SerializerMethodField()
def get_vehicles(self, obj):
# if configured, filter out vehicles whose last report
# is older than the allowed silence period
bsp = preferences.BikeSharePreferences
if bsp.gbfs_hide_bikes_after_location_report_silence:
available_bikes = obj.bike_set.filter(
availability_status=Bike.Availability.AVAILABLE,
last_reported__gte=now()
- timedelta(hours=bsp.gbfs_hide_bikes_after_location_report_hours),
)
else:
available_bikes = obj.bike_set.filter(
availability_status=Bike.Availability.AVAILABLE
)
vehicles = GbfsVehicleOnStationSerializer(available_bikes, many=True).data
return list(filter(lambda val: val is not None, vehicles))
class Meta:
model = Station
fields = (
"station_id",
"vehicles",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
representation["num_bikes_available"] = len(representation["vehicles"])
representation["num_docks_available"] = (
instance.max_bikes - representation["num_bikes_available"]
)
if representation["num_bikes_available"] > 0:
representation["last_reported"] = max(
(
vehicle["last_reported"]
if vehicle["last_reported"] is not None
else 0
)
for vehicle in representation["vehicles"]
)
else:
# if no bike is at the station, last_report is the current time
# not sure if this is the intended behavior of the field
# or it should be the timestamp of the last bike removed
# but it is not so easy to implement
representation["last_reported"] = int(now().timestamp())
def drop_last_reported(obj):
obj.pop("last_reported")
return obj
representation["vehicles"] = list(
map(drop_last_reported, representation["vehicles"])
)
status = (instance.status == Station.Status.ACTIVE) or False
representation["is_installed"] = status
representation["is_renting"] = status
representation["is_returning"] = status
return representation
class GbfsVehicleTypeSerializer(serializers.HyperlinkedModelSerializer):
vehicle_type_id = serializers.CharField(source="id", read_only=True)
form_factor = EnumFieldSerializer(
read_only=True,
mapping={
VehicleType.FormFactor.BIKE: "bicycle",
VehicleType.FormFactor.ESCOOTER: "scooter",
VehicleType.FormFactor.CAR: "car",
VehicleType.FormFactor.MOPED: "moped",
VehicleType.FormFactor.OTHER: "other",
},
)
propulsion_type = EnumFieldSerializer(
read_only=True,
mapping={
VehicleType.PropulsionType.HUMAN: "human",
VehicleType.PropulsionType.ELECTRIC_ASSIST: "electric_assist",
VehicleType.PropulsionType.ELECTRIC: "electric",
VehicleType.PropulsionType.COMBUSTION: "combustion",
},
)
def to_representation(self, instance):
data = super(GbfsVehicleTypeSerializer, self).to_representation(instance)
# defined by GBFS 2.1: the field is only required if the vehicle has a motor
if instance.propulsion_type == VehicleType.PropulsionType.HUMAN:
data.pop("max_range_meters")
return data
class Meta:
model = VehicleType
fields = (
"vehicle_type_id",
"form_factor",
"propulsion_type",
"max_range_meters",
"name",
)
9c672aa16a64502ad882d71db5ffef21757f9d6f | 1,095 | py | Python | anime_downloader/extractors/vidstream.py | ngomile/anime-downloader | 14d9cebe8aa4eb9d906b937d7c19fedfa737d184 | ["Unlicense"]
import logging
import re
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class VidStream(BaseExtractor):
def _get_data(self):
QUALITIES = {
"360":[],
"480":[],
"720":[],
"1080":[],
}
url = self.url.replace('https:////','https://')
soup = helpers.get(url).text
regex = r'https://vidstreaming\.io/download\?[^"]*'
download = re.search(regex,soup).group()
soup = helpers.soupify(helpers.get(download))
links = soup.select('div.mirror_link')[0].select('div.dowload > a')
for a in QUALITIES:
for b in links:
if a in b.text:
QUALITIES[a].append(b.get('href'))
# fall back to the first link in case nothing is found
stream_url = QUALITIES[self.quality[:-1]][0] if QUALITIES != {"360":[],"480":[],"720":[],"1080":[],} else links[0].get('href')
return {
'stream_url': stream_url,
'referer': download
}
9c67ab6dcf7da8380a3c1b1759e1c7f496809cce | 2,799 | py | Python | gui/sum_v1/views.py | time-crunched/nlp-toolbox | b732abd0b2c6b265971efe04a4d70ebe20d2ee8f | ["MIT"]
import time
import os
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.views import View
from django.conf import settings
from .forms import File_uploadForm
from .models import File_upload, SummaryRes
from sim_v1.textsummary import TEXTSummary
summary_document_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)),'media','sum_v1','upload')
#summary_document_dir = r'C:\Users\ERDIG\Dropbox\Python\nlp_v1\media\sum_v1\upload'
summary_extraction_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)),'media','sum_v1','temp')
#summary_extraction_dir = r'C:\Users\ERDIG\Dropbox\Python\nlp_v1\media\sum_v1\temp'
summary_ratio = 0.01
class Upload(View):
def post(self, request):
time.sleep(1) # You don't need this line. This is just to delay the process so you can see the progress bar testing locally.
form = File_uploadForm(self.request.POST, self.request.FILES)
print(form.errors)
if form.is_valid():
document = form.save()
data = {'is_valid': True, 'name': document.file.name, 'url': document.file.url}
else:
data = {'is_valid': False}
return JsonResponse(data)
def get(self, request):
for document in File_upload.objects.all():
document.file.delete()
document.delete()
doc_list = File_upload.objects.all()
form = File_uploadForm()
return render(self.request, 'upload.html', {'documents': doc_list, 'form': form,})
def sum_words(request):
if request.method == 'POST':
form = File_uploadForm(request.POST)
if form.is_valid():
form.save()
sum_words = form.cleaned_data['sum_words']
request.session['sum_words'] = sum_words
else:
pass
else:
pass
return redirect('sum_v1:summarize')
def clear_database(request):
for document in File_upload.objects.all():
document.file.delete()
document.delete()
return redirect(request.POST.get('next'))
def Summarize(request):
SummaryRes.objects.all().delete()
summary_word_count = request.session['sum_words']
for document in os.listdir(summary_document_dir):
for filename in os.listdir(summary_extraction_dir):
os.remove(os.path.join(summary_extraction_dir, filename))
text_dir = os.path.join(summary_document_dir, document)
summary = TEXTSummary(text_dir, summary_extraction_dir, summary_ratio, summary_word_count)
summary.textextraction()
summary.summary()
SummaryRes.objects.create(doc = document, summary = summary.summary)
results = SummaryRes.objects.all()
return render(request, 'summarize.html', {'results': results})
9c67af820f4a5f09ac6dce61683f07d3e73f1273 | 1,290 | py | Python | homeassistant/components/websocket_api/__init__.py | dannyqwertz/home-assistant | 688bdc6532e514afbdc8efd1f574a7b5c9e8d280 | ["Apache-2.0"]
"""
Websocket based API for Home Assistant.
For more details about this component, please refer to the documentation at
https://developers.home-assistant.io/docs/external_api_websocket.html
"""
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from . import commands, connection, const, decorators, http, messages
DOMAIN = const.DOMAIN
DEPENDENCIES = ('http',)
# Backwards compat / Make it easier to integrate
# pylint: disable=invalid-name
ActiveConnection = connection.ActiveConnection
BASE_COMMAND_MESSAGE_SCHEMA = messages.BASE_COMMAND_MESSAGE_SCHEMA
error_message = messages.error_message
result_message = messages.result_message
async_response = decorators.async_response
require_admin = decorators.require_admin
ws_require_user = decorators.ws_require_user
# pylint: enable=invalid-name
@bind_hass
@callback
def async_register_command(hass, command, handler, schema):
"""Register a websocket command."""
handlers = hass.data.get(DOMAIN)
if handlers is None:
handlers = hass.data[DOMAIN] = {}
handlers[command] = (handler, schema)
async def async_setup(hass, config):
"""Initialize the websocket API."""
hass.http.register_view(http.WebsocketAPIView)
commands.async_register_commands(hass)
return True
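# Illustrative usage sketch (hypothetical command; exact signatures may differ
# between Home Assistant versions):
#
#   @async_response
#   async def websocket_ping(hass, connection, msg):
#       connection.send_message(result_message(msg["id"], {"pong": True}))
#
#   async_register_command(hass, "example/ping", websocket_ping,
#                          BASE_COMMAND_MESSAGE_SCHEMA)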
9c6c3991eeee7dfdd77baaa787b34e6799b4425e | 1,355 | py | Python | Leetcode/Python/_1721.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | ["MIT"]
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
temp = head
array = []
while temp:
array.append(temp.val)
temp = temp.next
array[k - 1], array[len(array) - k] = array[len(array) - k], array[k - 1]
head = ListNode(0)
dummy = head
for num in array:
dummy.next = ListNode(num)
dummy = dummy.next
return head.next
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
if head is None or head.next is None:
return head
slow = fast = cnt = head
counter = 0
while cnt:
counter += 1
cnt = cnt.next
for _ in range(k - 1):
slow = slow.next
for _ in range(counter - k):
fast = fast.next
slow.val, fast.val = fast.val, slow.val
return head
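# Worked example (illustrative): for the list 1->2->3->4->5 and k = 2, the
# k-th node from the front (value 2) and the k-th node from the end (value 4)
# swap values, giving 1->4->3->2->5.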
9c6e78ca230293ad0a6075105e0e0da44e90fcbd | 25,892 | py | Python | Pyrado/pyrado/environments/mujoco/wam_bic.py | KhanhThiVo/SimuRLacra | fdeaf2059c2ed80ea696f018c29290510b5c4cb9 | ["DOC", "Zlib", "BSD-3-Clause"]
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import mujoco_py
import numpy as np
import os.path as osp
from init_args_serializer import Serializable
from typing import Optional
import pyrado
from pyrado.environments.barrett_wam import (
goal_pos_init_sim_4dof,
goal_pos_init_sim_7dof,
init_qpos_des_4dof,
init_qpos_des_7dof,
act_space_bic_4dof,
act_space_bic_7dof,
wam_q_limits_up_7dof,
wam_q_limits_lo_7dof,
torque_space_wam_4dof,
torque_space_wam_7dof,
wam_pgains_7dof,
wam_dgains_7dof,
wam_pgains_4dof,
wam_dgains_4dof,
)
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.singular import SingularStateSpace
from pyrado.tasks.base import Task
from pyrado.tasks.condition_only import ConditionOnlyTask
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode
from pyrado.tasks.goalless import GoallessTask
from pyrado.tasks.masked import MaskedTask
from pyrado.tasks.parallel import ParallelTasks
from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn
from pyrado.tasks.sequential import SequentialTasks
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import print_cbt
class WAMBallInCupSim(MujocoSimEnv, Serializable):
"""
WAM robotic arm from Barrett technologies for the ball-in-the-cup task, controlled by a PD controller.
.. note::
When using the `reset()` function, always pass a meaningful `init_state`
.. seealso::
[1] https://github.com/psclklnk/self-paced-rl/tree/master/sprl/envs/ball_in_a_cup.py
"""
name: str = "wam-bic"
def __init__(
self,
num_dof: int,
frame_skip: int = 4,
dt: Optional[float] = None,
max_steps: int = pyrado.inf,
fixed_init_state: bool = True,
stop_on_collision: bool = True,
observe_ball: bool = False,
observe_cup: bool = False,
task_args: Optional[dict] = None,
):
"""
Constructor
:param num_dof: number of degrees of freedom (4 or 7), depending on which Barrett WAM setup is being used
:param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
the time step size `dt`
:param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
overwritten. A possible use case is if you know that you recorded a trajectory with a specific `dt`.
:param max_steps: max number of simulation time steps
:param fixed_init_state: enables/disables deterministic, fixed initial state
:param stop_on_collision: set the `failed` flag in the `dict` returned by `_mujoco_step()` to true, if the ball
collides with something else than the desired parts of the cup. This causes the
episode to end. Keep in mind that in case of a negative step reward and no final
cost on failing, this might result in undesired behavior.
:param observe_ball: if `True`, include the 2-dim (x-z plane) cartesian ball position into the observation
:param observe_cup: if `True`, include the 2-dim (x-z plane) cartesian cup position into the observation
:param task_args: arguments for the task construction
"""
Serializable._init(self, locals())
self.fixed_init_state = fixed_init_state
self.observe_ball = observe_ball
self.observe_cup = observe_cup
# Initialize num DoF specific variables
self._num_dof = num_dof
if num_dof == 4:
graph_file_name = "wam_4dof_bic.xml"
self.qpos_des_init = init_qpos_des_4dof
self.p_gains = wam_pgains_4dof
self.d_gains = wam_dgains_4dof
init_ball_pos = np.array([0.723, 0.0, 1.168])
init_cup_goal = goal_pos_init_sim_4dof
elif num_dof == 7:
graph_file_name = "wam_7dof_bic.xml"
self.qpos_des_init = init_qpos_des_7dof
self.p_gains = wam_pgains_7dof
self.d_gains = wam_dgains_7dof
init_ball_pos = np.array([0.828, 0.0, 1.131])
init_cup_goal = goal_pos_init_sim_7dof
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
model_path = osp.join(pyrado.MUJOCO_ASSETS_DIR, graph_file_name)
super().__init__(model_path, frame_skip, dt, max_steps, task_args)
# Actual initial joint position (when the WAM moved to the home position)
if num_dof == 4:
self.init_qpos[:4] = np.array([0.0, 0.63, 0.0, 1.27])
self.init_qpos[4] = -0.34 # angle of the first rope segment relative to the cup bottom plate
else:
self.init_qpos[:7] = np.array([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57])
self.init_qpos[7] = -0.21 # angle of the first rope segment relative to the cup bottom plate
# Set the actual stable initial position. This position would be reached after some time using the internal
# PD controller to stabilize at self._qpos_des_init.
# The initial position of the ball in cartesian coordinates
self._init_state = np.concatenate([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal])
if self.fixed_init_state:
self._init_space = SingularStateSpace(self._init_state)
else:
# Add plus/minus one degree to each motor joint and the first rope segment joint
init_state_up = self._init_state.copy()
init_state_up[: self._num_dof] += np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
init_state_lo = self._init_state.copy()
init_state_lo[: self._num_dof] -= np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
self._init_space = BoxSpace(init_state_lo, init_state_up)
# Bodies to check for collision
self._collision_bodies = [
"wam/base_link",
"wam/shoulder_yaw_link",
"wam/shoulder_pitch_link",
"wam/upper_arm_link",
"wam/forearm_link",
"wrist_palm_link",
"wam/wrist_pitch_link",
"wam/wrist_yaw_link",
]
if self._num_dof == 4:
self._collision_bodies = self._collision_bodies[:6]
# We access a private attribute since a method like 'model.geom_names[geom_id]' cannot be used because
# not every geom has a name
self._collision_geom_ids = [self.model._geom_name2id[name] for name in ["cup_geom1", "cup_geom2"]]
self.stop_on_collision = stop_on_collision
self.camera_config = dict(
distance=2.7,
trackbodyid=0, # id of the body to track
elevation=-30, # camera rotation around the axis in the plane
azimuth=-90, # camera rotation around the camera's vertical axis
)
@property
def num_dof(self) -> int:
""" Get the number of degrees of freedom. """
return self._num_dof
@property
def torque_space(self) -> Space:
""" Get the space of joint torques. """
return torque_space_wam_7dof if self._num_dof == 7 else torque_space_wam_4dof
@property
def state_space(self) -> Space:
# The state space has the same shape as the init space (including ball and cup)
state_shape = np.concatenate([self.init_qpos, self.init_qvel, np.empty(3), np.empty(3)]).shape
state_lo, state_up = np.full(state_shape, -pyrado.inf), np.full(state_shape, pyrado.inf)
# Ensure that joint limits of the arm are not reached (5 deg safety margin)
state_lo[: self._num_dof] = wam_q_limits_lo_7dof[: self._num_dof]
state_up[: self._num_dof] = wam_q_limits_up_7dof[: self._num_dof]
return BoxSpace(state_lo, state_up)
@property
def obs_space(self) -> Space:
# Observing the normalized time and optionally the cup and ball position
obs_lo, obs_up, labels = [0.0], [1.0], ["t"]
if self.observe_ball:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["ball_x", "ball_z"])
if self.observe_cup:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["cup_x", "cup_z"])
return BoxSpace(obs_lo, obs_up, labels=labels)
@property
def act_space(self) -> Space:
# Running a PD controller on joint positions and velocities
return act_space_bic_7dof if self._num_dof == 7 else act_space_bic_4dof
@classmethod
def get_nominal_domain_param(cls, num_dof: int = 7) -> dict:
if num_dof == 7:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_5_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_6_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_7_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
joint_5_dryfriction=0.4, # dry friction coefficient of motor joint 5 [-]
joint_6_dryfriction=0.4, # dry friction coefficient of motor joint 6 [-]
joint_7_dryfriction=0.4, # dry friction coefficient of motor joint 7 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
elif num_dof == 4:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
def _create_task(self, task_args: dict) -> Task:
if task_args.get("sparse_rew_fcn", False):
# Create a task with binary reward
return self._create_main_task(task_args)
else:
            # Create two (or three) parallel running tasks.
# 1.) Main task: Desired state task for the cartesian ball distance
# 2.) Deviation task: Desired state task for the cartesian- and joint deviation from the init position
            # 3.) Binary bonus: adds a binary bonus when the ball is caught [inactive by default]
return ParallelTasks(
[
self._create_main_task(task_args),
self._create_deviation_task(task_args),
self._create_main_task(
dict(
sparse_rew_fcn=True,
success_bonus=task_args.get("success_bonus", 0),
)
),
]
)
def _create_main_task(self, task_args: dict) -> Task:
# Create a DesStateTask that masks everything but the ball position
idcs = list(range(self.state_space.flat_dim - 6, self.state_space.flat_dim - 3)) # Cartesian ball position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# If we do not use copy(), state_des coming from MuJoCo is a reference and updates automatically at each step.
# Note: sim.forward() + get_body_xpos() results in wrong output for state_des, as sim has not been updated to
# init_space.sample(), which is first called in reset()
if task_args.get("sparse_rew_fcn", False):
factor = task_args.get("success_bonus", 1)
# Binary final reward task
main_task = FinalRewTask(
ConditionOnlyTask(
spec,
condition_fcn=self.check_ball_in_cup,
is_success_condition=True,
),
mode=FinalRewMode(always_positive=True),
factor=factor,
)
            # Yield -1 on fail after the main task is done (successfully or not)
dont_fail_after_succ_task = FinalRewTask(
GoallessTask(spec, ZeroPerStepRewFcn()),
mode=FinalRewMode(always_negative=True),
factor=factor,
)
# Augment the binary task with an endless dummy task, to avoid early stopping
task = SequentialTasks((main_task, dont_fail_after_succ_task))
return MaskedTask(self.spec, task, idcs)
else:
state_des = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_ball = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_cup = np.array([0.82521, 0, 1.4469]) if self._num_dof == 7 else np.array([0.758, 0, 1.5])
# state_des = np.concatenate([state_des_ball, state_des_cup])
R_default = np.diag([0, 0, 1, 1e-2, 1e-2, 1e-1]) if self._num_dof == 7 else np.diag([0, 0, 1e-2, 1e-2])
rew_fcn = ExpQuadrErrRewFcn(
Q=task_args.get("Q", np.diag([2e1, 1e-4, 2e1])), # distance ball - cup; shouldn't move in y-direction
R=task_args.get("R", R_default), # last joint is really unreliable for 7 dof, thus punish more
)
task = DesStateTask(spec, state_des, rew_fcn)
# Wrap the masked DesStateTask to add a bonus for the best state in the rollout
return BestStateFinalRewTask(
MaskedTask(self.spec, task, idcs),
factor=task_args.get("final_factor", 0.05 * self.max_steps),
)
def _create_deviation_task(self, task_args: dict) -> Task:
idcs = list(range(self.state_space.flat_dim - 3, self.state_space.flat_dim)) # Cartesian cup goal position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# init cup goal position
state_des = goal_pos_init_sim_7dof if self._num_dof == 7 else goal_pos_init_sim_4dof
rew_fcn = QuadrErrRewFcn(
Q=task_args.get("Q_dev", np.diag([2e-1, 1e-6, 5e0])), # Cartesian distance from init cup position
R=task_args.get(
"R_dev", np.zeros((self.act_space.shape[0], self.act_space.shape[0]))
), # joint space distance from init pose, interferes with R_default from _create_main_task
)
task = DesStateTask(spec, state_des, rew_fcn)
return MaskedTask(self.spec, task, idcs)
def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
# First replace special domain parameters
cup_scale = domain_param.pop("cup_scale", None)
rope_length = domain_param.pop("rope_length", None)
if cup_scale is not None:
# See [1, l.93-96]
xml_model = xml_model.replace("[scale_mesh]", str(cup_scale * 0.001))
xml_model = xml_model.replace("[pos_mesh]", str(0.055 - (cup_scale - 1.0) * 0.023))
xml_model = xml_model.replace("[pos_goal]", str(0.1165 + (cup_scale - 1.0) * 0.0385))
xml_model = xml_model.replace("[size_cup]", str(cup_scale * 0.038))
xml_model = xml_model.replace("[size_cup_inner]", str(cup_scale * 0.03))
if rope_length is not None:
# The rope consists of 30 capsules
xml_model = xml_model.replace("[pos_capsule]", str(rope_length / 30))
# Each joint is at the top of each capsule (therefore negative direction from center)
xml_model = xml_model.replace("[pos_capsule_joint]", str(-rope_length / 60))
# Pure visualization component
xml_model = xml_model.replace("[size_capsule_geom]", str(rope_length / 72))
# Resolve mesh directory and replace the remaining domain parameters
return super()._adapt_model_file(xml_model, domain_param)
def _mujoco_step(self, act: np.ndarray) -> dict:
assert self.act_space.contains(act, verbose=True)
# Get the desired positions and velocities for the selected joints
qpos_des = self.qpos_des_init.copy() # the desired trajectory is relative to self._qpos_des_init
qvel_des = np.zeros_like(qpos_des)
if self._num_dof == 4:
np.add.at(qpos_des, [1, 3], act[:2])
np.add.at(qvel_des, [1, 3], act[2:])
elif self._num_dof == 7:
np.add.at(qpos_des, [1, 3, 5], act[:3])
np.add.at(qvel_des, [1, 3, 5], act[3:])
# Compute the position and velocity errors
err_pos = qpos_des - self.state[: self._num_dof]
err_vel = qvel_des - self.state[self.model.nq : self.model.nq + self._num_dof]
# Compute the torques for the PD controller and clip them to their max values
torque = self.p_gains * err_pos + self.d_gains * err_vel
torque = self.torque_space.project_to(torque)
# Apply the torques to the robot
self.sim.data.qfrc_applied[: self._num_dof] = torque
# Call MuJoCo
try:
self.sim.step()
mjsim_crashed = False
except mujoco_py.builder.MujocoException:
            # When MuJoCo detects instabilities in the simulation, it simply kills it.
            # Instead, we want the episode to end with a failure.
mjsim_crashed = True
qpos, qvel = self.sim.data.qpos.copy(), self.sim.data.qvel.copy()
ball_pos = self.sim.data.get_body_xpos("ball").copy()
cup_goal = self.sim.data.get_site_xpos("cup_goal").copy()
self.state = np.concatenate([qpos, qvel, ball_pos, cup_goal])
# If desired, check for collisions of the ball with the robot
ball_collided = self.check_ball_collisions() if self.stop_on_collision else False
# If state is out of bounds (this is normally checked by the task, but does not work because of the mask)
        state_oob = not self.state_space.contains(self.state)
return dict(
qpos_des=qpos_des,
qvel_des=qvel_des,
qpos=qpos[: self._num_dof],
qvel=qvel[: self._num_dof],
ball_pos=ball_pos,
cup_pos=cup_goal,
failed=mjsim_crashed or ball_collided or state_oob,
)
def check_ball_collisions(self, verbose: bool = False) -> bool:
"""
Check if an undesired collision with the ball occurs.
:param verbose: print messages on collision
        :return: `True` if the ball collides with something other than the central parts of the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
# Evaluate if the ball collides with part of the WAM (collision bodies)
# or the connection of WAM and cup (geom_ids)
c1 = body1_name == "ball" and (
body2_name in self._collision_bodies or contact.geom2 in self._collision_geom_ids
)
c2 = body2_name == "ball" and (
body1_name in self._collision_bodies or contact.geom1 in self._collision_geom_ids
)
if c1 or c2:
if verbose:
print_cbt(
f"Undesired collision of {body1_name} and {body2_name} detected!",
"y",
)
return True
return False
def check_ball_in_cup(self, *args, verbose: bool = False):
"""
Check if the ball is in the cup.
:param verbose: print messages when ball is in the cup
:return: `True` if the ball is in the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
            # Evaluate if the ball is in contact with the inner geom of the cup
cup_inner_id = self.model._geom_name2id["cup_inner"]
c1 = body1_name == "ball" and contact.geom2 == cup_inner_id
c2 = body2_name == "ball" and contact.geom1 == cup_inner_id
if c1 or c2:
if verbose:
print_cbt(f"The ball is in the cup at time step {self.curr_step}.", "y")
return True
return False
def observe(self, state: np.ndarray) -> np.ndarray:
# TODO: Debug print-outs, should be removed in future...
# if self._curr_step == 0:
# print_cbt(f'cup xpos: {self.sim.data.get_body_xpos("cup").copy()}', 'b') # center of frame
# print_cbt(f'cup xipos: {self.sim.data.get_body_xipos("cup").copy()}', 'b') # center of mass
# Observe the normalized time
obs = [self._curr_step / self.max_steps]
# Extract the (x, z) cartesian position of cup and ball (the robot operates in the x-z plane).
# Note: the cup_goal is the mujoco site object marking the goal position for the ball. It is not identical
# to the coordinate system origin of the rigid body object 'cup'
if self.observe_ball:
obs.extend([state[-3], state[-1]])
if self.observe_cup:
obs.extend([state[-6], state[-4]])
return np.array(obs)
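# The PD control step inside _mujoco_step above reduces to a small, self-contained
# computation. The sketch below re-implements it with NumPy only: the gain and
# limit values are made-up placeholders, and a symmetric np.clip stands in for
# torque_space.project_to; it is illustrative, not the environment's controller.
import numpy as np


def pd_torque(qpos_des, qvel_des, qpos, qvel, p_gains, d_gains, torque_limit):
    """Minimal PD law with symmetric torque clipping (illustrative only)."""
    err_pos = qpos_des - qpos
    err_vel = qvel_des - qvel
    torque = p_gains * err_pos + d_gains * err_vel
    return np.clip(torque, -torque_limit, torque_limit)


# Example with hypothetical 4-DoF gains and limits
tau = pd_torque(
    qpos_des=np.array([0.0, 0.63, 0.0, 1.27]),
    qvel_des=np.zeros(4),
    qpos=np.array([0.0, 0.60, 0.0, 1.30]),
    qvel=np.zeros(4),
    p_gains=np.array([200.0, 300.0, 100.0, 100.0]),
    d_gains=np.array([7.0, 15.0, 5.0, 2.5]),
    torque_limit=np.array([150.0, 125.0, 40.0, 60.0]),
)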
| 49.037879
| 120
| 0.632551
| 3,649
| 25,892
| 4.300082
| 0.184708
| 0.013766
| 0.015295
| 0.011918
| 0.382703
| 0.317316
| 0.279651
| 0.239883
| 0.225161
| 0.208782
| 0
| 0.025472
| 0.281284
| 25,892
| 527
| 121
| 49.13093
| 0.817723
| 0.371389
| 0
| 0.269231
| 0
| 0
| 0.038103
| 0.002771
| 0
| 0
| 0
| 0.001898
| 0.002959
| 1
| 0.044379
| false
| 0
| 0.065089
| 0.002959
| 0.171598
| 0.008876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c6f5eebc67f2c098afe70ef549d9f14b27bc659
| 1,572
|
py
|
Python
|
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | 1
|
2022-01-18T19:06:20.000Z
|
2022-01-18T19:06:20.000Z
|
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | null | null | null |
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | null | null | null |
import pandas as pd
import ta
from app.common import reshape_data
from app.strategies.base_strategy import BaseStrategy
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
class EMABBAlligatorStrategy(BaseStrategy):
BUY_SIGNAL = "buy_signal"
SELL_SIGNAL = "sell_signal"
def calculate_indicators(self):
df = self.load_df(limit=1000)
_ = df["close_3_ema"]
_ = df["boll"]
ao = ta.momentum.AwesomeOscillatorIndicator(high=df["high"], low=df["low"])
df["AO"] = ao.ao()
return df
def can_sell(self, df):
prev_candle = self.candle(df)
last_ema = prev_candle["close_3_ema"]
last_bb = prev_candle["boll"]
return [
last_ema < last_bb,
(self.candle(df, rewind=-2)["AO"] > 0)
& (self.candle(df, rewind=-1)["AO"] < 0),
prev_candle["volume"] > 0,
]
def can_buy(self, df):
prev_candle = self.candle(df)
last_ema = prev_candle["close_3_ema"]
last_bb = prev_candle["boll"]
return [
last_ema > last_bb,
(self.candle(df, rewind=-2)["AO"] < 0)
& (self.candle(df, rewind=-1)["AO"] > 0),
prev_candle["volume"] > 0,
]
def alert_message(self, df):
prev_candle = self.candle(df)
last_close = prev_candle["close"]
last_ao = prev_candle["AO"]
        return "Close: {:.2f}, Awesome Oscillator value: {:.2f}".format(
            last_close, last_ao
        )
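# The buy/sell conditions above hinge on the Awesome Oscillator changing sign
# between the last two closed candles. A minimal pandas sketch of that check,
# detached from BaseStrategy (the AO values below are made up):
import pandas as pd

ao = pd.Series([-0.8, -0.2, 0.3])                    # hypothetical AO values, oldest first
crossed_up = ao.iloc[-2] < 0 and ao.iloc[-1] > 0     # buy-side condition
crossed_down = ao.iloc[-2] > 0 and ao.iloc[-1] < 0   # sell-side condition
print(crossed_up, crossed_down)  # True False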
| 29.111111
| 83
| 0.564885
| 197
| 1,572
| 4.279188
| 0.309645
| 0.130486
| 0.099644
| 0.085409
| 0.410439
| 0.410439
| 0.410439
| 0.410439
| 0.372479
| 0.372479
| 0
| 0.017179
| 0.296438
| 1,572
| 53
| 84
| 29.660377
| 0.745027
| 0
| 0
| 0.244444
| 0
| 0
| 0.11514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.088889
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c723e762bff7b4ab80b6f5113e4e550464fb8ae
| 1,276
|
py
|
Python
|
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | null | null | null |
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | 2
|
2022-02-10T11:57:21.000Z
|
2022-02-27T22:43:44.000Z
|
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.urls import re_path
from awx.api.views import (
AdHocCommandList,
AdHocCommandDetail,
AdHocCommandCancel,
AdHocCommandRelaunch,
AdHocCommandAdHocCommandEventsList,
AdHocCommandActivityStreamList,
AdHocCommandNotificationsList,
AdHocCommandStdout,
)
urls = [
re_path(r'^$', AdHocCommandList.as_view(), name='ad_hoc_command_list'),
re_path(r'^(?P<pk>[0-9]+)/$', AdHocCommandDetail.as_view(), name='ad_hoc_command_detail'),
re_path(r'^(?P<pk>[0-9]+)/cancel/$', AdHocCommandCancel.as_view(), name='ad_hoc_command_cancel'),
re_path(r'^(?P<pk>[0-9]+)/relaunch/$', AdHocCommandRelaunch.as_view(), name='ad_hoc_command_relaunch'),
re_path(r'^(?P<pk>[0-9]+)/events/$', AdHocCommandAdHocCommandEventsList.as_view(), name='ad_hoc_command_ad_hoc_command_events_list'),
re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', AdHocCommandActivityStreamList.as_view(), name='ad_hoc_command_activity_stream_list'),
re_path(r'^(?P<pk>[0-9]+)/notifications/$', AdHocCommandNotificationsList.as_view(), name='ad_hoc_command_notifications_list'),
re_path(r'^(?P<pk>[0-9]+)/stdout/$', AdHocCommandStdout.as_view(), name='ad_hoc_command_stdout'),
]
__all__ = ['urls']
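# Each pattern above uses a named capture group to pull the primary key out of
# the URL. The stdlib sketch below only shows what `(?P<pk>[0-9]+)` matches; it
# does not go through Django's resolver.
import re

match = re.match(r'^(?P<pk>[0-9]+)/cancel/$', '42/cancel/')
print(match.group('pk'))  # '42'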
| 42.533333
| 137
| 0.724922
| 160
| 1,276
| 5.45
| 0.28125
| 0.061927
| 0.123853
| 0.110092
| 0.316514
| 0.316514
| 0.114679
| 0.073395
| 0
| 0
| 0
| 0.015666
| 0.09953
| 1,276
| 29
| 138
| 44
| 0.743255
| 0.041536
| 0
| 0
| 0
| 0
| 0.327049
| 0.292623
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c7497307c0cb4f07fda11674de8080bc75940ac
| 3,265
|
py
|
Python
|
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | 2
|
2019-04-20T00:07:16.000Z
|
2019-04-24T01:25:38.000Z
|
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | null | null | null |
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | 7
|
2019-06-18T17:59:41.000Z
|
2019-07-02T21:37:21.000Z
|
import arcade
from arcade import FACE_RIGHT, FACE_DOWN, FACE_UP, FACE_LEFT
class AnimatedWalkingSprite(arcade.Sprite):
def __init__(self, scale: float = 1,
image_x: float = 0, image_y: float = 0,
center_x: float = 0, center_y: float = 0, *,
stand_left, stand_right, left, right, up, down, step=20):
super().__init__(scale=scale, image_x=image_x, image_y=image_y,
center_x=center_x, center_y=center_y)
self.state = FACE_RIGHT
self.stand_right_texture = stand_right
self.stand_left_texture = stand_left
self.walk_left_textures = left
self.walk_right_textures = right
self.walk_up_textures = up
self.walk_down_textures = down
self.cur_texture_index = 0
self.texture_change_distance = step
self.last_texture_change_center_x = 0
self.last_texture_change_center_y = 0
self._update_direction(FACE_RIGHT, self.stand_right_texture)
self.textures = [self._texture]
def _update_direction(self, state, texture):
self.last_texture_change_center_x = self.center_x
self.last_texture_change_center_y = self.center_y
self.state = state
self.cur_texture_index = 0
self._texture = texture
    def _rotate(self, delta, textures):
        if abs(delta) >= self.texture_change_distance:
            self.cur_texture_index += 1
            self.last_texture_change_center_x = self.center_x
            self.last_texture_change_center_y = self.center_y
            self._texture = textures[self.cur_texture_index % len(textures)]
def update_animation(self):
tol = 1.
# Falling
if self.change_y <= -tol:
if self.state != FACE_DOWN:
self._update_direction(FACE_DOWN, self.walk_down_textures[0])
else:
self._rotate(self.center_y - self.last_texture_change_center_y,
self.walk_down_textures)
# Jumping
elif self.change_y >= tol:
if self.state != FACE_UP:
self._update_direction(FACE_UP, self.walk_up_textures[0])
else:
self._rotate(self.center_y - self.last_texture_change_center_y,
self.walk_up_textures)
# Going left
elif self.change_x <= -tol:
if self.state != FACE_LEFT:
self._update_direction(FACE_LEFT, self.stand_left_texture)
else:
self._rotate(self.center_x - self.last_texture_change_center_x,
self.walk_left_textures)
# Going right
elif self.change_x >= tol:
if self.state != FACE_RIGHT:
self._update_direction(FACE_RIGHT, self.stand_right_texture)
else:
self._rotate(self.center_x - self.last_texture_change_center_x,
self.walk_right_textures)
elif abs(self.change_x) < tol and self.state == FACE_DOWN:
self._update_direction(FACE_RIGHT, self.stand_right_texture)
self.textures[0] = self._texture
self.width = self._texture.width * self.scale
self.height = self._texture.height * self.scale
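# _rotate above advances the walking animation once the sprite has moved a fixed
# distance since the last texture change. A plain-Python sketch of the same idea,
# detached from arcade (the frame names are placeholders):
def advance_frame(frames, frame_index, moved_distance, step=20):
    """Return the next (frame, index) once `moved_distance` exceeds `step`."""
    if abs(moved_distance) >= step:
        frame_index += 1
    return frames[frame_index % len(frames)], frame_index


frame, idx = advance_frame(["walk_0", "walk_1", "walk_2"], 0, moved_distance=25)
print(frame, idx)  # walk_1 1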
| 40.8125
| 79
| 0.618989
| 414
| 3,265
| 4.487923
| 0.128019
| 0.04521
| 0.080732
| 0.113025
| 0.502691
| 0.495156
| 0.44887
| 0.415501
| 0.360603
| 0.302476
| 0
| 0.007018
| 0.301685
| 3,265
| 80
| 80
| 40.8125
| 0.807895
| 0.011639
| 0
| 0.261538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.030769
| 0
| 0.107692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c76b7443d1cefb8613a32ec558f3e2d259300ab
| 2,089
|
py
|
Python
|
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | 1
|
2021-12-08T19:21:07.000Z
|
2021-12-08T19:21:07.000Z
|
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | null | null | null |
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | null | null | null |
from numpy.core.fromnumeric import transpose
from sklearn import linear_model
from scipy.special import logit
from scipy import stats
from copy import deepcopy
from numpy import random, concatenate, quantile, matmul, transpose
import logging
class singleRegModel():
"""
data struct for running a single regression test
"""
def __init__(self, regressor):
self.regressor = regressor
self.mmodel = None
# params
self.quantile_limit_ = 0.95
def train_binary(self, x_train, y_train):
self.mmodel = deepcopy(self.regressor)
self.mmodel.fit(x_train, y_train)
def train_quant(self, init_x, follow_x, init_y, follow_iter):
self.train_binary(init_x, init_y)
if follow_x is None:
logging.warning("No samples have missing MAF - no follow up training")
return
for i in range(follow_iter):
init_preds = self.mmodel.predict(init_x)
upper_limit = quantile(init_preds, self.quantile_limit_)
follow_y = self.mmodel.predict(follow_x)
follow_y[follow_y > upper_limit] = upper_limit
x_merge = concatenate((init_x, follow_x))
y_merge = concatenate((init_y, follow_y))
self.mmodel = deepcopy(self.regressor)
self.mmodel.fit(x_merge, y_merge)
def predict_prob(self, input_x):
preds = matmul(input_x, transpose(self.mmodel.coef_)) + self.mmodel.intercept_
probs = preds[:,0]
return probs
def predict_quant(self, input_x):
#preds = matmul(input_x, transpose(self.mmodel.coef_)) + self.mmodel.intercept_
#print(preds, self.mmodel.predict(input_x))
#probs = preds[:,0]
#return probs
return self.mmodel.predict(input_x)
class predOutcome():
"""
store output for prediction
"""
def __init__(self):
self.true_y = None
self.test_y = None
self.train_ys = [] # with CV training can have multiple results
        self.cancer_status = None  # binary: 0 for normal and 1 for cancer
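# train_quant above refits the regressor on pseudo-labels for the samples with
# missing MAF, clipping their predictions at a quantile of the predictions on the
# labelled data. A minimal sketch of one such iteration with scikit-learn (the
# arrays are toy data, not from this project):
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
init_x, init_y = rng.normal(size=(20, 3)), rng.normal(size=20)
follow_x = rng.normal(size=(5, 3))

model = LinearRegression().fit(init_x, init_y)
upper = np.quantile(model.predict(init_x), 0.95)          # plays the role of quantile_limit_
follow_y = np.clip(model.predict(follow_x), None, upper)  # clipped pseudo-labels
model = LinearRegression().fit(
    np.concatenate([init_x, follow_x]), np.concatenate([init_y, follow_y])
)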
| 31.179104
| 87
| 0.650551
| 274
| 2,089
| 4.733577
| 0.324818
| 0.100231
| 0.052429
| 0.018504
| 0.245181
| 0.17579
| 0.17579
| 0.17579
| 0.17579
| 0.106399
| 0
| 0.004534
| 0.26089
| 2,089
| 67
| 88
| 31.179104
| 0.835492
| 0.150311
| 0
| 0.04878
| 0
| 0
| 0.029327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.170732
| 0.02439
| 0.439024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c77b39243b7ae9ea7813df0033b58ce3c06fb82
| 4,553
|
py
|
Python
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 12
|
2019-09-21T13:52:09.000Z
|
2022-02-14T06:48:46.000Z
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 1
|
2020-01-22T12:34:52.000Z
|
2020-01-26T21:14:11.000Z
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 5
|
2019-09-18T15:11:26.000Z
|
2021-12-10T14:04:53.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)
ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)
from copy import deepcopy
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
betas=1e-6 * np.ones((1, )))
parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
betas=1e-6 * np.ones((n_features, )))
likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)
parameter_posterior = None
for i in range(100):
# parameter posterior
alphas = parameter_precision_posterior.mean()
parameter_prior = GaussianWithPrecision(dim=n_features,
mu=np.zeros((n_features, )),
lmbda=np.diag(alphas))
parameter_posterior = deepcopy(parameter_prior)
beta = likelihood_precision_posterior.mean()
likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
lmbda=beta,
affine=False)
stats = likelihood_known_precision.statistics(X, y)
parameter_posterior.nat_param = parameter_prior.nat_param + stats
# likelihood precision posterior
param = parameter_posterior.mean()
likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
W=param, affine=False)
stats = likelihood_known_mean.statistics(X, y)
likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats
# parameter precision posterior
parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
param = parameter_posterior.mean()
stats = parameter_likelihood.statistics(param)
parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats
our_ard = parameter_posterior.mode()
from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision
M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))
prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)
likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
row_dim=1,
affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats
our_ols = posterior.mode()[0]
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
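# The parameter posterior updated in the loop above has a closed-form mean once
# the precisions are fixed: with per-weight precisions alpha and noise precision
# beta, the MAP weights solve (beta * X^T X + diag(alpha)) w = beta * X^T y.
# A direct NumPy sketch of that single step (alpha/beta values are illustrative):
import numpy as np

rng = np.random.default_rng(0)
X_demo, y_demo = rng.normal(size=(50, 10)), rng.normal(size=50)
alpha_demo, beta_demo = np.full(10, 4.0), 50.0
w_map = np.linalg.solve(
    beta_demo * X_demo.T @ X_demo + np.diag(alpha_demo),
    beta_demo * X_demo.T @ y_demo,
)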
| 39.591304
| 100
| 0.691852
| 543
| 4,553
| 5.627993
| 0.255985
| 0.04712
| 0.045812
| 0.039267
| 0.194372
| 0.034686
| 0.034686
| 0.018325
| 0.018325
| 0
| 0
| 0.016653
| 0.208654
| 4,553
| 114
| 101
| 39.938596
| 0.831529
| 0.097738
| 0
| 0.051282
| 0
| 0
| 0.02417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c77f77e66dc427bbe7624fc776b41c3d875169f
| 7,516
|
py
|
Python
|
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
import json
import re
import ast
from graphviz import Digraph
import pandas as pd
# color the graph
import graph_tool.all as gt
import copy
import matplotlib.colors as mcolors
import sys
import utils
from tompkins.ilp import schedule, jobs_when_where
from collections import defaultdict
from pulp import value
import seaborn as sns
def get_benchmarks():
benchmarks = {}
for _file in os.listdir(stats_dir):
try:
bnch = _file.rsplit('.', 1)[0]
assert os.path.isfile(os.path.join(stats_dir, f'{bnch}.iopt'))
app = bnch #, scheduler = bnch.rsplit(':', 1)
scheduler = 'vanilla'
benchmarks[bnch] = {'app': app, 'scheduler': scheduler, 'benchmark': bnch}
except AssertionError:
pass
return benchmarks
def build_graph(benchmark):
css_colors = list(mcolors.CSS4_COLORS.keys())
gfile = os.path.join(stats_dir, f'{benchmark}.iopt')
with open(gfile, 'r') as fd:
raw = fd.read().split('\n')
g = gt.Graph(directed=True)
vid_to_vx = {}
name_to_vid = {}
g.vertex_properties['name'] = g.new_vertex_property("string")
g.vertex_properties['worker'] = g.new_vertex_property("string")
g.vertex_properties['color'] = g.new_vertex_property("string", '#e0e0e0')
g.vertex_properties['icolor'] = g.new_vertex_property("int")
g.vertex_properties['output_size'] = g.new_vertex_property("int")
g.vertex_properties['runtime'] = g.new_vertex_property("float")
for ln in raw:
if ln.startswith('v'):
_, vid, name, runtime, output_size = ln.split(',', 4)
v = g.add_vertex()
vid_to_vx[vid] = v
name_to_vid[name] = vid
g.vp.name[v] = name
g.vp.runtime[v] = float(runtime) # 1 second
g.vp.output_size[v] = float(output_size) # 1GB
g.vp.color[v] = '#e0e0e0'
for ln in raw:
if ln.startswith('e'):
_, vsrc, vdst = ln.split(',')
g.add_edge(vid_to_vx[vsrc], vid_to_vx[vdst])
return g
def get_runtime_statistics(benchmark):
tasks = []
statistics = {}
jfile = os.path.join(stats_dir, f'{benchmark}.json')
with open(jfile, 'r') as fd:
stats = ast.literal_eval(fd.read())
for ts in stats:
ops = 'ts'; #ts.replace("(", '').replace(')', '').split("'")[1].split('-')[0]
statistics[ts] = {'key': ts, 'op': ops,
'output_size': stats[ts]['msg']['nbytes'], 'worker': stats[ts]['worker'].split(':')[1].replace('/', '')}
startsstops = stats[ts]['msg']['startstops']
for ss in startsstops:
if ss['action'] == 'compute':
statistics[ts]['compute_end'] = ss['stop']
statistics[ts]['compute_start'] = ss['start']
statistics[ts]['runtime'] = ss['stop'] - ss['start']
cfile = os.path.join(stats_dir, f'{benchmark}.colors')
with open(cfile, 'r') as cfd:
raw = cfd.read().split('\n')
for ln in raw:
if not ln:
continue
ts, color = ln.split(',')
#ts += ')'
statistics[ts]['color'] = int(color)
return statistics
def plot_graph(g, benchmark, optimal=False):
print(benchmark["benchmark"])
post = ".optimal" if optimal else ""
dg = Digraph('G', filename=f'{benchmark["benchmark"]}{post}.gv', format='png')
for v in g.vertices():
dg.attr('node', shape='ellipse', style="filled,solid",
penwidth="3",
fillcolor=g.vp.color[v],
color=worker_color[g.vp.statistics[v]['worker']])
#if benchmark['scheduler'] == "vanilla":
# dg.node(f'{v}')
#else:
dg.node(f'{v}, color({g.vp.icolor[v]})')
for e in g.edges():
#if benchmark['scheduler'] == "vanilla":
# dg.edge(f'{e.source()}', f'{e.target()}')
#else:
dg.edge(f'{e.source()}, color({g.vp.icolor[e.source()]})',
f'{e.target()}, color({g.vp.icolor[e.target()]})')
dg.view(os.path.join(f'{results_dir}',f'{benchmark["benchmark"]}{post}'), quiet=False)
import pulp as pl
import time
def find_optimal(g, bw):
n_workers = 4
workers = [f'w{i}' for i in range(n_workers)]
    # Job Release Times - Additional constraints on availability of Jobs
# R = np.zeros(n)
R = defaultdict(lambda:0)
# Maximum makespan
M = 100
B = defaultdict(lambda:1)
agents = workers
jobs = []
for v in g.vertices():
jobs.append(f't{v}')
n = len(jobs)
m = len(agents)
P = defaultdict(lambda:0)
for e in g.edges():
P[f't{e.source()}',f't{e.target()}'] = 1
# computation
D = defaultdict(lambda:0)
for v in g.vertices():
for a in agents:
D[f't{v}', a] = g.vp.runtime[v] # statistics[g.vp.name[v]]['runtime']
# Communication Delay matrix - Cost of sending results of job from
# agent to agent
#bw = 10*(1<<30)/(1<<3)
bw = bw*(1<<20)/(1<<3)
C = defaultdict(lambda:0)
for v in g.vertices():
for a in agents:
for b in agents:
C[f't{v}', a, b] = 0 if a == b else g.vp.output_size[v]/bw # 0 --> cost_serialization
start = time.time()
# Set up the Mixed Integer Linear Program
prob, X, S, Cmax = schedule(jobs, agents, D, C, R, B, P, M)
solver = pl.GUROBI_CMD()
prob.solve(solver)
latency = time.time() - start
print('-----------------------------------------------> constraints', len(prob.constraints.keys()))
print('----------------------------------------------> # of variables', prob.numVariables())
print('---------------------------------------------->', latency)
print("Makespan: ", value(Cmax))
sched = jobs_when_where(prob, X, S, Cmax)
print("Schedule: ", sched)
sched2 = []
for j in sched:
new = j + (j[1] + D[j[0], j[2]], g.vp.name[int(j[0].replace('t', ''))])
sched2.append(new)
print("Schedule: ", sched2)
return sched2, {'makespan': value(Cmax),
'constraints': len(prob.constraints.keys()),
'variables': prob.numVariables(),
'time': float(latency)}
results_dir = './benchmarks'
stats_dir='./benchmarks'
benchmarks = get_benchmarks()
#benchmarks = ['dom4x61GB1B', 'dom2x41GB1B', 'tree4x61GB1B']
for bnch in benchmarks:
for bw in [1*1024, 16*1024, 512, 32*1024, 8*1024, 4*1024, 2*1024, 256, 128, 64, 32]:
print(f'process {bnch}')
g = build_graph(bnch)
sched2, stats = find_optimal(g, bw)
with open(f'{results_dir}/optimal_compuation_stats.csv', 'a') as fd:
fd.write(f'{bnch},{stats["makespan"]},{stats["constraints"]},{stats["variables"]},{stats["time"]},no,{bw}\n')
with open(f'{results_dir}/{bnch}.nonetworkcontention.{bw}mbps.optimal', 'w') as fd:
for s in sched2:
fd.write(f'v,{s[0]},{s[1]},{s[2]}\n')
#fd.write(f'{s[4]},{s[3]},{s[0]},{s[1]},{s[2]}\n')
#v = int(s[0].replace('t', ''))
#g.vp.worker[v] = s[2]
break
#break
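# find_optimal above converts the bandwidth argument from megabits per second to
# bytes per second with bw * (1 << 20) / (1 << 3) and then charges output_size / bw
# seconds for moving a result between two different workers. A worked example with
# made-up sizes:
output_size = 256 * (1 << 20)               # 256 MiB intermediate result, in bytes
bw_mbps = 1024                              # 1 Gbit/s link
bw_bytes = bw_mbps * (1 << 20) / (1 << 3)   # = 134217728 bytes per second
print(output_size / bw_bytes)               # 2.0 seconds of transfer delay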
| 32.678261
| 125
| 0.548696
| 997
| 7,516
| 4.057172
| 0.248746
| 0.010383
| 0.025216
| 0.0267
| 0.269221
| 0.180717
| 0.17602
| 0.139926
| 0.100865
| 0.100865
| 0
| 0.021554
| 0.265434
| 7,516
| 229
| 126
| 32.820961
| 0.711103
| 0.106706
| 0
| 0.180723
| 0
| 0.006024
| 0.162328
| 0.076451
| 0
| 0
| 0
| 0
| 0.012048
| 1
| 0.03012
| false
| 0.006024
| 0.162651
| 0
| 0.216867
| 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c79f89ccfffa309abd3d78c50d5bebd47df7780
| 3,675
|
py
|
Python
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 52
|
2019-08-05T21:58:53.000Z
|
2022-03-21T22:36:22.000Z
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 10
|
2020-04-11T21:30:53.000Z
|
2022-03-12T07:14:06.000Z
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 10
|
2020-01-30T07:52:09.000Z
|
2022-02-03T03:44:41.000Z
|
import datetime as dt
import logging
from babel import Locale, UnknownLocaleError
from babel.dates import format_datetime, format_time, format_date
import pytz
from tzlocal import get_localzone
from . import settings
logger = logging.getLogger(__name__)
class LocaleHelper:
"""Helpers for converting date & time according to current locale and timezone"""
def __init__(
self,
my_locale: Locale = None,
my_tz: pytz.BaseTzInfo = None,
author_info: dict = None,
) -> None:
"""
Args:
- my_locale: Primary locale to use
- my_tz: Primary timezone to use
- author_info: locale and timezone to use from this Slack response
if my_locale and/or my_tz are not given
"""
self._locale = self._determine_locale(my_locale, author_info)
self._timezone = self._determine_timezone(my_tz, author_info)
@staticmethod
def _determine_locale(my_locale: Locale = None, author_info: dict = None) -> Locale:
if my_locale:
if not isinstance(my_locale, Locale):
raise TypeError("my_locale must be a babel Locale object")
else:
if author_info:
try:
my_locale = Locale.parse(author_info["locale"], sep="-")
except UnknownLocaleError:
logger.warning("Could not use locale info from Slack")
my_locale = Locale.default()
else:
my_locale = Locale.default()
if not my_locale:
my_locale = Locale.parse(settings.FALLBACK_LOCALE)
return my_locale
@staticmethod
def _determine_timezone(
my_tz: pytz.BaseTzInfo = None, author_info: dict = None
) -> pytz.BaseTzInfo:
if my_tz:
if not isinstance(my_tz, pytz.BaseTzInfo):
raise TypeError("my_tz must be of type pytz")
else:
if author_info:
try:
my_tz = pytz.timezone(author_info["tz"])
except pytz.exceptions.UnknownTimeZoneError:
logger.warning("Could not use timezone info from Slack")
my_tz = get_localzone()
else:
my_tz = get_localzone()
if not my_tz:
my_tz = pytz.UTC
return my_tz
@property
def locale(self) -> Locale:
return self._locale
@property
def timezone(self) -> pytz.BaseTzInfo:
return self._timezone
def format_date_full_str(self, my_datetime: dt.datetime) -> str:
return format_date(my_datetime, format="full", locale=self.locale)
def format_datetime_str(self, my_datetime: dt.datetime) -> str:
"""returns formated datetime string for given dt using locale"""
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_datetime_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_datetime(my_datetime, format="short", locale=self.locale)
def get_time_formatted_str(self, ts: int) -> str:
"""return given timestamp as formated datetime string using locale"""
my_datetime = self.get_datetime_from_ts(ts)
return format_time(my_datetime, format="short", locale=self.locale)
def get_datetime_from_ts(self, ts: int) -> dt.datetime:
"""returns datetime object of a unix timestamp with local timezone"""
my_datetime = dt.datetime.fromtimestamp(float(ts), pytz.UTC)
return my_datetime.astimezone(self.timezone)
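# A minimal usage sketch of LocaleHelper with an explicit locale and timezone, so
# that no Slack author_info lookup is needed (the timestamp value is arbitrary and
# the printed string is only an example of babel's "short" format):
demo_helper = LocaleHelper(my_locale=Locale.parse("en_US"), my_tz=pytz.UTC)
print(demo_helper.get_datetime_formatted_str(1600000000))  # e.g. '9/13/20, 12:26 PM'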
| 36.386139
| 88
| 0.633741
| 449
| 3,675
| 4.966592
| 0.209354
| 0.050224
| 0.043946
| 0.034081
| 0.301794
| 0.270404
| 0.25157
| 0.224664
| 0.224664
| 0.188789
| 0
| 0
| 0.284082
| 3,675
| 100
| 89
| 36.75
| 0.847586
| 0.137959
| 0
| 0.277778
| 0
| 0
| 0.054098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0
| 0.097222
| 0.041667
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c7c266f5c66aa6fb93fbd1ac553f14737d31adf
| 1,193
|
py
|
Python
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 32
|
2021-06-21T04:49:48.000Z
|
2022-03-29T05:46:59.000Z
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 1
|
2021-11-12T03:45:55.000Z
|
2021-11-12T03:45:55.000Z
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 10
|
2021-06-03T08:05:05.000Z
|
2021-12-13T03:10:42.000Z
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/31/2021 1:37 PM
# @File:GlobalAvgPool2d
import torch.nn as nn
from python_developer_tools.cv.bases.activates.swish import h_swish
class GlobalAvgPool2d(nn.Module):
""" Fast implementation of global average pooling from
TResNet: High Performance GPU-Dedicated Architecture
https://arxiv.org/pdf/2003.13630.pdf
Args:
flatten (bool, optional): whether spatial dimensions should be squeezed
"""
def __init__(self, flatten: bool = False) -> None:
super().__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
class SwishAdaptiveAvgPool2d(nn.Module):
def __init__(self,inplace=True):
super().__init__()
self.avgpool=nn.Sequential(
nn.ReLU6(inplace=inplace),
nn.AdaptiveAvgPool2d((1, 1)),
h_swish()
)
def forward(self, x):
return self.avgpool(x)
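# A quick, illustrative check that GlobalAvgPool2d above matches the usual adaptive
# average pooling, in both unflattened and flattened form:
import torch

x = torch.randn(2, 3, 8, 8)
ref = nn.AdaptiveAvgPool2d((1, 1))(x)
assert torch.allclose(GlobalAvgPool2d()(x), ref)
assert torch.allclose(GlobalAvgPool2d(flatten=True)(x), ref.flatten(1))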
| 31.394737
| 93
| 0.619447
| 159
| 1,193
| 4.503145
| 0.528302
| 0.034916
| 0.02514
| 0.041899
| 0.047486
| 0.047486
| 0.047486
| 0.047486
| 0
| 0
| 0
| 0.043285
| 0.244761
| 1,193
| 38
| 94
| 31.394737
| 0.751387
| 0.285834
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.045455
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c7c4de6be5e48f9c89afdf0a57351e2ebf01e66
| 28,531
|
py
|
Python
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 2
|
2015-12-31T07:56:16.000Z
|
2016-08-22T17:23:02.000Z
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 6
|
2015-02-18T04:25:46.000Z
|
2017-01-25T01:00:35.000Z
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 1
|
2015-12-31T07:56:20.000Z
|
2015-12-31T07:56:20.000Z
|
"""Some utility functions"""
# Authors: Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import warnings
import operator
from copy import deepcopy
import subprocess
import importlib
import os
import os.path as op
import inspect
import sys
import tempfile
import ssl
from shutil import rmtree
import atexit
import json
from functools import partial
from distutils.version import LooseVersion
from numpy import sqrt, convolve, ones
import logging
import datetime
from timeit import default_timer as clock
from threading import Timer
import numpy as np
import scipy as sp
from ._externals import decorator
# set this first thing to make sure it "takes"
try:
import pyglet
pyglet.options['debug_gl'] = False
del pyglet
except Exception:
pass
# for py3k (eventually)
if sys.version.startswith('2'):
string_types = basestring # noqa
input = raw_input # noqa, input is raw_input in py3k
text_type = unicode # noqa
from __builtin__ import reload
from urllib2 import urlopen # noqa
from cStringIO import StringIO # noqa
else:
string_types = str
text_type = str
from urllib.request import urlopen
input = input
from io import StringIO # noqa, analysis:ignore
from importlib import reload # noqa, analysis:ignore
###############################################################################
# LOGGING
EXP = 25
logging.addLevelName(EXP, 'EXP')
def exp(self, message, *args, **kwargs):
"""Experiment-level logging."""
self.log(EXP, message, *args, **kwargs)
logging.Logger.exp = exp
logger = logging.getLogger('expyfun')
def flush_logger():
"""Flush expyfun logger"""
for handler in logger.handlers:
handler.flush()
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable EXPYFUN_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('EXPYFUN_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
verbose = 'INFO' if verbose is True else 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
def set_log_file(fname=None,
output_format='%(asctime)s - %(levelname)-7s - %(message)s',
overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
###############################################################################
# RANDOM UTILITIES
building_doc = any('sphinx-build' in ((''.join(i[4]).lower() + i[1])
if i[4] is not None else '')
for i in inspect.stack())
def run_subprocess(command, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
    By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
**kwargs : objects
        Keyword arguments to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
# code adapted with permission from mne-python
kw = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
kw.update(kwargs)
p = subprocess.Popen(command, **kw)
stdout_, stderr = p.communicate()
output = (stdout_.decode(), stderr.decode())
if p.returncode:
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
class ZeroClock(object):
"""Clock that uses "clock" function but starts at zero on init."""
def __init__(self):
self._start_time = clock()
def get_time(self):
"""Get time."""
return clock() - self._start_time
def date_str():
"""Produce a date string for the current date and time
Returns
-------
datestr : str
The date string.
"""
return str(datetime.datetime.today()).replace(':', '_')
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout."""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead. Passing del_after and print_del kwargs to the constructor are
helpful primarily for debugging purposes.
"""
def __new__(self, del_after=True, print_del=False):
new = str.__new__(self, tempfile.mkdtemp())
self._del_after = del_after
self._print_del = print_del
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
if self._del_after is True:
if self._print_del is True:
print('Deleting {} ...'.format(self._path))
rmtree(self._path, ignore_errors=True)
def check_units(units):
"""Ensure user passed valid units type
Parameters
----------
units : str
Must be ``'norm'``, ``'deg'``, or ``'pix'``.
"""
good_units = ['norm', 'pix', 'deg']
if units not in good_units:
raise ValueError('"units" must be one of {}, not {}'
''.format(good_units, units))
###############################################################################
# DECORATORS
# Following deprecated class copied from scikit-learn
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
>>> from expyfun._utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<expyfun._utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
# scikit-learn will not import on all platforms b/c it can be
# sklearn or scikits.learn, so a self-contained example is used above
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
"""Call."""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
@decorator
def verbose_dec(function, *args, **kwargs):
"""Improved verbose decorator to allow functions to override log-level
    Do not call this directly to set global verbosity level, instead use
set_log_level().
Parameters
----------
function : callable
Function to be decorated by setting the verbosity level.
Returns
-------
dec - function
The decorated function
"""
arg_names = _get_args(function)
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
if('verbose' in arg_names):
verbose_level = args[arg_names.index('verbose')]
else:
verbose_level = default_level
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except Exception:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
ret = function(*args, **kwargs)
return ret
def _new_pyglet():
import pyglet
return LooseVersion(pyglet.version) >= LooseVersion('1.4')
def _has_video():
if _new_pyglet():
try:
from pyglet.media.codecs.ffmpeg import FFmpegSource # noqa
except ImportError:
return False
else:
try:
from pyglet.media.avbin import AVbinSource # noqa
except ImportError:
try:
from pyglet.media.sources.avbin import AVbinSource # noqa
except ImportError:
return False
return True
def requires_video():
"""Requires FFmpeg/AVbin decorator."""
import pytest
return pytest.mark.skipif(not _has_video(), reason='Requires FFmpeg/AVbin')
def requires_opengl21(func):
"""Requires OpenGL decorator."""
import pytest
import pyglet.gl
vendor = pyglet.gl.gl_info.get_vendor()
version = pyglet.gl.gl_info.get_version()
sufficient = pyglet.gl.gl_info.have_version(2, 0)
return pytest.mark.skipif(not sufficient,
reason='OpenGL too old: %s %s'
% (vendor, version,))(func)
def requires_lib(lib):
"""Requires lib decorator."""
import pytest
try:
importlib.import_module(lib)
except Exception as exp:
val = True
reason = 'Needs %s (%s)' % (lib, exp)
else:
val = False
reason = ''
return pytest.mark.skipif(val, reason=reason)
def _has_scipy_version(version):
return (LooseVersion(sp.__version__) >= LooseVersion(version))
def _get_user_home_path():
"""Return standard preferences path"""
# this has been checked on OSX64, Linux64, and Win32
val = os.getenv('APPDATA' if 'nt' == os.name.lower() else 'HOME', None)
if val is None:
raise ValueError('expyfun config file path could '
'not be determined, please report this '
'error to expyfun developers')
return val
def fetch_data_file(fname):
"""Fetch example remote file
Parameters
----------
fname : str
The remote filename to get. If the filename already exists
on the local system, the file will not be fetched again.
Returns
-------
fname : str
The filename on the local system where the file was downloaded.
"""
path = get_config('EXPYFUN_DATA_PATH', op.join(_get_user_home_path(),
'.expyfun', 'data'))
fname_out = op.join(path, fname)
if not op.isdir(op.dirname(fname_out)):
os.makedirs(op.dirname(fname_out))
fname_url = ('https://github.com/LABSN/expyfun-data/raw/master/{0}'
''.format(fname))
try:
# until we get proper certificates
context = ssl._create_unverified_context()
this_urlopen = partial(urlopen, context=context)
except AttributeError:
context = None
this_urlopen = urlopen
if not op.isfile(fname_out):
try:
with open(fname_out, 'wb') as fid:
www = this_urlopen(fname_url, timeout=30.0)
try:
fid.write(www.read())
finally:
www.close()
except Exception:
os.remove(fname_out)
raise
return fname_out
def get_config_path():
r"""Get path to standard expyfun config file.
Returns
-------
config_path : str
The path to the expyfun configuration file. On windows, this
will be '%APPDATA%\.expyfun\expyfun.json'. On every other
system, this will be $HOME/.expyfun/expyfun.json.
"""
val = op.join(_get_user_home_path(), '.expyfun', 'expyfun.json')
return val
# List the known configuration values
known_config_types = ('RESPONSE_DEVICE',
'AUDIO_CONTROLLER',
'DB_OF_SINE_AT_1KHZ_1RMS',
'EXPYFUN_EYELINK',
'SOUND_CARD_API',
'SOUND_CARD_BACKEND',
'SOUND_CARD_FS',
'SOUND_CARD_NAME',
'SOUND_CARD_FIXED_DELAY',
'TDT_CIRCUIT_PATH',
'TDT_DELAY',
'TDT_INTERFACE',
'TDT_MODEL',
'TDT_TRIG_DELAY',
'TRIGGER_CONTROLLER',
'TRIGGER_ADDRESS',
'WINDOW_SIZE',
'SCREEN_NUM',
'SCREEN_WIDTH',
'SCREEN_DISTANCE',
'SCREEN_SIZE_PIX',
'EXPYFUN_LOGGING_LEVEL',
)
# These allow for partial matches: 'NAME_1' is okay key if 'NAME' is listed
known_config_wildcards = ()
def get_config(key=None, default=None, raise_error=False):
"""Read expyfun preference from env, then expyfun config
Parameters
----------
key : str
The preference key to look for. The os environment is searched first,
then the expyfun config file is parsed.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
Returns
-------
value : str | None
The preference key value.
"""
if key is not None and not isinstance(key, string_types):
raise ValueError('key must be a string')
# first, check to see if key is in env
if key is not None and key in os.environ:
return os.environ[key]
# second, look for it in expyfun config file
config_path = get_config_path()
if not op.isfile(config_path):
key_found = False
val = default
else:
with open(config_path, 'r') as fid:
config = json.load(fid)
if key is None:
return config
key_found = key in config
val = config.get(key, default)
if not key_found and raise_error is True:
meth_1 = 'os.environ["%s"] = VALUE' % key
meth_2 = 'expyfun.utils.set_config("%s", VALUE)' % key
raise KeyError('Key "%s" not found in environment or in the '
'expyfun config file:\n%s\nTry either:\n'
' %s\nfor a temporary solution, or:\n'
' %s\nfor a permanent one. You can also '
'set the environment variable before '
'running python.'
% (key, config_path, meth_1, meth_2))
return val
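# Added usage sketch (not part of the original expyfun source): environment
# variables take precedence over the JSON config file, and a default can be
# supplied for keys that are set nowhere.
def _example_get_config():
    fs = get_config('SOUND_CARD_FS', default='44100')
    level = get_config('EXPYFUN_LOGGING_LEVEL', default='info')
    return fs, level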
def set_config(key, value):
"""Set expyfun preference in config
Parameters
----------
key : str | None
The preference key to set. If None, a tuple of the valid
keys is returned, and ``value`` is ignored.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
"""
if key is None:
return sorted(known_config_types)
if not isinstance(key, string_types):
raise ValueError('key must be a string')
# While JSON allows non-string types, users can override config settings via
# environment variables, which are always strings, so we enforce strings here
if not isinstance(value, string_types) and value is not None:
raise ValueError('value must be a string or None')
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warnings.warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path()
if op.isfile(config_path):
with open(config_path, 'r') as fid:
config = json.load(fid)
else:
config = dict()
logger.info('Attempting to create new expyfun configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
else:
config[key] = value
# Write all values
directory = op.split(config_path)[0]
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
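# Added usage sketch (not part of the original expyfun source): values are
# stored as strings in the expyfun.json config file, and passing None removes
# a key again (assuming the key is not also set in the environment).
def _example_set_config_roundtrip():
    set_config('SOUND_CARD_FS', '48000')   # persist a value
    value = get_config('SOUND_CARD_FS')    # '48000' unless overridden by env
    set_config('SOUND_CARD_FS', None)      # delete the key again
    return value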
###############################################################################
# MISC
def fake_button_press(ec, button='1', delay=0.):
"""Fake a button press after a delay
Notes
-----
This function only works with the keyboard controller (not TDT)!
It uses threads to ensure that control is passed back, so other commands
can be called (like wait_for_presses).
"""
def send():
ec._response_handler._on_pyglet_keypress(button, [], True)
Timer(delay, send).start() if delay > 0. else send()
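# Added usage sketch (not part of the original expyfun source): because the
# press is dispatched on a timer thread, the experiment script can keep
# waiting for responses as usual.  The exact wait_for_presses call below is
# illustrative only; see the ExperimentController documentation for details.
def _example_fake_button_press(ec):
    fake_button_press(ec, button='1', delay=0.5)
    return ec.wait_for_presses(1.0)  # illustrative call; '1' should be reported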
def fake_mouse_click(ec, pos, button='left', delay=0.):
"""Fake a mouse click after a delay"""
button = dict(left=1, middle=2, right=4)[button] # trans to pyglet
def send():
ec._mouse_handler._on_pyglet_mouse_click(pos[0], pos[1], button, [])
Timer(delay, send).start() if delay > 0. else send()
def _check_pyglet_version(raise_error=False):
"""Check pyglet version, return True if usable.
"""
import pyglet
is_usable = LooseVersion(pyglet.version) >= LooseVersion('1.2')
if raise_error is True and is_usable is False:
raise ImportError('On Linux, you must run at least Pyglet '
'version 1.2, and you are running '
'{0}'.format(pyglet.version))
return is_usable
def _wait_secs(secs, ec=None):
"""Wait a specified number of seconds.
Parameters
----------
secs : float
Number of seconds to wait.
ec : None | expyfun.ExperimentController instance
The ExperimentController.
Notes
-----
This function uses a while loop. Although this slams the CPU, it will
guarantee that events (keypresses, etc.) are processed.
"""
# hog the cpu, checking time
t0 = clock()
if ec is not None:
while (clock() - t0) < secs:
ec._dispatch_events()
ec.check_force_quit()
else:
wins = _get_display().get_windows()
for win in wins:
win.dispatch_events()
def running_rms(signal, win_length):
"""RMS of ``signal`` with rectangular window ``win_length`` samples long.
Parameters
----------
signal : array_like
The (1-dimensional) signal of interest.
win_length : int
Length (in samples) of the rectangular window
"""
return sqrt(convolve(signal ** 2, ones(win_length) / win_length, 'valid'))
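# Added usage sketch (not part of the original expyfun source): with a
# rectangular window, the running RMS of a constant signal is that constant,
# and 'valid' convolution shortens the output by win_length - 1 samples.
def _example_running_rms():
    sig = np.ones(10)
    rms = running_rms(sig, 4)
    # rms has 7 samples, each approximately 1.0
    return rms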
def _fix_audio_dims(signal, n_channels):
"""Make it so a valid audio buffer is in the standard dimensions
Parameters
----------
signal : array_like
The signal whose dimensions should be checked and fixed.
n_channels : int
The number of channels that the output should have.
If the input is mono and n_channels=2, it will be tiled to be
shape (2, n_samples). Otherwise, the number of channels in signal
must match n_channels.
Returns
-------
signal_fixed : array
The signal with standard dimensions (n_channels, N).
"""
# Check requested channel output
n_channels = int(operator.index(n_channels))
signal = np.asarray(np.atleast_2d(signal), dtype=np.float32)
# Check dimensionality
if signal.ndim != 2:
raise ValueError('Sound data must have one or two dimensions, got %s.'
% (signal.ndim,))
# Return data with correct dimensions
if n_channels == 2 and signal.shape[0] == 1:
signal = np.tile(signal, (n_channels, 1))
if signal.shape[0] != n_channels:
raise ValueError('signal channel count %d did not match required '
'channel count %d' % (signal.shape[0], n_channels))
return signal
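# Added usage sketch (not part of the original expyfun source): a mono signal
# is promoted to shape (1, n_samples); requesting two channels tiles it to
# (2, n_samples), while a genuine channel-count mismatch raises ValueError.
def _example_fix_audio_dims():
    mono = np.zeros(100)
    stereo = _fix_audio_dims(mono, n_channels=2)
    # stereo.shape == (2, 100)
    return stereo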
def _sanitize(text_like):
"""Cast as string, encode as UTF-8 and sanitize any escape characters.
"""
return text_type(text_like).encode('unicode_escape').decode('utf-8')
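# Added usage sketch (not part of the original expyfun source): escape
# characters survive as literal backslash sequences, which keeps, e.g.,
# tab-separated log files at one line per entry.
def _example_sanitize():
    return _sanitize('line one\nline two')  # -> 'line one\\nline two'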
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
Notes
-----
Taken from mne-python with permission.
"""
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' a is None, b is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
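# Added usage sketch (not part of the original expyfun source): object_diff
# returns an empty string for equal inputs and a human-readable report of each
# mismatch otherwise.  The dictionaries below are arbitrary examples.
def _example_object_diff():
    a = dict(fs=44100, window=[600, 600])
    b = dict(fs=48000, window=[600, 600])
    report = object_diff(a, b)
    # report contains a "value mismatch (44100, 48000)" line for key 'fs'
    return report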
def _check_skip_backend(backend):
from expyfun._sound_controllers import _import_backend
import pytest
if isinstance(backend, dict): # actually an AC
backend = backend['SOUND_CARD_BACKEND']
try:
_import_backend(backend)
except Exception as exc:
pytest.skip('Skipping test for backend %s: %s' % (backend, exc))
def _check_params(params, keys, defaults, name):
if not isinstance(params, dict):
raise TypeError('{0} must be a dict, got type {1}'
.format(name, type(params)))
params = deepcopy(params)
if not isinstance(params, dict):
raise TypeError('{0} must be a dict, got {1}'
.format(name, type(params)))
# Set sensible defaults for values that are not passed
for k in keys:
params[k] = params.get(k, get_config(k, defaults.get(k, None)))
# Check keys
for k in params.keys():
if k not in keys:
raise KeyError('Unrecognized key in {0}["{1}"], must be '
'one of {2}'.format(name, k, ', '.join(keys)))
return params
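# Added usage sketch (not part of the original expyfun source): missing keys
# are filled in from the config file (or from the supplied defaults), and any
# unknown key raises KeyError.  The key names below are illustrative.
def _example_check_params():
    defaults = dict(TYPE='sound_card', SOUND_CARD_FS='44100')
    params = _check_params(dict(TYPE='sound_card'),
                           keys=('TYPE', 'SOUND_CARD_FS'),
                           defaults=defaults, name='audio_controller')
    return params  # contains both TYPE and SOUND_CARD_FS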
def _get_display():
import pyglet
try:
display = pyglet.canvas.get_display()
except AttributeError: # < 1.4
display = pyglet.window.get_platform().get_default_display()
return display
| 31.45645 | 79 | 0.598156 | 3,582 | 28,531 | 4.63512 | 0.207984 | 0.008432 | 0.004337 | 0.001988 | 0.101488 | 0.059989 | 0.037824 | 0.034452 | 0.031079 | 0.031079 | 0 | 0.005049 | 0.291858 | 28,531 | 906 | 80 | 31.49117 | 0.816719 | 0.293681 | 0 | 0.189691 | 0 | 0 | 0.109688 | 0.006269 | 0 | 0 | 0 | 0.001104 | 0 | 1 | 0.098969 | false | 0.002062 | 0.101031 | 0.004124 | 0.294845 | 0.008247 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9c7d50b1d9dc52a93f5eb0bc5220367d727e498d | 9,079 | py | Python | torch/_fx/graph_module.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | ["BSD-3-Clause"] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | torch/_fx/graph_module.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | ["BSD-3-Clause"] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | torch/_fx/graph_module.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | ["BSD-3-Clause"] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z |
import torch
import torch.overrides
import linecache
from typing import Type, Dict, List, Any, Union
from .graph import Graph
import copy
# normal exec loses the source code, however we can patch
# the linecache module to still recover it.
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id = 0
def exec_with_source(src: str, globals: Dict[str, Any]):
global _next_id
key = f'<eval_with_key_{_next_id}>'
_next_id += 1
_eval_cache[key] = [line + '\n' for line in src.splitlines()]
exec(compile(src, key, 'exec'), globals)
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}
_orig_getlines = linecache.getlines
def patched_getline(*args, **kwargs):
if args[0] in _eval_cache:
return _eval_cache[args[0]]
return _orig_getlines(*args, **kwargs)
linecache.getlines = patched_getline
def _forward_from_src(src : str):
gbls: Dict[str, Any] = {
'torch': torch
}
exec_with_source(src, gbls)
return gbls['forward']
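# Added usage sketch (not part of this module's original source): the source
# string is executed in a namespace that already contains `torch`, and the
# resulting `forward` function is returned.
def _example_forward_from_src():
    src = "def forward(self, x):\n    return torch.relu(x)\n"
    fn = _forward_from_src(src)
    return fn(None, torch.tensor([-1.0, 2.0]))  # tensor([0., 2.])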
def deserialize_graphmodule(body : dict) -> torch.nn.Module:
"""
Deserialize a GraphModule given the dictionary of the original module,
using the code to reconstruct the graph. We delete the actual graph before
saving the dictionary so that changes to the in-memory graph format do not
get serialized.
"""
# We create a dummy class here because symbolic_trace pulls the forward()
# function off of the class, rather than the instance
class CodeOnlyModule(torch.nn.Module):
def __init__(self, body):
super().__init__()
self.__dict__ = body
CodeOnlyModule.forward = _forward_from_src(body['code'])
from .symbolic_trace import Tracer
# we shouldn't trace into any of the submodules,
# because they were not traced in the original GraphModule
class KeepModules(Tracer):
def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
return True
return KeepModules().trace(CodeOnlyModule(body))
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
f = getattr(from_module, item)
t = getattr(to_module, item, None)
if f is t:
# we have already installed one of its parents
# (e.g. target = root.linear.weight, but we have already installed root.linear)
# once we install a parent, we no longer need to copy the children
# since all the needed properties will already be present
return
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
from_module, to_module = f, t
setattr(to_module, field, getattr(from_module, field))
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
t = getattr(to_module, item, None)
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
to_module = t
setattr(to_module, field, from_obj)
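# Added usage sketch (not part of this module's original source): _assign_attr
# creates any intermediate empty Modules needed so that the qualified name
# resolves on the destination module.
def _example_assign_attr():
    root = torch.nn.Module()
    _assign_attr(torch.nn.Parameter(torch.zeros(3)), root, 'sub.block.weight')
    return root.sub.block.weight  # a zero Parameter of shape (3,)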
class GraphModule(torch.nn.Module):
"""
GraphModule is an nn.Module generated from an fx.Graph. GraphModule has
important attributes:
graph : The graph from which this GraphModule was generated
code : The Python source code for the function generated from `graph`
forward : The Python method generated from `graph`
Note that when `graph` is reassigned, `code` and `forward` will be automatically
regenerated.
"""
def __new__(cls: 'Type[GraphModule]', *args, **kwargs):
# each instance of a graph module needs its own forward method
# so create a new singleton class for each instance.
# it is a subclass of the user-defined class, the only difference
# is an extra layer to install the forward method
class GraphModuleImpl(cls): # type: ignore
pass
return super().__new__(GraphModuleImpl)
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph):
"""
Construct a GraphModule.
root - `root` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
- In the case that `root` is a Module, any references to Module-based objects (via qualified
name) in the Graph's Nodes' `target` field will be copied over from the respective place
within `root`'s Module hierarchy into the GraphModule's module hierarchy.
- In the case that `root` is a dict, the qualified name found in a Node's `target` will be
looked up directly in the dict's keys. The object mapped to by the Dict will be copied
over into the appropriate place within the GraphModule's module hierarchy.
graph - `graph` contains the nodes this GraphModule should use for code generation
"""
super().__init__()
if isinstance(root, torch.nn.Module):
if hasattr(root, 'training'):
self.training = root.training
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
_copy_attr(root, self, node.target)
elif isinstance(root, dict):
targets_to_copy = []
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
if node.target not in root:
raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +
' but that target was not provided in `root`!')
targets_to_copy.append(node.target)
# Sort targets in ascending order of the # of atoms.
# This will ensure that less deeply nested attributes are assigned
# before more deeply nested attributes. For example, foo.bar
# will be assigned before foo.bar.baz. Otherwise, we might assign
# the user-provided `foo.bar` and wipe out the previously-assigned
# `foo.bar.baz`
targets_to_copy.sort(key=lambda t: t.count('.'))
for target_to_copy in targets_to_copy:
_assign_attr(root[target_to_copy], self, target_to_copy)
else:
raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')
self.graph = graph
# TorchScript breaks trying to compile the graph setter because of the
# continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
#
# Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
__jit_unused_properties__ = ['graph']
@property
def graph(self):
return self._graph
@graph.setter
def graph(self, val) -> None:
self._graph = val
body, result, free_variables = self._graph.python_code(root_module='self')
body = '\n'.join(' ' + line for line in body.split('\n')) + '\n'
self.code = f"""\
def forward(self, {', '.join(free_variables)}):
{body}
return {result}
"""
cls = type(self)
cls.forward = _forward_from_src(self.code)
def __reduce__(self):
dict_without_graph = self.__dict__.copy()
del dict_without_graph['_graph']
return (deserialize_graphmodule, (dict_without_graph,))
# because __reduce__ is defined for serialization,
# we need to define deepcopy otherwise it will call __reduce__
# and cause symbolic tracing to occur every time we try to copy the object
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return GraphModule(fake_mod, self.graph)
def __copy__(self):
return GraphModule(self, self.graph)
def __str__(self) -> str:
orig_str = super().__str__()
return '\n'.join([orig_str, self.code])
# workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
| 41.268182 | 107 | 0.651614 | 1,228 | 9,079 | 4.638436 | 0.265472 | 0.019663 | 0.027388 | 0.010007 | 0.135534 | 0.107795 | 0.107795 | 0.091994 | 0.091994 | 0.091994 | 0 | 0.002236 | 0.261042 | 9,079 | 219 | 108 | 41.456621 | 0.846773 | 0.43474 | 0 | 0.172414 | 0 | 0 | 0.064922 | 0.010347 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.137931 | false | 0.017241 | 0.060345 | 0.025862 | 0.353448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |