hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0b2bfc27d686dbbcf69621c567c9d6c54f9e13
| 1,066
|
py
|
Python
|
admindash/views.py
|
ricksaha2000/StayConnected
|
f5632cca785d8c4679a2edb8ab33d1321603c658
|
[
"MIT"
] | null | null | null |
admindash/views.py
|
ricksaha2000/StayConnected
|
f5632cca785d8c4679a2edb8ab33d1321603c658
|
[
"MIT"
] | 5
|
2021-03-19T11:20:25.000Z
|
2022-02-10T10:32:50.000Z
|
admindash/views.py
|
ricksaha2000/StayConnected
|
f5632cca785d8c4679a2edb8ab33d1321603c658
|
[
"MIT"
] | 1
|
2020-06-07T11:08:04.000Z
|
2020-06-07T11:08:04.000Z
|
from django.shortcuts import render
from meetups.models import Meetup
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from meetups.models import Meetup
from django.views.decorators.csrf import csrf_exempt
from meetups.models import Meetup
# Create your views here.
def home(request):
    """Render the admin dashboard listing every Meetup event."""
    all_events = Meetup.objects.all()
    context = {"events": all_events}
    return render(request,'admindash/admin.html',context)
@csrf_exempt
def pie_chart(request):
    """Render pie-chart data for the five meetups with the highest count."""
    top_events = Meetup.objects.order_by('-count')[:5]
    labels = [event.title for event in top_events]
    data = [event.count for event in top_events]
    return render(request, 'pie_chart.html', {
        'labels': labels,
        'data': data,
    })
@csrf_exempt
def line_chart(request):
    """Render line-chart data for all meetups, ordered by count descending."""
    ordered_events = Meetup.objects.order_by('-count')
    labels = [event.title for event in ordered_events]
    data = [event.count for event in ordered_events]
    return render(request, 'line_chart.html', {
        'labels': labels,
        'data': data,
    })
| 23.688889
| 66
| 0.673546
|
4a0b2d07d9ee8c0dc801d7af3edeaf72dcc3456c
| 78,631
|
py
|
Python
|
lib/sqlalchemy/orm/persistence.py
|
dand-oss/sqlalchemy
|
4fa657ae549d11b09ba3a641a17268f3dc78d3cb
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/persistence.py
|
dand-oss/sqlalchemy
|
4fa657ae549d11b09ba3a641a17268f3dc78d3cb
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/persistence.py
|
dand-oss/sqlalchemy
|
4fa657ae549d11b09ba3a641a17268f3dc78d3cb
|
[
"MIT"
] | 1
|
2021-10-06T07:03:07.000Z
|
2021-10-06T07:03:07.000Z
|
# orm/persistence.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
from itertools import chain
from itertools import groupby
import operator
from . import attributes
from . import evaluator
from . import exc as orm_exc
from . import loading
from . import sync
from .base import state_str
from .. import exc as sa_exc
from .. import future
from .. import sql
from .. import util
from ..engine import result as _result
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import select
from ..sql.base import _entity_namespace_key
from ..sql.base import CompileState
from ..sql.base import Options
from ..sql.dml import DeleteDMLState
from ..sql.dml import UpdateDMLState
from ..sql.elements import BooleanClauseList
def _bulk_insert(
    mapper,
    mappings,
    session_transaction,
    isstates,
    return_defaults,
    render_nulls,
):
    """Emit INSERT statements for a collection of mappings or states.

    ``mappings`` is either a sequence of dictionaries or, when
    ``isstates`` is true, a sequence of state objects whose ``.dict``
    supplies the parameters.  When ``return_defaults`` is also true,
    newly generated identity keys are assigned back onto each state.
    """
    base_mapper = mapper.base_mapper
    if session_transaction.session.connection_callable:
        # per-instance connection routing can't be honored here because
        # bulk operations group many rows onto a single connection
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_insert()"
        )
    if isstates:
        if return_defaults:
            # keep (state, dict) pairs so generated PKs can be written
            # back onto each state at the end
            states = [(state, state.dict) for state in mappings]
            mappings = [dict_ for (state, dict_) in states]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)
    connection = session_transaction.connection(base_mapper)
    # walk each table managed by the hierarchy; skip tables not
    # belonging to this mapper's lineage
    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue
        # re-shape the collected commands: discard the per-record state,
        # mapper and connection in favor of the single bulk mapper and
        # connection used for the whole operation
        records = (
            (
                None,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_pks,
                has_all_defaults,
            )
            for (
                state,
                state_dict,
                params,
                mp,
                conn,
                value_params,
                has_all_pks,
                has_all_defaults,
            ) in _collect_insert_commands(
                table,
                ((None, mapping, mapper, connection) for mapping in mappings),
                bulk=True,
                return_defaults=return_defaults,
                render_nulls=render_nulls,
            )
        )
        _emit_insert_statements(
            base_mapper,
            None,
            super_mapper,
            table,
            records,
            bookkeeping=return_defaults,
        )
    if return_defaults and isstates:
        # build each state's identity key from the now-populated dict
        identity_cls = mapper._identity_class
        identity_props = [p.key for p in mapper._identity_key_props]
        for state, dict_ in states:
            state.key = (
                identity_cls,
                tuple([dict_[key] for key in identity_props]),
            )
def _bulk_update(
    mapper, mappings, session_transaction, isstates, update_changed_only
):
    """Emit UPDATE statements for a collection of mappings or states.

    Rows are matched by primary key (plus the version id attribute,
    when the mapper is versioned); no per-attribute history
    bookkeeping is performed.
    """
    base_mapper = mapper.base_mapper
    search_keys = mapper._primary_key_propkeys
    if mapper._version_id_prop:
        search_keys = {mapper._version_id_prop.key}.union(search_keys)
    def _changed_dict(mapper, state):
        # restrict the dictionary to attributes with pending changes,
        # always retaining the PK / version-id "search" keys needed to
        # locate the row
        return dict(
            (k, v)
            for k, v in state.dict.items()
            if k in state.committed_state or k in search_keys
        )
    if isstates:
        if update_changed_only:
            mappings = [_changed_dict(mapper, state) for state in mappings]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)
    if session_transaction.session.connection_callable:
        # per-instance connection routing can't be honored in bulk mode
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_update()"
        )
    connection = session_transaction.connection(base_mapper)
    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue
        # each record carries the version id pulled straight from the
        # mapping (or None when the mapper is unversioned)
        records = _collect_update_commands(
            None,
            table,
            (
                (
                    None,
                    mapping,
                    mapper,
                    connection,
                    (
                        mapping[mapper._version_id_prop.key]
                        if mapper._version_id_prop
                        else None
                    ),
                )
                for mapping in mappings
            ),
            bulk=True,
        )
        _emit_update_statements(
            base_mapper,
            None,
            super_mapper,
            table,
            records,
            bookkeeping=False,
        )
def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.
    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed. The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.
    """
    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(base_mapper, states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return
    states_to_update = []
    states_to_insert = []
    # partition states into INSERT vs. UPDATE candidates, based on
    # whether each state already has a database identity (or was
    # "row switched" onto an existing row)
    for (
        state,
        dict_,
        mapper,
        connection,
        has_identity,
        row_switch,
        update_version_id,
    ) in _organize_states_for_save(base_mapper, states, uowtransaction):
        if has_identity or row_switch:
            states_to_update.append(
                (state, dict_, mapper, connection, update_version_id)
            )
        else:
            states_to_insert.append((state, dict_, mapper, connection))
    # per table, collect and emit statements; UPDATEs are emitted
    # before INSERTs for each table
    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue
        insert = _collect_insert_commands(table, states_to_insert)
        update = _collect_update_commands(
            uowtransaction, table, states_to_update
        )
        _emit_update_statements(
            base_mapper,
            uowtransaction,
            mapper,
            table,
            update,
        )
        _emit_insert_statements(
            base_mapper,
            uowtransaction,
            mapper,
            table,
            insert,
        )
    # final bookkeeping pass for all states; the trailing boolean flags
    # update (True) vs. insert (False)
    _finalize_insert_update_commands(
        base_mapper,
        uowtransaction,
        chain(
            (
                (state, state_dict, mapper, connection, False)
                for (state, state_dict, mapper, connection) in states_to_insert
            ),
            (
                (state, state_dict, mapper, connection, True)
                for (
                    state,
                    state_dict,
                    mapper,
                    connection,
                    update_version_id,
                ) in states_to_update
            ),
        ),
    )
def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.
    """
    states_to_update = list(
        _organize_states_for_post_update(base_mapper, states, uowtransaction)
    )
    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue
        # augment each record with the committed version id when the
        # mapper is versioned (None otherwise), restricted to states
        # whose mapper actually manages PKs on this table
        update = (
            (
                state,
                state_dict,
                sub_mapper,
                connection,
                mapper._get_committed_state_attr_by_column(
                    state, state_dict, mapper.version_id_col
                )
                if mapper.version_id_col is not None
                else None,
            )
            for state, state_dict, sub_mapper, connection in states_to_update
            if table in sub_mapper._pks_by_table
        )
        update = _collect_post_update_commands(
            base_mapper, uowtransaction, table, update, post_update_cols
        )
        _emit_post_update_statements(
            base_mapper,
            uowtransaction,
            mapper,
            table,
            update,
        )
def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.
    This is called within the context of a UOWTransaction during a
    flush operation.
    """
    states_to_delete = list(
        _organize_states_for_delete(base_mapper, states, uowtransaction)
    )
    table_to_mapper = base_mapper._sorted_tables
    # process tables in reverse of _sorted_tables order (presumably
    # dependency order, so dependent rows go first — confirm against
    # _sorted_tables' construction)
    for table in reversed(list(table_to_mapper.keys())):
        mapper = table_to_mapper[table]
        if table not in mapper._pks_by_table:
            continue
        elif mapper.inherits and mapper.passive_deletes:
            # passive_deletes on an inheriting mapper: rely on the
            # database to remove rows in this table
            continue
        delete = _collect_delete_commands(
            base_mapper, uowtransaction, table, states_to_delete
        )
        _emit_delete_statements(
            base_mapper,
            uowtransaction,
            mapper,
            table,
            delete,
        )
    # fire after_delete events only after all DELETEs have executed
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.
    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.
    """
    for state, dict_, mapper, connection in _connections_for_states(
        base_mapper, uowtransaction, states
    ):
        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)
        row_switch = update_version_id = None
        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)
        if mapper._validate_polymorphic_identity:
            mapper._validate_polymorphic_identity(mapper, state, dict_)
        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if (
            not has_identity
            and instance_key in uowtransaction.session.identity_map
        ):
            instance = uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.was_already_deleted(existing):
                if not uowtransaction.is_deleted(existing):
                    # the existing persistent row is NOT being deleted
                    # in this flush: a genuine conflict, warn only
                    util.warn(
                        "New instance %s with identity key %s conflicts "
                        "with persistent instance %s"
                        % (state_str(state), instance_key, state_str(existing))
                    )
                else:
                    base_mapper._log_debug(
                        "detected row switch for identity %s. "
                        "will update %s, remove %s from "
                        "transaction",
                        instance_key,
                        state_str(state),
                        state_str(existing),
                    )
                    # remove the "delete" flag from the existing element
                    uowtransaction.remove_state_actions(existing)
                    row_switch = existing
        if (has_identity or row_switch) and mapper.version_id_col is not None:
            # capture the committed version id for the UPDATE's WHERE
            # clause; on a row switch, read it from the existing state
            update_version_id = mapper._get_committed_state_attr_by_column(
                row_switch if row_switch else state,
                row_switch.dict if row_switch else dict_,
                mapper.version_id_col,
            )
        yield (
            state,
            dict_,
            mapper,
            connection,
            has_identity,
            row_switch,
            update_version_id,
        )
def _organize_states_for_post_update(base_mapper, states, uowtransaction):
    """Prepare a set of states for a post_update UPDATE pass.

    Each produced record carries the state, its dictionary, its
    mapper, and the connection on which the statement should execute.
    """
    return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    Fires the ``before_delete`` event for each state, then yields
    ``(state, dict, mapper, connection, update_version_id)`` records,
    where ``update_version_id`` is the committed version id value when
    the mapper is versioned, else ``None``.
    """
    for state, dict_, mapper, connection in _connections_for_states(
        base_mapper, uowtransaction, states
    ):
        mapper.dispatch.before_delete(mapper, connection, state)
        update_version_id = (
            mapper._get_committed_state_attr_by_column(
                state, dict_, mapper.version_id_col
            )
            if mapper.version_id_col is not None
            else None
        )
        yield (state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
    table,
    states_to_insert,
    bulk=False,
    return_defaults=False,
    render_nulls=False,
):
    """Identify sets of values to use in INSERT statements for a
    list of states.
    """
    for state, state_dict, mapper, connection in states_to_insert:
        if table not in mapper._pks_by_table:
            continue
        params = {}
        value_params = {}
        propkey_to_col = mapper._propkey_to_col[table]
        eval_none = mapper._insert_cols_evaluating_none[table]
        for propkey in set(propkey_to_col).intersection(state_dict):
            value = state_dict[propkey]
            col = propkey_to_col[propkey]
            # omit None unless the column explicitly evaluates None or
            # the caller asked for NULLs to be rendered
            if value is None and col not in eval_none and not render_nulls:
                continue
            elif not bulk and (
                hasattr(value, "__clause_element__")
                or isinstance(value, sql.ClauseElement)
            ):
                # SQL-expression values are collected apart from plain
                # bound parameters
                value_params[col] = (
                    value.__clause_element__()
                    if hasattr(value, "__clause_element__")
                    else value
                )
            else:
                params[col.key] = value
        if not bulk:
            # for all the columns that have no default and we don't have
            # a value and where "None" is not a special value, add
            # explicit None to the INSERT. This is a legacy behavior
            # which might be worth removing, as it should not be necessary
            # and also produces confusion, given that "missing" and None
            # now have distinct meanings
            for colkey in (
                mapper._insert_cols_as_none[table]
                .difference(params)
                .difference([c.key for c in value_params])
            ):
                params[colkey] = None
        if not bulk or return_defaults:
            # params are in terms of Column key objects, so
            # compare to pk_keys_by_table
            has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
            if mapper.base_mapper.eager_defaults:
                has_all_defaults = mapper._server_default_cols[table].issubset(
                    params
                )
            else:
                has_all_defaults = True
        else:
            has_all_defaults = has_all_pks = True
        if (
            mapper.version_id_generator is not False
            and mapper.version_id_col is not None
            and mapper.version_id_col in mapper._cols_by_table[table]
        ):
            # seed the initial version id for versioned mappers
            params[mapper.version_id_col.key] = mapper.version_id_generator(
                None
            )
        yield (
            state,
            state_dict,
            params,
            mapper,
            connection,
            value_params,
            has_all_pks,
            has_all_defaults,
        )
def _collect_update_commands(
    uowtransaction, table, states_to_update, bulk=False
):
    """Identify sets of values to use in UPDATE statements for a
    list of states.
    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement. Includes some tricky scenarios where the primary
    key of an object might have been changed.
    """
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        value_params = {}
        propkey_to_col = mapper._propkey_to_col[table]
        if bulk:
            # keys here are mapped attribute keys, so
            # look at mapper attribute keys for pk
            params = dict(
                (propkey_to_col[propkey].key, state_dict[propkey])
                for propkey in set(propkey_to_col)
                .intersection(state_dict)
                .difference(mapper._pk_attr_keys_by_table[table])
            )
            has_all_defaults = True
        else:
            params = {}
            # only attributes with committed history are candidates
            # for the SET clause
            for propkey in set(propkey_to_col).intersection(
                state.committed_state
            ):
                value = state_dict[propkey]
                col = propkey_to_col[propkey]
                if hasattr(value, "__clause_element__") or isinstance(
                    value, sql.ClauseElement
                ):
                    value_params[col] = (
                        value.__clause_element__()
                        if hasattr(value, "__clause_element__")
                        else value
                    )
                # guard against values that generate non-__nonzero__
                # objects for __eq__()
                elif (
                    state.manager[propkey].impl.is_equal(
                        value, state.committed_state[propkey]
                    )
                    is not True
                ):
                    params[col.key] = value
            if mapper.base_mapper.eager_defaults:
                has_all_defaults = (
                    mapper._server_onupdate_default_cols[table]
                ).issubset(params)
            else:
                has_all_defaults = True
        if (
            update_version_id is not None
            and mapper.version_id_col in mapper._cols_by_table[table]
        ):
            if not bulk and not (params or value_params):
                # HACK: check for history in other tables, in case the
                # history is only in a different table than the one
                # where the version_id_col is. This logic was lost
                # from 0.9 -> 1.0.0 and restored in 1.0.6.
                for prop in mapper._columntoproperty.values():
                    history = state.manager[prop.key].impl.get_history(
                        state, state_dict, attributes.PASSIVE_NO_INITIALIZE
                    )
                    if history.added:
                        break
                else:
                    # no net change, break
                    continue
            col = mapper.version_id_col
            no_params = not params and not value_params
            # the current (old) version id is matched in the WHERE
            # clause via the col._label parameter
            params[col._label] = update_version_id
            if (
                bulk or col.key not in params
            ) and mapper.version_id_generator is not False:
                val = mapper.version_id_generator(update_version_id)
                params[col.key] = val
            elif mapper.version_id_generator is False and no_params:
                # no version id generator, no values set on the table,
                # and version id wasn't manually incremented.
                # set version id to itself so we get an UPDATE
                # statement
                params[col.key] = update_version_id
        elif not (params or value_params):
            # nothing to SET and no versioning concern; skip this state
            continue
        has_all_pks = True
        expect_pk_cascaded = False
        if bulk:
            # keys here are mapped attribute keys, so
            # look at mapper attribute keys for pk
            pk_params = dict(
                (propkey_to_col[propkey]._label, state_dict.get(propkey))
                for propkey in set(propkey_to_col).intersection(
                    mapper._pk_attr_keys_by_table[table]
                )
            )
        else:
            pk_params = {}
            for col in pks:
                propkey = mapper._columntoproperty[col].key
                history = state.manager[propkey].impl.get_history(
                    state, state_dict, attributes.PASSIVE_OFF
                )
                if history.added:
                    if (
                        not history.deleted
                        or ("pk_cascaded", state, col)
                        in uowtransaction.attributes
                    ):
                        # PK was set but the old value is gone or was
                        # cascaded; match on the new value
                        expect_pk_cascaded = True
                        pk_params[col._label] = history.added[0]
                        params.pop(col.key, None)
                    else:
                        # else, use the old value to locate the row
                        pk_params[col._label] = history.deleted[0]
                        if col in value_params:
                            has_all_pks = False
                else:
                    pk_params[col._label] = history.unchanged[0]
                if pk_params[col._label] is None:
                    raise orm_exc.FlushError(
                        "Can't update table %s using NULL for primary "
                        "key value on column %s" % (table, col)
                    )
        if params or value_params:
            params.update(pk_params)
            yield (
                state,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_defaults,
                has_all_pks,
            )
        elif expect_pk_cascaded:
            # no UPDATE occurs on this table, but we expect that CASCADE rules
            # have changed the primary key of the row; propagate this event to
            # other columns that expect to have been modified. this normally
            # occurs after the UPDATE is emitted however we invoke it here
            # explicitly in the absence of our invoking an UPDATE
            for m, equated_pairs in mapper._table_to_equated[table]:
                sync.populate(
                    state,
                    m,
                    state,
                    m,
                    equated_pairs,
                    uowtransaction,
                    mapper.passive_updates,
                )
def _collect_post_update_commands(
    base_mapper, uowtransaction, table, states_to_update, post_update_cols
):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.
    """
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_update:
        # assert table in mapper._pks_by_table
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False
        for col in mapper._cols_by_table[table]:
            if col in pks:
                # PK values go into the WHERE clause, keyed by _label
                params[col._label] = mapper._get_state_attr_by_column(
                    state, state_dict, col, passive=attributes.PASSIVE_OFF
                )
            elif col in post_update_cols or col.onupdate is not None:
                prop = mapper._columntoproperty[col]
                history = state.manager[prop.key].impl.get_history(
                    state, state_dict, attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        # yield a record only if at least one non-PK column changed
        if hasdata:
            if (
                update_version_id is not None
                and mapper.version_id_col in mapper._cols_by_table[table]
            ):
                col = mapper.version_id_col
                params[col._label] = update_version_id
                if (
                    bool(state.key)
                    and col.key not in params
                    and mapper.version_id_generator is not False
                ):
                    val = mapper.version_id_generator(update_version_id)
                    params[col.key] = val
            yield state, state_dict, mapper, connection, params
def _collect_delete_commands(
base_mapper, uowtransaction, table, states_to_delete
):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for (
state,
state_dict,
mapper,
connection,
update_version_id,
) in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[
col.key
] = value = mapper._get_committed_state_attr_by_column(
state, state_dict, col
)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col)
)
if (
update_version_id is not None
and mapper.version_id_col in mapper._cols_by_table[table]
):
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(
    base_mapper,
    uowtransaction,
    mapper,
    table,
    update,
    bookkeeping=True,
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    execution_options = {"compiled_cache": base_mapper._compiled_cache}
    def update_stmt():
        # WHERE clause: all PK columns plus, when versioning is active,
        # the version id column, each as a bound parameter keyed by
        # the column's _label
        clauses = BooleanClauseList._construct_raw(operators.and_)
        for col in mapper._pks_by_table[table]:
            clauses.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clauses.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update().where(clauses)
        return stmt
    cached_stmt = base_mapper._memo(("update", table), update_stmt)
    # batch records that can share one statement execution; groupby
    # only merges adjacent records, preserving overall order
    for (
        (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks),
        records,
    ) in groupby(
        update,
        lambda rec: (
            rec[4], # connection
            set(rec[2]), # set of parameter keys
            bool(rec[5]), # whether or not we have "value" parameters
            rec[6], # has_all_defaults
            rec[7], # has all pks
        ),
    ):
        rows = 0
        records = list(records)
        statement = cached_stmt
        return_defaults = False
        # request RETURNING / return_defaults when missing PKs or
        # server-side defaults (including a new version id) are needed
        if not has_all_pks:
            statement = statement.return_defaults()
            return_defaults = True
        elif (
            bookkeeping
            and not has_all_defaults
            and mapper.base_mapper.eager_defaults
        ):
            statement = statement.return_defaults()
            return_defaults = True
        elif mapper.version_id_col is not None:
            statement = statement.return_defaults(mapper.version_id_col)
            return_defaults = True
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if not return_defaults
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        allow_multirow = has_all_defaults and not needs_version_id
        if hasvalue:
            # records with inline SQL-expression values execute one at
            # a time, since each statement's VALUES differ
            for (
                state,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_defaults,
                has_all_pks,
            ) in records:
                c = connection._execute_20(
                    statement.values(value_params),
                    params,
                    execution_options=execution_options,
                )
                if bookkeeping:
                    _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c,
                        c.context.compiled_parameters[0],
                        value_params,
                        True,
                        c.returned_defaults,
                    )
                rows += c.rowcount
                check_rowcount = assert_singlerow
        else:
            if not allow_multirow:
                check_rowcount = assert_singlerow
                # one execution per record
                for (
                    state,
                    state_dict,
                    params,
                    mapper,
                    connection,
                    value_params,
                    has_all_defaults,
                    has_all_pks,
                ) in records:
                    c = connection._execute_20(
                        statement, params, execution_options=execution_options
                    )
                    # TODO: why with bookkeeping=False?
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params,
                            True,
                            c.returned_defaults,
                        )
                    rows += c.rowcount
            else:
                # executemany() path: one statement, many parameter sets
                multiparams = [rec[2] for rec in records]
                check_rowcount = assert_multirow or (
                    assert_singlerow and len(multiparams) == 1
                )
                c = connection._execute_20(
                    statement, multiparams, execution_options=execution_options
                )
                rows += c.rowcount
                for (
                    state,
                    state_dict,
                    params,
                    mapper,
                    connection,
                    value_params,
                    has_all_defaults,
                    has_all_pks,
                ) in records:
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params,
                            True,
                            c.returned_defaults
                            if not c.context.executemany
                            else None,
                        )
        if check_rowcount:
            # a mismatch indicates a stale row (e.g. version id no
            # longer matches) -> raise
            if rows != len(records):
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
def _emit_insert_statements(
    base_mapper,
    uowtransaction,
    mapper,
    table,
    insert,
    bookkeeping=True,
):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands()."""
    cached_stmt = base_mapper._memo(("insert", table), table.insert)
    execution_options = {"compiled_cache": base_mapper._compiled_cache}
    # batch adjacent records that can share one statement execution
    for (
        (connection, pkeys, hasvalue, has_all_pks, has_all_defaults),
        records,
    ) in groupby(
        insert,
        lambda rec: (
            rec[4], # connection
            set(rec[2]), # parameter keys
            bool(rec[5]), # whether we have "value" parameters
            rec[6],
            rec[7],
        ),
    ):
        statement = cached_stmt
        if (
            not bookkeeping
            or (
                has_all_defaults
                or not base_mapper.eager_defaults
                or not connection.dialect.implicit_returning
            )
            and has_all_pks
            and not hasvalue
        ):
            # the "we don't need newly generated values back" section.
            # here we have all the PKs, all the defaults or we don't want
            # to fetch them, or the dialect doesn't support RETURNING at all
            # so we have to post-fetch / use lastrowid anyway.
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = connection._execute_20(
                statement, multiparams, execution_options=execution_options
            )
            if bookkeeping:
                for (
                    (
                        state,
                        state_dict,
                        params,
                        mapper_rec,
                        conn,
                        value_params,
                        has_all_pks,
                        has_all_defaults,
                    ),
                    last_inserted_params,
                ) in zip(records, c.context.compiled_parameters):
                    if state:
                        _postfetch(
                            mapper_rec,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            last_inserted_params,
                            value_params,
                            False,
                            c.returned_defaults
                            if not c.context.executemany
                            else None,
                        )
                    else:
                        # bulk path: no InstanceState present
                        _postfetch_bulk_save(mapper_rec, state_dict, table)
        else:
            # here, we need defaults and/or pk values back.
            records = list(records)
            if (
                not hasvalue
                and connection.dialect.insert_executemany_returning
                and len(records) > 1
            ):
                do_executemany = True
            else:
                do_executemany = False
            if not has_all_defaults and base_mapper.eager_defaults:
                statement = statement.return_defaults()
            elif mapper.version_id_col is not None:
                statement = statement.return_defaults(mapper.version_id_col)
            elif do_executemany:
                statement = statement.return_defaults(*table.primary_key)
            if do_executemany:
                multiparams = [rec[2] for rec in records]
                c = connection._execute_20(
                    statement, multiparams, execution_options=execution_options
                )
                if bookkeeping:
                    for (
                        (
                            state,
                            state_dict,
                            params,
                            mapper_rec,
                            conn,
                            value_params,
                            has_all_pks,
                            has_all_defaults,
                        ),
                        last_inserted_params,
                        inserted_primary_key,
                        returned_defaults,
                    ) in util.zip_longest(
                        records,
                        c.context.compiled_parameters,
                        c.inserted_primary_key_rows,
                        c.returned_defaults_rows or (),
                    ):
                        # populate PK attributes that were not already
                        # present with the generated values
                        for pk, col in zip(
                            inserted_primary_key,
                            mapper._pks_by_table[table],
                        ):
                            prop = mapper_rec._columntoproperty[col]
                            if state_dict.get(prop.key) is None:
                                state_dict[prop.key] = pk
                        if state:
                            _postfetch(
                                mapper_rec,
                                uowtransaction,
                                table,
                                state,
                                state_dict,
                                c,
                                last_inserted_params,
                                value_params,
                                False,
                                returned_defaults,
                            )
                        else:
                            _postfetch_bulk_save(mapper_rec, state_dict, table)
            else:
                # one INSERT per record
                for (
                    state,
                    state_dict,
                    params,
                    mapper_rec,
                    connection,
                    value_params,
                    has_all_pks,
                    has_all_defaults,
                ) in records:
                    if value_params:
                        result = connection._execute_20(
                            statement.values(value_params),
                            params,
                            execution_options=execution_options,
                        )
                    else:
                        result = connection._execute_20(
                            statement,
                            params,
                            execution_options=execution_options,
                        )
                    primary_key = result.inserted_primary_key
                    assert primary_key
                    # write generated PK values back into the state dict
                    for pk, col in zip(
                        primary_key, mapper._pks_by_table[table]
                    ):
                        prop = mapper_rec._columntoproperty[col]
                        if (
                            col in value_params
                            or state_dict.get(prop.key) is None
                        ):
                            state_dict[prop.key] = pk
                    if bookkeeping:
                        if state:
                            _postfetch(
                                mapper_rec,
                                uowtransaction,
                                table,
                                state,
                                state_dict,
                                result,
                                result.context.compiled_parameters[0],
                                value_params,
                                False,
                                result.returned_defaults
                                if not result.context.executemany
                                else None,
                            )
                        else:
                            _postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(
    base_mapper, uowtransaction, mapper, table, update
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""
    execution_options = {"compiled_cache": base_mapper._compiled_cache}
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def update_stmt():
        # WHERE clause on PK columns (plus version id when versioned)
        clauses = BooleanClauseList._construct_raw(operators.and_)
        for col in mapper._pks_by_table[table]:
            clauses.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clauses.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update().where(clauses)
        if mapper.version_id_col is not None:
            stmt = stmt.return_defaults(mapper.version_id_col)
        return stmt
    statement = base_mapper._memo(("post_update", table), update_stmt)
    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, records in groupby(
        update,
        lambda rec: (rec[3], set(rec[4])), # connection # parameter keys
    ):
        rows = 0
        records = list(records)
        connection = key[0]
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if mapper.version_id_col is None
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        allow_multirow = not needs_version_id or assert_multirow
        if not allow_multirow:
            check_rowcount = assert_singlerow
            # one execution per record
            for state, state_dict, mapper_rec, connection, params in records:
                c = connection._execute_20(
                    statement, params, execution_options=execution_options
                )
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
                rows += c.rowcount
        else:
            # executemany() path: single statement, many parameter sets
            multiparams = [
                params
                for state, state_dict, mapper_rec, conn, params in records
            ]
            check_rowcount = assert_multirow or (
                assert_singlerow and len(multiparams) == 1
            )
            c = connection._execute_20(
                statement, multiparams, execution_options=execution_options
            )
            rows += c.rowcount
            for state, state_dict, mapper_rec, connection, params in records:
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
        if check_rowcount:
            # mismatch implies a concurrently modified / stale row
            if rows != len(records):
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
def _emit_delete_statements(
    base_mapper, uowtransaction, mapper, table, delete
):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands().

    Builds (and memoizes per-table) a parameterized DELETE against the
    primary key columns, optionally including the version id column, then
    executes it grouped by connection and verifies matched rowcounts
    where the dialect supports it.
    """
    # version check applies only when the version column actually lives in
    # this table (it may belong to a different table of the hierarchy)
    need_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )

    def delete_stmt():
        # DELETE ... WHERE <pk cols> [AND version col], all values as bind
        # parameters so the compiled statement can be cached and reused
        clauses = BooleanClauseList._construct_raw(operators.and_)

        for col in mapper._pks_by_table[table]:
            clauses.clauses.append(
                col == sql.bindparam(col.key, type_=col.type)
            )

        if need_version_id:
            clauses.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col.key, type_=mapper.version_id_col.type
                )
            )

        return table.delete().where(clauses)

    # statement construction is memoized on the base mapper, keyed by table
    statement = base_mapper._memo(("delete", table), delete_stmt)

    # `delete` records are (params, connection) pairs; group consecutive
    # records sharing a connection so they can be executed together
    for connection, recs in groupby(delete, lambda rec: rec[1]):  # connection
        del_objects = [params for params, connection in recs]

        execution_options = {"compiled_cache": base_mapper._compiled_cache}
        expected = len(del_objects)
        rows_matched = -1
        only_warn = False

        if (
            need_version_id
            and not connection.dialect.supports_sane_multi_rowcount
        ):
            if connection.dialect.supports_sane_rowcount:
                rows_matched = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection._execute_20(
                        statement, params, execution_options=execution_options
                    )
                    rows_matched += c.rowcount
            else:
                # rowcount unusable: warn and execute without verification
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified."
                    % connection.dialect.dialect_description
                )
                connection._execute_20(
                    statement, del_objects, execution_options=execution_options
                )
        else:
            # executemany path; rowcount is the aggregate matched count
            c = connection._execute_20(
                statement, del_objects, execution_options=execution_options
            )

            if not need_version_id:
                only_warn = True

            rows_matched = c.rowcount

        if (
            base_mapper.confirm_deleted_rows
            and rows_matched > -1
            and expected != rows_matched
            and (
                connection.dialect.supports_sane_multi_rowcount
                or len(del_objects) == 1
            )
        ):
            # TODO: why does this "only warn" if versioning is turned off,
            # whereas the UPDATE raises?
            if only_warn:
                util.warn(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning."
                    % (table.description, expected, rows_matched)
                )
            else:
                raise orm_exc.StaleDataError(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning."
                    % (table.description, expected, rows_matched)
                )
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.
    """
    for state, state_dict, mapper, connection, has_identity in states:

        if mapper._readonly_props:
            # expire read-only attributes whose value may have changed
            # server-side during the flush
            readonly = state.unmodified_intersection(
                [
                    p.key
                    for p in mapper._readonly_props
                    if (
                        p.expire_on_flush
                        and (not p.deferred or p.key in state.dict)
                    )
                    or (
                        not p.expire_on_flush
                        and not p.deferred
                        and p.key not in state.dict
                    )
                ]
            )
            if readonly:
                state._expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled, load
        # all expired cols.  Else if we have a version_id_col, make sure
        # it isn't expired.
        toload_now = []

        if base_mapper.eager_defaults:
            toload_now.extend(
                state._unloaded_non_object.intersection(
                    mapper._server_default_plus_onupdate_propkeys
                )
            )

        if (
            mapper.version_id_col is not None
            and mapper.version_id_generator is False
        ):
            # server-generated version values must be present for stale
            # checks; reload the version attribute if it's unloaded
            if mapper._version_id_prop.key in state.unloaded:
                toload_now.extend([mapper._version_id_prop.key])

        if toload_now:
            # emit a SELECT to refresh the listed attributes in-place
            state.key = base_mapper._identity_key_from_state(state)
            stmt = future.select(mapper).apply_labels()
            loading.load_on_ident(
                uowtransaction.session,
                stmt,
                state.key,
                refresh_state=state,
                only_load_props=toload_now,
            )

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)

        if (
            mapper.version_id_generator is False
            and mapper.version_id_col is not None
        ):
            # with server-side versioning, a NULL version after flush means
            # the server did not supply a value - that's a flush error
            if state_dict[mapper._version_id_prop.key] is None:
                raise orm_exc.FlushError(
                    "Instance does not contain a non-NULL version value"
                )
def _postfetch_post_update(
    mapper, uowtransaction, table, state, dict_, result, params
):
    """Refresh or expire attributes on *state* after a post-UPDATE
    statement, populating prefetched (client-side default) values and
    expiring columns the server may have changed."""
    # state already marked deleted in this flush: nothing to refresh
    if uowtransaction.is_deleted(state):
        return

    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch

    if (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    ):
        # the new version value was sent as a bind parameter; treat it
        # like a prefetched column so it's copied into the instance dict
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []

    # copy prefetched values from the statement's parameters into the
    # instance dict
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)

    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs
        )

    if postfetch_cols:
        # server-generated values are not known here; expire so they're
        # loaded on next access
        state._expire_attributes(
            state.dict,
            [
                mapper._columntoproperty[c].key
                for c in postfetch_cols
                if c in mapper._columntoproperty
            ],
        )
def _postfetch(
    mapper,
    uowtransaction,
    table,
    state,
    dict_,
    result,
    params,
    value_params,
    isupdate,
    returned_defaults,
):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state.

    Values obtained via RETURNING or known from bound parameters are
    written directly into the instance dict; anything else that the
    server may have generated is expired for later load.
    """
    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch
    returning_cols = result.context.compiled.returning

    if (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    ):
        # the new version value was sent as a bind parameter; include it
        # with the prefetched columns so it's copied into the dict
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []

    if returning_cols:
        row = returned_defaults
        if row is not None:
            for row_value, col in zip(row, returning_cols):
                # pk cols returned from insert are handled
                # distinctly, don't step on the values here
                if col.primary_key and result.context.isinsert:
                    continue

                # note that columns can be in the "return defaults" that are
                # not mapped to this mapper, typically because they are
                # "excluded", which can be specified directly or also occurs
                # when using declarative w/ single table inheritance
                prop = mapper._columntoproperty.get(col)
                if prop:
                    dict_[prop.key] = row_value
                    if refresh_flush:
                        load_evt_attrs.append(prop.key)

    # copy prefetched (client-side default) values from the bound
    # parameters into the instance dict
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)

    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs
        )

    if isupdate and value_params:
        # explicitly suit the use case specified by
        # [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING
        # database which are set to themselves in order to do a version bump.
        postfetch_cols.extend(
            [
                col
                for col in value_params
                if col.primary_key and col not in returning_cols
            ]
        )

    if postfetch_cols:
        # server-generated values not available here; expire for later load
        state._expire_attributes(
            state.dict,
            [
                mapper._columntoproperty[c].key
                for c in postfetch_cols
                if c in mapper._columntoproperty
            ],
        )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(
            state,
            m,
            state,
            m,
            equated_pairs,
            uowtransaction,
            mapper.passive_updates,
        )
def _postfetch_bulk_save(mapper, dict_, table):
    """Propagate equated key values across tables for a bulk save,
    operating directly on the parameter dictionary rather than on an
    instance state."""
    for related_mapper, pairs in mapper._table_to_equated[table]:
        sync.bulk_populate_inherit_keys(dict_, related_mapper, pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
    """Yield (state, state.dict, mapper, connection) tuples for *states*.

    States are ordered via _sort_states, then each is paired with the
    connection it should use within the given unit of work transaction.
    A per-state connection callable configured on the session, if any,
    takes precedence over the transaction's single connection.
    """
    per_state_callable = uowtransaction.session.connection_callable

    if not per_state_callable:
        # one shared connection for every state
        connection = uowtransaction.transaction.connection(base_mapper)

    for state in _sort_states(base_mapper, states):
        if per_state_callable:
            connection = per_state_callable(base_mapper, state.obj())
        yield state, state.dict, state.manager.mapper, connection
def _sort_states(mapper, states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
try:
persistent_sorted = sorted(
persistent, key=mapper._persistent_sortkey_fn
)
except TypeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Could not sort objects by primary key; primary key "
"values must be sortable in Python (was: %s)" % err
),
replace_context=err,
)
return (
sorted(pending, key=operator.attrgetter("insert_order"))
+ persistent_sorted
)
_EMPTY_DICT = util.immutabledict()
class BulkUDCompileState(CompileState):
    class default_update_options(Options):
        """Option bag carried through a bulk ORM UPDATE/DELETE execution;
        populated incrementally by the pre/post synchronize steps."""

        # synchronization strategy: "evaluate", "fetch", or False
        _synchronize_session = "evaluate"
        # whether to autoflush the session before executing
        _autoflush = True
        # mapper for the entity being updated/deleted
        _subject_mapper = None
        # (attribute/column, value) pairs resolved from the SET clause
        _resolved_values = _EMPTY_DICT
        # same values keyed by property name
        _resolved_keys_as_propnames = _EMPTY_DICT
        # per-property Python-side evaluators for SET values
        _value_evaluators = _EMPTY_DICT
        # objects matched in-session by the "evaluate" strategy
        _matched_objects = None
        # primary key rows matched by the "fetch" strategy
        _matched_rows = None
        # identity token filter, when one is in effect
        _refresh_identity_token = None
    @classmethod
    def orm_pre_session_exec(
        cls,
        session,
        statement,
        params,
        execution_options,
        bind_arguments,
        is_reentrant_invoke,
    ):
        """Pre-execution step for a bulk ORM UPDATE/DELETE.

        Resolves execution options, validates the synchronize_session
        strategy, sets up bind arguments, autoflushes, annotates the
        statement, and runs the chosen strategy's pre-synchronize step.
        Returns the (possibly annotated) statement and updated execution
        options.
        """
        # a re-entrant invocation (e.g. an extension calling back into
        # session.execute) has already gone through this step
        if is_reentrant_invoke:
            return statement, execution_options

        (
            update_options,
            execution_options,
        ) = BulkUDCompileState.default_update_options.from_execution_options(
            "_sa_orm_update_options",
            {"synchronize_session"},
            execution_options,
            statement._execution_options,
        )

        sync = update_options._synchronize_session
        if sync is not None:
            if sync not in ("evaluate", "fetch", False):
                raise sa_exc.ArgumentError(
                    "Valid strategies for session synchronization "
                    "are 'evaluate', 'fetch', False"
                )

        bind_arguments["clause"] = statement
        try:
            plugin_subject = statement._propagate_attrs["plugin_subject"]
        except KeyError:
            assert False, "statement had 'orm' plugin but no plugin_subject"
        else:
            bind_arguments["mapper"] = plugin_subject.mapper

        update_options += {"_subject_mapper": plugin_subject.mapper}

        if update_options._autoflush:
            session._autoflush()

        # record the strategy on the statement so the compiler can see it
        statement = statement._annotate(
            {"synchronize_session": update_options._synchronize_session}
        )

        # this stage of the execution is called before the do_orm_execute event
        # hook.  meaning for an extension like horizontal sharding, this step
        # happens before the extension splits out into multiple backends and
        # runs only once.  if we do pre_sync_fetch, we execute a SELECT
        # statement, which the horizontal sharding extension splits amongst the
        # shards and combines the results together.

        if update_options._synchronize_session == "evaluate":
            update_options = cls._do_pre_synchronize_evaluate(
                session,
                statement,
                params,
                execution_options,
                bind_arguments,
                update_options,
            )
        elif update_options._synchronize_session == "fetch":
            update_options = cls._do_pre_synchronize_fetch(
                session,
                statement,
                params,
                execution_options,
                bind_arguments,
                update_options,
            )

        return (
            statement,
            util.immutabledict(execution_options).union(
                dict(_sa_orm_update_options=update_options)
            ),
        )
    @classmethod
    def orm_setup_cursor_result(
        cls,
        session,
        statement,
        params,
        execution_options,
        bind_arguments,
        result,
    ):
        """Post-execution step: run the chosen strategy's post-synchronize
        handler against the cursor result, then return the result."""
        # this stage of the execution is called after the
        # do_orm_execute event hook.  meaning for an extension like
        # horizontal sharding, this step happens *within* the horizontal
        # sharding event handler which calls session.execute() re-entrantly
        # and will occur for each backend individually.
        # the sharding extension then returns its own merged result from the
        # individual ones we return here.
        update_options = execution_options["_sa_orm_update_options"]
        if update_options._synchronize_session == "evaluate":
            cls._do_post_synchronize_evaluate(session, result, update_options)
        elif update_options._synchronize_session == "fetch":
            cls._do_post_synchronize_fetch(session, result, update_options)

        return result
    @classmethod
    def _adjust_for_extra_criteria(cls, global_attributes, ext_info):
        """Apply extra criteria filtering.

        For all distinct single-table-inheritance mappers represented in the
        table being updated or deleted, produce additional WHERE criteria such
        that only the appropriate subtypes are selected from the total results.

        Additionally, add WHERE criteria originating from LoaderCriteriaOptions
        collected from the statement.
        """
        return_crit = ()

        # only adapt criteria when targeting an aliased class
        adapter = ext_info._adapter if ext_info.is_aliased_class else None

        if (
            "additional_entity_criteria",
            ext_info.mapper,
        ) in global_attributes:
            return_crit += tuple(
                ae._resolve_where_criteria(ext_info)
                for ae in global_attributes[
                    ("additional_entity_criteria", ext_info.mapper)
                ]
                if ae.include_aliases or ae.entity is ext_info
            )

        if ext_info.mapper._single_table_criterion is not None:
            # restrict to the appropriate single-table-inheritance subtype
            return_crit += (ext_info.mapper._single_table_criterion,)

        if adapter:
            return_crit = tuple(adapter.traverse(crit) for crit in return_crit)

        return return_crit
    @classmethod
    def _do_pre_synchronize_evaluate(
        cls,
        session,
        statement,
        params,
        execution_options,
        bind_arguments,
        update_options,
    ):
        """Pre-execution step for synchronize_session="evaluate".

        Compiles the statement's WHERE criteria into a Python-side
        evaluator, matches in-session objects against it, and (for
        UPDATE) builds per-attribute value evaluators.  Returns a new
        ``update_options`` populated with the matches.
        """
        mapper = update_options._subject_mapper
        target_cls = mapper.class_

        value_evaluators = resolved_keys_as_propnames = _EMPTY_DICT

        try:
            evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
            crit = ()
            if statement._where_criteria:
                crit += statement._where_criteria

            global_attributes = {}
            for opt in statement._with_options:
                if opt._is_criteria_option:
                    opt.get_global_criteria(global_attributes)

            if global_attributes:
                crit += cls._adjust_for_extra_criteria(
                    global_attributes, mapper
                )

            if crit:
                eval_condition = evaluator_compiler.process(*crit)
            else:
                # no criteria: every object of the target class matches

                def eval_condition(obj):
                    return True

        except evaluator.UnevaluatableError as err:
            # criteria can't be mirrored in Python; the caller must choose
            # a different synchronize strategy
            util.raise_(
                sa_exc.InvalidRequestError(
                    'Could not evaluate current criteria in Python: "%s". '
                    "Specify 'fetch' or False for the "
                    "synchronize_session execution option." % err
                ),
                from_=err,
            )

        if statement.__visit_name__ == "lambda_element":
            # ._resolved is called on every LambdaElement in order to
            # generate the cache key, so this access does not add
            # additional expense
            effective_statement = statement._resolved
        else:
            effective_statement = statement

        if effective_statement.__visit_name__ == "update":
            resolved_values = cls._get_resolved_values(
                mapper, effective_statement
            )
            value_evaluators = {}
            resolved_keys_as_propnames = cls._resolved_keys_as_propnames(
                mapper, resolved_values
            )
            for key, value in resolved_keys_as_propnames:
                try:
                    _evaluator = evaluator_compiler.process(
                        coercions.expect(roles.ExpressionElementRole, value)
                    )
                except evaluator.UnevaluatableError:
                    # unevaluatable SET value: the attribute will simply
                    # be expired after execution
                    pass
                else:
                    value_evaluators[key] = _evaluator

        # TODO: detect when the where clause is a trivial primary key match.
        matched_objects = [
            state.obj()
            for state in session.identity_map.all_states()
            if state.mapper.isa(mapper)
            and eval_condition(state.obj())
            and (
                update_options._refresh_identity_token is None
                # TODO: coverage for the case where horiziontal sharding
                # invokes an update() or delete() given an explicit identity
                # token up front
                or state.identity_token
                == update_options._refresh_identity_token
            )
        ]
        return update_options + {
            "_matched_objects": matched_objects,
            "_value_evaluators": value_evaluators,
            "_resolved_keys_as_propnames": resolved_keys_as_propnames,
        }
@classmethod
def _get_resolved_values(cls, mapper, statement):
if statement._multi_values:
return []
elif statement._ordered_values:
iterator = statement._ordered_values
elif statement._values:
iterator = statement._values.items()
else:
return []
values = []
if iterator:
for k, v in iterator:
if mapper:
if isinstance(k, util.string_types):
desc = _entity_namespace_key(mapper, k)
values.extend(desc._bulk_update_tuples(v))
elif "entity_namespace" in k._annotations:
k_anno = k._annotations
attr = _entity_namespace_key(
k_anno["entity_namespace"], k_anno["orm_key"]
)
values.extend(attr._bulk_update_tuples(v))
else:
values.append((k, v))
else:
values.append((k, v))
return values
@classmethod
def _resolved_keys_as_propnames(cls, mapper, resolved_values):
values = []
for k, v in resolved_values:
if isinstance(k, attributes.QueryableAttribute):
values.append((k.key, v))
continue
elif hasattr(k, "__clause_element__"):
k = k.__clause_element__()
if mapper and isinstance(k, expression.ColumnElement):
try:
attr = mapper._columntoproperty[k]
except orm_exc.UnmappedColumnError:
pass
else:
values.append((attr.key, v))
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % k
)
return values
@classmethod
def _do_pre_synchronize_fetch(
cls,
session,
statement,
params,
execution_options,
bind_arguments,
update_options,
):
mapper = update_options._subject_mapper
select_stmt = (
select(*(mapper.primary_key + (mapper.select_identity_token,)))
.select_from(mapper)
.options(*statement._with_options)
)
select_stmt._where_criteria = statement._where_criteria
def skip_for_full_returning(orm_context):
bind = orm_context.session.get_bind(**orm_context.bind_arguments)
if bind.dialect.full_returning:
return _result.null_result()
else:
return None
result = session.execute(
select_stmt,
params,
execution_options,
bind_arguments,
_add_event=skip_for_full_returning,
)
matched_rows = result.fetchall()
value_evaluators = _EMPTY_DICT
if statement.__visit_name__ == "lambda_element":
# ._resolved is called on every LambdaElement in order to
# generate the cache key, so this access does not add
# additional expense
effective_statement = statement._resolved
else:
effective_statement = statement
if effective_statement.__visit_name__ == "update":
target_cls = mapper.class_
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
resolved_values = cls._get_resolved_values(
mapper, effective_statement
)
resolved_keys_as_propnames = cls._resolved_keys_as_propnames(
mapper, resolved_values
)
resolved_keys_as_propnames = cls._resolved_keys_as_propnames(
mapper, resolved_values
)
value_evaluators = {}
for key, value in resolved_keys_as_propnames:
try:
_evaluator = evaluator_compiler.process(
coercions.expect(roles.ExpressionElementRole, value)
)
except evaluator.UnevaluatableError:
pass
else:
value_evaluators[key] = _evaluator
else:
resolved_keys_as_propnames = _EMPTY_DICT
return update_options + {
"_value_evaluators": value_evaluators,
"_matched_rows": matched_rows,
"_resolved_keys_as_propnames": resolved_keys_as_propnames,
}
@CompileState.plugin_for("orm", "update")
class BulkORMUpdate(UpdateDMLState, BulkUDCompileState):
    @classmethod
    def create_for_statement(cls, statement, compiler, **kw):
        """Build the ORM-enabled compile state for a bulk UPDATE:
        re-targets the statement at the mapper's local table, resolves
        SET values, applies extra/single-table-inheritance criteria, and
        adds RETURNING of the primary key for the "fetch" strategy on
        full-RETURNING dialects."""
        self = cls.__new__(cls)

        ext_info = statement.table._annotations["parententity"]

        self.mapper = mapper = ext_info.mapper

        self.extra_criteria_entities = {}

        self._resolved_values = cls._get_resolved_values(mapper, statement)

        extra_criteria_attributes = {}

        for opt in statement._with_options:
            if opt._is_criteria_option:
                opt.get_global_criteria(extra_criteria_attributes)

        if not statement._preserve_parameter_order and statement._values:
            self._resolved_values = dict(self._resolved_values)

        # clone the statement without invoking __init__, then point it at
        # the mapper's local table
        new_stmt = sql.Update.__new__(sql.Update)
        new_stmt.__dict__.update(statement.__dict__)
        new_stmt.table = mapper.local_table

        # note if the statement has _multi_values, these
        # are passed through to the new statement, which will then raise
        # InvalidRequestError because UPDATE doesn't support multi_values
        # right now.
        if statement._ordered_values:
            new_stmt._ordered_values = self._resolved_values
        elif statement._values:
            new_stmt._values = self._resolved_values

        new_crit = cls._adjust_for_extra_criteria(
            extra_criteria_attributes, mapper
        )
        if new_crit:
            new_stmt = new_stmt.where(*new_crit)

        # if we are against a lambda statement we might not be the
        # topmost object that received per-execute annotations
        if (
            compiler._annotations.get("synchronize_session", None) == "fetch"
            and compiler.dialect.full_returning
        ):
            new_stmt = new_stmt.returning(*mapper.primary_key)

        UpdateDMLState.__init__(self, new_stmt, compiler, **kw)

        return self
    @classmethod
    def _do_post_synchronize_evaluate(cls, session, result, update_options):
        """Apply the UPDATE's SET values to the objects matched by the
        "evaluate" strategy, committing evaluable values in place and
        expiring the rest."""
        states = set()
        evaluated_keys = list(update_options._value_evaluators.keys())
        values = update_options._resolved_keys_as_propnames
        attrib = set(k for k, v in values)
        for obj in update_options._matched_objects:

            state, dict_ = (
                attributes.instance_state(obj),
                attributes.instance_dict(obj),
            )

            # the evaluated states were gathered across all identity tokens.
            # however the post_sync events are called per identity token,
            # so filter.
            if (
                update_options._refresh_identity_token is not None
                and state.identity_token
                != update_options._refresh_identity_token
            ):
                continue

            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(evaluated_keys)
            for key in to_evaluate:
                dict_[key] = update_options._value_evaluators[key](obj)

            state.manager.dispatch.refresh(state, None, to_evaluate)

            state._commit(dict_, list(to_evaluate))

            # SET keys that could not be evaluated in Python get expired
            # so their new values load on next access
            to_expire = attrib.intersection(dict_).difference(to_evaluate)
            if to_expire:
                state._expire_attributes(dict_, to_expire)

            states.add(state)
        session._register_altered(states)
    @classmethod
    def _do_post_synchronize_fetch(cls, session, result, update_options):
        """Apply the UPDATE's SET values to in-session objects matched by
        the "fetch" strategy, using RETURNING rows when available or the
        pre-SELECTed rows otherwise."""
        target_mapper = update_options._subject_mapper

        states = set()
        evaluated_keys = list(update_options._value_evaluators.keys())

        if result.returns_rows:
            # full-RETURNING dialect: the UPDATE itself reported the
            # matched primary keys
            matched_rows = [
                tuple(row) + (update_options._refresh_identity_token,)
                for row in result.all()
            ]
        else:
            matched_rows = update_options._matched_rows

        # map each matched (pk..., identity_token) row to an in-session
        # object, skipping rows not present in the identity map
        objs = [
            session.identity_map[identity_key]
            for identity_key in [
                target_mapper.identity_key_from_primary_key(
                    list(primary_key),
                    identity_token=identity_token,
                )
                for primary_key, identity_token in [
                    (row[0:-1], row[-1]) for row in matched_rows
                ]
                if update_options._refresh_identity_token is None
                or identity_token == update_options._refresh_identity_token
            ]
            if identity_key in session.identity_map
        ]

        values = update_options._resolved_keys_as_propnames
        attrib = set(k for k, v in values)

        for obj in objs:
            state, dict_ = (
                attributes.instance_state(obj),
                attributes.instance_dict(obj),
            )

            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(evaluated_keys)
            for key in to_evaluate:
                dict_[key] = update_options._value_evaluators[key](obj)
            state.manager.dispatch.refresh(state, None, to_evaluate)

            state._commit(dict_, list(to_evaluate))

            # SET keys that could not be evaluated in Python get expired
            to_expire = attrib.intersection(dict_).difference(to_evaluate)
            if to_expire:
                state._expire_attributes(dict_, to_expire)

            states.add(state)
        session._register_altered(states)
@CompileState.plugin_for("orm", "delete")
class BulkORMDelete(DeleteDMLState, BulkUDCompileState):
    @classmethod
    def create_for_statement(cls, statement, compiler, **kw):
        """Build the ORM-enabled compile state for a bulk DELETE:
        applies extra/single-table-inheritance criteria and adds
        RETURNING of the primary key for the "fetch" strategy on
        full-RETURNING dialects."""
        self = cls.__new__(cls)

        ext_info = statement.table._annotations["parententity"]
        self.mapper = mapper = ext_info.mapper

        self.extra_criteria_entities = {}

        extra_criteria_attributes = {}

        for opt in statement._with_options:
            if opt._is_criteria_option:
                opt.get_global_criteria(extra_criteria_attributes)

        new_crit = cls._adjust_for_extra_criteria(
            extra_criteria_attributes, mapper
        )
        if new_crit:
            statement = statement.where(*new_crit)

        if (
            mapper
            and compiler._annotations.get("synchronize_session", None)
            == "fetch"
            and compiler.dialect.full_returning
        ):
            statement = statement.returning(*mapper.primary_key)

        DeleteDMLState.__init__(self, statement, compiler, **kw)

        return self
@classmethod
def _do_post_synchronize_evaluate(cls, session, result, update_options):
session._remove_newly_deleted(
[
attributes.instance_state(obj)
for obj in update_options._matched_objects
]
)
    @classmethod
    def _do_post_synchronize_fetch(cls, session, result, update_options):
        """Mark in-session objects matched by the "fetch" strategy as
        deleted, using RETURNING rows when available or the pre-SELECTed
        rows otherwise."""
        target_mapper = update_options._subject_mapper

        if result.returns_rows:
            # full-RETURNING dialect: the DELETE itself reported the
            # matched primary keys
            matched_rows = [
                tuple(row) + (update_options._refresh_identity_token,)
                for row in result.all()
            ]
        else:
            matched_rows = update_options._matched_rows

        for row in matched_rows:
            primary_key = row[0:-1]
            identity_token = row[-1]

            # TODO: inline this and call remove_newly_deleted
            # once
            identity_key = target_mapper.identity_key_from_primary_key(
                list(primary_key),
                identity_token=identity_token,
            )
            if identity_key in session.identity_map:
                session._remove_newly_deleted(
                    [
                        attributes.instance_state(
                            session.identity_map[identity_key]
                        )
                    ]
                )
| 33.38896
| 79
| 0.547252
|
4a0b2d99f55b781bc71128cd6af4835240da9e44
| 1,820
|
py
|
Python
|
libneko/pag/abc.py
|
Natsurii/b00t
|
09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4
|
[
"MIT"
] | 1
|
2018-09-22T23:58:55.000Z
|
2018-09-22T23:58:55.000Z
|
libneko/pag/abc.py
|
Natsurii/b00t
|
09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4
|
[
"MIT"
] | null | null | null |
libneko/pag/abc.py
|
Natsurii/b00t
|
09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018-2019 Nekoka.tt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Abstract base classes for the pag module.
"""
__all__ = ("PagABC",)
import weakref
from abc import ABC, abstractmethod
class PagABC(ABC):
    """
    Abstract base for paginator components that keeps a registry of live
    instances.

    Every constructed instance is added to a class-level
    ``weakref.WeakSet``, which is useful for debugging memory usage — a
    serious downside of the pagination system in Neko2.  Dead references
    are discarded automatically, so callers never need to manage the set
    themselves.
    """

    # weak registry of every live instance
    _instances = weakref.WeakSet()

    def __init__(self):
        # register this instance; removal happens automatically when the
        # instance is garbage-collected
        self._instances.add(self)

    @classmethod
    @abstractmethod
    def memory_usage(cls) -> int:
        """Estimate the memory used by the internal content, in bytes."""
        ...
| 33.703704
| 79
| 0.734066
|
4a0b2dae16418e5358e2d7af6c856df4bf92c1b5
| 4,430
|
py
|
Python
|
test/functional/rpc_signrawtransaction.py
|
lulworm/core
|
6a046551e403b336644c2ce8fde60123cc212cc6
|
[
"MIT"
] | 24
|
2018-05-08T15:01:48.000Z
|
2022-03-27T19:33:35.000Z
|
test/functional/rpc_signrawtransaction.py
|
lulworm/core
|
6a046551e403b336644c2ce8fde60123cc212cc6
|
[
"MIT"
] | 7
|
2018-01-03T00:06:07.000Z
|
2018-10-06T17:04:40.000Z
|
test/functional/rpc_signrawtransaction.py
|
lulworm/core
|
6a046551e403b336644c2ce8fde60123cc212cc6
|
[
"MIT"
] | 25
|
2017-12-18T12:09:38.000Z
|
2021-05-15T16:44:41.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
    """Functional test exercising the signrawtransaction RPC for both the
    fully-successful case and the script-verification-error case."""

    def set_test_params(self):
        # single node on a clean chain is sufficient; no mining needed
        self.setup_clean_chain = True
        self.num_nodes = 1

    def successful_signing_test(self):
        """Create and sign a valid raw transaction with one input.

        Expected results:

        1) The transaction has a complete set of signatures
        2) No script verification error occurred"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
        ]

        outputs = {'mbTYaNZm7TaPt5Du65aPsL8FNTktufYydC': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)

        # 1) The transaction has a complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], True)

        # 2) No script verification error occurred
        assert 'errors' not in rawTxSigned

    def script_verification_error_test(self):
        """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.

        Expected results:

        3) The transaction has no complete set of signatures
        4) Two script verification errors occurred
        5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
        6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
            # Missing scriptPubKey
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
        ]

        # scripts supplied to signrawtransaction: note the third input's
        # scriptPubKey is deliberately omitted here
        scripts = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
             'scriptPubKey': 'badbadbadbad'}
        ]

        outputs = {'mbTYaNZm7TaPt5Du65aPsL8FNTktufYydC': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)

        # 3) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)

        # 4) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)

        # 5) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]

        # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
        assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
        assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
        assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
        assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])

    def run_test(self):
        self.successful_signing_test()
        self.script_verification_error_test()
if __name__ == '__main__':
    # standard functional-test entry point
    SignRawTransactionsTest().main()
| 42.190476
| 118
| 0.676749
|
4a0b2e9e0e169a699c2e1068274cc0fb9ba052bf
| 1,687
|
py
|
Python
|
spark_auto_mapper_fhir/value_sets/claim_processing_codes.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/value_sets/claim_processing_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/value_sets/claim_processing_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ClaimProcessingCodesCode(GenericTypeCode):
    """
    ClaimProcessingCodes
    From: http://hl7.org/fhir/remittance-outcome in valuesets.xml
        This value set includes Claim Processing Outcome codes.
    """
    def __init__(self, value: AutoMapperTextInputType):
        # Delegate storage of the raw code value to the generic base type.
        super().__init__(value=value)
    """
    http://hl7.org/fhir/remittance-outcome
    """
    # Canonical URI of the FHIR code system this code belongs to.
    codeset: FhirUri = "http://hl7.org/fhir/remittance-outcome"
class ClaimProcessingCodesCodeValues:
    # NOTE: generated container of code constants; each triple-quoted string
    # below documents the constant defined on the following line.
    """
    The Claim/Pre-authorization/Pre-determination has been received but processing
    has not begun.
    From: http://hl7.org/fhir/remittance-outcome in valuesets.xml
    """
    Queued = ClaimProcessingCodesCode("queued")
    """
    The processing has completed without errors
    From: http://hl7.org/fhir/remittance-outcome in valuesets.xml
    """
    ProcessingComplete = ClaimProcessingCodesCode("complete")
    """
    One or more errors have been detected in the Claim
    From: http://hl7.org/fhir/remittance-outcome in valuesets.xml
    """
    Error = ClaimProcessingCodesCode("error")
    """
    No errors have been detected in the Claim and some of the adjudication has
    been performed.
    From: http://hl7.org/fhir/remittance-outcome in valuesets.xml
    """
    PartialProcessing = ClaimProcessingCodesCode("partial")
| 33.078431
| 84
| 0.739775
|
4a0b31af7c7b26a59c35d4e199fa79268a8a9358
| 15,998
|
py
|
Python
|
ansible/modules/cloud/cloudstack/cs_storage_pool.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
ansible/modules/cloud/cloudstack/cs_storage_pool.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/cloud/cloudstack/cs_storage_pool.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Netservers Ltd. <support@netservers.co.uk>
# (c) 2017, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata: stability and support markers.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Ansible-doc source. Fixes: capacity_iops wrongly described as "Bytes",
# allocation_state choices were missing 'maintenance' (the argument_spec
# accepts it), and a "considere" typo.
DOCUMENTATION = '''
---
module: cs_storage_pool
short_description: Manages Primary Storage Pools on Apache CloudStack based clouds.
description:
    - Create, update, put into maintenance, disable, enable and remove storage pools.
version_added: "2.4"
author:
  - "Netservers Ltd. (@netservers)"
  - "René Moser (@resmo)"
options:
  name:
    description:
      - Name of the storage pool.
    required: true
  zone:
    description:
      - Name of the zone in which the host should be deployed.
      - If not set, default zone is used.
  storage_url:
    description:
      - URL of the storage pool.
      - Required if C(state=present).
  pod:
    description:
      - Name of the pod.
  cluster:
    description:
      - Name of the cluster.
  scope:
    description:
      - The scope of the storage pool.
      - Defaults to cluster when C(cluster) is provided, otherwise zone.
    choices: [ cluster, zone ]
  managed:
    description:
      - Whether the storage pool should be managed by CloudStack.
      - Only considered on creation.
  hypervisor:
    description:
      - Required when creating a zone scoped pool.
    choices: [ KVM, VMware, BareMetal, XenServer, LXC, HyperV, UCS, OVM, Simulator ]
  storage_tags:
    description:
      - Tags associated with this storage pool.
  provider:
    description:
      - Name of the storage provider e.g. SolidFire, SolidFireShared, DefaultPrimary, CloudByte.
    default: DefaultPrimary
  capacity_bytes:
    description:
      - Bytes CloudStack can provision from this storage pool.
  capacity_iops:
    description:
      - IOPS CloudStack can provision from this storage pool.
  allocation_state:
    description:
      - Allocation state of the storage pool.
    choices: [ enabled, disabled, maintenance ]
  state:
    description:
      - State of the storage pool.
    default: present
    choices: [ present, absent ]
extends_documentation_fragment: cloudstack
'''
# Usage examples. Fix: the two cluster-scoped tasks each had a duplicate
# 'name:' key (invalid/misleading YAML mapping); the duplicates are removed.
EXAMPLES = '''
- name: ensure a zone scoped storage_pool is present
  local_action:
    module: cs_storage_pool
    zone: zone01
    storage_url: rbd://admin:SECRET@ceph-mons.domain/poolname
    provider: DefaultPrimary
    name: Ceph RBD
    scope: zone
    hypervisor: KVM

- name: ensure a cluster scoped storage_pool is disabled
  local_action:
    module: cs_storage_pool
    name: Ceph RBD
    zone: zone01
    cluster: cluster01
    pod: pod01
    storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
    provider: DefaultPrimary
    scope: cluster
    allocation_state: disabled

- name: ensure a cluster scoped storage_pool is in maintenance
  local_action:
    module: cs_storage_pool
    name: Ceph RBD
    zone: zone01
    cluster: cluster01
    pod: pod01
    storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
    provider: DefaultPrimary
    scope: cluster
    allocation_state: maintenance

- name: ensure a storage_pool is absent
  local_action:
    module: cs_storage_pool
    name: Ceph RBD
    state: absent
'''
# Return-value documentation. Fixes: "torage pool" typo and the pod sample,
# which was a copy-paste of the cluster sample ("Cluster01").
RETURN = '''
---
id:
  description: UUID of the pool.
  returned: success
  type: string
  sample: a3fca65a-7db1-4891-b97c-48806a978a96
created:
  description: Date of the pool was created.
  returned: success
  type: string
  sample: 2014-12-01T14:57:57+0100
capacity_iops:
  description: IOPS CloudStack can provision from this storage pool
  returned: when available
  type: int
  sample: 60000
zone:
  description: The name of the zone.
  returned: success
  type: string
  sample: Zone01
cluster:
  description: The name of the cluster.
  returned: when scope is cluster
  type: string
  sample: Cluster01
pod:
  description: The name of the pod.
  returned: when scope is cluster
  type: string
  sample: Pod01
disk_size_allocated:
  description: The pool's currently allocated disk space.
  returned: success
  type: int
  sample: 2443517624320
disk_size_total:
  description: The total size of the pool.
  returned: success
  type: int
  sample: 3915055693824
disk_size_used:
  description: The pool's currently used disk size.
  returned: success
  type: int
  sample: 1040862622180
scope:
  description: The scope of the storage pool.
  returned: success
  type: string
  sample: cluster
hypervisor:
  description: Hypervisor related to this storage pool.
  returned: when available
  type: string
  sample: KVM
state:
  description: The state of the storage pool as returned by the API.
  returned: success
  type: string
  sample: Up
allocation_state:
  description: The state of the storage pool.
  returned: success
  type: string
  sample: enabled
path:
  description: The storage pool path used in the storage_url.
  returned: success
  type: string
  sample: poolname
overprovision_factor:
  description: The overprovision factor of the storage pool.
  returned: success
  type: string
  sample: 2.0
suitable_for_migration:
  description: Whether the storage pool is suitable to migrate a volume or not.
  returned: success
  type: bool
  sample: false
storage_capabilities:
  description: Capabilities of the storage pool.
  returned: success
  type: dict
  sample: {"VOLUME_SNAPSHOT_QUIESCEVM": "false"}
storage_tags:
  description: the tags for the storage pool.
  returned: success
  type: list
  sample: ["perf", "ssd"]
'''
# import cloudstack common
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS,
)
class AnsibleCloudStackStoragePool(AnsibleCloudStack):
    """Manages a CloudStack primary storage pool: create, update,
    allocation-state transitions (enable/disable/maintenance) and deletion."""
    def __init__(self, module):
        super(AnsibleCloudStackStoragePool, self).__init__(module)
        # Map of CloudStack API result keys to this module's return keys.
        self.returns = {
            'capacityiops': 'capacity_iops',
            'podname': 'pod',
            'clustername': 'cluster',
            'disksizeallocated': 'disk_size_allocated',
            'disksizetotal': 'disk_size_total',
            'disksizeused': 'disk_size_used',
            'scope': 'scope',
            'hypervisor': 'hypervisor',
            'type': 'type',
            # NOTE(review): this entry looks inverted relative to the others
            # (the API field is 'ipaddress', see get_result()) -- verify.
            'ip_address': 'ipaddress',
            'path': 'path',
            'overprovisionfactor': 'overprovision_factor',
            'storagecapabilities': 'storage_capabilities',
            'suitableformigration': 'suitable_for_migration',
        }
        self.allocation_states = {
            # Host state: param state
            'Up': 'enabled',
            'Disabled': 'disabled',
            'Maintenance': 'maintenance',
        }
        # Cache for the looked-up pool; filled lazily by get_storage_pool().
        self.storage_pool = None
    def _get_common_args(self):
        """Return the API arguments shared by create and update calls."""
        return {
            'name': self.module.params.get('name'),
            'url': self.module.params.get('storage_url'),
            'zoneid': self.get_zone(key='id'),
            'provider': self.get_storage_provider(),
            'scope': self.module.params.get('scope'),
            'hypervisor': self.module.params.get('hypervisor'),
            'capacitybytes': self.module.params.get('capacity_bytes'),
            'capacityiops': self.module.params.get('capacity_iops'),
        }
    def _allocation_state_enabled_disabled_changed(self, pool, allocation_state):
        """Return True if a requested enabled/disabled state differs from the pool's state."""
        if allocation_state in ['enabled', 'disabled']:
            for pool_state, param_state in self.allocation_states.items():
                if pool_state == pool['state'] and allocation_state != param_state:
                    return True
        return False
    def _handle_allocation_state(self, pool, state=None):
        """Transition the pool to the requested allocation state.

        *state* overrides the module's allocation_state param (used to force
        'maintenance' before deletion). Returns the (possibly updated) pool.
        """
        allocation_state = state or self.module.params.get('allocation_state')
        if not allocation_state:
            return pool
        if self.allocation_states.get(pool['state']) == allocation_state:
            return pool
        # Cancel maintenance if target state is enabled/disabled
        elif allocation_state in ['enabled', 'disabled']:
            pool = self._cancel_maintenance(pool)
            pool = self._update_storage_pool(pool=pool, allocation_state=allocation_state)
        # Only an enabled host can put in maintenance
        elif allocation_state == 'maintenance':
            pool = self._update_storage_pool(pool=pool, allocation_state='enabled')
            pool = self._enable_maintenance(pool=pool)
        return pool
    def _create_storage_pool(self):
        """Create the pool; scope defaults to 'cluster' when a cluster is set.

        In check mode nothing is created and None is returned (falls through).
        """
        args = self._get_common_args()
        args.update({
            'clusterid': self.get_cluster(key='id'),
            'podid': self.get_pod(key='id'),
            'managed': self.module.params.get('managed'),
        })
        scope = self.module.params.get('scope')
        if scope is None:
            args['scope'] = 'cluster' if args['clusterid'] else 'zone'
        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.query_api('createStoragePool', **args)
            return res['storagepool']
    def _update_storage_pool(self, pool, allocation_state=None):
        """Update capacity/tags and optionally flip the enabled flag."""
        args = {
            'id': pool['id'],
            'capacitybytes': self.module.params.get('capacity_bytes'),
            'capacityiops': self.module.params.get('capacity_iops'),
            'tags': self.get_storage_tags(),
        }
        if self.has_changed(args, pool) or self._allocation_state_enabled_disabled_changed(pool, allocation_state):
            self.result['changed'] = True
            # 'enabled' is only meaningful for enabled/disabled transitions.
            args['enabled'] = allocation_state == 'enabled' if allocation_state in ['enabled', 'disabled'] else None
            if not self.module.check_mode:
                res = self.query_api('updateStoragePool', **args)
                pool = res['storagepool']
        return pool
    def _enable_maintenance(self, pool):
        """Put the pool into maintenance unless it already is."""
        if pool['state'].lower() != "maintenance":
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('enableStorageMaintenance', id=pool['id'])
                pool = self.poll_job(res, 'storagepool')
        return pool
    def _cancel_maintenance(self, pool):
        """Take the pool out of maintenance if it currently is in it."""
        if pool['state'].lower() == "maintenance":
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('cancelStorageMaintenance', id=pool['id'])
                pool = self.poll_job(res, 'storagepool')
        return pool
    def get_storage_tags(self):
        """Return storage tags as a comma-joined string, or None if unset."""
        storage_tags = self.module.params.get('storage_tags')
        if storage_tags is None:
            return None
        return ','.join(storage_tags)
    def get_storage_pool(self, key=None):
        """Look up and cache the storage pool by name/zone/pod/cluster."""
        if self.storage_pool is None:
            zoneid = self.get_zone(key='id')
            clusterid = self.get_cluster(key='id')
            podid = self.get_pod(key='id')
            args = {
                'zoneid': zoneid,
                'podid': podid,
                'clusterid': clusterid,
                'name': self.module.params.get('name'),
            }
            res = self.query_api('listStoragePools', **args)
            if 'storagepool' not in res:
                return None
            self.storage_pool = res['storagepool'][0]
        return self.storage_pool
    def present_storage_pool(self):
        """Ensure the pool exists with the requested settings and state."""
        pool = self.get_storage_pool()
        if pool:
            pool = self._update_storage_pool(pool=pool)
        else:
            pool = self._create_storage_pool()
        if pool:
            pool = self._handle_allocation_state(pool=pool)
        return pool
    def absent_storage_pool(self):
        """Ensure the pool is removed (forcing maintenance first)."""
        pool = self.get_storage_pool()
        if pool:
            self.result['changed'] = True
            args = {
                'id': pool['id'],
            }
            if not self.module.check_mode:
                # Only a pool in maintenance can be deleted
                self._handle_allocation_state(pool=pool, state='maintenance')
                self.query_api('deleteStoragePool', **args)
        return pool
    def get_storage_provider(self, type="primary"):
        """Validate and return the configured storage provider name; fails the
        module if the provider is unknown to CloudStack."""
        args = {
            'type': type,
        }
        provider = self.module.params.get('provider')
        storage_providers = self.query_api('listStorageProviders', **args)
        for sp in storage_providers.get('dataStoreProvider') or []:
            if sp['name'].lower() == provider.lower():
                return provider
        self.fail_json(msg="Storage provider %s not found" % provider)
    def get_pod(self, key=None):
        """Resolve the pod named in the params, or None if unset."""
        pod = self.module.params.get('pod')
        if not pod:
            return None
        args = {
            'name': pod,
            'zoneid': self.get_zone(key='id'),
        }
        pods = self.query_api('listPods', **args)
        if pods:
            return self._get_by_key(key, pods['pod'][0])
        self.fail_json(msg="Pod %s not found" % self.module.params.get('pod'))
    def get_cluster(self, key=None):
        """Resolve the cluster named in the params, or None if unset."""
        cluster = self.module.params.get('cluster')
        if not cluster:
            return None
        args = {
            'name': cluster,
            'zoneid': self.get_zone(key='id'),
        }
        clusters = self.query_api('listClusters', **args)
        if clusters:
            return self._get_by_key(key, clusters['cluster'][0])
        self.fail_json(msg="Cluster %s not found" % cluster)
    def get_result(self, pool):
        """Build the module return values from the API pool representation."""
        super(AnsibleCloudStackStoragePool, self).get_result(pool)
        if pool:
            self.result['storage_url'] = "%s://%s/%s" % (pool['type'], pool['ipaddress'], pool['path'])
            self.result['scope'] = pool['scope'].lower()
            self.result['storage_tags'] = pool['tags'].split(',') if pool.get('tags') else []
            self.result['allocation_state'] = self.allocation_states.get(pool['state'])
        return self.result
def main():
    """Ansible entry point: parse arguments and ensure the desired pool state."""
    argument_spec = cs_argument_spec()
    argument_spec.update({
        'name': {'required': True},
        'storage_url': {},
        'zone': {},
        'pod': {},
        'cluster': {},
        'scope': {'choices': ['zone', 'cluster']},
        'hypervisor': {'choices': CS_HYPERVISORS},
        'provider': {'default': 'DefaultPrimary'},
        'capacity_bytes': {'type': 'int'},
        'capacity_iops': {'type': 'int'},
        'managed': {'type': 'bool'},
        'storage_tags': {'type': 'list', 'aliases': ['storage_tag']},
        'allocation_state': {'choices': ['enabled', 'disabled', 'maintenance']},
        'state': {'choices': ['present', 'absent'], 'default': 'present'},
    })
    required_together = cs_required_together()
    required_together.append(['pod', 'cluster'])
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=required_together,
        required_if=[
            ('state', 'present', ['storage_url']),
        ],
        supports_check_mode=True
    )
    acs_storage_pool = AnsibleCloudStackStoragePool(module)
    # Dispatch on the requested state.
    if module.params.get('state') == 'absent':
        pool = acs_storage_pool.absent_storage_pool()
    else:
        pool = acs_storage_pool.present_storage_pool()
    module.exit_json(**acs_storage_pool.get_result(pool))
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| 31.430255
| 116
| 0.636205
|
4a0b31c4cfe360e867b55daf3f0ff81fac7f7c64
| 1,306
|
py
|
Python
|
_bin/word_perm.py
|
HongxuChen/dotfiles
|
4a51a74ae3345273514faccf40a47e1a39048049
|
[
"MIT"
] | 13
|
2015-09-11T14:53:06.000Z
|
2021-12-19T23:07:06.000Z
|
_bin/word_perm.py
|
HongxuChen/dotfiles
|
4a51a74ae3345273514faccf40a47e1a39048049
|
[
"MIT"
] | 5
|
2015-03-18T17:08:01.000Z
|
2020-01-07T08:51:47.000Z
|
_bin/word_perm.py
|
HongxuChen/dotfiles
|
4a51a74ae3345273514faccf40a47e1a39048049
|
[
"MIT"
] | 4
|
2016-12-27T14:52:19.000Z
|
2019-05-16T07:07:53.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
from itertools import permutations
def get_words(fname):
    """Read a word-list file and return its words as a lower-cased set.

    Blank lines are skipped (the original added an empty string to the set).
    Exits with status 1 if the file does not exist.
    """
    try:
        with open(fname, "r") as f:
            word_set = {line.lower().strip() for line in f if line.strip()}
    except FileNotFoundError:
        print("cannot find the dict file: {}".format(fname))
        sys.exit(1)
    return word_set
# CLI: list dictionary words that can be formed by permuting the letters of
# the first argument; optional numeric args select permutation lengths.
if len(sys.argv) < 2:
    print("usage: {} [alphabets] ([num], [num], ...)".format(sys.argv[0]), file=sys.stderr)
    sys.exit(1)
# Individual letters of the first argument, lower-cased.
lists = [w.lower() for w in sys.argv[1]]
if len(lists) < 3:
    print("invalid lists, length should be >= 3", file=sys.stderr)
    sys.exit(1)
# Remaining arguments are permutation lengths; default is 3..len(lists).
nums = [int(num) for num in sys.argv[2:]]
if len(nums) == 0:
    nums = list(range(3, len(lists) + 1))
words = get_words("/usr/share/dict/american-english")
print("lists={}, nums={}".format(lists, nums))
for num in nums:
    if num > len(lists):
        print("invalid num, ignoring...", file=sys.stderr)
        continue
    print("{}\tnum={}\t{}".format("=" * 20, num, "=" * 20))
    printed = set()  # avoid printing the same word twice for this length
    for p in permutations(lists, num):
        w = "".join(p)
        if w in words and w not in printed:
            print(w)
            printed.add(w)
| 28.391304
| 91
| 0.584992
|
4a0b33571c5c863e951106940326af22f36973de
| 4,819
|
py
|
Python
|
sdk/python/pulumi_azure_native/recoveryservices/v20180710/get_replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20180710/get_replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20180710/get_replication_storage_classification_mapping.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetReplicationStorageClassificationMappingResult',
'AwaitableGetReplicationStorageClassificationMappingResult',
'get_replication_storage_classification_mapping',
]
@pulumi.output_type
class GetReplicationStorageClassificationMappingResult:
    """
    Storage mapping object.
    """
    def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
        # Defensive type checks; values are supplied by the Pulumi engine.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource Location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.StorageClassificationMappingPropertiesResponse':
        """
        Properties of the storage mapping object.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource Type
        """
        return pulumi.get(self, "type")
class AwaitableGetReplicationStorageClassificationMappingResult(GetReplicationStorageClassificationMappingResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: the dead 'yield' turns this into a
        # generator, which immediately returns a plain result copy.
        if False:
            yield self
        return GetReplicationStorageClassificationMappingResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_replication_storage_classification_mapping(fabric_name: Optional[str] = None,
                                                   resource_group_name: Optional[str] = None,
                                                   resource_name: Optional[str] = None,
                                                   storage_classification_mapping_name: Optional[str] = None,
                                                   storage_classification_name: Optional[str] = None,
                                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationStorageClassificationMappingResult:
    """
    Storage mapping object.

    :param str fabric_name: Fabric name.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    :param str storage_classification_mapping_name: Storage classification mapping name.
    :param str storage_classification_name: Storage classification name.
    """
    __args__ = dict()
    __args__['fabricName'] = fabric_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    __args__['storageClassificationMappingName'] = storage_classification_mapping_name
    __args__['storageClassificationName'] = storage_classification_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider; 'typ' tells Pulumi how to deserialize the result.
    __ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20180710:getReplicationStorageClassificationMapping', __args__, opts=opts, typ=GetReplicationStorageClassificationMappingResult).value
    return AwaitableGetReplicationStorageClassificationMappingResult(
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
| 37.648438
| 202
| 0.654078
|
4a0b337ca4ad9dd5f008d03255af2e82697423da
| 1,357
|
py
|
Python
|
week_9_serverless/lambda_function.py
|
razekmaiden/ml_zoomcamp
|
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
|
[
"MIT"
] | null | null | null |
week_9_serverless/lambda_function.py
|
razekmaiden/ml_zoomcamp
|
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
|
[
"MIT"
] | null | null | null |
week_9_serverless/lambda_function.py
|
razekmaiden/ml_zoomcamp
|
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
|
[
"MIT"
] | 1
|
2021-10-11T12:18:36.000Z
|
2021-10-11T12:18:36.000Z
|
#!/usr/bin/env python
# coding: utf-8
#import tensorflow.lite as tflite
import tflite_runtime.interpreter as tflite
import numpy as np
from io import BytesIO
from urllib import request
from PIL import Image
def download_image(url):
    """Fetch the resource at *url* and open it as a PIL image."""
    with request.urlopen(url) as resp:
        payload = resp.read()
    return Image.open(BytesIO(payload))
def prepare_image(img, target_size=(150, 150)):
    """Convert *img* to RGB if necessary and resize it to *target_size*."""
    rgb = img if img.mode == 'RGB' else img.convert('RGB')
    return rgb.resize(target_size, Image.NEAREST)
def preprocess_input(x):
    """Scale pixel values from [0, 255] into [0, 1].

    Returns a new array instead of mutating *x* in place (the original
    `x /= 255` clobbered the caller's array and raised a casting error for
    integer-typed numpy inputs).
    """
    return x / 255.0
# Load the TFLite model once at import time so warm Lambda invocations
# reuse the interpreter.
#model = "dogs_cats_10_0.687.tflite"
model = "cats-dogs-v2.tflite"
interpreter = tflite.Interpreter(model_path=model)
interpreter.allocate_tensors()
# Tensor indices for feeding the input and reading the prediction.
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']
def predict(url):
    """Download the image at *url* and run it through the TFLite model.

    Returns the model's first output row as a list of floats. Relies on the
    module-level interpreter and tensor indices set up above.
    """
    raw_img = download_image(url)
    prepared_img = prepare_image(raw_img)
    x = np.array(prepared_img, dtype='float32')
    X = np.array([x])  # add a batch dimension
    X = preprocess_input(X)
    interpreter.set_tensor(input_index, X)
    interpreter.invoke()
    preds = interpreter.get_tensor(output_index)
    float_predictions = preds[0].tolist()
    return float_predictions
def lambda_handler(event, context):
    """AWS Lambda entry point: classify the image referenced by event['url']."""
    return predict(event['url'])
| 24.232143
| 59
| 0.707443
|
4a0b33f947823869ca0565bd9710db59d7a21ab4
| 324
|
py
|
Python
|
aqueduct/exceptions.py
|
artemcpp/aqueduct
|
2fc177b9e533dbe900f5878b9cc7a9c0e9eed179
|
[
"MIT"
] | null | null | null |
aqueduct/exceptions.py
|
artemcpp/aqueduct
|
2fc177b9e533dbe900f5878b9cc7a9c0e9eed179
|
[
"MIT"
] | null | null | null |
aqueduct/exceptions.py
|
artemcpp/aqueduct
|
2fc177b9e533dbe900f5878b9cc7a9c0e9eed179
|
[
"MIT"
] | null | null | null |
class AqueductError(Exception):
    """Base class for all Aqueduct errors.

    Catch this to handle any exception raised by the package.
    """
class FlowError(AqueductError):
    """Raised by a flow if something went wrong."""
class NotRunningError(FlowError):
    """Raised by a flow when it is not already running."""
class BadReferenceCount(AqueductError, ValueError):
    # Doubles as a ValueError so callers validating counts can catch either.
    """Raised when a reference count has an invalid value."""
| 21.6
| 58
| 0.722222
|
4a0b34226443e1817a1480efa09073fb889e70a6
| 5,668
|
py
|
Python
|
tests/replication/tcp/streams/test_typing.py
|
rkfg/synapse
|
0b3112123da5fae4964db784e3bab0c4d83d9d62
|
[
"Apache-2.0"
] | 1
|
2021-09-09T08:50:13.000Z
|
2021-09-09T08:50:13.000Z
|
tests/replication/tcp/streams/test_typing.py
|
rkfg/synapse
|
0b3112123da5fae4964db784e3bab0c4d83d9d62
|
[
"Apache-2.0"
] | null | null | null |
tests/replication/tcp/streams/test_typing.py
|
rkfg/synapse
|
0b3112123da5fae4964db784e3bab0c4d83d9d62
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
from synapse.handlers.typing import RoomMember
from synapse.replication.tcp.streams import TypingStream
from synapse.util.caches.stream_change_cache import StreamChangeCache
from tests.replication._base import BaseStreamTestCase
# Fixture identifiers used by the tests below.
USER_ID = "@feeling:blue"
USER_ID_2 = "@da-ba-dee:blue"
ROOM_ID = "!bar:blue"
ROOM_ID_2 = "!foo:blue"
class TypingStreamTestCase(BaseStreamTestCase):
    """Tests replication of the typing stream, including catch-up after a
    disconnect and behaviour across a stream reset."""
    def _build_replication_data_handler(self):
        # Wrap the real handler in a Mock so on_rdata calls can be asserted on.
        return Mock(wraps=super()._build_replication_data_handler())
    def test_typing(self):
        """A typing update is replicated; updates made while disconnected are
        delivered on reconnect, resuming from the last seen token."""
        typing = self.hs.get_typing_handler()
        self.reconnect()
        typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
        self.reactor.advance(0)
        # We should now see an attempt to connect to the master
        request = self.handle_http_replication_attempt()
        self.assert_request_is_get_repl_stream_updates(request, "typing")
        self.test_handler.on_rdata.assert_called_once()
        stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
        self.assertEqual(stream_name, "typing")
        self.assertEqual(1, len(rdata_rows))
        row = rdata_rows[0]  # type: TypingStream.TypingStreamRow
        self.assertEqual(ROOM_ID, row.room_id)
        self.assertEqual([USER_ID], row.user_ids)
        # Now let's disconnect and insert some data.
        self.disconnect()
        self.test_handler.on_rdata.reset_mock()
        typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=False)
        self.test_handler.on_rdata.assert_not_called()
        self.reconnect()
        self.pump(0.1)
        # We should now see an attempt to connect to the master
        request = self.handle_http_replication_attempt()
        self.assert_request_is_get_repl_stream_updates(request, "typing")
        # The from token should be the token from the last RDATA we got.
        assert request.args is not None
        self.assertEqual(int(request.args[b"from_token"][0]), token)
        self.test_handler.on_rdata.assert_called_once()
        stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
        self.assertEqual(stream_name, "typing")
        self.assertEqual(1, len(rdata_rows))
        row = rdata_rows[0]
        self.assertEqual(ROOM_ID, row.room_id)
        self.assertEqual([], row.user_ids)
    def test_reset(self):
        """
        Test what happens when a typing stream resets.
        This is emulated by jumping the stream ahead, then reconnecting (which
        sends the proper position and RDATA).
        """
        typing = self.hs.get_typing_handler()
        self.reconnect()
        typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
        self.reactor.advance(0)
        # We should now see an attempt to connect to the master
        request = self.handle_http_replication_attempt()
        self.assert_request_is_get_repl_stream_updates(request, "typing")
        self.test_handler.on_rdata.assert_called_once()
        stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
        self.assertEqual(stream_name, "typing")
        self.assertEqual(1, len(rdata_rows))
        row = rdata_rows[0]  # type: TypingStream.TypingStreamRow
        self.assertEqual(ROOM_ID, row.room_id)
        self.assertEqual([USER_ID], row.user_ids)
        # Push the stream forward a bunch so it can be reset.
        for i in range(100):
            typing._push_update(
                member=RoomMember(ROOM_ID, "@test%s:blue" % i), typing=True
            )
        self.reactor.advance(0)
        # Disconnect.
        self.disconnect()
        # Reset the typing handler
        self.hs.get_replication_streams()["typing"].last_token = 0
        self.hs.get_tcp_replication()._streams["typing"].last_token = 0
        typing._latest_room_serial = 0
        typing._typing_stream_change_cache = StreamChangeCache(
            "TypingStreamChangeCache", typing._latest_room_serial
        )
        typing._reset()
        # Reconnect.
        self.reconnect()
        self.pump(0.1)
        # We should now see an attempt to connect to the master
        request = self.handle_http_replication_attempt()
        self.assert_request_is_get_repl_stream_updates(request, "typing")
        # Reset the test code.
        self.test_handler.on_rdata.reset_mock()
        self.test_handler.on_rdata.assert_not_called()
        # Push additional data.
        typing._push_update(member=RoomMember(ROOM_ID_2, USER_ID_2), typing=False)
        self.reactor.advance(0)
        self.test_handler.on_rdata.assert_called_once()
        stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
        self.assertEqual(stream_name, "typing")
        self.assertEqual(1, len(rdata_rows))
        row = rdata_rows[0]
        self.assertEqual(ROOM_ID_2, row.room_id)
        self.assertEqual([], row.user_ids)
        # The token should have been reset.
        self.assertEqual(token, 1)
| 36.567742
| 83
| 0.687368
|
4a0b34655023b3137b80ba53099777e07c058bae
| 1,992
|
py
|
Python
|
Bioinformatics V/Week I/diauxic_shift_clustering.py
|
egeulgen/Bioinformatics_Specialization
|
38581b471a54c41d780d9eeb26a7033eb57f3a01
|
[
"MIT"
] | 3
|
2021-04-03T23:46:42.000Z
|
2021-08-08T01:19:32.000Z
|
Bioinformatics V/Week I/diauxic_shift_clustering.py
|
egeulgen/Bioinformatics_Specialization
|
38581b471a54c41d780d9eeb26a7033eb57f3a01
|
[
"MIT"
] | null | null | null |
Bioinformatics V/Week I/diauxic_shift_clustering.py
|
egeulgen/Bioinformatics_Specialization
|
38581b471a54c41d780d9eeb26a7033eb57f3a01
|
[
"MIT"
] | null | null | null |
import pandas as pd
from copy import deepcopy
import k_means_initializer
import SquaredErrorDistortion
def Euclidean_distance(PointA, PointB):
    """Return the Euclidean distance between two equal-length points.

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    if len(PointA) != len(PointB):
        raise ValueError('The dimensions are not the same!')
    # Sum of squared coordinate differences, then a single square root.
    return sum((a - b) ** 2 for a, b in zip(PointA, PointB)) ** 0.5
def assign_clusters(Centers, Data):
    """Group every data point with its nearest center.

    Returns one list of points per center; ties go to the earliest center.
    """
    clusters = [[] for _ in Centers]
    for point in Data:
        best_idx = 0
        best_dist = float('inf')
        for j, center in enumerate(Centers):
            dist = Euclidean_distance(point, center)
            if dist < best_dist:
                best_dist = dist
                best_idx = j
        clusters[best_idx].append(point)
    return clusters
def cluster_mean(cluster):
    """Return the centroid (per-coordinate mean) of a non-empty cluster.

    Sums each coordinate first and divides once, instead of accumulating
    pre-divided terms (one division per column instead of one per element,
    with less rounding error).
    """
    n = len(cluster)
    return [sum(coords) / n for coords in zip(*cluster)]
def Lloyd_kmeans(Data, k):
    """Run Lloyd's k-means on ``Data`` with ``k`` clusters.

    Returns the final list of cluster centers once they stop moving.
    """
    centers = k_means_initializer.k_means_initializer(Data, k)
    updated = [[] for _ in range(k)]
    while True:
        # Assignment step, then the update (centroid) step.
        grouped = assign_clusters(centers, Data)
        for idx, members in enumerate(grouped):
            updated[idx] = cluster_mean(members)
        if updated == centers:
            # Converged: no center changed.
            break
        centers = deepcopy(updated)
    return centers
if __name__ == "__main__":
    # Load the diauxic-shift expression table (tab separated, header row).
    # df = pd.read_csv('230genes_log_expression.txt', sep='\t', header=0, na_filter=False)
    df = pd.read_csv('diauxic_raw_ratios_RG.txt', sep='\t', header=0, na_filter=False)
    # Keep only the expression-ratio columns; presumably those whose
    # header starts with 'R' -- TODO confirm against the input file.
    exp_columns = [x for x in list(df.columns) if x.startswith('R')]
    Data = []
    for index, row in df.iterrows():
        Data.append(list(row[exp_columns]))
    # Single k (6); the range form makes it easy to sweep several values.
    for k in range(6, 7):
        Centers = Lloyd_kmeans(Data, k)
        distortion = SquaredErrorDistortion.Distortion(Data, Centers)
        print('For k = %d' % k)
        print('Distortion is %f' % distortion)
        print('')
| 30.646154
| 90
| 0.615462
|
4a0b3483971daa6613afe70a7548d41570654076
| 3,837
|
py
|
Python
|
apps/agents_ppo/trainer/trigger.py
|
cwbeitel/kubeflow-agents-demo
|
4b70afc7434901111e7733e6f83ad218afd5cbd5
|
[
"Apache-2.0"
] | 8
|
2018-01-08T10:41:10.000Z
|
2019-12-23T15:17:43.000Z
|
apps/agents_ppo/trainer/trigger.py
|
cwbeitel/kubeflow-agents-demo
|
4b70afc7434901111e7733e6f83ad218afd5cbd5
|
[
"Apache-2.0"
] | 6
|
2018-01-07T19:12:55.000Z
|
2018-01-24T18:28:51.000Z
|
apps/agents_ppo/trainer/trigger.py
|
cwbeitel/kubeflow-rl
|
4b70afc7434901111e7733e6f83ad218afd5cbd5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
import requests
def trigger_with_manifest(manifest, router_ip):
    """POST a job manifest to the Fission router; return the HTTP status code.

    The router is expected to expose the job-trigger-render-events route
    on port 8888 at ``router_ip``.
    """
    tf.logging.info("Dialing Fission router at IP %s..." % router_ip)
    route = "job-trigger-render-events"
    address = "http://%s:8888/%s" % (router_ip, route)
    tf.logging.info("Request address: %s" % address)
    tf.logging.info("issuing request with manifest: %s" % manifest)
    # The manifest is sent as the JSON request body.
    r = requests.post(address, json=manifest)
    tf.logging.info("Issued request and received response: %s" % str(r.status_code))
    return r.status_code
class RenderTriggerHook(session_run_hook.SessionRunHook):
    """Session hook that periodically triggers remote render jobs.

    Every ``every_n_steps`` steps or ``every_n_secs`` seconds it posts a
    render-job manifest to the Fission job service (see
    trigger_with_manifest). NOTE(review): the inherited template
    docstring said "counts steps per second", which did not match the
    actual behavior.
    """

    def __init__(self,
                 every_n_steps=None,
                 every_n_secs=5,
                 log_dir=None,
                 job_service_ip="localhost"):
        # Exactly one of the two trigger cadences must be provided.
        if (every_n_steps is None) == (every_n_secs is None):
            raise ValueError(
                "exactly one of every_n_steps and every_n_secs should be provided.")
        self._timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=every_n_steps,
                                                                every_secs=every_n_secs)
        self._log_dir = log_dir          # passed through to the render job
        self._render_count = 0           # number of renders triggered so far
        self._total_elapsed_time = 0     # cumulative seconds between triggers
        self._job_service_ip = job_service_ip

    def begin(self):
        # Resolve the global-step tensor once, before the session starts.
        self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
        if self._global_step_tensor is None:
            raise RuntimeError(
                "Global step should be created to use StepCounterHook.")

    def before_run(self, run_context):  # pylint: disable=unused-argument
        # Ask the session to also fetch the global step each run.
        return SessionRunArgs(self._global_step_tensor)

    def after_run(self, run_context, run_values):
        _ = run_context
        # The fetched value may be stale; re-check with a fresh read
        # before actually triggering (double-check pattern from
        # tf.train.StepCounterHook).
        stale_global_step = run_values.results
        if self._timer.should_trigger_for_step(stale_global_step+1):
            global_step = run_context.session.run(self._global_step_tensor)
            if self._timer.should_trigger_for_step(global_step):
                elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
                    global_step)
                if elapsed_time is not None:
                    self._total_elapsed_time += elapsed_time
                    self._render_count += 1
                    # Manifest consumed by the remote render job.
                    event_manifest = {"job_type": "render",
                                      "args": {
                                          "render_count": int(self._render_count),
                                          "log_dir": str(self._log_dir),
                                          "meta": {
                                              "elapsed_time": float(self._total_elapsed_time),
                                              "global_step": int(global_step)
                                          }
                                      }}
                    tf.logging.info("Triggering render number %s." % self._render_count)
                    tf.logging.info("Render trigger manifest: %s" % event_manifest)
                    trigger_with_manifest(event_manifest, self._job_service_ip)
| 42.633333
| 114
| 0.681001
|
4a0b350a0c0025e444a9ecd766c1b2b31182548a
| 3,095
|
py
|
Python
|
setup.py
|
williamholland/django-rest-messaging
|
c9d5405fed7db2d79ec5c93c721a8fe42ea86958
|
[
"0BSD"
] | 43
|
2016-02-22T16:34:22.000Z
|
2021-12-08T01:11:21.000Z
|
setup.py
|
williamholland/django-rest-messaging
|
c9d5405fed7db2d79ec5c93c721a8fe42ea86958
|
[
"0BSD"
] | 6
|
2016-04-07T12:38:29.000Z
|
2019-07-05T10:58:45.000Z
|
setup.py
|
williamholland/django-rest-messaging
|
c9d5405fed7db2d79ec5c93c721a8fe42ea86958
|
[
"0BSD"
] | 12
|
2016-04-07T12:20:20.000Z
|
2021-06-24T17:29:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
name = 'django-rest-messaging'
package = 'rest_messaging'
description = 'The project provides a Facebook-like messaging API for Django Rest Framework.'
url = 'https://github.com/raphaelgyory/django-rest-messaging'
author = 'Raphael Gyory'
author_email = 'raphael@conseilpartage.com'
license = 'BSD'
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left it open until garbage collection).
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    packages = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        # A directory is a package iff it contains an __init__.py file.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            packages.append(dirpath)
    return packages
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        # Real sub-packages are collected by get_packages(); skip them here.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, name) for name in filenames)
    return {package: filepaths}
version = get_version(package)

# `python setup.py publish` convenience target: build sdist + wheel,
# upload, and remind the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    if os.system("pip freeze | grep wheel"):
        print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
        sys.exit()
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(" git push --tags")
    sys.exit()

setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    install_requires = [
        'django>=1.6',
        'djangorestframework>=2.4.3',
        'six',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| 30.343137
| 93
| 0.624233
|
4a0b36f9c3b8ed0a60ccc5fd28f057508843d7b4
| 1,005
|
py
|
Python
|
errorify/parse_verbs.py
|
wangwang110/PIE
|
474769e3c4266deefcb7dd5daf802a1306bc7c99
|
[
"MIT"
] | 165
|
2019-10-08T09:54:46.000Z
|
2022-03-17T06:50:32.000Z
|
errorify/parse_verbs.py
|
wangwang110/PIE
|
474769e3c4266deefcb7dd5daf802a1306bc7c99
|
[
"MIT"
] | 28
|
2019-11-02T07:06:26.000Z
|
2022-03-24T09:20:58.000Z
|
errorify/parse_verbs.py
|
wangwang110/PIE
|
474769e3c4266deefcb7dd5daf802a1306bc7c99
|
[
"MIT"
] | 38
|
2019-12-05T06:01:54.000Z
|
2022-03-21T09:35:23.000Z
|
import pickle
verbs_file = "morphs.txt"
def expand_dict(d):
    """Build a symmetric word-form map: every form maps to its relatives.

    Given ``d`` mapping a base word to the set of its morphological forms,
    return a dict in which each word/form maps to a list of every other
    related word or form (never including itself).
    """
    result = {}
    for key in d:
        # The base word maps to all of its forms except itself.
        if key in result:
            result[key] = result[key].union(d[key].difference({key}))
        else:
            result[key] = d[key].difference({key})
        for item in d[key]:
            if item == key:
                # Already handled above (key is guaranteed to be in result;
                # the original's `item == key` branches were no-ops/dead code).
                continue
            # Each form maps to the other forms plus the base word.
            related = d[key].difference({item}).union({key})
            if item in result:
                result[item] = result[item].union(related)
            else:
                result[item] = related
    # Convert the sets to lists for pickling / downstream use.
    for key in result:
        result[key] = list(result[key])
    return result
# Parse the morphology file: each line is expected to have three
# whitespace-separated fields -- "form base <tag>" (TODO confirm the
# third field's meaning against morphs.txt).
with open(verbs_file,"r") as ip_file:
    ip_lines = ip_file.readlines()

words = {}
for line in ip_lines:
    line = line.strip().split()
    if len(line) != 3:
        # Unexpected row shape; print it for inspection (processing continues).
        print(line)
    word = line[1]
    word_form = line[0]
    if word in words:
        words[word].add(word_form)
    else:
        words[word]={word_form}

# Expand to a symmetric form map and persist it.
# NOTE(review): the dump file handle is never explicitly closed.
result = expand_dict(words)
pickle.dump(result,open("verbs.p","wb"))
| 22.333333
| 78
| 0.633831
|
4a0b3728fdf621269ec9150c733834a4e955653e
| 288
|
py
|
Python
|
project/db/admin.py
|
Moshood-Wale/Bouncer-E-commerce-api
|
5f07bab970bf3d20f3e98ae8b7e4e284ee52baa8
|
[
"MIT"
] | null | null | null |
project/db/admin.py
|
Moshood-Wale/Bouncer-E-commerce-api
|
5f07bab970bf3d20f3e98ae8b7e4e284ee52baa8
|
[
"MIT"
] | null | null | null |
project/db/admin.py
|
Moshood-Wale/Bouncer-E-commerce-api
|
5f07bab970bf3d20f3e98ae8b7e4e284ee52baa8
|
[
"MIT"
] | 1
|
2022-02-09T14:13:20.000Z
|
2022-02-09T14:13:20.000Z
|
from django.contrib import admin
from db.models import *
# Register your models here.
# Each store model is exposed in the Django admin with the default
# ModelAdmin options; no customization is needed yet.
admin.site.register(Product)
admin.site.register(User)
admin.site.register(Category)
admin.site.register(Cart)
admin.site.register(SubCategory)
admin.site.register(Payment)
admin.site.register(Brand)
| 22.153846
| 32
| 0.809028
|
4a0b38a6199a79d381d370a3a79e7b3279337c5d
| 9,648
|
py
|
Python
|
src/natives/python/repeat_lib.py
|
hptruong93/Repeat
|
b1306e13e529e7317a57faf36b86e113cb893cdf
|
[
"MIT"
] | 13
|
2015-04-13T03:11:32.000Z
|
2021-08-22T11:16:43.000Z
|
src/natives/python/repeat_lib.py
|
hptruong93/Repeat
|
b1306e13e529e7317a57faf36b86e113cb893cdf
|
[
"MIT"
] | 1
|
2017-05-29T14:38:49.000Z
|
2017-05-29T15:17:14.000Z
|
src/natives/python/repeat_lib.py
|
hptruong93/Repeat
|
b1306e13e529e7317a57faf36b86e113cb893cdf
|
[
"MIT"
] | 1
|
2016-09-09T23:22:37.000Z
|
2016-09-09T23:22:37.000Z
|
import json
import os
import sys
import signal
import traceback
import time
import imp
import socket
import select
import threading
import Queue
import specifications
import shared_memory_request
import keyboard_request
import mouse_request
import tool_request
import system_host_request
import system_client_request
class RepeatClient(object):
    """TCP client for the Repeat server.

    Maintains one socket, a writer thread draining ``send_queue`` (sending
    keep-alives when idle) and a reader thread dispatching incoming
    delimiter-framed JSON messages either to waiting callers (via
    ``synchronization_objects``) or to the TaskManager.
    """

    """Server will terminate connection if not received anything after this period of time"""
    REPEAT_SERVER_TIMEOUT_SEC = 10

    """Delimiter between messages (Receiver must receive at least one delimiter between two messages. However, two or more is also acceptable)"""
    MESSAGE_DELIMITER = '\x02'

    """Client must send keep alive message to maintain the connection with server.
    Therefore the client timeout has to be less than server timeout"""
    REPEAT_CLIENT_TIMEOUT_SEC = REPEAT_SERVER_TIMEOUT_SEC * 0.3

    def __init__(self, host = 'localhost', port = 9999):
        super(RepeatClient, self).__init__()
        self.host = host
        self.port = port
        self.socket = None                     # created in start()
        self.is_terminated = False             # signals both worker loops to exit
        self.synchronization_objects = {}      # message id -> Event (then -> result)
        self.send_queue = Queue.Queue()        # outgoing JSON payloads

        self.task_manager = TaskManager(self)
        # Request facades; each wraps one request family against this client.
        self.system = system_host_request.SystemHostRequest(self)
        self.system_client = system_client_request.SystemClientRequest(self)
        self.shared_memory = shared_memory_request.SharedMemoryRequest(self)
        self.mouse = mouse_request.MouseRequest(self)
        self.key = keyboard_request.KeyboardRequest(self)
        self.tool = tool_request.ToolRequest(self)

        # Buffer holding a partially received message between recv() calls.
        self._previous_message = []

    def _clear_queue(self):
        # Drain any stale outgoing messages.
        while not self.send_queue.empty():
            self.send_queue.get()

    def start(self):
        """Connect to the server and identify this client."""
        self._clear_queue()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((self.host, self.port))
        self.system_client.identify()
        print "Successfully started python client"

    def stop(self):
        """Close the connection and discard pending messages."""
        self._clear_queue()
        self.socket.close()

    def process_write(self):
        """Writer loop: send queued payloads, or a keep-alive when idle."""
        while not self.is_terminated:
            data = None
            try:
                # Block at most the keep-alive interval waiting for work.
                data = self.send_queue.get(block = True, timeout = RepeatClient.REPEAT_CLIENT_TIMEOUT_SEC)
            except Queue.Empty as e:
                pass

            keep_alive = data is None
            if keep_alive:
                self.system.keep_alive()
            else:
                # Frame the JSON payload with double delimiters on each side.
                to_send = '%s%s%s%s%s' % (RepeatClient.MESSAGE_DELIMITER, RepeatClient.MESSAGE_DELIMITER, \
                    json.dumps(data), RepeatClient.MESSAGE_DELIMITER, RepeatClient.MESSAGE_DELIMITER)
                self.socket.sendall(to_send)
        print "Write process terminated..."

    def _extract_messages(self, received_data):
        """Split a raw chunk into complete messages; buffer any partial tail."""
        output = []
        for char in received_data:
            if char == RepeatClient.MESSAGE_DELIMITER:
                if len(self._previous_message) > 0:
                    output.append(''.join(self._previous_message))
                    del self._previous_message[:]
            else:
                self._previous_message.append(char)
        return output

    def process_read(self):
        """Reader loop: receive, parse and dispatch server messages."""
        while not self.is_terminated:
            data = None
            try:
                # Poll so the loop can notice is_terminated periodically.
                ready = select.select([self.socket], [], [], RepeatClient.REPEAT_CLIENT_TIMEOUT_SEC)
                if ready[0]:
                    data = self.socket.recv(1024)
                else:
                    data = None
            except socket.error as se:
                # Socket failure ends the reader loop.
                print traceback.format_exc()
                break
            except Exception as e:
                print traceback.format_exc()

            if data is None or len(data.strip()) == 0:
                continue

            messages = self._extract_messages(data)
            for message in messages:
                try:
                    parsed = json.loads(message)
                    message_type = parsed['type']
                    message_id = parsed['id']
                    message_content = parsed['content']

                    if message_id in self.synchronization_objects:
                        # A caller is blocked waiting on this id: hand the
                        # result over and wake it up.
                        returned_object = parsed['content']['message']
                        cv = self.synchronization_objects.pop(message_id)
                        if returned_object is None or len(returned_object) > 0: #Give the output of this to the caller
                            self.synchronization_objects[message_id] = returned_object
                        cv.set()
                    else:
                        if message_type != 'task':
                            print "Unknown id %s. Drop message..." % message_id
                            continue

                        # Server-initiated task: run it on its own thread and
                        # queue the reply (if any) for the writer loop.
                        def to_run():
                            processing_id = message_id
                            processing_content = message_content
                            processing_type = message_type

                            reply = self.task_manager.process_message(processing_id, processing_content)
                            if reply is None:
                                return

                            self.send_queue.put({
                                'type' : processing_type,
                                'id' : processing_id,
                                'content' : reply
                            })

                        running = threading.Thread(target=to_run)
                        running.start()
                except Exception as e:
                    print traceback.format_exc()
        print "Read process terminated..."
##############################################################################################################################
def generate_reply(status, message):
    """Build the standard reply payload sent back to the Repeat server."""
    reply = {}
    reply['status'] = status
    reply['message'] = message
    return reply
class UserDefinedTask(object):
    """A user-supplied Python task loaded from a file and executed on demand.

    The file must define an ``action(repeat_lib, invoker)`` function; it is
    loaded lazily on first run and cached in ``executing_module``.
    """

    def __init__(self, repeat_lib, file_name):
        super(UserDefinedTask, self).__init__()
        self.file_name = file_name
        self.repeat_lib = repeat_lib
        self.executing_module = None   # lazily loaded module object

    """
    invoker is the hotkey that invoke this action
    """
    def run(self, invoker):
        print "Running task with file name %s" % self.file_name
        parent_dir = os.path.dirname(self.file_name)
        raw_file_name = os.path.basename(self.file_name)
        raw_file_name = os.path.splitext(raw_file_name)[0] #Remove file extension

        # Load the module once and reuse it on subsequent runs.
        if self.executing_module is None:
            self.executing_module = imp.load_source(raw_file_name, self.file_name)
        self.executing_module.action(self.repeat_lib, invoker)
class TaskManager(object):
    """Registry of UserDefinedTask objects, driven by server messages.

    Message dispatch (process_message) supports create_task, run_task and
    remove_task; every handler returns a generate_reply() payload.
    """

    def __init__(self, repeat_lib):
        super(TaskManager, self).__init__()
        assert repeat_lib is not None
        self.repeat_lib = repeat_lib
        self.tasks = {}       # task id -> UserDefinedTask
        self.base_id = 0      # monotonically increasing id source

    def _next_id(self):
        # Ids start at 1; not thread-safe -- presumably only called from
        # the reader-dispatch path (TODO confirm).
        self.base_id += 1
        return self.base_id

    def process_message(self, message_id, message):
        """Dispatch one task message; returns a reply dict or None if unknown."""
        action = message['task_action']
        params = message['parameters']

        if action == 'create_task':
            return self.create_task(*params)
        elif action == 'run_task':
            return self.run_task(*params)
        elif action == 'remove_task':
            return self.remove_task(*params)

        return None

    def sync_tasks(self):
        # Placeholder: no synchronization with the server implemented yet.
        pass

    def create_task(self, file_name):
        """Register a new task backed by an executable file on disk."""
        if not os.path.isfile(file_name):
            return generate_reply(specifications.FAILURE, 'File %s does not exist' % file_name)
        elif not os.access(file_name, os.X_OK):
            return generate_reply(specifications.FAILURE, 'File %s is not executable' % file_name)

        next_id = self._next_id()
        self.tasks[next_id] = UserDefinedTask(self.repeat_lib, file_name)
        return generate_reply(specifications.SUCCESS, {
                'id' : next_id,
                'file_name' : file_name
            })

    def run_task(self, task_id, invoker):
        """Execute a registered task; fails if the id is unknown."""
        if task_id not in self.tasks:
            return generate_reply(specifications.FAILURE, 'Cannot find task id %s' % task_id)

        self.tasks[task_id].run(invoker)
        return generate_reply(specifications.SUCCESS, {
                'id' : task_id,
                'file_name' : self.tasks[task_id].file_name
            })

    def remove_task(self, task_id):
        """Remove a task; removing an unknown id still reports success."""
        if task_id not in self.tasks:
            return generate_reply(specifications.SUCCESS, {
                    'id' : task_id,
                    'file_name' : ''
                })

        removing = self.tasks.pop(task_id)
        return generate_reply(specifications.SUCCESS, {
                'id' : task_id,
                'file_name' : removing.file_name
            })
##############################################################################################################################
if __name__ == "__main__":
    client = RepeatClient()
    client.start()

    # Reader and writer run on their own threads; the main thread only
    # waits for Ctrl-C / SIGTERM and then shuts everything down.
    write_thread = threading.Thread(target=client.process_write)
    read_thread = threading.Thread(target=client.process_read)

    def terminate_repeat_client(*args, **kwargs):
        # Signal both loops to exit, wait for them, then close the socket.
        client.is_terminated = True
        write_thread.join()
        read_thread.join()
        client.stop()
        sys.exit(0)

    signal.signal(signal.SIGTERM, terminate_repeat_client)

    write_thread.start()
    read_thread.start()
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        print "Terminating repeat client..."
        terminate_repeat_client()
| 34.212766
| 145
| 0.570377
|
4a0b38e231817cf6040b27f2d88b63d93a547406
| 798
|
py
|
Python
|
app/core/admin.py
|
afelopez/growing-recipies
|
89e2690f37701ea6aaa32123120b266cbe76a250
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
afelopez/growing-recipies
|
89e2690f37701ea6aaa32123120b266cbe76a250
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
afelopez/growing-recipies
|
89e2690f37701ea6aaa32123120b266cbe76a250
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models


class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""
    ordering = ['id']
    list_display = ['email', 'name']
    # Change-page layout: credentials, personal info, permissions, dates.
    fieldsets = (
        (None, {"fields": ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {'fields': ('is_active', 'is_staff', 'is_superuser')}
        ),
        (_('Important dates'), {'fields': ('last_login',)})
    )
    # Add-page layout: email plus the two password-confirmation fields.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )


admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
| 26.6
| 65
| 0.571429
|
4a0b38f174093110c00e780df9f4cb79ae38857a
| 5,267
|
py
|
Python
|
ros/src/twist_controller/dbw_node.py
|
nialldevlin/Self-driving-car-complete
|
e2b4288a0ebcf2e370fae78809cdafbff4a79466
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
nialldevlin/Self-driving-car-complete
|
e2b4288a0ebcf2e370fae78809cdafbff4a79466
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
nialldevlin/Self-driving-car-complete
|
e2b4288a0ebcf2e370fae78809cdafbff4a79466
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
    """Drive-by-wire node: converts twist commands into actuator commands.

    Subscribes to the target twist, the current velocity and the
    DBW-enabled flag, runs the Controller at 50Hz and publishes throttle,
    brake and steering only while DBW is engaged (so the PID controller
    does not accumulate error while a human is driving).
    """

    def __init__(self):
        rospy.init_node('dbw_node')

        # Vehicle-specific parameters (defaults match the reference car).
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)

        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)

        self.controller = Controller(vehicle_mass=vehicle_mass,
                                     fuel_capacity=fuel_capacity,
                                     brake_deadband=brake_deadband,
                                     decel_limit=decel_limit,
                                     accel_limit=accel_limit,
                                     wheel_radius=wheel_radius,
                                     wheel_base=wheel_base,
                                     steer_ratio=steer_ratio,
                                     max_lat_accel=max_lat_accel,
                                     max_steer_angle=max_steer_angle)

        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)

        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = None
        self.linear_vel = None
        self.angular_vel = None
        self.throttle = self.steering = self.brake = 0

        self.loop()

    def loop(self):
        """Run the 50Hz control loop until shutdown."""
        rate = rospy.Rate(50)  # 50Hz
        while not rospy.is_shutdown():
            # Wait until we have velocity and twist data before controlling.
            if None not in (self.current_vel, self.linear_vel, self.angular_vel):
                self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
                                                                                   self.dbw_enabled,
                                                                                   self.linear_vel,
                                                                                   self.angular_vel)
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()

    def dbw_enabled_cb(self, msg):
        # BUG FIX: store the boolean payload, not the Bool message object.
        # A std_msgs/Bool message is always truthy, so the original
        # `self.dbw_enabled = msg` made `if self.dbw_enabled:` publish
        # even when DBW was disengaged.
        self.dbw_enabled = msg.data

    def twist_cb(self, msg):
        # Target linear/angular velocity from the waypoint follower.
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def velocity_cb(self, msg):
        # Measured forward speed of the vehicle.
        self.current_vel = msg.twist.linear.x

    def publish(self, throttle, brake, steer):
        """Publish one set of throttle/steer/brake actuator commands."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    # Constructing the node starts the 50Hz control loop (blocks until shutdown).
    DBWNode()
| 42.136
| 101
| 0.612113
|
4a0b3919ac8aa179abff64344b2fac6bad742e16
| 7,793
|
py
|
Python
|
spekev2_verification_testsuite/helpers/utils.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
spekev2_verification_testsuite/helpers/utils.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
spekev2_verification_testsuite/helpers/utils.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
import re
import base64
from urllib.parse import urlparse
import xml.etree.ElementTree as ET
from io import StringIO
from collections import Counter
import m3u8
import requests
from aws_requests_auth.aws_auth import AWSRequestsAuth
from aws_requests_auth import boto_utils
# FILES USED FOR TESTS
# SpekeV2 test requests for Preset Video 1 and Preset Audio 1 with no Key rotation
GENERIC_WIDEVINE_TEST_FILE = "1_generic_spekev2_dash_widevine_preset_video_1_audio_1_no_rotation.xml"
SPEKEV1_STYLE_REQUEST_WITH_SPEKEV2_HEADERS = "2_speke_v1_style_implementation.xml"
WRONG_VERSION_TEST_FILE = "3_negative_wrong_version_spekev2_dash_widevine.xml" # Wrong CPIX version in request
NEGATIVE_PRESET_SHARED_VIDEO = "4_spekev2_negative_preset_shared_video.xml"
NEGATIVE_PRESET_SHARED_AUDIO = "5_spekev2_negative_preset_shared_audio.xml"
# TEST CASES
TEST_CASE_1_P_V_1_A_1 = "test_case_1_p_v_1_a_1"
TEST_CASE_2_P_V_3_A_2 = "test_case_2_p_v_3_a_2"
TEST_CASE_3_P_V_5_A_3 = "test_case_3_p_v_5_a_3"
TEST_CASE_4_P_V_8_A_2 = "test_case_4_p_v_8_a_2"
TEST_CASE_5_P_V_2_A_UNENC = "test_case_5_p_v_2_a_unencrypted"
TEST_CASE_6_P_V_UNENC_A_2 = "test_case_6_p_v_unencrypted_a_2"
# PRESET TEST CASES FILE NAMES
PRESETS_WIDEVINE = "1_widevine.xml"
PRESETS_PLAYREADY = "2_playready.xml"
PRESETS_FAIRPLAY = "3_fairplay.xml"
PRESETS_WIDEVINE_PLAYREADY = "4_widevine_playready.xml"
PRESETS_WIDEVINE_FAIRPLAY = "5_widevine_fairplay.xml"
PRESETS_PLAYREADY_FAIRPLAY = "6_playready_fairplay.xml"
PRESETS_WIDEVINE_PLAYREADY_FAIRPLAY = "7_widevine_playready_fairplay.xml"
SPEKE_V2_REQUEST_HEADERS = {"x-speke-version": "2.0", 'Content-type': 'application/xml'}
SPEKE_V2_MANDATORY_NAMESPACES = {
"cpix": "urn:dashif:org:cpix",
"pskc": "urn:ietf:params:xml:ns:keyprov:pskc"
}
SPEKE_V2_CONTENTKEY_COMMONENCRYPTIONSCHEME_ALLOWED_VALUES = ["cenc", "cbc1", "cens", "cbcs"]
SPEKE_V2_SUPPORTED_INTENDED_TRACK_TYPES = ['VIDEO', 'AUDIO']
SPEKE_V2_SUPPORTED_INTENDED_TRACK_TYPES_VIDEO = [
"VIDEO",
"SD",
"HD",
"UHD",
"SD+HD1",
"HD1",
"HD2",
"UHD1",
"UHD2"
]
SPEKE_V2_SUPPORTED_INTENDED_TRACK_TYPES_AUDIO = [
"AUDIO",
"STEREO_AUDIO",
"MULTICHANNEL_AUDIO",
"MULTICHANNEL_AUDIO_3_6",
"MULTICHANNEL_AUDIO_7"
]
SPEKE_V2_MANDATORY_ELEMENTS_LIST = [
'./{urn:dashif:org:cpix}ContentKeyList',
'./{urn:dashif:org:cpix}DRMSystemList',
'./{urn:dashif:org:cpix}ContentKeyUsageRuleList',
'./{urn:dashif:org:cpix}ContentKey',
'./{urn:dashif:org:cpix}DRMSystem',
'./{urn:dashif:org:cpix}ContentKeyUsageRule'
]
SPEKE_V2_MANDATORY_FILTER_ELEMENTS_LIST = [
'./{urn:dashif:org:cpix}VideoFilter',
'./{urn:dashif:org:cpix}AudioFilter'
]
SPEKE_V2_MANDATORY_ATTRIBUTES_LIST = [
['./{urn:dashif:org:cpix}ContentKey', ['kid', 'commonEncryptionScheme']],
['./{urn:dashif:org:cpix}DRMSystem', ['kid', 'systemId']],
['./{urn:dashif:org:cpix}ContentKeyUsageRule', ['kid', 'intendedTrackType']],
]
SPEKE_V2_GENERIC_RESPONSE_ELEMENT_LIST = [
'{urn:ietf:params:xml:ns:keyprov:pskc}PlainValue',
'{urn:ietf:params:xml:ns:keyprov:pskc}Secret',
'{urn:dashif:org:cpix}Data',
'{urn:dashif:org:cpix}ContentKey',
'{urn:dashif:org:cpix}ContentKeyList',
'{urn:dashif:org:cpix}PSSH',
'{urn:dashif:org:cpix}DRMSystem',
'{urn:dashif:org:cpix}DRMSystemList',
'{urn:dashif:org:cpix}VideoFilter',
'{urn:dashif:org:cpix}ContentKeyUsageRule',
'{urn:dashif:org:cpix}AudioFilter',
'{urn:dashif:org:cpix}ContentKeyUsageRuleList',
'{urn:dashif:org:cpix}CPIX'
]
SPEKE_V2_GENERIC_RESPONSE_ATTRIBS_DICT = {
'CPIX': ['contentId', 'version'],
'ContentKey': ['kid', 'commonEncryptionScheme'],
'DRMSystem': ['kid', 'systemId'],
'ContentKeyUsageRule': ['kid', 'intendedTrackType']
}
SPEKE_V2_HLS_SIGNALING_DATA_PLAYLIST_MANDATORY_ATTRIBS = ['media', 'master']
## DRM SYSTEM ID LIST
WIDEVINE_SYSTEM_ID = 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed'
PLAYREADY_SYSTEM_ID = '9a04f079-9840-4286-ab92-e65be0885f95'
FAIRPLAY_SYSTEM_ID = '94ce86fb-07ff-4f43-adb8-93d2fa968ca2'
HLS_SIGNALING_DATA_KEYFORMAT = {
'fairplay': 'com.apple.streamingkeydelivery',
'playready': 'com.microsoft.playready'
}
def read_xml_file_contents(test_type, filename):
    """Read a SPEKE request template and return its contents as UTF-8 bytes.

    Templates live under ./spekev2_requests/<test_type>/; surrounding
    whitespace in ``filename`` is stripped before lookup.
    """
    path = f"./spekev2_requests/{test_type}/{filename.strip()}"
    with open(path, "r") as handle:
        contents = handle.read()
    return contents.encode('utf-8')
def speke_v2_request(speke_url, request_data):
    """POST a SPEKE v2 CPIX request to the key server; return the Response.

    The request is SigV4-signed (see get_aws_auth) and carries the
    mandatory x-speke-version / Content-type headers.
    """
    return requests.post(
        url=speke_url,
        auth=get_aws_auth(speke_url),
        data=request_data,
        headers=SPEKE_V2_REQUEST_HEADERS
    )
def get_aws_auth(url):
    """Build SigV4 auth for an API Gateway execute-api endpoint URL.

    The region is parsed from the hostname; a URL that does not match the
    execute-api pattern raises AttributeError (re.match returns None).
    """
    api_gateway_netloc = urlparse(url).netloc
    api_gateway_region = re.match(
        r"[a-z0-9]+\.execute-api\.(.+)\.amazonaws\.com",
        api_gateway_netloc
    ).group(1)
    return AWSRequestsAuth(
        aws_host=api_gateway_netloc,
        aws_region=api_gateway_region,
        aws_service='execute-api',
        **boto_utils.get_credentials()
    )
def send_speke_request(test_xml_folder, test_xml_file, spekev2_url):
    """Load a request template from disk, POST it, and return the body text."""
    test_request_data = read_xml_file_contents(test_xml_folder, test_xml_file)
    response = speke_v2_request(spekev2_url, test_request_data)
    return response.text
def remove_element(xml_request, element_to_remove, kid_value=""):
    """Remove matching child elements everywhere in the tree (in place).

    Walks every node of ``xml_request`` and removes its direct children
    matching ``element_to_remove`` (an ElementTree path). When
    ``kid_value`` is non-empty, only children whose ``kid`` attribute
    equals it are removed.

    Returns the mutated ``xml_request`` for convenience.
    """
    # Materialize the iterator first: removing children while iter() is
    # still walking the tree can silently skip elements.
    for node in list(xml_request.iter()):
        for child in node.findall(element_to_remove):
            # No kid filter -> every match goes; otherwise only the child
            # whose kid attribute matches.
            if not kid_value or child.attrib.get("kid") == kid_value:
                node.remove(child)
    return xml_request
def send_modified_speke_request_with_element_removed(spekev2_url, xml_request_str, element_to_remove):
    """POST the CPIX request with one element type stripped out.

    Used by negative tests: parse the request, remove every occurrence of
    ``element_to_remove``, re-serialize and send it.
    """
    request_cpix = ET.fromstring(xml_request_str)
    modified_cpix_request = remove_element(request_cpix, element_to_remove)
    modified_cpix_request_str = ET.tostring(modified_cpix_request, method="xml")
    response = speke_v2_request(spekev2_url, modified_cpix_request_str)
    return response
def send_modified_speke_request_with_matching_elements_kid_values_removed(spekev2_url, xml_request_str, elements_to_remove, kid_values):
    """POST the CPIX request with specific (element, kid) pairs stripped out.

    For every element path in ``elements_to_remove`` and every kid in
    ``kid_values``, removes the matching children before re-sending.
    """
    request_cpix = ET.fromstring(xml_request_str)
    for elem in elements_to_remove:
        for kid in kid_values:
            remove_element(request_cpix, elem, kid)
    modified_cpix_request_str = ET.tostring(request_cpix, method="xml")
    response = speke_v2_request(spekev2_url, modified_cpix_request_str)
    return response
def count_tags(xml_content):
    """Return a dict mapping each XML tag to its occurrence count.

    ``xml_content`` is an XML document as a string; the whole tree is
    parsed and every element (including the root) is counted.
    """
    # iterparse always yields (event, element) pairs, so count the tags
    # directly -- the original's non-tuple branch was dead code, and
    # dict(Counter(...)) replaces the keys/values zip dance.
    tags = (element.tag for _, element in ET.iterparse(StringIO(xml_content)))
    return dict(Counter(tags))
def count_child_element_tags_for_element(parent_element):
    """Return a dict mapping each direct child's tag to its count."""
    # Iterating an Element yields its direct children only.
    counts = Counter(child.tag for child in parent_element)
    return dict(counts)
def count_child_element_tags_in_parent(root_cpix, parent_element, child_element):
    """Count ``child_element`` matches under the first ``parent_element``."""
    parent = root_cpix.find(parent_element)
    matches = parent.findall(child_element)
    return len(matches)
def parse_ext_x_key_contents(text_in_bytes):
    """Base64-decode HLS signaling data and parse it as an m3u8 playlist."""
    decoded_text = decode_b64_bytes(text_in_bytes)
    return m3u8.loads(decoded_text)
def parse_ext_x_session_key_contents(text_in_bytes):
    """Decode master-playlist signaling data and parse it as an m3u8 playlist.

    EXT-X-SESSION-KEY is rewritten to EXT-X-KEY first so the m3u8 parser
    exposes the keys through its normal key attributes.
    """
    decoded_text = decode_b64_bytes(text_in_bytes).replace("#EXT-X-SESSION-KEY:METHOD", "#EXT-X-KEY:METHOD")
    return m3u8.loads(decoded_text)
def decode_b64_bytes(text_in_bytes):
    """Base64-decode the input and return it as a UTF-8 string."""
    raw = base64.b64decode(text_in_bytes)
    return raw.decode('utf-8')
| 33.882609
| 136
| 0.744771
|
4a0b39392ce9248a0a42ed95686bd0deac9405cf
| 442
|
py
|
Python
|
recipes/android-ndk/all/test_package/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 1
|
2021-02-07T12:33:15.000Z
|
2021-02-07T12:33:15.000Z
|
recipes/android-ndk/all/test_package/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 5
|
2021-03-25T01:49:56.000Z
|
2021-03-28T16:42:12.000Z
|
recipes/android-ndk/all/test_package/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 4
|
2021-06-03T23:24:03.000Z
|
2022-03-03T17:16:09.000Z
|
from conans import ConanFile, tools
class TestPackgeConan(ConanFile):
    """Test package verifying the android-ndk toolchain is runnable."""
    settings = "os", "arch"

    def build(self):
        # Nothing to build; the package only exercises the NDK at test time.
        pass

    def test(self):
        # ndk-build cannot run on the build machine when cross building.
        if tools.cross_building(self):
            return
        if self.settings.os == "Windows":
            self.run("ndk-build.cmd --version", run_environment=True)
        else:
            self.run("ndk-build --version", run_environment=True)
| 29.466667
| 73
| 0.606335
|
4a0b396f94dc8c4bd092d3c36dbb5bc46364a210
| 5,042
|
py
|
Python
|
imagenet_training/data/imagenet/download_images.py
|
daniilgaltsev/ImageNet-Training
|
9ca1d26cde07782398c7f366d5bf510c9e988236
|
[
"MIT"
] | null | null | null |
imagenet_training/data/imagenet/download_images.py
|
daniilgaltsev/ImageNet-Training
|
9ca1d26cde07782398c7f366d5bf510c9e988236
|
[
"MIT"
] | null | null | null |
imagenet_training/data/imagenet/download_images.py
|
daniilgaltsev/ImageNet-Training
|
9ca1d26cde07782398c7f366d5bf510c9e988236
|
[
"MIT"
] | null | null | null |
"""A module containing function to download images."""
import asyncio
import os
from pathlib import Path
from typing import Dict, List, Union
import aiofiles
import aiohttp
async def _download_image(
    url: str,
    filename: Union[Path, str],
    semaphore: asyncio.Semaphore,
    timeout: float
) -> bool:
    """Download an image from a given url to a given path.

    Args:
        url: A url from which to download.
        filename: A path to the file where to save the image.
        semaphore: A semaphore to limit concurrent download-writes.
        timeout: Time until download is abandoned.

    Returns:
        A boolean, which indicates if download-write was successful.
    """
    # Skip files downloaded by a previous (possibly interrupted) run.
    if os.path.exists(filename):
        return True
    try:
        async with semaphore:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, timeout=timeout, raise_for_status=True) as response:
                    if response.status != 200:
                        return False
                    f = await aiofiles.open(filename, "wb+")
                    await f.write(await response.read())
                    await f.close()
                    return True
    except Exception as e:  # pylint: disable=broad-except
        # Bug fix: the failure message previously did not name the target
        # file, which made failed downloads impossible to trace on disk.
        print(f"Unable to download image {url} to {filename} due to {e}.")
        return False
async def _download_synset_images(
    images_data_dirname: Path,
    urls: List[str],
    synset: str,
    n_to_download: int,
    semaphore: asyncio.Semaphore,
    timeout: float
) -> int:
    """Downloads images using given urls for a synset.

    Args:
        images_data_dirname: A path where to save images.
        urls: A list of urls for downloading (sorted in place, flickr first).
        synset: A name of the synset for which images are downloaded.
        n_to_download: A number of images to download.
        semaphore: A semaphore to limit concurrent download-writes.
        timeout: Time until download is abandoned.

    Returns:
        A number of downloaded images.
    """
    # Try flickr-hosted urls first; non-flickr urls sort after them.
    urls.sort(key=lambda url: url.find('flickr') == -1)
    downloaded = 0
    last_tried = 0
    while downloaded < n_to_download and last_tried < len(urls):
        # Bug fix: clamp the batch end to len(urls). Previously, when fewer
        # urls remained than images still needed, urls[idx] raised IndexError.
        end = min(last_tried + (n_to_download - downloaded), len(urls))
        print(f"{synset}: {downloaded}/{n_to_download} next batch {last_tried}-{end}")
        results = await asyncio.gather(
            *[_download_image(
                urls[idx],
                images_data_dirname / "{}_{}.jpg".format(synset, idx),
                semaphore,
                timeout
            ) for idx in range(last_tried, end)]
        )
        # results are booleans; summing counts the successes.
        downloaded += sum(results)
        last_tried = end
    print(f"{synset}: done, downloaded {downloaded}/{n_to_download}.")
    return downloaded
async def _download_subsampled_images(
    images_dirname: Path,
    synsets: List[str],
    synsets_to_urls: Dict[str, List[str]],
    images_per_class: int,
    max_concurrent: int,
    timeout: float
) -> List[int]:
    """Downloads images for all synsets concurrently.

    Args:
        images_dirname: A path where to save images.
        synsets: A list of synsets for which to download images.
        synsets_to_urls: A dict of synsets to their corresponding lists of urls.
        images_per_class: A number of images per synset/class to download.
        max_concurrent: A maximum number of concurrent image download-writes.
        timeout: Time until download is abandoned.

    Returns:
        A list describing number of images per synsets that were downloaded.
    """
    images_dirname.mkdir(exist_ok=True, parents=True)
    # One shared semaphore bounds in-flight downloads across all synsets.
    semaphore = asyncio.Semaphore(max_concurrent)
    download_tasks = [
        _download_synset_images(
            images_dirname,
            synsets_to_urls[synset],
            synset,
            images_per_class,
            semaphore,
            timeout
        )
        for synset in synsets
    ]
    per_synset_counts = await asyncio.gather(*download_tasks)
    return list(per_synset_counts)
def download_subsampled_images(
    images_dirname: Path,
    synsets: List[str],
    synsets_to_urls: Dict[str, List[str]],
    images_per_class: int,
    max_concurrent: int,
    timeout: float
) -> List[int]:
    """Synchronous entry point: download images for the given synsets.

    Args:
        images_dirname: A path where to save images.
        synsets: A list of synsets for which to download images.
        synsets_to_urls: A dict of synsets to their corresponding lists of urls.
        images_per_class: A number of images per synset/class to download.
        max_concurrent: A maximum number of concurrent image download-writes.
        timeout: Time until download is abandoned.

    Returns:
        A list describing number of images per synsets that were downloaded.
    """
    # Drive the async implementation to completion on a fresh event loop.
    return asyncio.run(_download_subsampled_images(
        images_dirname=images_dirname,
        synsets=synsets,
        synsets_to_urls=synsets_to_urls,
        images_per_class=images_per_class,
        max_concurrent=max_concurrent,
        timeout=timeout
    ))
| 32.74026
| 96
| 0.644585
|
4a0b39e8150c88bf80466c2273d33646b71c11ea
| 28,778
|
py
|
Python
|
mesonbuild/modules/pkgconfig.py
|
mscofield0/meson
|
007c4659c2154755fc1f57d415afc8a736f81af2
|
[
"Apache-2.0"
] | 2
|
2021-12-06T21:07:10.000Z
|
2021-12-06T21:20:39.000Z
|
mesonbuild/modules/pkgconfig.py
|
eli-schwartz/meson
|
40343fae9fb0acae6509753a5879bf4964276053
|
[
"Apache-2.0"
] | 1
|
2021-09-03T18:10:25.000Z
|
2021-09-03T18:10:25.000Z
|
mesonbuild/modules/pkgconfig.py
|
eli-schwartz/meson
|
40343fae9fb0acae6509753a5879bf4964276053
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import PurePath
import os
import typing as T
from . import ExtensionModule
from . import ModuleReturnValue
from .. import build
from .. import dependencies
from .. import mesonlib
from .. import mlog
from ..dependencies import ThreadDependency
from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs
if T.TYPE_CHECKING:
from . import ModuleState
# Module-level set of (library_name, object_name) pairs for which the
# deprecated "library passed via libraries kwarg" warning has already been
# emitted, so each pair warns at most once per meson invocation.
already_warned_objs = set()
class DependenciesHelper:
def __init__(self, state, name):
self.state = state
self.name = name
self.pub_libs = []
self.pub_reqs = []
self.priv_libs = []
self.priv_reqs = []
self.cflags = []
self.version_reqs = {}
self.link_whole_targets = []
def add_pub_libs(self, libs):
libs, reqs, cflags = self._process_libs(libs, True)
self.pub_libs = libs + self.pub_libs # prepend to preserve dependencies
self.pub_reqs += reqs
self.cflags += cflags
def add_priv_libs(self, libs):
libs, reqs, _ = self._process_libs(libs, False)
self.priv_libs = libs + self.priv_libs
self.priv_reqs += reqs
def add_pub_reqs(self, reqs):
self.pub_reqs += self._process_reqs(reqs)
def add_priv_reqs(self, reqs):
self.priv_reqs += self._process_reqs(reqs)
def _check_generated_pc_deprecation(self, obj):
if not hasattr(obj, 'generated_pc_warn'):
return
name = obj.generated_pc_warn[0]
if (name, obj.name) in already_warned_objs:
return
mlog.deprecation('Library', mlog.bold(obj.name), 'was passed to the '
'"libraries" keyword argument of a previous call '
'to generate() method instead of first positional '
'argument.', 'Adding', mlog.bold(obj.generated_pc),
'to "Requires" field, but this is a deprecated '
'behaviour that will change in a future version '
'of Meson. Please report the issue if this '
'warning cannot be avoided in your case.',
location=obj.generated_pc_warn[1])
already_warned_objs.add((name, obj.name))
def _process_reqs(self, reqs):
'''Returns string names of requirements'''
processed_reqs = []
for obj in mesonlib.listify(reqs):
if not isinstance(obj, str):
FeatureNew.single_use('pkgconfig.generate requirement from non-string object', '0.46.0', self.state.subproject)
if hasattr(obj, 'generated_pc'):
self._check_generated_pc_deprecation(obj)
processed_reqs.append(obj.generated_pc)
elif isinstance(obj, dependencies.PkgConfigDependency):
if obj.found():
processed_reqs.append(obj.name)
self.add_version_reqs(obj.name, obj.version_reqs)
elif isinstance(obj, str):
name, version_req = self.split_version_req(obj)
processed_reqs.append(name)
self.add_version_reqs(name, version_req)
elif isinstance(obj, dependencies.Dependency) and not obj.found():
pass
elif isinstance(obj, ThreadDependency):
pass
else:
raise mesonlib.MesonException('requires argument not a string, '
'library with pkgconfig-generated file '
'or pkgconfig-dependency object, got {obj!r}')
return processed_reqs
def add_cflags(self, cflags):
self.cflags += mesonlib.stringlistify(cflags)
def _process_libs(self, libs, public: bool):
libs = mesonlib.listify(libs)
processed_libs = []
processed_reqs = []
processed_cflags = []
for obj in libs:
if hasattr(obj, 'generated_pc'):
self._check_generated_pc_deprecation(obj)
processed_reqs.append(obj.generated_pc)
elif isinstance(obj, dependencies.PkgConfigDependency):
if obj.found():
processed_reqs.append(obj.name)
self.add_version_reqs(obj.name, obj.version_reqs)
elif isinstance(obj, dependencies.InternalDependency):
if obj.found():
processed_libs += obj.get_link_args()
processed_cflags += obj.get_compile_args()
self._add_lib_dependencies(obj.libraries, obj.whole_libraries, obj.ext_deps, public, private_external_deps=True)
elif isinstance(obj, dependencies.Dependency):
if obj.found():
processed_libs += obj.get_link_args()
processed_cflags += obj.get_compile_args()
elif isinstance(obj, build.SharedLibrary) and obj.shared_library_only:
# Do not pull dependencies for shared libraries because they are
# only required for static linking. Adding private requires has
# the side effect of exposing their cflags, which is the
# intended behaviour of pkg-config but force Debian to add more
# than needed build deps.
# See https://bugs.freedesktop.org/show_bug.cgi?id=105572
processed_libs.append(obj)
elif isinstance(obj, (build.SharedLibrary, build.StaticLibrary)):
processed_libs.append(obj)
# If there is a static library in `Libs:` all its deps must be
# public too, otherwise the generated pc file will never be
# usable without --static.
self._add_lib_dependencies(obj.link_targets,
obj.link_whole_targets,
obj.external_deps,
isinstance(obj, build.StaticLibrary) and public)
elif isinstance(obj, (build.CustomTarget, build.CustomTargetIndex)):
if not obj.is_linkable_target():
raise mesonlib.MesonException('library argument contains a not linkable custom_target.')
FeatureNew.single_use('custom_target in pkgconfig.generate libraries', '0.58.0', self.state.subproject)
processed_libs.append(obj)
elif isinstance(obj, str):
processed_libs.append(obj)
else:
raise mesonlib.MesonException(f'library argument of type {type(obj).__name__} not a string, library or dependency object.')
return processed_libs, processed_reqs, processed_cflags
def _add_lib_dependencies(self, link_targets, link_whole_targets, external_deps, public, private_external_deps=False):
add_libs = self.add_pub_libs if public else self.add_priv_libs
# Recursively add all linked libraries
for t in link_targets:
# Internal libraries (uninstalled static library) will be promoted
# to link_whole, treat them as such here.
if t.is_internal():
self._add_link_whole(t, public)
else:
add_libs([t])
for t in link_whole_targets:
self._add_link_whole(t, public)
# And finally its external dependencies
if private_external_deps:
self.add_priv_libs(external_deps)
else:
add_libs(external_deps)
def _add_link_whole(self, t, public):
# Don't include static libraries that we link_whole. But we still need to
# include their dependencies: a static library we link_whole
# could itself link to a shared library or an installed static library.
# Keep track of link_whole_targets so we can remove them from our
# lists in case a library is link_with and link_whole at the same time.
# See remove_dups() below.
self.link_whole_targets.append(t)
self._add_lib_dependencies(t.link_targets, t.link_whole_targets, t.external_deps, public)
def add_version_reqs(self, name, version_reqs):
if version_reqs:
if name not in self.version_reqs:
self.version_reqs[name] = set()
# Note that pkg-config is picky about whitespace.
# 'foo > 1.2' is ok but 'foo>1.2' is not.
# foo, bar' is ok, but 'foo,bar' is not.
new_vreqs = [s for s in mesonlib.stringlistify(version_reqs)]
self.version_reqs[name].update(new_vreqs)
def split_version_req(self, s):
for op in ['>=', '<=', '!=', '==', '=', '>', '<']:
pos = s.find(op)
if pos > 0:
return s[0:pos].strip(), s[pos:].strip()
return s, None
def format_vreq(self, vreq):
# vreq are '>=1.0' and pkgconfig wants '>= 1.0'
for op in ['>=', '<=', '!=', '==', '=', '>', '<']:
if vreq.startswith(op):
return op + ' ' + vreq[len(op):]
return vreq
def format_reqs(self, reqs):
result = []
for name in reqs:
vreqs = self.version_reqs.get(name, None)
if vreqs:
result += [name + ' ' + self.format_vreq(vreq) for vreq in vreqs]
else:
result += [name]
return ', '.join(result)
def remove_dups(self):
# Set of ids that have already been handled and should not be added any more
exclude = set()
# We can't just check if 'x' is excluded because we could have copies of
# the same SharedLibrary object for example.
def _ids(x):
if hasattr(x, 'generated_pc'):
yield x.generated_pc
if isinstance(x, build.Target):
yield x.get_id()
yield x
# Exclude 'x' in all its forms and return if it was already excluded
def _add_exclude(x):
was_excluded = False
for i in _ids(x):
if i in exclude:
was_excluded = True
else:
exclude.add(i)
return was_excluded
# link_whole targets are already part of other targets, exclude them all.
for t in self.link_whole_targets:
_add_exclude(t)
def _fn(xs, libs=False):
# Remove duplicates whilst preserving original order
result = []
for x in xs:
# Don't de-dup unknown strings to avoid messing up arguments like:
# ['-framework', 'CoreAudio', '-framework', 'CoreMedia']
known_flags = ['-pthread']
cannot_dedup = libs and isinstance(x, str) and \
not x.startswith(('-l', '-L')) and \
x not in known_flags
if not cannot_dedup and _add_exclude(x):
continue
result.append(x)
return result
# Handle lists in priority order: public items can be excluded from
# private and Requires can excluded from Libs.
self.pub_reqs = _fn(self.pub_reqs)
self.pub_libs = _fn(self.pub_libs, True)
self.priv_reqs = _fn(self.priv_reqs)
self.priv_libs = _fn(self.priv_libs, True)
# Reset exclude list just in case some values can be both cflags and libs.
exclude = set()
self.cflags = _fn(self.cflags)
class PkgConfigModule(ExtensionModule):
    """Implements meson's `pkgconfig` module: pkgconfig.generate() writes
    installed and uninstalled .pc files for a library target."""
    def __init__(self, interpreter):
        super().__init__(interpreter)
        self.methods.update({
            'generate': self.generate,
        })
    def _get_lname(self, l, msg, pcfile, is_custom_target):
        """Return the name to use in a '-l<name>' flag for target `l`,
        warning via `msg` when the compiler may not find it."""
        if is_custom_target:
            basename = os.path.basename(l.get_filename())
            name = os.path.splitext(basename)[0]
            if name.startswith('lib'):
                name = name[3:]
            return name
        # Nothing special
        if not l.name_prefix_set:
            return l.name
        # Sometimes people want the library to start with 'lib' everywhere,
        # which is achieved by setting name_prefix to '' and the target name to
        # 'libfoo'. In that case, try to get the pkg-config '-lfoo' arg correct.
        if l.prefix == '' and l.name.startswith('lib'):
            return l.name[3:]
        # If the library is imported via an import library which is always
        # named after the target name, '-lfoo' is correct.
        if isinstance(l, build.SharedLibrary) and l.import_filename:
            return l.name
        # In other cases, we can't guarantee that the compiler will be able to
        # find the library via '-lfoo', so tell the user that.
        mlog.warning(msg.format(l.name, 'name_prefix', l.name, pcfile))
        return l.name
    def _escape(self, value):
        '''
        We cannot use quote_arg because it quotes with ' and " which does not
        work with pkg-config and pkgconf at all.
        '''
        # We should always write out paths with / because pkg-config requires
        # spaces to be quoted with \ and that messes up on Windows:
        # https://bugs.freedesktop.org/show_bug.cgi?id=103203
        if isinstance(value, PurePath):
            value = value.as_posix()
        return value.replace(' ', r'\ ')
    def _make_relative(self, prefix, subdir):
        """Return `subdir` expressed relative to `prefix` and joined onto
        the '${prefix}' pkg-config variable (absolute subdirs pass through)."""
        prefix = PurePath(prefix)
        subdir = PurePath(subdir)
        try:
            libdir = subdir.relative_to(prefix)
        except ValueError:
            libdir = subdir
        # pathlib joining makes sure absolute libdir is not appended to '${prefix}'
        return ('${prefix}' / libdir).as_posix()
    def _generate_pkgconfig_file(self, state, deps, subdirs, name, description,
                                 url, version, pcfile, conflicts, variables,
                                 unescaped_variables, uninstalled=False, dataonly=False):
        """Write the .pc file `pcfile`. The uninstalled variant goes into
        <builddir>/meson-uninstalled; dataonly omits prefix/libdir/Cflags."""
        coredata = state.environment.get_coredata()
        if uninstalled:
            outdir = os.path.join(state.environment.build_dir, 'meson-uninstalled')
            if not os.path.exists(outdir):
                os.mkdir(outdir)
            prefix = PurePath(state.environment.get_build_dir())
            srcdir = PurePath(state.environment.get_source_dir())
        else:
            outdir = state.environment.scratch_dir
            prefix = PurePath(coredata.get_option(mesonlib.OptionKey('prefix')))
            # These always return paths relative to prefix
            libdir = PurePath(coredata.get_option(mesonlib.OptionKey('libdir')))
            incdir = PurePath(coredata.get_option(mesonlib.OptionKey('includedir')))
        fname = os.path.join(outdir, pcfile)
        with open(fname, 'w', encoding='utf-8') as ofile:
            if not dataonly:
                ofile.write('prefix={}\n'.format(self._escape(prefix)))
                if uninstalled:
                    ofile.write('srcdir={}\n'.format(self._escape(srcdir)))
                else:
                    ofile.write('libdir={}\n'.format(self._escape('${prefix}' / libdir)))
                    ofile.write('includedir={}\n'.format(self._escape('${prefix}' / incdir)))
            if variables or unescaped_variables:
                ofile.write('\n')
            for k, v in variables:
                ofile.write('{}={}\n'.format(k, self._escape(v)))
            for k, v in unescaped_variables:
                ofile.write(f'{k}={v}\n')
            ofile.write('\n')
            ofile.write(f'Name: {name}\n')
            if len(description) > 0:
                ofile.write(f'Description: {description}\n')
            if len(url) > 0:
                ofile.write(f'URL: {url}\n')
            ofile.write(f'Version: {version}\n')
            reqs_str = deps.format_reqs(deps.pub_reqs)
            if len(reqs_str) > 0:
                ofile.write(f'Requires: {reqs_str}\n')
            reqs_str = deps.format_reqs(deps.priv_reqs)
            if len(reqs_str) > 0:
                ofile.write(f'Requires.private: {reqs_str}\n')
            if len(conflicts) > 0:
                ofile.write('Conflicts: {}\n'.format(' '.join(conflicts)))
            def generate_libs_flags(libs):
                # Yields '-L'/'-l' (or C# '-r') flags for each lib entry;
                # plain strings pass through unchanged.
                msg = 'Library target {0!r} has {1!r} set. Compilers ' \
                      'may not find it from its \'-l{2}\' linker flag in the ' \
                      '{3!r} pkg-config file.'
                Lflags = []
                for l in libs:
                    if isinstance(l, str):
                        yield l
                    else:
                        if uninstalled:
                            install_dir = os.path.dirname(state.backend.get_target_filename_abs(l))
                        else:
                            _i = l.get_custom_install_dir()
                            install_dir = _i[0] if _i else None
                        # NOTE(review): a False entry appears to mean "do not
                        # install this target" — skip it; confirm against
                        # get_custom_install_dir()'s contract.
                        if install_dir is False:
                            continue
                        is_custom_target = isinstance(l, (build.CustomTarget, build.CustomTargetIndex))
                        if not is_custom_target and 'cs' in l.compilers:
                            if isinstance(install_dir, str):
                                Lflag = '-r{}/{}'.format(self._escape(self._make_relative(prefix, install_dir)), l.filename)
                            else:  # install_dir is True
                                Lflag = '-r${libdir}/%s' % l.filename
                        else:
                            if isinstance(install_dir, str):
                                Lflag = '-L{}'.format(self._escape(self._make_relative(prefix, install_dir)))
                            else:  # install_dir is True
                                Lflag = '-L${libdir}'
                        if Lflag not in Lflags:
                            Lflags.append(Lflag)
                            yield Lflag
                        lname = self._get_lname(l, msg, pcfile, is_custom_target)
                        # If using a custom suffix, the compiler may not be able to
                        # find the library
                        if not is_custom_target and l.name_suffix_set:
                            mlog.warning(msg.format(l.name, 'name_suffix', lname, pcfile))
                        if is_custom_target or 'cs' not in l.compilers:
                            yield f'-l{lname}'
            def get_uninstalled_include_dirs(libs):
                # Collect build-tree include dirs for the uninstalled pc file.
                result = []
                for l in libs:
                    if isinstance(l, (str, build.CustomTarget, build.CustomTargetIndex)):
                        continue
                    if l.get_subdir() not in result:
                        result.append(l.get_subdir())
                    for i in l.get_include_dirs():
                        curdir = i.get_curdir()
                        for d in i.get_incdirs():
                            path = os.path.join(curdir, d)
                            if path not in result:
                                result.append(path)
                return result
            def generate_uninstalled_cflags(libs):
                for d in get_uninstalled_include_dirs(libs):
                    for basedir in ['${prefix}', '${srcdir}']:
                        path = PurePath(basedir, d)
                        yield '-I%s' % self._escape(path.as_posix())
            if len(deps.pub_libs) > 0:
                ofile.write('Libs: {}\n'.format(' '.join(generate_libs_flags(deps.pub_libs))))
            if len(deps.priv_libs) > 0:
                ofile.write('Libs.private: {}\n'.format(' '.join(generate_libs_flags(deps.priv_libs))))
            cflags = []
            if uninstalled:
                cflags += generate_uninstalled_cflags(deps.pub_libs + deps.priv_libs)
            else:
                for d in subdirs:
                    if d == '.':
                        cflags.append('-I${includedir}')
                    else:
                        cflags.append(self._escape(PurePath('-I${includedir}') / d))
            cflags += [self._escape(f) for f in deps.cflags]
            if cflags and not dataonly:
                ofile.write('Cflags: {}\n'.format(' '.join(cflags)))
    @FeatureNewKwargs('pkgconfig.generate', '0.59.0', ['unescaped_variables', 'unescaped_uninstalled_variables'])
    @FeatureNewKwargs('pkgconfig.generate', '0.54.0', ['uninstalled_variables'])
    @FeatureNewKwargs('pkgconfig.generate', '0.42.0', ['extra_cflags'])
    @FeatureNewKwargs('pkgconfig.generate', '0.41.0', ['variables'])
    @FeatureNewKwargs('pkgconfig.generate', '0.54.0', ['dataonly'])
    @permittedKwargs({'libraries', 'version', 'name', 'description', 'filebase',
                      'subdirs', 'requires', 'requires_private', 'libraries_private',
                      'install_dir', 'extra_cflags', 'variables', 'url', 'd_module_versions',
                      'dataonly', 'conflicts', 'uninstalled_variables',
                      'unescaped_variables', 'unescaped_uninstalled_variables'})
    def generate(self, state: 'ModuleState', args, kwargs):
        """pkgconfig.generate() entry point: validates kwargs, collects
        dependencies via DependenciesHelper, and writes both the installed
        and -uninstalled .pc files. Returns the installed pc as build.Data."""
        default_version = state.project_version
        default_install_dir = None
        default_description = None
        default_name = None
        mainlib = None
        default_subdirs = ['.']
        if not args and 'version' not in kwargs:
            FeatureNew.single_use('pkgconfig.generate implicit version keyword', '0.46.0', state.subproject)
        elif len(args) == 1:
            FeatureNew.single_use('pkgconfig.generate optional positional argument', '0.46.0', state.subproject)
            mainlib = args[0]
            if not isinstance(mainlib, (build.StaticLibrary, build.SharedLibrary)):
                raise mesonlib.MesonException('Pkgconfig_gen first positional argument must be a library object')
            default_name = mainlib.name
            default_description = state.project_name + ': ' + mainlib.name
            install_dir = mainlib.get_custom_install_dir()
            if install_dir and isinstance(install_dir[0], str):
                default_install_dir = os.path.join(install_dir[0], 'pkgconfig')
        elif len(args) > 1:
            raise mesonlib.MesonException('Too many positional arguments passed to Pkgconfig_gen.')
        dataonly = kwargs.get('dataonly', False)
        if not isinstance(dataonly, bool):
            raise mesonlib.MesonException('dataonly must be boolean.')
        if dataonly:
            default_subdirs = []
            blocked_vars = ['libraries', 'libraries_private', 'require_private', 'extra_cflags', 'subdirs']
            if any(k in kwargs for k in blocked_vars):
                raise mesonlib.MesonException(f'Cannot combine dataonly with any of {blocked_vars}')
        subdirs = mesonlib.stringlistify(kwargs.get('subdirs', default_subdirs))
        version = kwargs.get('version', default_version)
        if not isinstance(version, str):
            raise mesonlib.MesonException('Version must be specified.')
        name = kwargs.get('name', default_name)
        if not isinstance(name, str):
            raise mesonlib.MesonException('Name not specified.')
        filebase = kwargs.get('filebase', name)
        if not isinstance(filebase, str):
            raise mesonlib.MesonException('Filebase must be a string.')
        description = kwargs.get('description', default_description)
        if not isinstance(description, str):
            raise mesonlib.MesonException('Description is not a string.')
        url = kwargs.get('url', '')
        if not isinstance(url, str):
            raise mesonlib.MesonException('URL is not a string.')
        conflicts = mesonlib.stringlistify(kwargs.get('conflicts', []))
        # Prepend the main library to public libraries list. This is required
        # so dep.add_pub_libs() can handle dependency ordering correctly and put
        # extra libraries after the main library.
        libraries = mesonlib.extract_as_list(kwargs, 'libraries')
        if mainlib:
            libraries = [mainlib] + libraries
        deps = DependenciesHelper(state, filebase)
        deps.add_pub_libs(libraries)
        deps.add_priv_libs(kwargs.get('libraries_private', []))
        deps.add_pub_reqs(kwargs.get('requires', []))
        deps.add_priv_reqs(kwargs.get('requires_private', []))
        deps.add_cflags(kwargs.get('extra_cflags', []))
        dversions = kwargs.get('d_module_versions', None)
        if dversions:
            compiler = state.environment.coredata.compilers.host.get('d')
            if compiler:
                deps.add_cflags(compiler.get_feature_args({'versions': dversions}, None))
        deps.remove_dups()
        def parse_variable_list(vardict):
            # prefix/libdir/includedir are written by meson itself (unless
            # dataonly), so user variables may not shadow them.
            reserved = ['prefix', 'libdir', 'includedir']
            variables = []
            for name, value in vardict.items():
                if not dataonly and name in reserved:
                    raise mesonlib.MesonException(f'Variable "{name}" is reserved')
                variables.append((name, value))
            return variables
        variables = self.interpreter.extract_variables(kwargs, dict_new=True)
        variables = parse_variable_list(variables)
        unescaped_variables = self.interpreter.extract_variables(kwargs, argname='unescaped_variables')
        unescaped_variables = parse_variable_list(unescaped_variables)
        pcfile = filebase + '.pc'
        pkgroot = pkgroot_name = kwargs.get('install_dir', default_install_dir)
        if pkgroot is None:
            if mesonlib.is_freebsd():
                pkgroot = os.path.join(state.environment.coredata.get_option(mesonlib.OptionKey('prefix')), 'libdata', 'pkgconfig')
                pkgroot_name = os.path.join('{prefix}', 'libdata', 'pkgconfig')
            else:
                pkgroot = os.path.join(state.environment.coredata.get_option(mesonlib.OptionKey('libdir')), 'pkgconfig')
                pkgroot_name = os.path.join('{libdir}', 'pkgconfig')
        if not isinstance(pkgroot, str):
            raise mesonlib.MesonException('Install_dir must be a string.')
        self._generate_pkgconfig_file(state, deps, subdirs, name, description, url,
                                      version, pcfile, conflicts, variables,
                                      unescaped_variables, False, dataonly)
        res = build.Data([mesonlib.File(True, state.environment.get_scratch_dir(), pcfile)], pkgroot, pkgroot_name, None, state.subproject, install_tag='devel')
        variables = self.interpreter.extract_variables(kwargs, argname='uninstalled_variables', dict_new=True)
        variables = parse_variable_list(variables)
        unescaped_variables = self.interpreter.extract_variables(kwargs, argname='unescaped_uninstalled_variables')
        unescaped_variables = parse_variable_list(unescaped_variables)
        pcfile = filebase + '-uninstalled.pc'
        self._generate_pkgconfig_file(state, deps, subdirs, name, description, url,
                                      version, pcfile, conflicts, variables,
                                      unescaped_variables, uninstalled=True, dataonly=dataonly)
        # Associate the main library with this generated pc file. If the library
        # is used in any subsequent call to the generated, it will generate a
        # 'Requires:' or 'Requires.private:'.
        # Backward compatibility: We used to set 'generated_pc' on all public
        # libraries instead of just the main one. Keep doing that but warn if
        # anyone is relying on that deprecated behaviour.
        if mainlib:
            if not hasattr(mainlib, 'generated_pc'):
                mainlib.generated_pc = filebase
            else:
                mlog.warning('Already generated a pkg-config file for', mlog.bold(mainlib.name))
        else:
            for lib in deps.pub_libs:
                if not isinstance(lib, str) and not hasattr(lib, 'generated_pc'):
                    lib.generated_pc = filebase
                    location = state.current_node
                    lib.generated_pc_warn = [name, location]
        return ModuleReturnValue(res, [res])
def initialize(*args, **kwargs):
    """Module entry point used by meson to instantiate the pkgconfig module."""
    module = PkgConfigModule(*args, **kwargs)
    return module
| 48.776271
| 160
| 0.583606
|
4a0b39f6f41aeab7f133236db291a7fd7bbaf610
| 5,172
|
py
|
Python
|
kubernetes_asyncio/client/api/scheduling_api.py
|
jnschaeffer/kubernetes_asyncio
|
05f42510e7acb8d229bf7c2d4e2455e6328486a6
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/api/scheduling_api.py
|
jnschaeffer/kubernetes_asyncio
|
05f42510e7acb8d229bf7c2d4e2455e6328486a6
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/api/scheduling_api.py
|
jnschaeffer/kubernetes_asyncio
|
05f42510e7acb8d229bf7c2d4e2455e6328486a6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.11
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import (
ApiTypeError,
ApiValueError
)
class SchedulingApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient (default configuration, own
        # connection pool) when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501
        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: delegate to the *_with_http_info variant but
        # return only the deserialized body, not (body, status, headers).
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501
        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # This endpoint takes no API parameters; only the framework-level
        # kwargs listed below are accepted.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject typos / unsupported arguments early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501
        # GET /apis/scheduling.k8s.io/ returns the scheduling API group info.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 37.478261
| 124
| 0.610209
|
4a0b3ad2ee32c21e4eb6602eae33edaac3806fcd
| 6,853
|
py
|
Python
|
airflow/operators/s3_file_transform_operator.py
|
RSEnergyGroup/incubator-airflow
|
e947c6c034238ede29a6c8f51307458d3e40c1b5
|
[
"Apache-2.0"
] | 4
|
2019-01-17T06:21:45.000Z
|
2020-06-20T01:59:57.000Z
|
airflow/operators/s3_file_transform_operator.py
|
RSEnergyGroup/incubator-airflow
|
e947c6c034238ede29a6c8f51307458d3e40c1b5
|
[
"Apache-2.0"
] | 14
|
2018-10-24T03:15:11.000Z
|
2019-01-02T19:02:58.000Z
|
airflow/operators/s3_file_transform_operator.py
|
cse-airflow/incubator-airflow
|
215b8c8170bd63f4c449614945bb4b6d90f6a860
|
[
"Apache-2.0"
] | 6
|
2018-12-04T12:15:23.000Z
|
2020-11-23T03:51:41.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import NamedTemporaryFile
import subprocess
import sys
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class S3FileTransformOperator(BaseOperator):
    """
    Copies data from a source S3 location to a temporary location on the
    local filesystem. Runs a transformation on this file as specified by
    the transformation script and uploads the output to a destination S3
    location.
    The locations of the source and the destination files in the local
    filesystem is provided as an first and second arguments to the
    transformation script. The transformation script is expected to read the
    data from source, transform it and write the output to the local
    destination file. The operator then takes over control and uploads the
    local destination file to S3.
    S3 Select is also available to filter the source contents. Users can
    omit the transformation script if S3 Select expression is specified.
    :param source_s3_key: The key to be retrieved from S3. (templated)
    :type source_s3_key: str
    :param source_aws_conn_id: source s3 connection
    :type source_aws_conn_id: str
    :param source_verify: Whether or not to verify SSL certificates for S3 connetion.
        By default SSL certificates are verified.
        You can provide the following values:
        - ``False``: do not validate SSL certificates. SSL will still be used
                 (unless use_ssl is False), but SSL certificates will not be
                 verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
                 You can specify this argument if you want to use a different
                 CA cert bundle than the one used by botocore.
        This is also applicable to ``dest_verify``.
    :type source_verify: bool or str
    :param dest_s3_key: The key to be written from S3. (templated)
    :type dest_s3_key: str
    :param dest_aws_conn_id: destination s3 connection
    :type dest_aws_conn_id: str
    :param replace: Replace dest S3 key if it already exists
    :type replace: bool
    :param transform_script: location of the executable transformation script
    :type transform_script: str
    :param select_expression: S3 Select expression
    :type select_expression: str
    """
    template_fields = ('source_s3_key', 'dest_s3_key')
    template_ext = ()
    ui_color = '#f9c915'
    @apply_defaults
    def __init__(
            self,
            source_s3_key,
            dest_s3_key,
            transform_script=None,
            select_expression=None,
            source_aws_conn_id='aws_default',
            source_verify=None,
            dest_aws_conn_id='aws_default',
            dest_verify=None,
            replace=False,
            *args, **kwargs):
        super(S3FileTransformOperator, self).__init__(*args, **kwargs)
        self.source_s3_key = source_s3_key
        self.source_aws_conn_id = source_aws_conn_id
        self.source_verify = source_verify
        self.dest_s3_key = dest_s3_key
        self.dest_aws_conn_id = dest_aws_conn_id
        self.dest_verify = dest_verify
        self.replace = replace
        self.transform_script = transform_script
        self.select_expression = select_expression
        # Encoding used to decode the transform script's stdout for logging.
        self.output_encoding = sys.getdefaultencoding()
    def execute(self, context):
        # At least one transformation mechanism must be configured; the
        # docstring allows omitting the script only when S3 Select is used.
        if self.transform_script is None and self.select_expression is None:
            raise AirflowException(
                "Either transform_script or select_expression must be specified")
        source_s3 = S3Hook(aws_conn_id=self.source_aws_conn_id,
                           verify=self.source_verify)
        dest_s3 = S3Hook(aws_conn_id=self.dest_aws_conn_id,
                         verify=self.dest_verify)
        self.log.info("Downloading source S3 file %s", self.source_s3_key)
        if not source_s3.check_for_key(self.source_s3_key):
            raise AirflowException(
                "The source key {0} does not exist".format(self.source_s3_key))
        source_s3_key_object = source_s3.get_key(self.source_s3_key)
        # Both temp files are binary and auto-deleted when the block exits.
        with NamedTemporaryFile("wb") as f_source, NamedTemporaryFile("wb") as f_dest:
            self.log.info(
                "Dumping S3 file %s contents to local file %s",
                self.source_s3_key, f_source.name
            )
            if self.select_expression is not None:
                # S3 Select path: server-side filtering, result returned as text.
                content = source_s3.select_key(
                    key=self.source_s3_key,
                    expression=self.select_expression
                )
                f_source.write(content.encode("utf-8"))
            else:
                source_s3_key_object.download_fileobj(Fileobj=f_source)
            # Flush so the external transform script sees the full content.
            f_source.flush()
            if self.transform_script is not None:
                # Script contract: argv[1] = source path, argv[2] = dest path.
                # stderr is merged into stdout so everything is log-streamed.
                process = subprocess.Popen(
                    [self.transform_script, f_source.name, f_dest.name],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    close_fds=True
                )
                self.log.info("Output:")
                for line in iter(process.stdout.readline, b''):
                    self.log.info(line.decode(self.output_encoding).rstrip())
                process.wait()
                if process.returncode > 0:
                    raise AirflowException(
                        "Transform script failed: {0}".format(process.returncode)
                    )
                else:
                    self.log.info(
                        "Transform script successful. Output temporarily located at %s",
                        f_dest.name
                    )
            # NOTE(review): when only select_expression is given, f_dest is
            # empty and an empty object is uploaded — presumably intentional
            # pass-through of the select output is expected via the script.
            self.log.info("Uploading transformed file to S3")
            f_dest.flush()
            dest_s3.load_file(
                filename=f_dest.name,
                key=self.dest_s3_key,
                replace=self.replace
            )
            self.log.info("Upload successful")
| 40.791667
| 88
| 0.649497
|
4a0b3b1a46ef8be15ba71d66004c53386dce7022
| 8,742
|
py
|
Python
|
st2common/st2common/transport/connection_retry_wrapper.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | 2
|
2021-08-04T01:04:06.000Z
|
2021-08-04T01:04:08.000Z
|
st2common/st2common/transport/connection_retry_wrapper.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/transport/connection_retry_wrapper.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from st2common.util import concurrency
__all__ = ['ConnectionRetryWrapper', 'ClusterRetryContext']
class ClusterRetryContext(object):
    """
    Tracks how many cluster nodes have been attempted and decides whether
    another retry should happen and, if so, how long to wait first.
    """
    def __init__(self, cluster_size):
        # Number of nodes in the cluster.
        self.cluster_size = cluster_size
        # How many full passes over the cluster to attempt.
        self.cluster_retry = 2
        # Seconds to wait after completing a full pass over the cluster.
        self.wait_between_cluster = 10
        # Attempts made so far; starts at 1.
        self._nodes_attempted = 1
    def test_should_stop(self, e=None):
        """Return ``(should_stop, wait_seconds)``; wait is -1 when stopping."""
        # Special workaround for "(504) CHANNEL_ERROR - second 'channel.open'
        # seen" which happens during tests on Travis: not fatal, so switch to
        # the next connection immediately without sleeping.
        if "second 'channel.open' seen" in six.text_type(e):
            return False, -1
        # Give up once every node has been tried cluster_retry times.
        if self._nodes_attempted > self.cluster_size * self.cluster_retry:
            return True, -1
        # Sleep only after each complete pass over all nodes in the cluster.
        wait = self.wait_between_cluster if self._nodes_attempted % self.cluster_size == 0 else 0
        self._nodes_attempted += 1
        return False, wait
class ConnectionRetryWrapper(object):
    """
    Manages retry of connection and also switching to different nodes in a cluster.
    :param cluster_size: Size of the cluster.
    :param logger: logger to use to log moderately useful information.
    .. code-block:: python
        # Without ensuring recoverable errors are retried
        connection_urls = [
            'amqp://guest:guest@node1:5672',
            'amqp://guest:guest@node2:5672',
            'amqp://guest:guest@node3:5672'
        ]
        with Connection(connection_urls) as connection:
            retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),
                                                   logger=my_logger)
            # wrapped_callback must have signature ``def func(connection, channel)``
            def wrapped_callback(connection, channel):
                pass
            retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)
        # With ensuring recoverable errors are retried
        connection_urls = [
            'amqp://guest:guest@node1:5672',
            'amqp://guest:guest@node2:5672',
            'amqp://guest:guest@node3:5672'
        ]
        with Connection(connection_urls) as connection:
            retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),
                                                   logger=my_logger)
            # wrapped_callback must have signature ``def func(connection, channel)``
            def wrapped_callback(connection, channel):
                kwargs = {...}
                # call ensured to correctly deal with recoverable errors.
                retry_wrapper.ensured(connection=connection_retry_wrapper,
                                      obj=my_obj,
                                      to_ensure_func=my_obj.ensuree,
                                      **kwargs)
            retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)
    """
    def __init__(self, cluster_size, logger, ensure_max_retries=3):
        self._retry_context = ClusterRetryContext(cluster_size=cluster_size)
        self._logger = logger
        # How many times to try to retrying establishing a connection in a place where we are
        # calling connection.ensure_connection
        self._ensure_max_retries = ensure_max_retries
    def errback(self, exc, interval):
        # NOTE(review): ``exc.message`` is a Python 2 idiom; on Python 3
        # exceptions have no ``.message`` attribute — confirm this callback
        # is only reached on py2 or fix to six.text_type(exc).
        self._logger.error('Rabbitmq connection error: %s', exc.message)
    def run(self, connection, wrapped_callback):
        """
        Run the wrapped_callback in a protective covering of retries and error handling.
        :param connection: Connection to messaging service
        :type connection: kombu.connection.Connection
        :param wrapped_callback: Callback that will be wrapped by all the fine handling in this
                                 method. Expected signature of callback -
                                 ``def func(connection, channel)``
        """
        should_stop = False
        channel = None
        while not should_stop:
            try:
                channel = connection.channel()
                wrapped_callback(connection=connection, channel=channel)
                # Callback succeeded: leave the retry loop.
                should_stop = True
            except connection.connection_errors + connection.channel_errors as e:
                # Ask the retry context whether to give up and how long to wait.
                should_stop, wait = self._retry_context.test_should_stop(e)
                # reset channel to None to avoid any channel closing errors. At this point
                # in case of an exception there should be no channel but that is better to
                # guarantee.
                channel = None
                # All attempts to re-establish connections have failed. This error needs to
                # be notified so raise.
                if should_stop:
                    raise
                # -1, 0 and 1+ are handled properly by eventlet.sleep
                self._logger.debug('Received RabbitMQ server error, sleeping for %s seconds '
                                   'before retrying: %s' % (wait, six.text_type(e)))
                concurrency.sleep(wait)
                connection.close()
                # ensure_connection will automatically switch to an alternate. Other connections
                # in the pool will be fixed independently. It would be nice to cut-over the
                # entire ConnectionPool simultaneously but that would require writing our own
                # ConnectionPool. If a server recovers it could happen that the same process
                # ends up talking to separate nodes in a cluster.
                def log_error_on_conn_failure(exc, interval):
                    # ``e`` is closed over from the enclosing except block on purpose.
                    self._logger.debug('Failed to re-establish connection to RabbitMQ server, '
                                       'retrying in %s seconds: %s' % (interval, six.text_type(e)))
                try:
                    # NOTE: This function blocks and tries to restablish a connection for
                    # indefinetly if "max_retries" argument is not specified
                    connection.ensure_connection(max_retries=self._ensure_max_retries,
                                                 errback=log_error_on_conn_failure)
                except Exception:
                    self._logger.exception('Connections to RabbitMQ cannot be re-established: %s',
                                           six.text_type(e))
                    raise
            except Exception as e:
                self._logger.exception('Connections to RabbitMQ cannot be re-established: %s',
                                       six.text_type(e))
                # Not being able to publish a message could be a significant issue for an app.
                raise
            finally:
                # Close the channel only on the successful exit path; error
                # paths already reset ``channel`` to None above.
                if should_stop and channel:
                    try:
                        channel.close()
                    except Exception:
                        self._logger.warning('Error closing channel.', exc_info=True)
    def ensured(self, connection, obj, to_ensure_func, **kwargs):
        """
        Ensure that recoverable errors are retried a set number of times before giving up.
        :param connection: Connection to messaging service
        :type connection: kombu.connection.Connection
        :param obj: Object whose method is to be ensured. Typically, channel, producer etc. from
                    the kombu library.
        :type obj: Must support mixin kombu.abstract.MaybeChannelBound
        """
        # kombu's Connection.ensure wraps the callable so recoverable channel/
        # connection errors trigger reconnection and a bounded number of retries.
        ensuring_func = connection.ensure(
            obj, to_ensure_func,
            errback=self.errback,
            max_retries=3)
        ensuring_func(**kwargs)
| 44.830769
| 99
| 0.610844
|
4a0b3b4bad2851b97325a33e3fe2a46c4683e6e8
| 916
|
py
|
Python
|
30-Day-Challange/Day-16/course-schedule-1.py
|
EashanKaushik/LeetCode
|
8ee2a61cefa42b332b6252fafff4a2772d25aa31
|
[
"Apache-2.0"
] | null | null | null |
30-Day-Challange/Day-16/course-schedule-1.py
|
EashanKaushik/LeetCode
|
8ee2a61cefa42b332b6252fafff4a2772d25aa31
|
[
"Apache-2.0"
] | null | null | null |
30-Day-Challange/Day-16/course-schedule-1.py
|
EashanKaushik/LeetCode
|
8ee2a61cefa42b332b6252fafff4a2772d25aa31
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
class Solution:
    """Course Schedule (LeetCode 207): can all courses be finished?"""
    def canFinish(self, numCourses, prerequisites):
        """Return True iff the prerequisite graph contains no cycle.

        :param numCourses: number of courses labeled ``0..numCourses-1``
        :param prerequisites: list of ``[course, prereq]`` pairs
        """
        # adjacency: course -> list of its prerequisites
        adj_matrix = {num: [] for num in range(numCourses)}
        for course, prereq in prerequisites:
            adj_matrix[course].append(prereq)
        # courses on the current DFS path; revisiting one means a cycle
        visiting = set()
        def dfs(course):
            if course in visiting:
                return False  # back-edge: cycle detected
            if not adj_matrix[course]:
                return True  # no unverified prerequisites left
            visiting.add(course)
            # Fix: the original called adj_matrix[course].remove(prereq)
            # inside this loop, mutating the list while iterating it and
            # silently skipping prerequisites. Iterate fully, then clear.
            for prereq in adj_matrix[course]:
                if not dfs(prereq):
                    return False
            visiting.remove(course)
            adj_matrix[course] = []  # memoize: course proven acyclic
            return True
        return all(dfs(course) for course in range(numCourses))
# Quick manual check: mutual prerequisites form a cycle, so this prints False.
solver = Solution()
print(solver.canFinish(2, [[1, 0], [0, 1]]))
| 22.341463
| 61
| 0.501092
|
4a0b3c2df8c3456289794765d16ce26bd4a32c1c
| 925
|
py
|
Python
|
src/configreader.py
|
sconos/super-octo-chainsaw
|
392abf6c6fa6a15a82f7f442c108da2a0613c9d0
|
[
"MIT"
] | null | null | null |
src/configreader.py
|
sconos/super-octo-chainsaw
|
392abf6c6fa6a15a82f7f442c108da2a0613c9d0
|
[
"MIT"
] | null | null | null |
src/configreader.py
|
sconos/super-octo-chainsaw
|
392abf6c6fa6a15a82f7f442c108da2a0613c9d0
|
[
"MIT"
] | null | null | null |
import sys,os, ConfigParser
class ConfigReader:
    """
    Read project configuration files in dosini (INI) format.

    Candidate files come in two flavours:

    * ``config`` under ``$XDG_CONFIG_HOME/SuperOctoChainsaw`` (falling back
      to ``~/.config/SuperOctoChainsaw`` when XDG_CONFIG_HOME is unset)
    * ``.SuperOctoChainsaw.conf`` in the current directory and in the
      user's home directory

    Every candidate that exists is merged into one parser; later files
    override earlier ones (standard ConfigParser semantics).
    """
    conf = 'config'
    dotconf = '.SuperOctoChainsaw.conf'

    def __init__(self, projectName):
        self.projectName = projectName
        self.config = None
        # Map file name -> set of directories in which to look for it.
        # (Fixes the original's self.filelocs/self.fileLocs typo, the bare
        # conf/dotconf references, os.path.get -> os.environ.get, and the
        # unbalanced parentheses.)
        self.fileLocs = dict()
        xdg_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(
            os.path.expanduser("~"), ".config")
        self.fileLocs[self.conf] = set()
        self.fileLocs[self.conf].add(os.path.join(xdg_home, 'SuperOctoChainsaw'))
        self.fileLocs[self.dotconf] = set()
        self.fileLocs[self.dotconf].add(os.curdir)
        self.fileLocs[self.dotconf].add(os.path.expanduser("~"))

    def read(self):
        """Parse every existing candidate file into ``self.config``.

        :return: the populated parser, or ``None`` when no candidate file
                 could be read.
        """
        # ``ConfigParser`` is the module imported at the top of this file
        # (the Python 2 name of the stdlib ``configparser`` package).
        parser = ConfigParser.ConfigParser()
        found = False
        # fileLocs maps a file name to a *set* of directories; the original
        # iterated the dict as if it yielded (filename, path) pairs.
        for filename, paths in self.fileLocs.items():
            for path in paths:
                fullpath = os.path.join(path, filename)
                try:
                    with open(fullpath) as source:
                        parser.readfp(source)
                        found = True
                except IOError:
                    # Missing candidates are expected; skip silently.
                    pass
        if found:
            self.config = parser
        else:
            # TODO: log a warning here once the project grows a logger.
            self.config = None
        return self.config
| 28.030303
| 97
| 0.578378
|
4a0b3c4710b6af22b6aeb2eb7c41c722776c3da4
| 474
|
py
|
Python
|
accounts/migrations/0003_user_avatar.py
|
yogoh31/Repath-App-Backend
|
c2cc85b05457ae7338de6f299ce369358402ce12
|
[
"MIT"
] | null | null | null |
accounts/migrations/0003_user_avatar.py
|
yogoh31/Repath-App-Backend
|
c2cc85b05457ae7338de6f299ce369358402ce12
|
[
"MIT"
] | null | null | null |
accounts/migrations/0003_user_avatar.py
|
yogoh31/Repath-App-Backend
|
c2cc85b05457ae7338de6f299ce369358402ce12
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-26 10:23
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an ``avatar`` image field to the
    # custom user model, defaulting to a bundled placeholder image.
    dependencies = [
        ('accounts', '0002_alter_user_date_joined'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='avatar',
            # The upload path is computed per-user by
            # accounts.models.get_user_avatar_path at save time.
            field=models.ImageField(default='images/default.jpg', upload_to=accounts.models.get_user_avatar_path),
        ),
    ]
| 23.7
| 114
| 0.647679
|
4a0b3d0ab8731e48f04bb2b2074d2c197a82b4cb
| 5,816
|
py
|
Python
|
tf2onnx/optimizer/const_fold_optimizer.py
|
wangqiaoshi/tensorflow-onnx
|
7ef484c5deae6213cd18cf2548b2551f7bbc8b48
|
[
"MIT"
] | null | null | null |
tf2onnx/optimizer/const_fold_optimizer.py
|
wangqiaoshi/tensorflow-onnx
|
7ef484c5deae6213cd18cf2548b2551f7bbc8b48
|
[
"MIT"
] | null | null | null |
tf2onnx/optimizer/const_fold_optimizer.py
|
wangqiaoshi/tensorflow-onnx
|
7ef484c5deae6213cd18cf2548b2551f7bbc8b48
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""const fold Optimizer.
if op's inputs are all const then do op computation when building the graph to improve performance
for example, input of transpose node is const then we can do transpose statically instead of at runtime
"""
from .. import utils
from .optimizer_base import GraphOptimizerBase
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
# key is op_type, value is the function to compute outputs
# the schema of function is: inputs are(node, graph), output is a list of constant values.
_func_map = {}
def _register_func(op_type):
    """Decorator factory: register *func* as the constant folder for ``op_type``."""
    def _decorator(func):
        _func_map[op_type] = func
        return func
    return _decorator
class ConstFoldOptimizer(GraphOptimizerBase):
    """Fold ops whose inputs are all constants into precomputed const nodes."""
    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstFoldOptimizer, self).__init__()
    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)
    def _optimize_at_current_graph_level(self, graph):
        # Iterate to a fixed point: folding one node may make its consumers
        # foldable on the next pass.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._should_skip(op):
                    continue
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph
    @staticmethod
    def _should_skip(node):
        # only support onnx official op for now, op in other domain is not supported for now
        if not utils.is_onnx_domain(node.domain):
            return True
        # Constants and graph inputs have nothing to fold.
        if node.is_const() or node.is_graph_input():
            return True
        skip_type = ["Identity"]
        if node.type in skip_type:
            return True
        return False
    def _fold_node(self, node, graph):
        """ if node's input are all const and it's not graph's output then it can be fold.
            if node can be fold True will be return indicating that graph is changed
        """
        if self._all_inputs_are_const(node.inputs) and not self._is_graph_output(node, graph):
            # Look up the per-op folding function registered via _register_func.
            process_func = _func_map.get(node.type, None)
            if process_func:
                const_outputs = process_func(node, graph)
                self._replace_node_with_const(node, graph, const_outputs)
                return True
            self.logger.debug("need to add function to fold op %s whose op_type is %s", node.name, node.type)
        return False
    @staticmethod
    def _all_inputs_are_const(nodes):
        return all(node.is_const() for node in nodes if node)
    @staticmethod
    def _is_graph_output(node, graph):
        # Truthy (non-empty intersection) when any node output is a graph output.
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)
    @staticmethod
    def _replace_node_with_const(node, graph, vals):
        utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be same")
        # Create one const node per output, rewire consumers, then drop the node.
        for old_input, val in zip(node.output, vals):
            const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
            graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
            graph.set_shape(const_node.output[0], val.shape)
            graph.replace_all_inputs(old_input, const_node.output[0])  # ops=graph.get_nodes()
        graph.remove_node(node.name)
    @staticmethod
    @_register_func("Cast")
    def _fold_cast(node, graph):
        # Statically apply the dtype conversion requested by the "to" attribute.
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
        const_val_after_cast = const_val.astype(np_dtype)
        return [const_val_after_cast]
    @staticmethod
    @_register_func("Transpose")
    def _fold_transpose(node, graph) -> list:
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        # Missing "perm" means numpy's default: reverse all axes.
        perm_attr = node.get_attr("perm")
        perm = perm_attr.ints if perm_attr else None
        const_val_after_trans = const_val.transpose(perm)
        return [const_val_after_trans]
    @staticmethod
    @_register_func("Reshape")
    def _fold_reshape(node, graph):
        const_val_data = node.inputs[0].get_tensor_value(as_list=False)
        const_val_shape = node.inputs[1].get_tensor_value(as_list=True)
        data_shape = const_val_data.shape
        for i, dim in enumerate(const_val_shape):
            if dim == 0:
                # In ORT a dim of 0 means the shape stays the same.
                const_val_shape[i] = data_shape[i]
        const_val_after_trans = const_val_data.reshape(const_val_shape)
        return [const_val_after_trans]
    @staticmethod
    @_register_func("Unsqueeze")
    def _fold_unsqueeze(node, graph):
        """
        numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
        """
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        axes = list(node.get_attr("axes").ints)
        utils.make_sure(all(axis >= 0 for axis in axes), "onnx spec says it only supports positive axis")
        shape_in = const_val.shape
        dims_out = len(shape_in) + len(axes)
        # calculate the shape of output accroding to onnx Unsqueeze's spec
        # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze
        # Place a 1 at every unsqueezed axis, then fill remaining slots with
        # the input dims in order.
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)
        const_val_after_unsqueeze = const_val.reshape(shape_out)
        return [const_val_after_unsqueeze]
| 39.033557
| 111
| 0.662827
|
4a0b3efc08a6a2bbb248352182dd3a39d10b767e
| 4,068
|
py
|
Python
|
switch/sonoff.py
|
2016for/HASS-sonoff-ewelink
|
cbe39b183be19911663b65d9fcc71af666f9ec97
|
[
"MIT"
] | null | null | null |
switch/sonoff.py
|
2016for/HASS-sonoff-ewelink
|
cbe39b183be19911663b65d9fcc71af666f9ec97
|
[
"MIT"
] | null | null | null |
switch/sonoff.py
|
2016for/HASS-sonoff-ewelink
|
cbe39b183be19911663b65d9fcc71af666f9ec97
|
[
"MIT"
] | 1
|
2019-03-08T14:09:55.000Z
|
2019-03-08T14:09:55.000Z
|
import logging, time, json
from homeassistant.components.switch import SwitchDevice
from homeassistant.components.switch import DOMAIN
# from homeassistant.components.sonoff import (DOMAIN as SONOFF_DOMAIN, SonoffDevice)
from custom_components.sonoff import (DOMAIN as SONOFF_DOMAIN, SonoffDevice)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Discover ewelink devices and add one switch entity per outlet."""
    controller = hass.data[SONOFF_DOMAIN]
    entities = []
    for device in controller.get_devices(force_update=True):
        outlets_number = controller.get_outlets(device)
        if outlets_number is None:
            # Outlet count unknown: fall back to whatever the device reports.
            if 'switches' in device['params']:
                # Multi-switch device: one entity per reported outlet.
                for switch_info in device['params']['switches']:
                    entities.append(SonoffSwitch(hass, device, switch_info['outlet']))
            elif 'switch' in device['params'] or 'state' in device['params']:
                # Single-channel device (Sonoff Basic and alike).
                entities.append(SonoffSwitch(hass, device))
        elif outlets_number > 1:
            # Known multi-outlet device: split into one entity per outlet.
            for outlet in range(outlets_number):
                entities.append(SonoffSwitch(hass, device, outlet))
        elif 'switch' in device['params'] or 'state' in device['params']:
            # Single outlet; skips devices without switch state (e.g. RF bridge).
            entities.append(SonoffSwitch(hass, device))
    if entities:
        async_add_entities(entities, update_before_add=False)
class SonoffSwitch(SonoffDevice, SwitchDevice):
    """Representation of a Sonoff switch device (optionally one outlet of it)."""
    def __init__(self, hass, device, outlet=None):
        """Initialize the switch; ``outlet`` selects a channel on multi-outlet devices."""
        SonoffDevice.__init__(self, hass, device)
        self._outlet = outlet
        if outlet is None:
            self._name = device['name']
            return
        self._attributes['outlet'] = outlet
        # Prefer a user-assigned channel name from the ewelink tags, if any.
        custom_name = None
        if 'tags' in device and 'ck_channel_name' in device['tags']:
            custom_name = device['tags']['ck_channel_name'].get(str(outlet))
        if custom_name:
            self._name = '{} - {}'.format(device['name'], custom_name)
            self._attributes['outlet_name'] = custom_name
        else:
            self._name = '{} {}'.format(device['name'], ('CH %s' % str(outlet + 1)))
    @property
    def is_on(self):
        """Return true if device is on."""
        self._state = self.get_state()
        return self._state
    def _request_state(self, state):
        """Fire the shared sonoff_state event and schedule an HA state refresh."""
        self._hass.bus.async_fire('sonoff_state', {
            'state': state,
            'deviceid': self._deviceid,
            'outlet': self._outlet
        })
        self.async_schedule_update_ha_state()
    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._request_state(True)
    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._request_state(False)
    # entity id is required if the name uses characters outside ascii
    @property
    def entity_id(self):
        """Return the unique id of the switch."""
        base = "{}.{}".format(DOMAIN, self._deviceid)
        if self._outlet is None:
            return base
        return "{}_{}".format(base, str(self._outlet + 1))
| 39.882353
| 113
| 0.602262
|
4a0b40300be9a28633709a6b10908603918e5805
| 8,230
|
py
|
Python
|
lib/ansible/plugins/connection/lxc.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
lib/ansible/plugins/connection/lxc.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
lib/ansible/plugins/connection/lxc.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Joerg Thalheim <joerg@higgsboson.tk>
connection: lxc
short_description: Run tasks in lxc containers via lxc python library
description:
- Run commands or put/fetch files to an existing lxc container using lxc python library
version_added: "2.0"
options:
remote_addr:
description:
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_lxc_host
executable:
default: /bin/sh
description:
- Shell executable
vars:
- name: ansible_executable
- name: ansible_lxc_executable
"""
import os
import shutil
import traceback
import select
import fcntl
import errno
HAS_LIBLXC = False
try:
import lxc as _lxc
HAS_LIBLXC = True
except ImportError:
pass
from ansible import constants as C
from ansible import errors
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' Local lxc based connections '''
transport = 'lxc'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS)
default_user = 'root'
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # Container is looked up by the inventory's remote address; the actual
        # lxc.Container object is created lazily in _connect().
        self.container_name = self._play_context.remote_addr
        self.container = None
    def _connect(self):
        ''' connect to the lxc; nothing to do here '''
        super(Connection, self)._connect()
        # The python lxc bindings are optional at import time; fail only when
        # a connection is actually attempted.
        if not HAS_LIBLXC:
            msg = "lxc bindings for python2 are not installed"
            raise errors.AnsibleError(msg)
        # Idempotent: reuse an already-created container handle.
        if self.container:
            return
        self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
        self.container = _lxc.Container(self.container_name)
        if self.container.state == "STOPPED":
            raise errors.AnsibleError("%s is not running" % self.container_name)
    def _communicate(self, pid, in_data, stdin, stdout, stderr):
        """Pump stdin into and drain stdout/stderr from attached process *pid*.

        Returns ``(returncode, stdout_bytes, stderr_bytes)``. All fds are
        expected to be non-blocking (see _set_nonblocking).
        """
        buf = {stdout: [], stderr: []}
        read_fds = [stdout, stderr]
        if in_data:
            write_fds = [stdin]
        else:
            write_fds = []
        # Multiplex until both output streams hit EOF and all input is written.
        while len(read_fds) > 0 or len(write_fds) > 0:
            try:
                ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
            except select.error as e:
                # Retry when select() is interrupted by a signal.
                if e.args[0] == errno.EINTR:
                    continue
                raise
            for fd in ready_writes:
                # Drop the bytes actually written; stop writing once drained.
                in_data = in_data[os.write(fd, in_data):]
                if len(in_data) == 0:
                    write_fds.remove(fd)
            for fd in ready_reads:
                data = os.read(fd, 32768)
                # Empty read means EOF on that stream.
                if not data:
                    read_fds.remove(fd)
                buf[fd].append(data)
        # Reap the child and collect its exit status.
        (pid, returncode) = os.waitpid(pid, 0)
        return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
def _set_nonblocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
return fd
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# python2-lxc needs bytes. python3-lxc needs text.
executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
read_stdout, write_stdout = None, None
read_stderr, write_stderr = None, None
read_stdin, write_stdin = None, None
try:
read_stdout, write_stdout = os.pipe()
read_stderr, write_stderr = os.pipe()
kwargs = {
'stdout': self._set_nonblocking(write_stdout),
'stderr': self._set_nonblocking(write_stderr),
'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
}
if in_data:
read_stdin, write_stdin = os.pipe()
kwargs['stdin'] = self._set_nonblocking(read_stdin)
self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
if pid == -1:
msg = "failed to attach to container %s" % self.container_name
raise errors.AnsibleError(msg)
write_stdout = os.close(write_stdout)
write_stderr = os.close(write_stderr)
if read_stdin:
read_stdin = os.close(read_stdin)
return self._communicate(pid,
in_data,
write_stdin,
read_stdout,
read_stderr)
finally:
fds = [read_stdout,
write_stdout,
read_stderr,
write_stderr,
read_stdin,
write_stdin]
for fd in fds:
if fd:
os.close(fd)
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
if not os.path.exists(in_path):
msg = "file or module does not exist: %s" % in_path
raise errors.AnsibleFileNotFound(msg)
try:
src_file = open(in_path, "rb")
except IOError:
traceback.print_exc()
raise errors.AnsibleError("failed to open input file to %s" % in_path)
try:
def write_file(args):
with open(out_path, 'wb+') as dst_file:
shutil.copyfileobj(src_file, dst_file)
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file to %s" % out_path
raise errors.AnsibleError(msg)
finally:
src_file.close()
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
try:
dst_file = open(out_path, "wb")
except IOError:
traceback.print_exc()
msg = "failed to open output file %s" % out_path
raise errors.AnsibleError(msg)
try:
def write_file(args):
try:
with open(in_path, 'rb') as src_file:
shutil.copyfileobj(src_file, dst_file)
finally:
# this is needed in the lxc child process
# to flush internal python buffers
dst_file.close()
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file from %s to %s" % (in_path, out_path)
raise errors.AnsibleError(msg)
finally:
dst_file.close()
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| 35.627706
| 95
| 0.576306
|
4a0b40481502f6e12e2eced913f41305cdbe61c7
| 1,339
|
py
|
Python
|
setup.py
|
xtacocorex/chip-python-aREST
|
bc9bb01f569ae32adf4717b9d7414a4f3c481874
|
[
"MIT"
] | 6
|
2017-01-02T19:24:02.000Z
|
2019-02-14T06:45:07.000Z
|
setup.py
|
xtacocorex/chip-python-aREST
|
bc9bb01f569ae32adf4717b9d7414a4f3c481874
|
[
"MIT"
] | 10
|
2017-01-03T05:25:40.000Z
|
2017-12-16T19:53:41.000Z
|
setup.py
|
xtacocorex/chip-python-aREST
|
bc9bb01f569ae32adf4717b9d7414a4f3c481874
|
[
"MIT"
] | 2
|
2017-08-06T23:35:46.000Z
|
2018-10-17T02:07:36.000Z
|
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, Extension, find_packages

# Trove classifiers advertising the supported platforms and audience.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Operating System :: POSIX :: Linux',
    'License :: OSI Approved :: MIT License',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: Software Development',
    'Topic :: Home Automation',
    'Topic :: System :: Hardware',
]

# Package metadata for CHIP_aREST; the long description concatenates the
# README and the changelog, as before.
setup(
    name='CHIP_aREST',
    version='0.3',
    author='Robert Wolterman',
    author_email='robert.wolterman@gmail.com',
    description='A module to control the CHIP IO channels via a REST API',
    long_description=open('README.rst').read() + open('CHANGELOG.rst').read(),
    license='MIT',
    keywords='CHIP CHIPPRO NextThingCo IO GPIO PWM ADC SERVO REST',
    url='https://github.com/xtacocorex/CHIP_aREST/',
    classifiers=CLASSIFIERS,
    install_requires=["flask", "CHIP_IO", "requests", "paho-mqtt"],
    packages=["CHIP_aREST"],
    scripts=["examples/chip-arest-basic.py", "examples/chip-arest-cloud.py"],
)
| 46.172414
| 92
| 0.5646
|
4a0b415f8fe0a27dac87ceda1a3be9d30350494f
| 29,075
|
py
|
Python
|
electrum/base_wizard.py
|
ProjectMerge/electrum-merge
|
9a77ca74fec434ccdc862aeb82093f90e96cb550
|
[
"MIT"
] | null | null | null |
electrum/base_wizard.py
|
ProjectMerge/electrum-merge
|
9a77ca74fec434ccdc862aeb82093f90e96cb550
|
[
"MIT"
] | null | null | null |
electrum/base_wizard.py
|
ProjectMerge/electrum-merge
|
9a77ca74fec434ccdc862aeb82093f90e96cb550
|
[
"MIT"
] | 1
|
2020-12-18T17:13:10.000Z
|
2020-12-18T17:13:10.000Z
|
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Merge client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import copy
import traceback
from functools import partial
from typing import List, TYPE_CHECKING, Tuple, NamedTuple, Any, Dict, Optional
from . import bitcoin
from . import keystore
from . import mnemonic
from .bip32 import is_bip32_derivation, xpub_type, normalize_bip32_derivation
from .keystore import bip44_derivation, purpose48_derivation
from .wallet import (Imported_Wallet, Standard_Wallet, Multisig_Wallet,
wallet_types, Wallet, Abstract_Wallet)
from .storage import (WalletStorage, STO_EV_USER_PW, STO_EV_XPUB_PW,
get_derivation_used_for_hw_device_encryption)
from .i18n import _
from .util import UserCancelled, InvalidPassword, WalletFileException
from .simple_config import SimpleConfig
from .plugin import Plugins, HardwarePluginLibraryUnavailable
from .logging import Logger
from .plugins.hw_wallet.plugin import OutdatedHwFirmwareException, HW_PluginBase
if TYPE_CHECKING:
from .plugin import DeviceInfo
# hardware device setup purpose
HWD_SETUP_NEW_WALLET, HWD_SETUP_DECRYPT_WALLET = 0, 1
class ScriptTypeNotSupported(Exception):
    """Raised when the requested script type cannot be produced."""
class GoBack(Exception):
    """Control-flow exception signalling that the current wizard step was abandoned."""
class WizardStackItem(NamedTuple):
    # One frame of the wizard's navigation history (see BaseWizard.run /
    # BaseWizard.go_back). Declaration order defines the tuple layout.
    action: Any
    args: Any
    kwargs: Dict[str, Any]
    # deep copy of BaseWizard.data taken when this frame was entered,
    # restored on go_back()
    storage_data: dict
class BaseWizard(Logger):
    """State machine driving the wallet creation / restore flows.

    Steps are sequenced through run(); each step is recorded on an undo
    stack together with a snapshot of self.data so the user can go back.
    The various *_dialog methods and terminate() are expected to be
    provided elsewhere (presumably by GUI subclasses -- terminate() below
    is explicitly left to subclasses).
    """

    def __init__(self, config: SimpleConfig, plugins: Plugins):
        super(BaseWizard, self).__init__()
        Logger.__init__(self)
        self.config = config
        self.plugins = plugins
        # accumulated wallet data, eventually written to storage
        self.data = {}
        self.pw_args = None
        self._stack = [] # type: List[WizardStackItem]
        self.plugin = None
        self.keystores = []
        self.is_kivy = config.get('gui') == 'kivy'
        self.seed_type = None

    def set_icon(self, icon):
        pass

    def run(self, *args, **kwargs):
        """Dispatch the next wizard action, recording it on the undo stack."""
        action = args[0]
        args = args[1:]
        storage_data = copy.deepcopy(self.data)
        self._stack.append(WizardStackItem(action, args, kwargs, storage_data))
        if not action:
            return
        if type(action) is tuple:
            # (plugin, action) tuples dispatch to the plugin's method
            self.plugin, action = action
        if self.plugin and hasattr(self.plugin, action):
            f = getattr(self.plugin, action)
            f(self, *args, **kwargs)
        elif hasattr(self, action):
            f = getattr(self, action)
            f(*args, **kwargs)
        else:
            raise Exception("unknown action", action)

    def can_go_back(self):
        return len(self._stack) > 1

    def go_back(self):
        """Undo one wizard step and re-run the previous one."""
        if not self.can_go_back():
            return
        # pop 'current' frame
        self._stack.pop()
        # pop 'previous' frame
        stack_item = self._stack.pop()
        # try to undo side effects since we last entered 'previous' frame
        # FIXME only self.storage is properly restored
        self.data = copy.deepcopy(stack_item.storage_data)
        # rerun 'previous' frame
        self.run(stack_item.action, *stack_item.args, **stack_item.kwargs)

    def reset_stack(self):
        self._stack = []

    def new(self):
        """First step: let the user pick the kind of wallet to create."""
        title = _("Create new wallet")
        message = '\n'.join([
            _("What kind of wallet do you want to create?")
        ])
        wallet_kinds = [
            ('standard', _("Standard wallet")),
            ('2fa', _("Wallet with two-factor authentication")),
            ('multisig', _("Multi-signature wallet")),
            ('imported', _("Import Merge addresses or private keys")),
        ]
        # only offer the kinds this build actually supports
        choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
        self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)

    def upgrade_storage(self, storage):
        """Upgrade the wallet file format in a background task, then terminate."""
        exc = None
        def on_finished():
            if exc is None:
                self.terminate(storage=storage)
            else:
                raise exc
        def do_upgrade():
            nonlocal exc
            try:
                storage.upgrade()
            except Exception as e:
                # captured here and re-raised on the caller's side in on_finished
                exc = e
        self.waiting_dialog(do_upgrade, _('Upgrading wallet format...'), on_finished=on_finished)

    def load_2fa(self):
        """Switch the wizard into trustedcoin two-factor mode."""
        self.data['wallet_type'] = '2fa'
        self.data['use_trustedcoin'] = True
        self.plugin = self.plugins.load_plugin('trustedcoin')

    def on_wallet_type(self, choice):
        """Continue to the step appropriate for the chosen wallet kind."""
        self.data['wallet_type'] = self.wallet_type = choice
        if choice == 'standard':
            action = 'choose_keystore'
        elif choice == 'multisig':
            action = 'choose_multisig'
        elif choice == '2fa':
            self.load_2fa()
            action = self.plugin.get_action(self.data)
        elif choice == 'imported':
            action = 'import_addresses_or_keys'
        self.run(action)

    def choose_multisig(self):
        """Ask for the m-of-n parameters, then proceed to keystore selection."""
        def on_multisig(m, n):
            multisig_type = "%dof%d" % (m, n)
            self.data['wallet_type'] = multisig_type
            self.n = n
            self.run('choose_keystore')
        self.multisig_dialog(run_next=on_multisig)

    def choose_keystore(self):
        """Offer seed/key/hardware options for the next keystore (or cosigner)."""
        assert self.wallet_type in ['standard', 'multisig']
        i = len(self.keystores)
        title = _('Add cosigner') + ' (%d of %d)'%(i+1, self.n) if self.wallet_type=='multisig' else _('Keystore')
        if self.wallet_type =='standard' or i==0:
            message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
            choices = [
                ('choose_seed_type', _('Create a new seed')),
                ('restore_from_seed', _('I already have a seed')),
                ('restore_from_key', _('Use a master key')),
            ]
            if not self.is_kivy:
                choices.append(('choose_hw_device', _('Use a hardware device')))
        else:
            message = _('Add a cosigner to your multi-sig wallet')
            choices = [
                ('restore_from_key', _('Enter cosigner key')),
                ('restore_from_seed', _('Enter cosigner seed')),
            ]
            if not self.is_kivy:
                choices.append(('choose_hw_device', _('Cosign with hardware device')))
        self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)

    def import_addresses_or_keys(self):
        """Ask for a list of addresses (watching-only) or private keys."""
        v = lambda x: keystore.is_address_list(x) or keystore.is_private_key_list(x, raise_on_error=True)
        title = _("Import Merge Addresses")
        message = _("Enter a list of Merge addresses (this will create a watching-only wallet), or a list of private keys.")
        self.add_xpub_dialog(title=title, message=message, run_next=self.on_import,
                             is_valid=v, allow_multi=True, show_wif_help=True)

    def on_import(self, text):
        """Populate self.data['addresses'] from imported addresses or keys."""
        # text is already sanitized by is_address_list and is_private_keys_list
        if keystore.is_address_list(text):
            self.data['addresses'] = {}
            for addr in text.split():
                assert bitcoin.is_address(addr)
                self.data['addresses'][addr] = {}
        elif keystore.is_private_key_list(text):
            self.data['addresses'] = {}
            k = keystore.Imported_KeyStore({})
            keys = keystore.get_private_keys(text)
            for pk in keys:
                assert bitcoin.is_private_key(pk)
                txin_type, pubkey = k.import_privkey(pk, None)
                addr = bitcoin.pubkey_to_address(txin_type, pubkey)
                self.data['addresses'][addr] = {'type':txin_type, 'pubkey':pubkey, 'redeem_script':None}
            self.keystores.append(k)
        else:
            return self.terminate()
        return self.run('create_wallet')

    def restore_from_key(self):
        """Ask for a master public/private key for this keystore or cosigner."""
        if self.wallet_type == 'standard':
            v = keystore.is_master_key
            title = _("Create keystore from a master key")
            message = ' '.join([
                _("To create a watching-only wallet, please enter your master public key (xpub/ypub/zpub)."),
                _("To create a spending wallet, please enter a master private key (xprv/yprv/zprv).")
            ])
            self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
        else:
            i = len(self.keystores) + 1
            self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=keystore.is_bip32_key)

    def on_restore_from_key(self, text):
        k = keystore.from_master_key(text)
        self.on_keystore(k)

    def choose_hw_device(self, purpose=HWD_SETUP_NEW_WALLET, *, storage=None):
        """Scan for hardware wallets and let the user pick one.

        Collects per-plugin failures into a debug message that is shown
        when no usable device is found.
        """
        title = _('Hardware Keystore')
        # check available plugins
        supported_plugins = self.plugins.get_hardware_support()
        devices = [] # type: List[Tuple[str, DeviceInfo]]
        devmgr = self.plugins.device_manager
        debug_msg = ''
        def failed_getting_device_infos(name, e):
            nonlocal debug_msg
            err_str_oneline = ' // '.join(str(e).splitlines())
            self.logger.warning(f'error getting device infos for {name}: {err_str_oneline}')
            indented_error_msg = ' '.join([''] + str(e).splitlines(keepends=True))
            debug_msg += f' {name}: (error getting device infos)\n{indented_error_msg}\n'
        # scan devices
        try:
            scanned_devices = devmgr.scan_devices()
        except BaseException as e:
            self.logger.info('error scanning devices: {}'.format(repr(e)))
            debug_msg = ' {}:\n {}'.format(_('Error scanning devices'), e)
        else:
            for splugin in supported_plugins:
                name, plugin = splugin.name, splugin.plugin
                # plugin init errored?
                if not plugin:
                    e = splugin.exception
                    indented_error_msg = ' '.join([''] + str(e).splitlines(keepends=True))
                    debug_msg += f' {name}: (error during plugin init)\n'
                    debug_msg += ' {}\n'.format(_('You might have an incompatible library.'))
                    debug_msg += f'{indented_error_msg}\n'
                    continue
                # see if plugin recognizes 'scanned_devices'
                try:
                    # FIXME: side-effect: unpaired_device_info sets client.handler
                    device_infos = devmgr.unpaired_device_infos(None, plugin, devices=scanned_devices,
                                                                include_failing_clients=True)
                except HardwarePluginLibraryUnavailable as e:
                    failed_getting_device_infos(name, e)
                    continue
                except BaseException as e:
                    self.logger.exception('')
                    failed_getting_device_infos(name, e)
                    continue
                device_infos_failing = list(filter(lambda di: di.exception is not None, device_infos))
                for di in device_infos_failing:
                    failed_getting_device_infos(name, di.exception)
                device_infos_working = list(filter(lambda di: di.exception is None, device_infos))
                devices += list(map(lambda x: (name, x), device_infos_working))
        if not debug_msg:
            debug_msg = ' {}'.format(_('No exceptions encountered.'))
        if not devices:
            # no usable device: explain, show debug info, offer a rescan
            msg = (_('No hardware device detected.') + '\n' +
                   _('To trigger a rescan, press \'Next\'.') + '\n\n')
            if sys.platform == 'win32':
                msg += _('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", '
                         'and do "Remove device". Then, plug your device again.') + '\n'
                msg += _('While this is less than ideal, it might help if you run Electrum as Administrator.') + '\n'
            else:
                msg += _('On Linux, you might have to add a new permission to your udev rules.') + '\n'
            msg += '\n\n'
            msg += _('Debug message') + '\n' + debug_msg
            self.confirm_dialog(title=title, message=msg,
                                run_next=lambda x: self.choose_hw_device(purpose, storage=storage))
            return
        # select device
        self.devices = devices
        choices = []
        for name, info in devices:
            state = _("initialized") if info.initialized else _("wiped")
            label = info.label or _("An unnamed {}").format(name)
            try: transport_str = info.device.transport_ui_string[:20]
            except: transport_str = 'unknown transport'
            descr = f"{label} [{name}, {state}, {transport_str}]"
            choices.append(((name, info), descr))
        msg = _('Select a device') + ':'
        self.choice_dialog(title=title, message=msg, choices=choices,
                           run_next=lambda *args: self.on_device(*args, purpose=purpose, storage=storage))

    def on_device(self, name, device_info, *, purpose, storage=None):
        """Set up the chosen device; on recoverable failures, re-enter device choice."""
        self.plugin = self.plugins.get_plugin(name) # type: HW_PluginBase
        try:
            self.plugin.setup_device(device_info, self, purpose)
        except OSError as e:
            self.show_error(_('We encountered an error while connecting to your device:')
                            + '\n' + str(e) + '\n'
                            + _('To try to fix this, we will now re-pair with your device.') + '\n'
                            + _('Please try again.'))
            devmgr = self.plugins.device_manager
            devmgr.unpair_id(device_info.device.id_)
            self.choose_hw_device(purpose, storage=storage)
            return
        except OutdatedHwFirmwareException as e:
            if self.question(e.text_ignore_old_fw_and_continue(), title=_("Outdated device firmware")):
                self.plugin.set_ignore_outdated_fw()
                # will need to re-pair
                devmgr = self.plugins.device_manager
                devmgr.unpair_id(device_info.device.id_)
            self.choose_hw_device(purpose, storage=storage)
            return
        except (UserCancelled, GoBack):
            self.choose_hw_device(purpose, storage=storage)
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(str(e))
            self.choose_hw_device(purpose, storage=storage)
            return
        if purpose == HWD_SETUP_NEW_WALLET:
            def f(derivation, script_type):
                derivation = normalize_bip32_derivation(derivation)
                self.run('on_hw_derivation', name, device_info, derivation, script_type)
            self.derivation_and_script_type_dialog(f)
        elif purpose == HWD_SETUP_DECRYPT_WALLET:
            derivation = get_derivation_used_for_hw_device_encryption()
            xpub = self.plugin.get_xpub(device_info.device.id_, derivation, 'standard', self)
            password = keystore.Xpub.get_pubkey_from_xpub(xpub, ())
            try:
                storage.decrypt(password)
            except InvalidPassword:
                # try to clear session so that user can type another passphrase
                devmgr = self.plugins.device_manager
                client = devmgr.client_by_id(device_info.device.id_)
                if hasattr(client, 'clear_session'): # FIXME not all hw wallet plugins have this
                    client.clear_session()
                raise
        else:
            raise Exception('unknown purpose: %s' % purpose)

    def derivation_and_script_type_dialog(self, f):
        """Let the user pick a script type and (optionally) a derivation path."""
        message1 = _('Choose the type of addresses in your wallet.')
        message2 = '\n'.join([
            _('You can override the suggested derivation path.'),
            _('If you are not sure what this is, leave this field unchanged.')
        ])
        if self.wallet_type == 'multisig':
            # There is no general standard for HD multisig.
            # For legacy, this is partially compatible with BIP45; assumes index=0
            # For segwit, a custom path is used, as there is no standard at all.
            default_choice_idx = 2
            choices = [
                ('standard', 'legacy multisig (p2sh)', "m/45'/0"),
                ('p2wsh-p2sh', 'p2sh-segwit multisig (p2wsh-p2sh)', purpose48_derivation(0, xtype='p2wsh-p2sh')),
                ('p2wsh', 'native segwit multisig (p2wsh)', purpose48_derivation(0, xtype='p2wsh')),
            ]
        else:
            default_choice_idx = 2
            choices = [
                ('standard', 'legacy (p2pkh)', bip44_derivation(0, bip43_purpose=44)),
                ('p2wpkh-p2sh', 'p2sh-segwit (p2wpkh-p2sh)', bip44_derivation(0, bip43_purpose=49)),
                ('p2wpkh', 'native segwit (p2wpkh)', bip44_derivation(0, bip43_purpose=84)),
            ]
        while True:
            try:
                self.choice_and_line_dialog(
                    run_next=f, title=_('Script type and Derivation path'), message1=message1,
                    message2=message2, choices=choices, test_text=is_bip32_derivation,
                    default_choice_idx=default_choice_idx)
                return
            except ScriptTypeNotSupported as e:
                self.show_error(e)
                # let the user choose again

    def on_hw_derivation(self, name, device_info, derivation, xtype):
        """Build a hardware keystore from the device xpub at the given derivation."""
        from .keystore import hardware_keystore
        try:
            xpub = self.plugin.get_xpub(device_info.device.id_, derivation, xtype, self)
        except ScriptTypeNotSupported:
            raise # this is handled in derivation_dialog
        except BaseException as e:
            self.logger.exception('')
            self.show_error(e)
            return
        d = {
            'type': 'hardware',
            'hw_type': name,
            'derivation': derivation,
            'xpub': xpub,
            'label': device_info.label,
        }
        k = hardware_keystore(d)
        self.on_keystore(k)

    def passphrase_dialog(self, run_next, is_restoring=False):
        """Ask for an optional seed extension (BIP39 passphrase)."""
        title = _('Seed extension')
        message = '\n'.join([
            _('You may extend your seed with custom words.'),
            _('Your seed extension must be saved together with your seed.'),
        ])
        warning = '\n'.join([
            _('Note that this is NOT your encryption password.'),
            _('If you do not know what this is, leave this field empty.'),
        ])
        warn_issue4566 = is_restoring and self.seed_type == 'bip39'
        self.line_dialog(title=title, message=message, warning=warning,
                         default='', test=lambda x:True, run_next=run_next,
                         warn_issue4566=warn_issue4566)

    def restore_from_seed(self):
        self.opt_bip39 = True
        self.opt_ext = True
        is_cosigning_seed = lambda x: mnemonic.seed_type(x) in ['standard', 'segwit']
        test = mnemonic.is_seed if self.wallet_type == 'standard' else is_cosigning_seed
        self.restore_seed_dialog(run_next=self.on_restore_seed, test=test)

    def on_restore_seed(self, seed, is_bip39, is_ext):
        """Branch on the detected seed type; optionally ask for an extension."""
        self.seed_type = 'bip39' if is_bip39 else mnemonic.seed_type(seed)
        if self.seed_type == 'bip39':
            f = lambda passphrase: self.on_restore_bip39(seed, passphrase)
            self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
        elif self.seed_type in ['standard', 'segwit']:
            f = lambda passphrase: self.run('create_keystore', seed, passphrase)
            self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
        elif self.seed_type == 'old':
            self.run('create_keystore', seed, '')
        elif mnemonic.is_any_2fa_seed_type(self.seed_type):
            self.load_2fa()
            self.run('on_restore_seed', seed, is_ext)
        else:
            raise Exception('Unknown seed type', self.seed_type)

    def on_restore_bip39(self, seed, passphrase):
        def f(derivation, script_type):
            derivation = normalize_bip32_derivation(derivation)
            self.run('on_bip43', seed, passphrase, derivation, script_type)
        self.derivation_and_script_type_dialog(f)

    def create_keystore(self, seed, passphrase):
        k = keystore.from_seed(seed, passphrase, self.wallet_type == 'multisig')
        self.on_keystore(k)

    def on_bip43(self, seed, passphrase, derivation, script_type):
        k = keystore.from_bip39_seed(seed, passphrase, derivation, xtype=script_type)
        self.on_keystore(k)

    def on_keystore(self, k):
        """Validate the new keystore against the wallet type and cosigners."""
        has_xpub = isinstance(k, keystore.Xpub)
        if has_xpub:
            t1 = xpub_type(k.xpub)
        if self.wallet_type == 'standard':
            if has_xpub and t1 not in ['standard', 'p2wpkh', 'p2wpkh-p2sh']:
                self.show_error(_('Wrong key type') + ' %s'%t1)
                self.run('choose_keystore')
                return
            self.keystores.append(k)
            self.run('create_wallet')
        elif self.wallet_type == 'multisig':
            assert has_xpub
            if t1 not in ['standard', 'p2wsh', 'p2wsh-p2sh']:
                self.show_error(_('Wrong key type') + ' %s'%t1)
                self.run('choose_keystore')
                return
            if k.xpub in map(lambda x: x.xpub, self.keystores):
                self.show_error(_('Error: duplicate master public key'))
                self.run('choose_keystore')
                return
            if len(self.keystores)>0:
                # all cosigner keys must share the same script type
                t2 = xpub_type(self.keystores[0].xpub)
                if t1 != t2:
                    self.show_error(_('Cannot add this cosigner:') + '\n' + "Their key type is '%s', we are '%s'"%(t1, t2))
                    self.run('choose_keystore')
                    return
            self.keystores.append(k)
            if len(self.keystores) == 1:
                xpub = k.get_master_public_key()
                self.reset_stack()
                self.run('show_xpub_and_add_cosigners', xpub)
            elif len(self.keystores) < self.n:
                self.run('choose_keystore')
            else:
                self.run('create_wallet')

    def create_wallet(self):
        """Request password / storage-encryption choices, then finish via on_password."""
        encrypt_keystore = any(k.may_have_password() for k in self.keystores)
        # note: the following condition ("if") is duplicated logic from
        # wallet.get_available_storage_encryption_version()
        if self.wallet_type == 'standard' and isinstance(self.keystores[0], keystore.Hardware_KeyStore):
            # offer encrypting with a pw derived from the hw device
            k = self.keystores[0]
            try:
                k.handler = self.plugin.create_handler(self)
                password = k.get_password_for_storage_encryption()
            except UserCancelled:
                devmgr = self.plugins.device_manager
                devmgr.unpair_xpub(k.xpub)
                self.choose_hw_device()
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(str(e))
                return
            self.request_storage_encryption(
                run_next=lambda encrypt_storage: self.on_password(
                    password,
                    encrypt_storage=encrypt_storage,
                    storage_enc_version=STO_EV_XPUB_PW,
                    encrypt_keystore=False))
        else:
            # prompt the user to set an arbitrary password
            self.request_password(
                run_next=lambda password, encrypt_storage: self.on_password(
                    password,
                    encrypt_storage=encrypt_storage,
                    storage_enc_version=STO_EV_USER_PW,
                    encrypt_keystore=encrypt_keystore),
                force_disable_encrypt_cb=not encrypt_keystore)

    def on_password(self, password, *, encrypt_storage,
                    storage_enc_version=STO_EV_USER_PW, encrypt_keystore):
        """Apply the password to the keystores, serialize them, and terminate."""
        for k in self.keystores:
            if k.may_have_password():
                k.update_password(None, password)
        if self.wallet_type == 'standard':
            self.data['seed_type'] = self.seed_type
            keys = self.keystores[0].dump()
            self.data['keystore'] = keys
        elif self.wallet_type == 'multisig':
            for i, k in enumerate(self.keystores):
                self.data['x%d/'%(i+1)] = k.dump()
        elif self.wallet_type == 'imported':
            if len(self.keystores) > 0:
                keys = self.keystores[0].dump()
                self.data['keystore'] = keys
        else:
            raise Exception('Unknown wallet type')
        # stashed for create_storage(), which may run later
        self.pw_args = password, encrypt_storage, storage_enc_version
        self.terminate()

    def create_storage(self, path):
        """Write the accumulated wallet data to a new wallet file at path."""
        if os.path.exists(path):
            raise Exception('file already exists at path')
        if not self.pw_args:
            return
        password, encrypt_storage, storage_enc_version = self.pw_args
        storage = WalletStorage(path)
        storage.set_keystore_encryption(bool(password))
        if encrypt_storage:
            storage.set_password(password, enc_version=storage_enc_version)
        for key, value in self.data.items():
            storage.put(key, value)
        storage.write()
        storage.load_plugins()
        return storage

    def terminate(self, *, storage: Optional[WalletStorage] = None):
        raise NotImplementedError() # implemented by subclasses

    def show_xpub_and_add_cosigners(self, xpub):
        self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))

    def choose_seed_type(self, message=None, choices=None):
        """Let the user pick between segwit and legacy seed generation."""
        title = _('Choose Seed type')
        if message is None:
            message = ' '.join([
                _("The type of addresses used by your wallet will depend on your seed."),
                _("Segwit wallets use bech32 addresses, defined in BIP173."),
                _("Please note that websites and other wallets may not support these addresses yet."),
                _("Thus, you might want to keep using a non-segwit wallet in order to be able to receive bitcoins during the transition period.")
            ])
        if choices is None:
            choices = [
                ('create_segwit_seed', _('Segwit')),
                ('create_standard_seed', _('Legacy')),
            ]
        self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)

    def create_segwit_seed(self): self.create_seed('segwit')

    def create_standard_seed(self): self.create_seed('standard')

    def create_seed(self, seed_type):
        """Generate a fresh seed and show it to the user for backup."""
        from . import mnemonic
        self.seed_type = seed_type
        seed = mnemonic.Mnemonic('en').make_seed(self.seed_type)
        self.opt_bip39 = False
        f = lambda x: self.request_passphrase(seed, x)
        self.show_seed_dialog(run_next=f, seed_text=seed)

    def request_passphrase(self, seed, opt_passphrase):
        if opt_passphrase:
            f = lambda x: self.confirm_seed(seed, x)
            self.passphrase_dialog(run_next=f)
        else:
            self.run('confirm_seed', seed, '')

    def confirm_seed(self, seed, passphrase):
        # user must retype the seed exactly before we continue
        f = lambda x: self.confirm_passphrase(seed, passphrase)
        self.confirm_seed_dialog(run_next=f, test=lambda x: x==seed)

    def confirm_passphrase(self, seed, passphrase):
        # likewise, retype the extension (if any) before creating the keystore
        f = lambda x: self.run('create_keystore', seed, x)
        if passphrase:
            title = _('Confirm Seed Extension')
            message = '\n'.join([
                _('Your seed extension must be saved together with your seed.'),
                _('Please type it here.'),
            ])
            self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x==passphrase)
        else:
            f('')
| 44.868827
| 145
| 0.598246
|
4a0b4282bd342428c9fc2b1d56148fc26e464531
| 243
|
py
|
Python
|
python_exercises/48numerical_sets.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_exercises/48numerical_sets.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_exercises/48numerical_sets.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
"""Demonstrate the basic Python set operators on two small sets."""

x = {1, 2, 3}
y = {2, 3, 4}

print('x =', x)
# BUG FIX: this line previously printed the label 'x =' for the set y
print('y =', y)

# Union: elements present in either set
print('Union:', x | y)
# Intersection: elements present in both sets
print('Intersection:', x & y)
# Difference: elements of x that are not in y
print('Difference:', x - y)
# Symmetric difference: elements in exactly one of the two sets
print('Symmetric difference:', x ^ y)
| 13.5
| 37
| 0.584362
|
4a0b42ad8fa149537003b47c237a32b13311bf8b
| 5,170
|
py
|
Python
|
demystifying/feature_extraction/rbm_feature_extractor.py
|
delemottelab/demystifying
|
e8527b52d5fbe0570cd391921ecda5aefceb797a
|
[
"MIT"
] | 16
|
2020-01-04T14:46:03.000Z
|
2021-07-10T05:54:05.000Z
|
demystifying/feature_extraction/rbm_feature_extractor.py
|
delemottelab/demystifying
|
e8527b52d5fbe0570cd391921ecda5aefceb797a
|
[
"MIT"
] | 11
|
2020-01-10T16:18:17.000Z
|
2022-03-20T09:53:33.000Z
|
demystifying/feature_extraction/rbm_feature_extractor.py
|
delemottelab/demystifying
|
e8527b52d5fbe0570cd391921ecda5aefceb797a
|
[
"MIT"
] | 3
|
2020-03-16T04:35:01.000Z
|
2022-02-10T12:39:01.000Z
|
import logging
import sys
# Configure root logging to stdout with timestamped, per-logger messages.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from .. import relevance_propagation as relprop
from .feature_extractor import FeatureExtractor
from sklearn.neural_network import BernoulliRBM
from .. import utils
import scipy
# Module-level logger used by RbmFeatureExtractor below.
logger = logging.getLogger("rbm")
class RbmFeatureExtractor(FeatureExtractor):
    """Feature extractor built on a Bernoulli Restricted Boltzmann Machine (RBM).

    Feature importance is derived either by layer-wise relevance propagation
    (LRP) through the trained RBM ('from_lrp') or from the RBM components
    weighted by the variance of their activations ('from_components').
    """

    def __init__(self,
                 supervised=False,
                 name="RBM",
                 randomize=True,
                 relevance_method="from_lrp",
                 variance_cutoff='auto',
                 classifier_kwargs=None,
                 **kwargs):
        """
        :param supervised: when True, train one unsupervised model per class.
        :param name: extractor name used in logs/results.
        :param randomize: when False, a fixed random_state is set for reproducibility.
        :param relevance_method: 'from_lrp' or 'from_components'.
        :param variance_cutoff: cutoff forwarded when importance comes from components.
        :param classifier_kwargs: extra kwargs for sklearn's BernoulliRBM
            (defaults to {'n_components': 1}).
        """
        FeatureExtractor.__init__(self,
                                  supervised=supervised,
                                  name=name,
                                  **kwargs)
        self.relevance_method = relevance_method
        self.variance_cutoff = variance_cutoff
        self.randomize = randomize
        # Copy so neither the caller's dict nor a shared default is ever mutated
        # (the previous signature used a mutable default argument).
        self.classifier_kwargs = (dict(classifier_kwargs)
                                  if classifier_kwargs is not None
                                  else {'n_components': 1})
        if not self.randomize:
            self.classifier_kwargs['random_state'] = 89274
        # Fixed: the original format string logged relevance_method twice.
        logger.debug("Initializing RBM with the following parameters: "
                     " randomize %s, relevance_method %s, variance_cutoff %s,"
                     " classifier_kwargs %s",
                     randomize, relevance_method, variance_cutoff,
                     self.classifier_kwargs)

    def train(self, train_set, train_labels):
        """Fit a BernoulliRBM on train_set (or one model per class when supervised)."""
        if self.supervised and train_labels is not None:
            return self._train_unsupervised_methods_per_class(train_set, train_labels)
        model = BernoulliRBM(**self.classifier_kwargs)
        model.fit(train_set)
        return model

    def get_feature_importance(self, model, samples, labels):
        """Compute per-feature importance for `samples` under `model`.

        With 'from_lrp', relevance is propagated back through the RBM and
        rescaled per frame to [0, 1]; with 'from_components', importance is
        computed from the variance-sorted RBM components.
        """
        if self.supervised and labels is not None:
            return self._get_feature_importance_for_unsupervised_per_class(model, samples, labels)
        # Typo fix in log message: "psuedo" -> "pseudo".
        logger.debug("RBM pseudo-loglikelihood: " + str(model.score_samples(samples).mean()))
        if self.relevance_method == "from_lrp":
            nfeatures = samples.shape[1]
            labels_propagation = model.transform(samples)  # same as perfect classification
            # Calculate relevance
            # see https://scikit-learn.org/stable/modules/neural_networks_unsupervised.html
            layers = self._create_layers(model)
            propagator = relprop.RelevancePropagator(layers)
            relevance = propagator.propagate(samples, labels_propagation)
            # Clamp negative relevance to zero, then rescale each frame to
            # [0, 1]; the 1e-9 term guards against division by zero.
            logger.debug("Rescaling feature importance extracted using RBM in each frame between min and max ...")
            for i in range(relevance.shape[0]):
                ind_negative = np.where(relevance[i, :] < 0)[0]
                relevance[i, ind_negative] = 0
                relevance[i, :] = (relevance[i, :] - np.min(relevance[i, :])) / (
                        np.max(relevance[i, :]) - np.min(relevance[i, :]) + 1e-9)
            if self.supervised:
                return relevance.mean(axis=0)
            # Average relevance per cluster; labels rows are treated as one-hot
            # cluster assignments (argmax picks the cluster index).
            nclusters = labels.shape[1]
            result = np.zeros((nfeatures, nclusters))
            frames_per_cluster = np.zeros((nclusters))
            for frame_idx, frame in enumerate(labels):
                cluster_idx = labels[frame_idx].argmax()
                frames_per_cluster[cluster_idx] += 1
            for frame_idx, rel in enumerate(relevance):
                cluster_idx = labels[frame_idx].argmax()
                result[:, cluster_idx] += rel / frames_per_cluster[cluster_idx]
            return result
        elif self.relevance_method == "from_components":
            # Extract components and compute the variance of their (sigmoid)
            # projection; higher-variance components are considered more important.
            components = model.components_
            projection = scipy.special.expit(np.matmul(samples, components.T))
            components_var = projection.var(axis=0)
            # Sort components according to their variance (descending)
            ind_components_var_sorted = np.argsort(-components_var)
            components_var_sorted = components_var[ind_components_var_sorted]
            components_var_sorted /= components_var_sorted.sum()
            components_sorted = components[ind_components_var_sorted, :]
            return utils.compute_feature_importance_from_components(components_var_sorted,
                                                                    components_sorted,
                                                                    self.variance_cutoff)
        else:
            raise Exception("Method {} not supported".format(self.relevance_method))

    def _create_layers(self, classifier):
        """LRP layers mirroring the RBM: a linear layer (weights/bias) + logistic sigmoid."""
        return [relprop.FirstLinear(min_val=0, max_val=1, weight=classifier.components_.T,
                                    bias=classifier.intercept_hidden_),
                relprop.LogisticSigmoid()
                ]
| 42.03252
| 114
| 0.604062
|
4a0b42f8c14c61686a4a94664b1c713895cfde02
| 1,202
|
py
|
Python
|
cogs/utils/Events.py
|
MisakaMikoto0502/zeroday
|
e0a7a652ccca1af10e3ded01eef0baa8eccf0976
|
[
"MIT"
] | null | null | null |
cogs/utils/Events.py
|
MisakaMikoto0502/zeroday
|
e0a7a652ccca1af10e3ded01eef0baa8eccf0976
|
[
"MIT"
] | null | null | null |
cogs/utils/Events.py
|
MisakaMikoto0502/zeroday
|
e0a7a652ccca1af10e3ded01eef0baa8eccf0976
|
[
"MIT"
] | 1
|
2020-11-12T23:02:15.000Z
|
2020-11-12T23:02:15.000Z
|
import discord
import time
import datetime
from discord.ext import commands
from discord.ext.commands import Context, Bot
from app.controller.logger import Logger
class Events(commands.Cog):
    """Cog that centralises command-error handling for the bot."""

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        self.logger = Logger.generate_log()

    @staticmethod
    async def error_binder(ctx: Context, error, exception, title: str, description: str):
        """Send a short-lived error embed if `error` is an instance of `exception`.

        Returns the sent message (auto-deleted after 5 seconds) or None when the
        error type does not match.
        """
        if isinstance(error, exception):
            err = discord.Embed(
                title=title,
                description=description,
            )
            return await ctx.send(embed=err, delete_after=5)

    @commands.Cog.listener()
    async def on_command_error(self, ctx: Context, error):
        """Global command-error listener.

        Currently only reports missing bot permissions; a handler for
        commands.ExtensionError existed here but is disabled.
        """
        await self.error_binder(
            ctx=ctx,
            error=error,
            exception=commands.BotMissingPermissions,
            title="Bot Missing Permissions",
            description=error
        )
| 30.05
| 90
| 0.578203
|
4a0b43588e2408c1aec112417199b6e118fac969
| 2,952
|
py
|
Python
|
product/create_product.py
|
t-karasova/grs-samples-python
|
0941265a6b8dd0bd6bd1db540b49307674360db7
|
[
"Apache-2.0"
] | 1
|
2021-12-22T14:47:30.000Z
|
2021-12-22T14:47:30.000Z
|
product/create_product.py
|
t-karasova/grs-samples-python
|
0941265a6b8dd0bd6bd1db540b49307674360db7
|
[
"Apache-2.0"
] | null | null | null |
product/create_product.py
|
t-karasova/grs-samples-python
|
0941265a6b8dd0bd6bd1db540b49307674360db7
|
[
"Apache-2.0"
] | 2
|
2021-10-05T09:40:02.000Z
|
2021-12-17T16:04:26.000Z
|
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START retail_create_product]
# Create product in a catalog using Retail API
#
import os
import random
import string
from google.api_core.client_options import ClientOptions
from google.cloud.retail import CreateProductRequest, Product, \
ProductServiceClient
from google.cloud.retail_v2 import PriceInfo
from google.cloud.retail_v2.types import product
from setup.setup_cleanup import delete_product
# Project number comes from the environment; the request parent path and the
# API endpoint are derived constants used by the helpers below.
project_number = os.getenv('PROJECT_NUMBER')
default_branch_name = "projects/" + project_number + "/locations/global/catalogs/default_catalog/branches/default_branch"
endpoint = "retail.googleapis.com"
# Random 8-letter product id so repeated runs do not collide.
generated_product_id = ''.join(random.sample(string.ascii_lowercase, 8))
# get product service client
def get_product_service_client():
    """Return a ProductServiceClient bound to the retail endpoint."""
    opts = ClientOptions(endpoint)
    return ProductServiceClient(client_options=opts)
# generate product to create
def generate_product() -> Product:
    """Build the sample primary product ("Nest Mini") priced in USD."""
    nest_price = PriceInfo(
        price=30.0,
        original_price=35.5,
        currency_code="USD",
    )
    return product.Product(
        title='Nest Mini',
        type_=product.Product.Type.PRIMARY,
        categories=['Speakers and displays'],
        brands=['Google'],
        price_info=nest_price,
        availability="IN_STOCK",
    )
# get create product request
def get_create_product_request(product_to_create: Product,
                               product_id: str) -> object:
    """Assemble and log a CreateProductRequest targeting the default branch."""
    request = CreateProductRequest(
        product=product_to_create,
        product_id=product_id,
        parent=default_branch_name,
    )
    print("---create product request---")
    print(request)
    return request
# call the Retail API to create product
def create_product(product_id: str):
    """Call the Retail API to create a product; return the server's response."""
    request = get_create_product_request(generate_product(), product_id)
    client = get_product_service_client()
    result = client.create_product(request)
    print("---created product:---")
    print(result)
    return result
# create a product
# Runs at import time: create the sample product, then clean it up again.
created_product = create_product(generated_product_id)
# delete created product
delete_product(created_product.name)
# [END retail_create_product]
| 32.8
| 121
| 0.747967
|
4a0b43818489845fccffa7e7f6cdd3b91eb2aa6c
| 6,881
|
py
|
Python
|
models/experimental/cifar_keras/cifar_keras.py
|
bileschi/tpu
|
0731831addc47d45342708093697492e4e9a68ca
|
[
"Apache-2.0"
] | null | null | null |
models/experimental/cifar_keras/cifar_keras.py
|
bileschi/tpu
|
0731831addc47d45342708093697492e4e9a68ca
|
[
"Apache-2.0"
] | null | null | null |
models/experimental/cifar_keras/cifar_keras.py
|
bileschi/tpu
|
0731831addc47d45342708093697492e4e9a68ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cifar example using Keras for model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
# Cloud TPU Cluster Resolvers
# Cloud TPU Cluster Resolver flags: how to locate the TPU worker.
flags.DEFINE_string(
    "gcp_project", default=None,
    help="Project name for the Cloud TPU-enabled project. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string(
    "tpu_zone", default=None,
    help="GCE zone where the Cloud TPU is located in. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string(
    "tpu_name", default=None,
    help="Name of the Cloud TPU for Cluster Resolvers. You must specify either "
    "this flag or --master.")

# Model specific parameters
flags.DEFINE_string(
    "master", default=None,
    help="GRPC URL of the master (e.g. grpc://ip.address.of.tpu:8470). You "
    "must specify either this flag or --tpu_name.")
flags.DEFINE_integer("batch_size", 128,
                     "Mini-batch size for the computation. Note that this "
                     "is the global batch size and not the per-shard batch.")
flags.DEFINE_float("learning_rate", 0.05, "Learning rate.")
flags.DEFINE_string("train_file", "", "Path to cifar10 training data.")
flags.DEFINE_integer("train_steps", 100000,
                     "Total number of steps. Note that the actual number of "
                     "steps is the next multiple of --iterations greater "
                     "than this value.")
flags.DEFINE_bool("use_tpu", True, "Use TPUs rather than plain CPUs")
flags.DEFINE_string("model_dir", None, "Estimator model_dir")
flags.DEFINE_integer("iterations_per_loop", 100,
                     "Number of iterations per TPU training loop.")
flags.DEFINE_integer("num_shards", 8, "Number of shards (TPU chips).")

FLAGS = flags.FLAGS
def model_fn(features, labels, mode, params):
  """Define a CIFAR model in Keras.

  Args:
    features: batch of input images; input_fn produces 32x32x3 float images.
    labels: batch of integer class labels (10 classes).
    mode: tf.estimator mode key.
    params: unused TPUEstimator params dict.

  Returns:
    A TPUEstimatorSpec carrying loss, train_op and prediction tensors.
  """
  del params  # unused
  layers = tf.contrib.keras.layers

  # Pass our input tensor to initialize the Keras input layer.
  v = layers.Input(tensor=features)
  # Two conv + max-pool stages, then a fully connected head over 10 classes.
  v = layers.Conv2D(filters=32, kernel_size=5,
                    activation="relu", padding="same")(v)
  v = layers.MaxPool2D(pool_size=2)(v)
  v = layers.Conv2D(filters=64, kernel_size=5,
                    activation="relu", padding="same")(v)
  v = layers.MaxPool2D(pool_size=2)(v)
  v = layers.Flatten()(v)
  fc1 = layers.Dense(units=512, activation="relu")(v)
  logits = layers.Dense(units=10)(fc1)

  # Instead of constructing a Keras model for training, build our loss function
  # and optimizer in Tensorflow.
  #
  # N.B. This construction omits some features that are important for more
  # complex models (e.g. regularization, batch-norm). Once
  # `model_to_estimator` support is added for TPUs, it should be used instead.
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=labels
      )
  )
  optimizer = tf.train.AdamOptimizer()
  if FLAGS.use_tpu:
    # Aggregate gradients across TPU shards before applying them.
    optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)

  train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
  return tpu_estimator.TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      predictions={
          "classes": tf.argmax(input=logits, axis=1),
          "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
      }
  )
def input_fn(params):
  """Read CIFAR input data from a TFRecord dataset.

  Returns a (images, labels) pair of tensors; images are float32 in
  [-0.5, 0.5] with shape (batch, 32, 32, 3) after the transpose below.
  """
  del params
  batch_size = FLAGS.batch_size

  def parser(serialized_example):
    """Parses a single tf.Example into image and label tensors."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            "image": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features["image"], tf.uint8)
    image.set_shape([3*32*32])
    # Scale raw bytes to [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # Records are stored channel-first; transpose to HWC.
    image = tf.transpose(tf.reshape(image, [3, 32, 32]))
    label = tf.cast(features["label"], tf.int32)
    return image, label

  dataset = tf.data.TFRecordDataset([FLAGS.train_file])
  dataset = dataset.map(parser, num_parallel_calls=batch_size)
  dataset = dataset.prefetch(4 * batch_size).cache().repeat()
  # TPUs require fixed batch shapes, so drop any short final batch.
  dataset = dataset.apply(
      tf.contrib.data.batch_and_drop_remainder(FLAGS.batch_size)
  )
  dataset = dataset.prefetch(1)
  images, labels = dataset.make_one_shot_iterator().get_next()
  return images, labels
def main(argv):
  """Resolve the TPU address, build the TPUEstimator and run training."""
  del argv  # Unused.

  # Exactly one of --master / --tpu_name must identify the TPU worker;
  # --master wins when both are given.
  if FLAGS.master is None and FLAGS.tpu_name is None:
    raise RuntimeError("You must specify either --master or --tpu_name.")

  if FLAGS.master is not None:
    if FLAGS.tpu_name is not None:
      tf.logging.warn("Both --master and --tpu_name are set. Ignoring "
                      "--tpu_name and using --master.")
    tpu_grpc_url = FLAGS.master
  else:
    tpu_cluster_resolver = (
        tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name,
            zone=FLAGS.tpu_zone,
            project=FLAGS.gcp_project))
    tpu_grpc_url = tpu_cluster_resolver.get_master()

  run_config = tpu_config.RunConfig(
      master=tpu_grpc_url,
      model_dir=FLAGS.model_dir,
      save_checkpoints_secs=3600,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
      tpu_config=tpu_config.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_shards),
  )

  estimator = tpu_estimator.TPUEstimator(
      model_fn=model_fn,
      use_tpu=FLAGS.use_tpu,
      config=run_config,
      train_batch_size=FLAGS.batch_size)
  estimator.train(input_fn=input_fn, max_steps=FLAGS.train_steps)
# Script entry point.
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main)
| 37.396739
| 80
| 0.692051
|
4a0b43c492fbc466e2b9ed302424668560e82a6e
| 4,154
|
py
|
Python
|
lstm.py
|
NehaPendem/Classical-Music-Combination
|
e1d084ed718ccca5b7d9bfe42ad7fe0a14ed7640
|
[
"MIT"
] | null | null | null |
lstm.py
|
NehaPendem/Classical-Music-Combination
|
e1d084ed718ccca5b7d9bfe42ad7fe0a14ed7640
|
[
"MIT"
] | null | null | null |
lstm.py
|
NehaPendem/Classical-Music-Combination
|
e1d084ed718ccca5b7d9bfe42ad7fe0a14ed7640
|
[
"MIT"
] | null | null | null |
""" This module prepares midi file data and feeds it to the neural
network for training """
import glob
import pickle
import numpy
from music21 import converter, instrument, note, chord
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
from keras.layers import BatchNormalization as BatchNorm
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
def train_network():
    """ Train a Neural Network to generate music """
    notes = get_notes()
    # vocabulary size = number of distinct pitch/chord strings
    vocab_size = len(set(notes))
    inputs, targets = prepare_sequences(notes, vocab_size)
    net = create_network(inputs, vocab_size)
    train(net, inputs, targets)
def get_notes():
    """Get all the notes and chords from the midi files in ./midi_songs.

    Each Note is recorded as its pitch string; each Chord as the dot-joined
    normal-order pitch classes. The list is also pickled to 'data/notes'.

    Returns:
        list[str]: notes/chords in playback order across all parsed files.
    """
    notes = []
    for file in glob.glob("midi_songs/*.mid"):
        midi = converter.parse(file)
        print("Parsing %s" % file)
        try:  # file has instrument parts
            s2 = instrument.partitionByInstrument(midi)
            notes_to_parse = s2.parts[0].recurse()
        except Exception:  # file has notes in a flat structure
            # Was a bare `except:` (which would also swallow KeyboardInterrupt);
            # the best-effort fallback behavior is kept.
            notes_to_parse = midi.flat.notes
        for element in notes_to_parse:
            if isinstance(element, note.Note):
                notes.append(str(element.pitch))
            elif isinstance(element, chord.Chord):
                notes.append('.'.join(str(n) for n in element.normalOrder))
    with open('data/notes', 'wb') as filepath:
        pickle.dump(notes, filepath)
    return notes
def prepare_sequences(notes, n_vocab):
    """Prepare the input/output sequences used by the Neural Network.

    Args:
        notes: flat list of note/chord strings in playback order.
        n_vocab: number of distinct pitches (vocabulary size).

    Returns:
        (network_input, network_output): inputs normalised to [0, 1) with
        shape (n_patterns, sequence_length, 1), and one-hot encoded targets.
    """
    sequence_length = 100
    # Map each distinct pitch string to an integer index; sorting makes the
    # mapping deterministic across runs.
    pitchnames = sorted(set(notes))
    note_to_int = {pitch: number for number, pitch in enumerate(pitchnames)}
    network_input = []
    network_output = []
    # Sliding window: each run of `sequence_length` notes predicts the next one.
    for i in range(len(notes) - sequence_length):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        network_output.append(note_to_int[sequence_out])
    n_patterns = len(network_input)
    # reshape the input into a format compatible with LSTM layers
    network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input
    network_input = network_input / float(n_vocab)
    network_output = np_utils.to_categorical(network_output)
    return (network_input, network_output)
def create_network(network_input, n_vocab):
    """ create the structure of the neural network

    Three stacked LSTM layers (512 units) followed by a dense head that
    predicts a softmax distribution over the n_vocab pitches.
    """
    model = Sequential()
    model.add(LSTM(
        512,
        # input shape is (sequence_length, 1) taken from the prepared data
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
def train(model, network_input, network_output):
    """ train the neural network

    Checkpoints the best-loss weights to a per-epoch file, resumes from a
    previously saved checkpoint, then fits for 200 epochs.
    """
    filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
    checkpoint = ModelCheckpoint(
        filepath,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min'
    )
    callbacks_list = [checkpoint]
    # NOTE(review): hard-coded absolute path to a Colab Drive checkpoint —
    # this will fail outside that environment; consider making it configurable.
    model.load_weights('/content/drive/MyDrive/Colab Notebooks/Classical Piano 2/weights-improvement-07-3.3489-bigger.hdf5')
    model.fit(network_input, network_output, epochs=200, batch_size=128, callbacks=callbacks_list)
# Script entry point.
if __name__ == '__main__':
    train_network()
| 32.453125
| 124
| 0.689697
|
4a0b4714dd5cac1b633c1d58c1b0d4eb81a04d88
| 2,371
|
py
|
Python
|
labscript_devices/NI_DAQmx/models/NI_USB_6008.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2022-03-03T04:22:47.000Z
|
2022-03-03T04:22:47.000Z
|
labscript_devices/NI_DAQmx/models/NI_USB_6008.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 43
|
2020-05-12T20:34:36.000Z
|
2022-03-29T21:47:29.000Z
|
labscript_devices/NI_DAQmx/models/NI_USB_6008.py
|
chrisjbillington/labscript_devices
|
75b175d44c1fdca55b7cae30a898bbea59c2a5d7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 23
|
2020-05-31T03:15:59.000Z
|
2022-02-25T14:36:45.000Z
|
#####################################################################
# #
# /NI_DAQmx/models/_subclass_template.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
#####################################################################
# WARNING #
# #
# This file is auto-generated, any modifications may be #
# overwritten. See README.txt in this folder for details #
# #
#####################################################################
from labscript_devices.NI_DAQmx.labscript_devices import NI_DAQmx
#:
# Auto-generated hardware capability table for the NI USB-6008 (see the
# warning header above: hand edits may be overwritten by the generator).
CAPABILITIES = {
    'AI_range': [-10.0, 10.0],
    'AI_start_delay': 8.333333333333334e-08,
    'AO_range': [0.0, 5.0],
    'max_AI_multi_chan_rate': 10000.0,
    'max_AI_single_chan_rate': 10000.0,
    'max_AO_sample_rate': None,
    'max_DO_sample_rate': None,
    'min_semiperiod_measurement': None,
    'num_AI': 8,
    'num_AO': 2,
    'num_CI': 1,
    'ports': {
        'port0': {'num_lines': 8, 'supports_buffered': False},
        'port1': {'num_lines': 4, 'supports_buffered': False},
    },
    'supports_buffered_AO': False,
    'supports_buffered_DO': False,
    'supports_semiperiod_measurement': False,
}
class NI_USB_6008(NI_DAQmx):
    description = 'NI-USB-6008'

    def __init__(self, *args, **kwargs):
        """Class for NI-USB-6008"""
        # Any provided kwargs take precedent over capabilities
        merged_kwargs = dict(CAPABILITIES)
        merged_kwargs.update(kwargs)
        NI_DAQmx.__init__(self, *args, **merged_kwargs)
| 41.596491
| 69
| 0.432307
|
4a0b474b46ef02386e18f168c988b1edcaf549a5
| 16,859
|
py
|
Python
|
dsf_cnn/model/utils/gconv_utils.py
|
EJOOSTEROP/dsf-cnn
|
cdf9478365814eab57668f20cbab713783540496
|
[
"MIT"
] | 1
|
2021-03-05T08:19:57.000Z
|
2021-03-05T08:19:57.000Z
|
dsf_cnn/model/utils/gconv_utils.py
|
EJOOSTEROP/dsf-cnn
|
cdf9478365814eab57668f20cbab713783540496
|
[
"MIT"
] | null | null | null |
dsf_cnn/model/utils/gconv_utils.py
|
EJOOSTEROP/dsf-cnn
|
cdf9478365814eab57668f20cbab713783540496
|
[
"MIT"
] | null | null | null |
"""
Group equivariant convolution utils
"""
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.common import get_tf_version_tuple
from matplotlib import cm
from model.utils.norm_utils import *
from model.utils.rotation_utils import *
####
def GBNReLU(name, x, nr_orients):
    """
    A shorthand of Group Equivariant BatchNormalization + ReLU.

    Args:
        name: variable scope name
        x: input tensor
        nr_orients: number of filter orientations

    Returns:
        out: normalised tensor with ReLU activation
    """
    in_shape = x.get_shape().as_list()
    total_chans = in_shape[3]
    per_orient = int(total_chans / nr_orients)
    # Split channels into (orientation, channel) so BN statistics are shared
    # across orientations of the same filter.
    grouped = tf.reshape(x, [-1, in_shape[1], in_shape[2], nr_orients, per_orient])
    normed = BatchNorm3d(name + '_bn', grouped)
    activated = tf.nn.relu(normed, name='relu')
    return tf.reshape(activated, [-1, in_shape[1], in_shape[2], total_chans])
####
def GBatchNorm(name, x, nr_orients):
    """
    Group Equivariant BatchNormalization.

    Args:
        name: variable scope name
        x: input tensor
        nr_orients: number of filter orientations

    Returns:
        out: normalised tensor
    """
    shape = x.get_shape().as_list()
    chans = shape[3]
    c = int(chans/nr_orients)
    x = tf.reshape(x, [-1, shape[1], shape[2], nr_orients, c])
    bn = BatchNorm3d(name + '_bn', x)
    # BUG FIX: the original reshaped an undefined name `act` (NameError at
    # call time); reshape the normalised tensor `bn` instead.
    out = tf.reshape(bn, [-1, shape[1], shape[2], chans])
    return out
####
def get_basis_params(k_size):
    """
    Get the filter parameters for a given kernel size

    Args:
        k_size (int): input kernel size; one of 5, 7, 9, 11.

    Returns:
        alpha_list: list of alpha values
        beta_list: list of beta values
        bl_list: used to bandlimit high frequency filters in get_basis_filters()

    Raises:
        ValueError: if k_size is not a supported kernel size (the original
            raised an opaque UnboundLocalError instead).
    """
    if k_size == 5:
        alpha_list = [0, 1, 2]
        beta_list = [0, 1, 2]
        bl_list = [0, 2, 2]
    elif k_size == 7:
        alpha_list = [0, 1, 2, 3]
        beta_list = [0, 1, 2, 3]
        bl_list = [0, 2, 3, 2]
    elif k_size == 9:
        alpha_list = [0, 1, 2, 3, 4]
        beta_list = [0, 1, 2, 3, 4]
        bl_list = [0, 3, 4, 4, 3]
    elif k_size == 11:
        alpha_list = [0, 1, 2, 3, 4]
        beta_list = [1, 2, 3, 4]
        bl_list = [0, 3, 4, 4, 3]
    else:
        raise ValueError("Unsupported kernel size: {}".format(k_size))
    return alpha_list, beta_list, bl_list
####
def get_basis_filters(alpha_list, beta_list, bl_list, k_size, eps=10**-8):
    """
    Gets the atomic (complex circular-harmonic) basis filters.

    Args:
        alpha_list: list of alpha values (angular frequencies) for basis filters
        beta_list: list of beta values (radial profile indices)
        bl_list: bandlimit list to reduce aliasing of basis filters
        k_size (int): kernel size of basis filters
        eps=10**-8: epsilon used to prevent division by 0

    Returns:
        A complex64 tensor of basis filters with shape
        [n_filters, k_size, k_size, 1, 1, 1], and freq_list: the alpha
        (frequency) of each filter, needed later for phase manipulation.
    """
    filter_list = []
    freq_list = []
    for beta in beta_list:
        for alpha in alpha_list:
            # Band-limit: skip frequencies too high for this radial ring.
            if alpha <= bl_list[beta]:
                his = k_size//2  # half image size
                y_index, x_index = np.mgrid[-his:(his+1), -his:(his+1)]
                y_index *= -1
                z_index = x_index + 1j*y_index
                # convert z to natural coordinates and add eps to avoid division by zero
                z = (z_index + eps)
                r = np.abs(z)
                # Narrower Gaussian ring for the outermost radius.
                if beta == beta_list[-1]:
                    sigma = 0.4
                else:
                    sigma = 0.6
                rad_prof = np.exp(-(r-beta)**2/(2*(sigma**2)))
                c_image = rad_prof * (z/r)**alpha
                # NOTE(review): c_image_norm is computed but never used — the
                # unnormalised c_image is appended below. Confirm whether the
                # normalised filter was intended.
                c_image_norm = (math.sqrt(2)*c_image) / np.linalg.norm(c_image)
                # add basis filter to list
                filter_list.append(c_image)
                # add corresponding frequency of filter to list (info needed for phase manipulation)
                freq_list.append(alpha)
    filter_array = np.array(filter_list)
    filter_array = np.reshape(filter_array, [
        filter_array.shape[0], filter_array.shape[1], filter_array.shape[2], 1, 1, 1])
    return tf.convert_to_tensor(filter_array, dtype=tf.complex64), freq_list
####
def get_rot_info(nr_orients, alpha_list):
    """
    Generate rotation info for phase manipulation of steerable filters.
    Rotation is dependent on the frequency of the filter (alpha).

    Args:
        nr_orients: number of filter rotations
        alpha_list: list of alpha values that determine the frequency

    Returns:
        rot_info used to rotate steerable filters
    """
    # One rotation angle per output orientation.
    angles = [(2 * np.math.pi / nr_orients) * j for j in range(nr_orients)]
    # Phase factor exp(-i * alpha * angle): rotating a steerable filter of
    # frequency alpha is a complex phase shift.
    rot_info = np.array([[np.exp(-1j * alpha * angle) for angle in angles]
                         for alpha in alpha_list])
    # Reshape to enable broadcasting against the basis-filter tensor.
    rot_info = np.reshape(
        rot_info, [rot_info.shape[0], 1, 1, 1, 1, nr_orients])
    return tf.convert_to_tensor(rot_info, dtype=tf.complex64)
####
def GroupPool(name, x, nr_orients, pool_type='max'):
    """
    Perform pooling along the orientation axis.

    Args:
        name: variable scope name
        x: input tensor
        nr_orients: number of filter orientations
        pool_type: choose either 'max' or 'mean'

    Returns:
        pool: pooled tensor
    """
    s = x.get_shape().as_list()
    # Expose the orientation axis, then reduce over it.
    grouped = tf.reshape(x, [-1, s[1], s[2], nr_orients, s[3] // nr_orients])
    if pool_type == 'max':
        return tf.reduce_max(grouped, 3)
    if pool_type == 'mean':
        return tf.reduce_mean(grouped, 3)
    raise ValueError('Pool type not recognised')
####
def steerable_initializer(nr_orients, factor=2.0, mode='FAN_IN',
                          seed=None, dtype=dtypes.float32):
    """
    Initialise complex coefficients in accordance with Weiler et al. (https://arxiv.org/pdf/1711.07289.pdf)
    Note, here we use the truncated normal dist, whereas Weiler et al. uses the regular normal dist.

    Args:
        nr_orients: number of filter orientations
            # NOTE(review): currently unused inside the initializer — confirm intended.
        factor: factor used for weight init
        mode: 'FAN_IN' or 'FAN_OUT'
        seed: seed for weight init
        dtype: data type

    Returns:
        _initializer: callable with the standard TF initializer signature
            (shape, dtype, partition_info) returning a truncated-normal tensor.
    """
    def _initializer(shape, dtype=dtype, partition_info=None):
        # total number of basis filters
        Q = shape[0]*shape[1]
        if mode == 'FAN_IN':
            fan_in = shape[-2]
            C = fan_in
            # count number of input connections.
        elif mode == 'FAN_OUT':
            fan_out = shape[-2]
            # count number of output connections.
            C = fan_out
        n = C*Q
        # to get stddev = math.sqrt(factor / n) need to adjust for truncated.
        # 0.8796... is the std of a unit normal truncated to +/- 2 std.
        trunc_stddev = math.sqrt(factor / n) / .87962566103423978
        return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
                                           seed=seed)
    return _initializer
####
def cycle_channels(filters, shape_list):
    """
    Perform cyclic permutation of the orientation channels for kernels on the group G.

    For output orientation r, the input-orientation axis is rolled by r so
    that the filter bank is equivariant under rotation.

    Args:
        filters: input filters
        shape_list: [nr_orients_out, ksize, ksize,
                     nr_orients_in, filters_in, filters_out]

    Returns:
        tensor of filters with channels permuted
    """
    nr_orients_out = shape_list[0]
    rotated_filters = [None] * nr_orients_out
    for orientation in range(nr_orients_out):
        # [K, K, nr_orients_in, filters_in, filters_out]
        filters_temp = filters[orientation]
        # [K, K, filters_in, filters_out, nr_orients]
        filters_temp = tf.transpose(filters_temp, [0, 1, 3, 4, 2])
        # [K * K * filters_in * filters_out, nr_orients_in]
        filters_temp = tf.reshape(
            filters_temp, [shape_list[1] * shape_list[2] * shape_list[4] * shape_list[5], shape_list[3]])
        # Cycle along the orientation axis: multiply by an identity matrix
        # rolled by `orientation` columns.
        roll_matrix = tf.constant(
            np.roll(np.identity(shape_list[3]), orientation, axis=1), dtype=tf.float32)
        filters_temp = tf.matmul(filters_temp, roll_matrix)
        # Undo the flatten/transpose to restore the original layout.
        filters_temp = tf.reshape(
            filters_temp, [shape_list[1], shape_list[2], shape_list[4], shape_list[5], shape_list[3]])
        filters_temp = tf.transpose(filters_temp, [0, 1, 4, 2, 3])
        rotated_filters[orientation] = filters_temp
    return tf.stack(rotated_filters)
####
def gen_rotated_filters(w, filter_type, input_layer, nr_orients_out, basis_filters=None, rot_info=None):
    """
    Generate the rotated filters either by phase manipulation or direct rotation of planar filter.
    Cyclic permutation of channels is performed for kernels on the group G.

    Args:
        w: coefficients used to perform a linear combination of basis filters
           ('steerable'), or the planar kernel itself ('standard')
        filter_type: either 'steerable' or 'standard'
        input_layer (bool): whether 1st layer convolution or not
        nr_orients_out: number of output filter orientations
        basis_filters: atomic basis filters (required for 'steerable')
        rot_info: array to determine how to rotate filters (required for 'steerable')

    Returns:
        rot_filters: rotated steerable basis filters, with
                     cyclic permutation if not the first layer
    """
    if filter_type == 'steerable':
        # if using steerable filters, then rotate by phase manipulation
        rot_filters = [None] * nr_orients_out
        for orientation in range(nr_orients_out):
            rot_info_tmp = tf.expand_dims(rot_info[..., orientation], -1)
            filter_tmp = w * rot_info_tmp * basis_filters  # phase manipulation
            rot_filters[orientation] = filter_tmp
        # [nr_orients_out, J, K, K, nr_orients_in, filters_in, filters_out] (M: nr frequencies, R: nr radial profile params)
        rot_filters = tf.stack(rot_filters)
        # Linear combination of basis filters
        # [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        rot_filters = tf.reduce_sum(rot_filters, axis=1)
        # Get real part of filters
        # [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        rot_filters = tf.math.real(rot_filters, name='filters')
    else:
        # if using regular kernels, rotate by sparse matrix multiplication
        # [K, K, nr_orients_in, filters_in, filters_out]
        filter_shape = w.get_shape().as_list()
        # Flatten the filter
        filter_flat = tf.reshape(
            w, [filter_shape[0]*filter_shape[1], filter_shape[2]*filter_shape[3]*filter_shape[4]])
        # Generate a set of rotated kernels via rotation matrix multiplication
        idx, vals = MultiRotationOperatorMatrixSparse(
            [filter_shape[0], filter_shape[1]], nr_orients_out, periodicity=2*np.pi, diskMask=True)
        # Sparse rotation matrix
        rotOp_matrix = tf.SparseTensor(
            idx, vals, [nr_orients_out*filter_shape[0]*filter_shape[1], filter_shape[0]*filter_shape[1]])
        # Matrix multiplication
        rot_filters = tf.sparse_tensor_dense_matmul(
            rotOp_matrix, filter_flat)
        # [nr_orients_out * K * K, filters_in * filters_out]
        # Reshape the filters to [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        rot_filters = tf.reshape(
            rot_filters, [nr_orients_out, filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3], filter_shape[4]])
    # Do not cycle filter for input convolution f: Z2 -> G
    if input_layer is False:
        shape_list = rot_filters.get_shape().as_list()
        # cycle channels - [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        rot_filters = cycle_channels(rot_filters, shape_list)
    return rot_filters
####
def GConv2D(
        name,
        inputs,
        filters_out,
        kernel_size,
        nr_orients,
        filter_type,
        basis_filters=None,
        rot_info=None,
        input_layer=False,
        strides=[1, 1, 1, 1],
        padding='SAME',
        data_format='NHWC',
        activation='bnrelu',
        use_bias=False,
        bias_initializer=tf.zeros_initializer()):
    """
    Rotation equivariant group convolution layer.

    Args:
        name: variable scope name
        inputs: input tensor
        filters_out: number of filters out (per orientation)
        kernel_size: size of kernel
        nr_orients: number of orientations in the rotation group
        filter_type: 'steerable' for phase-manipulated steerable filters,
            anything else for plain kernels rotated by sparse matmul
        basis_filters: atomic basis filters (required when 'steerable')
        rot_info: array to determine how to rotate filters (required when 'steerable')
        input_layer: whether the operation is the input layer (1st conv),
            i.e. a lifting convolution f: Z2 -> G with a single input orientation
        strides: stride of kernel for convolution
        padding: choose either 'SAME' or 'VALID'
        data_format: either 'NHWC' or 'NCHW'
        activation: 'bnrelu', 'bn', 'relu', or anything else for no activation
        use_bias: whether to use bias (one bias shared across orientations)
        bias_initializer: bias initialiser method
    Returns:
        conv: group equivariant convolution of input with
        steerable filters and optional activation.
    """
    if filter_type == 'steerable':
        # BUGFIX-adjacent hygiene: `!= None` on tensors would build an
        # elementwise comparison op; identity tests are the correct check.
        assert basis_filters is not None and rot_info is not None, \
            'Must provide basis filters and rotation matrix'

    in_shape = inputs.get_shape().as_list()
    channel_axis = 3 if data_format == 'NHWC' else 1
    # The lifting (input) layer has no orientation axis on its input.
    if input_layer == False:
        nr_orients_in = nr_orients
    else:
        nr_orients_in = 1
    nr_orients_out = nr_orients
    filters_in = int(in_shape[channel_axis] / nr_orients_in)

    if filter_type == 'steerable':
        # shape for the filter coefficients
        nr_b_filts = basis_filters.shape[0]
        w_shape = [nr_b_filts, 1, 1, nr_orients_in, filters_in, filters_out]
        # init complex valued weights with the adapted He init (Weiler et al.)
        w1 = tf.get_variable(name + '_W_real', w_shape,
                             initializer=steerable_initializer(nr_orients_out))
        w2 = tf.get_variable(name + '_W_imag', w_shape,
                             initializer=steerable_initializer(nr_orients_out))
        w = tf.complex(w1, w2)
        # Generate filters at different orientations - also perform cyclic
        # permutation of channels if f: G -> G. The cyclic permutation happens
        # for all rotation equivariant layers except for the input layer.
        # [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        filters = gen_rotated_filters(
            w, filter_type, input_layer, nr_orients_out, basis_filters, rot_info)
    else:
        w_shape = [kernel_size, kernel_size,
                   nr_orients_in, filters_in, filters_out]
        w = tf.get_variable(
            name + '_W', w_shape, initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out'))
        # Generate filters at different orientations (cyclic channel
        # permutation for all layers except the input layer).
        # [nr_orients_out, K, K, nr_orients_in, filters_in, filters_out]
        filters = gen_rotated_filters(
            w, filter_type, input_layer, nr_orients_out)

    # reshape filters for 2D convolution
    # [K, K, nr_orients_in, filters_in, nr_orients_out, filters_out]
    filters = tf.transpose(filters, [1, 2, 3, 4, 0, 5])
    filters = tf.reshape(filters, [
        kernel_size, kernel_size, nr_orients_in * filters_in, nr_orients_out * filters_out])

    # perform conv with rotated filters (reshaped so we can perform 2D convolution)
    kwargs = dict(data_format=data_format)
    conv = tf.nn.conv2d(inputs, filters, strides, padding.upper(), **kwargs)

    if use_bias:
        # Use same bias for all orientations.
        # BUGFIX: honour the `bias_initializer` argument; it was previously
        # ignored in favour of a hard-coded tf.zeros_initializer() (the default
        # is unchanged, so existing callers see identical behaviour).
        b = tf.get_variable(
            name + '_bias', [filters_out], initializer=bias_initializer)
        b = tf.stack([b] * nr_orients_out)
        b = tf.reshape(b, [nr_orients_out*filters_out])
        conv = tf.nn.bias_add(conv, b)

    if activation == 'bnrelu':
        # Rotation equivariant batch normalisation + ReLU
        conv = GBNReLU(name, conv, nr_orients_out)
    if activation == 'bn':
        # Rotation equivariant batch normalisation
        conv = GBatchNorm(name, conv, nr_orients_out)
    if activation == 'relu':
        conv = tf.nn.relu(conv)
    return conv
| 35.122917
| 127
| 0.634023
|
4a0b4751c09b32db0da5010591062cb95c7c3983
| 7,500
|
py
|
Python
|
mathics/builtin/assignments/assignment.py
|
adamantinum/mathics-core
|
c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4
|
[
"Apache-2.0"
] | null | null | null |
mathics/builtin/assignments/assignment.py
|
adamantinum/mathics-core
|
c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4
|
[
"Apache-2.0"
] | null | null | null |
mathics/builtin/assignments/assignment.py
|
adamantinum/mathics-core
|
c8f5d7a7645a17a6a8833f750fb93a352d1ac9b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Forms of Assignment
"""
from mathics.builtin.base import Builtin, BinaryOperator
from mathics.core.symbols import Symbol
from mathics.core.systemsymbols import (
SymbolFailed,
)
from mathics.core.definitions import PyMathicsLoadException
from mathics.builtin.assignments.internals import _SetOperator
from mathics.core.attributes import hold_all, hold_first, protected, sequence_hold
class Set(BinaryOperator, _SetOperator):
    """
    <dl>
    <dt>'Set[$expr$, $value$]'
    <dt>$expr$ = $value$
        <dd>evaluates $value$ and assigns it to $expr$.
    <dt>{$s1$, $s2$, $s3$} = {$v1$, $v2$, $v3$}
        <dd>sets multiple symbols ($s1$, $s2$, ...) to the corresponding values ($v1$, $v2$, ...).
    </dl>

    'Set' can be used to give a symbol a value:
    >> a = 3
     = 3
    >> a
     = 3

    An assignment like this creates an ownvalue:
    >> OwnValues[a]
     = {HoldPattern[a] :> 3}

    You can set multiple values at once using lists:
    >> {a, b, c} = {10, 2, 3}
     = {10, 2, 3}
    >> {a, b, {c, {d}}} = {1, 2, {{c1, c2}, {a}}}
     = {1, 2, {{c1, c2}, {10}}}
    >> d
     = 10

    'Set' evaluates its right-hand side immediately and assigns it to
    the left-hand side:
    >> a
     = 1
    >> x = a
     = 1
    >> a = 2
     = 2
    >> x
     = 1

    'Set' always returns the right-hand side, which you can again use
    in an assignment:
    >> a = b = c = 2;
    >> a == b == c == 2
     = True

    'Set' supports assignments to parts:
    >> A = {{1, 2}, {3, 4}};
    >> A[[1, 2]] = 5
     = 5
    >> A
     = {{1, 5}, {3, 4}}
    >> A[[;;, 2]] = {6, 7}
     = {6, 7}
    >> A
     = {{1, 6}, {3, 7}}
    Set a submatrix:
    >> B = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
    >> B[[1;;2, 2;;-1]] = {{t, u}, {y, z}};
    >> B
     = {{1, t, u}, {4, y, z}, {7, 8, 9}}

    #> x = Infinity;
    """

    attributes = hold_first | protected | sequence_hold
    grouping = "Right"

    # BUGFIX: this dict was previously declared twice with identical
    # contents; the redundant second declaration has been removed.
    messages = {
        "setraw": "Cannot assign to raw object `1`.",
        "shape": "Lists `1` and `2` are not the same shape.",
    }
    operator = "="
    precedence = 40
    summary_text = "assign a value"

    # NOTE: the one-line docstring below is the rule pattern matched by the
    # Mathics evaluator; it is functional, not documentation.
    def apply(self, lhs, rhs, evaluation):
        "lhs_ = rhs_"
        self.assign(lhs, rhs, evaluation)
        return rhs
class SetDelayed(Set):
    """
    <dl>
    <dt>'SetDelayed[$expr$, $value$]'
    <dt>$expr$ := $value$
        <dd>assigns $value$ to $expr$, without evaluating $value$.
    </dl>

    'SetDelayed' is like 'Set', except it has attribute 'HoldAll', thus it does not evaluate the right-hand side immediately, but evaluates it when needed.

    >> Attributes[SetDelayed]
     = {HoldAll, Protected, SequenceHold}
    >> a = 1
     = 1
    >> x := a
    >> x
     = 1

    Changing the value of $a$ affects $x$:
    >> a = 2
     = 2
    >> x
     = 2

    'Condition' ('/;') can be used with 'SetDelayed' to make an
    assignment that only holds if a condition is satisfied:
    >> f[x_] := p[x] /; x>0
    >> f[3]
     = p[3]
    >> f[-3]
     = f[-3]
    It also works if the condition is set in the LHS:
    >> F[x_, y_] /; x < y /; x>0 := x / y;
    >> F[x_, y_] := y / x;
    >> F[2, 3]
     = 2 / 3
    >> F[3, 2]
     = 2 / 3
    >> F[-3, 2]
     = -2 / 3
    """

    # hold_all (vs. Set's hold_first) is what defers RHS evaluation.
    operator = ":="
    attributes = hold_all | protected | sequence_hold
    summary_text = "test a delayed value; used in defining functions"

    # NOTE: the one-line docstring below is the rule pattern matched by the
    # Mathics evaluator; it is functional, not documentation.
    def apply(self, lhs, rhs, evaluation):
        "lhs_ := rhs_"
        # Unlike Set.apply, the (held) RHS is stored unevaluated and the
        # expression evaluates to Null on success, $Failed otherwise.
        if self.assign(lhs, rhs, evaluation):
            return Symbol("Null")
        else:
            return SymbolFailed
class TagSet(Builtin, _SetOperator):
    """
    <dl>
    <dt>'TagSet[$f$, $expr$, $value$]'
    <dt>'$f$ /: $expr$ = $value$'
        <dd>assigns $value$ to $expr$, associating the corresponding assignment with the symbol $f$.
    </dl>

    Create an upvalue without using 'UpSet':
    >> x /: f[x] = 2
     = 2
    >> f[x]
     = 2
    >> DownValues[f]
     = {}
    >> UpValues[x]
     = {HoldPattern[f[x]] :> 2}

    The symbol $f$ must appear as the ultimate head of $lhs$ or as the head of a leaf in $lhs$:
    >> x /: f[g[x]] = 3;
     : Tag x not found or too deep for an assigned rule.
    >> g /: f[g[x]] = 3;
    >> f[g[x]]
     = 3
    """

    attributes = hold_all | protected | sequence_hold

    messages = {
        "tagnfd": "Tag `1` not found or too deep for an assigned rule.",
    }
    # TYPO FIX: was "...assignment with the a symbol".
    summary_text = "assign a value to an expression, associating the corresponding assignment with a symbol"

    # NOTE: the one-line docstring below is the rule pattern matched by the
    # Mathics evaluator; it is functional, not documentation.
    def apply(self, f, lhs, rhs, evaluation):
        "f_ /: lhs_ = rhs_"
        # The tag must be a symbol.
        name = f.get_name()
        if not name:
            evaluation.message(self.get_name(), "sym", f, 1)
            return
        # TagSet evaluates the RHS eagerly (contrast TagSetDelayed).
        rhs = rhs.evaluate(evaluation)
        self.assign_elementary(lhs, rhs, evaluation, tags=[name])
        return rhs
class TagSetDelayed(TagSet):
    """
    <dl>
    <dt>'TagSetDelayed[$f$, $expr$, $value$]'
    <dt>'$f$ /: $expr$ := $value$'
        <dd>is the delayed version of 'TagSet'.
    </dl>
    """

    attributes = hold_all | protected | sequence_hold
    # TYPO FIX: was "...assignment with the a symbol".
    summary_text = "assign a delayed value to an expression, associating the corresponding assignment with a symbol"

    # NOTE: the one-line docstring below is the rule pattern matched by the
    # Mathics evaluator; it is functional, not documentation.
    def apply(self, f, lhs, rhs, evaluation):
        "f_ /: lhs_ := rhs_"
        # The tag must be a symbol.
        name = f.get_name()
        if not name:
            evaluation.message(self.get_name(), "sym", f, 1)
            return
        # Delayed: the (held) RHS is stored unevaluated; Null on success.
        if self.assign_elementary(lhs, rhs, evaluation, tags=[name]):
            return Symbol("Null")
        else:
            return SymbolFailed
# Placing this here is a bit weird, but it is not clear where else is better suited for this right now.
class LoadModule(Builtin):
    """
    <dl>
    <dt>'LoadModule[$module$]'</dt>
        <dd>'Load Mathics definitions from the python module $module$</dd>
    </dl>

    >> LoadModule["nomodule"]
     : Python module nomodule does not exist.
     = $Failed
    >> LoadModule["sys"]
     : Python module sys is not a pymathics module.
     = $Failed
    """

    name = "LoadModule"
    messages = {
        "notfound": "Python module `1` does not exist.",
        "notmathicslib": "Python module `1` is not a pymathics module.",
    }

    # NOTE: the one-line docstring below is the rule pattern matched by the
    # Mathics evaluator; it is functional, not documentation.
    def apply(self, module, evaluation):
        "LoadModule[module_String]"
        try:
            evaluation.definitions.load_pymathics_module(module.value)
        except PyMathicsLoadException:
            # CONSISTENCY FIX: use self.get_name() (as the ImportError branch
            # below already did) rather than the bare class attribute
            # `self.name`, so both error paths resolve the message symbol the
            # same way.
            evaluation.message(self.get_name(), "notmathicslib", module)
            return SymbolFailed
        except ImportError:
            evaluation.message(self.get_name(), "notfound", module)
            return SymbolFailed
        else:
            # Add Pymathics` to $ContextPath so users do not have to fully
            # qualify the variables and functions of the module just loaded.
            # Following the example of $ContextPath in the WL reference manual,
            # where PackletManager appears first in the list, it seems
            # preferable to add Pymathics` at the beginning.
            context_path = evaluation.definitions.get_context_path()
            if "Pymathics`" not in context_path:
                context_path.insert(0, "Pymathics`")
                evaluation.definitions.set_context_path(context_path)
            return module
| 26.315789
| 155
| 0.548933
|
4a0b4765771b044bd6dd2b63d6654c1c699d63aa
| 8,202
|
py
|
Python
|
src/python2.7/THINCARB_py27_math.py
|
smk78/thincarb
|
9d3ce615b10405ccc6fa6063b0d2e0f51f41d100
|
[
"MIT"
] | null | null | null |
src/python2.7/THINCARB_py27_math.py
|
smk78/thincarb
|
9d3ce615b10405ccc6fa6063b0d2e0f51f41d100
|
[
"MIT"
] | null | null | null |
src/python2.7/THINCARB_py27_math.py
|
smk78/thincarb
|
9d3ce615b10405ccc6fa6063b0d2e0f51f41d100
|
[
"MIT"
] | null | null | null |
# THINCARB_py27_math.py
#
# (c)2015 S. King, H Jarvie & C Neal (Sci. Tot. Env. 575 (2017) 496-512)
#
# Calculates CO2 & CO3 saturation indices, CO2 partial pressures, and total dissolved inorganic carbon
# concentrations from hydrochemical data
#
# For the detail behind the calculations, see C. Neal, A. House & K. Down, Sci. Tot. Env. 211 (1998) 173-185
#
# This version of THINCARB replaces the Python '**' operator for exponentiation & square root with math.pow() & math.sqrt()
#
# This version of THINCARB includes the DIC contribution from CaHCO3+ and CaCO3
#
# 14-Jul-2017: Altitude-corrected EpCO2 now properly applied to calculation of H2CO3 activity
#
# 13-Mar-2018: Trap AX=0 by catching ever decreasing M when alkalinity is near zero
#
# 25-Jan-2021: Increase precision on temperature output to 2 d.p.
import math
def MainRoutineMath(fileout,targetvalue,tolerance,SITE,A,B,C,D,E,F,G):
    """Compute CO2/CO3 saturation indices, CO2 partial pressures and total DIC
    for one hydrochemical sample, iterating on the accurate EpCO2 (K) by
    bisection until the charge balance (M) is within `tolerance` of
    `targetvalue`, then append one space-separated result line to `fileout`.

    NOTE(review): if `tolerance` were large enough that the while loop never
    ran, `deltaK`/`lastchange` would be unbound below — in practice M starts
    at 1000.0 so the loop always executes at least once.
    """
    # INPUTS:
    #   fileout = Name of text file to contain results
    #   targetvalue = Desired value of M (usually 0.0 !)
    #   tolerance = Acceptable deviation from targetvalue; ie, M +/- tolerance
    #   SITE = Location
    #   A = Date
    #   B = Time
    #   C = Altitude (m)
    #   D = pH
    #   E = Alkalinity (uEq/L)
    #   F = Temperature (degC)
    #   G = [Ca] (mg/L)
    #
    # OUTPUTS:
    #   Ep is the excess partial pressure
    #   k is an equilibrum constant
    #   DIC is dissolved inorganic carbon
    #
    #   H = EpCO2 rough
    #   I = EpCO2 less rough
    #   J = EpCO2 less rough including CO3
    #   K = EpCO2 accurate {NB: this returned by optimisation}
    #   L = EpCO2 accurate {corrected for altitude}
    #   M = Charge balance
    #   N = Total [Ca] (M/L)
    #   O = CaCO3 saturation (Log)
    #   P = k0 pCO2-H2CO3
    #   Q = k1 H2CO3-HCO3
    #   R = k2 HCO3-CO3
    #   S = k3 CaHCO3
    #   T = k4 CaCO3(0)
    #   U = k5 CaOH
    #   V = k6 H2O
    #   W = k7 CaCO3(SO)
    #   X = OH activity
    #   Y = H2CO3 activity
    #   Z = HCO3 activity
    #   AA = CO3 activity
    #   AB = Ca2+ activity
    #   AC = CaHCO3+ activity
    #   AD = CaCO3 activity
    #   AE = CaOH- activity
    #   AF = root(ionic strength)
    #   AG = gamma1 (1-) activity coefficient
    #   AH = gamma2 (2-) activity coefficient
    #   AI = (HCO3)- concentration (mM)
    #   AJ = (CO3)2- concentration (mM)
    #   AK = (H2CO3) concentration (mM)
    #   AL = (HCO3)- concentration (mM) in (CaHCO3)+
    #   AM = (CO3)2- concentration (mM) in (CaCO3)
    #   AN = (HCO3)- concentration (mg/L)
    #   AO = (CO3)2- concentration (mg/L)
    #   AP = (H2CO3) concentration (mg/L)
    #   AQ = (HCO3)- concentration (mg/L) in (CaHCO3)+
    #   AR = (CO3)2- concentration (mg/L) in (CaCO3)
    #   AS = concentration of C in (HCO3)- (mg/L)
    #   AT = concentration of C in (CO3)2- (mg/L)
    #   AU = concentration of C in (H2CO3) (mg/L)
    #   AV = concentration of C in (HCO3)- (mg/L) in (CaHCO3)+
    #   AW = concentration of C in (CO3)2- (mg/L) in (CaCO3)
    #   AX = Total DIC (mg/L)
    #   AY = DIC as HCO3 (%)
    #   AZ = DIC as CO3 (%)
    #   BA = DIC as H2CO3 (%)
    #   BB = DIC as HCO3 (%) in (CaHCO3)+
    #   BC = DIC as CO3 (%) in (CaCO3)
    #   BD = Total DIC (%)

    # Initialising value for M (and lastM, used to detect a stalled iteration
    # when alkalinity is near zero — see 13-Mar-2018 note in the file header)
    M=1000.0
    lastM=999.0

    # The following hydrochemical estimates are dependent only on the given inputs
    H =(E+math.pow(10.0,(6.0-D)))*math.pow(10.0,(6.0-D))/5.25
    I =((0.95*E)+math.pow(10.0,(6.0-D)))*math.pow(10.0,(6.0-D))/(6.46-(0.0636*F))
    N =(G/40000.0)
    P =math.pow(10.0,-(13.417-(2299.6/(273.0+F))-(0.01422*(273.0+F))))
    Q =math.pow(10.0,(-(-14.8345+(3404.71/(273.0+F))+(0.03279*(273.0+F)))))
    R =math.pow(10.0,(-(-6.498+(2902.39/(273.0+F))+(0.02379*(273.0+F)))))
    S =math.pow(10.0,-(-2.95+(0.0133*(273.0+F))))
    T =math.pow(10.0,(-(-27.393+(4114.0/(273.0+F))+(0.0561*(273.0+F)))))
    U =math.pow(10.0,-1.4)
    V =math.pow(10.0,-(-6.0846+(4471.33/(273.0+F))+(0.017053*(273.0+F))))
    W =math.pow(10.0,-(-13.543+(3000.0/(273.0+F))+0.0401*(273.0+F)))
    X =V/math.pow(10.0,-D)
    J =( \
        (0.95*E)+ \
        (math.pow(10.0,(6.0-D))/0.95)+ \
        (math.pow(10.0,(D+6.0+math.log10(V)))/0.95) \
        )* \
        math.pow(10.0,(6.0-D))/ \
        ( \
        (6.46-(0.0636*F))* \
        (1.0+ \
        (2.0*(0.95/0.8)*math.pow(10.0,(D+math.log10(R))))) \
        )

    # Initialise K
    K=J

    # Now iterate on K to minimise M
    # Uses a simple brute-force bisection algorithm which changes K by deltaK on each iteration
    # NB: This can become unreliable if tolerance is set too small - say <1.0E-08 - but it is unlikely that M would ever need to be
    # determined with that precision!
    # while ((M>(targetvalue+tolerance)) or (M<(targetvalue-tolerance))):
    while (((M>(targetvalue+tolerance)) or (M<(targetvalue-tolerance))) and (M!=lastM)):
        deltaK=(targetvalue+K)/2.0
        # Correct K for altitude (Note: this is a correction of the original correction in Neal 1998 from xPs/P0 to xP0/Ps)
        L =K*math.pow(((288.0-0.0065*C)/288.0),-5.256)
        # The following hydrochemical estimates are dependent on K
        # (14-Jul-2017 fix: the altitude-corrected L, not K, feeds H2CO3)
        # Y =P*math.pow(10.0,-3.5)*K
        Y =P*math.pow(10.0,-3.5)*L
        Z =Q*Y/math.pow(10.0,-D)
        AA=R*Z/math.pow(10.0,-D)
        AF=math.sqrt(((20.0/35450.0)+X+Z+math.pow(10.0,-D)+(4.0*(AA+N)))/2.0)
        AG=math.pow(10.0,-(0.5*((AF/(1+AF))-(0.3*AF))))
        AH=math.pow(10.0,-(0.5*4.0*((AF/(1.0+AF))-(0.3*AF))))
        AB=N/((1.0+(Z*S/AG)+(AA*T/AG)+(U*X/AG)))
        AC=S*AB*Z
        AD=T*AB*AA
        AE=U*X*AB
        # DIC calculations: mM, then mg/L of species, then mg/L of carbon
        AI=1000.0*(Z/AG)
        AJ=1000.0*(AA/AH)
        AK=1000.0*Y
        AL=1000.0*(AC/AG)
        AM=1000.0*(AD/AH)
        AN=AI*61.0
        AO=AJ*60.0
        AP=AK*62.0
        AQ=AL*61.0
        AR=AM*60.0
        AS=(12.0/61.0)*AN
        AT=(12.0/60.0)*AO
        AU=(12.0/62.0)*AP
        AV=(12.0/61.0)*AQ
        AW=(12.0/60.0)*AR
        AX=AS+AT+AU+AV+AW
        AY=(AS/AX)*100.0
        AZ=(AT/AX)*100.0
        BA=(AU/AX)*100.0
        BB=(AV/AX)*100.0
        BC=(AW/AX)*100.0
        BD=AY+AZ+BA+BB+BC
        # Trap cases where N=0 (no calcium: saturation index undefined)
        if (N==0.0):
            O=float('NaN')
        else:
            O=math.log10(AA*AB)-math.log10(W)
        lastM=M
        # Charge balance. NOTE(review): the `(2.0*AD/1.0)` term divides by 1.0
        # where sibling terms divide by an activity coefficient — presumably
        # deliberate (neutral CaCO3(0) species), but worth confirming.
        M =(E*math.pow(10.0,-6.0))+(math.pow(10.0,-D)/AG)-(Z/AG)-(2.0*AA/AH)-(2.0*AD/1.0)-(X/AG)-(AE/AG)-(AC/AG) if False else (E*math.pow(10.0,-6.0))+(math.pow(10.0,-D)/AG)-(Z/AG)-(2.0*AA/AH)-(AC/AG)-(2.0*AD/1.0)-(X/AG)-(AE/AG)
        # Decide which way to adjust K and set a flag to remember it
        lastchange=0
        if (M<(targetvalue-tolerance)):
            K=K-deltaK
            lastchange=-1
        else:
            K=K+deltaK
            lastchange=1

    # Correct K for the adjustment made right before the While loop exited
    if (lastchange==-1):
        K=K+deltaK
    else:
        K=K-deltaK

    # Output the original input data AND the computed hydrochemical estimates to the terminal
    # print SITE,A,B,'%.1f'%C,'%.1f'%D,'%.1f'%E,'%.2f'%F,'%.2f'%G,'%.3f'%H,'%.3f'%I,'%.3f'%J,'%.3f'%K,'%.3f'%L,'%.1e'%M,'%.3e'%N, \
    #       '%.3f'%O,'%.3e'%P,'%.3e'%Q,'%.3e'%R,'%.3e'%S,'%.3e'%T,'%.3e'%U,'%.3e'%V,'%.3e'%W,'%.3e'%X,'%.3e'%Y,'%.3e'%Z,'%.3e'%AA, \
    #       '%.3e'%AB,'%.3e'%AC,'%.3e'%AD,'%.2e'%AE,'%.2e'%AF,'%.3f'%AG,'%.3f'%AH,'%.2f'%AI,'%.2f'%AJ,'%.2f'%AK,'%.2f'%AL,'%.2f'%AM, \
    #       '%.2f'%AN,'%.2f'%AO,'%.2f'%AP,'%.2f'%AQ,'%.2f'%AR,'%.2f'%AS,'%.2f'%AT,'%.2f'%AU,'%.2f'%AV,'%.2f'%AW,'%.2f'%AX,'%.1f'%AY, \
    #       '%.1f'%AZ,'%.1f'%BA,'%.2f'%BB,'%.2f'%BC,'%.1f'%BD
    # print ' '

    # Output the original input data AND the computed hydrochemical estimates to a file
    fileout.write(str(SITE)+' '+str(A)+' '+str(B)+' '+str('%.1f'%C)+' '+str('%.2f'%D)+' '+str('%.2f'%E)+' '+str('%.2f'%F)+' '+str('%.2f'%G)+' '+ \
        str('%.3f'%H)+' '+str('%.3f'%I)+' '+str('%.3f'%J)+' '+str('%.3f'%K)+' '+str('%.3f'%L)+' '+str('%.1e'%M)+' '+str('%.3e'%N)+' '+ \
        str('%.3f'%O)+' '+str('%.3e'%P)+' '+str('%.3e'%Q)+' '+str('%.3e'%R)+' '+str('%.3e'%S)+' '+str('%.3e'%T)+' '+str('%.3e'%U)+' '+ \
        str('%.3e'%V)+' '+str('%.3e'%W)+' '+str('%.3e'%X)+' '+str('%.3e'%Y)+' '+str('%.3e'%Z)+' '+str('%.3e'%AA)+' '+str('%.3e'%AB)+' '+ \
        str('%.3e'%AC)+' '+str('%.3e'%AD)+' '+str('%.3e'%AE)+' '+str('%.2e'%AF)+' '+str('%.3f'%AG)+' '+str('%.3f'%AH)+' '+ \
        str('%.2f'%AI)+' '+str('%.2f'%AJ)+' '+str('%.2f'%AK)+' '+str('%.2f'%AL)+' '+str('%.2f'%AM)+' '+str('%.2f'%AN)+' '+ \
        str('%.2f'%AO)+' '+str('%.2f'%AP)+' '+str('%.2f'%AQ)+' '+str('%.2f'%AR)+' '+str('%.2f'%AS)+' '+str('%.2f'%AT)+' '+ \
        str('%.2f'%AU)+' '+str('%.2f'%AV)+' '+str('%.2f'%AW)+' '+str('%.2f'%AX)+' '+str('%.1f'%AY)+' '+str('%.1f'%AZ)+' '+ \
        str('%.1f'%BA)+' '+str('%.2f'%BB)+' '+str('%.2f'%BC)+' '+str('%.1f'%BD)+'\n')
# END
| 37.623853
| 144
| 0.541209
|
4a0b47915d0e31a5f54d37296aa3739d37f13ced
| 10,890
|
py
|
Python
|
corehq/apps/domain/deletion.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/domain/deletion.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/domain/deletion.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
class BaseDeletion(object):
    """Base for a single domain-deletion operation tied to one Django app."""

    def __init__(self, app_label):
        self.app_label = app_label

    def is_app_installed(self):
        """Return True when the Django app this operation targets is installed."""
        try:
            apps.get_app_config(self.app_label)
        except LookupError:
            return False
        return True
class CustomDeletion(BaseDeletion):
    """Deletion operation that delegates to an arbitrary callable."""

    def __init__(self, app_label, deletion_fn):
        super(CustomDeletion, self).__init__(app_label)
        self.deletion_fn = deletion_fn

    def execute(self, domain_name):
        # No-op when the owning app is not installed.
        if not self.is_app_installed():
            return
        self.deletion_fn(domain_name)
class RawDeletion(BaseDeletion):
    """Deletion operation that runs a raw SQL statement with the domain bound."""

    def __init__(self, app_label, raw_query):
        super(RawDeletion, self).__init__(app_label)
        self.raw_query = raw_query

    def execute(self, cursor, domain_name):
        # No-op when the owning app is not installed.
        if not self.is_app_installed():
            return
        cursor.execute(self.raw_query, [domain_name])
class ModelDeletion(BaseDeletion):
    """Deletion operation that bulk-deletes one model's rows for a domain."""

    def __init__(self, app_label, model_name, domain_filter_kwarg):
        super(ModelDeletion, self).__init__(app_label)
        self.model_name = model_name
        self.domain_filter_kwarg = domain_filter_kwarg

    def get_model_class(self):
        return apps.get_model(self.app_label, self.model_name)

    def execute(self, domain_name):
        # The Django ORM would turn a None domain_name into an IS NULL filter.
        # We must not delete records with NULL domain names since they can
        # have special meaning (e.g. in some of the SMS models).
        if not domain_name:
            raise RuntimeError("Expected a valid domain name")
        if not self.is_app_installed():
            return
        domain_filter = {self.domain_filter_kwarg: domain_name}
        self.get_model_class().objects.filter(**domain_filter).delete()
def _delete_domain_backend_mappings(domain_name):
    """Remove the domain's own (non-global) SMS backend mappings."""
    mapping_model = apps.get_model('sms', 'SQLMobileBackendMapping')
    mapping_model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_domain_backends(domain_name):
    """Remove the domain's own (non-global) SMS backends."""
    backend_model = apps.get_model('sms', 'SQLMobileBackend')
    backend_model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_web_user_membership(domain_name):
    """Detach every (active and inactive) web user from the domain."""
    from corehq.apps.users.models import WebUser

    members = [
        *WebUser.by_domain(domain_name),
        *WebUser.by_domain(domain_name, is_active=False),
    ]
    for web_user in members:
        web_user.delete_domain_membership(domain_name)
        # In tests only, fully remove users left with no memberships at all;
        # in production the (detached) user document is kept.
        if settings.UNIT_TESTING and not web_user.domain_memberships:
            web_user.delete()
        else:
            web_user.save()
def _terminate_subscriptions(domain_name):
    """End the domain's active subscription as of today and hide any
    not-yet-started subscriptions from ops views.

    Runs in one atomic transaction so billing state cannot be left
    half-terminated.
    """
    today = date.today()

    with transaction.atomic():
        current_subscription = Subscription.get_active_subscription_by_domain(domain_name)

        if current_subscription:
            # Close out the live subscription as of today.
            current_subscription.date_end = today
            current_subscription.is_active = False
            current_subscription.save()

            # Move any remaining credit lines off the dying subscription.
            current_subscription.transfer_credits()

            # Downgrade to "no plan": compute which privileges are lost/gained
            # relative to the current plan and deactivate accordingly.
            _, downgraded_privs, upgraded_privs = get_change_status(current_subscription.plan_version, None)
            current_subscription.subscriber.deactivate_subscription(
                downgraded_privileges=downgraded_privs,
                upgraded_privileges=upgraded_privs,
                old_subscription=current_subscription,
                new_subscription=None,
            )

        # Future-dated (or starting today but not yet active) subscriptions
        # are hidden from ops rather than deleted.
        Subscription.visible_objects.filter(
            Q(date_start__gt=today) | Q(date_start=today, is_active=False),
            subscriber__domain=domain_name,
        ).update(is_hidden_to_ops=True)
def _delete_all_cases(domain_name):
    """Soft-delete every case in the domain, 500 ids at a time."""
    logger.info('Deleting cases...')
    accessor = CaseAccessors(domain_name)
    all_case_ids = accessor.get_case_ids_in_domain()
    progress = with_progress_bar(all_case_ids, stream=silence_during_tests())
    for id_chunk in chunked(progress, 500):
        accessor.soft_delete_cases(list(id_chunk))
    logger.info('Deleting cases complete.')
def _delete_all_forms(domain_name):
    """Soft-delete every form (of every doc type/state) in the domain."""
    logger.info('Deleting forms...')
    accessor = FormAccessors(domain_name)
    all_form_ids = []
    for doc_type in doc_type_to_state:
        all_form_ids.extend(accessor.get_all_form_ids_in_domain(doc_type=doc_type))
    progress = with_progress_bar(all_form_ids, stream=silence_during_tests())
    for id_chunk in chunked(progress, 500):
        accessor.soft_delete_forms(list(id_chunk))
    logger.info('Deleting forms complete.')
def _delete_data_files(domain_name):
    """Delete the domain's data-file blobs from the blob db."""
    data_file_metas = BlobMeta.objects.partitioned_query(domain_name).filter(
        parent_id=domain_name,
        type_code=CODES.data_file,
    )
    get_blob_db().bulk_delete(metas=list(data_file_metas))
def _delete_custom_data_fields(domain_name):
    """Clear cached custom-data-field lookups for the domain.

    The CustomDataFieldsDefinition documents themselves are removed by the
    bulk couch delete; only the cache needs clearing here.
    """
    logger.info('Deleting custom data fields...')
    for view in (LocationFieldsView, ProductFieldsView, UserFieldsView):
        get_by_domain_and_type.clear(domain_name, view.field_type)
    logger.info('Deleting custom data fields complete.')
# We use raw queries instead of ORM because Django queryset delete needs to
# fetch objects into memory to send signals and handle cascades. It makes deletion very slow
# if we have a millions of rows in stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
def apply_deletion_operations(domain_name):
    """Run every registered delete operation against ``domain_name``."""
    raw_ops, model_ops = _split_ops_by_type(DOMAIN_DELETE_OPERATIONS)

    # Raw SQL operations share a single cursor; ORM/custom operations
    # manage their own connections.
    with connection.cursor() as cursor:
        for raw_op in raw_ops:
            raw_op.execute(cursor, domain_name)

    for model_op in model_ops:
        model_op.execute(domain_name)
def _split_ops_by_type(ops):
    """Partition ops into (raw SQL ops, all other ops), preserving order."""
    raw_ops = [op for op in ops if isinstance(op, RawDeletion)]
    model_ops = [op for op in ops if not isinstance(op, RawDeletion)]
    return raw_ops, model_ops
| 40.634328
| 108
| 0.726354
|
4a0b47fbe3046efb9803f9308b125587251c225f
| 4,260
|
py
|
Python
|
ernie/split_strategies.py
|
MarcosFP97/ernie
|
62632e66c425be9ea09ab4a1724a315a297612a1
|
[
"Apache-2.0"
] | 147
|
2020-02-23T19:07:00.000Z
|
2020-06-08T08:42:27.000Z
|
ernie/split_strategies.py
|
MarcosFP97/ernie
|
62632e66c425be9ea09ab4a1724a315a297612a1
|
[
"Apache-2.0"
] | 9
|
2020-03-02T03:26:48.000Z
|
2020-06-08T18:50:46.000Z
|
ernie/split_strategies.py
|
MarcosFP97/ernie
|
62632e66c425be9ea09ab4a1724a315a297612a1
|
[
"Apache-2.0"
] | 14
|
2020-03-02T02:48:04.000Z
|
2020-06-05T07:49:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
class RegexExpressions:
    """Precompiled regexes used by the split strategies."""

    # Each "split_by_*" pattern matches a chunk of text up to and including
    # the delimiter (plus trailing whitespace), so findall() tiles the input.
    split_by_dot = re.compile(r'[^.]+(?:\.\s*)?')
    split_by_semicolon = re.compile(r'[^;]+(?:\;\s*)?')
    split_by_colon = re.compile(r'[^:]+(?:\:\s*)?')
    split_by_comma = re.compile(r'[^,]+(?:\,\s*)?')

    # Full http(s) URL (scheme, host, optional path/query).
    url = re.compile(
        r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}'
        r'\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
    )
    # Bare "word.word" domain-like token (intentionally loose).
    domain = re.compile(r'\w+\.\w+')
class SplitStrategy:
    """Splits text into pieces that each fit a tokenizer's max length.

    Patterns in ``split_patterns`` are tried in order: when a piece produced
    by pattern N is still too long, it is re-split with pattern N+1, and so
    on recursively.
    """

    def __init__(
        self,
        split_patterns,
        remove_patterns=None,
        group_splits=True,
        remove_too_short_groups=True
    ):
        # Normalize both pattern arguments to lists (remove_patterns may
        # stay None, meaning "strip nothing").
        if not isinstance(split_patterns, list):
            self.split_patterns = [split_patterns]
        else:
            self.split_patterns = split_patterns

        if remove_patterns is not None \
                and not isinstance(remove_patterns, list):
            self.remove_patterns = [remove_patterns]
        else:
            self.remove_patterns = remove_patterns

        # group_splits: greedily merge adjacent pieces while they still fit.
        # remove_too_short_groups: drop merged groups shorter than half the
        # tokenizer budget (only meaningful when grouping).
        self.group_splits = group_splits
        self.remove_too_short_groups = remove_too_short_groups

    def split(self, text, tokenizer, split_patterns=None):
        """Return a list of text pieces, each within the token budget.

        ``tokenizer`` is assumed to expose HuggingFace-style ``encode`` and
        ``max_len`` — TODO confirm against caller.
        """
        if split_patterns is None:
            if self.split_patterns is None:
                return [text]
            split_patterns = self.split_patterns

        def len_in_tokens(text_):
            # Length without special tokens; the special-token overhead is
            # accounted for once in max_tokens below.
            no_tokens = len(tokenizer.encode(text_, add_special_tokens=False))
            return no_tokens

        no_special_tokens = len(tokenizer.encode('', add_special_tokens=True))
        max_tokens = tokenizer.max_len - no_special_tokens

        # Strip unwanted spans (e.g. URLs) before measuring anything.
        if self.remove_patterns is not None:
            for remove_pattern in self.remove_patterns:
                text = re.sub(remove_pattern, '', text).strip()

        # Fast path: the whole (cleaned) text already fits.
        if len_in_tokens(text) <= max_tokens:
            return [text]

        selected_splits = []
        splits = map(lambda x: x.strip(), re.findall(split_patterns[0], text))

        # When grouping, adjacent pieces are accumulated here until adding
        # one more would exceed the budget.
        aggregated_splits = ''
        for split in splits:
            if len_in_tokens(split) > max_tokens:
                # Piece still too long: recurse with the next-finer pattern,
                # or keep it oversized if no pattern is left.
                if len(split_patterns) > 1:
                    sub_splits = self.split(
                        split, tokenizer, split_patterns[1:])
                    selected_splits.extend(sub_splits)
                else:
                    selected_splits.append(split)
            else:
                if not self.group_splits:
                    selected_splits.append(split)
                else:
                    new_aggregated_splits = \
                        f'{aggregated_splits} {split}'.strip()
                    if len_in_tokens(new_aggregated_splits) <= max_tokens:
                        aggregated_splits = new_aggregated_splits
                    else:
                        # Current group is full: flush it, start a new one.
                        selected_splits.append(aggregated_splits)
                        aggregated_splits = split

        # Flush the last partially-filled group.
        if aggregated_splits:
            selected_splits.append(aggregated_splits)

        # Optionally drop groups under half the budget — but never when that
        # would discard the only piece.
        remove_too_short_groups = len(selected_splits) > 1 \
            and self.group_splits \
            and self.remove_too_short_groups

        if not remove_too_short_groups:
            final_splits = selected_splits
        else:
            final_splits = []
            min_length = tokenizer.max_len / 2
            for split in selected_splits:
                if len_in_tokens(split) >= min_length:
                    final_splits.append(split)
        return final_splits
class SplitStrategies:
    """Namespace of ready-made :class:`SplitStrategy` configurations."""

    # Split into ever finer clauses (by '.', then ';', ':', ','), stripping
    # URLs and bare domains first; every clause is kept as its own chunk.
    SentencesWithoutUrls = SplitStrategy(split_patterns=[
        RegexExpressions.split_by_dot,
        RegexExpressions.split_by_semicolon,
        RegexExpressions.split_by_colon,
        RegexExpressions.split_by_comma
    ],
        remove_patterns=[RegexExpressions.url, RegexExpressions.domain],
        remove_too_short_groups=False,
        group_splits=False)

    # Same clause patterns, but consecutive short clauses are regrouped to
    # fill the token budget and too-short groups are discarded.
    GroupedSentencesWithoutUrls = SplitStrategy(split_patterns=[
        RegexExpressions.split_by_dot,
        RegexExpressions.split_by_semicolon,
        RegexExpressions.split_by_colon,
        RegexExpressions.split_by_comma
    ],
        remove_patterns=[RegexExpressions.url, RegexExpressions.domain],
        remove_too_short_groups=True,
        group_splits=True)
| 33.809524
| 79
| 0.592254
|
4a0b4821000f24089671de66eb1be0471c555d2d
| 15,201
|
py
|
Python
|
SensiScan/sensors.py
|
MadScientistHK/ScanWifiBLE-python
|
92db6f61b089495732d0f99ace489c05c6c29e06
|
[
"MIT"
] | null | null | null |
SensiScan/sensors.py
|
MadScientistHK/ScanWifiBLE-python
|
92db6f61b089495732d0f99ace489c05c6c29e06
|
[
"MIT"
] | null | null | null |
SensiScan/sensors.py
|
MadScientistHK/ScanWifiBLE-python
|
92db6f61b089495732d0f99ace489c05c6c29e06
|
[
"MIT"
] | 2
|
2018-05-15T12:29:49.000Z
|
2018-07-24T14:06:54.000Z
|
import sys
import wifi
import base64
import csv
import json
import threading
from datetime import datetime
import subprocess
import socket
import urllib
import os
import configparser
import subprocess as sp
os.system('sudo systemctl start bluetooth')
import pygatt
from binascii import hexlify
import time
import binascii
from bluepy import btle
from bluepy.btle import Scanner, DefaultDelegate
import paho.mqtt.client as mqtt
#Get the serial number of the raspberry pi
def getserial():
    """Return the Raspberry Pi CPU serial number read from /proc/cpuinfo.

    Returns the 16-character serial, "0000000000000000" when no Serial line
    is present, or "ERROR000000000" when the file cannot be read (e.g. not
    running on a Raspberry Pi).
    """
    cpuserial = "0000000000000000"
    try:
        # `with` guarantees the handle is closed even if reading fails —
        # the original leaked the file object on an exception.
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:6] == 'Serial':
                    cpuserial = line[10:26]
    except OSError:
        # Narrowed from a bare `except:` — only I/O failures are expected.
        cpuserial = "ERROR000000000"
    return cpuserial
#MQTT parameters, if you need to edit, use the file mqtt.conf
# NOTE(review): a `global` statement at module level is a no-op, and `id`
# shadows the `id` builtin for the rest of the module.
global id,client,a,b,c,d,e,f,g,h,i,j,topic
try:
    # Primary configuration: broker address, topic and credentials all come
    # from mqtt.conf; TLS uses the bundled ca.crt certificate.
    config = configparser.RawConfigParser()
    config.read('mqtt.conf')
    id = str(getserial())
    topic = config.get('MQTT','topic')
    broker_address = config.get('MQTT','broker_address')
    client = mqtt.Client(id)
    #To change the certificat for the mqtt server, replace the older one, or create a new one and uncomment the line below : client.tls_set("Name_of_your_certificat.crt")
    client.tls_set("ca.crt")
    client.username_pw_set(config.get('MQTT','username'), config.get('MQTT','password'))
    client.connect(broker_address)
except:
    # Fallback: hard-coded broker address with the alternate ca2.crt cert.
    try:
        config = configparser.RawConfigParser()
        config.read('mqtt.conf')
        id = str(getserial())
        topic = config.get('MQTT','topic')
        broker_address = "109.219.230.92"
        client = mqtt.Client(id)
        #To change the certificat for the mqtt server, replace the older one, or create a new one and uncomment the line below : client.tls_set("Name_of_your_certificat.crt")
        client.tls_set("ca2.crt")
        client.username_pw_set(config.get('MQTT','username'), config.get('MQTT','password'))
        client.connect(broker_address)
    # NOTE(review): if both attempts fail, `client` and `topic` stay
    # undefined and every later publish raises NameError.
    except: print("Can\'t connect to mqtt server")
#Create directory in the working directory and usb stick to save data
if not os.path.isdir("/home/pi/Data"):
    os.system("sudo mkdir /home/pi/Data")
try:
    if not os.path.isdir("/media/usb/Data"):
        os.system("sudo mkdir /media/usb/Data")
except OSError:
    # `error()` is defined further down this module, so calling it here at
    # import time raised NameError in the original; print directly instead.
    print("usb stick unmounted")
#Return True or False, if there a internet connection or not
def checkInternet():
    """Return True when an outbound TCP connection to www.google.com:80
    succeeds, False otherwise.

    The socket is always closed (the original leaked it), and any socket
    failure — DNS, timeout, refused connection — now yields False instead
    of only ``socket.gaierror``.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.settimeout(10)  # don't hang forever when the network is down
        sock.connect(("www.google.com", 80))
        return True
    except OSError:  # socket.gaierror is a subclass of OSError
        print ("Not connected")
        return False
    finally:
        sock.close()
#Encode string to base64
def ToBase64(s):
    """Return the Base64 encoding (bytes) of the UTF-8 encoding of *s*."""
    raw = s.encode('utf-8')
    return base64.b64encode(raw)
#Search if there is a number in a string
def hasNumbers(inputString):
    """Return True when *inputString* contains at least one decimal digit."""
    for ch in inputString:
        if ch.isdigit():
            return True
    return False
#Print error for debug
def error(msg):
    """Print an error message to stdout (lightweight debug logger)."""
    print(msg)
#Convert wifi.csv file to .json file
def wcj(file):
    """Convert a wifi-scan CSV *file* to JSON next to it, delete the CSV and
    return the JSON path.

    NOTE(review): each bare `except:` only logs — when an earlier step fails,
    the following steps run with undefined names (csvfile/jsonfile) and raise
    NameError inside the outer handler. Neither file handle is ever closed.
    """
    try:
        try:
            csvfile = open(file, 'r')
        except:error('Can\'t convert in csv to json the file :'+str(file)+' (wifi) in wcj(util.py)' )
        try:
            # JSON path = CSV path with the extension swapped.
            jfile = file[:-3]+"json"
            jsonfile = open(jfile, 'w')
        except:error('Can\'t create json in wcj(util.py)')
        try:
            # Fixed column names matching the rows written by SearchW().
            reader = csv.DictReader(csvfile,("ID","TimeStamp","b64","BSSID","channel","RSSI","EncryptionKey","SSID","Chiffrement","idp"))
            out = json.dumps( [ row for row in reader ] )
            jsonfile.write(out)
            os.system('sudo rm '+file)
            return jfile
        except:error('Can\'t write json in wcj(util.py)')
    except:error('Can\'t convert (wifi) csv to json in wcj(util.py)')
#Scan all the wifi around and save it in bsae64 in csv, then it's convert to json
def SearchW(comp):
    """Scan nearby wifi networks, write one Base64-encoded CSV row per cell
    to /home/pi/Data/sw_<comp>.csv, convert it to JSON and copy the JSON to
    the usb stick.

    Fix: the original had ``z.close`` without parentheses, so the CSV was
    never flushed/closed before being converted.
    """
    #Scan every wifi around
    try:
        cells = wifi.Cell.all('wlan0')
    except:error('wlan0 busy, can\'t scan (wif.py)')
    #If the program restart, there might be some old files, to avoid to lost them by rewrite, we skip it for the name, and send it later
    try:
        while os.path.isfile('/home/pi/Data/wifi_'+str(comp)+'.json') == True or os.path.isfile('/home/pi/Data/sw_'+str(comp)+'.json') == True:
            comp=comp+1
    except:error('Failed to skip the name (wif.py)')
    #Changing name in case we lost connection
    try:
        file = '/home/pi/Data/sw_'+str(comp)+'.csv'
        idp = "sw_"+str(comp)
        #id = str(getserial())
        b64 = 'true'
    except:error('Failed to write the name of the file (wif.py)')
    #Create file to send data
    try:
        print (file)
        z = open(file,'w')
    except:error('Failed to create file for scan (wif.py)')
    #Get data of scanning, all the data is in base64 to avoid error about special character
    try:
        for cell in cells:
            print (cell)
            timestamptmp = str(time.time())
            timestamp = ToBase64(timestamptmp)
            bssidtmp = str(cell.address)
            bssid = ToBase64(bssidtmp)
            channeltmp = str(cell.channel)
            channel = ToBase64(channeltmp)
            rssitmp = str(cell.signal)
            rssi = ToBase64(rssitmp)
            encryptiontmp = str(cell.encrypted)
            encryption = ToBase64(encryptiontmp)
            ssidtmp = str(cell.ssid)
            ssid = ToBase64(ssidtmp)
            if encryption == ToBase64('True'):
                chiffrementtmp = str(cell.encryption_type)
                chiffrement = ToBase64(chiffrementtmp)
            else: chiffrement = ToBase64('Not protected')
            rowtmp = str(id)+','+str(timestamp)+','+str(b64)+','+str(bssid)+','+str(channel)+','+str(rssi)+','+str(encryption)+','+str(ssid)+','+str(chiffrement)+','+str(idp)+'\n'
            #Writing data: strip the Python 3 bytes repr artifacts (b'...')
            rowtmp2 = rowtmp.replace("b'","")
            row = rowtmp2.replace("'","")
            #print (row)
            try:
                z.write(row)
            except:error('Failed to write the file (wif.py)')
    except:error('Failed to collect data (wif.py)')
    #Close and open the file to save the data before conversion
    try:
        z.close()  # was `z.close` (attribute access, never called)
        z = open(file,'r')
    except:error('Failed to open the file (wif.py)')
    #Convert csv to json
    try:
        jfile = wcj(file)
    except:error('Failed to convert the file (wif.py)')
    #Sending the file if there is a internet connection
    try:
        os.system("sudo cp "+jfile+" /media/usb/Data")
    except:error("usb stick unmounted")
#Scan wifi every 10 seconds
def w():
    """Run the wifi scanner forever: one SearchW() pass every 10 seconds,
    with an incrementing file counter and logging on failure."""
    comp = 0
    while 1:
        print ("\n=========================== Scan Wifi Start =========================\n")
        try:
            SearchW(comp)
            comp=comp+1
            print (comp)
        except:error('\n Error at : '+str(time.time())+' . Can\'t run scanWifi in sw.py')
        print ("\n========================= Scan Wifi Complete ========================\n")
        time.sleep(10)
def whichConnectedWifi():
    """Return the ESSID of the currently associated wifi network, or None.

    Fix: ``subprocess.check_output`` returns *bytes* in Python 3, so the
    original comparison of a bytes token against the str "ESSID" could
    never match; the output is decoded to text first now.
    """
    networkInfos = subprocess.check_output(['iwgetid']).decode('utf-8', 'replace').split()
    for networkInfo in networkInfos:
        if networkInfo[0:5]=="ESSID":
            info = networkInfo.split('"')
            connectedWifi = info[1]
            return connectedWifi
    return None
class ScanDelegate(DefaultDelegate):
    """bluepy scan delegate that logs every discovery/advertising event."""
    def __init__(self):
        DefaultDelegate.__init__(self)

    def handleDiscovery(self, dev, isNewDev, isNewData):
        # Called by bluepy for each advertising packet received during a scan.
        if isNewDev:
            print("Discovered device", dev.addr)
        elif isNewData:
            print("Received new data from", dev.addr)
#Saves all the captors' data in the working directory if there is no internet connection
def writedata(data):
    """Append *data* to the local offline buffer (mydata.json) when there is
    no internet connection, so offline() can re-send it later."""
    if not checkInternet():
        # `with` closes the file; the explicit close() in the original was
        # redundant inside the context manager.
        with open("mydata.json", "a") as f:
            f.write(str(data) + ',')
#Saves all the captors' data in the usb stick as a historic
def savedata(data):
    """Append *data* to the usb-stick history file (/media/usb/mydata.json).

    NOTE(review): raises OSError when the usb stick is not mounted — callers
    do not guard against that. The redundant close() inside the `with` block
    was removed.
    """
    with open("/media/usb/mydata.json", "a") as f:
        f.write(str(data) + ',\n')
#If there is a save file in the working directory, send it when connection is available
def offline(handle,value):
    """BLE notification callback that flushes the offline buffer: when the
    connection is back and mydata.json exists, publish its whole content and
    delete it.

    NOTE(review): publishes to the hard-coded topic "sensiLogger", not the
    configured `topic` used by the sensor callbacks — confirm intended.
    """
    if checkInternet() == True and os.path.isfile("mydata.json")==True:
        with open("mydata.json","r") as alldata:
            test = str(alldata.read())
        print (test)
        client.publish("sensiLogger", test)
        os.system('sudo rm mydata.json')
    elif os.path.isfile("mydata.json")==False:
        client.publish("sensiLogger","Pas de tableau a envoyer")
#Get the luminosity value
def Luminosity(handle, value):
    """BLE notification callback: decode the luminosity payload and publish
    it as JSON over MQTT (also buffered locally and archived on usb).

    NOTE(review): under Python 3, str() of the bytes from hexlify() yields
    "b'..'" so the numeric slices are shifted by the 2-char prefix; the
    offsets appear calibrated for that — verify against the payload layout.
    """
    lumHex = str(hexlify(value))
    # Little-endian 16-bit value reassembled from two hex-digit pairs.
    lum = int(lumHex[8:10] + lumHex[6:8],16)
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Luminosity\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(lum)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Get the temperature value
def Temperature(handle, value):
    """BLE notification callback: decode the temperature payload (tenths of
    a degree, little-endian 16-bit) and publish it as JSON over MQTT.

    NOTE(review): slice offsets assume the "b'..'" prefix that str() adds to
    hexlify()'s bytes under Python 3 — verify against the payload layout.
    """
    temHex = str(hexlify(value))
    tem = int(temHex[8:10] + temHex[6:8],16)/10
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Temperature\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(tem)+"\"}"
    writedata(str(myData))
    savedata(str(myData))
    client.publish(topic,str(myData))
#Get the battery level
def Battery(handle, value):
    """BLE notification callback: decode the battery voltage (millivolts,
    little-endian 16-bit, converted to volts) and publish it over MQTT.

    NOTE(review): slice offsets assume the "b'..'" prefix that str() adds to
    hexlify()'s bytes under Python 3 — verify against the payload layout.
    """
    batHex = str(hexlify(value))
    bat = int(batHex[12:14] +batHex[10:12],16)/1000
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Battery\", \"id\":\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(bat)+"\"}"
    client.publish(topic,str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Get the humidity value
def Humidity(handle, value):
    """BLE notification callback: decode the humidity payload (tenths of a
    percent, little-endian 16-bit) and publish it as JSON over MQTT.

    NOTE(review): slice offsets assume the "b'..'" prefix that str() adds to
    hexlify()'s bytes under Python 3 — verify against the payload layout.
    """
    humHex = str(hexlify(value))
    hum = int(humHex[8:10] + humHex[6:8],16)/10
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Humidity\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(hum)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Get the accelerometer, gyroscope and magnetometer values
def Motion(handle, value):
    """BLE notification callback: decode accelerometer, gyroscope and
    magnetometer values packed in one payload and publish three separate
    JSON messages over MQTT.

    NOTE(review): all values are parsed as *unsigned* little-endian 16-bit
    integers — negative axis readings would come out wrong if the sensor
    sends two's-complement values. Slice offsets also assume the "b'..'"
    prefix that str() adds to hexlify()'s bytes under Python 3 — verify.
    """
    motHex = str(hexlify(value))
    tim = int(round(time.time() * 1000))  # shared timestamp in milliseconds
    # Accelerometer and magnetometer are scaled by 1/100; gyro is raw.
    accX = int(motHex[8:10] + motHex[6:8],16)/100
    accY = int(motHex[12:14] + motHex[10:12],16)/100
    accZ = int(motHex[16:18] + motHex[14:16],16)/100
    gyrX = int(motHex[20:22] + motHex[18:20],16)
    gyrY = int(motHex[24:26] + motHex[22:24],16)
    gyrZ = int(motHex[28:30] + motHex[26:28],16)
    magX = int(motHex[32:34] + motHex[30:32],16)/100
    magY = int(motHex[36:38] + motHex[34:36],16)/100
    magZ = int(motHex[40:42] + motHex[38:40],16)/100
    myData="{\"type\":\"Accelerometer\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(accX)+"\", \"Y\" : \""+str(accY)+"\", \"Z\" : \""+str(accZ)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
    myData="{\"type\":\"Gyroscope\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(gyrX)+"\", \"Y\" : \""+str(gyrY)+"\", \"Z\" : \""+str(gyrZ)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
    myData="{\"type\":\"Magnetometer\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(magX)+"\", \"Y\" : \""+str(magY)+"\", \"Z\" : \""+str(magZ)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Get the pressure value
def Pressure(handle, value):
    """BLE notification callback: decode the pressure payload (hundredths of
    a unit, little-endian 32-bit) and publish it as JSON over MQTT.

    NOTE(review): slice offsets assume the "b'..'" prefix that str() adds to
    hexlify()'s bytes under Python 3 — verify against the payload layout.
    """
    preHex = str(hexlify(value))
    pre = int(preHex[12:14] + preHex[10:12] + preHex[8:10] + preHex[6:8],16)/100
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Pressure\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(pre)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Get the mic level
def Mic_Level(handle, value):
    """BLE notification callback: decode the microphone level (little-endian
    16-bit) and publish it as JSON over MQTT.

    NOTE(review): slice offsets assume the "b'..'" prefix that str() adds to
    hexlify()'s bytes under Python 3 — verify against the payload layout.
    """
    micHex = str(hexlify(value))
    mic = int(micHex[8:10] + micHex[6:8],16)
    tim = int(round(time.time() * 1000))  # timestamp in milliseconds
    myData="{\"type\":\"Mic_Level\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(mic)+"\"}"
    client.publish(topic, str(myData))
    writedata(str(myData))
    savedata(str(myData))
#Connect to the sensiBLE, to the mqtt server and send all the data of the captors
def senddata():
    """Main SensiBLE loop: scan for the device, subscribe to every sensor
    characteristic, then watch the BLE/wifi connection and start over when
    it drops.

    Runs forever; each iteration reconnects to the MQTT broker and performs
    a fresh 10-second BLE scan.
    """
    while 1:
        cont = 1
        # broker_address was set by the module-level MQTT configuration.
        client.connect(broker_address)
        client.loop_start()
        # Remember the wifi network so we can detect a network change below.
        connectedWifi = whichConnectedWifi()
        scanner = Scanner().withDelegate(ScanDelegate())
        devices = scanner.scan(10.0)
        uuid = "00:00:00:00:00:00"
        for dev in devices:
            print("Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi))
            for (adtype, desc, value) in dev.getScanData():
                # Pick the first advertiser named "SensiBLE".
                if value=="SensiBLE":
                    uuid = dev.addr
                    print(" %s = %s" % (desc, value))
        print("Connecting...")
        time.sleep(1)
        adapter = pygatt.GATTToolBackend()
        try:
            adapter.start()
            device = adapter.connect(uuid)
            # Subscribe each sensor characteristic to its decoder callback.
            device.subscribe("01000000-0001-11e1-ac36-0002a5d5c51b",callback=Luminosity)
            time.sleep(1)
            device.subscribe("00040000-0001-11e1-ac36-0002a5d5c51b",callback=Temperature)
            time.sleep(1)
            device.subscribe("00020000-0001-11e1-ac36-0002a5d5c51b",callback=Battery)
            time.sleep(1)
            device.subscribe("00080000-0001-11e1-ac36-0002a5d5c51b",callback=Humidity)
            time.sleep(1)
            device.subscribe("00e00000-0001-11e1-ac36-0002a5d5c51b",callback=Motion)
            time.sleep(1)
            device.subscribe("00100000-0001-11e1-ac36-0002a5d5c51b",callback=Pressure)
            time.sleep(1)
            device.subscribe("04000000-0001-11e1-ac36-0002a5d5c51b",callback=Mic_Level)
            time.sleep(1)
            # The mic characteristic doubles as the trigger for flushing the
            # offline buffer (same UUID subscribed twice).
            device.subscribe("04000000-0001-11e1-ac36-0002a5d5c51b",callback=offline)
            # Watch loop: exit (and rescan) when the BLE link or the wifi
            # network changes. NOTE(review): no sleep here — this busy-loops
            # calling hcitool/iwgetid as fast as possible.
            while cont==1:
                stdoutdata = sp.getoutput("hcitool con")
                if not uuid.upper() in stdoutdata.split() or connectedWifi != whichConnectedWifi():
                    print("not connected")
                    client.loop_stop()
                    client.disconnect()
                    cont = 0
                else:
                    print("connected")
        except:
            # Any BLE failure: report it over MQTT and retry from the top.
            print("error")
            myData={"error":"Couldn't connect to the sensiBLE"}
            client.publish(topic, str(myData))
            client.loop_stop()
            client.disconnect()
        finally:
            adapter.stop()
#Launch wifi scan and sensible thread
def launcher():
    """Start and supervise the wifi-scan thread and the SensiBLE thread.

    A finished Thread object cannot be start()ed again — the original
    re-called start() on the dead object, raising RuntimeError — so a fresh
    Thread is created whenever a worker has died. A short sleep prevents the
    supervision loop from busy-waiting.
    """
    wifi_thread = threading.Thread(target=w, args=())
    sensi_thread = threading.Thread(target=senddata, args=())
    while 1:
        if not wifi_thread.is_alive():
            if wifi_thread.ident is not None:
                # Thread ran and died: build a replacement before restarting.
                wifi_thread = threading.Thread(target=w, args=())
            wifi_thread.start()
        if not sensi_thread.is_alive():
            if sensi_thread.ident is not None:
                sensi_thread = threading.Thread(target=senddata, args=())
            sensi_thread.start()
        time.sleep(5)  # avoid a busy-wait supervision loop

if __name__ == "__main__":
    # Guard the entry point so importing this module does not start threads.
    launcher()
| 41.532787
| 181
| 0.596079
|
4a0b48727259600e5e90e851ffbc7d70b5a86485
| 1,712
|
py
|
Python
|
src/current_year_data_loader.py
|
ma-kain/vscode-remote-python
|
2337508b496ad78978aeba4f83120120dee46de9
|
[
"MIT"
] | null | null | null |
src/current_year_data_loader.py
|
ma-kain/vscode-remote-python
|
2337508b496ad78978aeba4f83120120dee46de9
|
[
"MIT"
] | null | null | null |
src/current_year_data_loader.py
|
ma-kain/vscode-remote-python
|
2337508b496ad78978aeba4f83120120dee46de9
|
[
"MIT"
] | null | null | null |
import sqlite3 as sl
import pandas as pd
import nse_india
import datetime
import one_time_data_loader
def import_into_db(data, sql, con, index):
    """Bulk-insert *data* rows via *sql*, appending *index* to each row.

    The first element of *data* is a header and is skipped. Column 0 of each
    remaining row is a date in '%d-%b-%Y' form and is normalised to ISO
    'YYYY-MM-DD' before insertion. The insert runs inside a transaction and
    the affected row count is printed.
    """
    def _prepare(raw_row):
        # Normalise the date column and tag the row with its index name.
        fields = list(raw_row)
        parsed = datetime.datetime.strptime(fields[0], '%d-%b-%Y')
        fields[0] = parsed.strftime('%Y-%m-%d')
        fields.append(index)
        return fields

    prepared = [_prepare(r) for r in data[1:]]
    with con:
        print(con.executemany(sql, prepared).rowcount, 'rows affected')
if __name__ == "__main__":
    # Incremental loader: resume from the day after the newest stored
    # 'NIFTY 50' date and fetch data up to today for each resource/index.
    db_con = sl.connect('nse_india.db')
    start, end = None, datetime.date.today()
    with db_con:
        data = db_con.execute("SELECT max(Date) FROM HISTORICALINDICES WHERE Indices = 'NIFTY 50';").fetchall()[0]
        start = datetime.datetime.strptime(data[0], '%Y-%m-%d').date() + datetime.timedelta(days=1)
    print(f'Start: {start}, End: {end}.')
    if start >= end:
        print('Exit program as start is not greater than end.')
        # NOTE(review): this prints the `exit` builtin object itself —
        # looks like leftover debugging.
        print(exit)
        exit()
    # Load historical index values, then PE/PB ratios, for both indices.
    resource, index = 'historicalindices', 'NIFTY 50'
    data = nse_india.NseIndia().get_data(resource, index, start, end)
    import_into_db(data, one_time_data_loader.historicalindices_sql, db_con, index)
    index = 'NIFTY NEXT 50'
    data = nse_india.NseIndia().get_data(resource, index, start, end)
    import_into_db(data, one_time_data_loader.historicalindices_sql, db_con, index)
    resource, index = 'historical_pepb', 'NIFTY 50'
    data = nse_india.NseIndia().get_data(resource, index, start, end)
    import_into_db(data, one_time_data_loader.historical_pepb_sql, db_con, index)
    index = 'NIFTY NEXT 50'
    data = nse_india.NseIndia().get_data(resource, index, start, end)
    import_into_db(data, one_time_data_loader.historical_pepb_sql, db_con, index)
| 33.568627
| 114
| 0.675234
|
4a0b4a05187385879426d4744d7387442891c1a9
| 2,799
|
py
|
Python
|
scsr_api/social/models/transaction.py
|
hiperlogic/scsr-api
|
d1c40d7b86b94c50c88833149c29f413e6d39843
|
[
"MIT"
] | 1
|
2021-02-09T21:33:56.000Z
|
2021-02-09T21:33:56.000Z
|
scsr_api/social/models/transaction.py
|
hiperlogic/scsr-api
|
d1c40d7b86b94c50c88833149c29f413e6d39843
|
[
"MIT"
] | null | null | null |
scsr_api/social/models/transaction.py
|
hiperlogic/scsr-api
|
d1c40d7b86b94c50c88833149c29f413e6d39843
|
[
"MIT"
] | null | null | null |
import json
from mongoengine import signals
from email.utils import parseaddr
from application import db
class FriendTransaction(db.Document):
    """
    Class that represents a friend request/cancelation/block transaction
    Attributes:
        type: The type of transaction - [REQUEST, CANCEL, BLOCK, UNBLOCK]
        dateRequest: The date the transaction was made
        sender: Who issued the request
        receiver: To whom the request was issued
        status: What is the status of the transaction: Accepted, Rejected, Pending
    Reasons:
        Maintaining the transactions for friend request allows for the administrators to perform governance tasks.
    """
    """Transaction Type:
        Refers to what the transaction means.
        The CNL and BLK transaction types must refer to a REQ transaction
        The UBL transaction type must refer to a BLK transaction
    """
    # Mapping of stored 3-letter codes to human-readable transaction types.
    type = {
        "REQ": "REQUEST",
        "CNL": "CANCEL",
        "BLK": "BLOCK",
        "UBL": "UNBLOCK"
    }
    # Mapping of stored 3-letter codes to human-readable statuses.
    status_codes = {
        "PND": "PENDING",
        "ACC": "ACCEPTED",
        "REJ": "REJECTED"
    }
    external_id = db.StringField(db_field="external_id", required=True)
    date_request = db.DateTimeField(db_field="date_request")
    # NOTE(review): `User` is not imported anywhere in this module —
    # evaluating this class body raises NameError unless User is made
    # available elsewhere. Confirm the intended import.
    sender = db.ReferenceField(User, required=True, db_field="sender")
    # The receiver is only users, this is the transaction for friends maintenance and governance
    receiver = db.ReferenceField(User, required=True, db_field="receiver")
    transaction_type = db.StringField(db_field="transaction_type", choices = type.keys(), required=True, default="REQ")
    status = db.StringField(db_field="status", choices=status_codes.keys(),default="PND", required = True)
    # Referred Transaction occurs when a Cancel or Unblock transaction occurs.
    # The Cancel refers to (accepted) Request transactions
    # The Unblock refers to Block transactions
    referred_transaction = db.ReferenceField('self', db_field="referred_transaction", required = False)

    def to_obj(self):
        """Serialize to a plain dict: references become nested dicts, the
        Mongo _id is dropped and HATEOAS-style links are added."""
        jsonStr=self.to_json()
        retorno=json.loads(jsonStr)
        retorno.pop("_id")
        retorno['sender'] = self.sender.to_obj()
        retorno['receiver'] = self.receiver.to_obj()
        # NOTE(review): referred_transaction is optional (required=False);
        # this raises AttributeError when it is None — confirm intended.
        retorno['referred_transaction'] = self.referred_transaction.to_obj()
        retorno["links"]= [
            {"rel": "self", "href": "/transaction/" + self.external_id }
        ]
        return retorno

    @staticmethod
    def to_obj_list(trans_list):
        # Convenience wrapper: serialize a list of transactions.
        retorno=[trans.to_obj() for trans in trans_list]
        return retorno

    def getStatus(self):
        # Human-readable status, e.g. "PND" -> "PENDING".
        return self.status_codes[self.status]

    def getTransactionType(self):
        # Human-readable type, e.g. "REQ" -> "REQUEST".
        return self.type[self.transaction_type]
class SystemTransaction(db.Document):
    # TODO: placeholder — no fields or behavior defined yet.
    pass
| 36.350649
| 119
| 0.67274
|
4a0b4a070b712dd659d0c776f0c17472e43cd29c
| 927
|
py
|
Python
|
agents/abstract.py
|
NunoEdgarGFlowHub/reco-gym
|
42701b7ae115b879edf6881f368878c458a2a368
|
[
"Apache-2.0"
] | null | null | null |
agents/abstract.py
|
NunoEdgarGFlowHub/reco-gym
|
42701b7ae115b879edf6881f368878c458a2a368
|
[
"Apache-2.0"
] | null | null | null |
agents/abstract.py
|
NunoEdgarGFlowHub/reco-gym
|
42701b7ae115b879edf6881f368878c458a2a368
|
[
"Apache-2.0"
] | null | null | null |
from numpy.random import choice
class Agent:
    """Abstract base class for recommendation agents.

    Shows the interface a custom agent must implement; this base version
    ignores observations and recommends a product uniformly at random.
    """

    def __init__(self, env):
        # Keep the environment so act() can read its action space.
        self.env = env

    def act(self, observation):
        """Return an integer in [0, num_products) — the recommended product.

        ``observation`` is either ``None`` or an ``Organic_Session`` (see
        reco_gym/session.py); this stub does not use it.
        """
        if observation is not None:
            # Subclasses would process the Organic_Session here.
            pass
        # For now, just pick a random recommendation.
        return choice(self.env.action_space)

    def train(self, observation, action, reward):
        """Update the model from an (observation, action, reward) tuple.

        The base implementation is a no-op.
        """
        pass
| 33.107143
| 79
| 0.648328
|
4a0b4be34c40601d11035cc95c24a8f6c615a23e
| 1,346
|
py
|
Python
|
setup.py
|
s-sajid-ali/xwp_cython
|
b95610b9bfb2af7a1fc69a3cee2e20e7970c7c46
|
[
"MIT"
] | null | null | null |
setup.py
|
s-sajid-ali/xwp_cython
|
b95610b9bfb2af7a1fc69a3cee2e20e7970c7c46
|
[
"MIT"
] | null | null | null |
setup.py
|
s-sajid-ali/xwp_cython
|
b95610b9bfb2af7a1fc69a3cee2e20e7970c7c46
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils.build_ext import new_build_ext
import numpy as np
# Cython extension modules: a 1-D propagator and two 2-D variants (2-loop
# and 4-loop). All are compiled with OpenMP ('-fopenmp' / '-lomp') and
# '-O3 -march=native' — note the latter ties binaries to the build host CPU.
ext_modules = [
    Extension('xwp_cython.prop1d',
              sources=['xwp_cython/prop1d.pyx'],
              extra_compile_args=['-O3', '-fopenmp', '-march=native'],
              extra_link_args=['-lomp'],
              language='c'),
    Extension('xwp_cython.prop2d_2loop',
              sources=['xwp_cython/prop2d_2loop.pyx'],
              extra_compile_args=['-O3', '-fopenmp', '-march=native'],
              extra_link_args=['-lomp'],
              language='c'),
    Extension('xwp_cython.prop2d_4loop',
              sources=['xwp_cython/prop2d_4loop.pyx'],
              extra_compile_args=['-O3', '-fopenmp', '-march=native'],
              extra_link_args=['-lomp'],
              language='c')
]
if __name__ == "__main__":
    # Standard setuptools entry point; `new_build_ext` cythonizes the .pyx
    # sources at build time and numpy headers are added for all extensions.
    setup(
        name='xwp_cython',
        packages=[
            'xwp_cython',
        ],
        cmdclass={'build_ext': new_build_ext},
        ext_modules=(ext_modules),
        include_dirs=[np.get_include()],
        description='X-ray wave propagation techniques in cython',
        url='https://github.com/s-sajid-ali/xwp_cython',
        author='Sajid Ali',
        author_email='sajidsyed2021@u.northwestern.edu',
        zip_safe=False
    )
| 32.829268
| 70
| 0.590639
|
4a0b4cf7f9836feb79e1f381c3ef1cd7492233a2
| 2,495
|
py
|
Python
|
web/service/github/api/v3/authentication/OAuthTokenFromDatabaseAndCreateApiAuthentication.py
|
ytyaru/GitHub.UserRegister.Authentication.Abstract.201704201525
|
b7d41894be6a9ebcafdc065505aebf0222524f1f
|
[
"CC0-1.0"
] | 1
|
2017-06-08T10:40:56.000Z
|
2017-06-08T10:40:56.000Z
|
web/service/github/api/v3/authentication/OAuthTokenFromDatabaseAndCreateApiAuthentication.py
|
ytyaru/Python.OTP.tools.201704200841
|
120239f96e40467203939492fd7ad9c5967fac0f
|
[
"CC0-1.0"
] | null | null | null |
web/service/github/api/v3/authentication/OAuthTokenFromDatabaseAndCreateApiAuthentication.py
|
ytyaru/Python.OTP.tools.201704200841
|
120239f96e40467203939492fd7ad9c5967fac0f
|
[
"CC0-1.0"
] | null | null | null |
#!python3
#encoding:utf-8
import traceback
from web.service.github.api.v3.authentication.OAuthTokenFromDatabaseAuthentication import OAuthTokenFromDatabaseAuthentication
import pyotp
import dataset
class OAuthTokenFromDatabaseAndCreateApiAuthentication(OAuthTokenFromDatabaseAuthentication):
    """GitHub OAuth authentication that loads an access token from the
    database and, when loading fails, creates a new token through the GitHub
    API and stores it in the DB for reuse."""

    def __init__(self, db, username, password, two_factor_secret=None):
        self.__db = db
        self.__username = username
        self.__password = password
        # TOTP secret for accounts that use two-factor authentication.
        self.__two_factor_secret = two_factor_secret
        try:
            super().__init__(self.__db, self.__username)
        except:
            # Base-class init failure is logged and swallowed here.
            traceback.print_exc()

    def SetAccessToken(self, scopes=None):
        try:
            self.__token = super().SetAccessToken(scopes)
        except:
            # Generate a new token via the API and store it in the DB so it
            # can be reused on the next run.
            traceback.print_exc()
            self.__token = self.__CreateToken(scopes)

    # NOTE(review): this method is missing `self` — it is called as a bound
    # method above (so `scopes` receives the instance) and its body uses
    # `self`, `args`, `web` and `ssh_key_id`, none of which are defined
    # here. It raises at runtime and needs fixing before use.
    def __CreateToken(scopes):
        account = self.__db.account['Accounts'].find_one(Username=args.username)
        if None is account:
            raise Exception('指定ユーザ {user} はDB未登録です。登録してください。')
        self.__db.account['Accounts'].insert(self.__CreateRecordAccount())
        otp = None
        if None is not self.__two_factor_secret:
            self.__totp = pyotp.TOTP(self.__two_factor_secret)
            otp = self.__totp.now()
        api = web.service.github.api.v3.Authorizations(self.__username, self.__password)
        j = api.Create(otp=self.__totp.now(), scopes=scopes)
        self.__db.account['AccessTokens'].insert(self.__CreateRecordToken(account['Id'], j))
        return j['token']

    def __CreateRecordToken(self, account_id, j):
        # Build the AccessTokens row persisted for a freshly created token.
        return dict(
            AccountId=account_id,
            IdOnGitHub=j['id'],
            Note=j['note'],
            AccessToken=j['token'],
            Scopes=self.__ArrayToString(j['scopes']),
            SshKeyId=ssh_key_id
        )

    """
    Build the headers used with the requests-library API.
    """
    def GetHeaders(self):
        return super().GetHeaders()

    """
    Build the **kwargs used with the requests-library API.
    requests.get(url, **this.GetRequestParameters())
    """
    def GetRequestParameters(self):
        return super().GetRequestParameters()

    def __ArrayToString(self, array):
        # Join items with ',', returning None for an empty/missing array.
        if None is array or 0 == len(array):
            return None
        ret = ""
        for v in array:
            ret += v + ','
        print(ret)
        print(ret[:-1])
        return ret[:-1]
| 34.178082
| 126
| 0.627255
|
4a0b4d64e178d4888ee4d28841ef98128525d761
| 14,829
|
py
|
Python
|
tests/test_diff.py
|
dxenonb/hangar-py
|
a47aaf0ab69333e5d2629b231e0f9b4ca835c801
|
[
"Apache-2.0"
] | null | null | null |
tests/test_diff.py
|
dxenonb/hangar-py
|
a47aaf0ab69333e5d2629b231e0f9b4ca835c801
|
[
"Apache-2.0"
] | null | null | null |
tests/test_diff.py
|
dxenonb/hangar-py
|
a47aaf0ab69333e5d2629b231e0f9b4ca835c801
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import numpy as np
def create_meta_nt(name):
    """Build a hangar MetadataRecordKey for *name* (metadata diff helper)."""
    from hangar.records.parsing import MetadataRecordKey
    return MetadataRecordKey(name)
class TestReaderDiff(object):
    def test_diff_by_commit_and_branch(self, repo_2_br_no_conf):
        """Diffing against a commit hash and against the branch pointing at
        that commit must produce identical results."""
        repo = repo_2_br_no_conf
        testco = repo.checkout(branch='testbranch')
        masterco = repo.checkout(branch='master')
        commit_diffs = masterco.diff.commit(testco.commit_hash)
        branch_diffs = masterco.diff.branch('testbranch')
        assert commit_diffs == branch_diffs
        testco.close()
        masterco.close()
    def test_diff_with_wrong_commit_hash(self, repo_2_br_no_conf):
        """Diffing against an unknown commit hash must raise ValueError."""
        repo = repo_2_br_no_conf
        testco = repo.checkout(branch='testbranch')
        masterco = repo.checkout(branch='master')
        # Corrupt a valid hash to guarantee it does not exist.
        wrong_commit_hash = testco.commit_hash + 'WrongHash'
        with pytest.raises(ValueError):
            masterco.diff.commit(wrong_commit_hash)
        testco.close()
        masterco.close()
    def test_diff_with_wrong_branch_name(self, repo_1_br_no_conf):
        """Diffing against a non-existent branch name must raise ValueError."""
        repo = repo_1_br_no_conf
        masterco = repo.checkout(branch='master')
        with pytest.raises(ValueError):
            masterco.diff.branch('wrong_branch_name')
        masterco.close()
    def test_comparing_diffs_of_dev_and_master(self, repo_1_br_no_conf):
        """Diffs must be symmetric: the 'dev' sample changes seen from master
        equal the 'master' sample changes seen from testbranch."""
        repo = repo_1_br_no_conf
        dummyData = np.arange(50)
        # mutating and removing data from testbranch
        testco = repo.checkout(write=True, branch='testbranch')
        testco.arraysets['dummy']['1'] = dummyData
        del testco.arraysets['dummy']['2']
        testco.commit("mutation and removal")
        testco.close()
        co = repo.checkout(branch='master')
        diffdata = co.diff.branch('testbranch')
        diffs1 = diffdata[0]
        # NOTE(review): the first read checkout is never closed before the
        # second one is opened — confirm hangar tolerates overlapping
        # read checkouts.
        co = repo.checkout(branch='testbranch')
        diffdata = co.diff.branch('master')
        diffs2 = diffdata[0]
        assert diffs1['samples']['dev']['dummy']['additions'] == diffs2['samples']['master']['dummy']['additions']
        assert diffs1['samples']['dev']['dummy']['mutations'] == diffs2['samples']['master']['dummy']['mutations']
        assert diffs1['samples']['dev']['dummy']['removals'] == diffs2['samples']['master']['dummy']['removals']
        assert diffs1['samples']['dev']['dummy']['unchanged'] == diffs2['samples']['master']['dummy']['unchanged']
        co.close()
def test_diff_data_samples(self, repo_1_br_no_conf):
repo = repo_1_br_no_conf
dummyData = np.arange(50)
# mutating and removing data from testbranch
testco = repo.checkout(write=True, branch='testbranch')
testco.arraysets['dummy']['1'] = dummyData
del testco.arraysets['dummy']['2']
testco.commit("mutation and removal")
testco.close()
co = repo.checkout(branch='master')
diffdata = co.diff.branch('testbranch')
conflict_dict = diffdata[1]
assert conflict_dict['conflict_found'] is False
diffs = diffdata[0]
# testing arraysets and metadata that has no change
assert diffs['arraysets']['dev']['additions'] == {}
assert diffs['arraysets']['dev']['mutations'] == {}
assert diffs['arraysets']['dev']['removals'] == {}
assert 'dummy' in diffs['arraysets']['master']['unchanged'].keys()
assert create_meta_nt('foo' ) in diffs['metadata']['dev']['additions'].keys()
assert len(diffs['metadata']['master']['additions'].keys()) == 0
assert create_meta_nt('hello') in diffs['metadata']['master']['unchanged'].keys()
assert create_meta_nt('hello') in diffs['metadata']['dev']['unchanged'].keys()
assert diffs['metadata']['dev']['mutations'] == {}
assert diffs['metadata']['dev']['removals'] == {}
# testing datarecords for addition, unchanged mutated, removed
for datarecord in diffs['samples']['dev']['dummy']['additions']:
assert 9 < int(datarecord.data_name) < 20
for datarecord in diffs['samples']['dev']['dummy']['unchanged']:
assert 0 <= int(datarecord.data_name) < 10
for removed in diffs['samples']['dev']['dummy']['removals']:
removed.data_name == 2
for mutated in diffs['samples']['dev']['dummy']['mutations']:
mutated.data_name == 1
co.close()
    def test_sample_addition_conflict(self, repo_1_br_no_conf):
        """Conflict type t1: the same new sample key added on both branches
        with different data must be reported as a conflict."""
        # t1
        repo = repo_1_br_no_conf
        dummyData = np.arange(50)
        # adding data in master
        co = repo.checkout(write=True)
        dummyData[:] = 123
        co.arraysets['dummy']['55'] = dummyData
        co.commit('Adding data in master')
        co.close()
        # adding data in testbranch
        co = repo.checkout(write=True, branch='testbranch')
        dummyData[:] = 234
        co.arraysets['dummy']['55'] = dummyData
        co.commit('adding data in testbranch')
        co.close()
        co = repo.checkout()
        conflicts = co.diff.branch('testbranch')[1]
        assert conflicts['conflict_found'] is True
        assert len(conflicts['sample']['dummy'].t1) == 1
        assert conflicts['sample']['dummy'].t1[0].data_name == '55'
        co.close()
    def test_sample_removal_conflict(self, repo_1_br_no_conf):
        """Conflict types t21/t22: a sample removed on one branch but mutated
        on the other (in either direction) must be reported."""
        # t21 and t22
        dummyData = np.arange(50)
        dummyData[:] = 123
        repo = repo_1_br_no_conf
        co = repo.checkout(write=True)
        del co.arraysets['dummy']['6']
        co.arraysets['dummy']['7'] = dummyData
        co.commit('removal & mutation in master')
        co.close()
        co = repo.checkout(write=True, branch='testbranch')
        co.arraysets['dummy']['6'] = dummyData
        del co.arraysets['dummy']['7']
        co.commit('removal & mutation in dev')
        co.close()
        co = repo.checkout()
        conflicts = co.diff.branch('testbranch')[1]
        assert len(conflicts['sample']['dummy'].t21) == 1
        assert len(conflicts['sample']['dummy'].t22) == 1
        assert conflicts['sample']['dummy'].t21[0].data_name == '6'
        assert conflicts['sample']['dummy'].t22[0].data_name == '7'
        co.close()
    def test_sample_mutation_conflict(self, repo_1_br_no_conf):
        """Conflict type t3: the same sample mutated to different values on
        both branches must be reported."""
        # t3
        dummyData = np.arange(50)
        dummyData[:] = 123
        repo = repo_1_br_no_conf
        co = repo.checkout(write=True)
        co.arraysets['dummy']['7'] = dummyData
        co.commit('mutation in master')
        co.close()
        co = repo.checkout(write=True, branch='testbranch')
        dummyData[:] = 234
        co.arraysets['dummy']['7'] = dummyData
        co.commit('mutation in dev')
        co.close()
        co = repo.checkout()
        conflicts = co.diff.branch('testbranch')[1]
        assert len(conflicts['sample']['dummy'].t3) == 1
        assert conflicts['sample']['dummy'].t3[0].data_name == '7'
        co.close()
    def test_aset_addition_conflict(self, written_repo):
        """Conflict type t1 (arraysets): the same arrayset name created with
        different schemas on both branches must be reported."""
        # t1
        repo = written_repo
        repo.create_branch('testbranch')
        co = repo.checkout(write=True)
        co.arraysets.init_arrayset(name='testing_aset', shape=(5, 7), dtype=np.float64)
        co.commit('aset init in master')
        co.close()
        co = repo.checkout(write=True, branch='testbranch')
        co.arraysets.init_arrayset(name='testing_aset', shape=(7, 7), dtype=np.float64)
        co.commit('aset init in dev')
        co.close()
        co = repo.checkout()
        conflicts = co.diff.branch('testbranch')[1]
        assert len(conflicts['aset'].t1) == 1
        assert conflicts['aset'].t1[0] == 'testing_aset'
        co.close()
def test_aset_removal_conflict(self, written_repo):
    # t21 and t22
    """Arrayset removed on one branch but re-initialized (mutated) on the
    other lands in the t21/t22 conflict buckets, one entry each."""
    repo = written_repo
    co = repo.checkout(write=True)
    co.arraysets.init_arrayset(name='testing_aset1', shape=(5, 7), dtype=np.float64)
    co.arraysets.init_arrayset(name='testing_aset2', shape=(5, 7), dtype=np.float64)
    co.commit('added asets')
    co.close()
    # branch AFTER the asets exist so both sides share the base commit
    repo.create_branch('testbranch')
    co = repo.checkout(write=True)
    # master: drop both, recreate aset2 with a different dtype (mutation)
    del co.arraysets['testing_aset1']
    del co.arraysets['testing_aset2']
    co.arraysets.init_arrayset(name='testing_aset2', shape=(5, 7), dtype=np.float32)
    co.commit('mutation and removal from master')
    co.close()
    co = repo.checkout(write=True, branch='testbranch')
    # dev: mirror image -- recreate aset1 instead
    del co.arraysets['testing_aset1']
    del co.arraysets['testing_aset2']
    co.arraysets.init_arrayset(name='testing_aset1', shape=(5, 7), dtype=np.float32)
    co.commit('mutation and removal from dev')
    co.close()
    co = repo.checkout()
    conflicts = co.diff.branch('testbranch')[1]
    assert len(conflicts['aset'].t21) == 1
    assert len(conflicts['aset'].t22) == 1
    assert conflicts['aset'].t21[0] == 'testing_aset1'
    assert conflicts['aset'].t22[0] == 'testing_aset2'
    co.close()
def test_aset_mutation_conflict(self, written_repo):
    # t3
    """Re-initializing the same arrayset with different schemas on both
    branches is a t3 (both-mutated) conflict."""
    repo = written_repo
    co = repo.checkout(write=True)
    co.arraysets.init_arrayset(name='testing_aset', shape=(5, 7), dtype=np.float64)
    co.commit('added aset')
    co.close()
    repo.create_branch('testbranch')
    co = repo.checkout(write=True)
    # master changes the shape
    del co.arraysets['testing_aset']
    co.arraysets.init_arrayset(name='testing_aset', shape=(7, 7), dtype=np.float64)
    co.commit('mutation from master')
    co.close()
    co = repo.checkout(write=True, branch='testbranch')
    # dev changes the dtype instead
    del co.arraysets['testing_aset']
    co.arraysets.init_arrayset(name='testing_aset', shape=(5, 7), dtype=np.float32)
    co.commit('mutation from dev')
    co.close()
    co = repo.checkout()
    conflicts = co.diff.branch('testbranch')[1]
    assert len(conflicts['aset'].t3) == 1
    assert conflicts['aset'].t3[0] == 'testing_aset'
    co.close()
def test_meta_addition_conflict(self, repo_1_br_no_conf):
    # t1
    """Adding the same metadata key with different values on both branches
    is a t1 (both-added) conflict."""
    repo = repo_1_br_no_conf
    co = repo.checkout(write=True, branch='testbranch')
    co.metadata['metatest'] = 'value1'
    co.commit('metadata addition')
    co.close()
    co = repo.checkout(write=True)
    co.metadata['metatest'] = 'value2'
    co.commit('metadata addition')
    co.close()
    co = repo.checkout()
    conflicts = co.diff.branch('testbranch')[1]
    assert conflicts['meta'].t1[0] == create_meta_nt('metatest')
    assert len(conflicts['meta'].t1) == 1
    co.close()
def test_meta_removal_conflict(self, repo_1_br_no_conf):
    # t21 and t22
    """Metadata key removed on one branch but mutated on the other must
    populate the t21 and t22 conflict buckets (one entry each)."""
    repo = repo_1_br_no_conf
    co = repo.checkout(write=True, branch='testbranch')
    co.metadata['hello'] = 'again'  # this is world in master
    del co.metadata['somemetadatakey']
    co.commit('removed & mutated')
    co.close()
    co = repo.checkout(write=True)
    # master does the mirror image: removes 'hello', mutates the other key
    del co.metadata['hello']
    co.metadata['somemetadatakey'] = 'somemetadatavalue - not anymore'
    co.commit('removed & mutation')
    co.close()
    co = repo.checkout()
    conflicts = co.diff.branch('testbranch')[1]
    assert conflicts['meta'].t21[0] == create_meta_nt('hello')
    assert len(conflicts['meta'].t21) == 1
    assert conflicts['meta'].t22[0] == create_meta_nt('somemetadatakey')
    assert len(conflicts['meta'].t22) == 1
    co.close()
def test_meta_mutation_conflict(self, repo_1_br_no_conf):
    # t3
    """Mutating the same metadata key to different values on both branches
    is a t3 (both-mutated) conflict."""
    repo = repo_1_br_no_conf
    co = repo.checkout(write=True, branch='testbranch')
    co.metadata['hello'] = 'again'  # this is world in master
    co.commit('mutated')
    co.close()
    co = repo.checkout(write=True)
    co.metadata['hello'] = 'again and again'
    co.commit('mutation')
    co.close()
    co = repo.checkout()
    conflicts = co.diff.branch('testbranch')[1]
    assert conflicts['meta'].t3[0] == create_meta_nt('hello')
    assert len(conflicts['meta'].t3) == 1
    co.close()
def test_commits_inside_cm(self, written_repo, array5by7):
    """Commits made inside an arrayset/metadata context manager persist and
    show up correctly in a later branch diff."""
    repo = written_repo
    repo.create_branch('testbranch')
    co = repo.checkout(write=True, branch='testbranch')
    aset = co.arraysets['_aset']
    aset2 = co.arraysets.init_arrayset('aset2', prototype=array5by7)
    aset2[1] = array5by7
    # committing while the context managers are held must be allowed
    with aset, co.metadata:
        aset[100] = array5by7
        co.metadata['crazykey'] = 'crazyvalue'
        co.commit('inside cm')
        aset[101] = array5by7
        co.commit('another commit inside cm')
    co.close()
    co = repo.checkout(branch='testbranch')
    assert np.allclose(co.arraysets['_aset'][101], array5by7)
    # index [0] of diff.branch() holds the diff record (not the conflicts)
    diff = co.diff.branch('master')[0]
    assert create_meta_nt('crazykey') in diff['metadata']['master']['additions'].keys()
    assert 'aset2' in diff['arraysets']['master']['additions'].keys()
    for record in diff['samples']['master']['_aset']['additions']:
        assert record.data_name in [100, 101]
    co.close()
class TestWriterDiff(object):
    """Diff/status behaviour of a write-enabled checkout."""

    def test_status_and_staged_meta(self, written_repo):
        """Metadata addition flips status to DIRTY and appears in the staged
        diff; committing returns status to CLEAN."""
        repo = written_repo
        co = repo.checkout(write=True)
        co.metadata['hello_from_test'] = 'hai to test'
        assert co.diff.status() == 'DIRTY'
        diff = co.diff.staged()[0]
        assert create_meta_nt('hello_from_test') in diff['metadata']['master']['additions']
        co.commit('init metadata')
        assert co.diff.status() == 'CLEAN'
        co.close()

    def test_status_and_staged_samples(self, written_repo):
        """Sample addition is staged; a read-only checkout has no status()."""
        dummyData = np.zeros((5, 7))
        repo = written_repo
        co = repo.checkout()
        with pytest.raises(AttributeError):
            co.diff.status()  # Read checkout doesn't have status()
        co = repo.checkout(write=True)
        co.arraysets['_aset']['45'] = dummyData
        assert co.diff.status() == 'DIRTY'
        diffs = co.diff.staged()[0]
        # addition keys are records carrying the sample's data_name
        for key in diffs['samples']['master']['_aset']['additions'].keys():
            assert key.data_name == '45'
        co.commit('adding')
        assert co.diff.status() == 'CLEAN'
        co.close()

    def test_status_and_staged_aset(self, written_repo):
        """New arrayset is staged as an addition; untouched asets are listed
        under 'unchanged'."""
        repo = written_repo
        co = repo.checkout(write=True)
        co.arraysets.init_arrayset(name='sampleaset', shape=(3, 5), dtype=np.float32)
        assert co.diff.status() == 'DIRTY'
        diff = co.diff.staged()[0]
        assert 'sampleaset' in diff['arraysets']['master']['additions'].keys()
        assert '_aset' in diff['arraysets']['master']['unchanged'].keys()
        co.commit('init aset')
        assert co.diff.status() == 'CLEAN'
        co.close()
| 38.718016
| 114
| 0.606312
|
4a0b4dbe7bd81cb0ee2332ba66acb8eaf9e1a4bf
| 2,356
|
py
|
Python
|
datahub/company/test/models/test_adviser.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 6
|
2019-12-02T16:11:24.000Z
|
2022-03-18T10:02:02.000Z
|
datahub/company/test/models/test_adviser.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 1,696
|
2019-10-31T14:08:37.000Z
|
2022-03-29T12:35:57.000Z
|
datahub/company/test/models/test_adviser.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 9
|
2019-11-22T12:42:03.000Z
|
2021-09-03T14:25:05.000Z
|
import pytest
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from datahub.company.models import Advisor
from datahub.company.test.factories import AdviserFactory
# Every test in this module requires database access.
pytestmark = pytest.mark.django_db
@pytest.mark.parametrize(
    'email, domain',
    (
        ('adviser@dit.gov.uk', 'dit.gov.uk'),
        # Emails can have @ if in quotes
        ('"adviser@dit"@dit.gov.uk', 'dit.gov.uk'),
        # Domain may not have a .
        ('adviser@dit', 'dit'),
        # Domain may have different case
        ('adviser@Dit.gov.uk', 'dit.gov.uk'),
        ('adviser@DIT.GOV.UK', 'dit.gov.uk'),
        # Invalid email
        ('adviser', None),
    ),
)
def test_get_email_domain(email, domain, db):
    """
    Test that the `Adviser.get_email_domain` method
    returns the lower-cased domain for the given adviser's email,
    or None when the email has no domain part.
    """
    # both email fields are set so the lookup works regardless of which
    # one the method inspects
    adviser = AdviserFactory(email=email, contact_email=email)
    assert adviser.get_email_domain() == domain
@pytest.mark.parametrize(
    'sso_email_user_id',
    (
        'test@dit.gov.uk',
        None,
    ),
)
def test_adviser_sso_email_user_id_can_store_email_or_none(sso_email_user_id):
    """Test that SSO email user ID can store an email address or None."""
    # sanity-check the factory creates (not reuses) an adviser
    assert Advisor.objects.count() == 0
    AdviserFactory(sso_email_user_id=sso_email_user_id)
    assert Advisor.objects.filter(sso_email_user_id=sso_email_user_id).exists()
def test_adviser_sso_email_user_id_is_validated():
    """Test that SSO email user ID is validated as an email address on
    full_clean()."""
    adviser = Advisor.objects.create()
    adviser.sso_email_user_id = 'lorem ipsum'
    with pytest.raises(ValidationError) as excinfo:
        adviser.full_clean()
    assert dict(excinfo.value)['sso_email_user_id'] == ['Enter a valid email address.']
def test_adviser_sso_email_user_id_unique_constraint():
    """Test that a duplicate SSO email user ID violates the DB unique
    constraint."""
    duplicate_email = 'test@dit.gov.uk'
    # The `AdviserFactory` is configured to use `get_or_create` instead of `create`
    # so the model manager is used directly to force the second insert
    Advisor.objects.create(email='a@a.a', sso_email_user_id=duplicate_email)
    with pytest.raises(IntegrityError) as excinfo:
        Advisor.objects.create(email='b@b.b', sso_email_user_id=duplicate_email)
    assert (
        'duplicate key value violates unique constraint "company_advisor_sso_email_user_id_key"'
    ) in str(excinfo.value)
| 34.647059
| 96
| 0.701188
|
4a0b4e3e52d11f811baabd144c14ee0c675103ea
| 2,391
|
py
|
Python
|
magenta/models/latent_transfer/configs/joint_exp_2mnist_parameterized.py
|
workproduct/magenta
|
ba43c3e1a2b3b6a5731fa10a5a6bddd0c821eb84
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/latent_transfer/configs/joint_exp_2mnist_parameterized.py
|
workproduct/magenta
|
ba43c3e1a2b3b6a5731fa10a5a6bddd0c821eb84
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/latent_transfer/configs/joint_exp_2mnist_parameterized.py
|
workproduct/magenta
|
ba43c3e1a2b3b6a5731fa10a5a6bddd0c821eb84
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for MNIST <> MNIST transfer.
"""
# pylint:disable=invalid-name
import functools
from magenta.models.latent_transfer import model_joint
import tensorflow as tf
FLAGS = tf.flags.FLAGS

# Latent sizes come from command-line flags so this config can be reused
# across experiments with different dimensionalities.
n_latent = FLAGS.n_latent
n_latent_shared = FLAGS.n_latent_shared
layers = (128,) * 4  # four fully-connected layers of width 128
batch_size = 128

# Encoder/Decoder map between the per-domain latent space (n_latent) and the
# shared latent space (n_latent_shared); both domains reuse the same factories.
Encoder = functools.partial(
    model_joint.EncoderLatentFull,
    input_size=n_latent,
    output_size=n_latent_shared,
    layers=layers)

Decoder = functools.partial(
    model_joint.DecoderLatentFull,
    input_size=n_latent_shared,
    output_size=n_latent,
    layers=layers)

# The two VAE configs differ only in their prior-loss beta flags.
vae_config_A = {
    'Encoder': Encoder,
    'Decoder': Decoder,
    'prior_loss_beta': FLAGS.prior_loss_beta_A,
    'prior_loss': 'KL',
    'batch_size': batch_size,
    'n_latent': n_latent,
    'n_latent_shared': n_latent_shared,
}

vae_config_B = {
    'Encoder': Encoder,
    'Decoder': Decoder,
    'prior_loss_beta': FLAGS.prior_loss_beta_B,
    'prior_loss': 'KL',
    'batch_size': batch_size,
    'n_latent': n_latent,
    'n_latent_shared': n_latent_shared,
}

# Top-level config consumed by the joint-training script.  Both domains point
# at the same MNIST dataset/classifier configs (MNIST <> MNIST transfer).
config = {
    'vae_A': vae_config_A,
    'vae_B': vae_config_B,
    'config_A': 'mnist_0_nlatent64',
    'config_B': 'mnist_0_nlatent64',
    'config_classifier_A': 'mnist_classifier_0',
    'config_classifier_B': 'mnist_classifier_0',
    # model
    'prior_loss_align_beta': FLAGS.prior_loss_align_beta,
    'mean_recons_A_align_beta': FLAGS.mean_recons_A_align_beta,
    'mean_recons_B_align_beta': FLAGS.mean_recons_B_align_beta,
    'mean_recons_A_to_B_align_beta': FLAGS.mean_recons_A_to_B_align_beta,
    'mean_recons_B_to_A_align_beta': FLAGS.mean_recons_B_to_A_align_beta,
    'pairing_number': FLAGS.pairing_number,
    # training dynamics
    'batch_size': batch_size,
    'n_latent': n_latent,
    'n_latent_shared': n_latent_shared,
}
| 28.807229
| 74
| 0.737767
|
4a0b4e5acefa9b9def6ca5a01c75a450e7449dbd
| 3,475
|
py
|
Python
|
widgets/list_box/list_box.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | null | null | null |
widgets/list_box/list_box.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | null | null | null |
widgets/list_box/list_box.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | null | null | null |
"""\
wxListBox objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
import common
from edit_windows import ManagedBase, EditStylesMixin
import new_properties as np
from ChoicesProperty import *
class EditListBox(ManagedBase, EditStylesMixin):
    "Class to handle wxListBox objects"

    WX_CLASS = "wxListBox"
    _PROPERTIES = ["Widget", "style", "selection", "choices"]
    PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES

    def __init__(self, name, parent, index, choices):
        ManagedBase.__init__(self, name, parent, index)
        EditStylesMixin.__init__(self)
        # initialise instance properties
        # selection is restricted to the valid index range of the choices
        self.selection = np.SpinProperty(-1, val_range=len(choices)-1, immediate=True )
        self.choices = ChoicesProperty( choices, [(_('Label'), np.GridProperty.STRING)] )

    def create_widget(self):
        """Create the actual wx.ListBox from the stored choice labels."""
        choices = [c[0] for c in self.choices]
        self.widget = wx.ListBox(self.parent_window.widget, self.id, choices=choices)
        if self.selection>=0: self.widget.SetSelection(self.selection)
        self.widget.Bind(wx.EVT_LEFT_DOWN, self.on_set_focus)

    def get_property_handler(self, prop_name):
        """Return a dedicated XML handler for 'choices'; defer otherwise."""
        if prop_name == 'choices':
            return ChoicesHandler(self)
        return ManagedBase.get_property_handler(self, prop_name)

    def _properties_changed(self, modified, actions):  # XXX from CheckListBox
        """React to property edits, keeping selection within the valid range
        and mirroring choice edits into the live widget."""
        # self.selection needs to be in range (-1,len(self.choices)-1)
        choices = self.choices
        max_selection = len(choices)-1
        set_selection = False
        if not modified or "choices" in modified:
            # adjust range of selection
            self.properties['selection'].set_range(-1, max_selection)
            if self.selection>max_selection:
                set_selection = True
            if self.widget:
                # update widget
                self.widget.Clear()
                for c in choices: self.widget.Append(c[0])
                if modified: actions.add("layout")

        if not modified or "selection" in modified or set_selection:
            # clamp selection if it now points past the last choice
            if self.selection>max_selection:
                self.properties['selection'].set(max_selection)
                set_selection = True
            if self.widget and set_selection:
                self.widget.SetSelection(self.selection)  # -1 is identical to wx.NOT_FOUND

        EditStylesMixin._properties_changed(self, modified, actions)
        ManagedBase._properties_changed(self, modified, actions)
def builder(parent, index):
    """Factory for a new EditListBox preset with a single default choice."""
    widget_name = parent.toplevel_parent.get_next_contained_name('list_box_%d')
    with parent.frozen():
        new_editor = EditListBox(widget_name, parent, index, [[u'choice 1']])
        new_editor.properties["style"].set_to_default()
        if parent.widget:
            new_editor.create()
    return new_editor
def xml_builder(parser, base, name, parent, index):
    """Factory used when an EditListBox is rebuilt from a saved XML file."""
    editor = EditListBox(name, parent, index, [])
    return editor
def initialize():
    """Register EditListBox with wxGlade and return its palette button."""
    key = 'EditListBox'
    common.widget_classes[key] = EditListBox
    common.widgets[key] = builder
    common.widgets_from_xml[key] = xml_builder
    return common.make_object_button(key, 'list_box.xpm')
| 37.771739
| 102
| 0.684029
|
4a0b503e4193269154a98f895378cc4e36d7df59
| 30,946
|
py
|
Python
|
bpy_lambda/2.78/scripts/addons/object_skinify.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons/object_skinify.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons/object_skinify.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | 1
|
2019-11-24T18:43:42.000Z
|
2019-11-24T18:43:42.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Blender add-on registration metadata.
bl_info = {
    "name": "Skinify Rig",
    "author": "Albert Makac (karab44)",
    "version": (0, 9, 1),
    "blender": (2, 7, 9),
    "location": "Properties > Bone > Skinify Rig (visible on pose mode only)",
    "description": "Creates a mesh object from selected bones",
    "warning": "",
    "wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/"
                "Py/Scripts/Object/Skinify",
    "category": "Object"}
import bpy
from bpy.props import (
FloatProperty,
IntProperty,
BoolProperty,
PointerProperty,
)
from bpy.types import (
Operator,
Panel,
PropertyGroup,
)
from mathutils import (
Vector,
Euler,
)
from bpy.app.handlers import persistent
from enum import Enum
# can the armature data properties group_prop and row be fetched directly from the rigify script?
# Per-rig fingerprints of the rigify layer properties: each tuple is
# (row, group_prop) for one layer, compared against the armature's
# 'rigify_layers' by identify_rig().  Legacy/pitchipoy rigs predate
# group_prop, hence the None second elements.
horse_data = \
    (1, 5), (2, 4), (3, 0), (4, 3), (5, 4), (1, 0), (1, 0), (7, 2), (8, 5), (9, 4), \
    (7, 2), (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), \
    (13, 6), (1, 4), (14, 6), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

shark_data = \
    (1, 5), (2, 4), (1, 0), (3, 3), (4, 4), (5, 6), (6, 5), (7, 4), (6, 5), (7, 4), \
    (8, 3), (9, 4), (1, 0), (1, 6), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), \
    (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

bird_data = \
    (1, 6), (2, 4), (1, 0), (3, 3), (4, 4), (1, 0), (1, 0), (6, 5), (8, 0), (7, 4), (6, 5), \
    (8, 0), (7, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (1, 0), (1, 0), \
    (13, 6), (14, 4), (1, 0), (8, 6), (1, 0), (1, 0), (1, 0), (14, 1),

cat_data = \
    (1, 5), (2, 2), (2, 3), (3, 3), (4, 4), (5, 6), (6, 4), (7, 2), (8, 5), (9, 4), (7, 2), \
    (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (13, 3), (14, 4), \
    (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (16, 1),

biped_data = \
    (1, 0), (1, 0), (1, 0), (3, 3), (4, 4), (1, 0), (1, 0), (7, 2), (8, 5), (9, 4), (7, 2), \
    (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (1, 0), (1, 0), \
    (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

human_data = \
    (1, 5), (2, 2), (2, 3), (3, 3), (4, 4), (5, 6), (6, 4), (7, 2), (8, 5), (9, 4), (7, 2), \
    (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (1, 0), (1, 0), \
    (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

wolf_data = \
    (1, 5), (2, 2), (2, 3), (3, 3), (4, 4), (5, 6), (6, 4), (7, 2), (8, 5), (9, 4), (7, 2), \
    (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (13, 6), (1, 0), \
    (13, 0), (13, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

quadruped_data = \
    (1, 0), (2, 0), (2, 0), (3, 3), (4, 4), (5, 0), (6, 0), (7, 2), (8, 5), (9, 4), \
    (7, 2), (8, 5), (9, 4), (10, 2), (11, 5), (12, 4), (10, 2), (11, 5), (12, 4), (13, 6), \
    (1, 0), (13, 0), (13, 0), (1, 0), (1, 0), (1, 0), (1, 0), (1, 0), (14, 1),

human_legacy_data = \
    (1, None), (1, None), (2, None), (1, None), (3, None), (3, None), (4, None), (5, None), \
    (6, None), (4, None), (5, None), (6, None), (7, None), (8, None), (9, None), (7, None), \
    (8, None), (9, None), (1, None), (1, None), (1, None), (1, None), (1, None), (1, None), \
    (1, None), (1, None), (1, None), (1, None),

pitchipoy_data = \
    (1, None), (2, None), (2, None), (3, None), (4, None), (5, None), (6, None), (7, None), \
    (8, None), (9, None), (7, None), (8, None), (9, None), (10, None), (11, None), (12, None), \
    (10, None), (11, None), (12, None), (1, None), (1, None), (1, None), (1, None), (1, None), \
    (1, None), (1, None), (1, None), (1, None),

# Order must match Skin.Rig_type enum values (HORSE=0 .. PITCHIPOY=9).
rigify_data = horse_data, shark_data, bird_data, cat_data, biped_data, human_data, \
              wolf_data, quadruped_data, human_legacy_data, pitchipoy_data
# Skin.rig_type.ENUM
# Skin.junction_dict['bname'].data[0] idx, data[1] idx + 1, data[2] thickness
# NOTE each fragment contains section information about adequate bone
# junctions idx and idx + 1 and these vertices' ids share common thickness
class Skin(object):
    """Bookkeeping for skin-modifier junctions of a generated mesh.

    ``junctions_dict`` maps a bone name to ``[idx, idx + 1, thickness]``:
    the vertex indices of the bone's head/tail pair in the generated mesh
    (``None`` until assigned) and the skin thickness shared by both.
    """

    class Rig_type(Enum):
        HORSE = 0
        SHARK = 1
        BIRD = 2
        CAT = 3
        BIPED = 4
        HUMAN = 5
        WOLF = 6
        QUAD = 7
        LEGACY = 8
        PITCHIPOY = 9
        OTHER = 10

    def __init__(self, rig_type):
        self.rig_type = rig_type
        self.junctions_dict = dict()

    def fragment_create(self, bname, idx=None, thickness=0.0):
        """(Re)create the junction record for bone *bname*.

        ``idx`` is the index of the bone's first vertex; the second vertex
        is always ``idx + 1`` (or ``None`` when idx is unassigned).
        """
        # single literal instead of the insert/insert/append dance;
        # behavior is identical: [idx, idx + 1 | None, thickness]
        second = idx + 1 if idx is not None else None
        self.junctions_dict[bname] = [idx, second, thickness]

    # for the sake of code clarity
    def fragment_update(self, bname, idx=None, thickness=0.0):
        """Alias of fragment_create, kept so call sites read as updates."""
        self.fragment_create(bname, idx, thickness)
# Module-level default rig type.  NOTE(review): generate_edges() binds its
# own local `rig_type`, so this global appears to be a fallback/leftover --
# confirm before removing.
rig_type = Skin.Rig_type.OTHER
# initialize properties
def init_props():
    """Reset the add-on's scene properties to their default values."""
    # additional check - this should be a rare case if the handler
    # wasn't removed for some reason and the add-on is not toggled on/off
    if hasattr(bpy.types.Scene, "skinify"):
        scn = bpy.context.scene.skinify
        scn.connect_mesh = False
        scn.connect_parents = False
        scn.generate_all = False
        scn.thickness = 0.8
        scn.finger_thickness = 0.25
        scn.apply_mod = True
        scn.parent_armature = True
        scn.sub_level = 1
def identify_rig():
    """Match the active armature's rigify layer data against the known rig
    fingerprints and return the corresponding Skin.Rig_type.

    Falls back to Rig_type.OTHER when the armature has no rigify layers or
    no fingerprint matches every layer.
    """
    if 'rigify_layers' not in bpy.context.object.data:
        return Skin.Rig_type.OTHER  # non recognized

    LEGACY_LAYERS_SIZE = 28
    layers = bpy.context.object.data['rigify_layers']
    for type, rig in enumerate(rigify_data):
        index = 0
        for props in layers:
            # legacy layers have no 'group_prop'; they match only rig data
            # whose second element is None
            if len(layers) == LEGACY_LAYERS_SIZE and 'group_prop' not in props:
                if props['row'] != rig[index][0] or rig[index][1] is not None:
                    break
            elif (props['row'] != rig[index][0]) or (props['group_prop'] != rig[index][1]):
                break
            # SUCCESS if reaches the end
            if index == len(layers) - 1:
                return Skin.Rig_type(type)
            index = index + 1

    return Skin.Rig_type.OTHER
# prepares customizable ignore and thickness lists
# edit these lists to suits your taste
def prepare_lists(rig_type, finger_thickness):
    """Build the bone-name ignore list and per-bone-name thickness overrides
    for the given rig type.

    Returns (ignore_list, thickness_dict): substring-matched bone names to
    skip during edge generation, and name -> thickness overrides.
    """
    # EXAMPLE IGNORE LIST
    # detect the head, face, hands, breast, heels or other exceptionary bones for exclusion or customization
    common_ignore_list = ['eye', 'heel', 'breast', 'root']

    horse_ignore_list = ['chest', 'belly', 'pelvis', 'jaw', 'nose', 'skull', 'ear.']
    shark_ignore_list = ['jaw']
    bird_ignore_list = [
        'face', 'pelvis', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue', 'beak'
    ]
    cat_ignore_list = [
        'face', 'belly', 'pelvis.C', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue'
    ]
    biped_ignore_list = ['pelvis']
    human_ignore_list = [
        'face', 'pelvis', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue'
    ]
    wolf_ignore_list = [
        'face', 'pelvis', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue'
    ]
    quad_ignore_list = [
        'face', 'pelvis', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue'
    ]
    rigify_legacy_ignore_list = []
    pitchipoy_ignore_list = [
        'face', 'pelvis', 'nose', 'lip', 'jaw', 'chin', 'ear.', 'brow',
        'lid', 'forehead', 'temple', 'cheek', 'teeth', 'tongue'
    ]
    other_ignore_list = []

    # EXAMPLE THICKNESS
    # feel free to modify and customize the list by adding elements followed by comma
    # common_thickness_dict = {"hand": common_finger_thickness, "head": common_head_thickness}
    common_finger_thickness = finger_thickness
    common_thickness_dict = {"hand": common_finger_thickness}

    horse_thickness_dict = {}
    shark_thickness_dict = {}
    bird_thickness_dict = {}
    cat_thickness_dict = {}
    face_thickness = 0.20
    biped_thickness_dict = {}
    human_thickness_dict = {"face": face_thickness}
    wolf_thickness_dict = {}
    quad_thickness_dict = {}
    rigify_legacy_thickness_dict = {}
    pitchipoy_thickness_dict = {"face": face_thickness}
    other_thickness_dict = {}

    # combine lists depending on rig type
    ignore_list = common_ignore_list
    thickness_dict = common_thickness_dict

    if rig_type == Skin.Rig_type.HORSE:
        ignore_list = ignore_list + horse_ignore_list
        thickness_dict.update(horse_thickness_dict)
        print("RIDER OF THE APOCALYPSE")
    elif rig_type == Skin.Rig_type.SHARK:
        ignore_list = ignore_list + shark_ignore_list
        thickness_dict.update(shark_thickness_dict)
        print("DEADLY JAWS")
    elif rig_type == Skin.Rig_type.BIRD:
        ignore_list = ignore_list + bird_ignore_list
        thickness_dict.update(bird_thickness_dict)
        print("WINGS OF LIBERTY")
    elif rig_type == Skin.Rig_type.CAT:
        ignore_list = ignore_list + cat_ignore_list
        thickness_dict.update(cat_thickness_dict)
        print("MEOW")
    elif rig_type == Skin.Rig_type.BIPED:
        ignore_list = ignore_list + biped_ignore_list
        thickness_dict.update(biped_thickness_dict)
        print("HUMANOID")
    elif rig_type == Skin.Rig_type.HUMAN:
        ignore_list = ignore_list + human_ignore_list
        thickness_dict.update(human_thickness_dict)
        print("JUST A HUMAN AFTER ALL")
    elif rig_type == Skin.Rig_type.WOLF:
        ignore_list = ignore_list + wolf_ignore_list
        thickness_dict.update(wolf_thickness_dict)
        print("WHITE FANG")
    elif rig_type == Skin.Rig_type.QUAD:
        ignore_list = ignore_list + quad_ignore_list
        thickness_dict.update(quad_thickness_dict)
        print("MYSTERIOUS CREATURE")
    elif rig_type == Skin.Rig_type.LEGACY:
        ignore_list = ignore_list + rigify_legacy_ignore_list
        thickness_dict.update(rigify_legacy_thickness_dict)
        print("LEGACY RIGIFY")
    elif rig_type == Skin.Rig_type.PITCHIPOY:
        ignore_list = ignore_list + pitchipoy_ignore_list
        thickness_dict.update(pitchipoy_thickness_dict)
        print("PITCHIPOY")
    elif rig_type == Skin.Rig_type.OTHER:
        ignore_list = ignore_list + other_ignore_list
        thickness_dict.update(other_thickness_dict)
        print("rig non recognized...")

    return ignore_list, thickness_dict
# generates edges from vertices used by skin modifier
def generate_edges(mesh, shape_object, bones, scale, connect_mesh=False, connect_parents=False,
                   head_ornaments=False, generate_all=False, thickness=0.0, finger_thickness=0.0):
    """
    This function adds vertices for all bones' heads and tails
    (and edges between them) into *mesh*, recording the vertex index
    pairs per bone in the returned Skin instance so thickness can be
    applied per segment later.
    """
    me = mesh
    verts = []
    edges = []
    idx = 0  # running vertex index; each segment consumes idx and idx + 1
    rig_type = identify_rig()
    skin = Skin(rig_type)
    # prepare the list
    ignore_list, thickness_dict = prepare_lists(skin.rig_type, finger_thickness)

    # create default junctions for all bones
    for b in bones:
        # set default thickness to all new junctions
        skin.fragment_create(bname=b.name, idx=None, thickness=thickness)

    # edge generator loop
    for b in bones:
        # look for rig's specific bones and their childs and set individual thickness
        for bname, thick in thickness_dict.items():
            if bname.lower() in b.name.lower():
                skin.fragment_update(bname=b.name, idx=None, thickness=thick)

                for c in b.children_recursive:
                    # update junctions with specific thickness
                    skin.fragment_update(bname=c.name, idx=None, thickness=thick)
        found = False
        for i in ignore_list:
            if i.lower() in b.name.lower():
                found = True
                break
        if found and generate_all is False:
            continue
        # fix for drawing rootbone and relationship lines
        if 'root' in b.name.lower() and generate_all is False:
            continue
        # ignore any head ornaments
        if head_ornaments is False:
            if b.parent is not None:
                if 'head' in b.parent.name.lower() and not rig_type == Skin.Rig_type.HUMAN:
                    continue

                if 'face' in b.parent.name.lower() and rig_type == Skin.Rig_type.HUMAN:
                    continue
        if connect_parents:
            # bridge unconnected parent/child gaps with an extra edge
            if b.parent is not None and b.parent.bone.select is True and b.bone.use_connect is False:
                if 'root' in b.parent.name.lower() and generate_all is False:
                    continue
                # ignore shoulder
                if 'shoulder' in b.name.lower() and connect_mesh is True:
                    continue
                # connect the upper arm directly with chest ommiting shoulders
                if 'shoulder' in b.parent.name.lower() and connect_mesh is True:
                    vert1 = b.head
                    vert2 = b.parent.parent.tail
                else:
                    vert1 = b.head
                    vert2 = b.parent.tail

                verts.append(vert1)
                verts.append(vert2)
                edges.append([idx, idx + 1])

                # also make list of edges made of gaps between the bones
                for bname, data in skin.junctions_dict.items():
                    if b.name == bname:
                        skin.fragment_update(b.name, idx, data[2])
                        break
                # create new segment for new connections
                skin.fragment_create(b.name + b.parent.name, idx, data[2])
                idx = idx + 2

        # for bvh free floating hips and hips correction for rigify and pitchipoy
        if ((generate_all is False and 'hip' in b.name.lower()) or
                (generate_all is False and (b.name == 'hips' and rig_type == Skin.Rig_type.LEGACY) or
                (b.name == 'spine' and rig_type == Skin.Rig_type.PITCHIPOY) or (b.name == 'spine' and
                rig_type == Skin.Rig_type.HUMAN) or (b.name == 'spine' and rig_type == Skin.Rig_type.BIPED))):
            continue

        vert1 = b.head
        vert2 = b.tail
        verts.append(vert1)
        verts.append(vert2)
        edges.append([idx, idx + 1])
        # insert idx to junctions and update
        for bname, data in skin.junctions_dict.items():
            if b.name == bname:
                skin.fragment_update(b.name, idx, data[2])
        idx = idx + 2

    # Create mesh from given verts, faces
    me.from_pydata(verts, edges, [])
    # Update mesh with new data
    me.update()
    # set object scale exact as armature's scale
    shape_object.scale = scale

    return skin
# selects vertices
def select_vertices(mesh_obj, idx):
    """Select exactly the vertices of *mesh_obj* listed in *idx*.

    Restores the object's previous mode afterwards and returns the list of
    selected vertex indices.
    """
    bpy.context.scene.objects.active = mesh_obj
    mode = mesh_obj.mode
    # round-trip through EDIT mode to clear any existing selection;
    # vertex flags can only be written from OBJECT mode
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    for i in idx:
        mesh_obj.data.vertices[i].select = True

    selectedVerts = [v.index for v in mesh_obj.data.vertices if v.select]

    bpy.ops.object.mode_set(mode=mode)
    return selectedVerts
def generate_mesh(shape_object, size, sub_level=1, connect_mesh=False, connect_parents=False,
                  generate_all=False, apply_mod=True, skin=None, bones=[]):
    """Turn the generated edge skeleton in *shape_object* into a solid mesh.

    Adds a Skin modifier whose per-junction thickness is driven by *skin*,
    optionally welds vertices and fixes hand topology for full-rig
    generation, adds a Subsurf modifier at *sub_level*, and finally applies
    both modifiers when *apply_mod* is true.

    NOTE(review): ``bones=[]`` is a mutable default argument; it is only
    read here (``len(bones)``), so it is harmless, but ``bones=None`` with
    a guard would be safer.
    """
    total_bones_num = len(bpy.context.object.pose.bones.keys())
    selected_bones_num = len(bones)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='DESELECT')
    # add skin modifier
    shape_object.modifiers.new("Skin", 'SKIN')
    bpy.ops.mesh.select_all(action='SELECT')
    # Build an operator-override dict pointing at the first 3D view window
    # so skin/transform operators can run regardless of the active area.
    override = bpy.context.copy()
    for area in bpy.context.screen.areas:
        if area.type == 'VIEW_3D':
            for region in area.regions:
                if region.type == 'WINDOW':
                    override['area'] = area
                    override['region'] = region
                    override['edit_object'] = bpy.context.edit_object
                    override['scene'] = bpy.context.scene
                    override['active_object'] = shape_object
                    override['object'] = shape_object
                    override['modifier'] = bpy.context.object.modifiers
                    break
    # calculate optimal, normalized thickness for each segment
    bpy.ops.object.skin_root_mark(override)
    # select finger vertices and calculate optimal thickness for fingers to fix proportions
    # by default set fingers thickness to 25 percent of body thickness
    # make loose hands only for better topology
    # Per-junction resize: each junction entry appears to hold
    # (start_vertex_idx, end_vertex_idx, thickness) — confirm against Skin.
    if len(skin.junctions_dict.keys()) > 0:
        for bname, data in skin.junctions_dict.items():
            if data[0] is not None:
                fragment_idx = list()
                fragment_idx.append(data[0])
                fragment_idx.append(data[1])
                thickness = data[2]
                select_vertices(shape_object, fragment_idx)
                bpy.ops.transform.skin_resize(override,
                                              value=(1 * thickness * (size / 10), 1 * thickness * (size / 10),
                                                     1 * thickness * (size / 10)), constraint_axis=(False, False, False),
                                              constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED',
                                              proportional_edit_falloff='SMOOTH', proportional_size=1
                                              )
    shape_object.modifiers["Skin"].use_smooth_shade = True
    shape_object.modifiers["Skin"].use_x_symmetry = True
    # bpy.ops.mesh.select_all(action='DESELECT')
    # Weld duplicate vertices when a solid (connected) shape was requested.
    if connect_mesh:
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.remove_doubles()
    # fix rigify and pitchipoy hands topology
    # Only when a full, connected rig was generated: merge the palm
    # vertices of each hand (indices are rig-type specific) and rescale.
    if connect_mesh and connect_parents and generate_all is False and \
            (skin.rig_type == Skin.Rig_type.LEGACY or skin.rig_type == Skin.Rig_type.PITCHIPOY or
             skin.rig_type == Skin.Rig_type.HUMAN) and selected_bones_num == total_bones_num:
        # thickness will set palm vertex for both hands look pretty
        corrective_thickness = 2.5
        # left hand verts
        merge_idx = []
        if skin.rig_type == Skin.Rig_type.LEGACY:
            merge_idx = [8, 9, 14, 18, 23, 28]
        elif skin.rig_type == Skin.Rig_type.PITCHIPOY or skin.rig_type == Skin.Rig_type.HUMAN:
            merge_idx = [10, 11, 16, 20, 25, 30]
        select_vertices(shape_object, merge_idx)
        bpy.ops.mesh.merge(type='CENTER')
        bpy.ops.transform.skin_resize(override,
                                      value=(corrective_thickness, corrective_thickness, corrective_thickness),
                                      constraint_axis=(False, False, False), constraint_orientation='GLOBAL',
                                      mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH',
                                      proportional_size=1
                                      )
        bpy.ops.mesh.select_all(action='DESELECT')
        # right hand verts
        if skin.rig_type == Skin.Rig_type.LEGACY:
            merge_idx = [31, 32, 37, 41, 46, 51]
        elif skin.rig_type == Skin.Rig_type.PITCHIPOY or skin.rig_type == Skin.Rig_type.HUMAN:
            merge_idx = [33, 34, 39, 43, 48, 53]
        select_vertices(shape_object, merge_idx)
        bpy.ops.mesh.merge(type='CENTER')
        bpy.ops.transform.skin_resize(override,
                                      value=(corrective_thickness, corrective_thickness, corrective_thickness),
                                      constraint_axis=(False, False, False), constraint_orientation='GLOBAL',
                                      mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH',
                                      proportional_size=1
                                      )
        # making hands even more pretty
        bpy.ops.mesh.select_all(action='DESELECT')
        hands_idx = []  # left and right hand vertices
        if skin.rig_type == Skin.Rig_type.LEGACY:
            # hands_idx = [8, 33] # L and R
            hands_idx = [7, 30]
        elif skin.rig_type == Skin.Rig_type.PITCHIPOY or skin.rig_type == Skin.Rig_type.HUMAN:
            # hands_idx = [10, 35] # L and R
            hands_idx = [9, 32]
        select_vertices(shape_object, hands_idx)
        # change the thickness to make hands look less blocky and more sexy
        corrective_thickness = 0.7
        bpy.ops.transform.skin_resize(override,
                                      value=(corrective_thickness, corrective_thickness, corrective_thickness),
                                      constraint_axis=(False, False, False), constraint_orientation='GLOBAL',
                                      mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH',
                                      proportional_size=1
                                      )
        bpy.ops.mesh.select_all(action='DESELECT')
    # todo optionally take root from rig's hip tail or head depending on scenario
    # Root vertex index depends on rig type; only chosen for full rigs.
    root_idx = []
    if skin.rig_type == Skin.Rig_type.LEGACY and selected_bones_num == total_bones_num:
        root_idx = [59]
    elif (skin.rig_type == Skin.Rig_type.PITCHIPOY or skin.rig_type == Skin.Rig_type.HUMAN) and \
            selected_bones_num == total_bones_num:
        root_idx = [56]
    elif selected_bones_num == total_bones_num:
        root_idx = [0]
    if len(root_idx) > 0:
        select_vertices(shape_object, root_idx)
        bpy.ops.object.skin_root_mark(override)
    # skin in edit mode
    # add Subsurf modifier
    shape_object.modifiers.new("Subsurf", 'SUBSURF')
    shape_object.modifiers["Subsurf"].levels = sub_level
    shape_object.modifiers["Subsurf"].render_levels = sub_level
    bpy.ops.object.mode_set(mode='OBJECT')
    # object mode apply all modifiers
    if apply_mod:
        bpy.ops.object.modifier_apply(override, apply_as='DATA', modifier="Skin")
        bpy.ops.object.modifier_apply(override, apply_as='DATA', modifier="Subsurf")
    return {'FINISHED'}
def main(context):
    """Create a skin mesh for the selected pose bones of the active armature.

    Returns a ``(status, payload)`` pair: ``({'CANCELLED'}, error_message)``
    when the selection is unusable, otherwise ``({'FINISHED'}, mesh_data)``
    with the newly created mesh datablock.
    """
    # ### Check if selection is OK ###
    if len(context.selected_pose_bones) == 0 or \
            len(context.selected_objects) == 0 or \
            context.selected_objects[0].type != 'ARMATURE':
        return {'CANCELLED'}, "No bone selected or the Armature is hidden"
    scn = bpy.context.scene
    sknfy = scn.skinify
    # initialize the mesh object
    mesh_name = context.selected_objects[0].name + "_mesh"
    obj_name = context.selected_objects[0].name + "_object"
    armature_object = context.object
    origin = context.object.location
    bone_selection = context.selected_pose_bones
    oldLocation = None
    oldRotation = None
    oldScale = None
    armature_object = scn.objects.active
    armature_object.select = True
    old_pose_pos = armature_object.data.pose_position
    # Clear the armature's transform so the mesh is generated in a neutral
    # space; the saved transform is restored at the end of this function.
    bpy.ops.object.mode_set(mode='OBJECT')
    oldLocation = Vector(armature_object.location)
    oldRotation = Euler(armature_object.rotation_euler)
    oldScale = Vector(armature_object.scale)
    bpy.ops.object.rotation_clear(clear_delta=False)
    bpy.ops.object.location_clear(clear_delta=False)
    bpy.ops.object.scale_clear(clear_delta=False)
    if sknfy.apply_mod and sknfy.parent_armature:
        # Build against the rest pose when the mesh will be parented.
        armature_object.data.pose_position = 'REST'
    scale = bpy.context.object.scale
    size = bpy.context.object.dimensions[2]
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')
    bpy.ops.object.add(type='MESH', enter_editmode=False, location=origin)
    # get the mesh object
    ob = scn.objects.active
    ob.name = obj_name
    me = ob.data
    me.name = mesh_name
    # this way we fit mesh and bvh with armature modifier correctly
    skin = generate_edges(
        me, ob, bone_selection, scale, sknfy.connect_mesh,
        sknfy.connect_parents, sknfy.head_ornaments,
        sknfy.generate_all, sknfy.thickness, sknfy.finger_thickness
    )
    generate_mesh(ob, size, sknfy.sub_level,
                  sknfy.connect_mesh, sknfy.connect_parents, sknfy.generate_all,
                  sknfy.apply_mod, skin, bone_selection)
    # parent mesh with armature only if modifiers are applied
    if sknfy.apply_mod and sknfy.parent_armature:
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.select_all(action='DESELECT')
        ob.select = True
        armature_object.select = True
        scn.objects.active = armature_object
        bpy.ops.object.parent_set(type='ARMATURE_AUTO')
        armature_object.data.pose_position = old_pose_pos
        armature_object.select = False
    else:
        # Not parenting: give the mesh the armature's original transform.
        bpy.ops.object.mode_set(mode='OBJECT')
        ob.location = oldLocation
        ob.rotation_euler = oldRotation
        ob.scale = oldScale
        ob.select = False
    # Restore the armature's transform and return to pose mode.
    armature_object.select = True
    scn.objects.active = armature_object
    armature_object.location = oldLocation
    armature_object.rotation_euler = oldRotation
    armature_object.scale = oldScale
    bpy.ops.object.mode_set(mode='POSE')
    return {'FINISHED'}, me
class BONE_OT_custom_shape(Operator):
    '''Creates a mesh object at the selected bones positions'''
    bl_idname = "object.skinify_rig"
    bl_label = "Skinify Rig"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Available whenever something is active in the scene.
        return context.active_object is not None

    def execute(self, context):
        # main() returns a (status, payload) pair: payload carries the
        # error message on failure, or the created mesh on success.
        status, payload = main(context)
        if status == {'CANCELLED'}:
            self.report({'WARNING'}, payload)
            return {'CANCELLED'}
        self.report({'INFO'}, payload.name + " has been created")
        return {'FINISHED'}
class BONE_PT_custom_shape(Panel):
    # Bone-properties panel: exposes the Skinify settings and the
    # "Add Shape" operator button.
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "bone"
    bl_label = "Skinify Rig"

    @classmethod
    def poll(cls, context):
        # Show the panel only in pose mode with a bone active.
        ob = context.object
        return ob and ob.mode == 'POSE' and context.bone

    def draw(self, context):
        layout = self.layout
        scn = context.scene.skinify
        row = layout.row()
        row.operator("object.skinify_rig", text="Add Shape", icon='BONE_DATA')
        # Thickness controls (body vs. fingers).
        split = layout.split(percentage=0.3)
        split.label("Thickness:")
        split.prop(scn, "thickness", text="Body", icon='MOD_SKIN')
        split.prop(scn, "finger_thickness", text="Fingers", icon='HAND')
        split = layout.split(percentage=0.3)
        split.label("Mesh Density:")
        split.prop(scn, "sub_level", icon='MESH_ICOSPHERE')
        row = layout.row()
        row.prop(scn, "connect_mesh", icon='EDITMODE_HLT')
        row.prop(scn, "connect_parents", icon='CONSTRAINT_BONE')
        row = layout.row()
        row.prop(scn, "head_ornaments", icon='GROUP_BONE')
        row.prop(scn, "generate_all", icon='GROUP_BONE')
        row = layout.row()
        row.prop(scn, "apply_mod", icon='FILE_TICK')
        # Parenting is only meaningful when the modifiers get applied.
        if scn.apply_mod:
            row = layout.row()
            row.prop(scn, "parent_armature", icon='POSE_HLT')
# define the scene properties in a group - call them with context.scene.skinify
class Skinify_Properties(PropertyGroup):
    # Scene-level add-on settings; registered as ``bpy.types.Scene.skinify``
    # in register() and read by main()/generate_mesh().
    sub_level = IntProperty(
        name="Sub level",
        min=0, max=4,
        default=1,
        description="Mesh density"
    )
    thickness = FloatProperty(
        name="Thickness",
        min=0.01,
        default=0.8,
        description="Adjust shape thickness"
    )
    finger_thickness = FloatProperty(
        name="Finger Thickness",
        min=0.01, max=1.0,
        default=0.25,
        description="Adjust finger thickness relative to body"
    )
    connect_mesh = BoolProperty(
        name="Solid Shape",
        default=False,
        description="Makes solid shape from bone chains"
    )
    connect_parents = BoolProperty(
        name="Fill Gaps",
        default=False,
        description="Fills the gaps between parented bones"
    )
    generate_all = BoolProperty(
        name="All Shapes",
        default=False,
        description="Generates shapes from all bones"
    )
    head_ornaments = BoolProperty(
        name="Head Ornaments",
        default=False,
        description="Includes head ornaments"
    )
    apply_mod = BoolProperty(
        name="Apply Modifiers",
        default=True,
        description="Applies Modifiers to mesh"
    )
    parent_armature = BoolProperty(
        name="Parent Armature",
        default=True,
        description="Applies mesh to Armature"
    )
# startup defaults
@persistent
def startup_init(dummy):
    # load_post handler: re-apply the add-on defaults after a .blend file
    # is loaded. *dummy* is the unused argument Blender passes to handlers.
    # NOTE(review): init_props() is defined elsewhere in this file.
    init_props()
def register():
    """Register the add-on's operator, panel, settings group and handler."""
    bpy.utils.register_class(BONE_OT_custom_shape)
    bpy.utils.register_class(BONE_PT_custom_shape)
    bpy.utils.register_class(Skinify_Properties)
    # Attach the settings group to every scene as scene.skinify.
    bpy.types.Scene.skinify = PointerProperty(
        type=Skinify_Properties
    )
    # startup defaults
    bpy.app.handlers.load_post.append(startup_init)
def unregister():
    """Undo everything register() did, in the same order."""
    bpy.utils.unregister_class(BONE_OT_custom_shape)
    bpy.utils.unregister_class(BONE_PT_custom_shape)
    bpy.utils.unregister_class(Skinify_Properties)
    # cleanup the handler
    bpy.app.handlers.load_post.remove(startup_init)
    del bpy.types.Scene.skinify
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
| 37.239471
| 109
| 0.591676
|
4a0b5075db862add1b9fc7397469b3e4acac45aa
| 1,251
|
py
|
Python
|
src/sms.py
|
trevtravtrev/PetDiabetesAssistant
|
2c4bff97ac1854eee2a27086d4ebf063172b86f1
|
[
"MIT"
] | null | null | null |
src/sms.py
|
trevtravtrev/PetDiabetesAssistant
|
2c4bff97ac1854eee2a27086d4ebf063172b86f1
|
[
"MIT"
] | null | null | null |
src/sms.py
|
trevtravtrev/PetDiabetesAssistant
|
2c4bff97ac1854eee2a27086d4ebf063172b86f1
|
[
"MIT"
] | null | null | null |
import smtplib
import ssl
class Sms:
    """Send SMS messages through a US carrier's email-to-SMS gateway.

    Constructing an instance immediately connects and authenticates to the
    configured SMTP server (network I/O happens in ``__init__``).
    """

    def __init__(self, number, phone_carrier, username, password, email_domain, email_port):
        # Email-to-SMS gateway suffix per supported carrier.
        self.CARRIERS = {
            'att': '@mms.att.net',
            'tmobile': '@tmomail.net',
            'verizon': '@vtext.com',
            'sprint': '@messaging.sprintpcs.com',
            'virgin': '@vmobl.com',
            'boost': '@smsmyboostmobile.com',
            'cricket': '@sms.cricketwireless.net',
            'metro': '@mymetropcs.com',
            'us cellular': '@email.uscc.net',
            'xfinity': '@vtext.com'
        }
        self.EMAIL_DOMAIN = email_domain
        self.EMAIL_PORT = email_port
        # NOTE(review): an unrecognized carrier makes dict.get return None,
        # producing the literal suffix 'None' — kept as-is to preserve the
        # original behavior; consider raising instead.
        self.receiver_email = '{}{}'.format(number, self.CARRIERS.get(phone_carrier))
        self.username = username
        self.password = password
        self.initialize_server()

    def initialize_server(self):
        """Open the SMTP connection, upgrade it to TLS and log in."""
        tls_context = ssl.create_default_context()
        self.server = smtplib.SMTP(self.EMAIL_DOMAIN, self.EMAIL_PORT)
        self.server.ehlo()
        self.server.starttls(context=tls_context)
        self.server.ehlo()
        self.server.login(self.username, self.password)

    def send(self, message):
        """Relay *message* to the receiver's SMS gateway address."""
        self.server.sendmail(self.username, self.receiver_email, message)
| 32.921053
| 92
| 0.60032
|
4a0b518f1ee03f484e329c5315a813ca74c5d414
| 244
|
py
|
Python
|
data_analysis/test_moving_average.py
|
parevalo/2018-agu-workshop
|
2e7f93ce1e20d7de56a4cb7bd89c57211908c4d6
|
[
"MIT"
] | 1
|
2021-06-14T18:09:55.000Z
|
2021-06-14T18:09:55.000Z
|
data_analysis/test_moving_average.py
|
parevalo/2018-agu-workshop
|
2e7f93ce1e20d7de56a4cb7bd89c57211908c4d6
|
[
"MIT"
] | null | null | null |
data_analysis/test_moving_average.py
|
parevalo/2018-agu-workshop
|
2e7f93ce1e20d7de56a4cb7bd89c57211908c4d6
|
[
"MIT"
] | null | null | null |
import numpy as np
from .data_analysis import moving_average
def test_moving_avg():
    """moving_average of a constant series: NaN-padded edges, the constant
    value everywhere in the interior."""
    avg = moving_average(np.ones(10), 2)
    # A window of 2 leaves 2 undefined samples at each edge.
    assert np.all(np.isnan(avg[0:2]))
    assert np.all(np.isnan(avg[-2:]))
    # Compare the interior values to 1 directly. The previous form,
    # `np.allclose(avg[2:-2] == 1, 1)`, compared a *boolean* array to 1,
    # which silently demanded exact float equality and obscured the intent.
    assert np.allclose(avg[2:-2], 1)
| 27.111111
| 41
| 0.672131
|
4a0b5199d5d60156f3ea355fd6c558f9df538d4b
| 766
|
py
|
Python
|
rta/align/strategies.py
|
MatteoLacki/rta
|
93944d6fc934126e0bb4d076c8b4213cadbe49a1
|
[
"BSD-2-Clause"
] | 1
|
2018-05-31T14:31:18.000Z
|
2018-05-31T14:31:18.000Z
|
rta/align/strategies.py
|
MatteoLacki/rta
|
93944d6fc934126e0bb4d076c8b4213cadbe49a1
|
[
"BSD-2-Clause"
] | null | null | null |
rta/align/strategies.py
|
MatteoLacki/rta
|
93944d6fc934126e0bb4d076c8b4213cadbe49a1
|
[
"BSD-2-Clause"
] | null | null | null |
from rta.reference import stat_reference
def Tenzerize(X, n, a):
    """Repeatedly fit the aligner and re-reference the data *n* times.

    Args:
        X (pd.DataFrame): DataFrame with columns x, y (reference), and run.
        n (int): number of repeated fittings.
        a (Aligner): an initialized aligner.

    Returns:
        tuple: the (fitted) aligner and the DataFrame, with every
        intermediate x/y pair archived under step-suffixed column names.
    """
    for step in range(n):
        a.fit(X)
        aligned = a(X)
        # Archive the current x/y pair before overwriting x with the
        # freshly aligned values and recomputing the reference.
        archive = {'x': 'x' + str(step), 'y': 'y' + str(step)}
        X.rename(columns=archive, inplace=True)
        X['x'] = aligned
        X = stat_reference(X)
    X.rename(columns={'x': 'x' + str(n), 'y': 'y' + str(n)}, inplace=True)
    return a, X
def Matteotti(X, a):
    """Fit the aligner once and store its fitted values alongside the data.

    Args:
        X (pd.DataFrame): DataFrame with columns x, y (reference), and run.
        a (Aligner): an initialized aligner.

    Returns:
        tuple: the fitted aligner and *X* with a new 'x_aligned' column.
    """
    a.fit(X)
    fitted_values = a.fitted()
    X['x_aligned'] = fitted_values
    return a, X
| 20.702703
| 72
| 0.590078
|
4a0b5287356bff72bde40a441a5a9158d438eb91
| 829
|
py
|
Python
|
core/database/FreeBSDx64/execc.py
|
xorond/l0l
|
bb0c2bb23fc49997b695cf27d2b2b25169395521
|
[
"WTFPL"
] | 6
|
2018-10-29T19:46:49.000Z
|
2022-03-10T15:39:47.000Z
|
core/database/FreeBSDx64/execc.py
|
xorond/l0l
|
bb0c2bb23fc49997b695cf27d2b2b25169395521
|
[
"WTFPL"
] | null | null | null |
core/database/FreeBSDx64/execc.py
|
xorond/l0l
|
bb0c2bb23fc49997b695cf27d2b2b25169395521
|
[
"WTFPL"
] | 4
|
2018-10-16T13:28:27.000Z
|
2022-02-05T18:43:57.000Z
|
def execc(command):
    """Return the FreeBSD x64 execve shellcode text with *command* appended.

    The escape sequences are raw-string literals (literal backslash-x text,
    not bytes); callers are responsible for any later encoding.
    """
    prologue = (
        r"\x48\x31\xd2\xe8\x06\x00\x00\x00\x68\x65\x6c"
        r"\x6c\x6f\x00\x5f\x52\x57\x48\x89\xe6\x48\x31\xc0\x48\x83\xc8\x3b\x0f\x05"
    )
    return prologue + command
#Shellcode disassembly with 32bit so must be edit little bit.Instructions are wrong.
"""
00000000 48 dec eax
00000001 31D2 xor edx,edx
00000003 E806000000 call dword 0xe
00000008 68656C6C6F push dword 0x6f6c6c65
0000000D 005F52 add [edi+0x52],bl
00000010 57 push edi
00000011 48 dec eax
00000012 89E6 mov esi,esp
00000014 48 dec eax
00000015 31C0 xor eax,eax
00000017 48 dec eax
00000018 83C83B or eax,byte +0x3b
0000001B 0F05 syscall
"""
| 36.043478
| 89
| 0.610374
|
4a0b52dbac49abf01d4dd0404a6e67d0e6e24238
| 13,223
|
py
|
Python
|
train_samm2.py
|
IcedDoggie/Micro-Expression-with-Deep-Learning
|
2ae489bf1040b29a2b535fb4178d6e7bbe7bba49
|
[
"BSD-3-Clause-Attribution"
] | 249
|
2018-04-19T08:30:19.000Z
|
2022-03-30T06:31:09.000Z
|
train_samm2.py
|
IcedDoggie/Micro-Expression-with-Deep-Learning
|
2ae489bf1040b29a2b535fb4178d6e7bbe7bba49
|
[
"BSD-3-Clause-Attribution"
] | 40
|
2018-05-17T03:02:06.000Z
|
2021-04-16T02:28:24.000Z
|
train_samm2.py
|
IcedDoggie/Micro-Expression-with-Deep-Learning
|
2ae489bf1040b29a2b535fb4178d6e7bbe7bba49
|
[
"BSD-3-Clause-Attribution"
] | 111
|
2018-04-09T01:53:29.000Z
|
2022-03-19T08:59:28.000Z
|
import numpy as np
import sys
import math
import operator
import csv
import glob,os
import xlrd
import cv2
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from collections import Counter
from sklearn.metrics import confusion_matrix
import scipy.io as sio
import pydot, graphviz
from PIL import Image
from keras.models import Sequential, Model
from keras.utils import np_utils, plot_model
from keras import metrics
from keras import backend as K
from keras.models import model_from_json
from keras.layers import Dense, Dropout, Flatten, Activation, GlobalAveragePooling2D
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.sequence import pad_sequences
from keras import optimizers
from keras.applications.vgg16 import VGG16 as keras_vgg16
from keras.preprocessing.image import ImageDataGenerator, array_to_img
import keras
from keras.callbacks import EarlyStopping
from labelling import collectinglabel
from reordering import readinput
from evaluationmatrix import fpr
from utilities import Read_Input_Images, get_subfolders_num, data_loader_with_LOSO, label_matching, duplicate_channel
from utilities import record_scores, loading_smic_table, loading_casme_table, ignore_casme_samples, ignore_casmergb_samples, LossHistory
from utilities import loading_samm_table, loading_casme_objective_table, filter_objective_samples
from samm_utilitis import get_subfolders_num_crossdb, Read_Input_Images_SAMM_CASME, loading_samm_labels
from models import VGG_16, temporal_module, VGG_16_4_channels, convolutional_autoencoder
def train_samm(batch_size, spatial_epochs, temporal_epochs, train_id, dB, spatial_size, flag, tensorboard):
############## Path Preparation ######################
root_db_path = "/media/ice/OS/Datasets/"
workplace = root_db_path + dB + "/"
inputDir = root_db_path + dB + "/" + dB + "/"
######################################################
classes = 5
if dB == 'CASME2_TIM':
table = loading_casme_table(workplace + 'CASME2-ObjectiveClasses.xlsx')
listOfIgnoredSamples, IgnoredSamples_index = ignore_casme_samples(inputDir)
############## Variables ###################
r = w = spatial_size
subjects=2
n_exp = 5
# VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
listOfIgnoredSamples = []
VidPerSubject = [2,1]
timesteps_TIM = 10
data_dim = r * w
pad_sequence = 10
channel = 3
############################################
os.remove(workplace + "Classification/CASME2_TIM_label.txt")
elif dB == 'CASME2_Optical':
table = loading_casme_table(workplace + 'CASME2-ObjectiveClasses.xlsx')
listOfIgnoredSamples, IgnoredSamples_index = ignore_casme_samples(inputDir)
############## Variables ###################
r = w = spatial_size
subjects=26
n_exp = 5
VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
timesteps_TIM = 9
data_dim = r * w
pad_sequence = 9
channel = 3
############################################
# os.remove(workplace + "Classification/CASME2_TIM_label.txt")
elif dB == 'SAMM_TIM10':
table, table_objective = loading_samm_table(root_db_path, dB)
listOfIgnoredSamples = []
IgnoredSamples_index = np.empty([0])
################# Variables #############################
r = w = spatial_size
subjects = 29
n_exp = 8
VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
timesteps_TIM = 10
data_dim = r * w
pad_sequence = 10
channel = 3
classes = 8
#########################################################
elif dB == 'SAMM_CASME_Strain':
# total amount of videos 253
table, table_objective = loading_samm_table(root_db_path, dB)
table = table_objective
table2 = loading_casme_objective_table(root_db_path, dB)
# merge samm and casme tables
table = np.concatenate((table, table2), axis=1)
# print(table.shape)
# listOfIgnoredSamples, IgnoredSamples_index, sub_items = ignore_casme_samples(inputDir)
listOfIgnoredSamples = []
IgnoredSamples_index = np.empty([0])
sub_items = np.empty([0])
list_samples = filter_objective_samples(table)
r = w = spatial_size
subjects = 47 # some subjects were removed because of objective classes and ignore samples: 47
n_exp = 5
# TODO:
# 1) Further decrease the video amount, the one with objective classes >= 6
# list samples: samples with wanted objective class
VidPerSubject, list_samples = get_subfolders_num_crossdb(inputDir, IgnoredSamples_index, sub_items, table, list_samples)
# print(VidPerSubject)
# print(len(VidPerSubject))
# print(sum(VidPerSubject))
timesteps_TIM = 9
data_dim = r * w
channel = 3
classes = 5
if os.path.isfile(workplace + "Classification/SAMM_CASME_Optical_label.txt"):
os.remove(workplace + "Classification/SAMM_CASME_Optical_label.txt")
##################### Variables ######################
######################################################
############## Flags ####################
tensorboard_flag = tensorboard
resizedFlag = 1
train_spatial_flag = 0
train_temporal_flag = 0
svm_flag = 0
finetuning_flag = 0
cam_visualizer_flag = 0
channel_flag = 0
if flag == 'st':
train_spatial_flag = 1
train_temporal_flag = 1
finetuning_flag = 1
elif flag == 's':
train_spatial_flag = 1
finetuning_flag = 1
elif flag == 't':
train_temporal_flag = 1
elif flag == 'nofine':
svm_flag = 1
elif flag == 'scratch':
train_spatial_flag = 1
train_temporal_flag = 1
elif flag == 'st4':
train_spatial_flag = 1
train_temporal_flag = 1
channel_flag = 1
elif flag == 'st7':
train_spatial_flag = 1
train_temporal_flag = 1
channel_flag = 2
#########################################
############ Reading Images and Labels ################
SubperdB = Read_Input_Images_SAMM_CASME(inputDir, list_samples, listOfIgnoredSamples, dB, resizedFlag, table, workplace, spatial_size, channel)
print("Loaded Images into the tray...")
labelperSub = label_matching(workplace, dB, subjects, VidPerSubject)
print("Loaded Labels into the tray...")
if channel_flag == 1:
SubperdB_strain = Read_Input_Images(inputDir, listOfIgnoredSamples, 'CASME2_Strain_TIM10', resizedFlag, table, workplace, spatial_size, 1)
elif channel_flag == 2:
SubperdB_strain = Read_Input_Images(inputDir, listOfIgnoredSamples, 'CASME2_Strain_TIM10', resizedFlag, table, workplace, spatial_size, 1)
SubperdB_gray = Read_Input_Images(inputDir, listOfIgnoredSamples, 'CASME2_TIM', resizedFlag, table, workplace, spatial_size, 3)
#######################################################
########### Model Configurations #######################
sgd = optimizers.SGD(lr=0.0001, decay=1e-7, momentum=0.9, nesterov=True)
adam = optimizers.Adam(lr=0.00001, decay=0.000001)
adam2 = optimizers.Adam(lr= 0.00075, decay= 0.0001)
# Different Conditions for Temporal Learning ONLY
if train_spatial_flag == 0 and train_temporal_flag == 1 and dB != 'CASME2_Optical':
data_dim = spatial_size * spatial_size
elif train_spatial_flag == 0 and train_temporal_flag == 1 and dB == 'CASME2_Optical':
data_dim = spatial_size * spatial_size * 3
else:
data_dim = 4096
########################################################
########### Training Process ############
# total confusion matrix to be used in the computation of f1 score
tot_mat = np.zeros((n_exp,n_exp))
# model checkpoint
spatial_weights_name = 'vgg_spatial_'+ str(train_id) + '_casme2_'
temporal_weights_name = 'temporal_ID_' + str(train_id) + '_casme2_'
history = LossHistory()
stopping = EarlyStopping(monitor='loss', min_delta = 0, mode = 'min')
for sub in range(subjects):
############### Reinitialization & weights reset of models ########################
vgg_model_cam = VGG_16(spatial_size=spatial_size, classes=classes, weights_path='VGG_Face_Deep_16.h5')
temporal_model = temporal_module(data_dim=data_dim, classes=classes, timesteps_TIM=timesteps_TIM)
temporal_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[metrics.categorical_accuracy])
conv_ae = convolutional_autoencoder(spatial_size = spatial_size, classes = classes)
conv_ae.compile(loss='binary_crossentropy', optimizer=adam)
if channel_flag == 1 or channel_flag == 2:
vgg_model = VGG_16_4_channels(classes=classes, spatial_size = spatial_size)
vgg_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[metrics.categorical_accuracy])
else:
vgg_model = VGG_16(spatial_size = spatial_size, classes=classes, weights_path='VGG_Face_Deep_16.h5')
vgg_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[metrics.categorical_accuracy])
svm_classifier = SVC(kernel='linear', C=1)
####################################################################################
############ for tensorboard ###############
if tensorboard_flag == 1:
cat_path = tensorboard_path + str(sub) + "/"
os.mkdir(cat_path)
tbCallBack = keras.callbacks.TensorBoard(log_dir=cat_path, write_graph=True)
cat_path2 = tensorboard_path + str(sub) + "spat/"
os.mkdir(cat_path2)
tbCallBack2 = keras.callbacks.TensorBoard(log_dir=cat_path2, write_graph=True)
#############################################
image_label_mapping = np.empty([0])
Train_X, Train_Y, Test_X, Test_Y, Test_Y_gt = data_loader_with_LOSO(sub, SubperdB, labelperSub, subjects, classes)
# Rearrange Training labels into a vector of images, breaking sequence
Train_X_spatial = Train_X.reshape(Train_X.shape[0]*timesteps_TIM, r, w, channel)
Test_X_spatial = Test_X.reshape(Test_X.shape[0]* timesteps_TIM, r, w, channel)
# Special Loading for 4-Channel
if channel_flag == 1:
Train_X_Strain, _, Test_X_Strain, _, _ = data_loader_with_LOSO(sub, SubperdB_strain, labelperSub, subjects, classes)
Train_X_Strain = Train_X_Strain.reshape(Train_X_Strain.shape[0]*timesteps_TIM, r, w, 1)
Test_X_Strain = Test_X_Strain.reshape(Test_X.shape[0]*timesteps_TIM, r, w, 1)
# Concatenate Train X & Train_X_Strain
Train_X_spatial = np.concatenate((Train_X_spatial, Train_X_Strain), axis=3)
Test_X_spatial = np.concatenate((Test_X_spatial, Test_X_Strain), axis=3)
channel = 4
elif channel_flag == 2:
Train_X_Strain, _, Test_X_Strain, _, _ = data_loader_with_LOSO(sub, SubperdB_strain, labelperSub, subjects, classes)
Train_X_gray, _, Test_X_gray, _, _ = data_loader_with_LOSO(sub, SubperdB_gray, labelperSub, subjects)
Train_X_Strain = Train_X_Strain.reshape(Train_X_Strain.shape[0]*timesteps_TIM, r, w, 1)
Test_X_Strain = Test_X_Strain.reshape(Test_X_Strain.shape[0]*timesteps_TIM, r, w, 1)
Train_X_gray = Train_X_gray.reshape(Train_X_gray.shape[0]*timesteps_TIM, r, w, 3)
Test_X_gray = Test_X_gray.reshape(Test_X_gray.shape[0]*timesteps_TIM, r, w, 3)
# Concatenate Train_X_Strain & Train_X & Train_X_gray
Train_X_spatial = np.concatenate((Train_X_spatial, Train_X_Strain, Train_X_gray), axis=3)
Test_X_spatial = np.concatenate((Test_X_spatial, Test_X_Strain, Test_X_gray), axis=3)
channel = 7
if channel == 1:
# Duplicate channel of input image
Train_X_spatial = duplicate_channel(Train_X_spatial)
Test_X_spatial = duplicate_channel(Test_X_spatial)
# Extend Y labels 10 fold, so that all images have labels
Train_Y_spatial = np.repeat(Train_Y, timesteps_TIM, axis=0)
Test_Y_spatial = np.repeat(Test_Y, timesteps_TIM, axis=0)
# print ("Train_X_shape: " + str(np.shape(Train_X_spatial)))
# print ("Train_Y_shape: " + str(np.shape(Train_Y_spatial)))
# print ("Test_X_shape: " + str(np.shape(Test_X_spatial)))
# print ("Test_Y_shape: " + str(np.shape(Test_Y_spatial)))
# print(Train_X_spatial)
##################### Training & Testing #########################
X = Train_X_spatial.reshape(Train_X_spatial.shape[0], channel, r, w)
y = Train_Y_spatial.reshape(Train_Y_spatial.shape[0], classes)
normalized_X = X.astype('float32') / 255.
test_X = Test_X_spatial.reshape(Test_X_spatial.shape[0], channel, r, w)
test_y = Test_Y_spatial.reshape(Test_Y_spatial.shape[0], classes)
normalized_test_X = test_X.astype('float32') / 255.
print(X.shape)
###### conv weights must be freezed for transfer learning ######
if finetuning_flag == 1:
for layer in vgg_model.layers[:33]:
layer.trainable = False
if train_spatial_flag == 1 and train_temporal_flag == 1:
# Autoencoder features
# conv_ae.fit(normalized_X, normalized_X, batch_size=batch_size, epochs=spatial_epochs, shuffle=True)
# Spatial Training
if tensorboard_flag == 1:
vgg_model.fit(X, y, batch_size=batch_size, epochs=spatial_epochs, shuffle=True, callbacks=[tbCallBack2])
else:
vgg_model.fit(X, y, batch_size=batch_size, epochs=spatial_epochs, shuffle=True, callbacks=[history, stopping])
# record f1 and loss
file_loss = open(workplace+'Classification/'+ 'Result/'+dB+'/loss_' + str(train_id) + '.txt', 'a')
file_loss.write(str(history.losses) + "\n")
file_loss.close()
file_loss = open(workplace+'Classification/'+ 'Result/'+dB+'/accuracy_' + str(train_id) + '.txt', 'a')
file_loss.write(str(history.accuracy) + "\n")
file_loss.close()
vgg_model.save_weights(spatial_weights_name + str(sub) + ".h5")
| 37.888252
| 144
| 0.701732
|
4a0b54454af49f91ae69d9980aef3b2b5764e55d
| 3,197
|
py
|
Python
|
examples/janheapen.py
|
tenkeyaikoukaint/cggpyg
|
af381da19486e7f6f990e6a70660815266a5d75c
|
[
"MIT"
] | 1
|
2021-08-01T09:30:01.000Z
|
2021-08-01T09:30:01.000Z
|
examples/janheapen.py
|
tenkeyaikoukaint/cggpyg
|
af381da19486e7f6f990e6a70660815266a5d75c
|
[
"MIT"
] | null | null | null |
examples/janheapen.py
|
tenkeyaikoukaint/cggpyg
|
af381da19486e7f6f990e6a70660815266a5d75c
|
[
"MIT"
] | null | null | null |
import random
import time
import pygame
from syslcd1l import CGGPYG
from cggframe import cggframe
class gamevals:
    """Mutable bag of game state for the jan-heap game."""

    def __init__(self):
        self.sc = 0          # score
        self.chr = "r"       # player's hand: "r"ock, "s"cissors or "p"aper
        self.emtx = [""] * 21  # enemy columns; "" means the slot is empty
        self.ex = 20         # leftmost occupied enemy column (20 == none yet)
        self.mx = 0          # player's x position
        self.ct = 0          # frame counter pacing enemy movement
class jantris(cggframe):
    # Rock/paper/scissors "heap" game: enemy hands march in from the right
    # (columns ex..19 of gv.emtx); the player steps right and beats the
    # front enemy by showing the winning hand.
    # NOTE(review): cggframe presumably drives the loop and dispatches to
    # keyin()/routine()/title()/gameover() — confirm against cggframe.

    def __init__(self):
        self.cgg=CGGPYG()
        pygame.mixer.init()
        self.hit=pygame.mixer.Sound("po.wav")      # enemy-destroyed sound
        self.bgm=pygame.mixer.Sound("pin-re.wav")  # enemy-step sound
        self.gamestate="title"  # "title" -> "play" -> "gameover"
        self.gv=gamevals()
        self.cgg.setcolor(7)

    def keyin(self,key):
        # Handle one key press depending on the current game state.
        if self.gamestate=="title" and key==pygame.K_RETURN:
            self.gamestate="play"
        if self.gamestate=="play":
            # DOWN cycles the hand r -> s -> p -> r; UP cycles the reverse.
            if key==pygame.K_DOWN:
                if self.gv.chr=="r":
                    self.gv.chr="s"
                elif self.gv.chr=="s":
                    self.gv.chr="p"
                else:
                    self.gv.chr="r"
            if key==pygame.K_UP:
                if self.gv.chr=="p":
                    self.gv.chr="s"
                elif self.gv.chr=="s":
                    self.gv.chr="r"
                else:
                    self.gv.chr="p"
            if key==pygame.K_RIGHT:
                # Step toward the enemies and immediately test for a clash.
                self.gv.mx=self.gv.mx+1
                self.draw()
                self.check()
        if self.gamestate=="gameover" and key==pygame.K_RETURN:
            # Restart: back to the title screen with freshly built state.
            self.gamestate="title"
            self.__init__()

    def title(self):
        # Title screen.
        self.cgg.cls()
        self.cgg.setcolor(7)
        self.cgg.printc("janheap hit ret key",0,0)

    def gameover(self):
        # Game-over screen with the final score.
        self.cgg.cls()
        self.cgg.setcolor(7)
        self.cgg.printc("score:"+str(self.gv.sc),0,0)

    def draw(self):
        # Redraw the player's hand and every visible enemy hand.
        self.cgg.cls()
        self.cgg.setcolor(7)
        self.cgg.puth(self.gv.chr,self.gv.mx,0)
        for i in range(self.gv.ex,20):
            self.cgg.puth(self.gv.emtx[i],i,0)

    def check(self):
        # When the player is adjacent to the front enemy, resolve the
        # clash: a winning hand removes the enemy, scores a point and
        # sends the player back to the left edge.
        if self.gv.mx==self.gv.ex-1:
            h1=self.gv.chr
            h2=self.gv.emtx[self.gv.ex]
            if (h1=="r" and h2=="s") or (h1=="s" and h2=="p") or (h1=="p" and h2=="r"):
                self.gv.emtx[self.gv.ex]=""
                self.gv.ex=self.gv.ex+1
                self.gv.sc=self.gv.sc+1
                self.hit.play()
                pygame.display.flip()
                self.gv.mx=0

    def emove(self):
        # Shift the enemy heap one column left and spawn a random hand at
        # the right edge; reaching column 0 ends the game.
        self.gv.ex=self.gv.ex-1
        for i in range(self.gv.ex,20):
            self.gv.emtx[i]=self.gv.emtx[i+1]
        r=random.randint(0,2)
        if r==0:
            self.gv.emtx[19]="r"
        if r==1:
            self.gv.emtx[19]="s"
        if r==2:
            self.gv.emtx[19]="p"
        self.draw()
        if self.gv.ex<=0:
            self.gamestate="gameover"
        if self.gv.ex-1==self.gv.mx:
            # The heap just reached the player: resolve immediately.
            self.check()
            self.draw()

    def routine(self):
        # Per-frame tick: move the enemies every `gap` frames; the gap
        # shrinks as the score grows (faster game), bottoming out at 10.
        self.gv.ct=self.gv.ct+1
        if self.gv.sc<100:
            gap=21-int(self.gv.sc/10)
        else:
            gap=10
        if self.gv.ct>=gap:
            self.emove()
            self.gv.ct=0
            self.bgm.play()
        self.draw()
jt=jantris()
jt.main(0.05)
| 25.373016
| 87
| 0.464811
|
4a0b545cd9d4e23e8e72530a0ff883ad31ef24c5
| 412
|
py
|
Python
|
tests/ecosystem/destroy/test_destroy_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
tests/ecosystem/destroy/test_destroy_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
tests/ecosystem/destroy/test_destroy_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
import logging
from ocs_ci.framework import config
from ocs_ci.framework.testlib import EcosystemTest, destroy
from ocs_ci.utility.utils import destroy_cluster
log = logging.getLogger(__name__)
@destroy
class TestDestroy(EcosystemTest):
    """Teardown-stage test that tears down the OCS cluster under test."""
    def test_destroy_cluster(self, log_cli_level):
        # log_cli_level: log verbosity forwarded to the destroy run.
        # Destroys the cluster found at the configured cluster_path.
        log.info("Running OCS cluster destroy")
        destroy_cluster(config.ENV_DATA['cluster_path'], log_cli_level)
| 27.466667
| 71
| 0.793689
|
4a0b5565f98fd02b6fbc63d466c9378a1840b127
| 903
|
py
|
Python
|
hyperverlet/loss.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | 7
|
2021-08-02T09:10:35.000Z
|
2022-03-16T13:24:22.000Z
|
hyperverlet/loss.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | 2
|
2021-06-15T11:50:59.000Z
|
2021-06-16T12:23:51.000Z
|
hyperverlet/loss.py
|
Zinoex/hyperverlet
|
431ef92fa2448ce69c357f01c0862353067bfa8a
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn, Tensor
class TimeDecayMSELoss(nn.Module):
    """MSE loss whose per-step contribution decays geometrically.

    The leading dimension of the inputs is treated as the step axis; the
    mean squared error of step t (0-based) is weighted by
    ``decay_factor ** (t + 1)`` before the final average.
    """
    def __init__(self, decay_factor=0.99):
        super().__init__()
        self.decay_factor = decay_factor

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        steps = input.size(0)
        flat_input = input.reshape(steps, -1)
        flat_target = target.reshape(steps, -1)
        # Plain MSE per leading-dim step, averaged over all other elements.
        per_step_mse = ((flat_input - flat_target) ** 2).mean(dim=1)
        # weights = [d, d**2, d**3, ...] built via a running product.
        weights = torch.cumprod(
            torch.full_like(per_step_mse, self.decay_factor), dim=0)
        return (per_step_mse * weights).mean()
class MeanNormLoss(nn.Module):
    """Mean Euclidean norm of the residual over each (dim0, dim1) slice."""
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        d0, d1 = input.size(0), input.size(1)
        # Flatten all trailing axes so the norm runs over one vector.
        residual = target.reshape(d0, d1, -1) - input.reshape(d0, d1, -1)
        return residual.norm(dim=2).mean()
| 28.21875
| 81
| 0.619048
|
4a0b558998e8f89146f5e4be5175a6b6d9218a67
| 1,066
|
py
|
Python
|
imagepy/menus/Image/Adjust/colorstairs_plg.py
|
dada1437903138/imagepy
|
65d9ce088894eef587054e04018f9d34ff65084f
|
[
"BSD-4-Clause"
] | 1,178
|
2017-05-25T06:59:01.000Z
|
2022-03-31T11:38:53.000Z
|
imagepy/menus/Image/Adjust/colorstairs_plg.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 76
|
2017-06-10T17:01:50.000Z
|
2021-12-23T08:13:29.000Z
|
imagepy/menus/Image/Adjust/colorstairs_plg.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 315
|
2017-05-25T12:59:53.000Z
|
2022-03-07T22:52:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 27 00:56:00 2016
@author: yxl
"""
import numpy as np
from sciapp.action import Filter
#from imagepy.ui.widgets import HistCanvas
class Plugin(Filter):
    """Per-channel linear contrast stretch ("color stairs") for RGB images."""
    title = 'Color Stairs'
    note = ['rgb', 'auto_msk', 'auto_snap', 'not_channel', 'preview']
    # One (low, high) threshold pair per channel; the ramp maps
    # [low, high] onto [0, 255] and clips everything outside.
    para = {'thre_r':(0, 255), 'thre_g':(0, 255), 'thre_b':(0, 255)}
    def load(self, ips):
        # Build one histogram widget per channel for the parameter dialog.
        hists = [ips.histogram(chans=i, step=512) for i in (0,1,2)]
        self. view = [('hist', 'thre_r', 'lh', hists[0], (0,255), 0),
                      ('hist', 'thre_g', 'lh', hists[1], (0,255), 0),
                      ('hist', 'thre_b', 'lh', hists[2], (0,255), 0)]
        return True
    #process
    def run(self, ips, snap, img, para = None):
        if para == None: para = self.para
        for i, c in zip([0,1,2],'rgb'):
            t1, t2 = para['thre_'+c]
            # Linear ramp; max(0.5, ...) guards against division by zero
            # when t1 == t2, and a negative slope when t2 < t1.
            xs = np.linspace(0,255,256)
            ys = (xs-t1)*(255/max(0.5, t2-t1))
            # Clip to [0, 255] and apply as a 256-entry lookup table to the
            # snapshot channel, writing the result into the live image.
            index = np.clip(ys, 0, 255).astype(np.uint8)
            img[:,:,i] = index[snap[:,:,i]]
| 34.387097
| 69
| 0.508443
|
4a0b559e99fb88613cc4709bc77b1b57357718fe
| 12,726
|
py
|
Python
|
improver_tests/blending/blend_across_adjacent_points/test_TriangularWeightedBlendAcrossAdjacentPoints.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/blending/blend_across_adjacent_points/test_TriangularWeightedBlendAcrossAdjacentPoints.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | 2
|
2020-03-30T17:25:18.000Z
|
2021-06-25T15:30:29.000Z
|
improver_tests/blending/blend_across_adjacent_points/test_TriangularWeightedBlendAcrossAdjacentPoints.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the
weighted_blend.TriangularWeightedBlendAcrossAdjacentPoints plugin."""
import unittest
from datetime import datetime as dt
import iris
import numpy as np
from iris.tests import IrisTest
from improver.blending.blend_across_adjacent_points import (
TriangularWeightedBlendAcrossAdjacentPoints)
from improver.metadata.amend import add_coord
from improver.utilities.cube_manipulation import concatenate_cubes
from improver.utilities.warnings_handler import ManageWarnings
from ...set_up_test_cubes import set_up_variable_cube
def set_up_cubes_for_process_tests():
    """Set up some cubes with data for testing the "process" and
    "find_central_point" functions"""
    # Central cube: uniform 1.0 field valid at 03:00 (forecast period 0 h).
    central_cube = set_up_variable_cube(
        np.array([[1., 1.], [1., 1.]], dtype=np.float32),
        name='lwe_thickness_of_precipitation_amount', units='m',
        time=dt(2017, 1, 10, 3, 0), frt=dt(2017, 1, 10, 3, 0),
        time_bounds=(dt(2017, 1, 10, 0, 0), dt(2017, 1, 10, 3, 0)))
    # Second cube one hour later with a uniform 2.0 field.
    another_cube = set_up_variable_cube(
        np.array([[2., 2.], [2., 2.]], dtype=np.float32),
        name='lwe_thickness_of_precipitation_amount', units='m',
        time=dt(2017, 1, 10, 4, 0), frt=dt(2017, 1, 10, 3, 0),
        time_bounds=(dt(2017, 1, 10, 1, 0), dt(2017, 1, 10, 4, 0)))
    # Merge the two validity times into a single two-time cube and return
    # both the central slice and the merged cube.
    cube = iris.cube.CubeList(
        [central_cube, another_cube]).merge_cube()
    return central_cube, cube
class Test__repr__(IrisTest):
    """Test the __repr__ method."""
    def test_basic(self):
        """Test that the __repr__ returns the expected string."""
        width = 3.0
        forecast_period = 1
        result = str(TriangularWeightedBlendAcrossAdjacentPoints(
            'time', forecast_period, 'hours', width))
        # NOTE(review): the expected string has no closing '>' --
        # presumably this matches the plugin's actual __repr__; verify.
        msg = ('<TriangularWeightedBlendAcrossAdjacentPoints:'
               ' coord = time, central_point = 1.00, '
               'parameter_units = hours, width = 3.00')
        self.assertEqual(result, msg)
class Test__init__(IrisTest):
    """Test the __init__ method."""
    def test_basic(self):
        """Test that __init__ stores the constructor arguments on the
        expected attributes."""
        width = 3.0
        forecast_period = 1
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'time', forecast_period, 'hours', width)
        expected_coord = 'time'
        expected_width = 3.0
        expected_parameter_units = 'hours'
        self.assertEqual(plugin.coord, expected_coord)
        self.assertEqual(plugin.width, expected_width)
        self.assertEqual(plugin.parameter_units, expected_parameter_units)
class Test__find_central_point(IrisTest):
    """Test the _find_central_point."""
    def setUp(self):
        """Set up test cubes."""
        # Two-time merged cube plus the slice the plugin should extract.
        self.central_cube, self.cube = set_up_cubes_for_process_tests()
        self.forecast_period = self.central_cube.coord(
            "forecast_period").points[0]
        self.width = 1.0
    def test_central_point_available(self):
        """Test that the central point is available within the input cube."""
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', self.width)
        central_cube = plugin._find_central_point(self.cube)
        # The extracted slice must match the known central cube exactly.
        self.assertEqual(self.central_cube.coord('forecast_period'),
                         central_cube.coord('forecast_period'))
        self.assertEqual(self.central_cube.coord('time'),
                         central_cube.coord('time'))
        self.assertArrayEqual(self.central_cube.data, central_cube.data)
    def test_central_point_not_available(self):
        """Test that the central point is not available within the
        input cube."""
        # Forecast period 2 h is outside the cube's 0-1 h range.
        forecast_period = 2
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', forecast_period, 'hours', self.width)
        msg = 'The central point 2 in units of hours'
        with self.assertRaisesRegex(ValueError, msg):
            plugin._find_central_point(self.cube)
class Test_process(IrisTest):
    """Test the process method."""
    def setUp(self):
        """Set up test cubes."""
        # Merged cube holds fields of 1.0 (central time) and 2.0 (+1 h).
        self.central_cube, self.cube = set_up_cubes_for_process_tests()
        self.forecast_period = self.central_cube.coord(
            "forecast_period").points[0]
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic_triangle_width_1(self):
        """Test that the plugin produces sensible results when the width
        of the triangle is 1. This is equivalent to no blending."""
        width = 1.0
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', width)
        result = plugin(self.cube)
        # Output must be identical to the unblended central cube.
        self.assertEqual(self.central_cube.coord('forecast_period'),
                         result.coord('forecast_period'))
        self.assertEqual(self.central_cube.coord('time'), result.coord('time'))
        self.assertArrayEqual(self.central_cube.data, result.data)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic_triangle_width_2(self):
        """Test that the plugin produces sensible results when the width
        of the triangle is 2 and there is some blending."""
        width = 2.0
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', width)
        result = plugin(self.cube)
        # Blended value lies between the 1.0 and 2.0 input fields.
        expected_data = np.array([[1.333333, 1.333333],
                                  [1.333333, 1.333333]])
        self.assertEqual(self.central_cube.coord('forecast_period'),
                         result.coord('forecast_period'))
        self.assertEqual(self.central_cube.coord('time'), result.coord('time'))
        self.assertArrayAlmostEqual(expected_data, result.data)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_central_point_not_in_allowed_range(self):
        """Test that an exception is generated when the central cube is not
        within the allowed range."""
        width = 1.0
        forecast_period = 2
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', forecast_period, 'hours', width)
        msg = "The central point 2 in units of hours"
        with self.assertRaisesRegex(ValueError, msg):
            plugin(self.cube)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_alternative_parameter_units(self):
        """Test that the plugin produces sensible results when the width
        of the triangle is 7200 seconds. """
        # 7200 s == 2 h, so results must match test_basic_triangle_width_2.
        forecast_period = 0
        width = 7200.0
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', forecast_period, 'seconds', width)
        result = plugin(self.cube)
        expected_data = np.array([[1.333333, 1.333333],
                                  [1.333333, 1.333333]])
        self.assertEqual(self.central_cube.coord('forecast_period'),
                         result.coord('forecast_period'))
        self.assertEqual(self.central_cube.coord('time'), result.coord('time'))
        self.assertArrayAlmostEqual(expected_data, result.data)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_input_cube_no_change(self):
        """Test that the plugin does not change the original input cube."""
        # Add threshold axis to standard input cube.
        changes = {'points': [0], 'units': '1', 'var_name': 'threshold'}
        cube_with_thresh = add_coord(
            self.cube.copy(), 'precipitation_amount', changes)
        original_cube = cube_with_thresh.copy()
        width = 2.0
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', width)
        _ = plugin(cube_with_thresh)
        # Test that the input cube is unchanged by the function.
        self.assertEqual(cube_with_thresh, original_cube)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_works_one_thresh(self):
        """Test that the plugin retains the single threshold from the input
        cube."""
        # Creates a cube containing the expected outputs.
        fill_value = 1 + 1/3.0
        data = np.full((2, 2), fill_value)
        # Take a slice of the time coordinate.
        expected_cube = self.cube[0].copy(data.astype(np.float32))
        # Add threshold axis to expected output cube.
        changes = {'points': [0.5], 'units': '1', 'var_name': 'threshold'}
        expected_cube = add_coord(
            expected_cube, 'precipitation_amount', changes)
        # Add threshold axis to standard input cube.
        cube_with_thresh = add_coord(
            self.cube.copy(), 'precipitation_amount', changes)
        width = 2.0
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', width)
        result = plugin(cube_with_thresh)
        # Test that the result cube retains threshold co-ordinates
        # from original cube.
        self.assertEqual(expected_cube.coord('precipitation_amount'),
                         result.coord('precipitation_amount'))
        self.assertArrayEqual(expected_cube.data, result.data)
        self.assertEqual(expected_cube, result)
    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_works_two_thresh(self):
        """Test that the plugin works with a cube that contains multiple
        thresholds."""
        width = 2.0
        thresh_cube = self.cube.copy()
        thresh_cube.remove_coord("forecast_reference_time")
        # Build three single-threshold cubes and concatenate them into one
        # multi-threshold cube.
        changes = {'points': [0.25], 'units': '1', 'var_name': 'threshold'}
        cube_with_thresh1 = add_coord(
            thresh_cube.copy(), 'precipitation_amount', changes)
        changes = {'points': [0.5], 'units': '1', 'var_name': 'threshold'}
        cube_with_thresh2 = add_coord(
            thresh_cube.copy(), 'precipitation_amount', changes)
        changes = {'points': [0.75], 'units': '1', 'var_name': 'threshold'}
        cube_with_thresh3 = add_coord(
            thresh_cube.copy(), 'precipitation_amount', changes)
        cubelist = iris.cube.CubeList([cube_with_thresh1, cube_with_thresh2,
                                       cube_with_thresh3])
        thresh_cubes = concatenate_cubes(
            cubelist, coords_to_slice_over='precipitation_amount')
        plugin = TriangularWeightedBlendAcrossAdjacentPoints(
            'forecast_period', self.forecast_period, 'hours', width)
        result = plugin(thresh_cubes)
        # Test that the result cube retains threshold co-ordinates
        # from original cube.
        self.assertEqual(thresh_cubes.coord('precipitation_amount'),
                         result.coord('precipitation_amount'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 42.993243
| 79
| 0.666274
|
4a0b55e6d26562e0008015bbb243feda2a21147d
| 632
|
py
|
Python
|
tools/help-center-exporter/print-article.py
|
mpillar/zendesk-tools
|
0ebaaac2d1481f4459fef60be352b223e75099e3
|
[
"MIT"
] | 2
|
2015-03-04T16:42:52.000Z
|
2016-03-24T04:00:03.000Z
|
tools/help-center-exporter/print-article.py
|
mpillar/zendesk-tools
|
0ebaaac2d1481f4459fef60be352b223e75099e3
|
[
"MIT"
] | null | null | null |
tools/help-center-exporter/print-article.py
|
mpillar/zendesk-tools
|
0ebaaac2d1481f4459fef60be352b223e75099e3
|
[
"MIT"
] | null | null | null |
"""
Python script to print a single zendesk article.
"""
import sys
from zendesk.api import DomainConfiguration
from zendesk.api import HelpCenter
from zendesk.formatter import format_tags_remote
def main(sub_domain, aid):
    """Fetch one Help Center article by id and print it as HTML."""
    domain_config = DomainConfiguration(sub_domain)
    help_center = HelpCenter(domain_config)
    article = HelpCenter.Article(help_center, aid)
    heading = '<h2>%s</h2>' % article.get_name()
    print(heading)
    # Resolve any remote tags in the body before printing.
    print(format_tags_remote(domain_config, article.get_body()))
if __name__ == '__main__':
    # Expect exactly two CLI arguments: the subdomain and an article id.
    args = sys.argv
    if len(args) == 3:
        main(args[1], args[2])
    else:
        print('Usage: python %s <zendesk_sub_domain> <article_id>' % args[0])
| 26.333333
| 81
| 0.697785
|
4a0b565a3582b685f936fa6268803daae6d62b2f
| 12,096
|
py
|
Python
|
openaset/dinkestebingtinggi/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | null | null | null |
openaset/dinkestebingtinggi/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | null | null | null |
openaset/dinkestebingtinggi/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | 2
|
2019-02-18T05:25:23.000Z
|
2021-02-01T16:45:23.000Z
|
### $Id: admin.py,v 1.13 2017/12/04 08:16:37 muntaza Exp $
from django.contrib import admin
from umum.models import Provinsi, Kabupaten, LokasiBidang, SKPD, SUBSKPD, KodeBarang, HakTanah, SatuanBarang, KeadaanBarang, SKPenghapusan, MutasiBerkurang, JenisPemanfaatan, AsalUsul, Tahun, GolonganBarang, Tanah, KontrakTanah, PenghapusanTanah, TanahPenghapusan, PemanfaatanTanah, TanahPemanfaatan, HargaTanah
#### Tanah
from umum.models import TanahDinkesTebingTinggi, HargaTanahDinkesTebingTinggi
from umum.admin import HargaTanahInline, TanahAdmin, KontrakTanahAdmin, HargaTanahAdmin
from umum.admin import GedungBangunanInline
#### Gedung Bangunan
from gedungbangunan.models import StatusTingkat, StatusBeton, KontrakGedungBangunan, HargaGedungBangunan, GedungBangunan, PenghapusanGedungBangunan, PemanfaatanGedungBangunan, TahunBerkurangGedungBangunan, Ruangan
from gedungbangunan.models import GedungBangunanPemanfaatan, GedungBangunanPenghapusan, GedungBangunanRuangan
from gedungbangunan.models import GedungBangunanDinkesTebingTinggi, HargaGedungBangunanDinkesTebingTinggi, GedungBangunanRuanganDinkesTebingTinggi
from gedungbangunan.admin import HargaGedungBangunanInline, GedungBangunanAdmin, KontrakGedungBangunanAdmin, HargaGedungBangunanAdmin, RuanganInline, GedungBangunanRuanganAdmin
#### Peralatan Mesin
from peralatanmesin.models import KontrakPeralatanMesin, HargaPeralatanMesin, PeralatanMesin, PenghapusanPeralatanMesin, PemanfaatanPeralatanMesin, TahunBerkurangPeralatanMesin
#untuk menampung inline
from peralatanmesin.models import PeralatanMesinPemanfaatan, PeralatanMesinPenghapusan
from peralatanmesin.models import PeralatanMesinDinkesTebingTinggi, HargaPeralatanMesinDinkesTebingTinggi
from peralatanmesin.admin import HargaPeralatanMesinInline, PeralatanMesinAdmin, KontrakPeralatanMesinAdmin, HargaPeralatanMesinAdmin
#### Class Tanah
# Admin bindings for the Tanah (land) assets of this unit.
# NOTE(review): the magic numbers below appear to identify this office --
# id_skpd=5 (department) and id_sub_skpd=38 (sub-department); verify against
# the SKPD/SUBSKPD tables.  id_mutasi_berkurang=5 presumably selects
# non-disposed assets.
class GedungBangunanDinkesTebingTinggiInline(GedungBangunanInline):
    model = GedungBangunanDinkesTebingTinggi
class HargaTanahDinkesTebingTinggiInline(HargaTanahInline):
    model = HargaTanahDinkesTebingTinggi
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to this department's contracts.
        if db_field.name == "id_kontrak":
            kwargs["queryset"] = KontrakTanah.objects.filter(id_skpd__exact=5)
        return super(HargaTanahDinkesTebingTinggiInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TanahDinkesTebingTinggiAdmin(TanahAdmin):
    inlines = [HargaTanahDinkesTebingTinggiInline]
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the sub-department dropdown to this office only.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=38
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(TanahDinkesTebingTinggiAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
    def get_queryset(self, request):
        # Show only this office's land records in the change list.
        qs_id_sub_skpd=38
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaTanahDinkesTebingTinggiAdmin(HargaTanahAdmin):
    def get_queryset(self, request):
        # Valuations restricted to land owned by this office.
        qs_id_sub_skpd=38
        tanah_qs = Tanah.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_tanah__in=tanah_qs)
### Register Tanah DinkesTebingTinggi
admin.site.register(TanahDinkesTebingTinggi, TanahDinkesTebingTinggiAdmin)
admin.site.register(HargaTanahDinkesTebingTinggi, HargaTanahDinkesTebingTinggiAdmin)
#### Class Gedung dan Bangunan
# Admin bindings for the Gedung/Bangunan (building) assets of this unit.
# Same office filters as the Tanah section: id_skpd=5, id_sub_skpd=38;
# id_golongan_barang=3 presumably selects the "buildings" asset class.
class HargaGedungBangunanDinkesTebingTinggiInline(HargaGedungBangunanInline):
    model = HargaGedungBangunanDinkesTebingTinggi
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Contract dropdown limited to this department's contracts.
        if db_field.name == "id_kontrak_gedung_bangunan":
            kwargs["queryset"] = KontrakGedungBangunan.objects.filter(id_skpd__exact=5)
        return super(HargaGedungBangunanDinkesTebingTinggiInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class GedungBangunanDinkesTebingTinggiAdmin(GedungBangunanAdmin):
    inlines = [HargaGedungBangunanDinkesTebingTinggiInline]
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Sub-department dropdown limited to this office.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=38
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(GedungBangunanDinkesTebingTinggiAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
    def get_queryset(self, request):
        qs_id_sub_skpd=38
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanRuanganDinkesTebingTinggiAdmin(GedungBangunanRuanganAdmin):
    def get_queryset(self, request):
        # Same filters as the building admin, for the room-level view.
        qs = 38
        return self.model.objects.filter(id_sub_skpd__exact=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class HargaGedungBangunanDinkesTebingTinggiAdmin(HargaGedungBangunanAdmin):
    def get_queryset(self, request):
        # Valuations restricted to this office's buildings.
        qs_id_sub_skpd=38
        gedung_bangunan_qs = GedungBangunan.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_gedung_bangunan__in=gedung_bangunan_qs)
###Register GedungBangunan DinkesTebingTinggi
admin.site.register(GedungBangunanDinkesTebingTinggi, GedungBangunanDinkesTebingTinggiAdmin)
admin.site.register(GedungBangunanRuanganDinkesTebingTinggi, GedungBangunanRuanganDinkesTebingTinggiAdmin)
admin.site.register(HargaGedungBangunanDinkesTebingTinggi, HargaGedungBangunanDinkesTebingTinggiAdmin)
#### Class Peralatan Mesin
# Admin bindings for the Peralatan/Mesin (equipment and machinery) assets.
# Office filters as elsewhere in this module: id_skpd=5, id_sub_skpd=38.
class HargaPeralatanMesinDinkesTebingTinggiInline(HargaPeralatanMesinInline):
    model = HargaPeralatanMesinDinkesTebingTinggi
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Contract dropdown limited to this department's contracts.
        if db_field.name == "id_kontrak_peralatan_mesin":
            kwargs["queryset"] = KontrakPeralatanMesin.objects.filter(id_skpd__exact=5)
        return super(HargaPeralatanMesinDinkesTebingTinggiInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class PeralatanMesinDinkesTebingTinggiAdmin(PeralatanMesinAdmin):
    inlines = [HargaPeralatanMesinDinkesTebingTinggiInline]
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Sub-department dropdown limited to this office.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=38
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        # Room dropdown limited to rooms in this office's buildings.
        if db_field.name == "id_ruangan":
            qs_id_sub_skpd=38
            kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__exact=qs_id_sub_skpd)
        return super(PeralatanMesinDinkesTebingTinggiAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
    def get_queryset(self, request):
        qs_id_sub_skpd=38
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaPeralatanMesinDinkesTebingTinggiAdmin(HargaPeralatanMesinAdmin):
    def get_queryset(self, request):
        # Valuations restricted to this office's equipment.
        qs_id_sub_skpd=38
        peralatan_mesin_qs = PeralatanMesin.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_peralatan_mesin__in=peralatan_mesin_qs)
###Register PeralatanMesin DinkesTebingTinggi
admin.site.register(PeralatanMesinDinkesTebingTinggi, PeralatanMesinDinkesTebingTinggiAdmin)
admin.site.register(HargaPeralatanMesinDinkesTebingTinggi, HargaPeralatanMesinDinkesTebingTinggiAdmin)
#### Jalan, Irigasi, dan Jaringan
from jalanirigasijaringan.models import KontrakJalanIrigasiJaringan, HargaJalanIrigasiJaringan, JalanIrigasiJaringan, PenghapusanJalanIrigasiJaringan, PemanfaatanJalanIrigasiJaringan, TahunBerkurangJalanIrigasiJaringan
from jalanirigasijaringan.models import JalanIrigasiJaringanPemanfaatan, JalanIrigasiJaringanPenghapusan
from jalanirigasijaringan.models import JalanIrigasiJaringanDinkesTebingTinggi, HargaJalanIrigasiJaringanDinkesTebingTinggi
from jalanirigasijaringan.admin import HargaJalanIrigasiJaringanInline, JalanIrigasiJaringanAdmin, KontrakJalanIrigasiJaringanAdmin, HargaJalanIrigasiJaringanAdmin
#### Class Jalan, Irigasi dan Jaringan
# Admin bindings for Jalan/Irigasi/Jaringan (roads, irrigation, networks).
# Office filters as elsewhere: id_skpd=5, id_sub_skpd=38;
# id_golongan_barang=4 presumably selects this asset class.
class HargaJalanIrigasiJaringanDinkesTebingTinggiInline(HargaJalanIrigasiJaringanInline):
    model = HargaJalanIrigasiJaringanDinkesTebingTinggi
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Contract dropdown limited to this department's contracts.
        if db_field.name == "id_kontrak_jalan_irigasi_jaringan":
            kwargs["queryset"] = KontrakJalanIrigasiJaringan.objects.filter(id_skpd__exact=5)
        return super(HargaJalanIrigasiJaringanDinkesTebingTinggiInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class JalanIrigasiJaringanDinkesTebingTinggiAdmin(JalanIrigasiJaringanAdmin):
    inlines = [HargaJalanIrigasiJaringanDinkesTebingTinggiInline]
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Sub-department dropdown limited to this office.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=38
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(JalanIrigasiJaringanDinkesTebingTinggiAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
    def get_queryset(self, request):
        qs_id_sub_skpd=38
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=5)
class HargaJalanIrigasiJaringanDinkesTebingTinggiAdmin(HargaJalanIrigasiJaringanAdmin):
    def get_queryset(self, request):
        # Valuations restricted to this office's road/irrigation assets.
        qs_id_sub_skpd=38
        jalan_irigasi_jaringan_qs = JalanIrigasiJaringan.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_jalan_irigasi_jaringan__in=jalan_irigasi_jaringan_qs)
###Register JalanIrigasiJaringan DinkesTebingTinggi
admin.site.register(JalanIrigasiJaringanDinkesTebingTinggi, JalanIrigasiJaringanDinkesTebingTinggiAdmin)
admin.site.register(HargaJalanIrigasiJaringanDinkesTebingTinggi, HargaJalanIrigasiJaringanDinkesTebingTinggiAdmin)
#### Aset Tetap Lainnya
from atl.models import KontrakATL, HargaATL, ATL, PenghapusanATL, PemanfaatanATL, TahunBerkurangATL, TahunBerkurangUsulHapusATL
from atl.models import ATLPemanfaatan, ATLPenghapusan
from atl.models import ATLDinkesTebingTinggi, HargaATLDinkesTebingTinggi
from atl.admin import HargaATLInline, ATLAdmin, KontrakATLAdmin, HargaATLAdmin
#### Class Aset Tetap Lainnya
# Admin bindings for ATL (Aset Tetap Lainnya, "other fixed assets").
# Office filters as elsewhere: id_skpd=5, id_sub_skpd=38.
class HargaATLDinkesTebingTinggiInline(HargaATLInline):
    model = HargaATLDinkesTebingTinggi
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Contract dropdown limited to this department's contracts.
        if db_field.name == "id_kontrak_atl":
            kwargs["queryset"] = KontrakATL.objects.filter(id_skpd__exact=5)
        return super(HargaATLDinkesTebingTinggiInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class ATLDinkesTebingTinggiAdmin(ATLAdmin):
    inlines = [HargaATLDinkesTebingTinggiInline]
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Sub-department dropdown limited to this office.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=38
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        # Room dropdown limited to rooms in this office's buildings.
        if db_field.name == "id_ruangan":
            qs_id_sub_skpd=38
            kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__exact=qs_id_sub_skpd)
        return super(ATLDinkesTebingTinggiAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
    def get_queryset(self, request):
        qs_id_sub_skpd=38
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaATLDinkesTebingTinggiAdmin(HargaATLAdmin):
    def get_queryset(self, request):
        # Valuations restricted to this office's ATL records.
        qs_id_sub_skpd=38
        atl_qs = ATL.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_atl__in=atl_qs)
###Register ATL DinkesTebingTinggi
admin.site.register(ATLDinkesTebingTinggi, ATLDinkesTebingTinggiAdmin)
admin.site.register(HargaATLDinkesTebingTinggi, HargaATLDinkesTebingTinggiAdmin)
| 41.56701
| 311
| 0.809441
|
4a0b567ae96fa06f32cb0b2d82ece41be4c65eee
| 3,491
|
py
|
Python
|
perest.py
|
georgelegrand/first_gog
|
50510f392e19662ee56b863698dec1963460f1f9
|
[
"MIT"
] | null | null | null |
perest.py
|
georgelegrand/first_gog
|
50510f392e19662ee56b863698dec1963460f1f9
|
[
"MIT"
] | null | null | null |
perest.py
|
georgelegrand/first_gog
|
50510f392e19662ee56b863698dec1963460f1f9
|
[
"MIT"
] | null | null | null |
'''
ОТКРЫТЫЙ ТЕКСТ: двойная перестановка
МАРШРУТ ВПИСЫВАНИЯ: слева - направо
МАРШРУТ ВЫПИСЫВАНИЯ: сверху - вниз
СТОЛБЦЫ: ( 3, 1, 4, 2)
СТРОКИ: ( 3, 2, 4, 1, 5)
'''
def checker(col_set, col_dim):
    # Validate a user-entered permutation string, re-prompting on failure.
    # NOTE(review): only *adjacent* duplicate digits are detected (not all
    # duplicates or out-of-range digits), and the recursive call's result
    # is discarded here while genOrder() also ignores checker's return
    # value -- a corrected sequence is never actually used.  Verify intent.
    for i in range(1, len(col_set)):
        if col_set[i] == col_set[i-1]:
            print("Неправильная последовательность! Введите другую: ")
            col_set = str(input("Введите новую последовательность от 0 до " + str(col_dim-1) +" включительно (без пробелов): "))
            checker(col_set, col_dim)
    return col_set
def d_print(x, y):
    """Print *x* as the decrypted text; *y* is accepted but unused."""
    decrypted = x
    print("Расшифрованный текст: ", decrypted)
    return 1
def setToCh(smt):
    """Convert a string of digit characters into a list of ints."""
    digits = [int(ch) for ch in smt]
    print(type(digits), digits)
    return digits
def strToTable(msg, row_dim, col_dim):
    """Write *msg* into a row_dim x col_dim table, left to right."""
    table = [[msg[r * col_dim + c] for c in range(col_dim)]
             for r in range(row_dim)]
    print(table)
    return table
def changeCols(msg_table, col_ch, row_dim):
    """Return the first *row_dim* rows of *msg_table* with columns
    permuted into the order given by *col_ch*."""
    permuted = [[msg_table[r][j] for j in col_ch] for r in range(row_dim)]
    print("Таблица после перестановки столбцов: ", permuted)
    return permuted
def changeRows(msg_table, row_set):
    """Return the rows of *msg_table* reordered by the digit sequence
    *row_set* (each element is convertible to an int row index)."""
    reordered = [msg_table[int(idx)] for idx in row_set]
    print("Таблица после перестановки строк: ", reordered)
    return reordered
'''
def printCrypt(msg_table, col_dim, row_dim): #выписывает слева-направо
print_msg = []
for i in range(0, len(msg_table)):
for j in range(0, len(msg_table[i])):
if msg_table[i][j] != "+":
print_msg.append(msg_table[i][j])
print_msg = "".join(print_msg)
print("Зашифрованный текст: ", print_msg)
'''
def printCrypt(msg_table, col_dim, row_dim):
    """Read the table column by column (top to bottom), skipping "+"
    padding cells, and print the resulting ciphertext."""
    chars = [msg_table[r][c]
             for c in range(col_dim)
             for r in range(row_dim)
             if msg_table[r][c] != "+"]
    print("Зашифрованный текст: ", "".join(chars))
def decryptTable(msg, msg_table, col_dim, row_dim, col_ch, row_set):
    # Attempt to invert the transposition by re-applying the permutations.
    # NOTE(review): the changeRows() result is immediately overwritten --
    # changeCols() is applied to the *original* msg_table, so the row
    # permutation has no effect.  Likely a bug; verify intended order.
    d_msg_table = changeRows(msg_table, row_set) # permute rows
    d_msg_table = changeCols(msg_table, col_ch, row_dim) # permute columns
    # NOTE(review): d_print() prints its first argument, i.e. the original
    # plaintext *msg*, not the rebuilt table -- verify.
    d_print(msg, d_msg_table)
    return d_msg_table
def genOrder(msg):
    # Interactively build the transposition table: ask for the column
    # count, pad the message with spaces to fill the last row, then read
    # column and row permutations and apply them.
    col_dim = int(input("Введите количество столбцов таблицы: "))
    #col_dim = random.randint(2,len(msg)-1)
    if len(msg) % col_dim == 0:
        row_dim = int(len(msg) / col_dim)
    else:
        row_dim = int(len(msg) // col_dim + 1)
        # Pad so the message exactly fills row_dim * col_dim cells.
        for add in range(col_dim - (len(msg) % col_dim)):
            #msg = msg + "+"
            msg = msg + " "
    print(msg)
    col_set = str(input("Введите порядок столбцов от 0 до " + str(col_dim-1) +" включительно (без пробелов): "))
    # NOTE(review): checker()'s return value is discarded here, so a
    # corrected sequence entered on re-prompt is never used -- verify.
    checker(col_set, col_dim)
    col_ch = setToCh(col_set)
    row_set = str(input("Введите порядок строк от 0 до " + str(row_dim-1) +" включительно (без пробелов): "))
    checker(row_set, row_dim)
    # NOTE(review): row_ch is computed but unused; changeRows() below
    # converts the digits of row_set itself.
    row_ch = setToCh(row_set)
    msg_table = strToTable(msg,row_dim,col_dim)
    msg_table = changeCols(msg_table, col_ch, row_dim)
    msg_table = changeRows(msg_table, row_set)
    return msg_table, col_dim, row_dim, col_ch, row_set
# --- Interactive entry point: read a message, encrypt it, then attempt decryption. ---
print("\n")
print("Праздник шифрования начинается!!!")
print("\n")
msg = input("Введите текст: ")
res = genOrder(msg)
printCrypt(res[0], res[1], res[2])
decryptTable(msg, res[0], res[1], res[2], res[3], res[4])
| 29.091667
| 120
| 0.670581
|
4a0b56836a3e7eb5dce0dee5da999a2bc47c0f32
| 3,305
|
py
|
Python
|
rllib/agents/mbmpo/model_vector_env.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/mbmpo/model_vector_env.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/mbmpo/model_vector_env.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
from ray.rllib.utils.annotations import override
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import get_global_worker
from ray.rllib.env.base_env import BaseEnv
logger = logging.getLogger(__name__)
def custom_model_vector_env(env):
    """Return a vectorized BaseEnv wrapper around the current environment.

    Worker configuration is obtained via ``get_global_worker()``. On rollout
    workers (worker_index truthy) the env is swapped for the learned-dynamics
    vectorized wrapper before being converted to a BaseEnv.
    """
    worker = get_global_worker()
    if worker.worker_index:
        env = _VectorizedModelGymEnv(
            make_env=worker.make_env_fn,
            existing_envs=[env],
            num_envs=worker.num_envs,
            observation_space=env.observation_space,
            action_space=env.action_space,
        )
    return BaseEnv.to_base_env(
        env,
        make_env=worker.make_env_fn,
        num_envs=worker.num_envs,
        remote_envs=False,
        remote_env_batch_wait_ms=0)
class _VectorizedModelGymEnv(VectorEnv):
    """Vectorized Environment Wrapper for MB-MPO. Primary change is
    in the vector_step method, which queries the learned dynamics model
    for next observations instead of stepping the real environments.
    """
    def __init__(self,
                 make_env=None,
                 existing_envs=None,
                 num_envs=1,
                 *,
                 observation_space=None,
                 action_space=None,
                 env_config=None):
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
        # Construct additional envs until we have num_envs of them.
        while len(self.envs) < num_envs:
            self.envs.append(self.make_env(len(self.envs)))
        super().__init__(
            observation_space=observation_space
            or self.envs[0].observation_space,
            action_space=action_space or self.envs[0].action_space,
            num_envs=num_envs)
        # Grab the learned dynamics model (and its device) from the policy
        # held by the global rollout worker.
        worker = get_global_worker()
        self.model, self.device = worker.foreach_policy(
            lambda x, y: (x.dynamics_model, x.device))[0]
    @override(VectorEnv)
    def vector_reset(self):
        """Reset all sub-envs and cache their initial observations."""
        self.cur_obs = [e.reset() for e in self.envs]
        return self.cur_obs
    @override(VectorEnv)
    def reset_at(self, index):
        """Reset only the sub-env at *index*, updating the cached obs."""
        obs = self.envs[index].reset()
        self.cur_obs[index] = obs
        return obs
    @override(VectorEnv)
    def vector_step(self, actions):
        """Step all envs at once via the learned dynamics model.

        Rewards (and dones, if the env defines a vectorized ``done``) are
        computed by the first real env's batch functions.
        """
        if self.cur_obs is None:
            raise ValueError("Need to reset env first")
        # Batch observations/actions along axis 0: (num_envs, ...).
        obs_batch = np.stack(self.cur_obs, axis=0)
        action_batch = np.stack(actions, axis=0)
        next_obs_batch = self.model.predict_model_batches(
            obs_batch, action_batch, device=self.device)
        # Clamp model predictions — presumably to keep long model rollouts
        # from diverging numerically; TODO confirm the bound is intentional.
        next_obs_batch = np.clip(next_obs_batch, -1000, 1000)
        rew_batch = self.envs[0].reward(obs_batch, action_batch,
                                        next_obs_batch)
        if hasattr(self.envs[0], "done"):
            dones_batch = self.envs[0].done(next_obs_batch)
        else:
            # Env provides no termination function: episodes never end here.
            dones_batch = np.asarray([False for _ in range(self.num_envs)])
        info_batch = [{} for _ in range(self.num_envs)]
        self.cur_obs = next_obs_batch
        return list(next_obs_batch), list(rew_batch), list(
            dones_batch), info_batch
    @override(VectorEnv)
    def get_unwrapped(self):
        """Return the list of underlying (real) sub-envs."""
        return self.envs
| 31.778846
| 75
| 0.632678
|
4a0b576b504a43635db63a3649d789a15ee83059
| 3,639
|
py
|
Python
|
mimic/model/mailgun_objects.py
|
ksheedlo/mimic
|
c84b6a0d336e8a37a685b5d71537aec5e44d9a8f
|
[
"Apache-2.0"
] | 141
|
2015-01-07T19:28:31.000Z
|
2022-02-11T06:04:13.000Z
|
mimic/model/mailgun_objects.py
|
ksheedlo/mimic
|
c84b6a0d336e8a37a685b5d71537aec5e44d9a8f
|
[
"Apache-2.0"
] | 575
|
2015-01-04T20:23:08.000Z
|
2019-10-04T08:20:04.000Z
|
mimic/model/mailgun_objects.py
|
ksheedlo/mimic
|
c84b6a0d336e8a37a685b5d71537aec5e44d9a8f
|
[
"Apache-2.0"
] | 63
|
2015-01-09T20:39:41.000Z
|
2020-07-06T14:20:56.000Z
|
"""
Mailgun object storage
"""
from __future__ import absolute_import, division, unicode_literals
import attr
import time
@attr.s
class Message(object):
    """
    A :obj:`Message` is a representation of an email in Mailgun.
    It can produce JSON-serializable objects for various pieces of
    state that are required for API responses.
    """
    message_id = attr.ib()
    to = attr.ib()
    msg_from = attr.ib()
    subject = attr.ib()
    body = attr.ib()
    custom_headers = attr.ib(default=attr.Factory(dict))
    # Template of event fields shared by every message. The "timestamp"
    # value below is only an import-time placeholder; generate_events()
    # overwrites it with the current time on each call.
    static_defaults = {
        "tags": [],
        "delivery-status": {
            "message": "",
            "code": 0,
            "description": None,
            "session-seconds": 1.114408016204834
        },
        "envelope": {
            "transport": "smtp",
            "sending-ip": "127.0.0.1",
        },
        "recipient-domain": "mailgun.com",
        "id": "mimic-LCZuENBlS0iWjs-yBpNJaQ",
        "campaigns": [],
        "user-variables": {},
        "flags": {
            "is-routed": None,
            "is-authenticated": True,
            "is-system-test": False,
            "is-test-mode": False
        },
        "log-level": "info",
        "timestamp": time.time(),
        "message": {
            "headers": {},
            "attachments": [],
            "recipients": [],
            "size": 0
        },
        "recipient": None,
        "event": "delivered"
    }
    def generate_events(self):
        """
        Long-form JSON-serializable object representation of this message, as
        returned by a GET on this individual message.
        """
        # A shallow copy suffices: every key customized below is replaced
        # wholesale rather than mutated in place.
        template = self.static_defaults.copy()
        template.update({
            # BUG FIX: the timestamp used to be frozen at module import time
            # (time.time() evaluated once inside static_defaults); stamp each
            # event at generation time instead.
            "timestamp": time.time(),
            "envelope": {
                "sender": self.msg_from,
                "targets": self.to
            },
            "message": {
                "headers": {
                    "to": self.to,
                    "message-id": self.message_id,
                    "from": self.msg_from,
                    "subject": self.subject
                },
                "recipients": [self.to],
                # NOTE(review): "recipient" is nested inside "message" here
                # while static_defaults also carries a top-level "recipient"
                # — confirm which one consumers expect.
                "recipient": self.to
            }
        })
        return template
@attr.s
class MessageStore(object):
    """
    A collection of :obj:`Message` objects.
    """
    message_store = attr.ib(default=attr.Factory(list))
    def add_to_message_store(self, **attributes):
        """
        Create a new Message object and add it to the
        :obj: `message_store`
        """
        self.message_store.append(Message(**attributes))
        return
    def list_messages(self, filter_by=None):
        """
        List all the messages.
        :param str filter_by: supports filtering the List by
            `to` addresses only currently.
        """
        if filter_by:
            events = [each.generate_events()
                      for each in self.message_store
                      if each.to in filter_by]
        else:
            events = [each.generate_events() for each in self.message_store]
        return {
            "items": events,
            "paging": {
                "next": "http://i-am-a-fake-link-to-nothing",
                "last": "http://i-am-a-fake-link-to-nothing",
                "first": "http://i-am-a-fake-link-to-nothing=",
                "previous": "http://i-am-a-fake-link-to-nothing=="
            }}
    def filter_message_by_to_address(self, to_address):
        """
        Retrieve a :obj:`Message` object by its `to` address.

        Returns None when no stored message matches.
        """
        for candidate in self.message_store:
            if candidate.to == to_address:
                return candidate
        return None
| 28.209302
| 77
| 0.508107
|
4a0b58b8442242a34b0d4c6bf303dbd918cc41a7
| 786
|
py
|
Python
|
examples/locations/make_enum.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 6
|
2015-04-22T15:17:51.000Z
|
2019-03-01T16:26:35.000Z
|
examples/locations/make_enum.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 174
|
2015-02-18T13:32:39.000Z
|
2019-06-26T18:56:18.000Z
|
examples/locations/make_enum.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 11
|
2015-04-22T15:17:59.000Z
|
2019-06-24T10:49:40.000Z
|
#!/usr/bin/env/python
"""
make_enum.py -- make enumerations for locations
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "BSD 3-Clause license"
__version__ = "0.1.2"
from datetime import datetime
from pump.vivopump import get_parms, create_enum
def main():
    """
    Generate the enumeration file for VIVO geographic locations.

    Runs a SPARQL query selecting every vivo:GeographicLocation with its
    shortest label and writes the enumeration to ``location_enum.txt``.
    NOTE: this module uses Python 2 ``print`` statements.
    """
    print datetime.now(), "Start"
    parms = get_parms()
    # SPARQL: shortest label per GeographicLocation, ordered by label
    query = """
    SELECT (MIN (?xlabel) AS ?short) ?vivo
    WHERE
    {
        ?vivo rdf:type vivo:GeographicLocation .
        ?vivo rdfs:label ?xlabel .
    }
    GROUP BY ?vivo
    ORDER BY ?short
    """
    create_enum("location_enum.txt", query, parms)
    print datetime.now(), "End"
# Script entry point.
if __name__ == "__main__":
    main()
| 18.27907
| 51
| 0.62341
|
4a0b58cfc65f32366188f761ff5f2dc3394301eb
| 8,664
|
py
|
Python
|
tests/testTrack.py
|
zivkaplan/gensound
|
4d4120f429ab2e9334a1a52b6d6c2f1a050bc260
|
[
"Apache-2.0"
] | null | null | null |
tests/testTrack.py
|
zivkaplan/gensound
|
4d4120f429ab2e9334a1a52b6d6c2f1a050bc260
|
[
"Apache-2.0"
] | null | null | null |
tests/testTrack.py
|
zivkaplan/gensound
|
4d4120f429ab2e9334a1a52b6d6c2f1a050bc260
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 23:28:31 2019
@author: Dror
This is for internal purposes; many errors due to changing code!
"""
import numpy as np
from gensound.signals import Signal, Sine, Square, Triangle, Sawtooth, WhiteNoise, WAV, Step
from gensound.transforms import Fade, SineAM, Shift, Pan, Extend, \
Downsample, Amplitude, \
Reverse, Repan, Gain, Limiter, Convolution, Slice, \
Mono, ADSR, CrossFade
# from gensound.filters import MovingAverage, LowPassBasic, Butterworth, IIR_basic, \
# IIR_general, IIR_OnePole, IIR_OnePole_LowPass, IIR_OnePole_HighPass
from gensound.amplifiers import GuitarAmp_Test
from gensound.curve import Curve, Constant, Line, Logistic, SineCurve, MultiCurve
from gensound.io import export_test # better than export_WAV for debugging
from gensound.musicTheory import midC, semitone
# Paths to the sample WAV files used throughout these scratch tests.
african = "../data/african_sketches_1.wav"
gtrcln = "../data/guitar_clean.wav"
### for testing filters
def SweepTest(stay=0.5e3, step=0.5e3): # start at really low A
    """Build a stepped upward pitch sweep starting at 55 Hz.

    Each of the 4 passes glides up by 4, then 3, then 5 semitones, holding
    the pitch for *stay* ms after each *step*-ms glide.
    """
    freq = 55
    passes = 4
    curve = Constant(freq, stay)
    for _ in range(passes):
        for jump in (4, 3, 5):
            target = freq * semitone**jump
            curve |= Line(freq, target, step) | Constant(target, stay)
            freq = target
    return Sine(frequency=curve, duration=(step + stay)*3*passes + stay)
#########
def lowpass_FIR_test():
    """Octave-stepped sine sweep with an FIR low-pass on the right channel only.

    NOTE(review): LowPassBasic's import is commented out at the top of the
    file, so running this currently raises NameError — confirm before use.
    """
    #s = WAV(african)[10e3:20e3]*LowPassBasic(cutoff=880, width=100)
    c = Line(55,110, 3e3) | Constant(110,2e3)
    c |= Line(110, 220, 3e3) | Constant(220, 2e3)
    c |= Line(220, 440, 3e3) | Constant(440, 2e3)
    c |= Line(440, 880, 3e3) | Constant(880, 2e3)
    s = Sine(frequency=c, duration=20e3)[0:2]
    s[1] *= LowPassBasic(cutoff=330, width=100)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    # reverse h?
    # using parallel track computation (should be 0.05 the time)
    #export_test(audio, lowpass_FIR_test)
def Butterworth_test():
    """Rising sine sweep with a Butterworth low-pass on the right channel.

    NOTE(review): Butterworth's import is commented out at the top of the
    file and play_Audio is never imported — this currently raises NameError.
    """
    #s = WAV(african)[10e3:20e3]
    c = Line(55,110, 3e3) | Constant(110,2e3)
    c |= Line(110, 220, 3e3) | Constant(220, 2e3)
    c |= Line(220, 440, 3e3) | Constant(440, 2e3)
    c |= Line(440, 880, 3e3) | Constant(880, 2e3)
    c |= Line(880, 2*880, 3e3) | Constant(2*880, 2e3)
    s = Sine(frequency=c, duration=20e3)[0:2]
    s[1] *= Butterworth(cutoff=880)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
    #export_test(audio, Butterworth_test)
def Butterworth_experiment():
    """Filter the left channel, auto-pan the right, then mix and play.

    NOTE(review): Butterworth and play_Audio are not currently imported.
    """
    s = WAV(african)[10e3:25e3]
    s1 = s[0]*Butterworth(cutoff=880)
    # Pan curve sweeps hard-left to hard-right over 13 s.
    c = Line(-100, 100, 13e3)
    s2 = s[1]*Pan(c)
    t = s1[0:2] + s2
    audio = t.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
    # export_test(audio, Butterworth_experiment)
def additive_complex_sound_test():
    """Additive synthesis demo: stack 19 enveloped partials per note over a melody."""
    def tone(f, duration):
        # Partial i: amplitude 1/i^1.5, attack staggered by i, sustain
        # level shrinking with i.
        return sum((1/i**1.5)*Sine(frequency = f*i, duration=duration)*ADSR((0.01e3)*(i), 0.8, 0.5+(1/(i+2)), 0.02e3)
                   for i in range(1, 20))
    melody = [midC(-3), midC(1), midC(-4), midC(4), midC(6), midC(2), midC(-1), midC(11)]*2
    note_len = 1e3
    piece = Signal.concat(*[tone(f, note_len) for f in melody])
    audio = piece.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, additive_complex_sound_test)
def IIR_basic_test():
    """Apply the basic one-pole IIR smoother to the second half of a WAV slice.

    NOTE(review): IIR_basic's import is commented out at the top of the file.
    """
    s = WAV(african)[10e3:20e3]
    s[5e3:] *= IIR_basic() # y(n) = 0.3*x(n) + 0.7*y(n-1)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    # play_Audio(audio)
    export_test(audio, IIR_basic_test)
def IIR_general_test():
    """Apply a general IIR filter (explicit feedback/feedforward coefficients).

    NOTE(review): IIR_general's import is commented out at the top of the file.
    """
    s = WAV(african)[10e3:20e3]
    s[3e3:] *= IIR_general([0, -0.5,0,0],
                           [0.25, 0.15,0.07,0.03])
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    # play_Audio(audio)
    export_test(audio, IIR_general_test)
def IIR_one_pole_test():
    """One-pole IIR with coefficients derived from an 880 Hz cutoff at 44.1 kHz.

    NOTE(review): IIR_OnePole's import is commented out at the top of the file.
    """
    s = WAV(african)[10e3:20e3]
    # Normalized cutoff and the matching one-pole feedback coefficient.
    Fc = 880/44100
    b1 = np.e**(-2*np.pi*Fc)
    s[:,:5e3] *= IIR_OnePole(1-b1, b1)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, IIR_one_pole_test)
def IIR_one_pole_filters_test():
    """Low-pass the first half and high-pass the second half of a WAV slice.

    NOTE(review): the IIR_OnePole_* imports are commented out at the top of
    the file, so running this currently raises NameError.
    """
    s = WAV(african)[10e3:20e3]
    s[:5e3] *= IIR_OnePole_LowPass(880)
    s[5e3:] *= IIR_OnePole_HighPass(440)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, IIR_one_pole_filters_test)
def sweep_test():
    """Play the SweepTest signal (the returned value of play is kept but unused)."""
    s = SweepTest()
    #s *= Butterworth(cutoff=440)
    audio = s.play(max_amplitude=0.2)
def test_transform_chain():
    """Compose several transforms into one chain before applying to the signal.

    NOTE(review): MovingAverage's import is commented out and play_Audio is
    never imported — this currently raises NameError.
    """
    s = WAV(african)[10e3:20e3]
    t = MovingAverage(5) * Fade(duration=0.5e3)
    # Gain follows a dB curve: down 10 dB over 3 s, back up over 5 s.
    t *= Gain(Line(0,-10,3e3) | Line(-10, 0, 5e3))
    s *= t
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def test_negative_shift():
    """Concatenate a slice with its second half shifted backwards by 2.5 s.

    NOTE(review): play_Audio is never imported in this file.
    """
    s = WAV(african)[10e3:20e3]
    s = s[:5e3] | s[5e3:]*Shift(-2.5e3)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #export_test(audio, test_negative_shift)
    play_Audio(audio)
def test_negative_shift_combine():
    """Shift the tail of a slice forward by 1 s in place, then export."""
    s = WAV(african)[10e3:20e3]
    s[5e3:] = s[5e3:]*Shift(1e3)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    export_test(audio, test_negative_shift_combine)
    #play_Audio(audio)
def crossfade_bitransform_syntax_test():
    """Exercise the `a | CrossFade(...) | b` bi-transform syntax.

    The mixdown result is computed but neither played nor exported here.
    """
    s = WAV(african)[10e3:20e3]
    s = s[:5e3] | CrossFade(duration=0.5e3) | s[5e3:]
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    # now with linear amp fade, not db linear fade
    #export_test(audio, crossfade_bitransform_syntax_test)
def guitar_amp_test():
    """Drive the clean guitar sample through a gain stage and the test amp model."""
    s = WAV(gtrcln)*Gain(20)*GuitarAmp_Test(harshness=10,cutoff=4000)
    audio = s.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    export_test(audio, guitar_amp_test)
def one_impulse_reverb_test():
    """Apply the single-impulse reverb effect, play it, and export the mixdown."""
    from gensound.effects import OneImpulseReverb
    s = WAV(african)[10e3:20e3]*OneImpulseReverb(mix=1.2, num=2000, curve="steep")
    s.play(44100, max_amplitude=0.2)
    export_test(s.mixdown(44100), one_impulse_reverb_test)
def next_gen_parse_osc_melody_test():
    """Three-voice square-wave melody built from the string note parser, then exported."""
    from gensound import Square, Signal
    sig = Square
    t = 0.5e3
    v1 = sig("C3 Eb F G F Eb "*10, duration=t)
    v2 = sig("Eb4=3 Bb "*10, duration=t)
    v3 = sig("G4=2 A D, "*10, duration=t)
    s = Signal()
    s[0] = v2
    s[1] = v3
    # Bass line mixed into both channels at half volume.
    s += 0.5*v1
    export_test(s.mixdown(44100), next_gen_parse_osc_melody_test)
def chorale_example():
    """Four-voice chorale (SATB) parsed from note strings, panned, smoothed, played."""
    sig = Triangle # Square?
    beat = 0.5e3 # 120 bpm
    fermata = 0.1 # make fermatas in the melody slightly longer
    pause = 0.6 # and breathe for a moment before starting the next phrase
    s = sig(f"r D5 D=2 C#=1 B-13=2 A=1 D E=2 F#-13={2+fermata} r={pause} F#=1 F#=2 F#=1 E=2 F#-13=1 G F#-13=2 E={2+fermata} r={pause} "
            f"D+16=1 E=2 F#-13=1 E=2 D+16=1 B-13 C#=2 D+9={2+fermata} r={pause} A'=1 F#-13=2 D+16=1 E=2 G=1 F#-13 E=2 D=3", beat)
    a = sig(f"r A4 B-16=2 A+16=1 G=2 F#-13=1 F# B-13 A A={2+fermata} r={pause} C#=1 B=2 B=1 B A A A D A A={2+fermata} r={pause} "
            f"B=1 A=2 A=1 B-13 A=0.5 G F#=1 B-13 B A#-13 B={2+fermata} r={pause} A=1 A=2 B=1 A=2 A=1 A B-13 A F#-13=3", beat)
    t = sig(f"r F#4-13 F#=2 F#=1 D=2 D=1 D D C#-13 D={2+fermata} r={pause} C#=1 D+16=2 D+16=1 D C#-13 D E A, D C#-13={2+fermata} r={pause} "
            f"F#=1 E=2 D=1 D C#-13 D+16 D G+5 F# F#={2+fermata} r={pause} E=1 F#-13=2 F#=1 E=2 C#-13=1 A B C#-13 D=3", beat)
    b = sig(f"r D3 B-16 D F# G B-13 D B-16 G A D,={2+fermata} r={pause} A#'-13=1 B=2 A=1 G#-13 A F#-13 C#-13 D F#-13 A={2+fermata} r={pause} "
            f"B=1 C#-13=2 D=1 G, A B G E F# B,={2+fermata} r={pause} C#'-13=1 D C# B C#-13 B A D G, A D,=3", beat)
    # Pan the voices across the stereo field: soprano/bass inner, tenor/alto outer.
    chorale = s*Pan(25) + b*Pan(-25) + t*Pan(80) + a*Pan(-80)
    from gensound.filters import MovingAverage
    chorale *= MovingAverage(5)#*Reverse()
    #export_test(chorale.mixdown(44100), chorale_example)
    chorale.play() # can you spot the parallel octaves?
# Scratch entry point: every experiment is currently commented out;
# uncomment one line at a time to run it.
if __name__ == "__main__":
    #Butterworth_experiment()
    #additive_complex_sound_test()
    #IIR_general_test()
    #sweep_test()
    #one_impulse_reverb_test()
    #next_gen_parse_osc_melody_test()
    #chorale_example()
    # custom_pan_scheme_test() # come back to this?
    #%%%%%
    ...
| 34.380952
| 142
| 0.634349
|
4a0b58d96e2203704c53921c10ca1be8f3352c10
| 1,429
|
py
|
Python
|
alveo/examples/caffe/pix2pix/bw2color/convert_rgb_bw.py
|
dendisuhubdy/Vitis-AI
|
524f65224c52314155dafc011d488ed30e458fcb
|
[
"Apache-2.0"
] | 1
|
2021-04-01T06:38:48.000Z
|
2021-04-01T06:38:48.000Z
|
alveo/examples/caffe/pix2pix/bw2color/convert_rgb_bw.py
|
dendisuhubdy/Vitis-AI
|
524f65224c52314155dafc011d488ed30e458fcb
|
[
"Apache-2.0"
] | null | null | null |
alveo/examples/caffe/pix2pix/bw2color/convert_rgb_bw.py
|
dendisuhubdy/Vitis-AI
|
524f65224c52314155dafc011d488ed30e458fcb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# convert rgb to b/w
#%% import package
import argparse
import os
#import numpy as np
from skimage import color
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import skimage.io as io
#%% main
if __name__ == "__main__":
    # Parse the required --image argument (path to the picture to convert).
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', required=True,
                        help='User can provide an image to run')
    args = vars(parser.parse_args())
    image_path = args["image"]
    # load image (value range depends on file type — presumably [0,255] for
    # JPEG; PNGs load as floats in [0,1] — TODO confirm expected inputs)
    image = plt.imread(image_path)
    # convert rgb to grayscale, result in [0,1]
    img_gray1 = color.rgb2gray(image)
    # replicate the single channel to 3 channels, still [0,1]
    img_gray2 = color.gray2rgb(img_gray1)
    # scale back to 8-bit pixel values for saving
    img_gray3 = img_gray2*255
    img_gray3 = img_gray3.astype('uint8')
    # BUG FIX: split('.')[0] truncated at the FIRST dot, breaking paths such
    # as "./dir.name/photo.jpg" or "my.photo.jpg"; splitext removes only the
    # final extension.
    fn = os.path.splitext(image_path)[0]
    io.imsave(fn+'_bw.jpg',img_gray3)
| 28.019608
| 74
| 0.688593
|
4a0b5a57a8876b2faca3ed7b09a6d22783750601
| 12,918
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/session_528e31d9c93d0ad2afb513a10d9d8be3.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/session_528e31d9c93d0ad2afb513a10d9d8be3.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/session_528e31d9c93d0ad2afb513a10d9d8be3.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Session(Base):
    """Configures a session under a BFD interface.
    The Session class encapsulates a list of session resources that are managed by the user.
    A list of resources can be retrieved from the server using the Session.find() method.
    The list can be managed by using the Session.add() and Session.remove() methods.
    """
    __slots__ = ()
    _SDM_NAME = 'session'
    # Maps the Python-facing attribute names to the REST API attribute names.
    _SDM_ATT_MAP = {
        'BfdSessionType': 'bfdSessionType',
        'Enabled': 'enabled',
        'EnabledAutoChooseSource': 'enabledAutoChooseSource',
        'IpType': 'ipType',
        'LocalBfdAddress': 'localBfdAddress',
        'MyDisc': 'myDisc',
        'RemoteBfdAddress': 'remoteBfdAddress',
        'RemoteDisc': 'remoteDisc',
        'RemoteDiscLearned': 'remoteDiscLearned',
    }
    # Allowed values for the enum-typed attributes.
    _SDM_ENUM_MAP = {
        'bfdSessionType': ['singleHop', 'multipleHops'],
        'ipType': ['ipv4', 'ipv6'],
    }
    def __init__(self, parent, list_op=False):
        """Bind this session resource container to its *parent* resource."""
        super(Session, self).__init__(parent, list_op)
    @property
    def BfdSessionType(self):
        # type: () -> str
        """
        Returns
        -------
        - str(singleHop | multipleHops): The type of BFD session, either single or multiple hop.
        """
        return self._get_attribute(self._SDM_ATT_MAP['BfdSessionType'])
    @BfdSessionType.setter
    def BfdSessionType(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['BfdSessionType'], value)
    @property
    def Enabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Enables the use of this route range for the simulated router. The default is disable.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
    @Enabled.setter
    def Enabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
    @property
    def EnabledAutoChooseSource(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: If true, enables the session to automatically choose the source IP address for the BFD session.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnabledAutoChooseSource'])
    @EnabledAutoChooseSource.setter
    def EnabledAutoChooseSource(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['EnabledAutoChooseSource'], value)
    @property
    def IpType(self):
        # type: () -> str
        """
        Returns
        -------
        - str(ipv4 | ipv6): The session is created with the remote IP. IPv4 or IPv6 (default = IPv4).
        """
        return self._get_attribute(self._SDM_ATT_MAP['IpType'])
    @IpType.setter
    def IpType(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['IpType'], value)
    @property
    def LocalBfdAddress(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The first IP address that will be used for simulated routers. IPv4 or IPv6.
        """
        return self._get_attribute(self._SDM_ATT_MAP['LocalBfdAddress'])
    @LocalBfdAddress.setter
    def LocalBfdAddress(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['LocalBfdAddress'], value)
    @property
    def MyDisc(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Needs to be a unique value in node. This option is used to demultiplex multiple BFD sessions.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MyDisc'])
    @MyDisc.setter
    def MyDisc(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['MyDisc'], value)
    @property
    def RemoteBfdAddress(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The remote address in which the BFD session is active.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemoteBfdAddress'])
    @RemoteBfdAddress.setter
    def RemoteBfdAddress(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['RemoteBfdAddress'], value)
    @property
    def RemoteDisc(self):
        # type: () -> int
        """
        Returns
        -------
        - number: This is the discriminator used by the remote system to identify the BFD session. This must be initialized to zero.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemoteDisc'])
    @RemoteDisc.setter
    def RemoteDisc(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['RemoteDisc'], value)
    @property
    def RemoteDiscLearned(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: The default is 0. If it is set to 0, then the Remote Discriminator will be learned.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemoteDiscLearned'])
    @RemoteDiscLearned.setter
    def RemoteDiscLearned(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['RemoteDiscLearned'], value)
    def update(self, BfdSessionType=None, Enabled=None, EnabledAutoChooseSource=None, IpType=None, LocalBfdAddress=None, MyDisc=None, RemoteBfdAddress=None, RemoteDisc=None, RemoteDiscLearned=None):
        # type: (str, bool, bool, str, str, int, str, int, bool) -> Session
        """Updates session resource on the server.
        Args
        ----
        - BfdSessionType (str(singleHop | multipleHops)): The type of BFD session, either single or multiple hop.
        - Enabled (bool): Enables the use of this route range for the simulated router. The default is disable.
        - EnabledAutoChooseSource (bool): If true, enables the session to automatically choose the source IP address for the BFD session.
        - IpType (str(ipv4 | ipv6)): The session is created with the remote IP. IPv4 or IPv6 (default = IPv4).
        - LocalBfdAddress (str): The first IP address that will be used for simulated routers. IPv4 or IPv6.
        - MyDisc (number): Needs to be a unique value in node. This option is used to demultiplex multiple BFD sessions.
        - RemoteBfdAddress (str): The remote address in which the BFD session is active.
        - RemoteDisc (number): This is the discriminator used by the remote system to identify the BFD session. This must be initialized to zero.
        - RemoteDiscLearned (bool): The default is 0. If it is set to 0, then the Remote Discriminator will be learned.
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, BfdSessionType=None, Enabled=None, EnabledAutoChooseSource=None, IpType=None, LocalBfdAddress=None, MyDisc=None, RemoteBfdAddress=None, RemoteDisc=None, RemoteDiscLearned=None):
        # type: (str, bool, bool, str, str, int, str, int, bool) -> Session
        """Adds a new session resource on the server and adds it to the container.
        Args
        ----
        - BfdSessionType (str(singleHop | multipleHops)): The type of BFD session, either single or multiple hop.
        - Enabled (bool): Enables the use of this route range for the simulated router. The default is disable.
        - EnabledAutoChooseSource (bool): If true, enables the session to automatically choose the source IP address for the BFD session.
        - IpType (str(ipv4 | ipv6)): The session is created with the remote IP. IPv4 or IPv6 (default = IPv4).
        - LocalBfdAddress (str): The first IP address that will be used for simulated routers. IPv4 or IPv6.
        - MyDisc (number): Needs to be a unique value in node. This option is used to demultiplex multiple BFD sessions.
        - RemoteBfdAddress (str): The remote address in which the BFD session is active.
        - RemoteDisc (number): This is the discriminator used by the remote system to identify the BFD session. This must be initialized to zero.
        - RemoteDiscLearned (bool): The default is 0. If it is set to 0, then the Remote Discriminator will be learned.
        Returns
        -------
        - self: This instance with all currently retrieved session resources using find and the newly added session resources available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained session resources in this instance from the server.
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, BfdSessionType=None, Enabled=None, EnabledAutoChooseSource=None, IpType=None, LocalBfdAddress=None, MyDisc=None, RemoteBfdAddress=None, RemoteDisc=None, RemoteDiscLearned=None):
        # type: (str, bool, bool, str, str, int, str, int, bool) -> Session
        """Finds and retrieves session resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve session resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all session resources from the server.
        Args
        ----
        - BfdSessionType (str(singleHop | multipleHops)): The type of BFD session, either single or multiple hop.
        - Enabled (bool): Enables the use of this route range for the simulated router. The default is disable.
        - EnabledAutoChooseSource (bool): If true, enables the session to automatically choose the source IP address for the BFD session.
        - IpType (str(ipv4 | ipv6)): The session is created with the remote IP. IPv4 or IPv6 (default = IPv4).
        - LocalBfdAddress (str): The first IP address that will be used for simulated routers. IPv4 or IPv6.
        - MyDisc (number): Needs to be a unique value in node. This option is used to demultiplex multiple BFD sessions.
        - RemoteBfdAddress (str): The remote address in which the BFD session is active.
        - RemoteDisc (number): This is the discriminator used by the remote system to identify the BFD session. This must be initialized to zero.
        - RemoteDiscLearned (bool): The default is 0. If it is set to 0, then the Remote Discriminator will be learned.
        Returns
        -------
        - self: This instance with matching session resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of session data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the session resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 45.167832
| 198
| 0.662796
|
4a0b5a922d8c1e410523a2612d5379622a4d5dbd
| 2,918
|
py
|
Python
|
VoronoiWall/structures.py
|
donsheehy/VoronoiWall
|
4d85dfa3c1b3ae302f841fb16204c15eaa8398e4
|
[
"MIT"
] | null | null | null |
VoronoiWall/structures.py
|
donsheehy/VoronoiWall
|
4d85dfa3c1b3ae302f841fb16204c15eaa8398e4
|
[
"MIT"
] | null | null | null |
VoronoiWall/structures.py
|
donsheehy/VoronoiWall
|
4d85dfa3c1b3ae302f841fb16204c15eaa8398e4
|
[
"MIT"
] | null | null | null |
from scipy.spatial import Voronoi
from sys import stderr
class Point:
    """An input site of the Voronoi diagram, linked to the region that contains it."""

    def __init__(self, point, region_obj):
        # Store the coordinates and wire up the bidirectional point<->region link.
        self.point = point
        self.region = region_obj
        region_obj.point = self
class Region:
    """A Voronoi cell: its vertex indices, resolved vertex coordinates, and facets."""

    def __init__(self, voronoi, vertices_i):
        self.vertices_i = vertices_i
        # Index -1 marks an unbounded region (scipy convention) and has no
        # coordinates to resolve, so it is filtered out here.
        self.vertices = [voronoi.vertices[idx] for idx in vertices_i if idx != -1]
        self.facets = []
        self.point = None
class Facet:
    """A polygonal face of the diagram, shared between adjacent regions."""

    def __init__(self, voronoi, vertices_i):
        self.voronoi = voronoi
        self.vertices_i = vertices_i
        # Index -1 is scipy's "vertex at infinity" marker; it has no coordinates.
        self.vertices = [voronoi.vertices[idx] for idx in vertices_i if idx != -1]
        self.regions = []

    def triangles(self):
        """Yield a triangle-fan decomposition of this facet, anchored at vertex 0."""
        anchor = self.vertices_i[0]
        for k in range(1, len(self.vertices) - 1):
            yield Facet(self.voronoi, [anchor, self.vertices_i[k], self.vertices_i[k + 1]])
class Diagram:
    """Object-graph wrapper around scipy's Voronoi: points, regions, and facets
    are cross-linked so each can reach its neighbors directly.
    """
    def __init__(self, points):
        self.voronoi = Voronoi(points)
        self.vertices = self.voronoi.vertices
        # One Facet per scipy ridge; one Region per scipy region.
        self.facets = [Facet(self.voronoi, facet) for facet in self.voronoi.ridge_vertices]
        self.regions = [Region(self.voronoi, region) for region in self.voronoi.regions]
        # match facets to the region containing them: a facet belongs to a
        # region when all of its vertex indices are among the region's.
        # (O(facets * regions) — fine for small diagrams.)
        for facet_obj in self.facets:
            facet_vert_set = set(facet_obj.vertices_i)
            for region_obj in self.regions:
                region_vert_set = set(region_obj.vertices_i)
                if facet_vert_set <= region_vert_set:
                    region_obj.facets.append(facet_obj)
                    facet_obj.regions.append(region_obj)
        # point_region maps each input point to the index of its region.
        self.points = [Point(self.voronoi.points[i], self.regions[region_i]) for i, region_i in
                       enumerate(self.voronoi.point_region)]
        # Bounded entities exclude anything touching the vertex at infinity (-1);
        # bounded regions additionally need more than 3 vertices to enclose volume.
        self.bounded_facets = [facet for facet in self.facets if -1 not in facet.vertices_i]
        self.bounded_regions = [region for region in self.regions if
                                -1 not in region.vertices_i and len(region.vertices_i) > 3]
if __name__ == '__main__':
    import numpy as np
    # Outer 3x3x3 cube corners plus an inner unit cube, so at least one
    # region is fully bounded.
    points = np.array([[0, 0, 3], [0, 3, 3], [3, 0, 3], [3, 3, 3], [0, 0, 0], [0, 3, 0], [3, 0, 0], [3, 3, 0],
                       [1, 2, 2], [2, 2, 2], [1, 2, 1], [2, 2, 1], [1, 1, 2], [2, 1, 2], [1, 1, 1], [2, 1, 1]])
    diagram = Diagram(points)
    # Drill down: input point -> its region -> a facet -> one vertex [x, y, z].
    first_input = diagram.points[0]
    first_facet = first_input.region.facets[0]
    print(first_facet.vertices[0])
| 32.786517
| 117
| 0.61035
|
4a0b5ac416edb3b91a9fbf20633c85ca2f8dc3bb
| 2,211
|
py
|
Python
|
python/xml2text.py
|
lappsgrid-incubator/galaxy-paper-rank
|
db5f87a159d78e643c89b94462a91401fca306ca
|
[
"Apache-2.0"
] | null | null | null |
python/xml2text.py
|
lappsgrid-incubator/galaxy-paper-rank
|
db5f87a159d78e643c89b94462a91401fca306ca
|
[
"Apache-2.0"
] | 12
|
2020-09-21T17:08:59.000Z
|
2020-10-24T19:48:37.000Z
|
python/xml2text.py
|
lappsgrid-incubator/galaxy-paper-rank
|
db5f87a159d78e643c89b94462a91401fca306ca
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from bs4 import BeautifulSoup
# Running totals for the whole directory walk:
#   passed  - files whose <body> text was extracted and written
#   failed  - files that failed to parse or had no <body> element
#   skipped - paths that were neither .xml files nor directories
failed = 0
passed = 0
skipped = list()
def extract(xml_path, outdir):
    """Extract the text of one XML file's <body> element.

    Writes the text to *outdir* under the same basename with a ``.txt``
    extension, and updates the module-level ``passed``/``failed`` counters.

    :param xml_path: path to the input .xml file
    :param outdir: directory the .txt file is written to (must exist)
    :return: True on success, False when parsing fails or no <body> exists
    """
    global failed, passed
    try:
        with open(xml_path, "r") as xml_file:
            doc = BeautifulSoup(xml_file, 'xml')
    except Exception as e:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; also name the offending file, not just the error type.
        print(f"{xml_path}: {e}")
        failed += 1
        return False
    body = doc.find_all("body")
    # find_all returns an (possibly empty) result list, never None.
    if not body:
        print(f"{xml_path}: No body element found.")
        failed += 1
        return False
    fname = os.path.basename(xml_path).replace(".xml", ".txt")
    txt_path = os.path.join(outdir, fname)
    with open(txt_path, "w") as txt_file:
        txt_file.write(body[0].text)
    passed += 1
    return True
def process(indir, outdir):
    """Recursively walk *indir*, extracting every .xml file into *outdir*.

    Mirrors the input directory structure under *outdir*; anything that is
    neither an .xml file nor a directory is recorded in ``skipped``.
    """
    global skipped
    for entry in os.listdir(indir):
        path = os.path.join(indir, entry)
        if os.path.isfile(path) and path.endswith(".xml"):
            extract(path, outdir)
        elif os.path.isdir(path):
            # Recreate the subdirectory on the output side before recursing.
            target = os.path.join(outdir, os.path.basename(entry))
            if not os.path.exists(target):
                os.makedirs(target, 0o774)
            process(path, target)
        else:
            skipped.append(path)
def ok(directory, type):
    """Validate that *directory* exists and is a directory.

    :param directory: path to check
    :param type: the directory's role ("input"/"output"), used in diagnostics
    :return: True when valid, False otherwise (after printing a diagnostic)
    """
    if not os.path.exists(directory):
        # BUG FIX: previously interpolated the *global* `indir` here, which
        # misreported the output directory (and would NameError if called
        # before `indir` was assigned). Report the argument itself.
        print(f"The {type} directory {directory} does not exist")
        return False
    if not os.path.isdir(directory):
        print(f"{directory} is not a directory")
        return False
    return True
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("USAGE: python xml2text.py <INDIR> <OUTDIR>")
        print("\nXML files will be read from <INDIR> and the extracted text written to <OUTDIR>")
        print("Both directories must exist and the directory structure in <INDIR> will")
        print("be created inside <OUTDIR>.")
        sys.exit(1)
    indir = sys.argv[1]
    # Short-circuit keeps the original behavior: the output directory is only
    # checked once the input directory has validated.
    if not ok(indir, "input"):
        sys.exit(1)
    outdir = sys.argv[2]
    if not ok(outdir, "output"):
        sys.exit(1)
    process(indir, outdir)
    # Summary report, then the explicit list of skipped paths.
    print(f"Wrote {passed} files.")
    print(f"Encountered {failed} problems.")
    print(f"Skipped {len(skipped)} files.")
    for f in skipped:
        print(f)
| 29.48
| 97
| 0.601538
|
4a0b5ac7be865a15abf34a68847b2a9156823608
| 5,756
|
py
|
Python
|
Scripts/sims4communitylib/enums/common_age.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 118
|
2019-08-31T04:33:18.000Z
|
2022-03-28T21:12:14.000Z
|
Scripts/sims4communitylib/enums/common_age.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 15
|
2019-12-05T01:29:46.000Z
|
2022-02-18T17:13:46.000Z
|
Scripts/sims4communitylib/enums/common_age.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 28
|
2019-09-07T04:11:05.000Z
|
2022-02-07T18:31:40.000Z
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Dict, Union, Tuple
from sims.sim_info import SimInfo
from sims.sim_info_types import Age
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonAge(CommonInt):
    """Custom Age enum containing all ages, because there have been too many problems when referencing the vanilla Age in various places.
    """
    # Power-of-two flag values — NOTE(review): these appear chosen to mirror
    # the vanilla Age flags; confirm against sims.sim_info_types.Age.
    INVALID: 'CommonAge' = 0
    BABY: 'CommonAge' = 1
    TODDLER: 'CommonAge' = 2
    CHILD: 'CommonAge' = 4
    TEEN: 'CommonAge' = 8
    YOUNGADULT: 'CommonAge' = 16
    ADULT: 'CommonAge' = 32
    ELDER: 'CommonAge' = 64
    @classmethod
    def get_all(cls) -> Tuple['CommonAge']:
        """get_all()
        Retrieve a collection of all CommonAge, excluding CommonAge.INVALID.
        :return: A collection of all CommonAge, without CommonAge.INVALID.
        :rtype: Tuple[CommonAge]
        """
        value_list: Tuple[CommonAge] = tuple([value for value in cls.values if value != cls.INVALID])
        return value_list
    @classmethod
    def get_all_names(cls) -> Tuple[str]:
        """get_all_names()
        Retrieve a collection of the names of all CommonAge, excluding CommonAge.INVALID.
        :return: A collection of the names of all CommonAge, without CommonAge.INVALID.
        :rtype: Tuple[str]
        """
        name_list: Tuple[str] = tuple([value.name for value in cls.get_all()])
        return name_list
    @staticmethod
    def get_age(sim_info: SimInfo) -> 'CommonAge':
        """get_age(sim_info)
        Retrieve the CommonAge of a Sim.
        :param sim_info: An instance of a Sim.
        :type sim_info: SimInfo
        :return: The CommonAge that represents what age a Sim is or CommonAge.INVALID if their age cannot be determined.
        :rtype: CommonAge
        """
        from sims4communitylib.utils.sims.common_age_utils import CommonAgeUtils
        # Checked youngest-to-oldest; the first matching predicate wins.
        if CommonAgeUtils.is_baby(sim_info):
            return CommonAge.BABY
        elif CommonAgeUtils.is_toddler(sim_info):
            return CommonAge.TODDLER
        elif CommonAgeUtils.is_child(sim_info):
            return CommonAge.CHILD
        elif CommonAgeUtils.is_teen(sim_info):
            return CommonAge.TEEN
        elif CommonAgeUtils.is_young_adult(sim_info):
            return CommonAge.YOUNGADULT
        elif CommonAgeUtils.is_adult(sim_info):
            return CommonAge.ADULT
        elif CommonAgeUtils.is_elder(sim_info):
            return CommonAge.ELDER
        return CommonAge.INVALID
    @staticmethod
    def convert_to_vanilla(age: 'CommonAge') -> Union[Age, None]:
        """convert_to_vanilla(age)
        Convert a CommonAge into the vanilla Age enum.
        :param age: An instance of a CommonAge
        :type age: CommonAge
        :return: The specified CommonAge translated to an Age or None if the CommonAge could not be translated.
        :rtype: Union[Age, None]
        """
        if age is None or age == CommonAge.INVALID:
            return None
        # Already a vanilla Age: nothing to translate.
        if isinstance(age, Age):
            return age
        age_conversion_mapping: Dict[CommonAge, Age] = {
            CommonAge.BABY: Age.BABY,
            CommonAge.TODDLER: Age.TODDLER,
            CommonAge.CHILD: Age.CHILD,
            CommonAge.TEEN: Age.TEEN,
            CommonAge.YOUNGADULT: Age.YOUNGADULT,
            CommonAge.ADULT: Age.ADULT,
            CommonAge.ELDER: Age.ELDER
        }
        return age_conversion_mapping.get(age, None)
    @staticmethod
    def convert_from_vanilla(age: Age) -> 'CommonAge':
        """convert_from_vanilla(age)
        Convert a vanilla Age to a CommonAge.
        :param age: An instance of an Age
        :type age: Age
        :return: The specified Age translated to a CommonAge or CommonAge.INVALID if the Age could not be translated.
        :rtype: CommonAge
        """
        if age is None:
            return CommonAge.INVALID
        if isinstance(age, CommonAge):
            return age
        # Keys are plain ints so lookups work for raw int ages as well.
        age_conversion_mapping: Dict[int, CommonAge] = {
            int(Age.BABY): CommonAge.BABY,
            int(Age.TODDLER): CommonAge.TODDLER,
            int(Age.CHILD): CommonAge.CHILD,
            int(Age.TEEN): CommonAge.TEEN,
            int(Age.YOUNGADULT): CommonAge.YOUNGADULT,
            int(Age.ADULT): CommonAge.ADULT,
            int(Age.ELDER): CommonAge.ELDER
        }
        age = int(age)
        if age not in age_conversion_mapping:
            return CommonAge.INVALID
        return age_conversion_mapping[age]
    @staticmethod
    def convert_to_localized_string_id(age: 'CommonAge') -> Union[int, str]:
        """convert_to_localized_string_id(age)
        Convert a CommonAge into a Localized String identifier.
        :param age: An instance of a CommonAge
        :type age: CommonAge
        :return: The specified CommonAge translated to a localized string identifier or the name property of the value, if no localized string id is found.
        :rtype: Union[int, str]
        """
        from sims4communitylib.enums.strings_enum import CommonStringId
        display_name_mapping = {
            CommonAge.BABY: CommonStringId.BABY,
            CommonAge.TODDLER: CommonStringId.TODDLER,
            CommonAge.CHILD: CommonStringId.CHILD,
            CommonAge.TEEN: CommonStringId.TEEN,
            CommonAge.YOUNGADULT: CommonStringId.YOUNG_ADULT,
            CommonAge.ADULT: CommonStringId.ADULT,
            CommonAge.ELDER: CommonStringId.ELDER
        }
        # Fall back to the enum member's name when no localization exists.
        return display_name_mapping.get(age, age.name)
| 36.66242
| 155
| 0.648714
|
4a0b5b041382c7332bec4125b3965d39cf329ddf
| 2,851
|
py
|
Python
|
python/geospark/core/formatMapper/geo_json_reader.py
|
Maxar-Corp/GeoSpark
|
6248c6773dc88bf3354ea9b223f16ceb064e7627
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-10-19T07:57:29.000Z
|
2021-10-19T07:57:29.000Z
|
python/geospark/core/formatMapper/geo_json_reader.py
|
mayankkt9/GeoSpark
|
618da90413f7d86c59def92ba765fbd6d9d49761
|
[
"Apache-2.0",
"MIT"
] | 3
|
2020-03-24T18:20:35.000Z
|
2021-02-02T22:36:37.000Z
|
python/geospark/core/formatMapper/geo_json_reader.py
|
mayankkt9/GeoSpark
|
618da90413f7d86c59def92ba765fbd6d9d49761
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-26T15:51:22.000Z
|
2021-09-26T15:51:22.000Z
|
from pyspark import SparkContext, RDD
from geospark.core.SpatialRDD.spatial_rdd import SpatialRDD
from geospark.core.formatMapper.geo_reader import GeoDataReader
from geospark.core.jvm.config import since
from geospark.utils.decorators import require
from geospark.utils.meta import MultipleMeta
class GeoJsonReader(GeoDataReader, metaclass=MultipleMeta):
    """Reads GeoJSON input into a :class:`SpatialRDD` by delegating to the
    JVM-side ``GeoJsonReader``.

    NOTE(review): ``readToGeometryRDD`` is defined four times below; the
    ``MultipleMeta`` metaclass presumably dispatches between them based on
    the annotated signatures — plain Python would keep only the last
    definition. Verify against ``geospark.utils.meta``.
    """
    @classmethod
    @since("1.2.0")
    @require(["GeoJsonReader"])
    def validate_imports(cls) -> bool:
        # @require raises when the JVM-side GeoJsonReader is unavailable;
        # reaching the body means the check passed.
        return True
    @classmethod
    def readToGeometryRDD(cls, sc: SparkContext, inputPath: str) -> SpatialRDD:
        """
        Read a GeoJSON file from a path into a SpatialRDD.

        :param sc: SparkContext
        :param inputPath: str, file input location
        :return: SpatialRDD
        """
        GeoJsonReader.validate_imports()
        jvm = sc._jvm
        srdd = jvm.GeoJsonReader.readToGeometryRDD(
            sc._jsc, inputPath
        )
        # Wrap the Java RDD in the Python-side SpatialRDD facade.
        spatial_rdd = SpatialRDD(sc)
        spatial_rdd.set_srdd(srdd)
        return spatial_rdd
    @classmethod
    def readToGeometryRDD(cls, sc: SparkContext, inputPath: str, allowInvalidGeometries: bool,
                          skipSyntacticallyInvalidGeometries: bool) -> SpatialRDD:
        """
        Read a GeoJSON file from a path into a SpatialRDD, with control over
        how invalid geometries are handled.

        :param sc: SparkContext
        :param inputPath: str, path to the file
        :param allowInvalidGeometries: bool
        :param skipSyntacticallyInvalidGeometries: bool
        :return: SpatialRDD
        """
        GeoJsonReader.validate_imports()
        jvm = sc._jvm
        srdd = jvm.GeoJsonReader.readToGeometryRDD(
            sc._jsc, inputPath, allowInvalidGeometries, skipSyntacticallyInvalidGeometries
        )
        spatial_rdd = SpatialRDD(sc)
        spatial_rdd.set_srdd(srdd)
        return spatial_rdd
    @classmethod
    def readToGeometryRDD(cls, rawTextRDD: RDD) -> SpatialRDD:
        """
        Read an already-loaded RDD of GeoJSON text lines into a SpatialRDD.

        :param rawTextRDD: RDD
        :return: SpatialRDD
        """
        GeoJsonReader.validate_imports()
        # Recover the SparkContext from the RDD itself.
        sc = rawTextRDD.ctx
        jvm = sc._jvm
        srdd = jvm.GeoJsonReader.readToGeometryRDD(
            rawTextRDD._jrdd
        )
        spatial_rdd = SpatialRDD(sc)
        spatial_rdd.set_srdd(srdd)
        return spatial_rdd
    @classmethod
    def readToGeometryRDD(cls, rawTextRDD: RDD, allowInvalidGeometries: bool, skipSyntacticallyInvalidGeometries: bool) -> SpatialRDD:
        """
        Read an already-loaded RDD of GeoJSON text lines into a SpatialRDD,
        with control over how invalid geometries are handled.

        :param rawTextRDD: RDD
        :param allowInvalidGeometries: bool
        :param skipSyntacticallyInvalidGeometries: bool
        :return: SpatialRDD
        """
        GeoJsonReader.validate_imports()
        sc = rawTextRDD.ctx
        jvm = sc._jvm
        srdd = jvm.GeoJsonReader.readToGeometryRDD(
            rawTextRDD._jrdd, allowInvalidGeometries, skipSyntacticallyInvalidGeometries
        )
        spatial_rdd = SpatialRDD(sc)
        spatial_rdd.set_srdd(srdd)
        return spatial_rdd
| 30.010526
| 134
| 0.65591
|
4a0b5d11610add74b98f98b5492fcbe085beee93
| 57,827
|
py
|
Python
|
discord/abc.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/abc.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/abc.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present 404kuso
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import copy
import asyncio
from typing import (
Any,
Callable,
Dict,
List,
Optional,
TYPE_CHECKING,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
overload,
runtime_checkable,
)
from .iterators import HistoryIterator
from .context_managers import Typing
from .enums import ChannelType
from .errors import InvalidArgument, ClientException
from .mentions import AllowedMentions
from .permissions import PermissionOverwrite, Permissions
from .role import Role
from .invite import Invite
from .file import File
from .voice_client import VoiceClient, VoiceProtocol
from .sticker import GuildSticker, StickerItem
from .components import Component, ComponentStore
from . import utils
# Public API of this module: the ABCs/protocols that model shared behavior
# of Discord entities.
__all__ = (
    'Snowflake',
    'User',
    'PrivateChannel',
    'GuildChannel',
    'Messageable',
    'Connectable',
)
# TypeVar bound to VoiceProtocol — presumably parameterizes the voice-connect
# helpers later in the file (Connectable); TODO confirm.
T = TypeVar('T', bound=VoiceProtocol)
if TYPE_CHECKING:
from datetime import datetime
from .client import Client
from .user import ClientUser
from .asset import Asset
from .state import ConnectionState
from .guild import Guild
from .member import Member
from .channel import CategoryChannel
from .embeds import Embed
from .message import Message, MessageReference, PartialMessage
from .channel import TextChannel, DMChannel, GroupChannel, PartialMessageable
from .threads import Thread
from .enums import InviteTarget
from .ui.view import View
from .types.channel import (
PermissionOverwrite as PermissionOverwritePayload,
Channel as ChannelPayload,
GuildChannel as GuildChannelPayload,
OverwriteType,
)
PartialMessageableChannel = Union[TextChannel, Thread, DMChannel, PartialMessageable]
MessageableChannel = Union[PartialMessageableChannel, GroupChannel]
SnowflakeTime = Union["Snowflake", datetime]
MISSING = utils.MISSING
class _Undefined:
    """Sentinel type whose single instance marks "caller did not supply a value"."""

    def __repr__(self) -> str:
        return 'see-below'


# Module-level singleton; compared by identity (`is _undefined`) elsewhere.
_undefined: Any = _Undefined()
@runtime_checkable
class Snowflake(Protocol):
    """An ABC that details the common operations on a Discord model.
    Almost all :ref:`Discord models <discord_api_models>` meet this
    abstract base class.
    If you want to create a snowflake on your own, consider using
    :class:`.Object`.
    Attributes
    -----------
    id: :class:`int`
        The model's unique ID.
    """
    __slots__ = ()
    # The sole structural requirement: a unique integer snowflake ID.
    id: int
@runtime_checkable
class User(Snowflake, Protocol):
    """An ABC that details the common operations on a Discord user.
    The following implement this ABC:
    - :class:`~discord.User`
    - :class:`~discord.ClientUser`
    - :class:`~discord.Member`
    This ABC must also implement :class:`~discord.abc.Snowflake`.
    Attributes
    -----------
    name: :class:`str`
        The user's username.
    discriminator: :class:`str`
        The user's discriminator.
    avatar: :class:`~discord.Asset`
        The avatar asset the user has.
    bot: :class:`bool`
        If the user is a bot account.
    """
    __slots__ = ()
    name: str
    discriminator: str
    avatar: Asset
    bot: bool
    # The two properties below are part of the protocol's contract; the
    # bodies here only raise — concrete models provide the implementations.
    @property
    def display_name(self) -> str:
        """:class:`str`: Returns the user's display name."""
        raise NotImplementedError
    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given user."""
        raise NotImplementedError
@runtime_checkable
class PrivateChannel(Snowflake, Protocol):
    """An ABC that details the common operations on a private Discord channel.
    The following implement this ABC:
    - :class:`~discord.DMChannel`
    - :class:`~discord.GroupChannel`
    This ABC must also implement :class:`~discord.abc.Snowflake`.
    Attributes
    -----------
    me: :class:`~discord.ClientUser`
        The user presenting yourself.
    """
    __slots__ = ()
    # Structural requirement beyond Snowflake: the bot's own user object.
    me: ClientUser
class _Overwrites:
    """Internal wrapper around a raw permission-overwrite payload.

    Normalizes the snowflake ID and the allow/deny bitmasks to ints and
    records whether the overwrite targets a role or a member.
    """

    __slots__ = ('id', 'allow', 'deny', 'type')

    ROLE = 0
    MEMBER = 1

    def __init__(self, data: PermissionOverwritePayload):
        self.id: int = int(data['id'])
        # allow/deny arrive as stringified bitmasks; absent means "no overwrite".
        self.allow: int = int(data.get('allow', 0))
        self.deny: int = int(data.get('deny', 0))
        self.type: OverwriteType = data['type']

    def _asdict(self) -> PermissionOverwritePayload:
        # The API expects allow/deny re-serialized back to strings.
        return {
            'id': self.id,
            'allow': str(self.allow),
            'deny': str(self.deny),
            'type': self.type,
        }

    def is_role(self) -> bool:
        # Consistency fix: compare against the named constant rather than
        # repeating the magic literal 0.
        return self.type == self.ROLE

    def is_member(self) -> bool:
        return self.type == self.MEMBER
GCH = TypeVar('GCH', bound='GuildChannel')
class GuildChannel:
"""An ABC that details the common operations on a Discord guild channel.
The following implement this ABC:
- :class:`~discord.TextChannel`
- :class:`~discord.VoiceChannel`
- :class:`~discord.CategoryChannel`
- :class:`~discord.StageChannel`
This ABC must also implement :class:`~discord.abc.Snowflake`.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`~discord.Guild`
The guild the channel belongs to.
position: :class:`int`
The position in the channel list. This is a number that starts at 0.
e.g. the top channel is position 0.
"""
__slots__ = ()
id: int
name: str
guild: Guild
type: ChannelType
position: int
category_id: Optional[int]
_state: ConnectionState
_overwrites: List[_Overwrites]
if TYPE_CHECKING:
def __init__(self, *, state: ConnectionState, guild: Guild, data: Dict[str, Any]):
...
def __str__(self) -> str:
return self.name
@property
def _sorting_bucket(self) -> int:
raise NotImplementedError
def _update(self, guild: Guild, data: Dict[str, Any]) -> None:
raise NotImplementedError
async def _move(
self,
position: int,
parent_id: Optional[Any] = None,
lock_permissions: bool = False,
*,
reason: Optional[str],
) -> None:
if position < 0:
raise InvalidArgument('Channel position cannot be less than 0.')
http = self._state.http
bucket = self._sorting_bucket
channels: List[GuildChannel] = [c for c in self.guild.channels if c._sorting_bucket == bucket]
channels.sort(key=lambda c: c.position)
try:
# remove ourselves from the channel list
channels.remove(self)
except ValueError:
# not there somehow lol
return
else:
index = next((i for i, c in enumerate(channels) if c.position >= position), len(channels))
# add ourselves at our designated position
channels.insert(index, self)
payload = []
for index, c in enumerate(channels):
d: Dict[str, Any] = {'id': c.id, 'position': index}
if parent_id is not _undefined and c.id == self.id:
d.update(parent_id=parent_id, lock_permissions=lock_permissions)
payload.append(d)
await http.bulk_channel_update(self.guild.id, payload, reason=reason)
async def _edit(self, options: Dict[str, Any], reason: Optional[str]) -> Optional[ChannelPayload]:
try:
parent = options.pop('category')
except KeyError:
parent_id = _undefined
else:
parent_id = parent and parent.id
try:
options['rate_limit_per_user'] = options.pop('slowmode_delay')
except KeyError:
pass
try:
rtc_region = options.pop('rtc_region')
except KeyError:
pass
else:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
try:
video_quality_mode = options.pop('video_quality_mode')
except KeyError:
pass
else:
options['video_quality_mode'] = int(video_quality_mode)
lock_permissions = options.pop('sync_permissions', False)
try:
position = options.pop('position')
except KeyError:
if parent_id is not _undefined:
if lock_permissions:
category = self.guild.get_channel(parent_id)
if category:
options['permission_overwrites'] = [c._asdict() for c in category._overwrites]
options['parent_id'] = parent_id
elif lock_permissions and self.category_id is not None:
# if we're syncing permissions on a pre-existing channel category without changing it
# we need to update the permissions to point to the pre-existing category
category = self.guild.get_channel(self.category_id)
if category:
options['permission_overwrites'] = [c._asdict() for c in category._overwrites]
else:
await self._move(position, parent_id=parent_id, lock_permissions=lock_permissions, reason=reason)
overwrites = options.get('overwrites', None)
if overwrites is not None:
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {
'allow': allow.value,
'deny': deny.value,
'id': target.id,
}
if isinstance(target, Role):
payload['type'] = _Overwrites.ROLE
else:
payload['type'] = _Overwrites.MEMBER
perms.append(payload)
options['permission_overwrites'] = perms
try:
ch_type = options['type']
except KeyError:
pass
else:
if not isinstance(ch_type, ChannelType):
raise InvalidArgument('type field must be of type ChannelType')
options['type'] = ch_type.value
if options:
return await self._state.http.edit_channel(self.id, reason=reason, **options)
def _fill_overwrites(self, data: GuildChannelPayload) -> None:
self._overwrites = []
everyone_index = 0
everyone_id = self.guild.id
for index, overridden in enumerate(data.get('permission_overwrites', [])):
overwrite = _Overwrites(overridden)
self._overwrites.append(overwrite)
if overwrite.type == _Overwrites.MEMBER:
continue
if overwrite.id == everyone_id:
# the @everyone role is not guaranteed to be the first one
# in the list of permission overwrites, however the permission
# resolution code kind of requires that it is the first one in
# the list since it is special. So we need the index so we can
# swap it to be the first one.
everyone_index = index
# do the swap
tmp = self._overwrites
if tmp:
tmp[everyone_index], tmp[0] = tmp[0], tmp[everyone_index]
@property
def changed_roles(self) -> List[Role]:
"""List[:class:`~discord.Role`]: Returns a list of roles that have been overridden from
their default values in the :attr:`~discord.Guild.roles` attribute."""
ret = []
g = self.guild
for overwrite in filter(lambda o: o.is_role(), self._overwrites):
role = g.get_role(overwrite.id)
if role is None:
continue
role = copy.copy(role)
role.permissions.handle_overwrite(overwrite.allow, overwrite.deny)
ret.append(role)
return ret
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f'<#{self.id}>'
@property
def created_at(self) -> datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def overwrites_for(self, obj: Union[Role, User]) -> PermissionOverwrite:
"""Returns the channel-specific overwrites for a member or a role.
Parameters
-----------
obj: Union[:class:`~discord.Role`, :class:`~discord.abc.User`]
The role or user denoting
whose overwrite to get.
Returns
---------
:class:`~discord.PermissionOverwrite`
The permission overwrites for this object.
"""
if isinstance(obj, User):
predicate = lambda p: p.is_member()
elif isinstance(obj, Role):
predicate = lambda p: p.is_role()
else:
predicate = lambda p: True
for overwrite in filter(predicate, self._overwrites):
if overwrite.id == obj.id:
allow = Permissions(overwrite.allow)
deny = Permissions(overwrite.deny)
return PermissionOverwrite.from_pair(allow, deny)
return PermissionOverwrite()
@property
def overwrites(self) -> Dict[Union[Role, Member], PermissionOverwrite]:
"""Returns all of the channel's overwrites.
This is returned as a dictionary where the key contains the target which
can be either a :class:`~discord.Role` or a :class:`~discord.Member` and the value is the
overwrite as a :class:`~discord.PermissionOverwrite`.
Returns
--------
Dict[Union[:class:`~discord.Role`, :class:`~discord.Member`], :class:`~discord.PermissionOverwrite`]
The channel's permission overwrites.
"""
ret = {}
for ow in self._overwrites:
allow = Permissions(ow.allow)
deny = Permissions(ow.deny)
overwrite = PermissionOverwrite.from_pair(allow, deny)
target = None
if ow.is_role():
target = self.guild.get_role(ow.id)
elif ow.is_member():
target = self.guild.get_member(ow.id)
# TODO: There is potential data loss here in the non-chunked
# case, i.e. target is None because get_member returned nothing.
# This can be fixed with a slight breaking change to the return type,
# i.e. adding discord.Object to the list of it
# However, for now this is an acceptable compromise.
if target is not None:
ret[target] = overwrite
return ret
@property
def category(self) -> Optional[CategoryChannel]:
"""Optional[:class:`~discord.CategoryChannel`]: The category this channel belongs to.
If there is no category then this is ``None``.
"""
return self.guild.get_channel(self.category_id) # type: ignore
@property
def permissions_synced(self) -> bool:
""":class:`bool`: Whether or not the permissions for this channel are synced with the
category it belongs to.
If there is no category then this is ``False``.
.. versionadded:: 1.3
"""
if self.category_id is None:
return False
category = self.guild.get_channel(self.category_id)
return bool(category and category.overwrites == self.overwrites)
def permissions_for(self, obj: Union[Member, Role], /) -> Permissions:
"""Handles permission resolution for the :class:`~discord.Member`
or :class:`~discord.Role`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
If a :class:`~discord.Role` is passed, then it checks the permissions
someone with that role would have, which is essentially:
- The default role permissions
- The permissions of the role used as a parameter
- The default role permission overwrites
- The permission overwrites of the role used as a parameter
.. versionchanged:: 2.0
The object passed in can now be a role object.
Parameters
----------
obj: Union[:class:`~discord.Member`, :class:`~discord.Role`]
The object to resolve permissions for. This could be either
a member or a role. If it's a role then member overwrites
are not computed.
Returns
-------
:class:`~discord.Permissions`
The resolved permissions for the member or role.
"""
# The current cases can be explained as:
# Guild owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the user has in the channel
# (or otherwise) are then OR'd together.
# After the role permissions are resolved, the member permissions
# have to take into effect.
# After all that is done.. you have to do the following:
# If manage permissions is True, then all permissions are set to True.
# The operation first takes into consideration the denied
# and then the allowed.
if self.guild.owner_id == obj.id:
return Permissions.all()
default = self.guild.default_role
base = Permissions(default.permissions.value)
# Handle the role case first
if isinstance(obj, Role):
base.value |= obj._permissions
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
except IndexError:
pass
if obj.is_default():
return base
overwrite = utils.get(self._overwrites, type=_Overwrites.ROLE, id=obj.id)
if overwrite is not None:
base.handle_overwrite(overwrite.allow, overwrite.deny)
return base
roles = obj._roles
get_role = self.guild.get_role
# Apply guild roles that the member has.
for role_id in roles:
role = get_role(role_id)
if role is not None:
base.value |= role._permissions
# Guild-wide Administrator -> True for everything
# Bypass all channel-specific overrides
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
remaining_overwrites = self._overwrites[1:]
else:
remaining_overwrites = self._overwrites
except IndexError:
remaining_overwrites = self._overwrites
denies = 0
allows = 0
# Apply channel specific role permission overwrites
for overwrite in remaining_overwrites:
if overwrite.is_role() and roles.has(overwrite.id):
denies |= overwrite.deny
allows |= overwrite.allow
base.handle_overwrite(allow=allows, deny=denies)
# Apply member specific permission overwrites
for overwrite in remaining_overwrites:
if overwrite.is_member() and overwrite.id == obj.id:
base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
break
# if you can't send a message in a channel then you can't have certain
# permissions as well
if not base.send_messages:
base.send_tts_messages = False
base.mention_everyone = False
base.embed_links = False
base.attach_files = False
# if you can't read a channel then you have no permissions there
if not base.read_messages:
denied = Permissions.all_channel()
base.value &= ~denied.value
return base
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the channel.
You must have :attr:`~discord.Permissions.manage_channels` permission to use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this channel.
Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have proper permissions to delete the channel.
~discord.NotFound
The channel was not found or was already deleted.
~discord.HTTPException
Deleting the channel failed.
"""
await self._state.http.delete_channel(self.id, reason=reason)
@overload
async def set_permissions(
self,
target: Union[Member, Role],
*,
overwrite: Optional[Union[PermissionOverwrite, _Undefined]] = ...,
reason: Optional[str] = ...,
) -> None:
...
@overload
async def set_permissions(
self,
target: Union[Member, Role],
*,
reason: Optional[str] = ...,
**permissions: bool,
) -> None:
...
    async def set_permissions(self, target, *, overwrite=_undefined, reason=None, **permissions):
        r"""|coro|
        Sets the channel specific permission overwrites for a target in the
        channel.
        The ``target`` parameter should either be a :class:`~discord.Member` or a
        :class:`~discord.Role` that belongs to guild.
        The ``overwrite`` parameter, if given, must either be ``None`` or
        :class:`~discord.PermissionOverwrite`. For convenience, you can pass in
        keyword arguments denoting :class:`~discord.Permissions` attributes. If this is
        done, then you cannot mix the keyword arguments with the ``overwrite``
        parameter.
        If the ``overwrite`` parameter is ``None``, then the permission
        overwrites are deleted.
        You must have the :attr:`~discord.Permissions.manage_roles` permission to use this.
        .. note::
            This method *replaces* the old overwrites with the ones given.
        Examples
        ----------
        Setting allow and deny: ::
            await message.channel.set_permissions(message.author, read_messages=True,
                                                  send_messages=False)
        Deleting overwrites ::
            await channel.set_permissions(member, overwrite=None)
        Using :class:`~discord.PermissionOverwrite` ::
            overwrite = discord.PermissionOverwrite()
            overwrite.send_messages = False
            overwrite.read_messages = True
            await channel.set_permissions(member, overwrite=overwrite)
        Parameters
        -----------
        target: Union[:class:`~discord.Member`, :class:`~discord.Role`]
            The member or role to overwrite permissions for.
        overwrite: Optional[:class:`~discord.PermissionOverwrite`]
            The permissions to allow and deny to the target, or ``None`` to
            delete the overwrite.
        \*\*permissions
            A keyword argument list of permissions to set for ease of use.
            Cannot be mixed with ``overwrite``.
        reason: Optional[:class:`str`]
            The reason for doing this action. Shows up on the audit log.
        Raises
        -------
        ~discord.Forbidden
            You do not have permissions to edit channel specific permissions.
        ~discord.HTTPException
            Editing channel specific permissions failed.
        ~discord.NotFound
            The role or member being edited is not part of the guild.
        ~discord.InvalidArgument
            The overwrite parameter invalid or the target type was not
            :class:`~discord.Role` or :class:`~discord.Member`.
        """
        http = self._state.http
        # Discord's overwrite endpoint needs to know whether the target is a
        # member or a role; Member satisfies the User check here.
        if isinstance(target, User):
            perm_type = _Overwrites.MEMBER
        elif isinstance(target, Role):
            perm_type = _Overwrites.ROLE
        else:
            raise InvalidArgument('target parameter must be either Member or Role')
        if overwrite is _undefined:
            # No explicit overwrite given: build one from keyword arguments.
            # ``_undefined`` is a sentinel so that an explicit ``None`` (delete)
            # can be distinguished from "not passed".
            if len(permissions) == 0:
                raise InvalidArgument('No overwrite provided.')
            try:
                overwrite = PermissionOverwrite(**permissions)
            except (ValueError, TypeError):
                raise InvalidArgument('Invalid permissions given to keyword arguments.')
        else:
            if len(permissions) > 0:
                raise InvalidArgument('Cannot mix overwrite and keyword arguments.')
        # TODO: wait for event
        if overwrite is None:
            # An explicit None deletes the overwrite for this target.
            await http.delete_channel_permissions(self.id, target.id, reason=reason)
        elif isinstance(overwrite, PermissionOverwrite):
            (allow, deny) = overwrite.pair()
            await http.edit_channel_permissions(self.id, target.id, allow.value, deny.value, perm_type, reason=reason)
        else:
            raise InvalidArgument('Invalid overwrite type provided.')
    async def _clone_impl(
        self: GCH,
        base_attrs: Dict[str, Any],
        *,
        name: Optional[str] = None,
        reason: Optional[str] = None,
    ) -> GCH:
        """Shared implementation behind :meth:`clone` for concrete channel types.
        ``base_attrs`` carries the type-specific creation payload supplied by
        the subclass; this copies the current overwrites, parent category and
        name into it, creates the channel over HTTP, and returns the newly
        constructed channel object of the same class as ``self``.
        """
        base_attrs['permission_overwrites'] = [x._asdict() for x in self._overwrites]
        base_attrs['parent_id'] = self.category_id
        base_attrs['name'] = name or self.name
        guild_id = self.guild.id
        cls = self.__class__
        data = await self._state.http.create_channel(guild_id, self.type.value, reason=reason, **base_attrs)
        obj = cls(state=self._state, guild=self.guild, data=data)
        # temporarily add it to the cache (the gateway CHANNEL_CREATE event
        # will later overwrite this entry with the authoritative object)
        self.guild._channels[obj.id] = obj  # type: ignore
        return obj
    async def clone(self: GCH, *, name: Optional[str] = None, reason: Optional[str] = None) -> GCH:
        """|coro|
        Clones this channel. This creates a channel with the same properties
        as this channel.
        You must have the :attr:`~discord.Permissions.manage_channels` permission to
        do this.
        .. versionadded:: 1.1
        Parameters
        ------------
        name: Optional[:class:`str`]
            The name of the new channel. If not provided, defaults to this
            channel name.
        reason: Optional[:class:`str`]
            The reason for cloning this channel. Shows up on the audit log.
        Raises
        -------
        ~discord.Forbidden
            You do not have the proper permissions to create this channel.
        ~discord.HTTPException
            Creating the channel failed.
        Returns
        --------
        :class:`.abc.GuildChannel`
            The channel that was created.
        """
        # Abstract on the ABC: concrete channel types implement this by
        # calling _clone_impl() with their type-specific attributes.
        raise NotImplementedError
    # Typing-only overloads for move(): exactly one of ``beginning``, ``end``,
    # ``before`` or ``after`` selects the anchor; the remaining keyword
    # arguments are common to all four forms.  Runtime implementation below.
    @overload
    async def move(
        self,
        *,
        beginning: bool,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: Optional[str] = MISSING,
    ) -> None:
        ...
    @overload
    async def move(
        self,
        *,
        end: bool,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: str = MISSING,
    ) -> None:
        ...
    @overload
    async def move(
        self,
        *,
        before: Snowflake,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: str = MISSING,
    ) -> None:
        ...
    @overload
    async def move(
        self,
        *,
        after: Snowflake,
        offset: int = MISSING,
        category: Optional[Snowflake] = MISSING,
        sync_permissions: bool = MISSING,
        reason: str = MISSING,
    ) -> None:
        ...
    async def move(self, **kwargs) -> None:
        """|coro|
        A rich interface to help move a channel relative to other channels.
        If exact position movement is required, ``edit`` should be used instead.
        You must have the :attr:`~discord.Permissions.manage_channels` permission to
        do this.
        .. note::
            Voice channels will always be sorted below text channels.
            This is a Discord limitation.
        .. versionadded:: 1.7
        Parameters
        ------------
        beginning: :class:`bool`
            Whether to move the channel to the beginning of the
            channel list (or category if given).
            This is mutually exclusive with ``end``, ``before``, and ``after``.
        end: :class:`bool`
            Whether to move the channel to the end of the
            channel list (or category if given).
            This is mutually exclusive with ``beginning``, ``before``, and ``after``.
        before: :class:`~discord.abc.Snowflake`
            The channel that should be before our current channel.
            This is mutually exclusive with ``beginning``, ``end``, and ``after``.
        after: :class:`~discord.abc.Snowflake`
            The channel that should be after our current channel.
            This is mutually exclusive with ``beginning``, ``end``, and ``before``.
        offset: :class:`int`
            The number of channels to offset the move by. For example,
            an offset of ``2`` with ``beginning=True`` would move
            it 2 after the beginning. A positive number moves it below
            while a negative number moves it above. Note that this
            number is relative and computed after the ``beginning``,
            ``end``, ``before``, and ``after`` parameters.
        category: Optional[:class:`~discord.abc.Snowflake`]
            The category to move this channel under.
            If ``None`` is given then it moves it out of the category.
            This parameter is ignored if moving a category channel.
        sync_permissions: :class:`bool`
            Whether to sync the permissions with the category (if given).
        reason: :class:`str`
            The reason for the move.
        Raises
        -------
        InvalidArgument
            An invalid position was given or a bad mix of arguments were passed.
        Forbidden
            You do not have permissions to move the channel.
        HTTPException
            Moving the channel failed.
        """
        if not kwargs:
            return
        beginning, end = kwargs.get('beginning'), kwargs.get('end')
        before, after = kwargs.get('before'), kwargs.get('after')
        offset = kwargs.get('offset', 0)
        # The four anchor modes are mutually exclusive.
        if sum(bool(a) for a in (beginning, end, before, after)) > 1:
            raise InvalidArgument('Only one of [before, after, end, beginning] can be used.')
        bucket = self._sorting_bucket
        parent_id = kwargs.get('category', MISSING)
        # fmt: off
        channels: List[GuildChannel]
        if parent_id not in (MISSING, None):
            parent_id = parent_id.id
            # Moving into a (possibly different) category: order against that
            # category's channels of the same sorting bucket.
            channels = [
                ch
                for ch in self.guild.channels
                if ch._sorting_bucket == bucket
                and ch.category_id == parent_id
            ]
        else:
            # Staying put (or moving out of a category): order against the
            # current category's channels of the same sorting bucket.
            channels = [
                ch
                for ch in self.guild.channels
                if ch._sorting_bucket == bucket
                and ch.category_id == self.category_id
            ]
        # fmt: on
        channels.sort(key=lambda c: (c.position, c.id))
        try:
            # Try to remove ourselves from the channel list
            channels.remove(self)
        except ValueError:
            # If we're not there then it's probably due to not being in the category
            pass
        index = None
        if beginning:
            index = 0
        elif end:
            index = len(channels)
        elif before:
            index = next((i for i, c in enumerate(channels) if c.id == before.id), None)
        elif after:
            index = next((i + 1 for i, c in enumerate(channels) if c.id == after.id), None)
        if index is None:
            raise InvalidArgument('Could not resolve appropriate move position')
        # Re-insert ourselves at the computed slot, clamped at 0 after offset.
        channels.insert(max((index + offset), 0), self)
        payload = []
        lock_permissions = kwargs.get('sync_permissions', False)
        reason = kwargs.get('reason')
        for index, channel in enumerate(channels):
            d = {'id': channel.id, 'position': index}
            if parent_id is not MISSING and channel.id == self.id:
                # Only our own entry carries the category change / perm sync.
                d.update(parent_id=parent_id, lock_permissions=lock_permissions)
            payload.append(d)
        await self._state.http.bulk_channel_update(self.guild.id, payload, reason=reason)
    async def create_invite(
        self,
        *,
        reason: Optional[str] = None,
        max_age: int = 0,
        max_uses: int = 0,
        temporary: bool = False,
        unique: bool = True,
        target_type: Optional[InviteTarget] = None,
        target_user: Optional[User] = None,
        target_application_id: Optional[int] = None,
    ) -> Invite:
        """|coro|
        Creates an instant invite from a text or voice channel.
        You must have the :attr:`~discord.Permissions.create_instant_invite` permission to
        do this.
        Parameters
        ------------
        max_age: :class:`int`
            How long the invite should last in seconds. If it's 0 then the invite
            doesn't expire. Defaults to ``0``.
        max_uses: :class:`int`
            How many uses the invite could be used for. If it's 0 then there
            are unlimited uses. Defaults to ``0``.
        temporary: :class:`bool`
            Denotes that the invite grants temporary membership
            (i.e. they get kicked after they disconnect). Defaults to ``False``.
        unique: :class:`bool`
            Indicates if a unique invite URL should be created. Defaults to ``True``.
            If this is set to ``False`` then it will return a previously created
            invite.
        reason: Optional[:class:`str`]
            The reason for creating this invite. Shows up on the audit log.
        target_type: Optional[:class:`.InviteTarget`]
            The type of target for the voice channel invite, if any.
            .. versionadded:: 2.0
        target_user: Optional[:class:`User`]
            The user whose stream to display for this invite, required if `target_type` is `TargetType.stream`. The user must be streaming in the channel.
            .. versionadded:: 2.0
        target_application_id: Optional[:class:`int`]
            The id of the embedded application for the invite, required if `target_type` is `TargetType.embedded_application`.
            .. versionadded:: 2.0
        Raises
        -------
        ~discord.HTTPException
            Invite creation failed.
        ~discord.NotFound
            The channel that was passed is a category or an invalid channel.
        Returns
        --------
        :class:`~discord.Invite`
            The invite that was created.
        """
        # Optional target fields are flattened to plain values/ids for HTTP.
        data = await self._state.http.create_invite(
            self.id,
            reason=reason,
            max_age=max_age,
            max_uses=max_uses,
            temporary=temporary,
            unique=unique,
            target_type=target_type.value if target_type else None,
            target_user_id=target_user.id if target_user else None,
            target_application_id=target_application_id,
        )
        return Invite.from_incomplete(data=data, state=self._state)
    async def invites(self) -> List[Invite]:
        """|coro|
        Returns a list of all active instant invites from this channel.
        You must have :attr:`~discord.Permissions.manage_channels` to get this information.
        Raises
        -------
        ~discord.Forbidden
            You do not have proper permissions to get the information.
        ~discord.HTTPException
            An error occurred while fetching the information.
        Returns
        -------
        List[:class:`~discord.Invite`]
            The list of invites that are currently active.
        """
        state = self._state
        data = await state.http.invites_from_channel(self.id)
        guild = self.guild
        # The payload lacks channel/guild objects, so attach ours explicitly.
        return [Invite(state=state, data=invite, channel=self, guild=guild) for invite in data]
class Messageable:
    """An ABC that details the common operations on a model that can send messages.
    The following implement this ABC:
    - :class:`~discord.TextChannel`
    - :class:`~discord.DMChannel`
    - :class:`~discord.GroupChannel`
    - :class:`~discord.User`
    - :class:`~discord.Member`
    - :class:`~discord.ext.commands.Context`
    - :class:`~discord.Thread`
    """
    __slots__ = ()
    _state: ConnectionState
    async def _get_channel(self) -> MessageableChannel:
        # Implementations resolve themselves to the concrete channel messages
        # are actually sent to (e.g. a User resolves to its DM channel).
        raise NotImplementedError
    # Typing-only overloads for send(): the four combinations of
    # embed/embeds and file/files.  Runtime implementation follows.
    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embed: Embed = ...,
        file: File = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        components: List[Component] = ...,
    ) -> Message:
        ...
    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embed: Embed = ...,
        files: List[File] = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        components: List[Component] = ...,
    ) -> Message:
        ...
    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embeds: List[Embed] = ...,
        file: File = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        components: List[Component] = ...,
    ) -> Message:
        ...
    @overload
    async def send(
        self,
        content: Optional[str] = ...,
        *,
        tts: bool = ...,
        embeds: List[Embed] = ...,
        files: List[File] = ...,
        stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
        delete_after: float = ...,
        nonce: Union[str, int] = ...,
        allowed_mentions: AllowedMentions = ...,
        reference: Union[Message, MessageReference, PartialMessage] = ...,
        mention_author: bool = ...,
        components: List[Component] = ...,
    ) -> Message:
        ...
    async def send(
        self,
        content=None,
        *,
        tts=None,
        embed=None,
        embeds=None,
        file=None,
        files=None,
        stickers=None,
        delete_after=None,
        nonce=None,
        allowed_mentions=None,
        reference=None,
        mention_author=None,
        components=None
    ):
        """|coro|
        Sends a message to the destination with the content given.
        The content must be a type that can convert to a string through ``str(content)``.
        If the content is set to ``None`` (the default), then the ``embed`` parameter must
        be provided.
        To upload a single file, the ``file`` parameter should be used with a
        single :class:`~discord.File` object. To upload multiple files, the ``files``
        parameter should be used with a :class:`list` of :class:`~discord.File` objects.
        **Specifying both parameters will lead to an exception**.
        To upload a single embed, the ``embed`` parameter should be used with a
        single :class:`~discord.Embed` object. To upload multiple embeds, the ``embeds``
        parameter should be used with a :class:`list` of :class:`~discord.Embed` objects.
        **Specifying both parameters will lead to an exception**.
        Parameters
        ------------
        content: Optional[:class:`str`]
            The content of the message to send.
        tts: :class:`bool`
            Indicates if the message should be sent using text-to-speech.
        embed: :class:`~discord.Embed`
            The rich embed for the content.
        file: :class:`~discord.File`
            The file to upload.
        files: List[:class:`~discord.File`]
            A list of files to upload. Must be a maximum of 10.
        nonce: :class:`int`
            The nonce to use for sending this message. If the message was successfully sent,
            then the message will have a nonce with this value.
        delete_after: :class:`float`
            If provided, the number of seconds to wait in the background
            before deleting the message we just sent. If the deletion fails,
            then it is silently ignored.
        allowed_mentions: :class:`~discord.AllowedMentions`
            Controls the mentions being processed in this message. If this is
            passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
            The merging behaviour only overrides attributes that have been explicitly passed
            to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
            If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
            are used instead.
            .. versionadded:: 1.4
        reference: Union[:class:`~discord.Message`, :class:`~discord.MessageReference`, :class:`~discord.PartialMessage`]
            A reference to the :class:`~discord.Message` to which you are replying, this can be created using
            :meth:`~discord.Message.to_reference` or passed directly as a :class:`~discord.Message`. You can control
            whether this mentions the author of the referenced message using the :attr:`~discord.AllowedMentions.replied_user`
            attribute of ``allowed_mentions`` or by setting ``mention_author``.
            .. versionadded:: 1.6
        mention_author: Optional[:class:`bool`]
            If set, overrides the :attr:`~discord.AllowedMentions.replied_user` attribute of ``allowed_mentions``.
            .. versionadded:: 1.6
        components: Optional[List[:class:`~discord.Component`]]
            A list of message components to include with the message.
        embeds: List[:class:`~discord.Embed`]
            A list of embeds to upload. Must be a maximum of 10.
            .. versionadded:: 2.0
        stickers: Sequence[Union[:class:`~discord.GuildSticker`, :class:`~discord.StickerItem`]]
            A list of stickers to upload. Must be a maximum of 3.
            .. versionadded:: 2.0
        Raises
        --------
        ~discord.HTTPException
            Sending the message failed.
        ~discord.Forbidden
            You do not have the proper permissions to send the message.
        ~discord.InvalidArgument
            The ``files`` list is not of the appropriate size,
            you specified both ``file`` and ``files``,
            or you specified both ``embed`` and ``embeds``,
            or the ``reference`` object is not a :class:`~discord.Message`,
            :class:`~discord.MessageReference` or :class:`~discord.PartialMessage`.
        Returns
        ---------
        :class:`~discord.Message`
            The message that was sent.
        """
        channel = await self._get_channel()
        state = self._state
        content = str(content) if content is not None else None
        if embed is not None and embeds is not None:
            raise InvalidArgument('cannot pass both embed and embeds parameter to send()')
        if embed is not None:
            embed = embed.to_dict()
        elif embeds is not None:
            if len(embeds) > 10:
                raise InvalidArgument('embeds parameter must be a list of up to 10 elements')
            embeds = [embed.to_dict() for embed in embeds]
        if stickers is not None:
            stickers = [sticker.id for sticker in stickers]
        # Merge caller-provided mentions with the client-wide defaults; the
        # caller's explicitly-set attributes win during the merge.
        if allowed_mentions is not None:
            if state.allowed_mentions is not None:
                allowed_mentions = state.allowed_mentions.merge(allowed_mentions).to_dict()
            else:
                allowed_mentions = allowed_mentions.to_dict()
        else:
            allowed_mentions = state.allowed_mentions and state.allowed_mentions.to_dict()
        if mention_author is not None:
            allowed_mentions = allowed_mentions or AllowedMentions().to_dict()
            allowed_mentions['replied_user'] = bool(mention_author)
        if reference is not None:
            try:
                reference = reference.to_message_reference_dict()
            except AttributeError:
                raise InvalidArgument('reference parameter must be Message, MessageReference, or PartialMessage') from None
        if components is not None:
            components = ComponentStore(components).to_dict()
        if file is not None and files is not None:
            raise InvalidArgument('cannot pass both file and files parameter to send()')
        if file is not None:
            if not isinstance(file, File):
                raise InvalidArgument('file parameter must be File')
            try:
                data = await state.http.send_files(
                    channel.id,
                    files=[file],
                    allowed_mentions=allowed_mentions,
                    content=content,
                    tts=tts,
                    embed=embed,
                    embeds=embeds,
                    nonce=nonce,
                    message_reference=reference,
                    stickers=stickers,
                    components=components,
                )
            finally:
                # Always release the file handle, even if the upload failed.
                file.close()
        elif files is not None:
            if len(files) > 10:
                raise InvalidArgument('files parameter must be a list of up to 10 elements')
            elif not all(isinstance(file, File) for file in files):
                raise InvalidArgument('files parameter must be a list of File')
            try:
                data = await state.http.send_files(
                    channel.id,
                    files=files,
                    content=content,
                    tts=tts,
                    embed=embed,
                    embeds=embeds,
                    nonce=nonce,
                    allowed_mentions=allowed_mentions,
                    message_reference=reference,
                    stickers=stickers,
                    components=components,
                )
            finally:
                for f in files:
                    f.close()
        else:
            data = await state.http.send_message(
                channel.id,
                content,
                tts=tts,
                embed=embed,
                embeds=embeds,
                nonce=nonce,
                allowed_mentions=allowed_mentions,
                message_reference=reference,
                stickers=stickers,
                components=components,
            )
        ret = state.create_message(channel=channel, data=data)
        # if view:
        #     state.store_view(view, ret.id)
        if delete_after is not None:
            await ret.delete(delay=delete_after)
        return ret
    async def trigger_typing(self) -> None:
        """|coro|
        Triggers a *typing* indicator to the destination.
        *Typing* indicator will go away after 10 seconds, or after a message is sent.
        """
        channel = await self._get_channel()
        await self._state.http.send_typing(channel.id)
    def typing(self) -> Typing:
        """Returns a context manager that allows you to type for an indefinite period of time.
        This is useful for denoting long computations in your bot.
        .. note::
            This is both a regular context manager and an async context manager.
            This means that both ``with`` and ``async with`` work with this.
        Example Usage: ::
            async with channel.typing():
                # simulate something heavy
                await asyncio.sleep(10)
            await channel.send('done!')
        """
        return Typing(self)
    async def fetch_message(self, id: int, /) -> Message:
        """|coro|
        Retrieves a single :class:`~discord.Message` from the destination.
        Parameters
        ------------
        id: :class:`int`
            The message ID to look for.
        Raises
        --------
        ~discord.NotFound
            The specified message was not found.
        ~discord.Forbidden
            You do not have the permissions required to get a message.
        ~discord.HTTPException
            Retrieving the message failed.
        Returns
        --------
        :class:`~discord.Message`
            The message asked for.
        """
        channel = await self._get_channel()
        data = await self._state.http.get_message(channel.id, id)
        return self._state.create_message(channel=channel, data=data)
    async def pins(self) -> List[Message]:
        """|coro|
        Retrieves all messages that are currently pinned in the channel.
        .. note::
            Due to a limitation with the Discord API, the :class:`.Message`
            objects returned by this method do not contain complete
            :attr:`.Message.reactions` data.
        Raises
        -------
        ~discord.HTTPException
            Retrieving the pinned messages failed.
        Returns
        --------
        List[:class:`~discord.Message`]
            The messages that are currently pinned.
        """
        channel = await self._get_channel()
        state = self._state
        data = await state.http.pins_from(channel.id)
        return [state.create_message(channel=channel, data=m) for m in data]
    def history(
        self,
        *,
        limit: Optional[int] = 100,
        before: Optional[SnowflakeTime] = None,
        after: Optional[SnowflakeTime] = None,
        around: Optional[SnowflakeTime] = None,
        oldest_first: Optional[bool] = None,
    ) -> HistoryIterator:
        """Returns an :class:`~discord.AsyncIterator` that enables receiving the destination's message history.
        You must have :attr:`~discord.Permissions.read_message_history` permissions to use this.
        Examples
        ---------
        Usage ::
            counter = 0
            async for message in channel.history(limit=200):
                if message.author == client.user:
                    counter += 1
        Flattening into a list: ::
            messages = await channel.history(limit=123).flatten()
            # messages is now a list of Message...
        All parameters are optional.
        Parameters
        -----------
        limit: Optional[:class:`int`]
            The number of messages to retrieve.
            If ``None``, retrieves every message in the channel. Note, however,
            that this would make it a slow operation.
        before: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
            Retrieve messages before this date or message.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
        after: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
            Retrieve messages after this date or message.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
        around: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
            Retrieve messages around this date or message.
            If a datetime is provided, it is recommended to use a UTC aware datetime.
            If the datetime is naive, it is assumed to be local time.
            When using this argument, the maximum limit is 101. Note that if the limit is an
            even number then this will return at most limit + 1 messages.
        oldest_first: Optional[:class:`bool`]
            If set to ``True``, return messages in oldest->newest order. Defaults to ``True`` if
            ``after`` is specified, otherwise ``False``.
        Raises
        ------
        ~discord.Forbidden
            You do not have permissions to get channel message history.
        ~discord.HTTPException
            The request to get message history failed.
        Yields
        -------
        :class:`~discord.Message`
            The message with the message data parsed.
        """
        # Lazy: no HTTP request happens until the iterator is consumed.
        return HistoryIterator(self, limit=limit, before=before, after=after, around=around, oldest_first=oldest_first)
class Connectable(Protocol):
    """An ABC that details the common operations on a channel that can
    connect to a voice server.
    The following implement this ABC:
    - :class:`~discord.VoiceChannel`
    - :class:`~discord.StageChannel`
    Note
    ----
    This ABC is not decorated with :func:`typing.runtime_checkable`, so will fail :func:`isinstance`/:func:`issubclass`
    checks.
    """
    __slots__ = ()
    _state: ConnectionState
    def _get_voice_client_key(self) -> Tuple[int, str]:
        # Implementations return the (id, key-name) pair used to index the
        # connection-state voice-client registry.
        raise NotImplementedError
    def _get_voice_state_pair(self) -> Tuple[int, int]:
        # Implementations return the (guild_id, channel_id) pair used for
        # voice-state updates.
        raise NotImplementedError
    async def connect(
        self,
        *,
        timeout: float = 60.0,
        reconnect: bool = True,
        cls: Callable[[Client, Connectable], T] = VoiceClient,
    ) -> T:
        """|coro|
        Connects to voice and creates a :class:`VoiceClient` to establish
        your connection to the voice server.
        This requires :attr:`Intents.voice_states`.
        Parameters
        -----------
        timeout: :class:`float`
            The timeout in seconds to wait for the voice endpoint.
        reconnect: :class:`bool`
            Whether the bot should automatically attempt
            a reconnect if a part of the handshake fails
            or the gateway goes down.
        cls: Type[:class:`VoiceProtocol`]
            A type that subclasses :class:`~discord.VoiceProtocol` to connect with.
            Defaults to :class:`~discord.VoiceClient`.
        Raises
        -------
        asyncio.TimeoutError
            Could not connect to the voice channel in time.
        ~discord.ClientException
            You are already connected to a voice channel.
        ~discord.opus.OpusNotLoaded
            The opus library has not been loaded.
        Returns
        --------
        :class:`~discord.VoiceProtocol`
            A voice client that is fully connected to the voice server.
        """
        key_id, _ = self._get_voice_client_key()
        state = self._state
        if state._get_voice_client(key_id):
            raise ClientException('Already connected to a voice channel.')
        client = state._get_client()
        voice = cls(client, self)
        if not isinstance(voice, VoiceProtocol):
            raise TypeError('Type must meet VoiceProtocol abstract base class.')
        # Register the client before connecting so the gateway can route
        # voice-server events to it during the handshake.
        state._add_voice_client(key_id, voice)
        try:
            await voice.connect(timeout=timeout, reconnect=reconnect)
        except asyncio.TimeoutError:
            try:
                await voice.disconnect(force=True)
            except Exception:
                # we don't care if disconnect failed because connection failed
                pass
            raise  # re-raise
        return voice
| 34.298339
| 154
| 0.594791
|
4a0b5fa72af71599b106b26877b44f7317e9ae20
| 3,163
|
py
|
Python
|
tests/data_context/test_data_context_store_configs.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | 1
|
2021-01-20T18:29:52.000Z
|
2021-01-20T18:29:52.000Z
|
tests/data_context/test_data_context_store_configs.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | 47
|
2020-07-15T06:32:50.000Z
|
2022-03-29T12:03:23.000Z
|
tests/data_context/test_data_context_store_configs.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from ruamel.yaml import YAML
import great_expectations as ge
yaml = YAML()
# Block-style (indented) YAML output so the dumped config stays readable.
yaml.default_flow_style = False
@pytest.fixture(scope="function")
def totally_empty_data_context(tmp_path_factory):
    """Yield a DataContext built from a minimal, hand-written v2 config.
    The config deliberately points the evaluation-parameter and validations
    store names at stores that do not exist; only a single filesystem-backed
    expectations store is actually defined.
    """
    # NOTE: This sets up a DataContext with a real path and a config saved to that path.
    # Now that BaseDataContext exists, it's possible to test most DataContext methods without touching the file system.
    # However, as of 2019/08/22, most tests still use filesystem-based fixtures.
    # TODO: Where appropriate, switch DataContext tests to the new method.
    project_root_dir = str(tmp_path_factory.mktemp("totally_empty_data_context"))
    os.mkdir(os.path.join(project_root_dir, "great_expectations"))
    config = {
        "config_version": 2,
        "plugins_directory": "plugins/",
        "evaluation_parameter_store_name": "not_a_real_store_name",
        "validations_store_name": "another_fake_store",
        "expectations_store_name": "expectations_store",
        "datasources": {},
        "stores": {
            "expectations_store": {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "TupleFilesystemStoreBackend",
                    "base_directory": "expectations/",
                },
            },
        },
        "data_docs_sites": {},
        "validation_operators": {},
    }
    with open(
        os.path.join(project_root_dir, "great_expectations/great_expectations.yml"), "w"
    ) as config_file:
        yaml.dump(config, config_file)
    context = ge.data_context.DataContext(
        os.path.join(project_root_dir, "great_expectations")
    )
    # print(json.dumps(context._project_config, indent=2))
    return context
def test_create(tmp_path_factory):
    """DataContext.create() scaffolds a project and returns a DataContext."""
    project_path = str(tmp_path_factory.mktemp("path_001"))
    context = ge.data_context.DataContext.create(project_path)
    assert isinstance(context, ge.data_context.DataContext)
def test_add_store(totally_empty_data_context):
    """add_store() registers a new named store on an existing context."""
    # The fixture's config defines exactly one store (the expectations store).
    assert len(totally_empty_data_context.stores.keys()) == 1
    totally_empty_data_context.add_store(
        "my_new_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ValidationsStore",
        },
    )
    assert "my_new_store" in totally_empty_data_context.stores.keys()
    assert len(totally_empty_data_context.stores.keys()) == 2
def test_default_config_yml_stores(tmp_path_factory):
    """A freshly scaffolded project ships the three default stores, and
    add_store() extends that set."""
    project_path = str(tmp_path_factory.mktemp("totally_empty_data_context"))
    context = ge.data_context.DataContext.create(project_path)
    assert set(context.stores.keys()) == {
        "expectations_store",
        "validations_store",
        "evaluation_parameter_store",
    }
    context.add_store(
        "my_new_validations_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ValidationsStore",
        },
    )
    assert set(context.stores.keys()) == {
        "expectations_store",
        "validations_store",
        "evaluation_parameter_store",
        "my_new_validations_store",
    }
| 32.608247
| 119
| 0.67025
|
4a0b5fe4a7a5f2332568257d12055aa709e38a5b
| 1,138
|
py
|
Python
|
client.py
|
Scauting-Burgum/ScautNet-python
|
665d9f6a9d981aeec8cb17290653121e76a3bf96
|
[
"MIT"
] | 1
|
2018-06-11T16:51:48.000Z
|
2018-06-11T16:51:48.000Z
|
client.py
|
Scauting-Burgum/ScautNet-python
|
665d9f6a9d981aeec8cb17290653121e76a3bf96
|
[
"MIT"
] | null | null | null |
client.py
|
Scauting-Burgum/ScautNet-python
|
665d9f6a9d981aeec8cb17290653121e76a3bf96
|
[
"MIT"
] | null | null | null |
from socket import socket
from threading import Thread
from time import sleep

from . import connection as connection_module
from . import pipeline as pipeline_module
class Client(Thread):
    """A threaded TCP client that wraps a connected socket in a ScautNet pipeline.

    The thread's run loop only keeps the pipeline alive until either side
    dies; actual data exchange happens through :meth:`push` and :meth:`pull`.
    """

    def __init__(self, hostname, port):
        """Remember the target address; the socket is opened in :meth:`start`."""
        super().__init__()
        self.hostname = hostname
        self.port = port
        # Internal liveness flag; cleared by kill() to stop the run loop.
        self._alive = True

    def get_pipeline(self):
        """Build a Pipeline over a Connection wrapping the connected socket."""
        connection = connection_module.Connection(self.socket)
        return pipeline_module.Pipeline(connection)

    def start(self):
        """Connect the socket, build the pipeline, then start the thread."""
        self.socket = socket()
        self.socket.settimeout(1)
        self.socket.connect((self.hostname, self.port))
        self.pipeline = self.get_pipeline()
        super().start()

    def run(self):
        """Idle until the client or pipeline dies, then kill the pipeline.

        BUG FIX: the original loop body was ``pass``, a busy-wait that pinned
        a CPU core for the lifetime of the connection.  Sleeping briefly
        between liveness checks keeps shutdown responsive without the spin.
        """
        try:
            while self.alive and self.pipeline.alive:
                sleep(0.05)  # avoid a 100%-CPU busy-wait
        finally:
            self.pipeline.kill()

    def push(self, data):
        """Send *data* through the pipeline."""
        self.pipeline.push(data)

    def pull(self, timeout=1):
        """Return the next item from the pipeline, waiting up to *timeout* seconds."""
        return self.pipeline.pull(timeout=timeout)

    @property
    def alive(self):
        """True while the thread is running and kill() has not been called."""
        return self.is_alive() and self._alive

    def kill(self):
        """Request the run loop to exit; run() then kills the pipeline."""
        self._alive = False
| 25.863636
| 62
| 0.623902
|
4a0b602f115a2e4234cff8f72c6fd59498c0c1e0
| 1,038
|
py
|
Python
|
test/test_add_contact.py
|
vyacheslavmarkov/python_training
|
ccf3e565e2eda3f04b29931b6fad3c70069959cc
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
vyacheslavmarkov/python_training
|
ccf3e565e2eda3f04b29931b6fad3c70069959cc
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
vyacheslavmarkov/python_training
|
ccf3e565e2eda3f04b29931b6fad3c70069959cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
# concatenate emails and phones for the future comparison with db data
contact.all_emails_from_homepage = contact.email + contact.email_2 + contact.email_3
contact.all_phones_from_homepage = contact.homephone + contact.mobilephone + contact.workphone + contact.secondaryphone
old_contacts = db.get_contact_list()
app.contact.create(contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) + 1 == len(new_contacts)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
# tune db contact data to be appropriate for the homepage representation
old_contacts = app.contact.make_contacts_like_on_homepage(old_contacts)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contacts_list(), key=Contact.id_or_max)
| 51.9
| 124
| 0.760116
|
4a0b60878aa6ccf9746e26f6052357e9987ab4bb
| 2,490
|
py
|
Python
|
pytorch/cifer10_half.py
|
0h-n0/py-dnn-tuto
|
ad21714652e6e3591b914f0a8e23a7be8cd5d2e4
|
[
"MIT"
] | null | null | null |
pytorch/cifer10_half.py
|
0h-n0/py-dnn-tuto
|
ad21714652e6e3591b914f0a8e23a7be8cd5d2e4
|
[
"MIT"
] | null | null | null |
pytorch/cifer10_half.py
|
0h-n0/py-dnn-tuto
|
ad21714652e6e3591b914f0a8e23a7be8cd5d2e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import numpy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
def get_data():
DATAPATH = '../datasets/cifar-10-batches-py'
file_list = ['data_batch_1',
'data_batch_2',
'data_batch_3',
'data_batch_4',
'data_batch_5',
'test_batch',]
def unpickle(file):
import pickle
with open(file, 'rb') as f:
dictonary = pickle.load(f, encoding='bytes')
## keys (batch_lable, labels, data, filename)
return dictonary
all_data = [unpickle(DATAPATH + '/' + i) for i in file_list]
return all_data
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.fc1 = nn.Linear(32*32, 1000)
self.fc2 = nn.Linear(1000, 500)
self.fc3 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return F.softmax(x)
batch_size = 32
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **{})
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **{})
m = CNN()
m.cuda()
optimizer = optim.SGD(m.parameters(), lr=0.1, momentum=0.9)
def train(epoch):
m.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
print(data)
optimizer.zero_grad()
output = m(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
train(1)
| 30
| 75
| 0.554618
|
4a0b60f9237bb21310fcbe4e50c41f5ec824ceb0
| 718
|
py
|
Python
|
com/algorithm/kNN.py
|
liuyangyang2015/PythonDemo
|
a72c009a31ff833dd12405bb97e688ae91ceda6c
|
[
"MIT"
] | null | null | null |
com/algorithm/kNN.py
|
liuyangyang2015/PythonDemo
|
a72c009a31ff833dd12405bb97e688ae91ceda6c
|
[
"MIT"
] | null | null | null |
com/algorithm/kNN.py
|
liuyangyang2015/PythonDemo
|
a72c009a31ff833dd12405bb97e688ae91ceda6c
|
[
"MIT"
] | null | null | null |
import numpy as np
import operator
from imp import reload
# k近邻算法实现
# data=[[1,1.1],[1,1],[0,0],[0,0.1]]
# dataSet=np.array(data)
# labels=['A','A','B','B']
def classify0(inX,dataSet,labels,k):
dataSetSize = dataSet.shape[0]
diffMat = np.tile(inX,(dataSetSize,1)) - dataSet
sqDiffMat = diffMat**2
sqDistances=sqDiffMat.sum(axis=1)
distance = sqDistances**0.5
sortedDistIndicies = np.argsort(distance)
classCount={}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel]=classCount.get(voteIlabel,0)+1
sortedClassCount=sorted(classCount.items(),key=operator.itemgetter(1),reverse =True)
return sortedClassCount[0][0]
# reload(np)
| 28.72
| 88
| 0.682451
|
4a0b61921eb124568e6fb9f1d552292e245c7044
| 244
|
py
|
Python
|
src/test/tls.py
|
jalapenopuzzle/rr
|
6f8b3c73868d9fd3e6ac14a1322b92dbe9958807
|
[
"BSD-1-Clause"
] | 5,156
|
2015-01-01T06:10:28.000Z
|
2020-11-13T15:12:34.000Z
|
src/test/tls.py
|
jalapenopuzzle/rr
|
6f8b3c73868d9fd3e6ac14a1322b92dbe9958807
|
[
"BSD-1-Clause"
] | 1,214
|
2015-01-02T02:32:13.000Z
|
2020-11-09T04:36:26.000Z
|
src/test/tls.py
|
jalapenopuzzle/rr
|
6f8b3c73868d9fd3e6ac14a1322b92dbe9958807
|
[
"BSD-1-Clause"
] | 402
|
2015-01-13T22:54:32.000Z
|
2020-11-05T15:02:25.000Z
|
from util import *
send_gdb('break breakpoint_fn')
expect_gdb('Breakpoint 1')
send_gdb('c')
expect_gdb('Breakpoint 1')
send_gdb('print tlsvar')
expect_gdb(' = 97')
send_gdb('reverse-stepi')
send_gdb('print tlsvar')
expect_gdb(' = 97')
ok()
| 15.25
| 31
| 0.717213
|
4a0b6246888a7357a020eb9166da58f542d13042
| 2,483
|
py
|
Python
|
saleor/order/migrations/0102_auto_20210310_1552.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/order/migrations/0102_auto_20210310_1552.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/order/migrations/0102_auto_20210310_1552.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
# Generated by Django 3.1.7 on 2021-03-10 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("order", "0101_auto_20210308_1213"),
]
operations = [
migrations.AlterField(
model_name="order",
name="language_code",
field=models.CharField(
choices=[
("ar", "Arabic"),
("az", "Azerbaijani"),
("bg", "Bulgarian"),
("bn", "Bengali"),
("ca", "Catalan"),
("cs", "Czech"),
("da", "Danish"),
("de", "German"),
("el", "Greek"),
("en", "English"),
("es", "Spanish"),
("es-co", "Colombian Spanish"),
("et", "Estonian"),
("fa", "Persian"),
("fi", "Finnish"),
("fr", "French"),
("hi", "Hindi"),
("hu", "Hungarian"),
("hy", "Armenian"),
("id", "Indonesian"),
("is", "Icelandic"),
("it", "Italian"),
("ja", "Japanese"),
("ka", "Georgian"),
("km", "Khmer"),
("ko", "Korean"),
("lt", "Lithuanian"),
("mn", "Mongolian"),
("my", "Burmese"),
("nb", "Norwegian"),
("nl", "Dutch"),
("pl", "Polish"),
("pt", "Portuguese"),
("pt-br", "Brazilian Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("sq", "Albanian"),
("sr", "Serbian"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("th", "Thai"),
("tr", "Turkish"),
("uk", "Ukrainian"),
("vi", "Vietnamese"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
],
default="en",
max_length=35,
),
),
]
| 34.013699
| 55
| 0.306081
|
4a0b627640802f52d9ee6bfc605553802d8f36e0
| 585
|
py
|
Python
|
backend/prog-hist/backend/src/code/test/playground/chapter02_13.py
|
halilagin/d3studies
|
fbcf50a845cd17bcb469a428afce9854b1b63971
|
[
"CC0-1.0"
] | null | null | null |
backend/prog-hist/backend/src/code/test/playground/chapter02_13.py
|
halilagin/d3studies
|
fbcf50a845cd17bcb469a428afce9854b1b63971
|
[
"CC0-1.0"
] | null | null | null |
backend/prog-hist/backend/src/code/test/playground/chapter02_13.py
|
halilagin/d3studies
|
fbcf50a845cd17bcb469a428afce9854b1b63971
|
[
"CC0-1.0"
] | null | null | null |
import code.book_plots as bp
import code.gh_internal as gh
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np;
from filterpy.discrete_bayes import normalize
def perfect_predict(belief, move):
n = len(belief)
result = np.zeros(n)
for i in range(n):
result[i] = belief[(i-move)%n]
return result
plt.figure(1)
belief = np.array([.35, .1, .2, .3, 0, 0, 0, 0, 0, .05])
plt.subplot(121)
bp.bar_plot(belief, ylim=(0,.4))
newBelief = perfect_predict(belief, 1)
plt.subplot(122)
bp.bar_plot(newBelief, ylim=(0,.4))
plt.show()
| 20.172414
| 56
| 0.683761
|
4a0b62e49dfaf1404a8bae771a001d5966a1c5f7
| 1,558
|
py
|
Python
|
astroscrappy/tests/test_gmos.py
|
saimn/astroscrappy
|
631ae4dc8eb0d9f4775bba7680baf817007d30a8
|
[
"BSD-3-Clause"
] | 48
|
2015-08-10T03:07:48.000Z
|
2022-03-26T11:11:02.000Z
|
astroscrappy/tests/test_gmos.py
|
saimn/astroscrappy
|
631ae4dc8eb0d9f4775bba7680baf817007d30a8
|
[
"BSD-3-Clause"
] | 72
|
2015-06-23T21:36:13.000Z
|
2021-12-13T04:25:01.000Z
|
astroscrappy/tests/test_gmos.py
|
saimn/astroscrappy
|
631ae4dc8eb0d9f4775bba7680baf817007d30a8
|
[
"BSD-3-Clause"
] | 36
|
2015-06-26T14:26:56.000Z
|
2022-03-30T17:04:11.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
import scipy.ndimage as ndi
from astropy.io import fits
from ..astroscrappy import detect_cosmics
TESTFILE = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'gmos.fits')
def test_gmos():
"""This test uses a small cutout from a standard observation with GMOS
(S20190808S0048). The extracted region is [362:, 480:680], and the file has
been reduced with DRAGONS.
"""
with fits.open(TESTFILE) as hdul:
data = hdul['SCI'].data
var = hdul['VAR'].data
sky = hdul['SKYFIT'].data
m1, _ = detect_cosmics(data, readnoise=4.24, gain=1.933)
m2, _ = detect_cosmics(data, inbkg=sky, readnoise=4.24, gain=1.933)
m3, _ = detect_cosmics(data, inbkg=sky, invar=var, readnoise=4.24, gain=1.933)
cosmic1 = (slice(41, 72), slice(142, 161))
cosmic2 = (slice(117, 147), slice(35, 43))
# We must find 2 cosmic rays, but m1 (without bkg and var) also flags
# 2 additional pixels that are identified as independent regions
label, nb = ndi.label(m1)
assert nb == 4
objects = ndi.find_objects(label)
assert cosmic1 in objects
assert cosmic2 in objects
areas = sorted([np.sum(label == (i+1)) for i in range(nb)])
assert areas == [1, 1, 74, 93]
for mask in m2, m3:
label, nb = ndi.label(mask)
assert nb == 2
objects = ndi.find_objects(label)
assert objects[0] == cosmic1
assert objects[1] == cosmic2
| 32.458333
| 82
| 0.643132
|
4a0b64aaa8b1d042be5aa44631b07f8644b9c616
| 6,808
|
py
|
Python
|
Bot/bot.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
Bot/bot.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
Bot/bot.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
import hikari
import lightbulb
import utils
from imports import *
_BotT = t.TypeVar("_BotT", bound="Bot")
from Bot.DataBase.Connection import DBConnection
from Bot.DataBase.prefix import DBPrefix
from Bot.DataBase.warnsys import DBwarn
from Bot.DataBase.mutesys import DBMute
from Bot.DataBase.settings import DBSettings
from Bot.DataBase.erros import DBErros
from Bot.DataBase.welcome import *
from Bot.DataBase.cocsys import DBCoc
from Bot.DataBase.LinkSystem import DBLink
from Bot.DataBase.levelsys import DBLevel
from Bot.DataBase.auto_role import DBRole
from Bot.DataBase.badword import DBBadWord
from Bot.DataBase.logChannel import DBlog
from Bot.DataBase.aduitlogsys import DBAuditLog
from Bot import __version__, __prefix__, __beta__, __guilds__
from utils import HelpCommand
log = logging.getLogger(__name__)
class Prefixes:
def __init__(self, bot):
self.bot = bot
self.prefixes = {}
self.prefixes = DBPrefix(self.bot.db).get_all_prefixes()
def get_prefix(self, guild_id):
if str(guild_id) not in self.prefixes:
result = DBPrefix(self.bot.db).get_all_prefixes()
if str(guild_id) not in result:
result[str(guild_id)] = "!"
DBPrefix(self.bot.db).insert_one(guild_id, "!")
self.prefixes = result
return self.prefixes.get(str(guild_id))
def get_prefixes(self, bot, msg):
return self.get_prefix(msg.guild_id)
def change_prefix(self, guild_id, prefix):
result = DBPrefix(self.bot.db).get_prefix_for_guild(guild_id)
if len(result) == 0:
DBPrefix(self.bot.db).insert_one(guild_id, prefix)
else:
DBPrefix(self.bot.db).update_one(guild_id, prefix)
result = DBPrefix(self.bot.db).get_all_prefixes()
self.prefixes = result
class FirstBot(lightbulb.BotApp):
def __init__(self):
self.log = log
self.db = DBConnection()
self._prefix__get_class = Prefixes(self)
self._extensions = [p.stem for p in Path("./extensions/").glob("*.py")]
self._extensions.extend([f"moderation.{p.stem}" for p in Path("./extensions/moderation/").glob("*.py")])
self._extensions.extend([f"events.{p.stem}" for p in Path("./extensions/events/").glob("*.py")])
self._extensions.extend([f"settings.{p.stem}" for p in Path("./extensions/settings/").glob("*.py")])
self._extensions.extend([f"test.{p.stem}" for p in Path("./extensions/test/").glob("*.py")])
self._extensions.extend([f"security.{p.stem}" for p in Path("./extensions/security/").glob("*.py")])
self._extensions.extend([f"fun.{p.stem}" for p in Path("./extensions/fun/").glob("*.py")])
self._extensions.extend(
[f"server_managment.{p.stem}" for p in Path("./extensions/server_managment/").glob("*.py")])
self.env = utils.env()
self.token = token = self.env.get('TOKEN1')
if __beta__ == True:
super().__init__(
token=token,
intents=hikari.Intents.ALL,
prefix=lightbulb.app.when_mentioned_or(self._prefix__get_class.get_prefixes),
default_enabled_guilds=__guilds__,
help_class=HelpCommand,
help_slash_command=True,
ignore_bots=True,
case_insensitive_prefix_commands=True,
logs={
"version": 1,
"incremental": True,
"loggers": {
"hikari": {"level": "INFO"},
"lightbulb": {"level": "INFO"},
},
},
)
else:
super().__init__(
token=token,
intents=hikari.Intents.ALL,
prefix=lightbulb.app.when_mentioned_or(self._prefix__get_class.get_prefixes),
ignore_bots=True,
help_class=HelpCommand,
help_slash_command=True,
case_insensitive_prefix_commands=True,
logs={
"version": 1,
"incremental": True,
"loggers": {
"hikari": {"level": "INFO"},
"lightbulb": {"level": "INFO"},
},
},
)
def run(self: _BotT) -> None:
self.event_manager.subscribe(hikari.StartingEvent, self.on_starting)
self.event_manager.subscribe(hikari.StartedEvent, self.on_started)
self.event_manager.subscribe(hikari.StoppingEvent, self.on_stopping)
self.event_manager.subscribe(hikari.VoiceStateUpdateEvent, self.on_voice_state_update)
self.event_manager.subscribe(hikari.VoiceServerUpdateEvent, self.on_voice_server_update)
super().run(
activity=hikari.Activity(
name=f"Version {__version__}",
type=hikari.ActivityType.COMPETING,
)
)
async def on_starting(self: _BotT, event: hikari.StartingEvent) -> None:
for ext in self._extensions:
self.load_extensions(f"Bot.extensions.{ext}")
log.info(f"'{ext}' extension loaded")
DBPrefix(self.db).create()
DBwarn(self.db).create()
DBMute(self.db).create()
DBSettings(self.db).create()
DBErros(self.db).create()
DBWelcomeChannel(self.db).create()
DBCoc(self.db).create()
DBLink(self.db).create()
DBLevel(self.db).create()
DBRole(self.db).create()
DBBadWord(self.db).create()
DBlog(self.db).create()
DBAuditLog(self.db).create()
# cache = sake.redis.RedisCache(self, self, address="redis://127.0.0.1")
# await cache.open()
# log.info("Connected to Redis server")
async def on_started(self: _BotT, event: lightbulb.LightbulbStartedEvent) -> None:
"""builder = (
lavasnek_rs.LavalinkBuilder(int(b64decode(self.token.split(".")[0])), self.token)
.set_host("127.0.0.1")
)
builder.set_start_gateway(False)
self.lavalink = await builder.build(EventHandler())
log.info("Created Lavalink instance")
# self.stdout_channel = await self.rest.fetch_channel(STDOUT_CHANNEL_ID)
# await self.stdout_channel.send(f"Testing v{__version__} now online!")"""
async def on_stopping(self: _BotT, event: hikari.StoppingEvent) -> None:
# This is gonna be fixed.
# await self.stdout_channel.send(f"Testing v{__version__} is shutting down.")
...
async def on_voice_state_update(self, event: hikari.VoiceStateUpdateEvent) -> None:
return
async def on_voice_server_update(self, event: hikari.VoiceServerUpdateEvent) -> None:
return
| 39.126437
| 112
| 0.611633
|
4a0b6666c074189021a817dc7a5bd64e4972f236
| 397
|
py
|
Python
|
platzigram/wsgi.py
|
AlejoRMinetti/my-django-clonegram
|
c8ace136cbea6d5250925b2b42ad1e8cd7458b23
|
[
"MIT"
] | 68
|
2018-06-28T04:18:47.000Z
|
2022-01-27T03:01:22.000Z
|
platzigram/wsgi.py
|
AlejoRMinetti/my-django-clonegram
|
c8ace136cbea6d5250925b2b42ad1e8cd7458b23
|
[
"MIT"
] | 12
|
2020-02-12T00:37:53.000Z
|
2022-03-11T23:50:33.000Z
|
platzigram/wsgi.py
|
JavierGlezB/platzigram
|
29cce12de133154f9c537c23285925492c615d3d
|
[
"MIT"
] | 80
|
2018-08-16T00:31:46.000Z
|
2022-02-12T17:24:00.000Z
|
"""
WSGI config for platzigram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "platzigram.settings")
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
4a0b6755997287020efa70d09cd6c9ec88111bc2
| 662
|
py
|
Python
|
tests/return/test_at_top_level.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 2
|
2021-12-18T01:52:50.000Z
|
2022-01-17T19:41:52.000Z
|
tests/return/test_at_top_level.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 18
|
2021-11-30T04:05:53.000Z
|
2022-02-01T03:30:04.000Z
|
tests/return/test_at_top_level.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/return/at_top_level.lox
TEST_SRC = dedent(
"""\
return "wat"; // Error at 'return': Can't return from top-level code.
"""
)
EXPECTED_STDOUTS = ["1:1: LoxResolverError: Can't return from top-level code."]
def test_at_top_level(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| 25.461538
| 109
| 0.732628
|
4a0b678794c4fc9b044ac7f6d54dfa3b9eb6e313
| 781
|
py
|
Python
|
config.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
config.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
config.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
UPLOADED_PHOTOS_DEST ='app/static/photos'
QUOTE_API_BASE_URL = 'http://quotes.stormconsultancy.co.uk/random.json'
class ProdConfig(Config):
'''
Production Configuration child class
Args:
Config: The parent configuration class with general configuration settings
'''
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
'''
Development configuration child class.
Args:
Config: The parent configuration class with general configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kevson:Antidolofinomonoligasta102@localhost/blogs'
DEBUG = True
config_options = {
'development' : DevConfig,
'production' : ProdConfig
}
| 25.193548
| 101
| 0.751601
|
4a0b695bf01e9148ad252e511b81300e43f53a9f
| 1,516
|
py
|
Python
|
pythran/tests/cases/lu.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,647
|
2015-01-13T01:45:38.000Z
|
2022-03-28T01:23:41.000Z
|
pythran/tests/cases/lu.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,116
|
2015-01-01T09:52:05.000Z
|
2022-03-18T21:06:40.000Z
|
pythran/tests/cases/lu.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 180
|
2015-02-12T02:47:28.000Z
|
2022-03-14T10:28:18.000Z
|
#runas import numpy as np; x = np.arange(1., 26.).reshape(5,5); factorMatrix0(x), factorMatrix1(x)
import numpy as np
#pythran export factorMatrix0(float[:,:])
def factorMatrix0(M):
# Gaussian elimination, partial pivoting.
# M must be an (n,n+1) numpy array. Not tested!
n = M.shape[0]
m= M.shape[1]
for line in range(0, n-1):
# find pivot
cmax = np.argmax(abs(M[line:n,line])) + line
# exchange rows if necessary
if cmax != line:
M[[line,cmax]]=M[[cmax,line]]
# eliminate
pivot = M[line,line]
for j in range(line+1,n):
v= M[j,line]/pivot
for k in range(line,m):
M[j,k]-= v*M[line,k]
#pythran export factorMatrix1(float[:,:])
def factorMatrix1(M):
# Gaussian elimination, partial pivoting.
# M must be an (n,n+1) numpy array. Not tested!
n = M.shape[0]
m= M.shape[1]
for line in range(0, n-1):
# find pivot
cmax=line
vmax= abs(M[line,line])
for i in range(line+1,n):
if abs(M[i,line])> vmax:
vmax= abs(M[i,line])
cmax= i
# exchange rows if necessary
if cmax != line:
for j in range(line,m):
t= M[line,j]
M[line,j]= M[cmax,j]
M[cmax,j]= t
# eliminate
pivot = M[line,line]
for j in range(line+1,n):
v= M[j,line]/pivot
for k in range(line,m):
M[j,k]-= v*M[line,k]
| 29.72549
| 98
| 0.513852
|
4a0b69ad5d272c0d184da0f4e5c335f1b8669506
| 40,375
|
py
|
Python
|
addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py
|
khasvictor/glTF-Blender-IO
|
9f2c4eb42b0415f887ecdcec8a92ff70adb3a561
|
[
"Apache-2.0"
] | null | null | null |
addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py
|
khasvictor/glTF-Blender-IO
|
9f2c4eb42b0415f887ecdcec8a92ff70adb3a561
|
[
"Apache-2.0"
] | null | null | null |
addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py
|
khasvictor/glTF-Blender-IO
|
9f2c4eb42b0415f887ecdcec8a92ff70adb3a561
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
from mathutils import Vector, Quaternion
from mathutils.geometry import tessellate_polygon
from operator import attrgetter
from . import gltf2_blender_export_keys
from ...io.com.gltf2_io_debug import print_console
from ...io.com.gltf2_io_color_management import color_srgb_to_scene_linear
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
#
# Globals
#
INDICES_ID = 'indices'
MATERIAL_ID = 'material'
ATTRIBUTES_ID = 'attributes'
COLOR_PREFIX = 'COLOR_'
MORPH_TANGENT_PREFIX = 'MORPH_TANGENT_'
MORPH_NORMAL_PREFIX = 'MORPH_NORMAL_'
MORPH_POSITION_PREFIX = 'MORPH_POSITION_'
TEXCOORD_PREFIX = 'TEXCOORD_'
WEIGHTS_PREFIX = 'WEIGHTS_'
JOINTS_PREFIX = 'JOINTS_'
TANGENT_ATTRIBUTE = 'TANGENT'
NORMAL_ATTRIBUTE = 'NORMAL'
POSITION_ATTRIBUTE = 'POSITION'
GLTF_MAX_COLORS = 2
#
# Classes
#
class ShapeKey:
def __init__(self, shape_key, vertex_normals, polygon_normals):
self.shape_key = shape_key
self.vertex_normals = vertex_normals
self.polygon_normals = polygon_normals
#
# Functions
#
def convert_swizzle_location(loc, export_settings):
"""Convert a location from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((loc[0], loc[2], -loc[1]))
else:
return Vector((loc[0], loc[1], loc[2]))
def convert_swizzle_tangent(tan, export_settings):
"""Convert a tangent from Blender coordinate system to glTF coordinate system."""
if tan[0] == 0.0 and tan[1] == 0.0 and tan[2] == 0.0:
print_console('WARNING', 'Tangent has zero length.')
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((tan[0], tan[2], -tan[1], 1.0))
else:
return Vector((tan[0], tan[1], tan[2], 1.0))
def convert_swizzle_rotation(rot, export_settings):
"""
Convert a quaternion rotation from Blender coordinate system to glTF coordinate system.
'w' is still at first position.
"""
if export_settings[gltf2_blender_export_keys.YUP]:
return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
else:
return Quaternion((rot[0], rot[1], rot[2], rot[3]))
def convert_swizzle_scale(scale, export_settings):
"""Convert a scale from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((scale[0], scale[2], scale[1]))
else:
return Vector((scale[0], scale[1], scale[2]))
def decompose_transition(matrix, export_settings):
translation, rotation, scale = matrix.decompose()
# Put w at the end.
rotation = Quaternion((rotation[1], rotation[2], rotation[3], rotation[0]))
return translation, rotation, scale
def extract_primitive_floor(a, indices, use_tangents):
"""Shift indices, that the first one starts with 0. It is assumed, that the indices are packed."""
attributes = {
POSITION_ATTRIBUTE: [],
NORMAL_ATTRIBUTE: []
}
if use_tangents:
attributes[TANGENT_ATTRIBUTE] = []
result_primitive = {
MATERIAL_ID: a[MATERIAL_ID],
INDICES_ID: [],
ATTRIBUTES_ID: attributes
}
source_attributes = a[ATTRIBUTES_ID]
#
tex_coord_index = 0
process_tex_coord = True
while process_tex_coord:
tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
if source_attributes.get(tex_coord_id) is not None:
attributes[tex_coord_id] = []
tex_coord_index += 1
else:
process_tex_coord = False
tex_coord_max = tex_coord_index
#
color_index = 0
process_color = True
while process_color:
color_id = COLOR_PREFIX + str(color_index)
if source_attributes.get(color_id) is not None:
attributes[color_id] = []
color_index += 1
else:
process_color = False
color_max = color_index
#
bone_index = 0
process_bone = True
while process_bone:
joint_id = JOINTS_PREFIX + str(bone_index)
weight_id = WEIGHTS_PREFIX + str(bone_index)
if source_attributes.get(joint_id) is not None:
attributes[joint_id] = []
attributes[weight_id] = []
bone_index += 1
else:
process_bone = False
bone_max = bone_index
#
morph_index = 0
process_morph = True
while process_morph:
morph_position_id = MORPH_POSITION_PREFIX + str(morph_index)
morph_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
morph_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
if source_attributes.get(morph_position_id) is not None:
attributes[morph_position_id] = []
attributes[morph_normal_id] = []
if use_tangents:
attributes[morph_tangent_id] = []
morph_index += 1
else:
process_morph = False
morph_max = morph_index
#
min_index = min(indices)
max_index = max(indices)
for old_index in indices:
result_primitive[INDICES_ID].append(old_index - min_index)
for old_index in range(min_index, max_index + 1):
for vi in range(0, 3):
attributes[POSITION_ATTRIBUTE].append(source_attributes[POSITION_ATTRIBUTE][old_index * 3 + vi])
attributes[NORMAL_ATTRIBUTE].append(source_attributes[NORMAL_ATTRIBUTE][old_index * 3 + vi])
if use_tangents:
for vi in range(0, 4):
attributes[TANGENT_ATTRIBUTE].append(source_attributes[TANGENT_ATTRIBUTE][old_index * 4 + vi])
for tex_coord_index in range(0, tex_coord_max):
tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
for vi in range(0, 2):
attributes[tex_coord_id].append(source_attributes[tex_coord_id][old_index * 2 + vi])
for color_index in range(0, color_max):
color_id = COLOR_PREFIX + str(color_index)
for vi in range(0, 4):
attributes[color_id].append(source_attributes[color_id][old_index * 4 + vi])
for bone_index in range(0, bone_max):
joint_id = JOINTS_PREFIX + str(bone_index)
weight_id = WEIGHTS_PREFIX + str(bone_index)
for vi in range(0, 4):
attributes[joint_id].append(source_attributes[joint_id][old_index * 4 + vi])
attributes[weight_id].append(source_attributes[weight_id][old_index * 4 + vi])
for morph_index in range(0, morph_max):
morph_position_id = MORPH_POSITION_PREFIX + str(morph_index)
morph_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
morph_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
for vi in range(0, 3):
attributes[morph_position_id].append(source_attributes[morph_position_id][old_index * 3 + vi])
attributes[morph_normal_id].append(source_attributes[morph_normal_id][old_index * 3 + vi])
if use_tangents:
for vi in range(0, 4):
attributes[morph_tangent_id].append(source_attributes[morph_tangent_id][old_index * 4 + vi])
return result_primitive
def extract_primitive_pack(a, indices, use_tangents):
"""Pack indices, that the first one starts with 0. Current indices can have gaps."""
attributes = {
POSITION_ATTRIBUTE: [],
NORMAL_ATTRIBUTE: []
}
if use_tangents:
attributes[TANGENT_ATTRIBUTE] = []
result_primitive = {
MATERIAL_ID: a[MATERIAL_ID],
INDICES_ID: [],
ATTRIBUTES_ID: attributes
}
source_attributes = a[ATTRIBUTES_ID]
#
tex_coord_index = 0
process_tex_coord = True
while process_tex_coord:
tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
if source_attributes.get(tex_coord_id) is not None:
attributes[tex_coord_id] = []
tex_coord_index += 1
else:
process_tex_coord = False
tex_coord_max = tex_coord_index
#
color_index = 0
process_color = True
while process_color:
color_id = COLOR_PREFIX + str(color_index)
if source_attributes.get(color_id) is not None:
attributes[color_id] = []
color_index += 1
else:
process_color = False
color_max = color_index
#
bone_index = 0
process_bone = True
while process_bone:
joint_id = JOINTS_PREFIX + str(bone_index)
weight_id = WEIGHTS_PREFIX + str(bone_index)
if source_attributes.get(joint_id) is not None:
attributes[joint_id] = []
attributes[weight_id] = []
bone_index += 1
else:
process_bone = False
bone_max = bone_index
#
morph_index = 0
process_morph = True
while process_morph:
morph_position_id = MORPH_POSITION_PREFIX + str(morph_index)
morph_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
morph_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
if source_attributes.get(morph_position_id) is not None:
attributes[morph_position_id] = []
attributes[morph_normal_id] = []
if use_tangents:
attributes[morph_tangent_id] = []
morph_index += 1
else:
process_morph = False
morph_max = morph_index
#
old_to_new_indices = {}
new_to_old_indices = {}
new_index = 0
for old_index in indices:
if old_to_new_indices.get(old_index) is None:
old_to_new_indices[old_index] = new_index
new_to_old_indices[new_index] = old_index
new_index += 1
result_primitive[INDICES_ID].append(old_to_new_indices[old_index])
end_new_index = new_index
for new_index in range(0, end_new_index):
old_index = new_to_old_indices[new_index]
for vi in range(0, 3):
attributes[POSITION_ATTRIBUTE].append(source_attributes[POSITION_ATTRIBUTE][old_index * 3 + vi])
attributes[NORMAL_ATTRIBUTE].append(source_attributes[NORMAL_ATTRIBUTE][old_index * 3 + vi])
if use_tangents:
for vi in range(0, 4):
attributes[TANGENT_ATTRIBUTE].append(source_attributes[TANGENT_ATTRIBUTE][old_index * 4 + vi])
for tex_coord_index in range(0, tex_coord_max):
tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
for vi in range(0, 2):
attributes[tex_coord_id].append(source_attributes[tex_coord_id][old_index * 2 + vi])
for color_index in range(0, color_max):
color_id = COLOR_PREFIX + str(color_index)
for vi in range(0, 4):
attributes[color_id].append(source_attributes[color_id][old_index * 4 + vi])
for bone_index in range(0, bone_max):
joint_id = JOINTS_PREFIX + str(bone_index)
weight_id = WEIGHTS_PREFIX + str(bone_index)
for vi in range(0, 4):
attributes[joint_id].append(source_attributes[joint_id][old_index * 4 + vi])
attributes[weight_id].append(source_attributes[weight_id][old_index * 4 + vi])
for morph_index in range(0, morph_max):
morph_position_id = MORPH_POSITION_PREFIX + str(morph_index)
morph_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
morph_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
for vi in range(0, 3):
attributes[morph_position_id].append(source_attributes[morph_position_id][old_index * 3 + vi])
attributes[morph_normal_id].append(source_attributes[morph_normal_id][old_index * 3 + vi])
if use_tangents:
for vi in range(0, 4):
attributes[morph_tangent_id].append(source_attributes[morph_tangent_id][old_index * 4 + vi])
return result_primitive
def extract_primitives(glTF, blender_mesh, blender_vertex_groups, modifiers, export_settings):
    """
    Extract primitives from a mesh. Polygons are triangulated and sorted by material.
    Furthermore, primitives are split up, if the indices range is exceeded.
    Finally, triangles are also split up/duplicated, if face normals are used instead of vertex normals.

    Args:
        glTF: glTF document being built (unused here, kept for interface parity).
        blender_mesh: the Blender mesh to extract from.
        blender_vertex_groups: vertex groups used to resolve skin joint names (may be None).
        modifiers: the object's modifier stack, searched for an ARMATURE modifier (may be None).
        export_settings: exporter options dict (SKINS, MORPH, 'gltf_all_vertex_influences', ...).

    Returns:
        A list of primitive dicts, each with MATERIAL_ID, INDICES_ID and ATTRIBUTES_ID keys.
    """
    print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)

    use_tangents = False
    if blender_mesh.uv_layers.active and len(blender_mesh.uv_layers) > 0:
        try:
            blender_mesh.calc_tangents()
            use_tangents = True
        except Exception:
            print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')

    # Per-material map from original vertex index to the list of new indices created for it.
    material_map = {}

    #
    # Gathering position, normal and tex_coords.
    #
    no_material_attributes = {
        POSITION_ATTRIBUTE: [],
        NORMAL_ATTRIBUTE: []
    }

    if use_tangents:
        no_material_attributes[TANGENT_ATTRIBUTE] = []

    #
    # Directory of materials with its primitive. The '' key collects faces with no material.
    #
    no_material_primitives = {
        MATERIAL_ID: '',
        INDICES_ID: [],
        ATTRIBUTES_ID: no_material_attributes
    }

    material_name_to_primitives = {'': no_material_primitives}

    vertex_index_to_new_indices = {}
    material_map[''] = vertex_index_to_new_indices

    #
    # Create primitive for each material.
    #
    for blender_material in blender_mesh.materials:
        if blender_material is None:
            continue
        attributes = {
            POSITION_ATTRIBUTE: [],
            NORMAL_ATTRIBUTE: []
        }

        if use_tangents:
            attributes[TANGENT_ATTRIBUTE] = []

        primitive = {
            MATERIAL_ID: blender_material.name,
            INDICES_ID: [],
            ATTRIBUTES_ID: attributes
        }

        material_name_to_primitives[blender_material.name] = primitive

        vertex_index_to_new_indices = {}
        material_map[blender_material.name] = vertex_index_to_new_indices

    tex_coord_max = 0
    if blender_mesh.uv_layers.active:
        tex_coord_max = len(blender_mesh.uv_layers)

    # Collect up to GLTF_MAX_COLORS vertex color layers, keyed COLOR_0, COLOR_1, ...
    vertex_colors = {}
    color_index = 0
    for vertex_color in blender_mesh.vertex_colors:
        vertex_color_name = COLOR_PREFIX + str(color_index)
        vertex_colors[vertex_color_name] = vertex_color
        color_index += 1
        if color_index >= GLTF_MAX_COLORS:
            break
    color_max = color_index

    # bone_max = number of 4-joint attribute sets needed (glTF packs joints/weights in groups of 4).
    bone_max = 0
    for blender_polygon in blender_mesh.polygons:
        for loop_index in blender_polygon.loop_indices:
            vertex_index = blender_mesh.loops[loop_index].vertex_index
            bones_count = len(blender_mesh.vertices[vertex_index].groups)
            if bones_count > 0:
                # Ceiling division: exact multiples of 4 must not add an extra set.
                if bones_count % 4 == 0:
                    bones_count -= 1
                bone_max = max(bone_max, bones_count // 4 + 1)

    # Gather relative shape keys (the basis/reference key is skipped).
    morph_max = 0
    blender_shape_keys = []
    if blender_mesh.shape_keys is not None:
        morph_max = len(blender_mesh.shape_keys.key_blocks) - 1

        for blender_shape_key in blender_mesh.shape_keys.key_blocks:
            if blender_shape_key != blender_shape_key.relative_key:
                blender_shape_keys.append(ShapeKey(
                    blender_shape_key,
                    blender_shape_key.normals_vertex_get(),  # calculate vertex normals for this shape key
                    blender_shape_key.normals_polygon_get()))  # calculate polygon normals for this shape key

    #
    # Convert polygon to primitive indices and eliminate invalid ones. Assign to material.
    #
    for blender_polygon in blender_mesh.polygons:
        export_color = True

        # Route the face to its material's primitive (or the no-material one).
        if blender_polygon.material_index < 0 or blender_polygon.material_index >= len(blender_mesh.materials) or \
                blender_mesh.materials[blender_polygon.material_index] is None:
            primitive = material_name_to_primitives['']
            vertex_index_to_new_indices = material_map['']
        else:
            primitive = material_name_to_primitives[blender_mesh.materials[blender_polygon.material_index].name]
            vertex_index_to_new_indices = material_map[blender_mesh.materials[blender_polygon.material_index].name]

        attributes = primitive[ATTRIBUTES_ID]

        face_normal = blender_polygon.normal
        face_tangent = Vector((0.0, 0.0, 0.0))
        face_bitangent = Vector((0.0, 0.0, 0.0))
        if use_tangents:
            # Average the loop tangent frame over the face for flat-shaded polygons.
            for loop_index in blender_polygon.loop_indices:
                temp_vertex = blender_mesh.loops[loop_index]
                face_tangent += temp_vertex.tangent
                face_bitangent += temp_vertex.bitangent

            face_tangent.normalize()
            face_bitangent.normalize()

        indices = primitive[INDICES_ID]

        loop_index_list = []

        if len(blender_polygon.loop_indices) == 3:
            loop_index_list.extend(blender_polygon.loop_indices)
        elif len(blender_polygon.loop_indices) > 3:
            # Triangulation of polygon. Using internal function, as non-convex polygons could exist.
            polyline = []
            for loop_index in blender_polygon.loop_indices:
                vertex_index = blender_mesh.loops[loop_index].vertex_index
                v = blender_mesh.vertices[vertex_index].co
                polyline.append(Vector((v[0], v[1], v[2])))
            triangles = tessellate_polygon((polyline,))
            for triangle in triangles:
                # Winding order of tessellate_polygon is reversed, hence 0, 2, 1.
                loop_index_list.append(blender_polygon.loop_indices[triangle[0]])
                loop_index_list.append(blender_polygon.loop_indices[triangle[2]])
                loop_index_list.append(blender_polygon.loop_indices[triangle[1]])
        else:
            # Degenerate polygon (fewer than 3 corners) - skip.
            continue

        for loop_index in loop_index_list:
            vertex_index = blender_mesh.loops[loop_index].vertex_index

            if vertex_index_to_new_indices.get(vertex_index) is None:
                vertex_index_to_new_indices[vertex_index] = []

            # Gather all per-vertex data for this loop before deduplication.
            v = None
            n = None
            t = None
            b = None
            uvs = []
            colors = []
            joints = []
            weights = []

            target_positions = []
            target_normals = []
            target_tangents = []

            vertex = blender_mesh.vertices[vertex_index]

            v = convert_swizzle_location(vertex.co, export_settings)
            if blender_polygon.use_smooth:
                if blender_mesh.has_custom_normals:
                    n = convert_swizzle_location(blender_mesh.loops[loop_index].normal, export_settings)
                else:
                    n = convert_swizzle_location(vertex.normal, export_settings)
                if use_tangents:
                    t = convert_swizzle_tangent(blender_mesh.loops[loop_index].tangent, export_settings)
                    b = convert_swizzle_location(blender_mesh.loops[loop_index].bitangent, export_settings)
            else:
                n = convert_swizzle_location(face_normal, export_settings)
                if use_tangents:
                    t = convert_swizzle_tangent(face_tangent, export_settings)
                    b = convert_swizzle_location(face_bitangent, export_settings)

            if use_tangents:
                tv = Vector((t[0], t[1], t[2]))
                bv = Vector((b[0], b[1], b[2]))
                nv = Vector((n[0], n[1], n[2]))

                # glTF stores handedness in tangent.w; flip when the frame is left-handed.
                if (nv.cross(tv)).dot(bv) < 0.0:
                    t[3] = -1.0

            if blender_mesh.uv_layers.active:
                for tex_coord_index in range(0, tex_coord_max):
                    uv = blender_mesh.uv_layers[tex_coord_index].data[loop_index].uv
                    # glTF UV origin is top-left, Blender's is bottom-left.
                    uvs.append([uv.x, 1.0 - uv.y])

            if color_max > 0 and export_color:
                for color_index in range(0, color_max):
                    color_name = COLOR_PREFIX + str(color_index)
                    color = vertex_colors[color_name].data[loop_index].color
                    colors.append([
                        color_srgb_to_scene_linear(color[0]),
                        color_srgb_to_scene_linear(color[1]),
                        color_srgb_to_scene_linear(color[2]),
                        1.0
                    ])

            bone_count = 0
            if blender_vertex_groups is not None and vertex.groups is not None and len(vertex.groups) > 0 and export_settings[gltf2_blender_export_keys.SKINS]:
                joint = []
                weight = []
                vertex_groups = vertex.groups
                if not export_settings['gltf_all_vertex_influences']:
                    # sort groups by weight descending
                    vertex_groups = sorted(vertex.groups, key=attrgetter('weight'), reverse=True)
                for group_element in vertex_groups:
                    if len(joint) == 4:
                        # Current 4-joint set is full; flush it and start the next one.
                        bone_count += 1
                        joints.append(joint)
                        weights.append(weight)
                        joint = []
                        weight = []

                    joint_weight = group_element.weight
                    if joint_weight <= 0.0:
                        continue

                    vertex_group_index = group_element.group
                    vertex_group_name = blender_vertex_groups[vertex_group_index].name

                    joint_index = None

                    if modifiers is not None:
                        modifiers_dict = {m.type: m for m in modifiers}
                        if "ARMATURE" in modifiers_dict:
                            modifier = modifiers_dict["ARMATURE"]
                            armature = modifier.object
                            if armature:
                                # Map the vertex group name to its joint index in the gathered skin.
                                skin = gltf2_blender_gather_skins.gather_skin(armature, modifier.id_data, export_settings)
                                for index, j in enumerate(skin.joints):
                                    if j.name == vertex_group_name:
                                        joint_index = index
                                        break

                    if joint_index is not None:
                        joint.append(joint_index)
                        weight.append(joint_weight)

                if len(joint) > 0:
                    bone_count += 1

                    # Pad the last partial set up to 4 entries.
                    for fill in range(0, 4 - len(joint)):
                        joint.append(0)
                        weight.append(0.0)

                    joints.append(joint)
                    weights.append(weight)

            # Pad with empty sets so every vertex has exactly bone_max joint/weight sets.
            for fill in range(0, bone_max - bone_count):
                joints.append([0, 0, 0, 0])
                weights.append([0.0, 0.0, 0.0, 0.0])

            if morph_max > 0 and export_settings[gltf2_blender_export_keys.MORPH]:
                for morph_index in range(0, morph_max):
                    blender_shape_key = blender_shape_keys[morph_index]

                    v_morph = convert_swizzle_location(blender_shape_key.shape_key.data[vertex_index].co,
                                                      export_settings)

                    # Store delta.
                    v_morph -= v

                    target_positions.append(v_morph)

                    n_morph = None

                    if blender_polygon.use_smooth:
                        temp_normals = blender_shape_key.vertex_normals
                        n_morph = (temp_normals[vertex_index * 3 + 0], temp_normals[vertex_index * 3 + 1],
                                   temp_normals[vertex_index * 3 + 2])
                    else:
                        temp_normals = blender_shape_key.polygon_normals
                        n_morph = (
                            temp_normals[blender_polygon.index * 3 + 0], temp_normals[blender_polygon.index * 3 + 1],
                            temp_normals[blender_polygon.index * 3 + 2])

                    n_morph = convert_swizzle_location(n_morph, export_settings)

                    # Store delta.
                    n_morph -= n

                    target_normals.append(n_morph)

                    if use_tangents:
                        # Rotate the base tangent by the normal's change to approximate the morphed tangent.
                        rotation = n_morph.rotation_difference(n)
                        t_morph = Vector((t[0], t[1], t[2]))
                        t_morph.rotate(rotation)
                        target_tangents.append(t_morph)

            #
            # Check if this exact vertex (all attributes equal) was already emitted; if so, reuse its index.
            #
            create = True

            for current_new_index in vertex_index_to_new_indices[vertex_index]:
                found = True

                for i in range(0, 3):
                    if attributes[POSITION_ATTRIBUTE][current_new_index * 3 + i] != v[i]:
                        found = False
                        break

                    if attributes[NORMAL_ATTRIBUTE][current_new_index * 3 + i] != n[i]:
                        found = False
                        break

                if use_tangents:
                    for i in range(0, 4):
                        if attributes[TANGENT_ATTRIBUTE][current_new_index * 4 + i] != t[i]:
                            found = False
                            break

                if not found:
                    continue

                for tex_coord_index in range(0, tex_coord_max):
                    uv = uvs[tex_coord_index]
                    tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
                    for i in range(0, 2):
                        if attributes[tex_coord_id][current_new_index * 2 + i] != uv[i]:
                            found = False
                            break

                if export_color:
                    for color_index in range(0, color_max):
                        color = colors[color_index]
                        color_id = COLOR_PREFIX + str(color_index)
                        for i in range(0, 3):
                            # Alpha is always 1.0 - see above.
                            current_color = attributes[color_id][current_new_index * 4 + i]
                            if color_srgb_to_scene_linear(current_color) != color[i]:
                                found = False
                                break

                if export_settings[gltf2_blender_export_keys.SKINS]:
                    for bone_index in range(0, bone_max):
                        joint = joints[bone_index]
                        weight = weights[bone_index]
                        joint_id = JOINTS_PREFIX + str(bone_index)
                        weight_id = WEIGHTS_PREFIX + str(bone_index)
                        for i in range(0, 4):
                            if attributes[joint_id][current_new_index * 4 + i] != joint[i]:
                                found = False
                                break
                            if attributes[weight_id][current_new_index * 4 + i] != weight[i]:
                                found = False
                                break

                if export_settings[gltf2_blender_export_keys.MORPH]:
                    for morph_index in range(0, morph_max):
                        target_position = target_positions[morph_index]
                        target_normal = target_normals[morph_index]
                        if use_tangents:
                            target_tangent = target_tangents[morph_index]
                        target_position_id = MORPH_POSITION_PREFIX + str(morph_index)
                        target_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
                        target_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
                        for i in range(0, 3):
                            if attributes[target_position_id][current_new_index * 3 + i] != target_position[i]:
                                found = False
                                break
                            if attributes[target_normal_id][current_new_index * 3 + i] != target_normal[i]:
                                found = False
                                break
                            if use_tangents:
                                if attributes[target_tangent_id][current_new_index * 3 + i] != target_tangent[i]:
                                    found = False
                                    break

                if found:
                    indices.append(current_new_index)
                    create = False
                    break

            if not create:
                continue

            # No duplicate found: allocate the next new index for this primitive.
            new_index = 0

            if primitive.get('max_index') is not None:
                new_index = primitive['max_index'] + 1

            primitive['max_index'] = new_index

            vertex_index_to_new_indices[vertex_index].append(new_index)

            indices.append(new_index)

            # Append all attribute data for the newly created vertex.
            attributes[POSITION_ATTRIBUTE].extend(v)
            attributes[NORMAL_ATTRIBUTE].extend(n)
            if use_tangents:
                attributes[TANGENT_ATTRIBUTE].extend(t)

            if blender_mesh.uv_layers.active:
                for tex_coord_index in range(0, tex_coord_max):
                    tex_coord_id = TEXCOORD_PREFIX + str(tex_coord_index)
                    if attributes.get(tex_coord_id) is None:
                        attributes[tex_coord_id] = []
                    attributes[tex_coord_id].extend(uvs[tex_coord_index])

            if export_color:
                for color_index in range(0, color_max):
                    color_id = COLOR_PREFIX + str(color_index)
                    if attributes.get(color_id) is None:
                        attributes[color_id] = []
                    attributes[color_id].extend(colors[color_index])

            if export_settings[gltf2_blender_export_keys.SKINS]:
                for bone_index in range(0, bone_max):
                    joint_id = JOINTS_PREFIX + str(bone_index)
                    if attributes.get(joint_id) is None:
                        attributes[joint_id] = []
                    attributes[joint_id].extend(joints[bone_index])
                    weight_id = WEIGHTS_PREFIX + str(bone_index)
                    if attributes.get(weight_id) is None:
                        attributes[weight_id] = []
                    attributes[weight_id].extend(weights[bone_index])

            if export_settings[gltf2_blender_export_keys.MORPH]:
                for morph_index in range(0, morph_max):
                    target_position_id = MORPH_POSITION_PREFIX + str(morph_index)
                    if attributes.get(target_position_id) is None:
                        attributes[target_position_id] = []
                    attributes[target_position_id].extend(target_positions[morph_index])
                    target_normal_id = MORPH_NORMAL_PREFIX + str(morph_index)
                    if attributes.get(target_normal_id) is None:
                        attributes[target_normal_id] = []
                    attributes[target_normal_id].extend(target_normals[morph_index])
                    if use_tangents:
                        target_tangent_id = MORPH_TANGENT_PREFIX + str(morph_index)
                        if attributes.get(target_tangent_id) is None:
                            attributes[target_tangent_id] = []
                        attributes[target_tangent_id].extend(target_tangents[morph_index])

    #
    # Add primitive plus split them if needed.
    #
    result_primitives = []

    for material_name, primitive in material_name_to_primitives.items():
        export_color = True

        indices = primitive[INDICES_ID]

        if len(indices) == 0:
            continue

        position = primitive[ATTRIBUTES_ID][POSITION_ATTRIBUTE]
        normal = primitive[ATTRIBUTES_ID][NORMAL_ATTRIBUTE]
        if use_tangents:
            tangent = primitive[ATTRIBUTES_ID][TANGENT_ATTRIBUTE]
        tex_coords = []
        for tex_coord_index in range(0, tex_coord_max):
            tex_coords.append(primitive[ATTRIBUTES_ID][TEXCOORD_PREFIX + str(tex_coord_index)])
        colors = []
        if export_color:
            for color_index in range(0, color_max):
                # FIX: was appended to tex_coords, which left `colors` empty and
                # mis-filed COLOR_n data under TEXCOORD keys in the split path below.
                colors.append(primitive[ATTRIBUTES_ID][COLOR_PREFIX + str(color_index)])
        joints = []
        weights = []
        if export_settings[gltf2_blender_export_keys.SKINS]:
            for bone_index in range(0, bone_max):
                joints.append(primitive[ATTRIBUTES_ID][JOINTS_PREFIX + str(bone_index)])
                weights.append(primitive[ATTRIBUTES_ID][WEIGHTS_PREFIX + str(bone_index)])
        target_positions = []
        target_normals = []
        target_tangents = []
        if export_settings[gltf2_blender_export_keys.MORPH]:
            for morph_index in range(0, morph_max):
                target_positions.append(primitive[ATTRIBUTES_ID][MORPH_POSITION_PREFIX + str(morph_index)])
                target_normals.append(primitive[ATTRIBUTES_ID][MORPH_NORMAL_PREFIX + str(morph_index)])
                if use_tangents:
                    target_tangents.append(primitive[ATTRIBUTES_ID][MORPH_TANGENT_PREFIX + str(morph_index)])

        count = len(indices)
        if count == 0:
            continue
        max_index = max(indices)

        # NOTE: Values used by some graphics APIs as "primitive restart" values are disallowed.
        # Specifically, the value 65535 (in UINT16) cannot be used as a vertex index.
        # https://github.com/KhronosGroup/glTF/issues/1142
        # https://github.com/KhronosGroup/glTF/pull/1476/files
        range_indices = 65535

        if max_index >= range_indices:
            #
            # Splitting result_primitives.
            #
            # At start, all indices are pending.
            pending_attributes = {
                POSITION_ATTRIBUTE: [],
                NORMAL_ATTRIBUTE: []
            }
            if use_tangents:
                pending_attributes[TANGENT_ATTRIBUTE] = []
            pending_primitive = {
                MATERIAL_ID: material_name,
                INDICES_ID: [],
                ATTRIBUTES_ID: pending_attributes
            }
            pending_primitive[INDICES_ID].extend(indices)
            pending_attributes[POSITION_ATTRIBUTE].extend(position)
            pending_attributes[NORMAL_ATTRIBUTE].extend(normal)
            if use_tangents:
                pending_attributes[TANGENT_ATTRIBUTE].extend(tangent)
            tex_coord_index = 0
            for tex_coord in tex_coords:
                pending_attributes[TEXCOORD_PREFIX + str(tex_coord_index)] = tex_coord
                tex_coord_index += 1
            if export_color:
                color_index = 0
                for color in colors:
                    pending_attributes[COLOR_PREFIX + str(color_index)] = color
                    color_index += 1
            if export_settings[gltf2_blender_export_keys.SKINS]:
                joint_index = 0
                for joint in joints:
                    pending_attributes[JOINTS_PREFIX + str(joint_index)] = joint
                    joint_index += 1
                weight_index = 0
                for weight in weights:
                    pending_attributes[WEIGHTS_PREFIX + str(weight_index)] = weight
                    weight_index += 1
            if export_settings[gltf2_blender_export_keys.MORPH]:
                morph_index = 0
                for target_position in target_positions:
                    pending_attributes[MORPH_POSITION_PREFIX + str(morph_index)] = target_position
                    morph_index += 1
                morph_index = 0
                for target_normal in target_normals:
                    pending_attributes[MORPH_NORMAL_PREFIX + str(morph_index)] = target_normal
                    morph_index += 1
                if use_tangents:
                    morph_index = 0
                    for target_tangent in target_tangents:
                        pending_attributes[MORPH_TANGENT_PREFIX + str(morph_index)] = target_tangent
                        morph_index += 1

            pending_indices = pending_primitive[INDICES_ID]

            # Continue until all are processed.
            while len(pending_indices) > 0:
                process_indices = pending_primitive[INDICES_ID]
                max_index = max(process_indices)
                pending_indices = []

                # One bucket of triangle indices per 65535-wide index range.
                all_local_indices = []
                for i in range(0, (max_index // range_indices) + 1):
                    all_local_indices.append([])

                # For all faces ...
                for face_index in range(0, len(process_indices), 3):
                    written = False
                    face_min_index = min(process_indices[face_index + 0], process_indices[face_index + 1],
                                         process_indices[face_index + 2])
                    face_max_index = max(process_indices[face_index + 0], process_indices[face_index + 1],
                                         process_indices[face_index + 2])
                    # ... check if it can be put in a range of maximum indices.
                    for i in range(0, (max_index // range_indices) + 1):
                        offset = i * range_indices
                        # Yes, so store the primitive with its indices.
                        if face_min_index >= offset and face_max_index < offset + range_indices:
                            all_local_indices[i].extend(
                                [process_indices[face_index + 0], process_indices[face_index + 1],
                                 process_indices[face_index + 2]])
                            written = True
                            break
                    # If not written, the triangle face has indices from different ranges.
                    if not written:
                        pending_indices.extend([process_indices[face_index + 0], process_indices[face_index + 1],
                                                process_indices[face_index + 2]])

                # Only add result_primitives, which do have indices in it.
                for local_indices in all_local_indices:
                    if len(local_indices) > 0:
                        current_primitive = extract_primitive_floor(pending_primitive, local_indices, use_tangents)
                        result_primitives.append(current_primitive)
                        print_console('DEBUG', 'Adding primitive with splitting. Indices: ' + str(
                            len(current_primitive[INDICES_ID])) + ' Vertices: ' + str(
                            len(current_primitive[ATTRIBUTES_ID][POSITION_ATTRIBUTE]) // 3))

                # Process primitive faces having indices in several ranges.
                if len(pending_indices) > 0:
                    pending_primitive = extract_primitive_pack(pending_primitive, pending_indices, use_tangents)
                    print_console('DEBUG', 'Creating temporary primitive for splitting')
        else:
            #
            # No splitting needed.
            #
            result_primitives.append(primitive)
            print_console('DEBUG', 'Adding primitive without splitting. Indices: ' + str(
                len(primitive[INDICES_ID])) + ' Vertices: ' + str(
                len(primitive[ATTRIBUTES_ID][POSITION_ATTRIBUTE]) // 3))

    print_console('INFO', 'Primitives created: ' + str(len(result_primitives)))

    return result_primitives
| 36.113596
| 159
| 0.57605
|
4a0b6a4384ebc50d25a7747e3b55deac1f97ec42
| 5,570
|
py
|
Python
|
GURA/avg_xwc.py
|
tony10101105/HEAR-2021-NeurIPS-Challenge---NTU
|
9a5349f95e6b980fbc9b44e18f263c725043a350
|
[
"MIT"
] | 5
|
2021-11-09T17:50:48.000Z
|
2022-01-04T08:34:46.000Z
|
GURA/avg_xwc.py
|
tony10101105/HEAR-2021-NeurIPS-Challenge---NTU-GURA
|
9a5349f95e6b980fbc9b44e18f263c725043a350
|
[
"MIT"
] | null | null | null |
GURA/avg_xwc.py
|
tony10101105/HEAR-2021-NeurIPS-Challenge---NTU-GURA
|
9a5349f95e6b980fbc9b44e18f263c725043a350
|
[
"MIT"
] | 1
|
2022-01-13T12:30:16.000Z
|
2022-01-13T12:30:16.000Z
|
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
####### Crepe #######
import torchcrepe
# Audio sample rate (Hz) that all wrapped models expect.
SAMPLE_RATE = 16000
# Hop sizes in milliseconds: fine-grained for timestamp embeddings,
# coarser for scene-level embeddings.
TIMESTAMP_HOP_SIZE = 50
SCENE_HOP_SIZE = 250
# Same hop sizes converted from milliseconds to samples at SAMPLE_RATE.
TIMESTAMP_HOP_SIZE_SAMPLES = (SAMPLE_RATE * TIMESTAMP_HOP_SIZE) // 1000
SCENE_HOP_SIZE_SAMPLES = (SAMPLE_RATE * SCENE_HOP_SIZE) // 1000
####### Hubert and Wav2vec2 #######
from transformers import Wav2Vec2Model, HubertModel
class hubert_xlarge(torch.nn.Module):
    """Thin wrapper exposing the pretrained HuBERT x-large encoder.

    Given a batch of raw waveforms, returns the encoder's final hidden states.
    """

    def __init__(self):
        super().__init__()
        # Pretrained weights are fetched from the HuggingFace hub on first use.
        self.hubert = HubertModel.from_pretrained("facebook/hubert-xlarge-ll60k")

    def forward(self, x):
        """Return HuBERT's last hidden state for input batch ``x``."""
        return self.hubert(x).last_hidden_state
class wav2vec2(torch.nn.Module):
    """Thin wrapper exposing the pretrained wav2vec 2.0 large encoder.

    Given a batch of raw waveforms, returns the encoder's final hidden states.
    """

    def __init__(self):
        super().__init__()
        # Pretrained weights are fetched from the HuggingFace hub on first use.
        self.wav2vec2 = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

    def forward(self, x):
        """Return wav2vec2's last hidden state for input batch ``x``."""
        return self.wav2vec2(x).last_hidden_state
class TorchCrepeModel(torch.nn.Module):
    """
    A pretty gross wrapper on torchcrepe, because of its implicit singleton
    model loading: https://github.com/maxrmorrison/torchcrepe/issues/13
    """

    # sample rate and embedding sizes are required model attributes for the HEAR API
    sample_rate = SAMPLE_RATE

    def __init__(self):
        super().__init__()
        # This is gross: torchcrepe stores the loaded network in module-level
        # state, so "loading" here is a side effect, not an attribute.
        target = "cuda" if torch.cuda.is_available() else "cpu"
        torchcrepe.load.model(device=target, capacity="full")

    def forward(self, x: Tensor, hop_size_samples: int):
        """Embed every waveform in ``x`` with CREPE and stack the results."""
        # Or do x.device?
        target = "cuda" if torch.cuda.is_available() else "cpu"

        if x.ndim == 1:
            x = x.view(1, x.shape[0])
        assert x.ndim == 2

        # This is lame, sorry: torchcrepe only processes one clip at a time,
        # so iterate over the batch dimension manually.
        per_clip = []
        for row in range(x.shape[0]):
            clip_embedding = torchcrepe.embed(
                audio=x[row].view(1, x.shape[1]),
                sample_rate=self.sample_rate,
                hop_length=hop_size_samples,
                model="full",
                device=target,
                pad=True,
                # Otherwise dcase exceeds memory on a V100
                batch_size=512,
            )
            # Convert 1 x frames x 32x64 embedding to 1 x frames x 32*64
            assert clip_embedding.shape[0] == 1
            assert clip_embedding.ndim == 4
            per_clip.append(clip_embedding.view((1, clip_embedding.shape[1], -1)))
        return torch.cat(per_clip)
class XWC_avg(torch.nn.Module):
    """Ensemble module holding HuBERT x-large, wav2vec2, and CREPE side by side."""

    def __init__(self):
        super().__init__()
        self.hubert = hubert_xlarge()
        self.wav2vec2 = wav2vec2()
        self.crepe = TorchCrepeModel()

    def forward(self, x, hop_size_samples):
        """Run all three backbones on ``x``; return their raw outputs as a 3-tuple."""
        return (
            self.hubert(x),
            self.wav2vec2(x),
            self.crepe(x, hop_size_samples),
        )
def load_model(model_file_path: str = "") -> torch.nn.Module:
    """
    Build the HEAR API entry-point model.

    Args:
        model_file_path: Ignored (no weights file is read; pretrained
            checkpoints are downloaded by the submodules themselves).
    Returns:
        An ``XWC_avg`` instance with the HEAR-required attributes
        (``sample_rate``, ``timestamp_embedding_size``,
        ``scene_embedding_size``) attached.
    """
    model = XWC_avg()
    # Attributes required by the HEAR API.
    model.sample_rate = SAMPLE_RATE
    model.timestamp_embedding_size = 1024
    model.scene_embedding_size = 1024
    return model
def get_timestamp_embeddings(
    audio: Tensor,
    model: torch.nn.Module,
    hop_size_samples: int = TIMESTAMP_HOP_SIZE_SAMPLES,
) -> Tuple[Tensor, Tensor]:
    """
    Compute per-frame embeddings by averaging the three backbone outputs.

    HuBERT and CREPE outputs are resized (via linear interpolation) to match
    the wav2vec2 output before averaging.

    Args:
        audio: 2D tensor of shape (n_sounds, num_samples).
        model: must be an ``XWC_avg`` instance.
        hop_size_samples: hop length passed to the CREPE backbone only.

    Returns:
        (embeddings, timestamps): averaged embeddings and per-frame center
        times in milliseconds.
    """
    if audio.ndim != 2:
        raise ValueError(
            "audio input tensor must be 2D with shape (n_sounds, num_samples)"
        )
    if not isinstance(model, XWC_avg):
        raise ValueError(f"Model must be an instance of {XWC_avg.__name__}")
    model.eval()
    with torch.no_grad():
        xlarge_embeddings, wav2vec2_embeddings, crepe_embeddings = model(audio, hop_size_samples)
        # Shrink HuBERT's channel width down to wav2vec2's last-dim size so the
        # tensors can be averaged elementwise below.
        # NOTE(review): F.interpolate with mode="linear" resamples the LAST
        # dimension; here that appears to be the feature/channel axis, not
        # time — presumably intentional, but confirm against the model shapes.
        xlarge_embeddings = F.interpolate(xlarge_embeddings,
                                          size = wav2vec2_embeddings.shape[2],
                                          mode = "linear")
        # First match CREPE's feature width ...
        crepe_embeddings = F.interpolate(crepe_embeddings,
                                         size = wav2vec2_embeddings.shape[2],
                                         mode = "linear"
                                         )
        # ... then match its frame count (interpolating along time via permute).
        crepe_embeddings = F.interpolate(crepe_embeddings.permute(0, 2, 1),
                                         size = wav2vec2_embeddings.shape[1],
                                         mode = "linear"
                                         ).permute(0, 2, 1)
    # Frame centers follow wav2vec2's fixed 20 ms stride with a 12.5 ms offset.
    # NOTE(review): this grid ignores hop_size_samples — assumes wav2vec2's
    # native hop governs the output frame rate; verify for other hop values.
    audio_ms = int(audio.shape[1] / model.sample_rate * 1000)
    ntimestamps = (audio_ms - 5) // 20
    last_center = 12.5 + (ntimestamps - 1) * 20
    timestamps = torch.arange(12.5, last_center + 20, 20)
    assert len(timestamps) == ntimestamps
    timestamps = timestamps.expand((wav2vec2_embeddings.shape[0], timestamps.shape[0]))
    assert timestamps.shape[1] == wav2vec2_embeddings.shape[1]
    # Simple unweighted mean of the three aligned embedding streams.
    embeddings = (xlarge_embeddings + wav2vec2_embeddings + crepe_embeddings) / 3
    return embeddings, timestamps
def get_scene_embeddings(
    audio: Tensor,
    model: torch.nn.Module,
) -> Tensor:
    """Return one embedding per sound by mean-pooling timestamp embeddings.

    Uses the coarser scene hop size; the timestamps themselves are discarded,
    and the time axis is averaged away to yield a single 1024-d vector per clip.
    """
    frame_embeddings, _ = get_timestamp_embeddings(
        audio, model, hop_size_samples=SCENE_HOP_SIZE_SAMPLES
    )
    # Collapse the time axis: (n_sounds, n_frames, dim) -> (n_sounds, dim).
    return torch.mean(frame_embeddings, dim=1)
| 31.292135
| 97
| 0.625494
|
4a0b6a7e9bd34a3d2a260400219ba67d050799c5
| 568
|
py
|
Python
|
Section 1/1.3_objects.py
|
PacktPublishing/-Getting-Started-with-Object-Oriented-Programming-in-Python-3
|
bf52dcad71f962177f8b73c9510bbcbbcaacf315
|
[
"MIT"
] | 9
|
2019-01-03T15:22:19.000Z
|
2021-10-01T22:06:00.000Z
|
Section 1/1.3_objects.py
|
anupsingh/-Getting-Started-with-Object-Oriented-Programming-in-Python-3
|
b30059c1902773d6d6ae781e3ab473b28288435d
|
[
"MIT"
] | null | null | null |
Section 1/1.3_objects.py
|
anupsingh/-Getting-Started-with-Object-Oriented-Programming-in-Python-3
|
b30059c1902773d6d6ae781e3ab473b28288435d
|
[
"MIT"
] | 7
|
2018-06-10T23:16:48.000Z
|
2021-06-24T06:39:34.000Z
|
class Car:
    """Common base class for all cars."""

    # Count of Car instances created so far; shared by the whole class.
    carCount = 0

    def __init__(self, name, year):
        self.name = name
        self.year = year
        # Track every construction on the class itself.
        Car.carCount += 1

    def displayCount(self):
        """Print how many cars have been created."""
        print("Total Car %d" % Car.carCount)

    def displayCar(self):
        """Print this car's name and model year."""
        print("Name : ", self.name, ", Year: ", self.year)
# This would create first object of Car class
car1 = Car("Honda", 2000)
# This would create second object of Car class
car2 = Car("BMW", 2017)
# Show each car, then the shared instance counter.
car1.displayCar()
car2.displayCar()
print ("Total Car %d" % Car.carCount)
| 24.695652
| 59
| 0.610915
|
4a0b6ad262659b23f5d9acf414164980de591344
| 304
|
py
|
Python
|
atgym/atstrategies/__init__.py
|
liv20/atgym
|
848ba65e89928e1cebb994fa5000f8afe026c76b
|
[
"MIT"
] | null | null | null |
atgym/atstrategies/__init__.py
|
liv20/atgym
|
848ba65e89928e1cebb994fa5000f8afe026c76b
|
[
"MIT"
] | null | null | null |
atgym/atstrategies/__init__.py
|
liv20/atgym
|
848ba65e89928e1cebb994fa5000f8afe026c76b
|
[
"MIT"
] | null | null | null |
from .base_strategy import AbstractStrategy
from .engulfing_pattern_strategy import EngulfingStrategy
from .ma_strategy import SMAStrategy, WMAStrategy, EMAStrategy
from .rsi_strategy import RSIStrategy
from .rl_strategy import RLStrategy
from .rl_strategy import A2CStrategy, DQNStrategy, PPOStrategy
| 33.777778
| 62
| 0.868421
|
4a0b6b4580ee102e9806a898cb7f40204c7c0037
| 584
|
py
|
Python
|
pexels-crawler/crawler.py
|
sudhanshu-jha/Scrapers
|
1203c5ed3ebb4b0664af41e95bde3fc15662af64
|
[
"MIT"
] | null | null | null |
pexels-crawler/crawler.py
|
sudhanshu-jha/Scrapers
|
1203c5ed3ebb4b0664af41e95bde3fc15662af64
|
[
"MIT"
] | null | null | null |
pexels-crawler/crawler.py
|
sudhanshu-jha/Scrapers
|
1203c5ed3ebb4b0664af41e95bde3fc15662af64
|
[
"MIT"
] | 1
|
2019-05-29T09:54:14.000Z
|
2019-05-29T09:54:14.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import configparser
from yaml import load
import sys
import getopt
from run import run
# Resolve config.yaml relative to this script's own directory.
cur_path = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(cur_path, "config.yaml")
with open(config_path, "rb") as f:
    cont = f.read()
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects from the file; prefer yaml.safe_load if the config is ever
# not fully trusted.
cf = load(cont)
def main():
    """Read crawler settings from the loaded YAML config and start the crawl."""
    # Pull the four settings from the parsed config in the order run() expects.
    settings = (
        cf.get("user_agent"),
        cf.get("download_dir"),
        cf.get("element"),
        cf.get("search_range"),
    )
    run(*settings)


if __name__ == "__main__":
    main()
| 20.857143
| 56
| 0.690068
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.