seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
30622885552 | from pybricks.hubs import CityHub
from pybricks.pupdevices import Motor, ColorDistanceSensor
from pybricks.parameters import Port, Stop, Color
from pybricks.tools import wait
# Initialize devices.
hub = CityHub()
motor = Motor(Port.B)
colorSensor = ColorDistanceSensor(Port.A)
while True:
colors = []
angles = []
#Mode 1 - Train
hub.light.on(Color.WHITE)
wait(500)
hub.light.off()
wait(500)
hub.light.on(Color.WHITE)
wait(500)
hub.light.off()
wait(500)
hub.light.on(Color.WHITE)
wait(500)
#Train for 4 color cases
for case in range(4):
#5 trainings for each case
for i in range(5):
try:
while True:
color = colorSensor.color()
hub.light.on(color)
wait(100)
except SystemExit:
pass
#Get color
color = colorSensor.color()
print(color)
#Turn hub light color of sensor reading
hub.light.on(color)
#Get motor angle
angle = motor.angle()
print(angle)
#Add the sensor reading and motor angle to list
colors.append(color)
angles.append(angle)
wait(100)
hub.light.on(Color.WHITE)
wait(2000)
print(colors)
print(angles)
#Mode 2 - Run
try:
while True:
counter = 0
avgPos = 0
sumPos = 0
#Get color sensor reading
color = colorSensor.color()
hub.light.on(color)
if color != Color.NONE:
for i in range(len(colors)):
#Find average position reading for sensed color
if colors[i] == color:
counter = counter + 1
sumPos = (sumPos+angles[i])
avgPos = sumPos/counter
print(avgPos)
#Move motor to average position at 400 (can change speed)
motor.run_target(400, avgPos)
motor.stop()
wait(500)
except SystemExit:
try:
wait(1000)
break
except SystemExit:
pass
| olivia-tomassetti/CEEO-smartmotors | SmartMotorPython/Pybricks/SmartMotorLEGOBoost.py | SmartMotorLEGOBoost.py | py | 2,282 | python | en | code | 0 | github-code | 36 |
72692461223 | #!/usr/bin/env python3
import asyncio
import unittest
from click.testing import CliRunner
from base_cli import _handle_debug, async_main, main
class TestCLI(unittest.TestCase):
def test_async_main(self) -> None:
self.assertEqual(0, asyncio.run(async_main(True)))
def test_debug_output(self) -> None:
self.assertTrue(_handle_debug(None, None, True))
def test_help(self) -> None:
runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
if __name__ == "__main__": # pragma: no cover
unittest.main()
| cooperlees/base_clis | py/tests.py | tests.py | py | 597 | python | en | code | 2 | github-code | 36 |
18914948323 | import pytest
from src.guess_number_higher_or_lower import Solution
@pytest.mark.parametrize(
"n,pick,expected",
[
(10, 6, 6),
(1, 1, 1),
(2, 1, 1),
],
)
def test_solution(n, pick, expected, monkeypatch):
monkeypatch.setenv("SECRET", str(pick))
assert Solution().guessNumber(n) == expected
| lancelote/leetcode | tests/test_guess_number_higher_or_lower.py | test_guess_number_higher_or_lower.py | py | 337 | python | en | code | 3 | github-code | 36 |
10392845560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import os
import copy
import time
import pickle
import numpy as np
from tqdm import tqdm
import yaml
import argparse
import torch
from tensorboardX import SummaryWriter
# from src.options import args_parser
from update import LocalUpdate
from utils import get_dataset, average_weights, exp_details
from deepctr_torch.inputs import get_feature_names
from sklearn.metrics import log_loss, roc_auc_score
from deepctr_torch.models import DeepFM
if __name__ == '__main__':
start_time = time.time()
# define paths
path_project = os.path.abspath('..')
logger = SummaryWriter('../logs')
parser = argparse.ArgumentParser(description='PPDL')
parser.add_argument('--params', dest='params', default='utils/params.yaml')
params = parser.parse_args()
with open(f'../{params.params}', 'r') as f:
args = yaml.load(f)
exp_details(args)
exit(0)
if args['gpu']:
torch.cuda.set_device(args['gpu'])
device = 'cuda' if args['gpu'] else 'cpu'
# load dataset and user groups # prepare feature for model
(train_dataset, test_dataset, user_groups),fixlen_feature_columns = get_dataset(args)
# count #unique features for each sparse field,and record dense feature field name
dnn_feature_columns = fixlen_feature_columns
linear_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# BUILD MODEL
if args['model'].lower() == 'deepfm':
# 4.Define Model,train,predict and evaluate
global_model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')
global_model.compile("adam", "binary_crossentropy",
metrics=['binary_crossentropy'], )
else:
exit('Error: unrecognized model')
# # Set the model to train and send it to device.
# global_model.to(device)
# global_model.train() # torch claim
# print(global_model)
# copy weights
global_weights = global_model.state_dict()
# print(global_weights.keys())
# Training
# train_loss, train_accuracy = [], []
# val_acc_list, net_list = [], []
# cv_loss, cv_acc = [], []
# print_every = 2
# val_loss_pre, counter = 0, 0
# temp test data
test_model_input = {name: test_dataset[name] for name in feature_names}
# for comparsion
# best_model = copy.deepcopy(global_model)
min_loss = 1000.0
max_auc = -1.0
for epoch in tqdm(range(args['epochs'])):
local_weights= [] #, local_losses , []
print(f'\n | Global Training Round : {epoch+1} |\n')
# frac default 0.1; num_users default 100
m = max(int(args['frac'] * args['num_users']), 1)
# 100 randomly select 10 as training client
idxs_users = np.random.choice(range(args['num_users']), m, replace=False)
for idx in idxs_users: # 10 random users
local_model = LocalUpdate(args=args, dataset=train_dataset,
idxs=user_groups[idx], logger=logger)
w = local_model.update_weights(
model=copy.deepcopy(global_model), features=feature_names)
local_weights.append(copy.deepcopy(w))
# local_losses.append(copy.deepcopy(loss))
# update global weights
global_weights = average_weights(local_weights)
global_model.load_state_dict(global_weights)
# temp test
pred_ans = global_model.predict(test_model_input, batch_size=256)
logloss = log_loss(test_dataset['label'].values, pred_ans)
aucscore = roc_auc_score(test_dataset['label'].values, pred_ans)
print("test LogLoss", round(logloss, 4))
print("test AUC", round(aucscore, 4))
if aucscore > max_auc:
# best_model = copy.deepcopy(global_model)
min_loss = logloss
max_auc = aucscore
print("|---- Min log loss: {:.4f}%".format(min_loss))
print("|---- Best AUC: {:.4f}%".format(max_auc))
print("test done")
| gongzhimin/Trojan-Attack-Against-Structural-Data-in-Federated-Learning | src/federated_main_nonattack.py | federated_main_nonattack.py | py | 4,103 | python | en | code | 1 | github-code | 36 |
12409403965 | import collections
import re
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from django.http.request import QueryDict
from rest_framework import exceptions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from api.base import utils
from api.base.exceptions import InvalidQueryStringError
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIException
from api.base.exceptions import TargetNotSupportedError
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.settings import BULK_SETTINGS
from api.base.utils import absolute_reverse, extend_querystring_params, get_user_auth, extend_querystring_if_key_exists
from framework.auth import core as auth_core
from website import settings
from website import util as website_utils
from website.models import Node
from website.util.sanitize import strip_html
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
"""
Properly handles formatting of self and related links according to JSON API.
Removes related or self link, if none.
"""
ret = {'links': {}}
if related_link:
ret['links'].update({
'related': {
'href': related_link or {},
'meta': rel_meta or {}
}
})
if self_link:
ret['links'].update({
'self': {
'href': self_link or {},
'meta': self_meta or {}
}
})
return ret
def is_anonymized(request):
private_key = request.query_params.get('view_only', None)
return website_utils.check_private_key_for_anonymized_link(private_key)
class HideIfRegistration(ser.Field):
"""
If node is a registration, this field will return None.
"""
def __init__(self, field, **kwargs):
super(HideIfRegistration, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_registration:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfRegistration, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfDisabled(ser.Field):
"""
If the user is disabled, returns None for attribute fields, or skips
if a RelationshipField.
"""
def __init__(self, field, **kwargs):
super(HideIfDisabled, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_disabled:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfDisabled, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfWithdrawal(HideIfRegistration):
"""
If registration is withdrawn, this field will return None.
"""
def get_attribute(self, instance):
if instance.is_retracted:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
class AllowMissing(ser.Field):
def __init__(self, field, **kwargs):
super(AllowMissing, self).__init__(**kwargs)
self.field = field
def to_representation(self, value):
return self.field.to_representation(value)
def bind(self, field_name, parent):
super(AllowMissing, self).bind(field_name, parent)
self.field.bind(field_name, self)
def get_attribute(self, instance):
"""
Overwrite the error message to return a blank value is if there is no existing value.
This allows the display of keys that do not exist in the DB (gitHub on a new OSF account for example.)
"""
try:
return self.field.get_attribute(instance)
except SkipField:
return ''
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, **kwargs):
"""Function applied by `HyperlinksField` to get the correct value in the
schema.
"""
url = None
if isinstance(val, Link): # If a Link is passed, get the url value
url = val.resolve_url(obj, **kwargs)
elif isinstance(val, basestring): # if a string is passed, it's a method of the serializer
if getattr(serializer, 'field', None):
serializer = serializer.parent
url = getattr(serializer, val)(obj) if obj is not None else None
else:
url = val
if not url and url != 0:
raise SkipField
else:
return url
class IDField(ser.CharField):
"""
ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
"""
def __init__(self, **kwargs):
kwargs['label'] = 'ID'
super(IDField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
request = self.context.get('request')
if request:
if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
id_field = self.get_id(self.root.instance)
if id_field != data:
raise Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
return super(IDField, self).to_internal_value(data)
def get_id(self, obj):
return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
"""
Type field that validates that 'type' in the request body is the same as the Meta type.
Also ensures that type is write-only and required.
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
super(TypeField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
if isinstance(self.root, JSONAPIListSerializer):
type_ = self.root.child.Meta.type_
else:
type_ = self.root.Meta.type_
if type_ != data:
raise Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
"""
Enforces that the related resource has the correct type
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
self.target_type = kwargs.pop('target_type')
super(TargetTypeField, self).__init__(**kwargs)
def to_internal_value(self, data):
if self.target_type != data:
raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
def to_internal_value(self, data):
if not isinstance(data, list):
self.fail('not_a_list', input_type=type(data).__name__)
return super(JSONAPIListField, self).to_internal_value(data)
class AuthorizedCharField(ser.CharField):
"""
Passes auth of the logged-in user to the object's method
defined as the field source.
Example:
content = AuthorizedCharField(source='get_content')
"""
def __init__(self, source=None, **kwargs):
assert source is not None, 'The `source` argument is required.'
self.source = source
super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
user = self.context['request'].user
auth = auth_core.Auth(user)
field_source_method = getattr(obj, self.source)
return field_source_method(auth=auth)
class RelationshipField(ser.HyperlinkedIdentityField):
"""
RelationshipField that permits the return of both self and related links, along with optional
meta information. ::
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-node-children-relationship',
self_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'}
)
The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
field will be returned verbatim. ::
wiki_home = RelationshipField(
related_view='addon:addon-detail',
related_view_kwargs={'node_id': '<_id>', 'provider': 'wiki'},
)
'_id' is enclosed in angular brackets, but 'wiki' is not. 'id' will be looked up on the target, but 'wiki' will not.
The serialized result would be '/nodes/abc12/addons/wiki'.
Field can handle nested attributes: ::
wiki_home = RelationshipField(
related_view='wiki:wiki-detail',
related_view_kwargs={'node_id': '<_id>', 'wiki_id': '<wiki_pages_current.home>'}
)
Field can handle a filter_key, which operates as the source field (but
is named differently to not interfere with HyperLinkedIdentifyField's source
The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
when using the ``FilterMixin`` on a view. ::
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
Field can include optional filters:
Example:
replies = RelationshipField(
self_view='nodes:node-comments',
self_view_kwargs={'node_id': '<node._id>'},
filter={'target': '<pk>'})
)
"""
json_api_link = True # serializes to a links object
def __init__(self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, **kwargs):
related_view = related_view
self_view = self_view
related_kwargs = related_view_kwargs
self_kwargs = self_view_kwargs
self.views = {'related': related_view, 'self': self_view}
self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
self.related_meta = related_meta
self.self_meta = self_meta
self.always_embed = always_embed
self.filter = filter
self.filter_key = filter_key
assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
if related_view:
assert related_kwargs is not None, 'Must provide related view kwargs.'
if not callable(related_kwargs):
assert isinstance(related_kwargs,
dict), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."
if self_view:
assert self_kwargs is not None, 'Must provide self view kwargs.'
assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."
view_name = related_view
if view_name:
lookup_kwargs = related_kwargs
else:
view_name = self_view
lookup_kwargs = self_kwargs
if kwargs.get('lookup_url_kwarg', None):
lookup_kwargs = kwargs.pop('lookup_url_kwarg')
super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)
# Allow a RelationshipField to be modified if explicitly set so
if kwargs.get('read_only') is not None:
self.read_only = kwargs['read_only']
def resolve(self, resource, field_name):
"""
Resolves the view when embedding.
"""
lookup_url_kwarg = self.lookup_url_kwarg
if callable(lookup_url_kwarg):
lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))
kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in
lookup_url_kwarg.items()}
view = self.view_name
if callable(self.view_name):
view = view(getattr(resource, field_name))
return resolve(
reverse(
view,
kwargs=kwargs
)
)
def process_related_counts_parameters(self, params, value):
"""
Processes related_counts parameter.
Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
individual fields. Ensures field for which we are requesting counts is a relationship field.
"""
if utils.is_truthy(params) or utils.is_falsy(params):
return params
field_counts_requested = [val for val in params.split(',')]
countable_fields = {field for field in self.parent.fields if
getattr(self.parent.fields[field], 'json_api_link', False) or
getattr(getattr(self.parent.fields[field], 'field', None), 'json_api_link', None)}
for count_field in field_counts_requested:
# Some fields will hide relationships, e.g. HideIfWithdrawal
# Ignore related_counts for these fields
fetched_field = self.parent.fields.get(count_field)
hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)
if not hidden and count_field not in countable_fields:
raise InvalidQueryStringError(
detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
params),
parameter='related_counts'
)
return field_counts_requested
def get_meta_information(self, meta_data, value):
"""
For retrieving meta values, otherwise returns {}
"""
meta = {}
for key in meta_data or {}:
if key == 'count' or key == 'unread':
show_related_counts = self.context['request'].query_params.get('related_counts', False)
if self.context['request'].parser_context.get('kwargs'):
if self.context['request'].parser_context['kwargs'].get('is_embedded'):
show_related_counts = False
field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)
if utils.is_truthy(show_related_counts):
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
elif utils.is_falsy(show_related_counts):
continue
elif self.field_name in field_counts_requested:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
else:
continue
elif key == 'projects_in_common':
if not get_user_auth(self.context['request']).user:
continue
if not self.context['request'].query_params.get('show_projects_in_common', False):
continue
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
else:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
return meta
def lookup_attribute(self, obj, lookup_field):
"""
Returns attribute from target object unless attribute surrounded in angular brackets where it returns the lookup field.
Also handles the lookup of nested attributes.
"""
bracket_check = _tpl(lookup_field)
if bracket_check:
source_attrs = bracket_check.split('.')
# If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
# error message, you will just not see that field. This allows us to have slightly more dynamic use of
# nested attributes in relationship fields.
try:
return_val = get_nested_attributes(obj, source_attrs)
except KeyError:
return None
return return_val
return lookup_field
def kwargs_lookup(self, obj, kwargs_dict):
"""
For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}
"""
if callable(kwargs_dict):
kwargs_dict = kwargs_dict(obj)
kwargs_retrieval = {}
for lookup_url_kwarg, lookup_field in kwargs_dict.items():
try:
lookup_value = self.lookup_attribute(obj, lookup_field)
except AttributeError as exc:
raise AssertionError(exc)
if lookup_value is None:
return None
kwargs_retrieval[lookup_url_kwarg] = lookup_value
return kwargs_retrieval
# Overrides HyperlinkedIdentityField
def get_url(self, obj, view_name, request, format):
urls = {}
for view_name, view in self.views.items():
if view is None:
urls[view_name] = {}
else:
kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
if kwargs is None:
urls[view_name] = {}
else:
if callable(view):
view = view(getattr(obj, self.field_name))
url = self.reverse(view, kwargs=kwargs, request=request, format=format)
if self.filter:
formatted_filter = self.format_filter(obj)
if formatted_filter:
url = extend_querystring_params(url, {'filter': formatted_filter})
else:
url = None
url = extend_querystring_if_key_exists(url, self.context['request'], 'view_only')
urls[view_name] = url
if not urls['self'] and not urls['related']:
urls = None
return urls
def to_esi_representation(self, value, envelope='data'):
relationships = self.to_representation(value)
try:
href = relationships['links']['related']['href']
except KeyError:
raise SkipField
else:
if href and not href == '{}':
if self.always_embed:
envelope = 'data'
query_dict = dict(format=['jsonapi', ], envelope=[envelope, ])
if 'view_only' in self.parent.context['request'].query_params.keys():
query_dict.update(view_only=[self.parent.context['request'].query_params['view_only']])
esi_url = extend_querystring_params(href, query_dict)
return '<esi:include src="{}"/>'.format(esi_url)
def format_filter(self, obj):
qd = QueryDict(mutable=True)
filter_fields = self.filter.keys()
for field_name in filter_fields:
try:
# check if serializer method passed in
serializer_method = getattr(self.parent, self.filter[field_name])
except AttributeError:
value = self.lookup_attribute(obj, self.filter[field_name])
else:
value = serializer_method(obj)
if not value:
continue
qd.update({'[{}]'.format(field_name): value})
if not qd.keys():
return None
return qd.urlencode(safe=['[', ']'])
# Overrides HyperlinkedIdentityField
def to_representation(self, value):
request = self.context.get('request', None)
format = self.context.get('format', None)
assert request is not None, (
'`%s` requires the request in the serializer'
" context. Add `context={'request': request}` when instantiating "
'the serializer.' % self.__class__.__name__
)
# By default use whatever format is given for the current context
# unless the target is a different type to the source.
#
# Eg. Consider a HyperlinkedIdentityField pointing from a json
# representation to an html property of that representation...
#
# '/snippets/1/' should link to '/snippets/1/highlight/'
# ...but...
# '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
if format and self.format and self.format != format:
format = self.format
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.get_url(value, self.view_name, request, format)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s". You may have failed to include the related '
'model in your API, or incorrectly configured the '
'`lookup_field` attribute on this field.'
)
if value in ('', None):
value_string = {'': 'the empty string', None: 'None'}[value]
msg += (
' WARNING: The value of the field on the model instance '
"was %s, which may be why it didn't match any "
'entries in your URL conf.' % value_string
)
raise ImproperlyConfigured(msg % self.view_name)
if url is None:
raise SkipField
related_url = url['related']
related_meta = self.get_meta_information(self.related_meta, value)
self_url = url['self']
self_meta = self.get_meta_information(self.self_meta, value)
return format_relationship_links(related_url, self_url, related_meta, self_meta)
class FileCommentRelationshipField(RelationshipField):
def get_url(self, obj, view_name, request, format):
if obj.kind == 'folder':
raise SkipField
return super(FileCommentRelationshipField, self).get_url(obj, view_name, request, format)
class TargetField(ser.Field):
"""
Field that returns a nested dict with the url (constructed based
on the object's type), optional meta information, and link_type.
Example:
target = TargetField(link_type='related', meta={'type': 'get_target_type'})
"""
json_api_link = True # serializes to a links object
view_map = {
'node': {
'view': 'nodes:node-detail',
'lookup_kwarg': 'node_id'
},
'comment': {
'view': 'comments:comment-detail',
'lookup_kwarg': 'comment_id'
},
'nodewikipage': {
'view': None,
'lookup_kwarg': None
}
}
def __init__(self, **kwargs):
self.meta = kwargs.pop('meta', {})
self.link_type = kwargs.pop('link_type', 'url')
super(TargetField, self).__init__(read_only=True, **kwargs)
def resolve(self, resource, field_name):
"""
Resolves the view for target node or target comment when embedding.
"""
view_info = self.view_map.get(resource.target.referent._name, None)
if not view_info:
raise TargetNotSupportedError('{} is not a supported target type'.format(
resource.target._name
))
if not view_info['view']:
return None, None, None
embed_value = resource.target._id
kwargs = {view_info['lookup_kwarg']: embed_value}
return resolve(
reverse(
view_info['view'],
kwargs=kwargs
)
)
def to_esi_representation(self, value, envelope='data'):
href = value.get_absolute_url()
if href:
esi_url = extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
return '<esi:include src="{}"/>'.format(esi_url)
return self.to_representation(value)
def to_representation(self, value):
"""
Returns nested dictionary in format {'links': {'self.link_type': ... }
If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
the link is represented as a links object with 'href' and 'meta' members.
"""
meta = website_utils.rapply(self.meta, _url_val, obj=value, serializer=self.parent)
return {'links': {self.link_type: {'href': value.referent.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
"""Links field that resolves to a links object. Used in conjunction with `Link`.
If the object to be serialized implements `get_absolute_url`, then the return value
of that method is used for the `self` link.
Example: ::
links = LinksField({
'html': 'absolute_url',
'children': {
'related': Link('nodes:node-children', node_id='<pk>'),
'count': 'get_node_count'
},
'contributors': {
'related': Link('nodes:node-contributors', node_id='<pk>'),
'count': 'get_contrib_count'
},
'registrations': {
'related': Link('nodes:node-registrations', node_id='<pk>'),
'count': 'get_registration_count'
},
})
"""
def __init__(self, links, *args, **kwargs):
ser.Field.__init__(self, read_only=True, *args, **kwargs)
self.links = links
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def extend_absolute_url(self, obj):
return extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')
def to_representation(self, obj):
ret = {}
for name, value in self.links.iteritems():
try:
url = _url_val(value, obj=obj, serializer=self.parent)
except SkipField:
continue
else:
ret[name] = url
if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
ret['self'] = self.extend_absolute_url(obj)
return ret
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
def _get_attr_from_tpl(attr_tpl, obj):
attr_name = _tpl(str(attr_tpl))
if attr_name:
attribute_value = obj
for attr_segment in attr_name.split('.'):
attribute_value = getattr(attribute_value, attr_segment, ser.empty)
if attribute_value is not ser.empty:
return attribute_value
elif attr_name in obj:
return obj[attr_name]
else:
raise AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(
attr_name=attr_name, obj=obj,
))
else:
return attr_tpl
# TODO: Make this a Field that is usable on its own?
class Link(object):
"""Link object to use in conjunction with Links field. Does reverse lookup of
URLs given an endpoint name and attributed enclosed in `<>`. This includes
complex key strings like 'user.id'
"""
def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
self.endpoint = endpoint
self.kwargs = kwargs or {}
self.args = args or tuple()
self.reverse_kwargs = kw
self.query_kwargs = query_kwargs or {}
def resolve_url(self, obj):
kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.kwargs.items()}
arg_values = [_get_attr_from_tpl(attr_tpl, obj) for attr_tpl in self.args]
query_kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.query_kwargs.items()}
# Presumably, if you have are expecting a value but the value is empty, then the link is invalid.
for item in kwarg_values:
if kwarg_values[item] is None:
raise SkipField
return utils.absolute_reverse(
self.endpoint,
args=arg_values,
kwargs=kwarg_values,
query_kwargs=query_kwarg_values,
**self.reverse_kwargs
)
class WaterbutlerLink(Link):
    """Link object to use in conjunction with Links field. Builds a Waterbutler URL for files."""

    def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
        self.must_be_file = must_be_file
        self.must_be_folder = must_be_folder
        # Extra keyword args are forwarded to waterbutler_api_url_for().
        self.kwargs = kwargs

    def resolve_url(self, obj):
        """Reverse URL lookup for WaterButler routes"""
        # WaterButler marks folders with a trailing slash on the path.
        is_folder = obj.path.endswith('/')
        if self.must_be_folder is True and not is_folder:
            raise SkipField
        if self.must_be_file is True and is_folder:
            raise SkipField
        url = website_utils.waterbutler_api_url_for(obj.node._id, obj.provider, obj.path, **self.kwargs)
        if url:
            return url
        raise SkipField
class NodeFileHyperLinkField(RelationshipField):
    """RelationshipField restricted to file objects of a particular *kind*."""

    def __init__(self, kind=None, never_embed=False, **kwargs):
        self.never_embed = never_embed
        self.kind = kind
        super(NodeFileHyperLinkField, self).__init__(**kwargs)

    def get_url(self, obj, view_name, request, format):
        # Only build a URL for objects whose kind (file/folder) matches.
        if self.kind and obj.kind != self.kind:
            raise SkipField
        return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
    """ListSerializer for JSON API collections.

    Supports ESI-based embedding, bulk update, and the ``skip_uneditable``
    query param, which lets bulk requests report per-item errors instead of
    failing outright.
    """

    def to_representation(self, data):
        enable_esi = self.context.get('enable_esi', False)
        # NOTE: dict.update() returns None, so `envelope` is always None here;
        # the side effect of the call also clears the envelope in the context.
        envelope = self.context.update({'envelope': None})
        # Don't envelope when serializing collection
        errors = {}
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        if isinstance(data, collections.Mapping):
            # NOTE(review): collections.Mapping was removed in Python 3.10;
            # collections.abc.Mapping is the long-term spelling — confirm the
            # target interpreter version.
            # Bulk responses may arrive as {'data': [...], 'errors': {...}}.
            errors = data.get('errors', None)
            data = data.get('data', None)
        if enable_esi:
            ret = [
                self.child.to_esi_representation(item, envelope=None) for item in data
            ]
        else:
            ret = [
                self.child.to_representation(item, envelope=envelope) for item in data
            ]
        if errors and bulk_skip_uneditable:
            # Append accumulated per-item errors to the serialized collection.
            ret.append({'errors': errors})
        return ret

    # Overrides ListSerializer which doesn't support multiple update by default
    def update(self, instance, validated_data):
        # avoiding circular import
        from api.nodes.serializers import ContributorIDField

        # if PATCH request, the child serializer's partial attribute needs to be True
        if self.context['request'].method == 'PATCH':
            self.child.partial = True
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        if not bulk_skip_uneditable:
            # Without skip_uneditable every requested object must be present.
            if len(instance) != len(validated_data):
                raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
        id_lookup = self.child.fields['id'].source
        data_mapping = {item.get(id_lookup): item for item in validated_data}
        if isinstance(self.child.fields['id'], ContributorIDField):
            instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
        else:
            instance_mapping = {getattr(item, id_lookup): item for item in instance}
        ret = {'data': []}
        # Update each editable object; entries left in data_mapping afterwards
        # are the ones the user could not edit.
        for resource_id, resource in instance_mapping.items():
            data = data_mapping.pop(resource_id, None)
            ret['data'].append(self.child.update(resource, data))
        # If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
        if data_mapping and bulk_skip_uneditable:
            ret.update({'errors': data_mapping.values()})
        return ret

    # overrides ListSerializer
    def run_validation(self, data):
        # Enforce the bulk-operation size limit before normal validation.
        meta = getattr(self, 'Meta', None)
        bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
        num_items = len(data)
        if num_items > bulk_limit:
            raise JSONAPIException(source={'pointer': '/data'},
                                   detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items))
        return super(JSONAPIListSerializer, self).run_validation(data)

    # overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' from validated_data.
        """
        ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = website_utils.rapply(self.validated_data, strip_html)
        for data in self._validated_data:
            data.pop('type', None)
        return ret
class JSONAPISerializer(ser.Serializer):
    """Base serializer. Requires that a `type_` option is set on `class Meta`. Also
    allows for enveloping of both single resources and collections. Looks to nest fields
    according to JSON API spec. Relational fields must set json_api_link=True flag.
    Self/html links must be nested under "links".
    """

    # Don't serialize relationships that use these views
    # when viewing thru an anonymous VOL
    views_to_hide_if_anonymous = {
        'users:user-detail',
        'nodes:node-registrations',
    }

    # overrides Serializer
    @classmethod
    def many_init(cls, *args, **kwargs):
        # Route list serialization through the JSON API list serializer.
        kwargs['child'] = cls()
        return JSONAPIListSerializer(*args, **kwargs)

    def invalid_embeds(self, fields, embeds):
        # Return requested embed names that do not correspond to an
        # embeddable (json_api_link) field.
        fields_check = fields[:]
        for index, field in enumerate(fields_check):
            if getattr(field, 'field', None):
                # Unwrap composite fields to the inner field for the check.
                fields_check[index] = field.field
        invalid_embeds = set(embeds.keys()) - set(
            [f.field_name for f in fields_check if getattr(f, 'json_api_link', False)])
        return invalid_embeds

    def to_esi_representation(self, data, envelope='data'):
        # Render this resource as an <esi:include> tag pointing at its own
        # detail URL, so an edge cache can assemble the embed.
        href = None
        query_params_blacklist = ['page[size]']
        href = self.get_absolute_url(data)
        if href and href != '{}':
            # Carry over the request's query params, minus blacklisted ones,
            # and force the requested envelope.
            esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
                args=query_params_blacklist).remove(args=['envelope']).add(args={'envelope': envelope}).url
            return '<esi:include src="{}"/>'.format(esi_url)
        # failsafe, let python do it if something bad happened in the ESI construction
        return super(JSONAPISerializer, self).to_representation(data)

    # overrides Serializer
    def to_representation(self, obj, envelope='data'):
        """Serialize to final representation.

        :param obj: Object to be serialized.
        :param envelope: Key for resource object.
        """
        ret = {}
        meta = getattr(self, 'Meta', None)
        type_ = getattr(meta, 'type_', None)
        assert type_ is not None, 'Must define Meta.type_'
        # Skeleton of a JSON API resource object; empty sections are pruned
        # at the end.
        data = {
            'id': '',
            'type': type_,
            'attributes': {},
            'relationships': {},
            'embeds': {},
            'links': {},
        }
        embeds = self.context.get('embed', {})
        context_envelope = self.context.get('envelope', envelope)
        if context_envelope == 'None':
            context_envelope = None
        enable_esi = self.context.get('enable_esi', False)
        is_anonymous = is_anonymized(self.context['request'])
        to_be_removed = set()
        if is_anonymous and hasattr(self, 'non_anonymized_fields'):
            # Drop any fields that are not specified in the `non_anonymized_fields` variable.
            allowed = set(self.non_anonymized_fields)
            existing = set(self.fields.keys())
            to_be_removed = existing - allowed
        fields = [field for field in self.fields.values() if
                  not field.write_only and field.field_name not in to_be_removed]
        invalid_embeds = self.invalid_embeds(fields, embeds)
        invalid_embeds = invalid_embeds - to_be_removed
        if invalid_embeds:
            raise InvalidQueryStringError(parameter='embed',
                                          detail='The following fields are not embeddable: {}'.format(
                                              ', '.join(invalid_embeds)))
        for field in fields:
            try:
                attribute = field.get_attribute(obj)
            except SkipField:
                continue
            nested_field = getattr(field, 'field', None)
            if attribute is None:
                # We skip `to_representation` for `None` values so that
                # fields do not have to explicitly deal with that case.
                data['attributes'][field.field_name] = None
            else:
                try:
                    representation = field.to_representation(attribute)
                except SkipField:
                    continue
                if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
                    # If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
                    # results in addition to adding a relationship link
                    if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
                        if enable_esi:
                            try:
                                result = field.to_esi_representation(attribute, envelope=envelope)
                            except SkipField:
                                continue
                        else:
                            try:
                                # If a field has an empty representation, it should not be embedded.
                                result = self.context['embed'][field.field_name](obj)
                            except SkipField:
                                result = None
                        if result:
                            data['embeds'][field.field_name] = result
                        else:
                            data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
                    try:
                        # Relationships for views hidden under anonymous
                        # view-only links are omitted entirely.
                        if not (is_anonymous and
                                hasattr(field, 'view_name') and
                                field.view_name in self.views_to_hide_if_anonymous):
                            data['relationships'][field.field_name] = representation
                    except SkipField:
                        continue
                elif field.field_name == 'id':
                    data['id'] = representation
                elif field.field_name == 'links':
                    data['links'] = representation
                else:
                    data['attributes'][field.field_name] = representation
        # Prune empty optional sections per JSON API conventions.
        if not data['relationships']:
            del data['relationships']
        if not data['embeds']:
            del data['embeds']
        if context_envelope:
            ret[context_envelope] = data
            if is_anonymous:
                ret['meta'] = {'anonymous': True}
        else:
            ret = data
        return ret

    def get_absolute_url(self, obj):
        # Subclasses must implement; used for `links.self` and ESI hrefs.
        raise NotImplementedError()

    def get_absolute_html_url(self, obj):
        # HTML (front-end) URL, preserving a view_only key when present.
        return extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')

    # overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' and '_id' from validated_data.
        """
        ret = super(JSONAPISerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = self.sanitize_data()
        self._validated_data.pop('type', None)
        self._validated_data.pop('target_type', None)
        if self.context['request'].method in utils.UPDATE_METHODS:
            self._validated_data.pop('_id', None)
        return ret

    def sanitize_data(self):
        # Recursively strip HTML from every validated value.
        return website_utils.rapply(self.validated_data, strip_html)
class JSONAPIRelationshipSerializer(ser.Serializer):
    """Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
    Provides a simplified serialization of the relationship, allowing for simple update request
    bodies.
    """
    # Both fields optional/nullable so update bodies can clear a relationship.
    id = ser.CharField(required=False, allow_null=True)
    type = TypeField(required=False, allow_null=True)

    def to_representation(self, obj):
        # Render as a JSON API resource identifier: {'type': ..., 'id': ...}.
        meta = getattr(self, 'Meta', None)
        type_ = getattr(meta, 'type_', None)
        assert type_ is not None, 'Must define Meta.type_'
        relation_id_field = self.fields['id']
        attribute = relation_id_field.get_attribute(obj)
        relationship = relation_id_field.to_representation(attribute)
        # An empty id means no related object: represent the whole
        # relationship as None rather than a dangling type.
        data = {'type': type_, 'id': relationship} if relationship else None
        return data
def DevOnly(field):
    """Make a field only active in ``DEV_MODE``. ::

        experimental_field = DevMode(CharField(required=False))
    """
    if settings.DEV_MODE:
        return field
    return None
class RestrictedDictSerializer(ser.Serializer):
    """Serializer that renders readable fields into a plain dict (no JSON API envelope)."""

    def to_representation(self, obj):
        rendered = {}
        readable_fields = [f for f in self.fields.values() if not f.write_only]
        for field in readable_fields:
            try:
                attribute = field.get_attribute(obj)
            except ser.SkipField:
                continue
            if attribute is None:
                # We skip `to_representation` for `None` values so that
                # fields do not have to explicitly deal with that case.
                rendered[field.field_name] = None
            else:
                rendered[field.field_name] = field.to_representation(attribute)
        return rendered
def relationship_diff(current_items, new_items):
    """
    To be used in POST and PUT/PATCH relationship requests, as, by JSON API specs,
    in update requests, the 'remove' items' relationships would be deleted, and the
    'add' would be added, while for create requests, only the 'add' would be added.

    :param current_items: The current items in the relationship
    :param new_items: The items passed in the request
    :return: dict with 'add' and 'remove' sub-dicts keyed by item id
    """
    current_keys = set(current_items.keys())
    new_keys = set(new_items.keys())
    return {
        'add': {key: new_items[key] for key in new_keys - current_keys},
        'remove': {key: current_items[key] for key in current_keys - new_keys},
    }
class AddonAccountSerializer(JSONAPISerializer):
    """Serializes an external (third-party addon) account linked to a user."""

    id = ser.CharField(source='_id', read_only=True)
    provider = ser.CharField(read_only=True)
    profile_url = ser.CharField(required=False, read_only=True)
    display_name = ser.CharField(required=False, read_only=True)
    # Fix: was an accidental double assignment (`links = links = LinksField(...)`).
    links = LinksField({
        'self': 'get_absolute_url',
    })

    class Meta:
        type_ = 'external_accounts'

    def get_absolute_url(self, obj):
        """Return the canonical API URL for this external account.

        Copies the request kwargs instead of mutating the shared
        ``parser_context['kwargs']`` dict in place (the original added
        'account_id' to the request's own kwargs as a side effect). An
        unreachable ``return obj.get_absolute_url()`` after the return has
        been removed.
        """
        kwargs = dict(self.context['request'].parser_context['kwargs'])
        kwargs.update({'account_id': obj._id})
        return absolute_reverse(
            'users:user-external_account-detail',
            kwargs=kwargs
        )
class LinkedNode(JSONAPIRelationshipSerializer):
    """Relationship serializer for a single linked (pointed-to) node."""
    # Pointer objects expose their target through `.node`, hence the dotted source.
    id = ser.CharField(source='node._id', required=False, allow_null=True)

    class Meta:
        type_ = 'linked_nodes'
class LinkedNodesRelationshipSerializer(ser.Serializer):
    """Serializer for the linked-nodes (pointers) relationship of a collection.

    ``instance`` is a dict of the form ``{'data': [pointers], 'self': collection}``
    (see ``make_instance_obj``).
    """
    data = ser.ListField(child=LinkedNode())
    links = LinksField({'self': 'get_self_url',
                        'html': 'get_related_url'})

    def get_self_url(self, obj):
        return obj['self'].linked_nodes_self_url

    def get_related_url(self, obj):
        return obj['self'].linked_nodes_related_url

    class Meta:
        type_ = 'linked_nodes'

    def get_pointers_to_add_remove(self, pointers, new_pointers):
        # Diff current pointers against the request payload.
        # Returns (nodes_to_add, pointers_to_remove); raises NotFound when a
        # requested node id does not exist.
        diff = relationship_diff(
            current_items={pointer.node._id: pointer for pointer in pointers},
            new_items={val['node']['_id']: val for val in new_pointers}
        )
        nodes_to_add = []
        for node_id in diff['add']:
            node = Node.load(node_id)
            if not node:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
            nodes_to_add.append(node)
        return nodes_to_add, diff['remove'].values()

    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        return {'data': [
            pointer for pointer in
            obj.nodes_pointer
            if not pointer.node.is_deleted and not pointer.node.is_collection
        ], 'self': obj}

    def update(self, instance, validated_data):
        # PUT/PATCH: remove pointers absent from the payload, add new ones.
        collection = instance['self']
        auth = utils.get_user_auth(self.context['request'])
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        for pointer in remove:
            collection.rm_pointer(pointer, auth)
        for node in add:
            collection.add_pointer(node, auth)
        return self.make_instance_obj(collection)

    def create(self, validated_data):
        # POST: only additions are allowed; a request adding nothing is an error.
        instance = self.context['view'].get_object()
        auth = utils.get_user_auth(self.context['request'])
        collection = instance['self']
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        if not len(add):
            raise RelationshipPostMakesNoChanges
        for node in add:
            collection.add_pointer(node, auth)
        return self.make_instance_obj(collection)
| karenhanson/osf.io_rmap_integration_old | api/base/serializers.py | serializers.py | py | 49,061 | python | en | code | 0 | github-code | 36 |
70508461223 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created on 2019-05-25 10:00
import re
"""
验证邮箱地址
"""
def verification(address):
    """Return 'ok' if *address* looks like a valid email address, else 'error'.

    The pattern requires word characters/hyphens (optionally dot-separated)
    in the local part, and a domain containing at least one dot.
    Fixes: raw string for the regex (the original's '\\.' escapes were
    invalid in a plain string) and the parameter no longer shadows the
    builtin ``str``.
    """
    email_pattern = re.compile(r'^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$')
    result = email_pattern.match(address)
    print(result)  # kept for parity with the original debug output
    if result is None:
        return "error"
    else:
        return 'ok'
if __name__ == '__main__':
    # Quick manual check against a known-good address.
    status = verification('wpf1011467276@gmail.com')
    print(status)
| lhtest429/Music-website-based-django | MyMusic/music/my_tools/verification.py | verification.py | py | 439 | python | en | code | 0 | github-code | 36 |
9480946225 | import random
from collections import defaultdict
import torch
from ltp import LTP
from .base_func import BaseFunc
class NerFunc(BaseFunc):
    """Data augmentation by swapping named entities (people, places,
    organizations) recognized by LTP with random replacements drawn from
    local dictionaries."""

    def __init__(self, config):
        super(NerFunc, self).__init__(config)
        self.augment_num = config.ner_func.augment_num
        self.combine_dict = self.load_ner_files()
        self.model = LTP(config.ner_func.ner_tool_name)
        if torch.cuda.is_available():
            self.model.to("cuda")

    @staticmethod
    def load_ner_files():
        """Load entity replacement dictionaries, one file per entity tag.

        Returns a dict mapping LTP entity tags (Nh=person, Ns=place,
        Ni=organization) to sets of candidate replacement strings.
        """
        combine_dict = defaultdict(set)
        sources = (
            ("Nh", "files/ner/people_name.txt"),
            ("Ns", "files/ner/place_name.txt"),
            ("Ni", "files/ner/company_name.txt"),
        )
        for tag, path in sources:
            # Use a context manager so handles are closed deterministically
            # (the original leaked the file objects returned by open()).
            with open(path, "r", encoding="utf-8") as fh:
                for line in fh:
                    combine_dict[tag].add(line.strip())
        return combine_dict

    def process(self, sentence):
        """Generate up to ``augment_num`` variants of *sentence*, each with one
        recognized named entity replaced. Returns [] when no entity is found.
        """
        final_augment_sentence = []
        seg_list = self.cut_words(sentence)
        result = self.model.pipeline(seg_list, tasks=["ner"])
        if len(result.ner) == 0:
            return final_augment_sentence
        for _ in range(self.augment_num):
            # NOTE(review): assumes each ner item unpacks to (tag, word) —
            # confirm against the LTP pipeline output format in use.
            tag, word = random.choice(result.ner)
            if tag in self.combine_dict:
                new_word = random.choice(list(self.combine_dict[tag]))
                # index() replaces the first occurrence only; duplicates later
                # in the sentence are left untouched (original behavior).
                old_index = seg_list.index(word)
                seg_list[old_index] = new_word
                new_sentence = ''.join(seg_list)
                final_augment_sentence.append(new_sentence)
        return final_augment_sentence
| shawn0wang/Text_Augment | function/ner_func.py | ner_func.py | py | 1,703 | python | en | code | 0 | github-code | 36 |
71656882025 | # Based on https://github.com/NATSpeech/NATSpeech
import utils.commons.single_thread_env # NOQA
import json
import numpy as np
import os
import random
import traceback
from functools import partial
from resemblyzer import VoiceEncoder
from tqdm import tqdm
from utils.audio.align import get_mel2note
from utils.audio.mel_processing import torch_wav2spec
from utils.audio.pitch.utils import f0_to_coarse
from utils.audio.pitch_extractors import extract_pitch_simple
from utils.commons.hparams import hparams
from utils.commons.indexed_datasets import IndexedDatasetBuilder
from utils.commons.multiprocess_utils import multiprocess_run_tqdm
from utils.os_utils import remove_file, copy_file
np.seterr(divide="ignore", invalid="ignore")
class BinarizationError(Exception):
    """Raised when an item cannot be binarized (e.g. bad alignment or empty f0)."""
    pass
class BaseBinarizer:
    """Binarize processed singing-voice data (audio + MIDI + text) into
    indexed datasets for training.

    Reads ``metadata.json`` from the processed data dir, extracts
    mel-spectrograms, alignments, pitch and note features per item, and
    writes train/valid/test splits plus lookup maps to ``binary_data_dir``.
    """

    def __init__(self, processed_data_dir=None):
        if processed_data_dir is None:
            processed_data_dir = hparams["processed_data_dir"]
        self.processed_data_dir = processed_data_dir
        self.hparams = hparams
        self.binary_data_dir = hparams["binary_data_dir"]
        self.preprocess_args = hparams["preprocess_args"]
        self.binarization_args = hparams["binarization_args"]
        self.items = {}
        self.item_names = []
        if self.binarization_args["with_spk_f0_norm"]:
            # Per-speaker f0 min/max statistics, accumulated on the train split.
            self.spk_pitch_map = {}

    def load_meta_data(self):
        # Populate self.items / self.item_names from metadata.json.
        processed_data_dir = self.processed_data_dir
        item_list = json.load(open(f"{processed_data_dir}/metadata.json"))
        for r in tqdm(item_list, desc="Loading meta data."):
            item_name = r["item_name"]
            self.items[item_name] = r
            self.item_names.append(item_name)
        if self.binarization_args["shuffle"]:
            # Fixed seed keeps the split deterministic across runs.
            random.seed(1234)
            random.shuffle(self.item_names)

    @property
    def train_index_item_names(self):
        range_ = self._convert_range(self.binarization_args["train_range"])
        return self.item_names[range_[0]:range_[1]]

    @property
    def valid_index_item_names(self):
        range_ = self._convert_range(self.binarization_args["valid_range"])
        return self.item_names[range_[0]:range_[1]]

    @property
    def test_index_item_names(self) -> list:
        range_ = self._convert_range(self.binarization_args["test_range"])
        return self.item_names[range_[0]:range_[1]]

    def _convert_range(self, range_: list):
        # A -1 upper bound means "to the end of the item list".
        # NOTE(review): this mutates the hparams list in place — confirm
        # that is intended.
        if range_[1] == -1:
            range_[1] = len(self.item_names)
        return range_

    @property
    def train_title_item_names(self):
        # Title-based split: an item belongs to a split when its name
        # contains any of the configured title substrings.
        return [item_name for item_name in self.item_names \
                if any(title in item_name for title in self.binarization_args["train_range"])]

    @property
    def valid_title_item_names(self):
        return [item_name for item_name in self.item_names \
                if any(title in item_name for title in self.binarization_args["valid_range"])]

    @property
    def test_title_item_names(self):
        return [item_name for item_name in self.item_names \
                if any(title in item_name for title in self.binarization_args["test_range"])]

    def meta_data(self, prefix: str, dataset_range):
        """
        Parameter
        ---------
        prefix: str
            Choose one of ["train", "valid", "test"]
        """
        # dataset_range selects index-based vs title-based splitting.
        if prefix == "valid":
            if dataset_range == "index":
                item_names = self.valid_index_item_names
            elif dataset_range == "title":
                item_names = self.valid_title_item_names
        elif prefix == "test":
            if dataset_range == "index":
                item_names = self.test_index_item_names
            elif dataset_range == "title":
                item_names = self.test_title_item_names
        else:
            if dataset_range == "index":
                item_names = self.train_index_item_names
            elif dataset_range == "title":
                item_names = self.train_title_item_names
        for item_name in item_names:
            yield self.items[item_name]

    def process(self):
        # Entry point: load metadata, copy shared maps, build note maps, then
        # binarize each split.
        self.load_meta_data()
        os.makedirs(hparams["binary_data_dir"], exist_ok=True)
        for fn in ["phone_set.json", "spk_map.json"]:
            remove_file(f"{hparams['binary_data_dir']}/{fn}")
            copy_file(f"{hparams['processed_data_dir']}/{fn}", f"{hparams['binary_data_dir']}/{fn}")
        self.note_pitch_map = self.build_pitch_map()
        self.note_dur_map = self.build_dur_map()
        self.note_tempo_map = self.build_tempo_map()
        self.process_data("valid")
        self.process_data("test")
        self.process_data("train")

    def process_data(self, prefix: str):
        """
        Parameter
        ---------
        prefix: str
            Choose one of ["train", "valid", "test"]
        """
        data_dir = hparams["binary_data_dir"]
        meta_data = list(self.meta_data(prefix, self.binarization_args["dataset_range"]))
        process_item = partial(self.process_item, preprocess_args=self.preprocess_args,
                               binarization_args=self.binarization_args)
        builder = IndexedDatasetBuilder(f"{data_dir}/{prefix}")
        ph_lengths = []
        mel_lengths = []
        total_sec = 0
        max_sec = 0
        total_file = 0
        items = []
        args = [{"item": item, "note_pitch_map": self.note_pitch_map, "note_dur_map": self.note_dur_map,
                 "note_tempo_map": self.note_tempo_map} for item in meta_data[:len(meta_data)]]
        # Get information from audio and transcript
        for item_id, item in multiprocess_run_tqdm(process_item, args, desc="Processing data"):
            if item is not None:
                items.append(item)
                if self.binarization_args["with_spk_f0_norm"] and prefix == "train":
                    self.calculate_spk_stats(item["f0"], item["spk_id"])
        # Use pre-trained speaker embeddings
        if self.binarization_args["with_spk_embed"]:
            args = [{"wav": item["wav"]} for item in items]
            for item_id, spk_embed in multiprocess_run_tqdm(
                    self.get_spk_embed, args,
                    init_ctx_func=lambda wid: {"voice_encoder": VoiceEncoder().cuda()}, num_workers=4,
                    desc="Extracting spk embed"):
                items[item_id]["spk_embed"] = spk_embed
        for item in items:
            # Strip fields that are not stored, then write the item.
            if not self.binarization_args["with_wav"] and "wav" in item:
                del item["wav"]
            mel_lengths.append(item["len"])
            assert item["len"] > 0, (item["item_name"], item["text"], item["mel2ph"])
            if "ph_len" in item:
                ph_lengths.append(item["ph_len"])
            if max_sec < item["sec"]:
                max_sec = item["sec"]
            total_sec += item["sec"]
            if "midi_info" in item:
                del item["midi_info"]
            del item["sec"]
            del item["others"]
            if not self.binarization_args["with_mel"] and "mel" in item:
                del item["mel"]
            builder.add_item(item)
        total_file += len(items)
        builder.finalize()
        # Append to existing length files so repeated runs accumulate.
        if os.path.exists(f"{data_dir}/{prefix}_lengths.npy"):
            mel_lengths_ = np.load(f"{data_dir}/{prefix}_lengths.npy").tolist()
            mel_lengths_.extend(mel_lengths)
            mel_lengths = mel_lengths_
        np.save(f"{data_dir}/{prefix}_lengths.npy", mel_lengths)
        if len(ph_lengths) > 0:
            # NOTE: "lenghts" (sic) is the historical filename; read and write
            # sides agree, so renaming would orphan existing files.
            if os.path.exists(f"{data_dir}/{prefix}_ph_lenghts.npy"):
                ph_lengths_ = np.load(f"{data_dir}/{prefix}_ph_lenghts.npy").tolist()
                ph_lengths.extend(ph_lengths_)
            np.save(f"{data_dir}/{prefix}_ph_lenghts.npy", ph_lengths)
        if self.binarization_args["with_spk_f0_norm"] and prefix == "train":
            self.build_spk_pitch_map()
        print(f"| {prefix} total files: {total_file}, total duration: {total_sec:.3f}s, max duration: {max_sec:.3f}s")

    @classmethod
    def process_item(cls, item: dict, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args: dict):
        # Process one metadata item into a binarizable record; returns None
        # (and logs) when the item must be skipped.
        item["ph_len"] = len(item["ph_token"])
        item_name = item["item_name"]
        wav_fn = item["wav_fn"]
        # Get Waveform and Mel-spectrogram information
        wav, mel = cls.process_audio(wav_fn, item, binarization_args)
        if mel.shape[0] > 2:
            try:
                n_bos_frames, n_eos_frames = 0, 0
                if preprocess_args["use_text"] and preprocess_args["use_midi"]:
                    if binarization_args["with_midi_align"]:
                        # Align text information
                        cls.process_midi_align(item)
                    # Mapping pitch and dur map of note
                    cls.process_note(item, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args)
                    if binarization_args["trim_eos_bos"]:
                        # Trim leading/trailing silence frames from every
                        # frame-aligned field.
                        n_bos_frames = item["duration"][0] if preprocess_args["use_midi"] else 0
                        n_eos_frames = item["duration"][-1] if preprocess_args["use_midi"] else mel.shape[0]
                        T = len(mel)
                        item["mel"] = mel[n_bos_frames:T - n_eos_frames]
                        item["mel2ph"] = item["mel2ph"][n_bos_frames:T - n_eos_frames]
                        item["duration"] = item["duration"][1:-1]
                        item["duration_midi"] = item["duration_midi"][1:-1]
                        item["len"] = item["mel"].shape[0]
                        item["wav"] = wav[n_bos_frames * hparams["hop_size"]:len(wav) - n_eos_frames * hparams["hop_size"]]
                if binarization_args["with_f0"]:
                    # Get pitch information
                    cls.process_pitch(item, n_bos_frames, n_eos_frames)
            except BinarizationError as e:
                print(f"| Skip item ({e}). item_name: {item_name}, wav_fm: {wav_fn}")
                return None
            except Exception as e:
                traceback.print_exc()
                print(f"| Skip item. item_name: {item_name}, wav_fm: {wav_fn}")
                return None
        return item

    @classmethod
    def process_audio(cls, wav_fn: str, res: dict, binarization_args: dict):
        # Get Mel-spectrogram
        wav2spec_dict = torch_wav2spec(wav_fn,
                                       fft_size=hparams["fft_size"],
                                       hop_size=hparams["hop_size"],
                                       win_length=hparams["win_size"],
                                       num_mels=hparams["num_mel_bins"],
                                       fmin=hparams["fmin"],
                                       fmax=hparams["fmax"],
                                       sample_rate=hparams["sample_rate"])
        mel = wav2spec_dict["mel"]
        wav = wav2spec_dict["wav"].astype(np.float16)
        # Check Linear-spectrogram
        if binarization_args["with_linear"]:
            res["linear"] = wav2spec_dict["linear"]
        if "wav_norm" in wav2spec_dict:
            res["wav_norm"] = wav2spec_dict["wav_norm"]
        res.update({"mel": mel, "wav": wav, "sec": len(wav) / hparams["sample_rate"], "len": mel.shape[0]})
        return wav, mel

    @staticmethod
    def process_midi_align(item: dict):
        # Align MIDI note info to mel frames; fills mel2ph/mel2note/duration.
        mel = item["mel"]
        midi_info = item["midi_info"]
        # Get align information and duration
        mel2phone, mel2note, duration, ph_token, ph_list, _, item["midi_info"] = get_mel2note(midi_info, mel, hparams["hop_size"],
                                                                                             hparams["sample_rate"], item["silence"])
        item["ph_token"] = ph_token
        item["text"] = ph_list
        # NOTE(review): the `ph_list is None` check is unreachable — len(None)
        # raises TypeError before the `or` branch runs; check None first.
        if len(ph_list) < hparams["binarization_args"]["min_text"] or ph_list is None:
            raise BinarizationError(
                f"| Less than min text sequence: {len(item['ph_token'])}")
        if np.array(mel2phone).max() - 1 >= len(ph_token):
            raise BinarizationError(
                f"Align does not match: mel2ph.max - 1 != {np.array(mel2phone).max() - 1}, len(ph_token): {len(ph_token)}")
        item["mel2ph"] = mel2phone
        item["mel2note"] = mel2note
        item["duration"] = duration
        # Get phoneme to word information
        # NOTE(review): assert message is missing the f-prefix, so the
        # placeholders are not interpolated.
        assert len(ph_token) == len(duration), "| phoneme : {len(ph_token)}, ph_duration : {len(duration)}"

    @staticmethod
    def process_note(item, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args):
        # Build the duration encode/decode tables (log-scale bucketing: 2**i
        # raw durations map onto one encoded bucket), then map each MIDI
        # note's duration/pitch/tempo through the lookup maps.
        dur_enc = list()
        dur_dec = list()
        for i in range(binarization_args["max_durations"]):
            for _ in range(binarization_args["pos_resolution"]):
                dur_dec.append(len(dur_enc))
                for _ in range(2 ** i):
                    dur_enc.append(len(dur_dec) - 1)

        def d2e(x):
            # Encode a raw duration, clamping to the largest bucket.
            return dur_enc[x] if x < len(dur_enc) else dur_enc[-1]

        if preprocess_args["use_midi"]:
            # NOTE(review): assumes midi_info rows use index 2=pitch,
            # 3=duration, 6=tempo — confirm against get_mel2note's output.
            item["note_duration"] = [note_dur_map[str(d2e(note[3]))] for note in item["midi_info"]]
            item["note_pitch"] = [note_pitch_map[str(note[2])] for note in item["midi_info"]]
            item["note_tempo"] = [note_tempo_map[str(note[6])] for note in item["midi_info"]]
        else:
            item["note_duration"] = [0]
            item["note_pitch"] = [0]
            item["note_tempo"] = [0]

    @staticmethod
    def process_pitch(item: dict, n_bos_frames: int, n_eos_frames: int):
        wav, mel = item["wav"], item["mel"]
        # Get f0 from waveform
        f0 = extract_pitch_simple(wav)
        if sum(f0) == 0:
            raise BinarizationError("Empty f0")
        assert len(mel) == len(f0) // hparams["f0_resolution"], (len(mel), len(f0))
        # Quantize f0 values
        pitch_coarse = f0_to_coarse(f0)
        item["f0"] = f0
        item["pitch"] = pitch_coarse
        if hparams["binarization_args"]["with_f0cwt"]:
            # Continuous wavelet transform of the normalized log-f0 contour.
            # NOTE(review): get_cont_logf0 / get_logf0_cwt are not among the
            # imports visible in this chunk — confirm they are in scope.
            _, cont_logf0 = get_cont_logf0(f0)
            logf0s_mean, logf0s_std = np.mean(cont_logf0), np.std(cont_logf0)
            cont_logf0_norm = (cont_logf0 - logf0s_mean) / logf0s_std
            cwt_spec, _ = get_logf0_cwt(cont_logf0_norm)
            item["cwt_spec"] = cwt_spec
            item["cwt_mean"] = logf0s_mean
            item["cwt_std"] = logf0s_std

    def build_pitch_map(self):
        """ Using 0 to 128 notes for MIDI. """
        # 0 is reserved for "no note"; configured note range starts at 1.
        pitch_map = {"0": 0}
        for i, x in enumerate(range(self.hparams["note_range"][0], self.hparams["note_range"][1])):
            pitch_map[str(x)] = i + 1
        json.dump(pitch_map, open(f"{self.binary_data_dir}/pitch_map.json", "w"), ensure_ascii=False)
        return pitch_map

    def build_dur_map(self):
        """ Using max duration for MIDI. """
        # Ids 0-2 are special tokens; real durations start at 4 (3 unused).
        dur_map = {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2}
        for i, x in enumerate(range(0, 128)):
            dur_map[str(x)] = i + 4
        json.dump(dur_map, open(f"{self.binary_data_dir}/dur_map.json", "w"), ensure_ascii=False)
        return dur_map

    def build_tempo_map(self):
        # Same layout as dur_map, over the configured tempo range (inclusive).
        tempo_map = {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2}
        tempo_range = self.binarization_args["tempo_range"]
        for i, x in enumerate(range(tempo_range[0], tempo_range[1] + 1)):
            tempo_map[str(x)] = i + 4
        json.dump(tempo_map, open(f"{self.binary_data_dir}/tempo_map.json", "w"), ensure_ascii=False)
        return tempo_map

    def calculate_spk_stats(self, f0, spk_id):
        # Track the running f0 min/max per speaker (min over voiced frames only).
        f0_min = f0[np.nonzero(f0)].min()
        f0_max = f0.max()
        if str(spk_id) in self.spk_pitch_map:
            spk_pitch_stat = self.spk_pitch_map[str(spk_id)]
            if spk_pitch_stat["min"] > f0_min:
                self.spk_pitch_map[str(spk_id)]["min"] = f0_min
            if spk_pitch_stat["max"] < f0_max:
                self.spk_pitch_map[str(spk_id)]["max"] = f0_max
        else:
            spk_pitch_stat = {}
            spk_pitch_stat["max"] = f0_max
            spk_pitch_stat["min"] = f0_min
            self.spk_pitch_map[str(spk_id)] = spk_pitch_stat

    def build_spk_pitch_map(self):
        # Merge this run's speaker stats into the persisted map, sorted by
        # numeric speaker id.
        spk_pitch_map = {}
        stat_map_dir = f"{self.binary_data_dir}/spk_pitch_map.json"
        if os.path.exists(stat_map_dir):
            spk_pitch_map = json.load(open(stat_map_dir, "r"))
        spk_pitch_map.update(self.spk_pitch_map)
        spk_pitch_map = {key: value for key, value in sorted(spk_pitch_map.items(), key=lambda x: int(x[0]))}
        print("| Statistics of speaker's pitch is saved.")
        json.dump(spk_pitch_map, open(stat_map_dir, "w"), ensure_ascii=False)

    @staticmethod
    def get_spk_embed(wav, ctx):
        # Resemblyzer speaker embedding; encoder is supplied via worker ctx.
        return ctx["voice_encoder"].embed_utterance(wav.astype(float))

    @property
    def num_workers(self):
        # N_PROC env var overrides hparams, which falls back to CPU count.
        return int(os.getenv("N_PROC", hparams.get("N_PROC", os.cpu_count())))
| jisang93/VISinger | preprocessor/base_binarizer.py | base_binarizer.py | py | 16,910 | python | en | code | 13 | github-code | 36 |
10834105092 | from turtle import Turtle
# Styling for the on-screen message.
ALIGNMENT = 'center'
FONT = ('courier', 20, 'normal')
class GameOver(Turtle):
    """Turtle that displays the 'Game Over' message on screen."""

    def __init__(self):
        super().__init__()
        self.hideturtle()
        self.color('white')
        self.penup()
        # Plain string literal: the original used an f-string with nothing to
        # interpolate.
        self.write(arg='Game Over', align=ALIGNMENT, move=False, font=FONT)
| joshrivera116/snake | gameover.py | gameover.py | py | 325 | python | en | code | 0 | github-code | 36 |
3184473921 | # -*- coding: utf-8 -*-
import yaml
from . import instructions
from . import value_containers
from .exceptions import ParserException
from .method import Method
def _process_func(method, func):
    """Populate *method* with name, return type and arguments from the
    'func' section of the parsed YAML.

    :raises ParserException: when the section is missing or malformed.
    """
    if not isinstance(func, dict) or not func:
        raise ParserException('"func" not defined')
    method.function_name = func['name']
    return_type_name = func['type'].lower()
    method.return_type = value_containers.types[return_type_name]()
    # Function arguments also count toward the method's argument tally.
    _process_vars(method, func['args'], inc_arg_count=True)
def _add_label(method, label, index):
if not isinstance(label, str):
raise ParserException('label %s needs to be a string' % label)
if label is None:
raise ParserException('every local variable needs a label')
if label in method.labels:
raise ParserException('labels has to be unique: duplicate %s' % label)
method.labels[label] = index
def _process_vars(method, args, inc_arg_count=False):
    """Append a typed value container for each entry of *args* to
    ``method.variables`` and register its label.

    :param inc_arg_count: when True each entry also counts as a function
        argument (used for the 'func' section's args).
    """
    for arg in args:
        type_name = arg['type'].lower()
        if inc_arg_count:
            method.argument_count += 1
        method.variables.append(value_containers.types[type_name]())
        # Renamed from 'l': single-letter 'l' is easily confused with '1'.
        label = arg.get('label', None)
        _add_label(method, label, len(method.variables) - 1)
def _process_ins(method, ins):
    """Translate the YAML instruction list into instruction objects on
    *method*, resolving label references to instruction offsets.

    Labels appear as ``{label: name}`` entries and attach to the NEXT real
    instruction, so two passes are needed: pass 1 records label offsets,
    pass 2 builds the instructions using those offsets.
    """
    # pass1: collect labels
    offset = 0
    label = None
    label_current = None
    for i in ins:
        if isinstance(i, dict):
            # Instruction-with-argument or label entry: exactly one key.
            if not len(i) == 1:
                raise ParserException('bad syntax for data %s' % i)
            label_current = i.get('label', None)
            # Two labels in a row would leave the first one dangling.
            if label and label_current:
                raise ParserException('label cannot follow label: %s, %s' % (label, label_current))
            label = label_current
        else:
            label = None
            label_current = None
        if label:
            # Label entries do not occupy an offset; record and continue.
            _add_label(method, label, offset)
        else:
            offset += 1
    else:
        # for-else: runs after the loop; a still-pending label has no
        # instruction to attach to.
        if label:
            raise ParserException('label cannot be as last instruction %s' % i)
    # pass2: use labels and collect instructions
    offset = 0
    for i in ins:
        if isinstance(i, dict) and i.get('label', None):
            # Label entries were handled in pass 1.
            continue
        else:
            offset += 1
        if isinstance(i, str):
            # Bare keyword: instruction without an argument.
            inst = instructions.keywords[i.lower()]
            if not issubclass(inst, instructions.InsNoArgument):
                raise ParserException('instruction %s requires argument' % i)
            method.code.append(inst())
        elif isinstance(i, dict):
            # {keyword: value}: instruction with an argument.
            # NOTE(review): popitem() mutates the parsed YAML structure —
            # confirm callers never reuse it after parsing.
            kw, value = i.popitem()
            inst = instructions.keywords[kw.lower()]
            if issubclass(inst, instructions.InsNoArgument):
                raise ParserException('instruction %s takes no argument' % i)
            if issubclass(inst, instructions.InsArgILabel):
                # Jump-style instruction: resolve the label to its offset.
                if isinstance(value, str):
                    try:
                        value = method.labels[value]
                    except KeyError:
                        raise ParserException('label %s is not defined' % value)
                else:
                    raise ParserException('instruction %s requires label as argument' % i)
            elif issubclass(inst, instructions.InsArgInteger):
                if value != int(value):
                    raise ParserException('instruction %s requires integer argument' % i)
            elif issubclass(inst, instructions.InsArgFloat):
                if value != float(value):
                    raise ParserException('instruction %s requires float argument' % i)
            method.code.append(inst(instructions.contain_value(inst, value)))
        else:
            raise ParserException('unknown instruction format %s' % i)
def process_yaml(structure):
    """Build a Method from a parsed YAML mapping.

    Expects optional ``func``, ``lvars`` and ``ins`` keys; missing keys are
    treated as empty.
    """
    method = Method()
    _process_func(method, structure.get('func', {}))
    _process_vars(method, structure.get('lvars', []))
    _process_ins(method, structure.get('ins', []))
    return method
def parse_string(data):
    """Parse YAML method-definition text and return the resulting Method."""
    return process_yaml(yaml.safe_load(data))
def parse_file(fname):
    """Parse the YAML method definition stored in the file at *fname*.

    Reads the file content explicitly instead of handing the open file object
    to parse_string: the original relied on yaml.safe_load accepting streams,
    which contradicts parse_string's string contract.
    """
    with open(fname, 'r') as f:
        return parse_string(f.read())
| lukleh/Tiny-Stackbased-Virtual-Machine-in-Python | TSBVMIP/code_parser.py | code_parser.py | py | 4,024 | python | en | code | 4 | github-code | 36 |
12210284781 | import openpyxl
from openpyxl.utils import cell
def read(config):
    """Load the workbook at ``config.source_path`` and return one extracted
    dataset per tab listed in ``config.source_tabs``.

    Returns a dict mapping tab name -> cell range.  ``data_only=True`` reads
    cached formula results rather than the formulas themselves.
    """
    # Open the workbook *outside* the try block: if loading fails there is
    # nothing to close, and the original ``finally`` raised NameError on the
    # unbound ``workbook`` variable, masking the real exception.
    workbook = openpyxl.load_workbook(config.source_path, data_only=True)
    try:
        datasets = {}
        for source_tab_name in config.source_tabs:
            datasets[source_tab_name] = extract_dataset(workbook, source_tab_name, config)
        return datasets
    finally:
        workbook.close()
def extract_dataset(workbook, source_tab_name, config):
    """Return the populated cell range for one worksheet tab.

    The range spans (column_range_start, start_search_row) through
    (column_range_stop, last contiguous data row).
    """
    # Key-style lookup replaces Workbook.get_sheet_by_name(), which is
    # deprecated and removed in current openpyxl releases.
    worksheet = workbook[source_tab_name]
    last_data_row = find_last_data_row(worksheet, config.start_search_row, config.column_range_start)
    copy_range_start = "{}{}".format(config.column_range_start, config.start_search_row)
    copy_range_end = "{}{}".format(config.column_range_stop, last_data_row)
    return worksheet[copy_range_start:copy_range_end]
def find_last_data_row(worksheet, start_row, column_range_start):
column_index = cell.column_index_from_string(column_range_start)
for current_row in range(start_row, worksheet.max_row):
val = worksheet.cell(row = current_row, column = column_index).value
if worksheet.cell(row = current_row, column = column_index).value == None:
return current_row - 1
return worksheet.max_row | mcweglowski/codegens | codegen/excel/excel_reader.py | excel_reader.py | py | 1,239 | python | en | code | 0 | github-code | 36 |
stock = {
    "banana": 6,
    "apple": 0,
    "orange": 32,
    "pear": 15
}

prices = {
    "banana": 4,
    "apple": 2,
    "orange": 1.5,
    "pear": 3
}

# Total value of the inventory: quantity on hand times unit price, per fruit.
price = sum(stock[fruit] * prices[fruit] for fruit in stock)

print(str(int(price)) + " bucks")
| EdyStan/homework_beetroot | homework/lms-homework/06_dicts-for-loops-comprehensions/task_2.py | task_2.py | py | 265 | python | en | code | 0 | github-code | 36 |
class SiteData:
    """Static content for the portfolio site: language logos and the list of
    project cards (name, logo, descriptions, tags, GitHub repo, optional link).
    """

    # Language name -> logo image path used by project cards below.
    lang_dict = {
        'C++': 'image/langs/cpp-logo.png',
        'Python': 'image/langs/python-logo.jpg',
        'Go': 'image/langs/go-logo.png',
        'PL/pgSQL': 'image/langs/plpgsql.png'
    }

    projects = [
        {
            'name': 'web-testing-tool',
            'image': lang_dict['Python'],
            'front_description': 'Students testing tool.',
            'back_description': 'Testing tool implemented on Django. Data storage is implemented in the DBMS MongoDB.',
            'tags': [
                'web',
                'python',
                'html5',
                'js',
                'bootstrap4',
                'django',
                # Fixed: the original was missing the comma after 'rest api',
                # so Python concatenated it with 'mongo' into 'rest apimongo'.
                'rest api',
                'mongo',
                'docker',
                'travis ci',
                'github actions'
            ],
            'github_name': 'web-testing-tool',
        },
        {
            'name': 'video-tracker',
            'image': lang_dict['C++'],
            'front_description': 'Detecting and tracking objects on video with OpenCV usage.',
            'back_description': 'Application for tracking various objects and estimating their speed in video stream.',
            'tags': [
                'c++17',
                'computer vision',
                'ml',
                'cmake',
                'opencv',
                'dlib',
                'mobile net ssd',
                'video detection',
                'video tracking'
            ],
            'github_name': 'video-tracker',
        },
        {
            'name': 'smart-text-editor',
            'image': lang_dict['Python'],
            'front_description': 'Text editor with intelligent typing based on relevant search in Elasticsearch.',
            'back_description': 'Web application for editing text documents with support for intelligent typing ' +
                                'based on relevant search in Elasticsearch.',
            'tags': [
                'web',
                'python',
                'flask',
                'rest api',
                'elasticsearch',
                'docker',
                'travis ci',
                'github actions'
            ],
            'github_name': 'smart-text-editor'
        },
        {
            'name': 'vk-news-dashboard',
            'image': lang_dict['Python'],
            'front_description': 'Simple dashboard for vk news groups.',
            'back_description': 'Simple dashboard implemented on Dash. Provides monitoring VK news groups posts.',
            'tags': [
                'web',
                'python',
                'dash',
                'nlp',
                'postgres',
                'docker',
                'docker compose',
                'travis ci'
            ],
            'github_name': 'vk-news-dashboard',
            'ref': 'http://vknews.vnkrtv.ru/',
        },
        {
            'name': 'vk-tracker',
            'image': lang_dict['Python'],
            'front_description': 'Web app for tracking, analyzing and searching people in VK.',
            'back_description': 'Web app for tracking and searching VK users. ' +
                                'Implemented on django and dash, data is stored in MongoDB and Neo4j. ',
            'tags': [
                'web',
                'python',
                'html5',
                'js',
                'bootstrap4',
                'django',
                'dash',
                'mongo',
                'neo4j',
                'docker',
                'docker compose',
                'travis ci',
                'github actions',
                'vk api'
            ],
            'github_name': 'vk-tracker',
        },
        {
            'name': 'go-vk-news-loader',
            'image': lang_dict['Go'],
            'front_description': 'Service loading news from vk news groups.',
            'back_description': 'Service providing loading news from list of vk news groups into PostgreSQL DB.',
            'tags': [
                'microservice',
                'go',
                'postgres',
                'docker',
                'docker compose',
                'travis ci'
            ],
            'github_name': 'go-vk-news-loader',
        },
        {
            'name': 'telegram-channels-loader',
            'image': lang_dict['Python'],
            'front_description': 'Service providing loading messages from telegram channels.',
            'back_description': 'Service providing loading messages from given telegram channels to PostgreSQL DB.',
            'tags': [
                'microservice',
                'python',
                'telegram',
                'async',
                'postgres',
                'docker',
                'docker compose',
                'travis ci'
            ],
            'github_name': 'telegram-channels-loader',
        },
        {
            'name': 'tiktok-loader',
            'image': lang_dict['Python'],
            'front_description': 'Service loading TikTok information.',
            'back_description': 'Service loading information about russian TikTik popular users. ' +
                                'Loads to DB latest tiktoks with audios and videos.',
            'tags': [
                'microservice',
                'python',
                'tiktok',
                'postgres',
                'docker',
                'docker compose',
                'travis ci'
            ],
            'github_name': 'tiktok-loader',
        },
        {
            'name': 'web-library',
            'image': lang_dict['Python'],
            'front_description': 'Web app for storing and adding translations of various compositions.',
            'back_description': 'App for adding and storing translations of various compositions. ' +
                                'All data stored in PostgreSQL, web app implemented in Django.',
            'tags': [
                'web',
                'python',
                'html5',
                'js',
                'docker',
                'docker compose',
                'travis ci',
                'github actions'
            ],
            'github_name': 'web-library',
        },
        {
            'name': 'go-habr-loader',
            'image': lang_dict['Go'],
            'front_description': 'Utility loading all posts from habr.com.',
            'back_description': 'Utility providing loading all posts from habr.com into PostgreSQL DB.',
            'tags': [
                'utility',
                'go',
                'postgres',
                'docker',
                'travis ci'
            ],
            'github_name': 'go-habr-loader',
            'ref': 'https://www.kaggle.com/leadness/habr-posts/'
        },
        {
            'name': 'postgres-markov-chain',
            'image': lang_dict['PL/pgSQL'],
            'front_description': 'Text generator based on Markov chain.',
            'back_description': 'Text generator based on Markov chain written on pure PL/pgSQL. Providing text ' +
                                'processing, training Markov chain and generating text.',
            'tags': [
                'utility',
                'postgres',
                'plpgsql',
                'nlp',
            ],
            'github_name': 'postgres-markov-chain',
        },
        {
            'name': 'go-wiki-parser',
            'image': lang_dict['Go'],
            'front_description': 'Utility parsing large xml wiki dump into MongoDB',
            'back_description': 'Utility providing parsing large xml wiki dump into MongoDB. Stored each article as ' +
                                'dict of subtopic with list of all references for other articles.',
            'tags': [
                'utility',
                'go',
                'travis ci'
            ],
            'github_name': 'go-wiki-parser',
        },
        {
            'name': 'web-db-manager',
            'image': lang_dict['Python'],
            'front_description': 'Web app for managing specific MS SQL Server DB.',
            'back_description': 'Web application for working with the subject database of the shop stored in MS SQL ' +
                                'Server DB.',
            'tags': [
                'web',
                'python',
                'html5',
                'js',
                'flask',
                'mssql server',
                'travis ci'
            ],
            'github_name': 'web-db-manager',
        },
        {
            'name': 'matrix-multiplier',
            'image': lang_dict['C++'],
            'front_description': 'Calculates all possible products of vectors in a given matrix.',
            'back_description': 'Calculates all possible products of vectors in a given matrix with specified ' +
                                'threshold, preserving the final product chains.',
            'tags': [
                'utility',
                'c++14',
                'libtorch',
                'qt5',
                'map reduce',
                'cmake',
                'cuda'
            ],
            'github_name': 'matrix-multiplier',
        },
        {
            'name': 'cmd-vk-tracker',
            'image': lang_dict['C++'],
            'front_description': 'Console application for Windows providing monitoring VK users.',
            'back_description': 'Console application providing loading VK users information, getting ' +
                                'account changes and users relationship.',
            'tags': [
                'utility',
                'c++14',
                'cmake',
                'vk api'
            ],
            'github_name': 'cmd-vk-tracker',
        },
        {
            'name': 'wifi-detect',
            'image': lang_dict['C++'],
            'front_description': 'Small console application for detecting Wi-Fi routers and connected to them devices.',
            'back_description': 'Detects wifi routers and all devices, connected to them. Also records all captured ' +
                                'traffic in capture.pcap.',
            'tags': [
                'utility',
                'c++14',
                'cmake'
            ],
            'github_name': 'wifi-detect',
        },
        {
            'name': 'tf-net-analizer',
            'image': lang_dict['Python'],
            'front_description': 'Notebooks with NN classifying internet traffic.',
            'back_description': 'Notebooks with NN classifying internet traffic from KDD Cup 1999 data. Implemented '
                                'on TensowFlow.',
            'tags': [
                'data science',
                'ml',
                'python',
                'tensorflow',
                'sklearn',
                'jupyter notebook'
            ],
            'github_name': 'tf-net-analizer',
            'ref': 'https://www.kaggle.com/leadness/knncup99-ffnn-0-96-acc-0-57-weighted-acc'
        },
        {
            'name': 'go-tcp-chat',
            'image': lang_dict['Go'],
            'front_description': 'Simple TCP chat with tui.',
            'back_description': 'Simple TCP chat on sockets with text user interface.',
            'tags': [
                'service',
                'go'
            ],
            'github_name': 'go-tcp-chat'
        },
        {
            'name': 'my-site',
            'image': lang_dict['Python'],
            'front_description': 'This site.',
            'back_description': 'Simple single page site implemented on Flask.',
            'tags': [
                'web',
                'python',
                'html5',
                'bootstrap4',
                'flask'
            ],
            'github_name': 'my-site'
        }
    ]
| vnkrtv/my-site | app/projects.py | projects.py | py | 11,856 | python | en | code | 0 | github-code | 36 |
417669106 | from array import *
import os
from PIL import Image
# Raw scan codes received from the IR remote; index-aligned with ButtonsNames.
Buttons=[0x300fd40bf,0x300fdc03f,0x300fd20df,0x300fda05f,0x300fd609f,0x300fde01f,0x300fd10ef,0x300fd906f,0x300fd50af,0x300fd30cf,0x300fdb24d,0x300fd728d,0x300fdf20d,0x300fd8877,0x300fd48b7]
# Human-readable name for each code in Buttons (same index).
ButtonsNames=["One","Two","Three","Four","Five","Six","Seven","Eight","Nine","Zero","MENU","TITLE","L/R","Left_key","Right_key"]
# Viewing distance (feet) -> folder holding that distance's image sets.
distantToFolderMappings = {
    12: "C:/Users/rakib/OneDrive/Desktop/Optoshi/12 Feet",
    10: "C:/Users/rakib/OneDrive/Desktop/Optoshi/10 Feet",
    8: "C:/Users/rakib/OneDrive/Desktop/Optoshi/8 Feet"
}
# Remote button name -> image sub-folder (menu) it opens.
menuMapping = {
    "One": "e",
    "Two": "pediatric"
}
# Remote button name -> viewing distance in feet it selects.
distanceMapping = {
    "Eight": 8,
    "Ten": 10
}
def convertHex(binaryValue):
    """Interpret *binaryValue* as a base-2 digit string and return its hex form.

    Accepts either an int written with 0/1 digits or a string of 0/1 characters;
    the result is the '0x…' string produced by hex().
    """
    return hex(int(str(binaryValue), 2))
class Folder:
    """One image sub-folder for a menu, browsed with IR-remote style commands.

    Holds the .png/.jpg files of the selected menu folder and an index into
    them; navigation wraps around at both ends.
    """
    # NOTE(review): class-level attributes (``images = []`` is a shared mutable
    # default).  __init__ rebinds all of them per instance, so no sharing
    # actually occurs, but plain instance assignment would be safer.
    menuId = ""
    images = []
    name = ""
    currentIndex = 0
    def __init__(self, distance, menuId):
        # Resolve <distance folder>/<menu name> and collect its image files.
        self.name = menuMapping[menuId]
        distancePath = distantToFolderMappings[distance]
        actualFolderPath = distancePath+"/"+self.name
        folderImages = self.get_files_in_directory(actualFolderPath)
        self.images = [file for file in folderImages if file.endswith(".png") or file.endswith(".jpg")]
        self.currentIndex = 0
        self.menuId = menuId
        # Immediately display the first image of the folder.
        self.openImage()
        #print(self.images)
    def openImage(self):
        """Open and display the image at currentIndex with PIL's viewer."""
        print("Opening from Folder "+self.name+" with index "+str(self.currentIndex))
        img = Image.open(self.images[self.currentIndex])
        self.currentImage = img
        img.show()
    def moveIndexToLeft(self):
        """Step to the previous image, wrapping to the last one from index 0."""
        if self.currentIndex-1<0:
            self.currentIndex = len(self.images)-1
        else:
            self.currentIndex = self.currentIndex-1
        print("Processing done")
    def moveIndexToRight(self):
        """Step to the next image, wrapping back to index 0 at the end."""
        if self.currentIndex+1<len(self.images):
            self.currentIndex = self.currentIndex + 1
        else:
            self.currentIndex = 0
    def processIrSignal(self, signal):
        """Handle one remote command; return 1 if handled, -1 if unknown.

        "Left"/"Right" page through the images; pressing this folder's own
        menu button also advances to the next image.  Any handled signal
        re-opens the (possibly new) current image.
        """
        status = 1
        if signal == "Left":
            self.moveIndexToLeft()
            print(signal + " Pressed on " + self.name)
        elif signal == "Right":
            self.moveIndexToRight()
            print(signal + " Pressed on " + self.name)
        elif signal == self.findKey(menuMapping, self.name):
            print("Menu button pressed. Opening Next image")
            self.moveIndexToRight()
        else:
            status = -1
            print("Command not found inside class and status is -1")
        if status == 1:
            self.openImage()
        return status
    def getImagePath(self, distance):
        """Return the distance folder path joined with this folder's name."""
        return distantToFolderMappings[distance]+self.name
    def get_files_in_directory(self, directory):
        """List full paths of the plain files directly inside *directory*."""
        return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
    def findKey(self, dictionary, value):
        """Reverse lookup: return the first key mapped to *value*, else None."""
        for dict_key, dict_value in dictionary.items():
            if dict_value == value:
                return dict_key
        return None
# Currently open Folder, or None while no menu is selected.
currentFolder = None

def createFolder(distance, value):
    """Return a Folder for *value* when it names a known menu, else None."""
    if value not in menuMapping:
        return None
    return Folder(distance, value)
def process_signal(distance, value):
    """Dispatch one remote-button *value*, opening or updating currentFolder.

    With a folder open, the signal is first offered to the folder; if the
    folder rejects it (-1) the value is retried as a menu selection.  With no
    folder open, the value can only be a menu selection.
    """
    selButton = value
    global currentFolder
    if currentFolder is not None:
        status = currentFolder.processIrSignal(selButton)
        if status == -1:
            # Folder did not recognise the button: maybe it selects a menu.
            print("Status found -1 from processSignal")
            newFolder = createFolder(distance, value)
            if newFolder is not None:
                currentFolder = newFolder
    else:
        currentFolder = createFolder(distance, value)
        if currentFolder is not None:
            print(menuMapping[value]+" Menu Selected")
        else:
            print("Command not found")
# Interactive driver: reads simulated remote-button names from stdin.
# Outer loop resets the viewing distance to the 10-feet default; the inner
# loop handles menu/image navigation until "Exit" with no folder open.
while True:
    distance = 10
    while True:
        inData = input("Enter a string: ")
        if inData == "Exit" and currentFolder is None:
            # No folder open: fall back to distance selection.
            print("Exiting to select distance")
            currentFolder = None
            break
        elif inData == "Exit" and currentFolder is not None:
            # Folder open: close it but stay at the current distance.
            print("Exiting to Select folder")
            currentFolder = None
        elif inData in distanceMapping:
            # Distance buttons switch the feet setting and close any folder.
            distance = distanceMapping[inData]
            print(str(distance) + " Selected")
            currentFolder = None
        else:
            process_signal(distance, inData)
            print("Action taken with "+str(distance)+" feet distance")
| Rakibuz/Robotics_HCI | Raspberry Pi/hypothetical_final_0.1.py | hypothetical_final_0.1.py | py | 4,522 | python | en | code | 0 | github-code | 36 |
957425679 | from flask import render_template, flash, redirect, url_for, request, current_app
from flask_login import login_required, current_user
from apps.app import db
from apps.model import Task, Kind
from apps.todolist import todolist
from apps.todolist.course import AddCategory, AddToDoList, ChangeToDoList
# User View Task List
@todolist.route('/list/', methods=['GET', 'POST'])
@login_required
def list_1():
    """Render the current user's task list page with an add-task form."""
    list = AddToDoList()
    # NOTE(review): ``page`` is read but never passed to paginate(); the page
    # number presumably comes from Flask-SQLAlchemy's implicit request-arg
    # handling -- confirm, otherwise pagination is stuck on page 1.
    page = int(request.args.get('page', 1))
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                          error_out=False)
    return render_template('todolist/list.html', todoObj=todolistPage, form=list)
@todolist.route('/add/', methods=['POST', 'GET'])
@login_required
def add_list():
    """Create a new task for the current user from the submitted form."""
    list = AddToDoList()
    page = int(request.args.get('page', 1))
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                          error_out=False)
    if list.validate_on_submit():
        # get users submission
        Title = list.Title.data
        content = list.content.data
        kind_id = list.category.data
        urgent = list.urgent.data
        deadline = list.deadline.data
        # add to database
        status = False
        add = Task(task_Title=Title, task_content=content,
                   category_id=kind_id,
                   task_urgent=urgent, task_deadline=deadline, user_id=current_user.id, task_status=status)
        # NOTE(review): no explicit db.session.commit() here -- presumably the
        # app commits per-request elsewhere; confirm the row is persisted.
        db.session.add(add)
        flash('Add task successfully!', category='success')
        return redirect(url_for('todolist.add_list'))
    else:
        # NOTE(review): this branch also runs for a plain GET, so the error
        # flash appears on first page load -- confirm intended.
        flash('Cannot add this task!', category='error')
    return render_template('todolist/list.html', todoObj=todolistPage, form=list)
@todolist.route('/change/<int:id>', methods=['GET', 'POST'])
def change(id):
    """Edit an existing task: prefill the form, then apply posted changes."""
    list = ChangeToDoList()
    page = int(request.args.get('page', 1))
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                          error_out=False)
    task = Task.query.filter_by(id=id).first()
    list.content.data = task.task_content
    # list.category.data = task.category_id
    # list.urgent.data = task.task_urgent
    # list.deadline.data = task.task_deadline
    # NOTE(review): reads ``task_moduleTitle`` here but ``task_Title`` is used
    # everywhere else in this module -- likely an AttributeError; confirm the
    # Task model actually has this field.
    list.Title.data = task.task_moduleTitle
    list.urgent.data = task.task_urgent
    list.category.data = task.category_id
    if list.validate_on_submit():
        content = request.form.get('content')
        Title = request.form.get('Title')
        category_id = request.form.get('category')
        urgent = request.form.get('urgent')
        task.task_content = content
        task.category_id = category_id
        # NOTE(review): assigns ``task.urgent`` while the model field used
        # elsewhere is ``task_urgent`` -- this update is probably lost.
        task.urgent = urgent
        task.task_Title = Title
        db.session.add(task)
        flash('Task has been changed', category='success')
        return redirect(url_for('todolist.list_1'))
    else:
        flash('Changed failed', category='error')
    return render_template('todolist/change.html', form=list, todoObj=todolistPage)
@todolist.route('/delete/<int:id>/')
@login_required
def delete(id):
    """Remove the task with the given id and return to the task list."""
    doomed = Task.query.filter_by(id=id).first()
    db.session.delete(doomed)
    flash("Task has been deleted successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/deletec/<int:id>/')
@login_required
def deletec(id):
    """Delete a category (Kind) and re-render the category page."""
    kind = Kind.query.filter_by(id=id).first()
    # task.task_status = True
    db.session.delete(kind)
    flash("Category has been deleted successfully.", category='success')
    category_list = AddCategory()
    KindPage = Kind.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                      error_out=False)
    return render_template('todolist/category.html', kindObj=KindPage, form=category_list)
@todolist.route('/hasdone/<int:id>/')
@login_required
def hasdone(id):
    """Mark the task as completed (task_status=True)."""
    task = Task.query.filter_by(id=id).first()
    task.task_status = True
    db.session.add(task)
    flash("Task status has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))


@todolist.route('/hasnotdone/<int:id>/')
@login_required
def hasnotdone(id):
    """Mark the task as not completed (task_status=False)."""
    task = Task.query.filter_by(id=id).first()
    task.task_status = False
    db.session.add(task)
    flash("Task status has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/urgent/<int:id>/')
@login_required
def urgent(id):
    """Set the task's priority to urgent (task_urgent=1)."""
    task = Task.query.filter_by(id=id).first()
    task.task_urgent = 1
    db.session.add(task)
    flash("Task priority has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))


@todolist.route('/noturgent/<int:id>/')
@login_required
def noturgent(id):
    """Set the task's priority to not urgent (task_urgent=2)."""
    task = Task.query.filter_by(id=id).first()
    task.task_urgent = 2
    db.session.add(task)
    flash("Task priority has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/search/<int:id>/', methods=['POST', 'GET'])
@login_required
def search(id):
    """Show the single task whose primary key equals *id*."""
    task = Task.query.filter_by(id=id).all()
    return render_template('todolist/search.html', todoObj=task)


@todolist.route('/searchTitle/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Title(id):
    """Search user *id*'s tasks by exact title (form field 'Title')."""
    Title = request.form.get('Title')
    task = Task.query.filter_by(user_id=id, task_Title=Title).paginate(per_page=current_app.config['C_PAGE'],
                                                                       error_out=False)
    return render_template('todolist/search.html', todoObj=task)


@todolist.route('/searchContent/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Content(id):
    """Search user *id*'s tasks by exact content (form field 'content')."""
    content = request.form.get('content')
    task = Task.query.filter_by(user_id=id, task_content=content).paginate(per_page=current_app.config['C_PAGE'],
                                                                           error_out=False)
    return render_template('todolist/search.html', todoObj=task)


@todolist.route('/searchPriority/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Urgent(id):
    """Search user *id*'s tasks by priority value (form field 'priority')."""
    priority = request.form.get('priority')
    task = Task.query.filter_by(user_id=id, task_urgent=priority).paginate(per_page=current_app.config['C_PAGE'],
                                                                           error_out=False)
    return render_template('todolist/search.html', todoObj=task)
@todolist.route('/category/', methods=['POST', 'GET'])
@login_required
def category():
    """Render the current user's category page with an add-category form."""
    category_list = AddCategory()
    KindPage = Kind.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                      error_out=False)
    return render_template('todolist/category.html', kindObj=KindPage, form=category_list)
@todolist.route('/addcategory/', methods=['POST', 'GET'])
@login_required
def add_category():
    """Create a new category (Kind) owned by the current user."""
    list = AddCategory()
    KindPage = Kind.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['PER_PAGE'],
                                                                      error_out=False)
    if list.validate_on_submit():
        # get category submission
        name = list.category.data
        user_id = current_user.id
        # add to database
        add = Kind(name=name, user_id=user_id)
        db.session.add(add)
        flash('Add category successfully!', category='success')
        return redirect(url_for('todolist.category'))
    else:
        # NOTE(review): runs on plain GET as well, flashing an error before
        # the user has submitted anything -- confirm intended.
        flash('Cannot add this category', category='error')
    return render_template('todolist/category.html', kindObj=KindPage, form=list)
@todolist.route('/complete/<int:id>/', methods=['POST', 'GET'])
@login_required
def complete(id):
    """Render the completed-tasks page (template filters by status)."""
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['C_PAGE'],
                                                                          error_out=False)
    return render_template('todolist/complete.html', todoObj=todolistPage)


@todolist.route('/uncomplete/<int:id>/', methods=['POST', 'GET'])
@login_required
def uncomplete(id):
    """Render the uncompleted-tasks page (template filters by status)."""
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(per_page=current_app.config['C_PAGE'],
                                                                          error_out=False)
    return render_template('todolist/uncomplete.html', todoObj=todolistPage)
| INversionNan/Flask | apps/todolist/base.py | base.py | py | 8,802 | python | en | code | 0 | github-code | 36 |
73917308585 | from __future__ import print_function, division
import vim
import subprocess
# Absolute path to the external formatter this script pipes the selection to.
python_format = '/home/ekern/python_format/python_format.py'

if __name__ == '__main__':
    # Visual-selection line range (0-based) in the current vim buffer.
    start = vim.current.range.start
    end = vim.current.range.end
    buf = vim.current.buffer
    # Grow the range so backslash-continued logical lines are kept whole.
    while buf[end].rstrip().endswith('\\'):
        end += 1
    # Fixed off-by-one: the original tested ``start >= 0`` and so could index
    # buf[-1] (the *last* buffer line) when start reached 0, wrongly extending
    # the range.  Stop while start is still positive.
    while (start > 0) and buf[start - 1].rstrip().endswith('\\'):
        start -= 1
    text = ''.join([l + '\n' for l in buf[start : end + 1]])
    # NOTE(review): communicate(input=str) assumes Python 2 text pipes, in
    # line with the __future__ imports at the top of this file -- confirm
    # before running under Python 3 (which requires bytes or text=True).
    p = subprocess.Popen(['python', python_format], stdout = subprocess.PIPE,
            stderr = subprocess.PIPE, stdin = subprocess.PIPE)
    stdout, stderr = p.communicate(input = text)
    if not stdout and not stderr:
        print('No output from python_format')
    elif stderr:
        print('Error from python_format:', stderr)
    elif stdout != text:
        # Replace the selected lines with the formatter's output via a single
        # substitute command, then write the new lines back into the buffer.
        lines = stdout.splitlines()
        vim.command('%d,%ds/%s/%s/g' %
                (start + 1, end + 1, '^.*\\n' * (end - start + 1),
                 '\\r' * len(lines)))
        for i, line in enumerate(lines):
            buf[start + i] = line
| ekedaigle/python-format | python_format_vim.py | python_format_vim.py | py | 1,090 | python | en | code | 0 | github-code | 36 |
32409336010 | import time
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from ..models import *
from ..utils import AverageMeter, calculate_accuracy, Logger, MyDataset
from visdom import Visdom
# Dataset names accepted by TrainPipline.datasets().
DatasetsList = ['CIFAR10', 'CIFAR100']

# Model name -> constructor; each constructor accepts ``num_classes``.
ModelList = {'AlexNet': AlexNet, 'alexnet': alexnet,
             'DenseNet': DenseNet, 'densenet121': densenet121, 'densenet169': densenet169,
             'densenet201': densenet201, 'densenet161': densenet161,
             'Inception3': Inception3, 'inception_v3': inception_v3,
             'ResNet': ResNet, 'resnet18': resnet18, 'resnet34': resnet34,
             'resnet50': resnet50, 'resnet101': resnet101, 'resnet152': resnet152,
             'SqueezeNet': SqueezeNet, 'squeezenet1_0': squeezenet1_0, 'squeezenet1_1': squeezenet1_1,
             'VGG': VGG, 'vgg11': vgg11, 'vgg11_bn': vgg11_bn, 'vgg13': vgg13, 'vgg13_bn': vgg13_bn,
             'vgg16': vgg16, 'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn, 'vgg19': vgg19,
             'se_resnet18': se_resnet18, 'se_resnet34': se_resnet34, 'se_resnet50': se_resnet50,
             'se_resnet101': se_resnet101, 'se_resnet152': se_resnet152,
             'hr18_net': hr18_net,
             'mobilenetv3': mobilenetv3,
             'shufflenetv2': shufflenetv2}
class TrainPipline(object):
    """Image-classification training pipeline: dataset loading, model
    construction, SGD training with CSV logging and Visdom plotting, and
    validation.  Configured from a nested ``opt`` dict.
    """
    def __init__(self, opt):
        # Filesystem layout.
        self.root_path = opt['path']['root_path']
        self.result_path = os.path.join(self.root_path, opt['path']['result_path'])
        self.datasets_path = os.path.join(self.root_path, opt['path']['datasest_path'])
        # Model hyper-parameters.
        self.n_classes = opt['model']['n_classes']
        self.momentum = opt['model']['momentum']
        self.weight_decay = opt['model']['weight_decay']
        self.nesterov = opt['model']['nesterov']
        # Training schedule.
        self.n_epochs = opt['train']['n_epochs']
        self.batch_size = opt['train']['batch_size']
        self.learning_rate = opt['train']['learning_rate']
        self.n_threads = opt['train']['n_threads']
        self.checkpoint = opt['train']['checkpoint']
        self.no_cuda = opt['cuda']['no_cuda']
        # Filled in by model().
        self.model_name = ''
        self.model_ft = ''
        # Visdom client; all plot calls are also replayed into this log file.
        self.visdom_log_file = os.path.join(self.result_path, 'log_files', 'visdom.log')
        self.vis = Visdom(port=8097,
                          log_to_filename=self.visdom_log_file,
                          env='myTest_1')
        self.vis_loss_opts = {'xlabel': 'epoch',
                              'ylabel': 'loss',
                              'title': 'losses',
                              'legend': ['train_loss', 'val_loss']}
        self.vis_tpr_opts = {'xlabel': 'epoch',
                             'ylabel': 'tpr',
                             'title': 'val_tpr',
                             'legend': ['tpr@fpr10-2', 'tpr@fpr10-3', 'tpr@fpr10-4']}
        self.vis_epochloss_opts = {'xlabel': 'epoch',
                                   'ylabel': 'loss',
                                   'title': 'epoch_losses',
                                   'legend': ['train_loss', 'val_loss']}

    def datasets(self, data_name=None):
        """Return (training_data, val_data) for *data_name*.

        'CIFAR10'/'CIFAR100' use torchvision downloads; any other value falls
        through the assert, so the custom train.txt/val.txt branch below is
        unreachable as written.
        """
        assert data_name in DatasetsList
        if data_name == 'CIFAR10':
            # NOTE(review): CIFAR10 uses download=False but CIFAR100 uses
            # download=True -- CIFAR10 must already exist on disk; confirm.
            training_data = datasets.CIFAR10(root='./modelzoo/datasets/', train=True, download=False,
                                             transform=transforms.Compose([
                                                 # transforms.RandomResizedCrop(224),
                                                 transforms.Pad(96),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))]))
            val_data = datasets.CIFAR10(root='./modelzoo/datasets/', train=False, download=False,
                                        transform=transforms.Compose([
                                            # transforms.RandomResizedCrop(224),
                                            transforms.Pad(96),
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.1307,), (0.3081,))]))
        elif data_name == 'CIFAR100':
            training_data = datasets.CIFAR100(root='./modelzoo/datasets/', train=True, download=True,
                                              transform=transforms.Compose([
                                                  # transforms.RandomResizedCrop(224),
                                                  transforms.Pad(96),
                                                  transforms.ToTensor(),
                                                  transforms.Normalize((0.1307,), (0.3081,))]))
            val_data = datasets.CIFAR100(root='./modelzoo/datasets/', train=False, download=True,
                                         transform=transforms.Compose([
                                             # transforms.RandomResizedCrop(224),
                                             transforms.Pad(96),
                                             transforms.ToTensor(),
                                             transforms.Normalize((0.1307,), (0.3081,))]))
        else:
            train_txt_path = os.path.join(self.datasets_path, 'train.txt')
            val_txt_path = os.path.join(self.datasets_path, 'val.txt')
            my_transform = transforms.Compose([transforms.Resize(224),
                                               transforms.ToTensor()])
            training_data = MyDataset(train_txt_path, transform=my_transform)
            val_data = MyDataset(val_txt_path, transform=my_transform)
        return training_data, val_data

    def model(self, model_name='resnet18', model_path=None):
        """Build the network named *model_name* and initialise its weights."""
        assert model_name in ModelList
        self.model_name = model_name
        # model_ft = resnet18(pretrained=True)
        # num_ftrs = model_ft.fc.in_features
        # model_ft.fc = nn.Linear(num_ftrs, 10)
        self.model_ft = ModelList[self.model_name](num_classes=self.n_classes)
        if model_path is not None:
            # NOTE(review): load_state_dict expects a state-dict object, not a
            # path -- presumably should be torch.load(model_path); confirm.
            self.model_ft.load_state_dict(model_path)
        else:
            self.model_ft.apply(weights_init)
        return self.model_ft

    def train(self, training_data, val_data, model):
        """Run the full training loop followed by one validation pass."""
        # data init
        train_loader = DataLoader(training_data,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  # num_workers=self.n_threads,
                                  pin_memory=True)

        # result writer
        train_logger = Logger(os.path.join(self.result_path, self.model_name + '_train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(os.path.join(self.result_path, self.model_name + '_train_batch.log'),
                                    ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
        val_logger = Logger(os.path.join(self.result_path, self.model_name + '_test.log'),
                            ['time', 'loss', 'acc'])

        # optimizer init
        optimizer = optim.SGD(model.parameters(),
                              lr=self.learning_rate,
                              momentum=self.momentum,
                              weight_decay=self.weight_decay,
                              nesterov=self.nesterov)
        # loss init
        criterion = nn.CrossEntropyLoss()
        print(model)
        if not self.no_cuda:
            # Data-parallel over four fixed GPU ids.
            model = nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
        # start train
        for i in range(0, self.n_epochs + 1):
            self.train_epoch(i, train_loader, model, criterion, optimizer,
                             train_logger, train_batch_logger)
        self.validation(val_data, model, criterion, val_logger)

    def train_epoch(self, epoch, data_loader, model, criterion, optimizer,
                    epoch_logger, batch_logger):
        """Train one epoch, logging per-batch and per-epoch metrics and
        saving a checkpoint every ``self.checkpoint`` epochs.
        """
        print('train at epoch {}'.format(epoch))
        # set model to train mode
        model.train()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        accuracies = AverageMeter()
        end_time = time.time()

        for i, (inputs, targets) in enumerate(data_loader):
            data_time.update(time.time() - end_time)
            if not self.no_cuda:
                model = model.cuda()
                inputs = inputs.cuda()
                targets = targets.cuda()
            # inputs = Variable(inputs)
            # targets = Variable(targets)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            acc = calculate_accuracy(outputs, targets)
            # NOTE(review): ``loss.data`` is legacy Variable-era access;
            # modern code would use loss.item().
            losses.update(loss.data, inputs.size(0))
            accuracies.update(acc, inputs.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end_time)
            end_time = time.time()

            batch_logger.log({
                'epoch': epoch,
                'batch': i + 1,
                'iter': (epoch - 1) * len(data_loader) + (i + 1),
                'loss': losses.val,
                'acc': accuracies.val,
                'lr': optimizer.param_groups[0]['lr']
            })
            self.vislog_batch(i, losses.val)
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
                epoch,
                i + 1,
                len(data_loader),
                batch_time=batch_time,
                data_time=data_time,
                loss=losses,
                acc=accuracies))

        epoch_logger.log({
            'epoch': epoch,
            'loss': losses.avg,
            'acc': accuracies.avg,
            'lr': optimizer.param_groups[0]['lr']
        })

        if epoch % self.checkpoint == 0:
            save_file_path = os.path.join(self.result_path, self.model_name+'save_{}.pth'.format(epoch))
            states = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(states, save_file_path)

    def validation(self, val_data, model, criterion, val_logger):
        """Evaluate *model* on *val_data* and log average loss/accuracy."""
        val_loader = torch.utils.data.DataLoader(
            val_data,
            batch_size=self.batch_size,
            shuffle=False,
            # num_workers=self.n_threads,
            pin_memory=True)
        model.eval()
        losses = AverageMeter()
        accuracies = AverageMeter()
        end_time = time.time()
        # NOTE(review): no torch.no_grad() here -- gradients are tracked
        # during evaluation, wasting memory; confirm whether intentional.
        for i, (inputs, targets) in enumerate(val_loader):
            if not self.no_cuda:
                inputs = inputs.cuda()
                targets = targets.cuda()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            acc = calculate_accuracy(outputs, targets)
            losses.update(loss.data, inputs.size(0))
            accuracies.update(acc, inputs.size(0))
        test_time = time.time() - end_time
        val_logger.log({'time': test_time,
                        'loss': losses.avg,
                        'acc': accuracies.avg})
        print('TestTime {test_time:.3f}\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
            test_time=test_time,
            loss=losses,
            acc=accuracies))

    def vislog_batch(self, batch_idx,loss):
        """Append one point to the Visdom 'losses' plot for this batch."""
        x_value = batch_idx
        y_value = loss
        self.vis.line([y_value], [x_value],
                      name='train_loss',
                      win='losses',
                      update='append')
        # NOTE(review): plots the constant 2 as 'test_loss' -- apparently a
        # placeholder until a real validation series is wired in; confirm.
        self.vis.line([2], [x_value],
                      name='test_loss',
                      win='losses',
                      update='append')
        self.vis.update_window_opts(win='losses', opts=self.vis_loss_opts)
def weights_init(m):
    """Per-module weight initialiser for ``Module.apply``.

    Conv2d: Kaiming-normal (fan_out) weights, zero bias.
    BatchNorm2d: weight one, bias zero.
    Linear: N(0, 0.01) weights, zero bias.
    Other module types are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out')
        # nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
        return
    if isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)
        return
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
| jimmy0087/model_zoo_torch | modelzoo/libs/train/train.py | train.py | py | 13,106 | python | en | code | 0 | github-code | 36 |
3274572845 | T = int(input())
# For each test case: keep only "gem" numbers (divisible by 4, 6, 7, 9 or
# 11), enumerate every subset by bitmask, and report the largest subset sum
# that fits within the budget m.
for t in range(1, T+1):
    n, m = map(int, input().split())
    li = list(map(int, input().split()))
    rli = []
    for i in li:
        if i%4 == 0 or i%6 == 0 or i%7 == 0 or i%9 == 0 or i%11 == 0: # keep values divisible by any gem multiple
            rli.append(i) # collect into the filtered list
    result = []
    for i in range(1 << len(rli)): # starts at 0, so the empty subset is included
        subset = []
        for j in range(len(rli)):
            if i & (1 << j):
                subset.append(rli[j])
        if sum(subset) <= m: # keep only sums within the budget
            result.append(sum(subset))
    result.sort() # ascending sort; the maximum ends up last
    print(f'#{t} {result[-1]}') #정렬후 예산으로 살수 있는 보석의 최고값 프린트 | mihyeon1234/TIL | 알고수업/부울경_2반_이미현/Algo2_부울경_2반_이미현.py | Algo2_부울경_2반_이미현.py | py | 774 | python | ko | code | 0 | github-code | 36 |
37744864931 | import tagNtokenize
import correlation_cf as cf
import time
from os.path import dirname, join
import pickle
current_dir = dirname(__file__)
file_path = join(current_dir, 'question_answers.pickle')
# Q/A knowledge base loaded once at import.  Structure (inferred from
# getResponse usage -- confirm against the pickle): a dict mapping each
# question to a dict holding 'ans' plus sub-question -> answer entries.
with open(file_path, 'rb') as f:
    question_answers= pickle.load(f)
current_milli_time = lambda: int(round(time.time() * 1000))
def getResponse(uIn, sub):
    """Pick the best-matching canned answer for the user input.

    uIn -- raw user utterance
    sub -- current conversation context: "" (none), "1" (inside a
           follow-up exchange) or a previously matched question key
    Returns [answer_text, new_sub_context].
    """
    global question_answers
    tagged = tagNtokenize.tagNtokenize(uIn, True)
    max_u = 0.6  # minimum correlation required to accept a match
    str_ans = "Sorry I don't quite know how to answer that!"
    # If in subquestions
    if sub == "1":
        str_ans = "That's interesting"
        sub = ""
    elif sub != "":
        # Search the follow-up questions attached to the previous match.
        for quest in question_answers[sub]:
            tagged2 = tagNtokenize.tagNtokenize(quest)
            temp_u = cf.correlate(tagged, tagged2)
            if temp_u > max_u:
                try:
                    str_ans = question_answers[sub][quest]
                    max_u = temp_u
                except (KeyError, TypeError):
                    # Malformed sub-question table; keep the best answer so far.
                    print("Error in Sub referencing")
        sub = ""
    # Always scan the top-level questions as well; a stronger match here
    # overrides any sub-question answer and sets the new context.
    for quest in question_answers:
        tagged2 = tagNtokenize.tagNtokenize(quest)
        temp_u = cf.correlate(tagged, tagged2)
        if temp_u > max_u:
            max_u = temp_u
            str_ans = question_answers[quest]['ans']
            sub = quest
    if max_u <= 0.6:
        # Nothing matched well enough: fall back to asking about a named
        # entity found in the input, if any.
        for t in tagged:
            if "NN" in t[1]:
                namedEntity = tagNtokenize.NER([(t[0], t[1])])
                for n in namedEntity:
                    if "PERSON" in n:
                        str_ans = "Who is " + t[0] + "?"
                        sub = ""
                    elif "GPE" in n:
                        str_ans = "Where is " + t[0] + "?"
                        # NOTE(review): "!" looks like a sentinel context
                        # value; confirm how callers interpret it.
                        sub = "!"
    return [str_ans, sub]
21683160230 | from flask import Flask, make_response, jsonify, request
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import requests
import numpy as np
import pandas as pd
import json
# Flask application exposing the /getDB recommendation endpoint.
app = Flask(__name__)
rows = []  # NOTE(review): appears unused; the handler shadows it with a local
@app.route('/getDB')
def emplace():
    """Recommend products with similar ingredients to the query product.

    Query string: ?name=<product_name>
    Returns a JSON list of similar products whose price and calories are
    within +/-10% of the query product, sorted by price (descending), or
    a 404 payload when the product name is unknown.
    """
    prod_name = request.args.get('name')
    res = requests.get('http://43.201.114.241:8080/userinfo/product/')
    rows = res.json()
    # Output column -> source field name in the API payload. Replaces 23
    # parallel lists + per-field append loops (error-prone duplication).
    field_map = {
        "product_name": "product_name",
        "ingredients_list": "ingredient",
        "calories": "calory",
        "price": "price",
        "amount": "amount",
        "carbohydrate": "carbohydrate",
        "cholesterol": "cholesterol",
        "company": "company",
        "cooking_type": "cooking_type",
        "fat": "fat",
        "id": "id",
        "primary_type": "primary_type",
        "product_category": "product_category",
        "product_image": "product_image",
        "protein": "protein",
        "sat_fat": "sat_fat",
        "secondary_type": "secondary_type",
        "serving_size": "serving_size",
        "sodium": "sodium",
        "specific": "specific",
        "sugar": "sugar",
        "trans_fat": "trans_fat",
        "vegan_option": "vegan_option",
    }
    data = pd.DataFrame(
        {col: [row[src] for row in rows] for col, src in field_map.items()})
    # Ingredient-text similarity with 1..3-gram counts.
    cnt_vector = CountVectorizer(ngram_range=(1, 3))
    vector_categ = cnt_vector.fit_transform(data['ingredients_list'])
    # Row i holds column indices sorted by decreasing similarity to item i.
    categ_sim = cosine_similarity(
        vector_categ, vector_categ).argsort()[:, ::-1]
    target_idx = data[data['product_name'] == prod_name].index.values
    if len(target_idx) == 0:
        # Unknown product name: report 404 instead of crashing (IndexError).
        return make_response(jsonify({'error': 'product not found'}), 404)
    target_idx = target_idx[:1]
    sim_idx = categ_sim[target_idx, :].reshape(-1)
    sim_idx = sim_idx[sim_idx != target_idx]
    result = data.iloc[sim_idx].sort_values('price', ascending=False)
    data = data.iloc[target_idx[0]]
    # Keep candidates within +/-10% of the query's price and calories.
    result = result[(result['price'] > (data.price * 0.9)) &
                    (result['price'] < (data.price * 1.1))]
    result = result[(result['calories'] > (data.calories * 0.9)) &
                    (result['calories'] < (data.calories * 1.1))]
    result = result.to_json(orient='records', force_ascii=False)
    result = json.loads(result)
    return make_response(jsonify(result), 200)
if __name__ == '__main__':
    # Listen on all interfaces (development server).
    app.run(host='0.0.0.0')
| jane-k/RecommendationSystem | app.py | app.py | py | 4,148 | python | en | code | 0 | github-code | 36 |
20857956907 | # https://www.hackerrank.com/challenges/recursive-digit-sum/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'superDigit' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. STRING n
# 2. INTEGER k
#
def getAns(num):
    """Recursively collapse num to its digital root (repeated digit sum).

    A single digit is returned unchanged; otherwise the digits are summed
    and the function recurses on the (strictly smaller) result.
    """
    if num <= 9:
        return num
    # sum() over the digits; the original shadowed the builtin `sum`.
    digit_total = sum(int(digit) for digit in str(num))
    return getAns(digit_total)
def superDigit(n, k):
    """Return the super digit of the number formed by writing n k times.

    n -- the digit string, k -- repetition count.
    digit_sum(n repeated k times) == k * digit_sum(n), so the huge
    concatenated string never needs to be built.
    """
    digit_sum = sum(int(ch) for ch in str(n))
    # The stray debug print(q) was removed: it corrupted the judged stdout.
    return getAns(digit_sum * k)
if __name__ == '__main__':
    # HackerRank harness: read "n k" from stdin, write the result to
    # the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = first_multiple_input[0]
    k = int(first_multiple_input[1])
    result = superDigit(n, k)
    fptr.write(str(result) + '\n')
    fptr.close()
| manu-karenite/Problem-Solving | Recursion/superNumber.py | superNumber.py | py | 920 | python | en | code | 0 | github-code | 36 |
37636221210 | # A full binary tree is a binary tree where each node has exactly 0 or 2 children.
# Return a list of all possible full binary trees with N nodes. Each element of the answer is the root node of one possible tree.
# Each node of each tree in the answer must have node.val = 0.
# You may return the final list of trees in any order.
# Example 1:
# Input: 7
# Output: [[0,0,0,null,null,0,0,null,null,0,0],[0,0,0,null,null,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,null,null,null,null,0,0],[0,0,0,0,0,null,null,0,0]]
# Explanation:
# Note:
# 1 <= N <= 20
class Solution:
    def allPossibleFBT(self, N: 'int') -> 'List[TreeNode]':
        """Return roots of every full binary tree with N nodes (all vals 0)."""
        if N % 2 == 0:
            # A full binary tree always has an odd number of nodes.
            return []
        return self.helper(N)
    def helper(self,n):
        """Build all full binary trees with n nodes; each result is deep-copied."""
        root = TreeNode(0)
        remain = n-1
        if remain==0:
            return [root]
        ans = []
        # Odd number of nodes on the left, the remaining odd count on the right.
        for left in range(1,remain,2):
            left_subtrees = self.helper(left)
            right_subtrees = self.helper(remain-left)
            # NOTE: the inner loop variable shadows the outer `left` counter;
            # harmless here because the range iterator keeps its own state.
            for left in left_subtrees:
                for right in right_subtrees:
                    # `root` is reused as scratch space; get_copy snapshots it.
                    root.left = left
                    root.right = right
                    ans.append(self.get_copy(root))
        return ans
    def get_copy(self,root):
        """Return a structural deep copy of the tree rooted at `root`."""
        tmproot = TreeNode(0)
        def helper(node1,node2):
            # Mirror node2's children onto node1, recursively.
            if node2.left:
                tmpleft = TreeNode(0)
                node1.left = tmpleft
                helper(tmpleft,node2.left)
            if node2.right:
                tmpright = TreeNode(0)
                node1.right = tmpright
                helper(tmpright,node2.right)
        helper(tmproot,root)
        return tmproot
class Solution:
    def allPossibleFBT(self, N: 'int') -> 'List[TreeNode]':
        """Return roots of every full binary tree with N nodes (all vals 0)."""
        if N % 2 == 0:
            # A full binary tree always has an odd node count.
            return []
        self.cache = {}
        return self.helper(N)
    def helper(self, n):
        """Memoized list of all full binary trees with n nodes.

        Subtrees are shared between results (no copying), exactly as in
        the memoized original.
        """
        if n not in self.cache:
            if n == 1:
                self.cache[n] = [TreeNode(0)]
            else:
                trees = []
                # Split the n-1 non-root nodes: odd count left, rest right.
                for left_count in range(1, n - 1, 2):
                    for left_tree in self.helper(left_count):
                        for right_tree in self.helper(n - 1 - left_count):
                            node = TreeNode(0)
                            node.left = left_tree
                            node.right = right_tree
                            trees.append(node)
                self.cache[n] = trees
        return self.cache[n]
| sunnyyeti/Leetcode-solutions | 894_All_Possible_Full_Binary_Trees.py | 894_All_Possible_Full_Binary_Trees.py | py | 2,512 | python | en | code | 0 | github-code | 36 |
9416483162 | # Cemantix game solver
import logging
import os
import yaml
from src import *
# Run from the script's own directory so relative paths (config, logs,
# artifacts) resolve regardless of the caller's cwd.
os.chdir(os.path.abspath(os.path.dirname(__file__)))
with open("config.yaml", "r") as config_file:
    config = yaml.load(config_file, Loader=yaml.FullLoader)
# One timestamped log file per run. `dt` is presumably a datetime alias
# exported by `src` -- TODO confirm.
logging.basicConfig(filename=f"./logs/cemantix_{dt.datetime.now().strftime(format='%Y-%m-%d_%Hh%M')}.log", level=logging.INFO)
def main():
    """Load the embedding model, set up the game and run the chosen agent."""
    model = load_model(embedding=config['word2vec'])
    game = CemantixGame(executable_path="./artifacts/chromedriver.exe", game_url=config["game_url"])
    # Dispatch table replaces the if/elif chain; unknown types still raise.
    agent_classes = {
        "bandit": CemantixBandit,
        "gangster": CemantixGangster,
    }
    agent_type = config["agent_type"]
    if agent_type not in agent_classes:
        raise ValueError("Unknown agent_type")
    agent = agent_classes[agent_type](model=model, **config["agent_params"])
    game.play_strategy(agent, max_iter=config["max_iter"])
    game.save_result()
    game.end()
main()
| CorentinMary/cemantix | main.py | main.py | py | 922 | python | en | code | 0 | github-code | 36 |
12530669262 | import torch
from torch import nn
from .strategy import Strategy
from .utils import ner_predict, re_predict
class EntropySampling(Strategy):
    """Active-learning strategy that queries the pool examples whose model
    predictions have the highest entropy (i.e. the most uncertain ones)."""

    def __init__(self, annotator_config_name, pool_size, setting: str = 'knn',
                 engine: str = 'gpt-35-turbo-0301', reduction: str = 'mean'):
        super().__init__(annotator_config_name, pool_size, setting, engine)
        # How token-level entropies are aggregated per example (NER only).
        assert reduction in ['mean', 'sum', 'max']
        self.reduction = reduction

    def query(self, args, k: int, model: nn.Module, features):
        """Return the indices (into `features`) of the k most uncertain
        pool examples, ranked by predictive entropy."""
        pool_indices = self._get_pool_indices()
        pool_features = [features[i] for i in pool_indices]
        if self.task_type == 'ner':
            pred_logits = ner_predict(args, pool_features, model)
            uncertainties = []
            for logit in pred_logits:
                prob = torch.softmax(logit, dim=-1)
                entropy = torch.special.entr(prob).sum(dim=-1)  # per-token entropy
                if self.reduction == 'mean':
                    uncertainties.append(entropy.mean())
                elif self.reduction == 'sum':
                    uncertainties.append(entropy.sum())
                else:  # 'max' (the __init__ assert guarantees this)
                    uncertainties.append(entropy.max())
            uncertainties = torch.stack(uncertainties)
        elif self.task_type == 're':
            pred_logits = re_predict(args, pool_features, model)
            prob = torch.softmax(pred_logits, dim=-1)
            # One logit vector per example: entropy is already per-example.
            uncertainties = torch.special.entr(prob).sum(dim=-1)
        else:
            # Replaces the placeholder message 'tbd.' with a descriptive one.
            raise ValueError(f'unsupported task type: {self.task_type!r}')
        lab_indices = torch.topk(uncertainties, k=k)[1]
        # Map positions within the pool back to dataset-level indices.
        return [pool_indices[i] for i in lab_indices]
29884394473 | """ Run Length Encoding """
def main():
""" print changed password """
text = input()
collector = []
each_al = ""
result = ""
count = ""
if len(text) > 1:
for jay in range(len(text)-1):
if text[jay] == text[jay+1]:
each_al += text[jay]
else:
each_al += text[jay]
collector.append(each_al)
each_al = ""
count = str(jay)
# บวกชุดสุดท้ายเข้าไป
jay = int(count)
each_al += text[jay+1]
collector.append(each_al)
for k in range(len(collector)):
result += str(len(collector[k]))
result += str(collector[k][0])
else:
result += "1%s"%text
print(result)
main()
| DefinitelyNotJay/ejudge | Run Length Encoding.py | Run Length Encoding.py | py | 806 | python | en | code | 0 | github-code | 36 |
5030031968 | # Um professor quer sortear um dos seus quatro alunos para apagar o quadro.
# Faça um programa que ajude ele, lendo o nome dos alunos e escrevendo na tela o nome do escolhido.
import random
# Draw one of four students at random to erase the board.
print('='*23, '\n QUEM APAGARÁ A LOUSA❓')
print('='*23)
prompts = ('Primeiro nome: ', 'Segundo nome: ', 'Terceiro nome: ', 'Quarto nome: ')
students = [input(prompt) for prompt in prompts]
chosen = random.choice(students)
print('Pois eh...\nFoi tu:\n¯\_(ツ)_/¯ {} ¯\_(ツ)_/¯'.format(chosen))
| hdtorrad/Estudos-Python3 | Só exercícios/ex019-Sortei Apagar lousa.py | ex019-Sortei Apagar lousa.py | py | 484 | python | pt | code | 1 | github-code | 36 |
20465273392 | # -*- coding: utf-8 -*-
# @Project : CrawlersTools
# @Time : 2022/6/21 17:06
# @Author : MuggleK
# @File : proxy.py
import httpx
from loguru import logger
def get_proxies(proxy_url=None, http2=False):
    """Fetch one proxy address from proxy_url and build a proxies mapping.

    :param proxy_url: endpoint returning a plain-text "host:port" proxy
    :param http2: when True, use httpx-style scheme keys ('http://',
                  'https://'); otherwise requests-style keys ('http', 'https')
    :return: proxies dict, or None when no URL is given or the fetch fails
    """
    if not proxy_url:
        return None
    protocol = 'http://'
    try:
        proxy = protocol + httpx.get(proxy_url).text.strip()
        if http2:
            return {protocol: proxy, 'https://': proxy}
        return {"http": proxy, "https": proxy}
    except Exception as err:
        # Best-effort: log and fall through to an implicit None.
        logger.error('获取代理失败:%s', err)
| MuggleK/CrawlersTools | CrawlersTools/requests/proxy.py | proxy.py | py | 697 | python | en | code | 16 | github-code | 36 |
899076815 | import sqlite3
import os
import bamnostic_mod as bn
import argparse
import bisect
import time
#Downloads\Lung\47b982b3-c7ce-4ca7-8c86-c71c15979620\G28588.NCI-H1915.1.bam
#Downloads\Lung\98a0206b-29f5-42d3-957b-6480e2fde185\G20483.HCC-15.2.bam
#Downloads\Lung\18004fb1-89a2-4ba1-a321-a0aa854e98c3\G25210.NCI-H510.1.bam
#Downloads\Lung\47030e40-acbd-4905-939c-d57441d7042e\G25222.NCI-H2171.1.bam
#Downloads\Lung\1357785f-f84b-4688-9b4c-0c2b5472ef51\G27281.RERF-LC-MS.1.bam
#Downloads\Lung\e48ea2ee-1dda-4061-a199-6e22fd2df382\G25212.NCI-H661.1.bam
#Downloads\Lung\f03dbfee-a523-438f-8459-f47f2ff1880f\G25224.NCI-H2066.1.bam
#Downloads\HeadAndNeck\0e67231f-97be-447c-b3b0-a656fc30a62d\G27454.PE_CA-PJ15.2.bam
#Downloads\HeadAndNeck\1acf65a0-0268-4288-9904-33bff618a31d\G27515.PE_CA-PJ41__clone_D2_.2.bam
#Downloads\HeadAndNeck\1f290458-df28-4c78-b73d-0202fb53bb0e\G27220.SCC-4.1.bam
#Downloads\HeadAndNeck\2b507086-977b-4cb7-abd9-83ee4ce9a893\G27489.PE_CA-PJ34__clone_C12_.2.bam
#Downloads\HeadAndNeck\7ed3e895-6826-430d-a39d-338111f16083\G27512.SNU-1214.2.bam
#Downloads\HeadAndNeck\c11aa745-72ea-44ca-b70d-7811c2f244b7\G27533.SNU-1066.2.bam
#Downloads\HeadAndNeck\dc8393c0-7d9e-4040-a91a-5783544cac35\G28853.HSC-4.3.bam
# Command-line interface: database dir, input BAM, sample name, and a flag
# controlling novel-splice discovery.
parser = argparse.ArgumentParser(description='takes the given .bam file and looks through all the reads to construct a count of all exons and splices in the reference splice graphs', usage='splicerSampleProcessor database_directory bam_file sample_name novelSplicesToggle(True|False)')
parser.add_argument("Database", help='The path to where you want to store the database file.')
parser.add_argument("Bam", help='The .bam file to count the reads from')
parser.add_argument("sampleName", help='Name for current sample in the sample table')
parser.add_argument("novelSplices", choices=['True', 'False'], help='Controls whether the program tries to find new splices')
args = parser.parse_args()
os.chdir("D:\\")
#get connection to the sqlite database
conn = sqlite3.connect(args.Database + os.path.sep + 'splice.sqlite', isolation_level=None)
c = conn.cursor()
# Create the per-sample count tables and their indexes if missing.
c.execute('''CREATE TABLE IF NOT EXISTS Sample
            (Sample_ID INTEGER PRIMARY KEY NOT NULL DEFAULT NULL,
            Sample_Name varchar(40) NOT NULL DEFAULT NULL,
            Total_Reads INTEGER NOT NULL DEFAULT NULL,
            Transcript_Reads INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_sample_name ON Sample(sample_name);")
c.execute('''CREATE TABLE IF NOT EXISTS Sample_Attribs
            (Sample_ID INTEGER NOT NULL DEFAULT NULL,
            Attribute varchar(255) NOT NULL DEFAULT NULL,
            Value varchar(255) NOT NULL DEFAULT NULL);''')
c.execute('''CREATE TABLE IF NOT EXISTS Exon_Counts
            (Sample_ID INTEGER NOT NULL DEFAULT NULL,
            SG_Exon_ID INTEGER NOT NULL DEFAULT NULL,
            Count INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_ec_sample_id ON Exon_Counts(sample_id);")
c.execute("CREATE INDEX IF NOT EXISTS idx_ec_exon_id ON Exon_Counts(SG_Exon_ID);")
c.execute('''CREATE TABLE IF NOT EXISTS Splice_Counts
            (Sample_ID INTEGER NOT NULL DEFAULT NULL,
            SG_Splice_ID INTEGER NOT NULL DEFAULT NULL,
            Count INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_sc_sample_id ON Splice_Counts(sample_id);")
c.execute("CREATE INDEX IF NOT EXISTS idx_sc_splice_id ON Splice_Counts(SG_Splice_ID);")
#Find out the next assignable ID for this sample
c.execute("Select MAX(Sample_ID) FROM Sample")
ret = c.fetchone()
prevId = ret[0]
if prevId:
    Sample_Id = int(prevId)+1
else:
    Sample_Id = 1
novelSplices = args.novelSplices
#initialize the splice dictionary
# sDict:  chrom -> {"start-stop": splice id}
# eDict:  chrom -> {position: exon id}  (both exon ends are keyed)
# epDict: chrom -> sorted list of (low, high) exon position tuples
# ecDict/scDict: exon/splice id -> observed read count for this sample
sDict = {}
eDict = {}
epDict = {}
ecDict = {}
scDict = {}
discoverySplices = {}
start_time = time.time()
c.execute("SELECT SG_Splice_ID, Start_Position, Stop_Position, Chromosome FROM SG_Splice")
ret = c.fetchall()
#load the splice dictionary keyed on start-stop with the sg id as the value
for y in range(len(ret)):
    key = str(ret[y][1])+'-'+str(ret[y][2])
    chrom = ret[y][3]
    # Normalize chromosome naming ("1" -> "chr1", mitochondrial "MT" -> "M").
    if not chrom.startswith("chr"):
        chrom = "chr"+chrom
    if chrom == "chrMT":
        chrom = "chrM"
    if chrom not in sDict:
        sDict[chrom] = {}
        if novelSplices == 'True':
            discoverySplices[chrom] = {}
    sDict[chrom][key] = ret[y][0]
c.execute("SELECT SG_Exon_ID, Start_Position, Stop_Position, Chromosome FROM SG_Exon")
ret = c.fetchall()
#load the exon dictionary keyed on the start and the stop with the sg id as the value
for y in range(len(ret)):
    chrom = ret[y][3]
    if not chrom.startswith("chr"):
        chrom = "chr"+chrom
    if chrom == "chrMT":
        chrom = "chrM"
    if chrom not in eDict:
        eDict[chrom] = {}
        epDict[chrom] = []
    #add start
    eDict[chrom][ret[y][1]] = ret[y][0]
    #add stop
    eDict[chrom][ret[y][2]] = ret[y][0]
    #add to tuple exon positions list (flip start and stop to correct if negative strand)
    if ret[y][1] < ret[y][2]:
        epDict[chrom].append((ret[y][1], ret[y][2]))
    else:
        epDict[chrom].append((ret[y][2], ret[y][1]))
#sorted list of all exon start stop tuples keyed on chromosome
for key in epDict:
    epDict[key] = sorted(epDict[key])
#"hg19test.bam"
samfile = bn.AlignmentFile(args.Bam, "rb")
def exonIncrement(start, stop, chro):
    """Return the ids of known exons fully covering the span [start, stop].

    Returns [] when the span runs off the edge of known exons (the read is
    scrapped) or when any lookup fails.  Uses bisect on the per-chromosome
    sorted (low, high) exon tuple list for O(log n) positioning.
    """
    exonIds = []
    try:
        pList = epDict[chro]
        #flip start in stop to correct for negative strand
        if start > stop:
            temp = start
            start = stop
            stop = temp
        #find the index that start belongs at
        idx = bisect.bisect(pList, (start,stop))
        i = idx
        if i == len(pList):
            return([])
        #move down the exons adding the ids of those included in the read
        while (i > -1 and start <= pList[i][1]):
            exonIds.append(eDict[chro][pList[i][0]])
            i-=1
        #ISSUE IF NEVER LOOP****************
        # NOTE(review): if the while loop above never runs, i+1 == idx and the
        # check below compares against the wrong tuple -- confirm intent.
        #if it goes off the end of a known exon add none and scrap the read
        if start < pList[i+1][0]:
            return([])
        i = idx
        looped = False
        #move up the exons adding ids of those included in the read
        while (i < len(pList) and i > -1 and stop >= pList[i][0]):
            exonIds.append(eDict[chro][pList[i][1]])
            i+=1
            looped = True
        #if it goes of the end of a known exon add none and scrap the read
        if looped and stop > pList[i-1][1]:
            return([])
    except Exception:
        # Any malformed lookup (unknown chromosome, missing key) yields
        # whatever ids were collected so far; deliberately best-effort.
        pass
    return(exonIds)
# Main pass over the BAM file: classify every aligned read as a duplicate,
# a junction read (cigar contains 'N') or a plain read, and accumulate
# exon/splice hit counts into ecDict/scDict.
fns = open ('novelSplices.txt', 'w')
i = 0
totalCount = 0
missingAttrCount = 0
tranCount = 0
totalDupeCount = 0
tranDupeCount = 0
totalJRcount = 0
tranJRcount = 0
totalNJRcount = 0
tranNJRcount = 0
exceptionCount = 0
prevRead = ""
# prevExons/prevSplices start as "" -- iterating an empty string yields
# nothing, so the first duplicate check is a harmless no-op.
prevExons = ""
prevSplices = ""
for read in samfile:
    #read does not have an alignment
    if (
        not hasattr(read, 'reference_name') or read.reference_name == None or
        not hasattr(read, 'reference_start') or read.reference_start == None or
        not hasattr(read, 'reference_end') or read.reference_end == None or
        not hasattr(read, 'cigarstring') or read.cigarstring == None or
        not hasattr(read, 'cigar') or read.cigar == None
    ):
        missingAttrCount += 1
        continue
    dupeTag = False
    exonSet = set()
    spliceSet = set()
    readR_S = read.reference_start
    readR_E = read.reference_end
    i+=1
    totalCount += 1
    if totalCount % 1000000 == 0:
        print(f"{totalCount:,d}")
        # NOTE(review): this break stops processing after the first
        # 1,000,000 reads -- looks like a debugging limit; confirm.
        break
    tranBool = False
    cigarString = read.cigarstring
    cigar = read.cigar
    chro = read.reference_name
    if not chro.startswith("chr"):
        chro = "chr"+chro
    if str(readR_S)+"-"+str(readR_E)+"-"+cigarString+"-"+chro == prevRead:
        # Exact duplicate of the previous read: replay its exon/splice hits.
        dupeTag = True
        totalDupeCount += 1
        for exon in prevExons:
            tranBool = True
            ecDict[exon] += 1
        for splice in prevSplices:
            tranBool = True
            scDict[splice] += 1
        if tranBool == True:
            tranDupeCount += 1
    elif "N" in cigarString:
        # Junction read: cigar alternates matched segments and skips.
        totalJRcount += 1
        #initialize the start and stop based on the first junction
        start = readR_S+cigar[0][1]
        stop = start+cigar[1][1]+1
        #exon check from the start of the read to the start of the first splice
        exonSet.update(exonIncrement(readR_S+1, start, chro))
        for x in range(int(len(cigar)/2)):
            #if this is not the first junction adjust the start and stop
            if x != 0:
                start = stop+cigar[x*2][1]-1
                #exon check from the end of the last splice to the beginning of this splice
                exonSet.update(exonIncrement(stop, start, chro))
                stop = start+cigar[x*2+1][1]+1
            #check if the splice is known and count it if so
            try:
                if str(start)+"-"+str(stop) in sDict[chro] or str(stop)+"-"+str(start) in sDict[chro]:
                    spliceID = sDict[chro][str(start)+"-"+str(stop)]
                    spliceSet.add(spliceID)
                    tranBool = True
                    if spliceID in scDict:
                        scDict[spliceID] += 1
                    else:
                        scDict[spliceID] = 1
                elif novelSplices == 'True':
                    # Unknown splice between two known exon boundaries:
                    # record it as a discovery candidate.
                    if start in eDict[chro] and stop in eDict[chro]:
                        if str(start)+"-"+str(stop) in discoverySplices[chro]:
                            discoverySplices[chro][str(start)+"-"+str(stop)]+=1
                        else:
                            discoverySplices[chro][str(start)+"-"+str(stop)]=1
                            experiSplicect = 1
            except Exception as e:
                exceptionCount += 1
                exonID = ""
        exonSet.update(exonIncrement(stop, readR_E, chro))
        if (tranBool or len(exonSet) != 0):
            tranJRcount += 1
    else:
        # Plain (non-junction) read: check its single span against exons.
        totalNJRcount += 1
        start = readR_S+1
        stop = start+cigar[0][1]
        exonSet.update(exonIncrement(start, stop, chro))
        if (len(exonSet) != 0):
            tranNJRcount += 1
        else:
            print("Missing: " + chro + ' ' + str(start) + ' ' + str(stop))
    #add in all the sets
    for exon in exonSet:
        tranBool = True
        #print("exon: "+str(exon))
        if exon in ecDict:
            ecDict[exon] += 1
        else:
            ecDict[exon] = 1
    if tranBool == True:
        tranCount += 1
    #set this line to prevRead
    if dupeTag == False:
        prevRead = str(readR_S)+"-"+str(readR_E)+"-"+cigarString+"-"+chro
        prevExons = exonSet
        prevSplices = spliceSet
    #if i == 5000000:
    #    break
# Persist this sample's counts in one transaction.  Parameterized inserts
# replace string-built SQL; this also fixes the bug where args.sampleName
# was interpolated unquoted into the statement text (a syntax error for
# ordinary names, and an injection risk).
c.execute('begin')
c.executemany("INSERT INTO Splice_Counts VALUES(?, ?, ?)",
              [(Sample_Id, key, scDict[key]) for key in scDict])
c.executemany("INSERT INTO Exon_Counts VALUES(?, ?, ?)",
              [(Sample_Id, key, ecDict[key]) for key in ecDict])
#add this sample to the sample table
c.execute("INSERT INTO Sample VALUES(?, ?, ?, ?)",
          (Sample_Id, args.sampleName, totalCount, tranCount))
c.execute('commit')
# Dump novel splice candidates (empty unless novelSplices == 'True').
for chromkey in discoverySplices:
    for skey in discoverySplices[chromkey]:
        fns.write(skey + ", Count: " + str(discoverySplices[chromkey][skey]) + '\n')
fns.close()
print("missing attribute reads: " + str(missingAttrCount))
print("transcript junction reads: "+str(tranJRcount))
print("total junction reads: "+str(totalJRcount))
print("transcript duplicate reads: "+str(tranDupeCount))
print("total duplicate reads: "+str(totalDupeCount))
print("transcript Non junction reads: "+str(tranNJRcount))
print("total Non junction reads: "+str(totalNJRcount))
print("number of exceptions caught: "+ str(exceptionCount))
print("--- %.2f seconds---" % (time.time() - start_time))
print('Done')
| InSilicoSolutions/Splicer | Splicer/splicerSampleProcessor.py | splicerSampleProcessor.py | py | 12,134 | python | en | code | 0 | github-code | 36 |
23971763808 | from replit import db
from util import str_to_arr
from person import Person
def matches_to_string(matches):
    """Format match names as a header plus one name per line.

    Each entry (and the header) is newline-terminated, exactly as the
    original quadratic `+=` loop produced.
    """
    return "List of matches:\n" + "".join(m + "\n" for m in matches)
async def make_connections(message, person_calling):
    """DM the caller every user whose profile overlap meets their 'fit' bar.

    Overlap score: +1 for matching year, +1 per shared interest, +1 per
    shared course; two points of overlap count as one unit of fit.
    """
    caller = Person.str_to_person(db[f"{person_calling}"])
    caller_interests = str_to_arr(caller.interests)
    caller_courses = str_to_arr(caller.courses)
    matches = []
    for key in db.keys():
        if str(key) == str(person_calling):
            # Don't match the caller with themselves.
            continue
        candidate = Person.str_to_person(db[f"{key}"])
        candidate_interests = str_to_arr(candidate.interests)
        candidate_courses = str_to_arr(candidate.courses)
        score = int(caller.year == candidate.year)
        score += sum(1 for interest in caller_interests
                     if interest in candidate_interests)
        score += sum(1 for course in caller_courses
                     if course in candidate_courses)
        if score // 2 >= caller.fit:
            matches.append(candidate.user)
    await message.author.send(matches_to_string(matches))
    return
| Sharjeeliv/monty-bot | connect.py | connect.py | py | 1,083 | python | en | code | 0 | github-code | 36 |
43552704116 | import os
from bot.misc.util import download, calculate_hash
from bot.functions import lessonsToday
from bot.database.main import filesDB
import hashlib
import datetime
files = filesDB()
def filesCheck(urls) -> list:
    """Split urls into files already known to the DB and freshly fetched ones.

    urls -- mapping of file name -> download URL.
    Returns (known, new_hashes): `known` holds {'file_id', 'url'} dicts for
    files already stored; `new_hashes` holds {'name', 'hash', 'date'} dicts
    for files that were just downloaded and hashed.
    NOTE(review): annotated ``-> list`` but actually returns a 2-tuple.
    """
    known = []
    new_hashes = []
    for name, url in urls.items():
        record = files.get(name)
        if record == -1:
            # Not in the DB yet: fetch it and remember its hash.
            download(url, name)
            new_hashes.append({'name': name,
                               'hash': calculate_hash(f'{name}'),
                               'date': datetime.datetime.now()})
        else:
            known.append({'file_id': record['file_id'],
                          'url': url})
    return known, new_hashes
| i3sey/EljurTelegramBot | bot/functions/files.py | files.py | py | 651 | python | en | code | 2 | github-code | 36 |
43372121821 | import cv2
import numpy as np
video_path = '/Users/bigphess/Desktop/omnidirection/res/rabbit_250fps.mp4'
# NOTE(review): cap (webcam 0) is opened but never read or released.
cap = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(video_path)
# Despite the `while True`, the unconditional break at the bottom means at
# most one frame is processed.
while True:
    ret, frame = cap2.read()
    # image = cv2.imread('/Users/bigphess/Downloads/IMG_6453.JPG')
    debug = frame
    # Press 'q' within 100 ms to snapshot the frame to disk and exit.
    if cv2.waitKey(100) & 0xFF == ord('q'):
        cv2.imwrite('/Users/bigphess/Desktop/xu.jpg',debug)
        print("successful write")
        break
    # h,w,ch = debug.shape
    print('the size of the frame is {} and {}'.format(debug.shape[1],debug.shape[0]))
    cv2.imshow('??', frame)
    cv2.waitKey(0)
    break
    # debug = cv2.logPolar(frame,(debug.shape[0]/2,debug.shape[1]/2),100,cv2.WARP_FILL_OUTLIERS)
    # cv2.waitKey(1)
2532407157 | import re
import sys
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
class PreprocessDataframe(TransformerMixin):
    """Drop the leading process-info column and any all-NaN columns."""

    def fit(self, df, _):
        # Remember which columns contain no data at all so transform()
        # can drop them consistently on new frames.
        self.drop_cols = df.columns[df.isna().all(axis=0)]
        print('Drop cols', self.drop_cols, file=sys.stderr)
        return self

    def transform(self, df: pd.DataFrame):
        # First column is "process pid, container id/image": three
        # space-separated fields, asserted here.
        assert (df.iloc[:, 0].str.count(' ') == 2).all()
        trimmed = df.drop(columns=df.columns[0])
        return trimmed.drop(columns=self.drop_cols)
class PreprocessText(TransformerMixin):
    """Strip IP:port-like and numeric-tuple tokens from each input string."""
    # Stateless transformer: fit is the identity.
    fit = lambda x, *_: x
    def _fix(self, s):
        # Blank out hex/IPv6-ish colon sequences and dotted/dashed numeric
        # runs (IPs, timestamps) so they don't pollute the vocabulary.
        s = re.sub(r'(?<![.:\d])[a-f\d\[\]]+(:+[a-f\d\[\]]+)+|\d+([.:-]+\d+){2,4}', ' ', s) # IP:port
        return s
    # Class-body self-test: the regex must erase these samples entirely
    # (runs once at import time; _fix is called unbound with self=None).
    assert not _fix(None, '''
    123.123.123.123:1234
    123:[fab:12e:123:123]:1234
    ''').strip()
    def transform(self, X):
        Xt = [self._fix(s) for s in X]
        return Xt
class Transformer(TransformerMixin):
    """Per-column featurizer: one-hot for (near-)categorical columns,
    hashed token n-grams for free-text columns."""
    # A categorical column's single one-hot feature is repeated this many
    # times to balance its weight against multi-feature text columns.
    CATEGORICAL_WEIGHT = 30
    def fit(self, df, _):
        transformers = []
        for i, (_, s) in enumerate(df.items()):
            # Average word count decides categorical vs. free text.
            n_words = s.fillna('').astype(str).str.count(r'\b\w+')
            assert n_words.mean() > 0
            if n_words.mean() <= 1:
                print(i, n_words.mean(), np.unique(n_words), sep='\t')
                transformers.append((
                    str(i),
                    OneHotEncoder(handle_unknown='ignore', dtype=np.int8),
                    [i] * self.CATEGORICAL_WEIGHT,
                ))
            else:
                # Hash space scales with typical text length, clamped to [5, 50].
                n_features = round(max(5, min(50, n_words.median() * 5)))
                print(i, f'{n_words.mean():.2f}', n_features, sep='\t')
                transformers.append((
                    str(i),
                    make_pipeline(
                        PreprocessText(),
                        HashingVectorizer(
                            n_features=n_features,
                            token_pattern=r'[>\w-]+',
                            ngram_range=(1, 2),
                            lowercase=False,
                            alternate_sign=False,
                            norm=None,
                            dtype=np.uint8,
                        ),
                    ),
                    i
                ))
        self.transformer = ColumnTransformer(
            transformers=transformers,
            # n_jobs=-1,
        )
        X = df.fillna('').astype(str).values
        self.transformer.fit(X)
        return self
    def transform(self, df):
        X = df.fillna('').astype(str).values
        Xt = self.transformer.transform(X)
        assert Xt.size, Xt
        return Xt
| kernc/Containersec | lib.py | lib.py | py | 3,012 | python | en | code | 0 | github-code | 36 |
19956578192 | """
Given an array of strings, return another array containing all of its longest strings.
Example
For inputArray = ["aba", "aa", "ad", "vcd", "aba"], the output should be
solution(inputArray) = ["aba", "vcd", "aba"].
Input/Output
[execution time limit] 4 seconds (py3)
[input] array.string inputArray
A non-empty array.
Guaranteed constraints:
1 ≤ inputArray.length ≤ 10,
1 ≤ inputArray[i].length ≤ 10.
[output] array.string
Array of the longest strings, stored in the same order as in the inputArray.
"""
def solution(inputArray):
    """Return all strings in inputArray having the maximum length,
    preserving their original order.

    inputArray is guaranteed non-empty.  The leftover debug print was
    removed so the function no longer writes to stdout.
    """
    longest_len = max(len(s) for s in inputArray)
    return [s for s in inputArray if len(s) == longest_len]
29454018683 | #!/usr/bin/python3 -u
import sys, re, math
from img2c import *
def ascii2c(lines, img, dest = sys.stdout, h = None, w = None):
    """Convert ASCII-art lines into a 0/1 bitmap and emit its C source.

    Non-space characters become 1 pixels; lines shorter than the widest
    line are zero-padded on the right.  Returns (width, height).
    """
    width = max(len(line) for line in lines)
    height = len(lines)
    image = [
        [0 if ch == ' ' else 1 for ch in line] + [0] * (width - len(line))
        for line in lines
    ]
    printComment(image, height, width, dest)
    printArray(image, img, height, width, dest, h, w)
    return width, height
def main():
    """Parse CLI args and emit a C image definition for an ASCII-art file.

    Usage: ascii2c.py [--letter] <source> [destination]
    With no destination, output goes to stdout and the image name is
    derived from the source file; --letter drops the first input line.
    """
    letter = False
    if "--letter" in sys.argv:
        letter = True
        sys.argv.remove("--letter")
    if len(sys.argv) < 2:
        print("Usage: ascii2c.py <source> [destination]")
        sys.exit()
    source = sys.argv[1]
    # Explicit arg-count check replaces the original bare `except:` around
    # sys.argv[2], which silently swallowed unrelated errors too.
    if len(sys.argv) > 2:
        destination = sys.argv[2]
        img = re.sub(r"^.*[/\\]", "", destination)
    else:
        destination = sys.stdout
        img = re.sub(r"^.*[/\\]", "", source)
    img = re.sub(r"\..*$", "", img)
    try:
        # Context manager: the original left the file handle open.
        with open(source, "r") as src_file:
            lines = [x.strip('\n') for x in src_file.readlines()]
    except OSError:
        print("Error: Cannot open file: %s" % source)
        sys.exit()
    if letter:
        lines = lines[1:]  # drop the header line in --letter mode
    if destination != sys.stdout:
        try:
            dest = open(destination, "w")
        except OSError:
            print("Error: Cannot open file: %s" % destination)
            sys.exit()
    else:
        dest = destination
    print("#include <pm_graphics.h>", file=dest)
    print(file=dest)
    w, h = ascii2c(lines, '_' + img, dest)
    print(file=dest)
    print("const pm_image %s = {" % img, file=dest)
    print("\t_%s, %d, %d" % (img, w, h), file=dest)
    print("};", file=dest)
    # Bug fix: only close the handle we opened -- the original closed
    # sys.stdout when no destination was given.
    if dest is not sys.stdout:
        dest.close()
if __name__ == "__main__":
main() | michaelrm97/Turnomatic | software/graphics/ascii2c.py | ascii2c.py | py | 1,863 | python | en | code | 1 | github-code | 36 |
20822639893 | import json
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import euclidean_distances
import numpy as np
from sklearn.linear_model import LogisticRegression
def load():
    """Load the generated test cases from the hard-coded JSON file."""
    path = ('C:/Users/Administrator/Desktop/backend-interview-1/samples/'
            'generated_test_cases.txt')
    with open(path, 'r', encoding='utf-8') as json_file:
        return json.load(json_file)
def get_word_embeddings():
    """Parse the ConceptNet Numberbatch text file into {word: float vector}.

    Lines with fewer than three space-separated fields (e.g. the header)
    are skipped.  Vectors are parsed to float64 up front; the original
    kept numpy *string* arrays and relied on sklearn to coerce them at
    every distance computation.
    """
    embeddings = {}
    with open('C:/Users/Administrator/Desktop/numberbatch-en.txt', encoding='utf-8') as em:
        for embed in em:
            em_line = embed.split(' ')
            if len(em_line) > 2:
                word = em_line[0]
                # float() tolerates the trailing newline on the last field.
                embeddings[word] = np.array(em_line[1:], dtype=np.float64)
    print('Word embeddings:', len(embeddings))
    return embeddings
def get_min(target_list):
    """Return the smallest element of target_list (assumed non-empty).

    Delegates to the builtin min() instead of a hand-rolled scan.
    """
    return min(target_list)
def get_min_index(target_list):
    """Return the index of the first occurrence of the minimum value
    in target_list (assumed non-empty)."""
    return target_list.index(min(target_list))
def find_match_document_distance(query, candidates, W):
    """Return the minimum embedding distance between any query token and
    any candidate token.

    query/candidates -- free text / list of candidate strings
    W -- {word: embedding vector} mapping
    Returns a float distance, -1 when either side yields no features, or
    an error string when an argument is empty.
    """
    if len(query) == 0:
        return "The value of query is null."
    if len(candidates) == 0:
        return "The value of candidates is null."
    # NOTE(review): vec2 is fit on str(candidates) -- the Python repr of the
    # whole list -- so brackets/quotes influence tokenization; confirm intent.
    vec1 = CountVectorizer(stop_words="english").fit([str(query)])
    vec2 = CountVectorizer(stop_words="english").fit([str(candidates)])
    if len(vec1.get_feature_names()) == 0 or len(vec2.get_feature_names()) == 0:
        return -1
    # print("Query Features:", ", ".join(vec1.get_feature_names()))
    # print("Candidates Features:", ", ".join(vec2.get_feature_names()))
    # Raises KeyError if a token is missing from W (handled by callers).
    W1 = [W[w] for w in vec1.get_feature_names()]
    W2 = [W[w] for w in vec2.get_feature_names()]
    result = []
    # All pairwise query-token x candidate-token distances, one sklearn
    # call per pair.
    for i in range(0, len(W1)):
        for j in range(0, len(W2)):
            res = {}
            res['q'] = vec1.get_feature_names()[i]
            res['c'] = vec2.get_feature_names()[j]
            res['r'] = float(euclidean_distances([W1[i]], [W2[j]])[0][0])
            result.append(res)
    t_list = []
    for i in range(0, len(result)):
        t_list.append(float(result[i]['r']))
    return get_min(t_list)
def get_model(W):
    """Fit a logistic-regression match/no-match classifier.

    For each generated test case the single feature is the minimum
    embedding distance between query and candidates; the label is "1"
    when the case has a correct candidate (correct_index >= 0), "-1"
    otherwise.  Prints cross-validation accuracy and returns the model.

    NOTE(review): the try wraps the whole loop, so a single KeyError
    (word missing from W) aborts it and silently truncates the training
    data — confirm that is intended.
    """
    json_data = load()
    print(len(json_data))
    list_X = []
    list_Y = []
    try:
        for i in range(0, len(json_data)):
            aaa = find_match_document_distance(str(json_data[i]['query']), str(json_data[i]['candidates']), W)
            list_X.append(aaa)
            if json_data[i]['correct_index'] >= 0:
                list_Y.append("1")
            else:
                list_Y.append("-1")
    except Exception as e:
        print(i)
        print("word_embeddings中未含有该词")
    print(len(list_X))
    print(len(list_Y))
    # Single-feature design matrix of shape (n_samples, 1).
    x = np.array(list_X).reshape(-1, 1)
    y = np.array(list_Y)
    lr = LogisticRegression(C=1000.0, random_state=0)
    lr.fit(x, y)
    # print(lr.predict(x))
    from sklearn.model_selection import cross_val_score
    acy = cross_val_score(lr, x, y)
    print(acy.mean())
    return lr
def find_best_match_with_threshold(query, candidates, lr, W):
    """Return the index of the candidate best matching *query*, or -1.

    Computes all-pairs embedding distances between query features and
    candidate features; the trained classifier *lr* decides whether the
    minimum distance counts as a match.

    Args:
        query: user utterance (str).
        candidates: list of candidate command strings.
        lr: LogisticRegression fit by get_model on shape (n, 1) features.
        W: dict word -> embedding vector.

    Returns:
        Index into *candidates*; -1 when no features exist or the
        classifier rejects the match; an error string for empty input.
    """
    if len(query) == 0:
        return "The value of query is null."
    if len(candidates) == 0:
        return "The value of candidates is null."
    vec1 = CountVectorizer(stop_words="english").fit([query])
    vec2 = CountVectorizer(stop_words="english").fit([str(candidates)])
    if len(vec1.get_feature_names()) == 0 or len(vec2.get_feature_names()) == 0:
        return -1
    print("Features:", ", ".join(vec1.get_feature_names()))
    print("Features:", ", ".join(vec2.get_feature_names()))
    W1 = [W[w] for w in vec1.get_feature_names()]
    W2 = [W[w] for w in vec2.get_feature_names()]
    result = []
    for i in range(0, len(W1)):
        for j in range(0, len(W2)):
            res = {}
            res['q'] = vec1.get_feature_names()[i]
            res['c'] = vec2.get_feature_names()[j]
            res['r'] = float(euclidean_distances([W1[i]], [W2[j]])[0][0])
            result.append(res)
    t_list = []
    for i in range(0, len(result)):
        t_list.append(float(result[i]['r']))
    # The classifier was fitted on a 2-D array of shape (n_samples, 1), so
    # a single sample must be wrapped as [[value]].  The original passed
    # the bare scalar, which scikit-learn rejects with a ValueError.
    if lr.predict([[get_min(t_list)]])[0] == "-1":
        return -1
    else:
        return candidates.index(result[get_min_index(t_list)]['c'])
# --- script entry: load embeddings, train the threshold model once, then
# --- classify one sample query against a fixed candidate list.
W = get_word_embeddings()
lr = get_model(W)
query = "i am really hungry"
candidates = ['story', 'song', 'wake up', 'restart']
print(find_best_match_with_threshold(query, candidates, lr, W))
| Qt7mira/LenovoIVProblem | mira/part3.py | part3.py | py | 4,656 | python | en | code | 1 | github-code | 36 |
8944245432 | import numpy as np
from numpy import linalg as LA
#
# a = np.array( [[1,2],
# [2,4]] )
# b = np.array( [[2,3],
# [3,4]])
#
# print(np.matmul(a,b))
# Exercise 1.1 => 3b
array_1 = np.array([0, -1, -2])
array_2 = np.array([1, -3])
array_3 = np.array([1, -3])
array_4 = np.array([1, -3])
array_5 = np.array([1, 2])
array_6 = np.array([1, -2])

# Inner products: a3.a4 and a5.a6 (1-D arrays, so .T is a no-op and the
# dots reduce to scalars).
x = array_3.T @ array_4
y = array_5 @ array_6.T
xy = x * y

# ||a1||_inf squared, scaled by ||a2||_2 to the power -2, times xy.
infinit_norm = LA.norm(array_1, np.inf)
euclid_norm = LA.norm(array_2, 2)
result = (infinit_norm ** 2) * (euclid_norm ** -2) * xy
print(result)
#
# # In[3]:
#
#
# x = 3 + 3j
# y = 4 + 5j
# print(x+y)
#
#
# # In[4]:
#
#
# # exp(2)
#
#
# # In[5]:
#
#
# np.exp(2)
#
#
# # In[6]:
#
#
# # print(2i + 4i)
#
#
# # In[7]:
#
#
# print(2j + 4j)
#
#
# # In[8]:
#
#
# print(a*2)
#
#
# # In[9]:
#
#
# z= a*2
#
#
# # In[10]:
#
#
# print()
#
#
# # In[11]:
#
#
# print(z)
#
#
# # In[12]:
#
#
# print(a)
#
#
# # In[13]:
#
#
# a1 = a
#
#
# # In[14]:
#
#
# print(a)
#
#
# # In[15]:
#
#
# print(a1)
#
#
# # In[16]:
#
#
# a1 = np.array([1,-3])
# a2 = np.array([[1], [-3]])
# print(np.matmul(a1,a2))
#
#
# # In[17]:
#
#
# print(max
# )
#
#
# # In[18]:
#
#
# print(np.max)
#
# # In[27]:
#
#
# print(np.finfo(np.float64).max)
#
#
# # In[28]:
#
#
# print(np.finfo(np.float32).max)
#
#
# # In[29]:
#
#
# print(np.finfo(np.float64).tiny)
#
#
# # In[30]:
#
#
# print(np.finfo(np.float32).max)
#
#
# # In[31]:
#
#
# print(np.finfo(np.float64).precision)
#
#
# # In[32]:
#
#
# print(np.finfo(np.float32).precision)
#
#
# # In[33]:
#
#
# print(np.finfo(np.double).precision)
#
#
# # In[34]:
#
#
# print(np.finfo(np.double).tiny)
#
#
# # In[35]:
#
#
# print(np.finfo(np.float).max)
#
#
# # In[36]:
#
#
# print(np.finfo(np.float64).max)
#
#
# # In[37]:
#
#
# print(np.finfo(np.double).max)
#
#
# # In[44]:
#
#
# from numpy import linalg
# m0 = np.array([[0],[-1],[-2]])
# linalg.norm(m0, np.inf)
#
#
# # In[60]:
#
#
# from numpy import linalg as LA
# m0 = np.array([1,-3,2])
# m1 = np.array([[1],[-1]])
# m1 = m1.transpose()
# m2 = np.array([[1,2,0],
# [-3, -2 ,1]])
# m3 = np.array([1,-3,2])
# res = LA.norm(m0, 1) + np.matmul(m1,np.matmul(m2,m3))
# print(res)
#
#
# # In[84]:
#
#
#
# m0 = np.array([0,-1,-2])
# m1 = np.array([1,-3])
# m2 = np.array([[1],[-3]])
# m3 = np.array([[1],[-3]])
# m4 = np.array([[1],[2]])
# m5 = np.array([[1],[-2]])
# res = np.power(LA.norm(m0, np.inf) ,2) * np.power(LA.norm(m1, 2), -2) * np.multiply(np.matmul(m2.transpose(),m3), np.matmul(m4,m5.transpose()) )
# print(res)
#
# # @ can be used instead of np.matmul
| Mithunjack/Optimization-and-Data-Science | Mithun/ODS- Final/ex1.py | ex1.py | py | 2,615 | python | en | code | 1 | github-code | 36 |
def uglyNumber(n):
    """Return True if n is an "ugly number": a positive integer whose
    only prime factors are 2, 3 and 5."""
    if n <= 0:
        return False
    for prime in (2, 3, 5):
        while n % prime == 0:
            n //= prime
    return n == 1
# Sanity checks (executed at import time): unit, negative, power-of-two
# and prime inputs.
assert True == uglyNumber(1)
assert False == uglyNumber(-2125563)
assert True == uglyNumber(1024)
assert False == uglyNumber(19)
| msencer/leetcode-solutions | easy/python/UglyNumber.py | UglyNumber.py | py | 299 | python | en | code | 5 | github-code | 36 |
71300190183 | import time
import tqdm
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from utils import *
def prepare_sequence(seq, word2idx):
    """Map a sequence of tokens to a 1-D LongTensor of vocabulary indices."""
    return torch.tensor([word2idx[token] for token in seq], dtype=torch.long)
class BiLSTM_CRF_S(nn.Module):
    """BiLSTM encoder with a classic CRF layer ("small" variant).

    Keeps an explicit (labelset_size x labelset_size) transition matrix;
    emission scores come from a linear projection of the BiLSTM output.
    Sequence tensors are laid out (seq_len, batch), NOT batch-first.
    Relies on module-level START_label / STOP_label / log_sum_exp from
    ``utils`` (imported at file top).
    """
    def __init__(self, vocab_size, label2idx, embedding_dim, hidden_size, num_layers, dropout_ratio=0.3):
        super(BiLSTM_CRF_S, self).__init__()
        self.vocab_size = vocab_size
        self.labelset_size = len(label2idx)
        self.embedding_dim = embedding_dim
        self.label2idx = label2idx
        self.hidden_size = hidden_size
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Not batch-first: (seq_len, batch, feat) is actually handier here.
        self.LSTM = nn.LSTM(
            input_size=embedding_dim,
            hidden_size=hidden_size // 2,
            num_layers=num_layers,
            bidirectional=True,
            # batch_first=True
        )
        # Projects LSTM output to per-label emission scores.
        self.output2label = nn.Linear(hidden_size, self.labelset_size)
        # Label transition scores:
        # transitions[i, j] is the score of moving from label i to label j.
        self.transitions = nn.Parameter(
            torch.randn(self.labelset_size, self.labelset_size, requires_grad=True))
        # No transition may leave STOP or enter START; detach() is required
        # so these in-place writes are not tracked by autograd.
        self.transitions.detach()[label2idx[STOP_label], :] = -10000
        self.transitions.detach()[:, label2idx[START_label]] = -10000
        self.dropout1 = nn.Dropout(p=dropout_ratio)
        self.dropout2 = nn.Dropout(p=dropout_ratio)
        self.hidden = None
        self.seq_length = None
        self.batch_size = None
    def init_uniform(self):
        """Uniformly initialize LSTM weights (Glorot-style bounds), set the
        LSTM forget-gate bias slice to 1, and re-init the embeddings."""
        for ind in range(0, self.LSTM.num_layers):
            weight = eval('self.LSTM.weight_ih_l' + str(ind))
            bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            nn.init.uniform_(weight, -bias, bias)
            weight = eval('self.LSTM.weight_hh_l' + str(ind))
            bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            nn.init.uniform_(weight, -bias, bias)
        if self.LSTM.bias:
            for ind in range(0, self.LSTM.num_layers):
                weight = eval('self.LSTM.bias_ih_l' + str(ind))
                weight.data.zero_()
                weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
                weight = eval('self.LSTM.bias_hh_l' + str(ind))
                weight.data.zero_()
                weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
        bias = np.sqrt(3.0 / self.embedding_dim)
        nn.init.uniform_(self.embeddings.weight, -bias, bias)
    def init_hidden(self):
        r"""
        Initialize the LSTM hidden state (h0, c0) for the current batch.
        :return: tuple of two tensors shaped (2, batch_size, hidden_size // 2)
        """
        return (torch.randn(2, self.batch_size, self.hidden_size // 2),
                torch.randn(2, self.batch_size, self.hidden_size // 2))
    def _get_scores(self, sentences):
        '''
        Compute per-token emission scores via embedding -> BiLSTM -> linear.
        :param sentences: tensor [length, batch_size]
        :return: feats tensor [length, batch_size, labelset_size]
        '''
        self.hidden = self.init_hidden()
        # [length, batch_size] -> [length, batch_size, dim]
        sentences_embeddings = self.embeddings(sentences)
        sentences_embeddings = self.dropout1(sentences_embeddings)
        # outputs [length, batch_size, hidden_size]
        outputs, self.hidden = self.LSTM(sentences_embeddings, self.hidden)
        outputs = self.dropout2(outputs)
        # [length, batch_size, labelset_size]
        feats = self.output2label(outputs)
        return feats
    def _forward_all_logsumexp(self, scores, masks):
        r"""
        Compute log-sum-exp over all possible label paths (CRF partition).
        :param scores: tensor [length, batch_size, labelset_size]
            emission scores from the LSTM
        :param masks: tensor [length, batch_size]
        :return: terminal_score: tensor [batch_size]
        """
        # dp[i, b]: log-sum-exp over all paths ending at the current token
        # with label i, for batch item b.
        dp = torch.full((self.labelset_size, self.batch_size), -10000.)
        dp[self.label2idx[START_label]] = 0.
        for i in range(self.seq_length):
            # [batch_size, labelset_size]
            score = scores[i]
            # [batch_size] -> [batch_size, 1] -> [batch_size, labelset_size]
            mask = masks[i].unsqueeze(dim=1).expand(self.batch_size, self.labelset_size)
            # [labelset_size_from, batch_size, labelset_size_to]
            tmp = dp.transpose(0, 1).unsqueeze(dim=2).expand(self.labelset_size, self.batch_size, self.labelset_size) + \
                  score.unsqueeze(dim=0).expand(self.labelset_size, self.batch_size, self.labelset_size) + \
                  self.transitions.unsqueeze(dim=1).expand(self.labelset_size, self.batch_size, self.labelset_size)
            # [labelset_size_from, batch_size, labelset_size_to] -> [batch_size, labelset_size_to]
            tmp = log_sum_exp(tmp, dim=0)
            # Positions with mask==1 take the new value; padded positions
            # keep their previous dp value.
            dp.masked_scatter_(mask, tmp.masked_select(mask))
        # NOTE(review): the transition into STOP is deliberately left
        # commented out here and in _get_gold_score — confirm intended.
        # dp = dp + self.transitions[self.label2idx[STOP_label]]
        dp = log_sum_exp(dp, dim=1)
        return dp
    def _get_gold_score(self, scores: torch.tensor, labels, masks):
        '''
        Score of the provided gold label path.
        :param scores: tensor [length, batch_size, labelset_size]
            emission scores from the LSTM
        :param labels: tensor [length, batch_size]
            gold label path for each sequence
        :param masks: tensor [length, batch_size]
        :return:
            scores: tensor [batch_size]
        '''
        dp = torch.zeros(self.batch_size)
        st = torch.full([1, self.batch_size], self.label2idx[START_label], dtype=torch.long)
        # Prepend START: [length + 1, batch_size]
        labels = torch.cat(
            [st, labels], dim=0
        )
        for i in range(self.seq_length):
            # [batch_size, labelset_size]
            score = scores[i]
            # [batch_size]
            mask = masks[i]
            # Accumulate transition score + emission score for the gold path.
            tmp = dp + self.transitions[labels[i], labels[i + 1]] + score[
                range(self.batch_size), labels[i + 1]]
            # mask==1 positions take the new value; padded positions keep dp.
            dp.masked_scatter_(mask, tmp.masked_select(mask))
        # The last label is always PAD, so the final transition into STOP
        # is skipped (kept commented, mirroring _forward_all_logsumexp).
        # dp = dp + self.transitions[labels[-1], self.label2idx[STOP_label]]
        # print(time.time() - st)
        return dp
    def neg_log_likelihood(self, sentences, labels, masks):
        r"""
        Negative log-likelihood of the gold path (CRF training loss).
        :param sentences: tensor [length, batch_size]
        :param labels: tensor [length, batch_size]
            gold label sequences
        :param masks: tensor [length, batch_size]
        :return: FloatTensor (mean loss over the batch)
        """
        self.set_batch_seq_size(sentences)
        # [length, batch_size, labelset_size]
        feats = self._get_scores(sentences)
        forward_score = self._forward_all_logsumexp(feats, masks)
        gold_score = self._get_gold_score(feats, labels, masks)
        # print('forward_score: ', forward_score)
        # print('gold_score :', gold_score)
        return (forward_score - gold_score).sum() / self.batch_size
    def _viterbi_decode(self, feats, masks):
        r'''
        Viterbi decoding: find the most probable label sequence.
        :param feats: tensor [length, batch_size, labelset_size]
            emission scores from the LSTM
        :param masks: tensor [length, batch_size]
        :return: best_scores tensor [batch_size]
                 best_paths tensor [batch_size, length]
        '''
        # Backpointers: which parent label each node came from.
        parents = []
        # dp[i, b]: best path score ending at the current token with label i.
        dp = torch.full((self.labelset_size, self.batch_size), -10000.)
        # START_label has all of the score.
        dp[self.label2idx[START_label]] = 0.
        # NOTE(review): feats.shape[1] is the batch dimension here while the
        # other methods iterate seq_length — looks like it should be
        # feats.shape[0]; confirm (the author's own TODO below hints at this).
        for i in range(feats.shape[1]):
            # [batch_size, labelset_size]
            feat = feats[i]
            # [batch_size] -> [batch_size, 1]
            mask = masks[i].unsqueeze(dim=1)
            # [labelset_size, batch_size, labelset_size]
            # TODO: double-check these dimensions!!
            tmp = dp.transpose(0, 1).unsqueeze(dim=2) + \
                  feat.unsqueeze(dim=0) + \
                  self.transitions.unsqueeze(dim=1)
            max_scores, best_choose = tmp.max(dim=0)
            # Record backpointers, [batch_size, labelset_size]
            parents.append(best_choose)
            # Only one path is kept per label, so no log_sum_exp is needed.
            dp.masked_scatter_(mask, max_scores.masked_select(mask))
        # [batch_size, labelset_size]
        # terminal_var = dp + self.transitions[:, self.label2idx[STOP_label]]
        terminal_var = dp
        # [batch_size]
        best_scores, best_path_labels = terminal_var.max(dim=1)
        best_paths = [best_path_labels]
        # Follow the backpointers from the end of the sequence to the start.
        for parent in reversed(parents):
            best_path_labels = parent[range(parent.shape[0]), best_paths[-1]]
            best_paths.append(best_path_labels)
        best_paths.pop()
        best_paths.reverse()
        # Convert to [batch_size, length]
        best_paths = torch.stack(best_paths).transpose(0, 1)
        return best_scores, best_paths
    def set_batch_seq_size(self, sentence):
        """
        set batch size and sequence length
        """
        tmp = sentence.size()
        self.batch_size = tmp[1]
        self.seq_length = tmp[0]
    def predict(self, sentences, masks):
        r"""
        Predict the most likely label sequence and its score.
        :param sentences: tensor [length, batch_size]
        :param masks: tensor [length, batch_size]
        :return:
            scores: tensor [batch_size]
            paths: tensor [batch_size, length] of label indices
        """
        self.set_batch_seq_size(sentences)
        # Get the emission scores from the BiLSTM
        feats = self._get_scores(sentences)
        # Find the best path, given the features.
        scores, paths = self._viterbi_decode(feats, masks)
        return scores, paths
class BiLSTM_CRF_L(nn.Module):
    r"""
    "Large" BiLSTM-CRF: instead of a separate transition matrix it uses
    nn.Linear(hidden_dim, labelset_size * labelset_size) so that every
    token emits a full (from_label, to_label) score grid.  With targets
    encoded as label_i * labelset_size + label_(i + 1) at dataset-build
    time, the gold score is a single gather and no per-step transition
    addition is needed — much faster, at the cost of more memory.
    """
    def __init__(self, vocab_size, label2idx, embedding_dim, hidden_dim, num_layers, dropout_ratio):
        super(BiLSTM_CRF_L, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.LSTM = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=num_layers, bidirectional=True)
        self.num_layers = num_layers
        self.dropout1 = nn.Dropout(p=dropout_ratio)
        self.dropout2 = nn.Dropout(p=dropout_ratio)
        self.labelset_size = len(label2idx)
        self.label2idx = label2idx
        self.start_tag = label2idx[START_label]
        self.end_tag = label2idx[PAD_label]
        self.batch_size = 1
        self.seq_length = 1
        # One score per (from_label, to_label) pair for every token.
        self.hidden2tag = nn.Linear(hidden_dim, self.labelset_size * self.labelset_size)
    def init_uniform(self):
        # LSTM
        r"""
        Uniform (Glorot-style) initialization of LSTM, embeddings and the
        output linear layer; LSTM forget-gate bias slices are set to 1.
        :return:
        """
        for ind in range(0, self.LSTM.num_layers):
            weight = eval('self.LSTM.weight_ih_l' + str(ind))
            bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            nn.init.uniform_(weight, -bias, bias)
            weight = eval('self.LSTM.weight_hh_l' + str(ind))
            bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            nn.init.uniform_(weight, -bias, bias)
        if self.LSTM.bias:
            for ind in range(0, self.LSTM.num_layers):
                weight = eval('self.LSTM.bias_ih_l' + str(ind))
                weight.data.zero_()
                weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
                weight = eval('self.LSTM.bias_hh_l' + str(ind))
                weight.data.zero_()
                weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
        # embedding
        # nn.Embedding.weight defaults to N(0, 1) initialization.
        bias = np.sqrt(3.0 / self.embedding_dim)
        nn.init.uniform_(self.embeddings.weight, -bias, bias)
        # Linear
        bias = np.sqrt(6.0 / (self.hidden2tag.weight.size(0) +
                              self.hidden2tag.weight.size(1)))
        nn.init.uniform_(self.hidden2tag.weight, -bias, bias)
    def rand_init_hidden(self):
        """
        Randomly initialize the LSTM hidden state (h0, c0).
        """
        return torch.Tensor(
            torch.randn(2 * self.num_layers, self.batch_size, self.hidden_dim // 2)), torch.Tensor(
            torch.randn(2 * self.num_layers, self.batch_size, self.hidden_dim // 2))
    def set_batch_seq_size(self, sentence):
        """
        :param sentence [length, batch_size]
        Record batch_size and seq_length from the input shape.
        """
        tmp = sentence.size()
        self.seq_length = tmp[0]
        self.batch_size = tmp[1]
    def load_pretrained_embedding(self, pre_embeddings):
        """
        Load pretrained word embeddings (must match embedding_dim).
        """
        assert (pre_embeddings.size()[1] == self.embedding_dim)
        self.embeddings.weight = nn.Parameter(pre_embeddings)
    def _get_gold_score(self, scores, targets, masks):
        r"""
        Score of the gold path, gathered in one pass.
        :param scores: [length, batch_size, labelset_size, labelset_size]
        :param targets: [length, batch_size]
            each value encodes from_label * labelset_size + to_label
        :param masks: [length, batch_size]
        :return: gold_score tensor (scalar sum over the batch)
        """
        # [length, batch_size] -> [length, batch_size, 1]
        targets = targets.unsqueeze(dim=2)
        gold_score = torch.gather(scores.view(
            self.seq_length, self.batch_size, -1), 2, targets).view(self.seq_length,
                                                                    self.batch_size)  # seq_len * batch_size
        gold_score = gold_score.masked_select(masks).sum()
        return gold_score
    def _get_all_logsumexp(self, scores, masks):
        r"""
        Log-sum-exp over all label paths (CRF partition function).
        :param scores: [length, batch_size, labelset_size, labelset_size]
        :param masks: [length, batch_size]
        :return: scalar tensor
        """
        seq_iter = enumerate(scores)
        # [batch_size, labelset_size_from, labelset_size_to]
        _, inivalues = seq_iter.__next__()
        # [batch_size, labelset_size_to]; note: omitting clone() errors out.
        # dp[b, i]: log-sum-exp of all paths ending at the current token
        # with label i.
        dp = inivalues[:, self.start_tag, :].clone()
        # Iterate from the first real label onward.
        for idx, cur_values in seq_iter:
            # [batch_size] -> [batch_size, labelset_size]
            mask = masks[idx].view(self.batch_size, 1).expand(self.batch_size, self.labelset_size)
            # cur_values: [batch_size, labelset_size_from, labelset_size_to]
            cur_values = cur_values + dp.contiguous().view(self.batch_size, self.labelset_size,
                                                           1).expand(self.batch_size, self.labelset_size,
                                                                     self.labelset_size)
            # [batch_size, from_target, to_target] -> [batch_size, to_target]
            tmp = log_sum_exp(cur_values, dim=1)
            # mask==0 keeps the old value; mask==1 takes the new one.
            dp.masked_scatter_(mask, tmp.masked_select(mask))
        dp = dp[:, self.end_tag].sum()
        return dp
    def neg_log_likelihood(self, sentences, targets, masks, hidden=None):
        r"""
        CRF training loss: partition minus gold score, averaged per batch.
        :param sentences: [length, batch_size]
        :param targets: [length, batch_size]
        :param masks: [length, batch_size]
        :param hidden: unused here; forward() creates its own state
        :return: scalar loss tensor
        """
        # [length, batch_size, labelset_size, labelset_size]
        crf_scores = self.forward(sentences)
        gold_score = self._get_gold_score(crf_scores, targets, masks)
        forward_score = self._get_all_logsumexp(crf_scores, masks)
        loss = (forward_score - gold_score) / self.batch_size
        # print(loss)
        return loss
    def _viterbi_decode(self, crf_scores, masks):
        r'''
        Viterbi decoding: find the most probable label sequence.
        :param crf_scores: tensor [length, batch_size, labelset_size, labelset_size]
            emission + transition scores from the LSTM head
        :param masks: tensor [length, batch_size]
        :return: scores tensor [batch_size]
                 paths [batch_size, seq_length - 1]
        '''
        # Invert so padded positions are True for masked_fill_ below.
        masks = ~masks
        path = torch.LongTensor(self.seq_length - 1, self.batch_size)
        seq_iter = enumerate(crf_scores)
        # [batch_size, from_labelset_size, to_labelset_size]
        _, inivalues = seq_iter.__next__()
        # Keep only the scores out of START: [batch_size, to_labelset_size]
        forscores = inivalues[:, self.start_tag, :].clone()
        parents = []
        # Iterate from the first real label onward.
        for idx, cur_values in seq_iter:
            # [batch_size] -> [batch_size, labelset_size]
            mask = masks[idx].view(self.batch_size, 1).expand(self.batch_size, self.labelset_size)
            # cur_values: [batch_size, from_target, to_target]
            cur_values = cur_values + forscores.contiguous().view(self.batch_size, self.labelset_size,
                                                                  1).expand(self.batch_size, self.labelset_size,
                                                                            self.labelset_size)
            forscores, cur_parent = torch.max(cur_values, 1)
            # [batch_size, to_target]; padded positions point straight to PAD.
            cur_parent.masked_fill_(mask, self.end_tag)
            parents.append(cur_parent)
        # Trace the best path back from the PAD tag at the end.
        pointer = parents[-1][:, self.end_tag]
        path[-1] = pointer
        for idx in range(len(parents) - 2, -1, -1):
            back_point = parents[idx]
            index = pointer.contiguous().view(-1, 1)
            pointer = torch.gather(back_point, 1, index).view(-1)
            path[idx] = pointer
        return forscores, path.transpose(0, 1)
    def predict(self, sentences, masks, hidden=None):
        r"""
        Inference entry point: best score and best path per sequence.
        :param sentences: [length, batch_size]
        :param masks: [length, batch_size]
        :return: (scores, path) from _viterbi_decode
        """
        self.eval()
        crf_scores = self.forward(sentences)
        scores, path = self._viterbi_decode(crf_scores, masks)
        return scores, path
    def forward(self, sentences, hidden=None):
        r"""
        Compute crf_scores.
        :param sentences: [length, batch_size]
        :param hidden: optional initial LSTM hidden state
        :return: crf_scores [length, batch_size, labelset_size_from, labelset_size_to]
            crf_scores[0, 0, 1, 10]: for the first word of the first
            sentence, the emit + transition score of label_1 -> label_10.
        """
        self.set_batch_seq_size(sentences)
        embeds = self.embeddings(sentences)
        d_embeds = self.dropout1(embeds)
        # [length, batch_size, hidden_size]
        lstm_out, hidden = self.LSTM(d_embeds, hidden)
        lstm_out = lstm_out.view(-1, self.hidden_dim)
        d_lstm_out = self.dropout2(lstm_out)
        crf_scores = self.hidden2tag(d_lstm_out).view(-1, self.labelset_size, self.labelset_size)
        crf_scores = crf_scores.view(self.seq_length, self.batch_size, self.labelset_size, self.labelset_size)
        return crf_scores
| YaooXu/Chinese_seg_ner_pos | BiLSTM_CRF.py | BiLSTM_CRF.py | py | 20,207 | python | en | code | 5 | github-code | 36 |
31563331171 | import requests
from bs4 import BeautifulSoup
# Simple interactive scraper: fetch a page, show its title, then print
# every occurrence of a user-chosen HTML element.
url = input("Entrer l'URL du site : ")

response = requests.get(url)

# Abort cleanly when the page cannot be fetched.  The original only
# printed a message and then crashed later with a NameError because
# html_content was never assigned.
if response.status_code == 200:
    html_content = response.content
else:
    print("Erreur lors de la récupération de la page.")
    raise SystemExit(1)

soup = BeautifulSoup(html_content, "html.parser")

# Extract the page title
title = soup.title.text
print("Titre de la page :", title)

# Element of the page to extract
element = input("Entrez l'élément a extraire du site : ")
elements = soup.find_all(f"{element}")
for item in elements:
    print(item)

# Example: extract every link on the page
# links = soup.find_all("a")
# for link in links:
#     print(link.get("href"))
| Lenked/ScrappingApp | main.py | main.py | py | 690 | python | fr | code | 0 | github-code | 36 |
2363693116 | import os
def read(archivo):
    """Print the contents of ``<archivo>.txt``; report when it is missing.

    Args:
        archivo: base file name without the ``.txt`` extension.
    """
    try:
        archivo = archivo + ".txt"
        print("El contenido es: ")
        # The original opened the file without ever closing it; the
        # context manager guarantees the handle is released.
        with open(archivo, "r") as file:
            for line in file:
                print(line, end="")
    except FileNotFoundError:
        print("No se encontro")
def encontrar():
    """Walk the current directory tree and list every ``.txt`` file found."""
    count = 0
    for _dirpath, _dirnames, filenames in os.walk("."):
        for name in filenames:
            if ".txt" in name:
                print("Archivo", count + 1, ":", name)
                count = count + 1
def agregar(var):
    """Interactively append URLs to ``<var>.txt`` until the user stops.

    Args:
        var: base file name without the ``.txt`` extension.
    """
    var = var + ".txt"
    # Append mode preserves existing content.  The original never closed
    # the handle; the context manager flushes and closes it.
    with open(var, "a") as file:
        while True:
            url = input("Escribe la url que quieres agregar al " + var + " ")
            file.write(url + "\n")
            con = input("Quieres agregar mas y/n ")
            if con != "y":
                break
def eliminar(var):
    """Show the file, ask for a URL, then rewrite the file without it.

    Args:
        var: base file name without the ``.txt`` extension.
    """
    # Display the current contents so the user can pick a line to delete.
    read(var)
    var = var + ".txt"
    dele = str(input("Que url quieres eliminar "))
    with open(var, "r") as f:
        lines = f.readlines()
    # Rewrite the file keeping every line except the chosen URL.
    with open(var, "w") as f:
        for line in lines:
            if line.strip("\n") != dele:
                f.write(line)
if __name__=="__main__":
    # Demo flow: ask for a base name, list available .txt files, show the
    # chosen file, delete a URL from it, then append new URLs.
    arc=input("Escribe el archivo ")
    encontrar()
    read(arc)
    eliminar(arc)
    agregar(arc)
| JulioGrimaldoM/LPC | Practica_1/Buscar.py | Buscar.py | py | 1,239 | python | es | code | 0 | github-code | 36 |
29005165219 | #The program should ask the user to enter three numbers (one number at a time) and should work out how many of these are even and odd. Finally, the program should display the number of even numbers and odd numbers entered.
# Ask user for numbers
print("Please enter the first whole number?")
first_number = int(input())
print("Please enter the second whole number?")
second_number = int(input())
print("Please enter the third whole number?")
third_number = int(input())
even_numbers = 0
odd_numbers = 0
# Determine which numbers are even and which are odd
if (first_number % 2 == 0):
even_numbers = even_numbers + 1
else:
odd_numbers = odd_numbers + 1
if (second_number % 2 == 0):
even_numbers = even_numbers + 1
else:
odd_numbers = odd_numbers + 1
if (third_number % 2 == 0):
even_numbers = even_numbers + 1
else:
odd_numbers = odd_numbers + 1
# Display result
print("There were {} even and {} odd numbers.".format(even_numbers, odd_numbers)) | Combei/QHO426 | week2/decisions/counter.py | counter.py | py | 978 | python | en | code | 0 | github-code | 36 |
830016519 | #!/usr/bin/env python
import gzip
import sys
import tarfile
import threading
import urllib.request
import zipfile
import lib.download.task as task
import lib.independence.fs as fs
import lib.ui.color as printer
# The greater purpose of (functions in) this file is
# to download a list of DownloadTasks
class Downloader(object):
    '''Object to facilitate downloading'''

    # Constructor
    # tasklist is the list of DownloadTasks which should be downloaded
    def __init__(self, tasklist):
        self.tasklist = tasklist

    # Returns True if the name suggests it is a tar archive, otherwise False
    def is_tar(self, name):
        return name.endswith('.tar') or name.endswith('tar.xz')\
        or name.endswith('tar.gz') or name.endswith('.tgz')

    # Returns True if the name suggests it is a zip archive, otherwise False
    def is_zip(self, name):
        return name.endswith('.zip')

    # Returns True if the name suggests it is a gzip archive, otherwise False
    # (checked after is_tar, so .tar.gz is handled by the tar branch)
    def is_gzip(self, name):
        return name.endswith('.gz')

    # Extract output of downloaded file, if it has a compatible format.
    # Afterwards the archive is removed, and if extraction produced a single
    # wrapper directory its contents are hoisted into task.directory.
    # task is the task for which the file is downloaded
    def extract(self, task):
        target = fs.join(task.directory,task.name)
        basicstring = printer.format('extracter', printer.Color.CAN)
        extractstring = printer.format('extracting', printer.Color.YEL)
        print('[{0}] {1} {2}'.format(basicstring, extractstring, task.name))
        if self.is_tar(task.url):
            ttar = tarfile.open(target, 'r')
            ttar.extractall(path=task.directory)
        elif self.is_zip(task.url):
            tzip = zipfile.ZipFile(target, 'r')
            tzip.extractall(task.directory)
            tzip.close()
        elif self.is_gzip(task.url):
            with gzip.open(target, 'rb') as f_in:
                # NOTE(review): this opens the *directory* path for writing,
                # which should fail; the output target looks wrong (probably
                # task.name without the .gz suffix) — confirm. Also confirm
                # fs.cp accepts open file objects.
                with open(task.directory, 'wb') as f_out:
                    fs.cp(f_in, f_out)
        else:
            return
        finishedstring = printer.format('extracted', printer.Color.GRN)
        print('[{0}] {1} {2}'.format(basicstring, finishedstring, task.name))
        fs.rm(fs.join(task.directory, task.name))
        # If everything landed in one wrapper directory, flatten it.
        dircontents = fs.ls(task.directory)
        if len(dircontents) == 1 and fs.isdir(task.directory,dircontents[0]):
            subdircontents = fs.ls(task.directory,dircontents[0])
            for file in subdircontents:
                path = fs.join(task.directory,dircontents[0])
                fs.mv(fs.join(path,file), task.directory)
            fs.rm(task.directory, dircontents[0], ignore_errors=True)

    # Downloads a DownloadTask and prints some user information
    # task is the downloadtask which contains the download information
    def download(self, task):
        basicstring = printer.format('downloader', printer.Color.CAN)
        downloadstring = printer.format('downloading', printer.Color.YEL)
        print('[{0}] {1} {2}'.format(basicstring, downloadstring, task.name))
        u = urllib.request.urlopen(task.url)
        with open(fs.join(task.directory,task.name), 'wb') as out_file:
            out_file.write(u.read())
        finishedstring = printer.format('downloaded', printer.Color.GRN)
        print('[{0}] {1} {2}'.format(basicstring, finishedstring, task.name))

    # Parallel function, which is called to download all tasks
    # task is the downloadtask which should be operated on
    def parallel_exec(self, task):
        self.download(task)
        if self.is_tar(task.url)\
        or self.is_zip(task.url)\
        or self.is_gzip(task.url):
            self.extract(task)

    # Main function to call. Each DownloadTask will be performed
    # (one thread per task; join waits for all of them)
    def download_all(self):
        threads = []
        for task in self.tasklist:
            threads.append(threading.Thread(target=self.parallel_exec, args=(task,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
800426611 | import mtcnn
from mtcnn.mtcnn import MTCNN
import cv2
# Live face detection demo: read frames from the default camera, run the
# MTCNN detector on each frame and draw a red bounding box per face.
detector = MTCNN() # MTCNN is a CNN-based face detector
video = cv2.VideoCapture(0)
# Property 3 = frame width, property 4 = frame height.
video.set(3,2000)
video.set(4,3000) # same resolution setup as the previous technique
while (True):
    ret, frame = video.read()
    if ret == True:
        location = detector.detect_faces(frame) # detect faces frame by frame
        if len(location) > 0:
            for face in location:
                x, y, width, height = face['box']
                x2, y2 = x + width, y + height
                cv2.rectangle(frame, (x, y), (x2, y2), (0, 0, 255), 4) # draw the bounding box
        cv2.imshow("Output",frame) # refresh the "Output" window with each new frame
        if cv2.waitKey(1) & 0xFF == ord(' '): # space bar quits, as before
            break
    else:
        break
video.release() # release the camera device
cv2.destroyAllWindows() # close all OpenCV windows
# Result: MTCNN gives precise detections,
# so this is the algorithm we go with.
43763780283 | # -*-coding:utf8-*-
################################################################################
#
#
#
################################################################################
"""
模块用法说明:达人推荐详情页
Authors: Turinblueice
Date: 2016/9/10
"""
from base import base_frame_view
from util import log
from gui_widgets.basic_widgets import frame_layout
from gui_widgets.basic_widgets import image_view
from gui_widgets.basic_widgets import text_view
from gui_widgets.basic_widgets import linear_layout
from gui_widgets.basic_widgets import recycler_view
from gui_widgets.basic_widgets import radio_button
from appium.webdriver import WebElement
from appium.webdriver.common import touch_action
from selenium.webdriver.common.touch_actions import TouchActions
from activities import activities
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
class TalentRecommendActivity(base_frame_view.BaseFrameView):
    """
    Summary:
        Talent-recommendation page (appium page object).
    Attributes:
    """
    name = '.module.square.activity.EretarActivity' # activity class name (original comment said "crop image activity" — likely copy-pasted)

    def __init__(self, parent):
        super(TalentRecommendActivity, self).__init__(parent)
        # Wait for the page to finish initializing before locating views.
        self.wait_for_element_present(self.base_parent, id='com.jiuyan.infashion:id/login_tv_title')
        self._scroll_view = recycler_view.RecyclerView(self.parent, id='com.jiuyan.infashion:id/square_rv_tag')

    @property
    def talent_recommend(self):
        """
        Summary:
            Talent-recommendation title element.
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title'
        return text_view.TextView(self.parent, id=id_)

    @property
    def talent_apply(self):
        """
        Summary:
            "Apply to become a talent" button.
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title_right'
        return text_view.TextView(self.parent, id=id_)

    @property
    def back_button(self):
        """
        Summary:
            Back button.
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title_left'
        return text_view.TextView(self.parent, id=id_)

    @property
    def talent_list(self):
        """
        Summary:
            List of recommended-talent containers.
        :return:
        """
        xpath_ = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                 'android.widget.LinearLayout'
        return TalentContainerList(self.base_parent, xpath=xpath_).item_list

    @property
    def category_list(self):
        """
        Summary:
            List of talent categories.
        :return:
        """
        xpath_ = '//android.widget.ListView[@resource-id="com.jiuyan.infashion:id/square_rv_menu"]/' \
                 'android.widget.LinearLayout'
        return TalentCategoryList(self.base_parent, xpath=xpath_).item_list

    # **************************operations*****************************
    def wait_for_talent(self, timeout=10):
        """
        Summary:
            Explicitly wait until the talent list has loaded.
        :return: True when loaded within *timeout* seconds, else False.
        """
        xpath_ = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                 'android.widget.LinearLayout'
        if self.wait_for_element_present(self.base_parent, timeout=timeout, xpath=xpath_):
            log.logger.info("达人列表已记载")
            return True
        log.logger.error("达人列表加载失败")
        return False

    def tap_back_button(self):
        """
        Summary:
            Tap the back button and verify we return to the topic
            category page.
        """
        log.logger.info("开始点击返回按钮")
        self.back_button.tap()
        log.logger.info("完成返回按钮点击")
        if self.wait_activity(activities.ActivityNames.SQUARE_CATEGORY, 10):
            log.logger.info("成功返回到话题分类页面")
            return True
        log.logger.error("返回失败")
        return False
class TalentContainer(base_frame_view.BaseFrameView):
    """
    Summary:
        One recommended-talent entry (avatar, name, follow button, photos).
    """
    def __init__(self, parent, item=None, index=None, **kwargs):
        super(TalentContainer, self).__init__(parent)
        self._layout_view = item if isinstance(item, WebElement) else self.find_element(**kwargs)
        self._index = index
        # XPath of this entry inside the RecyclerView (1-based position).
        self._xpath = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                      'android.widget.LinearLayout[{}]'.format(self._index+1)

    @property
    def talent_name(self):
        """
        Summary:
            The talent's display name (text).
        """
        id_ = 'com.jiuyan.infashion:id/square_tv_name'
        return text_view.TextView(self._layout_view, id=id_).text

    @property
    def talent_avatar(self):
        """
        Summary:
            The talent's avatar image view.
        """
        id_ = 'com.jiuyan.infashion:id/transition_avatar_id'
        return image_view.ImageView(self._layout_view, id=id_)

    @property
    def follow_button(self):
        """
        Summary:
            Follow button.
        """
        id_ = 'com.jiuyan.infashion:id/square_tv_attention'
        return radio_button.RadioButton(self._layout_view, id=id_)

    @property
    def image_list(self):
        """
        Summary:
            The talent's photo thumbnails.
        """
        return image_view.ImageViewList(self._layout_view, id='com.jiuyan.infashion:id/login_iv_pic').image_list

    # ********************operations*************************
    def tap_avatar(self):
        """
        Summary:
            Tap the avatar and verify the friend's diary page opens.
        """
        curr_name = self.talent_name
        log.logger.info("开始点击\"{}\"的头像".format(curr_name))
        self.talent_avatar.tap()
        log.logger.info("点击完毕")
        if self.base_parent.wait_activity(activities.ActivityNames.DIARY_INFO, 10):
            log.logger.info("成功进入好友in记页面")
            return True
        log.logger.error("进入好友in记页面失败")
        return False

    def tap_image(self, index):
        """
        Tap one of the talent's photos.
        Args:
            index:
                0-based index into image_list.
        Returns:
            True when the friend-photo detail page opens, else False.
        """
        log.logger.info("点击第{}张".format(index+1))
        self.image_list[index].tap()
        log.logger.info("完成点击")
        if self.base_parent.wait_activity(activities.ActivityNames.FRIEND_PHOTO_DETAIL, 10):
            log.logger.info("成功进入好友照片页面")
            return True
        log.logger.error("进入好友照片页面失败")
        return False

    def tap_follow_button(self):
        """
        Summary:
            Tap the follow button and wait until it reads as selected.
        """
        log.logger.info("开始点击关注")
        self.follow_button.tap()
        time.sleep(2)
        log.logger.info("点击完毕")
        try:
            WebDriverWait(self.base_parent, 10).until(
                EC.element_located_to_be_selected(
                    (MobileBy.XPATH, self._xpath+'/android.widget.RelativeLayout[1]/android.widget.RadioButton[1]')
                )
            )
            return True
        # NOTE(review): bare except swallows every error, including
        # TimeoutException and unexpected driver failures — confirm intended.
        except:
            return False
class TalentContainerList(base_frame_view.BaseFrameView):
    """Collection wrapper around all talent cards on the page."""

    def __init__(self, parent, **kwargs):
        super(TalentContainerList, self).__init__(parent)
        self.__list = self.find_elements(**kwargs)

    @property
    def item_list(self):
        """Return a TalentContainer per located element, or None when empty."""
        if not self.__list:
            return None
        return [TalentContainer(element.parent, element, position)
                for position, element in enumerate(self.__list)]
class TalentCategory(base_frame_view.BaseFrameView):
    """
    Summary:
        达人分类 — a single talent-category entry in the left-hand menu.
    """
    def __init__(self, parent, item=None, **kwargs):
        super(TalentCategory, self).__init__(parent)
        self._layout_view = item if isinstance(item, WebElement) else self.find_element(**kwargs)

    @property
    def title(self):
        """The category's display name."""
        menu_id = 'com.jiuyan.infashion:id/square_tv_tag_menu'
        return text_view.TextView(self._layout_view, id=menu_id).text

    # ********************操作方法*************************
    def tap(self):
        """Tap this category and wait for its talent list to (re)load."""
        category_title = self.title
        log.logger.info("开始点击\"{}\"".format(category_title))
        self._layout_view.click()
        talent_xpath = ('//android.support.v7.widget.RecyclerView'
                        '[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/'
                        'android.widget.LinearLayout')
        if not self.wait_for_element_present(self.base_parent, xpath=talent_xpath):
            log.logger.error("达人列表初始化失败")
            return False
        # 点击左侧不同的达人类别后,等待右侧达人初始化加载
        log.logger.info("\"{}\"的达人已加载成功".format(category_title))
        return True
class TalentCategoryList(base_frame_view.BaseFrameView):
    """Collection wrapper around the talent-category menu entries."""

    def __init__(self, parent, **kwargs):
        super(TalentCategoryList, self).__init__(parent)
        self.__list = self.find_elements(**kwargs)

    @property
    def item_list(self):
        """Return a TalentCategory per located element, or None when empty."""
        if not self.__list:
            return None
        return [TalentCategory(element.parent, element) for element in self.__list]
| turinblueice/androidUIAutoTest | activities/discover_details_activities/talent_recommend_activity.py | talent_recommend_activity.py | py | 9,563 | python | en | code | 5 | github-code | 36 |
23497344492 | from io import BytesIO
from PIL import Image
from uuid import uuid4
from django.core.files import File
JPEG_IMAGE_QUALITY = 100
def crop_image(image):
    """Crop ``image`` to a square and return it as a Django JPEG ``File``.

    NOTE(review): the crop box is anchored at (0, 0), i.e. the top-left
    corner, not centered — confirm that is the intended behaviour.
    """
    picture = Image.open(image)
    side = min(picture.height, picture.width)
    image_name = _generate_random_file_name()
    square = picture.convert("RGB").crop((0, 0, side, side))
    buffer = BytesIO()
    square.save(buffer, "JPEG", quality=JPEG_IMAGE_QUALITY)
    return File(buffer, image_name)
def _generate_random_file_name():
return str(uuid4()) + ".jpg"
| adrianeriksen/photographic | photographic/photos/utils.py | utils.py | py | 610 | python | en | code | 0 | github-code | 36 |
6800272551 | import yaml
from populate.populator.common.errors import ConfigurationError
from .projects_manager import ProjectsManager
def project_constructor(loader, node):
    """YAML constructor resolving a project node to a project object.

    Accepts either a scalar node (non-empty string) or a mapping node
    (non-empty dict); anything else raises ConfigurationError.
    """
    if isinstance(node, yaml.ScalarNode):
        item = loader.construct_scalar(node)
        if not isinstance(item, str) or not item:
            raise ConfigurationError(
                'value {} cannot be interpreted as project'.format(item))
    elif isinstance(node, yaml.MappingNode):
        item = loader.construct_mapping(node)
        if not isinstance(item, dict) or not item:
            raise ConfigurationError(
                'value {} cannot be interpreted as project'.format(item))
    else:
        # Bug fix: an unexpected node type previously fell through and
        # crashed with NameError on the unbound ``item``; fail clearly.
        raise ConfigurationError(
            'value {} cannot be interpreted as project'.format(node))
    return ProjectsManager().get_object(item)
| tomasgarzon/exo-services | service-exo-projects/populator/projects/project_loader.py | project_loader.py | py | 715 | python | en | code | 0 | github-code | 36 |
10496867860 | import tensorflow as tf
from model.model_builder import ModelBuilder
from utils.model_post_processing import merge_post_process
from tensorflow.keras.models import Model
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from utils.priors import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--backbone_name", type=str, help="Pretrained backbone name\
| model_name : description | \
[ 1. mobilenetv2 : MobileNetV2 ]\
[ 2. mobilenetv3s : MobileNetV3-Small ] \
[ 3. mobilenetv3l : MobileNetV3-Large ] \
[ 4. efficient_lite_v0 : EfficientNet-Lite-B0 ]\
[ 5. efficientnetv2b0 : EfficientNet-V2-B0 ]\
[ 6. efficientnetv2b3 : EfficientNet-V2-B3 ]",
default='efficient_lite_v0')
parser.add_argument("--checkpoint_dir", type=str, help="Set the model storage directory",
default='./checkpoints/')
parser.add_argument("--model_weights", type=str, help="Saved model weights directory",
default='0906/_0906_efficient_lite_v0_display-detection_e200_lr0.001_b32_without-norm-small_prior-adam_best_loss.h5')
parser.add_argument("--num_classes", type=int, help="Set num classes for model and post-processing",
default=4)
parser.add_argument("--image_size", type=tuple, help="Set image size for priors and post-processing",
default=(300, 300))
parser.add_argument("--gpu_num", type=int, help="Set GPU number to use(When without distribute training)",
default=0)
parser.add_argument("--frozen_dir", type=str, help="Path to save frozen graph transformation result",
default='./checkpoints/converted_frozen_graph/')
parser.add_argument("--frozen_name", type=str, help="Frozen graph file name to save",
default='frozen_graph')
parser.add_argument("--include_postprocess", help="Frozen graph file name to save",
action='store_true')
parser.add_argument("--load_keras_model", help="Load model from Saved format(.pb) or Keras(.h5)",
action='store_true')
args = parser.parse_args()
if __name__ == '__main__':
    # Allow ops without a GPU kernel to fall back to CPU; run eagerly so
    # the conversion is easier to debug.
    tf.config.set_soft_device_placement(True)
    tf.config.run_functions_eagerly(True)

    gpu_number = '/device:GPU:' + str(args.gpu_num)
    with tf.device(gpu_number):
        # Build the SSD prior boxes used by the optional post-processing head.
        spec_list = convert_spec_list()
        priors = create_priors_boxes(specs=spec_list, image_size=args.image_size[0], clamp=True)
        target_transform = MatchingPriors(priors, center_variance, size_variance, iou_threshold)

        if args.load_keras_model:
            model = tf.keras.models.load_model('./checkpoints/pruning', compile=False)
        else:
            model = ModelBuilder(image_size=args.image_size,
                                 num_classes=args.num_classes,
                                 include_preprocessing=args.include_postprocess).build_model(args.backbone_name)
            model.load_weights(args.checkpoint_dir + args.model_weights, by_name=True)
            model.summary()

        if args.include_postprocess:
            # Append decoding/NMS so the frozen graph emits final detections.
            detection_output = merge_post_process(detections=model.output,
                                                  target_transform=target_transform,
                                                  confidence_threshold=0.5,
                                                  classes=args.num_classes)
            model = Model(inputs=model.input, outputs=detection_output)

        # Path of the directory where the frozen graph is saved.
        frozen_out_path = args.frozen_dir
        # Name stem of the .pb/.pbtxt files.
        frozen_graph_filename = args.frozen_name

        # Convert the Keras model to a ConcreteFunction and freeze variables.
        full_model = tf.function(lambda x: model(x))
        full_model = full_model.get_concrete_function(
            tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
        frozen_func = convert_variables_to_constants_v2(full_model)
        frozen_func.graph.as_graph_def()
        # (the unused per-layer listing and its dangling header print were
        # dead code and have been removed)

        print("Frozen model inputs: {0}".format(frozen_func.inputs))
        print("Frozen model outputs: {0}".format(frozen_func.outputs))

        # Save both the binary (.pb) and the text (.pbtxt) graph to disk.
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pb",
                          as_text=False)
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pbtxt",
                          as_text=True)
10178056973 | import bitstring
import collections
import math
from array import array
import os
""" HGR = 280 * 192
C64 = 40*24 chars => 320 * 192 ; display 3min 20 (2000 images) sec instead of 3min 40 (2200)
Video = 192 * 160 (24*20)
"""
if os.name == 'nt':
IMG_PREFIX = r'c:/PORT-STC/PRIVATE/tmp'
FFMPEG = r'c:\PORT-STC\opt\ffmpeg-20181221-53d3a1c-win64-static\bin\ffmpeg' # -i bad_apple_original.mp4 -an -vf fps=10,scale=36:46 c:\port-stc\private\tmp\bad_apple%05d.png'
else:
IMG_PREFIX = '/tmp'
FFMPEG = 'ffmpeg'
DISK_SIZE = 143360
class SpecialTiles:
    """Groups the three tile codes that get dedicated run-length encodings."""

    def __init__(self, black, white, transparent):
        self.black = black
        self.white = white
        self.transparent = transparent

    def all(self):
        """Return the (black, white, transparent) tile codes as a tuple."""
        return (self.black, self.white, self.transparent)
class Stripe:
    """One run of tile codes and its run-length-compressed representation.

    Stripes hash on their raw tile data so identical stripes can be pooled;
    ``cycles`` estimates the 6502 cost of decompressing this stripe.  The
    constructor round-trips compress/decompress and asserts equality.
    """
    def __init__( self, data, special_tiles):
        self.data = data
        self._hash = hash(array('B',data).tobytes())
        self.cycles = None # Number of cycles needed to decompress the stripe
        self.stripe_id = None
        self.compressed = self._compress_stripe2( self.data, special_tiles)
        self.stripe_id2 = None
        # Sanity check: decompressing must exactly reproduce the raw data.
        decomp = self._decompress_stripe( self.compressed, special_tiles)
        assert data == decomp, "{} != {}, compressed={}".format( hex_byte(data), decomp, hex_byte(self.compressed))
        #self.compressed = self._compress_stripe2( self.data, transparent_tile)
        self.label = None
        self.frequency = 0

    def __str__(self):
        return "Stripe freq:{}x {} [hash:{}]".format( self.frequency, ",".join( ['$'+format(b,'02X') for b in self.data] ) ,self._hash)

    def __hash__(self):
        return self._hash

    def has_no_count(self):
        # Top bit set on the first compressed byte marks a "raw bytes"
        # stripe (one without a repetition-counter byte).
        return self.compressed[0] & 128 == 128

    def _decompress_stripe( self, data, special_tiles):
        """Inverse of _compress_stripe2; also tallies ``self.cycles``."""
        self.cycles = 80
        r = []
        if data[0] & 128 == 128:
            # Raw stripe: strip the marker bits back off each byte.
            self.cycles += len(data) * (63+26)
            #print("decompress raw bytes : {}".format(data))
            r.append( data[0] & 127 )
            if data[1] == 255:
                # Single-byte stripe terminated by the $FF sentinel.
                return r
            i = 1
            while True:
                r.append( data[i])
                if data[i] & 128:
                    # Top bit marks the last byte of the stripe.
                    r[-1] = r[-1] & 127
                    return r
                i += 1
            # NOTE: the return below is unreachable (the loop always returns).
            return r
        else:
            self.cycles += 111
            #print("decompress byte run")
            # Run stripe: top 3 bits select the tile, low 5 bits = count-1.
            cmd = data[0] >> 5
            cnt = (data[0] & 31) + 1
            if cmd == 0:
                color = special_tiles.white
                self.cycles += cnt * 19
            elif cmd == 1:
                color = special_tiles.black
                self.cycles += cnt * 19
            elif cmd == 2:
                color = special_tiles.transparent
                self.cycles += cnt * 19
            r = []
            r.extend( [color] * cnt)
            # A run is always followed by exactly one literal tile.
            r.append( data[1])
            return r

    def _compress_stripe2( self, values, special_tiles):
        """Compress ``values`` into either a raw-bytes or a run encoding."""
        # Some calculations :
        # There are 2200 frames
        # There are 3760 different stripes.
        # If want to know how many stripes there are in a frame at run time, I can :
        # 1/ Have a map frame -> nb stripes; which optimistically is 2200 bytes
        # 2/ I can count the length of the stripes until a I reach a frame, but this is colstly => I need to be able to tell the size
        #    of a stripe => I add a length byte => 3288 stripes need one => cost 3288 bytes
        # 3/ I can add a special strip to mark the end of a frame, but if Huffman compression
        #    doesn't work well enough (less than 8 bits for that stripe), it might be bigger than 2200.
        if len(values) <= 2 or values[0] not in special_tiles.all():
            r = [v for v in values ]
            r[0] = r[0] | 128
            # There are two ways of marking the end of a stream of tiles (see below).
            # Optimizing it this way let me spare +/- 5 kb out of 29 kb in the
            # stripe dictionary.
            if len(values) == 1:
                r.append(255)
            else:
                r[-1] = r[-1] | 128 # Basically : data byte | MARK, data_byte, ..., data_byte | MARK => I spare a counter byte.
            return r
        else:
            # We always encode : a repetition of one tile followed by a single tile.
            # I cannot use the topmost bit because it's used for stream of stripes
            # (cf above)
            if values[0] == special_tiles.white:
                cmd = 0b000 << 5  # 0x00 = 0
            elif values[0] == special_tiles.black:
                cmd = 0b001 << 5  # 100000 = 0x20
            elif values[0] == special_tiles.transparent:
                cmd = 0b010 << 5  # 1000000 = 0x40
            assert cmd & 128 == 0

            # Run over black or white bytes
            i = 0
            while i+1 < len(values) and values[i+1] == values[0]:
                i += 1

            assert len(values) > 2, "shorter run should be compressed differently"
            assert values[i] == values[0], "this is not a run of bytes that are all the same"
            assert i in (len(values)-1, len(values)-2), "{} not in {}".format( i, (len(values)-2, len(values)-1))
            assert len(values) - 1 - 1 < 2 ** 5
            # -1 because the last tile is put apart, -1 because the cnt of repetition is never 0 (so we save 1 increment)
            repeat_byte = cmd | (len(values) - 1 - 1)
            additional_tile_byte = values[ len(values) - 1]
            return [ repeat_byte, additional_tile_byte]
def ffmpeg(params):
    """Run the ffmpeg binary with ``params`` (a raw argument string).

    NOTE(review): the command line is built by string interpolation and
    executed through the shell — only pass trusted arguments.
    """
    print(params)
    command = "{} {}".format(FFMPEG, params)
    os.system(command)
def make_delta_frames_stream(frames, special_tiles, bytes_per_frame):
    """Delta-encode a stream of raw frames.

    The first frame is kept verbatim; in every later frame each tile that
    is unchanged from the previous frame is replaced by
    ``special_tiles.transparent``.

    Parameters
    ----------
    frames : sequence of int
        Concatenated frames of ``bytes_per_frame`` tiles each.
    special_tiles : SpecialTiles
        Only the ``transparent`` attribute is used.
    bytes_per_frame : int
        Tiles per frame; must divide ``len(frames)`` exactly.

    Returns
    -------
    list of int
        The delta-encoded stream, same length as ``frames``.
    """
    assert len(frames) % bytes_per_frame == 0
    stats_change = []
    delta_frames_stream = []
    delta_frames_stream.extend(frames[0:bytes_per_frame])
    for i in range(bytes_per_frame, len(frames), bytes_per_frame):
        old_f = frames[i - bytes_per_frame:i]
        f = frames[i:i + bytes_per_frame]
        delta_frame = [special_tiles.transparent if new == old else new
                       for new, old in zip(f, old_f)]
        # Track the fraction of unchanged tiles for the report below.
        unchanged = delta_frame.count(special_tiles.transparent)
        stats_change.append(100.0 * unchanged / len(delta_frame))
        delta_frames_stream.extend(delta_frame)
    # Bug fix: a stream holding exactly one frame used to divide by zero here.
    if stats_change:
        avg = sum(stats_change) / len(stats_change)
        stddev = sum(math.fabs(i - avg) for i in stats_change) / len(stats_change)
        print("unchanged avg:{}, stddev:{}".format(avg, stddev))
    return delta_frames_stream
def peek(data, i, scan_value, strip_max_len):
    """Count consecutive ``scan_value`` tiles starting at ``data[i]``.

    The count stops at the end of ``data`` and is capped at
    ``strip_max_len``; ``data`` itself is not consumed.
    """
    count = 0
    while i + count < len(data) and data[i + count] == scan_value and count < strip_max_len:
        count += 1
    return count
def pack_line(data, i, predicate, max_len):
    """Collect up to ``max_len`` consecutive tiles satisfying ``predicate``.

    Returns the collected stripe and the index just past its last tile.
    """
    stripe = []
    while i < len(data) and predicate(data[i]) and len(stripe) < max_len:
        stripe.append(data[i])
        i += 1
    return stripe, i
def pack_line_one_pixel_stop(data, i, scan_values, stop_values, max_i, strip_max_len):
    """Collect a run of ``scan_values`` tiles plus at most one extra tile.

    Somehow, picking an additional, different, pixel after a long run
    is really efficient (ie a 8% increase in compression).
    Picking 1 more is 10% more efficient.
    Picking 2 more is 30% less efficient.
    Picking n more is totally not efficient (like 100% less efficient)

    Note: ``stop_values`` is accepted for call-site symmetry but never
    consulted.
    """
    stripe = []
    # Phase 1: consume the run of identical tiles.
    while i < len(data) and i < max_i and len(stripe) < strip_max_len and data[i] == scan_values:
        stripe.append(data[i])
        i += 1
    # Phase 2: grab one trailing tile, whatever its value.
    if i < len(data) and i < max_i and len(stripe) < strip_max_len:
        stripe.append(data[i])
        i += 1
    return stripe, i
def super_pack_line(data, i, scan_value, max_stripe_length):
    """Choose between a long pure run and a short run-plus-extra-pixel stripe.

    A lookahead (``peek``) decides: runs longer than 4 become a plain
    repetition stripe capped at 31 tiles (which compresses a few bytes
    better than 32); shorter runs use the run-plus-one-stop-pixel packing.
    """
    shorter_len = 4
    bigger_len = min(max_stripe_length * 4, 31)
    assert shorter_len < bigger_len
    run_length = peek(data, i, scan_value, bigger_len)
    if run_length > shorter_len:
        # Simple tile repetition.
        stripe, i = pack_line(data, i, lambda d: d == scan_value, bigger_len)
    else:
        others = set(range(256))
        others.remove(scan_value)
        stripe, i = pack_line_one_pixel_stop(data, i, scan_value, others,
                                             i + shorter_len, max_stripe_length)
    return stripe, i
def make_stripes(data_stream, special_tiles, bytes_per_frame, max_stripe_length):
    """Cut the (delta-encoded) frame stream into Stripe objects.

    Each frame is scanned left to right: runs of the three special tiles
    (white/black/transparent) are packed with ``super_pack_line``; any
    other tiles are grouped into short literal stripes of at most 4 tiles.
    Returns the full list of Stripe objects, in stream order.
    """
    assert len(data_stream) % bytes_per_frame == 0
    all_stripes_codes = []
    # others = set( range(256))
    # others.remove( special_tiles.white)
    # others.remove( special_tiles.black)
    # others.remove( special_tiles.transparent)

    for ndx in range( 0, len(data_stream), bytes_per_frame):
        #print(ndx)
        data = data_stream[ ndx:ndx+bytes_per_frame]
        i = 0
        while i < len(data):
            if data[i] == special_tiles.transparent:
                #print("transparent")
                stripe, i = super_pack_line( data, i, special_tiles.transparent, max_stripe_length)
                #stripe, i = pack_line( data, i, [transparent_tile])
            elif data[i] == special_tiles.white:
                #print("white")
                #stripe, i = pack_line( data, i, OTHERS + [WHITE], BLACK)
                #stripe, i = pack_line( data, i, WHITE, [])
                #stripe, i = pack_line_one_pixel_stop( data, i, WHITE, OTHERS, i+MAX_STRIPE_LENGTH )
                stripe, i = super_pack_line( data, i, special_tiles.white, max_stripe_length)
            elif data[i] == special_tiles.black:
                #print("black")
                #stripe, i = pack_line( data, i, OTHERS + [BLACK], WHITE)
                #stripe, i = pack_line( data, i, BLACK, [])
                #stripe, i = pack_line_one_pixel_stop( data, i, BLACK, OTHERS, i+MAX_STRIPE_LENGTH)
                stripe, i = super_pack_line( data, i, special_tiles.black, max_stripe_length)
            else:
                # Literal tiles: capped at 4 per stripe.
                #stripe, i = pack_line( data, i, OTHERS, [BLACK[0], WHITE[0]])
                stripe, i = pack_line( data, i, lambda d : d not in special_tiles.all(), 4)
            all_stripes_codes.append( Stripe(stripe, special_tiles))
    return all_stripes_codes
def simplify_stripes(all_stripes):
    """Deduplicate stripes in place, keyed by hash.

    The first stripe seen with a given hash becomes the canonical one and
    gets a 1-based ``stripe_id``; every entry of ``all_stripes`` is then
    replaced by its canonical stripe.  Returns the hash -> stripe map.
    """
    unique_stripes = {}
    next_id = 1
    for stripe in all_stripes:
        key = hash(stripe)
        if key not in unique_stripes:
            unique_stripes[key] = stripe
            stripe.stripe_id = next_id
            next_id += 1
    for position, stripe in enumerate(all_stripes):
        all_stripes[position] = unique_stripes[hash(stripe)]
    return unique_stripes
def compute_stripes_frequencies(all_stripes):
    """Annotate each distinct stripe with its occurrence count and a label."""
    for label, (stripe, count) in enumerate(collections.Counter(all_stripes).items()):
        stripe.frequency = count
        stripe.label = label
def hex_byte(b_or_list, prefix="!byte "):
    """Format a byte — or a list/bytes of bytes — as 6502 assembler hex text."""
    if type(b_or_list) in (list, bytes):
        return prefix + ",".join("$" + format(value, "02X") for value in b_or_list)
    return "$" + format(b_or_list, "02X")
def array_to_asm(fo, a, line_prefix, label=""):
    """Write ``a`` to ``fo`` as assembler data lines, 10 values per line.

    ``line_prefix`` must be '!word' or '!byte' for numeric values; string
    values are emitted verbatim.  ``label`` optionally prefixes the header.
    """
    if type(a[0]) == str:
        fmt = "{}"
    elif line_prefix == '!word':
        fmt = "${:04x}"
    elif line_prefix == '!byte':
        fmt = "${:02x}"
    else:
        raise Exception("Unknown format {}".format(line_prefix))
    header_label = "\t{}:".format(label) if label else ""
    fo.write("{}; {} values\n".format(header_label, len(a)))
    for start in range(0, len(a), 10):
        chunk = a[start:start + 10]
        fo.write("\t{} {}\n".format(line_prefix, ", ".join(fmt.format(x) for x in chunk)))
def stats_unique_stipes(unique_stripes):
    """Print summary statistics about the deduplicated stripe set."""
    total_compressed = sum(len(s.compressed) for s in unique_stripes)
    print("{} unique stripes, stored with {} bytes, representing {} stripes".format(
        len(unique_stripes),
        total_compressed,
        sum(s.frequency for s in unique_stripes)))
    f1 = [s for s in unique_stripes if s.frequency == 1]
    f1_size = sum(len(s.data) for s in f1)
    remainder = total_compressed - f1_size + 2 * (len(unique_stripes) - len(f1))
    print("{} frequency-1 stripes, totalling {} bytes. Other stripes + index table = {} bytes => total {} bytes".format(
        len(f1), f1_size, remainder, f1_size + remainder))
    f2 = [s for s in unique_stripes if s.frequency == 2]
    print("{} frequency-2 stripes, totalling {} bytes".format(len(f2), sum(len(s.data) for s in f2)))
def simple_huffman( unique_stripes, all_stripes):
    """Encode the stripe-id stream with a 4/8/12/16-bit variable-length code.

    Stripes are renumbered by frequency (most frequent first) so the short
    codes go to the common stripes; the bit stream is written to
    ``cstripes.data``.  The decompression self-test at the bottom is dead
    code (it sits after an unconditional ``return``).
    """
    sid = 1
    # Sort stripes, most frequent first
    for s in sorted( unique_stripes.values(), key=lambda s:s.frequency, reverse=True):
        s.stripe_id2 = sid
        sid += 1

    # for s in all_stripes[0:100]:
    #     print("({},{})".format( s.stripe_id, s.stripe_id2 ))

    stream = bitstring.BitArray()
    # Code-space boundaries: 8 ids fit in 4 bits, next 64 in 8 bits,
    # next 512 in 12 bits, the rest in 16 bits.
    d1 = (2 ** 3)
    d2 = (2 ** 6) + d1
    d3 = (2 ** 9) + d2
    d1_count = d2_count = d3_count = d4_count = 0
    d1_len = d2_len = d3_len = d4_len = 0
    ndx = 0
    warn = False
    for s in all_stripes:
        sid = s.stripe_id2 - 1
        if sid < d1:
            # 0xxxb => 8 values
            bits = bitstring.BitArray(length=4, uint=sid)
            d1_count += 1
            d1_len += len(s.compressed)
        elif d1 <= sid < d2:
            # 10yy yyyy => 64 values
            bits = bitstring.BitArray(length=8, uint=0b10000000 + sid - d1)
            d2_count += 1
            d2_len += len(s.compressed)
        elif d2 <= sid < d3:
            # 110z zzzz zzzz 12 bits, 9 significant => 512 values
            bits = bitstring.BitArray(length=12, uint=0b110000000000 + sid - d2)
            d3_count += 1
            d3_len += len(s.compressed)
        elif d3 <= sid < 2 ** 13:
            # 111z zzzz zzzz zzzz 16 bits, 13 significant => 8192 values
            bits = bitstring.BitArray(length=16, uint=0b1110000000000000 + sid - d3)
            d4_count += 1
            d4_len += len(s.compressed)
        else:
            # Error !
            warn = True
            bits = bitstring.BitArray(length=16, uint=0b1111111111111111)

        # if ndx < 300:
        #     print("s# {} (b: {}) -> {} / {}".format( hex(ndx), len(stream.tobytes()), hex(bits.uint), sid))
        stream.append( bits)
        ndx += 1

    if warn:
        print("Too many stripes for the compressor ! (8192) {}".format( len(unique_stripes)))

    print("{} * 4 bits for {} bytes, {} * 8 bits for {} bytes, {} * 12 bits for {} bytes, {} * 16 bits for {} bytes".format(d1_count,d1_len,d2_count,d2_len,d3_count,d3_len,d4_count,d4_len))
    b = stream.tobytes()
    print("Bit stream simple huffman : {} stripes, {} bits, {} bytes".format( len( all_stripes), len( stream), len(b)))
    with open("cstripes.data","bw") as fo:
        fo.write( b)

    # # Allow some wrapping so that the ASM code is simpler
    # extra_bytes = 3
    # too_much = len(b) - DISK_SIZE
    # MAX = 4096
    # if too_much <= 0:
    #     too_much = MAX
    # if too_much > MAX:
    #     too_much = MAX
    # with open("compressed.a","w") as fo:
    #     array_to_asm( fo, b[0:too_much + extra_bytes], '!byte')
    # with open("cstripes.dsk","bw") as fo:
    #     fo.write( disk_2_dos( b[too_much:]))

    print("Some stripes:")
    for i in range(20):
        print( '{:04} '.format(i*16) + ' '.join([ "${:04x}".format(s.stripe_id2 - 1) for s in all_stripes[i*16:(i+1)*16]]))
    return

    # NOTE: everything below is unreachable (after the unconditional return);
    # it was a self-test decoding the stream and comparing ids.
    # Test decompression
    #print( hex_word([s.stripe_id2 for s in all_stripes[0:500]]))
    #print( hex_byte( stream.tobytes()[0:1000]))
    decomp_stream = []
    max_l = len( stream)
    ndx = 0
    while ndx < max_l:
        half_byte = stream[ndx:ndx+4].uint
        if half_byte & 0b1000 == 0:
            s = half_byte
        elif half_byte & 0b1100 == 0b1000:
            s = (half_byte & 0b0011)
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            s += d1
        elif half_byte & 0b1110 == 0b1100:
            s = (half_byte & 0b0001)
            #print( hex(s))
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            #print( hex(s))
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            #print( hex(s))
            s += d2
            #print( hex(d2))
            #print( hex(s))
        elif half_byte & 0b1110 == 0b1110:
            s = (half_byte & 0b0001)
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            s += d3
        decomp_stream.append(s)
        ndx += 4

    a = [s.stripe_id2 for s in all_stripes]
    b = decomp_stream
    for i in range( len(a)):
        if a[i] != b[i]:
            print(i)
def unique_stripes_to_asm(fo, unique_stripes):
    """Emit each stripe's compressed bytes plus the stripe index table."""
    def sort_key(stripe):
        return stripe.stripe_id2

    ordered = sorted(unique_stripes.values(), key=sort_key)
    fo.write('\n')
    for stripe in ordered:
        fo.write("stripe{}\t{}\t; [${:X}] {}\n".format(
            sort_key(stripe), hex_byte(stripe.compressed),
            sort_key(stripe) - 1, hex_byte(stripe.data, '')))
    fo.write('stripes_indices:\n')
    array_to_asm(fo, ["stripe{}".format(sort_key(s)) for s in ordered], "!word")
def stripes_to_disk(stripes):
    """Write the stripe-id stream as little-endian words on a DOS-ordered disk."""
    disk = bytearray()
    limit = min((len(stripes) // 2) - 1, (DISK_SIZE // 2) - 1)
    for stripe in stripes[0:limit]:
        word = (stripe.stripe_id2 - 1) * 2
        assert word < 65536
        disk.append(word & 0xFF)
        disk.append(word >> 8)
    # $FFFF terminator word.
    disk.append(0xFF)
    disk.append(0xFF)
    #disk.extend( bytearray( 143360 - len(disk)))
    with open("stripes.dsk", "bw") as fo:
        fo.write(disk_2_dos(disk))
def disk_2_dos(disk):
    """Reorder a linear 140 KB disk image into Apple DOS 3.3 sector order.

    The image is first truncated or zero-padded to exactly DISK_SIZE bytes,
    then each track's sectors are shuffled by the DOS interleave table.
    """
    disk = bytearray(disk)
    # Logical-to-physical interleave; the two earlier tables in the original
    # were dead (overwritten before use).  Dos order :
    # https://en.wikipedia.org/wiki/Apple_DOS
    dos_sector = [0x0, 0x7, 0xe, 0x6, 0xd, 0x5, 0xc, 0x4,
                  0xb, 0x3, 0xa, 0x2, 0x9, 0x1, 0x8, 0xf]

    if len(disk) > DISK_SIZE:
        print("Disk image too big by {} bytes, truncating...".format(len(disk) - DISK_SIZE))
        disk = disk[0:DISK_SIZE]
    elif len(disk) < DISK_SIZE:
        print("Disk image too small ({}), extending to disk size...".format(len(disk)))
        disk.extend(bytearray(DISK_SIZE - len(disk)))
    else:
        print("disk_2_dos : putting {} bytes on a disk of {}".format(len(disk), DISK_SIZE))

    disk_dos = bytearray(DISK_SIZE)
    for track in range(35):
        track_offset = track * 16 * 256
        for sector in range(16):
            dos_ofs = track_offset + dos_sector[sector] * 256
            dsk_ofs = track_offset + sector * 256
            disk_dos[dos_ofs:dos_ofs + 256] = disk[dsk_ofs:dsk_ofs + 256]
    return disk_dos
| wiz21b/badapple | utils.py | utils.py | py | 20,203 | python | en | code | 13 | github-code | 36 |
22918946072 | import unittest
import os
import lsst.utils.tests
import pandas as pd
import numpy as np
import asyncio
import matplotlib.pyplot as plt
from astropy.time import TimeDelta
from lsst.utils import getPackageDir
from lsst.summit.utils.enums import PowerState
from lsst.summit.utils.efdUtils import makeEfdClient, getDayObsStartTime, calcNextDay
from lsst.summit.utils.tmaUtils import (
getSlewsFromEventList,
getTracksFromEventList,
getAzimuthElevationDataForEvent,
plotEvent,
getCommandsDuringEvent,
TMAStateMachine,
TMAEvent,
TMAEventMaker,
TMAState,
AxisMotionState,
getAxisAndType,
_initializeTma,
)
from utils import getVcr
__all__ = [
'writeNewTmaEventTestTruthValues',
]
vcr = getVcr()
def getTmaEventTestTruthValues():
    """Get the current truth values for the TMA event test cases.

    Returns
    -------
    seqNums : `np.array` of `int`
        The sequence numbers of the events.
    startRows : `np.array` of `int`
        The _startRow numbers of the events.
    endRows : `np.array` of `int`
        The _endRow numbers of the events.
    types : `np.array` of `str`
        The event types, i.e. the ``TMAEvent.name`` of ``event.type``.
    endReasons : `np.array` of `str`
        The event end reasons, i.e. the ``TMAEvent.name`` of
        ``event.endReason``.
    """
    dataFilename = os.path.join(getPackageDir("summit_utils"),
                                "tests", "data", "tmaEventData.txt")
    columns = np.genfromtxt(dataFilename,
                            delimiter=',',
                            dtype=None,
                            names=True,
                            encoding='utf-8',
                            unpack=True)
    seqNums, startRows, endRows, types, endReasons = columns
    return seqNums, startRows, endRows, types, endReasons
def writeNewTmaEventTestTruthValues():
    """This function is used to write out the truth values for the test cases.

    If the internal event creation logic changes, these values can change, and
    will need to be updated. Run this function, and check the new values into
    git.

    Note: if you have cause to update values with this function, make sure to
    update the version number on the TMAEvent class.
    """
    dayObs = 20230531  # obviously must match the day in the test class
    events = TMAEventMaker().getEvents(dayObs)

    dataFilename = os.path.join(getPackageDir("summit_utils"),
                                "tests", "data", "tmaEventData.txt")
    with open(dataFilename, 'w') as f:
        f.write("seqNum,startRow,endRow,type,endReason" + '\n')
        for event in events:
            row = "{},{},{},{},{}".format(event.seqNum, event._startRow,
                                          event._endRow, event.type.name,
                                          event.endReason.name)
            f.write(row + '\n')
def makeValid(tma):
    """Force every uninitialized part of ``tma`` to an arbitrary valid value (1)."""
    uninit = tma._UNINITIALIZED_VALUE
    for name in tma._parts:
        if tma._parts[name] == uninit:
            tma._parts[name] = 1
def _turnOn(tma):
    """Helper function to turn TMA axes on for testing.

    Do not call directly in normal usage or code, as this just arbitrarily
    sets values to turn the axes on.

    Parameters
    ----------
    tma : `lsst.summit.utils.tmaUtils.TMAStateMachine`
        The TMA state machine model to initialize.
    """
    for axis in ('azimuth', 'elevation'):
        tma._parts[axis + 'SystemState'] = PowerState.ON
class TmaUtilsTestCase(lsst.utils.tests.TestCase):
    """Unit tests for the TMA state machine's basic validity/state logic."""

    def test_tmaInit(self):
        # A fresh state machine is invalid until every part has been set.
        tma = TMAStateMachine()
        self.assertFalse(tma._isValid)

        # setting one axis should not make things valid
        tma._parts['azimuthMotionState'] = 1
        self.assertFalse(tma._isValid)

        # setting all the other components should make things valid
        tma._parts['azimuthInPosition'] = 1
        tma._parts['azimuthSystemState'] = 1
        tma._parts['elevationInPosition'] = 1
        tma._parts['elevationMotionState'] = 1
        tma._parts['elevationSystemState'] = 1
        self.assertTrue(tma._isValid)

    def test_tmaReferences(self):
        """Check the linkage between the component lists and the _parts dict.
        """
        tma = TMAStateMachine()
        # setting one axis should not make things valid
        self.assertEqual(tma._parts['azimuthMotionState'], tma._UNINITIALIZED_VALUE)
        self.assertEqual(tma._parts['elevationMotionState'], tma._UNINITIALIZED_VALUE)
        # Writing through tma.motion must be visible in tma._parts.
        tma.motion[0] = AxisMotionState.TRACKING  # set azimuth to 0
        tma.motion[1] = AxisMotionState.TRACKING  # set azimuth to 0
        self.assertEqual(tma._parts['azimuthMotionState'], AxisMotionState.TRACKING)
        self.assertEqual(tma._parts['elevationMotionState'], AxisMotionState.TRACKING)

    def test_getAxisAndType(self):
        # check both the long and short form names work
        for s in ['azimuthMotionState', 'lsst.sal.MTMount.logevent_azimuthMotionState']:
            self.assertEqual(getAxisAndType(s), ('azimuth', 'MotionState'))
        # check in position, and use elevation instead of azimuth to test that
        for s in ['elevationInPosition', 'lsst.sal.MTMount.logevent_elevationInPosition']:
            self.assertEqual(getAxisAndType(s), ('elevation', 'InPosition'))
        for s in ['azimuthSystemState', 'lsst.sal.MTMount.logevent_azimuthSystemState']:
            self.assertEqual(getAxisAndType(s), ('azimuth', 'SystemState'))

    def test_initStateLogic(self):
        # Uninitialized machine: no motion, no capability.
        tma = TMAStateMachine()
        self.assertFalse(tma._isValid)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.canMove)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)
        self.assertEqual(tma.state, TMAState.UNINITIALIZED)

        _initializeTma(tma)  # we're valid, but still aren't moving and can't
        self.assertTrue(tma._isValid)
        self.assertNotEqual(tma.state, TMAState.UNINITIALIZED)
        self.assertTrue(tma.canMove)
        self.assertTrue(tma.isNotMoving)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)

        _turnOn(tma)  # can now move, still valid, but not in motion
        self.assertTrue(tma._isValid)
        self.assertTrue(tma.canMove)
        self.assertTrue(tma.isNotMoving)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)

        # consider manipulating the axes by hand here and testing these?
        # it's likely not worth it, given how much this exercised elsewhere,
        # but these are the only functions not yet being directly tested
        # tma._axesInFault()
        # tma._axesOff()
        # tma._axesOn()
        # tma._axesInMotion()
        # tma._axesTRACKING()
        # tma._axesInPosition()
@vcr.use_cassette()
class TMAEventMakerTestCase(lsst.utils.tests.TestCase):
    """End-to-end tests for TMAEventMaker, run against EFD data replayed from
    vcr cassettes for dayObs=20230531, so the expected counts below are pinned
    to that recorded day's data."""
    @classmethod
    @vcr.use_cassette()
    def setUpClass(cls):
        """Create the EFD client once, fetch a day's events and cache the raw data."""
        try:
            cls.client = makeEfdClient(testing=True)
        except RuntimeError:
            raise unittest.SkipTest("Could not instantiate an EFD client")
        cls.dayObs = 20230531
        # get a sample expRecord here to test expRecordToTimespan
        cls.tmaEventMaker = TMAEventMaker(cls.client)
        cls.events = cls.tmaEventMaker.getEvents(cls.dayObs) # does the fetch
        cls.sampleData = cls.tmaEventMaker._data[cls.dayObs] # pull the data from the object and test length
    @vcr.use_cassette()
    def tearDown(self):
        """Close the async influx client cleanly after each test."""
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.client.influx_client.close())
    @vcr.use_cassette()
    def test_events(self):
        """The cached day of EFD data is a DataFrame with the expected row count."""
        data = self.sampleData
        self.assertIsInstance(data, pd.DataFrame)
        self.assertEqual(len(data), 993)
    @vcr.use_cassette()
    def test_rowDataForValues(self):
        """Every data row is tagged with one of exactly six axis/component keys."""
        rowsFor = set(self.sampleData['rowFor'])
        self.assertEqual(len(rowsFor), 6)
        # hard coding these ensures that you can't extend the axes/model
        # without being explicit about it here.
        correct = {'azimuthInPosition',
                   'azimuthMotionState',
                   'azimuthSystemState',
                   'elevationInPosition',
                   'elevationMotionState',
                   'elevationSystemState'}
        self.assertSetEqual(rowsFor, correct)
    @vcr.use_cassette()
    def test_monotonicTimeInDataframe(self):
        """Timestamps in the fetched data must be strictly increasing."""
        # ensure that each row is later than the previous
        times = self.sampleData['private_efdStamp']
        self.assertTrue(np.all(np.diff(times) > 0))
    @vcr.use_cassette()
    def test_monotonicTimeApplicationOfRows(self):
        """Rows must be applied to the state machine in time order."""
        # ensure you can apply rows in the correct order
        tma = TMAStateMachine()
        row1 = self.sampleData.iloc[0]
        row2 = self.sampleData.iloc[1]
        # just running this check it is OK
        tma.apply(row1)
        tma.apply(row2)
        # and that if you apply them in reverse order then things will raise
        tma = TMAStateMachine()
        with self.assertRaises(ValueError):
            tma.apply(row2)
            tma.apply(row1)
    @vcr.use_cassette()
    def test_fullDaySequence(self):
        """A whole day of rows can be applied without hitting an unhandled state."""
        # make sure we can apply all the data from the day without falling
        # through the logic sieve
        for engineering in (True, False):
            tma = TMAStateMachine(engineeringMode=engineering)
            _initializeTma(tma)
            for rowNum, row in self.sampleData.iterrows():
                tma.apply(row)
    @vcr.use_cassette()
    def test_endToEnd(self):
        """Generated events match the stored truth values event for event."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        self.assertIsInstance(events, list)
        self.assertEqual(len(events), 200)
        self.assertIsInstance(events[0], TMAEvent)
        slews = [e for e in events if e.type == TMAState.SLEWING]
        tracks = [e for e in events if e.type == TMAState.TRACKING]
        self.assertEqual(len(slews), 157)
        self.assertEqual(len(tracks), 43)
        seqNums, startRows, endRows, types, endReasons = getTmaEventTestTruthValues()
        for eventNum, event in enumerate(events):
            self.assertEqual(event.seqNum, seqNums[eventNum])
            self.assertEqual(event._startRow, startRows[eventNum])
            self.assertEqual(event._endRow, endRows[eventNum])
            self.assertEqual(event.type.name, types[eventNum])
            self.assertEqual(event.endReason.name, endReasons[eventNum])
    @vcr.use_cassette()
    def test_noDataBehaviour(self):
        """A day with no EFD data yields an empty list and a warning log."""
        eventMaker = self.tmaEventMaker
        noDataDayObs = 19500101 # do not use 19700101 - there is data for that day!
        with self.assertLogs(level='WARNING') as cm:
            correctMsg = f"No EFD data found for dayObs={noDataDayObs}"
            events = eventMaker.getEvents(noDataDayObs)
            self.assertIsInstance(events, list)
            self.assertEqual(len(events), 0)
            msg = cm.output[0]
            self.assertIn(correctMsg, msg)
    @vcr.use_cassette()
    def test_helperFunctions(self):
        """getSlewsFromEventList/getTracksFromEventList agree with manual filtering."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        slews = [e for e in events if e.type == TMAState.SLEWING]
        tracks = [e for e in events if e.type == TMAState.TRACKING]
        foundSlews = getSlewsFromEventList(events)
        foundTracks = getTracksFromEventList(events)
        self.assertEqual(slews, foundSlews)
        self.assertEqual(tracks, foundTracks)
    @vcr.use_cassette()
    def test_getEvent(self):
        """The singular event getter returns events by seqNum and warns when absent."""
        # test the singular event getter, and what happens if the event doesn't
        # exist for the day
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        nEvents = len(events)
        event = eventMaker.getEvent(self.dayObs, 0)
        self.assertIsInstance(event, TMAEvent)
        self.assertEqual(event, events[0])
        event = eventMaker.getEvent(self.dayObs, 100)
        self.assertIsInstance(event, TMAEvent)
        self.assertEqual(event, events[100])
        with self.assertLogs(level='WARNING') as cm:
            correctMsg = f"Event {nEvents+1} not found for {self.dayObs}"
            event = eventMaker.getEvent(self.dayObs, nEvents+1)
            msg = cm.output[0]
            self.assertIn(correctMsg, msg)
    @vcr.use_cassette()
    def test_printing(self):
        """The various print/display helpers run without raising."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        # test str(), repr(), and _ipython_display_() for an event
        print(str(events[0]))
        print(repr(events[0]))
        print(events[0]._ipython_display_())
        # spot-check both a slew and a track to print
        slews = [e for e in events if e.type == TMAState.SLEWING]
        tracks = [e for e in events if e.type == TMAState.TRACKING]
        eventMaker.printEventDetails(slews[0])
        eventMaker.printEventDetails(tracks[0])
        eventMaker.printEventDetails(events[-1])
        # check the full day trick works
        eventMaker.printFullDayStateEvolution(self.dayObs)
        tma = TMAStateMachine()
        _initializeTma(tma) # the uninitialized state contains wrong types for printing
        eventMaker.printTmaDetailedState(tma)
    @vcr.use_cassette()
    def test_getAxisData(self):
        """Azimuth/elevation data can be fetched, padded, and passed to plotEvent."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        azData, elData = getAzimuthElevationDataForEvent(self.client, events[0])
        self.assertIsInstance(azData, pd.DataFrame)
        self.assertIsInstance(elData, pd.DataFrame)
        paddedAzData, paddedElData = getAzimuthElevationDataForEvent(self.client,
                                                                     events[0],
                                                                     prePadding=2,
                                                                     postPadding=1)
        self.assertGreater(len(paddedAzData), len(azData))
        self.assertGreater(len(paddedElData), len(elData))
        # just check this doesn't raise when called, and check we can pass the
        # data in
        plotEvent(self.client, events[0], azimuthData=azData, elevationData=elData)
    @vcr.use_cassette()
    def test_plottingAndCommands(self):
        """plotEvent works with and without a figure, and with command overlays."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        event = events[28] # this one has commands, and we'll check that later
        # check we _can_ plot without a figure, and then stop doing that
        plotEvent(self.client, event)
        fig = plt.figure(figsize=(10, 8))
        # just check this doesn't raise when called
        plotEvent(self.client, event, fig=fig)
        plt.close(fig)
        commandsToPlot = ['raDecTarget', 'moveToTarget', 'startTracking', 'stopTracking']
        commands = getCommandsDuringEvent(self.client, event, commandsToPlot, doLog=False)
        self.assertTrue(not all([time is None for time in commands.values()])) # at least one command
        plotEvent(self.client, event, fig=fig, commands=commands)
        del fig
    @vcr.use_cassette()
    def test_findEvent(self):
        """findEvent locates events by time, honouring the half-open interval edges."""
        eventMaker = self.tmaEventMaker
        events = eventMaker.getEvents(self.dayObs)
        event = events[28] # this one has a contiguous event before it
        time = event.begin
        found = eventMaker.findEvent(time)
        self.assertEqual(found, event)
        dt = TimeDelta(0.01, format='sec')
        # must be just inside to get the same event back, because if a moment
        # is shared it gives the one which starts with the moment (whilst
        # logging info messages about it)
        time = event.end - dt
        found = eventMaker.findEvent(time)
        self.assertEqual(found, event)
        # now check that if we're a hair after, we don't get the same event
        time = event.end + dt
        found = eventMaker.findEvent(time)
        self.assertNotEqual(found, event)
        # Now check the cases which don't find an event at all. It would be
        # nice to check the log messages here, but it seems too fragile to be
        # worth it
        dt = TimeDelta(1, format='sec')
        tooEarlyOnDay = getDayObsStartTime(self.dayObs) + dt # 1 second after start of day
        found = eventMaker.findEvent(tooEarlyOnDay)
        self.assertIsNone(found)
        # 1 second before end of day and this day does not end with an open
        # event
        tooLateOnDay = getDayObsStartTime(calcNextDay(self.dayObs)) - dt
        found = eventMaker.findEvent(tooLateOnDay)
        self.assertIsNone(found)
        # going just inside the last event of the day should be fine
        lastEvent = events[-1]
        found = eventMaker.findEvent(lastEvent.end - dt)
        self.assertEqual(found, lastEvent)
        # going at the very end of the last event of the day should actually
        # find nothing, because the last moment of an event isn't actually in
        # the event itself, because of how contiguous events are defined to
        # behave (being half-open intervals)
        found = eventMaker.findEvent(lastEvent.end)
        self.assertIsNone(found, lastEvent)
class TestMemory(lsst.utils.tests.MemoryTestCase):
    """Standard LSST memory/file-descriptor leak check; no extra tests needed."""
    pass
def setup_module(module):
    """Pytest hook: initialise the LSST test framework before this module runs."""
    lsst.utils.tests.init()
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    lsst.utils.tests.init()
    unittest.main()
| lsst-sitcom/summit_utils | tests/test_tmaUtils.py | test_tmaUtils.py | py | 17,835 | python | en | code | 4 | github-code | 36 |
from cassiopeia import Queue, Summoner, SummonersRiftArea
def test_summonersrift_map():
    """Smoke-test SummonersRiftArea.from_position on every champion kill of a
    ranked game.

    NOTE(review): goes through cassiopeia's live data pipeline — assumes
    network access and a configured Riot API key.
    """
    summoner = Summoner(name="Kalturi", region="NA")
    match = summoner.match_history(queues=[Queue.ranked_solo_fives])[0]
    for frame in match.timeline.frames:
        for event in frame.events:
            if event.type == "CHAMPION_KILL":
                # Raises if the kill position cannot be mapped to a rift area.
                SummonersRiftArea.from_position(event.position)
def test_from_match():
    """Print the map location of every participant frame in the latest match."""
    summoner = Summoner(name="Kalturi", region="NA")
    match_history = summoner.match_history
    match = match_history[0]
    timeline = match.timeline
    # The final frame is deliberately skipped — presumably it lacks usable
    # participant position data; TODO confirm against the cassiopeia docs.
    for frame in timeline.frames[:-1]:
        for pf in frame.participant_frames.values():
            print(pf.position.location)
| meraki-analytics/cassiopeia | test/test_map_location.py | test_map_location.py | py | 713 | python | en | code | 522 | github-code | 36 |
"""
A simple tool for converting 512x64x512 icemap files into vxl.
NOTE: this does NOT do the icemap footer variant. (Yet.)
GreaseMonkey, 2012 - Public Domain
"""
from __future__ import print_function
import sys, struct

# Backwards compatibility - make new code work on old version, not vice-versa
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
    # This script didn't use range() anyway, so no problem overwriting it in Py2
    import __builtin__
    range = getattr(__builtin__, "xrange")
    _ord = ord
else:
    # Indexing a bytes object in Py3 already yields ints, so _ord is identity.
    _ord = lambda x: x

USAGE_MSG = """
usage:
	python2 icemap2vxl.py in.icemap out.vxl
note: icemap file MUST be 512x64x512,
and the type information will be LOST!
"""

if len(sys.argv) <= 2:
    print(USAGE_MSG)
    exit()

infp = open(sys.argv[1], "rb")
# BUGFIX: the file is opened in binary mode, so read() returns bytes on Py3;
# comparing against a str literal was always unequal and rejected valid files.
if infp.read(8) != b"IceMap\x1A\x01":
    raise Exception("not an IceMap v1 file")

# Walk the tagged sections until the blank (seven-space) terminator tag.
while True:
    tag = infp.read(7)
    taglen = _ord(infp.read(1))
    if tag == b" "*7:
        break
    if taglen == 255:
        # Extended length: the 255 marker is followed by a 32-bit LE size.
        taglen, = struct.unpack("<I", infp.read(4))
    # BUGFIX: compare the tag against bytes, not str (same issue as above).
    if tag == b"MapData":
        xlen, ylen, zlen = struct.unpack("<HHH", infp.read(6))
        if xlen != 512 or ylen != 64 or zlen != 512:
            raise Exception("not a 512x64x512 map")
        outfp = open(sys.argv[2], "wb")
        # One RLE column per (z, x) map coordinate.
        for z in range(512):
            for x in range(512):
                k = True
                while k:
                    cblk = infp.read(4)
                    outfp.write(cblk)
                    n = _ord(cblk[0])
                    if n == 0:
                        # Last span of this column: colour count comes from
                        # the start/end range bytes instead.
                        n = _ord(cblk[2])-_ord(cblk[1])+1
                        k = False
                    else:
                        n = n - 1
                    for i in range(n):
                        s = infp.read(4)
                        # Force the fourth byte to 0x7F: type info is lost.
                        outfp.write(s[:3]+b"\x7F")
        outfp.close()
        infp.close()
        break
    else:
        infp.seek(taglen, 1) # SEEK_CUR
| iamgreaser/iceball | tools/icemap2vxl.py | icemap2vxl.py | py | 1,637 | python | en | code | 111 | github-code | 36 |
import json
from PIL import Image
import os
def main(json_file_path):
    """Crop the detected rectangles out of the first image listed in a JSON file.

    The JSON file holds a list of records, each with an ``image_path`` and a
    list of ``rects`` (``x1``/``y1``/``x2``/``y2`` plus a detection ``score``).
    Only the first record is processed.  Each rect with score > 0.0 is padded
    so the crop becomes (roughly) square with a 10px margin, then cropped.

    Returns a list of PIL Image crops.
    """
    # BUGFIX: the JSON file handle was opened and never closed; use a context
    # manager so it is released deterministically.
    with open(json_file_path) as json_file:
        json_data = json.loads(json_file.read())
    image = json_data[0]  # only the first image record is handled
    image_to_crop = Image.open(image['image_path'])
    cropped_images = []
    for r in image['rects']:
        if r['score'] > 0.0:
            width = r['x2'] - r['x1']
            height = r['y2'] - r['y1']
            # Pad the shorter side so the crop is square, plus a 10px margin.
            if width < height:
                pad_x = height - width + 10
                pad_y = 10
            else:
                pad_y = width - height + 10
                pad_x = 10
            new_image = image_to_crop.crop((r['x1'] - pad_x / 2, r['y1'] - pad_y / 2,
                                            r['x2'] + pad_x / 2, r['y2'] + pad_y / 2))
            cropped_images.append(new_image)
    return cropped_images
def transform_image_to_square(img):
    """Return *img* padded to a square by cropping beyond its bounds.

    The crop box extends symmetrically past the shorter dimension so the
    result's side equals the longer of the two original dimensions.
    """
    width, height = img.size
    side = max(width, height)
    pad_x = (side - width) / 2
    pad_y = (side - height) / 2
    return img.crop((-pad_x, -pad_y, width + pad_x, height + pad_y))
import random
# Main game loop: an empty input plays one round, anything else quits.
while True:
    answer = input('''Press Enter to play <--- Rock! Paper! and Sciccor! -->
    To 'QUIT' press any key and press enter !''')
    if answer != '':
        break
    options = ["Rock", "Paper", "Scissor"]
    # Draw a uniform random index over the three options and show the pick.
    print(options[random.randint(0, 2)])
| Kashyap03-K/Easy-and-simple-Python-games | Rock_Paper_Scissor.py | Rock_Paper_Scissor.py | py | 276 | python | en | code | 0 | github-code | 36 |
import os
from logging import (
CRITICAL,
DEBUG,
ERROR,
getLogger,
INFO,
Logger,
WARNING,
)
from pathlib import Path
from rich.logging import RichHandler
from rich.highlighter import NullHighlighter
from .config import BodyworkConfig
from .constants import (
DEFAULT_LOG_LEVEL,
DEFAULT_LOG_LEVEL_ENV_VAR,
DEFAULT_PROJECT_DIR,
LOG_TIME_FORMAT,
PROJECT_CONFIG_FILENAME,
)
from .cli.terminal import console
from .exceptions import BodyworkConfigError
def bodywork_log_factory(
    log_level: str = None,
    config_file_path: Path = DEFAULT_PROJECT_DIR / PROJECT_CONFIG_FILENAME,
) -> Logger:
    """Create a standardised Bodywork logger.

    The effective level is resolved by a waterfall of precedence: an explicit
    ``log_level`` argument wins; failing that, the level from the project
    config file; failing that, the BODYWORK_LOG_LEVEL environment variable;
    and finally the package default (INFO).

    :param log_level: The minimum severity level of messages to log,
        defaults to None.
    :param config_file_path: Path to project config file, defaults
        DEFAULT_PROJECT_DIR/PROJECT_CONFIG_FILENAME.
    """
    level_by_name = {
        "DEBUG": DEBUG,
        "INFO": INFO,
        "WARNING": WARNING,
        "ERROR": ERROR,
        "CRITICAL": CRITICAL,
    }
    logger = getLogger("bodywork")
    if log_level is not None:
        logger.setLevel(level_by_name[log_level])
    else:
        try:
            # Prefer the level declared in the project config file.
            project_config = BodyworkConfig(config_file_path)
            logger.setLevel(project_config.logging.log_level)
        except BodyworkConfigError:
            # Fall back to the environment variable; an unset or unknown
            # value resolves to the package default level.
            env_level = os.environ.get(DEFAULT_LOG_LEVEL_ENV_VAR)
            logger.setLevel(level_by_name.get(env_level, level_by_name[DEFAULT_LOG_LEVEL]))
    if not logger.hasHandlers():
        # Attach a single Rich console handler on first use only, so
        # repeated factory calls do not duplicate log output.
        rich_handler = RichHandler(
            console=console,
            show_path=False,
            highlighter=NullHighlighter(),
            omit_repeated_times=False,
            rich_tracebacks=True,
            log_time_format=LOG_TIME_FORMAT,
        )
        logger.addHandler(rich_handler)
    return logger
| bodywork-ml/bodywork-core | src/bodywork/logs.py | logs.py | py | 2,348 | python | en | code | 430 | github-code | 36 |
import os
import io
import hashlib
from base64 import standard_b64encode
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import HTTPError
from infi.pyutils.contexts import contextmanager
from infi.pypi_manager import PyPI, DistributionNotFound
from logging import getLogger
logger = getLogger()
def send_setuptools_request(repository, username, password, data):
    """POST a setuptools-style multipart/form-data upload to *repository*.

    Returns True on an HTTP 200 response, False otherwise (logging the
    failure); re-raises on low-level OS/socket errors.  The wire format
    mirrors what ``setup.py upload`` produces, byte for byte.
    """
    # code taken from distribute 40.9.0, file ./setuptools/command/upload.py
    # changed logging and return value
    # TODO use code from twine?
    # set up the authentication
    user_pass = (username + ":" + password).encode('ascii')
    # The exact encoding of the authentication string is debated.
    # Anyway PyPI only accepts ascii for both username or password.
    auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
    # Build up the MIME payload for the POST data
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = b'\r\n--' + boundary.encode('ascii')
    end_boundary = sep_boundary + b'--\r\n'
    body = io.BytesIO()
    for key, value in data.items():
        title = '\r\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(value, list):
            value = [value]
        # NOTE: the loop deliberately shadows `value` with each list element.
        for value in value:
            if type(value) is tuple:
                # A (filename, content) tuple becomes a file upload part.
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = str(value).encode('utf-8')
            body.write(sep_boundary)
            body.write(title.encode('utf-8'))
            body.write(b"\r\n\r\n")
            body.write(value)
    body.write(end_boundary)
    body = body.getvalue()
    logger.info("Submitting %s to %s" % (data['content'][0], repository))
    # build the Request
    headers = {
        'Content-type': 'multipart/form-data; boundary=%s' % boundary,
        'Content-length': str(len(body)),
        'Authorization': auth,
    }
    request = Request(repository, data=body,
                      headers=headers)
    # send the data
    try:
        result = urlopen(request)
        status = result.getcode()
        reason = result.msg
    except HTTPError as e:
        status = e.code
        reason = e.msg
    except OSError as e:
        logger.exception("")
        raise
    if status == 200:
        return True
    else:
        logger.error('Upload failed (%s): %s' % (status, reason))
        return False
def mirror_file(repository_config, filename, package_name, package_version, metadata):
    """Upload a single distribution file to the configured repository.

    Merges *metadata* with the constant fields setuptools sends, plus data
    about the file itself (content, basename, md5 digest), and posts it via
    ``send_setuptools_request``.  Returns True on success, False on failure.
    """
    # Read the file with a context manager so the handle is always closed.
    with open(filename, 'rb') as f:
        content = f.read()
    basename = os.path.basename(filename)
    data = {
        ':action': 'file_upload',
        'protocol_version': '1',
        'metadata_version': '2.1',
        'content': (basename, content),
        'md5_digest': hashlib.md5(content).hexdigest(),
        'name': package_name,
        'version': package_version,
    }
    data.update(metadata)
    repository = repository_config["repository"]
    username = repository_config.get("username", "")
    password = repository_config.get("password", "")
    # BUGFIX: the upload's boolean result was silently discarded, so
    # mirror_release/mirror_package could never report success or failure.
    return send_setuptools_request(repository, username, password, data)
@contextmanager
def temp_urlretrieve(url, localpath):
    """Download *url* to *localpath* for the duration of the ``with`` block.

    The local file is removed again on exit, even if the block raises.
    NOTE(review): the HTTP status is not checked, so an error page body
    would be saved verbatim; consider ``req.raise_for_status()``.
    """
    import requests
    logger.info("Retrieving {}".format(url))
    req = requests.get(url)
    with open(localpath, 'wb') as fd:
        fd.write(req.content)
    try:
        yield
    finally:
        os.remove(localpath)
def mirror_release(repository_config, package_name, version, version_data, release_data):
    """ mirror a release (e.g. one sdist/bdist_egg etc.) based on data retrieved from
    pypi about the package version and the release itself. """
    # prepare metadata to post, download the file, and call mirror_file which finalizes the data and
    # posts it to the server
    metadata = {
        'filetype': release_data['packagetype'],
        # 'source' releases carry no python version tag on upload.
        'pyversion': '' if release_data['python_version'] == 'source' else release_data['python_version'],
        'comment': release_data['comment_text'],
    }
    # Optional metadata fields copied over only when PyPI supplied them.
    metadata_keys = ('platform','supported_platform','summary','description',
                     'keywords','home_page','download_url','author','author_email',
                     'maintainer','maintainer_email','license','classifier', 'classifiers',
                     'requires_dist','provides_dist','obsoletes_dist',
                     'requires_python','requires_external','project_urls',
                     'provides_extras', 'description_content_type')
    for key in metadata_keys:
        if key in version_data:
            metadata[key] = version_data[key]
    # Mirror 'classifier'/'classifiers' into each other, since different
    # index servers expect different spellings of the same field.
    if "classifier" in metadata:
        metadata["classifiers"] = metadata["classifier"]
    if "classifiers" in metadata:
        metadata["classifier"] = metadata["classifiers"]
    # Download the distribution to a temporary local file and upload it.
    with temp_urlretrieve(release_data['url'], release_data['filename']):
        return mirror_file(repository_config, release_data['filename'], package_name, version, metadata)
def get_repository_config(server_name):
    """Resolve a .pypirc repository alias to its repository/username/password dict."""
    # we get a pypi repository alias but we need the url+username+password from pypirc
    # distutils does the translation, but we have to fool it a little
    from distutils.config import PyPIRCCommand
    from distutils.dist import Distribution
    pypirc = PyPIRCCommand(Distribution())
    pypirc.repository = server_name
    return pypirc._read_pypirc()
def mirror_package(server_name, package_name, version=None, pypi=None):
    """Mirror every distribution of *package_name* (latest version by default)
    from PyPI to the repository aliased *server_name* in .pypirc.

    Raises DistributionNotFound when the version has no distributions.
    Returns the logical AND of the per-release upload results.
    """
    if pypi is None:
        pypi = PyPI()
    version = version or pypi.get_latest_version(package_name)
    version_data = pypi.get_release_data(package_name, version)
    release_dataset = pypi.get_releases_for_version(package_name, version)
    repository_config = get_repository_config(server_name)
    assert repository_config, "No repository config found for '{}'".format(server_name)
    final_result = True
    if not release_dataset:
        msg = "No distributions found for {} {} (maybe you should try to build from download url?)"
        raise DistributionNotFound(msg.format(package_name, version))
    for release_data in release_dataset:
        try:
            result = mirror_release(repository_config, package_name, version, version_data, release_data)
        except Exception:
            # A single failed release should not abort the remaining uploads.
            logger.exception("Failed to upload {}".format(release_data))
            result = False
        final_result = final_result and result
    return final_result
| Infinidat/infi.pypi_manager | src/infi/pypi_manager/mirror/mirror_all.py | mirror_all.py | py | 6,623 | python | en | code | 2 | github-code | 36 |
# Given an array of unique integers, arr, where each integer arr[i] is strictly greater than 1.
# We make a binary tree using these integers, and each number may be used for any number of times. Each non-leaf node's value should be equal to the product of the values of its children.
# Return the number of binary trees we can make. The answer may be too large so return the answer modulo 10^9 + 7.
# Example 1:
# Input: arr = [2,4]
# Output: 3
# Explanation: We can make these trees: [2], [4], [4, 2, 2]
# Example 2:
# Input: arr = [2,4,5,10]
# Output: 7
# Explanation: We can make these trees: [2], [4], [5], [10], [4, 2, 2], [10, 2, 5], [10, 5, 2].
# Constraints:
# 1 <= arr.length <= 1000
# 2 <= arr[i] <= 10^9
class Solution:
    def numFactoredBinaryTrees(self, arr: List[int]) -> int:
        """Count the binary trees buildable from arr (values reusable), where
        every non-leaf node equals the product of its two children.

        DP over arr in ascending order: trees[v] = 1 (the single node) plus,
        for every factor pair left * right == v with both factors already
        seen, trees[left] * trees[right].  Runs in O(n^2); the answer is
        returned modulo 10^9 + 7.  Note: sorts *arr* in place.
        """
        MOD = 10 ** 9 + 7
        arr.sort()
        trees = {}  # value -> number of trees rooted at that value
        total = 0
        for i, root in enumerate(arr):
            count = 1  # the single-node tree
            for left in arr[:i]:
                right, remainder = divmod(root, left)
                if remainder == 0 and right in trees:
                    count += trees[left] * trees[right]
            trees[root] = count
            total += count
        return total % MOD
"""
Limpieza de datos usando Pandas
-----------------------------------------------------------------------------------------
Realice la limpieza del dataframe. Los tests evaluan si la limpieza fue realizada
correctamente. Tenga en cuenta datos faltantes y duplicados.
"""
import pandas as pd
def clean_data():
    """Load and clean the credit-request dataset.

    Reads ``solicitudes_credito.csv``, drops the CSV index column, removes
    duplicate and incomplete records, normalises the free-text categorical
    columns (idea_negocio, barrio, línea_credito) and parses the date and
    amount columns.  Returns the cleaned DataFrame.
    """

    def _normalize_text(series):
        # Collapse ' ', '-' and '_' separators into single spaces and
        # lowercase, so spelling variants of the same category compare equal.
        series = series.str.replace(' ', '_')
        series = series.str.replace('-', '_')
        series = series.str.replace('_', ' ')
        return series.str.lower()

    df = pd.read_csv("solicitudes_credito.csv", sep=";")

    # Drop the CSV's positional index column and exact duplicate rows.
    df = df.drop(['Unnamed: 0'], axis=1)
    df.drop_duplicates(inplace=True)

    # Dates are stored day-first (dd/mm/yyyy).
    df.fecha_de_beneficio = pd.to_datetime(df.fecha_de_beneficio, dayfirst=True)

    # Simple lowercase normalisation for the short categorical columns.
    df.sexo = df.sexo.str.lower()
    df.tipo_de_emprendimiento = df.tipo_de_emprendimiento.str.lower()

    # The three free-text categorical columns share one normalisation rule.
    df['idea_negocio'] = _normalize_text(df.idea_negocio)
    df['barrio'] = _normalize_text(df.barrio)
    df['línea_credito'] = _normalize_text(df.línea_credito)
    # Normalisation can make previously distinct rows identical.
    df.drop_duplicates(inplace=True)

    # Parse the credit amount: strip '$' and thousands separators.
    df.monto_del_credito = df.monto_del_credito.str.strip('$')
    df.monto_del_credito = df.monto_del_credito.str.replace(',', '')
    df.monto_del_credito = df.monto_del_credito.astype(float)
    df.drop_duplicates(inplace=True)

    # Discard records missing the key categorical fields.
    df = df.dropna(subset=["barrio"])
    df = df.dropna(subset=["tipo_de_emprendimiento"])
    return df
| ciencia-de-los-datos/data-cleaning-solicitudes-credito-paquijanoc | pregunta.py | pregunta.py | py | 3,360 | python | es | code | 0 | github-code | 36 |
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 19-4-3 下午5:28
@File : __init__.py.py
@contact : mmmaaaggg@163.com
@desc :
"""
import logging
from logging.config import dictConfig
# log settings
# Root logger configuration (logging.config.dictConfig schema, version 1):
# a rotating file handler plus a console handler sharing one verbose format.
logging_config = dict(
    version=1,
    formatters={
        'simple': {
            'format': '%(asctime)s %(levelname)s %(name)s %(filename)s.%(funcName)s:%(lineno)d|%(message)s'}
    },
    handlers={
        'file_handler':
            {
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': 'logger.log',
                'maxBytes': 1024 * 1024 * 10,  # rotate after ~10 MiB
                'backupCount': 5,  # keep five rotated files
                'level': 'DEBUG',
                'formatter': 'simple',
                'encoding': 'utf8'
            },
        'console_handler':
            {
                'class': 'logging.StreamHandler',
                'level': 'DEBUG',
                'formatter': 'simple'
            }
    },
    root={
        'handlers': ['console_handler', 'file_handler'],
        'level': logging.DEBUG,
    }
)
# logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)
# logging.getLogger('urllib3.connectionpool').setLevel(logging.INFO)
# Apply the configuration at import time: importing this package sets up logging.
dictConfig(logging_config)
if __name__ == "__main__":
    pass
| IBATS/IBATS_Utils | ibats_utils/__init__.py | __init__.py | py | 1,277 | python | en | code | 3 | github-code | 36 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
'''
Ideje kako poboljšati kod:
1. Kreirati novu klasu satelit koja je pod gravitacijskim utjecajem ostalih planeta ali ona ne utječe na njih
2. Ta klasa ima metodu boost koja ju odjednom ubrza
3. Pogledati i probati dolje zakomentiranu metodu za optimizaciju
4. Dodati tragove orbita
'''
class Body(object):
    """A gravitating body advanced with a two-history acceleration scheme.

    ``previous_acceleration`` and ``current_acceleration`` hold the two most
    recent force evaluations; both start at zero until the first evaluation.
    """

    def __init__(self, name, colour, mass, init_position, init_velocity):
        self.name = name
        self.colour = colour
        self.mass = mass
        self.position = init_position
        self.velocity = init_velocity
        self.current_acceleration = 0  # TODO: think if better initial acceleration can be found
        self.previous_acceleration = 0

    def update_position(self, timestep):
        """Advance the body's position by one timestep."""
        self.position = (self.position
                         + self.velocity * timestep
                         + 1 / 6 * (4 * self.current_acceleration
                                    - self.previous_acceleration) * timestep ** 2)

    def update_velocity(self, new_acceleration, timestep):
        """Advance the velocity; *new_acceleration* is the next step's value."""
        self.velocity = (self.velocity
                         + 1 / 6 * (2 * new_acceleration + 5 * self.current_acceleration
                                    - self.previous_acceleration) * timestep)

    def calc_KE(self):
        """Return the body's kinetic energy, 0.5 * m * |v|^2."""
        speed = np.linalg.norm(self.velocity)
        return 1 / 2 * self.mass * speed ** 2
class SmallBody(Body):
    """A body that additionally tracks its closest approach to a named body of interest."""
    def __init__(self, name, colour, mass, init_position, init_velocity, bodyOfInterest, bodyOfInterestPosition):
        super().__init__(name, colour, mass, init_position, init_velocity)
        # Closest separation seen so far, initialised to the starting distance.
        self.closestDistance = np.linalg.norm(bodyOfInterestPosition - init_position)
        # Simulation time (seconds) at which the closest approach occurred.
        self.timeToBodyOfInterest = 0
        # Name of the Body this small body is trying to reach.
        self.bodyOfInterest = bodyOfInterest
def distance_from_body(body1, body2):
    """Return the Euclidean distance between the positions of two bodies."""
    return np.linalg.norm(body1.position - body2.position)
class Simulation(object):
    def __init__(self, timestep, num_iterations):
        """Set up a simulation of *num_iterations* steps of *timestep* seconds each."""
        self.timestep = timestep
        self.num_iterations = num_iterations
        self.patches = []  # matplotlib circle patches, populated by read_input_data
        self.timeElapsed = 0  # total simulated time in seconds
    def read_input_data(self, file):
        """Opens the specific file and reads the input data. File has to be in a specific format"""
        # Expected line format (after two header lines):
        #   name, colour, mass, x;y, vx;vy, [bodyOfInterest,] Body|SmallBody
        # NOTE(review): the file handle is never closed — consider `with open(...)`.
        df = open(file, 'r')
        data = df.readlines()
        data.pop(0) # pop the first two lines of the file, they describe how the file is to be formated
        data.pop(0)
        bodies = []
        smallBodies = []
        for line in data:
            line = line.strip()
            line = line.split(',')
            # Position and velocity components are ';'-separated pairs.
            line[3] = line[3].split(';')
            line[4] = line[4].split(';')
            name, color, mass, init_position, init_velocity = line[0].strip(), line[1].strip(), float(line[2]), \
                                                              np.array([float(line[3][0].strip()),
                                                                        float(line[3][1].strip())]), \
                                                              np.array([float(line[4][0].strip()),
                                                                        float(line[4][1].strip())])
            if line[-1].strip() == 'Body':
                bodies.append(Body(name, color, mass, init_position, init_velocity))
            elif line[-1].strip() == 'SmallBody':
                # A SmallBody names the already-parsed Body it is headed for;
                # assumes that Body appears earlier in the file — TODO confirm.
                bodyOfInterest = line[-2].strip()
                for body in bodies:
                    if body.name == bodyOfInterest:
                        bodyOfInterestPosition = body.position
                smallBodies.append(SmallBody(name, color, mass, init_position, init_velocity, bodyOfInterest,
                                             bodyOfInterestPosition))
        self.body_list = bodies
        self.smallBodies = smallBodies
        for body in self.body_list:
            # create patches for each body of the system
            xpos = body.position[0]
            ypos = body.position[1]
            if body.name == 'Sun':
                # The Sun gets a single large patch; other bodies get ten
                # patches of growing radius that later serve as an orbit trail.
                self.patches.append(plt.Circle((xpos, ypos), radius=10000000000, color=body.colour, animated=True))
            else:
                for i in range(10):
                    self.patches.append(
                        plt.Circle((xpos, ypos), radius=(5000000000 / (10 - i)), color=body.colour, animated=True))
        for smallBody in self.smallBodies:
            xpos = smallBody.position[0]
            ypos = smallBody.position[1]
            for i in range(10):
                self.patches.append(
                    plt.Circle((xpos, ypos), radius=(5000000000 / (10 - i)), color=smallBody.colour, animated=True)
                )
        xmax = 0
        for body in self.body_list:
            # find the axes range
            if body.position[0] > xmax:
                xmax = body.position[0] * 1.5
            if body.position[1] > xmax:
                xmax = body.position[1] * 1.5
        self.xmax = xmax
def run_simulation(self):
# running the simulation for the inputed number of iterations
for i in range(self.num_iterations):
self.step_forward()
    def step_forward(self):
        """Advance every body and small body by one timestep.

        All positions are updated first (using the cached accelerations),
        then new accelerations are computed and velocities updated, with the
        acceleration history shifted one slot per body.
        """
        # Move the bodies one timestep
        # New positions of all the bodies are calculated first
        self.timeElapsed += self.timestep
        for body in self.body_list:
            body.update_position(self.timestep)
        for smallBody in self.smallBodies:
            smallBody.update_position(self.timestep)
        for body in self.body_list:
            new_acceleration = self.calc_acceleration(body)
            body.update_velocity(new_acceleration, self.timestep)
            # Shift the acceleration history: current becomes previous.
            body.previous_acceleration = body.current_acceleration
            body.current_acceleration = new_acceleration
        for smallBody in self.smallBodies:
            new_acceleration = self.calc_acceleration(smallBody)
            smallBody.update_velocity(new_acceleration, self.timestep)
            smallBody.previous_acceleration = smallBody.current_acceleration
            smallBody.current_acceleration = new_acceleration
            # Track the closest approach to the small body's body of interest.
            for body in self.body_list:
                if smallBody.bodyOfInterest == body.name:
                    distance = distance_from_body(smallBody, body)
                    if distance < smallBody.closestDistance:
                        smallBody.closestDistance = distance
                        smallBody.timeToBodyOfInterest = self.timeElapsed
def calc_acceleration(self, body):
# find the acceleration on a single body. Returns a np array of acceleration
forceOnBody = np.array([0.0, 0.0])
for secondBody in self.body_list:
if secondBody.name != body.name:
displacementVec = secondBody.position - body.position
distance = np.linalg.norm(displacementVec)
displacementVec = displacementVec / distance
magnitude = G * body.mass * secondBody.mass / (distance ** 2)
force = magnitude * displacementVec
forceOnBody += force
acceleration = forceOnBody / body.mass
return acceleration
    def update_display(self, i):
        """FuncAnimation callback: step the physics once and move the patches.

        For each non-Sun body the ten trail patches are shifted down one slot
        and the newest patch is placed at the current position.  The patch
        indexing assumes the Sun is the first body (one patch at index 0) —
        TODO confirm input files always list the Sun first.
        NOTE(review): the frame parameter ``i`` is shadowed by the inner loops.
        """
        # single timestep change in display
        self.step_forward()
        j = 0
        for body in self.body_list:
            if body.name == 'Sun':
                self.patches[j].center = (body.position[0], body.position[1])
            else:
                # Shift the trail: each patch takes the next one's position.
                for i in range(1, 10):
                    self.patches[(j - 1) * 10 + i].center = self.patches[(j - 1) * 10 + i + 1].center
                self.patches[j * 10].center = (body.position[0], body.position[1])
            j += 1
        for smallBody in self.smallBodies:
            for i in range(1, 10):
                self.patches[(j - 1) * 10 + i].center = self.patches[(j - 1) * 10 + i + 1].center
            self.patches[j * 10].center = (smallBody.position[0], smallBody.position[1])
            j += 1
        return self.patches
def animate(self):
    # animate the bodies for the duration of the simulation
    """Set up the matplotlib figure and drive self.update_display through
    FuncAnimation for self.num_iterations frames (50 ms per frame)."""
    plt.style.use('dark_background')
    fig = plt.figure(1)
    ax = plt.axes()
    for patch in self.patches:
        ax.add_patch(patch)
    ax.axis('scaled')
    ax.set_xlim(-self.xmax, self.xmax)
    ax.set_ylim(-self.xmax, self.xmax)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    plt.axis('off')
    # blit=True requires update_display to return the artists it changed
    anim = FuncAnimation(fig, self.update_display, frames=self.num_iterations, repeat=False, interval=50, blit=True)
    plt.show()
def calc_PE(self):
    """Return the total gravitational potential energy of the large bodies.

    The double loop visits each unordered pair twice, hence the factor of
    1/2 to correct the double counting.
    """
    total = 0
    for first in self.body_list:
        for second in self.body_list:
            if first.name == second.name:
                continue
            separation = second.position - first.position
            dist = np.linalg.norm(separation)
            total += -1 / 2 * G * first.mass * second.mass / dist
    return total
def calc_tot_energy(self):
    """Return the total mechanical energy (kinetic + potential) of the system.

    Bug fix: the original assigned the bound method itself
    (``PE = self.calc_PE``) instead of calling it, so ``KE + PE`` added a
    float to a method object and raised TypeError at runtime.
    """
    PE = self.calc_PE()  # was: self.calc_PE (missing call parentheses)
    KE = 0
    for body in self.body_list:
        KE += body.calc_KE()
    return KE + PE
def check_orbital_period(self, body):
    # Finds the orbital period of a given body using trigonometric functions. Returns a float
    """Step the whole simulation until the body's polar angle lands in
    (-0.01, 0) radians, i.e. it has come back around to just below the
    +x axis, accumulating elapsed time; returns the period in days.

    NOTE(review): a large timestep can jump over the narrow (-0.01, 0)
    window and loop forever -- confirm self.timestep is small enough.
    """
    orbital_period = 0
    while not 0 > np.arctan2(body.position[1], body.position[0]) > -0.01:
        self.step_forward()
        orbital_period += self.timestep
    orbital_period = orbital_period / 86400  # seconds -> days
    return orbital_period
def launch_sattelite(self, name, colour, mass, launchBodyName, radius, initVelocity, launchOrientation,
                     interestBody):
    """This is a function that launches a satellite from a given body. Input parameters are name, colour, mass,
    name of the body from which the satellite is to be launched, distance from the center of the body where the
    satellite is launched, initial velocity, orientation of the launch - inner if launching from the side facing the
    Sun, outer otherwise, and name of the body the satellite is trying to reach.

    Bug fix: the launch offset used tan(angle)*radius for BOTH the x and y
    components, which neither points along the Sun-body line nor stays
    bounded as the angle approaches +/-90 degrees.  The radial unit vector
    is (cos(angle), sin(angle)); 'inner' offsets towards the Sun, 'outer'
    away from it.
    """
    # locate the launch body and the body of interest by name
    for body in self.body_list:
        if interestBody == body.name:
            interestBodyName = body.name
            interestBodyPosition = body.position
        if launchBodyName == body.name:
            launchBody = body
    xBodyPos = launchBody.position[0]
    yBodyPos = launchBody.position[1]
    angle = np.arctan2(yBodyPos, xBodyPos)
    if launchOrientation == 'inner':
        sign = -1.0
    elif launchOrientation == 'outer':
        sign = 1.0
    else:
        # unknown orientation: do nothing (matches the original if/elif)
        return
    xOffset = sign * np.cos(angle) * radius
    yOffset = sign * np.sin(angle) * radius
    launchPosition = np.array([xBodyPos + xOffset, yBodyPos + yOffset])
    self.smallBodies.append(SmallBody(name, colour, mass, launchPosition, initVelocity, interestBodyName,
                                      interestBodyPosition))
    # ten patches per satellite form a fading trail (smallest drawn first)
    for i in range(10):
        self.patches.append(
            plt.Circle((launchPosition[0], launchPosition[1]), radius=(2000000000 / (10 - i)),
                       color=colour, animated=True))
G = 6.67408e-11  # gravitational constant [m^3 kg^-1 s^-2]
# NOTE(review): the constructor arguments appear to be (timestep,
# num_iterations) given the attributes used by the methods above --
# confirm against Simulation.__init__.
Universe = Simulation(200000, 2000)
Universe.read_input_data('Parameters.txt')
Universe.animate()
# print('Orbital period of the Earth is: ' + str(Universe.check_orbital_period(Universe.body_list[3])) + ' days')
# print(str(Universe.calc_tot_energy()))
# Universe.run_simulation()
| MatejVe/Solar-System-Simulation | Solar system for testing new code.py | Solar system for testing new code.py | py | 12,798 | python | en | code | 0 | github-code | 36 |
70388179303 | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from foodgram_backend.settings import STANDARTLENGTH
User = get_user_model()
class Tag(models.Model):
    """Label that can be attached to recipes; name, colour and slug are unique."""

    name = models.CharField(
        max_length=STANDARTLENGTH,
        unique=True,
        blank=False,
        verbose_name='Название тега')
    # 7 characters -- presumably a '#RRGGBB' hex colour; confirm with the UI
    color = models.CharField(
        max_length=7,
        unique=True,
        blank=False,
        verbose_name='Цвет')
    slug = models.SlugField(
        max_length=STANDARTLENGTH,
        unique=True,
        blank=False,
        verbose_name='Слаг')

    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """An ingredient with its unit of measurement; indexed by name for search."""

    name = models.CharField(
        max_length=STANDARTLENGTH,
        verbose_name='Название ингредиента',
        blank=False,
        db_index=True)
    measurement_unit = models.CharField(
        max_length=STANDARTLENGTH,
        verbose_name='Единицы измерения',
        blank=False)

    def __str__(self):
        return self.name
class Recipe(models.Model):
    """A user-authored recipe; the (name, author) pair is unique."""

    author = models.ForeignKey(
        User,
        related_name='recipes',
        on_delete=models.CASCADE,
        blank=False,
        verbose_name='Автор')
    name = models.CharField(
        max_length=STANDARTLENGTH,
        verbose_name='Название рецепта',
        blank=False,)
    image = models.ImageField(
        upload_to='recipes/images/',
        blank=False)
    text = models.TextField(
        blank=False,
        verbose_name='Описание')
    # through-models carry per-link data (e.g. ingredient amount)
    ingredients = models.ManyToManyField(
        Ingredient,
        through='RecipeIngredient',
        blank=False,)
    tags = models.ManyToManyField(
        Tag,
        through='RecipeTag',
        blank=False,
        verbose_name='Теги')
    cooking_time = models.PositiveSmallIntegerField(
        validators=[MinValueValidator(1),
                    MaxValueValidator(1000)],
        blank=False,
        verbose_name='Время приготовления',
        help_text='в минутах')
    pub_date = models.DateTimeField(
        auto_now_add=True,
        editable=False,
        verbose_name='Дата создания')

    class Meta:
        constraints = [models.UniqueConstraint(
            fields=['name', 'author'],
            name='unique_name_author')]

    def __str__(self):
        return f'{self.name} {self.author}'
class RecipeIngredient(models.Model):
    """Through-model linking a recipe to an ingredient with its amount."""

    ingredient = models.ForeignKey(
        Ingredient,
        on_delete=models.CASCADE,
        verbose_name='Ингредиент')
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        verbose_name='Рецепт')
    amount = models.PositiveSmallIntegerField(
        validators=[MinValueValidator(1),
                    MaxValueValidator(1000)],
        verbose_name='Количество')

    class Meta:
        # each ingredient appears at most once per recipe
        constraints = [models.UniqueConstraint(
            fields=['recipe', 'ingredient'],
            name='unique_recipe_ingredient')]

    def __str__(self):
        return f'{self.recipe} {self.ingredient}'
class RecipeTag(models.Model):
    """Through-model linking a recipe to a tag."""

    tag = models.ForeignKey(
        Tag,
        on_delete=models.CASCADE,
        verbose_name='Тег')
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        verbose_name='Рецепт')

    class Meta:
        # each tag appears at most once per recipe
        constraints = [models.UniqueConstraint(
            fields=['recipe', 'tag'],
            name='unique_recipe_tag')]

    def __str__(self):
        return f'{self.recipe} {self.tag}'
class ShoppingCart(models.Model):
    """Link table: a recipe placed in a user's shopping cart.

    Bug fix: the unique-constraint was named 'unique_favorite_recipe',
    which describes the Favourite model -- the two models' constraint
    names were swapped.  Renamed to an unambiguous, model-specific name
    (requires a migration).
    """

    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        verbose_name='Пользователь',
        related_name='shopping')
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        verbose_name='Рецепт продуктовой корзины',
        related_name='shopping')

    class Meta:
        # a recipe can be in a given user's cart only once
        constraints = [models.UniqueConstraint(
            fields=['recipe', 'user'],
            name='unique_shoppingcart_recipe_user')]

    def __str__(self):
        return f'{self.recipe} {self.user}'
class Favourite(models.Model):
    """Link table: a recipe marked as a favourite by a user.

    Bug fix: the unique-constraint was named
    'unique_recipe_in_shopping_cart', which describes the ShoppingCart
    model -- the two models' constraint names were swapped.  Renamed to an
    unambiguous, model-specific name (requires a migration).
    """

    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        verbose_name='Пользователь')
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        verbose_name='Избранный рецепт',
        related_name='favorite')

    class Meta:
        # a user can favourite a given recipe only once
        constraints = [models.UniqueConstraint(
            fields=['recipe', 'user'],
            name='unique_favourite_recipe_user')]

    def __str__(self):
        return f'{self.recipe} {self.user}'
| Gustcat/foodgram-project-react | backend/recipes/models.py | models.py | py | 4,848 | python | en | code | 0 | github-code | 36 |
4023780816 | from bs4 import BeautifulSoup
import urllib.request
import os
class Sachalayatan:
    """Small scraper for sachalayatan.com: collects main-navigation links
    and their pagination links into plain-text files.

    Fixes over the original: files are opened via context managers (no
    leaked handles on exceptions), reads specify UTF-8 explicitly, and
    each instance gets its own state dict (the original mutated a single
    class-level dict shared by all instances).
    """

    # Kept for backward compatibility with code reading the class
    # attribute; instances shadow it with their own dict in __init__.
    sachDS = {}

    def __init__(self, BASE_URL):
        self.sachDS = {'BASE_URL': BASE_URL}

    def getHtml(self, url=''):
        """Download `url` (or the base URL when empty) and return parsed soup."""
        target = url if len(url) > 0 else self.sachDS['BASE_URL']
        source = urllib.request.urlopen(target).read()
        return BeautifulSoup(source, 'lxml')

    def getMainNavURL(self, html):
        """Extract main-navigation hrefs, cache them and append to main_nav.txt."""
        urls = html.select("ul#subnavlist > li > a")
        urlList = [url.get('href') for url in urls]
        self.sachDS['main_nav'] = urlList
        self.writeListInFile('main_nav.txt', 'a', urlList)
        return urlList

    def getPaginationFromMainURL(self):
        """For each link saved in main_nav.txt, fetch the page and record
        its pager links (one group per nav link) in navigationlink.txt."""
        fileName = 'navigationlink.txt'
        with open('./main_nav.txt', encoding='utf-8') as navFile:
            mainNavList = [line.rstrip('\n') for line in navFile]
        if os.path.isfile(fileName) and os.access(fileName, os.R_OK):
            # truncate any previous output
            with open(fileName, 'w'):
                pass
        for nav in mainNavList:
            print('working with: ', nav)
            print(nav)
            self.writeLineInFile(fileName, 'a', nav)
            html = self.getHtml(nav)
            urls = html.select('ul.pager > li.pager-item > a')
            for url in urls:
                print(url)
                self.writeLineInFile(fileName, 'a', url.get('href'))
            self.writeLineInFile(fileName, 'a', '')

    def writeListInFile(self, fileName, mode, writeList):
        """Write each list item on its own line, then a terminating blank line."""
        with open(fileName, mode, encoding="utf-8") as txtFile:
            for line in writeList:
                txtFile.write(line + "\n")
            txtFile.write("\n")

    def writeLineInFile(self, fileName, mode, line):
        """Write a single newline-terminated line to fileName."""
        with open(fileName, mode, encoding="utf-8") as txtFile:
            txtFile.write(line + "\n")

    def showList(self, itemList):
        """Print every item of itemList."""
        for iList in itemList:
            print(iList)
# Script entry: crawl pagination links using a previously saved
# main_nav.txt (the nav-scraping step itself is commented out).
BASE_URL = 'http://www.sachalayatan.com/'
sachObj = Sachalayatan(BASE_URL=BASE_URL)
# html = sachObj.getHtml()
# sachObj.getMainNavURL(html)
sachObj.getPaginationFromMainURL()
41242548190 | # -*- coding: utf-8 -*-
# @Date : 2017-08-02 21:54:08
# @Author : lileilei
def assert_in(asserqiwang, fanhuijson):
    """Check an expectation string against a response dict.

    asserqiwang: expectation like 'code=0&msg=ok' ('&'-separated key=value
        pairs).
    fanhuijson: the response dict to check against.
    Returns 'pass' when every expected key exists and its stringified value
    matches, 'fail' (with the error text on lookup errors) otherwise, or a
    prompt string when the expectation contains no '='.
    """
    if len(asserqiwang.split('=')) > 1:
        data = asserqiwang.split('&')
        result = dict([(item.split('=')) for item in data])
        try:
            value1 = [str(fanhuijson[key]) for key in result.keys()]
            value2 = [str(value) for value in result.values()]
            if value1 == value2:
                return 'pass'
            else:
                return 'fail'
        except Exception as e:
            # Bug fix: `return 'fail ', e` returned a tuple, not a string,
            # breaking callers that compare the result to 'fail'.
            return 'fail ' + str(e)
    else:
        return '请检查断言'
def assertre(asserqingwang):
    """Parse an '&'-separated key=value expectation string into a dict,
    or return a prompt string when the input contains no '='."""
    if len(asserqingwang.split('=')) <= 1:
        return u'请填写期望值'
    pairs = asserqingwang.split('&')
    return {key: value for key, value in (pair.split('=') for pair in pairs)}
| mingming2513953126/pythondemo | FXTest-master/app/common/panduan.py | panduan.py | py | 859 | python | en | code | 0 | github-code | 36 |
13847534454 | from collections import namedtuple
# Define a namedtuple to represent search results
# (one mock person record: display name, location, job title, profile link)
SearchResult = namedtuple(
    "SearchResult", ["name", "location", "job_title", "profile_url"]
)
# Dummy data for testing
dummy_data = [
SearchResult(
name="John Smith",
location="New York, NY",
job_title="Software Engineer",
profile_url="https://example.com/johnsmith",
),
SearchResult(
name="Jane Doe",
location="San Francisco, CA",
job_title="Product Manager",
profile_url="https://example.com/janedoe",
),
SearchResult(
name="David Lee",
location="Seattle, WA",
job_title="Data Analyst",
profile_url="https://example.com/davidlee",
),
SearchResult(
name="Amy Chen",
location="Boston, MA",
job_title="Marketing Specialist",
profile_url="https://example.com/amychen",
),
SearchResult(
name="Mark Johnson",
location="Chicago, IL",
job_title="Sales Manager",
profile_url="https://example.com/markjohnson",
),
SearchResult(
name="Karen Kim",
location="Los Angeles, CA",
job_title="Graphic Designer",
profile_url="https://example.com/karenkim",
),
SearchResult(
name="Chris Taylor",
location="Austin, TX",
job_title="Software Developer",
profile_url="https://example.com/christaylor",
),
SearchResult(
name="Julia Rodriguez",
location="Miami, FL",
job_title="Project Manager",
profile_url="https://example.com/juliarodriguez",
),
SearchResult(
name="Michael Brown",
location="Denver, CO",
job_title="Business Analyst",
profile_url="https://example.com/michaelbrown",
),
SearchResult(
name="Lisa Nguyen",
location="Portland, OR",
job_title="UX Designer",
profile_url="https://example.com/lisanguyen",
),
]
grades = {
1: "Nursery 1",
2: "Nursery 2",
3: "Kindergarten 1",
4: "Kindergarten 2",
5: "Primary 1",
6: "Primary 2",
}
strands = [
{
"id": "strand-1",
"name": "Strand 1",
"substrands": [
{
"id": "substrand-1",
"name": "Substrand 1",
"topics": [
{
"id": "topic-1",
"name": "Topic 1",
"content": "This is the content for Topic 1",
},
{
"id": "topic-2",
"name": "Topic 2",
"content": "This is the content for Topic 2",
},
],
},
{
"id": "substrand-2",
"name": "Substrand 2",
"topics": [
{
"id": "topic-3",
"name": "Topic 3",
"content": "This is the content for Topic 3",
},
{
"id": "topic-4",
"name": "Topic 4",
"content": "This is the content for Topic 4",
},
],
},
],
},
{
"id": "strand-2",
"name": "Strand 2",
"substrands": [
{
"id": "substrand-3",
"name": "Substrand 3",
"topics": [
{
"id": "topic-5",
"name": "Topic 5",
"content": "This is the content for Topic 5",
},
{
"id": "topic-6",
"name": "Topic 6",
"content": "This is the content for Topic 6",
},
],
},
{
"id": "substrand-4",
"name": "Substrand 4",
"topics": [
{
"id": "topic-7",
"name": "Topic 7",
"content": "This is the content for Topic 7",
},
{
"id": "topic-8",
"name": "Topic 8",
"content": "This is the content for Topic 8",
},
],
},
],
},
]
curriculums = [
{
"subject": "Mathematics",
"grade": 1,
},
{
"subject": "English",
"grade": 2,
},
{
"subject": "Science",
"grade": 1,
},
{
"subject": "Social Studies",
"grade": 3,
},
{
"subject": "Art",
"grade": 2,
},
{
"subject": "History",
"grade": 3,
},
{
"subject": "Geography",
"grade": 1,
},
{
"subject": "Physical Education",
"grade": 2,
},
{
"subject": "Music",
"grade": 1,
},
{
"subject": "Foreign Language",
"grade": 2,
},
{
"subject": "Computer Science",
"grade": 3,
},
{
"subject": "Writing",
"grade": 1,
},
{
"subject": "Reading",
"grade": 2,
},
{
"subject": "Drama",
"grade": 3,
},
{
"subject": "Business",
"grade": 1,
},
{
"subject": "Engineering",
"grade": 2,
},
{
"subject": "Psychology",
"grade": 3,
},
{
"subject": "Philosophy",
"grade": 1,
},
{
"subject": "Marketing",
"grade": 2,
},
]
user = {
"id": 2,
"first_name": "Adwoa",
"middle_name": "Yaa",
"last_name": "Appiah",
"nickname": "Adyaa",
"full_name": "Adwoa Yaa Appiah",
"email": "adwoa.appiah@gmail.com",
"phone_number": "0241234567",
"birthdate": "1995-06-15",
"gender": "F",
"bio": "Software developer",
"friendship_status": None,
"profile_picture": "http://127.0.0.1:8000/media/users/IMG_20210920_100458_312.jpg",
"cover_picture": "http://127.0.0.1:8000/media/users/IMG_20210920_100458_312.jpg",
"school": "University of Ghana",
"education_history": ["St. Monica's Senior High School"],
"subjects": ["Computer Science", "Mathematics"],
"level": "Undergraduate",
"points": 200,
"url": "http://127.0.0.1:8000/api/users/2/",
"date_joined": "2023-03-25T09:13:36.104947Z",
"is_active": True,
"last_login": "2023-03-27T06:56:39.442993Z",
}
friends = [
{
"friend": {
"username": "johndoe",
"email": "johndoe@example.com",
"profile_picture": "/media/profile_pictures/johndoe.jpg",
}
},
{
"friend": {
"username": "janedoe",
"email": "janedoe@example.com",
"profile_picture": "/media/profile_pictures/janedoe.jpg",
}
},
{
"friend": {
"username": "bobsmith",
"email": "bobsmith@example.com",
"profile_picture": "/media/profile_pictures/bobsmith.jpg",
}
},
{
"friend": {
"username": "kwame",
"email": "kwame@example.com",
"profile_picture": "/media/profile_pictures/kwame.jpg",
"status": "online",
}
},
{
"friend": {
"username": "ama",
"email": "ama@example.com",
"profile_picture": "/media/profile_pictures/ama.jpg",
"status": "offline",
}
},
{
"friend": {
"username": "yaw",
"email": "yaw@example.com",
"profile_picture": "/media/profile_pictures/yaw.jpg",
"status": "online",
}
},
{
"friend": {
"username": "akosua",
"email": "akosua@example.com",
"profile_picture": "/media/profile_pictures/akosua.jpg",
"status": "offline",
}
},
]
album = {
"user_photos": [
{
"name": "Photo 1",
"url": "https://dummyimage.com/600x400/000/fff&text=Photo+1",
},
{
"name": "Photo 2",
"url": "https://dummyimage.com/600x400/000/fff&text=Photo+2",
},
{
"name": "Photo 3",
"url": "https://dummyimage.com/600x400/000/fff&text=Photo+3",
},
],
"user_videos": [
{
"name": "Video 1",
"url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4",
"mime_type": "video/mp4",
},
{
"name": "Video 2",
"url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_2mb.mp4",
"mime_type": "video/mp4",
},
{
"name": "Video 3",
"url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_5mb.mp4",
"mime_type": "video/mp4",
},
],
}
| Kwekuasiedu315/PROJECTS | askademy/aska/web/dummy.py | dummy.py | py | 9,177 | python | en | code | 0 | github-code | 36 |
def fixLayout(str):
    """Convert text typed in the wrong keyboard layout (EN<->RU QWERTY).

    NOTE(review): the parameter shadows the builtin `str`; name kept for
    interface compatibility.
    """
    en = "qwertyuiop[]asdfghjkl;'zxcvbnm,./"
    ru = "йцукенгшщзхъфывапролджэячсмитьбю."
    # Build a single translation table.  Russian->English pairs go in first
    # and English->Russian pairs overwrite them, reproducing the original
    # precedence: characters found in `en` (like '.') map to Russian,
    # everything else found in `ru` maps to English, and any other
    # character passes through unchanged.
    mapping = {r: e for r, e in zip(ru, en)}
    mapping.update({e: r for e, r in zip(en, ru)})
    return str.translate({ord(k): v for k, v in mapping.items()})
print(fixLayout('d%sk fh jkjао*вр')) | Grigorij-Kuzmin/Python | Раскладка клавиатуры.py | Раскладка клавиатуры.py | py | 473 | python | en | code | 0 | github-code | 36 |
33807342463 | from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout
# 0 = empty
# 1 = X
# 2 = O
board = [0,0,0,0,0,0,0,0,0]
app = QApplication([])
window = QWidget()
layout1 = QHBoxLayout()
layout2 = QHBoxLayout()
layout3 = QHBoxLayout()
layoutMain = QVBoxLayout()
buttons = [QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '),QPushButton(' '), QPushButton(' '), QPushButton(' ')]
layout1.addWidget(buttons[0])
layout1.addWidget(buttons[1])
layout1.addWidget(buttons[2])
layout2.addWidget(buttons[3])
layout2.addWidget(buttons[4])
layout2.addWidget(buttons[5])
layout3.addWidget(buttons[6])
layout3.addWidget(buttons[7])
layout3.addWidget(buttons[8])
layoutMain.addLayout(layout1)
layoutMain.addLayout(layout2)
layoutMain.addLayout(layout3)
window.setLayout(layoutMain)
bestMoveCache = {}
# Event handlers
# Nine near-identical click handlers, one per board cell; each plays the
# human move at its cell index via makeMove.
def button0_click():
    global board
    makeMove(0, board)

def button1_click():
    global board
    makeMove(1, board)

def button2_click():
    global board
    makeMove(2, board)

def button3_click():
    global board
    makeMove(3, board)

def button4_click():
    global board
    makeMove(4, board)

def button5_click():
    global board
    makeMove(5, board)

def button6_click():
    global board
    makeMove(6, board)

def button7_click():
    global board
    makeMove(7, board)

def button8_click():
    global board
    makeMove(8, board)
# Main algorithm
def minimax(p, b, depth=0):
    """Minimax search over the tic-tac-toe board `b` for player `p`.

    p: player to move (1 = human X, minimizing; 2 = AI O, maximizing).
    b: board as a flat list of 9 ints (0 empty, 1 X, 2 O); it is mutated
       during the search but restored before returning.
    Returns (best score, best move index or -1 at a terminal position,
    depth).  For non-terminal positions the third element is always 0
    (quirk of the original, preserved for interface compatibility).
    """
    # terminal positions: X win -10, O win +10, full board 0
    if boardWin(b, 1):
        return (-10, -1, depth)
    if boardWin(b, 2):
        return (10, -1, depth)
    if 0 not in b:
        return (0, -1, depth)
    spots = [i for i in range(len(b)) if b[i] == 0]
    bestmove = -1
    bestdepth = 0
    bestscore = -10000 if p == 2 else 10000
    for i in spots:
        b[i] = p  # play, recurse, undo
        if p == 2:  # AI maximizes
            score, _, _ = minimax(1, b, depth + 1)
            if score > bestscore:
                bestscore, bestmove = score, i
        else:  # human minimizes
            score, _, _ = minimax(2, b, depth + 1)
            if score < bestscore:
                bestscore, bestmove = score, i
        b[i] = 0
    return (bestscore, bestmove, bestdepth)


# Check if player p has a winning condition
# Improvement: data-driven win detection instead of eight copy-pasted
# if-statements.
_WIN_LINES = (
    (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
    (0, 4, 8), (6, 4, 2),              # diagonals
)


def boardWin(b, p):
    """Return True when player p occupies any complete row, column or diagonal."""
    return any(b[x] == p and b[y] == p and b[z] == p for x, y, z in _WIN_LINES)
# Check if the board is in a valid state
def boardValid(b):
    """Return True for reachable, non-terminal boards.  Side effect: for
    such boards, computes the best move with minimax and stores it in the
    global bestMoveCache keyed by tuple(b)."""
    global bestMoveCache
    countX = 0
    countO = 0
    for i in b:
        if i == 1:
            countX += 1
        elif i == 2:
            countO += 1
    # X moves first, so X count equals O count or exceeds it by one
    if (countX != countO) and (countX != countO + 1):
        return False
    if boardWin(b, 2):
        if boardWin(b, 1):
            return False  # both players cannot have won
        return countX == countO  # for O win, counts need to be equal
    if boardWin(b, 1):
        if countX != countO + 1:  # for X win, counts need to be unequal
            return False
    # no winner, but valid board
    print(b)
    # (an X-win board with consistent counts falls through to here and is
    # rejected by the next check -- the game is already over)
    if boardWin(b, 1) or boardWin(b, 2):
        return False
    if 0 not in b:
        # board is not counted because we cannot make more moves
        return False
    # Calculate best moves
    if countX == countO + 1:
        bestmove = minimax(2, b)
        print("2:", tuple(b), bestmove[1])
        bestMoveCache[tuple(b)] = bestmove[1]
    else:
        bestmove = minimax(1, b)
        # NOTE(review): the "2:" label is a copy-paste -- this branch
        # computes player 1's best move
        print("2:", tuple(b), bestmove[1])
        bestMoveCache[tuple(b)] = bestmove[1]
    return True
# Generate all valid possible game states
def generateValidMoves(size):
    """Enumerate every base-3 board of `size` cells and count the valid
    ones; boardValid's side effect fills bestMoveCache along the way."""
    validboards = 0
    for i in range(3**size):
        # convert to base 3
        b = []
        cur = i
        for j in range(size):
            b.insert(0, cur % 3)
            cur = cur // 3
        if boardValid(b):
            validboards += 1
    print(str(validboards) + " valid boards!")
# Make a move on the board, first human then AI
def makeMove(pos, board):
    """Play the human move at `pos`, then the AI reply from the
    precomputed bestMoveCache; updates the Qt button labels in place.

    Does nothing when the game is already over or the cell is occupied.
    """
    #global bestMoveCache
    if boardWin(board, 1) or boardWin(board, 2) or (0 not in board):
        return
    if board[pos] != 0:
        return
    else:
        # play human move
        board[pos] = 1
        buttons[pos].setText("X")
        res = boardWin(board, 1)
        if res == True:
            print("Player 1 wins!")
            return
        elif (0 not in board):
            print("Tie!")
            return
        print(board)
        # play AI move
        #print(tuple(board))
        #print(bestMoveCache)
        if tuple(board) not in bestMoveCache:
            # should not happen: every reachable position was enumerated up front
            print("AI FAIL")
            return
        else:
            aipos = bestMoveCache[tuple(board)]
            #aiscore, aipos, aidepth = minimax(2, board)
            print("AI move " + str(aipos))
            board[aipos] = 2
            buttons[aipos].setText("O")
            res = boardWin(board, 2)
            if res == True:
                print("Player 2 wins!")
                return
            elif (0 not in board):
                print("Tie!")
                return
            print(board)
buttons[0].clicked.connect(button0_click)
buttons[1].clicked.connect(button1_click)
buttons[2].clicked.connect(button2_click)
buttons[3].clicked.connect(button3_click)
buttons[4].clicked.connect(button4_click)
buttons[5].clicked.connect(button5_click)
buttons[6].clicked.connect(button6_click)
buttons[7].clicked.connect(button7_click)
buttons[8].clicked.connect(button8_click)
print("Go!")
generateValidMoves(9)
print(minimax(2, [1, 2, 1, 2, 2, 1, 0, 0, 0]))
window.show()
app.exec_()
buffer = ""
counter = 0
for i in bestMoveCache:
x = list(i)
p1_bin = []
p2_bin = []
#for j in range(9):
# if x[j] == 1:
# p1_bin.insert(0,1)
# p2_bin.insert(0,0)
# elif x[j] == 2:
# p1_bin.insert(0,0)
# p2_bin.insert(0,1)
# else:
# p1_bin.insert(0,0)
# p2_bin.insert(0,0)
for j in range(9):
if x[j] == 1:
p1_bin.append(1)
p2_bin.append(0)
elif x[j] == 2:
p1_bin.append(0)
p2_bin.append(1)
else:
p1_bin.append(0)
p2_bin.append(0)
#print("record",str(i),"board1",''.join(str(e) for e in p1_bin),"board2",''.join(str(e) for e in p2_bin),"best move",bestMoveCache[i])
print("sync_reset;\ncheck_mem("+str(counter)+",\""+''.join(str(e) for e in p1_bin)+"\",\""+''.join(str(e) for e in p2_bin)+"\","+str(bestMoveCache[i])+",\'1\'); -- " + str(i))
y = "00" + ''.join(str(e) for e in p1_bin) + ''.join(str(e) for e in p2_bin) + '{0:04b}'.format(bestMoveCache[i])
#print(y, '{0:08x}'.format(int(y, 2)))
buffer = y + buffer
counter += 1
offset = len(buffer)
f = open("tictactoe.txt", "w")
done = False
for i in range(32):
f.write("ram512x8_inst_" + str(i) + " : SB_RAM512X8\n")
f.write("generic map (\n")
if done:
break
for j in range(16):
if offset <= 0:
done = True
break
cur = ""
subtract = min(offset, 256)
offset -= subtract
cur += '{0:064X}'.format(int(buffer[offset:offset+subtract], 2))
print(cur)
f.write("INIT_" + '{0:01X}'.format(j) + " => X\"" + cur + "\"")
if j == 15:
f.write("\n")
else:
f.write(",\n")
f.write(")\n")
f.write("port map (\nRDATA => RDATA_a("+str(i)+"),\nRADDR => RADDR_c,\nRCLK => RCLK_c,\nRCLKE => RCLKE_c("+str(i)+"),\nRE => RE_c("+str(i)+"),\nWADDR => (others => \'0\'),\nWCLK=> \'0\',\nWCLKE => \'0\',\nWDATA => (others => \'0\'),\nWE => \'0\'\n);\n")
f.close()
| j-tetteroo/tictactoe-fpga | python/tictactoe.py | tictactoe.py | py | 7,748 | python | en | code | 0 | github-code | 36 |
29426080346 | import requests
from bs4 import BeautifulSoup
import csv
url = "https://www.gov.uk/search/news-and-communications"
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")

# Collect the text of every link on the results page.
titres_bs = soup.find_all('a')
titres = []
for titre in titres_bs:
    titres.append(titre.string)
print(titres)

# Write one link text per row under a 'lien' header.
# Fixes: csv.writer requires the file to be opened with newline=''
# (otherwise extra blank rows appear on Windows), and an explicit UTF-8
# encoding avoids locale-dependent UnicodeEncodeError on accented text.
en_tete = ["lien"]
with open('data.csv', 'w', newline='', encoding='utf-8') as fichier_csv:
    write = csv.writer(fichier_csv, delimiter=',')
    write.writerow(en_tete)
    for liens in titres:
        write.writerow([liens])
42242684970 | import math
import numpy as np
import matplotlib.pyplot as plt
gap_list = [5.0e-6, 7.5e-6, 10e-6]
lam_list = np.logspace(-1.0, 2.0, 20) * 1e-6  # Yukawa lengths, 0.1-100 um
print(lam_list)

# Sensitivity limits loaded from precomputed files (the first few
# wavelengths are skipped -- presumably no data files exist for them).
sens_vals_num = np.zeros((len(lam_list), len(gap_list)))
for i in range(len(gap_list)):
    for j in range(4, len(lam_list)):
        gap = gap_list[i]
        lam = lam_list[j]
        fname = 'data/lam_arr_cyl_%.3f_%.3f.npy' % (gap * 1e6, lam * 1e6)
        cval = np.load(fname)
        sigf = 1.9e-16
        sens_vals_num[j, i] = sigf / cval[0]

f0 = 1e3
m = 1e-13
# Bug fix: np.linspace requires an integer sample count -- 1e3 is a float
# and raises TypeError on modern numpy.
xvals = np.linspace(-2e-6, 2e-6, 1000)
harm_pot = 0.5 * m * (2 * math.pi * f0)**2 * xvals**2

## now assume point mass at distance d from origin
d = 10e-6
Ma = 10e-13
alpha = 1.0e16
lam = 10e-6
G = 6.67e-11
grav_pot = alpha * G * m * Ma * (2.0 * np.exp(-(d / lam)) / d - np.exp(-np.abs(d - xvals) / lam) / np.abs(d - xvals) - np.exp(-np.abs(-d - xvals) / lam) / np.abs(-d - xvals))
grav_pot_approx = -2 * alpha * G * m * Ma / d**3 * np.exp(-d / lam) * (1 + d / lam + 0.5 * (d / lam)**2) * xvals**2
print((1 + d / lam + 0.5 * (d / lam)**2))

## now fit to a quadratic term near the minimum
fit_win = [400, 600]
p1 = np.polyfit(xvals[fit_win[0]:fit_win[1]], harm_pot[fit_win[0]:fit_win[1]], 2)
print(p1)
tot_pot = harm_pot + grav_pot
p2 = np.polyfit(xvals[fit_win[0]:fit_win[1]], tot_pot[fit_win[0]:fit_win[1]], 2)
print(p2)

fig = plt.figure(33)
plt.plot(xvals, harm_pot)
plt.plot(xvals, harm_pot + grav_pot, 'r')
xx = xvals[fit_win[0]:fit_win[1]]
plt.plot(xx, np.polyval(p1, xx), 'c')
plt.plot(xx, np.polyval(p2, xx), 'm')

fig2 = plt.figure(34)
plt.plot(xvals, grav_pot)
plt.plot(xvals, grav_pot_approx, 'r')
plt.show()
| charlesblakemore/opt_lev_analysis | casimir/force_calc/plot_point_pot.py | plot_point_pot.py | py | 1,570 | python | en | code | 1 | github-code | 36 |
75127834344 | import sys
from cravat import BaseAnnotator
from cravat import InvalidData
import sqlite3
import os
class CravatAnnotator(BaseAnnotator):
    """Annotator returning 1000 Genomes European sub-population allele
    frequencies (CEU/FIN/GBR/IBS/TSI) for a variant."""

    def annotate(self, input_data):
        """Look up the variant's allele frequencies.

        input_data: dict with 'chrom', 'pos', 'ref_base', 'alt_base'.
        Returns a dict of the five frequencies, or None when the variant
        is absent from the database.
        """
        chrom = input_data['chrom']
        pos = input_data['pos']
        ref = input_data['ref_base']
        alt = input_data['alt_base']
        # The per-chromosome table name cannot be a SQL parameter, but
        # pos/ref/alt now are bound -- the original interpolated them into
        # the SQL string, which breaks on any quote character in the data
        # and is injection-prone.
        q = 'select CEU, FIN, GBR, IBS, TSI from %s where pos=? and ref=? and alt=?;' % chrom
        self.cursor.execute(q, (pos, ref, alt))
        result = self.cursor.fetchone()
        if result:
            return {
                'ceu_af': result[0],
                'fin_af': result[1],
                'gbr_af': result[2],
                'ibs_af': result[3],
                'tsi_af': result[4],
            }

    def cleanup(self):
        # No per-run resources to release.
        pass
# Script entry: construct the annotator from the CLI arguments and run it.
if __name__ == '__main__':
    annotator = CravatAnnotator(sys.argv)
    annotator.run()
14451113965 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 15:27:09 2017
@author: Administrator
"""
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
model = Sequential()  # initialize the model
# NOTE(review): Dense(in, out) is the ancient Keras 0.x signature; modern
# Keras uses Dense(units, input_dim=...) -- confirm the installed version.
model.add(Dense(20, 64))  # input layer (20 nodes) -> first hidden layer (64 nodes)
model.add(Activation('tanh'))  # fixed: was ' tanh' (leading space -> unknown activation)
model.add(Dropout(0.5))  # Dropout against overfitting
model.add(Dense(64, 64))  # first hidden layer (64) -> second hidden layer (64)
model.add(Activation('tanh'))  # fixed: was ' tanh'
model.add(Dropout(0.5))  # Dropout against overfitting
model.add(Dense(64, 1))  # second hidden layer (64) -> output layer (1 node)
model.add(Activation('sigmoid'))  # fixed: was ' sigmoid'
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)  # define the optimizer
# fixed: the loss string contained embedded spaces ('mean_ squared_ error')
model.compile(loss='mean_squared_error', optimizer=sgd)
# NOTE(review): X_train/y_train/X_test/y_test are not defined in this file
model.fit(X_train, y_train, nb_epoch=20, batch_size=16)  # train the model
score = model.evaluate(X_test, y_test, batch_size=16)  # evaluate the model
| golfbears/gameofclassname | keras_sample.py | keras_sample.py | py | 1,334 | python | zh | code | 0 | github-code | 36 |
12958605056 | import sys
from requests import get
from core.colors import bad, info, red, green, end
def honeypot(inp):
    """Query Shodan's honeyscore API for `inp` (an IP/host) and print the
    honeypot probability, colour-coded (green below 50%, red otherwise).

    NOTE(review): the API key is hard-coded; move it to configuration.
    """
    honey = 'https://api.shodan.io/labs/honeyscore/%s?key=C23OXE0bVMrul2YeqcL7zxb6jZ4pj2by' % inp
    try:
        result = get(honey).text
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt
        result = None
        sys.stdout.write('%s No information available' % bad + '\n')
    if result:
        # NOTE(review): a non-numeric API response (e.g. an error message)
        # will raise ValueError here
        if float(result) < 0.5:
            color = green
        else:
            color = red
        probability = str(float(result) * 10)
        sys.stdout.write('%s Honeypot Probabilty: %s%s%%%s' %
                         (info, color, probability, end) + '\n')
| s0md3v/ReconDog | plugins/honeypot.py | honeypot.py | py | 635 | python | en | code | 1,623 | github-code | 36 |
41133436478 | from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import *
app = QApplication([])
app.setStyle('Fusion')
window = QWidget()

# Blue button text via the palette; margins/background via the stylesheet.
palette = QPalette()
palette.setColor(QPalette.ButtonText, Qt.blue)
app.setStyleSheet("QPushButton { margin: 10ex; background-color: #4747D2 }")
app.setPalette(palette)

# Two stacked buttons in a vertical layout.
layout = QVBoxLayout()
layout.addWidget(QPushButton('Top'))
layout.addWidget(QPushButton('Bottom'))
window.setLayout(layout)
window.show()
app.exec_()
21050437162 | import unittest
import json
from app import create_app, bad_request, forbidden, not_found, unauthorized, internal_error
class APITestCase(unittest.TestCase):
    """Tests for the health/info endpoints and the error-response helpers."""

    def setUp(self):
        # fresh app, pushed context and request client per test
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()

    def tearDown(self):
        self.app_context.pop()

    def test_health_check(self):
        response = self.client.get(
            '/health',
            headers={})
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertTrue('OK' == json_response.get('status'))
        self.assertTrue({} == json_response.get('data'))

    def test_app_info(self):
        response = self.client.get(
            '/',
            headers={})
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertTrue('OK' == json_response.get('status'))
        self.assertEqual('test', json_response.get('version'))

    def test_not_found_route(self):
        # Bug fix: this method was also named test_app_info, so it silently
        # shadowed the version test above and that test never ran.
        response = self.client.get(
            '/not_found',
            headers={})
        self.assertEqual(response.status_code, 404)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(404, json_response.get('error'))
        self.assertEqual('not found', json_response.get('message'))

    def test_bad_request(self):
        msg = 'test_bad_request'
        code = 1234
        res = bad_request(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(400, res.status_code)

    def test_forbidden(self):
        msg = 'test_forbidden'
        code = 12345
        res = forbidden(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(403, res.status_code)

    def test_not_found(self):
        msg = 'test_not_found'
        code = 'âsav'
        res = not_found(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(404, res.status_code)

    def test_unauthorized(self):
        msg = 'test_unauthorized'
        code = 12345
        res = unauthorized(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(401, res.status_code)

    def test_internal_error(self):
        msg = 'test_internal_error'
        code = 12345
        res = internal_error(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(500, res.status_code)
| hungvm90/flask_tdd | tests/test_health_check_api.py | test_health_check_api.py | py | 2,834 | python | en | code | 0 | github-code | 36 |
8724066404 | """
Example of pi-IW: guided Rollout-IW, interleaving planning and learning.
"""
import numpy as np
import tensorflow as tf
from planning_step import gridenvs_BASIC_features, features_to_atoms
from online_planning import softmax_Q_tree_policy
# Function that will be executed at each interaction with the environment
def observe_pi_iw_dynamic(env, node):
    """Observe hook: cache the network policy and learned features on *node*.

    Runs the shared ``model`` on the node's observation and stores:
      - ``probs``: softmax policy over actions (flat numpy array)
      - ``features``: the feature layer discretized to boolean atoms
    """
    x = tf.constant(np.expand_dims(node.data["obs"], axis=0).astype(np.float32))
    logits, features = model(x, output_features=True)
    node.data["probs"] = tf.nn.softmax(logits).numpy().ravel()
    # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype here (discretization -> bool).
    node.data["features"] = features_to_atoms(features.numpy().ravel().astype(bool))
def observe_pi_iw_BASIC(env, node):
    """Observe hook: cache the network policy and GridEnvs BASIC features."""
    obs_batch = np.expand_dims(node.data["obs"], axis=0).astype(np.float32)
    logits = model(tf.constant(obs_batch))
    node.data["probs"] = tf.nn.softmax(logits).numpy().ravel()
    # BASIC features come from the environment itself, not the network.
    gridenvs_BASIC_features(env, node)
def planning_step(actor, planner, dataset, policy_fn, tree_budget, cache_subtree, discount_factor):
    """Expand the tree by *tree_budget* nodes, act once, and log a training example.

    Plans with *planner* guided by *policy_fn*, derives a softmax policy over
    root Q-values (temp=0, i.e. greedy), samples an action, advances the actor,
    and appends {"observations", "target_policy"} to *dataset*.

    Returns:
        (reward, done) of the executed action at the new root.
    """
    nodes_before_planning = len(actor.tree)
    # Stop once exactly tree_budget new nodes have been added this step.
    budget_fn = lambda: len(actor.tree) - nodes_before_planning == tree_budget
    planner.plan(tree=actor.tree,
                 successor_fn=actor.generate_successor,
                 stop_condition_fn=budget_fn,
                 policy_fn=policy_fn)
    tree_policy = softmax_Q_tree_policy(actor.tree, actor.tree.branching_factor, discount_factor, temp=0)
    a = sample_pmf(tree_policy)
    prev_root_data, current_root_data = actor.step(a, cache_subtree=cache_subtree)
    # The training target pairs the pre-step root observation with the
    # tree-derived policy.
    dataset.append({"observations": prev_root_data["obs"],
                    "target_policy": tree_policy})
    return current_root_data["r"], current_root_data["done"]
if __name__ == "__main__":
    import gym
    from rollout_iw import RolloutIW
    from tree import TreeActor
    from supervised_policy import SupervisedPolicy, Mnih2013
    from utils import sample_pmf
    from experience_replay import ExperienceReplay
    import gridenvs.examples  # load simple envs

    # HYPERPARAMETERS
    seed = 0
    env_id = "GE_PathKeyDoor-v0"
    use_dynamic_feats = False  # otherwise BASIC features will be used
    n_episodes = 5
    tree_budget = 20
    discount_factor = 0.99
    cache_subtree = True
    batch_size = 32
    learning_rate = 0.0007
    replay_capacity = 1000
    regularization_factor = 0.001
    clip_grad_norm = 40  # NOTE(review): defined but not passed anywhere below -- confirm
    rmsprop_decay = 0.99
    rmsprop_epsilon = 0.1

    # Set random seed
    np.random.seed(seed)
    tf.random.set_seed(seed)

    # Instead of env.step() and env.reset(), we'll use the TreeActor helper
    # class, which creates a tree and adds nodes to it.
    env = gym.make(env_id)
    observe_fn = observe_pi_iw_dynamic if use_dynamic_feats else observe_pi_iw_BASIC
    actor = TreeActor(env, observe_fn=observe_fn)
    planner = RolloutIW(branching_factor=env.action_space.n, ignore_cached_nodes=True)

    model = Mnih2013(num_logits=env.action_space.n, add_value=False)
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate,
                                            rho=rmsprop_decay,
                                            epsilon=rmsprop_epsilon)
    learner = SupervisedPolicy(model, optimizer, regularization_factor=regularization_factor, use_graph=True)
    experience_replay = ExperienceReplay(capacity=replay_capacity)

    def network_policy(node, branching_factor):
        # The planner is guided by the probabilities cached by the observe_fn.
        return node.data["probs"]

    # Initialize experience replay: run some steps until we have enough
    # examples to form one batch.
    print("Initializing experience replay", flush=True)
    actor.reset()
    while len(experience_replay) < batch_size:
        r, episode_done = planning_step(actor=actor,
                                        planner=planner,
                                        dataset=experience_replay,
                                        policy_fn=network_policy,
                                        tree_budget=tree_budget,
                                        cache_subtree=cache_subtree,
                                        discount_factor=discount_factor)
        if episode_done: actor.reset()

    # Interleave planning and learning steps
    print("\nInterleaving planning and learning steps.", flush=True)
    actor.reset()
    steps_cnt = 0
    episode_steps = 0
    episodes_cnt = 0
    while episodes_cnt < n_episodes:
        r, episode_done = planning_step(actor=actor,
                                        planner=planner,
                                        dataset=experience_replay,
                                        policy_fn=network_policy,
                                        tree_budget=tree_budget,
                                        cache_subtree=cache_subtree,
                                        discount_factor=discount_factor)
        # Learning step: one SGD update on a replay batch per environment step.
        batch = experience_replay.sample(batch_size)
        loss, _ = learner.train_step(tf.constant(batch["observations"], dtype=tf.float32),
                                     tf.constant(batch["target_policy"], dtype=tf.float32))
        steps_cnt += 1
        episode_steps += 1
        print("\n".join([" ".join(row) for row in env.unwrapped.get_char_matrix(actor.tree.root.data["s"])]),
              "Reward: ", r, "Simulator steps:", actor.nodes_generated,
              "Planning steps:", steps_cnt, "Loss:", loss.numpy(), "\n")
        if episode_done:
            print("Problem solved in %i steps (min 13 steps)." % episode_steps)
            actor.reset()
            episodes_cnt += 1
            episode_steps = 0
            if episodes_cnt < n_episodes: print("\n------- New episode -------")
9580739784 | #!/usr/bin/python3
'''
File Storage
'''
import os
import json
import models
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
# Registry mapping class names (as stored in the JSON file) to their
# constructors; used by FileStorage.reload() to rebuild live objects.
classes = {
    'BaseModel': BaseModel,
    'User': User,
    'Place': Place,
    'State': State,
    'City': City,
    'Amenity': Amenity,
    'Review': Review
}
class FileStorage:
    """Serializes instances to a JSON file and deserializes them back.

    Class attributes:
        __file_path: path of the JSON persistence file.
        __objects: maps '<ClassName>.<id>' to the live instance.
    """

    __file_path = 'file.json'
    __objects = {}

    def all(self):
        """Return the dictionary __objects."""
        return self.__objects

    def new(self, obj):
        """Store *obj* in __objects under the key '<ClassName>.<id>'."""
        # Direct assignment -- no need to build a throwaway dict for update().
        key = '{}.{}'.format(obj.__class__.__name__, obj.id)
        self.__objects[key] = obj

    def save(self):
        """Serialize __objects to the JSON file (path: __file_path)."""
        with open(self.__file_path, 'w') as f:
            json.dump({key: value.to_dict()
                       for key, value in self.__objects.items()}, f)

    def reload(self):
        """Deserialize the JSON file into __objects, if the file exists."""
        try:
            if os.path.exists(self.__file_path):
                with open(self.__file_path) as f:
                    objs = json.load(f)
                for key, value in objs.items():
                    obj_class = key.split('.')[0]
                    self.__objects[key] = classes[obj_class](**value)
        except Exception:
            # NOTE(review): deliberately best-effort -- a corrupt or
            # partially-written file is ignored rather than crashing startup.
            pass
| Davidbukz4/AirBnB_clone | models/engine/file_storage.py | file_storage.py | py | 1,815 | python | en | code | 0 | github-code | 36 |
20149744558 | # from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers, status
from holdmycomicsapi.models import User
class UserView(ViewSet):
    """HMC Users View: create, retrieve and delete User records."""

    def create(self, request):
        """CREATE a User from the POST payload and return it as JSON (201)."""
        user = User.objects.create(
            user_name=request.data["userName"],
            store_name=request.data["storeName"],
            email=request.data["email"],
            uid=request.data["uid"],
        )
        serializer = UserSerializer(user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def retrieve(self, request, pk):
        """GET a single User; 404 when the pk is unknown."""
        try:
            user = User.objects.get(pk=pk)
            serializer = UserSerializer(user)
            return Response(serializer.data)
        except User.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)

    def destroy(self, request, pk):
        """DELETE a User; 404 on unknown pk instead of an unhandled 500."""
        # Consistency fix: retrieve() maps DoesNotExist to 404, but destroy()
        # previously let the exception bubble up as a server error.
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        user.delete()
        return Response('User Deleted', status=status.HTTP_204_NO_CONTENT)
class UserSerializer(serializers.ModelSerializer):
    """JSON serializer for User records.

    Exposes the primary key plus the writable identity fields; used by
    UserView for both create responses and single-record retrieval.
    """

    class Meta:
        model = User
        fields = ('id', 'user_name', 'store_name', 'email', 'uid')
| SeaForeEx/HoldMyComics-Server | holdmycomicsapi/views/user.py | user.py | py | 1,451 | python | en | code | 1 | github-code | 36 |
13820585974 | '''
You are given the following information, but you may prefer to do some research
for yourself.
- 1 Jan 1900 was a Monday.
- Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
- A leap year occurs on any year evenly divisible by 4, but not on a century
unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century
(1 Jan 1901 to 31 Dec 2000)?
'''
import numpy as np
def days_in_year(year):
    """Return the number of days in *year* under Gregorian leap-year rules."""
    # Month lengths, index 0 = January ... index 11 = December.
    days = np.array([31] * 12)
    # Thirty days has September (9), April (4), June (6) and November (11).
    # Bug fix: the original used the 1-based month numbers [9, 4, 6, 11] and
    # [2] as 0-based indices, labelling the wrong months (the yearly *sum*
    # happened to be unaffected, so results were correct by accident).
    days[[8, 3, 5, 10]] = 30
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        days[1] = 29  # leap-year February
    else:
        days[1] = 28
    return sum(days)
def counting_sundays(jan_year, dec_year):
    """Count month-firsts falling on a Sunday from 1 Jan *jan_year*
    through 31 Dec *dec_year* (Project Euler problem 19).

    Bug fix: the previous implementation returned a *week count*
    ((num_days - offset) // 7) and never looked at the first of any month.
    """
    def month_lengths(year):
        # Month lengths for *year*, honouring the century leap-year rule.
        feb = 29 if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) else 28
        return (31, feb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)

    # Day-of-week encoded mod 7 with Monday == 1 (1 Jan 1900 was a Monday),
    # which makes Sunday == 0.
    dow = 1
    # Advance from 1 Jan 1900 to 1 Jan jan_year.
    for year in range(1900, jan_year):
        for length in month_lengths(year):
            dow = (dow + length) % 7
    sundays = 0
    for year in range(jan_year, dec_year + 1):
        for length in month_lengths(year):
            if dow == 0:
                sundays += 1
            dow = (dow + length) % 7
    return sundays
counting_sundays(1901, 2000) | jydiw/assorted-algorithms | project-euler/python/19_counting-sundays.py | 19_counting-sundays.py | py | 1,172 | python | en | code | 0 | github-code | 36 |
41701093659 | # Returns maximal decimal precision of a list of coins
def get_decimal_precision(list_coins):
    """Return the maximal number of decimal digits among the coin values."""
    longest = 0
    for coin in list_coins:
        text = str(coin)
        if '.' not in text:
            continue
        digits = len(text.partition('.')[2])
        if digits > longest:
            longest = digits
    return longest
# Returns list of possibles list of coins to change nb_units
def number_changing_possibilities(nb_units, list_coins, memory=None, precision=None):
    """Return all multisets of coins from *list_coins* summing to *nb_units*.

    Each possibility is a sorted list of coin values; the result holds no
    duplicate multisets.

    Args:
        nb_units: amount to change (int or float).
        list_coins: available coin denominations.
        memory: memoization dict shared across the recursion.
        precision: decimal precision used to round float subtractions.
    """
    # Initialization
    if memory is None:
        memory = {}
    if precision is None:
        precision = get_decimal_precision(list_coins)
    if nb_units == 0:
        return []
    # Bug fix: the memo was written but never consulted, so every amount was
    # recomputed from scratch (exponential blow-up). Completed amounts are
    # returned directly from the cache.
    if nb_units in memory:
        return memory[nb_units]
    memory[nb_units] = []
    # Exploration of each possibility
    for coin in list_coins:
        if nb_units < coin:
            continue
        remainder = round(nb_units - coin, precision)
        # Change possibilities for nb_units - coin
        sub_changes = number_changing_possibilities(
            remainder, list_coins=list_coins, memory=memory, precision=precision)
        if sub_changes == []:
            # nb_units equals a single coin value.
            candidates = [[coin]]
        else:
            candidates = [sorted([coin] + change) for change in sub_changes]
        # Deduplicate while preserving first-seen order.
        for change in candidates:
            if change not in memory[nb_units]:
                memory[nb_units].append(change)
    return memory[nb_units]
# Demo: count the ways to change 5 units with euro-style denominations.
list_coins = [0.1, 0.5, 1, 2, 5]
print(len(number_changing_possibilities(nb_units=5, list_coins=list_coins)))
71017062823 | """ som exception handling """
""" my own exception raise"""
#raise Exception("hell wrong one")
""" handle exception divide by 0"""
try:
a=int(input("Enter the first number :"))
b=int(input("Enter second numbe :"))
print(a/b)
except ZeroDivisionError:
print("Idiot, you have try to divide with zero")
else:
print("well done")
| bg0csj/Pythonbeginners | exception.py | exception.py | py | 347 | python | en | code | 0 | github-code | 36 |
import requests
import argparse
import json
import pandas as pd
import streamlit as st
# NOTE(review): pandas and streamlit are imported but unused in this script --
# presumably leftovers from a UI variant; confirm before removing.

# Local inference endpoint of the summarization service.
APP_URL = "http://127.0.0.1:8000/predict"

# Adding arguments to customize CLI
argparser = argparse.ArgumentParser(description='Process hyper-parameters')
argparser.add_argument('--movie_title', type=str, default='', help='movie title')
argparser.add_argument('--scraping_limit', type=int, default=10, help='scraping limit')
argparser.add_argument('--reviewer', type=str, default='user', help='reviwer type')
argparser.add_argument('--char_limit', type=int, default=30000, help='char limit summary input')
argparser.add_argument('--max_length', type=int, default=100, help='char limit summary output')
args = argparser.parse_args()

# Echo the scraping configuration before issuing the request.
print('\n ---------------------')
print('Scraping Details: ')
print(f'Movie title: {args.movie_title}')
print(f'Number of total reviews attempted to scrape: {args.scraping_limit}')
print(f'Reviews from: {args.reviewer}')
print(f'Character limit for summary text: {args.char_limit}')

# Request body mirrors the CLI arguments one-to-one.
payload = {
    'movie_title': args.movie_title,
    'scraping_limit': args.scraping_limit,
    'reviewer': args.reviewer,
    'char_limit': args.char_limit,
    'max_length': args.max_length
}
response = requests.post(APP_URL, json=payload)
decoded_output = response.content.decode('UTF-8')
output = json.loads(decoded_output)
print(output)
25380312208 | #!usr/bin/env python
# coding:utf-8
__author__ = 'sunyaxiong'
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
sys.path.append('E:/GitWorkspace/enndc_management/enndc_management')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
django.setup()
from django.db.models import F, Count
from vmserver.models import List
from openpyxl import load_workbook
from enndc_management.settings import *
import pprint
import json
import logging
LOG = logging.getLogger('vmserver')
BUG = logging.getLogger('request')
def get_excel_info():
    """Read VM rows from the bundled xlsx into a list of dicts.

    The first worksheet row is treated as the header; every subsequent row
    becomes a dict mapping header name -> cell value.
    """
    wb = load_workbook(filename=BASE_DIR + '/vmserver/pyvmomi_api/info-lzc.xlsx')
    ws = wb['info']
    # Bug fix: Worksheet.rows is a generator in current openpyxl, so it cannot
    # be indexed (the old rows[0][i] pattern); materialize it once.
    rows = list(ws.rows)
    if not rows:
        return []
    header = [cell.value for cell in rows[0]]
    info_list = []
    for row in rows[1:]:  # skip the header row (old code deleted it afterwards)
        info_list.append({name: cell.value for name, cell in zip(header, row)})
    # json.dump(info_list, open('info.json', 'w'), indent=4, ensure_ascii=False)
    return info_list
def compare_update():
    """Sync spreadsheet rows into the List table, matching on list_name.

    Rows whose list_name has no DB match are only logged as failures;
    matches are updated with every spreadsheet column via queryset.update().
    """
    info_list = get_excel_info()
    successed_flag = 0
    failed_flag = 0
    for info in info_list:
        query = List.objects.filter(list_name=info['list_name'])
        if len(query) == 0:
            LOG.info('{0} not found'.format(info['list_name']))
            failed_flag += 1
        else:
            # NOTE(review): assumes every spreadsheet header matches a List
            # model field -- an unknown column makes update() raise; confirm.
            query.update(**info)
            LOG.info('vm {0} has updated'.format(info['list_name']))
            successed_flag += 1
    LOG.info('there has {0} vm info update and {1} vm info failed'.format(successed_flag, failed_flag))
if __name__ == '__main__':
get_excel_info()
| willsion/enndc_management | vmserver/pyvmomi_api/appinfo_excel_to_db.py | appinfo_excel_to_db.py | py | 1,630 | python | en | code | 0 | github-code | 36 |
32199462370 | #!/usr/bin/python3
"""hard coding is a hard working"""
import requests
import sys
if __name__ == '__main__':
    # Fetch the URL given as the first command-line argument.
    moi = requests.get(sys.argv[1])
    # 4xx/5xx responses print the status code instead of the body.
    if moi.status_code >= 400:
        print('Error code: {}'.format(moi.status_code))
    else:
        print(moi.text)
import requests
import json

battles_win_history = []

# Request player battle history from the Splinterlands API.
resp = requests.get('https://api.splinterlands.io/battle/history?player=kingsgambit0615').json()
battles = resp['battles']

# Collect every battle's mana cap, then keep the distinct values in
# ascending order. (Idiom: set-based dedup replaces the manual
# membership-test loop; the final sorted result is identical.)
temp = [battle['mana_cap'] for battle in battles]
output = sorted(set(temp))
print(output)
# print('Loop:' + str(temp))
# temp = temp + 1
# # Loads Battle Details
# fight_details = json.loads(battle['details'])
# # Get player winning deck summoners and monsters
# if fight_details['winner'] == "kingsgambit0615":
# # Get Mana Cap Of the Match
# mana_cap = battle['mana_cap']
# print("mana: " + str(mana_cap))
# # Get Ruleset Of the Match
# ruleset = battle['ruleset']
# print("ruleset: " + ruleset)
# # team_one = (fight_details['team1'])
# # print(team_one['player'])
# try:
# if fight_details['team1']['player'] == "kingsgambit0615":
# summoner = fight_details['team1']['summoner']['card_detail_id']
# print("Summoner: " + str(summoner))
# monsters = fight_details['team1']['monsters']
# for monster in monsters:
# print("monster:" + str(monster['card_detail_id']))
# else:
# summoner = fight_details['team2']['summoner']['card_detail_id']
# print("Summoner: " + str(summoner))
# monsters = fight_details['team2']['monsters']
# for monster in monsters:
# print("monster:" + str(monster['card_detail_id']))
# except:
# pass
| jomarmontuya/splinterlands-bot-python | data.py | data.py | py | 1,816 | python | en | code | 0 | github-code | 36 |
19500865293 | import csv
from ctypes import pointer
import math
from time import sleep
from unittest import result
from matplotlib import pyplot as plt
import matplotlib.animation as animation
from matplotlib.pyplot import MultipleLocator
import numpy as np
def write_csv_list_a(sk_list, path):
    """Append the rows in *sk_list* to the CSV file at *path*."""
    with open(path, 'a', newline='') as handle:
        csv.writer(handle).writerows(sk_list)
def write_csv_list_w(sk_list, path):
    """Overwrite the CSV file at *path* with the rows in *sk_list*."""
    with open(path, 'w', newline='') as handle:
        csv.writer(handle).writerows(sk_list)
def eval_list(sk_list):
    """Evaluate each string in *sk_list* back into a Python value.

    NOTE(review): eval() on file contents is unsafe for untrusted CSVs;
    acceptable here only because the files are produced by this same script.
    """
    return [eval(token) for token in sk_list]
def read_csv_list(path):
    """Read *path* as CSV, eval-ing every cell back to a Python value."""
    parsed_rows = []
    with open(path, 'r') as handle:
        for row in csv.reader(handle):
            # Inlined eval_list(): evaluate each cell string of the row.
            parsed_rows.append([eval(cell) for cell in row])
    return parsed_rows
def str_list_2_float(str_list):
    """Convert a list of numeric strings to a list of floats."""
    return [float(item) for item in str_list]
def read_csv_17_list(path, epoch):
    """Return rows [epoch*17 .. epoch*17+16] of the CSV as float lists.

    Each frame of the skeleton stream occupies 17 consecutive CSV rows;
    *epoch* selects which frame to extract.
    """
    first = epoch * 17
    last = first + 16
    frame = []
    with open(path, 'r') as handle:
        for index, row in enumerate(csv.reader(handle)):
            if first <= index <= last:
                # Inlined str_list_2_float(): cells come back as strings.
                frame.append([float(cell) for cell in row])
    return frame
# _________________________________________________________________________________________________
# _________________________________________________________________________________________________
# print(sk_list[0][0])
# print(type(sk_list[0][0]))
# print(sk_list)
# print(len(sk_list))
# Set the cs_p P17 as the initial centre point, to entablish the whole spherical coordinates system
# Pre-set the distance of each two skeleton segment points set
# 1 shoulder centre point to left shoulder point
d_cs_l_s = 1
# 2 shoulder centre point to right shoulder point
d_cs_r_s = 1
# 3 left shoulder point to left elbow point
d_l_s_eb = 1.1
# 4 left elbow point to left wrist point
d_l_eb_w = 1.5
# 5 right shoulder point to right elbow point
d_r_s_eb = 1.1
# 6 right elbow point to right wrist point
d_r_eb_w = 1.5
# 7 shoulder centre point to nose point
d_cs_n = 1
# 8 nose point to left eye point
d_n_l_e = 0.3
# 9 nose point to rigth eye point
d_n_r_e = 0.3
# 10 left point eye to left ear point
d_l_e_er = 0.35
# 11 rigth eye point to rigth ear point
d_r_e_er = 0.35
# 12 shoulder centre point to hip centre point
d_cs_ch = 3
# 13 hip centre point to left hip point
d_ch_l_h = 0.9
# 14 hip centre point to right hip point
d_ch_r_h = 0.9
# 15 left hip point to left knee point
d_l_h_k = 1.8
# 16 right hip point to right knee point
d_r_h_k = 1.8
# 17 left knee point to left ankle point
d_l_k_a = 1.8
# 18 right knee point to right ankle point
d_r_k_a = 1.8
# COCO_PERSON_KEYPOINT_NAMES = [0'nose', 1'left_eye', 2'right_eye', 3'left_ear',
# 4'right_ear', 5'left_shoulder', 6'right_shoulder', 7'left_elbow',
# 8'right_elbow', 9'left_wrist', 10'right_wrist', 11'left_hip', 12'right_hip',
# 13'left_knee', 14'right_knee', 15'left_ankle', 16'right_ankle']
# ratio_d = [0:d_cs_l_s, 1:d_cs_r_s, 2:d_l_s_eb, 3:d_l_eb_w, 4:d_r_s_eb, 5:d_r_eb_w, 6:d_cs_n, 7:d_n_l_e, 8:d_n_r_e, 9:d_l_e_er,
# 10:d_r_e_er, 11:d_cs_ch, 12:d_ch_l_h, 13:d_ch_r_h, 14:d_l_h_k, 15:d_r_h_k, 16:d_l_k_a, 17:d_r_k_a]
# Segments length set
ratio_d = [d_cs_l_s, d_cs_r_s, d_l_s_eb, d_l_eb_w, d_r_s_eb, d_r_eb_w, d_cs_n, d_n_l_e, d_n_r_e, d_l_e_er,
d_r_e_er, d_cs_ch, d_ch_l_h, d_ch_r_h, d_l_h_k, d_r_h_k, d_l_k_a, d_r_k_a]
# Training sequence: (parent_index, child_index) pairs defining the order in
# which each segment endpoint gets its depth (x) estimated. Index 17 is the
# shoulder centre, 18 the hip centre (added by add_p1718).
seq_train_set = [[17, 5], [17, 6], [5, 7], [7, 9], [6, 8], [8, 10],
                 [17, 0], [0, 1], [0, 2], [1, 3], [2, 4], [17, 18], [18, 11], [18, 12],
                 [11, 13], [12, 14], [13, 15], [14, 16]]
# Per-segment zoom ratios, one per entry of seq_train_set.
zr = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Depth-sign guess (+1 front / -1 back) per keypoint, mutated each frame.
plus_minus = [1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Pristine copy used to reset plus_minus after every frame.
pre_plus_minus = [1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Estimated depth (x) per keypoint, carried between frames.
x_set = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Learning rate (NOTE(review): declared but apparently unused -- confirm).
lr = 0.1
# Segment 2-D lengths of the previous frame, filled by update_plus_minus.
sk_list_last_d2d = []
# _________________________________________________________________________________________
# Add the addictional points P17 and P18
def add_p1718(sk_list):
    """Append P17 (shoulder centre) and P18 (hip centre) to *sk_list*.

    P17 is the midpoint of the shoulders (indices 5 and 6); P18 is the
    midpoint of the hips (indices 11 and 12). The list is extended in
    place and also returned.
    """
    shoulder_mid = [(sk_list[5][0] + sk_list[6][0]) / 2,
                    (sk_list[5][1] + sk_list[6][1]) / 2]
    hip_mid = [(sk_list[11][0] + sk_list[12][0]) / 2,
               (sk_list[11][1] + sk_list[12][1]) / 2]
    sk_list.append(shoulder_mid)  # P17
    sk_list.append(hip_mid)       # P18
    return sk_list
# ______________________________________________________________________________________________________________________
# Get 2d distance of specific two points in sk_list
def get_points_d2d(sk_list, p_in__1, p_in__2):
    """Euclidean 2-D distance between two indexed points of *sk_list*."""
    dy = sk_list[p_in__1][0] - sk_list[p_in__2][0]
    dz = sk_list[p_in__1][1] - sk_list[p_in__2][1]
    return math.sqrt(dy ** 2 + dz ** 2)
# ______________________________________________________________________________________________________________________
# Normalizing the 3d data
def normalizing(sk_list):
    """Translate all points so P17 (shoulder centre) becomes the origin,
    flipping the second coordinate so z grows upward."""
    origin_y, origin_z = sk_list[17]
    return [[point[0] - origin_y, -(point[1] - origin_z)] for point in sk_list]
# ______________________________________________________________________________________________________________________
# ______________________________________________________________________________________________________________________
# Transforms the sk_list from 2d to 3d
# d2d^2 = (y1-y2)^2 + (z1-z2)^2
# d3d^2 = (x1-x2)^2 + d2d^2
# x1=0, x2 = -(x1-x2) = -math.sqrt(d3d^2-d2d^2)
def sk_list_to_3d(sk_list):
    """Lift the 19 normalized 2-D keypoints to 3-D by estimating depth (x).

    For every segment in seq_train_set the depth of the child point is
    derived from the difference between the segment's assumed 3-D length
    (proportional to the spine length, segment 17-18) and its observed 2-D
    projection: d3d^2 = x^2 + d2d^2. The sign of each depth comes from the
    plus_minus heuristics; module globals (x_set, plus_minus, ...) carry
    state between frames. The list is mutated in place and returned.
    """
    global zr
    global x_set
    global ratio_d
    global plus_minus
    global seq_train_set
    global plus_minus
    global pre_plus_minus
    # Spine (shoulder centre -> hip centre) sets the scale for everything.
    d2d_17_18 = get_points_d2d(sk_list, 17, 18)
    d3d_17_18 = zr[11] * d2d_17_18
    # Refresh the per-point sign guesses from the previous frame's lengths.
    update_plus_minus(get_d2d_set(sk_list))
    # Face orientation heuristic: the longer eye-ear projection decides
    # which shoulder side is in front.
    if get_points_d2d(sk_list, 1, 3) >= get_points_d2d(sk_list, 2, 4):
        plus_minus[5] = -1
        plus_minus[6] = 1
    else:
        plus_minus[5] = 1
        plus_minus[6] = -1
    for i in range(len(seq_train_set)):
        global ratio_d  # redundant duplicate of the declaration above
        d2d_seg = get_points_d2d(sk_list, seq_train_set[i][0], seq_train_set[i][1])
        # Assumed 3-D segment length, scaled relative to the spine.
        d3d_seg = zr[i] * (ratio_d[i] / ratio_d[11]) * d3d_17_18
        x_f = x_set[seq_train_set[i][0]]
        # abs() guards against d2d exceeding the assumed 3-D length.
        x_b = -(math.sqrt(abs(d3d_seg**2 - d2d_seg**2)) - x_f)
        zoom = 0.25  # damping factor on the estimated depth
        x_set[seq_train_set[i][1]] = -zoom * plus_minus[seq_train_set[i][1]] * x_b
    plus_minus = pre_plus_minus
    # Prepend the estimated depth to each [y, z] point: [x, y, z].
    temp_list = sk_list.copy()
    for i in range(len(sk_list)):
        sk_list[i] = [x_set[i]] + temp_list[i]
    # Left leg: if the thigh projects steeper than the shin, assume the
    # ankle bends backwards.
    d1 = sk_list[11][0] - sk_list[13][0]
    d2 = sk_list[13][0] - sk_list[15][0]
    if d1 != 0 and d2 != 0:
        k_l_h_k = abs((sk_list[11][1] - sk_list[13][1]) / d1)
        k_l_k_a = abs((sk_list[13][1] - sk_list[15][1]) / d2)
        if k_l_h_k > k_l_k_a:
            sk_list[15][0] = -(abs(sk_list[15][0]))
    # Right leg: same bending heuristic.
    d3 = sk_list[12][0] - sk_list[14][0]
    d4 = sk_list[14][0] - sk_list[16][0]
    if d3 != 0 and d4 != 0:
        k_r_h_k = abs((sk_list[12][1] - sk_list[14][1]) / d3)
        k_r_k_a = abs((sk_list[14][1] - sk_list[16][1]) / d4)
        if k_r_h_k > k_r_k_a:
            sk_list[16][0] = -(abs(sk_list[16][0]))
    # Torso twist: shoulders and hips sit on opposite sides of the spine.
    if sk_list[5][0] >= 0:
        sk_list[11][0] = abs(sk_list[11][0])
        sk_list[6][0] = -(abs(sk_list[6][0]))
    else:
        sk_list[11][0] = -(abs(sk_list[11][0]))
        sk_list[6][0] = abs(sk_list[6][0])
    if sk_list[11][0] >= 0:
        sk_list[12][0] = -(abs(sk_list[12][0]))
    else:
        sk_list[12][0] = abs(sk_list[12][0])
    # Elbows never reach further forward than the wrists.
    if sk_list[7][0] >= sk_list[9][0]:
        sk_list[7][0] = -(abs(sk_list[7][0]))
    if sk_list[8][0] >= sk_list[10][0]:
        sk_list[8][0] = -(abs(sk_list[8][0]))
    return sk_list
# ______________________________________________________________________________________________________________________
# Get draw set
def get_draw_set(points_list, sk_list_3d):
    """Split the 3-D coordinates of the given indices into [xs, ys, zs]."""
    xs = [sk_list_3d[index][0] for index in points_list]
    ys = [sk_list_3d[index][1] for index in points_list]
    zs = [sk_list_3d[index][2] for index in points_list]
    return [xs, ys, zs]
# ______________________________________________________________________________________________________________________
# Get d2d set of each segments
def get_d2d_set(sk_list):
    """Return the 2-D length of each of the 18 training segments.

    Order matches seq_train_set (and therefore the plus_minus / x_set
    bookkeeping performed by update_plus_minus).
    """
    global seq_train_set  # read-only; the global statement is redundant
    sk_list_new_d2d = []
    for i in range(18):
        sk_list_new_d2d.append(get_points_d2d(sk_list, seq_train_set[i][0], seq_train_set[i][1]))
    return sk_list_new_d2d
# ______________________________________________________________________________________________________________________
# Update the plus_minus
def update_plus_minus(sk_list_new_d2d):
    """Refresh the depth-sign guesses from segment-length changes.

    If a point already has non-zero estimated depth and its segment's 2-D
    projection shrank since the previous frame, keep biasing the sign in
    the same direction. Remembers the current lengths for the next frame.
    """
    global x_set
    global plus_minus
    global sk_list_last_d2d
    # Bug fix: the original iterated range(19) while the d2d lists hold at
    # most 18 entries (and sk_list_last_d2d starts empty), which raises
    # IndexError as soon as x_set[i] becomes non-zero for an out-of-range i.
    # Bound the loop to the data actually available.
    limit = min(len(sk_list_new_d2d), len(sk_list_last_d2d),
                len(x_set), len(plus_minus))
    for i in range(limit):
        if x_set[i] > 0:
            if sk_list_new_d2d[i] <= sk_list_last_d2d[i]:
                plus_minus[i] = 1
        if x_set[i] < 0:
            if sk_list_new_d2d[i] <= sk_list_last_d2d[i]:
                plus_minus[i] = -1
    sk_list_last_d2d = sk_list_new_d2d
# ______________________________________________________________________________________________________________________
# Define the ax 3d drawing constraint
def ax3d_constraint(ax, sk_list_3d):
    """Draw the skeleton chains on *ax* and configure axes and viewpoint.

    Decomposition: the eight near-identical plot3D calls of the original
    are driven from a single chain table. Returns *ax*.
    """
    left_line_color = 'r'
    central_line_color = 'gold'
    right_line_color = 'lime'
    msize = 8

    def plot_chain(indices, color):
        # One polyline through the given joint indices, original styling.
        xs, ys, zs = get_draw_set(indices, sk_list_3d)
        ax.plot3D(xs=xs,
                  ys=ys,
                  zs=zs,
                  zdir='z',
                  c=color,       # line color
                  marker='o',    # mark style
                  mfc='cyan',    # marker facecolor
                  mec='g',       # marker edgecolor
                  ms=msize,      # marker size
                  linewidth=3.0  # linewidth
                  )

    # Same chains, in the same order (including the spine drawn twice),
    # as the original hand-unrolled version.
    # NOTE(review): the second chain uses [0, 0, 4] (nose repeated) rather
    # than [0, 2, 4]; preserved as-is -- confirm whether the right eye (2)
    # was intended.
    chains = [
        ([0, 1, 3], left_line_color),         # nose - left eye - left ear
        ([0, 0, 4], right_line_color),        # nose - nose - right ear
        ([0, 17, 18], central_line_color),    # nose - shoulder ctr - hip ctr
        ([17, 5, 7, 9], left_line_color),     # left arm
        ([17, 6, 8, 10], right_line_color),   # right arm
        ([18, 11, 13, 15], left_line_color),  # left leg
        ([18, 12, 14, 16], right_line_color), # right leg
        ([0, 17, 18], central_line_color),    # spine (drawn twice originally)
    ]
    for indices, color in chains:
        plot_chain(indices, color)

    # Axis titles and tick spacing.
    ax.set(
        xlabel='X',
        ylabel='Y',
        zlabel='Z',
    )
    x_major_locator = MultipleLocator(100)
    y_major_locator = MultipleLocator(300)
    ax.xaxis.set_major_locator(x_major_locator)
    ax.yaxis.set_major_locator(y_major_locator)
    # Viewing angle: elevation / azimuth.
    ax.view_init(elev=30,
                 azim=-20
                 )
    return ax
# ______________________________________________________________________________________________________________________
# Draw the skeleton
# def show3Dske(sk_list_3d):
def show3Dske(csv_path, mod):
    """Visualize 2-D skeleton CSV data as a 3-D figure.

    Args:
        csv_path: CSV containing 17-row skeleton frames.
        mod: 'p' renders the first frame as a still picture;
             'v' animates every frame and saves a GIF next to the CSV.

    Side effects: writes the lifted 3-D data to '<csv_path>_3d.csv'
    (overwritten in picture mode, appended per frame in video mode) and,
    in video mode, saves '<csv_path>.gif'.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Stretch the box so the standing skeleton is not squashed.
    ax.set_box_aspect((1, 2, 5))
    if mod == 'p':
        # Single frame: read, lift to 3-D, persist and draw once.
        sk_list_i = read_csv_17_list(csv_path, 0)
        sk_list19 = add_p1718(sk_list_i)
        sk_list19 = normalizing(sk_list19)
        sk_list_3d = sk_list_to_3d(sk_list19)
        # Save the 3D data
        path_data_3d = csv_path + '_3d.csv'
        write_csv_list_w(sk_list_3d, path_data_3d)
        ax3d_constraint(ax, sk_list_3d)
    if mod == 'v':
        def update(i):
            # Per-frame callback: same pipeline as picture mode, but the
            # 3-D rows are appended and the axes cleared before redrawing.
            sk_list_i = read_csv_17_list(csv_path, i)
            sk_list19 = add_p1718(sk_list_i)
            sk_list19 = normalizing(sk_list19)
            sk_list_3d = sk_list_to_3d(sk_list19)
            # Save the 3D data
            path_data_3d = csv_path + '_3d.csv'
            write_csv_list_a(sk_list_3d, path_data_3d)
            plt.cla()
            ax3d_constraint(ax, sk_list_3d)
        anim = animation.FuncAnimation(fig, func=update, blit=False, interval=50,
                                       repeat=False, cache_frame_data=False)
        # Save the resulting animation next to the source CSV.
        anim.save(csv_path + '.gif')
    plt.autoscale(False)
    # Show picture
    plt.show()
# Test()
# =============================================================================
# =============================================================================
# =============================================================================
def test_cycle():
    """Demo entry point: render one still picture (video runs kept for reference)."""
    csv_path1 = './pictures/7.jpg.csv'
    show3Dske(csv_path1, 'p')
    # csv_path2 = './csv/3.mp4.csv'
    # show3Dske(csv_path2, 'v')
    # csv_path3 = './longvideos/1.mp4.csv'
    # show3Dske(csv_path3, 'v')

test_cycle()
| JYLinOK/3DSKeleton | showSkeleton.py | showSkeleton.py | py | 19,714 | python | en | code | 1 | github-code | 36 |
38966181487 |
def isChange(arr, n, before):
    """Check whether the current top half of the scoreboard stayed put.

    Returns (flag, ids): *ids* are the player ids of the first n//2 rows of
    *arr*; *flag* is True iff every one of them already appears in *before*.
    """
    top_half_ids = [row[0] for row in arr[:n // 2]]
    unchanged = all(pid in before for pid in top_half_ids)
    return unchanged, top_half_ids
def solution(n, student, point):
    """Count scoring events after which the top half of the board changed.

    Players 1..n start at 0 points. For each (student, point) event the
    points are added, the board is re-sorted by (-score, id), and the
    answer increments whenever the new top n//2 contains an id that was
    not in the top n//2 before the sort.
    """
    changes = 0
    board = [[pid, 0] for pid in range(1, n + 1)]
    half = n // 2
    for sid, pts in zip(student, point):
        # Award the points to the matching player.
        for row in board:
            if row[0] == sid:
                row[1] += pts
        # Top-half ids before and after re-sorting (isChange() inlined).
        prev_top = [row[0] for row in board[:half]]
        board.sort(key=lambda row: (-row[1], row[0]))
        new_top = [row[0] for row in board[:half]]
        if not all(pid in prev_top for pid in new_top):
            changes += 1
    return changes
print(solution(6, [6,1,4,2,5,1,3,3,1,6,5], [3,2,5,3,4,2,4,2,3,2,2])) | leehyeji319/PS-Python | 기출/22winter2.py | 22winter2.py | py | 936 | python | en | code | 0 | github-code | 36 |
30346539101 | import os
import psycopg2
from flask import Flask, render_template, request, url_for, redirect
from app import app
def get_db_connection():
    """Open a new connection to the local ``restaurant`` PostgreSQL database.

    Credentials come from the DB_USERNAME / DB_PASSWORD environment
    variables; raises KeyError if either is missing.
    """
    credentials = {
        'host': 'localhost',
        'database': 'restaurant',
        'user': os.environ['DB_USERNAME'],
        'password': os.environ['DB_PASSWORD'],
    }
    return psycopg2.connect(**credentials)
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template("index.html")
@app.route('/restaurant/')
def restaurant():
    """List every row of the ``restaurants`` table on the listing page."""
    connection = get_db_connection()
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM restaurants;')
    rows = cursor.fetchall()
    cursor.close()
    connection.close()
    return render_template('restaurant.html', restaurants=rows)
@app.route('/create/', methods=('GET', 'POST'))
def create():
    """Show the creation form (GET) or insert a new restaurant row (POST).

    On POST, reads the form fields, inserts one row into ``restaurants``
    and redirects to the listing view; on GET simply renders the form.
    Raises KeyError/ValueError on missing or non-integer form fields.
    """
    if request.method == 'POST':
        restaurant_name = request.form['restaurant_name']
        area = request.form['area']
        category = request.form['category']
        restaurant_visited = int(request.form['restaurant_visited'])
        average_rating = request.form['average_rating']
        ratings_count = int(request.form['ratings_count'])

        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute('INSERT INTO restaurants (restaurant_name, area, category, restaurant_visited, average_rating, ratings_count)'
                    'VALUES (%s, %s, %s, %s, %s, %s)',
                    (restaurant_name, area, category, restaurant_visited, average_rating, ratings_count))
        conn.commit()
        cur.close()
        conn.close()
        # Bug fix: url_for() takes the *endpoint* (view function name), not a
        # URL rule. url_for('/restaurant/') raised werkzeug BuildError at
        # runtime, so the redirect after a successful insert always failed.
        return redirect(url_for('restaurant'))
    return render_template('create.html')
26974973866 | from .helpers import fetch_one, create_and_return_id
def get_set_id(conn, source):
    """Return the id of the Sets row whose ``source`` matches, via fetch_one."""
    query = "SELECT id FROM Sets WHERE source=%s"
    return fetch_one(conn, query, (source, ))
def create_set(conn, source, dj_id, venue_id, occasion_id):
    """Insert a new Sets row and return its freshly generated id."""
    query = "INSERT INTO Sets (dj_id, source, occasion_id, venue_id) VALUES (%s, %s, %s, %s) RETURNING id;"
    new_id = create_and_return_id(conn, query, (dj_id, source, occasion_id, venue_id, ))
    print("Added Set: %s" % source)
    return new_id
| cocain-app/crawler | database/set.py | set.py | py | 511 | python | en | code | 0 | github-code | 36 |
28592053848 | import random
import time
#These have to do with importing the pictures
import io
import os
import PySimpleGUI as sg
import PIL
from PIL import Image
# ``n`` is the current gallows frame counter; 49 == ord('1'), so chr(n)
# walks through HM_1.png, HM_2.png, ... as wrong guesses accumulate.
n = 49
# Pre-load the first gallows frame into an in-memory PNG buffer for the GUI.
image = Image.open(r'C:\Users\carte\OneDrive\Desktop\Coding\Hangman\HM_' + chr(n) + '.png')
image.thumbnail((200, 200))
bio = io.BytesIO()
image.save(bio, format="PNG")
sg.theme ( 'DarkPurple' )
# Window layout: gallows image, three status text rows, the guess input
# (cleared after each read), and the ENTER / EXIT buttons.
layout =[ [sg.Image( key="-IMAGE-")],
        [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT-')],
        [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT2-')],
        [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT3-')],
        [sg.Input(key='-IN-', do_not_clear=False)],
        [sg.Button("ENTER"), sg.Exit("EXIT GAME")]]
window = sg.Window("Hangman Game", layout, margins = (150, 150),finalize=True, resizable = True)
window['-OUTPUT-'].update('Hello, please enter a word or phrase for the Hangman Game')
#Opens the window and asks for an input to play hangman
def getInput():
    """Prompt for the secret word/phrase and start the game once it is valid.

    Loops on window events until a phrase is entered that is non-empty,
    contains no digits and does not end with a space, then hands it to
    PlayGame(). Returns when the game finishes or the window is closed.
    """
    valid = False
    values = ""
    length = 0
    while valid == False:
        event , values = window.read()
        # Re-validate the input field after every press of ENTER.
        if event == "ENTER":
            length = len(values['-IN-'])
            inputString = values['-IN-']
            # Reject empty input or input containing digits.
            if (length == 0 or (has_numbers(values['-IN-']) == True)):
                print('Invalid Entry')
                window['-OUTPUT2-'].update('Invalid Entry - No Input')
            else:
                # Safe to index the last char only after the length check.
                last_char = inputString[-1]
                if ( last_char == ' '):
                    print ( "Invalid Entry - Ends with a Space" )
                    window['-OUTPUT2-'].update('Invalid Entry - Ends with a Space')
                else:
                    print('Valid Entry')
                    window['-OUTPUT2-'].update('')
                    PlayGame(values['-IN-'])
                    valid = True
        if event == sg.WIN_CLOSED or event =='EXIT GAME':
            break
def PlayGame(inputString):
    """Run one round of hangman over ``inputString``.

    Builds a linked list of the answer's characters (spaces pre-revealed),
    then loops over single-letter guesses: correct letters reveal nodes,
    wrong ones advance the gallows image. Ends on a full reveal (win), on
    reaching the last image frame (loss), or when the window is closed.
    """
    x = 0
    correctGuesses = 0
    # Switch the window to the in-game screen.
    Refresh( n )
    arr = list(inputString)
    arrGuessed = []
    # Spaces count as already-guessed characters.
    correctGuesses = numSpaces(arr)
    root = arrayToList(arr, len(arr))
    String = update(root)
    window['-OUTPUT2-'].update(String)
    # Guessing loop: runs until every character is revealed.
    while(correctGuesses != len(arr)):
        x = 0
        event , values = window.read()
        inVal = values['-IN-']
        guessed = alreadyGuessed(arrGuessed, inVal )
        if(event == sg.WIN_CLOSED or event =='EXIT GAME'):
            break
        # 55 == ord('7'): the final gallows frame means the game is lost.
        elif( n == 55 ):
            newImage(n)
            GameLost(inputString)
            return 0
        # Accept only a single, non-digit, not-yet-guessed character.
        elif( len(inVal) == 1 and (inVal.isdigit() == False and guessed == False)):
            arrGuessed.append(inVal)
            # NOTE(review): this prints the function object, not the guess
            # list — looks like leftover debug output.
            print(alreadyGuessed)
            print("Valid Input")
            root, x = CheckGuess( inVal, root )
            if(x == 0):
                # Wrong guess: advance the gallows picture.
                print("Incorrect Guess")
                newImage(n)
            window['-OUTPUT2-'].update(update(root))
            correctGuesses = correctGuesses + x
        else:
            print( "Invalid" )
    if(correctGuesses == len(arr)):
        window['-OUTPUT-'].update("You won the Game!")
        window['-OUTPUT2-'].update("The answer was: "+ inputString)
        window['-OUTPUT3-'].update("")
        # Two reads keep the result screen up until the player interacts.
        event , values = window.read()
        event , values = window.read()
def newImage(i):
    """Advance the global frame counter and show the next gallows picture.

    NOTE(review): the parameter ``i`` is unused — the function always works
    off the module-level counter ``n``. The local ``bio`` also shadows the
    module-level buffer of the same name.
    """
    global n
    print( n )
    n +=1
    image = Image.open(r'C:\Users\carte\OneDrive\Desktop\Coding\Hangman\HM_' + chr(n) + '.png')
    image.thumbnail((200, 200))
    bio = io.BytesIO()
    image.save(bio, format="PNG")
    window['-IMAGE-'].update(data=bio.getvalue())
def alreadyGuessed(arr, char):
    """Return True if ``char`` is already present in the guessed list ``arr``."""
    # `in` performs the same linear scan the hand-rolled loop did.
    return char in arr
#Checks if the input has numbers in ( we don't want numbers in their)
def has_numbers(inputString):
    """Return True if ``inputString`` contains at least one decimal digit."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
#Now it will update the text.
#Needs to update the text box to get rid of it
#Needs to input my picture
def Refresh( a ):
    """Switch the window to the in-game screen.

    Shows the current gallows frame from the module-level ``bio`` buffer and
    prompts for a letter. NOTE(review): parameter ``a`` is unused; callers
    pass the global frame counter ``n`` but it is never read here.
    """
    window['-IMAGE-'].update(data=bio.getvalue())
    window['-OUTPUT-'].update(("Please Enter a letter to guess"))
def GameLost(inputString):
    """Show the game-over screen, revealing the answer, and wait for input."""
    window['-OUTPUT-'].update("You fucking lost the Game!")
    window['-OUTPUT2-'].update("The answer was: "+ inputString)
    window['-OUTPUT3-'].update("You suck")
    # Block on one more event so the player can read the result.
    event , values = window.read()
def playAgain():
    """Reset the global frame counter to the first gallows image."""
    global n
    # 49 == ord('1'): restart at HM_1.png.
    n = 49
#---------------------------------------------------------------------
#--------Input, Node, Checkguess, New Image Functions-----------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Representation of a node
class Node:
    """Singly linked list node holding one character of the answer.

    Attributes:
        val  -- the character this node stores
        next -- the following node, or None at the tail
        show -- whether the character is currently revealed
    """
    def __init__(self, val, show = False):
        self.val = val
        self.next = None
        # Honor the ``show`` argument. Previously it was accepted but
        # ignored (always reset to False); the default preserves the old
        # behavior for every existing call site.
        self.show = show
# Function to insert node
def insert(root, item):
    """Append ``item`` as a new tail node; spaces start out revealed."""
    node = Node(item)
    node.show = (item == ' ')
    if root is None:
        return node
    tail = root
    while tail.next is not None:
        tail = tail.next
    tail.next = node
    return root
def update(root):
    """Render the linked list as the masked puzzle string.

    Revealed nodes show their character, hidden ones show an underscore;
    every cell is prefixed with a space so the row stays readable.
    """
    Str = ""
    while (root != None):
        if root.show:
            # The original had two identical branches for space vs.
            # non-space values; collapsed into one (behavior unchanged).
            Str = Str + " " + root.val
        else:
            Str = Str + " _ "
        root = root.next
    return Str
def arrayToList(arr, n):
    """Build a linked list from the first ``n`` characters of ``arr``."""
    root = None
    for index in range(n):
        root = insert(root, arr[index])
    return root
#Finds the number of spaces in the array of characters
def numSpaces(arr):
    """Count the space characters in ``arr``.

    Bug fix: the original wrote ``arr[x] == ' '`` inside ``for x in arr``,
    indexing the list with the element itself — a TypeError for character
    lists. (A correct duplicate defined later in the file shadowed this
    one, which is why it never crashed.)
    """
    p = 0
    for ch in arr:
        if ch == ' ':
            p += 1
    return p
def CheckGuess(char, head):
    """Reveal every node matching ``char`` (case-insensitively).

    Returns ``(head, count)`` where ``count`` is the number of nodes that
    were newly revealed by this guess.
    """
    matches = (char, char.upper(), char.lower())
    count = 0
    node = head
    while node is not None:
        if node.val in matches and not node.show:
            node.show = True
            count += 1
        node = node.next
    print( "You found ", count ," instances of -" , char , "-" )
    return head, count
def numSpaces(array):
    """Count the space characters in ``array``."""
    return sum(1 for ch in array if ch == ' ')
# Entry point: run the prompt/game loop, then tear down the window.
getInput()
print( "Window Closed")
window.close()
| CarterDFluckiger/Hangman | Hangman.py | Hangman.py | py | 7,079 | python | en | code | 0 | github-code | 36 |
33512429756 | # -*- coding: utf-8 -*-
#
# File: setuphandlers.py
#
#
# GNU General Public License (GPL)
#
__docformat__ = 'plaintext'
from collective.contact.core.interfaces import IContactCoreParameters
from plone import api
from z3c.relationfield.relation import RelationValue
from zope import component
from zope.intid.interfaces import IIntIds
import datetime
import logging
# from plone.registry.interfaces import IRegistry
logger = logging.getLogger('collective.contact.core: setuphandlers')
def isNotCollectiveContactContentProfile(context):
    """Return True when this setup context is NOT the core profile (marker file absent)."""
    marker = context.readDataFile("collective_contact_core_marker.txt")
    return marker is None
def isNotTestDataProfile(context):
    """Return True when this setup context is NOT the test-data profile (marker file absent)."""
    marker = context.readDataFile("collective_contact_core_test_data_marker.txt")
    return marker is None
def postInstall(context):
    """Run at the end of the collective.contact.core profile import.

    Refuses reinstall through portal_quickinstaller (which would drop
    manually added dexterity fields) and seeds default registry values.
    """
    if isNotCollectiveContactContentProfile(context):
        return
    # we CAN NOT reinstall the product using portal_quickinstaller because
    # it removes manualy added fields for dexterity types; detect that code
    # path by scanning the current call stack.
    import traceback
    for line in traceback.format_stack():
        if 'QuickInstallerTool.py' in line and 'reinstallProducts' in line:
            raise Exception('You can not reinstall this product, use portal_setup to re-apply the relevant profile !')
    # Set default values in registry (only where no value is stored yet).
    for name in ('person_contact_details_private', 'person_title_in_title', 'use_held_positions_to_search_person',
                 'use_description_to_search_person'):
        val = api.portal.get_registry_record(name=name, interface=IContactCoreParameters)
        if val is None:
            api.portal.set_registry_record(name=name, value=True, interface=IContactCoreParameters)
    # we need to remove the default model_source added to our portal_types
    # XXX to be done
def create_test_contact_data(portal):
    """Create test contact data in portal.

    Builds a "Military directory" populated with the organizations,
    positions, persons and held positions sketched in the diagram below.
    """
    position_types = [{'name': u'General', 'token': u'general'},
                      {'name': u'Sergeant', 'token': u'sergeant'},
                      {'name': u'Colonel', 'token': u'colonel'},
                      {'name': u'Lieutenant', 'token': u'lieutenant'},
                      {'name': u'Captain', 'token': u'captain'},
                      {'name': u'Admiral', 'token': u'admiral'},
                      ]
    organization_types = [{'name': u'Navy', 'token': u'navy'},
                          {'name': u'Army', 'token': u'army'},
                          {'name': u'Air force', 'token': u'air_force'},
                          ]
    organization_levels = [{'name': u'Corps', 'token': u'corps'},
                           {'name': u'Division', 'token': u'division'},
                           {'name': u'Regiment', 'token': u'regiment'},
                           {'name': u'Squad', 'token': u'squad'},
                           ]
    # Examples structure
    # ------------------
    # organizations (* = organization, £ = position)
    #   * Armée de terre
    #       * Corps A
    #           * Division Alpha
    #               * Régiment H
    #                   * Brigade LH
    #                       £ Sergent
    #               £ Capitaine
    #           * Division Beta
    #       * Corps B
    #       £ Général
    #
    # persons (> = person, @ = held_position)
    #   > De Gaulle
    #       @ Armée de terre
    #       @ Général
    #   > Pepper
    #       @ Sergent
    #   > Rambo
    #       @ Brigade LH
    #   > Draper
    #       @ Capitaine
    #       @ Division Beta
    # The directory is the container for everything below.
    params = {'title': u"Military directory",
              'position_types': position_types,
              'organization_types': organization_types,
              'organization_levels': organization_levels,
              }
    portal.invokeFactory('directory', 'mydirectory', **params)
    mydirectory = portal['mydirectory']
    # --- Persons -----------------------------------------------------------
    params = {'lastname': u'De Gaulle',
              'firstname': u'Charles',
              'gender': u'M',
              'person_title': u'Général',
              'birthday': datetime.date(1901, 11, 22),
              'email': u'charles.de.gaulle@private.com',
              'country': u'France',
              'city': u"Colombey les deux églises",
              'number': u'6bis',
              'street': u'rue Jean Moulin',
              'zip_code': u'52330',
              'additional_address_details': u'bâtiment D',
              'use_parent_address': False,
              'website': 'www.charles-de-gaulle.org'
              }
    mydirectory.invokeFactory('person', 'degaulle', **params)
    degaulle = mydirectory['degaulle']
    params = {'lastname': u'Pepper',
              'gender': u'M',
              'person_title': u'Mister',
              'birthday': datetime.date(1967, 6, 1),
              'email': u'stephen.pepper@private.com',
              'phone': u'0288443344',
              'city': u'Liverpool',
              'country': u'England',
              'use_parent_address': False,
              'website': 'http://www.stephen-pepper.org'
              }
    mydirectory.invokeFactory('person', 'pepper', **params)
    pepper = mydirectory['pepper']
    params = {'lastname': u'Rambo',
              'firstname': u'John',
              'phone': u'0788556644',
              'use_parent_address': True,
              }
    mydirectory.invokeFactory('person', 'rambo', **params)
    rambo = mydirectory['rambo']
    params = {'lastname': u'Draper',
              'firstname': u'John',
              'person_title': u'Mister',
              'use_parent_address': False,
              }
    mydirectory.invokeFactory('person', 'draper', **params)
    draper = mydirectory['draper']
    # --- Organizations (nested per the diagram above) ----------------------
    params = {'title': u"Armée de terre",
              'organization_type': u'army',
              'phone': u'01000000001',
              'email': u'contact@armees.fr',
              'use_parent_address': False,
              'city': u'Paris',
              'street': u'Avenue des Champs-Élysées',
              'number': u'1',
              'zip_code': u'75008',
              'country': u'France',
              'enterprise_number': 'BE123456789',
              }
    mydirectory.invokeFactory('organization', 'armeedeterre', **params)
    armeedeterre = mydirectory['armeedeterre']
    params = {'title': u"Corps A",
              'organization_type': u'corps',
              'street': u"rue Philibert Lucot",
              'city': u'Orléans',
              'country': u'France',
              'use_parent_address': False,
              }
    armeedeterre.invokeFactory('organization', 'corpsa', **params)
    corpsa = armeedeterre['corpsa']
    params = {'title': u"Corps B",
              'organization_type': u'corps',
              'use_parent_address': True,
              }
    armeedeterre.invokeFactory('organization', 'corpsb', **params)
    params = {'title': u"Division Alpha",
              'organization_type': u'division',
              'use_parent_address': True,
              }
    corpsa.invokeFactory('organization', 'divisionalpha', **params)
    params = {'title': u"Division Beta",
              'organization_type': u'division',
              'use_parent_address': True,
              }
    corpsa.invokeFactory('organization', 'divisionbeta', **params)
    divisionalpha = corpsa['divisionalpha']
    divisionbeta = corpsa['divisionbeta']
    params = {'title': u"Régiment H",
              'organization_type': u'regiment',
              'number': u"11",
              'street': u"rue de l'harmonie",
              'city': u"Villeneuve d'Ascq",
              'zip_code': u'59650',
              'country': u'France',
              'use_parent_address': False,
              }
    divisionalpha.invokeFactory('organization', 'regimenth', **params)
    regimenth = divisionalpha['regimenth']
    params = {'title': u"Brigade LH",
              'organization_type': u'squad',
              'use_parent_address': True,
              }
    regimenth.invokeFactory('organization', 'brigadelh', **params)
    brigadelh = regimenth['brigadelh']
    # --- Positions attached to organizations -------------------------------
    params = {'title': u"Général de l'armée de terre",
              'position_type': u'general',
              'email': u'general@armees.fr',
              'use_parent_address': False,
              'city': u'Lille',
              'street': u"Rue de la Porte d'Ypres",
              'number': u'1',
              'zip_code': u'59800',
              'country': u'France',
              }
    armeedeterre.invokeFactory('position', 'general_adt', **params)
    params = {'title': u"Capitaine de la division Alpha",
              'position_type': u'captain',
              'use_parent_address': True,
              }
    divisionalpha.invokeFactory('position', 'capitaine_alpha', **params)
    capitaine_alpha = divisionalpha['capitaine_alpha']
    params = {'title': u"Sergent de la brigade LH",
              'position_type': u'sergeant',
              'cell_phone': u'0654875233',
              'email': u'brigade_lh@armees.fr',
              'im_handle': u'brigade_lh@jabber.org',
              'use_parent_address': True,
              }
    brigadelh.invokeFactory('position', 'sergent_lh', **params)
    sergent_lh = brigadelh['sergent_lh']
    # --- Held positions: link persons to positions via RelationValue -------
    intids = component.getUtility(IIntIds)
    params = {'start_date': datetime.date(1940, 5, 25),
              'end_date': datetime.date(1970, 11, 9),
              'position': RelationValue(intids.getId(armeedeterre)),
              }
    degaulle.invokeFactory('held_position', 'adt', **params)
    general_adt = armeedeterre['general_adt']
    params = {'start_date': datetime.date(1940, 5, 25),
              'end_date': datetime.date(1970, 11, 9),
              'position': RelationValue(intids.getId(general_adt)),
              'label': u"Émissaire OTAN",
              'phone': u'0987654321',
              'country': u'France',
              'use_parent_address': True,
              }
    degaulle.invokeFactory('held_position', 'gadt', **params)
    params = {'start_date': datetime.date(1980, 6, 5),
              'position': RelationValue(intids.getId(sergent_lh)),
              'email': u'sgt.pepper@armees.fr',
              'phone': u'0288552211',
              'city': u'Liverpool',
              'street': u'Water Street',
              'number': u'1',
              'zip_code': u'L3 4FP',
              'country': u'England',
              'use_parent_address': False,
              'website': 'http://www.sergent-pepper.org'
              }
    pepper.invokeFactory('held_position', 'sergent_pepper', **params)
    params = {'position': RelationValue(intids.getId(capitaine_alpha)),
              'use_parent_address': True,
              }
    draper.invokeFactory('held_position', 'captain_crunch', **params)
    params = {'position': RelationValue(intids.getId(divisionbeta)),
              'use_parent_address': True,
              }
    draper.invokeFactory('held_position', 'divisionbeta', **params)
    params = {'position': RelationValue(intids.getId(brigadelh)),
              'use_parent_address': True,
              }
    rambo.invokeFactory('held_position', 'brigadelh', **params)
def createTestData(context):
    """Import step: populate the site with collective.contact.core demo data."""
    if isNotTestDataProfile(context):
        return
    create_test_contact_data(context.getSite())
| collective/collective.contact.core | src/collective/contact/core/setuphandlers.py | setuphandlers.py | py | 11,311 | python | en | code | 6 | github-code | 36 |
13395821581 | """
Entrypoints.
@author: gjorando
"""
import os
import json
from datetime import datetime
import torch
import click
from PIL import Image
import neurartist
def odd_int(value):
    """click type-callback: parse an int and require it to be odd.

    Raises ValueError for even numbers (click surfaces it as a usage error).
    """
    parsed = int(value)
    if parsed % 2 == 0:
        raise ValueError("Odd number required")
    return parsed
def threshold_or_neg(value):
    """click type-callback: parse a float that must be <= 1 (negative allowed)."""
    parsed = float(value)
    if parsed > 1:
        raise ValueError("Value should be between 0 and 1, or negative")
    return parsed
def list_parameter(value_type=int):
    """Build a click type-callback parsing a JSON list.

    The returned callable decodes its argument as JSON, asserts it is a
    list, and coerces every element with ``value_type``. ``None`` (option
    not given) passes through unchanged.
    """
    def _parse(param_value):
        if param_value is None:
            return None
        parsed = json.loads(param_value)
        assert isinstance(parsed, list), "parameter should be a list"
        return [value_type(item) for item in parsed]
    return _parse
@click.command()
# General
@click.option(
"--content", "-c",
"content_path",
required=True,
type=click.Path(exists=True, dir_okay=False),
help="Content image"
)
@click.option(
"--style", "-s",
"style_path",
required=True,
type=click.Path(exists=True, dir_okay=False),
help="Style image"
)
@click.option(
"--output", "-o",
"output_path",
default="./",
type=click.Path(dir_okay=True, writable=True),
help="Output path"
)
@click.option(
"--size", "-S",
"img_size",
default=512,
type=click.INT,
help="Output size"
)
@click.option(
"--epochs", "-e",
"num_epochs",
default=250,
type=click.INT,
help="Maximum number of epochs"
)
@click.option(
"--trade-off",
"trade_off",
default=3,
type=click.FLOAT,
help="Trade-off between content (>1) and style (<1) faithfullness"
)
@click.option(
"--init-random/--init-image",
"random_init",
default=False,
help="Init optimizer either from random noise, or image (default)"
)
@click.option(
"--init-image-path",
"random_init_path",
default=None,
type=click.Path(exists=True, dir_okay=False),
help="If --init-image is set, path to an image (default: content image)"
)
# Layers options
@click.option(
"--content-layers",
default=None,
type=list_parameter(),
help="Indexes of content layers (as a string representing a list)"
)
@click.option(
"--style-layers",
default=None,
type=list_parameter(),
help="Indexes of style layers (as a string representing a list)"
)
@click.option(
"--content-weights",
default=None,
type=list_parameter(float),
help="Content weights (as a string representing a list)"
)
@click.option(
"--style-weights",
default=None,
type=list_parameter(float),
help="Style weights (as a string representing a list)"
)
# Color control
@click.option(
"--color-control",
default="none",
type=click.Choice(["histogram_matching", "luminance_only", "none"]),
help="Color control method (default: none)"
)
@click.option(
"--cc-luminance-only-normalize",
"luminance_only_normalize",
is_flag=True,
help="For color control/luminance only method, normalize output luma"
)
# Spatial control
@click.option(
"--content-guidance",
"content_guidance_path",
default=None,
type=click.Path(exists=True, dir_okay=True, file_okay=False),
help="Content guidance channels folder path"
)
@click.option(
"--style-guidance",
"style_guidance_path",
default=None,
type=click.Path(exists=True, dir_okay=True, file_okay=False),
help="Style guidance channels folder path"
)
@click.option(
"--guidance-propagation-method",
default="simple",
type=click.Choice(["simple", "inside", "all"]),
help="Propagation method for guidance channels"
)
@click.option(
"--guidance-threshold",
default=.5,
type=threshold_or_neg,
help="Threshold between 0 and 1 for guidance channels thresholding, or any"
" negative value for non thresholding"
)
@click.option(
"--guidance-propagation-kernel-size",
default=None,
type=odd_int,
help="Kernel size for propagation of guidance channels (relevant for "
"inside and all methods)"
)
@click.option(
"--guidance-propagation-dilation",
default=None,
type=click.INT,
help="Dilation for propagation of guidance channels (relevant for "
"inside and all methods)"
)
# Meta
@click.option(
"--device", "-d",
default=None,
help="PyTorch device to use (default: cuda if available, otherwise cpu)"
)
@click.option(
"--verbose/--quiet",
"verbose",
default=True,
help="Verbose flag prints info during computation (default: verbose)"
)
@click.version_option(version=neurartist.__version__)
def main(
        content_path,
        style_path,
        output_path,
        img_size,
        num_epochs,
        trade_off,
        random_init,
        random_init_path,
        content_layers,
        style_layers,
        content_weights,
        style_weights,
        color_control,
        luminance_only_normalize,
        content_guidance_path,
        style_guidance_path,
        guidance_propagation_method,
        guidance_threshold,
        guidance_propagation_kernel_size,
        guidance_propagation_dilation,
        device,
        verbose
):
    """
    Create beautiful art using deep learning.

    CLI entry point: loads content/style images, optionally applies color
    and spatial (guidance-channel) control, runs L-BFGS style-transfer
    epochs, and writes the stylized image to ``output_path``. Ctrl-C ends
    the optimization early and still saves the current result.
    """
    # Check that content_guidance_path and style_guidance_path are either both
    # None or both set
    guidance_check = int(content_guidance_path is None)
    guidance_check += int(style_guidance_path is None)
    if guidance_check not in (0, 2):
        raise ValueError(
            "content_guidance and style_guidance must be both set or both None"
        )
    # If a negative value is set, no thresholding is done
    if guidance_threshold < 0:
        guidance_threshold = None
    # If the output path is a directory, we append a generated filename
    if os.path.isdir(output_path):
        output_path = os.path.join(
            output_path,
            "{}.png".format(datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
        )
    # Automatic detection of optimal device
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    # RuntimeError if we use a non-valid device
    torch.device(device)
    # Load and transform the input images
    content_image, style_image = neurartist.utils.load_input_images(
        content_path,
        style_path,
        img_size,
        device
    )
    # If color control mode is histogram matching, update style image
    if color_control == "histogram_matching":
        neurartist.utils.color_histogram_matching(content_image, style_image)
    # Instantiate the model
    model = neurartist.models.NeuralStyle(
        content_layers=content_layers,
        style_layers=style_layers,
        content_weights=content_weights,
        style_weights=style_weights,
        trade_off=trade_off,
        device=device
    )
    # Load guidance channels if desired
    if content_guidance_path is None:
        content_guidance = None
        style_guidance = None
    else:
        # Only forward kernel parameters the user explicitly set, so the
        # library defaults apply otherwise.
        kernel_params = {}
        if guidance_propagation_kernel_size is not None:
            kernel_params["kernel_size"] = \
                (guidance_propagation_kernel_size,)*2
        if guidance_propagation_dilation is not None:
            kernel_params["dilation"] = \
                (guidance_propagation_dilation,)*2
        content_guidance = neurartist.utils.load_guidance_channels(
            content_guidance_path,
            img_size,
            model,
            method=guidance_propagation_method,
            threshold=guidance_threshold,
            kernel_parameters=kernel_params,
            fallback_channel=True,
            device=device
        )
        style_guidance = neurartist.utils.load_guidance_channels(
            style_guidance_path,
            img_size,
            model,
            method=guidance_propagation_method,
            kernel_parameters=kernel_params,
            fallback_channel=True,
            device=device
        )
    # Initialize the optimizer
    if random_init:
        # Despite what's described in the article, initializing the gradient
        # descent with a random input doesn't produce good results at all
        output = torch.randn(content_image.size()).type_as(content_image.data)
    elif random_init_path is None:
        output = content_image.clone()
    else:
        output = neurartist.utils.input_transforms(
            content_image.shape[-2:],  # Use actual content size
            device=device
        )(Image.open(random_init_path))
    # The output image is updated by backward propagation
    output.requires_grad_(True)
    optimizer = torch.optim.LBFGS([output])
    # Fetch the target style and content
    content_targets, style_targets = model.get_images_targets(
        content_image,
        style_image,
        style_guidance
    )
    if verbose:
        print(f"Device={device}")
        print(f"Content={content_path}")
        print(f"Style={style_path}")
        print(f"Output={output_path}")
        print(f"Size={img_size}")
        print(f"Epochs={num_epochs}")
        print(f"Trade-off={trade_off}")
        print(f"Random init={random_init}")
        print(f"Color control={color_control}")
        print(f"Guidance={content_guidance_path is not None}")
        if content_guidance_path is not None:
            print(f"Guidance propagation method={guidance_propagation_method}")
        print(f"Model={model}")
        print()
        print("Ctrl-C to prematurely end computations")
        print("Epoch\tContent loss\tStyle loss\tOverall")
    try:
        for i in range(num_epochs):
            # Run a forward/backward pass
            content_loss, style_loss, overall_loss = model.epoch(
                output,
                content_targets,
                style_targets,
                optimizer,
                content_guidance
            )
            if verbose:
                print("{}/{}\t{:.2f}\t{:.2f}\t{:.2f}".format(
                    str(i+1).zfill(len(str(num_epochs))),
                    num_epochs,
                    content_loss,
                    style_loss,
                    overall_loss
                ))
    except KeyboardInterrupt:  # Handle manual interruption through Ctrl-C
        if verbose:
            print("Manual interruption")
    # Convert the output image
    output_image = neurartist.utils.output_transforms()(
        output
    )
    # Luminance-only
    if color_control == "luminance_only":
        output_image = neurartist.utils.luminance_only(
            neurartist.utils.output_transforms()(
                content_image
            ),
            output_image,
            luminance_only_normalize
        )
    # Finally save the output
    output_image.save(output_path)
| gjorando/style-transfer | neurartist/cli.py | cli.py | py | 10,810 | python | en | code | 2 | github-code | 36 |
8798334453 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""key.py: Handles the keysubmissions for groups"""
import json
import os
import sqlite3
import sys
import string
import auth
from httperror import HTTPError
RETURN_HEADERS = []
def __do_get():
    """Reject GET requests with a 403 body: this CGI endpoint is POST-only."""
    RETURN_HEADERS.append('Status: 403')
    return "This script is NOT get-able"
def __do_post():
    """Read the JSON POST body from stdin and dispatch on its 'action' field."""
    raw_body = sys.stdin.read()
    try:
        payload = json.loads(raw_body)
    except json.JSONDecodeError:
        RETURN_HEADERS.append('Status: 400')
        return "Malformed Request. Data not JSON-decodable"
    if 'action' in payload and payload['action'] == 'submitkey':
        return __submitkey(payload)
    if 'action' in payload and payload['action'] == 'groupstatus':
        return __groupstatus(payload)
    RETURN_HEADERS.append('Status: 500')
    return "Not implemented"
def __submitkey(postdata):
    """Validate a submitkey request body and forward it to submitkey()."""
    if 'authtoken' not in postdata or 'key' not in postdata:
        raise HTTPError("Missing Required Attributes")
    return submitkey(postdata['authtoken'], postdata['key'])
def cleanstring(dirtystring):
    """Normalize a submitted key: lowercase, strip surrounding whitespace,
    and drop every character outside ``string.printable``.

    Fix: the original bound a local variable named ``cleanstring``,
    shadowing the function itself inside its own body — confusing and
    fragile; renamed the local.
    """
    normalized = dirtystring.lower().strip()
    printable = set(string.printable)
    return ''.join(ch for ch in normalized if ch in printable)
def submitkey(authtoken, key):
    """Verify and record a key submission for the authenticated group.

    Raises HTTPError with the current group state as body when: the token
    is invalid (401), no guesses remain (403), the key was already claimed
    or already recorded as bad (410), or the key does not exist (400, also
    recorded as a bad guess). On success the claim is stored and the fresh
    group state JSON is returned.
    """
    group = auth.verify_token(authtoken)
    if group is None:
        raise HTTPError("Invalid Authtoken, please relogin", 401)
    group_status = json.loads(groupstate(authtoken))
    if int(group_status['remain_guess']) < 1:
        raise HTTPError(groupstate(authtoken), 403)
    key = cleanstring(key)
    database = sqlite3.connect('database.sqlite3')
    # Reject keys this group already claimed successfully.
    submitted = database.execute(('SELECT count() FROM claims'
                                  ' WHERE groupname=:groupname AND key=:key'),
                                 {"groupname": group, "key": key}).fetchone()[0]
    if submitted != 0:
        raise HTTPError(groupstate(authtoken), 410)
    # Reject keys this group already tried and got wrong.
    badkey = database.execute(('SELECT count() FROM badkeys'
                               ' WHERE groupname=:groupname AND key=:key'),
                              {"groupname": group, "key": key}).fetchone()[0]
    if badkey != 0:
        raise HTTPError(groupstate(authtoken), 410)
    # Unknown key: record as a bad guess (counts toward the rate limit).
    keyexist = database.execute('SELECT count() FROM keys WHERE LOWER(key)=:key',
                                {'key': key}).fetchone()[0]
    if keyexist == 0:
        database.execute('INSERT INTO badkeys(groupname, key) values(:groupname, :key)',
                         {'groupname': group, 'key': key})
        database.commit()
        raise HTTPError(groupstate(authtoken), 400)
    # Valid, new key: store the claim.
    database.execute('INSERT INTO claims(groupname, key) values(:groupname, :key)',
                     {'groupname': group, 'key': key})
    database.commit()
    return groupstate(authtoken)
def __groupstatus(request):
    """Validate a groupstatus request body and return the group state JSON."""
    if 'authtoken' not in request:
        raise HTTPError("Missing Authtoken")
    status = groupstate(request['authtoken'])
    if status is None:
        raise HTTPError("Authtoken is not valid. Please relogin")
    return status
def groupstate(authtoken):
    """Calculate the group's state and return it as a JSON string.

    Returns None if the token is invalid. ``remain_guess`` is 3 minus the
    number of bad guesses in the last 10 minutes; ``time_to_new_guess`` is
    the unix timestamp at which the oldest recent bad guess expires (None
    when there are no recent bad guesses).
    """
    group = auth.verify_token(authtoken)
    if group is None:
        return None
    database = sqlite3.connect('database.sqlite3')
    # Count recent bad guesses and when the oldest one ages out of the window.
    status = database.execute(('SELECT count(),'
                               ' strftime("%s", datetime(min(submittime), "+10 minute"))'
                               ' FROM badkeys WHERE'
                               ' groupname=:groupname AND '
                               ' submittime > datetime("now", "-10 minute")'),
                              {"groupname": group}).fetchone()
    returnvalue = {
        "group": group,
        "points": get_all_points(),
        "remain_guess": 3 - status[0],
        # min() returns NULL (Python None) when there are no recent rows.
        "time_to_new_guess": int(status[1]) if (type(status[1]) == str) else None
    }
    return json.dumps(returnvalue)
def get_all_points():
    """Compute every group's score from the claims table.

    Claims are ordered per key by catch time; the first, second and third
    group to claim a key score its ``first``/``second``/``third`` point
    values, later claimers score ``other``. Returns a list of
    ``{"name": group, "score": points}`` dicts.
    """
    database = sqlite3.connect('database.sqlite3')
    allclaims = database.execute(('select cl.groupname, cl.catchtime, ke.key,'
                                  ' ke.first, ke.second, ke.third, ke.other'
                                  ' from claims as cl inner join keys as ke'
                                  ' on (ke.key == cl.key collate nocase)'
                                  ' order by ke.key asc, cl.catchtime asc;'))
    allrows = allclaims.fetchall()
    groups = {}
    key = None          # key currently being tallied
    num_in_key = 0      # rank of the current claim within that key
    for row in allrows:
        _key = row[2]
        _group = row[0]
        _point1 = row[3]
        _point2 = row[4]
        _point3 = row[5]
        _point = row[6]
        # New key encountered: restart the per-key rank counter.
        if _key != key:
            num_in_key = 0
            key = _key
        if not _group in groups:
            groups[_group] = 0
        num_in_key = num_in_key + 1
        # Award points by claim order for this key.
        if num_in_key == 1:
            groups[_group] = groups[_group] + _point1
        elif num_in_key == 2:
            groups[_group] = groups[_group] + _point2
        elif num_in_key == 3:
            groups[_group] = groups[_group] + _point3
        else:
            groups[_group] = groups[_group] + _point
    returnvalue = []
    for group in groups.keys():
        returnvalue.append({"name": group, "score": groups[group]})
    return returnvalue
def __main():
    """CGI entry point: dispatch on the REQUEST_METHOD environment variable."""
    if 'REQUEST_METHOD' not in os.environ:
        raise HTTPError("Missing REQUEST_METHOD")
    method = os.environ['REQUEST_METHOD']
    if method == 'GET':
        return __do_get()
    if method == 'POST':
        return __do_post()
    raise HTTPError("Undhandled REQUEST_METHOD")
if __name__ == '__main__':
    # Run the handler; HTTPError carries both an optional status code and
    # the response body to emit.
    try:
        RESPONSE = __main()
    except HTTPError as err:
        if err.status:
            RETURN_HEADERS.append('Status: %d' % err.status)
        else:
            RETURN_HEADERS.append('Status: 400')
        RESPONSE = err.message
    # Emit CGI headers: default to 200 when no handler set a status.
    NUM_HEADERS = len(RETURN_HEADERS)
    if NUM_HEADERS == 0:
        print('Status: 200')
    else:
        for header in RETURN_HEADERS:
            print(header)
    print('Content-Length: %d' % len(RESPONSE))
    print()
    print(RESPONSE)
| daGnutt/skvaderhack | api/key.py | key.py | py | 6,279 | python | en | code | 0 | github-code | 36 |
34603270416 | import tensorflow as tf
import pandas as pd
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
#mnist = input_data.read_data_sets('./data/', one_hot = True)
def numtolist(num):
    """Encode a grade in [0, 20] as a reversed one-hot vector of length 21.

    Index ``20 - num`` is set to 1, so grade 20 maps to position 0 and
    grade 0 to position 20.
    """
    encoded = np.zeros(21, dtype=int)
    encoded[20 - num] = 1
    return encoded
def numto2D(a):
    """Map a grade to a 3-class one-hot vector: [low, mid, high].

    Grades >= 18 are "high", >= 12 are "mid", anything lower is "low".
    """
    if a >= 18:
        one_hot = (0, 0, 1)
    elif a >= 12:
        one_hot = (0, 1, 0)
    else:
        one_hot = (1, 0, 0)
    return np.asarray(one_hot)
def train(hidden_units_size,training_iterations):
num_classes = 3 # 输出大小
input_size = 32 # 输入大小
batch_num = 10
# hidden_units_size = 60 # 隐藏层节点数量
# training_iterations = 20
data = pd.read_csv("student-mat.csv")
# print(data.head())
# 输入数据
# 数据类型转换 - 转换为可计算的数据类型int
for col in data.columns:
if (not str(data[col].dtype).startswith("int")):
# print("Coloum Name ", col, " Type ", data[col].dtype)
# print("Unique values for ", col, data[col].unique(), "\n")
values = data[col].unique() # 取出该列所以可能取值
convertor = dict(zip(values, range(len(values)))) # 定义转换函数
data[col] = [convertor[item] for item in data[col]] # 将定性数据转为定量的数据
# print("Coloum Name ", col, " Type ", data[col].dtype)
# print("Unique values for ", col, data[col].unique(), "\n")
#print(data.describe()) # 查看数据描述信息,均值,max,min
print('Data Size ', data.shape)
training_features = ['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu','Mjob', 'Fjob', 'reason', 'guardian', 'traveltime', 'studytime',
'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',
'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc',
'Walc', 'health', 'absences', 'G1', 'G2']
label_feature = ['G3']
selected_feature_data = data
# 区分训练集和测试集
num_data = data.shape[0]
num_input = data.shape[1] - 1
number_training = int(0.7 * num_data) # 训练数据个数
number_testing = int(num_data - number_training) # 测试数据个数
print("number of traning :",number_training)
print("number of testing :",number_testing)
# -- 训练数据集 --
training_data_features = np.array(selected_feature_data.head(number_training)[training_features])
# 转为4D
training_data_labels = np.zeros([number_training,num_classes], dtype = float, order = 'C')
temp_label = np.array(selected_feature_data.head(number_training)[label_feature])
for i in range(number_training):
label = temp_label[i] # 取出当前标签
training_data_labels[i] = numto2D(int(label))
# print('training:')
# print(training_data_features[0:5])
# print(training_data_labels[0:5])
# -- 测试数据集 --house_info.loc[3:6]
testing_data_features = np.array(selected_feature_data.loc[number_testing:][training_features])
# 转为4D
testing_data_labels = np.zeros([number_training,num_classes], dtype = float, order = 'C')
temp_label = np.array(selected_feature_data.loc[number_testing:][label_feature])
for i in range(number_testing):
label = temp_label[i] # 取出当前标签
testing_data_labels[i] = numto2D(int(label))
# --------------- 以上,数据集已经整理好 ---------------
# print('训练数据集特征:',training_data_features.shape)
# print('训练数据集标签:',training_data_labels.shape)
# print('测试数据集特征:',testing_data_features.shape)
# print('测试数据集标签:',testing_data_labels.shape)
# 300 20 0.05
X = tf.placeholder(tf.float32, shape = [None, input_size])
Y = tf.placeholder(tf.float32, shape = [None, num_classes])
# W1 = tf.Variable(tf.random_normal ([input_size, num_classes], stddev = 0.1))
# B1 = tf.Variable(tf.constant (0.1), [num_classes])
#final_opt = tf.matmul(X, W1) + B1 # 输入层到隐藏层正向传播
# 含一个隐层
W1 = tf.Variable(tf.random_normal ([input_size, hidden_units_size], stddev = 0.1))
B1 = tf.Variable(tf.constant (0.1), [hidden_units_size])
W2 = tf.Variable(tf.random_normal ([hidden_units_size, num_classes], stddev = 0.1))
B2 = tf.Variable(tf.constant (0.1), [num_classes])
hidden_opt = tf.matmul(X, W1) + B1 # 输入层到隐藏层正向传播
hidden_opt = tf.nn.relu(hidden_opt) # 激活函数,用于计算节点输出值
final_opt = tf.matmul(hidden_opt, W2) + B2 # 隐藏层到输出层正向传播
# 对输出层计算交叉熵损失
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=final_opt))
# 梯度下降算法,这里使用了反向传播算法用于修改权重,减小损失
opt = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# 初始化变量
init = tf.global_variables_initializer()
# 计算准确率
correct_prediction =tf.equal (tf.argmax (Y, 1), tf.argmax(final_opt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
sess = tf.Session ()
sess.run (init)
train_acc = []
test_acc = []
train_loss = []
for i in range (training_iterations) :
batch_input = training_data_features[i*batch_num:i*batch_num+batch_num]
batch_labels = training_data_labels[i*batch_num:i*batch_num+batch_num]
# 训练
training_loss = sess.run ([opt, loss], feed_dict = {X: batch_input, Y: batch_labels})
train_accuracy = accuracy.eval (session = sess, feed_dict = {X: batch_input,Y: batch_labels})
test_accuracy = accuracy.eval(session=sess, feed_dict={X: testing_data_features, Y: testing_data_labels})
train_acc.append(train_accuracy)
test_acc.append(test_accuracy)
train_loss.append(training_loss[1])
print ("step : %d, training accuracy = %2f ,training loss %2f,test_accuracy %2f "% (i, train_accuracy,training_loss[1],test_accuracy))
return train_acc,test_acc,train_loss
# print('testing -----')
# test_accuracy = accuracy.eval(session=sess, feed_dict={X: testing_data_features, Y: testing_data_labels})
# print("tesing accuracy = %2f " % (test_accuracy))
def averagenum(num):
print(num)
nsum = 0
for i in range(len(num)):
nsum += num[i]
return nsum / len(num)
def lineplot(x_data, y_data, x_label="", y_label="", title=""):
# Create the plot object
_, ax = plt.subplots() # Plot the best fit line, set the linewidth (lw), color and
# transparency (alpha) of the line
ax.plot(x_data, y_data, lw = 2, color = '#539caf', alpha = 1) # Label the axes and provide a title
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return ax
def plot_trendline1(x_num, y, n):
ax1 = plt.subplot(2, 2, 1)
plt.sca(ax1) # 选择子图1
plt.legend() # 添加这个才能显示图例
x = np.linspace(1, x_num, x_num)
plt.xticks(x)
parameter = np.polyfit(x, y, n) # 计算趋势线
y2 = [0]*len(y)
for i in range(len(y)) :
y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 0.6
plt.xlabel('training step', color='black')
plt.ylabel('training accuracy', color='black')
plt.scatter(x, y,label='training accuracy') # 画散点图
plt.plot(x, y2, color='g',label='trendline')# 画趋势线
plt.legend() # 添加这个才能显示图例
def plot_trendline2(x_num, y, n):
ax2 = plt.subplot(2, 2, 2)
plt.sca(ax2) # 选择子图2
x = np.linspace(1, x_num, x_num)
plt.xticks(x)
parameter = np.polyfit(x, y, n)
y2 = [0]*len(y)
print(y2)
for i in range(len(y)) :
y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 0.6
avg = averagenum(y)
# for i in range(len(y2)):
# y2[i] = y2[i] + avg
print('----- training svg',avg)
plt.xlabel('training step', color='black')
plt.ylabel('testing accuracy', color='black')
plt.scatter(x, y,label='testing accuracy')
plt.plot(x, y2, color='g',label='trendline')
plt.legend()
def plot_trendline3(x_num, y, n):
ax3 = plt.subplot(2, 2, 3)
plt.sca(ax3) # 选择子图3
x = np.linspace(1, x_num, x_num)
plt.xticks(x)
# plt.bar(x, y, width=0.6, tick_label=x, fc='y',color='blue')
# plt.xlabel('training step', color='black')
# plt.ylabel('cross entropy', color='black')
# plt.legend()
# plt.show()
parameter = np.polyfit(x, y, n)
y2 = [0] * len(y)
print(y2)
for i in range(len(y)):
y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 1.2
avg = averagenum(y)
# for i in range(len(y2)):
# y2[i] = y2[i] + avg
print('----- training svg', avg)
plt.xlabel('training step', color='black')
plt.ylabel('training cross entropy', color='black')
plt.scatter(x, y, label='training cross entropy')
plt.plot(x, y2, color='r', label='trendline')
plt.legend() # 添加这个才能显示图例
plt.show()
# plt.barh(range(len(x)), y, tick_label=x)
# plt.show()
# parameter = np.polyfit(x, y, n)
# y2 = [0]*len(y)
# print(y2)
# for i in range(len(y)) :
# y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 1.0
# avg = averagenum(y2)
# for i in range(len(y2)):
# y2[i] = y2[i] + avg
# plt.xlabel('testing step', color='black')
# plt.ylabel('cross entropy', color='black')
# plt.scatter(x, y)
# plt.plot(x, y2, color='g')
# plt.show()
if __name__ == '__main__':
train_acc, test_acc, train_loss = train(15, 18)
# print(train_acc)
# print(test_acc)
# print(train_loss)
# lineplot(range(28), test_acc, x_label="step", y_label="test_acc", title="")
# lineplot(range(28), train_acc, x_label="step", y_label="train_acc", title="")
# plt.show(lineplot(range(28), train_loss, x_label="step", y_label="train_acc", title=""))
plot_trendline1(18, test_acc,3)
plot_trendline2(18, train_acc, 3)
plot_trendline3(18, train_loss, 3)
# dotImg(range(21), test_acc)
# dotImg(range(21), train_acc)
# dotImg(range(21), train_loss)
# choose step17 | cloud0606/AI | BP神经网络/bp3d_2l.py | bp3d_2l.py | py | 10,717 | python | en | code | 0 | github-code | 36 |
35206134452 | from typing import List
import random
##################################
# GENERAL STUFF
class Queen():
def __init__(self, pos: int, threats: int = -1):
# the position of the queen on the board
self.pos = pos
# the number of threats on the queen
self.threats = threats
def __eq__(self, o: object) -> bool:
return (self.pos == o.pos) & (self.threats == o.threats)
def __str__(self) -> str:
return f'Queen(pos: {self.pos}, threats: {self.threats})'
def calc_threats(queens: List[Queen]) -> List[int]:
# check threats on same row, main diagonal and antidiagonal
# the check for same row is obvious: [0, 2, 1, 2]
# loop and check for duplicated of the number (position)
# the check for the diagonal goes like this:
# the diagonal is formed by pos, pos + 1, pos + 2, pos + 3, and so on....
# the same for pos - 1, pos - 2, pos - 3 ....
# for a conflict to occur on the diagonal, pos will be an increment of ouu current queen position,
# also the increment increases by 1 when the position increse
# example: [1, 3, 0, 2] has no conflict even though the last queen is an increment of the first queen position.
# and that is because the increment is not constant and increses as we go further.
# the same for why the second queen is not counted as a threat, it is not a single increment.
# the increment changes to a decrement for other sides of the diagonal.
length = len(queens)
threats = [0] * length
# check for same row threats
col = 0
while col < length:
# check for +ve same row threats
row = col + 1
while row < length:
if (queens[row].pos == queens[col].pos):
threats[col] += 1
row += 1
# check for -ve same row threats
row = col - 1
while row > -1:
if (queens[row].pos == queens[col].pos):
threats[col] += 1
row -= 1
col += 1
# check for main diagonal threats
col = 0
while col < length:
# check for +ve main diagonal
row = col + 1
inc = 1
while row < length:
if queens[row].pos == queens[col].pos + inc:
threats[col] += 1
inc = inc + 1
row = row + 1
# check for -ve main diagonal
row = col - 1
dec = 1
while row > -1:
if queens[row].pos == queens[col].pos - dec:
threats[col] += 1
dec = dec + 1
row = row - 1
col += 1
# check for antidiagonal threats
col = 0
while col < length:
# check for +ve antidiagonal threats
row = col + 1
dec = 1
while row < length:
if queens[row].pos == queens[col].pos - dec:
threats[col] += 1
dec = dec + 1
row = row + 1
# check for -ve antidiagonal threats
row = col - 1
inc = 1
while row > -1:
if queens[row].pos == queens[col].pos + inc:
threats[col] += 1
inc = inc + 1
row = row - 1
col += 1
return threats
def update_threats(queens: List[Queen], threats: List[int]) -> None:
for i, queen in enumerate(queens):
queen.threats = threats[i]
def copy_queens(queens: List[Queen]) -> List[Queen]:
copy = []
for queen in queens:
copy.append(Queen(queen.pos, queen.threats))
return copy
##################################
# A* STUFF
class BoardNode():
# no. of steps default to 0 representing the initial (start) state
def __init__(self, queens: List[Queen] = None, steps: int = 0):
# the queens that defines the state of this board node
# the index of the array itself represents what col we are at.
# the value in the index represents what row we are at.
# example: [2, 0, 1], represents:
# 0 1 2 <- column
# 0 - x -
# 1 - - x
# 2 x - -
# ^ row
self.queens = queens
# total_threats "h": defines the total number of threats on the board
total_threats = 0
for queen in queens:
total_threats += queen.threats
self.total_threats = total_threats
# steps "g": defines the number of steps taken by a queen to reach this state
self.steps = steps
# f = g + h
self.cost = self.steps + self.total_threats
def __eq__(self, o: object) -> bool:
return self.queens == o.queens
def __str__(self) -> str:
str = '['
for q in self.queens:
str += f'{q.pos}, '
str += ']'
return f'BoardNode({self.cost} = {self.steps} + {self.total_threats}, queens: {str})'
def generate_states(queensList: List[Queen]) -> List[BoardNode]:
# the goal of this function is to generate all possible moves
# or (states), a state represents the board, hence the usage of BoardNode
# a boardNode stores the cost of reaching (steps) it and the amount of threats in it
# this is used to calculate its cost (cost [f] = steps [g] + h [total_threats])
# a boardNode is generated by moving a queen, using the new positions and threats to create it.
# after exploring all possible (vertical) moves, they all will be added to the possible_states.
# the possible states will be used to pick the lowest cost, and then repeat.
possible_states: List[BoardNode] = []
# generate +ve moves
col = 0
while col < len(queensList):
queens = copy_queens(queensList)
steps = 0
row = queens[col].pos
while row < len(queens) - 1:
queens[col].pos += 1
steps += 1
new_threats = calc_threats(queens)
update_threats(queens, new_threats)
qs = copy_queens(queens)
possible_states.append(BoardNode(qs, steps))
row += 1
col += 1
# generate -ve moves
col = 0
while col < len(queensList):
queens = copy_queens(queensList)
steps = 0
row = queens[col].pos
while row > 0:
queens[col].pos -= 1
steps += 1
new_threats = calc_threats(queens)
update_threats(queens, new_threats)
qs = copy_queens(queens)
possible_states.append(BoardNode(qs, steps))
row -= 1
col += 1
def sortKey(e: BoardNode):
return e.cost
possible_states.sort(reverse=True, key=sortKey)
return possible_states
def rand_initial_state(N: int) -> BoardNode:
queens: List[Queen] = []
for n in range(N):
queens.append(Queen(random.randint(0, N - 1)))
threats = calc_threats(queens)
update_threats(queens, threats)
return BoardNode(queens)
def a_star(state: BoardNode, visited_states: List[BoardNode], steps_count):
# generate possible next moves/states
states: List[BoardNode] = generate_states(state.queens)
# get the move/state with lowest cost
next_state: BoardNode = states.pop()
# if the popped state and the one before it has equal cost (f),
# check if the one before it has lower threats (h), if yes choose it.
if next_state.cost == states[-1].cost:
if states[-1].total_threats < next_state.total_threats:
next_state = states.pop()
# check if the goal state has been reached.
# the goal states is defined by the threats (h) being 0
if next_state.total_threats == 0:
visited_states.clear()
print('HOLAAA')
print(f'final state: {next_state}')
steps_count[0] += 1
return next_state
# check if the popped state has already been visited before
# if yes, get the next possible state/move, and repeat.
i = 0
while i < len(visited_states):
if next_state == visited_states[i]:
if (len(states) > 0):
next_state = states.pop()
i = 0
continue
i += 1
steps_count[0] += 1
visited_states.append(next_state)
return next_state
##################################
# GENETIC STUFF
MUTATE_RATE: float = 0.05
CROSSOVER_RATE: float = 1.0
MULTIPOINT: bool = False
class Solution():
def __init__(self, queens: List[Queen]):
# the queens define the solution/chromosome,
# The position of each queen is a gene.
# the queen object itself is just a wrapper class for the queen position and theeats on it.
self.queens = queens
# total_threats (fitness): the fitness of the solution, lower is better. 0 is solved.
total_threats = 0
for queen in queens:
total_threats += queen.threats
self.total_threats = total_threats
def __str__(self) -> str:
str = '['
for q in self.queens:
str += f'{q.pos}, '
str += ']'
return f'Solution(fitness: {self.total_threats}, queens: {str})'
# creates a random solution (random queen positions)
def create_solution(N) -> Solution:
queens: List[Queen] = []
for n in range(N):
queens.append(Queen(random.randint(0, N - 1)))
threats = calc_threats(queens)
update_threats(queens, threats)
return Solution(queens)
# returns a mutated gene (a new position for a queen)
def mutated_gene(N: int) -> int:
return random.randint(0, N - 1)
# where the magic happens,
# depending on the passe paras it will crossover and mutate to produce a new solution out of the two passed solutions.
def mate(parent1: Solution, parent2: Solution, mutate_rate: float = MUTATE_RATE, multipoint: bool = MULTIPOINT, crossover_rate: float = CROSSOVER_RATE) -> Solution:
child: Solution = None
prob = random.random()
if prob < crossover_rate:
child = crossover(parent1, parent2, multipoint)
else:
child = parent1 if parent1.total_threats < parent2.total_threats else parent2
for queen in child.queens:
prob = random.random()
if prob < mutate_rate:
queen.pos = mutated_gene(len(child.queens))
return child
# takes two solutions and crosses them over on a random point,
# this produces to children, the fittest is returned.
def crossover(parent1: Solution, parent2: Solution, multipoint: bool = False) -> Solution:
if not multipoint:
point: int = random.randint(0, len(parent1.queens) - 1)
queens1: List[Queen] = copy_queens(
parent1.queens[:point] + parent2.queens[point:])
queens2: List[Queen] = copy_queens(
parent2.queens[:point] + parent1.queens[point:])
new_threats = calc_threats(queens1)
update_threats(queens1, new_threats)
new_threats = calc_threats(queens2)
update_threats(queens2, new_threats)
child1: Solution = Solution(queens1)
child2: Solution = Solution(queens2)
return child1 if child1.total_threats < child2.total_threats else child2
def genetic(N: int, population_size: int, generations: int, elitism: bool = True, mutate_rate: float = MUTATE_RATE, multipoint: bool = MULTIPOINT, crossover_rate: float = CROSSOVER_RATE, generation_count=[0]) -> Solution:
generation: int = 1
solved: bool = False
population: List[Solution] = []
for _ in range(population_size):
population.append(create_solution(N))
while (generation <= generations) & (not solved):
# sort the population based on fitness (threats)
population.sort(key=lambda solution: solution.total_threats)
if population[0].total_threats == 0:
solved = True
print('Hola FOUND ITTTT')
print(population[0])
generation_count[0] = generation
return population[0]
new_generation: List[Solution] = []
if elitism:
# pass the top 10% solutions to the next generation
top_ten = int((10 * population_size) / 100)
new_generation.extend(population[:top_ten])
# pick and mate parents for the next genration randomly from the top 50%
top_fifty = int((50 * population_size) / 100)
for _ in range(int((90 * population_size) / 100)):
parent1 = random.choice(population[:top_fifty])
parent2 = random.choice(population[:top_fifty])
child = mate(parent1, parent2, mutate_rate,
multipoint, crossover_rate)
new_generation.append(child)
population = new_generation
# print(f'gen: {generation}, {population[0]}')
generation += 1
generation_count[0] = generation
population.sort(key=lambda solution: solution.total_threats)
return population[0]
# print(genetic(8, 100, 100))
| Just-Hussain/n-queen | nqueen.py | nqueen.py | py | 12,806 | python | en | code | 0 | github-code | 36 |
36120976493 | from typing import Any, Dict
import os
import sys
from forte.data.caster import MultiPackBoxer
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.readers import OntonotesReader, DirPackReader
from forte.data.readers.deserialize_reader import MultiPackDirectoryReader
from forte.pipeline import Pipeline
from forte.processors.base import MultiPackProcessor, MultiPackWriter
from forte.processors.writers import PackNameJsonPackWriter
from fortex.nltk import NLTKWordTokenizer, NLTKPOSTagger, NLTKSentenceSegmenter
from ft.onto.base_ontology import EntityMention, CrossDocEntityRelation
class PackCopier(MultiPackProcessor):
"""
Copy the text from existing pack to a new pack.
"""
def _process(self, input_pack: MultiPack):
from_pack: DataPack = input_pack.get_pack(self.configs.copy_from)
copy_pack: DataPack = input_pack.add_pack(self.configs.copy_to)
copy_pack.set_text(from_pack.text)
if from_pack.pack_name is not None:
copy_pack.pack_name = from_pack.pack_name + "_copy"
else:
copy_pack.pack_name = "copy"
ent: EntityMention
for ent in from_pack.get(EntityMention):
EntityMention(copy_pack, ent.begin, ent.end)
@classmethod
def default_configs(cls) -> Dict[str, Any]:
return {"copy_from": "default", "copy_to": "duplicate"}
class ExampleCoreferencer(MultiPackProcessor):
"""
Mark some example coreference relations.
"""
def _process(self, input_pack: MultiPack):
pack_i = input_pack.get_pack("default")
pack_j = input_pack.get_pack("duplicate")
for ent_i, ent_j in zip(
pack_i.get(EntityMention), pack_j.get(EntityMention)
):
link = CrossDocEntityRelation(input_pack, ent_i, ent_j)
link.rel_type = "coreference"
input_pack.add_entry(link)
class ExampleCorefCounter(MultiPackProcessor):
def __init__(self):
super().__init__()
self.coref_count = 0
def _process(self, input_pack: MultiPack):
rels = list(input_pack.get_entries_of(CrossDocEntityRelation))
self.coref_count += len(rels)
def finish(self, _):
print(f"Found {self.coref_count} pairs in the multi packs.")
def pack_example(input_path, output_path):
"""
This example read data from input path and serialize to output path.
Args:
input_path:
output_path:
Returns:
"""
print("Pack serialization example.")
nlp = Pipeline[DataPack]()
nlp.set_reader(OntonotesReader())
nlp.add(NLTKSentenceSegmenter())
nlp.add(NLTKWordTokenizer())
nlp.add(NLTKPOSTagger())
# This is a simple writer that serialize the result to the current
# directory and will use the DocID field in the data pack as the file name.
nlp.add(
PackNameJsonPackWriter(),
{
"output_dir": output_path,
"indent": 2,
"overwrite": True,
},
)
nlp.run(input_path)
def multi_example(input_path, output_path):
"""
This example reads data from input path, and write multi pack output
to output path.
Args:
input_path:
output_path:
Returns:
"""
print("Multi Pack serialization example.")
print(
"We first read the data, and add multi-packs to them, and then "
"save the results."
)
coref_pl = Pipeline()
coref_pl.set_reader(DirPackReader())
coref_pl.add(MultiPackBoxer())
coref_pl.add(PackCopier())
coref_pl.add(ExampleCoreferencer())
coref_pl.add(ExampleCorefCounter())
coref_pl.add(
MultiPackWriter(),
config={
"output_dir": output_path,
"indent": 2,
"overwrite": True,
},
)
coref_pl.run(input_path)
print(
"We can then load the saved results, and see if everything is OK. "
"We should see the same number of multi packs there. "
)
reading_pl = Pipeline()
reading_pl.set_reader(
MultiPackDirectoryReader(),
config={
"multi_pack_dir": os.path.join(output_path, "multi"),
"data_pack_dir": os.path.join(output_path, "packs"),
},
)
reading_pl.add(ExampleCorefCounter())
reading_pl.run()
def main(data_path: str):
pack_output = "pack_out"
multipack_output = "multi_out"
pack_example(data_path, pack_output)
multi_example(pack_output, multipack_output)
if __name__ == "__main__":
main(sys.argv[1])
| asyml/forte | examples/serialization/serialize_example.py | serialize_example.py | py | 4,579 | python | en | code | 230 | github-code | 36 |
33908545643 | budget = float(input())
number_nights = int(input())
price_one_night = float(input())
percent_more_expenses = int(input())/100
if number_nights > 7:
price_one_night *= 0.95
total = (number_nights * price_one_night) + (percent_more_expenses * budget)
left_needed_money = abs(budget - total)
if budget >= total:
print(f"Ivanovi will be left with {left_needed_money:.2f} leva after vacation.")
else:
print(f"{left_needed_money:.2f} leva needed.") | IvayloSavov/Programming-basics | exams/6_7_July_2019/family_trip.py | family_trip.py | py | 458 | python | en | code | 0 | github-code | 36 |
28078812679 | import numpy as np
from PIL import Image
img=Image.open("tiger.jpg")
img=np.array(img)
def rgb2gray(rgb):
return np.dot(rgb, [0.299, 0.587, 0.114])
img=rgb2gray(img)
row=img.shape[0]
col=img.shape[1]
print(row)
print(col)
# img.resize(1200,1920);
# row=img.shape[0]
# col=img.shape[1]
# print(row)
# print(col)
Image.fromarray(img).show()
filtered_image=[]
new_row=row/3
new_col=col/3
filter=[[1,1,1],
[1,1,1],
[1,1,1]]
for i in range(0,row,3):
lis=[]
for j in range(0,col,3):
val=0
for k in range(3):
for l in range(3):
val+=img[i+k][j+l]
lis.append(val/9)
filtered_image.append(lis)
filtered_image=np.array(filtered_image)
print(filtered_image)
print(filtered_image.shape[0])
print(filtered_image.shape[1])
Image.fromarray(filtered_image).show() | NegiArvind/NeroFuzzyTechniques-Lab-Program | compressing_filter.py | compressing_filter.py | py | 787 | python | en | code | 2 | github-code | 36 |
43299849494 | #!/usr/bin/env python
bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl8', 'tk8', 'gdbm',
'lzma', 'tinfo', 'tinfow', 'ncursesw', 'panelw', 'ncurses', 'panel',
'panelw']
import os
from os.path import dirname, relpath, join, exists, basename, realpath
from shutil import copy, copytree
import sys
from glob import glob
from subprocess import check_output, check_call
def get_deps_darwin(binary):
deps = {}
output = check_output(['otool', '-L', binary])
output = output.splitlines()
output = output[1:] # first line is binary name
for line in output:
path = line.strip().split()[0]
if (not path or
not path.startswith('/usr/local/') or
basename(path) == basename(binary)):
continue
needed = basename(path)
deps[needed] = path
deps.update(get_deps(path))
return deps
def get_deps(binary):
if sys.platform == 'darwin':
return get_deps_darwin(binary)
deps = {}
output = check_output(['ldd', binary])
for line in output.splitlines():
if '=>' not in line:
continue
line = line.strip()
needed, path = line.split(' => ')
if path == 'not found':
raise ValueError('Broken dependency in ' + binary)
path = path.split(' ')[0]
if not path:
continue
if needed[3:].split('.', 1)[0] not in bundle:
continue
deps[needed] = path
deps.update(get_deps(path))
return deps
def gather_deps(binaries):
deps = {}
for binary in binaries:
deps.update(get_deps(binary))
return deps
def copy_deps(deps):
copied = {}
for needed, path in deps.items():
bname = basename(path)
copy(realpath(path), 'lib/' + bname)
copied[path] = 'lib/' + bname
if not exists('lib/' + needed):
os.symlink(bname, 'lib/' + needed)
return copied
def rpath_binaries(binaries):
rpaths = {}
for binary in binaries:
check_call(['chmod', 'a+w', binary])
if sys.platform == 'darwin':
rpath = join('@executable_path', relpath('lib', dirname(binary)))
check_call(['install_name_tool', '-add_rpath', rpath, binary])
# change path for deps, this deps call is sorta redundant, but we
# don't have this dependency info in the passed in data...
deps = get_deps(binary)
for dep, path in deps.items():
rpath = join('@rpath', dep)
if rpath != path:
print('Set RPATH of {0} for {1} to {2}'.format(binary, path, rpath))
check_call(['install_name_tool', '-change', path, rpath, binary])
else:
rpath = join('$ORIGIN', relpath('lib', dirname(binary)))
check_call(['patchelf', '--set-rpath', rpath, binary])
rpaths[binary] = rpath
return rpaths
def make_portable():
exts = ['so']
if sys.platform == 'darwin':
exts = ['dylib', 'so']
binaries = glob('bin/libpypy*.' + exts[0])
if not binaries:
raise ValueError('Could not find bin/libpypy*.%s in "%s"' % (exts[0], os.getcwd()))
for ext in exts:
binaries.extend(glob('lib_pypy/*_cffi.pypy*.' + ext))
binaries.extend(glob('lib_pypy/_pypy_openssl*.' + ext))
binaries.extend(glob('lib_pypy/_tkinter/*_cffi.pypy*.' + ext))
deps = gather_deps(binaries)
copied = copy_deps(deps)
for path, item in copied.items():
print('Copied {0} to {1}'.format(path, item))
binaries.extend(copied.values())
rpaths = rpath_binaries(binaries)
for binary, rpath in rpaths.items():
print('Set RPATH of {0} to {1}'.format(binary, rpath))
# copy tcl/tk shared files, search /usr and copy the containing dir...
found_tk = found_tcl = False
for path, dirs, files in os.walk('/usr'):
if not found_tk and 'tk.tcl' in files:
print('Found tk shared files at: %s' % (path))
found_tk = True
copytree(path, 'lib/tk')
if not found_tcl and 'init.tcl' in files:
print('Found tcl shared files at: %s' % (path))
found_tcl = True
copytree(path, 'lib/tcl')
return deps
if __name__ == '__main__':
try:
os.chdir(sys.argv[1])
except:
print('Call as %s <path/to/pypy/topdir' % sys.argv[0])
exit(-1)
try:
os.mkdir('lib')
except OSError:
pass
make_portable()
| mozillazg/pypy | pypy/tool/release/make_portable.py | make_portable.py | py | 4,560 | python | en | code | 430 | github-code | 36 |
35217427072 | from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
import re
import copy
import string
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
try:
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(IMAGE_URL, pName.replace("/","").replace("\\","")+'.jpg')
except:
print('no')
def getNodeText(node):
if(node == None):
return ""
else:
return node.get_text().strip()
retryCount = 0
loadCount = 0
def getHtmlFromUrl(url, type="get", para={}):
global retryCount
try:
url = urllib.parse.quote(url, safe=string.printable).replace(' ','%20')
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36"}
request_obj=urllib.request.Request(url=url)
response_obj=urllib.request.urlopen(request_obj)
html_code=response_obj.read()
return html_code
except:
print("retry"+url)
retryCount += 1
print(retryCount)
if retryCount< 5:
getHtmlFromUrl(url)
def getRenderdHtmlFromUrl(url):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("window-size=1024,768")
chrome_options.add_argument("--no-sandbox")
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get(url)
return browser.page_source
def writeExcel(workSheet, headers, rowIndex, info):
cellIndex=1
for head in headers:
try:
if head in info:
content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
workSheet.cell(rowIndex, cellIndex).value = content.strip()
else:
workSheet.cell(rowIndex, cellIndex).value = ""
cellIndex=cellIndex+1
except:
print(rowIndex)
def getProductInfo(url, products):
print(str(len(products)) + url)
tempPinfo = {}
productHtml = getRenderdHtmlFromUrl(url)
if productHtml != None:
tempPinfo["link"] = url
sope = BeautifulSoup(productHtml, "html.parser",from_encoding="utf-8")
nameArea = sope.find("h3",attrs={"itemprop":"name"})
imgArea = sope.find("img",attrs={"itemprop":"image"})
tempPinfo["name"] = getNodeText(nameArea)
if imgArea!=None:
src = imgArea["src"]
if src.find("https:") < 0:
src = "https:"+src
urllib_download(src, tempPinfo["name"])
tempPinfo["img"] = tempPinfo["name"]+".jpg"
specs = sope.find_all("label",attrs={"class":"pull-left"})
for spec in specs:
title = getNodeText(spec)
tempPinfo[title] = getNodeText(spec.nextSibling.nextSibling)
products.append(tempPinfo.copy())
def getProductList(url, products):
print(url)
productListHtml = getRenderdHtmlFromUrl(url)
sope = BeautifulSoup(productListHtml, "html.parser",from_encoding="utf-8")
pros = sope.find_all("article")
for pro in pros:
link = pro.find("a")
if link!=None:
getProductInfo("https://www.001chemical.com"+link["href"], products)
excelFileName="cfmats.xlsx"
wb = Workbook()
workSheet = wb.active
products = []
# getProductInfo('https://www.001chemical.com/chem/204580-28-9', products)
# getProductList('https://www.001chemical.com/chem/organic-siliconeslist1', products)
for pageIndex in range(1, 20):
getProductList("https://www.001chemical.com/chem/organic-siliconeslist"+str(pageIndex), products)
headers=['link','name','img','CAS Number','Catalog Number','Molecular Formula',
'Molecular Weight']
rindex = 1
for p in products:
writeExcel(workSheet, headers, rindex, p)
if rindex%100 == 0:
wb.save(excelFileName)
rindex = rindex+1
print("flish")
wb.save(excelFileName) | Just-Doing/python-caiji | src/work/2021年3月15日/chemical.py | chemical.py | py | 3,863 | python | en | code | 1 | github-code | 36 |
28493766698 | import time
import os
import numpy as np
import pyaudio
import tensorflow as tf
import speech_recognition as sr
from datetime import datetime
import wave
import threading
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from ThreeCharacterClassicInference import ThreeCharacterClassicInference
from tts import text_to_speech
# Set seeds for reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)

# Constants
FRAMES_PER_BUFFER = 1024  # samples pulled from the mic per stream.read()
FORMAT = pyaudio.paInt16  # 16-bit signed PCM capture format
CHANNELS = 1  # mono capture
RATE = 16000  # capture sample rate in Hz
TAILING_DURATION = 1.5  # Tailing audio duration in seconds
KEYWORD = "你好"  # wake word matched against the speech-to-text transcript

# Global variables
stop_plotting_thread = False  # NOTE(review): declared global in record_and_detect_keyword but never assigned — possibly dead

# Load the model
# Keyword-spotting TFLite model; tensor metadata is cached once at import time.
interpreter = tf.lite.Interpreter(model_path="hey_ego_44100_obama_5.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Secondary model that continues "Three Character Classic" verses.
three_char_classic_model = ThreeCharacterClassicInference(
    model_path="3character.tflite", dictionary_path="3character_dict.pickle"
)
def get_spectrogram(waveform):
    """Turn a 1-D audio waveform into a magnitude STFT spectrogram.

    The signal is truncated or zero-padded to a fixed 66150 samples
    before the STFT, and a trailing channel axis is appended so the
    result can be fed straight into the model.
    """
    target_len = 66150
    clipped = tf.cast(waveform[:target_len], dtype=tf.float32)
    # Pad with zeros up to the fixed model input length.
    pad = tf.zeros([target_len] - tf.shape(clipped), dtype=tf.float32)
    padded = tf.concat([clipped, pad], 0)
    stft = tf.signal.stft(padded, frame_length=512, frame_step=256)
    magnitude = tf.abs(stft)
    # Add a channels-last axis: (frames, bins) -> (frames, bins, 1).
    return magnitude[..., tf.newaxis]
def preprocess_audiobuffer(waveform):
    """Scale a raw int16 buffer into [-1, 1), spectrogram it, and add a batch axis."""
    # int16 full scale is 32768, so this maps samples into [-1, 1).
    scaled = tf.convert_to_tensor(waveform / 32768, dtype=tf.float32)
    spec = get_spectrogram(scaled)
    # Prepend the batch dimension the TFLite interpreter expects.
    return tf.expand_dims(spec, 0)
def predict_mic(audio):
    """Run the keyword-spotting model on an audio buffer and return the class index."""
    start = time.time()
    spec = preprocess_audiobuffer(audio)
    # Push the spectrogram through the TFLite interpreter.
    interpreter.set_tensor(input_details[0]["index"], spec)
    interpreter.invoke()
    logits = interpreter.get_tensor(output_details[0]["index"])
    prediction = tf.nn.softmax(logits)
    label_pred = np.argmax(prediction, axis=1)
    elapsed = time.time() - start
    print(prediction)
    print(label_pred)
    print(f"Predicted in: {elapsed}")
    return label_pred[0]
def save_audio_to_wav(audio_buffer, output_folder=None, rate=44100):
    """Write a mono int16 buffer to a timestamped WAV file and return its path.

    ``output_folder`` defaults to the current working directory and is
    created if missing.  NOTE(review): the default ``rate`` of 44100
    differs from the module-level RATE (16000) — confirm which one the
    intended callers pass.
    """
    folder = output_folder or os.getcwd()
    os.makedirs(folder, exist_ok=True)
    # Timestamp makes each capture's filename unique.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_path = os.path.join(folder, f"audio_{timestamp}.wav")
    with wave.open(out_path, "wb") as wav_file:
        wav_file.setnchannels(1)  # mono channel
        wav_file.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))  # 16-bit samples
        wav_file.setframerate(rate)
        wav_file.writeframes(audio_buffer.tobytes())
    return out_path
# def plot_spectrogram(audio_buffer, spectrogram_func, stop_event):
# while not stop_event.is_set():
# # Set up the initial plot
# fig, ax = plt.subplots()
# spec = spectrogram_func(audio_buffer)
# im = ax.imshow(
# spec,
# aspect="auto",
# origin="lower",
# cmap="viridis",
# vmin=0.0,
# vmax=1.0,
# )
# ax.set_xlabel("Time")
# ax.set_ylabel("Frequency")
# plt.colorbar(im, ax=ax)
# ax.set_title("Spectrogram")
# # Add a text element to display the update frequency
# freq_text = ax.text(
# 0.01, 0.95, "", transform=ax.transAxes, fontsize=10, color="white"
# )
# # Update function for the plot
# def update(frame):
# nonlocal audio_buffer
# start_time = time.time()
# spec = spectrogram_func(audio_buffer)
# im.set_data(spec)
# im.set_clim(vmin=0.0, vmax=1.0)
# # Calculate and display the update frequency
# end_time = time.time()
# update_freq = 1 / (end_time - start_time)
# freq_text.set_text(f"{update_freq:.2f} fps")
# return [im, freq_text]
# # Create the animation
# ani = FuncAnimation(fig, update, blit=True, interval=RATE / FRAMES_PER_BUFFER)
# # Show the plot
# plt.show()
# if stop_event.is_set():
# plt.close()
# break
def plot_spectrogram(audio_buffer, spectrogram_func, stop_event):
    """Live-plot the spectrogram of ``audio_buffer`` until asked to stop.

    ``spectrogram_func`` maps the raw buffer to a 2-D array for imshow.
    The loop exits when ``stop_event`` is set by the recording thread or
    when the user closes the figure window.

    NOTE(review): ``audio_buffer`` is captured by reference, so in-place
    writes from the recorder thread show up here without locking —
    confirm that a redraw during a partial update is acceptable.
    """
    # Initialize the stop_event flag
    # stopped is a one-element list so the close handler can mutate it.
    stopped = [False]

    def handle_close(evt):
        # Fired by matplotlib when the user closes the window.
        stopped[0] = True

    while not stop_event.is_set():
        # Set up the initial plot
        fig, ax = plt.subplots()
        spec = spectrogram_func(audio_buffer)
        im = ax.imshow(
            spec,
            aspect="auto",
            origin="lower",
            cmap="viridis",
            vmin=0.0,
            vmax=1.0,
        )
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        plt.colorbar(im, ax=ax)
        ax.set_title("Spectrogram")
        # Add a text element to display the update frequency
        freq_text = ax.text(
            0.01, 0.95, "", transform=ax.transAxes, fontsize=10, color="white"
        )

        # Update function for the plot: recompute the spectrogram from the
        # (externally mutated) buffer and refresh the image + fps readout.
        def update(frame):
            nonlocal audio_buffer
            start_time = time.time()
            spec = spectrogram_func(audio_buffer)
            im.set_data(spec)
            im.set_clim(vmin=0.0, vmax=1.0)
            # Calculate and display the update frequency
            end_time = time.time()
            update_freq = 1 / (end_time - start_time)
            freq_text.set_text(f"{update_freq:.2f} fps")
            if stopped[0]:
                return []
            return [im, freq_text]

        # Create the animation
        ani = FuncAnimation(fig, update, blit=True, interval=RATE / FRAMES_PER_BUFFER)
        # Connect the event handler
        fig.canvas.mpl_connect("close_event", handle_close)
        # Show the plot (non-blocking so we can poll stop conditions below)
        plt.show(block=False)
        # Pump the GUI event loop until either side requests shutdown.
        while not stop_event.is_set() and not stopped[0]:
            fig.canvas.flush_events()
            time.sleep(0.1)
        if stop_event.is_set() or stopped[0]:
            plt.close()
            break
def record_and_detect_keyword():
    """Record from the mic until the wake word is confirmed, then return.

    Keeps a rolling TAILING_DURATION-second int16 buffer; the TFLite model
    screens each window, and a Google speech-to-text pass confirms that
    KEYWORD actually appears in the transcript before waking up.  A
    daemon thread plots the live spectrogram while recording.
    """
    global stop_plotting_thread
    # Rolling window of the most recent TAILING_DURATION seconds of audio.
    audio_buffer = np.zeros(int(TAILING_DURATION * RATE), dtype=np.int16)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=FRAMES_PER_BUFFER,
    )
    stop_event = threading.Event()
    try:
        # Visualization runs on its own daemon thread, reading the same buffer.
        plot_thread = threading.Thread(
            target=plot_spectrogram,
            args=(
                audio_buffer,
                lambda buf: preprocess_audiobuffer(buf).numpy().squeeze(),
                stop_event,
            ),
            daemon=True,
        )
        plot_thread.start()
        is_awake = False
        recognizer = sr.Recognizer()
        while True and not is_awake:
            data = stream.read(FRAMES_PER_BUFFER)
            new_audio = np.frombuffer(data, dtype=np.int16)
            # Update the audio buffer: shift left, append the newest chunk.
            audio_buffer[:-FRAMES_PER_BUFFER] = audio_buffer[FRAMES_PER_BUFFER:]
            audio_buffer[-FRAMES_PER_BUFFER:] = new_audio
            # Save the audio buffer to a WAV file
            # output_file = save_audio_to_wav(
            #     audio_buffer, output_folder="recorded_audio"
            # )
            # Predict using the tailing audio data
            if not is_awake:
                result = predict_mic(audio_buffer)
                # Class 0 is treated as the wake-word class by this model.
                if result == 0:
                    print(f"Obama model detected {KEYWORD}")
                    # is_awake = True
                    # Second-stage check: only wake if the transcript really
                    # contains KEYWORD (cuts false positives from the model).
                    audio_data = sr.AudioData(
                        audio_buffer.tobytes(), sample_rate=RATE, sample_width=2
                    )
                    try:
                        text = recognizer.recognize_google(audio_data, language="zh-CN")
                        print("You said: ", text)
                        if KEYWORD in text:
                            is_awake = True
                    except sr.UnknownValueError:
                        print("Google Speech Recognition could not understand audio")
                    except sr.RequestError as e:
                        print(
                            "Could not request results from Google Speech Recognition service; {0}".format(
                                e
                            )
                        )
            if is_awake:
                # Tear down audio and signal the plotting thread to exit.
                p.terminate()
                stop_event.set()
    except Exception as e:
        print(e)
        p.terminate()
        stop_event.set()
def three_char_classic_reply():
    """Listen for a Three Character Classic verse and speak the continuation.

    Records in ~1-second batches into a rolling 5-second buffer, transcribes
    it with Google speech-to-text, and when a new transcript of at least
    three characters arrives, feeds its first three characters to the
    verse model and speaks the reply.  Returns after 10 idle seconds.
    """
    previous_sr_text = ""
    # Rolling 5-second window of int16 samples.
    audio_buffer = np.zeros(int(5 * RATE), dtype=np.int16)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=FRAMES_PER_BUFFER,
    )
    recognizer = sr.Recognizer()
    # Number of mic reads per batch — the *2 makes each batch ~2 seconds,
    # despite the variable name.  NOTE(review): confirm intended cadence.
    buffers_per_second = int(RATE / FRAMES_PER_BUFFER) * 2
    idel_start_time = time.time()
    while True:
        # Read one batch of frames from the mic.
        audio_data = np.empty((buffers_per_second, FRAMES_PER_BUFFER), dtype=np.int16)
        for i in range(buffers_per_second):
            audio_data[i] = np.frombuffer(
                stream.read(FRAMES_PER_BUFFER), dtype=np.int16
            )
        audio_data = audio_data.flatten()
        # Shift the rolling buffer left and append the new batch.
        audio_buffer[: -audio_data.shape[0]] = audio_buffer[audio_data.shape[0] :]
        audio_buffer[-audio_data.shape[0] :] = audio_data
        audio_data = sr.AudioData(
            audio_buffer.tobytes(), sample_rate=RATE, sample_width=2
        )
        try:
            text = recognizer.recognize_google(audio_data, language="zh-CN")
            print("You said: ", text)
            # Only react to a transcript that changed since last time, so the
            # same utterance lingering in the buffer isn't answered twice.
            if len(text) >= 3 and text != previous_sr_text:
                previous_sr_text = text
                reply = three_char_classic_model.predict_next_3(text[0:3])
                text_to_speech(reply, "zh")
                print(f"Model reply: {reply}")
                idel_start_time = time.time()
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print(
                "Could not request results from Google Speech Recognition service; {0}".format(
                    e
                )
            )
        # Say goodnight and return after 10 seconds with no new reply.
        if (time.time() - idel_start_time) > 10:
            text_to_speech("晚安寶貝兒", "zh")
            return
if __name__ == "__main__":
    # Announce startup, then loop forever: wait for the wake word,
    # prompt the user, and run one question/answer session.
    text_to_speech("開始了", "zh")
    while True:
        print("start...")
        record_and_detect_keyword()  # blocks until the wake word is confirmed
        print("awake...")
        print("Your three char classic?..")
        text_to_speech("你好呀。 請讀出你的三字經典三連音。", "zh")
        three_char_classic_reply()  # returns after 10 idle seconds
| charis2324/SoundCube | src/main.py | main.py | py | 11,703 | python | en | code | 0 | github-code | 36 |
72467060903 | from edgar3.filing_13f import Filing_13F
from edgar3 import __version__
import os
import datetime
import csv
from google.cloud import storage
from distutils import util
from io import StringIO
def save_filing(fil: Filing_13F, year: int, quarter: int):
    """Serialize a parsed 13F filing to CSV and upload it to Cloud Storage.

    One row per holding, with the manager-level fields repeated on every
    row.  The destination blob is keyed by year/quarter/accession number,
    so re-processing the same filing overwrites the previous upload.
    Relies on the module-level ``storage_bucket``.
    """
    path_with_name = f"etl-13f/processed/reports/{year}/{quarter}/{fil.accession_number}.csv"
    blob = storage_bucket.blob(path_with_name)
    # Build the CSV in memory, then upload in a single request.
    si = StringIO()
    csv_writer = csv.writer(si, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
    csv_writer.writerow(
        [
            "ManagerName",
            "CIK",
            "Street1",
            "Street2",
            "City",
            "StateOrCountry",
            "ZipCode",
            "AccessionNumber",
            "PeriodOfReport",
            "SignatureDate",
            "CUSIP",
            "SecurityName",
            "TitleOfClass",
            "ValueOfShares",
            "NumberOfShares",
            "TypeOfShares",
        ]
    )
    for holding in fil.holdings:
        csv_writer.writerow(
            [
                fil.manager_name,
                fil.cik,
                fil.street1,
                fil.street2,
                fil.city,
                fil.state_or_country,
                fil.zip_code,
                fil.accession_number,
                fil.period_of_report,
                fil.signature_date,
                holding.cusip,
                holding.name_of_issuer,
                holding.title_of_class,
                holding.value,
                holding.number,
                holding.type,
            ]
        )
    # strip("\r\n") drops the trailing newline the csv writer emits.
    blob.upload_from_string(si.getvalue().strip("\r\n"))
def process_filing(path: str, year: int, quarter: int) -> bool:
    """Download one raw filing blob, parse it, and persist a 13F-HR report.

    Returns True when the filing was handled (processed, deliberately
    skipped, or logged as a known-bad download) and False when parsing
    or processing failed.  NOTE(review): the visible caller ignores the
    return value — confirm the True/False contract is still relied on.
    """
    text = storage_bucket.blob(path).download_as_string().decode("utf-8")
    if len(text) == 0:
        print("Zero length")
        log_failed_process(path, year, quarter)
        return True  # allowed failure??
    elif text.startswith("<!DOCTYPE html>"):
        # An HTML page means the original download captured an error page.
        print("Invalid download")
        log_failed_process(path, year, quarter)
        return True
    fil = Filing_13F(text)
    # Notice filings and amendments are deliberately skipped, not processed.
    if "13F-NT" in fil.documents:
        return True  # we don't care about these
    elif "13F-NT/A" in fil.documents:
        return True  # don't care about these either
    elif "13F-HR/A" in fil.documents:
        return True
    try:
        if fil.process():
            save_filing(fil, year, quarter)
        else:
            # NOTE(review): a False from fil.process() is not logged via
            # log_failed_process, unlike the exception path — confirm intent.
            return False
    except Exception as e:
        print(f"Exception on {path}: {e}")
        print(path)
        log_failed_process(path, year, quarter)
        return False
    return True
def log_failed_process(path: str, year: int, quarter: int):
    """Copy a filing that failed processing into the failed_reports area.

    The original blob is left in place; the copy's name is prefixed with
    year/quarter so failures from different periods don't collide.
    """
    basename = path.split("/")[-1]
    dest = f"etl-13f/failed_reports/{year}Q{quarter}_{basename}"
    print(f"Failed on {path}, copied to {dest}")
    source_blob = storage_bucket.blob(path)
    storage_bucket.copy_blob(source_blob, storage_bucket, dest)
print(f"Using Edgar Version: {__version__}", flush=True)
# Date range comes from the environment, defaulting to the current quarter.
now = datetime.datetime.now()
start_year = int(os.getenv("START_YEAR", now.year))
start_quarter = int(os.getenv("START_QUARTER", (now.month - 1) // 3 + 1))
end_year = int(os.getenv("END_YEAR", now.year))
end_quarter = int(os.getenv("END_QUARTER", (now.month - 1) // 3 + 1))
bucket_name = os.getenv("BUCKET_NAME", "farr-ai-data-lake")
# NOTE(review): force_process is read but never used below — possibly dead.
force_process = bool(util.strtobool(os.getenv("FORCE_PROCESS", "False")))
print(f"Processing 13Fs for {start_year}:Q{start_quarter}-{end_year}:Q{end_quarter} into {bucket_name}", flush=True)
storage_client = storage.Client()
storage_bucket = storage_client.get_bucket(bucket_name)
# Walk every (year, quarter) pair in the inclusive range.
for year in range(start_year, end_year + 1):
    # if we're starting, the first quarter of the year can be passed in
    if year == start_year:
        quarter_low = start_quarter
    else:
        quarter_low = 1
    # and if we're ending, the last quarter of the year can be passed in
    if year == end_year:
        quarter_high = end_quarter
    else:
        # Never look past the current quarter of the current year.
        if year == now.year:
            quarter_high = (now.month - 1) // 3 + 1
        else:
            quarter_high = 4
    for quarter in range(quarter_low, quarter_high + 1):
        print(f"Processing {year}:Q{quarter}", end="", flush=True)
        base_path = f"etl-13f/reports/{year}/{quarter}"
        # List all raw filing blobs for this quarter and process each one.
        known_blobs = [blob.name for blob in storage_bucket.list_blobs(prefix=base_path)]
        for file in known_blobs:
            process_filing(file, year, quarter)
        print(f" {len(known_blobs)} processed", flush=True)
| kfarr3/etl-13f | process_filings/src/process_filings.py | process_filings.py | py | 4,530 | python | en | code | 0 | github-code | 36 |
class Node:
    """A single network node discovered during a scan.

    The hostname starts out equal to the IP; ``type`` distinguishes the
    node kind ('endnode', 'router', ...) and ``target`` marks nodes that
    were explicit scan targets.  Plugin output is collected per plugin.
    """

    def __init__(self, ip, type=None, target=False):
        # Per-plugin result lines, keyed by plugin name.
        self.plugins = {}
        self.ip = ip
        # Until a reverse lookup happens, the hostname is just the IP.
        self.hostname = ip
        # node is default, we expect to work with 'endnode', 'router', ...
        self.type = type
        self.target = target

    def addPluginResults(self, name, lines):
        """Store the output ``lines`` produced by plugin ``name``."""
        self.plugins[name] = lines
class EndNode:
    """A traced target plus the route(s) discovered toward it.

    Routes are stored as ``{route_index: {hop_number: ip}}``.  Hop numbers
    above 255 are folded into an alternate route index via divmod (see
    ``addHop``).  ``distance`` is the estimated hop count derived from the
    received TTL.
    """

    def __init__(self, localIp, targetIp, nodeId, ttl_recv):
        self.targetIp = targetIp
        self.endnodeId = nodeId
        self.status = 0            # processing state flag, managed externally
        self.times_processed = 0   # how many times this node has been processed
        self.routes = {}
        # Hop 0 of route 0 is always the local machine.
        self.addHop(0, localIp, localIp)
        self.distance = self.detDistance(ttl_recv)

    def addHop(self, hop, ip, localIp):
        """Record ``ip`` at position ``hop``; hops > 255 select another route.

        When a route index is seen for the first time, a truthy ``localIp``
        seeds its hop 0 so every route starts at the local machine.
        """
        route = 0
        if hop > 255:
            route, hop = divmod(hop, 256)
        # dict.has_key() was removed in Python 3; `in` is behaviorally
        # identical and also works on Python 2.
        if route not in self.routes:
            self.routes[route] = {}
            if localIp:
                self.routes[route][0] = localIp
        self.routes[route][hop] = ip

    def detDistance(self, ttl_recv):
        """Estimate hop distance from the received TTL.

        Assumes the sender started from one of the common initial TTLs
        (64, 128 or 255) and subtracts from the nearest one above
        ``ttl_recv``.
        """
        if ttl_recv > 128:
            return 255 - ttl_recv
        elif ttl_recv <= 64:
            return 64 - ttl_recv
        else:
            return 128 - ttl_recv
def cleanRoute(route):
    """Fill gaps in a ``{hop: ip}`` route dict and trim trailing duplicates.

    Walks from the highest hop down to hop 1: a missing previous hop is
    filled with a synthetic "unknown_<next-ip>.<hop>" marker when the next
    hop has a valid IP, otherwise with a copy of the next hop's value.
    Consecutive identical IPs at the tail of the route are removed.

    NOTE(review): Python 2 only — uses dict.has_key(), iterator .next()
    and list-returning range()/keys(); also assumes keys() yields hops in
    ascending order so the reversed iterator's first item is the largest
    hop, which plain py2 dicts do not guarantee — confirm.
    """
    from sendPacket import isValidIp
    # set cursor to last item (largest hop number, by the assumption above)
    last = route.keys().__reversed__().next()
    # at_end stays True only while we are still in the trailing run of hops.
    at_end = True
    cursors = range(1, last + 1)
    cursors.reverse()
    for cursor in cursors:
        if not route.has_key(cursor - 1):
            # route[cursor-1] = "unknown_%s.%d"%(nodeId,cursor-1)
            # we replace this line to link a unknown step to the following step.
            if isValidIp(route[cursor]):
                route[cursor - 1] = "unknown_%s.%d" % (route[cursor], cursor - 1)
                at_end = False
            else:
                # Next hop is itself synthetic: propagate it backwards.
                route[cursor - 1] = route[cursor]
                # route.pop(cursor)
                at_end = False
        elif (route[cursor] == route[cursor - 1] and at_end):
            # we have 2 times same ip in route at the end delete last
            route.pop(cursor)
        else:
            # We have an existing hop before this one and it is not similar to next hop
            at_end = False
    return(route)
def uniq(alist):
    """Return the unique items of ``alist`` as a list.

    The previous implementation relied on Python 2's eager ``map`` with
    uneven-length padding (on Python 3 ``map`` is lazy and stops at the
    shortest argument, so it returned an empty result) and shadowed the
    ``set`` builtin.  ``dict.fromkeys`` gives the same de-duplication on
    both Python 2 and 3.  Items must be hashable, as before.
    """
    return list(dict.fromkeys(alist))
| xychix/gtrcrt | node.py | node.py | py | 2,369 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.