text stringlengths 38 1.54M |
|---|
# Webhooks for external integrations.
import re
from typing import Dict, List, Optional, Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import authenticated_rest_api_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_int, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.lib.webhooks.git import TOPIC_WITH_BRANCH_TEMPLATE, get_push_commits_event_message
from zerver.models import UserProfile
def build_message_from_gitlog(
    user_profile: UserProfile,
    name: str,
    ref: str,
    commits: WildValue,
    before: str,
    after: str,
    url: str,
    pusher: str,
    forced: Optional[str] = None,
    created: Optional[str] = None,
    deleted: bool = False,
) -> Tuple[str, str]:
    """Build the (topic, content) pair for a Git push notification.

    ``before``, ``after``, ``forced`` and ``created`` are accepted for
    payload compatibility but are not used when rendering the message.
    """
    branch = re.sub(r"^refs/heads/", "", ref)
    commits_data = _transform_commits_list_to_common_format(commits)
    return (
        TOPIC_WITH_BRANCH_TEMPLATE.format(repo=name, branch=branch),
        get_push_commits_event_message(pusher, url, branch, commits_data, deleted=deleted),
    )
def _transform_commits_list_to_common_format(commits: WildValue) -> List[Dict[str, str]]:
    """Normalize Beanstalk commit payloads into the common dict format
    expected by `get_push_commits_event_message`."""
    commits_data = []
    for commit in commits:
        commits_data.append(
            {
                "name": commit["author"]["name"].tame(check_string),
                "sha": commit["id"].tame(check_string),
                "url": commit["url"].tame(check_string),
                "message": commit["message"].tame(check_string),
            }
        )
    return commits_data
@authenticated_rest_api_view(
    webhook_client_name="Beanstalk",
    # Beanstalk's web hook UI rejects URLs with a "@" in the username
    # section, so we ask the user to replace it with "%40".
    beanstalk_email_decode=True,
)
@has_request_variables
def api_beanstalk_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: WildValue = REQ(converter=to_wild_value),
    branches: Optional[str] = REQ(default=None),
) -> HttpResponse:
    """Forward a Beanstalk push notification to Zulip.

    Beanstalk hosts both SVN and Git repositories; a Git payload is
    recognized by the presence of a 'uri' key.
    """
    if "uri" in payload:
        # Git push: honor the optional branch filter configured in the URL.
        branch = payload["branch"].tame(check_string)
        if branches is not None and branch not in branches:
            return json_success(request)
        topic, content = build_message_from_gitlog(
            user_profile,
            payload["repository"]["name"].tame(check_string),
            payload["ref"].tame(check_string),
            payload["commits"],
            payload["before"].tame(check_string),
            payload["after"].tame(check_string),
            payload["repository"]["url"].tame(check_string),
            payload["pusher_name"].tame(check_string),
        )
    else:
        # SVN commit: summarize the revision with the first line of the message.
        revision = payload["revision"].tame(check_int)
        author = payload["author_full_name"].tame(check_string)
        changeset_url = payload["changeset_url"].tame(check_string)
        short_commit_msg = payload["message"].tame(check_string).partition("\n")[0]
        topic = f"svn r{revision}"
        content = f"{author} pushed [revision {revision}]({changeset_url}):\n\n> {short_commit_msg}"
    check_send_webhook_message(request, user_profile, topic, content)
    return json_success(request)
|
import os
import sys
import time

# Startup script for a Raspberry Pi RTSP camera: stop Apache (frees the
# ports), launch the node-rtsp-rtmp-server, then start the picam capture.
# BUG FIX: the original used Python 2 `print` statements, which are a
# syntax error under Python 3.
os.system("sudo service apache2 stop")
time.sleep(5)
print("apache2 stopped")
os.chdir("/home/pi/node-rtsp-rtmp-server")
os.system("./start_server.sh &")
print("streaming server starting")
# Give the streaming server time to come up before starting the camera.
time.sleep(40)
print("streaming server started")
os.chdir("/home/pi/picam")
os.system("./picam --alsadev hw:1,0 --rtspout -w 240 -h 160 -v 100000 -f 20 &")
print("Start streaming video")
|
# Template substitution values used when generating a new Haiku app skeleton.
variables = {
    "app.name": "Microbe",
    "year": "2011",
    "author": "Alexandre Deckner <alex@zappotek.com>",
    "app.class": "App",
    "main.view.class": "MainView",
    "main.window.class": "MainWindow",
}
# Derived entries: the MIME signature and the on-disk folder name.
variables["app.signature"] = "application/x-vnd.Haiku-" + variables["app.name"]
variables["app.folder"] = variables["app.name"].lower()
def makeHeaderGuard(classname):
    """Return the C/C++ include-guard macro name for *classname*
    (e.g. 'App' -> '_APP_H')."""
    return f"_{classname.upper()}_H"
# Include-guard entries derived from the corresponding class-name entries.
for class_key, guard_key in (
    ("app.class", "app.header.guard"),
    ("main.view.class", "main.view.header.guard"),
    ("main.window.class", "main.window.header.guard"),
):
    variables[guard_key] = makeHeaderGuard(variables[class_key])
variables["class.name"] = "Div"
variables["class.headerguard"] = makeHeaderGuard(variables["class.name"])
|
import asyncio
import base64
import time
import uuid

from fastapi import APIRouter, Request

from base import get_base_resp
dynamic_data_router = APIRouter(prefix="/dynamic-data", tags=["Dyncmic Data"])
@dynamic_data_router.get("/base64/{value}")
async def get_base64_value(value: str = "SFRUUEJJTiBpcyBhd2Vzb21l"):
    """Decode the path segment as base64 and return the decoded text.

    Falls back to a hint message when *value* is not valid base64 (or the
    decoded bytes are not valid UTF-8), mirroring httpbin's /base64.
    """
    resp_text = "Incorrect Base64 data try: SFRUUEJJTiBpcyBhd2Vzb21l"
    try:
        # BUG FIX: decode to str so the endpoint consistently returns text;
        # previously the success path returned raw bytes while the error
        # path returned a str.
        resp_text = base64.decodebytes(value.encode(encoding="utf-8")).decode("utf-8")
    except Exception as e:
        # Best-effort: keep the fallback hint on any decode failure.
        print(e)
    return resp_text
@dynamic_data_router.get("/bytes/{n}")
async def get_bytes_n(n:int,request:Request):
    # NOTE(review): httpbin's /bytes/{n} returns n random bytes; this stub
    # ignores ``n`` and returns only the generic base response — confirm intent.
    return get_base_resp(request)
@dynamic_data_router.delete("/delay/{delay}")
async def delete_delay_delay(delay: int, request: Request):
    """Wait *delay* seconds (0-10 inclusive), then return the base response."""
    if delay < 0 or delay > 10:
        # BUG FIX: message now matches the accepted range (10 is allowed).
        return "invalid:delay must 0<=delay<=10"
    # BUG FIX: time.sleep() blocks the whole event loop inside an async
    # handler; sleep cooperatively instead.
    await asyncio.sleep(delay)
    return get_base_resp(request)
@dynamic_data_router.get("/delay/{delay}")
async def get_delay_delay(delay: int, request: Request):
    """Wait *delay* seconds (0-10 inclusive), then return the base response."""
    if delay < 0 or delay > 10:
        # BUG FIX: message now matches the accepted range (10 is allowed).
        return "invalid:delay must 0<=delay<=10"
    # BUG FIX: time.sleep() blocks the whole event loop inside an async
    # handler; sleep cooperatively instead.
    await asyncio.sleep(delay)
    return get_base_resp(request)
@dynamic_data_router.patch("/delay/{delay}")
async def patch_delay_delay(delay: int, request: Request):
    """Wait *delay* seconds (0-10 inclusive), then return the base response."""
    if delay < 0 or delay > 10:
        # BUG FIX: message now matches the accepted range (10 is allowed).
        return "invalid:delay must 0<=delay<=10"
    # BUG FIX: time.sleep() blocks the whole event loop inside an async
    # handler; sleep cooperatively instead.
    await asyncio.sleep(delay)
    return get_base_resp(request)
@dynamic_data_router.post("/delay/{delay}")
async def post_delay_delay(delay: int, request: Request):
    """Wait *delay* seconds (0-10 inclusive), then return the base response."""
    if delay < 0 or delay > 10:
        # BUG FIX: message now matches the accepted range (10 is allowed).
        return "invalid:delay must 0<=delay<=10"
    # BUG FIX: time.sleep() blocks the whole event loop inside an async
    # handler; sleep cooperatively instead.
    await asyncio.sleep(delay)
    return get_base_resp(request)
@dynamic_data_router.put("/delay/{delay}")
async def put_delay_delay(delay: int, request: Request):
    """Wait *delay* seconds (0-10 inclusive), then return the base response."""
    if delay < 0 or delay > 10:
        # BUG FIX: message now matches the accepted range (10 is allowed).
        return "invalid:delay must 0<=delay<=10"
    # BUG FIX: time.sleep() blocks the whole event loop inside an async
    # handler; sleep cooperatively instead.
    await asyncio.sleep(delay)
    return get_base_resp(request)
@dynamic_data_router.get("/drip")
async def get_drip():
    # Placeholder: httpbin's /drip streams data over a duration; this stub
    # only echoes its own name. TODO: implement streaming.
    return "get_drip"
@dynamic_data_router.get("/links/{n}/{offset}")
async def get_links(n, offset):
    # Placeholder: echoes the (unannotated, hence string) path parameters;
    # httpbin generates an HTML page of n links here. TODO: implement.
    return n, offset
@dynamic_data_router.get("/range/{numbytes}")
async def get_range(numbytes):
    # Placeholder: echoes the path parameter; httpbin streams numbytes bytes
    # with Range-header support here. TODO: implement.
    return numbytes
@dynamic_data_router.get("/stream-bytes/{n}")
async def get_stream_bytes(n):
    # Placeholder: echoes the path parameter; httpbin streams n random bytes
    # in chunks here. TODO: implement.
    return n
@dynamic_data_router.get("/stream/{n}")
async def get_stream(n):
    # Placeholder: echoes the path parameter; httpbin streams n JSON
    # responses here. TODO: implement.
    return n
@dynamic_data_router.get("/uuid")
async def get_uuid():
    # A fresh random (version 4) UUID per request; FastAPI's JSON encoder
    # renders the UUID object as its string form.
    return {"uuid":uuid.uuid4()}
|
from django.shortcuts import render
from rest_framework import viewsets,generics,mixins
from django.views.generic import DetailView,ListView
from .serializers import AssetSerializer,ServerSerializer,TreeNodeSerializer,IDCSerializer
from .models import Asset,Server,TreeNode,IDC
from .page import StandardResultsSetPagination
# Fetch data from the Asset table
class AssetViewSet(viewsets.ReadOnlyModelViewSet):
    # Read-only list/retrieve endpoints over all Asset rows, with standard
    # page-based pagination.
    queryset = Asset.objects.all()
    serializer_class = AssetSerializer
    pagination_class=StandardResultsSetPagination
# Filter the Asset table by id (query parameter)
class AssetListView(generics.ListAPIView):
    """List assets, optionally narrowed to a single primary key via ?id=."""
    serializer_class = AssetSerializer

    def get_queryset(self):
        asset_id = self.request.query_params.get('id', None)
        qs = Asset.objects.all()
        # Without an id parameter, fall through to the full queryset.
        return qs.filter(id=asset_id) if asset_id else qs
class AssetnodeView(generics.ListAPIView):
    """List assets attached to the tree node given by ?id=."""
    serializer_class = AssetSerializer

    def get_queryset(self):
        node_id = self.request.query_params.get('id', None)
        qs = Asset.objects.all()
        # Without an id parameter, fall through to the full queryset.
        return qs.filter(node__id=node_id) if node_id else qs
class AssetmodelView(mixins.CreateModelMixin,mixins.UpdateModelMixin,mixins.DestroyModelMixin,generics.GenericAPIView):
    """Create / update / delete endpoint for Asset records."""
    queryset = Asset.objects.all()
    serializer_class = AssetSerializer

    def post(self, request, *args, **kwargs):
        # BUG FIX: creation was previously mapped to GET; a side-effecting
        # GET violates HTTP semantics, so creation now answers POST.
        return self.create(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        return self.update(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
# Fetch data from the Server table
class ServerViewSet(viewsets.ReadOnlyModelViewSet):
    # Read-only list/retrieve endpoints over all Server rows, with standard
    # page-based pagination.
    queryset = Server.objects.all()
    serializer_class = ServerSerializer
    pagination_class=StandardResultsSetPagination
# class ServerDetailView(DetailView):
# model = Server
# context_object_name = "serverdatil"
# template_name = "cmdb/server-detail.html"
class ServerDListView(ListView):
    """Render the servers belonging to the tree node given by ?node_id=."""
    # NOTE: context name "serverdatil" is kept as-is; the template refers to it.
    context_object_name = "serverdatil"
    template_name = "cmdb/server-detail.html"

    def get_queryset(self):
        node_id = self.request.GET.get("node_id")
        return Server.objects.filter(asset__node__id=node_id)
class TreenodeViewSet(viewsets.ModelViewSet):
    # Full CRUD over tree *roots* only (nodes with no upstream parent);
    # children are presumably nested by TreeNodeSerializer — verify there.
    queryset = TreeNode.objects.filter(node_upstream=None)
    serializer_class = TreeNodeSerializer
class IDCViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints over all IDC (data center) rows."""
    # FIX: removed a stray "|" character fused to the last line (copy/paste
    # residue) that made the module unparseable.
    queryset = IDC.objects.all()
    serializer_class = IDCSerializer
# A query string is the part of a URL after the question mark,
# e.g. "http://www.example.com?key1=value1&key2=value2".
# The request options for this API are documented at "https://icanhazdadjoke.com/api".
import pyfiglet
from random import choice
import requests
url = "https://icanhazdadjoke.com/search"
def print_f(text_to_print, color="MAGENTA"):
    # Render the text as large ASCII art via pyfiglet, in the given color.
    pyfiglet.print_figlet(text=text_to_print, colors=color)
def get_jokes(term):
    """Search icanhazdadjoke.com for jokes matching ``term``.

    Returns the list of result dicts from the JSON response (each dict
    carries a 'joke' key).
    """
    headers = {"Accept": "application/json"}
    query = {"term": term}
    response = requests.get(url, headers=headers, params=query)
    return response.json()["results"]
# Interactive entry point: ask for a topic, fetch matching jokes, print one.
print_f(text_to_print="DaddyJoke90210")
topic = input("Let me tell you a joke! Give me a topic: ").lower()
jokes = get_jokes(topic)
if len(jokes) > 1:
    print("I've got {} jokes about {}. Here's one: \n{}".format(len(jokes), topic, choice(jokes)['joke']))
elif len(jokes) == 1:
    # BUG FIX: jokes is a list, so the single joke is jokes[0]['joke'];
    # indexing the list with a string raised a TypeError before.
    print("I've got one joke about {}. Here it is: \n{}".format(topic, jokes[0]['joke']))
else:
    print("No jokes about {}. Please try again!".format(topic))
|
# Narcissistic ("Armstrong") numbers between 100 and 999: three-digit
# numbers equal to the sum of the cubes of their digits.
for candidate in range(100, 1000):
    hundreds, remainder = divmod(candidate, 100)
    tens, units = divmod(remainder, 10)
    if hundreds ** 3 + tens ** 3 + units ** 3 == candidate:
        print(candidate, 'is a flower.')
|
'''
Module providing the `Synapses` class and related helper classes/functions.
'''
import collections
from collections import defaultdict
import functools
import weakref
import re
import numbers
import numpy as np
from brian2.core.base import weakproxy_with_fallback
from brian2.core.base import device_override
from brian2.core.namespace import get_local_namespace
from brian2.core.variables import (DynamicArrayVariable, Variables)
from brian2.codegen.codeobject import create_runner_codeobj
from brian2.codegen.translation import get_identifiers_recursively
from brian2.devices.device import get_device
from brian2.equations.equations import (Equations, SingleEquation,
DIFFERENTIAL_EQUATION, SUBEXPRESSION,
PARAMETER, INTEGER,
check_subexpressions)
from brian2.groups.group import Group, CodeRunner, get_dtype
from brian2.groups.neurongroup import (extract_constant_subexpressions,
SubexpressionUpdater,
check_identifier_pre_post)
from brian2.stateupdaters.base import (StateUpdateMethod,
UnsupportedEquationsException)
from brian2.stateupdaters.exact import linear, independent
from brian2.units.fundamentalunits import (Quantity, DIMENSIONLESS,
fail_for_dimension_mismatch)
from brian2.units.allunits import second
from brian2.utils.logger import get_logger
from brian2.utils.stringtools import get_identifiers, word_substitute
from brian2.utils.arrays import calc_repeats
from brian2.core.spikesource import SpikeSource
from brian2.synapses.parse_synaptic_generator_syntax import parse_synapse_generator
from brian2.parsing.bast import brian_ast
from brian2.parsing.rendering import NodeRenderer
# Synapse indices are stored as signed 32-bit integers, so the total number
# of synapses is capped at 2**31 - 1.
MAX_SYNAPSES = 2147483647
__all__ = ['Synapses']
# Module-level logger using brian2's logging infrastructure.
logger = get_logger(__name__)
class StateUpdater(CodeRunner):
    '''
    The `CodeRunner` that updates the state variables of a `Synapses`
    at every timestep.
    '''
    def __init__(self, group, method, clock, order, method_options=None):
        # Remember the chosen integration method (and its options) so that
        # `update_abstract_code` can re-apply it before every run.
        self.method_choice = method
        self.method_options = method_options
        CodeRunner.__init__(self, group,
                            'stateupdate',
                            clock=clock,
                            when='groups',
                            order=order,
                            name=group.name + '_stateupdater',
                            check_units=False,
                            generate_empty_code=False)

    def update_abstract_code(self, run_namespace=None, level=0):
        # Regenerate the abstract state-update code from the group's
        # equations; a group without equations produces no code at all.
        if len(self.group.equations) > 0:
            stateupdate_output = StateUpdateMethod.apply_stateupdater(self.group.equations,
                                                                      self.group.variables,
                                                                      self.method_choice,
                                                                      method_options=self.method_options,
                                                                      group_name=self.group.name)
            if isinstance(stateupdate_output, str):
                self.abstract_code = stateupdate_output
            else:
                # Note that the reason to send self along with this method is so the StateUpdater
                # can be modified! i.e. in GSL StateUpdateMethod a custom CodeObject gets added
                # to the StateUpdater together with some auxiliary information
                self.abstract_code = stateupdate_output(self)
        else:
            self.abstract_code = ''
class SummedVariableUpdater(CodeRunner):
    '''
    The `CodeRunner` that updates a value in the target group with the
    sum over values in the `Synapses` object.
    '''
    def __init__(self, expression, target_varname, synapses, target,
                 target_size_name, index_var):
        # Handling summed variables using the standard mechanisms is not
        # possible, we therefore also directly give the names of the arrays
        # to the template.
        code = '''
        _synaptic_var = {expression}
        '''.format(expression=expression,
                   target_varname=target_varname)
        self.target_var = synapses.variables[target_varname]
        self.target = target
        template_kwds = {'_target_var': self.target_var,
                         '_target_size_name': target_size_name,
                         '_index_var': synapses.variables[index_var],
                         '_target_start': getattr(target, 'start', 0),
                         '_target_stop': getattr(target, 'stop', -1)}
        CodeRunner.__init__(self, group=synapses,
                            template='summed_variable',
                            code=code,
                            needed_variables=[target_varname, target_size_name,
                                              index_var],
                            # We want to update the summed variable before
                            # the target group gets updated
                            clock=target.clock,
                            when='groups',
                            order=target.order-1,
                            name=synapses.name + '_summed_variable_' + target_varname,
                            template_kwds=template_kwds)
class SynapticPathway(CodeRunner, Group):
    '''
    The `CodeRunner` that applies the pre/post statement(s) to the state
    variables of synapses where the pre-/postsynaptic group spiked in this
    time step.

    Parameters
    ----------
    synapses : `Synapses`
        Reference to the main `Synapses` object
    prepost : {'pre', 'post'}
        Whether this object should react to pre- or postsynaptic spikes
    objname : str, optional
        The name to use for the object, will be appended to the name of
        `synapses` to create a name in the sense of `Nameable`. If ``None``
        is provided (the default), ``prepost`` will be used.
    delay : `Quantity`, optional
        A scalar delay (same delay for all synapses) for this pathway. If
        not given, delays are expected to vary between synapses.
    '''
    def __init__(self, synapses, code, prepost, objname=None,
                 delay=None, event='spike'):
        self.code = code
        self.prepost = prepost
        self.event = event
        # Source/target and the synaptic index arrays swap roles depending
        # on whether this pathway reacts to pre- or post-synaptic events.
        if prepost == 'pre':
            self.source = synapses.source
            self.target = synapses.target
            self.synapse_sources = synapses.variables['_synaptic_pre']
            self.synapse_targets = synapses.variables['_synaptic_post']
            order = -1
        elif prepost == 'post':
            self.source = synapses.target
            self.target = synapses.source
            self.synapse_sources = synapses.variables['_synaptic_post']
            self.synapse_targets = synapses.variables['_synaptic_pre']
            order = 1
        else:
            raise ValueError('prepost argument has to be either "pre" or '
                             '"post"')
        self.synapses = weakref.proxy(synapses)
        # Allow to use the same indexing of the delay variable as in the parent
        # Synapses object (e.g. 2d indexing with pre- and post-synaptic indices)
        self._indices = self.synapses._indices
        if objname is None:
            objname = prepost
        CodeRunner.__init__(self, synapses,
                            'synapses',
                            code=code,
                            clock=self.source.clock,
                            when='synapses',
                            order=order,
                            name=synapses.name + '_' + objname,
                            template_kwds={'pathway': self})
        self._pushspikes_codeobj = None
        self.spikes_start = self.source.start
        self.spikes_stop = self.source.stop
        self.eventspace_name = '_{}space'.format(event)
        self.eventspace = None  # will be set in before_run
        # Setting the Synapses object instead of "self" as an owner makes
        # indexing conflicts disappear (e.g. with synapses connecting subgroups)
        self.variables = Variables(synapses)
        self.variables.add_reference(self.eventspace_name, self.source)
        self.variables.add_reference('N', synapses)
        if prepost == 'pre':
            self.variables.add_reference('_n_sources', synapses, 'N_pre')
            self.variables.add_reference('_n_targets', synapses, 'N_post')
            self.variables.add_reference('_source_dt', synapses.source, 'dt')
        else:
            self.variables.add_reference('_n_sources', synapses, 'N_post')
            self.variables.add_reference('_n_targets', synapses, 'N_pre')
            self.variables.add_reference('_source_dt', synapses.target, 'dt')
        if delay is None:  # variable delays
            if getattr(synapses, 'N', None) is not None:
                n_synapses = synapses.N
            else:
                n_synapses = 0
            self.variables.add_dynamic_array('delay', dimensions=second.dim,
                                             size=n_synapses, constant=True)
            # Register the object with the `SynapticIndex` object so it gets
            # automatically resized
            synapses.register_variable(self.variables['delay'])
        else:
            if not isinstance(delay, Quantity):
                raise TypeError(('Cannot set the delay for pathway "%s": '
                                 'expected a quantity, got %s instead.') % (objname,
                                                                            type(delay)))
            if delay.size != 1:
                raise TypeError(('Cannot set the delay for pathway "%s": '
                                 'expected a scalar quantity, got a '
                                 'quantity with shape %s instead.') % str(delay.shape))
            fail_for_dimension_mismatch(delay, second, ('Delay has to be '
                                                        'specified in units '
                                                        'of seconds but got '
                                                        '{value}'),
                                        value=delay)
            # We use a "dynamic" array of constant size here because it makes
            # the generated code easier, we don't need to deal with a different
            # type for scalar and variable delays
            self.variables.add_dynamic_array('delay', dimensions=second.dim,
                                             size=1, constant=True,
                                             scalar=True)
            # Since this array does not grow with the number of synapses, we
            # have to resize it ourselves
            self.variables['delay'].resize(1)
            self.variables['delay'].set_value(delay)
        self._delays = self.variables['delay']
        # Re-extract the last part of the name from the full name
        self.objname = self.name[len(synapses.name) + 1:]
        #: The `SpikeQueue`
        self.queue = None
        #: The `CodeObject` initalising the `SpikeQueue` at the begin of a run
        self._initialise_queue_codeobj = None
        self.namespace = synapses.namespace
        # Allow the use of string expressions referring to synaptic (including
        # pre-/post-synaptic) variables
        # Only include non-private variables (and their indices)
        synaptic_vars = {varname for varname in list(synapses.variables.keys())
                         if not varname.startswith('_')}
        synaptic_idcs = {varname: synapses.variables.indices[varname]
                         for varname in synaptic_vars}
        synaptic_vars |= {index_name for index_name in list(synaptic_idcs.values())
                          if index_name not in ['_idx', '0']}
        self.variables.add_references(synapses, synaptic_vars)
        self.variables.indices.update(synaptic_idcs)
        self._enable_group_attributes()

    def check_variable_write(self, variable):
        # Forward the check to the `Synapses` object (raises an error if no
        # synapse has been created yet)
        self.synapses.check_variable_write(variable)

    @device_override('synaptic_pathway_update_abstract_code')
    def update_abstract_code(self, run_namespace=None, level=0):
        # Prepend the analytically-integrated update of event-driven
        # equations (evaluated over the time since the last update) to the
        # user-provided pathway code.
        if self.synapses.event_driven is not None:
            event_driven_eqs = self.synapses.event_driven
            clock_driven_eqs = self.synapses.equations
            try:
                event_driven_update = linear(event_driven_eqs,
                                             self.group.variables)
            except UnsupportedEquationsException:
                # Check whether equations are independent
                for var, expr in event_driven_eqs.diff_eq_expressions:
                    for identifier in expr.identifiers:
                        if identifier == var:
                            continue
                        if (identifier in event_driven_eqs.diff_eq_names or
                                identifier in clock_driven_eqs):
                            err = ("Cannot solve the differential equation for "
                                   "'{}' as event-driven, it depends on "
                                   "another variable '{}'. Use (clock-driven) "
                                   "instead.".format(var,
                                                     identifier))
                            raise UnsupportedEquationsException(err)
                # All equations are independent, go ahead
                event_driven_update = independent(self.synapses.event_driven,
                                                  self.group.variables)
            # TODO: Any way to do this more elegantly?
            event_driven_update = re.sub(r'\bdt\b', '(t - lastupdate)',
                                         event_driven_update)
            self.abstract_code = event_driven_update + '\n'
        else:
            self.abstract_code = ''
        self.abstract_code += self.code + '\n'
        if self.synapses.event_driven is not None:
            self.abstract_code += 'lastupdate = t\n'

    @device_override('synaptic_pathway_before_run')
    def before_run(self, run_namespace):
        # execute code to initalize the spike queue
        if self._initialise_queue_codeobj is None:
            self._initialise_queue_codeobj = create_runner_codeobj(self,
                                                                   '',  # no code,
                                                                   'synapses_initialise_queue',
                                                                   name=self.name+'_initialise_queue',
                                                                   check_units=False,
                                                                   additional_variables=self.variables,
                                                                   run_namespace=run_namespace)
        self._initialise_queue_codeobj()
        CodeRunner.before_run(self, run_namespace)
        # we insert rather than replace because CodeRunner puts a CodeObject in updaters already
        if self._pushspikes_codeobj is None:
            # Since this now works for general events not only spikes, we have to
            # pass the information about which variable to use to the template,
            # it can not longer simply refer to "_spikespace"
            # Strictly speaking this is only true for the standalone mode at the
            # moment, since in runtime, all the template does is to call
            # SynapticPathway.push_spike
            eventspace_name = '_{}space'.format(self.event)
            template_kwds = {'eventspace_variable': self.source.variables[eventspace_name]}
            needed_variables = [eventspace_name]
            self._pushspikes_codeobj = create_runner_codeobj(self,
                                                             '',  # no code
                                                             'synapses_push_spikes',
                                                             name=self.name+'_push_spikes',
                                                             check_units=False,
                                                             additional_variables=self.variables,
                                                             needed_variables=needed_variables,
                                                             template_kwds=template_kwds,
                                                             run_namespace=run_namespace)
            self._code_objects.insert(0, weakref.proxy(self._pushspikes_codeobj))

    def initialise_queue(self):
        self.eventspace = self.source.variables[self.eventspace_name].get_value()
        if not self.synapses._connect_called:
            raise TypeError(("Synapses object '%s' does not do anything, since "
                             "it has not created synapses with 'connect'. "
                             "Set its active attribute to False if you "
                             "intend to do only do this for a subsequent"
                             " run.") % self.synapses.name)
        if self.queue is None:
            self.queue = get_device().spike_queue(self.source.start, self.source.stop)
            self.variables.add_object('_queue', self.queue)
        # Update the dt (might have changed between runs)
        self.queue.prepare(self._delays.get_value(), self.source.clock.dt_,
                           self.synapse_sources.get_value())
        # Warn once if source, synapses and target do not share a single dt,
        # since delays are rounded to multiples of the source clock's dt.
        if len({self.source.clock.dt_, self.synapses.clock.dt_,
                self.target.clock.dt_}) > 1:
            logger.warn(("Note that the synaptic pathway '{pathway}' will run on the "
                         "clock of the group '{source}' using a dt of {dt}. Either "
                         "the Synapses object '{synapses}' or the target '{target}' "
                         "(or both) are using a different dt. This might lead to "
                         "unexpected results. In particular, all delays will be rounded to "
                         "multiples of {dt}. If in doubt, try to ensure that "
                         "'{source}', '{synapses}', and '{target}' use the "
                         "same dt.").format(pathway=self.name,
                                            source=self.source.name,
                                            target=self.target.name,
                                            dt=self.source.clock.dt,
                                            synapses=self.synapses.name),
                        'synapses_dt_mismatch', once=True)

    def _full_state(self):
        # Include the spike queue's state alongside the standard group state.
        state = super(SynapticPathway, self)._full_state()
        if self.queue is not None:
            state['_spikequeue'] = self.queue._full_state()
        else:
            state['_spikequeue'] = None
        return state

    def _restore_from_full_state(self, state):
        # We have to handle the SpikeQueue separately from the other state
        # variables, so remove it from the state dictionary so that it does not
        # get treated as a state variable by the standard mechanism in
        # `VariableOwner`
        queue_state = state.pop('_spikequeue')
        super(SynapticPathway, self)._restore_from_full_state(state)
        if self.queue is None:
            self.queue = get_device().spike_queue(self.source.start, self.source.stop)
        self.queue._restore_from_full_state(queue_state)
        # Put the spike queue state back for future restore calls
        state['_spikequeue'] = queue_state

    def push_spikes(self):
        # Push new events (e.g. spikes) into the queue; the last entry of the
        # eventspace array holds the number of events in this time step.
        events = self.eventspace[:self.eventspace[len(self.eventspace)-1]]
        if len(events):
            self.queue.push(events)
def slice_to_test(x):
    '''
    Return a function that tests whether an index is contained in ``x``.

    ``x`` can be an int (or anything convertible to int), in which case the
    returned function tests for equality, or a slice, in which case the
    returned function checks membership element-wise on an index array.
    '''
    try:
        # Integer (or int-convertible) index: a simple equality test.
        x = int(x)
        return lambda y: (y == x)
    except TypeError:
        pass
    if not isinstance(x, slice):
        raise TypeError('Expected int or slice, got {} instead'.format(type(x)))
    if x == slice(None):
        # Full slice: no need for testing, everything matches.
        return lambda y: np.repeat(True, len(y))
    start, stop, step = x.start, x.stop, x.step

    def test(y):
        # Combine only the constraints that are actually present. This also
        # fixes a crash in the previous version for slices like
        # ``slice(None, None, step)``, which compared ``y < None``.
        result = np.ones(np.shape(y), dtype=bool)
        if start is not None:
            result &= (y >= start)
        if stop is not None:
            result &= (y < stop)
        if step is not None:
            # Step counts from ``start`` when given, from 0 otherwise.
            offset = 0 if start is None else start
            result &= ((y - offset) % step == 0)
        return result

    return test
def find_synapses(index, synaptic_neuron):
    '''
    Return the indices (into ``synaptic_neuron``) of all synapses whose
    pre-/post-synaptic neuron matches ``index``, which may be an int, a
    slice, or a sequence of neuron indices.
    '''
    try:
        index = int(index)
    except TypeError:
        pass
    if not isinstance(index, (int, slice)):
        # A sequence of neuron indices: collect the matches for each one.
        matches = []
        for neuron in index:
            matches.extend(np.flatnonzero(synaptic_neuron == neuron))
        return np.array(matches, dtype=np.int32)
    membership = slice_to_test(index)(synaptic_neuron)
    return np.flatnonzero(membership)
class SynapticSubgroup(object):
    '''
    A simple subgroup of `Synapses` that can be used for indexing.

    Parameters
    ----------
    indices : `ndarray` of int
        The synaptic indices represented by this subgroup.
    synaptic_pre : `DynamicArrayVariable`
        References to all pre-synaptic indices. Only used to throw an error
        when new synapses were added after creating this object.
    '''
    def __init__(self, synapses, indices):
        self.synapses = weakproxy_with_fallback(synapses)
        self._stored_indices = indices
        self._synaptic_pre = synapses.variables['_synaptic_pre']
        self._source_N = self._synaptic_pre.size  # total number of synapses

    def _indices(self, index_var='_idx'):
        # The stored indices are precomputed against '_idx', so no other
        # index variable is meaningful here.
        if index_var != '_idx':
            raise AssertionError('Did not expect index %s here.' % index_var)
        # Guard against stale indices: creating/removing synapses after this
        # subgroup was built would invalidate the stored indices.
        if len(self._synaptic_pre.get_value()) != self._source_N:
            raise RuntimeError(('Synapses have been added/removed since this '
                                'synaptic subgroup has been created'))
        return self._stored_indices

    def __repr__(self):
        return '<%s, storing %d indices of %s>' % (self.__class__.__name__,
                                                   len(self._stored_indices),
                                                   self.synapses.name)
class SynapticIndexing(object):
    '''
    Translate user indexing of a `Synapses` object (1d synapse indices,
    (pre, post) pairs or (pre, post, synapse-number) triples, slices,
    arrays or objects exposing ``_indices``) into arrays of synapse indices.
    '''
    def __init__(self, synapses):
        self.synapses = weakref.proxy(synapses)
        self.source = weakproxy_with_fallback(self.synapses.source)
        self.target = weakproxy_with_fallback(self.synapses.target)
        self.synaptic_pre = synapses.variables['_synaptic_pre']
        self.synaptic_post = synapses.variables['_synaptic_post']
        if synapses.multisynaptic_index is not None:
            self.synapse_number = synapses.variables[synapses.multisynaptic_index]
        else:
            self.synapse_number = None

    def __call__(self, index=None, index_var='_idx'):
        '''
        Returns synaptic indices for `index`, which can be a tuple of indices
        (including arrays and slices), a single index or a string.
        '''
        # BUG FIX: `collections.Sequence` was removed in Python 3.10; use the
        # ABC from `collections.abc` (imported locally to keep this class
        # self-contained).
        from collections.abc import Sequence
        if index is None or (isinstance(index, str) and index == 'True'):
            index = slice(None)
        if (not isinstance(index, (tuple, str)) and
                (isinstance(index, (numbers.Integral, np.ndarray, slice,
                                    Sequence))
                 or hasattr(index, '_indices'))):
            if hasattr(index, '_indices'):
                final_indices = index._indices(index_var=index_var).astype(np.int32)
            elif isinstance(index, slice):
                start, stop, step = index.indices(len(self.synaptic_pre.get_value()))
                final_indices = np.arange(start, stop, step, dtype=np.int32)
            else:
                final_indices = np.asarray(index)
        elif isinstance(index, tuple):
            if len(index) == 2:  # two indices (pre- and postsynaptic cell)
                index = (index[0], index[1], slice(None))
            elif len(index) > 3:
                raise IndexError('Need 1, 2 or 3 indices, got %d.' % len(index))
            I, J, K = index
            # Convert to absolute indices (e.g. for subgroups)
            # Allow the indexing to fail, we'll later return an empty array in
            # that case
            try:
                if hasattr(I, '_indices'):  # will return absolute indices already
                    I = I._indices()
                else:
                    I = self.source._indices(I)
                pre_synapses = find_synapses(I, self.synaptic_pre.get_value())
            except IndexError:
                pre_synapses = np.array([], dtype=np.int32)
            try:
                if hasattr(J, '_indices'):
                    J = J._indices()
                else:
                    J = self.target._indices(J)
                post_synapses = find_synapses(J, self.synaptic_post.get_value())
            except IndexError:
                post_synapses = np.array([], dtype=np.int32)
            matching_synapses = np.intersect1d(pre_synapses, post_synapses,
                                               assume_unique=True)
            if isinstance(K, slice) and K == slice(None):
                final_indices = matching_synapses
            else:
                if self.synapse_number is None:
                    raise IndexError('To index by the third dimension you need '
                                     'to switch on the calculation of the '
                                     '"multisynaptic_index" when you create '
                                     'the Synapses object.')
                if isinstance(K, (numbers.Integral, slice)):
                    test_k = slice_to_test(K)
                else:
                    raise NotImplementedError(('Indexing synapses with arrays not'
                                               'implemented yet'))
                # We want to access the raw arrays here, not go through the
                # Variable. (Removed unused pre_neurons/post_neurons locals.)
                synapse_numbers = self.synapse_number.get_value()[matching_synapses]
                # NOTE(review): intersecting synapse *indices* with the
                # *positions* returned by flatnonzero looks suspicious, but is
                # kept as-is to preserve behavior — verify against upstream.
                final_indices = np.intersect1d(matching_synapses,
                                               np.flatnonzero(test_k(synapse_numbers)),
                                               assume_unique=True)
        else:
            raise IndexError('Unsupported index type {itype}'.format(itype=type(index)))
        if index_var not in ('_idx', '0'):
            # Map through the given index variable (e.g. a pre-/post-synaptic
            # index array) before returning.
            return index_var.get_value()[final_indices.astype(np.int32)]
        else:
            return final_indices.astype(np.int32)
class Synapses(Group):
'''
Class representing synaptic connections.
Creating a new `Synapses` object does by default not create any synapses,
you have to call the `Synapses.connect` method for that.
Parameters
----------
source : `SpikeSource`
The source of spikes, e.g. a `NeuronGroup`.
target : `Group`, optional
The target of the spikes, typically a `NeuronGroup`. If none is given,
the same as `source`
model : `str`, `Equations`, optional
The model equations for the synapses.
on_pre : str, dict, optional
The code that will be executed after every pre-synaptic spike. Can be
either a single (possibly multi-line) string, or a dictionary mapping
pathway names to code strings. In the first case, the pathway will be
called ``pre`` and made available as an attribute of the same name.
In the latter case, the given names will be used as the
pathway/attribute names. Each pathway has its own code and its own
delays.
pre : str, dict, optional
Deprecated. Use ``on_pre`` instead.
on_post : str, dict, optional
The code that will be executed after every post-synaptic spike. Same
conventions as for `on_pre``, the default name for the pathway is
``post``.
post : str, dict, optional
Deprecated. Use ``on_post`` instead.
delay : `Quantity`, dict, optional
The delay for the "pre" pathway (same for all synapses) or a dictionary
mapping pathway names to delays. If a delay is specified in this way
for a pathway, it is stored as a single scalar value. It can still
be changed afterwards, but only to a single scalar value. If you want
to have delays that vary across synapses, do not use the keyword
argument, but instead set the delays via the attribute of the pathway,
e.g. ``S.pre.delay = ...`` (or ``S.delay = ...`` as an abbreviation),
``S.post.delay = ...``, etc.
on_event : str or dict, optional
Define the events which trigger the pre and post pathways. By default,
both pathways are triggered by the ``'spike'`` event, i.e. the event
that is triggered by the ``threshold`` condition in the connected
groups.
multisynaptic_index : str, optional
The name of a variable (which will be automatically created) that stores
the "synapse number". This number enumerates all synapses between the
same source and target so that they can be distinguished. For models
where each source-target pair has only a single connection, this number
only wastes memory (it would always default to 0), it is therefore not
stored by default. Defaults to ``None`` (no variable).
namespace : dict, optional
A dictionary mapping identifier names to objects. If not given, the
namespace will be filled in at the time of the call of `Network.run`,
with either the values from the ``namespace`` argument of the
`Network.run` method or from the local context, if no such argument is
given.
dtype : `dtype`, dict, optional
The `numpy.dtype` that will be used to store the values, or a
dictionary specifying the type for variable names. If a value is not
provided for a variable (or no value is provided at all), the preference
setting `core.default_float_dtype` is used.
codeobj_class : class, optional
The `CodeObject` class to use to run code.
dt : `Quantity`, optional
The time step to be used for the update of the state variables.
Cannot be combined with the `clock` argument.
clock : `Clock`, optional
The update clock to be used. If neither a clock, nor the `dt` argument
is specified, the `defaultclock` will be used.
order : int, optional
        The priority of this group for operations occurring at the same time
step and in the same scheduling slot. Defaults to 0.
method : str, `StateUpdateMethod`, optional
The numerical integration method to use. If none is given, an
appropriate one is automatically determined.
name : str, optional
The name for this object. If none is given, a unique name of the form
``synapses``, ``synapses_1``, etc. will be automatically chosen.
'''
add_to_magic_network = True
def __init__(self, source, target=None, model=None, on_pre=None,
pre=None, on_post=None, post=None,
connect=None, delay=None, on_event='spike',
multisynaptic_index=None,
namespace=None, dtype=None,
codeobj_class=None,
dt=None, clock=None, order=0,
method=('exact', 'euler', 'heun'),
method_options=None,
name='synapses*'):
if connect is not None:
raise TypeError('The connect keyword argument is no longer '
'supported, call the connect method instead.')
if pre is not None:
if on_pre is not None:
raise TypeError("Cannot specify both 'pre' and 'on_pre'. The "
"'pre' keyword is deprecated, use the 'on_pre' "
"keyword instead.")
logger.warn("The 'pre' keyword is deprecated, use 'on_pre' "
"instead.", 'deprecated_pre', once=True)
on_pre = pre
if post is not None:
if on_post is not None:
raise TypeError("Cannot specify both 'post' and 'on_post'. The "
"'post' keyword is deprecated, use the "
"'on_post' keyword instead.")
logger.warn("The 'post' keyword is deprecated, use 'on_post' "
"instead.", 'deprecated_post', once=True)
on_post = post
Group.__init__(self, dt=dt, clock=clock, when='start', order=order,
name=name)
if dtype is None:
dtype = {}
if isinstance(dtype, collections.MutableMapping):
dtype['lastupdate'] = self._clock.variables['t'].dtype
#: remember whether connect was called to raise an error if an
#: assignment to a synaptic variable is attempted without a preceding
#: connect.
self._connect_called = False
self.codeobj_class = codeobj_class
self.source = source
self.add_dependency(source)
if target is None:
self.target = self.source
else:
self.target = target
self.add_dependency(target)
##### Prepare and validate equations
if model is None:
model = ''
if isinstance(model, str):
model = Equations(model)
if not isinstance(model, Equations):
raise TypeError(('model has to be a string or an Equations '
'object, is "%s" instead.') % type(model))
# Check flags
model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven', 'clock-driven'],
SUBEXPRESSION: ['summed', 'shared',
'constant over dt'],
PARAMETER: ['constant', 'shared']},
incompatible_flags=[('event-driven', 'clock-driven'),
# 'summed' cannot be combined with
# any other flag
('summed', 'shared',
'constant over dt')])
for name in ['i', 'j', 'delay']:
if name in model.names:
raise SyntaxError('"%s" is a reserved name that cannot be '
'used as a variable name.' % name)
# Add the "multisynaptic index", if desired
self.multisynaptic_index = multisynaptic_index
if multisynaptic_index is not None:
if not isinstance(multisynaptic_index, str):
raise TypeError('multisynaptic_index argument has to be a string')
model = model + Equations('{} : integer'.format(multisynaptic_index))
# Separate subexpressions depending whether they are considered to be
# constant over a time step or not
model, constant_over_dt = extract_constant_subexpressions(model)
# Separate the equations into event-driven equations,
# continuously updated equations and summed variable updates
event_driven = []
continuous = []
summed_updates = []
for single_equation in model.values():
if 'event-driven' in single_equation.flags:
event_driven.append(single_equation)
elif 'summed' in single_equation.flags:
summed_updates.append(single_equation)
else:
if (single_equation.type == DIFFERENTIAL_EQUATION and
'clock-driven' not in single_equation.flags):
logger.info(('The synaptic equation for the variable {var} '
'does not specify whether it should be '
'integrated at every timestep ("clock-driven") '
'or only at spiking events ("event-driven"). '
'It will be integrated at every timestep '
'which can slow down your simulation '
'unnecessarily if you only need the values of '
'this variable whenever a spike occurs. '
'Specify the equation as clock-driven '
'explicitly to avoid this '
'warning.').format(var=single_equation.varname),
'clock_driven',
once=True)
continuous.append(single_equation)
if len(event_driven):
self.event_driven = Equations(event_driven)
# Add the lastupdate variable, needed for event-driven updates
model += Equations('lastupdate : second')
else:
self.event_driven = None
self._create_variables(model, user_dtype=dtype)
self.equations = Equations(continuous)
if namespace is None:
namespace = {}
#: The group-specific namespace
self.namespace = namespace
#: Set of `Variable` objects that should be resized when the
#: number of synapses changes
self._registered_variables = set()
for varname, var in self.variables.items():
if (isinstance(var, DynamicArrayVariable) and
self.variables.indices[varname] == '_idx'):
# Register the array with the `SynapticItemMapping` object so
# it gets automatically resized
self.register_variable(var)
# Support 2d indexing
self._indices = SynapticIndexing(self)
if delay is None:
delay = {}
if isinstance(delay, Quantity):
delay = {'pre': delay}
elif not isinstance(delay, collections.Mapping):
raise TypeError('Delay argument has to be a quantity or a '
'dictionary, is type %s instead.' % type(delay))
#: List of names of all updaters, e.g. ['pre', 'post']
self._synaptic_updaters = []
#: List of all `SynapticPathway` objects
self._pathways = []
if isinstance(on_event, str):
events_dict = collections.defaultdict(lambda: on_event)
else:
events_dict = collections.defaultdict(lambda: 'spike')
events_dict.update(on_event)
#: "Events" for all the pathways
self.events = events_dict
for prepost, argument in zip(('pre', 'post'), (on_pre, on_post)):
if not argument:
continue
if isinstance(argument, str):
pathway_delay = delay.get(prepost, None)
self._add_updater(argument, prepost, delay=pathway_delay,
event=self.events[prepost])
elif isinstance(argument, collections.Mapping):
for key, value in argument.items():
if not isinstance(key, str):
err_msg = ('Keys for the "on_{}" argument'
'have to be strings, got '
'{} instead.').format(prepost, type(key))
raise TypeError(err_msg)
pathway_delay = delay.get(key, None)
self._add_updater(value, prepost, objname=key,
delay=pathway_delay, event=self.events[key])
# Check whether any delays were specified for pathways that don't exist
for pathway in delay:
if not pathway in self._synaptic_updaters:
raise ValueError(('Cannot set the delay for pathway '
'"%s": unknown pathway.') % pathway)
#: Performs numerical integration step
self.state_updater = None
# We only need a state update if we have differential equations
if len(self.equations.diff_eq_names):
self.state_updater = StateUpdater(self, method, method_options=method_options,
clock=self.clock,
order=order)
self.contained_objects.append(self.state_updater)
#: Update the "constant over a time step" subexpressions
self.subexpression_updater = None
if len(constant_over_dt) > 0:
self.subexpression_updater = SubexpressionUpdater(self,
constant_over_dt)
self.contained_objects.append(self.subexpression_updater)
#: "Summed variable" mechanism -- sum over all synapses of a
#: pre-/postsynaptic target
self.summed_updaters = {}
# We want to raise an error if the same variable is updated twice
# using this mechanism. This could happen if the Synapses object
# connected a NeuronGroup to itself since then all variables are
# accessible as var_pre and var_post.
summed_targets = set()
for single_equation in summed_updates:
varname = single_equation.varname
if not (varname.endswith('_pre') or varname.endswith('_post')):
raise ValueError(('The summed variable "%s" does not end '
'in "_pre" or "_post".') % varname)
if not varname in self.variables:
raise ValueError(('The summed variable "%s" does not refer'
'to any known variable in the '
'target group.') % varname)
if varname.endswith('_pre'):
summed_target = self.source
summed_target_size_name = 'N_pre'
orig_varname = varname[:-4]
summed_var_index = '_synaptic_pre'
else:
summed_target = self.target
summed_target_size_name = 'N_post'
orig_varname = varname[:-5]
summed_var_index = '_synaptic_post'
target_eq = getattr(summed_target, 'equations', {}).get(orig_varname, None)
if target_eq is None or target_eq.type != PARAMETER:
raise ValueError(('The summed variable "%s" needs a '
'corresponding parameter "%s" in the '
'target group.') % (varname,
orig_varname))
fail_for_dimension_mismatch(self.variables['_summed_'+varname].dim,
self.variables[varname].dim,
('Summed variables need to have '
'the same units in Synapses '
'and the target group'))
if self.variables[varname] in summed_targets:
raise ValueError(('The target variable "%s" is already '
'updated by another summed '
'variable') % orig_varname)
summed_targets.add(self.variables[varname])
updater = SummedVariableUpdater(single_equation.expr,
varname, self, summed_target,
summed_target_size_name,
summed_var_index)
self.summed_updaters[varname] = updater
self.contained_objects.append(updater)
# Activate name attribute access
self._enable_group_attributes()
def __getitem__(self, item):
indices = self.indices[item]
return SynapticSubgroup(self, indices)
def _set_delay(self, delay, with_unit):
if 'pre' not in self._synaptic_updaters:
raise AttributeError("Synapses do not have a 'pre' pathway, "
"do not know what 'delay' refers to.")
# Note that we cannot simply say: "self.pre.delay = delay" because this
# would not correctly deal with references to external constants
var = self.pre.variables['delay']
if with_unit:
reference = var.get_addressable_value_with_unit('delay', self.pre)
else:
reference = var.get_addressable_value('delay', self.pre)
reference.set_item('True', delay, level=2)
def _get_delay(self, with_unit):
if 'pre' not in self._synaptic_updaters:
raise AttributeError("Synapses do not have a 'pre' pathway, "
"do not know what 'delay' refers to.")
var = self.pre.variables['delay']
if with_unit:
return var.get_addressable_value_with_unit('delay', self.pre)
else:
return var.get_addressable_value('delay', self.pre)
delay = property(functools.partial(_get_delay, with_unit=True),
functools.partial(_set_delay, with_unit=True),
doc='The presynaptic delay (if a pre-synaptic pathway '
'exists).')
delay_ = property(functools.partial(_get_delay, with_unit=False),
functools.partial(_set_delay, with_unit=False),
doc='The presynaptic delay without unit information (if a'
'pre-synaptic pathway exists).')
def _add_updater(self, code, prepost, objname=None, delay=None,
event='spike'):
'''
Add a new target updater. Users should call `add_pre` or `add_post`
instead.
Parameters
----------
code : str
The abstract code that should be executed on pre-/postsynaptic
spikes.
prepost : {'pre', 'post'}
Whether the code is triggered by presynaptic or postsynaptic spikes
objname : str, optional
A name for the object, see `SynapticPathway` for more details.
delay : `Quantity`, optional
A scalar delay (same delay for all synapses) for this pathway. If
not given, delays are expected to vary between synapses.
Returns
-------
objname : str
The final name for the object. Equals `objname` if it was explicitly
given (and did not end in a wildcard character).
'''
if prepost == 'pre':
spike_group, group_name = self.source, 'Source'
elif prepost == 'post':
spike_group, group_name = self.target, 'Target'
else:
raise AssertionError(('"prepost" argument has to be "pre" or '
'"post", is "%s".') % prepost)
if event not in spike_group.events:
raise ValueError(("%s group does not define an event "
"'%s'.") % (group_name, event))
if not isinstance(spike_group, SpikeSource) or not hasattr(spike_group, 'clock'):
raise TypeError(('%s has to be a SpikeSource with spikes and'
' clock attribute. Is type %r instead')
% (group_name, type(spike_group)))
updater = SynapticPathway(self, code, prepost, objname,
delay=delay, event=event)
objname = updater.objname
if hasattr(self, objname):
raise ValueError(('Cannot add updater with name "{name}", synapses '
'object already has an attribute with this '
'name.').format(name=objname))
setattr(self, objname, updater)
self._synaptic_updaters.append(objname)
self._pathways.append(updater)
self.contained_objects.append(updater)
return objname
def _create_variables(self, equations, user_dtype=None):
'''
Create the variables dictionary for this `Synapses`, containing
entries for the equation variables and some standard entries.
'''
self.variables = Variables(self)
# Standard variables always present
self.variables.add_dynamic_array('_synaptic_pre', size=0,
dtype=np.int32)
self.variables.add_dynamic_array('_synaptic_post', size=0,
dtype=np.int32)
self.variables.create_clock_variables(self._clock,
prefix='_clock_')
if '_offset' in self.target.variables:
self.variables.add_reference('_target_offset', self.target,
'_offset')
else:
self.variables.add_constant('_target_offset', value=0)
if '_offset' in self.source.variables:
self.variables.add_reference('_source_offset', self.source,
'_offset')
else:
self.variables.add_constant('_source_offset', value=0)
# To cope with connections to/from other synapses, N_incoming/N_outgoing
# will be resized when synapses are created
self.variables.add_dynamic_array('N_incoming', size=0, dtype=np.int32,
constant=True, read_only=True,
index='_postsynaptic_idx')
self.variables.add_dynamic_array('N_outgoing', size=0, dtype=np.int32,
constant=True, read_only=True,
index='_presynaptic_idx')
# We have to make a distinction here between the indices
# and the arrays (even though they refer to the same object)
# the synaptic propagation template would otherwise overwrite
# synaptic_post in its namespace with the value of the
# postsynaptic index, leading to errors for the next
# propagation.
self.variables.add_reference('_presynaptic_idx',
self,
'_synaptic_pre')
self.variables.add_reference('_postsynaptic_idx',
self,
'_synaptic_post')
# Except for subgroups (which potentially add an offset), the "i" and
# "j" variables are simply equivalent to `_synaptic_pre` and
# `_synaptic_post`
if getattr(self.source, 'start', 0) == 0:
self.variables.add_reference('i', self, '_synaptic_pre')
else:
self.variables.add_reference('_source_i', self.source.source, 'i',
index='_presynaptic_idx')
self.variables.add_reference('_source_offset', self.source, '_offset')
self.variables.add_subexpression('i',
dtype=self.source.source.variables['i'].dtype,
expr='_source_i - _source_offset',
index='_presynaptic_idx')
if getattr(self.target, 'start', 0) == 0:
self.variables.add_reference('j', self, '_synaptic_post')
else:
self.variables.add_reference('_target_j', self.target.source, 'i',
index='_postsynaptic_idx')
self.variables.add_reference('_target_offset', self.target, '_offset')
self.variables.add_subexpression('j',
dtype=self.target.source.variables['i'].dtype,
expr='_target_j - _target_offset',
index='_postsynaptic_idx')
# Add the standard variables
self.variables.add_array('N', dtype=np.int32, size=1, scalar=True,
constant=True, read_only=True)
for eq in equations.values():
dtype = get_dtype(eq, user_dtype)
if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER):
check_identifier_pre_post(eq.varname)
constant = 'constant' in eq.flags
shared = 'shared' in eq.flags
if shared:
self.variables.add_array(eq.varname, size=1,
dimensions=eq.dim,
dtype=dtype,
constant=constant,
scalar=True,
index='0')
else:
self.variables.add_dynamic_array(eq.varname, size=0,
dimensions=eq.dim,
dtype=dtype,
constant=constant)
elif eq.type == SUBEXPRESSION:
if 'summed' in eq.flags:
# Give a special name to the subexpression for summed
# variables to avoid confusion with the pre/postsynaptic
# target variable
varname = '_summed_'+eq.varname
else:
check_identifier_pre_post(eq.varname)
varname = eq.varname
self.variables.add_subexpression(varname, dimensions=eq.dim,
expr=str(eq.expr),
scalar='shared' in eq.flags,
dtype=dtype)
else:
raise AssertionError('Unknown type of equation: ' + eq.eq_type)
# Stochastic variables
for xi in equations.stochastic_variables:
self.variables.add_auxiliary_variable(xi, dimensions=(second ** -0.5).dim)
# Add all the pre and post variables with _pre and _post suffixes
for name in getattr(self.source, 'variables', {}).keys():
# Raise an error if a variable name is also used for a synaptic
# variable (we ignore 'lastupdate' to allow connections from another
# Synapses object)
if (name in equations.names and name != 'lastupdate' and
'summed' not in equations[name].flags):
error_msg = ('The pre-synaptic variable {name} has the same '
'name as a synaptic variable, rename the synaptic '
'variable ').format(name=name)
if name+'_syn' not in self.variables:
error_msg += ("(for example to '{name}_syn') ".format(name=name))
error_msg += 'to avoid confusion'
raise ValueError(error_msg)
if name.startswith('_'):
continue # Do not add internal variables
var = self.source.variables[name]
index = '0' if var.scalar else '_presynaptic_idx'
try:
self.variables.add_reference(name + '_pre', self.source, name,
index=index)
except TypeError:
logger.diagnostic(('Cannot include a reference to {var} in '
'{synapses}, {var} uses a non-standard '
'indexing in the pre-synaptic group '
'{source}.').format(var=name,
synapses=self.name,
source=self.source.name))
for name in getattr(self.target, 'variables', {}).keys():
# Raise an error if a variable name is also used for a synaptic
# variable (we ignore 'lastupdate' to allow connections to another
# Synapses object)
if (name in equations.names and name != 'lastupdate' and
'summed' not in equations[name].flags):
error_msg = ("The post-synaptic variable '{name}' has the same "
"name as a synaptic variable, rename the synaptic "
"variable ").format(name=name)
if name+'_syn' not in self.variables:
error_msg += ("(for example to '{name}_syn') ".format(name=name))
error_msg += 'to avoid confusion'
raise ValueError(error_msg)
if name.startswith('_'):
continue # Do not add internal variables
var = self.target.variables[name]
index = '0' if var.scalar else '_postsynaptic_idx'
try:
self.variables.add_reference(name + '_post', self.target, name,
index=index)
# Also add all the post variables without a suffix, but only if
# it does not have a post or pre suffix in the target group
# (which could happen when connecting to synapses)
if not name.endswith('_post') or name.endswith('_pre'):
self.variables.add_reference(name, self.target, name,
index=index)
except TypeError:
logger.diagnostic(('Cannot include a reference to {var} in '
'{synapses}, {var} uses a non-standard '
'indexing in the post-synaptic group '
'{target}.').format(var=name,
synapses=self.name,
target=self.target.name))
# Check scalar subexpressions
for eq in equations.values():
if eq.type == SUBEXPRESSION and 'shared' in eq.flags:
var = self.variables[eq.varname]
for identifier in var.identifiers:
if identifier in self.variables:
if not self.variables[identifier].scalar:
raise SyntaxError(('Shared subexpression %s refers '
'to non-shared variable %s.')
% (eq.varname, identifier))
def before_run(self, run_namespace):
self.equations.check_units(self, run_namespace=run_namespace)
# Check that subexpressions that refer to stateful functions are labeled
# as "constant over dt"
check_subexpressions(self, self.equations, run_namespace)
super(Synapses, self).before_run(run_namespace=run_namespace)
@device_override('synapses_connect')
def connect(self, condition=None, i=None, j=None, p=1., n=1,
skip_if_invalid=False,
namespace=None, level=0):
'''
Add synapses.
See :doc:`/user/synapses` for details.
Parameters
----------
condition : str, bool, optional
A boolean or string expression that evaluates to a boolean.
The expression can depend on indices ``i`` and ``j`` and on
pre- and post-synaptic variables. Can be combined with
arguments ``n``, and ``p`` but not ``i`` or ``j``.
i : int, ndarray of int, optional
The presynaptic neuron indices (in the form of an index or an array
of indices). Must be combined with ``j`` argument.
j : int, ndarray of int, str, optional
The postsynaptic neuron indices. It can be an index or array of
indices if combined with the ``i`` argument, or it can be a string
generator expression.
p : float, str, optional
The probability to create ``n`` synapses wherever the ``condition``
evaluates to true. Cannot be used with generator syntax for ``j``.
n : int, str, optional
The number of synapses to create per pre/post connection pair.
Defaults to 1.
skip_if_invalid : bool, optional
If set to True, rather than raising an error if you try to
create an invalid/out of range pair (i, j) it will just
quietly skip those synapses.
namespace : dict-like, optional
A namespace that will be used in addition to the group-specific
namespaces (if defined). If not specified, the locals
and globals around the run function will be used.
level : int, optional
How deep to go up the stack frame to look for the locals/global
(see ``namespace`` argument).
Examples
--------
>>> from brian2 import *
>>> import numpy as np
>>> G = NeuronGroup(10, 'dv/dt = -v / tau : 1', threshold='v>1', reset='v=0')
>>> S = Synapses(G, G, 'w:1', on_pre='v+=w')
>>> S.connect(condition='i != j') # all-to-all but no self-connections
>>> S.connect(i=0, j=0) # connect neuron 0 to itself
>>> S.connect(i=np.array([1, 2]), j=np.array([2, 1])) # connect 1->2 and 2->1
>>> S.connect() # connect all-to-all
>>> S.connect(condition='i != j', p=0.1) # Connect neurons with 10% probability, exclude self-connections
>>> S.connect(j='i', n=2) # Connect all neurons to themselves with 2 synapses
>>> S.connect(j='k for k in range(i+1)') # Connect neuron i to all j with 0<=j<=i
>>> S.connect(j='i+(-1)**k for k in range(2) if i>0 and i<N_pre-1') # connect neuron i to its neighbours if it has both neighbours
>>> S.connect(j='k for k in sample(N_post, p=i*1.0/(N_pre-1))') # neuron i connects to j with probability i/(N-1)
'''
# check types
if condition is not None and not isinstance(condition, (bool,
str)):
raise TypeError("condition argument must be bool or string. If you "
"want to connect based on indices, use "
"connect(i=..., j=...).")
if i is not None and (not (isinstance(i, (numbers.Integral,
np.ndarray,
collections.Sequence)) or
hasattr(i, '_indices')) or
isinstance(i, str)):
raise TypeError("i argument must be int or array")
if j is not None and not (isinstance(j, (numbers.Integral,
np.ndarray,
collections.Sequence)) or
hasattr(j, '_indices')):
raise TypeError("j argument must be int, array or string")
# TODO: eliminate these restrictions
if not isinstance(p, (int, float, str)):
raise TypeError("p must be float or string")
if not isinstance(n, (int, str)):
raise TypeError("n must be int or string")
if isinstance(condition, str) and re.search(r'\bfor\b',
condition):
raise ValueError("Generator expression given for condition, write "
"connect(j='{condition}'...) instead of "
"connect('{condition}'...).".format(condition=condition))
# TODO: check if string expressions have the right types and return
# useful error messages
self._connect_called = True
# which connection case are we in?
if condition is None and i is None and j is None:
condition = True
try:
if condition is not None:
if i is not None or j is not None:
raise ValueError("Cannot combine condition with i or j "
"arguments")
# convert to generator syntax
if condition is False:
return
if condition is True:
condition = 'True'
condition = word_substitute(condition, {'j': '_k'})
if not isinstance(p, str) and p == 1:
j = ('_k for _k in range(N_post) '
'if {expr}').format(expr=condition)
else:
j = None
if isinstance(p, str):
p_dep = self._expression_index_dependence(p)
if '_postsynaptic_idx' in p_dep or '_iterator_idx' in p_dep:
j = ('_k for _k in range(N_post) '
'if ({expr}) and '
'rand()<{p}').format(expr=condition, p=p)
if j is None:
j = ('_k for _k in sample(N_post, p={p}) '
'if {expr}').format(expr=condition, p=p)
# will now call standard generator syntax (see below)
elif i is not None:
if j is None:
raise ValueError("i argument must be combined with j "
"argument")
if skip_if_invalid:
raise ValueError("Can only use skip_if_invalid with string "
"syntax")
if hasattr(i, '_indices'):
i = i._indices()
i = np.asarray(i)
if not np.issubdtype(i.dtype, np.signedinteger):
raise TypeError(('Presynaptic indices have to be given as '
'integers, are type %s '
'instead.') % i.dtype)
if hasattr(j, '_indices'):
j = j._indices()
j = np.asarray(j)
if not np.issubdtype(j.dtype, np.signedinteger):
raise TypeError(('Presynaptic indices can only be combined '
'with postsynaptic integer indices))'))
if isinstance(n, str):
raise TypeError(('Indices cannot be combined with a string'
'expression for n. Either use an '
'array/scalar for n, or a string '
'expression for the connections'))
i, j, n = np.broadcast_arrays(i, j, n)
if i.ndim > 1:
raise ValueError('Can only use 1-dimensional indices')
self._add_synapses_from_arrays(i, j, n, p, namespace=namespace)
return
elif j is not None:
if isinstance(p, str) or p != 1:
raise ValueError("Generator syntax cannot be combined with "
"p argument")
if not re.search(r'\bfor\b', j):
if_split = j.split(' if ')
if len(if_split) == 1:
j = '{j} for _ in range(1)'.format(j=j)
elif len(if_split) == 2:
j = '{target} for _ in range(1) if {cond}'.format(target=if_split[0],
cond=if_split[1])
else:
raise SyntaxError("Error parsing expression '{j}'. "
"Expression must have generator "
"syntax, for example 'k for k in "
"range(i-10, i+10)'".format(j=j))
# will now call standard generator syntax (see below)
else:
raise ValueError("Must specify at least one of condition, i or "
"j arguments")
# standard generator syntax
self._add_synapses_generator(j, n, skip_if_invalid=skip_if_invalid,
namespace=namespace, level=level+2)
except IndexError as e:
raise IndexError("Tried to create synapse indices outside valid "
"range. Original error message: " + str(e))
def check_variable_write(self, variable):
'''
Checks that `Synapses.connect` has been called before setting a
synaptic variable.
Parameters
----------
variable : `Variable`
The variable that the user attempts to set.
Raises
------
TypeError
If `Synapses.connect` has not been called yet.
'''
if not self._connect_called:
raise TypeError(("Cannot write to synaptic variable '%s', you need "
"to call connect(...) first") % variable.name)
def _resize(self, number):
if not isinstance(number, (numbers.Integral, np.integer)):
raise TypeError(('Expected an integer number got {} '
'instead').format(type(number)))
if number < self.N:
raise ValueError(('Cannot reduce number of synapses, '
'{} < {}').format(number, len(self)))
for variable in self._registered_variables:
variable.resize(number)
self.variables['N'].set_value(number)
def _update_synapse_numbers(self, old_num_synapses):
source_offset = self.variables['_source_offset'].get_value()
target_offset = self.variables['_target_offset'].get_value()
# This resizing is only necessary if we are connecting to/from synapses
post_with_offset = (int(self.variables['N_post'].get_value()) +
target_offset)
pre_with_offset = (int(self.variables['N_pre'].get_value()) +
source_offset)
self.variables['N_incoming'].resize(post_with_offset)
self.variables['N_outgoing'].resize(pre_with_offset)
N_outgoing = self.variables['N_outgoing'].get_value()
N_incoming = self.variables['N_incoming'].get_value()
synaptic_pre = self.variables['_synaptic_pre'].get_value()
synaptic_post = self.variables['_synaptic_post'].get_value()
# Update the number of total outgoing/incoming synapses per
# source/target neuron
N_outgoing[:] += np.bincount(synaptic_pre[old_num_synapses:],
minlength=len(N_outgoing))
N_incoming[:] += np.bincount(synaptic_post[old_num_synapses:],
minlength=len(N_incoming))
if self.multisynaptic_index is not None:
synapse_number_var = self.variables[self.multisynaptic_index]
synapse_number = synapse_number_var.get_value()
# Update the "synapse number" (number of synapses for the same
# source-target pair)
# We wrap pairs of source/target indices into a complex number for
# convenience
_source_target_pairs = synaptic_pre + synaptic_post*1j
synapse_number[:] = calc_repeats(_source_target_pairs)
def register_variable(self, variable):
'''
Register a `DynamicArray` to be automatically resized when the size of
the indices change. Called automatically when a `SynapticArrayVariable`
specifier is created.
'''
if not hasattr(variable, 'resize'):
raise TypeError(('Variable of type {} does not have a resize '
'method, cannot register it with the synaptic '
'indices.').format(type(variable)))
self._registered_variables.add(variable)
def unregister_variable(self, variable):
'''
Unregister a `DynamicArray` from the automatic resizing mechanism.
'''
self._registered_variables.remove(variable)
def _get_multisynaptic_indices(self):
template_kwds = {'multisynaptic_index': self.multisynaptic_index}
if self.multisynaptic_index is not None:
needed_variables = [self.multisynaptic_index]
else:
needed_variables=[]
return template_kwds, needed_variables
def _add_synapses_from_arrays(self, sources, targets, n, p,
namespace=None):
template_kwds, needed_variables = self._get_multisynaptic_indices()
variables = Variables(self)
sources = np.atleast_1d(sources).astype(np.int32)
targets = np.atleast_1d(targets).astype(np.int32)
# Check whether the values in sources/targets make sense
error_message = ('The given {source_or_target} indices contain '
'values outside of the range [0, {max_value}] '
'allowed for the {source_or_target} group '
'"{group_name}"')
for indices, source_or_target, group in [(sources, 'source', self.source),
(targets, 'target', self.target)]:
if np.max(indices) >= len(group) or np.min(indices) < 0:
raise IndexError(error_message.format(source_or_target=source_or_target,
max_value=len(group)-1,
group_name=group.name))
n = np.atleast_1d(n)
p = np.atleast_1d(p)
if not len(p) == 1 or p != 1:
use_connections = np.random.rand(len(sources)) < p
sources = sources[use_connections]
targets = targets[use_connections]
n = n[use_connections]
sources = sources.repeat(n)
targets = targets.repeat(n)
variables.add_array('sources', len(sources), dtype=np.int32,
values=sources)
variables.add_array('targets', len(targets), dtype=np.int32,
values=targets)
# These definitions are important to get the types right in C++
variables.add_auxiliary_variable('_real_sources', dtype=np.int32)
variables.add_auxiliary_variable('_real_targets', dtype=np.int32)
abstract_code = ''
if '_offset' in self.source.variables:
variables.add_reference('_source_offset', self.source, '_offset')
abstract_code += '_real_sources = sources + _source_offset\n'
else:
abstract_code += '_real_sources = sources\n'
if '_offset' in self.target.variables:
variables.add_reference('_target_offset', self.target, '_offset')
abstract_code += '_real_targets = targets + _target_offset\n'
else:
abstract_code += '_real_targets = targets'
logger.debug("Creating synapses from group '%s' to group '%s', "
"using pre-defined arrays)" % (self.source.name,
self.target.name))
codeobj = create_runner_codeobj(self,
abstract_code,
'synapses_create_array',
additional_variables=variables,
template_kwds=template_kwds,
needed_variables=needed_variables,
check_units=False,
run_namespace={})
codeobj()
def _expression_index_dependence(self, expr, additional_indices=None):
    '''
    Return the set of synaptic index names that ``expr`` depends on.
    '''
    if additional_indices is None:
        additional_indices = {}
    rendered = NodeRenderer(use_vectorisation_idx=True).render_expr(expr)
    dependencies = set()
    for identifier in get_identifiers_recursively([rendered], self.variables):
        # Special handling of i and j -- they do not actually use pre-/
        # postsynaptic indices (except for subgroups), they *are* the
        # pre-/postsynaptic indices
        if identifier == 'i':
            dependencies.add('_presynaptic_idx')
        elif identifier == 'j':
            dependencies.add('_iterator_idx')
        elif identifier in additional_indices:
            dependencies.add(additional_indices[identifier])
        else:
            dependencies.add(self.variables.indices[identifier])
    # Constant-indexed ('0') variables do not constrain anything.
    dependencies.discard('0')
    return dependencies
def _add_synapses_generator(self, j, n, skip_if_invalid=False, namespace=None, level=0):
    """
    Create synapses from a generator expression string ``j`` (e.g.
    ``'k for k in range(i-10, i+10)'``), creating ``n`` synapses per
    generated pair.

    Parameters
    ----------
    j : str
        The synapse-generator expression.
    n : int or str
        Number of synapses per generated pair (may be an expression).
    skip_if_invalid : bool, optional
        If True, silently skip generated indices that fall outside the
        target group instead of raising.
    namespace : dict, optional
        Namespace for resolving names; defaults to the caller's locals.
    level : int, optional
        How many stack frames up to look for the local namespace.
    """
    # Get the local namespace
    if namespace is None:
        namespace = get_local_namespace(level=level+1)
    parsed = parse_synapse_generator(j)
    self._check_parsed_synapses_generator(parsed, namespace)
    template_kwds, needed_variables = self._get_multisynaptic_indices()
    template_kwds.update(parsed)
    template_kwds['skip_if_invalid'] = skip_if_invalid
    if (parsed['iterator_func'] == 'sample' and
            parsed['iterator_kwds']['sample_size'] == 'fixed'):
        raise NotImplementedError("Fixed sample size not implemented yet.")
    # The abstract code is assembled in four template sections.
    abstract_code = {'setup_iterator': '',
                     'create_j': '',
                     'create_cond': '',
                     'update_post': ''}
    additional_indices = {parsed['iteration_variable']: '_iterator_idx'}
    setupiter = ''
    for k, v in parsed['iterator_kwds'].items():
        if v is not None and k != 'sample_size':
            deps = self._expression_index_dependence(v, additional_indices)
            # Iterator bounds may only depend on the presynaptic index.
            if '_postsynaptic_idx' in deps or '_iterator_idx' in deps:
                raise ValueError('Expression "{}" depends on postsynaptic '
                                 'index or iterator'.format(v))
            setupiter += '_iter_' + k + ' = ' + v + '\n'
    # rand() in the if condition depends on _vectorisation_idx, but not if
    # its in the range expression (handled above)
    additional_indices['_vectorisation_idx'] = '_iterator_idx'
    postsynaptic_condition = False
    postsynaptic_variable_used = False
    if parsed['if_expression'] is not None:
        deps = self._expression_index_dependence(parsed['if_expression'],
                                                 additional_indices)
        if '_postsynaptic_idx' in deps:
            postsynaptic_condition = True
            postsynaptic_variable_used = True
        elif '_iterator_idx' in deps:
            postsynaptic_condition = True
    template_kwds['postsynaptic_condition'] = postsynaptic_condition
    template_kwds['postsynaptic_variable_used'] = postsynaptic_variable_used
    abstract_code['setup_iterator'] += setupiter
    abstract_code['create_j'] += '_pre_idx = _raw_pre_idx \n'
    abstract_code['create_j'] += '_j = ' + parsed['element'] + '\n'
    if postsynaptic_condition:
        abstract_code['create_cond'] += '_post_idx = _raw_post_idx \n'
    if parsed['if_expression'] is not None:
        abstract_code['create_cond'] += ('_cond = ' +
                                         parsed['if_expression'] + '\n')
    abstract_code['update_post'] += '_post_idx = _raw_post_idx \n'
    abstract_code['update_post'] += '_n = ' + str(n) + '\n'
    # This overwrites 'i' and 'j' in the synapses' variables dictionary
    # This is necessary because in the context of synapse creation, i
    # and j do not correspond to the sources/targets of the existing
    # synapses but to all the possible sources/targets
    variables = Variables(None)
    # Will be set in the template
    variables.add_auxiliary_variable('_i', dtype=np.int32)
    variables.add_auxiliary_variable('_j', dtype=np.int32)
    variables.add_auxiliary_variable('_iter_low', dtype=np.int32)
    variables.add_auxiliary_variable('_iter_high', dtype=np.int32)
    variables.add_auxiliary_variable('_iter_step', dtype=np.int32)
    variables.add_auxiliary_variable('_iter_p')
    variables.add_auxiliary_variable('_iter_size', dtype=np.int32)
    variables.add_auxiliary_variable(parsed['iteration_variable'],
                                     dtype=np.int32)
    # Make sure that variables have the correct type in the code
    variables.add_auxiliary_variable('_pre_idx', dtype=np.int32)
    variables.add_auxiliary_variable('_post_idx', dtype=np.int32)
    if parsed['if_expression'] is not None:
        # Use the builtin `bool` here: `np.bool` was merely a deprecated
        # alias for it and has been removed in NumPy >= 1.24.
        variables.add_auxiliary_variable('_cond', dtype=bool)
    variables.add_auxiliary_variable('_n', dtype=np.int32)
    # Subgroups need their offset added to the raw indices.
    if '_offset' in self.source.variables:
        variables.add_reference('_source_offset', self.source, '_offset')
    else:
        variables.add_constant('_source_offset', value=0)
    if '_offset' in self.target.variables:
        variables.add_reference('_target_offset', self.target, '_offset')
    else:
        variables.add_constant('_target_offset', value=0)
    variables.add_auxiliary_variable('_raw_pre_idx', dtype=np.int32)
    variables.add_auxiliary_variable('_raw_post_idx', dtype=np.int32)
    variable_indices = defaultdict(lambda: '_idx')
    for varname in self.variables:
        if self.variables.indices[varname] == '_presynaptic_idx':
            variable_indices[varname] = '_raw_pre_idx'
        elif self.variables.indices[varname] == '_postsynaptic_idx':
            variable_indices[varname] = '_raw_post_idx'
    if self.variables['i'] is self.variables['_synaptic_pre']:
        variables.add_subexpression('i', '_i',
                                    dtype=self.variables['i'].dtype)
    if self.variables['j'] is self.variables['_synaptic_post']:
        variables.add_subexpression('j', '_j',
                                    dtype=self.variables['j'].dtype)
    logger.debug(("Creating synapses from group '%s' to group '%s', "
                  "using generator '%s'") % (self.source.name,
                                             self.target.name,
                                             parsed['original_expression']))
    codeobj = create_runner_codeobj(self,
                                    abstract_code,
                                    'synapses_create_generator',
                                    variable_indices=variable_indices,
                                    additional_variables=variables,
                                    template_kwds=template_kwds,
                                    needed_variables=needed_variables,
                                    check_units=False,
                                    run_namespace=namespace)
    codeobj()
def _check_parsed_synapses_generator(self, parsed, namespace):
    """
    Type-check the parsed synapses generator. This function will raise a
    TypeError if any of the arguments to the iterator function are of an
    invalid type.
    """
    # Only the `range` iterator has arguments to type-check.
    if parsed['iterator_func'] != 'range':
        return
    # All arguments of the range function must resolve to integers.
    for argname, arg in list(parsed['iterator_kwds'].items()):
        names = get_identifiers(arg)
        resolved = self.resolve_all(names, run_namespace=namespace,
                                    user_identifiers=names)
        if brian_ast(arg, resolved).dtype != 'integer':
            raise TypeError('The "%s" argument of the range function was '
                            '"%s", but it needs to be an '
                            'integer.' % (argname, arg))
|
from localground.apps.site.tests.views.print_tests import *
from localground.apps.site.tests.views.forms import *
from localground.apps.site.tests.views.map_tests import *
from localground.apps.site.tests.views.profile_tests import *
from localground.apps.site.tests.views.sharing_tests import *
from localground.apps.site.tests.views.upload_tests import *
|
# NOTE(review): scratch file of Python practice exercises. Every snippet below
# is disabled by wrapping it in a triple-quoted string literal, which Python
# evaluates and discards at import time (a no-op). The snippet text is program
# data (string literals) and is left byte-for-byte intact; only these '#'
# comment lines are additions.

# Snippet: nested dict access with .get()
'''usuario = {
'nombre':'juan perez',
'domicilio':{
'calle':'Calle Falsa 123',
'localidad':'Saenz Peña'
},
'nivel':'basico'
}
print(usuario['domicilio'].get('localidad'))
'''
# Snippet: enumerate over a list
'''
verduras =['papa','cebolla','rucula','batata','lechuga']
#contador=0
for contador, una_verdura in enumerate(verduras):
print(f'En la posición {contador} esta una {una_verdura}')
'''
# Snippet: pass-by-value semantics for immutable arguments
'''
def una_funcion_valor(nro):
"""Paso por valor"""
nro=nro+3
print(f"Valor dentro de la funcion {nro}")
nro=22
una_funcion_valor(nro)
print(f"Valor fuera de la funcion {nro}")
'''
# Snippet: negative indexing (colores[3] would raise IndexError)
'''
colores=['azul','rojo','amarillo','negro']
print(colores[-3])
print(colores[3])
'''
# Snippet: local assignment shadowing a global
'''
animal ='León'
def mostar_animal():
""" Esta funcion muestra el valor de la variable animal """
#global animal
animal='Ballena'
print(animal)
mostar_animal()
print(animal)
'''
# Snippet: if / elif / else ladder
'''
valor=float(input())
if valor<=5:
print('bajo')
elif valor <=10:
print('medio')
else:
print('alto')
'''
# Snippet: minimal function definition and call
'''
def saludo():
print("Hola Mundo")
saludo()
'''
# Snippet: counting loop (as written, missing ':' after the for header)
'''
edades =(5,25,2,18,22,45,50,1,80,10)
cont=0
for i in edades
if i>20:
cont +=1
print(f'hay {cont} numeros mayores a 20')
'''
# Snippet: continue inside a for loop
'''
for letra in "Python":
if letra == 'h':
continue
print('erre')
'''
# Snippet: str -> int conversion
'''
variable='5'
variable= int(variable)
print(type(variable))
'''
# Snippet: indexing into nested tuples/lists
'''
animales= ('perro', 'gato', 'gallina',('canario','loro','guacamayo'),['leon','puma','yaguareté'] )
obt_loro = animales[3][1]
obt_puma = animales[4][1]
print(obt_loro)
print(obt_puma)
'''
# Snippet: reading user input
'''
nombre= input("Ingrese su nombre por favor:")
'''
# Snippet: tuple indexing
'''
vocales =('a','e','i','o','u')
mostrar_pos = vocales[2]
print("el la posicion 2 se encuentra la letra: ", mostrar_pos)
'''
# Snippet: try/except around a loop (references an undefined `letra`)
'''
try:
for vocales in "Python":
if letra =="h":
pass
print("Letrar actual :", letra)
except Exception as e:
print("Ha ocurrido un error no previsto ", type(e).__name__)
for letra in "Python":
if letra=="h":
pass
print("Letra actual :", letra)
'''
# Snippet: function return value (result discarded)
'''
def sumaNumeros(numero1,numero2):
return numero1 + numero2
#resultado= sumaNumeros(5,10)
sumaNumeros(5,2)
'''
# Snippet: pass-by-reference semantics for mutable arguments
'''
def una_funcion_referencia(lista):
lista.append(3500)
print(f"Este es el contenido de la lista una vez ejecutada la funcion: {lista}")
lista = [50,80,100]
una_funcion_referencia(lista)
print(f"Este es el contenido de la lista al llamarla fuera de la funcion una vez ejecutada: {lista}")
'''
# Snippet: digit-sum helper driven by user input
'''
def sumaDigitos(numero):
suma=0
while numero!=0:
digito=numero%10
suma=suma+digito
numero=numero//10
return suma
sumatoria=0
num=int(input("Número a procesar: "))
while num!=0:
print("Suma:",sumaDigitos(num))
sumatoria=sumatoria+num
num=int(input("Número a procesar: "))
print("Sumatoria:", sumatoria)
print("Dígitos:", sumaDigitos(sumatoria))
'''
# Snippet: while with range membership (infinite loop: `a` never changes)
'''
a=1
while a in range (5):
print(a)
'''
# Snippet: random.shuffle
'''
from random import shuffle
x =['Tener','El','Azul','Bandera','Volar','Alto']
shuffle(x)
print(len(x))
print(x)
'''
# Snippet: building a list from user input
'''
numero = int(input("Dígame cuántas palabras tiene la lista: "))
if numero < 1:
print("¡Imposible!")
else:
lista = []
for i in range(numero):
print("Dígame la palabra", str(i + 1) + ": ", end="")
palabra = input()
lista += [palabra]
print("La lista creada es:", lista)
'''
# Snippet: dict of lists (the "domingo" lookup would raise KeyError)
'''
materias = {}
materias["lunes"] = [6103, 7540]
materias["martes"] = [6201]
materias["miércoles"] = [6103, 7540]
materias["jueves"] = []
materias["viernes"] = [6201]
print(materias["domingo"])
'''
# Snippet: printing a dict
'''
d = {'juan':40, 'pedro':45}
print(d)
'''
# Snippet: dict literal and key access
'''
diccionario={'Pais':'India','Capital':'Delhi','PM':'Modi'}
print(diccionario['Pais'])
'''
|
# from networkx.generators import random_clustered
import numpy as np
import logging
import sys
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
import networkx as nx
import tqdm
from tqdm import trange
from pyfme.aircrafts import Cessna172
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.wind import NoWind
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment import Environment
from pyfme.utils.trimmer import steady_state_trim
from pyfme.models.state.position import EarthPosition
from pyfme.models import EulerFlatEarth
from pyfme.utils.input_generator import Constant
from pyfme.simulator import Simulation
from IPython.display import clear_output
class CessnaModel:
    """Wrapper around a pyfme Cessna 172.

    Trims the aircraft at a given position/speed in __init__ and exposes
    helpers to roll the nonlinear dynamics forward under constant controls.
    """

    # NOTE(review): these are *class* attributes, shared by every instance;
    # __init__ calls self.environment.update(...), mutating the shared
    # environment. Presumably only one instance is alive at a time --
    # confirm before instantiating several models.
    aircraft = Cessna172()
    environment = Environment(ISA1976(), VerticalConstant(), NoWind())

    def __init__(self, x0, y0, h0, v0, psi0=0.5):
        """Trim the aircraft at earth position (x0, y0), height h0, speed v0
        and heading psi0, and store the trimmed state as the initial state."""
        self.x0 = x0
        self.y0 = y0
        self.h0 = h0
        self.v0 = v0
        self.pos = EarthPosition(x=x0, y=y0, height=h0)
        # Initial guess handed to the trim solver.
        controls0 = {'delta_elevator': 0, 'delta_aileron': 0, 'delta_rudder': 0, 'delta_t': 0.5}
        # Trim Aircraft as IC
        trimmed_state, trimmed_controls = steady_state_trim(
            self.aircraft,
            self.environment,
            self.pos,
            psi0,
            v0,
            controls0
        )
        self.environment.update(trimmed_state)
        self.state = trimmed_state
        self.system = EulerFlatEarth(t0=0, full_state=trimmed_state)
        self.solution_found = False

    def rand_action(self):
        """Draw a uniformly random control vector
        [elevator, aileron, rudder, throttle] within the aircraft limits."""
        lim_delta_e = self.aircraft.control_limits['delta_elevator']
        e_range = lim_delta_e[1] - lim_delta_e[0]
        lim_delta_a = self.aircraft.control_limits['delta_aileron']
        a_range = lim_delta_a[1] - lim_delta_a[0]
        lim_delta_r = self.aircraft.control_limits['delta_rudder']
        r_range = lim_delta_r[1] - lim_delta_r[0]
        lim_delta_t = self.aircraft.control_limits['delta_t']
        t_range = lim_delta_t[1] - lim_delta_t[0]
        # Scale unit-uniform samples to each control's range, then shift
        # by the lower limits.
        u = np.random.rand(4)*np.array([e_range,a_range, r_range, t_range])
        return u + np.array([lim_delta_e[0],lim_delta_a[0], lim_delta_r[0], lim_delta_t[0]])

    def simulate(self, Tprop, controls, init_state=None):
        """Propagate the dynamics for Tprop seconds under constant controls
        (dict or 4-sequence), from init_state (default: the trimmed state).
        Returns (propagation results, Simulation object)."""
        if init_state is None:
            init_state = self.state
        if not isinstance(controls, dict):
            controls = self.format_controls(*controls)
        # A fresh integrator per call keeps repeated rollouts independent.
        system = EulerFlatEarth(t0=0, full_state=init_state)
        sim = Simulation(self.aircraft, system, self.environment, controls)
        result = sim.propagate(Tprop)
        # Suppress notebook progress output between rollouts.
        clear_output()
        return result, sim # Changed from just returning result

    @staticmethod
    def positions_from_sim(results):
        '''results are sim position results after propagation;
        returns an (N, 2) array of [x_earth, height] rows.'''
        return results[['x_earth','height']].to_numpy()

    def format_controls(self, delta_e, delta_a, delta_r, delta_t):
        '''
        Formats controls into pyFME compatible dict of Constant inputs,
        clamping each value to the aircraft's control limits.
        '''
        lim_delta_e = self.aircraft.control_limits['delta_elevator']
        lim_delta_a = self.aircraft.control_limits['delta_aileron']
        lim_delta_r = self.aircraft.control_limits['delta_rudder']
        lim_delta_t = self.aircraft.control_limits['delta_t']
        # Ensure control bounds aren't exceeded
        delta_e = max(min(lim_delta_e[1], delta_e),lim_delta_e[0])
        delta_a = max(min(lim_delta_a[1], delta_a),lim_delta_a[0])
        delta_r = max(min(lim_delta_r[1], delta_r),lim_delta_r[0])
        delta_t = max(min(lim_delta_t[1], delta_t),lim_delta_t[0])
        return {
            'delta_elevator': Constant(delta_e),
            'delta_aileron': Constant(delta_a),
            'delta_rudder': Constant(delta_r),
            'delta_t': Constant(delta_t)
        }

    @staticmethod
    def pos_from_state(state):
        """Return np.array([x_earth, height]) extracted from a full state."""
        return np.array([state.position.x_earth, state.position.height])
class FullSimRRT:
    """RRT planner over the (x_earth, height) plane that extends the tree
    with full flight-dynamics rollouts of a CessnaModel.

    Graph nodes carry the aircraft state; each edge carries the simulated
    state history and the constant control action that produced it.
    """

    def __init__(self, model, obstacles, eps, xlim, ylim, qgoal, max_iter=10):
        """
        model     -- CessnaModel-like object (already trimmed)
        obstacles -- iterable of objects exposing is_colliding(pt) and
                     is_segment_intersecting(p1, p2)
        eps       -- goal-acceptance radius around qgoal
        xlim/ylim -- (min, max) bounds of the search region
        qgoal     -- goal position, np.array([x_earth, height])
        max_iter  -- default number of sampling iterations for gen_samples
        """
        self.model = model
        self.x0 = model.x0
        self.y0 = model.y0
        self.h0 = model.h0
        self.v0 = model.v0
        self.obstacles = obstacles
        self.eps = eps
        self.xlim = xlim
        self.ylim = ylim
        self.qgoal = qgoal
        # Bug fix: max_iter was accepted but never stored, so
        # gen_samples() with N=None raised AttributeError on self.max_iter.
        self.max_iter = max_iter
        self.Graph = nx.DiGraph()
        # Node 1 is the root: the model's trimmed initial state.
        self.Graph.add_nodes_from([(1, dict(
            pos=(model.pos_from_state(model.state)),
            state=model.state,
        ))]
        )
        self._c = 1  # Node counter
        self.solution_found = False

    @staticmethod
    def get_distance(p1, p2):
        """Euclidean distance between two points."""
        return np.linalg.norm(p1 - p2)

    def dist_to_goal(self, pt):
        """Euclidean distance from pt to the goal."""
        return np.linalg.norm(self.qgoal - pt)

    def is_solution(self, pt):
        """True if pt lies within eps of the goal."""
        return self.dist_to_goal(pt) <= self.eps

    def sample_pos(self):
        """Rejection-sample a collision-free position inside the bounds."""
        while True:
            qrand = np.random.rand(2)
            qrand[0] = (self.xlim[1] - self.xlim[0]) * qrand[0] + self.xlim[0]
            qrand[1] = (self.ylim[1] - self.ylim[0]) * qrand[1] + self.ylim[0]
            if not self.obstacles or all(not obs.is_colliding(qrand) for obs in self.obstacles):
                return qrand

    def find_closest_node(self, qrand):
        """Return (position, node id) of the tree node closest to qrand."""
        # Use Angle off as distance metric as well
        min_dist = float('inf')
        min_dist_node = None
        qnear = None
        for node in self.Graph.nodes:
            dist = np.linalg.norm(qrand - self.Graph.nodes[node]['pos'])
            if dist < min_dist:
                min_dist = dist
                qnear = self.Graph.nodes[node]['pos']
                min_dist_node = node
        assert (min_dist < float('inf')) and (qnear is not None)
        return qnear, min_dist_node

    def is_inbounds(self, pt):
        """True if pt lies inside the rectangular search region."""
        return (self.xlim[0] <= pt[0] <= self.xlim[1]) and (self.ylim[0] <= pt[1] <= self.ylim[1])

    def check_path(self, arr):
        '''
        Check a simulated path for collision, bounds violation or solution.
        Input array has columns [x_earth, height].
        Returns (array truncated just past the goal point, True) when the
        goal is reached, (None, False) when the path leaves the bounds or
        hits an obstacle, and (arr, False) otherwise.
        '''
        for i in range(arr.shape[0] - 1):
            # Check Solution
            if self.is_solution(arr[i + 1]):
                return arr[:i + 2], True
            # Check Bounds
            if not self.is_inbounds(arr[i + 1]):
                return None, False
            # Check Collision
            for obs in self.obstacles:
                if obs.is_segment_intersecting(arr[i], arr[i + 1]):
                    return None, False
        return arr, False

    def get_solution_trajectory(self, end_node):
        """Walk predecessors from end_node back to the root (node 1),
        stacking per-edge state histories and actions in forward order.
        Returns (node ids root->end as an iterator, positions, actions)."""
        child = end_node
        node_hist = [end_node]
        parent = list(self.Graph.predecessors(end_node))[0]
        path = self.model.positions_from_sim(self.Graph[parent][child]['state_hist'])
        ctrl = self.Graph[parent][child]['action']
        child = parent
        node_hist.append(parent)
        X_hist = path
        u_hist = ctrl
        while child != 1:
            parent = list(self.Graph.predecessors(child))[0]
            path = self.model.positions_from_sim(self.Graph[parent][child]['state_hist'])
            ctrl = self.Graph[parent][child]['action']
            # Prepend: we are walking backwards toward the root.
            X_hist = np.vstack([path, X_hist])
            u_hist = np.vstack([ctrl, u_hist])
            child = parent
            node_hist.append(parent)
        return reversed(node_hist), X_hist, u_hist

    def single_sample(self, Tprop, N_tries):
        """Sample a target position and try N_tries random actions from the
        nearest node; keep the rollout ending closest to the sample, or the
        first one that reaches the goal."""
        qrand = self.sample_pos()
        qnear, min_dist_node = self.find_closest_node(qrand)
        goal_found = False
        best_action = None
        best_endstate = None
        best_result = None
        best_endpos = None
        best_dist = float('inf')
        for i in range(N_tries):
            a = self.model.rand_action()
            try:
                result, sim = self.model.simulate(Tprop, a, self.Graph.nodes[min_dist_node]['state'])
            except ValueError:
                # Integration failure for this action -- try another one.
                continue
            path = self.model.positions_from_sim(result)
            endstate = sim.system.full_state
            arr, flag = self.check_path(path)
            if arr is None:
                continue  # Out of bounds or collided.
            end_pos = path[-1]
            dist = self.get_distance(end_pos, qrand)
            if flag:
                # Reached the goal -- keep this rollout.
                goal_found = True
                best_action = a
                best_endstate = endstate
                best_result = result
                best_endpos = end_pos
                best_dist = dist
                continue
            if dist < best_dist:
                best_action = a
                best_endstate = endstate
                best_result = result
                best_endpos = end_pos
                best_dist = dist
        return best_result, best_endpos, best_endstate, best_action, goal_found, min_dist_node

    def gen_samples(self, Tprop, N_tries, N=None, status=True):
        """
        Grow the tree by repeated sampling.

        Tprop   -- Dynamics Propagation Duration
        N_tries -- Number of tries to get close to qrand
        N       -- Number of sampling iterations (defaults to self.max_iter)
        status  -- show a tqdm progress bar when True
        """
        if N is None:
            N = self.max_iter
        for i in trange(N, disable=not status):
            best_result, best_endpos, best_endstate, best_action, goal_found, prev_node = self.single_sample(Tprop, N_tries)
            if goal_found:
                self.Graph.add_node(
                    self._c + 1,
                    pos=best_endpos,
                    state=best_endstate
                )
                self.Graph.add_edge(prev_node, self._c + 1,
                                    state_hist=best_result,
                                    action=best_action
                                    )
                self._c += 1
                self.solution_found = True
                node_hist, X_hist, u_hist = self.get_solution_trajectory(self._c)
                self.sol_node_hist = node_hist
                self.sol_X_hist = X_hist
                self.sol_u_hist = u_hist
                break
            elif best_result is None:
                # Every rollout this iteration was invalid; sample again.
                continue
            self.Graph.add_node(
                self._c + 1,
                pos=best_endpos,
                state=best_endstate
            )
            self.Graph.add_edge(prev_node, self._c + 1,
                                state_hist=best_result,
                                action=best_action
                                )
            self._c += 1

    def plot_path(self, figsize=(12, 7), solution=False):
        """Plot the tree (or, with solution=True, only the solution path),
        the goal circle and the rectangular obstacles. Returns (fig, ax)."""
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_xlim(self.xlim)
        ax.set_ylim(self.ylim)
        if solution:
            ax.plot(self.sol_X_hist[:, 0], self.sol_X_hist[:, 1])
        else:
            for i, j in self.Graph.edges:
                X = self.model.positions_from_sim(self.Graph[i][j]['state_hist'])
                ax.plot(X[:, 0], X[:, 1], c='blue')
        ax.add_patch(Circle(self.qgoal, self.eps, fill=False, ec='green'))
        for obs in self.obstacles:
            ax.add_patch(Rectangle(obs.xy, obs.sx, obs.sy, fc='red'))
        return fig, ax
|
# Cache of previously computed factorials (seeded with the base cases).
FACT_MAP = {
    0: 1,
    1: 1
}

def factorial(n):
    """Return n! for a non-negative integer n, memoizing results in FACT_MAP.

    The original recursive version recursed once per value of n (hitting the
    recursion limit for large n and recursing forever for negative n) and
    never populated the cache beyond the base cases. This version is
    iterative, validates its input, and stores the result for reuse.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    if n in FACT_MAP:
        return FACT_MAP[n]
    result = 1
    for k in range(2, n + 1):
        result *= k
    FACT_MAP[n] = result
    return result
def sumDigits(toSum):
    """Return the sum of the decimal digits of a non-negative integer
    (returns 0 for inputs <= 0, matching the loop condition)."""
    total = 0
    while toSum > 0:
        # Peel off the least-significant digit each iteration.
        toSum, digit = divmod(toSum, 10)
        total += digit
    return total
sumDigits(factorial(100))
|
# Import all public names from tkinter (Tk, widget classes, constants, ...).
from tkinter import *
# ttk provides the themed widget set (platform-native look and feel).
from tkinter import ttk
# Create the top-level (root) window that parents all other widgets.
root = Tk()
# Note: a tkinter program typically begins with the three lines above.
root.geometry("600x400")
# Create a button as a child of the root window; the `text` option is the
# label shown on the button. It will not appear until it is packed below --
# Tk does not yet know where to place it.
button = ttk.Button(root, text = 'Click Me')
# ttk gives the button the standard themed appearance.
# pack() makes the button visible by adding it to the window's layout.
button.pack()
# Print the button's current text (widget options are dict-style accessible).
print(button['text'])
# Bare expression: evaluates the option but displays it only in a REPL;
# it has no effect when the file is run as a script.
button['text']
# Change the button text via item assignment (first method).
button['text'] = 'Press Me'
button['text'] = 'Press Here'
button['text'] = 'Press Here User'
# Change the button text via config() (second method).
button.config(text = 'Push Me')
button.config(text = 'Press Here User')
# Create and pack a label in one statement without keeping a reference.
# The widget cannot be accessed later because it is not stored in a variable.
ttk.Label(root, text ='Hello, Tkinter!').pack()
# Preferred form: keep a reference so the widget can be reconfigured later.
label1 = ttk.Label(root, text ='Hello, Tkinter!')
label1.pack()
# Rebinds `button`: the earlier button widget keeps its last text, but this
# name now refers to a brand-new button.
button = ttk.Button(root, text ='Hello, Tkinter!')
button.pack()
# Labels vs. buttons: same text, different widget class.
# NOTE(review): this assignment shadows tkinter's own `Label` class
# (imported via `from tkinter import *`); a lowercase name would be safer.
Label = ttk.Label(root, text ='Hello, Tkinter!')
Label.pack()
button1 = ttk.Button(root, text ='Hello, Tkinter!')
button1.pack()
labe2 = ttk.Label(root, text ='Final exam for the end of the semester.')
labe2.pack()
button2 = ttk.Button(root, text ='click here to upload your final.')
button2.pack()
button3 = ttk.Button(root, text ='Click here to submit your final.')
button3.pack()
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
|
# Searching algorithms
def linear_search(array, val, len):
    """Scan the whole array and return the index of the *last* occurrence
    of val in array[:len], or 'Not found'. (The scan never stops early, so
    later matches overwrite earlier ones.)"""
    answer = 'Not found'
    for index in range(len):
        if array[index] == val:
            answer = index
    return answer
def better_linear_search(array, val, len):
    """Return the index of the *first* occurrence of val in array[:len],
    or 'Not found'. Stops scanning as soon as a match is found."""
    for index in range(len):
        if array[index] == val:
            return index
    return 'Not found'
def sentinel_linear_search(array, val, len):
    """Linear search without a per-iteration bounds check.

    Temporarily overwrites the last element with `val` (the sentinel) so the
    scan loop is guaranteed to terminate, then restores it. Returns the index
    of the first occurrence of val, or 'Not found'. Assumes a non-empty
    array; the statement order below (plant / scan / restore / decide) is
    load-bearing.
    """
    last = array[-1]
    # Plant the sentinel: the while loop can now never run off the end.
    array[-1] = val
    index = 0
    answer = 'Not found'
    while array[index] != val:
        index += 1
    # Undo the mutation before deciding whether the hit was genuine.
    array[-1] = last
    # A hit before the last slot is always real; a hit at the last slot
    # counts only if the (restored) last element really equals val.
    if index < len - 1 or array[-1] == val:
        return index
    return answer
def recursive_linear_search(array, val, len, i):
    """Return the index of the first occurrence of val in array[i:len],
    or "Not found".

    Bug fix: the original mixed a full while-loop scan with a trailing
    recursive call made with an unchanged `i`; on the not-found path that
    call recursed forever (RecursionError). This is a proper recursion:
    check position i, then recurse on i + 1 with an explicit base case.
    """
    if i >= len:
        return "Not found"
    if array[i] == val:
        return i
    return recursive_linear_search(array, val, len, i + 1)
# Demo: look up `val` in `array` with each of the search variants above.
array = [11,9,3,7,4,8]
val = 8
n = len(array)
# All four variants should print 5 (the index of 8).
print(linear_search(array, val, n))
print(better_linear_search(array, val, n))
print(sentinel_linear_search(array, val, n))
print(recursive_linear_search(array, val, n, 0))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename the `Like` model to `ProductLike` in the snippets app."""

    dependencies = [
        ('snippets', '0002_remove_like_whether_like'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Like',
            new_name='ProductLike',
        ),
    ]
|
import sys
def get_min_tapes(pts=None, tape_len=None):
    """Greedy count of fixed-length tapes needed to cover all leak points.

    pts      -- leak positions sorted ascending (defaults to the module-level
                global ``points``, so existing ``get_min_tapes()`` call sites
                keep working)
    tape_len -- length L of each tape (defaults to the global ``L``)

    Whenever a point is not covered by the current tape, a new tape is
    started at that point, covering [point, point + L - 1].
    (The needless ``global`` statement was removed: the globals are only
    read, never assigned.)
    """
    if pts is None:
        pts = points
    if tape_len is None:
        tape_len = L
    cnt, covered_until = 0, -1
    for point in pts:
        if point > covered_until:
            # Start a new tape here; it reaches point + L - 1.
            covered_until = point + tape_len - 1
            cnt += 1
    return cnt
if __name__ == '__main__':
    # Input format: first line "N L" (number of leaks, tape length),
    # second line the leak positions. N itself is not used by the greedy
    # computation -- only the sorted positions matter.
    N, L = map(int, input().split())
    points = sorted(list(map(int, sys.stdin.readline().split())))
    print(get_min_tapes())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.3.0 (http://hl7.org/fhir/StructureDefinition/EligibilityRequest) on 2018-05-12.
# 2018, SMART Health IT.
from . import domainresource
class EligibilityRequest(domainresource.DomainResource):
    """ Determine insurance validity and scope of coverage.

    The EligibilityRequest provides patient and insurance coverage information
    to an insurer for them to respond, in the form of an EligibilityResponse,
    with information regarding whether the stated coverage is valid and in-
    force and optionally to provide the insurance details of the policy.
    """

    # NOTE: generated from the FHIR 3.3.0 StructureDefinition (see the file
    # header); schema changes belong in the generator, not in hand edits.
    resource_type = "EligibilityRequest"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.authorization = None
        """ Services which may require prior authorization.
        List of `EligibilityRequestAuthorization` items (represented as `dict` in JSON). """

        self.benefitCategory = None
        """ Type of services covered.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.benefitSubCategory = None
        """ Detailed services covered within the type.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.businessArrangement = None
        """ Business agreement.
        Type `str`. """

        self.coverage = None
        """ Insurance or medical plan.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.created = None
        """ Creation date.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.enterer = None
        """ Author.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.facility = None
        """ Servicing Facility.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.identifier = None
        """ Business Identifier.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.insurer = None
        """ Target.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.patient = None
        """ The subject of the Products and Services.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.priority = None
        """ Desired processing priority.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.provider = None
        """ Responsible practitioner.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.servicedDate = None
        """ Estimated date or dates of Service.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.servicedPeriod = None
        """ Estimated date or dates of Service.
        Type `Period` (represented as `dict` in JSON). """

        self.status = None
        """ active | cancelled | draft | entered-in-error.
        Type `str`. """

        super(EligibilityRequest, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(EligibilityRequest, self).elementProperties()
        # Tuple layout appears to be (name, json name, type, is_list,
        # one-of-many group, required) -- e.g. 'servicedDate'/'servicedPeriod'
        # share the "serviced" group. TODO(review): confirm against the
        # FHIRAbstractBase implementation.
        js.extend([
            ("authorization", "authorization", EligibilityRequestAuthorization, True, None, False),
            ("benefitCategory", "benefitCategory", codeableconcept.CodeableConcept, False, None, False),
            ("benefitSubCategory", "benefitSubCategory", codeableconcept.CodeableConcept, False, None, False),
            ("businessArrangement", "businessArrangement", str, False, None, False),
            ("coverage", "coverage", fhirreference.FHIRReference, False, None, False),
            ("created", "created", fhirdate.FHIRDate, False, None, False),
            ("enterer", "enterer", fhirreference.FHIRReference, False, None, False),
            ("facility", "facility", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("insurer", "insurer", fhirreference.FHIRReference, False, None, False),
            ("patient", "patient", fhirreference.FHIRReference, False, None, False),
            ("priority", "priority", codeableconcept.CodeableConcept, False, None, False),
            ("provider", "provider", fhirreference.FHIRReference, False, None, False),
            ("servicedDate", "servicedDate", fhirdate.FHIRDate, False, "serviced", False),
            ("servicedPeriod", "servicedPeriod", period.Period, False, "serviced", False),
            ("status", "status", str, False, None, False),
        ])
        return js
from . import backboneelement
class EligibilityRequestAuthorization(backboneelement.BackboneElement):
    """ Services which may require prior authorization.

    A list of billable services for which an authorization prior to service
    delivery may be required by the payor.
    """

    # NOTE: generated code (FHIR 3.3.0) -- see the file header.
    resource_type = "EligibilityRequestAuthorization"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.diagnosis = None
        """ List of Diagnosis.
        List of `EligibilityRequestAuthorizationDiagnosis` items (represented as `dict` in JSON). """

        self.facility = None
        """ Servicing Facility.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.modifier = None
        """ Service/Product billing modifiers.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.quantity = None
        """ Count of products or services.
        Type `Quantity` (represented as `dict` in JSON). """

        self.sequence = None
        """ Procedure sequence for reference.
        Type `int`. """

        self.service = None
        """ Billing Code.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.unitPrice = None
        """ Fee, charge or cost per point.
        Type `Money` (represented as `dict` in JSON). """

        super(EligibilityRequestAuthorization, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(EligibilityRequestAuthorization, self).elementProperties()
        # `sequence` and `service` carry True in the final slot -- presumably
        # "required"; confirm against FHIRAbstractBase.
        js.extend([
            ("diagnosis", "diagnosis", EligibilityRequestAuthorizationDiagnosis, True, None, False),
            ("facility", "facility", fhirreference.FHIRReference, False, None, False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("sequence", "sequence", int, False, None, True),
            ("service", "service", codeableconcept.CodeableConcept, False, None, True),
            ("unitPrice", "unitPrice", money.Money, False, None, False),
        ])
        return js
class EligibilityRequestAuthorizationDiagnosis(backboneelement.BackboneElement):
    """ List of Diagnosis.

    List of patient diagnosis for which care is sought.
    """

    # NOTE: generated code (FHIR 3.3.0) -- see the file header.
    resource_type = "EligibilityRequestAuthorizationDiagnosis"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.diagnosisCodeableConcept = None
        """ Patient's diagnosis.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.diagnosisReference = None
        """ Patient's diagnosis.
        Type `FHIRReference` (represented as `dict` in JSON). """

        super(EligibilityRequestAuthorizationDiagnosis, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(EligibilityRequestAuthorizationDiagnosis, self).elementProperties()
        # Both entries share the "diagnosis" one-of-many group: a resource
        # carries either the codeable concept or the reference, not both.
        js.extend([
            ("diagnosisCodeableConcept", "diagnosisCodeableConcept", codeableconcept.CodeableConcept, False, "diagnosis", False),
            ("diagnosisReference", "diagnosisReference", fhirreference.FHIRReference, False, "diagnosis", False),
        ])
        return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import identifier
from . import money
from . import period
from . import quantity
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from .models import *
import unittest
class AddProductTest(TestCase):
    """Tests the "sell" form where a user adds a new product for sale

    Model(s): Product, ProductType, User
    Template(s): create.html, product_details.html
    Author(s): Nolan Little
    """

    def test_product_form_template(self):
        # NOTE(review): the response is fetched but nothing is asserted, so
        # this only verifies the view doesn't raise. Presumably it should
        # check a status code / template name -- confirm whether the view
        # requires login before adding an assertion.
        response = self.client.get(reverse('website:sell'))

    def test_product_form_validation(self):
        # Prerequisites for posting the sell form: a logged-in user and an
        # existing product category.
        self.user = User.objects.create_user(username='testuser', password='12345')
        login = self.client.login(username='testuser', password='12345')
        category = ProductType.objects.create(productCategory="Toys", deleted = 0)
        # Valid data: expected to redirect (302) after a successful create.
        valid_response = self.client.post(reverse('website:sell'), {
            'title': 'bike',
            'description': 'It\'s a bike',
            'price': 10,
            'quantity': 1,
            'category': 1
        }
        )
        # Invalid data (non-positive price/quantity): expected to re-render
        # the form with errors (200, no redirect).
        invalid_response = self.client.post(reverse('website:sell'), {
            'title': 'bike',
            'description': 'It\'s a bike',
            'price': 0,
            'quantity': -1,
            'category': 1
        }
        )
        self.assertEqual(valid_response.status_code, 302)
        self.assertEqual(invalid_response.status_code, 200)
        self.assertNotEqual(invalid_response.status_code, 302)
class OrderHistoryTest(TestCase):
    """Tests the order history view accessed from user profile
    Model(s): User, Order, ProductOrder, Product, PaymentType, PaymentMethod
    Template(s): order_history.html, order_history_detail.html
    Author(s): Nolan Little
    """

    def _create_order(self):
        """Create a logged-in user plus one product order; return the Order.

        Shared fixture builder: the two order-history tests previously
        duplicated this setup line for line.
        """
        self.user = User.objects.create_user(username='testuser', password='12345')
        self.client.login(username='testuser', password='12345')
        customer = Customer.objects.create(user=self.user, address="test", phoneNumber="test", deleted=0)
        ProductType.objects.create(productCategory="Toys", deleted=0)
        pay_type = PaymentType.objects.create(paymentCategory="credit card", deleted=0)
        payment_method = PaymentMethod.objects.create(accountNumber=1, customerPayment=customer, paymentName=pay_type)
        product_type = ProductType.objects.create(productCategory="toys", deleted=0)
        product = Product.objects.create(
            title="bike",
            description="its a bike",
            price=1.00,
            quantity=1,
            image="https://hips.hearstapps.com/vader-prod.s3.amazonaws.com/1544049078-gifts-for-brothers-jabra-ear-buds-1541438364.jpg?crop=1xw:1xh;center,top&resize=768:*",
            deleted=0,
            customer=self.user,
            productType=product_type,
        )
        order = Order.objects.create(deleted=0, customerOrder=customer, paymentOrder=payment_method)
        ProductOrder.objects.create(deleted=0, order=order, product=product)
        return order

    def test_order_history_res(self):
        self._create_order()
        response = self.client.get(reverse('website:order_history', kwargs={'pk': self.user.id}))
        self.assertEqual(response.status_code, 200)

    def test_order_history_detail_res(self):
        order = self._create_order()
        # FIX: use the real order pk instead of assuming the first order
        # is created with id 1.
        response = self.client.get(reverse('website:order_history_details',
                                           kwargs={'pk': self.user.id, 'order_id': order.id}))
        self.assertEqual(response.status_code, 200)

    def test_addition(self):
        # Sanity-check test; exercises nothing in the application.
        a = 3
        b = 5
        self.assertEqual(a + b, 8)
###PRIVATE PREAMBLE###
import numpy as np
from utils.util_data import integers_to_symbols, add_cartesian_awgn as add_awgn
###PRIVATE PREAMBLE###
def trainer(*,
            agents,
            bits_per_symbol: int,
            batch_size: int,
            train_SNR_db: float,
            signal_power: float = 1.0,
            backwards_only: bool = False,
            **kwargs
            ):
    """Run one step of echo-protocol training between two agents.

    Agent A modulates a fresh random preamble (and, when it holds symbols
    to echo, also modulates that echo); both signals pass through an AWGN
    channel; agent B then updates its modulator/demodulator from the
    echoed preamble and demodulates A's new preamble as its next echo.

    Returns the (mutated) agents A and B and the number of batches sent.

    NOTE(review): ``backwards_only`` and ``**kwargs`` are accepted but
    never read in this body — confirm whether that is intentional.
    NOTE(review): ``batches_sent`` counts only the round-trip update
    (+2); a first call with ``A.to_echo is None`` returns 0 — confirm.
    """
    integers_to_symbols_map = integers_to_symbols(np.arange(0, 2 ** bits_per_symbol), bits_per_symbol)
    A = agents[0]
    B = agents[1]
    batches_sent = 0
    # Fresh random message for this step, mapped to bit symbols.
    integers = np.random.randint(low=0, high=2 ** bits_per_symbol, size=[batch_size])
    preamble = integers_to_symbols_map[integers] # new shared preamble
    # A
    if A.to_echo is not None:
        # Echo back what A previously demodulated from B.
        c_signal_backward = A.mod.modulate(A.to_echo, mode='explore', dtype='cartesian')
    c_signal_forward = A.mod.modulate(preamble, mode='explore', dtype='cartesian')
    # Remember what A sent so its own update can use it later.
    A.preamble = preamble
    A.actions = c_signal_forward
    # Channel
    if A.to_echo is not None:
        c_signal_backward_noisy = add_awgn(c_signal_backward, SNR_db=train_SNR_db, signal_power=signal_power)
    c_signal_forward_noisy = add_awgn(c_signal_forward, SNR_db=train_SNR_db, signal_power=signal_power)
    # B
    if A.to_echo is not None:
        # Update mod and demod after a roundtrip pass
        preamble_roundtrip = B.demod.demodulate(c_signal_backward_noisy)
        B.mod.update(B.preamble, B.actions, preamble_roundtrip)
        B.demod.update(c_signal_backward_noisy, B.preamble)
        batches_sent += 2
    # guess of new preamble
    B.to_echo = B.demod.demodulate(c_signal_forward_noisy)
    return A, B, batches_sent
|
import numpy as np
from skimage import img_as_ubyte
# Public API of this image-normalization module.
__all__ = [
    "percentile_normalize",
    "percentile_normalize99",
    "normalize",
    "minmax_normalize",
    "float2ubyte",
]
def percentile_normalize(
    img: np.ndarray, lower: float = 0.01, upper: float = 99.99
) -> np.ndarray:
    """Channelwise percentile normalization to range [0, 1].

    Values below the lower percentile map to 0 and values above the upper
    percentile map to 1 (the clamping behavior np.interp used to provide).

    Parameters
    ----------
    img : np.ndarray:
        Input image to be normalized. Shape (H, W, C)|(H, W).
    lower : float, default=0.01:
        The lower percentile
    upper : float, default=99.99:
        The upper percentile

    Returns
    -------
    np.ndarray:
        Normalized img. Same shape as input. dtype: float32.

    Raises
    ------
    ValueError
        If input image does not have shape (H, W) or (H, W, C).
    """
    axis = (0, 1)
    if img.ndim not in (2, 3):
        raise ValueError(
            f"Input img needs to have shape (H, W, C)|(H, W). Got: {img.shape}"
        )
    im = img.astype(np.float32)
    # BUG FIX: percentiles were previously computed over the whole array
    # (not per channel, despite the docstring), and the ``axis`` tuple was
    # accidentally passed to np.interp as its output range ``fp`` — it only
    # worked because axis == (0, 1) happens to equal the intended range.
    lperc = np.percentile(im, lower, axis=axis, keepdims=True)
    uperc = np.percentile(im, upper, axis=axis, keepdims=True)
    im = (im - lperc) / (uperc - lperc + 1e-7)
    # Clamp outside the percentile window, matching np.interp's behavior.
    return np.clip(im, 0.0, 1.0).astype(np.float32)
def percentile_normalize99(
img: np.ndarray, amin: float = None, amax: float = None
) -> np.ndarray:
"""Channelwise 1-99 percentile normalization. Optional clamping.
Parameters
----------
img : np.ndarray:
Input image to be normalized. Shape (H, W, C)|(H, W).
amin : float, optional
Clamp min value. No clamping performed if None.
amax : float, optional
Clamp max value. No clamping performed if None.
Returns
-------
np.ndarray:
Normalized image. Same shape as input. dtype: float32.
Raises
------
ValueError
If input image does not have shape (H, W) or (H, W, C).
"""
axis = (0, 1)
if img.ndim not in (2, 3):
raise ValueError(
f"Input img needs to have shape (H, W, C)|(H, W). Got: {img.shape}"
)
im = img.copy()
percentile1 = np.percentile(im, q=1, axis=axis)
percentile99 = np.percentile(im, q=99, axis=axis)
im = (im - percentile1) / (percentile99 - percentile1 + 1e-7)
# clamp
if not any(x is None for x in (amin, amax)):
im = np.clip(im, a_min=amin, a_max=amax)
return im.astype(np.float32)
def normalize(
    img: np.ndarray, standardize: bool = True, amin: float = None, amax: float = None
) -> np.ndarray:
    """Channelwise mean centering or standardizing of an image. Optional clamping.

    Parameters
    ----------
    img : np.ndarray
        Input image to be normalized. Shape (H, W, C)|(H, W).
    standardize: bool, default=True
        If True, divide with standard deviation after mean centering
    amin : float, optional
        Clamp min value. No clamping performed if None.
    amax : float, optional
        Clamp max value. No clamping performed if None.

    Returns
    -------
    np.ndarray:
        Normalized image. Same shape as input. dtype: float32.

    Raises
    ------
    ValueError
        If input image does not have shape (H, W) or (H, W, C).
    """
    axis = (0, 1)
    if img.ndim not in (2, 3):
        raise ValueError(
            f"Input img needs to have shape (H, W, C)|(H, W). Got: {img.shape}"
        )
    im = img.copy()
    # mean center (per channel)
    im = im - im.mean(axis=axis, keepdims=True)
    if standardize:
        im = im / (im.std(axis=axis, keepdims=True) + 1e-8)
    # BUG FIX: clamping previously required BOTH bounds to be set; each is
    # independently optional per the docstring (np.clip accepts None).
    if amin is not None or amax is not None:
        im = np.clip(im, a_min=amin, a_max=amax)
    return im.astype(np.float32)
def minmax_normalize(
    img: np.ndarray, amin: float = None, amax: float = None
) -> np.ndarray:
    """Min-max normalization per image channel. Optional clamping.

    Parameters
    ----------
    img : np.ndarray:
        Input image to be normalized. Shape (H, W, C)|(H, W).
    amin : float, optional
        Clamp min value. No clamping performed if None.
    amax : float, optional
        Clamp max value. No clamping performed if None.

    Returns
    -------
    np.ndarray:
        Min-max normalized image. Same shape as input. dtype: float32.

    Raises
    ------
    ValueError
        If input image does not have shape (H, W) or (H, W, C).
    """
    if img.ndim not in (2, 3):
        raise ValueError(
            f"Input img needs to have shape (H, W, C)|(H, W). Got: {img.shape}"
        )
    im = img.astype(np.float32)
    # BUG FIX: min/max were previously computed over the whole array even
    # though the docstring promises per-channel normalization (and the
    # sibling functions normalize per channel over axis (0, 1)).
    axis = (0, 1)
    mn = im.min(axis=axis, keepdims=True)
    mx = im.max(axis=axis, keepdims=True)
    im = (im - mn) / (mx - mn + 1e-8)
    # BUG FIX: clamping previously required BOTH bounds to be set; each is
    # independently optional (np.clip accepts None for either side).
    if amin is not None or amax is not None:
        im = np.clip(im, a_min=amin, a_max=amax)
    return im.astype(np.float32)
def float2ubyte(mat: np.ndarray, normalize: bool = False) -> np.ndarray:
    """Convert a float image to uint8.

    ``img_as_ubyte`` requires float input in [-1, 1], so the matrix is
    first either min-max normalized to [0, 1] or clipped to [-1, 1].

    Parameters
    ----------
    mat : np.ndarray
        A float64 matrix. Shape (H, W, C).
    normalize (bool, default=False):
        Normalizes input to [0, 1] first. If not True,
        clips values between [-1, 1].

    Returns
    -------
    np.ndarray:
        A uint8 matrix. Shape (H, W, C). dtype: uint8.
    """
    scaled = mat.copy()
    if normalize:
        scaled = minmax_normalize(scaled)
    else:
        scaled = np.clip(scaled, a_min=-1, a_max=1)
    return img_as_ubyte(scaled)
|
# Paths to the local Stanford CoreNLP installation.
STANFORD_JAR = "/home/dang/Desktop/stanford-corenlp-full-2016-10-31/stanford-corenlp-3.7.0.jar"
STANFORD_MODEL = "/home/dang/Desktop/stanford-corenlp-full-2016-10-31/stanford-corenlp-3.7.0-models.jar"

from nltk.parse.stanford import StanfordParser


def sfParser(jar_path=STANFORD_JAR, model_path=STANFORD_MODEL):
    """Build a StanfordParser for the given jar and model paths.

    BUG FIX: the parameters were previously ignored — the function always
    passed the module constants, so callers could never point it at a
    different CoreNLP installation.
    """
    return StanfordParser(jar_path, model_path)
import seaborn as sns
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
tips = sns.load_dataset("tips")

# FIX: seaborn >= 0.12 removed positional data-variable arguments to
# jointplot(); pass x/y as keywords so the calls work on current releases.
g = sns.jointplot(x="total_bill", y="tip",
                  data=tips, kind="hex",
                  xlim=(0, 60), ylim=(0, 12))
k = sns.jointplot(x="total_bill", y="tip",
                  data=tips, kind="hex",
                  xlim=(0, 60), ylim=(0, 12),
                  color="k", height=7)
# 2D
def fm(x, y):
    """Surface function: a fast sine ripple in x plus a slow quadratic in y."""
    ripple = 20 * np.sin(10 * np.pi * x)
    bowl = 0.05 * y ** 2
    return ripple + bowl
# Sample fm on a 50x50 grid and render it as a 3D surface.
x = np.linspace(0, 100, 50)
y = np.linspace(0, 100, 50)
X, Y = np.meshgrid(x, y)
print(X.shape)
Z = fm(X, Y)
print(Z.shape)

fig = plt.figure()
# FIX: fig.gca(projection='3d') was deprecated in matplotlib 3.4 and later
# removed; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(X, Y, Z,
                       rstride=2, cstride=2,
                       cmap=mpl.cm.coolwarm,
                       linewidth=0.5,
                       antialiased=True)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f(x,y)')
fig.colorbar(surf, shrink=1, aspect=20)
plt.show()
""" making a code for withdrawal """
savings_account = 100000
current_account = 100000
withdrawal = 0
account_type = int(input("""
enter the account type
1. savings 2. current
>>> """.title()))
if account_type == 1:
prompt = int(input("\nenter amount\n>>> ".title()))
if prompt <= savings_account:
print("your withdrawal is successful".title())
elif prompt > savings_account:
print("\ninsufficient funds".title())
elif account_type == 2:
prompt = int(input("\nenter amount\n>>> ".title()))
if prompt <= current_account:
print("your withdrawal is successful".title())
elif prompt > current_account:
print("\ninsufficient funds".title())
else:
print("error, wrong input")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def no_op(apps, schema_editor):
    """Reverse-migration hook that intentionally does nothing."""
    return None
# The following data was downloaded from the world bank dataset
# http://databank.worldbank.org/data/views/reports/metadataview.aspx
# Latitude/longitude data found in
# https://opendata.socrata.com/views/mnkm-8ram/files/k_EEIsAP9vAU4a3vRQw6-zNBqghV_zSVK76s0Rongow
def create_countries(apps, schema_editor):
    """Seed/refresh the Country table with names and lat/long coordinates.

    Data source: World Bank metadata plus the Socrata lat/long dataset
    linked above.  Existing rows are matched by name and their coordinates
    refreshed; missing rows are created.

    BUG FIX: two names used SQL-style doubled apostrophes ('Cote
    d''Ivoire', 'Dem. People''s Rep. Korea'); Python concatenates adjacent
    string literals, so they were silently inserted as "Cote dIvoire" and
    "Dem. Peoples Rep. Korea".  They now use a real apostrophe.

    NOTE(review): the positive longitudes for Curacao, Sint Maarten and
    St. Martin look suspect (those islands lie west of Greenwich), and
    'St. Martin ' carries a trailing space — left unchanged because the
    names are lookup keys; verify against the upstream dataset.
    """
    Country = apps.get_model('portal_pages', 'Country')
    country_data = [
        ['Afghanistan', 33.00, 65.00],
        ['Albania', 41.00, 20.00],
        ['Algeria', 28.00, 3.00],
        ['American Samoa', -14.33, -170.00],
        ['Andorra', 42.50, 1.60],
        ['Angola', -12.50, 18.50],
        ['Antigua and Barbuda', 17.05, -61.80],
        ['Argentina', -34.00, -64.00],
        ['Armenia', 40.00, 45.00],
        ['Aruba', 12.50, -69.97],
        ['Australia', -27.00, 133.00],
        ['Austria', 47.33, 13.33],
        ['Azerbaijan', 40.50, 47.50],
        ['The Bahamas', 24.25, -76.00],
        ['Bahrain', 26.00, 50.55],
        ['Bangladesh', 24.00, 90.00],
        ['Barbados', 13.17, -59.53],
        ['Belarus', 53.00, 28.00],
        ['Belgium', 50.83, 4.00],
        ['Belize', 17.25, -88.75],
        ['Benin', 9.50, 2.25],
        ['Bermuda', 32.33, -64.75],
        ['Bhutan', 27.50, 90.50],
        ['Bolivia', -17.00, -65.00],
        ['Bosnia and Herzegovina', 44.00, 18.00],
        ['Botswana', -22.00, 24.00],
        ['Brazil', -10.00, -55.00],
        ['Brunei', 4.50, 114.67],
        ['Bulgaria', 43.00, 25.00],
        ['Burkina Faso', 13.00, -2.00],
        ['Burundi', -3.50, 30.00],
        ['Cabo Verde', 16.00, -24.00],
        ['Cambodia', 13.00, 105.00],
        ['Cameroon', 6.00, 12.00],
        ['Canada', 60.00, -95.00],
        ['Cayman Islands', 19.50, -80.50],
        ['Central African Republic', 7.00, 21.00],
        ['Chad', 15.00, 19.00],
        ['Channel Islands', 49.40, 2.30],
        ['Chile', -30.00, -71.00],
        ['China', 35.00, 105.00],
        ['Colombia', 4.00, -72.00],
        ['Comoros', -12.17, 44.25],
        ['Dem. Rep. Congo', 0.00, 25.00],
        ['Congo', -1.00, 15.00],
        ['Costa Rica', 10.00, -84.00],
        ["Cote d'Ivoire", 8.00, -5.00],
        ['Croatia', 45.17, 15.50],
        ['Cuba', 21.50, -80.00],
        ['Curacao', 12.20, 69.00],
        ['Cyprus', 35.00, 33.00],
        ['Czech Republic', 49.75, 15.50],
        ['Denmark', 56.00, 10.00],
        ['Djibouti', 11.50, 43.00],
        ['Dominica', 15.42, -61.33],
        ['Dominican Republic', 19.00, -70.67],
        ['Ecuador', -2.00, -77.50],
        ['Egypt', 27.00, 30.00],
        ['El Salvador', 13.83, -88.92],
        ['Equatorial Guinea', 2.00, 10.00],
        ['Eritrea', 15.00, 39.00],
        ['Estonia', 59.00, 26.00],
        ['Ethiopia', 8.00, 38.00],
        ['Faeroe Islands', 62.00, -7.00],
        ['Fiji', -18.00, 175.00],
        ['Finland', 64.00, 26.00],
        ['France', 46.00, 2.00],
        ['French Polynesia', -15.00, -140.00],
        ['Gabon', -1.00, 11.75],
        ['The Gambia', 13.47, -16.57],
        ['Georgia', 42.00, 43.50],
        ['Germany', 51.00, 9.00],
        ['Ghana', 8.00, -2.00],
        ['Greece', 39.00, 22.00],
        ['Greenland', 72.00, -40.00],
        ['Grenada', 12.12, -61.67],
        ['Guam', 13.47, 144.78],
        ['Guatemala', 15.50, -90.25],
        ['Guinea', 11.00, -10.00],
        ['Guinea-Bissau', 12.00, -15.00],
        ['Guyana', 5.00, -59.00],
        ['Haiti', 19.00, -72.42],
        ['Honduras', 15.00, -86.50],
        ['Hong Kong SAR, China', 22.25, 114.17],
        ['Hungary', 47.00, 20.00],
        ['Iceland', 65.00, -18.00],
        ['India', 20.00, 77.00],
        ['Indonesia', -5.00, 120.00],
        ['Iran', 32.00, 53.00],
        ['Iraq', 33.00, 44.00],
        ['Ireland', 53.00, -8.00],
        ['Isle of Man', 54.23, -4.55],
        ['Israel', 31.50, 34.75],
        ['Italy', 42.83, 12.83],
        ['Jamaica', 18.25, -77.50],
        ['Japan', 36.00, 138.00],
        ['Jordan', 31.00, 36.00],
        ['Kazakhstan', 48.00, 68.00],
        ['Kenya', 1.00, 38.00],
        ['Kiribati', 1.42, 173.00],
        ["Dem. People's Rep. Korea", 40.00, 127.00],
        ['Korea', 37.00, 127.50],
        ['Kosovo', 42.60, 20.90],
        ['Kuwait', 29.34, 47.66],
        ['Kyrgyz Republic', 41.00, 75.00],
        ['Lao PDR', 18.00, 105.00],
        ['Latvia', 57.00, 25.00],
        ['Lebanon', 33.83, 35.83],
        ['Lesotho', -29.50, 28.50],
        ['Liberia', 6.50, -9.50],
        ['Libya', 25.00, 17.00],
        ['Liechtenstein', 47.17, 9.53],
        ['Lithuania', 56.00, 24.00],
        ['Luxembourg', 49.75, 6.17],
        ['Macao SAR, China', 22.17, 113.55],
        ['Macedonia', 41.83, 22.00],
        ['Madagascar', -20.00, 47.00],
        ['Malawi', -13.50, 34.00],
        ['Malaysia', 2.50, 112.50],
        ['Maldives', 3.25, 73.00],
        ['Mali', 17.00, -4.00],
        ['Malta', 35.83, 14.58],
        ['Marshall Islands', 9.00, 168.00],
        ['Mauritania', 20.00, -12.00],
        ['Mauritius', -20.28, 57.55],
        ['Mexico', 23.00, -102.00],
        ['Micronesia', 6.92, 158.25],
        ['Moldova', 47.00, 29.00],
        ['Monaco', 43.73, 7.40],
        ['Mongolia', 46.00, 105.00],
        ['Montenegro', 42.00, 19.00],
        ['Morocco', 32.00, -5.00],
        ['Mozambique', -18.25, 35.00],
        ['Myanmar', 22.00, 98.00],
        ['Namibia', -22.00, 17.00],
        ['Nepal', 28.00, 84.00],
        ['Netherlands', 52.50, 5.75],
        ['New Caledonia', -21.50, 165.50],
        ['New Zealand', -41.00, 174.00],
        ['Nicaragua', 13.00, -85.00],
        ['Niger', 16.00, 8.00],
        ['Nigeria', 10.00, 8.00],
        ['Northern Mariana Islands', 15.20, 145.75],
        ['Norway', 62.00, 10.00],
        ['Oman', 21.00, 57.00],
        ['Pakistan', 30.00, 70.00],
        ['Palau', 7.50, 134.50],
        ['Panama', 9.00, -80.00],
        ['Papua New Guinea', -6.00, 147.00],
        ['Paraguay', -23.00, -58.00],
        ['Peru', -10.00, -76.00],
        ['Philippines', 13.00, 122.00],
        ['Poland', 52.00, 20.00],
        ['Portugal', 39.50, -8.00],
        ['Puerto Rico', 18.25, -66.50],
        ['Qatar', 25.50, 51.25],
        ['Romania', 46.00, 25.00],
        ['Russia', 60.00, 100.00],
        ['Rwanda', -2.00, 30.00],
        ['Samoa', -13.58, -172.33],
        ['San Marino', 43.77, 12.42],
        ['Sao Tome and Principe', 1.00, 7.00],
        ['Saudi Arabia', 25.00, 45.00],
        ['Senegal', 14.00, -14.00],
        ['Serbia', 44.00, 21.00],
        ['Seychelles', -4.58, 55.67],
        ['Sierra Leone', 8.50, -11.50],
        ['Singapore', 1.37, 103.80],
        ['Sint Maarten (Dutch part)', 18.00, 63.00],
        ['Slovak Republic', 48.67, 19.50],
        ['Slovenia', 46.00, 15.00],
        ['Solomon Islands', -8.00, 159.00],
        ['Somalia', 10.00, 49.00],
        ['South Africa', -29.00, 24.00],
        ['South Sudan', 4.90, 31.60],
        ['Spain', 40.00, -4.00],
        ['Sri Lanka', 7.00, 81.00],
        ['St. Kitts and Nevis', 17.33, -62.75],
        ['St. Lucia', 13.88, -61.13],
        ['St. Martin ', 18.00, 63.00],
        ['St. Vincent and the Grenadines', 13.25, -61.20],
        ['Sudan', 15.00, 30.00],
        ['Suriname', 4.00, -56.00],
        ['Swaziland', -26.50, 31.50],
        ['Sweden', 62.00, 15.00],
        ['Switzerland', 47.00, 8.00],
        ['Syrian Arab Republic', 35.00, 38.00],
        ['Tajikistan', 39.00, 71.00],
        ['Tanzania', -6.00, 35.00],
        ['Thailand', 15.00, 100.00],
        ['Timor-Leste', -8.55, 125.52],
        ['Togo', 8.00, 1.17],
        ['Tonga', -20.00, -175.00],
        ['Trinidad and Tobago', 11.00, -61.00],
        ['Tunisia', 34.00, 9.00],
        ['Turkey', 39.00, 35.00],
        ['Turkmenistan', 40.00, 60.00],
        ['Turks and Caicos Islands', 21.75, -71.58],
        ['Tuvalu', -8.00, 178.00],
        ['Uganda', 1.00, 32.00],
        ['Ukraine', 49.00, 32.00],
        ['United Arab Emirates', 24.00, 54.00],
        ['United Kingdom', 54.00, -2.00],
        ['United States', 38.00, -97.00],
        ['Uruguay', -33.00, -56.00],
        ['Uzbekistan', 41.00, 64.00],
        ['Vanuatu', -16.00, 167.00],
        ['Venezuela', 8.00, -66.00],
        ['Vietnam', 16.00, 106.00],
        ['Virgin Islands', 18.50, -64.50],
        ['West Bank and Gaza', 32.00, 35.40],
        ['Yemen', 15.00, 48.00],
        ['Zambia', -15.00, 30.00],
        ['Zimbabwe', -20.00, 30.00],
    ]
    for name, lat, lng in country_data:
        # Create the row if missing and always refresh its coordinates —
        # one call instead of the old get_or_create + filter().update().
        Country.objects.update_or_create(
            name=name, defaults={'latitude': lat, 'longitude': lng}
        )
class Migration(migrations.Migration):
    # Data migration: seed the Country table with names and coordinates.
    # Reversal is a no-op, so unapplying leaves the seeded data in place.
    dependencies = [
        ('portal_pages', '0022_country_modifications'),
    ]
    operations = [
        migrations.RunPython(create_countries, no_op),
    ]
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
from ..utils.registry import Registry
MODELS = Registry("MODELS")
BACKBONES = Registry("BACKBONES")
NECKS = Registry("NECKS")
HEADS = Registry("HEADS")
BRICKS = Registry("BRICKS")
STEMS = BRICKS
LOSSES = Registry("LOSSES")
|
import json
import general
def get(memb):
    """Return the highest permission level (0-3) granted by a member's roles.

    The role-name lists for levels 1-3 are re-read from the settings file
    on every call, so edits take effect without a restart.  Returns 0 when
    none of the member's role names appear in any list.
    """
    with open(general.settings) as f:
        settings = json.load(f)
    # NOTE: the original also called f.close() inside the with-block; the
    # context manager already closes the file, so that call was redundant.
    perms = settings["perms"]
    lvl1 = perms["lvl1"]
    lvl2 = perms["lvl2"]
    lvl3 = perms["lvl3"]
    best = 0
    for r in memb.roles:
        if r.name in lvl3:
            best = max(best, 3)
        elif r.name in lvl2:
            best = max(best, 2)
        elif r.name in lvl1:
            best = max(best, 1)
    return best
def check(memb, lvl):
    """Return True if the member's permission level is at least ``lvl``."""
    member_level = get(memb)
    return member_level >= lvl
|
import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripChannelGain.SiStripGainsPCLHarvester_cfi import SiStripGainsPCLHarvester
# Clone the standard SiStrip gains PCL harvester and retarget it at the
# AAG ("Aag bunch") calibration stream: its own calibration mode, DQM
# output folder and database record.
alcaSiStripGainsAAGHarvester = SiStripGainsPCLHarvester.clone()
alcaSiStripGainsAAGHarvester.calibrationMode = cms.untracked.string('AagBunch')
alcaSiStripGainsAAGHarvester.DQMdir = cms.untracked.string('AlCaReco/SiStripGainsAAG')
alcaSiStripGainsAAGHarvester.Record = cms.untracked.string('SiStripApvGainRcdAAG')
|
#!/usr/bin/python3
# Platform module
import platform
# Show the full platform string, then the aliased and terse variants,
# using keyword arguments instead of the positional 0/1 flags.
print(platform.platform())
print(platform.platform(aliased=True))
print(platform.platform(terse=True))
|
from sympy import *
from sympy.parsing.latex import parse_latex
import os
import subprocess
# import random
from PyQt5.QtWidgets import QDialog
class vh_bbt_b3(QDialog):
    """Dialog that builds a variation table (BBT) for a cubic function.

    Reads a LaTeX-formatted function from the ``lne_nhap`` line edit,
    classifies it by the sign of its leading coefficient and the number
    of real critical points, writes a standalone LaTeX document and
    compiles it with pdflatex, then opens the resulting PDF.
    """

    def __init__(self, parent=None):
        super().__init__(parent)

    def bbt_b3(self):
        # Parse the function entered as LaTeX into a sympy expression.
        x = symbols('x')
        hs1 = self.lne_nhap.text()  # function as LaTeX source
        hs2 = parse_latex(hs1)  # function as a sympy expression
        bt = poly(hs2, x)  # treat it as a polynomial
        heso = bt.all_coeffs()  # its coefficients, highest degree first
        a = heso[0]  # leading coefficient decides the table's shape
        dh2 = diff(hs2, x)  # first derivative
        gptdh2 = solveset(dh2, x, domain=S.Reals)  # real critical points
        # NOTE(review): len() assumes solveset returned a FiniteSet; a
        # non-polynomial input yielding an infinite/conditional set would
        # raise here — confirm inputs are restricted to polynomials.
        songhiem = len(gptdh2)
        # BUG FIX: the document was previously written to 'bbt_hs.tex'
        # but compiled and opened as 'bbt_hsb3.tex', so pdflatex never saw
        # the freshly generated content.  Write to the compiled file.
        with open('bbt_hsb3.tex', 'w', encoding='utf-8') as file:
            file.write("\\documentclass{standalone}\n")
            file.write("\\usepackage{amsmath}\n")
            file.write("\\usepackage{luamplib}\n")
            file.write("\\begin{document}\n")
            file.write("\\input{bbt}\n")
            if songhiem == 2:
                # Two critical points: full max/min table.
                x_1 = gptdh2.args[0]
                x_2 = gptdh2.args[1]
                y_1 = hs2.subs(x, x_1)
                y_2 = hs2.subs(x, x_2)
                if a > 0:
                    file.write("\\hsbbd{"+str(latex(x_1))+"}{"+str(latex(x_2))+"}{"+str(latex(y_1))+"}{"+str(latex(y_2))+"}\n")
                if a < 0:
                    file.write("\\hsbba{"+str(latex(x_1))+"}{"+str(latex(x_2))+"}{"+str(latex(y_1))+"}{"+str(latex(y_2))+"}\n")
            if songhiem == 1:
                # One (double) critical point: inflection-style table.
                x_1 = gptdh2.args[0]
                y_1 = hs2.subs(x, x_1)
                if a > 0:
                    file.write("\\hsbbdt{"+str(latex(x_1))+"}{"+str(latex(y_1))+"}\n")
                if a < 0:
                    file.write("\\hsbbdg{"+str(latex(x_1))+"}{"+str(latex(y_1))+"}\n")
            if songhiem == 0:
                # No critical points: strictly monotonic table.
                if a > 0:
                    file.write("\\hsbbngt\n")
                if a < 0:
                    file.write("\\hsbbngg\n")
            file.write("\\end{document}\n")
        # Compile and, on success, open the PDF.  NOTE(review): the string
        # command form of subprocess.call and os.system('start ...') are
        # Windows-specific — confirm the target platform.
        kt = subprocess.call('pdflatex -synctex=1 --shell-escape -interaction=nonstopmode bbt_hsb3.tex')
        if kt != 0:
            print('Exit-code not 0, check result!')
        else:
            os.system('start bbt_hsb3.pdf')
|
#!/usr/bin/env python3
'''
obecnyURL = driver.get_url
driver.close() -> zamyka karte
driver.quit() -> wychodzi?
opis: bot zakupowy -> do sklepow komputronik i x-kom.
jesli dany produkt jest dostepny,
zostanie zakupiony i zamowiony do salonu.
jesli jest niedostepny skryp ma sie przerwac.
docelowo ma zostac zrobiony log do plixku txt, a takze
zrzut ekranu zapisany w lokalizacji pliku skryptu.
Docelowa funkcjonalnosc skryptu:
-> SKRYPCIE ZSH(powloki) ZAINICJOWAC
SKRYPT PYTHON, cyklicznie
python3 -m xbotcombo... [bez .py] > output.txt ->zapisane nasze dziady a >> gdy majabyc nadpisane
dobrze jak skrypt bedzie w zmiennej srodowiskowej
if _name_ == _main_:
bot()
(utworzyc klase bot)
rowniez sprawdzic _init_ itp itd:
petle zrobic jako metody klasy bot
#if name == main: ma za zadanie unikniecie global wariables ktore moglby wplynac zle na program
WSKAZOWKI:
obsluga wyjatkow: https://selenium-python.readthedocs.io/api.html
action chains: -> .perform()
explicit waits -> czekamy az podanny element wyswietlli
sie na stronie: https://selenium-python.readthedocs.io/waits.html
NIEWYKORZYSTANE IMPORTY:
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import InvalidSwitchToTargetException
from selenium.common.exceptions import InvalidElementStateException
import sys
'''
import re
import time
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
# Google search URL expected to surface the x-kom product page as the
# first result.
URLX = 'http://www.google.com/search?q=x-kom+ryzen7+5800x'
xkom = URLX
# Personal details typed into the guest checkout form.
NAME_AND_SURNAME = "Daniel Janowczyk"
EMAIL = "danieljan@gmail.com"
PHONE = "552388984"
def funkcjazbiorcza(instancja):
    """Full purchase pipeline: fetch the search page, pick the first
    result and run the x-kom checkout with the given driver instance."""
    soup = kierunekposzukiwan(xkom)
    product_link = pierwszylink(soup)
    probazakupuxkom(product_link, instancja)
def kierunekposzukiwan(kierunek, timeout=10):
    """Fetch ``kierunek`` and return the parsed BeautifulSoup document.

    FIX: requests has no default timeout, so a hung connection could
    block the bot forever; a (backward-compatible) timeout is now passed.
    """
    page = requests.get(kierunek, timeout=timeout)
    soup = BeautifulSoup(page.text, "html.parser")
    return soup
def pierwszylink(soup):
    """Return the first external result URL from a Google results page.

    Google wraps organic results as ``/url?q=<target>&...``; the leading
    ``/url?q=`` (7 characters) is stripped before returning.

    Raises
    ------
    ValueError
        If no result link is found.  (Previously this crashed with an
        unhelpful TypeError from slicing None.)
    """
    for tag in soup.findAll('a', href=True):
        link = tag['href']
        # BUG FIX: the old pattern r'/url*' means '/ur' followed by zero
        # or more 'l' characters, so it also matched unrelated hrefs;
        # anchor on the literal '/url?' result prefix instead.
        if link.startswith('/url?'):
            print('eurekaXKOM')
            return link[7:]
    raise ValueError('no /url? result link found on the page')
def probazakupuxkom(EUREKA_LINK, driver):
    """Drive the x-kom checkout flow for the product page at EUREKA_LINK.

    Adds the product to the basket, continues as a guest, selects
    in-store pickup ("Promenada") with payment on collection, and fills
    the personal-data form by tabbing through its fields.  Relies on
    hard-coded CSS/XPath selectors and fixed sleeps, so any site layout
    change will break it.
    """
    driver.get(EUREKA_LINK)
    time.sleep(1)
    # Add the product to the basket ("Dodaj do koszyka" = add to cart).
    driver.find_element_by_xpath('//*[@title="Dodaj do koszyka"]').click()
    time.sleep(2)
    # Open the basket on www.x-kom.pl.
    driver.find_element_by_css_selector('.sc-1v4lzt5-13').click()
    time.sleep(2)
    # "Proceed to delivery" button.
    driver.find_element_by_css_selector(".pvj85d-4").click()
    # Continue as a guest.
    time.sleep(3)
    driver.find_element_by_xpath('/html/body/div[1]/div/div/div[1]/div/div[2]/div[1]/a[1]').click()
    # Personal pickup in a store.
    time.sleep(2)
    driver.find_element_by_css_selector('.sc-14rohpf-0').click()
    # Choose a store from the list.
    driver.find_element_by_css_selector('div.sc-1hndnir-2:nth-child(1) > button:nth-child(1)').click()
    # The "Promenada" store entry.
    driver.find_element_by_css_selector('div.sc-2jtv1t-6:nth-child(47) > div:nth-child(1) > div:nth-child(2) > button:nth-child(1)').click()
    # Payment on pickup (free).
    time.sleep(2)
    driver.find_element_by_css_selector('div.nhgagy-11:nth-child(5)').click()
    time.sleep(4)
    try:
        # Fill the checkout form by tabbing into each field in turn.
        action = ActionChains(driver)
        action.send_keys(Keys.TAB)
        time.sleep(1)
        action.send_keys(Keys.TAB)
        time.sleep(1)
        action.send_keys(Keys.TAB)
        time.sleep(1)
        action.send_keys(Keys.TAB)
        action.send_keys(NAME_AND_SURNAME)# TODO: could be faster
        action.send_keys(Keys.TAB)
        action.send_keys(EMAIL)
        action.send_keys(Keys.TAB)
        action.send_keys(PHONE)
        # NOTE(review): the page may need to be scrolled down first or the
        # consent checkbox below will not be visible/clickable.
        action.perform()
        # Tick the consent checkbox.
        zgoda = driver.find_element_by_css_selector('div.sc-1jh87d2-0:nth-child(1)') # TODO: verify this selector still works
        action.move_to_element(zgoda).click()
        # NOTE(review): this chained click is never perform()ed — confirm.
    except WebDriverException as wde: # report which error occurred
        # Dump the current page URL and the exception details for debugging.
        aktualnylink = driver.current_url
        print("link do obecnej strony: -> ",aktualnylink, '\n')
        print(wde)
        print("-----")
        print(str(wde))
        print("-----")
        print(wde.args)
        print(aktualnylink)
    time.sleep(5)
    #driver.quit()
def probazakupukomputornik(EUREKA_LINK):
    # Stub for the Komputronik flow: only opens the product page so far.
    # NOTE(review): the Firefox instance is never quit — leaks a browser
    # per call if this is ever used repeatedly.
    driver = webdriver.Firefox()
    driver.get(EUREKA_LINK)
def main():
    """Entry point: start Firefox and run the purchase pipeline."""
    driver = webdriver.Firefox()
    funkcjazbiorcza(driver)


# FIX: guard the entry point so importing this module no longer launches
# a browser as a side effect (the original called main() unconditionally).
if __name__ == '__main__':
    main()
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from telemetry.internal.browser import extension_to_load
class ExtensionDict(object):
  """Dictionary of ExtensionPage instances, with extension_id as key."""

  def __init__(self, extension_backend):
    self._extension_backend = extension_backend

  def __getitem__(self, load_extension):
    """Given an ExtensionToLoad instance, returns the corresponding
    ExtensionPage instance."""
    if not isinstance(load_extension, extension_to_load.ExtensionToLoad):
      raise TypeError("Input param must be of type ExtensionToLoad")
    pages = self.GetByExtensionId(load_extension.extension_id)
    return pages[0]

  def __contains__(self, load_extension):
    """Checks if this ExtensionToLoad instance has been loaded"""
    if not isinstance(load_extension, extension_to_load.ExtensionToLoad):
      raise TypeError("Input param must be of type ExtensionToLoad")
    backend = self._extension_backend
    return load_extension.extension_id in backend

  def keys(self):
    # pylint: disable=invalid-name
    return [extension_id for extension_id in self._extension_backend.keys()]

  def GetByExtensionId(self, extension_id):
    """Returns a list of extensions given an extension id. This is useful for
    connecting to built-in apps and component extensions."""
    return self._extension_backend[extension_id]
|
from unittest import TestCase, main
from telegraph.utils import content_to_html, html_to_content
from fixtures import CONTENT, HTML
class TestUtils(TestCase):
    # Round-trip tests for the telegraph content<->HTML converters, using
    # the paired CONTENT/HTML fixtures.
    def test_content_to_html(self):
        html = content_to_html(CONTENT)
        self.assertEqual(html, HTML)
    def test_html_to_content(self):
        content = html_to_content(HTML)
        self.assertEqual(content, CONTENT)
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
#!/usr/local/bin/python
import httplib, sys, string, regsub
def massage(stuff):
    """Normalize Canadian/Australian venue locations in the scraped HTML.

    Rewrites ', CA' city suffixes to the proper Canadian province codes,
    tidies Australian suffixes, and strips trailing state text from venue
    table cells.  Uses the long-removed Python 1.x ``regsub`` module
    (Emacs-style regexes with \\( \\) groups), so this only runs on an
    ancient interpreter — kept byte-identical for that reason.
    """
    # Alberta
    stuff = regsub.gsub('\(GRAND PRAIRIE\|CALGARY\|EDMONTON\|MEDICINE HAT\) *, *CA', '\\1, AB', stuff)
    # British Columbia
    stuff = regsub.gsub('\(WHISTLER\|VANCOUVER\) *, *CA', '\\1, BC', stuff)
    # Manitoba
    stuff = regsub.gsub('WINNIPEG *, *CA', 'WINNIPEG, MB', stuff)
    # Newfoundland
    stuff = regsub.gsub('\(GRAND FALLS\) *, *CA', '\\1, NF', stuff)
    # Nova Scotia
    stuff = regsub.gsub('CANSO *, *CA', 'CANSO, NS', stuff)
    # Ontario (note: BARRIE is listed twice in the alternation)
    stuff = regsub.gsub('\(EMO\|TORONTO\|OTTAWA\|HAMILTON\|GUELPH LAKE'
                        '\|BARRIE\|EAST KITCHENER\|BARRIE'
                        '\) *, *CA', '\\1, ON', stuff)
    # Quebec
    stuff = regsub.gsub('QUEBEC CITY', 'QUEBEC', stuff)
    stuff = regsub.gsub('\(ALMA\|BUCKINGHAM\|MONTREAL\|QUEBEC\) *, *CA', '\\1, QC', stuff)
    # Saskatchewan
    stuff = regsub.gsub('SASKATOON *, *CA', 'SASKATOON, SK', stuff)
    # Drop the trailing ', <state>' text from the last table cell of a row.
    stuff = regsub.gsub('<td>\([^,]*\),[^<]*</td> </tr>$',
                        '<td>\\1</td> </tr>', stuff)
    # Tidy Australian suffixes (', AUS.,' / '; AU' -> ', AU').
    stuff = regsub.gsub(', *AUST?\., *AU<', ', AU<', stuff)
    stuff = regsub.gsub('; *AU<', ', AU<', stuff)
    # Strip ', <state>' from venue-link cell text.
    stuff = regsub.gsub("\(<TD><FONT SIZE=2><A HREF=/musicdb/\?MIval=tourquery_v&venue=[0-9]+>\)\([^,<]*\),[^<]*\(.*\)", "\\1\\2\\3", stuff)
    return stuff
def main():
    """Read an artist name on stdin and print Sony's tour-date HTML for it.

    POSTs a tour query to the (historical) Sony music database through an
    HTTP proxy and pipes the massaged response to stdout.  Uses the
    Python 1.x ``string`` functions and the old ``httplib.HTTP`` class,
    both long gone from modern Python — kept byte-identical.
    """
    # NOTE: 'input' shadows the builtin; harmless in this 1.x-era script.
    input = string.strip(sys.stdin.readline())
    if input:
        query_string = 'MIval=tourquery_a&days=90&artist=%s' % input
        # Connect via the proxy on port 3128 and issue an absolute-URI POST.
        httpobj = httplib.HTTP('www.automatrix.com', 3128)
        httpobj.putrequest('POST', 'http://www2.music.sony.com/musicdb/')
        httpobj.putheader('Host', 'www2.music.sony.com')
        httpobj.putheader('Connection', 'Keep-Alive')
        httpobj.putheader('Accept', 'image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*')
        httpobj.putheader('Content-type', 'application/x-www-form-urlencoded')
        httpobj.putheader('Content-length', '%d' % len(query_string))
        httpobj.endheaders()
        httpobj.send(query_string)
        reply, msg, hdrs = httpobj.getreply()
        # Clean up the HTML before emitting it.
        stuff = massage(httpobj.getfile().read())
        sys.stdout.write(stuff)
|
import logging
from moon_manager.api.base_exception import BaseException
# Module-level logger namespaced under the moon manager API hierarchy.
logger = logging.getLogger("moon.manager.api." + __name__)
class UnknownName(BaseException):
    """Raised when a supplied name does not match any known element.

    The explicit ``__init__`` that only forwarded the message to the base
    class was redundant (the inherited constructor already accepts it) and
    has been removed.
    """
class UnknownId(BaseException):
    """Raised when a supplied id does not match any known element.

    The boilerplate ``__init__`` that only forwarded the message to the
    base class was redundant and has been removed.
    """
class MissingIdOrName(BaseException):
    """Raised when neither an id nor a name was provided.

    The boilerplate ``__init__`` that only forwarded the message to the
    base class was redundant and has been removed.
    """
class UnknownField(BaseException):
    """Raised when a JSON payload contains an unrecognized field.

    The boilerplate ``__init__`` that only forwarded the message to the
    base class was redundant and has been removed.
    """
class JsonUtils:
@staticmethod
def get_override(json_content):
if "override" in json_content:
return json_content["override"]
return False
@staticmethod
def get_mandatory(json_content):
if "mandatory" in json_content:
return json_content["mandatory"]
return False
@staticmethod
def copy_field_if_exists(json_in, json_out, field_name, type_field, default_value=None):
if field_name in json_in:
json_out[field_name] = json_in[field_name]
else:
if type_field is bool:
if default_value is None:
default_value = False
json_out[field_name] = default_value
if type_field is str:
if default_value is None:
default_value = ""
json_out[field_name] = default_value
if type_field is dict:
json_out[field_name] = dict()
if type_field is list:
json_out[field_name] = []
@staticmethod
def _get_element_in_db_from_id(element_type, element_id, user_id, policy_id, category_id,
meta_rule_id, manager):
# the item is supposed to be in the db, we check it exists!
if element_type == "model":
data_db = manager.get_models(user_id, model_id=element_id)
elif element_type == "policy":
data_db = manager.get_policies(user_id, policy_id=element_id)
elif element_type == "subject":
data_db = manager.get_subjects(user_id, policy_id, perimeter_id=element_id)
elif element_type == "object":
data_db = manager.get_objects(user_id, policy_id, perimeter_id=element_id)
elif element_type == "action":
data_db = manager.get_actions(user_id, policy_id, perimeter_id=element_id)
elif element_type == "subject_category":
data_db = manager.get_subject_categories(user_id, category_id=element_id)
elif element_type == "object_category":
data_db = manager.get_object_categories(user_id, category_id=element_id)
elif element_type == "action_category":
data_db = manager.get_action_categories(user_id, category_id=element_id)
elif element_type == "meta_rule":
data_db = manager.get_meta_rules(user_id, meta_rule_id=element_id)
elif element_type == "subject_data":
data_db = manager.get_subject_data(user_id, policy_id, data_id=element_id,
category_id=category_id)
elif element_type == "object_data":
data_db = manager.get_object_data(user_id, policy_id, data_id=element_id,
category_id=category_id)
elif element_type == "action_data":
data_db = manager.get_action_data(user_id, policy_id, data_id=element_id,
category_id=category_id)
elif element_type == "meta_rule":
data_db = manager.get_meta_rules(user_id, meta_rule_id=meta_rule_id)
else:
raise Exception("Conversion of {} not implemented yet!".format(element_type))
# logger.info(data_db)
# do some post processing ... the result should be {key : { .... .... } }
if element_type == "subject_data" or element_type == "object_data" or element_type == "action_data":
if data_db is not None and isinstance(data_db, list):
# TODO remove comments after fixing the bug on moondb when adding metarule : we can have several identical entries !
# if len(data_db) > 1:
# raise Exception("Several {} with the same id : {}".format(element_type, data_db))
data_db = data_db[0]
if data_db is not None and data_db["data"] is not None and isinstance(data_db["data"],
dict):
# TODO remove comments after fixing the bug on moondb when adding metarule : we can have several identical entries !
# if len(data_db["data"].values()) != 1:
# raise Exception("Several {} with the same id : {}".format(element_type, data_db))
# data_db = data_db["data"]
# TODO remove these two lines after fixing the bug on moondb when adding metarule : we can have several identical entries !
list_values = list(data_db["data"].values())
data_db = list_values[0]
# logger.info("subject data after postprocessing {}".format(data_db))
return data_db
@staticmethod
def _get_element_id_in_db_from_name(element_type, element_name, user_id, policy_id, category_id,
                                    meta_rule_id, manager):
    """Return the id of the element of type ``element_type`` whose name is
    ``element_name``, or None when no such element exists.

    :param element_type: one of "model", "policy", "subject", "object",
        "action", the *_category / *_data variants, "meta_rule" or "rule"
    :param element_name: human-readable name to resolve
    :param user_id: id of the requesting user (passed through to the manager)
    :param policy_id: policy scope, required for policy-bound element types
    :param category_id: category scope, required for the *_data element types
    :param meta_rule_id: unused here; kept for signature symmetry with
        _get_element_in_db_from_id
    :param manager: db manager exposing the get_* accessors
    """
    if element_type == "model":
        data_db = manager.get_models(user_id)
    elif element_type == "policy":
        data_db = manager.get_policies(user_id)
    elif element_type == "subject":
        data_db = manager.get_subjects(user_id, policy_id)
    elif element_type == "object":
        data_db = manager.get_objects(user_id, policy_id)
    elif element_type == "action":
        data_db = manager.get_actions(user_id, policy_id)
    elif element_type == "subject_category":
        data_db = manager.get_subject_categories(user_id)
    elif element_type == "object_category":
        data_db = manager.get_object_categories(user_id)
    elif element_type == "action_category":
        data_db = manager.get_action_categories(user_id)
    elif element_type == "meta_rule":
        data_db = manager.get_meta_rules(user_id)
    elif element_type == "subject_data":
        data_db = manager.get_subject_data(user_id, policy_id, category_id=category_id)
    elif element_type == "object_data":
        data_db = manager.get_object_data(user_id, policy_id, category_id=category_id)
    elif element_type == "action_data":
        data_db = manager.get_action_data(user_id, policy_id, category_id=category_id)
    # Bug fix: removed a second, unreachable `elif element_type == "meta_rule"`
    # branch that duplicated the one above.
    elif element_type == "rule":
        data_db = manager.get_rules(user_id, policy_id)
    else:
        # NOTE(review): BaseException is unusually broad for a "not implemented"
        # signal; kept as-is so existing handlers still match.
        raise BaseException("Conversion of {} not implemented yet!".format(element_type))
    if isinstance(data_db, dict):
        # Plain {id: {..., "name": ...}} mapping.
        for key_id in data_db:
            if isinstance(data_db[key_id], dict) and "name" in data_db[key_id]:
                if data_db[key_id]["name"] == element_name:
                    return key_id
    else:
        # List of *_data wrappers: each holds a "data" mapping of either
        # {data_id: {"name": ...}} or {data_id: {"value": {"name": ...}}}.
        for elt in data_db:
            if isinstance(elt,
                          dict) and "data" in elt:  # we handle here subject_data, object_data and action_data...
                for data_key in elt["data"]:
                    data = elt["data"][data_key]
                    if "name" in data and data["name"] == element_name:
                        return data_key
                    if "value" in data and data["value"]["name"] == element_name:
                        return data_key
    return None
@staticmethod
def convert_name_to_id(json_in, json_out, field_name_in, field_name_out, element_type, manager,
                       user_id, policy_id=None, category_id=None, meta_rule_id=None,
                       field_mandatory=True):
    """Resolve json_in[field_name_in] (given either by "id" or by "name") to a
    database id and store it under json_out[field_name_out].

    Raises UnknownField, UnknownId, UnknownName or MissingIdOrName on failure.
    """
    if field_name_in not in json_in:
        raise UnknownField("The field {} is not in the input json".format(field_name_in))
    source = json_in[field_name_in]
    if "id" in source:
        # An explicit id was supplied: just verify it exists.
        found = JsonUtils._get_element_in_db_from_id(
            element_type, source["id"], user_id, policy_id, category_id,
            meta_rule_id, manager)
        if found is None:
            raise UnknownId("No {} with id {} found in database".format(element_type,
                                                                        source["id"]))
        json_out[field_name_out] = source["id"]
    elif "name" in source:
        # A name was supplied: look up the matching id.
        resolved_id = JsonUtils._get_element_id_in_db_from_name(
            element_type, source["name"], user_id, policy_id, category_id,
            meta_rule_id, manager)
        if resolved_id is None:
            raise UnknownName(
                "No {} with name {} found in database".format(element_type,
                                                              source["name"]))
        json_out[field_name_out] = resolved_id
    elif field_mandatory is True:
        raise MissingIdOrName("No id or name found in the input json {}".format(json_in))
@staticmethod
def convert_id_to_name(id_, json_out, field_name_out, element_type, manager, user_id,
                       policy_id=None, category_id=None, meta_rule_id=None):
    """Store ``{"name": <resolved name>}`` under json_out[field_name_out]."""
    resolved = JsonUtils.convert_id_to_name_string(id_, element_type, manager, user_id,
                                                   policy_id, category_id, meta_rule_id)
    json_out[field_name_out] = {"name": resolved}
@staticmethod
def __convert_results_to_element(element):
    """Normalize a query result to a single element dict.

    A mapping without "name"/"value" keys is treated as {id: element} and its
    first value is taken; a list is used as-is; anything else is wrapped.
    """
    if isinstance(element, dict) and "name" not in element and "value" not in element:
        # Idiom fix: list(...) instead of a copying comprehension.
        candidates = list(element.values())
    elif isinstance(element, list):
        candidates = element
    else:
        candidates = [element]
    return candidates[0]
@staticmethod
def convert_id_to_name_string(id_, element_type, manager, user_id,
                              policy_id=None, category_id=None, meta_rule_id=None):
    """Return the name of the element with id ``id_``.

    Returns None when the element exists but carries no name; raises
    UnknownId when no element matches at all.
    """
    element = JsonUtils._get_element_in_db_from_id(element_type, id_, user_id, policy_id,
                                                   category_id, meta_rule_id, manager)
    if element is None:
        raise UnknownId("No {} with id {} found in database".format(element_type, id_))
    normalized = JsonUtils.__convert_results_to_element(element)
    if "name" in normalized:
        return normalized["name"]
    if "value" in normalized and "name" in normalized["value"]:
        return normalized["value"]["name"]
    return None
@staticmethod
def convert_names_to_ids(json_in, json_out, field_name_in, field_name_out, element_type,
                         manager, user_id, policy_id=None, category_id=None, meta_rule_id=None,
                         field_mandatory=True):
    """Resolve every entry of json_in[field_name_in] (each given by "id" or
    "name") to a database id and store the id list in json_out[field_name_out].

    Raises UnknownField, UnknownId, UnknownName or MissingIdOrName on failure.
    """
    if field_name_in not in json_in:
        raise UnknownField("The field {} is not in the input json".format(field_name_in))
    resolved_ids = []
    for entry in json_in[field_name_in]:
        if "id" in entry:
            # Verify the supplied id actually exists before keeping it.
            if JsonUtils._get_element_in_db_from_id(element_type, entry["id"], user_id,
                                                    policy_id, category_id,
                                                    meta_rule_id, manager) is None:
                raise UnknownId(
                    "No {} with id {} found in database".format(element_type, entry["id"]))
            resolved_ids.append(entry["id"])
        elif "name" in entry:
            id_in_db = JsonUtils._get_element_id_in_db_from_name(element_type, entry["name"],
                                                                 user_id, policy_id,
                                                                 category_id, meta_rule_id,
                                                                 manager)
            if id_in_db is None:
                raise UnknownName(
                    "No {} with name {} found in database".format(element_type, entry["name"]))
            resolved_ids.append(id_in_db)
        elif field_mandatory is True:
            raise MissingIdOrName("No id or name found in the input json {}".format(entry))
    json_out[field_name_out] = resolved_ids
@staticmethod
def convert_ids_to_names(ids, json_out, field_name_out, element_type, manager, user_id,
                         policy_id=None, category_id=None, meta_rule_id=None):
    """For each id in ``ids``, resolve its name and append ``{"name": ...}``
    to json_out[field_name_out]. Raises UnknownId for an unresolvable id."""
    res_array = []
    for id_ in ids:
        element = JsonUtils._get_element_in_db_from_id(element_type, id_, user_id, policy_id,
                                                       category_id, meta_rule_id, manager)
        if element is None:
            raise UnknownId("No {} with id {} found in database".format(element_type, id_))
        res = JsonUtils.__convert_results_to_element(element)
        if "name" in res:
            res_array.append({"name": res["name"]})
        # Bug fix: this used to be a second independent `if`, so an element
        # carrying both a top-level "name" and "value"->"name" was appended
        # twice, producing two entries for a single id.
        elif "value" in res and "name" in res["value"]:
            res_array.append({"name": res["value"]["name"]})
    json_out[field_name_out] = res_array
|
# Dzien 5 CIEZKIE ZAJECIA
# Slowniki
# Slownik tworzy sie otwierajac nawias klamrowy.
# names, surnames i city to klucze i im przypisane sa wartosci.
# # deklaruje slownik:
# contacts = {"names": ["Ala", "Ola", "Jan"], "surnames": ["Kowalski", "Malinowska", "Igrekowski"],
# "cities": ["Warszawa", "Gdansk", "Krakow"]}
# print(type(contacts)) # sprawdzenie jaki jest to typ (czy na pewno sllownik)
# print(contacts["cities"]) # wyrzuci wszystko co jest w kluczu cities
# print(contacts["country"]) # wyrzuci blad ze nie ma takiego klucza
# contacts["cities"].append("Rumia") # appent dodaje element
# print(contacts["cities"])
# contacts.update({"countries": ["Polska", "Niemcy", "Czechy", "Polska"]}) #uaktualnienie
# print(contacts)
# update mozna zapisac tez tak:
# contacts["countries"] = ["Polska", "Niemcy", "Czechy", "Polska"]
# print(contacts)
#
# print(len(contacts)) # sprawdzi ilosc kluczy!!!!
# print(contacts.keys()) # wyrzuci nazwy wszystkich kluczy w slowniku
# print(contacts.items()) # slownik, ktory zawiera krotki z wszystkimi wartosciami kolejnych kluczy
# print(list(contacts)) # zadziala tu jak contacts.keys() - domyslnie zwroci kllucze
# Zadanie: Wydrukuj zdanie "Ala Kowalska mieszka w Warszawie (Polska)"
# "Ola Malinowska mieszka w Gdansk"
# "Jan Igrekowski mieszka w Krakow'
## przypisuje kolejnyych wartosci z kolejnych kluczy
# print(contacts)
# # enumerate zwraca pare wartosci: indeks wartosci i wartosc przypisana do tego indeksu
# for key, value in enumerate(contacts["names"]):
# # print(key)
# # print(value)
# name = value
# surname = contacts["surnames"][key]
# cities = contacts["cities"][key]
# countries = contacts["countries"][key]
# print(f"{name} {surname} mieszka w {cities} ({countries})")
# ## Dzialaj malymi kroczkami! najpierw printuje key i values a potem dzialam wykorzystujac key, jako indeksowanie elementow
# # ============================================================================
# Jeszcze raz to samo, tylko inny uklad danych
# contacts = {
# "0": {"name": "Ala", "surname": "Kowalska", "cities": "Gdansk"},
# "1": {"name": "Ola", "surname": "Malinowska", "cities": "Warszawa"},
# "2": {"name": "Jan", "surname": "Igrekowski", "cities": "Krakow"}
# }
#
# print(contacts)
#
# ## key jest wierszem a value kolumna
# for key, value in enumerate(contacts):
# # print(key)
# # print(value)
#
# name = contacts[str(key)]["name"]
# surname = contacts[str(key)]["surname"]
# city = contacts[str(key)]["cities"]
#
# # print(imie)
# print(f"{name} {surname} mieszka w {city}")
## ====================================================================================
## Jeszcze raz to samo, ale tym razem dane zapisane w liscie:
# contacts = [
# {"name": "Ala", "surname": "Kowalska", "cities": "Gdansk"},
# {"name": "Ola", "surname": "Malinowska", "cities": "Warszawa"},
# {"name": "Jan", "surname": "Igrekowski", "cities": "Krakow"}
# ]
#
# for rows in contacts:
# # print(rows["name"])
# name = rows["name"]
# surname = rows["surname"]
# cities = rows["cities"]
#
# print(f"{name} {surname} mieszka w {cities}")
# # ====================================================================================
# # OPERACJE NA PLIKACH - MYSL O WIRTUALNYM KURSORZE
## tworze plik tekstowy - zeby to sie udalo, musze wpisac tryb 'w'
# file = open("Pierwszy_plik.txt", "w")
# file.write("zapisz prosze do pliku jeszcze raz")
# file.close()
## jesli otwieram plik z uwzyciem with nie musze juz pamietac zeby zamknac na koncu swoj plik tekstowy
# with open("file.txt", "w") as file:
# file.write("zapisz prosze \n")
# file.write("i jeszcze ra \n")
# file.write("i jeszcze jeden i jeszcze raz \n")
# z kazdym kolejnym dopisaniem przesuwa sie kursor
# print(file.tell()) ## pokazuje w ktorym miejscu pliku tekstowego znajduje sie kursor
# to samo co wyzej, ale file.seek ustawie kursor w wybrane miejsce
# with open("file.txt", "w") as file:
# file.write("zapisz prosze \n")
# file.seek(5) ## przez to nadpisze czesc tekstu, pozostawi reszte, ktora wystaje
# file.write("i jeszcze ra \n")
# file.write("i jeszcze jeden i jeszcze raz \n")
#
# print(file.read()) #chces przeczytac tekst, ale w trybie "w" jest tylko zapisywanie
# # to samo co wyzej, ale jeszcze odczytujemy:
# with open("file.txt", "w+") as file:
# file.write("zapisz prosze \n")
# file.seek(5) ## przez to nadpisze czesc tekstu, pozostawi reszte, ktora wystaje
# file.write("i jeszcze ra \n")
# file.write("i jeszcze jeden i jeszcze raz \n")
# file.seek(0)
# print(file.read()) # najpierw ustawiam seek na 0, zeby kursor byl na poczatku pliku. samo czytanie tekstu rowniez przesuwa kursor.
## Zadanie: Program jak sie uruchomi to ma krzyknac ile razy sie uruchomil
# Task (see note above): report how many times the program has been run.
# Bug fixes vs. the original: the counter file is actually read/updated
# (the old code wrote the literal string "Plik otwarty {n} razy"), and we no
# longer call file.read() after file.close(), which raised ValueError.
try:
    with open("licznik.txt", "r+") as file:
        raw = file.read().strip()
        n = int(raw) if raw else 0
        n += 1
        file.seek(0)
        file.write(str(n))
        file.truncate()  # drop leftover digits if the count got shorter
except FileNotFoundError:
    # First run: create the counter file.
    n = 1
    with open("licznik.txt", "w") as file:
        file.write("1")
print("Plik otwarty {} razy".format(n))
|
# For each test case: the first token is the starting value, then each symbol
# mutates the running total ('@' triples, '%' adds 5, '#' subtracts 7).
num_cases = int(input())
for _ in range(num_cases):
    # Bug fixes: input().split() already yields strings (the old
    # list(map(str, ...)) was a no-op), and `sum` shadowed the builtin.
    tokens = input().split()
    total = 0.0
    for idx, tok in enumerate(tokens):
        if idx == 0:
            total = float(tok)
        elif tok == "@":
            total = total * 3
        elif tok == "%":
            total = total + 5
        elif tok == "#":
            total = total - 7
    print("%.2f" % total)
# Load the slope map: one entry per line of journey.txt, newline stripped.
a = []
with open('journey.txt') as f:
    a = [row.strip('\n') for row in f]
def sliceArray(array, n):
    """Return every n-th row of `array`, starting with the first."""
    return array[::n]
def calculateTrees(right, down, grid=None):
    """Count '#' cells hit while stepping (right, down) across the map.

    The map repeats horizontally. `grid` defaults to the module-level `a`
    loaded from journey.txt; passing it explicitly makes the function
    testable and reusable.
    """
    array = a if grid is None else grid
    trees = 0
    u = 0
    if (down > 1):
        array = sliceArray(array, down)
    for item in array:
        # Bug fix: wrap with modulo. The old single subtraction
        # (u = u - len(item)) broke -- and could index out of range --
        # once u had grown past twice the row width.
        u = u % len(item)
        if item[u] == '#':
            trees += 1
        u += right
    return trees
# Multiply the tree counts of the five required slopes together.
result = 1
for slope_right, slope_down in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:
    result *= calculateTrees(slope_right, slope_down)
print(result)
import pygame
import random
import neat
import math
# Initialize pygame and the 640x960 play field.
pygame.init()
screen = pygame.display.set_mode((640, 960))

score = 0
# Snake head position on a 16x16-pixel grid.
px = 320
py = 944
# Current heading: "T"op, "B"ottom, "L"eft, "R"ight.
# Bug fix: the original assigned status = "T" twice.
status = "T"

# Colors (RGB).
red = (255, 0, 0)
green = (0, 255, 0)
WHITE = (255, 255, 255)

# Per-frame movement step; the snake starts moving up.
step_x = 0
step_y = -16

# Snake body cells, head first.
pos = [(320, 944)]
snek_len = 1
#FOOD
def ran(occupied=None):
    """Return a random free grid cell (x, y) for the food, in 16-px steps.

    occupied: list of (x, y) cells to avoid; defaults to the snake body `pos`.
    """
    if occupied is None:
        occupied = pos
    cells = set(occupied)
    while True:
        x = random.randrange(0, 640, 16)
        y = random.randrange(0, 960, 16)
        # Bug fix: compare the full (x, y) cell, not x and y independently --
        # the old check rejected any cell sharing a row OR column with the
        # snake and could loop forever once the snake spanned every column.
        if (x, y) not in cells:
            return x, y
F_x,F_y = ran()
def draw():
    """Render every snake segment as a 16x16 red square."""
    for seg_x, seg_y in pos[:snek_len]:
        pygame.draw.rect(screen, red, (seg_x, seg_y, 16, 16))
def ad(q,w):
    """Append a new tail segment at (q, w); called when the snake eats food."""
    pos.append((q,w))
def pos_update(x, y):
    """Advance the snake: shift each body cell to its predecessor's spot and
    place the new head at (x, y)."""
    for idx in range(snek_len - 1, 0, -1):
        pos[idx] = pos[idx - 1]
    pos[0] = (x, y)
font_name = pygame.font.match_font('arial')

def draw_text(surf, text, size, x, y):
    """Blit `text` in white Arial on `surf`, centered horizontally at (x, y)."""
    rendered = pygame.font.Font(font_name, size).render(text, True, WHITE)
    rect = rendered.get_rect()
    rect.midtop = (x, y)
    surf.blit(rendered, rect)
config_path = "./config.txt"  # NEAT config (only used by the commented-out run() below)
running = True
# Main game loop: handle input, advance the head, detect food/collisions.
while running:
    clock = pygame.time.Clock()
    pygame.display.flip()
    # Cap the game at 10 frames per second.
    clock.tick(10)
    screen.fill((0, 0, 0))

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            # Bug fix: the original compared strings with `is`, which tests
            # object identity and only works by the accident of interning;
            # `==` is the correct comparison. Reversing onto the same axis
            # is ignored (`continue` skips to the next event).
            if event.key == pygame.K_LEFT:
                if status == "L" or status == "R":
                    continue
                elif status == "T" or status == "B":
                    step_y = 0
                    step_x = -16
                    status = "L"
            if event.key == pygame.K_RIGHT:
                if status == "L" or status == "R":
                    continue
                elif status == "T" or status == "B":
                    step_y = 0
                    step_x = 16
                    status = "R"
            if event.key == pygame.K_UP:
                if status == "T" or status == "B":
                    continue
                elif status == "R" or status == "L":
                    step_y = -16
                    step_x = 0
                    status = "T"
            if event.key == pygame.K_DOWN:
                if status == "T" or status == "B":
                    continue
                elif status == "L" or status == "R":
                    step_y = 16
                    step_x = 0
                    status = "B"

    # Advance the head one cell in the current direction.
    last_x = px
    last_y = py
    px = px + step_x
    py = py + step_y

    if (px == F_x and py == F_y):
        # Ate the food: grow, score, respawn the food.
        snek_len = snek_len + 1
        score = score + 1
        print(score)
        ad(last_x, last_y)
        F_x, F_y = ran()
        pygame.draw.rect(screen, green, (F_x, F_y, 16, 16))
    else:
        pygame.draw.rect(screen, green, (F_x, F_y, 16, 16))

    # Still in bounds and not colliding with the body: keep moving;
    # otherwise the game is over.
    if ((px >= 0 and px <= 626 and py >= 0 and py <= 944) and ((px, py) not in pos)):
        pos_update(px, py)
        draw()
    else:
        print("END")
        break
    draw_text(screen, str(score), 18, 320, 10)
    pygame.display.update()
"""
def run():
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
winner= p.run(f,50)""" |
import cv2
import numpy as np
from color_test import BaseDetector
class PutdownPosition(object):
    """A single bag drop slot identified by (level, row, col) on the stacking
    grid. The slot centre is derived from the grid edges and the bag
    footprint; all values are in image pixels."""

    def __init__(self, level=None, row=None, col=None, bag_d=140, left_edge=90, right_edge=510,
                 top_edge=100, bottom_edge=400):
        self.level, self.row, self.col = level, row, col
        self.bag_d = bag_d  # bag footprint (pixels per grid cell)
        self._cent_x, self._cent_y = None, None
        self._left_edge, self._right_edge, self._top_edge, self._bottom_edge = \
            left_edge, right_edge, top_edge, bottom_edge

    def _compute_cent(self):
        # Centre of the (row, col) cell: edge offset + whole cells + half a cell.
        self._cent_x = self._left_edge + self.col * self.bag_d + 0.5 * self.bag_d
        self._cent_y = self._top_edge + self.row * self.bag_d + 0.5 * self.bag_d

    @property
    def cent(self):
        """(x, y) centre of this slot in pixels."""
        self._compute_cent()
        return self._cent_x, self._cent_y

    @property
    def cent_x(self):
        # Bug fix: previously returned None unless `cent` had been read first.
        if self._cent_x is None:
            self._compute_cent()
        return self._cent_x

    @property
    def cent_y(self):
        # Bug fix: see cent_x.
        if self._cent_y is None:
            self._compute_cent()
        return self._cent_y
class PositionUtil(object):
    """Tracks which grid slots on each stacking level already hold a bag and
    picks the next empty slot.

    history[level] is a 2-D array of 0/1 flags (1 = occupied); the grid shape
    is derived from the area edges and the bag dimension, in pixels.
    """

    def __init__(self, init_left_edge=90, init_right_edge=510, init_top_edge=100, init_bottom_edge=400, bag_dim=140):
        self.history = []
        self.left_edge, self.right_edge, self.top_edge, self.bottom_edge, self.bag_dim = init_left_edge, init_right_edge, init_top_edge, \
                                                                                         init_bottom_edge, bag_dim
        self.positions = []
        self.current_level = 0

    def __getitem__(self, item):
        # Index straight into the per-level occupancy history.
        return self.history[item]

    def fetchimage_cal_edge(self):
        """Grab the placement-area image and return its (left, right, top,
        bottom) pixel edges, derived from the red contour bounding boxes."""
        # Fetch the placement-area image from the camera in real time.
        # TODO left->right top->bottom
        # NOTE(review): hard-coded file path appears to stand in for the
        # camera feed -- confirm before production use.
        bagimg = cv2.imread("D:/bags.png")
        gray = cv2.cvtColor(bagimg, cv2.COLOR_BGR2GRAY)
        ret, white_binary = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
        red_binary, red_contours = BaseDetector().red_contours(bagimg, middle_start=0, middle_end=1300)
        if red_contours is None or len(red_contours) == 0:
            # No red markers detected: fall back to default edges.
            return 100, 530, 110, 390
        x_small, y_small, x_max, y_max = 10000, 10000, 0, 0
        for c in red_contours:
            x, y, w, h = cv2.boundingRect(c)
            # Track the extreme top-left / bottom-right bounding-box origins.
            if x < x_small: x_small = x
            if y < y_small: y_small = y
            if x_max < x: x_max = x
            if y_max < y: y_max = y
        # print((x_small,y_small),(x_max, y_max))
        # cv2.imshow("bags", red_binary)
        #
        # cv2.waitKey(0)
        return x_small, x_max, y_small, y_max

    def compute_position(self, level=0):
        """Append a fresh all-zero occupancy grid for `level` to history."""
        print("level:{}".format(level))
        if level == 0:
            # Level 0 uses the configured edges; a cell set to 1 in history
            # means a bag has been placed there.
            self.history.append(np.zeros(
                (divmod(self.bottom_edge - self.top_edge, self.bag_dim)[0],
                 divmod(self.right_edge - self.left_edge, self.bag_dim)[0]
                 ), dtype=int))
        else:
            # Higher levels: re-measure the edges from the camera image and
            # record a new occupancy grid sized accordingly.
            left, right, top, bottom = self.fetchimage_cal_edge()
            self.history.append(np.zeros(
                (divmod(bottom - top, self.bag_dim)[0],
                 divmod(right - left, self.bag_dim)[0]
                 )))

    def add_putdown_positon(self, level=0, row=0, col=0):
        """Drive the physical placement of a bag at (level, row, col)."""
        # TODO actually operate the crane to place the bag here.
        newposition = PutdownPosition(level=level, row=row, col=col)
        # TODO the full sequence of placement motions.
        print("real put to ({},{})".format(*newposition.cent))

    def add_bag(self):
        """Place one bag in the next empty slot, moving up a level when the
        current level is full."""
        col_item, find, positionarea, row_item = self.find_empty_position()
        if find == True:
            print("level {} th,find a position to put bag({},{})".format(self.current_level, row_item, col_item))
            self.add_putdown_positon(self.current_level, row_item, col_item)
            positionarea[row_item][col_item] = 1  # mark the slot occupied
            print("has put to ({},{})".format(row_item, col_item))
        else:
            # Current level is full: move one level up and retry once.
            print("该{}层已满,放置在{}层上".format(self.current_level, self.current_level + 1))
            self.current_level += 1
            col_item, now_find, positionarea, row_item = self.find_empty_position()
            if now_find == False: return
            print("level {} th,find a position to put bag({},{})".format(self.current_level, row_item, col_item))
            self.add_putdown_positon(self.current_level, row_item, col_item)
            positionarea[row_item][col_item] = 1
            print("has put to ({},{})".format(row_item, col_item))

    def find_empty_position(self):
        """Scan the current level row-major for the first free slot.

        Returns (col, found, occupancy_grid, row). NOTE(review): when the
        grid is full, (row, col) are the loop's last indices, not a valid
        slot -- callers must check `found` first.
        """
        try:
            positionarea = self.history[self.current_level]
        except Exception as e:
            # Level not recorded yet: lazily create its occupancy grid.
            # NOTE(review): a broad Exception catch; IndexError is what is
            # expected here.
            self.compute_position(self.current_level)
            positionarea = self.history[self.current_level]
        row, col = positionarea.shape
        print(positionarea)
        find = False
        for row_item in range(row):
            for col_item in range(col):
                if positionarea[row_item][col_item] == 0:
                    find = True
                    break
            if find == True: break
        return col_item, find, positionarea, row_item
if __name__ == "__main__":
    # print(divmod(170, 140))
    # print(divmod(280, 140))
    positionvector = PositionUtil()
    # positionvector.compute_position(level=0)
    # positionvector.compute_position(level=1)
    # Place ten bags, one after another (same as the original ten calls).
    for _ in range(10):
        positionvector.add_bag()
|
class Solution(object):
    def arrayPairSum(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Greedy: after sorting, pairing neighbours maximizes the sum of the
        pair minimums, which are exactly the even-indexed elements.
        """
        nums.sort()
        return sum(nums[::2])
# Quick demo. Expected output: 9 (sorted pairs (1,2),(2,5),(6,6) -> 1+2+6).
nums = [6, 2, 6, 5, 1, 2]
sol = Solution()
print(sol.arrayPairSum(nums))
|
# -*- coding:utf-8 -*-
"""
@project : CCMS
@author:hyongchang
@file:test_accounts.py
@ide: PyCharm
@time: 2020-09-26 18:47
"""
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import pytest,allure
from service.api.Business.Business import accounts
from utils.CommonUtils.AllureUtils import AllureUtils
@allure.feature("当前公司已经存在签约对象")
class TestAccounts():
    """Account (contract-party) API tests: create, modify, delete."""

    def setup_class(self):
        # Shared OData-style query payload used by every accounts.list() call.
        self.payload = {
            "$top": 50,
            "$select": "name,phone,address,company_id",
            "$count": "true"
        }

    @AllureUtils.dynamic_title("创建新的签约对象")
    @pytest.mark.usefixtures("init_accounts_data")
    def test_tc001002(self, init_org_data):
        """A newly created account shows up in the account list."""
        org_id = init_org_data
        with allure.step("创建新的签约对象"):
            ret_add_accounts = accounts.add(name="拼多多", category="1", company_ids=[org_id])
        for accounts_one in accounts.list(self.payload):
            if ret_add_accounts["_id"] == accounts_one["_id"]:
                assert "拼多多" == accounts_one["name"]
                break

    @AllureUtils.dynamic_title("修改id存在签约对象")
    def test_tc001051(self, init_accounts_data):
        """Renaming an existing account is reflected in the account list."""
        accounts_id = init_accounts_data
        # Bug fix: allure.step(...) returns a context manager; the original
        # called it without `with`, so the step was never entered/recorded.
        with allure.step("修改id存在签约对象"):
            accounts.modify(accounts_id, name="京东物流")
        for accounts_one in accounts.list(self.payload):
            if accounts_id == accounts_one["_id"]:
                assert "京东物流" == accounts_one["name"]
                break

    @AllureUtils.dynamic_title("删除id不存在的签约对象")
    @pytest.mark.usefixtures("init_accounts_data")
    def test_tc001092(self):
        """Deleting a nonexistent id leaves the account list unchanged."""
        ret_list_account_before = accounts.list(self.payload)
        accounts.delete("test_delete")
        ret_list_account_after = accounts.list(self.payload)
        assert ret_list_account_before == ret_list_account_after
|
import numpy as np
import matplotlib.pyplot as plt
from src.learning_perceptrons.basic_classifier import LinearClassifier
from src.sigmoid_perceptrons.sigmoid_perceptrons import SigmoidClassifier
def accuracies_plot(train: int = 5000):
    """
    Generates a plot showing the accuracy of the classifier's output over
    amount of training examples
    :param train: Amount of training to be done
    """
    gate_inputs = [np.array([0.0, 0.0]), np.array([0.0, 1.0]),
                   np.array([1.0, 0.0]), np.array([1.0, 1.0])]
    gate_outputs = {"and": [0, 0, 0, 1], "or": [0, 1, 1, 1], "nand": [1, 1, 1, 0],
                    "xor": [0, 1, 1, 0]}
    # One (linear, sigmoid) learner pair per logic gate.
    learners = {gate: [LinearClassifier(0, 0), SigmoidClassifier(0, 0)]
                for gate in gate_outputs}
    accuracies = {gate: [] for gate in gate_outputs}
    sigmoid_accuracies = {gate: [] for gate in gate_outputs}
    for step in range(train + 1):
        if step:
            # One training pass over all four input patterns per gate
            # (skipped at step 0 so the untrained accuracy is recorded).
            for gate, targets in gate_outputs.items():
                linear, sigmoid = learners[gate]
                for point, target in zip(gate_inputs, targets):
                    linear.train(point[0], point[1], target)
                    sigmoid.train(point[0], point[1], target)
        # Evaluate both learners on all four patterns and record % accuracy.
        for gate, targets in gate_outputs.items():
            linear, sigmoid = learners[gate]
            hits = 0
            sigmoid_hits = 0
            for point, target in zip(gate_inputs, targets):
                if linear.classification(point[0], point[1]) == target:
                    hits += 1
                if sigmoid.classification(point[0], point[1]) == target:
                    sigmoid_hits += 1
            accuracies[gate].append(hits * 100 / 4)
            sigmoid_accuracies[gate].append(sigmoid_hits * 100 / 4)
    fig, ax = plt.subplots()
    for gate in gate_outputs:
        ax.plot(range(train + 1), accuracies[gate], label="Normal " + gate)
        ax.plot(range(train + 1), sigmoid_accuracies[gate], label="Sigmoid " + gate)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels)
    ax.set_xlim([0, train])
    ax.set_ylim([0, 100])
    plt.title("Accuracy of classifier v/s times trained for a total of " + str(train) + " training examples")
    ax.grid(True)
    plt.show()
def main():
    """Script entry point: draw the accuracy plot with default settings."""
    accuracies_plot()


if __name__ == '__main__':
    main()
|
def counting_sheep(n):
    """Return the first multiple of n whose cumulative digits cover 0-9,
    or "INSOMNIA" when n == 0 (the digits never change).

    Fixes vs. the original: removed the dead `result is None` check (result
    was assigned [] just above) and replaced the O(n) list-membership digit
    tracking with a set.
    """
    if n == 0:
        return "INSOMNIA"
    seen = set()
    multiple = 0
    i = 1
    while len(seen) < 10:
        multiple = n * i
        seen.update(str(multiple))
        i += 1
    return multiple
# Read the number of cases, then one value of n per case, and print each
# result in Code Jam format.
test_cases = int(input())
for tc in range(test_cases):
    label = "Case #" + str(tc + 1) + ": "
    print(label, counting_sheep(int(input())), sep="")
|
import folium
from folium import plugins
import pandas as pd
import requests
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from xml.etree import ElementTree
# %matplotlib inline  # Bug fix: IPython magic is invalid syntax in a plain
# .py file; run this line manually in Jupyter if needed.

df = pd.read_csv('pop_data.csv')
m = folium.Map([6.5244, 3.3792], zoom_start=1)


def _save_year_map(year, outfile):
    """Draw each city of `year` as a black circle scaled by population, save
    the map to `outfile`, and return (year_subset, year_map)."""
    subset = df.loc[df['Year'] == year]
    year_map = folium.Map([6.5244, 3.3792], zoom_start=1)
    # Also add plain circle markers to the shared overview map `m`,
    # mirroring the original script's side effect.
    subset.apply(lambda row: folium.CircleMarker(
        location=[row["Latitude"], row["Longitude"]]).add_to(m), axis=1)
    for i in range(0, len(subset)):
        folium.Circle(
            location=[subset.iloc[i]['Latitude'], subset.iloc[i]['Longitude']],
            popup=subset.iloc[i]['Cities'],
            radius=subset.iloc[i]['Population'] * 20000,
            color='black',
            fill=True,
            fill_color='black'
        ).add_to(year_map)
    year_map.save(outfile=outfile)
    return subset, year_map


# The five copy-pasted sections are deduplicated through the helper above;
# the original module-level names are kept for backward compatibility.
one, firstmap = _save_year_map(2010, "one.html")
two, secondmap = _save_year_map(2025, "two.html")
three, thirdmap = _save_year_map(2050, "three.html")
four, fourthmap = _save_year_map(2075, "four.html")
five, fifthmap = _save_year_map(2100, "five.html")
from django.urls import path
from . import views
# URL routes for this app: OAuth-style auth/token endpoints plus a
# catch-all handler at the app root.
urlpatterns = [
    path('auth/', views.auth, name='auth'),      # authorization endpoint
    path('token/', views.token, name='token'),   # token exchange endpoint
    path('', views.handle, name='handle'),       # default request handler
]
|
from __future__ import division
import numpy as np
import os
from scipy import ndimage as ndi
from skimage import feature
from scipy.misc import imread
from scipy.misc import imsave
import shutil
azcomplexity = []
#calculate complexity of letters a-z and store them in a list
for i in range(65,91):
#read image file to 2d array
filename = str(i)+".png"
im = imread("az/"+filename, flatten=True)
# Calculate area. The area we are interested in is the sum of the white pixels
area = (im>0).sum()
# Compute the Canny filter. This leaves only a white border around the shape.
edges = feature.canny(im, sigma=3.0)
#Calculate perimiter. The permiter we are interested in is the sum of the white pixels
#now that we've adjusted the image to be the white pixel border only
perimiter = (edges>0).sum()
complexity = (perimiter * perimiter)/area
azcomplexity.append(int(round(complexity)))
#create folders a-z
for i in range (0,26):
directory = "images/" + str(chr(97+i))
os.makedirs(directory)
#calculate complexity of each image and move it to relevant folder
for i in range(0,10000):
#read image file to 2d array
filename = str(i)+".png"
im = imread("images/initial/"+filename, flatten=True)
# Calculate area. The area we are interested in is the sum of the white pixels
area = (im>0).sum()
# Compute the Canny filter. This leaves only a white border around the shape.
edges = feature.canny(im, sigma=3.0)
#Calculate perimiter. The permiter we are interested in is the sum of the white pixels
#now that we've adjusted the image to be the white pixel border only
perimiter = (edges>0).sum()
if area > 1:
complexity = (perimiter * perimiter)/area
roundedcomplexity = int(round(complexity))
print roundedcomplexity
for x in xrange(len(azcomplexity)):
if roundedcomplexity == azcomplexity[x]:
shutil.copy("images/initial/"+filename,"images/"+str(chr(97+x)))
|
from cattr._compat import is_bare, is_py37, is_py38
# Version-specific shims: on Python 3.7/3.8 `typing` generics are immutable
# and re-parameterized via copy_with(); on older typing implementations the
# __args__ tuple is mutated in place.
if is_py37 or is_py38:
    from typing import Dict, List

    def change_type_param(cl, new_params):
        """Return `cl` parameterized with `new_params` (subscripts bare generics)."""
        if is_bare(cl):
            return cl[new_params]
        return cl.copy_with(new_params)

    # Origin aliases: typing generics on 3.7/3.8.
    List_origin = List
    Dict_origin = Dict
else:
    def change_type_param(cl, new_params):
        """Mutate `cl.__args__` in place and return `cl` (legacy typing)."""
        cl.__args__ = (new_params,)
        return cl

    # Origin aliases: builtin containers elsewhere.
    List_origin = list
    Dict_origin = dict
|
from selenium.webdriver.common.keys import Keys
import time
class TicketDeletePage:
    """Page object for the ticket list; supports deleting a ticket by title."""

    URL = 'http://127.0.0.1:5000/tickets'

    def __init__(self, browser):
        self.browser = browser

    def load(self):
        """Navigate the browser to the ticket list page."""
        self.browser.get(self.URL)

    def delete_ticket(self, tl):
        """Tab from the ticket's title link to its delete control and activate it."""
        link = self.browser.find_element_by_link_text(tl)
        link.send_keys(Keys.TAB + Keys.TAB + Keys.RETURN)
        time.sleep(3)  # crude wait for the deletion to complete
|
import sqlite3

# Open (or create) the job database and get a cursor for DDL statements.
conn = sqlite3.connect('jobDB.db')
cur = conn.cursor()

# Drop any stale table left over from a previous run.
try:
    cur.execute('''DROP TABLE JobsTable''')
    conn.commit()
except sqlite3.OperationalError:
    # Bug fix: was a bare `except: pass`, which would also hide unrelated
    # errors; only the expected "no such table" operational error is ignored.
    pass

# Recreate the table with a fresh schema.
cur.execute('''CREATE TABLE JobsTable(
            Title TEXT,
            Company TEXT,
            Location TEXT,
            Date TEXT);''')
conn.commit()
conn.close()  # release the database handle (was previously leaked)
import socket
import pygame
class Dualshock:
    """Reads a DualShock joystick via pygame and streams axis/button state
    over UDP to a fixed peer, waiting for an acknowledgement each frame."""

    def __init__(self, id):
        self.loopFlag = True  # main-loop guard; cleared on pygame QUIT
        pygame.joystick.init()
        # Logical control -> pygame axis/button number.
        # NOTE(review): axis 3/4 and button 9 are driver/layout dependent --
        # confirm against the actual controller.
        self.keyMap = {'joyX': 3, 'joyY': 4, 'start': 9}
        self.__id = id  # joystick index handed to pygame.joystick.Joystick
        # canvas variables
        self.clock = pygame.time.Clock()
        self.x = 10  # text cursor origin used by displayInfo
        self.y = 10
        self.width = 500
        self.height = 200
        self.size = [self.width, self.height]
        # udp
        self.UDP_IP_ADDRESS = "192.168.0.182"
        self.UDP_PORT = 6969
        self.bufferSize = 1024

    def init(self):
        """Initialize the joystick, the status window and the UDP socket."""
        # controller intialization
        self.controller = pygame.joystick.Joystick(self.__id)
        self.controller.init()
        # display initialization
        self.screen = pygame.display.set_mode(self.size)
        pygame.display.set_caption('Controller States')
        self.screen.fill((255, 255, 255))
        # udp init
        self.clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def displayInfo(self, screen, string):
        """Render one line of text and advance the vertical text cursor."""
        self.font = pygame.font.Font(None, 20)
        self.text = self.font.render(string, True, (0, 0, 0))
        screen.blit(self.text, [self.x, self.y])
        # indent (move the cursor down for the next line)
        self.y += 12

    def getBtn(self, id):
        """Return the current state (0/1) of button `id`."""
        self.state = self.controller.get_button(id)
        return self.state

    def getAxis(self, id):
        """Return the current value (-1.0..1.0) of axis `id`."""
        self.value = self.controller.get_axis(id)
        return self.value

    def getControlStates(self):
        """Main loop: poll the joystick, send x/y/start over UDP, and wait
        for the peer's acknowledgement; exits when the window is closed."""
        pygame.init()
        self.init()
        self.clock = pygame.time.Clock()
        while self.loopFlag:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.loopFlag = False
            self.screen.fill((255, 255, 255))
            self.xval = self.getAxis(self.keyMap['joyX'])
            self.yval = self.getAxis(self.keyMap['joyY'])
            self.startBtn = self.getBtn(self.keyMap['start'])
            values = (self.xval, self.yval, self.startBtn)
            self.message = '''x:{},y:{},s:{}'''.format(self.xval, self.yval, self.startBtn)
            self.clientSocket.sendto(self.message.encode('utf-8'), (self.UDP_IP_ADDRESS, self.UDP_PORT))
            # ack
            # NOTE(review): recvfrom blocks -- if the peer never replies the
            # loop stalls here.
            self.data, self.addr = self.clientSocket.recvfrom(self.bufferSize)
            print("received: ", self.data, " from: ", self.addr)
            # display the controller states
            # self.displayInfo(self.screen,'Left joystick x value: {}'.format(self.xval))
            # self.displayInfo(self.screen,'Left joystick y value: {}'.format(self.yval))
            # self.displayInfo(self.screen,'Left joystick x value: {}'.format(self.startBtn))
            # pygame.display.flip()
            self.clock.tick(500)
        self.clientSocket.close()
        pygame.quit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
# In-memory task store; ids are 1-based and assigned sequentially.
tasks = [
    {
        "id": 1,
        "title": "learn python",
        "description": "blablabla",
        "done": False
    },
    {
        "id": 2,
        "title": "learn flask",
        "description": "foofoofoo",
        "done": False
    }
]

app = Flask(__name__)
@app.route('/')
def index():
    """Root route: static landing text."""
    return 'Index Page'
@app.route('/hello')
def hello():
    """Simple health/demo route."""
    return 'Hello, World'
@app.route('/todo/api/tasks', methods=['GET'])
def todo():
    """Return the full task list as JSON."""
    return jsonify({"tasks": tasks})
@app.route('/todo/api/task/<int:post_id>')
def show_post(post_id):
    """Return the task with 1-based id `post_id`.

    NOTE(review): indexes the list directly -- post_id 0 returns the last
    task and an out-of-range id raises IndexError; confirm this is intended.
    """
    return jsonify({"tasks": tasks[post_id - 1]})
@app.route('/todo/api/tasks', methods=['POST'])
def add_task():
    """Create a task from the JSON request body and return it as JSON."""
    content = request.get_json()
    new_task = {
        # Bug fix: ids are 1-based (existing tasks are 1 and 2), so the next
        # id is len(tasks) + 1; the original used len(tasks) - 1, which
        # duplicated an existing id.
        "id": len(tasks) + 1,
        "title": content["title"],
        "description": content["description"],
        "done": False
    }
    # Bug fix: `tasks[len(tasks)] = ...` raised IndexError on a list;
    # append is the correct way to add an element.
    tasks.append(new_task)
    return jsonify({"tasks": new_task})
# jsonify
|
#!/usr/bin/python3
"""Module Student to Disk and Reload"""
class Student:
    """Defines a student that can be serialized to and reloaded from a dict."""
    def __init__(self, first_name, last_name, age):
        """Initialize the student's name and age."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
    def to_json(self, attrs=None):
        """Return a dictionary representation of the instance.

        If `attrs` is a list of attribute names, only those attributes that
        are actually present on the instance are included; otherwise all
        instance attributes are returned.
        """
        if attrs is None:
            return self.__dict__
        # Membership test replaces the original O(n*m) nested loop.
        return {k: v for k, v in self.__dict__.items() if k in attrs}
    def reload_from_json(self, json):
        """Replace existing attributes with the values found in `json`.

        Keys not already present as attributes are ignored, matching the
        original behavior (no new attributes are created).
        """
        for k, v in json.items():
            if k in self.__dict__:
                self.__dict__[k] = v
|
#!/usr/bin/python
from pychartdir import *
# Sample data for the Box-Whisker chart. Represents the minimum, 1st quartile, medium, 3rd quartile
# and maximum values of some quantities
Q0Data = [40, 45, 40, 30, 20, 50, 25, 44]
Q1Data = [55, 60, 50, 40, 38, 60, 51, 60]
Q2Data = [62, 70, 60, 50, 48, 70, 62, 70]
Q3Data = [70, 80, 65, 60, 53, 78, 69, 76]
Q4Data = [80, 90, 75, 70, 60, 85, 80, 84]
# The labels for the chart
labels = ["Group A", "Group B", "Group C", "Group D", "Group E", "Group F", "Group G", "Group H"]
# Create a XYChart object of size 550 x 250 pixels
c = XYChart(550, 250)
# Set the plotarea at (50, 25) and of size 450 x 200 pixels. Enable both horizontal and vertical
# grids by setting their colors to grey (0xc0c0c0)
c.setPlotArea(50, 25, 450, 200).setGridColor(0xc0c0c0, 0xc0c0c0)
# Add a title to the chart
c.addTitle("Computer Vision Test Scores")
# Set the labels on the x axis and the font to Arial Bold
c.xAxis().setLabels(labels).setFontStyle("arialbd.ttf")
# Set the font for the y axis labels to Arial Bold
c.yAxis().setLabelStyle("arialbd.ttf")
# Add a Box Whisker layer using light blue 0x9999ff as the fill color and blue (0xcc) as the line
# color. Set the line width to 2 pixels.
# NOTE(review): ChartDirector's argument order here is (boxTop, boxBottom,
# maxData, minData, midData) — i.e. Q3, Q1, Q4, Q0, Q2 — confirm against the
# installed pychartdir version.
c.addBoxWhiskerLayer(Q3Data, Q1Data, Q4Data, Q0Data, Q2Data, 0x9999ff, 0x0000cc).setLineWidth(2)
# Output the chart as a CGI response: header, blank line, then raw PNG bytes
print("Content-type: image/png\n")
binaryPrint(c.makeChart2(PNG))
|
import winsound
from random import randint
from time import sleep
import wx
# Monty Hall statistics and game state, shared by the nested GUI handlers.
ganarcambio=0      # games won after switching doors
ganarsincambio=0   # games won without switching
perdercambio=0     # games lost after switching
perdersincambio=0  # games lost without switching
abierta=0          # door opened by the host (always hides a goat)
actual=0           # door currently chosen by the player
otra=0             # the remaining door
premio=0           # door hiding the prize
turno = False      # True while a round is in progress
class MiFrame(wx.Frame):
    """Monty Hall game window: three doors hiding two goats and one car."""
    def __init__(self,*args,**kwargs):
        """Build every widget and wire the closure-based event handlers.

        NOTE(review): the nested handlers' first parameter is named `self`
        but actually receives the wx event object; all game state lives in
        module-level globals. The sleep() calls below block the GUI event
        loop while they run.
        """
        global turno
        wx.Frame.__init__(self,*args,**kwargs)
        self.Show()
        blanco = wx.Colour(255, 255, 255)
        self.SetBackgroundColour(blanco)
        panel = wx.Panel(self, -1, pos=(0, 0), size=(800, 600))
        panel.SetBackgroundColour(blanco)
        # Goat and car bitmaps, one per door position; all hidden until revealed.
        cabra1 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(88, 35), size=(125, 200))
        cabra2 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(340, 35), size=(125, 200))
        cabra3 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(585, 35), size=(125, 200))
        carro1 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(0, 35), size=(200, 200))
        carro2 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(252, 35), size=(200, 200))
        carro3 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),
                                 pos=wx.Point(497, 35), size=(200, 200))
        # Doors: clickable buttons covering the bitmaps above
        puerta1 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),
                                  wx.Point(88, 35), wx.Size(120, 200), 0)
        puerta2 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),
                                  wx.Point(340, 35), wx.Size(120, 200), 0)
        puerta3 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),
                                  wx.Point(585, 35), wx.Size(120, 200), 0)
        # Switch / stay / reset buttons
        change = wx.BitmapButton(panel, -1, wx.Bitmap('checked.png', wx.BITMAP_TYPE_ANY),
                                 wx.Point(290, 340), wx.Size(64, 64), 0)
        do_not_change = wx.BitmapButton(panel, -1, wx.Bitmap('cross.png', wx.BITMAP_TYPE_ANY),
                                        wx.Point(440, 340), wx.Size(64, 64), 0)
        reset = wx.BitmapButton(panel, -1, wx.Bitmap('refreshing1.png', wx.BITMAP_TYPE_ANY),
                                wx.Point(368, 340), wx.Size(64, 64), 0)
        reset.SetBackgroundColour((0,0,0))
        # Status line plus the four running-score labels
        texto = wx.StaticText(self, id=-1, label="Elige una puerta para iniciar el juego. ", pos=(200, 265),
                              size=(400, 50), style=wx.ALIGN_CENTRE)
        font = wx.Font(15, wx.DECORATIVE, wx.NORMAL, wx.BOLD)
        texto.SetFont(font)
        Ganadas = wx.StaticText(self, id=-1, label=("Partidas ganadas cambiando puerta: %d" % ganarcambio), pos=(30, 475),
                                size=(250, 50), style=wx.ALIGN_LEFT)
        Perdidas = wx.StaticText(self, id=-1, label="Partidas perdidas cambiando puerta: %d" % perdercambio, pos=(550, 475),
                                 size=(250, 50), style=wx.ALIGN_LEFT)
        Ganadas2 = wx.StaticText(self, id=-1, label="Partidas ganadas sin cambio de puerta: %d" % ganarsincambio, pos=(30, 525),
                                 size=(250, 50), style=wx.ALIGN_LEFT)
        Perdidas2 = wx.StaticText(self, id=-1, label="Partidas perdidas sin cambio de puerta: %d" % perdersincambio,
                                  pos=(550, 525), size=(250, 50), style=wx.ALIGN_LEFT)
        # Initial state: only the three closed doors are visible.
        change.Hide()
        do_not_change.Hide()
        reset.Hide()
        cabra1.Hide()
        cabra2.Hide()
        cabra3.Hide()
        carro1.Hide()
        carro2.Hide()
        carro3.Hide()
        # Restore the initial state for a new round (scores are kept).
        def reset_listener(self):
            global abierta;
            global actual
            global otra
            global premio
            global turno
            reset.Hide()
            cabra1.Hide()
            cabra2.Hide()
            cabra3.Hide()
            carro1.Hide()
            carro2.Hide()
            carro3.Hide()
            puerta1.Show()
            puerta1.Enable(True)
            puerta2.Show()
            puerta2.Enable(True)
            puerta3.Show()
            puerta3.Enable(True)
            turno = False
            texto.SetLabel("Elige una puerta para empezar a jugar:")
            abierta=0
            actual=0
            otra=0
            premio=0
        # Player switches to the remaining closed door, then the result is revealed.
        def change_door(self):
            global perdercambio
            global ganarcambio
            global imagen
            global actual
            global premio
            global abierta
            change.Hide()
            do_not_change.Hide()
            duplicado = actual
            if duplicado==1:
                puerta1.Enable(True)
            if duplicado==2:
                puerta2.Enable(True)
            if duplicado==3:
                puerta3.Enable(True)
            # Pick the only door that is neither the opened one nor the old choice.
            actual = randint(1, 3)
            while actual == abierta or actual == duplicado:
                actual=randint(1, 3)
            if actual==1:
                puerta1.Enable(False)
            if actual==2:
                puerta2.Enable(False)
            if actual==3:
                puerta3.Enable(False)
            texto.SetLabel("Cambiaste la puerta %d por la puerta %d" % (duplicado, actual))
            sleep(2.2)
            winsound.PlaySound("door.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)
            print("El actual es %d" % actual)
            print("El premio es %d" % premio)
            print ("El abierta es %d" % abierta)
            # Reveal the chosen door and update the switch-door statistics.
            if actual==1:
                puerta1.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro1.Show()
                    ganarcambio= ganarcambio + 1
                    Ganadas.SetLabel("Partidas ganadas cambiando puerta: %d" % ganarcambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra1.Show()
                    perdercambio= perdercambio + 1
                    Perdidas.SetLabel("Partidas perdidas cambiando puerta: %d" % perdercambio)
            if actual==2:
                puerta2.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro2.Show()
                    ganarcambio= ganarcambio + 1
                    Ganadas.SetLabel("Partidas ganadas cambiando puerta: %d" % ganarcambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra2.Show()
                    perdercambio= perdercambio + 1
                    Perdidas.SetLabel("Partidas perdidas cambiando puerta: %d" % perdercambio)
            if actual==3:
                puerta3.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro3.Show()
                    ganarcambio= ganarcambio + 1
                    Ganadas.SetLabel("Partidas ganadas cambiando puerta: %d" % ganarcambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra3.Show()
                    perdercambio= perdercambio + 1
                    Perdidas.SetLabel("Partidas perdidas cambiando puerta: %d" % perdercambio)
            reset.Show()
        # Player keeps the original door; reveal it and update the no-switch stats.
        def do_not_change_door(self):
            global perdersincambio
            global ganarsincambio
            change.Hide()
            do_not_change.Hide()
            texto.SetLabel("Elegiste quedarte con la puerta %d." % actual)
            print("Nueva partida")
            print("El actual es %d" % actual)
            print("El premio es %d" % premio)
            print ("El abierta es %d" % abierta)
            sleep(2.2)
            if actual==1:
                puerta1.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro1.Show()
                    ganarsincambio= ganarsincambio + 1
                    Ganadas2.SetLabel("Partidas ganadas sin cambio de puerta: %d" % ganarsincambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra1.Show()
                    perdersincambio= perdersincambio + 1
                    Perdidas2.SetLabel("Partidas perdidas sin cambio de puerta: %d" % perdersincambio)
            if actual==2:
                puerta2.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro2.Show()
                    ganarsincambio= ganarsincambio + 1
                    Ganadas2.SetLabel("Partidas ganadas sin cambio de puerta: %d" % ganarsincambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra2.Show()
                    perdersincambio= perdersincambio + 1
                    Perdidas2.SetLabel("Partidas perdidas sin cambio de puerta: %d" % perdersincambio)
            if actual==3:
                puerta3.Hide();
                if actual==premio:
                    texto.SetLabel("¡Buena opción! Encontraste el premio.")
                    carro3.Show()
                    ganarsincambio= ganarsincambio + 1
                    Ganadas2.SetLabel("Partidas ganadas sin cambio de puerta: %d" % ganarsincambio)
                else:
                    texto.SetLabel("Perdiste... Elegista la puerta de la cabra")
                    cabra3.Show()
                    perdersincambio= perdersincambio + 1
                    Perdidas2.SetLabel("Partidas perdidas sin cambio de puerta: %d" % perdersincambio)
            reset.Show()
        # Announce the host's opened (goat) door, then continue the round.
        def timeout(abierta):
            texto.SetLabel("Observa que en la puerta %d esta la cabra." % abierta)
            sleep(2.2)
            timeout2(abierta)
        # Open the goat door visually and offer the switch/stay choice.
        def timeout2(abierta):
            global actual
            global premio
            if actual == 1:
                texto.SetLabel("¿Deseas cambiar de puerta?")
            if actual == 2:
                texto.SetLabel("¿Deseas cambiar de puerta?")
            if actual == 3:
                texto.SetLabel("¿Deseas cambiar de puerta?")
            winsound.PlaySound("door.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)
            if abierta==1:
                puerta1.Hide();
                cabra1.Show()
            if abierta==2:
                puerta2.Hide();
                cabra2.Show()
            if abierta==3:
                puerta3.Hide();
                cabra3.Show()
            change.Show()
            do_not_change.Show()
            print("El actual es %d" % actual)
            print("El premio es %d" % premio)
            print("La abierta es %d" % abierta)
        # Place the prize and pick the host's door: not the prize, not the player's.
        def puertaA(usuario):
            global premio
            premio=randint(1, 3)
            global abierta
            abierta=randint(1, 3)
            while abierta==premio or abierta==usuario:
                abierta=randint(1, 3)
            return abierta
        # One click handler per door: record the choice and start the round.
        def onpuerta1(self):
            global turno
            global actual
            global abierta
            turno = True
            actual=1
            abierta=puertaA(actual)
            texto.SetLabel("Elegista la puerta 1")
            puerta1.Disable()
            sleep(2.2)
            timeout(abierta)
            print(u"Has presionado el botón 1")
        def onpuerta2(self):
            global actual
            global abierta
            global turno
            turno = True
            actual=2
            abierta=puertaA(actual)
            texto.SetLabel("Elegiste la puerta 2")
            puerta2.Disable()
            sleep(2.2)
            timeout(abierta)
            print(u"Has presionado el botón 2")
        def onpuerta3(self):
            global actual
            global abierta
            global turno
            turno = True
            actual=3
            abierta=puertaA(actual)
            texto.SetLabel("Elegista la puerta 3")
            puerta3.Disable()
            sleep(2.2)
            timeout(abierta)
            print (u"Has presionado el botón 3")
        # Bind the door buttons only before the first round; turno is False here.
        if turno is False:
            puerta1.Bind(wx.EVT_BUTTON, onpuerta1)
            puerta2.Bind(wx.EVT_BUTTON, onpuerta2)
            puerta3.Bind(wx.EVT_BUTTON, onpuerta3)
        change.Bind(wx.EVT_BUTTON, change_door)
        do_not_change.Bind(wx.EVT_BUTTON, do_not_change_door)
        reset.Bind(wx.EVT_BUTTON, reset_listener)
if __name__ == '__main__':
    # Launch the wx application with a fixed-size (non-resizable) frame.
    app = wx.App()
    fr = MiFrame(None, -1, " Juego de las puertas", size=(800,600),style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
    app.MainLoop()
|
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from condensa import dtypes
def dequantize(module, dtype):
    """
    De-quantizes module to given data type (inplace).

    :param module: PyTorch module.
    :type module: `torch.nn.Module`
    :param dtype: Target data type.
    :raises TypeError: if the data type is not a supported float type.
    """
    # Dispatch table instead of an if/elif chain; both entries convert in place.
    converters = {
        dtypes.DT_FLOAT32: module.float,
        dtypes.DT_FLOAT64: module.double,
    }
    try:
        convert = converters[dtype.as_dtype_enum]
    except KeyError:
        raise TypeError('Unknown data type specified for de-quantization')
    convert()
|
#-*- coding:utf8-*-
#Build a chart: add nodes and edges
import networkx as nx
import matplotlib.pyplot as plt
#/Users/kun/Desktop/
# Build a small directed graph and draw it with matplotlib.
G = nx.DiGraph()
G.add_node(1)
G.add_node(2)
G.add_nodes_from([3,4,5,6])
# NOTE(review): Graph.add_cycle was removed in NetworkX 2.x; modern versions
# need the module-level nx.add_cycle(G, [1,2,3,4]) — confirm the pinned
# networkx version.
G.add_cycle([1,2,3,4])
G.add_edge(1,3)
# Node 7 is created implicitly by the (6,7) edge.
G.add_edges_from([(3,5),(3,6),(6,7)])
nx.draw(G)
#write_pajek(G, path, encoding='UTF-8')
#nx.write_pajek(G,'' , encoding='UTF-8')
#nx.write_pajek(G, "D://111.net", encoding='UTF-8')
#plt.savefig("youxiangtu.png")
plt.show()
|
def combination(arr, r):
    """Return all r-element combinations of `arr`, in order, as lists.

    The original accumulated into a module-level `answer` via `global`,
    which raised NameError unless the caller predefined it and leaked
    results between calls. A local accumulator removes both problems while
    keeping the same return value.

    Note: positions are recovered with `arr.index(...)`, so `arr` is assumed
    to contain distinct elements (true for the index lists this script uses).
    """
    result = []
    def _generate(chosen):
        if len(chosen) == r:
            result.append(chosen[:])  # copy: `chosen` is mutated below
            return
        # Only extend with elements after the last chosen one.
        start = arr.index(chosen[-1]) + 1 if chosen else 0
        for nxt in range(start, len(arr)):
            chosen.append(arr[nxt])
            _generate(chosen)
            chosen.pop()
    _generate([])
    return result
# Read T test cases; for each, split N ingredients into two equal halves and
# minimize the absolute difference of the two halves' pairwise synergy sums.
T = int(input())
for tc in range(1, T+1):
    answer = []
    N = int(input())
    # N x N synergy matrix; synergy[i][j] and synergy[j][i] may differ.
    synergy = [list(map(int, input().split())) for _ in range(N)]
    # Every way to pick team A; team B is the complement set of indices.
    A_cases = combination(list(range(N)), N//2)
    B_cases = [[i for i in range(N) if i not in A] for A in A_cases]
    minN = 40000  # upper bound on any achievable difference
    for k in range(len(A_cases)):
        A_ingredient, B_ingredient = 0,0
        A_case, B_case = A_cases[k], B_cases[k]
        for i in range(N//2):
            for j in range(i+1, N//2):
                # Count synergy in both directions, (i,j) and (j,i).
                A_ingredient += synergy[A_case[i]][A_case[j]]+synergy[A_case[j]][A_case[i]]
                B_ingredient += synergy[B_case[i]][B_case[j]]+synergy[B_case[j]][B_case[i]]
        minN = min(minN, abs(A_ingredient-B_ingredient))
    print(minN)
def intersect(A, B):
    """Two-pointer merge: multiset intersection of two sorted lists."""
    common = []
    i = j = 0
    len_a, len_b = len(A), len(B)
    while i < len_a and j < len_b:
        a, b = A[i], B[j]
        if a < b:
            i += 1
        elif b < a:
            j += 1
        else:
            # Equal: record once and advance both sides (duplicates kept).
            common.append(a)
            i += 1
            j += 1
    return common
# Parenthesized call: the bare `print x` statement is Python-2-only syntax
# (a SyntaxError on Python 3); with a single argument this form behaves
# identically on both.
print(intersect([1, 2, 3, 3, 4, 5, 6], [3, 3, 5]))
'''
Given a string s, return the longest palindromic substring in s.
'''
class Solution:
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of `s`.

        Expand-around-center: O(n^2) time, O(1) extra space. The original
        materialized every substring in a set (O(n^2) strings of total size
        O(n^3)), crashed on the empty string (`s[0]`), and its answer among
        equal-length palindromes depended on hash iteration order.
        """
        if not s:
            return s
        longest = s[0]
        n = len(s)
        for center in range(n):
            # Odd-length centers (i, i) and even-length centers (i, i+1).
            for lo, hi in ((center, center), (center, center + 1)):
                while lo >= 0 and hi < n and s[lo] == s[hi]:
                    lo -= 1
                    hi += 1
                # The loop overshoots by one on each side.
                candidate = s[lo + 1:hi]
                if len(candidate) > len(longest):
                    longest = candidate
        return longest
# Call through an instance instead of passing the string "test" as `self`.
print(Solution().longestPalindrome("babad"))
import csv
import os
from worker.abstract_item_generator import BaseItemGenerator
__author__ = 'pradeepv'
class SnomedConceptGenerator(BaseItemGenerator):
    """Streams SNOMED concept rows from the configured concept file."""
    def __init__(self):
        # Resolve the input path via the base class's lookup for 'conceptfile'.
        self.input_file = super().file_to_read('conceptfile')
        self.infile = None  # opened lazily by `generate`
    @property
    def generate(self):
        """Open the concept file and return a csv.DictReader over it.

        NOTE(review): this property has side effects — it opens a file handle
        that the caller must release via close(). Confirm callers expect a
        property rather than a method here.
        """
        print('reading files')
        self.infile = open(self.input_file, 'rt', encoding='utf-8')
        reader = csv.DictReader(self.infile, quotechar='|', quoting=csv.QUOTE_NONNUMERIC)
        return reader
    def close(self):
        """Close the file handle opened by `generate`."""
        self.infile.close()
|
import torch
from tqdm import tqdm
def land_auto(loc, A, z_points, grid, dv, model, constant=None, batch_size=1024, metric_grid_sum=None,
              grid_sampled=None,
              init_curve=None, grid_init_curves=None, q_prob=None):
    """
    Dispatch to the appropriate LAND density routine.

    Order of dispatch: a precomputed metric sum selects the sampled-grid
    variant; otherwise a 1-D `A` selects the scalar-variance variant; a full
    matrix `A` selects the full-covariance variant.

    :param loc: mu
    :param A: std in either full cov or scalar
    :param z_points: latent points to optimize for
    :param grid: grid for constant estimation
    :param dv: grid square size
    :param constant: constant input to save recalculation, by default None
    :param model: model to get metric from
    :param batch_size: batch_size
    :param metric_grid_sum: precomputed sum of sqrt-metric over the grid
        (enables the sampled estimator)
    :param grid_sampled: sampled grid points for the sampled estimator
    :param init_curve: warm-start curves for the distance solver
    :param grid_init_curves: warm-start curves for the grid points
    :param q_prob: unused here — kept for interface compatibility
    :return: lpz, init_curve, D2, constant
    """
    if metric_grid_sum is not None:
        lpz, init_curve, D2, constant = LAND_fullcov_sampled(loc=loc,
                                                             A=A,
                                                             z_points=z_points,
                                                             sampled_grid_points=grid_sampled,
                                                             metric_sum=metric_grid_sum,
                                                             dv=dv,
                                                             model=model,
                                                             logspace=True,
                                                             init_curve=init_curve,
                                                             grid_init_curves=grid_init_curves,
                                                             batch_size=batch_size)
    elif A.dim() == 1:
        lpz, init_curve, D2, constant = LAND_scalar_variance(loc=loc,
                                                             scale=A,
                                                             z_points=z_points,
                                                             grid_points=grid,
                                                             dv=dv,
                                                             constant=constant,
                                                             model=model,
                                                             logspace=True,
                                                             init_curve=init_curve,
                                                             batch_size=batch_size)
    else:
        lpz, init_curve, D2, constant = LAND_fullcov(loc=loc,
                                                     A=A,
                                                     z_points=z_points,
                                                     dv=dv,
                                                     grid_points=grid,
                                                     constant=constant,
                                                     model=model,
                                                     logspace=True,
                                                     init_curve=init_curve,
                                                     batch_size=batch_size)
    return lpz, init_curve, D2, constant
def LAND_fullcov(loc, A, z_points, dv, grid_points, constant=None, model=None, logspace=True,
                 init_curve=None, batch_size=1024):
    """
    LAND density with a full covariance matrix.

    :param loc: mean (mu) in latent space.
    :param A: full covariance factor forwarded to ``model.dist2_explicit``.
    :param z_points: latent points to evaluate at.
    :param dv: grid cell volume used by the constant estimator.
    :param grid_points: grid used to estimate the normalization constant.
    :param constant: previously estimated constant; recomputed when None.
    :param model: model providing ``dist2_explicit`` and ``metric``.
    :param logspace: if True return the negative log density, else the density.
    :param init_curve: optional warm-start curves for the distance solver.
    :param batch_size: batch size for the constant estimation.
    :return: (lpz or pz, init_curve, D2, constant)
    """
    # `is None`, not `== None`: `==` on a tensor is elementwise and does not
    # yield a plain bool; this also matches LAND_scalar_variance below.
    if constant is None:
        constant = estimate_constant_full(mu=loc, A=A, grid=grid_points, dv=dv, model=model, batch_size=batch_size)
    loc = loc.repeat([z_points.shape[0], 1])
    if init_curve is not None:
        D2, init_curve, _ = model.dist2_explicit(loc, z_points, C=init_curve, A=A)
    else:
        D2, init_curve, _ = model.dist2_explicit(loc, z_points, A=A)
    # Unnormalized log density is -D2/2; divide by the constant to normalize.
    inside = (-1 * D2 / 2).squeeze(-1)
    pz = (1 / constant) * inside.exp()
    if logspace:
        lpz = -1 * pz.log()
        return lpz, init_curve, D2, constant
    else:
        return pz, init_curve, D2, constant
def estimate_constant_full(mu, A, grid, dv, model, batch_size=512, sum=True):
    """ Estimate the normalization constant using a full covariance matrix.

    Integrates exp(-D2/2) * sqrt(det(metric)) * dv over the grid, in batches.
    Returns the scalar sum when `sum` is True, otherwise the per-point
    contributions together with the sqrt-metric vector.

    Note: the parameter name `sum` shadows the builtin (kept for interface
    compatibility).
    """
    iters = (grid.shape[0] // batch_size) + 1
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    with torch.no_grad():
        for i in tqdm(range(iters)):
            # data: batch of grid points (last batch takes the remainder)
            grid_points_batch = grid[i * batch_size:(i + 1) * batch_size, :] if i < (iters - 1) else grid[
                                                                                                    i * batch_size:, :]
            mu_repeated = mu.repeat([grid_points_batch.shape[0], 1])
            # calcs
            D2, _, _ = model.dist2_explicit(mu_repeated, grid_points_batch.to(device), A=A)
            exponential_term = (-D2 / 2).squeeze(-1).exp()
            metric_determinant = model.metric(grid_points_batch.to(device)).det()
            # NOTE(review): truthiness of a multi-element tensor raises at
            # runtime — this branch only works if `.det()` returns a single
            # element here; confirm. On a negative det, retry in float64.
            if metric_determinant < 0:
                model = model.double()
                metric_determinant = model.metric(grid_points_batch.double().to(device)).double().det()
                model = model.float()
            metric_term = metric_determinant.sqrt()
            constant = metric_term * exponential_term * dv
            if i == 0:
                approx_constant = constant
                if not sum:
                    metric_vector = metric_term.cpu()
            else:
                approx_constant = torch.cat((approx_constant, constant), dim=0)
                if not sum:
                    metric_vector = torch.cat((metric_vector, metric_term.cpu()), dim=0)
    if sum:
        return approx_constant.sum()
    else:
        return approx_constant, metric_vector
def LAND_fullcov_sampled(loc, A, z_points, dv, sampled_grid_points, metric_sum, model=None, logspace=True,
                         init_curve=None, batch_size=256, grid_init_curves=None):
    """
    LAND density with a full covariance matrix, estimating the normalization
    constant from *sampled* grid points: the constant is the mean of
    exp(-D2/2) over the samples, scaled by the precomputed `metric_sum` and
    the cell volume `dv` (importance-sampling style).

    :return: (lpz or pz, init_curve, D2, constant)
    """
    # Ceil-divide the sample count into batches.
    iters = sampled_grid_points.shape[0] // batch_size if (sampled_grid_points.shape[0] % batch_size) == 0 else \
        sampled_grid_points.shape[0] // batch_size + 1
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    for i in range(iters):
        # data: batch of sampled grid points (last batch takes the remainder)
        grid_points_batch = sampled_grid_points[i * batch_size:(i + 1) * batch_size, :] if i < (
                iters - 1) else sampled_grid_points[i * batch_size:, :]
        mu_repeated = loc.repeat([grid_points_batch.shape[0], 1])
        # calcs
        D2, _, _ = model.dist2_explicit(mu_repeated, grid_points_batch.to(device), A=A, C=grid_init_curves)
        exponential_term = (-D2 / 2).squeeze(-1).exp()
        if i == 0:
            approx_constant = exponential_term
        else:
            approx_constant = torch.cat((approx_constant, exponential_term), dim=0)
    constant = approx_constant.mean() * metric_sum * dv
    # Evaluate the (un)normalized density at the requested latent points.
    mu_repeated = loc.repeat([z_points.shape[0], 1])
    D2, init_curve, success = model.dist2_explicit(mu_repeated, z_points, A=A, C=init_curve)
    inside = (-(D2) / 2).squeeze(-1)
    pz = (1 / constant) * inside.exp()
    if logspace:
        lpz = -1 * (pz).log()
        return lpz, init_curve, D2, constant
    else:
        return pz, init_curve, D2, constant
def LAND_grid_prob(grid, model, batch_size=1024, device="cuda"):
    """
    Compute a sampling distribution over grid points proportional to
    sqrt(det(metric)). Negative determinants are clamped to zero.

    :return: (grid_prob, grid_metric, grid_metric.sum(), raw determinants),
        the first three moved to CPU.
    """
    # Ceil-divide the grid into batches.
    tmp = (grid.shape[0] // batch_size)
    iters = tmp + 1 if grid.shape[0] / batch_size > tmp else tmp
    model.eval()
    with torch.no_grad():
        for i in range(iters):
            grid_point = grid[i * batch_size:(i + 1) * batch_size, :] if i < (iters - 1) else grid[i * batch_size:, :]
            metric_determinant = model.metric(grid_point.to(device)).det()
            # NOTE(review): truthiness of a multi-element tensor raises at
            # runtime — this check assumes a single-element result; confirm.
            if metric_determinant < 0:
                model = model.double()
                metric_determinant = model.metric(grid_point.double().to(device)).double().det()
                model = model.float()
            if i == 0:  # for metric
                grid_save = metric_determinant
            else:
                grid_save = torch.cat((grid_save, metric_determinant), dim=0)
    print("negative grid metric:", grid_save[grid_save < 0].shape[0])
    # Clamp negative determinants so sqrt below is defined.
    if grid_save[grid_save < 0].shape[0] > 0:
        grid_save[grid_save < 0] = 0
    grid_metric = grid_save.sqrt()
    grid_prob = grid_metric / grid_metric.sum()
    return grid_prob.cpu(), grid_metric.cpu(), grid_metric.sum().cpu(), grid_save
def LAND_scalar_variance(loc, scale, z_points, grid_points, dv, constant=None, model=None, logspace=True,
                         init_curve=None,
                         batch_size=1024):
    """
    LAND density using a scalar standard deviation instead of a covariance
    matrix: the exponent is -D2 / (2 * scale**2).

    OBSOLETE — kept for backward compatibility with older call sites.

    :return: (lpz or pz, init_curve, D2, constant)
    """
    if constant is None:
        constant = estimate_constant_simple(mu=loc,
                                            std=scale,
                                            grid=grid_points,
                                            dv=dv,
                                            model=model,
                                            batch_size=batch_size)
    loc = loc.repeat([z_points.shape[0], 1])
    if init_curve is not None:
        D2, init_curve, success = model.dist2_explicit(loc, z_points, C=init_curve)
    else:
        D2, init_curve, success = model.dist2_explicit(loc, z_points)
    inside = -D2 / (2 * scale ** 2)
    pz = (1 / constant) * inside.exp()
    if logspace:
        lpz = -1 * pz.log()
        return lpz, init_curve, D2, constant
    else:
        return pz, init_curve, D2, constant
def estimate_constant_simple(mu, std, grid, dv, model, batch_size=512):
    """ Estimate the normalization constant for the scalar-variance LAND.

    OBSOLETE — companion of LAND_scalar_variance. Integrates
    exp(-D2 / (2*std**2)) * sqrt(det(metric)) * dv over the grid, in batches.
    Note: unlike estimate_constant_full, runs with gradients enabled and has
    no negative-determinant fallback.
    """
    iters = (grid.shape[0] // batch_size) + 1
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    for i in range(iters):
        # data: batch of grid points (last batch takes the remainder)
        grid_points_batch = grid[i * batch_size:(i + 1) * batch_size, :] if i < (iters - 1) else grid[
                                                                                                i * batch_size:, :]
        mu_repeated = mu.repeat([grid_points_batch.shape[0], 1])
        # calcs
        D2, _, _ = model.dist2_explicit(mu_repeated, grid_points_batch.to(device), A=None)
        exponential_term = (-D2 / (2 * std ** 2)).squeeze(-1).exp()
        metric_term = model.metric(grid_points_batch.to(device)).det().sqrt()
        constant = metric_term * exponential_term * dv
        if i == 0:
            approx_constant = constant
        else:
            approx_constant = torch.cat((approx_constant, constant), dim=0)
    return approx_constant.sum()
|
# Simple ATM: repeatedly ask for an amount and break it into banknotes.
# Fixes: the original `input(...).upper()[0]` raised IndexError when the user
# just pressed Enter, and the prompt had a `[S/N}` typo.
while True:
    # Read a valid integer amount, retrying on non-numeric input.
    while True:
        try:
            value = int(input('Quanto deseja sacar?: R$'))
            break
        except ValueError:
            print('Somente números inteiros, Por Favor!!')
    print('Voce Recebeu:')
    # Greedy note breakdown via divmod instead of four copy-pasted loops.
    for note in (50, 20, 10, 1):
        count, value = divmod(value, note)
        if count > 0:
            unit = '1 Real' if note == 1 else f'{note} Reais'
            print(f'{count} notas de {unit}')
    # [:1] is safe on empty input (returns '' and the loop continues).
    rst = input('Deseja continuar?[S/N]: ').strip().upper()[:1]
    if rst == 'N':
        break
print('Fim de Execução!')
|
class Student:
    """A student with an id, name, address, and an accumulating CGPA."""
    # Grade-letter -> grade-point lookup (replaces the long if/elif chain).
    __GRADE_POINTS = {
        "A+": 4.0, "A": 3.75, "A-": 3.50,
        "B+": 3.25, "B": 3.00, "B-": 2.75,
        "C+": 2.50, "C": 2.25, "D": 2.00, "F": 0.00,
    }
    # Class-level defaults; addGrade creates instance-level copies on update.
    __totalCGPA = 0.0
    __totalCredits = 0
    def __init__(self, id, name, address):
        """Initialize id, name and address (param `id` kept for compatibility)."""
        self.__studentId = id
        self.__studentName = name
        self.__studentAddress = address
    def getStudentId(self):
        return self.__studentId
    def getStudentName(self):
        return self.__studentName
    def getStudentAddress(self):
        return self.__studentAddress
    def getCgpa(self):
        """Return the credit-weighted CGPA, or 0 before any grade is added.

        The zero-credit guard now runs *before* the division; the original
        divided first and raised ZeroDivisionError on a fresh student.
        """
        if self.__totalCredits == 0:
            return 0
        return self.__totalCGPA / self.__totalCredits
    def setStudentAddress(self, address):
        self.__studentAddress = address
    def getTotalCredits(self):
        return self.__totalCredits
    def addGrade(self, grade, credits):
        """Record `credits` at letter grade `grade`; unknown grades are ignored."""
        numericGrade = self.__GRADE_POINTS.get(grade)
        if numericGrade is None:
            return None
        self.__totalCredits += credits
        self.__totalCGPA += numericGrade * credits
    # In Java this one is called toString.
    def __str__(self):
        return str(self.__studentId) + " " + self.__studentName + " " + self.__studentAddress + " " + str(self.getCgpa()) + " " + str(self.getTotalCredits())
# Demo: build a small roster, record some grades, and print each student
# (uses Student.__str__ for the formatting).
studentList = []
studentList.append(Student(100, "Abul", "Uttara"))
studentList.append(Student(101, "Babul", "Kalabagan"))
studentList.append(Student(102, "Kabul", "Malibagh"))
studentList.append(Student(103, "Abul", "Mirpur"))
studentList.append(Student(104, "Putul", "Nakhalpara"))
studentList[0].addGrade("A+", 1)
studentList[4].addGrade("A", 3)
studentList[3].addGrade("B+", 3)
studentList[3].addGrade("C+", 3)
studentList[2].addGrade("C+", 3)
studentList[1].addGrade("B+", 3)
studentList[1].addGrade("A+", 3)
for i in studentList:
    print(i)
|
import time
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.autograd as autograd
import data_loader
from torch.autograd import Variable
from model import _G_xvz, _G_vzx, _D_xvs, _G_vzx_withID
from itertools import *
import pdb
import ID_models.IdPreserving as ID_pre
import ID_models.MobileFaceNet as MBF
import PIL
import json
dd = pdb.set_trace  # debugging shortcut: call dd() to drop into pdb
# Command-line options for the GAN training run.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data_list", type=str, default="./list.txt")
parser.add_argument("-ns", "--nsnapshot", type=int, default=700)
parser.add_argument("-b", "--batch_size", type=int, default=64) # 16
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4)
# momentum/momentum2 are the Adam-style beta parameters.
parser.add_argument("-m", "--momentum", type=float, default=0.) # 0.5
parser.add_argument("-m2", "--momentum2", type=float, default=0.9) # 0.999
parser.add_argument('--outf', default='./output',
                    help='folder to output images and model checkpoints')
parser.add_argument('--modelf', default='./output',
                    help='folder to output images and model checkpoints')
parser.add_argument('--cuda', action='store_true',
                    help='enables cuda', default=True)
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=25, type=int, metavar='N',
                    help='number of total epochs to run')
# Checkpoints to resume from when --continue_training is set.
parser.add_argument("-c", "--continue_training",
                    action='store_true', help='continue training', default=False)
parser.add_argument("-G_xvz", "--G_xvz_name", type=str,
                    default="netG_xvz_epoch_24_699.pth")
parser.add_argument("-G_vzx", "--G_vzx_name", type=str,
                    default="netG_vzx_epoch_24_699.pth")
parser.add_argument("-D_xvs", "--D_xvs_name", type=str,
                    default="netD_xvs_epoch_24_699.pth")
parser.add_argument('--id_loss', action='store_true',
                    help='add id preserving loss', default=False)
parser.add_argument("-id_w", "--id_weight", type=int, default=20)
parser.add_argument('--sym_loss', action='store_true',
                    help='add symmetry loss', default=False)
# NOTE(review): declared type=int but defaults to 0.1 — a value passed on the
# CLI is truncated to an integer; confirm whether float was intended.
parser.add_argument("-sym_w", "--symmetry_loss_weight", type=int, default=0.1)
parser.add_argument("-s", "--save", action='store_true',
                    help='save good&bad imgs', default=False)
# Initialize networks
# Initialize networks
def weights_init(m):
    """DCGAN-style init, dispatched on the layer's class name (substring match):
    Conv -> N(0, 0.02) weights; BatchNorm/LayerNorm -> N(1, 0.02) weights and
    zero bias; Linear -> N(0, 0.02) weights and zero bias."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name or 'LayerNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
def load_model(net, path, name):
    """Load weights from '<path>/<name>' into `net` (partial, name-matched).

    Keys present in the checkpoint but absent from the network are reported
    and skipped; matching tensors are copied in place, so `net` is updated
    directly through its state_dict references.

    :param net: target torch.nn.Module.
    :param path: directory containing the checkpoint.
    :param name: checkpoint file name.
    """
    state_dict = torch.load('%s/%s' % (path, name))
    own_state = net.state_dict()
    # Loop variable renamed: the original reused `name` and shadowed the
    # checkpoint-file parameter inside the loop.
    for key, param in state_dict.items():
        if key not in own_state:
            print('not load weights %s' % key)
            continue
        own_state[key].copy_(param)
# N*128*128*3(r,g,b) -> N*112*96*3(r,g,b)
# N*128*128*3(r,g,b) -> N*112*96*3(r,g,b)
def resize_img_to_MBF_input(args, img_tensor):
    """Resize a batch of images to the 112x96 input size of MobileFaceNet.

    Round-trips each image tensor -> PIL -> resize -> tensor (one by one).
    NOTE(review): assumes img_tensor holds exactly args.batch_size images in
    [0,1] range — confirm against the caller; a partial final batch would
    leave trailing entries of the output uninitialized.
    """
    loader = transforms.Compose([transforms.ToTensor()])
    unloader = transforms.ToPILImage()
    MBF_input_tensor = torch.FloatTensor(args.batch_size, 3, 112, 96)
    # convert tensor to PIL
    i = 0
    for img in img_tensor:
        PIL_img = img.cpu().clone()
        PIL_img = PIL_img.squeeze(0)
        PIL_img = unloader(PIL_img)
        # resize img (PIL size order is (width, height))
        PIL_img = PIL_img.resize((96, 112))
        # convert PIL to tensor
        image_tensor = loader(PIL_img).unsqueeze(0)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        image_tensor = image_tensor.to(device, torch.float)
        # combine into the output batch
        MBF_input_tensor[i] = image_tensor
        i += 1
    return MBF_input_tensor
def L1_loss(x, y):
    """Mean over samples of the per-sample (dim-1) sum of absolute differences."""
    per_sample = (x - y).abs().sum(1)
    return per_sample.mean()
# symmetry loss
# symmetry loss
def Sym_loss(img128_fake):
    """Horizontal-symmetry penalty: L1 distance between the generated
    128x128 image and its left-right mirror (mirror is detached so only
    the original receives gradient)."""
    width = img128_fake.size()[3]
    rev_idx = torch.arange(width - 1, -1, -1).long().cuda()
    mirrored = img128_fake.index_select(3, Variable(rev_idx))
    mirrored.detach_()
    criterion = torch.nn.L1Loss().cuda()
    return criterion(img128_fake, mirrored)
args = parser.parse_args()
print(args)
# Create the image output directory; ignore "already exists".
try:
    os.makedirs(args.outf)
except OSError:
    pass
if torch.cuda.is_available() and not args.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# need initialize!!
G_xvz = _G_xvz()
G_vzx = _G_vzx_withID()
D_xvs = _D_xvs()
G_xvz.apply(weights_init)
G_vzx.apply(weights_init)
D_xvs.apply(weights_init)
train_list = args.data_list
train_loader = torch.utils.data.DataLoader(
    data_loader.ImageList(train_list,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),
    batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True)
# Latent layout: 9-dim one-hot view code + 119-dim noise, plus two
# identity-feature codes (LightCNN 256-d, MobileFaceNet 128-d).
v_siz = 9
z_siz = 128-v_siz
ID_r_siz = 256
mbf_ID_r_siz = 128
# Pre-allocated input/latent buffers, refilled in place every iteration.
x1 = torch.FloatTensor(args.batch_size, 3, 128, 128)
x2 = torch.FloatTensor(args.batch_size, 3, 128, 128)
mbf_input = torch.FloatTensor(args.batch_size, 3, 112, 96)
v1 = torch.FloatTensor(args.batch_size, v_siz)
v2 = torch.FloatTensor(args.batch_size, v_siz)
z = torch.FloatTensor(args.batch_size, z_siz)
ID_r = torch.FloatTensor(args.batch_size, ID_r_siz)
mbf_ID_r = torch.FloatTensor(args.batch_size, mbf_ID_r_siz)
if args.cuda:
    G_xvz = torch.nn.DataParallel(G_xvz).cuda()
    G_vzx = torch.nn.DataParallel(G_vzx).cuda()
    D_xvs = torch.nn.DataParallel(D_xvs).cuda()
    x1 = x1.cuda()
    x2 = x2.cuda()
    mbf_input = mbf_input.cuda()
    v1 = v1.cuda()
    v2 = v2.cuda()
    z = z.cuda()
x1 = Variable(x1)
x2 = Variable(x2)
mbf_input = Variable(mbf_input)
v1 = Variable(v1)
v2 = Variable(v2)
z = Variable(z)
ID_r = Variable(ID_r)
mbf_ID_r = Variable(mbf_ID_r)
# load_model(G_xvz, '/ssd01/wanghuijiao/F06All/pretrained_model', 'netG_xvz.pth')
# load_model(G_vzx, '/ssd01/wanghuijiao/F06All/pretrained_model', 'netG_vzx.pth')
# load_model(D_xvs, '/ssd01/wanghuijiao/F06All/pretrained_model', 'netD_xvs_epoch_24_1686.pth')
lr = args.learning_rate
ourBetas = [args.momentum, args.momentum2]
batch_size = args.batch_size
snapshot = args.nsnapshot
start_time = time.time()
ID_weight = args.id_weight
G_xvz_solver = optim.Adam(G_xvz.parameters(), lr=lr, betas=ourBetas)
G_vzx_solver = optim.Adam(G_vzx.parameters(), lr=lr, betas=ourBetas)
D_xvs_solver = optim.Adam(D_xvs.parameters(), lr=lr, betas=ourBetas)
cudnn.benchmark = True
crossEntropyLoss = nn.CrossEntropyLoss().cuda()
# Training log file; written once with the args, then per-iteration below.
fout = open(args.modelf+"/log.txt", "w")
fout.write(str(args))
# initialize MobileFaceNet (frozen identity-feature extractor)
MobileFacenet = MBF.MobileFacenet()
checkpoint = torch.load(
    '/ssd01/wanghuijiao/F06All/ID_models/MobileFaceNet.ckpt')
MobileFacenet.load_state_dict(checkpoint['net_state_dict'])
for epoch in range(args.epochs):
    for i, (view1, view2, data1, data2) in enumerate(train_loader):
        # our framework:
        # path 1: (v, z)-->G_vzx-->x_bar--> D_xvs( (v,x_bar), (v,x) )
        # This path to make sure G_vzx can generate good quality images with any random input
        # path 2: x-->G_xvz-->(v_bar, z_bar)-->G_vzx-->x_bar_bar--> D_xvs( (v,x_bar_bar), (v,x) ) + L1_loss(x_bar_bar, x)
        # This path to make sure G_xvz is the reverse of G_vzx
        eps = random.uniform(0, 1)  # gradient-penalty interpolation weight
        tmp = random.uniform(0, 1)
        # With 5% probability path 2 reconstructs the generated x_bar
        # ("path 2A") instead of the real image pair ("path 2B").
        reconstruct_fake = False
        if tmp < 0.05:
            reconstruct_fake = True
        D_xvs.zero_grad()
        G_xvz.zero_grad()
        G_vzx.zero_grad()
        img1 = data1
        img2 = data2
        # get x-->real image v--> view and z-->random vector
        with torch.no_grad():
            x1.resize_(img1.size()).copy_(img1)
            x2.resize_(img2.size()).copy_(img2)
            v1.zero_()
            v2.zero_()
            # Build one-hot view codes from the integer view labels.
            for d in range(view1.size(0)):
                v1.data[d][view1[d]] = 1
            for d in range(view2.size(0)):
                v2.data[d][view2[d]] = 1
            z.data.uniform_(-1, 1)  # random z
            ID_r.data.uniform_(-1, 1)  # random ID
            mbf_ID_r.data.uniform_(-1, 1)  # random ID
        # Integer class targets for the ACGAN view classifier head.
        targetNP = v1.cpu().data.numpy()
        idxs = np.where(targetNP > 0)[1]
        tmp = torch.LongTensor(idxs)
        vv1 = Variable(tmp).cuda()  # v1 target
        targetNP = v2.cpu().data.numpy()
        idxs = np.where(targetNP > 0)[1]
        tmp = torch.LongTensor(idxs)
        vv2 = Variable(tmp).cuda()  # v2 target
        # path 1: (v, z)-->G_vzx-->x_bar--> D_xvs( (v,x_bar), (v,x_real) )
        # path 1, update D_xvs
        x_bar = G_vzx(v1, z, ID_r, mbf_ID_r)  # random z to generate img x_bar
        # interpolation of x_bar and x1
        x_hat = eps*x1.data + (1-eps)*x_bar.data
        x_hat = Variable(x_hat, requires_grad=True)
        D_x_hat_v, D_x_hat_s = D_xvs(x_hat)
        # WGAN-GP gradient penalty on the interpolated sample.
        grads = autograd.grad(outputs=D_x_hat_s,
                              inputs=x_hat,
                              grad_outputs=torch.ones(D_x_hat_s.size()).cuda(),
                              retain_graph=True,
                              create_graph=True,
                              only_inputs=True)[0]
        grad_norm = grads.pow(2).sum().sqrt()
        gp_loss = torch.mean((grad_norm - 1) ** 2)  # gradient with v1
        x_bar_loss_v, x_bar_loss_s = D_xvs(x_bar.detach())  # score of x_bar
        x_bar_loss_s = x_bar_loss_s.mean()
        x_loss_v, x_loss_s = D_xvs(x1)  # score of x1
        x_loss_s = x_loss_s.mean()
        v_loss_x = crossEntropyLoss(x_loss_v, vv1)  # ACGAN loss of x1(v1)
        d_xvs_loss = x_bar_loss_s - x_loss_s + 10. * gp_loss + \
            v_loss_x  # x1 real sample, x_bar fake sample
        d_xvs_loss.backward(retain_graph=True)
        D_xvs_solver.step()
        # path 1, update G_vzx
        D_xvs.zero_grad()
        G_xvz.zero_grad()
        G_vzx.zero_grad()
        x_bar_loss_v, x_bar_loss_s = D_xvs(x_bar)  # score of x_bar
        x_bar_loss_s = x_bar_loss_s.mean()
        v_loss_x_bar = crossEntropyLoss(
            x_bar_loss_v, vv1)  # ACGAN loss of x_bar(v1)
        g_vzx_loss = -x_bar_loss_s + v_loss_x_bar
        g_vzx_loss.backward(retain_graph=True)
        G_vzx_solver.step()
        # path 2: x-->G_xvz-->(v_bar, z_bar)-->G_vzx-->x_bar_bar--> D_xvs( (v,x_bar_bar), (v,x) ) + L1_loss(x_bar_bar, x)
        # path 2, update D_x
        D_xvs.zero_grad()
        G_xvz.zero_grad()
        G_vzx.zero_grad()
        # id-preserving module #
        # NOTE(review): LightCNN is re-built (and its checkpoint re-read)
        # on EVERY iteration; hoisting it above the loop would save a lot
        # of time — confirm before changing.
        LightCNN = ID_pre.define_R(gpu_ids=[0, 1, 2, 3, 4, 5, 6, 7],
                                   lightcnn_path='./ID_models/LightCNN_29Layers_V2_checkpoint.pth').cuda()
        cri_rec = nn.CosineEmbeddingLoss().cuda()
        if reconstruct_fake is True:
            # path 2A
            v_bar, z_bar = G_xvz(x_bar.detach())
            x_bar_ID_fea = LightCNN(x_bar)
            mbf_x_bar = resize_img_to_MBF_input(args, x_bar)
            mbf_x_bar_fea = MobileFacenet(mbf_x_bar)
            x_bar_bar = G_vzx(v1, z_bar, x_bar_ID_fea, mbf_x_bar_fea)
            # interpolation of x1 and x_bar_bar
            x_hat = eps*x1.data + (1-eps)*x_bar_bar.data
        else:
            # path 2B
            v_bar, z_bar = G_xvz(x1)  # view invariant part of x1
            x1_ID_fea = LightCNN(x1)
            mbf_x1 = resize_img_to_MBF_input(args, x1)
            mbf_x1_fea = MobileFacenet(mbf_x1)
            # x_bar_bar: reconstruction of x2
            x_bar_bar = G_vzx(v2, z_bar, x1_ID_fea, mbf_x1_fea)
            # interpolation of x2 and x_bar_bar
            x_hat = eps*x2.data + (1-eps)*x_bar_bar.data
            real_fea = LightCNN(x2).detach()  # id-preserving
            fake_fea = LightCNN(x_bar_bar)  # id-preserving
            # print("############## x2.shape: ",x2.shape)
        x_hat = Variable(x_hat, requires_grad=True)
        D_x_hat_v, D_x_hat_s = D_xvs(x_hat)
        grads = autograd.grad(outputs=D_x_hat_s,
                              inputs=x_hat,
                              grad_outputs=torch.ones(D_x_hat_s.size()).cuda(),
                              retain_graph=True,
                              create_graph=True,
                              only_inputs=True)[0]
        grad_norm = grads.pow(2).sum().sqrt()
        gp_loss = torch.mean((grad_norm - 1) ** 2)
        x_loss_v, x_loss_s = D_xvs(x2)
        x_loss_s = x_loss_s.mean()
        x_bar_bar_loss_v, x_bar_bar_loss_s = D_xvs(
            x_bar_bar.detach())  # x_bar_bar score
        x_bar_bar_loss_s = x_bar_bar_loss_s.mean()
        v_loss_x = crossEntropyLoss(x_loss_v, vv2)  # ACGAN loss of x2(v2)
        d_x_loss = x_bar_bar_loss_s - x_loss_s + 10. * gp_loss + v_loss_x
        d_x_loss.backward()
        D_xvs_solver.step()
        # 2st path, update G_xvz
        x_bar_bar_loss_v, x_bar_bar_loss_s = D_xvs(
            x_bar_bar)  # x_bar_bar score
        x_bar_bar_loss_s = x_bar_bar_loss_s.mean()
        if reconstruct_fake is True:
            x_l1_loss = L1_loss(x_bar_bar, x_bar.detach())
            v_loss_x_bar_bar = crossEntropyLoss(
                x_bar_bar_loss_v, vv1)  # ACGAN loss of x_bar_bar(v1)
        else:
            # L1 loss between x_bar_bar and x2
            x_l1_loss = L1_loss(x_bar_bar, x2)
            v_loss_x_bar_bar = crossEntropyLoss(
                x_bar_bar_loss_v, vv2)  # ACGAN loss of x_bar_bar(v2)
        # fake_fea = LightCNN(x_bar_bar) # id-preserving
        v_loss_x = crossEntropyLoss(v_bar, vv1)
        #g_loss = -x_bar_bar_loss_s + 4*x_l1_loss + v_loss_x_bar_bar + 0.01*v_loss_x
        # g_loss.backward()
        if reconstruct_fake is False:
            # id-preserving, loss
            l_g_rec = ID_weight * \
                cri_rec(fake_fea, real_fea, Variable(
                    torch.ones((real_fea.shape[0], 1))).cuda())
            # add symmetry loss
            symmetry_128_loss = args.symmetry_loss_weight * Sym_loss(x_bar_bar)
            # id-preserving, new appended loss, constraint G + E
            g_loss = -x_bar_bar_loss_s + 4*x_l1_loss + v_loss_x_bar_bar + \
                0.01*v_loss_x + l_g_rec + symmetry_128_loss
        else:
            g_loss = -x_bar_bar_loss_s + 4*x_l1_loss + v_loss_x_bar_bar + 0.01*v_loss_x
        g_loss.backward()
        if reconstruct_fake is False:
            G_vzx_solver.step()
        G_xvz_solver.step()
        # Console + log-file reporting of the current losses.
        if reconstruct_fake is False:
            print("Epoch: [%2d] [%4d/%4d] time: %4.2f, "
                  "loss_D_vx: %.2f, loss_D_x: %.2f, loss_G: %.2f, ID_loss: %.2f, sym_loss: %.2f"
                  % (epoch, i, len(data1), time.time() - start_time, d_xvs_loss.data,
                     d_x_loss.data, (g_loss.data-l_g_rec.data), l_g_rec.data,
                     symmetry_128_loss.data))
            loss_str = "Epoch: " + '[' + str(epoch) + ']' + '[' + str(i) + '/' + str(len(data1)) + ']' + \
                " loss_D_vx: " + str(d_xvs_loss.data) + " loss_D_x: " + \
                str(d_x_loss.data) + " loss_G: " + str((g_loss.data-l_g_rec.data)) + \
                " l_g_rec: " + str(l_g_rec.data) + \
                str(symmetry_128_loss.data) + "\n"
        else:
            print("Epoch: [%2d] [%4d/%4d] time: %4.2f, ""loss_D_vx: %.2f, loss_D_x: %.2f, loss_G: %.2f" %
                  (epoch, i, len(data1), time.time() - start_time, d_xvs_loss.data, d_x_loss.data, g_loss.data))
            loss_str = "Epoch: " + '[' + str(epoch) + ']' + '[' + str(i) + '/' + str(len(data1)) + ']' + " loss_D_vx: " + \
                str(d_xvs_loss.data) + " loss_D_x: " + \
                str(d_x_loss.data) + " loss_G: " + str(g_loss.data) + "\n"
        fout.write(loss_str)
        # Snapshot: dump sample images and all three nets every `snapshot` iterations.
        if i % snapshot == snapshot-1:
            vutils.save_image(x_bar.data,
                              '%s/x_bar_epoch_%03d_%04d.png' % (args.outf, epoch, i), normalize=True)
            vutils.save_image(x_bar_bar.data,
                              '%s/x_bar_bar_epoch_%03d_%04d.png' % (args.outf, epoch, i), normalize=True)
            vutils.save_image(x1.data,
                              '%s/x1_epoch_%03d_%04d.png' % (args.outf, epoch, i), normalize=True)
            vutils.save_image(x2.data,
                              '%s/x2_epoch_%03d_%04d.png' % (args.outf, epoch, i), normalize=True)
            torch.save(G_xvz.state_dict(), '%s/netG_xvz_epoch_%d_%d.pth' %
                       (args.outf, epoch, i))
            torch.save(G_vzx.state_dict(), '%s/netG_vzx_epoch_%d_%d.pth' %
                       (args.outf, epoch, i))
            torch.save(D_xvs.state_dict(), '%s/netD_xvs_epoch_%d_%d.pth' %
                       (args.outf, epoch, i))
|
# Generated by Django 3.1.2 on 2021-01-04 08:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Generated initial schema: Drive (bare file uploads) and Lic (an
    # insurance-policy record owned by an auth user). Do not hand-edit
    # generated migrations beyond comments.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Drive',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='')),
                ('added_on', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Lic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=50)),
                ('dob', models.DateField(max_length=50)),
                ('contact', models.IntegerField()),
                ('address', models.CharField(max_length=100)),
                ('policy_type', models.IntegerField(choices=[(0, 'jan_dhan_yojna'), (1, 'jivan_bima'), (2, 'jivan_laabh')])),
                ('policy_number', models.CharField(max_length=10, unique=True)),
                ('premium', models.IntegerField()),
                ('sum_assured', models.IntegerField()),
                ('year_of_policy', models.IntegerField()),
                ('beneficiary_name', models.CharField(max_length=50)),
                ('created_on', models.DateField()),
                ('renew_date', models.DateField()),
                ('status', models.IntegerField(choices=[(0, 'Deactive'), (1, 'Active')], default=1)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
def kratke_slova(list):
    """Return the words from the sequence that are shorter than 5 letters."""
    # NOTE(review): the parameter shadows the builtin `list`; name kept
    # unchanged for caller compatibility.
    return [slovo for slovo in list if len(slovo) < 5]
def slova_k(list):
    """Return the animals whose name starts with "k" (case-insensitive).

    BUG FIX: the original indexed ``slovo[0]`` unconditionally and raised
    IndexError on an empty string; empty strings are now skipped.
    """
    return [slovo for slovo in list if slovo and slovo[0].lower() == "k"]
def findin_slovo(list):
    """
    Ask the user for an animal name and report whether it is in the list.

    Returns a (found, word) tuple where `word` is the lower-cased input.
    """
    # Re-prompt until the answer has a plausible length (3-14 characters).
    while True:
        slovo = input("Napis zvire, ktere si myslíš ze je ve seznamu:\n")
        if not 2 < len(slovo) < 15:
            print("Napsal jsi nejakou blbost zkus to znova")
        else:
            break
    # Case-insensitive membership test against the supplied list.
    for zvire in list:
        if slovo.lower() == zvire:
            return (True, slovo.lower())
    return (False, slovo.lower())
# Task 0: the list of animals to work with.
zvirata = ["pes", "kočka", "králík", "had"]
# Task 1: animals with names shorter than 5 letters.
kratka_zvirata = kratke_slova(zvirata)
print(kratka_zvirata)
# Task 2: animals starting with "k".
kratka_k = slova_k(zvirata)
print(kratka_k)
# Task 3: ask the user for an animal and report whether it is in the list.
exist_zvire, slovo = findin_slovo(zvirata)
if exist_zvire:
    print("Zadane zvire", slovo, "je v seznamu.")
else:
    print("Zadane zvire", slovo, "neni v seznamu.")
|
import requests, threading, os
from colorama import Fore
# Console banner setup. `title` and `mode` are Windows cmd built-ins, so
# these two calls are no-ops on other platforms.
os.system(f'title [server leaver]')
os.system(f'mode 80,20')
print(f' {Fore.CYAN}server leaver \n\n')
print(f' {Fore.YELLOW}@9n8 {Fore.LIGHTMAGENTA_EX} \n')
def leave(guild_id, token):
    """Leave one guild via the Discord HTTP API and report success.

    BUG FIX: ``rq.status_code == 204 or 200`` was always truthy because
    ``or 200`` is evaluated as its own (truthy) operand; test membership
    in the set of success codes instead.
    """
    rq = requests.delete(f"https://discord.com/api/v9/users/@me/guilds/{guild_id}", headers={"Authorization": token})
    if rq.status_code in (200, 204):
        print(f"left {guild_id}")
def get_all_guilds(token) -> list:
    """Return the ids of every guild the token's account belongs to.

    BUG FIX: the annotation ``-> list()`` *called* list and annotated the
    function with an empty-list instance; ``-> list`` is the type.
    """
    rq = requests.get("https://discord.com/api/v9/users/@me/guilds", headers={"Authorization": token})
    return [guild["id"] for guild in rq.json()]
def start():
    # Prompt for a user token, then leave every guild concurrently,
    # one thread per guild (threads are fire-and-forget, never joined).
    token = input(f"Token: ")
    guilds = get_all_guilds(token)
    for guild in guilds:
        threading.Thread(target=leave, args=(guild, token,)).start()
start()
|
"""
Tests for utils
To run all tests in suite from commandline:
python -m unittest tests.utils
Specific test class:
python -m unittest tests.utils.TestTicker
"""
# import pandas as pd
# import numpy as np
from .context import yfinance as yf
from .context import session_gbl
import unittest
# import requests_cache
import tempfile
class TestUtils(unittest.TestCase):
    # Shared requests session slot; unused in this class but kept for
    # parity with the suite's other test classes.
    session = None

    @classmethod
    def setUpClass(cls):
        # Redirect the yfinance timezone cache into a throwaway directory
        # so the test never touches the real user cache.
        cls.tempCacheDir = tempfile.TemporaryDirectory()
        yf.set_tz_cache_location(cls.tempCacheDir.name)

    @classmethod
    def tearDownClass(cls):
        cls.tempCacheDir.cleanup()

    def test_storeTzNoRaise(self):
        # storing TZ to cache should never raise exception
        tkr = 'AMZN'
        tz1 = "America/New_York"
        # NOTE(review): "London/Europe" is not a valid IANA zone (that
        # would be "Europe/London") — presumably intentional, since the
        # point is that even bad values are stored without raising; confirm.
        tz2 = "London/Europe"
        cache = yf.utils.get_tz_cache()
        cache.store(tkr, tz1)
        cache.store(tkr, tz2)
def suite():
    """Build a TestSuite containing this module's utils tests.

    BUG FIX: ``unittest.TestCase`` must be constructed with the name of an
    existing test method; ``'Test utils'`` is not a method of TestUtils
    and raised ValueError at run time. Use the real method name.
    """
    suite = unittest.TestSuite()
    suite.addTest(TestUtils('test_storeTzNoRaise'))
    return suite
# Allow running this test module directly: discovers test_* methods.
if __name__ == '__main__':
    unittest.main()
|
# Generated by Django 3.2.2 on 2021-05-29 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a required `alt` text column to ImageAbout. `default=1` is the
    # one-off default typed at makemigrations time; preserve_default=False
    # drops it after existing rows are backfilled.
    # NOTE(review): an integer one-off default on a CharField is odd —
    # presumably "1" (a string) was intended; verify before squashing.

    dependencies = [
        ('contact', '0003_about_imageabout_social'),
    ]

    operations = [
        migrations.AddField(
            model_name='imageabout',
            name='alt',
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
    ]
|
# https://github.com/MatrixManAtYrService/MathScripts/blob/master/listTopologies.py
# Consider the discrete topology on X
# X_Discrete = { {} {a} {b} {c} {a,b} {a,c} {b,c} {a,b,c} }
# Let the set Y contain all nontrivial elements of X_discrete
# Y = { {a} {b} {c} {a,b} {a,c} {b,c} }
# Notice that for any subset Z of Y, the following union is a basis for some topology
# Z \union {} \union {a,b,c}
# There are 2^6=64 such generating sets, but several of them may generate the same topology
# For example, both of the bases below generate X_Discrete (once {} and X are included)
# Z_111000 = Z_56 = { {a}, {b}, {c} }
# Z_111111 = Z_63 = { {a}, {b}, {c}, {ab}, {ac}, {bc} }
# Let the relation ~ indicate that two sets Z_i and Z_j are equivalent if Z_i and Z_j
# generate the same topology.
#
# '~' is an equivalence relation for all the same reasons that '=' is.
# This program generates a topology for each of these bases, so the number (and size) of
# the resultant equivalence classes can be explored
# Display strings for each subset of X = {a, b, c}.
NullSet_str = "{} "
A_str = "{a} "
B_str = "{b} "
C_str = "{c} "
AB_str = "{a,b} "
AC_str = "{a,c} "
BC_str = "{b,c} "
ABC_str = "{a,b,c} "
# Each subset of X is one bit of an 8-bit mask; a basis/topology is a
# bitwise OR of these flags (most-significant bit = the empty set).
nullSet = 0b10000000
a = 0b01000000
b = 0b00100000
c = 0b00010000
ab = 0b00001000
ac = 0b00000100
bc = 0b00000010
abc = 0b00000001
# The only unions we must compute are those resulting in {a,b}, {a,c}, and {b,c}
# The only unions we must compute are those resulting in {a,b}, {a,c}, and {b,c}
def TryUnionAB(gen):
    """Close `gen` under the union {a} U {b} = {a,b}."""
    both_present = (gen & a) and (gen & b)
    return gen | ab if both_present else gen
def TryUnionAC(gen):
    """Close `gen` under the union {a} U {c} = {a,c}."""
    both_present = (gen & a) and (gen & c)
    return gen | ac if both_present else gen
def TryUnionBC(gen):
    """Close `gen` under the union {b} U {c} = {b,c}."""
    both_present = (gen & b) and (gen & c)
    return gen | bc if both_present else gen
# The only intersections we must compute are those resulting in {a} {b} or {c}
# The only intersections we must compute are those resulting in {a} {b} or {c}
def TryIntersectA(gen):
    """Close `gen` under the intersection {a,b} n {a,c} = {a}."""
    both_present = (gen & ab) and (gen & ac)
    return gen | a if both_present else gen
def TryIntersectB(gen):
    """Close `gen` under the intersection {a,b} n {b,c} = {b}."""
    both_present = (gen & ab) and (gen & bc)
    return gen | b if both_present else gen
def TryIntersectC(gen):
    """Close `gen` under the intersection {a,c} n {b,c} = {c}."""
    both_present = (gen & ac) and (gen & bc)
    return gen | c if both_present else gen
def GenerateTopology(Z):
    """Close the 6-bit generating set Z into a topology on {a,b,c}.

    Z's bits are shifted to line up with X_discrete's nontrivial sets,
    {} and {a,b,c} are forced in, and the union/intersection closure is
    iterated to a fixed point.
    """
    topo = (Z << 1) | nullSet | abc
    prev = None
    while topo != prev:
        prev = topo
        topo = TryUnionAB(topo)
        topo = TryUnionAC(topo)
        topo = TryUnionBC(topo)
        topo = TryIntersectA(topo)
        topo = TryIntersectB(topo)
        topo = TryIntersectC(topo)
    return topo
# NOTE: this script body uses Python 2 print statements.
# Collect the distinct topologies generated by all 64 bases Z_0..Z_63.
TopologiesOnX = set();
for Z in range(0,64):
    Topo = GenerateTopology(Z)
    TopologiesOnX.add(Topo)
    print "Z_{0:{fill}6b} generates topology: {1:b} ({2:d})".format(Z,Topo,Topo,fill='0')
n = 1
print "\nTopologies on X:\n"
# Pretty-print each distinct topology as the set of subsets its bits encode.
for T in TopologiesOnX:
    print'{0: <2}'.format("%d" %n),
    print "(%d):" %T,
    print "{ ",
    if ((T & nullSet) != 0):
        print NullSet_str,
    if ((T & a) != 0):
        print A_str,
    else:
        print "    ",
    if ((T & b) != 0):
        print B_str,
    else:
        print "    ",
    if ((T & c) != 0):
        print C_str,
    else:
        print "    ",
    if ((T & ab) != 0):
        print AB_str,
    else:
        print "      ",
    if ((T & ac) != 0):
        print AC_str,
    else:
        print "      ",
    if ((T & bc) != 0):
        print BC_str,
    else:
        print "      ",
    if ((T & abc) != 0):
        print ABC_str,
    print "}"
    n += 1
|
#'playlistId': 'PLcFcktZ0wnNn0VMRzVqV82s4vKpaTii_W',
# Import the modules
import requests
import pprint
import json
from YouTube_API_Key import get_my_api_key
# Define API KEY
DEVELOPER_KEY = get_my_api_key()
# Define Base URL
BASE_URL = 'https://www.googleapis.com/youtube/v3'
# Define Endpoint
ENDPOINT = 'commentThreads'
#ENDPOINT = 'playlistItems'
# Construct URL
final_url = BASE_URL + '/' + ENDPOINT
# Search parameters: all comment threads (with replies) for one channel.
PARAMETERS = {'part': 'snippet,replies',
              'allThreadsRelatedToChannelId':'UCxX9wt5FWQUAAz4UrysqK9A',
              'key': DEVELOPER_KEY}
# Make a request to the YouTube Data API
response = requests.get(url = final_url,
                        params = PARAMETERS)
# Decode the JSON response body
encoded_response = response.json()
pprint.pprint(encoded_response)
# Print the display text of each top-level comment
for item in encoded_response['items']:
    print(item['snippet']['topLevelComment']['snippet']['textDisplay'])
|
# There is a feature called lambda; you don't need to understand it right away.
# It lets you write a function very concisely — reach for it when you need it.
# If it feels confusing, you can get by without it.
# First, let's build a regular (non-lambda) function.
# The function below takes x and returns it with 1 added.
def addOne(x):
    """Return x plus one."""
    incremented = x + 1
    return incremented
# Passing 1 as x gives f(x) = x + 1, so 1 + 1 = 2.
print(addOne(1))
# The same function can be written in one line like this.
addOneLambda = (lambda x : x + 1)
# This is also 1 + 1, so it likewise prints 2.
print(addOneLambda(1))
# Lambdas are handy when a function is used once and thrown away.
# Below is a list holding 0..4 whose values we want to square.
numbers = [0,1,2,3,4]
# Without a lambda it takes three lines to define and call the function.
def square(x):
    """Return x squared (in code, multiplication is `*`)."""
    return x * x
# map walks the list and applies the given function to each element,
# collecting the results in a new list: here 0, 1, 2, 3 and 4 are each
# squared and the squares land in a fresh list.
square_result = list(map(square, numbers))
print(square_result)
# With a lambda the same thing is a single line.
lambda_square_result = list(map(lambda x: x * x, numbers))
print(lambda_square_result)
# Generated by Django 3.1 on 2021-03-12 19:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Links Comment to Post (nullable FK, cascade delete) and alters
    # Post.image.
    # NOTE(review): ImageField's height_field/width_field are supposed to
    # be *names of model fields* (strings) that receive the dimensions,
    # not pixel values; `500`/`100` here look like a bug in models.py
    # that this generated migration merely reflects — fix at the model.

    dependencies = [
        ('react_chat_app', '0006_post'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='post_connected',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='react_chat_app.post'),
        ),
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(height_field=500, null=True, upload_to='posts/images/%Y%M%D', width_field=100),
        ),
    ]
|
from django.urls import path
from ProyectoUniversidadApp import views
# Route table for ProyectoUniversidadApp: each entry wires a URL path to a
# view function and gives it a name for reverse() lookups.
urlpatterns = [
    path('index',views.index, name="Index"),
    path('credito',views.credito, name="Credito"),
    path('financiero',views.financiero, name="Financiero"),
    path('operacional',views.operacional, name="Operacional"),
]
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.http import JsonResponse, HttpResponse
from .models import Snippet, Tag, Library, Language
from .forms import SnippetForm
from users.models import User
@login_required(login_url='/accounts/login')
def snippets(request):
    """List every snippet."""
    # FIX: removed an unused `user` lookup that cost one extra DB query
    # per request (and could raise User.DoesNotExist) for no benefit.
    snippets = Snippet.objects.all()
    context = {'snippets': snippets}
    return render(request, 'core/snippets.html', context=context)
@login_required(login_url='/accounts/login')
def snippet_details(request, pk):
    """Show one snippet; responds 404 (not 500) for an unknown pk.

    FIX: dropped the unused `user` and `snippets` queries, and switched
    bare ``Snippet.objects.get`` to ``get_object_or_404`` for consistency
    with edit_snippet.
    """
    snippet = get_object_or_404(Snippet, pk=pk)
    context = {'snippet': snippet, 'pk': pk}
    return render(request, 'core/snippet_details.html', context=context)
@login_required(login_url='/accounts/login')
def add_snippet(request):
    """Render the add-snippet form and create a Snippet on valid POST."""
    snippets = Snippet.objects.all()
    if request.method == 'POST':
        form = SnippetForm(request.POST)
        # The submitted language is injected as the only valid choice so
        # validation accepts whatever the client picked.
        language = request.POST.get('language')
        form.fields['language'].choices = [(language, language)]
        if form.is_valid():
            # BUG FIX: `form.save` was referenced but never *called*, so
            # the snippet was silently never persisted.
            snippet = form.save()
            return redirect('snippets')
    else:
        form = SnippetForm()
    context = {'form': form, 'snippets': snippets}
    return render(request, 'core/add_snippet.html', context=context)
@login_required(login_url='/accounts/login')
def edit_snippet(request, pk):
    """Edit an existing snippet; responds 404 for an unknown pk.

    FIX: removed an unused `user` query and stale commented-out code.
    """
    snippets = Snippet.objects.all()
    snippet = get_object_or_404(Snippet, pk=pk)
    if request.method == 'POST':
        form = SnippetForm(request.POST, instance=snippet)
        if form.is_valid():
            snippet = form.save()
            return redirect('snippets')
    else:
        form = SnippetForm(instance=snippet)
    context = {'form':form, 'snippets': snippets}
    return render(request, 'core/edit_snippet.html', context=context)
@login_required(login_url='/accounts/login')
def snip_category(request, slug):
    """List the snippets written in the language identified by `slug`.

    BUG FIX: the original did ``Language = Language.objects.get(slug-slug)``,
    which (a) *subtracted* slug from itself instead of passing the keyword
    filter ``slug=slug``, (b) rebound the Language model name, and
    (c) left the lowercase ``language`` variable used below undefined.
    """
    language = get_object_or_404(Language, slug=slug)
    snip_category = Snippet.objects.filter(language=language)
    return render(request, 'core/snip_category.html', {'snippets': snip_category, 'language': language})
|
from collections import defaultdict
import pandas as pd
import numpy as np
def getMostSignificantLabel(LABELS_FILE_PATH, LABELS_SEPERATOR, LABELS_NAMES):
    """Return the label (column name) whose column sum is the largest.

    The CSV at LABELS_FILE_PATH is read with no header row; LABELS_NAMES
    supplies the column names and LABELS_SEPERATOR the delimiter. Columns
    are grouped by their sum and the first name in the max-sum group is
    returned (ties break on the sorted list of names, as before).

    FIX: removed dead locals (`shapeOfDF`, `listOfLabelSum`), debug
    prints, and the misleading `mostInsignificantLabel` name.
    """
    df = pd.read_csv(LABELS_FILE_PATH, header=None, sep=LABELS_SEPERATOR,
                     names=LABELS_NAMES)
    # Map: column sum -> list of column names having that sum.
    sum_to_columns = defaultdict(list)
    for column in df.columns:
        sum_to_columns[df[column].sum()].append(column)
    # max over items() compares by key first, i.e. by the column sum.
    top_sum, top_columns = max(sum_to_columns.items())
    return top_columns[0]
|
import xarray as xr
import numpy as np
# Flags selecting which regridded bathymetry product to validate against
# its reference file.
test_gebco=False
test_bedmachine=True
#gebco
if test_gebco:
    gebco1 = xr.open_dataset('../grid_gebco_30sec.nc')
    gebco2 = xr.open_dataset('../grid_gebco_30sec_original.nc')
    # Coordinates must agree within floating-point tolerance...
    assert np.allclose(gebco1.lon.data, gebco2.lon.data)
    assert np.allclose(gebco1.lat.data, gebco2.lat.data)
    # ...then report where they are not bitwise-equal.
    print(np.equal(gebco1.lon.data, gebco2.lon.data)[900:1000])
    print('number of mismatch points in lon array:')
    print(len(np.where(~np.equal(gebco1.lon.data, gebco2.lon.data))[0]))
    print(np.where(~np.equal(gebco1.lon.data, gebco2.lon.data)))
    print(gebco1.lon.data[np.where(~np.equal(gebco1.lon.data, gebco2.lon.data))])
    print('number of mismatch points in lat array:')
    print(len(np.where(~np.equal(gebco1.lat.data, gebco2.lat.data))[0]))
    #print(gebco1.lon.data[:100] - gebco2.lon.data[:100])
    #print(np.equal(gebco1.lat.data, gebco2.lat.data)[:100])
    #print(gebco1.lat.data[:100] - gebco2.lat.data[:100])
    #assert np.equal(gebco1.lon.data, gebco2.lon.data).all()
    #assert np.equal(gebco1.lat.data, gebco2.lat.data).all()
    #assert hash(gebco1.lon.data.tobytes()) == hash(gebco2.lon.data.tobytes())
    #assert hash(gebco1.lat.data.tobytes()) == hash(gebco2.lat.data.tobytes())
#bedmachine
if test_bedmachine:
    bm1 = xr.open_dataset('../grid_bedmachineAnt.nc')
    bm2 = xr.open_dataset('/local2/home/OM4_125_bedmachine_ant1/raw_data/bedmachine+geo.nc')
    assert np.allclose(bm1.lat.data, bm2.lat.data)
    assert np.allclose(bm1.lon.data, bm2.lon.data)
    assert np.allclose(bm1.y.data, bm2.y.data)
    assert np.allclose(bm1.x.data, bm2.x.data)
    # The byte-hash comparison demands exact equality, stricter than allclose.
    assert hash(bm1.lon.data.tobytes()) == hash(bm2.lon.data.tobytes())
    assert hash(bm1.lat.data.tobytes()) == hash(bm2.lat.data.tobytes())
|
# Generated by Django 2.1.15 on 2020-04-03 20:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated initial schema: NgoTable, a flat directory of NGO relief
    # initiatives (all free-text columns).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='NgoTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('STATE_UT', models.CharField(max_length=200)),
                ('SPECIFIC_LOCATION', models.CharField(max_length=200)),
                ('TARGET_GROUPS', models.CharField(max_length=200)),
                ('TYPE_OF_BENEFIT', models.CharField(max_length=200)),
                ('ORGANIZATION', models.CharField(max_length=200)),
                ('INITIATIVE', models.CharField(max_length=200)),
                ('WEBSITE', models.CharField(max_length=200)),
                ('DONATION_LINK', models.CharField(max_length=200)),
                ('DONATION_INFO', models.CharField(max_length=200)),
                ('FOREIGN_DONATION', models.CharField(max_length=200)),
                ('INFO', models.CharField(max_length=200)),
            ],
        ),
    ]
|
# Lab work 2: read three integers and echo each one that lies in [1, 3].
print("(LabWork 2)Богатько Александр В2 ИПЗ-12")
print("Ведите первое число: ")
A = int(input())
print("Введите второе число: ")
B = int(input())
print("Введите третье число: ")
C = int(input())
# Idiomatic chained comparisons replace `X>=1 and X<=3`.
if 1 <= A <= 3:
    print("Ответ: " + str(A))
if 1 <= B <= 3:
    print("Ответ: " + str(B))
if 1 <= C <= 3:
    print("Ответ: " + str(C))
input('Press ENTER to exit')
|
import numpy
# Read an n x m integer matrix from stdin (one row per line), then print
# the product of its column sums.
n,m = map(int,input().split())
arr = numpy.zeros((n,m),int)
for i in range(n):
    arr[i] = numpy.array(input().split(),int)
print(numpy.prod(numpy.sum(arr, axis = 0)))
import numpy as np
import cv2
# Open the default webcam (CAP_DSHOW backend is Windows DirectShow).
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
'''
Cartesian plane starts from top left in open cv
(0,0) denotes top left corner of screen
move down: increase height
move right : increase width
'''
while True:
    ret, frame = cap.read()
    # Capture resolution: property 4 = frame height, 3 = frame width.
    height = int(cap.get(4))
    width = int(cap.get(3))
    # Draw a line on the feed from (0,0) to the bottom right corner (width,height)
    # colour in BGR value (B,G,R)
    # 10 is the line weight
    img = cv2.line(frame, (0, 0), (width, height), (255, 0, 0), 10)
    # Draw a rectangle from top left corner to bottom right corner eg.(100, 100) to (300, 300)
    # 5 denotes the edges width
    # -1 in edge width to fill the rectangle
    img = cv2.rectangle(img, (10, 100), (100, 200), (0, 0, 255), 5)
    img = cv2.rectangle(img, (10, 220), (100, 320), (0, 255, 0), -1)
    # Draw a circle from centre (500, 200)
    # 5 denotes the circle width
    # -1 in edge width to fill the circle
    img = cv2.circle(img, (500, 200), 100, (50, 100, 150),5)
    img = cv2.circle(img, (500, 200), 50, (150, 200, 50), -1)
    # font: font style for text eg. italics, hershey
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Write text on the screen
    # 1.5 is the font size
    # 5 is the font weight
    img = cv2.putText(img, 'This is OpenCV', (200, 400), font, 1.5, (0, 0, 0), 5)
    cv2.imshow('Feed', img)
    # Quit cleanly when 'q' is pressed.
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
from insuletchallenge import datasets
from insuletchallenge import models
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import concatenate
import numpy as np
import argparse
import pandas as pd
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-tr", "--train", type=str, required=True,
                help="path to train dataset")
ap.add_argument("-vl", "--pred", type=str, required=True,
                help="path to train dataset")
# NOTE(review): ap.parse_args() is never called, so --train/--pred are
# ignored and the CSV paths below are hard-coded — confirm intent.
# load dataset attributes
dt = datasets.load_train_attributes('training.csv')
# load images to train then normalize pixel values
img_train = datasets.load_images('training.csv')
img_train = img_train / 255.0
# split test and train data using 75% of data for training
split = train_test_split(dt, img_train, test_size=0.25)
(trainX, testX, trainImgX, testImgX) = split
# find largest value in prediction variable and then
# scale it in the range [0,1]
maxValue = dt['target'].max()
trainY = trainX['target']/maxValue
testY = testX['target']/maxValue
# process attributes to perform min-max scaling for continuous
# features and stack them to categorical features
(trainX, testX) = datasets.process_train_attributes(trainX, testX)
# generate mlp and cnn architecture models
mlp = models.create_mlp(trainX.shape[1], regress=False)
cnn = models.create_cnn(512, 512, 3)#,regress=False)
# input of final layers as the output of both created
# models (cnn and mlp)
combinedInput = concatenate([mlp.output, cnn.output])
# add fully connected layer with two dense
# layers with the final one as our regressor
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)
# final model accepts the two types of data for features extracted, also
# it accepts the images as input of the layer which at the end outputs a single value
# which is the variable that we are trying to predict
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
# build the model architecture using mean_absolute_percentage_error
# as the loss function, which implies that we want to minimize the
# percentage difference between our target predictions and the actual target value
opt = Adam()
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
# train model
print("[INFO] training model...")
model.fit(x=[trainX, trainImgX], y=trainY,validation_data=([testX, testImgX], testY),epochs=20, batch_size=4)
# predictions
print("[INFO] predicting house prices...")
preds = model.predict([testX, testImgX])
# differences between predicted and actual from test data,
# then compute % difference between predicted and actual target values
# and the absolute %diff
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
# models stats
print("[INFO] avg. target value: {}, std target value: {}".format(
    dt["target"].mean(),
    dt["target"].std()))
print("[INFO] Absolute Percentage Difference mean: {:.2f}%, and std: {:.2f}%".format(mean, std))
# load input data to predict
dt_validate = datasets.load_predict_attributes('test.csv')
# load images then normalize pixel values for input prediction
img_train_validate = datasets.load_images('test.csv')
validateImgX = img_train_validate / 255.0
# process attributes and perform the same transformations
# as with train and test data
validateX = datasets.process_predict_attributes(dt_validate)
preds_predict = model.predict([validateX, validateImgX])
print(preds_predict.flatten()*maxValue)
# Write the rescaled predictions next to the raw test rows.
gomez_answer = pd.read_csv('test.csv')
gomez_answer['target_predict'] = preds_predict.flatten()*maxValue
gomez_answer.to_csv('gomez-answer.csv')
# Re-score the full training set with the fitted model and report RMSE.
data = pd.read_csv('training.csv')
images = datasets.load_images('training.csv')
data_predict = datasets.process_predict_attributes(data)
p = model.predict([data_predict, images])
data['predict_target'] = p.flatten()*maxValue
rmse = np.sqrt((np.sum(((np.array(data['target']) - np.array(data['predict_target']))**2)))/len(data['target']))
data.to_csv('p.csv')
print(rmse)
|
from io import BytesIO
import random
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
@app.route('/plot.png')
def plot_png():
    # Render a fresh random-data figure and serve it as PNG bytes.
    # `app` is the Flask application object defined elsewhere in this file.
    fig = create_figure()
    output = BytesIO()
    # The Agg canvas rasterizes the figure directly into the in-memory buffer.
    FigureCanvas(fig).print_png(output)
    return Response(output.getvalue(), mimetype='image/png')
def create_figure():
    """Build a Figure with one axes plotting 100 random integers in [1, 50]."""
    fig = Figure()
    ax = fig.add_subplot(1, 1, 1)
    xs = range(100)
    ax.plot(xs, [random.randint(1, 50) for _ in xs])
    return fig
|
def average(arr):
    """Return the mean of the *distinct* values in arr (duplicates count once)."""
    distinct = set(arr)
    return sum(distinct) / len(distinct)
# Read the element count (consumed but unused beyond the first input line) and
# the space-separated values, then print the average of the distinct values.
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-09-15 19:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the `Show` model to `Tv_show` within the tv_shows_app app.
    dependencies = [
        ('tv_shows_app', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Show',
            new_name='Tv_show',
        ),
    ]
|
import numpy as np
from scipy.optimize import minimize
def slabr(n,n1,n2,d,freq_range):
    """Complex reflection coefficient of a slab (index n, thickness d) between
    media of indices n1 and n2, evaluated over freq_range (Hz). All units mks."""
    r12 = (n1 - n) / (n1 + n)
    r23 = (n - n2) / (n + n2)
    # Round-trip propagation phase through the slab (c = 2.9979e8 m/s).
    phase = np.exp(4 * np.pi * 1j * n * d / 2.9979e8 * freq_range)
    return (r12 + r23 * phase) / (1 + r12 * r23 * phase)
def slabrr(n,r12,r23,d,freq):
    """Slab reflection coefficient from precomputed interface coefficients.

    n    : complex refractive index of the slab
    r12  : front-interface reflection coefficient
    r23  : back-interface reflection coefficient
    d    : slab thickness (m)
    freq : frequency (Hz)
    """
    phase = np.exp(4*np.pi*1j*d*n*freq/2.9979e8)
    r = (r12 + r23*phase)/(1 + r12*r23*phase)
    # Fixed: the original computed r but fell through without returning it,
    # so every call returned None.
    return r
'''
calculate refractive index from reflectance
all units mks
'''
def refr_errfun(r_data,n,n1,n2,d,freq):
    """Squared-magnitude error between modeled slab reflectance and measured data."""
    residual = slabr(n, n1, n2, d, freq) - r_data
    return abs(residual) ** 2
def ncalc(r_data,nguess,n1,n2,d,freq_range):
    """Fit the complex refractive index at each frequency from reflectance data.

    Runs a per-frequency Nelder-Mead minimization of refr_errfun, treating the
    real and imaginary parts of n as the two optimization variables.

    r_data     : measured complex reflectance per frequency point
    nguess     : complex starting guess for the index
    n1, n2     : indices of the surrounding media (n1 may vary per frequency)
    d          : slab thickness (m)
    freq_range : frequencies (Hz)
    Returns a numpy array of fitted complex indices.
    """
    guess_array=np.array([nguess.real, nguess.imag])
    nout=np.array([])
    ii=0
    if len(n1) != 1:
        # Fixed: these were Python 2 `print` statements, which made the whole
        # module a syntax error under Python 3; single-argument print() is
        # valid in both.
        print(n1[ii])
        print(guess_array)
        for freq in freq_range:
            # NOTE(review): the loop variable `freq` is unused; the error
            # function indexes freq_range[ii] instead — confirm intended.
            new_errfun = lambda x: refr_errfun(r_data[ii],x[0]+1j*x[1],n1[ii,0],n2,d,freq_range[ii])
            outp=minimize(new_errfun,guess_array,method='Nelder-Mead')
            nout=np.append(nout,[outp.x[0]+1j*outp.x[1]])
            ii=ii+1
    '''
    else:
        for freq in freq_range:
            new_errfun = lambda x: refr_errfun(r_data[ii],x[0]+1j*x[1],n1,n2,d,freq_range[ii])
            outp=minimize(new_errfun,guess_array,method='Nelder-Mead')
            nout=np.append(nout,[outp.x[0]+1j*outp.x[1]])
            ii=ii+1
    '''
    return nout
def sheet_refl(n1,n2,sigma):
    """Reflection coefficient at an n1/n2 interface carrying a conducting sheet.

    sigma is the surface (sheet) conductivity.
    """
    # sigma/(c*eps0) expresses the sheet conductivity as an index-like term.
    y = sigma/2.9979e8/8.85e-12
    return (n2 + y - n1) / (n2 + y + n1)
def sheet_trans(n1,n2,sigma):
    """Transmittance through an n1/n2 interface carrying a conducting sheet."""
    # sigma/(c*eps0) expresses the sheet conductivity as an index-like term.
    y = sigma/2.9979e8/8.85e-12
    return np.sqrt(n2/n1) * 2*n1 / (n1 + n2 + y)
#air on both sides
#slab of index n in between with surface sheets of conductance sigma
def hr_rv(freq,sigma_dc):
    """Hagen-Rubens reflectivity approximation from the DC conductivity."""
    skin_term = np.sqrt(4*8.85e-12*np.pi*freq/sigma_dc)
    return 1 - 2*skin_term
def slabr_sheet(n,sigma,d,freq):
    # Reflectance of a slab of index n with conducting surface sheets, with
    # air on both sides (see the comments above these helpers).
    # NOTE(review): r12 here is computed with sheet_trans (a transmission
    # coefficient); sheet_refl looks like the intended call — confirm.
    r12=sheet_trans(1,1,sigma)
    return (r12*(1-np.exp(4*np.pi*1j*n*d/2.9979e8*freq))/
    (1-r12**2*np.exp(4*np.pi*1j*n*d/2.9979e8*freq)))
def ncalc_back(r_data,n1,n_front,d_front,n2,d,freq_range,nguess):
    """Fit the complex index of a back layer behind a known front layer.

    Per frequency, minimizes |r_model - r_data|^2 with Nelder-Mead, where the
    model combines the front-surface reflection r12_front with the back-slab
    reflection (slabr) phase-shifted by propagation through the front layer.
    n_front/d_front describe the known front layer; nguess seeds the fit.
    """
    r12_front=(n1-n_front)/(n1+n_front)
    guess_array=np.array([nguess.real, nguess.imag])
    nout=np.array([])
    ii=0
    for freq in freq_range:
        # x = [Re(n_back), Im(n_back)]; ii tracks the current frequency index.
        new_errfun = lambda x: (
            abs(
            (r12_front[ii]+slabr(x[0]+x[1]*1j,n_front[ii],n2,d,freq)*np.exp(
                4*np.pi*1j*n_front[ii]*d_front*freq/2.9979e8
                )
            )/
            (1+r12_front[ii]*slabr(x[0]+x[1]*1j,n_front[ii],n2,d,freq)*np.exp(
                4*np.pi*1j*n_front[ii]*d_front*freq/2.9979e8
                )
            ) - r_data[ii])**2
            )
        outp=minimize(new_errfun,guess_array,method='Nelder-Mead')
        nout=np.append(nout,[outp.x[0]+1j*outp.x[1]])
        ii=ii+1
    return nout
#smart wrapper for ncalc
#calculates full n spectrum then alters guess to converge on a particular branch
'''
def nsm
'''
|
#Metar Bug
#Patrick Pragman
#Ciego Services
#January 31, 2018
#Flask App to watch for changes in the weather
from flask import Flask, render_template, request, jsonify
from metar import Metar
from get_metar import get_metar
from local_config import Path
app = Flask(__name__)
@app.route('/')
def index():
    # Landing page: serves the static template with the station-code form.
    return render_template('index.html')
@app.route('/get_metar', methods=['POST'])
def get_report():
    """Fetch and decode a METAR for the POSTed station code.

    Returns JSON with ICAO, raw report, visibility, ceiling (CX) and wind
    speed, or an ERROR payload when the report is unavailable or decoding
    fails.
    """
    req_data = request.form['code']
    try:
        raw_report = get_metar(req_data)
        if raw_report[0]:
            # The report returned; decode it and build the response.
            obs = Metar.Metar(raw_report[1])
            lowest = 100000  # sentinel "unlimited" ceiling
            try:
                for skc in obs.sky:
                    # Find the lowest ceiling: a ceiling is the lowest broken
                    # or overcast layer.
                    ht = int(skc[1].value())
                    if (skc[0] == "BKN" or skc[0] == "OVC"):
                        if ht < lowest:
                            lowest = skc[1].value()
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Missing sky data is fine —
                # just keep the 100k ft sentinel as the ceiling.
                pass
            # Validate the optional fields before sending them back; they may
            # be absent from the decoded report.
            try:
                vis = obs.vis.value()
            except Exception:
                vis = 0  # was `000` — same value, clearer literal
            try:
                wind = obs.wind_speed.value()
            except Exception:
                wind = 0
            response = {"ICAO": obs.station_id,
                        "RAW": obs.code,
                        "VIS": vis,
                        "CX": lowest,
                        "WIND": wind,
                        "ERROR": False,
                        "ERROR_TYPE": "NA"}
            return jsonify(response)
        if not raw_report[0]:
            # The report is unavailable; relay the failure reason to the user.
            return jsonify({"ERROR" : True,
                            "ICAO" : req_data,
                            "ERROR_TYPE": raw_report[1]})
    except Exception:
        # If anything unexpected fails we still need to send something back to
        # the frontend; call all of these "server errors".
        # TODO(review): log the exception here instead of discarding it.
        return jsonify({"ERROR" : True,
                        "ICAO" : req_data,
                        "ERROR_TYPE": "SERVER ERROR"})
@app.route('/get_stations',methods = ['GET'])
def get_stations():
    """Serve the raw contents of the configured stations list file."""
    with open(Path.stations, "r") as station_list:
        return station_list.read()
if __name__ == "__main__":
    # Run the Flask development server when executed directly.
    app.run()
|
# utf-8
# exercício 103
def ficha(name='<desconhecido>', score=0):
    """Print a one-line report of a player's goals in the championship."""
    message = f'O jogador {name} fez {score} gol(s) no campeonato.'
    print(message)
# main program
# Read the player's name and goal count; a non-numeric goal count falls back
# to 0 and a blank name falls back to ficha()'s default placeholder.
n = str(input('Nome do jogador: '))
g = str(input('Número de Gols: '))
if g.isnumeric():
    g = int(g)
else:
    g = 0
if n.strip() == '':
    ficha(score=g)
else:
    ficha(n, g)
|
import turtle as t
def tp(x, y):
    """Jump the turtle to (x, y) without drawing a line."""
    t.penup()
    t.goto(x, y)
    t.pendown()
# Turtle drawing script: draws a framed picture of a rocket/plane with flames.
t.screensize(1920,1080,"black")
t.setup(1920,1080,0,0)
t.pensize(1)
t.speed(10)
t.pencolor("white")
# Outer white frame.
tp(-300,300)
t.color('white','white')
t.begin_fill()
for i in range(4):
    t.fd(600)
    t.rt(90)
t.end_fill()
# Inner black canvas.
tp(-290,290)
t.color('black','black')
t.begin_fill()
for a in range(4):
    t.fd(580)
    t.rt(90)
t.end_fill()
# back cover
tp(-172,-172)
t.rt(45)
t.begin_fill()
t.color('white','white')
t.fd(15)
t.lt(60)
t.fd(20)
t.lt(120)
t.fd(30)
tp(-172,-172)
t.end_fill()
t.begin_fill()
t.fd(15)
t.rt(60)
t.fd(20)
t.rt(120)
t.fd(30)
tp(-172,-172)
t.end_fill()
# back cover (end)
# main body
tp(-155,-155)
t.begin_fill()
t.fd(30)
for b in range(2):
    t.lt(90)
    t.fd(160)
    t.lt(90)
    t.fd(60)
t.end_fill()
# main body (end)
# tail fins
t.begin_fill()
t.rt(10)
t.fd(50)
t.lt(80)
t.fd(10)
t.lt(70)
t.fd(100)
t.goto(-155,-155)
t.end_fill()
t.begin_fill()
t.lt(40)
t.fd(30)
t.lt(10)
t.fd(50)
t.rt(80)
t.fd(10)
t.rt(70)
t.fd(100)
t.goto(-155,-155)
t.end_fill()
# Black stripe down the body.
tp(-140,-140)
t.begin_fill()
t.color('black','black')
t.rt(40)
t.fd(10)
for c in range(2):
    t.lt(90)
    t.fd(140)
    t.lt(90)
    t.fd(20)
t.end_fill()
t.pu()
t.lt(90)
t.fd(145)
t.pd()
t.color('white','white')
t.begin_fill()
t.rt(90)
t.fd(20)
for d in range(4):
    t.lt(45)
    t.fd(10)
    t.lt(45)
    t.fd(60)
t.end_fill()
t.lt(45)
t.fd(10)
t.lt(45)
t.fd(60)
# Nose cone triangle.
t.begin_fill()
t.lt(20)
t.fd(108.389)
t.lt(140)
t.fd(108.389)
t.lt(110)
t.fd(74.144)
t.end_fill()
# Window/detail outline drawn with a thicker black pen.
t.pu()
t.lt(110)
t.fd(90)
t.lt(40)
t.pensize(5)
t.pencolor('black')
t.pd()
t.fd(100)
t.lt(90)
t.fd(17)
t.lt(90)
t.fd(200)
t.rt(90)
t.fd(17)
t.rt(90)
t.fd(200)
t.lt(90)
t.fd(18)
t.lt(90)
t.fd(200)
tp(-155,-155)
t.pensize(1)
# Two small black notches at the body base.
t.pu()
t.rt(150)
t.fd(30)
t.rt(90)
t.pd()
t.color('black','black')
t.begin_fill()
for d in range(2):
    t.fd(50)
    t.lt(90)
    t.fd(5)
    t.lt(90)
t.end_fill()
tp(-155,-155)
t.pu()
t.rt(90)
t.fd(30)
t.lt(90)
t.pd()
t.color('black','black')
t.begin_fill()
for d in range(2):
    t.fd(50)
    t.rt(90)
    t.fd(5)
    t.rt(90)
t.end_fill()
# Orange flame polygon.
t.pencolor('orange')
tp(250,250)
t.colormode(255)
t.pencolor(227,101,35)
# NOTE(review): `t.fillcolor` is accessed but not called — this line is a
# no-op; a call like t.fillcolor(227,101,35) was probably intended.
t.fillcolor
t.begin_fill()
t.goto(-100,100)
t.goto(-230,70)
t.goto(-150,150)
t.goto(-320,140)
t.goto(-75,225)
t.goto(-280,280)
t.goto(20,300)
t.goto(250,250)
t.end_fill()
# Keep the window open until the user closes it.
t.done()
|
# Exploratory data analysis of a delivery-orders CSV.
# NOTE(review): this file uses Python 2 `print` statements throughout and will
# not run under Python 3 without converting them to print() calls.
import pandas as pd
data_file = r'data.csv'
data_df = pd.read_csv(data_file)
print data_df.shape # (484192, 15)
print data_df.columns
'''columns_names = [u'id', u'pickup_user_id', u'total_amount', u'pickup_user_address_id',
       u'created_at.x', u'ki', u'cost_for_two', u'created_at.y',
       u'driver_assigned_at', u'reached_shop_at', u'shipped_at',
       u'reached_customer_at', u'complete_at', u'linked_at', u'item_name']
2 columns are extra. the columns : 'ki', 'created_at.x'
'''
# Save a small sample for eyeballing.
sample_df = data_df.head(100)
sample_df.to_csv('sample_data.csv', index = False)
data_df.describe()
''' gives stats about the numerical variables, only meaningful are from pickup_user_address_id, cost_from_two'''
print data_df.dtypes
obj_var = data_df.dtypes.index[data_df.dtypes == 'object']
'''[u'created_at.x', u'ki', u'created_at.y', u'driver_assigned_at', u'reached_shop_at', u'shipped_at',
    u'reached_customer_at', u'complete_at', u'linked_at', u'item_name']'''
non_obj_var = data_df.dtypes.index[data_df.dtypes != 'object']
'''[u'id', u'pickup_user_id', u'total_amount', u'pickup_user_address_id', u'cost_for_two']'''
'''analysis for variable : id'''
val_c_id = data_df['id'].value_counts()
print len(val_c_id)# 195319
print sum(val_c_id) # the sum is eaqual to the total rows in the data, so no missing value
val_c_id.value_counts().plot.bar() # analysing the duplicacies in the the id
#occurance of id varies from 1 to 60.
'''analysis for variable : pickup_user_id'''
val_c_user_id = data_df['pickup_user_id'].value_counts()
print len(val_c_user_id)# 2847
print sum(val_c_user_id) # the sum is eaqual to the total rows in the data, so no missing value
'''analysis for variable : total_amount'''
print data_df['total_amount'].mean() # 578.257935694931
print data_df['total_amount'].std() # 615.4290590777657
print data_df['total_amount'].min() # 30
print data_df['total_amount'].skew() # 8.935612965123536 highly skewed
data_df.sort_values(['total_amount'])['total_amount'].reset_index(drop = True).plot()
'''
there is a unique behaviour in the variable:
there are high occurance of the heigh amount :
29070.48 : 13
17914.76 : 18
'''
'''analysis for variable : pickup_user_address_id'''
val_c_user_addr_id = data_df['pickup_user_address_id'].value_counts()
print len(val_c_user_addr_id)# 3194
print sum(val_c_user_addr_id) # the sum is eaqual to the total rows in the data, so no missing value
'''analysis for variable : created_at.y'''
sum(data_df['created_at.y'].isnull())
print data_df['created_at.y'].head() #timestamp
# data_df['created_at.y'] = pd.to_datetime(data_df['created_at.y'])
'''analysis for variable : created_at.x'''
print data_df['created_at.x'].head() # null values
sum(data_df['created_at.x'].isnull()) # number of null values 372477
data_df['created_at.x'][~data_df['created_at.x'].isnull()].head()
# data_df['created_at.x'] = pd.to_datetime(data_df['created_at.x'])
# the null values will be NaT not a time
'''analysis for variable : ki'''
print data_df['ki'].head() # null values
sum(data_df['ki'].isnull()) # number of null values 372477
data_df['ki'][~data_df['ki'].isnull()].head()
data_df['ki'].value_counts(dropna=False)
'''analysis for variable : cost_for_two'''
print data_df['cost_for_two'].head() # null values
sum(data_df['cost_for_two'].isnull()) # number of null values 372477
data_df['cost_for_two'][~data_df['cost_for_two'].isnull()].head()
data_df['cost_for_two'].value_counts(dropna=False)
'''analysis for variable : driver_assigned_at'''
print data_df['driver_assigned_at'].head() # null values
sum(data_df['driver_assigned_at'].isnull()) # number of null values 281
# data_df['driver_assigned_at'] = pd.to_datetime(data_df['driver_assigned_at'])
'''analysis for variable : reached_shop_at'''
print data_df['reached_shop_at'].head() # null values
sum(data_df['reached_shop_at'].isnull()) # number of null values 0
# data_df['reached_shop_at'] = pd.to_datetime(data_df['reached_shop_at'])
'''analysis for variable : shipped_at'''
print data_df['shipped_at'].head() # null values
sum(data_df['shipped_at'].isnull()) # number of null values 290
# data_df['shipped_at'] = pd.to_datetime(data_df['shipped_at'])
'''analysis for variable : reached_customer_at'''
print data_df['reached_customer_at'].head() # null values
sum(data_df['reached_customer_at'].isnull()) # number of null values 1391
# data_df['reached_customer_at'] = pd.to_datetime(data_df['reached_customer_at'])
'''analysis for variable : complete_at'''
print data_df['complete_at'].head() # null values
sum(data_df['complete_at'].isnull()) # number of null values 2013
# data_df['complete_at'] = pd.to_datetime(data_df['complete_at'])
'''analysis for variable : linked_at'''
print data_df['linked_at'].head() # null values
sum(data_df['linked_at'].isnull()) # number of null values 0
# data_df['linked_at'] = pd.to_datetime(data_df['linked_at'])
'''analysis for variable : item_name'''
print data_df['item_name'].head() # null values
sum(data_df['item_name'].isnull()) # number of null values 0
val_c_item_nm = data_df['item_name'].value_counts()
print len(val_c_item_nm)# 42554
# NOTE(review): the 'kpt' column below is never created in this file; it is
# presumably a timedelta derived elsewhere (e.g. shipped_at - reached_shop_at)
# — confirm where it comes from.
data_df['kpt'].min() # should be > 0
print data_df[data_df['kpt']< pd.Timedelta(0)] # 2 values
# need to drop these 2 entries
print data_df[data_df['kpt']> pd.Timedelta(days = 1)].shape # 16 values
print data_df[data_df['kpt']> pd.Timedelta(hours = 5)].shape # 46 values
print data_df[data_df['kpt']> pd.Timedelta(hours = 2)].shape # 468 values
print data_df[data_df['kpt']> pd.Timedelta(hours = 1)].shape # 12027 values
# need to drop the 46 entries
print 'number of null values : ', sum(data_df['kpt'].isnull()) # 290
# need to drop the 290 entries
|
import json
import logging
import os
import pathlib
import sys
from collections import OrderedDict
from datetime import datetime
import click
import humanfriendly
import pandas
__version__ = '1.1.5'
logger = logging.getLogger()
@click.group()
@click.option('--debug', is_flag=True)
@click.pass_context
def cli(ctx, debug):
    """
    This is a tool to generate an excel file based on a provided source excel and transformation mapping
    """
    # Configure root logging once for every subcommand; --debug raises verbosity.
    log_format = '%(asctime)s|%(levelname)s|%(name)s|(%(funcName)s):-%(message)s'
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO, stream=sys.stdout, format=log_format)
    # `version` just prints and exits, so skip the startup banner for it.
    if ctx.invoked_subcommand not in ['version']:
        logger.info(f'{"-" * 20} Starting Logging for {ctx.invoked_subcommand} (v{__version__}) {"-" * 20}')
def process_column_mappings(source_df, column_mappings):
    """Apply a column-level mapping spec to a copy of source_df.

    Each mapping entry is [source_name, dest_name, dest_position]; '_' means
    "use the default", '*' includes all remaining columns, and a '[-]' prefix
    excludes a column. Returns a dict with the renamed 'df', an 'index_map' of
    positioned columns, and the 'pending_columns' matched by the wildcard.
    """
    df = source_df.copy(deep=True)
    rename_map = {}
    excluded = []
    wildcard = False
    # Pass 1: collect renames, explicit exclusions, and the wildcard flag.
    for spec in column_mappings:
        src = spec[0]
        if src.startswith('[-]'):
            excluded.append(src[3:])
        elif src == '*':
            wildcard = True
        else:
            rename_map[src] = src if spec[1] == '_' else spec[1]
    # Pass 2: build the destination-position map; positioned columns are
    # excluded from the wildcard set.
    index_map = {'_': []}
    for mapping in column_mappings:
        index = mapping[2]
        dest = mapping[0] if mapping[1] == '_' else mapping[1]
        if index == '_':
            if dest != '*' and not dest.startswith('[-]'):
                index_map['_'].append(dest)
        elif index in index_map:
            raise Exception(f'Cannot have same column index for multiple columns, please check your column mapping\n'
                            f'{index=}, {mapping=}')
        else:
            index_map[index] = dest
            excluded.append(dest)
    df = df.rename(columns=rename_map)
    leftover = list(set(df.columns).difference(excluded)) if wildcard else []
    return {'df': df, 'index_map': index_map, 'pending_columns': leftover}
def process_mappings(source_df_dict, mappings):
    """Apply every worksheet mapping in `mappings` to the source dataframes.

    Returns {source_sheet_name: {'source': original df copy,
                                 'dest': {dest_sheet_name: processed mapping}}}.
    """
    worksheets_dict = {}
    for mapping in mappings:
        count = -1
        for sheet_identifier, sheet_mapping in mapping.items():
            count += 1
            # Sheets can be addressed by name, 1-based position, or '_'.
            entry = get_dict_entry(count, sheet_identifier, source_df_dict)
            sheet_name = entry.get('name')
            if sheet_name not in worksheets_dict:
                # noinspection PyArgumentList
                worksheets_dict.update({sheet_name: {
                    'source': entry.get('item').copy(deep=True),
                    'dest': {}
                }})
            # '_' or a missing dest name means "same name as the source sheet".
            dest_sheet_name = sheet_mapping.get('dest_worksheet_name') or sheet_name
            dest_sheet_name = sheet_name if dest_sheet_name == '_' else dest_sheet_name
            mapping_processed = process_column_mappings(worksheets_dict.get(sheet_name).get('source'),
                                                        sheet_mapping.get('columns'))
            mapping_processed.update({'merge_columns': sheet_mapping.get('merge_columns')})
            worksheets_dict[sheet_name]['dest'].update({dest_sheet_name: mapping_processed})
    return worksheets_dict
@cli.command()
@click.argument('source', nargs=-1)
@click.argument('mapping')
@click.option('-o', '--output', help='relative or absolute path to output file')
@click.pass_context
def transform(ctx, **kwargs):
    # Thin CLI wrapper; all of the work happens in transform_spreadsheets().
    transform_spreadsheets(**kwargs)
def transform_spreadsheets(source, mapping, output):
    """Produces a new spreadsheet with transformation mapping applied"""
    s_time = datetime.now()
    try:
        source_paths = [get_path(x) for x in source]
        mapping_path = get_path(mapping, make_dir=False)
        output_path = get_path(output or 'excel_transform_output.xlsx', make_dir=True)
        # Ordered so positional sheet identifiers resolve deterministically.
        source_dfs = OrderedDict()
        try:
            logger.info('processing mappings file')
            with open(mapping_path) as f:
                mappings = json.load(f)
        except Exception as e:
            logger.critical(f'Encountered error trying to read the mapping file:\n{e}')
            sys.exit()
        logger.info('processing source files')
        for source_path in source_paths:
            try:
                # sheet_name=None loads every worksheet as {name: DataFrame}.
                source_dfs.update({source_path.stem: pandas.read_excel(source_path, sheet_name=None)})
            except Exception as e:
                logger.critical(f'Encountered error processing source file: {source_path}\n{e}')
                sys.exit()
        count = -1
        processed_source = {}
        # NOTE(review): the loop variable `mapping` shadows the function
        # parameter of the same name.
        for identifier, mapping in mappings.items():
            # Keys starting with '__' (e.g. __instructions__) are ignored.
            if '__' == identifier[:2]:
                continue
            count += 1
            entry = get_dict_entry(count, identifier, source_dfs)
            logger.info(f'processing mappings for: {entry.get("name")}')
            processed_source.update({entry.get('name'): process_mappings(entry.get("item"), mapping)})
        logger.info('grouping processed source data by destination worksheet')
        dest_worksheet_dict = {}
        for worksheets in processed_source.values():
            for data in worksheets.values():
                for dest_worksheet_name, dest_data in data['dest'].items():
                    if dest_worksheet_name not in dest_worksheet_dict:
                        dest_worksheet_dict[dest_worksheet_name] = []
                    dest_worksheet_dict[dest_worksheet_name].append(dest_data)
        logger.info('merging destination worksheet data')
        out_dict = {}
        for dest_worksheet_name, data_list in dest_worksheet_dict.items():
            temp_df = pandas.DataFrame()
            # '_': default-ordered names, '*': wildcard names,
            # 'indexed': {position: name} for explicitly positioned columns.
            columns = {'_': [], '*': [], 'indexed': {}}
            for data in data_list:
                columns['_'].extend(data['index_map']['_'])
                columns['*'].extend(data['pending_columns'])
                for index, column_name in data['index_map'].items():
                    if index == '_':
                        continue
                    if index not in columns['indexed']:
                        columns['indexed'][index] = column_name
                    else:
                        raise Exception(f'Cannot have same column index for multiple columns, please check your'
                                        f' column mapping\n{dest_worksheet_name=}, {column_name=}, {index=}')
                if temp_df.empty:
                    temp_df = data.get('df')
                else:
                    temp_df = pandas.merge(temp_df, data.get('df'), how='outer', on=data.get('merge_columns'))
            sorted_column_list = []
            i = 0
            # Fill each output position: explicit index wins, then defaults,
            # then wildcard columns.
            # NOTE(review): .pop() takes from the END of the list, so default
            # and wildcard columns are emitted in reverse order — confirm
            # this is intended.
            while len(columns['_']) > 0 or len(columns['*']) > 0:
                if str(i) in columns['indexed']:
                    column_name = columns['indexed'][str(i)]
                elif len(columns['_']) > 0:
                    column_name = columns['_'].pop()
                else:
                    column_name = columns['*'].pop()
                if column_name not in sorted_column_list:
                    sorted_column_list.append(column_name)
                i += 1
            out_dict[dest_worksheet_name] = temp_df[sorted_column_list]
        logger.info(f'generating merged excel spreadsheet')
        writer = pandas.ExcelWriter(output_path, engine='openpyxl')
        for sheet_name, df in out_dict.items():
            try:
                logger.info(f'processing sheet: {sheet_name}')
                df.to_excel(writer, sheet_name=sheet_name, index=False, )
            except Exception as e:
                logger.error(f'encountered error processing sheet: {sheet_name}\n{e}')
        try:
            # NOTE(review): ExcelWriter.save() is deprecated/removed in newer
            # pandas; writer.close() is the replacement — confirm the pinned
            # pandas version before changing.
            writer.save()
        except Exception as e:
            logger.critical(f'encountered error trying to save spreadsheet: {output_path}\n{e}')
    except Exception as e:
        logger.critical(f'Encountered unexpected error:\n{e}')
    processing_time = humanfriendly.format_timespan(datetime.now() - s_time)
    logger.info(f'done processing in {processing_time}')
def get_dict_entry(iteration_index, identifier, iterable):
    """Resolve a dict entry by name, 1-based position, or '_' (current index).

    Returns {'name': key, 'item': value}.
    """
    positional = isinstance(identifier, int) or identifier.isdigit() or identifier == '_'
    if positional:
        # '_' means "the entry at the current iteration index".
        pos = iteration_index if identifier == '_' else int(identifier) - 1
        name = list(iterable)[pos]
        item = list(iterable.values())[pos]
    else:
        name = identifier
        item = iterable[identifier]
    return {'name': name, 'item': item}
@cli.command()
@click.option('-o', '--output', help='relative or absolute path to output file')
def mapping_skeleton(**kwargs):
    """Generates a skeleton of the mapping file"""
    # Writes an example mapping JSON for the user to fill in; see the embedded
    # __instructions__ entry for the mapping semantics.
    try:
        out_path = get_path(kwargs.get('output') or 'mapping_skeleton.json', make_dir=True)
        # Fixed: this branch referenced an undefined name `p` (NameError) and
        # its fallback dropped the output directory; force a .json suffix
        # in place instead.
        if out_path.suffix != '.json':
            out_path = out_path.with_suffix('.json')
        skeleton = {
            '__instructions__': {
                '1': 'names starting with double underscore (\'__\') will be ignored',
                '2': 'fields enclosed with \'<>\' should be replaced completely',
                '3': 'use the underscore character (\'_\') to use system defaults',
                '4': 'use the asterisk character (\'*\') as a wildcard in the columns list to ensure all other'
                     ' columns are included. Note that when asterisk is used, column name and position will be default'
                     ' and all other column mappings will be ignored therefore the asterisk should only be used at the'
                     ' end of the mapping',
                '5': 'in the column mappings, use the following notation to exclude a column from the output: [-]',
                '6': 'note that the merge_columns need to match on the respective sheets that are being merged'
            },
            '<spreadsheet 1 name> or <position> or _': [
                {
                    '<worksheet 1 name> or _': {
                        'dest_worksheet_name': '<dest worksheet name> or _',
                        'merge_columns': ["<name of reference columns for merging multiple spreadsheets>"],
                        'columns': [
                            ['<column 1 name>', '<column 1 dest name> or _', '<column 1 dest position> or _'],
                            ['<column 2 name>', '<column 2 dest name> or _', '<column 2 dest position> or _']
                        ]
                    }
                },
                {
                    '<worksheet 2 name> or _': {
                        'dest_worksheet_name': '<dest worksheet name> or _',
                        'merge_columns': '[<name of reference columns for merging multiple spreadsheets>]',
                        'columns': [
                            ['<column 1 name>', '<column 1 dest name> or _', '<column 1 dest position> or _'],
                            ['<column 2 name>', '<column 2 dest name> or _', '<column 2 dest position> or _']
                        ]
                    }
                }
            ],
            '<spreadsheet 2 name> or <position> or _': [
                {
                    '<worksheet 1 name> or _': {
                        'dest_worksheet_name': '<dest worksheet name> or _',
                        'merge_columns': '[<name of reference columns for merging multiple spreadsheets>]',
                        'columns': [
                            ['<column 1 name>', '<column 1 dest name> or _', '<column 1 dest position> or _']
                        ]
                    }
                }
            ]
        }
        with open(out_path, 'w+') as f:
            json.dump(skeleton, f, indent=2)
    except Exception as e:
        logger.critical(f'Encountered unexpected error:\n{e}')
def get_path(path, make_dir=True):
    """Resolve *path* against the CWD; optionally create its parent directory."""
    resolved = pathlib.Path(path)
    if not resolved.is_absolute():
        resolved = pathlib.Path(os.getcwd()) / resolved
    if make_dir:
        resolved.parent.mkdir(parents=True, exist_ok=True)
    return resolved
@cli.command()
def gui():
    """Launches a PYQT5 gui"""
    # Imported lazily so the CLI still works when PyQt5 is not installed.
    from excel_transform.gui import launch_gui
    launch_gui()
@cli.command()
def version():
    """Shows the version of the application"""
    # Plain echo; the cli() group deliberately skips its log banner for this.
    click.echo(f'v{__version__}')
if __name__ == '__main__':
    # Console entry point: dispatch to the click command group.
    cli()
|
from django import template
from django.template import Library
register = template.Library()
@register.filter
def url_replace(request, field, value):
    """Template filter: current query string with one parameter replaced."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-25 17:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the `title` field of the Job model to `job` in the default app.
    dependencies = [
        ('default', '0002_job'),
    ]
    operations = [
        migrations.RenameField(
            model_name='job',
            old_name='title',
            new_name='job',
        ),
    ]
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .forms import UploadFileForm
from django.contrib import messages
from .models import Leaderboard
from .fileProcessor import handle_uploaded_file
def viewAll(request):
    # List every Leaderboard record on the sample page.
    board=Leaderboard.objects.all()
    return render(request, 'leaderboard/sample.html', {'records': board})
def details(request, pk):
    # Show a single Leaderboard record by primary key.
    # NOTE(review): .get() raises DoesNotExist for a bad pk, which surfaces as
    # a 500 here — get_object_or_404 would give a 404 instead.
    detail=Leaderboard.objects.get(pk=pk)
    return render(request, 'leaderboard/detail.html',{ 'user': detail})
def updateBoard(request):
    """Render the upload form; on POST, ingest the uploaded leaderboard file.

    A successful import redirects to 'index'; a failed import re-renders the
    form with an informational message.
    """
    form = UploadFileForm()
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        # NOTE(review): form.is_valid() is never checked and a request without
        # a 'file' part raises KeyError — confirm and tighten.
        response = handle_uploaded_file(request.FILES['file'])
        if response:
            return redirect('index')
        messages.info(request, 'Input file should be a CSV or JSON file.')
    # Fixed: removed the redundant `else: form = UploadFileForm()` — the form
    # is already initialized above, so the branch was dead weight.
    return render(request, 'leaderboard/index.html', {'form': form})
# Vocabulary
# Every vocabulary reserves index 0 for padding and index 1 for unknowns.
RESERVED_TOKENS = {'PAD': 0, 'UNK': 1}
RESERVED_ENTS = {'PAD': 0, 'UNK': 1}
RESERVED_ENT_TYPES = {'PAD': 0, 'UNK': 1}
RESERVED_RELS = {'PAD': 0, 'UNK': 1}
# Extra word-vocabulary entries: literal markers, NER tags, and the same tags
# in their delexicalized (double-underscore) placeholder form.
_NER_TAGS = ['np', 'organization', 'date', 'number', 'misc', 'ordinal', 'duration', 'person', 'time', 'location']
extra_vocab_tokens = (
    ['alias', 'true', 'false', 'num', 'bool']
    + _NER_TAGS
    + ['__{}__'.format(tag) for tag in _NER_TAGS]
)
extra_rels = ['alias']
extra_ent_types = ['num', 'bool']
# BAMnet entity mention types
topic_mention_types = {'person', 'organization', 'location', 'misc'}
# delex_mention_types = {'date', 'time', 'ordinal', 'number'}
delex_mention_types = {'date', 'ordinal', 'number'}
constraint_mention_types = delex_mention_types
|
"""GLD module."""
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize, special, stats
class GLD:
r"""Univariate Generalized Lambda Distribution class.
GLD is flexible family of continuous probability distributions with wide variety of shapes.
GLD has 4 parameters and defined by quantile function. Probability density function and cumulative distribution function
are not available in closed form and can be calculated only with the help of numerical methods.
This tool implements three different parameterization types of GLD: 'RS' (introduced by Ramberg and Schmeiser, 1974),
'FMKL' (introduced by Freimer, Mudholkar, Kollia and Lin, 1988) and 'VSL' (introduced by van Staden and Loots, 2009).
It provides methods for calculating different characteristics of GLD, parameter estimating, generating random variables and so on.
Attributes:
----------
param_type : str
Parameterization type of Generalized Lambda Distributions, should be 'RS', 'FMKL' or 'VSL'.
Notes:
-----
Different parameterization types of GLD are not equivalent and specify similar but deifferent distribution families,
there is no one-to-one correspondence between their parameters.
GLD of 'RS' type is characterized by quantile function :math:`Q(y)` and density quantile function :math:`f(y)`:
.. math::
Q(y) = \lambda_1 + \frac{y^{\lambda_3} - (1-y)^{\lambda_4}}{\lambda_2},
.. math::
    f(y) = \frac{\lambda_2}{\lambda_3 y^{\lambda_3-1} + \lambda_4 (1-y)^{\lambda_4-1}},
where :math:`\lambda_1` - location parameter, :math:`\lambda_2` - inverse scale parameter,
:math:`\lambda_3, \lambda_4` - shape parameters.
GLD of 'RS' type is defined only for certain values of the shape parameters which provide
non-negative density function and there are a complex series of rules determining which parameters
specify a valid statistical distribution.
'FMKL' parameterization removes this restrictions. GLD of 'FMKL' type is defined for all values of
shape parameters and described by following quantile function :math:`Q(y)` and density quantile function :math:`f(y)`:
.. math::
Q(y) = \lambda_1 + \frac{(y^{\lambda_3}-1)/\lambda_3 - ((1-y)^{\lambda_4}-1)/\lambda_4}{\lambda_2},
.. math::
    f(y) = \frac{\lambda_2}{y^{\lambda_3-1} + (1-y)^{\lambda_4-1}}.
'VSL' parameterization was introduced for simple parameter estimating in closed form using L-moments. Its quantile function :math:`Q(y)` and density quantile function :math:`f(y)` are:
.. math::
Q(y) = \alpha + \beta \Big((1-\delta)\frac{y^\lambda - 1}{\lambda} - \delta\frac{(1-y)^\lambda - 1}{\lambda}\Big),
.. math::
f(y) = \frac{1}{\beta ((1-\delta)y^{\lambda-1}+\delta(1-y)^{\lambda-1})},
where parameters have a different designation: :math:`\alpha` - location parameter, :math:`\beta` - scale parameter,
:math:`\delta` - skewness parameter (should be in the interval [0,1]), :math:`\lambda` - shape parameter.
References:
----------
.. [1] Ramberg, J.S., & Schmeiser, B.W. 1974. An approximate method for generating asymmetric random variables.
Communications of the ACM, 17(2), 78–82
.. [2] Freimer, M., Kollia, G., Mudholkar, G.S., & Lin, C.T. 1988. A study of the
generalized Tukey lambda family. Communications in Statistics-Theory and Methods, 17, 3547–3567.
.. [3] Van Staden, Paul J., & M.T. Loots. 2009. Method of L-moment estimation for generalized lambda distribution.
Third Annual ASEARC Conference. Newcastle, Australia.
"""
def __init__(self, param_type):
"""Create a new GLD with given parameterization type.
Parameters
----------
param_type : str
Parameterization type. Should be 'RS','FMKL' or 'VSL'.
Raises
------
ValueError
If param_type is not one of 'RS','FMKL' or 'VSL'.
"""
if param_type not in ['RS','FMKL','VSL']:
raise ValueError('Unknown parameterisation \'%s\' . Use \'RS\',\'FMKL\' or \'VSL\'' %param_type)
else:
self.param_type = param_type
    def check_param(self,param):
        """Check if parameters specify a valid distribution with non-negative density function.
        Parameters
        ----------
        param : array-like
            Parameters of GLD
        Raises
        ------
        ValueError
            If number of parameters is not equal to 4.
        Returns
        -------
        bool
            True for valid parameters and False for invalid.
        """
        if len(param)!=4:
            raise ValueError('GLD has 4 parameters')
        if not np.isfinite(param).all():
            return False
        else:
            if self.param_type == 'RS':
                # r1..r6 are the regions of RS (lambda2..lambda4) parameter
                # space where the density stays non-negative; r5/r6 need an
                # extra inequality on the boundary. Presumably the standard
                # RS validity regions — TODO confirm against a reference.
                r1 = (param[1]<0) and (param[2]<=-1) and (param[3]>=1)
                r2 = (param[1]<0) and (param[2]>=1) and (param[3]<=-1)
                r3 = (param[1]>0) and (param[2]>=0) and (param[3]>=0) and (param[2]!=0 or param[3]!=0)
                r4 = (param[1]<0) and (param[2]<=0) and (param[3]<=0) and (param[2]!=0 or param[3]!=0)
                r5 = (param[1]<0) and (param[2]<=0 and param[2]>=-1) and (param[3]>=1)
                r6 = (param[1]<0) and (param[2]>=1) and (param[3]>=-1 and param[3]<=0)
                if r5:
                    r5 = r5 and (1-param[2])**(1-param[2])*(param[3]-1)**(param[3]-1)/(param[3] - param[2])**(param[3]- param[2])<=-param[2]/param[3]
                if r6:
                    r6 = r6 and (1-param[3])**(1-param[3])*(param[2]-1)**(param[2]-1)/(param[2] - param[3])**(param[2]- param[3])<=-param[3]/param[2]
                return r1 or r2 or r3 or r4 or r5 or r6
            if self.param_type == 'FMKL':
                # FMKL is valid for any shapes; only the scale must be positive.
                return param[1]>0
            if self.param_type == 'VSL':
                # VSL: positive scale and skewness parameter delta in [0, 1].
                return np.logical_and(param[1]>0, np.logical_and(param[2]>=0, param[2]<=1))
    def Q(self, y, param):
        """Calculate quantile function of GLD at `y` for given parameters.
        Parameters
        ----------
        y : array-like
            Lower tail probability, must be between 0 and 1.
        param : array-like
            Parameters of GLD.
        Raises
        ------
        ValueError
            If input parameters are not valid.
        Returns
        -------
        array-like
            Value of quantile function evaluated at `y`.
        """
        y = np.array(y).astype(float)
        param = np.array(param)
        if np.logical_or(y>1, y<0).any():
            raise ValueError('y should be in range [0,1]')
        if not self.check_param(param):
            raise ValueError('Parameters are not valid')
        if self.param_type == 'RS':
            return param[0] + (y**param[2] - (1-y)**param[3])/param[1]
        if self.param_type == 'FMKL':
            # Shape parameter 0 takes the logarithmic limit of (y**l - 1)/l.
            f1 = (y**param[2]-1)/param[2] if param[2]!=0 else np.log(y)
            f2 = ((1-y)**param[3] - 1)/param[3] if param[3]!=0 else np.log(1-y)
            return param[0] + (f1 - f2)/param[1]
        if self.param_type == 'VSL':
            if param[3]!=0:
                return param[0] + ((1 - param[2])*(y**param[3] - 1)/param[3] - param[2]*((1-y)**param[3] - 1)/param[3])*param[1]
            else:
                # lambda == 0: logarithmic limit of the VSL quantile function.
                return param[0] + param[1]*np.log(y**(1-param[2])/(1-y)**param[2])
    def PDF_Q(self, y, param):
        """Calculate density quantile function of GLD at `y` for given parameters.
        Parameters
        ----------
        y : array-like
            Lower tail probability, must be between 0 and 1.
        param : array-like
            Parameters of GLD.
        Raises
        ------
        ValueError
            If input parameters are not valid.
        Returns
        -------
        array-like
            Value of density quantile function evaluated at `y`.
        """
        y = np.array(y).astype(float)
        if np.logical_or(y>1, y<0).any():
            raise ValueError('y should be in range [0,1]')
        if not self.check_param(param):
            raise ValueError('Parameters are not valid')
        # In each branch this is f(Q(y)) = 1 / Q'(y), the reciprocal of the
        # derivative of the corresponding quantile function.
        if self.param_type == 'RS':
            return param[1]/((param[2]*y**(param[2]-1) + param[3]*(1-y)**(param[3]-1)))
        if self.param_type == 'FMKL':
            return param[1]/((y**(param[2]-1) + (1-y)**(param[3]-1)))
        if self.param_type == 'VSL':
            return 1/((1 - param[2])*y**(param[3] - 1) + param[2]*(1-y)**(param[3] - 1))/param[1]
def CDF_num(self, x, param, xtol = 1e-05):
"""Calculate cumulative distribution function of GLD numerically at `x` for given parameters.
Parameters
----------
x : array-like
Argument of CDF.
param : array-like
Parameters of GLD.
xtol : float, optional
Absolute error parameter for optimization procedure. The default is 1e-05.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of cumulative distribution function evaluated at `x`.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
x = np.array([x]).ravel()
ans = x*np.nan
a,b = self.supp(param)
ans[x<a] = 0
ans[x>b] = 1
def for_calc_F(y):
"""Auxiliary function for optimization."""
return (self.Q(y,param) - x_arg)**2
ind = np.nonzero(np.isnan(ans))[0]
for i in ind:
x_arg = x[i]
ans[i] = optimize.fminbound(for_calc_F,0,1, xtol = xtol)
return ans
def PDF_num(self, x, param, xtol = 1e-05):
"""Calculate probability density function of GLD numerically at `x` for given parameters.
Parameters
----------
x : array-like
Argument of PDF.
param : array-like
Parameters of GLD.
xtol : float, optional
Absolute error parameter for optimization procedure. The default is 1e-05.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of probability density function evaluated at `x`.
"""
y = self.CDF_num(x, param, xtol)
ans = self.PDF_Q(y,param)
a,b = self.supp(param)
ans[np.logical_or(x<a, x>b)] = 0
return ans
def supp(self,param):
"""Return support of GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Support of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
return self.Q(0,param), self.Q(1,param)
def rand(self, param, size = 1, random_state = None):
"""Generate random variables of GLD.
Parameters
----------
param : array-like
Parameters of GLD.
size : int, optional
Number of random variables. The default is 1.
random_state : None or int, optional
The seed of the pseudo random number generator. The default is None.
Returns
-------
array-like
Sample of GLD random variables of given size.
"""
if random_state:
np.random.seed(random_state)
alpha = np.random.random(size)
return self.Q(alpha,param)
def correct_supp(self, data, param, eps = 0.0001):
"""Correct support of GLD due to data.
In certain cases some data points can be outside of finite support of GLD.
This method corrects parameters of location and scale to fit support to data.
It is used as a component of some parameter estimation methods.
Parameters
----------
data : array-like
Input data.
param : array-like
Parameters of GLD.
eps : float, optional
Parameter of support fitting. Tail probability of minimum and maximum data points. The default is 0.0001.
Returns
-------
array-like
Corrected parameters of GLD.
"""
data = data.ravel()
def fun_opt(x):
"""Auxiliary function for optimization."""
A = np.min([np.min(data), self.Q(eps,param)])
B = np.max([np.max(data), self.Q(1-eps,param)])
par = np.hstack([x,param[2:]])
if not self.check_param(par):
return np.inf
return np.max([np.abs(self.Q(eps,par) - A), np.abs(self.Q(1-eps,par) - B)])
x = optimize.fmin(fun_opt,param[:2], disp=False)
param[:2] = x
return param
def GoF_Q_metric(self,data,param):
"""Calculate Goodness-of-Fit metric based on discrepancy between empirical and theoretical quantile functions.
It can be used for simple comparison of different fitted distributions.
Parameters
----------
data : array-like
Input data.
param : array-like
Parameters of GLD.
Returns
-------
float
Mean square deviation of empirical and theoretical quantiles.
"""
data = data.ravel()
return np.mean((np.sort(data) - self.Q((np.arange(len(data))+0.5)/len(data),param))**2)
def GoF_tests(self,param, data, bins_gof = 8):
"""Perform two Goodness-of_Fit tests: Kolmogorov-Smirnov test and one-way chi-square test from scipy.stats.
Parameters
----------
param : array-like
Parameters of GLD.
data : array-like
Input data.
bins_gof : int, optional
Number of bins for chi-square test. The default is 8.
Returns
-------
scipy.stats.stats.KstestResult
Result of Kolmogorov-Smirnov test including statistic and p-value.
scipy.stats.stats.Power_divergenceResult
Result of chi-square test including statistic and p-value.
"""
def cdf(x):
"""Auxiliary function for GoF test."""
return self.CDF_num(x,param)
ks = stats.kstest(data, cdf)
chi2 = stats.chisquare(np.histogram(data,self.Q(np.linspace(0, 1, bins_gof + 1),param))[0],[len(data)/bins_gof]*bins_gof )
return ks, chi2
    def plot_cdf(self, param_list, data = None, ymin = 0.01, ymax = 0.99, n_points = 100, names = None, color_emp = 'lightgrey', colors = None):
        """Plot cumulative distribution functions of GLD.

        This allows to compare GLD cumulative distribution functions with different parameters.
        Also it is possible to add empirical CDF on the plot.

        Parameters
        ----------
        param_list : array-like or list of array-like
            List of GLD parameters for plotting.
        data : array-like, optional
            If not None empirical CDF estimated by data will be added to the plot. The default is None.
        ymin : float, optional
            Minimal lower tail probability for plotting. The default is 0.01.
        ymax : float, optional
            Maximal lower tail probability for plotting. The default is 0.99.
        n_points : int, optional
            Number of points for plotting. The default is 100.
        names : list of str, optional
            Names of labels for the legend. Length of the list should be equal to the length of param_list.
        color_emp : str, optional
            Line color of empirical CDF. It's ignored if data is None. The default is 'lightgrey'.
        colors : list of str, optional
            Line colors of CDFs. Length of the list should be equal to the length of param_list.
        """
        # A single parameter vector is promoted to a one-row matrix so the
        # plotting loop below handles both cases uniformly.
        param_list = np.array(param_list)
        if param_list.ndim==1:
            param_list = param_list.reshape(1,-1)
        if names is None:
            names = [str(x) for x in param_list]
        if colors is None:
            colors = [None]*len(param_list)
        plt.figure()
        plt.grid()
        if not (data is None):
            data = data.ravel()
            # Empirical CDF: sorted data against plotting positions i/n.
            plt.plot(np.sort(data), np.arange(len(data))/len(data),color = color_emp,lw = 2)
            names = np.hstack(['empirical data', names ])
        # Theoretical CDF is drawn parametrically: (Q(y), y) for y in [ymin, ymax].
        y = np.linspace(ymin,ymax,n_points)
        for i in range(param_list.shape[0]):
            param = param_list[i]
            plt.plot(self.Q(y,param), y, color = colors[i])
        # NOTE(review): plt.ylim(ymin=...) was removed in matplotlib >= 3.0 in
        # favor of bottom= — confirm the supported matplotlib version.
        plt.ylim(ymin = 0)
        plt.legend(names,bbox_to_anchor=(1.0, 1.0 ))
        plt.title('CDF')
    def plot_pdf(self, param_list, data = None, ymin = 0.01, ymax = 0.99, n_points = 100, bins = None, names = None, color_emp = 'lightgrey', colors = None):
        """Plot probability density functions of GLD.

        This allows to compare GLD probability density functions with different parameters.
        Also it is possible to add data histogram on the plot.

        Parameters
        ----------
        param_list : array-like or list of array-like
            List of GLD parameters for plotting.
        data : array-like, optional
            If not None a density histogram of the data will be added to the plot.
        ymin : float, optional
            Minimal lower tail probability for plotting. The default is 0.01.
        ymax : float, optional
            Maximal lower tail probability for plotting. The default is 0.99.
        n_points : int, optional
            Number of points for plotting. The default is 100.
        bins : int, optional
            Number of bins for histogram. It's ignored if data is None.
        names : list of str, optional
            Names of labels for the legend. Length of the list should be equal to the length of param_list. The default is None.
        color_emp : str, optional
            Color of the histogram. It's ignored if data is None. The default is 'lightgrey'.
        colors : list of str, optional
            Line colors of PDFs. Length of the list should be equal to the length of param_list.
        """
        # Promote a single parameter vector to a one-row matrix.
        param_list = np.array(param_list)
        if param_list.ndim==1:
            param_list = param_list.reshape(1,-1)
        if names is None:
            names = [str(x) for x in param_list]
        plt.figure()
        plt.grid()
        # Track the largest density seen so the y-axis can be scaled below.
        pdf_max = 0
        if not data is None:
            data = data.ravel()
            # NOTE(review): unlike plot_cdf, no 'empirical data' entry is
            # prepended to `names` here — verify legend alignment when data
            # is not None.
            p = plt.hist(data, bins = bins, color = color_emp, density = True)
            pdf_max = np.max(p[0])
        if colors is None:
            colors = [None]*len(param_list)
        # PDF is drawn parametrically: (Q(y), f(Q(y))) for y in [ymin, ymax].
        y = np.linspace(ymin,ymax,n_points)
        for i in range(param_list.shape[0]):
            param = param_list[i]
            plt.plot(self.Q(y,param), self.PDF_Q(y,param),color = colors[i])
            pdf_max = np.max([pdf_max,np.max(self.PDF_Q(y,param))])
        # NOTE(review): plt.ylim(ymin=..., ymax=...) was removed in matplotlib
        # >= 3.0 in favor of bottom=/top= — confirm the supported version.
        plt.ylim(ymin = 0,ymax = pdf_max * 1.05)
        plt.legend(names,bbox_to_anchor=(1.0, 1.0 ))
        plt.title('PDF')
    def plot_fitting(self,data,param, bins = None):
        """Construct plots for comparing fitted GLD with data.

        It allows to compare data histogram and PDF of fitted GLD on the one plot,
        empirical and theoretical CDFs on the second plot and
        theoretical and empirical quantiles plotted against each other on the third plot.

        Parameters
        ----------
        data : array-like
            Input data.
        param : array-like
            Parameters of GLD.
        bins : int, optional
            Number of bins for histogram.
        """
        data = data.ravel()
        fig,ax = plt.subplots(1,3,figsize = (15,3))
        # Panel 1: data histogram with the fitted density drawn
        # parametrically as (Q(y), f(Q(y))).
        ax[0].hist(data,bins = bins,density = True,color = 'skyblue')
        y = np.linspace(0.001,0.999,100)
        ax[0].plot(self.Q(y,param),self.PDF_Q(y,param),lw = 2,color = 'r')
        ax[0].set_title('PDF')
        ax[0].grid()
        # Panel 2: empirical CDF (sorted data vs i/n) against the fitted CDF.
        ax[1].plot(np.sort(data), np.arange(len(data))/len(data))
        ax[1].plot(self.Q(y,param), y)
        ax[1].grid()
        ax[1].set_title('CDF')
        # Panel 3: Q-Q plot — fitted quantiles at midpoint plotting positions
        # against the order statistics, with the identity line for reference.
        x = np.sort(data)
        y = (np.arange(len(data))+0.5)/len(data)
        ax[2].plot(self.Q(y,param), x,'bo',ms = 3)
        m1 = np.min([x,self.Q(y,param)])
        m2 = np.max([x,self.Q(y,param)])
        ax[2].plot([m1,m2], [m1,m2],'r')
        ax[2].grid()
        ax[2].set_title('Q-Q-plot')
    def __sum_Ez(self,k,p3,p4):
        """Auxiliary function for moments calculation.

        Evaluates the closed-form binomial/beta sum used by mean, var,
        skewness, kurtosis and fit_MM for the k-th moment, for the current
        parameterization type. `p3`, `p4` are the two shape parameters.
        """
        s = 0
        p3 = np.array(p3)
        p4 = np.array(p4)
        if self.param_type == 'RS':
            # Single binomial expansion expressed through beta functions.
            for i in range(0,k+1):
                s+=special.binom(k,i)*(-1)**i *special.beta(p3*(k-i)+1, p4*i+1)
        if self.param_type == 'FMKL':
            # Double expansion; the 1/(p3*p4)**k factor comes from the FMKL
            # normalization of the two tail terms.
            for i in range(0,k+1):
                for j in range(0, k-i+1):
                    s+=(p3-p4)**i/(p3*p4)**k * special.binom(k,i)*special.binom(k-i,j)*(-1)**j*p4**(k-i-j)*p3**j*special.beta(p3*(k-i-j)+1,p4*j+1)
        if self.param_type=='VSL':
            # VSL: p3 acts as a weight in [0,1], p4 is the common exponent.
            for i in range(0,k+1):
                for j in range(0, k-i+1):
                    s+=(2*p3-1)**i/p4**k*special.binom(k,i)*special.binom(k-i,j)*(-1)**j*(1-p3)**(k-i-j)*p3**j*special.beta(p4*(k-i-j)+1,p4*j+1)
        return s
def mean(self, param):
"""Calculate mean of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Mean of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1 and param[3]>-1:
A = self.__sum_Ez(1,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
return A/L + param[0]
else:
return np.nan
def var(self, param):
"""Calculate variance of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Variance of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/2 and param[3]>-1/2:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
return (B-A**2)/L**2
else:
return np.nan
def std(self, param):
"""Calculate standard deviation of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Standard deviation of GLD.
"""
return np.sqrt(self.var(param))
def skewness(self, param):
"""Calculate skewness of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Skewness of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/3 and param[3]>-1/3:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
C = self.__sum_Ez(3,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
a2 = (B-A**2)/L**2
return (C-3*A*B+2*A**3)/L**3/a2**1.5
else:
return np.nan
def kurtosis(self, param):
"""Calculate kurtosis of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Kurtosis of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/4 and param[3]>-1/4:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
C = self.__sum_Ez(3,param[2], param[3])
D = self.__sum_Ez(4,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
a2 = (B-A**2)/L**2
return (D-4*A*C+6*A**2*B-3*A**4)/L**4/a2**2
else:
return np.nan
def median(self,param):
"""Calculate median of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Median of GLD.
"""
return self.Q(0.5,param)
    def fit_MM(self,data, initial_guess, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
        """Fit GLD to data using method of moments.

        It estimates parameters of GLD by setting first four sample moments equal to their GLD counterparts.
        Resulting system of equations are solved using numerical methods for given initial guess.
        There are some restrictions of this method related to existence of moments and computational difficulties.

        Parameters
        ----------
        data : array-like
            Input data.
        initial_guess : array-like
            Initial guess for third and fourth parameters.
        xtol : float, optional
            Absolute error for optimization procedure. The default is 0.0001.
        maxiter : int, optional
            Maximum number of iterations for optimization procedure.
        maxfun : int, optional
            Maximum number of function evaluations for optimization procedure.
        disp_optimizer : bool, optional
            Set True to display information about optimization procedure. The default is True.
        disp_fit : bool, optional
            Set True to display information about fitting. The default is True.
        bins_hist : int, optional
            Number of bins for histogram. It's ignored if disp_fit is False.
        test_gof : bool, optional
            Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
        bins_gof : int, optional
            Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.

        Raises
        ------
        ValueError
            If length of initial guess is incorrect.

        Returns
        -------
        array-like
            Fitted parameters of GLD.

        References
        ----------
        .. [1] Karian, Z.A., Dudewicz, E.J. 2000. Fitting statistical distributions: the generalized
           lambda distribution and generalized bootstrap methods. Chapman and Hall/CRC.
        """
        initial_guess = np.array(initial_guess)
        data = data.ravel()
        def sample_moments(data):
            """Calculate first four sample moments (mean, variance, skewness, kurtosis)."""
            a1 = np.mean(data)
            a2 = np.mean((data - a1)**2)
            a3 = np.mean((data - a1)**3)/a2**1.5
            a4 = np.mean((data - a1)**4)/a2**2
            return a1,a2,a3,a4
        def moments( param):
            """Calculate first four GLD moments."""
            A = self.__sum_Ez(1,param[2], param[3])
            B = self.__sum_Ez(2,param[2], param[3])
            C = self.__sum_Ez(3,param[2], param[3])
            D = self.__sum_Ez(4,param[2], param[3])
            L = 1/param[1] if self.param_type=='VSL' else param[1]
            a1 = A/L + param[0]
            a2 = (B-A**2)/L**2
            a3 = (C-3*A*B+2*A**3)/L**3/a2**1.5
            a4 = (D-4*A*C+6*A**2*B-3*A**4)/L**4/a2**2
            return a1,a2,a3,a4
        def fun_VSL(x):
            """Auxiliary function for optimization."""
            # Skewness and kurtosis depend only on the two shape parameters,
            # so the search runs over (p3, p4) alone. Domain guard: VSL p3 is
            # a weight in [0,1]; fourth moment needs p4 > -0.25.
            if x[0]<0 or x[0] >1 or x[1]<-0.25:
                return np.inf
            A = self.__sum_Ez(1,x[0],x[1])
            B = self.__sum_Ez(2,x[0],x[1])
            C = self.__sum_Ez(3,x[0],x[1])
            D = self.__sum_Ez(4,x[0],x[1])
            return np.max([np.abs((C-3*A*B+2*A**3)/(B-A**2)**1.5 - a3), np.abs( (D-4*A*C+6*A**2*B-3*A**4)/(B-A**2)**2 - a4)])
        def fun_RS_FMKL(x):
            """Auxiliary function for optimization."""
            # Domain guard: the fourth moment requires both shape parameters > -0.25.
            if x[0] <-0.25 or x[1]<-0.25:
                return np.inf
            A = self.__sum_Ez(1,x[0],x[1])
            B = self.__sum_Ez(2,x[0],x[1])
            C = self.__sum_Ez(3,x[0],x[1])
            D = self.__sum_Ez(4,x[0],x[1])
            return np.max([np.abs((C-3*A*B+2*A**3)/(B-A**2)**1.5 - a3), np.abs( (D-4*A*C+6*A**2*B-3*A**4)/(B-A**2)**2 - a4)])
        fun_opt = fun_VSL if self.param_type=='VSL' else fun_RS_FMKL
        if initial_guess.ndim==0 or len(initial_guess)!=2:
            raise ValueError('Specify initial guess for two parameters')
        a1,a2,a3,a4 = sample_moments(data)
        # Stage 1: solve the skewness/kurtosis equations for the shape parameters.
        [p3,p4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
        A = self.__sum_Ez(1,p3,p4)
        B = self.__sum_Ez(2,p3,p4)
        C = self.__sum_Ez(3,p3,p4)
        D = self.__sum_Ez(4,p3,p4)
        # Stage 2: recover scale and location in closed form. The exponent
        # switch reflects VSL's reciprocal-scale convention.
        p2 = (((B-A**2)/a2)**0.5)**(-1 if self.param_type=='VSL' else 1)
        p1 = a1 - A/(p2**(-1 if self.param_type=='VSL' else 1))
        param = [p1,p2,p3,p4]
        if self.param_type=='RS' and not self.check_param(param):
            # RS moments are symmetric in (p3, p4): if the direct solution is
            # not a valid RS vector, try the mirrored one with negated scale.
            p3, p4 = p4,p3
            p2 = p2* (-1)
            p1 = a1 + A/(p2)
            param = [p1,p2,p3,p4]
        if disp_fit:
            print('')
            print('Sample moments: ', sample_moments(data))
            print('Fitted moments: ', moments(param))
            print('')
            print('Parameters: ', param)
            if not self.check_param(param):
                print('')
                print('Parameters are not valid. Try another initial guess.')
            else:
                if test_gof:
                    ks, chi2 = self.GoF_tests(param, data, bins_gof)
                    print('')
                    print('Goodness-of-Fit')
                    print(ks)
                    print(chi2)
                self.plot_fitting(data,param,bins = bins_hist)
        return np.array(param)
def fit_PM(self,data, initial_guess, u = 0.1, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of percentiles.
It estimates parameters of GLD by setting four percentile-based sample statistics equal to their corresponding GLD statistics.
To calculate this statistics it's necessary to specify parameter u (number between 0 and 0.25).
Resulting system of equations are solved using numerical methods for given initial guess.
Parameters
----------
data : array-like
Input data.
initial_guess : array-like
Initial guess for third and fourth parameters if parameterization type is 'RS' or 'FMKL'
and for only fourth parameter if parameterization type is 'VSL'.
u : float, optional
Parameter for calculating percentile-based statistics. Arbitrary number between 0 and 0.25. The default is 0.1.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If length of initial guess is incorrect or parameter u is out of range [0,0.25].
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] Karian, Z.A., Dudewicz, E.J. 2000. Fitting statistical distributions: the generalized
lambda distribution and generalized bootstrap methods. Chapman and Hall/CRC.
"""
initial_guess = np.array(initial_guess)
data = data.ravel()
if u<0 or u>0.25:
raise ValueError('u should be in interval [0,0.25]')
def sample_statistics(data, u):
"""Calculate four sample percentile-based statistics."""
p1 = np.quantile(data, 0.5)
p2 = np.quantile(data, 1-u) - np.quantile(data, u)
p3 = (np.quantile(data, 0.5) - np.quantile(data, u))/(np.quantile(data, 1-u) - np.quantile(data, 0.5))
p4 = (np.quantile(data, 0.75) - np.quantile(data, 0.25))/p2
return p1,p2,p3,p4
a1,a2,a3,a4 = sample_statistics(data,u)
if self.param_type=='RS':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[l1,l2,l3,l4] = param
p1 = l1+(0.5**l3 - 0.5**l4)/l2
p2 = ((1-u)**l3 - u**l4 - u**l3+(1-u)**l4)/l2
p3 = (0.5**l3 - 0.5**l4 - u**l3 +(1-u)**l4)/((1-u)**l3 - u**l4 - 0.5**l3 +0.5**l4)
p4 = (0.75**l3 - 0.25**l4 - 0.25**l3 +0.75**l4)/((1-u)**l3-u**l4 - u**l3+(1-u)**l4)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
l3 = x[0]
l4 = x[1]
return np.max([( (0.75**l3 - 0.25**l4 - 0.25**l3 +0.75**l4)/((1-u)**l3-u**l4 - u**l3+(1-u)**l4) - a4),
np.abs((0.5**l3 - 0.5**l4 - u**l3 +(1-u)**l4)/((1-u)**l3 - u**l4 - 0.5**l3 +0.5**l4) - a3)])
if initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = 1/a2*((1-u)**l3-u**l3 + (1-u)**l4 - u**l4)
l1 = a1 - 1/l2*(0.5**l3 - 0.5**l4)
param = np.array([l1,l2,l3,l4]).ravel()
theor_stat = theor_statistics(param,u)
if self.param_type == 'FMKL':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[l1,l2,l3,l4] = param
p1 = l1+((0.5**l3-1)/l3 - (0.5**l4-1)/l4)/l2
p2 = (((1-u)**l3 - u**l3)/l3 +((1-u)**l4- u**l4)/l4)/l2
p3 = ((0.5**l3 - u**l3 )/l3 +((1-u)**l4- 0.5**l4)/l4) / (((1-u)**l3 - 0.5**l3)/l3 +(0.5**l4- u**l4)/l4)
p4 = ((0.75**l3 - 0.25**l3 )/l3 +(0.75**l4- 0.25**l4)/l4)/(((1-u)**l3 - u**l3)/l3 + ((1-u)**l4- u**l4)/l4)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
l3 = x[0]
l4 = x[1]
return np.max([np.abs(((0.75**l3 - 0.25**l3 )/l3 +(0.75**l4- 0.25**l4)/l4)/(((1-u)**l3 - u**l3)/l3 + ((1-u)**l4- u**l4)/l4) - a4),
np.abs(((0.5**l3 - u**l3 )/l3 +((1-u)**l4- 0.5**l4)/l4) / (((1-u)**l3 - 0.5**l3)/l3 +(0.5**l4- u**l4)/l4) - a3)])
if initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = 1/a2*(((1-u)**l3-u**l3)/l3 + ((1-u)**l4 - u**l4)/l4)
l1 = a1 - 1/l2*((0.5**l3 - 1)/l3 - (0.5**l4 - 1)/l4)
param = np.array([l1,l2,l3,l4]).ravel()
theor_stat = theor_statistics(param,u)
if self.param_type == 'VSL':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[a,b,d,l] = param
p1 = a+b*(0.5**l - 1)*(1-2*d)/l
p2 = b*((1-u)**l - u**l)/l
p3 = ((1-d)*(0.5**l - u**l)+d*((1-u)**l - 0.5**l))/((1-d)*((1-u)**l - 0.5**l)+d*(0.5**l - u**l))
p4 = (0.75**l - 0.25**l)/((1-u)**l - u**l)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
return np.abs((0.75**x - 0.25**x)/((1-u)**x - u**x) - a4)
if initial_guess.ndim!=0 and len(initial_guess)!=1:
raise ValueError('Specify initial guess for one parameter')
l = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)[0]
d = (a3*((1-u)**l - 0.5**l) - 0.5**l +u**l)/(a3+1)/((1-u)**l - 2*0.5**l+u**l)
d = np.max([0,np.min([1,d])])
b = a2*l/((1-u)**l - u**l)
a = a1 - b*(0.5**l - 1)*(1-2*d)/l
param = np.array([a,b,d,l]).ravel()
theor_stat = theor_statistics(param,u)
if disp_fit:
print('')
print('Sample statistics: ', sample_statistics(data,u))
print('Fitted statistics: ', theor_stat)
print('')
print('Parameters: ', param)
if not self.check_param(param):
print('')
print('Parameters are not valid. Try another initial guess.')
else:
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
    def fit_LMM(self,data, initial_guess = None, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
        """Fit GLD to data using method of L-moments.

        It estimates parameters of GLD by equating four sample L-moments and L-moments ratios and their GLD counterparts.
        L-moments are linear combinations of order statistics analogous to conventional moments.
        Resulting system of equations for 'RS' and 'FMKL' parameterizations are solved using numerical methods for given initial guess.
        For 'VSL' parameterization there is exact analytical solution of the equations.
        In general case there are two different sets of parameters which give the same values of L-moments.
        The method returns solution which is closest to initial guess.
        If initial_guess is None the best solution is chosen using GLD.GoF_Q_metric.

        Parameters
        ----------
        data : array-like
            Input data.
        initial_guess : array-like
            Initial guess for third and fourth parameters. It's ignored for 'VSL' parameterization type.
        xtol : float, optional
            Absolute error for optimization procedure. The default is 0.0001. It's ignored for 'VSL' parameterization type.
        maxiter : int, optional
            Maximum number of iterations for optimization procedure. It's ignored for 'VSL' parameterization type.
        maxfun : int, optional
            Maximum number of function evaluations for optimization procedure. It's ignored for 'VSL' parameterization type.
        disp_optimizer : bool, optional
            Set True to display information about optimization procedure. The default is True. It's ignored for 'VSL' parameterization type.
        disp_fit : bool, optional
            Set True to display information about fitting. The default is True.
        bins_hist : int, optional
            Number of bins for histogram. It's ignored if disp_fit is False.
        test_gof : bool, optional
            Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
        bins_gof : int, optional
            Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.

        Raises
        ------
        ValueError
            If length of initial guess is incorrect.

        Returns
        -------
        array-like
            Fitted parameters of GLD.

        References
        ----------
        .. [1] Karvanen, J. and Nuutinen, A. 2008. Characterizing the generalized lambda distribution by L-moments.
           Computational Statistics & Data Analysis, 52(4):1971-1983.
        .. [2] Van Staden, Paul J., & M.T. Loots. 2009. Method of L-moment estimation for generalized lambda distribution.
           Third Annual ASEARC Conference. Newcastle, Australia.
        """
        if not initial_guess is None:
            initial_guess = np.array(initial_guess)
        data = data.ravel()
        def sample_lm(data):
            """Calculate four sample L-moments and L-moment ratios."""
            # Direct order-statistic formulas for l1..l4; the last two are
            # returned as ratios l3/l2 (L-skewness) and l4/l2 (L-kurtosis).
            x = np.sort(data)
            n = len(data)
            l1 = np.mean(x)
            l2 = np.sum(np.array([2*i-n - 1 for i in range(1,n+1)])*x)/2/special.binom(n,2)
            l3 = np.sum(np.array([special.binom(i-1,2) - 2*(i-1)*(n-i)+special.binom(n-i,2) for i in range(1,n+1)])*x)/3/special.binom(n,3)
            l4 = np.sum(np.array([special.binom(i-1,3) - 3*special.binom(i-1,2)*(n-i)+3*(i-1)*special.binom(n-i,2)-special.binom(n-i,3) for i in range(1,n+1)])*x)/4/special.binom(n,4)
            return l1,l2,l3/l2,l4/l2
        a1,a2,a3,a4 = sample_lm(data)
        def lm(param):
            """Calculate four GLD L-moments and L-moment ratios."""
            def lr(r,param):
                """Auxiliary function for L-moments calculation."""
                # Closed-form r-th L-moment for each parameterization; only
                # the first L-moment picks up the location term.
                if self.param_type=='VSL':
                    [a,b,d,l] = param
                    s = 0
                    for k in range(r):
                        s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*((1-d-(-1)**(r-1)*d)/l/(l+k+1))
                    if r==1:
                        s = s*b+a+b*(2*d-1)/l
                    else:
                        s = s*b
                    return s
                if self.param_type=='RS':
                    [l1,l2,l3,l4] = param
                    s = 0
                    for k in range(r):
                        s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*(1/(l3+k+1) - (-1)**(r-1)/(l4+k+1))
                    if r==1:
                        s = s/l2+l1
                    else:
                        s = s/l2
                    return s
                if self.param_type=='FMKL':
                    [l1,l2,l3,l4] = param
                    s = 0
                    for k in range(r):
                        s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*(1/(l3+k+1)/l3 - (-1)**(r-1)/(l4+k+1)/l4)
                    if r==1:
                        s = s/l2+l1 - 1/l2/l3 +1/l2/l4
                    else:
                        s = s/l2
                    return s
            l1 = lr(1,param)
            l2 = lr(2,param)
            l3 = lr(3,param)
            l4 = lr(4,param)
            return l1,l2,l3/l2, l4/l2
        if self.param_type=='RS':
            def fun_opt(x):
                """Auxiliary function for optimization."""
                # Match L-skewness and L-kurtosis; both depend only on the
                # shape parameters (l3, l4).
                [l3, l4] = x
                L2 = -1/(1+l3)+2/(2+l3)-1/(1+l4)+2/(2+l4)
                return np.max([np.abs((1/(l3+1) - 6/(2+l3) + 6/(3+l3) - 1/(l4+1) + 6/(2+l4) - 6/(3+l4))/L2 - a3),
                               np.abs((-1/(1+l3) + 12/(2+l3) - 30/(3+l3) + 20/(4+l3)-1/(1+l4) + 12/(2+l4) - 30/(3+l4) + 20/(4+l4))/L2 - a4)])
            if initial_guess is None or initial_guess.ndim==0 or len(initial_guess)!=2:
                raise ValueError('Specify initial guess for two parameters')
            [l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
            # Scale and location follow in closed form from the first two L-moments.
            l2 = (-1/(1+l3)+2/(2+l3)-1/(1+l4)+2/(2+l4))/a2
            l1 = a1 + 1/l2*(1/(1+l4) - 1/(1+l3))
            param = np.array([l1,l2,l3,l4]).ravel()
        if self.param_type == 'FMKL':
            def fun_opt(x):
                """Auxiliary function for optimization."""
                [l3, l4] = x
                L2 = -1/(1+l3)/l3+2/(2+l3)/l3-1/(1+l4)/l4+2/(2+l4)/l4
                return np.max([np.abs((1/(l3+1)/l3 - 6/(2+l3)/l3 + 6/(3+l3)/l3 - 1/(l4+1)/l4 + 6/(2+l4)/l4 - 6/(3+l4)/l4)/L2 - a3),
                               np.abs((-1/(1+l3)/l3 + 12/(2+l3)/l3 - 30/(3+l3)/l3 + 20/(4+l3)/l3-1/(1+l4)/l4 + 12/(2+l4)/l4 - 30/(3+l4)/l4 + 20/(4+l4)/l4)/L2 - a4)])
            if initial_guess is None or initial_guess.ndim==0 or len(initial_guess)!=2:
                raise ValueError('Specify initial guess for two parameters')
            [l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
            l2 = (-1/(1+l3)/l3+2/(2+l3)/l3-1/(1+l4)/l4+2/(2+l4)/l4)/a2
            l1 = a1 + 1/l2*(1/(1+l4)/l4 - 1/(1+l3)/l3)+1/l2/l3 - 1/l2/l4
            param = np.array([l1,l2,l3,l4]).ravel()
        if self.param_type == 'VSL':
            # Analytical solution: the quadratic in p4 has two roots, each
            # giving identical L-moments. If the discriminant is negative
            # (L-kurtosis below the attainable bound) nudge a4 to the boundary.
            if a4**2+98*a4 +1 <0:
                a4 = (-98+(98**2 - 4)**0.5)/2+10**(-10)
            p4 = np.array([(3+7*a4 + np.sqrt(a4**2+98*a4 +1))/(2*(1-a4)), (3+7*a4 - np.sqrt(a4**2+98*a4 +1))/(2*(1-a4))])
            p3 = 0.5*(1-a3*(p4+3)/(p4-1))
            # Clip the weight parameter into its valid range [0,1].
            p3[p4==1] = 0.5
            p3[p3<0] = 0
            p3[p3>1] = 1
            p2 = a2*(p4+1)*(p4+2)
            p1 = a1+p2*(1-2*p3)/(p4+1)
            param1 = [p1[0], p2[0],p3[0],p4[0]]
            param2 = [p1[1], p2[1],p3[1],p4[1]]
            if initial_guess is None:
                # Pick the valid solution; if both are valid, the one with the
                # smaller quantile-based GoF metric wins.
                best = [self.check_param(param1)*1,self.check_param(param2)*1]
                if np.sum(best)==2:
                    GoF = [self.GoF_Q_metric(data,param1),self.GoF_Q_metric(data,param2)]
                    best = (GoF == np.min(GoF))*1
                param = np.array([param1,param2][np.argmax(best)]).ravel()
            else:
                if initial_guess.ndim!=0 and len(initial_guess)!=1:
                    raise ValueError('Specify initial guess for one parameter')
                # Choose the root of p4 closest to the supplied guess.
                if np.abs(initial_guess - param1[3]) <= np.abs(initial_guess - param2[3]):
                    param = np.array(param1)
                else:
                    param = np.array(param2)
        if disp_fit:
            print('')
            print('Sample L-moments: ', sample_lm(data))
            print('Fitted L-moments: ', lm(param))
            print('')
            print('Parameters: ', param)
            if not self.check_param(param):
                print('')
                print('Parameters are not valid. Try another initial guess.')
            else:
                if test_gof:
                    ks, chi2 = self.GoF_tests(param, data, bins_gof)
                    print('')
                    print('Goodness-of-Fit')
                    print(ks)
                    print(chi2)
                self.plot_fitting(data,param,bins = bins_hist)
        return np.array(param)
    def grid_search(self, data, fun_min, grid_min = -3, grid_max = 3, n_grid = 10):
        """Find parameters of GLD by grid search procedure.

        It does grid search for third and fourth parameters. First two parameters are calculated by fitting
        support to data. It returns parameters with minimum value of `fun_min`.

        Parameters
        ----------
        data : array-like
            Input data.
        fun_min : function
            Function of parameters to minimize for choosing the best parameters. For example, negative log-likelihood function.
        grid_min : float, optional
            Minimum value of shape parameters for the grid. The default is -3.
        grid_max : float, optional
            Maximum value of shape parameters for the grid. The default is 3.
        n_grid : int, optional
            Number of grid points for each parameter. The default is 10.

        Returns
        -------
        array-like
            Parameters of GLD.
        """
        # Tail probability used when matching the candidate support to the
        # sample range.
        eps = 0.01
        def fun_opt_supp(x):
            """Auxiliary function for estimation of first two parameters by fitting support to data."""
            A = np.min(data)
            B = np.max(data)
            # `param` is read from the enclosing loop below (late binding).
            par = np.hstack([x,param[2:]])
            if not self.check_param(par):
                return np.inf
            return np.max([np.abs(self.Q(eps,par) - A), np.abs(self.Q(1-eps,par) - B)])
        if self.param_type == 'VSL':
            # The VSL third parameter is a weight restricted to [0,1].
            p3_list = np.linspace(0,1,n_grid)
            p4_list = np.linspace(grid_min,grid_max,n_grid)
        else:
            p3_list = np.linspace(grid_min,grid_max,n_grid)
            p4_list = np.linspace(grid_min,grid_max,n_grid)
        res = np.zeros((n_grid, n_grid))
        for i in range(n_grid):
            for j in range(n_grid):
                # Candidate: sample mean as location, unit scale, grid shapes.
                param = [np.mean(data),1,p3_list[i], p4_list[j]]
                if self.param_type == 'RS' and not self.check_param(param):
                    # Some RS shape combinations are only valid with negative scale.
                    param[1] = -1
                x = optimize.fmin(fun_opt_supp,param[:2], disp=False, xtol = 10**(-8))
                param[:2] = x
                res[i,j] = fun_min(param)
        ind = np.unravel_index(np.argmin(res, axis=None), res.shape)
        p3,p4 = p3_list[ind[0]], p4_list[ind[1]]
        # Re-fit location/scale for the winning shape pair and return.
        param = np.hstack([np.mean(data),1,p3,p4])
        x = optimize.fmin(fun_opt_supp,param[:2], disp=False, xtol = 10**(-8))
        return np.hstack([x,p3,p4])
    def fit_MPS(self,data, initial_guess = None, method = 'grid', u = 0.1,grid_min = -3, grid_max = 3, n_grid = 10, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
        """Fit GLD to data using method of maximum product of spacing.

        It estimates parameters of GLD by maximization of the geometric mean of spacings in the data,
        which are the differences between the values of the cumulative distribution function at neighbouring data points.
        This consists of two steps. The first step is finding initial values of parameters for maximization procedure
        using method of moments, method of percentiles, method of L-moments or grid search procedure.
        The second step is maximization of the geometric mean of spacings using numerical methods.
        The optimization procedure is quite difficult and requires some time (especially for large samples).

        Parameters
        ----------
        data : array-like
            Input data.
        initial_guess : array-like, optional
            Initial guess for the first step. Length of initial_guess depends on the method used at the first step.
            It's ignored if method is 'grid'.
        method : str, optional
            Method used for finding initial parameters at the first step. Should be 'MM' for method of moments,
            'PM' for method of percentiles, 'LMM' for method of L-moments or 'grid' for grid search procedure.
            The default is 'grid'.
        u : float, optional
            Parameter for calculating percentile-based statistics for method of percentiles.
            Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
        grid_min : float, optional
            Minimum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
        grid_max : float, optional
            Maximum value of shape parameters for the grid search. The default is 3. It's ignored if method is not 'grid'.
        n_grid : int, optional
            Number of grid points for the grid search. The default is 10. It's ignored if method is not 'grid'.
        xtol : float, optional
            Absolute error for optimization procedure. The default is 0.0001.
        maxiter : int, optional
            Maximum number of iterations for optimization procedure.
        maxfun : int, optional
            Maximum number of function evaluations for optimization procedure.
        disp_optimizer : bool, optional
            Set True to display information about optimization procedure. The default is True.
        disp_fit : bool, optional
            Set True to display information about fitting. The default is True.
        bins_hist : int, optional
            Number of bins for histogram. It's ignored if disp_fit is False.
        test_gof : bool, optional
            Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
        bins_gof : int, optional
            Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.

        Raises
        ------
        ValueError
            If input parameters are incorrect or parameters of GLD from the first step are not valid.

        Returns
        -------
        array-like
            Fitted parameters of GLD.

        References
        ----------
        .. [1] Cheng, R.C.H., & Amin, N.A.K. 1983. Estimating parameters in continuous univariate
           distributions with a shifted origin. Journal of the Royal Statistical Society: Series B
           (Methodological), 45(3), 394–403.
        .. [2] Ranneby, B. 1984. The maximum spacing method. an estimation method related to the
           maximum likelihood method. Scandinavian Journal of Statistics, 93–112.
        .. [3] Chalabi, Y., Scott, D.J., & Wuertz, D. 2012. Flexible distribution modeling with the
           generalized lambda distribution.
        """
        data = np.sort(data.ravel())
        # Tied observations make spacings zero (log of zero); jitter duplicates
        # with small Gaussian noise so all spacings are strictly positive.
        # NOTE(review): this mutates the (sorted) local copy, not the caller's array.
        unique, counts = np.unique(data, return_counts=True)
        delta = np.min(np.diff(unique))/2
        ind = np.nonzero(counts>1)[0]
        ind1 = np.nonzero(np.isin(data, unique[ind]))[0]
        data[ind1] = data[ind1] + stats.norm.rvs(0,delta/3,len(ind1))
        def S(param):
            """Negative log of the geometric mean of CDF spacings; np.inf for invalid parameters."""
            if not self.check_param(param):
                return np.inf
            return -np.mean(np.log(np.abs(np.diff(self.CDF_num(np.sort((data)),param)))))
        if method not in ['MM','LMM','PM','grid']:
            raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\' , \'PM\' or \'grid\'' %method)
        # Step 1: initial estimate via the requested auxiliary fitting method.
        if method=='MM':
            param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if method=='PM':
            param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if method=='LMM':
            param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if method=='grid':
            param1 = self.grid_search(data, fun_min = S, grid_min = grid_min, grid_max = grid_max, n_grid = n_grid)
        if not self.check_param(param1):
            raise ValueError('Parameters are not valid. Try another initial guess.')
        # Ensure the initial support covers the whole sample before optimizing.
        if np.min(data)<self.supp(param1)[0] or np.max(data)>self.supp(param1)[1]:
            param1 = self.correct_supp(data, param1)
        # Step 2: numerical minimization of S starting from the initial estimate.
        param = optimize.fmin(S,param1,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
        if disp_fit:
            print('')
            print('Initial point for Maximum Product of Spacing Method: ', param1)
            print('Estimated by ', method)
            print('')
            print('Initial negative logarithm of mean spacing: ', S(param1))
            print('Optimized negative logarithm of mean spacing: ', S(param))
            print('')
            print('Parameters: ', param)
            if test_gof:
                ks, chi2 = self.GoF_tests(param, data, bins_gof)
                print('')
                print('Goodness-of-Fit')
                print(ks)
                print(chi2)
            self.plot_fitting(data,param,bins = bins_hist)
        return np.array(param)
def fit_ML(self,data, initial_guess = None, method = 'grid', u = 0.1, grid_min = -3, grid_max = 3, n_grid = 10, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of maximum likelihood.
It estimates parameters of GLD by maximizing a likelihood function.
This consists of two steps. The first step is finding initial values of parameters for maximization procedure
using method of moments, method of percentiles, method of L-moments or grid search procedure.
The second step is maximization of likelihood function using numerical methods.
The optimization procedure is quite difficult and requires some time (especially for large samples).
Parameters
----------
data : array-like
Input data.
initial_guess : array-like, optional
Initial guess for the first step. Length of initial_guess depends on the method used at the first step.
It's ignored if method is 'grid'.
method : str, optional
Method used for finding initial parameters at the first step. Should be 'MM' for method of moments,
'PM' for method of percentiles, 'LMM' for method of L-moments or 'grid' for grid search procedure.
The default is 'grid'.
u : float, optional
Parameter for calculating percentile-based statistics for method of percentiles.
Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
grid_min : float, optional
Minimum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
grid_max : float, optional
Maximum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
n_grid : int, optional
Number of grid points for the grid search. The default is 10. It's ignored if method is not 'grid'.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If input parameters are incorrect or parameters of GLD from the first step are not valid.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] Su, S. 2007. Numerical maximum log likelihood estimation for generalized lambda distributions.
Computational Statistics & Data Analysis, 51(8), 3983–3998.
"""
data = data.ravel()
def lnL(param):
"""Likelihood function for optimization."""
if not self.check_param(param):
return np.inf
return -np.sum(np.log(self.PDF_num(data,param)))
if method not in ['MM','LMM','PM','grid']:
raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\', \'PM\' or \'grid\'' %method)
if method=='MM':
param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='PM':
param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='LMM':
param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='grid':
param1 = self.grid_search(data, fun_min = lnL, grid_min = grid_min, grid_max = grid_max, n_grid = n_grid)
if not self.check_param(param1):
raise ValueError('Parameters are not valid. Try another initial guess.')
if np.min(data)<self.supp(param1)[0] or np.max(data)>self.supp(param1)[1]:
param1 = self.correct_supp(data, param1)
param = optimize.fmin(lnL,param1,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
if disp_fit:
print('')
print('Initial point for Maximum Likilehood Method: ', param1)
print('Estimated by ', method)
print('')
print('Initial negative log-likelihood function: ', lnL(param1))
print('Optimized negative log-likelihood function: : ', lnL(param))
print('')
print('Parameters: ', param)
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
def fit_starship(self,data, initial_guess = None, method = 'grid', u = 0.1,grid_min = -3, grid_max = 3, n_grid = 10, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using starship method.
It estimates parameters of GLD by transformation data to uniform distribution (using numerical calculation of GLD
cumulative distribution function) and optimization goodness-of-fit measure (Andersod-Darling statistic is used).
This consists of two steps. The first step is finding initial values of parameters for optimization procedure
using method of moments, method of percentiles, method of L-moments or grid search procedure.
The second step is optimization of Anderson-Darling statistic for transformed data.
The optimization procedure is quite difficult and requires some time (especially for large samples).
Parameters
----------
data : array-like
Input data.
initial_guess : array-like, optional
Initial guess for the first step. Length of initial_guess depends on the method used at the first step.
It's ignored if method is 'grid'.
method : str, optional
Method used for finding initial parameters at the first step. Should be 'MM' for method of moments,
'PM' for method of percentiles, 'LMM' for method of L-moments or 'grid' for grid search procedure.
The default is 'grid'.
u : float, optional
Parameter for calculating percentile-based statistics for method of percentiles.
Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
grid_min : float, optional
Minimum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
grid_max : float, optional
Maximum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
n_grid : int, optional
Number of grid points for the grid search. The default is 10. It's ignored if method is not 'grid'.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If input parameters are incorrect or parameters of GLD from the first step are not valid.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] King, R. A. R., and MacGillivray, H. L. 1999. "A Starship Estimation Method for
the Generalized Lambda Distributions," Australian and New Zealand Journal of Statistics, 41, 353–374.
"""
data = np.sort(data.ravel())
def fun_opt(param):
"""AD-statistic for optimization."""
if not self.check_param(param):
return np.inf
u = self.CDF_num(data, param)
return -len(data) - 1/len(data)*np.sum((np.arange(1,len(data)+1)*2 - 1)*(np.log(u) + np.log(1 - u[::-1])))
if method not in ['MM','LMM','PM','grid']:
raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\' , \'PM\' or \'grid\'' %method)
if method=='MM':
param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='PM':
param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='LMM':
param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='grid':
param1 = self.grid_search(data, fun_min = fun_opt, grid_min = grid_min, grid_max = grid_max, n_grid = n_grid)
if not self.check_param(param1):
raise ValueError('Parameters are not valid. Try another initial guess.')
if np.min(data)<self.supp(param1)[0] or np.max(data)>self.supp(param1)[1]:
param1 = self.correct_supp(data, param1)
param = optimize.fmin(fun_opt,param1,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
if disp_fit:
print('')
print('Initial point for Starship Method: ', param1)
print('Estimated by ', method)
print('')
print('Initial KS-statistic: ', fun_opt(param1))
print('Optimized KS-statistic : ', fun_opt(param))
print('')
print('Parameters: ', param)
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
    def fit_curve(self,x,y, initial_guess = None, method = 'MM', u = 0.1, N_gen = 1000, shift = False, ymin = 0.01, optimization_phase = True, random_state = None, xtol=0.0001, maxiter=None, maxfun=None,disp_optimizer=True,disp_fit = True,bins_hist = None,):
        """Fit GLD to arbitrary curve given by x and y coordinates.

        It models a curve as `y = c * GLD.PDF_num(x, param) + shift_val` where `param` is parameters of GLD,
        `c` is normalizing constant and `shift_val` is y-shift.
        If y-shift is zero the values of y should be non-negative.
        The procedure of curve fitting consists of two phases: simulation and optimization.
        At the simulation phase the curve is normalized to specify probability density function
        (it should be non-negative and the area under the curve should be equal to 1).
        Then it generates sample of random variables defined by this density function and fit GLD to the sample
        using one of the methods: method of moments, method of percentiles or method of L-moments.
        Then at the optimization phase it provides more accurate solution by minimizing mean square error.
        This procedure is quite difficult and requires some time, so optimization phase is optional.

        Parameters
        ----------
        x : array-like
            x-coordinates of the curve.
        y : array-like
            y-coordinates of the curve.
        initial_guess : array-like, optional
            Initial guess for estimating parameters at the simulation phase. Length of initial_guess depends on the method used at the simulation phase.
        method : str, optional
            Method used for estimating parameters at the simulation phase. Should be 'MM' for method of moments,
            'PM' for method of percentiles or 'LMM' for method of L-moments. The default is 'MM'.
        u : float, optional
            Parameter for calculating percentile-based statistics for method of percentiles.
            Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
        N_gen : int, optional
            Size of sample generated at the simulation phase. The default is 1000.
        shift : bool, optional
            Set True to fit y-shift. Set False to use zero y-shift. The default is False.
        ymin : float, optional
            Minimum value of y-coordinates after shifting. Should be positive. The default is 0.01. It's ignored if shift is False.
        optimization_phase : bool, optional
            Set True to perform optimization phase. Set False to skip optimization phase. The default is True.
        random_state : None or int, optional
            The seed of the pseudo random number generator. The default is None.
        xtol : float, optional
            Absolute error for optimization procedure. The default is 0.0001.
        maxiter : int, optional
            Maximum number of iterations for optimization procedure.
        maxfun : int, optional
            Maximum number of function evaluations for optimization procedure.
        disp_optimizer : bool, optional
            Set True to display information about optimization procedure. The default is True.
        disp_fit : bool, optional
            Set True to display information about fitting. The default is True.
        bins_hist : int, optional
            Number of bins for histogram. It's ignored if disp_fit is False.

        Raises
        ------
        ValueError
            If input parameters are incorrect or parameters of GLD from the simulation phase are not valid.

        Returns
        -------
        param : array-like
            Parameters of GLD.
        c : float
            Normalizing constant.
        shift_val : float
            Value of y-shift.
        """
        x = x.ravel()
        y = y.ravel()
        if not shift and ((y<0).any() or (y==0).all()):
            raise ValueError('y shouldn\'t be zero or contain negative values. Use \'shift = True\'')
        if shift and ymin<=0:
            raise ValueError('ymin should be positive')
        if shift:
            # Shift so the smallest y becomes ymin (strictly positive density).
            shift_val = np.min(y) - ymin
        else:
            shift_val = 0
        y = y-shift_val
        # Trapezoid areas between consecutive x points; C is the total area,
        # p the per-trapezoid probability mass, y1 the normalized density values.
        S = np.diff(x)*(y[:-1]+y[1:])/2
        C = np.sum(S)
        p = S/C
        y1 = y/C
        # NOTE(review): random_state=0 is treated the same as None here (no seeding).
        if random_state:
            np.random.seed(random_state)
        def gen(p):
            """Auxiliary function for generating random variables.

            Draws one variate by inverse-transform sampling over the trapezoid
            masses, interpolating linearly within the selected trapezoid.
            """
            a = np.random.rand(1)
            i = np.nonzero(a<=np.cumsum(p))[0][0]
            return x[i] + 2*(a - np.sum(p[:i]))/(y1[i]+y1[i+1])
        data = np.array([gen(p) for i in range(N_gen)]).ravel()
        if method not in ['MM','LMM','PM']:
            raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\' or \'PM\'' %method)
        # Simulation phase: fit GLD to the generated sample.
        if method=='MM':
            param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if method=='PM':
            param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if method=='LMM':
            param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
        if not self.check_param(param1):
            raise ValueError('Parameters are not valid. Try another initial guess.')
        def fun_opt(p):
            """Mean squared error of c * PDF(x) against y for the stacked vector p = [param, c]."""
            param = p[:4]
            c = p[4]
            if not self.check_param(param):
                return np.inf
            return np.mean((self.PDF_num(x,param)*c - y)**2)
        # Optimization phase (optional): refine parameters and scale jointly.
        if optimization_phase:
            res = optimize.fmin(fun_opt,np.hstack([param1,C]),xtol = xtol, maxiter = maxiter,maxfun = maxfun, disp = disp_optimizer)
            param = res[:4]
            c = res[4]
        else:
            param = param1
            c = C
        if disp_fit:
            print('')
            print('MSE: ',fun_opt(np.hstack([param,c]) ))
            print('')
            print('Parameters: ', param)
            print('C: ', c)
            if shift:
                print('shift: ', shift_val)
            # Left panel: histogram of the simulated sample vs fitted density.
            fig, ax = plt.subplots(1,2,figsize = (12,3.5))
            ax[0].grid()
            ax[0].hist(data,color = 'skyblue',density = True, bins = bins_hist)
            p = np.linspace(0.01,0.99,100)
            ax[0].plot(self.Q(p,param1), self.PDF_Q(p,param1),'r')
            ax[0].set_title('Simulation phase')
            # Right panel: original curve vs fitted curve (shift restored).
            y_fit = self.PDF_num(x,param)*c + shift_val
            y = y + shift_val
            delta = np.max([0.05*np.abs(np.mean(y)), 10**(-5)])
            ax[1].grid()
            ax[1].plot(x,y,'b')
            ax[1].plot(x,y_fit,'r')
            ax[1].set_ylim(np.min([y,y_fit]) - delta,np.max([y,y_fit])+delta)
            ax[1].legend(['data', 'GLD'])
            ax[1].set_title('Result of curve fitting')
        return param, c, shift_val
from hw2 import *
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Load the test image and show the original.
img = mpimg.imread('kodim04.png')
plt.imshow(img)

# Split into 16x16 blocks, compress to rank 50, and report the relative error.
blocks = split(img, 16)
A, B, e_rel = compress(blocks, 50)
print(e_rel)

# Reconstruct from the low-rank factors and compare against the original.
reconstructed = join(A @ B, 16, img.shape[1], img.shape[0])
print(relError(img, reconstructed))

plt.figure()
plt.imshow(reconstructed)
plt.show()
from random import *
def new_game(n):
    """Return a fresh n-by-n board with every cell set to 0."""
    return [[0] * n for _ in range(n)]
def add_two(mat):
    """Place a 2 in a uniformly chosen empty cell of mat (modified in place and returned)."""
    size = len(mat)
    row = randint(0, size - 1)
    col = randint(0, size - 1)
    # Re-draw until an empty cell is hit; never terminates on a full board.
    while mat[row][col] != 0:
        row = randint(0, size - 1)
        col = randint(0, size - 1)
    mat[row][col] = 2
    return mat
def game_state(mat):
    """Classify the board: 'win' (a 2048 tile exists), 'not over' (a move remains), else 'lose'."""
    n = len(mat)
    m = len(mat[0])
    # A 2048 tile anywhere wins immediately.
    if any(cell == 2048 for row in mat for cell in row):
        return 'win'
    # A merge is possible when any cell equals its right or lower neighbour
    # (these scans deliberately stop one short of the last row/column).
    for i in range(n - 1):
        for j in range(m - 1):
            if mat[i][j] == mat[i + 1][j] or mat[i][j] == mat[i][j + 1]:
                return 'not over'
    # An empty cell also means the game can continue.
    if any(cell == 0 for row in mat for cell in row):
        return 'not over'
    # The scans above skipped pairs in the last row and last column; check them here.
    last = n - 1
    for k in range(n - 1):
        if mat[last][k] == mat[last][k + 1]:
            return 'not over'
        if mat[k][last] == mat[k + 1][last]:
            return 'not over'
    return 'lose'
def reverse(mat):
    """Return a new board with every row mirrored left-to-right."""
    return [row[::-1] for row in mat]
def transpose(mat):
    """Return the transpose of mat as a new list of lists."""
    cols = len(mat[0])
    return [[row[c] for row in mat] for c in range(cols)]
def cover_up(mat):
    """Slide all nonzero tiles in each row to the left, preserving order.

    Generalized from a hard-coded 4x4 board to any rectangular mat
    (callers passing 4x4 boards are unaffected).

    Returns (new, done): the slid board (a new list) and a flag that is
    True when at least one tile actually moved.
    """
    rows = len(mat)
    cols = len(mat[0])
    new = [[0] * cols for _ in range(rows)]
    done = False
    for i in range(rows):
        count = 0  # next free slot in the compacted row
        for j in range(cols):
            if mat[i][j] != 0:
                new[i][count] = mat[i][j]
                if j != count:
                    done = True  # the tile moved from its original column
                count += 1
    return (new, done)
def merge(mat):
    """Merge equal adjacent tiles leftward, in place.

    Generalized from a hard-coded 4x4 board to any rectangular mat
    (callers passing 4x4 boards are unaffected). Each merge doubles the
    left tile and zeroes the right one; a follow-up cover_up compacts
    the zeros. Returns (mat, done) where done is True if any merge happened.
    """
    done = False
    for i in range(len(mat)):
        for j in range(len(mat[0]) - 1):
            if mat[i][j] == mat[i][j + 1] and mat[i][j] != 0:
                mat[i][j] *= 2
                mat[i][j + 1] = 0
                done = True
    return (mat, done)
def up(game):
    """Shift the board up; returns (new_board, moved_flag)."""
    print("up")
    # Transposing maps an upward shift onto a leftward slide on rows.
    board = transpose(game)
    board, moved = cover_up(board)
    board, merged = merge(board)
    moved = moved or merged
    board = cover_up(board)[0]
    return (transpose(board), moved)
def down(game):
    """Shift the board down; returns (new_board, moved_flag)."""
    print("down")
    # Transpose + row-reverse maps a downward shift onto a leftward slide.
    board = reverse(transpose(game))
    board, moved = cover_up(board)
    board, merged = merge(board)
    moved = moved or merged
    board = cover_up(board)[0]
    return (transpose(reverse(board)), moved)
def left(game):
    """Shift the board left; returns (new_board, moved_flag)."""
    print("left")
    # Leftward is the native direction of cover_up/merge: no remapping needed.
    board, moved = cover_up(game)
    board, merged = merge(board)
    moved = moved or merged
    return (cover_up(board)[0], moved)
def right(game):
    """Shift the board right; returns (new_board, moved_flag)."""
    print("right")
    # Row-reversal maps a rightward shift onto a leftward slide.
    board = reverse(game)
    board, moved = cover_up(board)
    board, merged = merge(board)
    moved = moved or merged
    return (reverse(cover_up(board)[0]), moved)
|
sounds = [{
"soundName": "A Bass",
"md5": "c04ebf21e5e19342fa1535e4efcdb43b.wav",
"sampleCount": 28160,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "A Elec Bass",
"md5": "5cb46ddd903fc2c9976ff881df9273c9.wav",
"sampleCount": 5920,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "A Elec Guitar",
"md5": "fa5f7fea601e9368dd68449d9a54c995.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "A Elec Piano",
"md5": "0cfa8e84d6a5cd63afa31d541625a9ef.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "A Guitar",
"md5": "ee753e87d212d4b2fb650ca660f1e839.wav",
"sampleCount": 31872,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "A Minor Ukulele",
"md5": "69d25af0fd065da39c71439174efc589.wav",
"sampleCount": 18267,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes",
"chords"
]
},
{
"soundName": "A Piano",
"md5": "0727959edb2ea0525feed9b0c816991c.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "A Sax",
"md5": "420991e0d6d99292c6d736963842536a.wav",
"sampleCount": 6472,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "A Trombone",
"md5": "863ccc8ba66e6dabbce2a1261c22be0f.wav",
"sampleCount": 17227,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "A Trumpet",
"md5": "d2dd6b4372ca17411965dc92d52b2172.wav",
"sampleCount": 13911,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Afro String",
"md5": "3477ccfde26047eeb93ff43a21ac7d3d.wav",
"sampleCount": 9807,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Alert",
"md5": "f62e3bfccab9c23eee781473c94a009c.wav",
"sampleCount": 21362,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games",
"space"
]
},
{
"soundName": "Alien Creak1",
"md5": "0377a7476136e5e8c780c64a4828922d.wav",
"sampleCount": 8045,
"rate": 11025,
"format": "",
"tags": [
"effects",
"space"
]
},
{
"soundName": "Alien Creak2",
"md5": "21f82b7f1a83c501539c5031aea4fa8c.wav",
"sampleCount": 8300,
"rate": 11025,
"format": "",
"tags": [
"effects",
"space"
]
},
{
"soundName": "B Bass",
"md5": "e31dcaf7bcdf58ac2a26533c48936c45.wav",
"sampleCount": 25792,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "B Elec Bass",
"md5": "5a0701d0a914223b5288300ac94e90e4.wav",
"sampleCount": 6208,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "B Elec Guitar",
"md5": "81f142d0b00189703d7fe9b1f13f6f87.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "B Elec Piano",
"md5": "9cc77167419f228503dd57fddaa5b2a6.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "B Guitar",
"md5": "2ae2d67de62df8ca54d638b4ad2466c3.wav",
"sampleCount": 29504,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "B Piano",
"md5": "86826c6022a46370ed1afae69f1ab1b9.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "B Sax",
"md5": "653ebe92d491b49ad5d8101d629f567b.wav",
"sampleCount": 9555,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "B Trombone",
"md5": "85b663229525b73d9f6647f78eb23e0a.wav",
"sampleCount": 15522,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "B Trumpet",
"md5": "cad2bc57729942ed9b605145fc9ea65d.wav",
"sampleCount": 14704,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Baa",
"md5": "ca694053020e42704bcf1fc01a70f1c3.wav",
"sampleCount": 41822,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"sheep"
]
},
{
"soundName": "Bark",
"md5": "cd8fa8390b0efdd281882533fbfcfcfb.wav",
"sampleCount": 3168,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Basketball Bounce",
"md5": "1727f65b5f22d151685b8e5917456a60.wav",
"sampleCount": 8099,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"effects"
]
},
{
"soundName": "Bass Beatbox",
"md5": "28153621d293c86da0b246d314458faf.wav",
"sampleCount": 6720,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Beat Box1",
"md5": "663270af0235bf14c890ba184631675f.wav",
"sampleCount": 5729,
"rate": 11025,
"format": "",
"tags": [
"music",
"human",
"voice",
"hiphop"
]
},
{
"soundName": "Beat Box2",
"md5": "b9b8073f6aa9a60085ad11b0341a4af2.wav",
"sampleCount": 5729,
"rate": 11025,
"format": "",
"tags": [
"music",
"human",
"voice",
"hiphop"
]
},
{
"soundName": "Bell Cymbal",
"md5": "efddec047de95492f775a1b5b2e8d19e.wav",
"sampleCount": 19328,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Bell Toll",
"md5": "25d61e79cbeba4041eebeaebd7bf9598.wav",
"sampleCount": 45168,
"rate": 11025,
"format": "",
"tags": [
"effects",
"dramatic"
]
},
{
"soundName": "Big Boing",
"md5": "00d6e72ef8bf7088233e98fbcee0ec6d.wav",
"sampleCount": 18174,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Bird",
"md5": "18bd4b634a3f992a16b30344c7d810e0.wav",
"sampleCount": 3840,
"rate": 11025,
"format": "",
"tags": [
"animals"
]
},
{
"soundName": "Birthday",
"md5": "89691587a169d935a58c48c3d4e78534.wav",
"sampleCount": 161408,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Bite",
"md5": "0039635b1d6853face36581784558454.wav",
"sampleCount": 7672,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"human"
]
},
{
"soundName": "Boing",
"md5": "53a3c2e27d1fb5fdb14aaf0cb41e7889.wav",
"sampleCount": 6804,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Bonk",
"md5": "dd93f7835a407d4de5b2512ec4a6a806.wav",
"sampleCount": 13908,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Boom Cloud",
"md5": "62d87dfb0f873735e59669d965bdbd7d.wav",
"sampleCount": 88200,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games",
"space",
"dramatic"
]
},
{
"soundName": "Boop Bing Bop",
"md5": "66968153be7dce9e5abf62d627ffe40f.wav",
"sampleCount": 54957,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Bowling Strike",
"md5": "32f3af03ddfbd9cc89c8565678a26813.wav",
"sampleCount": 26629,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"effects"
]
},
{
"soundName": "Bubbles",
"md5": "78b0be9c9c2f664158b886bc7e794095.wav",
"sampleCount": 45056,
"rate": 11025,
"format": "",
"tags": [
"effects",
"water"
]
},
{
"soundName": "Buzz Whir",
"md5": "d4f76ded6bccd765958d15b63804de55.wav",
"sampleCount": 9037,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "C Bass",
"md5": "c3566ec797b483acde28f790994cc409.wav",
"sampleCount": 44608,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C Elec Bass",
"md5": "69eee3d038ea0f1c34ec9156a789236d.wav",
"sampleCount": 5216,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C Elec Guitar",
"md5": "0d340de02e14bebaf8dfa0e43eb3f1f9.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C Elec Piano",
"md5": "8366ee963cc57ad24a8a35a26f722c2b.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C Guitar",
"md5": "22baa07795a9a524614075cdea543793.wav",
"sampleCount": 44864,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C Major Ukulele",
"md5": "aa2ca112507b59b5337f341aaa75fb08.wav",
"sampleCount": 18203,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes",
"chords"
]
},
{
"soundName": "C Piano",
"md5": "d27ed8d953fe8f03c00f4d733d31d2cc.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C Sax",
"md5": "4d2c939d6953b5f241a27a62cf72de64.wav",
"sampleCount": 9491,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C Trombone",
"md5": "821b23a489201a0f21f47ba8528ba47f.wav",
"sampleCount": 19053,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C Trumpet",
"md5": "8970afcdc4e47bb54959a81fe27522bd.wav",
"sampleCount": 13118,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C2 Bass",
"md5": "667d6c527b79321d398e85b526f15b99.wav",
"sampleCount": 24128,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C2 Elec Bass",
"md5": "56fc995b8860e713c5948ecd1c2ae572.wav",
"sampleCount": 5792,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C2 Elec Guitar",
"md5": "3a8ed3129f22cba5b0810bc030d16b5f.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C2 Elec Piano",
"md5": "366c7edbd4dd5cca68bf62902999bd66.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C2 Guitar",
"md5": "c8d2851bd99d8e0ce6c1f05e4acc7f34.wav",
"sampleCount": 27712,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "C2 Piano",
"md5": "75d7d2c9b5d40dd4e1cb268111abf1a2.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C2 Sax",
"md5": "ea8d34b18c3d8fe328cea201666458bf.wav",
"sampleCount": 7349,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "C2 Trombone",
"md5": "68aec107bd3633b2ee40c532eedc3897.wav",
"sampleCount": 13904,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "C2 Trumpet",
"md5": "df08249ed5446cc5e10b7ac62faac89b.wav",
"sampleCount": 15849,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Car Horn",
"md5": "7c887f6a2ecd1cdb85d5527898d7f7a0.wav",
"sampleCount": 42443,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"transportation"
]
},
{
"soundName": "Car Passing",
"md5": "c21a5ad00b40b5ce923e56c905c94a9f.wav",
"sampleCount": 84992,
"rate": 11025,
"format": "",
"tags": [
"transportation",
"ambience",
"background"
]
},
{
"soundName": "Car Vroom",
"md5": "ead1da4a87ff6cb53441142f7ac37b8f.wav",
"sampleCount": 43358,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"transportation"
]
},
{
"soundName": "Cave",
"md5": "881f1bf5f301a36efcce4204a44af9ab.wav",
"sampleCount": 163584,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Chatter",
"md5": "fd8543abeeba255072da239223d2d342.wav",
"sampleCount": 25843,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"squirrel",
"chipmunk"
]
},
{
"soundName": "Chee Chee",
"md5": "25f4826cdd61e0a1c623ec2324c16ca0.wav",
"sampleCount": 34560,
"rate": 22050,
"format": "",
"tags": [
"animals",
"monkey"
]
},
{
"soundName": "Cheer",
"md5": "170e05c29d50918ae0b482c2955768c0.wav",
"sampleCount": 108864,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"human",
"voice"
]
},
{
"soundName": "Chirp",
"md5": "3b8236bbb288019d93ae38362e865972.wav",
"sampleCount": 5301,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"bird"
]
},
{
"soundName": "Chomp",
"md5": "0b1e3033140d094563248e61de4039e5.wav",
"sampleCount": 2912,
"rate": 11025,
"format": "",
"tags": [
"effects",
"human"
]
},
{
"soundName": "Chord",
"md5": "7ffe91cce06c5415df53610d173336e7.wav",
"sampleCount": 20608,
"rate": 11025,
"format": "",
"tags": [
"music",
"electronic"
]
},
{
"soundName": "Clang",
"md5": "4102d78dc98ae81448b140f35fd73e80.wav",
"sampleCount": 26703,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games"
]
},
{
"soundName": "Clap Beatbox",
"md5": "abc70bb390f8e55f22f32265500d814a.wav",
"sampleCount": 4224,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Clapping",
"md5": "684ffae7bc3a65e35e9f0aaf7a579dd5.wav",
"sampleCount": 84160,
"rate": 22050,
"format": "",
"tags": [
"human"
]
},
{
"soundName": "Clock Ticking",
"md5": "a634fcb87894520edbd7a534d1479ec4.wav",
"sampleCount": 109584,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Clown Honk",
"md5": "ec66961f188e9b8a9c75771db744d096.wav",
"sampleCount": 9009,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"horn"
]
},
{
"soundName": "Coin",
"md5": "1f81d88fb419084f4d82ffb859b94ed6.wav",
"sampleCount": 3975,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Collect",
"md5": "32514c51e03db680e9c63857b840ae78.wav",
"sampleCount": 13320,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Computer Beep",
"md5": "28c76b6bebd04be1383fe9ba4933d263.wav",
"sampleCount": 9536,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Computer Beep2",
"md5": "1da43f6d52d0615da8a250e28100a80d.wav",
"sampleCount": 19200,
"rate": 11025,
"format": "",
"tags": [
"effects",
"electronic"
]
},
{
"soundName": "Connect",
"md5": "9aad12085708ccd279297d4bea9c5ae0.wav",
"sampleCount": 22623,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Cough1",
"md5": "98ec3e1eeb7893fca519aa52cc1ef3c1.wav",
"sampleCount": 7516,
"rate": 11025,
"format": "",
"tags": [
"human"
]
},
{
"soundName": "Cough2",
"md5": "467fe8ef3cab475af4b3088fd1261510.wav",
"sampleCount": 16612,
"rate": 22050,
"format": "",
"tags": [
"human"
]
},
{
"soundName": "Crank",
"md5": "a54f8ce520a0b9fff3cd53817e280ede.wav",
"sampleCount": 100649,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Crash Beatbox",
"md5": "725e29369e9138a43f11e0e5eb3eb562.wav",
"sampleCount": 26883,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Crash Cymbal",
"md5": "f2c47a46f614f467a7ac802ed9ec3d8e.wav",
"sampleCount": 25220,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Crazy Laugh",
"md5": "2293a751b71a2df8cdce1bec5558cc1e.wav",
"sampleCount": 37485,
"rate": 22050,
"format": "adpcm",
"tags": [
"human",
"cartoon",
"voice"
]
},
{
"soundName": "Cricket",
"md5": "a2b3cac37065c109aac17ed46005445e.wav",
"sampleCount": 3673,
"rate": 22050,
"format": "",
"tags": [
"animals",
"insects",
"bugs"
]
},
{
"soundName": "Crickets",
"md5": "cae6206eb3c57bb8c4b3e2ca362dfa6d.wav",
"sampleCount": 92160,
"rate": 22050,
"format": "",
"tags": [
"animals",
"insects",
"bugs",
"ambience",
"background"
]
},
{
"soundName": "Croak",
"md5": "c6ce0aadb89903a43f76fc20ea57633e.wav",
"sampleCount": 6424,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"frog",
"toad"
]
},
{
"soundName": "Crowd Gasp",
"md5": "0eaf773c9d1b06e801e7b5fd56298801.wav",
"sampleCount": 27434,
"rate": 22050,
"format": "adpcm",
"tags": [
"voice",
"human"
]
},
{
"soundName": "Crowd Laugh",
"md5": "f4942ab2532087118e11b0c4d4e0e342.wav",
"sampleCount": 91584,
"rate": 22050,
"format": "adpcm",
"tags": [
"voice",
"human"
]
},
{
"soundName": "Crunch",
"md5": "cac3341417949acc66781308a254529c.wav",
"sampleCount": 4297,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Cymbal",
"md5": "7c5405a9cf561f65a941aff10e661593.wav",
"sampleCount": 24118,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"electronic"
]
},
{
"soundName": "Cymbal Crash",
"md5": "fa2c9da1d4fd70207ab749851853cb50.wav",
"sampleCount": 25219,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion"
]
},
{
"soundName": "Cymbal Echo",
"md5": "bb243badd1201b2607bf2513df10cd97.wav",
"sampleCount": 44326,
"rate": 22050,
"format": "",
"tags": [
"music",
"loops",
"hiphop"
]
},
{
"soundName": "D Bass",
"md5": "5a3ae8a2665f50fdc38cc301fbac79ba.wav",
"sampleCount": 40192,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "D Elec Bass",
"md5": "67a6d1aa68233a2fa641aee88c7f051f.wav",
"sampleCount": 5568,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "D Elec Guitar",
"md5": "1b5de9866801eb2f9d4f57c7c3b473f5.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "D Elec Piano",
"md5": "835f136ca8d346a17b4d4baf8405be37.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "D Guitar",
"md5": "2dbcfae6a55738f94bbb40aa5fcbf7ce.wav",
"sampleCount": 41120,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "D Piano",
"md5": "51381ac422605ee8c7d64cfcbfd75efc.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "D Sax",
"md5": "39f41954a73c0e15d842061e1a4c5e1d.wav",
"sampleCount": 9555,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "D Trombone",
"md5": "f3afca380ba74372d611d3f518c2f35b.wav",
"sampleCount": 17339,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "D Trumpet",
"md5": "0b1345b8fe2ba3076fedb4f3ae48748a.wav",
"sampleCount": 12702,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Dance Around",
"md5": "8bcea76415eaf98ec1cbc3825845b934.wav",
"sampleCount": 343746,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Celebrate",
"md5": "0edb8fb88af19e6e17d0f8cf64c1d136.wav",
"sampleCount": 176401,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"electronic"
]
},
{
"soundName": "Dance Celebrate2",
"md5": "0edb8fb88af19e6e17d0f8cf64c1d136.wav",
"sampleCount": 176401,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "Dance Chill Out",
"md5": "b235da45581b1f212c9e9cce70d2a2dc.wav",
"sampleCount": 222822,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Funky",
"md5": "a8383eaddc02d33714dc5832c02ccf13.wav",
"sampleCount": 111412,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Head Nod",
"md5": "65e8a47d55df3f4cb17722959f6220db.wav",
"sampleCount": 124519,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Magic",
"md5": "042309f190183383c0b1c1fc3edc2e84.wav",
"sampleCount": 187200,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Slow Mo",
"md5": "329ee6f3418c0a569418e102e620edf0.wav",
"sampleCount": 445643,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Snare Beat",
"md5": "562587bdb75e3a8124cdaa46ba0f648b.wav",
"sampleCount": 176401,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Dance Space",
"md5": "e15333f5ffaf08e145ace1610fccd67d.wav",
"sampleCount": 88200,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Disconnect",
"md5": "56df0714ed1ed455a2befd787a077214.wav",
"sampleCount": 27563,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Dog1",
"md5": "b15adefc3c12f758b6dc6a045362532f.wav",
"sampleCount": 3672,
"rate": 22050,
"format": "",
"tags": [
"animals"
]
},
{
"soundName": "Dog2",
"md5": "cd8fa8390b0efdd281882533fbfcfcfb.wav",
"sampleCount": 3168,
"rate": 22050,
"format": "",
"tags": [
"animals"
]
},
{
"soundName": "Door Closing",
"md5": "d8c78c6c272cca91342435ff543c1274.wav",
"sampleCount": 7454,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Door Creak",
"md5": "56985da9c052a5e26007c99aa5a958f7.wav",
"sampleCount": 54272,
"rate": 11025,
"format": "",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Doorbell",
"md5": "b67db6ed07f882e52a9ef4dbb76f5f64.wav",
"sampleCount": 109662,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Drip Drop",
"md5": "3249e61fa135d0a1d68ff515ba3bd92f.wav",
"sampleCount": 62680,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Drive Around",
"md5": "a3a85fb8564b0266f50a9c091087b7aa.wav",
"sampleCount": 44096,
"rate": 22050,
"format": "",
"tags": [
"music",
"loops",
"electronic"
]
},
{
"soundName": "Drum",
"md5": "f730246174873cd4ae4127c83e475b50.wav",
"sampleCount": 107136,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"loops",
"hiphop",
"jazz"
]
},
{
"soundName": "Drum Bass1",
"md5": "48328c874353617451e4c7902cc82817.wav",
"sampleCount": 6528,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Drum Bass2",
"md5": "711a1270d1cf2e5de9b145ee539213e4.wav",
"sampleCount": 3791,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "Drum Bass3",
"md5": "c21704337b16359ea631b5f8eb48f765.wav",
"sampleCount": 8576,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Drum Boing",
"md5": "5f4216970527d5a2e259758ba12e6a1b.wav",
"sampleCount": 18640,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"wacky",
"cartoon",
"percussion"
]
},
{
"soundName": "Drum Buzz",
"md5": "3650dc4262bcc5010c0d8fa8d7c670cf.wav",
"sampleCount": 5742,
"rate": 11025,
"format": "",
"tags": [
"music",
"electronic",
"percussion"
]
},
{
"soundName": "Drum Funky",
"md5": "fb56022366d21b299cbc3fd5e16000c2.wav",
"sampleCount": 44748,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"hiphop"
]
},
{
"soundName": "Drum Jam",
"md5": "8b5486ccc806e97e83049d25b071f7e4.wav",
"sampleCount": 44288,
"rate": 22050,
"format": "",
"tags": [
"music",
"loops",
"percussion"
]
},
{
"soundName": "Drum Machine",
"md5": "f9d53d773b42e16df3dfca6174015592.wav",
"sampleCount": 105984,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Drum Roll",
"md5": "fb12e119d7a88a7f75ab980243f75073.wav",
"sampleCount": 37809,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"percussion"
]
},
{
"soundName": "Drum Satellite",
"md5": "079067d7909f791b29f8be1c00fc2131.wav",
"sampleCount": 44096,
"rate": 22050,
"format": "",
"tags": [
"music",
"loops",
"percussion"
]
},
{
"soundName": "Drum Set1",
"md5": "38a2bb8129bddb4e8eaa06781cfa3040.wav",
"sampleCount": 46080,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"jazz",
"loops"
]
},
{
"soundName": "Drum Set2",
"md5": "738e871fda577295e8beb9021f670e28.wav",
"sampleCount": 37440,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"jazz",
"loops"
]
},
{
"soundName": "Duck",
"md5": "af5b039e1b05e0ccb12944f648a8884e.wav",
"sampleCount": 5792,
"rate": 22050,
"format": "",
"tags": [
"animals"
]
},
{
"soundName": "Dun Dun Dunnn",
"md5": "e956a99ab9ac64cfb5c6b2d8b1e949eb.wav",
"sampleCount": 63729,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"surprise",
"wacky",
"dramatic"
]
},
{
"soundName": "E Bass",
"md5": "0657e39bae81a232b01a18f727d3b891.wav",
"sampleCount": 36160,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "E Elec Bass",
"md5": "0704b8ceabe54f1dcedda8c98f1119fd.wav",
"sampleCount": 5691,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "E Elec Guitar",
"md5": "2e6a6ae3e0f72bf78c74def8130f459a.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "E Elec Piano",
"md5": "ab3c198f8e36efff14f0a5bad35fa3cd.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "E Guitar",
"md5": "4b5d1da83e59bf35578324573c991666.wav",
"sampleCount": 38400,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "E Piano",
"md5": "c818fdfaf8a0efcb562e24e794700a57.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "E Sax",
"md5": "3568b7dfe173fab6877a9ff1dcbcf1aa.wav",
"sampleCount": 7489,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "E Trombone",
"md5": "c859fb0954acaa25c4b329df5fb76434.wav",
"sampleCount": 16699,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "E Trumpet",
"md5": "494295a92314cadb220945a6711c568c.wav",
"sampleCount": 8680,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "Eggs",
"md5": "659de1f3826ece8dbeca948884835f14.wav",
"sampleCount": 336480,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Elec Piano A Minor",
"md5": "8fe470b5f2fb58364b153fe647adcbbf.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"notes",
"instruments"
]
},
{
"soundName": "Elec Piano C Major",
"md5": "228429930dfc60f48d75ce8e14291416.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"notes",
"instruments"
]
},
{
"soundName": "Elec Piano F Major",
"md5": "740098316ed06d9a64c14b93f65c5da5.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"notes",
"instruments"
]
},
{
"soundName": "Elec Piano G Major",
"md5": "5a5f5de80bcdf782250e889747b374bd.wav",
"sampleCount": 43908,
"rate": 22050,
"format": "",
"tags": [
"music",
"notes",
"instruments"
]
},
{
"soundName": "Elec Piano Loop",
"md5": "7b4822ccca655db47de0880bab0e7bd9.wav",
"sampleCount": 43844,
"rate": 22050,
"format": "",
"tags": [
"music",
"notes",
"instruments",
"loops"
]
},
{
"soundName": "Engine",
"md5": "f5c4e2311024f18c989e53f9b3448db8.wav",
"sampleCount": 172729,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"transportation"
]
},
{
"soundName": "F Bass",
"md5": "ea21bdae86f70d60b28f1dddcf50d104.wav",
"sampleCount": 34368,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "F Elec Bass",
"md5": "45eedb4ce62a9cbbd2207824b94a4641.wav",
"sampleCount": 5312,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "F Elec Guitar",
"md5": "5eb00f15f21f734986aa45156d44478d.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "F Elec Piano",
"md5": "dc5e368fc0d0dad1da609bfc3e29aa15.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "F Guitar",
"md5": "b51d086aeb1921ec405561df52ecbc50.wav",
"sampleCount": 36416,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "F Major Ukulele",
"md5": "cd0ab5d1b0120c6ed92a1654ccf81376.wav",
"sampleCount": 18235,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes",
"chords"
]
},
{
"soundName": "F Piano",
"md5": "cdab3cce84f74ecf53e3941c6a003b5e.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "F Sax",
"md5": "2ae3083817bcd595e26ea2884b6684d5.wav",
"sampleCount": 7361,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "F Trombone",
"md5": "d6758470457aac2aa712717a676a5163.wav",
"sampleCount": 19373,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "F Trumpet",
"md5": "5fa3108b119ca266029b4caa340a7cd0.wav",
"sampleCount": 12766,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Fairydust",
"md5": "b92de59d992a655c1b542223a784cda6.wav",
"sampleCount": 11247,
"rate": 22050,
"format": "",
"tags": [
"effects",
"fantasy",
"magic"
]
},
{
"soundName": "Finger Snap",
"md5": "99d02ffb3212d86b3e5b173b6f33f835.wav",
"sampleCount": 1985,
"rate": 11025,
"format": "",
"tags": [
"effects",
"percussion",
"human"
]
},
{
"soundName": "Flam Snare",
"md5": "3b6cce9f8c56c0537ca61eee3945cd1d.wav",
"sampleCount": 4416,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Footsteps",
"md5": "c893b0a9b3e2e0594f1f921a12aa66be.wav",
"sampleCount": 58880,
"rate": 11025,
"format": "",
"tags": [
"effects",
"human"
]
},
{
"soundName": "G Bass",
"md5": "05c192194e8f1944514dce3833e33439.wav",
"sampleCount": 30976,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "G Elec Bass",
"md5": "97b187d72219b994a6ef6a5a6b09605c.wav",
"sampleCount": 5568,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "G Elec Guitar",
"md5": "cd0d0e7dad415b2ffa2ba7a61860eaf8.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "G Elec Piano",
"md5": "39525f6545d62a95d05153f92d63301a.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "G Guitar",
"md5": "98a835713ecea2f3ef9f4f442d52ad20.wav",
"sampleCount": 33600,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "G Piano",
"md5": "42bb2ed28e7023e111b33220e1594a6f.wav",
"sampleCount": 44100,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "G Sax",
"md5": "cefba5de46adfe5702485e0934bb1e13.wav",
"sampleCount": 7349,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "G Trombone",
"md5": "9436fd7a0eacb4a6067e7db14236dde1.wav",
"sampleCount": 17179,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes"
]
},
{
"soundName": "G Trumpet",
"md5": "e84afda25975f14b364118591538ccf4.wav",
"sampleCount": 14640,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "G Ukulele",
"md5": "d20218f92ee606277658959005538e2d.wav",
"sampleCount": 18235,
"rate": 22050,
"format": "",
"tags": [
"music",
"instruments",
"notes",
"chords"
]
},
{
"soundName": "Gallop",
"md5": "8388c266cd774a8e8c8796155b18ef47.wav",
"sampleCount": 36209,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"horse"
]
},
{
"soundName": "Garden",
"md5": "7c25f6d39011cd2ee5ffb1af539d9d0c.wav",
"sampleCount": 371520,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Glass Breaking",
"md5": "4b33c58ba14e4555373fa2478b3f891f.wav",
"sampleCount": 52237,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Glug",
"md5": "5606722c6105f3c58f9689a958f5c45f.wav",
"sampleCount": 12100,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"potion",
"drink",
"water"
]
},
{
"soundName": "Goal Cheer",
"md5": "a434069c58e79d42f5d21abb1c318919.wav",
"sampleCount": 84096,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"human",
"voice"
]
},
{
"soundName": "Gong",
"md5": "9d30c38443691e9626d510546d98327c.wav",
"sampleCount": 114432,
"rate": 11025,
"format": "",
"tags": [
"music",
"percussion"
]
},
{
"soundName": "Goose",
"md5": "16a3b9d516e125cdb2ad74cd8d205d71.wav",
"sampleCount": 8208,
"rate": 22050,
"format": "",
"tags": [
"animals",
"birds"
]
},
{
"soundName": "Growl",
"md5": "79d052b0921d2078d42389328b1be168.wav",
"sampleCount": 19228,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"dog"
]
},
{
"soundName": "Grunt",
"md5": "caa0a1685ef7a5334413834c6c818c5a.wav",
"sampleCount": 20551,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"buffalo"
]
},
{
"soundName": "Guitar Chords1",
"md5": "2b1a5bc63580d8625cf24ff3d7622c0b.wav",
"sampleCount": 123264,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"instruments",
"loops"
]
},
{
"soundName": "Guitar Chords2",
"md5": "e956f15da397a13fae0c90d9fe4571fb.wav",
"sampleCount": 158976,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"instruments",
"loops"
]
},
{
"soundName": "Guitar Strum",
"md5": "29000fa713f70765147ee0551fa42d9e.wav",
"sampleCount": 25216,
"rate": 11025,
"format": "",
"tags": [
"music",
"instruments",
"chords"
]
},
{
"soundName": "Hand Clap",
"md5": "9502142875e67f7b0292a117a27e9563.wav",
"sampleCount": 2464,
"rate": 22050,
"format": "",
"tags": [
"human",
"percussion"
]
},
{
"soundName": "Head Shake",
"md5": "e56fdc9f76d035ff01f4e7b39e9e9989.wav",
"sampleCount": 20025,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"wacky",
"cartoon"
]
},
{
"soundName": "Hey",
"md5": "ec7c272faa862c9f8f731792e686e3c9.wav",
"sampleCount": 5414,
"rate": 22050,
"format": "adpcm",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Hi Beatbox",
"md5": "5a07847bf246c227204728b05a3fc8f3.wav",
"sampleCount": 5856,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Hi Na Tabla",
"md5": "35b42d98c43404a5b1b52fb232a62bd7.wav",
"sampleCount": 4096,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Hi Tun Tabla",
"md5": "da734693dfa6a9a7eccdc7f9a0ca9840.wav",
"sampleCount": 18656,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "High Conga",
"md5": "16144544de90e98a92a265d4fc3241ea.wav",
"sampleCount": 8192,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "High Hat",
"md5": "0d91b2759ac861d156235f5ecf8d3218.wav",
"sampleCount": 2757,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "High Tom",
"md5": "d623f99b3c8d33932eb2c6c9cfd817c5.wav",
"sampleCount": 12320,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "High Whoosh",
"md5": "6a10c380af8c400f8f6eea84eb28bd12.wav",
"sampleCount": 6116,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games"
]
},
{
"soundName": "Hihat Beatbox",
"md5": "5a07847bf246c227204728b05a3fc8f3.wav",
"sampleCount": 5856,
"rate": 22050,
"format": "",
"tags": [
"human",
"percussion",
"music",
"hiphop"
]
},
{
"soundName": "Hihat Cymbal",
"md5": "2d01f60d0f20ab39facbf707899c6b2a.wav",
"sampleCount": 2752,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Hip Hop",
"md5": "7ed8ce1853bde6dcbc6f7f5a1c65ae47.wav",
"sampleCount": 108864,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"hiphop"
]
},
{
"soundName": "Horse",
"md5": "45ffcf97ee2edca0199ff5aa71a5b72e.wav",
"sampleCount": 14464,
"rate": 11025,
"format": "",
"tags": [
"animals",
"effects"
]
},
{
"soundName": "Horse Gallop",
"md5": "058a34b5fb8b57178b5322d994b6b8c8.wav",
"sampleCount": 38336,
"rate": 11025,
"format": "",
"tags": [
"animals",
"effects"
]
},
{
"soundName": "Human Beatbox1",
"md5": "37f37455c35fea71449926eb0bff05dd.wav",
"sampleCount": 103680,
"rate": 22050,
"format": "adpcm",
"tags": [
"human",
"percussion",
"music",
"hiphop",
"loops"
]
},
{
"soundName": "Human Beatbox2",
"md5": "f62e9f7deeb0e06268df6edffa14f5de.wav",
"sampleCount": 62392,
"rate": 22050,
"format": "adpcm",
"tags": [
"human",
"percussion",
"music",
"hiphop",
"loops"
]
},
{
"soundName": "Jump",
"md5": "6fcd64d6357e4ea03704e5f96bfd35ba.wav",
"sampleCount": 6867,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Jungle",
"md5": "b234a04cc3958437c43ed3d93f34a345.wav",
"sampleCount": 76032,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"electronic",
"loops"
]
},
{
"soundName": "Jungle Frogs",
"md5": "2ca5fbda5288b79a6e12f5ca3c20b0fa.wav",
"sampleCount": 291214,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"background",
"crickets",
"ambience"
]
},
{
"soundName": "Kick Back",
"md5": "9cd340d9d568b1479f731e69e103b3ce.wav",
"sampleCount": 44748,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"hiphop"
]
},
{
"soundName": "Kick Drum",
"md5": "711a1270d1cf2e5de9b145ee539213e4.wav",
"sampleCount": 3791,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"hiphop"
]
},
{
"soundName": "Large Cowbell",
"md5": "006316650ffc673dc02d36aa55881327.wav",
"sampleCount": 20856,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Laser1",
"md5": "46571f8ec0f2cc91666c80e312579082.wav",
"sampleCount": 516,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Laser2",
"md5": "27654ed2e3224f0a3f77c244e4fae9aa.wav",
"sampleCount": 755,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Laugh1",
"md5": "1e8e7fb94103282d02a4bb597248c788.wav",
"sampleCount": 13547,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Laugh2",
"md5": "8b1e025f38b0635f7e34e9afcace1b5e.wav",
"sampleCount": 14662,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Laugh3",
"md5": "86dee6fa7cd73095ba17e4d666a27804.wav",
"sampleCount": 32065,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Lo Geh Tabla",
"md5": "9205359ab69d042ed3da8a160a651690.wav",
"sampleCount": 30784,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Lo Gliss Tabla",
"md5": "d7cd24689737569c93e7ea7344ba6b0e.wav",
"sampleCount": 7008,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Lose",
"md5": "d73eacaf5a905bf864041c7a70937ac4.wav",
"sampleCount": 81379,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Low Boing",
"md5": "33e9314fd25ef8e800a749c86487f7a9.wav",
"sampleCount": 16592,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Low Conga",
"md5": "0b6f94487cd8a1cf0bb77e15966656c3.wav",
"sampleCount": 8384,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Low Squeak",
"md5": "0aae06b65c875a6ba1fd51f4251b16b3.wav",
"sampleCount": 16736,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Low Tom",
"md5": "1569bbbd8952b0575e5a5cb5aefb50ba.wav",
"sampleCount": 20000,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Low Whoosh",
"md5": "d42f096c89764484a442046f4342c9ad.wav",
"sampleCount": 11220,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games"
]
},
{
"soundName": "Machine",
"md5": "e7dfb630116153533989ff839c1973a5.wav",
"sampleCount": 10209,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Magic Spell",
"md5": "1cb60ecdb1075c8769cb346d5c2a22c7.wav",
"sampleCount": 43077,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"fantasy"
]
},
{
"soundName": "Medieval1",
"md5": "9329fef6a59c5406d70cbe5837976d6b.wav",
"sampleCount": 213120,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"fantasy"
]
},
{
"soundName": "Medieval2",
"md5": "7bc8c4a9d0525f04451356c6cc483dd7.wav",
"sampleCount": 324288,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"fantasy"
]
},
{
"soundName": "Meow",
"md5": "83c36d806dc92327b9e7049a565c6bff.wav",
"sampleCount": 18688,
"rate": 22050,
"format": "",
"tags": [
"animals",
"cat"
]
},
{
"soundName": "Meow2",
"md5": "cf51a0c4088942d95bcc20af13202710.wav",
"sampleCount": 6512,
"rate": 11025,
"format": "",
"tags": [
"animals",
"cat"
]
},
{
"soundName": "Moo",
"md5": "7206280bd4444a06d25f19a84dcb56b1.wav",
"sampleCount": 27225,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"cow"
]
},
{
"soundName": "Motorcycle Passing",
"md5": "b49ab3a926da46578396d1faffd24d3b.wav",
"sampleCount": 86016,
"rate": 11025,
"format": "",
"tags": [
"transportation",
"ambience",
"background"
]
},
{
"soundName": "Muted Conga",
"md5": "1d4abbe3c9bfe198a88badb10762de75.wav",
"sampleCount": 4544,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Ocean Wave",
"md5": "c904610d770398b98872a708a2f75611.wav",
"sampleCount": 99206,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"water",
"underwater"
]
},
{
"soundName": "Odesong-b",
"md5": "2c41921491b1da2bfa1ebcaba34265ca.wav",
"sampleCount": 212553,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops",
"electronic"
]
},
{
"soundName": "Oops",
"md5": "1139072c3d2d31fa5903c46632789d08.wav",
"sampleCount": 30514,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Orchestra Tuning",
"md5": "9fdef8a1f57a24b99add29d4f1925c76.wav",
"sampleCount": 221837,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"ambience",
"background",
"music"
]
},
{
"soundName": "Owl",
"md5": "e8b6d605f5a1bb36c29e4e21ef754209.wav",
"sampleCount": 8111,
"rate": 11025,
"format": "",
"tags": [
"animals",
"birds"
]
},
{
"soundName": "Party Noise",
"md5": "8f5a994abfa814da72272e766772dbac.wav",
"sampleCount": 44672,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice",
"ambience",
"background"
]
},
{
"soundName": "Pew",
"md5": "21a2cc083ef51767fb13791151194348.wav",
"sampleCount": 5816,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Ping Pong Hit",
"md5": "8357b4bdf6fbe10b972be3b78167b3c8.wav",
"sampleCount": 11171,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports"
]
},
{
"soundName": "Pluck",
"md5": "0f2aa4c395cb932512defb2d14dc1691.wav",
"sampleCount": 6537,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"wacky",
"cartoon"
]
},
{
"soundName": "Plunge",
"md5": "c09455ee9da0e7eeead42d4e73c2555d.wav",
"sampleCount": 22400,
"rate": 11025,
"format": "",
"tags": [
"effects",
"water",
"splash"
]
},
{
"soundName": "Police Siren",
"md5": "b10dcd209865fbd392534633307dafad.wav",
"sampleCount": 8649,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Pop",
"md5": "83a9787d4cb6f3b7632b4ddfebf74367.wav",
"sampleCount": 258,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Pop2",
"md5": "83a9787d4cb6f3b7632b4ddfebf74367.wav",
"sampleCount": 258,
"rate": 11025,
"format": "",
"tags": [
"effects"
]
},
{
"soundName": "Rain",
"md5": "b5db20c28ef4946137129b47772dcf69.wav",
"sampleCount": 220295,
"rate": 22050,
"format": "adpcm",
"tags": [
"ambience",
"background",
"weather",
"water"
]
},
{
"soundName": "Rattle",
"md5": "74f1c07e0bcd7811fd9d456a5f8667f8.wav",
"sampleCount": 13184,
"rate": 22050,
"format": "",
"tags": [
"effects",
"percussion"
]
},
{
"soundName": "Referee Whistle",
"md5": "8468b9b3f11a665ee4d215afd8463b97.wav",
"sampleCount": 14034,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports"
]
},
{
"soundName": "Ricochet",
"md5": "49407acfc004ec6960e8b84d363bd98d.wav",
"sampleCount": 23862,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"wacky"
]
},
{
"soundName": "Ride Cymbal",
"md5": "53badb02228d10494e0efdd1e839548d.wav",
"sampleCount": 8144,
"rate": 11025,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Ring Tone",
"md5": "895c0887b4de4e0051e3adbceaf96061.wav",
"sampleCount": 70656,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Rip",
"md5": "4081f8fac2ca83bd34329400eb95bbde.wav",
"sampleCount": 11877,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games"
]
},
{
"soundName": "Ripples",
"md5": "d3c95a4ba37dcf90c8a57e8b2fd1632d.wav",
"sampleCount": 21504,
"rate": 11025,
"format": "",
"tags": [
"effects",
"water"
]
},
{
"soundName": "Roll Cymbal",
"md5": "da8355d753cd2a5ddd19cb2bb41c1547.wav",
"sampleCount": 26432,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Rooster",
"md5": "2e375acae2c7c0d655935a9de14b12f6.wav",
"sampleCount": 17110,
"rate": 11025,
"format": "",
"tags": [
"animals",
"birds"
]
},
{
"soundName": "Scrambling Feet",
"md5": "0fbca8db08d46419416c0f104345bc53.wav",
"sampleCount": 35770,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Scratch Beatbox",
"md5": "859249563a7b1fc0f6e92e36d1db81c7.wav",
"sampleCount": 11552,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Scratchy Beat",
"md5": "289dc558e076971e74dd1a0bd55719b1.wav",
"sampleCount": 44096,
"rate": 22050,
"format": "",
"tags": [
"music",
"loops",
"hiphop"
]
},
{
"soundName": "Scream1",
"md5": "10420bb2f5a3ab440f3b10fc8ea2b08b.wav",
"sampleCount": 6628,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Scream2",
"md5": "e06e29398d770dae3cd57447439752ef.wav",
"sampleCount": 17010,
"rate": 22050,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Screech",
"md5": "10644c5cc83a9a2dd3ab466deb0eb03d.wav",
"sampleCount": 12907,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"monkey"
]
},
{
"soundName": "Seagulls",
"md5": "42bbbb6c37439abc82057ec2e67b78dc.wav",
"sampleCount": 64936,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"bird"
]
},
{
"soundName": "Sewing Machine",
"md5": "7bd800cb66d6fb18886a4c5cea1b76a6.wav",
"sampleCount": 107964,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Shaker",
"md5": "714e598d28e493cc50babc17f2c4895d.wav",
"sampleCount": 18560,
"rate": 11025,
"format": "",
"tags": [
"music",
"percussion"
]
},
{
"soundName": "Ship Bell",
"md5": "4cbd4dc0c55656e7edc4b0f00a3f9738.wav",
"sampleCount": 78597,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"transportation"
]
},
{
"soundName": "Sidestick Snare",
"md5": "f6868ee5cf626fc4ef3ca1119dc95592.wav",
"sampleCount": 2336,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Singer1",
"md5": "92ee32e9be5ed7b69370fc38bb550597.wav",
"sampleCount": 23653,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice",
"vocals",
"music"
]
},
{
"soundName": "Singer2",
"md5": "5d3d2865906889e866b3edf154e6cf5d.wav",
"sampleCount": 28636,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice",
"vocals",
"music"
]
},
{
"soundName": "Siren Whistle",
"md5": "ea0d6aced66db4b8cafaeb6418ef9cf6.wav",
"sampleCount": 20821,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects"
]
},
{
"soundName": "Skid",
"md5": "2c22bb6e3c65d9430185fd83ec3db64a.wav",
"sampleCount": 23939,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"cartoon",
"effects",
"transportation"
]
},
{
"soundName": "Slide Whistle",
"md5": "3858bab5ea1211ff3c5902a4b680f7d8.wav",
"sampleCount": 12273,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Small Cowbell",
"md5": "e29154f53f56f96f8a3292bdcddcec54.wav",
"sampleCount": 9718,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Snap",
"md5": "c2ff5da4d9d85dee866615f672b749ce.wav",
"sampleCount": 15360,
"rate": 22050,
"format": "",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Snare Beatbox",
"md5": "c642c4c00135d890998f351faec55498.wav",
"sampleCount": 5630,
"rate": 22050,
"format": "adpcm",
"tags": []
},
{
"soundName": "Snare Beatbox2",
"md5": "7ede1382b578d8fc32850b48d082d914.wav",
"sampleCount": 4960,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Snare Drum",
"md5": "c27fb569aba99c7203e954aecb1ed8e4.wav",
"sampleCount": 2757,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"percussion",
"drums"
]
},
{
"soundName": "Sneaker Squeak",
"md5": "03f61f7d2c32da8a1493a380414710a2.wav",
"sampleCount": 8370,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"effects"
]
},
{
"soundName": "Sneeze1",
"md5": "31600c613823710b66a74f4dd54c4cdd.wav",
"sampleCount": 11818,
"rate": 11025,
"format": "",
"tags": [
"human",
"voice"
]
},
{
"soundName": "Sneeze2",
"md5": "42b5a31628083f3089f494f2ba644660.wav",
"sampleCount": 15218,
"rate": 22050,
"format": "",
"tags": [
"voice",
"human"
]
},
{
"soundName": "Snoring",
"md5": "5b1a88cd6db7e239642d7ca8a0d74a1a.wav",
"sampleCount": 103974,
"rate": 22050,
"format": "adpcm",
"tags": [
"human",
"wacky",
"voice",
"cartoon"
]
},
{
"soundName": "Snort",
"md5": "362d7440a57cab29914fecea621e50d4.wav",
"sampleCount": 16421,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"horse"
]
},
{
"soundName": "Space Ambience",
"md5": "f8903e89c1082987f18fc30b3de6d61a.wav",
"sampleCount": 220160,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games",
"background",
"space"
]
},
{
"soundName": "Space Flyby",
"md5": "49c2e36b7258338fb3a8576e646c6738.wav",
"sampleCount": 52770,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"games",
"space",
"transportation"
]
},
{
"soundName": "Space Noise",
"md5": "a5cd5e83841aaaf34583d6ad53d551f5.wav",
"sampleCount": 58212,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games",
"space"
]
},
{
"soundName": "Space Ripple",
"md5": "ff8b8c3bf841a11fd5fe3afaa92be1b5.wav",
"sampleCount": 41149,
"rate": 11025,
"format": "",
"tags": []
},
{
"soundName": "Spiral",
"md5": "c987c4e2c85d1a034ef047c2611aff25.wav",
"sampleCount": 28672,
"rate": 11025,
"format": "",
"tags": [
"space",
"effects",
"electronic"
]
},
{
"soundName": "Splash",
"md5": "6aed5e38d40b87a21d893d26fa2858c0.wav",
"sampleCount": 46080,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"water"
]
},
{
"soundName": "Splash Cymbal",
"md5": "9d63ed5be96c43b06492e8b4a9cea8d8.wav",
"sampleCount": 9600,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Spooky String",
"md5": "6648b690e6e22c7504db7746879d51b4.wav",
"sampleCount": 51376,
"rate": 11025,
"format": "",
"tags": [
"effects",
"dramatic"
]
},
{
"soundName": "Squawk",
"md5": "e140d7ff07de8fa35c3d1595bba835ac.wav",
"sampleCount": 8208,
"rate": 22050,
"format": "",
"tags": [
"animals",
"birds"
]
},
{
"soundName": "Squeaks",
"md5": "62244fb9600ee90c780875deba2ba24f.wav",
"sampleCount": 53626,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"guinea pig"
]
},
{
"soundName": "Squeaky Toy",
"md5": "09d36c3c7531a0a1224437f3994bad40.wav",
"sampleCount": 9982,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon",
"horn"
]
},
{
"soundName": "Squish Pop",
"md5": "853cc25eb47a35c88e3a1fe88b171ed4.wav",
"sampleCount": 9355,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "String Accent",
"md5": "c1b5c86a10f43f87746b1c305d4fd8df.wav",
"sampleCount": 16896,
"rate": 11025,
"format": "",
"tags": [
"effects",
"music"
]
},
{
"soundName": "String Pluck",
"md5": "d658129427a96764819cb9bd52076860.wav",
"sampleCount": 4976,
"rate": 11025,
"format": "",
"tags": [
"effects",
"music",
"instruments"
]
},
{
"soundName": "Suction Cup",
"md5": "76b9d125d013562dc4f423525b028a19.wav",
"sampleCount": 4882,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Suspense",
"md5": "12f86e0188510860970e04df45370c1d.wav",
"sampleCount": 16659,
"rate": 11025,
"format": "",
"tags": [
"effects",
"music",
"dramatic"
]
},
{
"soundName": "Tada",
"md5": "10eed5b6b49ec7baf1d4b3b3fad0ac99.wav",
"sampleCount": 55125,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"surprise",
"wacky",
"dramatic"
]
},
{
"soundName": "Tambura",
"md5": "c2109f07f83086ec863e70887ef55fb6.wav",
"sampleCount": 22261,
"rate": 11025,
"format": "",
"tags": [
"effects",
"music",
"instruments"
]
},
{
"soundName": "Tap Conga",
"md5": "fd9a67157f57f9cc6fe3cdce38a6d4a8.wav",
"sampleCount": 6880,
"rate": 22050,
"format": "",
"tags": [
"percussion",
"drums",
"music"
]
},
{
"soundName": "Tap Snare",
"md5": "d55b3954d72c6275917f375e49b502f3.wav",
"sampleCount": 3296,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Techno",
"md5": "8700dac70c8e08f4a5d21411980304bb.wav",
"sampleCount": 175680,
"rate": 22050,
"format": "adpcm",
"tags": [
"loops",
"music",
"electronic"
]
},
{
"soundName": "Techno2",
"md5": "693b428f3797561a11ad0ddbd897b5df.wav",
"sampleCount": 327168,
"rate": 22050,
"format": "adpcm",
"tags": [
"loops",
"music",
"electronic"
]
},
{
"soundName": "Telephone Ring",
"md5": "276f97d3a9d0f9938b37db8225af97f5.wav",
"sampleCount": 74666,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Telephone Ring2",
"md5": "d0096aa9ecc28c0729a99b0349399371.wav",
"sampleCount": 25373,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"home"
]
},
{
"soundName": "Teleport",
"md5": "2d625187556c4323169fc1a8f29a7a7d.wav",
"sampleCount": 110250,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games",
"space"
]
},
{
"soundName": "Teleport2",
"md5": "7e5019890a930f3535604cf9cad63ba4.wav",
"sampleCount": 15898,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games",
"space"
]
},
{
"soundName": "Teleport3",
"md5": "58f76f299a1df2373d4fca3614221186.wav",
"sampleCount": 95440,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games",
"space"
]
},
{
"soundName": "Tennis Hit",
"md5": "01bd4d670cd586613705ee8273f22568.wav",
"sampleCount": 18176,
"rate": 22050,
"format": "adpcm",
"tags": [
"sports",
"effects"
]
},
{
"soundName": "Thunder Storm",
"md5": "11f13be7e53b2e9116d59344c5efc66a.wav",
"sampleCount": 307513,
"rate": 22050,
"format": "adpcm",
"tags": [
"weather",
"rain",
"ambience",
"background",
"dramatic"
]
},
{
"soundName": "Tom Drum",
"md5": "5a8b8678d37a860dd6c08082d5cda3c2.wav",
"sampleCount": 35803,
"rate": 22050,
"format": "adpcm",
"tags": [
"percussion",
"drums",
"music"
]
},
{
"soundName": "Toy Honk",
"md5": "67aadcd28620ecdcdee2ad8eeebefa20.wav",
"sampleCount": 10726,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"transportation"
]
},
{
"soundName": "Toy Zing",
"md5": "52cf0926d9bab8774194a37eba636c0e.wav",
"sampleCount": 14103,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Traffic",
"md5": "c983b482802b15a80983786019276c28.wav",
"sampleCount": 141977,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"transportation",
"ambience",
"background"
]
},
{
"soundName": "Train Whistle",
"md5": "50f29d0e028ec5c11210d0e2f91f83dd.wav",
"sampleCount": 47594,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"transportation"
]
},
{
"soundName": "Triumph",
"md5": "072f4d9a3dfd2a082d50ff90ac7dc8f2.wav",
"sampleCount": 89280,
"rate": 22050,
"format": "adpcm",
"tags": [
"loops",
"music",
"dramatic",
"win"
]
},
{
"soundName": "Tropical Birds",
"md5": "18e5a88512296cd96417449496bd8711.wav",
"sampleCount": 546917,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"background",
"ambience"
]
},
{
"soundName": "Trumpet1",
"md5": "851c9e2c38e5e71922231a8f64c37e70.wav",
"sampleCount": 25800,
"rate": 11025,
"format": "",
"tags": [
"notes",
"music",
"instruments"
]
},
{
"soundName": "Trumpet2",
"md5": "dd73f891deca0241b800ed203408b6f3.wav",
"sampleCount": 23424,
"rate": 11025,
"format": "",
"tags": [
"notes",
"music",
"instruments"
]
},
{
"soundName": "Wah Beatbox",
"md5": "9021b7bb06f2399f18e2db4fb87095dc.wav",
"sampleCount": 6624,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Wand",
"md5": "d182adef7a68a5f38f1c78ab7d5afd6a.wav",
"sampleCount": 47447,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"fantasy"
]
},
{
"soundName": "Water Drop",
"md5": "e133e625fd367d269e76964d4b722fc2.wav",
"sampleCount": 15131,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Whinny",
"md5": "f9513bacf2fc665de05a8dd9bcb88117.wav",
"sampleCount": 46108,
"rate": 22050,
"format": "adpcm",
"tags": [
"animals",
"horse"
]
},
{
"soundName": "Whistle Thump",
"md5": "a3fab5681aedaa678982173ed9ca3d36.wav",
"sampleCount": 14441,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Whiz",
"md5": "d790e1887515deb4097f0946fbf597ad.wav",
"sampleCount": 19243,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Whoop",
"md5": "fbbbb76a2f53dae6ff1cf61b41f66038.wav",
"sampleCount": 54400,
"rate": 11025,
"format": "",
"tags": [
"effects",
"electronic",
"space"
]
},
{
"soundName": "Win",
"md5": "db480f6d5ae6d494dbb76ffb9bd995d5.wav",
"sampleCount": 44771,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects",
"electronic",
"games"
]
},
{
"soundName": "Wobble",
"md5": "9913a64bfb5cfa6bb30ec24002cce56b.wav",
"sampleCount": 39950,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"effects",
"cartoon"
]
},
{
"soundName": "Wolf Howl",
"md5": "5e36d74bb16aa5085b901362788b0fbf.wav",
"sampleCount": 43008,
"rate": 11025,
"format": "",
"tags": [
"animals",
"dramatic"
]
},
{
"soundName": "Wood Tap",
"md5": "de5b41c7080396986873d97e9e47acf6.wav",
"sampleCount": 2729,
"rate": 22050,
"format": "adpcm",
"tags": [
"effects"
]
},
{
"soundName": "Wub Beatbox",
"md5": "e1f32c057411da4237181ce72ae15d23.wav",
"sampleCount": 7392,
"rate": 22050,
"format": "",
"tags": []
},
{
"soundName": "Xylo1",
"md5": "6ac484e97c1c1fe1384642e26a125e70.wav",
"sampleCount": 238232,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Xylo2",
"md5": "d38fc904a0acfc27854baf7335ed46f9.wav",
"sampleCount": 246552,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Xylo3",
"md5": "786a7a66e96c801ca2efed59b20bf025.wav",
"sampleCount": 208832,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Xylo4",
"md5": "b3ee7b6515eaf85aebab3c624c1423e9.wav",
"sampleCount": 77184,
"rate": 22050,
"format": "adpcm",
"tags": [
"music",
"loops"
]
},
{
"soundName": "Ya",
"md5": "30987bbe464eb8db1e4c781dc238f81c.wav",
"sampleCount": 5691,
"rate": 11025,
"format": "",
"tags": [
"voice",
"hiphop"
]
},
{
"soundName": "Zip",
"md5": "c5f35ef67ab1baccdd3b7df87b329d99.wav",
"sampleCount": 10467,
"rate": 22050,
"format": "adpcm",
"tags": [
"wacky",
"human"
]
},
{
"soundName": "Zoop",
"md5": "01f5372ddac43001a2db4c82d71f37bb.wav",
"sampleCount": 2764,
"rate": 11025,
"format": "",
"tags": [
"effects",
"electronic",
"space"
]
}]
def get_sound_map():
    """Return a dict mapping each sound's "soundName" to its record.

    As a side effect, assigns every record in the module-level ``sounds``
    list a sequential ``soundID`` (its index in the list).  If two records
    share a name, the later one wins in the map.
    """
    soundmap = {}
    # enumerate replaces the manual counter: index and record in one step
    for sound_id, sound in enumerate(sounds):
        sound["soundID"] = sound_id
        soundmap[sound["soundName"]] = sound
    return soundmap
def get_sounds():
    """Return the module-level ``sounds`` list, after stamping each record
    with a sequential ``soundID`` (its index in the list)."""
    # enumerate replaces the manual counter: index and record in one step
    for sound_id, sound in enumerate(sounds):
        sound["soundID"] = sound_id
    return sounds
def get_sounds_in_set(soundNames):
    """Return the sound records for the given names, in order.

    Names with no matching record are skipped, with a warning printed.
    """
    soundmap = get_sound_map()
    selected = []
    for name in soundNames:
        sound = soundmap.get(name)
        if sound is None:
            print('WARNING: no sound called ' + name)
        else:
            selected.append(sound)
    return selected
|
# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import errno, os, signal, sys, threading, util
def countcpus():
    '''try to count the number of CPUs on the system'''
    # posix: ask sysconf for the number of online processors
    try:
        ncpus = int(os.sysconf('SC_NPROCESSORS_ONLN'))
    except (AttributeError, ValueError):
        pass
    else:
        if ncpus > 0:
            return ncpus
    # windows: the processor count is exposed via the environment
    try:
        ncpus = int(os.environ['NUMBER_OF_PROCESSORS'])
    except (KeyError, ValueError):
        pass
    else:
        if ncpus > 0:
            return ncpus
    # conservative fallback when neither probe works
    return 1
def _numworkers(ui):
    """Return the number of worker processes to use.

    An explicit positive ``worker.numcpus`` config value wins; otherwise
    fall back to the detected CPU count, clamped to the range [4, 32].
    """
    configured = ui.config('worker', 'numcpus')
    if configured:
        try:
            n = int(configured)
        except ValueError:
            raise util.Abort(_('number of cpus must be an integer'))
        if n >= 1:
            return n
    return min(max(countcpus(), 4), 32)
# Estimated cost of starting one worker, in the same (abstract) units as
# costperop in worthwhile() below.
if os.name == 'posix':
    # fork() is cheap on posix, so parallelism pays off quickly
    _startupcost = 0.01
else:
    # no cheap fork available: make startup look prohibitively expensive
    # so the serial path is always chosen on this platform
    _startupcost = 1e30
def worthwhile(ui, costperop, nops):
    '''try to determine whether the benefit of multiple processes can
    outweigh the cost of starting them'''
    workers = _numworkers(ui)
    serialcost = costperop * nops
    # parallel run pays the startup cost plus 1/workers of the serial work
    parallelcost = _startupcost * workers + serialcost / workers
    return serialcost - parallelcost >= 0.15
def worker(ui, costperarg, func, staticargs, args):
    '''run a function, possibly in parallel in multiple worker
    processes.

    returns a progress iterator

    costperarg - cost of a single task
    func - function to run
    staticargs - arguments to pass to every invocation of the function
    args - arguments to split into chunks, to pass to individual
    workers
    '''
    # fall back to a single serial call when parallelism would not pay off
    if not worthwhile(ui, costperarg, len(args)):
        return func(*staticargs + (args,))
    return _platformworker(ui, func, staticargs, args)
def _posixworker(ui, func, staticargs, args):
rfd, wfd = os.pipe()
workers = _numworkers(ui)
oldhandler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
pids, problem = [], [0]
for pargs in partition(args, workers):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGINT, oldhandler)
try:
os.close(rfd)
for i, item in func(*(staticargs + (pargs,))):
os.write(wfd, '%d %s\n' % (i, item))
os._exit(0)
except KeyboardInterrupt:
os._exit(255)
except: # re-raises (close enough for debugging anyway)
try:
ui.traceback()
finally:
os._exit(255)
pids.append(pid)
pids.reverse()
os.close(wfd)
fp = os.fdopen(rfd, 'rb', 0)
def killworkers():
# if one worker bails, there's no good reason to wait for the rest
for p in pids:
try:
os.kill(p, signal.SIGTERM)
except OSError, err:
if err.errno != errno.ESRCH:
raise
def waitforworkers():
for _ in pids:
st = _exitstatus(os.wait()[1])
if st and not problem:
problem[0] = st
killworkers()
t = threading.Thread(target=waitforworkers)
t.start()
def cleanup():
signal.signal(signal.SIGINT, oldhandler)
t.join()
status = problem[0]
if status:
if status < 0:
os.kill(os.getpid(), -status)
sys.exit(status)
try:
for line in fp:
l = line.split(' ', 1)
yield int(l[0]), l[1][:-1]
except: # re-raises
killworkers()
cleanup()
raise
cleanup()
def _posixexitstatus(code):
'''convert a posix exit status into the same form returned by
os.spawnv
returns None if the process was stopped instead of exiting'''
if os.WIFEXITED(code):
return os.WEXITSTATUS(code)
elif os.WIFSIGNALED(code):
return -os.WTERMSIG(code)
# Select the platform implementation; only posix is provided so far.
# On Windows these names stay undefined, which is safe because
# _startupcost makes worthwhile() always pick the serial path there.
if os.name != 'nt':
    _platformworker = _posixworker
    _exitstatus = _posixexitstatus
def partition(lst, nslices):
    '''partition a list into N slices of as-equal-as-possible size

    The first ``len(lst) % nslices`` slices each receive one extra
    element, so every element of lst appears in exactly one slice.
    '''
    total = len(lst)
    # use floor division explicitly: on Python 2 ``total / nslices`` is
    # identical for ints, but ``//`` (and ``range`` below, iterated only)
    # also behaves correctly under Python 3
    chunk, slop = total // nslices, total % nslices
    end = 0
    for _unused in range(nslices):
        start = end
        end = start + chunk
        if slop:
            # hand the leftover elements to the earliest slices
            end += 1
            slop -= 1
        yield lst[start:end]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.